-rw-r--r--.mailmap2
-rw-r--r--Documentation/ABI/testing/sysfs-block-rssd12
-rw-r--r--Documentation/ABI/testing/sysfs-bus-fcoe77
-rw-r--r--Documentation/ABI/testing/sysfs-bus-i2c-devices-lm353315
-rw-r--r--Documentation/ABI/testing/sysfs-bus-rbd4
-rw-r--r--Documentation/ABI/testing/sysfs-class-backlight-driver-lm353348
-rw-r--r--Documentation/ABI/testing/sysfs-class-led-driver-lm353365
-rw-r--r--Documentation/ABI/testing/sysfs-driver-wacom15
-rw-r--r--Documentation/CodingStyle16
-rw-r--r--Documentation/DocBook/media/Makefile4
-rw-r--r--Documentation/DocBook/media/dvb/dvbproperty.xml160
-rw-r--r--Documentation/DocBook/media/v4l/biblio.xml29
-rw-r--r--Documentation/DocBook/media/v4l/common.xml38
-rw-r--r--Documentation/DocBook/media/v4l/compat.xml75
-rw-r--r--Documentation/DocBook/media/v4l/controls.xml708
-rw-r--r--Documentation/DocBook/media/v4l/dev-subdev.xml202
-rw-r--r--Documentation/DocBook/media/v4l/io.xml12
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-srggb10.xml2
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-srggb10dpcm8.xml29
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt.xml6
-rw-r--r--Documentation/DocBook/media/v4l/subdev-image-processing-crop.dia614
-rw-r--r--Documentation/DocBook/media/v4l/subdev-image-processing-crop.svg63
-rw-r--r--Documentation/DocBook/media/v4l/subdev-image-processing-full.dia1588
-rw-r--r--Documentation/DocBook/media/v4l/subdev-image-processing-full.svg163
-rw-r--r--Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.dia1152
-rw-r--r--Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.svg116
-rw-r--r--Documentation/DocBook/media/v4l/v4l2.xml44
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-create-bufs.xml16
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-cropcap.xml4
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml211
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enum-dv-presets.xml4
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml119
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enum-fmt.xml4
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enuminput.xml2
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enumoutput.xml2
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-crop.xml4
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-dv-preset.xml6
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml130
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml26
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-fmt.xml2
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-frequency.xml6
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-parm.xml5
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-sliced-vbi-cap.xml2
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-tuner.xml2
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-prepare-buf.xml6
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-query-dv-preset.xml4
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml104
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-queryctrl.xml41
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-reqbufs.xml7
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-s-hw-freq-seek.xml5
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subdev-g-crop.xml9
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml228
-rw-r--r--Documentation/SubmittingPatches3
-rw-r--r--Documentation/arm/SPEAr/overview.txt32
-rw-r--r--Documentation/cgroups/memory.txt37
-rw-r--r--Documentation/cgroups/resource_counter.txt8
-rw-r--r--Documentation/cris/README62
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.txt12
-rw-r--r--Documentation/devicetree/bindings/arm/gic.txt35
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt52
-rw-r--r--Documentation/devicetree/bindings/arm/spear-timer.txt18
-rw-r--r--Documentation/devicetree/bindings/arm/spear.txt14
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt11
-rw-r--r--Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt19
-rw-r--r--Documentation/devicetree/bindings/dma/snps-dma.txt17
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt38
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mxs.txt87
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt42
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio_lpc32xx.txt43
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mxs.txt16
-rw-r--r--Documentation/devicetree/bindings/i2c/mux.txt60
-rw-r--r--Documentation/devicetree/bindings/i2c/samsung-i2c.txt8
-rw-r--r--Documentation/devicetree/bindings/i2c/xiic.txt22
-rw-r--r--Documentation/devicetree/bindings/input/spear-keyboard.txt20
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/lpc32xx-tsc.txt16
-rw-r--r--Documentation/devicetree/bindings/input/twl6040-vibra.txt37
-rw-r--r--Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt14
-rw-r--r--Documentation/devicetree/bindings/mfd/da9052-i2c.txt60
-rw-r--r--Documentation/devicetree/bindings/mfd/tps65910.txt133
-rw-r--r--Documentation/devicetree/bindings/mfd/twl6040.txt62
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-esdhc.txt6
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt3
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc.txt27
-rw-r--r--Documentation/devicetree/bindings/mmc/mmci.txt19
-rw-r--r--Documentation/devicetree/bindings/mmc/mxs-mmc.txt25
-rw-r--r--Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt4
-rw-r--r--Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt4
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fec.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt47
-rw-r--r--Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt15
-rw-r--r--Documentation/devicetree/bindings/rtc/spear-rtc.txt17
-rw-r--r--Documentation/devicetree/bindings/sound/omap-dmic.txt21
-rw-r--r--Documentation/devicetree/bindings/sound/omap-mcpdm.txt21
-rw-r--r--Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/tegra-usb.txt3
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/devicetree/booting-without-of.txt55
-rw-r--r--Documentation/dma-buf-sharing.txt109
-rwxr-xr-xDocumentation/dvb/get_dvb_firmware20
-rw-r--r--Documentation/feature-removal-schedule.txt45
-rw-r--r--Documentation/filesystems/Locking2
-rw-r--r--Documentation/filesystems/ext3.txt6
-rw-r--r--Documentation/filesystems/porting16
-rw-r--r--Documentation/filesystems/proc.txt25
-rw-r--r--Documentation/filesystems/vfs.txt13
-rw-r--r--Documentation/gpio.txt3
-rw-r--r--Documentation/i2c/functionality9
-rw-r--r--Documentation/i2c/i2c-protocol9
-rw-r--r--Documentation/i2c/muxes/i2c-mux-gpio (renamed from Documentation/i2c/muxes/gpio-i2cmux)12
-rw-r--r--Documentation/initrd.txt4
-rw-r--r--Documentation/kbuild/kbuild.txt19
-rw-r--r--Documentation/kbuild/kconfig.txt18
-rw-r--r--Documentation/kernel-parameters.txt33
-rw-r--r--Documentation/leds/ledtrig-transient.txt152
-rw-r--r--Documentation/media-framework.txt19
-rw-r--r--Documentation/nfc/nfc-hci.txt45
-rw-r--r--Documentation/power/charger-manager.txt41
-rw-r--r--Documentation/power/power_supply_class.txt2
-rw-r--r--Documentation/sysctl/fs.txt7
-rw-r--r--Documentation/trace/uprobetracer.txt113
-rw-r--r--Documentation/video4linux/4CCs.txt32
-rw-r--r--Documentation/video4linux/gspca.txt1
-rw-r--r--Documentation/video4linux/v4l2-controls.txt21
-rw-r--r--Documentation/video4linux/v4l2-framework.txt106
-rw-r--r--Documentation/virtual/kvm/api.txt281
-rw-r--r--Documentation/virtual/kvm/cpuid.txt6
-rw-r--r--Documentation/virtual/kvm/msr.txt4
-rw-r--r--Documentation/vm/pagemap.txt2
-rw-r--r--Documentation/vm/transhuge.txt62
-rw-r--r--Documentation/watchdog/watchdog-kernel-api.txt43
-rw-r--r--Documentation/watchdog/watchdog-parameters.txt5
-rw-r--r--MAINTAINERS78
-rw-r--r--Makefile231
-rw-r--r--arch/Kconfig20
-rw-r--r--arch/alpha/Kconfig4
-rw-r--r--arch/alpha/include/asm/gpio.h59
-rw-r--r--arch/alpha/include/asm/io.h5
-rw-r--r--arch/alpha/include/asm/kvm_para.h1
-rw-r--r--arch/alpha/include/asm/sysinfo.h1
-rw-r--r--arch/alpha/include/asm/unistd.h6
-rw-r--r--arch/alpha/kernel/osf_sys.c166
-rw-r--r--arch/alpha/kernel/systbls.S10
-rw-r--r--arch/arm/Kconfig39
-rw-r--r--arch/arm/Kconfig.debug37
-rw-r--r--arch/arm/Makefile8
-rw-r--r--arch/arm/boot/dts/exynos5250-smdk5250.dts48
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi76
-rw-r--r--arch/arm/boot/dts/imx23-evk.dts43
-rw-r--r--arch/arm/boot/dts/imx23.dtsi295
-rw-r--r--arch/arm/boot/dts/imx27-phytec-phycore.dts8
-rw-r--r--arch/arm/boot/dts/imx27.dtsi14
-rw-r--r--arch/arm/boot/dts/imx28-evk.dts114
-rw-r--r--arch/arm/boot/dts/imx28.dtsi497
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts40
-rw-r--r--arch/arm/boot/dts/imx51.dtsi41
-rw-r--r--arch/arm/boot/dts/imx53-ard.dts6
-rw-r--r--arch/arm/boot/dts/imx53-evk.dts8
-rw-r--r--arch/arm/boot/dts/imx53-qsb.dts121
-rw-r--r--arch/arm/boot/dts/imx53-smd.dts16
-rw-r--r--arch/arm/boot/dts/imx53.dtsi45
-rw-r--r--arch/arm/boot/dts/imx6q-arm2.dts15
-rw-r--r--arch/arm/boot/dts/imx6q-sabrelite.dts50
-rw-r--r--arch/arm/boot/dts/imx6q-sabresd.dts53
-rw-r--r--arch/arm/boot/dts/imx6q.dtsi171
-rw-r--r--arch/arm/boot/dts/lpc32xx.dtsi41
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts2
-rw-r--r--arch/arm/boot/dts/omap4-panda.dts4
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts6
-rw-r--r--arch/arm/boot/dts/phy3250.dts4
-rw-r--r--arch/arm/boot/dts/spear1310-evb.dts292
-rw-r--r--arch/arm/boot/dts/spear1310.dtsi184
-rw-r--r--arch/arm/boot/dts/spear1340-evb.dts308
-rw-r--r--arch/arm/boot/dts/spear1340.dtsi56
-rw-r--r--arch/arm/boot/dts/spear13xx.dtsi262
-rw-r--r--arch/arm/boot/dts/spear300-evb.dts25
-rw-r--r--arch/arm/boot/dts/spear310-evb.dts20
-rw-r--r--arch/arm/boot/dts/spear320-evb.dts25
-rw-r--r--arch/arm/boot/dts/spear3xx.dtsi6
-rw-r--r--arch/arm/boot/dts/spear600-evb.dts29
-rw-r--r--arch/arm/boot/dts/spear600.dtsi6
-rw-r--r--arch/arm/boot/dts/tegra-cardhu.dts110
-rw-r--r--arch/arm/boot/dts/tegra-harmony.dts118
-rw-r--r--arch/arm/boot/dts/tegra-paz00.dts128
-rw-r--r--arch/arm/boot/dts/tegra-seaboard.dts213
-rw-r--r--arch/arm/boot/dts/tegra-trimslice.dts99
-rw-r--r--arch/arm/boot/dts/tegra-ventana.dts96
-rw-r--r--arch/arm/boot/dts/tegra20.dtsi275
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi305
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts13
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca5s.dts13
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca9.dts9
-rw-r--r--arch/arm/common/dmabounce.c84
-rw-r--r--arch/arm/configs/imx_v4_v5_defconfig3
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig8
-rw-r--r--arch/arm/configs/mxs_defconfig1
-rw-r--r--arch/arm/configs/prima2_defconfig69
-rw-r--r--arch/arm/configs/spear13xx_defconfig95
-rw-r--r--arch/arm/configs/spear3xx_defconfig4
-rw-r--r--arch/arm/configs/spear6xx_defconfig5
-rw-r--r--arch/arm/configs/tegra_defconfig11
-rw-r--r--arch/arm/include/asm/device.h4
-rw-r--r--arch/arm/include/asm/dma-contiguous.h15
-rw-r--r--arch/arm/include/asm/dma-iommu.h34
-rw-r--r--arch/arm/include/asm/dma-mapping.h407
-rw-r--r--arch/arm/include/asm/hardware/pl080.h2
-rw-r--r--arch/arm/include/asm/io.h24
-rw-r--r--arch/arm/include/asm/kvm_para.h1
-rw-r--r--arch/arm/include/asm/mach/arch.h1
-rw-r--r--arch/arm/include/asm/mach/map.h1
-rw-r--r--arch/arm/include/asm/thread_info.h8
-rw-r--r--arch/arm/kernel/entry-common.S8
-rw-r--r--arch/arm/kernel/ptrace.c3
-rw-r--r--arch/arm/kernel/setup.c17
-rw-r--r--arch/arm/kernel/signal.c85
-rw-r--r--arch/arm/kernel/signal.h2
-rw-r--r--arch/arm/kernel/smp.c8
-rw-r--r--arch/arm/kernel/traps.c2
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c1
-rw-r--r--arch/arm/mach-at91/include/mach/at_hdmac.h26
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm355-leopard.c1
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-mityomapl138.c1
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c1
-rw-r--r--arch/arm/mach-davinci/board-omapl138-hawk.c1
-rw-r--r--arch/arm/mach-davinci/board-sffsdr.c1
-rw-r--r--arch/arm/mach-davinci/board-tnetv107x-evm.c1
-rw-r--r--arch/arm/mach-davinci/clock.c3
-rw-r--r--arch/arm/mach-davinci/common.c7
-rw-r--r--arch/arm/mach-davinci/cpufreq.c3
-rw-r--r--arch/arm/mach-davinci/dma.c69
-rw-r--r--arch/arm/mach-davinci/include/mach/common.h19
-rw-r--r--arch/arm/mach-davinci/include/mach/debug-macro.S58
-rw-r--r--arch/arm/mach-davinci/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-davinci/include/mach/serial.h10
-rw-r--r--arch/arm/mach-davinci/include/mach/uncompress.h30
-rw-r--r--arch/arm/mach-davinci/pm.c3
-rw-r--r--arch/arm/mach-dove/common.c39
-rw-r--r--arch/arm/mach-dove/dove-db-setup.c1
-rw-r--r--arch/arm/mach-ep93xx/adssphere.c1
-rw-r--r--arch/arm/mach-ep93xx/core.c7
-rw-r--r--arch/arm/mach-ep93xx/crunch.c4
-rw-r--r--arch/arm/mach-ep93xx/edb93xx.c8
-rw-r--r--arch/arm/mach-ep93xx/gesbc9312.c1
-rw-r--r--arch/arm/mach-ep93xx/include/mach/platform.h7
-rw-r--r--arch/arm/mach-ep93xx/micro9.c4
-rw-r--r--arch/arm/mach-ep93xx/simone.c1
-rw-r--r--arch/arm/mach-ep93xx/snappercl15.c1
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c1
-rw-r--r--arch/arm/mach-ep93xx/vision_ep9307.c1
-rw-r--r--arch/arm/mach-exynos/Kconfig26
-rw-r--r--arch/arm/mach-exynos/Makefile9
-rw-r--r--arch/arm/mach-exynos/Makefile.boot3
-rw-r--r--arch/arm/mach-exynos/clock-exynos4.c79
-rw-r--r--arch/arm/mach-exynos/clock-exynos4.h2
-rw-r--r--arch/arm/mach-exynos/clock-exynos4210.c11
-rw-r--r--arch/arm/mach-exynos/clock-exynos4212.c38
-rw-r--r--arch/arm/mach-exynos/clock-exynos5.c192
-rw-r--r--arch/arm/mach-exynos/common.c187
-rw-r--r--arch/arm/mach-exynos/common.h7
-rw-r--r--arch/arm/mach-exynos/cpuidle.c2
-rw-r--r--arch/arm/mach-exynos/dev-drm.c29
-rw-r--r--arch/arm/mach-exynos/dev-sysmmu.c457
-rw-r--r--arch/arm/mach-exynos/dma.c141
-rw-r--r--arch/arm/mach-exynos/include/mach/gpio.h9
-rw-r--r--arch/arm/mach-exynos/include/mach/irqs.h65
-rw-r--r--arch/arm/mach-exynos/include/mach/map.h45
-rw-r--r--arch/arm/mach-exynos/include/mach/pm-core.h2
-rw-r--r--arch/arm/mach-exynos/include/mach/pmu.h4
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-clock.h25
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-pmu.h151
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-sysmmu.h28
-rw-r--r--arch/arm/mach-exynos/include/mach/spi-clocks.h2
-rw-r--r--arch/arm/mach-exynos/include/mach/sysmmu.h88
-rw-r--r--arch/arm/mach-exynos/mach-armlex4210.c2
-rw-r--r--arch/arm/mach-exynos/mach-exynos4-dt.c1
-rw-r--r--arch/arm/mach-exynos/mach-exynos5-dt.c5
-rw-r--r--arch/arm/mach-exynos/mach-nuri.c1
-rw-r--r--arch/arm/mach-exynos/mach-origen.c1
-rw-r--r--arch/arm/mach-exynos/mach-smdk4x12.c1
-rw-r--r--arch/arm/mach-exynos/mach-smdkv310.c2
-rw-r--r--arch/arm/mach-exynos/mach-universal_c210.c1
-rw-r--r--arch/arm/mach-exynos/mct.c17
-rw-r--r--arch/arm/mach-exynos/pm.c225
-rw-r--r--arch/arm/mach-exynos/pm_domains.c3
-rw-r--r--arch/arm/mach-exynos/pmu.c218
-rw-r--r--arch/arm/mach-imx/Kconfig8
-rw-r--r--arch/arm/mach-imx/Makefile19
-rw-r--r--arch/arm/mach-imx/Makefile.boot3
-rw-r--r--arch/arm/mach-imx/clk-busy.c189
-rw-r--r--arch/arm/mach-imx/clk-gate2.c118
-rw-r--r--arch/arm/mach-imx/clk-imx1.c115
-rw-r--r--arch/arm/mach-imx/clk-imx21.c186
-rw-r--r--arch/arm/mach-imx/clk-imx25.c248
-rw-r--r--arch/arm/mach-imx/clk-imx27.c290
-rw-r--r--arch/arm/mach-imx/clk-imx31.c182
-rw-r--r--arch/arm/mach-imx/clk-imx35.c278
-rw-r--r--arch/arm/mach-imx/clk-imx51-imx53.c506
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c444
-rw-r--r--arch/arm/mach-imx/clk-pfd.c147
-rw-r--r--arch/arm/mach-imx/clk-pllv1.c66
-rw-r--r--arch/arm/mach-imx/clk-pllv2.c249
-rw-r--r--arch/arm/mach-imx/clk-pllv3.c419
-rw-r--r--arch/arm/mach-imx/clk.h83
-rw-r--r--arch/arm/mach-imx/clock-imx1.c636
-rw-r--r--arch/arm/mach-imx/clock-imx21.c1239
-rw-r--r--arch/arm/mach-imx/clock-imx25.c346
-rw-r--r--arch/arm/mach-imx/clock-imx27.c785
-rw-r--r--arch/arm/mach-imx/clock-imx31.c630
-rw-r--r--arch/arm/mach-imx/clock-imx35.c536
-rw-r--r--arch/arm/mach-imx/clock-imx6q.c2111
-rw-r--r--arch/arm/mach-imx/clock-mx51-mx53.c1675
-rw-r--r--arch/arm/mach-imx/cpu-imx5.c6
-rw-r--r--arch/arm/mach-imx/crmregs-imx3.h79
-rw-r--r--arch/arm/mach-imx/imx51-dt.c1
-rw-r--r--arch/arm/mach-imx/imx53-dt.c19
-rw-r--r--arch/arm/mach-imx/lluart.c6
-rw-r--r--arch/arm/mach-imx/mach-cpuimx51sd.c1
-rw-r--r--arch/arm/mach-imx/mach-imx27_visstrim_m10.c2
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c55
-rw-r--r--arch/arm/mach-imx/mach-mx35_3ds.c3
-rw-r--r--arch/arm/mach-imx/mach-mx51_3ds.c1
-rw-r--r--arch/arm/mach-imx/mach-mx51_babbage.c7
-rw-r--r--arch/arm/mach-imx/mach-mx51_efikamx.c42
-rw-r--r--arch/arm/mach-imx/mach-mx51_efikasb.c28
-rw-r--r--arch/arm/mach-imx/mach-pcm037.c6
-rw-r--r--arch/arm/mach-imx/mach-pcm037_eet.c5
-rw-r--r--arch/arm/mach-imx/mm-imx3.c6
-rw-r--r--arch/arm/mach-imx/mm-imx5.c6
-rw-r--r--arch/arm/mach-imx/pcm037.h6
-rw-r--r--arch/arm/mach-imx/pm-imx3.c4
-rw-r--r--arch/arm/mach-ixp4xx/common.c48
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/gpio.h79
-rw-r--r--arch/arm/mach-kirkwood/board-dreamplug.c1
-rw-r--r--arch/arm/mach-kirkwood/board-dt.c3
-rw-r--r--arch/arm/mach-kirkwood/common.c286
-rw-r--r--arch/arm/mach-kirkwood/common.h1
-rw-r--r--arch/arm/mach-kirkwood/include/mach/bridge-regs.h16
-rw-r--r--arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/pcie.c25
-rw-r--r--arch/arm/mach-kirkwood/rd88f6192-nas-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/t5325-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/tsx1x-common.c1
-rw-r--r--arch/arm/mach-lpc32xx/include/mach/gpio.h9
-rw-r--r--arch/arm/mach-msm/board-halibut.c6
-rw-r--r--arch/arm/mach-msm/board-mahimahi.c6
-rw-r--r--arch/arm/mach-msm/board-msm7x27.c9
-rw-r--r--arch/arm/mach-msm/board-msm7x30.c8
-rw-r--r--arch/arm/mach-msm/board-msm8960.c7
-rw-r--r--arch/arm/mach-msm/board-msm8x60.c10
-rw-r--r--arch/arm/mach-msm/board-qsd8x50.c7
-rw-r--r--arch/arm/mach-msm/board-sapphire.c6
-rw-r--r--arch/arm/mach-msm/board-trout.c6
-rw-r--r--arch/arm/mach-msm/include/mach/board.h6
-rw-r--r--arch/arm/mach-msm/smd_debug.c3
-rw-r--r--arch/arm/mach-mv78xx0/common.c45
-rw-r--r--arch/arm/mach-mxs/Kconfig10
-rw-r--r--arch/arm/mach-mxs/Makefile6
-rw-r--r--arch/arm/mach-mxs/clock-mx23.c536
-rw-r--r--arch/arm/mach-mxs/clock-mx28.c803
-rw-r--r--arch/arm/mach-mxs/clock.c211
-rw-r--r--arch/arm/mach-mxs/devices/Kconfig1
-rw-r--r--arch/arm/mach-mxs/devices/platform-dma.c21
-rw-r--r--arch/arm/mach-mxs/devices/platform-gpio-mxs.c24
-rw-r--r--arch/arm/mach-mxs/devices/platform-mxs-mmc.c21
-rw-r--r--arch/arm/mach-mxs/include/mach/clock.h62
-rw-r--r--arch/arm/mach-mxs/include/mach/common.h11
-rw-r--r--arch/arm/mach-mxs/include/mach/devices-common.h3
-rw-r--r--arch/arm/mach-mxs/mach-mx28evk.c2
-rw-r--r--arch/arm/mach-mxs/mach-mxs.c121
-rw-r--r--arch/arm/mach-mxs/mm.c16
-rw-r--r--arch/arm/mach-mxs/regs-clkctrl-mx23.h331
-rw-r--r--arch/arm/mach-mxs/regs-clkctrl-mx28.h486
-rw-r--r--arch/arm/mach-mxs/system.c16
-rw-r--r--arch/arm/mach-mxs/timer.c11
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c8
-rw-r--r--arch/arm/mach-omap1/board-fsample.c1
-rw-r--r--arch/arm/mach-omap1/board-generic.c1
-rw-r--r--arch/arm/mach-omap1/board-h2.c1
-rw-r--r--arch/arm/mach-omap1/board-h3.c1
-rw-r--r--arch/arm/mach-omap1/board-htcherald.c1
-rw-r--r--arch/arm/mach-omap1/board-innovator.c1
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c1
-rw-r--r--arch/arm/mach-omap1/board-osk.c1
-rw-r--r--arch/arm/mach-omap1/board-palmte.c1
-rw-r--r--arch/arm/mach-omap1/board-palmtt.c1
-rw-r--r--arch/arm/mach-omap1/board-palmz71.c1
-rw-r--r--arch/arm/mach-omap1/board-perseus2.c1
-rw-r--r--arch/arm/mach-omap1/board-sx1.c1
-rw-r--r--arch/arm/mach-omap1/board-voiceblue.c1
-rw-r--r--arch/arm/mach-omap1/common.h19
-rw-r--r--arch/arm/mach-omap1/devices.c121
-rw-r--r--arch/arm/mach-omap1/gpio15xx.c2
-rw-r--r--arch/arm/mach-omap1/gpio16xx.c5
-rw-r--r--arch/arm/mach-omap1/gpio7xx.c7
-rw-r--r--arch/arm/mach-omap1/io.c5
-rw-r--r--arch/arm/mach-omap1/serial.c3
-rw-r--r--arch/arm/mach-omap1/time.c16
-rw-r--r--arch/arm/mach-omap1/timer32k.c28
-rw-r--r--arch/arm/mach-omap2/Kconfig8
-rw-r--r--arch/arm/mach-omap2/Makefile167
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c1
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c1
-rw-r--r--arch/arm/mach-omap2/board-3630sdp.c1
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c1
-rw-r--r--arch/arm/mach-omap2/board-am3517crane.c1
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c1
-rw-r--r--arch/arm/mach-omap2/board-apollon.c1
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c2
-rw-r--r--arch/arm/mach-omap2/board-cm-t3517.c1
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c1
-rw-r--r--arch/arm/mach-omap2/board-generic.c1
-rw-r--r--arch/arm/mach-omap2/board-h4.c1
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c2
-rw-r--r--arch/arm/mach-omap2/board-ldp.c1
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c3
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c1
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c1
-rw-r--r--arch/arm/mach-omap2/board-omap3logic.c2
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c1
-rw-r--r--arch/arm/mach-omap2/board-omap3stalker.c1
-rw-r--r--arch/arm/mach-omap2/board-omap3touchbook.c1
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c1
-rw-r--r--arch/arm/mach-omap2/board-overo.c1
-rw-r--r--arch/arm/mach-omap2/board-rm680.c2
-rw-r--r--arch/arm/mach-omap2/board-rx51.c1
-rw-r--r--arch/arm/mach-omap2/board-ti8168evm.c2
-rw-r--r--arch/arm/mach-omap2/board-zoom.c2
-rw-r--r--arch/arm/mach-omap2/common.h51
-rw-r--r--arch/arm/mach-omap2/devices.c19
-rw-r--r--arch/arm/mach-omap2/dma.c11
-rw-r--r--arch/arm/mach-omap2/dsp.c27
-rw-r--r--arch/arm/mach-omap2/gpio.c3
-rw-r--r--arch/arm/mach-omap2/gpmc.c30
-rw-r--r--arch/arm/mach-omap2/hsmmc.c8
-rw-r--r--arch/arm/mach-omap2/id.c7
-rw-r--r--arch/arm/mach-omap2/include/mach/omap-wakeupgen.h8
-rw-r--r--arch/arm/mach-omap2/io.c101
-rw-r--r--arch/arm/mach-omap2/iomap.h28
-rw-r--r--arch/arm/mach-omap2/irq.c2
-rw-r--r--arch/arm/mach-omap2/mux.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/pm.c3
-rw-r--r--arch/arm/mach-omap2/pm24xx.c17
-rw-r--r--arch/arm/mach-omap2/pm34xx.c7
-rw-r--r--arch/arm/mach-omap2/pm44xx.c6
-rw-r--r--arch/arm/mach-omap2/powerdomains3xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/timer.c118
-rw-r--r--arch/arm/mach-omap2/usb-musb.c2
-rw-r--r--arch/arm/mach-omap2/voltagedomains3xxx_data.c2
-rw-r--r--arch/arm/mach-orion5x/common.c27
-rw-r--r--arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c1
-rw-r--r--arch/arm/mach-pnx4008/core.c12
-rw-r--r--arch/arm/mach-pnx4008/pm.c4
-rw-r--r--arch/arm/mach-prima2/common.h6
-rw-r--r--arch/arm/mach-prima2/pm.c3
-rw-r--r--arch/arm/mach-prima2/prima2.c6
-rw-r--r--arch/arm/mach-s3c24xx/Kconfig5
-rw-r--r--arch/arm/mach-s3c24xx/Makefile7
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2416.c1
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2443.c6
-rw-r--r--arch/arm/mach-s3c24xx/common-s3c2443.c15
-rw-r--r--arch/arm/mach-s3c24xx/common.c (renamed from arch/arm/plat-s3c24xx/cpu.c)69
-rw-r--r--arch/arm/mach-s3c24xx/dma-s3c2443.c16
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/dma.h4
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/irqs.h15
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/map.h5
-rw-r--r--arch/arm/mach-s3c24xx/irq-pm.c (renamed from arch/arm/plat-s3c24xx/irq-pm.c)0
-rw-r--r--arch/arm/mach-s3c24xx/irq-s3c2416.c98
-rw-r--r--arch/arm/mach-s3c24xx/pm.c (renamed from arch/arm/plat-s3c24xx/pm.c)0
-rw-r--r--arch/arm/mach-s3c24xx/s3c2416.c1
-rw-r--r--arch/arm/mach-s3c24xx/setup-spi.c39
-rw-r--r--arch/arm/mach-s3c24xx/sleep.S (renamed from arch/arm/plat-s3c24xx/sleep.S)0
-rw-r--r--arch/arm/mach-s3c64xx/common.c5
-rw-r--r--arch/arm/mach-s3c64xx/common.h7
-rw-r--r--arch/arm/mach-s3c64xx/cpuidle.c45
-rw-r--r--arch/arm/mach-s3c64xx/mach-anw6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410-module.c9
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-hmt.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-mini6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-ncp.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-real6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq5.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq7.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6400.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/pm.c3
-rw-r--r--arch/arm/mach-sa1100/assabet.c1
-rw-r--r--arch/arm/mach-sa1100/badge4.c1
-rw-r--r--arch/arm/mach-sa1100/cerf.c1
-rw-r--r--arch/arm/mach-sa1100/collie.c1
-rw-r--r--arch/arm/mach-sa1100/generic.c4
-rw-r--r--arch/arm/mach-sa1100/generic.h7
-rw-r--r--arch/arm/mach-sa1100/h3100.c1
-rw-r--r--arch/arm/mach-sa1100/h3600.c1
-rw-r--r--arch/arm/mach-sa1100/hackkit.c1
-rw-r--r--arch/arm/mach-sa1100/jornada720.c1
-rw-r--r--arch/arm/mach-sa1100/lart.c1
-rw-r--r--arch/arm/mach-sa1100/nanoengine.c1
-rw-r--r--arch/arm/mach-sa1100/neponset.c1
-rw-r--r--arch/arm/mach-sa1100/pleb.c1
-rw-r--r--arch/arm/mach-sa1100/pm.c4
-rw-r--r--arch/arm/mach-sa1100/shannon.c1
-rw-r--r--arch/arm/mach-sa1100/simpad.c1
-rw-r--r--arch/arm/mach-shmobile/Makefile2
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c1
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c1
-rw-r--r--arch/arm/mach-shmobile/board-bonito.c1
-rw-r--r--arch/arm/mach-shmobile/board-g3evm.c1
-rw-r--r--arch/arm/mach-shmobile/board-g4evm.c1
-rw-r--r--arch/arm/mach-shmobile/board-kota2.c1
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c1
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c1
-rw-r--r--arch/arm/mach-shmobile/common.c24
-rw-r--r--arch/arm/mach-shmobile/cpuidle.c3
-rw-r--r--arch/arm/mach-shmobile/include/mach/common.h14
-rw-r--r--arch/arm/mach-shmobile/suspend.c3
-rw-r--r--arch/arm/mach-spear13xx/Kconfig20
-rw-r--r--arch/arm/mach-spear13xx/Makefile10
-rw-r--r--arch/arm/mach-spear13xx/Makefile.boot6
-rw-r--r--arch/arm/mach-spear13xx/headsmp.S47
-rw-r--r--arch/arm/mach-spear13xx/hotplug.c119
-rw-r--r--arch/arm/mach-spear13xx/include/mach/debug-macro.S14
-rw-r--r--arch/arm/mach-spear13xx/include/mach/dma.h128
-rw-r--r--arch/arm/mach-spear13xx/include/mach/generic.h49
-rw-r--r--arch/arm/mach-spear13xx/include/mach/gpio.h19
-rw-r--r--arch/arm/mach-spear13xx/include/mach/hardware.h1
-rw-r--r--arch/arm/mach-spear13xx/include/mach/irqs.h20
-rw-r--r--arch/arm/mach-spear13xx/include/mach/spear.h62
-rw-r--r--arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h0
-rw-r--r--arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h0
-rw-r--r--arch/arm/mach-spear13xx/include/mach/timex.h19
-rw-r--r--arch/arm/mach-spear13xx/include/mach/uncompress.h19
-rw-r--r--arch/arm/mach-spear13xx/platsmp.c127
-rw-r--r--arch/arm/mach-spear13xx/spear1310.c88
-rw-r--r--arch/arm/mach-spear13xx/spear1340.c192
-rw-r--r--arch/arm/mach-spear13xx/spear13xx.c197
-rw-r--r--arch/arm/mach-spear3xx/Makefile2
-rw-r--r--arch/arm/mach-spear3xx/clock.c892
-rw-r--r--arch/arm/mach-spear3xx/include/mach/generic.h21
-rw-r--r--arch/arm/mach-spear3xx/include/mach/irqs.h1
-rw-r--r--arch/arm/mach-spear3xx/include/mach/misc_regs.h2
-rw-r--r--arch/arm/mach-spear3xx/include/mach/spear.h14
-rw-r--r--arch/arm/mach-spear3xx/spear300.c1
-rw-r--r--arch/arm/mach-spear3xx/spear310.c1
-rw-r--r--arch/arm/mach-spear3xx/spear320.c12
-rw-r--r--arch/arm/mach-spear3xx/spear3xx.c4
-rw-r--r--arch/arm/mach-spear6xx/Makefile2
-rw-r--r--arch/arm/mach-spear6xx/clock.c789
-rw-r--r--arch/arm/mach-spear6xx/include/mach/generic.h2
-rw-r--r--arch/arm/mach-spear6xx/include/mach/irqs.h3
-rw-r--r--arch/arm/mach-spear6xx/include/mach/misc_regs.h2
-rw-r--r--arch/arm/mach-spear6xx/include/mach/spear.h1
-rw-r--r--arch/arm/mach-spear6xx/spear6xx.c7
-rw-r--r--arch/arm/mach-tegra/Kconfig37
-rw-r--r--arch/arm/mach-tegra/board-dt-tegra20.c1
-rw-r--r--arch/arm/mach-tegra/board-dt-tegra30.c11
-rw-r--r--arch/arm/mach-tegra/board-harmony.c1
-rw-r--r--arch/arm/mach-tegra/board-paz00.c4
-rw-r--r--arch/arm/mach-tegra/board-seaboard.c3
-rw-r--r--arch/arm/mach-tegra/board-trimslice.c3
-rw-r--r--arch/arm/mach-tegra/board.h14
-rw-r--r--arch/arm/mach-tegra/clock.c3
-rw-r--r--arch/arm/mach-tegra/common.c28
-rw-r--r--arch/arm/mach-tegra/devices.c5
-rw-r--r--arch/arm/mach-tegra/devices.h4
-rw-r--r--arch/arm/mach-tegra/include/mach/tegra-ahb.h19
-rw-r--r--arch/arm/mach-tegra/include/mach/uncompress.h176
-rw-r--r--arch/arm/mach-tegra/include/mach/usb_phy.h4
-rw-r--r--arch/arm/mach-tegra/powergate.c4
-rw-r--r--arch/arm/mach-tegra/tegra2_clocks.c4
-rw-r--r--arch/arm/mach-tegra/tegra30_clocks.c9
-rw-r--r--arch/arm/mach-tegra/usb_phy.c15
-rw-r--r--arch/arm/mach-ux500/board-mop500.c6
-rw-r--r--arch/arm/mach-ux500/clock.c6
-rw-r--r--arch/arm/mach-ux500/clock.h12
-rw-r--r--arch/arm/mach-ux500/cpu.c6
-rw-r--r--arch/arm/mach-ux500/include/mach/setup.h1
-rw-r--r--arch/arm/mach-vexpress/v2m.c2
-rw-r--r--arch/arm/mm/dma-mapping.c1348
-rw-r--r--arch/arm/mm/init.c23
-rw-r--r--arch/arm/mm/mm.h3
-rw-r--r--arch/arm/mm/mmu.c31
-rw-r--r--arch/arm/mm/vmregion.h2
-rw-r--r--arch/arm/plat-mxc/clock.c11
-rw-r--r--arch/arm/plat-mxc/include/mach/clock.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/common.h7
-rw-r--r--arch/arm/plat-mxc/include/mach/debug-macro.S2
-rw-r--r--arch/arm/plat-mxc/include/mach/mx2_cam.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/mx6q.h2
-rw-r--r--arch/arm/plat-mxc/time.c14
-rw-r--r--arch/arm/plat-omap/counter_32k.c93
-rw-r--r--arch/arm/plat-omap/devices.c122
-rw-r--r--arch/arm/plat-omap/dma.c4
-rw-r--r--arch/arm/plat-omap/dmtimer.c2
-rw-r--r--arch/arm/plat-omap/include/plat/common.h2
-rw-r--r--arch/arm/plat-omap/include/plat/cpu.h8
-rw-r--r--arch/arm/plat-omap/include/plat/dma.h5
-rw-r--r--arch/arm/plat-omap/include/plat/dmtimer.h1
-rw-r--r--arch/arm/plat-omap/include/plat/gpio.h3
-rw-r--r--arch/arm/plat-omap/include/plat/mmc.h9
-rw-r--r--arch/arm/plat-orion/common.c104
-rw-r--r--arch/arm/plat-orion/include/plat/common.h34
-rw-r--r--arch/arm/plat-orion/include/plat/orion_wdt.h18
-rw-r--r--arch/arm/plat-orion/pcie.c4
-rw-r--r--arch/arm/plat-pxa/include/plat/pxa27x_keypad.h4
-rw-r--r--arch/arm/plat-s3c24xx/Makefile6
-rw-r--r--arch/arm/plat-s3c24xx/clock.c59
-rw-r--r--arch/arm/plat-s3c24xx/dev-uart.c100
-rw-r--r--arch/arm/plat-s5p/Kconfig140
-rw-r--r--arch/arm/plat-s5p/Makefile28
-rw-r--r--arch/arm/plat-s5p/sysmmu.c313
-rw-r--r--arch/arm/plat-samsung/Kconfig142
-rw-r--r--arch/arm/plat-samsung/Makefile13
-rw-r--r--arch/arm/plat-samsung/include/plat/cpu.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/devs.h3
-rw-r--r--arch/arm/plat-samsung/include/plat/dma-pl330.h1
-rw-r--r--arch/arm/plat-samsung/include/plat/s3c2416.h3
-rw-r--r--arch/arm/plat-samsung/include/plat/s5p-clock.h4
-rw-r--r--arch/arm/plat-samsung/include/plat/sysmmu.h95
-rw-r--r--arch/arm/plat-samsung/s5p-clock.c (renamed from arch/arm/plat-s5p/clock.c)33
-rw-r--r--arch/arm/plat-samsung/s5p-dev-mfc.c (renamed from arch/arm/plat-s5p/dev-mfc.c)4
-rw-r--r--arch/arm/plat-samsung/s5p-dev-uart.c (renamed from arch/arm/plat-s5p/dev-uart.c)78
-rw-r--r--arch/arm/plat-samsung/s5p-irq-eint.c (renamed from arch/arm/plat-s5p/irq-eint.c)3
-rw-r--r--arch/arm/plat-samsung/s5p-irq-gpioint.c (renamed from arch/arm/plat-s5p/irq-gpioint.c)3
-rw-r--r--arch/arm/plat-samsung/s5p-irq-pm.c (renamed from arch/arm/plat-s5p/irq-pm.c)3
-rw-r--r--arch/arm/plat-samsung/s5p-irq.c (renamed from arch/arm/plat-s5p/irq.c)3
-rw-r--r--arch/arm/plat-samsung/s5p-pm.c (renamed from arch/arm/plat-s5p/pm.c)3
-rw-r--r--arch/arm/plat-samsung/s5p-sleep.S (renamed from arch/arm/plat-s5p/sleep.S)3
-rw-r--r--arch/arm/plat-samsung/s5p-time.c (renamed from arch/arm/plat-s5p/s5p-time.c)3
-rw-r--r--arch/arm/plat-samsung/setup-mipiphy.c (renamed from arch/arm/plat-s5p/setup-mipiphy.c)0
-rw-r--r--arch/arm/plat-spear/Kconfig12
-rw-r--r--arch/arm/plat-spear/Makefile5
-rw-r--r--arch/arm/plat-spear/clock.c1005
-rw-r--r--arch/arm/plat-spear/include/plat/clock.h249
-rw-r--r--arch/arm/plat-spear/restart.c5
-rw-r--r--arch/arm/plat-spear/time.c39
-rw-r--r--arch/avr32/Kconfig7
-rw-r--r--arch/avr32/include/asm/kvm_para.h1
-rw-r--r--arch/blackfin/Kconfig11
-rw-r--r--arch/blackfin/include/asm/kvm_para.h1
-rw-r--r--arch/blackfin/kernel/trace.c32
-rw-r--r--arch/c6x/Kconfig8
-rw-r--r--arch/c6x/include/asm/kvm_para.h1
-rw-r--r--arch/cris/Kconfig8
-rw-r--r--arch/cris/arch-v10/drivers/ds1302.c515
-rw-r--r--arch/cris/arch-v10/drivers/pcf8563.c380
-rw-r--r--arch/cris/arch-v10/kernel/fasttimer.c2
-rw-r--r--arch/cris/arch-v10/kernel/kgdb.c2
-rw-r--r--arch/cris/arch-v10/kernel/time.c9
-rw-r--r--arch/cris/arch-v10/lib/Makefile3
-rw-r--r--arch/cris/arch-v32/drivers/cryptocop.c6
-rw-r--r--arch/cris/arch-v32/kernel/ptrace.c2
-rw-r--r--arch/cris/arch-v32/kernel/time.c7
-rw-r--r--arch/cris/include/arch-v32/arch/cache.h2
-rw-r--r--arch/cris/include/asm/Kbuild1
-rw-r--r--arch/cris/include/asm/posix_types.h2
-rw-r--r--arch/cris/include/asm/rtc.h107
-rw-r--r--arch/cris/kernel/time.c76
-rw-r--r--arch/cris/kernel/vmlinux.lds.S1
-rw-r--r--arch/cris/mm/fault.c31
-rw-r--r--arch/frv/include/asm/kvm_para.h1
-rw-r--r--arch/h8300/Kconfig.cpu2
-rw-r--r--arch/h8300/include/asm/kvm_para.h1
-rw-r--r--arch/hexagon/Kconfig16
-rw-r--r--arch/hexagon/include/asm/kvm_para.h1
-rw-r--r--arch/ia64/Kconfig9
-rw-r--r--arch/ia64/include/asm/gpio.h59
-rw-r--r--arch/ia64/include/asm/kvm_host.h3
-rw-r--r--arch/ia64/include/asm/kvm_para.h5
-rw-r--r--arch/ia64/kvm/kvm-ia64.c30
-rw-r--r--arch/m32r/Kconfig4
-rw-r--r--arch/m68k/Kconfig11
-rw-r--r--arch/m68k/Kconfig.cpu1
-rw-r--r--arch/m68k/include/asm/kvm_para.h1
-rw-r--r--arch/microblaze/Kconfig11
-rw-r--r--arch/microblaze/include/asm/gpio.h57
-rw-r--r--arch/microblaze/include/asm/kvm_para.h1
-rw-r--r--arch/microblaze/kernel/entry.S7
-rw-r--r--arch/microblaze/kernel/mcount.S2
-rw-r--r--arch/microblaze/kernel/process.c6
-rw-r--r--arch/microblaze/mm/fault.c33
-rw-r--r--arch/mips/Kconfig31
-rw-r--r--arch/mips/Makefile4
-rw-r--r--arch/mips/alchemy/devboards/db1200.c1
-rw-r--r--arch/mips/ath79/Kconfig25
-rw-r--r--arch/mips/ath79/Makefile2
-rw-r--r--arch/mips/ath79/clock.c81
-rw-r--r--arch/mips/ath79/common.c9
-rw-r--r--arch/mips/ath79/dev-common.c3
-rw-r--r--arch/mips/ath79/dev-gpio-buttons.c4
-rw-r--r--arch/mips/ath79/dev-leds-gpio.c4
-rw-r--r--arch/mips/ath79/dev-wmac.c30
-rw-r--r--arch/mips/ath79/early_printk.c3
-rw-r--r--arch/mips/ath79/gpio.c47
-rw-r--r--arch/mips/ath79/irq.c147
-rw-r--r--arch/mips/ath79/mach-db120.c134
-rw-r--r--arch/mips/ath79/mach-pb44.c2
-rw-r--r--arch/mips/ath79/mach-ubnt-xm.c43
-rw-r--r--arch/mips/ath79/machtypes.h1
-rw-r--r--arch/mips/ath79/pci.c130
-rw-r--r--arch/mips/ath79/pci.h34
-rw-r--r--arch/mips/ath79/setup.c45
-rw-r--r--arch/mips/bcm47xx/setup.c15
-rw-r--r--arch/mips/bcm47xx/sprom.c28
-rw-r--r--arch/mips/bcm63xx/boards/Makefile2
-rw-r--r--arch/mips/cavium-octeon/setup.c1
-rw-r--r--arch/mips/cavium-octeon/smp.c6
-rw-r--r--arch/mips/fw/arc/Makefile2
-rw-r--r--arch/mips/include/asm/clkdev.h25
-rw-r--r--arch/mips/include/asm/kvm_para.h1
-rw-r--r--arch/mips/include/asm/mach-ath79/ar71xx_regs.h91
-rw-r--r--arch/mips/include/asm/mach-ath79/ath79.h23
-rw-r--r--arch/mips/include/asm/mach-ath79/irq.h10
-rw-r--r--arch/mips/include/asm/mach-ath79/pci-ath724x.h21
-rw-r--r--arch/mips/include/asm/mach-ath79/pci.h28
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/bcm47xx.h9
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h1
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h23
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/irq.h18
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h67
-rw-r--r--arch/mips/include/asm/mach-lantiq/gpio.h16
-rw-r--r--arch/mips/include/asm/mach-lantiq/lantiq.h34
-rw-r--r--arch/mips/include/asm/mach-lantiq/lantiq_platform.h33
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h44
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h136
-rw-r--r--arch/mips/include/asm/mips-boards/generic.h4
-rw-r--r--arch/mips/include/asm/module.h1
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pcieep-defs.h1365
-rw-r--r--arch/mips/include/asm/pci.h6
-rw-r--r--arch/mips/include/asm/prom.h26
-rw-r--r--arch/mips/include/asm/setup.h3
-rw-r--r--arch/mips/include/asm/sparsemem.h6
-rw-r--r--arch/mips/include/asm/termios.h2
-rw-r--r--arch/mips/include/asm/traps.h1
-rw-r--r--arch/mips/include/asm/uasm.h2
-rw-r--r--arch/mips/jz4740/Makefile2
-rw-r--r--arch/mips/kernel/cpu-probe.c54
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c3
-rw-r--r--arch/mips/kernel/proc.c26
-rw-r--r--arch/mips/kernel/prom.c13
-rw-r--r--arch/mips/kernel/setup.c2
-rw-r--r--arch/mips/kernel/smp.c2
-rw-r--r--arch/mips/kernel/traps.c17
-rw-r--r--arch/mips/lantiq/Kconfig16
-rw-r--r--arch/mips/lantiq/Makefile5
-rw-r--r--arch/mips/lantiq/Platform1
-rw-r--r--arch/mips/lantiq/clk.c146
-rw-r--r--arch/mips/lantiq/clk.h68
-rw-r--r--arch/mips/lantiq/devices.c120
-rw-r--r--arch/mips/lantiq/devices.h23
-rw-r--r--arch/mips/lantiq/dts/Makefile4
-rw-r--r--arch/mips/lantiq/dts/danube.dtsi105
-rw-r--r--arch/mips/lantiq/dts/easy50712.dts113
-rw-r--r--arch/mips/lantiq/early_printk.c17
-rw-r--r--arch/mips/lantiq/falcon/Makefile1
-rw-r--r--arch/mips/lantiq/falcon/prom.c87
-rw-r--r--arch/mips/lantiq/falcon/reset.c90
-rw-r--r--arch/mips/lantiq/falcon/sysctrl.c260
-rw-r--r--arch/mips/lantiq/irq.c255
-rw-r--r--arch/mips/lantiq/machtypes.h20
-rw-r--r--arch/mips/lantiq/prom.c74
-rw-r--r--arch/mips/lantiq/prom.h8
-rw-r--r--arch/mips/lantiq/setup.c66
-rw-r--r--arch/mips/lantiq/xway/Kconfig23
-rw-r--r--arch/mips/lantiq/xway/Makefile8
-rw-r--r--arch/mips/lantiq/xway/clk-ase.c48
-rw-r--r--arch/mips/lantiq/xway/clk-xway.c223
-rw-r--r--arch/mips/lantiq/xway/clk.c151
-rw-r--r--arch/mips/lantiq/xway/devices.c119
-rw-r--r--arch/mips/lantiq/xway/devices.h20
-rw-r--r--arch/mips/lantiq/xway/dma.c61
-rw-r--r--arch/mips/lantiq/xway/ebu.c52
-rw-r--r--arch/mips/lantiq/xway/gpio.c12
-rw-r--r--arch/mips/lantiq/xway/gpio_ebu.c126
-rw-r--r--arch/mips/lantiq/xway/gpio_stp.c157
-rw-r--r--arch/mips/lantiq/xway/mach-easy50601.c57
-rw-r--r--arch/mips/lantiq/xway/mach-easy50712.c74
-rw-r--r--arch/mips/lantiq/xway/pmu.c69
-rw-r--r--arch/mips/lantiq/xway/prom-ase.c39
-rw-r--r--arch/mips/lantiq/xway/prom-xway.c54
-rw-r--r--arch/mips/lantiq/xway/prom.c115
-rw-r--r--arch/mips/lantiq/xway/reset.c77
-rw-r--r--arch/mips/lantiq/xway/setup-ase.c19
-rw-r--r--arch/mips/lantiq/xway/setup-xway.c20
-rw-r--r--arch/mips/lantiq/xway/sysctrl.c371
-rw-r--r--arch/mips/mm/c-octeon.c14
-rw-r--r--arch/mips/mm/c-r4k.c14
-rw-r--r--arch/mips/oprofile/Makefile2
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c12
-rw-r--r--arch/mips/pci/Makefile6
-rw-r--r--arch/mips/pci/fixup-lantiq.c40
-rw-r--r--arch/mips/pci/ops-loongson2.c1
-rw-r--r--arch/mips/pci/pci-ar71xx.c375
-rw-r--r--arch/mips/pci/pci-ar724x.c292
-rw-r--r--arch/mips/pci/pci-ath724x.c174
-rw-r--r--arch/mips/pci/pci-lantiq.c219
-rw-r--r--arch/mips/pci/pci.c55
-rw-r--r--arch/mips/pmc-sierra/yosemite/Makefile2
-rw-r--r--arch/mips/pmc-sierra/yosemite/setup.c1
-rw-r--r--arch/mips/powertv/Makefile2
-rw-r--r--arch/mips/powertv/asic/Makefile2
-rw-r--r--arch/mips/powertv/pci/Makefile2
-rw-r--r--arch/mips/rb532/devices.c1
-rw-r--r--arch/mips/sni/setup.c1
-rw-r--r--arch/mn10300/Kconfig8
-rw-r--r--arch/mn10300/include/asm/kvm_para.h1
-rw-r--r--arch/openrisc/Kconfig7
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/gpio.h69
-rw-r--r--arch/openrisc/include/asm/kvm_para.h1
-rw-r--r--arch/openrisc/include/asm/uaccess.h40
-rw-r--r--arch/openrisc/lib/string.S99
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/include/asm/kvm_para.h1
-rw-r--r--arch/parisc/include/asm/smp.h2
-rw-r--r--arch/parisc/include/asm/uaccess.h5
-rw-r--r--arch/parisc/kernel/entry.S30
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c1
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S6
-rw-r--r--arch/parisc/lib/lusercopy.S41
-rw-r--r--arch/powerpc/Kconfig15
-rw-r--r--arch/powerpc/boot/dts/mpc8569mds.dts1
-rw-r--r--arch/powerpc/include/asm/cputable.h23
-rw-r--r--arch/powerpc/include/asm/dbell.h3
-rw-r--r--arch/powerpc/include/asm/gpio.h57
-rw-r--r--arch/powerpc/include/asm/hvcall.h10
-rw-r--r--arch/powerpc/include/asm/hw_irq.h1
-rw-r--r--arch/powerpc/include/asm/kvm.h1
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h18
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h3
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h8
-rw-r--r--arch/powerpc/include/asm/kvm_booke.h3
-rw-r--r--arch/powerpc/include/asm/kvm_booke_hv_asm.h49
-rw-r--r--arch/powerpc/include/asm/kvm_e500.h96
-rw-r--r--arch/powerpc/include/asm/kvm_host.h60
-rw-r--r--arch/powerpc/include/asm/kvm_para.h5
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h20
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h6
-rw-r--r--arch/powerpc/include/asm/processor.h3
-rw-r--r--arch/powerpc/include/asm/reg.h2
-rw-r--r--arch/powerpc/include/asm/reg_booke.h34
-rw-r--r--arch/powerpc/include/asm/switch_to.h1
-rw-r--r--arch/powerpc/include/asm/time.h1
-rw-r--r--arch/powerpc/include/asm/uaccess.h41
-rw-r--r--arch/powerpc/include/asm/word-at-a-time.h41
-rw-r--r--arch/powerpc/kernel/asm-offsets.c19
-rw-r--r--arch/powerpc/kernel/cpu_setup_fsl_booke.S1
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S12
-rw-r--r--arch/powerpc/kernel/head_44x.S23
-rw-r--r--arch/powerpc/kernel/head_booke.h69
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S98
-rw-r--r--arch/powerpc/kernel/idle_power7.S7
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c6
-rw-r--r--arch/powerpc/kernel/time.c3
-rw-r--r--arch/powerpc/kvm/44x.c12
-rw-r--r--arch/powerpc/kvm/44x_emulate.c51
-rw-r--r--arch/powerpc/kvm/Kconfig28
-rw-r--r--arch/powerpc/kvm/Makefile17
-rw-r--r--arch/powerpc/kvm/book3s.c7
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c31
-rw-r--r--arch/powerpc/kvm/book3s_64_slb.S2
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c150
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c3
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c106
-rw-r--r--arch/powerpc/kvm/book3s_hv.c467
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S9
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S185
-rw-r--r--arch/powerpc/kvm/book3s_pr.c59
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c101
-rw-r--r--arch/powerpc/kvm/book3s_segment.S13
-rw-r--r--arch/powerpc/kvm/booke.c471
-rw-r--r--arch/powerpc/kvm/booke.h62
-rw-r--r--arch/powerpc/kvm/booke_emulate.c118
-rw-r--r--arch/powerpc/kvm/booke_interrupts.S8
-rw-r--r--arch/powerpc/kvm/bookehv_interrupts.S597
-rw-r--r--arch/powerpc/kvm/e500.c372
-rw-r--r--arch/powerpc/kvm/e500.h306
-rw-r--r--arch/powerpc/kvm/e500_emulate.c210
-rw-r--r--arch/powerpc/kvm/e500_tlb.c666
-rw-r--r--arch/powerpc/kvm/e500_tlb.h174
-rw-r--r--arch/powerpc/kvm/e500mc.c342
-rw-r--r--arch/powerpc/kvm/emulate.c197
-rw-r--r--arch/powerpc/kvm/powerpc.c94
-rw-r--r--arch/powerpc/kvm/timing.h6
-rw-r--r--arch/powerpc/lib/string.S45
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c11
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c2
-rw-r--r--arch/s390/Kconfig15
-rw-r--r--arch/s390/hypfs/inode.c2
-rw-r--r--arch/s390/include/asm/bitops.h21
-rw-r--r--arch/s390/include/asm/cio.h4
-rw-r--r--arch/s390/include/asm/cmpxchg.h54
-rw-r--r--arch/s390/include/asm/cputime.h10
-rw-r--r--arch/s390/include/asm/ctl_reg.h6
-rw-r--r--arch/s390/include/asm/current.h3
-rw-r--r--arch/s390/include/asm/elf.h12
-rw-r--r--arch/s390/include/asm/futex.h3
-rw-r--r--arch/s390/include/asm/idals.h10
-rw-r--r--arch/s390/include/asm/io.h4
-rw-r--r--arch/s390/include/asm/irq.h3
-rw-r--r--arch/s390/include/asm/kexec.h4
-rw-r--r--arch/s390/include/asm/kmap_types.h2
-rw-r--r--arch/s390/include/asm/kvm.h5
-rw-r--r--arch/s390/include/asm/kvm_host.h1
-rw-r--r--arch/s390/include/asm/kvm_para.h5
-rw-r--r--arch/s390/include/asm/mmu_context.h2
-rw-r--r--arch/s390/include/asm/module.h2
-rw-r--r--arch/s390/include/asm/os_info.h5
-rw-r--r--arch/s390/include/asm/percpu.h2
-rw-r--r--arch/s390/include/asm/pgalloc.h6
-rw-r--r--arch/s390/include/asm/pgtable.h44
-rw-r--r--arch/s390/include/asm/processor.h39
-rw-r--r--arch/s390/include/asm/rwsem.h63
-rw-r--r--arch/s390/include/asm/sclp.h1
-rw-r--r--arch/s390/include/asm/setup.h18
-rw-r--r--arch/s390/include/asm/sfp-util.h2
-rw-r--r--arch/s390/include/asm/string.h4
-rw-r--r--arch/s390/include/asm/thread_info.h10
-rw-r--r--arch/s390/include/asm/timer.h4
-rw-r--r--arch/s390/include/asm/tlb.h4
-rw-r--r--arch/s390/include/asm/tlbflush.h4
-rw-r--r--arch/s390/include/asm/types.h4
-rw-r--r--arch/s390/include/asm/uaccess.h15
-rw-r--r--arch/s390/include/asm/vdso.h5
-rw-r--r--arch/s390/kernel/base.S12
-rw-r--r--arch/s390/kernel/early.c2
-rw-r--r--arch/s390/kernel/head_kdump.S7
-rw-r--r--arch/s390/kernel/ipl.c16
-rw-r--r--arch/s390/kernel/irq.c3
-rw-r--r--arch/s390/kernel/machine_kexec.c11
-rw-r--r--arch/s390/kernel/os_info.c3
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c2
-rw-r--r--arch/s390/kernel/setup.c12
-rw-r--r--arch/s390/kernel/smp.c45
-rw-r--r--arch/s390/kernel/sysinfo.c21
-rw-r--r--arch/s390/kvm/diag.c29
-rw-r--r--arch/s390/kvm/intercept.c1
-rw-r--r--arch/s390/kvm/kvm-s390.c87
-rw-r--r--arch/s390/kvm/kvm-s390.h1
-rw-r--r--arch/s390/kvm/priv.c31
-rw-r--r--arch/s390/lib/uaccess_mvcos.c2
-rw-r--r--arch/s390/lib/uaccess_std.c2
-rw-r--r--arch/s390/mm/maccess.c38
-rw-r--r--arch/s390/mm/vmem.c2
-rw-r--r--arch/s390/oprofile/hwsampler.c2
-rw-r--r--arch/score/Kconfig5
-rw-r--r--arch/score/include/asm/kvm_para.h1
-rw-r--r--arch/sh/Kconfig15
-rw-r--r--arch/sh/include/asm/kvm_para.h1
-rw-r--r--arch/sh/kernel/smp.c7
-rw-r--r--arch/sparc/Kconfig14
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/asi.h4
-rw-r--r--arch/sparc/include/asm/asmmacro.h22
-rw-r--r--arch/sparc/include/asm/dma-mapping.h9
-rw-r--r--arch/sparc/include/asm/gpio.h40
-rw-r--r--arch/sparc/include/asm/kvm_para.h1
-rw-r--r--arch/sparc/include/asm/leon.h82
-rw-r--r--arch/sparc/include/asm/leon_amba.h4
-rw-r--r--arch/sparc/include/asm/pgtsrmmu.h86
-rw-r--r--arch/sparc/include/asm/processor_64.h4
-rw-r--r--arch/sparc/include/asm/psr.h8
-rw-r--r--arch/sparc/include/asm/sections.h3
-rw-r--r--arch/sparc/include/asm/thread_info_32.h11
-rw-r--r--arch/sparc/include/asm/uaccess.h6
-rw-r--r--arch/sparc/include/asm/uaccess_32.h32
-rw-r--r--arch/sparc/include/asm/uaccess_64.h12
-rw-r--r--arch/sparc/kernel/Makefile4
-rw-r--r--arch/sparc/kernel/cpu.c18
-rw-r--r--arch/sparc/kernel/entry.S10
-rw-r--r--arch/sparc/kernel/etrap_32.S18
-rw-r--r--arch/sparc/kernel/head_32.S168
-rw-r--r--arch/sparc/kernel/ioport.c24
-rw-r--r--arch/sparc/kernel/irq_32.c22
-rw-r--r--arch/sparc/kernel/kernel.h3
-rw-r--r--arch/sparc/kernel/leon_kernel.c1
-rw-r--r--arch/sparc/kernel/leon_pmc.c15
-rw-r--r--arch/sparc/kernel/leon_smp.c8
-rw-r--r--arch/sparc/kernel/process_32.c35
-rw-r--r--arch/sparc/kernel/prom_common.c1
-rw-r--r--arch/sparc/kernel/rtrap_32.S18
-rw-r--r--arch/sparc/kernel/setup_32.c62
-rw-r--r--arch/sparc/kernel/trampoline_32.S6
-rw-r--r--arch/sparc/kernel/traps_64.c12
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S5
-rw-r--r--arch/sparc/kernel/wof.S18
-rw-r--r--arch/sparc/kernel/wuf.S27
-rw-r--r--arch/sparc/lib/Makefile1
-rw-r--r--arch/sparc/lib/ksyms.c5
-rw-r--r--arch/sparc/lib/strlen_user_32.S109
-rw-r--r--arch/sparc/lib/strlen_user_64.S97
-rw-r--r--arch/sparc/lib/strncpy_from_user_32.S47
-rw-r--r--arch/sparc/lib/strncpy_from_user_64.S133
-rw-r--r--arch/sparc/lib/usercopy.c1
-rw-r--r--arch/sparc/math-emu/math_64.c20
-rw-r--r--arch/sparc/mm/Makefile3
-rw-r--r--arch/sparc/mm/leon_mm.c2
-rw-r--r--arch/sparc/mm/srmmu.c52
-rw-r--r--arch/sparc/mm/srmmu_access.S82
-rw-r--r--arch/tile/Kconfig43
-rw-r--r--arch/tile/Makefile7
-rw-r--r--arch/tile/include/arch/spr_def_32.h56
-rw-r--r--arch/tile/include/arch/spr_def_64.h43
-rw-r--r--arch/tile/include/asm/Kbuild2
-rw-r--r--arch/tile/include/asm/atomic_32.h10
-rw-r--r--arch/tile/include/asm/bitops.h12
-rw-r--r--arch/tile/include/asm/byteorder.h20
-rw-r--r--arch/tile/include/asm/cachectl.h42
-rw-r--r--arch/tile/include/asm/compat.h3
-rw-r--r--arch/tile/include/asm/elf.h5
-rw-r--r--arch/tile/include/asm/futex.h143
-rw-r--r--arch/tile/include/asm/hardwall.h18
-rw-r--r--arch/tile/include/asm/hugetlb.h21
-rw-r--r--arch/tile/include/asm/irqflags.h34
-rw-r--r--arch/tile/include/asm/kexec.h12
-rw-r--r--arch/tile/include/asm/kvm_para.h1
-rw-r--r--arch/tile/include/asm/mmu.h2
-rw-r--r--arch/tile/include/asm/mmu_context.h8
-rw-r--r--arch/tile/include/asm/module.h40
-rw-r--r--arch/tile/include/asm/page.h18
-rw-r--r--arch/tile/include/asm/pgalloc.h92
-rw-r--r--arch/tile/include/asm/pgtable.h111
-rw-r--r--arch/tile/include/asm/pgtable_32.h40
-rw-r--r--arch/tile/include/asm/pgtable_64.h57
-rw-r--r--arch/tile/include/asm/processor.h17
-rw-r--r--arch/tile/include/asm/setup.h10
-rw-r--r--arch/tile/include/asm/syscalls.h3
-rw-r--r--arch/tile/include/asm/tlbflush.h17
-rw-r--r--arch/tile/include/asm/uaccess.h222
-rw-r--r--arch/tile/include/asm/unistd.h4
-rw-r--r--arch/tile/include/hv/drv_xgbe_intf.h2
-rw-r--r--arch/tile/include/hv/hypervisor.h325
-rw-r--r--arch/tile/kernel/Makefile3
-rw-r--r--arch/tile/kernel/entry.S3
-rw-r--r--arch/tile/kernel/hardwall.c754
-rw-r--r--arch/tile/kernel/head_32.S8
-rw-r--r--arch/tile/kernel/head_64.S22
-rw-r--r--arch/tile/kernel/hvglue.lds3
-rw-r--r--arch/tile/kernel/intvec_64.S80
-rw-r--r--arch/tile/kernel/machine_kexec.c42
-rw-r--r--arch/tile/kernel/module.c12
-rw-r--r--arch/tile/kernel/proc.c1
-rw-r--r--arch/tile/kernel/process.c16
-rw-r--r--arch/tile/kernel/relocate_kernel_32.S (renamed from arch/tile/kernel/relocate_kernel.S)0
-rw-r--r--arch/tile/kernel/relocate_kernel_64.S260
-rw-r--r--arch/tile/kernel/setup.c169
-rw-r--r--arch/tile/kernel/single_step.c16
-rw-r--r--arch/tile/kernel/smp.c2
-rw-r--r--arch/tile/kernel/sys.c10
-rw-r--r--arch/tile/kernel/sysfs.c8
-rw-r--r--arch/tile/kernel/tlb.c11
-rw-r--r--arch/tile/kernel/traps.c30
-rw-r--r--arch/tile/lib/atomic_32.c47
-rw-r--r--arch/tile/lib/exports.c8
-rw-r--r--arch/tile/lib/memchr_64.c8
-rw-r--r--arch/tile/lib/memcpy_64.c23
-rw-r--r--arch/tile/lib/memcpy_tile64.c8
-rw-r--r--arch/tile/lib/strchr_64.c15
-rw-r--r--arch/tile/lib/string-endian.h33
-rw-r--r--arch/tile/lib/strlen_64.c11
-rw-r--r--arch/tile/lib/usercopy_32.S76
-rw-r--r--arch/tile/lib/usercopy_64.S49
-rw-r--r--arch/tile/mm/fault.c34
-rw-r--r--arch/tile/mm/homecache.c1
-rw-r--r--arch/tile/mm/hugetlbpage.c285
-rw-r--r--arch/tile/mm/init.c19
-rw-r--r--arch/tile/mm/migrate.h6
-rw-r--r--arch/tile/mm/migrate_32.S36
-rw-r--r--arch/tile/mm/migrate_64.S34
-rw-r--r--arch/tile/mm/pgtable.c40
-rw-r--r--arch/um/Kconfig.common5
-rw-r--r--arch/um/Kconfig.um1
-rw-r--r--arch/um/Makefile11
-rw-r--r--arch/um/include/asm/kvm_para.h1
-rw-r--r--arch/um/kernel/reboot.c13
-rw-r--r--arch/um/kernel/trap.c24
-rw-r--r--arch/unicore32/Kconfig6
-rw-r--r--arch/unicore32/include/asm/kvm_para.h1
-rw-r--r--arch/x86/Kbuild2
-rw-r--r--arch/x86/Kconfig41
-rw-r--r--arch/x86/include/asm/acpi.h9
-rw-r--r--arch/x86/include/asm/bitops.h2
-rw-r--r--arch/x86/include/asm/dma-contiguous.h13
-rw-r--r--arch/x86/include/asm/dma-mapping.h5
-rw-r--r--arch/x86/include/asm/gpio.h57
-rw-r--r--arch/x86/include/asm/kvm_emulate.h4
-rw-r--r--arch/x86/include/asm/kvm_host.h13
-rw-r--r--arch/x86/include/asm/kvm_para.h24
-rw-r--r--arch/x86/include/asm/pgtable-3level.h50
-rw-r--r--arch/x86/include/asm/processor.h7
-rw-r--r--arch/x86/include/asm/pvclock-abi.h1
-rw-r--r--arch/x86/include/asm/realmode.h62
-rw-r--r--arch/x86/include/asm/sta2x11.h12
-rw-r--r--arch/x86/include/asm/thread_info.h2
-rw-r--r--arch/x86/include/asm/trampoline.h39
-rw-r--r--arch/x86/include/asm/uaccess.h4
-rw-r--r--arch/x86/include/asm/uaccess_32.h17
-rw-r--r--arch/x86/include/asm/uaccess_64.h3
-rw-r--r--arch/x86/include/asm/uprobes.h57
-rw-r--r--arch/x86/include/asm/vga.h6
-rw-r--r--arch/x86/include/asm/word-at-a-time.h34
-rw-r--r--arch/x86/include/asm/xen/events.h1
-rw-r--r--arch/x86/include/asm/xen/page.h1
-rw-r--r--arch/x86/kernel/Makefile3
-rw-r--r--arch/x86/kernel/acpi/Makefile9
-rw-r--r--arch/x86/kernel/acpi/realmode/.gitignore3
-rw-r--r--arch/x86/kernel/acpi/realmode/Makefile59
-rw-r--r--arch/x86/kernel/acpi/realmode/bioscall.S1
-rw-r--r--arch/x86/kernel/acpi/realmode/copy.S1
-rw-r--r--arch/x86/kernel/acpi/realmode/regs.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/video-bios.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/video-mode.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/video-vesa.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/video-vga.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/wakeup.lds.S62
-rw-r--r--arch/x86/kernel/acpi/sleep.c33
-rw-r--r--arch/x86/kernel/acpi/sleep.h2
-rw-r--r--arch/x86/kernel/acpi/wakeup_rm.S12
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-apei.c3
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-severity.c26
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c61
-rw-r--r--arch/x86/kernel/cpu/mtrr/cleanup.c2
-rw-r--r--arch/x86/kernel/e820.c53
-rw-r--r--arch/x86/kernel/head32.c1
-rw-r--r--arch/x86/kernel/head64.c1
-rw-r--r--arch/x86/kernel/head_32.S5
-rw-r--r--arch/x86/kernel/head_64.S4
-rw-r--r--arch/x86/kernel/hpet.c66
-rw-r--r--arch/x86/kernel/kvmclock.c20
-rw-r--r--arch/x86/kernel/mpparse.c11
-rw-r--r--arch/x86/kernel/pci-dma.c18
-rw-r--r--arch/x86/kernel/pci-nommu.c8
-rw-r--r--arch/x86/kernel/reboot.c25
-rw-r--r--arch/x86/kernel/setup.c24
-rw-r--r--arch/x86/kernel/signal.c6
-rw-r--r--arch/x86/kernel/smpboot.c18
-rw-r--r--arch/x86/kernel/tboot.c7
-rw-r--r--arch/x86/kernel/trampoline.c42
-rw-r--r--arch/x86/kernel/trampoline_32.S83
-rw-r--r--arch/x86/kernel/uprobes.c674
-rw-r--r--arch/x86/kernel/vmlinux.lds.S12
-rw-r--r--arch/x86/kvm/Kconfig1
-rw-r--r--arch/x86/kvm/cpuid.c5
-rw-r--r--arch/x86/kvm/emulate.c293
-rw-r--r--arch/x86/kvm/i8254.c31
-rw-r--r--arch/x86/kvm/i8254.h7
-rw-r--r--arch/x86/kvm/lapic.c31
-rw-r--r--arch/x86/kvm/mmu.c348
-rw-r--r--arch/x86/kvm/mmu_audit.c10
-rw-r--r--arch/x86/kvm/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/svm.c9
-rw-r--r--arch/x86/kvm/vmx.c41
-rw-r--r--arch/x86/kvm/x86.c280
-rw-r--r--arch/x86/kvm/x86.h2
-rw-r--r--arch/x86/lib/usercopy.c97
-rw-r--r--arch/x86/lib/usercopy_32.c41
-rw-r--r--arch/x86/lib/usercopy_64.c48
-rw-r--r--arch/x86/mm/init.c16
-rw-r--r--arch/x86/mm/numa.c32
-rw-r--r--arch/x86/mm/numa_emulation.c4
-rw-r--r--arch/x86/mm/pat.c98
-rw-r--r--arch/x86/mm/srat.c5
-rw-r--r--arch/x86/pci/fixup.c3
-rw-r--r--arch/x86/pci/xen.c4
-rw-r--r--arch/x86/realmode/Makefile18
-rw-r--r--arch/x86/realmode/init.c115
-rw-r--r--arch/x86/realmode/rm/.gitignore3
-rw-r--r--arch/x86/realmode/rm/Makefile82
-rw-r--r--arch/x86/realmode/rm/bioscall.S1
-rw-r--r--arch/x86/realmode/rm/copy.S1
-rw-r--r--arch/x86/realmode/rm/header.S41
-rw-r--r--arch/x86/realmode/rm/realmode.h21
-rw-r--r--arch/x86/realmode/rm/realmode.lds.S76
-rw-r--r--arch/x86/realmode/rm/reboot_32.S (renamed from arch/x86/kernel/reboot_32.S)89
-rw-r--r--arch/x86/realmode/rm/regs.c1
-rw-r--r--arch/x86/realmode/rm/stack.S19
-rw-r--r--arch/x86/realmode/rm/trampoline_32.S74
-rw-r--r--arch/x86/realmode/rm/trampoline_64.S (renamed from arch/x86/kernel/trampoline_64.S)148
-rw-r--r--arch/x86/realmode/rm/trampoline_common.S7
-rw-r--r--arch/x86/realmode/rm/video-bios.c1
-rw-r--r--arch/x86/realmode/rm/video-mode.c1
-rw-r--r--arch/x86/realmode/rm/video-vesa.c1
-rw-r--r--arch/x86/realmode/rm/video-vga.c1
-rw-r--r--arch/x86/realmode/rm/wakemain.c (renamed from arch/x86/kernel/acpi/realmode/wakemain.c)3
-rw-r--r--arch/x86/realmode/rm/wakeup.h (renamed from arch/x86/kernel/acpi/realmode/wakeup.h)10
-rw-r--r--arch/x86/realmode/rm/wakeup_asm.S (renamed from arch/x86/kernel/acpi/realmode/wakeup.S)131
-rw-r--r--arch/x86/realmode/rmpiggy.S20
-rw-r--r--arch/x86/syscalls/syscall_32.tbl1
-rw-r--r--arch/x86/syscalls/syscall_64.tbl2
-rw-r--r--arch/x86/tools/relocs.c19
-rw-r--r--arch/x86/video/fbdev.c20
-rw-r--r--arch/x86/xen/debugfs.c104
-rw-r--r--arch/x86/xen/debugfs.h4
-rw-r--r--arch/x86/xen/enlighten.c16
-rw-r--r--arch/x86/xen/mmu.c23
-rw-r--r--arch/x86/xen/p2m.c104
-rw-r--r--arch/x86/xen/setup.c171
-rw-r--r--arch/x86/xen/smp.c112
-rw-r--r--arch/x86/xen/smp.h12
-rw-r--r--arch/x86/xen/spinlock.c12
-rw-r--r--arch/x86/xen/xen-ops.h1
-rw-r--r--arch/xtensa/include/asm/gpio.h60
-rw-r--r--arch/xtensa/include/asm/kvm_para.h1
-rw-r--r--block/Kconfig.iosched4
-rw-r--r--block/blk-cgroup.c2100
-rw-r--r--block/blk-cgroup.h647
-rw-r--r--block/blk-core.c281
-rw-r--r--block/blk-ioc.c130
-rw-r--r--block/blk-sysfs.c6
-rw-r--r--block/blk-throttle.c697
-rw-r--r--block/blk.h32
-rw-r--r--block/cfq-iosched.c1072
-rw-r--r--block/cfq.h115
-rw-r--r--block/deadline-iosched.c8
-rw-r--r--block/elevator.c121
-rw-r--r--block/noop-iosched.c8
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/bgrt.c1
-rw-r--r--drivers/acpi/sleep.c6
-rw-r--r--drivers/amba/Makefile4
-rw-r--r--drivers/amba/tegra-ahb.c293
-rw-r--r--drivers/ata/sata_mv.c40
-rw-r--r--drivers/atm/solos-pci.c4
-rw-r--r--drivers/base/Kconfig89
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/dma-buf.c99
-rw-r--r--drivers/base/dma-coherent.c42
-rw-r--r--drivers/base/dma-contiguous.c401
-rw-r--r--drivers/base/node.c8
-rw-r--r--drivers/base/regmap/regmap-i2c.c2
-rw-r--r--drivers/bcma/core.c3
-rw-r--r--drivers/bcma/driver_pci.c53
-rw-r--r--drivers/bcma/driver_pci_host.c10
-rw-r--r--drivers/bcma/host_pci.c7
-rw-r--r--drivers/bcma/scan.c54
-rw-r--r--drivers/bcma/sprom.c149
-rw-r--r--drivers/block/drbd/drbd_actlog.c104
-rw-r--r--drivers/block/drbd/drbd_bitmap.c146
-rw-r--r--drivers/block/drbd/drbd_int.h90
-rw-r--r--drivers/block/drbd/drbd_main.c357
-rw-r--r--drivers/block/drbd/drbd_nl.c48
-rw-r--r--drivers/block/drbd/drbd_proc.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c95
-rw-r--r--drivers/block/drbd/drbd_req.c132
-rw-r--r--drivers/block/drbd/drbd_req.h19
-rw-r--r--drivers/block/drbd/drbd_worker.c31
-rw-r--r--drivers/block/floppy.c161
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c276
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h48
-rw-r--r--drivers/block/rbd.c72
-rw-r--r--drivers/block/xen-blkfront.c44
-rw-r--r--drivers/bluetooth/ath3k.c6
-rw-r--r--drivers/bluetooth/btmrvl_drv.h3
-rw-r--r--drivers/bluetooth/btmrvl_main.c56
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c112
-rw-r--r--drivers/bluetooth/btusb.c16
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/bluetooth/hci_vhci.c3
-rw-r--r--drivers/char/agp/generic.c4
-rw-r--r--drivers/char/agp/intel-agp.c5
-rw-r--r--drivers/char/agp/intel-agp.h14
-rw-r--r--drivers/char/agp/intel-gtt.c45
-rw-r--r--drivers/char/agp/sgi-agp.c1
-rw-r--r--drivers/clk/Kconfig12
-rw-r--r--drivers/clk/Makefile5
-rw-r--r--drivers/clk/clk-divider.c68
-rw-r--r--drivers/clk/clk-fixed-factor.c95
-rw-r--r--drivers/clk/clk-fixed-rate.c49
-rw-r--r--drivers/clk/clk-gate.c104
-rw-r--r--drivers/clk/clk-mux.c27
-rw-r--r--drivers/clk/clk.c279
-rw-r--r--drivers/clk/mxs/Makefile8
-rw-r--r--drivers/clk/mxs/clk-div.c110
-rw-r--r--drivers/clk/mxs/clk-frac.c139
-rw-r--r--drivers/clk/mxs/clk-imx23.c205
-rw-r--r--drivers/clk/mxs/clk-imx28.c338
-rw-r--r--drivers/clk/mxs/clk-pll.c116
-rw-r--r--drivers/clk/mxs/clk-ref.c154
-rw-r--r--drivers/clk/mxs/clk.c28
-rw-r--r--drivers/clk/mxs/clk.h66
-rw-r--r--drivers/clk/spear/Makefile10
-rw-r--r--drivers/clk/spear/clk-aux-synth.c198
-rw-r--r--drivers/clk/spear/clk-frac-synth.c165
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c154
-rw-r--r--drivers/clk/spear/clk-vco-pll.c363
-rw-r--r--drivers/clk/spear/clk.c36
-rw-r--r--drivers/clk/spear/clk.h134
-rw-r--r--drivers/clk/spear/spear1310_clock.c1106
-rw-r--r--drivers/clk/spear/spear1340_clock.c964
-rw-r--r--drivers/clk/spear/spear3xx_clock.c612
-rw-r--r--drivers/clk/spear/spear6xx_clock.c342
-rw-r--r--drivers/crypto/mv_cesa.c14
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/amba-pl08x.c52
-rw-r--r--drivers/dma/at_hdmac.c15
-rw-r--r--drivers/dma/at_hdmac_regs.h21
-rw-r--r--drivers/dma/coh901318.c2
-rw-r--r--drivers/dma/coh901318_lli.c4
-rw-r--r--drivers/dma/dw_dmac.c26
-rw-r--r--drivers/dma/ep93xx_dma.c117
-rw-r--r--drivers/dma/imx-dma.c12
-rw-r--r--drivers/dma/imx-sdma.c108
-rw-r--r--drivers/dma/intel_mid_dma.c8
-rw-r--r--drivers/dma/ipu/ipu_idmac.c6
-rw-r--r--drivers/dma/mv_xor.c15
-rw-r--r--drivers/dma/mv_xor.h1
-rw-r--r--drivers/dma/mxs-dma.c194
-rw-r--r--drivers/dma/pch_dma.c2
-rw-r--r--drivers/dma/pl330.c1
-rw-r--r--drivers/dma/ste_dma40.c2
-rw-r--r--drivers/edac/amd64_edac.c200
-rw-r--r--drivers/edac/amd76x_edac.c42
-rw-r--r--drivers/edac/cell_edac.c42
-rw-r--r--drivers/edac/cpc925_edac.c91
-rw-r--r--drivers/edac/e752x_edac.c116
-rw-r--r--drivers/edac/e7xxx_edac.c86
-rw-r--r--drivers/edac/edac_core.h47
-rw-r--r--drivers/edac/edac_device.c27
-rw-r--r--drivers/edac/edac_mc.c716
-rw-r--r--drivers/edac/edac_mc_sysfs.c70
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/edac/edac_pci.c6
-rw-r--r--drivers/edac/i3000_edac.c49
-rw-r--r--drivers/edac/i3200_edac.c56
-rw-r--r--drivers/edac/i5000_edac.c236
-rw-r--r--drivers/edac/i5100_edac.c106
-rw-r--r--drivers/edac/i5400_edac.c265
-rw-r--r--drivers/edac/i7300_edac.c115
-rw-r--r--drivers/edac/i7core_edac.c270
-rw-r--r--drivers/edac/i82443bxgx_edac.c41
-rw-r--r--drivers/edac/i82860_edac.c55
-rw-r--r--drivers/edac/i82875p_edac.c51
-rw-r--r--drivers/edac/i82975x_edac.c58
-rw-r--r--drivers/edac/mce_amd.h2
-rw-r--r--drivers/edac/mpc85xx_edac.c37
-rw-r--r--drivers/edac/mv64x60_edac.c47
-rw-r--r--drivers/edac/pasemi_edac.c49
-rw-r--r--drivers/edac/ppc4xx_edac.c50
-rw-r--r--drivers/edac/r82600_edac.c40
-rw-r--r--drivers/edac/sb_edac.c212
-rw-r--r--drivers/edac/tile_edac.c33
-rw-r--r--drivers/edac/x38_edac.c52
-rw-r--r--drivers/firewire/core-card.c4
-rw-r--r--drivers/firewire/core-cdev.c51
-rw-r--r--drivers/firewire/core-device.c116
-rw-r--r--drivers/firewire/core-iso.c80
-rw-r--r--drivers/firewire/core-transaction.c26
-rw-r--r--drivers/firewire/core.h7
-rw-r--r--drivers/firewire/nosy.c20
-rw-r--r--drivers/firewire/ohci.c42
-rw-r--r--drivers/firewire/sbp2.c28
-rw-r--r--drivers/gpio/Kconfig87
-rw-r--r--drivers/gpio/Makefile7
-rw-r--r--drivers/gpio/devres.c29
-rw-r--r--drivers/gpio/gpio-bt8xx.c12
-rw-r--r--drivers/gpio/gpio-ep93xx.c2
-rw-r--r--drivers/gpio/gpio-generic.c16
-rw-r--r--drivers/gpio/gpio-ich.c419
-rw-r--r--drivers/gpio/gpio-langwell.c91
-rw-r--r--drivers/gpio/gpio-lpc32xx.c52
-rw-r--r--drivers/gpio/gpio-mcp23s08.c2
-rw-r--r--drivers/gpio/gpio-ml-ioh.c12
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c158
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c3
-rw-r--r--drivers/gpio/gpio-msic.c339
-rw-r--r--drivers/gpio/gpio-mxc.c2
-rw-r--r--drivers/gpio/gpio-mxs.c156
-rw-r--r--drivers/gpio/gpio-omap.c106
-rw-r--r--drivers/gpio/gpio-pca953x.c43
-rw-r--r--drivers/gpio/gpio-pch.c12
-rw-r--r--drivers/gpio/gpio-rc5t583.c180
-rw-r--r--drivers/gpio/gpio-samsung.c416
-rw-r--r--drivers/gpio/gpio-sch.c8
-rw-r--r--drivers/gpio/gpio-sodaville.c14
-rw-r--r--drivers/gpio/gpio-sta2x11.c435
-rw-r--r--drivers/gpio/gpio-stp-xway.c301
-rw-r--r--drivers/gpio/gpio-tps65910.c188
-rw-r--r--drivers/gpio/gpio-wm831x.c6
-rw-r--r--drivers/gpio/gpiolib-of.c (renamed from drivers/of/gpio.c)80
-rw-r--r--drivers/gpio/gpiolib.c18
-rw-r--r--drivers/gpu/drm/Kconfig6
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/ast/Kconfig16
-rw-r--r--drivers/gpu/drm/ast/Makefile9
-rw-r--r--drivers/gpu/drm/ast/ast_dram_tables.h144
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c244
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h356
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c341
-rw-r--r--drivers/gpu/drm/ast/ast_main.c527
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c1160
-rw-r--r--drivers/gpu/drm/ast/ast_post.c1780
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h265
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c453
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig12
-rw-r--r--drivers/gpu/drm/cirrus/Makefile5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c108
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h246
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c307
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c335
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c629
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c453
-rw-r--r--drivers/gpu/drm/drm_cache.c23
-rw-r--r--drivers/gpu/drm/drm_context.c9
-rw-r--r--drivers/gpu/drm/drm_crtc.c585
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c35
-rw-r--r--drivers/gpu/drm/drm_drv.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c244
-rw-r--r--drivers/gpu/drm/drm_edid_load.c12
-rw-r--r--drivers/gpu/drm/drm_edid_modes.h292
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c10
-rw-r--r--drivers/gpu/drm/drm_gem.c35
-rw-r--r--drivers/gpu/drm/drm_ioctl.c4
-rw-r--r--drivers/gpu/drm/drm_irq.c23
-rw-r--r--drivers/gpu/drm/drm_lock.c2
-rw-r--r--drivers/gpu/drm/drm_prime.c48
-rw-r--r--drivers/gpu/drm/drm_stub.c7
-rw-r--r--drivers/gpu/drm/drm_sysfs.c10
-rw-r--r--drivers/gpu/drm/drm_vm.c18
-rw-r--r--drivers/gpu/drm/exynos/Kconfig12
-rw-r--r--drivers/gpu/drm/exynos/Makefile2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c272
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.h39
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c43
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h17
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c937
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.h36
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c89
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c77
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c429
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c401
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h6
-rw-r--r--drivers/gpu/drm/gma500/Makefile5
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c231
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c30
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c697
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c7
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c76
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c57
-rw-r--r--drivers/gpu/drm/gma500/gem.c2
-rw-r--r--drivers/gpu/drm/gma500/gtt.c32
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c274
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h161
-rw-r--r--drivers/gpu/drm/gma500/mdfld_device.c452
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.c1
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c24
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c330
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c295
-rw-r--r--drivers/gpu/drm/gma500/oaktrail.h25
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c134
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c138
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c66
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c4
-rw-r--r--drivers/gpu/drm/gma500/opregion.c344
-rw-r--r--drivers/gpu/drm/gma500/opregion.h (renamed from drivers/gpu/drm/gma500/intel_opregion.c)64
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c75
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c66
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h208
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c348
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h9
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_reg.h35
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c9
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c36
-rw-r--r--drivers/gpu/drm/gma500/psb_lid.c14
-rw-r--r--drivers/gpu/drm/i915/Makefile7
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c385
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c1160
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c277
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h248
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c1932
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c16
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c232
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c38
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c200
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c96
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c202
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c18
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c5
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1767
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h488
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c18
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c111
-rw-r--r--drivers/gpu/drm/i915/i915_trace_points.c2
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c3
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c45
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c76
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c755
-rw-r--r--drivers/gpu/drm/i915/intel_display.c4372
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c68
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h105
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c6
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c310
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c389
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c19
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c3
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c71
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c209
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c29
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c3820
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c725
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h23
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c119
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h5
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c102
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c69
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig15
-rw-r--r--drivers/gpu/drm/mgag200/Makefile5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c116
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h276
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c294
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_i2c.c156
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c388
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c1533
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_reg.h661
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c452
-rw-r--r--drivers/gpu/drm/nouveau/Makefile11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c51
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c385
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c88
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c40
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h35
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h179
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c34
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c578
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h52
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fifo.h32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpio.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c215
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c208
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_software.h69
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c270
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c57
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.h6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c11
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c48
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c140
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c419
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c39
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c23
-rw-r--r--drivers/gpu/drm/nouveau/nv04_software.c147
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c214
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fifo.c278
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fifo.c177
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv31_mpeg.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c351
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c37
-rw-r--r--drivers/gpu/drm/nouveau/nv40_grctx.c32
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c102
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c75
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fb.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c59
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c596
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c229
-rw-r--r--drivers/gpu/drm/nouveau/nv50_grctx.c33
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_mpeg.c19
-rw-r--r--drivers/gpu/drm/nouveau/nv50_software.c214
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c177
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fifo.c241
-rw-r--r--drivers/gpu/drm/nouveau/nv98_crypt.c166
-rw-r--r--drivers/gpu/drm/nouveau/nv98_crypt.fuc698
-rw-r--r--drivers/gpu/drm/nouveau/nv98_crypt.fuc.h584
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.c31
-rw-r--r--drivers/gpu/drm/nouveau/nva3_pm.c290
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c184
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c310
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_pm.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_software.c153
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c16
-rw-r--r--drivers/gpu/drm/nouveau/nve0_fifo.c423
-rw-r--r--drivers/gpu/drm/nouveau/nve0_graph.c831
-rw-r--r--drivers/gpu/drm/nouveau/nve0_graph.h89
-rw-r--r--drivers/gpu/drm/nouveau/nve0_grctx.c2777
-rw-r--r--drivers/gpu/drm/radeon/Makefile5
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c7
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c27
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c20
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c155
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c10
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c216
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h220
-rw-r--r--drivers/gpu/drm/radeon/ni.c43
-rw-r--r--drivers/gpu/drm/radeon/r100.c121
-rw-r--r--drivers/gpu/drm/radeon/r200.c2
-rw-r--r--drivers/gpu/drm/radeon/r300.c34
-rw-r--r--drivers/gpu/drm/radeon/r420.c7
-rw-r--r--drivers/gpu/drm/radeon/r520.c8
-rw-r--r--drivers/gpu/drm/radeon/r600.c191
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c215
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c101
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c22
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c464
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h45
-rw-r--r--drivers/gpu/drm/radeon/r600d.h233
-rw-r--r--drivers/gpu/drm/radeon/radeon.h239
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c44
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h27
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c66
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c56
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c170
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c42
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c113
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c621
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h15
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h30
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c220
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c415
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c325
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c187
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c67
-rw-r--r--drivers/gpu/drm/radeon/rs400.c7
-rw-r--r--drivers/gpu/drm/radeon/rs600.c48
-rw-r--r--drivers/gpu/drm/radeon/rs600d.h14
-rw-r--r--drivers/gpu/drm/radeon/rs690.c7
-rw-r--r--drivers/gpu/drm/radeon/rv515.c8
-rw-r--r--drivers/gpu/drm/radeon/rv770.c12
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h191
-rw-r--r--drivers/gpu/drm/radeon/si.c30
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c17
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c8
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h3
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c22
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c94
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c2
-rw-r--r--drivers/gpu/vga/Kconfig1
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c284
-rw-r--r--drivers/gpu/vga/vgaarb.c9
-rw-r--r--drivers/hwmon/Kconfig6
-rw-r--r--drivers/hwmon/sch5627.c2
-rw-r--r--drivers/hwmon/sch5636.c2
-rw-r--r--drivers/hwmon/sch56xx-common.c406
-rw-r--r--drivers/hwmon/sch56xx-common.h2
-rw-r--r--drivers/i2c/Kconfig1
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c2
-rw-r--r--drivers/i2c/busses/Kconfig15
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-davinci.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c31
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h5
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c33
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c246
-rw-r--r--drivers/i2c/busses/i2c-gpio.c7
-rw-r--r--drivers/i2c/busses/i2c-imx.c2
-rw-r--r--drivers/i2c/busses/i2c-ixp2000.c157
-rw-r--r--drivers/i2c/busses/i2c-mpc.c30
-rw-r--r--drivers/i2c/busses/i2c-mxs.c22
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c3
-rw-r--r--drivers/i2c/busses/i2c-ocores.c3
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c5
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c112
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c11
-rw-r--r--drivers/i2c/busses/i2c-tegra.c24
-rw-r--r--drivers/i2c/busses/i2c-versatile.c9
-rw-r--r--drivers/i2c/busses/i2c-xiic.c23
-rw-r--r--drivers/i2c/i2c-core.c17
-rw-r--r--drivers/i2c/i2c-dev.c30
-rw-r--r--drivers/i2c/i2c-mux.c42
-rw-r--r--drivers/i2c/muxes/Kconfig6
-rw-r--r--drivers/i2c/muxes/Makefile6
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c (renamed from drivers/i2c/muxes/gpio-i2cmux.c)42
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c (renamed from drivers/i2c/muxes/pca9541.c)3
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c (renamed from drivers/i2c/muxes/pca954x.c)2
-rw-r--r--drivers/input/Kconfig17
-rw-r--r--drivers/input/Makefile2
-rw-r--r--drivers/input/evdev.c69
-rw-r--r--drivers/input/ff-memless.c3
-rw-r--r--drivers/input/gameport/emu10k1-gp.c13
-rw-r--r--drivers/input/gameport/fm801-gp.c16
-rw-r--r--drivers/input/joystick/a3d.c13
-rw-r--r--drivers/input/joystick/adi.c17
-rw-r--r--drivers/input/joystick/as5011.c1
-rw-r--r--drivers/input/joystick/cobra.c13
-rw-r--r--drivers/input/joystick/gf2k.c13
-rw-r--r--drivers/input/joystick/grip.c13
-rw-r--r--drivers/input/joystick/grip_mp.c13
-rw-r--r--drivers/input/joystick/guillemot.c13
-rw-r--r--drivers/input/joystick/interact.c13
-rw-r--r--drivers/input/joystick/joydump.c13
-rw-r--r--drivers/input/joystick/magellan.c17
-rw-r--r--drivers/input/joystick/sidewinder.c13
-rw-r--r--drivers/input/joystick/spaceball.c17
-rw-r--r--drivers/input/joystick/spaceorb.c17
-rw-r--r--drivers/input/joystick/stinger.c17
-rw-r--r--drivers/input/joystick/tmdc.c13
-rw-r--r--drivers/input/joystick/twidjoy.c17
-rw-r--r--drivers/input/joystick/warrior.c17
-rw-r--r--drivers/input/joystick/zhenhua.c17
-rw-r--r--drivers/input/keyboard/Kconfig32
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adp5588-keys.c1
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c44
-rw-r--r--drivers/input/keyboard/hil_kbd.c13
-rw-r--r--drivers/input/keyboard/imx_keypad.c17
-rw-r--r--drivers/input/keyboard/lkkbd.c17
-rw-r--r--drivers/input/keyboard/lm8333.c235
-rw-r--r--drivers/input/keyboard/matrix_keypad.c99
-rw-r--r--drivers/input/keyboard/newtonkbd.c13
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c20
-rw-r--r--drivers/input/keyboard/omap-keypad.c20
-rw-r--r--drivers/input/keyboard/omap4-keypad.c133
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c20
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c52
-rw-r--r--drivers/input/keyboard/samsung-keypad.c20
-rw-r--r--drivers/input/keyboard/spear-keyboard.c92
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c16
-rw-r--r--drivers/input/keyboard/stowaway.c13
-rw-r--r--drivers/input/keyboard/sunkbd.c17
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c41
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c15
-rw-r--r--drivers/input/keyboard/tegra-kbc.c68
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c21
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c25
-rw-r--r--drivers/input/keyboard/w90p910_keypad.c27
-rw-r--r--drivers/input/keyboard/xtkbd.c13
-rw-r--r--drivers/input/matrix-keymap.c163
-rw-r--r--drivers/input/misc/cma3000_d0x.c2
-rw-r--r--drivers/input/misc/mpu3050.c2
-rw-r--r--drivers/input/misc/twl6040-vibra.c46
-rw-r--r--drivers/input/misc/wm831x-on.c2
-rw-r--r--drivers/input/mouse/Kconfig12
-rw-r--r--drivers/input/mouse/Makefile1
-rw-r--r--drivers/input/mouse/alps.c81
-rw-r--r--drivers/input/mouse/alps.h2
-rw-r--r--drivers/input/mouse/navpoint.c369
-rw-r--r--drivers/input/mouse/sentelic.c34
-rw-r--r--drivers/input/mouse/sentelic.h8
-rw-r--r--drivers/input/mouse/sermouse.c13
-rw-r--r--drivers/input/mouse/synaptics.c20
-rw-r--r--drivers/input/mouse/vsxxxaa.c14
-rw-r--r--drivers/input/of_keymap.c87
-rw-r--r--drivers/input/serio/pcips2.c15
-rw-r--r--drivers/input/serio/ps2mult.c13
-rw-r--r--drivers/input/serio/serio_raw.c65
-rw-r--r--drivers/input/serio/xilinx_ps2.c35
-rw-r--r--drivers/input/tablet/aiptek.c2
-rw-r--r--drivers/input/tablet/wacom.h4
-rw-r--r--drivers/input/tablet/wacom_sys.c244
-rw-r--r--drivers/input/tablet/wacom_wac.c304
-rw-r--r--drivers/input/tablet/wacom_wac.h13
-rw-r--r--drivers/input/touchscreen/Kconfig34
-rw-r--r--drivers/input/touchscreen/Makefile2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c33
-rw-r--r--drivers/input/touchscreen/da9052_tsi.c370
-rw-r--r--drivers/input/touchscreen/dynapro.c17
-rw-r--r--drivers/input/touchscreen/elo.c17
-rw-r--r--drivers/input/touchscreen/fujitsu_ts.c13
-rw-r--r--drivers/input/touchscreen/gunze.c17
-rw-r--r--drivers/input/touchscreen/h3600_ts_input.c17
-rw-r--r--drivers/input/touchscreen/hampshire.c17
-rw-r--r--drivers/input/touchscreen/inexio.c17
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c10
-rw-r--r--drivers/input/touchscreen/mtouch.c17
-rw-r--r--drivers/input/touchscreen/penmount.c17
-rw-r--r--drivers/input/touchscreen/st1232.c20
-rw-r--r--drivers/input/touchscreen/touchit213.c17
-rw-r--r--drivers/input/touchscreen/touchright.c17
-rw-r--r--drivers/input/touchscreen/touchwin.c17
-rw-r--r--drivers/input/touchscreen/tsc40.c12
-rw-r--r--drivers/input/touchscreen/wacom_i2c.c282
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c13
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c9
-rw-r--r--drivers/iommu/Kconfig21
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c37
-rw-r--r--drivers/iommu/exynos-iommu.c1076
-rw-r--r--drivers/iommu/intel-iommu.c40
-rw-r--r--drivers/iommu/iommu.c5
-rw-r--r--drivers/iommu/omap-iommu.c32
-rw-r--r--drivers/iommu/tegra-gart.c20
-rw-r--r--drivers/iommu/tegra-smmu.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.h6
-rw-r--r--drivers/leds/Kconfig29
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/led-class.c21
-rw-r--r--drivers/leds/leds-da9052.c214
-rw-r--r--drivers/leds/leds-lm3530.c100
-rw-r--r--drivers/leds/leds-lm3533.c785
-rw-r--r--drivers/leds/leds-lp5521.c12
-rw-r--r--drivers/leds/leds-mc13783.c2
-rw-r--r--drivers/leds/leds-pca955x.c95
-rw-r--r--drivers/leds/ledtrig-backlight.c4
-rw-r--r--drivers/leds/ledtrig-gpio.c4
-rw-r--r--drivers/leds/ledtrig-heartbeat.c32
-rw-r--r--drivers/leds/ledtrig-timer.c54
-rw-r--r--drivers/leds/ledtrig-transient.c237
-rw-r--r--drivers/media/common/saa7146_fops.c126
-rw-r--r--drivers/media/common/saa7146_hlp.c23
-rw-r--r--drivers/media/common/saa7146_vbi.c54
-rw-r--r--drivers/media/common/saa7146_video.c367
-rw-r--r--drivers/media/common/tuners/Kconfig27
-rw-r--r--drivers/media/common/tuners/Makefile4
-rw-r--r--drivers/media/common/tuners/fc0011.c524
-rw-r--r--drivers/media/common/tuners/fc0011.h41
-rw-r--r--drivers/media/common/tuners/fc0012-priv.h43
-rw-r--r--drivers/media/common/tuners/fc0012.c467
-rw-r--r--drivers/media/common/tuners/fc0012.h44
-rw-r--r--drivers/media/common/tuners/fc0013-priv.h44
-rw-r--r--drivers/media/common/tuners/fc0013.c634
-rw-r--r--drivers/media/common/tuners/fc0013.h57
-rw-r--r--drivers/media/common/tuners/fc001x-common.h39
-rw-r--r--drivers/media/common/tuners/tua9001.c215
-rw-r--r--drivers/media/common/tuners/tua9001.h46
-rw-r--r--drivers/media/common/tuners/tua9001_priv.h34
-rw-r--r--drivers/media/common/tuners/xc5000.c7
-rw-r--r--drivers/media/common/tuners/xc5000.h2
-rw-r--r--drivers/media/dvb/bt8xx/dst_ca.c2
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge-core.c3
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c10
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.h2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c80
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.h18
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig13
-rw-r--r--drivers/media/dvb/dvb-usb/Makefile3
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c495
-rw-r--r--drivers/media/dvb/dvb-usb/af9035.c1242
-rw-r--r--drivers/media/dvb/dvb-usb/af9035.h113
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c24
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c7
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h12
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-urb.c12
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h3
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c76
-rw-r--r--drivers/media/dvb/dvb-usb/it913x.c4
-rw-r--r--drivers/media/dvb/dvb-usb/lmedm04.c5
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-tuner.c1
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf.c850
-rw-r--r--drivers/media/dvb/dvb-usb/rtl28xxu.c28
-rw-r--r--drivers/media/dvb/frontends/Kconfig35
-rw-r--r--drivers/media/dvb/frontends/Makefile7
-rw-r--r--drivers/media/dvb/frontends/af9013.c13
-rw-r--r--drivers/media/dvb/frontends/af9033.c980
-rw-r--r--drivers/media/dvb/frontends/af9033.h75
-rw-r--r--drivers/media/dvb/frontends/af9033_priv.h470
-rw-r--r--drivers/media/dvb/frontends/au8522_common.c259
-rw-r--r--drivers/media/dvb/frontends/au8522_dig.c215
-rw-r--r--drivers/media/dvb/frontends/au8522_priv.h2
-rw-r--r--drivers/media/dvb/frontends/cx24110.c7
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_core.c4
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c5
-rw-r--r--drivers/media/dvb/frontends/dib9000.c131
-rw-r--r--drivers/media/dvb/frontends/drxd.h14
-rw-r--r--drivers/media/dvb/frontends/drxk_hard.c18
-rw-r--r--drivers/media/dvb/frontends/drxk_map.h2
-rw-r--r--drivers/media/dvb/frontends/ds3000.c5
-rw-r--r--drivers/media/dvb/frontends/it913x-fe.c26
-rw-r--r--drivers/media/dvb/frontends/lg2160.c1468
-rw-r--r--drivers/media/dvb/frontends/lg2160.h84
-rw-r--r--drivers/media/dvb/frontends/lgs8gxx.c3
-rw-r--r--drivers/media/dvb/frontends/m88rs2000.c29
-rw-r--r--drivers/media/dvb/frontends/rtl2830.c201
-rw-r--r--drivers/media/dvb/frontends/rtl2830_priv.h1
-rw-r--r--drivers/media/dvb/frontends/stb0899_drv.c8
-rw-r--r--drivers/media/dvb/frontends/stb6100.c3
-rw-r--r--drivers/media/dvb/frontends/stv0297.c2
-rw-r--r--drivers/media/dvb/frontends/stv0900_sw.c2
-rw-r--r--drivers/media/dvb/frontends/stv090x.c2
-rw-r--r--drivers/media/dvb/frontends/zl10353.c5
-rw-r--r--drivers/media/dvb/mantis/hopper_cards.c3
-rw-r--r--drivers/media/dvb/mantis/mantis_cards.c3
-rw-r--r--drivers/media/dvb/mantis/mantis_dma.c4
-rw-r--r--drivers/media/dvb/mantis/mantis_evm.c3
-rw-r--r--drivers/media/dvb/ngene/ngene-core.c4
-rw-r--r--drivers/media/dvb/pluto2/pluto2.c8
-rw-r--r--drivers/media/dvb/siano/smssdio.c4
-rw-r--r--drivers/media/dvb/siano/smsusb.c2
-rw-r--r--drivers/media/dvb/ttpci/av7110_v4l.c72
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c6
-rw-r--r--drivers/media/media-entity.c57
-rw-r--r--drivers/media/radio/Kconfig4
-rw-r--r--drivers/media/radio/dsbr100.c528
-rw-r--r--drivers/media/radio/radio-gemtek.c25
-rw-r--r--drivers/media/radio/radio-isa.c173
-rw-r--r--drivers/media/radio/radio-isa.h9
-rw-r--r--drivers/media/radio/radio-keene.c36
-rw-r--r--drivers/media/radio/radio-mr800.c524
-rw-r--r--drivers/media/radio/radio-rtrack2.c1
-rw-r--r--drivers/media/radio/radio-sf16fmi.c14
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c144
-rw-r--r--drivers/media/radio/radio-timb.c2
-rw-r--r--drivers/media/radio/saa7706h.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c305
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c65
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c265
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h14
-rw-r--r--drivers/media/radio/tef6862.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c4
-rw-r--r--drivers/media/rc/Kconfig1
-rw-r--r--drivers/media/rc/ati_remote.c146
-rw-r--r--drivers/media/rc/fintek-cir.c13
-rw-r--r--drivers/media/rc/imon.c2
-rw-r--r--drivers/media/rc/ir-raw.c8
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c4
-rw-r--r--drivers/media/rc/ite-cir.c14
-rw-r--r--drivers/media/rc/keymaps/Makefile3
-rw-r--r--drivers/media/rc/keymaps/rc-asus-ps3-100.c91
-rw-r--r--drivers/media/rc/keymaps/rc-it913x-v2.c2
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10-digitainer.c123
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10-or2x.c108
-rw-r--r--drivers/media/rc/mceusb.c5
-rw-r--r--drivers/media/rc/nuvoton-cir.c26
-rw-r--r--drivers/media/rc/rc-loopback.c1
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/video/Kconfig48
-rw-r--r--drivers/media/video/Makefile5
-rw-r--r--drivers/media/video/adp1653.c9
-rw-r--r--drivers/media/video/adv7180.c417
-rw-r--r--drivers/media/video/adv7343.c4
-rw-r--r--drivers/media/video/aptina-pll.c5
-rw-r--r--drivers/media/video/arv.c7
-rw-r--r--drivers/media/video/as3645a.c10
-rw-r--r--drivers/media/video/atmel-isi.c18
-rw-r--r--drivers/media/video/au0828/Kconfig3
-rw-r--r--drivers/media/video/au0828/au0828-cards.c2
-rw-r--r--drivers/media/video/au0828/au0828-dvb.c27
-rw-r--r--drivers/media/video/au0828/au0828-video.c25
-rw-r--r--drivers/media/video/blackfin/bfin_capture.c4
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c4
-rw-r--r--drivers/media/video/bw-qcam.c132
-rw-r--r--drivers/media/video/c-qcam.c140
-rw-r--r--drivers/media/video/cpia2/cpia2.h34
-rw-r--r--drivers/media/video/cpia2/cpia2_core.c142
-rw-r--r--drivers/media/video/cpia2/cpia2_usb.c78
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c850
-rw-r--r--drivers/media/video/cpia2/cpia2dev.h50
-rw-r--r--drivers/media/video/cx18/cx18-alsa-main.c1
-rw-r--r--drivers/media/video/cx18/cx18-alsa-pcm.c10
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c2
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.c6
-rw-r--r--drivers/media/video/cx18/cx18-streams.c3
-rw-r--r--drivers/media/video/cx231xx/cx231xx-417.c18
-rw-r--r--drivers/media/video/cx231xx/cx231xx-audio.c18
-rw-r--r--drivers/media/video/cx231xx/cx231xx-avcore.c148
-rw-r--r--drivers/media/video/cx231xx/cx231xx-core.c76
-rw-r--r--drivers/media/video/cx231xx/cx231xx-vbi.c6
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c20
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c9
-rw-r--r--drivers/media/video/cx23885/cx23885-core.c7
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c7
-rw-r--r--drivers/media/video/cx23885/cx23885.h1
-rw-r--r--drivers/media/video/cx23885/cx23888-ir.c4
-rw-r--r--drivers/media/video/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/media/video/cx25821/cx25821-audio-upstream.c3
-rw-r--r--drivers/media/video/cx25821/cx25821-core.c14
-rw-r--r--drivers/media/video/cx25821/cx25821-i2c.c3
-rw-r--r--drivers/media/video/cx25821/cx25821-medusa-video.c13
-rw-r--r--drivers/media/video/cx25821/cx25821-video-upstream-ch2.c3
-rw-r--r--drivers/media/video/cx25821/cx25821-video-upstream.c3
-rw-r--r--drivers/media/video/cx25821/cx25821-video.c25
-rw-r--r--drivers/media/video/cx25821/cx25821-video.h2
-rw-r--r--drivers/media/video/cx25840/cx25840-ir.c6
-rw-r--r--drivers/media/video/davinci/Kconfig1
-rw-r--r--drivers/media/video/davinci/vpbe_display.c4
-rw-r--r--drivers/media/video/davinci/vpfe_capture.c2
-rw-r--r--drivers/media/video/davinci/vpif_capture.c4
-rw-r--r--drivers/media/video/davinci/vpif_display.c4
-rw-r--r--drivers/media/video/em28xx/Kconfig4
-rw-r--r--drivers/media/video/em28xx/Makefile5
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c11
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c81
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c30
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c11
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c3
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c250
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c13
-rw-r--r--drivers/media/video/em28xx/em28xx.h60
-rw-r--r--drivers/media/video/et61x251/Kconfig18
-rw-r--r--drivers/media/video/et61x251/Makefile4
-rw-r--r--drivers/media/video/et61x251/et61x251.h213
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c2683
-rw-r--r--drivers/media/video/et61x251/et61x251_sensor.h108
-rw-r--r--drivers/media/video/et61x251/et61x251_tas5130d1b.c143
-rw-r--r--drivers/media/video/fsl-viu.c4
-rw-r--r--drivers/media/video/gspca/Makefile2
-rw-r--r--drivers/media/video/gspca/autogain_functions.c178
-rw-r--r--drivers/media/video/gspca/autogain_functions.h6
-rw-r--r--drivers/media/video/gspca/conex.c4
-rw-r--r--drivers/media/video/gspca/finepix.c18
-rw-r--r--drivers/media/video/gspca/gl860/gl860.c3
-rw-r--r--drivers/media/video/gspca/gspca.c545
-rw-r--r--drivers/media/video/gspca/gspca.h26
-rw-r--r--drivers/media/video/gspca/jl2005bcd.c10
-rw-r--r--drivers/media/video/gspca/mars.c292
-rw-r--r--drivers/media/video/gspca/nw80x.c2
-rw-r--r--drivers/media/video/gspca/ov519.c10
-rw-r--r--drivers/media/video/gspca/ov534.c146
-rw-r--r--drivers/media/video/gspca/pac207.c336
-rw-r--r--drivers/media/video/gspca/pac7302.c185
-rw-r--r--drivers/media/video/gspca/pac7311.c505
-rw-r--r--drivers/media/video/gspca/sn9c20x.c594
-rw-r--r--drivers/media/video/gspca/sonixb.c2
-rw-r--r--drivers/media/video/gspca/sonixj.c5
-rw-r--r--drivers/media/video/gspca/sq905.c12
-rw-r--r--drivers/media/video/gspca/sq905c.c10
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c21
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.h3
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c143
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h7
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c359
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_pb0100.h12
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_sensor.h4
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_st6422.c236
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_st6422.h4
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c198
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h8
-rw-r--r--drivers/media/video/gspca/topro.c6
-rw-r--r--drivers/media/video/gspca/vicam.c13
-rw-r--r--drivers/media/video/gspca/zc3xx.c620
-rw-r--r--drivers/media/video/hdpvr/hdpvr-control.c2
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c2
-rw-r--r--drivers/media/video/hexium_gemini.c129
-rw-r--r--drivers/media/video/hexium_orion.c24
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c4
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c6
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c8
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c4
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/video/m5mols/m5mols.h81
-rw-r--r--drivers/media/video/m5mols/m5mols_capture.c11
-rw-r--r--drivers/media/video/m5mols/m5mols_controls.c479
-rw-r--r--drivers/media/video/m5mols/m5mols_core.c93
-rw-r--r--drivers/media/video/m5mols/m5mols_reg.h1
-rw-r--r--drivers/media/video/marvell-ccic/mcam-core.c1
-rw-r--r--drivers/media/video/mem2mem_testdev.c6
-rw-r--r--drivers/media/video/meye.c2
-rw-r--r--drivers/media/video/msp3400-driver.c15
-rw-r--r--drivers/media/video/mt9m032.c4
-rw-r--r--drivers/media/video/mt9p031.c161
-rw-r--r--drivers/media/video/mt9t112.c1
-rw-r--r--drivers/media/video/mt9v032.c2
-rw-r--r--drivers/media/video/mx1_camera.c14
-rw-r--r--drivers/media/video/mx2_camera.c78
-rw-r--r--drivers/media/video/mx2_emmaprp.c8
-rw-r--r--drivers/media/video/mx3_camera.c45
-rw-r--r--drivers/media/video/mxb.c351
-rw-r--r--drivers/media/video/mxb.h42
-rw-r--r--drivers/media/video/omap1_camera.c22
-rw-r--r--drivers/media/video/omap24xxcam-dma.c20
-rw-r--r--drivers/media/video/omap24xxcam.c3
-rw-r--r--drivers/media/video/omap24xxcam.h14
-rw-r--r--drivers/media/video/omap3isp/isp.c59
-rw-r--r--drivers/media/video/omap3isp/isp.h8
-rw-r--r--drivers/media/video/omap3isp/ispccdc.c256
-rw-r--r--drivers/media/video/omap3isp/ispccdc.h12
-rw-r--r--drivers/media/video/omap3isp/ispccp2.c24
-rw-r--r--drivers/media/video/omap3isp/ispcsi2.c21
-rw-r--r--drivers/media/video/omap3isp/ispcsi2.h1
-rw-r--r--drivers/media/video/omap3isp/ispcsiphy.c4
-rw-r--r--drivers/media/video/omap3isp/ispcsiphy.h15
-rw-r--r--drivers/media/video/omap3isp/isppreview.c634
-rw-r--r--drivers/media/video/omap3isp/isppreview.h76
-rw-r--r--drivers/media/video/omap3isp/ispqueue.h2
-rw-r--r--drivers/media/video/omap3isp/ispresizer.c139
-rw-r--r--drivers/media/video/omap3isp/ispstat.c2
-rw-r--r--drivers/media/video/omap3isp/ispvideo.c303
-rw-r--r--drivers/media/video/omap3isp/ispvideo.h5
-rw-r--r--drivers/media/video/ov5642.c2
-rw-r--r--drivers/media/video/pms.c239
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h6
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c193
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.h9
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c1341
-rw-r--r--drivers/media/video/pwc/pwc-if.c191
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c145
-rw-r--r--drivers/media/video/pwc/pwc.h21
-rw-r--r--drivers/media/video/pxa_camera.c15
-rw-r--r--drivers/media/video/s2255drv.c11
-rw-r--r--drivers/media/video/s5p-fimc/Kconfig48
-rw-r--r--drivers/media/video/s5p-fimc/Makefile6
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c506
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c1159
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.h272
-rw-r--r--drivers/media/video/s5p-fimc/fimc-lite-reg.c300
-rw-r--r--drivers/media/video/s5p-fimc/fimc-lite-reg.h150
-rw-r--r--drivers/media/video/s5p-fimc/fimc-lite.c1576
-rw-r--r--drivers/media/video/s5p-fimc/fimc-lite.h213
-rw-r--r--drivers/media/video/s5p-fimc/fimc-m2m.c824
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.c476
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.h18
-rw-r--r--drivers/media/video/s5p-fimc/fimc-reg.c616
-rw-r--r--drivers/media/video/s5p-fimc/fimc-reg.h326
-rw-r--r--drivers/media/video/s5p-fimc/mipi-csis.c21
-rw-r--r--drivers/media/video/s5p-fimc/regs-fimc.h301
-rw-r--r--drivers/media/video/s5p-g2d/g2d.c69
-rw-r--r--drivers/media/video/s5p-g2d/g2d.h1
-rw-r--r--drivers/media/video/s5p-jpeg/jpeg-core.c68
-rw-r--r--drivers/media/video/s5p-jpeg/jpeg-core.h2
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc.c81
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_common.h2
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c16
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c6
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_opr.c28
-rw-r--r--drivers/media/video/s5p-tv/hdmi_drv.c480
-rw-r--r--drivers/media/video/s5p-tv/hdmiphy_drv.c225
-rw-r--r--drivers/media/video/s5p-tv/mixer.h3
-rw-r--r--drivers/media/video/s5p-tv/mixer_drv.c2
-rw-r--r--drivers/media/video/s5p-tv/mixer_reg.c15
-rw-r--r--drivers/media/video/s5p-tv/mixer_video.c10
-rw-r--r--drivers/media/video/s5p-tv/regs-hdmi.h1
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c45
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c39
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c7
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c2
-rw-r--r--drivers/media/video/saa7134/saa7134.h1
-rw-r--r--drivers/media/video/saa7164/saa7164-vbi.c4
-rw-r--r--drivers/media/video/saa7164/saa7164.h5
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c92
-rw-r--r--drivers/media/video/sh_vou.c4
-rw-r--r--drivers/media/video/smiapp-pll.c418
-rw-r--r--drivers/media/video/smiapp-pll.h103
-rw-r--r--drivers/media/video/smiapp/Kconfig6
-rw-r--r--drivers/media/video/smiapp/Makefile5
-rw-r--r--drivers/media/video/smiapp/smiapp-core.c2894
-rw-r--r--drivers/media/video/smiapp/smiapp-limits.c132
-rw-r--r--drivers/media/video/smiapp/smiapp-limits.h128
-rw-r--r--drivers/media/video/smiapp/smiapp-quirk.c306
-rw-r--r--drivers/media/video/smiapp/smiapp-quirk.h83
-rw-r--r--drivers/media/video/smiapp/smiapp-reg-defs.h503
-rw-r--r--drivers/media/video/smiapp/smiapp-reg.h122
-rw-r--r--drivers/media/video/smiapp/smiapp-regs.c273
-rw-r--r--drivers/media/video/smiapp/smiapp-regs.h49
-rw-r--r--drivers/media/video/smiapp/smiapp.h252
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c4
-rw-r--r--drivers/media/video/soc_camera.c55
-rw-r--r--drivers/media/video/soc_mediabus.c54
-rw-r--r--drivers/media/video/sta2x11_vip.c1550
-rw-r--r--drivers/media/video/sta2x11_vip.h40
-rw-r--r--drivers/media/video/stk-webcam.c8
-rw-r--r--drivers/media/video/tda9840.c75
-rw-r--r--drivers/media/video/tlg2300/pd-video.c1
-rw-r--r--drivers/media/video/tm6000/tm6000-input.c3
-rw-r--r--drivers/media/video/tm6000/tm6000-stds.c2
-rw-r--r--drivers/media/video/tm6000/tm6000-video.c14
-rw-r--r--drivers/media/video/tm6000/tm6000.h2
-rw-r--r--drivers/media/video/tuner-core.c15
-rw-r--r--drivers/media/video/tvp5150.c11
-rw-r--r--drivers/media/video/tvp7002.c105
-rw-r--r--drivers/media/video/usbvision/usbvision-core.c12
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c4
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c330
-rw-r--r--drivers/media/video/uvc/uvc_queue.c43
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c50
-rw-r--r--drivers/media/video/uvc/uvcvideo.h26
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c15
-rw-r--r--drivers/media/video/v4l2-ctrls.c302
-rw-r--r--drivers/media/video/v4l2-dev.c218
-rw-r--r--drivers/media/video/v4l2-event.c71
-rw-r--r--drivers/media/video/v4l2-ioctl.c662
-rw-r--r--drivers/media/video/v4l2-subdev.c143
-rw-r--r--drivers/media/video/via-camera.c15
-rw-r--r--drivers/media/video/videobuf-core.c3
-rw-r--r--drivers/media/video/videobuf-dma-contig.c199
-rw-r--r--drivers/media/video/videobuf-dvb.c3
-rw-r--r--drivers/media/video/videobuf2-core.c50
-rw-r--r--drivers/media/video/vivi.c223
-rw-r--r--drivers/media/video/w9966.c94
-rw-r--r--drivers/media/video/zoran/zoran_device.c2
-rw-r--r--drivers/media/video/zoran/zoran_driver.c20
-rw-r--r--drivers/media/video/zr364xx.c2
-rw-r--r--drivers/message/fusion/mptbase.c13
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/mfd/Kconfig76
-rw-r--r--drivers/mfd/Makefile10
-rw-r--r--drivers/mfd/ab8500-core.c423
-rw-r--r--drivers/mfd/ab8500-debugfs.c6
-rw-r--r--drivers/mfd/ab8500-gpadc.c8
-rw-r--r--drivers/mfd/ab8500-i2c.c128
-rw-r--r--drivers/mfd/ab8500-sysctrl.c6
-rw-r--r--drivers/mfd/anatop-mfd.c35
-rw-r--r--drivers/mfd/asic3.c33
-rw-r--r--drivers/mfd/cs5535-mfd.c13
-rw-r--r--drivers/mfd/da9052-core.c140
-rw-r--r--drivers/mfd/da9052-i2c.c72
-rw-r--r--drivers/mfd/da9052-spi.c19
-rw-r--r--drivers/mfd/db8500-prcmu.c35
-rw-r--r--drivers/mfd/intel_msic.c31
-rw-r--r--drivers/mfd/janz-cmodio.c17
-rw-r--r--drivers/mfd/lm3533-core.c667
-rw-r--r--drivers/mfd/lm3533-ctrlbank.c148
-rw-r--r--drivers/mfd/lpc_ich.c888
-rw-r--r--drivers/mfd/lpc_sch.c26
-rw-r--r--drivers/mfd/max77693-irq.c309
-rw-r--r--drivers/mfd/max77693.c249
-rw-r--r--drivers/mfd/mc13xxx-core.c239
-rw-r--r--drivers/mfd/mc13xxx-i2c.c128
-rw-r--r--drivers/mfd/mc13xxx-spi.c140
-rw-r--r--drivers/mfd/mc13xxx.h45
-rw-r--r--drivers/mfd/pcf50633-core.c36
-rw-r--r--drivers/mfd/rc5t583.c8
-rw-r--r--drivers/mfd/rdc321x-southbridge.c13
-rw-r--r--drivers/mfd/s5m-core.c6
-rw-r--r--drivers/mfd/sta2x11-mfd.c467
-rw-r--r--drivers/mfd/stmpe-spi.c1
-rw-r--r--drivers/mfd/tps65090.c33
-rw-r--r--drivers/mfd/tps65217.c17
-rw-r--r--drivers/mfd/tps65910-irq.c130
-rw-r--r--drivers/mfd/tps65910.c205
-rw-r--r--drivers/mfd/twl4030-irq.c1
-rw-r--r--drivers/mfd/twl6040-core.c120
-rw-r--r--drivers/mfd/twl6040-irq.c32
-rw-r--r--drivers/mfd/vx855.c12
-rw-r--r--drivers/mfd/wm831x-auxadc.c6
-rw-r--r--drivers/mfd/wm831x-core.c45
-rw-r--r--drivers/mfd/wm831x-irq.c148
-rw-r--r--drivers/mfd/wm8350-core.c31
-rw-r--r--drivers/mfd/wm8350-i2c.c61
-rw-r--r--drivers/mfd/wm8400-core.c250
-rw-r--r--drivers/mfd/wm8994-core.c25
-rw-r--r--drivers/mfd/wm8994-regmap.c1
-rw-r--r--drivers/misc/ab8500-pwm.c6
-rw-r--r--drivers/mmc/card/block.c22
-rw-r--r--drivers/mmc/card/queue.c6
-rw-r--r--drivers/mmc/core/bus.c2
-rw-r--r--drivers/mmc/core/cd-gpio.c3
-rw-r--r--drivers/mmc/core/core.c18
-rw-r--r--drivers/mmc/core/mmc.c119
-rw-r--r--drivers/mmc/core/sdio.c2
-rw-r--r--drivers/mmc/core/sdio_irq.c11
-rw-r--r--drivers/mmc/host/Kconfig17
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/atmel-mci.c469
-rw-r--r--drivers/mmc/host/davinci_mmc.c1
-rw-r--r--drivers/mmc/host/dw_mmc.c18
-rw-r--r--drivers/mmc/host/imxmmc.c1169
-rw-r--r--drivers/mmc/host/imxmmc.h64
-rw-r--r--drivers/mmc/host/mmci.c65
-rw-r--r--drivers/mmc/host/mvsdio.c14
-rw-r--r--drivers/mmc/host/mxcmmc.c39
-rw-r--r--drivers/mmc/host/mxs-mmc.c197
-rw-r--r--drivers/mmc/host/omap.c48
-rw-r--r--drivers/mmc/host/omap_hsmmc.c86
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c44
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c8
-rw-r--r--drivers/mmc/host/sdhci-spear.c82
-rw-r--r--drivers/mmc/host/sdhci-tegra.c26
-rw-r--r--drivers/mmc/host/sdhci.c4
-rw-r--r--drivers/mtd/maps/lantiq-flash.c76
-rw-r--r--drivers/mtd/nand/mxc_nand.c6
-rw-r--r--drivers/mtd/nand/orion_nand.c18
-rw-r--r--drivers/net/cris/eth_v10.c1
-rw-r--r--drivers/net/ethernet/freescale/fec.c35
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c42
-rw-r--r--drivers/net/ethernet/rdc/r6040.c15
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c8
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c7
-rw-r--r--drivers/net/ethernet/ti/Kconfig2
-rw-r--r--drivers/net/usb/asix.c3
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/virtio_net.c5
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c19
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c238
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h33
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c12
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c45
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c11
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c29
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c104
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c12
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c12
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c94
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h24
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c50
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.c84
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h178
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c16
-rw-r--r--drivers/net/wireless/b43/bus.c6
-rw-r--r--drivers/net/wireless/b43/dma.c2
-rw-r--r--drivers/net/wireless/b43/main.c4
-rw-r--r--drivers/net/wireless/b43legacy/main.c4
-rw-r--r--drivers/net/wireless/b43legacy/phy.c4
-rw-r--r--drivers/net/wireless/b43legacy/radio.c10
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c244
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c32
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c350
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c265
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h37
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/Makefile3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c479
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h24
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/antsel.c16
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c11
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c142
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.c826
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.h77
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/otp.c410
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/otp.h36
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c67
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c333
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h228
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.c980
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.h29
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/stf.c6
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig8
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c35
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-mac80211.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c288
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h129
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c52
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/mwifiex/Makefile2
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c498
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.h2
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c21
-rw-r--r--drivers/net/wireless/mwifiex/decl.h13
-rw-r--r--drivers/net/wireless/mwifiex/fw.h159
-rw-r--r--drivers/net/wireless/mwifiex/ie.c396
-rw-r--r--drivers/net/wireless/mwifiex/init.c1
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h32
-rw-r--r--drivers/net/wireless/mwifiex/join.c26
-rw-r--r--drivers/net/wireless/mwifiex/main.c57
-rw-r--r--drivers/net/wireless/mwifiex/main.h26
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c69
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c8
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c51
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c9
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c432
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c4
-rw-r--r--drivers/net/wireless/rndis_wlan.c14
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c1
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c3
-rw-r--r--drivers/net/wireless/ti/wl12xx/Kconfig1
-rw-r--r--drivers/net/wireless/ti/wlcore/Kconfig2
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c82
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h32
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c29
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c323
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c38
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/wl12xx.h41
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h6
-rw-r--r--drivers/net/xen-netback/netback.c3
-rw-r--r--drivers/net/xen-netfront.c6
-rw-r--r--drivers/nfc/Kconfig13
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/pn533.c19
-rw-r--r--drivers/nfc/pn544_hci.c947
-rw-r--r--drivers/of/Kconfig12
-rw-r--r--drivers/of/Makefile2
-rw-r--r--drivers/of/of_i2c.c16
-rw-r--r--drivers/of/of_pci_irq.c2
-rw-r--r--drivers/of/of_spi.c99
-rw-r--r--drivers/pci/pci-sysfs.c5
-rw-r--r--drivers/pci/pci.c2
-rw-r--r--drivers/pinctrl/spear/Kconfig10
-rw-r--r--drivers/pinctrl/spear/Makefile2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h251
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c2198
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c1989
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.c103
-rw-r--r--drivers/platform/x86/toshiba_acpi.c1
-rw-r--r--drivers/power/Kconfig10
-rw-r--r--drivers/power/ab8500_btemp.c12
-rw-r--r--drivers/power/ab8500_charger.c13
-rw-r--r--drivers/power/ab8500_fg.c12
-rw-r--r--drivers/power/charger-manager.c392
-rw-r--r--drivers/power/ds2781_battery.c20
-rw-r--r--drivers/power/isp1704_charger.c2
-rw-r--r--drivers/power/max17042_battery.c148
-rw-r--r--drivers/power/power_supply_sysfs.c1
-rw-r--r--drivers/power/sbs-battery.c2
-rw-r--r--drivers/power/smb347-charger.c712
-rw-r--r--drivers/power/wm831x_power.c21
-rw-r--r--drivers/rapidio/Kconfig14
-rw-r--r--drivers/rapidio/devices/Makefile3
-rw-r--r--drivers/rapidio/devices/tsi721.c211
-rw-r--r--drivers/rapidio/devices/tsi721.h105
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c823
-rw-r--r--drivers/rapidio/rio.c81
-rw-r--r--drivers/regulator/anatop-regulator.c18
-rw-r--r--drivers/regulator/tps65910-regulator.c82
-rw-r--r--drivers/regulator/wm831x-dcdc.c24
-rw-r--r--drivers/regulator/wm831x-isink.c4
-rw-r--r--drivers/regulator/wm831x-ldo.c10
-rw-r--r--drivers/remoteproc/remoteproc_core.c4
-rw-r--r--drivers/rtc/Kconfig42
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-ds1307.c20
-rw-r--r--drivers/rtc/rtc-ep93xx.c24
-rw-r--r--drivers/rtc/rtc-imxdi.c6
-rw-r--r--drivers/rtc/rtc-lpc32xx.c12
-rw-r--r--drivers/rtc/rtc-m41t93.c46
-rw-r--r--drivers/rtc/rtc-pcf8563.c44
-rw-r--r--drivers/rtc/rtc-pl031.c14
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/rtc/rtc-spear.c10
-rw-r--r--drivers/rtc/rtc-tegra.c50
-rw-r--r--drivers/rtc/rtc-wm831x.c2
-rw-r--r--drivers/s390/block/dasd_int.h4
-rw-r--r--drivers/s390/char/sclp_cmd.c12
-rw-r--r--drivers/s390/char/sclp_sdias.c2
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c5
-rw-r--r--drivers/scsi/bfa/bfad_attr.c17
-rw-r--r--drivers/scsi/bfa/bfad_im.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.h1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h9
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c18
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c173
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c39
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c122
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c21
-rw-r--r--drivers/scsi/fcoe/Makefile2
-rw-r--r--drivers/scsi/fcoe/fcoe.c200
-rw-r--r--drivers/scsi/fcoe/fcoe.h8
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c159
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c832
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c13
-rw-r--r--drivers/scsi/qla2xxx/Kconfig9
-rw-r--r--drivers/scsi/qla2xxx/Makefile3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c81
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h78
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c199
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c90
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c94
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c615
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c66
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c173
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c4973
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h1005
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c1955
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h82
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c134
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h22
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h28
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h8
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c95
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c111
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c738
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h192
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c78
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_lib.c11
-rw-r--r--drivers/scsi/scsi_pm.c5
-rw-r--r--drivers/scsi/scsi_scan.c7
-rw-r--r--drivers/scsi/scsi_wait_scan.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c5
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/spi-ath79.c3
-rw-r--r--drivers/spi/spi-coldfire-qspi.c255
-rw-r--r--drivers/spi/spi-dw-pci.c13
-rw-r--r--drivers/spi/spi-ep93xx.c37
-rw-r--r--drivers/spi/spi-fsl-espi.c1
-rw-r--r--drivers/spi/spi-fsl-lib.c2
-rw-r--r--drivers/spi/spi-fsl-spi.c2
-rw-r--r--drivers/spi/spi-imx.c30
-rw-r--r--drivers/spi/spi-lm70llp.c3
-rw-r--r--drivers/spi/spi-mpc52xx.c3
-rw-r--r--drivers/spi/spi-omap2-mcspi.c373
-rw-r--r--drivers/spi/spi-orion.c30
-rw-r--r--drivers/spi/spi-ppc4xx.c4
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c12
-rw-r--r--drivers/spi/spi-rspi.c320
-rw-r--r--drivers/spi/spi-sirf.c20
-rw-r--r--drivers/spi/spi-topcliff-pch.c3
-rw-r--r--drivers/spi/spi.c98
-rw-r--r--drivers/ssb/b43_pci_bridge.c2
-rw-r--r--drivers/ssb/pci.c88
-rw-r--r--drivers/staging/android/ashmem.c10
-rw-r--r--drivers/staging/media/as102/as10x_cmd.c28
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c4
-rw-r--r--drivers/staging/media/easycap/easycap_main.c1639
-rw-r--r--drivers/staging/media/go7007/go7007-v4l2.c2
-rw-r--r--drivers/staging/media/go7007/s2250-loader.c2
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c4
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c4
-rw-r--r--drivers/staging/omapdrm/omap_crtc.c7
-rw-r--r--drivers/staging/omapdrm/omap_drv.c4
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/serial/imx.c38
-rw-r--r--drivers/tty/serial/lantiq.c83
-rw-r--r--drivers/tty/serial/sb1250-duart.c1
-rw-r--r--drivers/tty/serial/zs.c1
-rw-r--r--drivers/tty/tty_ldisc.c41
-rw-r--r--drivers/tty/tty_mutex.c19
-rw-r--r--drivers/usb/gadget/uvc_queue.c2
-rw-r--r--drivers/usb/gadget/uvc_v4l2.c2
-rw-r--r--drivers/usb/host/ehci-mxc.c62
-rw-r--r--drivers/usb/host/ehci-orion.c16
-rw-r--r--drivers/usb/host/ehci-tegra.c5
-rw-r--r--drivers/video/backlight/Kconfig12
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/adp5520_bl.c4
-rw-r--r--drivers/video/backlight/adp8860_bl.c28
-rw-r--r--drivers/video/backlight/adp8870_bl.c28
-rw-r--r--drivers/video/backlight/ams369fg06.c16
-rw-r--r--drivers/video/backlight/apple_bl.c21
-rw-r--r--drivers/video/backlight/backlight.c11
-rw-r--r--drivers/video/backlight/corgi_lcd.c12
-rw-r--r--drivers/video/backlight/cr_bllcd.c9
-rw-r--r--drivers/video/backlight/da903x_bl.c1
-rw-r--r--drivers/video/backlight/generic_bl.c6
-rw-r--r--drivers/video/backlight/ili9320.c9
-rw-r--r--drivers/video/backlight/jornada720_bl.c14
-rw-r--r--drivers/video/backlight/jornada720_lcd.c8
-rw-r--r--drivers/video/backlight/l4f00242t03.c27
-rw-r--r--drivers/video/backlight/lcd.c20
-rw-r--r--drivers/video/backlight/ld9040.c15
-rw-r--r--drivers/video/backlight/lm3533_bl.c423
-rw-r--r--drivers/video/backlight/lms283gf05.c9
-rw-r--r--drivers/video/backlight/ltv350qv.c24
-rw-r--r--drivers/video/backlight/omap1_bl.c4
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c1
-rw-r--r--drivers/video/backlight/progear_bl.c6
-rw-r--r--drivers/video/backlight/s6e63m0.c16
-rw-r--r--drivers/video/backlight/tdo24m.c21
-rw-r--r--drivers/video/backlight/tosa_bl.c11
-rw-r--r--drivers/video/backlight/tosa_lcd.c8
-rw-r--r--drivers/video/backlight/wm831x_bl.c1
-rw-r--r--drivers/video/efifb.c79
-rw-r--r--drivers/video/fbmem.c21
-rw-r--r--drivers/video/imxfb.c50
-rw-r--r--drivers/video/matrox/matroxfb_maven.c1
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c1
-rw-r--r--drivers/w1/masters/mxc_w1.c4
-rw-r--r--drivers/watchdog/Kconfig14
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/da9052_wdt.c251
-rw-r--r--drivers/watchdog/iTCO_vendor.h6
-rw-r--r--drivers/watchdog/iTCO_vendor_support.c43
-rw-r--r--drivers/watchdog/iTCO_wdt.c533
-rw-r--r--drivers/watchdog/imx2_wdt.c2
-rw-r--r--drivers/watchdog/lantiq_wdt.c56
-rw-r--r--drivers/watchdog/orion_wdt.c16
-rw-r--r--drivers/watchdog/sp805_wdt.c249
-rw-r--r--drivers/watchdog/via_wdt.c2
-rw-r--r--drivers/watchdog/watchdog_core.c74
-rw-r--r--drivers/watchdog/watchdog_core.h (renamed from drivers/watchdog/watchdog_dev.h)8
-rw-r--r--drivers/watchdog/watchdog_dev.c375
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/acpi.c62
-rw-r--r--drivers/xen/events.c5
-rw-r--r--drivers/xen/grant-table.c125
-rw-r--r--drivers/xen/xen-acpi-processor.c1
-rw-r--r--drivers/xen/xen-selfballoon.c34
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c6
-rw-r--r--drivers/xen/xenbus/xenbus_comms.h1
-rw-r--r--drivers/xen/xenbus/xenbus_dev_backend.c51
-rw-r--r--fs/9p/vfs_inode.c2
-rw-r--r--fs/affs/inode.c2
-rw-r--r--fs/afs/inode.c2
-rw-r--r--fs/aio.c4
-rw-r--r--fs/autofs4/inode.c2
-rw-r--r--fs/bad_inode.c1
-rw-r--r--fs/bfs/inode.c2
-rw-r--r--fs/binfmt_misc.c2
-rw-r--r--fs/bio.c61
-rw-r--r--fs/block_dev.c2
-rw-r--r--fs/btrfs/inode.c2
-rw-r--r--fs/ceph/file.c1
-rw-r--r--fs/ceph/ioctl.c102
-rw-r--r--fs/ceph/ioctl.h2
-rw-r--r--fs/ceph/mds_client.c54
-rw-r--r--fs/ceph/mds_client.h5
-rw-r--r--fs/ceph/snap.c2
-rw-r--r--fs/ceph/xattr.c9
-rw-r--r--fs/cifs/Kconfig20
-rw-r--r--fs/cifs/Makefile4
-rw-r--r--fs/cifs/README5
-rw-r--r--fs/cifs/cifs_debug.c56
-rw-r--r--fs/cifs/cifs_debug.h4
-rw-r--r--fs/cifs/cifsfs.c25
-rw-r--r--fs/cifs/cifsglob.h107
-rw-r--r--fs/cifs/cifsproto.h19
-rw-r--r--fs/cifs/cifssmb.c181
-rw-r--r--fs/cifs/connect.c178
-rw-r--r--fs/cifs/file.c683
-rw-r--r--fs/cifs/ioctl.c8
-rw-r--r--fs/cifs/misc.c66
-rw-r--r--fs/cifs/readdir.c15
-rw-r--r--fs/cifs/smb1ops.c154
-rw-r--r--fs/cifs/smb2ops.c27
-rw-r--r--fs/cifs/transport.c76
-rw-r--r--fs/coda/inode.c2
-rw-r--r--fs/compat.c6
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/debugfs/file.c128
-rw-r--r--fs/direct-io.c44
-rw-r--r--fs/ecryptfs/super.c2
-rw-r--r--fs/eventfd.c12
-rw-r--r--fs/exofs/Kbuild2
-rw-r--r--fs/exofs/exofs.h14
-rw-r--r--fs/exofs/inode.c4
-rw-r--r--fs/exofs/super.c16
-rw-r--r--fs/exofs/sys.c200
-rw-r--r--fs/ext2/balloc.c4
-rw-r--r--fs/ext2/ialloc.c2
-rw-r--r--fs/ext2/inode.c2
-rw-r--r--fs/ext2/super.c18
-rw-r--r--fs/ext2/xattr.c1
-rw-r--r--fs/ext3/dir.c167
-rw-r--r--fs/ext3/ext3.h6
-rw-r--r--fs/ext3/hash.c4
-rw-r--r--fs/ext3/ialloc.c20
-rw-r--r--fs/ext3/inode.c6
-rw-r--r--fs/ext3/super.c6
-rw-r--r--fs/ext4/super.c8
-rw-r--r--fs/fat/dir.c4
-rw-r--r--fs/fat/fat.h6
-rw-r--r--fs/fat/fatent.c21
-rw-r--r--fs/fat/inode.c56
-rw-r--r--fs/freevxfs/vxfs_inode.c2
-rw-r--r--fs/fs-writeback.c336
-rw-r--r--fs/fuse/inode.c2
-rw-r--r--fs/gfs2/super.c2
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hfsplus/super.c2
-rw-r--r--fs/hostfs/hostfs_kern.c2
-rw-r--r--fs/hpfs/buffer.c1
-rw-r--r--fs/hpfs/hpfs_fn.h7
-rw-r--r--fs/hpfs/inode.c2
-rw-r--r--fs/hppfs/hppfs.c2
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/inode.c64
-rw-r--r--fs/ioprio.c2
-rw-r--r--fs/jbd/checkpoint.c23
-rw-r--r--fs/jbd/commit.c21
-rw-r--r--fs/jbd/journal.c206
-rw-r--r--fs/jbd/transaction.c2
-rw-r--r--fs/jffs2/fs.c2
-rw-r--r--fs/jfs/inode.c2
-rw-r--r--fs/lockd/clntlock.c13
-rw-r--r--fs/lockd/svc.c7
-rw-r--r--fs/logfs/readwrite.c2
-rw-r--r--fs/minix/inode.c2
-rw-r--r--fs/namei.c23
-rw-r--r--fs/ncpfs/inode.c2
-rw-r--r--fs/nfs/Kconfig11
-rw-r--r--fs/nfs/Makefile5
-rw-r--r--fs/nfs/blocklayout/blocklayout.c90
-rw-r--r--fs/nfs/blocklayout/blocklayoutdev.c2
-rw-r--r--fs/nfs/client.c268
-rw-r--r--fs/nfs/delegation.c16
-rw-r--r--fs/nfs/delegation.h1
-rw-r--r--fs/nfs/dir.c39
-rw-r--r--fs/nfs/direct.c751
-rw-r--r--fs/nfs/file.c8
-rw-r--r--fs/nfs/fscache.c15
-rw-r--r--fs/nfs/fscache.h10
-rw-r--r--fs/nfs/getroot.c85
-rw-r--r--fs/nfs/idmap.c30
-rw-r--r--fs/nfs/inode.c127
-rw-r--r--fs/nfs/internal.h139
-rw-r--r--fs/nfs/namespace.c103
-rw-r--r--fs/nfs/netns.h5
-rw-r--r--fs/nfs/nfs2xdr.c5
-rw-r--r--fs/nfs/nfs3proc.c27
-rw-r--r--fs/nfs/nfs3xdr.c112
-rw-r--r--fs/nfs/nfs4_fs.h23
-rw-r--r--fs/nfs/nfs4filelayout.c688
-rw-r--r--fs/nfs/nfs4filelayout.h63
-rw-r--r--fs/nfs/nfs4filelayoutdev.c102
-rw-r--r--fs/nfs/nfs4namespace.c55
-rw-r--r--fs/nfs/nfs4proc.c537
-rw-r--r--fs/nfs/nfs4renewd.c2
-rw-r--r--fs/nfs/nfs4state.c225
-rw-r--r--fs/nfs/nfs4xdr.c399
-rw-r--r--fs/nfs/objlayout/objio_osd.c18
-rw-r--r--fs/nfs/objlayout/objlayout.c19
-rw-r--r--fs/nfs/pagelist.c61
-rw-r--r--fs/nfs/pnfs.c352
-rw-r--r--fs/nfs/pnfs.h127
-rw-r--r--fs/nfs/proc.c21
-rw-r--r--fs/nfs/read.c437
-rw-r--r--fs/nfs/super.c760
-rw-r--r--fs/nfs/write.c809
-rw-r--r--fs/nfsd/export.c175
-rw-r--r--fs/nfsd/idmap.h8
-rw-r--r--fs/nfsd/netns.h6
-rw-r--r--fs/nfsd/nfs4idmap.c109
-rw-r--r--fs/nfsd/nfs4state.c13
-rw-r--r--fs/nfsd/nfsctl.c55
-rw-r--r--fs/nfsd/nfsfh.c2
-rw-r--r--fs/nfsd/nfssvc.c8
-rw-r--r--fs/nfsd/vfs.c2
-rw-r--r--fs/nilfs2/file.c24
-rw-r--r--fs/nilfs2/inode.c4
-rw-r--r--fs/nilfs2/ioctl.c8
-rw-r--r--fs/nls/Kconfig157
-rw-r--r--fs/nls/Makefile23
-rw-r--r--fs/nls/nls_macceltic.c602
-rw-r--r--fs/nls/nls_maccenteuro.c532
-rw-r--r--fs/nls/nls_maccroatian.c602
-rw-r--r--fs/nls/nls_maccyrillic.c497
-rw-r--r--fs/nls/nls_macgaelic.c567
-rw-r--r--fs/nls/nls_macgreek.c497
-rw-r--r--fs/nls/nls_maciceland.c602
-rw-r--r--fs/nls/nls_macinuit.c532
-rw-r--r--fs/nls/nls_macroman.c637
-rw-r--r--fs/nls/nls_macromanian.c602
-rw-r--r--fs/nls/nls_macturkish.c602
-rw-r--r--fs/ntfs/inode.c2
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c2
-rw-r--r--fs/ocfs2/inode.c2
-rw-r--r--fs/omfs/inode.c2
-rw-r--r--fs/pipe.c2
-rw-r--r--fs/proc/array.c147
-rw-r--r--fs/proc/base.c86
-rw-r--r--fs/proc/inode.c2
-rw-r--r--fs/proc/internal.h3
-rw-r--r--fs/proc/task_mmu.c84
-rw-r--r--fs/proc/task_nommu.c2
-rw-r--r--fs/pstore/inode.c2
-rw-r--r--fs/quota/dquot.c32
-rw-r--r--fs/read_write.c7
-rw-r--r--fs/reiserfs/inode.c4
-rw-r--r--fs/reiserfs/super.c6
-rw-r--r--fs/splice.c4
-rw-r--r--fs/sysfs/inode.c2
-rw-r--r--fs/sysv/inode.c2
-rw-r--r--fs/ubifs/super.c2
-rw-r--r--fs/udf/inode.c2
-rw-r--r--fs/ufs/inode.c2
-rw-r--r--fs/xfs/Makefile2
-rw-r--r--fs/xfs/xfs_ag.h18
-rw-r--r--fs/xfs/xfs_alloc.c585
-rw-r--r--fs/xfs/xfs_alloc.h28
-rw-r--r--fs/xfs/xfs_alloc_btree.c9
-rw-r--r--fs/xfs/xfs_aops.c218
-rw-r--r--fs/xfs/xfs_attr.c25
-rw-r--r--fs/xfs/xfs_attr_leaf.c3
-rw-r--r--fs/xfs/xfs_bmap.c32
-rw-r--r--fs/xfs/xfs_bmap.h3
-rw-r--r--fs/xfs/xfs_bmap_btree.c1
-rw-r--r--fs/xfs/xfs_btree.c1
-rw-r--r--fs/xfs/xfs_buf.c593
-rw-r--r--fs/xfs/xfs_buf.h96
-rw-r--r--fs/xfs/xfs_buf_item.c123
-rw-r--r--fs/xfs/xfs_da_btree.c17
-rw-r--r--fs/xfs/xfs_dfrag.c2
-rw-r--r--fs/xfs/xfs_dir2.c1
-rw-r--r--fs/xfs/xfs_dir2_block.c1
-rw-r--r--fs/xfs/xfs_dir2_data.c1
-rw-r--r--fs/xfs/xfs_dir2_leaf.c1
-rw-r--r--fs/xfs/xfs_dir2_node.c1
-rw-r--r--fs/xfs/xfs_dir2_sf.c1
-rw-r--r--fs/xfs/xfs_discard.c6
-rw-r--r--fs/xfs/xfs_dquot.c91
-rw-r--r--fs/xfs/xfs_dquot.h3
-rw-r--r--fs/xfs/xfs_dquot_item.c162
-rw-r--r--fs/xfs/xfs_error.c1
-rw-r--r--fs/xfs/xfs_export.c1
-rw-r--r--fs/xfs/xfs_extent_busy.c603
-rw-r--r--fs/xfs/xfs_extent_busy.h69
-rw-r--r--fs/xfs/xfs_extfree_item.c59
-rw-r--r--fs/xfs/xfs_file.c327
-rw-r--r--fs/xfs/xfs_fsops.c82
-rw-r--r--fs/xfs/xfs_ialloc.c10
-rw-r--r--fs/xfs/xfs_ialloc.h9
-rw-r--r--fs/xfs/xfs_ialloc_btree.c1
-rw-r--r--fs/xfs/xfs_iget.c24
-rw-r--r--fs/xfs/xfs_inode.c132
-rw-r--r--fs/xfs/xfs_inode.h5
-rw-r--r--fs/xfs/xfs_inode_item.c176
-rw-r--r--fs/xfs/xfs_inode_item.h2
-rw-r--r--fs/xfs/xfs_inum.h5
-rw-r--r--fs/xfs/xfs_ioctl.c2
-rw-r--r--fs/xfs/xfs_ioctl32.c2
-rw-r--r--fs/xfs/xfs_iomap.c59
-rw-r--r--fs/xfs/xfs_iops.c15
-rw-r--r--fs/xfs/xfs_itable.c1
-rw-r--r--fs/xfs/xfs_log.c49
-rw-r--r--fs/xfs/xfs_log.h1
-rw-r--r--fs/xfs/xfs_log_cil.c253
-rw-r--r--fs/xfs/xfs_log_priv.h2
-rw-r--r--fs/xfs/xfs_log_recover.c103
-rw-r--r--fs/xfs/xfs_message.c1
-rw-r--r--fs/xfs/xfs_mount.c77
-rw-r--r--fs/xfs/xfs_mount.h2
-rw-r--r--fs/xfs/xfs_qm.c196
-rw-r--r--fs/xfs/xfs_qm_bhv.c2
-rw-r--r--fs/xfs/xfs_qm_syscalls.c1
-rw-r--r--fs/xfs/xfs_quotaops.c1
-rw-r--r--fs/xfs/xfs_rename.c1
-rw-r--r--fs/xfs/xfs_rtalloc.c10
-rw-r--r--fs/xfs/xfs_rw.c156
-rw-r--r--fs/xfs/xfs_rw.h47
-rw-r--r--fs/xfs/xfs_super.c51
-rw-r--r--fs/xfs/xfs_sync.c281
-rw-r--r--fs/xfs/xfs_trace.c2
-rw-r--r--fs/xfs/xfs_trace.h53
-rw-r--r--fs/xfs/xfs_trans.c7
-rw-r--r--fs/xfs/xfs_trans.h18
-rw-r--r--fs/xfs/xfs_trans_ail.c207
-rw-r--r--fs/xfs/xfs_trans_buf.c126
-rw-r--r--fs/xfs/xfs_trans_dquot.c2
-rw-r--r--fs/xfs/xfs_trans_extfree.c1
-rw-r--r--fs/xfs/xfs_trans_inode.c2
-rw-r--r--fs/xfs/xfs_trans_priv.h12
-rw-r--r--fs/xfs/xfs_types.h5
-rw-r--r--fs/xfs/xfs_utils.c2
-rw-r--r--fs/xfs/xfs_vnodeops.c31
-rw-r--r--include/asm-generic/Kbuild1
-rw-r--r--include/asm-generic/bitsperlong.h4
-rw-r--r--include/asm-generic/dma-coherent.h4
-rw-r--r--include/asm-generic/dma-contiguous.h28
-rw-r--r--include/asm-generic/gpio.h6
-rw-r--r--include/asm-generic/kvm_para.h22
-rw-r--r--include/asm-generic/pgtable.h27
-rw-r--r--include/asm-generic/word-at-a-time.h52
-rw-r--r--include/drm/drm.h6
-rw-r--r--include/drm/drmP.h19
-rw-r--r--include/drm/drm_crtc.h86
-rw-r--r--include/drm/drm_crtc_helper.h23
-rw-r--r--include/drm/drm_dp_helper.h8
-rw-r--r--include/drm/drm_edid.h26
-rw-r--r--include/drm/drm_fixed.h1
-rw-r--r--include/drm/drm_mem_util.h4
-rw-r--r--include/drm/drm_mode.h16
-rw-r--r--include/drm/exynos_drm.h99
-rw-r--r--include/drm/i915_drm.h3
-rw-r--r--include/drm/radeon_drm.h1
-rw-r--r--include/drm/ttm/ttm_bo_api.h9
-rw-r--r--include/drm/ttm/ttm_bo_driver.h2
-rw-r--r--include/linux/Kbuild4
-rw-r--r--include/linux/amba/pl08x.h3
-rw-r--r--include/linux/apple_bl.h2
-rw-r--r--include/linux/basic_mmio_gpio.h6
-rw-r--r--include/linux/bcma/bcma.h7
-rw-r--r--include/linux/bcma/bcma_driver_pci.h11
-rw-r--r--include/linux/bio.h8
-rw-r--r--include/linux/blk_types.h10
-rw-r--r--include/linux/blkdev.h20
-rw-r--r--include/linux/bootmem.h6
-rw-r--r--include/linux/bug.h7
-rw-r--r--include/linux/ceph/auth.h12
-rw-r--r--include/linux/ceph/ceph_fs.h4
-rw-r--r--include/linux/ceph/decode.h9
-rw-r--r--include/linux/ceph/messenger.h6
-rw-r--r--include/linux/ceph/osd_client.h11
-rw-r--r--include/linux/ceph/osdmap.h2
-rw-r--r--include/linux/clk-private.h99
-rw-r--r--include/linux/clk-provider.h120
-rw-r--r--include/linux/clk.h6
-rw-r--r--include/linux/compaction.h19
-rw-r--r--include/linux/compat.h3
-rw-r--r--include/linux/cpu.h1
-rw-r--r--include/linux/cred.h10
-rw-r--r--include/linux/crush/crush.h18
-rw-r--r--include/linux/crush/mapper.h7
-rw-r--r--include/linux/debugfs.h11
-rw-r--r--include/linux/device.h4
-rw-r--r--include/linux/dma-buf.h33
-rw-r--r--include/linux/dma-contiguous.h110
-rw-r--r--include/linux/dmaengine.h18
-rw-r--r--include/linux/drbd.h6
-rw-r--r--include/linux/drbd_limits.h7
-rw-r--r--include/linux/drbd_nl.h5
-rw-r--r--include/linux/dvb/frontend.h51
-rw-r--r--include/linux/dvb/version.h2
-rw-r--r--include/linux/edac.h182
-rw-r--r--include/linux/elevator.h8
-rw-r--r--include/linux/eventfd.h2
-rw-r--r--include/linux/fb.h4
-rw-r--r--include/linux/firewire.h2
-rw-r--r--include/linux/fixp-arith.h (renamed from drivers/input/fixp-arith.h)0
-rw-r--r--include/linux/fs.h35
-rw-r--r--include/linux/fsl/mxs-dma.h12
-rw-r--r--include/linux/gameport.h13
-rw-r--r--include/linux/genetlink.h3
-rw-r--r--include/linux/gfp.h12
-rw-r--r--include/linux/gpio.h59
-rw-r--r--include/linux/huge_mm.h2
-rw-r--r--include/linux/hugetlb.h8
-rw-r--r--include/linux/i2c-mux-gpio.h (renamed from include/linux/gpio-i2cmux.h)14
-rw-r--r--include/linux/i2c-mux.h3
-rw-r--r--include/linux/i2c.h6
-rw-r--r--include/linux/i2c/adp5588.h1
-rw-r--r--include/linux/if_arp.h2
-rw-r--r--include/linux/input/lm8333.h24
-rw-r--r--include/linux/input/matrix_keypad.h54
-rw-r--r--include/linux/input/navpoint.h12
-rw-r--r--include/linux/iocontext.h39
-rw-r--r--include/linux/iommu.h10
-rw-r--r--include/linux/ioprio.h22
-rw-r--r--include/linux/ipc_namespace.h42
-rw-r--r--include/linux/ipx.h2
-rw-r--r--include/linux/irqdomain.h3
-rw-r--r--include/linux/jbd.h18
-rw-r--r--include/linux/kallsyms.h7
-rw-r--r--include/linux/kcmp.h17
-rw-r--r--include/linux/kernel-page-flags.h4
-rw-r--r--include/linux/kernel.h3
-rw-r--r--include/linux/kexec.h75
-rw-r--r--include/linux/kmod.h34
-rw-r--r--include/linux/kvm.h42
-rw-r--r--include/linux/kvm_host.h55
-rw-r--r--include/linux/lcd.h10
-rw-r--r--include/linux/led-lm3530.h2
-rw-r--r--include/linux/leds.h2
-rw-r--r--include/linux/lockd/bind.h4
-rw-r--r--include/linux/memcontrol.h69
-rw-r--r--include/linux/mempolicy.h9
-rw-r--r--include/linux/mfd/abx500/ab8500.h18
-rw-r--r--include/linux/mfd/anatop.h4
-rw-r--r--include/linux/mfd/asic3.h2
-rw-r--r--include/linux/mfd/da9052/da9052.h19
-rw-r--r--include/linux/mfd/lm3533.h104
-rw-r--r--include/linux/mfd/lpc_ich.h48
-rw-r--r--include/linux/mfd/max77693-private.h227
-rw-r--r--include/linux/mfd/max77693.h36
-rw-r--r--include/linux/mfd/rc5t583.h2
-rw-r--r--include/linux/mfd/sta2x11-mfd.h324
-rw-r--r--include/linux/mfd/stmpe.h2
-rw-r--r--include/linux/mfd/tps65910.h49
-rw-r--r--include/linux/mfd/twl6040.h2
-rw-r--r--include/linux/mfd/wm831x/core.h12
-rw-r--r--include/linux/mfd/wm8350/core.h9
-rw-r--r--include/linux/mfd/wm8400-private.h14
-rw-r--r--include/linux/mfd/wm8994/core.h1
-rw-r--r--include/linux/mfd/wm8994/registers.h3
-rw-r--r--include/linux/micrel_phy.h2
-rw-r--r--include/linux/mm.h6
-rw-r--r--include/linux/mm_inline.h24
-rw-r--r--include/linux/mm_types.h13
-rw-r--r--include/linux/mmc/card.h4
-rw-r--r--include/linux/mmc/dw_mmc.h1
-rw-r--r--include/linux/mmc/host.h2
-rw-r--r--include/linux/mmc/mmc.h60
-rw-r--r--include/linux/mmc/mxs-mmc.h (renamed from arch/arm/mach-mxs/include/mach/mmc.h)7
-rw-r--r--include/linux/mmdebug.h2
-rw-r--r--include/linux/mmzone.h100
-rw-r--r--include/linux/msdos_fs.h3
-rw-r--r--include/linux/mv643xx_eth.h1
-rw-r--r--include/linux/net.h3
-rw-r--r--include/linux/netdevice.h8
-rw-r--r--include/linux/nfc/pn544.h7
-rw-r--r--include/linux/nfs4.h13
-rw-r--r--include/linux/nfs_fs.h31
-rw-r--r--include/linux/nfs_fs_sb.h17
-rw-r--r--include/linux/nfs_page.h20
-rw-r--r--include/linux/nfs_xdr.h210
-rw-r--r--include/linux/nfsd/export.h13
-rw-r--r--include/linux/nl80211.h8
-rw-r--r--include/linux/of_gpio.h1
-rw-r--r--include/linux/of_i2c.h4
-rw-r--r--include/linux/of_irq.h12
-rw-r--r--include/linux/of_pci.h2
-rw-r--r--include/linux/of_spi.h23
-rw-r--r--include/linux/oom.h5
-rw-r--r--include/linux/page-isolation.h18
-rw-r--r--include/linux/pagemap.h64
-rw-r--r--include/linux/pci.h5
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/power/charger-manager.h50
-rw-r--r--include/linux/power/max17042_battery.h17
-rw-r--r--include/linux/power_supply.h4
-rw-r--r--include/linux/prctl.h6
-rw-r--r--include/linux/res_counter.h5
-rw-r--r--include/linux/rio.h47
-rw-r--r--include/linux/rio_drv.h9
-rw-r--r--include/linux/rmap.h2
-rw-r--r--include/linux/rtc.h3
-rw-r--r--include/linux/rtc/ds1307.h22
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/serio.h13
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/linux/spi/orion_spi.h17
-rw-r--r--include/linux/spi/rspi.h31
-rw-r--r--include/linux/ssb/ssb.h1
-rw-r--r--include/linux/ssb/ssb_regs.h61
-rw-r--r--include/linux/stmp_device.h20
-rw-r--r--include/linux/sunrpc/svcauth.h3
-rw-r--r--include/linux/swap.h56
-rw-r--r--include/linux/syscalls.h2
-rw-r--r--include/linux/time.h1
-rw-r--r--include/linux/uprobes.h165
-rw-r--r--include/linux/v4l2-dv-timings.h816
-rw-r--r--include/linux/v4l2-subdev.h41
-rw-r--r--include/linux/vga_switcheroo.h19
-rw-r--r--include/linux/vgaarb.h7
-rw-r--r--include/linux/videodev2.h372
-rw-r--r--include/linux/watchdog.h28
-rw-r--r--include/linux/writeback.h10
-rw-r--r--include/media/media-entity.h5
-rw-r--r--include/media/mt9p031.h19
-rw-r--r--include/media/omap3isp.h29
-rw-r--r--include/media/rc-map.h3
-rw-r--r--include/media/s5p_fimc.h16
-rw-r--r--include/media/saa7146.h4
-rw-r--r--include/media/saa7146_vv.h25
-rw-r--r--include/media/sh_mobile_ceu.h1
-rw-r--r--include/media/smiapp.h84
-rw-r--r--include/media/soc_camera.h6
-rw-r--r--include/media/soc_mediabus.h21
-rw-r--r--include/media/v4l2-ctrls.h40
-rw-r--r--include/media/v4l2-dev.h25
-rw-r--r--include/media/v4l2-event.h24
-rw-r--r--include/media/v4l2-ioctl.h6
-rw-r--r--include/media/v4l2-subdev.h55
-rw-r--r--include/media/videobuf-dma-contig.h10
-rw-r--r--include/net/bluetooth/bluetooth.h32
-rw-r--r--include/net/bluetooth/hci.h8
-rw-r--r--include/net/bluetooth/hci_core.h67
-rw-r--r--include/net/bluetooth/l2cap.h93
-rw-r--r--include/net/bluetooth/mgmt.h9
-rw-r--r--include/net/bluetooth/smp.h2
-rw-r--r--include/net/cfg80211.h6
-rw-r--r--include/net/dst.h1
-rw-r--r--include/net/mac80211.h12
-rw-r--r--include/net/nfc/hci.h6
-rw-r--r--include/net/nfc/nfc.h19
-rw-r--r--include/net/nfc/shdlc.h2
-rw-r--r--include/net/sock.h22
-rw-r--r--include/scsi/fcoe_sysfs.h124
-rw-r--r--include/scsi/libfcoe.h27
-rw-r--r--include/trace/events/jbd.h39
-rw-r--r--include/trace/events/vmscan.h122
-rw-r--r--include/trace/events/writeback.h36
-rw-r--r--include/xen/acpi.h58
-rw-r--r--include/xen/events.h3
-rw-r--r--include/xen/grant_table.h2
-rw-r--r--include/xen/xenbus_dev.h3
-rw-r--r--init/Kconfig14
-rw-r--r--init/do_mounts.c14
-rw-r--r--init/do_mounts_initrd.c10
-rw-r--r--init/do_mounts_md.c12
-rw-r--r--init/do_mounts_rd.c13
-rw-r--r--init/initramfs.c16
-rw-r--r--ipc/mq_sysctl.c49
-rw-r--r--ipc/mqueue.c294
-rw-r--r--kernel/Makefile3
-rw-r--r--kernel/cgroup.c20
-rw-r--r--kernel/cpu.c44
-rw-r--r--kernel/cpu_pm.c16
-rw-r--r--kernel/events/Makefile3
-rw-r--r--kernel/events/uprobes.c1667
-rw-r--r--kernel/exit.c8
-rw-r--r--kernel/fork.c36
-rw-r--r--kernel/irq/irqdomain.c106
-rw-r--r--kernel/irq/manage.c14
-rw-r--r--kernel/kallsyms.c32
-rw-r--r--kernel/kcmp.c196
-rw-r--r--kernel/kfifo.c1
-rw-r--r--kernel/kmod.c30
-rw-r--r--kernel/pid.c3
-rw-r--r--kernel/pid_namespace.c13
-rw-r--r--kernel/res_counter.c10
-rw-r--r--kernel/resource.c4
-rw-r--r--kernel/signal.c15
-rw-r--r--kernel/sys.c213
-rw-r--r--kernel/sys_ni.c3
-rw-r--r--kernel/time/Kconfig58
-rw-r--r--kernel/time/ntp.c8
-rw-r--r--kernel/time/timekeeping.c4
-rw-r--r--kernel/trace/Kconfig20
-rw-r--r--kernel/trace/Makefile2
-rw-r--r--kernel/trace/ring_buffer.c5
-rw-r--r--kernel/trace/trace.h5
-rw-r--r--kernel/trace/trace_kprobe.c899
-rw-r--r--kernel/trace/trace_probe.c839
-rw-r--r--kernel/trace/trace_probe.h161
-rw-r--r--kernel/trace/trace_uprobe.c788
-rw-r--r--kernel/watchdog.c12
-rw-r--r--lib/Kconfig9
-rw-r--r--lib/Makefile5
-rw-r--r--lib/bitmap.c12
-rw-r--r--lib/dma-debug.c10
-rw-r--r--lib/list_debug.c3
-rw-r--r--lib/radix-tree.c15
-rw-r--r--lib/spinlock_debug.c2
-rw-r--r--lib/stmp_device.c80
-rw-r--r--lib/string_helpers.c8
-rw-r--r--lib/strncpy_from_user.c113
-rw-r--r--lib/strnlen_user.c138
-rw-r--r--lib/swiotlb.c8
-rw-r--r--lib/test-kstrtox.c4
-rw-r--r--lib/vsprintf.c303
-rw-r--r--mm/Kconfig12
-rw-r--r--mm/Makefile12
-rw-r--r--mm/bootmem.c134
-rw-r--r--mm/compaction.c547
-rw-r--r--mm/filemap.c39
-rw-r--r--mm/huge_memory.c29
-rw-r--r--mm/hugetlb.c34
-rw-r--r--mm/internal.h45
-rw-r--r--mm/madvise.c15
-rw-r--r--mm/memblock.c42
-rw-r--r--mm/memcontrol.c642
-rw-r--r--mm/memory-failure.c10
-rw-r--r--mm/memory.c23
-rw-r--r--mm/memory_hotplug.c20
-rw-r--r--mm/mempolicy.c36
-rw-r--r--mm/mmap.c86
-rw-r--r--mm/mmzone.c14
-rw-r--r--mm/nobootmem.c112
-rw-r--r--mm/oom_kill.c44
-rw-r--r--mm/page-writeback.c3
-rw-r--r--mm/page_alloc.c492
-rw-r--r--mm/page_isolation.c15
-rw-r--r--mm/pgtable-generic.c4
-rw-r--r--mm/process_vm_access.c16
-rw-r--r--mm/readahead.c40
-rw-r--r--mm/rmap.c6
-rw-r--r--mm/shmem.c515
-rw-r--r--mm/sparse.c25
-rw-r--r--mm/swap.c129
-rw-r--r--mm/swapfile.c33
-rw-r--r--mm/thrash.c155
-rw-r--r--mm/truncate.c25
-rw-r--r--mm/vmalloc.c7
-rw-r--r--mm/vmscan.c738
-rw-r--r--mm/vmstat.c13
-rw-r--r--net/bluetooth/af_bluetooth.c8
-rw-r--r--net/bluetooth/bnep/core.c2
-rw-r--r--net/bluetooth/hci_conn.c56
-rw-r--r--net/bluetooth/hci_core.c267
-rw-r--r--net/bluetooth/hci_event.c75
-rw-r--r--net/bluetooth/hci_sysfs.c5
-rw-r--r--net/bluetooth/l2cap_core.c762
-rw-r--r--net/bluetooth/l2cap_sock.c76
-rw-r--r--net/bluetooth/mgmt.c286
-rw-r--r--net/bluetooth/rfcomm/sock.c14
-rw-r--r--net/bluetooth/sco.c75
-rw-r--r--net/bluetooth/smp.c2
-rw-r--r--net/ceph/auth_none.c15
-rw-r--r--net/ceph/auth_x.c15
-rw-r--r--net/ceph/crush/crush.c39
-rw-r--r--net/ceph/crush/mapper.c124
-rw-r--r--net/ceph/messenger.c182
-rw-r--r--net/ceph/osd_client.c63
-rw-r--r--net/ceph/osdmap.c73
-rw-r--r--net/core/drop_monitor.c1
-rw-r--r--net/ipv4/esp4.c24
-rw-r--r--net/ipv4/fib_semantics.c12
-rw-r--r--net/ipv4/route.c1
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_input.c5
-rw-r--r--net/ipv4/tcp_memcontrol.c34
-rw-r--r--net/ipv4/udp.c30
-rw-r--r--net/ipv6/esp6.c18
-rw-r--r--net/ipv6/ip6_output.c68
-rw-r--r--net/l2tp/l2tp_ip.c24
-rw-r--r--net/l2tp/l2tp_ip6.c18
-rw-r--r--net/l2tp/l2tp_netlink.c3
-rw-r--r--net/mac80211/agg-tx.c10
-rw-r--r--net/mac80211/debugfs_netdev.c2
-rw-r--r--net/mac80211/ibss.c5
-rw-r--r--net/mac80211/iface.c4
-rw-r--r--net/mac80211/main.c3
-rw-r--r--net/mac80211/mesh.c6
-rw-r--r--net/mac80211/mesh_hwmp.c5
-rw-r--r--net/mac80211/mesh_plink.c65
-rw-r--r--net/mac80211/mlme.c3
-rw-r--r--net/mac80211/rx.c6
-rw-r--r--net/mac80211/tx.c2
-rw-r--r--net/mac80211/util.c12
-rw-r--r--net/mac80211/wep.c15
-rw-r--r--net/mac80211/wpa.c10
-rw-r--r--net/netlink/genetlink.c2
-rw-r--r--net/nfc/core.c112
-rw-r--r--net/nfc/hci/Kconfig1
-rw-r--r--net/nfc/hci/core.c78
-rw-r--r--net/nfc/hci/shdlc.c12
-rw-r--r--net/nfc/llcp/commands.c4
-rw-r--r--net/nfc/llcp/llcp.c7
-rw-r--r--net/nfc/llcp/sock.c57
-rw-r--r--net/nfc/nci/core.c27
-rw-r--r--net/nfc/nci/data.c8
-rw-r--r--net/nfc/nci/lib.c1
-rw-r--r--net/nfc/nci/ntf.c2
-rw-r--r--net/nfc/netlink.c6
-rw-r--r--net/nfc/nfc.h2
-rw-r--r--net/rds/ib.h3
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c100
-rw-r--r--net/sunrpc/clnt.c2
-rw-r--r--net/sunrpc/rpc_pipe.c10
-rw-r--r--net/sunrpc/rpcb_clnt.c2
-rw-r--r--net/sunrpc/svcauth_unix.c13
-rw-r--r--net/sunrpc/xprt.c7
-rw-r--r--net/wanrouter/Kconfig2
-rw-r--r--net/wireless/chan.c2
-rw-r--r--net/wireless/core.c4
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/nl80211.c69
-rw-r--r--net/wireless/util.c2
-rw-r--r--net/xfrm/xfrm_policy.c3
-rwxr-xr-xscripts/checkpatch.pl20
-rw-r--r--scripts/coccinelle/misc/ifaddr.cocci35
-rw-r--r--scripts/coccinelle/misc/noderef.cocci65
-rwxr-xr-xscripts/config11
-rw-r--r--scripts/kconfig/conf.c22
-rw-r--r--scripts/link-vmlinux.sh221
-rw-r--r--scripts/package/builddeb2
-rw-r--r--security/keys/compat.c2
-rw-r--r--security/keys/keyctl.c4
-rw-r--r--security/keys/request_key.c13
-rw-r--r--sound/core/pcm_lib.c23
-rw-r--r--sound/firewire/cmp.c2
-rw-r--r--sound/firewire/lib.c28
-rw-r--r--sound/firewire/lib.h1
-rw-r--r--sound/i2c/other/tea575x-tuner.c3
-rw-r--r--sound/pci/hda/hda_codec.c66
-rw-r--r--sound/pci/hda/hda_codec.h3
-rw-r--r--sound/pci/hda/hda_intel.c314
-rw-r--r--sound/pci/hda/patch_realtek.c38
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c13
-rw-r--r--sound/soc/kirkwood/kirkwood.h1
-rw-r--r--sound/soc/omap/Kconfig7
-rw-r--r--sound/soc/omap/Makefile4
-rw-r--r--sound/soc/omap/mcbsp.c115
-rw-r--r--sound/soc/omap/mcbsp.h8
-rw-r--r--sound/soc/omap/omap-abe-twl6040.c68
-rw-r--r--sound/soc/omap/omap-dmic.c8
-rw-r--r--sound/soc/omap/omap-hdmi-card.c87
-rw-r--r--sound/soc/omap/omap-hdmi.c238
-rw-r--r--sound/soc/omap/omap-hdmi.h4
-rw-r--r--sound/soc/omap/omap-mcbsp.c45
-rw-r--r--sound/soc/omap/omap-mcpdm.c8
-rw-r--r--sound/soc/omap/omap4-hdmi-card.c121
-rw-r--r--tools/lib/traceevent/event-parse.c22
-rw-r--r--tools/lib/traceevent/parse-filter.c7
-rw-r--r--tools/perf/Documentation/perf-probe.txt19
-rw-r--r--tools/perf/Documentation/perfconfig.example8
-rw-r--r--tools/perf/Makefile4
-rw-r--r--tools/perf/builtin-annotate.c2
-rw-r--r--tools/perf/builtin-evlist.c2
-rw-r--r--tools/perf/builtin-probe.c86
-rw-r--r--tools/perf/builtin-record.c14
-rw-r--r--tools/perf/builtin-report.c14
-rw-r--r--tools/perf/builtin-top.c24
-rw-r--r--tools/perf/perf.h2
-rw-r--r--tools/perf/ui/browser.c182
-rw-r--r--tools/perf/ui/browser.h1
-rw-r--r--tools/perf/ui/browsers/annotate.c262
-rw-r--r--tools/perf/ui/browsers/hists.c338
-rw-r--r--tools/perf/ui/setup.c1
-rw-r--r--tools/perf/util/config.c2
-rw-r--r--tools/perf/util/evsel.c90
-rw-r--r--tools/perf/util/evsel.h3
-rw-r--r--tools/perf/util/parse-events.c27
-rw-r--r--tools/perf/util/probe-event.c422
-rw-r--r--tools/perf/util/probe-event.h12
-rw-r--r--tools/perf/util/symbol.c8
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/perf/util/thread_map.c21
-rw-r--r--tools/testing/selftests/Makefile2
-rw-r--r--tools/testing/selftests/kcmp/Makefile29
-rw-r--r--tools/testing/selftests/kcmp/kcmp_test.c94
-rw-r--r--tools/testing/selftests/mqueue/.gitignore2
-rw-r--r--tools/testing/selftests/mqueue/Makefile10
-rw-r--r--tools/testing/selftests/mqueue/mq_open_tests.c492
-rw-r--r--tools/testing/selftests/mqueue/mq_perf_tests.c741
-rw-r--r--tools/vm/page-types.c50
-rw-r--r--usr/Kconfig10
-rw-r--r--virt/kvm/Kconfig3
-rw-r--r--virt/kvm/ioapic.c10
-rw-r--r--virt/kvm/ioapic.h1
-rw-r--r--virt/kvm/irq_comm.c14
-rw-r--r--virt/kvm/kvm_main.c132
3343 files changed, 197284 insertions, 93679 deletions
diff --git a/.mailmap b/.mailmap
index 9b0d0267a3c3..2909c33bc54e 100644
--- a/.mailmap
+++ b/.mailmap
@@ -113,3 +113,5 @@ Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
Yusuke Goda <goda.yusuke@renesas.com>
+Gustavo Padovan <gustavo@las.ic.unicamp.br>
+Gustavo Padovan <padovan@profusion.mobi>
diff --git a/Documentation/ABI/testing/sysfs-block-rssd b/Documentation/ABI/testing/sysfs-block-rssd
index d535757799fe..679ce3543122 100644
--- a/Documentation/ABI/testing/sysfs-block-rssd
+++ b/Documentation/ABI/testing/sysfs-block-rssd
@@ -6,13 +6,21 @@ Description: This is a read-only file. Dumps below driver information and
hardware registers.
- S ACTive
- Command Issue
- - Allocated
- Completed
- PORT IRQ STAT
- HOST IRQ STAT
+ - Allocated
+ - Commands in Q
What: /sys/block/rssd*/status
Date: April 2012
KernelVersion: 3.4
Contact: Asai Thambi S P <asamymuthupa@micron.com>
-Description: This is a read-only file. Indicates the status of the device.
+Description: This is a read-only file. Indicates the status of the device.
+
+What: /sys/block/rssd*/flags
+Date: May 2012
+KernelVersion: 3.5
+Contact: Asai Thambi S P <asamymuthupa@micron.com>
+Description: This is a read-only file. Dumps the flags in port and driver
+ data structure
diff --git a/Documentation/ABI/testing/sysfs-bus-fcoe b/Documentation/ABI/testing/sysfs-bus-fcoe
new file mode 100644
index 000000000000..469d09c02f6b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-fcoe
@@ -0,0 +1,77 @@
+What: /sys/bus/fcoe/ctlr_X
+Date: March 2012
+KernelVersion: TBD
+Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
+Description: 'FCoE Controller' instances on the fcoe bus
+Attributes:
+
+ fcf_dev_loss_tmo: Device loss timeout period (see below). Changing
+ this value will change the dev_loss_tmo for all
+ FCFs discovered by this controller.
+
+ lesb_link_fail: Link Error Status Block (LESB) link failure count.
+
+ lesb_vlink_fail: Link Error Status Block (LESB) virtual link
+ failure count.
+
+ lesb_miss_fka: Link Error Status Block (LESB) missed FCoE
+ Initialization Protocol (FIP) Keep-Alives (FKA).
+
+ lesb_symb_err: Link Error Status Block (LESB) symbolic error count.
+
+ lesb_err_block: Link Error Status Block (LESB) block error count.
+
+ lesb_fcs_error: Link Error Status Block (LESB) Fibre Channel
+ Services error count.
+
+Notes: ctlr_X (global increment starting at 0)
+
+What: /sys/bus/fcoe/fcf_X
+Date: March 2012
+KernelVersion: TBD
+Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
+Description: 'FCoE FCF' instances on the fcoe bus. A FCF is a Fibre Channel
+ Forwarder, which is a FCoE switch that can accept FCoE
+ (Ethernet) packets, unpack them, and forward the embedded
+ Fibre Channel frames into a FC fabric. It can also take
+ outbound FC frames and pack them in Ethernet packets to
+ be sent to their destination on the Ethernet segment.
+Attributes:
+
+ fabric_name: Identifies the fabric that the FCF services.
+
+ switch_name: Identifies the FCF.
+
+ priority: The switch's priority amongst other FCFs on the same
+ fabric.
+
+ selected: 1 indicates that the switch has been selected for use;
+ 0 indicates that the switch will not be used.
+
+ fc_map: The Fibre Channel MAP
+
+ vfid: The Virtual Fabric ID
+
+ mac: The FCF's MAC address
+
+ fka_peroid: The FIP Keep-Alive period
+
+ fabric_state: The internal kernel state
+ "Unknown" - Initialization value
+ "Disconnected" - No link to the FCF/fabric
+ "Connected" - Host is connected to the FCF
+ "Deleted" - FCF is being removed from the system
+
+ dev_loss_tmo: The device loss timeout period for this FCF.
+
+Notes: A device loss infrastructure similar to the FC Transport's
+ is present in fcoe_sysfs. It is nice to have so that a
+ link flapping adapter doesn't continually advance the count
+ used to identify the discovered FCF. FCFs will exist in a
+ "Disconnected" state until either the timer expires and the
+ FCF becomes "Deleted" or the FCF is rediscovered and becomes
+ "Connected."
+
+
+Users: The first user of this interface will be the fcoeadm application,
+ which is commonly packaged in the fcoe-utils package.
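A minimal userspace sketch (not part of the patch) of how the controller attributes documented above can be read; the "ctlr_0" instance name follows the "ctlr_X (global increment starting at 0)" note and is an assumption:

/*
 * Sketch only: read the fcf_dev_loss_tmo attribute of the first FCoE
 * controller on the fcoe bus. The "ctlr_0" path component is assumed
 * from the ABI description above, not taken from this patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd = open("/sys/bus/fcoe/ctlr_0/fcf_dev_loss_tmo", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("fcf_dev_loss_tmo: %s", buf);
	}
	close(fd);
	return 0;
}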
diff --git a/Documentation/ABI/testing/sysfs-bus-i2c-devices-lm3533 b/Documentation/ABI/testing/sysfs-bus-i2c-devices-lm3533
new file mode 100644
index 000000000000..1b62230b33b9
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-i2c-devices-lm3533
@@ -0,0 +1,15 @@
+What: /sys/bus/i2c/devices/.../output_hvled[n]
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the controlling backlight device for high-voltage current
+ sink HVLED[n] (n = 1, 2) (0, 1).
+
+What: /sys/bus/i2c/devices/.../output_lvled[n]
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the controlling led device for low-voltage current sink
+ LVLED[n] (n = 1..5) (0..3).
diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd
index dbedafb095e2..bcd88eb7ebcd 100644
--- a/Documentation/ABI/testing/sysfs-bus-rbd
+++ b/Documentation/ABI/testing/sysfs-bus-rbd
@@ -65,11 +65,11 @@ snap_*
Entries under /sys/bus/rbd/devices/<dev-id>/snap_<snap-name>
-------------------------------------------------------------
-id
+snap_id
The rados internal snapshot id assigned for this snapshot
-size
+snap_size
The size of the image when this snapshot was taken.
diff --git a/Documentation/ABI/testing/sysfs-class-backlight-driver-lm3533 b/Documentation/ABI/testing/sysfs-class-backlight-driver-lm3533
new file mode 100644
index 000000000000..77cf7ac949af
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-backlight-driver-lm3533
@@ -0,0 +1,48 @@
+What: /sys/class/backlight/<backlight>/als_channel
+Date: May 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Get the ALS output channel used as input in
+ ALS-current-control mode (0, 1), where
+
+ 0 - out_current0 (backlight 0)
+ 1 - out_current1 (backlight 1)
+
+What: /sys/class/backlight/<backlight>/als_en
+Date: May 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Enable ALS-current-control mode (0, 1).
+
+What: /sys/class/backlight/<backlight>/id
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Get the id of this backlight (0, 1).
+
+What: /sys/class/backlight/<backlight>/linear
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the brightness-mapping mode (0, 1), where
+
+ 0 - exponential mode
+ 1 - linear mode
+
+What: /sys/class/backlight/<backlight>/pwm
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the PWM-input control mask (5 bits), where
+
+ bit 5 - PWM-input enabled in Zone 4
+ bit 4 - PWM-input enabled in Zone 3
+ bit 3 - PWM-input enabled in Zone 2
+ bit 2 - PWM-input enabled in Zone 1
+ bit 1 - PWM-input enabled in Zone 0
+ bit 0 - PWM-input enabled
diff --git a/Documentation/ABI/testing/sysfs-class-led-driver-lm3533 b/Documentation/ABI/testing/sysfs-class-led-driver-lm3533
new file mode 100644
index 000000000000..620ebb3b9baa
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-led-driver-lm3533
@@ -0,0 +1,65 @@
+What: /sys/class/leds/<led>/als_channel
+Date: May 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the ALS output channel to use as input in
+ ALS-current-control mode (1, 2), where
+
+ 1 - out_current1
+ 2 - out_current2
+
+What: /sys/class/leds/<led>/als_en
+Date: May 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Enable ALS-current-control mode (0, 1).
+
+What: /sys/class/leds/<led>/falltime
+What: /sys/class/leds/<led>/risetime
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the pattern generator fall and rise times (0..7), where
+
+ 0 - 2048 us
+ 1 - 262 ms
+ 2 - 524 ms
+ 3 - 1.049 s
+ 4 - 2.097 s
+ 5 - 4.194 s
+ 6 - 8.389 s
+ 7 - 16.78 s
+
+What: /sys/class/leds/<led>/id
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Get the id of this led (0..3).
+
+What: /sys/class/leds/<led>/linear
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the brightness-mapping mode (0, 1), where
+
+ 0 - exponential mode
+ 1 - linear mode
+
+What: /sys/class/leds/<led>/pwm
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the PWM-input control mask (5 bits), where
+
+ bit 5 - PWM-input enabled in Zone 4
+ bit 4 - PWM-input enabled in Zone 3
+ bit 3 - PWM-input enabled in Zone 2
+ bit 2 - PWM-input enabled in Zone 1
+ bit 1 - PWM-input enabled in Zone 0
+ bit 0 - PWM-input enabled
diff --git a/Documentation/ABI/testing/sysfs-driver-wacom b/Documentation/ABI/testing/sysfs-driver-wacom
index 56c54558c8a4..8d55a83d6921 100644
--- a/Documentation/ABI/testing/sysfs-driver-wacom
+++ b/Documentation/ABI/testing/sysfs-driver-wacom
@@ -23,9 +23,10 @@ Contact: linux-input@vger.kernel.org
Description:
Attribute group for control of the status LEDs and the OLEDs.
This attribute group is only available for Intuos 4 M, L,
- and XL (with LEDs and OLEDs) and Cintiq 21UX2 and Cintiq 24HD
- (LEDs only). Therefore its presence implicitly signifies the
- presence of said LEDs and OLEDs on the tablet device.
+ and XL (with LEDs and OLEDs), Intuos 5 (LEDs only), and Cintiq
+ 21UX2 and Cintiq 24HD (LEDs only). Therefore its presence
+ implicitly signifies the presence of said LEDs and OLEDs on the
+ tablet device.
What: /sys/bus/usb/devices/<busnum>-<devnum>:<cfg>.<intf>/wacom_led/status0_luminance
Date: August 2011
@@ -48,10 +49,10 @@ What: /sys/bus/usb/devices/<busnum>-<devnum>:<cfg>.<intf>/wacom_led/status_led0
Date: August 2011
Contact: linux-input@vger.kernel.org
Description:
- Writing to this file sets which one of the four (for Intuos 4)
- or of the right four (for Cintiq 21UX2 and Cintiq 24HD) status
- LEDs is active (0..3). The other three LEDs on the same side are
- always inactive.
+ Writing to this file sets which one of the four (for Intuos 4
+ and Intuos 5) or of the right four (for Cintiq 21UX2 and Cintiq
+ 24HD) status LEDs is active (0..3). The other three LEDs on the
+ same side are always inactive.
What: /sys/bus/usb/devices/<busnum>-<devnum>:<cfg>.<intf>/wacom_led/status_led1_select
Date: September 2011
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index c58b236bbe04..cb9258b8fd35 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -671,8 +671,9 @@ ones already enabled by DEBUG.
Chapter 14: Allocating memory
The kernel provides the following general purpose memory allocators:
-kmalloc(), kzalloc(), kcalloc(), vmalloc(), and vzalloc(). Please refer to
-the API documentation for further information about them.
+kmalloc(), kzalloc(), kmalloc_array(), kcalloc(), vmalloc(), and
+vzalloc(). Please refer to the API documentation for further information
+about them.
The preferred form for passing a size of a struct is the following:
@@ -686,6 +687,17 @@ Casting the return value which is a void pointer is redundant. The conversion
from void pointer to any other pointer type is guaranteed by the C programming
language.
+The preferred form for allocating an array is the following:
+
+ p = kmalloc_array(n, sizeof(...), ...);
+
+The preferred form for allocating a zeroed array is the following:
+
+ p = kcalloc(n, sizeof(...), ...);
+
+Both forms check for overflow on the allocation size n * sizeof(...),
+and return NULL if that occurred.
+
Chapter 15: The inline disease
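A short kernel-C sketch of the array-allocation guidance added above; struct foo_entry and foo_alloc_entries() are invented names for illustration and do not come from this patch:

/*
 * Illustration only: a hypothetical driver allocating a zeroed array the
 * way the updated CodingStyle recommends. kcalloc() (and kmalloc_array()
 * for the non-zeroed case) checks n * sizeof(*entries) for overflow and
 * returns NULL on failure or overflow.
 */
#include <linux/slab.h>
#include <linux/types.h>

struct foo_entry {
	u32 id;
	u32 flags;
};

static struct foo_entry *foo_alloc_entries(size_t n)
{
	struct foo_entry *entries;

	/* zeroed array; use kmalloc_array() when zeroing is not needed */
	entries = kcalloc(n, sizeof(*entries), GFP_KERNEL);

	/* NULL covers both allocation failure and n * sizeof() overflow */
	return entries;
}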
diff --git a/Documentation/DocBook/media/Makefile b/Documentation/DocBook/media/Makefile
index 6628b4b9cac4..362520992ced 100644
--- a/Documentation/DocBook/media/Makefile
+++ b/Documentation/DocBook/media/Makefile
@@ -70,6 +70,8 @@ IOCTLS = \
VIDIOC_SUBDEV_ENUM_MBUS_CODE \
VIDIOC_SUBDEV_ENUM_FRAME_SIZE \
VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL \
+ VIDIOC_SUBDEV_G_SELECTION \
+ VIDIOC_SUBDEV_S_SELECTION \
TYPES = \
$(shell perl -ne 'print "$$1 " if /^typedef\s+[^\s]+\s+([^\s]+)\;/' $(srctree)/include/linux/videodev2.h) \
@@ -193,7 +195,7 @@ DVB_DOCUMENTED = \
#
install_media_images = \
- $(Q)cp $(OBJIMGFILES) $(MEDIA_OBJ_DIR)/media_api
+ $(Q)cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
$(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
$(Q)base64 -d $< >$@
diff --git a/Documentation/DocBook/media/dvb/dvbproperty.xml b/Documentation/DocBook/media/dvb/dvbproperty.xml
index c7a4ca517859..e633c097a8d1 100644
--- a/Documentation/DocBook/media/dvb/dvbproperty.xml
+++ b/Documentation/DocBook/media/dvb/dvbproperty.xml
@@ -531,6 +531,139 @@ typedef enum fe_delivery_system {
here are referring to what can be found in the TMCC-structure -
independent of the mode.</para>
</section>
+ <section id="DTV-ATSCMH-FIC-VER">
+ <title><constant>DTV_ATSCMH_FIC_VER</constant></title>
+ <para>Version number of the FIC (Fast Information Channel) signaling data.</para>
+ <para>FIC is used for relaying information to allow rapid service acquisition by the receiver.</para>
+ <para>Possible values: 0, 1, 2, 3, ..., 30, 31</para>
+ </section>
+ <section id="DTV-ATSCMH-PARADE-ID">
+ <title><constant>DTV_ATSCMH_PARADE_ID</constant></title>
+ <para>Parade identification number</para>
+ <para>A parade is a collection of up to eight MH groups, conveying one or two ensembles.</para>
+ <para>Possible values: 0, 1, 2, 3, ..., 126, 127</para>
+ </section>
+ <section id="DTV-ATSCMH-NOG">
+ <title><constant>DTV_ATSCMH_NOG</constant></title>
+ <para>Number of MH groups per MH subframe for a designated parade.</para>
+ <para>Possible values: 1, 2, 3, 4, 5, 6, 7, 8</para>
+ </section>
+ <section id="DTV-ATSCMH-TNOG">
+ <title><constant>DTV_ATSCMH_TNOG</constant></title>
+ <para>Total number of MH groups including all MH groups belonging to all MH parades in one MH subframe.</para>
+ <para>Possible values: 0, 1, 2, 3, ..., 30, 31</para>
+ </section>
+ <section id="DTV-ATSCMH-SGN">
+ <title><constant>DTV_ATSCMH_SGN</constant></title>
+ <para>Start group number.</para>
+ <para>Possible values: 0, 1, 2, 3, ..., 14, 15</para>
+ </section>
+ <section id="DTV-ATSCMH-PRC">
+ <title><constant>DTV_ATSCMH_PRC</constant></title>
+ <para>Parade repetition cycle.</para>
+ <para>Possible values: 1, 2, 3, 4, 5, 6, 7, 8</para>
+ </section>
+ <section id="DTV-ATSCMH-RS-FRAME-MODE">
+ <title><constant>DTV_ATSCMH_RS_FRAME_MODE</constant></title>
+ <para>RS frame mode.</para>
+ <para>Possible values are:</para>
+<programlisting>
+typedef enum atscmh_rs_frame_mode {
+ ATSCMH_RSFRAME_PRI_ONLY = 0,
+ ATSCMH_RSFRAME_PRI_SEC = 1,
+} atscmh_rs_frame_mode_t;
+</programlisting>
+ </section>
+ <section id="DTV-ATSCMH-RS-FRAME-ENSEMBLE">
+ <title><constant>DTV_ATSCMH_RS_FRAME_ENSEMBLE</constant></title>
+ <para>RS frame ensemble.</para>
+ <para>Possible values are:</para>
+<programlisting>
+typedef enum atscmh_rs_frame_ensemble {
+ ATSCMH_RSFRAME_ENS_PRI = 0,
+ ATSCMH_RSFRAME_ENS_SEC = 1,
+} atscmh_rs_frame_ensemble_t;
+</programlisting>
+ </section>
+ <section id="DTV-ATSCMH-RS-CODE-MODE-PRI">
+ <title><constant>DTV_ATSCMH_RS_CODE_MODE_PRI</constant></title>
+ <para>RS code mode (primary).</para>
+ <para>Possible values are:</para>
+<programlisting>
+typedef enum atscmh_rs_code_mode {
+ ATSCMH_RSCODE_211_187 = 0,
+ ATSCMH_RSCODE_223_187 = 1,
+ ATSCMH_RSCODE_235_187 = 2,
+} atscmh_rs_code_mode_t;
+</programlisting>
+ </section>
+ <section id="DTV-ATSCMH-RS-CODE-MODE-SEC">
+ <title><constant>DTV_ATSCMH_RS_CODE_MODE_SEC</constant></title>
+ <para>RS code mode (secondary).</para>
+ <para>Possible values are:</para>
+<programlisting>
+typedef enum atscmh_rs_code_mode {
+ ATSCMH_RSCODE_211_187 = 0,
+ ATSCMH_RSCODE_223_187 = 1,
+ ATSCMH_RSCODE_235_187 = 2,
+} atscmh_rs_code_mode_t;
+</programlisting>
+ </section>
+ <section id="DTV-ATSCMH-SCCC-BLOCK-MODE">
+ <title><constant>DTV_ATSCMH_SCCC_BLOCK_MODE</constant></title>
+ <para>Series Concatenated Convolutional Code Block Mode.</para>
+ <para>Possible values are:</para>
+<programlisting>
+typedef enum atscmh_sccc_block_mode {
+ ATSCMH_SCCC_BLK_SEP = 0,
+ ATSCMH_SCCC_BLK_COMB = 1,
+} atscmh_sccc_block_mode_t;
+</programlisting>
+ </section>
+ <section id="DTV-ATSCMH-SCCC-CODE-MODE-A">
+ <title><constant>DTV_ATSCMH_SCCC_CODE_MODE_A</constant></title>
+ <para>Series Concatenated Convolutional Code Rate.</para>
+ <para>Possible values are:</para>
+<programlisting>
+typedef enum atscmh_sccc_code_mode {
+ ATSCMH_SCCC_CODE_HLF = 0,
+ ATSCMH_SCCC_CODE_QTR = 1,
+} atscmh_sccc_code_mode_t;
+</programlisting>
+ </section>
+ <section id="DTV-ATSCMH-SCCC-CODE-MODE-B">
+ <title><constant>DTV_ATSCMH_SCCC_CODE_MODE_B</constant></title>
+ <para>Series Concatenated Convolutional Code Rate.</para>
+ <para>Possible values are:</para>
+<programlisting>
+typedef enum atscmh_sccc_code_mode {
+ ATSCMH_SCCC_CODE_HLF = 0,
+ ATSCMH_SCCC_CODE_QTR = 1,
+} atscmh_sccc_code_mode_t;
+</programlisting>
+ </section>
+ <section id="DTV-ATSCMH-SCCC-CODE-MODE-C">
+ <title><constant>DTV_ATSCMH_SCCC_CODE_MODE_C</constant></title>
+ <para>Series Concatenated Convolutional Code Rate.</para>
+ <para>Possible values are:</para>
+<programlisting>
+typedef enum atscmh_sccc_code_mode {
+ ATSCMH_SCCC_CODE_HLF = 0,
+ ATSCMH_SCCC_CODE_QTR = 1,
+} atscmh_sccc_code_mode_t;
+</programlisting>
+ </section>
+ <section id="DTV-ATSCMH-SCCC-CODE-MODE-D">
+ <title><constant>DTV_ATSCMH_SCCC_CODE_MODE_D</constant></title>
+ <para>Series Concatenated Convolutional Code Rate.</para>
+ <para>Possible values are:</para>
+<programlisting>
+typedef enum atscmh_sccc_code_mode {
+ ATSCMH_SCCC_CODE_HLF = 0,
+ ATSCMH_SCCC_CODE_QTR = 1,
+} atscmh_sccc_code_mode_t;
+</programlisting>
+ </section>
</section>
<section id="DTV-API-VERSION">
<title><constant>DTV_API_VERSION</constant></title>
@@ -774,6 +907,33 @@ typedef enum fe_hierarchy {
<listitem><para><link linkend="DTV-BANDWIDTH-HZ"><constant>DTV_BANDWIDTH_HZ</constant></link></para></listitem>
</itemizedlist>
</section>
+ <section id="atscmh-params">
+ <title>ATSC-MH delivery system</title>
+ <para>The following parameters are valid for ATSC-MH:</para>
+ <itemizedlist mark='opencircle'>
+ <listitem><para><link linkend="DTV-API-VERSION"><constant>DTV_API_VERSION</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-DELIVERY-SYSTEM"><constant>DTV_DELIVERY_SYSTEM</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-TUNE"><constant>DTV_TUNE</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-CLEAR"><constant>DTV_CLEAR</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-FREQUENCY"><constant>DTV_FREQUENCY</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-BANDWIDTH-HZ"><constant>DTV_BANDWIDTH_HZ</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-ATSCMH-FIC-VER"><constant>DTV_ATSCMH_FIC_VER</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-ATSCMH-PARADE-ID"><constant>DTV_ATSCMH_PARADE_ID</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-ATSCMH-NOG"><constant>DTV_ATSCMH_NOG</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-ATSCMH-TNOG"><constant>DTV_ATSCMH_TNOG</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-ATSCMH-SGN"><constant>DTV_ATSCMH_SGN</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-ATSCMH-PRC"><constant>DTV_ATSCMH_PRC</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-ATSCMH-RS-FRAME-MODE"><constant>DTV_ATSCMH_RS_FRAME_MODE</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-ATSCMH-RS-FRAME-ENSEMBLE"><constant>DTV_ATSCMH_RS_FRAME_ENSEMBLE</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-ATSCMH-RS-CODE-MODE-PRI"><constant>DTV_ATSCMH_RS_CODE_MODE_PRI</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-ATSCMH-RS-CODE-MODE-SEC"><constant>DTV_ATSCMH_RS_CODE_MODE_SEC</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-ATSCMH-SCCC-BLOCK-MODE"><constant>DTV_ATSCMH_SCCC_BLOCK_MODE</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-ATSCMH-SCCC-CODE-MODE-A"><constant>DTV_ATSCMH_SCCC_CODE_MODE_A</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-ATSCMH-SCCC-CODE-MODE-B"><constant>DTV_ATSCMH_SCCC_CODE_MODE_B</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-ATSCMH-SCCC-CODE-MODE-C"><constant>DTV_ATSCMH_SCCC_CODE_MODE_C</constant></link></para></listitem>
+ <listitem><para><link linkend="DTV-ATSCMH-SCCC-CODE-MODE-D"><constant>DTV_ATSCMH_SCCC_CODE_MODE_D</constant></link></para></listitem>
+ </itemizedlist>
+ </section>
</section>
<section id="frontend-property-cable-systems">
<title>Properties used on cable delivery systems</title>
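A hedged userspace sketch of tuning with the ATSC-MH properties listed above via the standard FE_SET_PROPERTY ioctl; the device path, frequency and parade id are placeholder values, and the DTV_ATSCMH_* symbols are assumed to come from the updated linux/dvb/frontend.h:

/*
 * Sketch only: select the ATSC-MH delivery system and tune, using the
 * property set documented above. Adapter path and numeric values are
 * placeholders for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

int main(void)
{
	struct dtv_property props[] = {
		{ .cmd = DTV_DELIVERY_SYSTEM,  .u.data = SYS_ATSCMH },
		{ .cmd = DTV_FREQUENCY,        .u.data = 473000000 },
		{ .cmd = DTV_ATSCMH_PARADE_ID, .u.data = 0 },
		{ .cmd = DTV_TUNE,             .u.data = 0 },
	};
	struct dtv_properties cmd = {
		.num = sizeof(props) / sizeof(props[0]),
		.props = props,
	};
	int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);

	if (fd < 0 || ioctl(fd, FE_SET_PROPERTY, &cmd) < 0) {
		perror("FE_SET_PROPERTY");
		return 1;
	}
	return 0;
}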
diff --git a/Documentation/DocBook/media/v4l/biblio.xml b/Documentation/DocBook/media/v4l/biblio.xml
index 7dc65c592a87..7c49facecd25 100644
--- a/Documentation/DocBook/media/v4l/biblio.xml
+++ b/Documentation/DocBook/media/v4l/biblio.xml
@@ -197,4 +197,33 @@ in the frequency range from 87,5 to 108,0 MHz</title>
<title>NTSC-4: United States RBDS Standard</title>
</biblioentry>
+ <biblioentry id="iso12232">
+ <abbrev>ISO&nbsp;12232:2006</abbrev>
+ <authorgroup>
+ <corpauthor>International Organization for Standardization
+(<ulink url="http://www.iso.org">http://www.iso.org</ulink>)</corpauthor>
+ </authorgroup>
+ <title>Photography &mdash; Digital still cameras &mdash; Determination
+ of exposure index, ISO speed ratings, standard output sensitivity, and
+ recommended exposure index</title>
+ </biblioentry>
+
+ <biblioentry id="cea861">
+ <abbrev>CEA-861-E</abbrev>
+ <authorgroup>
+ <corpauthor>Consumer Electronics Association
+(<ulink url="http://www.ce.org">http://www.ce.org</ulink>)</corpauthor>
+ </authorgroup>
+ <title>A DTV Profile for Uncompressed High Speed Digital Interfaces</title>
+ </biblioentry>
+
+ <biblioentry id="vesadmt">
+ <abbrev>VESA&nbsp;DMT</abbrev>
+ <authorgroup>
+ <corpauthor>Video Electronics Standards Association
+(<ulink url="http://www.vesa.org">http://www.vesa.org</ulink>)</corpauthor>
+ </authorgroup>
+ <title>VESA and Industry Standards and Guidelines for Computer Display Monitor Timing (DMT)</title>
+ </biblioentry>
+
</bibliography>
diff --git a/Documentation/DocBook/media/v4l/common.xml b/Documentation/DocBook/media/v4l/common.xml
index c79278acfb0e..4101aeb56540 100644
--- a/Documentation/DocBook/media/v4l/common.xml
+++ b/Documentation/DocBook/media/v4l/common.xml
@@ -724,41 +724,49 @@ if (-1 == ioctl (fd, &VIDIOC-S-STD;, &amp;std_id)) {
}
</programlisting>
</example>
+ </section>
<section id="dv-timings">
<title>Digital Video (DV) Timings</title>
<para>
- The video standards discussed so far has been dealing with Analog TV and the
+ The video standards discussed so far have been dealing with Analog TV and the
corresponding video timings. Today there are many more different hardware interfaces
such as High Definition TV interfaces (HDMI), VGA, DVI connectors etc., that carry
video signals and there is a need to extend the API to select the video timings
for these interfaces. Since it is not possible to extend the &v4l2-std-id; due to
-the limited bits available, a new set of IOCTLs is added to set/get video timings at
+the limited bits available, a new set of IOCTLs was added to set/get video timings at
the input and output: </para><itemizedlist>
<listitem>
- <para>DV Presets: Digital Video (DV) presets. These are IDs representing a
+ <para>DV Timings: This will allow applications to define detailed
+video timings for the interface. This includes parameters such as width, height,
+polarities, frontporch, backporch etc. The <filename>linux/v4l2-dv-timings.h</filename>
+header can be used to get the timings of the formats in the <xref linkend="cea861" /> and
+<xref linkend="vesadmt" /> standards.
+ </para>
+ </listitem>
+ <listitem>
+ <para>DV Presets: Digital Video (DV) presets (<emphasis role="bold">deprecated</emphasis>).
+ These are IDs representing a
video timing at the input/output. Presets are pre-defined timings implemented
by the hardware according to video standards. A __u32 data type is used to represent
a preset unlike the bit mask that is used in &v4l2-std-id; allowing future extensions
-to support as many different presets as needed.</para>
- </listitem>
- <listitem>
- <para>Custom DV Timings: This will allow applications to define more detailed
-custom video timings for the interface. This includes parameters such as width, height,
-polarities, frontporch, backporch etc.
- </para>
+to support as many different presets as needed. This API is deprecated in favor of the DV Timings
+API.</para>
</listitem>
</itemizedlist>
+ <para>To enumerate and query the attributes of the DV timings supported by a device,
+ applications use the &VIDIOC-ENUM-DV-TIMINGS; and &VIDIOC-DV-TIMINGS-CAP; ioctls.
+ To set DV timings for the device, applications use the
+&VIDIOC-S-DV-TIMINGS; ioctl and to get current DV timings they use the
+&VIDIOC-G-DV-TIMINGS; ioctl. To detect the DV timings as seen by the video receiver,
+applications use the &VIDIOC-QUERY-DV-TIMINGS; ioctl.</para>
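+	<para>The following is a brief usage sketch, included here as an
+illustration only: it assumes an already-open capture device file descriptor
+fd and omits capability checks. The application first detects the timings
+seen by the receiver and then programs them at the input.</para>
+	<programlisting>
+struct v4l2_dv_timings timings;
+
+memset(&amp;timings, 0, sizeof(timings));
+
+/* Detect the timings currently seen by the video receiver. */
+if (-1 == ioctl(fd, VIDIOC_QUERY_DV_TIMINGS, &amp;timings)) {
+	perror("VIDIOC_QUERY_DV_TIMINGS");
+	exit(EXIT_FAILURE);
+}
+
+/* Program the detected timings at the input. */
+if (-1 == ioctl(fd, VIDIOC_S_DV_TIMINGS, &amp;timings)) {
+	perror("VIDIOC_S_DV_TIMINGS");
+	exit(EXIT_FAILURE);
+}
+
+/* timings.bt now holds the detailed timings: width, height, blanking, ... */
+printf("%ux%u\n", timings.bt.width, timings.bt.height);
+</programlisting>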
<para>To enumerate and query the attributes of DV presets supported by a device,
applications use the &VIDIOC-ENUM-DV-PRESETS; ioctl. To get the current DV preset,
applications use the &VIDIOC-G-DV-PRESET; ioctl and to set a preset they use the
-&VIDIOC-S-DV-PRESET; ioctl.</para>
- <para>To set custom DV timings for the device, applications use the
-&VIDIOC-S-DV-TIMINGS; ioctl and to get current custom DV timings they use the
-&VIDIOC-G-DV-TIMINGS; ioctl.</para>
+&VIDIOC-S-DV-PRESET; ioctl. To detect the preset as seen by the video receiver,
+applications use the &VIDIOC-QUERY-DV-PRESET; ioctl.</para>
<para>Applications can make use of the <xref linkend="input-capabilities" /> and
<xref linkend="output-capabilities"/> flags to decide what ioctls are available to set the
video timings for the device.</para>
- </section>
</section>
&sub-controls;
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml
index bce97c50391b..ea42ef824948 100644
--- a/Documentation/DocBook/media/v4l/compat.xml
+++ b/Documentation/DocBook/media/v4l/compat.xml
@@ -2407,6 +2407,54 @@ details.</para>
<para>Added <link linkend="jpeg-controls">JPEG compression control
class</link>.</para>
</listitem>
+ <listitem>
+ <para>Extended the DV Timings API:
+ &VIDIOC-ENUM-DV-TIMINGS;, &VIDIOC-QUERY-DV-TIMINGS; and
+ &VIDIOC-DV-TIMINGS-CAP;.</para>
+ </listitem>
+ </orderedlist>
+ </section>
+
+ <section>
+ <title>V4L2 in Linux 3.5</title>
+ <orderedlist>
+ <listitem>
+	  <para>Added integer menus; the new type is
+	  V4L2_CTRL_TYPE_INTEGER_MENU.</para>
+ </listitem>
+ <listitem>
+ <para>Added selection API for V4L2 subdev interface:
+ &VIDIOC-SUBDEV-G-SELECTION; and
+ &VIDIOC-SUBDEV-S-SELECTION;.</para>
+ </listitem>
+ <listitem>
+ <para> Added <constant>V4L2_COLORFX_ANTIQUE</constant>,
+ <constant>V4L2_COLORFX_ART_FREEZE</constant>,
+ <constant>V4L2_COLORFX_AQUA</constant>,
+ <constant>V4L2_COLORFX_SILHOUETTE</constant>,
+ <constant>V4L2_COLORFX_SOLARIZATION</constant>,
+ <constant>V4L2_COLORFX_VIVID</constant> and
+ <constant>V4L2_COLORFX_ARBITRARY_CBCR</constant> menu items
+ to the <constant>V4L2_CID_COLORFX</constant> control.</para>
+ </listitem>
+ <listitem>
+ <para> Added <constant>V4L2_CID_COLORFX_CBCR</constant> control.</para>
+ </listitem>
+ <listitem>
+ <para> Added camera controls <constant>V4L2_CID_AUTO_EXPOSURE_BIAS</constant>,
+ <constant>V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE</constant>,
+ <constant>V4L2_CID_IMAGE_STABILIZATION</constant>,
+ <constant>V4L2_CID_ISO_SENSITIVITY</constant>,
+ <constant>V4L2_CID_ISO_SENSITIVITY_AUTO</constant>,
+ <constant>V4L2_CID_EXPOSURE_METERING</constant>,
+ <constant>V4L2_CID_SCENE_MODE</constant>,
+ <constant>V4L2_CID_3A_LOCK</constant>,
+ <constant>V4L2_CID_AUTO_FOCUS_START</constant>,
+ <constant>V4L2_CID_AUTO_FOCUS_STOP</constant>,
+ <constant>V4L2_CID_AUTO_FOCUS_STATUS</constant> and
+ <constant>V4L2_CID_AUTO_FOCUS_RANGE</constant>.
+ </para>
+ </listitem>
</orderedlist>
</section>
@@ -2508,6 +2556,10 @@ and may change in the future.</para>
ioctls.</para>
</listitem>
<listitem>
+ <para>&VIDIOC-DECODER-CMD; and &VIDIOC-TRY-DECODER-CMD;
+ioctls.</para>
+ </listitem>
+ <listitem>
<para>&VIDIOC-DBG-G-REGISTER; and &VIDIOC-DBG-S-REGISTER;
ioctls.</para>
</listitem>
@@ -2515,6 +2567,10 @@ ioctls.</para>
<para>&VIDIOC-DBG-G-CHIP-IDENT; ioctl.</para>
</listitem>
<listitem>
+ <para>&VIDIOC-ENUM-DV-TIMINGS;, &VIDIOC-QUERY-DV-TIMINGS; and
+ &VIDIOC-DV-TIMINGS-CAP; ioctls.</para>
+ </listitem>
+ <listitem>
<para>Flash API. <xref linkend="flash-controls" /></para>
</listitem>
<listitem>
@@ -2523,6 +2579,14 @@ ioctls.</para>
<listitem>
<para>Selection API. <xref linkend="selection-api" /></para>
</listitem>
+ <listitem>
+ <para>Sub-device selection API: &VIDIOC-SUBDEV-G-SELECTION;
+ and &VIDIOC-SUBDEV-S-SELECTION; ioctls.</para>
+ </listitem>
+ <listitem>
+ <para><link linkend="v4l2-auto-focus-area"><constant>
+ V4L2_CID_AUTO_FOCUS_AREA</constant></link> control.</para>
+ </listitem>
</itemizedlist>
</section>
@@ -2538,6 +2602,17 @@ interfaces and should not be implemented in new drivers.</para>
<constant>VIDIOC_S_MPEGCOMP</constant> ioctls. Use Extended Controls,
<xref linkend="extended-controls" />.</para>
</listitem>
+ <listitem>
+ <para>&VIDIOC-G-DV-PRESET;, &VIDIOC-S-DV-PRESET;, &VIDIOC-ENUM-DV-PRESETS; and
+ &VIDIOC-QUERY-DV-PRESET; ioctls. Use the DV Timings API (<xref linkend="dv-timings" />).</para>
+ </listitem>
+ <listitem>
+ <para><constant>VIDIOC_SUBDEV_G_CROP</constant> and
+ <constant>VIDIOC_SUBDEV_S_CROP</constant> ioctls. Use
+ <constant>VIDIOC_SUBDEV_G_SELECTION</constant> and
+ <constant>VIDIOC_SUBDEV_S_SELECTION</constant>, <xref
+ linkend="vidioc-subdev-g-selection" />.</para>
+ </listitem>
</itemizedlist>
</section>
</section>
diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml
index dd03cf4a6539..676bc46f9c52 100644
--- a/Documentation/DocBook/media/v4l/controls.xml
+++ b/Documentation/DocBook/media/v4l/controls.xml
@@ -285,18 +285,92 @@ minimum value disables backlight compensation.</entry>
<row id="v4l2-colorfx">
<entry><constant>V4L2_CID_COLORFX</constant></entry>
<entry>enum</entry>
- <entry>Selects a color effect. Possible values for
-<constant>enum v4l2_colorfx</constant> are:
-<constant>V4L2_COLORFX_NONE</constant> (0),
-<constant>V4L2_COLORFX_BW</constant> (1),
-<constant>V4L2_COLORFX_SEPIA</constant> (2),
-<constant>V4L2_COLORFX_NEGATIVE</constant> (3),
-<constant>V4L2_COLORFX_EMBOSS</constant> (4),
-<constant>V4L2_COLORFX_SKETCH</constant> (5),
-<constant>V4L2_COLORFX_SKY_BLUE</constant> (6),
-<constant>V4L2_COLORFX_GRASS_GREEN</constant> (7),
-<constant>V4L2_COLORFX_SKIN_WHITEN</constant> (8) and
-<constant>V4L2_COLORFX_VIVID</constant> (9).</entry>
+ <entry>Selects a color effect. The following values are defined:
+ </entry>
+ </row><row>
+ <entry></entry>
+ <entry></entry>
+ <entrytbl spanname="descr" cols="2">
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_COLORFX_NONE</constant>&nbsp;</entry>
+ <entry>Color effect is disabled.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORFX_ANTIQUE</constant>&nbsp;</entry>
+ <entry>An aging (old photo) effect.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORFX_ART_FREEZE</constant>&nbsp;</entry>
+ <entry>Frost color effect.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORFX_AQUA</constant>&nbsp;</entry>
+ <entry>Water color, cool tone.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORFX_BW</constant>&nbsp;</entry>
+ <entry>Black and white.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORFX_EMBOSS</constant>&nbsp;</entry>
+	      <entry>Emboss: highlights and shadows replace light/dark boundaries,
+	      and low-contrast areas are set to a gray background.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORFX_GRASS_GREEN</constant>&nbsp;</entry>
+ <entry>Grass green.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORFX_NEGATIVE</constant>&nbsp;</entry>
+ <entry>Negative.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORFX_SEPIA</constant>&nbsp;</entry>
+ <entry>Sepia tone.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORFX_SKETCH</constant>&nbsp;</entry>
+ <entry>Sketch.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORFX_SKIN_WHITEN</constant>&nbsp;</entry>
+ <entry>Skin whiten.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORFX_SKY_BLUE</constant>&nbsp;</entry>
+ <entry>Sky blue.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORFX_SOLARIZATION</constant>&nbsp;</entry>
+	      <entry>Solarization: the image is partially reversed in tone;
+	      only color values above or below a certain threshold are inverted.
+ </entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORFX_SILHOUETTE</constant>&nbsp;</entry>
+ <entry>Silhouette (outline).</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORFX_VIVID</constant>&nbsp;</entry>
+ <entry>Vivid colors.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_COLORFX_SET_CBCR</constant>&nbsp;</entry>
+ <entry>The Cb and Cr chroma components are replaced by fixed
+ coefficients determined by <constant>V4L2_CID_COLORFX_CBCR</constant>
+ control.</entry>
+ </row>
+ </tbody>
+ </entrytbl>
+ </row>
+ <row>
+ <entry><constant>V4L2_CID_COLORFX_CBCR</constant></entry>
+ <entry>integer</entry>
+	<entry>Determines the Cb and Cr coefficients for the <constant>V4L2_COLORFX_SET_CBCR</constant>
+	color effect. Bits [7:0] of the supplied 32-bit value are interpreted as the
+	Cr component, bits [15:8] as the Cb component, and bits [31:16] must be zero.
+ </entry>
</row>
<row>
<entry><constant>V4L2_CID_ROTATE</constant></entry>
@@ -2775,6 +2849,51 @@ remain constant.</entry>
<row><entry></entry></row>
<row>
+	<entry spanname="id"><constant>V4L2_CID_AUTO_EXPOSURE_BIAS</constant>&nbsp;</entry>
+ <entry>integer menu</entry>
+  </row><row><entry spanname="descr">Determines the automatic
+exposure compensation. It is effective only when the <constant>V4L2_CID_EXPOSURE_AUTO</constant>
+control is set to <constant>AUTO</constant>, <constant>SHUTTER_PRIORITY</constant>
+or <constant>APERTURE_PRIORITY</constant>.
+It is expressed in terms of EV; drivers should interpret the values as 0.001 EV
+units, where the value 1000 stands for +1 EV.
+<para>Increasing the exposure compensation value is equivalent to decreasing
+the exposure value (EV) and will increase the amount of light at the image
+sensor. The camera performs the exposure compensation by adjusting the absolute
+exposure time and/or aperture.</para></entry>
+ </row>
+ <row><entry></entry></row>
+
+ <row id="v4l2-exposure-metering">
+ <entry spanname="id"><constant>V4L2_CID_EXPOSURE_METERING</constant>&nbsp;</entry>
+ <entry>enum&nbsp;v4l2_exposure_metering</entry>
+ </row><row><entry spanname="descr">Determines how the camera measures
+the amount of light available for the frame exposure. Possible values are:</entry>
+ </row>
+ <row>
+ <entrytbl spanname="descr" cols="2">
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_EXPOSURE_METERING_AVERAGE</constant>&nbsp;</entry>
+	    <entry>Use the light information coming from the entire frame
+and average it, giving no weighting to any particular portion of the metered area.
+ </entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_EXPOSURE_METERING_CENTER_WEIGHTED</constant>&nbsp;</entry>
+ <entry>Average the light information coming from the entire frame
+giving priority to the center of the metered area.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_EXPOSURE_METERING_SPOT</constant>&nbsp;</entry>
+	    <entry>Measure only a very small area at the center of the frame.</entry>
+ </row>
+ </tbody>
+ </entrytbl>
+ </row>
+ <row><entry></entry></row>
+
+ <row>
<entry spanname="id"><constant>V4L2_CID_PAN_RELATIVE</constant>&nbsp;</entry>
<entry>integer</entry>
</row><row><entry spanname="descr">This control turns the
@@ -2857,13 +2976,107 @@ negative values towards infinity. This is a write-only control.</entry>
<row>
<entry spanname="id"><constant>V4L2_CID_FOCUS_AUTO</constant>&nbsp;</entry>
<entry>boolean</entry>
- </row><row><entry spanname="descr">Enables automatic focus
-adjustments. The effect of manual focus adjustments while this feature
+ </row><row><entry spanname="descr">Enables continuous automatic
+focus adjustments. The effect of manual focus adjustments while this feature
is enabled is undefined, drivers should ignore such requests.</entry>
</row>
<row><entry></entry></row>
<row>
+ <entry spanname="id"><constant>V4L2_CID_AUTO_FOCUS_START</constant>&nbsp;</entry>
+ <entry>button</entry>
+  </row><row><entry spanname="descr">Starts a single auto focus process.
+The effect of setting this control when <constant>V4L2_CID_FOCUS_AUTO</constant>
+is set to <constant>TRUE</constant> (1) is undefined; drivers should ignore
+such requests.</entry>
+ </row>
+ <row><entry></entry></row>
+
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_AUTO_FOCUS_STOP</constant>&nbsp;</entry>
+ <entry>button</entry>
+ </row><row><entry spanname="descr">Aborts automatic focusing
+started with the <constant>V4L2_CID_AUTO_FOCUS_START</constant> control. It is
+effective only when the continuous autofocus is disabled, that is when
+<constant>V4L2_CID_FOCUS_AUTO</constant> control is set to <constant>FALSE
+</constant> (0).</entry>
+ </row>
+ <row><entry></entry></row>
+
+ <row id="v4l2-auto-focus-status">
+ <entry spanname="id">
+ <constant>V4L2_CID_AUTO_FOCUS_STATUS</constant>&nbsp;</entry>
+ <entry>bitmask</entry>
+ </row>
+ <row><entry spanname="descr">The automatic focus status. This is a read-only
+ control.</entry>
+ </row>
+ <row>
+ <entrytbl spanname="descr" cols="2">
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_AUTO_FOCUS_STATUS_IDLE</constant>&nbsp;</entry>
+ <entry>Automatic focus is not active.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_AUTO_FOCUS_STATUS_BUSY</constant>&nbsp;</entry>
+ <entry>Automatic focusing is in progress.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_AUTO_FOCUS_STATUS_REACHED</constant>&nbsp;</entry>
+ <entry>Focus has been reached.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_AUTO_FOCUS_STATUS_FAILED</constant>&nbsp;</entry>
+	      <entry>Automatic focus has failed; the driver will not
+	      transition from this state until another action is
+	      performed by an application.</entry>
+ </row>
+ </tbody>
+ </entrytbl>
+ </row>
+ <row><entry spanname="descr">
+Setting the <constant>V4L2_LOCK_FOCUS</constant> lock bit of the <constant>V4L2_CID_3A_LOCK
+</constant> control may stop updates of the <constant>V4L2_CID_AUTO_FOCUS_STATUS</constant>
+control value.</entry>
+ </row>
+ <row><entry></entry></row>
+
+ <row id="v4l2-auto-focus-range">
+ <entry spanname="id">
+ <constant>V4L2_CID_AUTO_FOCUS_RANGE</constant>&nbsp;</entry>
+ <entry>enum&nbsp;v4l2_auto_focus_range</entry>
+ </row>
+	<row><entry spanname="descr">Determines the auto focus distance range
+for which the lens may be adjusted.</entry>
+ </row>
+ <row>
+ <entrytbl spanname="descr" cols="2">
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_AUTO_FOCUS_RANGE_AUTO</constant>&nbsp;</entry>
+ <entry>The camera automatically selects the focus range.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_AUTO_FOCUS_RANGE_NORMAL</constant>&nbsp;</entry>
+ <entry>Normal distance range, limited for best automatic focus
+performance.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_AUTO_FOCUS_RANGE_MACRO</constant>&nbsp;</entry>
+ <entry>Macro (close-up) auto focus. The camera will
+use its minimum possible distance for auto focus.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_AUTO_FOCUS_RANGE_INFINITY</constant>&nbsp;</entry>
+ <entry>The lens is set to focus on an object at infinite distance.</entry>
+ </row>
+ </tbody>
+ </entrytbl>
+ </row>
+ <row><entry></entry></row>
+
+ <row>
<entry spanname="id"><constant>V4L2_CID_ZOOM_ABSOLUTE</constant>&nbsp;</entry>
<entry>integer</entry>
</row><row><entry spanname="descr">Specify the objective lens
@@ -2932,6 +3145,295 @@ camera sensor on or off, or specify its strength. Such band-stop filters can
be used, for example, to filter out the fluorescent light component.</entry>
</row>
<row><entry></entry></row>
+
+ <row id="v4l2-auto-n-preset-white-balance">
+ <entry spanname="id"><constant>V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE</constant>&nbsp;</entry>
+ <entry>enum&nbsp;v4l2_auto_n_preset_white_balance</entry>
+  </row><row><entry spanname="descr">Sets white balance to automatic,
+manual or a preset. The presets determine the color temperature of the light as
+a hint to the camera for white balance adjustments, resulting in the most accurate
+color representation. The following white balance presets are listed in order
+of increasing color temperature.</entry>
+ </row>
+ <row>
+ <entrytbl spanname="descr" cols="2">
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_WHITE_BALANCE_MANUAL</constant>&nbsp;</entry>
+ <entry>Manual white balance.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_WHITE_BALANCE_AUTO</constant>&nbsp;</entry>
+ <entry>Automatic white balance adjustments.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_WHITE_BALANCE_INCANDESCENT</constant>&nbsp;</entry>
+ <entry>White balance setting for incandescent (tungsten) lighting.
+It generally cools down the colors and corresponds approximately to 2500...3500 K
+color temperature range.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_WHITE_BALANCE_FLUORESCENT</constant>&nbsp;</entry>
+ <entry>White balance preset for fluorescent lighting.
+It corresponds approximately to 4000...5000 K color temperature.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_WHITE_BALANCE_FLUORESCENT_H</constant>&nbsp;</entry>
+ <entry>With this setting the camera will compensate for
+fluorescent H lighting.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_WHITE_BALANCE_HORIZON</constant>&nbsp;</entry>
+ <entry>White balance setting for horizon daylight.
+It corresponds approximately to 5000 K color temperature.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_WHITE_BALANCE_DAYLIGHT</constant>&nbsp;</entry>
+ <entry>White balance preset for daylight (with clear sky).
+It corresponds approximately to 5000...6500 K color temperature.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_WHITE_BALANCE_FLASH</constant>&nbsp;</entry>
+ <entry>With this setting the camera will compensate for the flash
+light. It slightly warms up the colors and corresponds roughly to 5000...5500 K
+color temperature.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_WHITE_BALANCE_CLOUDY</constant>&nbsp;</entry>
+ <entry>White balance preset for moderately overcast sky.
+This option corresponds approximately to 6500...8000 K color temperature
+range.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_WHITE_BALANCE_SHADE</constant>&nbsp;</entry>
+ <entry>White balance preset for shade or heavily overcast
+sky. It corresponds approximately to 9000...10000 K color temperature.
+</entry>
+ </row>
+ </tbody>
+ </entrytbl>
+ </row>
+ <row><entry></entry></row>
+
+ <row id="v4l2-wide-dynamic-range">
+ <entry spanname="id"><constant>V4L2_CID_WIDE_DYNAMIC_RANGE</constant></entry>
+ <entry>boolean</entry>
+ </row>
+ <row>
+ <entry spanname="descr">Enables or disables the camera's wide dynamic
+range feature. This feature makes it possible to obtain clear images in situations
+where the intensity of the illumination varies significantly throughout the scene, i.e.
+there are simultaneously very dark and very bright areas. It is most commonly
+realized in cameras by combining two subsequent frames with different exposure
+times. <footnote id="ctypeconv"><para> This control may be changed to a menu
+control in the future, if more options are required.</para></footnote></entry>
+ </row>
+ <row><entry></entry></row>
+
+ <row id="v4l2-image-stabilization">
+ <entry spanname="id"><constant>V4L2_CID_IMAGE_STABILIZATION</constant></entry>
+ <entry>boolean</entry>
+ </row>
+ <row>
+ <entry spanname="descr">Enables or disables image stabilization.
+ <footnoteref linkend="ctypeconv"/></entry>
+ </row>
+ <row><entry></entry></row>
+
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_ISO_SENSITIVITY</constant>&nbsp;</entry>
+ <entry>integer menu</entry>
+  </row><row><entry spanname="descr">Determines the ISO equivalent of an
+image sensor, indicating the sensor's sensitivity to light. The numbers are
+expressed on an arithmetic scale, as per the <xref linkend="iso12232" /> standard,
+where doubling the sensor sensitivity is represented by doubling the numerical
+ISO value. Applications should interpret the values as standard ISO values
+multiplied by 1000, e.g. control value 800 stands for ISO 0.8. Drivers will
+usually support only a subset of standard ISO values. The effect of setting
+this control while the <constant>V4L2_CID_ISO_SENSITIVITY_AUTO</constant>
+control is set to a value other than <constant>V4L2_ISO_SENSITIVITY_MANUAL
+</constant> is undefined; drivers should ignore such requests.</entry>
+ </row>
+ <row><entry></entry></row>
+
+ <row id="v4l2-iso-sensitivity-auto-type">
+ <entry spanname="id"><constant>V4L2_CID_ISO_SENSITIVITY_AUTO</constant>&nbsp;</entry>
+	<entry>enum&nbsp;v4l2_iso_sensitivity_auto_type</entry>
+ </row><row><entry spanname="descr">Enables or disables automatic ISO
+sensitivity adjustments.</entry>
+ </row>
+ <row>
+ <entrytbl spanname="descr" cols="2">
+ <tbody valign="top">
+ <row>
+	      <entry><constant>V4L2_ISO_SENSITIVITY_MANUAL</constant>&nbsp;</entry>
+ <entry>Manual ISO sensitivity.</entry>
+ </row>
+ <row>
+	      <entry><constant>V4L2_ISO_SENSITIVITY_AUTO</constant>&nbsp;</entry>
+ <entry>Automatic ISO sensitivity adjustments.</entry>
+ </row>
+ </tbody>
+ </entrytbl>
+ </row>
+ <row><entry></entry></row>
+
+ <row id="v4l2-scene-mode">
+ <entry spanname="id"><constant>V4L2_CID_SCENE_MODE</constant>&nbsp;</entry>
+ <entry>enum&nbsp;v4l2_scene_mode</entry>
+  </row><row><entry spanname="descr">This control allows selecting
+scene programs, the camera's automatic modes optimized for common shooting
+scenes. Within these modes the camera determines the best exposure, aperture,
+focusing, light metering, white balance and equivalent sensitivity. The
+controls of those parameters are influenced by the scene mode control.
+The exact behavior in each mode is subject to the camera specification.
+
+<para>When the scene mode feature is not used, this control should be set to
+<constant>V4L2_SCENE_MODE_NONE</constant> to make sure the other possibly
+related controls are accessible. The following scene programs are defined:
+</para>
+</entry>
+ </row>
+ <row>
+ <entrytbl spanname="descr" cols="2">
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_SCENE_MODE_NONE</constant>&nbsp;</entry>
+ <entry>The scene mode feature is disabled.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SCENE_MODE_BACKLIGHT</constant>&nbsp;</entry>
+ <entry>Backlight. Compensates for dark shadows when light is
+ coming from behind a subject, also by automatically turning
+ on the flash.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SCENE_MODE_BEACH_SNOW</constant>&nbsp;</entry>
+ <entry>Beach and snow. This mode compensates for all-white or
+bright scenes, which tend to look gray and low contrast, when the camera's automatic
+exposure is based on an average scene brightness. To compensate, this mode
+automatically slightly overexposes the frames. The white balance may also be
+adjusted to compensate for the fact that reflected snow looks bluish rather
+than white.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SCENE_MODE_CANDLELIGHT</constant>&nbsp;</entry>
+ <entry>Candle light. The camera generally raises the ISO
+sensitivity and lowers the shutter speed. This mode compensates for a relatively
+close subject in the scene. The flash is disabled in order to preserve the
+ambiance of the light.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SCENE_MODE_DAWN_DUSK</constant>&nbsp;</entry>
+ <entry>Dawn and dusk. Preserves the colors seen in low
+natural light before dusk and after dawn. The camera may turn off the flash,
+and automatically focus at infinity. It will usually boost saturation and
+lower the shutter speed.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SCENE_MODE_FALL_COLORS</constant>&nbsp;</entry>
+ <entry>Fall colors. Increases saturation and adjusts white
+balance for color enhancement. Pictures of autumn leaves get saturated reds
+and yellows.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SCENE_MODE_FIREWORKS</constant>&nbsp;</entry>
+ <entry>Fireworks. Long exposure times are used to capture
+the expanding burst of light from a firework. The camera may invoke image
+stabilization.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SCENE_MODE_LANDSCAPE</constant>&nbsp;</entry>
+ <entry>Landscape. The camera may choose a small aperture to
+provide deep depth of field and long exposure duration to help capture detail
+in dim light conditions. The focus is fixed at infinity. Suitable for distant
+and wide scenery.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SCENE_MODE_NIGHT</constant>&nbsp;</entry>
+ <entry>Night, also known as Night Landscape. Designed for low
+light conditions, it preserves detail in the dark areas without blowing out bright
+objects. The camera generally sets itself to a medium-to-high ISO sensitivity,
+with a relatively long exposure time, and turns flash off. As such, there will be
+increased image noise and the possibility of a blurred image.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SCENE_MODE_PARTY_INDOOR</constant>&nbsp;</entry>
+ <entry>Party and indoor. Designed to capture indoor scenes
+that are lit by indoor background lighting as well as the flash. The camera
+usually increases ISO sensitivity, and adjusts exposure for the low light
+conditions.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SCENE_MODE_PORTRAIT</constant>&nbsp;</entry>
+ <entry>Portrait. The camera adjusts the aperture so that the
+depth of field is reduced, which helps to isolate the subject against a smooth
+background. Most cameras recognize the presence of faces in the scene and focus
+on them. The color hue is adjusted to enhance skin tones. The intensity of the
+flash is often reduced.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SCENE_MODE_SPORTS</constant>&nbsp;</entry>
+ <entry>Sports. Significantly increases ISO and uses a fast
+shutter speed to freeze motion of rapidly-moving subjects. Increased image
+noise may be seen in this mode.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SCENE_MODE_SUNSET</constant>&nbsp;</entry>
+ <entry>Sunset. Preserves deep hues seen in sunsets and
+sunrises. It bumps up the saturation.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SCENE_MODE_TEXT</constant>&nbsp;</entry>
+	      <entry>Text. This mode applies extra contrast and sharpness; it is
+typically a black-and-white mode optimized for readability. Automatic focus
+may be switched to close-up mode and this setting may also involve some
+lens-distortion correction.</entry>
+ </row>
+ </tbody>
+ </entrytbl>
+ </row>
+ <row><entry></entry></row>
+
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_3A_LOCK</constant></entry>
+ <entry>bitmask</entry>
+ </row>
+ <row>
+ <entry spanname="descr">This control locks or unlocks the automatic
+focus, exposure and white balance. The automatic adjustments can be paused
+independently by setting the corresponding lock bit to 1. The camera then retains
+the settings until the lock bit is cleared. The following lock bits are defined:
+</entry>
+ </row>
+ <row>
+ <entrytbl spanname="descr" cols="2">
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_LOCK_EXPOSURE</constant></entry>
+ <entry>Automatic exposure adjustments lock.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_LOCK_WHITE_BALANCE</constant></entry>
+ <entry>Automatic white balance adjustments lock.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_LOCK_FOCUS</constant></entry>
+ <entry>Automatic focus lock.</entry>
+ </row>
+ </tbody>
+ </entrytbl>
+ </row>
+ <row><entry spanname="descr">
+When a given algorithm is not enabled, drivers should ignore requests
+to lock it and should return no error. An example might be an application
+setting bit <constant>V4L2_LOCK_WHITE_BALANCE</constant> when the
+<constant>V4L2_CID_AUTO_WHITE_BALANCE</constant> control is set to
+<constant>FALSE</constant>. The value of this control may be changed
+by the exposure, white balance or focus controls. A usage sketch follows this table.</entry>
+ </row>
+ <row><entry></entry></row>
+
</tbody>
</tgroup>
</table>
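+    <para>The following is a brief usage sketch of the camera class controls
+    above, included as an illustration only: it assumes an already-open device
+    file descriptor fd and reduces error handling to a minimum. A single auto
+    focus run is triggered, and the automatic exposure and white balance
+    adjustments are then locked through the extended control interface.</para>
+    <programlisting>
+struct v4l2_ext_control ctrl;
+struct v4l2_ext_controls ctrls;
+
+memset(&amp;ctrl, 0, sizeof(ctrl));
+memset(&amp;ctrls, 0, sizeof(ctrls));
+ctrls.ctrl_class = V4L2_CTRL_CLASS_CAMERA;
+ctrls.count = 1;
+ctrls.controls = &amp;ctrl;
+
+/* Trigger a single auto focus run (a button control, the value is ignored). */
+ctrl.id = V4L2_CID_AUTO_FOCUS_START;
+ctrl.value = 1;
+if (-1 == ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls))
+	perror("VIDIOC_S_EXT_CTRLS");
+
+/* Once focus has been reached, freeze the automatic exposure and
+   white balance adjustments for the upcoming capture. */
+ctrl.id = V4L2_CID_3A_LOCK;
+ctrl.value = V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE;
+if (-1 == ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls))
+	perror("VIDIOC_S_EXT_CTRLS");
+</programlisting>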
@@ -3476,7 +3978,7 @@ interface and may change in the future.</para>
<entry spanname="id"><constant>V4L2_CID_JPEG_CHROMA_SUBSAMPLING</constant></entry>
<entry>menu</entry>
</row>
- <row id="jpeg-chroma-subsampling-control">
+ <row id="v4l2-jpeg-chroma-subsampling">
<entry spanname="descr">The chroma subsampling factors describe how
each component of an input image is sampled, in respect to maximum
sample rate in each spatial dimension. See <xref linkend="itu-t81"/>,
@@ -3486,7 +3988,7 @@ interface and may change in the future.</para>
from RGB to Y'CbCr color space.
</entry>
</row>
- <row>
+	    <row>
<entrytbl spanname="descr" cols="2">
<tbody valign="top">
<row>
@@ -3538,12 +4040,12 @@ interface and may change in the future.</para>
</entry>
</row>
<row id="jpeg-quality-control">
- <entry spanname="id"><constant>V4L2_CID_JPEG_COMPRESION_QUALITY</constant></entry>
+ <entry spanname="id"><constant>V4L2_CID_JPEG_COMPRESSION_QUALITY</constant></entry>
<entry>integer</entry>
</row>
<row>
<entry spanname="descr">
- <constant>V4L2_CID_JPEG_COMPRESION_QUALITY</constant> control
+ <constant>V4L2_CID_JPEG_COMPRESSION_QUALITY</constant> control
determines trade-off between image quality and size.
It provides simpler method for applications to control image quality,
without a need for direct reconfiguration of luminance and chrominance
@@ -3551,7 +4053,7 @@ interface and may change in the future.</para>
In cases where a driver uses quantization tables configured directly
by an application, using interfaces defined elsewhere, <constant>
- V4L2_CID_JPEG_COMPRESION_QUALITY</constant> control should be set
+ V4L2_CID_JPEG_COMPRESSION_QUALITY</constant> control should be set
by driver to 0.
<para>The value range of this control is driver-specific. Only
@@ -3599,4 +4101,172 @@ interface and may change in the future.</para>
to <xref linkend="itu-t81"/>, <xref linkend="jfif"/>,
<xref linkend="w3c-jpeg-jfif"/>.</para>
</section>
+
+ <section id="image-source-controls">
+ <title>Image Source Control Reference</title>
+
+ <note>
+ <title>Experimental</title>
+
+ <para>This is an <link
+ linkend="experimental">experimental</link> interface and may
+ change in the future.</para>
+ </note>
+
+ <para>
+ The Image Source control class is intended for low-level
+ control of image source devices such as image sensors. The
+ devices feature an analogue to digital converter and a bus
+ transmitter to transmit the image data out of the device.
+ </para>
+
+ <table pgwide="1" frame="none" id="image-source-control-id">
+ <title>Image Source Control IDs</title>
+
+ <tgroup cols="4">
+ <colspec colname="c1" colwidth="1*" />
+ <colspec colname="c2" colwidth="6*" />
+ <colspec colname="c3" colwidth="2*" />
+ <colspec colname="c4" colwidth="6*" />
+ <spanspec namest="c1" nameend="c2" spanname="id" />
+ <spanspec namest="c2" nameend="c4" spanname="descr" />
+ <thead>
+ <row>
+ <entry spanname="id" align="left">ID</entry>
+ <entry align="left">Type</entry>
+ </row><row rowsep="1"><entry spanname="descr" align="left">Description</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row><entry></entry></row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_IMAGE_SOURCE_CLASS</constant></entry>
+ <entry>class</entry>
+ </row>
+ <row>
+ <entry spanname="descr">The IMAGE_SOURCE class descriptor.</entry>
+ </row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_VBLANK</constant></entry>
+ <entry>integer</entry>
+ </row>
+ <row>
+ <entry spanname="descr">Vertical blanking. The idle period
+ after every frame during which no image data is produced.
+	    The unit of vertical blanking is a line. Every line has a
+	    length of the image width plus horizontal blanking, at the
+	    pixel rate defined by the
+	    <constant>V4L2_CID_PIXEL_RATE</constant> control in the
+	    same sub-device. A short usage sketch follows this table.</entry>
+ </row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_HBLANK</constant></entry>
+ <entry>integer</entry>
+ </row>
+ <row>
+ <entry spanname="descr">Horizontal blanking. The idle
+ period after every line of image data during which no
+ image data is produced. The unit of horizontal blanking is
+ pixels.</entry>
+ </row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_ANALOGUE_GAIN</constant></entry>
+ <entry>integer</entry>
+ </row>
+ <row>
+ <entry spanname="descr">Analogue gain is gain affecting
+ all colour components in the pixel matrix. The gain
+ operation is performed in the analogue domain before A/D
+ conversion.
+ </entry>
+ </row>
+ <row><entry></entry></row>
+ </tbody>
+ </tgroup>
+ </table>
+
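+      <para>As a usage illustration only (the assumptions that fd refers to an
+      already-open sub-device node exposing these controls, and that the control
+      class macro is the one declared in <filename>linux/v4l2-controls.h</filename>,
+      are the editor's), the current blanking configuration can be read through
+      the extended control interface:</para>
+      <programlisting>
+struct v4l2_ext_control blanking[2];
+struct v4l2_ext_controls ctrls;
+
+memset(blanking, 0, sizeof(blanking));
+memset(&amp;ctrls, 0, sizeof(ctrls));
+blanking[0].id = V4L2_CID_HBLANK;
+blanking[1].id = V4L2_CID_VBLANK;
+ctrls.ctrl_class = V4L2_CTRL_CLASS_IMAGE_SOURCE;
+ctrls.count = 2;
+ctrls.controls = blanking;
+
+if (-1 == ioctl(fd, VIDIOC_G_EXT_CTRLS, &amp;ctrls)) {
+	perror("VIDIOC_G_EXT_CTRLS");
+} else {
+	/* Total line length = width + horizontal blanking (in pixels),
+	   total frame length = height + vertical blanking (in lines). */
+	printf("hblank %d, vblank %d\n", blanking[0].value, blanking[1].value);
+}
+</programlisting>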
+ </section>
+
+ <section id="image-process-controls">
+ <title>Image Process Control Reference</title>
+
+ <note>
+ <title>Experimental</title>
+
+ <para>This is an <link
+ linkend="experimental">experimental</link> interface and may
+ change in the future.</para>
+ </note>
+
+ <para>
+      The Image Process control class is intended for low-level control of
+      image processing functions. Unlike
+      <constant>V4L2_CID_IMAGE_SOURCE_CLASS</constant>, the controls in
+      this class affect processing of the image, and do not control capturing
+      it.
+ </para>
+
+ <table pgwide="1" frame="none" id="image-process-control-id">
+      <title>Image Process Control IDs</title>
+
+ <tgroup cols="4">
+ <colspec colname="c1" colwidth="1*" />
+ <colspec colname="c2" colwidth="6*" />
+ <colspec colname="c3" colwidth="2*" />
+ <colspec colname="c4" colwidth="6*" />
+ <spanspec namest="c1" nameend="c2" spanname="id" />
+ <spanspec namest="c2" nameend="c4" spanname="descr" />
+ <thead>
+ <row>
+ <entry spanname="id" align="left">ID</entry>
+ <entry align="left">Type</entry>
+ </row><row rowsep="1"><entry spanname="descr" align="left">Description</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row><entry></entry></row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_IMAGE_PROC_CLASS</constant></entry>
+ <entry>class</entry>
+ </row>
+ <row>
+ <entry spanname="descr">The IMAGE_PROC class descriptor.</entry>
+ </row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_LINK_FREQ</constant></entry>
+ <entry>integer menu</entry>
+ </row>
+ <row>
+	    <entry spanname="descr">Data bus frequency. Together with the
+	    media bus pixel code and the bus type (clock cycles per sample),
+	    the data bus frequency defines the pixel rate
+	    (<constant>V4L2_CID_PIXEL_RATE</constant>) in the
+	    pixel array (or possibly elsewhere, if the device is not an
+	    image sensor). The frame rate can be calculated from the pixel
+	    clock, image width and height, and horizontal and vertical
+	    blanking. While the pixel rate control may be defined elsewhere
+	    than in the subdev containing the pixel array, the frame rate
+	    cannot be obtained from that information alone. This is because
+	    only on the pixel array can it be assumed that the vertical and
+	    horizontal blanking information is exact: no other blanking is
+	    allowed in the pixel array. The frame rate is therefore selected
+	    by choosing the desired horizontal and vertical blanking. The
+	    unit of this control is Hz. A sketch of this calculation follows the table.</entry>
+ </row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_PIXEL_RATE</constant></entry>
+ <entry>64-bit integer</entry>
+ </row>
+ <row>
+ <entry spanname="descr">Pixel rate in the source pads of
+ the subdev. This control is read-only and its unit is
+ pixels / second.
+ </entry>
+ </row>
+ <row><entry></entry></row>
+ </tbody>
+ </tgroup>
+ </table>
+
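+      <para>The relation between the pixel rate, the blanking and the frame
+      rate described above can be summarised as follows. This is an editor's
+      sketch, not part of the control definitions:</para>
+      <programlisting>
+#include &lt;stdint.h&gt;
+
+/* Frame rate in Hz, derived from the pixel rate (pixels per second, as
+ * reported by V4L2_CID_PIXEL_RATE) and the total line and frame lengths
+ * of the pixel array, i.e. the visible size plus blanking. */
+static double frame_rate(uint64_t pixel_rate, unsigned int width,
+			 unsigned int hblank, unsigned int height,
+			 unsigned int vblank)
+{
+	return (double)pixel_rate /
+	       ((double)(width + hblank) * (height + vblank));
+}
+</programlisting>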
+ </section>
</section>
diff --git a/Documentation/DocBook/media/v4l/dev-subdev.xml b/Documentation/DocBook/media/v4l/dev-subdev.xml
index 0916a7343a16..4afcbbec5eda 100644
--- a/Documentation/DocBook/media/v4l/dev-subdev.xml
+++ b/Documentation/DocBook/media/v4l/dev-subdev.xml
@@ -76,11 +76,12 @@
<wordasword>format</wordasword> means the combination of media bus data
format, frame width and frame height.</para></note>
- <para>Image formats are typically negotiated on video capture and output
- devices using the <link linkend="crop">cropping and scaling</link> ioctls.
- The driver is responsible for configuring every block in the video pipeline
- according to the requested format at the pipeline input and/or
- output.</para>
+ <para>Image formats are typically negotiated on video capture and
+ output devices using the format and <link
+ linkend="vidioc-subdev-g-selection">selection</link> ioctls. The
+ driver is responsible for configuring every block in the video
+ pipeline according to the requested format at the pipeline input
+ and/or output.</para>
<para>For complex devices, such as often found in embedded systems,
identical image sizes at the output of a pipeline can be achieved using
@@ -276,11 +277,11 @@
</section>
<section>
- <title>Cropping and scaling</title>
+ <title>Selections: cropping, scaling and composition</title>
<para>Many sub-devices support cropping frames on their input or output
pads (or possible even on both). Cropping is used to select the area of
- interest in an image, typically on a video sensor or video decoder. It can
+ interest in an image, typically on an image sensor or a video decoder. It can
also be used as part of digital zoom implementations to select the area of
the image that will be scaled up.</para>
@@ -288,26 +289,179 @@
&v4l2-rect; by the coordinates of the top left corner and the rectangle
size. Both the coordinates and sizes are expressed in pixels.</para>
- <para>The crop rectangle is retrieved and set using the
- &VIDIOC-SUBDEV-G-CROP; and &VIDIOC-SUBDEV-S-CROP; ioctls. Like for pad
- formats, drivers store try and active crop rectangles. The format
- negotiation mechanism applies to crop settings as well.</para>
-
- <para>On input pads, cropping is applied relatively to the current pad
- format. The pad format represents the image size as received by the
- sub-device from the previous block in the pipeline, and the crop rectangle
- represents the sub-image that will be transmitted further inside the
- sub-device for processing. The crop rectangle be entirely containted
- inside the input image size.</para>
-
- <para>Input crop rectangle are reset to their default value when the input
- image format is modified. Drivers should use the input image size as the
- crop rectangle default value, but hardware requirements may prevent this.
- </para>
+    <para>As for pad formats, drivers store try and active
+    rectangles for the selection targets of ACTUAL type (see <xref
+    linkend="v4l2-subdev-selection-targets" />).</para>
+
+ <para>On sink pads, cropping is applied relative to the
+ current pad format. The pad format represents the image size as
+ received by the sub-device from the previous block in the
+ pipeline, and the crop rectangle represents the sub-image that
+ will be transmitted further inside the sub-device for
+ processing.</para>
+
+ <para>The scaling operation changes the size of the image by
+ scaling it to new dimensions. The scaling ratio isn't specified
+ explicitly, but is implied from the original and scaled image
+ sizes. Both sizes are represented by &v4l2-rect;.</para>
+
+ <para>Scaling support is optional. When supported by a subdev,
+ the crop rectangle on the subdev's sink pad is scaled to the
+    size configured using the &VIDIOC-SUBDEV-S-SELECTION; ioctl
+    with the <constant>V4L2_SUBDEV_SEL_COMPOSE_ACTUAL</constant>
+    selection target on the same pad. If the subdev supports scaling
+ but not composing, the top and left values are not used and must
+ always be set to zero.</para>
+
+ <para>On source pads, cropping is similar to sink pads, with the
+ exception that the source size from which the cropping is
+    performed is the COMPOSE rectangle on the sink pad. In both
+ sink and source pads, the crop rectangle must be entirely
+ contained inside the source image size for the crop
+ operation.</para>
+
+    <para>Drivers should always use the rectangle closest to the
+    one the user requests on all selection targets, unless
+    specifically told otherwise. The
+    <constant>V4L2_SUBDEV_SEL_FLAG_SIZE_GE</constant> and
+    <constant>V4L2_SUBDEV_SEL_FLAG_SIZE_LE</constant> flags may be
+    used to round the image size either up or down. See <xref
+    linkend="v4l2-subdev-selection-flags" />.</para>
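+    <para>A minimal sketch of setting the sink crop rectangle follows. It is an
+    illustration only: the target and flag macro names are assumed to be those
+    declared in <filename>linux/v4l2-subdev.h</filename>, and the pad number
+    and rectangle are arbitrary examples.</para>
+    <programlisting>
+struct v4l2_subdev_selection sel;
+
+memset(&amp;sel, 0, sizeof(sel));
+sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+sel.pad = 0;				/* sink pad, device specific */
+sel.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL;
+sel.flags = V4L2_SUBDEV_SEL_FLAG_SIZE_LE; /* round the size down if needed */
+sel.r.left = 48;
+sel.r.top = 32;
+sel.r.width = 1280;
+sel.r.height = 720;
+
+if (-1 == ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &amp;sel))
+	perror("VIDIOC_SUBDEV_S_SELECTION");
+/* sel.r now holds the rectangle the driver actually took into use. */
+</programlisting>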
+ </section>
+
+ <section>
+ <title>Types of selection targets</title>
+
+ <section>
+ <title>ACTUAL targets</title>
+
+      <para>ACTUAL targets reflect the actual hardware configuration
+      at any point in time. There is a BOUNDS target
+      corresponding to every ACTUAL target.</para>
+ </section>
+
+ <section>
+ <title>BOUNDS targets</title>
+
+      <para>A BOUNDS target is the smallest rectangle that contains
+      all valid ACTUAL rectangles. It may not be possible to set the
+      ACTUAL rectangle as large as the BOUNDS rectangle, however.
+      This may be because e.g. a sensor's pixel array is not
+      rectangular but cross-shaped or round. The maximum size may
+      also be smaller than the BOUNDS rectangle.</para>
+ </section>
- <para>Cropping behaviour on output pads is not defined.</para>
+ </section>
+
+ <section>
+ <title>Order of configuration and format propagation</title>
+
+    <para>Inside subdevs, the order of image processing steps will
+    always be from the sink pad towards the source pad. This is also
+    reflected in the order in which the configuration must be
+    performed by the user: the changes made will be propagated to
+    any subsequent stages. If this behaviour is not desired, the
+    user must set the
+    <constant>V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG</constant> flag. With
+    this flag set, the changes are not propagated under any
+    circumstances. This may also cause the accessed rectangle to be
+    adjusted by the driver, depending on the properties of the
+    underlying hardware.</para>
+
+    <para>The coordinates for a step always refer to the actual size
+    of the previous step. The exception to this rule is the source
+    compose rectangle, which refers to the sink compose bounds
+    rectangle, if it is supported by the hardware.</para>
+
+    <orderedlist>
+      <listitem><para>Sink pad format. The user configures the sink pad
+      format. This format defines the parameters of the image the
+      entity receives through the pad for further processing.</para></listitem>
+
+      <listitem><para>Sink pad actual crop selection. The sink pad crop
+      defines the crop performed on the sink pad format.</para></listitem>
+
+      <listitem><para>Sink pad actual compose selection. The size of the
+      sink pad compose rectangle defines the scaling ratio compared
+      to the size of the sink pad crop rectangle. The location of
+      the compose rectangle specifies the location of the actual
+      sink compose rectangle in the sink compose bounds
+      rectangle.</para></listitem>
+
+      <listitem><para>Source pad actual crop selection. The crop on the source
+      pad defines the crop performed on the image in the sink compose
+      bounds rectangle.</para></listitem>
+
+      <listitem><para>Source pad format. The source pad format defines the
+      output pixel format of the subdev, as well as the other
+      parameters with the exception of the image width and height.
+      Width and height are defined by the size of the source pad
+      actual crop selection.</para></listitem>
+    </orderedlist>
+
+    <para>Accessing any of the above rectangles not supported by the
+    subdev will return <constant>EINVAL</constant>. Any rectangle that
+    refers to a previous, unsupported rectangle's coordinates will
+    instead refer to the previous supported rectangle. For example, if the
+    sink crop is not supported, the compose selection will refer to the
+    sink pad format dimensions instead. A configuration sketch is given at the end of this section.</para>
+
+ <figure id="subdev-image-processing-crop">
+ <title>Image processing in subdevs: simple crop example</title>
+ <mediaobject>
+ <imageobject>
+ <imagedata fileref="subdev-image-processing-crop.svg"
+ format="SVG" scale="200" />
+ </imageobject>
+ </mediaobject>
+ </figure>
+
+ <para>In the above example, the subdev supports cropping on its
+ sink pad. To configure it, the user sets the media bus format on
+ the subdev's sink pad. Now the actual crop rectangle can be set
+ on the sink pad --- the location and size of this rectangle
+ reflect the location and size of a rectangle to be cropped from
+ the sink format. The size of the sink crop rectangle will also
+ be the size of the format of the subdev's source pad.</para>
+
+ <figure id="subdev-image-processing-scaling-multi-source">
+ <title>Image processing in subdevs: scaling with multiple sources</title>
+ <mediaobject>
+ <imageobject>
+ <imagedata fileref="subdev-image-processing-scaling-multi-source.svg"
+ format="SVG" scale="200" />
+ </imageobject>
+ </mediaobject>
+ </figure>
+
+    <para>In this example, the subdev is capable of first cropping,
+    then scaling and finally cropping for two source pads
+    individually from the resulting scaled image. The location of
+    the scaled image in the cropped image is ignored in the sink
+    compose target. The locations of both source crop rectangles
+    refer to the sink scaling rectangle, each independently cropping
+    an area from it at the location specified by that source crop
+    rectangle.</para>
+
+ <figure id="subdev-image-processing-full">
+ <title>Image processing in subdevs: scaling and composition
+ with multiple sinks and sources</title>
+ <mediaobject>
+ <imageobject>
+ <imagedata fileref="subdev-image-processing-full.svg"
+ format="SVG" scale="200" />
+ </imageobject>
+ </mediaobject>
+ </figure>
+
+ <para>The subdev driver supports two sink pads and two source
+ pads. The images from both of the sink pads are individually
+ cropped, then scaled and further composed on the composition
+ bounds rectangle. From that, two independent streams are cropped
+ and sent out of the subdev from the source pads.</para>
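+    <para>The configuration order described above might look as follows in
+    application code. This is an editor's sketch under the same naming
+    assumptions as in the earlier sketch; the media bus code, sizes and pad
+    numbers are arbitrary examples and error handling is omitted.</para>
+    <programlisting>
+struct v4l2_subdev_format fmt;
+struct v4l2_subdev_selection sel;
+
+/* 1. Sink pad format: the image the entity receives for processing. */
+memset(&amp;fmt, 0, sizeof(fmt));
+fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+fmt.pad = 0;
+fmt.format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+fmt.format.width = 1920;
+fmt.format.height = 1080;
+ioctl(fd, VIDIOC_SUBDEV_S_FMT, &amp;fmt);
+
+/* 2. Sink crop: the area of interest within the sink format. */
+memset(&amp;sel, 0, sizeof(sel));
+sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+sel.pad = 0;
+sel.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL;
+sel.r.left = 0;
+sel.r.top = 0;
+sel.r.width = 1600;
+sel.r.height = 900;
+ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &amp;sel);
+
+/* 3. Sink compose: its size sets the scaling ratio. */
+sel.target = V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL;
+sel.r.width = 800;
+sel.r.height = 450;
+ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &amp;sel);
+
+/* 4. Source crop: its size becomes the source pad format size. */
+sel.pad = 1;
+sel.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL;
+sel.r.width = 640;
+sel.r.height = 360;
+ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &amp;sel);
+</programlisting>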
</section>
+
</section>
&sub-subdev-formats;
diff --git a/Documentation/DocBook/media/v4l/io.xml b/Documentation/DocBook/media/v4l/io.xml
index b815929b5bba..fd6aca2922b6 100644
--- a/Documentation/DocBook/media/v4l/io.xml
+++ b/Documentation/DocBook/media/v4l/io.xml
@@ -543,12 +543,13 @@ and can range from zero to the number of buffers allocated
with the &VIDIOC-REQBUFS; ioctl (&v4l2-requestbuffers; <structfield>count</structfield>) minus one.</entry>
</row>
<row>
- <entry>&v4l2-buf-type;</entry>
+ <entry>__u32</entry>
<entry><structfield>type</structfield></entry>
<entry></entry>
<entry>Type of the buffer, same as &v4l2-format;
<structfield>type</structfield> or &v4l2-requestbuffers;
-<structfield>type</structfield>, set by the application.</entry>
+<structfield>type</structfield>, set by the application. See <xref
+linkend="v4l2-buf-type" />.</entry>
</row>
<row>
<entry>__u32</entry>
@@ -568,7 +569,7 @@ refers to an input stream, applications when an output stream.</entry>
linkend="buffer-flags" />.</entry>
</row>
<row>
- <entry>&v4l2-field;</entry>
+ <entry>__u32</entry>
<entry><structfield>field</structfield></entry>
<entry></entry>
<entry>Indicates the field order of the image in the
@@ -630,11 +631,12 @@ bandwidth. These devices identify by not enumerating any video
standards, see <xref linkend="standard" />.</para></entry>
</row>
<row>
- <entry>&v4l2-memory;</entry>
+ <entry>__u32</entry>
<entry><structfield>memory</structfield></entry>
<entry></entry>
<entry>This field must be set by applications and/or drivers
-in accordance with the selected I/O method.</entry>
+in accordance with the selected I/O method. See <xref linkend="v4l2-memory"
+ />.</entry>
</row>
<row>
<entry>union</entry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-srggb10.xml b/Documentation/DocBook/media/v4l/pixfmt-srggb10.xml
index 7b274092e60c..c1c62a9acc2a 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-srggb10.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-srggb10.xml
@@ -1,4 +1,4 @@
- <refentry>
+ <refentry id="pixfmt-srggb10">
<refmeta>
<refentrytitle>V4L2_PIX_FMT_SRGGB10 ('RG10'),
V4L2_PIX_FMT_SGRBG10 ('BA10'),
diff --git a/Documentation/DocBook/media/v4l/pixfmt-srggb10dpcm8.xml b/Documentation/DocBook/media/v4l/pixfmt-srggb10dpcm8.xml
new file mode 100644
index 000000000000..8eace3e2e7d4
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/pixfmt-srggb10dpcm8.xml
@@ -0,0 +1,29 @@
+ <refentry id="pixfmt-srggb10dpcm8">
+ <refmeta>
+ <refentrytitle>
+ V4L2_PIX_FMT_SBGGR10DPCM8 ('bBA8'),
+ V4L2_PIX_FMT_SGBRG10DPCM8 ('bGA8'),
+ V4L2_PIX_FMT_SGRBG10DPCM8 ('BD10'),
+ V4L2_PIX_FMT_SRGGB10DPCM8 ('bRA8'),
+ </refentrytitle>
+ &manvol;
+ </refmeta>
+ <refnamediv>
+ <refname id="V4L2-PIX-FMT-SBGGR10DPCM8"><constant>V4L2_PIX_FMT_SBGGR10DPCM8</constant></refname>
+ <refname id="V4L2-PIX-FMT-SGBRG10DPCM8"><constant>V4L2_PIX_FMT_SGBRG10DPCM8</constant></refname>
+ <refname id="V4L2-PIX-FMT-SGRBG10DPCM8"><constant>V4L2_PIX_FMT_SGRBG10DPCM8</constant></refname>
+ <refname id="V4L2-PIX-FMT-SRGGB10DPCM8"><constant>V4L2_PIX_FMT_SRGGB10DPCM8</constant></refname>
+ <refpurpose>10-bit Bayer formats compressed to 8 bits</refpurpose>
+ </refnamediv>
+ <refsect1>
+ <title>Description</title>
+
+ <para>The following four pixel formats are raw sRGB / Bayer formats
+ with 10 bits per colour compressed to 8 bits each, using DPCM
+ compression. DPCM, differential pulse-code modulation, is lossy.
+ Each colour component consumes 8 bits of memory. In other respects
+    this format is similar to <xref
+    linkend="pixfmt-srggb10" />.</para>
+
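+    <para>As an illustration only (the buffer type, resolution and the
+    assumption of an already-open device file descriptor fd are the editor's
+    examples), one of these formats can be requested like any other pixel
+    format; with 8 bits per sample the line stride is typically equal to the
+    image width:</para>
+    <programlisting>
+struct v4l2_format fmt;
+
+memset(&amp;fmt, 0, sizeof(fmt));
+fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+fmt.fmt.pix.width = 1280;
+fmt.fmt.pix.height = 720;
+fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8;
+fmt.fmt.pix.field = V4L2_FIELD_NONE;
+
+if (-1 == ioctl(fd, VIDIOC_S_FMT, &amp;fmt))
+	perror("VIDIOC_S_FMT");
+/* The driver returns the bytesperline and sizeimage it selected. */
+</programlisting>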
+ </refsect1>
+ </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt.xml b/Documentation/DocBook/media/v4l/pixfmt.xml
index 31eaae2469f9..f5ac15ed0549 100644
--- a/Documentation/DocBook/media/v4l/pixfmt.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt.xml
@@ -673,6 +673,7 @@ access the palette, this must be done with ioctls of the Linux framebuffer API.<
&sub-srggb8;
&sub-sbggr16;
&sub-srggb10;
+ &sub-srggb10dpcm8;
&sub-srggb12;
</section>
@@ -876,11 +877,6 @@ kernel sources in the file <filename>Documentation/video4linux/cx2341x/README.hm
<entry>'S561'</entry>
<entry>Compressed GBRG Bayer format used by the gspca driver.</entry>
</row>
- <row id="V4L2-PIX-FMT-SGRBG10DPCM8">
- <entry><constant>V4L2_PIX_FMT_SGRBG10DPCM8</constant></entry>
- <entry>'DB10'</entry>
- <entry>10 bit raw Bayer DPCM compressed to 8 bits.</entry>
- </row>
<row id="V4L2-PIX-FMT-PAC207">
<entry><constant>V4L2_PIX_FMT_PAC207</constant></entry>
<entry>'P207'</entry>
diff --git a/Documentation/DocBook/media/v4l/subdev-image-processing-crop.dia b/Documentation/DocBook/media/v4l/subdev-image-processing-crop.dia
new file mode 100644
index 000000000000..e32ba5362e1d
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/subdev-image-processing-crop.dia
@@ -0,0 +1,614 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<dia:diagram xmlns:dia="http://www.lysator.liu.se/~alla/dia/">
+ <dia:diagramdata>
+ <dia:attribute name="background">
+ <dia:color val="#ffffff"/>
+ </dia:attribute>
+ <dia:attribute name="pagebreak">
+ <dia:color val="#000099"/>
+ </dia:attribute>
+ <dia:attribute name="paper">
+ <dia:composite type="paper">
+ <dia:attribute name="name">
+ <dia:string>#A4#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="tmargin">
+ <dia:real val="2.8222000598907471"/>
+ </dia:attribute>
+ <dia:attribute name="bmargin">
+ <dia:real val="2.8222000598907471"/>
+ </dia:attribute>
+ <dia:attribute name="lmargin">
+ <dia:real val="2.8222000598907471"/>
+ </dia:attribute>
+ <dia:attribute name="rmargin">
+ <dia:real val="2.8222000598907471"/>
+ </dia:attribute>
+ <dia:attribute name="is_portrait">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="scaling">
+ <dia:real val="0.49000000953674316"/>
+ </dia:attribute>
+ <dia:attribute name="fitto">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="grid">
+ <dia:composite type="grid">
+ <dia:attribute name="width_x">
+ <dia:real val="1"/>
+ </dia:attribute>
+ <dia:attribute name="width_y">
+ <dia:real val="1"/>
+ </dia:attribute>
+ <dia:attribute name="visible_x">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="visible_y">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:composite type="color"/>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#d8e5e5"/>
+ </dia:attribute>
+ <dia:attribute name="guides">
+ <dia:composite type="guides">
+ <dia:attribute name="hguides"/>
+ <dia:attribute name="vguides"/>
+ </dia:composite>
+ </dia:attribute>
+ </dia:diagramdata>
+ <dia:layer name="Background" visible="true" active="true">
+ <dia:object type="Standard - Box" version="0" id="O0">
+ <dia:attribute name="obj_pos">
+ <dia:point val="-0.4,6.5"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="-0.45,6.45;23.1387,16.2"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="-0.4,6.5"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="23.48871579904775"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="9.6500000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O1">
+ <dia:attribute name="obj_pos">
+ <dia:point val="0.225,9.45"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="0.175,9.4;8.225,14.7"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="0.225,9.45"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="7.9499999999999975"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="5.1999999999999975"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#a52a2a"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O2">
+ <dia:attribute name="obj_pos">
+ <dia:point val="3.175,10.55"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="3.125,10.5;7.925,14.45"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="3.175,10.55"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="4.6999999999999975"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="3.8499999999999979"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#0000ff"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O3">
+ <dia:attribute name="obj_pos">
+ <dia:point val="3.725,11.3875"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="3.725,10.7925;6.6025,13.14"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#sink
+crop
+selection#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="3.725,11.3875"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#0000ff"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O4">
+ <dia:attribute name="obj_pos">
+ <dia:point val="1.475,7.9"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="1.475,7.305;1.475,8.0525"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>##</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="1.475,7.9"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O5">
+ <dia:attribute name="obj_pos">
+ <dia:point val="0.426918,7.89569"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="0.426918,7.30069;3.90942,8.84819"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#sink media
+bus format#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="0.426918,7.89569"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#a52a2a"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O6">
+ <dia:attribute name="obj_pos">
+ <dia:point val="17.4887,7.75"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="17.4887,7.155;21.8112,8.7025"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#source media
+bus format#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="17.4887,7.75"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#8b6914"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O7">
+ <dia:attribute name="obj_pos">
+ <dia:point val="17.5244,9.5417"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="17.4744,9.4917;22.2387,13.35"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="17.5244,9.5417"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="4.6643157990477508"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="3.758300000000002"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#8b6914"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O8">
+ <dia:attribute name="obj_pos">
+ <dia:point val="17.5244,13.3"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="3.12132,13.2463;17.5781,14.4537"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="17.5244,13.3"/>
+ <dia:point val="3.175,14.4"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O7" connection="5"/>
+ <dia:connection handle="1" to="O2" connection="5"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O9">
+ <dia:attribute name="obj_pos">
+ <dia:point val="17.5244,9.5417"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="3.12162,9.48832;17.5778,10.6034"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="17.5244,9.5417"/>
+ <dia:point val="3.175,10.55"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O7" connection="0"/>
+ <dia:connection handle="1" to="O2" connection="0"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O10">
+ <dia:attribute name="obj_pos">
+ <dia:point val="22.1887,13.3"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="7.82132,13.2463;22.2424,14.4537"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="22.1887,13.3"/>
+ <dia:point val="7.875,14.4"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O7" connection="7"/>
+ <dia:connection handle="1" to="O2" connection="7"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O11">
+ <dia:attribute name="obj_pos">
+ <dia:point val="22.1887,9.5417"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="7.82161,9.48831;22.2421,10.6034"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="22.1887,9.5417"/>
+ <dia:point val="7.875,10.55"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O7" connection="2"/>
+ <dia:connection handle="1" to="O2" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Geometric - Perfect Circle" version="1" id="O12">
+ <dia:attribute name="obj_pos">
+ <dia:point val="23.23,10.5742"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="23.18,10.5242;24.13,11.4742"/>
+ </dia:attribute>
+ <dia:attribute name="meta">
+ <dia:composite type="dict"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="23.23,10.5742"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="line_width">
+ <dia:real val="0.10000000000000001"/>
+ </dia:attribute>
+ <dia:attribute name="line_colour">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="fill_colour">
+ <dia:color val="#ffffff"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="0"/>
+ <dia:real val="1"/>
+ </dia:attribute>
+ <dia:attribute name="flip_horizontal">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="flip_vertical">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="subscale">
+ <dia:real val="1"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O13">
+ <dia:attribute name="obj_pos">
+ <dia:point val="24.08,10.9992"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="24.03,10.6388;32.4953,11.3624"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="24.08,10.9992"/>
+ <dia:point val="32.3835,11.0007"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow">
+ <dia:enum val="22"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_length">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_width">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O12" connection="3"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O14">
+ <dia:attribute name="obj_pos">
+ <dia:point val="25.3454,10.49"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="25.3454,9.895;29.9904,10.6425"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#pad 1 (source)#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="25.3454,10.49"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Geometric - Perfect Circle" version="1" id="O15">
+ <dia:attribute name="obj_pos">
+ <dia:point val="-1.44491,11.6506"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="-1.49491,11.6006;-0.54491,12.5506"/>
+ </dia:attribute>
+ <dia:attribute name="meta">
+ <dia:composite type="dict"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="-1.44491,11.6506"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="line_width">
+ <dia:real val="0.10000000000000001"/>
+ </dia:attribute>
+ <dia:attribute name="line_colour">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="fill_colour">
+ <dia:color val="#ffffff"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="0"/>
+ <dia:real val="1"/>
+ </dia:attribute>
+ <dia:attribute name="flip_horizontal">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="flip_vertical">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="subscale">
+ <dia:real val="1"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O16">
+ <dia:attribute name="obj_pos">
+ <dia:point val="-9.61991,12.09"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="-9.67,11.7149;-1.33311,12.4385"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="-9.61991,12.09"/>
+ <dia:point val="-1.44491,12.0756"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow">
+ <dia:enum val="22"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_length">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_width">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="1" to="O15" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O17">
+ <dia:attribute name="obj_pos">
+ <dia:point val="-7.39291,11.49"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="-7.39291,10.895;-3.58791,11.6425"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#pad 0 (sink)#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="-7.39291,11.49"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ </dia:layer>
+</dia:diagram>
diff --git a/Documentation/DocBook/media/v4l/subdev-image-processing-crop.svg b/Documentation/DocBook/media/v4l/subdev-image-processing-crop.svg
new file mode 100644
index 000000000000..18b0f5de9ed2
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/subdev-image-processing-crop.svg
@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/PR-SVG-20010719/DTD/svg10.dtd">
+<svg width="43cm" height="10cm" viewBox="-194 128 844 196" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x="-8" y="130" width="469.774" height="193"/>
+ <g>
+ <rect style="fill: #ffffff" x="4.5" y="189" width="159" height="104"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a52a2a" x="4.5" y="189" width="159" height="104"/>
+ </g>
+ <g>
+ <rect style="fill: #ffffff" x="63.5" y="211" width="94" height="77"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x="63.5" y="211" width="94" height="77"/>
+ </g>
+ <text style="fill: #0000ff;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="74.5" y="227.75">
+ <tspan x="74.5" y="227.75">sink</tspan>
+ <tspan x="74.5" y="243.75">crop</tspan>
+ <tspan x="74.5" y="259.75">selection</tspan>
+ </text>
+ <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="29.5" y="158">
+ <tspan x="29.5" y="158"></tspan>
+ </text>
+ <text style="fill: #a52a2a;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="8.53836" y="157.914">
+ <tspan x="8.53836" y="157.914">sink media</tspan>
+ <tspan x="8.53836" y="173.914">bus format</tspan>
+ </text>
+ <text style="fill: #8b6914;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="349.774" y="155">
+ <tspan x="349.774" y="155">source media</tspan>
+ <tspan x="349.774" y="171">bus format</tspan>
+ </text>
+ <g>
+ <rect style="fill: #ffffff" x="350.488" y="190.834" width="93.2863" height="75.166"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #8b6914" x="350.488" y="190.834" width="93.2863" height="75.166"/>
+ </g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="350.488" y1="266" x2="63.5" y2="288"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="350.488" y1="190.834" x2="63.5" y2="211"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="443.774" y1="266" x2="157.5" y2="288"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="443.774" y1="190.834" x2="157.5" y2="211"/>
+ <g>
+ <ellipse style="fill: #ffffff" cx="473.1" cy="219.984" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="473.1" cy="219.984" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="473.1" cy="219.984" rx="8.5" ry="8.5"/>
+ </g>
+ <g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="481.6" y1="219.984" x2="637.934" y2="220.012"/>
+ <polygon style="fill: #000000" points="645.434,220.014 635.433,225.012 637.934,220.012 635.435,215.012 "/>
+ <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="645.434,220.014 635.433,225.012 637.934,220.012 635.435,215.012 "/>
+ </g>
+ <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="506.908" y="209.8">
+ <tspan x="506.908" y="209.8">pad 1 (source)</tspan>
+ </text>
+ <g>
+ <ellipse style="fill: #ffffff" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
+ </g>
+ <g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="-192.398" y1="241.8" x2="-38.6343" y2="241.529"/>
+ <polygon style="fill: #000000" points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "/>
+ <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "/>
+ </g>
+ <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="-147.858" y="229.8">
+ <tspan x="-147.858" y="229.8">pad 0 (sink)</tspan>
+ </text>
+</svg>
diff --git a/Documentation/DocBook/media/v4l/subdev-image-processing-full.dia b/Documentation/DocBook/media/v4l/subdev-image-processing-full.dia
new file mode 100644
index 000000000000..a0d782927840
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/subdev-image-processing-full.dia
@@ -0,0 +1,1588 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<dia:diagram xmlns:dia="http://www.lysator.liu.se/~alla/dia/">
+ <dia:diagramdata>
+ <dia:attribute name="background">
+ <dia:color val="#ffffff"/>
+ </dia:attribute>
+ <dia:attribute name="pagebreak">
+ <dia:color val="#000099"/>
+ </dia:attribute>
+ <dia:attribute name="paper">
+ <dia:composite type="paper">
+ <dia:attribute name="name">
+ <dia:string>#A4#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="tmargin">
+ <dia:real val="2.8222000598907471"/>
+ </dia:attribute>
+ <dia:attribute name="bmargin">
+ <dia:real val="2.8222000598907471"/>
+ </dia:attribute>
+ <dia:attribute name="lmargin">
+ <dia:real val="2.8222000598907471"/>
+ </dia:attribute>
+ <dia:attribute name="rmargin">
+ <dia:real val="2.8222000598907471"/>
+ </dia:attribute>
+ <dia:attribute name="is_portrait">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="scaling">
+ <dia:real val="0.49000000953674316"/>
+ </dia:attribute>
+ <dia:attribute name="fitto">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="grid">
+ <dia:composite type="grid">
+ <dia:attribute name="width_x">
+ <dia:real val="1"/>
+ </dia:attribute>
+ <dia:attribute name="width_y">
+ <dia:real val="1"/>
+ </dia:attribute>
+ <dia:attribute name="visible_x">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="visible_y">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:composite type="color"/>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#d8e5e5"/>
+ </dia:attribute>
+ <dia:attribute name="guides">
+ <dia:composite type="guides">
+ <dia:attribute name="hguides"/>
+ <dia:attribute name="vguides"/>
+ </dia:composite>
+ </dia:attribute>
+ </dia:diagramdata>
+ <dia:layer name="Background" visible="true" active="true">
+ <dia:object type="Standard - Box" version="0" id="O0">
+ <dia:attribute name="obj_pos">
+ <dia:point val="15.945,6.45"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="15.895,6.4;26.4,18.95"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="15.945,6.45"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="10.404999999254942"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="12.449999999999992"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#ff765a"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O1">
+ <dia:attribute name="obj_pos">
+ <dia:point val="-0.1,3.65"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="-0.15,3.6;40.25,20.85"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="-0.1,3.65"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="40.300000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="17.149999999999999"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Geometric - Perfect Circle" version="1" id="O2">
+ <dia:attribute name="obj_pos">
+ <dia:point val="-1.05,7.9106"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="-1.1,7.8606;-0.15,8.8106"/>
+ </dia:attribute>
+ <dia:attribute name="meta">
+ <dia:composite type="dict"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="-1.05,7.9106"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="line_width">
+ <dia:real val="0.10000000000000001"/>
+ </dia:attribute>
+ <dia:attribute name="line_colour">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="fill_colour">
+ <dia:color val="#ffffff"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="0"/>
+ <dia:real val="1"/>
+ </dia:attribute>
+ <dia:attribute name="flip_horizontal">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="flip_vertical">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="subscale">
+ <dia:real val="1"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Geometric - Perfect Circle" version="1" id="O3">
+ <dia:attribute name="obj_pos">
+ <dia:point val="40.3366,9.8342"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="40.2866,9.7842;41.2366,10.7342"/>
+ </dia:attribute>
+ <dia:attribute name="meta">
+ <dia:composite type="dict"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="40.3366,9.8342"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="line_width">
+ <dia:real val="0.10000000000000001"/>
+ </dia:attribute>
+ <dia:attribute name="line_colour">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="fill_colour">
+ <dia:color val="#ffffff"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="0"/>
+ <dia:real val="1"/>
+ </dia:attribute>
+ <dia:attribute name="flip_horizontal">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="flip_vertical">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="subscale">
+ <dia:real val="1"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O4">
+ <dia:attribute name="obj_pos">
+ <dia:point val="-9.225,8.35"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="-9.27509,7.97487;-0.938197,8.69848"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="-9.225,8.35"/>
+ <dia:point val="-1.05,8.3356"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow">
+ <dia:enum val="22"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_length">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_width">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="1" to="O2" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O5">
+ <dia:attribute name="obj_pos">
+ <dia:point val="41.1866,10.2592"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="41.1366,9.89879;49.6019,10.6224"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="41.1866,10.2592"/>
+ <dia:point val="49.4901,10.2607"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow">
+ <dia:enum val="22"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_length">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_width">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O3" connection="3"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O6">
+ <dia:attribute name="obj_pos">
+ <dia:point val="-6.998,7.75"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="-6.998,7.155;-3.193,7.9025"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#pad 0 (sink)#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="-6.998,7.75"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O7">
+ <dia:attribute name="obj_pos">
+ <dia:point val="42.452,9.75"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="42.452,9.155;47.097,9.9025"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#pad 2 (source)#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="42.452,9.75"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O8">
+ <dia:attribute name="obj_pos">
+ <dia:point val="0.275,6"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="0.225,5.95;8.275,11.25"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="0.275,6"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="7.9499999999999975"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="5.1999999999999975"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#a52a2a"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O9">
+ <dia:attribute name="obj_pos">
+ <dia:point val="3.125,6.8"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="3.075,6.75;7.875,10.7"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="3.125,6.8"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="4.6999999999999975"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="3.8499999999999979"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#0000ff"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O10">
+ <dia:attribute name="obj_pos">
+ <dia:point val="1.525,4.45"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="1.525,3.855;1.525,4.6025"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>##</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="1.525,4.45"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O11">
+ <dia:attribute name="obj_pos">
+ <dia:point val="0.476918,4.44569"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="0.476918,3.85069;3.95942,5.39819"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#sink media
+bus format#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="0.476918,4.44569"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#a52a2a"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O12">
+ <dia:attribute name="obj_pos">
+ <dia:point val="16.6822,9.28251"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="16.6322,9.23251;24.9922,17.9564"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="16.6822,9.28251"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="8.2600228398861297"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="8.6238900617957164"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#00ff00"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O13">
+ <dia:attribute name="obj_pos">
+ <dia:point val="16.6822,17.9064"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="3.05732,10.5823;16.7499,17.9741"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="16.6822,17.9064"/>
+ <dia:point val="3.125,10.65"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O12" connection="5"/>
+ <dia:connection handle="1" to="O9" connection="5"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O14">
+ <dia:attribute name="obj_pos">
+ <dia:point val="16.6822,9.28251"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="3.06681,6.74181;16.7404,9.3407"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="16.6822,9.28251"/>
+ <dia:point val="3.125,6.8"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O12" connection="0"/>
+ <dia:connection handle="1" to="O9" connection="0"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O15">
+ <dia:attribute name="obj_pos">
+ <dia:point val="24.9422,17.9064"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="7.75945,10.5845;25.0077,17.9719"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="24.9422,17.9064"/>
+ <dia:point val="7.825,10.65"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O12" connection="7"/>
+ <dia:connection handle="1" to="O9" connection="7"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O16">
+ <dia:attribute name="obj_pos">
+ <dia:point val="24.9422,9.28251"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="7.76834,6.74334;24.9989,9.33917"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="24.9422,9.28251"/>
+ <dia:point val="7.825,6.8"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O12" connection="2"/>
+ <dia:connection handle="1" to="O9" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O17">
+ <dia:attribute name="obj_pos">
+ <dia:point val="16.7352,7.47209"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="16.7352,6.87709;22.5602,8.42459"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#sink compose
+selection (scaling)#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="16.7352,7.47209"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#00ff00"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O18">
+ <dia:attribute name="obj_pos">
+ <dia:point val="20.4661,9.72825"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="20.4161,9.67825;25.5254,13.3509"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="20.4661,9.72825"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="5.009308462554376"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="3.5726155970598077"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#a020f0"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O19">
+ <dia:attribute name="obj_pos">
+ <dia:point val="34.475,5.2564"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="34.475,4.6614;38.7975,6.2089"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#source media
+bus format#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="34.475,5.2564"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#8b6914"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O20">
+ <dia:attribute name="obj_pos">
+ <dia:point val="34.4244,8.6917"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="34.3744,8.6417;39.4837,12.3143"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="34.4244,8.6917"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="5.009308462554376"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="3.5726155970598077"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#8b6914"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O21">
+ <dia:attribute name="obj_pos">
+ <dia:point val="34.4244,12.2643"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="20.4125,12.2107;34.478,13.3545"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="34.4244,12.2643"/>
+ <dia:point val="20.4661,13.3009"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O20" connection="5"/>
+ <dia:connection handle="1" to="O18" connection="5"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O22">
+ <dia:attribute name="obj_pos">
+ <dia:point val="34.4244,8.6917"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="20.4125,8.63813;34.478,9.78182"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="34.4244,8.6917"/>
+ <dia:point val="20.4661,9.72825"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O20" connection="0"/>
+ <dia:connection handle="1" to="O18" connection="0"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O23">
+ <dia:attribute name="obj_pos">
+ <dia:point val="39.4337,12.2643"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="25.4218,12.2107;39.4873,13.3545"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="39.4337,12.2643"/>
+ <dia:point val="25.4754,13.3009"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O20" connection="7"/>
+ <dia:connection handle="1" to="O18" connection="7"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O24">
+ <dia:attribute name="obj_pos">
+ <dia:point val="39.4337,8.6917"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="25.4218,8.63813;39.4873,9.78182"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="39.4337,8.6917"/>
+ <dia:point val="25.4754,9.72825"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O20" connection="2"/>
+ <dia:connection handle="1" to="O18" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O25">
+ <dia:attribute name="obj_pos">
+ <dia:point val="16.25,5.15"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="16.25,4.555;21.68,6.1025"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#sink compose
+bounds selection#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="16.25,5.15"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#ff765a"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Geometric - Perfect Circle" version="1" id="O26">
+ <dia:attribute name="obj_pos">
+ <dia:point val="-1.02991,16.6506"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="-1.07991,16.6006;-0.12991,17.5506"/>
+ </dia:attribute>
+ <dia:attribute name="meta">
+ <dia:composite type="dict"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="-1.02991,16.6506"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="line_width">
+ <dia:real val="0.10000000000000001"/>
+ </dia:attribute>
+ <dia:attribute name="line_colour">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="fill_colour">
+ <dia:color val="#ffffff"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="0"/>
+ <dia:real val="1"/>
+ </dia:attribute>
+ <dia:attribute name="flip_horizontal">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="flip_vertical">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="subscale">
+ <dia:real val="1"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O27">
+ <dia:attribute name="obj_pos">
+ <dia:point val="-9.20491,17.09"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="-9.255,16.7149;-0.918107,17.4385"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="-9.20491,17.09"/>
+ <dia:point val="-1.02991,17.0756"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow">
+ <dia:enum val="22"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_length">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_width">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="1" to="O26" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O28">
+ <dia:attribute name="obj_pos">
+ <dia:point val="-6.95,16.45"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="-6.95,15.855;-3.145,16.6025"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#pad 1 (sink)#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="-6.95,16.45"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O29">
+ <dia:attribute name="obj_pos">
+ <dia:point val="0.390412,14.64"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="0.340412,14.59;6.045,18.8"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="0.390412,14.64"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="5.604587512785236"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="4.1099999999999994"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#a52a2a"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O30">
+ <dia:attribute name="obj_pos">
+ <dia:point val="2.645,15.74"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="2.595,15.69;5.6,18.3"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="2.645,15.74"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="2.904999999254942"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="2.5100000000000016"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#0000ff"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O31">
+ <dia:attribute name="obj_pos">
+ <dia:point val="1.595,12.99"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="1.595,12.395;1.595,13.1425"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>##</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="1.595,12.99"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O32">
+ <dia:attribute name="obj_pos">
+ <dia:point val="17.945,12.595"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="2.58596,12.536;18.004,15.799"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="17.945,12.595"/>
+ <dia:point val="2.645,15.74"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O36" connection="0"/>
+ <dia:connection handle="1" to="O30" connection="0"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O33">
+ <dia:attribute name="obj_pos">
+ <dia:point val="17.945,15.8"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="2.58772,15.7427;18.0023,18.3073"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="17.945,15.8"/>
+ <dia:point val="2.645,18.25"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O36" connection="5"/>
+ <dia:connection handle="1" to="O30" connection="5"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O34">
+ <dia:attribute name="obj_pos">
+ <dia:point val="21.7,15.8"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="5.49307,15.7431;21.7569,18.3069"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="21.7,15.8"/>
+ <dia:point val="5.55,18.25"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O36" connection="7"/>
+ <dia:connection handle="1" to="O30" connection="7"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O35">
+ <dia:attribute name="obj_pos">
+ <dia:point val="21.7,12.595"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="5.49136,12.5364;21.7586,15.7986"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="21.7,12.595"/>
+ <dia:point val="5.55,15.74"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O36" connection="2"/>
+ <dia:connection handle="1" to="O30" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O36">
+ <dia:attribute name="obj_pos">
+ <dia:point val="17.945,12.595"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="17.895,12.545;21.75,15.85"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="17.945,12.595"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="3.7549999992549452"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="3.2049999992549427"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#00ff00"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O37">
+ <dia:attribute name="obj_pos">
+ <dia:point val="22.1631,14.2233"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="22.1131,14.1733;25.45,16.7"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="22.1631,14.2233"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="3.2369000000000021"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="2.4267000000000003"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#a020f0"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O38">
+ <dia:attribute name="obj_pos">
+ <dia:point val="34.6714,16.2367"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="34.6214,16.1867;37.9,18.75"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="34.6714,16.2367"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="3.178600000000003"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="2.4632999999999967"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#8b6914"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O39">
+ <dia:attribute name="obj_pos">
+ <dia:point val="34.6714,18.7"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="22.1057,16.5926;34.7288,18.7574"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="34.6714,18.7"/>
+ <dia:point val="22.1631,16.65"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O38" connection="5"/>
+ <dia:connection handle="1" to="O37" connection="5"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O40">
+ <dia:attribute name="obj_pos">
+ <dia:point val="34.6714,16.2367"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="22.1058,14.166;34.7287,16.294"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="34.6714,16.2367"/>
+ <dia:point val="22.1631,14.2233"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O38" connection="0"/>
+ <dia:connection handle="1" to="O37" connection="0"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O41">
+ <dia:attribute name="obj_pos">
+ <dia:point val="37.85,18.7"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="25.3425,16.5925;37.9075,18.7575"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="37.85,18.7"/>
+ <dia:point val="25.4,16.65"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O38" connection="7"/>
+ <dia:connection handle="1" to="O37" connection="7"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O42">
+ <dia:attribute name="obj_pos">
+ <dia:point val="37.85,16.2367"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="25.3427,14.166;37.9073,16.294"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="37.85,16.2367"/>
+ <dia:point val="25.4,14.2233"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O38" connection="2"/>
+ <dia:connection handle="1" to="O37" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Geometric - Perfect Circle" version="1" id="O43">
+ <dia:attribute name="obj_pos">
+ <dia:point val="40.347,16.7742"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="40.297,16.7242;41.247,17.6742"/>
+ </dia:attribute>
+ <dia:attribute name="meta">
+ <dia:composite type="dict"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="40.347,16.7742"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="line_width">
+ <dia:real val="0.10000000000000001"/>
+ </dia:attribute>
+ <dia:attribute name="line_colour">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="fill_colour">
+ <dia:color val="#ffffff"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="0"/>
+ <dia:real val="1"/>
+ </dia:attribute>
+ <dia:attribute name="flip_horizontal">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="flip_vertical">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="subscale">
+ <dia:real val="1"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O44">
+ <dia:attribute name="obj_pos">
+ <dia:point val="41.197,17.1992"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="41.147,16.8388;49.6123,17.5624"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="41.197,17.1992"/>
+ <dia:point val="49.5005,17.2007"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow">
+ <dia:enum val="22"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_length">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_width">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O43" connection="3"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O45">
+ <dia:attribute name="obj_pos">
+ <dia:point val="42.4624,16.69"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="42.4624,16.095;47.1074,16.8425"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#pad 3 (source)#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="42.4624,16.69"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O46">
+ <dia:attribute name="obj_pos">
+ <dia:point val="9.85,4.55"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="9.85,3.955;12.7275,6.3025"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#sink
+crop
+selection#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="9.85,4.55"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#0000ff"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O47">
+ <dia:attribute name="obj_pos">
+ <dia:point val="27.65,4.75"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="27.65,4.155;30.5275,6.5025"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#source
+crop
+selection#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="27.65,4.75"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#a020f0"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O48">
+ <dia:attribute name="obj_pos">
+ <dia:point val="10.55,6.6"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="7.7135,6.39438;10.6035,7.11605"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="10.55,6.6"/>
+ <dia:point val="7.825,6.8"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#0000ff"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow">
+ <dia:enum val="22"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_length">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_width">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="1" to="O9" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O49">
+ <dia:attribute name="obj_pos">
+ <dia:point val="10.45,6.55"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="5.48029,6.48236;10.5176,15.8387"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="10.45,6.55"/>
+ <dia:point val="5.55,15.74"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#0000ff"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow">
+ <dia:enum val="22"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_length">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_width">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="1" to="O30" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O50">
+ <dia:attribute name="obj_pos">
+ <dia:point val="27.5246,6.66071"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="25.406,6.59136;27.594,9.82122"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="27.5246,6.66071"/>
+ <dia:point val="25.4754,9.72825"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#a020f0"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow">
+ <dia:enum val="22"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_length">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_width">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="1" to="O18" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O51">
+ <dia:attribute name="obj_pos">
+ <dia:point val="27.5036,6.68935"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="25.2161,6.62775;27.5652,14.331"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="27.5036,6.68935"/>
+ <dia:point val="25.4,14.2233"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#a020f0"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow">
+ <dia:enum val="22"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_length">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_width">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="1" to="O37" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ </dia:layer>
+</dia:diagram>
diff --git a/Documentation/DocBook/media/v4l/subdev-image-processing-full.svg b/Documentation/DocBook/media/v4l/subdev-image-processing-full.svg
new file mode 100644
index 000000000000..3322cf4c0093
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/subdev-image-processing-full.svg
@@ -0,0 +1,163 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/PR-SVG-20010719/DTD/svg10.dtd">
+<svg width="59cm" height="18cm" viewBox="-186 71 1178 346" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+ <g>
+ <rect style="fill: #ffffff" x="318.9" y="129" width="208.1" height="249"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #ff765a" x="318.9" y="129" width="208.1" height="249"/>
+ </g>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x="-2" y="73" width="806" height="343"/>
+ <g>
+ <ellipse style="fill: #ffffff" cx="-12.5" cy="166.712" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-12.5" cy="166.712" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-12.5" cy="166.712" rx="8.5" ry="8.5"/>
+ </g>
+ <g>
+ <ellipse style="fill: #ffffff" cx="815.232" cy="205.184" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="815.232" cy="205.184" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="815.232" cy="205.184" rx="8.5" ry="8.5"/>
+ </g>
+ <g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="-184.5" y1="167" x2="-30.7361" y2="166.729"/>
+ <polygon style="fill: #000000" points="-23.2361,166.716 -33.2272,171.734 -30.7361,166.729 -33.2449,161.734 "/>
+ <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="-23.2361,166.716 -33.2272,171.734 -30.7361,166.729 -33.2449,161.734 "/>
+ </g>
+ <g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="823.732" y1="205.184" x2="980.066" y2="205.212"/>
+ <polygon style="fill: #000000" points="987.566,205.214 977.565,210.212 980.066,205.212 977.567,200.212 "/>
+ <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="987.566,205.214 977.565,210.212 980.066,205.212 977.567,200.212 "/>
+ </g>
+ <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="-139.96" y="155">
+ <tspan x="-139.96" y="155">pad 0 (sink)</tspan>
+ </text>
+ <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="849.04" y="195">
+ <tspan x="849.04" y="195">pad 2 (source)</tspan>
+ </text>
+ <g>
+ <rect style="fill: #ffffff" x="5.5" y="120" width="159" height="104"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a52a2a" x="5.5" y="120" width="159" height="104"/>
+ </g>
+ <g>
+ <rect style="fill: #ffffff" x="62.5" y="136" width="94" height="77"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x="62.5" y="136" width="94" height="77"/>
+ </g>
+ <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="30.5" y="89">
+ <tspan x="30.5" y="89"></tspan>
+ </text>
+ <text style="fill: #a52a2a;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="9.53836" y="88.9138">
+ <tspan x="9.53836" y="88.9138">sink media</tspan>
+ <tspan x="9.53836" y="104.914">bus format</tspan>
+ </text>
+ <g>
+ <rect style="fill: #ffffff" x="333.644" y="185.65" width="165.2" height="172.478"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #00ff00" x="333.644" y="185.65" width="165.2" height="172.478"/>
+ </g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="333.644" y1="358.128" x2="62.5" y2="213"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="333.644" y1="185.65" x2="62.5" y2="136"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="498.844" y1="358.128" x2="156.5" y2="213"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="498.844" y1="185.65" x2="156.5" y2="136"/>
+ <text style="fill: #00ff00;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="334.704" y="149.442">
+ <tspan x="334.704" y="149.442">sink compose</tspan>
+ <tspan x="334.704" y="165.442">selection (scaling)</tspan>
+ </text>
+ <g>
+ <rect style="fill: #ffffff" x="409.322" y="194.565" width="100.186" height="71.4523"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x="409.322" y="194.565" width="100.186" height="71.4523"/>
+ </g>
+ <text style="fill: #8b6914;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="689.5" y="105.128">
+ <tspan x="689.5" y="105.128">source media</tspan>
+ <tspan x="689.5" y="121.128">bus format</tspan>
+ </text>
+ <g>
+ <rect style="fill: #ffffff" x="688.488" y="173.834" width="100.186" height="71.4523"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #8b6914" x="688.488" y="173.834" width="100.186" height="71.4523"/>
+ </g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="688.488" y1="245.286" x2="409.322" y2="266.018"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="688.488" y1="173.834" x2="409.322" y2="194.565"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="788.674" y1="245.286" x2="509.508" y2="266.018"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="788.674" y1="173.834" x2="509.508" y2="194.565"/>
+ <text style="fill: #ff765a;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="325" y="103">
+ <tspan x="325" y="103">sink compose</tspan>
+ <tspan x="325" y="119">bounds selection</tspan>
+ </text>
+ <g>
+ <ellipse style="fill: #ffffff" cx="-12.0982" cy="341.512" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-12.0982" cy="341.512" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-12.0982" cy="341.512" rx="8.5" ry="8.5"/>
+ </g>
+ <g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="-184.098" y1="341.8" x2="-30.3343" y2="341.529"/>
+ <polygon style="fill: #000000" points="-22.8343,341.516 -32.8254,346.534 -30.3343,341.529 -32.8431,336.534 "/>
+ <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="-22.8343,341.516 -32.8254,346.534 -30.3343,341.529 -32.8431,336.534 "/>
+ </g>
+ <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="-139" y="329">
+ <tspan x="-139" y="329">pad 1 (sink)</tspan>
+ </text>
+ <g>
+ <rect style="fill: #ffffff" x="7.80824" y="292.8" width="112.092" height="82.2"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a52a2a" x="7.80824" y="292.8" width="112.092" height="82.2"/>
+ </g>
+ <g>
+ <rect style="fill: #ffffff" x="52.9" y="314.8" width="58.1" height="50.2"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x="52.9" y="314.8" width="58.1" height="50.2"/>
+ </g>
+ <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="31.9" y="259.8">
+ <tspan x="31.9" y="259.8"></tspan>
+ </text>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="358.9" y1="251.9" x2="52.9" y2="314.8"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="358.9" y1="316" x2="52.9" y2="365"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="434" y1="316" x2="111" y2="365"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="434" y1="251.9" x2="111" y2="314.8"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #00ff00" x="358.9" y="251.9" width="75.1" height="64.1"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x="443.262" y="284.466" width="64.738" height="48.534"/>
+ <g>
+ <rect style="fill: #ffffff" x="693.428" y="324.734" width="63.572" height="49.266"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #8b6914" x="693.428" y="324.734" width="63.572" height="49.266"/>
+ </g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="693.428" y1="374" x2="443.262" y2="333"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="693.428" y1="324.734" x2="443.262" y2="284.466"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="757" y1="374" x2="508" y2="333"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="757" y1="324.734" x2="508" y2="284.466"/>
+ <g>
+ <ellipse style="fill: #ffffff" cx="815.44" cy="343.984" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="815.44" cy="343.984" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="815.44" cy="343.984" rx="8.5" ry="8.5"/>
+ </g>
+ <g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="823.94" y1="343.984" x2="980.274" y2="344.012"/>
+ <polygon style="fill: #000000" points="987.774,344.014 977.773,349.012 980.274,344.012 977.775,339.012 "/>
+ <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="987.774,344.014 977.773,349.012 980.274,344.012 977.775,339.012 "/>
+ </g>
+ <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="849.248" y="333.8">
+ <tspan x="849.248" y="333.8">pad 3 (source)</tspan>
+ </text>
+ <text style="fill: #0000ff;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="197" y="91">
+ <tspan x="197" y="91">sink</tspan>
+ <tspan x="197" y="107">crop</tspan>
+ <tspan x="197" y="123">selection</tspan>
+ </text>
+ <text style="fill: #a020f0;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="553" y="95">
+ <tspan x="553" y="95">source</tspan>
+ <tspan x="553" y="111">crop</tspan>
+ <tspan x="553" y="127">selection</tspan>
+ </text>
+ <g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x1="211" y1="132" x2="166.21" y2="135.287"/>
+ <polygon style="fill: #0000ff" points="158.73,135.836 168.337,130.118 166.21,135.287 169.069,140.091 "/>
+ <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" points="158.73,135.836 168.337,130.118 166.21,135.287 169.069,140.091 "/>
+ </g>
+ <g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x1="209" y1="131" x2="115.581" y2="306.209"/>
+ <polygon style="fill: #0000ff" points="112.052,312.827 112.345,301.65 115.581,306.209 121.169,306.355 "/>
+ <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" points="112.052,312.827 112.345,301.65 115.581,306.209 121.169,306.355 "/>
+ </g>
+ <g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x1="550.492" y1="133.214" x2="514.916" y2="186.469"/>
+ <polygon style="fill: #a020f0" points="510.75,192.706 512.147,181.613 514.916,186.469 520.463,187.168 "/>
+ <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" points="510.75,192.706 512.147,181.613 514.916,186.469 520.463,187.168 "/>
+ </g>
+ <g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x1="550.072" y1="133.787" x2="510.618" y2="275.089"/>
+ <polygon style="fill: #a020f0" points="508.601,282.312 506.475,271.336 510.618,275.089 516.106,274.025 "/>
+ <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" points="508.601,282.312 506.475,271.336 510.618,275.089 516.106,274.025 "/>
+ </g>
+</svg>
diff --git a/Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.dia b/Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.dia
new file mode 100644
index 000000000000..0cd50a7bda80
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.dia
@@ -0,0 +1,1152 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<dia:diagram xmlns:dia="http://www.lysator.liu.se/~alla/dia/">
+ <dia:diagramdata>
+ <dia:attribute name="background">
+ <dia:color val="#ffffff"/>
+ </dia:attribute>
+ <dia:attribute name="pagebreak">
+ <dia:color val="#000099"/>
+ </dia:attribute>
+ <dia:attribute name="paper">
+ <dia:composite type="paper">
+ <dia:attribute name="name">
+ <dia:string>#A4#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="tmargin">
+ <dia:real val="2.8222000598907471"/>
+ </dia:attribute>
+ <dia:attribute name="bmargin">
+ <dia:real val="2.8222000598907471"/>
+ </dia:attribute>
+ <dia:attribute name="lmargin">
+ <dia:real val="2.8222000598907471"/>
+ </dia:attribute>
+ <dia:attribute name="rmargin">
+ <dia:real val="2.8222000598907471"/>
+ </dia:attribute>
+ <dia:attribute name="is_portrait">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="scaling">
+ <dia:real val="0.49000000953674316"/>
+ </dia:attribute>
+ <dia:attribute name="fitto">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="grid">
+ <dia:composite type="grid">
+ <dia:attribute name="width_x">
+ <dia:real val="1"/>
+ </dia:attribute>
+ <dia:attribute name="width_y">
+ <dia:real val="1"/>
+ </dia:attribute>
+ <dia:attribute name="visible_x">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="visible_y">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:composite type="color"/>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#d8e5e5"/>
+ </dia:attribute>
+ <dia:attribute name="guides">
+ <dia:composite type="guides">
+ <dia:attribute name="hguides"/>
+ <dia:attribute name="vguides"/>
+ </dia:composite>
+ </dia:attribute>
+ </dia:diagramdata>
+ <dia:layer name="Background" visible="true" active="true">
+ <dia:object type="Standard - Box" version="0" id="O0">
+ <dia:attribute name="obj_pos">
+ <dia:point val="-0.4,6.5"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="-0.45,6.45;39.95,22.9"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="-0.4,6.5"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="40.299999999999997"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="16.349999999999998"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O1">
+ <dia:attribute name="obj_pos">
+ <dia:point val="0.225,9.45"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="0.175,9.4;8.225,14.7"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="0.225,9.45"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="7.9499999999999975"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="5.1999999999999975"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#a52a2a"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O2">
+ <dia:attribute name="obj_pos">
+ <dia:point val="2.475,10.2"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="2.425,10.15;7.225,14.1"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="2.475,10.2"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="4.6999999999999975"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="3.8499999999999979"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#0000ff"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O3">
+ <dia:attribute name="obj_pos">
+ <dia:point val="3,11.2"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="3,10.605;5.8775,12.9525"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#sink
+crop
+selection#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="3,11.2"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#0000ff"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O4">
+ <dia:attribute name="obj_pos">
+ <dia:point val="1.475,7.9"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="1.475,7.305;1.475,8.0525"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>##</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="1.475,7.9"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O5">
+ <dia:attribute name="obj_pos">
+ <dia:point val="0.426918,7.89569"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="0.426918,7.30069;3.90942,8.84819"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#sink media
+bus format#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="0.426918,7.89569"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#a52a2a"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O6">
+ <dia:attribute name="obj_pos">
+ <dia:point val="16.6822,9.28251"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="16.6322,9.23251;24.9922,17.9564"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="16.6822,9.28251"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="8.2600228398861297"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="8.6238900617957164"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#00ff00"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O7">
+ <dia:attribute name="obj_pos">
+ <dia:point val="16.6822,17.9064"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="2.41365,13.9886;16.7436,17.9678"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="16.6822,17.9064"/>
+ <dia:point val="2.475,14.05"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O6" connection="5"/>
+ <dia:connection handle="1" to="O2" connection="5"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O8">
+ <dia:attribute name="obj_pos">
+ <dia:point val="16.6822,9.28251"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="2.42188,9.22939;16.7353,10.2531"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="16.6822,9.28251"/>
+ <dia:point val="2.475,10.2"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O6" connection="0"/>
+ <dia:connection handle="1" to="O2" connection="0"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O9">
+ <dia:attribute name="obj_pos">
+ <dia:point val="24.9422,17.9064"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="7.11553,13.9905;25.0017,17.9659"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="24.9422,17.9064"/>
+ <dia:point val="7.175,14.05"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O6" connection="7"/>
+ <dia:connection handle="1" to="O2" connection="7"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O10">
+ <dia:attribute name="obj_pos">
+ <dia:point val="24.9422,9.28251"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="7.12249,9.23;24.9947,10.2525"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="24.9422,9.28251"/>
+ <dia:point val="7.175,10.2"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O6" connection="2"/>
+ <dia:connection handle="1" to="O2" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O11">
+ <dia:attribute name="obj_pos">
+ <dia:point val="16.7352,7.47209"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="16.7352,6.87709;22.5602,8.42459"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#sink compose
+selection (scaling)#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="16.7352,7.47209"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#00ff00"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O12">
+ <dia:attribute name="obj_pos">
+ <dia:point val="19.1161,9.97825"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="19.0661,9.92825;24.1754,13.6009"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="19.1161,9.97825"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="5.009308462554376"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="3.5726155970598077"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#a020f0"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O13">
+ <dia:attribute name="obj_pos">
+ <dia:point val="27.1661,7.47209"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="27.1661,6.87709;30.0436,9.22459"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#source
+crop
+selection#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="27.1661,7.47209"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#a020f0"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O14">
+ <dia:attribute name="obj_pos">
+ <dia:point val="34.575,7.8564"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="34.575,7.2614;38.8975,8.8089"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#source media
+bus format#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="34.575,7.8564"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#8b6914"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O15">
+ <dia:attribute name="obj_pos">
+ <dia:point val="34.5244,11.2917"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="34.4744,11.2417;39.5837,14.9143"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="34.5244,11.2917"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="5.009308462554376"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="3.5726155970598077"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#8b6914"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O16">
+ <dia:attribute name="obj_pos">
+ <dia:point val="34.5244,14.8643"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="19.062,13.4968;34.5785,14.9184"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="34.5244,14.8643"/>
+ <dia:point val="19.1161,13.5509"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O15" connection="5"/>
+ <dia:connection handle="1" to="O12" connection="5"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O17">
+ <dia:attribute name="obj_pos">
+ <dia:point val="34.5244,11.2917"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="19.062,9.92418;34.5785,11.3458"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="34.5244,11.2917"/>
+ <dia:point val="19.1161,9.97825"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O15" connection="0"/>
+ <dia:connection handle="1" to="O12" connection="0"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O18">
+ <dia:attribute name="obj_pos">
+ <dia:point val="39.5337,14.8643"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="24.0713,13.4968;39.5878,14.9184"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="39.5337,14.8643"/>
+ <dia:point val="24.1254,13.5509"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O15" connection="7"/>
+ <dia:connection handle="1" to="O12" connection="7"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O19">
+ <dia:attribute name="obj_pos">
+ <dia:point val="39.5337,11.2917"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="24.0713,9.92418;39.5878,11.3458"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="39.5337,11.2917"/>
+ <dia:point val="24.1254,9.97825"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O15" connection="2"/>
+ <dia:connection handle="1" to="O12" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Geometric - Perfect Circle" version="1" id="O20">
+ <dia:attribute name="obj_pos">
+ <dia:point val="39.98,12.0742"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="39.93,12.0242;40.88,12.9742"/>
+ </dia:attribute>
+ <dia:attribute name="meta">
+ <dia:composite type="dict"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="39.98,12.0742"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="line_width">
+ <dia:real val="0.10000000000000001"/>
+ </dia:attribute>
+ <dia:attribute name="line_colour">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="fill_colour">
+ <dia:color val="#ffffff"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="0"/>
+ <dia:real val="1"/>
+ </dia:attribute>
+ <dia:attribute name="flip_horizontal">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="flip_vertical">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="subscale">
+ <dia:real val="1"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O21">
+ <dia:attribute name="obj_pos">
+ <dia:point val="40.83,12.4992"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="40.78,12.1388;49.2453,12.8624"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="40.83,12.4992"/>
+ <dia:point val="49.1335,12.5007"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow">
+ <dia:enum val="22"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_length">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_width">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O20" connection="3"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O22">
+ <dia:attribute name="obj_pos">
+ <dia:point val="42.0954,11.99"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="42.0954,11.395;46.7404,12.1425"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#pad 1 (source)#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="42.0954,11.99"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Geometric - Perfect Circle" version="1" id="O23">
+ <dia:attribute name="obj_pos">
+ <dia:point val="-1.44491,11.6506"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="-1.49491,11.6006;-0.54491,12.5506"/>
+ </dia:attribute>
+ <dia:attribute name="meta">
+ <dia:composite type="dict"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="-1.44491,11.6506"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="line_width">
+ <dia:real val="0.10000000000000001"/>
+ </dia:attribute>
+ <dia:attribute name="line_colour">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="fill_colour">
+ <dia:color val="#ffffff"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="0"/>
+ <dia:real val="1"/>
+ </dia:attribute>
+ <dia:attribute name="flip_horizontal">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="flip_vertical">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="subscale">
+ <dia:real val="1"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O24">
+ <dia:attribute name="obj_pos">
+ <dia:point val="-9.61991,12.09"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="-9.67,11.7149;-1.33311,12.4385"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="-9.61991,12.09"/>
+ <dia:point val="-1.44491,12.0756"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow">
+ <dia:enum val="22"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_length">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_width">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="1" to="O23" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O25">
+ <dia:attribute name="obj_pos">
+ <dia:point val="-7.39291,11.49"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="-7.39291,10.895;-3.58791,11.6425"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#pad 0 (sink)#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="-7.39291,11.49"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O26">
+ <dia:attribute name="obj_pos">
+ <dia:point val="19.4911,13.8333"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="19.4411,13.7833;24.5504,17.4559"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="19.4911,13.8333"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="5.009308462554376"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="3.5726155970598077"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#a020f0"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Box" version="0" id="O27">
+ <dia:attribute name="obj_pos">
+ <dia:point val="34.4994,17.2967"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="34.4494,17.2467;39.5587,20.9193"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="34.4994,17.2967"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="5.009308462554376"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="3.5726155970598077"/>
+ </dia:attribute>
+ <dia:attribute name="border_width">
+ <dia:real val="0.10000000149011612"/>
+ </dia:attribute>
+ <dia:attribute name="border_color">
+ <dia:color val="#8b6914"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O28">
+ <dia:attribute name="obj_pos">
+ <dia:point val="34.4994,20.8693"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="19.4311,17.3459;34.5594,20.9293"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="34.4994,20.8693"/>
+ <dia:point val="19.4911,17.4059"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O27" connection="5"/>
+ <dia:connection handle="1" to="O26" connection="5"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O29">
+ <dia:attribute name="obj_pos">
+ <dia:point val="34.4994,17.2967"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="19.4311,13.7733;34.5594,17.3567"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="34.4994,17.2967"/>
+ <dia:point val="19.4911,13.8333"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O27" connection="0"/>
+ <dia:connection handle="1" to="O26" connection="0"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O30">
+ <dia:attribute name="obj_pos">
+ <dia:point val="39.5087,20.8693"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="24.4404,17.3459;39.5687,20.9293"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="39.5087,20.8693"/>
+ <dia:point val="24.5004,17.4059"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O27" connection="7"/>
+ <dia:connection handle="1" to="O26" connection="7"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O31">
+ <dia:attribute name="obj_pos">
+ <dia:point val="39.5087,17.2967"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="24.4404,13.7733;39.5687,17.3567"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="39.5087,17.2967"/>
+ <dia:point val="24.5004,13.8333"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#e60505"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="4"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O27" connection="2"/>
+ <dia:connection handle="1" to="O26" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Geometric - Perfect Circle" version="1" id="O32">
+ <dia:attribute name="obj_pos">
+ <dia:point val="39.855,18.7792"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="39.805,18.7292;40.755,19.6792"/>
+ </dia:attribute>
+ <dia:attribute name="meta">
+ <dia:composite type="dict"/>
+ </dia:attribute>
+ <dia:attribute name="elem_corner">
+ <dia:point val="39.855,18.7792"/>
+ </dia:attribute>
+ <dia:attribute name="elem_width">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="elem_height">
+ <dia:real val="0.84999999999999787"/>
+ </dia:attribute>
+ <dia:attribute name="line_width">
+ <dia:real val="0.10000000000000001"/>
+ </dia:attribute>
+ <dia:attribute name="line_colour">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="fill_colour">
+ <dia:color val="#ffffff"/>
+ </dia:attribute>
+ <dia:attribute name="show_background">
+ <dia:boolean val="true"/>
+ </dia:attribute>
+ <dia:attribute name="line_style">
+ <dia:enum val="0"/>
+ <dia:real val="1"/>
+ </dia:attribute>
+ <dia:attribute name="flip_horizontal">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="flip_vertical">
+ <dia:boolean val="false"/>
+ </dia:attribute>
+ <dia:attribute name="subscale">
+ <dia:real val="1"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O33">
+ <dia:attribute name="obj_pos">
+ <dia:point val="40.705,19.2042"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="40.655,18.8438;49.1203,19.5674"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="40.705,19.2042"/>
+ <dia:point val="49.0085,19.2057"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow">
+ <dia:enum val="22"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_length">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_width">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="0" to="O32" connection="3"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Text" version="1" id="O34">
+ <dia:attribute name="obj_pos">
+ <dia:point val="41.9704,18.695"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="41.9704,18.1;46.6154,18.8475"/>
+ </dia:attribute>
+ <dia:attribute name="text">
+ <dia:composite type="text">
+ <dia:attribute name="string">
+ <dia:string>#pad 2 (source)#</dia:string>
+ </dia:attribute>
+ <dia:attribute name="font">
+ <dia:font family="sans" style="0" name="Helvetica"/>
+ </dia:attribute>
+ <dia:attribute name="height">
+ <dia:real val="0.80000000000000004"/>
+ </dia:attribute>
+ <dia:attribute name="pos">
+ <dia:point val="41.9704,18.695"/>
+ </dia:attribute>
+ <dia:attribute name="color">
+ <dia:color val="#000000"/>
+ </dia:attribute>
+ <dia:attribute name="alignment">
+ <dia:enum val="0"/>
+ </dia:attribute>
+ </dia:composite>
+ </dia:attribute>
+ <dia:attribute name="valign">
+ <dia:enum val="3"/>
+ </dia:attribute>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O35">
+ <dia:attribute name="obj_pos">
+ <dia:point val="27.3,9.55"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="24.0146,9.49376;27.3562,10.255"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="27.3,9.55"/>
+ <dia:point val="24.1254,9.97825"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#a020f0"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow">
+ <dia:enum val="22"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_length">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_width">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="1" to="O12" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ <dia:object type="Standard - Line" version="0" id="O36">
+ <dia:attribute name="obj_pos">
+ <dia:point val="27.3454,9.53624"/>
+ </dia:attribute>
+ <dia:attribute name="obj_bb">
+ <dia:rectangle val="24.4311,9.46695;27.4147,13.9265"/>
+ </dia:attribute>
+ <dia:attribute name="conn_endpoints">
+ <dia:point val="27.3454,9.53624"/>
+ <dia:point val="24.5004,13.8333"/>
+ </dia:attribute>
+ <dia:attribute name="numcp">
+ <dia:int val="1"/>
+ </dia:attribute>
+ <dia:attribute name="line_color">
+ <dia:color val="#a020f0"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow">
+ <dia:enum val="22"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_length">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:attribute name="end_arrow_width">
+ <dia:real val="0.5"/>
+ </dia:attribute>
+ <dia:connections>
+ <dia:connection handle="1" to="O26" connection="2"/>
+ </dia:connections>
+ </dia:object>
+ </dia:layer>
+</dia:diagram>
diff --git a/Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.svg b/Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.svg
new file mode 100644
index 000000000000..2340c0f8bc92
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.svg
@@ -0,0 +1,116 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/PR-SVG-20010719/DTD/svg10.dtd">
+<svg width="59cm" height="17cm" viewBox="-194 128 1179 330" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x="-8" y="130" width="806" height="327"/>
+ <g>
+ <rect style="fill: #ffffff" x="4.5" y="189" width="159" height="104"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a52a2a" x="4.5" y="189" width="159" height="104"/>
+ </g>
+ <g>
+ <rect style="fill: #ffffff" x="49.5" y="204" width="94" height="77"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x="49.5" y="204" width="94" height="77"/>
+ </g>
+ <text style="fill: #0000ff;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="60" y="224">
+ <tspan x="60" y="224">sink</tspan>
+ <tspan x="60" y="240">crop</tspan>
+ <tspan x="60" y="256">selection</tspan>
+ </text>
+ <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="29.5" y="158">
+ <tspan x="29.5" y="158"></tspan>
+ </text>
+ <text style="fill: #a52a2a;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="8.53836" y="157.914">
+ <tspan x="8.53836" y="157.914">sink media</tspan>
+ <tspan x="8.53836" y="173.914">bus format</tspan>
+ </text>
+ <g>
+ <rect style="fill: #ffffff" x="333.644" y="185.65" width="165.2" height="172.478"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #00ff00" x="333.644" y="185.65" width="165.2" height="172.478"/>
+ </g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="333.644" y1="358.128" x2="49.5" y2="281"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="333.644" y1="185.65" x2="49.5" y2="204"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="498.844" y1="358.128" x2="143.5" y2="281"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="498.844" y1="185.65" x2="143.5" y2="204"/>
+ <text style="fill: #00ff00;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="334.704" y="149.442">
+ <tspan x="334.704" y="149.442">sink compose</tspan>
+ <tspan x="334.704" y="165.442">selection (scaling)</tspan>
+ </text>
+ <g>
+ <rect style="fill: #ffffff" x="382.322" y="199.565" width="100.186" height="71.4523"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x="382.322" y="199.565" width="100.186" height="71.4523"/>
+ </g>
+ <text style="fill: #a020f0;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="543.322" y="149.442">
+ <tspan x="543.322" y="149.442">source</tspan>
+ <tspan x="543.322" y="165.442">crop</tspan>
+ <tspan x="543.322" y="181.442">selection</tspan>
+ </text>
+ <text style="fill: #8b6914;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="691.5" y="157.128">
+ <tspan x="691.5" y="157.128">source media</tspan>
+ <tspan x="691.5" y="173.128">bus format</tspan>
+ </text>
+ <g>
+ <rect style="fill: #ffffff" x="690.488" y="225.834" width="100.186" height="71.4523"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #8b6914" x="690.488" y="225.834" width="100.186" height="71.4523"/>
+ </g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="690.488" y1="297.286" x2="382.322" y2="271.018"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="690.488" y1="225.834" x2="382.322" y2="199.565"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="790.674" y1="297.286" x2="482.508" y2="271.018"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="790.674" y1="225.834" x2="482.508" y2="199.565"/>
+ <g>
+ <ellipse style="fill: #ffffff" cx="808.1" cy="249.984" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="808.1" cy="249.984" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="808.1" cy="249.984" rx="8.5" ry="8.5"/>
+ </g>
+ <g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="816.6" y1="249.984" x2="972.934" y2="250.012"/>
+ <polygon style="fill: #000000" points="980.434,250.014 970.433,255.012 972.934,250.012 970.435,245.012 "/>
+ <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="980.434,250.014 970.433,255.012 972.934,250.012 970.435,245.012 "/>
+ </g>
+ <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="841.908" y="239.8">
+ <tspan x="841.908" y="239.8">pad 1 (source)</tspan>
+ </text>
+ <g>
+ <ellipse style="fill: #ffffff" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
+ </g>
+ <g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="-192.398" y1="241.8" x2="-38.6343" y2="241.529"/>
+ <polygon style="fill: #000000" points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "/>
+ <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "/>
+ </g>
+ <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="-147.858" y="229.8">
+ <tspan x="-147.858" y="229.8">pad 0 (sink)</tspan>
+ </text>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x="389.822" y="276.666" width="100.186" height="71.4523"/>
+ <g>
+ <rect style="fill: #ffffff" x="689.988" y="345.934" width="100.186" height="71.4523"/>
+ <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #8b6914" x="689.988" y="345.934" width="100.186" height="71.4523"/>
+ </g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="689.988" y1="417.386" x2="389.822" y2="348.118"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="689.988" y1="345.934" x2="389.822" y2="276.666"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="790.174" y1="417.386" x2="490.008" y2="348.118"/>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="790.174" y1="345.934" x2="490.008" y2="276.666"/>
+ <g>
+ <ellipse style="fill: #ffffff" cx="805.6" cy="384.084" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="805.6" cy="384.084" rx="8.5" ry="8.5"/>
+ <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="805.6" cy="384.084" rx="8.5" ry="8.5"/>
+ </g>
+ <g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="814.1" y1="384.084" x2="970.434" y2="384.112"/>
+ <polygon style="fill: #000000" points="977.934,384.114 967.933,389.112 970.434,384.112 967.935,379.112 "/>
+ <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="977.934,384.114 967.933,389.112 970.434,384.112 967.935,379.112 "/>
+ </g>
+ <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="839.408" y="373.9">
+ <tspan x="839.408" y="373.9">pad 2 (source)</tspan>
+ </text>
+ <g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x1="546" y1="191" x2="492.157" y2="198.263"/>
+ <polygon style="fill: #a020f0" points="484.724,199.266 493.966,192.974 492.157,198.263 495.303,202.884 "/>
+ <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" points="484.724,199.266 493.966,192.974 492.157,198.263 495.303,202.884 "/>
+ </g>
+ <g>
+ <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x1="546.908" y1="190.725" x2="495.383" y2="268.548"/>
+ <polygon style="fill: #a020f0" points="491.242,274.802 492.594,263.703 495.383,268.548 500.932,269.224 "/>
+ <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" points="491.242,274.802 492.594,263.703 495.383,268.548 500.932,269.224 "/>
+ </g>
+</svg>
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
index 8ae38876172e..015c561754b7 100644
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/Documentation/DocBook/media/v4l/v4l2.xml
@@ -28,8 +28,8 @@ documentation.</contrib>
<firstname>Hans</firstname>
<surname>Verkuil</surname>
<contrib>Designed and documented the VIDIOC_LOG_STATUS ioctl,
-the extended control ioctls and major parts of the sliced VBI
-API.</contrib>
+the extended control ioctls, major parts of the sliced VBI API, the
+MPEG encoder and decoder APIs and the DV Timings API.</contrib>
<affiliation>
<address>
<email>hverkuil@xs4all.nl</email>
@@ -96,6 +96,17 @@ Remote Controller chapter.</contrib>
</address>
</affiliation>
</author>
+
+ <author>
+ <firstname>Sakari</firstname>
+ <surname>Ailus</surname>
+ <contrib>Subdev selections API.</contrib>
+ <affiliation>
+ <address>
+ <email>sakari.ailus@iki.fi</email>
+ </address>
+ </affiliation>
+ </author>
</authorgroup>
<copyright>
@@ -112,6 +123,7 @@ Remote Controller chapter.</contrib>
<year>2009</year>
<year>2010</year>
<year>2011</year>
+ <year>2012</year>
<holder>Bill Dirks, Michael H. Schimek, Hans Verkuil, Martin
Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab,
Pawel Osciak</holder>
@@ -128,6 +140,28 @@ structs, ioctls) must be noted in more detail in the history chapter
applications. -->
<revision>
+ <revnumber>3.5</revnumber>
+ <date>2012-05-07</date>
+ <authorinitials>sa, sn</authorinitials>
+ <revremark>Added V4L2_CTRL_TYPE_INTEGER_MENU and V4L2 subdev
+ selections API. Improved the description of V4L2_CID_COLORFX
+ control, added V4L2_CID_COLORFX_CBCR control.
+ Added camera controls V4L2_CID_AUTO_EXPOSURE_BIAS,
+ V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE, V4L2_CID_IMAGE_STABILIZATION,
+ V4L2_CID_ISO_SENSITIVITY, V4L2_CID_ISO_SENSITIVITY_AUTO,
+ V4L2_CID_EXPOSURE_METERING, V4L2_CID_SCENE_MODE,
+ V4L2_CID_3A_LOCK, V4L2_CID_AUTO_FOCUS_START,
+ V4L2_CID_AUTO_FOCUS_STOP, V4L2_CID_AUTO_FOCUS_STATUS
+ and V4L2_CID_AUTO_FOCUS_RANGE.
+ </revremark>
+ <date>2012-05-01</date>
+ <authorinitials>hv</authorinitials>
+ <revremark>Added VIDIOC_ENUM_DV_TIMINGS, VIDIOC_QUERY_DV_TIMINGS and
+ VIDIOC_DV_TIMINGS_CAP.
+ </revremark>
+ </revision>
+
+ <revision>
<revnumber>3.4</revnumber>
<date>2012-01-25</date>
<authorinitials>sn</authorinitials>
@@ -433,7 +467,7 @@ and discussions on the V4L mailing list.</revremark>
</partinfo>
<title>Video for Linux Two API Specification</title>
- <subtitle>Revision 3.3</subtitle>
+ <subtitle>Revision 3.5</subtitle>
<chapter id="common">
&sub-common;
@@ -491,10 +525,12 @@ and discussions on the V4L mailing list.</revremark>
&sub-dbg-g-register;
&sub-decoder-cmd;
&sub-dqevent;
+ &sub-dv-timings-cap;
&sub-encoder-cmd;
&sub-enumaudio;
&sub-enumaudioout;
&sub-enum-dv-presets;
+ &sub-enum-dv-timings;
&sub-enum-fmt;
&sub-enum-framesizes;
&sub-enum-frameintervals;
@@ -529,6 +565,7 @@ and discussions on the V4L mailing list.</revremark>
&sub-querycap;
&sub-queryctrl;
&sub-query-dv-preset;
+ &sub-query-dv-timings;
&sub-querystd;
&sub-prepare-buf;
&sub-reqbufs;
@@ -540,6 +577,7 @@ and discussions on the V4L mailing list.</revremark>
&sub-subdev-g-crop;
&sub-subdev-g-fmt;
&sub-subdev-g-frame-interval;
+ &sub-subdev-g-selection;
&sub-subscribe-event;
<!-- End of ioctls. -->
&sub-mmap;
diff --git a/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml b/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
index 73ae8a6cd004..765549ff8a71 100644
--- a/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
@@ -48,6 +48,12 @@
<refsect1>
<title>Description</title>
+ <note>
+ <title>Experimental</title>
+ <para>This is an <link linkend="experimental"> experimental </link>
+ interface and may change in the future.</para>
+ </note>
+
<para>This ioctl is used to create buffers for <link linkend="mmap">memory
mapped</link> or <link linkend="userp">user pointer</link>
I/O. It can be used as an alternative or in addition to the
@@ -94,16 +100,18 @@ information.</para>
<entry>The number of buffers requested or granted.</entry>
</row>
<row>
- <entry>&v4l2-memory;</entry>
+ <entry>__u32</entry>
<entry><structfield>memory</structfield></entry>
<entry>Applications set this field to
<constant>V4L2_MEMORY_MMAP</constant> or
-<constant>V4L2_MEMORY_USERPTR</constant>.</entry>
+<constant>V4L2_MEMORY_USERPTR</constant>. See <xref linkend="v4l2-memory"
+/>.</entry>
</row>
<row>
- <entry>&v4l2-format;</entry>
+ <entry>__u32</entry>
<entry><structfield>format</structfield></entry>
- <entry>Filled in by the application, preserved by the driver.</entry>
+ <entry>Filled in by the application, preserved by the driver.
+ See <xref linkend="v4l2-format" />.</entry>
</row>
<row>
<entry>__u32</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-cropcap.xml b/Documentation/DocBook/media/v4l/vidioc-cropcap.xml
index b4f2f255211e..f1bac2c6e978 100644
--- a/Documentation/DocBook/media/v4l/vidioc-cropcap.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-cropcap.xml
@@ -65,7 +65,7 @@ output.</para>
&cs-str;
<tbody valign="top">
<row>
- <entry>&v4l2-buf-type;</entry>
+ <entry>__u32</entry>
<entry><structfield>type</structfield></entry>
<entry>Type of the data stream, set by the application.
Only these types are valid here:
@@ -73,7 +73,7 @@ Only these types are valid here:
<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant>,
<constant>V4L2_BUF_TYPE_VIDEO_OVERLAY</constant>, and custom (driver
defined) types with code <constant>V4L2_BUF_TYPE_PRIVATE</constant>
-and higher.</entry>
+and higher. See <xref linkend="v4l2-buf-type" />.</entry>
</row>
<row>
<entry>struct <link linkend="v4l2-rect-crop">v4l2_rect</link></entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml b/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml
new file mode 100644
index 000000000000..6673ce582050
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml
@@ -0,0 +1,211 @@
+<refentry id="vidioc-dv-timings-cap">
+ <refmeta>
+ <refentrytitle>ioctl VIDIOC_DV_TIMINGS_CAP</refentrytitle>
+ &manvol;
+ </refmeta>
+
+ <refnamediv>
+ <refname>VIDIOC_DV_TIMINGS_CAP</refname>
+ <refpurpose>The capabilities of the Digital Video receiver/transmitter</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcprototype>
+ <funcdef>int <function>ioctl</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ <paramdef>int <parameter>request</parameter></paramdef>
+ <paramdef>struct v4l2_dv_timings_cap *<parameter>argp</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Arguments</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><parameter>fd</parameter></term>
+ <listitem>
+ <para>&fd;</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>request</parameter></term>
+ <listitem>
+ <para>VIDIOC_DV_TIMINGS_CAP</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>argp</parameter></term>
+ <listitem>
+ <para></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Description</title>
+
+ <note>
+ <title>Experimental</title>
+ <para>This is an <link linkend="experimental"> experimental </link>
+ interface and may change in the future.</para>
+ </note>
+
+ <para>To query the capabilities of the DV receiver or transmitter on the
+current input or output, applications call the
+<constant>VIDIOC_DV_TIMINGS_CAP</constant> ioctl with a pointer to a
+&v4l2-dv-timings-cap; structure. Drivers fill in the structure or return an
+&EINVAL; if DV timings are not supported on the current input or output.
+The <structfield>bt</structfield> member of the union describes the limits,
+supported standards and capabilities of BT.656/1120 hardware. Note that
+drivers may report different capabilities after switching the video input
+or output.</para>
+
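For illustration, a minimal C sketch of querying these capabilities, assuming fd is an already opened V4L2 device node (the helper name is hypothetical):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Query the DV timings limits of the currently selected input or output. */
    static int query_dv_caps(int fd, struct v4l2_dv_timings_cap *cap)
    {
            memset(cap, 0, sizeof(*cap));
            if (ioctl(fd, VIDIOC_DV_TIMINGS_CAP, cap) < 0)
                    return -1;      /* e.g. EINVAL if DV timings are unsupported */
            /* cap->bt.min_width .. cap->bt.max_pixelclock hold the limits;
             * cap->bt.standards and cap->bt.capabilities hold the flags
             * described in the tables below. */
            return 0;
    }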
+ <table pgwide="1" frame="none" id="v4l2-bt-timings-cap">
+ <title>struct <structname>v4l2_bt_timings_cap</structname></title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>min_width</structfield></entry>
+ <entry>Minimum width of the active video in pixels.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>max_width</structfield></entry>
+ <entry>Maximum width of the active video in pixels.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>min_height</structfield></entry>
+ <entry>Minimum height of the active video in lines.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>max_height</structfield></entry>
+ <entry>Maximum height of the active video in lines.</entry>
+ </row>
+ <row>
+ <entry>__u64</entry>
+ <entry><structfield>min_pixelclock</structfield></entry>
+ <entry>Minimum pixelclock frequency in Hz.</entry>
+ </row>
+ <row>
+ <entry>__u64</entry>
+ <entry><structfield>max_pixelclock</structfield></entry>
+ <entry>Maximum pixelclock frequency in Hz.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>standards</structfield></entry>
+ <entry>The video standard(s) supported by the hardware.
+ See <xref linkend="dv-bt-standards"/> for a list of standards.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>capabilities</structfield></entry>
+ <entry>Several flags giving more information about the capabilities.
+ See <xref linkend="dv-bt-cap-capabilities"/> for a description of the flags.
+ </entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[16]</entry>
+ <entry></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <table pgwide="1" frame="none" id="v4l2-dv-timings-cap">
+ <title>struct <structname>v4l2_dv_timings_cap</structname></title>
+ <tgroup cols="4">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>type</structfield></entry>
+ <entry>Type of DV timings as listed in <xref linkend="dv-timing-types"/>.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[3]</entry>
+ <entry>Reserved for future extensions. Drivers must set the array to zero.</entry>
+ </row>
+ <row>
+ <entry>union</entry>
+ <entry><structfield></structfield></entry>
+ <entry></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry>&v4l2-bt-timings-cap;</entry>
+ <entry><structfield>bt</structfield></entry>
+ <entry>BT.656/1120 timings capabilities of the hardware.</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry>__u32</entry>
+ <entry><structfield>raw_data</structfield>[32]</entry>
+ <entry></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <table pgwide="1" frame="none" id="dv-bt-cap-capabilities">
+ <title>DV BT Timing capabilities</title>
+ <tgroup cols="2">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>Flag</entry>
+ <entry>Description</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_BT_CAP_INTERLACED</entry>
+ <entry>Interlaced formats are supported.
+ </entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_BT_CAP_PROGRESSIVE</entry>
+ <entry>Progressive formats are supported.
+ </entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_BT_CAP_REDUCED_BLANKING</entry>
+ <entry>CVT/GTF specific: the timings can make use of reduced blanking (CVT)
+or the 'Secondary GTF' curve (GTF).
+ </entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_BT_CAP_CUSTOM</entry>
+ <entry>Can support non-standard timings, i.e. timings not belonging to the
+standards set in the <structfield>standards</structfield> field.
+ </entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </refsect1>
+
+ <refsect1>
+ &return-value;
+ </refsect1>
+</refentry>
+
+<!--
+Local Variables:
+mode: sgml
+sgml-parent-document: "v4l2.sgml"
+indent-tabs-mode: nil
+End:
+-->
diff --git a/Documentation/DocBook/media/v4l/vidioc-enum-dv-presets.xml b/Documentation/DocBook/media/v4l/vidioc-enum-dv-presets.xml
index 0be17c232d3a..509f0012d2a6 100644
--- a/Documentation/DocBook/media/v4l/vidioc-enum-dv-presets.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-enum-dv-presets.xml
@@ -48,6 +48,10 @@
<refsect1>
<title>Description</title>
+ <para>This ioctl is <emphasis role="bold">deprecated</emphasis>.
+ New drivers and applications should use &VIDIOC-ENUM-DV-TIMINGS; instead.
+ </para>
+
<para>To query the attributes of a DV preset, applications initialize the
<structfield>index</structfield> field and zero the reserved array of &v4l2-dv-enum-preset;
and call the <constant>VIDIOC_ENUM_DV_PRESETS</constant> ioctl with a pointer to this
diff --git a/Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml b/Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml
new file mode 100644
index 000000000000..24c3bf4fd29a
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml
@@ -0,0 +1,119 @@
+<refentry id="vidioc-enum-dv-timings">
+ <refmeta>
+ <refentrytitle>ioctl VIDIOC_ENUM_DV_TIMINGS</refentrytitle>
+ &manvol;
+ </refmeta>
+
+ <refnamediv>
+ <refname>VIDIOC_ENUM_DV_TIMINGS</refname>
+ <refpurpose>Enumerate supported Digital Video timings</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcprototype>
+ <funcdef>int <function>ioctl</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ <paramdef>int <parameter>request</parameter></paramdef>
+ <paramdef>struct v4l2_enum_dv_timings *<parameter>argp</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Arguments</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><parameter>fd</parameter></term>
+ <listitem>
+ <para>&fd;</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>request</parameter></term>
+ <listitem>
+ <para>VIDIOC_ENUM_DV_TIMINGS</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>argp</parameter></term>
+ <listitem>
+ <para></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Description</title>
+
+ <note>
+ <title>Experimental</title>
+ <para>This is an <link linkend="experimental"> experimental </link>
+ interface and may change in the future.</para>
+ </note>
+
+ <para>While some DV receivers or transmitters support a wide range of timings, others
+support only a limited number of timings. With this ioctl applications can enumerate a list
+of known supported timings. Call &VIDIOC-DV-TIMINGS-CAP; to check if it also supports other
+standards or even custom timings that are not in this list.</para>
+
+ <para>To query the available timings, applications initialize the
+<structfield>index</structfield> field and zero the reserved array of &v4l2-enum-dv-timings;
+and call the <constant>VIDIOC_ENUM_DV_TIMINGS</constant> ioctl with a pointer to this
+structure. Drivers fill the rest of the structure or return an
+&EINVAL; when the index is out of bounds. To enumerate all supported DV timings,
+applications shall begin at index zero, incrementing by one until the
+driver returns <errorcode>EINVAL</errorcode>. Note that drivers may enumerate a
+different set of DV timings after switching the video input or
+output.</para>
+
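As a sketch, the enumeration loop described above could look like this in C (fd is assumed to be an open V4L2 device node; the function name is illustrative):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Enumerate every DV timing the driver reports for the current input. */
    static void enum_dv_timings(int fd)
    {
            struct v4l2_enum_dv_timings et;
            unsigned int i;

            for (i = 0; ; i++) {
                    memset(&et, 0, sizeof(et));
                    et.index = i;
                    if (ioctl(fd, VIDIOC_ENUM_DV_TIMINGS, &et) < 0)
                            break;  /* EINVAL: index is out of bounds */
                    /* et.timings describes the i-th supported timing */
            }
    }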
+ <table pgwide="1" frame="none" id="v4l2-enum-dv-timings">
+ <title>struct <structname>v4l2_enum_dv_timings</structname></title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>index</structfield></entry>
+ <entry>Number of the DV timings, set by the
+application.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[3]</entry>
+ <entry>Reserved for future extensions. Drivers must set the array to zero.</entry>
+ </row>
+ <row>
+ <entry>&v4l2-dv-timings;</entry>
+ <entry><structfield>timings</structfield></entry>
+ <entry>The timings.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </refsect1>
+
+ <refsect1>
+ &return-value;
+
+ <variablelist>
+ <varlistentry>
+ <term><errorcode>EINVAL</errorcode></term>
+ <listitem>
+ <para>The &v4l2-enum-dv-timings; <structfield>index</structfield>
+is out of bounds.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+</refentry>
+
+<!--
+Local Variables:
+mode: sgml
+sgml-parent-document: "v4l2.sgml"
+indent-tabs-mode: nil
+End:
+-->
diff --git a/Documentation/DocBook/media/v4l/vidioc-enum-fmt.xml b/Documentation/DocBook/media/v4l/vidioc-enum-fmt.xml
index 347d142e7431..81ebe48317fe 100644
--- a/Documentation/DocBook/media/v4l/vidioc-enum-fmt.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-enum-fmt.xml
@@ -71,7 +71,7 @@ the application. This is in no way related to the <structfield>
pixelformat</structfield> field.</entry>
</row>
<row>
- <entry>&v4l2-buf-type;</entry>
+ <entry>__u32</entry>
<entry><structfield>type</structfield></entry>
<entry>Type of the data stream, set by the application.
Only these types are valid here:
@@ -81,7 +81,7 @@ Only these types are valid here:
<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant>,
<constant>V4L2_BUF_TYPE_VIDEO_OVERLAY</constant>, and custom (driver
defined) types with code <constant>V4L2_BUF_TYPE_PRIVATE</constant>
-and higher.</entry>
+and higher. See <xref linkend="v4l2-buf-type" />.</entry>
</row>
<row>
<entry>__u32</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-enuminput.xml b/Documentation/DocBook/media/v4l/vidioc-enuminput.xml
index 9b8efcd6e947..46d5a044a537 100644
--- a/Documentation/DocBook/media/v4l/vidioc-enuminput.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-enuminput.xml
@@ -285,7 +285,7 @@ input/output interface to linux-media@vger.kernel.org on 19 Oct 2009.
<row>
<entry><constant>V4L2_IN_CAP_CUSTOM_TIMINGS</constant></entry>
<entry>0x00000002</entry>
- <entry>This input supports setting custom video timings by using VIDIOC_S_DV_TIMINGS.</entry>
+ <entry>This input supports setting video timings by using VIDIOC_S_DV_TIMINGS.</entry>
</row>
<row>
<entry><constant>V4L2_IN_CAP_STD</constant></entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-enumoutput.xml b/Documentation/DocBook/media/v4l/vidioc-enumoutput.xml
index a64d5ef103fa..428020000ef0 100644
--- a/Documentation/DocBook/media/v4l/vidioc-enumoutput.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-enumoutput.xml
@@ -170,7 +170,7 @@ input/output interface to linux-media@vger.kernel.org on 19 Oct 2009.
<row>
<entry><constant>V4L2_OUT_CAP_CUSTOM_TIMINGS</constant></entry>
<entry>0x00000002</entry>
- <entry>This output supports setting custom video timings by using VIDIOC_S_DV_TIMINGS.</entry>
+ <entry>This output supports setting video timings by using VIDIOC_S_DV_TIMINGS.</entry>
</row>
<row>
<entry><constant>V4L2_OUT_CAP_STD</constant></entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-crop.xml b/Documentation/DocBook/media/v4l/vidioc-g-crop.xml
index 01a50640dce0..c4ff3b1887fb 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-crop.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-crop.xml
@@ -100,14 +100,14 @@ changed and <constant>VIDIOC_S_CROP</constant> returns the
&cs-str;
<tbody valign="top">
<row>
- <entry>&v4l2-buf-type;</entry>
+ <entry>__u32</entry>
<entry><structfield>type</structfield></entry>
<entry>Type of the data stream, set by the application.
Only these types are valid here: <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>,
<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant>,
<constant>V4L2_BUF_TYPE_VIDEO_OVERLAY</constant>, and custom (driver
defined) types with code <constant>V4L2_BUF_TYPE_PRIVATE</constant>
-and higher.</entry>
+and higher. See <xref linkend="v4l2-buf-type" />.</entry>
</row>
<row>
<entry>&v4l2-rect;</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-dv-preset.xml b/Documentation/DocBook/media/v4l/vidioc-g-dv-preset.xml
index 7940c1149393..61be9fa3803a 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-dv-preset.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-dv-preset.xml
@@ -48,6 +48,12 @@
<refsect1>
<title>Description</title>
+
+ <para>These ioctls are <emphasis role="bold">deprecated</emphasis>.
+ New drivers and applications should use &VIDIOC-G-DV-TIMINGS; and &VIDIOC-S-DV-TIMINGS;
+ instead.
+ </para>
+
<para>To query and select the current DV preset, applications
use the <constant>VIDIOC_G_DV_PRESET</constant> and <constant>VIDIOC_S_DV_PRESET</constant>
ioctls which take a pointer to a &v4l2-dv-preset; type as argument.
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml b/Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml
index 4a8648ae9a63..eda1a2991bbe 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml
@@ -7,7 +7,7 @@
<refnamediv>
<refname>VIDIOC_G_DV_TIMINGS</refname>
<refname>VIDIOC_S_DV_TIMINGS</refname>
- <refpurpose>Get or set custom DV timings for input or output</refpurpose>
+ <refpurpose>Get or set DV timings for input or output</refpurpose>
</refnamediv>
<refsynopsisdiv>
@@ -48,12 +48,15 @@
<refsect1>
<title>Description</title>
- <para>To set custom DV timings for the input or output, applications use the
-<constant>VIDIOC_S_DV_TIMINGS</constant> ioctl and to get the current custom timings,
+ <para>To set DV timings for the input or output, applications use the
+<constant>VIDIOC_S_DV_TIMINGS</constant> ioctl and to get the current timings,
applications use the <constant>VIDIOC_G_DV_TIMINGS</constant> ioctl. The detailed timing
information is filled in using the structure &v4l2-dv-timings;. These ioctls take
a pointer to the &v4l2-dv-timings; structure as argument. If the ioctl is not supported
or the timing values are not correct, the driver returns &EINVAL;.</para>
+<para>The <filename>linux/v4l2-dv-timings.h</filename> header can be used to get the
+timings of the formats in the <xref linkend="cea861" /> and <xref linkend="vesadmt" />
+standards.</para>
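As a hedged sketch of the set path, the fragment below programs 1080p BT.656/1120 timings by hand; the numeric values are examples only, fd is assumed to be an open device node, and a real application would normally take complete timings from VIDIOC_ENUM_DV_TIMINGS or the defines in linux/v4l2-dv-timings.h:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int set_dv_timings(int fd)
    {
            struct v4l2_dv_timings t;

            memset(&t, 0, sizeof(t));
            t.type = V4L2_DV_BT_656_1120;
            t.bt.width = 1920;
            t.bt.height = 1080;
            t.bt.pixelclock = 148500000ULL;
            /* hfrontporch/hsync/hbackporch and the vertical counterparts
             * must also be filled in; the driver returns EINVAL if the
             * resulting timings are not supported. */
            return ioctl(fd, VIDIOC_S_DV_TIMINGS, &t);
    }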
</refsect1>
<refsect1>
@@ -83,12 +86,13 @@ or the timing values are not correct, the driver returns &EINVAL;.</para>
<row>
<entry>__u32</entry>
<entry><structfield>width</structfield></entry>
- <entry>Width of the active video in pixels</entry>
+ <entry>Width of the active video in pixels.</entry>
</row>
<row>
<entry>__u32</entry>
<entry><structfield>height</structfield></entry>
- <entry>Height of the active video in lines</entry>
+ <entry>Height of the active video frame in lines. So for interlaced formats the
+ height of the active video in each field is <structfield>height</structfield>/2.</entry>
</row>
<row>
<entry>__u32</entry>
@@ -125,32 +129,52 @@ bit 0 (V4L2_DV_VSYNC_POS_POL) is for vertical sync polarity and bit 1 (V4L2_DV_H
<row>
<entry>__u32</entry>
<entry><structfield>vfrontporch</structfield></entry>
- <entry>Vertical front porch in lines</entry>
+ <entry>Vertical front porch in lines. For interlaced formats this refers to the
+ odd field (aka field 1).</entry>
</row>
<row>
<entry>__u32</entry>
<entry><structfield>vsync</structfield></entry>
- <entry>Vertical sync length in lines</entry>
+ <entry>Vertical sync length in lines. For interlaced formats this refers to the
+ odd field (aka field 1).</entry>
</row>
<row>
<entry>__u32</entry>
<entry><structfield>vbackporch</structfield></entry>
- <entry>Vertical back porch in lines</entry>
+ <entry>Vertical back porch in lines. For interlaced formats this refers to the
+ odd field (aka field 1).</entry>
</row>
<row>
<entry>__u32</entry>
<entry><structfield>il_vfrontporch</structfield></entry>
- <entry>Vertical front porch in lines for bottom field of interlaced field formats</entry>
+ <entry>Vertical front porch in lines for the even field (aka field 2) of
+ interlaced field formats.</entry>
</row>
<row>
<entry>__u32</entry>
<entry><structfield>il_vsync</structfield></entry>
- <entry>Vertical sync length in lines for bottom field of interlaced field formats</entry>
+ <entry>Vertical sync length in lines for the even field (aka field 2) of
+ interlaced field formats.</entry>
</row>
<row>
<entry>__u32</entry>
<entry><structfield>il_vbackporch</structfield></entry>
- <entry>Vertical back porch in lines for bottom field of interlaced field formats</entry>
+ <entry>Vertical back porch in lines for the even field (aka field 2) of
+ interlaced field formats.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>standards</structfield></entry>
+ <entry>The video standard(s) this format belongs to. This will be filled in by
+ the driver. Applications must set this to 0. See <xref linkend="dv-bt-standards"/>
+ for a list of standards.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>flags</structfield></entry>
+ <entry>Several flags giving more information about the format.
+ See <xref linkend="dv-bt-flags"/> for a description of the flags.
+ </entry>
</row>
</tbody>
</tgroup>
@@ -211,6 +235,90 @@ bit 0 (V4L2_DV_VSYNC_POS_POL) is for vertical sync polarity and bit 1 (V4L2_DV_H
</tbody>
</tgroup>
</table>
+ <table pgwide="1" frame="none" id="dv-bt-standards">
+ <title>DV BT Timing standards</title>
+ <tgroup cols="2">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>Timing standard</entry>
+ <entry>Description</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_BT_STD_CEA861</entry>
+ <entry>The timings follow the CEA-861 Digital TV Profile standard</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_BT_STD_DMT</entry>
+ <entry>The timings follow the VESA Discrete Monitor Timings standard</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_BT_STD_CVT</entry>
+ <entry>The timings follow the VESA Coordinated Video Timings standard</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_BT_STD_GTF</entry>
+ <entry>The timings follow the VESA Generalized Timings Formula standard</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ <table pgwide="1" frame="none" id="dv-bt-flags">
+ <title>DV BT Timing flags</title>
+ <tgroup cols="2">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>Flag</entry>
+ <entry>Description</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_FL_REDUCED_BLANKING</entry>
+ <entry>CVT/GTF specific: the timings use reduced blanking (CVT) or the 'Secondary
+GTF' curve (GTF). In both cases the horizontal and/or vertical blanking
+intervals are reduced, allowing a higher resolution over the same
+bandwidth. This is a read-only flag, applications must not set this.
+ </entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_FL_CAN_REDUCE_FPS</entry>
+ <entry>CEA-861 specific: set for CEA-861 formats with a framerate that is a multiple
+of six. These formats can be optionally played at 1 / 1.001 speed to
+be compatible with 60 Hz based standards such as NTSC and PAL-M that use a framerate of
+29.97 frames per second. If the transmitter can't generate such frequencies, then the
+flag will also be cleared. This is a read-only flag, applications must not set this.
+ </entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_FL_REDUCED_FPS</entry>
+ <entry>CEA-861 specific: only valid for video transmitters, the flag is cleared
+by receivers. It is also only valid for formats with the V4L2_DV_FL_CAN_REDUCE_FPS flag
+set, for other formats the flag will be cleared by the driver.
+
+If the application sets this flag, then the pixelclock used to set up the transmitter is
+divided by 1.001 to make it compatible with NTSC framerates. If the transmitter
+can't generate such frequencies, then the flag will also be cleared.
+ </entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_FL_HALF_LINE</entry>
+ <entry>Specific to interlaced formats: if set, then field 1 (aka the odd field)
+is really one half-line longer and field 2 (aka the even field) is really one half-line
+shorter, so each field has exactly the same number of half-lines. Whether half-lines can be
+detected or used depends on the hardware.
+ </entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
</refsect1>
<refsect1>
&return-value;
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml b/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
index b17a7aac6997..e3d5afcdafbb 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
@@ -265,6 +265,25 @@ These controls are described in <xref
These controls are described in <xref
linkend="flash-controls" />.</entry>
</row>
+ <row>
+ <entry><constant>V4L2_CTRL_CLASS_JPEG</constant></entry>
+ <entry>0x9d0000</entry>
+ <entry>The class containing JPEG compression controls.
+These controls are described in <xref
+ linkend="jpeg-controls" />.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_CTRL_CLASS_IMAGE_SOURCE</constant></entry>
+ <entry>0x9e0000</entry> <entry>The class containing image
+ source controls. These controls are described in <xref
+ linkend="image-source-controls" />.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_CTRL_CLASS_IMAGE_PROC</constant></entry>
+ <entry>0x9f0000</entry> <entry>The class containing image
+ processing controls. These controls are described in <xref
+ linkend="image-process-controls" />.</entry>
+ </row>
</tbody>
</tgroup>
</table>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-fmt.xml b/Documentation/DocBook/media/v4l/vidioc-g-fmt.xml
index 17fbda15137b..52acff193a6f 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-fmt.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-fmt.xml
@@ -116,7 +116,7 @@ this ioctl.</para>
<colspec colname="c4" />
<tbody valign="top">
<row>
- <entry>&v4l2-buf-type;</entry>
+ <entry>__u32</entry>
<entry><structfield>type</structfield></entry>
<entry></entry>
<entry>Type of the data stream, see <xref
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml b/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
index 66e9a5257861..69c178a4d205 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
@@ -95,14 +95,14 @@ the &v4l2-output; <structfield>modulator</structfield> field and the
&v4l2-modulator; <structfield>index</structfield> field.</entry>
</row>
<row>
- <entry>&v4l2-tuner-type;</entry>
+ <entry>__u32</entry>
<entry><structfield>type</structfield></entry>
<entry>The tuner type. This is the same value as in the
-&v4l2-tuner; <structfield>type</structfield> field. The type must be set
+&v4l2-tuner; <structfield>type</structfield> field. The type must be set
to <constant>V4L2_TUNER_RADIO</constant> for <filename>/dev/radioX</filename>
device nodes, and to <constant>V4L2_TUNER_ANALOG_TV</constant>
for all others. The field is not applicable to modulators, &ie; ignored
-by drivers.</entry>
+by drivers. See <xref linkend="v4l2-tuner-type" />.</entry>
</row>
<row>
<entry>__u32</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-parm.xml b/Documentation/DocBook/media/v4l/vidioc-g-parm.xml
index 19b1d85dd668..f83d2cdd1185 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-parm.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-parm.xml
@@ -75,11 +75,12 @@ devices.</para>
&cs-ustr;
<tbody valign="top">
<row>
- <entry>&v4l2-buf-type;</entry>
+ <entry>__u32</entry>
<entry><structfield>type</structfield></entry>
<entry></entry>
<entry>The buffer (stream) type, same as &v4l2-format;
-<structfield>type</structfield>, set by the application.</entry>
+<structfield>type</structfield>, set by the application. See <xref
+ linkend="v4l2-buf-type" />.</entry>
</row>
<row>
<entry>union</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-sliced-vbi-cap.xml b/Documentation/DocBook/media/v4l/vidioc-g-sliced-vbi-cap.xml
index 71741daaf725..bd015d1563ff 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-sliced-vbi-cap.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-sliced-vbi-cap.xml
@@ -148,7 +148,7 @@ using the &VIDIOC-S-FMT; ioctl as described in <xref
<structfield>service_lines</structfield>[1][0] to zero.</entry>
</row>
<row>
- <entry>&v4l2-buf-type;</entry>
+ <entry>__u32</entry>
<entry><structfield>type</structfield></entry>
<entry>Type of the data stream, see <xref
linkend="v4l2-buf-type" />. Should be
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml b/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
index 91ec2fb658f8..62a1aa200a36 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
@@ -107,7 +107,7 @@ user.<!-- FIXME Video inputs already have a name, the purpose of this
field is not quite clear.--></para></entry>
</row>
<row>
- <entry>&v4l2-tuner-type;</entry>
+ <entry>__u32</entry>
<entry><structfield>type</structfield></entry>
<entry spanname="hspan">Type of the tuner, see <xref
linkend="v4l2-tuner-type" />.</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-prepare-buf.xml b/Documentation/DocBook/media/v4l/vidioc-prepare-buf.xml
index 7bde698760e4..fa7ad7e33228 100644
--- a/Documentation/DocBook/media/v4l/vidioc-prepare-buf.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-prepare-buf.xml
@@ -48,6 +48,12 @@
<refsect1>
<title>Description</title>
+ <note>
+ <title>Experimental</title>
+ <para>This is an <link linkend="experimental"> experimental </link>
+ interface and may change in the future.</para>
+ </note>
+
<para>Applications can optionally call the
<constant>VIDIOC_PREPARE_BUF</constant> ioctl to pass ownership of the buffer
to the driver before actually enqueuing it, using the
diff --git a/Documentation/DocBook/media/v4l/vidioc-query-dv-preset.xml b/Documentation/DocBook/media/v4l/vidioc-query-dv-preset.xml
index 23b17f604211..1bc8aeb3ff1f 100644
--- a/Documentation/DocBook/media/v4l/vidioc-query-dv-preset.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-query-dv-preset.xml
@@ -49,6 +49,10 @@ input</refpurpose>
<refsect1>
<title>Description</title>
+ <para>This ioctl is <emphasis role="bold">deprecated</emphasis>.
+ New drivers and applications should use &VIDIOC-QUERY-DV-TIMINGS; instead.
+ </para>
+
<para>The hardware may be able to detect the current DV preset
automatically, similar to sensing the video standard. To do so, applications
call <constant> VIDIOC_QUERY_DV_PRESET</constant> with a pointer to a
diff --git a/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml b/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml
new file mode 100644
index 000000000000..44935a0ffcf0
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml
@@ -0,0 +1,104 @@
+<refentry id="vidioc-query-dv-timings">
+ <refmeta>
+ <refentrytitle>ioctl VIDIOC_QUERY_DV_TIMINGS</refentrytitle>
+ &manvol;
+ </refmeta>
+
+ <refnamediv>
+ <refname>VIDIOC_QUERY_DV_TIMINGS</refname>
+ <refpurpose>Sense the DV timings received by the current
+input</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcprototype>
+ <funcdef>int <function>ioctl</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ <paramdef>int <parameter>request</parameter></paramdef>
+ <paramdef>struct v4l2_dv_timings *<parameter>argp</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Arguments</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><parameter>fd</parameter></term>
+ <listitem>
+ <para>&fd;</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>request</parameter></term>
+ <listitem>
+ <para>VIDIOC_QUERY_DV_TIMINGS</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>argp</parameter></term>
+ <listitem>
+ <para></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Description</title>
+
+ <note>
+ <title>Experimental</title>
+ <para>This is an <link linkend="experimental"> experimental </link>
+ interface and may change in the future.</para>
+ </note>
+
+ <para>The hardware may be able to detect the current DV timings
+automatically, similar to sensing the video standard. To do so, applications
+call <constant>VIDIOC_QUERY_DV_TIMINGS</constant> with a pointer to a
+&v4l2-dv-timings;. Once the hardware detects the timings, it will fill in the
+timings structure.
+
+If the timings could not be detected because there was no signal, then
+<errorcode>ENOLINK</errorcode> is returned. If a signal was detected, but
+it was unstable and the receiver could not lock to the signal, then
+<errorcode>ENOLCK</errorcode> is returned. If the receiver could lock to the signal,
+but the format is unsupported (e.g. because the pixelclock is out of range
+of the hardware capabilities), then the driver fills in whatever timings it
+could find and returns <errorcode>ERANGE</errorcode>. In that case the application
+can call &VIDIOC-DV-TIMINGS-CAP; to compare the found timings with the hardware's
+capabilities in order to give more precise feedback to the user.
+</para>
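A short C sketch of the error handling described above (fd is assumed to be an open V4L2 device node; the return-code convention is illustrative):

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int detect_timings(int fd, struct v4l2_dv_timings *t)
    {
            if (ioctl(fd, VIDIOC_QUERY_DV_TIMINGS, t) == 0)
                    return 0;       /* t holds the detected timings */
            if (errno == ENOLINK)
                    return -1;      /* no signal */
            if (errno == ENOLCK)
                    return -2;      /* signal found but unstable */
            if (errno == ERANGE)
                    return -3;      /* detected but outside hardware limits;
                                     * t holds the driver's best effort */
            return -4;              /* any other error */
    }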
+ </refsect1>
+
+ <refsect1>
+ &return-value;
+
+ <variablelist>
+ <varlistentry>
+ <term><errorcode>ENOLINK</errorcode></term>
+ <listitem>
+ <para>No timings could be detected because no signal was found.
+</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorcode>ENOLCK</errorcode></term>
+ <listitem>
+ <para>The signal was unstable and the hardware could not lock on to it.
+</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorcode>ERANGE</errorcode></term>
+ <listitem>
+ <para>Timings were found, but they are out of range of the hardware
+capabilities.
+</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml b/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml
index 36660d311b51..e6645b996558 100644
--- a/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml
@@ -127,7 +127,7 @@ the first control with a higher ID. Drivers which do not support this
flag yet always return an &EINVAL;.</entry>
</row>
<row>
- <entry>&v4l2-ctrl-type;</entry>
+ <entry>__u32</entry>
<entry><structfield>type</structfield></entry>
<entry>Type of control, see <xref
linkend="v4l2-ctrl-type" />.</entry>
@@ -215,11 +215,12 @@ the array to zero.</entry>
<table pgwide="1" frame="none" id="v4l2-querymenu">
<title>struct <structname>v4l2_querymenu</structname></title>
- <tgroup cols="3">
+ <tgroup cols="4">
&cs-str;
<tbody valign="top">
<row>
<entry>__u32</entry>
+ <entry></entry>
<entry><structfield>id</structfield></entry>
<entry>Identifies the control, set by the application
from the respective &v4l2-queryctrl;
@@ -227,18 +228,38 @@ from the respective &v4l2-queryctrl;
</row>
<row>
<entry>__u32</entry>
+ <entry></entry>
<entry><structfield>index</structfield></entry>
<entry>Index of the menu item, starting at zero, set by
the application.</entry>
</row>
<row>
+ <entry>union</entry>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ </row>
+ <row>
+ <entry></entry>
<entry>__u8</entry>
<entry><structfield>name</structfield>[32]</entry>
<entry>Name of the menu item, a NUL-terminated ASCII
-string. This information is intended for the user.</entry>
+string. This information is intended for the user. This field is valid
+for <constant>V4L2_CTRL_FLAG_MENU</constant> type controls.</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry>__s64</entry>
+ <entry><structfield>value</structfield></entry>
+ <entry>
+ Value of the integer menu item. This field is valid for
+ <constant>V4L2_CTRL_FLAG_INTEGER_MENU</constant> type
+ controls.
+ </entry>
</row>
<row>
<entry>__u32</entry>
+ <entry></entry>
<entry><structfield>reserved</structfield></entry>
<entry>Reserved for future extensions. Drivers must set
the array to zero.</entry>
@@ -292,6 +313,20 @@ the menu items can be enumerated with the
<constant>VIDIOC_QUERYMENU</constant> ioctl.</entry>
</row>
<row>
+ <entry><constant>V4L2_CTRL_TYPE_INTEGER_MENU</constant></entry>
+ <entry>&ge; 0</entry>
+ <entry>1</entry>
+ <entry>N-1</entry>
+ <entry>
+ The control has a menu of N choices. The values of the
+ menu items can be enumerated with the
+ <constant>VIDIOC_QUERYMENU</constant> ioctl. This is
+ similar to <constant>V4L2_CTRL_TYPE_MENU</constant>
+ except that instead of strings, the menu items are
+ signed 64-bit integers.
+ </entry>
+ </row>
+ <row>
<entry><constant>V4L2_CTRL_TYPE_BITMASK</constant></entry>
<entry>0</entry>
<entry>n/a</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-reqbufs.xml b/Documentation/DocBook/media/v4l/vidioc-reqbufs.xml
index 7be4b1d29b90..d7c95057bc51 100644
--- a/Documentation/DocBook/media/v4l/vidioc-reqbufs.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-reqbufs.xml
@@ -92,18 +92,19 @@ streamoff.--></para>
<entry>The number of buffers requested or granted.</entry>
</row>
<row>
- <entry>&v4l2-buf-type;</entry>
+ <entry>__u32</entry>
<entry><structfield>type</structfield></entry>
<entry>Type of the stream or buffers, this is the same
as the &v4l2-format; <structfield>type</structfield> field. See <xref
linkend="v4l2-buf-type" /> for valid values.</entry>
</row>
<row>
- <entry>&v4l2-memory;</entry>
+ <entry>__u32</entry>
<entry><structfield>memory</structfield></entry>
<entry>Applications set this field to
<constant>V4L2_MEMORY_MMAP</constant> or
-<constant>V4L2_MEMORY_USERPTR</constant>.</entry>
+<constant>V4L2_MEMORY_USERPTR</constant>. See <xref linkend="v4l2-memory"
+/>.</entry>
</row>
<row>
<entry>__u32</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-s-hw-freq-seek.xml b/Documentation/DocBook/media/v4l/vidioc-s-hw-freq-seek.xml
index 18b1a8266f7c..407dfceb71f0 100644
--- a/Documentation/DocBook/media/v4l/vidioc-s-hw-freq-seek.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-s-hw-freq-seek.xml
@@ -73,10 +73,11 @@ same value as in the &v4l2-input; <structfield>tuner</structfield>
field and the &v4l2-tuner; <structfield>index</structfield> field.</entry>
</row>
<row>
- <entry>&v4l2-tuner-type;</entry>
+ <entry>__u32</entry>
<entry><structfield>type</structfield></entry>
<entry>The tuner type. This is the same value as in the
-&v4l2-tuner; <structfield>type</structfield> field.</entry>
+&v4l2-tuner; <structfield>type</structfield> field. See <xref
+ linkend="v4l2-tuner-type" />.</entry>
</row>
<row>
<entry>__u32</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-g-crop.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-g-crop.xml
index 06197323a8cc..4cddd788c589 100644
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-g-crop.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-subdev-g-crop.xml
@@ -58,9 +58,12 @@
<title>Description</title>
<note>
- <title>Experimental</title>
- <para>This is an <link linkend="experimental">experimental</link>
- interface and may change in the future.</para>
+ <title>Obsolete</title>
+
+ <para>This is an <link linkend="obsolete">obsolete</link>
+ interface and may be removed in the future. It is superseded by
+ <link linkend="vidioc-subdev-g-selection">the selection
+ API</link>.</para>
</note>
<para>To retrieve the current crop rectangle applications set the
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml
new file mode 100644
index 000000000000..208e9f0da3f3
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml
@@ -0,0 +1,228 @@
+<refentry id="vidioc-subdev-g-selection">
+ <refmeta>
+ <refentrytitle>ioctl VIDIOC_SUBDEV_G_SELECTION, VIDIOC_SUBDEV_S_SELECTION</refentrytitle>
+ &manvol;
+ </refmeta>
+
+ <refnamediv>
+ <refname>VIDIOC_SUBDEV_G_SELECTION</refname>
+ <refname>VIDIOC_SUBDEV_S_SELECTION</refname>
+ <refpurpose>Get or set selection rectangles on a subdev pad</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcprototype>
+ <funcdef>int <function>ioctl</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ <paramdef>int <parameter>request</parameter></paramdef>
+ <paramdef>struct v4l2_subdev_selection *<parameter>argp</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Arguments</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><parameter>fd</parameter></term>
+ <listitem>
+ <para>&fd;</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>request</parameter></term>
+ <listitem>
+ <para>VIDIOC_SUBDEV_G_SELECTION, VIDIOC_SUBDEV_S_SELECTION</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>argp</parameter></term>
+ <listitem>
+ <para></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Description</title>
+
+ <note>
+ <title>Experimental</title>
+ <para>This is an <link linkend="experimental">experimental</link>
+ interface and may change in the future.</para>
+ </note>
+
+ <para>The selections are used to configure various image
+ processing functions performed by the subdevs that affect the
+ image size. This currently includes cropping, scaling and
+ composition.</para>
+
+ <para>The selection API replaces <link
+ linkend="vidioc-subdev-g-crop">the old subdev crop API</link>. All
+ the functions of the crop API, and more, are supported by the
+ selection API.</para>
+
+ <para>See <xref linkend="subdev"></xref> for
+ more information on how each selection target affects the image
+ processing pipeline inside the subdevice.</para>
+
+ <section>
+ <title>Types of selection targets</title>
+
+ <para>There are two types of selection targets: actual and bounds.
+ The ACTUAL targets configure the hardware. The BOUNDS targets
+ return a rectangle that contains all possible ACTUAL
+ rectangles.</para>
+ </section>
+
+ <section>
+ <title>Discovering supported features</title>
+
+ <para>To discover which targets are supported, the user can
+ call <constant>VIDIOC_SUBDEV_G_SELECTION</constant> on each of them.
+ For any unsupported target the ioctl returns
+ <constant>EINVAL</constant>.</para>
+ </section>
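As a sketch of the probing described above, a single target can be tested like this (fd is assumed to be an open subdev node; pad 0 and the ACTIVE configuration are arbitrary example choices):

    #include <errno.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/v4l2-subdev.h>

    /* Return 1 if the selection target is supported on pad 0, 0 if not. */
    static int target_supported(int fd, __u32 target)
    {
            struct v4l2_subdev_selection sel;

            memset(&sel, 0, sizeof(sel));
            sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
            sel.pad = 0;
            sel.target = target;    /* e.g. V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL */
            if (ioctl(fd, VIDIOC_SUBDEV_G_SELECTION, &sel) == 0)
                    return 1;
            return errno == EINVAL ? 0 : -1;
    }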
+
+ <table pgwide="1" frame="none" id="v4l2-subdev-selection-targets">
+ <title>V4L2 subdev selection targets</title>
+ <tgroup cols="3">
+ &cs-def;
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL</constant></entry>
+ <entry>0x0000</entry>
+ <entry>Actual crop. Defines the cropping
+ performed by the processing step.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS</constant></entry>
+ <entry>0x0002</entry>
+ <entry>Bounds of the crop rectangle.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL</constant></entry>
+ <entry>0x0100</entry>
+ <entry>Actual compose rectangle. Used to configure scaling
+ on sink pads and composition on source pads.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS</constant></entry>
+ <entry>0x0102</entry>
+ <entry>Bounds of the compose rectangle.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <table pgwide="1" frame="none" id="v4l2-subdev-selection-flags">
+ <title>V4L2 subdev selection flags</title>
+ <tgroup cols="3">
+ &cs-def;
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_SUBDEV_SEL_FLAG_SIZE_GE</constant></entry>
+ <entry>(1 &lt;&lt; 0)</entry> <entry>Suggest to the driver
+ that it should choose a rectangle greater than or equal in
+ size to the one requested. The driver may still choose a
+ smaller rectangle, but only due to hardware limitations.
+ Without this flag (and
+ <constant>V4L2_SUBDEV_SEL_FLAG_SIZE_LE</constant>) the
+ behaviour is to choose the closest possible
+ rectangle.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SUBDEV_SEL_FLAG_SIZE_LE</constant></entry>
+ <entry>(1 &lt;&lt; 1)</entry> <entry>Suggest to the driver
+ that it should choose a rectangle less than or equal in size
+ to the one requested. The driver may still choose a larger
+ rectangle, but only due to hardware limitations.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG</constant></entry>
+ <entry>(1 &lt;&lt; 2)</entry>
+ <entry>The configuration should not be propagated to any
+ further processing steps. If this flag is not given, the
+ configuration is propagated inside the subdevice to all
+ further processing steps.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <table pgwide="1" frame="none" id="v4l2-subdev-selection">
+ <title>struct <structname>v4l2_subdev_selection</structname></title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>which</structfield></entry>
+ <entry>Active or try selection, from
+ &v4l2-subdev-format-whence;.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>pad</structfield></entry>
+ <entry>Pad number as reported by the media framework.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>target</structfield></entry>
+ <entry>Target selection rectangle. See
+ <xref linkend="v4l2-subdev-selection-targets" />.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>flags</structfield></entry>
+ <entry>Flags. See
+ <xref linkend="v4l2-subdev-selection-flags" />.</entry>
+ </row>
+ <row>
+ <entry>&v4l2-rect;</entry>
+ <entry><structfield>rect</structfield></entry>
+ <entry>Selection rectangle, in pixels.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[8]</entry>
+ <entry>Reserved for future extensions. Applications and drivers must
+ set the array to zero.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
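+ <para>A minimal usage sketch follows (illustrative only; the pad
+ number and the crop rectangle are assumptions, and the file
+ descriptor is assumed to refer to an already opened subdev node):</para>
+
+ <programlisting>
+struct v4l2_subdev_selection sel = {
+	.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+	.pad = 0,
+	.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL,
+	.rect = { .left = 0, .top = 0, .width = 640, .height = 480 },
+};
+
+if (ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &amp;sel) &lt; 0)
+	perror("VIDIOC_SUBDEV_S_SELECTION");
+ </programlisting>
+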
+ </refsect1>
+
+ <refsect1>
+ &return-value;
+
+ <variablelist>
+ <varlistentry>
+ <term><errorcode>EBUSY</errorcode></term>
+ <listitem>
+ <para>The selection rectangle can't be changed because the
+ pad is currently busy. This can be caused, for instance, by
+ an active video stream on the pad. The ioctl must not be
+ retried without performing another action to fix the problem
+ first. Only returned by
+ <constant>VIDIOC_SUBDEV_S_SELECTION</constant>.</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorcode>EINVAL</errorcode></term>
+ <listitem>
+ <para>The &v4l2-subdev-selection;
+ <structfield>pad</structfield> references a non-existing
+ pad, the <structfield>which</structfield> field references a
+ non-existing format, or the selection target is not
+ supported on the given subdev pad.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+</refentry>
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 4468ce24427c..c379a2a6949f 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -150,7 +150,8 @@ be able to justify all violations that remain in your patch.
Look through the MAINTAINERS file and the source code, and determine
if your change applies to a specific subsystem of the kernel, with
-an assigned maintainer. If so, e-mail that person.
+an assigned maintainer. If so, e-mail that person. The script
+scripts/get_maintainer.pl can be very useful at this step.
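+For example, one possible invocation on a patch you have already
+generated (the patch file name is illustrative only) is:
+
+	perl scripts/get_maintainer.pl 0001-my-driver-fix.patch
+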
If no maintainer is listed, or the maintainer does not respond, send
your patch to the primary Linux kernel developer's mailing list,
diff --git a/Documentation/arm/SPEAr/overview.txt b/Documentation/arm/SPEAr/overview.txt
index 28a9af953b9d..57aae7765c74 100644
--- a/Documentation/arm/SPEAr/overview.txt
+++ b/Documentation/arm/SPEAr/overview.txt
@@ -8,9 +8,8 @@ Introduction
weblink : http://www.st.com/spear
The ST Microelectronics SPEAr range of ARM9/CortexA9 System-on-Chip CPUs are
- supported by the 'spear' platform of ARM Linux. Currently SPEAr300,
- SPEAr310, SPEAr320 and SPEAr600 SOCs are supported. Support for the SPEAr13XX
- series is in progress.
+ supported by the 'spear' platform of ARM Linux. Currently SPEAr1310,
+ SPEAr1340, SPEAr300, SPEAr310, SPEAr320 and SPEAr600 SOCs are supported.
Hierarchy in SPEAr is as follows:
@@ -26,33 +25,36 @@ Introduction
- SPEAr600 (SOC)
- SPEAr600 Evaluation Board
- SPEAr13XX (13XX SOC series, based on ARM CORTEXA9)
- - SPEAr1300 (SOC)
+ - SPEAr1310 (SOC)
+ - SPEAr1310 Evaluation Board
+ - SPEAr1340 (SOC)
+ - SPEAr1340 Evaluation Board
Configuration
-------------
A generic configuration is provided for each machine, and can be used as the
default by
- make spear600_defconfig
- make spear300_defconfig
- make spear310_defconfig
- make spear320_defconfig
+ make spear13xx_defconfig
+ make spear3xx_defconfig
+ make spear6xx_defconfig
Layout
------
- The common files for multiple machine families (SPEAr3XX, SPEAr6XX and
- SPEAr13XX) are located in the platform code contained in arch/arm/plat-spear
+ The common files for multiple machine families (SPEAr3xx, SPEAr6xx and
+ SPEAr13xx) are located in the platform code contained in arch/arm/plat-spear
with headers in plat/.
Each machine series have a directory with name arch/arm/mach-spear followed by
series name. Like mach-spear3xx, mach-spear6xx and mach-spear13xx.
- Common file for machines of spear3xx family is mach-spear3xx/spear3xx.c and for
- spear6xx is mach-spear6xx/spear6xx.c. mach-spear* also contain soc/machine
- specific files, like spear300.c, spear310.c, spear320.c and spear600.c.
- mach-spear* doesn't contains board specific files as they fully support
- Flattened Device Tree.
+ Common file for machines of spear3xx family is mach-spear3xx/spear3xx.c, for
+ spear6xx is mach-spear6xx/spear6xx.c and for spear13xx family is
+ mach-spear13xx/spear13xx.c. mach-spear* also contain soc/machine specific
+ files, like spear1310.c, spear1340.c, spear300.c, spear310.c, spear320.c and
+ spear600.c. mach-spear* doesn't contain board specific files as they fully
+ support Flattened Device Tree.
Document Author
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 9b1067afb224..dd88540bb995 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -184,12 +184,14 @@ behind this approach is that a cgroup that aggressively uses a shared
page will eventually get charged for it (once it is uncharged from
the cgroup that brought it in -- this will happen on memory pressure).
+But see section 8.2: when moving a task to another cgroup, its pages may
+be recharged to the new cgroup, if move_charge_at_immigrate has been chosen.
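+For example (a hypothetical sequence; the mount point and cgroup name
+are assumptions, see section 8 for the meaning of the value written):
+
+	# echo 1 > /sys/fs/cgroup/memory/B/memory.move_charge_at_immigrate
+	# echo $PID > /sys/fs/cgroup/memory/B/tasks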
+
Exception: If CONFIG_CGROUP_CGROUP_MEM_RES_CTLR_SWAP is not used.
When you do swapoff and make swapped-out pages of shmem(tmpfs) to
be backed into memory in force, charges for pages are accounted against the
caller of swapoff rather than the users of shmem.
-
2.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP)
Swap Extension allows you to record charge for swap. A swapped-in page is
@@ -374,14 +376,15 @@ cgroup might have some charge associated with it, even though all
tasks have migrated away from it. (because we charge against pages, not
against tasks.)
-Such charges are freed or moved to their parent. At moving, both of RSS
-and CACHES are moved to parent.
-rmdir() may return -EBUSY if freeing/moving fails. See 5.1 also.
+We move the stats to root (if use_hierarchy==0) or parent (if
+use_hierarchy==1); there is no change to the charges other than
+uncharging them from the child.
Charges recorded in swap information is not updated at removal of cgroup.
Recorded information is discarded and a cgroup which uses swap (swapcache)
will be charged as a new owner of it.
+About use_hierarchy, see Section 6.
5. Misc. interfaces.
@@ -394,13 +397,15 @@ will be charged as a new owner of it.
Almost all pages tracked by this memory cgroup will be unmapped and freed.
Some pages cannot be freed because they are locked or in-use. Such pages are
- moved to parent and this cgroup will be empty. This may return -EBUSY if
- VM is too busy to free/move all pages immediately.
+ moved to parent (if use_hierarchy==1) or root (if use_hierarchy==0) and this
+ cgroup will be empty.
Typical use case of this interface is that calling this before rmdir().
Because rmdir() moves all pages to parent, some out-of-use page caches can be
moved to the parent. If you want to avoid that, force_empty will be useful.
+ About use_hierarchy, see Section 6.
+
5.2 stat file
memory.stat file includes following statistics
@@ -430,17 +435,10 @@ hierarchical_memory_limit - # of bytes of memory limit with regard to hierarchy
hierarchical_memsw_limit - # of bytes of memory+swap limit with regard to
hierarchy under which memory cgroup is.
-total_cache - sum of all children's "cache"
-total_rss - sum of all children's "rss"
-total_mapped_file - sum of all children's "cache"
-total_pgpgin - sum of all children's "pgpgin"
-total_pgpgout - sum of all children's "pgpgout"
-total_swap - sum of all children's "swap"
-total_inactive_anon - sum of all children's "inactive_anon"
-total_active_anon - sum of all children's "active_anon"
-total_inactive_file - sum of all children's "inactive_file"
-total_active_file - sum of all children's "active_file"
-total_unevictable - sum of all children's "unevictable"
+total_<counter> - hierarchical version of <counter>, which in
+ addition to the cgroup's own value includes the
+ sum of all hierarchical children's values of
+ <counter>, e.g. total_cache
# The following additional stats are dependent on CONFIG_DEBUG_VM.
@@ -622,8 +620,7 @@ memory cgroup.
bit | what type of charges would be moved ?
-----+------------------------------------------------------------------------
0 | A charge of an anonymous page(or swap of it) used by the target task.
- | Those pages and swaps must be used only by the target task. You must
- | enable Swap Extension(see 2.4) to enable move of swap charges.
+ | You must enable Swap Extension(see 2.4) to enable move of swap charges.
-----+------------------------------------------------------------------------
1 | A charge of file pages(normal file, tmpfs file(e.g. ipc shared memory)
| and swaps of tmpfs file) mmapped by the target task. Unlike the case of
@@ -636,8 +633,6 @@ memory cgroup.
8.3 TODO
-- Implement madvise(2) to let users decide the vma to be moved or not to be
- moved.
- All of moving charge operations are done under cgroup_mutex. It's not good
behavior to hold the mutex too long, so we may need some trick.
diff --git a/Documentation/cgroups/resource_counter.txt b/Documentation/cgroups/resource_counter.txt
index f3c4ec3626a2..0c4a344e78fa 100644
--- a/Documentation/cgroups/resource_counter.txt
+++ b/Documentation/cgroups/resource_counter.txt
@@ -92,6 +92,14 @@ to work with it.
The _locked routines imply that the res_counter->lock is taken.
+ f. void res_counter_uncharge_until
+ (struct res_counter *rc, struct res_counter *top,
+ unsigned long val)
+
+ Almost the same as res_counter_uncharge(), but propagation of the
+ uncharge stops when rc == top. This is useful when killing a
+ res_counter in a child cgroup.
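+
+ A minimal usage sketch (the counter fields shown are hypothetical,
+ for illustration only):
+
+	/* Uncharge 'val' from the child's counter and its ancestors,
+	 * stopping before 'parent', which stays charged. */
+	res_counter_uncharge_until(&child_memcg->res,
+				   &parent_memcg->res, val);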
+
2.1 Other accounting routines
There are more routines that may help you with common needs, like
diff --git a/Documentation/cris/README b/Documentation/cris/README
index d9b086869a60..8dbdb1a44429 100644
--- a/Documentation/cris/README
+++ b/Documentation/cris/README
@@ -1,38 +1,34 @@
-Linux 2.4 on the CRIS architecture
-==================================
-$Id: README,v 1.7 2001/04/19 12:38:32 bjornw Exp $
+Linux on the CRIS architecture
+==============================
-This is a port of Linux 2.4 to Axis Communications ETRAX 100LX embedded
-network CPU. For more information about CRIS and ETRAX please see further
-below.
+This is a port of Linux to Axis Communications ETRAX 100LX,
+ETRAX FS and ARTPEC-3 embedded network CPUs.
+
+For more information about CRIS and ETRAX please see further below.
In order to compile this you need a version of gcc with support for the
-ETRAX chip family. Please see this link for more information on how to
+ETRAX chip family. Please see this link for more information on how to
download the compiler and other tools useful when building and booting
software for the ETRAX platform:
-http://developer.axis.com/doc/software/devboard_lx/install-howto.html
-
-<more specific information should come in this document later>
+http://developer.axis.com/wiki/doku.php?id=axis:install-howto-2_20
What is CRIS ?
--------------
CRIS is an acronym for 'Code Reduced Instruction Set'. It is the CPU
architecture in Axis Communication AB's range of embedded network CPU's,
-called ETRAX. The latest CPU is called ETRAX 100LX, where LX stands for
-'Linux' because the chip was designed to be a good host for the Linux
-operating system.
+called ETRAX.
The ETRAX 100LX chip
--------------------
-For reference, please see the press-release:
+For reference, please see the following link:
-http://www.axis.com/news/us/001101_etrax.htm
+http://www.axis.com/products/dev_etrax_100lx/index.htm
-The ETRAX 100LX is a 100 MIPS processor with 8kB cache, MMU, and a very broad
-range of built-in interfaces, all with modern scatter/gather DMA.
+The ETRAX 100LX is a 100 MIPS processor with 8kB cache, MMU, and a very broad
+range of built-in interfaces, all with modern scatter/gather DMA.
Memory interfaces:
@@ -51,20 +47,28 @@ I/O interfaces:
* SCSI
* two parallel-ports
* two generic 8-bit ports
-
- (not all interfaces are available at the same time due to chip pin
+
+ (not all interfaces are available at the same time due to chip pin
multiplexing)
-The previous version of the ETRAX, the ETRAX 100, sits in almost all of
-Axis shipping thin-servers like the Axis 2100 web camera or the ETRAX 100
-developer-board. It lacks an MMU so the Linux we run on that is a version
-of uClinux (Linux 2.0 without MM-support) ported to the CRIS architecture.
-The new Linux 2.4 port has full MM and needs a CPU with an MMU, so it will
-not run on the ETRAX 100.
+ETRAX 100LX is a CRISv10 architecture.
+
+
+The ETRAX FS and ARTPEC-3 chips
+-------------------------------
-A version of the Axis developer-board with ETRAX 100LX (running Linux
-2.4) is now available. For more information please see developer.axis.com.
+The ETRAX FS is a 200MHz 32-bit RISC processor with on-chip 16kB
+I-cache and 16kB D-cache and with a wide range of device interfaces
+including multiple high speed serial ports and an integrated USB 1.1 PHY.
+The ARTPEC-3 is a variant of the ETRAX FS with additional IO-units
+used by the Axis Communications network cameras.
+
+See the link below for more information:
+
+http://www.axis.com/products/dev_etrax_fs/index.htm
+
+ETRAX FS and ARTPEC-3 are both CRISv32 architectures.
Bootlog
-------
@@ -182,10 +186,6 @@ SwapFree: 0 kB
-rwxr-xr-x 1 342 100 16252 Jan 01 00:00 telnetd
-(All programs are statically linked to the libc at this point - we have not ported the
- shared libraries yet)
-
-
diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt
index bfbc771a65f8..ac9e7516756e 100644
--- a/Documentation/devicetree/bindings/arm/fsl.txt
+++ b/Documentation/devicetree/bindings/arm/fsl.txt
@@ -1,6 +1,14 @@
Freescale i.MX Platforms Device Tree Bindings
-----------------------------------------------
+i.MX23 Evaluation Kit
+Required root node properties:
+ - compatible = "fsl,imx23-evk", "fsl,imx23";
+
+i.MX28 Evaluation Kit
+Required root node properties:
+ - compatible = "fsl,imx28-evk", "fsl,imx28";
+
i.MX51 Babbage Board
Required root node properties:
- compatible = "fsl,imx51-babbage", "fsl,imx51";
@@ -29,6 +37,10 @@ i.MX6 Quad SABRE Lite Board
Required root node properties:
- compatible = "fsl,imx6q-sabrelite", "fsl,imx6q";
+i.MX6 Quad SABRE Smart Device Board
+Required root node properties:
+ - compatible = "fsl,imx6q-sabresd", "fsl,imx6q";
+
Generic i.MX boards
-------------------
diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt
index 9b4b82a721b6..62eb8df1e08d 100644
--- a/Documentation/devicetree/bindings/arm/gic.txt
+++ b/Documentation/devicetree/bindings/arm/gic.txt
@@ -11,7 +11,9 @@ have PPIs or SGIs.
Main node required properties:
- compatible : should be one of:
+ "arm,cortex-a15-gic"
"arm,cortex-a9-gic"
+ "arm,cortex-a7-gic"
"arm,arm11mp-gic"
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
@@ -39,8 +41,9 @@ Main node required properties:
the GIC cpu interface register base and size.
Optional
-- interrupts : Interrupt source of the parent interrupt controller. Only
- present on secondary GICs.
+- interrupts : Interrupt source of the parent interrupt controller on
+ secondary GICs, or VGIC maintenance interrupt on primary GIC (see
+ below).
- cpu-offset : per-cpu offset within the distributor and cpu interface
regions, used when the GIC doesn't have banked registers. The offset is
@@ -57,3 +60,31 @@ Example:
<0xfff10100 0x100>;
};
+
+* GIC virtualization extensions (VGIC)
+
+For ARM cores that support the virtualization extensions, additional
+properties must be described (they only exist if the GIC is the
+primary interrupt controller).
+
+Required properties:
+
+- reg : Additional regions specifying the base physical address and
+ size of the VGIC registers. The first additional region is the GIC
+ virtual interface control register base and size. The 2nd additional
+ region is the GIC virtual cpu interface register base and size.
+
+- interrupts : VGIC maintenance interrupt.
+
+Example:
+
+ interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x2c001000 0x1000>,
+ <0x2c002000 0x1000>,
+ <0x2c004000 0x2000>,
+ <0x2c006000 0x2000>;
+ interrupts = <1 9 0xf04>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt b/Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt
new file mode 100644
index 000000000000..f2f2171e530e
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt
@@ -0,0 +1,52 @@
+* Samsung Exynos Interrupt Combiner Controller
+
+Samsung's Exynos4 architecture includes an interrupt combiner controller which
+can combine interrupt sources as a group and provide a single interrupt request
+for the group. The interrupt request from each group is connected to a parent
+interrupt controller, such as the GIC in case of Exynos4210.
+
+The interrupt combiner controller consists of multiple combiners. Up to eight
+interrupt sources can be connected to a combiner. The combiner outputs one
+combined interrupt for its eight interrupt sources. The combined interrupt
+is usually connected to a parent interrupt controller.
+
+A single node in the device tree is used to describe the interrupt combiner
+controller module (which includes multiple combiners). A combiner in the
+interrupt controller module shares config/control registers with other
+combiners. For example, a 32-bit interrupt enable/disable config register
+can accommodate up to 4 interrupt combiners (with each combiner supporting
+up to 8 interrupt sources).
+
+Required properties:
+- compatible: should be "samsung,exynos4210-combiner".
+- interrupt-controller: Identifies the node as an interrupt controller.
+- #interrupt-cells: should be <2>. The meanings of the cells are:
+ * First Cell: Combiner Group Number.
+ * Second Cell: Interrupt number within the group.
+- reg: Base address and size of interrupt combiner registers.
+- interrupts: The list of interrupts generated by the combiners which are then
+ connected to a parent interrupt controller. The format of the interrupt
+ specifier depends on the parent interrupt controller.
+
+Optional properties:
+- samsung,combiner-nr: The number of interrupt combiners supported. If this
+ property is not specified, the default number of combiners is assumed
+ to be 16.
+- interrupt-parent: phandle of the parent interrupt controller, if not
+ inherited from the parent node.
+
+
+Example:
+
+ The following is an example from the Exynos4210 SoC dtsi file.
+
+ combiner:interrupt-controller@10440000 {
+ compatible = "samsung,exynos4210-combiner";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x10440000 0x1000>;
+ interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
+ <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
+ <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
+ <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/spear-timer.txt b/Documentation/devicetree/bindings/arm/spear-timer.txt
new file mode 100644
index 000000000000..c0017221cf55
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/spear-timer.txt
@@ -0,0 +1,18 @@
+* SPEAr ARM Timer
+
+** Timer node required properties:
+
+- compatible : Should be:
+ "st,spear-timer"
+- reg: Address range of the timer registers
+- interrupt-parent: Should be the phandle for the interrupt controller
+ that services interrupts for this device
+- interrupt: Should contain the timer interrupt number
+
+Example:
+
+ timer@f0000000 {
+ compatible = "st,spear-timer";
+ reg = <0xf0000000 0x400>;
+ interrupts = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/spear.txt b/Documentation/devicetree/bindings/arm/spear.txt
index aa5f355cc947..0d42949df6c2 100644
--- a/Documentation/devicetree/bindings/arm/spear.txt
+++ b/Documentation/devicetree/bindings/arm/spear.txt
@@ -2,25 +2,25 @@ ST SPEAr Platforms Device Tree Bindings
---------------------------------------
Boards with the ST SPEAr600 SoC shall have the following properties:
-
Required root node property:
-
compatible = "st,spear600";
Boards with the ST SPEAr300 SoC shall have the following properties:
-
Required root node property:
-
compatible = "st,spear300";
Boards with the ST SPEAr310 SoC shall have the following properties:
-
Required root node property:
-
compatible = "st,spear310";
Boards with the ST SPEAr320 SoC shall have the following properties:
+Required root node property:
+compatible = "st,spear320";
+Boards with the ST SPEAr1310 SoC shall have the following properties:
Required root node property:
+compatible = "st,spear1310";
-compatible = "st,spear320";
+Boards with the ST SPEAr1340 SoC shall have the following properties:
+Required root node property:
+compatible = "st,spear1340";
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
new file mode 100644
index 000000000000..234406d41c12
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
@@ -0,0 +1,11 @@
+NVIDIA Tegra AHB
+
+Required properties:
+- compatible : "nvidia,tegra20-ahb" or "nvidia,tegra30-ahb"
+- reg : Should contain 1 register range (address and length)
+
+Example:
+ ahb: ahb@6000c004 {
+ compatible = "nvidia,tegra20-ahb";
+ reg = <0x6000c004 0x10c>; /* AHB Arbitration + Gizmo Controller */
+ };
diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
new file mode 100644
index 000000000000..ded0398d3bdc
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
@@ -0,0 +1,19 @@
+* Freescale MXS DMA
+
+Required properties:
+- compatible : Should be "fsl,<chip>-dma-apbh" or "fsl,<chip>-dma-apbx"
+- reg : Should contain registers location and length
+
+Supported chips:
+imx23, imx28.
+
+Examples:
+dma-apbh@80004000 {
+ compatible = "fsl,imx28-dma-apbh";
+ reg = <0x80004000 2000>;
+};
+
+dma-apbx@80024000 {
+ compatible = "fsl,imx28-dma-apbx";
+ reg = <0x80024000 2000>;
+};
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
new file mode 100644
index 000000000000..c0d85dbcada5
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -0,0 +1,17 @@
+* Synopsys Designware DMA Controller
+
+Required properties:
+- compatible: "snps,dma-spear1340"
+- reg: Address range of the DMAC registers
+- interrupt-parent: Should be the phandle for the interrupt controller
+ that services interrupts for this device
+- interrupt: Should contain the DMAC interrupt number
+
+Example:
+
+ dma@fc000000 {
+ compatible = "snps,dma-spear1340";
+ reg = <0xfc000000 0x1000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <12>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt b/Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt
new file mode 100644
index 000000000000..f93d51478d5a
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt
@@ -0,0 +1,38 @@
+Lantiq SoC External Bus memory mapped GPIO controller
+
+By attaching hardware latches to the EBU it is possible to create
+output-only gpios. This driver configures a special memory address which,
+when written to, outputs 16 bits to the latches.
+
+The node describing the memory mapped GPIOs needs to be a child of the node
+describing the "lantiq,localbus".
+
+Required properties:
+- compatible : Should be "lantiq,gpio-mm-lantiq"
+- reg : Address and length of the register set for the device
+- #gpio-cells : Should be two. The first cell is the pin number and
+ the second cell is used to specify optional parameters (currently
+ unused).
+- gpio-controller : Marks the device node as a gpio controller.
+
+Optional properties:
+- lantiq,shadow : The default value assumed to be already set on the
+ shift register cascade.
+
+Example:
+
+localbus@0 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x0 0x3ffffff /* addrsel0 */
+ 1 0 0x4000000 0x4000010>; /* addsel1 */
+ compatible = "lantiq,localbus", "simple-bus";
+
+ gpio_mm0: gpio@4000000 {
+ compatible = "lantiq,gpio-mm";
+ reg = <1 0x0 0x10>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ lantiq,shadow = <0x77f>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mxs.txt b/Documentation/devicetree/bindings/gpio/gpio-mxs.txt
new file mode 100644
index 000000000000..0c35673f7a3e
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-mxs.txt
@@ -0,0 +1,87 @@
+* Freescale MXS GPIO controller
+
+The Freescale MXS GPIO controller is part of MXS PIN controller. The
+GPIOs are organized in port/bank. Each port consists of 32 GPIOs.
+
+As the GPIO controller is embedded in the PIN controller and all the
+GPIO ports share the same IO space with PIN controller, the GPIO node
+will be represented as sub-nodes of MXS pinctrl node.
+
+Required properties for GPIO node:
+- compatible : Should be "fsl,<soc>-gpio". The supported SoCs include
+ imx23 and imx28.
+- interrupts : Should be the port interrupt shared by all 32 pins.
+- gpio-controller : Marks the device node as a gpio controller.
+- #gpio-cells : Should be two. The first cell is the pin number and
+ the second cell is used to specify optional parameters (currently
+ unused).
+- interrupt-controller: Marks the device node as an interrupt controller.
+- #interrupt-cells : Should be 2. The first cell is the GPIO number.
+ The second cell bits[3:0] is used to specify trigger type and level flags:
+ 1 = low-to-high edge triggered.
+ 2 = high-to-low edge triggered.
+ 4 = active high level-sensitive.
+ 8 = active low level-sensitive.
+
+Note: Each GPIO port should have an alias correctly numbered in "aliases"
+node.
+
+Examples:
+
+aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ gpio2 = &gpio2;
+ gpio3 = &gpio3;
+ gpio4 = &gpio4;
+};
+
+pinctrl@80018000 {
+ compatible = "fsl,imx28-pinctrl", "simple-bus";
+ reg = <0x80018000 2000>;
+
+ gpio0: gpio@0 {
+ compatible = "fsl,imx28-gpio";
+ interrupts = <127>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@1 {
+ compatible = "fsl,imx28-gpio";
+ interrupts = <126>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@2 {
+ compatible = "fsl,imx28-gpio";
+ interrupts = <125>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@3 {
+ compatible = "fsl,imx28-gpio";
+ interrupts = <124>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@4 {
+ compatible = "fsl,imx28-gpio";
+ interrupts = <123>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt b/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt
new file mode 100644
index 000000000000..854de130a971
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt
@@ -0,0 +1,42 @@
+Lantiq SoC Serial To Parallel (STP) GPIO controller
+
+The Serial To Parallel (STP) is found on MIPS-based Lantiq SoCs. It is a
+peripheral controller used to drive external shift register cascades. At most
+3 groups of 8 bits can be driven. The hardware is able to allow the DSL modem
+to drive the 2 LSBs of the cascade automatically.
+
+
+Required properties:
+- compatible : Should be "lantiq,gpio-stp-xway"
+- reg : Address and length of the register set for the device
+- #gpio-cells : Should be two. The first cell is the pin number and
+ the second cell is used to specify optional parameters (currently
+ unused).
+- gpio-controller : Marks the device node as a gpio controller.
+
+Optional properties:
+- lantiq,shadow : The default value assumed to be already set on the
+ shift register cascade.
+- lantiq,groups : Set the 3 bit mask to select which of the 3 groups are enabled
+ in the shift register cascade.
+- lantiq,dsl : The dsl core can control the 2 LSBs of the gpio cascade. This
+ 2-bit property enables this feature.
+- lantiq,phy1 : The gphy1 core can control 3 bits of the gpio cascade.
+- lantiq,phy2 : The gphy2 core can control 3 bits of the gpio cascade.
+- lantiq,rising : use rising instead of falling edge for the shift register
+
+Example:
+
+gpio1: stp@E100BB0 {
+ compatible = "lantiq,gpio-stp-xway";
+ reg = <0xE100BB0 0x40>;
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ lantiq,shadow = <0xffff>;
+ lantiq,groups = <0x7>;
+ lantiq,dsl = <0x3>;
+ lantiq,phy1 = <0x7>;
+ lantiq,phy2 = <0x7>;
+ /* lantiq,rising; */
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio_lpc32xx.txt b/Documentation/devicetree/bindings/gpio/gpio_lpc32xx.txt
new file mode 100644
index 000000000000..49819367a011
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio_lpc32xx.txt
@@ -0,0 +1,43 @@
+NXP LPC32xx SoC GPIO controller
+
+Required properties:
+- compatible: must be "nxp,lpc3220-gpio"
+- reg: Physical base address and length of the controller's registers.
+- gpio-controller: Marks the device node as a GPIO controller.
+- #gpio-cells: Should be 3:
+ 1) bank:
+ 0: GPIO P0
+ 1: GPIO P1
+ 2: GPIO P2
+ 3: GPIO P3
+ 4: GPI P3
+ 5: GPO P3
+ 2) pin number
+ 3) optional parameters:
+ - bit 0 specifies polarity (0 for normal, 1 for inverted)
+- reg: Index of the GPIO group
+
+Example:
+
+ gpio: gpio@40028000 {
+ compatible = "nxp,lpc3220-gpio";
+ reg = <0x40028000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <3>; /* bank, pin, flags */
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led0 {
+ gpios = <&gpio 5 1 1>; /* GPO_P3 1, active low */
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+
+ led1 {
+ gpios = <&gpio 5 14 1>; /* GPO_P3 14, active low */
+ linux,default-trigger = "timer";
+ default-state = "off";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mxs.txt b/Documentation/devicetree/bindings/i2c/i2c-mxs.txt
new file mode 100644
index 000000000000..1bfc02de1b0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mxs.txt
@@ -0,0 +1,16 @@
+* Freescale MXS Inter IC (I2C) Controller
+
+Required properties:
+- compatible: Should be "fsl,<chip>-i2c"
+- reg: Should contain registers location and length
+- interrupts: Should contain ERROR and DMA interrupts
+
+Examples:
+
+i2c0: i2c@80058000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx28-i2c";
+ reg = <0x80058000 2000>;
+ interrupts = <111 68>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/mux.txt b/Documentation/devicetree/bindings/i2c/mux.txt
new file mode 100644
index 000000000000..af84cce5cd7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/mux.txt
@@ -0,0 +1,60 @@
+Common i2c bus multiplexer/switch properties.
+
+An i2c bus multiplexer/switch will have several child busses that are
+numbered uniquely in a device dependent manner. The nodes for an i2c bus
+multiplexer/switch will have one child node for each child
+bus.
+
+Required properties:
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Required properties for child nodes:
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg : The sub-bus number.
+
+Optional properties for child nodes:
+- Other properties specific to the multiplexer/switch hardware.
+- Child nodes conforming to i2c bus binding
+
+
+Example :
+
+ /*
+ An NXP pca9548 8 channel I2C multiplexer at address 0x70
+ with two NXP pca8574 GPIO expanders attached, one each to
+ ports 3 and 4.
+ */
+
+ mux@70 {
+ compatible = "nxp,pca9548";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ gpio1: gpio@38 {
+ compatible = "nxp,pca8574";
+ reg = <0x38>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+ };
+ i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+
+ gpio2: gpio@38 {
+ compatible = "nxp,pca8574";
+ reg = <0x38>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/samsung-i2c.txt b/Documentation/devicetree/bindings/i2c/samsung-i2c.txt
index 38832c712919..b6cb5a12c672 100644
--- a/Documentation/devicetree/bindings/i2c/samsung-i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/samsung-i2c.txt
@@ -6,14 +6,18 @@ Required properties:
- compatible: value should be either of the following.
(a) "samsung, s3c2410-i2c", for i2c compatible with s3c2410 i2c.
(b) "samsung, s3c2440-i2c", for i2c compatible with s3c2440 i2c.
+ (c) "samsung, s3c2440-hdmiphy-i2c", for s3c2440-like i2c used
+ inside the HDMIPHY block found on several Samsung SoCs
- reg: physical base address of the controller and length of memory mapped
region.
- interrupts: interrupt number to the cpu.
- samsung,i2c-sda-delay: Delay (in ns) applied to data line (SDA) edges.
- - gpios: The order of the gpios should be the following: <SDA, SCL>.
- The gpio specifier depends on the gpio controller.
Optional properties:
+ - gpios: The order of the gpios should be the following: <SDA, SCL>.
+ The gpio specifier depends on the gpio controller. Required in all
+ cases except for "samsung,s3c2440-hdmiphy-i2c" whose input/output
+ lines are permanently wired to the respective client.
- samsung,i2c-slave-addr: Slave address in multi-master enviroment. If not
specified, default value is 0.
- samsung,i2c-max-bus-freq: Desired frequency in Hz of the bus. If not
diff --git a/Documentation/devicetree/bindings/i2c/xiic.txt b/Documentation/devicetree/bindings/i2c/xiic.txt
new file mode 100644
index 000000000000..ceabbe91ae44
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/xiic.txt
@@ -0,0 +1,22 @@
+Xilinx IIC controller:
+
+Required properties:
+- compatible : Must be "xlnx,xps-iic-2.00.a"
+- reg : IIC register location and length
+- interrupts : IIC controller interrupt
+- #address-cells = <1>
+- #size-cells = <0>
+
+Optional properties:
+- Child nodes conforming to i2c bus binding
+
+Example:
+
+ axi_iic_0: i2c@40800000 {
+ compatible = "xlnx,xps-iic-2.00.a";
+ interrupts = < 1 2 >;
+ reg = < 0x40800000 0x10000 >;
+
+ #size-cells = <0>;
+ #address-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/input/spear-keyboard.txt b/Documentation/devicetree/bindings/input/spear-keyboard.txt
new file mode 100644
index 000000000000..4a846d26da23
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/spear-keyboard.txt
@@ -0,0 +1,20 @@
+* SPEAr keyboard controller
+
+Required properties:
+- compatible: "st,spear300-kbd"
+
+Optional properties, in addition to those specified by the shared
+matrix-keyboard bindings:
+- autorepeat: bool: enables key autorepeat
+- st,mode: keyboard mode: 0 - 9x9, 1 - 6x6, 2 - 2x2
+
+Example:
+
+kbd@fc400000 {
+ compatible = "st,spear300-kbd";
+ reg = <0xfc400000 0x100>;
+ linux,keymap = < 0x00030012
+ 0x0102003a >;
+ autorepeat;
+ st,mode = <0>;
+};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/lpc32xx-tsc.txt b/Documentation/devicetree/bindings/input/touchscreen/lpc32xx-tsc.txt
new file mode 100644
index 000000000000..41cbf4b7a670
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/lpc32xx-tsc.txt
@@ -0,0 +1,16 @@
+* NXP LPC32xx SoC Touchscreen Controller (TSC)
+
+Required properties:
+- compatible: must be "nxp,lpc3220-tsc"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: The TSC/ADC interrupt
+
+Example:
+
+ tsc@40048000 {
+ compatible = "nxp,lpc3220-tsc";
+ reg = <0x40048000 0x1000>;
+ interrupt-parent = <&mic>;
+ interrupts = <39 0>;
+ };
diff --git a/Documentation/devicetree/bindings/input/twl6040-vibra.txt b/Documentation/devicetree/bindings/input/twl6040-vibra.txt
new file mode 100644
index 000000000000..5b1918b818fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/twl6040-vibra.txt
@@ -0,0 +1,37 @@
+Vibra driver for the twl6040 family
+
+The vibra driver is a child of the twl6040 MFD driver.
+Documentation/devicetree/bindings/mfd/twl6040.txt
+
+Required properties:
+- compatible : Must be "ti,twl6040-vibra";
+- interrupts: 4, Vibra overcurrent interrupt
+- vddvibl-supply: Regulator supplying the left vibra motor
+- vddvibr-supply: Regulator supplying the right vibra motor
+- vibldrv_res: Board specific left driver resistance
+- vibrdrv_res: Board specific right driver resistance
+- viblmotor_res: Board specific left motor resistance
+- vibrmotor_res: Board specific right motor resistance
+
+Optional properties:
+- vddvibl_uV: If the vddvibl default voltage needs to be changed
+- vddvibr_uV: If the vddvibr default voltage needs to be changed
+
+Example:
+/*
+ * 8-channel high quality low-power audio codec
+ * http://www.ti.com/lit/ds/symlink/twl6040.pdf
+ */
+twl6040: twl6040@4b {
+ ...
+ twl6040_vibra: twl6040@1 {
+ compatible = "ti,twl6040-vibra";
+ interrupts = <4>;
+ vddvibl-supply = <&vbat>;
+ vddvibr-supply = <&vbat>;
+ vibldrv_res = <8>;
+ vibrdrv_res = <3>;
+ viblmotor_res = <10>;
+ vibrmotor_res = <10>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt b/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt
new file mode 100644
index 000000000000..099d9362ebc1
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt
@@ -0,0 +1,14 @@
+NVIDIA Tegra 20 GART
+
+Required properties:
+- compatible: "nvidia,tegra20-gart"
+- reg: Two pairs of cells specifying the physical address and size of
+ the memory controller registers and the GART aperture respectively.
+
+Example:
+
+ gart {
+ compatible = "nvidia,tegra20-gart";
+ reg = <0x7000f024 0x00000018 /* controller registers */
+ 0x58000000 0x02000000>; /* GART aperture */
+ };
diff --git a/Documentation/devicetree/bindings/mfd/da9052-i2c.txt b/Documentation/devicetree/bindings/mfd/da9052-i2c.txt
new file mode 100644
index 000000000000..1857f4a6b9a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/da9052-i2c.txt
@@ -0,0 +1,60 @@
+* Dialog DA9052/53 Power Management Integrated Circuit (PMIC)
+
+Required properties:
+- compatible : Should be "dlg,da9052", "dlg,da9053-aa",
+ "dlg,da9053-ab", or "dlg,da9053-bb"
+
+Sub-nodes:
+- regulators : Contain the regulator nodes. The DA9052/53 regulators are
+ bound using their names as listed below:
+
+ buck0 : regulator BUCK0
+ buck1 : regulator BUCK1
+ buck2 : regulator BUCK2
+ buck3 : regulator BUCK3
+ ldo4 : regulator LDO4
+ ldo5 : regulator LDO5
+ ldo6 : regulator LDO6
+ ldo7 : regulator LDO7
+ ldo8 : regulator LDO8
+ ldo9 : regulator LDO9
+ ldo10 : regulator LDO10
+ ldo11 : regulator LDO11
+ ldo12 : regulator LDO12
+ ldo13 : regulator LDO13
+
+ The bindings details of individual regulator device can be found in:
+ Documentation/devicetree/bindings/regulator/regulator.txt
+
+Examples:
+
+i2c@63fc8000 { /* I2C1 */
+ status = "okay";
+
+ pmic: dialog@48 {
+ compatible = "dlg,da9053-aa";
+ reg = <0x48>;
+
+ regulators {
+ buck0 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <2075000>;
+ };
+
+ buck1 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <2075000>;
+ };
+
+ buck2 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <2500000>;
+ };
+
+ buck3 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <2500000>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/mfd/tps65910.txt b/Documentation/devicetree/bindings/mfd/tps65910.txt
new file mode 100644
index 000000000000..645f5eaadb3f
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/tps65910.txt
@@ -0,0 +1,133 @@
+TPS65910 Power Management Integrated Circuit
+
+Required properties:
+- compatible: "ti,tps65910" or "ti,tps65911"
+- reg: I2C slave address
+- interrupts: the interrupt outputs of the controller
+- #gpio-cells: number of cells to describe a GPIO, this should be 2.
+ The first cell is the GPIO number.
+ The second cell is used to specify additional options <unused>.
+- gpio-controller: mark the device as a GPIO controller
+- #interrupt-cells: the number of cells to describe an IRQ, this should be 2.
+ The first cell is the IRQ number.
+ The second cell is the flags, encoded as the trigger masks from
+ Documentation/devicetree/bindings/interrupts.txt
+- regulators: This is the list of child nodes that specify the regulator
+ initialization data for defined regulators. Not all regulators for the given
+ device need to be present. The definition for each of these nodes is defined
+ using the standard binding for regulators found at
+ Documentation/devicetree/bindings/regulator/regulator.txt.
+
+ The valid names for regulators are:
+ tps65910: vrtc, vio, vdd1, vdd2, vdd3, vdig1, vdig2, vpll, vdac, vaux1,
+ vaux2, vaux33, vmmc
+ tps65911: vrtc, vio, vdd1, vdd3, vddctrl, ldo1, ldo2, ldo3, ldo4, ldo5,
+ ldo6, ldo7, ldo8
+
+Optional properties:
+- ti,vmbch-threshold: (tps65911) main battery charged threshold
+ comparator. (see VMBCH_VSEL in TPS65910 datasheet)
+- ti,vmbch2-threshold: (tps65911) main battery discharged threshold
+ comparator. (see VMBCH_VSEL in TPS65910 datasheet)
+- ti,en-gpio-sleep: enable sleep control for gpios
+ There should be 9 entries here, one for each gpio.
+
+Regulator Optional properties:
+- ti,regulator-ext-sleep-control: enable external sleep
+ control through external inputs [0 (not enabled), 1 (EN1), 2 (EN2) or 4 (EN3)]
+ If this property is not defined, it defaults to 0 (not enabled).
+
+Example:
+
+ pmu: tps65910@d2 {
+ compatible = "ti,tps65910";
+ reg = <0xd2>;
+ interrupt-parent = <&intc>;
+ interrupts = < 0 118 0x04 >;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ ti,vmbch-threshold = <0>;
+ ti,vmbch2-threshold = <0>;
+
+ ti,en-gpio-sleep = <0 0 1 0 0 0 0 0 0>;
+
+ regulators {
+ vdd1_reg: vdd1 {
+ regulator-min-microvolt = < 600000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ vdd2_reg: vdd2 {
+ regulator-min-microvolt = < 600000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ti,regulator-ext-sleep-control = <4>;
+ };
+ vddctrl_reg: vddctrl {
+ regulator-min-microvolt = < 600000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ vio_reg: vio {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ti,regulator-ext-sleep-control = <1>;
+ };
+ ldo1_reg: ldo1 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3300000>;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ ldo2_reg: ldo2 {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ ldo3_reg: ldo3 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3300000>;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ ldo4_reg: ldo4 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ ldo5_reg: ldo5 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3300000>;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ ldo6_reg: ldo6 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ ldo7_reg: ldo7 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ti,regulator-ext-sleep-control = <1>;
+ };
+ ldo8_reg: ldo8 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ ti,regulator-ext-sleep-control = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/twl6040.txt b/Documentation/devicetree/bindings/mfd/twl6040.txt
new file mode 100644
index 000000000000..bc67c6f424aa
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/twl6040.txt
@@ -0,0 +1,62 @@
+Texas Instruments TWL6040 family
+
+The TWL6040s are 8-channel high quality low-power audio codecs providing audio
+and vibra functionality on OMAP4+ platforms.
+They are connected to the host processor via i2c for commands and via McPDM
+for audio data and commands.
+
+Required properties:
+- compatible : Must be "ti,twl6040";
+- reg: must be 0x4b for i2c address
+- interrupts: twl6040 has one interrupt line connected to the main SoC
+- interrupt-parent: The parent interrupt controller
+- twl6040,audpwron-gpio: Power on GPIO line for the twl6040
+
+- vio-supply: Regulator for the twl6040 VIO supply
+- v2v1-supply: Regulator for the twl6040 V2V1 supply
+
+Optional properties, nodes:
+- enable-active-high: To power on the twl6040 during boot.
+
+Vibra functionality
+Required properties:
+- vddvibl-supply: Regulator for the left vibra motor
+- vddvibr-supply: Regulator for the right vibra motor
+- vibra { }: Configuration section for vibra parameters containing the following
+ properties:
+- ti,vibldrv-res: Resistance parameter for left driver
+- ti,vibrdrv-res: Resistance parameter for right driver
+- ti,viblmotor-res: Resistance parameter for left motor
+- ti,vibrmotor-res: Resistance parameter for right motor
+
+Optional properties within vibra { } section:
+- vddvibl_uV: If the vddvibl default voltage needs to be changed
+- vddvibr_uV: If the vddvibr default voltage needs to be changed
+
+Example:
+&i2c1 {
+ twl6040: twl@4b {
+ compatible = "ti,twl6040";
+ reg = <0x4b>;
+
+ interrupts = <0 119 4>;
+ interrupt-parent = <&gic>;
+ twl6040,audpwron-gpio = <&gpio4 31 0>;
+
+ vio-supply = <&v1v8>;
+ v2v1-supply = <&v2v1>;
+ enable-active-high;
+
+ /* regulators for vibra motor */
+ vddvibl-supply = <&vbat>;
+ vddvibr-supply = <&vbat>;
+
+ vibra {
+ /* Vibra driver, motor resistance parameters */
+ ti,vibldrv-res = <8>;
+ ti,vibrdrv-res = <3>;
+ ti,viblmotor-res = <10>;
+ ti,vibrmotor-res = <10>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
index 64bcb8be973c..0d93b4b0e0e3 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
@@ -11,9 +11,11 @@ Required properties:
- interrupt-parent : interrupt source phandle.
- clock-frequency : specifies eSDHC base clock frequency.
- sdhci,wp-inverted : (optional) specifies that eSDHC controller
- reports inverted write-protect state;
+ reports inverted write-protect state; New devices should use
+ the generic "wp-inverted" property.
- sdhci,1-bit-only : (optional) specifies that a controller can
- only handle 1-bit data transfers.
+ only handle 1-bit data transfers. New devices should use the
+ generic "bus-width = <1>" property.
- sdhci,auto-cmd12: (optional) specifies that a controller can
only handle auto CMD12.
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index ab22fe6e73ab..c7e404b3ef05 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -9,7 +9,7 @@ Required properties:
- interrupts : Should contain eSDHC interrupt
Optional properties:
-- fsl,card-wired : Indicate the card is wired to host permanently
+- non-removable : Indicate the card is wired to host permanently
- fsl,cd-internal : Indicate to use controller internal card detection
- fsl,wp-internal : Indicate to use controller internal write protection
- cd-gpios : Specify GPIOs for card detection
diff --git a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
index 89a0084df2f7..d64aea5a4203 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
@@ -10,7 +10,8 @@ Required properties:
Optional properties:
- gpios : may specify GPIOs in this order: Card-Detect GPIO,
- Write-Protect GPIO.
+ Write-Protect GPIO. Note that this does not follow the
+ binding from mmc.txt, for historic reasons.
- interrupts : the interrupt of a card detect interrupt.
- interrupt-parent : the phandle for the interrupt controller that
services interrupts for this device.
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
new file mode 100644
index 000000000000..6e70dcde0a71
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -0,0 +1,27 @@
+These properties are common to multiple MMC host controllers. Any host
+that requires the respective functionality should implement them using
+these definitions.
+
+Required properties:
+- bus-width: Number of data lines, can be <1>, <4>, or <8>
+
+Optional properties:
+- cd-gpios : Specify GPIOs for card detection, see gpio binding
+- wp-gpios : Specify GPIOs for write protection, see gpio binding
+- cd-inverted: when present, polarity on the cd gpio line is inverted
+- wp-inverted: when present, polarity on the wp gpio line is inverted
+- non-removable: non-removable slot (like eMMC)
+- max-frequency: maximum operating clock frequency
+
+Example:
+
+sdhci@ab000000 {
+ compatible = "sdhci";
+ reg = <0xab000000 0x200>;
+ interrupts = <23>;
+ bus-width = <4>;
+ cd-gpios = <&gpio 69 0>;
+ cd-inverted;
+ wp-gpios = <&gpio 70 0>;
+ max-frequency = <50000000>;
+};
diff --git a/Documentation/devicetree/bindings/mmc/mmci.txt b/Documentation/devicetree/bindings/mmc/mmci.txt
new file mode 100644
index 000000000000..14a81d526118
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/mmci.txt
@@ -0,0 +1,19 @@
+* ARM PrimeCell MultiMedia Card Interface (MMCI) PL180/1
+
+The ARM PrimeCell MMCI PL180 and PL181 provide an interface for
+reading and writing to MultiMedia and SD cards alike.
+
+Required properties:
+- compatible : contains "arm,pl18x", "arm,primecell".
+- reg : contains pl18x registers and length.
+- interrupts : contains the device IRQ(s).
+- arm,primecell-periphid : contains the PrimeCell Peripheral ID.
+
+Optional properties:
+- wp-gpios : contains any write protect (ro) gpios
+- cd-gpios : contains any card detection gpios
+- cd-inverted : indicates whether the cd gpio is inverted
+- max-frequency : contains the maximum operating frequency
+- bus-width : number of data lines, can be <1>, <4>, or <8>
+- mmc-cap-mmc-highspeed : indicates whether MMC is high speed capable
+- mmc-cap-sd-highspeed : indicates whether SD is high speed capable
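+
+Example (a sketch only; the address, interrupt specifier, peripheral id
+and gpio numbers are illustrative assumptions):
+
+sdi@80126000 {
+ compatible = "arm,pl18x", "arm,primecell";
+ reg = <0x80126000 0x1000>;
+ interrupts = <0 60 0x4>;
+ arm,primecell-periphid = <0x10480180>;
+ bus-width = <4>;
+ max-frequency = <50000000>;
+ cd-gpios = <&gpio 69 0>;
+ wp-gpios = <&gpio 70 0>;
+ mmc-cap-sd-highspeed;
+};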
diff --git a/Documentation/devicetree/bindings/mmc/mxs-mmc.txt b/Documentation/devicetree/bindings/mmc/mxs-mmc.txt
new file mode 100644
index 000000000000..14d870a9e3db
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/mxs-mmc.txt
@@ -0,0 +1,25 @@
+* Freescale MXS MMC controller
+
+The Freescale MXS Synchronous Serial Ports (SSP) can act as a MMC controller
+to support MMC, SD, and SDIO types of memory cards.
+
+Required properties:
+- compatible: Should be "fsl,<chip>-mmc". The supported chips include
+ imx23 and imx28.
+- reg: Should contain registers location and length
+- interrupts: Should contain ERROR and DMA interrupts
+- fsl,ssp-dma-channel: APBH DMA channel for the SSP
+- bus-width: Number of data lines, can be <1>, <4>, or <8>
+
+Optional properties:
+- wp-gpios: Specify GPIOs for write protection
+
+Examples:
+
+ssp0: ssp@80010000 {
+ compatible = "fsl,imx28-mmc";
+ reg = <0x80010000 2000>;
+ interrupts = <96 82>;
+ fsl,ssp-dma-channel = <0>;
+ bus-width = <8>;
+};
diff --git a/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt
index 7e51154679a6..f77c3031607f 100644
--- a/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt
@@ -7,12 +7,12 @@ Required properties:
- compatible : Should be "nvidia,<chip>-sdhci"
- reg : Should contain SD/MMC registers location and length
- interrupts : Should contain SD/MMC interrupt
+- bus-width : Number of data lines, can be <1>, <4>, or <8>
Optional properties:
- cd-gpios : Specify GPIOs for card detection
- wp-gpios : Specify GPIOs for write protection
- power-gpios : Specify GPIOs for power control
-- support-8bit : Boolean, indicates if 8-bit mode should be used.
Example:
@@ -23,5 +23,5 @@ sdhci@c8000200 {
cd-gpios = <&gpio 69 0>; /* gpio PI5 */
wp-gpios = <&gpio 57 0>; /* gpio PH1 */
power-gpios = <&gpio 155 0>; /* gpio PT3 */
- support-8bit;
+ bus-width = <8>;
};
diff --git a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
index dbd4368ab8cc..8a53958c9a9f 100644
--- a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
+++ b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
@@ -15,7 +15,7 @@ Optional properties:
ti,dual-volt: boolean, supports dual voltage cards
<supply-name>-supply: phandle to the regulator device tree node
"supply-name" examples are "vmmc", "vmmc_aux" etc
-ti,bus-width: Number of data lines, default assumed is 1 if the property is missing.
+bus-width: Number of data lines, default assumed is 1 if the property is missing.
cd-gpios: GPIOs for card detection
wp-gpios: GPIOs for write protection
ti,non-removable: non-removable slot (like eMMC)
@@ -27,7 +27,7 @@ Example:
reg = <0x4809c000 0x400>;
ti,hwmods = "mmc1";
ti,dual-volt;
- ti,bus-width = <4>;
+ bus-width = <4>;
vmmc-supply = <&vmmc>; /* phandle to regulator node */
ti,non-removable;
};
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index de439517dff0..7ab9e1a2d8be 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -14,7 +14,7 @@ Optional properties:
Example:
-fec@83fec000 {
+ethernet@83fec000 {
compatible = "fsl,imx51-fec", "fsl,imx27-fec";
reg = <0x83fec000 0x4000>;
interrupts = <87>;
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
index 3664d37e6799..b4480d5c3aca 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
@@ -4,6 +4,8 @@ Required properties:
- compatible : "st,spear300-pinmux"
: "st,spear310-pinmux"
: "st,spear320-pinmux"
+ : "st,spear1310-pinmux"
+ : "st,spear1340-pinmux"
- reg : Address range of the pinctrl registers
- st,pinmux-mode: Mandatory for SPEAr300 and SPEAr320 and invalid for others.
- Its values for SPEAr300:
@@ -89,6 +91,37 @@ For SPEAr320 machines:
"rmii0_1_grp", "i2c1_8_9_grp", "i2c1_98_99_grp", "i2c2_0_1_grp",
"i2c2_2_3_grp", "i2c2_19_20_grp", "i2c2_75_76_grp", "i2c2_96_97_grp"
+For SPEAr1310 machines:
+ "i2c0_grp", "ssp0_grp", "ssp0_cs0_grp", "ssp0_cs1_2_grp", "i2s0_grp",
+ "i2s1_grp", "clcd_grp", "clcd_high_res_grp", "arm_gpio_grp",
+ "smi_2_chips_grp", "smi_4_chips_grp", "gmii_grp", "rgmii_grp",
+ "smii_0_1_2_grp", "ras_mii_txclk_grp", "nand_8bit_grp",
+ "nand_16bit_grp", "nand_4_chips_grp", "keyboard_6x6_grp",
+ "keyboard_rowcol6_8_grp", "uart0_grp", "uart0_modem_grp",
+ "gpt0_tmr0_grp", "gpt0_tmr1_grp", "gpt1_tmr0_grp", "gpt1_tmr1_grp",
+ "sdhci_grp", "cf_grp", "xd_grp", "touch_xy_grp",
+ "uart1_disable_i2c_grp", "uart1_disable_sd_grp", "uart2_3_grp",
+ "uart4_grp", "uart5_grp", "rs485_0_1_tdm_0_1_grp", "i2c_1_2_grp",
+ "i2c3_dis_smi_clcd_grp", "i2c3_dis_sd_i2s0_grp", "i2c_4_5_dis_smi_grp",
+ "i2c4_dis_sd_grp", "i2c5_dis_sd_grp", "i2c_6_7_dis_kbd_grp",
+ "i2c6_dis_sd_grp", "i2c7_dis_sd_grp", "can0_dis_nor_grp",
+ "can0_dis_sd_grp", "can1_dis_sd_grp", "can1_dis_kbd_grp", "pcie0_grp",
+ "pcie1_grp", "pcie2_grp", "sata0_grp", "sata1_grp", "sata2_grp",
+ "ssp1_dis_kbd_grp", "ssp1_dis_sd_grp", "gpt64_grp"
+
+For SPEAr1340 machines:
+ "pads_as_gpio_grp", "fsmc_8bit_grp", "fsmc_16bit_grp", "fsmc_pnor_grp",
+ "keyboard_row_col_grp", "keyboard_col5_grp", "spdif_in_grp",
+ "spdif_out_grp", "gpt_0_1_grp", "pwm0_grp", "pwm1_grp", "pwm2_grp",
+ "pwm3_grp", "vip_mux_grp", "vip_mux_cam0_grp", "vip_mux_cam1_grp",
+ "vip_mux_cam2_grp", "vip_mux_cam3_grp", "cam0_grp", "cam1_grp",
+ "cam2_grp", "cam3_grp", "smi_grp", "ssp0_grp", "ssp0_cs1_grp",
+ "ssp0_cs2_grp", "ssp0_cs3_grp", "uart0_grp", "uart0_enh_grp",
+ "uart1_grp", "i2s_in_grp", "i2s_out_grp", "gmii_grp", "rgmii_grp",
+ "rmii_grp", "sgmii_grp", "i2c0_grp", "i2c1_grp", "cec0_grp", "cec1_grp",
+ "sdhci_grp", "cf_grp", "xd_grp", "clcd_grp", "arm_trace_grp",
+ "miphy_dbg_grp", "pcie_grp", "sata_grp"
+
Valid values for function names are:
For All SPEAr3xx machines:
"firda", "i2c0", "ssp_cs", "ssp0", "mii0", "gpio0", "uart0_ext",
@@ -106,3 +139,17 @@ For SPEAr320 machines:
"uart2", "uart3", "uart4", "uart5", "uart6", "rs485", "touchscreen",
"can0", "can1", "pwm0_1", "pwm2", "pwm3", "ssp1", "ssp2", "mii2",
"mii0_1", "i2c1", "i2c2"
+
+
+For SPEAr1310 machines:
+ "i2c0", "ssp0", "i2s0", "i2s1", "clcd", "arm_gpio", "smi", "gmii",
+ "rgmii", "smii_0_1_2", "ras_mii_txclk", "nand", "keyboard", "uart0",
+ "gpt0", "gpt1", "sdhci", "cf", "xd", "touchscreen", "uart1", "uart2_3",
+ "uart4", "uart5", "rs485_0_1_tdm_0_1", "i2c_1_2", "i2c3_i2s1",
+ "i2c_4_5", "i2c_6_7", "can0", "can1", "pci", "sata", "ssp1", "gpt64"
+
+For SPEAr1340 machines:
+ "pads_as_gpio", "fsmc", "keyboard", "spdif_in", "spdif_out", "gpt_0_1",
+ "pwm", "vip", "cam0", "cam1", "cam2", "cam3", "smi", "ssp0", "uart0",
+ "uart1", "i2s", "gmac", "i2c0", "i2c1", "cec0", "cec1", "sdhci", "cf",
+ "xd", "clcd", "arm_trace", "miphy_dbg", "pcie", "sata"
diff --git a/Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt b/Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt
new file mode 100644
index 000000000000..a87a1e9bc060
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt
@@ -0,0 +1,15 @@
+* NXP LPC32xx SoC Real Time Clock controller
+
+Required properties:
+- compatible: must be "nxp,lpc3220-rtc"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: The RTC interrupt
+
+Example:
+
+ rtc@40024000 {
+ compatible = "nxp,lpc3220-rtc";
+ reg = <0x40024000 0x1000>;
+ interrupts = <52 0>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/spear-rtc.txt b/Documentation/devicetree/bindings/rtc/spear-rtc.txt
new file mode 100644
index 000000000000..ca67ac62108e
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/spear-rtc.txt
@@ -0,0 +1,17 @@
+* SPEAr RTC
+
+Required properties:
+- compatible : "st,spear600-rtc"
+- reg : Address range of the rtc registers
+- interrupt-parent: Should be the phandle for the interrupt controller
+ that services interrupts for this device
+- interrupt: Should contain the rtc interrupt number
+
+Example:
+
+ rtc@fc000000 {
+ compatible = "st,spear600-rtc";
+ reg = <0xfc000000 0x1000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <12>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/omap-dmic.txt b/Documentation/devicetree/bindings/sound/omap-dmic.txt
new file mode 100644
index 000000000000..fd8105f18978
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/omap-dmic.txt
@@ -0,0 +1,21 @@
+* Texas Instruments OMAP4+ Digital Microphone Module
+
+Required properties:
+- compatible: "ti,omap4-dmic"
+- reg: Register location and size as an array:
+ <MPU access base address, size>,
+ <L3 interconnect address, size>;
+- interrupts: Interrupt number for DMIC
+- interrupt-parent: The parent interrupt controller
+- ti,hwmods: Name of the hwmod associated with OMAP dmic IP
+
+Example:
+
+dmic: dmic@4012e000 {
+ compatible = "ti,omap4-dmic";
+ reg = <0x4012e000 0x7f>, /* MPU private access */
+ <0x4902e000 0x7f>; /* L3 Interconnect */
+ interrupts = <0 114 0x4>;
+ interrupt-parent = <&gic>;
+ ti,hwmods = "dmic";
+};
diff --git a/Documentation/devicetree/bindings/sound/omap-mcpdm.txt b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
new file mode 100644
index 000000000000..0741dff048dd
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
@@ -0,0 +1,21 @@
+* Texas Instruments OMAP4+ McPDM
+
+Required properties:
+- compatible: "ti,omap4-mcpdm"
+- reg: Register location and size as an array:
+ <MPU access base address, size>,
+ <L3 interconnect address, size>;
+- interrupts: Interrupt number for McPDM
+- interrupt-parent: The parent interrupt controller
+- ti,hwmods: Name of the hwmod associated to the McPDM
+
+Example:
+
+mcpdm: mcpdm@40132000 {
+ compatible = "ti,omap4-mcpdm";
+ reg = <0x40132000 0x7f>, /* MPU private access */
+ <0x49032000 0x7f>; /* L3 Interconnect */
+ interrupts = <0 112 0x4>;
+ interrupt-parent = <&gic>;
+ ti,hwmods = "mcpdm";
+};
diff --git a/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
index a9c0406280e8..b462d0c54823 100644
--- a/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
@@ -11,7 +11,7 @@ Optional properties:
Example:
-uart@73fbc000 {
+serial@73fbc000 {
compatible = "fsl,imx51-uart", "fsl,imx21-uart";
reg = <0x73fbc000 0x4000>;
interrupts = <31>;
diff --git a/Documentation/devicetree/bindings/usb/tegra-usb.txt b/Documentation/devicetree/bindings/usb/tegra-usb.txt
index 007005ddbe12..e9b005dc7625 100644
--- a/Documentation/devicetree/bindings/usb/tegra-usb.txt
+++ b/Documentation/devicetree/bindings/usb/tegra-usb.txt
@@ -12,6 +12,9 @@ Required properties :
- nvidia,vbus-gpio : If present, specifies a gpio that needs to be
activated for the bus to be powered.
+Required properties for phy_type == ulpi:
+ - nvidia,phy-reset-gpio : The GPIO used to reset the PHY.
+
Optional properties:
- dr_mode : dual role mode. Indicates the working mode for
nvidia,tegra20-ehci compatible controllers. Can be "host", "peripheral",
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 107d8addf0e4..6eab91747a86 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -14,6 +14,7 @@ chrp Common Hardware Reference Platform
cortina Cortina Systems, Inc.
dallas Maxim Integrated Products (formerly Dallas Semiconductor)
denx Denx Software Engineering
+emmicro EM Microelectronic
epson Seiko Epson Corp.
est ESTeem Wireless Modems
fsl Freescale Semiconductor
diff --git a/Documentation/devicetree/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt
index da0bfeb4253d..d4d66757354e 100644
--- a/Documentation/devicetree/booting-without-of.txt
+++ b/Documentation/devicetree/booting-without-of.txt
@@ -551,12 +551,13 @@ Here is an example of a simple device-tree. In this example, an "o"
designates a node followed by the node unit name. Properties are
presented with their name followed by their content. "content"
represents an ASCII string (zero terminated) value, while <content>
-represents a 32-bit hexadecimal value. The various nodes in this
-example will be discussed in a later chapter. At this point, it is
-only meant to give you a idea of what a device-tree looks like. I have
-purposefully kept the "name" and "linux,phandle" properties which
-aren't necessary in order to give you a better idea of what the tree
-looks like in practice.
+represents a 32-bit value, specified in decimal or hexadecimal (the
+latter prefixed with 0x). The various nodes in this example will be
+discussed in a later chapter. At this point, it is only meant to give
+you an idea of what a device-tree looks like. I have purposefully kept
+the "name" and "linux,phandle" properties which aren't necessary in
+order to give you a better idea of what the tree looks like in
+practice.
/ o device-tree
|- name = "device-tree"
@@ -576,14 +577,14 @@ looks like in practice.
| |- name = "PowerPC,970"
| |- device_type = "cpu"
| |- reg = <0>
- | |- clock-frequency = <5f5e1000>
+ | |- clock-frequency = <0x5f5e1000>
| |- 64-bit
| |- linux,phandle = <2>
|
o memory@0
| |- name = "memory"
| |- device_type = "memory"
- | |- reg = <00000000 00000000 00000000 20000000>
+ | |- reg = <0x00000000 0x00000000 0x00000000 0x20000000>
| |- linux,phandle = <3>
|
o chosen
@@ -1010,8 +1011,8 @@ compatibility.
#size-cells = <1>;
#interrupt-cells = <2>;
device_type = "soc";
- ranges = <00000000 e0000000 00100000>
- reg = <e0000000 00003000>;
+ ranges = <0x00000000 0xe0000000 0x00100000>
+ reg = <0xe0000000 0x00003000>;
bus-frequency = <0>;
}
@@ -1085,16 +1086,16 @@ supported currently at the toplevel.
* terminated string
*/
- property2 = <1234abcd>; /* define a property containing a
+ property2 = <0x1234abcd>; /* define a property containing a
* numerical 32-bit value (hexadecimal)
*/
- property3 = <12345678 12345678 deadbeef>;
+ property3 = <0x12345678 0x12345678 0xdeadbeef>;
/* define a property containing 3
* numerical 32-bit values (cells) in
* hexadecimal
*/
- property4 = [0a 0b 0c 0d de ea ad be ef];
+ property4 = [0x0a 0x0b 0x0c 0x0d 0xde 0xea 0xad 0xbe 0xef];
/* define a property whose content is
* an arbitrary array of bytes
*/
@@ -1350,10 +1351,10 @@ Appendix A - Sample SOC node for MPC8540
model = "TSEC";
compatible = "gianfar", "simple-bus";
reg = <0x24000 0x1000>;
- local-mac-address = [ 00 E0 0C 00 73 00 ];
- interrupts = <29 2 30 2 34 2>;
+ local-mac-address = [ 0x00 0xE0 0x0C 0x00 0x73 0x00 ];
+ interrupts = <0x29 2 0x30 2 0x34 2>;
phy-handle = <&phy0>;
- sleep = <&pmc 00000080>;
+ sleep = <&pmc 0x00000080>;
ranges;
mdio@24520 {
@@ -1385,10 +1386,10 @@ Appendix A - Sample SOC node for MPC8540
model = "TSEC";
compatible = "gianfar";
reg = <0x25000 0x1000>;
- local-mac-address = [ 00 E0 0C 00 73 01 ];
- interrupts = <13 2 14 2 18 2>;
+ local-mac-address = [ 0x00 0xE0 0x0C 0x00 0x73 0x01 ];
+ interrupts = <0x13 2 0x14 2 0x18 2>;
phy-handle = <&phy1>;
- sleep = <&pmc 00000040>;
+ sleep = <&pmc 0x00000040>;
};
ethernet@26000 {
@@ -1396,17 +1397,17 @@ Appendix A - Sample SOC node for MPC8540
model = "FEC";
compatible = "gianfar";
reg = <0x26000 0x1000>;
- local-mac-address = [ 00 E0 0C 00 73 02 ];
- interrupts = <41 2>;
+ local-mac-address = [ 0x00 0xE0 0x0C 0x00 0x73 0x02 ];
+ interrupts = <0x41 2>;
phy-handle = <&phy3>;
- sleep = <&pmc 00000020>;
+ sleep = <&pmc 0x00000020>;
};
serial@4500 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,mpc8540-duart", "simple-bus";
- sleep = <&pmc 00000002>;
+ sleep = <&pmc 0x00000002>;
ranges;
serial@4500 {
@@ -1414,7 +1415,7 @@ Appendix A - Sample SOC node for MPC8540
compatible = "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
- interrupts = <42 2>;
+ interrupts = <0x42 2>;
};
serial@4600 {
@@ -1422,7 +1423,7 @@ Appendix A - Sample SOC node for MPC8540
compatible = "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
- interrupts = <42 2>;
+ interrupts = <0x42 2>;
};
};
@@ -1436,11 +1437,11 @@ Appendix A - Sample SOC node for MPC8540
};
i2c@3000 {
- interrupts = <43 2>;
+ interrupts = <0x43 2>;
reg = <0x3000 0x100>;
compatible = "fsl-i2c";
dfsrr;
- sleep = <&pmc 00000004>;
+ sleep = <&pmc 0x00000004>;
};
pmc: power@e0070 {
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
index 3bbd5c51605a..ad86fb86c9a0 100644
--- a/Documentation/dma-buf-sharing.txt
+++ b/Documentation/dma-buf-sharing.txt
@@ -29,13 +29,6 @@ The buffer-user
in memory, mapped into its own address space, so it can access the same area
of memory.
-*IMPORTANT*: [see https://lkml.org/lkml/2011/12/20/211 for more details]
-For this first version, A buffer shared using the dma_buf sharing API:
-- *may* be exported to user space using "mmap" *ONLY* by exporter, outside of
- this framework.
-- with this new iteration of the dma-buf api cpu access from the kernel has been
- enable, see below for the details.
-
dma-buf operations for device dma only
--------------------------------------
@@ -300,6 +293,17 @@ Access to a dma_buf from the kernel context involves three steps:
Note that these calls need to always succeed. The exporter needs to complete
any preparations that might fail in begin_cpu_access.
+ For some cases the overhead of kmap can be too high, so a vmap interface
+ is introduced. This interface should be used very carefully, as vmalloc
+ space is a limited resource on many architectures.
+
+ Interfaces:
+ void *dma_buf_vmap(struct dma_buf *dmabuf)
+ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+
+ The vmap call can fail if there is no vmap support in the exporter, or if it
+ runs out of vmalloc space. Fallback to kmap should be implemented.
+
3. Finish access
When the importer is done accessing the range specified in begin_cpu_access,
@@ -313,6 +317,83 @@ Access to a dma_buf from the kernel context involves three steps:
enum dma_data_direction dir);
+Direct Userspace Access/mmap Support
+------------------------------------
+
+Being able to mmap an exported dma-buf buffer object has 2 main use-cases:
+- CPU fallback processing in a pipeline and
+- supporting existing mmap interfaces in importers.
+
+1. CPU fallback processing in a pipeline
+
+ In many processing pipelines it is sometimes required that the cpu can access
+ the data in a dma-buf (e.g. for thumbnail creation, snapshots, ...). To avoid
+ the need to handle this specially in userspace frameworks for buffer sharing
+ it's ideal if the dma_buf fd itself can be used to access the backing storage
+ from userspace using mmap.
+
+ Furthermore Android's ION framework already supports this (and is otherwise
+ rather similar to dma-buf from a userspace consumer side with using fds as
+ handles, too). So it's beneficial to support this in a similar fashion on
+ dma-buf to have a good transition path for existing Android userspace.
+
+ No special interfaces are needed; userspace simply calls mmap on the
+ dma-buf fd.
+
+2. Supporting existing mmap interfaces in exporters
+
+ Similar to the motivation for kernel cpu access, it is again important that
+ the userspace code of a given importing subsystem can use the same interfaces
+ with an imported dma-buf buffer object as with a native buffer object. This is
+ especially important for drm where the userspace part of contemporary OpenGL,
+ X, and other drivers is huge, and reworking them to use a different way to
+ mmap a buffer is rather invasive.
+
+ The assumption in the current dma-buf interfaces is that redirecting the
+ initial mmap is all that's needed. A survey of some of the existing
+ subsystems shows that no driver seems to do any nefarious thing like syncing
+ up with outstanding asynchronous processing on the device or allocating
+ special resources at fault time. So hopefully this is good enough, since
+ adding interfaces to intercept pagefaults and allow pte shootdowns would
+ increase the complexity quite a bit.
+
+ Interface:
+ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
+ unsigned long);
+
+ If the importing subsystem simply provides a special-purpose mmap call to set
+ up a mapping in userspace, calling do_mmap with dma_buf->file will equally
+ achieve that for a dma-buf object.
+
+3. Implementation notes for exporters
+
+ Because dma-buf buffers have invariant size over their lifetime, the dma-buf
+ core checks whether a vma is too large and rejects such mappings. The
+ exporter hence does not need to duplicate this check.
+
+ Because existing importing subsystems might presume coherent mappings for
+ userspace, the exporter needs to set up a coherent mapping. If that's not
+ possible, it needs to fake coherency by manually shooting down ptes when
+ leaving the cpu domain and flushing caches at fault time. Note that all the
+ dma_buf files share the same anon inode, hence the exporter needs to replace
+ the dma_buf file stored in vma->vm_file with its own if pte shootdown is
+ required. This is because the kernel uses the underlying inode's address_space
+ for vma tracking (and hence pte tracking at shootdown time with
+ unmap_mapping_range).
+
+ If the above shootdown dance turns out to be too expensive in certain
+ scenarios, we can extend dma-buf with a more explicit cache tracking scheme
+ for userspace mappings. But the current assumption is that using mmap is
+ always a slower path, so some inefficiencies should be acceptable.
+
+ Exporters that shoot down mappings (for any reason) shall not do any
+ synchronization at fault time with outstanding device operations.
+ Synchronization is an orthogonal issue to sharing the backing storage of a
+ buffer and hence should not be handled by dma-buf itself. This is explicitly
+ mentioned here because many people seem to want something like this, but if
+ different exporters handle this differently, buffer sharing can fail in
+ interesting ways depending upon the exporter (if userspace starts depending
+ upon this implicit synchronization).
+
Miscellaneous notes
-------------------
@@ -336,6 +417,20 @@ Miscellaneous notes
the exporting driver to create a dmabuf fd must provide a way to let
userspace control setting of O_CLOEXEC flag passed in to dma_buf_fd().
+- If an exporter needs to manually flush caches and hence needs to fake
+ coherency for mmap support, it needs to be able to zap all the ptes pointing
+ at the backing storage. Now linux mm needs a struct address_space associated
+ with the struct file stored in vma->vm_file to do that with the function
+ unmap_mapping_range. But the dma_buf framework only backs every dma_buf fd
+ with the anon_file struct file, i.e. all dma_bufs share the same file.
+
+ Hence exporters need to set up their own file (and address_space) association
+ by setting vma->vm_file and adjusting vma->vm_pgoff in the dma_buf mmap
+ callback. In the specific case of a gem driver the exporter could use the
+ shmem file already provided by gem (and set vm_pgoff = 0). Exporters can then
+ zap ptes by unmapping the corresponding range of the struct address_space
+ associated with their own file.
+
References:
[1] struct dma_buf_ops in include/linux/dma-buf.h
[2] All interfaces mentioned above defined in include/linux/dma-buf.h
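The kernel vmap/kmap access and the mmap redirect described above can be
combined roughly as follows. This is only a sketch, not part of the patch;
the importer_* names are made up for illustration, and error handling plus
the begin/end_cpu_access bracketing discussed earlier are omitted for
brevity.

	#include <linux/dma-buf.h>
	#include <linux/mm.h>

	/* Kernel cpu access: prefer dma_buf_vmap(), fall back to per-page
	 * dma_buf_kmap() when the exporter has no vmap support or vmalloc
	 * space is exhausted. */
	static void importer_cpu_touch(struct dma_buf *dmabuf)
	{
		unsigned long npages = dmabuf->size >> PAGE_SHIFT;
		unsigned long i;
		void *vaddr = dma_buf_vmap(dmabuf);

		if (vaddr) {
			/* ... access the whole buffer through vaddr ... */
			dma_buf_vunmap(dmabuf, vaddr);
			return;
		}

		for (i = 0; i < npages; i++) {
			void *kaddr = dma_buf_kmap(dmabuf, i);

			/* ... access one PAGE_SIZE chunk at kaddr ... */
			dma_buf_kunmap(dmabuf, i, kaddr);
		}
	}

	/* Userspace mmap support: an importer's own mmap path can simply
	 * hand the vma over to the exporter via dma_buf_mmap(). */
	static int importer_mmap_redirect(struct dma_buf *dmabuf,
					  struct vm_area_struct *vma)
	{
		return dma_buf_mmap(dmabuf, vma, 0 /* pgoff into the buffer */);
	}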
diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware
index d1d4a179a382..fbb241174486 100755
--- a/Documentation/dvb/get_dvb_firmware
+++ b/Documentation/dvb/get_dvb_firmware
@@ -28,7 +28,8 @@ use IO::Handle;
"opera1", "cx231xx", "cx18", "cx23885", "pvrusb2", "mpc718",
"af9015", "ngene", "az6027", "lme2510_lg", "lme2510c_s7395",
"lme2510c_s7395_old", "drxk", "drxk_terratec_h5",
- "drxk_hauppauge_hvr930c", "tda10071", "it9135", "it9137");
+ "drxk_hauppauge_hvr930c", "tda10071", "it9135", "it9137",
+ "drxk_pctv");
# Check args
syntax() if (scalar(@ARGV) != 1);
@@ -730,6 +731,23 @@ sub tda10071 {
"$fwfile";
}
+sub drxk_pctv {
+ my $sourcefile = "PCTV_460e_reference.zip";
+ my $url = "ftp://ftp.pctvsystems.com/TV/driver/PCTV%2070e%2080e%20100e%20320e%20330e%20800e/";
+ my $hash = "4403de903bf2593464c8d74bbc200a57";
+ my $fwfile = "dvb-demod-drxk-pctv.fw";
+ my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1);
+
+ checkstandard();
+
+ wgetfile($sourcefile, $url . $sourcefile);
+ verify($sourcefile, $hash);
+ unzip($sourcefile, $tmpdir);
+ extract("$tmpdir/PCTV\ 70e\ 80e\ 100e\ 320e\ 330e\ 800e/32\ bit/emOEM.sys", 0x72b80, 42692, $fwfile);
+
+ "$fwfile";
+}
+
# ---------------------------------------------------------------
# Utilities
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 1e69a81e99d4..56000b33340b 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -541,6 +541,18 @@ Who: Kees Cook <keescook@chromium.org>
----------------------------
+What: Removing the pn544 raw driver.
+When: 3.6
+Why: With the introduction of the NFC HCI and SHDL kernel layers, pn544.c
+ is being replaced by pn544_hci.c which is accessible through the netlink
+ and socket NFC APIs. Moreover, pn544.c is outdated and does not seem to
+ work properly with the latest Android stacks.
+ Having 2 drivers for the same hardware is confusing and as such we
+ should only keep the one following the kernel NFC APIs.
+Who: Samuel Ortiz <sameo@linux.intel.com>
+
+----------------------------
+
What: setitimer accepts user NULL pointer (value)
When: 3.6
Why: setitimer is not returning -EFAULT if user pointer is NULL. This
@@ -549,6 +561,15 @@ Who: Sasikantha Babu <sasikanth.v19@gmail.com>
----------------------------
+What: remove bogus DV presets V4L2_DV_1080I29_97, V4L2_DV_1080I30 and
+ V4L2_DV_1080I25
+When: 3.6
+Why: These HDTV formats do not exist and were added by a confused mind
+ (that was me, to be precise...)
+Who: Hans Verkuil <hans.verkuil@cisco.com>
+
+----------------------------
+
What: V4L2_CID_HCENTER, V4L2_CID_VCENTER V4L2 controls
When: 3.7
Why: The V4L2_CID_VCENTER, V4L2_CID_HCENTER controls have been deprecated
@@ -567,3 +588,27 @@ Why: Remount currently allows changing bound subsystems and
replaced with conventional fsnotify.
----------------------------
+
+What: KVM debugfs statistics
+When: 2013
+Why: KVM tracepoints provide mostly equivalent information in a much more
+ flexible fashion.
+
+----------------------------
+
+What: at91-mci driver ("CONFIG_MMC_AT91")
+When: 3.7
+Why: There are two mci drivers: at91-mci and atmel-mci. The PDC support
+ was added to atmel-mci as a first step to support more chips.
+ Then at91-mci was kept only for old IP versions (on at91rm9200 and
+ at91sam9261). The support of these IP versions has just been added
+ to atmel-mci, so atmel-mci can be used for all chips.
+Who: Ludovic Desroches <ludovic.desroches@atmel.com>
+
+----------------------------
+
+What: net/wanrouter/
+When: June 2013
+Why: Unsupported/unmaintained/unused since 2.6
+
+----------------------------
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 4fca82e5276e..d449e632e6a0 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -60,7 +60,6 @@ ata *);
ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
- void (*truncate_range)(struct inode *, loff_t, loff_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
locking rules:
@@ -87,7 +86,6 @@ setxattr: yes
getxattr: no
listxattr: no
removexattr: yes
-truncate_range: yes
fiemap: no
Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
victim.
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index b100adc38adb..293855e95000 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -59,9 +59,9 @@ commit=nrsec (*) Ext3 can be told to sync all its data and metadata
Setting it to very large values will improve
performance.
-barrier=<0(*)|1> This enables/disables the use of write barriers in
-barrier the jbd code. barrier=0 disables, barrier=1 enables.
-nobarrier (*) This also requires an IO stack which can support
+barrier=<0|1(*)> This enables/disables the use of write barriers in
+barrier (*) the jbd code. barrier=0 disables, barrier=1 enables.
+nobarrier This also requires an IO stack which can support
barriers, and if jbd gets an error on a barrier
write, it will disable again with a warning.
Write barriers enforce proper on-disk ordering
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 74acd9618819..8c91d1057d9a 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -297,7 +297,8 @@ in the beginning of ->setattr unconditionally.
be used instead. It gets called whenever the inode is evicted, whether it has
remaining links or not. Caller does *not* evict the pagecache or inode-associated
metadata buffers; getting rid of those is responsibility of method, as it had
-been for ->delete_inode().
+been for ->delete_inode(). Caller makes sure async writeback cannot be running
+for the inode while (or after) ->evict_inode() is called.
->drop_inode() returns int now; it's called on final iput() with
inode->i_lock held and it returns true if filesystems wants the inode to be
@@ -306,14 +307,11 @@ updated appropriately. generic_delete_inode() is also alive and it consists
simply of return 1. Note that all actual eviction work is done by caller after
->drop_inode() returns.
- clear_inode() is gone; use end_writeback() instead. As before, it must
-be called exactly once on each call of ->evict_inode() (as it used to be for
-each call of ->delete_inode()). Unlike before, if you are using inode-associated
-metadata buffers (i.e. mark_buffer_dirty_inode()), it's your responsibility to
-call invalidate_inode_buffers() before end_writeback().
- No async writeback (and thus no calls of ->write_inode()) will happen
-after end_writeback() returns, so actions that should not overlap with ->write_inode()
-(e.g. freeing on-disk inode if i_nlink is 0) ought to be done after that call.
+ As before, clear_inode() must be called exactly once on each call of
+->evict_inode() (as it used to be for each call of ->delete_inode()). Unlike
+before, if you are using inode-associated metadata buffers (i.e.
+mark_buffer_dirty_inode()), it's your responsibility to call
+invalidate_inode_buffers() before clear_inode().
NOTE: checking i_nlink in the beginning of ->write_inode() and bailing out
if it's zero is not *and* *never* *had* *been* enough. Final unlink() and iput()
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index ef088e55ab2e..fb0a6aeb936c 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -40,6 +40,7 @@ Table of Contents
3.4 /proc/<pid>/coredump_filter - Core dump filtering settings
3.5 /proc/<pid>/mountinfo - Information about mounts
3.6 /proc/<pid>/comm & /proc/<pid>/task/<tid>/comm
+ 3.7 /proc/<pid>/task/<tid>/children - Information about task children
4 Configuring procfs
4.1 Mount options
@@ -310,6 +311,11 @@ Table 1-4: Contents of the stat files (as of 2.6.30-rc7)
start_data address above which program data+bss is placed
end_data address below which program data+bss is placed
start_brk address above which program heap can be expanded with brk()
+ arg_start address above which program command line is placed
+ arg_end address below which program command line is placed
+ env_start address above which program environment is placed
+ env_end address below which program environment is placed
+ exit_code the thread's exit_code in the form reported by the waitpid system call
..............................................................................
The /proc/PID/maps file containing the currently mapped memory regions and
@@ -743,6 +749,7 @@ Committed_AS: 100056 kB
VmallocTotal: 112216 kB
VmallocUsed: 428 kB
VmallocChunk: 111088 kB
+AnonHugePages: 49152 kB
MemTotal: Total usable ram (i.e. physical ram minus a few reserved
bits and the kernel binary code)
@@ -776,6 +783,7 @@ VmallocChunk: 111088 kB
Dirty: Memory which is waiting to get written back to the disk
Writeback: Memory which is actively being written back to the disk
AnonPages: Non-file backed pages mapped into userspace page tables
+AnonHugePages: Non-file backed huge pages mapped into userspace page tables
Mapped: files which have been mmaped, such as libraries
Slab: in-kernel data structures cache
SReclaimable: Part of Slab, that might be reclaimed, such as caches
@@ -1576,6 +1584,23 @@ then the kernel's TASK_COMM_LEN (currently 16 chars) will result in a truncated
comm value.
+3.7 /proc/<pid>/task/<tid>/children - Information about task children
+-------------------------------------------------------------------------
+This file provides a fast way to retrieve the first-level children pids
+of a task pointed to by the <pid>/<tid> pair. The format is a space-separated
+stream of pids.
+
+Note the "first level" here -- if a child has its own children they will
+not be listed here; one needs to read /proc/<children-pid>/task/<tid>/children
+to obtain the descendants.
+
+Since this interface is intended to be fast and cheap, it doesn't
+guarantee precise results: some children might be skipped, especially if
+they've exited right after we printed their pids. One needs to either stop
+or freeze the processes being inspected if precise results are needed.
+
+
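Since the children file is just a space-separated list of pids, parsing it
from userspace is straightforward; a small sketch (not part of the patch,
the pid/tid values are whatever task you are inspecting):

	#include <stdio.h>

	/* Print the first-level children of thread <tid> of process <pid>. */
	static void print_children(int pid, int tid)
	{
		char path[64];
		int child;
		FILE *f;

		snprintf(path, sizeof(path),
			 "/proc/%d/task/%d/children", pid, tid);
		f = fopen(path, "r");
		if (!f)
			return;

		while (fscanf(f, "%d", &child) == 1)
			printf("%d\n", child);

		fclose(f);
	}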
------------------------------------------------------------------------------
Configuring procfs
------------------------------------------------------------------------------
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 0d0492028082..ef19f91a0f12 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -363,7 +363,6 @@ struct inode_operations {
ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
- void (*truncate_range)(struct inode *, loff_t, loff_t);
};
Again, all methods are called without any locks being held, unless
@@ -472,9 +471,6 @@ otherwise noted.
removexattr: called by the VFS to remove an extended attribute from
a file. This method is called by removexattr(2) system call.
- truncate_range: a method provided by the underlying filesystem to truncate a
- range of blocks , i.e. punch a hole somewhere in a file.
-
The Address Space Object
========================
@@ -760,7 +756,7 @@ struct file_operations
----------------------
This describes how the VFS can manipulate an open file. As of kernel
-2.6.22, the following members are defined:
+3.5, the following members are defined:
struct file_operations {
struct module *owner;
@@ -790,6 +786,8 @@ struct file_operations {
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, struct pipe_inode_info *, size_t, unsigned int);
+ int (*setlease)(struct file *, long arg, struct file_lock **);
+ long (*fallocate)(struct file *, int mode, loff_t offset, loff_t len);
};
Again, all methods are called without any locks being held, unless
@@ -858,6 +856,11 @@ otherwise noted.
splice_read: called by the VFS to splice data from file to a pipe. This
method is used by the splice(2) system call
+ setlease: called by the VFS to set or release a file lock lease.
+ setlease has the file_lock_lock held and must not sleep.
+
+ fallocate: called by the VFS to preallocate blocks or punch a hole.
+
Note that the file operations are implemented by the specific
filesystem in which the inode resides. When opening a device node
(character or block special) most filesystems will call special
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index 620a07844e8c..e08a883de36e 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -322,6 +322,9 @@ where 'flags' is currently defined to specify the following properties:
* GPIOF_OPEN_DRAIN - gpio pin is open drain type.
* GPIOF_OPEN_SOURCE - gpio pin is open source type.
+ * GPIOF_EXPORT_DIR_FIXED - export gpio to sysfs, keep direction
+ * GPIOF_EXPORT_DIR_CHANGEABLE - also export, allow changing direction
+
since GPIOF_INIT_* are only valid when configured as output, so group valid
combinations as:
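As a hedged illustration of the two new export flags above, a driver might
request and export a GPIO in one call with gpio_request_one(); the GPIO
number and label here are placeholders:

	#include <linux/gpio.h>

	static int example_export_gpio(void)
	{
		/* Output driven low, exported to sysfs, direction fixed so
		 * userspace cannot flip it to an input. */
		return gpio_request_one(23,
				GPIOF_OUT_INIT_LOW | GPIOF_EXPORT_DIR_FIXED,
				"status_led");
	}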
diff --git a/Documentation/i2c/functionality b/Documentation/i2c/functionality
index 42c17c1fb3cd..b0ff2ab596ce 100644
--- a/Documentation/i2c/functionality
+++ b/Documentation/i2c/functionality
@@ -18,9 +18,9 @@ For the most up-to-date list of functionality constants, please check
adapters typically can not do these)
I2C_FUNC_10BIT_ADDR Handles the 10-bit address extensions
I2C_FUNC_PROTOCOL_MANGLING Knows about the I2C_M_IGNORE_NAK,
- I2C_M_REV_DIR_ADDR, I2C_M_NOSTART and
- I2C_M_NO_RD_ACK flags (which modify the
- I2C protocol!)
+ I2C_M_REV_DIR_ADDR and I2C_M_NO_RD_ACK
+ flags (which modify the I2C protocol!)
+ I2C_FUNC_NOSTART Can skip repeated start sequence
I2C_FUNC_SMBUS_QUICK Handles the SMBus write_quick command
I2C_FUNC_SMBUS_READ_BYTE Handles the SMBus read_byte command
I2C_FUNC_SMBUS_WRITE_BYTE Handles the SMBus write_byte command
@@ -50,6 +50,9 @@ A few combinations of the above flags are also defined for your convenience:
emulated by a real I2C adapter (using
the transparent emulation layer)
+In kernel versions prior to 3.5 I2C_FUNC_NOSTART was implemented as
+part of I2C_FUNC_PROTOCOL_MANGLING.
+
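A client driver that wants to use I2C_M_NOSTART would typically check for
the new functionality bit first; a minimal sketch (the adapter pointer is
assumed to come from the client device):

	#include <linux/i2c.h>

	static int example_check_nostart(struct i2c_adapter *adap)
	{
		/* Non-zero means the adapter can skip the repeated start. */
		if (!i2c_check_functionality(adap, I2C_FUNC_NOSTART))
			return -ENODEV;

		return 0;
	}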
ADAPTER IMPLEMENTATION
----------------------
diff --git a/Documentation/i2c/i2c-protocol b/Documentation/i2c/i2c-protocol
index 10518dd58814..0b3e62d1f77a 100644
--- a/Documentation/i2c/i2c-protocol
+++ b/Documentation/i2c/i2c-protocol
@@ -49,7 +49,9 @@ a byte read, followed by a byte write:
Modified transactions
=====================
-We have found some I2C devices that needs the following modifications:
+The following modifications to the I2C protocol can also be generated;
+with the exception of I2C_M_NOSTART, these are usually only needed to
+work around device issues:
Flag I2C_M_NOSTART:
In a combined transaction, no 'S Addr Wr/Rd [A]' is generated at some
@@ -60,6 +62,11 @@ We have found some I2C devices that needs the following modifications:
we do not generate Addr, but we do generate the startbit S. This will
probably confuse all other clients on your bus, so don't try this.
+ This is often used to gather transmits from multiple data buffers in
+ system memory into something that appears as a single transfer to the
+ I2C device but may also be used between direction changes by some
+ rare devices.
+
Flags I2C_M_REV_DIR_ADDR
This toggles the Rd/Wr flag. That is, if you want to do a write, but
need to emit an Rd instead of a Wr, or vice versa, you set this
diff --git a/Documentation/i2c/muxes/gpio-i2cmux b/Documentation/i2c/muxes/i2c-mux-gpio
index 811cd78d4cdc..bd9b2299b739 100644
--- a/Documentation/i2c/muxes/gpio-i2cmux
+++ b/Documentation/i2c/muxes/i2c-mux-gpio
@@ -1,11 +1,11 @@
-Kernel driver gpio-i2cmux
+Kernel driver i2c-gpio-mux
Author: Peter Korsgaard <peter.korsgaard@barco.com>
Description
-----------
-gpio-i2cmux is an i2c mux driver providing access to I2C bus segments
+i2c-gpio-mux is an i2c mux driver providing access to I2C bus segments
from a master I2C bus and a hardware MUX controlled through GPIO pins.
E.G.:
@@ -26,16 +26,16 @@ according to the settings of the GPIO pins 1..N.
Usage
-----
-gpio-i2cmux uses the platform bus, so you need to provide a struct
+i2c-gpio-mux uses the platform bus, so you need to provide a struct
platform_device with the platform_data pointing to a struct
gpio_i2cmux_platform_data with the I2C adapter number of the master
bus, the number of bus segments to create and the GPIO pins used
-to control it. See include/linux/gpio-i2cmux.h for details.
+to control it. See include/linux/i2c-gpio-mux.h for details.
E.G. something like this for a MUX providing 4 bus segments
controlled through 3 GPIO pins:
-#include <linux/gpio-i2cmux.h>
+#include <linux/i2c-gpio-mux.h>
#include <linux/platform_device.h>
static const unsigned myboard_gpiomux_gpios[] = {
@@ -57,7 +57,7 @@ static struct gpio_i2cmux_platform_data myboard_i2cmux_data = {
};
static struct platform_device myboard_i2cmux = {
- .name = "gpio-i2cmux",
+ .name = "i2c-gpio-mux",
.id = 0,
.dev = {
.platform_data = &myboard_i2cmux_data,
diff --git a/Documentation/initrd.txt b/Documentation/initrd.txt
index 1ba84f3584e3..4e1839ccb555 100644
--- a/Documentation/initrd.txt
+++ b/Documentation/initrd.txt
@@ -362,5 +362,5 @@ Resources
http://www.almesberger.net/cv/papers/ols2k-9.ps.gz
[2] newlib package (experimental), with initrd example
http://sources.redhat.com/newlib/
-[3] Brouwer, Andries; "util-linux: Miscellaneous utilities for Linux"
- ftp://ftp.win.tue.nl/pub/linux-local/utils/util-linux/
+[3] util-linux: Miscellaneous utilities for Linux
+ http://www.kernel.org/pub/linux/utils/util-linux/
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 68e32bb6bd80..6466704d47b5 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -50,6 +50,10 @@ LDFLAGS_MODULE
--------------------------------------------------
Additional options used for $(LD) when linking modules.
+LDFLAGS_vmlinux
+--------------------------------------------------
+Additional options passed to final link of vmlinux.
+
KBUILD_VERBOSE
--------------------------------------------------
Set the kbuild verbosity. Can be assigned same values as "V=...".
@@ -214,3 +218,18 @@ KBUILD_BUILD_USER, KBUILD_BUILD_HOST
These two variables allow to override the user@host string displayed during
boot and in /proc/version. The default value is the output of the commands
whoami and host, respectively.
+
+KBUILD_LDS
+--------------------------------------------------
+The linker script with full path. Assigned by the top-level Makefile.
+
+KBUILD_VMLINUX_INIT
+--------------------------------------------------
+All object files for the init (first) part of vmlinux.
+Files specified with KBUILD_VMLINUX_INIT are linked first.
+
+KBUILD_VMLINUX_MAIN
+--------------------------------------------------
+All object files for the main part of vmlinux.
+KBUILD_VMLINUX_INIT and KBUILD_VMLINUX_MAIN together specify
+all the object files used to link vmlinux.
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index 9d5f2a90dca9..a09f1a6a830c 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -53,15 +53,15 @@ KCONFIG_ALLCONFIG
--------------------------------------------------
(partially based on lkml email from/by Rob Landley, re: miniconfig)
--------------------------------------------------
-The allyesconfig/allmodconfig/allnoconfig/randconfig variants can
-also use the environment variable KCONFIG_ALLCONFIG as a flag or a
-filename that contains config symbols that the user requires to be
-set to a specific value. If KCONFIG_ALLCONFIG is used without a
-filename, "make *config" checks for a file named
-"all{yes/mod/no/def/random}.config" (corresponding to the *config command
-that was used) for symbol values that are to be forced. If this file
-is not found, it checks for a file named "all.config" to contain forced
-values.
+The allyesconfig/allmodconfig/allnoconfig/randconfig variants can also
+use the environment variable KCONFIG_ALLCONFIG as a flag or a filename
+that contains config symbols that the user requires to be set to a
+specific value. If KCONFIG_ALLCONFIG is used without a filename where
+KCONFIG_ALLCONFIG == "" or KCONFIG_ALLCONFIG == "1", "make *config"
+checks for a file named "all{yes/mod/no/def/random}.config"
+(corresponding to the *config command that was used) for symbol values
+that are to be forced. If this file is not found, it checks for a
+file named "all.config" to contain forced values.
This enables you to create "miniature" config (miniconfig) or custom
config files containing just the config symbols that you are interested
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 5b6e58492229..c45513d806ab 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -335,6 +335,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
requirements as needed. This option
does not override iommu=pt
+ amd_iommu_dump= [HW,X86-64]
+ Enable AMD IOMMU driver option to dump the ACPI table
+ for AMD IOMMU. With this option enabled, AMD IOMMU
+ driver will print ACPI tables for AMD IOMMU during
+ IOMMU initialization.
+
amijoy.map= [HW,JOY] Amiga joystick support
Map of devices attached to JOY0DAT and JOY1DAT
Format: <a>,<b>
@@ -397,8 +403,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
atkbd.softrepeat= [HW]
Use software keyboard repeat
- autotest [IA-64]
-
baycom_epp= [HW,AX25]
Format: <io>,<mode>
@@ -508,6 +512,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Also note the kernel might malfunction if you disable
some critical bits.
+ cma=nn[MG] [ARM,KNL]
+ Sets the size of kernel global memory area for contiguous
+ memory allocations. For more information, see
+ include/linux/dma-contiguous.h
+
cmo_free_hint= [PPC] Format: { yes | no }
Specify whether pages are marked as being inactive
when they are freed. This is used in CMO environments
@@ -515,6 +524,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
a hypervisor.
Default: yes
+ coherent_pool=nn[KMG] [ARM,KNL]
+ Sets the size of memory pool for coherent, atomic dma
+ allocations if Contiguous Memory Allocator (CMA) is used.
+
code_bytes [X86] How many bytes of object code to print
in an oops report.
Range: 0 - 8192
@@ -987,6 +1000,20 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
i8k.restricted [HW] Allow controlling fans only if SYS_ADMIN
capability is set.
+ i915.invert_brightness=
+ [DRM] Invert the sense of the variable that is used to
+ set the brightness of the panel backlight. Normally a
+ brightness value of 0 indicates backlight switched off,
+ and the maximum of the brightness value sets the backlight
+ to maximum brightness. If this parameter is set to 0
+ (default) and the machine requires it, or this parameter
+ is set to 1, a brightness value of 0 sets the backlight
+ to maximum brightness, and the maximum of the brightness
+ value switches the backlight off.
+ -1 -- never invert brightness
+ 0 -- machine default
+ 1 -- force brightness inversion
+
icn= [HW,ISDN]
Format: <io>[,<membase>[,<icn_id>[,<icn_id2>]]]
@@ -1430,8 +1457,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
devices can be requested on-demand with the
/dev/loop-control interface.
- mcatest= [IA-64]
-
mce [X86-32] Machine Check Exception
mce=option [X86-64] See Documentation/x86/x86_64/boot-options.txt
diff --git a/Documentation/leds/ledtrig-transient.txt b/Documentation/leds/ledtrig-transient.txt
new file mode 100644
index 000000000000..3bd38b487df1
--- /dev/null
+++ b/Documentation/leds/ledtrig-transient.txt
@@ -0,0 +1,152 @@
+LED Transient Trigger
+=====================
+
+The leds timer trigger does not currently have an interface to activate
+a one shot timer. The current support allows for setting two timers, one for
+specifying how long a state should be on, and the second for how long it
+should be off. The delay_on value specifies the time period an LED should
+stay in the on state, followed by a delay_off value that specifies how long
+the LED should stay in the off state. The on and off cycle repeats until the
+trigger gets deactivated. There is no provision for one time activation to
+implement features that require an on or off state to be held just once and
+then stay in the original state forever.
+
+Without a one shot timer interface, user space can still use the timer
+trigger to set a timer to hold a state; however, when the user space
+application crashes or goes away without deactivating the timer, the hardware
+will be left in that state permanently.
+
+As a specific example of this use-case, let's look at the vibrate feature on
+phones. The vibrate function on phones is implemented using PWM pins on the
+SoC or PMIC. There is a need to activate a one shot timer to control the
+vibrate feature, to prevent user space crashes leaving the phone in vibrate
+mode permanently and draining the battery.
+
+Transient trigger addresses the need for one shot timer activation. The
+transient trigger can be enabled and disabled just like the other leds
+triggers.
+
+When an led class device driver registers itself, it can specify all the led
+triggers it supports and a default trigger. During registration, the
+activation routine for the default trigger gets called. During registration
+of an led class device, the LED state does not change.
+
+When the driver unregisters, deactivation routine for the currently active
+trigger will be called, and LED state is changed to LED_OFF.
+
+Driver suspend changes the LED state to LED_OFF and resume doesn't change
+the state. Please note that there is no explicit interaction between the
+suspend and resume actions and the currently enabled trigger. LED state
+changes are suspended while the driver is in suspend state. Any timers
+that are active at the time driver gets suspended, continue to run, without
+being able to actually change the LED state. Once driver is resumed, triggers
+start functioning again.
+
+LED state changes are controlled using brightness which is a common led
+class device property. When brightness is set to 0 from user space via
+echo 0 > brightness, it will result in deactivating the current trigger.
+
+Transient trigger uses standard register and unregister interfaces. During
+trigger registration, for each led class device that specifies this trigger
+as its default trigger, trigger activation routine will get called. During
+registration, the LED state does not change, unless there is another trigger
+active, in which case LED state changes to LED_OFF.
+
+During trigger unregistration, LED state gets changed to LED_OFF.
+
+Transient trigger activation routine doesn't change the LED state. It
+creates its properties and does its initialization. Transient trigger
+deactivation routine, will cancel any timer that is active before it cleans
+up and removes the properties it created. It will restore the LED state to
+non-transient state. When driver gets suspended, irrespective of the transient
+state, the LED state changes to LED_OFF.
+
+Transient trigger can be enabled and disabled from user space on led class
+devices that support this trigger, as shown below:
+
+echo transient > trigger
+echo none > trigger
+
+NOTE: Add a new property trigger state to control the state.
+
+This trigger exports three properties, activate, state, and duration. When
+transient trigger is activated these properties are set to default values.
+
+- duration allows setting timer value in msecs. The initial value is 0.
+- activate allows activating and deactivating the timer specified by
+ duration as needed. The initial and default value is 0. This will allow
+ duration to be set after trigger activation.
+- state allows user to specify a transient state to be held for the specified
+ duration.
+
+ activate - one shot timer activate mechanism.
+ 1 when activated, 0 when deactivated.
+ default value is zero when transient trigger is enabled,
+ to allow duration to be set.
+
+ activate state indicates a timer with a value of specified
+ duration running.
+ deactivated state indicates that there is no active timer
+ running.
+
+ duration - one shot timer value. When activate is set, duration value
+ is used to start a timer that runs once. This value doesn't
+ get changed by the trigger unless user does a set via
+ echo new_value > duration
+
+ state - transient state to be held. It has two values 0 or 1. 0 maps
+ to LED_OFF and 1 maps to LED_FULL. The specified state is
+ held for the duration of the one shot timer and then the
+ state gets changed to the non-transient state which is the
+ inverse of transient state.
+ If state = LED_FULL, when the timer runs out the state will
+ go back to LED_OFF.
+ If state = LED_OFF, when the timer runs out the state will
+ go back to LED_FULL.
+ Please note that current LED state is not checked prior to
+ changing the state to the specified state.
+ Driver could map these values to inverted depending on the
+ default states it defines for the LED in its brightness_set()
+ interface which is called from the led brightness_set()
+ interfaces to control the LED state.
+
+When the timer expires, activate goes back to the deactivated state and
+duration is left at the set value, to be used when activate is set at a
+future time. This allows a user application to set the time once and activate
+the timer to run once whenever needed. When the timer expires, state is
+restored to the non-transient state, which is the inverse of the transient
+state.
+
+ echo 1 > activate - starts timer = duration when duration is not 0.
+ echo 0 > activate - cancels currently running timer.
+ echo n > duration - stores timer value to be used upon next
+ activate. Currently active timer if
+ any, continues to run for the specified time.
+ echo 0 > duration - stores timer value to be used upon next
+ activate. Currently active timer if any,
+ continues to run for the specified time.
+ echo 1 > state - stores desired transient state LED_FULL to be
+ held for the specified duration.
+ echo 0 > state - stores desired transient state LED_OFF to be
+ held for the specified duration.
+
+What is not supported:
+======================
+- Timer activation is one shot and extending and/or shortening the timer
+ is not supported.
+
+Example use-case 1:
+ echo transient > trigger
+ echo n > duration
+ echo 1 > state
+repeat the following step as needed:
+ echo 1 > activate - start timer = duration to run once
+ echo 1 > activate - start timer = duration to run once
+ echo none > trigger
+
+This trigger is intended to be used for the following example use cases:
+ - Control of vibrate (phones, tablets etc.) hardware by user space app.
+ - Use of LED by user space app as activity indicator.
+ - Use of LED by user space app as a kind of watchdog indicator -- as
+   long as the app is alive, it can keep the LED illuminated; if it dies,
+   the LED will be extinguished automatically.
+ - Use by any user space app that needs a transient GPIO output.
diff --git a/Documentation/media-framework.txt b/Documentation/media-framework.txt
index 3a0f879533ce..802875413873 100644
--- a/Documentation/media-framework.txt
+++ b/Documentation/media-framework.txt
@@ -335,6 +335,9 @@ the media_entity pipe field.
Calls to media_entity_pipeline_start() can be nested. The pipeline pointer must
be identical for all nested calls to the function.
+media_entity_pipeline_start() may return an error. In that case, it will
+clean up any changes it made by itself.
+
When stopping the stream, drivers must notify the entities with
media_entity_pipeline_stop(struct media_entity *entity);
@@ -351,3 +354,19 @@ If other operations need to be disallowed on streaming entities (such as
changing entities configuration parameters) drivers can explicitly check the
media_entity stream_count field to find out if an entity is streaming. This
operation must be done with the media_device graph_mutex held.
+
+
+Link validation
+---------------
+
+Link validation is performed by media_entity_pipeline_start() for any
+entity which has sink pads in the pipeline. The
+media_entity::link_validate() callback is used for that purpose. In the
+link_validate() callback, the entity driver should check that the properties
+of the source pad of the connected entity and its own sink pad match. What
+matching actually means is up to the type of the entity (and, in the end,
+the properties of the hardware).
+
+Subsystems should facilitate link validation by providing subsystem-specific
+helper functions that give easy access to commonly needed information, and,
+in the end, a way to use driver-specific callbacks.
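A rough sketch of a link_validate() implementation is shown below; only the
callback signature and the media_link source/sink pads come from the
framework, while the format type and lookup helper are hypothetical driver
code:

	#include <media/media-entity.h>

	/* Hypothetical per-pad format bookkeeping done by the driver. */
	struct my_pad_format {
		unsigned int width, height, code;
	};

	static struct my_pad_format my_get_pad_format(const struct media_pad *pad);

	static int my_link_validate(struct media_link *link)
	{
		struct my_pad_format src = my_get_pad_format(link->source);
		struct my_pad_format sink = my_get_pad_format(link->sink);

		/* Source and sink pads must agree on the image properties. */
		if (src.width != sink.width || src.height != sink.height ||
		    src.code != sink.code)
			return -EINVAL;

		return 0;
	}

	static const struct media_entity_operations my_entity_ops = {
		.link_validate = my_link_validate,
	};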
diff --git a/Documentation/nfc/nfc-hci.txt b/Documentation/nfc/nfc-hci.txt
index 216b7254fcc3..320f9336c781 100644
--- a/Documentation/nfc/nfc-hci.txt
+++ b/Documentation/nfc/nfc-hci.txt
@@ -22,9 +22,9 @@ response to arrive.
HCI events can also be received from the host controller. They will be handled
and a translation will be forwarded to NFC Core as needed.
HCI uses 2 execution contexts:
-- one if for executing commands : nfc_hci_msg_tx_work(). Only one command
+- one for executing commands : nfc_hci_msg_tx_work(). Only one command
can be executing at any given moment.
-- one if for dispatching received events and responses : nfc_hci_msg_rx_work()
+- one for dispatching received events and commands : nfc_hci_msg_rx_work().
HCI Session initialization:
---------------------------
@@ -52,18 +52,42 @@ entry points:
struct nfc_hci_ops {
int (*open)(struct nfc_hci_dev *hdev);
void (*close)(struct nfc_hci_dev *hdev);
+ int (*hci_ready) (struct nfc_hci_dev *hdev);
int (*xmit)(struct nfc_hci_dev *hdev, struct sk_buff *skb);
int (*start_poll)(struct nfc_hci_dev *hdev, u32 protocols);
int (*target_from_gate)(struct nfc_hci_dev *hdev, u8 gate,
struct nfc_target *target);
+ int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate,
+ struct nfc_target *target);
+ int (*data_exchange) (struct nfc_hci_dev *hdev,
+ struct nfc_target *target,
+ struct sk_buff *skb, struct sk_buff **res_skb);
+ int (*check_presence)(struct nfc_hci_dev *hdev,
+ struct nfc_target *target);
};
-open() and close() shall turn the hardware on and off. xmit() shall simply
-write a frame to the chip. start_poll() is an optional entrypoint that shall
-set the hardware in polling mode. This must be implemented only if the hardware
-uses proprietary gates or a mechanism slightly different from the HCI standard.
-target_from_gate() is another optional entrypoint to return the protocols
+- open() and close() shall turn the hardware on and off.
+- hci_ready() is an optional entry point that is called right after the hci
+session has been set up. The driver can use it to do additional initialization
+that must be performed using HCI commands.
+- xmit() shall simply write a frame to the chip.
+- start_poll() is an optional entrypoint that shall set the hardware in polling
+mode. This must be implemented only if the hardware uses proprietary gates or a
+mechanism slightly different from the HCI standard.
+- target_from_gate() is an optional entrypoint to return the nfc protocols
corresponding to a proprietary gate.
+- complete_target_discovered() is an optional entry point to let the driver
+perform additional proprietary processing necessary to auto activate the
+discovered target.
+- data_exchange() must be implemented by the driver if proprietary HCI commands
+are required to send data to the tag. Some tag types will require custom
+commands, others can be written to using the standard HCI commands. The driver
+can check the tag type and either do proprietary processing, or return 1 to ask
+for standard processing.
+- check_presence() is an optional entry point that will be called regularly
+by the core to check that an activated tag is still in the field. If this is
+not implemented, the core will not be able to push tag_lost events to user
+space.
On the rx path, the driver is responsible to push incoming HCP frames to HCI
using nfc_hci_recv_frame(). HCI will take care of re-aggregation and handling
@@ -99,7 +123,8 @@ fast, cannot sleep. stores incoming frames into an shdlc rx queue
handles shdlc rx & tx queues. Dispatches HCI cmd responses.
- HCI Tx Cmd worker (MSGTXWQ)
-Serialize execution of HCI commands. Complete execution in case of resp timeout.
+Serializes execution of HCI commands. Completes execution in case of response
+timeout.
- HCI Rx worker (MSGRXWQ)
Dispatches incoming HCI commands or events.
@@ -133,11 +158,11 @@ able to complete the command with a timeout error if no response arrive.
SMW context gets scheduled and invokes nfc_shdlc_sm_work(). This function
handles shdlc framing in and out. It uses the driver xmit to send frames and
receives incoming frames in an skb queue filled from the driver IRQ handler.
-SHDLC I(nformation) frames payload are HCP fragments. They are agregated to
+SHDLC I(nformation) frame payloads are HCP fragments. They are aggregated to
form complete HCI frames, which can be a response, command, or event.
HCI Responses are dispatched immediately from this context to unblock
-waiting command execution. Reponse processing involves invoking the completion
+waiting command execution. Response processing involves invoking the completion
callback that was provided by nfc_hci_msg_tx_work() when it sent the command.
The completion callback will then wake the syscall context.
diff --git a/Documentation/power/charger-manager.txt b/Documentation/power/charger-manager.txt
index fdcca991df30..b4f7f4b23f64 100644
--- a/Documentation/power/charger-manager.txt
+++ b/Documentation/power/charger-manager.txt
@@ -44,6 +44,16 @@ Charger Manager supports the following:
Normally, the platform will need to resume and suspend some devices
that are used by Charger Manager.
+* Support for premature full-battery event handling
+ If the battery voltage drops by "fullbatt_vchkdrop_uV" after
+ "fullbatt_vchkdrop_ms" from the full-battery event, the framework
+ restarts charging. This check is also performed while suspended by
+ setting wakeup time accordingly and using suspend_again.
+
+* Support for uevent-notify
+ On charger-related events, the device sends a notification to users
+ with UEVENT.
+
2. Global Charger-Manager Data related with suspend_again
========================================================
In order to setup Charger Manager with suspend-again feature
@@ -55,7 +65,7 @@ if there are multiple batteries. If there are multiple batteries, the
multiple instances of Charger Manager share the same charger_global_desc
and it will manage in-suspend monitoring for all instances of Charger Manager.
-The user needs to provide all the two entries properly in order to activate
+The user needs to provide all three entries properly in order to activate
in-suspend monitoring:
struct charger_global_desc {
@@ -74,6 +84,11 @@ bool (*rtc_only_wakeup)(void);
same struct. If there is any other wakeup source triggered the
wakeup, it should return false. If the "rtc" is the only wakeup
reason, it should return true.
+
+bool assume_timer_stops_in_suspend;
+ : if true, Charger Manager assumes that
+ the timer (CM uses jiffies as the timer) stops during suspend. Then, CM
+ assumes that the suspend duration is the same as the alarm length.
};
3. How to setup suspend_again
@@ -111,6 +126,16 @@ enum polling_modes polling_mode;
CM_POLL_CHARGING_ONLY: poll this battery if and only if the
battery is being charged.
+unsigned int fullbatt_vchkdrop_ms;
+unsigned int fullbatt_vchkdrop_uV;
+ : If both have non-zero values, Charger Manager will check the
+ battery voltage drop fullbatt_vchkdrop_ms after the battery is fully
+ charged. If the voltage drop is over fullbatt_vchkdrop_uV, Charger
+ Manager will try to recharge the battery by disabling and enabling
+ chargers. Recharging on the voltage-drop condition alone (without the
+ delay condition) needs to be implemented with hardware interrupts from
+ fuel gauges or charger devices/chips.
+
unsigned int fullbatt_uV;
: If specified with a non-zero value, Charger Manager assumes
that the battery is full (capacity = 100) if the battery is not being
@@ -122,6 +147,8 @@ unsigned int polling_interval_ms;
this battery every polling_interval_ms or more frequently.
enum data_source battery_present;
+ : CM_BATTERY_PRESENT: assume that the battery exists.
+ CM_NO_BATTERY: assume that the battery does not exist.
CM_FUEL_GAUGE: get battery presence information from fuel gauge.
CM_CHARGER_STAT: get battery presence from chargers.
@@ -151,7 +178,17 @@ bool measure_battery_temp;
the value of measure_battery_temp.
};
-5. Other Considerations
+5. Notify Charger-Manager of charger events: cm_notify_event()
+==============================================================
+If a charger event needs to be reported to Charger Manager, the charger
+device driver that detects the event can call cm_notify_event(psy, type, msg)
+to notify the corresponding Charger Manager.
+In the function, psy is the charger driver's power_supply pointer, which is
+associated with Charger-Manager. The parameter "type"
+is the same as the irq's type (enum cm_event_types). The event message "msg" is
+optional and is effective only if the event type is "UNDESCRIBED" or "OTHERS".
+
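+A minimal sketch of such a call, assuming the charger driver embeds the
+power_supply it registered and that CM_EVENT_BATT_FULL is one of the available
+cm_event_types (check the charger-manager header for the exact constant names;
+the my_charger structure below is purely illustrative):
+
+  #include <linux/power_supply.h>
+  #include <linux/power/charger-manager.h>
+
+  struct my_charger {                     /* hypothetical driver state */
+          struct power_supply psy;        /* registered with power_supply_register() */
+  };
+
+  static void my_charger_handle_eoc(struct my_charger *chg)
+  {
+          /* Hardware signalled end-of-charge; let Charger Manager recheck. */
+          cm_notify_event(&chg->psy, CM_EVENT_BATT_FULL, NULL);
+  }
+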
+6. Other Considerations
=======================
At the charger/battery-related events such as battery-pulled-out,
diff --git a/Documentation/power/power_supply_class.txt b/Documentation/power/power_supply_class.txt
index 9f16c5178b66..211831d4095f 100644
--- a/Documentation/power/power_supply_class.txt
+++ b/Documentation/power/power_supply_class.txt
@@ -84,6 +84,8 @@ are already charged or discharging, 'n/a' can be displayed (or
 HEALTH - represents health of the battery, values correspond to
POWER_SUPPLY_HEALTH_*, defined in battery.h.
+VOLTAGE_OCV - open circuit voltage of the battery.
+
VOLTAGE_MAX_DESIGN, VOLTAGE_MIN_DESIGN - design values for maximal and
minimal power supply voltages. Maximal/minimal means values of voltages
when battery considered "full"/"empty" at normal conditions. Yes, there is
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 88fd7f5c8dcd..13d6166d7a27 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -225,6 +225,13 @@ a queue must be less or equal then msg_max.
maximum message size value (it is every message queue's attribute set during
its creation).
+/proc/sys/fs/mqueue/msg_default is a read/write file for setting/getting the
+default number of messages in a queue, used if the attr parameter of mq_open(2)
+is NULL. If it exceeds msg_max, the default value is initialized to msg_max.
+
+/proc/sys/fs/mqueue/msgsize_default is a read/write file for setting/getting
+the default message size, used if the attr parameter of mq_open(2) is NULL. If
+it exceeds msgsize_max, the default value is initialized to msgsize_max.
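+
+The case these defaults cover is a NULL attr pointer passed to mq_open(2); a
+small userspace sketch (the queue name is arbitrary, link with -lrt):
+
+  #include <fcntl.h>
+  #include <mqueue.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          struct mq_attr attr;
+          /* attr == NULL: the kernel applies msg_default and msgsize_default */
+          mqd_t q = mq_open("/demo_queue", O_CREAT | O_RDWR, 0600, NULL);
+
+          if (q == (mqd_t)-1) {
+                  perror("mq_open");
+                  return 1;
+          }
+          mq_getattr(q, &attr);   /* read back the defaults actually used */
+          printf("mq_maxmsg=%ld mq_msgsize=%ld\n", attr.mq_maxmsg, attr.mq_msgsize);
+          mq_close(q);
+          mq_unlink("/demo_queue");
+          return 0;
+  }
+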
4. /proc/sys/fs/epoll - Configuration options for the epoll interface
--------------------------------------------------------
diff --git a/Documentation/trace/uprobetracer.txt b/Documentation/trace/uprobetracer.txt
new file mode 100644
index 000000000000..24ce6823a09e
--- /dev/null
+++ b/Documentation/trace/uprobetracer.txt
@@ -0,0 +1,113 @@
+ Uprobe-tracer: Uprobe-based Event Tracing
+ =========================================
+ Documentation written by Srikar Dronamraju
+
+Overview
+--------
+Uprobe based trace events are similar to kprobe based trace events.
+To enable this feature, build your kernel with CONFIG_UPROBE_EVENT=y.
+
+Similar to the kprobe-event tracer, this doesn't need to be activated via
+current_tracer. Instead, add probe points via
+/sys/kernel/debug/tracing/uprobe_events, and enable them via
+/sys/kernel/debug/tracing/events/uprobes/<EVENT>/enabled.
+
+However, unlike the kprobe-event tracer, the uprobe event interface expects
+the user to calculate the offset of the probepoint in the object.
+
+Synopsis of uprobe_tracer
+-------------------------
+ p[:[GRP/]EVENT] PATH:SYMBOL[+offs] [FETCHARGS] : Set a probe
+
+ GRP : Group name. If omitted, use "uprobes" for it.
+ EVENT : Event name. If omitted, the event name is generated
+ based on SYMBOL+offs.
+ PATH : path to an executable or a library.
+ SYMBOL[+offs] : Symbol+offset where the probe is inserted.
+
+ FETCHARGS : Arguments. Each probe can have up to 128 args.
+ %REG : Fetch register REG
+
+Event Profiling
+---------------
+ You can check the total number of probe hits and probe miss-hits via
+/sys/kernel/debug/tracing/uprobe_profile.
+ The first column is event name, the second is the number of probe hits,
+the third is the number of probe miss-hits.
+
+Usage examples
+--------------
+To add a probe as a new event, write a new definition to uprobe_events
+as below.
+
+ echo 'p: /bin/bash:0x4245c0' > /sys/kernel/debug/tracing/uprobe_events
+
+ This sets a uprobe at an offset of 0x4245c0 in the executable /bin/bash
+
+ echo > /sys/kernel/debug/tracing/uprobe_events
+
+ This clears all probe points.
+
+The following example shows how to dump the instruction pointer and the %ax
+register at the probed text address. Here we are trying to probe the
+function zfree in /bin/zsh:
+
+ # cd /sys/kernel/debug/tracing/
+ # cat /proc/`pgrep zsh`/maps | grep /bin/zsh | grep r-xp
+ 00400000-0048a000 r-xp 00000000 08:03 130904 /bin/zsh
+ # objdump -T /bin/zsh | grep -w zfree
+ 0000000000446420 g DF .text 0000000000000012 Base zfree
+
+0x46420 is the offset of zfree in object /bin/zsh that is loaded at
+0x00400000. Hence the command to probe would be:
+
+ # echo 'p /bin/zsh:0x46420 %ip %ax' > uprobe_events
+
+Please note: the user has to explicitly calculate the offset of the probepoint
+in the object. We can see the events that are registered by looking at the
+uprobe_events file.
+
+ # cat uprobe_events
+ p:uprobes/p_zsh_0x46420 /bin/zsh:0x00046420 arg1=%ip arg2=%ax
+
+The format of events can be seen by viewing the file events/uprobes/p_zsh_0x46420/format
+
+ # cat events/uprobes/p_zsh_0x46420/format
+ name: p_zsh_0x46420
+ ID: 922
+ format:
+ field:unsigned short common_type; offset:0; size:2; signed:0;
+ field:unsigned char common_flags; offset:2; size:1; signed:0;
+ field:unsigned char common_preempt_count; offset:3; size:1; signed:0;
+ field:int common_pid; offset:4; size:4; signed:1;
+ field:int common_padding; offset:8; size:4; signed:1;
+
+ field:unsigned long __probe_ip; offset:12; size:4; signed:0;
+ field:u32 arg1; offset:16; size:4; signed:0;
+ field:u32 arg2; offset:20; size:4; signed:0;
+
+ print fmt: "(%lx) arg1=%lx arg2=%lx", REC->__probe_ip, REC->arg1, REC->arg2
+
+Right after definition, each event is disabled by default. To trace these
+events, you need to enable them:
+
+ # echo 1 > events/uprobes/enable
+
+Let's disable the event after sleeping for some time.
+ # sleep 20
+ # echo 0 > events/uprobes/enable
+
+And you can see the traced information via /sys/kernel/debug/tracing/trace.
+
+ # cat trace
+ # tracer: nop
+ #
+ # TASK-PID CPU# TIMESTAMP FUNCTION
+ # | | | | |
+ zsh-24842 [006] 258544.995456: p_zsh_0x46420: (0x446420) arg1=446421 arg2=79
+ zsh-24842 [007] 258545.000270: p_zsh_0x46420: (0x446420) arg1=446421 arg2=79
+ zsh-24842 [002] 258545.043929: p_zsh_0x46420: (0x446420) arg1=446421 arg2=79
+ zsh-24842 [004] 258547.046129: p_zsh_0x46420: (0x446420) arg1=446421 arg2=79
+
+Each line shows that a probe was triggered for pid 24842, with the instruction
+pointer being 0x446421 and the contents of the ax register being 79.
diff --git a/Documentation/video4linux/4CCs.txt b/Documentation/video4linux/4CCs.txt
new file mode 100644
index 000000000000..41241af1ebfe
--- /dev/null
+++ b/Documentation/video4linux/4CCs.txt
@@ -0,0 +1,32 @@
+Guidelines for Video4Linux pixel format 4CCs
+============================================
+
+Guidelines for Video4Linux 4CC codes defined using v4l2_fourcc() are
+specified in this document. The first character defines the nature of
+the pixel format, compression and colour space. The interpretation of the
+other three characters depends on the first one.
+
+Existing 4CCs may not obey these guidelines.
+
+Formats
+=======
+
+Raw bayer
+---------
+
+The following first characters are used by raw bayer formats:
+
+ B: raw bayer, uncompressed
+ b: raw bayer, DPCM compressed
+ a: A-law compressed
+ u: u-law compressed
+
+2nd character: pixel order
+ B: BGGR
+ G: GBRG
+ g: GRBG
+ R: RGGB
+
+3rd character: uncompressed bits-per-pixel 0--9, A--
+
+4th character: compressed bits-per-pixel 0--9, A--
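+
+As an illustration of how the characters combine, the example below builds a
+guideline-style code with the same construction as the kernel's v4l2_fourcc()
+macro; reading the truncated "A--" above as letters continuing past 9 (so 10
+bits maps to 'A') is an assumption of this sketch, and whether the resulting
+code matches an already-defined format is not implied:
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define v4l2_fourcc(a, b, c, d) \
+          ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
+           ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
+
+  int main(void)
+  {
+          /*
+           * 'b' : raw bayer, DPCM compressed
+           * 'B' : BGGR pixel order
+           * 'A' : 10 uncompressed bits per pixel
+           * '8' : 8 compressed bits per pixel
+           */
+          uint32_t code = v4l2_fourcc('b', 'B', 'A', '8');
+
+          printf("fourcc: 0x%08x\n", (unsigned int)code);
+          return 0;
+  }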
diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
index e6c2842407a4..1e6b6531bbcc 100644
--- a/Documentation/video4linux/gspca.txt
+++ b/Documentation/video4linux/gspca.txt
@@ -276,6 +276,7 @@ pac7302 093a:2622 Genius Eye 312
pac7302 093a:2624 PAC7302
pac7302 093a:2625 Genius iSlim 310
pac7302 093a:2626 Labtec 2200
+pac7302 093a:2627 Genius FaceCam 300
pac7302 093a:2628 Genius iLook 300
pac7302 093a:2629 Genious iSlim 300
pac7302 093a:262a Webcam 300k
diff --git a/Documentation/video4linux/v4l2-controls.txt b/Documentation/video4linux/v4l2-controls.txt
index e2492a9d1027..43da22b89728 100644
--- a/Documentation/video4linux/v4l2-controls.txt
+++ b/Documentation/video4linux/v4l2-controls.txt
@@ -130,8 +130,18 @@ Menu controls are added by calling v4l2_ctrl_new_std_menu:
const struct v4l2_ctrl_ops *ops,
u32 id, s32 max, s32 skip_mask, s32 def);
+Or alternatively for integer menu controls, by calling v4l2_ctrl_new_int_menu:
+
+ struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, s32 max, s32 def, const s64 *qmenu_int);
+
These functions are typically called right after the v4l2_ctrl_handler_init:
+ static const s64 exp_bias_qmenu[] = {
+ -2, -1, 0, 1, 2
+ };
+
v4l2_ctrl_handler_init(&foo->ctrl_handler, nr_of_controls);
v4l2_ctrl_new_std(&foo->ctrl_handler, &foo_ctrl_ops,
V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
@@ -141,6 +151,11 @@ These functions are typically called right after the v4l2_ctrl_handler_init:
V4L2_CID_POWER_LINE_FREQUENCY,
V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0,
V4L2_CID_POWER_LINE_FREQUENCY_DISABLED);
+ v4l2_ctrl_new_int_menu(&foo->ctrl_handler, &foo_ctrl_ops,
+ V4L2_CID_EXPOSURE_BIAS,
+ ARRAY_SIZE(exp_bias_qmenu) - 1,
+ ARRAY_SIZE(exp_bias_qmenu) / 2 - 1,
+ exp_bias_qmenu);
...
if (foo->ctrl_handler.error) {
int err = foo->ctrl_handler.error;
@@ -164,6 +179,12 @@ controls. There is no min argument since that is always 0 for menu controls,
and instead of a step there is a skip_mask argument: if bit X is 1, then menu
item X is skipped.
+The v4l2_ctrl_new_int_menu function creates a new standard integer menu
+control with driver-specific items in the menu. It differs from
+v4l2_ctrl_new_std_menu in that it doesn't have the mask argument and takes
+as the last argument an array of signed 64-bit integers that form an exact
+menu item list.
+
Note that if something fails, the function will return NULL or an error and
set ctrl_handler->error to the error code. If ctrl_handler->error was already
set, then it will just return and do nothing. This is also true for
diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt
index 659b2ba12a4f..1f5905270050 100644
--- a/Documentation/video4linux/v4l2-framework.txt
+++ b/Documentation/video4linux/v4l2-framework.txt
@@ -182,11 +182,11 @@ static int __devinit drv_probe(struct pci_dev *pdev,
}
If you have multiple device nodes then it can be difficult to know when it is
-safe to unregister v4l2_device. For this purpose v4l2_device has refcounting
-support. The refcount is increased whenever video_register_device is called and
-it is decreased whenever that device node is released. When the refcount reaches
-zero, then the v4l2_device release() callback is called. You can do your final
-cleanup there.
+safe to unregister v4l2_device for hotpluggable devices. For this purpose
+v4l2_device has refcounting support. The refcount is increased whenever
+video_register_device is called and it is decreased whenever that device node
+is released. When the refcount reaches zero, then the v4l2_device release()
+callback is called. You can do your final cleanup there.
If other device nodes (e.g. ALSA) are created, then you can increase and
decrease the refcount manually as well by calling:
@@ -197,6 +197,10 @@ or:
int v4l2_device_put(struct v4l2_device *v4l2_dev);
+Since the initial refcount is 1 you also need to call v4l2_device_put in the
+disconnect() callback (for USB devices) or in the remove() callback (for e.g.
+PCI devices), otherwise the refcount will never reach 0.
+
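+A condensed sketch of a USB disconnect handler following this rule (foo_dev is
+a hypothetical driver structure; its release callback is assumed to have been
+set to foo_release before registration, and probe stored foo with
+usb_set_intfdata):
+
+  struct foo_dev {
+          struct v4l2_device v4l2_dev;
+          struct video_device vdev;
+  };
+
+  static void foo_release(struct v4l2_device *v4l2_dev)
+  {
+          struct foo_dev *foo = container_of(v4l2_dev, struct foo_dev, v4l2_dev);
+
+          kfree(foo);     /* final cleanup, runs when the refcount hits zero */
+  }
+
+  static void foo_disconnect(struct usb_interface *intf)
+  {
+          struct foo_dev *foo = usb_get_intfdata(intf);
+
+          video_unregister_device(&foo->vdev);
+          v4l2_device_disconnect(&foo->v4l2_dev);
+          v4l2_device_put(&foo->v4l2_dev);  /* drop the initial reference */
+  }
+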
struct v4l2_subdev
------------------
@@ -262,11 +266,16 @@ struct v4l2_subdev_video_ops {
...
};
+struct v4l2_subdev_pad_ops {
+ ...
+};
+
struct v4l2_subdev_ops {
const struct v4l2_subdev_core_ops *core;
const struct v4l2_subdev_tuner_ops *tuner;
const struct v4l2_subdev_audio_ops *audio;
const struct v4l2_subdev_video_ops *video;
+ const struct v4l2_subdev_pad_ops *pad;
};
The core ops are common to all subdevs, the other categories are implemented
@@ -303,6 +312,22 @@ Don't forget to cleanup the media entity before the sub-device is destroyed:
media_entity_cleanup(&sd->entity);
+If the subdev driver intends to process video and integrate with the media
+framework, it must implement format related functionality using
+v4l2_subdev_pad_ops instead of v4l2_subdev_video_ops.
+
+In that case, the subdev driver may set the link_validate field to provide
+its own link validation function. The link validation function is called for
+every link in the pipeline where both of the ends of the links are V4L2
+sub-devices. The driver is still responsible for validating the correctness
+of the format configuration between sub-devices and video nodes.
+
+If link_validate op is not set, the default function
+v4l2_subdev_link_validate_default() is used instead. This function ensures
+that width, height and the media bus pixel code are equal on both source and
+sink of the link. Subdev drivers are also free to use this function to
+perform the checks mentioned above in addition to their own checks.
+
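+A sketch of a pad ops table that relies on the default check, assuming the
+link_validate op sits in v4l2_subdev_pad_ops as described above (foo_get_fmt
+and foo_set_fmt stand in for the driver's own format handlers):
+
+  static const struct v4l2_subdev_pad_ops foo_pad_ops = {
+          .get_fmt        = foo_get_fmt,
+          .set_fmt        = foo_set_fmt,
+          /* reuse the default width/height/code comparison */
+          .link_validate  = v4l2_subdev_link_validate_default,
+  };
+
+  static const struct v4l2_subdev_ops foo_subdev_ops = {
+          .pad = &foo_pad_ops,
+  };
+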
A device (bridge) driver needs to register the v4l2_subdev with the
v4l2_device:
@@ -555,19 +580,25 @@ allocated memory.
You should also set these fields:
- v4l2_dev: set to the v4l2_device parent device.
+
- name: set to something descriptive and unique.
+
- fops: set to the v4l2_file_operations struct.
+
- ioctl_ops: if you use the v4l2_ioctl_ops to simplify ioctl maintenance
(highly recommended to use this and it might become compulsory in the
future!), then set this to your v4l2_ioctl_ops struct.
+
- lock: leave to NULL if you want to do all the locking in the driver.
- Otherwise you give it a pointer to a struct mutex_lock and before any
- of the v4l2_file_operations is called this lock will be taken by the
- core and released afterwards.
+ Otherwise you give it a pointer to a struct mutex and before the
+ unlocked_ioctl file operation is called this lock will be taken by the
+ core and released afterwards. See the next section for more details.
+
- prio: keeps track of the priorities. Used to implement VIDIOC_G/S_PRIORITY.
If left to NULL, then it will use the struct v4l2_prio_state in v4l2_device.
If you want to have a separate priority state per (group of) device node(s),
then you can point it to your own struct v4l2_prio_state.
+
- parent: you only set this if v4l2_device was registered with NULL as
the parent device struct. This only happens in cases where one hardware
device has multiple PCI devices that all share the same v4l2_device core.
@@ -577,6 +608,7 @@ You should also set these fields:
(cx8802). Since the v4l2_device cannot be associated with a particular
PCI device it is setup without a parent device. But when the struct
video_device is setup you do know which parent PCI device to use.
+
- flags: optional. Set to V4L2_FL_USE_FH_PRIO if you want to let the framework
handle the VIDIOC_G/S_PRIORITY ioctls. This requires that you use struct
v4l2_fh. Eventually this flag will disappear once all drivers use the core
@@ -587,6 +619,16 @@ in your v4l2_file_operations struct.
Do not use .ioctl! This is deprecated and will go away in the future.
+In some cases you want to tell the core that a function you had specified in
+your v4l2_ioctl_ops should be ignored. You can mark such ioctls by calling this
+function before video_device_register is called:
+
+void v4l2_disable_ioctl(struct video_device *vdev, unsigned int cmd);
+
+This tends to be needed if, based on external factors (e.g. which card is
+being used), you want to turn off certain features in v4l2_ioctl_ops without
+having to make a new struct.
+
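+For instance, a driver whose hardware lacks a tuner on some board variants
+might do something along these lines in its probe function before registering
+the node (foo->has_tuner and vdev are placeholders for the driver's own state):
+
+  if (!foo->has_tuner) {
+          v4l2_disable_ioctl(vdev, VIDIOC_G_TUNER);
+          v4l2_disable_ioctl(vdev, VIDIOC_S_TUNER);
+  }
+  err = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+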
The v4l2_file_operations struct is a subset of file_operations. The main
difference is that the inode argument is omitted since it is never used.
@@ -609,8 +651,22 @@ v4l2_file_operations and locking
--------------------------------
You can set a pointer to a mutex_lock in struct video_device. Usually this
-will be either a top-level mutex or a mutex per device node. If you want
-finer-grained locking then you have to set it to NULL and do you own locking.
+will be either a top-level mutex or a mutex per device node. By default this
+lock will be used for unlocked_ioctl, but you can disable locking for
+selected ioctls by calling:
+
+ void v4l2_disable_ioctl_locking(struct video_device *vdev, unsigned int cmd);
+
+E.g.: v4l2_disable_ioctl_locking(vdev, VIDIOC_DQBUF);
+
+You have to call this before you register the video_device.
+
+Particularly with USB drivers, where certain commands such as setting controls
+can take a long time, you may want to do your own locking for the buffer queuing
+ioctls.
+
+If you want still finer-grained locking then you have to set mutex_lock to NULL
+and do your own locking completely.
It is up to the driver developer to decide which method to use. However, if
your driver has high-latency operations (for example, changing the exposure
@@ -618,7 +674,7 @@ of a USB webcam might take a long time), then you might be better off with
doing your own locking if you want to allow the user to do other things with
the device while waiting for the high-latency command to finish.
-If a lock is specified then all file operations will be serialized on that
+If a lock is specified then all ioctl commands will be serialized on that
lock. If you use videobuf then you must pass the same lock to the videobuf
queue initialize function: if videobuf has to wait for a frame to arrive, then
it will temporarily unlock the lock and relock it afterwards. If your driver
@@ -941,21 +997,35 @@ fast.
Useful functions:
-- v4l2_event_queue()
+void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
Queue events to video device. The driver's only responsibility is to fill
in the type and the data fields. The other fields will be filled in by
V4L2.
-- v4l2_event_subscribe()
+int v4l2_event_subscribe(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub, unsigned elems,
+ const struct v4l2_subscribed_event_ops *ops)
The video_device->ioctl_ops->vidioc_subscribe_event must check the driver
is able to produce events with specified event id. Then it calls
- v4l2_event_subscribe() to subscribe the event. The last argument is the
- size of the event queue for this event. If it is 0, then the framework
- will fill in a default value (this depends on the event type).
+ v4l2_event_subscribe() to subscribe the event.
+
+ The elems argument is the size of the event queue for this event. If it is 0,
+ then the framework will fill in a default value (this depends on the event
+ type).
+
+ The ops argument allows the driver to specify a number of callbacks:
+ * add: called when a new listener gets added (subscribing to the same
+ event twice will only cause this callback to get called once)
+ * del: called when a listener stops listening
+ * replace: replace event 'old' with event 'new'.
+ * merge: merge event 'old' into event 'new'.
+ All 4 callbacks are optional; if you don't want to specify any callbacks,
+ the ops argument itself may be NULL.
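+
+  A sketch of a vidioc_subscribe_event handler that only produces one event
+  type and is happy with the default queue depth and no callbacks
+  (foo_subscribe_event and the choice of V4L2_EVENT_EOS are illustrative):
+
+    static int foo_subscribe_event(struct v4l2_fh *fh,
+                                   struct v4l2_event_subscription *sub)
+    {
+            if (sub->type != V4L2_EVENT_EOS)
+                    return -EINVAL;
+            /* elems == 0: let the framework pick the default queue size */
+            return v4l2_event_subscribe(fh, sub, 0, NULL);
+    }
+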
-- v4l2_event_unsubscribe()
+int v4l2_event_unsubscribe(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
vidioc_unsubscribe_event in struct v4l2_ioctl_ops. A driver may use
v4l2_event_unsubscribe() directly unless it wants to be involved in
@@ -964,7 +1034,7 @@ Useful functions:
The special type V4L2_EVENT_ALL may be used to unsubscribe all events. The
drivers may want to handle this in a special way.
-- v4l2_event_pending()
+int v4l2_event_pending(struct v4l2_fh *fh)
Returns the number of pending events. Useful when implementing poll.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 6386f8c0482e..930126698a0f 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2,6 +2,7 @@ The Definitive KVM (Kernel-based Virtual Machine) API Documentation
===================================================================
1. General description
+----------------------
The kvm API is a set of ioctls that are issued to control various aspects
of a virtual machine. The ioctls belong to three classes
@@ -23,7 +24,9 @@ of a virtual machine. The ioctls belong to three classes
Only run vcpu ioctls from the same thread that was used to create the
vcpu.
+
2. File descriptors
+-------------------
The kvm API is centered around file descriptors. An initial
open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
@@ -41,7 +44,9 @@ not cause harm to the host, their actual behavior is not guaranteed by
the API. The only supported use is one virtual machine per process,
and one vcpu per thread.
+
3. Extensions
+-------------
As of Linux 2.6.22, the KVM ABI has been stabilized: no backward
 incompatible changes are allowed. However, there is an extension
@@ -53,7 +58,9 @@ Instead, kvm defines extension identifiers and a facility to query
whether a particular extension identifier is available. If it is, a
set of ioctls is available for application use.
+
4. API description
+------------------
This section describes ioctls that can be used to control kvm guests.
For each ioctl, the following information is provided along with a
@@ -75,6 +82,7 @@ description:
Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
are not detailed, but errors with specific meanings are.
+
4.1 KVM_GET_API_VERSION
Capability: basic
@@ -90,6 +98,7 @@ supported. Applications should refuse to run if KVM_GET_API_VERSION
returns a value other than 12. If this check passes, all ioctls
described as 'basic' will be available.
+
4.2 KVM_CREATE_VM
Capability: basic
@@ -109,6 +118,7 @@ In order to create user controlled virtual machines on S390, check
KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as
privileged user (CAP_SYS_ADMIN).
+
4.3 KVM_GET_MSR_INDEX_LIST
Capability: basic
@@ -135,6 +145,7 @@ Note: if kvm indicates supports MCE (KVM_CAP_MCE), then the MCE bank MSRs are
not returned in the MSR list, as different vcpus can have a different number
of banks, as set via the KVM_X86_SETUP_MCE ioctl.
+
4.4 KVM_CHECK_EXTENSION
Capability: basic
@@ -149,6 +160,7 @@ receives an integer that describes the extension availability.
Generally 0 means no and 1 means yes, but some extensions may report
additional information in the integer return value.
+
4.5 KVM_GET_VCPU_MMAP_SIZE
Capability: basic
@@ -161,6 +173,7 @@ The KVM_RUN ioctl (cf.) communicates with userspace via a shared
memory region. This ioctl returns the size of that region. See the
KVM_RUN documentation for details.
+
4.6 KVM_SET_MEMORY_REGION
Capability: basic
@@ -171,6 +184,7 @@ Returns: 0 on success, -1 on error
This ioctl is obsolete and has been removed.
+
4.7 KVM_CREATE_VCPU
Capability: basic
@@ -223,6 +237,7 @@ machines, the resulting vcpu fd can be memory mapped at page offset
KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of the virtual
cpu's hardware control block.
+
4.8 KVM_GET_DIRTY_LOG (vm ioctl)
Capability: basic
@@ -246,6 +261,7 @@ since the last call to this ioctl. Bit 0 is the first page in the
memory slot. Ensure the entire structure is cleared to avoid padding
issues.
+
4.9 KVM_SET_MEMORY_ALIAS
Capability: basic
@@ -256,6 +272,7 @@ Returns: 0 (success), -1 (error)
This ioctl is obsolete and has been removed.
+
4.10 KVM_RUN
Capability: basic
@@ -272,6 +289,7 @@ obtained by mmap()ing the vcpu fd at offset 0, with the size given by
KVM_GET_VCPU_MMAP_SIZE. The parameter block is formatted as a 'struct
kvm_run' (see below).
+
4.11 KVM_GET_REGS
Capability: basic
@@ -292,6 +310,7 @@ struct kvm_regs {
__u64 rip, rflags;
};
+
4.12 KVM_SET_REGS
Capability: basic
@@ -304,6 +323,7 @@ Writes the general purpose registers into the vcpu.
See KVM_GET_REGS for the data structure.
+
4.13 KVM_GET_SREGS
Capability: basic
@@ -331,6 +351,7 @@ interrupt_bitmap is a bitmap of pending external interrupts. At most
one bit may be set. This interrupt has been acknowledged by the APIC
but not yet injected into the cpu core.
+
4.14 KVM_SET_SREGS
Capability: basic
@@ -342,6 +363,7 @@ Returns: 0 on success, -1 on error
Writes special registers into the vcpu. See KVM_GET_SREGS for the
data structures.
+
4.15 KVM_TRANSLATE
Capability: basic
@@ -365,6 +387,7 @@ struct kvm_translation {
__u8 pad[5];
};
+
4.16 KVM_INTERRUPT
Capability: basic
@@ -413,6 +436,7 @@ c) KVM_INTERRUPT_SET_LEVEL
Note that any value for 'irq' other than the ones stated above is invalid
and incurs unexpected behavior.
+
4.17 KVM_DEBUG_GUEST
Capability: basic
@@ -423,6 +447,7 @@ Returns: -1 on error
Support for this has been removed. Use KVM_SET_GUEST_DEBUG instead.
+
4.18 KVM_GET_MSRS
Capability: basic
@@ -451,6 +476,7 @@ Application code should set the 'nmsrs' member (which indicates the
size of the entries array) and the 'index' member of each array entry.
kvm will fill in the 'data' member.
+
4.19 KVM_SET_MSRS
Capability: basic
@@ -466,6 +492,7 @@ Application code should set the 'nmsrs' member (which indicates the
size of the entries array), and the 'index' and 'data' members of each
array entry.
+
4.20 KVM_SET_CPUID
Capability: basic
@@ -494,6 +521,7 @@ struct kvm_cpuid {
struct kvm_cpuid_entry entries[0];
};
+
4.21 KVM_SET_SIGNAL_MASK
Capability: basic
@@ -516,6 +544,7 @@ struct kvm_signal_mask {
__u8 sigset[0];
};
+
4.22 KVM_GET_FPU
Capability: basic
@@ -541,6 +570,7 @@ struct kvm_fpu {
__u32 pad2;
};
+
4.23 KVM_SET_FPU
Capability: basic
@@ -566,6 +596,7 @@ struct kvm_fpu {
__u32 pad2;
};
+
4.24 KVM_CREATE_IRQCHIP
Capability: KVM_CAP_IRQCHIP
@@ -579,6 +610,7 @@ ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
only go to the IOAPIC. On ia64, a IOSAPIC is created.
+
4.25 KVM_IRQ_LINE
Capability: KVM_CAP_IRQCHIP
@@ -600,6 +632,7 @@ struct kvm_irq_level {
__u32 level; /* 0 or 1 */
};
+
4.26 KVM_GET_IRQCHIP
Capability: KVM_CAP_IRQCHIP
@@ -621,6 +654,7 @@ struct kvm_irqchip {
} chip;
};
+
4.27 KVM_SET_IRQCHIP
Capability: KVM_CAP_IRQCHIP
@@ -642,6 +676,7 @@ struct kvm_irqchip {
} chip;
};
+
4.28 KVM_XEN_HVM_CONFIG
Capability: KVM_CAP_XEN_HVM
@@ -666,6 +701,7 @@ struct kvm_xen_hvm_config {
__u8 pad2[30];
};
+
4.29 KVM_GET_CLOCK
Capability: KVM_CAP_ADJUST_CLOCK
@@ -684,6 +720,7 @@ struct kvm_clock_data {
__u32 pad[9];
};
+
4.30 KVM_SET_CLOCK
Capability: KVM_CAP_ADJUST_CLOCK
@@ -702,6 +739,7 @@ struct kvm_clock_data {
__u32 pad[9];
};
+
4.31 KVM_GET_VCPU_EVENTS
Capability: KVM_CAP_VCPU_EVENTS
@@ -741,6 +779,7 @@ struct kvm_vcpu_events {
KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
interrupt.shadow contains a valid state. Otherwise, this field is undefined.
+
4.32 KVM_SET_VCPU_EVENTS
Capability: KVM_CAP_VCPU_EVENTS
@@ -767,6 +806,7 @@ If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
the flags field to signal that interrupt.shadow contains a valid state and
shall be written into the VCPU.
+
4.33 KVM_GET_DEBUGREGS
Capability: KVM_CAP_DEBUGREGS
@@ -785,6 +825,7 @@ struct kvm_debugregs {
__u64 reserved[9];
};
+
4.34 KVM_SET_DEBUGREGS
Capability: KVM_CAP_DEBUGREGS
@@ -798,6 +839,7 @@ Writes debug registers into the vcpu.
See KVM_GET_DEBUGREGS for the data structure. The flags field is unused
yet and must be cleared on entry.
+
4.35 KVM_SET_USER_MEMORY_REGION
Capability: KVM_CAP_USER_MEM
@@ -844,6 +886,7 @@ It is recommended to use this API instead of the KVM_SET_MEMORY_REGION ioctl.
The KVM_SET_MEMORY_REGION does not allow fine grained control over memory
allocation and is deprecated.
+
4.36 KVM_SET_TSS_ADDR
Capability: KVM_CAP_SET_TSS_ADDR
@@ -862,6 +905,7 @@ This ioctl is required on Intel-based hosts. This is needed on Intel hardware
because of a quirk in the virtualization implementation (see the internals
documentation when it pops into existence).
+
4.37 KVM_ENABLE_CAP
Capability: KVM_CAP_ENABLE_CAP
@@ -897,6 +941,7 @@ function properly, this is the place to put them.
__u8 pad[64];
};
+
4.38 KVM_GET_MP_STATE
Capability: KVM_CAP_MP_STATE
@@ -927,6 +972,7 @@ Possible values are:
This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel
irqchip, the multiprocessing state must be maintained by userspace.
+
4.39 KVM_SET_MP_STATE
Capability: KVM_CAP_MP_STATE
@@ -941,6 +987,7 @@ arguments.
This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel
irqchip, the multiprocessing state must be maintained by userspace.
+
4.40 KVM_SET_IDENTITY_MAP_ADDR
Capability: KVM_CAP_SET_IDENTITY_MAP_ADDR
@@ -959,6 +1006,7 @@ This ioctl is required on Intel-based hosts. This is needed on Intel hardware
because of a quirk in the virtualization implementation (see the internals
documentation when it pops into existence).
+
4.41 KVM_SET_BOOT_CPU_ID
Capability: KVM_CAP_SET_BOOT_CPU_ID
@@ -971,6 +1019,7 @@ Define which vcpu is the Bootstrap Processor (BSP). Values are the same
as the vcpu id in KVM_CREATE_VCPU. If this ioctl is not called, the default
is vcpu 0.
+
4.42 KVM_GET_XSAVE
Capability: KVM_CAP_XSAVE
@@ -985,6 +1034,7 @@ struct kvm_xsave {
This ioctl would copy current vcpu's xsave struct to the userspace.
+
4.43 KVM_SET_XSAVE
Capability: KVM_CAP_XSAVE
@@ -999,6 +1049,7 @@ struct kvm_xsave {
This ioctl would copy userspace's xsave struct to the kernel.
+
4.44 KVM_GET_XCRS
Capability: KVM_CAP_XCRS
@@ -1022,6 +1073,7 @@ struct kvm_xcrs {
This ioctl would copy current vcpu's xcrs to the userspace.
+
4.45 KVM_SET_XCRS
Capability: KVM_CAP_XCRS
@@ -1045,6 +1097,7 @@ struct kvm_xcrs {
This ioctl would set vcpu's xcr to the value userspace specified.
+
4.46 KVM_GET_SUPPORTED_CPUID
Capability: KVM_CAP_EXT_CPUID
@@ -1119,6 +1172,7 @@ support. Instead it is reported via
if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the
feature in userspace, then you can enable the feature for KVM_SET_CPUID2.
+
4.47 KVM_PPC_GET_PVINFO
Capability: KVM_CAP_PPC_GET_PVINFO
@@ -1142,6 +1196,7 @@ of 4 instructions that make up a hypercall.
If any additional field gets added to this structure later on, a bit for that
additional piece of information will be set in the flags bitmap.
+
4.48 KVM_ASSIGN_PCI_DEVICE
Capability: KVM_CAP_DEVICE_ASSIGNMENT
@@ -1185,6 +1240,7 @@ Only PCI header type 0 devices with PCI BAR resources are supported by
device assignment. The user requesting this ioctl must have read/write
access to the PCI sysfs resource files associated with the device.
+
4.49 KVM_DEASSIGN_PCI_DEVICE
Capability: KVM_CAP_DEVICE_DEASSIGNMENT
@@ -1198,6 +1254,7 @@ Ends PCI device assignment, releasing all associated resources.
See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is
used in kvm_assigned_pci_dev to identify the device.
+
4.50 KVM_ASSIGN_DEV_IRQ
Capability: KVM_CAP_ASSIGN_DEV_IRQ
@@ -1231,6 +1288,7 @@ The following flags are defined:
It is not valid to specify multiple types per host or guest IRQ. However, the
IRQ type of host and guest can differ or can even be null.
+
4.51 KVM_DEASSIGN_DEV_IRQ
Capability: KVM_CAP_ASSIGN_DEV_IRQ
@@ -1245,6 +1303,7 @@ See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
by assigned_dev_id, flags must correspond to the IRQ type specified on
KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
+
4.52 KVM_SET_GSI_ROUTING
Capability: KVM_CAP_IRQ_ROUTING
@@ -1293,6 +1352,7 @@ struct kvm_irq_routing_msi {
__u32 pad;
};
+
4.53 KVM_ASSIGN_SET_MSIX_NR
Capability: KVM_CAP_DEVICE_MSIX
@@ -1314,6 +1374,7 @@ struct kvm_assigned_msix_nr {
#define KVM_MAX_MSIX_PER_DEV 256
+
4.54 KVM_ASSIGN_SET_MSIX_ENTRY
Capability: KVM_CAP_DEVICE_MSIX
@@ -1332,7 +1393,8 @@ struct kvm_assigned_msix_entry {
__u16 padding[3];
};
-4.54 KVM_SET_TSC_KHZ
+
+4.55 KVM_SET_TSC_KHZ
Capability: KVM_CAP_TSC_CONTROL
Architectures: x86
@@ -1343,7 +1405,8 @@ Returns: 0 on success, -1 on error
Specifies the tsc frequency for the virtual machine. The unit of the
frequency is KHz.
-4.55 KVM_GET_TSC_KHZ
+
+4.56 KVM_GET_TSC_KHZ
Capability: KVM_CAP_GET_TSC_KHZ
Architectures: x86
@@ -1355,7 +1418,8 @@ Returns the tsc frequency of the guest. The unit of the return value is
KHz. If the host has unstable tsc this ioctl returns -EIO instead as an
error.
-4.56 KVM_GET_LAPIC
+
+4.57 KVM_GET_LAPIC
Capability: KVM_CAP_IRQCHIP
Architectures: x86
@@ -1371,7 +1435,8 @@ struct kvm_lapic_state {
Reads the Local APIC registers and copies them into the input argument. The
data format and layout are the same as documented in the architecture manual.
-4.57 KVM_SET_LAPIC
+
+4.58 KVM_SET_LAPIC
Capability: KVM_CAP_IRQCHIP
Architectures: x86
@@ -1387,7 +1452,8 @@ struct kvm_lapic_state {
 Copies the input argument into the Local APIC registers. The data format
and layout are the same as documented in the architecture manual.
-4.58 KVM_IOEVENTFD
+
+4.59 KVM_IOEVENTFD
Capability: KVM_CAP_IOEVENTFD
Architectures: all
@@ -1417,7 +1483,8 @@ The following flags are defined:
If datamatch flag is set, the event will be signaled only if the written value
to the registered address is equal to datamatch in struct kvm_ioeventfd.
-4.59 KVM_DIRTY_TLB
+
+4.60 KVM_DIRTY_TLB
Capability: KVM_CAP_SW_TLB
Architectures: ppc
@@ -1449,7 +1516,8 @@ The "num_dirty" field is a performance hint for KVM to determine whether it
should skip processing the bitmap and just invalidate everything. It must
be set to the number of set bits in the bitmap.
-4.60 KVM_ASSIGN_SET_INTX_MASK
+
+4.61 KVM_ASSIGN_SET_INTX_MASK
Capability: KVM_CAP_PCI_2_3
Architectures: x86
@@ -1482,6 +1550,7 @@ See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
by assigned_dev_id. In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
evaluated.
+
4.62 KVM_CREATE_SPAPR_TCE
Capability: KVM_CAP_SPAPR_TCE
@@ -1517,6 +1586,7 @@ the entries written by kernel-handled H_PUT_TCE calls, and also lets
userspace update the TCE table directly which is useful in some
circumstances.
+
4.63 KVM_ALLOCATE_RMA
Capability: KVM_CAP_PPC_RMA
@@ -1549,6 +1619,7 @@ is supported; 2 if the processor requires all virtual machines to have
an RMA, or 1 if the processor can use an RMA but doesn't require it,
because it supports the Virtual RMA (VRMA) facility.
+
4.64 KVM_NMI
Capability: KVM_CAP_USER_NMI
@@ -1574,6 +1645,7 @@ following algorithm:
Some guests configure the LINT1 NMI input to cause a panic, aiding in
debugging.
+
4.65 KVM_S390_UCAS_MAP
Capability: KVM_CAP_S390_UCONTROL
@@ -1593,6 +1665,7 @@ This ioctl maps the memory at "user_addr" with the length "length" to
the vcpu's address space starting at "vcpu_addr". All parameters need to
 be aligned by 1 megabyte.
+
4.66 KVM_S390_UCAS_UNMAP
Capability: KVM_CAP_S390_UCONTROL
@@ -1612,6 +1685,7 @@ This ioctl unmaps the memory in the vcpu's address space starting at
"vcpu_addr" with the length "length". The field "user_addr" is ignored.
 All parameters need to be aligned by 1 megabyte.
+
4.67 KVM_S390_VCPU_FAULT
Capability: KVM_CAP_S390_UCONTROL
@@ -1628,6 +1702,7 @@ table upfront. This is useful to handle validity intercepts for user
controlled virtual machines to fault in the virtual cpu's lowcore pages
prior to calling the KVM_RUN ioctl.
+
4.68 KVM_SET_ONE_REG
Capability: KVM_CAP_ONE_REG
@@ -1653,6 +1728,7 @@ registers, find a list below:
| |
PPC | KVM_REG_PPC_HIOR | 64
+
4.69 KVM_GET_ONE_REG
Capability: KVM_CAP_ONE_REG
@@ -1669,7 +1745,193 @@ at the memory location pointed to by "addr".
The list of registers accessible using this interface is identical to the
list in 4.64.
+
+4.70 KVM_KVMCLOCK_CTRL
+
+Capability: KVM_CAP_KVMCLOCK_CTRL
+Architectures: Any that implement pvclocks (currently x86 only)
+Type: vcpu ioctl
+Parameters: None
+Returns: 0 on success, -1 on error
+
+This signals to the host kernel that the specified guest is being paused by
+userspace. The host will set a flag in the pvclock structure that is checked
+from the soft lockup watchdog. The flag is part of the pvclock structure that
+is shared between guest and host, specifically the second bit of the flags
+field of the pvclock_vcpu_time_info structure. It will be set exclusively by
+the host and read/cleared exclusively by the guest. The guest operation of
+checking and clearing the flag must be an atomic operation, so
+load-link/store-conditional, or equivalent must be used. There are two cases
+where the guest will clear the flag: when the soft lockup watchdog timer resets
+itself or when a soft lockup is detected. This ioctl can be called any time
+after pausing the vcpu, but before it is resumed.
+
+
+4.71 KVM_SIGNAL_MSI
+
+Capability: KVM_CAP_SIGNAL_MSI
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_msi (in)
+Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
+
+Directly inject an MSI message. Only valid with an in-kernel irqchip that
+handles MSI messages.
+
+struct kvm_msi {
+ __u32 address_lo;
+ __u32 address_hi;
+ __u32 data;
+ __u32 flags;
+ __u8 pad[16];
+};
+
+No flags are defined so far. The corresponding field must be 0.
+
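+From userspace this is a plain vm ioctl; a sketch, assuming vmfd was obtained
+via KVM_CREATE_VM and that the address/data pair below encodes an MSI that is
+meaningful for the guest's interrupt controller (the values are only examples):
+
+  #include <linux/kvm.h>
+  #include <sys/ioctl.h>
+
+  struct kvm_msi msi = {
+          .address_lo = 0xfee00000,       /* example x86 MSI address */
+          .address_hi = 0,
+          .data       = 0x0030,           /* example vector/delivery encoding */
+          .flags      = 0,                /* no flags defined, must be 0 */
+  };
+
+  /* >0: delivered, 0: blocked by the guest, <0: error */
+  int ret = ioctl(vmfd, KVM_SIGNAL_MSI, &msi);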
+
+4.72 KVM_CREATE_PIT2
+
+Capability: KVM_CAP_PIT2
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_pit_config (in)
+Returns: 0 on success, -1 on error
+
+Creates an in-kernel device model for the i8254 PIT. This call is only valid
+after enabling in-kernel irqchip support via KVM_CREATE_IRQCHIP. The following
+parameters have to be passed:
+
+struct kvm_pit_config {
+ __u32 flags;
+ __u32 pad[15];
+};
+
+Valid flags are:
+
+#define KVM_PIT_SPEAKER_DUMMY 1 /* emulate speaker port stub */
+
+PIT timer interrupts may use a per-VM kernel thread for injection. If it
+exists, this thread will have a name of the following pattern:
+
+kvm-pit/<owner-process-pid>
+
+When running a guest with elevated priorities, the scheduling parameters of
+this thread may have to be adjusted accordingly.
+
+This IOCTL replaces the obsolete KVM_CREATE_PIT.
+
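+A userspace sketch of the required ordering (vmfd as above, error handling
+elided):
+
+  struct kvm_pit_config pit_config = {
+          .flags = KVM_PIT_SPEAKER_DUMMY, /* or 0 if no speaker stub is wanted */
+  };
+
+  ioctl(vmfd, KVM_CREATE_IRQCHIP, 0);     /* in-kernel irqchip must exist first */
+  ioctl(vmfd, KVM_CREATE_PIT2, &pit_config);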
+
+4.73 KVM_GET_PIT2
+
+Capability: KVM_CAP_PIT_STATE2
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_pit_state2 (out)
+Returns: 0 on success, -1 on error
+
+Retrieves the state of the in-kernel PIT model. Only valid after
+KVM_CREATE_PIT2. The state is returned in the following structure:
+
+struct kvm_pit_state2 {
+ struct kvm_pit_channel_state channels[3];
+ __u32 flags;
+ __u32 reserved[9];
+};
+
+Valid flags are:
+
+/* disable PIT in HPET legacy mode */
+#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
+
+This IOCTL replaces the obsolete KVM_GET_PIT.
+
+
+4.74 KVM_SET_PIT2
+
+Capability: KVM_CAP_PIT_STATE2
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_pit_state2 (in)
+Returns: 0 on success, -1 on error
+
+Sets the state of the in-kernel PIT model. Only valid after KVM_CREATE_PIT2.
+See KVM_GET_PIT2 for details on struct kvm_pit_state2.
+
+This IOCTL replaces the obsolete KVM_SET_PIT.
+
+
+4.75 KVM_PPC_GET_SMMU_INFO
+
+Capability: KVM_CAP_PPC_GET_SMMU_INFO
+Architectures: powerpc
+Type: vm ioctl
+Parameters: None
+Returns: 0 on success, -1 on error
+
+This populates and returns a structure describing the features of
+the "Server" class MMU emulation supported by KVM.
+This can in turn be used by userspace to generate the appropriate
+device-tree properties for the guest operating system.
+
+The structure contains some global information, followed by an
+array of supported segment page sizes:
+
+ struct kvm_ppc_smmu_info {
+ __u64 flags;
+ __u32 slb_size;
+ __u32 pad;
+ struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
+ };
+
+The supported flags are:
+
+ - KVM_PPC_PAGE_SIZES_REAL:
+ When that flag is set, guest page sizes must "fit" the backing
+ store page sizes. When not set, any page size in the list can
+ be used regardless of how they are backed by userspace.
+
+ - KVM_PPC_1T_SEGMENTS
+ The emulated MMU supports 1T segments in addition to the
+ standard 256M ones.
+
+The "slb_size" field indicates how many SLB entries are supported
+
+The "sps" array contains 8 entries indicating the supported base
+page sizes for a segment in increasing order. Each entry is defined
+as follows:
+
+ struct kvm_ppc_one_seg_page_size {
+ __u32 page_shift; /* Base page shift of segment (or 0) */
+ __u32 slb_enc; /* SLB encoding for BookS */
+ struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
+ };
+
+An entry with a "page_shift" of 0 is unused. Because the array is
+organized in increasing order, a lookup can stop when encountering
+such an entry.
+
+The "slb_enc" field provides the encoding to use in the SLB for the
+page size. The bits are in positions such that the value can directly
+be OR'ed into the "vsid" argument of the slbmte instruction.
+
+The "enc" array is a list which for each of those segment base page
+size provides the list of supported actual page sizes (which can be
+only larger or equal to the base page size), along with the
+corresponding encoding in the hash PTE. Similarily, the array is
+8 entries sorted by increasing sizes and an entry with a "0" shift
+is an empty entry and a terminator:
+
+ struct kvm_ppc_one_page_size {
+ __u32 page_shift; /* Page shift (or 0) */
+ __u32 pte_enc; /* Encoding in the HPTE (>>12) */
+ };
+
+The "pte_enc" field provides a value that can OR'ed into the hash
+PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
+into the hash PTE second double word).
+
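+A sketch of how userspace would walk the returned structure (vmfd as above):
+
+  struct kvm_ppc_smmu_info info;
+  int i, ret;
+
+  ret = ioctl(vmfd, KVM_PPC_GET_SMMU_INFO, &info);
+  if (ret < 0)
+          return ret;
+
+  for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
+          struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
+
+          if (!sps->page_shift)
+                  break;          /* a zero page_shift terminates the list */
+          /* segment base page size is 1UL << sps->page_shift bytes */
+  }
+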
5. The kvm_run structure
+------------------------
Application code obtains a pointer to the kvm_run structure by
mmap()ing a vcpu fd. From that point, application code can control
@@ -1910,7 +2172,9 @@ and usually define the validity of a groups of registers. (e.g. one bit
};
+
6. Capabilities that can be enabled
+-----------------------------------
There are certain capabilities that change the behavior of the virtual CPU when
enabled. To enable them, please see section 4.37. Below you can find a list of
@@ -1926,6 +2190,7 @@ The following information is provided along with the description:
Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
are not detailed, but errors with specific meanings are.
+
6.1 KVM_CAP_PPC_OSI
Architectures: ppc
@@ -1939,6 +2204,7 @@ between the guest and the host.
When this capability is enabled, KVM_EXIT_OSI can occur.
+
6.2 KVM_CAP_PPC_PAPR
Architectures: ppc
@@ -1957,6 +2223,7 @@ HTAB invisible to the guest.
When this capability is enabled, KVM_EXIT_PAPR_HCALL can occur.
+
6.3 KVM_CAP_SW_TLB
Architectures: ppc
diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt
index 882068538c9c..83afe65d4966 100644
--- a/Documentation/virtual/kvm/cpuid.txt
+++ b/Documentation/virtual/kvm/cpuid.txt
@@ -10,11 +10,15 @@ a guest.
KVM cpuid functions are:
function: KVM_CPUID_SIGNATURE (0x40000000)
-returns : eax = 0,
+returns : eax = 0x40000001,
ebx = 0x4b4d564b,
ecx = 0x564b4d56,
edx = 0x4d.
Note that this value in ebx, ecx and edx corresponds to the string "KVMKVMKVM".
+The value in eax corresponds to the maximum cpuid function present in this leaf,
+and will be updated if more functions are added in the future.
+Note also that old hosts set the eax value to 0x0. This should
+be interpreted as if the value were 0x40000001.
This function queries the presence of KVM cpuid leafs.
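+
+A guest can probe for the signature with a plain CPUID instruction; an x86
+sketch using GCC inline assembly, which also applies the 0-means-0x40000001
+rule described above:
+
+  #include <stdint.h>
+  #include <string.h>
+
+  static int running_on_kvm(uint32_t *max_leaf)
+  {
+          uint32_t eax, ebx, ecx, edx;
+          char sig[13];
+
+          asm volatile("cpuid"
+                       : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
+                       : "a"(0x40000000));
+
+          memcpy(sig + 0, &ebx, 4);
+          memcpy(sig + 4, &ecx, 4);
+          memcpy(sig + 8, &edx, 4);
+          sig[12] = '\0';
+
+          *max_leaf = eax ? eax : 0x40000001;     /* old hosts report 0 */
+          return strcmp(sig, "KVMKVMKVM") == 0;
+  }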
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt
index 50317809113d..96b41bd97523 100644
--- a/Documentation/virtual/kvm/msr.txt
+++ b/Documentation/virtual/kvm/msr.txt
@@ -109,6 +109,10 @@ MSR_KVM_SYSTEM_TIME_NEW: 0x4b564d01
0 | 24 | multiple cpus are guaranteed to
| | be monotonic
-------------------------------------------------------------
+ | | guest vcpu has been paused by
+ 1 | N/A | the host
+ | | See 4.70 in api.txt
+ -------------------------------------------------------------
 Availability of this MSR must be checked via bit 3 in the 0x40000001 cpuid
leaf prior to usage.
diff --git a/Documentation/vm/pagemap.txt b/Documentation/vm/pagemap.txt
index 4600cbe3d6be..7587493c67f1 100644
--- a/Documentation/vm/pagemap.txt
+++ b/Documentation/vm/pagemap.txt
@@ -16,7 +16,7 @@ There are three components to pagemap:
* Bits 0-4 swap type if swapped
* Bits 5-54 swap offset if swapped
* Bits 55-60 page shift (page size = 1<<page shift)
- * Bit 61 reserved for future use
+ * Bit 61 page is file-page or shared-anon
* Bit 62 page swapped
* Bit 63 page present
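+
+A userspace sketch that reads one pagemap entry and picks these bits apart
+(vaddr is the virtual address of interest; error handling is minimal):
+
+  #include <fcntl.h>
+  #include <stdint.h>
+  #include <unistd.h>
+
+  static int page_present(uintptr_t vaddr)
+  {
+          uint64_t entry = 0;
+          long pagesize = sysconf(_SC_PAGESIZE);
+          int fd = open("/proc/self/pagemap", O_RDONLY);
+
+          if (fd < 0)
+                  return -1;
+          /* one little-endian 64-bit entry per virtual page */
+          pread(fd, &entry, sizeof(entry), (vaddr / pagesize) * 8);
+          close(fd);
+
+          /* bit 63: present, bit 62: swapped, bit 61: file-page/shared-anon */
+          return (entry >> 63) & 1;
+  }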
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt
index 29bdf62aac09..f734bb2a78dc 100644
--- a/Documentation/vm/transhuge.txt
+++ b/Documentation/vm/transhuge.txt
@@ -166,6 +166,68 @@ behavior. So to make them effective you need to restart any
application that could have been using hugepages. This also applies to
the regions registered in khugepaged.
+== Monitoring usage ==
+
+The number of transparent huge pages currently used by the system is
+available by reading the AnonHugePages field in /proc/meminfo. To
+identify what applications are using transparent huge pages, it is
+necessary to read /proc/PID/smaps and count the AnonHugePages fields
+for each mapping. Note that reading the smaps file is expensive and
+reading it frequently will incur overhead.
+
+There are a number of counters in /proc/vmstat that may be used to
+monitor how successfully the system is providing huge pages for use.
+
+thp_fault_alloc is incremented every time a huge page is successfully
+ allocated to handle a page fault. This applies to both the
+ first time a page is faulted and for COW faults.
+
+thp_collapse_alloc is incremented by khugepaged when it has found
+ a range of pages to collapse into one huge page and has
+ successfully allocated a new huge page to store the data.
+
+thp_fault_fallback is incremented if a page fault fails to allocate
+ a huge page and instead falls back to using small pages.
+
+thp_collapse_alloc_failed is incremented if khugepaged found a range
+ of pages that should be collapsed into one huge page but failed
+ the allocation.
+
+thp_split is incremented every time a huge page is split into base
+ pages. This can happen for a variety of reasons but a common
+ reason is that a huge page is old and is being reclaimed.
+
+As the system ages, allocating huge pages may be expensive as the
+system uses memory compaction to copy data around memory to free a
+huge page for use. There are some counters in /proc/vmstat to help
+monitor this overhead.
+
+compact_stall is incremented every time a process stalls to run
+ memory compaction so that a huge page is free for use.
+
+compact_success is incremented if the system compacted memory and
+ freed a huge page for use.
+
+compact_fail is incremented if the system tries to compact memory
+ but failed.
+
+compact_pages_moved is incremented each time a page is moved. If
+ this value is increasing rapidly, it implies that the system
+ is copying a lot of data to satisfy the huge page allocation.
+ It is possible that the cost of copying exceeds any savings
+ from reduced TLB misses.
+
+compact_pagemigrate_failed is incremented when the underlying mechanism
+ for moving a page failed.
+
+compact_blocks_moved is incremented each time memory compaction examines
+ a huge page aligned range of pages.
+
+It is possible to establish how long the stalls were by using the function
+tracer to record how much time was spent in __alloc_pages_nodemask and
+using the mm_page_alloc tracepoint to identify which allocations were
+for huge pages.
+
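+The system-wide count can also be read back programmatically; a small sketch
+that pulls AnonHugePages out of /proc/meminfo (per-process accounting still
+requires walking /proc/PID/smaps as described above):
+
+  #include <stdio.h>
+
+  /* Returns AnonHugePages in kB, or -1 if the field was not found. */
+  static long anon_huge_pages_kb(void)
+  {
+          char line[256];
+          long kb = -1;
+          FILE *f = fopen("/proc/meminfo", "r");
+
+          if (!f)
+                  return -1;
+          while (fgets(line, sizeof(line), f))
+                  if (sscanf(line, "AnonHugePages: %ld kB", &kb) == 1)
+                          break;
+          fclose(f);
+          return kb;
+  }
+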
== get_user_pages and follow_page ==
get_user_pages and follow_page if run on a hugepage, will return the
diff --git a/Documentation/watchdog/watchdog-kernel-api.txt b/Documentation/watchdog/watchdog-kernel-api.txt
index 25fe4304f2fc..086638f6c82d 100644
--- a/Documentation/watchdog/watchdog-kernel-api.txt
+++ b/Documentation/watchdog/watchdog-kernel-api.txt
@@ -1,6 +1,6 @@
The Linux WatchDog Timer Driver Core kernel API.
===============================================
-Last reviewed: 16-Mar-2012
+Last reviewed: 22-May-2012
Wim Van Sebroeck <wim@iguana.be>
@@ -39,6 +39,10 @@ watchdog_device structure.
The watchdog device structure looks like this:
struct watchdog_device {
+ int id;
+ struct cdev cdev;
+ struct device *dev;
+ struct device *parent;
const struct watchdog_info *info;
const struct watchdog_ops *ops;
unsigned int bootstatus;
@@ -46,10 +50,20 @@ struct watchdog_device {
unsigned int min_timeout;
unsigned int max_timeout;
void *driver_data;
+ struct mutex lock;
unsigned long status;
};
It contains following fields:
+* id: set by watchdog_register_device, id 0 is special. It has both a
+ /dev/watchdog0 cdev (dynamic major, minor 0) and the old
+ /dev/watchdog miscdev. The id is set automatically when calling
+ watchdog_register_device.
+* cdev: cdev for the dynamic /dev/watchdog<id> device nodes. This
+ field is also populated by watchdog_register_device.
+* dev: device under the watchdog class (created by watchdog_register_device).
+* parent: set this to the parent device (or NULL) before calling
+ watchdog_register_device.
* info: a pointer to a watchdog_info structure. This structure gives some
 additional information about the watchdog timer itself. (Like its unique name)
* ops: a pointer to the list of watchdog operations that the watchdog supports.
@@ -61,6 +75,7 @@ It contains following fields:
* driver_data: a pointer to the drivers private data of a watchdog device.
This data should only be accessed via the watchdog_set_drvdata and
watchdog_get_drvdata routines.
+* lock: Mutex for WatchDog Timer Driver Core internal use only.
* status: this field contains a number of status bits that give extra
information about the status of the device (Like: is the watchdog timer
running/active, is the nowayout bit set, is the device opened via
@@ -78,6 +93,8 @@ struct watchdog_ops {
unsigned int (*status)(struct watchdog_device *);
int (*set_timeout)(struct watchdog_device *, unsigned int);
unsigned int (*get_timeleft)(struct watchdog_device *);
+ void (*ref)(struct watchdog_device *);
+ void (*unref)(struct watchdog_device *);
long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
};
@@ -85,6 +102,21 @@ It is important that you first define the module owner of the watchdog timer
driver's operations. This module owner will be used to lock the module when
the watchdog is active. (This to avoid a system crash when you unload the
module and /dev/watchdog is still open).
+
+If the watchdog_device struct is dynamically allocated, just locking the module
+is not enough and a driver also needs to define the ref and unref operations to
+ensure the structure holding the watchdog_device does not go away.
+
+The simplest (and usually sufficient) implementation of this is to (a condensed
+sketch follows the list):
+1) Add a kref struct to the same structure which is holding the watchdog_device
+2) Define a release callback for the kref which frees the struct holding both
+3) Call kref_init on this kref *before* calling watchdog_register_device()
+4) Define a ref operation calling kref_get on this kref
+5) Define an unref operation calling kref_put on this kref
+6) When it is time to clean up:
+ * Do not kfree() the struct holding both; the last kref_put will do this!
+ * *After* calling watchdog_unregister_device(), call kref_put on the kref
+
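+A condensed sketch of steps 1) to 5) (foo_wdt is a hypothetical driver
+structure, foo_wdt_start/stop stand in for the real start/stop routines, and
+kref_init(&foo->kref) is still expected in probe before
+watchdog_register_device()):
+
+  struct foo_wdt {
+          struct watchdog_device wdd;
+          struct kref kref;
+  };
+
+  static void foo_wdt_release(struct kref *kref)
+  {
+          struct foo_wdt *foo = container_of(kref, struct foo_wdt, kref);
+
+          kfree(foo);             /* frees the embedded watchdog_device too */
+  }
+
+  static void foo_wdt_ref(struct watchdog_device *wdd)
+  {
+          struct foo_wdt *foo = container_of(wdd, struct foo_wdt, wdd);
+
+          kref_get(&foo->kref);
+  }
+
+  static void foo_wdt_unref(struct watchdog_device *wdd)
+  {
+          struct foo_wdt *foo = container_of(wdd, struct foo_wdt, wdd);
+
+          kref_put(&foo->kref, foo_wdt_release);
+  }
+
+  static const struct watchdog_ops foo_wdt_ops = {
+          .owner  = THIS_MODULE,
+          .start  = foo_wdt_start,
+          .stop   = foo_wdt_stop,
+          .ref    = foo_wdt_ref,
+          .unref  = foo_wdt_unref,
+  };
+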
Some operations are mandatory and some are optional. The mandatory operations
are:
* start: this is a pointer to the routine that starts the watchdog timer
@@ -125,6 +157,10 @@ they are supported. These optional routines/operations are:
(Note: the WDIOF_SETTIMEOUT needs to be set in the options field of the
watchdog's info structure).
 * get_timeleft: this routine returns the time that's left before a reset.
+* ref: the operation that calls kref_get on the kref of a dynamically
+ allocated watchdog_device struct.
+* unref: the operation that calls kref_put on the kref of a dynamically
+ allocated watchdog_device struct.
* ioctl: if this routine is present then it will be called first before we do
our own internal ioctl call handling. This routine should return -ENOIOCTLCMD
if a command is not supported. The parameters that are passed to the ioctl
@@ -144,6 +180,11 @@ bit-operations. The status bits that are defined are:
(This bit should only be used by the WatchDog Timer Driver Core).
* WDOG_NO_WAY_OUT: this bit stores the nowayout setting for the watchdog.
If this bit is set then the watchdog timer will not be able to stop.
+* WDOG_UNREGISTERED: this bit gets set by the WatchDog Timer Driver Core
+ after calling watchdog_unregister_device, and then checked before calling
+ any watchdog_ops, so that you can be sure that no operations (other than
+ unref) will get called after unregister, even if userspace still holds a
+ reference to /dev/watchdog.
To set the WDOG_NO_WAY_OUT status bit (before registering your watchdog
timer device) you can either:
diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt
index 17ddd822b456..04fddbacdbde 100644
--- a/Documentation/watchdog/watchdog-parameters.txt
+++ b/Documentation/watchdog/watchdog-parameters.txt
@@ -78,6 +78,11 @@ wd0_timeout: Default watchdog0 timeout in 1/10secs
wd1_timeout: Default watchdog1 timeout in 1/10secs
wd2_timeout: Default watchdog2 timeout in 1/10secs
-------------------------------------------------
+da9052wdt:
+timeout: Watchdog timeout in seconds. 2<= timeout <=131, default=2.048s
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
davinci_wdt:
heartbeat: Watchdog heartbeat period in seconds from 1 to 600, default 60
-------------------------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 27a1d3c6eec8..55f0fda602ec 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1905,6 +1905,16 @@ F: Documentation/filesystems/coda.txt
F: fs/coda/
F: include/linux/coda*.h
+COMMON CLK FRAMEWORK
+M: Mike Turquette <mturquette@ti.com>
+M: Mike Turquette <mturquette@linaro.org>
+L: linux-arm-kernel@lists.infradead.org (same as CLK API & CLKDEV)
+T: git git://git.linaro.org/people/mturquette/linux.git
+S: Maintained
+F: drivers/clk/clk.c
+F: drivers/clk/clk-*
+F: include/linux/clk-pr*
+
COMMON INTERNET FILE SYSTEM (CIFS)
M: Steve French <sfrench@samba.org>
L: linux-cifs@vger.kernel.org
@@ -2398,10 +2408,10 @@ F: drivers/gpu/drm/
F: include/drm/
INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
-M: Keith Packard <keithp@keithp.com>
+M: Daniel Vetter <daniel.vetter@ffwll.ch>
L: intel-gfx@lists.freedesktop.org (subscribers-only)
L: dri-devel@lists.freedesktop.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux.git
+T: git git://people.freedesktop.org/~danvet/drm-intel
S: Supported
F: drivers/gpu/drm/i915
F: include/drm/i915*
@@ -2718,6 +2728,13 @@ S: Maintained
F: Documentation/hwmon/f71805f
F: drivers/hwmon/f71805f.c
+FC0011 TUNER DRIVER
+M: Michael Buesch <m@bues.ch>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/common/tuners/fc0011.h
+F: drivers/media/common/tuners/fc0011.c
+
FANOTIFY
M: Eric Paris <eparis@redhat.com>
S: Maintained
@@ -2801,6 +2818,12 @@ F: Documentation/firmware_class/
F: drivers/base/firmware*.c
F: include/linux/firmware.h
+FLOPPY DRIVER
+M: Jiri Kosina <jkosina@suse.cz>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
+S: Odd fixes
+F: drivers/block/floppy.c
+
FPU EMULATOR
M: Bill Metzenthen <billm@melbpc.org.au>
W: http://floatingpoint.sourceforge.net/emulator/index.html
@@ -2971,9 +2994,9 @@ GENERIC GPIO I2C MULTIPLEXER DRIVER
M: Peter Korsgaard <peter.korsgaard@barco.com>
L: linux-i2c@vger.kernel.org
S: Supported
-F: drivers/i2c/muxes/gpio-i2cmux.c
-F: include/linux/gpio-i2cmux.h
-F: Documentation/i2c/muxes/gpio-i2cmux
+F: drivers/i2c/muxes/i2c-mux-gpio.c
+F: include/linux/i2c-mux-gpio.h
+F: Documentation/i2c/muxes/i2c-mux-gpio
GENERIC HDLC (WAN) DRIVERS
M: Krzysztof Halasa <khc@pm.waw.pl>
@@ -3215,10 +3238,8 @@ F: include/linux/clockchips.h
F: include/linux/hrtimer.h
HIGH-SPEED SCC DRIVER FOR AX.25
-M: Klaus Kudielka <klaus.kudielka@ieee.org>
L: linux-hams@vger.kernel.org
-W: http://www.nt.tuwien.ac.at/~kkudielk/Linux/
-S: Maintained
+S: Orphan
F: drivers/net/hamradio/dmascc.c
F: drivers/net/hamradio/scc.c
@@ -3365,6 +3386,12 @@ W: http://www.developer.ibm.com/welcome/netfinity/serveraid.html
S: Supported
F: drivers/scsi/ips.*
+ICH LPC AND GPIO DRIVER
+M: Peter Tyser <ptyser@xes-inc.com>
+S: Maintained
+F: drivers/mfd/lpc_ich.c
+F: drivers/gpio/gpio-ich.c
+
IDE SUBSYSTEM
M: "David S. Miller" <davem@davemloft.net>
L: linux-ide@vger.kernel.org
@@ -3458,6 +3485,8 @@ Q: http://patchwork.kernel.org/project/linux-input/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input.git
S: Maintained
F: drivers/input/
+F: include/linux/input.h
+F: include/linux/input/
INPUT MULTITOUCH (MT) PROTOCOL
M: Henrik Rydberg <rydberg@euromail.se>
@@ -4486,12 +4515,6 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/mmc/host/imxmmc.*
-MOUSE AND MISC DEVICES [GENERAL]
-M: Alessandro Rubini <rubini@ipvvis.unipv.it>
-S: Maintained
-F: drivers/input/mouse/
-F: include/linux/gpio_mouse.h
-
MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
M: Jiri Slaby <jirislaby@gmail.com>
S: Maintained
@@ -5129,7 +5152,7 @@ PCA9541 I2C BUS MASTER SELECTOR DRIVER
M: Guenter Roeck <guenter.roeck@ericsson.com>
L: linux-i2c@vger.kernel.org
S: Maintained
-F: drivers/i2c/muxes/pca9541.c
+F: drivers/i2c/muxes/i2c-mux-pca9541.c
PCA9564/PCA9665 I2C BUS DRIVER
M: Wolfram Sang <w.sang@pengutronix.de>
@@ -5314,7 +5337,7 @@ M: David Woodhouse <dwmw2@infradead.org>
T: git git://git.infradead.org/battery-2.6.git
S: Maintained
F: include/linux/power_supply.h
-F: drivers/power/power_supply*
+F: drivers/power/
PNP SUPPORT
M: Adam Belay <abelay@mit.edu>
@@ -6331,14 +6354,25 @@ F: include/linux/compiler.h
SPEAR PLATFORM SUPPORT
M: Viresh Kumar <viresh.kumar@st.com>
+M: Shiraz Hashim <shiraz.hashim@st.com>
L: spear-devel@list.st.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
S: Maintained
F: arch/arm/plat-spear/
+SPEAR13XX MACHINE SUPPORT
+M: Viresh Kumar <viresh.kumar@st.com>
+M: Shiraz Hashim <shiraz.hashim@st.com>
+L: spear-devel@list.st.com
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+W: http://www.st.com/spear
+S: Maintained
+F: arch/arm/mach-spear13xx/
+
SPEAR3XX MACHINE SUPPORT
M: Viresh Kumar <viresh.kumar@st.com>
+M: Shiraz Hashim <shiraz.hashim@st.com>
L: spear-devel@list.st.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
@@ -6347,6 +6381,8 @@ F: arch/arm/mach-spear3xx/
SPEAR6XX MACHINE SUPPORT
M: Rajeev Kumar <rajeev-dlh.kumar@st.com>
+M: Shiraz Hashim <shiraz.hashim@st.com>
+M: Viresh Kumar <viresh.kumar@st.com>
L: spear-devel@list.st.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
@@ -6359,9 +6395,7 @@ L: spear-devel@list.st.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
S: Maintained
-F: arch/arm/mach-spear*/clock.c
-F: arch/arm/plat-spear/clock.c
-F: arch/arm/plat-spear/include/plat/clock.h
+F: drivers/clk/spear/
SPI SUBSYSTEM
M: Grant Likely <grant.likely@secretlab.ca>
@@ -6623,7 +6657,7 @@ F: include/linux/taskstats*
F: kernel/taskstats.c
TC CLASSIFIER
-M: Jamal Hadi Salim <hadi@cyberus.ca>
+M: Jamal Hadi Salim <jhs@mojatatu.com>
L: netdev@vger.kernel.org
S: Maintained
F: include/linux/pkt_cls.h
@@ -7191,7 +7225,7 @@ F: include/linux/usb/usbnet.h
USB VIDEO CLASS
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
-L: linux-uvc-devel@lists.berlios.de (subscribers-only)
+L: linux-uvc-devel@lists.sourceforge.net (subscribers-only)
L: linux-media@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
W: http://www.ideasonboard.org/uvc/
@@ -7614,7 +7648,7 @@ XFS FILESYSTEM
P: Silicon Graphics Inc
M: Ben Myers <bpm@sgi.com>
M: Alex Elder <elder@kernel.org>
-M: xfs-masters@oss.sgi.com
+M: xfs@oss.sgi.com
L: xfs@oss.sgi.com
W: http://oss.sgi.com/projects/xfs
T: git git://oss.sgi.com/xfs/xfs.git
diff --git a/Makefile b/Makefile
index b62c1e09444a..dda21c3efc7b 100644
--- a/Makefile
+++ b/Makefile
@@ -231,10 +231,6 @@ endif
# Where to locate arch specific headers
hdr-arch := $(SRCARCH)
-ifeq ($(ARCH),m68knommu)
- hdr-arch := m68k
-endif
-
KCONFIG_CONFIG ?= .config
export KCONFIG_CONFIG
@@ -341,7 +337,6 @@ AWK = awk
GENKSYMS = scripts/genksyms/genksyms
INSTALLKERNEL := installkernel
DEPMOD = /sbin/depmod
-KALLSYMS = scripts/kallsyms
PERL = perl
CHECK = sparse
@@ -739,197 +734,21 @@ libs-y1 := $(patsubst %/, %/lib.a, $(libs-y))
libs-y2 := $(patsubst %/, %/built-in.o, $(libs-y))
libs-y := $(libs-y1) $(libs-y2)
-# Build vmlinux
-# ---------------------------------------------------------------------------
-# vmlinux is built from the objects selected by $(vmlinux-init) and
-# $(vmlinux-main). Most are built-in.o files from top-level directories
-# in the kernel tree, others are specified in arch/$(ARCH)/Makefile.
-# Ordering when linking is important, and $(vmlinux-init) must be first.
-#
-# vmlinux
-# ^
-# |
-# +-< $(vmlinux-init)
-# | +--< init/version.o + more
-# |
-# +--< $(vmlinux-main)
-# | +--< driver/built-in.o mm/built-in.o + more
-# |
-# +-< kallsyms.o (see description in CONFIG_KALLSYMS section)
-#
-# vmlinux version (uname -v) cannot be updated during normal
-# descending-into-subdirs phase since we do not yet know if we need to
-# update vmlinux.
-# Therefore this step is delayed until just before final link of vmlinux -
-# except in the kallsyms case where it is done just before adding the
-# symbols to the kernel.
-#
-# System.map is generated to document addresses of all kernel symbols
-
-vmlinux-init := $(head-y) $(init-y)
-vmlinux-main := $(core-y) $(libs-y) $(drivers-y) $(net-y)
-vmlinux-all := $(vmlinux-init) $(vmlinux-main)
-vmlinux-lds := arch/$(SRCARCH)/kernel/vmlinux.lds
-export KBUILD_VMLINUX_OBJS := $(vmlinux-all)
-
-# Rule to link vmlinux - also used during CONFIG_KALLSYMS
-# May be overridden by arch/$(ARCH)/Makefile
-quiet_cmd_vmlinux__ ?= LD $@
- cmd_vmlinux__ ?= $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) -o $@ \
- -T $(vmlinux-lds) $(vmlinux-init) \
- --start-group $(vmlinux-main) --end-group \
- $(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o FORCE ,$^)
-
-# Generate new vmlinux version
-quiet_cmd_vmlinux_version = GEN .version
- cmd_vmlinux_version = set -e; \
- if [ ! -r .version ]; then \
- rm -f .version; \
- echo 1 >.version; \
- else \
- mv .version .old_version; \
- expr 0$$(cat .old_version) + 1 >.version; \
- fi; \
- $(MAKE) $(build)=init
-
-# Generate System.map
-quiet_cmd_sysmap = SYSMAP
- cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap
-
-# Sort exception table at build time
-quiet_cmd_sortextable = SORTEX
- cmd_sortextable = $(objtree)/scripts/sortextable
-
-# Link of vmlinux
-# If CONFIG_KALLSYMS is set .version is already updated
-# Generate System.map and verify that the content is consistent
-# Use + in front of the vmlinux_version rule to silent warning with make -j2
-# First command is ':' to allow us to use + in front of the rule
-define rule_vmlinux__
- :
- $(if $(CONFIG_KALLSYMS),,+$(call cmd,vmlinux_version))
-
- $(call cmd,vmlinux__)
- $(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd
-
- $(if $(CONFIG_BUILDTIME_EXTABLE_SORT), \
- $(Q)$(if $($(quiet)cmd_sortextable), \
- echo ' $($(quiet)cmd_sortextable) vmlinux' &&) \
- $(cmd_sortextable) vmlinux)
-
-
- $(Q)$(if $($(quiet)cmd_sysmap), \
- echo ' $($(quiet)cmd_sysmap) System.map' &&) \
- $(cmd_sysmap) $@ System.map; \
- if [ $$? -ne 0 ]; then \
- rm -f $@; \
- /bin/false; \
- fi;
- $(verify_kallsyms)
-endef
-
-
-ifdef CONFIG_KALLSYMS
-# Generate section listing all symbols and add it into vmlinux $(kallsyms.o)
-# It's a three stage process:
-# o .tmp_vmlinux1 has all symbols and sections, but __kallsyms is
-# empty
-# Running kallsyms on that gives us .tmp_kallsyms1.o with
-# the right size - vmlinux version (uname -v) is updated during this step
-# o .tmp_vmlinux2 now has a __kallsyms section of the right size,
-# but due to the added section, some addresses have shifted.
-# From here, we generate a correct .tmp_kallsyms2.o
-# o The correct .tmp_kallsyms2.o is linked into the final vmlinux.
-# o Verify that the System.map from vmlinux matches the map from
-# .tmp_vmlinux2, just in case we did not generate kallsyms correctly.
-# o If 'make KALLSYMS_EXTRA_PASS=1" was used, do an extra pass using
-# .tmp_vmlinux3 and .tmp_kallsyms3.o. This is only meant as a
-# temporary bypass to allow the kernel to be built while the
-# maintainers work out what went wrong with kallsyms.
-
-last_kallsyms := 2
-
-ifdef KALLSYMS_EXTRA_PASS
-ifneq ($(KALLSYMS_EXTRA_PASS),0)
-last_kallsyms := 3
-endif
-endif
-
-kallsyms.o := .tmp_kallsyms$(last_kallsyms).o
-
-define verify_kallsyms
- $(Q)$(if $($(quiet)cmd_sysmap), \
- echo ' $($(quiet)cmd_sysmap) .tmp_System.map' &&) \
- $(cmd_sysmap) .tmp_vmlinux$(last_kallsyms) .tmp_System.map
- $(Q)cmp -s System.map .tmp_System.map || \
- (echo Inconsistent kallsyms data; \
- echo This is a bug - please report about it; \
- echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround; \
- rm .tmp_kallsyms* ; /bin/false )
-endef
-
-# Update vmlinux version before link
-# Use + in front of this rule to silent warning about make -j1
-# First command is ':' to allow us to use + in front of this rule
-cmd_ksym_ld = $(cmd_vmlinux__)
-define rule_ksym_ld
- :
- +$(call cmd,vmlinux_version)
- $(call cmd,vmlinux__)
- $(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd
-endef
-
-# Generate .S file with all kernel symbols
-quiet_cmd_kallsyms = KSYM $@
- cmd_kallsyms = $(NM) -n $< | $(KALLSYMS) \
- $(if $(CONFIG_KALLSYMS_ALL),--all-symbols) > $@
+# Externally visible symbols (used by link-vmlinux.sh)
+export KBUILD_VMLINUX_INIT := $(head-y) $(init-y)
+export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y) $(drivers-y) $(net-y)
+export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
+export LDFLAGS_vmlinux
-.tmp_kallsyms1.o .tmp_kallsyms2.o .tmp_kallsyms3.o: %.o: %.S scripts FORCE
- $(call if_changed_dep,as_o_S)
+vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN)
-.tmp_kallsyms%.S: .tmp_vmlinux% $(KALLSYMS)
- $(call cmd,kallsyms)
+# Final link of vmlinux
+ cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
+quiet_cmd_link-vmlinux = LINK $@
-# .tmp_vmlinux1 must be complete except kallsyms, so update vmlinux version
-.tmp_vmlinux1: $(vmlinux-lds) $(vmlinux-all) FORCE
- $(call if_changed_rule,ksym_ld)
-
-.tmp_vmlinux2: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms1.o FORCE
- $(call if_changed,vmlinux__)
-
-.tmp_vmlinux3: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms2.o FORCE
- $(call if_changed,vmlinux__)
-
-# Needs to visit scripts/ before $(KALLSYMS) can be used.
-$(KALLSYMS): scripts ;
-
-# Generate some data for debugging strange kallsyms problems
-debug_kallsyms: .tmp_map$(last_kallsyms)
-
-.tmp_map%: .tmp_vmlinux% FORCE
- ($(OBJDUMP) -h $< | $(AWK) '/^ +[0-9]/{print $$4 " 0 " $$2}'; $(NM) $<) | sort > $@
-
-.tmp_map3: .tmp_map2
-
-.tmp_map2: .tmp_map1
-
-endif # ifdef CONFIG_KALLSYMS
-
-# Do modpost on a prelinked vmlinux. The finally linked vmlinux has
-# relevant sections renamed as per the linker script.
-quiet_cmd_vmlinux-modpost = LD $@
- cmd_vmlinux-modpost = $(LD) $(LDFLAGS) -r -o $@ \
- $(vmlinux-init) --start-group $(vmlinux-main) --end-group \
- $(filter-out $(vmlinux-init) $(vmlinux-main) FORCE ,$^)
-define rule_vmlinux-modpost
- :
- +$(call cmd,vmlinux-modpost)
- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost $@
- $(Q)echo 'cmd_$@ := $(cmd_vmlinux-modpost)' > $(dot-target).cmd
-endef
-
-# vmlinux image - including updated kernel symbols
-vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
+# Include targets which we want to
+# execute if the rest of the kernel build went well.
+vmlinux: scripts/link-vmlinux.sh $(vmlinux-deps) FORCE
ifdef CONFIG_HEADERS_CHECK
$(Q)$(MAKE) -f $(srctree)/Makefile headers_check
endif
@@ -939,22 +758,11 @@ endif
ifdef CONFIG_BUILD_DOCSRC
$(Q)$(MAKE) $(build)=Documentation
endif
- $(call vmlinux-modpost)
- $(call if_changed_rule,vmlinux__)
- $(Q)rm -f .old_version
-
-# build vmlinux.o first to catch section mismatch errors early
-ifdef CONFIG_KALLSYMS
-.tmp_vmlinux1: vmlinux.o
-endif
-
-modpost-init := $(filter-out init/built-in.o, $(vmlinux-init))
-vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
- $(call if_changed_rule,vmlinux-modpost)
+ +$(call if_changed,link-vmlinux)
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
-$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+$(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
# Handle descending into subdirectories listed in $(vmlinux-dirs)
# Preset locale variables to speed up the build process. Limit locale
@@ -1181,8 +989,6 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_DIRS += $(MODVERDIR)
-CLEAN_FILES += vmlinux System.map \
- .tmp_kallsyms* .tmp_version .tmp_vmlinux* .tmp_System.map
# Directories & files removed with 'make mrproper'
MRPROPER_DIRS += include/config usr/include include/generated \
@@ -1428,6 +1234,7 @@ scripts: ;
endif # KBUILD_EXTMOD
clean: $(clean-dirs)
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
$(call cmd,rmdirs)
$(call cmd,rmfiles)
@find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
@@ -1568,14 +1375,6 @@ quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
cmd_crmodverdir = $(Q)mkdir -p $(MODVERDIR) \
$(if $(KBUILD_MODULES),; rm -f $(MODVERDIR)/*)
-a_flags = -Wp,-MD,$(depfile) $(KBUILD_AFLAGS) $(AFLAGS_KERNEL) \
- $(KBUILD_AFLAGS_KERNEL) \
- $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(KBUILD_CPPFLAGS) \
- $(modkern_aflags) $(EXTRA_AFLAGS) $(AFLAGS_$(basetarget).o)
-
-quiet_cmd_as_o_S = AS $@
-cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
-
# read all saved command lines
targets := $(wildcard $(sort $(targets)))
diff --git a/arch/Kconfig b/arch/Kconfig
index 1f9461b9cc89..8c3d957fa8e2 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -76,6 +76,23 @@ config OPTPROBES
depends on KPROBES && HAVE_OPTPROBES
depends on !PREEMPT
+config UPROBES
+ bool "Transparent user-space probes (EXPERIMENTAL)"
+ depends on UPROBE_EVENT && PERF_EVENTS
+ default n
+ help
+ Uprobes is the user-space counterpart to kprobes: it enables
+ instrumentation applications (such as 'perf probe')
+ to establish unintrusive probes in user-space binaries and
+ libraries, by executing handler functions when the probes
+ are hit by user-space applications.
+
+ ( These probes come in the form of single-byte breakpoints,
+ managed by the kernel and kept transparent to the probed
+ application. )
+
+ If in doubt, say "N".
+
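As a rough, hedged sketch of the mechanism behind this option (not part of the patch; the probed path, the offset, and the module boilerplate are invented placeholders), a kernel-side client hands a uprobe_consumer to uprobe_register() for an inode/offset pair, and its handler runs whenever a task hits that instruction:

	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/namei.h>
	#include <linux/ptrace.h>
	#include <linux/uprobes.h>

	static int sample_handler(struct uprobe_consumer *self, struct pt_regs *regs)
	{
		pr_info("uprobe hit, ip=%lx\n", instruction_pointer(regs));
		return 0;
	}

	static struct uprobe_consumer sample_consumer = {
		.handler = sample_handler,
	};

	static struct inode *probed_inode;
	static loff_t probed_offset = 0x4710;	/* invented offset into the binary */

	static int __init sample_init(void)
	{
		struct path path;
		int ret;

		ret = kern_path("/bin/true", LOOKUP_FOLLOW, &path);	/* invented target */
		if (ret)
			return ret;

		probed_inode = igrab(path.dentry->d_inode);
		path_put(&path);
		if (!probed_inode)
			return -ENOENT;

		return uprobe_register(probed_inode, probed_offset, &sample_consumer);
	}

	static void __exit sample_exit(void)
	{
		uprobe_unregister(probed_inode, probed_offset, &sample_consumer);
		iput(probed_inode);
	}

	module_init(sample_init);
	module_exit(sample_exit);
	MODULE_LICENSE("GPL");

In practice the breakpoints are planted for you by the UPROBE_EVENT tracing code that 'perf probe' drives; the sketch only shows the consumer/handler shape the help text refers to.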
config HAVE_EFFICIENT_UNALIGNED_ACCESS
bool
help
@@ -142,6 +159,9 @@ config HAVE_ARCH_TRACEHOOK
config HAVE_DMA_ATTRS
bool
+config HAVE_DMA_CONTIGUOUS
+ bool
+
config USE_GENERIC_SMP_HELPERS
bool
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 0893f023efb8..3de74c9f9610 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -16,6 +16,7 @@ config ALPHA
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_CMOS_UPDATE
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
@@ -48,9 +49,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
-config GENERIC_CMOS_UPDATE
- def_bool y
-
config GENERIC_GPIO
bool
diff --git a/arch/alpha/include/asm/gpio.h b/arch/alpha/include/asm/gpio.h
index 7dc6a6343c06..b3799d88ffcf 100644
--- a/arch/alpha/include/asm/gpio.h
+++ b/arch/alpha/include/asm/gpio.h
@@ -1,55 +1,4 @@
-/*
- * Generic GPIO API implementation for Alpha.
- *
- * A stright copy of that for PowerPC which was:
- *
- * Copyright (c) 2007-2008 MontaVista Software, Inc.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef _ASM_ALPHA_GPIO_H
-#define _ASM_ALPHA_GPIO_H
-
-#include <linux/errno.h>
-#include <asm-generic/gpio.h>
-
-#ifdef CONFIG_GPIOLIB
-
-/*
- * We don't (yet) implement inlined/rapid versions for on-chip gpios.
- * Just call gpiolib.
- */
-static inline int gpio_get_value(unsigned int gpio)
-{
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
-{
- __gpio_set_value(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned int gpio)
-{
- return __gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(unsigned int gpio)
-{
- return __gpio_to_irq(gpio);
-}
-
-static inline int irq_to_gpio(unsigned int irq)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_GPIOLIB */
-
-#endif /* _ASM_ALPHA_GPIO_H */
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index 7a3d38d5ed6b..5ebab5895edb 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -489,6 +489,11 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
}
#endif
+#define ioread16be(p) be16_to_cpu(ioread16(p))
+#define ioread32be(p) be32_to_cpu(ioread32(p))
+#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
+#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
+
#define inb_p inb
#define inw_p inw
#define inl_p inl
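A hypothetical usage sketch of the big-endian MMIO accessors added to the Alpha <asm/io.h> above (the register layout and the ioremap'd base are invented for illustration):

	#include <linux/io.h>
	#include <linux/types.h>

	#define DEV_CTRL	0x04	/* invented big-endian control register */
	#define DEV_CTRL_EN	0x01

	static void dev_enable(void __iomem *base)
	{
		u32 ctrl = ioread32be(base + DEV_CTRL);	/* read and byte-swap */

		iowrite32be(ctrl | DEV_CTRL_EN, base + DEV_CTRL);
	}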
diff --git a/arch/alpha/include/asm/kvm_para.h b/arch/alpha/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/alpha/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/alpha/include/asm/sysinfo.h b/arch/alpha/include/asm/sysinfo.h
index e77d77cd07b8..0b80e79d75e5 100644
--- a/arch/alpha/include/asm/sysinfo.h
+++ b/arch/alpha/include/asm/sysinfo.h
@@ -15,6 +15,7 @@
#define GSI_GET_HWRPB 101
#define SSI_NVPAIRS 1
+#define SSI_LMF 7
#define SSI_IEEE_FP_CONTROL 14
#define SSI_IEEE_STATE_AT_SIGNAL 15
#define SSI_IEEE_IGNORE_STATE_AT_SIGNAL 16
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 2207fc61665d..d1f23b722df4 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -203,6 +203,12 @@
#define __NR_osf_security 222 /* not implemented */
#define __NR_osf_kloadcall 223 /* not implemented */
+#define __NR_osf_stat 224
+#define __NR_osf_lstat 225
+#define __NR_osf_fstat 226
+#define __NR_osf_statfs64 227
+#define __NR_osf_fstatfs64 228
+
#define __NR_getpgid 233
#define __NR_getsid 234
#define __NR_sigaltstack 235
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 49ee3193477a..98a103621af6 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -191,6 +191,39 @@ SYSCALL_DEFINE6(osf_mmap, unsigned long, addr, unsigned long, len,
return ret;
}
+struct osf_stat {
+ int st_dev;
+ int st_pad1;
+ unsigned st_mode;
+ unsigned short st_nlink;
+ short st_nlink_reserved;
+ unsigned st_uid;
+ unsigned st_gid;
+ int st_rdev;
+ int st_ldev;
+ long st_size;
+ int st_pad2;
+ int st_uatime;
+ int st_pad3;
+ int st_umtime;
+ int st_pad4;
+ int st_uctime;
+ int st_pad5;
+ int st_pad6;
+ unsigned st_flags;
+ unsigned st_gen;
+ long st_spare[4];
+ unsigned st_ino;
+ int st_ino_reserved;
+ int st_atime;
+ int st_atime_reserved;
+ int st_mtime;
+ int st_mtime_reserved;
+ int st_ctime;
+ int st_ctime_reserved;
+ long st_blksize;
+ long st_blocks;
+};
/*
* The OSF/1 statfs structure is much larger, but this should
@@ -209,6 +242,60 @@ struct osf_statfs {
__kernel_fsid_t f_fsid;
};
+struct osf_statfs64 {
+ short f_type;
+ short f_flags;
+ int f_pad1;
+ int f_pad2;
+ int f_pad3;
+ int f_pad4;
+ int f_pad5;
+ int f_pad6;
+ int f_pad7;
+ __kernel_fsid_t f_fsid;
+ u_short f_namemax;
+ short f_reserved1;
+ int f_spare[8];
+ char f_pad8[90];
+ char f_pad9[90];
+ long mount_info[10];
+ u_long f_flags2;
+ long f_spare2[14];
+ long f_fsize;
+ long f_bsize;
+ long f_blocks;
+ long f_bfree;
+ long f_bavail;
+ long f_files;
+ long f_ffree;
+};
+
+static int
+linux_to_osf_stat(struct kstat *lstat, struct osf_stat __user *osf_stat)
+{
+ struct osf_stat tmp = { 0 };
+
+ tmp.st_dev = lstat->dev;
+ tmp.st_mode = lstat->mode;
+ tmp.st_nlink = lstat->nlink;
+ tmp.st_uid = lstat->uid;
+ tmp.st_gid = lstat->gid;
+ tmp.st_rdev = lstat->rdev;
+ tmp.st_ldev = lstat->rdev;
+ tmp.st_size = lstat->size;
+ tmp.st_uatime = lstat->atime.tv_nsec / 1000;
+ tmp.st_umtime = lstat->mtime.tv_nsec / 1000;
+ tmp.st_uctime = lstat->ctime.tv_nsec / 1000;
+ tmp.st_ino = lstat->ino;
+ tmp.st_atime = lstat->atime.tv_sec;
+ tmp.st_mtime = lstat->mtime.tv_sec;
+ tmp.st_ctime = lstat->ctime.tv_sec;
+ tmp.st_blksize = lstat->blksize;
+ tmp.st_blocks = lstat->blocks;
+
+ return copy_to_user(osf_stat, &tmp, sizeof(tmp)) ? -EFAULT : 0;
+}
+
static int
linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_stat,
unsigned long bufsiz)
@@ -230,6 +317,26 @@ linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_st
return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0;
}
+static int
+linux_to_osf_statfs64(struct kstatfs *linux_stat, struct osf_statfs64 __user *osf_stat,
+ unsigned long bufsiz)
+{
+ struct osf_statfs64 tmp_stat = { 0 };
+
+ tmp_stat.f_type = linux_stat->f_type;
+ tmp_stat.f_fsize = linux_stat->f_frsize;
+ tmp_stat.f_bsize = linux_stat->f_bsize;
+ tmp_stat.f_blocks = linux_stat->f_blocks;
+ tmp_stat.f_bfree = linux_stat->f_bfree;
+ tmp_stat.f_bavail = linux_stat->f_bavail;
+ tmp_stat.f_files = linux_stat->f_files;
+ tmp_stat.f_ffree = linux_stat->f_ffree;
+ tmp_stat.f_fsid = linux_stat->f_fsid;
+ if (bufsiz > sizeof(tmp_stat))
+ bufsiz = sizeof(tmp_stat);
+ return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0;
+}
+
SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname,
struct osf_statfs __user *, buffer, unsigned long, bufsiz)
{
@@ -240,6 +347,42 @@ SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname,
return error;
}
+SYSCALL_DEFINE2(osf_stat, char __user *, name, struct osf_stat __user *, buf)
+{
+ struct kstat stat;
+ int error;
+
+ error = vfs_stat(name, &stat);
+ if (error)
+ return error;
+
+ return linux_to_osf_stat(&stat, buf);
+}
+
+SYSCALL_DEFINE2(osf_lstat, char __user *, name, struct osf_stat __user *, buf)
+{
+ struct kstat stat;
+ int error;
+
+ error = vfs_lstat(name, &stat);
+ if (error)
+ return error;
+
+ return linux_to_osf_stat(&stat, buf);
+}
+
+SYSCALL_DEFINE2(osf_fstat, int, fd, struct osf_stat __user *, buf)
+{
+ struct kstat stat;
+ int error;
+
+ error = vfs_fstat(fd, &stat);
+ if (error)
+ return error;
+
+ return linux_to_osf_stat(&stat, buf);
+}
+
SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd,
struct osf_statfs __user *, buffer, unsigned long, bufsiz)
{
@@ -250,6 +393,26 @@ SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd,
return error;
}
+SYSCALL_DEFINE3(osf_statfs64, char __user *, pathname,
+ struct osf_statfs64 __user *, buffer, unsigned long, bufsiz)
+{
+ struct kstatfs linux_stat;
+ int error = user_statfs(pathname, &linux_stat);
+ if (!error)
+ error = linux_to_osf_statfs64(&linux_stat, buffer, bufsiz);
+ return error;
+}
+
+SYSCALL_DEFINE3(osf_fstatfs64, unsigned long, fd,
+ struct osf_statfs64 __user *, buffer, unsigned long, bufsiz)
+{
+ struct kstatfs linux_stat;
+ int error = fd_statfs(fd, &linux_stat);
+ if (!error)
+ error = linux_to_osf_statfs64(&linux_stat, buffer, bufsiz);
+ return error;
+}
+
/*
* Uhh.. OSF/1 mount parameters aren't exactly obvious..
*
@@ -771,6 +934,9 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
return 0;
}
+ case SSI_LMF:
+ return 0;
+
default:
break;
}
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index e534e1c5bc11..87835235f114 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -241,11 +241,11 @@ sys_call_table:
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 225 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_osf_stat
+ .quad sys_osf_lstat /* 225 */
+ .quad sys_osf_fstat
+ .quad sys_osf_statfs64
+ .quad sys_osf_fstatfs64
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 230 */
.quad alpha_ni_syscall
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4f4c8115d79b..b649c5904a4f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1,9 +1,13 @@
config ARM
bool
default y
+ select ARCH_HAVE_CUSTOM_GPIO_H
select HAVE_AOUT
select HAVE_DMA_API_DEBUG
select HAVE_IDE if PCI || ISA || PCMCIA
+ select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+ select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
@@ -40,6 +44,8 @@ config ARM
select GENERIC_PCI_IOMAP
select HAVE_BPF_JIT
select GENERIC_SMP_IDLE_THREAD
+ select KTIME_SCALAR
+ select GENERIC_CLOCKEVENTS_BROADCAST if SMP
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
@@ -51,33 +57,25 @@ config ARM
config ARM_HAS_SG_CHAIN
bool
-config HAVE_PWM
+config NEED_SG_DMA_LENGTH
bool
-config MIGHT_HAVE_PCI
- bool
-
-config SYS_SUPPORTS_APM_EMULATION
+config ARM_DMA_USE_IOMMU
+ select NEED_SG_DMA_LENGTH
+ select ARM_HAS_SG_CHAIN
bool
-config GENERIC_GPIO
- bool
-
-config ARCH_USES_GETTIMEOFFSET
+config HAVE_PWM
bool
- default n
-config GENERIC_CLOCKEVENTS
+config MIGHT_HAVE_PCI
bool
-config GENERIC_CLOCKEVENTS_BROADCAST
+config SYS_SUPPORTS_APM_EMULATION
bool
- depends on GENERIC_CLOCKEVENTS
- default y if SMP
-config KTIME_SCALAR
+config GENERIC_GPIO
bool
- default y
config HAVE_TCM
bool
@@ -458,8 +456,10 @@ config ARCH_MXS
select ARCH_REQUIRE_GPIOLIB
select CLKDEV_LOOKUP
select CLKSRC_MMIO
+ select COMMON_CLK
select HAVE_CLK_PREPARE
select PINCTRL
+ select USE_OF
help
Support for Freescale MXS-based family of processors
@@ -525,7 +525,7 @@ config ARCH_IXP4XX
select ARCH_HAS_DMA_SET_COHERENT_MASK
select CLKSRC_MMIO
select CPU_XSCALE
- select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
select MIGHT_HAVE_PCI
select NEED_MACH_IO_H
@@ -949,6 +949,7 @@ config PLAT_SPEAR
select ARM_AMBA
select ARCH_REQUIRE_GPIOLIB
select CLKDEV_LOOKUP
+ select COMMON_CLK
select CLKSRC_MMIO
select GENERIC_CLOCKEVENTS
select HAVE_CLK
@@ -1053,7 +1054,6 @@ source "arch/arm/mach-sa1100/Kconfig"
source "arch/arm/plat-samsung/Kconfig"
source "arch/arm/plat-s3c24xx/Kconfig"
-source "arch/arm/plat-s5p/Kconfig"
source "arch/arm/plat-spear/Kconfig"
@@ -1104,6 +1104,7 @@ config PLAT_ORION
bool
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
+ select COMMON_CLK
config PLAT_PXA
bool
@@ -1438,8 +1439,6 @@ endmenu
menu "Kernel Features"
-source "kernel/time/Kconfig"
-
config HAVE_SMP
bool
help
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 85348a09d655..01a134141216 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -103,6 +103,35 @@ choice
Say Y here if you want the debug print routines to direct
their output to the second serial port on these devices.
+ config DEBUG_DAVINCI_DA8XX_UART1
+ bool "Kernel low-level debugging on DaVinci DA8XX using UART1"
+ depends on ARCH_DAVINCI_DA8XX
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the UART1 serial port on DaVinci DA8XX devices.
+
+ config DEBUG_DAVINCI_DA8XX_UART2
+ bool "Kernel low-level debugging on DaVinci DA8XX using UART2"
+ depends on ARCH_DAVINCI_DA8XX
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the UART2 serial port on DaVinci DA8XX devices.
+
+ config DEBUG_DAVINCI_DMx_UART0
+ bool "Kernel low-level debugging on DaVinci DMx using UART0"
+ depends on ARCH_DAVINCI_DMx
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the UART0 serial port on DaVinci DMx devices.
+
+ config DEBUG_DAVINCI_TNETV107X_UART1
+ bool "Kernel low-level debugging on DaVinci TNETV107x using UART1"
+ depends on ARCH_DAVINCI_TNETV107X
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the UART1 serial port on DaVinci TNETV107X
+ devices.
+
config DEBUG_DC21285_PORT
bool "Kernel low-level debugging messages via footbridge serial port"
depends on FOOTBRIDGE
@@ -180,6 +209,14 @@ choice
Say Y here if you want kernel low-level debugging support
on i.MX50 or i.MX53.
+ config DEBUG_IMX6Q_UART2
+ bool "i.MX6Q Debug UART2"
+ depends on SOC_IMX6Q
+ help
+ Say Y here if you want kernel low-level debugging support
+ on i.MX6Q UART2. This is correct for e.g. the SabreLite
+ board.
+
config DEBUG_IMX6Q_UART4
bool "i.MX6Q Debug UART4"
depends on SOC_IMX6Q
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 157900da8782..0298b00fe241 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -160,9 +160,7 @@ machine-$(CONFIG_ARCH_MXS) := mxs
machine-$(CONFIG_ARCH_NETX) := netx
machine-$(CONFIG_ARCH_NOMADIK) := nomadik
machine-$(CONFIG_ARCH_OMAP1) := omap1
-machine-$(CONFIG_ARCH_OMAP2) := omap2
-machine-$(CONFIG_ARCH_OMAP3) := omap2
-machine-$(CONFIG_ARCH_OMAP4) := omap2
+machine-$(CONFIG_ARCH_OMAP2PLUS) := omap2
machine-$(CONFIG_ARCH_ORION5X) := orion5x
machine-$(CONFIG_ARCH_PICOXCELL) := picoxcell
machine-$(CONFIG_ARCH_PNX4008) := pnx4008
@@ -188,6 +186,8 @@ machine-$(CONFIG_ARCH_VEXPRESS) := vexpress
machine-$(CONFIG_ARCH_VT8500) := vt8500
machine-$(CONFIG_ARCH_W90X900) := w90x900
machine-$(CONFIG_FOOTBRIDGE) := footbridge
+machine-$(CONFIG_MACH_SPEAR1310) := spear13xx
+machine-$(CONFIG_MACH_SPEAR1340) := spear13xx
machine-$(CONFIG_MACH_SPEAR300) := spear3xx
machine-$(CONFIG_MACH_SPEAR310) := spear3xx
machine-$(CONFIG_MACH_SPEAR320) := spear3xx
@@ -205,7 +205,7 @@ plat-$(CONFIG_PLAT_NOMADIK) := nomadik
plat-$(CONFIG_PLAT_ORION) := orion
plat-$(CONFIG_PLAT_PXA) := pxa
plat-$(CONFIG_PLAT_S3C24XX) := s3c24xx samsung
-plat-$(CONFIG_PLAT_S5P) := s5p samsung
+plat-$(CONFIG_PLAT_S5P) := samsung
plat-$(CONFIG_PLAT_SPEAR) := spear
plat-$(CONFIG_PLAT_VERSATILE) := versatile
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 399d17b231d2..49945cc1bc7d 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -23,4 +23,52 @@
chosen {
bootargs = "root=/dev/ram0 rw ramdisk=8192 console=ttySAC1,115200";
};
+
+ i2c@12C60000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <20000>;
+ gpios = <&gpb3 0 2 3 0>,
+ <&gpb3 1 2 3 0>;
+
+ eeprom@50 {
+ compatible = "samsung,s524ad0xd1";
+ reg = <0x50>;
+ };
+ };
+
+ i2c@12C70000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <20000>;
+ gpios = <&gpb3 2 2 3 0>,
+ <&gpb3 3 2 3 0>;
+
+ eeprom@51 {
+ compatible = "samsung,s524ad0xd1";
+ reg = <0x51>;
+ };
+ };
+
+ i2c@12C80000 {
+ status = "disabled";
+ };
+
+ i2c@12C90000 {
+ status = "disabled";
+ };
+
+ i2c@12CA0000 {
+ status = "disabled";
+ };
+
+ i2c@12CB0000 {
+ status = "disabled";
+ };
+
+ i2c@12CC0000 {
+ status = "disabled";
+ };
+
+ i2c@12CD0000 {
+ status = "disabled";
+ };
};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index dfc433599436..4272b2949228 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -23,11 +23,27 @@
compatible = "samsung,exynos5250";
interrupt-parent = <&gic>;
- gic:interrupt-controller@10490000 {
+ gic:interrupt-controller@10481000 {
compatible = "arm,cortex-a9-gic";
#interrupt-cells = <3>;
interrupt-controller;
- reg = <0x10490000 0x1000>, <0x10480000 0x100>;
+ reg = <0x10481000 0x1000>, <0x10482000 0x2000>;
+ };
+
+ combiner:interrupt-controller@10440000 {
+ compatible = "samsung,exynos4210-combiner";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ samsung,combiner-nr = <32>;
+ reg = <0x10440000 0x1000>;
+ interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
+ <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
+ <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
+ <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>,
+ <0 16 0>, <0 17 0>, <0 18 0>, <0 19 0>,
+ <0 20 0>, <0 21 0>, <0 22 0>, <0 23 0>,
+ <0 24 0>, <0 25 0>, <0 26 0>, <0 27 0>,
+ <0 28 0>, <0 29 0>, <0 30 0>, <0 31 0>;
};
watchdog {
@@ -42,30 +58,6 @@
interrupts = <0 43 0>, <0 44 0>;
};
- sdhci@12200000 {
- compatible = "samsung,exynos4210-sdhci";
- reg = <0x12200000 0x100>;
- interrupts = <0 75 0>;
- };
-
- sdhci@12210000 {
- compatible = "samsung,exynos4210-sdhci";
- reg = <0x12210000 0x100>;
- interrupts = <0 76 0>;
- };
-
- sdhci@12220000 {
- compatible = "samsung,exynos4210-sdhci";
- reg = <0x12220000 0x100>;
- interrupts = <0 77 0>;
- };
-
- sdhci@12230000 {
- compatible = "samsung,exynos4210-sdhci";
- reg = <0x12230000 0x100>;
- interrupts = <0 78 0>;
- };
-
serial@12C00000 {
compatible = "samsung,exynos4210-uart";
reg = <0x12C00000 0x100>;
@@ -94,48 +86,64 @@
compatible = "samsung,s3c2440-i2c";
reg = <0x12C60000 0x100>;
interrupts = <0 56 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
i2c@12C70000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12C70000 0x100>;
interrupts = <0 57 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
i2c@12C80000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12C80000 0x100>;
interrupts = <0 58 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
i2c@12C90000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12C90000 0x100>;
interrupts = <0 59 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
i2c@12CA0000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12CA0000 0x100>;
interrupts = <0 60 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
i2c@12CB0000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12CB0000 0x100>;
interrupts = <0 61 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
i2c@12CC0000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12CC0000 0x100>;
interrupts = <0 62 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
i2c@12CD0000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12CD0000 0x100>;
interrupts = <0 63 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
amba {
@@ -157,13 +165,13 @@
interrupts = <0 35 0>;
};
- mdma0: pdma@10800000 {
+ mdma0: mdma@10800000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x10800000 0x1000>;
interrupts = <0 33 0>;
};
- mdma1: pdma@11C10000 {
+ mdma1: mdma@11C10000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x11C10000 0x1000>;
interrupts = <0 124 0>;
@@ -242,6 +250,12 @@
#gpio-cells = <4>;
};
+ gpc4: gpio-controller@114002E0 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x114002E0 0x20>;
+ #gpio-cells = <4>;
+ };
+
gpd0: gpio-controller@11400160 {
compatible = "samsung,exynos4-gpio";
reg = <0x11400160 0x20>;
@@ -388,19 +402,19 @@
gpv2: gpio-controller@10D10040 {
compatible = "samsung,exynos4-gpio";
- reg = <0x10D10040 0x20>;
+ reg = <0x10D10060 0x20>;
#gpio-cells = <4>;
};
gpv3: gpio-controller@10D10060 {
compatible = "samsung,exynos4-gpio";
- reg = <0x10D10060 0x20>;
+ reg = <0x10D10080 0x20>;
#gpio-cells = <4>;
};
gpv4: gpio-controller@10D10080 {
compatible = "samsung,exynos4-gpio";
- reg = <0x10D10080 0x20>;
+ reg = <0x10D100C0 0x20>;
#gpio-cells = <4>;
};
diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts
new file mode 100644
index 000000000000..70bffa929b65
--- /dev/null
+++ b/arch/arm/boot/dts/imx23-evk.dts
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx23.dtsi"
+
+/ {
+ model = "Freescale i.MX23 Evaluation Kit";
+ compatible = "fsl,imx23-evk", "fsl,imx23";
+
+ memory {
+ reg = <0x40000000 0x08000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx23-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_8bit_pins_a &mmc0_pins_fixup>;
+ bus-width = <8>;
+ wp-gpios = <&gpio1 30 0>;
+ status = "okay";
+ };
+ };
+
+ apbx@80040000 {
+ duart: serial@80070000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
new file mode 100644
index 000000000000..8c5f9994f3fc
--- /dev/null
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ interrupt-parent = <&icoll>;
+
+ aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ gpio2 = &gpio2;
+ };
+
+ cpus {
+ cpu@0 {
+ compatible = "arm,arm926ejs";
+ };
+ };
+
+ apb@80000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80000000 0x80000>;
+ ranges;
+
+ apbh@80000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80000000 0x40000>;
+ ranges;
+
+ icoll: interrupt-controller@80000000 {
+ compatible = "fsl,imx23-icoll", "fsl,mxs-icoll";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x80000000 0x2000>;
+ };
+
+ dma-apbh@80004000 {
+ compatible = "fsl,imx23-dma-apbh";
+ reg = <0x80004000 2000>;
+ };
+
+ ecc@80008000 {
+ reg = <0x80008000 2000>;
+ status = "disabled";
+ };
+
+ bch@8000a000 {
+ reg = <0x8000a000 2000>;
+ status = "disabled";
+ };
+
+ gpmi@8000c000 {
+ reg = <0x8000c000 2000>;
+ status = "disabled";
+ };
+
+ ssp0: ssp@80010000 {
+ reg = <0x80010000 2000>;
+ interrupts = <15 14>;
+ fsl,ssp-dma-channel = <1>;
+ status = "disabled";
+ };
+
+ etm@80014000 {
+ reg = <0x80014000 2000>;
+ status = "disabled";
+ };
+
+ pinctrl@80018000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx23-pinctrl", "simple-bus";
+ reg = <0x80018000 2000>;
+
+ gpio0: gpio@0 {
+ compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
+ interrupts = <16>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@1 {
+ compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
+ interrupts = <17>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@2 {
+ compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
+ interrupts = <18>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ duart_pins_a: duart@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x11a2 0x11b2>;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ mmc0_8bit_pins_a: mmc0-8bit@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x2020 0x2030 0x2040
+ 0x2050 0x0082 0x0092 0x00a2
+ 0x00b2 0x2000 0x2010 0x2060>;
+ fsl,drive-strength = <1>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+
+ mmc0_pins_fixup: mmc0-pins-fixup {
+ fsl,pinmux-ids = <0x2010 0x2060>;
+ fsl,pull-up = <0>;
+ };
+ };
+
+ digctl@8001c000 {
+ reg = <0x8001c000 2000>;
+ status = "disabled";
+ };
+
+ emi@80020000 {
+ reg = <0x80020000 2000>;
+ status = "disabled";
+ };
+
+ dma-apbx@80024000 {
+ compatible = "fsl,imx23-dma-apbx";
+ reg = <0x80024000 2000>;
+ };
+
+ dcp@80028000 {
+ reg = <0x80028000 2000>;
+ status = "disabled";
+ };
+
+ pxp@8002a000 {
+ reg = <0x8002a000 2000>;
+ status = "disabled";
+ };
+
+ ocotp@8002c000 {
+ reg = <0x8002c000 2000>;
+ status = "disabled";
+ };
+
+ axi-ahb@8002e000 {
+ reg = <0x8002e000 2000>;
+ status = "disabled";
+ };
+
+ lcdif@80030000 {
+ reg = <0x80030000 2000>;
+ status = "disabled";
+ };
+
+ ssp1: ssp@80034000 {
+ reg = <0x80034000 2000>;
+ interrupts = <2 20>;
+ fsl,ssp-dma-channel = <2>;
+ status = "disabled";
+ };
+
+ tvenc@80038000 {
+ reg = <0x80038000 2000>;
+ status = "disabled";
+ };
+ };
+
+ apbx@80040000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80040000 0x40000>;
+ ranges;
+
+ clkctl@80040000 {
+ reg = <0x80040000 2000>;
+ status = "disabled";
+ };
+
+ saif0: saif@80042000 {
+ reg = <0x80042000 2000>;
+ status = "disabled";
+ };
+
+ power@80044000 {
+ reg = <0x80044000 2000>;
+ status = "disabled";
+ };
+
+ saif1: saif@80046000 {
+ reg = <0x80046000 2000>;
+ status = "disabled";
+ };
+
+ audio-out@80048000 {
+ reg = <0x80048000 2000>;
+ status = "disabled";
+ };
+
+ audio-in@8004c000 {
+ reg = <0x8004c000 2000>;
+ status = "disabled";
+ };
+
+ lradc@80050000 {
+ reg = <0x80050000 2000>;
+ status = "disabled";
+ };
+
+ spdif@80054000 {
+ reg = <0x80054000 2000>;
+ status = "disabled";
+ };
+
+ i2c@80058000 {
+ reg = <0x80058000 2000>;
+ status = "disabled";
+ };
+
+ rtc@8005c000 {
+ reg = <0x8005c000 2000>;
+ status = "disabled";
+ };
+
+ pwm@80064000 {
+ reg = <0x80064000 2000>;
+ status = "disabled";
+ };
+
+ timrot@80068000 {
+ reg = <0x80068000 2000>;
+ status = "disabled";
+ };
+
+ auart0: serial@8006c000 {
+ reg = <0x8006c000 0x2000>;
+ status = "disabled";
+ };
+
+ auart1: serial@8006e000 {
+ reg = <0x8006e000 0x2000>;
+ status = "disabled";
+ };
+
+ duart: serial@80070000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x80070000 0x2000>;
+ interrupts = <0>;
+ status = "disabled";
+ };
+
+ usbphy@8007c000 {
+ reg = <0x8007c000 0x2000>;
+ status = "disabled";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80080000 0x80000>;
+ ranges;
+
+ usbctrl@80080000 {
+ reg = <0x80080000 0x10000>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx27-phytec-phycore.dts b/arch/arm/boot/dts/imx27-phytec-phycore.dts
index a51a08fc2af9..2b0ff60247a4 100644
--- a/arch/arm/boot/dts/imx27-phytec-phycore.dts
+++ b/arch/arm/boot/dts/imx27-phytec-phycore.dts
@@ -27,22 +27,22 @@
status = "okay";
};
- uart@1000a000 {
+ serial@1000a000 {
fsl,uart-has-rtscts;
status = "okay";
};
- uart@1000b000 {
+ serial@1000b000 {
fsl,uart-has-rtscts;
status = "okay";
};
- uart@1000c000 {
+ serial@1000c000 {
fsl,uart-has-rtscts;
status = "okay";
};
- fec@1002b000 {
+ ethernet@1002b000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index bc5e7d5ddd54..2b1a166d41f9 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -59,28 +59,28 @@
status = "disabled";
};
- uart1: uart@1000a000 {
+ uart1: serial@1000a000 {
compatible = "fsl,imx27-uart", "fsl,imx21-uart";
reg = <0x1000a000 0x1000>;
interrupts = <20>;
status = "disabled";
};
- uart2: uart@1000b000 {
+ uart2: serial@1000b000 {
compatible = "fsl,imx27-uart", "fsl,imx21-uart";
reg = <0x1000b000 0x1000>;
interrupts = <19>;
status = "disabled";
};
- uart3: uart@1000c000 {
+ uart3: serial@1000c000 {
compatible = "fsl,imx27-uart", "fsl,imx21-uart";
reg = <0x1000c000 0x1000>;
interrupts = <18>;
status = "disabled";
};
- uart4: uart@1000d000 {
+ uart4: serial@1000d000 {
compatible = "fsl,imx27-uart", "fsl,imx21-uart";
reg = <0x1000d000 0x1000>;
interrupts = <17>;
@@ -183,14 +183,14 @@
status = "disabled";
};
- uart5: uart@1001b000 {
+ uart5: serial@1001b000 {
compatible = "fsl,imx27-uart", "fsl,imx21-uart";
reg = <0x1001b000 0x1000>;
interrupts = <49>;
status = "disabled";
};
- uart6: uart@1001c000 {
+ uart6: serial@1001c000 {
compatible = "fsl,imx27-uart", "fsl,imx21-uart";
reg = <0x1001c000 0x1000>;
interrupts = <48>;
@@ -206,7 +206,7 @@
status = "disabled";
};
- fec: fec@1002b000 {
+ fec: ethernet@1002b000 {
compatible = "fsl,imx27-fec";
reg = <0x1002b000 0x4000>;
interrupts = <50>;
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
new file mode 100644
index 000000000000..ee520a529cb4
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx28.dtsi"
+
+/ {
+ model = "Freescale i.MX28 Evaluation Kit";
+ compatible = "fsl,imx28-evk", "fsl,imx28";
+
+ memory {
+ reg = <0x40000000 0x08000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_8bit_pins_a
+ &mmc0_cd_cfg &mmc0_sck_cfg>;
+ bus-width = <8>;
+ wp-gpios = <&gpio2 12 0>;
+ status = "okay";
+ };
+
+ ssp1: ssp@80012000 {
+ compatible = "fsl,imx28-mmc";
+ bus-width = <8>;
+ wp-gpios = <&gpio0 28 0>;
+ status = "okay";
+ };
+ };
+
+ apbx@80040000 {
+ saif0: saif@80042000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&saif0_pins_a>;
+ status = "okay";
+ };
+
+ saif1: saif@80046000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&saif1_pins_a>;
+ fsl,saif-master = <&saif0>;
+ status = "okay";
+ };
+
+ i2c0: i2c@80058000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ status = "okay";
+
+ sgtl5000: codec@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ VDDA-supply = <&reg_3p3v>;
+ VDDIO-supply = <&reg_3p3v>;
+
+ };
+ };
+
+ duart: serial@80074000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ mac0: ethernet@800f0000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_pins_a>;
+ status = "okay";
+ };
+
+ mac1: ethernet@800f4000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac1_pins_a>;
+ status = "okay";
+ };
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+
+ sound {
+ compatible = "fsl,imx28-evk-sgtl5000",
+ "fsl,mxs-audio-sgtl5000";
+ model = "imx28-evk-sgtl5000";
+ saif-controllers = <&saif0 &saif1>;
+ audio-codec = <&sgtl5000>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
new file mode 100644
index 000000000000..4634cb861a59
--- /dev/null
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -0,0 +1,497 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ interrupt-parent = <&icoll>;
+
+ aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ gpio2 = &gpio2;
+ gpio3 = &gpio3;
+ gpio4 = &gpio4;
+ saif0 = &saif0;
+ saif1 = &saif1;
+ };
+
+ cpus {
+ cpu@0 {
+ compatible = "arm,arm926ejs";
+ };
+ };
+
+ apb@80000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80000000 0x80000>;
+ ranges;
+
+ apbh@80000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80000000 0x3c900>;
+ ranges;
+
+ icoll: interrupt-controller@80000000 {
+ compatible = "fsl,imx28-icoll", "fsl,mxs-icoll";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x80000000 0x2000>;
+ };
+
+ hsadc@80002000 {
+ reg = <0x80002000 2000>;
+ interrupts = <13 87>;
+ status = "disabled";
+ };
+
+ dma-apbh@80004000 {
+ compatible = "fsl,imx28-dma-apbh";
+ reg = <0x80004000 2000>;
+ };
+
+ perfmon@80006000 {
+ reg = <0x80006000 800>;
+ interrupts = <27>;
+ status = "disabled";
+ };
+
+ bch@8000a000 {
+ reg = <0x8000a000 2000>;
+ interrupts = <41>;
+ status = "disabled";
+ };
+
+ gpmi@8000c000 {
+ reg = <0x8000c000 2000>;
+ interrupts = <42 88>;
+ status = "disabled";
+ };
+
+ ssp0: ssp@80010000 {
+ reg = <0x80010000 2000>;
+ interrupts = <96 82>;
+ fsl,ssp-dma-channel = <0>;
+ status = "disabled";
+ };
+
+ ssp1: ssp@80012000 {
+ reg = <0x80012000 2000>;
+ interrupts = <97 83>;
+ fsl,ssp-dma-channel = <1>;
+ status = "disabled";
+ };
+
+ ssp2: ssp@80014000 {
+ reg = <0x80014000 2000>;
+ interrupts = <98 84>;
+ fsl,ssp-dma-channel = <2>;
+ status = "disabled";
+ };
+
+ ssp3: ssp@80016000 {
+ reg = <0x80016000 2000>;
+ interrupts = <99 85>;
+ fsl,ssp-dma-channel = <3>;
+ status = "disabled";
+ };
+
+ pinctrl@80018000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx28-pinctrl", "simple-bus";
+ reg = <0x80018000 2000>;
+
+ gpio0: gpio@0 {
+ compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+ interrupts = <127>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@1 {
+ compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+ interrupts = <126>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@2 {
+ compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+ interrupts = <125>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@3 {
+ compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+ interrupts = <124>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@4 {
+ compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+ interrupts = <123>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ duart_pins_a: duart@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x3102 0x3112>;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ mac0_pins_a: mac0@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x4000 0x4010 0x4020
+ 0x4030 0x4040 0x4060 0x4070
+ 0x4080 0x4100>;
+ fsl,drive-strength = <1>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+
+ mac1_pins_a: mac1@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x40f1 0x4091 0x40a1
+ 0x40e1 0x40b1 0x40c1>;
+ fsl,drive-strength = <1>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+
+ mmc0_8bit_pins_a: mmc0-8bit@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x2000 0x2010 0x2020
+ 0x2030 0x2040 0x2050 0x2060
+ 0x2070 0x2080 0x2090 0x20a0>;
+ fsl,drive-strength = <1>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+
+ mmc0_cd_cfg: mmc0-cd-cfg {
+ fsl,pinmux-ids = <0x2090>;
+ fsl,pull-up = <0>;
+ };
+
+ mmc0_sck_cfg: mmc0-sck-cfg {
+ fsl,pinmux-ids = <0x20a0>;
+ fsl,drive-strength = <2>;
+ fsl,pull-up = <0>;
+ };
+
+ i2c0_pins_a: i2c0@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x3180 0x3190>;
+ fsl,drive-strength = <1>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+
+ saif0_pins_a: saif0@0 {
+ reg = <0>;
+ fsl,pinmux-ids =
+ <0x3140 0x3150 0x3160 0x3170>;
+ fsl,drive-strength = <2>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+
+ saif1_pins_a: saif1@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x31a0>;
+ fsl,drive-strength = <2>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+ };
+
+ digctl@8001c000 {
+ reg = <0x8001c000 2000>;
+ interrupts = <89>;
+ status = "disabled";
+ };
+
+ etm@80022000 {
+ reg = <0x80022000 2000>;
+ status = "disabled";
+ };
+
+ dma-apbx@80024000 {
+ compatible = "fsl,imx28-dma-apbx";
+ reg = <0x80024000 2000>;
+ };
+
+ dcp@80028000 {
+ reg = <0x80028000 2000>;
+ interrupts = <52 53 54>;
+ status = "disabled";
+ };
+
+ pxp@8002a000 {
+ reg = <0x8002a000 2000>;
+ interrupts = <39>;
+ status = "disabled";
+ };
+
+ ocotp@8002c000 {
+ reg = <0x8002c000 2000>;
+ status = "disabled";
+ };
+
+ axi-ahb@8002e000 {
+ reg = <0x8002e000 2000>;
+ status = "disabled";
+ };
+
+ lcdif@80030000 {
+ reg = <0x80030000 2000>;
+ interrupts = <38 86>;
+ status = "disabled";
+ };
+
+ can0: can@80032000 {
+ reg = <0x80032000 2000>;
+ interrupts = <8>;
+ status = "disabled";
+ };
+
+ can1: can@80034000 {
+ reg = <0x80034000 2000>;
+ interrupts = <9>;
+ status = "disabled";
+ };
+
+ simdbg@8003c000 {
+ reg = <0x8003c000 200>;
+ status = "disabled";
+ };
+
+ simgpmisel@8003c200 {
+ reg = <0x8003c200 100>;
+ status = "disabled";
+ };
+
+ simsspsel@8003c300 {
+ reg = <0x8003c300 100>;
+ status = "disabled";
+ };
+
+ simmemsel@8003c400 {
+ reg = <0x8003c400 100>;
+ status = "disabled";
+ };
+
+ gpiomon@8003c500 {
+ reg = <0x8003c500 100>;
+ status = "disabled";
+ };
+
+ simenet@8003c700 {
+ reg = <0x8003c700 100>;
+ status = "disabled";
+ };
+
+ armjtag@8003c800 {
+ reg = <0x8003c800 100>;
+ status = "disabled";
+ };
+ };
+
+ apbx@80040000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80040000 0x40000>;
+ ranges;
+
+ clkctl@80040000 {
+ reg = <0x80040000 2000>;
+ status = "disabled";
+ };
+
+ saif0: saif@80042000 {
+ compatible = "fsl,imx28-saif";
+ reg = <0x80042000 2000>;
+ interrupts = <59 80>;
+ fsl,saif-dma-channel = <4>;
+ status = "disabled";
+ };
+
+ power@80044000 {
+ reg = <0x80044000 2000>;
+ status = "disabled";
+ };
+
+ saif1: saif@80046000 {
+ compatible = "fsl,imx28-saif";
+ reg = <0x80046000 2000>;
+ interrupts = <58 81>;
+ fsl,saif-dma-channel = <5>;
+ status = "disabled";
+ };
+
+ lradc@80050000 {
+ reg = <0x80050000 2000>;
+ status = "disabled";
+ };
+
+ spdif@80054000 {
+ reg = <0x80054000 2000>;
+ interrupts = <45 66>;
+ status = "disabled";
+ };
+
+ rtc@80056000 {
+ reg = <0x80056000 2000>;
+ interrupts = <28 29>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@80058000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx28-i2c";
+ reg = <0x80058000 2000>;
+ interrupts = <111 68>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@8005a000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx28-i2c";
+ reg = <0x8005a000 2000>;
+ interrupts = <110 69>;
+ status = "disabled";
+ };
+
+ pwm@80064000 {
+ reg = <0x80064000 2000>;
+ status = "disabled";
+ };
+
+ timrot@80068000 {
+ reg = <0x80068000 2000>;
+ status = "disabled";
+ };
+
+ auart0: serial@8006a000 {
+ reg = <0x8006a000 0x2000>;
+ interrupts = <112 70 71>;
+ status = "disabled";
+ };
+
+ auart1: serial@8006c000 {
+ reg = <0x8006c000 0x2000>;
+ interrupts = <113 72 73>;
+ status = "disabled";
+ };
+
+ auart2: serial@8006e000 {
+ reg = <0x8006e000 0x2000>;
+ interrupts = <114 74 75>;
+ status = "disabled";
+ };
+
+ auart3: serial@80070000 {
+ reg = <0x80070000 0x2000>;
+ interrupts = <115 76 77>;
+ status = "disabled";
+ };
+
+ auart4: serial@80072000 {
+ reg = <0x80072000 0x2000>;
+ interrupts = <116 78 79>;
+ status = "disabled";
+ };
+
+ duart: serial@80074000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x80074000 0x1000>;
+ interrupts = <47>;
+ status = "disabled";
+ };
+
+ usbphy0: usbphy@8007c000 {
+ reg = <0x8007c000 0x2000>;
+ status = "disabled";
+ };
+
+ usbphy1: usbphy@8007e000 {
+ reg = <0x8007e000 0x2000>;
+ status = "disabled";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80080000 0x80000>;
+ ranges;
+
+ usbctrl0: usbctrl@80080000 {
+ reg = <0x80080000 0x10000>;
+ status = "disabled";
+ };
+
+ usbctrl1: usbctrl@80090000 {
+ reg = <0x80090000 0x10000>;
+ status = "disabled";
+ };
+
+ dflpt@800c0000 {
+ reg = <0x800c0000 0x10000>;
+ status = "disabled";
+ };
+
+ mac0: ethernet@800f0000 {
+ compatible = "fsl,imx28-fec";
+ reg = <0x800f0000 0x4000>;
+ interrupts = <101>;
+ status = "disabled";
+ };
+
+ mac1: ethernet@800f4000 {
+ compatible = "fsl,imx28-fec";
+ reg = <0x800f4000 0x4000>;
+ interrupts = <102>;
+ status = "disabled";
+ };
+
+ switch@800f8000 {
+ reg = <0x800f8000 0x8000>;
+ status = "disabled";
+ };
+
+ };
+};
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 9949e6060dee..de065b5976e6 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -17,10 +17,6 @@
model = "Freescale i.MX51 Babbage Board";
compatible = "fsl,imx51-babbage", "fsl,imx51";
- chosen {
- bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
- };
-
memory {
reg = <0x90000000 0x20000000>;
};
@@ -40,7 +36,7 @@
status = "okay";
};
- uart3: uart@7000c000 {
+ uart3: serial@7000c000 {
fsl,uart-has-rtscts;
status = "okay";
};
@@ -166,6 +162,11 @@
};
};
};
+
+ ssi2: ssi@70014000 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+ };
};
wdog@73f98000 { /* WDOG1 */
@@ -177,12 +178,12 @@
reg = <0x73fa8000 0x4000>;
};
- uart1: uart@73fbc000 {
+ uart1: serial@73fbc000 {
fsl,uart-has-rtscts;
status = "okay";
};
- uart2: uart@73fc0000 {
+ uart2: serial@73fc0000 {
status = "okay";
};
};
@@ -195,13 +196,20 @@
i2c@83fc4000 { /* I2C2 */
status = "okay";
- codec: sgtl5000@0a {
+ sgtl5000: codec@0a {
compatible = "fsl,sgtl5000";
reg = <0x0a>;
+ clock-frequency = <26000000>;
+ VDDA-supply = <&vdig_reg>;
+ VDDIO-supply = <&vvideo_reg>;
};
};
- fec@83fec000 {
+ audmux@83fd0000 {
+ status = "okay";
+ };
+
+ ethernet@83fec000 {
phy-mode = "mii";
status = "okay";
};
@@ -218,4 +226,18 @@
gpio-key,wakeup;
};
};
+
+ sound {
+ compatible = "fsl,imx51-babbage-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+ model = "imx51-babbage-sgtl5000";
+ ssi-controller = <&ssi2>;
+ audio-codec = <&sgtl5000>;
+ audio-routing =
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ mux-int-port = <2>;
+ mux-ext-port = <3>;
+ };
};
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 6663986fe1c8..bfa65abe8ef2 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -86,7 +86,7 @@
status = "disabled";
};
- uart3: uart@7000c000 {
+ uart3: serial@7000c000 {
compatible = "fsl,imx51-uart", "fsl,imx21-uart";
reg = <0x7000c000 0x4000>;
interrupts = <33>;
@@ -102,6 +102,15 @@
status = "disabled";
};
+ ssi2: ssi@70014000 {
+ compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
+ reg = <0x70014000 0x4000>;
+ interrupts = <30>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <25 24 23 22>; /* TX0 RX0 TX1 RX1 */
+ status = "disabled";
+ };
+
esdhc@70020000 { /* ESDHC3 */
compatible = "fsl,imx51-esdhc";
reg = <0x70020000 0x4000>;
@@ -171,14 +180,14 @@
status = "disabled";
};
- uart1: uart@73fbc000 {
+ uart1: serial@73fbc000 {
compatible = "fsl,imx51-uart", "fsl,imx21-uart";
reg = <0x73fbc000 0x4000>;
interrupts = <31>;
status = "disabled";
};
- uart2: uart@73fc0000 {
+ uart2: serial@73fc0000 {
compatible = "fsl,imx51-uart", "fsl,imx21-uart";
reg = <0x73fc0000 0x4000>;
interrupts = <32>;
@@ -235,7 +244,31 @@
status = "disabled";
};
- fec@83fec000 {
+ ssi1: ssi@83fcc000 {
+ compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
+ reg = <0x83fcc000 0x4000>;
+ interrupts = <29>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <29 28 27 26>; /* TX0 RX0 TX1 RX1 */
+ status = "disabled";
+ };
+
+ audmux@83fd0000 {
+ compatible = "fsl,imx51-audmux", "fsl,imx31-audmux";
+ reg = <0x83fd0000 0x4000>;
+ status = "disabled";
+ };
+
+ ssi3: ssi@83fe8000 {
+ compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
+ reg = <0x83fe8000 0x4000>;
+ interrupts = <96>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <47 46 37 35>; /* TX0 RX0 TX1 RX1 */
+ status = "disabled";
+ };
+
+ ethernet@83fec000 {
compatible = "fsl,imx51-fec", "fsl,imx27-fec";
reg = <0x83fec000 0x4000>;
interrupts = <87>;
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index 2dccce46ed81..5b8eafcdbeec 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -17,10 +17,6 @@
model = "Freescale i.MX53 Automotive Reference Design Board";
compatible = "fsl,imx53-ard", "fsl,imx53";
- chosen {
- bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
- };
-
memory {
reg = <0x70000000 0x40000000>;
};
@@ -44,7 +40,7 @@
reg = <0x53fa8000 0x4000>;
};
- uart1: uart@53fbc000 {
+ uart1: serial@53fbc000 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/imx53-evk.dts b/arch/arm/boot/dts/imx53-evk.dts
index 5bac4aa4800b..9c798034675e 100644
--- a/arch/arm/boot/dts/imx53-evk.dts
+++ b/arch/arm/boot/dts/imx53-evk.dts
@@ -17,10 +17,6 @@
model = "Freescale i.MX53 Evaluation Kit";
compatible = "fsl,imx53-evk", "fsl,imx53";
- chosen {
- bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
- };
-
memory {
reg = <0x70000000 0x80000000>;
};
@@ -75,7 +71,7 @@
reg = <0x53fa8000 0x4000>;
};
- uart1: uart@53fbc000 {
+ uart1: serial@53fbc000 {
status = "okay";
};
};
@@ -99,7 +95,7 @@
};
};
- fec@63fec000 {
+ ethernet@63fec000 {
phy-mode = "rmii";
phy-reset-gpios = <&gpio7 6 0>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
index 5c57c8672c36..2d803a9a6949 100644
--- a/arch/arm/boot/dts/imx53-qsb.dts
+++ b/arch/arm/boot/dts/imx53-qsb.dts
@@ -17,10 +17,6 @@
model = "Freescale i.MX53 Quick Start Board";
compatible = "fsl,imx53-qsb", "fsl,imx53";
- chosen {
- bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
- };
-
memory {
reg = <0x70000000 0x40000000>;
};
@@ -33,6 +29,11 @@
status = "okay";
};
+ ssi2: ssi@50014000 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+ };
+
esdhc@50020000 { /* ESDHC3 */
cd-gpios = <&gpio3 11 0>;
wp-gpios = <&gpio3 12 0>;
@@ -49,7 +50,7 @@
reg = <0x53fa8000 0x4000>;
};
- uart1: uart@53fbc000 {
+ uart1: serial@53fbc000 {
status = "okay";
};
};
@@ -62,9 +63,11 @@
i2c@63fc4000 { /* I2C2 */
status = "okay";
- codec: sgtl5000@0a {
+ sgtl5000: codec@0a {
compatible = "fsl,sgtl5000";
reg = <0x0a>;
+ VDDA-supply = <&reg_3p2v>;
+ VDDIO-supply = <&reg_3p2v>;
};
};
@@ -77,12 +80,88 @@
};
pmic: dialog@48 {
- compatible = "dialog,da9053", "dialog,da9052";
+ compatible = "dlg,da9053-aa", "dlg,da9052";
reg = <0x48>;
+
+ regulators {
+ buck0 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <2075000>;
+ };
+
+ buck1 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <2075000>;
+ };
+
+ buck2 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <2500000>;
+ };
+
+ buck3 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <2500000>;
+ };
+
+ ldo4 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ ldo5 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ ldo6 {
+ regulator-min-microvolt = <1725000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo7 {
+ regulator-min-microvolt = <1725000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo8 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
+ };
+
+ ldo9 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
+ };
+
+ ldo10 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
+ };
+
+ ldo11 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
+ };
+
+ ldo12 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <3650000>;
+ };
+
+ ldo13 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
+ };
+ };
};
};
- fec@63fec000 {
+ audmux@63fd0000 {
+ status = "okay";
+ };
+
+ ethernet@63fec000 {
phy-mode = "rmii";
phy-reset-gpios = <&gpio7 6 0>;
status = "okay";
@@ -122,4 +201,30 @@
linux,default-trigger = "heartbeat";
};
};
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_3p2v: 3p2v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P2V";
+ regulator-min-microvolt = <3200000>;
+ regulator-max-microvolt = <3200000>;
+ regulator-always-on;
+ };
+ };
+
+ sound {
+ compatible = "fsl,imx53-qsb-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+ model = "imx53-qsb-sgtl5000";
+ ssi-controller = <&ssi2>;
+ audio-codec = <&sgtl5000>;
+ audio-routing =
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ mux-int-port = <2>;
+ mux-ext-port = <5>;
+ };
};
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index c7ee86c2dfb5..08091029168e 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -17,10 +17,6 @@
model = "Freescale i.MX53 Smart Mobile Reference Design Board";
compatible = "fsl,imx53-smd", "fsl,imx53";
- chosen {
- bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
- };
-
memory {
reg = <0x70000000 0x40000000>;
};
@@ -35,11 +31,11 @@
};
esdhc@50008000 { /* ESDHC2 */
- fsl,card-wired;
+ non-removable;
status = "okay";
};
- uart3: uart@5000c000 {
+ uart3: serial@5000c000 {
fsl,uart-has-rtscts;
status = "okay";
};
@@ -76,7 +72,7 @@
};
esdhc@50020000 { /* ESDHC3 */
- fsl,card-wired;
+ non-removable;
status = "okay";
};
};
@@ -90,11 +86,11 @@
reg = <0x53fa8000 0x4000>;
};
- uart1: uart@53fbc000 {
+ uart1: serial@53fbc000 {
status = "okay";
};
- uart2: uart@53fc0000 {
+ uart2: serial@53fc0000 {
status = "okay";
};
};
@@ -142,7 +138,7 @@
};
};
- fec@63fec000 {
+ ethernet@63fec000 {
phy-mode = "rmii";
phy-reset-gpios = <&gpio7 6 0>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 5dd91b942c91..e3e869470cd3 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -88,7 +88,7 @@
status = "disabled";
};
- uart3: uart@5000c000 {
+ uart3: serial@5000c000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x5000c000 0x4000>;
interrupts = <33>;
@@ -104,6 +104,15 @@
status = "disabled";
};
+ ssi2: ssi@50014000 {
+ compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
+ reg = <0x50014000 0x4000>;
+ interrupts = <30>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <25 24 23 22>; /* TX0 RX0 TX1 RX1 */
+ status = "disabled";
+ };
+
esdhc@50020000 { /* ESDHC3 */
compatible = "fsl,imx53-esdhc";
reg = <0x50020000 0x4000>;
@@ -173,14 +182,14 @@
status = "disabled";
};
- uart1: uart@53fbc000 {
+ uart1: serial@53fbc000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x53fbc000 0x4000>;
interrupts = <31>;
status = "disabled";
};
- uart2: uart@53fc0000 {
+ uart2: serial@53fc0000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x53fc0000 0x4000>;
interrupts = <32>;
@@ -226,7 +235,7 @@
status = "disabled";
};
- uart4: uart@53ff0000 {
+ uart4: serial@53ff0000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x53ff0000 0x4000>;
interrupts = <13>;
@@ -241,7 +250,7 @@
reg = <0x60000000 0x10000000>;
ranges;
- uart5: uart@63f90000 {
+ uart5: serial@63f90000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x63f90000 0x4000>;
interrupts = <86>;
@@ -290,7 +299,31 @@
status = "disabled";
};
- fec@63fec000 {
+ ssi1: ssi@63fcc000 {
+ compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
+ reg = <0x63fcc000 0x4000>;
+ interrupts = <29>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <29 28 27 26>; /* TX0 RX0 TX1 RX1 */
+ status = "disabled";
+ };
+
+ audmux@63fd0000 {
+ compatible = "fsl,imx53-audmux", "fsl,imx31-audmux";
+ reg = <0x63fd0000 0x4000>;
+ status = "disabled";
+ };
+
+ ssi3: ssi@63fe8000 {
+ compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
+ reg = <0x63fe8000 0x4000>;
+ interrupts = <96>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <47 46 45 44>; /* TX0 RX0 TX1 RX1 */
+ status = "disabled";
+ };
+
+ ethernet@63fec000 {
compatible = "fsl,imx53-fec", "fsl,imx25-fec";
reg = <0x63fec000 0x4000>;
interrupts = <87>;
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index ce1c8238c897..db4c6096c562 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -17,19 +17,14 @@
model = "Freescale i.MX6 Quad Armadillo2 Board";
compatible = "fsl,imx6q-arm2", "fsl,imx6q";
- chosen {
- bootargs = "console=ttymxc0,115200 root=/dev/mmcblk3p3 rootwait";
- };
-
memory {
reg = <0x10000000 0x80000000>;
};
soc {
aips-bus@02100000 { /* AIPS2 */
- enet@02188000 {
+ ethernet@02188000 {
phy-mode = "rgmii";
- local-mac-address = [00 04 9F 01 1B 61];
status = "okay";
};
@@ -37,16 +32,20 @@
cd-gpios = <&gpio6 11 0>;
wp-gpios = <&gpio6 14 0>;
vmmc-supply = <&reg_3p3v>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3_1>;
status = "okay";
};
usdhc@0219c000 { /* uSDHC4 */
- fsl,card-wired;
+ non-removable;
vmmc-supply = <&reg_3p3v>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc4_1>;
status = "okay";
};
- uart4: uart@021f0000 {
+ uart4: serial@021f0000 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/imx6q-sabrelite.dts b/arch/arm/boot/dts/imx6q-sabrelite.dts
index 4663a4e5a285..e0ec92973e7e 100644
--- a/arch/arm/boot/dts/imx6q-sabrelite.dts
+++ b/arch/arm/boot/dts/imx6q-sabrelite.dts
@@ -22,8 +22,30 @@
};
soc {
+ aips-bus@02000000 { /* AIPS1 */
+ spba-bus@02000000 {
+ ecspi@02008000 { /* eCSPI1 */
+ fsl,spi-num-chipselects = <1>;
+ cs-gpios = <&gpio3 19 0>;
+ status = "okay";
+
+ flash: m25p80@0 {
+ compatible = "sst,sst25vf016b";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ };
+ };
+
+ ssi1: ssi@02028000 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+ };
+ };
+
+ };
+
aips-bus@02100000 { /* AIPS2 */
- enet@02188000 {
+ ethernet@02188000 {
phy-mode = "rgmii";
phy-reset-gpios = <&gpio3 23 0>;
status = "okay";
@@ -43,13 +65,23 @@
status = "okay";
};
- uart2: uart@021e8000 {
+ audmux@021d8000 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux_1>;
+ };
+
+ uart2: serial@021e8000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_serial2_1>;
};
i2c@021a0000 { /* I2C1 */
status = "okay";
clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1_1>;
codec: sgtl5000@0a {
compatible = "fsl,sgtl5000";
@@ -80,4 +112,18 @@
regulator-always-on;
};
};
+
+ sound {
+ compatible = "fsl,imx6q-sabrelite-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+ model = "imx6q-sabrelite-sgtl5000";
+ ssi-controller = <&ssi1>;
+ audio-codec = <&codec>;
+ audio-routing =
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ mux-int-port = <1>;
+ mux-ext-port = <4>;
+ };
};
diff --git a/arch/arm/boot/dts/imx6q-sabresd.dts b/arch/arm/boot/dts/imx6q-sabresd.dts
new file mode 100644
index 000000000000..07509a181178
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-sabresd.dts
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx6q.dtsi"
+
+/ {
+ model = "Freescale i.MX6Q SABRE Smart Device Board";
+ compatible = "fsl,imx6q-sabresd", "fsl,imx6q";
+
+ memory {
+ reg = <0x10000000 0x40000000>;
+ };
+
+ soc {
+
+ aips-bus@02000000 { /* AIPS1 */
+ spba-bus@02000000 {
+ uart1: serial@02020000 {
+ status = "okay";
+ };
+ };
+ };
+
+ aips-bus@02100000 { /* AIPS2 */
+ ethernet@02188000 {
+ phy-mode = "rgmii";
+ status = "okay";
+ };
+
+ usdhc@02194000 { /* uSDHC2 */
+ cd-gpios = <&gpio2 2 0>;
+ wp-gpios = <&gpio2 3 0>;
+ status = "okay";
+ };
+
+ usdhc@02198000 { /* uSDHC3 */
+ cd-gpios = <&gpio2 0 0>;
+ wp-gpios = <&gpio2 1 0>;
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 4905f51a106f..8c90cbac945f 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -165,7 +165,7 @@
status = "disabled";
};
- uart1: uart@02020000 {
+ uart1: serial@02020000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x02020000 0x4000>;
interrupts = <0 26 0x04>;
@@ -177,19 +177,31 @@
interrupts = <0 51 0x04>;
};
- ssi@02028000 { /* SSI1 */
+ ssi1: ssi@02028000 {
+ compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
reg = <0x02028000 0x4000>;
interrupts = <0 46 0x04>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <38 37>;
+ status = "disabled";
};
- ssi@0202c000 { /* SSI2 */
+ ssi2: ssi@0202c000 {
+ compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
reg = <0x0202c000 0x4000>;
interrupts = <0 47 0x04>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <42 41>;
+ status = "disabled";
};
- ssi@02030000 { /* SSI3 */
+ ssi3: ssi@02030000 {
+ compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
reg = <0x02030000 0x4000>;
interrupts = <0 48 0x04>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <46 45>;
+ status = "disabled";
};
asrc@02034000 {
@@ -346,6 +358,90 @@
compatible = "fsl,imx6q-anatop";
reg = <0x020c8000 0x1000>;
interrupts = <0 49 0x04 0 54 0x04 0 127 0x04>;
+
+ regulator-1p1@110 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vdd1p1";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1375000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x110>;
+ anatop-vol-bit-shift = <8>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <4>;
+ anatop-min-voltage = <800000>;
+ anatop-max-voltage = <1375000>;
+ };
+
+ regulator-3p0@120 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vdd3p0";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3150000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x120>;
+ anatop-vol-bit-shift = <8>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <0>;
+ anatop-min-voltage = <2625000>;
+ anatop-max-voltage = <3400000>;
+ };
+
+ regulator-2p5@130 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vdd2p5";
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2750000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x130>;
+ anatop-vol-bit-shift = <8>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <0>;
+ anatop-min-voltage = <2000000>;
+ anatop-max-voltage = <2750000>;
+ };
+
+ regulator-vddcore@140 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "cpu";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x140>;
+ anatop-vol-bit-shift = <0>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <1>;
+ anatop-min-voltage = <725000>;
+ anatop-max-voltage = <1450000>;
+ };
+
+ regulator-vddpu@140 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vddpu";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x140>;
+ anatop-vol-bit-shift = <9>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <1>;
+ anatop-min-voltage = <725000>;
+ anatop-max-voltage = <1450000>;
+ };
+
+ regulator-vddsoc@140 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vddsoc";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x140>;
+ anatop-vol-bit-shift = <18>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <1>;
+ anatop-min-voltage = <725000>;
+ anatop-max-voltage = <1450000>;
+ };
};
usbphy@020c9000 { /* USBPHY1 */
@@ -386,7 +482,62 @@
};
iomuxc@020e0000 {
+ compatible = "fsl,imx6q-iomuxc";
reg = <0x020e0000 0x4000>;
+
+ /* shared pinctrl settings */
+ audmux {
+ pinctrl_audmux_1: audmux-1 {
+ fsl,pins = <18 0x80000000 /* MX6Q_PAD_SD2_DAT0__AUDMUX_AUD4_RXD */
+ 1586 0x80000000 /* MX6Q_PAD_SD2_DAT3__AUDMUX_AUD4_TXC */
+ 11 0x80000000 /* MX6Q_PAD_SD2_DAT2__AUDMUX_AUD4_TXD */
+ 3 0x80000000>; /* MX6Q_PAD_SD2_DAT1__AUDMUX_AUD4_TXFS */
+ };
+ };
+
+ i2c1 {
+ pinctrl_i2c1_1: i2c1grp-1 {
+ fsl,pins = <137 0x4001b8b1 /* MX6Q_PAD_EIM_D21__I2C1_SCL */
+ 196 0x4001b8b1>; /* MX6Q_PAD_EIM_D28__I2C1_SDA */
+ };
+ };
+
+ serial2 {
+ pinctrl_serial2_1: serial2grp-1 {
+ fsl,pins = <183 0x1b0b1 /* MX6Q_PAD_EIM_D26__UART2_TXD */
+ 191 0x1b0b1>; /* MX6Q_PAD_EIM_D27__UART2_RXD */
+ };
+ };
+
+ usdhc3 {
+ pinctrl_usdhc3_1: usdhc3grp-1 {
+ fsl,pins = <1273 0x17059 /* MX6Q_PAD_SD3_CMD__USDHC3_CMD */
+ 1281 0x10059 /* MX6Q_PAD_SD3_CLK__USDHC3_CLK */
+ 1289 0x17059 /* MX6Q_PAD_SD3_DAT0__USDHC3_DAT0 */
+ 1297 0x17059 /* MX6Q_PAD_SD3_DAT1__USDHC3_DAT1 */
+ 1305 0x17059 /* MX6Q_PAD_SD3_DAT2__USDHC3_DAT2 */
+ 1312 0x17059 /* MX6Q_PAD_SD3_DAT3__USDHC3_DAT3 */
+ 1265 0x17059 /* MX6Q_PAD_SD3_DAT4__USDHC3_DAT4 */
+ 1257 0x17059 /* MX6Q_PAD_SD3_DAT5__USDHC3_DAT5 */
+ 1249 0x17059 /* MX6Q_PAD_SD3_DAT6__USDHC3_DAT6 */
+ 1241 0x17059>; /* MX6Q_PAD_SD3_DAT7__USDHC3_DAT7 */
+ };
+ };
+
+ usdhc4 {
+ pinctrl_usdhc4_1: usdhc4grp-1 {
+ fsl,pins = <1386 0x17059 /* MX6Q_PAD_SD4_CMD__USDHC4_CMD */
+ 1392 0x10059 /* MX6Q_PAD_SD4_CLK__USDHC4_CLK */
+ 1462 0x17059 /* MX6Q_PAD_SD4_DAT0__USDHC4_DAT0 */
+ 1470 0x17059 /* MX6Q_PAD_SD4_DAT1__USDHC4_DAT1 */
+ 1478 0x17059 /* MX6Q_PAD_SD4_DAT2__USDHC4_DAT2 */
+ 1486 0x17059 /* MX6Q_PAD_SD4_DAT3__USDHC4_DAT3 */
+ 1493 0x17059 /* MX6Q_PAD_SD4_DAT4__USDHC4_DAT4 */
+ 1501 0x17059 /* MX6Q_PAD_SD4_DAT5__USDHC4_DAT5 */
+ 1509 0x17059 /* MX6Q_PAD_SD4_DAT6__USDHC4_DAT6 */
+ 1517 0x17059>; /* MX6Q_PAD_SD4_DAT7__USDHC4_DAT7 */
+ };
+ };
};
dcic@020e4000 { /* DCIC1 */
@@ -422,7 +573,7 @@
reg = <0x0217c000 0x4000>;
};
- enet@02188000 {
+ ethernet@02188000 {
compatible = "fsl,imx6q-fec";
reg = <0x02188000 0x4000>;
interrupts = <0 118 0x04 0 119 0x04>;
@@ -527,7 +678,9 @@
};
audmux@021d8000 {
+ compatible = "fsl,imx6q-audmux", "fsl,imx31-audmux";
reg = <0x021d8000 0x4000>;
+ status = "disabled";
};
mipi@021dc000 { /* MIPI-CSI */
@@ -543,28 +696,28 @@
interrupts = <0 18 0x04>;
};
- uart2: uart@021e8000 {
+ uart2: serial@021e8000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021e8000 0x4000>;
interrupts = <0 27 0x04>;
status = "disabled";
};
- uart3: uart@021ec000 {
+ uart3: serial@021ec000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021ec000 0x4000>;
interrupts = <0 28 0x04>;
status = "disabled";
};
- uart4: uart@021f0000 {
+ uart4: serial@021f0000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021f0000 0x4000>;
interrupts = <0 29 0x04>;
status = "disabled";
};
- uart5: uart@021f4000 {
+ uart5: serial@021f4000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021f4000 0x4000>;
interrupts = <0 30 0x04>;
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index 2d696866f71c..3f5dad801a98 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -215,45 +215,8 @@
gpio: gpio@40028000 {
compatible = "nxp,lpc3220-gpio";
reg = <0x40028000 0x1000>;
- /* create a private address space for enumeration */
- #address-cells = <1>;
- #size-cells = <0>;
-
- gpio_p0: gpio-bank@0 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0>;
- };
-
- gpio_p1: gpio-bank@1 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <1>;
- };
-
- gpio_p2: gpio-bank@2 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <2>;
- };
-
- gpio_p3: gpio-bank@3 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <3>;
- };
-
- gpi_p3: gpio-bank@4 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <4>;
- };
-
- gpo_p3: gpio-bank@5 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <5>;
- };
+ gpio-controller;
+ #gpio-cells = <3>; /* bank, pin, flags */
};
watchdog@4003C000 {
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index 8c756be4d7ad..5b4506c0a8c4 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -57,7 +57,7 @@
&mmc1 {
vmmc-supply = <&vmmc1>;
vmmc_aux-supply = <&vsim>;
- ti,bus-width = <8>;
+ bus-width = <8>;
};
&mmc2 {
diff --git a/arch/arm/boot/dts/omap4-panda.dts b/arch/arm/boot/dts/omap4-panda.dts
index e671361bc791..1efe0c587985 100644
--- a/arch/arm/boot/dts/omap4-panda.dts
+++ b/arch/arm/boot/dts/omap4-panda.dts
@@ -70,7 +70,7 @@
&mmc1 {
vmmc-supply = <&vmmc>;
- ti,bus-width = <8>;
+ bus-width = <8>;
};
&mmc2 {
@@ -87,5 +87,5 @@
&mmc5 {
ti,non-removable;
- ti,bus-width = <4>;
+ bus-width = <4>;
};
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index e5eeb6f9c6e6..d08c4d137280 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -137,12 +137,12 @@
&mmc1 {
vmmc-supply = <&vmmc>;
- ti,bus-width = <8>;
+ bus-width = <8>;
};
&mmc2 {
vmmc-supply = <&vaux1>;
- ti,bus-width = <8>;
+ bus-width = <8>;
ti,non-removable;
};
@@ -155,6 +155,6 @@
};
&mmc5 {
- ti,bus-width = <4>;
+ bus-width = <4>;
ti,non-removable;
};
diff --git a/arch/arm/boot/dts/phy3250.dts b/arch/arm/boot/dts/phy3250.dts
index 0167e86314c0..c4ff6d1a018b 100644
--- a/arch/arm/boot/dts/phy3250.dts
+++ b/arch/arm/boot/dts/phy3250.dts
@@ -131,13 +131,13 @@
compatible = "gpio-leds";
led0 {
- gpios = <&gpo_p3 1 1>; /* GPO_P3 1, GPIO 80, active low */
+ gpios = <&gpio 5 1 1>; /* GPO_P3 1, GPIO 80, active low */
linux,default-trigger = "heartbeat";
default-state = "off";
};
led1 {
- gpios = <&gpo_p3 14 1>; /* GPO_P3 14, GPIO 93, active low */
+ gpios = <&gpio 5 14 1>; /* GPO_P3 14, GPIO 93, active low */
linux,default-trigger = "timer";
default-state = "off";
};
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
new file mode 100644
index 000000000000..8314e4171884
--- /dev/null
+++ b/arch/arm/boot/dts/spear1310-evb.dts
@@ -0,0 +1,292 @@
+/*
+ * DTS file for SPEAr1310 Evaluation Board
+ *
+ * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "spear1310.dtsi"
+
+/ {
+ model = "ST SPEAr1310 Evaluation Board";
+ compatible = "st,spear1310-evb", "st,spear1310";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory {
+ reg = <0 0x40000000>;
+ };
+
+ ahb {
+ pinmux@e0700000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&state_default>;
+
+ state_default: pinmux {
+ i2c0-pmx {
+ st,pins = "i2c0_grp";
+ st,function = "i2c0";
+ };
+ i2s1 {
+ st,pins = "i2s1_grp";
+ st,function = "i2s1";
+ };
+ gpio {
+ st,pins = "arm_gpio_grp";
+ st,function = "arm_gpio";
+ };
+ eth {
+ st,pins = "gmii_grp";
+ st,function = "gmii";
+ };
+ ssp0 {
+ st,pins = "ssp0_grp";
+ st,function = "ssp0";
+ };
+ kbd {
+ st,pins = "keyboard_6x6_grp";
+ st,function = "keyboard";
+ };
+ sdhci {
+ st,pins = "sdhci_grp";
+ st,function = "sdhci";
+ };
+ smi-pmx {
+ st,pins = "smi_2_chips_grp";
+ st,function = "smi";
+ };
+ uart0 {
+ st,pins = "uart0_grp";
+ st,function = "uart0";
+ };
+ rs485 {
+ st,pins = "rs485_0_1_tdm_0_1_grp";
+ st,function = "rs485_0_1_tdm_0_1";
+ };
+ i2c1_2 {
+ st,pins = "i2c_1_2_grp";
+ st,function = "i2c_1_2";
+ };
+ pci {
+ st,pins = "pcie0_grp","pcie1_grp",
+ "pcie2_grp";
+ st,function = "pci";
+ };
+ smii {
+ st,pins = "smii_0_1_2_grp";
+ st,function = "smii_0_1_2";
+ };
+ nand {
+ st,pins = "nand_8bit_grp",
+ "nand_16bit_grp";
+ st,function = "nand";
+ };
+ };
+ };
+
+ ahci@b1000000 {
+ status = "okay";
+ };
+
+ cf@b2800000 {
+ status = "okay";
+ };
+
+ dma@ea800000 {
+ status = "okay";
+ };
+
+ dma@eb000000 {
+ status = "okay";
+ };
+
+ fsmc: flash@b0000000 {
+ status = "okay";
+ };
+
+ gmac0: eth@e2000000 {
+ status = "okay";
+ };
+
+ sdhci@b3000000 {
+ status = "okay";
+ };
+
+ smi: flash@ea000000 {
+ status = "okay";
+ clock-rate = <50000000>;
+
+ flash@e6000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xe6000000 0x800000>;
+ st,smi-fast-mode;
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x10000>;
+ };
+ partition@10000 {
+ label = "u-boot";
+ reg = <0x10000 0x40000>;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x2c0000>;
+ };
+ partition@310000 {
+ label = "rootfs";
+ reg = <0x310000 0x4f0000>;
+ };
+ };
+ };
+
+ spi0: spi@e0100000 {
+ status = "okay";
+ };
+
+ ehci@e4800000 {
+ status = "okay";
+ };
+
+ ehci@e5800000 {
+ status = "okay";
+ };
+
+ ohci@e4000000 {
+ status = "okay";
+ };
+
+ ohci@e5000000 {
+ status = "okay";
+ };
+
+ apb {
+ adc@e0080000 {
+ status = "okay";
+ };
+
+ gpio0: gpio@e0600000 {
+ status = "okay";
+ };
+
+ gpio1: gpio@e0680000 {
+ status = "okay";
+ };
+
+ i2c0: i2c@e0280000 {
+ status = "okay";
+ };
+
+ i2c1: i2c@5cd00000 {
+ status = "okay";
+ };
+
+ kbd@e0300000 {
+ linux,keymap = < 0x00000001
+ 0x00010002
+ 0x00020003
+ 0x00030004
+ 0x00040005
+ 0x00050006
+ 0x00060007
+ 0x00070008
+ 0x00080009
+ 0x0100000a
+ 0x0101000c
+ 0x0102000d
+ 0x0103000e
+ 0x0104000f
+ 0x01050010
+ 0x01060011
+ 0x01070012
+ 0x01080013
+ 0x02000014
+ 0x02010015
+ 0x02020016
+ 0x02030017
+ 0x02040018
+ 0x02050019
+ 0x0206001a
+ 0x0207001b
+ 0x0208001c
+ 0x0300001d
+ 0x0301001e
+ 0x0302001f
+ 0x03030020
+ 0x03040021
+ 0x03050022
+ 0x03060023
+ 0x03070024
+ 0x03080025
+ 0x04000026
+ 0x04010027
+ 0x04020028
+ 0x04030029
+ 0x0404002a
+ 0x0405002b
+ 0x0406002c
+ 0x0407002d
+ 0x0408002e
+ 0x0500002f
+ 0x05010030
+ 0x05020031
+ 0x05030032
+ 0x05040033
+ 0x05050034
+ 0x05060035
+ 0x05070036
+ 0x05080037
+ 0x06000038
+ 0x06010039
+ 0x0602003a
+ 0x0603003b
+ 0x0604003c
+ 0x0605003d
+ 0x0606003e
+ 0x0607003f
+ 0x06080040
+ 0x07000041
+ 0x07010042
+ 0x07020043
+ 0x07030044
+ 0x07040045
+ 0x07050046
+ 0x07060047
+ 0x07070048
+ 0x07080049
+ 0x0800004a
+ 0x0801004b
+ 0x0802004c
+ 0x0803004d
+ 0x0804004e
+ 0x0805004f
+ 0x08060050
+ 0x08070051
+ 0x08080052 >;
+ autorepeat;
+ st,mode = <0>;
+ status = "okay";
+ };
+
+ rtc@e0580000 {
+ status = "okay";
+ };
+
+ serial@e0000000 {
+ status = "okay";
+ };
+
+ wdt@ec800620 {
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
new file mode 100644
index 000000000000..9e61da404d57
--- /dev/null
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -0,0 +1,184 @@
+/*
+ * DTS file for all SPEAr1310 SoCs
+ *
+ * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "spear13xx.dtsi"
+
+/ {
+ compatible = "st,spear1310";
+
+ ahb {
+ ahci@b1000000 {
+ compatible = "snps,spear-ahci";
+ reg = <0xb1000000 0x10000>;
+ interrupts = <0 68 0x4>;
+ status = "disabled";
+ };
+
+ ahci@b1800000 {
+ compatible = "snps,spear-ahci";
+ reg = <0xb1800000 0x10000>;
+ interrupts = <0 69 0x4>;
+ status = "disabled";
+ };
+
+ ahci@b4000000 {
+ compatible = "snps,spear-ahci";
+ reg = <0xb4000000 0x10000>;
+ interrupts = <0 70 0x4>;
+ status = "disabled";
+ };
+
+ gmac1: eth@5c400000 {
+ compatible = "st,spear600-gmac";
+ reg = <0x5c400000 0x8000>;
+ interrupts = <0 95 0x4>;
+ interrupt-names = "macirq";
+ status = "disabled";
+ };
+
+ gmac2: eth@5c500000 {
+ compatible = "st,spear600-gmac";
+ reg = <0x5c500000 0x8000>;
+ interrupts = <0 96 0x4>;
+ interrupt-names = "macirq";
+ status = "disabled";
+ };
+
+ gmac3: eth@5c600000 {
+ compatible = "st,spear600-gmac";
+ reg = <0x5c600000 0x8000>;
+ interrupts = <0 97 0x4>;
+ interrupt-names = "macirq";
+ status = "disabled";
+ };
+
+ gmac4: eth@5c700000 {
+ compatible = "st,spear600-gmac";
+ reg = <0x5c700000 0x8000>;
+ interrupts = <0 98 0x4>;
+ interrupt-names = "macirq";
+ status = "disabled";
+ };
+
+ spi1: spi@5d400000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x5d400000 0x1000>;
+ interrupts = <0 99 0x4>;
+ status = "disabled";
+ };
+
+ apb {
+ i2c1: i2c@5cd00000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x5cd00000 0x1000>;
+ interrupts = <0 87 0x4>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@5ce00000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x5ce00000 0x1000>;
+ interrupts = <0 88 0x4>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@5cf00000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x5cf00000 0x1000>;
+ interrupts = <0 89 0x4>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@5d000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x5d000000 0x1000>;
+ interrupts = <0 90 0x4>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@5d100000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x5d100000 0x1000>;
+ interrupts = <0 91 0x4>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@5d200000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x5d200000 0x1000>;
+ interrupts = <0 92 0x4>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@5d300000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x5d300000 0x1000>;
+ interrupts = <0 93 0x4>;
+ status = "disabled";
+ };
+
+ serial@5c800000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x5c800000 0x1000>;
+ interrupts = <0 82 0x4>;
+ status = "disabled";
+ };
+
+ serial@5c900000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x5c900000 0x1000>;
+ interrupts = <0 83 0x4>;
+ status = "disabled";
+ };
+
+ serial@5ca00000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x5ca00000 0x1000>;
+ interrupts = <0 84 0x4>;
+ status = "disabled";
+ };
+
+ serial@5cb00000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x5cb00000 0x1000>;
+ interrupts = <0 85 0x4>;
+ status = "disabled";
+ };
+
+ serial@5cc00000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x5cc00000 0x1000>;
+ interrupts = <0 86 0x4>;
+ status = "disabled";
+ };
+
+ thermal@e07008c4 {
+ st,thermal-flags = <0x7000>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/spear1340-evb.dts b/arch/arm/boot/dts/spear1340-evb.dts
new file mode 100644
index 000000000000..0d8472e5ab9f
--- /dev/null
+++ b/arch/arm/boot/dts/spear1340-evb.dts
@@ -0,0 +1,308 @@
+/*
+ * DTS file for SPEAr1340 Evaluation Board
+ *
+ * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "spear1340.dtsi"
+
+/ {
+ model = "ST SPEAr1340 Evaluation Board";
+ compatible = "st,spear1340-evb", "st,spear1340";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory {
+ reg = <0 0x40000000>;
+ };
+
+ ahb {
+ pinmux@e0700000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&state_default>;
+
+ state_default: pinmux {
+ pads_as_gpio {
+ st,pins = "pads_as_gpio_grp";
+ st,function = "pads_as_gpio";
+ };
+ fsmc {
+ st,pins = "fsmc_8bit_grp";
+ st,function = "fsmc";
+ };
+ kbd {
+ st,pins = "keyboard_row_col_grp",
+ "keyboard_col5_grp";
+ st,function = "keyboard";
+ };
+ uart0 {
+ st,pins = "uart0_grp", "uart0_enh_grp";
+ st,function = "uart0";
+ };
+ i2c0-pmx {
+ st,pins = "i2c0_grp";
+ st,function = "i2c0";
+ };
+ i2c1-pmx {
+ st,pins = "i2c1_grp";
+ st,function = "i2c1";
+ };
+ spdif-in {
+ st,pins = "spdif_in_grp";
+ st,function = "spdif_in";
+ };
+ spdif-out {
+ st,pins = "spdif_out_grp";
+ st,function = "spdif_out";
+ };
+ ssp0 {
+ st,pins = "ssp0_grp", "ssp0_cs1_grp",
+ "ssp0_cs3_grp";
+ st,function = "ssp0";
+ };
+ pwm {
+ st,pins = "pwm2_grp", "pwm3_grp";
+ st,function = "pwm";
+ };
+ smi-pmx {
+ st,pins = "smi_grp";
+ st,function = "smi";
+ };
+ i2s {
+ st,pins = "i2s_in_grp", "i2s_out_grp";
+ st,function = "i2s";
+ };
+ gmac {
+ st,pins = "gmii_grp", "rgmii_grp";
+ st,function = "gmac";
+ };
+ cam3 {
+ st,pins = "cam3_grp";
+ st,function = "cam3";
+ };
+ cec0 {
+ st,pins = "cec0_grp";
+ st,function = "cec0";
+ };
+ cec1 {
+ st,pins = "cec1_grp";
+ st,function = "cec1";
+ };
+ sdhci {
+ st,pins = "sdhci_grp";
+ st,function = "sdhci";
+ };
+ clcd {
+ st,pins = "clcd_grp";
+ st,function = "clcd";
+ };
+ sata {
+ st,pins = "sata_grp";
+ st,function = "sata";
+ };
+ };
+ };
+
+ dma@ea800000 {
+ status = "okay";
+ };
+
+ dma@eb000000 {
+ status = "okay";
+ };
+
+ fsmc: flash@b0000000 {
+ status = "okay";
+ };
+
+ gmac0: eth@e2000000 {
+ status = "okay";
+ };
+
+ sdhci@b3000000 {
+ status = "okay";
+ };
+
+ smi: flash@ea000000 {
+ status = "okay";
+ clock-rate = <50000000>;
+
+ flash@e6000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xe6000000 0x800000>;
+ st,smi-fast-mode;
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x10000>;
+ };
+ partition@10000 {
+ label = "u-boot";
+ reg = <0x10000 0x40000>;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x2c0000>;
+ };
+ partition@310000 {
+ label = "rootfs";
+ reg = <0x310000 0x4f0000>;
+ };
+ };
+ };
+
+ spi0: spi@e0100000 {
+ status = "okay";
+ };
+
+ ehci@e4800000 {
+ status = "okay";
+ };
+
+ ehci@e5800000 {
+ status = "okay";
+ };
+
+ ohci@e4000000 {
+ status = "okay";
+ };
+
+ ohci@e5000000 {
+ status = "okay";
+ };
+
+ apb {
+ adc@e0080000 {
+ status = "okay";
+ };
+
+ gpio0: gpio@e0600000 {
+ status = "okay";
+ };
+
+ gpio1: gpio@e0680000 {
+ status = "okay";
+ };
+
+ i2c0: i2c@e0280000 {
+ status = "okay";
+ };
+
+ i2c1: i2c@b4000000 {
+ status = "okay";
+ };
+
+ kbd@e0300000 {
+ linux,keymap = < 0x00000001
+ 0x00010002
+ 0x00020003
+ 0x00030004
+ 0x00040005
+ 0x00050006
+ 0x00060007
+ 0x00070008
+ 0x00080009
+ 0x0100000a
+ 0x0101000c
+ 0x0102000d
+ 0x0103000e
+ 0x0104000f
+ 0x01050010
+ 0x01060011
+ 0x01070012
+ 0x01080013
+ 0x02000014
+ 0x02010015
+ 0x02020016
+ 0x02030017
+ 0x02040018
+ 0x02050019
+ 0x0206001a
+ 0x0207001b
+ 0x0208001c
+ 0x0300001d
+ 0x0301001e
+ 0x0302001f
+ 0x03030020
+ 0x03040021
+ 0x03050022
+ 0x03060023
+ 0x03070024
+ 0x03080025
+ 0x04000026
+ 0x04010027
+ 0x04020028
+ 0x04030029
+ 0x0404002a
+ 0x0405002b
+ 0x0406002c
+ 0x0407002d
+ 0x0408002e
+ 0x0500002f
+ 0x05010030
+ 0x05020031
+ 0x05030032
+ 0x05040033
+ 0x05050034
+ 0x05060035
+ 0x05070036
+ 0x05080037
+ 0x06000038
+ 0x06010039
+ 0x0602003a
+ 0x0603003b
+ 0x0604003c
+ 0x0605003d
+ 0x0606003e
+ 0x0607003f
+ 0x06080040
+ 0x07000041
+ 0x07010042
+ 0x07020043
+ 0x07030044
+ 0x07040045
+ 0x07050046
+ 0x07060047
+ 0x07070048
+ 0x07080049
+ 0x0800004a
+ 0x0801004b
+ 0x0802004c
+ 0x0803004d
+ 0x0804004e
+ 0x0805004f
+ 0x08060050
+ 0x08070051
+ 0x08080052 >;
+ autorepeat;
+ st,mode = <0>;
+ status = "okay";
+ };
+
+ rtc@e0580000 {
+ status = "okay";
+ };
+
+ serial@e0000000 {
+ status = "okay";
+ };
+
+ serial@b4100000 {
+ status = "okay";
+ };
+
+ wdt@ec800620 {
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
new file mode 100644
index 000000000000..a26fc47a55e8
--- /dev/null
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -0,0 +1,56 @@
+/*
+ * DTS file for all SPEAr1340 SoCs
+ *
+ * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "spear13xx.dtsi"
+
+/ {
+ compatible = "st,spear1340";
+
+ ahb {
+ ahci@b1000000 {
+ compatible = "snps,spear-ahci";
+ reg = <0xb1000000 0x10000>;
+ interrupts = <0 72 0x4>;
+ status = "disabled";
+ };
+
+ spi1: spi@5d400000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x5d400000 0x1000>;
+ interrupts = <0 99 0x4>;
+ status = "disabled";
+ };
+
+ apb {
+ i2c1: i2c@b4000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xb4000000 0x1000>;
+ interrupts = <0 104 0x4>;
+ status = "disabled";
+ };
+
+ serial@b4100000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0xb4100000 0x1000>;
+ interrupts = <0 105 0x4>;
+ status = "disabled";
+ };
+
+ thermal@e07008c4 {
+ st,thermal-flags = <0x2a00>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
new file mode 100644
index 000000000000..1f8e1e1481df
--- /dev/null
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -0,0 +1,262 @@
+/*
+ * DTS file for all SPEAr13xx SoCs
+ *
+ * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ interrupt-parent = <&gic>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a9";
+ reg = <0>;
+ next-level-cache = <&L2>;
+ };
+
+ cpu@1 {
+ compatible = "arm,cortex-a9";
+ reg = <1>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ gic: interrupt-controller@ec801000 {
+ compatible = "arm,cortex-a9-gic";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = < 0xec801000 0x1000 >,
+ < 0xec800100 0x0100 >;
+ };
+
+ pmu {
+ compatible = "arm,cortex-a9-pmu";
+ interrupts = <0 8 0x04
+ 0 9 0x04>;
+ };
+
+ L2: l2-cache {
+ compatible = "arm,pl310-cache";
+ reg = <0xed000000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+ };
+
+ memory {
+ name = "memory";
+ device_type = "memory";
+ reg = <0 0x40000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyAMA0,115200";
+ };
+
+ ahb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0x50000000 0x50000000 0x10000000
+ 0xb0000000 0xb0000000 0x10000000
+ 0xe0000000 0xe0000000 0x10000000>;
+
+ sdhci@b3000000 {
+ compatible = "st,sdhci-spear";
+ reg = <0xb3000000 0x100>;
+ interrupts = <0 28 0x4>;
+ status = "disabled";
+ };
+
+ cf@b2800000 {
+ compatible = "arasan,cf-spear1340";
+ reg = <0xb2800000 0x100>;
+ interrupts = <0 29 0x4>;
+ status = "disabled";
+ };
+
+ dma@ea800000 {
+ compatible = "snps,dma-spear1340";
+ reg = <0xea800000 0x1000>;
+ interrupts = <0 19 0x4>;
+ status = "disabled";
+ };
+
+ dma@eb000000 {
+ compatible = "snps,dma-spear1340";
+ reg = <0xeb000000 0x1000>;
+ interrupts = <0 59 0x4>;
+ status = "disabled";
+ };
+
+ fsmc: flash@b0000000 {
+ compatible = "st,spear600-fsmc-nand";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb0000000 0x1000 /* FSMC Register */
+ 0xb0800000 0x0010>; /* NAND Base */
+ reg-names = "fsmc_regs", "nand_data";
+ interrupts = <0 20 0x4
+ 0 21 0x4
+ 0 22 0x4
+ 0 23 0x4>;
+ st,ale-off = <0x20000>;
+ st,cle-off = <0x10000>;
+ status = "disabled";
+ };
+
+ gmac0: eth@e2000000 {
+ compatible = "st,spear600-gmac";
+ reg = <0xe2000000 0x8000>;
+ interrupts = <0 23 0x4
+ 0 24 0x4>;
+ interrupt-names = "macirq", "eth_wake_irq";
+ status = "disabled";
+ };
+
+ smi: flash@ea000000 {
+ compatible = "st,spear600-smi";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xea000000 0x1000>;
+ interrupts = <0 30 0x4>;
+ status = "disabled";
+ };
+
+ spi0: spi@e0100000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0xe0100000 0x1000>;
+ interrupts = <0 31 0x4>;
+ status = "disabled";
+ };
+
+ ehci@e4800000 {
+ compatible = "st,spear600-ehci", "usb-ehci";
+ reg = <0xe4800000 0x1000>;
+ interrupts = <0 64 0x4>;
+ status = "disabled";
+ };
+
+ ehci@e5800000 {
+ compatible = "st,spear600-ehci", "usb-ehci";
+ reg = <0xe5800000 0x1000>;
+ interrupts = <0 66 0x4>;
+ status = "disabled";
+ };
+
+ ohci@e4000000 {
+ compatible = "st,spear600-ohci", "usb-ohci";
+ reg = <0xe4000000 0x1000>;
+ interrupts = <0 65 0x4>;
+ status = "disabled";
+ };
+
+ ohci@e5000000 {
+ compatible = "st,spear600-ohci", "usb-ohci";
+ reg = <0xe5000000 0x1000>;
+ interrupts = <0 67 0x4>;
+ status = "disabled";
+ };
+
+ apb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0x50000000 0x50000000 0x10000000
+ 0xb0000000 0xb0000000 0x10000000
+ 0xe0000000 0xe0000000 0x10000000>;
+
+ gpio0: gpio@e0600000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0xe0600000 0x1000>;
+ interrupts = <0 24 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ status = "disabled";
+ };
+
+ gpio1: gpio@e0680000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0xe0680000 0x1000>;
+ interrupts = <0 25 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ status = "disabled";
+ };
+
+ kbd@e0300000 {
+ compatible = "st,spear300-kbd";
+ reg = <0xe0300000 0x1000>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@e0280000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xe0280000 0x1000>;
+ interrupts = <0 41 0x4>;
+ status = "disabled";
+ };
+
+ rtc@e0580000 {
+ compatible = "st,spear-rtc";
+ reg = <0xe0580000 0x1000>;
+ interrupts = <0 36 0x4>;
+ status = "disabled";
+ };
+
+ serial@e0000000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0xe0000000 0x1000>;
+ interrupts = <0 36 0x4>;
+ status = "disabled";
+ };
+
+ adc@e0080000 {
+ compatible = "st,spear600-adc";
+ reg = <0xe0080000 0x1000>;
+ interrupts = <0 44 0x4>;
+ status = "disabled";
+ };
+
+ timer@e0380000 {
+ compatible = "st,spear-timer";
+ reg = <0xe0380000 0x400>;
+ interrupts = <0 37 0x4>;
+ };
+
+ timer@ec800600 {
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0xec800600 0x20>;
+ interrupts = <1 13 0x301>;
+ };
+
+ wdt@ec800620 {
+ compatible = "arm,cortex-a9-twd-wdt";
+ reg = <0xec800620 0x20>;
+ status = "disabled";
+ };
+
+ thermal@e07008c4 {
+ compatible = "st,thermal-spear1340";
+ reg = <0xe07008c4 0x4>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/spear300-evb.dts b/arch/arm/boot/dts/spear300-evb.dts
index 910e264b87c0..fc82b1a26458 100644
--- a/arch/arm/boot/dts/spear300-evb.dts
+++ b/arch/arm/boot/dts/spear300-evb.dts
@@ -87,6 +87,31 @@
smi: flash@fc000000 {
status = "okay";
+ clock-rate = <50000000>;
+
+ flash@f8000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xf8000000 0x800000>;
+ st,smi-fast-mode;
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x10000>;
+ };
+ partition@10000 {
+ label = "u-boot";
+ reg = <0x10000 0x40000>;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x2c0000>;
+ };
+ partition@310000 {
+ label = "rootfs";
+ reg = <0x310000 0x4f0000>;
+ };
+ };
};
spi0: spi@d0100000 {
diff --git a/arch/arm/boot/dts/spear310-evb.dts b/arch/arm/boot/dts/spear310-evb.dts
index 6d95317100ad..dc5e2d445a93 100644
--- a/arch/arm/boot/dts/spear310-evb.dts
+++ b/arch/arm/boot/dts/spear310-evb.dts
@@ -103,11 +103,27 @@
clock-rate=<50000000>;
flash@f8000000 {
- label = "m25p64";
- reg = <0xf8000000 0x800000>;
#address-cells = <1>;
#size-cells = <1>;
+ reg = <0xf8000000 0x800000>;
st,smi-fast-mode;
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x10000>;
+ };
+ partition@10000 {
+ label = "u-boot";
+ reg = <0x10000 0x40000>;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x2c0000>;
+ };
+ partition@310000 {
+ label = "rootfs";
+ reg = <0x310000 0x4f0000>;
+ };
};
};
diff --git a/arch/arm/boot/dts/spear320-evb.dts b/arch/arm/boot/dts/spear320-evb.dts
index 0c6463b71a37..6308fa3bec1e 100644
--- a/arch/arm/boot/dts/spear320-evb.dts
+++ b/arch/arm/boot/dts/spear320-evb.dts
@@ -110,6 +110,31 @@
smi: flash@fc000000 {
status = "okay";
+ clock-rate = <50000000>;
+
+ flash@f8000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xf8000000 0x800000>;
+ st,smi-fast-mode;
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x10000>;
+ };
+ partition@10000 {
+ label = "u-boot";
+ reg = <0x10000 0x40000>;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x2c0000>;
+ };
+ partition@310000 {
+ label = "rootfs";
+ reg = <0x310000 0x4f0000>;
+ };
+ };
};
spi0: spi@d0100000 {
diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi
index 0ae7c8e86311..91072553963f 100644
--- a/arch/arm/boot/dts/spear3xx.dtsi
+++ b/arch/arm/boot/dts/spear3xx.dtsi
@@ -139,6 +139,12 @@
interrupts = <12>;
status = "disabled";
};
+
+ timer@f0000000 {
+ compatible = "st,spear-timer";
+ reg = <0xf0000000 0x400>;
+ interrupts = <2>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/spear600-evb.dts b/arch/arm/boot/dts/spear600-evb.dts
index 790a7a8a5ccd..1119c22c9a82 100644
--- a/arch/arm/boot/dts/spear600-evb.dts
+++ b/arch/arm/boot/dts/spear600-evb.dts
@@ -33,6 +33,35 @@
status = "okay";
};
+ smi: flash@fc000000 {
+ status = "okay";
+ clock-rate = <50000000>;
+
+ flash@f8000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xf8000000 0x800000>;
+ st,smi-fast-mode;
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x10000>;
+ };
+ partition@10000 {
+ label = "u-boot";
+ reg = <0x10000 0x40000>;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x2c0000>;
+ };
+ partition@310000 {
+ label = "rootfs";
+ reg = <0x310000 0x4f0000>;
+ };
+ };
+ };
+
apb {
serial@d0000000 {
status = "okay";
diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
index d777e3a6f178..089f0a42c50e 100644
--- a/arch/arm/boot/dts/spear600.dtsi
+++ b/arch/arm/boot/dts/spear600.dtsi
@@ -177,6 +177,12 @@
interrupts = <28>;
status = "disabled";
};
+
+ timer@f0000000 {
+ compatible = "st,spear-timer";
+ reg = <0xf0000000 0x400>;
+ interrupts = <16>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/tegra-cardhu.dts b/arch/arm/boot/dts/tegra-cardhu.dts
index 0a9f34a2c3aa..36321bceec46 100644
--- a/arch/arm/boot/dts/tegra-cardhu.dts
+++ b/arch/arm/boot/dts/tegra-cardhu.dts
@@ -7,10 +7,10 @@
compatible = "nvidia,cardhu", "nvidia,tegra30";
memory {
- reg = < 0x80000000 0x40000000 >;
+ reg = <0x80000000 0x40000000>;
};
- pinmux@70000000 {
+ pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -51,64 +51,122 @@
nvidia,pull = <2>;
nvidia,tristate = <0>;
};
+ dap2_fs_pa2 {
+ nvidia,pins = "dap2_fs_pa2",
+ "dap2_sclk_pa3",
+ "dap2_din_pa4",
+ "dap2_dout_pa5";
+ nvidia,function = "i2s1";
+ nvidia,pull = <0>;
+ nvidia,tristate = <0>;
+ };
};
};
serial@70006000 {
- clock-frequency = < 408000000 >;
- };
-
- serial@70006040 {
- status = "disable";
- };
-
- serial@70006200 {
- status = "disable";
- };
-
- serial@70006300 {
- status = "disable";
- };
-
- serial@70006400 {
- status = "disable";
+ status = "okay";
+ clock-frequency = <408000000>;
};
i2c@7000c000 {
+ status = "okay";
clock-frequency = <100000>;
};
i2c@7000c400 {
+ status = "okay";
clock-frequency = <100000>;
};
i2c@7000c500 {
+ status = "okay";
clock-frequency = <100000>;
+
+ /* ALS and Proximity sensor */
+ isl29028@44 {
+ compatible = "isil,isl29028";
+ reg = <0x44>;
+ interrupt-parent = <&gpio>;
+ interrupts = <88 0x04>; /* gpio PL0 */
+ };
};
i2c@7000c700 {
+ status = "okay";
clock-frequency = <100000>;
};
i2c@7000d000 {
+ status = "okay";
clock-frequency = <100000>;
+
+ wm8903: wm8903@1a {
+ compatible = "wlf,wm8903";
+ reg = <0x1a>;
+ interrupt-parent = <&gpio>;
+ interrupts = <179 0x04>; /* gpio PW3 */
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ micdet-cfg = <0>;
+ micdet-delay = <100>;
+ gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
+ };
+
+ tps62361 {
+ compatible = "ti,tps62361";
+ reg = <0x60>;
+
+ regulator-name = "tps62361-vout";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ ti,vsel0-state-high;
+ ti,vsel1-state-high;
+ };
+ };
+
+ ahub {
+ i2s@70080400 {
+ status = "okay";
+ };
};
sdhci@78000000 {
+ status = "okay";
cd-gpios = <&gpio 69 0>; /* gpio PI5 */
wp-gpios = <&gpio 155 0>; /* gpio PT3 */
power-gpios = <&gpio 31 0>; /* gpio PD7 */
+ bus-width = <4>;
};
- sdhci@78000200 {
- status = "disable";
+ sdhci@78000600 {
+ status = "okay";
+ support-8bit;
+ bus-width = <8>;
};
- sdhci@78000400 {
- status = "disable";
- };
+ sound {
+ compatible = "nvidia,tegra-audio-wm8903-cardhu",
+ "nvidia,tegra-audio-wm8903";
+ nvidia,model = "NVIDIA Tegra Cardhu";
- sdhci@78000400 {
- support-8bit;
+ nvidia,audio-routing =
+ "Headphone Jack", "HPOUTR",
+ "Headphone Jack", "HPOUTL",
+ "Int Spk", "ROP",
+ "Int Spk", "RON",
+ "Int Spk", "LOP",
+ "Int Spk", "LON",
+ "Mic Jack", "MICBIAS",
+ "IN1L", "Mic Jack";
+
+ nvidia,i2s-controller = <&tegra_i2s1>;
+ nvidia,audio-codec = <&wm8903>;
+
+ nvidia,spkr-en-gpios = <&wm8903 2 0>;
+ nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
};
};
diff --git a/arch/arm/boot/dts/tegra-harmony.dts b/arch/arm/boot/dts/tegra-harmony.dts
index 1a0b1f182944..7de701365fce 100644
--- a/arch/arm/boot/dts/tegra-harmony.dts
+++ b/arch/arm/boot/dts/tegra-harmony.dts
@@ -6,11 +6,11 @@
model = "NVIDIA Tegra2 Harmony evaluation board";
compatible = "nvidia,harmony", "nvidia,tegra20";
- memory@0 {
- reg = < 0x00000000 0x40000000 >;
+ memory {
+ reg = <0x00000000 0x40000000>;
};
- pinmux@70000000 {
+ pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -167,28 +167,28 @@
};
conf_ata {
nvidia,pins = "ata", "atb", "atc", "atd", "ate",
- "cdev1", "dap1", "dtb", "gma", "gmb",
- "gmc", "gmd", "gme", "gpu7", "gpv",
- "i2cp", "pta", "rm", "slxa", "slxk",
- "spia", "spib";
+ "cdev1", "cdev2", "dap1", "dtb", "gma",
+ "gmb", "gmc", "gmd", "gme", "gpu7",
+ "gpv", "i2cp", "pta", "rm", "slxa",
+ "slxk", "spia", "spib", "uac";
nvidia,pull = <0>;
nvidia,tristate = <0>;
};
- conf_cdev2 {
- nvidia,pins = "cdev2", "csus", "spid", "spif";
- nvidia,pull = <1>;
- nvidia,tristate = <1>;
- };
conf_ck32 {
nvidia,pins = "ck32", "ddrc", "pmca", "pmcb",
"pmcc", "pmcd", "pmce", "xm2c", "xm2d";
nvidia,pull = <0>;
};
+ conf_csus {
+ nvidia,pins = "csus", "spid", "spif";
+ nvidia,pull = <1>;
+ nvidia,tristate = <1>;
+ };
conf_crtp {
nvidia,pins = "crtp", "dap2", "dap3", "dap4",
"dtc", "dte", "dtf", "gpu", "sdio1",
"slxc", "slxd", "spdi", "spdo", "spig",
- "uac", "uda";
+ "uda";
nvidia,pull = <0>;
nvidia,tristate = <1>;
};
@@ -234,42 +234,81 @@
};
};
- pmc@7000f400 {
- nvidia,invert-interrupt;
+ i2s@70002800 {
+ status = "okay";
+ };
+
+ serial@70006300 {
+ status = "okay";
+ clock-frequency = <216000000>;
};
i2c@7000c000 {
+ status = "okay";
clock-frequency = <400000>;
wm8903: wm8903@1a {
compatible = "wlf,wm8903";
reg = <0x1a>;
interrupt-parent = <&gpio>;
- interrupts = < 187 0x04 >;
+ interrupts = <187 0x04>;
gpio-controller;
#gpio-cells = <2>;
micdet-cfg = <0>;
micdet-delay = <100>;
- gpio-cfg = < 0xffffffff 0xffffffff 0 0xffffffff 0xffffffff >;
+ gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
};
};
i2c@7000c400 {
+ status = "okay";
clock-frequency = <400000>;
};
i2c@7000c500 {
+ status = "okay";
clock-frequency = <400000>;
};
i2c@7000d000 {
+ status = "okay";
clock-frequency = <400000>;
};
- i2s@70002a00 {
- status = "disable";
+ pmc {
+ nvidia,invert-interrupt;
+ };
+
+ usb@c5000000 {
+ status = "okay";
+ };
+
+ usb@c5004000 {
+ status = "okay";
+ nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+ };
+
+ usb@c5008000 {
+ status = "okay";
+ };
+
+ sdhci@c8000200 {
+ status = "okay";
+ cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+ wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+ power-gpios = <&gpio 155 0>; /* gpio PT3 */
+ bus-width = <4>;
+ };
+
+ sdhci@c8000600 {
+ status = "okay";
+ cd-gpios = <&gpio 58 0>; /* gpio PH2 */
+ wp-gpios = <&gpio 59 0>; /* gpio PH3 */
+ power-gpios = <&gpio 70 0>; /* gpio PI6 */
+ support-8bit;
+ bus-width = <8>;
};
sound {
@@ -295,45 +334,4 @@
nvidia,int-mic-en-gpios = <&gpio 184 0>; /*gpio PX0 */
nvidia,ext-mic-en-gpios = <&gpio 185 0>; /* gpio PX1 */
};
-
- serial@70006000 {
- status = "disable";
- };
-
- serial@70006040 {
- status = "disable";
- };
-
- serial@70006200 {
- status = "disable";
- };
-
- serial@70006300 {
- clock-frequency = < 216000000 >;
- };
-
- serial@70006400 {
- status = "disable";
- };
-
- sdhci@c8000000 {
- status = "disable";
- };
-
- sdhci@c8000200 {
- cd-gpios = <&gpio 69 0>; /* gpio PI5 */
- wp-gpios = <&gpio 57 0>; /* gpio PH1 */
- power-gpios = <&gpio 155 0>; /* gpio PT3 */
- };
-
- sdhci@c8000400 {
- status = "disable";
- };
-
- sdhci@c8000600 {
- cd-gpios = <&gpio 58 0>; /* gpio PH2 */
- wp-gpios = <&gpio 59 0>; /* gpio PH3 */
- power-gpios = <&gpio 70 0>; /* gpio PI6 */
- support-8bit;
- };
};
diff --git a/arch/arm/boot/dts/tegra-paz00.dts b/arch/arm/boot/dts/tegra-paz00.dts
index 10943fb2561c..bfeb117d5aea 100644
--- a/arch/arm/boot/dts/tegra-paz00.dts
+++ b/arch/arm/boot/dts/tegra-paz00.dts
@@ -6,11 +6,11 @@
model = "Toshiba AC100 / Dynabook AZ";
compatible = "compal,paz00", "nvidia,tegra20";
- memory@0 {
+ memory {
reg = <0x00000000 0x20000000>;
};
- pinmux@70000000 {
+ pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -159,18 +159,14 @@
};
conf_ata {
nvidia,pins = "ata", "atb", "atc", "atd", "ate",
- "cdev1", "dap1", "dap2", "dtf", "gma",
- "gmb", "gmc", "gmd", "gme", "gpu",
- "gpu7", "gpv", "i2cp", "pta", "rm",
- "sdio1", "slxk", "spdo", "uac", "uda";
+ "cdev1", "cdev2", "dap1", "dap2", "dtf",
+ "gma", "gmb", "gmc", "gmd", "gme",
+ "gpu", "gpu7", "gpv", "i2cp", "pta",
+ "rm", "sdio1", "slxk", "spdo", "uac",
+ "uda";
nvidia,pull = <0>;
nvidia,tristate = <0>;
};
- conf_cdev2 {
- nvidia,pins = "cdev2";
- nvidia,pull = <1>;
- nvidia,tristate = <0>;
- };
conf_ck32 {
nvidia,pins = "ck32", "ddrc", "pmca", "pmcb",
"pmcc", "pmcd", "pmce", "xm2c", "xm2d";
@@ -230,7 +226,22 @@
};
};
+ i2s@70002800 {
+ status = "okay";
+ };
+
+ serial@70006000 {
+ status = "okay";
+ clock-frequency = <216000000>;
+ };
+
+ serial@70006200 {
+ status = "okay";
+ clock-frequency = <216000000>;
+ };
+
i2c@7000c000 {
+ status = "okay";
clock-frequency = <400000>;
alc5632: alc5632@1e {
@@ -242,25 +253,23 @@
};
i2c@7000c400 {
+ status = "okay";
clock-frequency = <400000>;
};
- i2c@7000c500 {
- status = "disable";
- };
-
- nvec@7000c500 {
- #address-cells = <1>;
- #size-cells = <0>;
+ nvec {
compatible = "nvidia,nvec";
- reg = <0x7000C500 0x100>;
+ reg = <0x7000c500 0x100>;
interrupts = <0 92 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clock-frequency = <80000>;
- request-gpios = <&gpio 170 0>;
+ request-gpios = <&gpio 170 0>; /* gpio PV2 */
slave-addr = <138>;
};
i2c@7000d000 {
+ status = "okay";
clock-frequency = <400000>;
adt7461@4c {
@@ -269,66 +278,31 @@
};
};
- i2s@70002a00 {
- status = "disable";
- };
-
- sound {
- compatible = "nvidia,tegra-audio-alc5632-paz00",
- "nvidia,tegra-audio-alc5632";
-
- nvidia,model = "Compal PAZ00";
-
- nvidia,audio-routing =
- "Int Spk", "SPKOUT",
- "Int Spk", "SPKOUTN",
- "Headset Mic", "MICBIAS1",
- "MIC1", "Headset Mic",
- "Headset Stereophone", "HPR",
- "Headset Stereophone", "HPL",
- "DMICDAT", "Digital Mic";
-
- nvidia,audio-codec = <&alc5632>;
- nvidia,i2s-controller = <&tegra_i2s1>;
- nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
- };
-
- serial@70006000 {
- clock-frequency = <216000000>;
+ usb@c5000000 {
+ status = "okay";
};
- serial@70006040 {
- status = "disable";
+ usb@c5004000 {
+ status = "okay";
+ nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
};
- serial@70006200 {
- clock-frequency = <216000000>;
- };
-
- serial@70006300 {
- status = "disable";
- };
-
- serial@70006400 {
- status = "disable";
+ usb@c5008000 {
+ status = "okay";
};
sdhci@c8000000 {
+ status = "okay";
cd-gpios = <&gpio 173 0>; /* gpio PV5 */
wp-gpios = <&gpio 57 0>; /* gpio PH1 */
power-gpios = <&gpio 169 0>; /* gpio PV1 */
- };
-
- sdhci@c8000200 {
- status = "disable";
- };
-
- sdhci@c8000400 {
- status = "disable";
+ bus-width = <4>;
};
sdhci@c8000600 {
+ status = "okay";
support-8bit;
+ bus-width = <8>;
};
gpio-keys {
@@ -347,8 +321,28 @@
wifi {
label = "wifi-led";
- gpios = <&gpio 24 0>;
+ gpios = <&gpio 24 0>; /* gpio PD0 */
linux,default-trigger = "rfkill0";
};
};
+
+ sound {
+ compatible = "nvidia,tegra-audio-alc5632-paz00",
+ "nvidia,tegra-audio-alc5632";
+
+ nvidia,model = "Compal PAZ00";
+
+ nvidia,audio-routing =
+ "Int Spk", "SPKOUT",
+ "Int Spk", "SPKOUTN",
+ "Headset Mic", "MICBIAS1",
+ "MIC1", "Headset Mic",
+ "Headset Stereophone", "HPR",
+ "Headset Stereophone", "HPL",
+ "DMICDAT", "Digital Mic";
+
+ nvidia,audio-codec = <&alc5632>;
+ nvidia,i2s-controller = <&tegra_i2s1>;
+ nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
+ };
};
diff --git a/arch/arm/boot/dts/tegra-seaboard.dts b/arch/arm/boot/dts/tegra-seaboard.dts
index ec33116f5df9..89cb7f2acd92 100644
--- a/arch/arm/boot/dts/tegra-seaboard.dts
+++ b/arch/arm/boot/dts/tegra-seaboard.dts
@@ -7,11 +7,10 @@
compatible = "nvidia,seaboard", "nvidia,tegra20";
memory {
- device_type = "memory";
- reg = < 0x00000000 0x40000000 >;
+ reg = <0x00000000 0x40000000>;
};
- pinmux@70000000 {
+ pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -100,7 +99,7 @@
};
hdint {
nvidia,pins = "hdint", "lpw0", "lpw2", "lsc1",
- "lsck", "lsda", "pta";
+ "lsck", "lsda";
nvidia,function = "hdmi";
};
i2cp {
@@ -134,6 +133,10 @@
nvidia,pins = "pmc";
nvidia,function = "pwr_on";
};
+ pta {
+ nvidia,pins = "pta";
+ nvidia,function = "i2c2";
+ };
rm {
nvidia,pins = "rm";
nvidia,function = "i2c1";
@@ -254,108 +257,148 @@
};
};
+ i2s@70002800 {
+ status = "okay";
+ };
+
+ serial@70006300 {
+ status = "okay";
+ clock-frequency = <216000000>;
+ };
+
i2c@7000c000 {
+ status = "okay";
clock-frequency = <400000>;
wm8903: wm8903@1a {
compatible = "wlf,wm8903";
reg = <0x1a>;
interrupt-parent = <&gpio>;
- interrupts = < 187 0x04 >;
+ interrupts = <187 0x04>;
gpio-controller;
#gpio-cells = <2>;
micdet-cfg = <0>;
micdet-delay = <100>;
- gpio-cfg = < 0xffffffff 0xffffffff 0 0xffffffff 0xffffffff >;
+ gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
+ };
+
+ /* ALS and proximity sensor */
+ isl29018@44 {
+ compatible = "isil,isl29018";
+ reg = <0x44>;
+ interrupt-parent = <&gpio>;
+ interrupts = <202 0x04>; /* GPIO PZ2 */
+ };
+
+ gyrometer@68 {
+ compatible = "invn,mpu3050";
+ reg = <0x68>;
+ interrupt-parent = <&gpio>;
+ interrupts = <204 0x04>; /* gpio PZ4 */
};
};
i2c@7000c400 {
- clock-frequency = <400000>;
+ status = "okay";
+ clock-frequency = <100000>;
+
+ smart-battery@b {
+ compatible = "ti,bq20z75", "smart-battery-1.1";
+ reg = <0xb>;
+ ti,i2c-retry-count = <2>;
+ ti,poll-retry-count = <10>;
+ };
};
i2c@7000c500 {
+ status = "okay";
clock-frequency = <400000>;
};
i2c@7000d000 {
+ status = "okay";
clock-frequency = <400000>;
- adt7461@4c {
- compatible = "adt7461";
+ temperature-sensor@4c {
+ compatible = "nct1008";
reg = <0x4c>;
};
- };
-
- i2s@70002a00 {
- status = "disable";
- };
-
- sound {
- compatible = "nvidia,tegra-audio-wm8903-seaboard",
- "nvidia,tegra-audio-wm8903";
- nvidia,model = "NVIDIA Tegra Seaboard";
-
- nvidia,audio-routing =
- "Headphone Jack", "HPOUTR",
- "Headphone Jack", "HPOUTL",
- "Int Spk", "ROP",
- "Int Spk", "RON",
- "Int Spk", "LOP",
- "Int Spk", "LON",
- "Mic Jack", "MICBIAS",
- "IN1R", "Mic Jack";
-
- nvidia,i2s-controller = <&tegra_i2s1>;
- nvidia,audio-codec = <&wm8903>;
-
- nvidia,spkr-en-gpios = <&wm8903 2 0>;
- nvidia,hp-det-gpios = <&gpio 185 0>; /* gpio PX1 */
- };
- serial@70006000 {
- status = "disable";
- };
-
- serial@70006040 {
- status = "disable";
+ magnetometer@c {
+ compatible = "ak8975";
+ reg = <0xc>;
+ interrupt-parent = <&gpio>;
+ interrupts = <109 0x04>; /* gpio PN5 */
+ };
};
- serial@70006200 {
- status = "disable";
- };
+ emc {
+ emc-table@190000 {
+ reg = <190000>;
+ compatible = "nvidia,tegra20-emc-table";
+ clock-frequency = <190000>;
+ nvidia,emc-registers = <0x0000000c 0x00000026
+ 0x00000009 0x00000003 0x00000004 0x00000004
+ 0x00000002 0x0000000c 0x00000003 0x00000003
+ 0x00000002 0x00000001 0x00000004 0x00000005
+ 0x00000004 0x00000009 0x0000000d 0x0000059f
+ 0x00000000 0x00000003 0x00000003 0x00000003
+ 0x00000003 0x00000001 0x0000000b 0x000000c8
+ 0x00000003 0x00000007 0x00000004 0x0000000f
+ 0x00000002 0x00000000 0x00000000 0x00000002
+ 0x00000000 0x00000000 0x00000083 0xa06204ae
+ 0x007dc010 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000>;
+ };
- serial@70006300 {
- clock-frequency = < 216000000 >;
+ emc-table@380000 {
+ reg = <380000>;
+ compatible = "nvidia,tegra20-emc-table";
+ clock-frequency = <380000>;
+ nvidia,emc-registers = <0x00000017 0x0000004b
+ 0x00000012 0x00000006 0x00000004 0x00000005
+ 0x00000003 0x0000000c 0x00000006 0x00000006
+ 0x00000003 0x00000001 0x00000004 0x00000005
+ 0x00000004 0x00000009 0x0000000d 0x00000b5f
+ 0x00000000 0x00000003 0x00000003 0x00000006
+ 0x00000006 0x00000001 0x00000011 0x000000c8
+ 0x00000003 0x0000000e 0x00000007 0x0000000f
+ 0x00000002 0x00000000 0x00000000 0x00000002
+ 0x00000000 0x00000000 0x00000083 0xe044048b
+ 0x007d8010 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000>;
+ };
};
- serial@70006400 {
- status = "disable";
+ usb@c5000000 {
+ status = "okay";
+ nvidia,vbus-gpio = <&gpio 24 0>; /* PD0 */
+ dr_mode = "otg";
};
- sdhci@c8000000 {
- status = "disable";
+ usb@c5004000 {
+ status = "okay";
+ nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
};
- sdhci@c8000200 {
- status = "disable";
+ usb@c5008000 {
+ status = "okay";
};
sdhci@c8000400 {
+ status = "okay";
cd-gpios = <&gpio 69 0>; /* gpio PI5 */
wp-gpios = <&gpio 57 0>; /* gpio PH1 */
power-gpios = <&gpio 70 0>; /* gpio PI6 */
+ bus-width = <4>;
};
sdhci@c8000600 {
+ status = "okay";
support-8bit;
- };
-
- usb@c5000000 {
- nvidia,vbus-gpio = <&gpio 24 0>; /* PD0 */
- dr_mode = "otg";
+ bus-width = <8>;
};
gpio-keys {
@@ -378,41 +421,25 @@
};
};
- emc@7000f400 {
- emc-table@190000 {
- reg = < 190000 >;
- compatible = "nvidia,tegra20-emc-table";
- clock-frequency = < 190000 >;
- nvidia,emc-registers = < 0x0000000c 0x00000026
- 0x00000009 0x00000003 0x00000004 0x00000004
- 0x00000002 0x0000000c 0x00000003 0x00000003
- 0x00000002 0x00000001 0x00000004 0x00000005
- 0x00000004 0x00000009 0x0000000d 0x0000059f
- 0x00000000 0x00000003 0x00000003 0x00000003
- 0x00000003 0x00000001 0x0000000b 0x000000c8
- 0x00000003 0x00000007 0x00000004 0x0000000f
- 0x00000002 0x00000000 0x00000000 0x00000002
- 0x00000000 0x00000000 0x00000083 0xa06204ae
- 0x007dc010 0x00000000 0x00000000 0x00000000
- 0x00000000 0x00000000 0x00000000 0x00000000 >;
- };
+ sound {
+ compatible = "nvidia,tegra-audio-wm8903-seaboard",
+ "nvidia,tegra-audio-wm8903";
+ nvidia,model = "NVIDIA Tegra Seaboard";
- emc-table@380000 {
- reg = < 380000 >;
- compatible = "nvidia,tegra20-emc-table";
- clock-frequency = < 380000 >;
- nvidia,emc-registers = < 0x00000017 0x0000004b
- 0x00000012 0x00000006 0x00000004 0x00000005
- 0x00000003 0x0000000c 0x00000006 0x00000006
- 0x00000003 0x00000001 0x00000004 0x00000005
- 0x00000004 0x00000009 0x0000000d 0x00000b5f
- 0x00000000 0x00000003 0x00000003 0x00000006
- 0x00000006 0x00000001 0x00000011 0x000000c8
- 0x00000003 0x0000000e 0x00000007 0x0000000f
- 0x00000002 0x00000000 0x00000000 0x00000002
- 0x00000000 0x00000000 0x00000083 0xe044048b
- 0x007d8010 0x00000000 0x00000000 0x00000000
- 0x00000000 0x00000000 0x00000000 0x00000000 >;
- };
+ nvidia,audio-routing =
+ "Headphone Jack", "HPOUTR",
+ "Headphone Jack", "HPOUTL",
+ "Int Spk", "ROP",
+ "Int Spk", "RON",
+ "Int Spk", "LOP",
+ "Int Spk", "LON",
+ "Mic Jack", "MICBIAS",
+ "IN1R", "Mic Jack";
+
+ nvidia,i2s-controller = <&tegra_i2s1>;
+ nvidia,audio-codec = <&wm8903>;
+
+ nvidia,spkr-en-gpios = <&wm8903 2 0>;
+ nvidia,hp-det-gpios = <&gpio 185 0>; /* gpio PX1 */
};
};
diff --git a/arch/arm/boot/dts/tegra-trimslice.dts b/arch/arm/boot/dts/tegra-trimslice.dts
index 98efd5b0d7f9..9de5636023f6 100644
--- a/arch/arm/boot/dts/tegra-trimslice.dts
+++ b/arch/arm/boot/dts/tegra-trimslice.dts
@@ -6,11 +6,11 @@
model = "Compulab TrimSlice board";
compatible = "compulab,trimslice", "nvidia,tegra20";
- memory@0 {
- reg = < 0x00000000 0x40000000 >;
+ memory {
+ reg = <0x00000000 0x40000000>;
};
- pinmux@70000000 {
+ pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -182,23 +182,23 @@
nvidia,tristate = <1>;
};
conf_atb {
- nvidia,pins = "atb", "cdev1", "dap1", "gma",
- "gmc", "gmd", "gpu", "gpu7", "gpv",
- "sdio1", "slxa", "slxk", "uac";
+ nvidia,pins = "atb", "cdev1", "cdev2", "dap1",
+ "gma", "gmc", "gmd", "gpu", "gpu7",
+ "gpv", "sdio1", "slxa", "slxk", "uac";
nvidia,pull = <0>;
nvidia,tristate = <0>;
};
- conf_cdev2 {
- nvidia,pins = "cdev2", "csus", "spia", "spib",
- "spid", "spif";
- nvidia,pull = <1>;
- nvidia,tristate = <1>;
- };
conf_ck32 {
nvidia,pins = "ck32", "ddrc", "pmca", "pmcb",
"pmcc", "pmcd", "pmce", "xm2c", "xm2d";
nvidia,pull = <0>;
};
+ conf_csus {
+ nvidia,pins = "csus", "spia", "spib",
+ "spid", "spif";
+ nvidia,pull = <1>;
+ nvidia,tristate = <1>;
+ };
conf_ddc {
nvidia,pins = "ddc", "dtf", "rm", "sdc", "sdd";
nvidia,pull = <2>;
@@ -240,68 +240,67 @@
};
};
+ i2s@70002800 {
+ status = "okay";
+ };
+
+ serial@70006000 {
+ status = "okay";
+ clock-frequency = <216000000>;
+ };
+
i2c@7000c000 {
+ status = "okay";
clock-frequency = <400000>;
};
i2c@7000c400 {
+ status = "okay";
clock-frequency = <400000>;
};
i2c@7000c500 {
+ status = "okay";
clock-frequency = <400000>;
- };
-
- i2c@7000d000 {
- status = "disable";
- };
-
- i2s@70002800 {
- status = "disable";
- };
-
- i2s@70002a00 {
- status = "disable";
- };
-
- das@70000c00 {
- status = "disable";
- };
- serial@70006000 {
- clock-frequency = < 216000000 >;
- };
+ codec: codec@1a {
+ compatible = "ti,tlv320aic23";
+ reg = <0x1a>;
+ };
- serial@70006040 {
- status = "disable";
+ rtc@56 {
+ compatible = "emmicro,em3027";
+ reg = <0x56>;
+ };
};
- serial@70006200 {
- status = "disable";
+ usb@c5000000 {
+ status = "okay";
};
- serial@70006300 {
- status = "disable";
+ usb@c5004000 {
+ nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
};
- serial@70006400 {
- status = "disable";
+ usb@c5008000 {
+ status = "okay";
};
sdhci@c8000000 {
- status = "disable";
+ status = "okay";
+ bus-width = <4>;
};
- sdhci@c8000200 {
- status = "disable";
- };
-
- sdhci@c8000400 {
- status = "disable";
+ sdhci@c8000600 {
+ status = "okay";
+ cd-gpios = <&gpio 121 0>; /* gpio PP1 */
+ wp-gpios = <&gpio 122 0>; /* gpio PP2 */
+ bus-width = <4>;
};
- sdhci@c8000600 {
- cd-gpios = <&gpio 121 0>;
- wp-gpios = <&gpio 122 0>;
+ sound {
+ compatible = "nvidia,tegra-audio-trimslice";
+ nvidia,i2s-controller = <&tegra_i2s1>;
+ nvidia,audio-codec = <&codec>;
};
};
diff --git a/arch/arm/boot/dts/tegra-ventana.dts b/arch/arm/boot/dts/tegra-ventana.dts
index 71eb2e50a668..445343b0fbdd 100644
--- a/arch/arm/boot/dts/tegra-ventana.dts
+++ b/arch/arm/boot/dts/tegra-ventana.dts
@@ -7,10 +7,10 @@
compatible = "nvidia,ventana", "nvidia,tegra20";
memory {
- reg = < 0x00000000 0x40000000 >;
+ reg = <0x00000000 0x40000000>;
};
- pinmux@70000000 {
+ pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -240,38 +240,82 @@
};
};
+ i2s@70002800 {
+ status = "okay";
+ };
+
+ serial@70006300 {
+ status = "okay";
+ clock-frequency = <216000000>;
+ };
+
i2c@7000c000 {
+ status = "okay";
clock-frequency = <400000>;
wm8903: wm8903@1a {
compatible = "wlf,wm8903";
reg = <0x1a>;
interrupt-parent = <&gpio>;
- interrupts = < 187 0x04 >;
+ interrupts = <187 0x04>;
gpio-controller;
#gpio-cells = <2>;
micdet-cfg = <0>;
micdet-delay = <100>;
- gpio-cfg = < 0xffffffff 0xffffffff 0 0xffffffff 0xffffffff >;
+ gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
+ };
+
+ /* ALS and proximity sensor */
+ isl29018@44 {
+ compatible = "isil,isl29018";
+ reg = <0x44>;
+ interrupt-parent = <&gpio>;
+ interrupts = <202 0x04>; /*gpio PZ2 */
};
};
i2c@7000c400 {
+ status = "okay";
clock-frequency = <400000>;
};
i2c@7000c500 {
+ status = "okay";
clock-frequency = <400000>;
};
i2c@7000d000 {
+ status = "okay";
clock-frequency = <400000>;
};
- i2s@70002a00 {
- status = "disable";
+ usb@c5000000 {
+ status = "okay";
+ };
+
+ usb@c5004000 {
+ status = "okay";
+ nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+ };
+
+ usb@c5008000 {
+ status = "okay";
+ };
+
+ sdhci@c8000400 {
+ status = "okay";
+ cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+ wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+ power-gpios = <&gpio 70 0>; /* gpio PI6 */
+ bus-width = <4>;
+ };
+
+ sdhci@c8000600 {
+ status = "okay";
+ support-8bit;
+ bus-width = <8>;
};
sound {
@@ -294,45 +338,7 @@
nvidia,spkr-en-gpios = <&wm8903 2 0>;
nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
- nvidia,int-mic-en-gpios = <&gpio 184 0>; /*gpio PX0 */
+ nvidia,int-mic-en-gpios = <&gpio 184 0>; /* gpio PX0 */
nvidia,ext-mic-en-gpios = <&gpio 185 0>; /* gpio PX1 */
};
-
- serial@70006000 {
- status = "disable";
- };
-
- serial@70006040 {
- status = "disable";
- };
-
- serial@70006200 {
- status = "disable";
- };
-
- serial@70006300 {
- clock-frequency = < 216000000 >;
- };
-
- serial@70006400 {
- status = "disable";
- };
-
- sdhci@c8000000 {
- status = "disable";
- };
-
- sdhci@c8000200 {
- status = "disable";
- };
-
- sdhci@c8000400 {
- cd-gpios = <&gpio 69 0>; /* gpio PI5 */
- wp-gpios = <&gpio 57 0>; /* gpio PH1 */
- power-gpios = <&gpio 70 0>; /* gpio PI6 */
- };
-
- sdhci@c8000600 {
- support-8bit;
- };
};
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 108e894a8926..c417d67e9027 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -4,207 +4,242 @@
compatible = "nvidia,tegra20";
interrupt-parent = <&intc>;
- pmc@7000f400 {
- compatible = "nvidia,tegra20-pmc";
- reg = <0x7000e400 0x400>;
- };
-
- intc: interrupt-controller@50041000 {
+ intc: interrupt-controller {
compatible = "arm,cortex-a9-gic";
+ reg = <0x50041000 0x1000
+ 0x50040100 0x0100>;
interrupt-controller;
#interrupt-cells = <3>;
- reg = < 0x50041000 0x1000 >,
- < 0x50040100 0x0100 >;
};
- pmu {
- compatible = "arm,cortex-a9-pmu";
- interrupts = <0 56 0x04
- 0 57 0x04>;
- };
-
- apbdma: dma@6000a000 {
+ apbdma: dma {
compatible = "nvidia,tegra20-apbdma";
reg = <0x6000a000 0x1200>;
- interrupts = < 0 104 0x04
- 0 105 0x04
- 0 106 0x04
- 0 107 0x04
- 0 108 0x04
- 0 109 0x04
- 0 110 0x04
- 0 111 0x04
- 0 112 0x04
- 0 113 0x04
- 0 114 0x04
- 0 115 0x04
- 0 116 0x04
- 0 117 0x04
- 0 118 0x04
- 0 119 0x04 >;
- };
-
- i2c@7000c000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra20-i2c";
- reg = <0x7000C000 0x100>;
- interrupts = < 0 38 0x04 >;
- };
-
- i2c@7000c400 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra20-i2c";
- reg = <0x7000C400 0x100>;
- interrupts = < 0 84 0x04 >;
+ interrupts = <0 104 0x04
+ 0 105 0x04
+ 0 106 0x04
+ 0 107 0x04
+ 0 108 0x04
+ 0 109 0x04
+ 0 110 0x04
+ 0 111 0x04
+ 0 112 0x04
+ 0 113 0x04
+ 0 114 0x04
+ 0 115 0x04
+ 0 116 0x04
+ 0 117 0x04
+ 0 118 0x04
+ 0 119 0x04>;
+ };
+
+ ahb {
+ compatible = "nvidia,tegra20-ahb";
+ reg = <0x6000c004 0x10c>; /* AHB Arbitration + Gizmo Controller */
+ };
+
+ gpio: gpio {
+ compatible = "nvidia,tegra20-gpio";
+ reg = <0x6000d000 0x1000>;
+ interrupts = <0 32 0x04
+ 0 33 0x04
+ 0 34 0x04
+ 0 35 0x04
+ 0 55 0x04
+ 0 87 0x04
+ 0 89 0x04>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ #interrupt-cells = <2>;
+ interrupt-controller;
};
- i2c@7000c500 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra20-i2c";
- reg = <0x7000C500 0x100>;
- interrupts = < 0 92 0x04 >;
+ pinmux: pinmux {
+ compatible = "nvidia,tegra20-pinmux";
+ reg = <0x70000014 0x10 /* Tri-state registers */
+ 0x70000080 0x20 /* Mux registers */
+ 0x700000a0 0x14 /* Pull-up/down registers */
+ 0x70000868 0xa8>; /* Pad control registers */
};
- i2c@7000d000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra20-i2c-dvc";
- reg = <0x7000D000 0x200>;
- interrupts = < 0 53 0x04 >;
+ das {
+ compatible = "nvidia,tegra20-das";
+ reg = <0x70000c00 0x80>;
};
tegra_i2s1: i2s@70002800 {
compatible = "nvidia,tegra20-i2s";
reg = <0x70002800 0x200>;
- interrupts = < 0 13 0x04 >;
- nvidia,dma-request-selector = < &apbdma 2 >;
+ interrupts = <0 13 0x04>;
+ nvidia,dma-request-selector = <&apbdma 2>;
+ status = "disable";
};
tegra_i2s2: i2s@70002a00 {
compatible = "nvidia,tegra20-i2s";
reg = <0x70002a00 0x200>;
- interrupts = < 0 3 0x04 >;
- nvidia,dma-request-selector = < &apbdma 1 >;
- };
-
- das@70000c00 {
- compatible = "nvidia,tegra20-das";
- reg = <0x70000c00 0x80>;
- };
-
- gpio: gpio@6000d000 {
- compatible = "nvidia,tegra20-gpio";
- reg = < 0x6000d000 0x1000 >;
- interrupts = < 0 32 0x04
- 0 33 0x04
- 0 34 0x04
- 0 35 0x04
- 0 55 0x04
- 0 87 0x04
- 0 89 0x04 >;
- #gpio-cells = <2>;
- gpio-controller;
- #interrupt-cells = <2>;
- interrupt-controller;
- };
-
- pinmux: pinmux@70000000 {
- compatible = "nvidia,tegra20-pinmux";
- reg = < 0x70000014 0x10 /* Tri-state registers */
- 0x70000080 0x20 /* Mux registers */
- 0x700000a0 0x14 /* Pull-up/down registers */
- 0x70000868 0xa8 >; /* Pad control registers */
+ interrupts = <0 3 0x04>;
+ nvidia,dma-request-selector = <&apbdma 1>;
+ status = "disable";
};
serial@70006000 {
compatible = "nvidia,tegra20-uart";
reg = <0x70006000 0x40>;
reg-shift = <2>;
- interrupts = < 0 36 0x04 >;
+ interrupts = <0 36 0x04>;
+ status = "disable";
};
serial@70006040 {
compatible = "nvidia,tegra20-uart";
reg = <0x70006040 0x40>;
reg-shift = <2>;
- interrupts = < 0 37 0x04 >;
+ interrupts = <0 37 0x04>;
+ status = "disable";
};
serial@70006200 {
compatible = "nvidia,tegra20-uart";
reg = <0x70006200 0x100>;
reg-shift = <2>;
- interrupts = < 0 46 0x04 >;
+ interrupts = <0 46 0x04>;
+ status = "disable";
};
serial@70006300 {
compatible = "nvidia,tegra20-uart";
reg = <0x70006300 0x100>;
reg-shift = <2>;
- interrupts = < 0 90 0x04 >;
+ interrupts = <0 90 0x04>;
+ status = "disable";
};
serial@70006400 {
compatible = "nvidia,tegra20-uart";
reg = <0x70006400 0x100>;
reg-shift = <2>;
- interrupts = < 0 91 0x04 >;
+ interrupts = <0 91 0x04>;
+ status = "disable";
};
- emc@7000f400 {
+ i2c@7000c000 {
+ compatible = "nvidia,tegra20-i2c";
+ reg = <0x7000c000 0x100>;
+ interrupts = <0 38 0x04>;
#address-cells = <1>;
#size-cells = <0>;
- compatible = "nvidia,tegra20-emc";
- reg = <0x7000f400 0x200>;
+ status = "disable";
};
- sdhci@c8000000 {
- compatible = "nvidia,tegra20-sdhci";
- reg = <0xc8000000 0x200>;
- interrupts = < 0 14 0x04 >;
+ i2c@7000c400 {
+ compatible = "nvidia,tegra20-i2c";
+ reg = <0x7000c400 0x100>;
+ interrupts = <0 84 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
};
- sdhci@c8000200 {
- compatible = "nvidia,tegra20-sdhci";
- reg = <0xc8000200 0x200>;
- interrupts = < 0 15 0x04 >;
+ i2c@7000c500 {
+ compatible = "nvidia,tegra20-i2c";
+ reg = <0x7000c500 0x100>;
+ interrupts = <0 92 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
};
- sdhci@c8000400 {
- compatible = "nvidia,tegra20-sdhci";
- reg = <0xc8000400 0x200>;
- interrupts = < 0 19 0x04 >;
+ i2c@7000d000 {
+ compatible = "nvidia,tegra20-i2c-dvc";
+ reg = <0x7000d000 0x200>;
+ interrupts = <0 53 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
};
- sdhci@c8000600 {
- compatible = "nvidia,tegra20-sdhci";
- reg = <0xc8000600 0x200>;
- interrupts = < 0 31 0x04 >;
+ pmc {
+ compatible = "nvidia,tegra20-pmc";
+ reg = <0x7000e400 0x400>;
+ };
+
+ mc {
+ compatible = "nvidia,tegra20-mc";
+ reg = <0x7000f000 0x024
+ 0x7000f03c 0x3c4>;
+ interrupts = <0 77 0x04>;
+ };
+
+ gart {
+ compatible = "nvidia,tegra20-gart";
+ reg = <0x7000f024 0x00000018 /* controller registers */
+ 0x58000000 0x02000000>; /* GART aperture */
+ };
+
+ emc {
+ compatible = "nvidia,tegra20-emc";
+ reg = <0x7000f400 0x200>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
usb@c5000000 {
compatible = "nvidia,tegra20-ehci", "usb-ehci";
reg = <0xc5000000 0x4000>;
- interrupts = < 0 20 0x04 >;
+ interrupts = <0 20 0x04>;
phy_type = "utmi";
nvidia,has-legacy-mode;
+ status = "disable";
};
usb@c5004000 {
compatible = "nvidia,tegra20-ehci", "usb-ehci";
reg = <0xc5004000 0x4000>;
- interrupts = < 0 21 0x04 >;
+ interrupts = <0 21 0x04>;
phy_type = "ulpi";
+ status = "disable";
};
usb@c5008000 {
compatible = "nvidia,tegra20-ehci", "usb-ehci";
reg = <0xc5008000 0x4000>;
- interrupts = < 0 97 0x04 >;
+ interrupts = <0 97 0x04>;
phy_type = "utmi";
+ status = "disable";
+ };
+
+ sdhci@c8000000 {
+ compatible = "nvidia,tegra20-sdhci";
+ reg = <0xc8000000 0x200>;
+ interrupts = <0 14 0x04>;
+ status = "disable";
};
-};
+ sdhci@c8000200 {
+ compatible = "nvidia,tegra20-sdhci";
+ reg = <0xc8000200 0x200>;
+ interrupts = <0 15 0x04>;
+ status = "disable";
+ };
+
+ sdhci@c8000400 {
+ compatible = "nvidia,tegra20-sdhci";
+ reg = <0xc8000400 0x200>;
+ interrupts = <0 19 0x04>;
+ status = "disable";
+ };
+
+ sdhci@c8000600 {
+ compatible = "nvidia,tegra20-sdhci";
+ reg = <0xc8000600 0x200>;
+ interrupts = <0 31 0x04>;
+ status = "disable";
+ };
+
+ pmu {
+ compatible = "arm,cortex-a9-pmu";
+ interrupts = <0 56 0x04
+ 0 57 0x04>;
+ };
+};
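
The tegra20.dtsi hunks above switch the per-board peripherals to a default of status = "disable" in the SoC include, so each board .dts only flips the nodes it actually wires up to "okay". As a minimal sketch (not part of the patch; the node path and init hook are illustrative only), this is how the OF core's availability test treats those values — no status property or "okay"/"ok" counts as available, anything else, including "disable", keeps the device from being created:

    #include <linux/init.h>
    #include <linux/errno.h>
    #include <linux/of.h>
    #include <linux/printk.h>

    static int __init check_i2c_status(void)
    {
    	/* Path as the node appears under the root in tegra20.dtsi; illustrative. */
    	struct device_node *np = of_find_node_by_path("/i2c@7000c000");

    	if (!np)
    		return -ENODEV;

    	/*
    	 * of_device_is_available() is non-zero only when the node has no
    	 * "status" property or it reads "okay"/"ok"; the new
    	 * status = "disable" default therefore suppresses the device
    	 * until a board file overrides it.
    	 */
    	if (of_device_is_available(np))
    		pr_info("i2c@7000c000 enabled by the board dts\n");
    	else
    		pr_info("i2c@7000c000 left disabled by tegra20.dtsi\n");

    	of_node_put(np);
    	return 0;
    }
    late_initcall(check_i2c_status);
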
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 62a7b39f1c9a..2dcc09e784b5 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -4,183 +4,268 @@
compatible = "nvidia,tegra30";
interrupt-parent = <&intc>;
- pmc@7000f400 {
- compatible = "nvidia,tegra20-pmc", "nvidia,tegra30-pmc";
- reg = <0x7000e400 0x400>;
- };
-
- intc: interrupt-controller@50041000 {
+ intc: interrupt-controller {
compatible = "arm,cortex-a9-gic";
+ reg = <0x50041000 0x1000
+ 0x50040100 0x0100>;
interrupt-controller;
#interrupt-cells = <3>;
- reg = < 0x50041000 0x1000 >,
- < 0x50040100 0x0100 >;
};
- pmu {
- compatible = "arm,cortex-a9-pmu";
- interrupts = <0 144 0x04
- 0 145 0x04
- 0 146 0x04
- 0 147 0x04>;
- };
-
- apbdma: dma@6000a000 {
+ apbdma: dma {
compatible = "nvidia,tegra30-apbdma", "nvidia,tegra20-apbdma";
reg = <0x6000a000 0x1400>;
- interrupts = < 0 104 0x04
- 0 105 0x04
- 0 106 0x04
- 0 107 0x04
- 0 108 0x04
- 0 109 0x04
- 0 110 0x04
- 0 111 0x04
- 0 112 0x04
- 0 113 0x04
- 0 114 0x04
- 0 115 0x04
- 0 116 0x04
- 0 117 0x04
- 0 118 0x04
- 0 119 0x04
- 0 128 0x04
- 0 129 0x04
- 0 130 0x04
- 0 131 0x04
- 0 132 0x04
- 0 133 0x04
- 0 134 0x04
- 0 135 0x04
- 0 136 0x04
- 0 137 0x04
- 0 138 0x04
- 0 139 0x04
- 0 140 0x04
- 0 141 0x04
- 0 142 0x04
- 0 143 0x04 >;
- };
-
- i2c@7000c000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
- reg = <0x7000C000 0x100>;
- interrupts = < 0 38 0x04 >;
- };
-
- i2c@7000c400 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
- reg = <0x7000C400 0x100>;
- interrupts = < 0 84 0x04 >;
- };
-
- i2c@7000c500 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
- reg = <0x7000C500 0x100>;
- interrupts = < 0 92 0x04 >;
+ interrupts = <0 104 0x04
+ 0 105 0x04
+ 0 106 0x04
+ 0 107 0x04
+ 0 108 0x04
+ 0 109 0x04
+ 0 110 0x04
+ 0 111 0x04
+ 0 112 0x04
+ 0 113 0x04
+ 0 114 0x04
+ 0 115 0x04
+ 0 116 0x04
+ 0 117 0x04
+ 0 118 0x04
+ 0 119 0x04
+ 0 128 0x04
+ 0 129 0x04
+ 0 130 0x04
+ 0 131 0x04
+ 0 132 0x04
+ 0 133 0x04
+ 0 134 0x04
+ 0 135 0x04
+ 0 136 0x04
+ 0 137 0x04
+ 0 138 0x04
+ 0 139 0x04
+ 0 140 0x04
+ 0 141 0x04
+ 0 142 0x04
+ 0 143 0x04>;
};
- i2c@7000c700 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
- reg = <0x7000c700 0x100>;
- interrupts = < 0 120 0x04 >;
+ ahb: ahb {
+ compatible = "nvidia,tegra30-ahb";
+ reg = <0x6000c004 0x14c>; /* AHB Arbitration + Gizmo Controller */
};
- i2c@7000d000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
- reg = <0x7000D000 0x100>;
- interrupts = < 0 53 0x04 >;
- };
-
- gpio: gpio@6000d000 {
+ gpio: gpio {
compatible = "nvidia,tegra30-gpio", "nvidia,tegra20-gpio";
- reg = < 0x6000d000 0x1000 >;
- interrupts = < 0 32 0x04
- 0 33 0x04
- 0 34 0x04
- 0 35 0x04
- 0 55 0x04
- 0 87 0x04
- 0 89 0x04
- 0 125 0x04 >;
+ reg = <0x6000d000 0x1000>;
+ interrupts = <0 32 0x04
+ 0 33 0x04
+ 0 34 0x04
+ 0 35 0x04
+ 0 55 0x04
+ 0 87 0x04
+ 0 89 0x04
+ 0 125 0x04>;
#gpio-cells = <2>;
gpio-controller;
#interrupt-cells = <2>;
interrupt-controller;
};
+ pinmux: pinmux {
+ compatible = "nvidia,tegra30-pinmux";
+ reg = <0x70000868 0xd0 /* Pad control registers */
+ 0x70003000 0x3e0>; /* Mux registers */
+ };
+
serial@70006000 {
compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
reg = <0x70006000 0x40>;
reg-shift = <2>;
- interrupts = < 0 36 0x04 >;
+ interrupts = <0 36 0x04>;
+ status = "disable";
};
serial@70006040 {
compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
reg = <0x70006040 0x40>;
reg-shift = <2>;
- interrupts = < 0 37 0x04 >;
+ interrupts = <0 37 0x04>;
+ status = "disable";
};
serial@70006200 {
compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
reg = <0x70006200 0x100>;
reg-shift = <2>;
- interrupts = < 0 46 0x04 >;
+ interrupts = <0 46 0x04>;
+ status = "disable";
};
serial@70006300 {
compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
reg = <0x70006300 0x100>;
reg-shift = <2>;
- interrupts = < 0 90 0x04 >;
+ interrupts = <0 90 0x04>;
+ status = "disable";
};
serial@70006400 {
compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
reg = <0x70006400 0x100>;
reg-shift = <2>;
- interrupts = < 0 91 0x04 >;
+ interrupts = <0 91 0x04>;
+ status = "disable";
+ };
+
+ i2c@7000c000 {
+ compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+ reg = <0x7000c000 0x100>;
+ interrupts = <0 38 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
+ };
+
+ i2c@7000c400 {
+ compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+ reg = <0x7000c400 0x100>;
+ interrupts = <0 84 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
+ };
+
+ i2c@7000c500 {
+ compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+ reg = <0x7000c500 0x100>;
+ interrupts = <0 92 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
+ };
+
+ i2c@7000c700 {
+ compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+ reg = <0x7000c700 0x100>;
+ interrupts = <0 120 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
+ };
+
+ i2c@7000d000 {
+ compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+ reg = <0x7000d000 0x100>;
+ interrupts = <0 53 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
+ };
+
+ pmc {
+ compatible = "nvidia,tegra20-pmc", "nvidia,tegra30-pmc";
+ reg = <0x7000e400 0x400>;
+ };
+
+ mc {
+ compatible = "nvidia,tegra30-mc";
+ reg = <0x7000f000 0x010
+ 0x7000f03c 0x1b4
+ 0x7000f200 0x028
+ 0x7000f284 0x17c>;
+ interrupts = <0 77 0x04>;
+ };
+
+ smmu {
+ compatible = "nvidia,tegra30-smmu";
+ reg = <0x7000f010 0x02c
+ 0x7000f1f0 0x010
+ 0x7000f228 0x05c>;
+ nvidia,#asids = <4>; /* # of ASIDs */
+ dma-window = <0 0x40000000>; /* IOVA start & length */
+ nvidia,ahb = <&ahb>;
+ };
+
+ ahub {
+ compatible = "nvidia,tegra30-ahub";
+ reg = <0x70080000 0x200
+ 0x70080200 0x100>;
+ interrupts = <0 103 0x04>;
+ nvidia,dma-request-selector = <&apbdma 1>;
+
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ tegra_i2s0: i2s@70080300 {
+ compatible = "nvidia,tegra30-i2s";
+ reg = <0x70080300 0x100>;
+ nvidia,ahub-cif-ids = <4 4>;
+ status = "disable";
+ };
+
+ tegra_i2s1: i2s@70080400 {
+ compatible = "nvidia,tegra30-i2s";
+ reg = <0x70080400 0x100>;
+ nvidia,ahub-cif-ids = <5 5>;
+ status = "disable";
+ };
+
+ tegra_i2s2: i2s@70080500 {
+ compatible = "nvidia,tegra30-i2s";
+ reg = <0x70080500 0x100>;
+ nvidia,ahub-cif-ids = <6 6>;
+ status = "disable";
+ };
+
+ tegra_i2s3: i2s@70080600 {
+ compatible = "nvidia,tegra30-i2s";
+ reg = <0x70080600 0x100>;
+ nvidia,ahub-cif-ids = <7 7>;
+ status = "disable";
+ };
+
+ tegra_i2s4: i2s@70080700 {
+ compatible = "nvidia,tegra30-i2s";
+ reg = <0x70080700 0x100>;
+ nvidia,ahub-cif-ids = <8 8>;
+ status = "disable";
+ };
};
sdhci@78000000 {
compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
reg = <0x78000000 0x200>;
- interrupts = < 0 14 0x04 >;
+ interrupts = <0 14 0x04>;
+ status = "disable";
};
sdhci@78000200 {
compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
reg = <0x78000200 0x200>;
- interrupts = < 0 15 0x04 >;
+ interrupts = <0 15 0x04>;
+ status = "disable";
};
sdhci@78000400 {
compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
reg = <0x78000400 0x200>;
- interrupts = < 0 19 0x04 >;
+ interrupts = <0 19 0x04>;
+ status = "disable";
};
sdhci@78000600 {
compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
reg = <0x78000600 0x200>;
- interrupts = < 0 31 0x04 >;
+ interrupts = <0 31 0x04>;
+ status = "disable";
};
- pinmux: pinmux@70000000 {
- compatible = "nvidia,tegra30-pinmux";
- reg = < 0x70000868 0xd0 /* Pad control registers */
- 0x70003000 0x3e0 >; /* Mux registers */
+ pmu {
+ compatible = "arm,cortex-a9-pmu";
+ interrupts = <0 144 0x04
+ 0 145 0x04
+ 0 146 0x04
+ 0 147 0x04>;
};
};
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
index 941b161ab78c..7e1091d91af8 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
@@ -73,7 +73,10 @@
#address-cells = <0>;
interrupt-controller;
reg = <0x2c001000 0x1000>,
- <0x2c002000 0x100>;
+ <0x2c002000 0x1000>,
+ <0x2c004000 0x2000>,
+ <0x2c006000 0x2000>;
+ interrupts = <1 9 0xf04>;
};
memory-controller@7ffd0000 {
@@ -93,6 +96,14 @@
<0 91 4>;
};
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
pmu {
compatible = "arm,cortex-a15-pmu", "arm,cortex-a9-pmu";
interrupts = <0 68 4>,
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
index 6905e66d4748..18917a0f8604 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
@@ -77,13 +77,18 @@
timer@2c000600 {
compatible = "arm,cortex-a5-twd-timer";
- reg = <0x2c000600 0x38>;
- interrupts = <1 2 0x304>,
- <1 3 0x304>;
+ reg = <0x2c000600 0x20>;
+ interrupts = <1 13 0x304>;
+ };
+
+ watchdog@2c000620 {
+ compatible = "arm,cortex-a5-twd-wdt";
+ reg = <0x2c000620 0x20>;
+ interrupts = <1 14 0x304>;
};
gic: interrupt-controller@2c001000 {
- compatible = "arm,corex-a5-gic", "arm,cortex-a9-gic";
+ compatible = "arm,cortex-a5-gic", "arm,cortex-a9-gic";
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca9.dts b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
index da778693be54..3f0c736d31d6 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
@@ -105,8 +105,13 @@
timer@1e000600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x1e000600 0x20>;
- interrupts = <1 2 0xf04>,
- <1 3 0xf04>;
+ interrupts = <1 13 0xf04>;
+ };
+
+ watchdog@1e000620 {
+ compatible = "arm,cortex-a9-twd-wdt";
+ reg = <0x1e000620 0x20>;
+ interrupts = <1 14 0xf04>;
};
gic: interrupt-controller@1e001000 {
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 595ecd290ebf..9d7eb530f95f 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -173,7 +173,8 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
read_lock_irqsave(&device_info->lock, flags);
list_for_each_entry(b, &device_info->safe_buffers, node)
- if (b->safe_dma_addr == safe_dma_addr) {
+ if (b->safe_dma_addr <= safe_dma_addr &&
+ b->safe_dma_addr + b->size > safe_dma_addr) {
rb = b;
break;
}
@@ -254,7 +255,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
if (buf == NULL) {
dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
__func__, ptr);
- return ~0;
+ return DMA_ERROR_CODE;
}
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -307,8 +308,9 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
* substitute the safe buffer for the unsafe one.
* (basically move the buffer from an unsafe area to a safe one)
*/
-dma_addr_t __dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir)
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
dma_addr_t dma_addr;
int ret;
@@ -320,21 +322,20 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
ret = needs_bounce(dev, dma_addr, size);
if (ret < 0)
- return ~0;
+ return DMA_ERROR_CODE;
if (ret == 0) {
- __dma_page_cpu_to_dev(page, offset, size, dir);
+ arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
return dma_addr;
}
if (PageHighMem(page)) {
dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
- return ~0;
+ return DMA_ERROR_CODE;
}
return map_single(dev, page_address(page) + offset, size, dir);
}
-EXPORT_SYMBOL(__dma_map_page);
/*
* see if a mapped address was really a "safe" buffer and if so, copy
@@ -342,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
* the safe buffer. (basically return things back to the way they
* should be)
*/
-void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
struct safe_buffer *buf;
@@ -352,19 +353,18 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
buf = find_safe_buffer_dev(dev, dma_addr, __func__);
if (!buf) {
- __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
- dma_addr & ~PAGE_MASK, size, dir);
+ arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
return;
}
unmap_single(dev, buf, size, dir);
}
-EXPORT_SYMBOL(__dma_unmap_page);
-int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
- unsigned long off, size_t sz, enum dma_data_direction dir)
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t sz, enum dma_data_direction dir)
{
struct safe_buffer *buf;
+ unsigned long off;
dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
__func__, addr, off, sz, dir);
@@ -373,6 +373,8 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
if (!buf)
return 1;
+ off = addr - buf->safe_dma_addr;
+
BUG_ON(buf->direction != dir);
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -388,12 +390,21 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
}
return 0;
}
-EXPORT_SYMBOL(dmabounce_sync_for_cpu);
-int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
- unsigned long off, size_t sz, enum dma_data_direction dir)
+static void dmabounce_sync_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+ return;
+
+ arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+}
+
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+ size_t sz, enum dma_data_direction dir)
{
struct safe_buffer *buf;
+ unsigned long off;
dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
__func__, addr, off, sz, dir);
@@ -402,6 +413,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
if (!buf)
return 1;
+ off = addr - buf->safe_dma_addr;
+
BUG_ON(buf->direction != dir);
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -417,7 +430,38 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
}
return 0;
}
-EXPORT_SYMBOL(dmabounce_sync_for_device);
+
+static void dmabounce_sync_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+ return;
+
+ arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
+
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (dev->archdata.dmabounce)
+ return 0;
+
+ return arm_dma_ops.set_dma_mask(dev, dma_mask);
+}
+
+static struct dma_map_ops dmabounce_ops = {
+ .alloc = arm_dma_alloc,
+ .free = arm_dma_free,
+ .mmap = arm_dma_mmap,
+ .map_page = dmabounce_map_page,
+ .unmap_page = dmabounce_unmap_page,
+ .sync_single_for_cpu = dmabounce_sync_for_cpu,
+ .sync_single_for_device = dmabounce_sync_for_device,
+ .map_sg = arm_dma_map_sg,
+ .unmap_sg = arm_dma_unmap_sg,
+ .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_dma_sync_sg_for_device,
+ .set_dma_mask = dmabounce_set_mask,
+};
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
const char *name, unsigned long size)
@@ -479,6 +523,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
#endif
dev->archdata.dmabounce = device_info;
+ set_dma_ops(dev, &dmabounce_ops);
dev_info(dev, "dmabounce: registered device\n");
@@ -497,6 +542,7 @@ void dmabounce_unregister_dev(struct device *dev)
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
dev->archdata.dmabounce = NULL;
+ set_dma_ops(dev, NULL);
if (!device_info) {
dev_warn(dev,
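
With the dmabounce.c rework above, the bounce-buffer code no longer hooks __dma_map_page()/__dma_unmap_page() behind the generic API; dmabounce_register_dev() now installs a private struct dma_map_ops via set_dma_ops(), and the ordinary streaming-DMA calls dispatch through it. The fragment below is only a caller-side sketch, not part of the patch — the device, page and direction are hypothetical — showing that a driver keeps using the usual dma_map_page()/dma_unmap_page() entry points while the bouncing (or the pass-through to arm_dma_ops) happens inside the registered ops:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/mm.h>

    /* Hypothetical: dev was already registered with dmabounce_register_dev(). */
    static int example_stream_one_page(struct device *dev, struct page *page)
    {
    	dma_addr_t dma;

    	/*
    	 * Dispatches to dmabounce_map_page() through get_dma_ops(dev)->map_page;
    	 * for DMA_TO_DEVICE the data is copied into a safe buffer if needed.
    	 */
    	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
    	if (dma_mapping_error(dev, dma))	/* DMA_ERROR_CODE on failure */
    		return -ENOMEM;

    	/* ... hand "dma" to the hardware and wait for completion ... */

    	/* Releases any bounce buffer that was substituted at map time. */
    	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
    	return 0;
    }
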
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 09a02963cf58..e05a2f1665a7 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -33,6 +33,7 @@ CONFIG_MACH_IMX27LITE=y
CONFIG_MACH_PCA100=y
CONFIG_MACH_MXT_TD60=y
CONFIG_MACH_IMX27IPCAM=y
+CONFIG_MACH_IMX27_DT=y
CONFIG_MXC_IRQ_PRIOR=y
CONFIG_MXC_PWM=y
CONFIG_NO_HZ=y
@@ -172,7 +173,7 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PCF8563=y
CONFIG_RTC_DRV_IMXDI=y
-CONFIG_RTC_MXC=y
+CONFIG_RTC_DRV_MXC=y
CONFIG_DMADEVICES=y
CONFIG_IMX_SDMA=y
CONFIG_IMX_DMA=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index dc6f6411bbf5..b1d3675df72c 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -64,6 +64,12 @@ CONFIG_IPV6=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_SST25L=y
# CONFIG_STANDALONE is not set
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=y
@@ -172,7 +178,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
-CONFIG_RTC_MXC=y
+CONFIG_RTC_DRV_MXC=y
CONFIG_DMADEVICES=y
CONFIG_IMX_SDMA=y
CONFIG_EXT2_FS=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 1ebbf451c48d..5406c23a02e3 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -22,6 +22,7 @@ CONFIG_BLK_DEV_INTEGRITY=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_MXS=y
+CONFIG_MACH_MXS_DT=y
CONFIG_MACH_MX23EVK=y
CONFIG_MACH_MX28EVK=y
CONFIG_MACH_STMP378X_DEVB=y
diff --git a/arch/arm/configs/prima2_defconfig b/arch/arm/configs/prima2_defconfig
new file mode 100644
index 000000000000..c328ac65479a
--- /dev/null
+++ b/arch/arm/configs/prima2_defconfig
@@ -0,0 +1,69 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_ARCH_PRIMA2=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_KEXEC=y
+CONFIG_BINFMT_MISC=y
+CONFIG_PM_RUNTIME=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_SERIAL_SIRFSOC=y
+CONFIG_SERIAL_SIRFSOC_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_SIRF=y
+CONFIG_SPI=y
+CONFIG_SPI_SIRF=y
+CONFIG_SPI_SPIDEV=y
+# CONFIG_HWMON is not set
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB_GADGET=y
+CONFIG_USB_FILE_STORAGE=m
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_DMADEVICES=y
+CONFIG_DMADEVICES_DEBUG=y
+CONFIG_DMADEVICES_VDEBUG=y
+CONFIG_SIRF_DMA=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CRAMFS=y
+CONFIG_ROMFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_INFO=y
+CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/spear13xx_defconfig b/arch/arm/configs/spear13xx_defconfig
new file mode 100644
index 000000000000..1fdb82694ca2
--- /dev/null
+++ b/arch/arm/configs/spear13xx_defconfig
@@ -0,0 +1,95 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR13XX=y
+CONFIG_MACH_SPEAR1310=y
+CONFIG_MACH_SPEAR1340=y
+# CONFIG_SWP_EMULATE is not set
+CONFIG_SMP=y
+# CONFIG_SMP_ON_UP is not set
+# CONFIG_ARM_CPU_TOPOLOGY is not set
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_FSMC=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_PATA_ARASAN_CF=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_SPEAR=y
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=8192
+CONFIG_I2C=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_SPI=y
+CONFIG_SPI_PL022=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_PL061=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_MPCORE_WATCHDOG=y
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_SPEAR=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_DW_DMAC=y
+CONFIG_DMATEST=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_AUTOFS4_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=m
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_INFO=y
diff --git a/arch/arm/configs/spear3xx_defconfig b/arch/arm/configs/spear3xx_defconfig
index 7ed42912d69a..865980c5f212 100644
--- a/arch/arm/configs/spear3xx_defconfig
+++ b/arch/arm/configs/spear3xx_defconfig
@@ -14,6 +14,9 @@ CONFIG_BINFMT_MISC=y
CONFIG_NET=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSMC=y
CONFIG_BLK_DEV_RAM=y
@@ -73,6 +76,7 @@ CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=m
diff --git a/arch/arm/configs/spear6xx_defconfig b/arch/arm/configs/spear6xx_defconfig
index cf94bc73a0e0..a2a1265f86b6 100644
--- a/arch/arm/configs/spear6xx_defconfig
+++ b/arch/arm/configs/spear6xx_defconfig
@@ -8,11 +8,13 @@ CONFIG_MODVERSIONS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_PLAT_SPEAR=y
CONFIG_ARCH_SPEAR6XX=y
-CONFIG_BOARD_SPEAR600_DT=y
CONFIG_BINFMT_MISC=y
CONFIG_NET=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSMC=y
CONFIG_BLK_DEV_RAM=y
@@ -64,6 +66,7 @@ CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=m
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 351d6708c3ae..1198dd61c7c4 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -45,6 +45,7 @@ CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
+CONFIG_PM_RUNTIME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -91,6 +92,8 @@ CONFIG_USB_NET_SMSC75XX=y
CONFIG_USB_NET_SMSC95XX=y
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_MPU3050=y
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
@@ -103,12 +106,15 @@ CONFIG_I2C=y
CONFIG_I2C_TEGRA=y
CONFIG_SPI=y
CONFIG_SPI_TEGRA=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_BATTERY_SBS=y
CONFIG_SENSORS_LM90=y
CONFIG_MFD_TPS6586X=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_TPS62360=y
CONFIG_REGULATOR_TPS6586X=y
CONFIG_SOUND=y
CONFIG_SND=y
@@ -133,16 +139,19 @@ CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_TEGRA=y
CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EM3027=y
CONFIG_RTC_DRV_TEGRA=y
CONFIG_STAGING=y
-CONFIG_IIO=y
CONFIG_SENSORS_ISL29018=y
+CONFIG_SENSORS_ISL29028=y
CONFIG_SENSORS_AK8975=y
CONFIG_MFD_NVEC=y
CONFIG_KEYBOARD_NVEC=y
CONFIG_SERIO_NVEC_PS2=y
CONFIG_TEGRA_IOMMU_GART=y
CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_MEMORY=y
+CONFIG_IIO=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 7aa368003b05..b69c0d3285f8 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -7,12 +7,16 @@
#define ASMARM_DEVICE_H
struct dev_archdata {
+ struct dma_map_ops *dma_ops;
#ifdef CONFIG_DMABOUNCE
struct dmabounce_device_info *dmabounce;
#endif
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ struct dma_iommu_mapping *mapping;
+#endif
};
struct omap_device;
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
new file mode 100644
index 000000000000..3ed37b4d93da
--- /dev/null
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -0,0 +1,15 @@
+#ifndef ASMARM_DMA_CONTIGUOUS_H
+#define ASMARM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+
+#endif
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
new file mode 100644
index 000000000000..799b09409fad
--- /dev/null
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -0,0 +1,34 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+
+struct dma_iommu_mapping {
+ /* iommu specific data */
+ struct iommu_domain *domain;
+
+ void *bitmap;
+ size_t bits;
+ unsigned int order;
+ dma_addr_t base;
+
+ spinlock_t lock;
+ struct kref kref;
+};
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+ int order);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping);
+
+#endif /* __KERNEL__ */
+#endif
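
The new asm/dma-iommu.h above exposes the helpers that let bus or platform code back the DMA API with an IOMMU. As a rough usage sketch — the bus type, IOVA base, window size and order are made-up values, not taken from the patch, and the ERR_PTR-style return is assumed — a caller creates a mapping once and then attaches individual devices to it so their dma_map_ops go through the IOMMU path:

    #include <linux/err.h>
    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <asm/dma-iommu.h>

    static int example_attach_iommu(struct device *dev)
    {
    	struct dma_iommu_mapping *mapping;
    	int ret;

    	/* Hypothetical 128 MiB IOVA window at 0x80000000, 4 KiB pages (order 0). */
    	mapping = arm_iommu_create_mapping(&platform_bus_type,
    					   0x80000000, 0x08000000, 0);
    	if (IS_ERR(mapping))
    		return PTR_ERR(mapping);

    	ret = arm_iommu_attach_device(dev, mapping);
    	if (ret) {
    		arm_iommu_release_mapping(mapping);
    		return ret;
    	}

    	/* dma_map_*() on dev now allocates IOVAs from this window. */
    	return 0;
    }
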
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index cb3b7c981c4b..bbef15d04890 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,11 +5,35 @@
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
+#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
+#define DMA_ERROR_CODE (~0)
+extern struct dma_map_ops arm_dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (dev && dev->archdata.dma_ops)
+ return dev->archdata.dma_ops;
+ return &arm_dma_ops;
+}
+
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+ BUG_ON(!dev);
+ dev->archdata.dma_ops = ops;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ return get_dma_ops(dev)->set_dma_mask(dev, mask);
+}
+
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif
@@ -62,68 +86,11 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
#endif
/*
- * The DMA API is built upon the notion of "buffer ownership". A buffer
- * is either exclusively owned by the CPU (and therefore may be accessed
- * by it) or exclusively owned by the DMA device. These helper functions
- * represent the transitions between these two ownership states.
- *
- * Note, however, that on later ARMs, this notion does not work due to
- * speculative prefetches. We model our approach on the assumption that
- * the CPU does do speculative prefetches, which means we clean caches
- * before transfers and delay cache invalidation until transfer completion.
- *
- * Private support functions: these are not part of the API and are
- * liable to change. Drivers must not use these.
- */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- extern void ___dma_single_cpu_to_dev(const void *, size_t,
- enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- extern void ___dma_single_dev_to_cpu(const void *, size_t,
- enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
- size_t size, enum dma_data_direction dir)
-{
- extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
- size_t, enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
- size_t size, enum dma_data_direction dir)
-{
- extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
- size_t, enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-extern int dma_supported(struct device *, u64);
-extern int dma_set_mask(struct device *, u64);
-
-/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return dma_addr == ~0;
+ return dma_addr == DMA_ERROR_CODE;
}
/*
@@ -141,69 +108,118 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
{
}
+extern int dma_supported(struct device *dev, u64 mask);
+
/**
- * dma_alloc_coherent - allocate consistent memory for DMA
+ * arm_dma_alloc - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: required memory size
* @handle: bus-specific DMA address
+ * @attrs: optional attributes that specify mapping properties
*
- * Allocate some uncached, unbuffered memory for a device for
- * performing DMA. This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
+ * Allocate some memory for a device for performing DMA. This function
+ * allocates pages, and will return the CPU-viewed address, and sets @handle
+ * to be the device-viewed address.
*/
-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, struct dma_attrs *attrs);
+
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
+ BUG_ON(!ops);
+
+ cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+ return cpu_addr;
+}
/**
- * dma_free_coherent - free memory allocated by dma_alloc_coherent
+ * arm_dma_free - free memory allocated by arm_dma_alloc
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: size of memory originally requested in dma_alloc_coherent
* @cpu_addr: CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
*
* Free (and unmap) a DMA buffer previously allocated by
- * dma_alloc_coherent().
+ * arm_dma_alloc().
*
* References to memory and mappings associated with cpu_addr/handle
* during and after this call executing are illegal.
*/
-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs);
+
+#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
/**
- * dma_mmap_coherent - map a coherent DMA allocation into user space
+ * arm_dma_mmap - map a coherent DMA allocation into user space
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @vma: vm_area_struct describing requested user mapping
* @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
* @size: size of memory originally requested in dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
*
* Map a coherent DMA buffer previously allocated by dma_alloc_coherent
* into user space. The coherent DMA buffer must not be freed by the
* driver until the user space mapping has been released.
*/
-int dma_mmap_coherent(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t);
+extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs);
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-/**
- * dma_alloc_writecombine - allocate writecombining memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- *
- * Allocate some uncached, buffered memory for a device for
- * performing DMA. This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
- */
-extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
- gfp_t);
+static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
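/*
 * Illustrative sketch, not from this patch: a hypothetical driver mmap()
 * file operation exporting a previously allocated coherent buffer to user
 * space through the wrapper above.  "struct mydrv_priv" is assumed to hold
 * the device pointer and the addresses saved at allocation time.
 */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydrv_priv *p = file->private_data;

	return dma_mmap_coherent(p->dev, vma, p->cpu_addr, p->dma_handle,
				 vma->vm_end - vma->vm_start);
}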
+
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
-#define dma_free_writecombine(dev,size,cpu_addr,handle) \
- dma_free_coherent(dev,size,cpu_addr,handle)
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
-int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t);
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
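/*
 * Illustrative sketch, not from this patch: the writecombine variants are
 * now thin wrappers that only set DMA_ATTR_WRITE_COMBINE before falling
 * through to the attrs helpers, so e.g. a frame buffer driver keeps the
 * familiar call.  "myfb_alloc_vram" and its parameters are hypothetical.
 */
static int myfb_alloc_vram(struct device *dev, size_t fb_size,
			   void **fb_virt, dma_addr_t *fb_phys)
{
	*fb_virt = dma_alloc_writecombine(dev, fb_size, fb_phys, GFP_KERNEL);
	return *fb_virt ? 0 : -ENOMEM;
}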
/*
* This can be called during boot to increase the size of the consistent
@@ -212,8 +228,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
*/
extern void __init init_consistent_dma_size(unsigned long size);
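/*
 * Illustrative sketch, not from this patch: a platform needing a larger
 * consistent DMA area would typically raise the size early in boot, for
 * example from its map_io callback.  "myplat" and the 8MB figure are
 * hypothetical.
 */
static void __init myplat_map_io(void)
{
	init_consistent_dma_size(SZ_8M);
	/* ... iotable_init() and the rest of the static mappings ... */
}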
-
-#ifdef CONFIG_DMABOUNCE
/*
* For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
* and utilize bounce buffers as needed to work around limited DMA windows.
@@ -253,222 +267,19 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
*/
extern void dmabounce_unregister_dev(struct device *);
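/*
 * Illustrative sketch, not from this patch: a bus driver with a limited DMA
 * window registers each downstream device with dmabounce and drops the
 * registration when the device goes away.  The buffer sizes, the 64MB
 * window and all the "mybus_*" names are hypothetical; see the prototype
 * above for the exact argument list.
 */
static int mybus_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) > SZ_64M;	/* falls outside the DMA window? */
}

static int mybus_add_device(struct device *dev)
{
	return dmabounce_register_dev(dev, 512, 4096, mybus_needs_bounce);
}

static void mybus_del_device(struct device *dev)
{
	dmabounce_unregister_dev(dev);
}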
-/*
- * The DMA API, implemented by dmabounce.c. See below for descriptions.
- */
-extern dma_addr_t __dma_map_page(struct device *, struct page *,
- unsigned long, size_t, enum dma_data_direction);
-extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
- enum dma_data_direction);
-
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
- size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
- size_t, enum dma_data_direction);
-#else
-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- return 1;
-}
-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- return 1;
-}
-
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- __dma_page_cpu_to_dev(page, offset, size, dir);
- return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
- handle & ~PAGE_MASK, size, dir);
-}
-#endif /* CONFIG_DMABOUNCE */
-
-/**
- * dma_map_single - map a single buffer for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @cpu_addr: CPU direct mapped address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_single() or
- * dma_sync_single_for_cpu().
- */
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction dir)
-{
- unsigned long offset;
- struct page *page;
- dma_addr_t addr;
-
- BUG_ON(!virt_addr_valid(cpu_addr));
- BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
- BUG_ON(!valid_dma_direction(dir));
-
- page = virt_to_page(cpu_addr);
- offset = (unsigned long)cpu_addr & ~PAGE_MASK;
- addr = __dma_map_page(dev, page, offset, size, dir);
- debug_dma_map_page(dev, page, offset, size, dir, addr, true);
-
- return addr;
-}
-
-/**
- * dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_page().
- */
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- dma_addr_t addr;
-
- BUG_ON(!valid_dma_direction(dir));
-
- addr = __dma_map_page(dev, page, offset, size, dir);
- debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
- return addr;
-}
-
-/**
- * dma_unmap_single - unmap a single buffer previously mapped
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_single)
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Unmap a single streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_single() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- debug_dma_unmap_page(dev, handle, size, dir, true);
- __dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Unmap a page streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_page() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- debug_dma_unmap_page(dev, handle, size, dir, false);
- __dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_sync_single_range_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @offset: offset of region to start sync
- * @size: size of region to sync
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Make physical memory consistent for a single streaming mode DMA
- * translation after a transfer.
- *
- * If you perform a dma_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so. At the
- * next point you give the PCI dma address back to the card, you
- * must first the perform a dma_sync_for_device, and then the
- * device again owns the buffer.
- */
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t handle, unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!valid_dma_direction(dir));
-
- debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
-
- if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
- return;
-
- __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t handle, unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!valid_dma_direction(dir));
-
- debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
-
- if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
- return;
-
- __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- dma_sync_single_range_for_device(dev, handle, 0, size, dir);
-}
/*
* The scatter list versions of the above methods.
*/
-extern int dma_map_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
+extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
+ enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
+ enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
enum dma_data_direction);
-extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-
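/*
 * Illustrative sketch, not from this patch: scatterlist streaming DMA keeps
 * using the generic dma_map_sg()/dma_unmap_sg() wrappers, which are assumed
 * to reach the arm_dma_* implementations above through the device's
 * dma_map_ops.  "mydrv_do_transfer", "sg" and "nents" are hypothetical.
 */
static int mydrv_do_transfer(struct device *dev, struct scatterlist *sg,
			     int nents)
{
	int count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

	if (!count)
		return -ENOMEM;
	/* ... program the hardware and wait for completion ... */
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
	return 0;
}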
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h
index 33c78d7af2e1..4eea2107214b 100644
--- a/arch/arm/include/asm/hardware/pl080.h
+++ b/arch/arm/include/asm/hardware/pl080.h
@@ -102,6 +102,8 @@
#define PL080_WIDTH_16BIT (0x1)
#define PL080_WIDTH_32BIT (0x2)
+#define PL080N_CONFIG_ITPROT (1 << 20)
+#define PL080N_CONFIG_SECPROT (1 << 19)
#define PL080_CONFIG_HALT (1 << 18)
#define PL080_CONFIG_ACTIVE (1 << 17) /* RO */
#define PL080_CONFIG_LOCK (1 << 16)
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 9af5563dd3eb..815c669fec0a 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -47,9 +47,9 @@ extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
-#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a) = (v))
-#define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))
-#define __raw_writel(v,a) (__chk_io_ptr(a), *(volatile unsigned int __force *)(a) = (v))
+#define __raw_writeb(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned char __force *)(a) = (v)))
+#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
+#define __raw_writel(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned int __force *)(a) = (v)))
#define __raw_readb(a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a))
#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
@@ -229,11 +229,9 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
__raw_readl(c)); __r; })
-#define writeb_relaxed(v,c) ((void)__raw_writeb(v,c))
-#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \
- cpu_to_le16(v),c))
-#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \
- cpu_to_le32(v),c))
+#define writeb_relaxed(v,c) __raw_writeb(v,c)
+#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
@@ -281,12 +279,12 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
#define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
-#define iowrite8(v,p) ({ __iowmb(); (void)__raw_writeb(v, p); })
-#define iowrite16(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); })
-#define iowrite32(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); })
+#define iowrite8(v,p) ({ __iowmb(); __raw_writeb(v, p); })
+#define iowrite16(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); })
+#define iowrite32(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); })
-#define iowrite16be(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_be16(v), p); })
-#define iowrite32be(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_be32(v), p); })
+#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
#define ioread8_rep(p,d,c) __raw_readsb(p,d,c)
#define ioread16_rep(p,d,c) __raw_readsw(p,d,c)
diff --git a/arch/arm/include/asm/kvm_para.h b/arch/arm/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/arm/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index d7692cafde7f..0b1c94b8c652 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -43,6 +43,7 @@ struct machine_desc {
void (*init_irq)(void);
struct sys_timer *timer; /* system tick timer */
void (*init_machine)(void);
+ void (*init_late)(void);
#ifdef CONFIG_MULTI_IRQ_HANDLER
void (*handle_irq)(struct pt_regs *);
#endif
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index b36f3654bf54..a6efcdd6fd25 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -30,6 +30,7 @@ struct map_desc {
#define MT_MEMORY_DTCM 12
#define MT_MEMORY_ITCM 13
#define MT_MEMORY_SO 14
+#define MT_MEMORY_DMA_READY 15
#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 68388eb4946b..b79f8e97f775 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -148,6 +148,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
+#define TIF_SYSCALL_RESTARTSYS 10
#define TIF_POLLING_NRFLAG 16
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
@@ -162,16 +163,17 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
-#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_SYSCALL_RESTARTSYS (1 << TIF_SYSCALL_RESTARTSYS)
/* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_RESTARTSYS)
/*
* Change these and you break ASM code in entry-common.S
*/
-#define _TIF_WORK_MASK 0x000000ff
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_RESUME)
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 7bd2d3cb8957..4afed88d250a 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -53,9 +53,13 @@ fast_work_pending:
work_pending:
tst r1, #_TIF_NEED_RESCHED
bne work_resched
- tst r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
- beq no_work_pending
+ /*
+ * TIF_SIGPENDING or TIF_NOTIFY_RESUME must've been set if we got here
+ */
+ ldr r2, [sp, #S_PSR]
mov r0, sp @ 'regs'
+ tst r2, #15 @ are we returning to user mode?
+ bne no_work_pending @ no? just leave, then...
mov r2, why @ 'syscall'
tst r1, #_TIF_SIGPENDING @ delivering a signal?
movne why, #0 @ prevent further restarts
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 14e38261cd31..5700a7ae7f0b 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -25,6 +25,7 @@
#include <linux/regset.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
+#include <linux/unistd.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
@@ -917,6 +918,8 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
+ if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
+ scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return scno;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index ebfac782593f..e15d83bb4ea3 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -81,6 +81,7 @@ __setup("fpe=", fpe_setup);
extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
+extern void setup_dma_zone(struct machine_desc *desc);
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
@@ -800,6 +801,14 @@ static int __init customize_machine(void)
}
arch_initcall(customize_machine);
+static int __init init_machine_late(void)
+{
+ if (machine_desc->init_late)
+ machine_desc->init_late();
+ return 0;
+}
+late_initcall(init_machine_late);
+
#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
@@ -939,12 +948,8 @@ void __init setup_arch(char **cmdline_p)
machine_desc = mdesc;
machine_name = mdesc->name;
-#ifdef CONFIG_ZONE_DMA
- if (mdesc->dma_zone_size) {
- extern unsigned long arm_dma_zone_size;
- arm_dma_zone_size = mdesc->dma_zone_size;
- }
-#endif
+ setup_dma_zone(mdesc);
+
if (mdesc->restart_mode)
reboot_setup(&mdesc->restart_mode);
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index ec640412aed0..63f327dd5198 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -29,7 +29,6 @@
*/
#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-#define SWI_SYS_RESTART (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
/*
* With EABI, the syscall number has to be loaded into r7.
@@ -50,18 +49,6 @@ const unsigned long sigreturn_codes[7] = {
};
/*
- * Either we support OABI only, or we have EABI with the OABI
- * compat layer enabled. In the later case we don't know if
- * user space is EABI or not, and if not we must not clobber r7.
- * Always using the OABI syscall solves that issue and works for
- * all those cases.
- */
-const unsigned long syscall_restart_code[2] = {
- SWI_SYS_RESTART, /* swi __NR_restart_syscall */
- 0xe49df004, /* ldr pc, [sp], #4 */
-};
-
-/*
* atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
@@ -82,10 +69,10 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+ __get_user(mask, &act->sa_mask))
return -EFAULT;
- __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- __get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
}
@@ -94,10 +81,10 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
@@ -602,15 +589,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
int signr;
/*
- * We want the common case to go fast, which
- * is why we may in certain cases get here from
- * kernel mode. Just return without doing anything
- * if so.
- */
- if (!user_mode(regs))
- return;
-
- /*
* If we were from a system call, check for system call restarting...
*/
if (syscall) {
@@ -626,18 +604,13 @@ static void do_signal(struct pt_regs *regs, int syscall)
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
+ case -ERESTART_RESTARTBLOCK:
regs->ARM_r0 = regs->ARM_ORIG_r0;
regs->ARM_pc = restart_addr;
break;
- case -ERESTART_RESTARTBLOCK:
- regs->ARM_r0 = -EINTR;
- break;
}
}
- if (try_to_freeze())
- goto no_signal;
-
/*
* Get the signal to deliver. When running under ptrace, at this
* point the debugger may change all our registers ...
@@ -652,12 +625,14 @@ static void do_signal(struct pt_regs *regs, int syscall)
* debugger has chosen to restart at a different PC.
*/
if (regs->ARM_pc == restart_addr) {
- if (retval == -ERESTARTNOHAND
+ if (retval == -ERESTARTNOHAND ||
+ retval == -ERESTART_RESTARTBLOCK
|| (retval == -ERESTARTSYS
&& !(ka.sa.sa_flags & SA_RESTART))) {
regs->ARM_r0 = -EINTR;
regs->ARM_pc = continue_addr;
}
+ clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
}
if (test_thread_flag(TIF_RESTORE_SIGMASK))
@@ -677,7 +652,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
return;
}
- no_signal:
if (syscall) {
/*
* Handle restarting a different system call. As above,
@@ -685,38 +659,15 @@ static void do_signal(struct pt_regs *regs, int syscall)
* ignore the restart.
*/
if (retval == -ERESTART_RESTARTBLOCK
- && regs->ARM_pc == continue_addr) {
- if (thumb_mode(regs)) {
- regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
- regs->ARM_pc -= 2;
- } else {
-#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
- regs->ARM_r7 = __NR_restart_syscall;
- regs->ARM_pc -= 4;
-#else
- u32 __user *usp;
-
- regs->ARM_sp -= 4;
- usp = (u32 __user *)regs->ARM_sp;
-
- if (put_user(regs->ARM_pc, usp) == 0) {
- regs->ARM_pc = KERN_RESTART_CODE;
- } else {
- regs->ARM_sp += 4;
- force_sigsegv(0, current);
- }
-#endif
- }
- }
-
- /* If there's no signal to deliver, we just put the saved sigmask
- * back.
- */
- if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
- }
+ && regs->ARM_pc == restart_addr)
+ set_thread_flag(TIF_SYSCALL_RESTARTSYS);
}
+
+ /* If there's no signal to deliver, we just put the saved sigmask
+ * back.
+ */
+ if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
+ set_current_blocked(&current->saved_sigmask);
}
asmlinkage void
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
index 6fcfe8398aa4..5ff067b7c752 100644
--- a/arch/arm/kernel/signal.h
+++ b/arch/arm/kernel/signal.h
@@ -8,7 +8,5 @@
* published by the Free Software Foundation.
*/
#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
-#define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
extern const unsigned long sigreturn_codes[7];
-extern const unsigned long syscall_restart_code[2];
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b735521a4a54..2c7217d971db 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -109,7 +109,6 @@ static void percpu_timer_stop(void);
int __cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
- struct task_struct *p;
int ret;
ret = platform_cpu_disable(cpu);
@@ -139,12 +138,7 @@ int __cpu_disable(void)
flush_cache_all();
local_flush_tlb_all();
- read_lock(&tasklist_lock);
- for_each_process(p) {
- if (p->mm)
- cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
- }
- read_unlock(&tasklist_lock);
+ clear_tasks_mm_cpumask(cpu);
return 0;
}
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 3647170e9a16..4928d89758f4 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -820,8 +820,6 @@ void __init early_trap_init(void *vectors_base)
*/
memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
sigreturn_codes, sizeof(sigreturn_codes));
- memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
- syscall_restart_code, sizeof(syscall_restart_code));
flush_icache_range(vectors, vectors + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index f6747246d649..933fc9afe7d0 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -436,7 +436,6 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
atslave->dma_dev = &at_hdmac_device.dev;
atslave->cfg = ATC_FIFOCFG_HALFFIFO
| ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW;
- atslave->ctrla = ATC_SCSIZE_16 | ATC_DCSIZE_16;
if (mmc_id == 0) /* MCI0 */
atslave->cfg |= ATC_SRC_PER(AT_DMA_ID_MCI0)
| ATC_DST_PER(AT_DMA_ID_MCI0);
diff --git a/arch/arm/mach-at91/include/mach/at_hdmac.h b/arch/arm/mach-at91/include/mach/at_hdmac.h
index fff48d1a0f4e..cab0997be3de 100644
--- a/arch/arm/mach-at91/include/mach/at_hdmac.h
+++ b/arch/arm/mach-at91/include/mach/at_hdmac.h
@@ -26,18 +26,11 @@ struct at_dma_platform_data {
/**
* struct at_dma_slave - Controller-specific information about a slave
* @dma_dev: required DMA master device
- * @tx_reg: physical address of data register used for
- * memory-to-peripheral transfers
- * @rx_reg: physical address of data register used for
- * peripheral-to-memory transfers
- * @reg_width: peripheral register width
* @cfg: Platform-specific initializer for the CFG register
- * @ctrla: Platform-specific initializer for the CTRLA register
*/
struct at_dma_slave {
struct device *dma_dev;
u32 cfg;
- u32 ctrla;
};
@@ -64,24 +57,5 @@ struct at_dma_slave {
#define ATC_FIFOCFG_HALFFIFO (0x1 << 28)
#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28)
-/* Platform-configurable bits in CTRLA */
-#define ATC_SCSIZE_MASK (0x7 << 16) /* Source Chunk Transfer Size */
-#define ATC_SCSIZE_1 (0x0 << 16)
-#define ATC_SCSIZE_4 (0x1 << 16)
-#define ATC_SCSIZE_8 (0x2 << 16)
-#define ATC_SCSIZE_16 (0x3 << 16)
-#define ATC_SCSIZE_32 (0x4 << 16)
-#define ATC_SCSIZE_64 (0x5 << 16)
-#define ATC_SCSIZE_128 (0x6 << 16)
-#define ATC_SCSIZE_256 (0x7 << 16)
-#define ATC_DCSIZE_MASK (0x7 << 20) /* Destination Chunk Transfer Size */
-#define ATC_DCSIZE_1 (0x0 << 20)
-#define ATC_DCSIZE_4 (0x1 << 20)
-#define ATC_DCSIZE_8 (0x2 << 20)
-#define ATC_DCSIZE_16 (0x3 << 20)
-#define ATC_DCSIZE_32 (0x4 << 20)
-#define ATC_DCSIZE_64 (0x5 << 20)
-#define ATC_DCSIZE_128 (0x6 << 20)
-#define ATC_DCSIZE_256 (0x7 << 20)
#endif /* AT_HDMAC_H */
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index dc1afe5be20c..0031864e7f11 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -681,6 +681,7 @@ MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM")
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = da830_evm_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = da8xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 09f61073c8d9..0149fb453be3 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -1411,6 +1411,7 @@ MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM")
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = da850_evm_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = da8xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 82ed753fb360..1c7b1f46a8f3 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -357,6 +357,7 @@ MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = dm355_evm_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index d74a8b3445fb..8e7703213b08 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -276,6 +276,7 @@ MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = dm355_leopard_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 5bce2b83bb4f..688a9c556dc9 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -618,6 +618,7 @@ MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = dm365_evm_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 3683306e0245..d34ed55912b2 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -825,6 +825,7 @@ MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = davinci_evm_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index d72ab948d630..958679a20e13 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -788,6 +788,7 @@ MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = evm_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
@@ -798,6 +799,7 @@ MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = evm_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 672d820e2aa4..beecde3a1d2f 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -572,6 +572,7 @@ MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808")
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = mityomapl138_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = da8xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index a772bb45570a..5de69f2fcca9 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -278,6 +278,7 @@ MACHINE_START(NEUROS_OSD2, "Neuros OSD2")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = davinci_ntosd2_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index 45e815760a27..dc1208e9e664 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -343,6 +343,7 @@ MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard")
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = omapl138_hawk_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = da8xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 76e675096104..9078acf94bac 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -157,6 +157,7 @@ MACHINE_START(SFFSDR, "Lyrtech SFFSDR")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = davinci_sffsdr_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-tnetv107x-evm.c b/arch/arm/mach-davinci/board-tnetv107x-evm.c
index 5f14e30b00d8..ac4e003ad863 100644
--- a/arch/arm/mach-davinci/board-tnetv107x-evm.c
+++ b/arch/arm/mach-davinci/board-tnetv107x-evm.c
@@ -282,6 +282,7 @@ MACHINE_START(TNETV107X, "TNETV107X EVM")
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = tnetv107x_evm_board_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = tnetv107x_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index 008772e3b843..34668ead53c7 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -213,7 +213,7 @@ EXPORT_SYMBOL(clk_unregister);
/*
* Disable any unused clocks left on by the bootloader
*/
-static int __init clk_disable_unused(void)
+int __init davinci_clk_disable_unused(void)
{
struct clk *ck;
@@ -237,7 +237,6 @@ static int __init clk_disable_unused(void)
return 0;
}
-late_initcall(clk_disable_unused);
#endif
static unsigned long clk_sysclk_recalc(struct clk *clk)
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
index cb9b2e47510c..64b0f65a8639 100644
--- a/arch/arm/mach-davinci/common.c
+++ b/arch/arm/mach-davinci/common.c
@@ -117,3 +117,10 @@ void __init davinci_common_init(struct davinci_soc_info *soc_info)
err:
panic("davinci_common_init: SoC Initialization failed\n");
}
+
+void __init davinci_init_late(void)
+{
+ davinci_cpufreq_init();
+ davinci_pm_init();
+ davinci_clk_disable_unused();
+}
diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c
index 031048fec9f5..4729eaab0f40 100644
--- a/arch/arm/mach-davinci/cpufreq.c
+++ b/arch/arm/mach-davinci/cpufreq.c
@@ -240,10 +240,9 @@ static struct platform_driver davinci_cpufreq_driver = {
.remove = __exit_p(davinci_cpufreq_remove),
};
-static int __init davinci_cpufreq_init(void)
+int __init davinci_cpufreq_init(void)
{
return platform_driver_probe(&davinci_cpufreq_driver,
davinci_cpufreq_probe);
}
-late_initcall(davinci_cpufreq_init);
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index 95ce019c9b98..a685e9706b7b 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -353,9 +353,10 @@ static int irq2ctlr(int irq)
*****************************************************************************/
static irqreturn_t dma_irq_handler(int irq, void *data)
{
- int i;
int ctlr;
- unsigned int cnt = 0;
+ u32 sh_ier;
+ u32 sh_ipr;
+ u32 bank;
ctlr = irq2ctlr(irq);
if (ctlr < 0)
@@ -363,41 +364,39 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
dev_dbg(data, "dma_irq_handler\n");
- if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0) &&
- (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
- return IRQ_NONE;
+ sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
+ if (!sh_ipr) {
+ sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
+ if (!sh_ipr)
+ return IRQ_NONE;
+ sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
+ bank = 1;
+ } else {
+ sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
+ bank = 0;
+ }
- while (1) {
- int j;
- if (edma_shadow0_read_array(ctlr, SH_IPR, 0) &
- edma_shadow0_read_array(ctlr, SH_IER, 0))
- j = 0;
- else if (edma_shadow0_read_array(ctlr, SH_IPR, 1) &
- edma_shadow0_read_array(ctlr, SH_IER, 1))
- j = 1;
- else
- break;
- dev_dbg(data, "IPR%d %08x\n", j,
- edma_shadow0_read_array(ctlr, SH_IPR, j));
- for (i = 0; i < 32; i++) {
- int k = (j << 5) + i;
- if ((edma_shadow0_read_array(ctlr, SH_IPR, j) & BIT(i))
- && (edma_shadow0_read_array(ctlr,
- SH_IER, j) & BIT(i))) {
- /* Clear the corresponding IPR bits */
- edma_shadow0_write_array(ctlr, SH_ICR, j,
- BIT(i));
- if (edma_cc[ctlr]->intr_data[k].callback)
- edma_cc[ctlr]->intr_data[k].callback(
- k, DMA_COMPLETE,
- edma_cc[ctlr]->intr_data[k].
- data);
- }
+ do {
+ u32 slot;
+ u32 channel;
+
+ dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);
+
+ slot = __ffs(sh_ipr);
+ sh_ipr &= ~(BIT(slot));
+
+ if (sh_ier & BIT(slot)) {
+ channel = (bank << 5) | slot;
+ /* Clear the corresponding IPR bits */
+ edma_shadow0_write_array(ctlr, SH_ICR, bank,
+ BIT(slot));
+ if (edma_cc[ctlr]->intr_data[channel].callback)
+ edma_cc[ctlr]->intr_data[channel].callback(
+ channel, DMA_COMPLETE,
+ edma_cc[ctlr]->intr_data[channel].data);
}
- cnt++;
- if (cnt > 10)
- break;
- }
+ } while (sh_ipr);
+
edma_shadow0_write(ctlr, SH_IEVAL, 1);
return IRQ_HANDLED;
}
diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h
index 5cd39a4e0c96..bdc4aa8e672a 100644
--- a/arch/arm/mach-davinci/include/mach/common.h
+++ b/arch/arm/mach-davinci/include/mach/common.h
@@ -84,6 +84,25 @@ extern struct davinci_soc_info davinci_soc_info;
extern void davinci_common_init(struct davinci_soc_info *soc_info);
extern void davinci_init_ide(void);
void davinci_restart(char mode, const char *cmd);
+void davinci_init_late(void);
+
+#ifdef CONFIG_DAVINCI_RESET_CLOCKS
+int davinci_clk_disable_unused(void);
+#else
+static inline int davinci_clk_disable_unused(void) { return 0; }
+#endif
+
+#ifdef CONFIG_CPU_FREQ
+int davinci_cpufreq_init(void);
+#else
+static inline int davinci_cpufreq_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_SUSPEND
+int davinci_pm_init(void);
+#else
+static inline int davinci_pm_init(void) { return 0; }
+#endif
/* standard place to map on-chip SRAMs; they *may* support DMA */
#define SRAM_VIRT 0xfffe0000
diff --git a/arch/arm/mach-davinci/include/mach/debug-macro.S b/arch/arm/mach-davinci/include/mach/debug-macro.S
index cf94552d5274..34290d14754b 100644
--- a/arch/arm/mach-davinci/include/mach/debug-macro.S
+++ b/arch/arm/mach-davinci/include/mach/debug-macro.S
@@ -22,46 +22,28 @@
#define UART_SHIFT 2
- .pushsection .data
-davinci_uart_phys: .word 0
-davinci_uart_virt: .word 0
- .popsection
-
- .macro addruart, rp, rv, tmp
-
- /* Use davinci_uart_phys/virt if already configured */
-10: adr \rp, 99f @ get effective addr of 99f
- ldr \rv, [\rp] @ get absolute addr of 99f
- sub \rv, \rv, \rp @ offset between the two
- ldr \rp, [\rp, #4] @ abs addr of omap_uart_phys
- sub \tmp, \rp, \rv @ make it effective
- ldr \rp, [\tmp, #0] @ davinci_uart_phys
- ldr \rv, [\tmp, #4] @ davinci_uart_virt
- cmp \rp, #0 @ is port configured?
- cmpne \rv, #0
- bne 100f @ already configured
-
- /* Check the debug UART address set in uncompress.h */
- and \rp, pc, #0xff000000
- ldr \rv, =DAVINCI_UART_INFO_OFS
- add \rp, \rp, \rv
-
- /* Copy uart phys address from decompressor uart info */
- ldr \rv, [\rp, #0]
- str \rv, [\tmp, #0]
-
- /* Copy uart virt address from decompressor uart info */
- ldr \rv, [\rp, #4]
- str \rv, [\tmp, #4]
-
- b 10b
+#if defined(CONFIG_DEBUG_DAVINCI_DMx_UART0)
+#define UART_BASE DAVINCI_UART0_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART0)
+#define UART_BASE DA8XX_UART0_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART1)
+#define UART_BASE DA8XX_UART1_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART2)
+#define UART_BASE DA8XX_UART2_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_TNETV107X_UART1)
+#define UART_BASE TNETV107X_UART2_BASE
+#define UART_VIRTBASE TNETV107X_UART2_VIRT
+#else
+#error "Select a specifc port for DEBUG_LL"
+#endif
- .align
-99: .word .
- .word davinci_uart_phys
- .ltorg
+#ifndef UART_VIRTBASE
+#define UART_VIRTBASE IO_ADDRESS(UART_BASE)
+#endif
-100:
+ .macro addruart, rp, rv, tmp
+ ldr \rp, =UART_BASE
+ ldr \rv, =UART_VIRTBASE
.endm
.macro senduart,rd,rx
diff --git a/arch/arm/mach-davinci/include/mach/hardware.h b/arch/arm/mach-davinci/include/mach/hardware.h
index 2184691ebc2f..16bb42291d39 100644
--- a/arch/arm/mach-davinci/include/mach/hardware.h
+++ b/arch/arm/mach-davinci/include/mach/hardware.h
@@ -22,7 +22,7 @@
/*
* I/O mapping
*/
-#define IO_PHYS 0x01c00000UL
+#define IO_PHYS UL(0x01c00000)
#define IO_OFFSET 0xfd000000 /* Virtual IO = 0xfec00000 */
#define IO_SIZE 0x00400000
#define IO_VIRT (IO_PHYS + IO_OFFSET)
diff --git a/arch/arm/mach-davinci/include/mach/serial.h b/arch/arm/mach-davinci/include/mach/serial.h
index e347d88fef91..46b3cd11c3c2 100644
--- a/arch/arm/mach-davinci/include/mach/serial.h
+++ b/arch/arm/mach-davinci/include/mach/serial.h
@@ -15,16 +15,6 @@
#include <mach/hardware.h>
-/*
- * Stolen area that contains debug uart physical and virtual addresses. These
- * addresses are filled in by the uncompress.h code, and are used by the debug
- * macros in debug-macro.S.
- *
- * This area sits just below the page tables (see arch/arm/kernel/head.S).
- * We define it as a relative offset from start of usable RAM.
- */
-#define DAVINCI_UART_INFO_OFS 0x3ff8
-
#define DAVINCI_UART0_BASE (IO_PHYS + 0x20000)
#define DAVINCI_UART1_BASE (IO_PHYS + 0x20400)
#define DAVINCI_UART2_BASE (IO_PHYS + 0x20800)
diff --git a/arch/arm/mach-davinci/include/mach/uncompress.h b/arch/arm/mach-davinci/include/mach/uncompress.h
index da2fb2c2155a..18cfd4977155 100644
--- a/arch/arm/mach-davinci/include/mach/uncompress.h
+++ b/arch/arm/mach-davinci/include/mach/uncompress.h
@@ -43,37 +43,27 @@ static inline void flush(void)
barrier();
}
-static inline void set_uart_info(u32 phys, void * __iomem virt)
+static inline void set_uart_info(u32 phys)
{
- /*
- * Get address of some.bss variable and round it down
- * a la CONFIG_AUTO_ZRELADDR.
- */
- u32 ram_start = (u32)&uart & 0xf8000000;
- u32 *uart_info = (u32 *)(ram_start + DAVINCI_UART_INFO_OFS);
-
uart = (u32 *)phys;
- uart_info[0] = phys;
- uart_info[1] = (u32)virt;
}
-#define _DEBUG_LL_ENTRY(machine, phys, virt) \
- if (machine_is_##machine()) { \
- set_uart_info(phys, virt); \
- break; \
+#define _DEBUG_LL_ENTRY(machine, phys) \
+ { \
+ if (machine_is_##machine()) { \
+ set_uart_info(phys); \
+ break; \
+ } \
}
#define DEBUG_LL_DAVINCI(machine, port) \
- _DEBUG_LL_ENTRY(machine, DAVINCI_UART##port##_BASE, \
- IO_ADDRESS(DAVINCI_UART##port##_BASE))
+ _DEBUG_LL_ENTRY(machine, DAVINCI_UART##port##_BASE)
#define DEBUG_LL_DA8XX(machine, port) \
- _DEBUG_LL_ENTRY(machine, DA8XX_UART##port##_BASE, \
- IO_ADDRESS(DA8XX_UART##port##_BASE))
+ _DEBUG_LL_ENTRY(machine, DA8XX_UART##port##_BASE)
#define DEBUG_LL_TNETV107X(machine, port) \
- _DEBUG_LL_ENTRY(machine, TNETV107X_UART##port##_BASE, \
- TNETV107X_UART##port##_VIRT)
+ _DEBUG_LL_ENTRY(machine, TNETV107X_UART##port##_BASE)
static inline void __arch_decomp_setup(unsigned long arch_id)
{
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index 04c49f7543ef..eb8360b33aa9 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -152,8 +152,7 @@ static struct platform_driver davinci_pm_driver = {
.remove = __exit_p(davinci_pm_remove),
};
-static int __init davinci_pm_init(void)
+int __init davinci_pm_init(void)
{
return platform_driver_probe(&davinci_pm_driver, davinci_pm_probe);
}
-late_initcall(davinci_pm_init);
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 42ab1e7c4ecc..9493076fc594 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/ata_platform.h>
#include <linux/gpio.h>
#include <asm/page.h>
@@ -68,6 +68,19 @@ void __init dove_map_io(void)
}
/*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
+static struct clk *tclk;
+
+static void __init clk_init(void)
+{
+ tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
+ get_tclk());
+
+ orion_clkdev_init(tclk);
+}
+
+/*****************************************************************************
* EHCI0
****************************************************************************/
void __init dove_ehci0_init(void)
@@ -89,8 +102,7 @@ void __init dove_ehci1_init(void)
void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data,
- DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM,
- 0, get_tclk());
+ DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM, 0);
}
/*****************************************************************************
@@ -116,7 +128,7 @@ void __init dove_sata_init(struct mv_sata_platform_data *sata_data)
void __init dove_uart0_init(void)
{
orion_uart0_init(DOVE_UART0_VIRT_BASE, DOVE_UART0_PHYS_BASE,
- IRQ_DOVE_UART_0, get_tclk());
+ IRQ_DOVE_UART_0, tclk);
}
/*****************************************************************************
@@ -125,7 +137,7 @@ void __init dove_uart0_init(void)
void __init dove_uart1_init(void)
{
orion_uart1_init(DOVE_UART1_VIRT_BASE, DOVE_UART1_PHYS_BASE,
- IRQ_DOVE_UART_1, get_tclk());
+ IRQ_DOVE_UART_1, tclk);
}
/*****************************************************************************
@@ -134,7 +146,7 @@ void __init dove_uart1_init(void)
void __init dove_uart2_init(void)
{
orion_uart2_init(DOVE_UART2_VIRT_BASE, DOVE_UART2_PHYS_BASE,
- IRQ_DOVE_UART_2, get_tclk());
+ IRQ_DOVE_UART_2, tclk);
}
/*****************************************************************************
@@ -143,7 +155,7 @@ void __init dove_uart2_init(void)
void __init dove_uart3_init(void)
{
orion_uart3_init(DOVE_UART3_VIRT_BASE, DOVE_UART3_PHYS_BASE,
- IRQ_DOVE_UART_3, get_tclk());
+ IRQ_DOVE_UART_3, tclk);
}
/*****************************************************************************
@@ -151,12 +163,12 @@ void __init dove_uart3_init(void)
****************************************************************************/
void __init dove_spi0_init(void)
{
- orion_spi_init(DOVE_SPI0_PHYS_BASE, get_tclk());
+ orion_spi_init(DOVE_SPI0_PHYS_BASE);
}
void __init dove_spi1_init(void)
{
- orion_spi_1_init(DOVE_SPI1_PHYS_BASE, get_tclk());
+ orion_spi_1_init(DOVE_SPI1_PHYS_BASE);
}
/*****************************************************************************
@@ -272,18 +284,17 @@ void __init dove_sdio1_init(void)
void __init dove_init(void)
{
- int tclk;
-
- tclk = get_tclk();
-
printk(KERN_INFO "Dove 88AP510 SoC, ");
- printk(KERN_INFO "TCLK = %dMHz\n", (tclk + 499999) / 1000000);
+ printk(KERN_INFO "TCLK = %dMHz\n", (get_tclk() + 499999) / 1000000);
#ifdef CONFIG_CACHE_TAUROS2
tauros2_init();
#endif
dove_setup_cpu_mbus();
+ /* Setup root of clk tree */
+ clk_init();
+
/* internal devices that every board has */
dove_rtc_init();
dove_xor0_init();
diff --git a/arch/arm/mach-dove/dove-db-setup.c b/arch/arm/mach-dove/dove-db-setup.c
index ea77ae430b2d..bc2867f11346 100644
--- a/arch/arm/mach-dove/dove-db-setup.c
+++ b/arch/arm/mach-dove/dove-db-setup.c
@@ -20,7 +20,6 @@
#include <linux/i2c.h>
#include <linux/pci.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <linux/spi/flash.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-ep93xx/adssphere.c b/arch/arm/mach-ep93xx/adssphere.c
index 2d45947a3034..a472777e9eba 100644
--- a/arch/arm/mach-ep93xx/adssphere.c
+++ b/arch/arm/mach-ep93xx/adssphere.c
@@ -41,5 +41,6 @@ MACHINE_START(ADSSPHERE, "ADS Sphere board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = adssphere_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 66b1494f23a6..4dd07a0e3604 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -675,7 +675,7 @@ int ep93xx_keypad_acquire_gpio(struct platform_device *pdev)
fail_gpio_d:
gpio_free(EP93XX_GPIO_LINE_C(i));
fail_gpio_c:
- for ( ; i >= 0; --i) {
+ for (--i; i >= 0; --i) {
gpio_free(EP93XX_GPIO_LINE_C(i));
gpio_free(EP93XX_GPIO_LINE_D(i));
}
@@ -834,3 +834,8 @@ void ep93xx_restart(char mode, const char *cmd)
while (1)
;
}
+
+void __init ep93xx_init_late(void)
+{
+ crunch_init();
+}
diff --git a/arch/arm/mach-ep93xx/crunch.c b/arch/arm/mach-ep93xx/crunch.c
index 74753e2df603..a4a2ab9648c9 100644
--- a/arch/arm/mach-ep93xx/crunch.c
+++ b/arch/arm/mach-ep93xx/crunch.c
@@ -79,12 +79,10 @@ static struct notifier_block crunch_notifier_block = {
.notifier_call = crunch_do,
};
-static int __init crunch_init(void)
+int __init crunch_init(void)
{
thread_register_notifier(&crunch_notifier_block);
elf_hwcap |= HWCAP_CRUNCH;
return 0;
}
-
-late_initcall(crunch_init);
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
index da9047d726f0..d74c5cddb98b 100644
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ b/arch/arm/mach-ep93xx/edb93xx.c
@@ -255,6 +255,7 @@ MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -268,6 +269,7 @@ MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -281,6 +283,7 @@ MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -294,6 +297,7 @@ MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -307,6 +311,7 @@ MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -320,6 +325,7 @@ MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -333,6 +339,7 @@ MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -346,6 +353,7 @@ MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-ep93xx/gesbc9312.c b/arch/arm/mach-ep93xx/gesbc9312.c
index fcdffbe49dcc..437c34111155 100644
--- a/arch/arm/mach-ep93xx/gesbc9312.c
+++ b/arch/arm/mach-ep93xx/gesbc9312.c
@@ -41,5 +41,6 @@ MACHINE_START(GESBC9312, "Glomation GESBC-9312-sx")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = gesbc9312_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h
index 602bd87fd0ab..1ecb040d98bf 100644
--- a/arch/arm/mach-ep93xx/include/mach/platform.h
+++ b/arch/arm/mach-ep93xx/include/mach/platform.h
@@ -53,5 +53,12 @@ void ep93xx_init_devices(void);
extern struct sys_timer ep93xx_timer;
void ep93xx_restart(char, const char *);
+void ep93xx_init_late(void);
+
+#ifdef CONFIG_CRUNCH
+int crunch_init(void);
+#else
+static inline int crunch_init(void) { return 0; }
+#endif
#endif
diff --git a/arch/arm/mach-ep93xx/micro9.c b/arch/arm/mach-ep93xx/micro9.c
index dc431c5f04ce..3d7cdab725b2 100644
--- a/arch/arm/mach-ep93xx/micro9.c
+++ b/arch/arm/mach-ep93xx/micro9.c
@@ -85,6 +85,7 @@ MACHINE_START(MICRO9, "Contec Micro9-High")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -98,6 +99,7 @@ MACHINE_START(MICRO9M, "Contec Micro9-Mid")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -111,6 +113,7 @@ MACHINE_START(MICRO9L, "Contec Micro9-Lite")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -124,6 +127,7 @@ MACHINE_START(MICRO9S, "Contec Micro9-Slim")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
index f40c2987e545..33dc07917417 100644
--- a/arch/arm/mach-ep93xx/simone.c
+++ b/arch/arm/mach-ep93xx/simone.c
@@ -86,5 +86,6 @@ MACHINE_START(SIM_ONE, "Simplemachines Sim.One Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = simone_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c
index 0c00852ef160..eb282378fa78 100644
--- a/arch/arm/mach-ep93xx/snappercl15.c
+++ b/arch/arm/mach-ep93xx/snappercl15.c
@@ -183,5 +183,6 @@ MACHINE_START(SNAPPER_CL15, "Bluewater Systems Snapper CL15")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = snappercl15_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 5ea790942e94..d4ef339d961e 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -252,5 +252,6 @@ MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = ts72xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
index ba156eb225e8..2905a4929bdc 100644
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ b/arch/arm/mach-ep93xx/vision_ep9307.c
@@ -367,5 +367,6 @@ MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = vision_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 15b05b89cc39..573be57d3d28 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -61,6 +61,9 @@ config SOC_EXYNOS5250
bool "SAMSUNG EXYNOS5250"
default y
depends on ARCH_EXYNOS5
+ select SAMSUNG_DMADEV
+ select S5P_PM if PM
+ select S5P_SLEEP if PM
help
Enable EXYNOS5250 SoC support
@@ -70,7 +73,7 @@ config EXYNOS4_MCT
help
Use MCT (Multi Core Timer) as kernel timers
-config EXYNOS4_DEV_DMA
+config EXYNOS_DEV_DMA
bool
help
Compile in amba device definitions for DMA controller
@@ -80,15 +83,20 @@ config EXYNOS4_DEV_AHCI
help
Compile in platform device definitions for AHCI
+config EXYNOS_DEV_DRM
+ bool
+ help
+ Compile in platform device definitions for core DRM device
+
config EXYNOS4_SETUP_FIMD0
bool
help
Common setup code for FIMD0.
-config EXYNOS4_DEV_SYSMMU
+config EXYNOS_DEV_SYSMMU
bool
help
- Common setup code for SYSTEM MMU in EXYNOS4
+ Common setup code for SYSTEM MMU in EXYNOS platforms
config EXYNOS4_DEV_DWMCI
bool
@@ -161,7 +169,7 @@ config EXYNOS4_SETUP_USB_PHY
help
Common setup code for USB PHY controller
-config EXYNOS4_SETUP_SPI
+config EXYNOS_SETUP_SPI
bool
help
Common setup code for SPI GPIO configurations.
@@ -201,12 +209,12 @@ config MACH_SMDKV310
select S3C_DEV_HSMMC3
select SAMSUNG_DEV_BACKLIGHT
select EXYNOS_DEV_DRM
+ select EXYNOS_DEV_SYSMMU
select EXYNOS4_DEV_AHCI
select SAMSUNG_DEV_KEYPAD
select EXYNOS4_DEV_DMA
select SAMSUNG_DEV_PWM
select EXYNOS4_DEV_USB_OHCI
- select EXYNOS4_DEV_SYSMMU
select EXYNOS4_SETUP_FIMD0
select EXYNOS4_SETUP_I2C1
select EXYNOS4_SETUP_KEYPAD
@@ -224,8 +232,7 @@ config MACH_ARMLEX4210
select S3C_DEV_HSMMC2
select S3C_DEV_HSMMC3
select EXYNOS4_DEV_AHCI
- select EXYNOS4_DEV_DMA
- select EXYNOS4_DEV_SYSMMU
+ select EXYNOS_DEV_DMA
select EXYNOS4_SETUP_SDHCI
help
Machine support for Samsung ARMLEX4210 based on EXYNOS4210
@@ -256,6 +263,7 @@ config MACH_UNIVERSAL_C210
select S5P_DEV_MFC
select S5P_DEV_ONENAND
select S5P_DEV_TV
+ select EXYNOS_DEV_SYSMMU
select EXYNOS4_DEV_DMA
select EXYNOS_DEV_DRM
select EXYNOS4_SETUP_FIMD0
@@ -332,6 +340,7 @@ config MACH_ORIGEN
select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_PWM
select EXYNOS_DEV_DRM
+ select EXYNOS_DEV_SYSMMU
select EXYNOS4_DEV_DMA
select EXYNOS4_DEV_USB_OHCI
select EXYNOS4_SETUP_FIMD0
@@ -360,7 +369,8 @@ config MACH_SMDK4212
select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_KEYPAD
select SAMSUNG_DEV_PWM
- select EXYNOS4_DEV_DMA
+ select EXYNOS_DEV_SYSMMU
+ select EXYNOS_DEV_DMA
select EXYNOS4_SETUP_I2C1
select EXYNOS4_SETUP_I2C3
select EXYNOS4_SETUP_I2C7
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index 8631840d1b5e..9b58024f7d43 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
-obj-$(CONFIG_ARCH_EXYNOS4) += pmu.o
+obj-$(CONFIG_ARCH_EXYNOS) += pmu.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
@@ -50,10 +50,11 @@ obj-$(CONFIG_MACH_EXYNOS5_DT) += mach-exynos5-dt.o
obj-y += dev-uart.o
obj-$(CONFIG_ARCH_EXYNOS4) += dev-audio.o
obj-$(CONFIG_EXYNOS4_DEV_AHCI) += dev-ahci.o
-obj-$(CONFIG_EXYNOS4_DEV_SYSMMU) += dev-sysmmu.o
obj-$(CONFIG_EXYNOS4_DEV_DWMCI) += dev-dwmci.o
-obj-$(CONFIG_EXYNOS4_DEV_DMA) += dma.o
+obj-$(CONFIG_EXYNOS_DEV_DMA) += dma.o
obj-$(CONFIG_EXYNOS4_DEV_USB_OHCI) += dev-ohci.o
+obj-$(CONFIG_EXYNOS_DEV_DRM) += dev-drm.o
+obj-$(CONFIG_EXYNOS_DEV_SYSMMU) += dev-sysmmu.o
obj-$(CONFIG_ARCH_EXYNOS) += setup-i2c0.o
obj-$(CONFIG_EXYNOS4_SETUP_FIMC) += setup-fimc.o
@@ -68,4 +69,4 @@ obj-$(CONFIG_EXYNOS4_SETUP_I2C7) += setup-i2c7.o
obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD) += setup-keypad.o
obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
obj-$(CONFIG_EXYNOS4_SETUP_USB_PHY) += setup-usb-phy.o
-obj-$(CONFIG_EXYNOS4_SETUP_SPI) += setup-spi.o
+obj-$(CONFIG_EXYNOS_SETUP_SPI) += setup-spi.o
diff --git a/arch/arm/mach-exynos/Makefile.boot b/arch/arm/mach-exynos/Makefile.boot
index b9862e22bf10..31bd181b0514 100644
--- a/arch/arm/mach-exynos/Makefile.boot
+++ b/arch/arm/mach-exynos/Makefile.boot
@@ -1,2 +1,5 @@
zreladdr-y += 0x40008000
params_phys-y := 0x40000100
+
+dtb-$(CONFIG_MACH_EXYNOS4_DT) += exynos4210-origen.dtb exynos4210-smdkv310.dtb
+dtb-$(CONFIG_MACH_EXYNOS5_DT) += exynos5250-smdk5250.dtb
diff --git a/arch/arm/mach-exynos/clock-exynos4.c b/arch/arm/mach-exynos/clock-exynos4.c
index 6efd1e5919fd..bcb7db453145 100644
--- a/arch/arm/mach-exynos/clock-exynos4.c
+++ b/arch/arm/mach-exynos/clock-exynos4.c
@@ -168,7 +168,7 @@ static int exynos4_clk_ip_tv_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_TV, clk, enable);
}
-static int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable)
+int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_IMAGE, clk, enable);
}
@@ -198,6 +198,11 @@ static int exynos4_clk_ip_perir_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_PERIR, clk, enable);
}
+int exynos4_clk_ip_dmc_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(EXYNOS4_CLKGATE_IP_DMC, clk, enable);
+}
+
static int exynos4_clk_hdmiphy_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_HDMI_PHY_CONTROL, clk, enable);
@@ -678,61 +683,55 @@ static struct clk exynos4_init_clocks_off[] = {
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 14),
}, {
- .name = "SYSMMU_MDMA",
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(mfc_l, 0),
+ .enable = exynos4_clk_ip_mfc_ctrl,
+ .ctrlbit = (1 << 1),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(mfc_r, 1),
+ .enable = exynos4_clk_ip_mfc_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(tv, 2),
+ .enable = exynos4_clk_ip_tv_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(jpeg, 3),
+ .enable = exynos4_clk_ip_cam_ctrl,
+ .ctrlbit = (1 << 11),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(rot, 4),
.enable = exynos4_clk_ip_image_ctrl,
- .ctrlbit = (1 << 5),
+ .ctrlbit = (1 << 4),
}, {
- .name = "SYSMMU_FIMC0",
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(fimc0, 5),
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 7),
}, {
- .name = "SYSMMU_FIMC1",
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(fimc1, 6),
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 8),
}, {
- .name = "SYSMMU_FIMC2",
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(fimc2, 7),
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 9),
}, {
- .name = "SYSMMU_FIMC3",
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(fimc3, 8),
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 10),
}, {
- .name = "SYSMMU_JPEG",
- .enable = exynos4_clk_ip_cam_ctrl,
- .ctrlbit = (1 << 11),
- }, {
- .name = "SYSMMU_FIMD0",
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(fimd0, 10),
.enable = exynos4_clk_ip_lcd0_ctrl,
.ctrlbit = (1 << 4),
- }, {
- .name = "SYSMMU_FIMD1",
- .enable = exynos4_clk_ip_lcd1_ctrl,
- .ctrlbit = (1 << 4),
- }, {
- .name = "SYSMMU_PCIe",
- .enable = exynos4_clk_ip_fsys_ctrl,
- .ctrlbit = (1 << 18),
- }, {
- .name = "SYSMMU_G2D",
- .enable = exynos4_clk_ip_image_ctrl,
- .ctrlbit = (1 << 3),
- }, {
- .name = "SYSMMU_ROTATOR",
- .enable = exynos4_clk_ip_image_ctrl,
- .ctrlbit = (1 << 4),
- }, {
- .name = "SYSMMU_TV",
- .enable = exynos4_clk_ip_tv_ctrl,
- .ctrlbit = (1 << 4),
- }, {
- .name = "SYSMMU_MFC_L",
- .enable = exynos4_clk_ip_mfc_ctrl,
- .ctrlbit = (1 << 1),
- }, {
- .name = "SYSMMU_MFC_R",
- .enable = exynos4_clk_ip_mfc_ctrl,
- .ctrlbit = (1 << 2),
}
};
diff --git a/arch/arm/mach-exynos/clock-exynos4.h b/arch/arm/mach-exynos/clock-exynos4.h
index cb71c29c14d1..28a119701182 100644
--- a/arch/arm/mach-exynos/clock-exynos4.h
+++ b/arch/arm/mach-exynos/clock-exynos4.h
@@ -26,5 +26,7 @@ extern struct clk *exynos4_clkset_group_list[];
extern int exynos4_clksrc_mask_fsys_ctrl(struct clk *clk, int enable);
extern int exynos4_clk_ip_fsys_ctrl(struct clk *clk, int enable);
extern int exynos4_clk_ip_lcd1_ctrl(struct clk *clk, int enable);
+extern int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable);
+extern int exynos4_clk_ip_dmc_ctrl(struct clk *clk, int enable);
#endif /* __ASM_ARCH_CLOCK_H */
diff --git a/arch/arm/mach-exynos/clock-exynos4210.c b/arch/arm/mach-exynos/clock-exynos4210.c
index 3b131e4b6ef5..b8689ff60baf 100644
--- a/arch/arm/mach-exynos/clock-exynos4210.c
+++ b/arch/arm/mach-exynos/clock-exynos4210.c
@@ -26,6 +26,7 @@
#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
+#include <mach/sysmmu.h>
#include "common.h"
#include "clock-exynos4.h"
@@ -94,6 +95,16 @@ static struct clk init_clocks_off[] = {
.devname = "exynos4-fb.1",
.enable = exynos4_clk_ip_lcd1_ctrl,
.ctrlbit = (1 << 0),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(2d, 14),
+ .enable = exynos4_clk_ip_image_ctrl,
+ .ctrlbit = (1 << 3),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(fimd1, 11),
+ .enable = exynos4_clk_ip_lcd1_ctrl,
+ .ctrlbit = (1 << 4),
},
};
diff --git a/arch/arm/mach-exynos/clock-exynos4212.c b/arch/arm/mach-exynos/clock-exynos4212.c
index 3ecc01e06f74..da397d21bbcf 100644
--- a/arch/arm/mach-exynos/clock-exynos4212.c
+++ b/arch/arm/mach-exynos/clock-exynos4212.c
@@ -26,6 +26,7 @@
#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
+#include <mach/sysmmu.h>
#include "common.h"
#include "clock-exynos4.h"
@@ -39,6 +40,16 @@ static struct sleep_save exynos4212_clock_save[] = {
};
#endif
+static int exynos4212_clk_ip_isp0_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(EXYNOS4_CLKGATE_IP_ISP0, clk, enable);
+}
+
+static int exynos4212_clk_ip_isp1_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(EXYNOS4_CLKGATE_IP_ISP1, clk, enable);
+}
+
static struct clk *clk_src_mpll_user_list[] = {
[0] = &clk_fin_mpll,
[1] = &exynos4_clk_mout_mpll.clk,
@@ -66,7 +77,32 @@ static struct clksrc_clk clksrcs[] = {
};
static struct clk init_clocks_off[] = {
- /* nothing here yet */
+ {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(2d, 14),
+ .enable = exynos4_clk_ip_dmc_ctrl,
+ .ctrlbit = (1 << 24),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
+ .enable = exynos4212_clk_ip_isp0_ctrl,
+ .ctrlbit = (7 << 8),
+ }, {
+ .name = SYSMMU_CLOCK_NAME2,
+ .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
+ .enable = exynos4212_clk_ip_isp1_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
+ .name = "flite",
+ .devname = "exynos-fimc-lite.0",
+ .enable = exynos4212_clk_ip_isp0_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
+ .name = "flite",
+ .devname = "exynos-fimc-lite.1",
+ .enable = exynos4212_clk_ip_isp0_ctrl,
+ .ctrlbit = (1 << 3),
+ }
};
#ifdef CONFIG_PM_SLEEP
diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c
index 7ac6ff4c46bd..fefa336be2b4 100644
--- a/arch/arm/mach-exynos/clock-exynos5.c
+++ b/arch/arm/mach-exynos/clock-exynos5.c
@@ -30,7 +30,56 @@
#ifdef CONFIG_PM_SLEEP
static struct sleep_save exynos5_clock_save[] = {
- /* will be implemented */
+ SAVE_ITEM(EXYNOS5_CLKSRC_MASK_TOP),
+ SAVE_ITEM(EXYNOS5_CLKSRC_MASK_GSCL),
+ SAVE_ITEM(EXYNOS5_CLKSRC_MASK_DISP1_0),
+ SAVE_ITEM(EXYNOS5_CLKSRC_MASK_FSYS),
+ SAVE_ITEM(EXYNOS5_CLKSRC_MASK_MAUDIO),
+ SAVE_ITEM(EXYNOS5_CLKSRC_MASK_PERIC0),
+ SAVE_ITEM(EXYNOS5_CLKSRC_MASK_PERIC1),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_GSCL),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_DISP1),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_MFC),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_G3D),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_GEN),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_FSYS),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_PERIC),
+ SAVE_ITEM(EXYNOS5_CLKGATE_IP_PERIS),
+ SAVE_ITEM(EXYNOS5_CLKGATE_BLOCK),
+ SAVE_ITEM(EXYNOS5_CLKDIV_TOP0),
+ SAVE_ITEM(EXYNOS5_CLKDIV_TOP1),
+ SAVE_ITEM(EXYNOS5_CLKDIV_GSCL),
+ SAVE_ITEM(EXYNOS5_CLKDIV_DISP1_0),
+ SAVE_ITEM(EXYNOS5_CLKDIV_GEN),
+ SAVE_ITEM(EXYNOS5_CLKDIV_MAUDIO),
+ SAVE_ITEM(EXYNOS5_CLKDIV_FSYS0),
+ SAVE_ITEM(EXYNOS5_CLKDIV_FSYS1),
+ SAVE_ITEM(EXYNOS5_CLKDIV_FSYS2),
+ SAVE_ITEM(EXYNOS5_CLKDIV_FSYS3),
+ SAVE_ITEM(EXYNOS5_CLKDIV_PERIC0),
+ SAVE_ITEM(EXYNOS5_CLKDIV_PERIC1),
+ SAVE_ITEM(EXYNOS5_CLKDIV_PERIC2),
+ SAVE_ITEM(EXYNOS5_CLKDIV_PERIC3),
+ SAVE_ITEM(EXYNOS5_CLKDIV_PERIC4),
+ SAVE_ITEM(EXYNOS5_CLKDIV_PERIC5),
+ SAVE_ITEM(EXYNOS5_SCLK_DIV_ISP),
+ SAVE_ITEM(EXYNOS5_CLKSRC_TOP0),
+ SAVE_ITEM(EXYNOS5_CLKSRC_TOP1),
+ SAVE_ITEM(EXYNOS5_CLKSRC_TOP2),
+ SAVE_ITEM(EXYNOS5_CLKSRC_TOP3),
+ SAVE_ITEM(EXYNOS5_CLKSRC_GSCL),
+ SAVE_ITEM(EXYNOS5_CLKSRC_DISP1_0),
+ SAVE_ITEM(EXYNOS5_CLKSRC_MAUDIO),
+ SAVE_ITEM(EXYNOS5_CLKSRC_FSYS),
+ SAVE_ITEM(EXYNOS5_CLKSRC_PERIC0),
+ SAVE_ITEM(EXYNOS5_CLKSRC_PERIC1),
+ SAVE_ITEM(EXYNOS5_SCLK_SRC_ISP),
+ SAVE_ITEM(EXYNOS5_EPLL_CON0),
+ SAVE_ITEM(EXYNOS5_EPLL_CON1),
+ SAVE_ITEM(EXYNOS5_EPLL_CON2),
+ SAVE_ITEM(EXYNOS5_VPLL_CON0),
+ SAVE_ITEM(EXYNOS5_VPLL_CON1),
+ SAVE_ITEM(EXYNOS5_VPLL_CON2),
};
#endif
@@ -82,6 +131,11 @@ static int exynos5_clksrc_mask_peric0_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(EXYNOS5_CLKSRC_MASK_PERIC0, clk, enable);
}
+static int exynos5_clk_ip_acp_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ACP, clk, enable);
+}
+
static int exynos5_clk_ip_core_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS5_CLKGATE_IP_CORE, clk, enable);
@@ -127,6 +181,21 @@ static int exynos5_clk_ip_peris_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(EXYNOS5_CLKGATE_IP_PERIS, clk, enable);
}
+static int exynos5_clk_ip_gscl_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(EXYNOS5_CLKGATE_IP_GSCL, clk, enable);
+}
+
+static int exynos5_clk_ip_isp0_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ISP0, clk, enable);
+}
+
+static int exynos5_clk_ip_isp1_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ISP1, clk, enable);
+}
+
/* Core list of CMU_CPU side */
static struct clksrc_clk exynos5_clk_mout_apll = {
@@ -145,11 +214,29 @@ static struct clksrc_clk exynos5_clk_sclk_apll = {
.reg_div = { .reg = EXYNOS5_CLKDIV_CPU0, .shift = 24, .size = 3 },
};
+static struct clksrc_clk exynos5_clk_mout_bpll_fout = {
+ .clk = {
+ .name = "mout_bpll_fout",
+ },
+ .sources = &clk_src_bpll_fout,
+ .reg_src = { .reg = EXYNOS5_PLL_DIV2_SEL, .shift = 0, .size = 1 },
+};
+
+static struct clk *exynos5_clk_src_bpll_list[] = {
+ [0] = &clk_fin_bpll,
+ [1] = &exynos5_clk_mout_bpll_fout.clk,
+};
+
+static struct clksrc_sources exynos5_clk_src_bpll = {
+ .sources = exynos5_clk_src_bpll_list,
+ .nr_sources = ARRAY_SIZE(exynos5_clk_src_bpll_list),
+};
+
static struct clksrc_clk exynos5_clk_mout_bpll = {
.clk = {
.name = "mout_bpll",
},
- .sources = &clk_src_bpll,
+ .sources = &exynos5_clk_src_bpll,
.reg_src = { .reg = EXYNOS5_CLKSRC_CDREX, .shift = 0, .size = 1 },
};
@@ -187,11 +274,29 @@ static struct clksrc_clk exynos5_clk_mout_epll = {
.reg_src = { .reg = EXYNOS5_CLKSRC_TOP2, .shift = 12, .size = 1 },
};
+static struct clksrc_clk exynos5_clk_mout_mpll_fout = {
+ .clk = {
+ .name = "mout_mpll_fout",
+ },
+ .sources = &clk_src_mpll_fout,
+ .reg_src = { .reg = EXYNOS5_PLL_DIV2_SEL, .shift = 4, .size = 1 },
+};
+
+static struct clk *exynos5_clk_src_mpll_list[] = {
+ [0] = &clk_fin_mpll,
+ [1] = &exynos5_clk_mout_mpll_fout.clk,
+};
+
+static struct clksrc_sources exynos5_clk_src_mpll = {
+ .sources = exynos5_clk_src_mpll_list,
+ .nr_sources = ARRAY_SIZE(exynos5_clk_src_mpll_list),
+};
+
struct clksrc_clk exynos5_clk_mout_mpll = {
.clk = {
.name = "mout_mpll",
},
- .sources = &clk_src_mpll,
+ .sources = &exynos5_clk_src_mpll,
.reg_src = { .reg = EXYNOS5_CLKSRC_CORE1, .shift = 8, .size = 1 },
};
@@ -454,6 +559,11 @@ static struct clk exynos5_init_clocks_off[] = {
.enable = exynos5_clk_ip_peris_ctrl,
.ctrlbit = (1 << 20),
}, {
+ .name = "watchdog",
+ .parent = &exynos5_clk_aclk_66.clk,
+ .enable = exynos5_clk_ip_peris_ctrl,
+ .ctrlbit = (1 << 19),
+ }, {
.name = "hsmmc",
.devname = "exynos4-sdhci.0",
.parent = &exynos5_clk_aclk_200.clk,
@@ -630,6 +740,76 @@ static struct clk exynos5_init_clocks_off[] = {
.parent = &exynos5_clk_aclk_66.clk,
.enable = exynos5_clk_ip_peric_ctrl,
.ctrlbit = (1 << 14),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(mfc_l, 0),
+ .enable = &exynos5_clk_ip_mfc_ctrl,
+ .ctrlbit = (1 << 1),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(mfc_r, 1),
+ .enable = &exynos5_clk_ip_mfc_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(tv, 2),
+ .enable = &exynos5_clk_ip_disp1_ctrl,
+ .ctrlbit = (1 << 9)
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(jpeg, 3),
+ .enable = &exynos5_clk_ip_gen_ctrl,
+ .ctrlbit = (1 << 7),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(rot, 4),
+ .enable = &exynos5_clk_ip_gen_ctrl,
+ .ctrlbit = (1 << 6)
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(gsc0, 5),
+ .enable = &exynos5_clk_ip_gscl_ctrl,
+ .ctrlbit = (1 << 7),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(gsc1, 6),
+ .enable = &exynos5_clk_ip_gscl_ctrl,
+ .ctrlbit = (1 << 8),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(gsc2, 7),
+ .enable = &exynos5_clk_ip_gscl_ctrl,
+ .ctrlbit = (1 << 9),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(gsc3, 8),
+ .enable = &exynos5_clk_ip_gscl_ctrl,
+ .ctrlbit = (1 << 10),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
+ .enable = &exynos5_clk_ip_isp0_ctrl,
+ .ctrlbit = (0x3F << 8),
+ }, {
+ .name = SYSMMU_CLOCK_NAME2,
+ .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
+ .enable = &exynos5_clk_ip_isp1_ctrl,
+ .ctrlbit = (0xF << 4),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(camif0, 12),
+ .enable = &exynos5_clk_ip_gscl_ctrl,
+ .ctrlbit = (1 << 11),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(camif1, 13),
+ .enable = &exynos5_clk_ip_gscl_ctrl,
+ .ctrlbit = (1 << 12),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(2d, 14),
+ .enable = &exynos5_clk_ip_acp_ctrl,
+ .ctrlbit = (1 << 7)
}
};
@@ -941,10 +1121,12 @@ static struct clksrc_clk *exynos5_sysclks[] = {
&exynos5_clk_mout_apll,
&exynos5_clk_sclk_apll,
&exynos5_clk_mout_bpll,
+ &exynos5_clk_mout_bpll_fout,
&exynos5_clk_mout_bpll_user,
&exynos5_clk_mout_cpll,
&exynos5_clk_mout_epll,
&exynos5_clk_mout_mpll,
+ &exynos5_clk_mout_mpll_fout,
&exynos5_clk_mout_mpll_user,
&exynos5_clk_vpllsrc,
&exynos5_clk_sclk_vpll,
@@ -1008,7 +1190,9 @@ static struct clk *exynos5_clks[] __initdata = {
&exynos5_clk_sclk_hdmi27m,
&exynos5_clk_sclk_hdmiphy,
&clk_fout_bpll,
+ &clk_fout_bpll_div2,
&clk_fout_cpll,
+ &clk_fout_mpll_div2,
&exynos5_clk_armclk,
};
@@ -1173,8 +1357,10 @@ void __init_or_cpufreq exynos5_setup_clocks(void)
clk_fout_apll.ops = &exynos5_fout_apll_ops;
clk_fout_bpll.rate = bpll;
+ clk_fout_bpll_div2.rate = bpll >> 1;
clk_fout_cpll.rate = cpll;
clk_fout_mpll.rate = mpll;
+ clk_fout_mpll_div2.rate = mpll >> 1;
clk_fout_epll.rate = epll;
clk_fout_vpll.rate = vpll;
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index 5ccd6e80a607..742edd3bbec3 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -19,6 +19,9 @@
#include <linux/serial_core.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/export.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
#include <asm/proc-fns.h>
#include <asm/exception.h>
@@ -265,12 +268,12 @@ static struct map_desc exynos5_iodesc[] __initdata = {
}, {
.virtual = (unsigned long)S5P_VA_GIC_CPU,
.pfn = __phys_to_pfn(EXYNOS5_PA_GIC_CPU),
- .length = SZ_64K,
+ .length = SZ_8K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S5P_VA_GIC_DIST,
.pfn = __phys_to_pfn(EXYNOS5_PA_GIC_DIST),
- .length = SZ_64K,
+ .length = SZ_4K,
.type = MT_DEVICE,
},
};
@@ -285,6 +288,11 @@ void exynos5_restart(char mode, const char *cmd)
__raw_writel(0x1, EXYNOS_SWRESET);
}
+void __init exynos_init_late(void)
+{
+ exynos_pm_late_initcall();
+}
+
/*
* exynos_map_io
*
@@ -399,6 +407,7 @@ struct combiner_chip_data {
void __iomem *base;
};
+static struct irq_domain *combiner_irq_domain;
static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
static inline void __iomem *combiner_base(struct irq_data *data)
@@ -411,14 +420,14 @@ static inline void __iomem *combiner_base(struct irq_data *data)
static void combiner_mask_irq(struct irq_data *data)
{
- u32 mask = 1 << (data->irq % 32);
+ u32 mask = 1 << (data->hwirq % 32);
__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}
static void combiner_unmask_irq(struct irq_data *data)
{
- u32 mask = 1 << (data->irq % 32);
+ u32 mask = 1 << (data->hwirq % 32);
__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}
@@ -474,49 +483,131 @@ static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int i
irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}
-static void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
- unsigned int irq_start)
+static void __init combiner_init_one(unsigned int combiner_nr,
+ void __iomem *base)
{
- unsigned int i;
- unsigned int max_nr;
-
- if (soc_is_exynos5250())
- max_nr = EXYNOS5_MAX_COMBINER_NR;
- else
- max_nr = EXYNOS4_MAX_COMBINER_NR;
-
- if (combiner_nr >= max_nr)
- BUG();
-
combiner_data[combiner_nr].base = base;
- combiner_data[combiner_nr].irq_offset = irq_start;
+ combiner_data[combiner_nr].irq_offset = irq_find_mapping(
+ combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
/* Disable all interrupts */
-
__raw_writel(combiner_data[combiner_nr].irq_mask,
base + COMBINER_ENABLE_CLEAR);
+}
+
+#ifdef CONFIG_OF
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (d->of_node != controller)
+ return -EINVAL;
+
+ if (intsize < 2)
+ return -EINVAL;
+
+ *out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
+ *out_type = 0;
+
+ return 0;
+}
+#else
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ return -EINVAL;
+}
+#endif
+
+static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
+ irq_set_chip_data(irq, &combiner_data[hw >> 3]);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+
+ return 0;
+}
+
+static struct irq_domain_ops combiner_irq_domain_ops = {
+ .xlate = combiner_irq_domain_xlate,
+ .map = combiner_irq_domain_map,
+};
+
+void __init combiner_init(void __iomem *combiner_base, struct device_node *np)
+{
+ int i, irq, irq_base;
+ unsigned int max_nr, nr_irq;
+
+ if (np) {
+ if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
+ pr_warning("%s: number of combiners not specified, "
+ "setting default as %d.\n",
+ __func__, EXYNOS4_MAX_COMBINER_NR);
+ max_nr = EXYNOS4_MAX_COMBINER_NR;
+ }
+ } else {
+ max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
+ EXYNOS4_MAX_COMBINER_NR;
+ }
+ nr_irq = max_nr * MAX_IRQ_IN_COMBINER;
- /* Setup the Linux IRQ subsystem */
+ irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
+ if (IS_ERR_VALUE(irq_base)) {
+ irq_base = COMBINER_IRQ(0, 0);
+ pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
+ }
- for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
- + MAX_IRQ_IN_COMBINER; i++) {
- irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
- irq_set_chip_data(i, &combiner_data[combiner_nr]);
- set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+ combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
+ &combiner_irq_domain_ops, &combiner_data);
+ if (WARN_ON(!combiner_irq_domain)) {
+ pr_warning("%s: irq domain init failed\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < max_nr; i++) {
+ combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
+ irq = IRQ_SPI(i);
+#ifdef CONFIG_OF
+ if (np)
+ irq = irq_of_parse_and_map(np, i);
+#endif
+ combiner_cascade_irq(i, irq);
}
}
#ifdef CONFIG_OF
+int __init combiner_of_init(struct device_node *np, struct device_node *parent)
+{
+ void __iomem *combiner_base;
+
+ combiner_base = of_iomap(np, 0);
+ if (!combiner_base) {
+ pr_err("%s: failed to map combiner registers\n", __func__);
+ return -ENXIO;
+ }
+
+ combiner_init(combiner_base, np);
+
+ return 0;
+}
+
static const struct of_device_id exynos4_dt_irq_match[] = {
{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
+ { .compatible = "samsung,exynos4210-combiner",
+ .data = combiner_of_init, },
{},
};
#endif
void __init exynos4_init_irq(void)
{
- int irq;
unsigned int gic_bank_offset;
gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
@@ -528,12 +619,8 @@ void __init exynos4_init_irq(void)
of_irq_init(exynos4_dt_irq_match);
#endif
- for (irq = 0; irq < EXYNOS4_MAX_COMBINER_NR; irq++) {
-
- combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
- COMBINER_IRQ(irq, 0));
- combiner_cascade_irq(irq, IRQ_SPI(irq));
- }
+ if (!of_have_populated_dt())
+ combiner_init(S5P_VA_COMBINER_BASE, NULL);
/*
* The parameters of s5p_init_irq() are for VIC init.
@@ -545,18 +632,9 @@ void __init exynos4_init_irq(void)
void __init exynos5_init_irq(void)
{
- int irq;
-
#ifdef CONFIG_OF
of_irq_init(exynos4_dt_irq_match);
#endif
-
- for (irq = 0; irq < EXYNOS5_MAX_COMBINER_NR; irq++) {
- combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
- COMBINER_IRQ(irq, 0));
- combiner_cascade_irq(irq, IRQ_SPI(irq));
- }
-
/*
* The parameters of s5p_init_irq() are for VIC init.
* Theses parameters should be NULL and 0 because EXYNOS4
@@ -565,30 +643,18 @@ void __init exynos5_init_irq(void)
s5p_init_irq(NULL, 0);
}
-struct bus_type exynos4_subsys = {
- .name = "exynos4-core",
- .dev_name = "exynos4-core",
-};
-
-struct bus_type exynos5_subsys = {
- .name = "exynos5-core",
- .dev_name = "exynos5-core",
+struct bus_type exynos_subsys = {
+ .name = "exynos-core",
+ .dev_name = "exynos-core",
};
static struct device exynos4_dev = {
- .bus = &exynos4_subsys,
-};
-
-static struct device exynos5_dev = {
- .bus = &exynos5_subsys,
+ .bus = &exynos_subsys,
};
static int __init exynos_core_init(void)
{
- if (soc_is_exynos5250())
- return subsys_system_register(&exynos5_subsys, NULL);
- else
- return subsys_system_register(&exynos4_subsys, NULL);
+ return subsys_system_register(&exynos_subsys, NULL);
}
core_initcall(exynos_core_init);
@@ -675,10 +741,7 @@ static int __init exynos_init(void)
{
printk(KERN_INFO "EXYNOS: Initializing architecture\n");
- if (soc_is_exynos5250())
- return device_register(&exynos5_dev);
- else
- return device_register(&exynos4_dev);
+ return device_register(&exynos4_dev);
}
/* uart registration process */
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 677b5467df18..aed2eeb06517 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -19,6 +19,13 @@ void exynos4_init_irq(void);
void exynos5_init_irq(void);
void exynos4_restart(char mode, const char *cmd);
void exynos5_restart(char mode, const char *cmd);
+void exynos_init_late(void);
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS
+int exynos_pm_late_initcall(void);
+#else
+static inline int exynos_pm_late_initcall(void) { return 0; }
+#endif
#ifdef CONFIG_ARCH_EXYNOS4
void exynos4_register_clocks(void);
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index 26dac2893b8e..cff0595d0d35 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -100,7 +100,7 @@ static int exynos4_enter_core0_aftr(struct cpuidle_device *dev,
exynos4_set_wakeupmask();
/* Set value of power down register for aftr mode */
- exynos4_sys_powerdown_conf(SYS_AFTR);
+ exynos_sys_powerdown_conf(SYS_AFTR);
__raw_writel(virt_to_phys(s3c_cpu_resume), REG_DIRECTGO_ADDR);
__raw_writel(S5P_CHECK_AFTR, REG_DIRECTGO_FLAG);
diff --git a/arch/arm/mach-exynos/dev-drm.c b/arch/arm/mach-exynos/dev-drm.c
new file mode 100644
index 000000000000..17c9c6ecc2e0
--- /dev/null
+++ b/arch/arm/mach-exynos/dev-drm.c
@@ -0,0 +1,29 @@
+/*
+ * linux/arch/arm/mach-exynos/dev-drm.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS - core DRM device
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include <plat/devs.h>
+
+static u64 exynos_drm_dma_mask = DMA_BIT_MASK(32);
+
+struct platform_device exynos_device_drm = {
+ .name = "exynos-drm",
+ .dev = {
+ .dma_mask = &exynos_drm_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
+};
diff --git a/arch/arm/mach-exynos/dev-sysmmu.c b/arch/arm/mach-exynos/dev-sysmmu.c
index 781563fcb156..c5b1ea301df0 100644
--- a/arch/arm/mach-exynos/dev-sysmmu.c
+++ b/arch/arm/mach-exynos/dev-sysmmu.c
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-exynos4/dev-sysmmu.c
+/* linux/arch/arm/mach-exynos/dev-sysmmu.c
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * EXYNOS4 - System MMU support
+ * EXYNOS - System MMU support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -12,222 +12,263 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <linux/export.h>
+
+#include <plat/cpu.h>
#include <mach/map.h>
#include <mach/irqs.h>
#include <mach/sysmmu.h>
-#include <plat/s5p-clock.h>
-
-/* These names must be equal to the clock names in mach-exynos4/clock.c */
-const char *sysmmu_ips_name[EXYNOS4_SYSMMU_TOTAL_IPNUM] = {
- "SYSMMU_MDMA" ,
- "SYSMMU_SSS" ,
- "SYSMMU_FIMC0" ,
- "SYSMMU_FIMC1" ,
- "SYSMMU_FIMC2" ,
- "SYSMMU_FIMC3" ,
- "SYSMMU_JPEG" ,
- "SYSMMU_FIMD0" ,
- "SYSMMU_FIMD1" ,
- "SYSMMU_PCIe" ,
- "SYSMMU_G2D" ,
- "SYSMMU_ROTATOR",
- "SYSMMU_MDMA2" ,
- "SYSMMU_TV" ,
- "SYSMMU_MFC_L" ,
- "SYSMMU_MFC_R" ,
-};
-static struct resource exynos4_sysmmu_resource[] = {
- [0] = {
- .start = EXYNOS4_PA_SYSMMU_MDMA,
- .end = EXYNOS4_PA_SYSMMU_MDMA + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_SYSMMU_MDMA0_0,
- .end = IRQ_SYSMMU_MDMA0_0,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = EXYNOS4_PA_SYSMMU_SSS,
- .end = EXYNOS4_PA_SYSMMU_SSS + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [3] = {
- .start = IRQ_SYSMMU_SSS_0,
- .end = IRQ_SYSMMU_SSS_0,
- .flags = IORESOURCE_IRQ,
- },
- [4] = {
- .start = EXYNOS4_PA_SYSMMU_FIMC0,
- .end = EXYNOS4_PA_SYSMMU_FIMC0 + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [5] = {
- .start = IRQ_SYSMMU_FIMC0_0,
- .end = IRQ_SYSMMU_FIMC0_0,
- .flags = IORESOURCE_IRQ,
- },
- [6] = {
- .start = EXYNOS4_PA_SYSMMU_FIMC1,
- .end = EXYNOS4_PA_SYSMMU_FIMC1 + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [7] = {
- .start = IRQ_SYSMMU_FIMC1_0,
- .end = IRQ_SYSMMU_FIMC1_0,
- .flags = IORESOURCE_IRQ,
- },
- [8] = {
- .start = EXYNOS4_PA_SYSMMU_FIMC2,
- .end = EXYNOS4_PA_SYSMMU_FIMC2 + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [9] = {
- .start = IRQ_SYSMMU_FIMC2_0,
- .end = IRQ_SYSMMU_FIMC2_0,
- .flags = IORESOURCE_IRQ,
- },
- [10] = {
- .start = EXYNOS4_PA_SYSMMU_FIMC3,
- .end = EXYNOS4_PA_SYSMMU_FIMC3 + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [11] = {
- .start = IRQ_SYSMMU_FIMC3_0,
- .end = IRQ_SYSMMU_FIMC3_0,
- .flags = IORESOURCE_IRQ,
- },
- [12] = {
- .start = EXYNOS4_PA_SYSMMU_JPEG,
- .end = EXYNOS4_PA_SYSMMU_JPEG + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [13] = {
- .start = IRQ_SYSMMU_JPEG_0,
- .end = IRQ_SYSMMU_JPEG_0,
- .flags = IORESOURCE_IRQ,
- },
- [14] = {
- .start = EXYNOS4_PA_SYSMMU_FIMD0,
- .end = EXYNOS4_PA_SYSMMU_FIMD0 + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [15] = {
- .start = IRQ_SYSMMU_LCD0_M0_0,
- .end = IRQ_SYSMMU_LCD0_M0_0,
- .flags = IORESOURCE_IRQ,
- },
- [16] = {
- .start = EXYNOS4_PA_SYSMMU_FIMD1,
- .end = EXYNOS4_PA_SYSMMU_FIMD1 + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [17] = {
- .start = IRQ_SYSMMU_LCD1_M1_0,
- .end = IRQ_SYSMMU_LCD1_M1_0,
- .flags = IORESOURCE_IRQ,
- },
- [18] = {
- .start = EXYNOS4_PA_SYSMMU_PCIe,
- .end = EXYNOS4_PA_SYSMMU_PCIe + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [19] = {
- .start = IRQ_SYSMMU_PCIE_0,
- .end = IRQ_SYSMMU_PCIE_0,
- .flags = IORESOURCE_IRQ,
- },
- [20] = {
- .start = EXYNOS4_PA_SYSMMU_G2D,
- .end = EXYNOS4_PA_SYSMMU_G2D + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [21] = {
- .start = IRQ_SYSMMU_2D_0,
- .end = IRQ_SYSMMU_2D_0,
- .flags = IORESOURCE_IRQ,
- },
- [22] = {
- .start = EXYNOS4_PA_SYSMMU_ROTATOR,
- .end = EXYNOS4_PA_SYSMMU_ROTATOR + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [23] = {
- .start = IRQ_SYSMMU_ROTATOR_0,
- .end = IRQ_SYSMMU_ROTATOR_0,
- .flags = IORESOURCE_IRQ,
- },
- [24] = {
- .start = EXYNOS4_PA_SYSMMU_MDMA2,
- .end = EXYNOS4_PA_SYSMMU_MDMA2 + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [25] = {
- .start = IRQ_SYSMMU_MDMA1_0,
- .end = IRQ_SYSMMU_MDMA1_0,
- .flags = IORESOURCE_IRQ,
- },
- [26] = {
- .start = EXYNOS4_PA_SYSMMU_TV,
- .end = EXYNOS4_PA_SYSMMU_TV + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [27] = {
- .start = IRQ_SYSMMU_TV_M0_0,
- .end = IRQ_SYSMMU_TV_M0_0,
- .flags = IORESOURCE_IRQ,
- },
- [28] = {
- .start = EXYNOS4_PA_SYSMMU_MFC_L,
- .end = EXYNOS4_PA_SYSMMU_MFC_L + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [29] = {
- .start = IRQ_SYSMMU_MFC_M0_0,
- .end = IRQ_SYSMMU_MFC_M0_0,
- .flags = IORESOURCE_IRQ,
- },
- [30] = {
- .start = EXYNOS4_PA_SYSMMU_MFC_R,
- .end = EXYNOS4_PA_SYSMMU_MFC_R + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [31] = {
- .start = IRQ_SYSMMU_MFC_M1_0,
- .end = IRQ_SYSMMU_MFC_M1_0,
- .flags = IORESOURCE_IRQ,
- },
-};
+static u64 exynos_sysmmu_dma_mask = DMA_BIT_MASK(32);
+
+#define SYSMMU_PLATFORM_DEVICE(ipname, devid) \
+static struct sysmmu_platform_data platdata_##ipname = { \
+ .dbgname = #ipname, \
+}; \
+struct platform_device SYSMMU_PLATDEV(ipname) = \
+{ \
+ .name = SYSMMU_DEVNAME_BASE, \
+ .id = devid, \
+ .dev = { \
+ .dma_mask = &exynos_sysmmu_dma_mask, \
+ .coherent_dma_mask = DMA_BIT_MASK(32), \
+ .platform_data = &platdata_##ipname, \
+ }, \
+}
+
+SYSMMU_PLATFORM_DEVICE(mfc_l, 0);
+SYSMMU_PLATFORM_DEVICE(mfc_r, 1);
+SYSMMU_PLATFORM_DEVICE(tv, 2);
+SYSMMU_PLATFORM_DEVICE(jpeg, 3);
+SYSMMU_PLATFORM_DEVICE(rot, 4);
+SYSMMU_PLATFORM_DEVICE(fimc0, 5); /* fimc* and gsc* exist exclusively */
+SYSMMU_PLATFORM_DEVICE(fimc1, 6);
+SYSMMU_PLATFORM_DEVICE(fimc2, 7);
+SYSMMU_PLATFORM_DEVICE(fimc3, 8);
+SYSMMU_PLATFORM_DEVICE(gsc0, 5);
+SYSMMU_PLATFORM_DEVICE(gsc1, 6);
+SYSMMU_PLATFORM_DEVICE(gsc2, 7);
+SYSMMU_PLATFORM_DEVICE(gsc3, 8);
+SYSMMU_PLATFORM_DEVICE(isp, 9);
+SYSMMU_PLATFORM_DEVICE(fimd0, 10);
+SYSMMU_PLATFORM_DEVICE(fimd1, 11);
+SYSMMU_PLATFORM_DEVICE(camif0, 12);
+SYSMMU_PLATFORM_DEVICE(camif1, 13);
+SYSMMU_PLATFORM_DEVICE(2d, 14);
+
+#define SYSMMU_RESOURCE_NAME(core, ipname) sysmmures_##core##_##ipname
+
+#define SYSMMU_RESOURCE(core, ipname) \
+ static struct resource SYSMMU_RESOURCE_NAME(core, ipname)[] __initdata =
+
+#define DEFINE_SYSMMU_RESOURCE(core, mem, irq) \
+ DEFINE_RES_MEM_NAMED(core##_PA_SYSMMU_##mem, SZ_4K, #mem), \
+ DEFINE_RES_IRQ_NAMED(core##_IRQ_SYSMMU_##irq##_0, #mem)
+
+#define SYSMMU_RESOURCE_DEFINE(core, ipname, mem, irq) \
+ SYSMMU_RESOURCE(core, ipname) { \
+ DEFINE_SYSMMU_RESOURCE(core, mem, irq) \
+ }
-struct platform_device exynos4_device_sysmmu = {
- .name = "s5p-sysmmu",
- .id = 32,
- .num_resources = ARRAY_SIZE(exynos4_sysmmu_resource),
- .resource = exynos4_sysmmu_resource,
+struct sysmmu_resource_map {
+ struct platform_device *pdev;
+ struct resource *res;
+ u32 rnum;
+ struct device *pdd;
+ char *clocknames;
};
-EXPORT_SYMBOL(exynos4_device_sysmmu);
-static struct clk *sysmmu_clk[S5P_SYSMMU_TOTAL_IPNUM];
-void sysmmu_clk_init(struct device *dev, sysmmu_ips ips)
-{
- sysmmu_clk[ips] = clk_get(dev, sysmmu_ips_name[ips]);
- if (IS_ERR(sysmmu_clk[ips]))
- sysmmu_clk[ips] = NULL;
- else
- clk_put(sysmmu_clk[ips]);
+#define SYSMMU_RESOURCE_MAPPING(core, ipname, resname) { \
+ .pdev = &SYSMMU_PLATDEV(ipname), \
+ .res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
+ .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+ .clocknames = SYSMMU_CLOCK_NAME, \
}
-void sysmmu_clk_enable(sysmmu_ips ips)
-{
- if (sysmmu_clk[ips])
- clk_enable(sysmmu_clk[ips]);
+#define SYSMMU_RESOURCE_MAPPING_MC(core, ipname, resname, pdata) { \
+ .pdev = &SYSMMU_PLATDEV(ipname), \
+ .res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
+ .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+ .clocknames = SYSMMU_CLOCK_NAME "," SYSMMU_CLOCK_NAME2, \
+}
+
+#ifdef CONFIG_EXYNOS_DEV_PD
+#define SYSMMU_RESOURCE_MAPPING_PD(core, ipname, resname, pd) { \
+ .pdev = &SYSMMU_PLATDEV(ipname), \
+ .res = &SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
+ .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+ .clocknames = SYSMMU_CLOCK_NAME, \
+ .pdd = &exynos##core##_device_pd[pd].dev, \
+}
+
+#define SYSMMU_RESOURCE_MAPPING_MCPD(core, ipname, resname, pd, pdata) {\
+ .pdev = &SYSMMU_PLATDEV(ipname), \
+ .res = &SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
+ .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+ .clocknames = SYSMMU_CLOCK_NAME "," SYSMMU_CLOCK_NAME2, \
+ .pdd = &exynos##core##_device_pd[pd].dev, \
}
+#else
+#define SYSMMU_RESOURCE_MAPPING_PD(core, ipname, resname, pd) \
+ SYSMMU_RESOURCE_MAPPING(core, ipname, resname)
+#define SYSMMU_RESOURCE_MAPPING_MCPD(core, ipname, resname, pd, pdata) \
+ SYSMMU_RESOURCE_MAPPING_MC(core, ipname, resname, pdata)
+
+#endif /* CONFIG_EXYNOS_DEV_PD */
+
+#ifdef CONFIG_ARCH_EXYNOS4
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc0, FIMC0, FIMC0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc1, FIMC1, FIMC1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc2, FIMC2, FIMC2);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc3, FIMC3, FIMC3);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, jpeg, JPEG, JPEG);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, 2d, G2D, 2D);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, tv, TV, TV_M0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, 2d_acp, 2D_ACP, 2D);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, rot, ROTATOR, ROTATOR);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimd0, FIMD0, LCD0_M0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimd1, FIMD1, LCD1_M1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, flite0, FIMC_LITE0, FIMC_LITE0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, flite1, FIMC_LITE1, FIMC_LITE1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, mfc_r, MFC_R, MFC_M0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, mfc_l, MFC_L, MFC_M1);
+SYSMMU_RESOURCE(EXYNOS4, isp) {
+ DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_ISP, FIMC_ISP),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_DRC, FIMC_DRC),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_FD, FIMC_FD),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS4, ISPCPU, FIMC_CX),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap4[] __initdata = {
+ SYSMMU_RESOURCE_MAPPING_PD(4, fimc0, fimc0, PD_CAM),
+ SYSMMU_RESOURCE_MAPPING_PD(4, fimc1, fimc1, PD_CAM),
+ SYSMMU_RESOURCE_MAPPING_PD(4, fimc2, fimc2, PD_CAM),
+ SYSMMU_RESOURCE_MAPPING_PD(4, fimc3, fimc3, PD_CAM),
+ SYSMMU_RESOURCE_MAPPING_PD(4, tv, tv, PD_TV),
+ SYSMMU_RESOURCE_MAPPING_PD(4, mfc_r, mfc_r, PD_MFC),
+ SYSMMU_RESOURCE_MAPPING_PD(4, mfc_l, mfc_l, PD_MFC),
+ SYSMMU_RESOURCE_MAPPING_PD(4, rot, rot, PD_LCD0),
+ SYSMMU_RESOURCE_MAPPING_PD(4, jpeg, jpeg, PD_CAM),
+ SYSMMU_RESOURCE_MAPPING_PD(4, fimd0, fimd0, PD_LCD0),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap4210[] __initdata = {
+ SYSMMU_RESOURCE_MAPPING_PD(4, 2d, 2d, PD_LCD0),
+ SYSMMU_RESOURCE_MAPPING_PD(4, fimd1, fimd1, PD_LCD1),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap4212[] __initdata = {
+ SYSMMU_RESOURCE_MAPPING(4, 2d, 2d_acp),
+ SYSMMU_RESOURCE_MAPPING_PD(4, camif0, flite0, PD_ISP),
+ SYSMMU_RESOURCE_MAPPING_PD(4, camif1, flite1, PD_ISP),
+ SYSMMU_RESOURCE_MAPPING_PD(4, isp, isp, PD_ISP),
+};
+#endif /* CONFIG_ARCH_EXYNOS4 */
-void sysmmu_clk_disable(sysmmu_ips ips)
+#ifdef CONFIG_ARCH_EXYNOS5
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, jpeg, JPEG, JPEG);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, fimd1, FIMD1, FIMD1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, 2d, 2D, 2D);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, rot, ROTATOR, ROTATOR);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, tv, TV, TV);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, flite0, LITE0, LITE0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, flite1, LITE1, LITE1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc0, GSC0, GSC0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc1, GSC1, GSC1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc2, GSC2, GSC2);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc3, GSC3, GSC3);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, mfc_r, MFC_R, MFC_R);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, mfc_l, MFC_L, MFC_L);
+SYSMMU_RESOURCE(EXYNOS5, isp) {
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, ISP, ISP),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, DRC, DRC),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, FD, FD),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, ISPCPU, MCUISP),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, SCALERC, SCALERCISP),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, SCALERP, SCALERPISP),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, ODC, ODC),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, DIS0, DIS0),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, DIS1, DIS1),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, 3DNR, 3DNR),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap5[] __initdata = {
+ SYSMMU_RESOURCE_MAPPING(5, jpeg, jpeg),
+ SYSMMU_RESOURCE_MAPPING(5, fimd1, fimd1),
+ SYSMMU_RESOURCE_MAPPING(5, 2d, 2d),
+ SYSMMU_RESOURCE_MAPPING(5, rot, rot),
+ SYSMMU_RESOURCE_MAPPING_PD(5, tv, tv, PD_DISP1),
+ SYSMMU_RESOURCE_MAPPING_PD(5, camif0, flite0, PD_GSCL),
+ SYSMMU_RESOURCE_MAPPING_PD(5, camif1, flite1, PD_GSCL),
+ SYSMMU_RESOURCE_MAPPING_PD(5, gsc0, gsc0, PD_GSCL),
+ SYSMMU_RESOURCE_MAPPING_PD(5, gsc1, gsc1, PD_GSCL),
+ SYSMMU_RESOURCE_MAPPING_PD(5, gsc2, gsc2, PD_GSCL),
+ SYSMMU_RESOURCE_MAPPING_PD(5, gsc3, gsc3, PD_GSCL),
+ SYSMMU_RESOURCE_MAPPING_PD(5, mfc_r, mfc_r, PD_MFC),
+ SYSMMU_RESOURCE_MAPPING_PD(5, mfc_l, mfc_l, PD_MFC),
+ SYSMMU_RESOURCE_MAPPING_MCPD(5, isp, isp, PD_ISP, mc_platdata),
+};
+#endif /* CONFIG_ARCH_EXYNOS5 */
+
+static int __init init_sysmmu_platform_device(void)
{
- if (sysmmu_clk[ips])
- clk_disable(sysmmu_clk[ips]);
+ int i, j;
+ struct sysmmu_resource_map *resmap[2] = {NULL, NULL};
+ int nmap[2] = {0, 0};
+
+#ifdef CONFIG_ARCH_EXYNOS5
+ if (soc_is_exynos5250()) {
+ resmap[0] = sysmmu_resmap5;
+ nmap[0] = ARRAY_SIZE(sysmmu_resmap5);
+ nmap[1] = 0;
+ }
+#endif
+
+#ifdef CONFIG_ARCH_EXYNOS4
+ if (resmap[0] == NULL) {
+ resmap[0] = sysmmu_resmap4;
+ nmap[0] = ARRAY_SIZE(sysmmu_resmap4);
+ }
+
+ if (soc_is_exynos4210()) {
+ resmap[1] = sysmmu_resmap4210;
+ nmap[1] = ARRAY_SIZE(sysmmu_resmap4210);
+ }
+
+ if (soc_is_exynos4412() || soc_is_exynos4212()) {
+ resmap[1] = sysmmu_resmap4212;
+ nmap[1] = ARRAY_SIZE(sysmmu_resmap4212);
+ }
+#endif
+
+ for (j = 0; j < 2; j++) {
+ for (i = 0; i < nmap[j]; i++) {
+ struct sysmmu_resource_map *map;
+ struct sysmmu_platform_data *platdata;
+
+ map = &resmap[j][i];
+
+ map->pdev->dev.parent = map->pdd;
+
+ platdata = map->pdev->dev.platform_data;
+ platdata->clockname = map->clocknames;
+
+ if (platform_device_add_resources(map->pdev, map->res,
+ map->rnum)) {
+ pr_err("%s: Failed to add device resources for "
+ "%s.%d\n", __func__,
+ map->pdev->name, map->pdev->id);
+ continue;
+ }
+
+ if (platform_device_register(map->pdev)) {
+ pr_err("%s: Failed to register %s.%d\n",
+ __func__, map->pdev->name,
+ map->pdev->id);
+ }
+ }
+ }
+
+ return 0;
}
+arch_initcall(init_sysmmu_platform_device);
diff --git a/arch/arm/mach-exynos/dma.c b/arch/arm/mach-exynos/dma.c
index 69aaa4503205..f60b66dbcf84 100644
--- a/arch/arm/mach-exynos/dma.c
+++ b/arch/arm/mach-exynos/dma.c
@@ -103,10 +103,45 @@ static u8 exynos4212_pdma0_peri[] = {
DMACH_MIPI_HSI5,
};
-struct dma_pl330_platdata exynos4_pdma0_pdata;
+static u8 exynos5250_pdma0_peri[] = {
+ DMACH_PCM0_RX,
+ DMACH_PCM0_TX,
+ DMACH_PCM2_RX,
+ DMACH_PCM2_TX,
+ DMACH_SPI0_RX,
+ DMACH_SPI0_TX,
+ DMACH_SPI2_RX,
+ DMACH_SPI2_TX,
+ DMACH_I2S0S_TX,
+ DMACH_I2S0_RX,
+ DMACH_I2S0_TX,
+ DMACH_I2S2_RX,
+ DMACH_I2S2_TX,
+ DMACH_UART0_RX,
+ DMACH_UART0_TX,
+ DMACH_UART2_RX,
+ DMACH_UART2_TX,
+ DMACH_UART4_RX,
+ DMACH_UART4_TX,
+ DMACH_SLIMBUS0_RX,
+ DMACH_SLIMBUS0_TX,
+ DMACH_SLIMBUS2_RX,
+ DMACH_SLIMBUS2_TX,
+ DMACH_SLIMBUS4_RX,
+ DMACH_SLIMBUS4_TX,
+ DMACH_AC97_MICIN,
+ DMACH_AC97_PCMIN,
+ DMACH_AC97_PCMOUT,
+ DMACH_MIPI_HSI0,
+ DMACH_MIPI_HSI2,
+ DMACH_MIPI_HSI4,
+ DMACH_MIPI_HSI6,
+};
+
+static struct dma_pl330_platdata exynos_pdma0_pdata;
-static AMBA_AHB_DEVICE(exynos4_pdma0, "dma-pl330.0", 0x00041330,
- EXYNOS4_PA_PDMA0, {EXYNOS4_IRQ_PDMA0}, &exynos4_pdma0_pdata);
+static AMBA_AHB_DEVICE(exynos_pdma0, "dma-pl330.0", 0x00041330,
+ EXYNOS4_PA_PDMA0, {EXYNOS4_IRQ_PDMA0}, &exynos_pdma0_pdata);
static u8 exynos4210_pdma1_peri[] = {
DMACH_PCM0_RX,
@@ -169,10 +204,45 @@ static u8 exynos4212_pdma1_peri[] = {
DMACH_MIPI_HSI7,
};
-static struct dma_pl330_platdata exynos4_pdma1_pdata;
+static u8 exynos5250_pdma1_peri[] = {
+ DMACH_PCM0_RX,
+ DMACH_PCM0_TX,
+ DMACH_PCM1_RX,
+ DMACH_PCM1_TX,
+ DMACH_SPI1_RX,
+ DMACH_SPI1_TX,
+ DMACH_PWM,
+ DMACH_SPDIF,
+ DMACH_I2S0S_TX,
+ DMACH_I2S0_RX,
+ DMACH_I2S0_TX,
+ DMACH_I2S1_RX,
+ DMACH_I2S1_TX,
+ DMACH_UART0_RX,
+ DMACH_UART0_TX,
+ DMACH_UART1_RX,
+ DMACH_UART1_TX,
+ DMACH_UART3_RX,
+ DMACH_UART3_TX,
+ DMACH_SLIMBUS1_RX,
+ DMACH_SLIMBUS1_TX,
+ DMACH_SLIMBUS3_RX,
+ DMACH_SLIMBUS3_TX,
+ DMACH_SLIMBUS5_RX,
+ DMACH_SLIMBUS5_TX,
+ DMACH_SLIMBUS0AUX_RX,
+ DMACH_SLIMBUS0AUX_TX,
+ DMACH_DISP1,
+ DMACH_MIPI_HSI1,
+ DMACH_MIPI_HSI3,
+ DMACH_MIPI_HSI5,
+ DMACH_MIPI_HSI7,
+};
-static AMBA_AHB_DEVICE(exynos4_pdma1, "dma-pl330.1", 0x00041330,
- EXYNOS4_PA_PDMA1, {EXYNOS4_IRQ_PDMA1}, &exynos4_pdma1_pdata);
+static struct dma_pl330_platdata exynos_pdma1_pdata;
+
+static AMBA_AHB_DEVICE(exynos_pdma1, "dma-pl330.1", 0x00041330,
+ EXYNOS4_PA_PDMA1, {EXYNOS4_IRQ_PDMA1}, &exynos_pdma1_pdata);
static u8 mdma_peri[] = {
DMACH_MTOM_0,
@@ -185,46 +255,63 @@ static u8 mdma_peri[] = {
DMACH_MTOM_7,
};
-static struct dma_pl330_platdata exynos4_mdma1_pdata = {
+static struct dma_pl330_platdata exynos_mdma1_pdata = {
.nr_valid_peri = ARRAY_SIZE(mdma_peri),
.peri_id = mdma_peri,
};
-static AMBA_AHB_DEVICE(exynos4_mdma1, "dma-pl330.2", 0x00041330,
- EXYNOS4_PA_MDMA1, {EXYNOS4_IRQ_MDMA1}, &exynos4_mdma1_pdata);
+static AMBA_AHB_DEVICE(exynos_mdma1, "dma-pl330.2", 0x00041330,
+ EXYNOS4_PA_MDMA1, {EXYNOS4_IRQ_MDMA1}, &exynos_mdma1_pdata);
-static int __init exynos4_dma_init(void)
+static int __init exynos_dma_init(void)
{
if (of_have_populated_dt())
return 0;
if (soc_is_exynos4210()) {
- exynos4_pdma0_pdata.nr_valid_peri =
+ exynos_pdma0_pdata.nr_valid_peri =
ARRAY_SIZE(exynos4210_pdma0_peri);
- exynos4_pdma0_pdata.peri_id = exynos4210_pdma0_peri;
- exynos4_pdma1_pdata.nr_valid_peri =
+ exynos_pdma0_pdata.peri_id = exynos4210_pdma0_peri;
+ exynos_pdma1_pdata.nr_valid_peri =
ARRAY_SIZE(exynos4210_pdma1_peri);
- exynos4_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
+ exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
- exynos4_pdma0_pdata.nr_valid_peri =
+ exynos_pdma0_pdata.nr_valid_peri =
ARRAY_SIZE(exynos4212_pdma0_peri);
- exynos4_pdma0_pdata.peri_id = exynos4212_pdma0_peri;
- exynos4_pdma1_pdata.nr_valid_peri =
+ exynos_pdma0_pdata.peri_id = exynos4212_pdma0_peri;
+ exynos_pdma1_pdata.nr_valid_peri =
ARRAY_SIZE(exynos4212_pdma1_peri);
- exynos4_pdma1_pdata.peri_id = exynos4212_pdma1_peri;
+ exynos_pdma1_pdata.peri_id = exynos4212_pdma1_peri;
+ } else if (soc_is_exynos5250()) {
+ exynos_pdma0_pdata.nr_valid_peri =
+ ARRAY_SIZE(exynos5250_pdma0_peri);
+ exynos_pdma0_pdata.peri_id = exynos5250_pdma0_peri;
+ exynos_pdma1_pdata.nr_valid_peri =
+ ARRAY_SIZE(exynos5250_pdma1_peri);
+ exynos_pdma1_pdata.peri_id = exynos5250_pdma1_peri;
+
+ exynos_pdma0_device.res.start = EXYNOS5_PA_PDMA0;
+ exynos_pdma0_device.res.end = EXYNOS5_PA_PDMA0 + SZ_4K;
+ exynos_pdma0_device.irq[0] = EXYNOS5_IRQ_PDMA0;
+ exynos_pdma1_device.res.start = EXYNOS5_PA_PDMA1;
+ exynos_pdma1_device.res.end = EXYNOS5_PA_PDMA1 + SZ_4K;
+		exynos_pdma1_device.irq[0] = EXYNOS5_IRQ_PDMA1;
+ exynos_mdma1_device.res.start = EXYNOS5_PA_MDMA1;
+ exynos_mdma1_device.res.end = EXYNOS5_PA_MDMA1 + SZ_4K;
+		exynos_mdma1_device.irq[0] = EXYNOS5_IRQ_MDMA1;
}
- dma_cap_set(DMA_SLAVE, exynos4_pdma0_pdata.cap_mask);
- dma_cap_set(DMA_CYCLIC, exynos4_pdma0_pdata.cap_mask);
- amba_device_register(&exynos4_pdma0_device, &iomem_resource);
+ dma_cap_set(DMA_SLAVE, exynos_pdma0_pdata.cap_mask);
+ dma_cap_set(DMA_CYCLIC, exynos_pdma0_pdata.cap_mask);
+ amba_device_register(&exynos_pdma0_device, &iomem_resource);
- dma_cap_set(DMA_SLAVE, exynos4_pdma1_pdata.cap_mask);
- dma_cap_set(DMA_CYCLIC, exynos4_pdma1_pdata.cap_mask);
- amba_device_register(&exynos4_pdma1_device, &iomem_resource);
+ dma_cap_set(DMA_SLAVE, exynos_pdma1_pdata.cap_mask);
+ dma_cap_set(DMA_CYCLIC, exynos_pdma1_pdata.cap_mask);
+ amba_device_register(&exynos_pdma1_device, &iomem_resource);
- dma_cap_set(DMA_MEMCPY, exynos4_mdma1_pdata.cap_mask);
- amba_device_register(&exynos4_mdma1_device, &iomem_resource);
+ dma_cap_set(DMA_MEMCPY, exynos_mdma1_pdata.cap_mask);
+ amba_device_register(&exynos_mdma1_device, &iomem_resource);
return 0;
}
-arch_initcall(exynos4_dma_init);
+arch_initcall(exynos_dma_init);
diff --git a/arch/arm/mach-exynos/include/mach/gpio.h b/arch/arm/mach-exynos/include/mach/gpio.h
index d7498afe036a..eb24f1eb8e3b 100644
--- a/arch/arm/mach-exynos/include/mach/gpio.h
+++ b/arch/arm/mach-exynos/include/mach/gpio.h
@@ -153,10 +153,11 @@ enum exynos4_gpio_number {
#define EXYNOS5_GPIO_B2_NR (4)
#define EXYNOS5_GPIO_B3_NR (4)
#define EXYNOS5_GPIO_C0_NR (7)
-#define EXYNOS5_GPIO_C1_NR (7)
+#define EXYNOS5_GPIO_C1_NR (4)
#define EXYNOS5_GPIO_C2_NR (7)
#define EXYNOS5_GPIO_C3_NR (7)
-#define EXYNOS5_GPIO_D0_NR (8)
+#define EXYNOS5_GPIO_C4_NR (7)
+#define EXYNOS5_GPIO_D0_NR (4)
#define EXYNOS5_GPIO_D1_NR (8)
#define EXYNOS5_GPIO_Y0_NR (6)
#define EXYNOS5_GPIO_Y1_NR (4)
@@ -199,7 +200,8 @@ enum exynos5_gpio_number {
EXYNOS5_GPIO_C1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C0),
EXYNOS5_GPIO_C2_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C1),
EXYNOS5_GPIO_C3_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C2),
- EXYNOS5_GPIO_D0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C3),
+ EXYNOS5_GPIO_C4_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C3),
+ EXYNOS5_GPIO_D0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C4),
EXYNOS5_GPIO_D1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_D0),
EXYNOS5_GPIO_Y0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_D1),
EXYNOS5_GPIO_Y1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_Y0),
@@ -242,6 +244,7 @@ enum exynos5_gpio_number {
#define EXYNOS5_GPC1(_nr) (EXYNOS5_GPIO_C1_START + (_nr))
#define EXYNOS5_GPC2(_nr) (EXYNOS5_GPIO_C2_START + (_nr))
#define EXYNOS5_GPC3(_nr) (EXYNOS5_GPIO_C3_START + (_nr))
+#define EXYNOS5_GPC4(_nr) (EXYNOS5_GPIO_C4_START + (_nr))
#define EXYNOS5_GPD0(_nr) (EXYNOS5_GPIO_D0_START + (_nr))
#define EXYNOS5_GPD1(_nr) (EXYNOS5_GPIO_D1_START + (_nr))
#define EXYNOS5_GPY0(_nr) (EXYNOS5_GPIO_Y0_START + (_nr))
diff --git a/arch/arm/mach-exynos/include/mach/irqs.h b/arch/arm/mach-exynos/include/mach/irqs.h
index c02dae7bf4a3..7a4b4789eb72 100644
--- a/arch/arm/mach-exynos/include/mach/irqs.h
+++ b/arch/arm/mach-exynos/include/mach/irqs.h
@@ -154,6 +154,13 @@
#define EXYNOS4_IRQ_SYSMMU_MFC_M1_0 COMBINER_IRQ(5, 6)
#define EXYNOS4_IRQ_SYSMMU_PCIE_0 COMBINER_IRQ(5, 7)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_LITE0_0 COMBINER_IRQ(16, 0)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_LITE1_0 COMBINER_IRQ(16, 1)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_ISP_0 COMBINER_IRQ(16, 2)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_DRC_0 COMBINER_IRQ(16, 3)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_FD_0 COMBINER_IRQ(16, 4)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_CX_0 COMBINER_IRQ(16, 5)
+
#define EXYNOS4_IRQ_FIMD0_FIFO COMBINER_IRQ(11, 0)
#define EXYNOS4_IRQ_FIMD0_VSYNC COMBINER_IRQ(11, 1)
#define EXYNOS4_IRQ_FIMD0_SYSTEM COMBINER_IRQ(11, 2)
@@ -221,24 +228,6 @@
#define IRQ_KEYPAD EXYNOS4_IRQ_KEYPAD
#define IRQ_PMU EXYNOS4_IRQ_PMU
-#define IRQ_SYSMMU_MDMA0_0 EXYNOS4_IRQ_SYSMMU_MDMA0_0
-#define IRQ_SYSMMU_SSS_0 EXYNOS4_IRQ_SYSMMU_SSS_0
-#define IRQ_SYSMMU_FIMC0_0 EXYNOS4_IRQ_SYSMMU_FIMC0_0
-#define IRQ_SYSMMU_FIMC1_0 EXYNOS4_IRQ_SYSMMU_FIMC1_0
-#define IRQ_SYSMMU_FIMC2_0 EXYNOS4_IRQ_SYSMMU_FIMC2_0
-#define IRQ_SYSMMU_FIMC3_0 EXYNOS4_IRQ_SYSMMU_FIMC3_0
-#define IRQ_SYSMMU_JPEG_0 EXYNOS4_IRQ_SYSMMU_JPEG_0
-#define IRQ_SYSMMU_2D_0 EXYNOS4_IRQ_SYSMMU_2D_0
-
-#define IRQ_SYSMMU_ROTATOR_0 EXYNOS4_IRQ_SYSMMU_ROTATOR_0
-#define IRQ_SYSMMU_MDMA1_0 EXYNOS4_IRQ_SYSMMU_MDMA1_0
-#define IRQ_SYSMMU_LCD0_M0_0 EXYNOS4_IRQ_SYSMMU_LCD0_M0_0
-#define IRQ_SYSMMU_LCD1_M1_0 EXYNOS4_IRQ_SYSMMU_LCD1_M1_0
-#define IRQ_SYSMMU_TV_M0_0 EXYNOS4_IRQ_SYSMMU_TV_M0_0
-#define IRQ_SYSMMU_MFC_M0_0 EXYNOS4_IRQ_SYSMMU_MFC_M0_0
-#define IRQ_SYSMMU_MFC_M1_0 EXYNOS4_IRQ_SYSMMU_MFC_M1_0
-#define IRQ_SYSMMU_PCIE_0 EXYNOS4_IRQ_SYSMMU_PCIE_0
-
#define IRQ_FIMD0_FIFO EXYNOS4_IRQ_FIMD0_FIFO
#define IRQ_FIMD0_VSYNC EXYNOS4_IRQ_FIMD0_VSYNC
#define IRQ_FIMD0_SYSTEM EXYNOS4_IRQ_FIMD0_SYSTEM
@@ -298,6 +287,7 @@
#define EXYNOS5_IRQ_MIPICSI1 IRQ_SPI(80)
#define EXYNOS5_IRQ_EFNFCON_DMA_ABORT IRQ_SPI(81)
#define EXYNOS5_IRQ_MIPIDSI0 IRQ_SPI(82)
+#define EXYNOS5_IRQ_WDT_IOP IRQ_SPI(83)
#define EXYNOS5_IRQ_ROTATOR IRQ_SPI(84)
#define EXYNOS5_IRQ_GSC0 IRQ_SPI(85)
#define EXYNOS5_IRQ_GSC1 IRQ_SPI(86)
@@ -306,8 +296,8 @@
#define EXYNOS5_IRQ_JPEG IRQ_SPI(89)
#define EXYNOS5_IRQ_EFNFCON_DMA IRQ_SPI(90)
#define EXYNOS5_IRQ_2D IRQ_SPI(91)
-#define EXYNOS5_IRQ_SFMC0 IRQ_SPI(92)
-#define EXYNOS5_IRQ_SFMC1 IRQ_SPI(93)
+#define EXYNOS5_IRQ_EFNFCON_0 IRQ_SPI(92)
+#define EXYNOS5_IRQ_EFNFCON_1 IRQ_SPI(93)
#define EXYNOS5_IRQ_MIXER IRQ_SPI(94)
#define EXYNOS5_IRQ_HDMI IRQ_SPI(95)
#define EXYNOS5_IRQ_MFC IRQ_SPI(96)
@@ -321,7 +311,7 @@
#define EXYNOS5_IRQ_PCM2 IRQ_SPI(104)
#define EXYNOS5_IRQ_SPDIF IRQ_SPI(105)
#define EXYNOS5_IRQ_ADC0 IRQ_SPI(106)
-
+#define EXYNOS5_IRQ_ADC1 IRQ_SPI(107)
#define EXYNOS5_IRQ_SATA_PHY IRQ_SPI(108)
#define EXYNOS5_IRQ_SATA_PMEMREQ IRQ_SPI(109)
#define EXYNOS5_IRQ_CAM_C IRQ_SPI(110)
@@ -330,8 +320,9 @@
#define EXYNOS5_IRQ_DP1_INTP1 IRQ_SPI(113)
#define EXYNOS5_IRQ_CEC IRQ_SPI(114)
#define EXYNOS5_IRQ_SATA IRQ_SPI(115)
-#define EXYNOS5_IRQ_NFCON IRQ_SPI(116)
+#define EXYNOS5_IRQ_MCT_L0 IRQ_SPI(120)
+#define EXYNOS5_IRQ_MCT_L1 IRQ_SPI(121)
#define EXYNOS5_IRQ_MMC44 IRQ_SPI(123)
#define EXYNOS5_IRQ_MDMA1 IRQ_SPI(124)
#define EXYNOS5_IRQ_FIMC_LITE0 IRQ_SPI(125)
@@ -339,7 +330,6 @@
#define EXYNOS5_IRQ_RP_TIMER IRQ_SPI(127)
#define EXYNOS5_IRQ_PMU COMBINER_IRQ(1, 2)
-#define EXYNOS5_IRQ_PMU_CPU1 COMBINER_IRQ(1, 6)
#define EXYNOS5_IRQ_SYSMMU_GSC0_0 COMBINER_IRQ(2, 0)
#define EXYNOS5_IRQ_SYSMMU_GSC0_1 COMBINER_IRQ(2, 1)
@@ -350,6 +340,8 @@
#define EXYNOS5_IRQ_SYSMMU_GSC3_0 COMBINER_IRQ(2, 6)
#define EXYNOS5_IRQ_SYSMMU_GSC3_1 COMBINER_IRQ(2, 7)
+#define EXYNOS5_IRQ_SYSMMU_LITE2_0 COMBINER_IRQ(3, 0)
+#define EXYNOS5_IRQ_SYSMMU_LITE2_1 COMBINER_IRQ(3, 1)
#define EXYNOS5_IRQ_SYSMMU_FIMD1_0 COMBINER_IRQ(3, 2)
#define EXYNOS5_IRQ_SYSMMU_FIMD1_1 COMBINER_IRQ(3, 3)
#define EXYNOS5_IRQ_SYSMMU_LITE0_0 COMBINER_IRQ(3, 4)
@@ -373,8 +365,8 @@
#define EXYNOS5_IRQ_SYSMMU_ARM_0 COMBINER_IRQ(6, 0)
#define EXYNOS5_IRQ_SYSMMU_ARM_1 COMBINER_IRQ(6, 1)
-#define EXYNOS5_IRQ_SYSMMU_MFC_L_0 COMBINER_IRQ(6, 2)
-#define EXYNOS5_IRQ_SYSMMU_MFC_L_1 COMBINER_IRQ(6, 3)
+#define EXYNOS5_IRQ_SYSMMU_MFC_R_0 COMBINER_IRQ(6, 2)
+#define EXYNOS5_IRQ_SYSMMU_MFC_R_1 COMBINER_IRQ(6, 3)
#define EXYNOS5_IRQ_SYSMMU_RTIC_0 COMBINER_IRQ(6, 4)
#define EXYNOS5_IRQ_SYSMMU_RTIC_1 COMBINER_IRQ(6, 5)
#define EXYNOS5_IRQ_SYSMMU_SSS_0 COMBINER_IRQ(6, 6)
@@ -386,11 +378,9 @@
#define EXYNOS5_IRQ_SYSMMU_MDMA1_1 COMBINER_IRQ(7, 3)
#define EXYNOS5_IRQ_SYSMMU_TV_0 COMBINER_IRQ(7, 4)
#define EXYNOS5_IRQ_SYSMMU_TV_1 COMBINER_IRQ(7, 5)
-#define EXYNOS5_IRQ_SYSMMU_GPSX_0 COMBINER_IRQ(7, 6)
-#define EXYNOS5_IRQ_SYSMMU_GPSX_1 COMBINER_IRQ(7, 7)
-#define EXYNOS5_IRQ_SYSMMU_MFC_R_0 COMBINER_IRQ(8, 5)
-#define EXYNOS5_IRQ_SYSMMU_MFC_R_1 COMBINER_IRQ(8, 6)
+#define EXYNOS5_IRQ_SYSMMU_MFC_L_0 COMBINER_IRQ(8, 5)
+#define EXYNOS5_IRQ_SYSMMU_MFC_L_1 COMBINER_IRQ(8, 6)
#define EXYNOS5_IRQ_SYSMMU_DIS1_0 COMBINER_IRQ(9, 4)
#define EXYNOS5_IRQ_SYSMMU_DIS1_1 COMBINER_IRQ(9, 5)
@@ -406,17 +396,24 @@
#define EXYNOS5_IRQ_SYSMMU_DRC_0 COMBINER_IRQ(11, 6)
#define EXYNOS5_IRQ_SYSMMU_DRC_1 COMBINER_IRQ(11, 7)
+#define EXYNOS5_IRQ_MDMA1_ABORT COMBINER_IRQ(13, 1)
+
+#define EXYNOS5_IRQ_MDMA0_ABORT COMBINER_IRQ(15, 3)
+
#define EXYNOS5_IRQ_FIMD1_FIFO COMBINER_IRQ(18, 4)
#define EXYNOS5_IRQ_FIMD1_VSYNC COMBINER_IRQ(18, 5)
#define EXYNOS5_IRQ_FIMD1_SYSTEM COMBINER_IRQ(18, 6)
+#define EXYNOS5_IRQ_ARMIOP_GIC COMBINER_IRQ(19, 0)
+#define EXYNOS5_IRQ_ARMISP_GIC COMBINER_IRQ(19, 1)
+#define EXYNOS5_IRQ_IOP_GIC COMBINER_IRQ(19, 3)
+#define EXYNOS5_IRQ_ISP_GIC COMBINER_IRQ(19, 4)
+
+#define EXYNOS5_IRQ_PMU_CPU1 COMBINER_IRQ(22, 4)
+
#define EXYNOS5_IRQ_EINT0 COMBINER_IRQ(23, 0)
-#define EXYNOS5_IRQ_MCT_L0 COMBINER_IRQ(23, 1)
-#define EXYNOS5_IRQ_MCT_L1 COMBINER_IRQ(23, 2)
#define EXYNOS5_IRQ_MCT_G0 COMBINER_IRQ(23, 3)
#define EXYNOS5_IRQ_MCT_G1 COMBINER_IRQ(23, 4)
-#define EXYNOS5_IRQ_MCT_G2 COMBINER_IRQ(23, 5)
-#define EXYNOS5_IRQ_MCT_G3 COMBINER_IRQ(23, 6)
#define EXYNOS5_IRQ_EINT1 COMBINER_IRQ(24, 0)
#define EXYNOS5_IRQ_SYSMMU_LITE1_0 COMBINER_IRQ(24, 1)
@@ -447,7 +444,7 @@
#define EXYNOS5_MAX_COMBINER_NR 32
-#define EXYNOS5_IRQ_GPIO1_NR_GROUPS 13
+#define EXYNOS5_IRQ_GPIO1_NR_GROUPS 14
#define EXYNOS5_IRQ_GPIO2_NR_GROUPS 9
#define EXYNOS5_IRQ_GPIO3_NR_GROUPS 5
#define EXYNOS5_IRQ_GPIO4_NR_GROUPS 1
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h
index e009a66477f4..ca4aa89aa46b 100644
--- a/arch/arm/mach-exynos/include/mach/map.h
+++ b/arch/arm/mach-exynos/include/mach/map.h
@@ -34,6 +34,9 @@
#define EXYNOS4_PA_JPEG 0x11840000
+/* x = 0...1 */
+#define EXYNOS4_PA_FIMC_LITE(x) (0x12390000 + ((x) * 0x10000))
+
#define EXYNOS4_PA_G2D 0x12800000
#define EXYNOS4_PA_I2S0 0x03830000
@@ -78,8 +81,8 @@
#define EXYNOS4_PA_GIC_CPU 0x10480000
#define EXYNOS4_PA_GIC_DIST 0x10490000
-#define EXYNOS5_PA_GIC_CPU 0x10480000
-#define EXYNOS5_PA_GIC_DIST 0x10490000
+#define EXYNOS5_PA_GIC_CPU 0x10482000
+#define EXYNOS5_PA_GIC_DIST 0x10481000
#define EXYNOS4_PA_COREPERI 0x10500000
#define EXYNOS4_PA_TWD 0x10500600
@@ -95,6 +98,7 @@
#define EXYNOS5_PA_PDMA1 0x121B0000
#define EXYNOS4_PA_SYSMMU_MDMA 0x10A40000
+#define EXYNOS4_PA_SYSMMU_2D_ACP 0x10A40000
#define EXYNOS4_PA_SYSMMU_SSS 0x10A50000
#define EXYNOS4_PA_SYSMMU_FIMC0 0x11A20000
#define EXYNOS4_PA_SYSMMU_FIMC1 0x11A30000
@@ -103,6 +107,12 @@
#define EXYNOS4_PA_SYSMMU_JPEG 0x11A60000
#define EXYNOS4_PA_SYSMMU_FIMD0 0x11E20000
#define EXYNOS4_PA_SYSMMU_FIMD1 0x12220000
+#define EXYNOS4_PA_SYSMMU_FIMC_ISP 0x12260000
+#define EXYNOS4_PA_SYSMMU_FIMC_DRC 0x12270000
+#define EXYNOS4_PA_SYSMMU_FIMC_FD 0x122A0000
+#define EXYNOS4_PA_SYSMMU_ISPCPU 0x122B0000
+#define EXYNOS4_PA_SYSMMU_FIMC_LITE0 0x123B0000
+#define EXYNOS4_PA_SYSMMU_FIMC_LITE1 0x123C0000
#define EXYNOS4_PA_SYSMMU_PCIe 0x12620000
#define EXYNOS4_PA_SYSMMU_G2D 0x12A20000
#define EXYNOS4_PA_SYSMMU_ROTATOR 0x12A30000
@@ -110,6 +120,37 @@
#define EXYNOS4_PA_SYSMMU_TV 0x12E20000
#define EXYNOS4_PA_SYSMMU_MFC_L 0x13620000
#define EXYNOS4_PA_SYSMMU_MFC_R 0x13630000
+
+#define EXYNOS5_PA_SYSMMU_MDMA1 0x10A40000
+#define EXYNOS5_PA_SYSMMU_SSS 0x10A50000
+#define EXYNOS5_PA_SYSMMU_2D 0x10A60000
+#define EXYNOS5_PA_SYSMMU_MFC_L 0x11200000
+#define EXYNOS5_PA_SYSMMU_MFC_R 0x11210000
+#define EXYNOS5_PA_SYSMMU_ROTATOR 0x11D40000
+#define EXYNOS5_PA_SYSMMU_MDMA2 0x11D50000
+#define EXYNOS5_PA_SYSMMU_JPEG 0x11F20000
+#define EXYNOS5_PA_SYSMMU_IOP 0x12360000
+#define EXYNOS5_PA_SYSMMU_RTIC 0x12370000
+#define EXYNOS5_PA_SYSMMU_GPS 0x12630000
+#define EXYNOS5_PA_SYSMMU_ISP 0x13260000
+#define EXYNOS5_PA_SYSMMU_DRC 0x12370000
+#define EXYNOS5_PA_SYSMMU_SCALERC 0x13280000
+#define EXYNOS5_PA_SYSMMU_SCALERP 0x13290000
+#define EXYNOS5_PA_SYSMMU_FD 0x132A0000
+#define EXYNOS5_PA_SYSMMU_ISPCPU 0x132B0000
+#define EXYNOS5_PA_SYSMMU_ODC 0x132C0000
+#define EXYNOS5_PA_SYSMMU_DIS0 0x132D0000
+#define EXYNOS5_PA_SYSMMU_DIS1 0x132E0000
+#define EXYNOS5_PA_SYSMMU_3DNR 0x132F0000
+#define EXYNOS5_PA_SYSMMU_LITE0 0x13C40000
+#define EXYNOS5_PA_SYSMMU_LITE1 0x13C50000
+#define EXYNOS5_PA_SYSMMU_GSC0 0x13E80000
+#define EXYNOS5_PA_SYSMMU_GSC1 0x13E90000
+#define EXYNOS5_PA_SYSMMU_GSC2 0x13EA0000
+#define EXYNOS5_PA_SYSMMU_GSC3 0x13EB0000
+#define EXYNOS5_PA_SYSMMU_FIMD1 0x14640000
+#define EXYNOS5_PA_SYSMMU_TV 0x14650000
+
#define EXYNOS4_PA_SPI0 0x13920000
#define EXYNOS4_PA_SPI1 0x13930000
#define EXYNOS4_PA_SPI2 0x13940000
diff --git a/arch/arm/mach-exynos/include/mach/pm-core.h b/arch/arm/mach-exynos/include/mach/pm-core.h
index 9d8da51e35ca..a67ecfaf1216 100644
--- a/arch/arm/mach-exynos/include/mach/pm-core.h
+++ b/arch/arm/mach-exynos/include/mach/pm-core.h
@@ -33,7 +33,7 @@ static inline void s3c_pm_arch_prepare_irqs(void)
__raw_writel(tmp, S5P_WAKEUP_MASK);
__raw_writel(s3c_irqwake_intmask, S5P_WAKEUP_MASK);
- __raw_writel(s3c_irqwake_eintmask, S5P_EINT_WAKEUP_MASK);
+ __raw_writel(s3c_irqwake_eintmask & 0xFFFFFFFE, S5P_EINT_WAKEUP_MASK);
}
static inline void s3c_pm_arch_stop_clocks(void)
diff --git a/arch/arm/mach-exynos/include/mach/pmu.h b/arch/arm/mach-exynos/include/mach/pmu.h
index e76b7faba66b..7c27c2d4bf44 100644
--- a/arch/arm/mach-exynos/include/mach/pmu.h
+++ b/arch/arm/mach-exynos/include/mach/pmu.h
@@ -23,12 +23,12 @@ enum sys_powerdown {
};
extern unsigned long l2x0_regs_phys;
-struct exynos4_pmu_conf {
+struct exynos_pmu_conf {
void __iomem *reg;
unsigned int val[NUM_SYS_POWERDOWN];
};
-extern void exynos4_sys_powerdown_conf(enum sys_powerdown mode);
+extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
extern void s3c_cpu_resume(void);
#endif /* __ASM_ARCH_PMU_H */
diff --git a/arch/arm/mach-exynos/include/mach/regs-clock.h b/arch/arm/mach-exynos/include/mach/regs-clock.h
index d9578a58ae7f..8c9b38c9c504 100644
--- a/arch/arm/mach-exynos/include/mach/regs-clock.h
+++ b/arch/arm/mach-exynos/include/mach/regs-clock.h
@@ -135,6 +135,9 @@
#define EXYNOS4_CLKGATE_SCLKCPU EXYNOS_CLKREG(0x14800)
#define EXYNOS4_CLKGATE_IP_CPU EXYNOS_CLKREG(0x14900)
+#define EXYNOS4_CLKGATE_IP_ISP0 EXYNOS_CLKREG(0x18800)
+#define EXYNOS4_CLKGATE_IP_ISP1 EXYNOS_CLKREG(0x18804)
+
#define EXYNOS4_APLL_LOCKTIME (0x1C20) /* 300us */
#define EXYNOS4_APLLCON0_ENABLE_SHIFT (31)
@@ -271,41 +274,59 @@
#define EXYNOS5_CLKDIV_ACP EXYNOS_CLKREG(0x08500)
-#define EXYNOS5_CLKSRC_TOP2 EXYNOS_CLKREG(0x10218)
#define EXYNOS5_EPLL_CON0 EXYNOS_CLKREG(0x10130)
#define EXYNOS5_EPLL_CON1 EXYNOS_CLKREG(0x10134)
+#define EXYNOS5_EPLL_CON2 EXYNOS_CLKREG(0x10138)
#define EXYNOS5_VPLL_CON0 EXYNOS_CLKREG(0x10140)
#define EXYNOS5_VPLL_CON1 EXYNOS_CLKREG(0x10144)
+#define EXYNOS5_VPLL_CON2 EXYNOS_CLKREG(0x10148)
#define EXYNOS5_CPLL_CON0 EXYNOS_CLKREG(0x10120)
#define EXYNOS5_CLKSRC_TOP0 EXYNOS_CLKREG(0x10210)
+#define EXYNOS5_CLKSRC_TOP1 EXYNOS_CLKREG(0x10214)
+#define EXYNOS5_CLKSRC_TOP2 EXYNOS_CLKREG(0x10218)
#define EXYNOS5_CLKSRC_TOP3 EXYNOS_CLKREG(0x1021C)
#define EXYNOS5_CLKSRC_GSCL EXYNOS_CLKREG(0x10220)
#define EXYNOS5_CLKSRC_DISP1_0 EXYNOS_CLKREG(0x1022C)
+#define EXYNOS5_CLKSRC_MAUDIO EXYNOS_CLKREG(0x10240)
#define EXYNOS5_CLKSRC_FSYS EXYNOS_CLKREG(0x10244)
#define EXYNOS5_CLKSRC_PERIC0 EXYNOS_CLKREG(0x10250)
+#define EXYNOS5_CLKSRC_PERIC1 EXYNOS_CLKREG(0x10254)
+#define EXYNOS5_SCLK_SRC_ISP EXYNOS_CLKREG(0x10270)
#define EXYNOS5_CLKSRC_MASK_TOP EXYNOS_CLKREG(0x10310)
#define EXYNOS5_CLKSRC_MASK_GSCL EXYNOS_CLKREG(0x10320)
#define EXYNOS5_CLKSRC_MASK_DISP1_0 EXYNOS_CLKREG(0x1032C)
+#define EXYNOS5_CLKSRC_MASK_MAUDIO EXYNOS_CLKREG(0x10334)
#define EXYNOS5_CLKSRC_MASK_FSYS EXYNOS_CLKREG(0x10340)
#define EXYNOS5_CLKSRC_MASK_PERIC0 EXYNOS_CLKREG(0x10350)
+#define EXYNOS5_CLKSRC_MASK_PERIC1 EXYNOS_CLKREG(0x10354)
#define EXYNOS5_CLKDIV_TOP0 EXYNOS_CLKREG(0x10510)
#define EXYNOS5_CLKDIV_TOP1 EXYNOS_CLKREG(0x10514)
#define EXYNOS5_CLKDIV_GSCL EXYNOS_CLKREG(0x10520)
#define EXYNOS5_CLKDIV_DISP1_0 EXYNOS_CLKREG(0x1052C)
#define EXYNOS5_CLKDIV_GEN EXYNOS_CLKREG(0x1053C)
+#define EXYNOS5_CLKDIV_MAUDIO EXYNOS_CLKREG(0x10544)
#define EXYNOS5_CLKDIV_FSYS0 EXYNOS_CLKREG(0x10548)
#define EXYNOS5_CLKDIV_FSYS1 EXYNOS_CLKREG(0x1054C)
#define EXYNOS5_CLKDIV_FSYS2 EXYNOS_CLKREG(0x10550)
#define EXYNOS5_CLKDIV_FSYS3 EXYNOS_CLKREG(0x10554)
#define EXYNOS5_CLKDIV_PERIC0 EXYNOS_CLKREG(0x10558)
+#define EXYNOS5_CLKDIV_PERIC1 EXYNOS_CLKREG(0x1055C)
+#define EXYNOS5_CLKDIV_PERIC2 EXYNOS_CLKREG(0x10560)
+#define EXYNOS5_CLKDIV_PERIC3 EXYNOS_CLKREG(0x10564)
+#define EXYNOS5_CLKDIV_PERIC4 EXYNOS_CLKREG(0x10568)
+#define EXYNOS5_CLKDIV_PERIC5 EXYNOS_CLKREG(0x1056C)
+#define EXYNOS5_SCLK_DIV_ISP EXYNOS_CLKREG(0x10580)
#define EXYNOS5_CLKGATE_IP_ACP EXYNOS_CLKREG(0x08800)
+#define EXYNOS5_CLKGATE_IP_ISP0 EXYNOS_CLKREG(0x0C800)
+#define EXYNOS5_CLKGATE_IP_ISP1 EXYNOS_CLKREG(0x0C804)
#define EXYNOS5_CLKGATE_IP_GSCL EXYNOS_CLKREG(0x10920)
#define EXYNOS5_CLKGATE_IP_DISP1 EXYNOS_CLKREG(0x10928)
#define EXYNOS5_CLKGATE_IP_MFC EXYNOS_CLKREG(0x1092C)
+#define EXYNOS5_CLKGATE_IP_G3D EXYNOS_CLKREG(0x10930)
#define EXYNOS5_CLKGATE_IP_GEN EXYNOS_CLKREG(0x10934)
#define EXYNOS5_CLKGATE_IP_FSYS EXYNOS_CLKREG(0x10944)
#define EXYNOS5_CLKGATE_IP_GPS EXYNOS_CLKREG(0x1094C)
@@ -317,6 +338,8 @@
#define EXYNOS5_CLKSRC_CDREX EXYNOS_CLKREG(0x20200)
#define EXYNOS5_CLKDIV_CDREX EXYNOS_CLKREG(0x20500)
+#define EXYNOS5_PLL_DIV2_SEL EXYNOS_CLKREG(0x20A24)
+
#define EXYNOS5_EPLL_LOCK EXYNOS_CLKREG(0x10030)
#define EXYNOS5_EPLLCON0_LOCKED_SHIFT (29)
diff --git a/arch/arm/mach-exynos/include/mach/regs-pmu.h b/arch/arm/mach-exynos/include/mach/regs-pmu.h
index d457d052a420..43a99e6f56ab 100644
--- a/arch/arm/mach-exynos/include/mach/regs-pmu.h
+++ b/arch/arm/mach-exynos/include/mach/regs-pmu.h
@@ -1,9 +1,8 @@
-/* linux/arch/arm/mach-exynos4/include/mach/regs-pmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * EXYNOS4 - Power management unit definition
+ * EXYNOS - Power management unit definition
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -180,7 +179,7 @@
#define S5P_PMU_LCD1_CONF S5P_PMUREG(0x3CA0)
-/* Only for EXYNOS4212 */
+/* Only for EXYNOS4x12 */
#define S5P_ISP_ARM_LOWPWR S5P_PMUREG(0x1050)
#define S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR S5P_PMUREG(0x1054)
#define S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR S5P_PMUREG(0x1058)
@@ -221,4 +220,146 @@
#define S5P_SECSS_MEM_OPTION S5P_PMUREG(0x2EC8)
#define S5P_ROTATOR_MEM_OPTION S5P_PMUREG(0x2F48)
+/* Only for EXYNOS4412 */
+#define S5P_ARM_CORE2_LOWPWR S5P_PMUREG(0x1020)
+#define S5P_DIS_IRQ_CORE2 S5P_PMUREG(0x1024)
+#define S5P_DIS_IRQ_CENTRAL2 S5P_PMUREG(0x1028)
+#define S5P_ARM_CORE3_LOWPWR S5P_PMUREG(0x1030)
+#define S5P_DIS_IRQ_CORE3 S5P_PMUREG(0x1034)
+#define S5P_DIS_IRQ_CENTRAL3 S5P_PMUREG(0x1038)
+
+/* For EXYNOS5 */
+
+#define EXYNOS5_USB_CFG S5P_PMUREG(0x0230)
+
+#define EXYNOS5_ARM_CORE0_SYS_PWR_REG S5P_PMUREG(0x1000)
+#define EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG S5P_PMUREG(0x1004)
+#define EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG S5P_PMUREG(0x1008)
+#define EXYNOS5_ARM_CORE1_SYS_PWR_REG S5P_PMUREG(0x1010)
+#define EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG S5P_PMUREG(0x1014)
+#define EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG S5P_PMUREG(0x1018)
+#define EXYNOS5_FSYS_ARM_SYS_PWR_REG S5P_PMUREG(0x1040)
+#define EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG S5P_PMUREG(0x1048)
+#define EXYNOS5_ISP_ARM_SYS_PWR_REG S5P_PMUREG(0x1050)
+#define EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG S5P_PMUREG(0x1054)
+#define EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG S5P_PMUREG(0x1058)
+#define EXYNOS5_ARM_COMMON_SYS_PWR_REG S5P_PMUREG(0x1080)
+#define EXYNOS5_ARM_L2_SYS_PWR_REG S5P_PMUREG(0x10C0)
+#define EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG S5P_PMUREG(0x1100)
+#define EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG S5P_PMUREG(0x1104)
+#define EXYNOS5_CMU_RESET_SYS_PWR_REG S5P_PMUREG(0x110C)
+#define EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1120)
+#define EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1124)
+#define EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x112C)
+#define EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG S5P_PMUREG(0x1130)
+#define EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG S5P_PMUREG(0x1134)
+#define EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG S5P_PMUREG(0x1138)
+#define EXYNOS5_APLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1140)
+#define EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1144)
+#define EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1148)
+#define EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x114C)
+#define EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1150)
+#define EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1154)
+#define EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1164)
+#define EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1170)
+#define EXYNOS5_TOP_BUS_SYS_PWR_REG S5P_PMUREG(0x1180)
+#define EXYNOS5_TOP_RETENTION_SYS_PWR_REG S5P_PMUREG(0x1184)
+#define EXYNOS5_TOP_PWR_SYS_PWR_REG S5P_PMUREG(0x1188)
+#define EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1190)
+#define EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1194)
+#define EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1198)
+#define EXYNOS5_LOGIC_RESET_SYS_PWR_REG S5P_PMUREG(0x11A0)
+#define EXYNOS5_OSCCLK_GATE_SYS_PWR_REG S5P_PMUREG(0x11A4)
+#define EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x11B0)
+#define EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x11B4)
+#define EXYNOS5_USBOTG_MEM_SYS_PWR_REG S5P_PMUREG(0x11C0)
+#define EXYNOS5_G2D_MEM_SYS_PWR_REG S5P_PMUREG(0x11C8)
+#define EXYNOS5_USBDRD_MEM_SYS_PWR_REG S5P_PMUREG(0x11CC)
+#define EXYNOS5_SDMMC_MEM_SYS_PWR_REG S5P_PMUREG(0x11D0)
+#define EXYNOS5_CSSYS_MEM_SYS_PWR_REG S5P_PMUREG(0x11D4)
+#define EXYNOS5_SECSS_MEM_SYS_PWR_REG S5P_PMUREG(0x11D8)
+#define EXYNOS5_ROTATOR_MEM_SYS_PWR_REG S5P_PMUREG(0x11DC)
+#define EXYNOS5_INTRAM_MEM_SYS_PWR_REG S5P_PMUREG(0x11E0)
+#define EXYNOS5_INTROM_MEM_SYS_PWR_REG S5P_PMUREG(0x11E4)
+#define EXYNOS5_JPEG_MEM_SYS_PWR_REG S5P_PMUREG(0x11E8)
+#define EXYNOS5_HSI_MEM_SYS_PWR_REG S5P_PMUREG(0x11EC)
+#define EXYNOS5_MCUIOP_MEM_SYS_PWR_REG S5P_PMUREG(0x11F4)
+#define EXYNOS5_SATA_MEM_SYS_PWR_REG S5P_PMUREG(0x11FC)
+#define EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG S5P_PMUREG(0x1200)
+#define EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG S5P_PMUREG(0x1204)
+#define EXYNOS5_PAD_RETENTION_EFNAND_SYS_PWR_REG S5P_PMUREG(0x1208)
+#define EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG S5P_PMUREG(0x1220)
+#define EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG S5P_PMUREG(0x1224)
+#define EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG S5P_PMUREG(0x1228)
+#define EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG S5P_PMUREG(0x122C)
+#define EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG S5P_PMUREG(0x1230)
+#define EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG S5P_PMUREG(0x1234)
+#define EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG S5P_PMUREG(0x1238)
+#define EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x123C)
+#define EXYNOS5_PAD_ISOLATION_SYS_PWR_REG S5P_PMUREG(0x1240)
+#define EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1250)
+#define EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG S5P_PMUREG(0x1260)
+#define EXYNOS5_XUSBXTI_SYS_PWR_REG S5P_PMUREG(0x1280)
+#define EXYNOS5_XXTI_SYS_PWR_REG S5P_PMUREG(0x1284)
+#define EXYNOS5_EXT_REGULATOR_SYS_PWR_REG S5P_PMUREG(0x12C0)
+#define EXYNOS5_GPIO_MODE_SYS_PWR_REG S5P_PMUREG(0x1300)
+#define EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1320)
+#define EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG S5P_PMUREG(0x1340)
+#define EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG S5P_PMUREG(0x1344)
+#define EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG S5P_PMUREG(0x1348)
+#define EXYNOS5_GSCL_SYS_PWR_REG S5P_PMUREG(0x1400)
+#define EXYNOS5_ISP_SYS_PWR_REG S5P_PMUREG(0x1404)
+#define EXYNOS5_MFC_SYS_PWR_REG S5P_PMUREG(0x1408)
+#define EXYNOS5_G3D_SYS_PWR_REG S5P_PMUREG(0x140C)
+#define EXYNOS5_DISP1_SYS_PWR_REG S5P_PMUREG(0x1414)
+#define EXYNOS5_MAU_SYS_PWR_REG S5P_PMUREG(0x1418)
+#define EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG S5P_PMUREG(0x1480)
+#define EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG S5P_PMUREG(0x1484)
+#define EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG S5P_PMUREG(0x1488)
+#define EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG S5P_PMUREG(0x148C)
+#define EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG S5P_PMUREG(0x1494)
+#define EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG S5P_PMUREG(0x1498)
+#define EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG S5P_PMUREG(0x14C0)
+#define EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG S5P_PMUREG(0x14C4)
+#define EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG S5P_PMUREG(0x14C8)
+#define EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG S5P_PMUREG(0x14CC)
+#define EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG S5P_PMUREG(0x14D4)
+#define EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG S5P_PMUREG(0x14D8)
+#define EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG S5P_PMUREG(0x1580)
+#define EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG S5P_PMUREG(0x1584)
+#define EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG S5P_PMUREG(0x1588)
+#define EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG S5P_PMUREG(0x158C)
+#define EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG S5P_PMUREG(0x1594)
+#define EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG S5P_PMUREG(0x1598)
+
+#define EXYNOS5_ARM_CORE0_OPTION S5P_PMUREG(0x2008)
+#define EXYNOS5_ARM_CORE1_OPTION S5P_PMUREG(0x2088)
+#define EXYNOS5_FSYS_ARM_OPTION S5P_PMUREG(0x2208)
+#define EXYNOS5_ISP_ARM_OPTION S5P_PMUREG(0x2288)
+#define EXYNOS5_ARM_COMMON_OPTION S5P_PMUREG(0x2408)
+#define EXYNOS5_TOP_PWR_OPTION S5P_PMUREG(0x2C48)
+#define EXYNOS5_TOP_PWR_SYSMEM_OPTION S5P_PMUREG(0x2CC8)
+#define EXYNOS5_JPEG_MEM_OPTION S5P_PMUREG(0x2F48)
+#define EXYNOS5_GSCL_STATUS S5P_PMUREG(0x4004)
+#define EXYNOS5_ISP_STATUS S5P_PMUREG(0x4024)
+#define EXYNOS5_GSCL_OPTION S5P_PMUREG(0x4008)
+#define EXYNOS5_ISP_OPTION S5P_PMUREG(0x4028)
+#define EXYNOS5_MFC_OPTION S5P_PMUREG(0x4048)
+#define EXYNOS5_G3D_CONFIGURATION S5P_PMUREG(0x4060)
+#define EXYNOS5_G3D_STATUS S5P_PMUREG(0x4064)
+#define EXYNOS5_G3D_OPTION S5P_PMUREG(0x4068)
+#define EXYNOS5_DISP1_OPTION S5P_PMUREG(0x40A8)
+#define EXYNOS5_MAU_OPTION S5P_PMUREG(0x40C8)
+
+#define EXYNOS5_USE_SC_FEEDBACK (1 << 1)
+#define EXYNOS5_USE_SC_COUNTER (1 << 0)
+
+#define EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL (1 << 2)
+#define EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN (1 << 7)
+
+#define EXYNOS5_OPTION_USE_STANDBYWFE (1 << 24)
+#define EXYNOS5_OPTION_USE_STANDBYWFI (1 << 16)
+
+#define EXYNOS5_OPTION_USE_RETENTION (1 << 4)
+
#endif /* __ASM_ARCH_REGS_PMU_H */
diff --git a/arch/arm/mach-exynos/include/mach/regs-sysmmu.h b/arch/arm/mach-exynos/include/mach/regs-sysmmu.h
deleted file mode 100644
index 68ff6ad08a2b..000000000000
--- a/arch/arm/mach-exynos/include/mach/regs-sysmmu.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* linux/arch/arm/mach-exynos4/include/mach/regs-sysmmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS4 - System MMU register
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_REGS_SYSMMU_H
-#define __ASM_ARCH_REGS_SYSMMU_H __FILE__
-
-#define S5P_MMU_CTRL 0x000
-#define S5P_MMU_CFG 0x004
-#define S5P_MMU_STATUS 0x008
-#define S5P_MMU_FLUSH 0x00C
-#define S5P_PT_BASE_ADDR 0x014
-#define S5P_INT_STATUS 0x018
-#define S5P_INT_CLEAR 0x01C
-#define S5P_PAGE_FAULT_ADDR 0x024
-#define S5P_AW_FAULT_ADDR 0x028
-#define S5P_AR_FAULT_ADDR 0x02C
-#define S5P_DEFAULT_SLAVE_ADDR 0x030
-
-#endif /* __ASM_ARCH_REGS_SYSMMU_H */
diff --git a/arch/arm/mach-exynos/include/mach/spi-clocks.h b/arch/arm/mach-exynos/include/mach/spi-clocks.h
index 576efdf6d091..c71a5fba6a84 100644
--- a/arch/arm/mach-exynos/include/mach/spi-clocks.h
+++ b/arch/arm/mach-exynos/include/mach/spi-clocks.h
@@ -11,6 +11,6 @@
#define __ASM_ARCH_SPI_CLKS_H __FILE__
/* Must source from SCLK_SPI */
-#define EXYNOS4_SPI_SRCCLK_SCLK 0
+#define EXYNOS_SPI_SRCCLK_SCLK 0
#endif /* __ASM_ARCH_SPI_CLKS_H */
diff --git a/arch/arm/mach-exynos/include/mach/sysmmu.h b/arch/arm/mach-exynos/include/mach/sysmmu.h
index 6a5fbb534e82..998daf2add92 100644
--- a/arch/arm/mach-exynos/include/mach/sysmmu.h
+++ b/arch/arm/mach-exynos/include/mach/sysmmu.h
@@ -1,46 +1,66 @@
-/* linux/arch/arm/mach-exynos4/include/mach/sysmmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Samsung sysmmu driver for EXYNOS4
+ * EXYNOS - System MMU support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARM_ARCH_SYSMMU_H
-#define __ASM_ARM_ARCH_SYSMMU_H __FILE__
-
-enum exynos4_sysmmu_ips {
- SYSMMU_MDMA,
- SYSMMU_SSS,
- SYSMMU_FIMC0,
- SYSMMU_FIMC1,
- SYSMMU_FIMC2,
- SYSMMU_FIMC3,
- SYSMMU_JPEG,
- SYSMMU_FIMD0,
- SYSMMU_FIMD1,
- SYSMMU_PCIe,
- SYSMMU_G2D,
- SYSMMU_ROTATOR,
- SYSMMU_MDMA2,
- SYSMMU_TV,
- SYSMMU_MFC_L,
- SYSMMU_MFC_R,
- EXYNOS4_SYSMMU_TOTAL_IPNUM,
+ */
+
+#ifndef _ARM_MACH_EXYNOS_SYSMMU_H_
+#define _ARM_MACH_EXYNOS_SYSMMU_H_
+
+struct sysmmu_platform_data {
+ char *dbgname;
+ /* comma-separated list of clock names for clock gating */
+ char *clockname;
};
-#define S5P_SYSMMU_TOTAL_IPNUM EXYNOS4_SYSMMU_TOTAL_IPNUM
+#define SYSMMU_DEVNAME_BASE "exynos-sysmmu"
+
+#define SYSMMU_CLOCK_NAME "sysmmu"
+#define SYSMMU_CLOCK_NAME2 "sysmmu_mc"
+
+#ifdef CONFIG_EXYNOS_DEV_SYSMMU
+#include <linux/device.h>
+struct platform_device;
+
+#define SYSMMU_PLATDEV(ipname) exynos_device_sysmmu_##ipname
+
+extern struct platform_device SYSMMU_PLATDEV(mfc_l);
+extern struct platform_device SYSMMU_PLATDEV(mfc_r);
+extern struct platform_device SYSMMU_PLATDEV(tv);
+extern struct platform_device SYSMMU_PLATDEV(jpeg);
+extern struct platform_device SYSMMU_PLATDEV(rot);
+extern struct platform_device SYSMMU_PLATDEV(fimc0);
+extern struct platform_device SYSMMU_PLATDEV(fimc1);
+extern struct platform_device SYSMMU_PLATDEV(fimc2);
+extern struct platform_device SYSMMU_PLATDEV(fimc3);
+extern struct platform_device SYSMMU_PLATDEV(gsc0);
+extern struct platform_device SYSMMU_PLATDEV(gsc1);
+extern struct platform_device SYSMMU_PLATDEV(gsc2);
+extern struct platform_device SYSMMU_PLATDEV(gsc3);
+extern struct platform_device SYSMMU_PLATDEV(isp);
+extern struct platform_device SYSMMU_PLATDEV(fimd0);
+extern struct platform_device SYSMMU_PLATDEV(fimd1);
+extern struct platform_device SYSMMU_PLATDEV(camif0);
+extern struct platform_device SYSMMU_PLATDEV(camif1);
+extern struct platform_device SYSMMU_PLATDEV(2d);
-extern const char *sysmmu_ips_name[EXYNOS4_SYSMMU_TOTAL_IPNUM];
+#ifdef CONFIG_IOMMU_API
+static inline void platform_set_sysmmu(
+ struct device *sysmmu, struct device *dev)
+{
+ dev->archdata.iommu = sysmmu;
+}
+#endif
-typedef enum exynos4_sysmmu_ips sysmmu_ips;
+#else /* !CONFIG_EXYNOS_DEV_SYSMMU */
+#define platform_set_sysmmu(dev, sysmmu) do { } while (0)
+#endif
-void sysmmu_clk_init(struct device *dev, sysmmu_ips ips);
-void sysmmu_clk_enable(sysmmu_ips ips);
-void sysmmu_clk_disable(sysmmu_ips ips);
+#define SYSMMU_CLOCK_DEVNAME(ipname, id) (SYSMMU_DEVNAME_BASE "." #id)
-#endif /* __ASM_ARM_ARCH_SYSMMU_H */
+#endif /* _ARM_MACH_EXYNOS_SYSMMU_H_ */
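Editor's note: the reworked sysmmu.h above replaces the old per-IP enum with named System MMU platform devices and a platform_set_sysmmu() helper that stashes the MMU device in the master's archdata.iommu field. A hedged sketch of how board code might wire a master to its MMU follows; it assumes CONFIG_EXYNOS_DEV_SYSMMU and CONFIG_IOMMU_API are enabled and uses s5p_device_mfc_l only because it already appears in this patch — treat the pairing itself as illustrative, not as code from this series.

    /* Sketch: bind the MFC-left System MMU to the MFC platform device so
     * the IOMMU driver can locate it through dev->archdata.iommu. */
    static void __init board_bind_sysmmu_sketch(void)
    {
        platform_set_sysmmu(&SYSMMU_PLATDEV(mfc_l).dev,
                            &s5p_device_mfc_l.dev);
    }
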
diff --git a/arch/arm/mach-exynos/mach-armlex4210.c b/arch/arm/mach-exynos/mach-armlex4210.c
index fed7116418eb..5a3daa0168d8 100644
--- a/arch/arm/mach-exynos/mach-armlex4210.c
+++ b/arch/arm/mach-exynos/mach-armlex4210.c
@@ -147,7 +147,6 @@ static struct platform_device *armlex4210_devices[] __initdata = {
&s3c_device_hsmmc3,
&s3c_device_rtc,
&s3c_device_wdt,
- &exynos4_device_sysmmu,
&samsung_asoc_dma,
&armlex4210_smsc911x,
&exynos4_device_ahci,
@@ -204,6 +203,7 @@ MACHINE_START(ARMLEX4210, "ARMLEX4210")
.map_io = armlex4210_map_io,
.handle_irq = gic_handle_irq,
.init_machine = armlex4210_machine_init,
+ .init_late = exynos_init_late,
.timer = &exynos4_timer,
.restart = exynos4_restart,
MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-exynos4-dt.c b/arch/arm/mach-exynos/mach-exynos4-dt.c
index 8245f1c761d9..e7e9743543ac 100644
--- a/arch/arm/mach-exynos/mach-exynos4-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos4-dt.c
@@ -83,6 +83,7 @@ DT_MACHINE_START(EXYNOS4210_DT, "Samsung Exynos4 (Flattened Device Tree)")
.map_io = exynos4210_dt_map_io,
.handle_irq = gic_handle_irq,
.init_machine = exynos4210_dt_machine_init,
+ .init_late = exynos_init_late,
.timer = &exynos4_timer,
.dt_compat = exynos4210_dt_compat,
.restart = exynos4_restart,
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c
index 4711c8920e37..7b1e11a228cc 100644
--- a/arch/arm/mach-exynos/mach-exynos5-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos5-dt.c
@@ -43,6 +43,10 @@ static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = {
"exynos4210-uart.2", NULL),
OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS5_PA_UART3,
"exynos4210-uart.3", NULL),
+ OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(0),
+ "s3c2440-i2c.0", NULL),
+ OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(1),
+ "s3c2440-i2c.1", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_MDMA1, "dma-pl330.2", NULL),
@@ -72,6 +76,7 @@ DT_MACHINE_START(EXYNOS5_DT, "SAMSUNG EXYNOS5 (Flattened Device Tree)")
.map_io = exynos5250_dt_map_io,
.handle_irq = gic_handle_irq,
.init_machine = exynos5250_dt_machine_init,
+ .init_late = exynos_init_late,
.timer = &exynos4_timer,
.dt_compat = exynos5250_dt_compat,
.restart = exynos5_restart,
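Editor's note: the two new OF_DEV_AUXDATA() rows map the device-tree I2C nodes at EXYNOS5_PA_IIC(0)/(1) onto the legacy "s3c2440-i2c.N" device names so that existing name-based lookups (for example clkdev) keep resolving. As a rough sketch of what one row denotes — the field names come from the generic struct of_dev_auxdata and are an assumption here, since the macro body is not part of this hunk:

    /* Sketch, not verbatim: match a DT node by compatible string plus unit
     * address, then force the resulting platform device's name/pdata. */
    static const struct of_dev_auxdata i2c0_lookup_sketch = {
        .compatible    = "samsung,s3c2440-i2c",
        .phys_addr     = EXYNOS5_PA_IIC(0),
        .name          = "s3c2440-i2c.0",
        .platform_data = NULL,
    };
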
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index 6c31f2ad765d..972983e392bc 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -1389,6 +1389,7 @@ MACHINE_START(NURI, "NURI")
.map_io = nuri_map_io,
.handle_irq = gic_handle_irq,
.init_machine = nuri_machine_init,
+ .init_late = exynos_init_late,
.timer = &exynos4_timer,
.reserve = &nuri_reserve,
.restart = exynos4_restart,
diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
index 26124a38bcbd..a7f7fd567dde 100644
--- a/arch/arm/mach-exynos/mach-origen.c
+++ b/arch/arm/mach-exynos/mach-origen.c
@@ -766,6 +766,7 @@ MACHINE_START(ORIGEN, "ORIGEN")
.map_io = origen_map_io,
.handle_irq = gic_handle_irq,
.init_machine = origen_machine_init,
+ .init_late = exynos_init_late,
.timer = &exynos4_timer,
.reserve = &origen_reserve,
.restart = exynos4_restart,
diff --git a/arch/arm/mach-exynos/mach-smdk4x12.c b/arch/arm/mach-exynos/mach-smdk4x12.c
index fe772d893cc9..fb09c70e195a 100644
--- a/arch/arm/mach-exynos/mach-smdk4x12.c
+++ b/arch/arm/mach-exynos/mach-smdk4x12.c
@@ -316,6 +316,7 @@ MACHINE_START(SMDK4412, "SMDK4412")
.map_io = smdk4x12_map_io,
.handle_irq = gic_handle_irq,
.init_machine = smdk4x12_machine_init,
+ .init_late = exynos_init_late,
.timer = &exynos4_timer,
.restart = exynos4_restart,
.reserve = &smdk4x12_reserve,
diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
index 5af96064ca51..70df1a0c2118 100644
--- a/arch/arm/mach-exynos/mach-smdkv310.c
+++ b/arch/arm/mach-exynos/mach-smdkv310.c
@@ -295,7 +295,6 @@ static struct platform_device *smdkv310_devices[] __initdata = {
&s5p_device_mfc_l,
&s5p_device_mfc_r,
&exynos4_device_spdif,
- &exynos4_device_sysmmu,
&samsung_asoc_dma,
&samsung_asoc_idma,
&s5p_device_fimd0,
@@ -412,6 +411,7 @@ MACHINE_START(SMDKC210, "SMDKC210")
.map_io = smdkv310_map_io,
.handle_irq = gic_handle_irq,
.init_machine = smdkv310_machine_init,
+ .init_late = exynos_init_late,
.timer = &exynos4_timer,
.restart = exynos4_restart,
MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 6b731b863275..083b44de9c10 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -1157,6 +1157,7 @@ MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
.map_io = universal_map_io,
.handle_irq = gic_handle_irq,
.init_machine = universal_machine_init,
+ .init_late = exynos_init_late,
.timer = &s5p_timer,
.reserve = &universal_reserve,
.restart = exynos4_restart,
diff --git a/arch/arm/mach-exynos/mct.c b/arch/arm/mach-exynos/mct.c
index 897d9a9cf226..b601fb8a408b 100644
--- a/arch/arm/mach-exynos/mct.c
+++ b/arch/arm/mach-exynos/mct.c
@@ -388,6 +388,7 @@ static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
{
struct mct_clock_event_device *mevt;
unsigned int cpu = smp_processor_id();
+ int mct_lx_irq;
mevt = this_cpu_ptr(&percpu_mct_tick);
mevt->evt = evt;
@@ -414,14 +415,18 @@ static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
if (mct_int_type == MCT_INT_SPI) {
if (cpu == 0) {
+ mct_lx_irq = soc_is_exynos4210() ? EXYNOS4_IRQ_MCT_L0 :
+ EXYNOS5_IRQ_MCT_L0;
mct_tick0_event_irq.dev_id = mevt;
- evt->irq = EXYNOS4_IRQ_MCT_L0;
- setup_irq(EXYNOS4_IRQ_MCT_L0, &mct_tick0_event_irq);
+ evt->irq = mct_lx_irq;
+ setup_irq(mct_lx_irq, &mct_tick0_event_irq);
} else {
+ mct_lx_irq = soc_is_exynos4210() ? EXYNOS4_IRQ_MCT_L1 :
+ EXYNOS5_IRQ_MCT_L1;
mct_tick1_event_irq.dev_id = mevt;
- evt->irq = EXYNOS4_IRQ_MCT_L1;
- setup_irq(EXYNOS4_IRQ_MCT_L1, &mct_tick1_event_irq);
- irq_set_affinity(EXYNOS4_IRQ_MCT_L1, cpumask_of(1));
+ evt->irq = mct_lx_irq;
+ setup_irq(mct_lx_irq, &mct_tick1_event_irq);
+ irq_set_affinity(mct_lx_irq, cpumask_of(1));
}
} else {
enable_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER, 0);
@@ -473,7 +478,7 @@ static void __init exynos4_timer_resources(void)
static void __init exynos4_timer_init(void)
{
- if (soc_is_exynos4210())
+ if ((soc_is_exynos4210()) || (soc_is_exynos5250()))
mct_int_type = MCT_INT_SPI;
else
mct_int_type = MCT_INT_PPI;
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 428cfeb57724..c06c992943a1 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -1,9 +1,8 @@
-/* linux/arch/arm/mach-exynos4/pm.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * EXYNOS4210 - Power Management support
+ * EXYNOS - Power Management support
*
* Based on arch/arm/mach-s3c2410/pm.c
* Copyright (c) 2006 Simtec Electronics
@@ -63,90 +62,7 @@ static struct sleep_save exynos4_vpll_save[] = {
SAVE_ITEM(EXYNOS4_VPLL_CON1),
};
-static struct sleep_save exynos4_core_save[] = {
- /* GIC side */
- SAVE_ITEM(S5P_VA_GIC_CPU + 0x000),
- SAVE_ITEM(S5P_VA_GIC_CPU + 0x004),
- SAVE_ITEM(S5P_VA_GIC_CPU + 0x008),
- SAVE_ITEM(S5P_VA_GIC_CPU + 0x00C),
- SAVE_ITEM(S5P_VA_GIC_CPU + 0x014),
- SAVE_ITEM(S5P_VA_GIC_CPU + 0x018),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x000),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x004),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x100),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x104),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x108),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x300),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x304),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x308),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x400),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x404),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x408),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x40C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x410),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x414),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x418),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x41C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x420),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x424),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x428),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x42C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x430),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x434),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x438),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x43C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x440),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x444),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x448),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x44C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x450),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x454),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x458),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x45C),
-
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x800),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x804),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x808),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x80C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x810),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x814),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x818),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x81C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x820),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x824),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x828),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x82C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x830),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x834),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x838),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x83C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x840),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x844),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x848),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x84C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x850),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x854),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x858),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0x85C),
-
- SAVE_ITEM(S5P_VA_GIC_DIST + 0xC00),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0xC04),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0xC08),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0xC0C),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0xC10),
- SAVE_ITEM(S5P_VA_GIC_DIST + 0xC14),
-
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x000),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x010),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x020),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x030),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x040),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x050),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x060),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x070),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x080),
- SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x090),
-
+static struct sleep_save exynos_core_save[] = {
/* SROM side */
SAVE_ITEM(S5P_SROM_BW),
SAVE_ITEM(S5P_SROM_BC0),
@@ -159,9 +75,11 @@ static struct sleep_save exynos4_core_save[] = {
/* For Cortex-A9 Diagnostic and Power control register */
static unsigned int save_arm_register[2];
-static int exynos4_cpu_suspend(unsigned long arg)
+static int exynos_cpu_suspend(unsigned long arg)
{
+#ifdef CONFIG_CACHE_L2X0
outer_flush_all();
+#endif
/* issue the standby signal into the pm unit. */
cpu_do_idle();
@@ -170,19 +88,25 @@ static int exynos4_cpu_suspend(unsigned long arg)
panic("sleep resumed to originator?");
}
-static void exynos4_pm_prepare(void)
+static void exynos_pm_prepare(void)
{
- u32 tmp;
+ unsigned int tmp;
- s3c_pm_do_save(exynos4_core_save, ARRAY_SIZE(exynos4_core_save));
- s3c_pm_do_save(exynos4_epll_save, ARRAY_SIZE(exynos4_epll_save));
- s3c_pm_do_save(exynos4_vpll_save, ARRAY_SIZE(exynos4_vpll_save));
+ s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
- tmp = __raw_readl(S5P_INFORM1);
+ if (!soc_is_exynos5250()) {
+ s3c_pm_do_save(exynos4_epll_save, ARRAY_SIZE(exynos4_epll_save));
+ s3c_pm_do_save(exynos4_vpll_save, ARRAY_SIZE(exynos4_vpll_save));
+ } else {
+ /* Disable USE_RETENTION of JPEG_MEM_OPTION */
+ tmp = __raw_readl(EXYNOS5_JPEG_MEM_OPTION);
+ tmp &= ~EXYNOS5_OPTION_USE_RETENTION;
+ __raw_writel(tmp, EXYNOS5_JPEG_MEM_OPTION);
+ }
/* Set value of power down register for sleep mode */
- exynos4_sys_powerdown_conf(SYS_SLEEP);
+ exynos_sys_powerdown_conf(SYS_SLEEP);
__raw_writel(S5P_CHECK_SLEEP, S5P_INFORM1);
/* ensure at least INFORM0 has the resume address */
@@ -191,17 +115,18 @@ static void exynos4_pm_prepare(void)
/* Before enter central sequence mode, clock src register have to set */
- s3c_pm_do_restore_core(exynos4_set_clksrc, ARRAY_SIZE(exynos4_set_clksrc));
+ if (!soc_is_exynos5250())
+ s3c_pm_do_restore_core(exynos4_set_clksrc, ARRAY_SIZE(exynos4_set_clksrc));
if (soc_is_exynos4210())
s3c_pm_do_restore_core(exynos4210_set_clksrc, ARRAY_SIZE(exynos4210_set_clksrc));
}
-static int exynos4_pm_add(struct device *dev, struct subsys_interface *sif)
+static int exynos_pm_add(struct device *dev, struct subsys_interface *sif)
{
- pm_cpu_prep = exynos4_pm_prepare;
- pm_cpu_sleep = exynos4_cpu_suspend;
+ pm_cpu_prep = exynos_pm_prepare;
+ pm_cpu_sleep = exynos_cpu_suspend;
return 0;
}
@@ -273,13 +198,13 @@ static void exynos4_restore_pll(void)
} while (epll_wait || vpll_wait);
}
-static struct subsys_interface exynos4_pm_interface = {
- .name = "exynos4_pm",
- .subsys = &exynos4_subsys,
- .add_dev = exynos4_pm_add,
+static struct subsys_interface exynos_pm_interface = {
+ .name = "exynos_pm",
+ .subsys = &exynos_subsys,
+ .add_dev = exynos_pm_add,
};
-static __init int exynos4_pm_drvinit(void)
+static __init int exynos_pm_drvinit(void)
{
struct clk *pll_base;
unsigned int tmp;
@@ -292,18 +217,20 @@ static __init int exynos4_pm_drvinit(void)
tmp |= ((0xFF << 8) | (0x1F << 1));
__raw_writel(tmp, S5P_WAKEUP_MASK);
- pll_base = clk_get(NULL, "xtal");
+ if (!soc_is_exynos5250()) {
+ pll_base = clk_get(NULL, "xtal");
- if (!IS_ERR(pll_base)) {
- pll_base_rate = clk_get_rate(pll_base);
- clk_put(pll_base);
+ if (!IS_ERR(pll_base)) {
+ pll_base_rate = clk_get_rate(pll_base);
+ clk_put(pll_base);
+ }
}
- return subsys_interface_register(&exynos4_pm_interface);
+ return subsys_interface_register(&exynos_pm_interface);
}
-arch_initcall(exynos4_pm_drvinit);
+arch_initcall(exynos_pm_drvinit);
-static int exynos4_pm_suspend(void)
+static int exynos_pm_suspend(void)
{
unsigned long tmp;
@@ -313,27 +240,27 @@ static int exynos4_pm_suspend(void)
tmp &= ~S5P_CENTRAL_LOWPWR_CFG;
__raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION);
- if (soc_is_exynos4212()) {
- tmp = __raw_readl(S5P_CENTRAL_SEQ_OPTION);
- tmp &= ~(S5P_USE_STANDBYWFI_ISP_ARM |
- S5P_USE_STANDBYWFE_ISP_ARM);
- __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
- }
+ /* Setting SEQ_OPTION register */
+
+ tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0);
+ __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
- /* Save Power control register */
- asm ("mrc p15, 0, %0, c15, c0, 0"
- : "=r" (tmp) : : "cc");
- save_arm_register[0] = tmp;
+ if (!soc_is_exynos5250()) {
+ /* Save Power control register */
+ asm ("mrc p15, 0, %0, c15, c0, 0"
+ : "=r" (tmp) : : "cc");
+ save_arm_register[0] = tmp;
- /* Save Diagnostic register */
- asm ("mrc p15, 0, %0, c15, c0, 1"
- : "=r" (tmp) : : "cc");
- save_arm_register[1] = tmp;
+ /* Save Diagnostic register */
+ asm ("mrc p15, 0, %0, c15, c0, 1"
+ : "=r" (tmp) : : "cc");
+ save_arm_register[1] = tmp;
+ }
return 0;
}
-static void exynos4_pm_resume(void)
+static void exynos_pm_resume(void)
{
unsigned long tmp;
@@ -350,17 +277,19 @@ static void exynos4_pm_resume(void)
/* No need to perform below restore code */
goto early_wakeup;
}
- /* Restore Power control register */
- tmp = save_arm_register[0];
- asm volatile ("mcr p15, 0, %0, c15, c0, 0"
- : : "r" (tmp)
- : "cc");
-
- /* Restore Diagnostic register */
- tmp = save_arm_register[1];
- asm volatile ("mcr p15, 0, %0, c15, c0, 1"
- : : "r" (tmp)
- : "cc");
+ if (!soc_is_exynos5250()) {
+ /* Restore Power control register */
+ tmp = save_arm_register[0];
+ asm volatile ("mcr p15, 0, %0, c15, c0, 0"
+ : : "r" (tmp)
+ : "cc");
+
+ /* Restore Diagnostic register */
+ tmp = save_arm_register[1];
+ asm volatile ("mcr p15, 0, %0, c15, c0, 1"
+ : : "r" (tmp)
+ : "cc");
+ }
/* For release retention */
@@ -372,26 +301,28 @@ static void exynos4_pm_resume(void)
__raw_writel((1 << 28), S5P_PAD_RET_EBIA_OPTION);
__raw_writel((1 << 28), S5P_PAD_RET_EBIB_OPTION);
- s3c_pm_do_restore_core(exynos4_core_save, ARRAY_SIZE(exynos4_core_save));
+ s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
- exynos4_restore_pll();
+ if (!soc_is_exynos5250()) {
+ exynos4_restore_pll();
#ifdef CONFIG_SMP
- scu_enable(S5P_VA_SCU);
+ scu_enable(S5P_VA_SCU);
#endif
+ }
early_wakeup:
return;
}
-static struct syscore_ops exynos4_pm_syscore_ops = {
- .suspend = exynos4_pm_suspend,
- .resume = exynos4_pm_resume,
+static struct syscore_ops exynos_pm_syscore_ops = {
+ .suspend = exynos_pm_suspend,
+ .resume = exynos_pm_resume,
};
-static __init int exynos4_pm_syscore_init(void)
+static __init int exynos_pm_syscore_init(void)
{
- register_syscore_ops(&exynos4_pm_syscore_ops);
+ register_syscore_ops(&exynos_pm_syscore_ops);
return 0;
}
-arch_initcall(exynos4_pm_syscore_init);
+arch_initcall(exynos_pm_syscore_init);
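Editor's note: the trimmed exynos_core_save[] table above leans on the generic Samsung sleep_save machinery — s3c_pm_do_save() snapshots each listed register before suspend and s3c_pm_do_restore_core() writes the values back on resume. A minimal sketch of that pattern, assuming the plat-samsung definitions (struct sleep_save and SAVE_ITEM() live in plat/pm.h and are only mirrored, not copied, here):

    /* Assumed shape of the save/restore helpers this table feeds. */
    struct sleep_save {
        void __iomem  *reg;   /* register to preserve across suspend */
        unsigned long  val;   /* snapshot taken at suspend time */
    };

    static void pm_do_save_sketch(struct sleep_save *ptr, int count)
    {
        for (; count > 0; count--, ptr++)
            ptr->val = __raw_readl(ptr->reg);
    }

    static void pm_do_restore_sketch(struct sleep_save *ptr, int count)
    {
        for (; count > 0; count--, ptr++)
            __raw_writel(ptr->val, ptr->reg);
    }
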
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 13b306808b42..e9fafcf163de 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -193,9 +193,8 @@ static __init int exynos4_pm_init_power_domain(void)
}
arch_initcall(exynos4_pm_init_power_domain);
-static __init int exynos_pm_late_initcall(void)
+int __init exynos_pm_late_initcall(void)
{
pm_genpd_poweroff_unused();
return 0;
}
-late_initcall(exynos_pm_late_initcall);
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
index bba48f5c3e8f..4aacb66f7161 100644
--- a/arch/arm/mach-exynos/pmu.c
+++ b/arch/arm/mach-exynos/pmu.c
@@ -1,9 +1,8 @@
-/* linux/arch/arm/mach-exynos4/pmu.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
- * EXYNOS4210 - CPU PMU(Power Management Unit) support
+ * EXYNOS - CPU PMU(Power Management Unit) support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -12,13 +11,14 @@
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/bug.h>
#include <mach/regs-clock.h>
#include <mach/pmu.h>
-static struct exynos4_pmu_conf *exynos4_pmu_config;
+static struct exynos_pmu_conf *exynos_pmu_config;
-static struct exynos4_pmu_conf exynos4210_pmu_config[] = {
+static struct exynos_pmu_conf exynos4210_pmu_config[] = {
/* { .reg = address, .val = { AFTR, LPA, SLEEP } */
{ S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
{ S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
@@ -94,7 +94,7 @@ static struct exynos4_pmu_conf exynos4210_pmu_config[] = {
{ PMU_TABLE_END,},
};
-static struct exynos4_pmu_conf exynos4212_pmu_config[] = {
+static struct exynos_pmu_conf exynos4x12_pmu_config[] = {
{ S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
{ S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
{ S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } },
@@ -202,29 +202,209 @@ static struct exynos4_pmu_conf exynos4212_pmu_config[] = {
{ PMU_TABLE_END,},
};
-void exynos4_sys_powerdown_conf(enum sys_powerdown mode)
+static struct exynos_pmu_conf exynos4412_pmu_config[] = {
+ { S5P_ARM_CORE2_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE2, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL2, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_CORE3_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE3, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL3, { 0x0, 0x0, 0x0 } },
+ { PMU_TABLE_END,},
+};
+
+static struct exynos_pmu_conf exynos5250_pmu_config[] = {
+ /* { .reg = address, .val = { AFTR, LPA, SLEEP } */
+ { EXYNOS5_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_CORE1_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_FSYS_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_ISP_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS5_ARM_L2_SYS_PWR_REG, { 0x3, 0x3, 0x3} },
+ { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_TOP_BUS_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_TOP_PWR_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5_LOGIC_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_USBOTG_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_G2D_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_USBDRD_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_SDMMC_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_CSSYS_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_SECSS_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_ROTATOR_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_INTRAM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_INTROM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_JPEG_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_HSI_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_MCUIOP_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_SATA_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_XUSBXTI_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_XXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_GSCL_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_ISP_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_MFC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_G3D_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_DISP1_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_MAU_SYS_PWR_REG, { 0x7, 0x7, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { PMU_TABLE_END,},
+};
+
+void __iomem *exynos5_list_both_cnt_feed[] = {
+ EXYNOS5_ARM_CORE0_OPTION,
+ EXYNOS5_ARM_CORE1_OPTION,
+ EXYNOS5_ARM_COMMON_OPTION,
+ EXYNOS5_GSCL_OPTION,
+ EXYNOS5_ISP_OPTION,
+ EXYNOS5_MFC_OPTION,
+ EXYNOS5_G3D_OPTION,
+ EXYNOS5_DISP1_OPTION,
+ EXYNOS5_MAU_OPTION,
+ EXYNOS5_TOP_PWR_OPTION,
+ EXYNOS5_TOP_PWR_SYSMEM_OPTION,
+};
+
+void __iomem *exynos5_list_disable_wfi_wfe[] = {
+ EXYNOS5_ARM_CORE1_OPTION,
+ EXYNOS5_FSYS_ARM_OPTION,
+ EXYNOS5_ISP_ARM_OPTION,
+};
+
+static void exynos5_init_pmu(void)
{
unsigned int i;
+ unsigned int tmp;
- for (i = 0; (exynos4_pmu_config[i].reg != PMU_TABLE_END) ; i++)
- __raw_writel(exynos4_pmu_config[i].val[mode],
- exynos4_pmu_config[i].reg);
+ /*
+ * Enable both SC_FEEDBACK and SC_COUNTER
+ */
+ for (i = 0 ; i < ARRAY_SIZE(exynos5_list_both_cnt_feed) ; i++) {
+ tmp = __raw_readl(exynos5_list_both_cnt_feed[i]);
+ tmp |= (EXYNOS5_USE_SC_FEEDBACK |
+ EXYNOS5_USE_SC_COUNTER);
+ __raw_writel(tmp, exynos5_list_both_cnt_feed[i]);
+ }
+
+ /*
+ * SKIP_DEACTIVATE_ACEACP_IN_PWDN_BITFIELD Enable
+ * MANUAL_L2RSTDISABLE_CONTROL_BITFIELD Enable
+ */
+ tmp = __raw_readl(EXYNOS5_ARM_COMMON_OPTION);
+ tmp |= (EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL |
+ EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN);
+ __raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
+
+ /*
+ * Disable WFI/WFE on XXX_OPTION
+ */
+ for (i = 0 ; i < ARRAY_SIZE(exynos5_list_disable_wfi_wfe) ; i++) {
+ tmp = __raw_readl(exynos5_list_disable_wfi_wfe[i]);
+ tmp &= ~(EXYNOS5_OPTION_USE_STANDBYWFE |
+ EXYNOS5_OPTION_USE_STANDBYWFI);
+ __raw_writel(tmp, exynos5_list_diable_wfi_wfe[i]);
+ }
+}
+
+void exynos_sys_powerdown_conf(enum sys_powerdown mode)
+{
+ unsigned int i;
+
+ if (soc_is_exynos5250())
+ exynos5_init_pmu();
+
+ for (i = 0; (exynos_pmu_config[i].reg != PMU_TABLE_END) ; i++)
+ __raw_writel(exynos_pmu_config[i].val[mode],
+ exynos_pmu_config[i].reg);
+
+ if (soc_is_exynos4412()) {
+ for (i = 0; exynos4412_pmu_config[i].reg != PMU_TABLE_END ; i++)
+ __raw_writel(exynos4412_pmu_config[i].val[mode],
+ exynos4412_pmu_config[i].reg);
+ }
}
-static int __init exynos4_pmu_init(void)
+static int __init exynos_pmu_init(void)
{
- exynos4_pmu_config = exynos4210_pmu_config;
+ exynos_pmu_config = exynos4210_pmu_config;
if (soc_is_exynos4210()) {
- exynos4_pmu_config = exynos4210_pmu_config;
+ exynos_pmu_config = exynos4210_pmu_config;
pr_info("EXYNOS4210 PMU Initialize\n");
- } else if (soc_is_exynos4212()) {
- exynos4_pmu_config = exynos4212_pmu_config;
- pr_info("EXYNOS4212 PMU Initialize\n");
+ } else if (soc_is_exynos4212() || soc_is_exynos4412()) {
+ exynos_pmu_config = exynos4x12_pmu_config;
+ pr_info("EXYNOS4x12 PMU Initialize\n");
+ } else if (soc_is_exynos5250()) {
+ exynos_pmu_config = exynos5250_pmu_config;
+ pr_info("EXYNOS5250 PMU Initialize\n");
} else {
- pr_info("EXYNOS4: PMU not supported\n");
+ pr_info("EXYNOS: PMU not supported\n");
}
return 0;
}
-arch_initcall(exynos4_pmu_init);
+arch_initcall(exynos_pmu_init);
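Editor's note: exynos_sys_powerdown_conf() is purely table-driven — every row of the selected exynos*_pmu_config[] table pairs a PMU register with one value per low-power mode, and the function writes column `mode` of each row; Exynos4412 appends its extra-core rows and Exynos5250 first fixes up the *_OPTION registers via exynos5_init_pmu(). A brief usage sketch, matching the SYS_SLEEP call already made from pm.c in this same series (the other sys_powerdown enumerators are assumed from the partially shown pmu.h):

    #include <mach/pmu.h>

    /* Sketch: program the PMU for suspend-to-RAM; AFTR/LPA would simply
     * select a different column of the same tables. */
    static void prepare_for_sleep_sketch(void)
    {
        exynos_sys_powerdown_conf(SYS_SLEEP);
    }
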
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index cca8c0c74794..0021f726b153 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -34,6 +34,7 @@ config ARCH_MX53
config SOC_IMX1
bool
select ARCH_MX1
+ select COMMON_CLK
select CPU_ARM920T
select IMX_HAVE_IOMUX_V1
select MXC_AVIC
@@ -42,12 +43,14 @@ config SOC_IMX21
bool
select MACH_MX21
select CPU_ARM926T
+ select COMMON_CLK
select IMX_HAVE_IOMUX_V1
select MXC_AVIC
config SOC_IMX25
bool
select ARCH_MX25
+ select COMMON_CLK
select CPU_ARM926T
select ARCH_MXC_IOMUX_V3
select MXC_AVIC
@@ -56,6 +59,7 @@ config SOC_IMX27
bool
select MACH_MX27
select CPU_ARM926T
+ select COMMON_CLK
select IMX_HAVE_IOMUX_V1
select MXC_AVIC
@@ -64,12 +68,14 @@ config SOC_IMX31
select CPU_V6
select IMX_HAVE_PLATFORM_MXC_RNGA
select MXC_AVIC
+ select COMMON_CLK
select SMP_ON_UP if SMP
config SOC_IMX35
bool
select CPU_V6
select ARCH_MXC_IOMUX_V3
+ select COMMON_CLK
select HAVE_EPIT
select MXC_AVIC
select SMP_ON_UP if SMP
@@ -77,6 +83,7 @@ config SOC_IMX35
config SOC_IMX5
select CPU_V7
select MXC_TZIC
+ select COMMON_CLK
select ARCH_MXC_IOMUX_V3
select ARCH_HAS_CPUFREQ
select ARCH_MX5
@@ -815,6 +822,7 @@ config SOC_IMX6Q
bool "i.MX6 Quad support"
select ARM_CPU_SUSPEND if PM
select ARM_GIC
+ select COMMON_CLK
select CPU_V7
select HAVE_ARM_SCU
select HAVE_IMX_GPC
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 4937c070a57e..ff29421414f2 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -1,15 +1,18 @@
-obj-$(CONFIG_SOC_IMX1) += clock-imx1.o mm-imx1.o
-obj-$(CONFIG_SOC_IMX21) += clock-imx21.o mm-imx21.o
+obj-$(CONFIG_SOC_IMX1) += clk-imx1.o mm-imx1.o
+obj-$(CONFIG_SOC_IMX21) += clk-imx21.o mm-imx21.o
-obj-$(CONFIG_SOC_IMX25) += clock-imx25.o mm-imx25.o ehci-imx25.o cpu-imx25.o
+obj-$(CONFIG_SOC_IMX25) += clk-imx25.o mm-imx25.o ehci-imx25.o cpu-imx25.o
obj-$(CONFIG_SOC_IMX27) += cpu-imx27.o pm-imx27.o
-obj-$(CONFIG_SOC_IMX27) += clock-imx27.o mm-imx27.o ehci-imx27.o
+obj-$(CONFIG_SOC_IMX27) += clk-imx27.o mm-imx27.o ehci-imx27.o
-obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clock-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
-obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clock-imx35.o ehci-imx35.o pm-imx3.o
+obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
+obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o
-obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clock-mx51-mx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
+obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
+
+obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \
+ clk-pfd.o clk-busy.o
# Support for CMOS sensor interface
obj-$(CONFIG_MX1_VIDEO) += mx1-camera-fiq.o mx1-camera-fiq-ksym.o
@@ -70,7 +73,7 @@ obj-$(CONFIG_CPU_V7) += head-v7.o
AFLAGS_head-v7.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
-obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o
+obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o
diff --git a/arch/arm/mach-imx/Makefile.boot b/arch/arm/mach-imx/Makefile.boot
index 3851d8a27875..05541cf4a878 100644
--- a/arch/arm/mach-imx/Makefile.boot
+++ b/arch/arm/mach-imx/Makefile.boot
@@ -42,4 +42,5 @@ dtb-$(CONFIG_MACH_IMX51_DT) += imx51-babbage.dtb
dtb-$(CONFIG_MACH_IMX53_DT) += imx53-ard.dtb imx53-evk.dtb \
imx53-qsb.dtb imx53-smd.dtb
dtb-$(CONFIG_SOC_IMX6Q) += imx6q-arm2.dtb \
- imx6q-sabrelite.dtb
+ imx6q-sabrelite.dtb \
+ imx6q-sabresd.dtb \

diff --git a/arch/arm/mach-imx/clk-busy.c b/arch/arm/mach-imx/clk-busy.c
new file mode 100644
index 000000000000..1a7a8dd045a1
--- /dev/null
+++ b/arch/arm/mach-imx/clk-busy.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include "clk.h"
+
+static int clk_busy_wait(void __iomem *reg, u8 shift)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(10);
+
+ while (readl_relaxed(reg) & (1 << shift))
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+struct clk_busy_divider {
+ struct clk_divider div;
+ const struct clk_ops *div_ops;
+ void __iomem *reg;
+ u8 shift;
+};
+
+static inline struct clk_busy_divider *to_clk_busy_divider(struct clk_hw *hw)
+{
+ struct clk_divider *div = container_of(hw, struct clk_divider, hw);
+
+ return container_of(div, struct clk_busy_divider, div);
+}
+
+static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_busy_divider *busy = to_clk_busy_divider(hw);
+
+ return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
+}
+
+static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_busy_divider *busy = to_clk_busy_divider(hw);
+
+ return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
+}
+
+static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_busy_divider *busy = to_clk_busy_divider(hw);
+ int ret;
+
+ ret = busy->div_ops->set_rate(&busy->div.hw, rate, parent_rate);
+ if (!ret)
+ ret = clk_busy_wait(busy->reg, busy->shift);
+
+ return ret;
+}
+
+static struct clk_ops clk_busy_divider_ops = {
+ .recalc_rate = clk_busy_divider_recalc_rate,
+ .round_rate = clk_busy_divider_round_rate,
+ .set_rate = clk_busy_divider_set_rate,
+};
+
+struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width,
+ void __iomem *busy_reg, u8 busy_shift)
+{
+ struct clk_busy_divider *busy;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ busy = kzalloc(sizeof(*busy), GFP_KERNEL);
+ if (!busy)
+ return ERR_PTR(-ENOMEM);
+
+ busy->reg = busy_reg;
+ busy->shift = busy_shift;
+
+ busy->div.reg = reg;
+ busy->div.shift = shift;
+ busy->div.width = width;
+ busy->div.lock = &imx_ccm_lock;
+ busy->div_ops = &clk_divider_ops;
+
+ init.name = name;
+ init.ops = &clk_busy_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ busy->div.hw.init = &init;
+
+ clk = clk_register(NULL, &busy->div.hw);
+ if (IS_ERR(clk))
+ kfree(busy);
+
+ return clk;
+}
+
+struct clk_busy_mux {
+ struct clk_mux mux;
+ const struct clk_ops *mux_ops;
+ void __iomem *reg;
+ u8 shift;
+};
+
+static inline struct clk_busy_mux *to_clk_busy_mux(struct clk_hw *hw)
+{
+ struct clk_mux *mux = container_of(hw, struct clk_mux, hw);
+
+ return container_of(mux, struct clk_busy_mux, mux);
+}
+
+static u8 clk_busy_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_busy_mux *busy = to_clk_busy_mux(hw);
+
+ return busy->mux_ops->get_parent(&busy->mux.hw);
+}
+
+static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_busy_mux *busy = to_clk_busy_mux(hw);
+ int ret;
+
+ ret = busy->mux_ops->set_parent(&busy->mux.hw, index);
+ if (!ret)
+ ret = clk_busy_wait(busy->reg, busy->shift);
+
+ return ret;
+}
+
+static struct clk_ops clk_busy_mux_ops = {
+ .get_parent = clk_busy_mux_get_parent,
+ .set_parent = clk_busy_mux_set_parent,
+};
+
+struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
+ u8 width, void __iomem *busy_reg, u8 busy_shift,
+ const char **parent_names, int num_parents)
+{
+ struct clk_busy_mux *busy;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ busy = kzalloc(sizeof(*busy), GFP_KERNEL);
+ if (!busy)
+ return ERR_PTR(-ENOMEM);
+
+ busy->reg = busy_reg;
+ busy->shift = busy_shift;
+
+ busy->mux.reg = reg;
+ busy->mux.shift = shift;
+ busy->mux.width = width;
+ busy->mux.lock = &imx_ccm_lock;
+ busy->mux_ops = &clk_mux_ops;
+
+ init.name = name;
+ init.ops = &clk_busy_mux_ops;
+ init.flags = 0;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ busy->mux.hw.init = &init;
+
+ clk = clk_register(NULL, &busy->mux.hw);
+ if (IS_ERR(clk))
+ kfree(busy);
+
+ return clk;
+}
diff --git a/arch/arm/mach-imx/clk-gate2.c b/arch/arm/mach-imx/clk-gate2.c
new file mode 100644
index 000000000000..3c1b8ff9a0a6
--- /dev/null
+++ b/arch/arm/mach-imx/clk-gate2.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
+ * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Gated clock implementation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+
+/**
+ * DOC: basic gateable clock which can gate and ungate its output
+ *
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parent is (un)prepared
+ * enable - clk_enable and clk_disable are functional & control gating
+ * rate - inherits rate from parent. No clk_set_rate support
+ * parent - fixed parent. No clk_set_parent support
+ */
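+
+/*
+ * Note: this variant drives a two-bit field per clock (0x3 is written at
+ * bit_idx), matching the two-bits-per-gate layout of the i.MX CCM gating
+ * registers.  A minimal usage sketch follows; the clock names, register
+ * offset and bit position are purely illustrative, and real callers
+ * presumably go through the imx_clk_gate2() helper from clk.h (see
+ * clk-imx31.c in this series):
+ *
+ *	clk = clk_register_gate2(NULL, "uart1_gate", "uart1_podf", 0,
+ *				 ccm_base + 0x68, 24, 0, &imx_ccm_lock);
+ */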
+
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
+static int clk_gate2_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ u32 reg;
+ unsigned long flags = 0;
+
+ if (gate->lock)
+ spin_lock_irqsave(gate->lock, flags);
+
+ reg = readl(gate->reg);
+ reg |= 3 << gate->bit_idx;
+ writel(reg, gate->reg);
+
+ if (gate->lock)
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return 0;
+}
+
+static void clk_gate2_disable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ u32 reg;
+ unsigned long flags = 0;
+
+ if (gate->lock)
+ spin_lock_irqsave(gate->lock, flags);
+
+ reg = readl(gate->reg);
+ reg &= ~(3 << gate->bit_idx);
+ writel(reg, gate->reg);
+
+ if (gate->lock)
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int clk_gate2_is_enabled(struct clk_hw *hw)
+{
+ u32 reg;
+ struct clk_gate *gate = to_clk_gate(hw);
+
+ reg = readl(gate->reg);
+
+ if (((reg >> gate->bit_idx) & 3) == 3)
+ return 1;
+
+ return 0;
+}
+
+static struct clk_ops clk_gate2_ops = {
+ .enable = clk_gate2_enable,
+ .disable = clk_gate2_disable,
+ .is_enabled = clk_gate2_is_enabled,
+};
+
+struct clk *clk_register_gate2(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate2_flags, spinlock_t *lock)
+{
+ struct clk_gate *gate;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ /* struct clk_gate assignments */
+ gate->reg = reg;
+ gate->bit_idx = bit_idx;
+ gate->flags = clk_gate2_flags;
+ gate->lock = lock;
+
+ init.name = name;
+ init.ops = &clk_gate2_ops;
+ init.flags = flags;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ gate->hw.init = &init;
+
+ clk = clk_register(dev, &gate->hw);
+ if (IS_ERR(clk))
+ kfree(gate);
+
+ return clk;
+}
diff --git a/arch/arm/mach-imx/clk-imx1.c b/arch/arm/mach-imx/clk-imx1.c
new file mode 100644
index 000000000000..0f0beb580b73
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx1.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+#include "clk.h"
+
+/* CCM register addresses */
+#define IO_ADDR_CCM(off) (MX1_IO_ADDRESS(MX1_CCM_BASE_ADDR + (off)))
+
+#define CCM_CSCR IO_ADDR_CCM(0x0)
+#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
+#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
+#define CCM_PCDR IO_ADDR_CCM(0x20)
+
+/* SCM register addresses */
+#define IO_ADDR_SCM(off) (MX1_IO_ADDRESS(MX1_SCM_BASE_ADDR + (off)))
+
+#define SCM_GCCR IO_ADDR_SCM(0xc)
+
+static const char *prem_sel_clks[] = { "clk32_premult", "clk16m", };
+static const char *clko_sel_clks[] = { "per1", "hclk", "clk48m", "clk16m", "prem",
+ "fclk", };
+enum imx1_clks {
+ dummy, clk32, clk16m_ext, clk16m, clk32_premult, prem, mpll, spll, mcu,
+ fclk, hclk, clk48m, per1, per2, per3, clko, dma_gate, csi_gate,
+ mma_gate, usbd_gate, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx1_clocks_init(unsigned long fref)
+{
+ int i;
+
+ clk[dummy] = imx_clk_fixed("dummy", 0);
+ clk[clk32] = imx_clk_fixed("clk32", fref);
+ clk[clk16m_ext] = imx_clk_fixed("clk16m_ext", 16000000);
+ clk[clk16m] = imx_clk_gate("clk16m", "clk16m_ext", CCM_CSCR, 17);
+ clk[clk32_premult] = imx_clk_fixed_factor("clk32_premult", "clk32", 512, 1);
+ clk[prem] = imx_clk_mux("prem", CCM_CSCR, 16, 1, prem_sel_clks,
+ ARRAY_SIZE(prem_sel_clks));
+ clk[mpll] = imx_clk_pllv1("mpll", "clk32_premult", CCM_MPCTL0);
+ clk[spll] = imx_clk_pllv1("spll", "prem", CCM_SPCTL0);
+ clk[mcu] = imx_clk_divider("mcu", "clk32_premult", CCM_CSCR, 15, 1);
+ clk[fclk] = imx_clk_divider("fclk", "mpll", CCM_CSCR, 15, 1);
+ clk[hclk] = imx_clk_divider("hclk", "spll", CCM_CSCR, 10, 4);
+ clk[clk48m] = imx_clk_divider("clk48m", "spll", CCM_CSCR, 26, 3);
+ clk[per1] = imx_clk_divider("per1", "spll", CCM_PCDR, 0, 4);
+ clk[per2] = imx_clk_divider("per2", "spll", CCM_PCDR, 4, 4);
+ clk[per3] = imx_clk_divider("per3", "spll", CCM_PCDR, 16, 7);
+ clk[clko] = imx_clk_mux("clko", CCM_CSCR, 29, 3, clko_sel_clks,
+ ARRAY_SIZE(clko_sel_clks));
+ clk[dma_gate] = imx_clk_gate("dma_gate", "hclk", SCM_GCCR, 4);
+ clk[csi_gate] = imx_clk_gate("csi_gate", "hclk", SCM_GCCR, 2);
+ clk[mma_gate] = imx_clk_gate("mma_gate", "hclk", SCM_GCCR, 1);
+ clk[usbd_gate] = imx_clk_gate("usbd_gate", "clk48m", SCM_GCCR, 0);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("imx1 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
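+ /*
+ * clkdev lookups: expose the clocks under the (dev_id, con_id) pairs that
+ * the existing platform devices and drivers already request.
+ */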
+ clk_register_clkdev(clk[dma_gate], "ahb", "imx-dma");
+ clk_register_clkdev(clk[csi_gate], NULL, "mx1-camera.0");
+ clk_register_clkdev(clk[mma_gate], "mma", NULL);
+ clk_register_clkdev(clk[usbd_gate], NULL, "imx_udc.0");
+ clk_register_clkdev(clk[per1], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[hclk], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[per1], "per", "imx1-uart.0");
+ clk_register_clkdev(clk[hclk], "ipg", "imx1-uart.0");
+ clk_register_clkdev(clk[per1], "per", "imx1-uart.1");
+ clk_register_clkdev(clk[hclk], "ipg", "imx1-uart.1");
+ clk_register_clkdev(clk[per1], "per", "imx1-uart.2");
+ clk_register_clkdev(clk[hclk], "ipg", "imx1-uart.2");
+ clk_register_clkdev(clk[hclk], NULL, "imx-i2c.0");
+ clk_register_clkdev(clk[per2], "per", "imx1-cspi.0");
+ clk_register_clkdev(clk[dummy], "ipg", "imx1-cspi.0");
+ clk_register_clkdev(clk[per2], "per", "imx1-cspi.1");
+ clk_register_clkdev(clk[dummy], "ipg", "imx1-cspi.1");
+ clk_register_clkdev(clk[per2], NULL, "imx-mmc.0");
+ clk_register_clkdev(clk[per2], "per", "imx-fb.0");
+ clk_register_clkdev(clk[dummy], "ipg", "imx-fb.0");
+ clk_register_clkdev(clk[dummy], "ahb", "imx-fb.0");
+ clk_register_clkdev(clk[hclk], "mshc", NULL);
+ clk_register_clkdev(clk[per3], "ssi", NULL);
+ clk_register_clkdev(clk[clk32], NULL, "mxc_rtc.0");
+ clk_register_clkdev(clk[clko], "clko", NULL);
+
+ mxc_timer_init(NULL, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR),
+ MX1_TIM1_INT);
+
+ return 0;
+}
diff --git a/arch/arm/mach-imx/clk-imx21.c b/arch/arm/mach-imx/clk-imx21.c
new file mode 100644
index 000000000000..4e4f384ee8dd
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx21.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+#include "clk.h"
+
+#define IO_ADDR_CCM(off) (MX21_IO_ADDRESS(MX21_CCM_BASE_ADDR + (off)))
+
+/* Register offsets */
+#define CCM_CSCR IO_ADDR_CCM(0x0)
+#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
+#define CCM_MPCTL1 IO_ADDR_CCM(0x8)
+#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
+#define CCM_SPCTL1 IO_ADDR_CCM(0x10)
+#define CCM_OSC26MCTL IO_ADDR_CCM(0x14)
+#define CCM_PCDR0 IO_ADDR_CCM(0x18)
+#define CCM_PCDR1 IO_ADDR_CCM(0x1c)
+#define CCM_PCCR0 IO_ADDR_CCM(0x20)
+#define CCM_PCCR1 IO_ADDR_CCM(0x24)
+#define CCM_CCSR IO_ADDR_CCM(0x28)
+#define CCM_PMCTL IO_ADDR_CCM(0x2c)
+#define CCM_PMCOUNT IO_ADDR_CCM(0x30)
+#define CCM_WKGDCTL IO_ADDR_CCM(0x34)
+
+static const char *mpll_sel_clks[] = { "fpm", "ckih", };
+static const char *spll_sel_clks[] = { "fpm", "ckih", };
+
+enum imx21_clks {
+ ckil, ckih, fpm, mpll_sel, spll_sel, mpll, spll, fclk, hclk, ipg, per1,
+ per2, per3, per4, uart1_ipg_gate, uart2_ipg_gate, uart3_ipg_gate,
+ uart4_ipg_gate, gpt1_ipg_gate, gpt2_ipg_gate, gpt3_ipg_gate,
+ pwm_ipg_gate, sdhc1_ipg_gate, sdhc2_ipg_gate, lcdc_ipg_gate,
+ lcdc_hclk_gate, cspi3_ipg_gate, cspi2_ipg_gate, cspi1_ipg_gate,
+ per4_gate, csi_hclk_gate, usb_div, usb_gate, usb_hclk_gate, ssi1_gate,
+ ssi2_gate, nfc_div, nfc_gate, dma_gate, dma_hclk_gate, brom_gate,
+ emma_gate, emma_hclk_gate, slcdc_gate, slcdc_hclk_gate, wdog_gate,
+ gpio_gate, i2c_gate, kpp_gate, owire_gate, rtc_gate, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+/*
+ * must be called very early to get information about the
+ * available clock rate when the timer framework starts
+ */
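+/*
+ * A typical call site would be the board's timer setup code, passing the
+ * 32 kHz and high-frequency reference rates, e.g. (illustrative values):
+ *	mx21_clocks_init(32768, 26000000);
+ */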
+int __init mx21_clocks_init(unsigned long lref, unsigned long href)
+{
+ int i;
+
+ clk[ckil] = imx_clk_fixed("ckil", lref);
+ clk[ckih] = imx_clk_fixed("ckih", href);
+ clk[fpm] = imx_clk_fixed_factor("fpm", "ckil", 512, 1);
+ clk[mpll_sel] = imx_clk_mux("mpll_sel", CCM_CSCR, 16, 1, mpll_sel_clks,
+ ARRAY_SIZE(mpll_sel_clks));
+ clk[spll_sel] = imx_clk_mux("spll_sel", CCM_CSCR, 17, 1, spll_sel_clks,
+ ARRAY_SIZE(spll_sel_clks));
+ clk[mpll] = imx_clk_pllv1("mpll", "mpll_sel", CCM_MPCTL0);
+ clk[spll] = imx_clk_pllv1("spll", "spll_sel", CCM_SPCTL0);
+ clk[fclk] = imx_clk_divider("fclk", "mpll", CCM_CSCR, 29, 3);
+ clk[hclk] = imx_clk_divider("hclk", "fclk", CCM_CSCR, 10, 4);
+ clk[ipg] = imx_clk_divider("ipg", "hclk", CCM_CSCR, 9, 1);
+ clk[per1] = imx_clk_divider("per1", "mpll", CCM_PCDR1, 0, 6);
+ clk[per2] = imx_clk_divider("per2", "mpll", CCM_PCDR1, 8, 6);
+ clk[per3] = imx_clk_divider("per3", "mpll", CCM_PCDR1, 16, 6);
+ clk[per4] = imx_clk_divider("per4", "mpll", CCM_PCDR1, 24, 6);
+ clk[uart1_ipg_gate] = imx_clk_gate("uart1_ipg_gate", "ipg", CCM_PCCR0, 0);
+ clk[uart2_ipg_gate] = imx_clk_gate("uart2_ipg_gate", "ipg", CCM_PCCR0, 1);
+ clk[uart3_ipg_gate] = imx_clk_gate("uart3_ipg_gate", "ipg", CCM_PCCR0, 2);
+ clk[uart4_ipg_gate] = imx_clk_gate("uart4_ipg_gate", "ipg", CCM_PCCR0, 3);
+ clk[gpt1_ipg_gate] = imx_clk_gate("gpt1_ipg_gate", "ipg", CCM_PCCR1, 25);
+ clk[gpt2_ipg_gate] = imx_clk_gate("gpt2_ipg_gate", "ipg", CCM_PCCR1, 26);
+ clk[gpt3_ipg_gate] = imx_clk_gate("gpt3_ipg_gate", "ipg", CCM_PCCR1, 27);
+ clk[pwm_ipg_gate] = imx_clk_gate("pwm_ipg_gate", "ipg", CCM_PCCR1, 28);
+ clk[sdhc1_ipg_gate] = imx_clk_gate("sdhc1_ipg_gate", "ipg", CCM_PCCR0, 9);
+ clk[sdhc2_ipg_gate] = imx_clk_gate("sdhc2_ipg_gate", "ipg", CCM_PCCR0, 10);
+ clk[lcdc_ipg_gate] = imx_clk_gate("lcdc_ipg_gate", "ipg", CCM_PCCR0, 18);
+ clk[lcdc_hclk_gate] = imx_clk_gate("lcdc_hclk_gate", "hclk", CCM_PCCR0, 26);
+ clk[cspi3_ipg_gate] = imx_clk_gate("cspi3_ipg_gate", "ipg", CCM_PCCR1, 23);
+ clk[cspi2_ipg_gate] = imx_clk_gate("cspi2_ipg_gate", "ipg", CCM_PCCR0, 5);
+ clk[cspi1_ipg_gate] = imx_clk_gate("cspi1_ipg_gate", "ipg", CCM_PCCR0, 4);
+ clk[per4_gate] = imx_clk_gate("per4_gate", "per4", CCM_PCCR0, 22);
+ clk[csi_hclk_gate] = imx_clk_gate("csi_hclk_gate", "hclk", CCM_PCCR0, 31);
+ clk[usb_div] = imx_clk_divider("usb_div", "spll", CCM_CSCR, 26, 3);
+ clk[usb_gate] = imx_clk_gate("usb_gate", "usb_div", CCM_PCCR0, 14);
+ clk[usb_hclk_gate] = imx_clk_gate("usb_hclk_gate", "hclk", CCM_PCCR0, 24);
+ clk[ssi1_gate] = imx_clk_gate("ssi1_gate", "ipg", CCM_PCCR0, 6);
+ clk[ssi2_gate] = imx_clk_gate("ssi2_gate", "ipg", CCM_PCCR0, 7);
+ clk[nfc_div] = imx_clk_divider("nfc_div", "ipg", CCM_PCDR0, 12, 4);
+ clk[nfc_gate] = imx_clk_gate("nfc_gate", "nfc_div", CCM_PCCR0, 19);
+ clk[dma_gate] = imx_clk_gate("dma_gate", "ipg", CCM_PCCR0, 13);
+ clk[dma_hclk_gate] = imx_clk_gate("dma_hclk_gate", "hclk", CCM_PCCR0, 30);
+ clk[brom_gate] = imx_clk_gate("brom_gate", "hclk", CCM_PCCR0, 28);
+ clk[emma_gate] = imx_clk_gate("emma_gate", "ipg", CCM_PCCR0, 15);
+ clk[emma_hclk_gate] = imx_clk_gate("emma_hclk_gate", "hclk", CCM_PCCR0, 27);
+ clk[slcdc_gate] = imx_clk_gate("slcdc_gate", "ipg", CCM_PCCR0, 25);
+ clk[slcdc_hclk_gate] = imx_clk_gate("slcdc_hclk_gate", "hclk", CCM_PCCR0, 21);
+ clk[wdog_gate] = imx_clk_gate("wdog_gate", "ipg", CCM_PCCR1, 24);
+ clk[gpio_gate] = imx_clk_gate("gpio_gate", "ipg", CCM_PCCR0, 11);
+ clk[i2c_gate] = imx_clk_gate("i2c_gate", "ipg", CCM_PCCR0, 12);
+ clk[kpp_gate] = imx_clk_gate("kpp_gate", "ipg", CCM_PCCR1, 30);
+ clk[owire_gate] = imx_clk_gate("owire_gate", "ipg", CCM_PCCR1, 31);
+ clk[rtc_gate] = imx_clk_gate("rtc_gate", "ipg", CCM_PCCR1, 29);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX21 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ clk_register_clkdev(clk[per1], "per1", NULL);
+ clk_register_clkdev(clk[per2], "per2", NULL);
+ clk_register_clkdev(clk[per3], "per3", NULL);
+ clk_register_clkdev(clk[per4], "per4", NULL);
+ clk_register_clkdev(clk[per1], "per", "imx21-uart.0");
+ clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
+ clk_register_clkdev(clk[per1], "per", "imx21-uart.1");
+ clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
+ clk_register_clkdev(clk[per1], "per", "imx21-uart.2");
+ clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
+ clk_register_clkdev(clk[per1], "per", "imx21-uart.3");
+ clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
+ clk_register_clkdev(clk[gpt1_ipg_gate], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[per1], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[gpt2_ipg_gate], "ipg", "imx-gpt.1");
+ clk_register_clkdev(clk[per1], "per", "imx-gpt.1");
+ clk_register_clkdev(clk[gpt3_ipg_gate], "ipg", "imx-gpt.2");
+ clk_register_clkdev(clk[per1], "per", "imx-gpt.2");
+ clk_register_clkdev(clk[pwm_ipg_gate], "pwm", "mxc_pwm.0");
+ clk_register_clkdev(clk[per2], "per", "imx21-cspi.0");
+ clk_register_clkdev(clk[cspi1_ipg_gate], "ipg", "imx21-cspi.0");
+ clk_register_clkdev(clk[per2], "per", "imx21-cspi.1");
+ clk_register_clkdev(clk[cspi2_ipg_gate], "ipg", "imx21-cspi.1");
+ clk_register_clkdev(clk[per2], "per", "imx21-cspi.2");
+ clk_register_clkdev(clk[cspi3_ipg_gate], "ipg", "imx21-cspi.2");
+ clk_register_clkdev(clk[per3], "per", "imx-fb.0");
+ clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0");
+ clk_register_clkdev(clk[lcdc_hclk_gate], "ahb", "imx-fb.0");
+ clk_register_clkdev(clk[usb_gate], "per", "imx21-hcd.0");
+ clk_register_clkdev(clk[usb_hclk_gate], "ahb", "imx21-hcd.0");
+ clk_register_clkdev(clk[nfc_gate], NULL, "mxc_nand.0");
+ clk_register_clkdev(clk[dma_hclk_gate], "ahb", "imx-dma");
+ clk_register_clkdev(clk[dma_gate], "ipg", "imx-dma");
+ clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
+ clk_register_clkdev(clk[i2c_gate], NULL, "imx-i2c.0");
+ clk_register_clkdev(clk[kpp_gate], NULL, "mxc-keypad");
+ clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1.0");
+ clk_register_clkdev(clk[brom_gate], "brom", NULL);
+ clk_register_clkdev(clk[emma_gate], "emma", NULL);
+ clk_register_clkdev(clk[slcdc_gate], "slcdc", NULL);
+ clk_register_clkdev(clk[gpio_gate], "gpio", NULL);
+ clk_register_clkdev(clk[rtc_gate], "rtc", NULL);
+ clk_register_clkdev(clk[csi_hclk_gate], "csi", NULL);
+ clk_register_clkdev(clk[ssi1_gate], "ssi1", NULL);
+ clk_register_clkdev(clk[ssi2_gate], "ssi2", NULL);
+ clk_register_clkdev(clk[sdhc1_ipg_gate], "sdhc1", NULL);
+ clk_register_clkdev(clk[sdhc2_ipg_gate], "sdhc2", NULL);
+
+ mxc_timer_init(NULL, MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR),
+ MX21_INT_GPT1);
+ return 0;
+}
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
new file mode 100644
index 000000000000..d9833bb5fd61
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx25.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2009 by Sascha Hauer, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+#include <mach/mx25.h>
+#include "clk.h"
+
+#define CRM_BASE MX25_IO_ADDRESS(MX25_CRM_BASE_ADDR)
+
+#define CCM_MPCTL 0x00
+#define CCM_UPCTL 0x04
+#define CCM_CCTL 0x08
+#define CCM_CGCR0 0x0C
+#define CCM_CGCR1 0x10
+#define CCM_CGCR2 0x14
+#define CCM_PCDR0 0x18
+#define CCM_PCDR1 0x1C
+#define CCM_PCDR2 0x20
+#define CCM_PCDR3 0x24
+#define CCM_RCSR 0x28
+#define CCM_CRDR 0x2C
+#define CCM_DCVR0 0x30
+#define CCM_DCVR1 0x34
+#define CCM_DCVR2 0x38
+#define CCM_DCVR3 0x3c
+#define CCM_LTR0 0x40
+#define CCM_LTR1 0x44
+#define CCM_LTR2 0x48
+#define CCM_LTR3 0x4c
+#define CCM_MCR 0x64
+
+#define ccm(x) (CRM_BASE + (x))
+
+static const char *cpu_sel_clks[] = { "mpll", "mpll_cpu_3_4", };
+static const char *per_sel_clks[] = { "ahb", "upll", };
+
+enum mx25_clks {
+ dummy, osc, mpll, upll, mpll_cpu_3_4, cpu_sel, cpu, ahb, usb_div, ipg,
+ per0_sel, per1_sel, per2_sel, per3_sel, per4_sel, per5_sel, per6_sel,
+ per7_sel, per8_sel, per9_sel, per10_sel, per11_sel, per12_sel,
+ per13_sel, per14_sel, per15_sel, per0, per1, per2, per3, per4, per5,
+ per6, per7, per8, per9, per10, per11, per12, per13, per14, per15,
+ csi_ipg_per, esdhc1_ipg_per, esdhc2_ipg_per, gpt_ipg_per, i2c_ipg_per,
+ lcdc_ipg_per, nfc_ipg_per, ssi1_ipg_per, ssi2_ipg_per, uart_ipg_per,
+ csi_ahb, esdhc1_ahb, esdhc2_ahb, fec_ahb, lcdc_ahb, sdma_ahb,
+ usbotg_ahb, can1_ipg, can2_ipg, csi_ipg, cspi1_ipg, cspi2_ipg,
+ cspi3_ipg, dryice_ipg, esdhc1_ipg, esdhc2_ipg, fec_ipg, iim_ipg,
+ kpp_ipg, lcdc_ipg, pwm1_ipg, pwm2_ipg, pwm3_ipg, pwm4_ipg, sdma_ipg,
+ ssi1_ipg, ssi2_ipg, tsc_ipg, uart1_ipg, uart2_ipg, uart3_ipg,
+ uart4_ipg, uart5_ipg, wdt_ipg, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx25_clocks_init(void)
+{
+ int i;
+
+ clk[dummy] = imx_clk_fixed("dummy", 0);
+ clk[osc] = imx_clk_fixed("osc", 24000000);
+ clk[mpll] = imx_clk_pllv1("mpll", "osc", ccm(CCM_MPCTL));
+ clk[upll] = imx_clk_pllv1("upll", "osc", ccm(CCM_UPCTL));
+ clk[mpll_cpu_3_4] = imx_clk_fixed_factor("mpll_cpu_3_4", "mpll", 3, 4);
+ clk[cpu_sel] = imx_clk_mux("cpu_sel", ccm(CCM_CCTL), 14, 1, cpu_sel_clks, ARRAY_SIZE(cpu_sel_clks));
+ clk[cpu] = imx_clk_divider("cpu", "cpu_sel", ccm(CCM_CCTL), 30, 2);
+ clk[ahb] = imx_clk_divider("ahb", "cpu", ccm(CCM_CCTL), 28, 2);
+ clk[usb_div] = imx_clk_divider("usb_div", "upll", ccm(CCM_CCTL), 16, 6);
+ clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2);
+ clk[per0_sel] = imx_clk_mux("per0_sel", ccm(CCM_MCR), 0, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per1_sel] = imx_clk_mux("per1_sel", ccm(CCM_MCR), 1, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per2_sel] = imx_clk_mux("per2_sel", ccm(CCM_MCR), 2, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per3_sel] = imx_clk_mux("per3_sel", ccm(CCM_MCR), 3, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per4_sel] = imx_clk_mux("per4_sel", ccm(CCM_MCR), 4, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per5_sel] = imx_clk_mux("per5_sel", ccm(CCM_MCR), 5, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per6_sel] = imx_clk_mux("per6_sel", ccm(CCM_MCR), 6, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per7_sel] = imx_clk_mux("per7_sel", ccm(CCM_MCR), 7, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per8_sel] = imx_clk_mux("per8_sel", ccm(CCM_MCR), 8, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per9_sel] = imx_clk_mux("per9_sel", ccm(CCM_MCR), 9, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per10_sel] = imx_clk_mux("per10_sel", ccm(CCM_MCR), 10, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per11_sel] = imx_clk_mux("per11_sel", ccm(CCM_MCR), 11, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per12_sel] = imx_clk_mux("per12_sel", ccm(CCM_MCR), 12, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per13_sel] = imx_clk_mux("per13_sel", ccm(CCM_MCR), 13, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per14_sel] = imx_clk_mux("per14_sel", ccm(CCM_MCR), 14, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per15_sel] = imx_clk_mux("per15_sel", ccm(CCM_MCR), 15, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per0] = imx_clk_divider("per0", "per0_sel", ccm(CCM_PCDR0), 0, 6);
+ clk[per1] = imx_clk_divider("per1", "per1_sel", ccm(CCM_PCDR0), 8, 6);
+ clk[per2] = imx_clk_divider("per2", "per2_sel", ccm(CCM_PCDR0), 16, 6);
+ clk[per3] = imx_clk_divider("per3", "per3_sel", ccm(CCM_PCDR0), 24, 6);
+ clk[per4] = imx_clk_divider("per4", "per4_sel", ccm(CCM_PCDR1), 0, 6);
+ clk[per5] = imx_clk_divider("per5", "per5_sel", ccm(CCM_PCDR1), 8, 6);
+ clk[per6] = imx_clk_divider("per6", "per6_sel", ccm(CCM_PCDR1), 16, 6);
+ clk[per7] = imx_clk_divider("per7", "per7_sel", ccm(CCM_PCDR1), 24, 6);
+ clk[per8] = imx_clk_divider("per8", "per8_sel", ccm(CCM_PCDR2), 0, 6);
+ clk[per9] = imx_clk_divider("per9", "per9_sel", ccm(CCM_PCDR2), 8, 6);
+ clk[per10] = imx_clk_divider("per10", "per10_sel", ccm(CCM_PCDR2), 16, 6);
+ clk[per11] = imx_clk_divider("per11", "per11_sel", ccm(CCM_PCDR2), 24, 6);
+ clk[per12] = imx_clk_divider("per12", "per12_sel", ccm(CCM_PCDR3), 0, 6);
+ clk[per13] = imx_clk_divider("per13", "per13_sel", ccm(CCM_PCDR3), 8, 6);
+ clk[per14] = imx_clk_divider("per14", "per14_sel", ccm(CCM_PCDR3), 16, 6);
+ clk[per15] = imx_clk_divider("per15", "per15_sel", ccm(CCM_PCDR3), 24, 6);
+ clk[csi_ipg_per] = imx_clk_gate("csi_ipg_per", "per0", ccm(CCM_CGCR0), 0);
+ clk[esdhc1_ipg_per] = imx_clk_gate("esdhc1_ipg_per", "per3", ccm(CCM_CGCR0), 3);
+ clk[esdhc2_ipg_per] = imx_clk_gate("esdhc2_ipg_per", "per4", ccm(CCM_CGCR0), 4);
+ clk[gpt_ipg_per] = imx_clk_gate("gpt_ipg_per", "per5", ccm(CCM_CGCR0), 5);
+ clk[i2c_ipg_per] = imx_clk_gate("i2c_ipg_per", "per6", ccm(CCM_CGCR0), 6);
+ clk[lcdc_ipg_per] = imx_clk_gate("lcdc_ipg_per", "per8", ccm(CCM_CGCR0), 7);
+ clk[nfc_ipg_per] = imx_clk_gate("nfc_ipg_per", "ipg_per", ccm(CCM_CGCR0), 8);
+ clk[ssi1_ipg_per] = imx_clk_gate("ssi1_ipg_per", "per13", ccm(CCM_CGCR0), 13);
+ clk[ssi2_ipg_per] = imx_clk_gate("ssi2_ipg_per", "per14", ccm(CCM_CGCR0), 14);
+ clk[uart_ipg_per] = imx_clk_gate("uart_ipg_per", "per15", ccm(CCM_CGCR0), 15);
+ clk[csi_ahb] = imx_clk_gate("csi_ahb", "ahb", ccm(CCM_CGCR0), 18);
+ clk[esdhc1_ahb] = imx_clk_gate("esdhc1_ahb", "ahb", ccm(CCM_CGCR0), 21);
+ clk[esdhc2_ahb] = imx_clk_gate("esdhc2_ahb", "ahb", ccm(CCM_CGCR0), 22);
+ clk[fec_ahb] = imx_clk_gate("fec_ahb", "ahb", ccm(CCM_CGCR0), 23);
+ clk[lcdc_ahb] = imx_clk_gate("lcdc_ahb", "ahb", ccm(CCM_CGCR0), 24);
+ clk[sdma_ahb] = imx_clk_gate("sdma_ahb", "ahb", ccm(CCM_CGCR0), 26);
+ clk[usbotg_ahb] = imx_clk_gate("usbotg_ahb", "ahb", ccm(CCM_CGCR0), 28);
+ clk[can1_ipg] = imx_clk_gate("can1_ipg", "ipg", ccm(CCM_CGCR1), 2);
+ clk[can2_ipg] = imx_clk_gate("can2_ipg", "ipg", ccm(CCM_CGCR1), 3);
+ clk[csi_ipg] = imx_clk_gate("csi_ipg", "ipg", ccm(CCM_CGCR1), 4);
+ clk[cspi1_ipg] = imx_clk_gate("cspi1_ipg", "ipg", ccm(CCM_CGCR1), 5);
+ clk[cspi2_ipg] = imx_clk_gate("cspi2_ipg", "ipg", ccm(CCM_CGCR1), 6);
+ clk[cspi3_ipg] = imx_clk_gate("cspi3_ipg", "ipg", ccm(CCM_CGCR1), 7);
+ clk[dryice_ipg] = imx_clk_gate("dryice_ipg", "ipg", ccm(CCM_CGCR1), 8);
+ clk[esdhc1_ipg] = imx_clk_gate("esdhc1_ipg", "ipg", ccm(CCM_CGCR1), 13);
+ clk[esdhc2_ipg] = imx_clk_gate("esdhc2_ipg", "ipg", ccm(CCM_CGCR1), 14);
+ clk[fec_ipg] = imx_clk_gate("fec_ipg", "ipg", ccm(CCM_CGCR1), 15);
+ clk[iim_ipg] = imx_clk_gate("iim_ipg", "ipg", ccm(CCM_CGCR1), 26);
+ clk[kpp_ipg] = imx_clk_gate("kpp_ipg", "ipg", ccm(CCM_CGCR1), 28);
+ clk[lcdc_ipg] = imx_clk_gate("lcdc_ipg", "ipg", ccm(CCM_CGCR1), 29);
+ clk[pwm1_ipg] = imx_clk_gate("pwm1_ipg", "ipg", ccm(CCM_CGCR1), 31);
+ clk[pwm2_ipg] = imx_clk_gate("pwm2_ipg", "ipg", ccm(CCM_CGCR2), 0);
+ clk[pwm3_ipg] = imx_clk_gate("pwm3_ipg", "ipg", ccm(CCM_CGCR2), 1);
+ clk[pwm4_ipg] = imx_clk_gate("pwm4_ipg", "ipg", ccm(CCM_CGCR2), 2);
+ clk[sdma_ipg] = imx_clk_gate("sdma_ipg", "ipg", ccm(CCM_CGCR2), 6);
+ clk[ssi1_ipg] = imx_clk_gate("ssi1_ipg", "ipg", ccm(CCM_CGCR2), 11);
+ clk[ssi2_ipg] = imx_clk_gate("ssi2_ipg", "ipg", ccm(CCM_CGCR2), 12);
+ clk[tsc_ipg] = imx_clk_gate("tsc_ipg", "ipg", ccm(CCM_CGCR2), 13);
+ clk[uart1_ipg] = imx_clk_gate("uart1_ipg", "ipg", ccm(CCM_CGCR2), 14);
+ clk[uart2_ipg] = imx_clk_gate("uart2_ipg", "ipg", ccm(CCM_CGCR2), 15);
+ clk[uart3_ipg] = imx_clk_gate("uart3_ipg", "ipg", ccm(CCM_CGCR2), 16);
+ clk[uart4_ipg] = imx_clk_gate("uart4_ipg", "ipg", ccm(CCM_CGCR2), 17);
+ clk[uart5_ipg] = imx_clk_gate("uart5_ipg", "ipg", ccm(CCM_CGCR2), 18);
+ clk[wdt_ipg] = imx_clk_gate("wdt_ipg", "ipg", ccm(CCM_CGCR2), 19);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX25 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ /* i.mx25 has the i.mx21 type uart */
+ clk_register_clkdev(clk[uart1_ipg], "ipg", "imx21-uart.0");
+ clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.0");
+ clk_register_clkdev(clk[uart2_ipg], "ipg", "imx21-uart.1");
+ clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.1");
+ clk_register_clkdev(clk[uart3_ipg], "ipg", "imx21-uart.2");
+ clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.2");
+ clk_register_clkdev(clk[uart4_ipg], "ipg", "imx21-uart.3");
+ clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.3");
+ clk_register_clkdev(clk[uart5_ipg], "ipg", "imx21-uart.4");
+ clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.4");
+ clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
+ clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.0");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
+ clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.1");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
+ clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.2");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
+ clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usbotg_ahb], "ahb", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
+ clk_register_clkdev(clk[nfc_ipg_per], NULL, "mxc_nand.0");
+ /* i.mx25 has the i.mx35 type cspi */
+ clk_register_clkdev(clk[cspi1_ipg], NULL, "imx35-cspi.0");
+ clk_register_clkdev(clk[cspi2_ipg], NULL, "imx35-cspi.1");
+ clk_register_clkdev(clk[cspi3_ipg], NULL, "imx35-cspi.2");
+ clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.0");
+ clk_register_clkdev(clk[per10], "per", "mxc_pwm.0");
+ clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.1");
+ clk_register_clkdev(clk[per10], "per", "mxc_pwm.1");
+ clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.2");
+ clk_register_clkdev(clk[per10], "per", "mxc_pwm.2");
+ clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.3");
+ clk_register_clkdev(clk[per10], "per", "mxc_pwm.3");
+ clk_register_clkdev(clk[kpp_ipg], NULL, "imx-keypad");
+ clk_register_clkdev(clk[tsc_ipg], NULL, "mx25-adc");
+ clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx-i2c.0");
+ clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx-i2c.1");
+ clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx-i2c.2");
+ clk_register_clkdev(clk[fec_ipg], "ipg", "imx25-fec.0");
+ clk_register_clkdev(clk[fec_ahb], "ahb", "imx25-fec.0");
+ clk_register_clkdev(clk[dryice_ipg], NULL, "imxdi_rtc.0");
+ clk_register_clkdev(clk[lcdc_ipg_per], "per", "imx-fb.0");
+ clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
+ clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
+ clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
+ clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
+ clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
+ clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
+ clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
+ clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
+ clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
+ clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
+ clk_register_clkdev(clk[esdhc2_ipg_per], "per", "sdhci-esdhc-imx25.1");
+ clk_register_clkdev(clk[esdhc2_ipg], "ipg", "sdhci-esdhc-imx25.1");
+ clk_register_clkdev(clk[esdhc2_ahb], "ahb", "sdhci-esdhc-imx25.1");
+ clk_register_clkdev(clk[csi_ipg_per], "per", "mx2-camera.0");
+ clk_register_clkdev(clk[csi_ipg], "ipg", "mx2-camera.0");
+ clk_register_clkdev(clk[csi_ahb], "ahb", "mx2-camera.0");
+ clk_register_clkdev(clk[dummy], "audmux", NULL);
+ clk_register_clkdev(clk[can1_ipg], NULL, "flexcan.0");
+ clk_register_clkdev(clk[can2_ipg], NULL, "flexcan.1");
+ /* i.mx25 has the i.mx35 type sdma */
+ clk_register_clkdev(clk[sdma_ipg], "ipg", "imx35-sdma");
+ clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
+ clk_register_clkdev(clk[iim_ipg], "iim", NULL);
+
+ mxc_timer_init(NULL, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
+ return 0;
+}
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c
new file mode 100644
index 000000000000..50a7ebd8d1b2
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx27.c
@@ -0,0 +1,290 @@
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+
+#include <mach/common.h>
+#include <mach/hardware.h>
+#include "clk.h"
+
+#define IO_ADDR_CCM(off) (MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR + (off)))
+
+/* Register offsets */
+#define CCM_CSCR IO_ADDR_CCM(0x0)
+#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
+#define CCM_MPCTL1 IO_ADDR_CCM(0x8)
+#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
+#define CCM_SPCTL1 IO_ADDR_CCM(0x10)
+#define CCM_OSC26MCTL IO_ADDR_CCM(0x14)
+#define CCM_PCDR0 IO_ADDR_CCM(0x18)
+#define CCM_PCDR1 IO_ADDR_CCM(0x1c)
+#define CCM_PCCR0 IO_ADDR_CCM(0x20)
+#define CCM_PCCR1 IO_ADDR_CCM(0x24)
+#define CCM_CCSR IO_ADDR_CCM(0x28)
+#define CCM_PMCTL IO_ADDR_CCM(0x2c)
+#define CCM_PMCOUNT IO_ADDR_CCM(0x30)
+#define CCM_WKGDCTL IO_ADDR_CCM(0x34)
+
+#define CCM_CSCR_UPDATE_DIS (1 << 31)
+#define CCM_CSCR_SSI2 (1 << 23)
+#define CCM_CSCR_SSI1 (1 << 22)
+#define CCM_CSCR_VPU (1 << 21)
+#define CCM_CSCR_MSHC (1 << 20)
+#define CCM_CSCR_SPLLRES (1 << 19)
+#define CCM_CSCR_MPLLRES (1 << 18)
+#define CCM_CSCR_SP (1 << 17)
+#define CCM_CSCR_MCU (1 << 16)
+#define CCM_CSCR_OSC26MDIV (1 << 4)
+#define CCM_CSCR_OSC26M (1 << 3)
+#define CCM_CSCR_FPM (1 << 2)
+#define CCM_CSCR_SPEN (1 << 1)
+#define CCM_CSCR_MPEN (1 << 0)
+
+/* i.MX27 TO2 (silicon revision 2.0) and later */
+#define CCM_CSCR_ARM_SRC (1 << 15)
+
+#define CCM_SPCTL1_LF (1 << 15)
+#define CCM_SPCTL1_BRMO (1 << 6)
+
+static const char *vpu_sel_clks[] = { "spll", "mpll_main2", };
+static const char *cpu_sel_clks[] = { "mpll_main2", "mpll", };
+static const char *clko_sel_clks[] = {
+ "ckil", "prem", "ckih", "ckih",
+ "ckih", "mpll", "spll", "cpu_div",
+ "ahb", "ipg", "per1_div", "per2_div",
+ "per3_div", "per4_div", "ssi1_div", "ssi2_div",
+ "nfc_div", "mshc_div", "vpu_div", "60m",
+ "32k", "usb_div", "dptc",
+};
+
+static const char *ssi_sel_clks[] = { "spll", "mpll", };
+
+enum mx27_clks {
+ dummy, ckih, ckil, mpll, spll, mpll_main2, ahb, ipg, nfc_div, per1_div,
+ per2_div, per3_div, per4_div, vpu_sel, vpu_div, usb_div, cpu_sel,
+ clko_sel, cpu_div, clko_div, ssi1_sel, ssi2_sel, ssi1_div, ssi2_div,
+ clko_en, ssi2_ipg_gate, ssi1_ipg_gate, slcdc_ipg_gate, sdhc3_ipg_gate,
+ sdhc2_ipg_gate, sdhc1_ipg_gate, scc_ipg_gate, sahara_ipg_gate,
+ rtc_ipg_gate, pwm_ipg_gate, owire_ipg_gate, lcdc_ipg_gate,
+ kpp_ipg_gate, iim_ipg_gate, i2c2_ipg_gate, i2c1_ipg_gate,
+ gpt6_ipg_gate, gpt5_ipg_gate, gpt4_ipg_gate, gpt3_ipg_gate,
+ gpt2_ipg_gate, gpt1_ipg_gate, gpio_ipg_gate, fec_ipg_gate,
+ emma_ipg_gate, dma_ipg_gate, cspi3_ipg_gate, cspi2_ipg_gate,
+ cspi1_ipg_gate, nfc_baud_gate, ssi2_baud_gate, ssi1_baud_gate,
+ vpu_baud_gate, per4_gate, per3_gate, per2_gate, per1_gate,
+ usb_ahb_gate, slcdc_ahb_gate, sahara_ahb_gate, lcdc_ahb_gate,
+ vpu_ahb_gate, fec_ahb_gate, emma_ahb_gate, emi_ahb_gate, dma_ahb_gate,
+ csi_ahb_gate, brom_ahb_gate, ata_ahb_gate, wdog_ipg_gate, usb_ipg_gate,
+ uart6_ipg_gate, uart5_ipg_gate, uart4_ipg_gate, uart3_ipg_gate,
+ uart2_ipg_gate, uart1_ipg_gate, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx27_clocks_init(unsigned long fref)
+{
+ int i;
+
+ clk[dummy] = imx_clk_fixed("dummy", 0);
+ clk[ckih] = imx_clk_fixed("ckih", fref);
+ clk[ckil] = imx_clk_fixed("ckil", 32768);
+ clk[mpll] = imx_clk_pllv1("mpll", "ckih", CCM_MPCTL0);
+ clk[spll] = imx_clk_pllv1("spll", "ckih", CCM_SPCTL0);
+ clk[mpll_main2] = imx_clk_fixed_factor("mpll_main2", "mpll", 2, 3);
+
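+ /*
+ * The AHB/IPG dividers (and the CPU divider further down) sit at different
+ * bit positions in CSCR depending on the i.MX27 silicon revision, so the
+ * fields are selected at runtime via mx27_revision().
+ */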
+ if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
+ clk[ahb] = imx_clk_divider("ahb", "mpll_main2", CCM_CSCR, 8, 2);
+ clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2);
+ } else {
+ clk[ahb] = imx_clk_divider("ahb", "mpll_main2", CCM_CSCR, 9, 4);
+ clk[ipg] = imx_clk_divider("ipg", "ahb", CCM_CSCR, 8, 1);
+ }
+
+ clk[nfc_div] = imx_clk_divider("nfc_div", "ahb", CCM_PCDR0, 6, 4);
+ clk[per1_div] = imx_clk_divider("per1_div", "mpll_main2", CCM_PCDR1, 0, 6);
+ clk[per2_div] = imx_clk_divider("per2_div", "mpll_main2", CCM_PCDR1, 8, 6);
+ clk[per3_div] = imx_clk_divider("per3_div", "mpll_main2", CCM_PCDR1, 16, 6);
+ clk[per4_div] = imx_clk_divider("per4_div", "mpll_main2", CCM_PCDR1, 24, 6);
+ clk[vpu_sel] = imx_clk_mux("vpu_sel", CCM_CSCR, 21, 1, vpu_sel_clks, ARRAY_SIZE(vpu_sel_clks));
+ clk[vpu_div] = imx_clk_divider("vpu_div", "vpu_sel", CCM_PCDR0, 10, 3);
+ clk[usb_div] = imx_clk_divider("usb_div", "spll", CCM_CSCR, 28, 3);
+ clk[cpu_sel] = imx_clk_mux("cpu_sel", CCM_CSCR, 15, 1, cpu_sel_clks, ARRAY_SIZE(cpu_sel_clks));
+ clk[clko_sel] = imx_clk_mux("clko_sel", CCM_CCSR, 0, 5, clko_sel_clks, ARRAY_SIZE(clko_sel_clks));
+ if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
+ clk[cpu_div] = imx_clk_divider("cpu_div", "cpu_sel", CCM_CSCR, 12, 2);
+ else
+ clk[cpu_div] = imx_clk_divider("cpu_div", "cpu_sel", CCM_CSCR, 13, 3);
+ clk[clko_div] = imx_clk_divider("clko_div", "clko_sel", CCM_PCDR0, 22, 3);
+ clk[ssi1_sel] = imx_clk_mux("ssi1_sel", CCM_CSCR, 22, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
+ clk[ssi2_sel] = imx_clk_mux("ssi2_sel", CCM_CSCR, 23, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
+ clk[ssi1_div] = imx_clk_divider("ssi1_div", "ssi1_sel", CCM_PCDR0, 16, 6);
+ clk[ssi2_div] = imx_clk_divider("ssi2_div", "ssi2_sel", CCM_PCDR0, 26, 3);
+ clk[clko_en] = imx_clk_gate("clko_en", "clko_div", CCM_PCCR0, 0);
+ clk[ssi2_ipg_gate] = imx_clk_gate("ssi2_ipg_gate", "ipg", CCM_PCCR0, 0);
+ clk[ssi1_ipg_gate] = imx_clk_gate("ssi1_ipg_gate", "ipg", CCM_PCCR0, 1);
+ clk[slcdc_ipg_gate] = imx_clk_gate("slcdc_ipg_gate", "ipg", CCM_PCCR0, 2);
+ clk[sdhc3_ipg_gate] = imx_clk_gate("sdhc3_ipg_gate", "ipg", CCM_PCCR0, 3);
+ clk[sdhc2_ipg_gate] = imx_clk_gate("sdhc2_ipg_gate", "ipg", CCM_PCCR0, 4);
+ clk[sdhc1_ipg_gate] = imx_clk_gate("sdhc1_ipg_gate", "ipg", CCM_PCCR0, 5);
+ clk[scc_ipg_gate] = imx_clk_gate("scc_ipg_gate", "ipg", CCM_PCCR0, 6);
+ clk[sahara_ipg_gate] = imx_clk_gate("sahara_ipg_gate", "ipg", CCM_PCCR0, 7);
+ clk[rtc_ipg_gate] = imx_clk_gate("rtc_ipg_gate", "ipg", CCM_PCCR0, 9);
+ clk[pwm_ipg_gate] = imx_clk_gate("pwm_ipg_gate", "ipg", CCM_PCCR0, 11);
+ clk[owire_ipg_gate] = imx_clk_gate("owire_ipg_gate", "ipg", CCM_PCCR0, 12);
+ clk[lcdc_ipg_gate] = imx_clk_gate("lcdc_ipg_gate", "ipg", CCM_PCCR0, 14);
+ clk[kpp_ipg_gate] = imx_clk_gate("kpp_ipg_gate", "ipg", CCM_PCCR0, 15);
+ clk[iim_ipg_gate] = imx_clk_gate("iim_ipg_gate", "ipg", CCM_PCCR0, 16);
+ clk[i2c2_ipg_gate] = imx_clk_gate("i2c2_ipg_gate", "ipg", CCM_PCCR0, 17);
+ clk[i2c1_ipg_gate] = imx_clk_gate("i2c1_ipg_gate", "ipg", CCM_PCCR0, 18);
+ clk[gpt6_ipg_gate] = imx_clk_gate("gpt6_ipg_gate", "ipg", CCM_PCCR0, 19);
+ clk[gpt5_ipg_gate] = imx_clk_gate("gpt5_ipg_gate", "ipg", CCM_PCCR0, 20);
+ clk[gpt4_ipg_gate] = imx_clk_gate("gpt4_ipg_gate", "ipg", CCM_PCCR0, 21);
+ clk[gpt3_ipg_gate] = imx_clk_gate("gpt3_ipg_gate", "ipg", CCM_PCCR0, 22);
+ clk[gpt2_ipg_gate] = imx_clk_gate("gpt2_ipg_gate", "ipg", CCM_PCCR0, 23);
+ clk[gpt1_ipg_gate] = imx_clk_gate("gpt1_ipg_gate", "ipg", CCM_PCCR0, 24);
+ clk[gpio_ipg_gate] = imx_clk_gate("gpio_ipg_gate", "ipg", CCM_PCCR0, 25);
+ clk[fec_ipg_gate] = imx_clk_gate("fec_ipg_gate", "ipg", CCM_PCCR0, 26);
+ clk[emma_ipg_gate] = imx_clk_gate("emma_ipg_gate", "ipg", CCM_PCCR0, 27);
+ clk[dma_ipg_gate] = imx_clk_gate("dma_ipg_gate", "ipg", CCM_PCCR0, 28);
+ clk[cspi3_ipg_gate] = imx_clk_gate("cspi3_ipg_gate", "ipg", CCM_PCCR0, 29);
+ clk[cspi2_ipg_gate] = imx_clk_gate("cspi2_ipg_gate", "ipg", CCM_PCCR0, 30);
+ clk[cspi1_ipg_gate] = imx_clk_gate("cspi1_ipg_gate", "ipg", CCM_PCCR0, 31);
+ clk[nfc_baud_gate] = imx_clk_gate("nfc_baud_gate", "nfc_div", CCM_PCCR1, 3);
+ clk[ssi2_baud_gate] = imx_clk_gate("ssi2_baud_gate", "ssi2_div", CCM_PCCR1, 4);
+ clk[ssi1_baud_gate] = imx_clk_gate("ssi1_baud_gate", "ssi1_div", CCM_PCCR1, 5);
+ clk[vpu_baud_gate] = imx_clk_gate("vpu_baud_gate", "vpu_div", CCM_PCCR1, 6);
+ clk[per4_gate] = imx_clk_gate("per4_gate", "per4_div", CCM_PCCR1, 7);
+ clk[per3_gate] = imx_clk_gate("per3_gate", "per3_div", CCM_PCCR1, 8);
+ clk[per2_gate] = imx_clk_gate("per2_gate", "per2_div", CCM_PCCR1, 9);
+ clk[per1_gate] = imx_clk_gate("per1_gate", "per1_div", CCM_PCCR1, 10);
+ clk[usb_ahb_gate] = imx_clk_gate("usb_ahb_gate", "ahb", CCM_PCCR1, 11);
+ clk[slcdc_ahb_gate] = imx_clk_gate("slcdc_ahb_gate", "ahb", CCM_PCCR1, 12);
+ clk[sahara_ahb_gate] = imx_clk_gate("sahara_ahb_gate", "ahb", CCM_PCCR1, 13);
+ clk[lcdc_ahb_gate] = imx_clk_gate("lcdc_ahb_gate", "ahb", CCM_PCCR1, 15);
+ clk[vpu_ahb_gate] = imx_clk_gate("vpu_ahb_gate", "ahb", CCM_PCCR1, 16);
+ clk[fec_ahb_gate] = imx_clk_gate("fec_ahb_gate", "ahb", CCM_PCCR1, 17);
+ clk[emma_ahb_gate] = imx_clk_gate("emma_ahb_gate", "ahb", CCM_PCCR1, 18);
+ clk[emi_ahb_gate] = imx_clk_gate("emi_ahb_gate", "ahb", CCM_PCCR1, 19);
+ clk[dma_ahb_gate] = imx_clk_gate("dma_ahb_gate", "ahb", CCM_PCCR1, 20);
+ clk[csi_ahb_gate] = imx_clk_gate("csi_ahb_gate", "ahb", CCM_PCCR1, 21);
+ clk[brom_ahb_gate] = imx_clk_gate("brom_ahb_gate", "ahb", CCM_PCCR1, 22);
+ clk[ata_ahb_gate] = imx_clk_gate("ata_ahb_gate", "ahb", CCM_PCCR1, 23);
+ clk[wdog_ipg_gate] = imx_clk_gate("wdog_ipg_gate", "ipg", CCM_PCCR1, 24);
+ clk[usb_ipg_gate] = imx_clk_gate("usb_ipg_gate", "ipg", CCM_PCCR1, 25);
+ clk[uart6_ipg_gate] = imx_clk_gate("uart6_ipg_gate", "ipg", CCM_PCCR1, 26);
+ clk[uart5_ipg_gate] = imx_clk_gate("uart5_ipg_gate", "ipg", CCM_PCCR1, 27);
+ clk[uart4_ipg_gate] = imx_clk_gate("uart4_ipg_gate", "ipg", CCM_PCCR1, 28);
+ clk[uart3_ipg_gate] = imx_clk_gate("uart3_ipg_gate", "ipg", CCM_PCCR1, 29);
+ clk[uart2_ipg_gate] = imx_clk_gate("uart2_ipg_gate", "ipg", CCM_PCCR1, 30);
+ clk[uart1_ipg_gate] = imx_clk_gate("uart1_ipg_gate", "ipg", CCM_PCCR1, 31);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX27 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
+ clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.0");
+ clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
+ clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.1");
+ clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
+ clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.2");
+ clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
+ clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.3");
+ clk_register_clkdev(clk[uart5_ipg_gate], "ipg", "imx21-uart.4");
+ clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.4");
+ clk_register_clkdev(clk[uart6_ipg_gate], "ipg", "imx21-uart.5");
+ clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.5");
+ clk_register_clkdev(clk[gpt1_ipg_gate], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[gpt2_ipg_gate], "ipg", "imx-gpt.1");
+ clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.1");
+ clk_register_clkdev(clk[gpt3_ipg_gate], "ipg", "imx-gpt.2");
+ clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.2");
+ clk_register_clkdev(clk[gpt4_ipg_gate], "ipg", "imx-gpt.3");
+ clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.3");
+ clk_register_clkdev(clk[gpt5_ipg_gate], "ipg", "imx-gpt.4");
+ clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.4");
+ clk_register_clkdev(clk[gpt6_ipg_gate], "ipg", "imx-gpt.5");
+ clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.5");
+ clk_register_clkdev(clk[pwm_ipg_gate], NULL, "mxc_pwm.0");
+ clk_register_clkdev(clk[per2_gate], "per", "mxc-mmc.0");
+ clk_register_clkdev(clk[sdhc1_ipg_gate], "ipg", "mxc-mmc.0");
+ clk_register_clkdev(clk[per2_gate], "per", "mxc-mmc.1");
+ clk_register_clkdev(clk[sdhc2_ipg_gate], "ipg", "mxc-mmc.1");
+ clk_register_clkdev(clk[per2_gate], "per", "mxc-mmc.2");
+ clk_register_clkdev(clk[sdhc2_ipg_gate], "ipg", "mxc-mmc.2");
+ clk_register_clkdev(clk[cspi1_ipg_gate], NULL, "imx27-cspi.0");
+ clk_register_clkdev(clk[cspi2_ipg_gate], NULL, "imx27-cspi.1");
+ clk_register_clkdev(clk[cspi3_ipg_gate], NULL, "imx27-cspi.2");
+ clk_register_clkdev(clk[per3_gate], "per", "imx-fb.0");
+ clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0");
+ clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx-fb.0");
+ clk_register_clkdev(clk[csi_ahb_gate], NULL, "mx2-camera.0");
+ clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
+ clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.0");
+ clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.0");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
+ clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.1");
+ clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.1");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
+ clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.2");
+ clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.2");
+ clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0");
+ clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
+ clk_register_clkdev(clk[nfc_baud_gate], NULL, "mxc_nand.0");
+ clk_register_clkdev(clk[vpu_baud_gate], "per", "imx-vpu");
+ clk_register_clkdev(clk[vpu_ahb_gate], "ahb", "imx-vpu");
+ clk_register_clkdev(clk[dma_ahb_gate], "ahb", "imx-dma");
+ clk_register_clkdev(clk[dma_ipg_gate], "ipg", "imx-dma");
+ clk_register_clkdev(clk[fec_ipg_gate], "ipg", "imx27-fec.0");
+ clk_register_clkdev(clk[fec_ahb_gate], "ahb", "imx27-fec.0");
+ clk_register_clkdev(clk[wdog_ipg_gate], NULL, "imx2-wdt.0");
+ clk_register_clkdev(clk[i2c1_ipg_gate], NULL, "imx-i2c.0");
+ clk_register_clkdev(clk[i2c2_ipg_gate], NULL, "imx-i2c.1");
+ clk_register_clkdev(clk[owire_ipg_gate], NULL, "mxc_w1.0");
+ clk_register_clkdev(clk[kpp_ipg_gate], NULL, "imx-keypad");
+ clk_register_clkdev(clk[emma_ahb_gate], "ahb", "imx-emma");
+ clk_register_clkdev(clk[emma_ipg_gate], "ipg", "imx-emma");
+ clk_register_clkdev(clk[iim_ipg_gate], "iim", NULL);
+ clk_register_clkdev(clk[gpio_ipg_gate], "gpio", NULL);
+ clk_register_clkdev(clk[brom_ahb_gate], "brom", NULL);
+ clk_register_clkdev(clk[ata_ahb_gate], "ata", NULL);
+ clk_register_clkdev(clk[rtc_ipg_gate], "rtc", NULL);
+ clk_register_clkdev(clk[scc_ipg_gate], "scc", NULL);
+ clk_register_clkdev(clk[cpu_div], "cpu", NULL);
+ clk_register_clkdev(clk[emi_ahb_gate], "emi_ahb", NULL);
+ clk_register_clkdev(clk[ssi1_baud_gate], "bitrate", "imx-ssi.0");
+ clk_register_clkdev(clk[ssi2_baud_gate], "bitrate", "imx-ssi.1");
+
+ mxc_timer_init(NULL, MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR),
+ MX27_INT_GPT1);
+
+ clk_prepare_enable(clk[emi_ahb_gate]);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+int __init mx27_clocks_init_dt(void)
+{
+ struct device_node *np;
+ u32 fref = 26000000; /* default */
+
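+ /*
+ * Override the 26 MHz default with the oscillator rate from the device
+ * tree, if a fixed-clock node compatible with "fsl,imx-osc26m" provides a
+ * "clock-frequency" property.
+ */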
+ for_each_compatible_node(np, NULL, "fixed-clock") {
+ if (!of_device_is_compatible(np, "fsl,imx-osc26m"))
+ continue;
+
+ if (!of_property_read_u32(np, "clock-frequency", &fref))
+ break;
+ }
+
+ return mx27_clocks_init(fref);
+}
+#endif
diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c
new file mode 100644
index 000000000000..a854b9cae5ea
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx31.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2012 Sascha Hauer <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/mx31.h>
+#include <mach/common.h>
+
+#include "clk.h"
+#include "crmregs-imx3.h"
+
+static const char *mcu_main_sel[] = { "spll", "mpll", };
+static const char *per_sel[] = { "per_div", "ipg", };
+static const char *csi_sel[] = { "upll", "spll", };
+static const char *fir_sel[] = { "mcu_main", "upll", "spll" };
+
+enum mx31_clks {
+ ckih, ckil, mpll, spll, upll, mcu_main, hsp, ahb, nfc, ipg, per_div,
+ per, csi, fir, csi_div, usb_div_pre, usb_div_post, fir_div_pre,
+ fir_div_post, sdhc1_gate, sdhc2_gate, gpt_gate, epit1_gate, epit2_gate,
+ iim_gate, ata_gate, sdma_gate, cspi3_gate, rng_gate, uart1_gate,
+ uart2_gate, ssi1_gate, i2c1_gate, i2c2_gate, i2c3_gate, hantro_gate,
+ mstick1_gate, mstick2_gate, csi_gate, rtc_gate, wdog_gate, pwm_gate,
+ sim_gate, ect_gate, usb_gate, kpp_gate, ipu_gate, uart3_gate,
+ uart4_gate, uart5_gate, owire_gate, ssi2_gate, cspi1_gate, cspi2_gate,
+ gacc_gate, emi_gate, rtic_gate, firi_gate, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx31_clocks_init(unsigned long fref)
+{
+ void __iomem *base = MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR);
+ int i;
+
+ clk[ckih] = imx_clk_fixed("ckih", fref);
+ clk[ckil] = imx_clk_fixed("ckil", 32768);
+ clk[mpll] = imx_clk_pllv1("mpll", "ckih", base + MXC_CCM_MPCTL);
+ clk[spll] = imx_clk_pllv1("spll", "ckih", base + MXC_CCM_SRPCTL);
+ clk[upll] = imx_clk_pllv1("upll", "ckih", base + MXC_CCM_UPCTL);
+ clk[mcu_main] = imx_clk_mux("mcu_main", base + MXC_CCM_PMCR0, 31, 1, mcu_main_sel, ARRAY_SIZE(mcu_main_sel));
+ clk[hsp] = imx_clk_divider("hsp", "mcu_main", base + MXC_CCM_PDR0, 11, 3);
+ clk[ahb] = imx_clk_divider("ahb", "mcu_main", base + MXC_CCM_PDR0, 3, 3);
+ clk[nfc] = imx_clk_divider("nfc", "ahb", base + MXC_CCM_PDR0, 8, 3);
+ clk[ipg] = imx_clk_divider("ipg", "ahb", base + MXC_CCM_PDR0, 6, 2);
+ clk[per_div] = imx_clk_divider("per_div", "upll", base + MXC_CCM_PDR0, 16, 5);
+ clk[per] = imx_clk_mux("per", base + MXC_CCM_CCMR, 24, 1, per_sel, ARRAY_SIZE(per_sel));
+ clk[csi] = imx_clk_mux("csi_sel", base + MXC_CCM_CCMR, 25, 1, csi_sel, ARRAY_SIZE(csi_sel));
+ clk[fir] = imx_clk_mux("fir_sel", base + MXC_CCM_CCMR, 11, 2, fir_sel, ARRAY_SIZE(fir_sel));
+ clk[csi_div] = imx_clk_divider("csi_div", "csi_sel", base + MXC_CCM_PDR0, 23, 9);
+ clk[usb_div_pre] = imx_clk_divider("usb_div_pre", "upll", base + MXC_CCM_PDR1, 30, 2);
+ clk[usb_div_post] = imx_clk_divider("usb_div_post", "usb_div_pre", base + MXC_CCM_PDR1, 27, 3);
+ clk[fir_div_pre] = imx_clk_divider("fir_div_pre", "fir_sel", base + MXC_CCM_PDR1, 24, 3);
+ clk[fir_div_post] = imx_clk_divider("fir_div_post", "fir_div_pre", base + MXC_CCM_PDR1, 23, 6);
+ clk[sdhc1_gate] = imx_clk_gate2("sdhc1_gate", "per", base + MXC_CCM_CGR0, 0);
+ clk[sdhc2_gate] = imx_clk_gate2("sdhc2_gate", "per", base + MXC_CCM_CGR0, 2);
+ clk[gpt_gate] = imx_clk_gate2("gpt_gate", "per", base + MXC_CCM_CGR0, 4);
+ clk[epit1_gate] = imx_clk_gate2("epit1_gate", "per", base + MXC_CCM_CGR0, 6);
+ clk[epit2_gate] = imx_clk_gate2("epit2_gate", "per", base + MXC_CCM_CGR0, 8);
+ clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", base + MXC_CCM_CGR0, 10);
+ clk[ata_gate] = imx_clk_gate2("ata_gate", "ipg", base + MXC_CCM_CGR0, 12);
+ clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ahb", base + MXC_CCM_CGR0, 14);
+ clk[cspi3_gate] = imx_clk_gate2("cspi3_gate", "ipg", base + MXC_CCM_CGR0, 16);
+ clk[rng_gate] = imx_clk_gate2("rng_gate", "ipg", base + MXC_CCM_CGR0, 18);
+ clk[uart1_gate] = imx_clk_gate2("uart1_gate", "per", base + MXC_CCM_CGR0, 20);
+ clk[uart2_gate] = imx_clk_gate2("uart2_gate", "per", base + MXC_CCM_CGR0, 22);
+ clk[ssi1_gate] = imx_clk_gate2("ssi1_gate", "spll", base + MXC_CCM_CGR0, 24);
+ clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "per", base + MXC_CCM_CGR0, 26);
+ clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "per", base + MXC_CCM_CGR0, 28);
+ clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per", base + MXC_CCM_CGR0, 30);
+ clk[hantro_gate] = imx_clk_gate2("hantro_gate", "per", base + MXC_CCM_CGR1, 0);
+ clk[mstick1_gate] = imx_clk_gate2("mstick1_gate", "per", base + MXC_CCM_CGR1, 2);
+ clk[mstick2_gate] = imx_clk_gate2("mstick2_gate", "per", base + MXC_CCM_CGR1, 4);
+ clk[csi_gate] = imx_clk_gate2("csi_gate", "csi_div", base + MXC_CCM_CGR1, 6);
+ clk[rtc_gate] = imx_clk_gate2("rtc_gate", "ipg", base + MXC_CCM_CGR1, 8);
+ clk[wdog_gate] = imx_clk_gate2("wdog_gate", "ipg", base + MXC_CCM_CGR1, 10);
+ clk[pwm_gate] = imx_clk_gate2("pwm_gate", "per", base + MXC_CCM_CGR1, 12);
+ clk[sim_gate] = imx_clk_gate2("sim_gate", "per", base + MXC_CCM_CGR1, 14);
+ clk[ect_gate] = imx_clk_gate2("ect_gate", "per", base + MXC_CCM_CGR1, 16);
+ clk[usb_gate] = imx_clk_gate2("usb_gate", "ahb", base + MXC_CCM_CGR1, 18);
+ clk[kpp_gate] = imx_clk_gate2("kpp_gate", "ipg", base + MXC_CCM_CGR1, 20);
+ clk[ipu_gate] = imx_clk_gate2("ipu_gate", "hsp", base + MXC_CCM_CGR1, 22);
+ clk[uart3_gate] = imx_clk_gate2("uart3_gate", "per", base + MXC_CCM_CGR1, 24);
+ clk[uart4_gate] = imx_clk_gate2("uart4_gate", "per", base + MXC_CCM_CGR1, 26);
+ clk[uart5_gate] = imx_clk_gate2("uart5_gate", "per", base + MXC_CCM_CGR1, 28);
+ clk[owire_gate] = imx_clk_gate2("owire_gate", "per", base + MXC_CCM_CGR1, 30);
+ clk[ssi2_gate] = imx_clk_gate2("ssi2_gate", "spll", base + MXC_CCM_CGR2, 0);
+ clk[cspi1_gate] = imx_clk_gate2("cspi1_gate", "ipg", base + MXC_CCM_CGR2, 2);
+ clk[cspi2_gate] = imx_clk_gate2("cspi2_gate", "ipg", base + MXC_CCM_CGR2, 4);
+ clk[gacc_gate] = imx_clk_gate2("gacc_gate", "per", base + MXC_CCM_CGR2, 6);
+ clk[emi_gate] = imx_clk_gate2("emi_gate", "ahb", base + MXC_CCM_CGR2, 8);
+ clk[rtic_gate] = imx_clk_gate2("rtic_gate", "ahb", base + MXC_CCM_CGR2, 10);
+ clk[firi_gate] = imx_clk_gate2("firi_gate", "upll", base + MXC_CCM_CGR2, 12);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("imx31 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[cspi1_gate], NULL, "imx31-cspi.0");
+ clk_register_clkdev(clk[cspi2_gate], NULL, "imx31-cspi.1");
+ clk_register_clkdev(clk[cspi3_gate], NULL, "imx31-cspi.2");
+ clk_register_clkdev(clk[pwm_gate], "pwm", NULL);
+ clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
+ clk_register_clkdev(clk[rtc_gate], "rtc", NULL);
+ clk_register_clkdev(clk[epit1_gate], "epit", NULL);
+ clk_register_clkdev(clk[epit2_gate], "epit", NULL);
+ clk_register_clkdev(clk[nfc], NULL, "mxc_nand.0");
+ clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
+ clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
+ clk_register_clkdev(clk[kpp_gate], "kpp", NULL);
+ clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.0");
+ clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.0");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
+ clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.1");
+ clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.1");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
+ clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.2");
+ clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.2");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
+ clk_register_clkdev(clk[usb_div_post], "per", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usb_gate], "ahb", "fsl-usb2-udc");
+ clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
+ clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
+ /* i.mx31 has the i.mx21 type uart */
+ clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
+ clk_register_clkdev(clk[uart2_gate], "per", "imx21-uart.1");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1");
+ clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2");
+ clk_register_clkdev(clk[uart4_gate], "per", "imx21-uart.3");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.3");
+ clk_register_clkdev(clk[uart5_gate], "per", "imx21-uart.4");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.4");
+ clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
+ clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
+ clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
+ clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1.0");
+ clk_register_clkdev(clk[sdhc1_gate], NULL, "mxc-mmc.0");
+ clk_register_clkdev(clk[sdhc2_gate], NULL, "mxc-mmc.1");
+ clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+ clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
+ clk_register_clkdev(clk[firi_gate], "firi", NULL);
+ clk_register_clkdev(clk[ata_gate], NULL, "pata_imx");
+ clk_register_clkdev(clk[rtic_gate], "rtic", NULL);
+ clk_register_clkdev(clk[rng_gate], "rng", NULL);
+ clk_register_clkdev(clk[sdma_gate], NULL, "imx31-sdma");
+ clk_register_clkdev(clk[iim_gate], "iim", NULL);
+
+ clk_set_parent(clk[csi], clk[upll]);
+ clk_prepare_enable(clk[emi_gate]);
+ clk_prepare_enable(clk[iim_gate]);
+ mx31_revision();
+ clk_disable_unprepare(clk[iim_gate]);
+
+ mxc_timer_init(NULL, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR),
+ MX31_INT_GPT);
+
+ return 0;
+}
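The clkdev lookups registered above bind each clock to a (device name, connection id) pair; a platform driver then resolves them through the generic clk API rather than by array index. A minimal consumer-side sketch, assuming a hypothetical driver whose device name matches one of the registrations above (the probe function, its error handling, and the driver itself are illustrative only):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *clk_per, *clk_ipg;
		int ret;

		/* resolved against the clkdev table by (dev_name(&pdev->dev), con_id) */
		clk_per = devm_clk_get(&pdev->dev, "per");
		if (IS_ERR(clk_per))
			return PTR_ERR(clk_per);

		clk_ipg = devm_clk_get(&pdev->dev, "ipg");
		if (IS_ERR(clk_ipg))
			return PTR_ERR(clk_ipg);

		/* ungate both clocks before touching the peripheral registers */
		ret = clk_prepare_enable(clk_ipg);
		if (ret)
			return ret;

		ret = clk_prepare_enable(clk_per);
		if (ret)
			clk_disable_unprepare(clk_ipg);

		return ret;
	}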
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
new file mode 100644
index 000000000000..a9e60bf7dd75
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+
+#include "crmregs-imx3.h"
+#include "clk.h"
+
+struct arm_ahb_div {
+ unsigned char arm, ahb, sel;
+};
+
+static struct arm_ahb_div clk_consumer[] = {
+ { .arm = 1, .ahb = 4, .sel = 0},
+ { .arm = 1, .ahb = 3, .sel = 1},
+ { .arm = 2, .ahb = 2, .sel = 0},
+ { .arm = 0, .ahb = 0, .sel = 0},
+ { .arm = 0, .ahb = 0, .sel = 0},
+ { .arm = 0, .ahb = 0, .sel = 0},
+ { .arm = 4, .ahb = 1, .sel = 0},
+ { .arm = 1, .ahb = 5, .sel = 0},
+ { .arm = 1, .ahb = 8, .sel = 0},
+ { .arm = 1, .ahb = 6, .sel = 1},
+ { .arm = 2, .ahb = 4, .sel = 0},
+ { .arm = 0, .ahb = 0, .sel = 0},
+ { .arm = 0, .ahb = 0, .sel = 0},
+ { .arm = 0, .ahb = 0, .sel = 0},
+ { .arm = 4, .ahb = 2, .sel = 0},
+ { .arm = 0, .ahb = 0, .sel = 0},
+};
+
+static char hsp_div_532[] = { 4, 8, 3, 0 };
+static char hsp_div_400[] = { 3, 6, 3, 0 };
+
+static const char *std_sel[] = {"ppll", "arm"};
+static const char *ipg_per_sel[] = {"ahb_per_div", "arm_per_div"};
+
+enum mx35_clks {
+ ckih, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg,
+ arm_per_div, ahb_per_div, ipg_per, uart_sel, uart_div, esdhc_sel,
+ esdhc1_div, esdhc2_div, esdhc3_div, spdif_sel, spdif_div_pre,
+ spdif_div_post, ssi_sel, ssi1_div_pre, ssi1_div_post, ssi2_div_pre,
+ ssi2_div_post, usb_sel, usb_div, nfc_div, asrc_gate, pata_gate,
+ audmux_gate, can1_gate, can2_gate, cspi1_gate, cspi2_gate, ect_gate,
+ edio_gate, emi_gate, epit1_gate, epit2_gate, esai_gate, esdhc1_gate,
+ esdhc2_gate, esdhc3_gate, fec_gate, gpio1_gate, gpio2_gate, gpio3_gate,
+ gpt_gate, i2c1_gate, i2c2_gate, i2c3_gate, iomuxc_gate, ipu_gate,
+ kpp_gate, mlb_gate, mshc_gate, owire_gate, pwm_gate, rngc_gate,
+ rtc_gate, rtic_gate, scc_gate, sdma_gate, spba_gate, spdif_gate,
+ ssi1_gate, ssi2_gate, uart1_gate, uart2_gate, uart3_gate, usbotg_gate,
+ wdog_gate, max_gate, admux_gate, csi_gate, iim_gate, gpu2d_gate,
+ clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx35_clocks_init(void)
+{
+ void __iomem *base = MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR);
+ u32 pdr0, consumer_sel, hsp_sel;
+ struct arm_ahb_div *aad;
+ unsigned char *hsp_div;
+ int i;
+
+ pdr0 = __raw_readl(base + MXC_CCM_PDR0);
+ consumer_sel = (pdr0 >> 16) & 0xf;
+ aad = &clk_consumer[consumer_sel];
+ if (!aad->arm) {
+ pr_err("i.MX35 clk: illegal consumer mux selection 0x%x\n", consumer_sel);
+ /*
+ * We are basically stuck. Continue with a default entry and hope we
+ * get far enough to actually show the above message
+ */
+ aad = &clk_consumer[0];
+ }
+
+ clk[ckih] = imx_clk_fixed("ckih", 24000000);
+ clk[mpll] = imx_clk_pllv1("mpll", "ckih", base + MX35_CCM_MPCTL);
+ clk[ppll] = imx_clk_pllv1("ppll", "ckih", base + MX35_CCM_PPCTL);
+
+ clk[mpll_075] = imx_clk_fixed_factor("mpll_075", "mpll", 3, 4);
+
+ if (aad->sel)
+ clk[arm] = imx_clk_fixed_factor("arm", "mpll_075", 1, aad->arm);
+ else
+ clk[arm] = imx_clk_fixed_factor("arm", "mpll", 1, aad->arm);
+
+ if (clk_get_rate(clk[arm]) > 400000000)
+ hsp_div = hsp_div_532;
+ else
+ hsp_div = hsp_div_400;
+
+ hsp_sel = (pdr0 >> 20) & 0x3;
+ if (!hsp_div[hsp_sel]) {
+ pr_err("i.MX35 clk: illegal hsp clk selection 0x%x\n", hsp_sel);
+ hsp_sel = 0;
+ }
+
+ clk[hsp] = imx_clk_fixed_factor("hsp", "arm", 1, hsp_div[hsp_sel]);
+
+ clk[ahb] = imx_clk_fixed_factor("ahb", "arm", 1, aad->ahb);
+ clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2);
+
+ clk[arm_per_div] = imx_clk_divider("arm_per_div", "arm", base + MX35_CCM_PDR4, 16, 6);
+ clk[ahb_per_div] = imx_clk_divider("ahb_per_div", "ahb", base + MXC_CCM_PDR0, 12, 3);
+ clk[ipg_per] = imx_clk_mux("ipg_per", base + MXC_CCM_PDR0, 26, 1, ipg_per_sel, ARRAY_SIZE(ipg_per_sel));
+
+ clk[uart_sel] = imx_clk_mux("uart_sel", base + MX35_CCM_PDR3, 14, 1, std_sel, ARRAY_SIZE(std_sel));
+ clk[uart_div] = imx_clk_divider("uart_div", "uart_sel", base + MX35_CCM_PDR4, 10, 6);
+
+ clk[esdhc_sel] = imx_clk_mux("esdhc_sel", base + MX35_CCM_PDR4, 9, 1, std_sel, ARRAY_SIZE(std_sel));
+ clk[esdhc1_div] = imx_clk_divider("esdhc1_div", "esdhc_sel", base + MX35_CCM_PDR3, 0, 6);
+ clk[esdhc2_div] = imx_clk_divider("esdhc2_div", "esdhc_sel", base + MX35_CCM_PDR3, 8, 6);
+ clk[esdhc3_div] = imx_clk_divider("esdhc3_div", "esdhc_sel", base + MX35_CCM_PDR3, 16, 6);
+
+ clk[spdif_sel] = imx_clk_mux("spdif_sel", base + MX35_CCM_PDR3, 22, 1, std_sel, ARRAY_SIZE(std_sel));
+ clk[spdif_div_pre] = imx_clk_divider("spdif_div_pre", "spdif_sel", base + MX35_CCM_PDR3, 29, 3); /* divide by 1 not allowed */
+ clk[spdif_div_post] = imx_clk_divider("spdif_div_post", "spdif_div_pre", base + MX35_CCM_PDR3, 23, 6);
+
+ clk[ssi_sel] = imx_clk_mux("ssi_sel", base + MX35_CCM_PDR2, 6, 1, std_sel, ARRAY_SIZE(std_sel));
+ clk[ssi1_div_pre] = imx_clk_divider("ssi1_div_pre", "ssi_sel", base + MX35_CCM_PDR2, 24, 3);
+ clk[ssi1_div_post] = imx_clk_divider("ssi1_div_post", "ssi1_div_pre", base + MX35_CCM_PDR2, 0, 6);
+ clk[ssi2_div_pre] = imx_clk_divider("ssi2_div_pre", "ssi_sel", base + MX35_CCM_PDR2, 27, 3);
+ clk[ssi2_div_post] = imx_clk_divider("ssi2_div_post", "ssi2_div_pre", base + MX35_CCM_PDR2, 8, 6);
+
+ clk[usb_sel] = imx_clk_mux("usb_sel", base + MX35_CCM_PDR4, 9, 1, std_sel, ARRAY_SIZE(std_sel));
+ clk[usb_div] = imx_clk_divider("usb_div", "usb_sel", base + MX35_CCM_PDR4, 22, 6);
+
+ clk[nfc_div] = imx_clk_divider("nfc_div", "ahb", base + MX35_CCM_PDR4, 28, 4);
+
+ clk[asrc_gate] = imx_clk_gate2("asrc_gate", "ipg", base + MX35_CCM_CGR0, 0);
+ clk[pata_gate] = imx_clk_gate2("pata_gate", "ipg", base + MX35_CCM_CGR0, 2);
+ clk[audmux_gate] = imx_clk_gate2("audmux_gate", "ipg", base + MX35_CCM_CGR0, 4);
+ clk[can1_gate] = imx_clk_gate2("can1_gate", "ipg", base + MX35_CCM_CGR0, 6);
+ clk[can2_gate] = imx_clk_gate2("can2_gate", "ipg", base + MX35_CCM_CGR0, 8);
+ clk[cspi1_gate] = imx_clk_gate2("cspi1_gate", "ipg", base + MX35_CCM_CGR0, 10);
+ clk[cspi2_gate] = imx_clk_gate2("cspi2_gate", "ipg", base + MX35_CCM_CGR0, 12);
+ clk[ect_gate] = imx_clk_gate2("ect_gate", "ipg", base + MX35_CCM_CGR0, 14);
+ clk[edio_gate] = imx_clk_gate2("edio_gate", "ipg", base + MX35_CCM_CGR0, 16);
+ clk[emi_gate] = imx_clk_gate2("emi_gate", "ipg", base + MX35_CCM_CGR0, 18);
+ clk[epit1_gate] = imx_clk_gate2("epit1_gate", "ipg", base + MX35_CCM_CGR0, 20);
+ clk[epit2_gate] = imx_clk_gate2("epit2_gate", "ipg", base + MX35_CCM_CGR0, 22);
+ clk[esai_gate] = imx_clk_gate2("esai_gate", "ipg", base + MX35_CCM_CGR0, 24);
+ clk[esdhc1_gate] = imx_clk_gate2("esdhc1_gate", "esdhc1_div", base + MX35_CCM_CGR0, 26);
+ clk[esdhc2_gate] = imx_clk_gate2("esdhc2_gate", "esdhc2_div", base + MX35_CCM_CGR0, 28);
+ clk[esdhc3_gate] = imx_clk_gate2("esdhc3_gate", "esdhc3_div", base + MX35_CCM_CGR0, 30);
+
+ clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", base + MX35_CCM_CGR1, 0);
+ clk[gpio1_gate] = imx_clk_gate2("gpio1_gate", "ipg", base + MX35_CCM_CGR1, 2);
+ clk[gpio2_gate] = imx_clk_gate2("gpio2_gate", "ipg", base + MX35_CCM_CGR1, 4);
+ clk[gpio3_gate] = imx_clk_gate2("gpio3_gate", "ipg", base + MX35_CCM_CGR1, 6);
+ clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", base + MX35_CCM_CGR1, 8);
+ clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "ipg_per", base + MX35_CCM_CGR1, 10);
+ clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "ipg_per", base + MX35_CCM_CGR1, 12);
+ clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "ipg_per", base + MX35_CCM_CGR1, 14);
+ clk[iomuxc_gate] = imx_clk_gate2("iomuxc_gate", "ipg", base + MX35_CCM_CGR1, 16);
+ clk[ipu_gate] = imx_clk_gate2("ipu_gate", "hsp", base + MX35_CCM_CGR1, 18);
+ clk[kpp_gate] = imx_clk_gate2("kpp_gate", "ipg", base + MX35_CCM_CGR1, 20);
+ clk[mlb_gate] = imx_clk_gate2("mlb_gate", "ahb", base + MX35_CCM_CGR1, 22);
+ clk[mshc_gate] = imx_clk_gate2("mshc_gate", "dummy", base + MX35_CCM_CGR1, 24);
+ clk[owire_gate] = imx_clk_gate2("owire_gate", "ipg_per", base + MX35_CCM_CGR1, 26);
+ clk[pwm_gate] = imx_clk_gate2("pwm_gate", "ipg_per", base + MX35_CCM_CGR1, 28);
+ clk[rngc_gate] = imx_clk_gate2("rngc_gate", "ipg", base + MX35_CCM_CGR1, 30);
+
+ clk[rtc_gate] = imx_clk_gate2("rtc_gate", "ipg", base + MX35_CCM_CGR2, 0);
+ clk[rtic_gate] = imx_clk_gate2("rtic_gate", "ahb", base + MX35_CCM_CGR2, 2);
+ clk[scc_gate] = imx_clk_gate2("scc_gate", "ipg", base + MX35_CCM_CGR2, 4);
+ clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ahb", base + MX35_CCM_CGR2, 6);
+ clk[spba_gate] = imx_clk_gate2("spba_gate", "ipg", base + MX35_CCM_CGR2, 8);
+ clk[spdif_gate] = imx_clk_gate2("spdif_gate", "spdif_div_post", base + MX35_CCM_CGR2, 10);
+ clk[ssi1_gate] = imx_clk_gate2("ssi1_gate", "ssi1_div_post", base + MX35_CCM_CGR2, 12);
+ clk[ssi2_gate] = imx_clk_gate2("ssi2_gate", "ssi2_div_post", base + MX35_CCM_CGR2, 14);
+ clk[uart1_gate] = imx_clk_gate2("uart1_gate", "uart_div", base + MX35_CCM_CGR2, 16);
+ clk[uart2_gate] = imx_clk_gate2("uart2_gate", "uart_div", base + MX35_CCM_CGR2, 18);
+ clk[uart3_gate] = imx_clk_gate2("uart3_gate", "uart_div", base + MX35_CCM_CGR2, 20);
+ clk[usbotg_gate] = imx_clk_gate2("usbotg_gate", "ahb", base + MX35_CCM_CGR2, 22);
+ clk[wdog_gate] = imx_clk_gate2("wdog_gate", "ipg", base + MX35_CCM_CGR2, 24);
+ clk[max_gate] = imx_clk_gate2("max_gate", "dummy", base + MX35_CCM_CGR2, 26);
+ clk[admux_gate] = imx_clk_gate2("admux_gate", "ipg", base + MX35_CCM_CGR2, 30);
+
+ clk[csi_gate] = imx_clk_gate2("csi_gate", "ipg", base + MX35_CCM_CGR3, 0);
+ clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", base + MX35_CCM_CGR3, 2);
+ clk[gpu2d_gate] = imx_clk_gate2("gpu2d_gate", "ahb", base + MX35_CCM_CGR3, 4);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX35 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
+ clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
+ clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
+ clk_register_clkdev(clk[cspi1_gate], "per", "imx35-cspi.0");
+ clk_register_clkdev(clk[cspi1_gate], "ipg", "imx35-cspi.0");
+ clk_register_clkdev(clk[cspi2_gate], "per", "imx35-cspi.1");
+ clk_register_clkdev(clk[cspi2_gate], "ipg", "imx35-cspi.1");
+ clk_register_clkdev(clk[epit1_gate], NULL, "imx-epit.0");
+ clk_register_clkdev(clk[epit2_gate], NULL, "imx-epit.1");
+ clk_register_clkdev(clk[esdhc1_gate], "per", "sdhci-esdhc-imx35.0");
+ clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.0");
+ clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.0");
+ clk_register_clkdev(clk[esdhc2_gate], "per", "sdhci-esdhc-imx35.1");
+ clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.1");
+ clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.1");
+ clk_register_clkdev(clk[esdhc3_gate], "per", "sdhci-esdhc-imx35.2");
+ clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.2");
+ clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.2");
+ /* i.mx35 has the i.mx27 type fec */
+ clk_register_clkdev(clk[fec_gate], NULL, "imx27-fec.0");
+ clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
+ clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
+ clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
+ clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
+ clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
+ clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
+ clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
+ clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
+ clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
+ clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
+ clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+ /* i.mx35 has the i.mx21 type uart */
+ clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
+ clk_register_clkdev(clk[uart2_gate], "per", "imx21-uart.1");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1");
+ clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
+ clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.0");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
+ clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.1");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
+ clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.2");
+ clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
+ clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usbotg_gate], "ahb", "fsl-usb2-udc");
+ clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
+ clk_register_clkdev(clk[nfc_div], NULL, "mxc_nand.0");
+
+ clk_prepare_enable(clk[spba_gate]);
+ clk_prepare_enable(clk[gpio1_gate]);
+ clk_prepare_enable(clk[gpio2_gate]);
+ clk_prepare_enable(clk[gpio3_gate]);
+ clk_prepare_enable(clk[iim_gate]);
+ clk_prepare_enable(clk[emi_gate]);
+
+ imx_print_silicon_rev("i.MX35", mx35_revision());
+
+#ifdef CONFIG_MXC_USE_EPIT
+ epit_timer_init(&epit1_clk,
+ MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
+#else
+ mxc_timer_init(NULL, MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR),
+ MX35_INT_GPT);
+#endif
+
+ return 0;
+}
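Every gate in this file follows the same four-argument pattern: a new clock name, its parent, the CGR register holding the 2-bit gate field, and that field's bit position; a clkdev entry then exposes the result to a driver. A short sketch of adding one more gate along the same lines, with a made-up clock name, device id, and bit position (CGR3 bit 6 is only an example):

	static void __init example_add_gate(void __iomem *base)
	{
		struct clk *gate;

		/* clock name, parent name, gate register, 2-bit field position */
		gate = imx_clk_gate2("foo_gate", "ipg", base + MX35_CCM_CGR3, 6);
		if (IS_ERR(gate)) {
			pr_err("i.MX35 clk: foo_gate failed with %ld\n", PTR_ERR(gate));
			return;
		}

		/* let the driver find it via clk_get(&pdev->dev, NULL) */
		clk_register_clkdev(gate, NULL, "imx-foo.0");
	}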
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
new file mode 100644
index 000000000000..fcd94f3b0f0e
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+
+#include "crm-regs-imx5.h"
+#include "clk.h"
+
+/* Low-power Audio Playback Mode clock */
+static const char *lp_apm_sel[] = { "osc", };
+
+/* This parent list is shared by several of the muxes below */
+static const char *standard_pll_sel[] = { "pll1_sw", "pll2_sw", "pll3_sw", "lp_apm", };
+static const char *periph_apm_sel[] = { "pll1_sw", "pll3_sw", "lp_apm", };
+static const char *main_bus_sel[] = { "pll2_sw", "periph_apm", };
+static const char *per_lp_apm_sel[] = { "main_bus", "lp_apm", };
+static const char *per_root_sel[] = { "per_podf", "ipg", };
+static const char *esdhc_c_sel[] = { "esdhc_a_podf", "esdhc_b_podf", };
+static const char *esdhc_d_sel[] = { "esdhc_a_podf", "esdhc_b_podf", };
+static const char *ssi_apm_sels[] = { "ckih1", "lp_apm", "ckih2", };
+static const char *ssi_clk_sels[] = { "pll1_sw", "pll2_sw", "pll3_sw", "ssi_apm", };
+static const char *ssi3_clk_sels[] = { "ssi1_root_gate", "ssi2_root_gate", };
+static const char *ssi_ext1_com_sels[] = { "ssi_ext1_podf", "ssi1_root_gate", };
+static const char *ssi_ext2_com_sels[] = { "ssi_ext2_podf", "ssi2_root_gate", };
+static const char *emi_slow_sel[] = { "main_bus", "ahb", };
+static const char *usb_phy_sel_str[] = { "osc", "usb_phy_podf", };
+static const char *mx51_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "tve_di", };
+static const char *mx53_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "di_pll4_podf", "dummy", "ldb_di0", };
+static const char *mx53_ldb_di0_sel[] = { "pll3_sw", "pll4_sw", };
+static const char *mx51_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", };
+static const char *mx53_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", "ldb_di1", };
+static const char *mx53_ldb_di1_sel[] = { "pll3_sw", "pll4_sw", };
+static const char *mx51_tve_ext_sel[] = { "osc", "ckih1", };
+static const char *mx53_tve_ext_sel[] = { "pll4_sw", "ckih1", };
+static const char *tve_sel[] = { "tve_pred", "tve_ext_sel", };
+static const char *ipu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
+static const char *vpu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
+
+enum imx5_clks {
+ dummy, ckil, osc, ckih1, ckih2, ahb, ipg, axi_a, axi_b, uart_pred,
+ uart_root, esdhc_a_pred, esdhc_b_pred, esdhc_c_s, esdhc_d_s,
+ emi_sel, emi_slow_podf, nfc_podf, ecspi_pred, ecspi_podf, usboh3_pred,
+ usboh3_podf, usb_phy_pred, usb_phy_podf, cpu_podf, di_pred, tve_di,
+ tve_s, uart1_ipg_gate, uart1_per_gate, uart2_ipg_gate,
+ uart2_per_gate, uart3_ipg_gate, uart3_per_gate, i2c1_gate, i2c2_gate,
+ gpt_ipg_gate, pwm1_ipg_gate, pwm1_hf_gate, pwm2_ipg_gate, pwm2_hf_gate,
+ gpt_gate, fec_gate, usboh3_per_gate, esdhc1_ipg_gate, esdhc2_ipg_gate,
+ esdhc3_ipg_gate, esdhc4_ipg_gate, ssi1_ipg_gate, ssi2_ipg_gate,
+ ssi3_ipg_gate, ecspi1_ipg_gate, ecspi1_per_gate, ecspi2_ipg_gate,
+ ecspi2_per_gate, cspi_ipg_gate, sdma_gate, emi_slow_gate, ipu_s,
+ ipu_gate, nfc_gate, ipu_di1_gate, vpu_s, vpu_gate,
+ vpu_reference_gate, uart4_ipg_gate, uart4_per_gate, uart5_ipg_gate,
+ uart5_per_gate, tve_gate, tve_pred, esdhc1_per_gate, esdhc2_per_gate,
+ esdhc3_per_gate, esdhc4_per_gate, usb_phy_gate, hsi2c_gate,
+ mipi_hsc1_gate, mipi_hsc2_gate, mipi_esc_gate, mipi_hsp_gate,
+ ldb_di1_div_3_5, ldb_di1_div, ldb_di0_div_3_5, ldb_di0_div,
+ ldb_di1_gate, can2_serial_gate, can2_ipg_gate, i2c3_gate, lp_apm,
+ periph_apm, main_bus, ahb_max, aips_tz1, aips_tz2, tmax1, tmax2,
+ tmax3, spba, uart_sel, esdhc_a_sel, esdhc_b_sel, esdhc_a_podf,
+ esdhc_b_podf, ecspi_sel, usboh3_sel, usb_phy_sel, iim_gate,
+ usboh3_gate, emi_fast_gate, ipu_di0_gate, gpc_dvfs, pll1_sw, pll2_sw,
+ pll3_sw, ipu_di0_sel, ipu_di1_sel, tve_ext_sel, mx51_mipi, pll4_sw,
+ ldb_di1_sel, di_pll4_podf, ldb_di0_sel, ldb_di0_gate, usb_phy1_gate,
+ usb_phy2_gate, per_lp_apm, per_pred1, per_pred2, per_podf, per_root,
+ ssi_apm, ssi1_root_sel, ssi2_root_sel, ssi3_root_sel, ssi_ext1_sel,
+ ssi_ext2_sel, ssi_ext1_com_sel, ssi_ext2_com_sel, ssi1_root_pred,
+ ssi1_root_podf, ssi2_root_pred, ssi2_root_podf, ssi_ext1_pred,
+ ssi_ext1_podf, ssi_ext2_pred, ssi_ext2_podf, ssi1_root_gate,
+ ssi2_root_gate, ssi3_root_gate, ssi_ext1_gate, ssi_ext2_gate,
+ clk_max
+};
+
+static struct clk *clk[clk_max];
+
+static void __init mx5_clocks_common_init(unsigned long rate_ckil,
+ unsigned long rate_osc, unsigned long rate_ckih1,
+ unsigned long rate_ckih2)
+{
+ int i;
+
+ clk[dummy] = imx_clk_fixed("dummy", 0);
+ clk[ckil] = imx_clk_fixed("ckil", rate_ckil);
+ clk[osc] = imx_clk_fixed("osc", rate_osc);
+ clk[ckih1] = imx_clk_fixed("ckih1", rate_ckih1);
+ clk[ckih2] = imx_clk_fixed("ckih2", rate_ckih2);
+
+ clk[lp_apm] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 9, 1,
+ lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
+ clk[periph_apm] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2,
+ periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
+ clk[main_bus] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
+ main_bus_sel, ARRAY_SIZE(main_bus_sel));
+ clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCDR, 1, 1,
+ per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel));
+ clk[per_pred1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2);
+ clk[per_pred2] = imx_clk_divider("per_pred2", "per_pred1", MXC_CCM_CBCDR, 3, 3);
+ clk[per_podf] = imx_clk_divider("per_podf", "per_pred2", MXC_CCM_CBCDR, 0, 3);
+ clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCDR, 1, 0,
+ per_root_sel, ARRAY_SIZE(per_root_sel));
+ clk[ahb] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3);
+ clk[ahb_max] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28);
+ clk[aips_tz1] = imx_clk_gate2("aips_tz1", "ahb", MXC_CCM_CCGR0, 24);
+ clk[aips_tz2] = imx_clk_gate2("aips_tz2", "ahb", MXC_CCM_CCGR0, 26);
+ clk[tmax1] = imx_clk_gate2("tmax1", "ahb", MXC_CCM_CCGR1, 0);
+ clk[tmax2] = imx_clk_gate2("tmax2", "ahb", MXC_CCM_CCGR1, 2);
+ clk[tmax3] = imx_clk_gate2("tmax3", "ahb", MXC_CCM_CCGR1, 4);
+ clk[spba] = imx_clk_gate2("spba", "ipg", MXC_CCM_CCGR5, 0);
+ clk[ipg] = imx_clk_divider("ipg", "ahb", MXC_CCM_CBCDR, 8, 2);
+ clk[axi_a] = imx_clk_divider("axi_a", "main_bus", MXC_CCM_CBCDR, 16, 3);
+ clk[axi_b] = imx_clk_divider("axi_b", "main_bus", MXC_CCM_CBCDR, 19, 3);
+ clk[uart_sel] = imx_clk_mux("uart_sel", MXC_CCM_CSCMR1, 24, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[uart_pred] = imx_clk_divider("uart_pred", "uart_sel", MXC_CCM_CSCDR1, 3, 3);
+ clk[uart_root] = imx_clk_divider("uart_root", "uart_pred", MXC_CCM_CSCDR1, 0, 3);
+
+ clk[esdhc_a_sel] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[esdhc_b_sel] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[esdhc_a_pred] = imx_clk_divider("esdhc_a_pred", "esdhc_a_sel", MXC_CCM_CSCDR1, 16, 3);
+ clk[esdhc_a_podf] = imx_clk_divider("esdhc_a_podf", "esdhc_a_pred", MXC_CCM_CSCDR1, 11, 3);
+ clk[esdhc_b_pred] = imx_clk_divider("esdhc_b_pred", "esdhc_b_sel", MXC_CCM_CSCDR1, 22, 3);
+ clk[esdhc_b_podf] = imx_clk_divider("esdhc_b_podf", "esdhc_b_pred", MXC_CCM_CSCDR1, 19, 3);
+ clk[esdhc_c_s] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel));
+ clk[esdhc_d_s] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel));
+
+ clk[emi_sel] = imx_clk_mux("emi_sel", MXC_CCM_CBCDR, 26, 1,
+ emi_slow_sel, ARRAY_SIZE(emi_slow_sel));
+ clk[emi_slow_podf] = imx_clk_divider("emi_slow_podf", "emi_sel", MXC_CCM_CBCDR, 22, 3);
+ clk[nfc_podf] = imx_clk_divider("nfc_podf", "emi_slow_podf", MXC_CCM_CBCDR, 13, 3);
+ clk[ecspi_sel] = imx_clk_mux("ecspi_sel", MXC_CCM_CSCMR1, 4, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[ecspi_pred] = imx_clk_divider("ecspi_pred", "ecspi_sel", MXC_CCM_CSCDR2, 25, 3);
+ clk[ecspi_podf] = imx_clk_divider("ecspi_podf", "ecspi_pred", MXC_CCM_CSCDR2, 19, 6);
+ clk[usboh3_sel] = imx_clk_mux("usboh3_sel", MXC_CCM_CSCMR1, 22, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[usboh3_pred] = imx_clk_divider("usboh3_pred", "usboh3_sel", MXC_CCM_CSCDR1, 8, 3);
+ clk[usboh3_podf] = imx_clk_divider("usboh3_podf", "usboh3_pred", MXC_CCM_CSCDR1, 6, 2);
+ clk[usb_phy_pred] = imx_clk_divider("usb_phy_pred", "pll3_sw", MXC_CCM_CDCDR, 3, 3);
+ clk[usb_phy_podf] = imx_clk_divider("usb_phy_podf", "usb_phy_pred", MXC_CCM_CDCDR, 0, 3);
+ clk[usb_phy_sel] = imx_clk_mux("usb_phy_sel", MXC_CCM_CSCMR1, 26, 1,
+ usb_phy_sel_str, ARRAY_SIZE(usb_phy_sel_str));
+ clk[cpu_podf] = imx_clk_divider("cpu_podf", "pll1_sw", MXC_CCM_CACRR, 0, 3);
+ clk[di_pred] = imx_clk_divider("di_pred", "pll3_sw", MXC_CCM_CDCDR, 6, 3);
+ clk[tve_di] = imx_clk_fixed("tve_di", 65000000); /* FIXME */
+ clk[tve_s] = imx_clk_mux("tve_sel", MXC_CCM_CSCMR1, 7, 1, tve_sel, ARRAY_SIZE(tve_sel));
+ clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", MXC_CCM_CCGR0, 30);
+ clk[uart1_ipg_gate] = imx_clk_gate2("uart1_ipg_gate", "ipg", MXC_CCM_CCGR1, 6);
+ clk[uart1_per_gate] = imx_clk_gate2("uart1_per_gate", "uart_root", MXC_CCM_CCGR1, 8);
+ clk[uart2_ipg_gate] = imx_clk_gate2("uart2_ipg_gate", "ipg", MXC_CCM_CCGR1, 10);
+ clk[uart2_per_gate] = imx_clk_gate2("uart2_per_gate", "uart_root", MXC_CCM_CCGR1, 12);
+ clk[uart3_ipg_gate] = imx_clk_gate2("uart3_ipg_gate", "ipg", MXC_CCM_CCGR1, 14);
+ clk[uart3_per_gate] = imx_clk_gate2("uart3_per_gate", "uart_root", MXC_CCM_CCGR1, 16);
+ clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "per_root", MXC_CCM_CCGR1, 18);
+ clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "per_root", MXC_CCM_CCGR1, 20);
+ clk[gpt_ipg_gate] = imx_clk_gate2("gpt_ipg_gate", "ipg", MXC_CCM_CCGR2, 20);
+ clk[pwm1_ipg_gate] = imx_clk_gate2("pwm1_ipg_gate", "ipg", MXC_CCM_CCGR2, 10);
+ clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "ipg", MXC_CCM_CCGR2, 12);
+ clk[pwm2_ipg_gate] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14);
+ clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "ipg", MXC_CCM_CCGR2, 16);
+ clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", MXC_CCM_CCGR2, 18);
+ clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24);
+ clk[usboh3_gate] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26);
+ clk[usboh3_per_gate] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28);
+ clk[esdhc1_ipg_gate] = imx_clk_gate2("esdhc1_ipg_gate", "ipg", MXC_CCM_CCGR3, 0);
+ clk[esdhc2_ipg_gate] = imx_clk_gate2("esdhc2_ipg_gate", "ipg", MXC_CCM_CCGR3, 4);
+ clk[esdhc3_ipg_gate] = imx_clk_gate2("esdhc3_ipg_gate", "ipg", MXC_CCM_CCGR3, 8);
+ clk[esdhc4_ipg_gate] = imx_clk_gate2("esdhc4_ipg_gate", "ipg", MXC_CCM_CCGR3, 12);
+ clk[ssi1_ipg_gate] = imx_clk_gate2("ssi1_ipg_gate", "ipg", MXC_CCM_CCGR3, 16);
+ clk[ssi2_ipg_gate] = imx_clk_gate2("ssi2_ipg_gate", "ipg", MXC_CCM_CCGR3, 20);
+ clk[ssi3_ipg_gate] = imx_clk_gate2("ssi3_ipg_gate", "ipg", MXC_CCM_CCGR3, 24);
+ clk[ecspi1_ipg_gate] = imx_clk_gate2("ecspi1_ipg_gate", "ipg", MXC_CCM_CCGR4, 18);
+ clk[ecspi1_per_gate] = imx_clk_gate2("ecspi1_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 20);
+ clk[ecspi2_ipg_gate] = imx_clk_gate2("ecspi2_ipg_gate", "ipg", MXC_CCM_CCGR4, 22);
+ clk[ecspi2_per_gate] = imx_clk_gate2("ecspi2_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 24);
+ clk[cspi_ipg_gate] = imx_clk_gate2("cspi_ipg_gate", "ipg", MXC_CCM_CCGR4, 26);
+ clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ipg", MXC_CCM_CCGR4, 30);
+ clk[emi_fast_gate] = imx_clk_gate2("emi_fast_gate", "dummy", MXC_CCM_CCGR5, 14);
+ clk[emi_slow_gate] = imx_clk_gate2("emi_slow_gate", "emi_slow_podf", MXC_CCM_CCGR5, 16);
+ clk[ipu_s] = imx_clk_mux("ipu_sel", MXC_CCM_CBCMR, 6, 2, ipu_sel, ARRAY_SIZE(ipu_sel));
+ clk[ipu_gate] = imx_clk_gate2("ipu_gate", "ipu_sel", MXC_CCM_CCGR5, 10);
+ clk[nfc_gate] = imx_clk_gate2("nfc_gate", "nfc_podf", MXC_CCM_CCGR5, 20);
+ clk[ipu_di0_gate] = imx_clk_gate2("ipu_di0_gate", "ipu_di0_sel", MXC_CCM_CCGR6, 10);
+ clk[ipu_di1_gate] = imx_clk_gate2("ipu_di1_gate", "ipu_di1_sel", MXC_CCM_CCGR6, 12);
+ clk[vpu_s] = imx_clk_mux("vpu_sel", MXC_CCM_CBCMR, 14, 2, vpu_sel, ARRAY_SIZE(vpu_sel));
+ clk[vpu_gate] = imx_clk_gate2("vpu_gate", "vpu_sel", MXC_CCM_CCGR5, 6);
+ clk[vpu_reference_gate] = imx_clk_gate2("vpu_reference_gate", "osc", MXC_CCM_CCGR5, 8);
+ clk[uart4_ipg_gate] = imx_clk_gate2("uart4_ipg_gate", "ipg", MXC_CCM_CCGR7, 8);
+ clk[uart4_per_gate] = imx_clk_gate2("uart4_per_gate", "uart_root", MXC_CCM_CCGR7, 10);
+ clk[uart5_ipg_gate] = imx_clk_gate2("uart5_ipg_gate", "ipg", MXC_CCM_CCGR7, 12);
+ clk[uart5_per_gate] = imx_clk_gate2("uart5_per_gate", "uart_root", MXC_CCM_CCGR7, 14);
+ clk[gpc_dvfs] = imx_clk_gate2("gpc_dvfs", "dummy", MXC_CCM_CCGR5, 24);
+
+ clk[ssi_apm] = imx_clk_mux("ssi_apm", MXC_CCM_CSCMR1, 8, 2, ssi_apm_sels, ARRAY_SIZE(ssi_apm_sels));
+ clk[ssi1_root_sel] = imx_clk_mux("ssi1_root_sel", MXC_CCM_CSCMR1, 14, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+ clk[ssi2_root_sel] = imx_clk_mux("ssi2_root_sel", MXC_CCM_CSCMR1, 12, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+ clk[ssi3_root_sel] = imx_clk_mux("ssi3_root_sel", MXC_CCM_CSCMR1, 11, 1, ssi3_clk_sels, ARRAY_SIZE(ssi3_clk_sels));
+ clk[ssi_ext1_sel] = imx_clk_mux("ssi_ext1_sel", MXC_CCM_CSCMR1, 28, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+ clk[ssi_ext2_sel] = imx_clk_mux("ssi_ext2_sel", MXC_CCM_CSCMR1, 30, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+ clk[ssi_ext1_com_sel] = imx_clk_mux("ssi_ext1_com_sel", MXC_CCM_CSCMR1, 0, 1, ssi_ext1_com_sels, ARRAY_SIZE(ssi_ext1_com_sels));
+ clk[ssi_ext2_com_sel] = imx_clk_mux("ssi_ext2_com_sel", MXC_CCM_CSCMR1, 1, 1, ssi_ext2_com_sels, ARRAY_SIZE(ssi_ext2_com_sels));
+ clk[ssi1_root_pred] = imx_clk_divider("ssi1_root_pred", "ssi1_root_sel", MXC_CCM_CS1CDR, 6, 3);
+ clk[ssi1_root_podf] = imx_clk_divider("ssi1_root_podf", "ssi1_root_pred", MXC_CCM_CS1CDR, 0, 6);
+ clk[ssi2_root_pred] = imx_clk_divider("ssi2_root_pred", "ssi2_root_sel", MXC_CCM_CS2CDR, 6, 3);
+ clk[ssi2_root_podf] = imx_clk_divider("ssi2_root_podf", "ssi2_root_pred", MXC_CCM_CS2CDR, 0, 6);
+ clk[ssi_ext1_pred] = imx_clk_divider("ssi_ext1_pred", "ssi_ext1_sel", MXC_CCM_CS1CDR, 22, 3);
+ clk[ssi_ext1_podf] = imx_clk_divider("ssi_ext1_podf", "ssi_ext1_pred", MXC_CCM_CS1CDR, 16, 6);
+ clk[ssi_ext2_pred] = imx_clk_divider("ssi_ext2_pred", "ssi_ext2_sel", MXC_CCM_CS2CDR, 22, 3);
+ clk[ssi_ext2_podf] = imx_clk_divider("ssi_ext2_podf", "ssi_ext2_pred", MXC_CCM_CS2CDR, 16, 6);
+ clk[ssi1_root_gate] = imx_clk_gate2("ssi1_root_gate", "ssi1_root_podf", MXC_CCM_CCGR3, 18);
+ clk[ssi2_root_gate] = imx_clk_gate2("ssi2_root_gate", "ssi2_root_podf", MXC_CCM_CCGR3, 22);
+ clk[ssi3_root_gate] = imx_clk_gate2("ssi3_root_gate", "ssi3_root_sel", MXC_CCM_CCGR3, 26);
+ clk[ssi_ext1_gate] = imx_clk_gate2("ssi_ext1_gate", "ssi_ext1_com_sel", MXC_CCM_CCGR3, 28);
+ clk[ssi_ext2_gate] = imx_clk_gate2("ssi_ext2_gate", "ssi_ext2_com_sel", MXC_CCM_CCGR3, 30);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX5 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[gpt_ipg_gate], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[uart1_per_gate], "per", "imx21-uart.0");
+ clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
+ clk_register_clkdev(clk[uart2_per_gate], "per", "imx21-uart.1");
+ clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
+ clk_register_clkdev(clk[uart3_per_gate], "per", "imx21-uart.2");
+ clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
+ clk_register_clkdev(clk[uart4_per_gate], "per", "imx21-uart.3");
+ clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
+ clk_register_clkdev(clk[uart5_per_gate], "per", "imx21-uart.4");
+ clk_register_clkdev(clk[uart5_ipg_gate], "ipg", "imx21-uart.4");
+ clk_register_clkdev(clk[ecspi1_per_gate], "per", "imx51-ecspi.0");
+ clk_register_clkdev(clk[ecspi1_ipg_gate], "ipg", "imx51-ecspi.0");
+ clk_register_clkdev(clk[ecspi2_per_gate], "per", "imx51-ecspi.1");
+ clk_register_clkdev(clk[ecspi2_ipg_gate], "ipg", "imx51-ecspi.1");
+ clk_register_clkdev(clk[cspi_ipg_gate], NULL, "imx51-cspi.0");
+ clk_register_clkdev(clk[pwm1_ipg_gate], "pwm", "mxc_pwm.0");
+ clk_register_clkdev(clk[pwm2_ipg_gate], "pwm", "mxc_pwm.1");
+ clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
+ clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
+ clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.0");
+ clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.0");
+ clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.0");
+ clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.1");
+ clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.1");
+ clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.1");
+ clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.2");
+ clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.2");
+ clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.2");
+ clk_register_clkdev(clk[usboh3_per_gate], "per", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usboh3_gate], "ipg", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usboh3_gate], "ahb", "fsl-usb2-udc");
+ clk_register_clkdev(clk[nfc_gate], NULL, "mxc_nand");
+ clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0");
+ clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
+ clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "imx-ssi.2");
+ clk_register_clkdev(clk[ssi_ext1_gate], "ssi_ext1", NULL);
+ clk_register_clkdev(clk[ssi_ext2_gate], "ssi_ext2", NULL);
+ clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
+ clk_register_clkdev(clk[cpu_podf], "cpu", NULL);
+ clk_register_clkdev(clk[iim_gate], "iim", NULL);
+ clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.0");
+ clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.1");
+ clk_register_clkdev(clk[dummy], NULL, "imx-keypad");
+ clk_register_clkdev(clk[tve_gate], NULL, "imx-tve.0");
+ clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx-tve.0");
+
+ /* Set SDHC parents to be PLL2 */
+ clk_set_parent(clk[esdhc_a_sel], clk[pll2_sw]);
+ clk_set_parent(clk[esdhc_b_sel], clk[pll2_sw]);
+
+ /* move usb phy clk to 24MHz */
+ clk_set_parent(clk[usb_phy_sel], clk[osc]);
+
+ clk_prepare_enable(clk[gpc_dvfs]);
+ clk_prepare_enable(clk[ahb_max]); /* esdhc3 */
+ clk_prepare_enable(clk[aips_tz1]);
+ clk_prepare_enable(clk[aips_tz2]); /* fec */
+ clk_prepare_enable(clk[spba]);
+ clk_prepare_enable(clk[emi_fast_gate]); /* fec */
+ clk_prepare_enable(clk[tmax1]);
+ clk_prepare_enable(clk[tmax2]); /* esdhc2, fec */
+ clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */
+}
+
+int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
+ unsigned long rate_ckih1, unsigned long rate_ckih2)
+{
+ int i;
+
+ clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX51_DPLL1_BASE);
+ clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX51_DPLL2_BASE);
+ clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX51_DPLL3_BASE);
+ clk[ipu_di0_sel] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
+ mx51_ipu_di0_sel, ARRAY_SIZE(mx51_ipu_di0_sel));
+ clk[ipu_di1_sel] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
+ mx51_ipu_di1_sel, ARRAY_SIZE(mx51_ipu_di1_sel));
+ clk[tve_ext_sel] = imx_clk_mux("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
+ mx51_tve_ext_sel, ARRAY_SIZE(mx51_tve_ext_sel));
+ clk[tve_gate] = imx_clk_gate2("tve_gate", "tve_sel", MXC_CCM_CCGR2, 30);
+ clk[tve_pred] = imx_clk_divider("tve_pred", "pll3_sw", MXC_CCM_CDCDR, 28, 3);
+ clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
+ clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 6);
+ clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 10);
+ clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
+ clk[usb_phy_gate] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0);
+ clk[hsi2c_gate] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22);
+ clk[mipi_hsc1_gate] = imx_clk_gate2("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6);
+ clk[mipi_hsc2_gate] = imx_clk_gate2("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8);
+ clk[mipi_esc_gate] = imx_clk_gate2("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10);
+ clk[mipi_hsp_gate] = imx_clk_gate2("mipi_hsp_gate", "ipg", MXC_CCM_CCGR4, 12);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX51 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ mx5_clocks_common_init(rate_ckil, rate_osc, rate_ckih1, rate_ckih2);
+
+ clk_register_clkdev(clk[hsi2c_gate], NULL, "imx-i2c.2");
+ clk_register_clkdev(clk[mx51_mipi], "mipi_hsp", NULL);
+ clk_register_clkdev(clk[vpu_gate], NULL, "imx51-vpu.0");
+ clk_register_clkdev(clk[fec_gate], NULL, "imx27-fec.0");
+ clk_register_clkdev(clk[gpc_dvfs], "gpc_dvfs", NULL);
+ clk_register_clkdev(clk[ipu_gate], "bus", "imx51-ipu");
+ clk_register_clkdev(clk[ipu_di0_gate], "di0", "imx51-ipu");
+ clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx51-ipu");
+ clk_register_clkdev(clk[ipu_gate], "hsp", "imx51-ipu");
+ clk_register_clkdev(clk[usb_phy_gate], "phy", "mxc-ehci.0");
+ clk_register_clkdev(clk[esdhc1_ipg_gate], "ipg", "sdhci-esdhc-imx51.0");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.0");
+ clk_register_clkdev(clk[esdhc1_per_gate], "per", "sdhci-esdhc-imx51.0");
+ clk_register_clkdev(clk[esdhc2_ipg_gate], "ipg", "sdhci-esdhc-imx51.1");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.1");
+ clk_register_clkdev(clk[esdhc2_per_gate], "per", "sdhci-esdhc-imx51.1");
+ clk_register_clkdev(clk[esdhc3_ipg_gate], "ipg", "sdhci-esdhc-imx51.2");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.2");
+ clk_register_clkdev(clk[esdhc3_per_gate], "per", "sdhci-esdhc-imx51.2");
+ clk_register_clkdev(clk[esdhc4_ipg_gate], "ipg", "sdhci-esdhc-imx51.3");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.3");
+ clk_register_clkdev(clk[esdhc4_per_gate], "per", "sdhci-esdhc-imx51.3");
+ clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "83fcc000.ssi");
+ clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "70014000.ssi");
+ clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "83fe8000.ssi");
+
+ /* set the usboh3 parent to pll2_sw */
+ clk_set_parent(clk[usboh3_sel], clk[pll2_sw]);
+
+ /* set SDHC root clock to 166.25 MHz */
+ clk_set_rate(clk[esdhc_a_podf], 166250000);
+ clk_set_rate(clk[esdhc_b_podf], 166250000);
+
+ /* System timer */
+ mxc_timer_init(NULL, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
+ MX51_INT_GPT);
+
+ clk_prepare_enable(clk[iim_gate]);
+ imx_print_silicon_rev("i.MX51", mx51_revision());
+ clk_disable_unprepare(clk[iim_gate]);
+
+ return 0;
+}
+
+int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
+ unsigned long rate_ckih1, unsigned long rate_ckih2)
+{
+ int i;
+ unsigned long r;
+
+ clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX53_DPLL1_BASE);
+ clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX53_DPLL2_BASE);
+ clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX53_DPLL3_BASE);
+ clk[pll4_sw] = imx_clk_pllv2("pll4_sw", "osc", MX53_DPLL4_BASE);
+
+ clk[ldb_di1_sel] = imx_clk_mux("ldb_di1_sel", MXC_CCM_CSCMR2, 9, 1,
+ mx53_ldb_di1_sel, ARRAY_SIZE(mx53_ldb_di1_sel));
+ clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+ clk[ldb_di1_div] = imx_clk_divider("ldb_di1_div", "ldb_di1_div_3_5", MXC_CCM_CSCMR2, 11, 1);
+ clk[di_pll4_podf] = imx_clk_divider("di_pll4_podf", "pll4_sw", MXC_CCM_CDCDR, 16, 3);
+ clk[ldb_di0_sel] = imx_clk_mux("ldb_di0_sel", MXC_CCM_CSCMR2, 8, 1,
+ mx53_ldb_di0_sel, ARRAY_SIZE(mx53_ldb_di0_sel));
+ clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+ clk[ldb_di0_div] = imx_clk_divider("ldb_di0_div", "ldb_di0_div_3_5", MXC_CCM_CSCMR2, 10, 1);
+ clk[ldb_di0_gate] = imx_clk_gate2("ldb_di0_gate", "ldb_di0_div", MXC_CCM_CCGR6, 28);
+ clk[ldb_di1_gate] = imx_clk_gate2("ldb_di1_gate", "ldb_di1_div", MXC_CCM_CCGR6, 30);
+ clk[ipu_di0_sel] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
+ mx53_ipu_di0_sel, ARRAY_SIZE(mx53_ipu_di0_sel));
+ clk[ipu_di1_sel] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
+ mx53_ipu_di1_sel, ARRAY_SIZE(mx53_ipu_di1_sel));
+ clk[tve_ext_sel] = imx_clk_mux("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
+ mx53_tve_ext_sel, ARRAY_SIZE(mx53_tve_ext_sel));
+ clk[tve_gate] = imx_clk_gate2("tve_gate", "tve_pred", MXC_CCM_CCGR2, 30);
+ clk[tve_pred] = imx_clk_divider("tve_pred", "tve_ext_sel", MXC_CCM_CDCDR, 28, 3);
+ clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
+ clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6);
+ clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10);
+ clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
+ clk[usb_phy1_gate] = imx_clk_gate2("usb_phy1_gate", "usb_phy_sel", MXC_CCM_CCGR4, 10);
+ clk[usb_phy2_gate] = imx_clk_gate2("usb_phy2_gate", "usb_phy_sel", MXC_CCM_CCGR4, 12);
+ clk[can2_serial_gate] = imx_clk_gate2("can2_serial_gate", "ipg", MXC_CCM_CCGR4, 6);
+ clk[can2_ipg_gate] = imx_clk_gate2("can2_ipg_gate", "ipg", MXC_CCM_CCGR4, 8);
+ clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per_root", MXC_CCM_CCGR1, 22);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX53 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ mx5_clocks_common_init(rate_ckil, rate_osc, rate_ckih1, rate_ckih2);
+
+ clk_register_clkdev(clk[vpu_gate], NULL, "imx53-vpu.0");
+ clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
+ clk_register_clkdev(clk[fec_gate], NULL, "imx25-fec.0");
+ clk_register_clkdev(clk[ipu_gate], "bus", "imx53-ipu");
+ clk_register_clkdev(clk[ipu_di0_gate], "di0", "imx53-ipu");
+ clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx53-ipu");
+ clk_register_clkdev(clk[ipu_gate], "hsp", "imx53-ipu");
+ clk_register_clkdev(clk[usb_phy1_gate], "usb_phy1", "mxc-ehci.0");
+ clk_register_clkdev(clk[esdhc1_ipg_gate], "ipg", "sdhci-esdhc-imx53.0");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.0");
+ clk_register_clkdev(clk[esdhc1_per_gate], "per", "sdhci-esdhc-imx53.0");
+ clk_register_clkdev(clk[esdhc2_ipg_gate], "ipg", "sdhci-esdhc-imx53.1");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.1");
+ clk_register_clkdev(clk[esdhc2_per_gate], "per", "sdhci-esdhc-imx53.1");
+ clk_register_clkdev(clk[esdhc3_ipg_gate], "ipg", "sdhci-esdhc-imx53.2");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.2");
+ clk_register_clkdev(clk[esdhc3_per_gate], "per", "sdhci-esdhc-imx53.2");
+ clk_register_clkdev(clk[esdhc4_ipg_gate], "ipg", "sdhci-esdhc-imx53.3");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.3");
+ clk_register_clkdev(clk[esdhc4_per_gate], "per", "sdhci-esdhc-imx53.3");
+ clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "63fcc000.ssi");
+ clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "50014000.ssi");
+ clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "63fd0000.ssi");
+
+ /* set SDHC root clock to 200 MHz */
+ clk_set_rate(clk[esdhc_a_podf], 200000000);
+ clk_set_rate(clk[esdhc_b_podf], 200000000);
+
+ /* System timer */
+ mxc_timer_init(NULL, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
+ MX53_INT_GPT);
+
+ clk_prepare_enable(clk[iim_gate]);
+ imx_print_silicon_rev("i.MX53", mx53_revision());
+ clk_disable_unprepare(clk[iim_gate]);
+
+ r = clk_round_rate(clk[usboh3_per_gate], 54000000);
+ clk_set_rate(clk[usboh3_per_gate], r);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
+ unsigned long *ckih1, unsigned long *ckih2)
+{
+ struct device_node *np;
+
+ /* retrieve the frequency of fixed clocks from device tree */
+ for_each_compatible_node(np, NULL, "fixed-clock") {
+ u32 rate;
+ if (of_property_read_u32(np, "clock-frequency", &rate))
+ continue;
+
+ if (of_device_is_compatible(np, "fsl,imx-ckil"))
+ *ckil = rate;
+ else if (of_device_is_compatible(np, "fsl,imx-osc"))
+ *osc = rate;
+ else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
+ *ckih1 = rate;
+ else if (of_device_is_compatible(np, "fsl,imx-ckih2"))
+ *ckih2 = rate;
+ }
+}
+
+int __init mx51_clocks_init_dt(void)
+{
+ unsigned long ckil, osc, ckih1, ckih2;
+
+ clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
+ return mx51_clocks_init(ckil, osc, ckih1, ckih2);
+}
+
+int __init mx53_clocks_init_dt(void)
+{
+ unsigned long ckil, osc, ckih1, ckih2;
+
+ clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
+ return mx53_clocks_init(ckil, osc, ckih1, ckih2);
+}
+#endif
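clk_get_freq_dt() only collects the rates of the four fixed clocks from the device tree and hands them to the SoC-specific init; a board without a device tree would pass the crystal rates directly. A minimal sketch with placeholder rates (32.768 kHz ckil, 24 MHz osc, 22.5792 MHz ckih1, no ckih2); the actual values depend on the board:

	static void __init example_mx51_board_clocks_init(void)
	{
		/* rate_ckil, rate_osc, rate_ckih1, rate_ckih2, all in Hz */
		mx51_clocks_init(32768, 24000000, 22579200, 0);
	}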
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
new file mode 100644
index 000000000000..cab02d0a15d6
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <mach/common.h>
+#include "clk.h"
+
+#define CCGR0 0x68
+#define CCGR1 0x6c
+#define CCGR2 0x70
+#define CCGR3 0x74
+#define CCGR4 0x78
+#define CCGR5 0x7c
+#define CCGR6 0x80
+#define CCGR7 0x84
+
+#define CLPCR 0x54
+#define BP_CLPCR_LPM 0
+#define BM_CLPCR_LPM (0x3 << 0)
+#define BM_CLPCR_BYPASS_PMIC_READY (0x1 << 2)
+#define BM_CLPCR_ARM_CLK_DIS_ON_LPM (0x1 << 5)
+#define BM_CLPCR_SBYOS (0x1 << 6)
+#define BM_CLPCR_DIS_REF_OSC (0x1 << 7)
+#define BM_CLPCR_VSTBY (0x1 << 8)
+#define BP_CLPCR_STBY_COUNT 9
+#define BM_CLPCR_STBY_COUNT (0x3 << 9)
+#define BM_CLPCR_COSC_PWRDOWN (0x1 << 11)
+#define BM_CLPCR_WB_PER_AT_LPM (0x1 << 16)
+#define BM_CLPCR_WB_CORE_AT_LPM (0x1 << 17)
+#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS (0x1 << 19)
+#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS (0x1 << 21)
+#define BM_CLPCR_MASK_CORE0_WFI (0x1 << 22)
+#define BM_CLPCR_MASK_CORE1_WFI (0x1 << 23)
+#define BM_CLPCR_MASK_CORE2_WFI (0x1 << 24)
+#define BM_CLPCR_MASK_CORE3_WFI (0x1 << 25)
+#define BM_CLPCR_MASK_SCU_IDLE (0x1 << 26)
+#define BM_CLPCR_MASK_L2CC_IDLE (0x1 << 27)
+
+static void __iomem *ccm_base;
+
+void __init imx6q_clock_map_io(void) { }
+
+int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
+{
+ u32 val = readl_relaxed(ccm_base + CLPCR);
+
+ val &= ~BM_CLPCR_LPM;
+ switch (mode) {
+ case WAIT_CLOCKED:
+ break;
+ case WAIT_UNCLOCKED:
+ val |= 0x1 << BP_CLPCR_LPM;
+ break;
+ case STOP_POWER_ON:
+ val |= 0x2 << BP_CLPCR_LPM;
+ break;
+ case WAIT_UNCLOCKED_POWER_OFF:
+ val |= 0x1 << BP_CLPCR_LPM;
+ val &= ~BM_CLPCR_VSTBY;
+ val &= ~BM_CLPCR_SBYOS;
+ break;
+ case STOP_POWER_OFF:
+ val |= 0x2 << BP_CLPCR_LPM;
+ val |= 0x3 << BP_CLPCR_STBY_COUNT;
+ val |= BM_CLPCR_VSTBY;
+ val |= BM_CLPCR_SBYOS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel_relaxed(val, ccm_base + CLPCR);
+
+ return 0;
+}
+
+static const char *step_sels[] = { "osc", "pll2_pfd2_396m", };
+static const char *pll1_sw_sels[] = { "pll1_sys", "step", };
+static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
+static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", };
+static const char *periph_sels[] = { "periph_pre", "periph_clk2", };
+static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
+static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *audio_sels[] = { "pll4_audio", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", };
+static const char *gpu_axi_sels[] = { "axi", "ahb", };
+static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
+static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
+static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
+static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
+static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *ipu2_di0_sels[] = { "ipu2_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *ipu2_di1_sels[] = { "ipu2_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *hsi_tx_sels[] = { "pll3_120m", "pll2_pfd2_396m", };
+static const char *pcie_axi_sels[] = { "axi", "ahb", };
+static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_audio", };
+static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", };
+static const char *emi_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *vdo_axi_sels[] = { "axi", "ahb", };
+static const char *vpu_axi_sels[] = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video",
+ "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
+ "ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio", };
+
+static const char * const clks_init_on[] __initconst = {
+ "mmdc_ch0_axi", "mmdc_ch1_axi", "usboh3",
+};
+
+enum mx6q_clks {
+ dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m,
+ pll3_pfd0_720m, pll3_pfd1_540m, pll3_pfd2_508m, pll3_pfd3_454m,
+ pll2_198m, pll3_120m, pll3_80m, pll3_60m, twd, step, pll1_sw,
+ periph_pre, periph2_pre, periph_clk2_sel, periph2_clk2_sel, axi_sel,
+ esai_sel, asrc_sel, spdif_sel, gpu2d_axi, gpu3d_axi, gpu2d_core_sel,
+ gpu3d_core_sel, gpu3d_shader_sel, ipu1_sel, ipu2_sel, ldb_di0_sel,
+ ldb_di1_sel, ipu1_di0_pre_sel, ipu1_di1_pre_sel, ipu2_di0_pre_sel,
+ ipu2_di1_pre_sel, ipu1_di0_sel, ipu1_di1_sel, ipu2_di0_sel,
+ ipu2_di1_sel, hsi_tx_sel, pcie_axi_sel, ssi1_sel, ssi2_sel, ssi3_sel,
+ usdhc1_sel, usdhc2_sel, usdhc3_sel, usdhc4_sel, enfc_sel, emi_sel,
+ emi_slow_sel, vdo_axi_sel, vpu_axi_sel, cko1_sel, periph, periph2,
+ periph_clk2, periph2_clk2, ipg, ipg_per, esai_pred, esai_podf,
+ asrc_pred, asrc_podf, spdif_pred, spdif_podf, can_root, ecspi_root,
+ gpu2d_core_podf, gpu3d_core_podf, gpu3d_shader, ipu1_podf, ipu2_podf,
+ ldb_di0_podf, ldb_di1_podf, ipu1_di0_pre, ipu1_di1_pre, ipu2_di0_pre,
+ ipu2_di1_pre, hsi_tx_podf, ssi1_pred, ssi1_podf, ssi2_pred, ssi2_podf,
+ ssi3_pred, ssi3_podf, uart_serial_podf, usdhc1_podf, usdhc2_podf,
+ usdhc3_podf, usdhc4_podf, enfc_pred, enfc_podf, emi_podf,
+ emi_slow_podf, vpu_axi_podf, cko1_podf, axi, mmdc_ch0_axi_podf,
+ mmdc_ch1_axi_podf, arm, ahb, apbh_dma, asrc, can1_ipg, can1_serial,
+ can2_ipg, can2_serial, ecspi1, ecspi2, ecspi3, ecspi4, ecspi5, enet,
+ esai, gpt_ipg, gpt_ipg_per, gpu2d_core, gpu3d_core, hdmi_iahb,
+ hdmi_isfr, i2c1, i2c2, i2c3, iim, enfc, ipu1, ipu1_di0, ipu1_di1, ipu2,
+ ipu2_di0, ldb_di0, ldb_di1, ipu2_di1, hsi_tx, mlb, mmdc_ch0_axi,
+ mmdc_ch1_axi, ocram, openvg_axi, pcie_axi, pwm1, pwm2, pwm3, pwm4,
+ gpmi_bch_apb, gpmi_bch, gpmi_io, gpmi_apb, sata, sdma, spba, ssi1,
+ ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
+ usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
+ pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
+ ssi2_ipg, ssi3_ipg, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx6q_clocks_init(void)
+{
+ struct device_node *np;
+ void __iomem *base;
+ struct clk *c;
+ int i, irq;
+
+ clk[dummy] = imx_clk_fixed("dummy", 0);
+
+ /* retrieve the frequency of fixed clocks from device tree */
+ for_each_compatible_node(np, NULL, "fixed-clock") {
+ u32 rate;
+ if (of_property_read_u32(np, "clock-frequency", &rate))
+ continue;
+
+ if (of_device_is_compatible(np, "fsl,imx-ckil"))
+ clk[ckil] = imx_clk_fixed("ckil", rate);
+ else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
+ clk[ckih] = imx_clk_fixed("ckih", rate);
+ else if (of_device_is_compatible(np, "fsl,imx-osc"))
+ clk[osc] = imx_clk_fixed("osc", rate);
+ }
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ /* type name parent_name base gate_mask div_mask */
+ clk[pll1_sys] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x2000, 0x7f);
+ clk[pll2_bus] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", base + 0x30, 0x2000, 0x1);
+ clk[pll3_usb_otg] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", base + 0x10, 0x2000, 0x3);
+ clk[pll4_audio] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4_audio", "osc", base + 0x70, 0x2000, 0x7f);
+ clk[pll5_video] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5_video", "osc", base + 0xa0, 0x2000, 0x7f);
+ clk[pll6_mlb] = imx_clk_pllv3(IMX_PLLV3_MLB, "pll6_mlb", "osc", base + 0xd0, 0x2000, 0x0);
+ clk[pll7_usb_host] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host", "osc", base + 0x20, 0x2000, 0x3);
+ clk[pll8_enet] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll8_enet", "osc", base + 0xe0, 0x182000, 0x3);
+
+ /* name parent_name reg idx */
+ clk[pll2_pfd0_352m] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0);
+ clk[pll2_pfd1_594m] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1);
+ clk[pll2_pfd2_396m] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2);
+ clk[pll3_pfd0_720m] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0);
+ clk[pll3_pfd1_540m] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1);
+ clk[pll3_pfd2_508m] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2);
+ clk[pll3_pfd3_454m] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3);
+
+ /* name parent_name mult div */
+ clk[pll2_198m] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2);
+ clk[pll3_120m] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4);
+ clk[pll3_80m] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
+ clk[pll3_60m] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
+ clk[twd] = imx_clk_fixed_factor("twd", "arm", 1, 2);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ccm");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+ ccm_base = base;
+
+ /* name reg shift width parent_names num_parents */
+ clk[step] = imx_clk_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels));
+ clk[pll1_sw] = imx_clk_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels));
+ clk[periph_pre] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
+ clk[periph2_pre] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
+ clk[periph_clk2_sel] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 1, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
+ clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
+ clk[axi_sel] = imx_clk_mux("axi_sel", base + 0x14, 6, 2, axi_sels, ARRAY_SIZE(axi_sels));
+ clk[esai_sel] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels));
+ clk[asrc_sel] = imx_clk_mux("asrc_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels));
+ clk[spdif_sel] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels));
+ clk[gpu2d_axi] = imx_clk_mux("gpu2d_axi", base + 0x18, 0, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels));
+ clk[gpu3d_axi] = imx_clk_mux("gpu3d_axi", base + 0x18, 1, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels));
+ clk[gpu2d_core_sel] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels));
+ clk[gpu3d_core_sel] = imx_clk_mux("gpu3d_core_sel", base + 0x18, 4, 2, gpu3d_core_sels, ARRAY_SIZE(gpu3d_core_sels));
+ clk[gpu3d_shader_sel] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
+ clk[ipu1_sel] = imx_clk_mux("ipu1_sel", base + 0x3c, 9, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
+ clk[ipu2_sel] = imx_clk_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
+ clk[ldb_di0_sel] = imx_clk_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels));
+ clk[ldb_di1_sel] = imx_clk_mux("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels));
+ clk[ipu1_di0_pre_sel] = imx_clk_mux("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+ clk[ipu1_di1_pre_sel] = imx_clk_mux("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+ clk[ipu2_di0_pre_sel] = imx_clk_mux("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+ clk[ipu2_di1_pre_sel] = imx_clk_mux("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+ clk[ipu1_di0_sel] = imx_clk_mux("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels));
+ clk[ipu1_di1_sel] = imx_clk_mux("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels));
+ clk[ipu2_di0_sel] = imx_clk_mux("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels));
+ clk[ipu2_di1_sel] = imx_clk_mux("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels));
+ clk[hsi_tx_sel] = imx_clk_mux("hsi_tx_sel", base + 0x30, 28, 1, hsi_tx_sels, ARRAY_SIZE(hsi_tx_sels));
+ clk[pcie_axi_sel] = imx_clk_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels));
+ clk[ssi1_sel] = imx_clk_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clk[ssi2_sel] = imx_clk_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clk[ssi3_sel] = imx_clk_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clk[usdhc1_sel] = imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[usdhc2_sel] = imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[usdhc3_sel] = imx_clk_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[usdhc4_sel] = imx_clk_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[enfc_sel] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels));
+ clk[emi_sel] = imx_clk_mux("emi_sel", base + 0x1c, 27, 2, emi_sels, ARRAY_SIZE(emi_sels));
+ clk[emi_slow_sel] = imx_clk_mux("emi_slow_sel", base + 0x1c, 29, 2, emi_sels, ARRAY_SIZE(emi_sels));
+ clk[vdo_axi_sel] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels));
+ clk[vpu_axi_sel] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels));
+ clk[cko1_sel] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels));
+
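+ /*
+ * The busy_mux/busy_divider variants additionally poll a handshake bit
+ * (here in the register at offset 0x48, the CCM divider handshake
+ * in-process register) so that a reparent or rate change only returns
+ * once the CCM reports the switch as complete.
+ */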
+ /* name reg shift width busy: reg, shift parent_names num_parents */
+ clk[periph] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels));
+ clk[periph2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels));
+
+ /* name parent_name reg shift width */
+ clk[periph_clk2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
+ clk[periph2_clk2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
+ clk[ipg] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2);
+ clk[ipg_per] = imx_clk_divider("ipg_per", "ipg", base + 0x1c, 0, 6);
+ clk[esai_pred] = imx_clk_divider("esai_pred", "esai_sel", base + 0x28, 9, 3);
+ clk[esai_podf] = imx_clk_divider("esai_podf", "esai_pred", base + 0x28, 25, 3);
+ clk[asrc_pred] = imx_clk_divider("asrc_pred", "asrc_sel", base + 0x30, 12, 3);
+ clk[asrc_podf] = imx_clk_divider("asrc_podf", "asrc_pred", base + 0x30, 9, 3);
+ clk[spdif_pred] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3);
+ clk[spdif_podf] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3);
+ clk[can_root] = imx_clk_divider("can_root", "pll3_usb_otg", base + 0x20, 2, 6);
+ clk[ecspi_root] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
+ clk[gpu2d_core_podf] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3);
+ clk[gpu3d_core_podf] = imx_clk_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3);
+ clk[gpu3d_shader] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3);
+ clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
+ clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
+ clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_sel", base + 0x20, 10, 1);
+ clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_sel", base + 0x20, 11, 1);
+ clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3);
+ clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3);
+ clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3);
+ clk[ipu2_di1_pre] = imx_clk_divider("ipu2_di1_pre", "ipu2_di1_pre_sel", base + 0x38, 12, 3);
+ clk[hsi_tx_podf] = imx_clk_divider("hsi_tx_podf", "hsi_tx_sel", base + 0x30, 29, 3);
+ clk[ssi1_pred] = imx_clk_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3);
+ clk[ssi1_podf] = imx_clk_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6);
+ clk[ssi2_pred] = imx_clk_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3);
+ clk[ssi2_podf] = imx_clk_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6);
+ clk[ssi3_pred] = imx_clk_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3);
+ clk[ssi3_podf] = imx_clk_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6);
+ clk[uart_serial_podf] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6);
+ clk[usdhc1_podf] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3);
+ clk[usdhc2_podf] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3);
+ clk[usdhc3_podf] = imx_clk_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3);
+ clk[usdhc4_podf] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3);
+ clk[enfc_pred] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3);
+ clk[enfc_podf] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6);
+ clk[emi_podf] = imx_clk_divider("emi_podf", "emi_sel", base + 0x1c, 20, 3);
+ clk[emi_slow_podf] = imx_clk_divider("emi_slow_podf", "emi_slow_sel", base + 0x1c, 23, 3);
+ clk[vpu_axi_podf] = imx_clk_divider("vpu_axi_podf", "vpu_axi_sel", base + 0x24, 25, 3);
+ clk[cko1_podf] = imx_clk_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3);
+
+ /* name parent_name reg shift width busy: reg, shift */
+ clk[axi] = imx_clk_busy_divider("axi", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0);
+ clk[mmdc_ch0_axi_podf] = imx_clk_busy_divider("mmdc_ch0_axi_podf", "periph", base + 0x14, 19, 3, base + 0x48, 4);
+ clk[mmdc_ch1_axi_podf] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2);
+ clk[arm] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16);
+ clk[ahb] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
+
+ /* name parent_name reg shift */
+ clk[apbh_dma] = imx_clk_gate2("apbh_dma", "ahb", base + 0x68, 4);
+ clk[asrc] = imx_clk_gate2("asrc", "asrc_podf", base + 0x68, 6);
+ clk[can1_ipg] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14);
+ clk[can1_serial] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16);
+ clk[can2_ipg] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18);
+ clk[can2_serial] = imx_clk_gate2("can2_serial", "can_root", base + 0x68, 20);
+ clk[ecspi1] = imx_clk_gate2("ecspi1", "ecspi_root", base + 0x6c, 0);
+ clk[ecspi2] = imx_clk_gate2("ecspi2", "ecspi_root", base + 0x6c, 2);
+ clk[ecspi3] = imx_clk_gate2("ecspi3", "ecspi_root", base + 0x6c, 4);
+ clk[ecspi4] = imx_clk_gate2("ecspi4", "ecspi_root", base + 0x6c, 6);
+ clk[ecspi5] = imx_clk_gate2("ecspi5", "ecspi_root", base + 0x6c, 8);
+ clk[enet] = imx_clk_gate2("enet", "ipg", base + 0x6c, 10);
+ clk[esai] = imx_clk_gate2("esai", "esai_podf", base + 0x6c, 16);
+ clk[gpt_ipg] = imx_clk_gate2("gpt_ipg", "ipg", base + 0x6c, 20);
+ clk[gpt_ipg_per] = imx_clk_gate2("gpt_ipg_per", "ipg_per", base + 0x6c, 22);
+ clk[gpu2d_core] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
+ clk[gpu3d_core] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26);
+ clk[hdmi_iahb] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0);
+ clk[hdmi_isfr] = imx_clk_gate2("hdmi_isfr", "pll3_pfd1_540m", base + 0x70, 4);
+ clk[i2c1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6);
+ clk[i2c2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8);
+ clk[i2c3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10);
+ clk[iim] = imx_clk_gate2("iim", "ipg", base + 0x70, 12);
+ clk[enfc] = imx_clk_gate2("enfc", "enfc_podf", base + 0x70, 14);
+ clk[ipu1] = imx_clk_gate2("ipu1", "ipu1_podf", base + 0x74, 0);
+ clk[ipu1_di0] = imx_clk_gate2("ipu1_di0", "ipu1_di0_sel", base + 0x74, 2);
+ clk[ipu1_di1] = imx_clk_gate2("ipu1_di1", "ipu1_di1_sel", base + 0x74, 4);
+ clk[ipu2] = imx_clk_gate2("ipu2", "ipu2_podf", base + 0x74, 6);
+ clk[ipu2_di0] = imx_clk_gate2("ipu2_di0", "ipu2_di0_sel", base + 0x74, 8);
+ clk[ldb_di0] = imx_clk_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12);
+ clk[ldb_di1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14);
+ clk[ipu2_di1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10);
+ clk[hsi_tx] = imx_clk_gate2("hsi_tx", "hsi_tx_podf", base + 0x74, 16);
+ clk[mlb] = imx_clk_gate2("mlb", "pll6_mlb", base + 0x74, 18);
+ clk[mmdc_ch0_axi] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20);
+ clk[mmdc_ch1_axi] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22);
+ clk[ocram] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28);
+ clk[openvg_axi] = imx_clk_gate2("openvg_axi", "axi", base + 0x74, 30);
+ clk[pcie_axi] = imx_clk_gate2("pcie_axi", "pcie_axi_sel", base + 0x78, 0);
+ clk[pwm1] = imx_clk_gate2("pwm1", "ipg_per", base + 0x78, 16);
+ clk[pwm2] = imx_clk_gate2("pwm2", "ipg_per", base + 0x78, 18);
+ clk[pwm3] = imx_clk_gate2("pwm3", "ipg_per", base + 0x78, 20);
+ clk[pwm4] = imx_clk_gate2("pwm4", "ipg_per", base + 0x78, 22);
+ clk[gpmi_bch_apb] = imx_clk_gate2("gpmi_bch_apb", "usdhc3", base + 0x78, 24);
+ clk[gpmi_bch] = imx_clk_gate2("gpmi_bch", "usdhc4", base + 0x78, 26);
+ clk[gpmi_io] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
+ clk[gpmi_apb] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
+ clk[sata] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4);
+ clk[sdma] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
+ clk[spba] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
+ clk[ssi1_ipg] = imx_clk_gate2("ssi1_ipg", "ipg", base + 0x7c, 18);
+ clk[ssi2_ipg] = imx_clk_gate2("ssi2_ipg", "ipg", base + 0x7c, 20);
+ clk[ssi3_ipg] = imx_clk_gate2("ssi3_ipg", "ipg", base + 0x7c, 22);
+ clk[uart_ipg] = imx_clk_gate2("uart_ipg", "ipg", base + 0x7c, 24);
+ clk[uart_serial] = imx_clk_gate2("uart_serial", "uart_serial_podf", base + 0x7c, 26);
+ clk[usboh3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0);
+ clk[usdhc1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2);
+ clk[usdhc2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4);
+ clk[usdhc3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6);
+ clk[usdhc4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8);
+ clk[vdo_axi] = imx_clk_gate2("vdo_axi", "vdo_axi_sel", base + 0x80, 12);
+ clk[vpu_axi] = imx_clk_gate2("vpu_axi", "vpu_axi_podf", base + 0x80, 14);
+ clk[cko1] = imx_clk_gate("cko1", "cko1_podf", base + 0x60, 7);
+
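+ /*
+ * clk_register() and friends return an ERR_PTR on failure, so walk the
+ * whole table once and report any clock that failed to register.
+ */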
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX6q clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ clk_register_clkdev(clk[mmdc_ch0_axi], NULL, "mmdc_ch0_axi");
+ clk_register_clkdev(clk[mmdc_ch1_axi], NULL, "mmdc_ch1_axi");
+ clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[twd], NULL, "smp_twd");
+ clk_register_clkdev(clk[usboh3], NULL, "usboh3");
+ clk_register_clkdev(clk[uart_serial], "per", "2020000.serial");
+ clk_register_clkdev(clk[uart_ipg], "ipg", "2020000.serial");
+ clk_register_clkdev(clk[uart_serial], "per", "21e8000.serial");
+ clk_register_clkdev(clk[uart_ipg], "ipg", "21e8000.serial");
+ clk_register_clkdev(clk[uart_serial], "per", "21ec000.serial");
+ clk_register_clkdev(clk[uart_ipg], "ipg", "21ec000.serial");
+ clk_register_clkdev(clk[uart_serial], "per", "21f0000.serial");
+ clk_register_clkdev(clk[uart_ipg], "ipg", "21f0000.serial");
+ clk_register_clkdev(clk[uart_serial], "per", "21f4000.serial");
+ clk_register_clkdev(clk[uart_ipg], "ipg", "21f4000.serial");
+ clk_register_clkdev(clk[enet], NULL, "2188000.ethernet");
+ clk_register_clkdev(clk[usdhc1], NULL, "2190000.usdhc");
+ clk_register_clkdev(clk[usdhc2], NULL, "2194000.usdhc");
+ clk_register_clkdev(clk[usdhc3], NULL, "2198000.usdhc");
+ clk_register_clkdev(clk[usdhc4], NULL, "219c000.usdhc");
+ clk_register_clkdev(clk[i2c1], NULL, "21a0000.i2c");
+ clk_register_clkdev(clk[i2c2], NULL, "21a4000.i2c");
+ clk_register_clkdev(clk[i2c3], NULL, "21a8000.i2c");
+ clk_register_clkdev(clk[ecspi1], NULL, "2008000.ecspi");
+ clk_register_clkdev(clk[ecspi2], NULL, "200c000.ecspi");
+ clk_register_clkdev(clk[ecspi3], NULL, "2010000.ecspi");
+ clk_register_clkdev(clk[ecspi4], NULL, "2014000.ecspi");
+ clk_register_clkdev(clk[ecspi5], NULL, "2018000.ecspi");
+ clk_register_clkdev(clk[sdma], NULL, "20ec000.sdma");
+ clk_register_clkdev(clk[dummy], NULL, "20bc000.wdog");
+ clk_register_clkdev(clk[dummy], NULL, "20c0000.wdog");
+ clk_register_clkdev(clk[ssi1_ipg], NULL, "2028000.ssi");
+ clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
+ clk_register_clkdev(clk[ahb], "ahb", NULL);
+ clk_register_clkdev(clk[cko1], "cko1", NULL);
+
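+ /*
+ * Clocks named in clks_init_on must keep running once the kernel is up;
+ * clk_get_sys() resolves them through the clkdev entries registered
+ * above, and the prepare/enable reference taken here keeps the clock
+ * framework from gating them when it disables unused clocks.
+ */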
+ for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) {
+ c = clk_get_sys(clks_init_on[i], NULL);
+ if (IS_ERR(c)) {
+ pr_err("%s: failed to get clk %s", __func__,
+ clks_init_on[i]);
+ return PTR_ERR(c);
+ }
+ clk_prepare_enable(c);
+ }
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+ irq = irq_of_parse_and_map(np, 0);
+ mxc_timer_init(NULL, base, irq);
+
+ return 0;
+}
diff --git a/arch/arm/mach-imx/clk-pfd.c b/arch/arm/mach-imx/clk-pfd.c
new file mode 100644
index 000000000000..e2ed4160f329
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pfd.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include "clk.h"
+
+/**
+ * struct clk_pfd - IMX PFD clock
+ * @hw:  clock source
+ * @reg: PFD register address
+ * @idx: the index of the PFD encoded in the register
+ *
+ * PFD clock found on the i.MX6 series.  Each PFD register encodes four
+ * PFDs; @idx selects which one.  Each register also has SET, CLR and TOG
+ * counterparts at offsets 0x4, 0x8 and 0xc.
+ */
+struct clk_pfd {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 idx;
+};
+
+#define to_clk_pfd(_hw) container_of(_hw, struct clk_pfd, hw)
+
+#define SET 0x4
+#define CLR 0x8
+#define TOG 0xc
+
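+/*
+ * Bit 7 of each 8-bit PFD field is the clock gate: writing it through the
+ * SET register gates the PFD off and writing it through CLR ungates it,
+ * which is why enable/disable below touch bit (idx + 1) * 8 - 1.
+ */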
+static int clk_pfd_enable(struct clk_hw *hw)
+{
+ struct clk_pfd *pfd = to_clk_pfd(hw);
+
+ writel_relaxed(1 << ((pfd->idx + 1) * 8 - 1), pfd->reg + CLR);
+
+ return 0;
+}
+
+static void clk_pfd_disable(struct clk_hw *hw)
+{
+ struct clk_pfd *pfd = to_clk_pfd(hw);
+
+ writel_relaxed(1 << ((pfd->idx + 1) * 8 - 1), pfd->reg + SET);
+}
+
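+/*
+ * The PFD output is parent_rate * 18 / frac, where frac is the 6-bit
+ * fractional divider in bits [5:0] of the PFD field and is constrained to
+ * the range 12..35.  For example, a 528 MHz PLL2 with frac = 24 yields the
+ * 396 MHz pll2_pfd2_396m clock registered in clk-imx6q.c.
+ */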
+static unsigned long clk_pfd_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pfd *pfd = to_clk_pfd(hw);
+ u64 tmp = parent_rate;
+ u8 frac = (readl_relaxed(pfd->reg) >> (pfd->idx * 8)) & 0x3f;
+
+ tmp *= 18;
+ do_div(tmp, frac);
+
+ return tmp;
+}
+
+static long clk_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u64 tmp = *prate;
+ u8 frac;
+
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+ if (frac < 12)
+ frac = 12;
+ else if (frac > 35)
+ frac = 35;
+ tmp = *prate;
+ tmp *= 18;
+ do_div(tmp, frac);
+
+ return tmp;
+}
+
+static int clk_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pfd *pfd = to_clk_pfd(hw);
+ u64 tmp = parent_rate;
+ u8 frac;
+
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+ if (frac < 12)
+ frac = 12;
+ else if (frac > 35)
+ frac = 35;
+
+ writel_relaxed(0x3f << (pfd->idx * 8), pfd->reg + CLR);
+ writel_relaxed(frac << (pfd->idx * 8), pfd->reg + SET);
+
+ return 0;
+}
+
+static const struct clk_ops clk_pfd_ops = {
+ .enable = clk_pfd_enable,
+ .disable = clk_pfd_disable,
+ .recalc_rate = clk_pfd_recalc_rate,
+ .round_rate = clk_pfd_round_rate,
+ .set_rate = clk_pfd_set_rate,
+};
+
+struct clk *imx_clk_pfd(const char *name, const char *parent_name,
+ void __iomem *reg, u8 idx)
+{
+ struct clk_pfd *pfd;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pfd = kzalloc(sizeof(*pfd), GFP_KERNEL);
+ if (!pfd)
+ return ERR_PTR(-ENOMEM);
+
+ pfd->reg = reg;
+ pfd->idx = idx;
+
+ init.name = name;
+ init.ops = &clk_pfd_ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pfd->hw.init = &init;
+
+ clk = clk_register(NULL, &pfd->hw);
+ if (IS_ERR(clk))
+ kfree(pfd);
+
+ return clk;
+}
diff --git a/arch/arm/mach-imx/clk-pllv1.c b/arch/arm/mach-imx/clk-pllv1.c
new file mode 100644
index 000000000000..2d856f9ccf59
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pllv1.c
@@ -0,0 +1,66 @@
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <mach/common.h>
+#include <mach/hardware.h>
+#include <mach/clock.h>
+#include "clk.h"
+
+/**
+ * struct clk_pllv1 - IMX PLL clock version 1
+ * @hw:   clock source
+ * @base: base address of the PLL registers
+ *
+ * PLL clock version 1, found on i.MX1/21/25/27/31/35.
+ */
+struct clk_pllv1 {
+ struct clk_hw hw;
+ void __iomem *base;
+};
+
+#define to_clk_pllv1(_hw) container_of(_hw, struct clk_pllv1, hw)
+
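+/*
+ * The whole pllv1 configuration lives in a single control register; the
+ * shared mxc_decode_pll() helper decodes its PD/MFD/MFI/MFN fields into an
+ * output rate, so only .recalc_rate is implemented here.
+ */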
+static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pllv1 *pll = to_clk_pllv1(hw);
+
+ return mxc_decode_pll(readl(pll->base), parent_rate);
+}
+
+static const struct clk_ops clk_pllv1_ops = {
+ .recalc_rate = clk_pllv1_recalc_rate,
+};
+
+struct clk *imx_clk_pllv1(const char *name, const char *parent,
+ void __iomem *base)
+{
+ struct clk_pllv1 *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ pll->base = base;
+
+ init.name = name;
+ init.ops = &clk_pllv1_ops;
+ init.flags = 0;
+ init.parent_names = &parent;
+ init.num_parents = 1;
+
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
diff --git a/arch/arm/mach-imx/clk-pllv2.c b/arch/arm/mach-imx/clk-pllv2.c
new file mode 100644
index 000000000000..4685919deb63
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pllv2.c
@@ -0,0 +1,249 @@
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include <asm/div64.h>
+
+#include "clk.h"
+
+#define to_clk_pllv2(_hw) container_of(_hw, struct clk_pllv2, hw)
+
+/* PLL Register Offsets */
+#define MXC_PLL_DP_CTL 0x00
+#define MXC_PLL_DP_CONFIG 0x04
+#define MXC_PLL_DP_OP 0x08
+#define MXC_PLL_DP_MFD 0x0C
+#define MXC_PLL_DP_MFN 0x10
+#define MXC_PLL_DP_MFNMINUS 0x14
+#define MXC_PLL_DP_MFNPLUS 0x18
+#define MXC_PLL_DP_HFS_OP 0x1C
+#define MXC_PLL_DP_HFS_MFD 0x20
+#define MXC_PLL_DP_HFS_MFN 0x24
+#define MXC_PLL_DP_MFN_TOGC 0x28
+#define MXC_PLL_DP_DESTAT 0x2c
+
+/* PLL Register Bit definitions */
+#define MXC_PLL_DP_CTL_MUL_CTRL 0x2000
+#define MXC_PLL_DP_CTL_DPDCK0_2_EN 0x1000
+#define MXC_PLL_DP_CTL_DPDCK0_2_OFFSET 12
+#define MXC_PLL_DP_CTL_ADE 0x800
+#define MXC_PLL_DP_CTL_REF_CLK_DIV 0x400
+#define MXC_PLL_DP_CTL_REF_CLK_SEL_MASK (3 << 8)
+#define MXC_PLL_DP_CTL_REF_CLK_SEL_OFFSET 8
+#define MXC_PLL_DP_CTL_HFSM 0x80
+#define MXC_PLL_DP_CTL_PRE 0x40
+#define MXC_PLL_DP_CTL_UPEN 0x20
+#define MXC_PLL_DP_CTL_RST 0x10
+#define MXC_PLL_DP_CTL_RCP 0x8
+#define MXC_PLL_DP_CTL_PLM 0x4
+#define MXC_PLL_DP_CTL_BRM0 0x2
+#define MXC_PLL_DP_CTL_LRF 0x1
+
+#define MXC_PLL_DP_CONFIG_BIST 0x8
+#define MXC_PLL_DP_CONFIG_SJC_CE 0x4
+#define MXC_PLL_DP_CONFIG_AREN 0x2
+#define MXC_PLL_DP_CONFIG_LDREQ 0x1
+
+#define MXC_PLL_DP_OP_MFI_OFFSET 4
+#define MXC_PLL_DP_OP_MFI_MASK (0xF << 4)
+#define MXC_PLL_DP_OP_PDF_OFFSET 0
+#define MXC_PLL_DP_OP_PDF_MASK 0xF
+
+#define MXC_PLL_DP_MFD_OFFSET 0
+#define MXC_PLL_DP_MFD_MASK 0x07FFFFFF
+
+#define MXC_PLL_DP_MFN_OFFSET 0x0
+#define MXC_PLL_DP_MFN_MASK 0x07FFFFFF
+
+#define MXC_PLL_DP_MFN_TOGC_TOG_DIS (1 << 17)
+#define MXC_PLL_DP_MFN_TOGC_TOG_EN (1 << 16)
+#define MXC_PLL_DP_MFN_TOGC_CNT_OFFSET 0x0
+#define MXC_PLL_DP_MFN_TOGC_CNT_MASK 0xFFFF
+
+#define MXC_PLL_DP_DESTAT_TOG_SEL (1 << 31)
+#define MXC_PLL_DP_DESTAT_MFN 0x07FFFFFF
+
+#define MAX_DPLL_WAIT_TRIES 1000 /* 1000 * udelay(1) = 1ms */
+
+struct clk_pllv2 {
+ struct clk_hw hw;
+ void __iomem *base;
+};
+
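+/*
+ * Fout = ref * (MFI + MFN / (MFD + 1)), where ref is 2 * parent_rate
+ * divided by (PDF + 1) and doubled once more when DPDCK0_2_EN is set.
+ * MFN is a signed fraction, and the HFS_* register copies are used while
+ * the HFSM bit is set.
+ */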
+static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
+ unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl;
+ void __iomem *pllbase;
+ s64 temp;
+ struct clk_pllv2 *pll = to_clk_pllv2(hw);
+
+ pllbase = pll->base;
+
+ dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+ pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
+ dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
+
+ if (pll_hfsm == 0) {
+ dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
+ dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
+ dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
+ } else {
+ dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
+ dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
+ dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
+ }
+ pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
+ mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
+ mfi = (mfi <= 5) ? 5 : mfi;
+ mfd = dp_mfd & MXC_PLL_DP_MFD_MASK;
+ mfn = mfn_abs = dp_mfn & MXC_PLL_DP_MFN_MASK;
+ /* Sign extend to 32-bits */
+ if (mfn >= 0x04000000) {
+ mfn |= 0xFC000000;
+ mfn_abs = -mfn;
+ }
+
+ ref_clk = 2 * parent_rate;
+ if (dbl != 0)
+ ref_clk *= 2;
+
+ ref_clk /= (pdf + 1);
+ temp = (u64) ref_clk * mfn_abs;
+ do_div(temp, mfd + 1);
+ if (mfn < 0)
+ temp = -temp;
+ temp = (ref_clk * mfi) + temp;
+
+ return temp;
+}
+
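+/*
+ * Search for the smallest PDF (0..15) that gives an integer multiplier
+ * MFI of at least 5, then derive MFN against a fixed MFD of 999999 so the
+ * fractional part is expressed in millionths of the quadrupled reference.
+ */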
+static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pllv2 *pll = to_clk_pllv2(hw);
+ u32 reg;
+ void __iomem *pllbase;
+ long mfi, pdf, mfn, mfd = 999999;
+ s64 temp64;
+ unsigned long quad_parent_rate;
+ unsigned long pll_hfsm, dp_ctl;
+
+ pllbase = pll->base;
+
+ quad_parent_rate = 4 * parent_rate;
+ pdf = mfi = -1;
+ while (++pdf < 16 && mfi < 5)
+ mfi = rate * (pdf+1) / quad_parent_rate;
+ if (mfi > 15)
+ return -EINVAL;
+ pdf--;
+
+ temp64 = rate * (pdf+1) - quad_parent_rate * mfi;
+ do_div(temp64, quad_parent_rate/1000000);
+ mfn = (long)temp64;
+
+ dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+ /* use dpdck0_2 */
+ __raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
+ pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
+ if (pll_hfsm == 0) {
+ reg = mfi << 4 | pdf;
+ __raw_writel(reg, pllbase + MXC_PLL_DP_OP);
+ __raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
+ __raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
+ } else {
+ reg = mfi << 4 | pdf;
+ __raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
+ __raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
+ __raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
+ }
+
+ return 0;
+}
+
+static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return rate;
+}
+
+static int clk_pllv2_prepare(struct clk_hw *hw)
+{
+ struct clk_pllv2 *pll = to_clk_pllv2(hw);
+ u32 reg;
+ void __iomem *pllbase;
+ int i = 0;
+
+ pllbase = pll->base;
+ reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) | MXC_PLL_DP_CTL_UPEN;
+ __raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
+
+ /* Wait for lock */
+ do {
+ reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+ if (reg & MXC_PLL_DP_CTL_LRF)
+ break;
+
+ udelay(1);
+ } while (++i < MAX_DPLL_WAIT_TRIES);
+
+ if (i == MAX_DPLL_WAIT_TRIES) {
+ pr_err("MX5: pll locking failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void clk_pllv2_unprepare(struct clk_hw *hw)
+{
+ struct clk_pllv2 *pll = to_clk_pllv2(hw);
+ u32 reg;
+ void __iomem *pllbase;
+
+ pllbase = pll->base;
+ reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) & ~MXC_PLL_DP_CTL_UPEN;
+ __raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
+}
+
+static const struct clk_ops clk_pllv2_ops = {
+ .prepare = clk_pllv2_prepare,
+ .unprepare = clk_pllv2_unprepare,
+ .recalc_rate = clk_pllv2_recalc_rate,
+ .round_rate = clk_pllv2_round_rate,
+ .set_rate = clk_pllv2_set_rate,
+};
+
+struct clk *imx_clk_pllv2(const char *name, const char *parent,
+ void __iomem *base)
+{
+ struct clk_pllv2 *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ pll->base = base;
+
+ init.name = name;
+ init.ops = &clk_pllv2_ops;
+ init.flags = 0;
+ init.parent_names = &parent;
+ init.num_parents = 1;
+
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
diff --git a/arch/arm/mach-imx/clk-pllv3.c b/arch/arm/mach-imx/clk-pllv3.c
new file mode 100644
index 000000000000..36aac947bce1
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pllv3.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include "clk.h"
+
+#define PLL_NUM_OFFSET 0x10
+#define PLL_DENOM_OFFSET 0x20
+
+#define BM_PLL_POWER (0x1 << 12)
+#define BM_PLL_ENABLE (0x1 << 13)
+#define BM_PLL_BYPASS (0x1 << 16)
+#define BM_PLL_LOCK (0x1 << 31)
+
+/**
+ * struct clk_pllv3 - IMX PLL clock version 3
+ * @hw:          clock source
+ * @base:        base address of PLL registers
+ * @powerup_set: set POWER bit to power up the PLL
+ * @gate_mask:   mask of gate bits
+ * @div_mask:    mask of divider bits
+ *
+ * IMX PLL clock version 3, found on the i.MX6 series.  The "divider" for
+ * pllv3 is actually a multiplier, and it always sits at bit 0.
+ */
+struct clk_pllv3 {
+ struct clk_hw hw;
+ void __iomem *base;
+ bool powerup_set;
+ u32 gate_mask;
+ u32 div_mask;
+};
+
+#define to_clk_pllv3(_hw) container_of(_hw, struct clk_pllv3, hw)
+
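+/*
+ * Powering the PLL up means taking it out of bypass and, depending on the
+ * PLL type, setting or clearing the POWER bit (the USB PLLs use the
+ * inverted sense, hence powerup_set), then waiting up to 10 ms for LOCK.
+ */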
+static int clk_pllv3_prepare(struct clk_hw *hw)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ unsigned long timeout = jiffies + msecs_to_jiffies(10);
+ u32 val;
+
+ val = readl_relaxed(pll->base);
+ val &= ~BM_PLL_BYPASS;
+ if (pll->powerup_set)
+ val |= BM_PLL_POWER;
+ else
+ val &= ~BM_PLL_POWER;
+ writel_relaxed(val, pll->base);
+
+ /* Wait for PLL to lock */
+ while (!(readl_relaxed(pll->base) & BM_PLL_LOCK))
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void clk_pllv3_unprepare(struct clk_hw *hw)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->base);
+ val |= BM_PLL_BYPASS;
+ if (pll->powerup_set)
+ val &= ~BM_PLL_POWER;
+ else
+ val |= BM_PLL_POWER;
+ writel_relaxed(val, pll->base);
+}
+
+static int clk_pllv3_enable(struct clk_hw *hw)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->base);
+ val |= pll->gate_mask;
+ writel_relaxed(val, pll->base);
+
+ return 0;
+}
+
+static void clk_pllv3_disable(struct clk_hw *hw)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->base);
+ val &= ~pll->gate_mask;
+ writel_relaxed(val, pll->base);
+}
+
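+/*
+ * The generic and USB PLLs only support two multipliers: DIV_SELECT = 0
+ * gives parent_rate * 20 and DIV_SELECT = 1 gives parent_rate * 22, i.e.
+ * 480 MHz or 528 MHz from the usual 24 MHz oscillator.
+ */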
+static unsigned long clk_pllv3_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 div = readl_relaxed(pll->base) & pll->div_mask;
+
+ return (div == 1) ? parent_rate * 22 : parent_rate * 20;
+}
+
+static long clk_pllv3_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ unsigned long parent_rate = *prate;
+
+ return (rate >= parent_rate * 22) ? parent_rate * 22 :
+ parent_rate * 20;
+}
+
+static int clk_pllv3_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 val, div;
+
+ if (rate == parent_rate * 22)
+ div = 1;
+ else if (rate == parent_rate * 20)
+ div = 0;
+ else
+ return -EINVAL;
+
+ val = readl_relaxed(pll->base);
+ val &= ~pll->div_mask;
+ val |= div;
+ writel_relaxed(val, pll->base);
+
+ return 0;
+}
+
+static const struct clk_ops clk_pllv3_ops = {
+ .prepare = clk_pllv3_prepare,
+ .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_recalc_rate,
+ .round_rate = clk_pllv3_round_rate,
+ .set_rate = clk_pllv3_set_rate,
+};
+
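+/*
+ * System PLL: Fout = Fref * DIV_SELECT / 2, with DIV_SELECT restricted to
+ * 54..108, i.e. 648 MHz to 1296 MHz from a 24 MHz reference.
+ */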
+static unsigned long clk_pllv3_sys_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 div = readl_relaxed(pll->base) & pll->div_mask;
+
+ return parent_rate * div / 2;
+}
+
+static long clk_pllv3_sys_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ unsigned long parent_rate = *prate;
+ unsigned long min_rate = parent_rate * 54 / 2;
+ unsigned long max_rate = parent_rate * 108 / 2;
+ u32 div;
+
+ if (rate > max_rate)
+ rate = max_rate;
+ else if (rate < min_rate)
+ rate = min_rate;
+ div = rate * 2 / parent_rate;
+
+ return parent_rate * div / 2;
+}
+
+static int clk_pllv3_sys_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ unsigned long min_rate = parent_rate * 54 / 2;
+ unsigned long max_rate = parent_rate * 108 / 2;
+ u32 val, div;
+
+ if (rate < min_rate || rate > max_rate)
+ return -EINVAL;
+
+ div = rate * 2 / parent_rate;
+ val = readl_relaxed(pll->base);
+ val &= ~pll->div_mask;
+ val |= div;
+ writel_relaxed(val, pll->base);
+
+ return 0;
+}
+
+static const struct clk_ops clk_pllv3_sys_ops = {
+ .prepare = clk_pllv3_prepare,
+ .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_sys_recalc_rate,
+ .round_rate = clk_pllv3_sys_round_rate,
+ .set_rate = clk_pllv3_sys_set_rate,
+};
+
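+/*
+ * Audio/video PLL: Fout = Fref * (DIV_SELECT + NUM / DENOM), with
+ * DIV_SELECT restricted to 27..54 and the fraction held in the NUM and
+ * DENOM registers at offsets 0x10 and 0x20.
+ */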
+static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET);
+ u32 mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET);
+ u32 div = readl_relaxed(pll->base) & pll->div_mask;
+
+ return (parent_rate * div) + ((parent_rate / mfd) * mfn);
+}
+
+static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ unsigned long parent_rate = *prate;
+ unsigned long min_rate = parent_rate * 27;
+ unsigned long max_rate = parent_rate * 54;
+ u32 div;
+ u32 mfn, mfd = 1000000;
+ s64 temp64;
+
+ if (rate > max_rate)
+ rate = max_rate;
+ else if (rate < min_rate)
+ rate = min_rate;
+
+ div = rate / parent_rate;
+ temp64 = (u64) (rate - div * parent_rate);
+ temp64 *= mfd;
+ do_div(temp64, parent_rate);
+ mfn = temp64;
+
+ return parent_rate * div + parent_rate / mfd * mfn;
+}
+
+static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ unsigned long min_rate = parent_rate * 27;
+ unsigned long max_rate = parent_rate * 54;
+ u32 val, div;
+ u32 mfn, mfd = 1000000;
+ s64 temp64;
+
+ if (rate < min_rate || rate > max_rate)
+ return -EINVAL;
+
+ div = rate / parent_rate;
+ temp64 = (u64) (rate - div * parent_rate);
+ temp64 *= mfd;
+ do_div(temp64, parent_rate);
+ mfn = temp64;
+
+ val = readl_relaxed(pll->base);
+ val &= ~pll->div_mask;
+ val |= div;
+ writel_relaxed(val, pll->base);
+ writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
+ writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
+
+ return 0;
+}
+
+static const struct clk_ops clk_pllv3_av_ops = {
+ .prepare = clk_pllv3_prepare,
+ .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_av_recalc_rate,
+ .round_rate = clk_pllv3_av_round_rate,
+ .set_rate = clk_pllv3_av_set_rate,
+};
+
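+/*
+ * The ENET PLL does not expose its VCO rate here; DIV_SELECT simply picks
+ * one of four fixed Ethernet reference rates (25/50/100/125 MHz).
+ */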
+static unsigned long clk_pllv3_enet_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 div = readl_relaxed(pll->base) & pll->div_mask;
+
+ switch (div) {
+ case 0:
+ return 25000000;
+ case 1:
+ return 50000000;
+ case 2:
+ return 100000000;
+ case 3:
+ return 125000000;
+ }
+
+ return 0;
+}
+
+static long clk_pllv3_enet_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ if (rate >= 125000000)
+ rate = 125000000;
+ else if (rate >= 100000000)
+ rate = 100000000;
+ else if (rate >= 50000000)
+ rate = 50000000;
+ else
+ rate = 25000000;
+ return rate;
+}
+
+static int clk_pllv3_enet_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 val, div;
+
+ switch (rate) {
+ case 25000000:
+ div = 0;
+ break;
+ case 50000000:
+ div = 1;
+ break;
+ case 100000000:
+ div = 2;
+ break;
+ case 125000000:
+ div = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = readl_relaxed(pll->base);
+ val &= ~pll->div_mask;
+ val |= div;
+ writel_relaxed(val, pll->base);
+
+ return 0;
+}
+
+static const struct clk_ops clk_pllv3_enet_ops = {
+ .prepare = clk_pllv3_prepare,
+ .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_enet_recalc_rate,
+ .round_rate = clk_pllv3_enet_round_rate,
+ .set_rate = clk_pllv3_enet_set_rate,
+};
+
+static const struct clk_ops clk_pllv3_mlb_ops = {
+ .prepare = clk_pllv3_prepare,
+ .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+};
+
+struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
+ const char *parent_name, void __iomem *base,
+ u32 gate_mask, u32 div_mask)
+{
+ struct clk_pllv3 *pll;
+ const struct clk_ops *ops;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ switch (type) {
+ case IMX_PLLV3_SYS:
+ ops = &clk_pllv3_sys_ops;
+ break;
+ case IMX_PLLV3_USB:
+ ops = &clk_pllv3_ops;
+ pll->powerup_set = true;
+ break;
+ case IMX_PLLV3_AV:
+ ops = &clk_pllv3_av_ops;
+ break;
+ case IMX_PLLV3_ENET:
+ ops = &clk_pllv3_enet_ops;
+ break;
+ case IMX_PLLV3_MLB:
+ ops = &clk_pllv3_mlb_ops;
+ break;
+ default:
+ ops = &clk_pllv3_ops;
+ }
+ pll->base = base;
+ pll->gate_mask = gate_mask;
+ pll->div_mask = div_mask;
+
+ init.name = name;
+ init.ops = ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
diff --git a/arch/arm/mach-imx/clk.h b/arch/arm/mach-imx/clk.h
new file mode 100644
index 000000000000..1bf64fe2523c
--- /dev/null
+++ b/arch/arm/mach-imx/clk.h
@@ -0,0 +1,83 @@
+#ifndef __MACH_IMX_CLK_H
+#define __MACH_IMX_CLK_H
+
+#include <linux/spinlock.h>
+#include <linux/clk-provider.h>
+#include <mach/clock.h>
+
+struct clk *imx_clk_pllv1(const char *name, const char *parent,
+ void __iomem *base);
+
+struct clk *imx_clk_pllv2(const char *name, const char *parent,
+ void __iomem *base);
+
+enum imx_pllv3_type {
+ IMX_PLLV3_GENERIC,
+ IMX_PLLV3_SYS,
+ IMX_PLLV3_USB,
+ IMX_PLLV3_AV,
+ IMX_PLLV3_ENET,
+ IMX_PLLV3_MLB,
+};
+
+struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
+ const char *parent_name, void __iomem *base, u32 gate_mask,
+ u32 div_mask);
+
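+/*
+ * clk_register_gate2() implements an i.MX-specific gate that owns a 2-bit
+ * CCGR field per clock: enabling writes 0x3 to the field and disabling
+ * clears it, which is why callers pass even bit shifts.
+ */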
+struct clk *clk_register_gate2(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock);
+
+static inline struct clk *imx_clk_gate2(const char *name, const char *parent,
+ void __iomem *reg, u8 shift)
+{
+ return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
+ shift, 0, &imx_ccm_lock);
+}
+
+struct clk *imx_clk_pfd(const char *name, const char *parent_name,
+ void __iomem *reg, u8 idx);
+
+struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width,
+ void __iomem *busy_reg, u8 busy_shift);
+
+struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
+ u8 width, void __iomem *busy_reg, u8 busy_shift,
+ const char **parent_names, int num_parents);
+
+static inline struct clk *imx_clk_fixed(const char *name, int rate)
+{
+ return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+}
+
+static inline struct clk *imx_clk_divider(const char *name, const char *parent,
+ void __iomem *reg, u8 shift, u8 width)
+{
+ return clk_register_divider(NULL, name, parent, CLK_SET_RATE_PARENT,
+ reg, shift, width, 0, &imx_ccm_lock);
+}
+
+static inline struct clk *imx_clk_gate(const char *name, const char *parent,
+ void __iomem *reg, u8 shift)
+{
+ return clk_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
+ shift, 0, &imx_ccm_lock);
+}
+
+static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
+ u8 shift, u8 width, const char **parents, int num_parents)
+{
+ return clk_register_mux(NULL, name, parents, num_parents, 0, reg, shift,
+ width, 0, &imx_ccm_lock);
+}
+
+static inline struct clk *imx_clk_fixed_factor(const char *name,
+ const char *parent, unsigned int mult, unsigned int div)
+{
+ return clk_register_fixed_factor(NULL, name, parent,
+ CLK_SET_RATE_PARENT, mult, div);
+}
+
+#endif
diff --git a/arch/arm/mach-imx/clock-imx1.c b/arch/arm/mach-imx/clock-imx1.c
deleted file mode 100644
index 4aabeb241563..000000000000
--- a/arch/arm/mach-imx/clock-imx1.c
+++ /dev/null
@@ -1,636 +0,0 @@
-/*
- * Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/math64.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-
-#define IO_ADDR_CCM(off) (MX1_IO_ADDRESS(MX1_CCM_BASE_ADDR + (off)))
-
-/* CCM register addresses */
-#define CCM_CSCR IO_ADDR_CCM(0x0)
-#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
-#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
-#define CCM_PCDR IO_ADDR_CCM(0x20)
-
-#define CCM_CSCR_CLKO_OFFSET 29
-#define CCM_CSCR_CLKO_MASK (0x7 << 29)
-#define CCM_CSCR_USB_OFFSET 26
-#define CCM_CSCR_USB_MASK (0x7 << 26)
-#define CCM_CSCR_OSC_EN_SHIFT 17
-#define CCM_CSCR_SYSTEM_SEL (1 << 16)
-#define CCM_CSCR_BCLK_OFFSET 10
-#define CCM_CSCR_BCLK_MASK (0xf << 10)
-#define CCM_CSCR_PRESC (1 << 15)
-
-#define CCM_PCDR_PCLK3_OFFSET 16
-#define CCM_PCDR_PCLK3_MASK (0x7f << 16)
-#define CCM_PCDR_PCLK2_OFFSET 4
-#define CCM_PCDR_PCLK2_MASK (0xf << 4)
-#define CCM_PCDR_PCLK1_OFFSET 0
-#define CCM_PCDR_PCLK1_MASK 0xf
-
-#define IO_ADDR_SCM(off) (MX1_IO_ADDRESS(MX1_SCM_BASE_ADDR + (off)))
-
-/* SCM register addresses */
-#define SCM_GCCR IO_ADDR_SCM(0xc)
-
-#define SCM_GCCR_DMA_CLK_EN_OFFSET 3
-#define SCM_GCCR_CSI_CLK_EN_OFFSET 2
-#define SCM_GCCR_MMA_CLK_EN_OFFSET 1
-#define SCM_GCCR_USBD_CLK_EN_OFFSET 0
-
-static int _clk_enable(struct clk *clk)
-{
- unsigned int reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg |= 1 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
-
- return 0;
-}
-
-static void _clk_disable(struct clk *clk)
-{
- unsigned int reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(1 << clk->enable_shift);
- __raw_writel(reg, clk->enable_reg);
-}
-
-static int _clk_can_use_parent(const struct clk *clk_arr[], unsigned int size,
- struct clk *parent)
-{
- int i;
-
- for (i = 0; i < size; i++)
- if (parent == clk_arr[i])
- return i;
-
- return -EINVAL;
-}
-
-static unsigned long
-_clk_simple_round_rate(struct clk *clk, unsigned long rate, unsigned int limit)
-{
- int div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
- if (parent_rate % rate)
- div++;
-
- if (div > limit)
- div = limit;
-
- return parent_rate / div;
-}
-
-static unsigned long _clk_parent_round_rate(struct clk *clk, unsigned long rate)
-{
- return clk->parent->round_rate(clk->parent, rate);
-}
-
-static int _clk_parent_set_rate(struct clk *clk, unsigned long rate)
-{
- return clk->parent->set_rate(clk->parent, rate);
-}
-
-static unsigned long clk16m_get_rate(struct clk *clk)
-{
- return 16000000;
-}
-
-static struct clk clk16m = {
- .get_rate = clk16m_get_rate,
- .enable = _clk_enable,
- .enable_reg = CCM_CSCR,
- .enable_shift = CCM_CSCR_OSC_EN_SHIFT,
- .disable = _clk_disable,
-};
-
-/* in Hz */
-static unsigned long clk32_rate;
-
-static unsigned long clk32_get_rate(struct clk *clk)
-{
- return clk32_rate;
-}
-
-static struct clk clk32 = {
- .get_rate = clk32_get_rate,
-};
-
-static unsigned long clk32_premult_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) * 512;
-}
-
-static struct clk clk32_premult = {
- .parent = &clk32,
- .get_rate = clk32_premult_get_rate,
-};
-
-static const struct clk *prem_clk_clocks[] = {
- &clk32_premult,
- &clk16m,
-};
-
-static int prem_clk_set_parent(struct clk *clk, struct clk *parent)
-{
- int i;
- unsigned int reg = __raw_readl(CCM_CSCR);
-
- i = _clk_can_use_parent(prem_clk_clocks, ARRAY_SIZE(prem_clk_clocks),
- parent);
-
- switch (i) {
- case 0:
- reg &= ~CCM_CSCR_SYSTEM_SEL;
- break;
- case 1:
- reg |= CCM_CSCR_SYSTEM_SEL;
- break;
- default:
- return i;
- }
-
- __raw_writel(reg, CCM_CSCR);
-
- return 0;
-}
-
-static struct clk prem_clk = {
- .set_parent = prem_clk_set_parent,
-};
-
-static unsigned long system_clk_get_rate(struct clk *clk)
-{
- return mxc_decode_pll(__raw_readl(CCM_SPCTL0),
- clk_get_rate(clk->parent));
-}
-
-static struct clk system_clk = {
- .parent = &prem_clk,
- .get_rate = system_clk_get_rate,
-};
-
-static unsigned long mcu_clk_get_rate(struct clk *clk)
-{
- return mxc_decode_pll(__raw_readl(CCM_MPCTL0),
- clk_get_rate(clk->parent));
-}
-
-static struct clk mcu_clk = {
- .parent = &clk32_premult,
- .get_rate = mcu_clk_get_rate,
-};
-
-static unsigned long fclk_get_rate(struct clk *clk)
-{
- unsigned long fclk = clk_get_rate(clk->parent);
-
- if (__raw_readl(CCM_CSCR) & CCM_CSCR_PRESC)
- fclk /= 2;
-
- return fclk;
-}
-
-static struct clk fclk = {
- .parent = &mcu_clk,
- .get_rate = fclk_get_rate,
-};
-
-/*
- * get hclk ( SDRAM, CSI, Memory Stick, I2C, DMA )
- */
-static unsigned long hclk_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / (((__raw_readl(CCM_CSCR) &
- CCM_CSCR_BCLK_MASK) >> CCM_CSCR_BCLK_OFFSET) + 1);
-}
-
-static unsigned long hclk_round_rate(struct clk *clk, unsigned long rate)
-{
- return _clk_simple_round_rate(clk, rate, 16);
-}
-
-static int hclk_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned int div;
- unsigned int reg;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
-
- if (div > 16 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
-
- div--;
-
- reg = __raw_readl(CCM_CSCR);
- reg &= ~CCM_CSCR_BCLK_MASK;
- reg |= div << CCM_CSCR_BCLK_OFFSET;
- __raw_writel(reg, CCM_CSCR);
-
- return 0;
-}
-
-static struct clk hclk = {
- .parent = &system_clk,
- .get_rate = hclk_get_rate,
- .round_rate = hclk_round_rate,
- .set_rate = hclk_set_rate,
-};
-
-static unsigned long clk48m_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / (((__raw_readl(CCM_CSCR) &
- CCM_CSCR_USB_MASK) >> CCM_CSCR_USB_OFFSET) + 1);
-}
-
-static unsigned long clk48m_round_rate(struct clk *clk, unsigned long rate)
-{
- return _clk_simple_round_rate(clk, rate, 8);
-}
-
-static int clk48m_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned int div;
- unsigned int reg;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
-
- if (div > 8 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
-
- div--;
-
- reg = __raw_readl(CCM_CSCR);
- reg &= ~CCM_CSCR_USB_MASK;
- reg |= div << CCM_CSCR_USB_OFFSET;
- __raw_writel(reg, CCM_CSCR);
-
- return 0;
-}
-
-static struct clk clk48m = {
- .parent = &system_clk,
- .get_rate = clk48m_get_rate,
- .round_rate = clk48m_round_rate,
- .set_rate = clk48m_set_rate,
-};
-
-/*
- * get peripheral clock 1 ( UART[12], Timer[12], PWM )
- */
-static unsigned long perclk1_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
- CCM_PCDR_PCLK1_MASK) >> CCM_PCDR_PCLK1_OFFSET) + 1);
-}
-
-static unsigned long perclk1_round_rate(struct clk *clk, unsigned long rate)
-{
- return _clk_simple_round_rate(clk, rate, 16);
-}
-
-static int perclk1_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned int div;
- unsigned int reg;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
-
- if (div > 16 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
-
- div--;
-
- reg = __raw_readl(CCM_PCDR);
- reg &= ~CCM_PCDR_PCLK1_MASK;
- reg |= div << CCM_PCDR_PCLK1_OFFSET;
- __raw_writel(reg, CCM_PCDR);
-
- return 0;
-}
-
-/*
- * get peripheral clock 2 ( LCD, SD, SPI[12] )
- */
-static unsigned long perclk2_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
- CCM_PCDR_PCLK2_MASK) >> CCM_PCDR_PCLK2_OFFSET) + 1);
-}
-
-static unsigned long perclk2_round_rate(struct clk *clk, unsigned long rate)
-{
- return _clk_simple_round_rate(clk, rate, 16);
-}
-
-static int perclk2_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned int div;
- unsigned int reg;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
-
- if (div > 16 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
-
- div--;
-
- reg = __raw_readl(CCM_PCDR);
- reg &= ~CCM_PCDR_PCLK2_MASK;
- reg |= div << CCM_PCDR_PCLK2_OFFSET;
- __raw_writel(reg, CCM_PCDR);
-
- return 0;
-}
-
-/*
- * get peripheral clock 3 ( SSI )
- */
-static unsigned long perclk3_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
- CCM_PCDR_PCLK3_MASK) >> CCM_PCDR_PCLK3_OFFSET) + 1);
-}
-
-static unsigned long perclk3_round_rate(struct clk *clk, unsigned long rate)
-{
- return _clk_simple_round_rate(clk, rate, 128);
-}
-
-static int perclk3_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned int div;
- unsigned int reg;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
-
- if (div > 128 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
-
- div--;
-
- reg = __raw_readl(CCM_PCDR);
- reg &= ~CCM_PCDR_PCLK3_MASK;
- reg |= div << CCM_PCDR_PCLK3_OFFSET;
- __raw_writel(reg, CCM_PCDR);
-
- return 0;
-}
-
-static struct clk perclk[] = {
- {
- .id = 0,
- .parent = &system_clk,
- .get_rate = perclk1_get_rate,
- .round_rate = perclk1_round_rate,
- .set_rate = perclk1_set_rate,
- }, {
- .id = 1,
- .parent = &system_clk,
- .get_rate = perclk2_get_rate,
- .round_rate = perclk2_round_rate,
- .set_rate = perclk2_set_rate,
- }, {
- .id = 2,
- .parent = &system_clk,
- .get_rate = perclk3_get_rate,
- .round_rate = perclk3_round_rate,
- .set_rate = perclk3_set_rate,
- }
-};
-
-static const struct clk *clko_clocks[] = {
- &perclk[0],
- &hclk,
- &clk48m,
- &clk16m,
- &prem_clk,
- &fclk,
-};
-
-static int clko_set_parent(struct clk *clk, struct clk *parent)
-{
- int i;
- unsigned int reg;
-
- i = _clk_can_use_parent(clko_clocks, ARRAY_SIZE(clko_clocks), parent);
- if (i < 0)
- return i;
-
- reg = __raw_readl(CCM_CSCR) & ~CCM_CSCR_CLKO_MASK;
- reg |= i << CCM_CSCR_CLKO_OFFSET;
- __raw_writel(reg, CCM_CSCR);
-
- if (clko_clocks[i]->set_rate && clko_clocks[i]->round_rate) {
- clk->set_rate = _clk_parent_set_rate;
- clk->round_rate = _clk_parent_round_rate;
- } else {
- clk->set_rate = NULL;
- clk->round_rate = NULL;
- }
-
- return 0;
-}
-
-static struct clk clko_clk = {
- .set_parent = clko_set_parent,
-};
-
-static struct clk dma_clk = {
- .parent = &hclk,
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
- .enable = _clk_enable,
- .enable_reg = SCM_GCCR,
- .enable_shift = SCM_GCCR_DMA_CLK_EN_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk csi_clk = {
- .parent = &hclk,
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
- .enable = _clk_enable,
- .enable_reg = SCM_GCCR,
- .enable_shift = SCM_GCCR_CSI_CLK_EN_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk mma_clk = {
- .parent = &hclk,
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
- .enable = _clk_enable,
- .enable_reg = SCM_GCCR,
- .enable_shift = SCM_GCCR_MMA_CLK_EN_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk usbd_clk = {
- .parent = &clk48m,
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
- .enable = _clk_enable,
- .enable_reg = SCM_GCCR,
- .enable_shift = SCM_GCCR_USBD_CLK_EN_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk gpt_clk = {
- .parent = &perclk[0],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk uart_clk = {
- .parent = &perclk[0],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk i2c_clk = {
- .parent = &hclk,
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk spi_clk = {
- .parent = &perclk[1],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk sdhc_clk = {
- .parent = &perclk[1],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk lcdc_clk = {
- .parent = &perclk[1],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk mshc_clk = {
- .parent = &hclk,
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk ssi_clk = {
- .parent = &perclk[2],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk rtc_clk = {
- .parent = &clk32,
-};
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-static struct clk_lookup lookups[] __initdata = {
- _REGISTER_CLOCK(NULL, "dma", dma_clk)
- _REGISTER_CLOCK("mx1-camera.0", NULL, csi_clk)
- _REGISTER_CLOCK(NULL, "mma", mma_clk)
- _REGISTER_CLOCK("imx_udc.0", NULL, usbd_clk)
- _REGISTER_CLOCK(NULL, "gpt", gpt_clk)
- _REGISTER_CLOCK("imx1-uart.0", NULL, uart_clk)
- _REGISTER_CLOCK("imx1-uart.1", NULL, uart_clk)
- _REGISTER_CLOCK("imx1-uart.2", NULL, uart_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
- _REGISTER_CLOCK("imx1-cspi.0", NULL, spi_clk)
- _REGISTER_CLOCK("imx1-cspi.1", NULL, spi_clk)
- _REGISTER_CLOCK("imx-mmc.0", NULL, sdhc_clk)
- _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
- _REGISTER_CLOCK(NULL, "mshc", mshc_clk)
- _REGISTER_CLOCK(NULL, "ssi", ssi_clk)
- _REGISTER_CLOCK("mxc_rtc.0", NULL, rtc_clk)
-};
-
-int __init mx1_clocks_init(unsigned long fref)
-{
- unsigned int reg;
-
- /* disable clocks we are able to */
- __raw_writel(0, SCM_GCCR);
-
- clk32_rate = fref;
- reg = __raw_readl(CCM_CSCR);
-
- /* detect clock reference for system PLL */
- if (reg & CCM_CSCR_SYSTEM_SEL) {
- prem_clk.parent = &clk16m;
- } else {
- /* ensure that oscillator is disabled */
- reg &= ~(1 << CCM_CSCR_OSC_EN_SHIFT);
- __raw_writel(reg, CCM_CSCR);
- prem_clk.parent = &clk32_premult;
- }
-
- /* detect reference for CLKO */
- reg = (reg & CCM_CSCR_CLKO_MASK) >> CCM_CSCR_CLKO_OFFSET;
- clko_clk.parent = (struct clk *)clko_clocks[reg];
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- clk_enable(&hclk);
- clk_enable(&fclk);
-
- mxc_timer_init(&gpt_clk, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR),
- MX1_TIM1_INT);
-
- return 0;
-}
diff --git a/arch/arm/mach-imx/clock-imx21.c b/arch/arm/mach-imx/clock-imx21.c
deleted file mode 100644
index ee15d8c9db08..000000000000
--- a/arch/arm/mach-imx/clock-imx21.c
+++ /dev/null
@@ -1,1239 +0,0 @@
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-#include <asm/div64.h>
-
-#define IO_ADDR_CCM(off) (MX21_IO_ADDRESS(MX21_CCM_BASE_ADDR + (off)))
-
-/* Register offsets */
-#define CCM_CSCR IO_ADDR_CCM(0x0)
-#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
-#define CCM_MPCTL1 IO_ADDR_CCM(0x8)
-#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
-#define CCM_SPCTL1 IO_ADDR_CCM(0x10)
-#define CCM_OSC26MCTL IO_ADDR_CCM(0x14)
-#define CCM_PCDR0 IO_ADDR_CCM(0x18)
-#define CCM_PCDR1 IO_ADDR_CCM(0x1c)
-#define CCM_PCCR0 IO_ADDR_CCM(0x20)
-#define CCM_PCCR1 IO_ADDR_CCM(0x24)
-#define CCM_CCSR IO_ADDR_CCM(0x28)
-#define CCM_PMCTL IO_ADDR_CCM(0x2c)
-#define CCM_PMCOUNT IO_ADDR_CCM(0x30)
-#define CCM_WKGDCTL IO_ADDR_CCM(0x34)
-
-#define CCM_CSCR_PRESC_OFFSET 29
-#define CCM_CSCR_PRESC_MASK (0x7 << CCM_CSCR_PRESC_OFFSET)
-
-#define CCM_CSCR_USB_OFFSET 26
-#define CCM_CSCR_USB_MASK (0x7 << CCM_CSCR_USB_OFFSET)
-#define CCM_CSCR_SD_OFFSET 24
-#define CCM_CSCR_SD_MASK (0x3 << CCM_CSCR_SD_OFFSET)
-#define CCM_CSCR_SPLLRES (1 << 22)
-#define CCM_CSCR_MPLLRES (1 << 21)
-#define CCM_CSCR_SSI2_OFFSET 20
-#define CCM_CSCR_SSI2 (1 << CCM_CSCR_SSI2_OFFSET)
-#define CCM_CSCR_SSI1_OFFSET 19
-#define CCM_CSCR_SSI1 (1 << CCM_CSCR_SSI1_OFFSET)
-#define CCM_CSCR_FIR_OFFSET 18
-#define CCM_CSCR_FIR (1 << CCM_CSCR_FIR_OFFSET)
-#define CCM_CSCR_SP (1 << 17)
-#define CCM_CSCR_MCU (1 << 16)
-#define CCM_CSCR_BCLK_OFFSET 10
-#define CCM_CSCR_BCLK_MASK (0xf << CCM_CSCR_BCLK_OFFSET)
-#define CCM_CSCR_IPDIV_OFFSET 9
-#define CCM_CSCR_IPDIV (1 << CCM_CSCR_IPDIV_OFFSET)
-
-#define CCM_CSCR_OSC26MDIV (1 << 4)
-#define CCM_CSCR_OSC26M (1 << 3)
-#define CCM_CSCR_FPM (1 << 2)
-#define CCM_CSCR_SPEN (1 << 1)
-#define CCM_CSCR_MPEN 1
-
-#define CCM_MPCTL0_CPLM (1 << 31)
-#define CCM_MPCTL0_PD_OFFSET 26
-#define CCM_MPCTL0_PD_MASK (0xf << 26)
-#define CCM_MPCTL0_MFD_OFFSET 16
-#define CCM_MPCTL0_MFD_MASK (0x3ff << 16)
-#define CCM_MPCTL0_MFI_OFFSET 10
-#define CCM_MPCTL0_MFI_MASK (0xf << 10)
-#define CCM_MPCTL0_MFN_OFFSET 0
-#define CCM_MPCTL0_MFN_MASK 0x3ff
-
-#define CCM_MPCTL1_LF (1 << 15)
-#define CCM_MPCTL1_BRMO (1 << 6)
-
-#define CCM_SPCTL0_CPLM (1 << 31)
-#define CCM_SPCTL0_PD_OFFSET 26
-#define CCM_SPCTL0_PD_MASK (0xf << 26)
-#define CCM_SPCTL0_MFD_OFFSET 16
-#define CCM_SPCTL0_MFD_MASK (0x3ff << 16)
-#define CCM_SPCTL0_MFI_OFFSET 10
-#define CCM_SPCTL0_MFI_MASK (0xf << 10)
-#define CCM_SPCTL0_MFN_OFFSET 0
-#define CCM_SPCTL0_MFN_MASK 0x3ff
-
-#define CCM_SPCTL1_LF (1 << 15)
-#define CCM_SPCTL1_BRMO (1 << 6)
-
-#define CCM_OSC26MCTL_PEAK_OFFSET 16
-#define CCM_OSC26MCTL_PEAK_MASK (0x3 << 16)
-#define CCM_OSC26MCTL_AGC_OFFSET 8
-#define CCM_OSC26MCTL_AGC_MASK (0x3f << 8)
-#define CCM_OSC26MCTL_ANATEST_OFFSET 0
-#define CCM_OSC26MCTL_ANATEST_MASK 0x3f
-
-#define CCM_PCDR0_SSI2BAUDDIV_OFFSET 26
-#define CCM_PCDR0_SSI2BAUDDIV_MASK (0x3f << 26)
-#define CCM_PCDR0_SSI1BAUDDIV_OFFSET 16
-#define CCM_PCDR0_SSI1BAUDDIV_MASK (0x3f << 16)
-#define CCM_PCDR0_NFCDIV_OFFSET 12
-#define CCM_PCDR0_NFCDIV_MASK (0xf << 12)
-#define CCM_PCDR0_48MDIV_OFFSET 5
-#define CCM_PCDR0_48MDIV_MASK (0x7 << CCM_PCDR0_48MDIV_OFFSET)
-#define CCM_PCDR0_FIRIDIV_OFFSET 0
-#define CCM_PCDR0_FIRIDIV_MASK 0x1f
-#define CCM_PCDR1_PERDIV4_OFFSET 24
-#define CCM_PCDR1_PERDIV4_MASK (0x3f << 24)
-#define CCM_PCDR1_PERDIV3_OFFSET 16
-#define CCM_PCDR1_PERDIV3_MASK (0x3f << 16)
-#define CCM_PCDR1_PERDIV2_OFFSET 8
-#define CCM_PCDR1_PERDIV2_MASK (0x3f << 8)
-#define CCM_PCDR1_PERDIV1_OFFSET 0
-#define CCM_PCDR1_PERDIV1_MASK 0x3f
-
-#define CCM_PCCR_HCLK_CSI_OFFSET 31
-#define CCM_PCCR_HCLK_CSI_REG CCM_PCCR0
-#define CCM_PCCR_HCLK_DMA_OFFSET 30
-#define CCM_PCCR_HCLK_DMA_REG CCM_PCCR0
-#define CCM_PCCR_HCLK_BROM_OFFSET 28
-#define CCM_PCCR_HCLK_BROM_REG CCM_PCCR0
-#define CCM_PCCR_HCLK_EMMA_OFFSET 27
-#define CCM_PCCR_HCLK_EMMA_REG CCM_PCCR0
-#define CCM_PCCR_HCLK_LCDC_OFFSET 26
-#define CCM_PCCR_HCLK_LCDC_REG CCM_PCCR0
-#define CCM_PCCR_HCLK_SLCDC_OFFSET 25
-#define CCM_PCCR_HCLK_SLCDC_REG CCM_PCCR0
-#define CCM_PCCR_HCLK_USBOTG_OFFSET 24
-#define CCM_PCCR_HCLK_USBOTG_REG CCM_PCCR0
-#define CCM_PCCR_HCLK_BMI_OFFSET 23
-#define CCM_PCCR_BMI_MASK (1 << CCM_PCCR_HCLK_BMI_OFFSET)
-#define CCM_PCCR_HCLK_BMI_REG CCM_PCCR0
-#define CCM_PCCR_PERCLK4_OFFSET 22
-#define CCM_PCCR_PERCLK4_REG CCM_PCCR0
-#define CCM_PCCR_SLCDC_OFFSET 21
-#define CCM_PCCR_SLCDC_REG CCM_PCCR0
-#define CCM_PCCR_FIRI_BAUD_OFFSET 20
-#define CCM_PCCR_FIRI_BAUD_MASK (1 << CCM_PCCR_FIRI_BAUD_OFFSET)
-#define CCM_PCCR_FIRI_BAUD_REG CCM_PCCR0
-#define CCM_PCCR_NFC_OFFSET 19
-#define CCM_PCCR_NFC_REG CCM_PCCR0
-#define CCM_PCCR_LCDC_OFFSET 18
-#define CCM_PCCR_LCDC_REG CCM_PCCR0
-#define CCM_PCCR_SSI1_BAUD_OFFSET 17
-#define CCM_PCCR_SSI1_BAUD_REG CCM_PCCR0
-#define CCM_PCCR_SSI2_BAUD_OFFSET 16
-#define CCM_PCCR_SSI2_BAUD_REG CCM_PCCR0
-#define CCM_PCCR_EMMA_OFFSET 15
-#define CCM_PCCR_EMMA_REG CCM_PCCR0
-#define CCM_PCCR_USBOTG_OFFSET 14
-#define CCM_PCCR_USBOTG_REG CCM_PCCR0
-#define CCM_PCCR_DMA_OFFSET 13
-#define CCM_PCCR_DMA_REG CCM_PCCR0
-#define CCM_PCCR_I2C1_OFFSET 12
-#define CCM_PCCR_I2C1_REG CCM_PCCR0
-#define CCM_PCCR_GPIO_OFFSET 11
-#define CCM_PCCR_GPIO_REG CCM_PCCR0
-#define CCM_PCCR_SDHC2_OFFSET 10
-#define CCM_PCCR_SDHC2_REG CCM_PCCR0
-#define CCM_PCCR_SDHC1_OFFSET 9
-#define CCM_PCCR_SDHC1_REG CCM_PCCR0
-#define CCM_PCCR_FIRI_OFFSET 8
-#define CCM_PCCR_FIRI_MASK (1 << CCM_PCCR_FIRI_OFFSET)
-#define CCM_PCCR_FIRI_REG CCM_PCCR0
-#define CCM_PCCR_SSI2_IPG_OFFSET 7
-#define CCM_PCCR_SSI2_REG CCM_PCCR0
-#define CCM_PCCR_SSI1_IPG_OFFSET 6
-#define CCM_PCCR_SSI1_REG CCM_PCCR0
-#define CCM_PCCR_CSPI2_OFFSET 5
-#define CCM_PCCR_CSPI2_REG CCM_PCCR0
-#define CCM_PCCR_CSPI1_OFFSET 4
-#define CCM_PCCR_CSPI1_REG CCM_PCCR0
-#define CCM_PCCR_UART4_OFFSET 3
-#define CCM_PCCR_UART4_REG CCM_PCCR0
-#define CCM_PCCR_UART3_OFFSET 2
-#define CCM_PCCR_UART3_REG CCM_PCCR0
-#define CCM_PCCR_UART2_OFFSET 1
-#define CCM_PCCR_UART2_REG CCM_PCCR0
-#define CCM_PCCR_UART1_OFFSET 0
-#define CCM_PCCR_UART1_REG CCM_PCCR0
-
-#define CCM_PCCR_OWIRE_OFFSET 31
-#define CCM_PCCR_OWIRE_REG CCM_PCCR1
-#define CCM_PCCR_KPP_OFFSET 30
-#define CCM_PCCR_KPP_REG CCM_PCCR1
-#define CCM_PCCR_RTC_OFFSET 29
-#define CCM_PCCR_RTC_REG CCM_PCCR1
-#define CCM_PCCR_PWM_OFFSET 28
-#define CCM_PCCR_PWM_REG CCM_PCCR1
-#define CCM_PCCR_GPT3_OFFSET 27
-#define CCM_PCCR_GPT3_REG CCM_PCCR1
-#define CCM_PCCR_GPT2_OFFSET 26
-#define CCM_PCCR_GPT2_REG CCM_PCCR1
-#define CCM_PCCR_GPT1_OFFSET 25
-#define CCM_PCCR_GPT1_REG CCM_PCCR1
-#define CCM_PCCR_WDT_OFFSET 24
-#define CCM_PCCR_WDT_REG CCM_PCCR1
-#define CCM_PCCR_CSPI3_OFFSET 23
-#define CCM_PCCR_CSPI3_REG CCM_PCCR1
-
-#define CCM_PCCR_CSPI1_MASK (1 << CCM_PCCR_CSPI1_OFFSET)
-#define CCM_PCCR_CSPI2_MASK (1 << CCM_PCCR_CSPI2_OFFSET)
-#define CCM_PCCR_CSPI3_MASK (1 << CCM_PCCR_CSPI3_OFFSET)
-#define CCM_PCCR_DMA_MASK (1 << CCM_PCCR_DMA_OFFSET)
-#define CCM_PCCR_EMMA_MASK (1 << CCM_PCCR_EMMA_OFFSET)
-#define CCM_PCCR_GPIO_MASK (1 << CCM_PCCR_GPIO_OFFSET)
-#define CCM_PCCR_GPT1_MASK (1 << CCM_PCCR_GPT1_OFFSET)
-#define CCM_PCCR_GPT2_MASK (1 << CCM_PCCR_GPT2_OFFSET)
-#define CCM_PCCR_GPT3_MASK (1 << CCM_PCCR_GPT3_OFFSET)
-#define CCM_PCCR_HCLK_BROM_MASK (1 << CCM_PCCR_HCLK_BROM_OFFSET)
-#define CCM_PCCR_HCLK_CSI_MASK (1 << CCM_PCCR_HCLK_CSI_OFFSET)
-#define CCM_PCCR_HCLK_DMA_MASK (1 << CCM_PCCR_HCLK_DMA_OFFSET)
-#define CCM_PCCR_HCLK_EMMA_MASK (1 << CCM_PCCR_HCLK_EMMA_OFFSET)
-#define CCM_PCCR_HCLK_LCDC_MASK (1 << CCM_PCCR_HCLK_LCDC_OFFSET)
-#define CCM_PCCR_HCLK_SLCDC_MASK (1 << CCM_PCCR_HCLK_SLCDC_OFFSET)
-#define CCM_PCCR_HCLK_USBOTG_MASK (1 << CCM_PCCR_HCLK_USBOTG_OFFSET)
-#define CCM_PCCR_I2C1_MASK (1 << CCM_PCCR_I2C1_OFFSET)
-#define CCM_PCCR_KPP_MASK (1 << CCM_PCCR_KPP_OFFSET)
-#define CCM_PCCR_LCDC_MASK (1 << CCM_PCCR_LCDC_OFFSET)
-#define CCM_PCCR_NFC_MASK (1 << CCM_PCCR_NFC_OFFSET)
-#define CCM_PCCR_OWIRE_MASK (1 << CCM_PCCR_OWIRE_OFFSET)
-#define CCM_PCCR_PERCLK4_MASK (1 << CCM_PCCR_PERCLK4_OFFSET)
-#define CCM_PCCR_PWM_MASK (1 << CCM_PCCR_PWM_OFFSET)
-#define CCM_PCCR_RTC_MASK (1 << CCM_PCCR_RTC_OFFSET)
-#define CCM_PCCR_SDHC1_MASK (1 << CCM_PCCR_SDHC1_OFFSET)
-#define CCM_PCCR_SDHC2_MASK (1 << CCM_PCCR_SDHC2_OFFSET)
-#define CCM_PCCR_SLCDC_MASK (1 << CCM_PCCR_SLCDC_OFFSET)
-#define CCM_PCCR_SSI1_BAUD_MASK (1 << CCM_PCCR_SSI1_BAUD_OFFSET)
-#define CCM_PCCR_SSI1_IPG_MASK (1 << CCM_PCCR_SSI1_IPG_OFFSET)
-#define CCM_PCCR_SSI2_BAUD_MASK (1 << CCM_PCCR_SSI2_BAUD_OFFSET)
-#define CCM_PCCR_SSI2_IPG_MASK (1 << CCM_PCCR_SSI2_IPG_OFFSET)
-#define CCM_PCCR_UART1_MASK (1 << CCM_PCCR_UART1_OFFSET)
-#define CCM_PCCR_UART2_MASK (1 << CCM_PCCR_UART2_OFFSET)
-#define CCM_PCCR_UART3_MASK (1 << CCM_PCCR_UART3_OFFSET)
-#define CCM_PCCR_UART4_MASK (1 << CCM_PCCR_UART4_OFFSET)
-#define CCM_PCCR_USBOTG_MASK (1 << CCM_PCCR_USBOTG_OFFSET)
-#define CCM_PCCR_WDT_MASK (1 << CCM_PCCR_WDT_OFFSET)
-
-#define CCM_CCSR_32KSR (1 << 15)
-
-#define CCM_CCSR_CLKMODE1 (1 << 9)
-#define CCM_CCSR_CLKMODE0 (1 << 8)
-
-#define CCM_CCSR_CLKOSEL_OFFSET 0
-#define CCM_CCSR_CLKOSEL_MASK 0x1f
-
-#define SYS_FMCR 0x14 /* Functional Muxing Control Reg */
-#define SYS_CHIP_ID 0x00 /* The offset of CHIP ID register */
-
-static int _clk_enable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg |= 1 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
- return 0;
-}
-
-static void _clk_disable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(1 << clk->enable_shift);
- __raw_writel(reg, clk->enable_reg);
-}
-
-static unsigned long _clk_generic_round_rate(struct clk *clk,
- unsigned long rate,
- u32 max_divisor)
-{
- u32 div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
- if (parent_rate % rate)
- div++;
-
- if (div > max_divisor)
- div = max_divisor;
-
- return parent_rate / div;
-}
-
-static int _clk_spll_enable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(CCM_CSCR);
- reg |= CCM_CSCR_SPEN;
- __raw_writel(reg, CCM_CSCR);
-
- while ((__raw_readl(CCM_SPCTL1) & CCM_SPCTL1_LF) == 0)
- ;
- return 0;
-}
-
-static void _clk_spll_disable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(CCM_CSCR);
- reg &= ~CCM_CSCR_SPEN;
- __raw_writel(reg, CCM_CSCR);
-}
-
-
-#define CSCR() (__raw_readl(CCM_CSCR))
-#define PCDR0() (__raw_readl(CCM_PCDR0))
-#define PCDR1() (__raw_readl(CCM_PCDR1))
-
-static unsigned long _clk_perclkx_round_rate(struct clk *clk,
- unsigned long rate)
-{
- return _clk_generic_round_rate(clk, rate, 64);
-}
-
-static int _clk_perclkx_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg;
- u32 div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (clk->id < 0 || clk->id > 3)
- return -EINVAL;
-
- div = parent_rate / rate;
- if (div > 64 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
- div--;
-
- reg =
- __raw_readl(CCM_PCDR1) & ~(CCM_PCDR1_PERDIV1_MASK <<
- (clk->id << 3));
- reg |= div << (clk->id << 3);
- __raw_writel(reg, CCM_PCDR1);
-
- return 0;
-}
-
-static unsigned long _clk_usb_recalc(struct clk *clk)
-{
- unsigned long usb_pdf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- usb_pdf = (CSCR() & CCM_CSCR_USB_MASK) >> CCM_CSCR_USB_OFFSET;
-
- return parent_rate / (usb_pdf + 1U);
-}
-
-static unsigned long _clk_usb_round_rate(struct clk *clk,
- unsigned long rate)
-{
- return _clk_generic_round_rate(clk, rate, 8);
-}
-
-static int _clk_usb_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg;
- u32 div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
- if (div > 8 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
- div--;
-
- reg = CSCR() & ~CCM_CSCR_USB_MASK;
- reg |= div << CCM_CSCR_USB_OFFSET;
- __raw_writel(reg, CCM_CSCR);
-
- return 0;
-}
-
-static unsigned long _clk_ssix_recalc(struct clk *clk, unsigned long pdf)
-{
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- pdf = (pdf < 2) ? 124UL : pdf; /* MX21 & MX27 TO1 */
-
- return 2UL * parent_rate / pdf;
-}
-
-static unsigned long _clk_ssi1_recalc(struct clk *clk)
-{
- return _clk_ssix_recalc(clk,
- (PCDR0() & CCM_PCDR0_SSI1BAUDDIV_MASK)
- >> CCM_PCDR0_SSI1BAUDDIV_OFFSET);
-}
-
-static unsigned long _clk_ssi2_recalc(struct clk *clk)
-{
- return _clk_ssix_recalc(clk,
- (PCDR0() & CCM_PCDR0_SSI2BAUDDIV_MASK) >>
- CCM_PCDR0_SSI2BAUDDIV_OFFSET);
-}
-
-static unsigned long _clk_nfc_recalc(struct clk *clk)
-{
- unsigned long nfc_pdf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- nfc_pdf = (PCDR0() & CCM_PCDR0_NFCDIV_MASK)
- >> CCM_PCDR0_NFCDIV_OFFSET;
-
- return parent_rate / (nfc_pdf + 1);
-}
-
-static unsigned long _clk_parent_round_rate(struct clk *clk, unsigned long rate)
-{
- return clk->parent->round_rate(clk->parent, rate);
-}
-
-static int _clk_parent_set_rate(struct clk *clk, unsigned long rate)
-{
- return clk->parent->set_rate(clk->parent, rate);
-}
-
-static unsigned long external_high_reference; /* in Hz */
-
-static unsigned long get_high_reference_clock_rate(struct clk *clk)
-{
- return external_high_reference;
-}
-
-/*
- * the high frequency external clock reference
- * Default case is 26MHz.
- */
-static struct clk ckih_clk = {
- .get_rate = get_high_reference_clock_rate,
-};
-
-static unsigned long external_low_reference; /* in Hz */
-
-static unsigned long get_low_reference_clock_rate(struct clk *clk)
-{
- return external_low_reference;
-}
-
-/*
- * the low frequency external clock reference
- * Default case is 32.768kHz.
- */
-static struct clk ckil_clk = {
- .get_rate = get_low_reference_clock_rate,
-};
-
-
-static unsigned long _clk_fpm_recalc(struct clk *clk)
-{
- return clk_get_rate(clk->parent) * 512;
-}
-
-/* Output of frequency pre multiplier */
-static struct clk fpm_clk = {
- .parent = &ckil_clk,
- .get_rate = _clk_fpm_recalc,
-};
-
-static unsigned long get_mpll_clk(struct clk *clk)
-{
- uint32_t reg;
- unsigned long ref_clk;
- unsigned long mfi = 0, mfn = 0, mfd = 0, pdf = 0;
- unsigned long long temp;
-
- ref_clk = clk_get_rate(clk->parent);
-
- reg = __raw_readl(CCM_MPCTL0);
- pdf = (reg & CCM_MPCTL0_PD_MASK) >> CCM_MPCTL0_PD_OFFSET;
- mfd = (reg & CCM_MPCTL0_MFD_MASK) >> CCM_MPCTL0_MFD_OFFSET;
- mfi = (reg & CCM_MPCTL0_MFI_MASK) >> CCM_MPCTL0_MFI_OFFSET;
- mfn = (reg & CCM_MPCTL0_MFN_MASK) >> CCM_MPCTL0_MFN_OFFSET;
-
- mfi = (mfi <= 5) ? 5 : mfi;
- temp = 2LL * ref_clk * mfn;
- do_div(temp, mfd + 1);
- temp = 2LL * ref_clk * mfi + temp;
- do_div(temp, pdf + 1);
-
- return (unsigned long)temp;
-}
-
-static struct clk mpll_clk = {
- .parent = &ckih_clk,
- .get_rate = get_mpll_clk,
-};
-
-static unsigned long _clk_fclk_get_rate(struct clk *clk)
-{
- unsigned long parent_rate;
- u32 div;
-
- div = (CSCR() & CCM_CSCR_PRESC_MASK) >> CCM_CSCR_PRESC_OFFSET;
- parent_rate = clk_get_rate(clk->parent);
-
- return parent_rate / (div+1);
-}
-
-static struct clk fclk_clk = {
- .parent = &mpll_clk,
- .get_rate = _clk_fclk_get_rate
-};
-
-static unsigned long get_spll_clk(struct clk *clk)
-{
- uint32_t reg;
- unsigned long ref_clk;
- unsigned long mfi = 0, mfn = 0, mfd = 0, pdf = 0;
- unsigned long long temp;
-
- ref_clk = clk_get_rate(clk->parent);
-
- reg = __raw_readl(CCM_SPCTL0);
- pdf = (reg & CCM_SPCTL0_PD_MASK) >> CCM_SPCTL0_PD_OFFSET;
- mfd = (reg & CCM_SPCTL0_MFD_MASK) >> CCM_SPCTL0_MFD_OFFSET;
- mfi = (reg & CCM_SPCTL0_MFI_MASK) >> CCM_SPCTL0_MFI_OFFSET;
- mfn = (reg & CCM_SPCTL0_MFN_MASK) >> CCM_SPCTL0_MFN_OFFSET;
-
- mfi = (mfi <= 5) ? 5 : mfi;
- temp = 2LL * ref_clk * mfn;
- do_div(temp, mfd + 1);
- temp = 2LL * ref_clk * mfi + temp;
- do_div(temp, pdf + 1);
-
- return (unsigned long)temp;
-}
-
-static struct clk spll_clk = {
- .parent = &ckih_clk,
- .get_rate = get_spll_clk,
- .enable = _clk_spll_enable,
- .disable = _clk_spll_disable,
-};
-
-static unsigned long get_hclk_clk(struct clk *clk)
-{
- unsigned long rate;
- unsigned long bclk_pdf;
-
- bclk_pdf = (CSCR() & CCM_CSCR_BCLK_MASK)
- >> CCM_CSCR_BCLK_OFFSET;
-
- rate = clk_get_rate(clk->parent);
- return rate / (bclk_pdf + 1);
-}
-
-static struct clk hclk_clk = {
- .parent = &fclk_clk,
- .get_rate = get_hclk_clk,
-};
-
-static unsigned long get_ipg_clk(struct clk *clk)
-{
- unsigned long rate;
- unsigned long ipg_pdf;
-
- ipg_pdf = (CSCR() & CCM_CSCR_IPDIV) >> CCM_CSCR_IPDIV_OFFSET;
-
- rate = clk_get_rate(clk->parent);
- return rate / (ipg_pdf + 1);
-}
-
-static struct clk ipg_clk = {
- .parent = &hclk_clk,
- .get_rate = get_ipg_clk,
-};
-
-static unsigned long _clk_perclkx_recalc(struct clk *clk)
-{
- unsigned long perclk_pdf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (clk->id < 0 || clk->id > 3)
- return 0;
-
- perclk_pdf = (PCDR1() >> (clk->id << 3)) & CCM_PCDR1_PERDIV1_MASK;
-
- return parent_rate / (perclk_pdf + 1);
-}
-
-static struct clk per_clk[] = {
- {
- .id = 0,
- .parent = &mpll_clk,
- .get_rate = _clk_perclkx_recalc,
- }, {
- .id = 1,
- .parent = &mpll_clk,
- .get_rate = _clk_perclkx_recalc,
- }, {
- .id = 2,
- .parent = &mpll_clk,
- .round_rate = _clk_perclkx_round_rate,
- .set_rate = _clk_perclkx_set_rate,
- .get_rate = _clk_perclkx_recalc,
- /* Enable/Disable done via lcdc_clk[1] */
- }, {
- .id = 3,
- .parent = &mpll_clk,
- .round_rate = _clk_perclkx_round_rate,
- .set_rate = _clk_perclkx_set_rate,
- .get_rate = _clk_perclkx_recalc,
- /* Enable/Disable done via csi_clk[1] */
- },
-};
-
-static struct clk uart_ipg_clk[];
-
-static struct clk uart_clk[] = {
- {
- .id = 0,
- .parent = &per_clk[0],
- .secondary = &uart_ipg_clk[0],
- }, {
- .id = 1,
- .parent = &per_clk[0],
- .secondary = &uart_ipg_clk[1],
- }, {
- .id = 2,
- .parent = &per_clk[0],
- .secondary = &uart_ipg_clk[2],
- }, {
- .id = 3,
- .parent = &per_clk[0],
- .secondary = &uart_ipg_clk[3],
- },
-};
-
-static struct clk uart_ipg_clk[] = {
- {
- .id = 0,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_UART1_REG,
- .enable_shift = CCM_PCCR_UART1_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 1,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_UART2_REG,
- .enable_shift = CCM_PCCR_UART2_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 2,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_UART3_REG,
- .enable_shift = CCM_PCCR_UART3_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 3,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_UART4_REG,
- .enable_shift = CCM_PCCR_UART4_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk gpt_ipg_clk[];
-
-static struct clk gpt_clk[] = {
- {
- .id = 0,
- .parent = &per_clk[0],
- .secondary = &gpt_ipg_clk[0],
- }, {
- .id = 1,
- .parent = &per_clk[0],
- .secondary = &gpt_ipg_clk[1],
- }, {
- .id = 2,
- .parent = &per_clk[0],
- .secondary = &gpt_ipg_clk[2],
- },
-};
-
-static struct clk gpt_ipg_clk[] = {
- {
- .id = 0,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_GPT1_REG,
- .enable_shift = CCM_PCCR_GPT1_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 1,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_GPT2_REG,
- .enable_shift = CCM_PCCR_GPT2_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 2,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_GPT3_REG,
- .enable_shift = CCM_PCCR_GPT3_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk pwm_clk[] = {
- {
- .parent = &per_clk[0],
- .secondary = &pwm_clk[1],
- }, {
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_PWM_REG,
- .enable_shift = CCM_PCCR_PWM_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk sdhc_ipg_clk[];
-
-static struct clk sdhc_clk[] = {
- {
- .id = 0,
- .parent = &per_clk[1],
- .secondary = &sdhc_ipg_clk[0],
- }, {
- .id = 1,
- .parent = &per_clk[1],
- .secondary = &sdhc_ipg_clk[1],
- },
-};
-
-static struct clk sdhc_ipg_clk[] = {
- {
- .id = 0,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_SDHC1_REG,
- .enable_shift = CCM_PCCR_SDHC1_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 1,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_SDHC2_REG,
- .enable_shift = CCM_PCCR_SDHC2_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk cspi_ipg_clk[];
-
-static struct clk cspi_clk[] = {
- {
- .id = 0,
- .parent = &per_clk[1],
- .secondary = &cspi_ipg_clk[0],
- }, {
- .id = 1,
- .parent = &per_clk[1],
- .secondary = &cspi_ipg_clk[1],
- }, {
- .id = 2,
- .parent = &per_clk[1],
- .secondary = &cspi_ipg_clk[2],
- },
-};
-
-static struct clk cspi_ipg_clk[] = {
- {
- .id = 0,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_CSPI1_REG,
- .enable_shift = CCM_PCCR_CSPI1_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 1,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_CSPI2_REG,
- .enable_shift = CCM_PCCR_CSPI2_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 3,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_CSPI3_REG,
- .enable_shift = CCM_PCCR_CSPI3_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk lcdc_clk[] = {
- {
- .parent = &per_clk[2],
- .secondary = &lcdc_clk[1],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
- }, {
- .parent = &ipg_clk,
- .secondary = &lcdc_clk[2],
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_LCDC_REG,
- .enable_shift = CCM_PCCR_LCDC_OFFSET,
- .disable = _clk_disable,
- }, {
- .parent = &hclk_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_HCLK_LCDC_REG,
- .enable_shift = CCM_PCCR_HCLK_LCDC_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk csi_clk[] = {
- {
- .parent = &per_clk[3],
- .secondary = &csi_clk[1],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
- }, {
- .parent = &hclk_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_HCLK_CSI_REG,
- .enable_shift = CCM_PCCR_HCLK_CSI_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk usb_clk[] = {
- {
- .parent = &spll_clk,
- .secondary = &usb_clk[1],
- .get_rate = _clk_usb_recalc,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_USBOTG_REG,
- .enable_shift = CCM_PCCR_USBOTG_OFFSET,
- .disable = _clk_disable,
- .round_rate = _clk_usb_round_rate,
- .set_rate = _clk_usb_set_rate,
- }, {
- .parent = &hclk_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_HCLK_USBOTG_REG,
- .enable_shift = CCM_PCCR_HCLK_USBOTG_OFFSET,
- .disable = _clk_disable,
- }
-};
-
-static struct clk ssi_ipg_clk[];
-
-static struct clk ssi_clk[] = {
- {
- .id = 0,
- .parent = &mpll_clk,
- .secondary = &ssi_ipg_clk[0],
- .get_rate = _clk_ssi1_recalc,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_SSI1_BAUD_REG,
- .enable_shift = CCM_PCCR_SSI1_BAUD_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 1,
- .parent = &mpll_clk,
- .secondary = &ssi_ipg_clk[1],
- .get_rate = _clk_ssi2_recalc,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_SSI2_BAUD_REG,
- .enable_shift = CCM_PCCR_SSI2_BAUD_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk ssi_ipg_clk[] = {
- {
- .id = 0,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_SSI1_REG,
- .enable_shift = CCM_PCCR_SSI1_IPG_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 1,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_SSI2_REG,
- .enable_shift = CCM_PCCR_SSI2_IPG_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-
-static struct clk nfc_clk = {
- .parent = &fclk_clk,
- .get_rate = _clk_nfc_recalc,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_NFC_REG,
- .enable_shift = CCM_PCCR_NFC_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk dma_clk[] = {
- {
- .parent = &hclk_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_DMA_REG,
- .enable_shift = CCM_PCCR_DMA_OFFSET,
- .disable = _clk_disable,
- .secondary = &dma_clk[1],
- }, {
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_HCLK_DMA_REG,
- .enable_shift = CCM_PCCR_HCLK_DMA_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk brom_clk = {
- .parent = &hclk_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_HCLK_BROM_REG,
- .enable_shift = CCM_PCCR_HCLK_BROM_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk emma_clk[] = {
- {
- .parent = &hclk_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_EMMA_REG,
- .enable_shift = CCM_PCCR_EMMA_OFFSET,
- .disable = _clk_disable,
- .secondary = &emma_clk[1],
- }, {
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_HCLK_EMMA_REG,
- .enable_shift = CCM_PCCR_HCLK_EMMA_OFFSET,
- .disable = _clk_disable,
- }
-};
-
-static struct clk slcdc_clk[] = {
- {
- .parent = &hclk_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_SLCDC_REG,
- .enable_shift = CCM_PCCR_SLCDC_OFFSET,
- .disable = _clk_disable,
- .secondary = &slcdc_clk[1],
- }, {
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_HCLK_SLCDC_REG,
- .enable_shift = CCM_PCCR_HCLK_SLCDC_OFFSET,
- .disable = _clk_disable,
- }
-};
-
-static struct clk wdog_clk = {
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_WDT_REG,
- .enable_shift = CCM_PCCR_WDT_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk gpio_clk = {
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_GPIO_REG,
- .enable_shift = CCM_PCCR_GPIO_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk i2c_clk = {
- .id = 0,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_I2C1_REG,
- .enable_shift = CCM_PCCR_I2C1_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk kpp_clk = {
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_KPP_REG,
- .enable_shift = CCM_PCCR_KPP_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk owire_clk = {
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_OWIRE_REG,
- .enable_shift = CCM_PCCR_OWIRE_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk rtc_clk = {
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_RTC_REG,
- .enable_shift = CCM_PCCR_RTC_OFFSET,
- .disable = _clk_disable,
-};
-
-static unsigned long _clk_clko_round_rate(struct clk *clk, unsigned long rate)
-{
- return _clk_generic_round_rate(clk, rate, 8);
-}
-
-static int _clk_clko_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg;
- u32 div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
-
- if (div > 8 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
- div--;
-
- reg = __raw_readl(CCM_PCDR0);
-
- if (clk->parent == &usb_clk[0]) {
- reg &= ~CCM_PCDR0_48MDIV_MASK;
- reg |= div << CCM_PCDR0_48MDIV_OFFSET;
- }
- __raw_writel(reg, CCM_PCDR0);
-
- return 0;
-}
-
-static unsigned long _clk_clko_recalc(struct clk *clk)
-{
- u32 div = 0;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (clk->parent == &usb_clk[0]) /* 48M */
- div = (__raw_readl(CCM_PCDR0) & CCM_PCDR0_48MDIV_MASK)
- >> CCM_PCDR0_48MDIV_OFFSET;
- div++;
-
- return parent_rate / div;
-}
-
-static struct clk clko_clk;
-
-static int _clk_clko_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(CCM_CCSR) & ~CCM_CCSR_CLKOSEL_MASK;
-
- if (parent == &ckil_clk)
- reg |= 0 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &fpm_clk)
- reg |= 1 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &ckih_clk)
- reg |= 2 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == mpll_clk.parent)
- reg |= 3 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == spll_clk.parent)
- reg |= 4 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &mpll_clk)
- reg |= 5 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &spll_clk)
- reg |= 6 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &fclk_clk)
- reg |= 7 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &hclk_clk)
- reg |= 8 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &ipg_clk)
- reg |= 9 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &per_clk[0])
- reg |= 0xA << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &per_clk[1])
- reg |= 0xB << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &per_clk[2])
- reg |= 0xC << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &per_clk[3])
- reg |= 0xD << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &ssi_clk[0])
- reg |= 0xE << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &ssi_clk[1])
- reg |= 0xF << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &nfc_clk)
- reg |= 0x10 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &usb_clk[0])
- reg |= 0x14 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &clko_clk)
- reg |= 0x15 << CCM_CCSR_CLKOSEL_OFFSET;
- else
- return -EINVAL;
-
- __raw_writel(reg, CCM_CCSR);
-
- return 0;
-}
-
-static struct clk clko_clk = {
- .get_rate = _clk_clko_recalc,
- .set_rate = _clk_clko_set_rate,
- .round_rate = _clk_clko_round_rate,
- .set_parent = _clk_clko_set_parent,
-};
-
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-static struct clk_lookup lookups[] = {
-/* It's unlikely that any driver wants one of them directly:
- _REGISTER_CLOCK(NULL, "ckih", ckih_clk)
- _REGISTER_CLOCK(NULL, "ckil", ckil_clk)
- _REGISTER_CLOCK(NULL, "fpm", fpm_clk)
- _REGISTER_CLOCK(NULL, "mpll", mpll_clk)
- _REGISTER_CLOCK(NULL, "spll", spll_clk)
- _REGISTER_CLOCK(NULL, "fclk", fclk_clk)
- _REGISTER_CLOCK(NULL, "hclk", hclk_clk)
- _REGISTER_CLOCK(NULL, "ipg", ipg_clk)
-*/
- _REGISTER_CLOCK(NULL, "perclk1", per_clk[0])
- _REGISTER_CLOCK(NULL, "perclk2", per_clk[1])
- _REGISTER_CLOCK(NULL, "perclk3", per_clk[2])
- _REGISTER_CLOCK(NULL, "perclk4", per_clk[3])
- _REGISTER_CLOCK(NULL, "clko", clko_clk)
- _REGISTER_CLOCK("imx21-uart.0", NULL, uart_clk[0])
- _REGISTER_CLOCK("imx21-uart.1", NULL, uart_clk[1])
- _REGISTER_CLOCK("imx21-uart.2", NULL, uart_clk[2])
- _REGISTER_CLOCK("imx21-uart.3", NULL, uart_clk[3])
- _REGISTER_CLOCK(NULL, "gpt1", gpt_clk[0])
- _REGISTER_CLOCK(NULL, "gpt2", gpt_clk[1])
- _REGISTER_CLOCK(NULL, "gpt3", gpt_clk[2])
- _REGISTER_CLOCK(NULL, "pwm", pwm_clk[0])
- _REGISTER_CLOCK(NULL, "sdhc1", sdhc_clk[0])
- _REGISTER_CLOCK(NULL, "sdhc2", sdhc_clk[1])
- _REGISTER_CLOCK("imx21-cspi.0", NULL, cspi_clk[0])
- _REGISTER_CLOCK("imx21-cspi.1", NULL, cspi_clk[1])
- _REGISTER_CLOCK("imx21-cspi.2", NULL, cspi_clk[2])
- _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk[0])
- _REGISTER_CLOCK(NULL, "csi", csi_clk[0])
- _REGISTER_CLOCK("imx21-hcd.0", NULL, usb_clk[0])
- _REGISTER_CLOCK(NULL, "ssi1", ssi_clk[0])
- _REGISTER_CLOCK(NULL, "ssi2", ssi_clk[1])
- _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
- _REGISTER_CLOCK(NULL, "dma", dma_clk[0])
- _REGISTER_CLOCK(NULL, "brom", brom_clk)
- _REGISTER_CLOCK(NULL, "emma", emma_clk[0])
- _REGISTER_CLOCK(NULL, "slcdc", slcdc_clk[0])
- _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
- _REGISTER_CLOCK(NULL, "gpio", gpio_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
- _REGISTER_CLOCK("mxc-keypad", NULL, kpp_clk)
- _REGISTER_CLOCK(NULL, "owire", owire_clk)
- _REGISTER_CLOCK(NULL, "rtc", rtc_clk)
-};
-
-/*
- * must be called very early to get information about the
- * available clock rate when the timer framework starts
- */
-int __init mx21_clocks_init(unsigned long lref, unsigned long href)
-{
- u32 cscr;
-
- external_low_reference = lref;
- external_high_reference = href;
-
- /* detect the clock reference for both system PLLs */
- cscr = CSCR();
- if (cscr & CCM_CSCR_MCU)
- mpll_clk.parent = &ckih_clk;
- else
- mpll_clk.parent = &fpm_clk;
-
- if (cscr & CCM_CSCR_SP)
- spll_clk.parent = &ckih_clk;
- else
- spll_clk.parent = &fpm_clk;
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- /* Turn off all clock gates, keeping only GPT1 (needed by the timer) */
- __raw_writel(0, CCM_PCCR0);
- __raw_writel(CCM_PCCR_GPT1_MASK, CCM_PCCR1);
-
- /* This turns off the serial PLL as well */
- spll_clk.disable(&spll_clk);
-
- /* This will propagate to all children and init all the clock rates. */
- clk_enable(&per_clk[0]);
- clk_enable(&gpio_clk);
-
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
- clk_enable(&uart_clk[0]);
-#endif
-
- mxc_timer_init(&gpt_clk[0], MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR),
- MX21_INT_GPT1);
- return 0;
-}
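
The get_mpll_clk()/get_spll_clk() helpers removed above both decode the same
PLL control register layout (PD in bits 29:26, MFD in 25:16, MFI in 13:10,
MFN in 9:0) into a rate of 2 * ref * (MFI + MFN / (MFD + 1)) / (PD + 1),
clamping MFI to a minimum of 5 and doing the divisions in 64 bits. Below is a
minimal standalone sketch of that arithmetic in plain C; the register value
fed to it is made up for illustration and is not a real i.MX21 setting.

/* Standalone sketch of the MPCTL0/SPCTL0 decoding done by the deleted
 * get_mpll_clk()/get_spll_clk(); only the field layout and the formula
 * are taken from the driver above, the register value is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned long decode_imx21_pll(uint32_t reg, unsigned long ref_clk)
{
	unsigned long pd  = (reg >> 26) & 0xf;		/* pre-divider */
	unsigned long mfd = (reg >> 16) & 0x3ff;	/* mult. factor denominator */
	unsigned long mfi = (reg >> 10) & 0xf;		/* mult. factor integer part */
	unsigned long mfn = reg & 0x3ff;		/* mult. factor numerator */
	unsigned long long tmp;

	if (mfi < 5)		/* clamp MFI to a minimum of 5, as the driver does */
		mfi = 5;

	tmp  = 2ULL * ref_clk * mfn / (mfd + 1);	/* fractional contribution */
	tmp += 2ULL * ref_clk * mfi;			/* integer contribution */
	return (unsigned long)(tmp / (pd + 1));
}

int main(void)
{
	/* hypothetical MPCTL0 value: PD = 0, MFD = 626, MFI = 8, MFN = 293 */
	uint32_t reg = (0u << 26) | (626u << 16) | (8u << 10) | 293u;

	printf("PLL output: %lu Hz\n", decode_imx21_pll(reg, 26000000UL));
	return 0;
}

With the example value this prints roughly 440 MHz; on the real chip the
reference is either the 26 MHz ckih oscillator or the FPM output, selected in
mx21_clocks_init() above from the CCM_CSCR_MCU and CCM_CSCR_SP bits.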
diff --git a/arch/arm/mach-imx/clock-imx25.c b/arch/arm/mach-imx/clock-imx25.c
deleted file mode 100644
index b0fec74c8c91..000000000000
--- a/arch/arm/mach-imx/clock-imx25.c
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Copyright (C) 2009 by Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-#include <mach/mx25.h>
-
-#define CRM_BASE MX25_IO_ADDRESS(MX25_CRM_BASE_ADDR)
-
-#define CCM_MPCTL 0x00
-#define CCM_UPCTL 0x04
-#define CCM_CCTL 0x08
-#define CCM_CGCR0 0x0C
-#define CCM_CGCR1 0x10
-#define CCM_CGCR2 0x14
-#define CCM_PCDR0 0x18
-#define CCM_PCDR1 0x1C
-#define CCM_PCDR2 0x20
-#define CCM_PCDR3 0x24
-#define CCM_RCSR 0x28
-#define CCM_CRDR 0x2C
-#define CCM_DCVR0 0x30
-#define CCM_DCVR1 0x34
-#define CCM_DCVR2 0x38
-#define CCM_DCVR3 0x3c
-#define CCM_LTR0 0x40
-#define CCM_LTR1 0x44
-#define CCM_LTR2 0x48
-#define CCM_LTR3 0x4c
-
-static unsigned long get_rate_mpll(void)
-{
- ulong mpctl = __raw_readl(CRM_BASE + CCM_MPCTL);
-
- return mxc_decode_pll(mpctl, 24000000);
-}
-
-static unsigned long get_rate_upll(void)
-{
- ulong mpctl = __raw_readl(CRM_BASE + CCM_UPCTL);
-
- return mxc_decode_pll(mpctl, 24000000);
-}
-
-unsigned long get_rate_arm(struct clk *clk)
-{
- unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
- unsigned long rate = get_rate_mpll();
-
- if (cctl & (1 << 14))
- rate = (rate * 3) >> 2;
-
- return rate / ((cctl >> 30) + 1);
-}
-
-static unsigned long get_rate_ahb(struct clk *clk)
-{
- unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
-
- return get_rate_arm(NULL) / (((cctl >> 28) & 0x3) + 1);
-}
-
-static unsigned long get_rate_ipg(struct clk *clk)
-{
- return get_rate_ahb(NULL) >> 1;
-}
-
-static unsigned long get_rate_per(int per)
-{
- unsigned long ofs = (per & 0x3) * 8;
- unsigned long reg = per & ~0x3;
- unsigned long val = (readl(CRM_BASE + CCM_PCDR0 + reg) >> ofs) & 0x3f;
- unsigned long fref;
-
- if (readl(CRM_BASE + 0x64) & (1 << per))
- fref = get_rate_upll();
- else
- fref = get_rate_ahb(NULL);
-
- return fref / (val + 1);
-}
-
-static unsigned long get_rate_uart(struct clk *clk)
-{
- return get_rate_per(15);
-}
-
-static unsigned long get_rate_ssi2(struct clk *clk)
-{
- return get_rate_per(14);
-}
-
-static unsigned long get_rate_ssi1(struct clk *clk)
-{
- return get_rate_per(13);
-}
-
-static unsigned long get_rate_i2c(struct clk *clk)
-{
- return get_rate_per(6);
-}
-
-static unsigned long get_rate_nfc(struct clk *clk)
-{
- return get_rate_per(8);
-}
-
-static unsigned long get_rate_gpt(struct clk *clk)
-{
- return get_rate_per(5);
-}
-
-static unsigned long get_rate_lcdc(struct clk *clk)
-{
- return get_rate_per(7);
-}
-
-static unsigned long get_rate_esdhc1(struct clk *clk)
-{
- return get_rate_per(3);
-}
-
-static unsigned long get_rate_esdhc2(struct clk *clk)
-{
- return get_rate_per(4);
-}
-
-static unsigned long get_rate_csi(struct clk *clk)
-{
- return get_rate_per(0);
-}
-
-static unsigned long get_rate_otg(struct clk *clk)
-{
- unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
- unsigned long rate = get_rate_upll();
-
- return (cctl & (1 << 23)) ? 0 : rate / ((0x3F & (cctl >> 16)) + 1);
-}
-
-static int clk_cgcr_enable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg |= 1 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
-
- return 0;
-}
-
-static void clk_cgcr_disable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(1 << clk->enable_shift);
- __raw_writel(reg, clk->enable_reg);
-}
-
-#define DEFINE_CLOCK(name, i, er, es, gr, sr, s) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = CRM_BASE + er, \
- .enable_shift = es, \
- .get_rate = gr, \
- .set_rate = sr, \
- .enable = clk_cgcr_enable, \
- .disable = clk_cgcr_disable, \
- .secondary = s, \
- }
-
-/*
- * Note: the following IPG clock gating bits are wrongly marked "Reserved" in
- * the i.MX25 Reference Manual Rev 1, table 15-13. The information below is
- * taken from the Freescale released BSP.
- *
- * bit reg offset clock
- *
- * 0 CGCR1 0 AUDMUX
- * 12 CGCR1 12 ESAI
- * 16 CGCR1 16 GPIO1
- * 17 CGCR1 17 GPIO2
- * 18 CGCR1 18 GPIO3
- * 23 CGCR1 23 I2C1
- * 24 CGCR1 24 I2C2
- * 25 CGCR1 25 I2C3
- * 27 CGCR1 27 IOMUXC
- * 28 CGCR1 28 KPP
- * 30 CGCR1 30 OWIRE
- * 36 CGCR2 4 RTIC
- * 51 CGCR2 19 WDOG
- */
-
-DEFINE_CLOCK(gpt_clk, 0, CCM_CGCR0, 5, get_rate_gpt, NULL, NULL);
-DEFINE_CLOCK(uart_per_clk, 0, CCM_CGCR0, 15, get_rate_uart, NULL, NULL);
-DEFINE_CLOCK(ssi1_per_clk, 0, CCM_CGCR0, 13, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(ssi2_per_clk, 0, CCM_CGCR0, 14, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(cspi1_clk, 0, CCM_CGCR1, 5, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(cspi2_clk, 0, CCM_CGCR1, 6, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(cspi3_clk, 0, CCM_CGCR1, 7, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(esdhc1_ahb_clk, 0, CCM_CGCR0, 21, get_rate_esdhc1, NULL, NULL);
-DEFINE_CLOCK(esdhc1_per_clk, 0, CCM_CGCR0, 3, get_rate_esdhc1, NULL,
- &esdhc1_ahb_clk);
-DEFINE_CLOCK(esdhc2_ahb_clk, 0, CCM_CGCR0, 22, get_rate_esdhc2, NULL, NULL);
-DEFINE_CLOCK(esdhc2_per_clk, 0, CCM_CGCR0, 4, get_rate_esdhc2, NULL,
- &esdhc2_ahb_clk);
-DEFINE_CLOCK(sdma_ahb_clk, 0, CCM_CGCR0, 26, NULL, NULL, NULL);
-DEFINE_CLOCK(fec_ahb_clk, 0, CCM_CGCR0, 23, NULL, NULL, NULL);
-DEFINE_CLOCK(lcdc_ahb_clk, 0, CCM_CGCR0, 24, NULL, NULL, NULL);
-DEFINE_CLOCK(lcdc_per_clk, 0, CCM_CGCR0, 7, NULL, NULL, &lcdc_ahb_clk);
-DEFINE_CLOCK(csi_ahb_clk, 0, CCM_CGCR0, 18, get_rate_csi, NULL, NULL);
-DEFINE_CLOCK(csi_per_clk, 0, CCM_CGCR0, 0, get_rate_csi, NULL, &csi_ahb_clk);
-DEFINE_CLOCK(uart1_clk, 0, CCM_CGCR2, 14, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart2_clk, 0, CCM_CGCR2, 15, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart3_clk, 0, CCM_CGCR2, 16, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart4_clk, 0, CCM_CGCR2, 17, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart5_clk, 0, CCM_CGCR2, 18, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(nfc_clk, 0, CCM_CGCR0, 8, get_rate_nfc, NULL, NULL);
-DEFINE_CLOCK(usbotg_clk, 0, CCM_CGCR0, 28, get_rate_otg, NULL, NULL);
-DEFINE_CLOCK(pwm1_clk, 0, CCM_CGCR1, 31, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(pwm2_clk, 0, CCM_CGCR2, 0, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(pwm3_clk, 0, CCM_CGCR2, 1, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(pwm4_clk, 0, CCM_CGCR2, 2, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL, NULL);
-DEFINE_CLOCK(fec_clk, 0, CCM_CGCR1, 15, get_rate_ipg, NULL, &fec_ahb_clk);
-DEFINE_CLOCK(dryice_clk, 0, CCM_CGCR1, 8, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(lcdc_clk, 0, CCM_CGCR1, 29, get_rate_lcdc, NULL, &lcdc_per_clk);
-DEFINE_CLOCK(wdt_clk, 0, CCM_CGCR2, 19, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(ssi1_clk, 0, CCM_CGCR2, 11, get_rate_ssi1, NULL, &ssi1_per_clk);
-DEFINE_CLOCK(ssi2_clk, 1, CCM_CGCR2, 12, get_rate_ssi2, NULL, &ssi2_per_clk);
-DEFINE_CLOCK(sdma_clk, 0, CCM_CGCR2, 6, get_rate_ipg, NULL, &sdma_ahb_clk);
-DEFINE_CLOCK(esdhc1_clk, 0, CCM_CGCR1, 13, get_rate_esdhc1, NULL,
- &esdhc1_per_clk);
-DEFINE_CLOCK(esdhc2_clk, 1, CCM_CGCR1, 14, get_rate_esdhc2, NULL,
- &esdhc2_per_clk);
-DEFINE_CLOCK(audmux_clk, 0, CCM_CGCR1, 0, NULL, NULL, NULL);
-DEFINE_CLOCK(csi_clk, 0, CCM_CGCR1, 4, get_rate_csi, NULL, &csi_per_clk);
-DEFINE_CLOCK(can1_clk, 0, CCM_CGCR1, 2, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(can2_clk, 1, CCM_CGCR1, 3, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(iim_clk, 0, CCM_CGCR1, 26, NULL, NULL, NULL);
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-
-static struct clk_lookup lookups[] = {
- /* i.mx25 has the i.mx21 type uart */
- _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
- _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
- _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
- _REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
- _REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
- _REGISTER_CLOCK("mxc-ehci.0", "usb", usbotg_clk)
- _REGISTER_CLOCK("mxc-ehci.1", "usb", usbotg_clk)
- _REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
- _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
- /* i.mx25 has the i.mx35 type cspi */
- _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
- _REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
- _REGISTER_CLOCK("imx35-cspi.2", NULL, cspi3_clk)
- _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm1_clk)
- _REGISTER_CLOCK("mxc_pwm.1", NULL, pwm2_clk)
- _REGISTER_CLOCK("mxc_pwm.2", NULL, pwm3_clk)
- _REGISTER_CLOCK("mxc_pwm.3", NULL, pwm4_clk)
- _REGISTER_CLOCK("imx-keypad", NULL, kpp_clk)
- _REGISTER_CLOCK("mx25-adc", NULL, tsc_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
- _REGISTER_CLOCK("imx-i2c.1", NULL, i2c_clk)
- _REGISTER_CLOCK("imx-i2c.2", NULL, i2c_clk)
- _REGISTER_CLOCK("imx25-fec.0", NULL, fec_clk)
- _REGISTER_CLOCK("imxdi_rtc.0", NULL, dryice_clk)
- _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
- _REGISTER_CLOCK("imx2-wdt.0", NULL, wdt_clk)
- _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
- _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx25.0", NULL, esdhc1_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx25.1", NULL, esdhc2_clk)
- _REGISTER_CLOCK("mx2-camera.0", NULL, csi_clk)
- _REGISTER_CLOCK(NULL, "audmux", audmux_clk)
- _REGISTER_CLOCK("flexcan.0", NULL, can1_clk)
- _REGISTER_CLOCK("flexcan.1", NULL, can2_clk)
- /* i.mx25 has the i.mx35 type sdma */
- _REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
- _REGISTER_CLOCK(NULL, "iim", iim_clk)
-};
-
-int __init mx25_clocks_init(void)
-{
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- /* Turn off all clocks except the ones we need to survive, namely:
- * EMI, GPIO1-3 (CCM_CGCR1[18:16]), GPT1, IOMUXC (CCM_CGCR1[27]), IIM,
- * SCC
- */
- __raw_writel((1 << 19), CRM_BASE + CCM_CGCR0);
- __raw_writel((0xf << 16) | (3 << 26), CRM_BASE + CCM_CGCR1);
- __raw_writel((1 << 5), CRM_BASE + CCM_CGCR2);
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
- clk_enable(&uart1_clk);
-#endif
-
- /* Clock source for lcdc and csi is upll */
- __raw_writel(__raw_readl(CRM_BASE+0x64) | (1 << 7) | (1 << 0),
- CRM_BASE + 0x64);
-
- /* Clock source for gpt is ahb_div */
- __raw_writel(__raw_readl(CRM_BASE+0x64) & ~(1 << 5), CRM_BASE + 0x64);
-
- clk_enable(&iim_clk);
- imx_print_silicon_rev("i.MX25", mx25_revision());
- clk_disable(&iim_clk);
-
- mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
-
- return 0;
-}
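
The per-clock helpers removed above (get_rate_per() and the get_rate_uart(),
get_rate_gpt(), ... wrappers built on it) all read one of sixteen 6-bit
dividers packed four per register into PCDR0..PCDR3, and pick either UPLL or
the AHB clock as the reference depending on the per-clock mux bits in the
register at CRM offset 0x64. A small self-contained sketch of that lookup is
below; the register contents and the 133 MHz / 240 MHz parent rates are
invented for illustration and are not real i.MX25 settings.

/* Standalone sketch of the divider lookup done by the deleted
 * get_rate_per(); the field packing and mux test follow the driver above,
 * all values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pcdr[4];	/* stand-ins for CCM_PCDR0..CCM_PCDR3 */
static uint32_t per_mux;	/* stand-in for the mux bits at CRM offset 0x64 */

static unsigned long get_rate_per_sketch(int per, unsigned long ahb_rate,
					 unsigned long upll_rate)
{
	unsigned long ofs = (per & 0x3) * 8;		/* bit position inside the register */
	unsigned long idx = per / 4;			/* PCDR0..PCDR3 (the driver uses per & ~0x3 as a byte offset) */
	unsigned long div = (pcdr[idx] >> ofs) & 0x3f;	/* 6-bit divider field */
	unsigned long fref = (per_mux & (1u << per)) ? upll_rate : ahb_rate;

	return fref / (div + 1);
}

int main(void)
{
	pcdr[3] = 3u << 24;	/* per-clock 15 (UART, see get_rate_uart): divide by 4 */
	per_mux = 0;		/* every per-clock sourced from the AHB clock */

	printf("uart per-clock: %lu Hz\n",
	       get_rate_per_sketch(15, 133000000UL, 240000000UL));
	return 0;
}

With these example values the UART per-clock comes out at 33.25 MHz;
mx25_clocks_init() above sets bits 0 and 7 of that same 0x64 register so that
the csi and lcdc per-clocks run from UPLL instead of the AHB clock.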
diff --git a/arch/arm/mach-imx/clock-imx27.c b/arch/arm/mach-imx/clock-imx27.c
deleted file mode 100644
index 98e04f5a87dd..000000000000
--- a/arch/arm/mach-imx/clock-imx27.c
+++ /dev/null
@@ -1,785 +0,0 @@
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/clkdev.h>
-#include <linux/of.h>
-
-#include <asm/div64.h>
-
-#include <mach/clock.h>
-#include <mach/common.h>
-#include <mach/hardware.h>
-
-#define IO_ADDR_CCM(off) (MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR + (off)))
-
-/* Register offsets */
-#define CCM_CSCR IO_ADDR_CCM(0x0)
-#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
-#define CCM_MPCTL1 IO_ADDR_CCM(0x8)
-#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
-#define CCM_SPCTL1 IO_ADDR_CCM(0x10)
-#define CCM_OSC26MCTL IO_ADDR_CCM(0x14)
-#define CCM_PCDR0 IO_ADDR_CCM(0x18)
-#define CCM_PCDR1 IO_ADDR_CCM(0x1c)
-#define CCM_PCCR0 IO_ADDR_CCM(0x20)
-#define CCM_PCCR1 IO_ADDR_CCM(0x24)
-#define CCM_CCSR IO_ADDR_CCM(0x28)
-#define CCM_PMCTL IO_ADDR_CCM(0x2c)
-#define CCM_PMCOUNT IO_ADDR_CCM(0x30)
-#define CCM_WKGDCTL IO_ADDR_CCM(0x34)
-
-#define CCM_CSCR_UPDATE_DIS (1 << 31)
-#define CCM_CSCR_SSI2 (1 << 23)
-#define CCM_CSCR_SSI1 (1 << 22)
-#define CCM_CSCR_VPU (1 << 21)
-#define CCM_CSCR_MSHC (1 << 20)
-#define CCM_CSCR_SPLLRES (1 << 19)
-#define CCM_CSCR_MPLLRES (1 << 18)
-#define CCM_CSCR_SP (1 << 17)
-#define CCM_CSCR_MCU (1 << 16)
-#define CCM_CSCR_OSC26MDIV (1 << 4)
-#define CCM_CSCR_OSC26M (1 << 3)
-#define CCM_CSCR_FPM (1 << 2)
-#define CCM_CSCR_SPEN (1 << 1)
-#define CCM_CSCR_MPEN (1 << 0)
-
-/* i.MX27 TO 2+ */
-#define CCM_CSCR_ARM_SRC (1 << 15)
-
-#define CCM_SPCTL1_LF (1 << 15)
-#define CCM_SPCTL1_BRMO (1 << 6)
-
-static struct clk mpll_main1_clk, mpll_main2_clk;
-
-static int clk_pccr_enable(struct clk *clk)
-{
- unsigned long reg;
-
- if (!clk->enable_reg)
- return 0;
-
- reg = __raw_readl(clk->enable_reg);
- reg |= 1 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
-
- return 0;
-}
-
-static void clk_pccr_disable(struct clk *clk)
-{
- unsigned long reg;
-
- if (!clk->enable_reg)
- return;
-
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(1 << clk->enable_shift);
- __raw_writel(reg, clk->enable_reg);
-}
-
-static int clk_spll_enable(struct clk *clk)
-{
- unsigned long reg;
-
- reg = __raw_readl(CCM_CSCR);
- reg |= CCM_CSCR_SPEN;
- __raw_writel(reg, CCM_CSCR);
-
- while (!(__raw_readl(CCM_SPCTL1) & CCM_SPCTL1_LF))
- ;
-
- return 0;
-}
-
-static void clk_spll_disable(struct clk *clk)
-{
- unsigned long reg;
-
- reg = __raw_readl(CCM_CSCR);
- reg &= ~CCM_CSCR_SPEN;
- __raw_writel(reg, CCM_CSCR);
-}
-
-static int clk_cpu_set_parent(struct clk *clk, struct clk *parent)
-{
- int cscr = __raw_readl(CCM_CSCR);
-
- if (clk->parent == parent)
- return 0;
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
- if (parent == &mpll_main1_clk) {
- cscr |= CCM_CSCR_ARM_SRC;
- } else {
- if (parent == &mpll_main2_clk)
- cscr &= ~CCM_CSCR_ARM_SRC;
- else
- return -EINVAL;
- }
- __raw_writel(cscr, CCM_CSCR);
- clk->parent = parent;
- return 0;
- }
- return -ENODEV;
-}
-
-static unsigned long round_rate_cpu(struct clk *clk, unsigned long rate)
-{
- int div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
- if (parent_rate % rate)
- div++;
-
- if (div > 4)
- div = 4;
-
- return parent_rate / div;
-}
-
-static int set_rate_cpu(struct clk *clk, unsigned long rate)
-{
- unsigned int div;
- uint32_t reg;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
-
- if (div > 4 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
-
- div--;
-
- reg = __raw_readl(CCM_CSCR);
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
- reg &= ~(3 << 12);
- reg |= div << 12;
- reg &= ~(CCM_CSCR_FPM | CCM_CSCR_SPEN);
- __raw_writel(reg | CCM_CSCR_UPDATE_DIS, CCM_CSCR);
- } else {
- printk(KERN_ERR "Can't set CPU frequency!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static unsigned long round_rate_per(struct clk *clk, unsigned long rate)
-{
- u32 div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
- if (parent_rate % rate)
- div++;
-
- if (div > 64)
- div = 64;
-
- return parent_rate / div;
-}
-
-static int set_rate_per(struct clk *clk, unsigned long rate)
-{
- u32 reg;
- u32 div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (clk->id < 0 || clk->id > 3)
- return -EINVAL;
-
- div = parent_rate / rate;
- if (div > 64 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
- div--;
-
- reg = __raw_readl(CCM_PCDR1) & ~(0x3f << (clk->id << 3));
- reg |= div << (clk->id << 3);
- __raw_writel(reg, CCM_PCDR1);
-
- return 0;
-}
-
-static unsigned long get_rate_usb(struct clk *clk)
-{
- unsigned long usb_pdf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- usb_pdf = (__raw_readl(CCM_CSCR) >> 28) & 0x7;
-
- return parent_rate / (usb_pdf + 1U);
-}
-
-static unsigned long get_rate_ssix(struct clk *clk, unsigned long pdf)
-{
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
- pdf += 4; /* MX27 TO2+ */
- else
- pdf = (pdf < 2) ? 124UL : pdf; /* MX21 & MX27 TO1 */
-
- return 2UL * parent_rate / pdf;
-}
-
-static unsigned long get_rate_ssi1(struct clk *clk)
-{
- return get_rate_ssix(clk, (__raw_readl(CCM_PCDR0) >> 16) & 0x3f);
-}
-
-static unsigned long get_rate_ssi2(struct clk *clk)
-{
- return get_rate_ssix(clk, (__raw_readl(CCM_PCDR0) >> 26) & 0x3f);
-}
-
-static unsigned long get_rate_nfc(struct clk *clk)
-{
- unsigned long nfc_pdf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
- nfc_pdf = (__raw_readl(CCM_PCDR0) >> 6) & 0xf;
- else
- nfc_pdf = (__raw_readl(CCM_PCDR0) >> 12) & 0xf;
-
- return parent_rate / (nfc_pdf + 1);
-}
-
-static unsigned long get_rate_vpu(struct clk *clk)
-{
- unsigned long vpu_pdf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
- vpu_pdf = (__raw_readl(CCM_PCDR0) >> 10) & 0x3f;
- vpu_pdf += 4;
- } else {
- vpu_pdf = (__raw_readl(CCM_PCDR0) >> 8) & 0xf;
- vpu_pdf = (vpu_pdf < 2) ? 124 : vpu_pdf;
- }
-
- return 2UL * parent_rate / vpu_pdf;
-}
-
-static unsigned long round_rate_parent(struct clk *clk, unsigned long rate)
-{
- return clk->parent->round_rate(clk->parent, rate);
-}
-
-static unsigned long get_rate_parent(struct clk *clk)
-{
- return clk_get_rate(clk->parent);
-}
-
-static int set_rate_parent(struct clk *clk, unsigned long rate)
-{
- return clk->parent->set_rate(clk->parent, rate);
-}
-
-/* in Hz */
-static unsigned long external_high_reference = 26000000;
-
-static unsigned long get_rate_high_reference(struct clk *clk)
-{
- return external_high_reference;
-}
-
-/* in Hz */
-static unsigned long external_low_reference = 32768;
-
-static unsigned long get_rate_low_reference(struct clk *clk)
-{
- return external_low_reference;
-}
-
-static unsigned long get_rate_fpm(struct clk *clk)
-{
- return clk_get_rate(clk->parent) * 1024;
-}
-
-static unsigned long get_rate_mpll(struct clk *clk)
-{
- return mxc_decode_pll(__raw_readl(CCM_MPCTL0),
- clk_get_rate(clk->parent));
-}
-
-static unsigned long get_rate_mpll_main(struct clk *clk)
-{
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- /* i.MX27 TO2:
- * clk->id == 0: arm clock source path 1 which is from 2 * MPLL / 2
- * clk->id == 1: arm clock source path 2 which is from 2 * MPLL / 3
- */
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0 && clk->id == 1)
- return 2UL * parent_rate / 3UL;
-
- return parent_rate;
-}
-
-static unsigned long get_rate_spll(struct clk *clk)
-{
- uint32_t reg;
- unsigned long rate;
-
- rate = clk_get_rate(clk->parent);
-
- reg = __raw_readl(CCM_SPCTL0);
-
- /* On TO2 we have to write the value back. Otherwise we
- * read 0 from this register the next time.
- */
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
- __raw_writel(reg, CCM_SPCTL0);
-
- return mxc_decode_pll(reg, rate);
-}
-
-static unsigned long get_rate_cpu(struct clk *clk)
-{
- u32 div;
- unsigned long rate;
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
- div = (__raw_readl(CCM_CSCR) >> 12) & 0x3;
- else
- div = (__raw_readl(CCM_CSCR) >> 13) & 0x7;
-
- rate = clk_get_rate(clk->parent);
- return rate / (div + 1);
-}
-
-static unsigned long get_rate_ahb(struct clk *clk)
-{
- unsigned long rate, bclk_pdf;
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
- bclk_pdf = (__raw_readl(CCM_CSCR) >> 8) & 0x3;
- else
- bclk_pdf = (__raw_readl(CCM_CSCR) >> 9) & 0xf;
-
- rate = clk_get_rate(clk->parent);
- return rate / (bclk_pdf + 1);
-}
-
-static unsigned long get_rate_ipg(struct clk *clk)
-{
- unsigned long rate, ipg_pdf;
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
- return clk_get_rate(clk->parent);
- else
- ipg_pdf = (__raw_readl(CCM_CSCR) >> 8) & 1;
-
- rate = clk_get_rate(clk->parent);
- return rate / (ipg_pdf + 1);
-}
-
-static unsigned long get_rate_per(struct clk *clk)
-{
- unsigned long perclk_pdf, parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (clk->id < 0 || clk->id > 3)
- return 0;
-
- perclk_pdf = (__raw_readl(CCM_PCDR1) >> (clk->id << 3)) & 0x3f;
-
- return parent_rate / (perclk_pdf + 1);
-}
-
-/*
- * the high frequency external clock reference
- * Default case is 26MHz. Could be changed at runtime
- * with a call to change_external_high_reference()
- */
-static struct clk ckih_clk = {
- .get_rate = get_rate_high_reference,
-};
-
-static struct clk mpll_clk = {
- .parent = &ckih_clk,
- .get_rate = get_rate_mpll,
-};
-
-/* For i.MX27 TO2, this is MPLL path 1 of the ARM core.
- * It provides a clock source whose rate is the same as the MPLL rate.
- */
-static struct clk mpll_main1_clk = {
- .id = 0,
- .parent = &mpll_clk,
- .get_rate = get_rate_mpll_main,
-};
-
-/* For i.MX27 TO2, this is MPLL path 2 of the ARM core.
- * It provides a clock source whose rate is MPLL * 2 / 3.
- */
-static struct clk mpll_main2_clk = {
- .id = 1,
- .parent = &mpll_clk,
- .get_rate = get_rate_mpll_main,
-};
-
-static struct clk ahb_clk = {
- .parent = &mpll_main2_clk,
- .get_rate = get_rate_ahb,
-};
-
-static struct clk ipg_clk = {
- .parent = &ahb_clk,
- .get_rate = get_rate_ipg,
-};
-
-static struct clk cpu_clk = {
- .parent = &mpll_main2_clk,
- .set_parent = clk_cpu_set_parent,
- .round_rate = round_rate_cpu,
- .get_rate = get_rate_cpu,
- .set_rate = set_rate_cpu,
-};
-
-static struct clk spll_clk = {
- .parent = &ckih_clk,
- .get_rate = get_rate_spll,
- .enable = clk_spll_enable,
- .disable = clk_spll_disable,
-};
-
-/*
- * the low frequency external clock reference
- * Default case is 32.768kHz.
- */
-static struct clk ckil_clk = {
- .get_rate = get_rate_low_reference,
-};
-
-/* Output of frequency pre multiplier */
-static struct clk fpm_clk = {
- .parent = &ckil_clk,
- .get_rate = get_rate_fpm,
-};
-
-#define PCCR0 CCM_PCCR0
-#define PCCR1 CCM_PCCR1
-
-#define DEFINE_CLOCK(name, i, er, es, gr, s, p) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = gr, \
- .enable = clk_pccr_enable, \
- .disable = clk_pccr_disable, \
- .secondary = s, \
- .parent = p, \
- }
-
-#define DEFINE_CLOCK1(name, i, er, es, getsetround, s, p) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = get_rate_##getsetround, \
- .set_rate = set_rate_##getsetround, \
- .round_rate = round_rate_##getsetround, \
- .enable = clk_pccr_enable, \
- .disable = clk_pccr_disable, \
- .secondary = s, \
- .parent = p, \
- }
-
-/* Forward declaration to keep the following list in order */
-static struct clk slcdc_clk1, sahara2_clk1, rtic_clk1, fec_clk1, emma_clk1,
- dma_clk1, lcdc_clk2, vpu_clk1;
-
-/* All clocks we can gate through PCCRx in the order of PCCRx bits */
-DEFINE_CLOCK(ssi2_clk1, 1, PCCR0, 0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ssi1_clk1, 0, PCCR0, 1, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(slcdc_clk, 0, PCCR0, 2, NULL, &slcdc_clk1, &ahb_clk);
-DEFINE_CLOCK(sdhc3_clk1, 0, PCCR0, 3, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sdhc2_clk1, 0, PCCR0, 4, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sdhc1_clk1, 0, PCCR0, 5, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(scc_clk, 0, PCCR0, 6, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sahara2_clk, 0, PCCR0, 7, NULL, &sahara2_clk1, &ahb_clk);
-DEFINE_CLOCK(rtic_clk, 0, PCCR0, 8, NULL, &rtic_clk1, &ahb_clk);
-DEFINE_CLOCK(rtc_clk, 0, PCCR0, 9, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(pwm_clk1, 0, PCCR0, 11, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(owire_clk, 0, PCCR0, 12, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(mstick_clk1, 0, PCCR0, 13, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(lcdc_clk1, 0, PCCR0, 14, NULL, &lcdc_clk2, &ipg_clk);
-DEFINE_CLOCK(kpp_clk, 0, PCCR0, 15, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(iim_clk, 0, PCCR0, 16, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(i2c2_clk, 1, PCCR0, 17, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(i2c1_clk, 0, PCCR0, 18, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt6_clk1, 0, PCCR0, 19, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt5_clk1, 0, PCCR0, 20, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt4_clk1, 0, PCCR0, 21, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt3_clk1, 0, PCCR0, 22, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt2_clk1, 0, PCCR0, 23, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt1_clk1, 0, PCCR0, 24, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpio_clk, 0, PCCR0, 25, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(fec_clk, 0, PCCR0, 26, NULL, &fec_clk1, &ahb_clk);
-DEFINE_CLOCK(emma_clk, 0, PCCR0, 27, NULL, &emma_clk1, &ahb_clk);
-DEFINE_CLOCK(dma_clk, 0, PCCR0, 28, NULL, &dma_clk1, &ahb_clk);
-DEFINE_CLOCK(cspi13_clk1, 0, PCCR0, 29, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(cspi2_clk1, 0, PCCR0, 30, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(cspi1_clk1, 0, PCCR0, 31, NULL, NULL, &ipg_clk);
-
-DEFINE_CLOCK(mstick_clk, 0, PCCR1, 2, NULL, &mstick_clk1, &ipg_clk);
-DEFINE_CLOCK(nfc_clk, 0, PCCR1, 3, get_rate_nfc, NULL, &cpu_clk);
-DEFINE_CLOCK(ssi2_clk, 1, PCCR1, 4, get_rate_ssi2, &ssi2_clk1, &mpll_main2_clk);
-DEFINE_CLOCK(ssi1_clk, 0, PCCR1, 5, get_rate_ssi1, &ssi1_clk1, &mpll_main2_clk);
-DEFINE_CLOCK(vpu_clk, 0, PCCR1, 6, get_rate_vpu, &vpu_clk1, &mpll_main2_clk);
-DEFINE_CLOCK1(per4_clk, 3, PCCR1, 7, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK1(per3_clk, 2, PCCR1, 8, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK1(per2_clk, 1, PCCR1, 9, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK1(per1_clk, 0, PCCR1, 10, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK(usb_clk1, 0, PCCR1, 11, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(slcdc_clk1, 0, PCCR1, 12, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(sahara2_clk1, 0, PCCR1, 13, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(rtic_clk1, 0, PCCR1, 14, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(lcdc_clk2, 0, PCCR1, 15, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(vpu_clk1, 0, PCCR1, 16, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(fec_clk1, 0, PCCR1, 17, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(emma_clk1, 0, PCCR1, 18, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(emi_clk, 0, PCCR1, 19, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(dma_clk1, 0, PCCR1, 20, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(csi_clk1, 0, PCCR1, 21, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(brom_clk, 0, PCCR1, 22, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(pata_clk, 0, PCCR1, 23, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(wdog_clk, 0, PCCR1, 24, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(usb_clk, 0, PCCR1, 25, get_rate_usb, &usb_clk1, &spll_clk);
-DEFINE_CLOCK(uart6_clk1, 0, PCCR1, 26, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart5_clk1, 0, PCCR1, 27, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart4_clk1, 0, PCCR1, 28, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart3_clk1, 0, PCCR1, 29, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart2_clk1, 0, PCCR1, 30, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart1_clk1, 0, PCCR1, 31, NULL, NULL, &ipg_clk);
-
-/* Clocks we cannot directly gate, but drivers need their rates */
-DEFINE_CLOCK(cspi1_clk, 0, NULL, 0, NULL, &cspi1_clk1, &per2_clk);
-DEFINE_CLOCK(cspi2_clk, 1, NULL, 0, NULL, &cspi2_clk1, &per2_clk);
-DEFINE_CLOCK(cspi3_clk, 2, NULL, 0, NULL, &cspi13_clk1, &per2_clk);
-DEFINE_CLOCK(sdhc1_clk, 0, NULL, 0, NULL, &sdhc1_clk1, &per2_clk);
-DEFINE_CLOCK(sdhc2_clk, 1, NULL, 0, NULL, &sdhc2_clk1, &per2_clk);
-DEFINE_CLOCK(sdhc3_clk, 2, NULL, 0, NULL, &sdhc3_clk1, &per2_clk);
-DEFINE_CLOCK(pwm_clk, 0, NULL, 0, NULL, &pwm_clk1, &per1_clk);
-DEFINE_CLOCK(gpt1_clk, 0, NULL, 0, NULL, &gpt1_clk1, &per1_clk);
-DEFINE_CLOCK(gpt2_clk, 1, NULL, 0, NULL, &gpt2_clk1, &per1_clk);
-DEFINE_CLOCK(gpt3_clk, 2, NULL, 0, NULL, &gpt3_clk1, &per1_clk);
-DEFINE_CLOCK(gpt4_clk, 3, NULL, 0, NULL, &gpt4_clk1, &per1_clk);
-DEFINE_CLOCK(gpt5_clk, 4, NULL, 0, NULL, &gpt5_clk1, &per1_clk);
-DEFINE_CLOCK(gpt6_clk, 5, NULL, 0, NULL, &gpt6_clk1, &per1_clk);
-DEFINE_CLOCK(uart1_clk, 0, NULL, 0, NULL, &uart1_clk1, &per1_clk);
-DEFINE_CLOCK(uart2_clk, 1, NULL, 0, NULL, &uart2_clk1, &per1_clk);
-DEFINE_CLOCK(uart3_clk, 2, NULL, 0, NULL, &uart3_clk1, &per1_clk);
-DEFINE_CLOCK(uart4_clk, 3, NULL, 0, NULL, &uart4_clk1, &per1_clk);
-DEFINE_CLOCK(uart5_clk, 4, NULL, 0, NULL, &uart5_clk1, &per1_clk);
-DEFINE_CLOCK(uart6_clk, 5, NULL, 0, NULL, &uart6_clk1, &per1_clk);
-DEFINE_CLOCK1(lcdc_clk, 0, NULL, 0, parent, &lcdc_clk1, &per3_clk);
-DEFINE_CLOCK1(csi_clk, 0, NULL, 0, parent, &csi_clk1, &per4_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-
-static struct clk_lookup lookups[] = {
- /* i.mx27 has the i.mx21 type uart */
- _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
- _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
- _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
- _REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
- _REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
- _REGISTER_CLOCK("imx21-uart.5", NULL, uart6_clk)
- _REGISTER_CLOCK(NULL, "gpt1", gpt1_clk)
- _REGISTER_CLOCK(NULL, "gpt2", gpt2_clk)
- _REGISTER_CLOCK(NULL, "gpt3", gpt3_clk)
- _REGISTER_CLOCK(NULL, "gpt4", gpt4_clk)
- _REGISTER_CLOCK(NULL, "gpt5", gpt5_clk)
- _REGISTER_CLOCK(NULL, "gpt6", gpt6_clk)
- _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm_clk)
- _REGISTER_CLOCK("mxc-mmc.0", NULL, sdhc1_clk)
- _REGISTER_CLOCK("mxc-mmc.1", NULL, sdhc2_clk)
- _REGISTER_CLOCK("mxc-mmc.2", NULL, sdhc3_clk)
- _REGISTER_CLOCK("imx27-cspi.0", NULL, cspi1_clk)
- _REGISTER_CLOCK("imx27-cspi.1", NULL, cspi2_clk)
- _REGISTER_CLOCK("imx27-cspi.2", NULL, cspi3_clk)
- _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
- _REGISTER_CLOCK("mx2-camera.0", NULL, csi_clk)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb", usb_clk)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usb_clk1)
- _REGISTER_CLOCK("mxc-ehci.0", "usb", usb_clk)
- _REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_clk1)
- _REGISTER_CLOCK("mxc-ehci.1", "usb", usb_clk)
- _REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_clk1)
- _REGISTER_CLOCK("mxc-ehci.2", "usb", usb_clk)
- _REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_clk1)
- _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
- _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
- _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
- _REGISTER_CLOCK(NULL, "vpu", vpu_clk)
- _REGISTER_CLOCK(NULL, "dma", dma_clk)
- _REGISTER_CLOCK(NULL, "rtic", rtic_clk)
- _REGISTER_CLOCK(NULL, "brom", brom_clk)
- _REGISTER_CLOCK(NULL, "emma", emma_clk)
- _REGISTER_CLOCK("m2m-emmaprp.0", NULL, emma_clk)
- _REGISTER_CLOCK(NULL, "slcdc", slcdc_clk)
- _REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
- _REGISTER_CLOCK(NULL, "emi", emi_clk)
- _REGISTER_CLOCK(NULL, "sahara2", sahara2_clk)
- _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
- _REGISTER_CLOCK(NULL, "mstick", mstick_clk)
- _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
- _REGISTER_CLOCK(NULL, "gpio", gpio_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
- _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
- _REGISTER_CLOCK(NULL, "iim", iim_clk)
- _REGISTER_CLOCK(NULL, "kpp", kpp_clk)
- _REGISTER_CLOCK("mxc_w1.0", NULL, owire_clk)
- _REGISTER_CLOCK(NULL, "rtc", rtc_clk)
- _REGISTER_CLOCK(NULL, "scc", scc_clk)
-};
-
-/* Adjust the clock path for TO2 and later */
-static void __init to2_adjust_clocks(void)
-{
- unsigned long cscr = __raw_readl(CCM_CSCR);
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
- if (cscr & CCM_CSCR_ARM_SRC)
- cpu_clk.parent = &mpll_main1_clk;
-
- if (!(cscr & CCM_CSCR_SSI2))
- ssi2_clk.parent = &spll_clk;
-
- if (!(cscr & CCM_CSCR_SSI1))
- ssi1_clk.parent = &spll_clk;
-
- if (!(cscr & CCM_CSCR_VPU))
- vpu_clk.parent = &spll_clk;
- } else {
- cpu_clk.parent = &mpll_clk;
- cpu_clk.set_parent = NULL;
- cpu_clk.round_rate = NULL;
- cpu_clk.set_rate = NULL;
- ahb_clk.parent = &mpll_clk;
-
- per1_clk.parent = &mpll_clk;
- per2_clk.parent = &mpll_clk;
- per3_clk.parent = &mpll_clk;
- per4_clk.parent = &mpll_clk;
-
- ssi1_clk.parent = &mpll_clk;
- ssi2_clk.parent = &mpll_clk;
-
- vpu_clk.parent = &mpll_clk;
- }
-}
-
-/*
- * must be called very early to get information about the
- * available clock rate when the timer framework starts
- */
-int __init mx27_clocks_init(unsigned long fref)
-{
- u32 cscr = __raw_readl(CCM_CSCR);
-
- external_high_reference = fref;
-
- /* detect clock reference for both system PLLs */
- if (cscr & CCM_CSCR_MCU)
- mpll_clk.parent = &ckih_clk;
- else
- mpll_clk.parent = &fpm_clk;
-
- if (cscr & CCM_CSCR_SP)
- spll_clk.parent = &ckih_clk;
- else
- spll_clk.parent = &fpm_clk;
-
- to2_adjust_clocks();
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- /* Turn off all clocks we do not need */
- __raw_writel(0, CCM_PCCR0);
- __raw_writel((1 << 10) | (1 << 19), CCM_PCCR1);
-
- spll_clk.disable(&spll_clk);
-
- /* enable basic clocks */
- clk_enable(&per1_clk);
- clk_enable(&gpio_clk);
- clk_enable(&emi_clk);
- clk_enable(&iim_clk);
- imx_print_silicon_rev("i.MX27", mx27_revision());
- clk_disable(&iim_clk);
-
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
- clk_enable(&uart1_clk);
-#endif
-
- mxc_timer_init(&gpt1_clk, MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR),
- MX27_INT_GPT1);
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-int __init mx27_clocks_init_dt(void)
-{
- struct device_node *np;
- u32 fref = 26000000; /* default */
-
- for_each_compatible_node(np, NULL, "fixed-clock") {
- if (!of_device_is_compatible(np, "fsl,imx-osc26m"))
- continue;
-
- if (!of_property_read_u32(np, "clock-frequency", &fref))
- break;
- }
-
- return mx27_clocks_init(fref);
-}
-#endif
diff --git a/arch/arm/mach-imx/clock-imx31.c b/arch/arm/mach-imx/clock-imx31.c
deleted file mode 100644
index 3a943cd4159f..000000000000
--- a/arch/arm/mach-imx/clock-imx31.c
+++ /dev/null
@@ -1,630 +0,0 @@
-/*
- * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <asm/div64.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/mx31.h>
-#include <mach/common.h>
-
-#include "crmregs-imx3.h"
-
-#define PRE_DIV_MIN_FREQ 10000000 /* Minimum Frequency after Predivider */
-
-static void __calc_pre_post_dividers(u32 div, u32 *pre, u32 *post)
-{
- u32 min_pre, temp_pre, old_err, err;
-
- if (div >= 512) {
- *pre = 8;
- *post = 64;
- } else if (div >= 64) {
- min_pre = (div - 1) / 64 + 1;
- old_err = 8;
- for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) {
- err = div % temp_pre;
- if (err == 0) {
- *pre = temp_pre;
- break;
- }
- err = temp_pre - err;
- if (err < old_err) {
- old_err = err;
- *pre = temp_pre;
- }
- }
- *post = (div + *pre - 1) / *pre;
- } else if (div <= 8) {
- *pre = div;
- *post = 1;
- } else {
- *pre = 1;
- *post = div;
- }
-}
-
-static struct clk mcu_pll_clk;
-static struct clk serial_pll_clk;
-static struct clk ipg_clk;
-static struct clk ckih_clk;
-
-static int cgr_enable(struct clk *clk)
-{
- u32 reg;
-
- if (!clk->enable_reg)
- return 0;
-
- reg = __raw_readl(clk->enable_reg);
- reg |= 3 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
-
- return 0;
-}
-
-static void cgr_disable(struct clk *clk)
-{
- u32 reg;
-
- if (!clk->enable_reg)
- return;
-
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(3 << clk->enable_shift);
-
- /* special case for EMI clock */
- if (clk->enable_reg == MXC_CCM_CGR2 && clk->enable_shift == 8)
- reg |= (1 << clk->enable_shift);
-
- __raw_writel(reg, clk->enable_reg);
-}
-
-static unsigned long pll_ref_get_rate(void)
-{
- unsigned long ccmr;
- unsigned int prcs;
-
- ccmr = __raw_readl(MXC_CCM_CCMR);
- prcs = (ccmr & MXC_CCM_CCMR_PRCS_MASK) >> MXC_CCM_CCMR_PRCS_OFFSET;
- if (prcs == 0x1)
- return CKIL_CLK_FREQ * 1024;
- else
- return clk_get_rate(&ckih_clk);
-}
-
-static unsigned long usb_pll_get_rate(struct clk *clk)
-{
- unsigned long reg;
-
- reg = __raw_readl(MXC_CCM_UPCTL);
-
- return mxc_decode_pll(reg, pll_ref_get_rate());
-}
-
-static unsigned long serial_pll_get_rate(struct clk *clk)
-{
- unsigned long reg;
-
- reg = __raw_readl(MXC_CCM_SRPCTL);
-
- return mxc_decode_pll(reg, pll_ref_get_rate());
-}
-
-static unsigned long mcu_pll_get_rate(struct clk *clk)
-{
- unsigned long reg, ccmr;
-
- ccmr = __raw_readl(MXC_CCM_CCMR);
-
- if (!(ccmr & MXC_CCM_CCMR_MPE) || (ccmr & MXC_CCM_CCMR_MDS))
- return clk_get_rate(&ckih_clk);
-
- reg = __raw_readl(MXC_CCM_MPCTL);
-
- return mxc_decode_pll(reg, pll_ref_get_rate());
-}
-
-static int usb_pll_enable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CCMR);
- reg |= MXC_CCM_CCMR_UPE;
- __raw_writel(reg, MXC_CCM_CCMR);
-
- /* No lock bit on MX31, so using max time from spec */
- udelay(80);
-
- return 0;
-}
-
-static void usb_pll_disable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CCMR);
- reg &= ~MXC_CCM_CCMR_UPE;
- __raw_writel(reg, MXC_CCM_CCMR);
-}
-
-static int serial_pll_enable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CCMR);
- reg |= MXC_CCM_CCMR_SPE;
- __raw_writel(reg, MXC_CCM_CCMR);
-
- /* No lock bit on MX31, so using max time from spec */
- udelay(80);
-
- return 0;
-}
-
-static void serial_pll_disable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CCMR);
- reg &= ~MXC_CCM_CCMR_SPE;
- __raw_writel(reg, MXC_CCM_CCMR);
-}
-
-#define PDR0(mask, off) ((__raw_readl(MXC_CCM_PDR0) & mask) >> off)
-#define PDR1(mask, off) ((__raw_readl(MXC_CCM_PDR1) & mask) >> off)
-#define PDR2(mask, off) ((__raw_readl(MXC_CCM_PDR2) & mask) >> off)
-
-static unsigned long mcu_main_get_rate(struct clk *clk)
-{
- u32 pmcr0 = __raw_readl(MXC_CCM_PMCR0);
-
- if ((pmcr0 & MXC_CCM_PMCR0_DFSUP1) == MXC_CCM_PMCR0_DFSUP1_SPLL)
- return clk_get_rate(&serial_pll_clk);
- else
- return clk_get_rate(&mcu_pll_clk);
-}
-
-static unsigned long ahb_get_rate(struct clk *clk)
-{
- unsigned long max_pdf;
-
- max_pdf = PDR0(MXC_CCM_PDR0_MAX_PODF_MASK,
- MXC_CCM_PDR0_MAX_PODF_OFFSET);
- return clk_get_rate(clk->parent) / (max_pdf + 1);
-}
-
-static unsigned long ipg_get_rate(struct clk *clk)
-{
- unsigned long ipg_pdf;
-
- ipg_pdf = PDR0(MXC_CCM_PDR0_IPG_PODF_MASK,
- MXC_CCM_PDR0_IPG_PODF_OFFSET);
- return clk_get_rate(clk->parent) / (ipg_pdf + 1);
-}
-
-static unsigned long nfc_get_rate(struct clk *clk)
-{
- unsigned long nfc_pdf;
-
- nfc_pdf = PDR0(MXC_CCM_PDR0_NFC_PODF_MASK,
- MXC_CCM_PDR0_NFC_PODF_OFFSET);
- return clk_get_rate(clk->parent) / (nfc_pdf + 1);
-}
-
-static unsigned long hsp_get_rate(struct clk *clk)
-{
- unsigned long hsp_pdf;
-
- hsp_pdf = PDR0(MXC_CCM_PDR0_HSP_PODF_MASK,
- MXC_CCM_PDR0_HSP_PODF_OFFSET);
- return clk_get_rate(clk->parent) / (hsp_pdf + 1);
-}
-
-static unsigned long usb_get_rate(struct clk *clk)
-{
- unsigned long usb_pdf, usb_prepdf;
-
- usb_pdf = PDR1(MXC_CCM_PDR1_USB_PODF_MASK,
- MXC_CCM_PDR1_USB_PODF_OFFSET);
- usb_prepdf = PDR1(MXC_CCM_PDR1_USB_PRDF_MASK,
- MXC_CCM_PDR1_USB_PRDF_OFFSET);
- return clk_get_rate(clk->parent) / (usb_prepdf + 1) / (usb_pdf + 1);
-}
-
-static unsigned long csi_get_rate(struct clk *clk)
-{
- u32 reg, pre, post;
-
- reg = __raw_readl(MXC_CCM_PDR0);
- pre = (reg & MXC_CCM_PDR0_CSI_PRDF_MASK) >>
- MXC_CCM_PDR0_CSI_PRDF_OFFSET;
- pre++;
- post = (reg & MXC_CCM_PDR0_CSI_PODF_MASK) >>
- MXC_CCM_PDR0_CSI_PODF_OFFSET;
- post++;
- return clk_get_rate(clk->parent) / (pre * post);
-}
-
-static unsigned long csi_round_rate(struct clk *clk, unsigned long rate)
-{
- u32 pre, post, parent = clk_get_rate(clk->parent);
- u32 div = parent / rate;
-
- if (parent % rate)
- div++;
-
- __calc_pre_post_dividers(div, &pre, &post);
-
- return parent / (pre * post);
-}
-
-static int csi_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg, div, pre, post, parent = clk_get_rate(clk->parent);
-
- div = parent / rate;
-
- if ((parent / div) != rate)
- return -EINVAL;
-
- __calc_pre_post_dividers(div, &pre, &post);
-
- /* Set CSI clock divider */
- reg = __raw_readl(MXC_CCM_PDR0) &
- ~(MXC_CCM_PDR0_CSI_PODF_MASK | MXC_CCM_PDR0_CSI_PRDF_MASK);
- reg |= (post - 1) << MXC_CCM_PDR0_CSI_PODF_OFFSET;
- reg |= (pre - 1) << MXC_CCM_PDR0_CSI_PRDF_OFFSET;
- __raw_writel(reg, MXC_CCM_PDR0);
-
- return 0;
-}
-
-static unsigned long ssi1_get_rate(struct clk *clk)
-{
- unsigned long ssi1_pdf, ssi1_prepdf;
-
- ssi1_pdf = PDR1(MXC_CCM_PDR1_SSI1_PODF_MASK,
- MXC_CCM_PDR1_SSI1_PODF_OFFSET);
- ssi1_prepdf = PDR1(MXC_CCM_PDR1_SSI1_PRE_PODF_MASK,
- MXC_CCM_PDR1_SSI1_PRE_PODF_OFFSET);
- return clk_get_rate(clk->parent) / (ssi1_prepdf + 1) / (ssi1_pdf + 1);
-}
-
-static unsigned long ssi2_get_rate(struct clk *clk)
-{
- unsigned long ssi2_pdf, ssi2_prepdf;
-
- ssi2_pdf = PDR1(MXC_CCM_PDR1_SSI2_PODF_MASK,
- MXC_CCM_PDR1_SSI2_PODF_OFFSET);
- ssi2_prepdf = PDR1(MXC_CCM_PDR1_SSI2_PRE_PODF_MASK,
- MXC_CCM_PDR1_SSI2_PRE_PODF_OFFSET);
- return clk_get_rate(clk->parent) / (ssi2_prepdf + 1) / (ssi2_pdf + 1);
-}
-
-static unsigned long firi_get_rate(struct clk *clk)
-{
- unsigned long firi_pdf, firi_prepdf;
-
- firi_pdf = PDR1(MXC_CCM_PDR1_FIRI_PODF_MASK,
- MXC_CCM_PDR1_FIRI_PODF_OFFSET);
- firi_prepdf = PDR1(MXC_CCM_PDR1_FIRI_PRE_PODF_MASK,
- MXC_CCM_PDR1_FIRI_PRE_PODF_OFFSET);
- return clk_get_rate(clk->parent) / (firi_prepdf + 1) / (firi_pdf + 1);
-}
-
-static unsigned long firi_round_rate(struct clk *clk, unsigned long rate)
-{
- u32 pre, post;
- u32 parent = clk_get_rate(clk->parent);
- u32 div = parent / rate;
-
- if (parent % rate)
- div++;
-
- __calc_pre_post_dividers(div, &pre, &post);
-
- return parent / (pre * post);
-
-}
-
-static int firi_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg, div, pre, post, parent = clk_get_rate(clk->parent);
-
- div = parent / rate;
-
- if ((parent / div) != rate)
- return -EINVAL;
-
- __calc_pre_post_dividers(div, &pre, &post);
-
- /* Set FIRI clock divider */
- reg = __raw_readl(MXC_CCM_PDR1) &
- ~(MXC_CCM_PDR1_FIRI_PODF_MASK | MXC_CCM_PDR1_FIRI_PRE_PODF_MASK);
- reg |= (pre - 1) << MXC_CCM_PDR1_FIRI_PRE_PODF_OFFSET;
- reg |= (post - 1) << MXC_CCM_PDR1_FIRI_PODF_OFFSET;
- __raw_writel(reg, MXC_CCM_PDR1);
-
- return 0;
-}
-
-static unsigned long mbx_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 2;
-}
-
-static unsigned long mstick1_get_rate(struct clk *clk)
-{
- unsigned long msti_pdf;
-
- msti_pdf = PDR2(MXC_CCM_PDR2_MST1_PDF_MASK,
- MXC_CCM_PDR2_MST1_PDF_OFFSET);
- return clk_get_rate(clk->parent) / (msti_pdf + 1);
-}
-
-static unsigned long mstick2_get_rate(struct clk *clk)
-{
- unsigned long msti_pdf;
-
- msti_pdf = PDR2(MXC_CCM_PDR2_MST2_PDF_MASK,
- MXC_CCM_PDR2_MST2_PDF_OFFSET);
- return clk_get_rate(clk->parent) / (msti_pdf + 1);
-}
-
-static unsigned long ckih_rate;
-
-static unsigned long clk_ckih_get_rate(struct clk *clk)
-{
- return ckih_rate;
-}
-
-static unsigned long clk_ckil_get_rate(struct clk *clk)
-{
- return CKIL_CLK_FREQ;
-}
-
-static struct clk ckih_clk = {
- .get_rate = clk_ckih_get_rate,
-};
-
-static struct clk mcu_pll_clk = {
- .parent = &ckih_clk,
- .get_rate = mcu_pll_get_rate,
-};
-
-static struct clk mcu_main_clk = {
- .parent = &mcu_pll_clk,
- .get_rate = mcu_main_get_rate,
-};
-
-static struct clk serial_pll_clk = {
- .parent = &ckih_clk,
- .get_rate = serial_pll_get_rate,
- .enable = serial_pll_enable,
- .disable = serial_pll_disable,
-};
-
-static struct clk usb_pll_clk = {
- .parent = &ckih_clk,
- .get_rate = usb_pll_get_rate,
- .enable = usb_pll_enable,
- .disable = usb_pll_disable,
-};
-
-static struct clk ahb_clk = {
- .parent = &mcu_main_clk,
- .get_rate = ahb_get_rate,
-};
-
-#define DEFINE_CLOCK(name, i, er, es, gr, s, p) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = gr, \
- .enable = cgr_enable, \
- .disable = cgr_disable, \
- .secondary = s, \
- .parent = p, \
- }
-
-#define DEFINE_CLOCK1(name, i, er, es, getsetround, s, p) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = getsetround##_get_rate, \
- .set_rate = getsetround##_set_rate, \
- .round_rate = getsetround##_round_rate, \
- .enable = cgr_enable, \
- .disable = cgr_disable, \
- .secondary = s, \
- .parent = p, \
- }
-
-DEFINE_CLOCK(perclk_clk, 0, NULL, 0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ckil_clk, 0, NULL, 0, clk_ckil_get_rate, NULL, NULL);
-
-DEFINE_CLOCK(sdhc1_clk, 0, MXC_CCM_CGR0, 0, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(sdhc2_clk, 1, MXC_CCM_CGR0, 2, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CGR0, 4, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(epit1_clk, 0, MXC_CCM_CGR0, 6, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(epit2_clk, 1, MXC_CCM_CGR0, 8, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(iim_clk, 0, MXC_CCM_CGR0, 10, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(pata_clk, 0, MXC_CCM_CGR0, 12, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sdma_clk1, 0, MXC_CCM_CGR0, 14, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(cspi3_clk, 2, MXC_CCM_CGR0, 16, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(rng_clk, 0, MXC_CCM_CGR0, 18, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart1_clk, 0, MXC_CCM_CGR0, 20, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(uart2_clk, 1, MXC_CCM_CGR0, 22, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(ssi1_clk, 0, MXC_CCM_CGR0, 24, ssi1_get_rate, NULL, &serial_pll_clk);
-DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CGR0, 26, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(i2c2_clk, 1, MXC_CCM_CGR0, 28, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(i2c3_clk, 2, MXC_CCM_CGR0, 30, NULL, NULL, &perclk_clk);
-
-DEFINE_CLOCK(mpeg4_clk, 0, MXC_CCM_CGR1, 0, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(mstick1_clk, 0, MXC_CCM_CGR1, 2, mstick1_get_rate, NULL, &usb_pll_clk);
-DEFINE_CLOCK(mstick2_clk, 1, MXC_CCM_CGR1, 4, mstick2_get_rate, NULL, &usb_pll_clk);
-DEFINE_CLOCK1(csi_clk, 0, MXC_CCM_CGR1, 6, csi, NULL, &serial_pll_clk);
-DEFINE_CLOCK(rtc_clk, 0, MXC_CCM_CGR1, 8, NULL, NULL, &ckil_clk);
-DEFINE_CLOCK(wdog_clk, 0, MXC_CCM_CGR1, 10, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(pwm_clk, 0, MXC_CCM_CGR1, 12, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(usb_clk2, 0, MXC_CCM_CGR1, 18, usb_get_rate, NULL, &ahb_clk);
-DEFINE_CLOCK(kpp_clk, 0, MXC_CCM_CGR1, 20, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ipu_clk, 0, MXC_CCM_CGR1, 22, hsp_get_rate, NULL, &mcu_main_clk);
-DEFINE_CLOCK(uart3_clk, 2, MXC_CCM_CGR1, 24, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(uart4_clk, 3, MXC_CCM_CGR1, 26, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(uart5_clk, 4, MXC_CCM_CGR1, 28, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(owire_clk, 0, MXC_CCM_CGR1, 30, NULL, NULL, &perclk_clk);
-
-DEFINE_CLOCK(ssi2_clk, 1, MXC_CCM_CGR2, 0, ssi2_get_rate, NULL, &serial_pll_clk);
-DEFINE_CLOCK(cspi1_clk, 0, MXC_CCM_CGR2, 2, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(cspi2_clk, 1, MXC_CCM_CGR2, 4, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(mbx_clk, 0, MXC_CCM_CGR2, 6, mbx_get_rate, NULL, &ahb_clk);
-DEFINE_CLOCK(emi_clk, 0, MXC_CCM_CGR2, 8, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(rtic_clk, 0, MXC_CCM_CGR2, 10, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK1(firi_clk, 0, MXC_CCM_CGR2, 12, firi, NULL, &usb_pll_clk);
-
-DEFINE_CLOCK(sdma_clk2, 0, NULL, 0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(usb_clk1, 0, NULL, 0, usb_get_rate, NULL, &usb_pll_clk);
-DEFINE_CLOCK(nfc_clk, 0, NULL, 0, nfc_get_rate, NULL, &ahb_clk);
-DEFINE_CLOCK(scc_clk, 0, NULL, 0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ipg_clk, 0, NULL, 0, ipg_get_rate, NULL, &ahb_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-
-static struct clk_lookup lookups[] = {
- _REGISTER_CLOCK(NULL, "emi", emi_clk)
- _REGISTER_CLOCK("imx31-cspi.0", NULL, cspi1_clk)
- _REGISTER_CLOCK("imx31-cspi.1", NULL, cspi2_clk)
- _REGISTER_CLOCK("imx31-cspi.2", NULL, cspi3_clk)
- _REGISTER_CLOCK(NULL, "gpt", gpt_clk)
- _REGISTER_CLOCK(NULL, "pwm", pwm_clk)
- _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
- _REGISTER_CLOCK(NULL, "rtc", rtc_clk)
- _REGISTER_CLOCK(NULL, "epit", epit1_clk)
- _REGISTER_CLOCK(NULL, "epit", epit2_clk)
- _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
- _REGISTER_CLOCK("ipu-core", NULL, ipu_clk)
- _REGISTER_CLOCK("mx3_sdc_fb", NULL, ipu_clk)
- _REGISTER_CLOCK(NULL, "kpp", kpp_clk)
- _REGISTER_CLOCK("mxc-ehci.0", "usb", usb_clk1)
- _REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_clk2)
- _REGISTER_CLOCK("mxc-ehci.1", "usb", usb_clk1)
- _REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_clk2)
- _REGISTER_CLOCK("mxc-ehci.2", "usb", usb_clk1)
- _REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_clk2)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb", usb_clk1)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usb_clk2)
- _REGISTER_CLOCK("mx3-camera.0", NULL, csi_clk)
- /* i.mx31 has the i.mx21 type uart */
- _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
- _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
- _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
- _REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
- _REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
- _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
- _REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_clk)
- _REGISTER_CLOCK("mxc_w1.0", NULL, owire_clk)
- _REGISTER_CLOCK("mxc-mmc.0", NULL, sdhc1_clk)
- _REGISTER_CLOCK("mxc-mmc.1", NULL, sdhc2_clk)
- _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
- _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
- _REGISTER_CLOCK(NULL, "firi", firi_clk)
- _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
- _REGISTER_CLOCK(NULL, "rtic", rtic_clk)
- _REGISTER_CLOCK(NULL, "rng", rng_clk)
- _REGISTER_CLOCK("imx31-sdma", NULL, sdma_clk1)
- _REGISTER_CLOCK(NULL, "sdma_ipg", sdma_clk2)
- _REGISTER_CLOCK(NULL, "mstick", mstick1_clk)
- _REGISTER_CLOCK(NULL, "mstick", mstick2_clk)
- _REGISTER_CLOCK(NULL, "scc", scc_clk)
- _REGISTER_CLOCK(NULL, "iim", iim_clk)
- _REGISTER_CLOCK(NULL, "mpeg4", mpeg4_clk)
- _REGISTER_CLOCK(NULL, "mbx", mbx_clk)
-};
-
-int __init mx31_clocks_init(unsigned long fref)
-{
- u32 reg;
-
- ckih_rate = fref;
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- /* change the csi_clk parent if necessary */
- reg = __raw_readl(MXC_CCM_CCMR);
- if (!(reg & MXC_CCM_CCMR_CSCS))
- if (clk_set_parent(&csi_clk, &usb_pll_clk))
- pr_err("%s: error changing csi_clk parent\n", __func__);
-
-
- /* Turn off all possible clocks */
- __raw_writel((3 << 4), MXC_CCM_CGR0);
- __raw_writel(0, MXC_CCM_CGR1);
- __raw_writel((3 << 8) | (3 << 14) | (3 << 16)|
- 1 << 27 | 1 << 28, /* Bit 27 and 28 are not defined for
- MX32, but still required to be set */
- MXC_CCM_CGR2);
-
- /*
- * Before turning off usb_pll make sure ipg_per_clk is generated
- * by ipg_clk and not usb_pll.
- */
- __raw_writel(__raw_readl(MXC_CCM_CCMR) | (1 << 24), MXC_CCM_CCMR);
-
- usb_pll_disable(&usb_pll_clk);
-
- pr_info("Clock input source is %ld\n", clk_get_rate(&ckih_clk));
-
- clk_enable(&gpt_clk);
- clk_enable(&emi_clk);
- clk_enable(&iim_clk);
- mx31_revision();
- clk_disable(&iim_clk);
-
- clk_enable(&serial_pll_clk);
-
- if (mx31_revision() >= IMX_CHIP_REVISION_2_0) {
- reg = __raw_readl(MXC_CCM_PMCR1);
- /* No PLL restart on DVFS switch; enable auto EMI handshake */
- reg |= MXC_CCM_PMCR1_PLLRDIS | MXC_CCM_PMCR1_EMIRQ_EN;
- __raw_writel(reg, MXC_CCM_PMCR1);
- }
-
- mxc_timer_init(&ipg_clk, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR),
- MX31_INT_GPT);
-
- return 0;
-}
diff --git a/arch/arm/mach-imx/clock-imx35.c b/arch/arm/mach-imx/clock-imx35.c
deleted file mode 100644
index e56c1a83eee3..000000000000
--- a/arch/arm/mach-imx/clock-imx35.c
+++ /dev/null
@@ -1,536 +0,0 @@
-/*
- * Copyright (C) 2009 by Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-
-#include "crmregs-imx3.h"
-
-#ifdef HAVE_SET_RATE_SUPPORT
-static void calc_dividers(u32 div, u32 *pre, u32 *post, u32 maxpost)
-{
- u32 min_pre, temp_pre, old_err, err;
-
- min_pre = (div - 1) / maxpost + 1;
- old_err = 8;
-
- for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) {
- if (div > (temp_pre * maxpost))
- break;
-
- if (div < (temp_pre * temp_pre))
- continue;
-
- err = div % temp_pre;
-
- if (err == 0) {
- *pre = temp_pre;
- break;
- }
-
- err = temp_pre - err;
-
- if (err < old_err) {
- old_err = err;
- *pre = temp_pre;
- }
- }
-
- *post = (div + *pre - 1) / *pre;
-}
-
-/* get the best values for a 3-bit divider combined with a 6-bit divider */
-static void calc_dividers_3_6(u32 div, u32 *pre, u32 *post)
-{
- if (div >= 512) {
- *pre = 8;
- *post = 64;
- } else if (div >= 64) {
- calc_dividers(div, pre, post, 64);
- } else if (div <= 8) {
- *pre = div;
- *post = 1;
- } else {
- *pre = 1;
- *post = div;
- }
-}
-
-/* get the best values for two cascaded 3-bit dividers */
-static void calc_dividers_3_3(u32 div, u32 *pre, u32 *post)
-{
- if (div >= 64) {
- *pre = *post = 8;
- } else if (div > 8) {
- calc_dividers(div, pre, post, 8);
- } else {
- *pre = 1;
- *post = div;
- }
-}
-#endif
-
-static unsigned long get_rate_mpll(void)
-{
- ulong mpctl = __raw_readl(MX35_CCM_MPCTL);
-
- return mxc_decode_pll(mpctl, 24000000);
-}
-
-static unsigned long get_rate_ppll(void)
-{
- ulong ppctl = __raw_readl(MX35_CCM_PPCTL);
-
- return mxc_decode_pll(ppctl, 24000000);
-}
-
-struct arm_ahb_div {
- unsigned char arm, ahb, sel;
-};
-
-static struct arm_ahb_div clk_consumer[] = {
- { .arm = 1, .ahb = 4, .sel = 0},
- { .arm = 1, .ahb = 3, .sel = 1},
- { .arm = 2, .ahb = 2, .sel = 0},
- { .arm = 0, .ahb = 0, .sel = 0},
- { .arm = 0, .ahb = 0, .sel = 0},
- { .arm = 0, .ahb = 0, .sel = 0},
- { .arm = 4, .ahb = 1, .sel = 0},
- { .arm = 1, .ahb = 5, .sel = 0},
- { .arm = 1, .ahb = 8, .sel = 0},
- { .arm = 1, .ahb = 6, .sel = 1},
- { .arm = 2, .ahb = 4, .sel = 0},
- { .arm = 0, .ahb = 0, .sel = 0},
- { .arm = 0, .ahb = 0, .sel = 0},
- { .arm = 0, .ahb = 0, .sel = 0},
- { .arm = 4, .ahb = 2, .sel = 0},
- { .arm = 0, .ahb = 0, .sel = 0},
-};
-
-static unsigned long get_rate_arm(void)
-{
- unsigned long pdr0 = __raw_readl(MXC_CCM_PDR0);
- struct arm_ahb_div *aad;
- unsigned long fref = get_rate_mpll();
-
- aad = &clk_consumer[(pdr0 >> 16) & 0xf];
- if (aad->sel)
- fref = fref * 3 / 4;
-
- return fref / aad->arm;
-}
-
-static unsigned long get_rate_ahb(struct clk *clk)
-{
- unsigned long pdr0 = __raw_readl(MXC_CCM_PDR0);
- struct arm_ahb_div *aad;
- unsigned long fref = get_rate_arm();
-
- aad = &clk_consumer[(pdr0 >> 16) & 0xf];
-
- return fref / aad->ahb;
-}
-
-static unsigned long get_rate_ipg(struct clk *clk)
-{
- return get_rate_ahb(NULL) >> 1;
-}
-
-static unsigned long get_rate_uart(struct clk *clk)
-{
- unsigned long pdr3 = __raw_readl(MX35_CCM_PDR3);
- unsigned long pdr4 = __raw_readl(MX35_CCM_PDR4);
- unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
-
- if (pdr3 & (1 << 14))
- return get_rate_arm() / div;
- else
- return get_rate_ppll() / div;
-}
-
-static unsigned long get_rate_sdhc(struct clk *clk)
-{
- unsigned long pdr3 = __raw_readl(MX35_CCM_PDR3);
- unsigned long div, rate;
-
- if (pdr3 & (1 << 6))
- rate = get_rate_arm();
- else
- rate = get_rate_ppll();
-
- switch (clk->id) {
- default:
- case 0:
- div = pdr3 & 0x3f;
- break;
- case 1:
- div = (pdr3 >> 8) & 0x3f;
- break;
- case 2:
- div = (pdr3 >> 16) & 0x3f;
- break;
- }
-
- return rate / (div + 1);
-}
-
-static unsigned long get_rate_mshc(struct clk *clk)
-{
- unsigned long pdr1 = __raw_readl(MXC_CCM_PDR1);
- unsigned long div1, div2, rate;
-
- if (pdr1 & (1 << 7))
- rate = get_rate_arm();
- else
- rate = get_rate_ppll();
-
- div1 = (pdr1 >> 29) & 0x7;
- div2 = (pdr1 >> 22) & 0x3f;
-
- return rate / ((div1 + 1) * (div2 + 1));
-}
-
-static unsigned long get_rate_ssi(struct clk *clk)
-{
- unsigned long pdr2 = __raw_readl(MX35_CCM_PDR2);
- unsigned long div1, div2, rate;
-
- if (pdr2 & (1 << 6))
- rate = get_rate_arm();
- else
- rate = get_rate_ppll();
-
- switch (clk->id) {
- default:
- case 0:
- div1 = pdr2 & 0x3f;
- div2 = (pdr2 >> 24) & 0x7;
- break;
- case 1:
- div1 = (pdr2 >> 8) & 0x3f;
- div2 = (pdr2 >> 27) & 0x7;
- break;
- }
-
- return rate / ((div1 + 1) * (div2 + 1));
-}
-
-static unsigned long get_rate_csi(struct clk *clk)
-{
- unsigned long pdr2 = __raw_readl(MX35_CCM_PDR2);
- unsigned long rate;
-
- if (pdr2 & (1 << 7))
- rate = get_rate_arm();
- else
- rate = get_rate_ppll();
-
- return rate / (((pdr2 >> 16) & 0x3f) + 1);
-}
-
-static unsigned long get_rate_otg(struct clk *clk)
-{
- unsigned long pdr4 = __raw_readl(MX35_CCM_PDR4);
- unsigned long rate;
-
- if (pdr4 & (1 << 9))
- rate = get_rate_arm();
- else
- rate = get_rate_ppll();
-
- return rate / (((pdr4 >> 22) & 0x3f) + 1);
-}
-
-static unsigned long get_rate_ipg_per(struct clk *clk)
-{
- unsigned long pdr0 = __raw_readl(MXC_CCM_PDR0);
- unsigned long pdr4 = __raw_readl(MX35_CCM_PDR4);
- unsigned long div;
-
- if (pdr0 & (1 << 26)) {
- div = (pdr4 >> 16) & 0x3f;
- return get_rate_arm() / (div + 1);
- } else {
- div = (pdr0 >> 12) & 0x7;
- return get_rate_ahb(NULL) / (div + 1);
- }
-}
-
-static unsigned long get_rate_hsp(struct clk *clk)
-{
- unsigned long hsp_podf = (__raw_readl(MXC_CCM_PDR0) >> 20) & 0x03;
- unsigned long fref = get_rate_mpll();
-
- if (fref > 400 * 1000 * 1000) {
- switch (hsp_podf) {
- case 0:
- return fref >> 2;
- case 1:
- return fref >> 3;
- case 2:
- return fref / 3;
- }
- } else {
- switch (hsp_podf) {
- case 0:
- case 2:
- return fref / 3;
- case 1:
- return fref / 6;
- }
- }
-
- return 0;
-}
-
-static int clk_cgr_enable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg |= 3 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
-
- return 0;
-}
-
-static void clk_cgr_disable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(3 << clk->enable_shift);
- __raw_writel(reg, clk->enable_reg);
-}
-
-#define DEFINE_CLOCK(name, i, er, es, gr, sr) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = gr, \
- .set_rate = sr, \
- .enable = clk_cgr_enable, \
- .disable = clk_cgr_disable, \
- }
-
-DEFINE_CLOCK(asrc_clk, 0, MX35_CCM_CGR0, 0, NULL, NULL);
-DEFINE_CLOCK(pata_clk, 0, MX35_CCM_CGR0, 2, get_rate_ipg, NULL);
-/* DEFINE_CLOCK(audmux_clk, 0, MX35_CCM_CGR0, 4, NULL, NULL); */
-DEFINE_CLOCK(can1_clk, 0, MX35_CCM_CGR0, 6, get_rate_ipg, NULL);
-DEFINE_CLOCK(can2_clk, 1, MX35_CCM_CGR0, 8, get_rate_ipg, NULL);
-DEFINE_CLOCK(cspi1_clk, 0, MX35_CCM_CGR0, 10, get_rate_ipg, NULL);
-DEFINE_CLOCK(cspi2_clk, 1, MX35_CCM_CGR0, 12, get_rate_ipg, NULL);
-DEFINE_CLOCK(ect_clk, 0, MX35_CCM_CGR0, 14, get_rate_ipg, NULL);
-DEFINE_CLOCK(edio_clk, 0, MX35_CCM_CGR0, 16, NULL, NULL);
-DEFINE_CLOCK(emi_clk, 0, MX35_CCM_CGR0, 18, get_rate_ipg, NULL);
-DEFINE_CLOCK(epit1_clk, 0, MX35_CCM_CGR0, 20, get_rate_ipg, NULL);
-DEFINE_CLOCK(epit2_clk, 1, MX35_CCM_CGR0, 22, get_rate_ipg, NULL);
-DEFINE_CLOCK(esai_clk, 0, MX35_CCM_CGR0, 24, NULL, NULL);
-DEFINE_CLOCK(esdhc1_clk, 0, MX35_CCM_CGR0, 26, get_rate_sdhc, NULL);
-DEFINE_CLOCK(esdhc2_clk, 1, MX35_CCM_CGR0, 28, get_rate_sdhc, NULL);
-DEFINE_CLOCK(esdhc3_clk, 2, MX35_CCM_CGR0, 30, get_rate_sdhc, NULL);
-
-DEFINE_CLOCK(fec_clk, 0, MX35_CCM_CGR1, 0, get_rate_ipg, NULL);
-DEFINE_CLOCK(gpio1_clk, 0, MX35_CCM_CGR1, 2, NULL, NULL);
-DEFINE_CLOCK(gpio2_clk, 1, MX35_CCM_CGR1, 4, NULL, NULL);
-DEFINE_CLOCK(gpio3_clk, 2, MX35_CCM_CGR1, 6, NULL, NULL);
-DEFINE_CLOCK(gpt_clk, 0, MX35_CCM_CGR1, 8, get_rate_ipg, NULL);
-DEFINE_CLOCK(i2c1_clk, 0, MX35_CCM_CGR1, 10, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(i2c2_clk, 1, MX35_CCM_CGR1, 12, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(i2c3_clk, 2, MX35_CCM_CGR1, 14, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(iomuxc_clk, 0, MX35_CCM_CGR1, 16, NULL, NULL);
-DEFINE_CLOCK(ipu_clk, 0, MX35_CCM_CGR1, 18, get_rate_hsp, NULL);
-DEFINE_CLOCK(kpp_clk, 0, MX35_CCM_CGR1, 20, get_rate_ipg, NULL);
-DEFINE_CLOCK(mlb_clk, 0, MX35_CCM_CGR1, 22, get_rate_ahb, NULL);
-DEFINE_CLOCK(mshc_clk, 0, MX35_CCM_CGR1, 24, get_rate_mshc, NULL);
-DEFINE_CLOCK(owire_clk, 0, MX35_CCM_CGR1, 26, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(pwm_clk, 0, MX35_CCM_CGR1, 28, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(rngc_clk, 0, MX35_CCM_CGR1, 30, get_rate_ipg, NULL);
-
-DEFINE_CLOCK(rtc_clk, 0, MX35_CCM_CGR2, 0, get_rate_ipg, NULL);
-DEFINE_CLOCK(rtic_clk, 0, MX35_CCM_CGR2, 2, get_rate_ahb, NULL);
-DEFINE_CLOCK(scc_clk, 0, MX35_CCM_CGR2, 4, get_rate_ipg, NULL);
-DEFINE_CLOCK(sdma_clk, 0, MX35_CCM_CGR2, 6, NULL, NULL);
-DEFINE_CLOCK(spba_clk, 0, MX35_CCM_CGR2, 8, get_rate_ipg, NULL);
-DEFINE_CLOCK(spdif_clk, 0, MX35_CCM_CGR2, 10, NULL, NULL);
-DEFINE_CLOCK(ssi1_clk, 0, MX35_CCM_CGR2, 12, get_rate_ssi, NULL);
-DEFINE_CLOCK(ssi2_clk, 1, MX35_CCM_CGR2, 14, get_rate_ssi, NULL);
-DEFINE_CLOCK(uart1_clk, 0, MX35_CCM_CGR2, 16, get_rate_uart, NULL);
-DEFINE_CLOCK(uart2_clk, 1, MX35_CCM_CGR2, 18, get_rate_uart, NULL);
-DEFINE_CLOCK(uart3_clk, 2, MX35_CCM_CGR2, 20, get_rate_uart, NULL);
-DEFINE_CLOCK(usbotg_clk, 0, MX35_CCM_CGR2, 22, get_rate_otg, NULL);
-DEFINE_CLOCK(wdog_clk, 0, MX35_CCM_CGR2, 24, NULL, NULL);
-DEFINE_CLOCK(max_clk, 0, MX35_CCM_CGR2, 26, NULL, NULL);
-DEFINE_CLOCK(audmux_clk, 0, MX35_CCM_CGR2, 30, NULL, NULL);
-
-DEFINE_CLOCK(csi_clk, 0, MX35_CCM_CGR3, 0, get_rate_csi, NULL);
-DEFINE_CLOCK(iim_clk, 0, MX35_CCM_CGR3, 2, NULL, NULL);
-DEFINE_CLOCK(gpu2d_clk, 0, MX35_CCM_CGR3, 4, NULL, NULL);
-
-DEFINE_CLOCK(usbahb_clk, 0, 0, 0, get_rate_ahb, NULL);
-
-static int clk_dummy_enable(struct clk *clk)
-{
- return 0;
-}
-
-static void clk_dummy_disable(struct clk *clk)
-{
-}
-
-static unsigned long get_rate_nfc(struct clk *clk)
-{
- unsigned long div1;
-
- div1 = (__raw_readl(MX35_CCM_PDR4) >> 28) + 1;
-
- return get_rate_ahb(NULL) / div1;
-}
-
-/* NAND Controller: It seems it can't be disabled */
-static struct clk nfc_clk = {
- .id = 0,
- .enable_reg = 0,
- .enable_shift = 0,
- .get_rate = get_rate_nfc,
- .set_rate = NULL, /* set_rate_nfc, */
- .enable = clk_dummy_enable,
- .disable = clk_dummy_disable
-};
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-
-static struct clk_lookup lookups[] = {
- _REGISTER_CLOCK(NULL, "asrc", asrc_clk)
- _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
- _REGISTER_CLOCK("flexcan.0", NULL, can1_clk)
- _REGISTER_CLOCK("flexcan.1", NULL, can2_clk)
- _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
- _REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
- _REGISTER_CLOCK(NULL, "ect", ect_clk)
- _REGISTER_CLOCK(NULL, "edio", edio_clk)
- _REGISTER_CLOCK(NULL, "emi", emi_clk)
- _REGISTER_CLOCK("imx-epit.0", NULL, epit1_clk)
- _REGISTER_CLOCK("imx-epit.1", NULL, epit2_clk)
- _REGISTER_CLOCK(NULL, "esai", esai_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx35.0", NULL, esdhc1_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx35.1", NULL, esdhc2_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx35.2", NULL, esdhc3_clk)
- /* i.mx35 has the i.mx27 type fec */
- _REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
- _REGISTER_CLOCK(NULL, "gpio", gpio1_clk)
- _REGISTER_CLOCK(NULL, "gpio", gpio2_clk)
- _REGISTER_CLOCK(NULL, "gpio", gpio3_clk)
- _REGISTER_CLOCK("gpt.0", NULL, gpt_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
- _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
- _REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_clk)
- _REGISTER_CLOCK(NULL, "iomuxc", iomuxc_clk)
- _REGISTER_CLOCK("ipu-core", NULL, ipu_clk)
- _REGISTER_CLOCK("mx3_sdc_fb", NULL, ipu_clk)
- _REGISTER_CLOCK(NULL, "kpp", kpp_clk)
- _REGISTER_CLOCK(NULL, "mlb", mlb_clk)
- _REGISTER_CLOCK(NULL, "mshc", mshc_clk)
- _REGISTER_CLOCK("mxc_w1", NULL, owire_clk)
- _REGISTER_CLOCK(NULL, "pwm", pwm_clk)
- _REGISTER_CLOCK(NULL, "rngc", rngc_clk)
- _REGISTER_CLOCK(NULL, "rtc", rtc_clk)
- _REGISTER_CLOCK(NULL, "rtic", rtic_clk)
- _REGISTER_CLOCK(NULL, "scc", scc_clk)
- _REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
- _REGISTER_CLOCK(NULL, "spba", spba_clk)
- _REGISTER_CLOCK(NULL, "spdif", spdif_clk)
- _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
- _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
- /* i.mx35 has the i.mx21 type uart */
- _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
- _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
- _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
- _REGISTER_CLOCK("mxc-ehci.0", "usb", usbotg_clk)
- _REGISTER_CLOCK("mxc-ehci.1", "usb", usbotg_clk)
- _REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usbahb_clk)
- _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
- _REGISTER_CLOCK(NULL, "max", max_clk)
- _REGISTER_CLOCK(NULL, "audmux", audmux_clk)
- _REGISTER_CLOCK("mx3-camera.0", NULL, csi_clk)
- _REGISTER_CLOCK(NULL, "iim", iim_clk)
- _REGISTER_CLOCK(NULL, "gpu2d", gpu2d_clk)
- _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
-};
-
-int __init mx35_clocks_init(void)
-{
- unsigned int cgr2 = 3 << 26;
-
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
- cgr2 |= 3 << 16;
-#endif
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- /* Turn off all clocks except the ones we need to survive, namely:
- * EMI, GPIO1/2/3, GPT, IOMUX, MAX and eventually uart
- */
- __raw_writel((3 << 18), MX35_CCM_CGR0);
- __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
- MX35_CCM_CGR1);
- __raw_writel(cgr2, MX35_CCM_CGR2);
- __raw_writel(0, MX35_CCM_CGR3);
-
- clk_enable(&iim_clk);
- imx_print_silicon_rev("i.MX35", mx35_revision());
- clk_disable(&iim_clk);
-
- /*
- * Check if we came up in internal boot mode. If yes, we need some
- * extra clocks turned on, otherwise the MX35 boot ROM code will
- * hang after a watchdog reset.
- */
- if (!(__raw_readl(MX35_CCM_RCSR) & (3 << 10))) {
- /* Additionally turn on UART1, SCC, and IIM clocks */
- clk_enable(&iim_clk);
- clk_enable(&uart1_clk);
- clk_enable(&scc_clk);
- }
-
-#ifdef CONFIG_MXC_USE_EPIT
- epit_timer_init(&epit1_clk,
- MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
-#else
- mxc_timer_init(&gpt_clk,
- MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
-#endif
-
- return 0;
-}
diff --git a/arch/arm/mach-imx/clock-imx6q.c b/arch/arm/mach-imx/clock-imx6q.c
deleted file mode 100644
index 111c328f5420..000000000000
--- a/arch/arm/mach-imx/clock-imx6q.c
+++ /dev/null
@@ -1,2111 +0,0 @@
-/*
- * Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2011 Linaro Ltd.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <asm/div64.h>
-#include <asm/mach/map.h>
-#include <mach/clock.h>
-#include <mach/common.h>
-#include <mach/hardware.h>
-
-#define PLL_BASE IMX_IO_ADDRESS(MX6Q_ANATOP_BASE_ADDR)
-#define PLL1_SYS (PLL_BASE + 0x000)
-#define PLL2_BUS (PLL_BASE + 0x030)
-#define PLL3_USB_OTG (PLL_BASE + 0x010)
-#define PLL4_AUDIO (PLL_BASE + 0x070)
-#define PLL5_VIDEO (PLL_BASE + 0x0a0)
-#define PLL6_MLB (PLL_BASE + 0x0d0)
-#define PLL7_USB_HOST (PLL_BASE + 0x020)
-#define PLL8_ENET (PLL_BASE + 0x0e0)
-#define PFD_480 (PLL_BASE + 0x0f0)
-#define PFD_528 (PLL_BASE + 0x100)
-#define PLL_NUM_OFFSET 0x010
-#define PLL_DENOM_OFFSET 0x020
-
-#define PFD0 7
-#define PFD1 15
-#define PFD2 23
-#define PFD3 31
-#define PFD_FRAC_MASK 0x3f
-
-#define BM_PLL_BYPASS (0x1 << 16)
-#define BM_PLL_ENABLE (0x1 << 13)
-#define BM_PLL_POWER_DOWN (0x1 << 12)
-#define BM_PLL_LOCK (0x1 << 31)
-#define BP_PLL_SYS_DIV_SELECT 0
-#define BM_PLL_SYS_DIV_SELECT (0x7f << 0)
-#define BP_PLL_BUS_DIV_SELECT 0
-#define BM_PLL_BUS_DIV_SELECT (0x1 << 0)
-#define BP_PLL_USB_DIV_SELECT 0
-#define BM_PLL_USB_DIV_SELECT (0x3 << 0)
-#define BP_PLL_AV_DIV_SELECT 0
-#define BM_PLL_AV_DIV_SELECT (0x7f << 0)
-#define BP_PLL_ENET_DIV_SELECT 0
-#define BM_PLL_ENET_DIV_SELECT (0x3 << 0)
-#define BM_PLL_ENET_EN_PCIE (0x1 << 19)
-#define BM_PLL_ENET_EN_SATA (0x1 << 20)
-
-#define CCM_BASE IMX_IO_ADDRESS(MX6Q_CCM_BASE_ADDR)
-#define CCR (CCM_BASE + 0x00)
-#define CCDR (CCM_BASE + 0x04)
-#define CSR (CCM_BASE + 0x08)
-#define CCSR (CCM_BASE + 0x0c)
-#define CACRR (CCM_BASE + 0x10)
-#define CBCDR (CCM_BASE + 0x14)
-#define CBCMR (CCM_BASE + 0x18)
-#define CSCMR1 (CCM_BASE + 0x1c)
-#define CSCMR2 (CCM_BASE + 0x20)
-#define CSCDR1 (CCM_BASE + 0x24)
-#define CS1CDR (CCM_BASE + 0x28)
-#define CS2CDR (CCM_BASE + 0x2c)
-#define CDCDR (CCM_BASE + 0x30)
-#define CHSCCDR (CCM_BASE + 0x34)
-#define CSCDR2 (CCM_BASE + 0x38)
-#define CSCDR3 (CCM_BASE + 0x3c)
-#define CSCDR4 (CCM_BASE + 0x40)
-#define CWDR (CCM_BASE + 0x44)
-#define CDHIPR (CCM_BASE + 0x48)
-#define CDCR (CCM_BASE + 0x4c)
-#define CTOR (CCM_BASE + 0x50)
-#define CLPCR (CCM_BASE + 0x54)
-#define CISR (CCM_BASE + 0x58)
-#define CIMR (CCM_BASE + 0x5c)
-#define CCOSR (CCM_BASE + 0x60)
-#define CGPR (CCM_BASE + 0x64)
-#define CCGR0 (CCM_BASE + 0x68)
-#define CCGR1 (CCM_BASE + 0x6c)
-#define CCGR2 (CCM_BASE + 0x70)
-#define CCGR3 (CCM_BASE + 0x74)
-#define CCGR4 (CCM_BASE + 0x78)
-#define CCGR5 (CCM_BASE + 0x7c)
-#define CCGR6 (CCM_BASE + 0x80)
-#define CCGR7 (CCM_BASE + 0x84)
-#define CMEOR (CCM_BASE + 0x88)
-
-#define CG0 0
-#define CG1 2
-#define CG2 4
-#define CG3 6
-#define CG4 8
-#define CG5 10
-#define CG6 12
-#define CG7 14
-#define CG8 16
-#define CG9 18
-#define CG10 20
-#define CG11 22
-#define CG12 24
-#define CG13 26
-#define CG14 28
-#define CG15 30
-
-#define BM_CCSR_PLL1_SW_SEL (0x1 << 2)
-#define BM_CCSR_STEP_SEL (0x1 << 8)
-
-#define BP_CACRR_ARM_PODF 0
-#define BM_CACRR_ARM_PODF (0x7 << 0)
-
-#define BP_CBCDR_PERIPH2_CLK2_PODF 0
-#define BM_CBCDR_PERIPH2_CLK2_PODF (0x7 << 0)
-#define BP_CBCDR_MMDC_CH1_AXI_PODF 3
-#define BM_CBCDR_MMDC_CH1_AXI_PODF (0x7 << 3)
-#define BP_CBCDR_AXI_SEL 6
-#define BM_CBCDR_AXI_SEL (0x3 << 6)
-#define BP_CBCDR_IPG_PODF 8
-#define BM_CBCDR_IPG_PODF (0x3 << 8)
-#define BP_CBCDR_AHB_PODF 10
-#define BM_CBCDR_AHB_PODF (0x7 << 10)
-#define BP_CBCDR_AXI_PODF 16
-#define BM_CBCDR_AXI_PODF (0x7 << 16)
-#define BP_CBCDR_MMDC_CH0_AXI_PODF 19
-#define BM_CBCDR_MMDC_CH0_AXI_PODF (0x7 << 19)
-#define BP_CBCDR_PERIPH_CLK_SEL 25
-#define BM_CBCDR_PERIPH_CLK_SEL (0x1 << 25)
-#define BP_CBCDR_PERIPH2_CLK_SEL 26
-#define BM_CBCDR_PERIPH2_CLK_SEL (0x1 << 26)
-#define BP_CBCDR_PERIPH_CLK2_PODF 27
-#define BM_CBCDR_PERIPH_CLK2_PODF (0x7 << 27)
-
-#define BP_CBCMR_GPU2D_AXI_SEL 0
-#define BM_CBCMR_GPU2D_AXI_SEL (0x1 << 0)
-#define BP_CBCMR_GPU3D_AXI_SEL 1
-#define BM_CBCMR_GPU3D_AXI_SEL (0x1 << 1)
-#define BP_CBCMR_GPU3D_CORE_SEL 4
-#define BM_CBCMR_GPU3D_CORE_SEL (0x3 << 4)
-#define BP_CBCMR_GPU3D_SHADER_SEL 8
-#define BM_CBCMR_GPU3D_SHADER_SEL (0x3 << 8)
-#define BP_CBCMR_PCIE_AXI_SEL 10
-#define BM_CBCMR_PCIE_AXI_SEL (0x1 << 10)
-#define BP_CBCMR_VDO_AXI_SEL 11
-#define BM_CBCMR_VDO_AXI_SEL (0x1 << 11)
-#define BP_CBCMR_PERIPH_CLK2_SEL 12
-#define BM_CBCMR_PERIPH_CLK2_SEL (0x3 << 12)
-#define BP_CBCMR_VPU_AXI_SEL 14
-#define BM_CBCMR_VPU_AXI_SEL (0x3 << 14)
-#define BP_CBCMR_GPU2D_CORE_SEL 16
-#define BM_CBCMR_GPU2D_CORE_SEL (0x3 << 16)
-#define BP_CBCMR_PRE_PERIPH_CLK_SEL 18
-#define BM_CBCMR_PRE_PERIPH_CLK_SEL (0x3 << 18)
-#define BP_CBCMR_PERIPH2_CLK2_SEL 20
-#define BM_CBCMR_PERIPH2_CLK2_SEL (0x1 << 20)
-#define BP_CBCMR_PRE_PERIPH2_CLK_SEL 21
-#define BM_CBCMR_PRE_PERIPH2_CLK_SEL (0x3 << 21)
-#define BP_CBCMR_GPU2D_CORE_PODF 23
-#define BM_CBCMR_GPU2D_CORE_PODF (0x7 << 23)
-#define BP_CBCMR_GPU3D_CORE_PODF 26
-#define BM_CBCMR_GPU3D_CORE_PODF (0x7 << 26)
-#define BP_CBCMR_GPU3D_SHADER_PODF 29
-#define BM_CBCMR_GPU3D_SHADER_PODF (0x7 << 29)
-
-#define BP_CSCMR1_PERCLK_PODF 0
-#define BM_CSCMR1_PERCLK_PODF (0x3f << 0)
-#define BP_CSCMR1_SSI1_SEL 10
-#define BM_CSCMR1_SSI1_SEL (0x3 << 10)
-#define BP_CSCMR1_SSI2_SEL 12
-#define BM_CSCMR1_SSI2_SEL (0x3 << 12)
-#define BP_CSCMR1_SSI3_SEL 14
-#define BM_CSCMR1_SSI3_SEL (0x3 << 14)
-#define BP_CSCMR1_USDHC1_SEL 16
-#define BM_CSCMR1_USDHC1_SEL (0x1 << 16)
-#define BP_CSCMR1_USDHC2_SEL 17
-#define BM_CSCMR1_USDHC2_SEL (0x1 << 17)
-#define BP_CSCMR1_USDHC3_SEL 18
-#define BM_CSCMR1_USDHC3_SEL (0x1 << 18)
-#define BP_CSCMR1_USDHC4_SEL 19
-#define BM_CSCMR1_USDHC4_SEL (0x1 << 19)
-#define BP_CSCMR1_EMI_PODF 20
-#define BM_CSCMR1_EMI_PODF (0x7 << 20)
-#define BP_CSCMR1_EMI_SLOW_PODF 23
-#define BM_CSCMR1_EMI_SLOW_PODF (0x7 << 23)
-#define BP_CSCMR1_EMI_SEL 27
-#define BM_CSCMR1_EMI_SEL (0x3 << 27)
-#define BP_CSCMR1_EMI_SLOW_SEL 29
-#define BM_CSCMR1_EMI_SLOW_SEL (0x3 << 29)
-
-#define BP_CSCMR2_CAN_PODF 2
-#define BM_CSCMR2_CAN_PODF (0x3f << 2)
-#define BM_CSCMR2_LDB_DI0_IPU_DIV (0x1 << 10)
-#define BM_CSCMR2_LDB_DI1_IPU_DIV (0x1 << 11)
-#define BP_CSCMR2_ESAI_SEL 19
-#define BM_CSCMR2_ESAI_SEL (0x3 << 19)
-
-#define BP_CSCDR1_UART_PODF 0
-#define BM_CSCDR1_UART_PODF (0x3f << 0)
-#define BP_CSCDR1_USDHC1_PODF 11
-#define BM_CSCDR1_USDHC1_PODF (0x7 << 11)
-#define BP_CSCDR1_USDHC2_PODF 16
-#define BM_CSCDR1_USDHC2_PODF (0x7 << 16)
-#define BP_CSCDR1_USDHC3_PODF 19
-#define BM_CSCDR1_USDHC3_PODF (0x7 << 19)
-#define BP_CSCDR1_USDHC4_PODF 22
-#define BM_CSCDR1_USDHC4_PODF (0x7 << 22)
-#define BP_CSCDR1_VPU_AXI_PODF 25
-#define BM_CSCDR1_VPU_AXI_PODF (0x7 << 25)
-
-#define BP_CS1CDR_SSI1_PODF 0
-#define BM_CS1CDR_SSI1_PODF (0x3f << 0)
-#define BP_CS1CDR_SSI1_PRED 6
-#define BM_CS1CDR_SSI1_PRED (0x7 << 6)
-#define BP_CS1CDR_ESAI_PRED 9
-#define BM_CS1CDR_ESAI_PRED (0x7 << 9)
-#define BP_CS1CDR_SSI3_PODF 16
-#define BM_CS1CDR_SSI3_PODF (0x3f << 16)
-#define BP_CS1CDR_SSI3_PRED 22
-#define BM_CS1CDR_SSI3_PRED (0x7 << 22)
-#define BP_CS1CDR_ESAI_PODF 25
-#define BM_CS1CDR_ESAI_PODF (0x7 << 25)
-
-#define BP_CS2CDR_SSI2_PODF 0
-#define BM_CS2CDR_SSI2_PODF (0x3f << 0)
-#define BP_CS2CDR_SSI2_PRED 6
-#define BM_CS2CDR_SSI2_PRED (0x7 << 6)
-#define BP_CS2CDR_LDB_DI0_SEL 9
-#define BM_CS2CDR_LDB_DI0_SEL (0x7 << 9)
-#define BP_CS2CDR_LDB_DI1_SEL 12
-#define BM_CS2CDR_LDB_DI1_SEL (0x7 << 12)
-#define BP_CS2CDR_ENFC_SEL 16
-#define BM_CS2CDR_ENFC_SEL (0x3 << 16)
-#define BP_CS2CDR_ENFC_PRED 18
-#define BM_CS2CDR_ENFC_PRED (0x7 << 18)
-#define BP_CS2CDR_ENFC_PODF 21
-#define BM_CS2CDR_ENFC_PODF (0x3f << 21)
-
-#define BP_CDCDR_ASRC_SERIAL_SEL 7
-#define BM_CDCDR_ASRC_SERIAL_SEL (0x3 << 7)
-#define BP_CDCDR_ASRC_SERIAL_PODF 9
-#define BM_CDCDR_ASRC_SERIAL_PODF (0x7 << 9)
-#define BP_CDCDR_ASRC_SERIAL_PRED 12
-#define BM_CDCDR_ASRC_SERIAL_PRED (0x7 << 12)
-#define BP_CDCDR_SPDIF_SEL 20
-#define BM_CDCDR_SPDIF_SEL (0x3 << 20)
-#define BP_CDCDR_SPDIF_PODF 22
-#define BM_CDCDR_SPDIF_PODF (0x7 << 22)
-#define BP_CDCDR_SPDIF_PRED 25
-#define BM_CDCDR_SPDIF_PRED (0x7 << 25)
-#define BP_CDCDR_HSI_TX_PODF 29
-#define BM_CDCDR_HSI_TX_PODF (0x7 << 29)
-#define BP_CDCDR_HSI_TX_SEL 28
-#define BM_CDCDR_HSI_TX_SEL (0x1 << 28)
-
-#define BP_CHSCCDR_IPU1_DI0_SEL 0
-#define BM_CHSCCDR_IPU1_DI0_SEL (0x7 << 0)
-#define BP_CHSCCDR_IPU1_DI0_PRE_PODF 3
-#define BM_CHSCCDR_IPU1_DI0_PRE_PODF (0x7 << 3)
-#define BP_CHSCCDR_IPU1_DI0_PRE_SEL 6
-#define BM_CHSCCDR_IPU1_DI0_PRE_SEL (0x7 << 6)
-#define BP_CHSCCDR_IPU1_DI1_SEL 9
-#define BM_CHSCCDR_IPU1_DI1_SEL (0x7 << 9)
-#define BP_CHSCCDR_IPU1_DI1_PRE_PODF 12
-#define BM_CHSCCDR_IPU1_DI1_PRE_PODF (0x7 << 12)
-#define BP_CHSCCDR_IPU1_DI1_PRE_SEL 15
-#define BM_CHSCCDR_IPU1_DI1_PRE_SEL (0x7 << 15)
-
-#define BP_CSCDR2_IPU2_DI0_SEL 0
-#define BM_CSCDR2_IPU2_DI0_SEL (0x7)
-#define BP_CSCDR2_IPU2_DI0_PRE_PODF 3
-#define BM_CSCDR2_IPU2_DI0_PRE_PODF (0x7 << 3)
-#define BP_CSCDR2_IPU2_DI0_PRE_SEL 6
-#define BM_CSCDR2_IPU2_DI0_PRE_SEL (0x7 << 6)
-#define BP_CSCDR2_IPU2_DI1_SEL 9
-#define BM_CSCDR2_IPU2_DI1_SEL (0x7 << 9)
-#define BP_CSCDR2_IPU2_DI1_PRE_PODF 12
-#define BM_CSCDR2_IPU2_DI1_PRE_PODF (0x7 << 12)
-#define BP_CSCDR2_IPU2_DI1_PRE_SEL 15
-#define BM_CSCDR2_IPU2_DI1_PRE_SEL (0x7 << 15)
-#define BP_CSCDR2_ECSPI_CLK_PODF 19
-#define BM_CSCDR2_ECSPI_CLK_PODF (0x3f << 19)
-
-#define BP_CSCDR3_IPU1_HSP_SEL 9
-#define BM_CSCDR3_IPU1_HSP_SEL (0x3 << 9)
-#define BP_CSCDR3_IPU1_HSP_PODF 11
-#define BM_CSCDR3_IPU1_HSP_PODF (0x7 << 11)
-#define BP_CSCDR3_IPU2_HSP_SEL 14
-#define BM_CSCDR3_IPU2_HSP_SEL (0x3 << 14)
-#define BP_CSCDR3_IPU2_HSP_PODF 16
-#define BM_CSCDR3_IPU2_HSP_PODF (0x7 << 16)
-
-#define BM_CDHIPR_AXI_PODF_BUSY (0x1 << 0)
-#define BM_CDHIPR_AHB_PODF_BUSY (0x1 << 1)
-#define BM_CDHIPR_MMDC_CH1_PODF_BUSY (0x1 << 2)
-#define BM_CDHIPR_PERIPH2_SEL_BUSY (0x1 << 3)
-#define BM_CDHIPR_MMDC_CH0_PODF_BUSY (0x1 << 4)
-#define BM_CDHIPR_PERIPH_SEL_BUSY (0x1 << 5)
-#define BM_CDHIPR_ARM_PODF_BUSY (0x1 << 16)
-
-#define BP_CLPCR_LPM 0
-#define BM_CLPCR_LPM (0x3 << 0)
-#define BM_CLPCR_BYPASS_PMIC_READY (0x1 << 2)
-#define BM_CLPCR_ARM_CLK_DIS_ON_LPM (0x1 << 5)
-#define BM_CLPCR_SBYOS (0x1 << 6)
-#define BM_CLPCR_DIS_REF_OSC (0x1 << 7)
-#define BM_CLPCR_VSTBY (0x1 << 8)
-#define BP_CLPCR_STBY_COUNT 9
-#define BM_CLPCR_STBY_COUNT (0x3 << 9)
-#define BM_CLPCR_COSC_PWRDOWN (0x1 << 11)
-#define BM_CLPCR_WB_PER_AT_LPM (0x1 << 16)
-#define BM_CLPCR_WB_CORE_AT_LPM (0x1 << 17)
-#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS (0x1 << 19)
-#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS (0x1 << 21)
-#define BM_CLPCR_MASK_CORE0_WFI (0x1 << 22)
-#define BM_CLPCR_MASK_CORE1_WFI (0x1 << 23)
-#define BM_CLPCR_MASK_CORE2_WFI (0x1 << 24)
-#define BM_CLPCR_MASK_CORE3_WFI (0x1 << 25)
-#define BM_CLPCR_MASK_SCU_IDLE (0x1 << 26)
-#define BM_CLPCR_MASK_L2CC_IDLE (0x1 << 27)
-
-#define BP_CCOSR_CKO1_EN 7
-#define BP_CCOSR_CKO1_PODF 4
-#define BM_CCOSR_CKO1_PODF (0x7 << 4)
-#define BP_CCOSR_CKO1_SEL 0
-#define BM_CCOSR_CKO1_SEL (0xf << 0)
-
-#define FREQ_480M 480000000
-#define FREQ_528M 528000000
-#define FREQ_594M 594000000
-#define FREQ_650M 650000000
-#define FREQ_1300M 1300000000
-
-static struct clk pll1_sys;
-static struct clk pll2_bus;
-static struct clk pll3_usb_otg;
-static struct clk pll4_audio;
-static struct clk pll5_video;
-static struct clk pll6_mlb;
-static struct clk pll7_usb_host;
-static struct clk pll8_enet;
-static struct clk apbh_dma_clk;
-static struct clk arm_clk;
-static struct clk ipg_clk;
-static struct clk ahb_clk;
-static struct clk axi_clk;
-static struct clk mmdc_ch0_axi_clk;
-static struct clk mmdc_ch1_axi_clk;
-static struct clk periph_clk;
-static struct clk periph_pre_clk;
-static struct clk periph_clk2_clk;
-static struct clk periph2_clk;
-static struct clk periph2_pre_clk;
-static struct clk periph2_clk2_clk;
-static struct clk gpu2d_core_clk;
-static struct clk gpu3d_core_clk;
-static struct clk gpu3d_shader_clk;
-static struct clk ipg_perclk;
-static struct clk emi_clk;
-static struct clk emi_slow_clk;
-static struct clk can1_clk;
-static struct clk uart_clk;
-static struct clk usdhc1_clk;
-static struct clk usdhc2_clk;
-static struct clk usdhc3_clk;
-static struct clk usdhc4_clk;
-static struct clk vpu_clk;
-static struct clk hsi_tx_clk;
-static struct clk ipu1_di0_pre_clk;
-static struct clk ipu1_di1_pre_clk;
-static struct clk ipu2_di0_pre_clk;
-static struct clk ipu2_di1_pre_clk;
-static struct clk ipu1_clk;
-static struct clk ipu2_clk;
-static struct clk ssi1_clk;
-static struct clk ssi3_clk;
-static struct clk esai_clk;
-static struct clk ssi2_clk;
-static struct clk spdif_clk;
-static struct clk asrc_serial_clk;
-static struct clk gpu2d_axi_clk;
-static struct clk gpu3d_axi_clk;
-static struct clk pcie_clk;
-static struct clk vdo_axi_clk;
-static struct clk ldb_di0_clk;
-static struct clk ldb_di1_clk;
-static struct clk ipu1_di0_clk;
-static struct clk ipu1_di1_clk;
-static struct clk ipu2_di0_clk;
-static struct clk ipu2_di1_clk;
-static struct clk enfc_clk;
-static struct clk cko1_clk;
-static struct clk dummy_clk = {};
-
-static unsigned long external_high_reference;
-static unsigned long external_low_reference;
-static unsigned long oscillator_reference;
-
-static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
-{
- return oscillator_reference;
-}
-
-static unsigned long get_high_reference_clock_rate(struct clk *clk)
-{
- return external_high_reference;
-}
-
-static unsigned long get_low_reference_clock_rate(struct clk *clk)
-{
- return external_low_reference;
-}
-
-static struct clk ckil_clk = {
- .get_rate = get_low_reference_clock_rate,
-};
-
-static struct clk ckih_clk = {
- .get_rate = get_high_reference_clock_rate,
-};
-
-static struct clk osc_clk = {
- .get_rate = get_oscillator_reference_clock_rate,
-};
-
-static inline void __iomem *pll_get_reg_addr(struct clk *pll)
-{
- if (pll == &pll1_sys)
- return PLL1_SYS;
- else if (pll == &pll2_bus)
- return PLL2_BUS;
- else if (pll == &pll3_usb_otg)
- return PLL3_USB_OTG;
- else if (pll == &pll4_audio)
- return PLL4_AUDIO;
- else if (pll == &pll5_video)
- return PLL5_VIDEO;
- else if (pll == &pll6_mlb)
- return PLL6_MLB;
- else if (pll == &pll7_usb_host)
- return PLL7_USB_HOST;
- else if (pll == &pll8_enet)
- return PLL8_ENET;
- else
- BUG();
-
- return NULL;
-}
-
-static int pll_enable(struct clk *clk)
-{
- int timeout = 0x100000;
- void __iomem *reg;
- u32 val;
-
- reg = pll_get_reg_addr(clk);
- val = readl_relaxed(reg);
- val &= ~BM_PLL_BYPASS;
- val &= ~BM_PLL_POWER_DOWN;
- /* 480MHz PLLs have the opposite definition for power bit */
- if (clk == &pll3_usb_otg || clk == &pll7_usb_host)
- val |= BM_PLL_POWER_DOWN;
- writel_relaxed(val, reg);
-
- /* Wait for PLL to lock */
- while (!(readl_relaxed(reg) & BM_PLL_LOCK) && --timeout)
- cpu_relax();
-
- if (unlikely(!timeout))
- return -EBUSY;
-
- /* Enable the PLL output now */
- val = readl_relaxed(reg);
- val |= BM_PLL_ENABLE;
- writel_relaxed(val, reg);
-
- return 0;
-}
-
-static void pll_disable(struct clk *clk)
-{
- void __iomem *reg;
- u32 val;
-
- reg = pll_get_reg_addr(clk);
- val = readl_relaxed(reg);
- val &= ~BM_PLL_ENABLE;
- val |= BM_PLL_BYPASS;
- val |= BM_PLL_POWER_DOWN;
- if (clk == &pll3_usb_otg || clk == &pll7_usb_host)
- val &= ~BM_PLL_POWER_DOWN;
- writel_relaxed(val, reg);
-}
-
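-/* PLL1 rate = parent_rate * DIV_SELECT / 2; set_rate constrains it to 650 MHz - 1.3 GHz */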
-static unsigned long pll1_sys_get_rate(struct clk *clk)
-{
- u32 div = (readl_relaxed(PLL1_SYS) & BM_PLL_SYS_DIV_SELECT) >>
- BP_PLL_SYS_DIV_SELECT;
-
- return clk_get_rate(clk->parent) * div / 2;
-}
-
-static int pll1_sys_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 val, div;
-
- if (rate < FREQ_650M || rate > FREQ_1300M)
- return -EINVAL;
-
- div = rate * 2 / clk_get_rate(clk->parent);
- val = readl_relaxed(PLL1_SYS);
- val &= ~BM_PLL_SYS_DIV_SELECT;
- val |= div << BP_PLL_SYS_DIV_SELECT;
- writel_relaxed(val, PLL1_SYS);
-
- return 0;
-}
-
-static unsigned long pll8_enet_get_rate(struct clk *clk)
-{
- u32 div = (readl_relaxed(PLL8_ENET) & BM_PLL_ENET_DIV_SELECT) >>
- BP_PLL_ENET_DIV_SELECT;
-
- switch (div) {
- case 0:
- return 25000000;
- case 1:
- return 50000000;
- case 2:
- return 100000000;
- case 3:
- return 125000000;
- }
-
- return 0;
-}
-
-static int pll8_enet_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 val, div;
-
- switch (rate) {
- case 25000000:
- div = 0;
- break;
- case 50000000:
- div = 1;
- break;
- case 100000000:
- div = 2;
- break;
- case 125000000:
- div = 3;
- break;
- default:
- return -EINVAL;
- }
-
- val = readl_relaxed(PLL8_ENET);
- val &= ~BM_PLL_ENET_DIV_SELECT;
- val |= div << BP_PLL_ENET_DIV_SELECT;
- writel_relaxed(val, PLL8_ENET);
-
- return 0;
-}
-
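-/* Audio/video PLL rate = parent_rate * div + (parent_rate / mfd) * mfn */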
-static unsigned long pll_av_get_rate(struct clk *clk)
-{
- void __iomem *reg = (clk == &pll4_audio) ? PLL4_AUDIO : PLL5_VIDEO;
- unsigned long parent_rate = clk_get_rate(clk->parent);
- u32 mfn = readl_relaxed(reg + PLL_NUM_OFFSET);
- u32 mfd = readl_relaxed(reg + PLL_DENOM_OFFSET);
- u32 div = (readl_relaxed(reg) & BM_PLL_AV_DIV_SELECT) >>
- BP_PLL_AV_DIV_SELECT;
-
- return (parent_rate * div) + ((parent_rate / mfd) * mfn);
-}
-
-static int pll_av_set_rate(struct clk *clk, unsigned long rate)
-{
- void __iomem *reg = (clk == &pll4_audio) ? PLL4_AUDIO : PLL5_VIDEO;
- unsigned int parent_rate = clk_get_rate(clk->parent);
- u32 val, div;
- u32 mfn, mfd = 1000000;
- s64 temp64;
-
- if (rate < FREQ_650M || rate > FREQ_1300M)
- return -EINVAL;
-
- div = rate / parent_rate;
- temp64 = (u64) (rate - div * parent_rate);
- temp64 *= mfd;
- do_div(temp64, parent_rate);
- mfn = temp64;
-
- val = readl_relaxed(reg);
- val &= ~BM_PLL_AV_DIV_SELECT;
- val |= div << BP_PLL_AV_DIV_SELECT;
- writel_relaxed(val, reg);
- writel_relaxed(mfn, reg + PLL_NUM_OFFSET);
- writel_relaxed(mfd, reg + PLL_DENOM_OFFSET);
-
- return 0;
-}
-
-static void __iomem *pll_get_div_reg_bit(struct clk *clk, u32 *bp, u32 *bm)
-{
- void __iomem *reg;
-
- if (clk == &pll2_bus) {
- reg = PLL2_BUS;
- *bp = BP_PLL_BUS_DIV_SELECT;
- *bm = BM_PLL_BUS_DIV_SELECT;
- } else if (clk == &pll3_usb_otg) {
- reg = PLL3_USB_OTG;
- *bp = BP_PLL_USB_DIV_SELECT;
- *bm = BM_PLL_USB_DIV_SELECT;
- } else if (clk == &pll7_usb_host) {
- reg = PLL7_USB_HOST;
- *bp = BP_PLL_USB_DIV_SELECT;
- *bm = BM_PLL_USB_DIV_SELECT;
- } else {
- BUG();
- }
-
- return reg;
-}
-
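-/* DIV_SELECT = 1 selects the x22 multiplier (528 MHz), 0 selects x20 (480 MHz) */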
-static unsigned long pll_get_rate(struct clk *clk)
-{
- void __iomem *reg;
- u32 div, bp, bm;
-
- reg = pll_get_div_reg_bit(clk, &bp, &bm);
- div = (readl_relaxed(reg) & bm) >> bp;
-
- return (div == 1) ? clk_get_rate(clk->parent) * 22 :
- clk_get_rate(clk->parent) * 20;
-}
-
-static int pll_set_rate(struct clk *clk, unsigned long rate)
-{
- void __iomem *reg;
- u32 val, div, bp, bm;
-
- if (rate == FREQ_528M)
- div = 1;
- else if (rate == FREQ_480M)
- div = 0;
- else
- return -EINVAL;
-
- reg = pll_get_div_reg_bit(clk, &bp, &bm);
- val = readl_relaxed(reg);
- val &= ~bm;
- val |= div << bp;
- writel_relaxed(val, reg);
-
- return 0;
-}
-
-#define pll2_bus_get_rate pll_get_rate
-#define pll2_bus_set_rate pll_set_rate
-#define pll3_usb_otg_get_rate pll_get_rate
-#define pll3_usb_otg_set_rate pll_set_rate
-#define pll7_usb_host_get_rate pll_get_rate
-#define pll7_usb_host_set_rate pll_set_rate
-#define pll4_audio_get_rate pll_av_get_rate
-#define pll4_audio_set_rate pll_av_set_rate
-#define pll5_video_get_rate pll_av_get_rate
-#define pll5_video_set_rate pll_av_set_rate
-#define pll6_mlb_get_rate NULL
-#define pll6_mlb_set_rate NULL
-
-#define DEF_PLL(name) \
- static struct clk name = { \
- .enable = pll_enable, \
- .disable = pll_disable, \
- .get_rate = name##_get_rate, \
- .set_rate = name##_set_rate, \
- .parent = &osc_clk, \
- }
-
-DEF_PLL(pll1_sys);
-DEF_PLL(pll2_bus);
-DEF_PLL(pll3_usb_otg);
-DEF_PLL(pll4_audio);
-DEF_PLL(pll5_video);
-DEF_PLL(pll6_mlb);
-DEF_PLL(pll7_usb_host);
-DEF_PLL(pll8_enet);
-
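-/* PFD output = parent_rate * 18 / frac, with frac clamped to the range 12..35 */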
-static unsigned long pfd_get_rate(struct clk *clk)
-{
- u64 tmp = (u64) clk_get_rate(clk->parent) * 18;
- u32 frac, bp_frac;
-
- if (apbh_dma_clk.usecount == 0)
- apbh_dma_clk.enable(&apbh_dma_clk);
-
- bp_frac = clk->enable_shift - 7;
- frac = readl_relaxed(clk->enable_reg) >> bp_frac & PFD_FRAC_MASK;
- do_div(tmp, frac);
-
- return tmp;
-}
-
-static int pfd_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 val, frac, bp_frac;
- u64 tmp = (u64) clk_get_rate(clk->parent) * 18;
-
- if (apbh_dma_clk.usecount == 0)
- apbh_dma_clk.enable(&apbh_dma_clk);
-
- /*
- * Round up the divider so that we don't set a rate
- * higher than what is requested
- */
- tmp += rate / 2;
- do_div(tmp, rate);
- frac = tmp;
- frac = (frac < 12) ? 12 : frac;
- frac = (frac > 35) ? 35 : frac;
-
- /*
- * The frac field always starts from 7 bits lower
- * position of enable bit
- */
- bp_frac = clk->enable_shift - 7;
- val = readl_relaxed(clk->enable_reg);
- val &= ~(PFD_FRAC_MASK << bp_frac);
- val |= frac << bp_frac;
- writel_relaxed(val, clk->enable_reg);
-
- tmp = (u64) clk_get_rate(clk->parent) * 18;
- do_div(tmp, frac);
-
- if (apbh_dma_clk.usecount == 0)
- apbh_dma_clk.disable(&apbh_dma_clk);
-
- return 0;
-}
-
-static unsigned long pfd_round_rate(struct clk *clk, unsigned long rate)
-{
- u32 frac;
- u64 tmp;
-
- tmp = (u64) clk_get_rate(clk->parent) * 18;
- tmp += rate / 2;
- do_div(tmp, rate);
- frac = tmp;
- frac = (frac < 12) ? 12 : frac;
- frac = (frac > 35) ? 35 : frac;
- tmp = (u64) clk_get_rate(clk->parent) * 18;
- do_div(tmp, frac);
-
- return tmp;
-}
-
-static int pfd_enable(struct clk *clk)
-{
- u32 val;
-
- if (apbh_dma_clk.usecount == 0)
- apbh_dma_clk.enable(&apbh_dma_clk);
-
- val = readl_relaxed(clk->enable_reg);
- val &= ~(1 << clk->enable_shift);
- writel_relaxed(val, clk->enable_reg);
-
- if (apbh_dma_clk.usecount == 0)
- apbh_dma_clk.disable(&apbh_dma_clk);
-
- return 0;
-}
-
-static void pfd_disable(struct clk *clk)
-{
- u32 val;
-
- if (apbh_dma_clk.usecount == 0)
- apbh_dma_clk.enable(&apbh_dma_clk);
-
- val = readl_relaxed(clk->enable_reg);
- val |= 1 << clk->enable_shift;
- writel_relaxed(val, clk->enable_reg);
-
- if (apbh_dma_clk.usecount == 0)
- apbh_dma_clk.disable(&apbh_dma_clk);
-}
-
-#define DEF_PFD(name, er, es, p) \
- static struct clk name = { \
- .enable_reg = er, \
- .enable_shift = es, \
- .enable = pfd_enable, \
- .disable = pfd_disable, \
- .get_rate = pfd_get_rate, \
- .set_rate = pfd_set_rate, \
- .round_rate = pfd_round_rate, \
- .parent = p, \
- }
-
-DEF_PFD(pll2_pfd_352m, PFD_528, PFD0, &pll2_bus);
-DEF_PFD(pll2_pfd_594m, PFD_528, PFD1, &pll2_bus);
-DEF_PFD(pll2_pfd_400m, PFD_528, PFD2, &pll2_bus);
-DEF_PFD(pll3_pfd_720m, PFD_480, PFD0, &pll3_usb_otg);
-DEF_PFD(pll3_pfd_540m, PFD_480, PFD1, &pll3_usb_otg);
-DEF_PFD(pll3_pfd_508m, PFD_480, PFD2, &pll3_usb_otg);
-DEF_PFD(pll3_pfd_454m, PFD_480, PFD3, &pll3_usb_otg);
-
-static unsigned long twd_clk_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 2;
-}
-
-static struct clk twd_clk = {
- .parent = &arm_clk,
- .get_rate = twd_clk_get_rate,
-};
-
-static unsigned long pll2_200m_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 2;
-}
-
-static struct clk pll2_200m = {
- .parent = &pll2_pfd_400m,
- .get_rate = pll2_200m_get_rate,
-};
-
-static unsigned long pll3_120m_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 4;
-}
-
-static struct clk pll3_120m = {
- .parent = &pll3_usb_otg,
- .get_rate = pll3_120m_get_rate,
-};
-
-static unsigned long pll3_80m_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 6;
-}
-
-static struct clk pll3_80m = {
- .parent = &pll3_usb_otg,
- .get_rate = pll3_80m_get_rate,
-};
-
-static unsigned long pll3_60m_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 8;
-}
-
-static struct clk pll3_60m = {
- .parent = &pll3_usb_otg,
- .get_rate = pll3_60m_get_rate,
-};
-
-static int pll1_sw_clk_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 val = readl_relaxed(CCSR);
-
- if (parent == &pll1_sys) {
- val &= ~BM_CCSR_PLL1_SW_SEL;
- val &= ~BM_CCSR_STEP_SEL;
- } else if (parent == &osc_clk) {
- val |= BM_CCSR_PLL1_SW_SEL;
- val &= ~BM_CCSR_STEP_SEL;
- } else if (parent == &pll2_pfd_400m) {
- val |= BM_CCSR_PLL1_SW_SEL;
- val |= BM_CCSR_STEP_SEL;
- } else {
- return -EINVAL;
- }
-
- writel_relaxed(val, CCSR);
-
- return 0;
-}
-
-static struct clk pll1_sw_clk = {
- .parent = &pll1_sys,
- .set_parent = pll1_sw_clk_set_parent,
-};
-
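-/* Split an overall divider into pred (1..8) and podf (1..64), minimizing the rounding error */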
-static void calc_pred_podf_dividers(u32 div, u32 *pred, u32 *podf)
-{
- u32 min_pred, temp_pred, old_err, err;
-
- if (div >= 512) {
- *pred = 8;
- *podf = 64;
- } else if (div >= 8) {
- min_pred = (div - 1) / 64 + 1;
- old_err = 8;
- for (temp_pred = 8; temp_pred >= min_pred; temp_pred--) {
- err = div % temp_pred;
- if (err == 0) {
- *pred = temp_pred;
- break;
- }
- err = temp_pred - err;
- if (err < old_err) {
- old_err = err;
- *pred = temp_pred;
- }
- }
- *podf = (div + *pred - 1) / *pred;
- } else if (div < 8) {
- *pred = div;
- *podf = 1;
- }
-}
-
-static int _clk_enable(struct clk *clk)
-{
- u32 reg;
- reg = readl_relaxed(clk->enable_reg);
- reg |= 0x3 << clk->enable_shift;
- writel_relaxed(reg, clk->enable_reg);
-
- return 0;
-}
-
-static void _clk_disable(struct clk *clk)
-{
- u32 reg;
- reg = readl_relaxed(clk->enable_reg);
- reg &= ~(0x3 << clk->enable_shift);
- writel_relaxed(reg, clk->enable_reg);
-}
-
-static int _clk_enable_1b(struct clk *clk)
-{
- u32 reg;
- reg = readl_relaxed(clk->enable_reg);
- reg |= 0x1 << clk->enable_shift;
- writel_relaxed(reg, clk->enable_reg);
-
- return 0;
-}
-
-static void _clk_disable_1b(struct clk *clk)
-{
- u32 reg;
- reg = readl_relaxed(clk->enable_reg);
- reg &= ~(0x1 << clk->enable_shift);
- writel_relaxed(reg, clk->enable_reg);
-}
-
-struct divider {
- struct clk *clk;
- void __iomem *reg;
- u32 bp_pred;
- u32 bm_pred;
- u32 bp_podf;
- u32 bm_podf;
-};
-
-#define DEF_CLK_DIV1(d, c, r, b) \
- static struct divider d = { \
- .clk = c, \
- .reg = r, \
- .bp_podf = BP_##r##_##b##_PODF, \
- .bm_podf = BM_##r##_##b##_PODF, \
- }
-
-DEF_CLK_DIV1(arm_div, &arm_clk, CACRR, ARM);
-DEF_CLK_DIV1(ipg_div, &ipg_clk, CBCDR, IPG);
-DEF_CLK_DIV1(ahb_div, &ahb_clk, CBCDR, AHB);
-DEF_CLK_DIV1(axi_div, &axi_clk, CBCDR, AXI);
-DEF_CLK_DIV1(mmdc_ch0_axi_div, &mmdc_ch0_axi_clk, CBCDR, MMDC_CH0_AXI);
-DEF_CLK_DIV1(mmdc_ch1_axi_div, &mmdc_ch1_axi_clk, CBCDR, MMDC_CH1_AXI);
-DEF_CLK_DIV1(periph_clk2_div, &periph_clk2_clk, CBCDR, PERIPH_CLK2);
-DEF_CLK_DIV1(periph2_clk2_div, &periph2_clk2_clk, CBCDR, PERIPH2_CLK2);
-DEF_CLK_DIV1(gpu2d_core_div, &gpu2d_core_clk, CBCMR, GPU2D_CORE);
-DEF_CLK_DIV1(gpu3d_core_div, &gpu3d_core_clk, CBCMR, GPU3D_CORE);
-DEF_CLK_DIV1(gpu3d_shader_div, &gpu3d_shader_clk, CBCMR, GPU3D_SHADER);
-DEF_CLK_DIV1(ipg_perclk_div, &ipg_perclk, CSCMR1, PERCLK);
-DEF_CLK_DIV1(emi_div, &emi_clk, CSCMR1, EMI);
-DEF_CLK_DIV1(emi_slow_div, &emi_slow_clk, CSCMR1, EMI_SLOW);
-DEF_CLK_DIV1(can_div, &can1_clk, CSCMR2, CAN);
-DEF_CLK_DIV1(uart_div, &uart_clk, CSCDR1, UART);
-DEF_CLK_DIV1(usdhc1_div, &usdhc1_clk, CSCDR1, USDHC1);
-DEF_CLK_DIV1(usdhc2_div, &usdhc2_clk, CSCDR1, USDHC2);
-DEF_CLK_DIV1(usdhc3_div, &usdhc3_clk, CSCDR1, USDHC3);
-DEF_CLK_DIV1(usdhc4_div, &usdhc4_clk, CSCDR1, USDHC4);
-DEF_CLK_DIV1(vpu_div, &vpu_clk, CSCDR1, VPU_AXI);
-DEF_CLK_DIV1(hsi_tx_div, &hsi_tx_clk, CDCDR, HSI_TX);
-DEF_CLK_DIV1(ipu1_di0_pre_div, &ipu1_di0_pre_clk, CHSCCDR, IPU1_DI0_PRE);
-DEF_CLK_DIV1(ipu1_di1_pre_div, &ipu1_di1_pre_clk, CHSCCDR, IPU1_DI1_PRE);
-DEF_CLK_DIV1(ipu2_di0_pre_div, &ipu2_di0_pre_clk, CSCDR2, IPU2_DI0_PRE);
-DEF_CLK_DIV1(ipu2_di1_pre_div, &ipu2_di1_pre_clk, CSCDR2, IPU2_DI1_PRE);
-DEF_CLK_DIV1(ipu1_div, &ipu1_clk, CSCDR3, IPU1_HSP);
-DEF_CLK_DIV1(ipu2_div, &ipu2_clk, CSCDR3, IPU2_HSP);
-DEF_CLK_DIV1(cko1_div, &cko1_clk, CCOSR, CKO1);
-
-#define DEF_CLK_DIV2(d, c, r, b) \
- static struct divider d = { \
- .clk = c, \
- .reg = r, \
- .bp_pred = BP_##r##_##b##_PRED, \
- .bm_pred = BM_##r##_##b##_PRED, \
- .bp_podf = BP_##r##_##b##_PODF, \
- .bm_podf = BM_##r##_##b##_PODF, \
- }
-
-DEF_CLK_DIV2(ssi1_div, &ssi1_clk, CS1CDR, SSI1);
-DEF_CLK_DIV2(ssi3_div, &ssi3_clk, CS1CDR, SSI3);
-DEF_CLK_DIV2(esai_div, &esai_clk, CS1CDR, ESAI);
-DEF_CLK_DIV2(ssi2_div, &ssi2_clk, CS2CDR, SSI2);
-DEF_CLK_DIV2(enfc_div, &enfc_clk, CS2CDR, ENFC);
-DEF_CLK_DIV2(spdif_div, &spdif_clk, CDCDR, SPDIF);
-DEF_CLK_DIV2(asrc_serial_div, &asrc_serial_clk, CDCDR, ASRC_SERIAL);
-
-static struct divider *dividers[] = {
- &arm_div,
- &ipg_div,
- &ahb_div,
- &axi_div,
- &mmdc_ch0_axi_div,
- &mmdc_ch1_axi_div,
- &periph_clk2_div,
- &periph2_clk2_div,
- &gpu2d_core_div,
- &gpu3d_core_div,
- &gpu3d_shader_div,
- &ipg_perclk_div,
- &emi_div,
- &emi_slow_div,
- &can_div,
- &uart_div,
- &usdhc1_div,
- &usdhc2_div,
- &usdhc3_div,
- &usdhc4_div,
- &vpu_div,
- &hsi_tx_div,
- &ipu1_di0_pre_div,
- &ipu1_di1_pre_div,
- &ipu2_di0_pre_div,
- &ipu2_di1_pre_div,
- &ipu1_div,
- &ipu2_div,
- &ssi1_div,
- &ssi3_div,
- &esai_div,
- &ssi2_div,
- &enfc_div,
- &spdif_div,
- &asrc_serial_div,
- &cko1_div,
-};
-
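-/* The LDB DI clock is the parent divided by 7 when the IPU_DIV bit is set, otherwise by 3.5 */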
-static unsigned long ldb_di_clk_get_rate(struct clk *clk)
-{
- u32 val = readl_relaxed(CSCMR2);
-
- val &= (clk == &ldb_di0_clk) ? BM_CSCMR2_LDB_DI0_IPU_DIV :
- BM_CSCMR2_LDB_DI1_IPU_DIV;
- if (val)
- return clk_get_rate(clk->parent) / 7;
- else
- return clk_get_rate(clk->parent) * 2 / 7;
-}
-
-static int ldb_di_clk_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long parent_rate = clk_get_rate(clk->parent);
- u32 val = readl_relaxed(CSCMR2);
-
- if (rate * 7 <= parent_rate + parent_rate / 20)
- val |= BM_CSCMR2_LDB_DI0_IPU_DIV;
- else
- val &= ~BM_CSCMR2_LDB_DI0_IPU_DIV;
-
- writel_relaxed(val, CSCMR2);
-
- return 0;
-}
-
-static unsigned long ldb_di_clk_round_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long parent_rate = clk_get_rate(clk->parent);
-
- if (rate * 7 <= parent_rate + parent_rate / 20)
- return parent_rate / 7;
- else
- return 2 * parent_rate / 7;
-}
-
-static unsigned long _clk_get_rate(struct clk *clk)
-{
- struct divider *d;
- u32 val, pred, podf;
- int i, num;
-
- if (clk == &ldb_di0_clk || clk == &ldb_di1_clk)
- return ldb_di_clk_get_rate(clk);
-
- num = ARRAY_SIZE(dividers);
- for (i = 0; i < num; i++)
- if (dividers[i]->clk == clk) {
- d = dividers[i];
- break;
- }
- if (i == num)
- return clk_get_rate(clk->parent);
-
- val = readl_relaxed(d->reg);
- pred = ((val & d->bm_pred) >> d->bp_pred) + 1;
- podf = ((val & d->bm_podf) >> d->bp_podf) + 1;
-
- return clk_get_rate(clk->parent) / (pred * podf);
-}
-
-static int clk_busy_wait(struct clk *clk)
-{
- int timeout = 0x100000;
- u32 bm;
-
- if (clk == &axi_clk)
- bm = BM_CDHIPR_AXI_PODF_BUSY;
- else if (clk == &ahb_clk)
- bm = BM_CDHIPR_AHB_PODF_BUSY;
- else if (clk == &mmdc_ch0_axi_clk)
- bm = BM_CDHIPR_MMDC_CH0_PODF_BUSY;
- else if (clk == &periph_clk)
- bm = BM_CDHIPR_PERIPH_SEL_BUSY;
- else if (clk == &arm_clk)
- bm = BM_CDHIPR_ARM_PODF_BUSY;
- else
- return -EINVAL;
-
- while ((readl_relaxed(CDHIPR) & bm) && --timeout)
- cpu_relax();
-
- if (unlikely(!timeout))
- return -EBUSY;
-
- return 0;
-}
-
-static int _clk_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long parent_rate = clk_get_rate(clk->parent);
- struct divider *d;
- u32 val, div, max_div, pred = 0, podf;
- int i, num;
-
- if (clk == &ldb_di0_clk || clk == &ldb_di1_clk)
- return ldb_di_clk_set_rate(clk, rate);
-
- num = ARRAY_SIZE(dividers);
- for (i = 0; i < num; i++)
- if (dividers[i]->clk == clk) {
- d = dividers[i];
- break;
- }
- if (i == num)
- return -EINVAL;
-
- max_div = ((d->bm_pred >> d->bp_pred) + 1) *
- ((d->bm_podf >> d->bp_podf) + 1);
-
- div = parent_rate / rate;
- if (div == 0)
- div++;
-
- if ((parent_rate / div != rate) || div > max_div)
- return -EINVAL;
-
- if (d->bm_pred) {
- calc_pred_podf_dividers(div, &pred, &podf);
- } else {
- pred = 1;
- podf = div;
- }
-
- val = readl_relaxed(d->reg);
- val &= ~(d->bm_pred | d->bm_podf);
- val |= (pred - 1) << d->bp_pred | (podf - 1) << d->bp_podf;
- writel_relaxed(val, d->reg);
-
- if (clk == &axi_clk || clk == &ahb_clk ||
- clk == &mmdc_ch0_axi_clk || clk == &arm_clk)
- return clk_busy_wait(clk);
-
- return 0;
-}
-
-static unsigned long _clk_round_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long parent_rate = clk_get_rate(clk->parent);
- u32 div = parent_rate / rate;
- u32 div_max, pred = 0, podf;
- struct divider *d;
- int i, num;
-
- if (clk == &ldb_di0_clk || clk == &ldb_di1_clk)
- return ldb_di_clk_round_rate(clk, rate);
-
- num = ARRAY_SIZE(dividers);
- for (i = 0; i < num; i++)
- if (dividers[i]->clk == clk) {
- d = dividers[i];
- break;
- }
- if (i == num)
- return -EINVAL;
-
- if (div == 0 || parent_rate % rate)
- div++;
-
- if (d->bm_pred) {
- calc_pred_podf_dividers(div, &pred, &podf);
- div = pred * podf;
- } else {
- div_max = (d->bm_podf >> d->bp_podf) + 1;
- if (div > div_max)
- div = div_max;
- }
-
- return parent_rate / div;
-}
-
-struct multiplexer {
- struct clk *clk;
- void __iomem *reg;
- u32 bp;
- u32 bm;
- int pnum;
- struct clk *parents[];
-};
-
-static struct multiplexer axi_mux = {
- .clk = &axi_clk,
- .reg = CBCDR,
- .bp = BP_CBCDR_AXI_SEL,
- .bm = BM_CBCDR_AXI_SEL,
- .parents = {
- &periph_clk,
- &pll2_pfd_400m,
- &pll3_pfd_540m,
- NULL
- },
-};
-
-static struct multiplexer periph_mux = {
- .clk = &periph_clk,
- .reg = CBCDR,
- .bp = BP_CBCDR_PERIPH_CLK_SEL,
- .bm = BM_CBCDR_PERIPH_CLK_SEL,
- .parents = {
- &periph_pre_clk,
- &periph_clk2_clk,
- NULL
- },
-};
-
-static struct multiplexer periph_pre_mux = {
- .clk = &periph_pre_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_PRE_PERIPH_CLK_SEL,
- .bm = BM_CBCMR_PRE_PERIPH_CLK_SEL,
- .parents = {
- &pll2_bus,
- &pll2_pfd_400m,
- &pll2_pfd_352m,
- &pll2_200m,
- NULL
- },
-};
-
-static struct multiplexer periph_clk2_mux = {
- .clk = &periph_clk2_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_PERIPH_CLK2_SEL,
- .bm = BM_CBCMR_PERIPH_CLK2_SEL,
- .parents = {
- &pll3_usb_otg,
- &osc_clk,
- NULL
- },
-};
-
-static struct multiplexer periph2_mux = {
- .clk = &periph2_clk,
- .reg = CBCDR,
- .bp = BP_CBCDR_PERIPH2_CLK_SEL,
- .bm = BM_CBCDR_PERIPH2_CLK_SEL,
- .parents = {
- &periph2_pre_clk,
- &periph2_clk2_clk,
- NULL
- },
-};
-
-static struct multiplexer periph2_pre_mux = {
- .clk = &periph2_pre_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_PRE_PERIPH2_CLK_SEL,
- .bm = BM_CBCMR_PRE_PERIPH2_CLK_SEL,
- .parents = {
- &pll2_bus,
- &pll2_pfd_400m,
- &pll2_pfd_352m,
- &pll2_200m,
- NULL
- },
-};
-
-static struct multiplexer periph2_clk2_mux = {
- .clk = &periph2_clk2_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_PERIPH2_CLK2_SEL,
- .bm = BM_CBCMR_PERIPH2_CLK2_SEL,
- .parents = {
- &pll3_usb_otg,
- &osc_clk,
- NULL
- },
-};
-
-static struct multiplexer gpu2d_axi_mux = {
- .clk = &gpu2d_axi_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_GPU2D_AXI_SEL,
- .bm = BM_CBCMR_GPU2D_AXI_SEL,
- .parents = {
- &axi_clk,
- &ahb_clk,
- NULL
- },
-};
-
-static struct multiplexer gpu3d_axi_mux = {
- .clk = &gpu3d_axi_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_GPU3D_AXI_SEL,
- .bm = BM_CBCMR_GPU3D_AXI_SEL,
- .parents = {
- &axi_clk,
- &ahb_clk,
- NULL
- },
-};
-
-static struct multiplexer gpu3d_core_mux = {
- .clk = &gpu3d_core_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_GPU3D_CORE_SEL,
- .bm = BM_CBCMR_GPU3D_CORE_SEL,
- .parents = {
- &mmdc_ch0_axi_clk,
- &pll3_usb_otg,
- &pll2_pfd_594m,
- &pll2_pfd_400m,
- NULL
- },
-};
-
-static struct multiplexer gpu3d_shader_mux = {
- .clk = &gpu3d_shader_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_GPU3D_SHADER_SEL,
- .bm = BM_CBCMR_GPU3D_SHADER_SEL,
- .parents = {
- &mmdc_ch0_axi_clk,
- &pll3_usb_otg,
- &pll2_pfd_594m,
- &pll3_pfd_720m,
- NULL
- },
-};
-
-static struct multiplexer pcie_axi_mux = {
- .clk = &pcie_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_PCIE_AXI_SEL,
- .bm = BM_CBCMR_PCIE_AXI_SEL,
- .parents = {
- &axi_clk,
- &ahb_clk,
- NULL
- },
-};
-
-static struct multiplexer vdo_axi_mux = {
- .clk = &vdo_axi_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_VDO_AXI_SEL,
- .bm = BM_CBCMR_VDO_AXI_SEL,
- .parents = {
- &axi_clk,
- &ahb_clk,
- NULL
- },
-};
-
-static struct multiplexer vpu_axi_mux = {
- .clk = &vpu_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_VPU_AXI_SEL,
- .bm = BM_CBCMR_VPU_AXI_SEL,
- .parents = {
- &axi_clk,
- &pll2_pfd_400m,
- &pll2_pfd_352m,
- NULL
- },
-};
-
-static struct multiplexer gpu2d_core_mux = {
- .clk = &gpu2d_core_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_GPU2D_CORE_SEL,
- .bm = BM_CBCMR_GPU2D_CORE_SEL,
- .parents = {
- &axi_clk,
- &pll3_usb_otg,
- &pll2_pfd_352m,
- &pll2_pfd_400m,
- NULL
- },
-};
-
-#define DEF_SSI_MUX(id) \
- static struct multiplexer ssi##id##_mux = { \
- .clk = &ssi##id##_clk, \
- .reg = CSCMR1, \
- .bp = BP_CSCMR1_SSI##id##_SEL, \
- .bm = BM_CSCMR1_SSI##id##_SEL, \
- .parents = { \
- &pll3_pfd_508m, \
- &pll3_pfd_454m, \
- &pll4_audio, \
- NULL \
- }, \
- }
-
-DEF_SSI_MUX(1);
-DEF_SSI_MUX(2);
-DEF_SSI_MUX(3);
-
-#define DEF_USDHC_MUX(id) \
- static struct multiplexer usdhc##id##_mux = { \
- .clk = &usdhc##id##_clk, \
- .reg = CSCMR1, \
- .bp = BP_CSCMR1_USDHC##id##_SEL, \
- .bm = BM_CSCMR1_USDHC##id##_SEL, \
- .parents = { \
- &pll2_pfd_400m, \
- &pll2_pfd_352m, \
- NULL \
- }, \
- }
-
-DEF_USDHC_MUX(1);
-DEF_USDHC_MUX(2);
-DEF_USDHC_MUX(3);
-DEF_USDHC_MUX(4);
-
-static struct multiplexer emi_mux = {
- .clk = &emi_clk,
- .reg = CSCMR1,
- .bp = BP_CSCMR1_EMI_SEL,
- .bm = BM_CSCMR1_EMI_SEL,
- .parents = {
- &axi_clk,
- &pll3_usb_otg,
- &pll2_pfd_400m,
- &pll2_pfd_352m,
- NULL
- },
-};
-
-static struct multiplexer emi_slow_mux = {
- .clk = &emi_slow_clk,
- .reg = CSCMR1,
- .bp = BP_CSCMR1_EMI_SLOW_SEL,
- .bm = BM_CSCMR1_EMI_SLOW_SEL,
- .parents = {
- &axi_clk,
- &pll3_usb_otg,
- &pll2_pfd_400m,
- &pll2_pfd_352m,
- NULL
- },
-};
-
-static struct multiplexer esai_mux = {
- .clk = &esai_clk,
- .reg = CSCMR2,
- .bp = BP_CSCMR2_ESAI_SEL,
- .bm = BM_CSCMR2_ESAI_SEL,
- .parents = {
- &pll4_audio,
- &pll3_pfd_508m,
- &pll3_pfd_454m,
- &pll3_usb_otg,
- NULL
- },
-};
-
-#define DEF_LDB_DI_MUX(id) \
- static struct multiplexer ldb_di##id##_mux = { \
- .clk = &ldb_di##id##_clk, \
- .reg = CS2CDR, \
- .bp = BP_CS2CDR_LDB_DI##id##_SEL, \
- .bm = BM_CS2CDR_LDB_DI##id##_SEL, \
- .parents = { \
- &pll5_video, \
- &pll2_pfd_352m, \
- &pll2_pfd_400m, \
- &pll3_pfd_540m, \
- &pll3_usb_otg, \
- NULL \
- }, \
- }
-
-DEF_LDB_DI_MUX(0);
-DEF_LDB_DI_MUX(1);
-
-static struct multiplexer enfc_mux = {
- .clk = &enfc_clk,
- .reg = CS2CDR,
- .bp = BP_CS2CDR_ENFC_SEL,
- .bm = BM_CS2CDR_ENFC_SEL,
- .parents = {
- &pll2_pfd_352m,
- &pll2_bus,
- &pll3_usb_otg,
- &pll2_pfd_400m,
- NULL
- },
-};
-
-static struct multiplexer spdif_mux = {
- .clk = &spdif_clk,
- .reg = CDCDR,
- .bp = BP_CDCDR_SPDIF_SEL,
- .bm = BM_CDCDR_SPDIF_SEL,
- .parents = {
- &pll4_audio,
- &pll3_pfd_508m,
- &pll3_pfd_454m,
- &pll3_usb_otg,
- NULL
- },
-};
-
-static struct multiplexer asrc_serial_mux = {
- .clk = &asrc_serial_clk,
- .reg = CDCDR,
- .bp = BP_CDCDR_ASRC_SERIAL_SEL,
- .bm = BM_CDCDR_ASRC_SERIAL_SEL,
- .parents = {
- &pll4_audio,
- &pll3_pfd_508m,
- &pll3_pfd_454m,
- &pll3_usb_otg,
- NULL
- },
-};
-
-static struct multiplexer hsi_tx_mux = {
- .clk = &hsi_tx_clk,
- .reg = CDCDR,
- .bp = BP_CDCDR_HSI_TX_SEL,
- .bm = BM_CDCDR_HSI_TX_SEL,
- .parents = {
- &pll3_120m,
- &pll2_pfd_400m,
- NULL
- },
-};
-
-#define DEF_IPU_DI_PRE_MUX(r, i, d) \
- static struct multiplexer ipu##i##_di##d##_pre_mux = { \
- .clk = &ipu##i##_di##d##_pre_clk, \
- .reg = r, \
- .bp = BP_##r##_IPU##i##_DI##d##_PRE_SEL, \
- .bm = BM_##r##_IPU##i##_DI##d##_PRE_SEL, \
- .parents = { \
- &mmdc_ch0_axi_clk, \
- &pll3_usb_otg, \
- &pll5_video, \
- &pll2_pfd_352m, \
- &pll2_pfd_400m, \
- &pll3_pfd_540m, \
- NULL \
- }, \
- }
-
-DEF_IPU_DI_PRE_MUX(CHSCCDR, 1, 0);
-DEF_IPU_DI_PRE_MUX(CHSCCDR, 1, 1);
-DEF_IPU_DI_PRE_MUX(CSCDR2, 2, 0);
-DEF_IPU_DI_PRE_MUX(CSCDR2, 2, 1);
-
-#define DEF_IPU_DI_MUX(r, i, d) \
- static struct multiplexer ipu##i##_di##d##_mux = { \
- .clk = &ipu##i##_di##d##_clk, \
- .reg = r, \
- .bp = BP_##r##_IPU##i##_DI##d##_SEL, \
- .bm = BM_##r##_IPU##i##_DI##d##_SEL, \
- .parents = { \
- &ipu##i##_di##d##_pre_clk, \
- &dummy_clk, \
- &dummy_clk, \
- &ldb_di0_clk, \
- &ldb_di1_clk, \
- NULL \
- }, \
- }
-
-DEF_IPU_DI_MUX(CHSCCDR, 1, 0);
-DEF_IPU_DI_MUX(CHSCCDR, 1, 1);
-DEF_IPU_DI_MUX(CSCDR2, 2, 0);
-DEF_IPU_DI_MUX(CSCDR2, 2, 1);
-
-#define DEF_IPU_MUX(id) \
- static struct multiplexer ipu##id##_mux = { \
- .clk = &ipu##id##_clk, \
- .reg = CSCDR3, \
- .bp = BP_CSCDR3_IPU##id##_HSP_SEL, \
- .bm = BM_CSCDR3_IPU##id##_HSP_SEL, \
- .parents = { \
- &mmdc_ch0_axi_clk, \
- &pll2_pfd_400m, \
- &pll3_120m, \
- &pll3_pfd_540m, \
- NULL \
- }, \
- }
-
-DEF_IPU_MUX(1);
-DEF_IPU_MUX(2);
-
-static struct multiplexer cko1_mux = {
- .clk = &cko1_clk,
- .reg = CCOSR,
- .bp = BP_CCOSR_CKO1_SEL,
- .bm = BM_CCOSR_CKO1_SEL,
- .parents = {
- &pll3_usb_otg,
- &pll2_bus,
- &pll1_sys,
- &pll5_video,
- &dummy_clk,
- &axi_clk,
- &enfc_clk,
- &ipu1_di0_clk,
- &ipu1_di1_clk,
- &ipu2_di0_clk,
- &ipu2_di1_clk,
- &ahb_clk,
- &ipg_clk,
- &ipg_perclk,
- &ckil_clk,
- &pll4_audio,
- NULL
- },
-};
-
-static struct multiplexer *multiplexers[] = {
- &axi_mux,
- &periph_mux,
- &periph_pre_mux,
- &periph_clk2_mux,
- &periph2_mux,
- &periph2_pre_mux,
- &periph2_clk2_mux,
- &gpu2d_axi_mux,
- &gpu3d_axi_mux,
- &gpu3d_core_mux,
- &gpu3d_shader_mux,
- &pcie_axi_mux,
- &vdo_axi_mux,
- &vpu_axi_mux,
- &gpu2d_core_mux,
- &ssi1_mux,
- &ssi2_mux,
- &ssi3_mux,
- &usdhc1_mux,
- &usdhc2_mux,
- &usdhc3_mux,
- &usdhc4_mux,
- &emi_mux,
- &emi_slow_mux,
- &esai_mux,
- &ldb_di0_mux,
- &ldb_di1_mux,
- &enfc_mux,
- &spdif_mux,
- &asrc_serial_mux,
- &hsi_tx_mux,
- &ipu1_di0_pre_mux,
- &ipu1_di0_mux,
- &ipu1_di1_pre_mux,
- &ipu1_di1_mux,
- &ipu2_di0_pre_mux,
- &ipu2_di0_mux,
- &ipu2_di1_pre_mux,
- &ipu2_di1_mux,
- &ipu1_mux,
- &ipu2_mux,
- &cko1_mux,
-};
-
-static int _clk_set_parent(struct clk *clk, struct clk *parent)
-{
- struct multiplexer *m;
- int i, num;
- u32 val;
-
- num = ARRAY_SIZE(multiplexers);
- for (i = 0; i < num; i++)
- if (multiplexers[i]->clk == clk) {
- m = multiplexers[i];
- break;
- }
- if (i == num)
- return -EINVAL;
-
- i = 0;
- while (m->parents[i]) {
- if (parent == m->parents[i])
- break;
- i++;
- }
- if (!m->parents[i] || m->parents[i] == &dummy_clk)
- return -EINVAL;
-
- val = readl_relaxed(m->reg);
- val &= ~m->bm;
- val |= i << m->bp;
- writel_relaxed(val, m->reg);
-
- if (clk == &periph_clk)
- return clk_busy_wait(clk);
-
- return 0;
-}
-
-#define DEF_NG_CLK(name, p) \
- static struct clk name = { \
- .get_rate = _clk_get_rate, \
- .set_rate = _clk_set_rate, \
- .round_rate = _clk_round_rate, \
- .set_parent = _clk_set_parent, \
- .parent = p, \
- }
-
-DEF_NG_CLK(periph_clk2_clk, &osc_clk);
-DEF_NG_CLK(periph_pre_clk, &pll2_bus);
-DEF_NG_CLK(periph_clk, &periph_pre_clk);
-DEF_NG_CLK(periph2_clk2_clk, &osc_clk);
-DEF_NG_CLK(periph2_pre_clk, &pll2_bus);
-DEF_NG_CLK(periph2_clk, &periph2_pre_clk);
-DEF_NG_CLK(axi_clk, &periph_clk);
-DEF_NG_CLK(emi_clk, &axi_clk);
-DEF_NG_CLK(arm_clk, &pll1_sw_clk);
-DEF_NG_CLK(ahb_clk, &periph_clk);
-DEF_NG_CLK(ipg_clk, &ahb_clk);
-DEF_NG_CLK(ipg_perclk, &ipg_clk);
-DEF_NG_CLK(ipu1_di0_pre_clk, &pll3_pfd_540m);
-DEF_NG_CLK(ipu1_di1_pre_clk, &pll3_pfd_540m);
-DEF_NG_CLK(ipu2_di0_pre_clk, &pll3_pfd_540m);
-DEF_NG_CLK(ipu2_di1_pre_clk, &pll3_pfd_540m);
-DEF_NG_CLK(asrc_serial_clk, &pll3_usb_otg);
-
-#define DEF_CLK(name, er, es, p, s) \
- static struct clk name = { \
- .enable_reg = er, \
- .enable_shift = es, \
- .enable = _clk_enable, \
- .disable = _clk_disable, \
- .get_rate = _clk_get_rate, \
- .set_rate = _clk_set_rate, \
- .round_rate = _clk_round_rate, \
- .set_parent = _clk_set_parent, \
- .parent = p, \
- .secondary = s, \
- }
-
-#define DEF_CLK_1B(name, er, es, p, s) \
- static struct clk name = { \
- .enable_reg = er, \
- .enable_shift = es, \
- .enable = _clk_enable_1b, \
- .disable = _clk_disable_1b, \
- .get_rate = _clk_get_rate, \
- .set_rate = _clk_set_rate, \
- .round_rate = _clk_round_rate, \
- .set_parent = _clk_set_parent, \
- .parent = p, \
- .secondary = s, \
- }
-
-DEF_CLK(aips_tz1_clk, CCGR0, CG0, &ahb_clk, NULL);
-DEF_CLK(aips_tz2_clk, CCGR0, CG1, &ahb_clk, NULL);
-DEF_CLK(apbh_dma_clk, CCGR0, CG2, &ahb_clk, NULL);
-DEF_CLK(asrc_clk, CCGR0, CG3, &pll4_audio, NULL);
-DEF_CLK(can1_serial_clk, CCGR0, CG8, &pll3_usb_otg, NULL);
-DEF_CLK(can1_clk, CCGR0, CG7, &pll3_usb_otg, &can1_serial_clk);
-DEF_CLK(can2_serial_clk, CCGR0, CG10, &pll3_usb_otg, NULL);
-DEF_CLK(can2_clk, CCGR0, CG9, &pll3_usb_otg, &can2_serial_clk);
-DEF_CLK(ecspi1_clk, CCGR1, CG0, &pll3_60m, NULL);
-DEF_CLK(ecspi2_clk, CCGR1, CG1, &pll3_60m, NULL);
-DEF_CLK(ecspi3_clk, CCGR1, CG2, &pll3_60m, NULL);
-DEF_CLK(ecspi4_clk, CCGR1, CG3, &pll3_60m, NULL);
-DEF_CLK(ecspi5_clk, CCGR1, CG4, &pll3_60m, NULL);
-DEF_CLK(enet_clk, CCGR1, CG5, &ipg_clk, NULL);
-DEF_CLK(esai_clk, CCGR1, CG8, &pll3_usb_otg, NULL);
-DEF_CLK(gpt_serial_clk, CCGR1, CG11, &ipg_perclk, NULL);
-DEF_CLK(gpt_clk, CCGR1, CG10, &ipg_perclk, &gpt_serial_clk);
-DEF_CLK(gpu2d_core_clk, CCGR1, CG12, &pll2_pfd_352m, &gpu2d_axi_clk);
-DEF_CLK(gpu3d_core_clk, CCGR1, CG13, &pll2_pfd_594m, &gpu3d_axi_clk);
-DEF_CLK(gpu3d_shader_clk, CCGR1, CG13, &pll3_pfd_720m, &gpu3d_axi_clk);
-DEF_CLK(hdmi_iahb_clk, CCGR2, CG0, &ahb_clk, NULL);
-DEF_CLK(hdmi_isfr_clk, CCGR2, CG2, &pll3_pfd_540m, &hdmi_iahb_clk);
-DEF_CLK(i2c1_clk, CCGR2, CG3, &ipg_perclk, NULL);
-DEF_CLK(i2c2_clk, CCGR2, CG4, &ipg_perclk, NULL);
-DEF_CLK(i2c3_clk, CCGR2, CG5, &ipg_perclk, NULL);
-DEF_CLK(iim_clk, CCGR2, CG6, &ipg_clk, NULL);
-DEF_CLK(enfc_clk, CCGR2, CG7, &pll2_pfd_352m, NULL);
-DEF_CLK(ipu1_clk, CCGR3, CG0, &mmdc_ch0_axi_clk, NULL);
-DEF_CLK(ipu1_di0_clk, CCGR3, CG1, &ipu1_di0_pre_clk, NULL);
-DEF_CLK(ipu1_di1_clk, CCGR3, CG2, &ipu1_di1_pre_clk, NULL);
-DEF_CLK(ipu2_clk, CCGR3, CG3, &mmdc_ch0_axi_clk, NULL);
-DEF_CLK(ipu2_di0_clk, CCGR3, CG4, &ipu2_di0_pre_clk, NULL);
-DEF_CLK(ipu2_di1_clk, CCGR3, CG5, &ipu2_di1_pre_clk, NULL);
-DEF_CLK(ldb_di0_clk, CCGR3, CG6, &pll3_pfd_540m, NULL);
-DEF_CLK(ldb_di1_clk, CCGR3, CG7, &pll3_pfd_540m, NULL);
-DEF_CLK(hsi_tx_clk, CCGR3, CG8, &pll2_pfd_400m, NULL);
-DEF_CLK(mlb_clk, CCGR3, CG9, &pll6_mlb, NULL);
-DEF_CLK(mmdc_ch0_ipg_clk, CCGR3, CG12, &ipg_clk, NULL);
-DEF_CLK(mmdc_ch0_axi_clk, CCGR3, CG10, &periph_clk, &mmdc_ch0_ipg_clk);
-DEF_CLK(mmdc_ch1_ipg_clk, CCGR3, CG13, &ipg_clk, NULL);
-DEF_CLK(mmdc_ch1_axi_clk, CCGR3, CG11, &periph2_clk, &mmdc_ch1_ipg_clk);
-DEF_CLK(openvg_axi_clk, CCGR3, CG13, &axi_clk, NULL);
-DEF_CLK(pwm1_clk, CCGR4, CG8, &ipg_perclk, NULL);
-DEF_CLK(pwm2_clk, CCGR4, CG9, &ipg_perclk, NULL);
-DEF_CLK(pwm3_clk, CCGR4, CG10, &ipg_perclk, NULL);
-DEF_CLK(pwm4_clk, CCGR4, CG11, &ipg_perclk, NULL);
-DEF_CLK(gpmi_bch_apb_clk, CCGR4, CG12, &usdhc3_clk, NULL);
-DEF_CLK(gpmi_bch_clk, CCGR4, CG13, &usdhc4_clk, &gpmi_bch_apb_clk);
-DEF_CLK(gpmi_apb_clk, CCGR4, CG15, &usdhc3_clk, &gpmi_bch_clk);
-DEF_CLK(gpmi_io_clk, CCGR4, CG14, &enfc_clk, &gpmi_apb_clk);
-DEF_CLK(sdma_clk, CCGR5, CG3, &ahb_clk, NULL);
-DEF_CLK(spba_clk, CCGR5, CG6, &ipg_clk, NULL);
-DEF_CLK(spdif_clk, CCGR5, CG7, &pll3_usb_otg, &spba_clk);
-DEF_CLK(ssi1_clk, CCGR5, CG9, &pll3_pfd_508m, NULL);
-DEF_CLK(ssi2_clk, CCGR5, CG10, &pll3_pfd_508m, NULL);
-DEF_CLK(ssi3_clk, CCGR5, CG11, &pll3_pfd_508m, NULL);
-DEF_CLK(uart_serial_clk, CCGR5, CG13, &pll3_usb_otg, NULL);
-DEF_CLK(uart_clk, CCGR5, CG12, &pll3_80m, &uart_serial_clk);
-DEF_CLK(usboh3_clk, CCGR6, CG0, &ipg_clk, NULL);
-DEF_CLK(usdhc1_clk, CCGR6, CG1, &pll2_pfd_400m, NULL);
-DEF_CLK(usdhc2_clk, CCGR6, CG2, &pll2_pfd_400m, NULL);
-DEF_CLK(usdhc3_clk, CCGR6, CG3, &pll2_pfd_400m, NULL);
-DEF_CLK(usdhc4_clk, CCGR6, CG4, &pll2_pfd_400m, NULL);
-DEF_CLK(emi_slow_clk, CCGR6, CG5, &axi_clk, NULL);
-DEF_CLK(vdo_axi_clk, CCGR6, CG6, &axi_clk, NULL);
-DEF_CLK(vpu_clk, CCGR6, CG7, &axi_clk, NULL);
-DEF_CLK_1B(cko1_clk, CCOSR, BP_CCOSR_CKO1_EN, &pll2_bus, NULL);
-
-static int pcie_clk_enable(struct clk *clk)
-{
- u32 val;
-
- val = readl_relaxed(PLL8_ENET);
- val |= BM_PLL_ENET_EN_PCIE;
- writel_relaxed(val, PLL8_ENET);
-
- return _clk_enable(clk);
-}
-
-static void pcie_clk_disable(struct clk *clk)
-{
- u32 val;
-
- _clk_disable(clk);
-
- val = readl_relaxed(PLL8_ENET);
- val &= ~BM_PLL_ENET_EN_PCIE;
- writel_relaxed(val, PLL8_ENET);
-}
-
-static struct clk pcie_clk = {
- .enable_reg = CCGR4,
- .enable_shift = CG0,
- .enable = pcie_clk_enable,
- .disable = pcie_clk_disable,
- .set_parent = _clk_set_parent,
- .parent = &axi_clk,
- .secondary = &pll8_enet,
-};
-
-static int sata_clk_enable(struct clk *clk)
-{
- u32 val;
-
- val = readl_relaxed(PLL8_ENET);
- val |= BM_PLL_ENET_EN_SATA;
- writel_relaxed(val, PLL8_ENET);
-
- return _clk_enable(clk);
-}
-
-static void sata_clk_disable(struct clk *clk)
-{
- u32 val;
-
- _clk_disable(clk);
-
- val = readl_relaxed(PLL8_ENET);
- val &= ~BM_PLL_ENET_EN_SATA;
- writel_relaxed(val, PLL8_ENET);
-}
-
-static struct clk sata_clk = {
- .enable_reg = CCGR5,
- .enable_shift = CG2,
- .enable = sata_clk_enable,
- .disable = sata_clk_disable,
- .parent = &ipg_clk,
- .secondary = &pll8_enet,
-};
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- }
-
-static struct clk_lookup lookups[] = {
- _REGISTER_CLOCK("2020000.uart", NULL, uart_clk),
- _REGISTER_CLOCK("21e8000.uart", NULL, uart_clk),
- _REGISTER_CLOCK("21ec000.uart", NULL, uart_clk),
- _REGISTER_CLOCK("21f0000.uart", NULL, uart_clk),
- _REGISTER_CLOCK("21f4000.uart", NULL, uart_clk),
- _REGISTER_CLOCK("2188000.enet", NULL, enet_clk),
- _REGISTER_CLOCK("2190000.usdhc", NULL, usdhc1_clk),
- _REGISTER_CLOCK("2194000.usdhc", NULL, usdhc2_clk),
- _REGISTER_CLOCK("2198000.usdhc", NULL, usdhc3_clk),
- _REGISTER_CLOCK("219c000.usdhc", NULL, usdhc4_clk),
- _REGISTER_CLOCK("21a0000.i2c", NULL, i2c1_clk),
- _REGISTER_CLOCK("21a4000.i2c", NULL, i2c2_clk),
- _REGISTER_CLOCK("21a8000.i2c", NULL, i2c3_clk),
- _REGISTER_CLOCK("2008000.ecspi", NULL, ecspi1_clk),
- _REGISTER_CLOCK("200c000.ecspi", NULL, ecspi2_clk),
- _REGISTER_CLOCK("2010000.ecspi", NULL, ecspi3_clk),
- _REGISTER_CLOCK("2014000.ecspi", NULL, ecspi4_clk),
- _REGISTER_CLOCK("2018000.ecspi", NULL, ecspi5_clk),
- _REGISTER_CLOCK("20ec000.sdma", NULL, sdma_clk),
- _REGISTER_CLOCK("20bc000.wdog", NULL, dummy_clk),
- _REGISTER_CLOCK("20c0000.wdog", NULL, dummy_clk),
- _REGISTER_CLOCK("smp_twd", NULL, twd_clk),
- _REGISTER_CLOCK(NULL, "ckih", ckih_clk),
- _REGISTER_CLOCK(NULL, "ckil_clk", ckil_clk),
- _REGISTER_CLOCK(NULL, "aips_tz1_clk", aips_tz1_clk),
- _REGISTER_CLOCK(NULL, "aips_tz2_clk", aips_tz2_clk),
- _REGISTER_CLOCK(NULL, "asrc_clk", asrc_clk),
- _REGISTER_CLOCK(NULL, "can2_clk", can2_clk),
- _REGISTER_CLOCK(NULL, "hdmi_isfr_clk", hdmi_isfr_clk),
- _REGISTER_CLOCK(NULL, "iim_clk", iim_clk),
- _REGISTER_CLOCK(NULL, "mlb_clk", mlb_clk),
- _REGISTER_CLOCK(NULL, "openvg_axi_clk", openvg_axi_clk),
- _REGISTER_CLOCK(NULL, "pwm1_clk", pwm1_clk),
- _REGISTER_CLOCK(NULL, "pwm2_clk", pwm2_clk),
- _REGISTER_CLOCK(NULL, "pwm3_clk", pwm3_clk),
- _REGISTER_CLOCK(NULL, "pwm4_clk", pwm4_clk),
- _REGISTER_CLOCK(NULL, "gpmi_io_clk", gpmi_io_clk),
- _REGISTER_CLOCK(NULL, "usboh3_clk", usboh3_clk),
- _REGISTER_CLOCK(NULL, "sata_clk", sata_clk),
- _REGISTER_CLOCK(NULL, "cko1_clk", cko1_clk),
-};
-
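-/* Program the CLPCR low-power mode field: 0 = run/wait clocked, 1 = wait unclocked, 2 = stop */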
-int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
-{
- u32 val = readl_relaxed(CLPCR);
-
- val &= ~BM_CLPCR_LPM;
- switch (mode) {
- case WAIT_CLOCKED:
- break;
- case WAIT_UNCLOCKED:
- val |= 0x1 << BP_CLPCR_LPM;
- break;
- case STOP_POWER_ON:
- val |= 0x2 << BP_CLPCR_LPM;
- break;
- case WAIT_UNCLOCKED_POWER_OFF:
- val |= 0x1 << BP_CLPCR_LPM;
- val &= ~BM_CLPCR_VSTBY;
- val &= ~BM_CLPCR_SBYOS;
- break;
- case STOP_POWER_OFF:
- val |= 0x2 << BP_CLPCR_LPM;
- val |= 0x3 << BP_CLPCR_STBY_COUNT;
- val |= BM_CLPCR_VSTBY;
- val |= BM_CLPCR_SBYOS;
- break;
- default:
- return -EINVAL;
- }
- writel_relaxed(val, CLPCR);
-
- return 0;
-}
-
-static struct map_desc imx6q_clock_desc[] = {
- imx_map_entry(MX6Q, CCM, MT_DEVICE),
- imx_map_entry(MX6Q, ANATOP, MT_DEVICE),
-};
-
-void __init imx6q_clock_map_io(void)
-{
- iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
-}
-
-int __init mx6q_clocks_init(void)
-{
- struct device_node *np;
- void __iomem *base;
- int i, irq;
-
- /* retrieve the frequency of fixed clocks from device tree */
- for_each_compatible_node(np, NULL, "fixed-clock") {
- u32 rate;
- if (of_property_read_u32(np, "clock-frequency", &rate))
- continue;
-
- if (of_device_is_compatible(np, "fsl,imx-ckil"))
- external_low_reference = rate;
- else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
- external_high_reference = rate;
- else if (of_device_is_compatible(np, "fsl,imx-osc"))
- oscillator_reference = rate;
- }
-
- for (i = 0; i < ARRAY_SIZE(lookups); i++)
- clkdev_add(&lookups[i]);
-
- /* only keep necessary clocks on */
- writel_relaxed(0x3 << CG0 | 0x3 << CG1 | 0x3 << CG2, CCGR0);
- writel_relaxed(0x3 << CG8 | 0x3 << CG9 | 0x3 << CG10, CCGR2);
- writel_relaxed(0x3 << CG10 | 0x3 << CG12, CCGR3);
- writel_relaxed(0x3 << CG4 | 0x3 << CG6 | 0x3 << CG7, CCGR4);
- writel_relaxed(0x3 << CG0, CCGR5);
- writel_relaxed(0, CCGR6);
- writel_relaxed(0, CCGR7);
-
- clk_enable(&uart_clk);
- clk_enable(&mmdc_ch0_axi_clk);
-
- clk_set_rate(&pll4_audio, FREQ_650M);
- clk_set_rate(&pll5_video, FREQ_650M);
- clk_set_parent(&ipu1_di0_clk, &ipu1_di0_pre_clk);
- clk_set_parent(&ipu1_di0_pre_clk, &pll5_video);
- clk_set_parent(&gpu3d_shader_clk, &pll2_pfd_594m);
- clk_set_rate(&gpu3d_shader_clk, FREQ_594M);
- clk_set_parent(&gpu3d_core_clk, &mmdc_ch0_axi_clk);
- clk_set_rate(&gpu3d_core_clk, FREQ_528M);
- clk_set_parent(&asrc_serial_clk, &pll3_usb_otg);
- clk_set_rate(&asrc_serial_clk, 1500000);
- clk_set_rate(&enfc_clk, 11000000);
-
- /*
- * Before the pinctrl API is available, we have to rely on the pad
- * configuration set up by the bootloader. For the usdhc case here,
- * U-Boot sets up the pads for 49.5 MHz, so we have to lower the
- * usdhc clock from 198 MHz to 49.5 MHz to match the pad configuration.
- *
- * FIXME: This should be removed once the pinctrl API is available.
- * At that point, the usdhc driver can call the pinctrl API to change
- * the pad configuration dynamically for different usdhc clock settings.
- */
- clk_set_rate(&usdhc1_clk, 49500000);
- clk_set_rate(&usdhc2_clk, 49500000);
- clk_set_rate(&usdhc3_clk, 49500000);
- clk_set_rate(&usdhc4_clk, 49500000);
-
- clk_set_parent(&cko1_clk, &ahb_clk);
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
- base = of_iomap(np, 0);
- WARN_ON(!base);
- irq = irq_of_parse_and_map(np, 0);
- mxc_timer_init(&gpt_clk, base, irq);
-
- return 0;
-}
diff --git a/arch/arm/mach-imx/clock-mx51-mx53.c b/arch/arm/mach-imx/clock-mx51-mx53.c
deleted file mode 100644
index 08470504a088..000000000000
--- a/arch/arm/mach-imx/clock-mx51-mx53.c
+++ /dev/null
@@ -1,1675 +0,0 @@
-/*
- * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright (C) 2009-2010 Amit Kucheria <amit.kucheria@canonical.com>
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-#include <linux/of.h>
-
-#include <asm/div64.h>
-
-#include <mach/hardware.h>
-#include <mach/common.h>
-#include <mach/clock.h>
-
-#include "crm-regs-imx5.h"
-
-/* External clock values passed in by the board code */
-static unsigned long external_high_reference, external_low_reference;
-static unsigned long oscillator_reference, ckih2_reference;
-
-static struct clk osc_clk;
-static struct clk pll1_main_clk;
-static struct clk pll1_sw_clk;
-static struct clk pll2_sw_clk;
-static struct clk pll3_sw_clk;
-static struct clk mx53_pll4_sw_clk;
-static struct clk lp_apm_clk;
-static struct clk periph_apm_clk;
-static struct clk ahb_clk;
-static struct clk ipg_clk;
-static struct clk usboh3_clk;
-static struct clk emi_fast_clk;
-static struct clk ipu_clk;
-static struct clk mipi_hsc1_clk;
-static struct clk esdhc1_clk;
-static struct clk esdhc2_clk;
-static struct clk esdhc3_mx53_clk;
-
-#define MAX_DPLL_WAIT_TRIES 1000 /* 1000 * udelay(1) = 1ms */
-
-/* calculate best pre and post dividers to get the required divider */
-static void __calc_pre_post_dividers(u32 div, u32 *pre, u32 *post,
- u32 max_pre, u32 max_post)
-{
- if (div >= max_pre * max_post) {
- *pre = max_pre;
- *post = max_post;
- } else if (div >= max_pre) {
- u32 min_pre, temp_pre, old_err, err;
- min_pre = DIV_ROUND_UP(div, max_post);
- old_err = max_pre;
- for (temp_pre = max_pre; temp_pre >= min_pre; temp_pre--) {
- err = div % temp_pre;
- if (err == 0) {
- *pre = temp_pre;
- break;
- }
- err = temp_pre - err;
- if (err < old_err) {
- old_err = err;
- *pre = temp_pre;
- }
- }
- *post = DIV_ROUND_UP(div, *pre);
- } else {
- *pre = div;
- *post = 1;
- }
-}
-
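-/* Program this clock's CCGR gate field to one of the MXC_CCM_CCGRx_MOD_* modes (on/off/idle) */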
-static void _clk_ccgr_setclk(struct clk *clk, unsigned mode)
-{
- u32 reg = __raw_readl(clk->enable_reg);
-
- reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
- reg |= mode << clk->enable_shift;
-
- __raw_writel(reg, clk->enable_reg);
-}
-
-static int _clk_ccgr_enable(struct clk *clk)
-{
- _clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_ON);
- return 0;
-}
-
-static void _clk_ccgr_disable(struct clk *clk)
-{
- _clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_OFF);
-}
-
-static int _clk_ccgr_enable_inrun(struct clk *clk)
-{
- _clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_IDLE);
- return 0;
-}
-
-static void _clk_ccgr_disable_inwait(struct clk *clk)
-{
- _clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_IDLE);
-}
-
-/*
- * For the 4-to-1 muxed input clock
- */
-static inline u32 _get_mux(struct clk *parent, struct clk *m0,
- struct clk *m1, struct clk *m2, struct clk *m3)
-{
- if (parent == m0)
- return 0;
- else if (parent == m1)
- return 1;
- else if (parent == m2)
- return 2;
- else if (parent == m3)
- return 3;
- else
- BUG();
-
- return -EINVAL;
-}
-
-static inline void __iomem *_mx51_get_pll_base(struct clk *pll)
-{
- if (pll == &pll1_main_clk)
- return MX51_DPLL1_BASE;
- else if (pll == &pll2_sw_clk)
- return MX51_DPLL2_BASE;
- else if (pll == &pll3_sw_clk)
- return MX51_DPLL3_BASE;
- else
- BUG();
-
- return NULL;
-}
-
-static inline void __iomem *_mx53_get_pll_base(struct clk *pll)
-{
- if (pll == &pll1_main_clk)
- return MX53_DPLL1_BASE;
- else if (pll == &pll2_sw_clk)
- return MX53_DPLL2_BASE;
- else if (pll == &pll3_sw_clk)
- return MX53_DPLL3_BASE;
- else if (pll == &mx53_pll4_sw_clk)
- return MX53_DPLL4_BASE;
- else
- BUG();
-
- return NULL;
-}
-
-static inline void __iomem *_get_pll_base(struct clk *pll)
-{
- if (cpu_is_mx51())
- return _mx51_get_pll_base(pll);
- else
- return _mx53_get_pll_base(pll);
-}
-
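-/* DPLL rate = ref * mfi + ref * mfn / (mfd + 1), where ref = 2 * parent (x2 with DPDCK0_2) / (pdf + 1) */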
-static unsigned long clk_pll_get_rate(struct clk *clk)
-{
- long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
- unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl;
- void __iomem *pllbase;
- s64 temp;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- pllbase = _get_pll_base(clk);
-
- dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
- pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
- dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
-
- if (pll_hfsm == 0) {
- dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
- dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
- dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
- } else {
- dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
- dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
- dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
- }
- pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
- mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
- mfi = (mfi <= 5) ? 5 : mfi;
- mfd = dp_mfd & MXC_PLL_DP_MFD_MASK;
- mfn = mfn_abs = dp_mfn & MXC_PLL_DP_MFN_MASK;
- /* Sign extend to 32-bits */
- if (mfn >= 0x04000000) {
- mfn |= 0xFC000000;
- mfn_abs = -mfn;
- }
-
- ref_clk = 2 * parent_rate;
- if (dbl != 0)
- ref_clk *= 2;
-
- ref_clk /= (pdf + 1);
- temp = (u64) ref_clk * mfn_abs;
- do_div(temp, mfd + 1);
- if (mfn < 0)
- temp = -temp;
- temp = (ref_clk * mfi) + temp;
-
- return temp;
-}
-
-static int _clk_pll_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg;
- void __iomem *pllbase;
-
- long mfi, pdf, mfn, mfd = 999999;
- s64 temp64;
- unsigned long quad_parent_rate;
- unsigned long pll_hfsm, dp_ctl;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- pllbase = _get_pll_base(clk);
-
- quad_parent_rate = 4 * parent_rate;
- pdf = mfi = -1;
- while (++pdf < 16 && mfi < 5)
- mfi = rate * (pdf+1) / quad_parent_rate;
- if (mfi > 15)
- return -EINVAL;
- pdf--;
-
- temp64 = rate * (pdf+1) - quad_parent_rate * mfi;
- do_div(temp64, quad_parent_rate/1000000);
- mfn = (long)temp64;
-
- dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
- /* use dpdck0_2 */
- __raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
- pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
- if (pll_hfsm == 0) {
- reg = mfi << 4 | pdf;
- __raw_writel(reg, pllbase + MXC_PLL_DP_OP);
- __raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
- __raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
- } else {
- reg = mfi << 4 | pdf;
- __raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
- __raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
- __raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
- }
-
- return 0;
-}
-
-static int _clk_pll_enable(struct clk *clk)
-{
- u32 reg;
- void __iomem *pllbase;
- int i = 0;
-
- pllbase = _get_pll_base(clk);
- reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
- if (reg & MXC_PLL_DP_CTL_UPEN)
- return 0;
-
- reg |= MXC_PLL_DP_CTL_UPEN;
- __raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
-
- /* Wait for lock */
- do {
- reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
- if (reg & MXC_PLL_DP_CTL_LRF)
- break;
-
- udelay(1);
- } while (++i < MAX_DPLL_WAIT_TRIES);
-
- if (i == MAX_DPLL_WAIT_TRIES) {
- pr_err("MX5: pll locking failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void _clk_pll_disable(struct clk *clk)
-{
- u32 reg;
- void __iomem *pllbase;
-
- pllbase = _get_pll_base(clk);
- reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) & ~MXC_PLL_DP_CTL_UPEN;
- __raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
-}
-
-static int _clk_pll1_sw_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg, step;
-
- reg = __raw_readl(MXC_CCM_CCSR);
-
- /* When switching from pll1_main_clk to a bypass clock, first select a
- * multiplexed clock in 'step_sel', then shift the glitchless mux
- * 'pll1_sw_clk_sel'.
- *
- * When switching back, do it in the reverse order.
- */
- if (parent == &pll1_main_clk) {
- /* Switch to pll1_main_clk */
- reg &= ~MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
- __raw_writel(reg, MXC_CCM_CCSR);
- /* step_clk mux switched to lp_apm, to save power. */
- reg = __raw_readl(MXC_CCM_CCSR);
- reg &= ~MXC_CCM_CCSR_STEP_SEL_MASK;
- reg |= (MXC_CCM_CCSR_STEP_SEL_LP_APM <<
- MXC_CCM_CCSR_STEP_SEL_OFFSET);
- } else {
- if (parent == &lp_apm_clk) {
- step = MXC_CCM_CCSR_STEP_SEL_LP_APM;
- } else if (parent == &pll2_sw_clk) {
- step = MXC_CCM_CCSR_STEP_SEL_PLL2_DIVIDED;
- } else if (parent == &pll3_sw_clk) {
- step = MXC_CCM_CCSR_STEP_SEL_PLL3_DIVIDED;
- } else
- return -EINVAL;
-
- reg &= ~MXC_CCM_CCSR_STEP_SEL_MASK;
- reg |= (step << MXC_CCM_CCSR_STEP_SEL_OFFSET);
-
- __raw_writel(reg, MXC_CCM_CCSR);
- /* Switch to step_clk */
- reg = __raw_readl(MXC_CCM_CCSR);
- reg |= MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
- }
- __raw_writel(reg, MXC_CCM_CCSR);
- return 0;
-}
-
-static unsigned long clk_pll1_sw_get_rate(struct clk *clk)
-{
- u32 reg, div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- reg = __raw_readl(MXC_CCM_CCSR);
-
- if (clk->parent == &pll2_sw_clk) {
- div = ((reg & MXC_CCM_CCSR_PLL2_PODF_MASK) >>
- MXC_CCM_CCSR_PLL2_PODF_OFFSET) + 1;
- } else if (clk->parent == &pll3_sw_clk) {
- div = ((reg & MXC_CCM_CCSR_PLL3_PODF_MASK) >>
- MXC_CCM_CCSR_PLL3_PODF_OFFSET) + 1;
- } else
- div = 1;
- return parent_rate / div;
-}
-
-static int _clk_pll2_sw_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CCSR);
-
- if (parent == &pll2_sw_clk)
- reg &= ~MXC_CCM_CCSR_PLL2_SW_CLK_SEL;
- else
- reg |= MXC_CCM_CCSR_PLL2_SW_CLK_SEL;
-
- __raw_writel(reg, MXC_CCM_CCSR);
- return 0;
-}
-
-static int _clk_lp_apm_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- if (parent == &osc_clk)
- reg = __raw_readl(MXC_CCM_CCSR) & ~MXC_CCM_CCSR_LP_APM_SEL;
- else
- return -EINVAL;
-
- __raw_writel(reg, MXC_CCM_CCSR);
-
- return 0;
-}
-
-static unsigned long clk_cpu_get_rate(struct clk *clk)
-{
- u32 cacrr, div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
- cacrr = __raw_readl(MXC_CCM_CACRR);
- div = (cacrr & MXC_CCM_CACRR_ARM_PODF_MASK) + 1;
-
- return parent_rate / div;
-}
-
-static int clk_cpu_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg, cpu_podf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
- cpu_podf = parent_rate / rate - 1;
- /* use post divider to change freq */
- reg = __raw_readl(MXC_CCM_CACRR);
- reg &= ~MXC_CCM_CACRR_ARM_PODF_MASK;
- reg |= cpu_podf << MXC_CCM_CACRR_ARM_PODF_OFFSET;
- __raw_writel(reg, MXC_CCM_CACRR);
-
- return 0;
-}
-
-static int _clk_periph_apm_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg, mux;
- int i = 0;
-
- mux = _get_mux(parent, &pll1_sw_clk, &pll3_sw_clk, &lp_apm_clk, NULL);
-
- reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_PERIPH_CLK_SEL_MASK;
- reg |= mux << MXC_CCM_CBCMR_PERIPH_CLK_SEL_OFFSET;
- __raw_writel(reg, MXC_CCM_CBCMR);
-
- /* Wait for lock */
- do {
- reg = __raw_readl(MXC_CCM_CDHIPR);
- if (!(reg & MXC_CCM_CDHIPR_PERIPH_CLK_SEL_BUSY))
- break;
-
- udelay(1);
- } while (++i < MAX_DPLL_WAIT_TRIES);
-
- if (i == MAX_DPLL_WAIT_TRIES) {
- pr_err("MX5: Set parent for periph_apm clock failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int _clk_main_bus_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CBCDR);
-
- if (parent == &pll2_sw_clk)
- reg &= ~MXC_CCM_CBCDR_PERIPH_CLK_SEL;
- else if (parent == &periph_apm_clk)
- reg |= MXC_CCM_CBCDR_PERIPH_CLK_SEL;
- else
- return -EINVAL;
-
- __raw_writel(reg, MXC_CCM_CBCDR);
-
- return 0;
-}
-
-static struct clk main_bus_clk = {
- .parent = &pll2_sw_clk,
- .set_parent = _clk_main_bus_set_parent,
-};
-
-static unsigned long clk_ahb_get_rate(struct clk *clk)
-{
- u32 reg, div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- reg = __raw_readl(MXC_CCM_CBCDR);
- div = ((reg & MXC_CCM_CBCDR_AHB_PODF_MASK) >>
- MXC_CCM_CBCDR_AHB_PODF_OFFSET) + 1;
- return parent_rate / div;
-}
-
-
-static int _clk_ahb_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg, div;
- unsigned long parent_rate;
- int i = 0;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
- if (div > 8 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
-
- reg = __raw_readl(MXC_CCM_CBCDR);
- reg &= ~MXC_CCM_CBCDR_AHB_PODF_MASK;
- reg |= (div - 1) << MXC_CCM_CBCDR_AHB_PODF_OFFSET;
- __raw_writel(reg, MXC_CCM_CBCDR);
-
- /* Wait for lock */
- do {
- reg = __raw_readl(MXC_CCM_CDHIPR);
- if (!(reg & MXC_CCM_CDHIPR_AHB_PODF_BUSY))
- break;
-
- udelay(1);
- } while (++i < MAX_DPLL_WAIT_TRIES);
-
- if (i == MAX_DPLL_WAIT_TRIES) {
- pr_err("MX5: clk_ahb_set_rate failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static unsigned long _clk_ahb_round_rate(struct clk *clk,
- unsigned long rate)
-{
- u32 div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
- if (div > 8)
- div = 8;
- else if (div == 0)
- div++;
- return parent_rate / div;
-}
-
-
-static int _clk_max_enable(struct clk *clk)
-{
- u32 reg;
-
- _clk_ccgr_enable(clk);
-
- /* Handshake with MAX when LPM is entered. */
- reg = __raw_readl(MXC_CCM_CLPCR);
- if (cpu_is_mx51())
- reg &= ~MX51_CCM_CLPCR_BYPASS_MAX_LPM_HS;
- else if (cpu_is_mx53())
- reg &= ~MX53_CCM_CLPCR_BYPASS_MAX_LPM_HS;
- __raw_writel(reg, MXC_CCM_CLPCR);
-
- return 0;
-}
-
-static void _clk_max_disable(struct clk *clk)
-{
- u32 reg;
-
- _clk_ccgr_disable_inwait(clk);
-
- /* No handshake with MAX when LPM is entered, as it is disabled. */
- reg = __raw_readl(MXC_CCM_CLPCR);
- if (cpu_is_mx51())
- reg |= MX51_CCM_CLPCR_BYPASS_MAX_LPM_HS;
- else if (cpu_is_mx53())
- reg &= ~MX53_CCM_CLPCR_BYPASS_MAX_LPM_HS;
- __raw_writel(reg, MXC_CCM_CLPCR);
-}
-
-static unsigned long clk_ipg_get_rate(struct clk *clk)
-{
- u32 reg, div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- reg = __raw_readl(MXC_CCM_CBCDR);
- div = ((reg & MXC_CCM_CBCDR_IPG_PODF_MASK) >>
- MXC_CCM_CBCDR_IPG_PODF_OFFSET) + 1;
-
- return parent_rate / div;
-}
-
-static unsigned long clk_ipg_per_get_rate(struct clk *clk)
-{
- u32 reg, prediv1, prediv2, podf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (clk->parent == &main_bus_clk || clk->parent == &lp_apm_clk) {
- /* the main_bus_clk is the one before the DVFS engine */
- reg = __raw_readl(MXC_CCM_CBCDR);
- prediv1 = ((reg & MXC_CCM_CBCDR_PERCLK_PRED1_MASK) >>
- MXC_CCM_CBCDR_PERCLK_PRED1_OFFSET) + 1;
- prediv2 = ((reg & MXC_CCM_CBCDR_PERCLK_PRED2_MASK) >>
- MXC_CCM_CBCDR_PERCLK_PRED2_OFFSET) + 1;
- podf = ((reg & MXC_CCM_CBCDR_PERCLK_PODF_MASK) >>
- MXC_CCM_CBCDR_PERCLK_PODF_OFFSET) + 1;
- return parent_rate / (prediv1 * prediv2 * podf);
- } else if (clk->parent == &ipg_clk)
- return parent_rate;
- else
- BUG();
-}
-
-static int _clk_ipg_per_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CBCMR);
-
- reg &= ~MXC_CCM_CBCMR_PERCLK_LP_APM_CLK_SEL;
- reg &= ~MXC_CCM_CBCMR_PERCLK_IPG_CLK_SEL;
-
- if (parent == &ipg_clk)
- reg |= MXC_CCM_CBCMR_PERCLK_IPG_CLK_SEL;
- else if (parent == &lp_apm_clk)
- reg |= MXC_CCM_CBCMR_PERCLK_LP_APM_CLK_SEL;
- else if (parent != &main_bus_clk)
- return -EINVAL;
-
- __raw_writel(reg, MXC_CCM_CBCMR);
-
- return 0;
-}
-
-#define clk_nfc_set_parent NULL
-
-static unsigned long clk_nfc_get_rate(struct clk *clk)
-{
- unsigned long rate;
- u32 reg, div;
-
- reg = __raw_readl(MXC_CCM_CBCDR);
- div = ((reg & MXC_CCM_CBCDR_NFC_PODF_MASK) >>
- MXC_CCM_CBCDR_NFC_PODF_OFFSET) + 1;
- rate = clk_get_rate(clk->parent) / div;
- WARN_ON(rate == 0);
- return rate;
-}
-
-static unsigned long clk_nfc_round_rate(struct clk *clk,
- unsigned long rate)
-{
- u32 div;
- unsigned long parent_rate = clk_get_rate(clk->parent);
-
- if (!rate)
- return -EINVAL;
-
- div = parent_rate / rate;
-
- if (parent_rate % rate)
- div++;
-
- if (div > 8)
- return -EINVAL;
-
- return parent_rate / div;
-
-}
-
-static int clk_nfc_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg, div;
-
- div = clk_get_rate(clk->parent) / rate;
- if (div == 0)
- div++;
- if (((clk_get_rate(clk->parent) / div) != rate) || (div > 8))
- return -EINVAL;
-
- reg = __raw_readl(MXC_CCM_CBCDR);
- reg &= ~MXC_CCM_CBCDR_NFC_PODF_MASK;
- reg |= (div - 1) << MXC_CCM_CBCDR_NFC_PODF_OFFSET;
- __raw_writel(reg, MXC_CCM_CBCDR);
-
- while (__raw_readl(MXC_CCM_CDHIPR) &
- MXC_CCM_CDHIPR_NFC_IPG_INT_MEM_PODF_BUSY){
- }
-
- return 0;
-}
-
-static unsigned long get_high_reference_clock_rate(struct clk *clk)
-{
- return external_high_reference;
-}
-
-static unsigned long get_low_reference_clock_rate(struct clk *clk)
-{
- return external_low_reference;
-}
-
-static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
-{
- return oscillator_reference;
-}
-
-static unsigned long get_ckih2_reference_clock_rate(struct clk *clk)
-{
- return ckih2_reference;
-}
-
-static unsigned long clk_emi_slow_get_rate(struct clk *clk)
-{
- u32 reg, div;
-
- reg = __raw_readl(MXC_CCM_CBCDR);
- div = ((reg & MXC_CCM_CBCDR_EMI_PODF_MASK) >>
- MXC_CCM_CBCDR_EMI_PODF_OFFSET) + 1;
-
- return clk_get_rate(clk->parent) / div;
-}
-
-static unsigned long _clk_ddr_hf_get_rate(struct clk *clk)
-{
- unsigned long rate;
- u32 reg, div;
-
- reg = __raw_readl(MXC_CCM_CBCDR);
- div = ((reg & MXC_CCM_CBCDR_DDR_PODF_MASK) >>
- MXC_CCM_CBCDR_DDR_PODF_OFFSET) + 1;
- rate = clk_get_rate(clk->parent) / div;
-
- return rate;
-}
-
-/* External high frequency clock */
-static struct clk ckih_clk = {
- .get_rate = get_high_reference_clock_rate,
-};
-
-static struct clk ckih2_clk = {
- .get_rate = get_ckih2_reference_clock_rate,
-};
-
-static struct clk osc_clk = {
- .get_rate = get_oscillator_reference_clock_rate,
-};
-
-/* External low frequency (32kHz) clock */
-static struct clk ckil_clk = {
- .get_rate = get_low_reference_clock_rate,
-};
-
-static struct clk pll1_main_clk = {
- .parent = &osc_clk,
- .get_rate = clk_pll_get_rate,
- .enable = _clk_pll_enable,
- .disable = _clk_pll_disable,
-};
-
-/* Clock tree block diagram (WIP):
- * CCM: Clock Controller Module
- *
- * PLL output -> |
- * | CCM Switcher -> CCM_CLK_ROOT_GEN ->
- * PLL bypass -> |
- *
- */
-
-/* PLL1 SW supplies to ARM core */
-static struct clk pll1_sw_clk = {
- .parent = &pll1_main_clk,
- .set_parent = _clk_pll1_sw_set_parent,
- .get_rate = clk_pll1_sw_get_rate,
-};
-
-/* PLL2 SW supplies to AXI/AHB/IP buses */
-static struct clk pll2_sw_clk = {
- .parent = &osc_clk,
- .get_rate = clk_pll_get_rate,
- .set_rate = _clk_pll_set_rate,
- .set_parent = _clk_pll2_sw_set_parent,
- .enable = _clk_pll_enable,
- .disable = _clk_pll_disable,
-};
-
-/* PLL3 SW supplies to serial clocks like USB, SSI, etc. */
-static struct clk pll3_sw_clk = {
- .parent = &osc_clk,
- .set_rate = _clk_pll_set_rate,
- .get_rate = clk_pll_get_rate,
- .enable = _clk_pll_enable,
- .disable = _clk_pll_disable,
-};
-
-/* PLL4 SW supplies to LVDS Display Bridge(LDB) */
-static struct clk mx53_pll4_sw_clk = {
- .parent = &osc_clk,
- .set_rate = _clk_pll_set_rate,
- .enable = _clk_pll_enable,
- .disable = _clk_pll_disable,
-};
-
-/* Low-power Audio Playback Mode clock */
-static struct clk lp_apm_clk = {
- .parent = &osc_clk,
- .set_parent = _clk_lp_apm_set_parent,
-};
-
-static struct clk periph_apm_clk = {
- .parent = &pll1_sw_clk,
- .set_parent = _clk_periph_apm_set_parent,
-};
-
-static struct clk cpu_clk = {
- .parent = &pll1_sw_clk,
- .get_rate = clk_cpu_get_rate,
- .set_rate = clk_cpu_set_rate,
-};
-
-static struct clk ahb_clk = {
- .parent = &main_bus_clk,
- .get_rate = clk_ahb_get_rate,
- .set_rate = _clk_ahb_set_rate,
- .round_rate = _clk_ahb_round_rate,
-};
-
-static struct clk iim_clk = {
- .parent = &ipg_clk,
- .enable_reg = MXC_CCM_CCGR0,
- .enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
-};
-
-/* Main IP interface clock for access to registers */
-static struct clk ipg_clk = {
- .parent = &ahb_clk,
- .get_rate = clk_ipg_get_rate,
-};
-
-static struct clk ipg_perclk = {
- .parent = &lp_apm_clk,
- .get_rate = clk_ipg_per_get_rate,
- .set_parent = _clk_ipg_per_set_parent,
-};
-
-static struct clk ahb_max_clk = {
- .parent = &ahb_clk,
- .enable_reg = MXC_CCM_CCGR0,
- .enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
- .enable = _clk_max_enable,
- .disable = _clk_max_disable,
-};
-
-static struct clk aips_tz1_clk = {
- .parent = &ahb_clk,
- .secondary = &ahb_max_clk,
- .enable_reg = MXC_CCM_CCGR0,
- .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
- .enable = _clk_ccgr_enable,
- .disable = _clk_ccgr_disable_inwait,
-};
-
-static struct clk aips_tz2_clk = {
- .parent = &ahb_clk,
- .secondary = &ahb_max_clk,
- .enable_reg = MXC_CCM_CCGR0,
- .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
- .enable = _clk_ccgr_enable,
- .disable = _clk_ccgr_disable_inwait,
-};
-
-static struct clk gpc_dvfs_clk = {
- .enable_reg = MXC_CCM_CCGR5,
- .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
- .enable = _clk_ccgr_enable,
- .disable = _clk_ccgr_disable,
-};
-
-static struct clk gpt_32k_clk = {
- .id = 0,
- .parent = &ckil_clk,
-};
-
-static struct clk dummy_clk = {
- .id = 0,
-};
-
-static struct clk emi_slow_clk = {
- .parent = &pll2_sw_clk,
- .enable_reg = MXC_CCM_CCGR5,
- .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
- .enable = _clk_ccgr_enable,
- .disable = _clk_ccgr_disable_inwait,
- .get_rate = clk_emi_slow_get_rate,
-};
-
-static int clk_ipu_enable(struct clk *clk)
-{
- u32 reg;
-
- _clk_ccgr_enable(clk);
-
- /* Enable handshake with IPU when certain clock rates are changed */
- reg = __raw_readl(MXC_CCM_CCDR);
- reg &= ~MXC_CCM_CCDR_IPU_HS_MASK;
- __raw_writel(reg, MXC_CCM_CCDR);
-
- /* Enable handshake with IPU when LPM is entered */
- reg = __raw_readl(MXC_CCM_CLPCR);
- reg &= ~MXC_CCM_CLPCR_BYPASS_IPU_LPM_HS;
- __raw_writel(reg, MXC_CCM_CLPCR);
-
- return 0;
-}
-
-static void clk_ipu_disable(struct clk *clk)
-{
- u32 reg;
-
- _clk_ccgr_disable(clk);
-
- /* Disable handshake with IPU when dividers are changed */
- reg = __raw_readl(MXC_CCM_CCDR);
- reg |= MXC_CCM_CCDR_IPU_HS_MASK;
- __raw_writel(reg, MXC_CCM_CCDR);
-
- /* Disable handshake with IPU when LPM is entered */
- reg = __raw_readl(MXC_CCM_CLPCR);
- reg |= MXC_CCM_CLPCR_BYPASS_IPU_LPM_HS;
- __raw_writel(reg, MXC_CCM_CLPCR);
-}
-
-static struct clk ahbmux1_clk = {
- .parent = &ahb_clk,
- .secondary = &ahb_max_clk,
- .enable_reg = MXC_CCM_CCGR0,
- .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
- .enable = _clk_ccgr_enable,
- .disable = _clk_ccgr_disable_inwait,
-};
-
-static struct clk ipu_sec_clk = {
- .parent = &emi_fast_clk,
- .secondary = &ahbmux1_clk,
-};
-
-static struct clk ddr_hf_clk = {
- .parent = &pll1_sw_clk,
- .get_rate = _clk_ddr_hf_get_rate,
-};
-
-static struct clk ddr_clk = {
- .parent = &ddr_hf_clk,
-};
-
-/* clock definitions for MIPI HSC unit which has been removed
- * from documentation, but not from hardware
- */
-static int _clk_hsc_enable(struct clk *clk)
-{
- u32 reg;
-
- _clk_ccgr_enable(clk);
- /* Handshake with IPU when certain clock rates are changed. */
- reg = __raw_readl(MXC_CCM_CCDR);
- reg &= ~MXC_CCM_CCDR_HSC_HS_MASK;
- __raw_writel(reg, MXC_CCM_CCDR);
-
- reg = __raw_readl(MXC_CCM_CLPCR);
- reg &= ~MXC_CCM_CLPCR_BYPASS_HSC_LPM_HS;
- __raw_writel(reg, MXC_CCM_CLPCR);
-
- return 0;
-}
-
-static void _clk_hsc_disable(struct clk *clk)
-{
- u32 reg;
-
- _clk_ccgr_disable(clk);
- /* No handshake with HSC as it's not enabled. */
- reg = __raw_readl(MXC_CCM_CCDR);
- reg |= MXC_CCM_CCDR_HSC_HS_MASK;
- __raw_writel(reg, MXC_CCM_CCDR);
-
- reg = __raw_readl(MXC_CCM_CLPCR);
- reg |= MXC_CCM_CLPCR_BYPASS_HSC_LPM_HS;
- __raw_writel(reg, MXC_CCM_CLPCR);
-}
-
-static struct clk mipi_hsp_clk = {
- .parent = &ipu_clk,
- .enable_reg = MXC_CCM_CCGR4,
- .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
- .enable = _clk_hsc_enable,
- .disable = _clk_hsc_disable,
- .secondary = &mipi_hsc1_clk,
-};
-
-#define DEFINE_CLOCK_CCGR(name, i, er, es, pfx, p, s) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = pfx##_get_rate, \
- .set_rate = pfx##_set_rate, \
- .round_rate = pfx##_round_rate, \
- .set_parent = pfx##_set_parent, \
- .enable = _clk_ccgr_enable, \
- .disable = _clk_ccgr_disable, \
- .parent = p, \
- .secondary = s, \
- }
-
-#define DEFINE_CLOCK_MAX(name, i, er, es, pfx, p, s) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = pfx##_get_rate, \
- .set_rate = pfx##_set_rate, \
- .set_parent = pfx##_set_parent, \
- .enable = _clk_max_enable, \
- .disable = _clk_max_disable, \
- .parent = p, \
- .secondary = s, \
- }
-
-#define CLK_GET_RATE(name, nr, bitsname) \
-static unsigned long clk_##name##_get_rate(struct clk *clk) \
-{ \
- u32 reg, pred, podf; \
- \
- reg = __raw_readl(MXC_CCM_CSCDR##nr); \
- pred = (reg & MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK) \
- >> MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET; \
- podf = (reg & MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK) \
- >> MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET; \
- \
- return DIV_ROUND_CLOSEST(clk_get_rate(clk->parent), \
- (pred + 1) * (podf + 1)); \
-}
-
-#define CLK_SET_PARENT(name, nr, bitsname) \
-static int clk_##name##_set_parent(struct clk *clk, struct clk *parent) \
-{ \
- u32 reg, mux; \
- \
- mux = _get_mux(parent, &pll1_sw_clk, &pll2_sw_clk, \
- &pll3_sw_clk, &lp_apm_clk); \
- reg = __raw_readl(MXC_CCM_CSCMR##nr) & \
- ~MXC_CCM_CSCMR##nr##_##bitsname##_CLK_SEL_MASK; \
- reg |= mux << MXC_CCM_CSCMR##nr##_##bitsname##_CLK_SEL_OFFSET; \
- __raw_writel(reg, MXC_CCM_CSCMR##nr); \
- \
- return 0; \
-}
-
-#define CLK_SET_RATE(name, nr, bitsname) \
-static int clk_##name##_set_rate(struct clk *clk, unsigned long rate) \
-{ \
- u32 reg, div, parent_rate; \
- u32 pre = 0, post = 0; \
- \
- parent_rate = clk_get_rate(clk->parent); \
- div = parent_rate / rate; \
- \
- if ((parent_rate / div) != rate) \
- return -EINVAL; \
- \
- __calc_pre_post_dividers(div, &pre, &post, \
- (MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK >> \
- MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET) + 1, \
- (MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK >> \
- MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET) + 1);\
- \
- /* Set the clock divider */ \
- reg = __raw_readl(MXC_CCM_CSCDR##nr) & \
- ~(MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK \
- | MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK); \
- reg |= (post - 1) << \
- MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET; \
- reg |= (pre - 1) << \
- MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET; \
- __raw_writel(reg, MXC_CCM_CSCDR##nr); \
- \
- return 0; \
-}
-
-/* UART */
-CLK_GET_RATE(uart, 1, UART)
-CLK_SET_PARENT(uart, 1, UART)
-
-static struct clk uart_root_clk = {
- .parent = &pll2_sw_clk,
- .get_rate = clk_uart_get_rate,
- .set_parent = clk_uart_set_parent,
-};
-
-/* USBOH3 */
-CLK_GET_RATE(usboh3, 1, USBOH3)
-CLK_SET_PARENT(usboh3, 1, USBOH3)
-
-static struct clk usboh3_clk = {
- .parent = &pll2_sw_clk,
- .get_rate = clk_usboh3_get_rate,
- .set_parent = clk_usboh3_set_parent,
- .enable = _clk_ccgr_enable,
- .disable = _clk_ccgr_disable,
- .enable_reg = MXC_CCM_CCGR2,
- .enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
-};
-
-static struct clk usb_ahb_clk = {
- .parent = &ipg_clk,
- .enable = _clk_ccgr_enable,
- .disable = _clk_ccgr_disable,
- .enable_reg = MXC_CCM_CCGR2,
- .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
-};
-
-static int clk_usb_phy1_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USB_PHY_CLK_SEL;
-
- if (parent == &pll3_sw_clk)
- reg |= 1 << MXC_CCM_CSCMR1_USB_PHY_CLK_SEL_OFFSET;
-
- __raw_writel(reg, MXC_CCM_CSCMR1);
-
- return 0;
-}
-
-static struct clk usb_phy1_clk = {
- .parent = &pll3_sw_clk,
- .set_parent = clk_usb_phy1_set_parent,
- .enable = _clk_ccgr_enable,
- .enable_reg = MXC_CCM_CCGR2,
- .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
- .disable = _clk_ccgr_disable,
-};
-
-/* eCSPI */
-CLK_GET_RATE(ecspi, 2, CSPI)
-CLK_SET_PARENT(ecspi, 1, CSPI)
-
-static struct clk ecspi_main_clk = {
- .parent = &pll3_sw_clk,
- .get_rate = clk_ecspi_get_rate,
- .set_parent = clk_ecspi_set_parent,
-};
-
-/* eSDHC */
-CLK_GET_RATE(esdhc1, 1, ESDHC1_MSHC1)
-CLK_SET_PARENT(esdhc1, 1, ESDHC1_MSHC1)
-CLK_SET_RATE(esdhc1, 1, ESDHC1_MSHC1)
-
-/* mx51 specific */
-CLK_GET_RATE(esdhc2, 1, ESDHC2_MSHC2)
-CLK_SET_PARENT(esdhc2, 1, ESDHC2_MSHC2)
-CLK_SET_RATE(esdhc2, 1, ESDHC2_MSHC2)
-
-static int clk_esdhc3_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CSCMR1);
- if (parent == &esdhc1_clk)
- reg &= ~MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
- else if (parent == &esdhc2_clk)
- reg |= MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
- else
- return -EINVAL;
- __raw_writel(reg, MXC_CCM_CSCMR1);
-
- return 0;
-}
-
-static int clk_esdhc4_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CSCMR1);
- if (parent == &esdhc1_clk)
- reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
- else if (parent == &esdhc2_clk)
- reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
- else
- return -EINVAL;
- __raw_writel(reg, MXC_CCM_CSCMR1);
-
- return 0;
-}
-
-/* mx53 specific */
-static int clk_esdhc2_mx53_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CSCMR1);
- if (parent == &esdhc1_clk)
- reg &= ~MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
- else if (parent == &esdhc3_mx53_clk)
- reg |= MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
- else
- return -EINVAL;
- __raw_writel(reg, MXC_CCM_CSCMR1);
-
- return 0;
-}
-
-CLK_GET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)
-CLK_SET_PARENT(esdhc3_mx53, 1, ESDHC3_MX53)
-CLK_SET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)
-
-static int clk_esdhc4_mx53_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CSCMR1);
- if (parent == &esdhc1_clk)
- reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
- else if (parent == &esdhc3_mx53_clk)
- reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
- else
- return -EINVAL;
- __raw_writel(reg, MXC_CCM_CSCMR1);
-
- return 0;
-}
-
-#define DEFINE_CLOCK_FULL(name, i, er, es, gr, sr, e, d, p, s) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = gr, \
- .set_rate = sr, \
- .enable = e, \
- .disable = d, \
- .parent = p, \
- .secondary = s, \
- }
-
-#define DEFINE_CLOCK(name, i, er, es, gr, sr, p, s) \
- DEFINE_CLOCK_FULL(name, i, er, es, gr, sr, _clk_ccgr_enable, _clk_ccgr_disable, p, s)
-
-/* Shared peripheral bus arbiter */
-DEFINE_CLOCK(spba_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG0_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
-
-/* UART */
-DEFINE_CLOCK(uart1_ipg_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG3_OFFSET,
- NULL, NULL, &ipg_clk, &aips_tz1_clk);
-DEFINE_CLOCK(uart2_ipg_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG5_OFFSET,
- NULL, NULL, &ipg_clk, &aips_tz1_clk);
-DEFINE_CLOCK(uart3_ipg_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG7_OFFSET,
- NULL, NULL, &ipg_clk, &spba_clk);
-DEFINE_CLOCK(uart4_ipg_clk, 3, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG4_OFFSET,
- NULL, NULL, &ipg_clk, &spba_clk);
-DEFINE_CLOCK(uart5_ipg_clk, 4, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG6_OFFSET,
- NULL, NULL, &ipg_clk, &spba_clk);
-DEFINE_CLOCK(uart1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG4_OFFSET,
- NULL, NULL, &uart_root_clk, &uart1_ipg_clk);
-DEFINE_CLOCK(uart2_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG6_OFFSET,
- NULL, NULL, &uart_root_clk, &uart2_ipg_clk);
-DEFINE_CLOCK(uart3_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG8_OFFSET,
- NULL, NULL, &uart_root_clk, &uart3_ipg_clk);
-DEFINE_CLOCK(uart4_clk, 3, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG5_OFFSET,
- NULL, NULL, &uart_root_clk, &uart4_ipg_clk);
-DEFINE_CLOCK(uart5_clk, 4, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG7_OFFSET,
- NULL, NULL, &uart_root_clk, &uart5_ipg_clk);
-
-/* GPT */
-DEFINE_CLOCK(gpt_ipg_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG10_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET,
- NULL, NULL, &ipg_clk, &gpt_ipg_clk);
-
-DEFINE_CLOCK(pwm1_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG6_OFFSET,
- NULL, NULL, &ipg_perclk, NULL);
-DEFINE_CLOCK(pwm2_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG8_OFFSET,
- NULL, NULL, &ipg_perclk, NULL);
-
-/* I2C */
-DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG9_OFFSET,
- NULL, NULL, &ipg_perclk, NULL);
-DEFINE_CLOCK(i2c2_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG10_OFFSET,
- NULL, NULL, &ipg_perclk, NULL);
-DEFINE_CLOCK(hsi2c_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG11_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(i2c3_mx53_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG11_OFFSET,
- NULL, NULL, &ipg_perclk, NULL);
-
-/* FEC */
-DEFINE_CLOCK(fec_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG12_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
-
-/* NFC */
-DEFINE_CLOCK_CCGR(nfc_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG10_OFFSET,
- clk_nfc, &emi_slow_clk, NULL);
-
-/* SSI */
-DEFINE_CLOCK(ssi1_ipg_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG8_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(ssi1_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG9_OFFSET,
- NULL, NULL, &pll3_sw_clk, &ssi1_ipg_clk);
-DEFINE_CLOCK(ssi2_ipg_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG10_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(ssi2_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG11_OFFSET,
- NULL, NULL, &pll3_sw_clk, &ssi2_ipg_clk);
-DEFINE_CLOCK(ssi3_ipg_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG12_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(ssi3_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG13_OFFSET,
- NULL, NULL, &pll3_sw_clk, &ssi3_ipg_clk);
-
-/* eCSPI */
-DEFINE_CLOCK_FULL(ecspi1_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG9_OFFSET,
- NULL, NULL, _clk_ccgr_enable_inrun, _clk_ccgr_disable,
- &ipg_clk, &spba_clk);
-DEFINE_CLOCK(ecspi1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG10_OFFSET,
- NULL, NULL, &ecspi_main_clk, &ecspi1_ipg_clk);
-DEFINE_CLOCK_FULL(ecspi2_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG11_OFFSET,
- NULL, NULL, _clk_ccgr_enable_inrun, _clk_ccgr_disable,
- &ipg_clk, &aips_tz2_clk);
-DEFINE_CLOCK(ecspi2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG12_OFFSET,
- NULL, NULL, &ecspi_main_clk, &ecspi2_ipg_clk);
-
-/* CSPI */
-DEFINE_CLOCK(cspi_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG9_OFFSET,
- NULL, NULL, &ipg_clk, &aips_tz2_clk);
-DEFINE_CLOCK(cspi_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG13_OFFSET,
- NULL, NULL, &ipg_clk, &cspi_ipg_clk);
-
-/* SDMA */
-DEFINE_CLOCK(sdma_clk, 1, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG15_OFFSET,
- NULL, NULL, &ahb_clk, NULL);
-
-/* eSDHC */
-DEFINE_CLOCK_FULL(esdhc1_ipg_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG0_OFFSET,
- NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-DEFINE_CLOCK_MAX(esdhc1_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG1_OFFSET,
- clk_esdhc1, &pll2_sw_clk, &esdhc1_ipg_clk);
-DEFINE_CLOCK_FULL(esdhc2_ipg_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG2_OFFSET,
- NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-DEFINE_CLOCK_FULL(esdhc3_ipg_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG4_OFFSET,
- NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-DEFINE_CLOCK_FULL(esdhc4_ipg_clk, 3, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG6_OFFSET,
- NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-
-/* mx51 specific */
-DEFINE_CLOCK_MAX(esdhc2_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG3_OFFSET,
- clk_esdhc2, &pll2_sw_clk, &esdhc2_ipg_clk);
-
-static struct clk esdhc3_clk = {
- .id = 2,
- .parent = &esdhc1_clk,
- .set_parent = clk_esdhc3_set_parent,
- .enable_reg = MXC_CCM_CCGR3,
- .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
- .enable = _clk_max_enable,
- .disable = _clk_max_disable,
- .secondary = &esdhc3_ipg_clk,
-};
-static struct clk esdhc4_clk = {
- .id = 3,
- .parent = &esdhc1_clk,
- .set_parent = clk_esdhc4_set_parent,
- .enable_reg = MXC_CCM_CCGR3,
- .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
- .enable = _clk_max_enable,
- .disable = _clk_max_disable,
- .secondary = &esdhc4_ipg_clk,
-};
-
-/* mx53 specific */
-static struct clk esdhc2_mx53_clk = {
- .id = 2,
- .parent = &esdhc1_clk,
- .set_parent = clk_esdhc2_mx53_set_parent,
- .enable_reg = MXC_CCM_CCGR3,
- .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
- .enable = _clk_max_enable,
- .disable = _clk_max_disable,
- .secondary = &esdhc3_ipg_clk,
-};
-
-DEFINE_CLOCK_MAX(esdhc3_mx53_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG5_OFFSET,
- clk_esdhc3_mx53, &pll2_sw_clk, &esdhc2_ipg_clk);
-
-static struct clk esdhc4_mx53_clk = {
- .id = 3,
- .parent = &esdhc1_clk,
- .set_parent = clk_esdhc4_mx53_set_parent,
- .enable_reg = MXC_CCM_CCGR3,
- .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
- .enable = _clk_max_enable,
- .disable = _clk_max_disable,
- .secondary = &esdhc4_ipg_clk,
-};
-
-static struct clk sata_clk = {
- .parent = &ipg_clk,
- .enable = _clk_max_enable,
- .enable_reg = MXC_CCM_CCGR4,
- .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
- .disable = _clk_max_disable,
-};
-
-static struct clk ahci_phy_clk = {
- .parent = &usb_phy1_clk,
-};
-
-static struct clk ahci_dma_clk = {
- .parent = &ahb_clk,
-};
-
-DEFINE_CLOCK(mipi_esc_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG5_OFFSET, NULL, NULL, NULL, &pll2_sw_clk);
-DEFINE_CLOCK(mipi_hsc2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG4_OFFSET, NULL, NULL, &mipi_esc_clk, &pll2_sw_clk);
-DEFINE_CLOCK(mipi_hsc1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG3_OFFSET, NULL, NULL, &mipi_hsc2_clk, &pll2_sw_clk);
-
-/* IPU */
-DEFINE_CLOCK_FULL(ipu_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG5_OFFSET,
- NULL, NULL, clk_ipu_enable, clk_ipu_disable, &ahb_clk, &ipu_sec_clk);
-
-DEFINE_CLOCK_FULL(emi_fast_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG7_OFFSET,
- NULL, NULL, _clk_ccgr_enable, _clk_ccgr_disable_inwait,
- &ddr_clk, NULL);
-
-DEFINE_CLOCK(ipu_di0_clk, 0, MXC_CCM_CCGR6, MXC_CCM_CCGRx_CG5_OFFSET,
- NULL, NULL, &pll3_sw_clk, NULL);
-DEFINE_CLOCK(ipu_di1_clk, 0, MXC_CCM_CCGR6, MXC_CCM_CCGRx_CG6_OFFSET,
- NULL, NULL, &pll3_sw_clk, NULL);
-
-/* PATA */
-DEFINE_CLOCK(pata_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG0_OFFSET,
- NULL, NULL, &ipg_clk, &spba_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-
-static struct clk_lookup mx51_lookups[] = {
- /* i.mx51 has the i.mx21 type uart */
- _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
- _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
- _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
- _REGISTER_CLOCK(NULL, "gpt", gpt_clk)
- /* i.mx51 has the i.mx27 type fec */
- _REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
- _REGISTER_CLOCK("mxc_pwm.0", "pwm", pwm1_clk)
- _REGISTER_CLOCK("mxc_pwm.1", "pwm", pwm2_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
- _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
- _REGISTER_CLOCK("imx-i2c.2", NULL, hsi2c_clk)
- _REGISTER_CLOCK("mxc-ehci.0", "usb", usboh3_clk)
- _REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_ahb_clk)
- _REGISTER_CLOCK("mxc-ehci.0", "usb_phy1", usb_phy1_clk)
- _REGISTER_CLOCK("mxc-ehci.1", "usb", usboh3_clk)
- _REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_ahb_clk)
- _REGISTER_CLOCK("mxc-ehci.2", "usb", usboh3_clk)
- _REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_ahb_clk)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb", usboh3_clk)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", ahb_clk)
- _REGISTER_CLOCK("imx-keypad", NULL, dummy_clk)
- _REGISTER_CLOCK("mxc_nand", NULL, nfc_clk)
- _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
- _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
- _REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk)
- /* i.mx51 has the i.mx35 type sdma */
- _REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
- _REGISTER_CLOCK(NULL, "ckih", ckih_clk)
- _REGISTER_CLOCK(NULL, "ckih2", ckih2_clk)
- _REGISTER_CLOCK(NULL, "gpt_32k", gpt_32k_clk)
- _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
- _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
- /* i.mx51 has the i.mx35 type cspi */
- _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx51.0", NULL, esdhc1_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx51.1", NULL, esdhc2_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx51.2", NULL, esdhc3_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx51.3", NULL, esdhc4_clk)
- _REGISTER_CLOCK(NULL, "cpu_clk", cpu_clk)
- _REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
- _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
- _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
- _REGISTER_CLOCK(NULL, "mipi_hsp", mipi_hsp_clk)
- _REGISTER_CLOCK("imx-ipuv3", NULL, ipu_clk)
- _REGISTER_CLOCK("imx-ipuv3", "di0", ipu_di0_clk)
- _REGISTER_CLOCK("imx-ipuv3", "di1", ipu_di1_clk)
- _REGISTER_CLOCK(NULL, "gpc_dvfs", gpc_dvfs_clk)
- _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
-};
-
-static struct clk_lookup mx53_lookups[] = {
- /* i.mx53 has the i.mx21 type uart */
- _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
- _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
- _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
- _REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
- _REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
- _REGISTER_CLOCK(NULL, "gpt", gpt_clk)
- /* i.mx53 has the i.mx25 type fec */
- _REGISTER_CLOCK("imx25-fec.0", NULL, fec_clk)
- _REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
- _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
- _REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_mx53_clk)
- /* i.mx53 has the i.mx51 type ecspi */
- _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
- _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
- /* i.mx53 has the i.mx25 type cspi */
- _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx53.0", NULL, esdhc1_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx53.1", NULL, esdhc2_mx53_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx53.2", NULL, esdhc3_mx53_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx53.3", NULL, esdhc4_mx53_clk)
- _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
- _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
- /* i.mx53 has the i.mx35 type sdma */
- _REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
- _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
- _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
- _REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk)
- _REGISTER_CLOCK("imx-keypad", NULL, dummy_clk)
- _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
- _REGISTER_CLOCK("imx53-ahci.0", "ahci", sata_clk)
- _REGISTER_CLOCK("imx53-ahci.0", "ahci_phy", ahci_phy_clk)
- _REGISTER_CLOCK("imx53-ahci.0", "ahci_dma", ahci_dma_clk)
-};
-
-static void clk_tree_init(void)
-{
- u32 reg;
-
- ipg_perclk.set_parent(&ipg_perclk, &lp_apm_clk);
-
- /*
- * Initialise the IPG PER CLK dividers to 3. IPG_PER_CLK should be at
- * 8MHz; it's derived from lp_apm.
- *
- * FIXME: Verify if true for all boards
- */
- reg = __raw_readl(MXC_CCM_CBCDR);
- reg &= ~MXC_CCM_CBCDR_PERCLK_PRED1_MASK;
- reg &= ~MXC_CCM_CBCDR_PERCLK_PRED2_MASK;
- reg &= ~MXC_CCM_CBCDR_PERCLK_PODF_MASK;
- reg |= (2 << MXC_CCM_CBCDR_PERCLK_PRED1_OFFSET);
- __raw_writel(reg, MXC_CCM_CBCDR);
-}
-
-int __init mx51_clocks_init(unsigned long ckil, unsigned long osc,
- unsigned long ckih1, unsigned long ckih2)
-{
- int i;
-
- external_low_reference = ckil;
- external_high_reference = ckih1;
- ckih2_reference = ckih2;
- oscillator_reference = osc;
-
- for (i = 0; i < ARRAY_SIZE(mx51_lookups); i++)
- clkdev_add(&mx51_lookups[i]);
-
- clk_tree_init();
-
- clk_enable(&cpu_clk);
- clk_enable(&main_bus_clk);
-
- clk_enable(&iim_clk);
- imx_print_silicon_rev("i.MX51", mx51_revision());
- clk_disable(&iim_clk);
-
- /* move usb_phy_clk to 24MHz */
- clk_set_parent(&usb_phy1_clk, &osc_clk);
-
- /* set the usboh3_clk parent to pll2_sw_clk */
- clk_set_parent(&usboh3_clk, &pll2_sw_clk);
-
- /* Set SDHC parents to be PLL2 */
- clk_set_parent(&esdhc1_clk, &pll2_sw_clk);
- clk_set_parent(&esdhc2_clk, &pll2_sw_clk);
-
- /* set SDHC root clock to 166.25 MHz */
- clk_set_rate(&esdhc1_clk, 166250000);
- clk_set_rate(&esdhc2_clk, 166250000);
-
- /* System timer */
- mxc_timer_init(&gpt_clk, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
- MX51_INT_GPT);
- return 0;
-}
-
-int __init mx53_clocks_init(unsigned long ckil, unsigned long osc,
- unsigned long ckih1, unsigned long ckih2)
-{
- int i;
-
- external_low_reference = ckil;
- external_high_reference = ckih1;
- ckih2_reference = ckih2;
- oscillator_reference = osc;
-
- for (i = 0; i < ARRAY_SIZE(mx53_lookups); i++)
- clkdev_add(&mx53_lookups[i]);
-
- clk_tree_init();
-
- clk_set_parent(&uart_root_clk, &pll3_sw_clk);
- clk_enable(&cpu_clk);
- clk_enable(&main_bus_clk);
-
- clk_enable(&iim_clk);
- imx_print_silicon_rev("i.MX53", mx53_revision());
- clk_disable(&iim_clk);
-
- /* Set SDHC parents to be PLL2 */
- clk_set_parent(&esdhc1_clk, &pll2_sw_clk);
- clk_set_parent(&esdhc3_mx53_clk, &pll2_sw_clk);
-
- /* set SDHC root clock to 200 MHz */
- clk_set_rate(&esdhc1_clk, 200000000);
- clk_set_rate(&esdhc3_mx53_clk, 200000000);
-
- /* System timer */
- mxc_timer_init(&gpt_clk, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
- MX53_INT_GPT);
- return 0;
-}
-
-#ifdef CONFIG_OF
-static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
- unsigned long *ckih1, unsigned long *ckih2)
-{
- struct device_node *np;
-
- /* retrieve the frequency of fixed clocks from device tree */
- for_each_compatible_node(np, NULL, "fixed-clock") {
- u32 rate;
- if (of_property_read_u32(np, "clock-frequency", &rate))
- continue;
-
- if (of_device_is_compatible(np, "fsl,imx-ckil"))
- *ckil = rate;
- else if (of_device_is_compatible(np, "fsl,imx-osc"))
- *osc = rate;
- else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
- *ckih1 = rate;
- else if (of_device_is_compatible(np, "fsl,imx-ckih2"))
- *ckih2 = rate;
- }
-}
-
-int __init mx51_clocks_init_dt(void)
-{
- unsigned long ckil, osc, ckih1, ckih2;
-
- clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
- return mx51_clocks_init(ckil, osc, ckih1, ckih2);
-}
-
-int __init mx53_clocks_init_dt(void)
-{
- unsigned long ckil, osc, ckih1, ckih2;
-
- clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
- return mx53_clocks_init(ckil, osc, ckih1, ckih2);
-}
-#endif
diff --git a/arch/arm/mach-imx/cpu-imx5.c b/arch/arm/mach-imx/cpu-imx5.c
index aa15c517d06e..8eb15a2fcaf9 100644
--- a/arch/arm/mach-imx/cpu-imx5.c
+++ b/arch/arm/mach-imx/cpu-imx5.c
@@ -62,11 +62,8 @@ EXPORT_SYMBOL(mx51_revision);
* Dependent on link order - so the assumption is that vfp_init is called
* before us.
*/
-static int __init mx51_neon_fixup(void)
+int __init mx51_neon_fixup(void)
{
- if (!cpu_is_mx51())
- return 0;
-
if (mx51_revision() < IMX_CHIP_REVISION_3_0 &&
(elf_hwcap & HWCAP_NEON)) {
elf_hwcap &= ~HWCAP_NEON;
@@ -75,7 +72,6 @@ static int __init mx51_neon_fixup(void)
return 0;
}
-late_initcall(mx51_neon_fixup);
#endif
static int get_mx53_srev(void)
diff --git a/arch/arm/mach-imx/crmregs-imx3.h b/arch/arm/mach-imx/crmregs-imx3.h
index 53141273df45..a1dfde53e335 100644
--- a/arch/arm/mach-imx/crmregs-imx3.h
+++ b/arch/arm/mach-imx/crmregs-imx3.h
@@ -24,48 +24,47 @@
#define CKIH_CLK_FREQ_27MHZ 27000000
#define CKIL_CLK_FREQ 32768
-#define MXC_CCM_BASE (cpu_is_mx31() ? \
-MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR) : MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR))
+extern void __iomem *mx3_ccm_base;
/* Register addresses */
-#define MXC_CCM_CCMR (MXC_CCM_BASE + 0x00)
-#define MXC_CCM_PDR0 (MXC_CCM_BASE + 0x04)
-#define MXC_CCM_PDR1 (MXC_CCM_BASE + 0x08)
-#define MX35_CCM_PDR2 (MXC_CCM_BASE + 0x0C)
-#define MXC_CCM_RCSR (MXC_CCM_BASE + 0x0C)
-#define MX35_CCM_PDR3 (MXC_CCM_BASE + 0x10)
-#define MXC_CCM_MPCTL (MXC_CCM_BASE + 0x10)
-#define MX35_CCM_PDR4 (MXC_CCM_BASE + 0x14)
-#define MXC_CCM_UPCTL (MXC_CCM_BASE + 0x14)
-#define MX35_CCM_RCSR (MXC_CCM_BASE + 0x18)
-#define MXC_CCM_SRPCTL (MXC_CCM_BASE + 0x18)
-#define MX35_CCM_MPCTL (MXC_CCM_BASE + 0x1C)
-#define MXC_CCM_COSR (MXC_CCM_BASE + 0x1C)
-#define MX35_CCM_PPCTL (MXC_CCM_BASE + 0x20)
-#define MXC_CCM_CGR0 (MXC_CCM_BASE + 0x20)
-#define MX35_CCM_ACMR (MXC_CCM_BASE + 0x24)
-#define MXC_CCM_CGR1 (MXC_CCM_BASE + 0x24)
-#define MX35_CCM_COSR (MXC_CCM_BASE + 0x28)
-#define MXC_CCM_CGR2 (MXC_CCM_BASE + 0x28)
-#define MX35_CCM_CGR0 (MXC_CCM_BASE + 0x2C)
-#define MXC_CCM_WIMR (MXC_CCM_BASE + 0x2C)
-#define MX35_CCM_CGR1 (MXC_CCM_BASE + 0x30)
-#define MXC_CCM_LDC (MXC_CCM_BASE + 0x30)
-#define MX35_CCM_CGR2 (MXC_CCM_BASE + 0x34)
-#define MXC_CCM_DCVR0 (MXC_CCM_BASE + 0x34)
-#define MX35_CCM_CGR3 (MXC_CCM_BASE + 0x38)
-#define MXC_CCM_DCVR1 (MXC_CCM_BASE + 0x38)
-#define MXC_CCM_DCVR2 (MXC_CCM_BASE + 0x3C)
-#define MXC_CCM_DCVR3 (MXC_CCM_BASE + 0x40)
-#define MXC_CCM_LTR0 (MXC_CCM_BASE + 0x44)
-#define MXC_CCM_LTR1 (MXC_CCM_BASE + 0x48)
-#define MXC_CCM_LTR2 (MXC_CCM_BASE + 0x4C)
-#define MXC_CCM_LTR3 (MXC_CCM_BASE + 0x50)
-#define MXC_CCM_LTBR0 (MXC_CCM_BASE + 0x54)
-#define MXC_CCM_LTBR1 (MXC_CCM_BASE + 0x58)
-#define MXC_CCM_PMCR0 (MXC_CCM_BASE + 0x5C)
-#define MXC_CCM_PMCR1 (MXC_CCM_BASE + 0x60)
-#define MXC_CCM_PDR2 (MXC_CCM_BASE + 0x64)
+#define MXC_CCM_CCMR 0x00
+#define MXC_CCM_PDR0 0x04
+#define MXC_CCM_PDR1 0x08
+#define MX35_CCM_PDR2 0x0C
+#define MXC_CCM_RCSR 0x0C
+#define MX35_CCM_PDR3 0x10
+#define MXC_CCM_MPCTL 0x10
+#define MX35_CCM_PDR4 0x14
+#define MXC_CCM_UPCTL 0x14
+#define MX35_CCM_RCSR 0x18
+#define MXC_CCM_SRPCTL 0x18
+#define MX35_CCM_MPCTL 0x1C
+#define MXC_CCM_COSR 0x1C
+#define MX35_CCM_PPCTL 0x20
+#define MXC_CCM_CGR0 0x20
+#define MX35_CCM_ACMR 0x24
+#define MXC_CCM_CGR1 0x24
+#define MX35_CCM_COSR 0x28
+#define MXC_CCM_CGR2 0x28
+#define MX35_CCM_CGR0 0x2C
+#define MXC_CCM_WIMR 0x2C
+#define MX35_CCM_CGR1 0x30
+#define MXC_CCM_LDC 0x30
+#define MX35_CCM_CGR2 0x34
+#define MXC_CCM_DCVR0 0x34
+#define MX35_CCM_CGR3 0x38
+#define MXC_CCM_DCVR1 0x38
+#define MXC_CCM_DCVR2 0x3C
+#define MXC_CCM_DCVR3 0x40
+#define MXC_CCM_LTR0 0x44
+#define MXC_CCM_LTR1 0x48
+#define MXC_CCM_LTR2 0x4C
+#define MXC_CCM_LTR3 0x50
+#define MXC_CCM_LTBR0 0x54
+#define MXC_CCM_LTBR1 0x58
+#define MXC_CCM_PMCR0 0x5C
+#define MXC_CCM_PMCR1 0x60
+#define MXC_CCM_PDR2 0x64
/* Register bit definitions */
#define MXC_CCM_CCMR_WBEN (1 << 27)
diff --git a/arch/arm/mach-imx/imx51-dt.c b/arch/arm/mach-imx/imx51-dt.c
index 5f577fbda2c8..18e78dba4298 100644
--- a/arch/arm/mach-imx/imx51-dt.c
+++ b/arch/arm/mach-imx/imx51-dt.c
@@ -118,6 +118,7 @@ DT_MACHINE_START(IMX51_DT, "Freescale i.MX51 (Device Tree Support)")
.handle_irq = imx51_handle_irq,
.timer = &imx51_timer,
.init_machine = imx51_dt_init,
+ .init_late = imx51_init_late,
.dt_compat = imx51_dt_board_compat,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/imx53-dt.c b/arch/arm/mach-imx/imx53-dt.c
index 574eca4b89a5..eb04b6248e48 100644
--- a/arch/arm/mach-imx/imx53-dt.c
+++ b/arch/arm/mach-imx/imx53-dt.c
@@ -10,6 +10,9 @@
* http://www.gnu.org/copyleft/gpl.html
*/
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
@@ -81,6 +84,19 @@ static const struct of_device_id imx53_iomuxc_of_match[] __initconst = {
{ /* sentinel */ }
};
+static void __init imx53_qsb_init(void)
+{
+ struct clk *clk;
+
+ clk = clk_get_sys(NULL, "ssi_ext1");
+ if (IS_ERR(clk)) {
+ pr_err("failed to get clk ssi_ext1\n");
+ return;
+ }
+
+ clk_register_clkdev(clk, NULL, "0-000a");
+}
+
static void __init imx53_dt_init(void)
{
struct device_node *node;
@@ -99,6 +115,9 @@ static void __init imx53_dt_init(void)
of_node_put(node);
}
+ if (of_machine_is_compatible("fsl,imx53-qsb"))
+ imx53_qsb_init();
+
of_platform_populate(NULL, of_default_bus_match_table,
imx53_auxdata_lookup, NULL);
}
diff --git a/arch/arm/mach-imx/lluart.c b/arch/arm/mach-imx/lluart.c
index 0213f8dcee81..c40a34c00489 100644
--- a/arch/arm/mach-imx/lluart.c
+++ b/arch/arm/mach-imx/lluart.c
@@ -17,6 +17,12 @@
#include <mach/hardware.h>
static struct map_desc imx_lluart_desc = {
+#ifdef CONFIG_DEBUG_IMX6Q_UART2
+ .virtual = MX6Q_IO_P2V(MX6Q_UART2_BASE_ADDR),
+ .pfn = __phys_to_pfn(MX6Q_UART2_BASE_ADDR),
+ .length = MX6Q_UART2_SIZE,
+ .type = MT_DEVICE,
+#endif
#ifdef CONFIG_DEBUG_IMX6Q_UART4
.virtual = MX6Q_IO_P2V(MX6Q_UART4_BASE_ADDR),
.pfn = __phys_to_pfn(MX6Q_UART4_BASE_ADDR),
diff --git a/arch/arm/mach-imx/mach-cpuimx51sd.c b/arch/arm/mach-imx/mach-cpuimx51sd.c
index ce341a6874fc..ac50f1671e38 100644
--- a/arch/arm/mach-imx/mach-cpuimx51sd.c
+++ b/arch/arm/mach-imx/mach-cpuimx51sd.c
@@ -369,5 +369,6 @@ MACHINE_START(EUKREA_CPUIMX51SD, "Eukrea CPUIMX51SD")
.handle_irq = imx51_handle_irq,
.timer = &mxc_timer,
.init_machine = eukrea_cpuimx51sd_init,
+ .init_late = imx51_init_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index 748ba2e311b5..dff82eb57cd9 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -178,7 +178,7 @@ static struct soc_camera_link iclink_tvp5150 = {
static struct mx2_camera_platform_data visstrim_camera = {
.flags = MX2_CAMERA_CCIR | MX2_CAMERA_CCIR_INTERLACE |
- MX2_CAMERA_SWAP16 | MX2_CAMERA_PCLK_SAMPLE_RISING,
+ MX2_CAMERA_PCLK_SAMPLE_RISING,
.clk = 100000,
};
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 3df360a52c17..b47e98b7d539 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -10,6 +10,8 @@
* http://www.gnu.org/copyleft/gpl.html
*/
+#include <linux/clk.h>
+#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -64,18 +66,53 @@ soft:
/* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
static int ksz9021rn_phy_fixup(struct phy_device *phydev)
{
- /* min rx data delay */
- phy_write(phydev, 0x0b, 0x8105);
- phy_write(phydev, 0x0c, 0x0000);
+ if (IS_ENABLED(CONFIG_PHYLIB)) {
+ /* min rx data delay */
+ phy_write(phydev, 0x0b, 0x8105);
+ phy_write(phydev, 0x0c, 0x0000);
- /* max rx/tx clock delay, min rx/tx control delay */
- phy_write(phydev, 0x0b, 0x8104);
- phy_write(phydev, 0x0c, 0xf0f0);
- phy_write(phydev, 0x0b, 0x104);
+ /* max rx/tx clock delay, min rx/tx control delay */
+ phy_write(phydev, 0x0b, 0x8104);
+ phy_write(phydev, 0x0c, 0xf0f0);
+ phy_write(phydev, 0x0b, 0x104);
+ }
return 0;
}
+static void __init imx6q_sabrelite_cko1_setup(void)
+{
+ struct clk *cko1_sel, *ahb, *cko1;
+ unsigned long rate;
+
+ cko1_sel = clk_get_sys(NULL, "cko1_sel");
+ ahb = clk_get_sys(NULL, "ahb");
+ cko1 = clk_get_sys(NULL, "cko1");
+ if (IS_ERR(cko1_sel) || IS_ERR(ahb) || IS_ERR(cko1)) {
+ pr_err("cko1 setup failed!\n");
+ goto put_clk;
+ }
+ clk_set_parent(cko1_sel, ahb);
+ rate = clk_round_rate(cko1, 16000000);
+ clk_set_rate(cko1, rate);
+ clk_register_clkdev(cko1, NULL, "0-000a");
+put_clk:
+ if (!IS_ERR(cko1_sel))
+ clk_put(cko1_sel);
+ if (!IS_ERR(ahb))
+ clk_put(ahb);
+ if (!IS_ERR(cko1))
+ clk_put(cko1);
+}
+
+static void __init imx6q_sabrelite_init(void)
+{
+ if (IS_ENABLED(CONFIG_PHYLIB))
+ phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
+ ksz9021rn_phy_fixup);
+ imx6q_sabrelite_cko1_setup();
+}
+
static void __init imx6q_init_machine(void)
{
/*
@@ -85,8 +122,7 @@ static void __init imx6q_init_machine(void)
pinctrl_provide_dummies();
if (of_machine_is_compatible("fsl,imx6q-sabrelite"))
- phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
- ksz9021rn_phy_fixup);
+ imx6q_sabrelite_init();
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
@@ -139,6 +175,7 @@ static struct sys_timer imx6q_timer = {
static const char *imx6q_dt_compat[] __initdata = {
"fsl,imx6q-arm2",
"fsl,imx6q-sabrelite",
+ "fsl,imx6q-sabresd",
"fsl,imx6q",
NULL,
};
diff --git a/arch/arm/mach-imx/mach-mx35_3ds.c b/arch/arm/mach-imx/mach-mx35_3ds.c
index 86284bba46d3..28aa19476de7 100644
--- a/arch/arm/mach-imx/mach-mx35_3ds.c
+++ b/arch/arm/mach-imx/mach-mx35_3ds.c
@@ -98,8 +98,7 @@ static struct i2c_board_info __initdata i2c_devices_3ds[] = {
static int lcd_power_gpio = -ENXIO;
-static int mc9s08dz60_gpiochip_match(struct gpio_chip *chip,
- const void *data)
+static int mc9s08dz60_gpiochip_match(struct gpio_chip *chip, void *data)
{
return !strcmp(chip->label, data);
}
diff --git a/arch/arm/mach-imx/mach-mx51_3ds.c b/arch/arm/mach-imx/mach-mx51_3ds.c
index 83eab4176ca4..3c5b163923f6 100644
--- a/arch/arm/mach-imx/mach-mx51_3ds.c
+++ b/arch/arm/mach-imx/mach-mx51_3ds.c
@@ -175,5 +175,6 @@ MACHINE_START(MX51_3DS, "Freescale MX51 3-Stack Board")
.handle_irq = imx51_handle_irq,
.timer = &mx51_3ds_timer,
.init_machine = mx51_3ds_init,
+ .init_late = imx51_init_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx51_babbage.c b/arch/arm/mach-imx/mach-mx51_babbage.c
index e4b822e9f719..dde397014d4b 100644
--- a/arch/arm/mach-imx/mach-mx51_babbage.c
+++ b/arch/arm/mach-imx/mach-mx51_babbage.c
@@ -163,6 +163,12 @@ static iomux_v3_cfg_t mx51babbage_pads[] = {
MX51_PAD_CSPI1_SCLK__ECSPI1_SCLK,
MX51_PAD_CSPI1_SS0__GPIO4_24,
MX51_PAD_CSPI1_SS1__GPIO4_25,
+
+ /* Audio */
+ MX51_PAD_AUD3_BB_TXD__AUD3_TXD,
+ MX51_PAD_AUD3_BB_RXD__AUD3_RXD,
+ MX51_PAD_AUD3_BB_CK__AUD3_TXC,
+ MX51_PAD_AUD3_BB_FS__AUD3_TXFS,
};
/* Serial ports */
@@ -426,5 +432,6 @@ MACHINE_START(MX51_BABBAGE, "Freescale MX51 Babbage Board")
.handle_irq = imx51_handle_irq,
.timer = &mx51_babbage_timer,
.init_machine = mx51_babbage_init,
+ .init_late = imx51_init_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx51_efikamx.c b/arch/arm/mach-imx/mach-mx51_efikamx.c
index 86e96ef11f9d..8d09c0126cab 100644
--- a/arch/arm/mach-imx/mach-mx51_efikamx.c
+++ b/arch/arm/mach-imx/mach-mx51_efikamx.c
@@ -207,29 +207,32 @@ static void mx51_efikamx_power_off(void)
static int __init mx51_efikamx_power_init(void)
{
- if (machine_is_mx51_efikamx()) {
- pwgt1 = regulator_get(NULL, "pwgt1");
- pwgt2 = regulator_get(NULL, "pwgt2");
- if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
- regulator_enable(pwgt1);
- regulator_enable(pwgt2);
- }
- gpio_request(EFIKAMX_POWEROFF, "poweroff");
- pm_power_off = mx51_efikamx_power_off;
-
- /* enable coincell charger. maybe need a small power driver ? */
- coincell = regulator_get(NULL, "coincell");
- if (!IS_ERR(coincell)) {
- regulator_set_voltage(coincell, 3000000, 3000000);
- regulator_enable(coincell);
- }
-
- regulator_has_full_constraints();
+ pwgt1 = regulator_get(NULL, "pwgt1");
+ pwgt2 = regulator_get(NULL, "pwgt2");
+ if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
+ regulator_enable(pwgt1);
+ regulator_enable(pwgt2);
+ }
+ gpio_request(EFIKAMX_POWEROFF, "poweroff");
+ pm_power_off = mx51_efikamx_power_off;
+
+ /* enable coincell charger. maybe need a small power driver ? */
+ coincell = regulator_get(NULL, "coincell");
+ if (!IS_ERR(coincell)) {
+ regulator_set_voltage(coincell, 3000000, 3000000);
+ regulator_enable(coincell);
}
+ regulator_has_full_constraints();
+
return 0;
}
-late_initcall(mx51_efikamx_power_init);
+
+static void __init mx51_efikamx_init_late(void)
+{
+ imx51_init_late();
+ mx51_efikamx_power_init();
+}
static void __init mx51_efikamx_init(void)
{
@@ -292,5 +295,6 @@ MACHINE_START(MX51_EFIKAMX, "Genesi Efika MX (Smarttop)")
.handle_irq = imx51_handle_irq,
.timer = &mx51_efikamx_timer,
.init_machine = mx51_efikamx_init,
+ .init_late = mx51_efikamx_init_late,
.restart = mx51_efikamx_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx51_efikasb.c b/arch/arm/mach-imx/mach-mx51_efikasb.c
index 88f837a6cc76..fdbd181b97ef 100644
--- a/arch/arm/mach-imx/mach-mx51_efikasb.c
+++ b/arch/arm/mach-imx/mach-mx51_efikasb.c
@@ -211,22 +211,25 @@ static void mx51_efikasb_power_off(void)
static int __init mx51_efikasb_power_init(void)
{
- if (machine_is_mx51_efikasb()) {
- pwgt1 = regulator_get(NULL, "pwgt1");
- pwgt2 = regulator_get(NULL, "pwgt2");
- if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
- regulator_enable(pwgt1);
- regulator_enable(pwgt2);
- }
- gpio_request(EFIKASB_POWEROFF, "poweroff");
- pm_power_off = mx51_efikasb_power_off;
-
- regulator_has_full_constraints();
+ pwgt1 = regulator_get(NULL, "pwgt1");
+ pwgt2 = regulator_get(NULL, "pwgt2");
+ if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
+ regulator_enable(pwgt1);
+ regulator_enable(pwgt2);
}
+ gpio_request(EFIKASB_POWEROFF, "poweroff");
+ pm_power_off = mx51_efikasb_power_off;
+
+ regulator_has_full_constraints();
return 0;
}
-late_initcall(mx51_efikasb_power_init);
+
+static void __init mx51_efikasb_init_late(void)
+{
+ imx51_init_late();
+ mx51_efikasb_power_init();
+}
/* 01 R1.3 board
10 R2.0 board */
@@ -287,6 +290,7 @@ MACHINE_START(MX51_EFIKASB, "Genesi Efika MX (Smartbook)")
.init_irq = mx51_init_irq,
.handle_irq = imx51_handle_irq,
.init_machine = efikasb_board_init,
+ .init_late = mx51_efikasb_init_late,
.timer = &mx51_efikasb_timer,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c
index 10c9795934a3..0a40004154f2 100644
--- a/arch/arm/mach-imx/mach-pcm037.c
+++ b/arch/arm/mach-imx/mach-pcm037.c
@@ -694,6 +694,11 @@ static void __init pcm037_reserve(void)
MX3_CAMERA_BUF_SIZE);
}
+static void __init pcm037_init_late(void)
+{
+ pcm037_eet_init_devices();
+}
+
MACHINE_START(PCM037, "Phytec Phycore pcm037")
/* Maintainer: Pengutronix */
.atag_offset = 0x100,
@@ -704,5 +709,6 @@ MACHINE_START(PCM037, "Phytec Phycore pcm037")
.handle_irq = imx31_handle_irq,
.timer = &pcm037_timer,
.init_machine = pcm037_init,
+ .init_late = pcm037_init_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pcm037_eet.c b/arch/arm/mach-imx/mach-pcm037_eet.c
index 1b7606bef8f4..11ffa81ad17d 100644
--- a/arch/arm/mach-imx/mach-pcm037_eet.c
+++ b/arch/arm/mach-imx/mach-pcm037_eet.c
@@ -160,9 +160,9 @@ static const struct gpio_keys_platform_data
.rep = 0, /* No auto-repeat */
};
-static int __init eet_init_devices(void)
+int __init pcm037_eet_init_devices(void)
{
- if (!machine_is_pcm037() || pcm037_variant() != PCM037_EET)
+ if (pcm037_variant() != PCM037_EET)
return 0;
mxc_iomux_setup_multiple_pins(pcm037_eet_pins,
@@ -176,4 +176,3 @@ static int __init eet_init_devices(void)
return 0;
}
-late_initcall(eet_init_devices);
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 9128d15b1eb7..967ed5b35a45 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -32,6 +32,10 @@
#include <mach/iomux-v3.h>
#include <mach/irqs.h>
+#include "crmregs-imx3.h"
+
+void __iomem *mx3_ccm_base;
+
static void imx3_idle(void)
{
unsigned long reg = 0;
@@ -138,6 +142,7 @@ void __init imx31_init_early(void)
mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
arch_ioremap_caller = imx3_ioremap_caller;
arm_pm_idle = imx3_idle;
+ mx3_ccm_base = MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR);
}
void __init mx31_init_irq(void)
@@ -211,6 +216,7 @@ void __init imx35_init_early(void)
mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
arm_pm_idle = imx3_idle;
arch_ioremap_caller = imx3_ioremap_caller;
+ mx3_ccm_base = MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR);
}
void __init mx35_init_irq(void)
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index ba91e6b31cf4..feeee17da96b 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -33,6 +33,7 @@ static void imx5_idle(void)
gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
if (IS_ERR(gpc_dvfs_clk))
return;
+ clk_prepare(gpc_dvfs_clk);
}
clk_enable(gpc_dvfs_clk);
mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
@@ -236,3 +237,8 @@ void __init imx53_soc_init(void)
platform_device_register_simple("imx31-audmux", 0, imx53_audmux_res,
ARRAY_SIZE(imx53_audmux_res));
}
+
+void __init imx51_init_late(void)
+{
+ mx51_neon_fixup();
+}
diff --git a/arch/arm/mach-imx/pcm037.h b/arch/arm/mach-imx/pcm037.h
index d6929721a5fd..7d167690e17d 100644
--- a/arch/arm/mach-imx/pcm037.h
+++ b/arch/arm/mach-imx/pcm037.h
@@ -8,4 +8,10 @@ enum pcm037_board_variant {
extern enum pcm037_board_variant pcm037_variant(void);
+#ifdef CONFIG_MACH_PCM037_EET
+int pcm037_eet_init_devices(void);
+#else
+static inline int pcm037_eet_init_devices(void) { return 0; }
+#endif
+
#endif
diff --git a/arch/arm/mach-imx/pm-imx3.c b/arch/arm/mach-imx/pm-imx3.c
index b3752439632e..822103bdb709 100644
--- a/arch/arm/mach-imx/pm-imx3.c
+++ b/arch/arm/mach-imx/pm-imx3.c
@@ -21,14 +21,14 @@
*/
void mx3_cpu_lp_set(enum mx3_cpu_pwr_mode mode)
{
- int reg = __raw_readl(MXC_CCM_CCMR);
+ int reg = __raw_readl(mx3_ccm_base + MXC_CCM_CCMR);
reg &= ~MXC_CCM_CCMR_LPM_MASK;
switch (mode) {
case MX3_WAIT:
if (cpu_is_mx35())
reg |= MXC_CCM_CCMR_LPM_WAIT_MX35;
- __raw_writel(reg, MXC_CCM_CCMR);
+ __raw_writel(reg, mx3_ccm_base + MXC_CCM_CCMR);
break;
default:
pr_err("Unknown cpu power mode: %d\n", mode);
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index ebbd7fc90eb4..a9f80943d01f 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -28,6 +28,7 @@
#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/export.h>
+#include <linux/gpio.h>
#include <mach/udc.h>
#include <mach/hardware.h>
@@ -107,7 +108,7 @@ static signed char irq2gpio[32] = {
7, 8, 9, 10, 11, 12, -1, -1,
};
-int gpio_to_irq(int gpio)
+static int ixp4xx_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
{
int irq;
@@ -117,7 +118,6 @@ int gpio_to_irq(int gpio)
}
return -EINVAL;
}
-EXPORT_SYMBOL(gpio_to_irq);
int irq_to_gpio(unsigned int irq)
{
@@ -383,12 +383,56 @@ static struct platform_device *ixp46x_devices[] __initdata = {
unsigned long ixp4xx_exp_bus_size;
EXPORT_SYMBOL(ixp4xx_exp_bus_size);
+static int ixp4xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ gpio_line_config(gpio, IXP4XX_GPIO_IN);
+
+ return 0;
+}
+
+static int ixp4xx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+ int level)
+{
+ gpio_line_set(gpio, level);
+ gpio_line_config(gpio, IXP4XX_GPIO_OUT);
+
+ return 0;
+}
+
+static int ixp4xx_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ int value;
+
+ gpio_line_get(gpio, &value);
+
+ return value;
+}
+
+static void ixp4xx_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ gpio_line_set(gpio, value);
+}
+
+static struct gpio_chip ixp4xx_gpio_chip = {
+ .label = "IXP4XX_GPIO_CHIP",
+ .direction_input = ixp4xx_gpio_direction_input,
+ .direction_output = ixp4xx_gpio_direction_output,
+ .get = ixp4xx_gpio_get_value,
+ .set = ixp4xx_gpio_set_value,
+ .to_irq = ixp4xx_gpio_to_irq,
+ .base = 0,
+ .ngpio = 16,
+};
+
void __init ixp4xx_sys_init(void)
{
ixp4xx_exp_bus_size = SZ_16M;
platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices));
+ gpiochip_add(&ixp4xx_gpio_chip);
+
if (cpu_is_ixp46x()) {
int region;
diff --git a/arch/arm/mach-ixp4xx/include/mach/gpio.h b/arch/arm/mach-ixp4xx/include/mach/gpio.h
index 83d6b4ed60bb..ef37f2635b0e 100644
--- a/arch/arm/mach-ixp4xx/include/mach/gpio.h
+++ b/arch/arm/mach-ixp4xx/include/mach/gpio.h
@@ -1,79 +1,2 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/gpio.h
- *
- * IXP4XX GPIO wrappers for arch-neutral GPIO calls
- *
- * Written by Milan Svoboda <msvoboda@ra.rockwell.com>
- * Based on PXA implementation by Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_ARCH_IXP4XX_GPIO_H
-#define __ASM_ARCH_IXP4XX_GPIO_H
-
-#include <linux/kernel.h>
-#include <mach/hardware.h>
-
-#define __ARM_GPIOLIB_COMPLEX
-
-static inline int gpio_request(unsigned gpio, const char *label)
-{
- return 0;
-}
-
-static inline void gpio_free(unsigned gpio)
-{
- might_sleep();
-
- return;
-}
-
-static inline int gpio_direction_input(unsigned gpio)
-{
- gpio_line_config(gpio, IXP4XX_GPIO_IN);
- return 0;
-}
-
-static inline int gpio_direction_output(unsigned gpio, int level)
-{
- gpio_line_set(gpio, level);
- gpio_line_config(gpio, IXP4XX_GPIO_OUT);
- return 0;
-}
-
-static inline int gpio_get_value(unsigned gpio)
-{
- int value;
-
- gpio_line_get(gpio, &value);
-
- return value;
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
- gpio_line_set(gpio, value);
-}
-
-#include <asm-generic/gpio.h> /* cansleep wrappers */
-
-extern int gpio_to_irq(int gpio);
-#define gpio_to_irq gpio_to_irq
-extern int irq_to_gpio(unsigned int irq);
-
-#endif
+/* empty */
diff --git a/arch/arm/mach-kirkwood/board-dreamplug.c b/arch/arm/mach-kirkwood/board-dreamplug.c
index 985453994dd3..55e357ab2923 100644
--- a/arch/arm/mach-kirkwood/board-dreamplug.c
+++ b/arch/arm/mach-kirkwood/board-dreamplug.c
@@ -27,7 +27,6 @@
#include <linux/mtd/physmap.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index 10d1969b9e3a..edc3f8a9d45e 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -43,6 +43,9 @@ static void __init kirkwood_dt_init(void)
kirkwood_l2_init();
#endif
+ /* Set up the root of the clk tree */
+ kirkwood_clk_init();
+
/* internal devices that every board has */
kirkwood_wdt_init();
kirkwood_xor0_init();
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 3ad037385a5e..25fb3fd418ef 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -15,7 +15,8 @@
#include <linux/ata_platform.h>
#include <linux/mtd/nand.h>
#include <linux/dma-mapping.h>
-#include <linux/of.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
#include <net/dsa.h>
#include <asm/page.h>
#include <asm/timex.h>
@@ -32,6 +33,7 @@
#include <plat/common.h>
#include <plat/time.h>
#include <plat/addr-map.h>
+#include <plat/mv_xor.h>
#include "common.h"
/*****************************************************************************
@@ -61,20 +63,188 @@ void __init kirkwood_map_io(void)
iotable_init(kirkwood_io_desc, ARRAY_SIZE(kirkwood_io_desc));
}
-/*
- * Default clock control bits. Any bit _not_ set in this variable
- * will be cleared from the hardware after platform devices have been
- * registered. Some reserved bits must be set to 1.
- */
-unsigned int kirkwood_clk_ctrl = CGC_DUNIT | CGC_RESERVED;
+/*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
+
+static void disable_sata0(void)
+{
+ /* Disable PLL and IVREF */
+ writel(readl(SATA0_PHY_MODE_2) & ~0xf, SATA0_PHY_MODE_2);
+ /* Disable PHY */
+ writel(readl(SATA0_IF_CTRL) | 0x200, SATA0_IF_CTRL);
+}
+
+static void disable_sata1(void)
+{
+ /* Disable PLL and IVREF */
+ writel(readl(SATA1_PHY_MODE_2) & ~0xf, SATA1_PHY_MODE_2);
+ /* Disable PHY */
+ writel(readl(SATA1_IF_CTRL) | 0x200, SATA1_IF_CTRL);
+}
+
+static void disable_pcie0(void)
+{
+ writel(readl(PCIE_LINK_CTRL) | 0x10, PCIE_LINK_CTRL);
+ while (1)
+ if (readl(PCIE_STATUS) & 0x1)
+ break;
+ writel(readl(PCIE_LINK_CTRL) & ~0x10, PCIE_LINK_CTRL);
+}
+
+static void disable_pcie1(void)
+{
+ u32 dev, rev;
+
+ kirkwood_pcie_id(&dev, &rev);
+
+ if (dev == MV88F6282_DEV_ID) {
+ writel(readl(PCIE1_LINK_CTRL) | 0x10, PCIE1_LINK_CTRL);
+ while (1)
+ if (readl(PCIE1_STATUS) & 0x1)
+ break;
+ writel(readl(PCIE1_LINK_CTRL) & ~0x10, PCIE1_LINK_CTRL);
+ }
+}
+
+/* An extended version of the gated clk. This calls fn() before
+ * disabling the clock. We use this to turn off PHYs etc. */
+struct clk_gate_fn {
+ struct clk_gate gate;
+ void (*fn)(void);
+};
+
+#define to_clk_gate_fn(_gate) container_of(_gate, struct clk_gate_fn, gate)
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
+static void clk_gate_fn_disable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct clk_gate_fn *gate_fn = to_clk_gate_fn(gate);
+
+ if (gate_fn->fn)
+ gate_fn->fn();
+
+ clk_gate_ops.disable(hw);
+}
+
+static struct clk_ops clk_gate_fn_ops;
+
+static struct clk __init *clk_register_gate_fn(struct device *dev,
+ const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock,
+ void (*fn)(void))
+{
+ struct clk_gate_fn *gate_fn;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ gate_fn = kzalloc(sizeof(struct clk_gate_fn), GFP_KERNEL);
+ if (!gate_fn) {
+ pr_err("%s: could not allocate gated clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &clk_gate_fn_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_gate assignments */
+ gate_fn->gate.reg = reg;
+ gate_fn->gate.bit_idx = bit_idx;
+ gate_fn->gate.flags = clk_gate_flags;
+ gate_fn->gate.lock = lock;
+ gate_fn->gate.hw.init = &init;
+
+ /* ops is the gate ops, but with our disable function */
+ if (clk_gate_fn_ops.disable != clk_gate_fn_disable) {
+ clk_gate_fn_ops = clk_gate_ops;
+ clk_gate_fn_ops.disable = clk_gate_fn_disable;
+ }
+ clk = clk_register(dev, &gate_fn->gate.hw);
+
+ if (IS_ERR(clk))
+ kfree(gate_fn);
+
+ return clk;
+}
+
+static DEFINE_SPINLOCK(gating_lock);
+static struct clk *tclk;
+
+static struct clk __init *kirkwood_register_gate(const char *name, u8 bit_idx)
+{
+ return clk_register_gate(NULL, name, "tclk", 0,
+ (void __iomem *)CLOCK_GATING_CTRL,
+ bit_idx, 0, &gating_lock);
+}
+
+static struct clk __init *kirkwood_register_gate_fn(const char *name,
+ u8 bit_idx,
+ void (*fn)(void))
+{
+ return clk_register_gate_fn(NULL, name, "tclk", 0,
+ (void __iomem *)CLOCK_GATING_CTRL,
+ bit_idx, 0, &gating_lock, fn);
+}
+
+void __init kirkwood_clk_init(void)
+{
+ struct clk *runit, *ge0, *ge1, *sata0, *sata1, *usb0, *sdio;
+ struct clk *crypto, *xor0, *xor1, *pex0, *pex1, *audio;
+
+ tclk = clk_register_fixed_rate(NULL, "tclk", NULL,
+ CLK_IS_ROOT, kirkwood_tclk);
+
+ runit = kirkwood_register_gate("runit", CGC_BIT_RUNIT);
+ ge0 = kirkwood_register_gate("ge0", CGC_BIT_GE0);
+ ge1 = kirkwood_register_gate("ge1", CGC_BIT_GE1);
+ sata0 = kirkwood_register_gate_fn("sata0", CGC_BIT_SATA0,
+ disable_sata0);
+ sata1 = kirkwood_register_gate_fn("sata1", CGC_BIT_SATA1,
+ disable_sata1);
+ usb0 = kirkwood_register_gate("usb0", CGC_BIT_USB0);
+ sdio = kirkwood_register_gate("sdio", CGC_BIT_SDIO);
+ crypto = kirkwood_register_gate("crypto", CGC_BIT_CRYPTO);
+ xor0 = kirkwood_register_gate("xor0", CGC_BIT_XOR0);
+ xor1 = kirkwood_register_gate("xor1", CGC_BIT_XOR1);
+ pex0 = kirkwood_register_gate_fn("pex0", CGC_BIT_PEX0,
+ disable_pcie0);
+ pex1 = kirkwood_register_gate_fn("pex1", CGC_BIT_PEX1,
+ disable_pcie1);
+ audio = kirkwood_register_gate("audio", CGC_BIT_AUDIO);
+ kirkwood_register_gate("tdm", CGC_BIT_TDM);
+ kirkwood_register_gate("tsu", CGC_BIT_TSU);
+
+ /* clkdev entries, mapping clks to devices */
+ orion_clkdev_add(NULL, "orion_spi.0", runit);
+ orion_clkdev_add(NULL, "orion_spi.1", runit);
+ orion_clkdev_add(NULL, MV643XX_ETH_NAME ".0", ge0);
+ orion_clkdev_add(NULL, MV643XX_ETH_NAME ".1", ge1);
+ orion_clkdev_add(NULL, "orion_wdt", tclk);
+ orion_clkdev_add("0", "sata_mv.0", sata0);
+ orion_clkdev_add("1", "sata_mv.0", sata1);
+ orion_clkdev_add(NULL, "orion-ehci.0", usb0);
+ orion_clkdev_add(NULL, "orion_nand", runit);
+ orion_clkdev_add(NULL, "mvsdio", sdio);
+ orion_clkdev_add(NULL, "mv_crypto", crypto);
+ orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".0", xor0);
+ orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".1", xor1);
+ orion_clkdev_add("0", "pcie", pex0);
+ orion_clkdev_add("1", "pcie", pex1);
+ orion_clkdev_add(NULL, "kirkwood-i2s", audio);
+}
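
A minimal sketch of the consumer side, assuming the "0" con_id on the
sata_mv.0 platform device registered in the clkdev table above; the
function name here is purely illustrative:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_enable_sata0_clk(struct platform_device *pdev)
	{
		struct clk *clk;

		clk = clk_get(&pdev->dev, "0");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* Sets CGC_BIT_SATA0 in CLOCK_GATING_CTRL via the gate ops */
		clk_prepare_enable(clk);

		/* ... use the controller ... */

		/* disable_sata0() runs first (gate_fn), then the bit is cleared */
		clk_disable_unprepare(clk);
		clk_put(clk);

		return 0;
	}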
/*****************************************************************************
* EHCI0
****************************************************************************/
void __init kirkwood_ehci_init(void)
{
- kirkwood_clk_ctrl |= CGC_USB0;
orion_ehci_init(USB_PHYS_BASE, IRQ_KIRKWOOD_USB, EHCI_PHY_NA);
}
@@ -84,11 +254,9 @@ void __init kirkwood_ehci_init(void)
****************************************************************************/
void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
- kirkwood_clk_ctrl |= CGC_GE0;
-
orion_ge00_init(eth_data,
GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
- IRQ_KIRKWOOD_GE00_ERR, kirkwood_tclk);
+ IRQ_KIRKWOOD_GE00_ERR);
}
@@ -97,12 +265,9 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
****************************************************************************/
void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
{
-
- kirkwood_clk_ctrl |= CGC_GE1;
-
orion_ge01_init(eth_data,
GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
- IRQ_KIRKWOOD_GE01_ERR, kirkwood_tclk);
+ IRQ_KIRKWOOD_GE01_ERR);
}
@@ -144,7 +309,6 @@ static struct platform_device kirkwood_nand_flash = {
void __init kirkwood_nand_init(struct mtd_partition *parts, int nr_parts,
int chip_delay)
{
- kirkwood_clk_ctrl |= CGC_RUNIT;
kirkwood_nand_data.parts = parts;
kirkwood_nand_data.nr_parts = nr_parts;
kirkwood_nand_data.chip_delay = chip_delay;
@@ -154,7 +318,6 @@ void __init kirkwood_nand_init(struct mtd_partition *parts, int nr_parts,
void __init kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts,
int (*dev_ready)(struct mtd_info *))
{
- kirkwood_clk_ctrl |= CGC_RUNIT;
kirkwood_nand_data.parts = parts;
kirkwood_nand_data.nr_parts = nr_parts;
kirkwood_nand_data.dev_ready = dev_ready;
@@ -175,10 +338,6 @@ static void __init kirkwood_rtc_init(void)
****************************************************************************/
void __init kirkwood_sata_init(struct mv_sata_platform_data *sata_data)
{
- kirkwood_clk_ctrl |= CGC_SATA0;
- if (sata_data->n_ports > 1)
- kirkwood_clk_ctrl |= CGC_SATA1;
-
orion_sata_init(sata_data, SATA_PHYS_BASE, IRQ_KIRKWOOD_SATA);
}
@@ -221,7 +380,6 @@ void __init kirkwood_sdio_init(struct mvsdio_platform_data *mvsdio_data)
mvsdio_data->clock = 100000000;
else
mvsdio_data->clock = 200000000;
- kirkwood_clk_ctrl |= CGC_SDIO;
kirkwood_sdio.dev.platform_data = mvsdio_data;
platform_device_register(&kirkwood_sdio);
}
@@ -232,8 +390,7 @@ void __init kirkwood_sdio_init(struct mvsdio_platform_data *mvsdio_data)
****************************************************************************/
void __init kirkwood_spi_init()
{
- kirkwood_clk_ctrl |= CGC_RUNIT;
- orion_spi_init(SPI_PHYS_BASE, kirkwood_tclk);
+ orion_spi_init(SPI_PHYS_BASE);
}
@@ -253,7 +410,7 @@ void __init kirkwood_i2c_init(void)
void __init kirkwood_uart0_init(void)
{
orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
- IRQ_KIRKWOOD_UART_0, kirkwood_tclk);
+ IRQ_KIRKWOOD_UART_0, tclk);
}
@@ -263,7 +420,7 @@ void __init kirkwood_uart0_init(void)
void __init kirkwood_uart1_init(void)
{
orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
- IRQ_KIRKWOOD_UART_1, kirkwood_tclk);
+ IRQ_KIRKWOOD_UART_1, tclk);
}
/*****************************************************************************
@@ -271,7 +428,6 @@ void __init kirkwood_uart1_init(void)
****************************************************************************/
void __init kirkwood_crypto_init(void)
{
- kirkwood_clk_ctrl |= CGC_CRYPTO;
orion_crypto_init(CRYPTO_PHYS_BASE, KIRKWOOD_SRAM_PHYS_BASE,
KIRKWOOD_SRAM_SIZE, IRQ_KIRKWOOD_CRYPTO);
}
@@ -282,8 +438,6 @@ void __init kirkwood_crypto_init(void)
****************************************************************************/
void __init kirkwood_xor0_init(void)
{
- kirkwood_clk_ctrl |= CGC_XOR0;
-
orion_xor0_init(XOR0_PHYS_BASE, XOR0_HIGH_PHYS_BASE,
IRQ_KIRKWOOD_XOR_00, IRQ_KIRKWOOD_XOR_01);
}
@@ -294,8 +448,6 @@ void __init kirkwood_xor0_init(void)
****************************************************************************/
void __init kirkwood_xor1_init(void)
{
- kirkwood_clk_ctrl |= CGC_XOR1;
-
orion_xor1_init(XOR1_PHYS_BASE, XOR1_HIGH_PHYS_BASE,
IRQ_KIRKWOOD_XOR_10, IRQ_KIRKWOOD_XOR_11);
}
@@ -306,7 +458,7 @@ void __init kirkwood_xor1_init(void)
****************************************************************************/
void __init kirkwood_wdt_init(void)
{
- orion_wdt_init(kirkwood_tclk);
+ orion_wdt_init();
}
@@ -382,7 +534,6 @@ static struct platform_device kirkwood_pcm_device = {
void __init kirkwood_audio_init(void)
{
- kirkwood_clk_ctrl |= CGC_AUDIO;
platform_device_register(&kirkwood_i2s_device);
platform_device_register(&kirkwood_pcm_device);
}
@@ -466,6 +617,9 @@ void __init kirkwood_init(void)
kirkwood_l2_init();
#endif
+ /* Setup root of clk tree */
+ kirkwood_clk_init();
+
/* internal devices that every board has */
kirkwood_rtc_init();
kirkwood_wdt_init();
@@ -478,72 +632,6 @@ void __init kirkwood_init(void)
#endif
}
-static int __init kirkwood_clock_gate(void)
-{
- unsigned int curr = readl(CLOCK_GATING_CTRL);
- u32 dev, rev;
-
-#ifdef CONFIG_OF
- struct device_node *np;
-#endif
- kirkwood_pcie_id(&dev, &rev);
- printk(KERN_DEBUG "Gating clock of unused units\n");
- printk(KERN_DEBUG "before: 0x%08x\n", curr);
-
- /* Make sure those units are accessible */
- writel(curr | CGC_SATA0 | CGC_SATA1 | CGC_PEX0 | CGC_PEX1, CLOCK_GATING_CTRL);
-
-#ifdef CONFIG_OF
- np = of_find_compatible_node(NULL, NULL, "mrvl,orion-nand");
- if (np && of_device_is_available(np)) {
- kirkwood_clk_ctrl |= CGC_RUNIT;
- of_node_put(np);
- }
-#endif
-
- /* For SATA: first shutdown the phy */
- if (!(kirkwood_clk_ctrl & CGC_SATA0)) {
- /* Disable PLL and IVREF */
- writel(readl(SATA0_PHY_MODE_2) & ~0xf, SATA0_PHY_MODE_2);
- /* Disable PHY */
- writel(readl(SATA0_IF_CTRL) | 0x200, SATA0_IF_CTRL);
- }
- if (!(kirkwood_clk_ctrl & CGC_SATA1)) {
- /* Disable PLL and IVREF */
- writel(readl(SATA1_PHY_MODE_2) & ~0xf, SATA1_PHY_MODE_2);
- /* Disable PHY */
- writel(readl(SATA1_IF_CTRL) | 0x200, SATA1_IF_CTRL);
- }
-
- /* For PCIe: first shutdown the phy */
- if (!(kirkwood_clk_ctrl & CGC_PEX0)) {
- writel(readl(PCIE_LINK_CTRL) | 0x10, PCIE_LINK_CTRL);
- while (1)
- if (readl(PCIE_STATUS) & 0x1)
- break;
- writel(readl(PCIE_LINK_CTRL) & ~0x10, PCIE_LINK_CTRL);
- }
-
- /* For PCIe 1: first shutdown the phy */
- if (dev == MV88F6282_DEV_ID) {
- if (!(kirkwood_clk_ctrl & CGC_PEX1)) {
- writel(readl(PCIE1_LINK_CTRL) | 0x10, PCIE1_LINK_CTRL);
- while (1)
- if (readl(PCIE1_STATUS) & 0x1)
- break;
- writel(readl(PCIE1_LINK_CTRL) & ~0x10, PCIE1_LINK_CTRL);
- }
- } else /* keep this bit set for devices that don't have PCIe1 */
- kirkwood_clk_ctrl |= CGC_PEX1;
-
- /* Now gate clock the required units */
- writel(kirkwood_clk_ctrl, CLOCK_GATING_CTRL);
- printk(KERN_DEBUG " after: 0x%08x\n", readl(CLOCK_GATING_CTRL));
-
- return 0;
-}
-late_initcall(kirkwood_clock_gate);
-
void kirkwood_restart(char mode, const char *cmd)
{
/*
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h
index a34c41a5172e..9248fa2c165b 100644
--- a/arch/arm/mach-kirkwood/common.h
+++ b/arch/arm/mach-kirkwood/common.h
@@ -50,6 +50,7 @@ void kirkwood_nand_init(struct mtd_partition *parts, int nr_parts, int delay);
void kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts, int (*dev_ready)(struct mtd_info *));
void kirkwood_audio_init(void);
void kirkwood_restart(char, const char *);
+void kirkwood_clk_init(void);
/* board init functions for boards not fully converted to fdt */
#ifdef CONFIG_MACH_DREAMPLUG_DT
diff --git a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
index 957bd7997d7e..3eee37a3b501 100644
--- a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
+++ b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
@@ -43,6 +43,22 @@
#define L2_WRITETHROUGH 0x00000010
#define CLOCK_GATING_CTRL (BRIDGE_VIRT_BASE | 0x11c)
+#define CGC_BIT_GE0 (0)
+#define CGC_BIT_PEX0 (2)
+#define CGC_BIT_USB0 (3)
+#define CGC_BIT_SDIO (4)
+#define CGC_BIT_TSU (5)
+#define CGC_BIT_DUNIT (6)
+#define CGC_BIT_RUNIT (7)
+#define CGC_BIT_XOR0 (8)
+#define CGC_BIT_AUDIO (9)
+#define CGC_BIT_SATA0 (14)
+#define CGC_BIT_SATA1 (15)
+#define CGC_BIT_XOR1 (16)
+#define CGC_BIT_CRYPTO (17)
+#define CGC_BIT_PEX1 (18)
+#define CGC_BIT_GE1 (19)
+#define CGC_BIT_TDM (20)
#define CGC_GE0 (1 << 0)
#define CGC_PEX0 (1 << 2)
#define CGC_USB0 (1 << 3)
diff --git a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
index 85f6169c2484..6d8364a97810 100644
--- a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
+++ b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
@@ -23,7 +23,6 @@
#include <linux/gpio_keys.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <net/dsa.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index de373176ee67..6e8b2efa3c35 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include <video/vga.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
@@ -19,6 +20,23 @@
#include <plat/addr-map.h>
#include "common.h"
+static void kirkwood_enable_pcie_clk(const char *port)
+{
+ struct clk *clk;
+
+ clk = clk_get_sys("pcie", port);
+ if (IS_ERR(clk)) {
+ printk(KERN_ERR "PCIE clock %s missing\n", port);
+ return;
+ }
+ clk_prepare_enable(clk);
+ clk_put(clk);
+}
+
+/*
+ * This function is called very early during boot, while probing the
+ * hardware to determine which SoC we are running on and what rate
+ * tclk is ticking at. Hence kirkwood_enable_pcie_clk() cannot be used
+ * here, since the clk tree has not been created yet.
+ */
void kirkwood_enable_pcie(void)
{
u32 curr = readl(CLOCK_GATING_CTRL);
@@ -26,7 +44,7 @@ void kirkwood_enable_pcie(void)
writel(curr | CGC_PEX0, CLOCK_GATING_CTRL);
}
-void __init kirkwood_pcie_id(u32 *dev, u32 *rev)
+void kirkwood_pcie_id(u32 *dev, u32 *rev)
{
kirkwood_enable_pcie();
*dev = orion_pcie_dev_id((void __iomem *)PCIE_VIRT_BASE);
@@ -159,7 +177,6 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
{
- extern unsigned int kirkwood_clk_ctrl;
struct pcie_port *pp;
int index;
@@ -178,11 +195,11 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
switch (index) {
case 0:
- kirkwood_clk_ctrl |= CGC_PEX0;
+ kirkwood_enable_pcie_clk("0");
pcie0_ioresources_init(pp);
break;
case 1:
- kirkwood_clk_ctrl |= CGC_PEX1;
+ kirkwood_enable_pcie_clk("1");
pcie1_ioresources_init(pp);
break;
default:
diff --git a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
index fd2c9c8b6831..f742a66a7045 100644
--- a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
@@ -16,7 +16,6 @@
#include <linux/gpio.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/kirkwood.h>
diff --git a/arch/arm/mach-kirkwood/t5325-setup.c b/arch/arm/mach-kirkwood/t5325-setup.c
index f9d2a11b7f96..bad738e44044 100644
--- a/arch/arm/mach-kirkwood/t5325-setup.c
+++ b/arch/arm/mach-kirkwood/t5325-setup.c
@@ -16,7 +16,6 @@
#include <linux/mtd/physmap.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <linux/i2c.h>
#include <linux/mv643xx_eth.h>
#include <linux/ata_platform.h>
diff --git a/arch/arm/mach-kirkwood/tsx1x-common.c b/arch/arm/mach-kirkwood/tsx1x-common.c
index 24294b2bc469..8943ede29b44 100644
--- a/arch/arm/mach-kirkwood/tsx1x-common.c
+++ b/arch/arm/mach-kirkwood/tsx1x-common.c
@@ -4,7 +4,6 @@
#include <linux/mtd/physmap.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <linux/serial_reg.h>
#include <mach/kirkwood.h>
#include "common.h"
diff --git a/arch/arm/mach-lpc32xx/include/mach/gpio.h b/arch/arm/mach-lpc32xx/include/mach/gpio.h
index 40a8c178f10d..2ba6ca412bef 100644
--- a/arch/arm/mach-lpc32xx/include/mach/gpio.h
+++ b/arch/arm/mach-lpc32xx/include/mach/gpio.h
@@ -1 +1,8 @@
-/* empty */
+#ifndef __MACH_GPIO_H
+#define __MACH_GPIO_H
+
+#include "gpio-lpc32xx.h"
+
+#define ARCH_NR_GPIOS (LPC32XX_GPO_P3_GRP + LPC32XX_GPO_P3_MAX)
+
+#endif /* __MACH_GPIO_H */
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index 26aac363a064..4fa3e99d9a62 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -94,6 +94,11 @@ static void __init halibut_map_io(void)
msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
}
+static void __init halibut_init_late(void)
+{
+ smd_debugfs_init();
+}
+
MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
.atag_offset = 0x100,
.fixup = halibut_fixup,
@@ -101,5 +106,6 @@ MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
.init_early = halibut_init_early,
.init_irq = halibut_init_irq,
.init_machine = halibut_init,
+ .init_late = halibut_init_late,
.timer = &msm_timer,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-mahimahi.c b/arch/arm/mach-msm/board-mahimahi.c
index 5a4882fc6f7a..cf1f89a5dc62 100644
--- a/arch/arm/mach-msm/board-mahimahi.c
+++ b/arch/arm/mach-msm/board-mahimahi.c
@@ -71,6 +71,11 @@ static void __init mahimahi_map_io(void)
msm_clock_init();
}
+static void __init mahimahi_init_late(void)
+{
+ smd_debugfs_init();
+}
+
extern struct sys_timer msm_timer;
MACHINE_START(MAHIMAHI, "mahimahi")
@@ -79,5 +84,6 @@ MACHINE_START(MAHIMAHI, "mahimahi")
.map_io = mahimahi_map_io,
.init_irq = msm_init_irq,
.init_machine = mahimahi_init,
+ .init_late = mahimahi_init_late,
.timer = &msm_timer,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm7x27.c b/arch/arm/mach-msm/board-msm7x27.c
index 6d84ee740df4..451ab1d43c92 100644
--- a/arch/arm/mach-msm/board-msm7x27.c
+++ b/arch/arm/mach-msm/board-msm7x27.c
@@ -128,11 +128,17 @@ static void __init msm7x2x_map_io(void)
#endif
}
+static void __init msm7x2x_init_late(void)
+{
+ smd_debugfs_init();
+}
+
MACHINE_START(MSM7X27_SURF, "QCT MSM7x27 SURF")
.atag_offset = 0x100,
.map_io = msm7x2x_map_io,
.init_irq = msm7x2x_init_irq,
.init_machine = msm7x2x_init,
+ .init_late = msm7x2x_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -141,6 +147,7 @@ MACHINE_START(MSM7X27_FFA, "QCT MSM7x27 FFA")
.map_io = msm7x2x_map_io,
.init_irq = msm7x2x_init_irq,
.init_machine = msm7x2x_init,
+ .init_late = msm7x2x_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -149,6 +156,7 @@ MACHINE_START(MSM7X25_SURF, "QCT MSM7x25 SURF")
.map_io = msm7x2x_map_io,
.init_irq = msm7x2x_init_irq,
.init_machine = msm7x2x_init,
+ .init_late = msm7x2x_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -157,5 +165,6 @@ MACHINE_START(MSM7X25_FFA, "QCT MSM7x25 FFA")
.map_io = msm7x2x_map_io,
.init_irq = msm7x2x_init_irq,
.init_machine = msm7x2x_init,
+ .init_late = msm7x2x_init_late,
.timer = &msm_timer,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index 75b3cfcada6d..a5001378135d 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -119,6 +119,11 @@ static void __init msm7x30_map_io(void)
msm_clock_init(msm_clocks_7x30, msm_num_clocks_7x30);
}
+static void __init msm7x30_init_late(void)
+{
+ smd_debugfs_init();
+}
+
MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
.atag_offset = 0x100,
.fixup = msm7x30_fixup,
@@ -126,6 +131,7 @@ MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
+ .init_late = msm7x30_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -136,6 +142,7 @@ MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA")
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
+ .init_late = msm7x30_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -146,5 +153,6 @@ MACHINE_START(MSM7X30_FLUID, "QCT MSM7X30 FLUID")
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
+ .init_late = msm7x30_init_late,
.timer = &msm_timer,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm8960.c b/arch/arm/mach-msm/board-msm8960.c
index ed3598128530..65f4a1daa2e5 100644
--- a/arch/arm/mach-msm/board-msm8960.c
+++ b/arch/arm/mach-msm/board-msm8960.c
@@ -93,6 +93,11 @@ static void __init msm8960_rumi3_init(void)
platform_add_devices(rumi3_devices, ARRAY_SIZE(rumi3_devices));
}
+static void __init msm8960_init_late(void)
+{
+ smd_debugfs_init();
+}
+
MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
.fixup = msm8960_fixup,
.reserve = msm8960_reserve,
@@ -101,6 +106,7 @@ MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
.timer = &msm_timer,
.handle_irq = gic_handle_irq,
.init_machine = msm8960_sim_init,
+ .init_late = msm8960_init_late,
MACHINE_END
MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
@@ -111,5 +117,6 @@ MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
.timer = &msm_timer,
.handle_irq = gic_handle_irq,
.init_machine = msm8960_rumi3_init,
+ .init_late = msm8960_init_late,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index fb3496a52ef4..e37a724cd1eb 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -81,6 +81,11 @@ static void __init msm8x60_init(void)
{
}
+static void __init msm8x60_init_late(void)
+{
+ smd_debugfs_init();
+}
+
#ifdef CONFIG_OF
static struct of_dev_auxdata msm_auxdata_lookup[] __initdata = {
{}
@@ -111,6 +116,7 @@ MACHINE_START(MSM8X60_RUMI3, "QCT MSM8X60 RUMI3")
.init_irq = msm8x60_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = msm8x60_init,
+ .init_late = msm8x60_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -121,6 +127,7 @@ MACHINE_START(MSM8X60_SURF, "QCT MSM8X60 SURF")
.init_irq = msm8x60_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = msm8x60_init,
+ .init_late = msm8x60_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -131,6 +138,7 @@ MACHINE_START(MSM8X60_SIM, "QCT MSM8X60 SIMULATOR")
.init_irq = msm8x60_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = msm8x60_init,
+ .init_late = msm8x60_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -141,6 +149,7 @@ MACHINE_START(MSM8X60_FFA, "QCT MSM8X60 FFA")
.init_irq = msm8x60_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = msm8x60_init,
+ .init_late = msm8x60_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -150,6 +159,7 @@ DT_MACHINE_START(MSM_DT, "Qualcomm MSM (Flattened Device Tree)")
.map_io = msm8x60_map_io,
.init_irq = msm8x60_init_irq,
.init_machine = msm8x60_dt_init,
+ .init_late = msm8x60_init_late,
.timer = &msm_timer,
.dt_compat = msm8x60_fluid_match,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index fbaa4ed95a3c..c8fe0edb9761 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -190,11 +190,17 @@ static void __init qsd8x50_init(void)
qsd8x50_init_mmc();
}
+static void __init qsd8x50_init_late(void)
+{
+ smd_debugfs_init();
+}
+
MACHINE_START(QSD8X50_SURF, "QCT QSD8X50 SURF")
.atag_offset = 0x100,
.map_io = qsd8x50_map_io,
.init_irq = qsd8x50_init_irq,
.init_machine = qsd8x50_init,
+ .init_late = qsd8x50_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -203,5 +209,6 @@ MACHINE_START(QSD8X50A_ST1_5, "QCT QSD8X50A ST1.5")
.map_io = qsd8x50_map_io,
.init_irq = qsd8x50_init_irq,
.init_machine = qsd8x50_init,
+ .init_late = qsd8x50_init_late,
.timer = &msm_timer,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c
index 4a8ea0d40b6f..2e569ab10eef 100644
--- a/arch/arm/mach-msm/board-sapphire.c
+++ b/arch/arm/mach-msm/board-sapphire.c
@@ -101,6 +101,11 @@ static void __init sapphire_map_io(void)
msm_clock_init();
}
+static void __init sapphire_init_late(void)
+{
+ smd_debugfs_init();
+}
+
MACHINE_START(SAPPHIRE, "sapphire")
/* Maintainer: Brian Swetland <swetland@google.com> */
.atag_offset = 0x100,
@@ -108,5 +113,6 @@ MACHINE_START(SAPPHIRE, "sapphire")
.map_io = sapphire_map_io,
.init_irq = sapphire_init_irq,
.init_machine = sapphire_init,
+ .init_late = sapphire_init_late,
.timer = &msm_timer,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c
index d4060a37e23d..bbe13f12fa01 100644
--- a/arch/arm/mach-msm/board-trout.c
+++ b/arch/arm/mach-msm/board-trout.c
@@ -98,6 +98,11 @@ static void __init trout_map_io(void)
msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
}
+static void __init trout_init_late(void)
+{
+ smd_debugfs_init();
+}
+
MACHINE_START(TROUT, "HTC Dream")
.atag_offset = 0x100,
.fixup = trout_fixup,
@@ -105,5 +110,6 @@ MACHINE_START(TROUT, "HTC Dream")
.init_early = trout_init_early,
.init_irq = trout_init_irq,
.init_machine = trout_init,
+ .init_late = trout_init_late,
.timer = &msm_timer,
MACHINE_END
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 2ce8f1f2fc4d..435f8edfafd1 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -47,4 +47,10 @@ int __init msm_add_sdcc(unsigned int controller,
struct msm_mmc_platform_data *plat,
unsigned int stat_irq, unsigned long stat_irq_flags);
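+/*
+ * smd_debugfs_init() is stubbed out below when SMD or debugfs support
+ * is not configured, so the board .init_late hooks can call it
+ * unconditionally.
+ */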
+#if defined(CONFIG_MSM_SMD) && defined(CONFIG_DEBUG_FS)
+int smd_debugfs_init(void);
+#else
+static inline int smd_debugfs_init(void) { return 0; }
+#endif
+
#endif
diff --git a/arch/arm/mach-msm/smd_debug.c b/arch/arm/mach-msm/smd_debug.c
index c56df9e932ae..8056b3e5590f 100644
--- a/arch/arm/mach-msm/smd_debug.c
+++ b/arch/arm/mach-msm/smd_debug.c
@@ -216,7 +216,7 @@ static void debug_create(const char *name, umode_t mode,
debugfs_create_file(name, mode, dent, fill, &debug_ops);
}
-static int smd_debugfs_init(void)
+int __init smd_debugfs_init(void)
{
struct dentry *dent;
@@ -234,7 +234,6 @@ static int smd_debugfs_init(void)
return 0;
}
-late_initcall(smd_debugfs_init);
#endif
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index a5dcf766a3f9..b4c53b846c9c 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/ata_platform.h>
+#include <linux/clk-provider.h>
#include <linux/ethtool.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
@@ -103,24 +104,24 @@ static void get_pclk_l2clk(int hclk, int core_index, int *pclk, int *l2clk)
static int get_tclk(void)
{
- int tclk;
+ int tclk_freq;
/*
* TCLK tick rate is configured by DEV_A[2:0] strap pins.
*/
switch ((readl(SAMPLE_AT_RESET_HIGH) >> 6) & 7) {
case 1:
- tclk = 166666667;
+ tclk_freq = 166666667;
break;
case 3:
- tclk = 200000000;
+ tclk_freq = 200000000;
break;
default:
panic("unknown TCLK PLL setting: %.8x\n",
readl(SAMPLE_AT_RESET_HIGH));
}
- return tclk;
+ return tclk_freq;
}
@@ -166,6 +167,19 @@ void __init mv78xx0_map_io(void)
/*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
+static struct clk *tclk;
+
+static void __init clk_init(void)
+{
+ tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
+ get_tclk());
+
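+ /* Register the common Orion clkdev entries, all driven by tclk */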
+ orion_clkdev_init(tclk);
+}
+
+/*****************************************************************************
* EHCI
****************************************************************************/
void __init mv78xx0_ehci0_init(void)
@@ -199,7 +213,7 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data,
GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
- IRQ_MV78XX0_GE_ERR, get_tclk());
+ IRQ_MV78XX0_GE_ERR);
}
@@ -210,7 +224,7 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge01_init(eth_data,
GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
- NO_IRQ, get_tclk());
+ NO_IRQ);
}
@@ -234,7 +248,7 @@ void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data)
orion_ge10_init(eth_data,
GE10_PHYS_BASE, IRQ_MV78XX0_GE10_SUM,
- NO_IRQ, get_tclk());
+ NO_IRQ);
}
@@ -258,7 +272,7 @@ void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data)
orion_ge11_init(eth_data,
GE11_PHYS_BASE, IRQ_MV78XX0_GE11_SUM,
- NO_IRQ, get_tclk());
+ NO_IRQ);
}
/*****************************************************************************
@@ -285,7 +299,7 @@ void __init mv78xx0_sata_init(struct mv_sata_platform_data *sata_data)
void __init mv78xx0_uart0_init(void)
{
orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
- IRQ_MV78XX0_UART_0, get_tclk());
+ IRQ_MV78XX0_UART_0, tclk);
}
@@ -295,7 +309,7 @@ void __init mv78xx0_uart0_init(void)
void __init mv78xx0_uart1_init(void)
{
orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
- IRQ_MV78XX0_UART_1, get_tclk());
+ IRQ_MV78XX0_UART_1, tclk);
}
@@ -305,7 +319,7 @@ void __init mv78xx0_uart1_init(void)
void __init mv78xx0_uart2_init(void)
{
orion_uart2_init(UART2_VIRT_BASE, UART2_PHYS_BASE,
- IRQ_MV78XX0_UART_2, get_tclk());
+ IRQ_MV78XX0_UART_2, tclk);
}
/*****************************************************************************
@@ -314,7 +328,7 @@ void __init mv78xx0_uart2_init(void)
void __init mv78xx0_uart3_init(void)
{
orion_uart3_init(UART3_VIRT_BASE, UART3_PHYS_BASE,
- IRQ_MV78XX0_UART_3, get_tclk());
+ IRQ_MV78XX0_UART_3, tclk);
}
/*****************************************************************************
@@ -378,25 +392,26 @@ void __init mv78xx0_init(void)
int hclk;
int pclk;
int l2clk;
- int tclk;
core_index = mv78xx0_core_index();
hclk = get_hclk();
get_pclk_l2clk(hclk, core_index, &pclk, &l2clk);
- tclk = get_tclk();
printk(KERN_INFO "%s ", mv78xx0_id());
printk("core #%d, ", core_index);
printk("PCLK = %dMHz, ", (pclk + 499999) / 1000000);
printk("L2 = %dMHz, ", (l2clk + 499999) / 1000000);
printk("HCLK = %dMHz, ", (hclk + 499999) / 1000000);
- printk("TCLK = %dMHz\n", (tclk + 499999) / 1000000);
+ printk("TCLK = %dMHz\n", (get_tclk() + 499999) / 1000000);
mv78xx0_setup_cpu_mbus();
#ifdef CONFIG_CACHE_FEROCEON_L2
feroceon_l2_init(is_l2_writethrough());
#endif
+
+ /* Setup root of clk tree */
+ clk_init();
}
void mv78xx0_restart(char mode, const char *cmd)
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index 07d5383d68ee..91cf0625819c 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -7,18 +7,28 @@ config MXS_OCOTP
config SOC_IMX23
bool
+ select ARM_AMBA
select CPU_ARM926T
select HAVE_PWM
select PINCTRL_IMX23
config SOC_IMX28
bool
+ select ARM_AMBA
select CPU_ARM926T
select HAVE_PWM
select PINCTRL_IMX28
comment "MXS platforms:"
+config MACH_MXS_DT
+ bool "Support MXS platforms from device tree"
+ select SOC_IMX23
+ select SOC_IMX28
+ help
+ Include support for Freescale MXS platforms (i.MX23 and i.MX28)
+ using the device tree for discovery.
+
config MACH_STMP378X_DEVB
bool "Support STMP378x_devb Platform"
select SOC_IMX23
diff --git a/arch/arm/mach-mxs/Makefile b/arch/arm/mach-mxs/Makefile
index 908bf9a567f1..e41590ccb437 100644
--- a/arch/arm/mach-mxs/Makefile
+++ b/arch/arm/mach-mxs/Makefile
@@ -1,12 +1,10 @@
# Common support
-obj-y := clock.o devices.o icoll.o iomux.o system.o timer.o mm.o
+obj-y := devices.o icoll.o iomux.o system.o timer.o mm.o
obj-$(CONFIG_MXS_OCOTP) += ocotp.o
obj-$(CONFIG_PM) += pm.o
-obj-$(CONFIG_SOC_IMX23) += clock-mx23.o
-obj-$(CONFIG_SOC_IMX28) += clock-mx28.o
-
+obj-$(CONFIG_MACH_MXS_DT) += mach-mxs.o
obj-$(CONFIG_MACH_STMP378X_DEVB) += mach-stmp378x_devb.o
obj-$(CONFIG_MACH_MX23EVK) += mach-mx23evk.o
obj-$(CONFIG_MACH_MX28EVK) += mach-mx28evk.o
diff --git a/arch/arm/mach-mxs/clock-mx23.c b/arch/arm/mach-mxs/clock-mx23.c
deleted file mode 100644
index e3ac52c34019..000000000000
--- a/arch/arm/mach-mxs/clock-mx23.c
+++ /dev/null
@@ -1,536 +0,0 @@
-/*
- * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/clkdev.h>
-
-#include <asm/clkdev.h>
-#include <asm/div64.h>
-
-#include <mach/mx23.h>
-#include <mach/common.h>
-#include <mach/clock.h>
-
-#include "regs-clkctrl-mx23.h"
-
-#define CLKCTRL_BASE_ADDR MX23_IO_ADDRESS(MX23_CLKCTRL_BASE_ADDR)
-#define DIGCTRL_BASE_ADDR MX23_IO_ADDRESS(MX23_DIGCTL_BASE_ADDR)
-
-#define PARENT_RATE_SHIFT 8
-
-static int _raw_clk_enable(struct clk *clk)
-{
- u32 reg;
-
- if (clk->enable_reg) {
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(1 << clk->enable_shift);
- __raw_writel(reg, clk->enable_reg);
- }
-
- return 0;
-}
-
-static void _raw_clk_disable(struct clk *clk)
-{
- u32 reg;
-
- if (clk->enable_reg) {
- reg = __raw_readl(clk->enable_reg);
- reg |= 1 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
- }
-}
-
-/*
- * ref_xtal_clk
- */
-static unsigned long ref_xtal_clk_get_rate(struct clk *clk)
-{
- return 24000000;
-}
-
-static struct clk ref_xtal_clk = {
- .get_rate = ref_xtal_clk_get_rate,
-};
-
-/*
- * pll_clk
- */
-static unsigned long pll_clk_get_rate(struct clk *clk)
-{
- return 480000000;
-}
-
-static int pll_clk_enable(struct clk *clk)
-{
- __raw_writel(BM_CLKCTRL_PLLCTRL0_POWER |
- BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_PLLCTRL0_SET);
-
- /* Only a 10us delay is need. PLLCTRL1 LOCK bitfied is only a timer
- * and is incorrect (excessive). Per definition of the PLLCTRL0
- * POWER field, waiting at least 10us.
- */
- udelay(10);
-
- return 0;
-}
-
-static void pll_clk_disable(struct clk *clk)
-{
- __raw_writel(BM_CLKCTRL_PLLCTRL0_POWER |
- BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_PLLCTRL0_CLR);
-}
-
-static struct clk pll_clk = {
- .get_rate = pll_clk_get_rate,
- .enable = pll_clk_enable,
- .disable = pll_clk_disable,
- .parent = &ref_xtal_clk,
-};
-
-/*
- * ref_clk
- */
-#define _CLK_GET_RATE_REF(name, sr, ss) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- unsigned long parent_rate; \
- u32 reg, div; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##sr); \
- div = (reg >> BP_CLKCTRL_##sr##_##ss##FRAC) & 0x3f; \
- parent_rate = clk_get_rate(clk->parent); \
- \
- return SH_DIV((parent_rate >> PARENT_RATE_SHIFT) * 18, \
- div, PARENT_RATE_SHIFT); \
-}
-
-_CLK_GET_RATE_REF(ref_cpu_clk, FRAC, CPU)
-_CLK_GET_RATE_REF(ref_emi_clk, FRAC, EMI)
-_CLK_GET_RATE_REF(ref_pix_clk, FRAC, PIX)
-_CLK_GET_RATE_REF(ref_io_clk, FRAC, IO)
-
-#define _DEFINE_CLOCK_REF(name, er, es) \
- static struct clk name = { \
- .enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er, \
- .enable_shift = BP_CLKCTRL_##er##_CLKGATE##es, \
- .get_rate = name##_get_rate, \
- .enable = _raw_clk_enable, \
- .disable = _raw_clk_disable, \
- .parent = &pll_clk, \
- }
-
-_DEFINE_CLOCK_REF(ref_cpu_clk, FRAC, CPU);
-_DEFINE_CLOCK_REF(ref_emi_clk, FRAC, EMI);
-_DEFINE_CLOCK_REF(ref_pix_clk, FRAC, PIX);
-_DEFINE_CLOCK_REF(ref_io_clk, FRAC, IO);
-
-/*
- * General clocks
- *
- * clk_get_rate
- */
-static unsigned long rtc_clk_get_rate(struct clk *clk)
-{
- /* ref_xtal_clk is implemented as the only parent */
- return clk_get_rate(clk->parent) / 768;
-}
-
-static unsigned long clk32k_clk_get_rate(struct clk *clk)
-{
- return clk->parent->get_rate(clk->parent) / 750;
-}
-
-#define _CLK_GET_RATE(name, rs) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- u32 reg, div; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
- \
- if (clk->parent == &ref_xtal_clk) \
- div = (reg & BM_CLKCTRL_##rs##_DIV_XTAL) >> \
- BP_CLKCTRL_##rs##_DIV_XTAL; \
- else \
- div = (reg & BM_CLKCTRL_##rs##_DIV_##rs) >> \
- BP_CLKCTRL_##rs##_DIV_##rs; \
- \
- if (!div) \
- return -EINVAL; \
- \
- return clk_get_rate(clk->parent) / div; \
-}
-
-_CLK_GET_RATE(cpu_clk, CPU)
-_CLK_GET_RATE(emi_clk, EMI)
-
-#define _CLK_GET_RATE1(name, rs) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- u32 reg, div; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
- div = (reg & BM_CLKCTRL_##rs##_DIV) >> BP_CLKCTRL_##rs##_DIV; \
- \
- if (!div) \
- return -EINVAL; \
- \
- return clk_get_rate(clk->parent) / div; \
-}
-
-_CLK_GET_RATE1(hbus_clk, HBUS)
-_CLK_GET_RATE1(xbus_clk, XBUS)
-_CLK_GET_RATE1(ssp_clk, SSP)
-_CLK_GET_RATE1(gpmi_clk, GPMI)
-_CLK_GET_RATE1(lcdif_clk, PIX)
-
-#define _CLK_GET_RATE_STUB(name) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- return clk_get_rate(clk->parent); \
-}
-
-_CLK_GET_RATE_STUB(uart_clk)
-_CLK_GET_RATE_STUB(audio_clk)
-_CLK_GET_RATE_STUB(pwm_clk)
-
-/*
- * clk_set_rate
- */
-static int cpu_clk_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg, bm_busy, div_max, d, f, div, frac;
- unsigned long diff, parent_rate, calc_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (clk->parent == &ref_xtal_clk) {
- div_max = BM_CLKCTRL_CPU_DIV_XTAL >> BP_CLKCTRL_CPU_DIV_XTAL;
- bm_busy = BM_CLKCTRL_CPU_BUSY_REF_XTAL;
- div = DIV_ROUND_UP(parent_rate, rate);
- if (div == 0 || div > div_max)
- return -EINVAL;
- } else {
- div_max = BM_CLKCTRL_CPU_DIV_CPU >> BP_CLKCTRL_CPU_DIV_CPU;
- bm_busy = BM_CLKCTRL_CPU_BUSY_REF_CPU;
- rate >>= PARENT_RATE_SHIFT;
- parent_rate >>= PARENT_RATE_SHIFT;
- diff = parent_rate;
- div = frac = 1;
- for (d = 1; d <= div_max; d++) {
- f = parent_rate * 18 / d / rate;
- if ((parent_rate * 18 / d) % rate)
- f++;
- if (f < 18 || f > 35)
- continue;
-
- calc_rate = parent_rate * 18 / f / d;
- if (calc_rate > rate)
- continue;
-
- if (rate - calc_rate < diff) {
- frac = f;
- div = d;
- diff = rate - calc_rate;
- }
-
- if (diff == 0)
- break;
- }
-
- if (diff == parent_rate)
- return -EINVAL;
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
- reg &= ~BM_CLKCTRL_FRAC_CPUFRAC;
- reg |= frac;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
- }
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);
- reg &= ~BM_CLKCTRL_CPU_DIV_CPU;
- reg |= div << BP_CLKCTRL_CPU_DIV_CPU;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);
-
- mxs_clkctrl_timeout(HW_CLKCTRL_CPU, bm_busy);
-
- return 0;
-}
-
-#define _CLK_SET_RATE(name, dr) \
-static int name##_set_rate(struct clk *clk, unsigned long rate) \
-{ \
- u32 reg, div_max, div; \
- unsigned long parent_rate; \
- \
- parent_rate = clk_get_rate(clk->parent); \
- div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV; \
- \
- div = DIV_ROUND_UP(parent_rate, rate); \
- if (div == 0 || div > div_max) \
- return -EINVAL; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
- reg &= ~BM_CLKCTRL_##dr##_DIV; \
- reg |= div << BP_CLKCTRL_##dr##_DIV; \
- if (reg & (1 << clk->enable_shift)) { \
- pr_err("%s: clock is gated\n", __func__); \
- return -EINVAL; \
- } \
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
- \
- mxs_clkctrl_timeout(HW_CLKCTRL_##dr, BM_CLKCTRL_##dr##_BUSY); \
- return 0; \
-}
-
-_CLK_SET_RATE(xbus_clk, XBUS)
-_CLK_SET_RATE(ssp_clk, SSP)
-_CLK_SET_RATE(gpmi_clk, GPMI)
-_CLK_SET_RATE(lcdif_clk, PIX)
-
-#define _CLK_SET_RATE_STUB(name) \
-static int name##_set_rate(struct clk *clk, unsigned long rate) \
-{ \
- return -EINVAL; \
-}
-
-_CLK_SET_RATE_STUB(emi_clk)
-_CLK_SET_RATE_STUB(uart_clk)
-_CLK_SET_RATE_STUB(audio_clk)
-_CLK_SET_RATE_STUB(pwm_clk)
-_CLK_SET_RATE_STUB(clk32k_clk)
-
-/*
- * clk_set_parent
- */
-#define _CLK_SET_PARENT(name, bit) \
-static int name##_set_parent(struct clk *clk, struct clk *parent) \
-{ \
- if (parent != clk->parent) { \
- __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \
- clk->parent = parent; \
- } \
- \
- return 0; \
-}
-
-_CLK_SET_PARENT(cpu_clk, CPU)
-_CLK_SET_PARENT(emi_clk, EMI)
-_CLK_SET_PARENT(ssp_clk, SSP)
-_CLK_SET_PARENT(gpmi_clk, GPMI)
-_CLK_SET_PARENT(lcdif_clk, PIX)
-
-#define _CLK_SET_PARENT_STUB(name) \
-static int name##_set_parent(struct clk *clk, struct clk *parent) \
-{ \
- if (parent != clk->parent) \
- return -EINVAL; \
- else \
- return 0; \
-}
-
-_CLK_SET_PARENT_STUB(uart_clk)
-_CLK_SET_PARENT_STUB(audio_clk)
-_CLK_SET_PARENT_STUB(pwm_clk)
-_CLK_SET_PARENT_STUB(clk32k_clk)
-
-/*
- * clk definition
- */
-static struct clk cpu_clk = {
- .get_rate = cpu_clk_get_rate,
- .set_rate = cpu_clk_set_rate,
- .set_parent = cpu_clk_set_parent,
- .parent = &ref_cpu_clk,
-};
-
-static struct clk hbus_clk = {
- .get_rate = hbus_clk_get_rate,
- .parent = &cpu_clk,
-};
-
-static struct clk xbus_clk = {
- .get_rate = xbus_clk_get_rate,
- .set_rate = xbus_clk_set_rate,
- .parent = &ref_xtal_clk,
-};
-
-static struct clk rtc_clk = {
- .get_rate = rtc_clk_get_rate,
- .parent = &ref_xtal_clk,
-};
-
-/* usb_clk gate is controlled in DIGCTRL other than CLKCTRL */
-static struct clk usb_clk = {
- .enable_reg = DIGCTRL_BASE_ADDR,
- .enable_shift = 2,
- .enable = _raw_clk_enable,
- .disable = _raw_clk_disable,
- .parent = &pll_clk,
-};
-
-#define _DEFINE_CLOCK(name, er, es, p) \
- static struct clk name = { \
- .enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er, \
- .enable_shift = BP_CLKCTRL_##er##_##es, \
- .get_rate = name##_get_rate, \
- .set_rate = name##_set_rate, \
- .set_parent = name##_set_parent, \
- .enable = _raw_clk_enable, \
- .disable = _raw_clk_disable, \
- .parent = p, \
- }
-
-_DEFINE_CLOCK(emi_clk, EMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp_clk, SSP, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(gpmi_clk, GPMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(lcdif_clk, PIX, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(uart_clk, XTAL, UART_CLK_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(audio_clk, XTAL, FILT_CLK24M_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(pwm_clk, XTAL, PWM_CLK24M_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(clk32k_clk, XTAL, TIMROT_CLK32K_GATE, &ref_xtal_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-
-static struct clk_lookup lookups[] = {
- /* for amba bus driver */
- _REGISTER_CLOCK("duart", "apb_pclk", xbus_clk)
- /* for amba-pl011 driver */
- _REGISTER_CLOCK("duart", NULL, uart_clk)
- _REGISTER_CLOCK("mxs-auart.0", NULL, uart_clk)
- _REGISTER_CLOCK("rtc", NULL, rtc_clk)
- _REGISTER_CLOCK("mxs-dma-apbh", NULL, hbus_clk)
- _REGISTER_CLOCK("mxs-dma-apbx", NULL, xbus_clk)
- _REGISTER_CLOCK("mxs-mmc.0", NULL, ssp_clk)
- _REGISTER_CLOCK("mxs-mmc.1", NULL, ssp_clk)
- _REGISTER_CLOCK(NULL, "usb", usb_clk)
- _REGISTER_CLOCK(NULL, "audio", audio_clk)
- _REGISTER_CLOCK("mxs-pwm.0", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.1", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.2", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.3", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.4", NULL, pwm_clk)
- _REGISTER_CLOCK("imx23-fb", NULL, lcdif_clk)
- _REGISTER_CLOCK("imx23-gpmi-nand", NULL, gpmi_clk)
-};
-
-static int clk_misc_init(void)
-{
- u32 reg;
- int ret;
-
- /* Fix up parent per register setting */
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ);
- cpu_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_CPU) ?
- &ref_xtal_clk : &ref_cpu_clk;
- emi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_EMI) ?
- &ref_xtal_clk : &ref_emi_clk;
- ssp_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP) ?
- &ref_xtal_clk : &ref_io_clk;
- gpmi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_GPMI) ?
- &ref_xtal_clk : &ref_io_clk;
- lcdif_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_PIX) ?
- &ref_xtal_clk : &ref_pix_clk;
-
- /* Use int div over frac when both are available */
- __raw_writel(BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
- __raw_writel(BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
- __raw_writel(BM_CLKCTRL_HBUS_DIV_FRAC_EN,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS_CLR);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
- reg &= ~BM_CLKCTRL_XBUS_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP);
- reg &= ~BM_CLKCTRL_SSP_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
- reg &= ~BM_CLKCTRL_GPMI_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_PIX);
- reg &= ~BM_CLKCTRL_PIX_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_PIX);
-
- /*
- * Set safe hbus clock divider. A divider of 3 ensure that
- * the Vddd voltage required for the cpu clock is sufficiently
- * high for the hbus clock.
- */
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
- reg &= BM_CLKCTRL_HBUS_DIV;
- reg |= 3 << BP_CLKCTRL_HBUS_DIV;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
-
- ret = mxs_clkctrl_timeout(HW_CLKCTRL_HBUS, BM_CLKCTRL_HBUS_BUSY);
-
- /* Gate off cpu clock in WFI for power saving */
- __raw_writel(BM_CLKCTRL_CPU_INTERRUPT_WAIT,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_SET);
-
- /*
- * 480 MHz seems too high to be ssp clock source directly,
- * so set frac to get a 288 MHz ref_io.
- */
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
- reg &= ~BM_CLKCTRL_FRAC_IOFRAC;
- reg |= 30 << BP_CLKCTRL_FRAC_IOFRAC;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
-
- return ret;
-}
-
-int __init mx23_clocks_init(void)
-{
- clk_misc_init();
-
- /*
- * source ssp clock from ref_io than ref_xtal,
- * as ref_xtal only provides 24 MHz as maximum.
- */
- clk_set_parent(&ssp_clk, &ref_io_clk);
-
- clk_prepare_enable(&cpu_clk);
- clk_prepare_enable(&hbus_clk);
- clk_prepare_enable(&xbus_clk);
- clk_prepare_enable(&emi_clk);
- clk_prepare_enable(&uart_clk);
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- mxs_timer_init(&clk32k_clk, MX23_INT_TIMER0);
-
- return 0;
-}
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
deleted file mode 100644
index cea29c99e214..000000000000
--- a/arch/arm/mach-mxs/clock-mx28.c
+++ /dev/null
@@ -1,803 +0,0 @@
-/*
- * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/clkdev.h>
-#include <linux/spinlock.h>
-
-#include <asm/clkdev.h>
-#include <asm/div64.h>
-
-#include <mach/mx28.h>
-#include <mach/common.h>
-#include <mach/clock.h>
-#include <mach/digctl.h>
-
-#include "regs-clkctrl-mx28.h"
-
-#define CLKCTRL_BASE_ADDR MX28_IO_ADDRESS(MX28_CLKCTRL_BASE_ADDR)
-#define DIGCTRL_BASE_ADDR MX28_IO_ADDRESS(MX28_DIGCTL_BASE_ADDR)
-
-#define PARENT_RATE_SHIFT 8
-
-static struct clk pll2_clk;
-static struct clk cpu_clk;
-static struct clk emi_clk;
-static struct clk saif0_clk;
-static struct clk saif1_clk;
-static struct clk clk32k_clk;
-static DEFINE_SPINLOCK(clkmux_lock);
-
-/*
- * HW_SAIF_CLKMUX_SEL:
- * DIRECT(0x0): SAIF0 clock pins selected for SAIF0 input clocks, and SAIF1
- * clock pins selected for SAIF1 input clocks.
- * CROSSINPUT(0x1): SAIF1 clock inputs selected for SAIF0 input clocks, and
- * SAIF0 clock inputs selected for SAIF1 input clocks.
- * EXTMSTR0(0x2): SAIF0 clock pin selected for both SAIF0 and SAIF1 input
- * clocks.
- * EXTMSTR1(0x3): SAIF1 clock pin selected for both SAIF0 and SAIF1 input
- * clocks.
- */
-int mxs_saif_clkmux_select(unsigned int clkmux)
-{
- if (clkmux > 0x3)
- return -EINVAL;
-
- spin_lock(&clkmux_lock);
- __raw_writel(BM_DIGCTL_CTRL_SAIF_CLKMUX,
- DIGCTRL_BASE_ADDR + HW_DIGCTL_CTRL + MXS_CLR_ADDR);
- __raw_writel(clkmux << BP_DIGCTL_CTRL_SAIF_CLKMUX,
- DIGCTRL_BASE_ADDR + HW_DIGCTL_CTRL + MXS_SET_ADDR);
- spin_unlock(&clkmux_lock);
-
- return 0;
-}
-
-static int _raw_clk_enable(struct clk *clk)
-{
- u32 reg;
-
- if (clk->enable_reg) {
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(1 << clk->enable_shift);
- __raw_writel(reg, clk->enable_reg);
- }
-
- return 0;
-}
-
-static void _raw_clk_disable(struct clk *clk)
-{
- u32 reg;
-
- if (clk->enable_reg) {
- reg = __raw_readl(clk->enable_reg);
- reg |= 1 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
- }
-}
-
-/*
- * ref_xtal_clk
- */
-static unsigned long ref_xtal_clk_get_rate(struct clk *clk)
-{
- return 24000000;
-}
-
-static struct clk ref_xtal_clk = {
- .get_rate = ref_xtal_clk_get_rate,
-};
-
-/*
- * pll_clk
- */
-static unsigned long pll0_clk_get_rate(struct clk *clk)
-{
- return 480000000;
-}
-
-static unsigned long pll1_clk_get_rate(struct clk *clk)
-{
- return 480000000;
-}
-
-static unsigned long pll2_clk_get_rate(struct clk *clk)
-{
- return 50000000;
-}
-
-#define _CLK_ENABLE_PLL(name, r, g) \
-static int name##_enable(struct clk *clk) \
-{ \
- __raw_writel(BM_CLKCTRL_##r##CTRL0_POWER, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_SET); \
- udelay(10); \
- \
- if (clk == &pll2_clk) \
- __raw_writel(BM_CLKCTRL_##r##CTRL0_##g, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_CLR); \
- else \
- __raw_writel(BM_CLKCTRL_##r##CTRL0_##g, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_SET); \
- \
- return 0; \
-}
-
-_CLK_ENABLE_PLL(pll0_clk, PLL0, EN_USB_CLKS)
-_CLK_ENABLE_PLL(pll1_clk, PLL1, EN_USB_CLKS)
-_CLK_ENABLE_PLL(pll2_clk, PLL2, CLKGATE)
-
-#define _CLK_DISABLE_PLL(name, r, g) \
-static void name##_disable(struct clk *clk) \
-{ \
- __raw_writel(BM_CLKCTRL_##r##CTRL0_POWER, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_CLR); \
- \
- if (clk == &pll2_clk) \
- __raw_writel(BM_CLKCTRL_##r##CTRL0_##g, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_SET); \
- else \
- __raw_writel(BM_CLKCTRL_##r##CTRL0_##g, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_CLR); \
- \
-}
-
-_CLK_DISABLE_PLL(pll0_clk, PLL0, EN_USB_CLKS)
-_CLK_DISABLE_PLL(pll1_clk, PLL1, EN_USB_CLKS)
-_CLK_DISABLE_PLL(pll2_clk, PLL2, CLKGATE)
-
-#define _DEFINE_CLOCK_PLL(name) \
- static struct clk name = { \
- .get_rate = name##_get_rate, \
- .enable = name##_enable, \
- .disable = name##_disable, \
- .parent = &ref_xtal_clk, \
- }
-
-_DEFINE_CLOCK_PLL(pll0_clk);
-_DEFINE_CLOCK_PLL(pll1_clk);
-_DEFINE_CLOCK_PLL(pll2_clk);
-
-/*
- * ref_clk
- */
-#define _CLK_GET_RATE_REF(name, sr, ss) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- unsigned long parent_rate; \
- u32 reg, div; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##sr); \
- div = (reg >> BP_CLKCTRL_##sr##_##ss##FRAC) & 0x3f; \
- parent_rate = clk_get_rate(clk->parent); \
- \
- return SH_DIV((parent_rate >> PARENT_RATE_SHIFT) * 18, \
- div, PARENT_RATE_SHIFT); \
-}
-
-_CLK_GET_RATE_REF(ref_cpu_clk, FRAC0, CPU)
-_CLK_GET_RATE_REF(ref_emi_clk, FRAC0, EMI)
-_CLK_GET_RATE_REF(ref_io0_clk, FRAC0, IO0)
-_CLK_GET_RATE_REF(ref_io1_clk, FRAC0, IO1)
-_CLK_GET_RATE_REF(ref_pix_clk, FRAC1, PIX)
-_CLK_GET_RATE_REF(ref_gpmi_clk, FRAC1, GPMI)
-
-#define _DEFINE_CLOCK_REF(name, er, es) \
- static struct clk name = { \
- .enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er, \
- .enable_shift = BP_CLKCTRL_##er##_CLKGATE##es, \
- .get_rate = name##_get_rate, \
- .enable = _raw_clk_enable, \
- .disable = _raw_clk_disable, \
- .parent = &pll0_clk, \
- }
-
-_DEFINE_CLOCK_REF(ref_cpu_clk, FRAC0, CPU);
-_DEFINE_CLOCK_REF(ref_emi_clk, FRAC0, EMI);
-_DEFINE_CLOCK_REF(ref_io0_clk, FRAC0, IO0);
-_DEFINE_CLOCK_REF(ref_io1_clk, FRAC0, IO1);
-_DEFINE_CLOCK_REF(ref_pix_clk, FRAC1, PIX);
-_DEFINE_CLOCK_REF(ref_gpmi_clk, FRAC1, GPMI);
-
-/*
- * General clocks
- *
- * clk_get_rate
- */
-static unsigned long lradc_clk_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 16;
-}
-
-static unsigned long rtc_clk_get_rate(struct clk *clk)
-{
- /* ref_xtal_clk is implemented as the only parent */
- return clk_get_rate(clk->parent) / 768;
-}
-
-static unsigned long clk32k_clk_get_rate(struct clk *clk)
-{
- return clk->parent->get_rate(clk->parent) / 750;
-}
-
-static unsigned long spdif_clk_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 4;
-}
-
-#define _CLK_GET_RATE(name, rs) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- u32 reg, div; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
- \
- if (clk->parent == &ref_xtal_clk) \
- div = (reg & BM_CLKCTRL_##rs##_DIV_XTAL) >> \
- BP_CLKCTRL_##rs##_DIV_XTAL; \
- else \
- div = (reg & BM_CLKCTRL_##rs##_DIV_##rs) >> \
- BP_CLKCTRL_##rs##_DIV_##rs; \
- \
- if (!div) \
- return -EINVAL; \
- \
- return clk_get_rate(clk->parent) / div; \
-}
-
-_CLK_GET_RATE(cpu_clk, CPU)
-_CLK_GET_RATE(emi_clk, EMI)
-
-#define _CLK_GET_RATE1(name, rs) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- u32 reg, div; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
- div = (reg & BM_CLKCTRL_##rs##_DIV) >> BP_CLKCTRL_##rs##_DIV; \
- \
- if (!div) \
- return -EINVAL; \
- \
- if (clk == &saif0_clk || clk == &saif1_clk) \
- return clk_get_rate(clk->parent) >> 16 * div; \
- else \
- return clk_get_rate(clk->parent) / div; \
-}
-
-_CLK_GET_RATE1(hbus_clk, HBUS)
-_CLK_GET_RATE1(xbus_clk, XBUS)
-_CLK_GET_RATE1(ssp0_clk, SSP0)
-_CLK_GET_RATE1(ssp1_clk, SSP1)
-_CLK_GET_RATE1(ssp2_clk, SSP2)
-_CLK_GET_RATE1(ssp3_clk, SSP3)
-_CLK_GET_RATE1(gpmi_clk, GPMI)
-_CLK_GET_RATE1(lcdif_clk, DIS_LCDIF)
-_CLK_GET_RATE1(saif0_clk, SAIF0)
-_CLK_GET_RATE1(saif1_clk, SAIF1)
-
-#define _CLK_GET_RATE_STUB(name) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- return clk_get_rate(clk->parent); \
-}
-
-_CLK_GET_RATE_STUB(uart_clk)
-_CLK_GET_RATE_STUB(pwm_clk)
-_CLK_GET_RATE_STUB(can0_clk)
-_CLK_GET_RATE_STUB(can1_clk)
-_CLK_GET_RATE_STUB(fec_clk)
-
-/*
- * clk_set_rate
- */
-/* fool compiler */
-#define BM_CLKCTRL_CPU_DIV 0
-#define BP_CLKCTRL_CPU_DIV 0
-#define BM_CLKCTRL_CPU_BUSY 0
-
-#define _CLK_SET_RATE(name, dr, fr, fs) \
-static int name##_set_rate(struct clk *clk, unsigned long rate) \
-{ \
- u32 reg, bm_busy, div_max, d, f, div, frac; \
- unsigned long diff, parent_rate, calc_rate; \
- \
- div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV; \
- bm_busy = BM_CLKCTRL_##dr##_BUSY; \
- \
- if (clk->parent == &ref_xtal_clk) { \
- parent_rate = clk_get_rate(clk->parent); \
- div = DIV_ROUND_UP(parent_rate, rate); \
- if (clk == &cpu_clk) { \
- div_max = BM_CLKCTRL_CPU_DIV_XTAL >> \
- BP_CLKCTRL_CPU_DIV_XTAL; \
- bm_busy = BM_CLKCTRL_CPU_BUSY_REF_XTAL; \
- } \
- if (div == 0 || div > div_max) \
- return -EINVAL; \
- } else { \
- /* \
- * hack alert: this block modifies clk->parent, too, \
- * so the base to use it the grand parent. \
- */ \
- parent_rate = clk_get_rate(clk->parent->parent); \
- rate >>= PARENT_RATE_SHIFT; \
- parent_rate >>= PARENT_RATE_SHIFT; \
- diff = parent_rate; \
- div = frac = 1; \
- if (clk == &cpu_clk) { \
- div_max = BM_CLKCTRL_CPU_DIV_CPU >> \
- BP_CLKCTRL_CPU_DIV_CPU; \
- bm_busy = BM_CLKCTRL_CPU_BUSY_REF_CPU; \
- } \
- for (d = 1; d <= div_max; d++) { \
- f = parent_rate * 18 / d / rate; \
- if ((parent_rate * 18 / d) % rate) \
- f++; \
- if (f < 18 || f > 35) \
- continue; \
- \
- calc_rate = parent_rate * 18 / f / d; \
- if (calc_rate > rate) \
- continue; \
- \
- if (rate - calc_rate < diff) { \
- frac = f; \
- div = d; \
- diff = rate - calc_rate; \
- } \
- \
- if (diff == 0) \
- break; \
- } \
- \
- if (diff == parent_rate) \
- return -EINVAL; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##fr); \
- reg &= ~BM_CLKCTRL_##fr##_##fs##FRAC; \
- reg |= frac << BP_CLKCTRL_##fr##_##fs##FRAC; \
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##fr); \
- } \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
- if (clk == &cpu_clk) { \
- reg &= ~BM_CLKCTRL_CPU_DIV_CPU; \
- reg |= div << BP_CLKCTRL_CPU_DIV_CPU; \
- } else { \
- reg &= ~BM_CLKCTRL_##dr##_DIV; \
- reg |= div << BP_CLKCTRL_##dr##_DIV; \
- if (reg & (1 << clk->enable_shift)) { \
- pr_err("%s: clock is gated\n", __func__); \
- return -EINVAL; \
- } \
- } \
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
- \
- return mxs_clkctrl_timeout(HW_CLKCTRL_##dr, bm_busy); \
-}
-
-_CLK_SET_RATE(cpu_clk, CPU, FRAC0, CPU)
-_CLK_SET_RATE(ssp0_clk, SSP0, FRAC0, IO0)
-_CLK_SET_RATE(ssp1_clk, SSP1, FRAC0, IO0)
-_CLK_SET_RATE(ssp2_clk, SSP2, FRAC0, IO1)
-_CLK_SET_RATE(ssp3_clk, SSP3, FRAC0, IO1)
-_CLK_SET_RATE(lcdif_clk, DIS_LCDIF, FRAC1, PIX)
-_CLK_SET_RATE(gpmi_clk, GPMI, FRAC1, GPMI)
-
-#define _CLK_SET_RATE1(name, dr) \
-static int name##_set_rate(struct clk *clk, unsigned long rate) \
-{ \
- u32 reg, div_max, div; \
- unsigned long parent_rate; \
- \
- parent_rate = clk_get_rate(clk->parent); \
- div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV; \
- \
- div = DIV_ROUND_UP(parent_rate, rate); \
- if (div == 0 || div > div_max) \
- return -EINVAL; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
- reg &= ~BM_CLKCTRL_##dr##_DIV; \
- reg |= div << BP_CLKCTRL_##dr##_DIV; \
- if (reg & (1 << clk->enable_shift)) { \
- pr_err("%s: clock is gated\n", __func__); \
- return -EINVAL; \
- } \
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
- \
- return mxs_clkctrl_timeout(HW_CLKCTRL_##dr, BM_CLKCTRL_##dr##_BUSY);\
-}
-
-_CLK_SET_RATE1(xbus_clk, XBUS)
-
-/* saif clock uses 16 bits frac div */
-#define _CLK_SET_RATE_SAIF(name, rs) \
-static int name##_set_rate(struct clk *clk, unsigned long rate) \
-{ \
- u16 div; \
- u32 reg; \
- u64 lrate; \
- unsigned long parent_rate; \
- \
- parent_rate = clk_get_rate(clk->parent); \
- if (rate > parent_rate) \
- return -EINVAL; \
- \
- lrate = (u64)rate << 16; \
- do_div(lrate, parent_rate); \
- div = (u16)lrate; \
- \
- if (!div) \
- return -EINVAL; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
- reg &= ~BM_CLKCTRL_##rs##_DIV; \
- reg |= div << BP_CLKCTRL_##rs##_DIV; \
- if (reg & (1 << clk->enable_shift)) { \
- pr_err("%s: clock is gated\n", __func__); \
- return -EINVAL; \
- } \
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
- \
- return mxs_clkctrl_timeout(HW_CLKCTRL_##rs, BM_CLKCTRL_##rs##_BUSY);\
-}
-
-_CLK_SET_RATE_SAIF(saif0_clk, SAIF0)
-_CLK_SET_RATE_SAIF(saif1_clk, SAIF1)
-
-#define _CLK_SET_RATE_STUB(name) \
-static int name##_set_rate(struct clk *clk, unsigned long rate) \
-{ \
- return -EINVAL; \
-}
-
-_CLK_SET_RATE_STUB(emi_clk)
-_CLK_SET_RATE_STUB(uart_clk)
-_CLK_SET_RATE_STUB(pwm_clk)
-_CLK_SET_RATE_STUB(spdif_clk)
-_CLK_SET_RATE_STUB(clk32k_clk)
-_CLK_SET_RATE_STUB(can0_clk)
-_CLK_SET_RATE_STUB(can1_clk)
-_CLK_SET_RATE_STUB(fec_clk)
-
-/*
- * clk_set_parent
- */
-#define _CLK_SET_PARENT(name, bit) \
-static int name##_set_parent(struct clk *clk, struct clk *parent) \
-{ \
- if (parent != clk->parent) { \
- __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \
- clk->parent = parent; \
- } \
- \
- return 0; \
-}
-
-_CLK_SET_PARENT(cpu_clk, CPU)
-_CLK_SET_PARENT(emi_clk, EMI)
-_CLK_SET_PARENT(ssp0_clk, SSP0)
-_CLK_SET_PARENT(ssp1_clk, SSP1)
-_CLK_SET_PARENT(ssp2_clk, SSP2)
-_CLK_SET_PARENT(ssp3_clk, SSP3)
-_CLK_SET_PARENT(lcdif_clk, DIS_LCDIF)
-_CLK_SET_PARENT(gpmi_clk, GPMI)
-_CLK_SET_PARENT(saif0_clk, SAIF0)
-_CLK_SET_PARENT(saif1_clk, SAIF1)
-
-#define _CLK_SET_PARENT_STUB(name) \
-static int name##_set_parent(struct clk *clk, struct clk *parent) \
-{ \
- if (parent != clk->parent) \
- return -EINVAL; \
- else \
- return 0; \
-}
-
-_CLK_SET_PARENT_STUB(pwm_clk)
-_CLK_SET_PARENT_STUB(uart_clk)
-_CLK_SET_PARENT_STUB(clk32k_clk)
-_CLK_SET_PARENT_STUB(spdif_clk)
-_CLK_SET_PARENT_STUB(fec_clk)
-_CLK_SET_PARENT_STUB(can0_clk)
-_CLK_SET_PARENT_STUB(can1_clk)
-
-/*
- * clk definition
- */
-static struct clk cpu_clk = {
- .get_rate = cpu_clk_get_rate,
- .set_rate = cpu_clk_set_rate,
- .set_parent = cpu_clk_set_parent,
- .parent = &ref_cpu_clk,
-};
-
-static struct clk hbus_clk = {
- .get_rate = hbus_clk_get_rate,
- .parent = &cpu_clk,
-};
-
-static struct clk xbus_clk = {
- .get_rate = xbus_clk_get_rate,
- .set_rate = xbus_clk_set_rate,
- .parent = &ref_xtal_clk,
-};
-
-static struct clk lradc_clk = {
- .get_rate = lradc_clk_get_rate,
- .parent = &clk32k_clk,
-};
-
-static struct clk rtc_clk = {
- .get_rate = rtc_clk_get_rate,
- .parent = &ref_xtal_clk,
-};
-
-/* usb_clk gate is controlled in DIGCTRL rather than CLKCTRL */
-static struct clk usb0_clk = {
- .enable_reg = DIGCTRL_BASE_ADDR,
- .enable_shift = 2,
- .enable = _raw_clk_enable,
- .disable = _raw_clk_disable,
- .parent = &pll0_clk,
-};
-
-static struct clk usb1_clk = {
- .enable_reg = DIGCTRL_BASE_ADDR,
- .enable_shift = 16,
- .enable = _raw_clk_enable,
- .disable = _raw_clk_disable,
- .parent = &pll1_clk,
-};
-
-#define _DEFINE_CLOCK(name, er, es, p) \
- static struct clk name = { \
- .enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er, \
- .enable_shift = BP_CLKCTRL_##er##_##es, \
- .get_rate = name##_get_rate, \
- .set_rate = name##_set_rate, \
- .set_parent = name##_set_parent, \
- .enable = _raw_clk_enable, \
- .disable = _raw_clk_disable, \
- .parent = p, \
- }
-
-_DEFINE_CLOCK(emi_clk, EMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp0_clk, SSP0, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp1_clk, SSP1, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp2_clk, SSP2, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp3_clk, SSP3, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(lcdif_clk, DIS_LCDIF, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(gpmi_clk, GPMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(saif0_clk, SAIF0, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(saif1_clk, SAIF1, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(can0_clk, FLEXCAN, STOP_CAN0, &ref_xtal_clk);
-_DEFINE_CLOCK(can1_clk, FLEXCAN, STOP_CAN1, &ref_xtal_clk);
-_DEFINE_CLOCK(pwm_clk, XTAL, PWM_CLK24M_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(uart_clk, XTAL, UART_CLK_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(clk32k_clk, XTAL, TIMROT_CLK32K_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(spdif_clk, SPDIF, CLKGATE, &pll0_clk);
-_DEFINE_CLOCK(fec_clk, ENET, DISABLE, &hbus_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-
-static struct clk_lookup lookups[] = {
- /* for amba bus driver */
- _REGISTER_CLOCK("duart", "apb_pclk", xbus_clk)
- /* for amba-pl011 driver */
- _REGISTER_CLOCK("duart", NULL, uart_clk)
- _REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk)
- _REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk)
- _REGISTER_CLOCK("imx28-gpmi-nand", NULL, gpmi_clk)
- _REGISTER_CLOCK("mxs-auart.0", NULL, uart_clk)
- _REGISTER_CLOCK("mxs-auart.1", NULL, uart_clk)
- _REGISTER_CLOCK("mxs-auart.2", NULL, uart_clk)
- _REGISTER_CLOCK("mxs-auart.3", NULL, uart_clk)
- _REGISTER_CLOCK("mxs-auart.4", NULL, uart_clk)
- _REGISTER_CLOCK("rtc", NULL, rtc_clk)
- _REGISTER_CLOCK("pll2", NULL, pll2_clk)
- _REGISTER_CLOCK("mxs-dma-apbh", NULL, hbus_clk)
- _REGISTER_CLOCK("mxs-dma-apbx", NULL, xbus_clk)
- _REGISTER_CLOCK("mxs-mmc.0", NULL, ssp0_clk)
- _REGISTER_CLOCK("mxs-mmc.1", NULL, ssp1_clk)
- _REGISTER_CLOCK("mxs-mmc.2", NULL, ssp2_clk)
- _REGISTER_CLOCK("mxs-mmc.3", NULL, ssp3_clk)
- _REGISTER_CLOCK("flexcan.0", NULL, can0_clk)
- _REGISTER_CLOCK("flexcan.1", NULL, can1_clk)
- _REGISTER_CLOCK(NULL, "usb0", usb0_clk)
- _REGISTER_CLOCK(NULL, "usb1", usb1_clk)
- _REGISTER_CLOCK("mxs-pwm.0", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.1", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.2", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.3", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.4", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.5", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.6", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.7", NULL, pwm_clk)
- _REGISTER_CLOCK(NULL, "lradc", lradc_clk)
- _REGISTER_CLOCK(NULL, "spdif", spdif_clk)
- _REGISTER_CLOCK("imx28-fb", NULL, lcdif_clk)
- _REGISTER_CLOCK("mxs-saif.0", NULL, saif0_clk)
- _REGISTER_CLOCK("mxs-saif.1", NULL, saif1_clk)
-};
-
-static int clk_misc_init(void)
-{
- u32 reg;
- int ret;
-
- /* Fix up parent per register setting */
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ);
- cpu_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_CPU) ?
- &ref_xtal_clk : &ref_cpu_clk;
- emi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_EMI) ?
- &ref_xtal_clk : &ref_emi_clk;
- ssp0_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP0) ?
- &ref_xtal_clk : &ref_io0_clk;
- ssp1_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP1) ?
- &ref_xtal_clk : &ref_io0_clk;
- ssp2_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP2) ?
- &ref_xtal_clk : &ref_io1_clk;
- ssp3_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP3) ?
- &ref_xtal_clk : &ref_io1_clk;
- lcdif_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF) ?
- &ref_xtal_clk : &ref_pix_clk;
- gpmi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_GPMI) ?
- &ref_xtal_clk : &ref_gpmi_clk;
- saif0_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SAIF0) ?
- &ref_xtal_clk : &pll0_clk;
- saif1_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SAIF1) ?
- &ref_xtal_clk : &pll0_clk;
-
- /* Use int div over frac when both are available */
- __raw_writel(BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
- __raw_writel(BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
- __raw_writel(BM_CLKCTRL_HBUS_DIV_FRAC_EN,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS_CLR);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
- reg &= ~BM_CLKCTRL_XBUS_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP0);
- reg &= ~BM_CLKCTRL_SSP0_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP0);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP1);
- reg &= ~BM_CLKCTRL_SSP1_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP1);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP2);
- reg &= ~BM_CLKCTRL_SSP2_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP2);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP3);
- reg &= ~BM_CLKCTRL_SSP3_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP3);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
- reg &= ~BM_CLKCTRL_GPMI_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_DIS_LCDIF);
- reg &= ~BM_CLKCTRL_DIS_LCDIF_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_DIS_LCDIF);
-
- /* SAIF has to use frac div for functional operation */
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF0);
- reg |= BM_CLKCTRL_SAIF0_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF0);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF1);
- reg |= BM_CLKCTRL_SAIF1_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF1);
-
- /*
-	 * Set safe hbus clock divider. A divider of 3 ensures that
- * the Vddd voltage required for the cpu clock is sufficiently
- * high for the hbus clock.
- */
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
- reg &= BM_CLKCTRL_HBUS_DIV;
- reg |= 3 << BP_CLKCTRL_HBUS_DIV;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
-
- ret = mxs_clkctrl_timeout(HW_CLKCTRL_HBUS, BM_CLKCTRL_HBUS_ASM_BUSY);
-
- /* Gate off cpu clock in WFI for power saving */
- __raw_writel(BM_CLKCTRL_CPU_INTERRUPT_WAIT,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_SET);
-
- /*
-	 * Extra fec clock setting:
-	 * the DENX M28 uses an external clock source,
-	 * so the clock output must not be enabled.
- */
- if (!machine_is_m28evk()) {
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET);
- reg &= ~BM_CLKCTRL_ENET_SLEEP;
- reg |= BM_CLKCTRL_ENET_CLK_OUT_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET);
- }
-
- /*
-	 * 480 MHz seems too high to be the ssp clock source directly,
- * so set frac0 to get a 288 MHz ref_io0.
- */
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC0);
- reg &= ~BM_CLKCTRL_FRAC0_IO0FRAC;
- reg |= 30 << BP_CLKCTRL_FRAC0_IO0FRAC;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC0);
-
- return ret;
-}
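
For reference, with IO0FRAC set to 30 the PFD relation used above (ref = 480 MHz * 18 / frac) works out to ref_io0 = 480 MHz * 18 / 30 = 288 MHz, which is where the 288 MHz figure in the comment comes from and which divides evenly to common SSP rates (e.g. /3 = 96 MHz).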
-
-int __init mx28_clocks_init(void)
-{
- clk_misc_init();
-
- /*
-	 * Source the ssp clocks from ref_io0/ref_io1 rather than ref_xtal,
-	 * as ref_xtal only provides 24 MHz at maximum.
- */
- clk_set_parent(&ssp0_clk, &ref_io0_clk);
- clk_set_parent(&ssp1_clk, &ref_io0_clk);
- clk_set_parent(&ssp2_clk, &ref_io1_clk);
- clk_set_parent(&ssp3_clk, &ref_io1_clk);
-
- clk_prepare_enable(&cpu_clk);
- clk_prepare_enable(&hbus_clk);
- clk_prepare_enable(&xbus_clk);
- clk_prepare_enable(&emi_clk);
- clk_prepare_enable(&uart_clk);
-
- clk_set_parent(&lcdif_clk, &ref_pix_clk);
- clk_set_parent(&saif0_clk, &pll0_clk);
- clk_set_parent(&saif1_clk, &pll0_clk);
-
- /*
- * Set an initial clock rate for the saif internal logic to work
-	 * properly. This is important when working in EXTMASTER mode, which
-	 * uses the other saif's BITCLK & LRCLK but still needs a basic
-	 * clock that is fast enough for the internal logic.
- */
- clk_set_rate(&saif0_clk, 24000000);
- clk_set_rate(&saif1_clk, 24000000);
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- mxs_timer_init(&clk32k_clk, MX28_INT_TIMER0);
-
- return 0;
-}
diff --git a/arch/arm/mach-mxs/clock.c b/arch/arm/mach-mxs/clock.c
deleted file mode 100644
index 97a6f4acc6cc..000000000000
--- a/arch/arm/mach-mxs/clock.c
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Based on arch/arm/plat-omap/clock.c
- *
- * Copyright (C) 2004 - 2005 Nokia corporation
- * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
- * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
- * Copyright 2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-/* #define DEBUG */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/proc_fs.h>
-#include <linux/semaphore.h>
-#include <linux/string.h>
-
-#include <mach/clock.h>
-
-static LIST_HEAD(clocks);
-static DEFINE_MUTEX(clocks_mutex);
-
-/*-------------------------------------------------------------------------
- * Standard clock functions defined in include/linux/clk.h
- *-------------------------------------------------------------------------*/
-
-static void __clk_disable(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return;
- WARN_ON(!clk->usecount);
-
- if (!(--clk->usecount)) {
- if (clk->disable)
- clk->disable(clk);
- __clk_disable(clk->parent);
- }
-}
-
-static int __clk_enable(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return -EINVAL;
-
- if (clk->usecount++ == 0) {
- __clk_enable(clk->parent);
-
- if (clk->enable)
- clk->enable(clk);
- }
- return 0;
-}
-
-/*
- * clk_enable/clk_disable may be called by drivers in atomic context, so they
- * must not hold a mutex.  Instead, clk_prepare/clk_unprepare can hold a
- * mutex, as that pair is only called in non-atomic context.  Until the
- * migration to the common clk framework, __clk_enable and __clk_disable are
- * therefore called from clk_prepare/clk_unprepare with the mutex held, and
- * clk_enable/clk_disable are left as dummy functions.
- */
-int clk_prepare(struct clk *clk)
-{
- int ret = 0;
-
- if (clk == NULL || IS_ERR(clk))
- return -EINVAL;
-
- mutex_lock(&clocks_mutex);
- ret = __clk_enable(clk);
- mutex_unlock(&clocks_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_prepare);
-
-void clk_unprepare(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return;
-
- mutex_lock(&clocks_mutex);
- __clk_disable(clk);
- mutex_unlock(&clocks_mutex);
-}
-EXPORT_SYMBOL(clk_unprepare);
-
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
- /* nothing to do */
-}
-EXPORT_SYMBOL(clk_disable);
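
The comment above describes the interim split: the real gating happens in clk_prepare/clk_unprepare (which may sleep), while clk_enable/clk_disable are no-ops. A minimal consumer-side sketch of the usual pairing, using only the standard <linux/clk.h> calls; the function and device name are hypothetical:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_start(struct device *dev)
{
	struct clk *clk = clk_get(dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare(clk);		/* may sleep; does the real ungating here */
	if (ret) {
		clk_put(clk);
		return ret;
	}
	clk_enable(clk);		/* no-op in this interim implementation */

	/* ... use the hardware ... */

	clk_disable(clk);		/* no-op */
	clk_unprepare(clk);		/* drops the use count, may gate the clock */
	clk_put(clk);
	return 0;
}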
-
-/* Retrieve the *current* clock rate. If the clock itself
- * does not provide a special calculation routine, ask
- * its parent and so on, until one is able to return
- * a valid clock rate.
- */
-unsigned long clk_get_rate(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return 0UL;
-
- if (clk->get_rate)
- return clk->get_rate(clk);
-
- return clk_get_rate(clk->parent);
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-/* Round the requested clock rate to the nearest supported
- * rate that is less than or equal to the requested rate.
- * This is dependent on the clock's current parent.
- */
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- if (clk == NULL || IS_ERR(clk) || !clk->round_rate)
- return 0;
-
- return clk->round_rate(clk, rate);
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-/* Set the clock to the requested clock rate. The rate must
- * match a supported rate exactly, based on what clk_round_rate returns.
- */
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- int ret = -EINVAL;
-
- if (clk == NULL || IS_ERR(clk) || clk->set_rate == NULL || rate == 0)
- return ret;
-
- mutex_lock(&clocks_mutex);
- ret = clk->set_rate(clk, rate);
- mutex_unlock(&clocks_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-/* Set the clock's parent to another clock source */
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
- int ret = -EINVAL;
- struct clk *old;
-
- if (clk == NULL || IS_ERR(clk) || parent == NULL ||
- IS_ERR(parent) || clk->set_parent == NULL)
- return ret;
-
- if (clk->usecount)
- clk_prepare_enable(parent);
-
- mutex_lock(&clocks_mutex);
- ret = clk->set_parent(clk, parent);
- if (ret == 0) {
- old = clk->parent;
- clk->parent = parent;
- } else {
- old = parent;
- }
- mutex_unlock(&clocks_mutex);
-
- if (clk->usecount)
- clk_disable(old);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_set_parent);
-
-/* Retrieve the clock's parent clock source */
-struct clk *clk_get_parent(struct clk *clk)
-{
- struct clk *ret = NULL;
-
- if (clk == NULL || IS_ERR(clk))
- return ret;
-
- return clk->parent;
-}
-EXPORT_SYMBOL(clk_get_parent);
diff --git a/arch/arm/mach-mxs/devices/Kconfig b/arch/arm/mach-mxs/devices/Kconfig
index b8913df4cfa2..19659de1c4e8 100644
--- a/arch/arm/mach-mxs/devices/Kconfig
+++ b/arch/arm/mach-mxs/devices/Kconfig
@@ -1,6 +1,5 @@
config MXS_HAVE_AMBA_DUART
bool
- select ARM_AMBA
config MXS_HAVE_PLATFORM_AUART
bool
diff --git a/arch/arm/mach-mxs/devices/platform-dma.c b/arch/arm/mach-mxs/devices/platform-dma.c
index 6a0202b1016c..46824501de00 100644
--- a/arch/arm/mach-mxs/devices/platform-dma.c
+++ b/arch/arm/mach-mxs/devices/platform-dma.c
@@ -14,7 +14,7 @@
#include <mach/mx28.h>
#include <mach/devices-common.h>
-static struct platform_device *__init mxs_add_dma(const char *devid,
+struct platform_device *__init mxs_add_dma(const char *devid,
resource_size_t base)
{
struct resource res[] = {
@@ -29,22 +29,3 @@ static struct platform_device *__init mxs_add_dma(const char *devid,
res, ARRAY_SIZE(res), NULL, 0,
DMA_BIT_MASK(32));
}
-
-static int __init mxs_add_mxs_dma(void)
-{
- char *apbh = "mxs-dma-apbh";
- char *apbx = "mxs-dma-apbx";
-
- if (cpu_is_mx23()) {
- mxs_add_dma(apbh, MX23_APBH_DMA_BASE_ADDR);
- mxs_add_dma(apbx, MX23_APBX_DMA_BASE_ADDR);
- }
-
- if (cpu_is_mx28()) {
- mxs_add_dma(apbh, MX28_APBH_DMA_BASE_ADDR);
- mxs_add_dma(apbx, MX28_APBX_DMA_BASE_ADDR);
- }
-
- return 0;
-}
-arch_initcall(mxs_add_mxs_dma);
diff --git a/arch/arm/mach-mxs/devices/platform-gpio-mxs.c b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
index ed0885e414e0..cd99f19ec637 100644
--- a/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
+++ b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
@@ -14,7 +14,7 @@
#include <mach/devices-common.h>
struct platform_device *__init mxs_add_gpio(
- int id, resource_size_t iobase, int irq)
+ char *name, int id, resource_size_t iobase, int irq)
{
struct resource res[] = {
{
@@ -29,25 +29,5 @@ struct platform_device *__init mxs_add_gpio(
};
return platform_device_register_resndata(&mxs_apbh_bus,
- "gpio-mxs", id, res, ARRAY_SIZE(res), NULL, 0);
+ name, id, res, ARRAY_SIZE(res), NULL, 0);
}
-
-static int __init mxs_add_mxs_gpio(void)
-{
- if (cpu_is_mx23()) {
- mxs_add_gpio(0, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO0);
- mxs_add_gpio(1, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO1);
- mxs_add_gpio(2, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO2);
- }
-
- if (cpu_is_mx28()) {
- mxs_add_gpio(0, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO0);
- mxs_add_gpio(1, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO1);
- mxs_add_gpio(2, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO2);
- mxs_add_gpio(3, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO3);
- mxs_add_gpio(4, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO4);
- }
-
- return 0;
-}
-postcore_initcall(mxs_add_mxs_gpio);
diff --git a/arch/arm/mach-mxs/devices/platform-mxs-mmc.c b/arch/arm/mach-mxs/devices/platform-mxs-mmc.c
index bef9d923f54e..b33c9d05c552 100644
--- a/arch/arm/mach-mxs/devices/platform-mxs-mmc.c
+++ b/arch/arm/mach-mxs/devices/platform-mxs-mmc.c
@@ -17,8 +17,9 @@
#include <mach/mx28.h>
#include <mach/devices-common.h>
-#define mxs_mxs_mmc_data_entry_single(soc, _id, hwid) \
+#define mxs_mxs_mmc_data_entry_single(soc, _devid, _id, hwid) \
{ \
+ .devid = _devid, \
.id = _id, \
.iobase = soc ## _SSP ## hwid ## _BASE_ADDR, \
.dma = soc ## _DMA_SSP ## hwid, \
@@ -26,23 +27,23 @@
.irq_dma = soc ## _INT_SSP ## hwid ## _DMA, \
}
-#define mxs_mxs_mmc_data_entry(soc, _id, hwid) \
- [_id] = mxs_mxs_mmc_data_entry_single(soc, _id, hwid)
+#define mxs_mxs_mmc_data_entry(soc, _devid, _id, hwid) \
+ [_id] = mxs_mxs_mmc_data_entry_single(soc, _devid, _id, hwid)
#ifdef CONFIG_SOC_IMX23
const struct mxs_mxs_mmc_data mx23_mxs_mmc_data[] __initconst = {
- mxs_mxs_mmc_data_entry(MX23, 0, 1),
- mxs_mxs_mmc_data_entry(MX23, 1, 2),
+ mxs_mxs_mmc_data_entry(MX23, "imx23-mmc", 0, 1),
+ mxs_mxs_mmc_data_entry(MX23, "imx23-mmc", 1, 2),
};
#endif
#ifdef CONFIG_SOC_IMX28
const struct mxs_mxs_mmc_data mx28_mxs_mmc_data[] __initconst = {
- mxs_mxs_mmc_data_entry(MX28, 0, 0),
- mxs_mxs_mmc_data_entry(MX28, 1, 1),
- mxs_mxs_mmc_data_entry(MX28, 2, 2),
- mxs_mxs_mmc_data_entry(MX28, 3, 3),
+ mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 0, 0),
+ mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 1, 1),
+ mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 2, 2),
+ mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 3, 3),
};
#endif
@@ -70,6 +71,6 @@ struct platform_device *__init mxs_add_mxs_mmc(
},
};
- return mxs_add_platform_device("mxs-mmc", data->id,
+ return mxs_add_platform_device(data->devid, data->id,
res, ARRAY_SIZE(res), pdata, sizeof(*pdata));
}
diff --git a/arch/arm/mach-mxs/include/mach/clock.h b/arch/arm/mach-mxs/include/mach/clock.h
deleted file mode 100644
index 592c9ab5d760..000000000000
--- a/arch/arm/mach-mxs/include/mach/clock.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef __MACH_MXS_CLOCK_H__
-#define __MACH_MXS_CLOCK_H__
-
-#ifndef __ASSEMBLY__
-#include <linux/list.h>
-
-struct module;
-
-struct clk {
- int id;
- /* Source clock this clk depends on */
- struct clk *parent;
- /* Reference count of clock enable/disable */
- __s8 usecount;
- /* Register bit position for clock's enable/disable control. */
- u8 enable_shift;
- /* Register address for clock's enable/disable control. */
- void __iomem *enable_reg;
- u32 flags;
- /* get the current clock rate (always a fresh value) */
- unsigned long (*get_rate) (struct clk *);
- /* Function ptr to set the clock to a new rate. The rate must match a
- supported rate returned from round_rate. Leave blank if clock is not
- programmable */
- int (*set_rate) (struct clk *, unsigned long);
- /* Function ptr to round the requested clock rate to the nearest
- supported rate that is less than or equal to the requested rate. */
- unsigned long (*round_rate) (struct clk *, unsigned long);
- /* Function ptr to enable the clock. Leave blank if clock can not
- be gated. */
- int (*enable) (struct clk *);
- /* Function ptr to disable the clock. Leave blank if clock can not
- be gated. */
- void (*disable) (struct clk *);
- /* Function ptr to set the parent clock of the clock. */
- int (*set_parent) (struct clk *, struct clk *);
-};
-
-int clk_register(struct clk *clk);
-void clk_unregister(struct clk *clk);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __MACH_MXS_CLOCK_H__ */
diff --git a/arch/arm/mach-mxs/include/mach/common.h b/arch/arm/mach-mxs/include/mach/common.h
index 8d88399b73ef..de6c7ba42544 100644
--- a/arch/arm/mach-mxs/include/mach/common.h
+++ b/arch/arm/mach-mxs/include/mach/common.h
@@ -11,28 +11,27 @@
#ifndef __MACH_MXS_COMMON_H__
#define __MACH_MXS_COMMON_H__
-struct clk;
-
extern const u32 *mxs_get_ocotp(void);
extern int mxs_reset_block(void __iomem *);
-extern void mxs_timer_init(struct clk *, int);
+extern void mxs_timer_init(int);
extern void mxs_restart(char, const char *);
extern int mxs_saif_clkmux_select(unsigned int clkmux);
extern void mx23_soc_init(void);
-extern int mx23_register_gpios(void);
extern int mx23_clocks_init(void);
extern void mx23_map_io(void);
extern void mx23_init_irq(void);
extern void mx28_soc_init(void);
-extern int mx28_register_gpios(void);
extern int mx28_clocks_init(void);
extern void mx28_map_io(void);
extern void mx28_init_irq(void);
extern void icoll_init_irq(void);
-extern int mxs_clkctrl_timeout(unsigned int reg_offset, unsigned int mask);
+extern struct platform_device *mxs_add_dma(const char *devid,
+ resource_size_t base);
+extern struct platform_device *mxs_add_gpio(char *name, int id,
+ resource_size_t iobase, int irq);
#endif /* __MACH_MXS_COMMON_H__ */
diff --git a/arch/arm/mach-mxs/include/mach/devices-common.h b/arch/arm/mach-mxs/include/mach/devices-common.h
index 21e45a70d344..e8b1d958240b 100644
--- a/arch/arm/mach-mxs/include/mach/devices-common.h
+++ b/arch/arm/mach-mxs/include/mach/devices-common.h
@@ -82,8 +82,9 @@ struct platform_device * __init mxs_add_mxs_i2c(
const struct mxs_mxs_i2c_data *data);
/* mmc */
-#include <mach/mmc.h>
+#include <linux/mmc/mxs-mmc.h>
struct mxs_mxs_mmc_data {
+ const char *devid;
int id;
resource_size_t iobase;
resource_size_t dma;
diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c
index da4610ebe9e6..dafd48e86c8c 100644
--- a/arch/arm/mach-mxs/mach-mx28evk.c
+++ b/arch/arm/mach-mxs/mach-mx28evk.c
@@ -226,7 +226,7 @@ static void __init mx28evk_fec_reset(void)
struct clk *clk;
/* Enable fec phy clock */
- clk = clk_get_sys("pll2", NULL);
+ clk = clk_get_sys("enet_out", NULL);
if (!IS_ERR(clk))
clk_prepare_enable(clk);
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
new file mode 100644
index 000000000000..8cac94b33020
--- /dev/null
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+#include <mach/common.h>
+
+static int __init mxs_icoll_add_irq_domain(struct device_node *np,
+ struct device_node *interrupt_parent)
+{
+ irq_domain_add_legacy(np, 128, 0, 0, &irq_domain_simple_ops, NULL);
+
+ return 0;
+}
+
+static int __init mxs_gpio_add_irq_domain(struct device_node *np,
+ struct device_node *interrupt_parent)
+{
+ static int gpio_irq_base = MXS_GPIO_IRQ_START;
+
+ irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, NULL);
+ gpio_irq_base += 32;
+
+ return 0;
+}
+
+static const struct of_device_id mxs_irq_match[] __initconst = {
+ { .compatible = "fsl,mxs-icoll", .data = mxs_icoll_add_irq_domain, },
+ { .compatible = "fsl,mxs-gpio", .data = mxs_gpio_add_irq_domain, },
+ { /* sentinel */ }
+};
+
+static void __init mxs_dt_init_irq(void)
+{
+ icoll_init_irq();
+ of_irq_init(mxs_irq_match);
+}
+
+static void __init imx23_timer_init(void)
+{
+ mx23_clocks_init();
+}
+
+static struct sys_timer imx23_timer = {
+ .init = imx23_timer_init,
+};
+
+static void __init imx28_timer_init(void)
+{
+ mx28_clocks_init();
+}
+
+static struct sys_timer imx28_timer = {
+ .init = imx28_timer_init,
+};
+
+static void __init imx28_evk_init(void)
+{
+ struct clk *clk;
+
+ /* Enable fec phy clock */
+ clk = clk_get_sys("enet_out", NULL);
+ if (!IS_ERR(clk))
+ clk_prepare_enable(clk);
+}
+
+static void __init mxs_machine_init(void)
+{
+ if (of_machine_is_compatible("fsl,imx28-evk"))
+ imx28_evk_init();
+
+ of_platform_populate(NULL, of_default_bus_match_table,
+ NULL, NULL);
+}
+
+static const char *imx23_dt_compat[] __initdata = {
+ "fsl,imx23-evk",
+ "fsl,imx23",
+ NULL,
+};
+
+static const char *imx28_dt_compat[] __initdata = {
+ "fsl,imx28-evk",
+ "fsl,imx28",
+ NULL,
+};
+
+DT_MACHINE_START(IMX23, "Freescale i.MX23 (Device Tree)")
+ .map_io = mx23_map_io,
+ .init_irq = mxs_dt_init_irq,
+ .timer = &imx23_timer,
+ .init_machine = mxs_machine_init,
+ .dt_compat = imx23_dt_compat,
+ .restart = mxs_restart,
+MACHINE_END
+
+DT_MACHINE_START(IMX28, "Freescale i.MX28 (Device Tree)")
+ .map_io = mx28_map_io,
+ .init_irq = mxs_dt_init_irq,
+ .timer = &imx28_timer,
+ .init_machine = mxs_machine_init,
+ .dt_compat = imx28_dt_compat,
+ .restart = mxs_restart,
+MACHINE_END
diff --git a/arch/arm/mach-mxs/mm.c b/arch/arm/mach-mxs/mm.c
index 67a384edcf5b..dccb67a9e7c4 100644
--- a/arch/arm/mach-mxs/mm.c
+++ b/arch/arm/mach-mxs/mm.c
@@ -66,9 +66,25 @@ void __init mx28_init_irq(void)
void __init mx23_soc_init(void)
{
pinctrl_provide_dummies();
+
+ mxs_add_dma("imx23-dma-apbh", MX23_APBH_DMA_BASE_ADDR);
+ mxs_add_dma("imx23-dma-apbx", MX23_APBX_DMA_BASE_ADDR);
+
+ mxs_add_gpio("imx23-gpio", 0, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO0);
+ mxs_add_gpio("imx23-gpio", 1, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO1);
+ mxs_add_gpio("imx23-gpio", 2, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO2);
}
void __init mx28_soc_init(void)
{
pinctrl_provide_dummies();
+
+	mxs_add_dma("imx28-dma-apbh", MX28_APBH_DMA_BASE_ADDR);
+	mxs_add_dma("imx28-dma-apbx", MX28_APBX_DMA_BASE_ADDR);
+
+ mxs_add_gpio("imx28-gpio", 0, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO0);
+ mxs_add_gpio("imx28-gpio", 1, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO1);
+ mxs_add_gpio("imx28-gpio", 2, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO2);
+ mxs_add_gpio("imx28-gpio", 3, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO3);
+ mxs_add_gpio("imx28-gpio", 4, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO4);
}
diff --git a/arch/arm/mach-mxs/regs-clkctrl-mx23.h b/arch/arm/mach-mxs/regs-clkctrl-mx23.h
deleted file mode 100644
index 0ea5c9d0e2b2..000000000000
--- a/arch/arm/mach-mxs/regs-clkctrl-mx23.h
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * Freescale CLKCTRL Register Definitions
- *
- * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
- * Copyright 2008-2010 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This file is generated from an XML file. Do not edit it.
- *
- * Xml Revision: 1.48
- * Template revision: 26195
- */
-
-#ifndef __REGS_CLKCTRL_MX23_H__
-#define __REGS_CLKCTRL_MX23_H__
-
-
-#define HW_CLKCTRL_PLLCTRL0 (0x00000000)
-#define HW_CLKCTRL_PLLCTRL0_SET (0x00000004)
-#define HW_CLKCTRL_PLLCTRL0_CLR (0x00000008)
-#define HW_CLKCTRL_PLLCTRL0_TOG (0x0000000c)
-
-#define BP_CLKCTRL_PLLCTRL0_LFR_SEL 28
-#define BM_CLKCTRL_PLLCTRL0_LFR_SEL 0x30000000
-#define BF_CLKCTRL_PLLCTRL0_LFR_SEL(v) \
- (((v) << 28) & BM_CLKCTRL_PLLCTRL0_LFR_SEL)
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__TIMES_2 0x1
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__TIMES_05 0x2
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLLCTRL0_CP_SEL 24
-#define BM_CLKCTRL_PLLCTRL0_CP_SEL 0x03000000
-#define BF_CLKCTRL_PLLCTRL0_CP_SEL(v) \
- (((v) << 24) & BM_CLKCTRL_PLLCTRL0_CP_SEL)
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__TIMES_2 0x1
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__TIMES_05 0x2
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLLCTRL0_DIV_SEL 20
-#define BM_CLKCTRL_PLLCTRL0_DIV_SEL 0x00300000
-#define BF_CLKCTRL_PLLCTRL0_DIV_SEL(v) \
- (((v) << 20) & BM_CLKCTRL_PLLCTRL0_DIV_SEL)
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__LOWER 0x1
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__LOWEST 0x2
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS 0x00040000
-#define BM_CLKCTRL_PLLCTRL0_POWER 0x00010000
-
-#define HW_CLKCTRL_PLLCTRL1 (0x00000010)
-
-#define BM_CLKCTRL_PLLCTRL1_LOCK 0x80000000
-#define BM_CLKCTRL_PLLCTRL1_FORCE_LOCK 0x40000000
-#define BP_CLKCTRL_PLLCTRL1_LOCK_COUNT 0
-#define BM_CLKCTRL_PLLCTRL1_LOCK_COUNT 0x0000FFFF
-#define BF_CLKCTRL_PLLCTRL1_LOCK_COUNT(v) \
- (((v) << 0) & BM_CLKCTRL_PLLCTRL1_LOCK_COUNT)
-
-#define HW_CLKCTRL_CPU (0x00000020)
-#define HW_CLKCTRL_CPU_SET (0x00000024)
-#define HW_CLKCTRL_CPU_CLR (0x00000028)
-#define HW_CLKCTRL_CPU_TOG (0x0000002c)
-
-#define BM_CLKCTRL_CPU_BUSY_REF_XTAL 0x20000000
-#define BM_CLKCTRL_CPU_BUSY_REF_CPU 0x10000000
-#define BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN 0x04000000
-#define BP_CLKCTRL_CPU_DIV_XTAL 16
-#define BM_CLKCTRL_CPU_DIV_XTAL 0x03FF0000
-#define BF_CLKCTRL_CPU_DIV_XTAL(v) \
- (((v) << 16) & BM_CLKCTRL_CPU_DIV_XTAL)
-#define BM_CLKCTRL_CPU_INTERRUPT_WAIT 0x00001000
-#define BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN 0x00000400
-#define BP_CLKCTRL_CPU_DIV_CPU 0
-#define BM_CLKCTRL_CPU_DIV_CPU 0x0000003F
-#define BF_CLKCTRL_CPU_DIV_CPU(v) \
- (((v) << 0) & BM_CLKCTRL_CPU_DIV_CPU)
-
-#define HW_CLKCTRL_HBUS (0x00000030)
-#define HW_CLKCTRL_HBUS_SET (0x00000034)
-#define HW_CLKCTRL_HBUS_CLR (0x00000038)
-#define HW_CLKCTRL_HBUS_TOG (0x0000003c)
-
-#define BM_CLKCTRL_HBUS_BUSY 0x20000000
-#define BM_CLKCTRL_HBUS_DCP_AS_ENABLE 0x10000000
-#define BM_CLKCTRL_HBUS_PXP_AS_ENABLE 0x08000000
-#define BM_CLKCTRL_HBUS_APBHDMA_AS_ENABLE 0x04000000
-#define BM_CLKCTRL_HBUS_APBXDMA_AS_ENABLE 0x02000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_JAM_AS_ENABLE 0x01000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_AS_ENABLE 0x00800000
-#define BM_CLKCTRL_HBUS_CPU_DATA_AS_ENABLE 0x00400000
-#define BM_CLKCTRL_HBUS_CPU_INSTR_AS_ENABLE 0x00200000
-#define BM_CLKCTRL_HBUS_AUTO_SLOW_MODE 0x00100000
-#define BP_CLKCTRL_HBUS_SLOW_DIV 16
-#define BM_CLKCTRL_HBUS_SLOW_DIV 0x00070000
-#define BF_CLKCTRL_HBUS_SLOW_DIV(v) \
- (((v) << 16) & BM_CLKCTRL_HBUS_SLOW_DIV)
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY1 0x0
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY2 0x1
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY4 0x2
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY8 0x3
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY16 0x4
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY32 0x5
-#define BM_CLKCTRL_HBUS_DIV_FRAC_EN 0x00000020
-#define BP_CLKCTRL_HBUS_DIV 0
-#define BM_CLKCTRL_HBUS_DIV 0x0000001F
-#define BF_CLKCTRL_HBUS_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_HBUS_DIV)
-
-#define HW_CLKCTRL_XBUS (0x00000040)
-
-#define BM_CLKCTRL_XBUS_BUSY 0x80000000
-#define BM_CLKCTRL_XBUS_DIV_FRAC_EN 0x00000400
-#define BP_CLKCTRL_XBUS_DIV 0
-#define BM_CLKCTRL_XBUS_DIV 0x000003FF
-#define BF_CLKCTRL_XBUS_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_XBUS_DIV)
-
-#define HW_CLKCTRL_XTAL (0x00000050)
-#define HW_CLKCTRL_XTAL_SET (0x00000054)
-#define HW_CLKCTRL_XTAL_CLR (0x00000058)
-#define HW_CLKCTRL_XTAL_TOG (0x0000005c)
-
-#define BP_CLKCTRL_XTAL_UART_CLK_GATE 31
-#define BM_CLKCTRL_XTAL_UART_CLK_GATE 0x80000000
-#define BP_CLKCTRL_XTAL_FILT_CLK24M_GATE 30
-#define BM_CLKCTRL_XTAL_FILT_CLK24M_GATE 0x40000000
-#define BP_CLKCTRL_XTAL_PWM_CLK24M_GATE 29
-#define BM_CLKCTRL_XTAL_PWM_CLK24M_GATE 0x20000000
-#define BM_CLKCTRL_XTAL_DRI_CLK24M_GATE 0x10000000
-#define BM_CLKCTRL_XTAL_DIGCTRL_CLK1M_GATE 0x08000000
-#define BP_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 26
-#define BM_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 0x04000000
-#define BP_CLKCTRL_XTAL_DIV_UART 0
-#define BM_CLKCTRL_XTAL_DIV_UART 0x00000003
-#define BF_CLKCTRL_XTAL_DIV_UART(v) \
- (((v) << 0) & BM_CLKCTRL_XTAL_DIV_UART)
-
-#define HW_CLKCTRL_PIX (0x00000060)
-
-#define BP_CLKCTRL_PIX_CLKGATE 31
-#define BM_CLKCTRL_PIX_CLKGATE 0x80000000
-#define BM_CLKCTRL_PIX_BUSY 0x20000000
-#define BM_CLKCTRL_PIX_DIV_FRAC_EN 0x00001000
-#define BP_CLKCTRL_PIX_DIV 0
-#define BM_CLKCTRL_PIX_DIV 0x00000FFF
-#define BF_CLKCTRL_PIX_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_PIX_DIV)
-
-#define HW_CLKCTRL_SSP (0x00000070)
-
-#define BP_CLKCTRL_SSP_CLKGATE 31
-#define BM_CLKCTRL_SSP_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP_BUSY 0x20000000
-#define BM_CLKCTRL_SSP_DIV_FRAC_EN 0x00000200
-#define BP_CLKCTRL_SSP_DIV 0
-#define BM_CLKCTRL_SSP_DIV 0x000001FF
-#define BF_CLKCTRL_SSP_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SSP_DIV)
-
-#define HW_CLKCTRL_GPMI (0x00000080)
-
-#define BP_CLKCTRL_GPMI_CLKGATE 31
-#define BM_CLKCTRL_GPMI_CLKGATE 0x80000000
-#define BM_CLKCTRL_GPMI_BUSY 0x20000000
-#define BM_CLKCTRL_GPMI_DIV_FRAC_EN 0x00000400
-#define BP_CLKCTRL_GPMI_DIV 0
-#define BM_CLKCTRL_GPMI_DIV 0x000003FF
-#define BF_CLKCTRL_GPMI_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_GPMI_DIV)
-
-#define HW_CLKCTRL_SPDIF (0x00000090)
-
-#define BM_CLKCTRL_SPDIF_CLKGATE 0x80000000
-
-#define HW_CLKCTRL_EMI (0x000000a0)
-
-#define BP_CLKCTRL_EMI_CLKGATE 31
-#define BM_CLKCTRL_EMI_CLKGATE 0x80000000
-#define BM_CLKCTRL_EMI_SYNC_MODE_EN 0x40000000
-#define BM_CLKCTRL_EMI_BUSY_REF_XTAL 0x20000000
-#define BM_CLKCTRL_EMI_BUSY_REF_EMI 0x10000000
-#define BM_CLKCTRL_EMI_BUSY_REF_CPU 0x08000000
-#define BM_CLKCTRL_EMI_BUSY_SYNC_MODE 0x04000000
-#define BM_CLKCTRL_EMI_BUSY_DCC_RESYNC 0x00020000
-#define BM_CLKCTRL_EMI_DCC_RESYNC_ENABLE 0x00010000
-#define BP_CLKCTRL_EMI_DIV_XTAL 8
-#define BM_CLKCTRL_EMI_DIV_XTAL 0x00000F00
-#define BF_CLKCTRL_EMI_DIV_XTAL(v) \
- (((v) << 8) & BM_CLKCTRL_EMI_DIV_XTAL)
-#define BP_CLKCTRL_EMI_DIV_EMI 0
-#define BM_CLKCTRL_EMI_DIV_EMI 0x0000003F
-#define BF_CLKCTRL_EMI_DIV_EMI(v) \
- (((v) << 0) & BM_CLKCTRL_EMI_DIV_EMI)
-
-#define HW_CLKCTRL_IR (0x000000b0)
-
-#define BM_CLKCTRL_IR_CLKGATE 0x80000000
-#define BM_CLKCTRL_IR_AUTO_DIV 0x20000000
-#define BM_CLKCTRL_IR_IR_BUSY 0x10000000
-#define BM_CLKCTRL_IR_IROV_BUSY 0x08000000
-#define BP_CLKCTRL_IR_IROV_DIV 16
-#define BM_CLKCTRL_IR_IROV_DIV 0x01FF0000
-#define BF_CLKCTRL_IR_IROV_DIV(v) \
- (((v) << 16) & BM_CLKCTRL_IR_IROV_DIV)
-#define BP_CLKCTRL_IR_IR_DIV 0
-#define BM_CLKCTRL_IR_IR_DIV 0x000003FF
-#define BF_CLKCTRL_IR_IR_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_IR_IR_DIV)
-
-#define HW_CLKCTRL_SAIF (0x000000c0)
-
-#define BM_CLKCTRL_SAIF_CLKGATE 0x80000000
-#define BM_CLKCTRL_SAIF_BUSY 0x20000000
-#define BM_CLKCTRL_SAIF_DIV_FRAC_EN 0x00010000
-#define BP_CLKCTRL_SAIF_DIV 0
-#define BM_CLKCTRL_SAIF_DIV 0x0000FFFF
-#define BF_CLKCTRL_SAIF_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SAIF_DIV)
-
-#define HW_CLKCTRL_TV (0x000000d0)
-
-#define BM_CLKCTRL_TV_CLK_TV108M_GATE 0x80000000
-#define BM_CLKCTRL_TV_CLK_TV_GATE 0x40000000
-
-#define HW_CLKCTRL_ETM (0x000000e0)
-
-#define BM_CLKCTRL_ETM_CLKGATE 0x80000000
-#define BM_CLKCTRL_ETM_BUSY 0x20000000
-#define BM_CLKCTRL_ETM_DIV_FRAC_EN 0x00000040
-#define BP_CLKCTRL_ETM_DIV 0
-#define BM_CLKCTRL_ETM_DIV 0x0000003F
-#define BF_CLKCTRL_ETM_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_ETM_DIV)
-
-#define HW_CLKCTRL_FRAC (0x000000f0)
-#define HW_CLKCTRL_FRAC_SET (0x000000f4)
-#define HW_CLKCTRL_FRAC_CLR (0x000000f8)
-#define HW_CLKCTRL_FRAC_TOG (0x000000fc)
-
-#define BP_CLKCTRL_FRAC_CLKGATEIO 31
-#define BM_CLKCTRL_FRAC_CLKGATEIO 0x80000000
-#define BM_CLKCTRL_FRAC_IO_STABLE 0x40000000
-#define BP_CLKCTRL_FRAC_IOFRAC 24
-#define BM_CLKCTRL_FRAC_IOFRAC 0x3F000000
-#define BF_CLKCTRL_FRAC_IOFRAC(v) \
- (((v) << 24) & BM_CLKCTRL_FRAC_IOFRAC)
-#define BP_CLKCTRL_FRAC_CLKGATEPIX 23
-#define BM_CLKCTRL_FRAC_CLKGATEPIX 0x00800000
-#define BM_CLKCTRL_FRAC_PIX_STABLE 0x00400000
-#define BP_CLKCTRL_FRAC_PIXFRAC 16
-#define BM_CLKCTRL_FRAC_PIXFRAC 0x003F0000
-#define BF_CLKCTRL_FRAC_PIXFRAC(v) \
- (((v) << 16) & BM_CLKCTRL_FRAC_PIXFRAC)
-#define BP_CLKCTRL_FRAC_CLKGATEEMI 15
-#define BM_CLKCTRL_FRAC_CLKGATEEMI 0x00008000
-#define BM_CLKCTRL_FRAC_EMI_STABLE 0x00004000
-#define BP_CLKCTRL_FRAC_EMIFRAC 8
-#define BM_CLKCTRL_FRAC_EMIFRAC 0x00003F00
-#define BF_CLKCTRL_FRAC_EMIFRAC(v) \
- (((v) << 8) & BM_CLKCTRL_FRAC_EMIFRAC)
-#define BP_CLKCTRL_FRAC_CLKGATECPU 7
-#define BM_CLKCTRL_FRAC_CLKGATECPU 0x00000080
-#define BM_CLKCTRL_FRAC_CPU_STABLE 0x00000040
-#define BP_CLKCTRL_FRAC_CPUFRAC 0
-#define BM_CLKCTRL_FRAC_CPUFRAC 0x0000003F
-#define BF_CLKCTRL_FRAC_CPUFRAC(v) \
- (((v) << 0) & BM_CLKCTRL_FRAC_CPUFRAC)
-
-#define HW_CLKCTRL_FRAC1 (0x00000100)
-#define HW_CLKCTRL_FRAC1_SET (0x00000104)
-#define HW_CLKCTRL_FRAC1_CLR (0x00000108)
-#define HW_CLKCTRL_FRAC1_TOG (0x0000010c)
-
-#define BM_CLKCTRL_FRAC1_CLKGATEVID 0x80000000
-#define BM_CLKCTRL_FRAC1_VID_STABLE 0x40000000
-
-#define HW_CLKCTRL_CLKSEQ (0x00000110)
-#define HW_CLKCTRL_CLKSEQ_SET (0x00000114)
-#define HW_CLKCTRL_CLKSEQ_CLR (0x00000118)
-#define HW_CLKCTRL_CLKSEQ_TOG (0x0000011c)
-
-#define BM_CLKCTRL_CLKSEQ_BYPASS_ETM 0x00000100
-#define BM_CLKCTRL_CLKSEQ_BYPASS_CPU 0x00000080
-#define BM_CLKCTRL_CLKSEQ_BYPASS_EMI 0x00000040
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP 0x00000020
-#define BM_CLKCTRL_CLKSEQ_BYPASS_GPMI 0x00000010
-#define BM_CLKCTRL_CLKSEQ_BYPASS_IR 0x00000008
-#define BM_CLKCTRL_CLKSEQ_BYPASS_PIX 0x00000002
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF 0x00000001
-
-#define HW_CLKCTRL_RESET (0x00000120)
-
-#define BM_CLKCTRL_RESET_CHIP 0x00000002
-#define BM_CLKCTRL_RESET_DIG 0x00000001
-
-#define HW_CLKCTRL_STATUS (0x00000130)
-
-#define BP_CLKCTRL_STATUS_CPU_LIMIT 30
-#define BM_CLKCTRL_STATUS_CPU_LIMIT 0xC0000000
-#define BF_CLKCTRL_STATUS_CPU_LIMIT(v) \
- (((v) << 30) & BM_CLKCTRL_STATUS_CPU_LIMIT)
-
-#define HW_CLKCTRL_VERSION (0x00000140)
-
-#define BP_CLKCTRL_VERSION_MAJOR 24
-#define BM_CLKCTRL_VERSION_MAJOR 0xFF000000
-#define BF_CLKCTRL_VERSION_MAJOR(v) \
- (((v) << 24) & BM_CLKCTRL_VERSION_MAJOR)
-#define BP_CLKCTRL_VERSION_MINOR 16
-#define BM_CLKCTRL_VERSION_MINOR 0x00FF0000
-#define BF_CLKCTRL_VERSION_MINOR(v) \
- (((v) << 16) & BM_CLKCTRL_VERSION_MINOR)
-#define BP_CLKCTRL_VERSION_STEP 0
-#define BM_CLKCTRL_VERSION_STEP 0x0000FFFF
-#define BF_CLKCTRL_VERSION_STEP(v) \
- (((v) << 0) & BM_CLKCTRL_VERSION_STEP)
-
-#endif /* __REGS_CLKCTRL_MX23_H__ */
diff --git a/arch/arm/mach-mxs/regs-clkctrl-mx28.h b/arch/arm/mach-mxs/regs-clkctrl-mx28.h
deleted file mode 100644
index 7d1b061d7943..000000000000
--- a/arch/arm/mach-mxs/regs-clkctrl-mx28.h
+++ /dev/null
@@ -1,486 +0,0 @@
-/*
- * Freescale CLKCTRL Register Definitions
- *
- * Copyright 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This file is generated from an XML file. Do not edit it.
- *
- * Xml Revision: 1.48
- * Template revision: 26195
- */
-
-#ifndef __REGS_CLKCTRL_MX28_H__
-#define __REGS_CLKCTRL_MX28_H__
-
-#define HW_CLKCTRL_PLL0CTRL0 (0x00000000)
-#define HW_CLKCTRL_PLL0CTRL0_SET (0x00000004)
-#define HW_CLKCTRL_PLL0CTRL0_CLR (0x00000008)
-#define HW_CLKCTRL_PLL0CTRL0_TOG (0x0000000c)
-
-#define BP_CLKCTRL_PLL0CTRL0_LFR_SEL 28
-#define BM_CLKCTRL_PLL0CTRL0_LFR_SEL 0x30000000
-#define BF_CLKCTRL_PLL0CTRL0_LFR_SEL(v) \
- (((v) << 28) & BM_CLKCTRL_PLL0CTRL0_LFR_SEL)
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__TIMES_2 0x1
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__TIMES_05 0x2
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL0CTRL0_CP_SEL 24
-#define BM_CLKCTRL_PLL0CTRL0_CP_SEL 0x03000000
-#define BF_CLKCTRL_PLL0CTRL0_CP_SEL(v) \
- (((v) << 24) & BM_CLKCTRL_PLL0CTRL0_CP_SEL)
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__TIMES_2 0x1
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__TIMES_05 0x2
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL0CTRL0_DIV_SEL 20
-#define BM_CLKCTRL_PLL0CTRL0_DIV_SEL 0x00300000
-#define BF_CLKCTRL_PLL0CTRL0_DIV_SEL(v) \
- (((v) << 20) & BM_CLKCTRL_PLL0CTRL0_DIV_SEL)
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__LOWER 0x1
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__LOWEST 0x2
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_PLL0CTRL0_EN_USB_CLKS 0x00040000
-#define BM_CLKCTRL_PLL0CTRL0_POWER 0x00020000
-
-#define HW_CLKCTRL_PLL0CTRL1 (0x00000010)
-
-#define BM_CLKCTRL_PLL0CTRL1_LOCK 0x80000000
-#define BM_CLKCTRL_PLL0CTRL1_FORCE_LOCK 0x40000000
-#define BP_CLKCTRL_PLL0CTRL1_LOCK_COUNT 0
-#define BM_CLKCTRL_PLL0CTRL1_LOCK_COUNT 0x0000FFFF
-#define BF_CLKCTRL_PLL0CTRL1_LOCK_COUNT(v) \
- (((v) << 0) & BM_CLKCTRL_PLL0CTRL1_LOCK_COUNT)
-
-#define HW_CLKCTRL_PLL1CTRL0 (0x00000020)
-#define HW_CLKCTRL_PLL1CTRL0_SET (0x00000024)
-#define HW_CLKCTRL_PLL1CTRL0_CLR (0x00000028)
-#define HW_CLKCTRL_PLL1CTRL0_TOG (0x0000002c)
-
-#define BM_CLKCTRL_PLL1CTRL0_CLKGATEEMI 0x80000000
-#define BP_CLKCTRL_PLL1CTRL0_LFR_SEL 28
-#define BM_CLKCTRL_PLL1CTRL0_LFR_SEL 0x30000000
-#define BF_CLKCTRL_PLL1CTRL0_LFR_SEL(v) \
- (((v) << 28) & BM_CLKCTRL_PLL1CTRL0_LFR_SEL)
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__TIMES_2 0x1
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__TIMES_05 0x2
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL1CTRL0_CP_SEL 24
-#define BM_CLKCTRL_PLL1CTRL0_CP_SEL 0x03000000
-#define BF_CLKCTRL_PLL1CTRL0_CP_SEL(v) \
- (((v) << 24) & BM_CLKCTRL_PLL1CTRL0_CP_SEL)
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__TIMES_2 0x1
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__TIMES_05 0x2
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL1CTRL0_DIV_SEL 20
-#define BM_CLKCTRL_PLL1CTRL0_DIV_SEL 0x00300000
-#define BF_CLKCTRL_PLL1CTRL0_DIV_SEL(v) \
- (((v) << 20) & BM_CLKCTRL_PLL1CTRL0_DIV_SEL)
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__LOWER 0x1
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__LOWEST 0x2
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_PLL1CTRL0_EN_USB_CLKS 0x00040000
-#define BM_CLKCTRL_PLL1CTRL0_POWER 0x00020000
-
-#define HW_CLKCTRL_PLL1CTRL1 (0x00000030)
-
-#define BM_CLKCTRL_PLL1CTRL1_LOCK 0x80000000
-#define BM_CLKCTRL_PLL1CTRL1_FORCE_LOCK 0x40000000
-#define BP_CLKCTRL_PLL1CTRL1_LOCK_COUNT 0
-#define BM_CLKCTRL_PLL1CTRL1_LOCK_COUNT 0x0000FFFF
-#define BF_CLKCTRL_PLL1CTRL1_LOCK_COUNT(v) \
- (((v) << 0) & BM_CLKCTRL_PLL1CTRL1_LOCK_COUNT)
-
-#define HW_CLKCTRL_PLL2CTRL0 (0x00000040)
-#define HW_CLKCTRL_PLL2CTRL0_SET (0x00000044)
-#define HW_CLKCTRL_PLL2CTRL0_CLR (0x00000048)
-#define HW_CLKCTRL_PLL2CTRL0_TOG (0x0000004c)
-
-#define BM_CLKCTRL_PLL2CTRL0_CLKGATE 0x80000000
-#define BP_CLKCTRL_PLL2CTRL0_LFR_SEL 28
-#define BM_CLKCTRL_PLL2CTRL0_LFR_SEL 0x30000000
-#define BF_CLKCTRL_PLL2CTRL0_LFR_SEL(v) \
- (((v) << 28) & BM_CLKCTRL_PLL2CTRL0_LFR_SEL)
-#define BM_CLKCTRL_PLL2CTRL0_HOLD_RING_OFF_B 0x04000000
-#define BP_CLKCTRL_PLL2CTRL0_CP_SEL 24
-#define BM_CLKCTRL_PLL2CTRL0_CP_SEL 0x03000000
-#define BF_CLKCTRL_PLL2CTRL0_CP_SEL(v) \
- (((v) << 24) & BM_CLKCTRL_PLL2CTRL0_CP_SEL)
-#define BM_CLKCTRL_PLL2CTRL0_POWER 0x00800000
-
-#define HW_CLKCTRL_CPU (0x00000050)
-#define HW_CLKCTRL_CPU_SET (0x00000054)
-#define HW_CLKCTRL_CPU_CLR (0x00000058)
-#define HW_CLKCTRL_CPU_TOG (0x0000005c)
-
-#define BM_CLKCTRL_CPU_BUSY_REF_XTAL 0x20000000
-#define BM_CLKCTRL_CPU_BUSY_REF_CPU 0x10000000
-#define BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN 0x04000000
-#define BP_CLKCTRL_CPU_DIV_XTAL 16
-#define BM_CLKCTRL_CPU_DIV_XTAL 0x03FF0000
-#define BF_CLKCTRL_CPU_DIV_XTAL(v) \
- (((v) << 16) & BM_CLKCTRL_CPU_DIV_XTAL)
-#define BM_CLKCTRL_CPU_INTERRUPT_WAIT 0x00001000
-#define BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN 0x00000400
-#define BP_CLKCTRL_CPU_DIV_CPU 0
-#define BM_CLKCTRL_CPU_DIV_CPU 0x0000003F
-#define BF_CLKCTRL_CPU_DIV_CPU(v) \
- (((v) << 0) & BM_CLKCTRL_CPU_DIV_CPU)
-
-#define HW_CLKCTRL_HBUS (0x00000060)
-#define HW_CLKCTRL_HBUS_SET (0x00000064)
-#define HW_CLKCTRL_HBUS_CLR (0x00000068)
-#define HW_CLKCTRL_HBUS_TOG (0x0000006c)
-
-#define BM_CLKCTRL_HBUS_ASM_BUSY 0x80000000
-#define BM_CLKCTRL_HBUS_DCP_AS_ENABLE 0x40000000
-#define BM_CLKCTRL_HBUS_PXP_AS_ENABLE 0x20000000
-#define BM_CLKCTRL_HBUS_ASM_EMIPORT_AS_ENABLE 0x08000000
-#define BM_CLKCTRL_HBUS_APBHDMA_AS_ENABLE 0x04000000
-#define BM_CLKCTRL_HBUS_APBXDMA_AS_ENABLE 0x02000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_JAM_AS_ENABLE 0x01000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_AS_ENABLE 0x00800000
-#define BM_CLKCTRL_HBUS_CPU_DATA_AS_ENABLE 0x00400000
-#define BM_CLKCTRL_HBUS_CPU_INSTR_AS_ENABLE 0x00200000
-#define BM_CLKCTRL_HBUS_ASM_ENABLE 0x00100000
-#define BM_CLKCTRL_HBUS_AUTO_CLEAR_DIV_ENABLE 0x00080000
-#define BP_CLKCTRL_HBUS_SLOW_DIV 16
-#define BM_CLKCTRL_HBUS_SLOW_DIV 0x00070000
-#define BF_CLKCTRL_HBUS_SLOW_DIV(v) \
- (((v) << 16) & BM_CLKCTRL_HBUS_SLOW_DIV)
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY1 0x0
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY2 0x1
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY4 0x2
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY8 0x3
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY16 0x4
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY32 0x5
-#define BM_CLKCTRL_HBUS_DIV_FRAC_EN 0x00000020
-#define BP_CLKCTRL_HBUS_DIV 0
-#define BM_CLKCTRL_HBUS_DIV 0x0000001F
-#define BF_CLKCTRL_HBUS_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_HBUS_DIV)
-
-#define HW_CLKCTRL_XBUS (0x00000070)
-
-#define BM_CLKCTRL_XBUS_BUSY 0x80000000
-#define BM_CLKCTRL_XBUS_AUTO_CLEAR_DIV_ENABLE 0x00000800
-#define BM_CLKCTRL_XBUS_DIV_FRAC_EN 0x00000400
-#define BP_CLKCTRL_XBUS_DIV 0
-#define BM_CLKCTRL_XBUS_DIV 0x000003FF
-#define BF_CLKCTRL_XBUS_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_XBUS_DIV)
-
-#define HW_CLKCTRL_XTAL (0x00000080)
-#define HW_CLKCTRL_XTAL_SET (0x00000084)
-#define HW_CLKCTRL_XTAL_CLR (0x00000088)
-#define HW_CLKCTRL_XTAL_TOG (0x0000008c)
-
-#define BP_CLKCTRL_XTAL_UART_CLK_GATE 31
-#define BM_CLKCTRL_XTAL_UART_CLK_GATE 0x80000000
-#define BP_CLKCTRL_XTAL_PWM_CLK24M_GATE 29
-#define BM_CLKCTRL_XTAL_PWM_CLK24M_GATE 0x20000000
-#define BP_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 26
-#define BM_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 0x04000000
-#define BP_CLKCTRL_XTAL_DIV_UART 0
-#define BM_CLKCTRL_XTAL_DIV_UART 0x00000003
-#define BF_CLKCTRL_XTAL_DIV_UART(v) \
- (((v) << 0) & BM_CLKCTRL_XTAL_DIV_UART)
-
-#define HW_CLKCTRL_SSP0 (0x00000090)
-
-#define BP_CLKCTRL_SSP0_CLKGATE 31
-#define BM_CLKCTRL_SSP0_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP0_BUSY 0x20000000
-#define BM_CLKCTRL_SSP0_DIV_FRAC_EN 0x00000200
-#define BP_CLKCTRL_SSP0_DIV 0
-#define BM_CLKCTRL_SSP0_DIV 0x000001FF
-#define BF_CLKCTRL_SSP0_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SSP0_DIV)
-
-#define HW_CLKCTRL_SSP1 (0x000000a0)
-
-#define BP_CLKCTRL_SSP1_CLKGATE 31
-#define BM_CLKCTRL_SSP1_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP1_BUSY 0x20000000
-#define BM_CLKCTRL_SSP1_DIV_FRAC_EN 0x00000200
-#define BP_CLKCTRL_SSP1_DIV 0
-#define BM_CLKCTRL_SSP1_DIV 0x000001FF
-#define BF_CLKCTRL_SSP1_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SSP1_DIV)
-
-#define HW_CLKCTRL_SSP2 (0x000000b0)
-
-#define BP_CLKCTRL_SSP2_CLKGATE 31
-#define BM_CLKCTRL_SSP2_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP2_BUSY 0x20000000
-#define BM_CLKCTRL_SSP2_DIV_FRAC_EN 0x00000200
-#define BP_CLKCTRL_SSP2_DIV 0
-#define BM_CLKCTRL_SSP2_DIV 0x000001FF
-#define BF_CLKCTRL_SSP2_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SSP2_DIV)
-
-#define HW_CLKCTRL_SSP3 (0x000000c0)
-
-#define BP_CLKCTRL_SSP3_CLKGATE 31
-#define BM_CLKCTRL_SSP3_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP3_BUSY 0x20000000
-#define BM_CLKCTRL_SSP3_DIV_FRAC_EN 0x00000200
-#define BP_CLKCTRL_SSP3_DIV 0
-#define BM_CLKCTRL_SSP3_DIV 0x000001FF
-#define BF_CLKCTRL_SSP3_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SSP3_DIV)
-
-#define HW_CLKCTRL_GPMI (0x000000d0)
-
-#define BP_CLKCTRL_GPMI_CLKGATE 31
-#define BM_CLKCTRL_GPMI_CLKGATE 0x80000000
-#define BM_CLKCTRL_GPMI_BUSY 0x20000000
-#define BM_CLKCTRL_GPMI_DIV_FRAC_EN 0x00000400
-#define BP_CLKCTRL_GPMI_DIV 0
-#define BM_CLKCTRL_GPMI_DIV 0x000003FF
-#define BF_CLKCTRL_GPMI_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_GPMI_DIV)
-
-#define HW_CLKCTRL_SPDIF (0x000000e0)
-
-#define BP_CLKCTRL_SPDIF_CLKGATE 31
-#define BM_CLKCTRL_SPDIF_CLKGATE 0x80000000
-
-#define HW_CLKCTRL_EMI (0x000000f0)
-
-#define BP_CLKCTRL_EMI_CLKGATE 31
-#define BM_CLKCTRL_EMI_CLKGATE 0x80000000
-#define BM_CLKCTRL_EMI_SYNC_MODE_EN 0x40000000
-#define BM_CLKCTRL_EMI_BUSY_REF_XTAL 0x20000000
-#define BM_CLKCTRL_EMI_BUSY_REF_EMI 0x10000000
-#define BM_CLKCTRL_EMI_BUSY_REF_CPU 0x08000000
-#define BM_CLKCTRL_EMI_BUSY_SYNC_MODE 0x04000000
-#define BM_CLKCTRL_EMI_BUSY_DCC_RESYNC 0x00020000
-#define BM_CLKCTRL_EMI_DCC_RESYNC_ENABLE 0x00010000
-#define BP_CLKCTRL_EMI_DIV_XTAL 8
-#define BM_CLKCTRL_EMI_DIV_XTAL 0x00000F00
-#define BF_CLKCTRL_EMI_DIV_XTAL(v) \
- (((v) << 8) & BM_CLKCTRL_EMI_DIV_XTAL)
-#define BP_CLKCTRL_EMI_DIV_EMI 0
-#define BM_CLKCTRL_EMI_DIV_EMI 0x0000003F
-#define BF_CLKCTRL_EMI_DIV_EMI(v) \
- (((v) << 0) & BM_CLKCTRL_EMI_DIV_EMI)
-
-#define HW_CLKCTRL_SAIF0 (0x00000100)
-
-#define BP_CLKCTRL_SAIF0_CLKGATE 31
-#define BM_CLKCTRL_SAIF0_CLKGATE 0x80000000
-#define BM_CLKCTRL_SAIF0_BUSY 0x20000000
-#define BM_CLKCTRL_SAIF0_DIV_FRAC_EN 0x00010000
-#define BP_CLKCTRL_SAIF0_DIV 0
-#define BM_CLKCTRL_SAIF0_DIV 0x0000FFFF
-#define BF_CLKCTRL_SAIF0_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SAIF0_DIV)
-
-#define HW_CLKCTRL_SAIF1 (0x00000110)
-
-#define BP_CLKCTRL_SAIF1_CLKGATE 31
-#define BM_CLKCTRL_SAIF1_CLKGATE 0x80000000
-#define BM_CLKCTRL_SAIF1_BUSY 0x20000000
-#define BM_CLKCTRL_SAIF1_DIV_FRAC_EN 0x00010000
-#define BP_CLKCTRL_SAIF1_DIV 0
-#define BM_CLKCTRL_SAIF1_DIV 0x0000FFFF
-#define BF_CLKCTRL_SAIF1_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SAIF1_DIV)
-
-#define HW_CLKCTRL_DIS_LCDIF (0x00000120)
-
-#define BP_CLKCTRL_DIS_LCDIF_CLKGATE 31
-#define BM_CLKCTRL_DIS_LCDIF_CLKGATE 0x80000000
-#define BM_CLKCTRL_DIS_LCDIF_BUSY 0x20000000
-#define BM_CLKCTRL_DIS_LCDIF_DIV_FRAC_EN 0x00002000
-#define BP_CLKCTRL_DIS_LCDIF_DIV 0
-#define BM_CLKCTRL_DIS_LCDIF_DIV 0x00001FFF
-#define BF_CLKCTRL_DIS_LCDIF_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_DIS_LCDIF_DIV)
-
-#define HW_CLKCTRL_ETM (0x00000130)
-
-#define BM_CLKCTRL_ETM_CLKGATE 0x80000000
-#define BM_CLKCTRL_ETM_BUSY 0x20000000
-#define BM_CLKCTRL_ETM_DIV_FRAC_EN 0x00000080
-#define BP_CLKCTRL_ETM_DIV 0
-#define BM_CLKCTRL_ETM_DIV 0x0000007F
-#define BF_CLKCTRL_ETM_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_ETM_DIV)
-
-#define HW_CLKCTRL_ENET (0x00000140)
-
-#define BM_CLKCTRL_ENET_SLEEP 0x80000000
-#define BP_CLKCTRL_ENET_DISABLE 30
-#define BM_CLKCTRL_ENET_DISABLE 0x40000000
-#define BM_CLKCTRL_ENET_STATUS 0x20000000
-#define BM_CLKCTRL_ENET_BUSY_TIME 0x08000000
-#define BP_CLKCTRL_ENET_DIV_TIME 21
-#define BM_CLKCTRL_ENET_DIV_TIME 0x07E00000
-#define BF_CLKCTRL_ENET_DIV_TIME(v) \
- (((v) << 21) & BM_CLKCTRL_ENET_DIV_TIME)
-#define BM_CLKCTRL_ENET_BUSY 0x08000000
-#define BP_CLKCTRL_ENET_DIV 21
-#define BM_CLKCTRL_ENET_DIV 0x07E00000
-#define BF_CLKCTRL_ENET_DIV(v) \
- (((v) << 21) & BM_CLKCTRL_ENET_DIV)
-#define BP_CLKCTRL_ENET_TIME_SEL 19
-#define BM_CLKCTRL_ENET_TIME_SEL 0x00180000
-#define BF_CLKCTRL_ENET_TIME_SEL(v) \
- (((v) << 19) & BM_CLKCTRL_ENET_TIME_SEL)
-#define BV_CLKCTRL_ENET_TIME_SEL__XTAL 0x0
-#define BV_CLKCTRL_ENET_TIME_SEL__PLL 0x1
-#define BV_CLKCTRL_ENET_TIME_SEL__RMII_CLK 0x2
-#define BV_CLKCTRL_ENET_TIME_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_ENET_CLK_OUT_EN 0x00040000
-#define BM_CLKCTRL_ENET_RESET_BY_SW_CHIP 0x00020000
-#define BM_CLKCTRL_ENET_RESET_BY_SW 0x00010000
-
-#define HW_CLKCTRL_HSADC (0x00000150)
-
-#define BM_CLKCTRL_HSADC_RESETB 0x40000000
-#define BP_CLKCTRL_HSADC_FREQDIV 28
-#define BM_CLKCTRL_HSADC_FREQDIV 0x30000000
-#define BF_CLKCTRL_HSADC_FREQDIV(v) \
- (((v) << 28) & BM_CLKCTRL_HSADC_FREQDIV)
-
-#define HW_CLKCTRL_FLEXCAN (0x00000160)
-
-#define BP_CLKCTRL_FLEXCAN_STOP_CAN0 30
-#define BM_CLKCTRL_FLEXCAN_STOP_CAN0 0x40000000
-#define BM_CLKCTRL_FLEXCAN_CAN0_STATUS 0x20000000
-#define BP_CLKCTRL_FLEXCAN_STOP_CAN1 28
-#define BM_CLKCTRL_FLEXCAN_STOP_CAN1 0x10000000
-#define BM_CLKCTRL_FLEXCAN_CAN1_STATUS 0x08000000
-
-#define HW_CLKCTRL_FRAC0 (0x000001b0)
-#define HW_CLKCTRL_FRAC0_SET (0x000001b4)
-#define HW_CLKCTRL_FRAC0_CLR (0x000001b8)
-#define HW_CLKCTRL_FRAC0_TOG (0x000001bc)
-
-#define BP_CLKCTRL_FRAC0_CLKGATEIO0 31
-#define BM_CLKCTRL_FRAC0_CLKGATEIO0 0x80000000
-#define BM_CLKCTRL_FRAC0_IO0_STABLE 0x40000000
-#define BP_CLKCTRL_FRAC0_IO0FRAC 24
-#define BM_CLKCTRL_FRAC0_IO0FRAC 0x3F000000
-#define BF_CLKCTRL_FRAC0_IO0FRAC(v) \
- (((v) << 24) & BM_CLKCTRL_FRAC0_IO0FRAC)
-#define BP_CLKCTRL_FRAC0_CLKGATEIO1 23
-#define BM_CLKCTRL_FRAC0_CLKGATEIO1 0x00800000
-#define BM_CLKCTRL_FRAC0_IO1_STABLE 0x00400000
-#define BP_CLKCTRL_FRAC0_IO1FRAC 16
-#define BM_CLKCTRL_FRAC0_IO1FRAC 0x003F0000
-#define BF_CLKCTRL_FRAC0_IO1FRAC(v) \
- (((v) << 16) & BM_CLKCTRL_FRAC0_IO1FRAC)
-#define BP_CLKCTRL_FRAC0_CLKGATEEMI 15
-#define BM_CLKCTRL_FRAC0_CLKGATEEMI 0x00008000
-#define BM_CLKCTRL_FRAC0_EMI_STABLE 0x00004000
-#define BP_CLKCTRL_FRAC0_EMIFRAC 8
-#define BM_CLKCTRL_FRAC0_EMIFRAC 0x00003F00
-#define BF_CLKCTRL_FRAC0_EMIFRAC(v) \
- (((v) << 8) & BM_CLKCTRL_FRAC0_EMIFRAC)
-#define BP_CLKCTRL_FRAC0_CLKGATECPU 7
-#define BM_CLKCTRL_FRAC0_CLKGATECPU 0x00000080
-#define BM_CLKCTRL_FRAC0_CPU_STABLE 0x00000040
-#define BP_CLKCTRL_FRAC0_CPUFRAC 0
-#define BM_CLKCTRL_FRAC0_CPUFRAC 0x0000003F
-#define BF_CLKCTRL_FRAC0_CPUFRAC(v) \
- (((v) << 0) & BM_CLKCTRL_FRAC0_CPUFRAC)
-
-#define HW_CLKCTRL_FRAC1 (0x000001c0)
-#define HW_CLKCTRL_FRAC1_SET (0x000001c4)
-#define HW_CLKCTRL_FRAC1_CLR (0x000001c8)
-#define HW_CLKCTRL_FRAC1_TOG (0x000001cc)
-
-#define BP_CLKCTRL_FRAC1_CLKGATEGPMI 23
-#define BM_CLKCTRL_FRAC1_CLKGATEGPMI 0x00800000
-#define BM_CLKCTRL_FRAC1_GPMI_STABLE 0x00400000
-#define BP_CLKCTRL_FRAC1_GPMIFRAC 16
-#define BM_CLKCTRL_FRAC1_GPMIFRAC 0x003F0000
-#define BF_CLKCTRL_FRAC1_GPMIFRAC(v) \
- (((v) << 16) & BM_CLKCTRL_FRAC1_GPMIFRAC)
-#define BP_CLKCTRL_FRAC1_CLKGATEHSADC 15
-#define BM_CLKCTRL_FRAC1_CLKGATEHSADC 0x00008000
-#define BM_CLKCTRL_FRAC1_HSADC_STABLE 0x00004000
-#define BP_CLKCTRL_FRAC1_HSADCFRAC 8
-#define BM_CLKCTRL_FRAC1_HSADCFRAC 0x00003F00
-#define BF_CLKCTRL_FRAC1_HSADCFRAC(v) \
- (((v) << 8) & BM_CLKCTRL_FRAC1_HSADCFRAC)
-#define BP_CLKCTRL_FRAC1_CLKGATEPIX 7
-#define BM_CLKCTRL_FRAC1_CLKGATEPIX 0x00000080
-#define BM_CLKCTRL_FRAC1_PIX_STABLE 0x00000040
-#define BP_CLKCTRL_FRAC1_PIXFRAC 0
-#define BM_CLKCTRL_FRAC1_PIXFRAC 0x0000003F
-#define BF_CLKCTRL_FRAC1_PIXFRAC(v) \
- (((v) << 0) & BM_CLKCTRL_FRAC1_PIXFRAC)
-
-#define HW_CLKCTRL_CLKSEQ (0x000001d0)
-#define HW_CLKCTRL_CLKSEQ_SET (0x000001d4)
-#define HW_CLKCTRL_CLKSEQ_CLR (0x000001d8)
-#define HW_CLKCTRL_CLKSEQ_TOG (0x000001dc)
-
-#define BM_CLKCTRL_CLKSEQ_BYPASS_CPU 0x00040000
-#define BM_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF 0x00004000
-#define BV_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF__BYPASS 0x1
-#define BV_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF__PFD 0x0
-#define BM_CLKCTRL_CLKSEQ_BYPASS_ETM 0x00000100
-#define BM_CLKCTRL_CLKSEQ_BYPASS_EMI 0x00000080
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP3 0x00000040
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP2 0x00000020
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP1 0x00000010
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP0 0x00000008
-#define BM_CLKCTRL_CLKSEQ_BYPASS_GPMI 0x00000004
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF1 0x00000002
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF0 0x00000001
-
-#define HW_CLKCTRL_RESET (0x000001e0)
-
-#define BM_CLKCTRL_RESET_WDOG_POR_DISABLE 0x00000020
-#define BM_CLKCTRL_RESET_EXTERNAL_RESET_ENABLE 0x00000010
-#define BM_CLKCTRL_RESET_THERMAL_RESET_ENABLE 0x00000008
-#define BM_CLKCTRL_RESET_THERMAL_RESET_DEFAULT 0x00000004
-#define BM_CLKCTRL_RESET_CHIP 0x00000002
-#define BM_CLKCTRL_RESET_DIG 0x00000001
-
-#define HW_CLKCTRL_STATUS (0x000001f0)
-
-#define BP_CLKCTRL_STATUS_CPU_LIMIT 30
-#define BM_CLKCTRL_STATUS_CPU_LIMIT 0xC0000000
-#define BF_CLKCTRL_STATUS_CPU_LIMIT(v) \
- (((v) << 30) & BM_CLKCTRL_STATUS_CPU_LIMIT)
-
-#define HW_CLKCTRL_VERSION (0x00000200)
-
-#define BP_CLKCTRL_VERSION_MAJOR 24
-#define BM_CLKCTRL_VERSION_MAJOR 0xFF000000
-#define BF_CLKCTRL_VERSION_MAJOR(v) \
- (((v) << 24) & BM_CLKCTRL_VERSION_MAJOR)
-#define BP_CLKCTRL_VERSION_MINOR 16
-#define BM_CLKCTRL_VERSION_MINOR 0x00FF0000
-#define BF_CLKCTRL_VERSION_MINOR(v) \
- (((v) << 16) & BM_CLKCTRL_VERSION_MINOR)
-#define BP_CLKCTRL_VERSION_STEP 0
-#define BM_CLKCTRL_VERSION_STEP 0x0000FFFF
-#define BF_CLKCTRL_VERSION_STEP(v) \
- (((v) << 0) & BM_CLKCTRL_VERSION_STEP)
-
-#endif /* __REGS_CLKCTRL_MX28_H__ */
diff --git a/arch/arm/mach-mxs/system.c b/arch/arm/mach-mxs/system.c
index 80ac1fca8a00..30042e23bfa7 100644
--- a/arch/arm/mach-mxs/system.c
+++ b/arch/arm/mach-mxs/system.c
@@ -37,8 +37,6 @@
#define MXS_MODULE_CLKGATE (1 << 30)
#define MXS_MODULE_SFTRST (1 << 31)
-#define CLKCTRL_TIMEOUT 10 /* 10 ms */
-
static void __iomem *mxs_clkctrl_reset_addr;
/*
@@ -139,17 +137,3 @@ error:
return -ETIMEDOUT;
}
EXPORT_SYMBOL(mxs_reset_block);
-
-int mxs_clkctrl_timeout(unsigned int reg_offset, unsigned int mask)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(CLKCTRL_TIMEOUT);
- while (readl_relaxed(MXS_IO_ADDRESS(MXS_CLKCTRL_BASE_ADDR)
- + reg_offset) & mask) {
- if (time_after(jiffies, timeout)) {
- pr_err("Timeout at CLKCTRL + 0x%x\n", reg_offset);
- return -ETIMEDOUT;
- }
- }
-
- return 0;
-}
diff --git a/arch/arm/mach-mxs/timer.c b/arch/arm/mach-mxs/timer.c
index 564a63279f18..02d36de9c4e8 100644
--- a/arch/arm/mach-mxs/timer.c
+++ b/arch/arm/mach-mxs/timer.c
@@ -20,6 +20,7 @@
* MA 02110-1301, USA.
*/
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
@@ -243,8 +244,16 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)
return 0;
}
-void __init mxs_timer_init(struct clk *timer_clk, int irq)
+void __init mxs_timer_init(int irq)
{
+ struct clk *timer_clk;
+
+ timer_clk = clk_get_sys("timrot", NULL);
+ if (IS_ERR(timer_clk)) {
+ pr_err("%s: failed to get clk\n", __func__);
+ return;
+ }
+
clk_prepare_enable(timer_clk);
/*
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index c1b681ef4cba..f2f8a5847018 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -595,7 +595,12 @@ gpio_free:
gpio_free(AMS_DELTA_GPIO_PIN_MODEM_IRQ);
return err;
}
-late_initcall(late_init);
+
+static void __init ams_delta_init_late(void)
+{
+ omap1_init_late();
+ late_init();
+}
static void __init ams_delta_map_io(void)
{
@@ -611,6 +616,7 @@ MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = ams_delta_init,
+ .init_late = ams_delta_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c
index 4a4afb371022..c7364fdbda05 100644
--- a/arch/arm/mach-omap1/board-fsample.c
+++ b/arch/arm/mach-omap1/board-fsample.c
@@ -369,6 +369,7 @@ MACHINE_START(OMAP_FSAMPLE, "OMAP730 F-Sample")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_fsample_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-generic.c b/arch/arm/mach-omap1/board-generic.c
index 9a5fe581bc1c..e75e2d55a2d7 100644
--- a/arch/arm/mach-omap1/board-generic.c
+++ b/arch/arm/mach-omap1/board-generic.c
@@ -88,6 +88,7 @@ MACHINE_START(OMAP_GENERIC, "Generic OMAP1510/1610/1710")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_generic_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 057ec13f0649..7e503686f7af 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -431,6 +431,7 @@ MACHINE_START(OMAP_H2, "TI-H2")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = h2_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index f6ddf8759657..9fb03f189d93 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -425,6 +425,7 @@ MACHINE_START(OMAP_H3, "TI OMAP1710 H3 board")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = h3_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index 60c06ee23855..118a9d4a4c54 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -605,6 +605,7 @@ MACHINE_START(HERALD, "HTC Herald")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = htcherald_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index 67d7fd57a692..7970223a559d 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -457,6 +457,7 @@ MACHINE_START(OMAP_INNOVATOR, "TI-Innovator")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = innovator_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index d21dcc2fbc5a..7212ae97f44a 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -255,6 +255,7 @@ MACHINE_START(NOKIA770, "Nokia 770")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_nokia770_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index a5f85dda3f69..da8d872d3d1c 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -574,6 +574,7 @@ MACHINE_START(OMAP_OSK, "TI-OSK")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = osk_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index a60e6c22f816..949b62a73693 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -267,6 +267,7 @@ MACHINE_START(OMAP_PALMTE, "OMAP310 based Palm Tungsten E")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_palmte_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmtt.c b/arch/arm/mach-omap1/board-palmtt.c
index 8d854878547b..7f1e1cf2bf46 100644
--- a/arch/arm/mach-omap1/board-palmtt.c
+++ b/arch/arm/mach-omap1/board-palmtt.c
@@ -313,6 +313,7 @@ MACHINE_START(OMAP_PALMTT, "OMAP1510 based Palm Tungsten|T")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_palmtt_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index 61ed4f0247ce..3c71c6bace2c 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -330,6 +330,7 @@ MACHINE_START(OMAP_PALMZ71, "OMAP310 based Palm Zire71")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_palmz71_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index a2c88890e767..f2cb24387c22 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -331,6 +331,7 @@ MACHINE_START(OMAP_PERSEUS2, "OMAP730 Perseus2")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_perseus2_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-sx1.c b/arch/arm/mach-omap1/board-sx1.c
index f34cb74a9f41..3b7b82b13684 100644
--- a/arch/arm/mach-omap1/board-sx1.c
+++ b/arch/arm/mach-omap1/board-sx1.c
@@ -407,6 +407,7 @@ MACHINE_START(SX1, "OMAP310 based Siemens SX1")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_sx1_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index 37232d04233f..afd67f0ec495 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -294,6 +294,7 @@ MACHINE_START(VOICEBLUE, "VoiceBlue OMAP5910")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = voiceblue_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = voiceblue_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h
index bb7779b57795..c2552b24f9f2 100644
--- a/arch/arm/mach-omap1/common.h
+++ b/arch/arm/mach-omap1/common.h
@@ -53,8 +53,18 @@ static inline void omap16xx_map_io(void)
}
#endif
+#ifdef CONFIG_OMAP_SERIAL_WAKE
+int omap_serial_wakeup_init(void);
+#else
+static inline int omap_serial_wakeup_init(void)
+{
+ return 0;
+}
+#endif
+
void omap1_init_early(void);
void omap1_init_irq(void);
+void omap1_init_late(void);
void omap1_restart(char, const char *);
extern void __init omap_check_revision(void);
@@ -63,7 +73,14 @@ extern void omap1_nand_cmd_ctl(struct mtd_info *mtd, int cmd,
unsigned int ctrl);
extern struct sys_timer omap1_timer;
-extern bool omap_32k_timer_init(void);
+#ifdef CONFIG_OMAP_32K_TIMER
+extern int omap_32k_timer_init(void);
+#else
+static inline int __init omap_32k_timer_init(void)
+{
+ return -ENODEV;
+}
+#endif
extern u32 omap_irq_flags;
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index dcd8ddbec2bb..fa1fa4deb6aa 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -22,6 +22,7 @@
#include <plat/tc.h>
#include <plat/board.h>
#include <plat/mux.h>
+#include <plat/dma.h>
#include <plat/mmc.h>
#include <plat/omap7xx.h>
@@ -31,6 +32,22 @@
#include "common.h"
#include "clock.h"
+#if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE)
+
+static struct platform_device omap_pcm = {
+ .name = "omap-pcm-audio",
+ .id = -1,
+};
+
+static void omap_init_audio(void)
+{
+ platform_device_register(&omap_pcm);
+}
+
+#else
+static inline void omap_init_audio(void) {}
+#endif
+
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
@@ -128,6 +145,56 @@ static inline void omap1_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
}
}
+#define OMAP_MMC_NR_RES 4
+
+/*
+ * Register MMC devices.
+ */
+static int __init omap_mmc_add(const char *name, int id, unsigned long base,
+ unsigned long size, unsigned int irq,
+ unsigned rx_req, unsigned tx_req,
+ struct omap_mmc_platform_data *data)
+{
+ struct platform_device *pdev;
+ struct resource res[OMAP_MMC_NR_RES];
+ int ret;
+
+ pdev = platform_device_alloc(name, id);
+ if (!pdev)
+ return -ENOMEM;
+
+ memset(res, 0, OMAP_MMC_NR_RES * sizeof(struct resource));
+ res[0].start = base;
+ res[0].end = base + size - 1;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = res[1].end = irq;
+ res[1].flags = IORESOURCE_IRQ;
+ res[2].start = rx_req;
+ res[2].name = "rx";
+ res[2].flags = IORESOURCE_DMA;
+ res[3].start = tx_req;
+ res[3].name = "tx";
+ res[3].flags = IORESOURCE_DMA;
+
+ ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
+ if (ret == 0)
+ ret = platform_device_add_data(pdev, data, sizeof(*data));
+ if (ret)
+ goto fail;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto fail;
+
+ /* return device handle to board setup code */
+ data->dev = &pdev->dev;
+ return 0;
+
+fail:
+ platform_device_put(pdev);
+ return ret;
+}
+
void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
int nr_controllers)
{
@@ -135,6 +202,7 @@ void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
for (i = 0; i < nr_controllers; i++) {
unsigned long base, size;
+ unsigned rx_req, tx_req;
unsigned int irq = 0;
if (!mmc_data[i])
@@ -146,19 +214,24 @@ void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
case 0:
base = OMAP1_MMC1_BASE;
irq = INT_MMC;
+ rx_req = OMAP_DMA_MMC_RX;
+ tx_req = OMAP_DMA_MMC_TX;
break;
case 1:
if (!cpu_is_omap16xx())
return;
base = OMAP1_MMC2_BASE;
irq = INT_1610_MMC2;
+ rx_req = OMAP_DMA_MMC2_RX;
+ tx_req = OMAP_DMA_MMC2_TX;
break;
default:
continue;
}
size = OMAP1_MMC_SIZE;
- omap_mmc_add("mmci-omap", i, base, size, irq, mmc_data[i]);
+ omap_mmc_add("mmci-omap", i, base, size, irq,
+ rx_req, tx_req, mmc_data[i]);
};
}
@@ -242,23 +315,48 @@ void __init omap1_camera_init(void *info)
static inline void omap_init_sti(void) {}
-#if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE)
+/* Numbering for the SPI-capable controllers when used for SPI:
+ * spi = 1
+ * uwire = 2
+ * mmc1..2 = 3..4
+ * mcbsp1..3 = 5..7
+ */
-static struct platform_device omap_pcm = {
- .name = "omap-pcm-audio",
- .id = -1,
+#if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE)
+
+#define OMAP_UWIRE_BASE 0xfffb3000
+
+static struct resource uwire_resources[] = {
+ {
+ .start = OMAP_UWIRE_BASE,
+ .end = OMAP_UWIRE_BASE + 0x20,
+ .flags = IORESOURCE_MEM,
+ },
};
-static void omap_init_audio(void)
+static struct platform_device omap_uwire_device = {
+ .name = "omap_uwire",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(uwire_resources),
+ .resource = uwire_resources,
+};
+
+static void omap_init_uwire(void)
{
- platform_device_register(&omap_pcm);
-}
+ /* FIXME define and use a boot tag; not all boards will be hooking
+ * up devices to the microwire controller, and multi-board configs
+ * mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway...
+ */
+ /* board-specific code must configure chipselects (only a few
+ * are normally used) and SCLK/SDI/SDO (each has two choices).
+ */
+ (void) platform_device_register(&omap_uwire_device);
+}
#else
-static inline void omap_init_audio(void) {}
+static inline void omap_init_uwire(void) {}
#endif
-/*-------------------------------------------------------------------------*/
/*
* This gets called after board-specific INIT_MACHINE, and initializes most
@@ -292,11 +390,12 @@ static int __init omap1_init_devices(void)
* in alphabetical order so they're easier to sort through.
*/
+ omap_init_audio();
omap_init_mbox();
omap_init_rtc();
omap_init_spi100k();
omap_init_sti();
- omap_init_audio();
+ omap_init_uwire();
return 0;
}
diff --git a/arch/arm/mach-omap1/gpio15xx.c b/arch/arm/mach-omap1/gpio15xx.c
index 634903ef8292..ebef15e5e7b7 100644
--- a/arch/arm/mach-omap1/gpio15xx.c
+++ b/arch/arm/mach-omap1/gpio15xx.c
@@ -46,7 +46,6 @@ static struct omap_gpio_reg_offs omap15xx_mpuio_regs = {
};
static struct __initdata omap_gpio_platform_data omap15xx_mpu_gpio_config = {
- .virtual_irq_start = IH_MPUIO_BASE,
.is_mpuio = true,
.bank_width = 16,
.bank_stride = 1,
@@ -89,7 +88,6 @@ static struct omap_gpio_reg_offs omap15xx_gpio_regs = {
};
static struct __initdata omap_gpio_platform_data omap15xx_gpio_config = {
- .virtual_irq_start = IH_GPIO_BASE,
.bank_width = 16,
.regs = &omap15xx_gpio_regs,
};
diff --git a/arch/arm/mach-omap1/gpio16xx.c b/arch/arm/mach-omap1/gpio16xx.c
index 1fb3b9ad496e..2a48cd2e1754 100644
--- a/arch/arm/mach-omap1/gpio16xx.c
+++ b/arch/arm/mach-omap1/gpio16xx.c
@@ -52,7 +52,6 @@ static struct omap_gpio_reg_offs omap16xx_mpuio_regs = {
};
static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = {
- .virtual_irq_start = IH_MPUIO_BASE,
.is_mpuio = true,
.bank_width = 16,
.bank_stride = 1,
@@ -99,7 +98,6 @@ static struct omap_gpio_reg_offs omap16xx_gpio_regs = {
};
static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = {
- .virtual_irq_start = IH_GPIO_BASE,
.bank_width = 16,
.regs = &omap16xx_gpio_regs,
};
@@ -128,7 +126,6 @@ static struct __initdata resource omap16xx_gpio2_resources[] = {
};
static struct __initdata omap_gpio_platform_data omap16xx_gpio2_config = {
- .virtual_irq_start = IH_GPIO_BASE + 16,
.bank_width = 16,
.regs = &omap16xx_gpio_regs,
};
@@ -157,7 +154,6 @@ static struct __initdata resource omap16xx_gpio3_resources[] = {
};
static struct __initdata omap_gpio_platform_data omap16xx_gpio3_config = {
- .virtual_irq_start = IH_GPIO_BASE + 32,
.bank_width = 16,
.regs = &omap16xx_gpio_regs,
};
@@ -186,7 +182,6 @@ static struct __initdata resource omap16xx_gpio4_resources[] = {
};
static struct __initdata omap_gpio_platform_data omap16xx_gpio4_config = {
- .virtual_irq_start = IH_GPIO_BASE + 48,
.bank_width = 16,
.regs = &omap16xx_gpio_regs,
};
diff --git a/arch/arm/mach-omap1/gpio7xx.c b/arch/arm/mach-omap1/gpio7xx.c
index 4771d6b68b96..acf12b73eace 100644
--- a/arch/arm/mach-omap1/gpio7xx.c
+++ b/arch/arm/mach-omap1/gpio7xx.c
@@ -51,7 +51,6 @@ static struct omap_gpio_reg_offs omap7xx_mpuio_regs = {
};
static struct __initdata omap_gpio_platform_data omap7xx_mpu_gpio_config = {
- .virtual_irq_start = IH_MPUIO_BASE,
.is_mpuio = true,
.bank_width = 16,
.bank_stride = 2,
@@ -93,7 +92,6 @@ static struct omap_gpio_reg_offs omap7xx_gpio_regs = {
};
static struct __initdata omap_gpio_platform_data omap7xx_gpio1_config = {
- .virtual_irq_start = IH_GPIO_BASE,
.bank_width = 32,
.regs = &omap7xx_gpio_regs,
};
@@ -122,7 +120,6 @@ static struct __initdata resource omap7xx_gpio2_resources[] = {
};
static struct __initdata omap_gpio_platform_data omap7xx_gpio2_config = {
- .virtual_irq_start = IH_GPIO_BASE + 32,
.bank_width = 32,
.regs = &omap7xx_gpio_regs,
};
@@ -151,7 +148,6 @@ static struct __initdata resource omap7xx_gpio3_resources[] = {
};
static struct __initdata omap_gpio_platform_data omap7xx_gpio3_config = {
- .virtual_irq_start = IH_GPIO_BASE + 64,
.bank_width = 32,
.regs = &omap7xx_gpio_regs,
};
@@ -180,7 +176,6 @@ static struct __initdata resource omap7xx_gpio4_resources[] = {
};
static struct __initdata omap_gpio_platform_data omap7xx_gpio4_config = {
- .virtual_irq_start = IH_GPIO_BASE + 96,
.bank_width = 32,
.regs = &omap7xx_gpio_regs,
};
@@ -209,7 +204,6 @@ static struct __initdata resource omap7xx_gpio5_resources[] = {
};
static struct __initdata omap_gpio_platform_data omap7xx_gpio5_config = {
- .virtual_irq_start = IH_GPIO_BASE + 128,
.bank_width = 32,
.regs = &omap7xx_gpio_regs,
};
@@ -238,7 +232,6 @@ static struct __initdata resource omap7xx_gpio6_resources[] = {
};
static struct __initdata omap_gpio_platform_data omap7xx_gpio6_config = {
- .virtual_irq_start = IH_GPIO_BASE + 160,
.bank_width = 32,
.regs = &omap7xx_gpio_regs,
};
diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
index 71ce017bf5d8..6c95a59f0f16 100644
--- a/arch/arm/mach-omap1/io.c
+++ b/arch/arm/mach-omap1/io.c
@@ -137,6 +137,11 @@ void __init omap1_init_early(void)
omap_init_consistent_dma_size();
}
+void __init omap1_init_late(void)
+{
+ omap_serial_wakeup_init();
+}
+
/*
* NOTE: Please use ioremap + __raw_read/write where possible instead of these
*/
diff --git a/arch/arm/mach-omap1/serial.c b/arch/arm/mach-omap1/serial.c
index 93ae8f29727e..6809c9e56c93 100644
--- a/arch/arm/mach-omap1/serial.c
+++ b/arch/arm/mach-omap1/serial.c
@@ -237,7 +237,7 @@ static void __init omap_serial_set_port_wakeup(int gpio_nr)
enable_irq_wake(gpio_to_irq(gpio_nr));
}
-static int __init omap_serial_wakeup_init(void)
+int __init omap_serial_wakeup_init(void)
{
if (!cpu_is_omap16xx())
return 0;
@@ -251,7 +251,6 @@ static int __init omap_serial_wakeup_init(void)
return 0;
}
-late_initcall(omap_serial_wakeup_init);
#endif /* CONFIG_OMAP_SERIAL_WAKE */
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 4d8dd9a1b04c..4062480bfec7 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -232,20 +232,6 @@ static inline void omap_mpu_timer_init(void)
}
#endif /* CONFIG_OMAP_MPU_TIMER */
-static inline int omap_32k_timer_usable(void)
-{
- int res = false;
-
- if (cpu_is_omap730() || cpu_is_omap15xx())
- return res;
-
-#ifdef CONFIG_OMAP_32K_TIMER
- res = omap_32k_timer_init();
-#endif
-
- return res;
-}
-
/*
* ---------------------------------------------------------------------------
* Timer initialization
@@ -253,7 +239,7 @@ static inline int omap_32k_timer_usable(void)
*/
static void __init omap1_timer_init(void)
{
- if (!omap_32k_timer_usable())
+ if (omap_32k_timer_init() != 0)
omap_mpu_timer_init();
}
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 325b9a0aa4a0..eae49c3980c9 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -71,6 +71,7 @@
/* 16xx specific defines */
#define OMAP1_32K_TIMER_BASE 0xfffb9000
+#define OMAP1_32KSYNC_TIMER_BASE 0xfffbc400
#define OMAP1_32K_TIMER_CR 0x08
#define OMAP1_32K_TIMER_TVR 0x00
#define OMAP1_32K_TIMER_TCR 0x04
@@ -182,10 +183,29 @@ static __init void omap_init_32k_timer(void)
* Timer initialization
* ---------------------------------------------------------------------------
*/
-bool __init omap_32k_timer_init(void)
+int __init omap_32k_timer_init(void)
{
- omap_init_clocksource_32k();
- omap_init_32k_timer();
+ int ret = -ENODEV;
- return true;
+ if (cpu_is_omap16xx()) {
+ void __iomem *base;
+ struct clk *sync32k_ick;
+
+ base = ioremap(OMAP1_32KSYNC_TIMER_BASE, SZ_1K);
+ if (!base) {
+ pr_err("32k_counter: failed to map base addr\n");
+ return -ENODEV;
+ }
+
+ sync32k_ick = clk_get(NULL, "omap_32ksync_ick");
+ if (!IS_ERR(sync32k_ick))
+ clk_enable(sync32k_ick);
+
+ ret = omap_init_clocksource_32k(base);
+ }
+
+ if (!ret)
+ omap_init_32k_timer();
+
+ return ret;
}
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 964ee67a3b77..4cf5142f22cc 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -78,12 +78,12 @@ config SOC_OMAP3430
default y
select ARCH_OMAP_OTG
-config SOC_OMAPTI81XX
+config SOC_TI81XX
bool "TI81XX support"
depends on ARCH_OMAP3
default y
-config SOC_OMAPAM33XX
+config SOC_AM33XX
bool "AM33XX support"
depends on ARCH_OMAP3
default y
@@ -320,12 +320,12 @@ config MACH_OMAP_3630SDP
config MACH_TI8168EVM
bool "TI8168 Evaluation Module"
- depends on SOC_OMAPTI81XX
+ depends on SOC_TI81XX
default y
config MACH_TI8148EVM
bool "TI8148 Evaluation Module"
- depends on SOC_OMAPTI81XX
+ depends on SOC_TI81XX
default y
config MACH_OMAP_4430SDP
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 385c083d24b2..fa742f3c2629 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -24,10 +24,11 @@ endif
obj-$(CONFIG_TWL4030_CORE) += omap_twl.o
# SMP support ONLY available for OMAP4
+
obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o
-obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o omap-wakeupgen.o \
- sleep44xx.o
+obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o omap-wakeupgen.o
+obj-$(CONFIG_ARCH_OMAP4) += sleep44xx.o
plus_sec := $(call as-instr,.arch_extension sec,+sec)
AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec)
@@ -64,10 +65,10 @@ endif
ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o
obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o
-obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \
- cpuidle34xx.o
-obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o omap-mpuss-lowpower.o \
- cpuidle44xx.o
+obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o
+obj-$(CONFIG_ARCH_OMAP3) += cpuidle34xx.o
+obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o omap-mpuss-lowpower.o
+obj-$(CONFIG_ARCH_OMAP4) += cpuidle44xx.o
obj-$(CONFIG_PM_DEBUG) += pm-debug.o
obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o
obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o
@@ -84,90 +85,86 @@ endif
# PRCM
obj-y += prm_common.o
obj-$(CONFIG_ARCH_OMAP2) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
-obj-$(CONFIG_ARCH_OMAP3) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o \
- vc3xxx_data.o vp3xxx_data.o
-# XXX The presence of cm2xxx_3xxx.o on the line below is temporary and
-# will be removed once the OMAP4 part of the codebase is converted to
-# use OMAP4-specific PRCM functions.
-obj-$(CONFIG_ARCH_OMAP4) += prcm.o cm2xxx_3xxx.o cminst44xx.o \
- cm44xx.o prcm_mpu44xx.o \
- prminst44xx.o vc44xx_data.o \
- vp44xx_data.o prm44xx.o
+obj-$(CONFIG_ARCH_OMAP3) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP3) += vc3xxx_data.o vp3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4) += prcm.o cminst44xx.o cm44xx.o
+obj-$(CONFIG_ARCH_OMAP4) += prcm_mpu44xx.o prminst44xx.o
+obj-$(CONFIG_ARCH_OMAP4) += vc44xx_data.o vp44xx_data.o prm44xx.o
# OMAP voltage domains
voltagedomain-common := voltage.o vc.o vp.o
-obj-$(CONFIG_ARCH_OMAP2) += $(voltagedomain-common) \
- voltagedomains2xxx_data.o
-obj-$(CONFIG_ARCH_OMAP3) += $(voltagedomain-common) \
- voltagedomains3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP4) += $(voltagedomain-common) \
- voltagedomains44xx_data.o
+obj-$(CONFIG_ARCH_OMAP2) += $(voltagedomain-common)
+obj-$(CONFIG_ARCH_OMAP2) += voltagedomains2xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3) += $(voltagedomain-common)
+obj-$(CONFIG_ARCH_OMAP3) += voltagedomains3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4) += $(voltagedomain-common)
+obj-$(CONFIG_ARCH_OMAP4) += voltagedomains44xx_data.o
# OMAP powerdomain framework
powerdomain-common += powerdomain.o powerdomain-common.o
-obj-$(CONFIG_ARCH_OMAP2) += $(powerdomain-common) \
- powerdomain2xxx_3xxx.o \
- powerdomains2xxx_data.o \
- powerdomains2xxx_3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP3) += $(powerdomain-common) \
- powerdomain2xxx_3xxx.o \
- powerdomains3xxx_data.o \
- powerdomains2xxx_3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP4) += $(powerdomain-common) \
- powerdomain44xx.o \
- powerdomains44xx_data.o
+obj-$(CONFIG_ARCH_OMAP2) += $(powerdomain-common)
+obj-$(CONFIG_ARCH_OMAP2) += powerdomains2xxx_data.o
+obj-$(CONFIG_ARCH_OMAP2) += powerdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP2) += powerdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3) += $(powerdomain-common)
+obj-$(CONFIG_ARCH_OMAP3) += powerdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP3) += powerdomains3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3) += powerdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4) += $(powerdomain-common)
+obj-$(CONFIG_ARCH_OMAP4) += powerdomain44xx.o
+obj-$(CONFIG_ARCH_OMAP4) += powerdomains44xx_data.o
# PRCM clockdomain control
-clockdomain-common += clockdomain.o \
- clockdomains_common_data.o
-obj-$(CONFIG_ARCH_OMAP2) += $(clockdomain-common) \
- clockdomain2xxx_3xxx.o \
- clockdomains2xxx_3xxx_data.o
+clockdomain-common += clockdomain.o
+clockdomain-common += clockdomains_common_data.o
+obj-$(CONFIG_ARCH_OMAP2) += $(clockdomain-common)
+obj-$(CONFIG_ARCH_OMAP2) += clockdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP2) += clockdomains2xxx_3xxx_data.o
obj-$(CONFIG_SOC_OMAP2420) += clockdomains2420_data.o
obj-$(CONFIG_SOC_OMAP2430) += clockdomains2430_data.o
-obj-$(CONFIG_ARCH_OMAP3) += $(clockdomain-common) \
- clockdomain2xxx_3xxx.o \
- clockdomains2xxx_3xxx_data.o \
- clockdomains3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP4) += $(clockdomain-common) \
- clockdomain44xx.o \
- clockdomains44xx_data.o
+obj-$(CONFIG_ARCH_OMAP3) += $(clockdomain-common)
+obj-$(CONFIG_ARCH_OMAP3) += clockdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP3) += clockdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3) += clockdomains3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4) += $(clockdomain-common)
+obj-$(CONFIG_ARCH_OMAP4) += clockdomain44xx.o
+obj-$(CONFIG_ARCH_OMAP4) += clockdomains44xx_data.o
# Clock framework
-obj-$(CONFIG_ARCH_OMAP2) += $(clock-common) clock2xxx.o \
- clkt2xxx_sys.o \
- clkt2xxx_dpllcore.o \
- clkt2xxx_virt_prcm_set.o \
- clkt2xxx_apll.o clkt2xxx_osc.o \
- clkt2xxx_dpll.o clkt_iclk.o
+obj-$(CONFIG_ARCH_OMAP2) += $(clock-common) clock2xxx.o
+obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_sys.o
+obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpllcore.o
+obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_virt_prcm_set.o
+obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_apll.o clkt2xxx_osc.o
+obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpll.o clkt_iclk.o
obj-$(CONFIG_SOC_OMAP2420) += clock2420_data.o
obj-$(CONFIG_SOC_OMAP2430) += clock2430.o clock2430_data.o
-obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o \
- clock34xx.o clkt34xx_dpll3m2.o \
- clock3517.o clock36xx.o \
- dpll3xxx.o clock3xxx_data.o \
- clkt_iclk.o
-obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) clock44xx_data.o \
- dpll3xxx.o dpll44xx.o
+obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o
+obj-$(CONFIG_ARCH_OMAP3) += clock34xx.o clkt34xx_dpll3m2.o
+obj-$(CONFIG_ARCH_OMAP3) += clock3517.o clock36xx.o
+obj-$(CONFIG_ARCH_OMAP3) += dpll3xxx.o clock3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3) += clkt_iclk.o
+obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) clock44xx_data.o
+obj-$(CONFIG_ARCH_OMAP4) += dpll3xxx.o dpll44xx.o
# OMAP2 clock rate set data (old "OPP" data)
obj-$(CONFIG_SOC_OMAP2420) += opp2420_data.o
obj-$(CONFIG_SOC_OMAP2430) += opp2430_data.o
# hwmod data
-obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_ipblock_data.o \
- omap_hwmod_2xxx_3xxx_ipblock_data.o \
- omap_hwmod_2xxx_interconnect_data.o \
- omap_hwmod_2xxx_3xxx_interconnect_data.o \
- omap_hwmod_2420_data.o
-obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_ipblock_data.o \
- omap_hwmod_2xxx_3xxx_ipblock_data.o \
- omap_hwmod_2xxx_interconnect_data.o \
- omap_hwmod_2xxx_3xxx_interconnect_data.o \
- omap_hwmod_2430_data.o
-obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_2xxx_3xxx_ipblock_data.o \
- omap_hwmod_2xxx_3xxx_interconnect_data.o \
- omap_hwmod_3xxx_data.o
+obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_3xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_3xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2420_data.o
+obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_3xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_3xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2430_data.o
+obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_2xxx_3xxx_ipblock_data.o
+obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_2xxx_3xxx_interconnect_data.o
+obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_3xxx_data.o
obj-$(CONFIG_ARCH_OMAP4) += omap_hwmod_44xx_data.o
# EMU peripherals
@@ -208,23 +205,19 @@ obj-$(CONFIG_MACH_OMAP3EVM) += board-omap3evm.o
obj-$(CONFIG_MACH_OMAP3_PANDORA) += board-omap3pandora.o
obj-$(CONFIG_MACH_OMAP_3430SDP) += board-3430sdp.o
obj-$(CONFIG_MACH_NOKIA_N8X0) += board-n8x0.o
-obj-$(CONFIG_MACH_NOKIA_RM680) += board-rm680.o \
- sdram-nokia.o
-obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o \
- sdram-nokia.o \
- board-rx51-peripherals.o \
- board-rx51-video.o
-obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom.o \
- board-zoom-peripherals.o \
- board-zoom-display.o \
- board-zoom-debugboard.o
-obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom.o \
- board-zoom-peripherals.o \
- board-zoom-display.o \
- board-zoom-debugboard.o
-obj-$(CONFIG_MACH_OMAP_3630SDP) += board-3630sdp.o \
- board-zoom-peripherals.o \
- board-zoom-display.o
+obj-$(CONFIG_MACH_NOKIA_RM680) += board-rm680.o sdram-nokia.o
+obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o sdram-nokia.o
+obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51-peripherals.o
+obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51-video.o
+obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom.o board-zoom-peripherals.o
+obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom-display.o
+obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom-debugboard.o
+obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom.o board-zoom-peripherals.o
+obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom-display.o
+obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom-debugboard.o
+obj-$(CONFIG_MACH_OMAP_3630SDP) += board-3630sdp.o
+obj-$(CONFIG_MACH_OMAP_3630SDP) += board-zoom-peripherals.o
+obj-$(CONFIG_MACH_OMAP_3630SDP) += board-zoom-display.o
obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o
obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o
obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index e658f835d0de..99ca6bad5c30 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -303,6 +303,7 @@ MACHINE_START(OMAP_2430SDP, "OMAP2430 sdp2430 board")
.init_irq = omap2_init_irq,
.handle_irq = omap2_intc_handle_irq,
.init_machine = omap_2430sdp_init,
+ .init_late = omap2430_init_late,
.timer = &omap2_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 37abb0d49b51..a98c688058a9 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -605,6 +605,7 @@ MACHINE_START(OMAP_3430SDP, "OMAP3430 3430SDP board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_3430sdp_init,
+ .init_late = omap3430_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index 6ef350d1ae4f..2dc9ba523c7a 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -217,6 +217,7 @@ MACHINE_START(OMAP_3630SDP, "OMAP 3630SDP board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_sdp_init,
+ .init_late = omap3630_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 94af6cde2e36..8e17284a803f 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -912,6 +912,7 @@ MACHINE_START(OMAP_4430SDP, "OMAP4430 4430SDP board")
.init_irq = gic_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = omap_4430sdp_init,
+ .init_late = omap4430_init_late,
.timer = &omap4_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c
index 3b8a53c1f2a8..92432c28673d 100644
--- a/arch/arm/mach-omap2/board-am3517crane.c
+++ b/arch/arm/mach-omap2/board-am3517crane.c
@@ -102,6 +102,7 @@ MACHINE_START(CRANEBOARD, "AM3517/05 CRANEBOARD")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = am3517_crane_init,
+ .init_late = am35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index 99790eb646e8..18f601096ce1 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -385,6 +385,7 @@ MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = am3517_evm_init,
+ .init_late = am35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index 768ece2e9c3b..502c31e123be 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -356,6 +356,7 @@ MACHINE_START(OMAP_APOLLON, "OMAP24xx Apollon")
.init_irq = omap2_init_irq,
.handle_irq = omap2_intc_handle_irq,
.init_machine = omap_apollon_init,
+ .init_late = omap2420_init_late,
.timer = &omap2_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index c03df142ea67..ded100c80a91 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -669,6 +669,7 @@ MACHINE_START(CM_T35, "Compulab CM-T35")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = cm_t35_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
@@ -681,6 +682,7 @@ MACHINE_START(CM_T3730, "Compulab CM-T3730")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = cm_t3730_init,
+ .init_late = omap3630_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index 9e66e167e4f3..a33ad4641d9a 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -303,6 +303,7 @@ MACHINE_START(CM_T3517, "Compulab CM-T3517")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = cm_t3517_init,
+ .init_late = am35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index b063f0d2faa6..6567c1cd5572 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -644,6 +644,7 @@ MACHINE_START(DEVKIT8000, "OMAP3 Devkit8000")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = devkit8000_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_secure_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 7302ba7ff1b9..202934657867 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -125,6 +125,7 @@ DT_MACHINE_START(OMAP4_DT, "Generic OMAP4 (Flattened Device Tree)")
.init_irq = omap_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = omap_generic_init,
+ .init_late = omap4430_init_late,
.timer = &omap4_timer,
.dt_compat = omap4_boards_compat,
.restart = omap_prcm_restart,
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 0bbbabe28fcc..876becf8205a 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -398,6 +398,7 @@ MACHINE_START(OMAP_H4, "OMAP2420 H4 board")
.init_irq = omap2_init_irq,
.handle_irq = omap2_intc_handle_irq,
.init_machine = omap_h4_init,
+ .init_late = omap2420_init_late,
.timer = &omap2_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 7a274098f67b..74915295482e 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -650,6 +650,7 @@ MACHINE_START(IGEP0020, "IGEP v2 board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = igep_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
@@ -662,6 +663,7 @@ MACHINE_START(IGEP0030, "IGEP OMAP3 module")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = igep_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 1b6049567ab4..ef9e82977499 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -442,6 +442,7 @@ MACHINE_START(OMAP_LDP, "OMAP LDP board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_ldp_init,
+ .init_late = omap3430_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 518091c5f77c..8ca14e88a31a 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -694,6 +694,7 @@ MACHINE_START(NOKIA_N800, "Nokia N800")
.init_irq = omap2_init_irq,
.handle_irq = omap2_intc_handle_irq,
.init_machine = n8x0_init_machine,
+ .init_late = omap2420_init_late,
.timer = &omap2_timer,
.restart = omap_prcm_restart,
MACHINE_END
@@ -706,6 +707,7 @@ MACHINE_START(NOKIA_N810, "Nokia N810")
.init_irq = omap2_init_irq,
.handle_irq = omap2_intc_handle_irq,
.init_machine = n8x0_init_machine,
+ .init_late = omap2420_init_late,
.timer = &omap2_timer,
.restart = omap_prcm_restart,
MACHINE_END
@@ -718,6 +720,7 @@ MACHINE_START(NOKIA_N810_WIMAX, "Nokia N810 WiMAX")
.init_irq = omap2_init_irq,
.handle_irq = omap2_intc_handle_irq,
.init_machine = n8x0_init_machine,
+ .init_late = omap2420_init_late,
.timer = &omap2_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 2a7b9a9da1db..79c6909eeb78 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -543,6 +543,7 @@ MACHINE_START(OMAP3_BEAGLE, "OMAP3 Beagle Board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3_beagle_init,
+ .init_late = omap3_init_late,
.timer = &omap3_secure_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index ace3c675e9c2..639bd07ea38a 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -671,6 +671,7 @@ MACHINE_START(OMAP3EVM, "OMAP3 EVM")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3_evm_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
index c008bf8e1c36..932e1778aff9 100644
--- a/arch/arm/mach-omap2/board-omap3logic.c
+++ b/arch/arm/mach-omap2/board-omap3logic.c
@@ -242,6 +242,7 @@ MACHINE_START(OMAP3_TORPEDO, "Logic OMAP3 Torpedo board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3logic_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
@@ -254,6 +255,7 @@ MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3logic_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 33d995d0f075..57aebee44fd0 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -622,6 +622,7 @@ MACHINE_START(OMAP3_PANDORA, "Pandora Handheld Console")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3pandora_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index 4396bae91677..b318f5602e36 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -436,6 +436,7 @@ MACHINE_START(SBC3530, "OMAP3 STALKER")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3_stalker_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_secure_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index ae2251fa4a69..485d14d6a8cd 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -387,6 +387,7 @@ MACHINE_START(TOUCHBOOK, "OMAP3 touchbook Board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3_touchbook_init,
+ .init_late = omap3430_init_late,
.timer = &omap3_secure_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 68b8fc9ff010..982fb2622ab8 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -521,6 +521,7 @@ MACHINE_START(OMAP4_PANDA, "OMAP4 Panda board")
.init_irq = gic_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = omap4_panda_init,
+ .init_late = omap4430_init_late,
.timer = &omap4_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 5527c1979a16..8fa2fc3a4c3c 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -554,6 +554,7 @@ MACHINE_START(OVERO, "Gumstix Overo")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = overo_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
index ae53d71f0ce0..0ad1bb3bdb98 100644
--- a/arch/arm/mach-omap2/board-rm680.c
+++ b/arch/arm/mach-omap2/board-rm680.c
@@ -151,6 +151,7 @@ MACHINE_START(NOKIA_RM680, "Nokia RM-680 board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = rm680_init,
+ .init_late = omap3630_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
@@ -163,6 +164,7 @@ MACHINE_START(NOKIA_RM696, "Nokia RM-696 board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = rm680_init,
+ .init_late = omap3630_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index 2da92a6ba40a..345dd931f76f 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -127,6 +127,7 @@ MACHINE_START(NOKIA_RX51, "Nokia RX-51 board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = rx51_init,
+ .init_late = omap3430_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ti8168evm.c b/arch/arm/mach-omap2/board-ti8168evm.c
index ab9a7a9e9d64..d4c8392cadb6 100644
--- a/arch/arm/mach-omap2/board-ti8168evm.c
+++ b/arch/arm/mach-omap2/board-ti8168evm.c
@@ -52,6 +52,7 @@ MACHINE_START(TI8168EVM, "ti8168evm")
.init_irq = ti81xx_init_irq,
.timer = &omap3_timer,
.init_machine = ti81xx_evm_init,
+ .init_late = ti81xx_init_late,
.restart = omap_prcm_restart,
MACHINE_END
@@ -63,5 +64,6 @@ MACHINE_START(TI8148EVM, "ti8148evm")
.init_irq = ti81xx_init_irq,
.timer = &omap3_timer,
.init_machine = ti81xx_evm_init,
+ .init_late = ti81xx_init_late,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c
index 5c20bcc57f2b..4e7e56142e6f 100644
--- a/arch/arm/mach-omap2/board-zoom.c
+++ b/arch/arm/mach-omap2/board-zoom.c
@@ -137,6 +137,7 @@ MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_zoom_init,
+ .init_late = omap3430_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
@@ -149,6 +150,7 @@ MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_zoom_init,
+ .init_late = omap3630_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index d6c9e6180318..be9dfd1abe60 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -55,7 +55,7 @@ static inline void omap34xx_map_common_io(void)
}
#endif
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
extern void omapti81xx_map_common_io(void);
#else
static inline void omapti81xx_map_common_io(void)
@@ -63,7 +63,7 @@ static inline void omapti81xx_map_common_io(void)
}
#endif
-#ifdef CONFIG_SOC_OMAPAM33XX
+#ifdef CONFIG_SOC_AM33XX
extern void omapam33xx_map_common_io(void);
#else
static inline void omapam33xx_map_common_io(void)
@@ -79,6 +79,42 @@ static inline void omap44xx_map_common_io(void)
}
#endif
+#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP2)
+int omap2_pm_init(void);
+#else
+static inline int omap2_pm_init(void)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3)
+int omap3_pm_init(void);
+#else
+static inline int omap3_pm_init(void)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4)
+int omap4_pm_init(void);
+#else
+static inline int omap4_pm_init(void)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_OMAP_MUX
+int omap_mux_late_init(void);
+#else
+static inline int omap_mux_late_init(void)
+{
+ return 0;
+}
+#endif
+
extern void omap2_init_common_infrastructure(void);
extern struct sys_timer omap2_timer;
@@ -95,6 +131,17 @@ void omap3_init_early(void); /* Do not use this one */
void am35xx_init_early(void);
void ti81xx_init_early(void);
void omap4430_init_early(void);
+void omap3_init_late(void); /* Do not use this one */
+void omap4430_init_late(void);
+void omap2420_init_late(void);
+void omap2430_init_late(void);
+void omap3430_init_late(void);
+void omap35xx_init_late(void);
+void omap3630_init_late(void);
+void am35xx_init_late(void);
+void ti81xx_init_late(void);
+void omap4430_init_late(void);
+int omap2_common_pm_late_init(void);
void omap_prcm_restart(char, const char *);
/*
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index ae62ece04ef9..7b4b9327e543 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -645,7 +645,11 @@ static inline void omap242x_mmc_mux(struct omap_mmc_platform_data
void __init omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
{
- char *name = "mmci-omap";
+ struct platform_device *pdev;
+ struct omap_hwmod *oh;
+ int id = 0;
+ char *oh_name = "msdi1";
+ char *dev_name = "mmci-omap";
if (!mmc_data[0]) {
pr_err("%s fails: Incomplete platform data\n", __func__);
@@ -653,8 +657,17 @@ void __init omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
}
omap242x_mmc_mux(mmc_data[0]);
- omap_mmc_add(name, 0, OMAP2_MMC1_BASE, OMAP2420_MMC_SIZE,
- INT_24XX_MMC_IRQ, mmc_data[0]);
+
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh) {
+ pr_err("Could not look up %s\n", oh_name);
+ return;
+ }
+ pdev = omap_device_build(dev_name, id, oh, mmc_data[0],
+ sizeof(struct omap_mmc_platform_data), NULL, 0, 0);
+ if (IS_ERR(pdev))
+ WARN(1, "Can'd build omap_device for %s:%s.\n",
+ dev_name, oh->name);
}
#endif
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
index b19d8496c16e..ff75abe60af2 100644
--- a/arch/arm/mach-omap2/dma.c
+++ b/arch/arm/mach-omap2/dma.c
@@ -227,10 +227,6 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
dma_stride = OMAP2_DMA_STRIDE;
dma_common_ch_start = CSDP;
- if (cpu_is_omap3630() || cpu_is_omap44xx())
- dma_common_ch_end = CCDN;
- else
- dma_common_ch_end = CCFN;
p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
if (!p) {
@@ -277,6 +273,13 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
dev_err(&pdev->dev, "%s: kzalloc fail\n", __func__);
return -ENOMEM;
}
+
+ /* Check the capabilities register for descriptor loading feature */
+ if (dma_read(CAPS_0, 0) & DMA_HAS_DESCRIPTOR_CAPS)
+ dma_common_ch_end = CCDN;
+ else
+ dma_common_ch_end = CCFN;
+
return 0;
}
diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c
index 3376388b317a..845309f146fe 100644
--- a/arch/arm/mach-omap2/dsp.c
+++ b/arch/arm/mach-omap2/dsp.c
@@ -28,8 +28,6 @@
#include <plat/dsp.h>
-extern phys_addr_t omap_dsp_get_mempool_base(void);
-
static struct platform_device *omap_dsp_pdev;
static struct omap_dsp_platform_data omap_dsp_pdata __initdata = {
@@ -47,6 +45,31 @@ static struct omap_dsp_platform_data omap_dsp_pdata __initdata = {
.dsp_cm_rmw_bits = omap2_cm_rmw_mod_reg_bits,
};
+static phys_addr_t omap_dsp_phys_mempool_base;
+
+void __init omap_dsp_reserve_sdram_memblock(void)
+{
+ phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
+ phys_addr_t paddr;
+
+ if (!size)
+ return;
+
+ paddr = arm_memblock_steal(size, SZ_1M);
+ if (!paddr) {
+ pr_err("%s: failed to reserve %llx bytes\n",
+ __func__, (unsigned long long)size);
+ return;
+ }
+
+ omap_dsp_phys_mempool_base = paddr;
+}
+
+static phys_addr_t omap_dsp_get_mempool_base(void)
+{
+ return omap_dsp_phys_mempool_base;
+}
+
static int __init omap_dsp_init(void)
{
struct platform_device *pdev;
diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c
index a80e093b039f..9ad7d489b0de 100644
--- a/arch/arm/mach-omap2/gpio.c
+++ b/arch/arm/mach-omap2/gpio.c
@@ -56,7 +56,6 @@ static int __init omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
dev_attr = (struct omap_gpio_dev_attr *)oh->dev_attr;
pdata->bank_width = dev_attr->bank_width;
pdata->dbck_flag = dev_attr->dbck_flag;
- pdata->virtual_irq_start = IH_GPIO_BASE + 32 * (id - 1);
pdata->get_context_loss_count = omap_pm_get_dev_context_loss_count;
pdata->regs = kzalloc(sizeof(struct omap_gpio_reg_offs), GFP_KERNEL);
if (!pdata->regs) {
@@ -103,6 +102,8 @@ static int __init omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
pdata->regs->dataout = OMAP4_GPIO_DATAOUT;
pdata->regs->set_dataout = OMAP4_GPIO_SETDATAOUT;
pdata->regs->clr_dataout = OMAP4_GPIO_CLEARDATAOUT;
+ pdata->regs->irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0;
+ pdata->regs->irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1;
pdata->regs->irqstatus = OMAP4_GPIO_IRQSTATUS0;
pdata->regs->irqstatus2 = OMAP4_GPIO_IRQSTATUS1;
pdata->regs->irqenable = OMAP4_GPIO_IRQSTATUSSET0;
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 580e684e8825..46b09dae770e 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -50,6 +50,19 @@
#define GPMC_ECC_SIZE_CONFIG 0x1fc
#define GPMC_ECC1_RESULT 0x200
+/* GPMC ECC control settings */
+#define GPMC_ECC_CTRL_ECCCLEAR 0x100
+#define GPMC_ECC_CTRL_ECCDISABLE 0x000
+#define GPMC_ECC_CTRL_ECCREG1 0x001
+#define GPMC_ECC_CTRL_ECCREG2 0x002
+#define GPMC_ECC_CTRL_ECCREG3 0x003
+#define GPMC_ECC_CTRL_ECCREG4 0x004
+#define GPMC_ECC_CTRL_ECCREG5 0x005
+#define GPMC_ECC_CTRL_ECCREG6 0x006
+#define GPMC_ECC_CTRL_ECCREG7 0x007
+#define GPMC_ECC_CTRL_ECCREG8 0x008
+#define GPMC_ECC_CTRL_ECCREG9 0x009
+
#define GPMC_CS0_OFFSET 0x60
#define GPMC_CS_SIZE 0x30
@@ -860,8 +873,9 @@ int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size)
gpmc_ecc_used = cs;
/* clear ecc and enable bits */
- val = ((0x00000001<<8) | 0x00000001);
- gpmc_write_reg(GPMC_ECC_CONTROL, val);
+ gpmc_write_reg(GPMC_ECC_CONTROL,
+ GPMC_ECC_CTRL_ECCCLEAR |
+ GPMC_ECC_CTRL_ECCREG1);
/* program ecc and result sizes */
val = ((((ecc_size >> 1) - 1) << 22) | (0x0000000F));
@@ -869,13 +883,15 @@ int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size)
switch (mode) {
case GPMC_ECC_READ:
- gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
+ case GPMC_ECC_WRITE:
+ gpmc_write_reg(GPMC_ECC_CONTROL,
+ GPMC_ECC_CTRL_ECCCLEAR |
+ GPMC_ECC_CTRL_ECCREG1);
break;
case GPMC_ECC_READSYN:
- gpmc_write_reg(GPMC_ECC_CONTROL, 0x100);
- break;
- case GPMC_ECC_WRITE:
- gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
+ gpmc_write_reg(GPMC_ECC_CONTROL,
+ GPMC_ECC_CTRL_ECCCLEAR |
+ GPMC_ECC_CTRL_ECCDISABLE);
break;
default:
printk(KERN_INFO "Error: Unrecognized Mode[%d]!\n", mode);
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index b0268eaffe13..be697d4e0843 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -355,7 +355,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
*
* temporary HACK: ocr_mask instead of fixed supply
*/
- if (cpu_is_omap3505() || cpu_is_omap3517())
+ if (soc_is_am35xx())
mmc->slots[0].ocr_mask = MMC_VDD_165_195 |
MMC_VDD_26_27 |
MMC_VDD_27_28 |
@@ -365,7 +365,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
else
mmc->slots[0].ocr_mask = c->ocr_mask;
- if (!cpu_is_omap3517() && !cpu_is_omap3505())
+ if (!soc_is_am35xx())
mmc->slots[0].features |= HSMMC_HAS_PBIAS;
if (cpu_is_omap44xx() && (omap_rev() > OMAP4430_REV_ES1_0))
@@ -388,7 +388,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
}
}
- if (cpu_is_omap3517() || cpu_is_omap3505())
+ if (soc_is_am35xx())
mmc->slots[0].set_power = nop_mmc_set_power;
/* OMAP3630 HSMMC1 supports only 4-bit */
@@ -400,7 +400,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
}
break;
case 2:
- if (cpu_is_omap3517() || cpu_is_omap3505())
+ if (soc_is_am35xx())
mmc->slots[0].set_power = am35x_hsmmc2_set_power;
if (c->ext_clock)
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index f1398171d8a2..0389b3264abe 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -185,8 +185,7 @@ static void __init omap3_cpuinfo(void)
*/
if (cpu_is_omap3630()) {
cpu_name = "OMAP3630";
- } else if (cpu_is_omap3517()) {
- /* AM35xx devices */
+ } else if (soc_is_am35xx()) {
cpu_name = (omap3_has_sgx()) ? "AM3517" : "AM3505";
} else if (cpu_is_ti816x()) {
cpu_name = "TI816X";
@@ -352,13 +351,13 @@ void __init omap3xxx_check_revision(void)
*/
switch (rev) {
case 0:
- omap_revision = OMAP3517_REV_ES1_0;
+ omap_revision = AM35XX_REV_ES1_0;
cpu_rev = "1.0";
break;
case 1:
/* FALLTHROUGH */
default:
- omap_revision = OMAP3517_REV_ES1_1;
+ omap_revision = AM35XX_REV_ES1_1;
cpu_rev = "1.1";
}
break;
diff --git a/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
index d79321b0f2a2..548de90b58c2 100644
--- a/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
+++ b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
@@ -16,18 +16,10 @@
#define OMAP_WKG_ENB_B_0 0x14
#define OMAP_WKG_ENB_C_0 0x18
#define OMAP_WKG_ENB_D_0 0x1c
-#define OMAP_WKG_ENB_SECURE_A_0 0x20
-#define OMAP_WKG_ENB_SECURE_B_0 0x24
-#define OMAP_WKG_ENB_SECURE_C_0 0x28
-#define OMAP_WKG_ENB_SECURE_D_0 0x2c
#define OMAP_WKG_ENB_A_1 0x410
#define OMAP_WKG_ENB_B_1 0x414
#define OMAP_WKG_ENB_C_1 0x418
#define OMAP_WKG_ENB_D_1 0x41c
-#define OMAP_WKG_ENB_SECURE_A_1 0x420
-#define OMAP_WKG_ENB_SECURE_B_1 0x424
-#define OMAP_WKG_ENB_SECURE_C_1 0x428
-#define OMAP_WKG_ENB_SECURE_D_1 0x42c
#define OMAP_AUX_CORE_BOOT_0 0x800
#define OMAP_AUX_CORE_BOOT_1 0x804
#define OMAP_PTMSYNCREQ_MASK 0xc00
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 4b9491aa36fa..8d014ba04abc 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -173,7 +173,7 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
};
#endif
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
static struct map_desc omapti81xx_io_desc[] __initdata = {
{
.virtual = L4_34XX_VIRT,
@@ -184,7 +184,7 @@ static struct map_desc omapti81xx_io_desc[] __initdata = {
};
#endif
-#ifdef CONFIG_SOC_OMAPAM33XX
+#ifdef CONFIG_SOC_AM33XX
static struct map_desc omapam33xx_io_desc[] __initdata = {
{
.virtual = L4_34XX_VIRT,
@@ -216,41 +216,11 @@ static struct map_desc omap44xx_io_desc[] __initdata = {
.type = MT_DEVICE,
},
{
- .virtual = OMAP44XX_GPMC_VIRT,
- .pfn = __phys_to_pfn(OMAP44XX_GPMC_PHYS),
- .length = OMAP44XX_GPMC_SIZE,
- .type = MT_DEVICE,
- },
- {
- .virtual = OMAP44XX_EMIF1_VIRT,
- .pfn = __phys_to_pfn(OMAP44XX_EMIF1_PHYS),
- .length = OMAP44XX_EMIF1_SIZE,
- .type = MT_DEVICE,
- },
- {
- .virtual = OMAP44XX_EMIF2_VIRT,
- .pfn = __phys_to_pfn(OMAP44XX_EMIF2_PHYS),
- .length = OMAP44XX_EMIF2_SIZE,
- .type = MT_DEVICE,
- },
- {
- .virtual = OMAP44XX_DMM_VIRT,
- .pfn = __phys_to_pfn(OMAP44XX_DMM_PHYS),
- .length = OMAP44XX_DMM_SIZE,
- .type = MT_DEVICE,
- },
- {
.virtual = L4_PER_44XX_VIRT,
.pfn = __phys_to_pfn(L4_PER_44XX_PHYS),
.length = L4_PER_44XX_SIZE,
.type = MT_DEVICE,
},
- {
- .virtual = L4_EMU_44XX_VIRT,
- .pfn = __phys_to_pfn(L4_EMU_44XX_PHYS),
- .length = L4_EMU_44XX_SIZE,
- .type = MT_DEVICE,
- },
#ifdef CONFIG_OMAP4_ERRATA_I688
{
.virtual = OMAP4_SRAM_VA,
@@ -286,14 +256,14 @@ void __init omap34xx_map_common_io(void)
}
#endif
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
void __init omapti81xx_map_common_io(void)
{
iotable_init(omapti81xx_io_desc, ARRAY_SIZE(omapti81xx_io_desc));
}
#endif
-#ifdef CONFIG_SOC_OMAPAM33XX
+#ifdef CONFIG_SOC_AM33XX
void __init omapam33xx_map_common_io(void)
{
iotable_init(omapam33xx_io_desc, ARRAY_SIZE(omapam33xx_io_desc));
@@ -380,6 +350,13 @@ void __init omap2420_init_early(void)
omap_hwmod_init_postsetup();
omap2420_clk_init();
}
+
+void __init omap2420_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap2_pm_init();
+}
#endif
#ifdef CONFIG_SOC_OMAP2430
@@ -395,6 +372,13 @@ void __init omap2430_init_early(void)
omap_hwmod_init_postsetup();
omap2430_clk_init();
}
+
+void __init omap2430_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap2_pm_init();
+}
#endif
/*
@@ -449,6 +433,48 @@ void __init ti81xx_init_early(void)
omap_hwmod_init_postsetup();
omap3xxx_clk_init();
}
+
+void __init omap3_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap3_pm_init();
+}
+
+void __init omap3430_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap3_pm_init();
+}
+
+void __init omap35xx_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap3_pm_init();
+}
+
+void __init omap3630_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap3_pm_init();
+}
+
+void __init am35xx_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap3_pm_init();
+}
+
+void __init ti81xx_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap3_pm_init();
+}
#endif
#ifdef CONFIG_ARCH_OMAP4
@@ -465,6 +491,13 @@ void __init omap4430_init_early(void)
omap_hwmod_init_postsetup();
omap4xxx_clk_init();
}
+
+void __init omap4430_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap4_pm_init();
+}
#endif
void __init omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
diff --git a/arch/arm/mach-omap2/iomap.h b/arch/arm/mach-omap2/iomap.h
index 0812b154f5b5..80b88921faba 100644
--- a/arch/arm/mach-omap2/iomap.h
+++ b/arch/arm/mach-omap2/iomap.h
@@ -37,9 +37,6 @@
#define OMAP4_L3_PER_IO_OFFSET 0xb1100000
#define OMAP4_L3_PER_IO_ADDRESS(pa) IOMEM((pa) + OMAP4_L3_PER_IO_OFFSET)
-#define OMAP4_GPMC_IO_OFFSET 0xa9000000
-#define OMAP4_GPMC_IO_ADDRESS(pa) IOMEM((pa) + OMAP4_GPMC_IO_OFFSET)
-
#define OMAP2_EMU_IO_OFFSET 0xaa800000 /* Emulation */
#define OMAP2_EMU_IO_ADDRESS(pa) IOMEM((pa) + OMAP2_EMU_IO_OFFSET)
@@ -170,28 +167,3 @@
#define L4_ABE_44XX_VIRT (L4_ABE_44XX_PHYS + OMAP2_L4_IO_OFFSET)
#define L4_ABE_44XX_SIZE SZ_1M
-#define L4_EMU_44XX_PHYS L4_EMU_44XX_BASE
- /* 0x54000000 --> 0xfe800000 */
-#define L4_EMU_44XX_VIRT (L4_EMU_44XX_PHYS + OMAP2_EMU_IO_OFFSET)
-#define L4_EMU_44XX_SIZE SZ_8M
-
-#define OMAP44XX_GPMC_PHYS OMAP44XX_GPMC_BASE
- /* 0x50000000 --> 0xf9000000 */
-#define OMAP44XX_GPMC_VIRT (OMAP44XX_GPMC_PHYS + OMAP4_GPMC_IO_OFFSET)
-#define OMAP44XX_GPMC_SIZE SZ_1M
-
-
-#define OMAP44XX_EMIF1_PHYS OMAP44XX_EMIF1_BASE
- /* 0x4c000000 --> 0xfd100000 */
-#define OMAP44XX_EMIF1_VIRT (OMAP44XX_EMIF1_PHYS + OMAP4_L3_PER_IO_OFFSET)
-#define OMAP44XX_EMIF1_SIZE SZ_1M
-
-#define OMAP44XX_EMIF2_PHYS OMAP44XX_EMIF2_BASE
- /* 0x4d000000 --> 0xfd200000 */
-#define OMAP44XX_EMIF2_SIZE SZ_1M
-#define OMAP44XX_EMIF2_VIRT (OMAP44XX_EMIF1_VIRT + OMAP44XX_EMIF1_SIZE)
-
-#define OMAP44XX_DMM_PHYS OMAP44XX_DMM_BASE
- /* 0x4e000000 --> 0xfd300000 */
-#define OMAP44XX_DMM_SIZE SZ_1M
-#define OMAP44XX_DMM_VIRT (OMAP44XX_EMIF2_VIRT + OMAP44XX_EMIF2_SIZE)
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 1ecf54565fe2..fdc4303be563 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -231,7 +231,7 @@ static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs
goto out;
irqnr = readl_relaxed(base_addr + 0xd8);
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
if (irqnr)
goto out;
irqnr = readl_relaxed(base_addr + 0xf8);
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 3268ee24eada..80e55c5c9998 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -788,7 +788,7 @@ static void __init omap_mux_free_names(struct omap_mux *m)
}
/* Free all data except for GPIO pins unless CONFIG_DEBUG_FS is set */
-static int __init omap_mux_late_init(void)
+int __init omap_mux_late_init(void)
{
struct omap_mux_partition *partition;
int ret;
@@ -823,7 +823,6 @@ static int __init omap_mux_late_init(void)
return 0;
}
-late_initcall(omap_mux_late_init);
static void __init omap_mux_package_fixup(struct omap_mux *p,
struct omap_mux *superset)
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index fd48797fa95a..b26d3c9bca16 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -3306,7 +3306,7 @@ int __init omap3xxx_hwmod_init(void)
rev == OMAP3430_REV_ES2_1 || rev == OMAP3430_REV_ES3_0 ||
rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2) {
h = omap34xx_hwmod_ocp_ifs;
- } else if (rev == OMAP3517_REV_ES1_0 || rev == OMAP3517_REV_ES1_1) {
+ } else if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) {
h = am35xx_hwmod_ocp_ifs;
} else if (rev == OMAP3630_REV_ES1_0 || rev == OMAP3630_REV_ES1_1 ||
rev == OMAP3630_REV_ES1_2) {
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index d0c1c9695996..9cb5cede0f50 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -295,7 +295,7 @@ static int __init omap2_common_pm_init(void)
}
postcore_initcall(omap2_common_pm_init);
-static int __init omap2_common_pm_late_init(void)
+int __init omap2_common_pm_late_init(void)
{
/*
* In the case of DT, the PMIC and SR initialization will be done using
@@ -322,4 +322,3 @@ static int __init omap2_common_pm_late_init(void)
return 0;
}
-late_initcall(omap2_common_pm_late_init);
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index facfffca9eac..2edeffc923a6 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -298,13 +298,10 @@ static void __init prcm_setup_regs(void)
WKUP_MOD, PM_WKEN);
}
-static int __init omap2_pm_init(void)
+int __init omap2_pm_init(void)
{
u32 l;
- if (!cpu_is_omap24xx())
- return -ENODEV;
-
printk(KERN_INFO "Power Management for OMAP2 initializing\n");
l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
printk(KERN_INFO "PRCM revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
@@ -370,17 +367,13 @@ static int __init omap2_pm_init(void)
* These routines need to be in SRAM as that's the only
* memory the MPU can see when it wakes up.
*/
- if (cpu_is_omap24xx()) {
- omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
- omap24xx_idle_loop_suspend_sz);
+ omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
+ omap24xx_idle_loop_suspend_sz);
- omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
- omap24xx_cpu_suspend_sz);
- }
+ omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
+ omap24xx_cpu_suspend_sz);
arm_pm_idle = omap2_pm_idle;
return 0;
}
-
-late_initcall(omap2_pm_init);
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 8b43aefba0ea..a34023d0ca7c 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -697,15 +697,12 @@ static void __init pm_errata_configure(void)
}
}
-static int __init omap3_pm_init(void)
+int __init omap3_pm_init(void)
{
struct power_state *pwrst, *tmp;
struct clockdomain *neon_clkdm, *mpu_clkdm;
int ret;
- if (!cpu_is_omap34xx())
- return -ENODEV;
-
if (!omap3_has_io_chain_ctrl())
pr_warning("PM: no software I/O chain control; some wakeups may be lost\n");
@@ -804,5 +801,3 @@ err2:
err1:
return ret;
}
-
-late_initcall(omap3_pm_init);
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index 885625352429..ea24174f5707 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -141,15 +141,12 @@ static void omap_default_idle(void)
* Initializes all powerdomain and clockdomain target states
* and all PRCM settings.
*/
-static int __init omap4_pm_init(void)
+int __init omap4_pm_init(void)
{
int ret;
struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm, *l4wkup;
struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm;
- if (!cpu_is_omap44xx())
- return -ENODEV;
-
if (omap_rev() == OMAP4430_REV_ES1_0) {
WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
return -ENODEV;
@@ -217,4 +214,3 @@ static int __init omap4_pm_init(void)
err2:
return ret;
}
-late_initcall(omap4_pm_init);
diff --git a/arch/arm/mach-omap2/powerdomains3xxx_data.c b/arch/arm/mach-omap2/powerdomains3xxx_data.c
index b7ea468eea32..fb0a0a6869d1 100644
--- a/arch/arm/mach-omap2/powerdomains3xxx_data.c
+++ b/arch/arm/mach-omap2/powerdomains3xxx_data.c
@@ -311,7 +311,7 @@ void __init omap3xxx_powerdomains_init(void)
rev == OMAP3430_REV_ES3_0 || rev == OMAP3630_REV_ES1_0)
pwrdm_register_pwrdms(powerdomains_omap3430es2_es3_0);
else if (rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2 ||
- rev == OMAP3517_REV_ES1_0 || rev == OMAP3517_REV_ES1_1 ||
+ rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1 ||
rev == OMAP3630_REV_ES1_1 || rev == OMAP3630_REV_ES1_2)
pwrdm_register_pwrdms(powerdomains_omap3430es3_1plus);
else
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 1b7835865c83..840929bd9dae 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -90,7 +90,7 @@ static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
}
static struct irqaction omap2_gp_timer_irq = {
- .name = "gp timer",
+ .name = "gp_timer",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.handler = omap2_gp_timer_interrupt,
};
@@ -132,7 +132,7 @@ static void omap2_gp_timer_set_mode(enum clock_event_mode mode,
}
static struct clock_event_device clockevent_gpt = {
- .name = "gp timer",
+ .name = "gp_timer",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.shift = 32,
.set_next_event = omap2_gp_timer_set_next_event,
@@ -236,22 +236,8 @@ static void __init omap2_gp_clockevent_init(int gptimer_id,
}
/* Clocksource code */
-
-#ifdef CONFIG_OMAP_32K_TIMER
-/*
- * When 32k-timer is enabled, don't use GPTimer for clocksource
- * instead, just leave default clocksource which uses the 32k
- * sync counter. See clocksource setup in plat-omap/counter_32k.c
- */
-
-static void __init omap2_gp_clocksource_init(int unused, const char *dummy)
-{
- omap_init_clocksource_32k();
-}
-
-#else
-
static struct omap_dm_timer clksrc;
+static bool use_gptimer_clksrc;
/*
* clocksource
@@ -262,7 +248,7 @@ static cycle_t clocksource_read_cycles(struct clocksource *cs)
}
static struct clocksource clocksource_gpt = {
- .name = "gp timer",
+ .name = "gp_timer",
.rating = 300,
.read = clocksource_read_cycles,
.mask = CLOCKSOURCE_MASK(32),
@@ -278,7 +264,46 @@ static u32 notrace dmtimer_read_sched_clock(void)
}
/* Setup free-running counter for clocksource */
-static void __init omap2_gp_clocksource_init(int gptimer_id,
+static int __init omap2_sync32k_clocksource_init(void)
+{
+ int ret;
+ struct omap_hwmod *oh;
+ void __iomem *vbase;
+ const char *oh_name = "counter_32k";
+
+ /*
+ * First, check that hwmod data is available for the sync32k counter
+ */
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh || oh->slaves_cnt == 0)
+ return -ENODEV;
+
+ omap_hwmod_setup_one(oh_name);
+
+ vbase = omap_hwmod_get_mpu_rt_va(oh);
+ if (!vbase) {
+ pr_warn("%s: failed to get counter_32k resource\n", __func__);
+ return -ENXIO;
+ }
+
+ ret = omap_hwmod_enable(oh);
+ if (ret) {
+ pr_warn("%s: failed to enable counter_32k module (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = omap_init_clocksource_32k(vbase);
+ if (ret) {
+ pr_warn("%s: failed to initialize counter_32k as a clocksource (%d)\n",
+ __func__, ret);
+ omap_hwmod_idle(oh);
+ }
+
+ return ret;
+}
+
+static void __init omap2_gptimer_clocksource_init(int gptimer_id,
const char *fck_source)
{
int res;
@@ -286,9 +311,6 @@ static void __init omap2_gp_clocksource_init(int gptimer_id,
res = omap_dm_timer_init_one(&clksrc, gptimer_id, fck_source);
BUG_ON(res);
- pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n",
- gptimer_id, clksrc.rate);
-
__omap_dm_timer_load_start(&clksrc,
OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0, 1);
setup_sched_clock(dmtimer_read_sched_clock, 32, clksrc.rate);
@@ -296,15 +318,36 @@ static void __init omap2_gp_clocksource_init(int gptimer_id,
if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))
pr_err("Could not register clocksource %s\n",
clocksource_gpt.name);
+ else
+ pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n",
+ gptimer_id, clksrc.rate);
+}
+
+static void __init omap2_clocksource_init(int gptimer_id,
+ const char *fck_source)
+{
+ /*
+ * Give preference to the clocksource selected by the user on the
+ * kernel command line (clocksource="gp_timer").
+ *
+ * If that parameter is absent, first try the 32k-sync timer; if the
+ * counter_32k module cannot be found or registered as a clocksource,
+ * fall back to the gp-timer.
+ */
+ if (use_gptimer_clksrc)
+ omap2_gptimer_clocksource_init(gptimer_id, fck_source);
+ else if (omap2_sync32k_clocksource_init())
+ /* Fall back to gp-timer code */
+ omap2_gptimer_clocksource_init(gptimer_id, fck_source);
}
-#endif
#define OMAP_SYS_TIMER_INIT(name, clkev_nr, clkev_src, \
clksrc_nr, clksrc_src) \
static void __init omap##name##_timer_init(void) \
{ \
omap2_gp_clockevent_init((clkev_nr), clkev_src); \
- omap2_gp_clocksource_init((clksrc_nr), clksrc_src); \
+ omap2_clocksource_init((clksrc_nr), clksrc_src); \
}
#define OMAP_SYS_TIMER(name) \
@@ -335,7 +378,7 @@ static DEFINE_TWD_LOCAL_TIMER(twd_local_timer,
static void __init omap4_timer_init(void)
{
omap2_gp_clockevent_init(1, OMAP4_CLKEV_SOURCE);
- omap2_gp_clocksource_init(2, OMAP4_MPU_SOURCE);
+ omap2_clocksource_init(2, OMAP4_MPU_SOURCE);
#ifdef CONFIG_LOCAL_TIMERS
/* Local timers are not supported on OMAP4430 ES1.0 */
if (omap_rev() != OMAP4430_REV_ES1_0) {
@@ -503,3 +546,28 @@ static int __init omap2_dm_timer_init(void)
return 0;
}
arch_initcall(omap2_dm_timer_init);
+
+/**
+ * omap2_override_clocksource - clocksource override with user configuration
+ *
+ * Allows the user to override the default clocksource with the kernel
+ * parameter clocksource="gp_timer" (for all OMAP2PLUS architectures).
+ *
+ * Note that this reuses the standard kernel parameter "clocksource=" and
+ * does not introduce any OMAP-specific interface.
+ */
+static int __init omap2_override_clocksource(char *str)
+{
+ if (!str)
+ return 0;
+ /*
+ * On the OMAP architecture there are only two options:
+ * - sync_32k (default)
+ * - gp_timer (sys_clk based)
+ */
+ if (!strcmp(str, "gp_timer"))
+ use_gptimer_clksrc = true;
+
+ return 0;
+}
+early_param("clocksource", omap2_override_clocksource);
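
The new handler above hooks into the generic early_param() machinery: the clocksource= string is parsed before the timer code runs, and omap2_clocksource_init() then honours the flag, so booting with clocksource=gp_timer on the kernel command line selects the GPTIMER clocksource instead of the default 32k-sync counter. A minimal sketch of the same early_param() pattern for a made-up parameter (myboard_mode and myboard_fast_mode are hypothetical):

#include <linux/init.h>
#include <linux/string.h>
#include <linux/types.h>

static bool myboard_fast_mode;

/*
 * Parses "myboard_mode=fast" from the kernel command line. The handler
 * runs very early, so it only records the choice for later init code.
 */
static int __init myboard_parse_mode(char *str)
{
	if (!str)
		return 0;

	if (!strcmp(str, "fast"))
		myboard_fast_mode = true;

	return 0;
}
early_param("myboard_mode", myboard_parse_mode);
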
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 8d5ed775dd56..b19d1b43c12e 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -90,7 +90,7 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
musb_plat.mode = board_data->mode;
musb_plat.extvbus = board_data->extvbus;
- if (cpu_is_omap3517() || cpu_is_omap3505()) {
+ if (soc_is_am35xx()) {
oh_name = "am35x_otg_hs";
name = "musb-am35x";
} else if (cpu_is_ti81xx()) {
diff --git a/arch/arm/mach-omap2/voltagedomains3xxx_data.c b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
index 57db2038b23c..d0103c80d040 100644
--- a/arch/arm/mach-omap2/voltagedomains3xxx_data.c
+++ b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
@@ -118,7 +118,7 @@ void __init omap3xxx_voltagedomains_init(void)
}
#endif
- if (cpu_is_omap3517() || cpu_is_omap3505())
+ if (soc_is_am35xx())
voltdms = voltagedomains_am35xx;
else
voltdms = voltagedomains_omap3;
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index e2e9db492d0c..9148b229d0de 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -18,6 +18,7 @@
#include <linux/mv643xx_i2c.h>
#include <linux/ata_platform.h>
#include <linux/delay.h>
+#include <linux/clk-provider.h>
#include <net/dsa.h>
#include <asm/page.h>
#include <asm/setup.h>
@@ -70,6 +71,19 @@ void __init orion5x_map_io(void)
/*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
+static struct clk *tclk;
+
+static void __init clk_init(void)
+{
+ tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
+ orion5x_tclk);
+
+ orion_clkdev_init(tclk);
+}
+
+/*****************************************************************************
* EHCI0
****************************************************************************/
void __init orion5x_ehci0_init(void)
@@ -95,7 +109,7 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data,
ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
- IRQ_ORION5X_ETH_ERR, orion5x_tclk);
+ IRQ_ORION5X_ETH_ERR);
}
@@ -132,7 +146,7 @@ void __init orion5x_sata_init(struct mv_sata_platform_data *sata_data)
****************************************************************************/
void __init orion5x_spi_init()
{
- orion_spi_init(SPI_PHYS_BASE, orion5x_tclk);
+ orion_spi_init(SPI_PHYS_BASE);
}
@@ -142,7 +156,7 @@ void __init orion5x_spi_init()
void __init orion5x_uart0_init(void)
{
orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
- IRQ_ORION5X_UART0, orion5x_tclk);
+ IRQ_ORION5X_UART0, tclk);
}
/*****************************************************************************
@@ -151,7 +165,7 @@ void __init orion5x_uart0_init(void)
void __init orion5x_uart1_init(void)
{
orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
- IRQ_ORION5X_UART1, orion5x_tclk);
+ IRQ_ORION5X_UART1, tclk);
}
/*****************************************************************************
@@ -179,7 +193,7 @@ static void __init orion5x_crypto_init(void)
****************************************************************************/
void __init orion5x_wdt_init(void)
{
- orion_wdt_init(orion5x_tclk);
+ orion_wdt_init();
}
@@ -276,6 +290,9 @@ void __init orion5x_init(void)
*/
orion5x_setup_cpu_mbus_bridge();
+ /* Setup root of clk tree */
+ clk_init();
+
/*
* Don't issue "Wait for Interrupt" instruction if we are
* running on D0 5281 silicon.
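
With the clk hunk above, orion5x registers tclk as a fixed-rate root clock in the common clock framework and hands it to the shared plat-orion clkdev setup. A minimal sketch of the same idea using generic clkdev calls (the rate and the "mydev.0" consumer name are placeholders; orion5x itself uses orion_clkdev_init() as shown above):

#include <linux/init.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>

static struct clk *root_clk;

static void __init board_clk_init(unsigned long tclk_rate)
{
	/* Register a fixed-rate root clock, e.g. tclk_rate = 166666667. */
	root_clk = clk_register_fixed_rate(NULL, "tclk", NULL,
					   CLK_IS_ROOT, tclk_rate);

	/* Publish it so that clk_get(&mydev->dev, NULL) can find it. */
	clk_register_clkdev(root_clk, NULL, "mydev.0");
}
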
diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
index e91bf0ba4e8e..92df49c1b62a 100644
--- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
@@ -16,7 +16,6 @@
#include <linux/mtd/physmap.h>
#include <linux/mv643xx_eth.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <linux/spi/flash.h>
#include <linux/ethtool.h>
#include <net/dsa.h>
diff --git a/arch/arm/mach-pnx4008/core.c b/arch/arm/mach-pnx4008/core.c
index be4c92858509..a00d2f1254ed 100644
--- a/arch/arm/mach-pnx4008/core.c
+++ b/arch/arm/mach-pnx4008/core.c
@@ -265,6 +265,17 @@ static void pnx4008_restart(char mode, const char *cmd)
soft_restart(0);
}
+#ifdef CONFIG_PM
+extern int pnx4008_pm_init(void);
+#else
+static inline int pnx4008_pm_init(void) { return 0; }
+#endif
+
+void __init pnx4008_init_late(void)
+{
+ pnx4008_pm_init();
+}
+
extern struct sys_timer pnx4008_timer;
MACHINE_START(PNX4008, "Philips PNX4008")
@@ -273,6 +284,7 @@ MACHINE_START(PNX4008, "Philips PNX4008")
.map_io = pnx4008_map_io,
.init_irq = pnx4008_init_irq,
.init_machine = pnx4008_init,
+ .init_late = pnx4008_init_late,
.timer = &pnx4008_timer,
.restart = pnx4008_restart,
MACHINE_END
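
This is the first machine record in the section to gain the new .init_late hook: pnx4008_pm_init() loses its late_initcall() (see the pm.c hunk below) and is instead called explicitly from the board's init_late. A hedged sketch of the general pattern, with placeholder board names (MACH_TYPE_MYBOARD and the myboard_* helpers are assumed to exist):

#include <asm/mach/arch.h>
#include <asm/mach/time.h>

/* Placeholder prototypes for the sketch; a real board provides these. */
extern void __init myboard_map_io(void);
extern void __init myboard_init_irq(void);
extern void __init myboard_init(void);
extern int myboard_pm_init(void);
extern struct sys_timer myboard_timer;

static void __init myboard_init_late(void)
{
	/* Was: late_initcall(myboard_pm_init) guarded by a SoC check. */
	myboard_pm_init();
}

MACHINE_START(MYBOARD, "My Board")
	.map_io		= myboard_map_io,
	.init_irq	= myboard_init_irq,
	.timer		= &myboard_timer,
	.init_machine	= myboard_init,
	.init_late	= myboard_init_late,
MACHINE_END
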
diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
index f3e60a049f98..26f8d06b142a 100644
--- a/arch/arm/mach-pnx4008/pm.c
+++ b/arch/arm/mach-pnx4008/pm.c
@@ -124,7 +124,7 @@ static const struct platform_suspend_ops pnx4008_pm_ops = {
.valid = pnx4008_pm_valid,
};
-static int __init pnx4008_pm_init(void)
+int __init pnx4008_pm_init(void)
{
u32 sram_size_to_allocate;
@@ -151,5 +151,3 @@ static int __init pnx4008_pm_init(void)
suspend_set_ops(&pnx4008_pm_ops);
return 0;
}
-
-late_initcall(pnx4008_pm_init);
diff --git a/arch/arm/mach-prima2/common.h b/arch/arm/mach-prima2/common.h
index b28a930d4f8a..60d826fc2185 100644
--- a/arch/arm/mach-prima2/common.h
+++ b/arch/arm/mach-prima2/common.h
@@ -24,4 +24,10 @@ static inline void sirfsoc_map_lluart(void) {}
extern void __init sirfsoc_map_lluart(void);
#endif
+#ifdef CONFIG_SUSPEND
+extern int sirfsoc_pm_init(void);
+#else
+static inline int sirfsoc_pm_init(void) { return 0; }
+#endif
+
#endif
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index 26ebb57719df..fb5a7910af35 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -85,12 +85,11 @@ static const struct platform_suspend_ops sirfsoc_pm_ops = {
.valid = suspend_valid_only_mem,
};
-static int __init sirfsoc_pm_init(void)
+int __init sirfsoc_pm_init(void)
{
suspend_set_ops(&sirfsoc_pm_ops);
return 0;
}
-late_initcall(sirfsoc_pm_init);
static const struct of_device_id pwrc_ids[] = {
{ .compatible = "sirf,prima2-pwrc" },
diff --git a/arch/arm/mach-prima2/prima2.c b/arch/arm/mach-prima2/prima2.c
index 02b9c05ff990..8f0429d4b79f 100644
--- a/arch/arm/mach-prima2/prima2.c
+++ b/arch/arm/mach-prima2/prima2.c
@@ -25,6 +25,11 @@ void __init sirfsoc_mach_init(void)
of_platform_bus_probe(NULL, sirfsoc_of_bus_ids, NULL);
}
+void __init sirfsoc_init_late(void)
+{
+ sirfsoc_pm_init();
+}
+
static const char *prima2cb_dt_match[] __initdata = {
"sirf,prima2-cb",
NULL
@@ -39,6 +44,7 @@ MACHINE_START(PRIMA2_EVB, "prima2cb")
.timer = &sirfsoc_timer,
.dma_zone_size = SZ_256M,
.init_machine = sirfsoc_mach_init,
+ .init_late = sirfsoc_init_late,
.dt_compat = prima2cb_dt_match,
.restart = sirfsoc_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index b34287ab5afd..e24961109b70 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -518,6 +518,11 @@ config S3C2443_DMA
help
Internal config node for S3C2443 DMA support
+config S3C2443_SETUP_SPI
+ bool
+ help
+ Common setup code for SPI GPIO configurations
+
endif # CPU_S3C2443 || CPU_S3C2416
if CPU_S3C2443
diff --git a/arch/arm/mach-s3c24xx/Makefile b/arch/arm/mach-s3c24xx/Makefile
index 3518fe812d5f..0ab6ab15da4c 100644
--- a/arch/arm/mach-s3c24xx/Makefile
+++ b/arch/arm/mach-s3c24xx/Makefile
@@ -14,6 +14,8 @@ obj- :=
# core
+obj-y += common.o
+
obj-$(CONFIG_CPU_S3C2410) += s3c2410.o
obj-$(CONFIG_S3C2410_DMA) += dma-s3c2410.o
obj-$(CONFIG_S3C2410_PM) += pm-s3c2410.o sleep-s3c2410.o
@@ -33,6 +35,10 @@ obj-$(CONFIG_S3C2440_DMA) += dma-s3c2440.o
obj-$(CONFIG_CPU_S3C2443) += s3c2443.o irq-s3c2443.o clock-s3c2443.o
+# PM
+
+obj-$(CONFIG_PM) += pm.o irq-pm.o sleep.o
+
# common code
obj-$(CONFIG_S3C2443_COMMON) += common-s3c2443.o
@@ -91,5 +97,6 @@ obj-$(CONFIG_MACH_OSIRIS_DVS) += mach-osiris-dvs.o
# device setup
obj-$(CONFIG_S3C2416_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
+obj-$(CONFIG_S3C2443_SETUP_SPI) += setup-spi.o
obj-$(CONFIG_ARCH_S3C24XX) += setup-i2c.o
obj-$(CONFIG_S3C24XX_SETUP_TS) += setup-ts.o
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2416.c b/arch/arm/mach-s3c24xx/clock-s3c2416.c
index dbc9ab4aaca2..8702ecfaab30 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2416.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2416.c
@@ -144,6 +144,7 @@ static struct clk_lookup s3c2416_clk_lookup[] = {
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &hsmmc0_clk),
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &hsmmc_mux0.clk),
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &hsmmc_mux1.clk),
+ CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &hsspi_mux.clk),
};
void __init s3c2416_init_clocks(int xtal)
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2443.c b/arch/arm/mach-s3c24xx/clock-s3c2443.c
index efb3ac359566..a4c5a520d994 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2443.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2443.c
@@ -179,6 +179,11 @@ static struct clk *clks[] __initdata = {
&clk_hsmmc,
};
+static struct clk_lookup s3c2443_clk_lookup[] = {
+ CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_hsmmc),
+ CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &clk_hsspi.clk),
+};
+
void __init s3c2443_init_clocks(int xtal)
{
unsigned long epllcon = __raw_readl(S3C2443_EPLLCON);
@@ -210,6 +215,7 @@ void __init s3c2443_init_clocks(int xtal)
s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+ clkdev_add_table(s3c2443_clk_lookup, ARRAY_SIZE(s3c2443_clk_lookup));
s3c_pwmclk_init();
}
diff --git a/arch/arm/mach-s3c24xx/common-s3c2443.c b/arch/arm/mach-s3c24xx/common-s3c2443.c
index 460431589f39..aeeb2be283fa 100644
--- a/arch/arm/mach-s3c24xx/common-s3c2443.c
+++ b/arch/arm/mach-s3c24xx/common-s3c2443.c
@@ -424,11 +424,6 @@ static struct clk init_clocks_off[] = {
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_IIS,
}, {
- .name = "hsspi",
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_HSSPI,
- }, {
.name = "adc",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
@@ -562,6 +557,14 @@ static struct clk hsmmc1_clk = {
.ctrlbit = S3C2443_HCLKCON_HSMMC,
};
+static struct clk hsspi_clk = {
+ .name = "spi",
+ .devname = "s3c64xx-spi.0",
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_HSSPI,
+};
+
/* EPLLCON compatible enough to get on/off information */
void __init_or_cpufreq s3c2443_common_setup_clocks(pll_fn get_mpll)
@@ -612,6 +615,7 @@ static struct clk *clks[] __initdata = {
&clk_usb_bus,
&clk_armdiv,
&hsmmc1_clk,
+ &hsspi_clk,
};
static struct clksrc_clk *clksrcs[] __initdata = {
@@ -629,6 +633,7 @@ static struct clk_lookup s3c2443_clk_lookup[] = {
CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_esys_uart.clk),
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &hsmmc1_clk),
+ CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk0", &hsspi_clk),
};
void __init s3c2443_common_init_clocks(int xtal, pll_fn get_mpll,
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/mach-s3c24xx/common.c
index 290942d9adda..56cdd34cce41 100644
--- a/arch/arm/plat-s3c24xx/cpu.c
+++ b/arch/arm/mach-s3c24xx/common.c
@@ -4,7 +4,7 @@
* http://www.simtec.co.uk/products/SWLINUX/
* Ben Dooks <ben@simtec.co.uk>
*
- * S3C24XX CPU Support
+ * Common code for S3C24XX machines
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -41,6 +41,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
+#include <mach/regs-clock.h>
#include <mach/regs-gpio.h>
#include <plat/regs-serial.h>
@@ -52,6 +53,8 @@
#include <plat/s3c2416.h>
#include <plat/s3c244x.h>
#include <plat/s3c2443.h>
+#include <plat/cpu-freq.h>
+#include <plat/pll.h>
/* table of supported CPUs */
@@ -234,3 +237,67 @@ void __init s3c24xx_init_io(struct map_desc *mach_desc, int size)
s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
}
+
+/* Serial port registrations */
+
+static struct resource s3c2410_uart0_resource[] = {
+ [0] = DEFINE_RES_MEM(S3C2410_PA_UART0, SZ_16K),
+ [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX0, \
+ IRQ_S3CUART_ERR0 - IRQ_S3CUART_RX0 + 1, \
+ NULL, IORESOURCE_IRQ)
+};
+
+static struct resource s3c2410_uart1_resource[] = {
+ [0] = DEFINE_RES_MEM(S3C2410_PA_UART1, SZ_16K),
+ [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX1, \
+ IRQ_S3CUART_ERR1 - IRQ_S3CUART_RX1 + 1, \
+ NULL, IORESOURCE_IRQ)
+};
+
+static struct resource s3c2410_uart2_resource[] = {
+ [0] = DEFINE_RES_MEM(S3C2410_PA_UART2, SZ_16K),
+ [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX2, \
+ IRQ_S3CUART_ERR2 - IRQ_S3CUART_RX2 + 1, \
+ NULL, IORESOURCE_IRQ)
+};
+
+static struct resource s3c2410_uart3_resource[] = {
+ [0] = DEFINE_RES_MEM(S3C2443_PA_UART3, SZ_16K),
+ [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX3, \
+ IRQ_S3CUART_ERR3 - IRQ_S3CUART_RX3 + 1, \
+ NULL, IORESOURCE_IRQ)
+};
+
+struct s3c24xx_uart_resources s3c2410_uart_resources[] __initdata = {
+ [0] = {
+ .resources = s3c2410_uart0_resource,
+ .nr_resources = ARRAY_SIZE(s3c2410_uart0_resource),
+ },
+ [1] = {
+ .resources = s3c2410_uart1_resource,
+ .nr_resources = ARRAY_SIZE(s3c2410_uart1_resource),
+ },
+ [2] = {
+ .resources = s3c2410_uart2_resource,
+ .nr_resources = ARRAY_SIZE(s3c2410_uart2_resource),
+ },
+ [3] = {
+ .resources = s3c2410_uart3_resource,
+ .nr_resources = ARRAY_SIZE(s3c2410_uart3_resource),
+ },
+};
+
+/* initialise all the clocks */
+
+void __init_or_cpufreq s3c24xx_setup_clocks(unsigned long fclk,
+ unsigned long hclk,
+ unsigned long pclk)
+{
+ clk_upll.rate = s3c24xx_get_pll(__raw_readl(S3C2410_UPLLCON),
+ clk_xtal.rate);
+
+ clk_mpll.rate = fclk;
+ clk_h.rate = hclk;
+ clk_p.rate = pclk;
+ clk_f.rate = fclk;
+}
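
The UART resource tables above use the DEFINE_RES_* helpers from <linux/ioport.h> rather than open-coded struct resource initializers. The same helpers work for any platform device; a small hypothetical example (base address, size and IRQ number are made up):

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct resource mydev_resources[] = {
	/* One 16 KiB MMIO window and one interrupt line. */
	[0] = DEFINE_RES_MEM(0x50000000, 0x4000),
	[1] = DEFINE_RES_IRQ(42),
};

static struct platform_device mydev = {
	.name		= "mydev",
	.id		= -1,
	.resource	= mydev_resources,
	.num_resources	= ARRAY_SIZE(mydev_resources),
};
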
diff --git a/arch/arm/mach-s3c24xx/dma-s3c2443.c b/arch/arm/mach-s3c24xx/dma-s3c2443.c
index e227c472a40a..2d94228d2866 100644
--- a/arch/arm/mach-s3c24xx/dma-s3c2443.c
+++ b/arch/arm/mach-s3c24xx/dma-s3c2443.c
@@ -55,12 +55,20 @@ static struct s3c24xx_dma_map __initdata s3c2443_dma_mappings[] = {
.name = "sdi",
.channels = MAP(S3C2443_DMAREQSEL_SDI),
},
- [DMACH_SPI0] = {
- .name = "spi0",
+ [DMACH_SPI0_RX] = {
+ .name = "spi0-rx",
+ .channels = MAP(S3C2443_DMAREQSEL_SPI0RX),
+ },
+ [DMACH_SPI0_TX] = {
+ .name = "spi0-tx",
.channels = MAP(S3C2443_DMAREQSEL_SPI0TX),
},
- [DMACH_SPI1] = { /* only on S3C2443/S3C2450 */
- .name = "spi1",
+ [DMACH_SPI1_RX] = { /* only on S3C2443/S3C2450 */
+ .name = "spi1-rx",
+ .channels = MAP(S3C2443_DMAREQSEL_SPI1RX),
+ },
+ [DMACH_SPI1_TX] = { /* only on S3C2443/S3C2450 */
+ .name = "spi1-tx",
.channels = MAP(S3C2443_DMAREQSEL_SPI1TX),
},
[DMACH_UART0] = {
diff --git a/arch/arm/mach-s3c24xx/include/mach/dma.h b/arch/arm/mach-s3c24xx/include/mach/dma.h
index acbdfecd4186..454831b66037 100644
--- a/arch/arm/mach-s3c24xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c24xx/include/mach/dma.h
@@ -47,6 +47,10 @@ enum dma_ch {
DMACH_UART2_SRC2,
DMACH_UART3, /* s3c2443 has extra uart */
DMACH_UART3_SRC2,
+ DMACH_SPI0_TX, /* s3c2443/2416/2450 hsspi0 */
+ DMACH_SPI0_RX, /* s3c2443/2416/2450 hsspi0 */
+ DMACH_SPI1_TX, /* s3c2443/2450 hsspi1 */
+ DMACH_SPI1_RX, /* s3c2443/2450 hsspi1 */
DMACH_MAX, /* the end entry */
};
diff --git a/arch/arm/mach-s3c24xx/include/mach/irqs.h b/arch/arm/mach-s3c24xx/include/mach/irqs.h
index e53b2177319e..b7a9f4d469e8 100644
--- a/arch/arm/mach-s3c24xx/include/mach/irqs.h
+++ b/arch/arm/mach-s3c24xx/include/mach/irqs.h
@@ -134,6 +134,17 @@
#define IRQ_S32416_WDT S3C2410_IRQSUB(27)
#define IRQ_S32416_AC97 S3C2410_IRQSUB(28)
+/* second interrupt-register of s3c2416/s3c2450 */
+
+#define S3C2416_IRQ(x) S3C2410_IRQ((x) + 54 + 29)
+#define IRQ_S3C2416_2D S3C2416_IRQ(0)
+#define IRQ_S3C2416_IIC1 S3C2416_IRQ(1)
+#define IRQ_S3C2416_RESERVED2 S3C2416_IRQ(2)
+#define IRQ_S3C2416_RESERVED3 S3C2416_IRQ(3)
+#define IRQ_S3C2416_PCM0 S3C2416_IRQ(4)
+#define IRQ_S3C2416_PCM1 S3C2416_IRQ(5)
+#define IRQ_S3C2416_I2S0 S3C2416_IRQ(6)
+#define IRQ_S3C2416_I2S1 S3C2416_IRQ(7)
/* extra irqs for s3c2440 */
@@ -175,7 +186,9 @@
#define IRQ_S3C2443_WDT S3C2410_IRQSUB(27)
#define IRQ_S3C2443_AC97 S3C2410_IRQSUB(28)
-#if defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2416)
+#if defined(CONFIG_CPU_S3C2416)
+#define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
+#elif defined(CONFIG_CPU_S3C2443)
#define NR_IRQS (IRQ_S3C2443_AC97+1)
#else
#define NR_IRQS (IRQ_S3C2440_AC97+1)
diff --git a/arch/arm/mach-s3c24xx/include/mach/map.h b/arch/arm/mach-s3c24xx/include/mach/map.h
index 78ae807f1281..8ba381f2dbe1 100644
--- a/arch/arm/mach-s3c24xx/include/mach/map.h
+++ b/arch/arm/mach-s3c24xx/include/mach/map.h
@@ -98,6 +98,8 @@
/* SPI */
#define S3C2410_PA_SPI (0x59000000)
+#define S3C2443_PA_SPI0 (0x52000000)
+#define S3C2443_PA_SPI1 S3C2410_PA_SPI
/* SDI */
#define S3C2410_PA_SDI (0x5A000000)
@@ -162,4 +164,7 @@
#define S3C_PA_WDT S3C2410_PA_WATCHDOG
#define S3C_PA_NAND S3C24XX_PA_NAND
+#define S3C_PA_SPI0 S3C2443_PA_SPI0
+#define S3C_PA_SPI1 S3C2443_PA_SPI1
+
#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/plat-s3c24xx/irq-pm.c b/arch/arm/mach-s3c24xx/irq-pm.c
index 0efb2e2848c8..0efb2e2848c8 100644
--- a/arch/arm/plat-s3c24xx/irq-pm.c
+++ b/arch/arm/mach-s3c24xx/irq-pm.c
diff --git a/arch/arm/mach-s3c24xx/irq-s3c2416.c b/arch/arm/mach-s3c24xx/irq-s3c2416.c
index fd49f35e448e..23ec97370f32 100644
--- a/arch/arm/mach-s3c24xx/irq-s3c2416.c
+++ b/arch/arm/mach-s3c24xx/irq-s3c2416.c
@@ -27,6 +27,7 @@
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/syscore_ops.h>
#include <mach/hardware.h>
#include <asm/irq.h>
@@ -192,6 +193,43 @@ static struct irq_chip s3c2416_irq_uart3 = {
.irq_ack = s3c2416_irq_uart3_ack,
};
+/* second interrupt register */
+
+static inline void s3c2416_irq_ack_second(struct irq_data *data)
+{
+ unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
+
+ __raw_writel(bitval, S3C2416_SRCPND2);
+ __raw_writel(bitval, S3C2416_INTPND2);
+}
+
+static void s3c2416_irq_mask_second(struct irq_data *data)
+{
+ unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
+ unsigned long mask;
+
+ mask = __raw_readl(S3C2416_INTMSK2);
+ mask |= bitval;
+ __raw_writel(mask, S3C2416_INTMSK2);
+}
+
+static void s3c2416_irq_unmask_second(struct irq_data *data)
+{
+ unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
+ unsigned long mask;
+
+ mask = __raw_readl(S3C2416_INTMSK2);
+ mask &= ~bitval;
+ __raw_writel(mask, S3C2416_INTMSK2);
+}
+
+struct irq_chip s3c2416_irq_second = {
+ .irq_ack = s3c2416_irq_ack_second,
+ .irq_mask = s3c2416_irq_mask_second,
+ .irq_unmask = s3c2416_irq_unmask_second,
+};
+
+
/* IRQ initialisation code */
static int __init s3c2416_add_sub(unsigned int base,
@@ -213,6 +251,42 @@ static int __init s3c2416_add_sub(unsigned int base,
return 0;
}
+static void __init s3c2416_irq_add_second(void)
+{
+ unsigned long pend;
+ unsigned long last;
+ int irqno;
+ int i;
+
+ /* first, clear all interrupts pending... */
+ last = 0;
+ for (i = 0; i < 4; i++) {
+ pend = __raw_readl(S3C2416_INTPND2);
+
+ if (pend == 0 || pend == last)
+ break;
+
+ __raw_writel(pend, S3C2416_SRCPND2);
+ __raw_writel(pend, S3C2416_INTPND2);
+ printk(KERN_INFO "irq: clearing pending status %08x\n",
+ (int)pend);
+ last = pend;
+ }
+
+ for (irqno = IRQ_S3C2416_2D; irqno <= IRQ_S3C2416_I2S1; irqno++) {
+ switch (irqno) {
+ case IRQ_S3C2416_RESERVED2:
+ case IRQ_S3C2416_RESERVED3:
+ /* no IRQ here */
+ break;
+ default:
+ irq_set_chip_and_handler(irqno, &s3c2416_irq_second,
+ handle_edge_irq);
+ set_irq_flags(irqno, IRQF_VALID);
+ }
+ }
+}
+
static int __init s3c2416_irq_add(struct device *dev,
struct subsys_interface *sif)
{
@@ -232,6 +306,8 @@ static int __init s3c2416_irq_add(struct device *dev,
&s3c2416_irq_wdtac97,
IRQ_S3C2443_WDT, IRQ_S3C2443_AC97);
+ s3c2416_irq_add_second();
+
return 0;
}
@@ -248,3 +324,25 @@ static int __init s3c2416_irq_init(void)
arch_initcall(s3c2416_irq_init);
+#ifdef CONFIG_PM
+static struct sleep_save irq_save[] = {
+ SAVE_ITEM(S3C2416_INTMSK2),
+};
+
+int s3c2416_irq_suspend(void)
+{
+ s3c_pm_do_save(irq_save, ARRAY_SIZE(irq_save));
+
+ return 0;
+}
+
+void s3c2416_irq_resume(void)
+{
+ s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save));
+}
+
+struct syscore_ops s3c2416_irq_syscore_ops = {
+ .suspend = s3c2416_irq_suspend,
+ .resume = s3c2416_irq_resume,
+};
+#endif
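
The CONFIG_PM block above keeps the new INTMSK2 setting across suspend by pairing the plat-samsung sleep_save helpers with a syscore_ops instance (registered from s3c2416_init() in the next hunk). A hedged sketch of the same save/restore pattern for one hypothetical register (MYDEV_CTRL is a placeholder address, not a real register):

#include <linux/kernel.h>
#include <linux/syscore_ops.h>

#include <plat/pm.h>

/* Placeholder ioremapped register address, for illustration only. */
#define MYDEV_CTRL	((void __iomem *)0xf0000000)

static struct sleep_save mydev_save[] = {
	SAVE_ITEM(MYDEV_CTRL),
};

static int mydev_suspend(void)
{
	s3c_pm_do_save(mydev_save, ARRAY_SIZE(mydev_save));
	return 0;
}

static void mydev_resume(void)
{
	s3c_pm_do_restore(mydev_save, ARRAY_SIZE(mydev_save));
}

static struct syscore_ops mydev_syscore_ops = {
	.suspend	= mydev_suspend,
	.resume		= mydev_resume,
};

/* register_syscore_ops(&mydev_syscore_ops) is called from SoC init. */
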
diff --git a/arch/arm/plat-s3c24xx/pm.c b/arch/arm/mach-s3c24xx/pm.c
index 60627e63a254..60627e63a254 100644
--- a/arch/arm/plat-s3c24xx/pm.c
+++ b/arch/arm/mach-s3c24xx/pm.c
diff --git a/arch/arm/mach-s3c24xx/s3c2416.c b/arch/arm/mach-s3c24xx/s3c2416.c
index 7743fade50df..ed5a95ece9eb 100644
--- a/arch/arm/mach-s3c24xx/s3c2416.c
+++ b/arch/arm/mach-s3c24xx/s3c2416.c
@@ -106,6 +106,7 @@ int __init s3c2416_init(void)
register_syscore_ops(&s3c2416_pm_syscore_ops);
#endif
register_syscore_ops(&s3c24xx_irq_syscore_ops);
+ register_syscore_ops(&s3c2416_irq_syscore_ops);
return device_register(&s3c2416_dev);
}
diff --git a/arch/arm/mach-s3c24xx/setup-spi.c b/arch/arm/mach-s3c24xx/setup-spi.c
new file mode 100644
index 000000000000..5712c85f39b1
--- /dev/null
+++ b/arch/arm/mach-s3c24xx/setup-spi.c
@@ -0,0 +1,39 @@
+/*
+ * HS-SPI device setup for S3C2443/S3C2416
+ *
+ * Copyright (C) 2011 Samsung Electronics Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/s3c64xx-spi.h>
+
+#include <mach/hardware.h>
+#include <mach/regs-gpio.h>
+
+#ifdef CONFIG_S3C64XX_DEV_SPI0
+struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = {
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 13,
+ .tx_st_done = 21,
+ .high_speed = 1,
+};
+
+int s3c64xx_spi0_cfg_gpio(struct platform_device *pdev)
+{
+ /* enable hsspi bit in misccr */
+ s3c2410_modify_misccr(S3C2416_MISCCR_HSSPI_EN2, 1);
+
+ s3c_gpio_cfgall_range(S3C2410_GPE(11), 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+
+ return 0;
+}
+#endif
diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/mach-s3c24xx/sleep.S
index c56612569b40..c56612569b40 100644
--- a/arch/arm/plat-s3c24xx/sleep.S
+++ b/arch/arm/mach-s3c24xx/sleep.S
diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c
index b313380342a5..be746e33e86c 100644
--- a/arch/arm/mach-s3c64xx/common.c
+++ b/arch/arm/mach-s3c64xx/common.c
@@ -384,3 +384,8 @@ void s3c64xx_restart(char mode, const char *cmd)
/* if all else fails, or mode was for soft, jump to 0 */
soft_restart(0);
}
+
+void __init s3c64xx_init_late(void)
+{
+ s3c64xx_pm_late_initcall();
+}
diff --git a/arch/arm/mach-s3c64xx/common.h b/arch/arm/mach-s3c64xx/common.h
index 7a10be629aba..6cfc99bdfb37 100644
--- a/arch/arm/mach-s3c64xx/common.h
+++ b/arch/arm/mach-s3c64xx/common.h
@@ -24,6 +24,7 @@ void s3c64xx_register_clocks(unsigned long xtal, unsigned armclk_limit);
void s3c64xx_setup_clocks(void);
void s3c64xx_restart(char mode, const char *cmd);
+void s3c64xx_init_late(void);
#ifdef CONFIG_CPU_S3C6400
@@ -51,4 +52,10 @@ extern void s3c6410_init_clocks(int xtal);
#define s3c6410_init NULL
#endif
+#ifdef CONFIG_PM
+int __init s3c64xx_pm_late_initcall(void);
+#else
+static inline int s3c64xx_pm_late_initcall(void) { return 0; }
+#endif
+
#endif /* __ARCH_ARM_MACH_S3C64XX_COMMON_H */
diff --git a/arch/arm/mach-s3c64xx/cpuidle.c b/arch/arm/mach-s3c64xx/cpuidle.c
index 179460f38db7..acb197ccf3f7 100644
--- a/arch/arm/mach-s3c64xx/cpuidle.c
+++ b/arch/arm/mach-s3c64xx/cpuidle.c
@@ -27,12 +27,7 @@ static int s3c64xx_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- struct timeval before, after;
unsigned long tmp;
- int idle_time;
-
- local_irq_disable();
- do_gettimeofday(&before);
/* Setup PWRCFG to enter idle mode */
tmp = __raw_readl(S3C64XX_PWR_CFG);
@@ -42,42 +37,32 @@ static int s3c64xx_enter_idle(struct cpuidle_device *dev,
cpu_do_idle();
- do_gettimeofday(&after);
- local_irq_enable();
- idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
- (after.tv_usec - before.tv_usec);
-
- dev->last_residency = idle_time;
return index;
}
-static struct cpuidle_state s3c64xx_cpuidle_set[] = {
- [0] = {
- .enter = s3c64xx_enter_idle,
- .exit_latency = 1,
- .target_residency = 1,
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .name = "IDLE",
- .desc = "System active, ARM gated",
- },
-};
+static DEFINE_PER_CPU(struct cpuidle_device, s3c64xx_cpuidle_device);
static struct cpuidle_driver s3c64xx_cpuidle_driver = {
- .name = "s3c64xx_cpuidle",
- .owner = THIS_MODULE,
- .state_count = ARRAY_SIZE(s3c64xx_cpuidle_set),
-};
-
-static struct cpuidle_device s3c64xx_cpuidle_device = {
- .state_count = ARRAY_SIZE(s3c64xx_cpuidle_set),
+ .name = "s3c64xx_cpuidle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
+ .states = {
+ {
+ .enter = s3c64xx_enter_idle,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "IDLE",
+ .desc = "System active, ARM gated",
+ },
+ },
+ .state_count = 1,
};
static int __init s3c64xx_init_cpuidle(void)
{
int ret;
- memcpy(s3c64xx_cpuidle_driver.states, s3c64xx_cpuidle_set,
- sizeof(s3c64xx_cpuidle_set));
cpuidle_register_driver(&s3c64xx_cpuidle_driver);
ret = cpuidle_register_device(&s3c64xx_cpuidle_device);
diff --git a/arch/arm/mach-s3c64xx/mach-anw6410.c b/arch/arm/mach-s3c64xx/mach-anw6410.c
index f252691fb209..314df0518afd 100644
--- a/arch/arm/mach-s3c64xx/mach-anw6410.c
+++ b/arch/arm/mach-s3c64xx/mach-anw6410.c
@@ -230,6 +230,7 @@ MACHINE_START(ANW6410, "A&W6410")
.handle_irq = vic_handle_irq,
.map_io = anw6410_map_io,
.init_machine = anw6410_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index 0ace108c3e3d..7a27f5603c74 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -182,6 +182,11 @@ static const struct i2c_board_info wm1277_devs[] = {
},
};
+static const struct i2c_board_info wm6230_i2c_devs[] = {
+ { I2C_BOARD_INFO("wm9081", 0x6c),
+ .platform_data = &wm9081_pdata, },
+};
+
static __devinitdata const struct {
u8 id;
const char *name;
@@ -195,7 +200,9 @@ static __devinitdata const struct {
{ .id = 0x03, .name = "1252-EV1 Glenlivet" },
{ .id = 0x11, .name = "6249-EV2 Glenfarclas", },
{ .id = 0x14, .name = "6271-EV1 Lochnagar" },
- { .id = 0x15, .name = "XXXX-EV1 Bells" },
+ { .id = 0x15, .name = "6320-EV1 Bells",
+ .i2c_devs = wm6230_i2c_devs,
+ .num_i2c_devs = ARRAY_SIZE(wm6230_i2c_devs) },
{ .id = 0x21, .name = "1275-EV1 Mortlach" },
{ .id = 0x25, .name = "1274-EV1 Glencadam" },
{ .id = 0x31, .name = "1253-EV1 Tomatin",
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index aa1137fb47e6..6b20a71d7dbf 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -671,6 +671,7 @@ static struct i2c_board_info i2c_devs1[] __initdata = {
.irq = S3C_EINT(0),
.platform_data = &glenfarclas_pmic_pdata },
+ { I2C_BOARD_INFO("wlf-gf-module", 0x22) },
{ I2C_BOARD_INFO("wlf-gf-module", 0x24) },
{ I2C_BOARD_INFO("wlf-gf-module", 0x25) },
{ I2C_BOARD_INFO("wlf-gf-module", 0x26) },
@@ -813,6 +814,7 @@ MACHINE_START(WLF_CRAGG_6410, "Wolfson Cragganmore 6410")
.handle_irq = vic_handle_irq,
.map_io = crag6410_map_io,
.init_machine = crag6410_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c
index 521e07b8501b..1bf6b9da20fc 100644
--- a/arch/arm/mach-s3c64xx/mach-hmt.c
+++ b/arch/arm/mach-s3c64xx/mach-hmt.c
@@ -272,6 +272,7 @@ MACHINE_START(HMT, "Airgoo-HMT")
.handle_irq = vic_handle_irq,
.map_io = hmt_map_io,
.init_machine = hmt_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
index b2166d4a5538..f8ea61ea3b33 100644
--- a/arch/arm/mach-s3c64xx/mach-mini6410.c
+++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
@@ -339,6 +339,7 @@ MACHINE_START(MINI6410, "MINI6410")
.handle_irq = vic_handle_irq,
.map_io = mini6410_map_io,
.init_machine = mini6410_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-ncp.c b/arch/arm/mach-s3c64xx/mach-ncp.c
index 0efa2ba783b2..cad2e05eddf7 100644
--- a/arch/arm/mach-s3c64xx/mach-ncp.c
+++ b/arch/arm/mach-s3c64xx/mach-ncp.c
@@ -104,6 +104,7 @@ MACHINE_START(NCP, "NCP")
.handle_irq = vic_handle_irq,
.map_io = ncp_map_io,
.init_machine = ncp_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index 5c08266cea21..b92d8e17d502 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -320,6 +320,7 @@ MACHINE_START(REAL6410, "REAL6410")
.handle_irq = vic_handle_irq,
.map_io = real6410_map_io,
.init_machine = real6410_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c
index 3f42431d4dda..c5021d0335c6 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq5.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq5.c
@@ -152,6 +152,7 @@ MACHINE_START(SMARTQ5, "SmartQ 5")
.handle_irq = vic_handle_irq,
.map_io = smartq_map_io,
.init_machine = smartq5_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c
index e5c09b6db967..aa9072a4cbef 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq7.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq7.c
@@ -168,6 +168,7 @@ MACHINE_START(SMARTQ7, "SmartQ 7")
.handle_irq = vic_handle_irq,
.map_io = smartq_map_io,
.init_machine = smartq7_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6400.c b/arch/arm/mach-s3c64xx/mach-smdk6400.c
index 5f096534f4c4..b0f4525c66bd 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6400.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6400.c
@@ -93,6 +93,7 @@ MACHINE_START(SMDK6400, "SMDK6400")
.handle_irq = vic_handle_irq,
.map_io = smdk6400_map_io,
.init_machine = smdk6400_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 7da044f738ac..d44319b09412 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -702,6 +702,7 @@ MACHINE_START(SMDK6410, "SMDK6410")
.handle_irq = vic_handle_irq,
.map_io = smdk6410_map_io,
.init_machine = smdk6410_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index 7d3e81b9dd06..7feb426fc202 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -365,10 +365,9 @@ static __init int s3c64xx_pm_initcall(void)
}
arch_initcall(s3c64xx_pm_initcall);
-static __init int s3c64xx_pm_late_initcall(void)
+int __init s3c64xx_pm_late_initcall(void)
{
pm_genpd_poweroff_unused();
return 0;
}
-late_initcall(s3c64xx_pm_late_initcall);
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 375d3f779a88..d1dc7f1a239c 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -538,6 +538,7 @@ MACHINE_START(ASSABET, "Intel-Assabet")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = assabet_init,
+ .init_late = sa11x0_init_late,
#ifdef CONFIG_SA1111
.dma_zone_size = SZ_1M,
#endif
diff --git a/arch/arm/mach-sa1100/badge4.c b/arch/arm/mach-sa1100/badge4.c
index e0f0c030258c..b30fb99b587c 100644
--- a/arch/arm/mach-sa1100/badge4.c
+++ b/arch/arm/mach-sa1100/badge4.c
@@ -305,6 +305,7 @@ MACHINE_START(BADGE4, "Hewlett-Packard Laboratories BadgePAD 4")
.map_io = badge4_map_io,
.nr_irqs = SA1100_NR_IRQS,
.init_irq = sa1100_init_irq,
+ .init_late = sa11x0_init_late,
.timer = &sa1100_timer,
#ifdef CONFIG_SA1111
.dma_zone_size = SZ_1M,
diff --git a/arch/arm/mach-sa1100/cerf.c b/arch/arm/mach-sa1100/cerf.c
index 4a61f60e0502..09d7f4b4b354 100644
--- a/arch/arm/mach-sa1100/cerf.c
+++ b/arch/arm/mach-sa1100/cerf.c
@@ -134,5 +134,6 @@ MACHINE_START(CERF, "Intrinsyc CerfBoard/CerfCube")
.init_irq = cerf_init_irq,
.timer = &sa1100_timer,
.init_machine = cerf_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index c7f418b0cde9..ea5cff38745c 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -401,5 +401,6 @@ MACHINE_START(COLLIE, "Sharp-Collie")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = collie_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 16be4c56abe3..9db3e98e8b85 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -359,6 +359,10 @@ static int __init sa1100_init(void)
arch_initcall(sa1100_init);
+void __init sa11x0_init_late(void)
+{
+ sa11x0_pm_init();
+}
/*
* Common I/O mapping:
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h
index 9eb3b3cd5a63..a5b7c13da3e3 100644
--- a/arch/arm/mach-sa1100/generic.h
+++ b/arch/arm/mach-sa1100/generic.h
@@ -11,6 +11,7 @@ extern void __init sa1100_map_io(void);
extern void __init sa1100_init_irq(void);
extern void __init sa1100_init_gpio(void);
extern void sa11x0_restart(char, const char *);
+extern void sa11x0_init_late(void);
#define SET_BANK(__nr,__start,__size) \
mi->bank[__nr].start = (__start), \
@@ -41,3 +42,9 @@ void sa11x0_register_mcp(struct mcp_plat_data *data);
struct sa1100fb_mach_info;
void sa11x0_register_lcd(struct sa1100fb_mach_info *inf);
+
+#ifdef CONFIG_PM
+int sa11x0_pm_init(void);
+#else
+static inline int sa11x0_pm_init(void) { return 0; }
+#endif
diff --git a/arch/arm/mach-sa1100/h3100.c b/arch/arm/mach-sa1100/h3100.c
index b2e8d0f418e0..e1571eab08ae 100644
--- a/arch/arm/mach-sa1100/h3100.c
+++ b/arch/arm/mach-sa1100/h3100.c
@@ -110,6 +110,7 @@ MACHINE_START(H3100, "Compaq iPAQ H3100")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = h3100_mach_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/h3600.c b/arch/arm/mach-sa1100/h3600.c
index cb6659f294fe..ba7a2901ab88 100644
--- a/arch/arm/mach-sa1100/h3600.c
+++ b/arch/arm/mach-sa1100/h3600.c
@@ -160,6 +160,7 @@ MACHINE_START(H3600, "Compaq iPAQ H3600")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = h3600_mach_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/hackkit.c b/arch/arm/mach-sa1100/hackkit.c
index 5535475bf583..7f86bd911826 100644
--- a/arch/arm/mach-sa1100/hackkit.c
+++ b/arch/arm/mach-sa1100/hackkit.c
@@ -199,5 +199,6 @@ MACHINE_START(HACKKIT, "HackKit Cpu Board")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = hackkit_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/jornada720.c b/arch/arm/mach-sa1100/jornada720.c
index ca7a7e834720..e3084f47027d 100644
--- a/arch/arm/mach-sa1100/jornada720.c
+++ b/arch/arm/mach-sa1100/jornada720.c
@@ -348,6 +348,7 @@ MACHINE_START(JORNADA720, "HP Jornada 720")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = jornada720_mach_init,
+ .init_late = sa11x0_init_late,
#ifdef CONFIG_SA1111
.dma_zone_size = SZ_1M,
#endif
diff --git a/arch/arm/mach-sa1100/lart.c b/arch/arm/mach-sa1100/lart.c
index eb6534e0b0d0..b775a0abec0a 100644
--- a/arch/arm/mach-sa1100/lart.c
+++ b/arch/arm/mach-sa1100/lart.c
@@ -147,6 +147,7 @@ MACHINE_START(LART, "LART")
.nr_irqs = SA1100_NR_IRQS,
.init_irq = sa1100_init_irq,
.init_machine = lart_init,
+ .init_late = sa11x0_init_late,
.timer = &sa1100_timer,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/nanoengine.c b/arch/arm/mach-sa1100/nanoengine.c
index 8f6446b9f025..41f69d97066f 100644
--- a/arch/arm/mach-sa1100/nanoengine.c
+++ b/arch/arm/mach-sa1100/nanoengine.c
@@ -112,5 +112,6 @@ MACHINE_START(NANOENGINE, "BSE nanoEngine")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = nanoengine_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 6c58f01b358a..266db873a4e4 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -89,6 +89,7 @@ void neponset_ncr_frob(unsigned int mask, unsigned int val)
WARN(1, "nep_base unset\n");
}
}
+EXPORT_SYMBOL(neponset_ncr_frob);
static void neponset_set_mctrl(struct uart_port *port, u_int mctrl)
{
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
index 1602575a0d5c..37fe0a0a5369 100644
--- a/arch/arm/mach-sa1100/pleb.c
+++ b/arch/arm/mach-sa1100/pleb.c
@@ -135,5 +135,6 @@ MACHINE_START(PLEB, "PLEB")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = pleb_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index 2fa499ec6afe..690cf0ce5c0c 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -117,10 +117,8 @@ static const struct platform_suspend_ops sa11x0_pm_ops = {
.valid = suspend_valid_only_mem,
};
-static int __init sa11x0_pm_init(void)
+int __init sa11x0_pm_init(void)
{
suspend_set_ops(&sa11x0_pm_ops);
return 0;
}
-
-late_initcall(sa11x0_pm_init);
diff --git a/arch/arm/mach-sa1100/shannon.c b/arch/arm/mach-sa1100/shannon.c
index ca8bf59b9047..5d33fc3108ef 100644
--- a/arch/arm/mach-sa1100/shannon.c
+++ b/arch/arm/mach-sa1100/shannon.c
@@ -104,5 +104,6 @@ MACHINE_START(SHANNON, "Shannon (AKA: Tuxscreen)")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = shannon_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index 3efae03cb3d7..fbd53593be54 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -395,6 +395,7 @@ MACHINE_START(SIMPAD, "Simpad")
.map_io = simpad_map_io,
.nr_irqs = SA1100_NR_IRQS,
.init_irq = sa1100_init_irq,
+ .init_late = sa11x0_init_late,
.timer = &sa1100_timer,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index e6b177bc9410..8aa1962c22a2 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -3,7 +3,7 @@
#
# Common objects
-obj-y := timer.o console.o clock.o
+obj-y := timer.o console.o clock.o common.o
# CPU objects
obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 0891ec6e27f5..5a6f22f05e99 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -580,5 +580,6 @@ MACHINE_START(AG5EVM, "ag5evm")
.init_irq = sh73a0_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = ag5evm_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index b540b8eb20ca..ace60246a5df 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1469,5 +1469,6 @@ MACHINE_START(AP4EVB, "ap4evb")
.init_irq = sh7372_init_irq,
.handle_irq = shmobile_handle_irq_intc,
.init_machine = ap4evb_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-bonito.c b/arch/arm/mach-shmobile/board-bonito.c
index 63ab7062bee3..e9b32cfbf741 100644
--- a/arch/arm/mach-shmobile/board-bonito.c
+++ b/arch/arm/mach-shmobile/board-bonito.c
@@ -500,5 +500,6 @@ MACHINE_START(BONITO, "bonito")
.init_irq = r8a7740_init_irq,
.handle_irq = shmobile_handle_irq_intc,
.init_machine = bonito_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c
index 39b6cf85ced6..796fa00ad3c4 100644
--- a/arch/arm/mach-shmobile/board-g3evm.c
+++ b/arch/arm/mach-shmobile/board-g3evm.c
@@ -338,5 +338,6 @@ MACHINE_START(G3EVM, "g3evm")
.init_irq = sh7367_init_irq,
.handle_irq = shmobile_handle_irq_intc,
.init_machine = g3evm_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index 0e5a39c670bc..f1257321999a 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -381,5 +381,6 @@ MACHINE_START(G4EVM, "g4evm")
.init_irq = sh7377_init_irq,
.handle_irq = shmobile_handle_irq_intc,
.init_machine = g4evm_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c
index 200dcd42a3a0..f60f1b281cc4 100644
--- a/arch/arm/mach-shmobile/board-kota2.c
+++ b/arch/arm/mach-shmobile/board-kota2.c
@@ -521,5 +521,6 @@ MACHINE_START(KOTA2, "kota2")
.init_irq = sh73a0_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = kota2_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 50c67b22d087..b577f7c44678 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1638,5 +1638,6 @@ MACHINE_START(MACKEREL, "mackerel")
.init_irq = sh7372_init_irq,
.handle_irq = shmobile_handle_irq_intc,
.init_machine = mackerel_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index ef0e13bf0b3a..14de3787cafc 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -98,5 +98,6 @@ MACHINE_START(MARZEN, "marzen")
.init_irq = r8a7779_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = marzen_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/common.c b/arch/arm/mach-shmobile/common.c
new file mode 100644
index 000000000000..608aba9d60d7
--- /dev/null
+++ b/arch/arm/mach-shmobile/common.c
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <mach/common.h>
+
+void __init shmobile_init_late(void)
+{
+ shmobile_suspend_init();
+ shmobile_cpuidle_init();
+}
diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c
index 7e6559105d40..7b541e911ab4 100644
--- a/arch/arm/mach-shmobile/cpuidle.c
+++ b/arch/arm/mach-shmobile/cpuidle.c
@@ -46,7 +46,7 @@ static struct cpuidle_driver shmobile_cpuidle_driver = {
void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
-static int shmobile_cpuidle_init(void)
+int shmobile_cpuidle_init(void)
{
struct cpuidle_device *dev = &shmobile_cpuidle_dev;
struct cpuidle_driver *drv = &shmobile_cpuidle_driver;
@@ -65,4 +65,3 @@ static int shmobile_cpuidle_init(void)
return 0;
}
-late_initcall(shmobile_cpuidle_init);
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index ff5f12fd742f..01e2bc014f15 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -85,4 +85,18 @@ extern int r8a7779_boot_secondary(unsigned int cpu);
extern void r8a7779_smp_prepare_cpus(void);
extern void r8a7779_register_twd(void);
+extern void shmobile_init_late(void);
+
+#ifdef CONFIG_SUSPEND
+int shmobile_suspend_init(void);
+#else
+static inline int shmobile_suspend_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_CPU_IDLE
+int shmobile_cpuidle_init(void);
+#else
+static inline int shmobile_cpuidle_init(void) { return 0; }
+#endif
+
#endif /* __ARCH_MACH_COMMON_H */
diff --git a/arch/arm/mach-shmobile/suspend.c b/arch/arm/mach-shmobile/suspend.c
index 4d1b86a49923..47d83f7a70b6 100644
--- a/arch/arm/mach-shmobile/suspend.c
+++ b/arch/arm/mach-shmobile/suspend.c
@@ -39,9 +39,8 @@ struct platform_suspend_ops shmobile_suspend_ops = {
.valid = suspend_valid_only_mem,
};
-static int __init shmobile_suspend_init(void)
+int __init shmobile_suspend_init(void)
{
suspend_set_ops(&shmobile_suspend_ops);
return 0;
}
-late_initcall(shmobile_suspend_init);
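The hunks above for sa11x0 and shmobile all follow one pattern: the per-file late_initcall()s are removed and the same work is routed through the machine descriptor's new .init_late hook, so it only runs on the platform that actually booted. A minimal sketch of the core-side dispatcher that makes this equivalent (modelled on arch/arm/kernel/setup.c; shown here for orientation only, it is not part of this series):

/*
 * One generic late_initcall replaces the per-platform ones: it simply
 * forwards to the booted machine's init_late() callback, if any.
 */
static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);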
diff --git a/arch/arm/mach-spear13xx/Kconfig b/arch/arm/mach-spear13xx/Kconfig
new file mode 100644
index 000000000000..eaadc66d96b3
--- /dev/null
+++ b/arch/arm/mach-spear13xx/Kconfig
@@ -0,0 +1,20 @@
+#
+# SPEAr13XX Machine configuration file
+#
+
+if ARCH_SPEAR13XX
+
+menu "SPEAr13xx Implementations"
+config MACH_SPEAR1310
+ bool "SPEAr1310 Machine support with Device Tree"
+ select PINCTRL_SPEAR1310
+ help
+ Supports ST SPEAr1310 machine configured via the device-tree
+
+config MACH_SPEAR1340
+ bool "SPEAr1340 Machine support with Device Tree"
+ select PINCTRL_SPEAR1340
+ help
+ Supports ST SPEAr1340 machine configured via the device-tree
+endmenu
+endif #ARCH_SPEAR13XX
diff --git a/arch/arm/mach-spear13xx/Makefile b/arch/arm/mach-spear13xx/Makefile
new file mode 100644
index 000000000000..3435ea78c15d
--- /dev/null
+++ b/arch/arm/mach-spear13xx/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for SPEAr13XX machine series
+#
+
+obj-$(CONFIG_SMP) += headsmp.o platsmp.o
+obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+
+obj-$(CONFIG_ARCH_SPEAR13XX) += spear13xx.o
+obj-$(CONFIG_MACH_SPEAR1310) += spear1310.o
+obj-$(CONFIG_MACH_SPEAR1340) += spear1340.o
diff --git a/arch/arm/mach-spear13xx/Makefile.boot b/arch/arm/mach-spear13xx/Makefile.boot
new file mode 100644
index 000000000000..403efd7e6d27
--- /dev/null
+++ b/arch/arm/mach-spear13xx/Makefile.boot
@@ -0,0 +1,6 @@
+zreladdr-y += 0x00008000
+params_phys-y := 0x00000100
+initrd_phys-y := 0x00800000
+
+dtb-$(CONFIG_MACH_SPEAR1310) += spear1310-evb.dtb
+dtb-$(CONFIG_MACH_SPEAR1340) += spear1340-evb.dtb
diff --git a/arch/arm/mach-spear13xx/headsmp.S b/arch/arm/mach-spear13xx/headsmp.S
new file mode 100644
index 000000000000..ed85473a047f
--- /dev/null
+++ b/arch/arm/mach-spear13xx/headsmp.S
@@ -0,0 +1,47 @@
+/*
+ * arch/arm/mach-spear13xx/headsmp.S
+ *
+ * Picked from realview
+ * Copyright (c) 2012 ST Microelectronics Limited
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+ __INIT
+
+/*
+ * spear13xx specific entry point for secondary CPUs. This provides
+ * a "holding pen" into which all secondary cores are held until we're
+ * ready for them to initialise.
+ */
+ENTRY(spear13xx_secondary_startup)
+ mrc p15, 0, r0, c0, c0, 5
+ and r0, r0, #15
+ adr r4, 1f
+ ldmia r4, {r5, r6}
+ sub r4, r4, r5
+ add r6, r6, r4
+pen: ldr r7, [r6]
+ cmp r7, r0
+ bne pen
+
+ /* re-enable coherency */
+ mrc p15, 0, r0, c1, c0, 1
+ orr r0, r0, #(1 << 6) | (1 << 0)
+ mcr p15, 0, r0, c1, c0, 1
+ /*
+ * we've been released from the holding pen: secondary_stack
+ * should now contain the SVC stack for this core
+ */
+ b secondary_startup
+
+ .align
+1: .long .
+ .long pen_release
+ENDPROC(spear13xx_secondary_startup)
diff --git a/arch/arm/mach-spear13xx/hotplug.c b/arch/arm/mach-spear13xx/hotplug.c
new file mode 100644
index 000000000000..5c6867b46d09
--- /dev/null
+++ b/arch/arm/mach-spear13xx/hotplug.c
@@ -0,0 +1,119 @@
+/*
+ * linux/arch/arm/mach-spear13xx/hotplug.c
+ *
+ * Copyright (C) 2012 ST Microelectronics Ltd.
+ * Deepak Sikri <deepak.sikri@st.com>
+ *
+ * based upon linux/arch/arm/mach-realview/hotplug.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+#include <asm/smp_plat.h>
+
+extern volatile int pen_release;
+
+static inline void cpu_enter_lowpower(void)
+{
+ unsigned int v;
+
+ flush_cache_all();
+ asm volatile(
+ " mcr p15, 0, %1, c7, c5, 0\n"
+ " dsb\n"
+ /*
+ * Turn off coherency
+ */
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, #0x20\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "r" (0), "Ir" (CR_C)
+ : "cc", "memory");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+ unsigned int v;
+
+ asm volatile("mrc p15, 0, %0, c1, c0, 0\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " orr %0, %0, #0x20\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ : "Ir" (CR_C)
+ : "cc");
+}
+
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+ for (;;) {
+ wfi();
+
+ if (pen_release == cpu) {
+ /*
+ * OK, proper wakeup, we're done
+ */
+ break;
+ }
+
+ /*
+ * Getting here means that we have come out of WFI without
+ * having been woken up - this shouldn't happen
+ *
+ * Just note it happening - when we're woken, we can report
+ * its occurrence.
+ */
+ (*spurious)++;
+ }
+}
+
+int platform_cpu_kill(unsigned int cpu)
+{
+ return 1;
+}
+
+/*
+ * platform-specific code to shut down a CPU
+ *
+ * Called with IRQs disabled
+ */
+void __cpuinit platform_cpu_die(unsigned int cpu)
+{
+ int spurious = 0;
+
+ /*
+ * we're ready for shutdown now, so do it
+ */
+ cpu_enter_lowpower();
+ platform_do_lowpower(cpu, &spurious);
+
+ /*
+ * bring this CPU back into the world of cache
+ * coherency, and then restore interrupts
+ */
+ cpu_leave_lowpower();
+
+ if (spurious)
+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+}
+
+int platform_cpu_disable(unsigned int cpu)
+{
+ /*
+ * we don't allow CPU 0 to be shut down (it is still too special
+ * e.g. clock tick interrupts)
+ */
+ return cpu == 0 ? -EPERM : 0;
+}
diff --git a/arch/arm/mach-spear13xx/include/mach/debug-macro.S b/arch/arm/mach-spear13xx/include/mach/debug-macro.S
new file mode 100644
index 000000000000..ea1564609bd4
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/debug-macro.S
@@ -0,0 +1,14 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/debug-macro.S
+ *
+ * Debugging macro include header for the SPEAr13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <plat/debug-macro.S>
diff --git a/arch/arm/mach-spear13xx/include/mach/dma.h b/arch/arm/mach-spear13xx/include/mach/dma.h
new file mode 100644
index 000000000000..383ab04dc6c9
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/dma.h
@@ -0,0 +1,128 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/dma.h
+ *
+ * DMA information for SPEAr13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_DMA_H
+#define __MACH_DMA_H
+
+/* request id of all the peripherals */
+enum dma_master_info {
+ /* Accessible from only one master */
+ DMA_MASTER_MCIF = 0,
+ DMA_MASTER_FSMC = 1,
+ /* Accessible from both 0 & 1 */
+ DMA_MASTER_MEMORY = 0,
+ DMA_MASTER_ADC = 0,
+ DMA_MASTER_UART0 = 0,
+ DMA_MASTER_SSP0 = 0,
+ DMA_MASTER_I2C0 = 0,
+
+#ifdef CONFIG_MACH_SPEAR1310
+ /* Accessible from only one master */
+ SPEAR1310_DMA_MASTER_JPEG = 1,
+
+ /* Accessible from both 0 & 1 */
+ SPEAR1310_DMA_MASTER_I2S = 0,
+ SPEAR1310_DMA_MASTER_UART1 = 0,
+ SPEAR1310_DMA_MASTER_UART2 = 0,
+ SPEAR1310_DMA_MASTER_UART3 = 0,
+ SPEAR1310_DMA_MASTER_UART4 = 0,
+ SPEAR1310_DMA_MASTER_UART5 = 0,
+ SPEAR1310_DMA_MASTER_I2C1 = 0,
+ SPEAR1310_DMA_MASTER_I2C2 = 0,
+ SPEAR1310_DMA_MASTER_I2C3 = 0,
+ SPEAR1310_DMA_MASTER_I2C4 = 0,
+ SPEAR1310_DMA_MASTER_I2C5 = 0,
+ SPEAR1310_DMA_MASTER_I2C6 = 0,
+ SPEAR1310_DMA_MASTER_I2C7 = 0,
+ SPEAR1310_DMA_MASTER_SSP1 = 0,
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+ /* Accessible from only one master */
+ SPEAR1340_DMA_MASTER_I2S_PLAY = 1,
+ SPEAR1340_DMA_MASTER_I2S_REC = 1,
+ SPEAR1340_DMA_MASTER_I2C1 = 1,
+ SPEAR1340_DMA_MASTER_UART1 = 1,
+
+ /* following are accessible from both master 0 & 1 */
+ SPEAR1340_DMA_MASTER_SPDIF = 0,
+ SPEAR1340_DMA_MASTER_CAM = 1,
+ SPEAR1340_DMA_MASTER_VIDEO_IN = 0,
+ SPEAR1340_DMA_MASTER_MALI = 0,
+#endif
+};
+
+enum request_id {
+ DMA_REQ_ADC = 0,
+ DMA_REQ_SSP0_TX = 4,
+ DMA_REQ_SSP0_RX = 5,
+ DMA_REQ_UART0_TX = 6,
+ DMA_REQ_UART0_RX = 7,
+ DMA_REQ_I2C0_TX = 8,
+ DMA_REQ_I2C0_RX = 9,
+
+#ifdef CONFIG_MACH_SPEAR1310
+ SPEAR1310_DMA_REQ_FROM_JPEG = 2,
+ SPEAR1310_DMA_REQ_TO_JPEG = 3,
+ SPEAR1310_DMA_REQ_I2S_TX = 10,
+ SPEAR1310_DMA_REQ_I2S_RX = 11,
+
+ SPEAR1310_DMA_REQ_I2C1_RX = 0,
+ SPEAR1310_DMA_REQ_I2C1_TX = 1,
+ SPEAR1310_DMA_REQ_I2C2_RX = 2,
+ SPEAR1310_DMA_REQ_I2C2_TX = 3,
+ SPEAR1310_DMA_REQ_I2C3_RX = 4,
+ SPEAR1310_DMA_REQ_I2C3_TX = 5,
+ SPEAR1310_DMA_REQ_I2C4_RX = 6,
+ SPEAR1310_DMA_REQ_I2C4_TX = 7,
+ SPEAR1310_DMA_REQ_I2C5_RX = 8,
+ SPEAR1310_DMA_REQ_I2C5_TX = 9,
+ SPEAR1310_DMA_REQ_I2C6_RX = 10,
+ SPEAR1310_DMA_REQ_I2C6_TX = 11,
+ SPEAR1310_DMA_REQ_UART1_RX = 12,
+ SPEAR1310_DMA_REQ_UART1_TX = 13,
+ SPEAR1310_DMA_REQ_UART2_RX = 14,
+ SPEAR1310_DMA_REQ_UART2_TX = 15,
+ SPEAR1310_DMA_REQ_UART5_RX = 16,
+ SPEAR1310_DMA_REQ_UART5_TX = 17,
+ SPEAR1310_DMA_REQ_SSP1_RX = 18,
+ SPEAR1310_DMA_REQ_SSP1_TX = 19,
+ SPEAR1310_DMA_REQ_I2C7_RX = 20,
+ SPEAR1310_DMA_REQ_I2C7_TX = 21,
+ SPEAR1310_DMA_REQ_UART3_RX = 28,
+ SPEAR1310_DMA_REQ_UART3_TX = 29,
+ SPEAR1310_DMA_REQ_UART4_RX = 30,
+ SPEAR1310_DMA_REQ_UART4_TX = 31,
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+ SPEAR1340_DMA_REQ_SPDIF_TX = 2,
+ SPEAR1340_DMA_REQ_SPDIF_RX = 3,
+ SPEAR1340_DMA_REQ_I2S_TX = 10,
+ SPEAR1340_DMA_REQ_I2S_RX = 11,
+ SPEAR1340_DMA_REQ_UART1_TX = 12,
+ SPEAR1340_DMA_REQ_UART1_RX = 13,
+ SPEAR1340_DMA_REQ_I2C1_TX = 14,
+ SPEAR1340_DMA_REQ_I2C1_RX = 15,
+ SPEAR1340_DMA_REQ_CAM0_EVEN = 0,
+ SPEAR1340_DMA_REQ_CAM0_ODD = 1,
+ SPEAR1340_DMA_REQ_CAM1_EVEN = 2,
+ SPEAR1340_DMA_REQ_CAM1_ODD = 3,
+ SPEAR1340_DMA_REQ_CAM2_EVEN = 4,
+ SPEAR1340_DMA_REQ_CAM2_ODD = 5,
+ SPEAR1340_DMA_REQ_CAM3_EVEN = 6,
+ SPEAR1340_DMA_REQ_CAM3_ODD = 7,
+#endif
+};
+
+#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/generic.h b/arch/arm/mach-spear13xx/include/mach/generic.h
new file mode 100644
index 000000000000..6d8c45b9f298
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/generic.h
@@ -0,0 +1,49 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/generic.h
+ *
+ * spear13xx machine family generic header file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GENERIC_H
+#define __MACH_GENERIC_H
+
+#include <linux/dmaengine.h>
+#include <asm/mach/time.h>
+
+/* Add spear13xx structure declarations here */
+extern struct sys_timer spear13xx_timer;
+extern struct pl022_ssp_controller pl022_plat_data;
+extern struct dw_dma_platform_data dmac_plat_data;
+extern struct dw_dma_slave cf_dma_priv;
+extern struct dw_dma_slave nand_read_dma_priv;
+extern struct dw_dma_slave nand_write_dma_priv;
+
+/* Add spear13xx family function declarations here */
+void __init spear_setup_of_timer(void);
+void __init spear13xx_map_io(void);
+void __init spear13xx_dt_init_irq(void);
+void __init spear13xx_l2x0_init(void);
+bool dw_dma_filter(struct dma_chan *chan, void *slave);
+void spear_restart(char, const char *);
+void spear13xx_secondary_startup(void);
+
+#ifdef CONFIG_MACH_SPEAR1310
+void __init spear1310_clk_init(void);
+#else
+static inline void spear1310_clk_init(void) {}
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+void __init spear1340_clk_init(void);
+#else
+static inline void spear1340_clk_init(void) {}
+#endif
+
+#endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/gpio.h b/arch/arm/mach-spear13xx/include/mach/gpio.h
new file mode 100644
index 000000000000..cd6f4f86a56b
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/gpio.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/gpio.h
+ *
+ * GPIO macros for SPEAr13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GPIO_H
+#define __MACH_GPIO_H
+
+#include <plat/gpio.h>
+
+#endif /* __MACH_GPIO_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/hardware.h b/arch/arm/mach-spear13xx/include/mach/hardware.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/hardware.h
@@ -0,0 +1 @@
+/* empty */
diff --git a/arch/arm/mach-spear13xx/include/mach/irqs.h b/arch/arm/mach-spear13xx/include/mach/irqs.h
new file mode 100644
index 000000000000..f542a24aa5f2
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/irqs.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/irqs.h
+ *
+ * IRQ helper macros for spear13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_IRQS_H
+#define __MACH_IRQS_H
+
+#define IRQ_GIC_END 160
+#define NR_IRQS IRQ_GIC_END
+
+#endif /* __MACH_IRQS_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/spear.h b/arch/arm/mach-spear13xx/include/mach/spear.h
new file mode 100644
index 000000000000..30c57ef72686
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/spear.h
@@ -0,0 +1,62 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/spear.h
+ *
+ * spear13xx Machine family specific definition
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_SPEAR13XX_H
+#define __MACH_SPEAR13XX_H
+
+#include <asm/memory.h>
+
+#define PERIP_GRP2_BASE UL(0xB3000000)
+#define VA_PERIP_GRP2_BASE UL(0xFE000000)
+#define MCIF_SDHCI_BASE UL(0xB3000000)
+#define SYSRAM0_BASE UL(0xB3800000)
+#define VA_SYSRAM0_BASE UL(0xFE800000)
+#define SYS_LOCATION (VA_SYSRAM0_BASE + 0x600)
+
+#define PERIP_GRP1_BASE UL(0xE0000000)
+#define VA_PERIP_GRP1_BASE UL(0xFD000000)
+#define UART_BASE UL(0xE0000000)
+#define VA_UART_BASE UL(0xFD000000)
+#define SSP_BASE UL(0xE0100000)
+#define MISC_BASE UL(0xE0700000)
+#define VA_MISC_BASE IOMEM(UL(0xFD700000))
+
+#define A9SM_AND_MPMC_BASE UL(0xEC000000)
+#define VA_A9SM_AND_MPMC_BASE UL(0xFC000000)
+
+/* A9SM peripheral offsets */
+#define A9SM_PERIP_BASE UL(0xEC800000)
+#define VA_A9SM_PERIP_BASE UL(0xFC800000)
+#define VA_SCU_BASE (VA_A9SM_PERIP_BASE + 0x00)
+
+#define L2CC_BASE UL(0xED000000)
+#define VA_L2CC_BASE IOMEM(UL(0xFB000000))
+
+/* others */
+#define DMAC0_BASE UL(0xEA800000)
+#define DMAC1_BASE UL(0xEB000000)
+#define MCIF_CF_BASE UL(0xB2800000)
+
+/* Devices present in SPEAr1310 */
+#ifdef CONFIG_MACH_SPEAR1310
+#define SPEAR1310_RAS_GRP1_BASE UL(0xD8000000)
+#define VA_SPEAR1310_RAS_GRP1_BASE UL(0xFA000000)
+#define SPEAR1310_RAS_BASE UL(0xD8400000)
+#define VA_SPEAR1310_RAS_BASE IOMEM(UL(0xFA400000))
+#endif /* CONFIG_MACH_SPEAR1310 */
+
+/* Debug UART for Linux; used for debug and uncompress messages */
+#define SPEAR_DBG_UART_BASE UART_BASE
+#define VA_SPEAR_DBG_UART_BASE VA_UART_BASE
+
+#endif /* __MACH_SPEAR13XX_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h b/arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h
diff --git a/arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h b/arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h
diff --git a/arch/arm/mach-spear13xx/include/mach/timex.h b/arch/arm/mach-spear13xx/include/mach/timex.h
new file mode 100644
index 000000000000..31af3e8d976e
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/timex.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/timex.h
+ *
+ * SPEAr13xx machine family specific timex definitions
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_TIMEX_H
+#define __MACH_TIMEX_H
+
+#include <plat/timex.h>
+
+#endif /* __MACH_TIMEX_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/uncompress.h b/arch/arm/mach-spear13xx/include/mach/uncompress.h
new file mode 100644
index 000000000000..c7840896ae6e
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/uncompress.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/uncompress.h
+ *
+ * Serial port stubs for kernel decompress status messages
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_UNCOMPRESS_H
+#define __MACH_UNCOMPRESS_H
+
+#include <plat/uncompress.h>
+
+#endif /* __MACH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-spear13xx/platsmp.c b/arch/arm/mach-spear13xx/platsmp.c
new file mode 100644
index 000000000000..f5d07f2663d7
--- /dev/null
+++ b/arch/arm/mach-spear13xx/platsmp.c
@@ -0,0 +1,127 @@
+/*
+ * arch/arm/mach-spear13xx/platsmp.c
+ *
+ * based upon linux/arch/arm/mach-realview/platsmp.c
+ *
+ * Copyright (C) 2012 ST Microelectronics Ltd.
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/hardware/gic.h>
+#include <asm/smp_scu.h>
+#include <mach/spear.h>
+
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen"
+ */
+volatile int __cpuinitdata pen_release = -1;
+static DEFINE_SPINLOCK(boot_lock);
+
+static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
+extern void spear13xx_secondary_startup(void);
+
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+ /*
+ * if any interrupts are already enabled for the primary
+ * core (e.g. timer irq), then they will not have been enabled
+ * for us: do so
+ */
+ gic_secondary_init(0);
+
+ /*
+ * let the primary processor know we're out of the
+ * pen, then head off into the C entry point
+ */
+ pen_release = -1;
+ smp_wmb();
+
+ /*
+ * Synchronise with the boot thread.
+ */
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned long timeout;
+
+ /*
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+ * the holding pen - release it, then wait for it to flag
+ * that it has been released by resetting pen_release.
+ *
+ * Note that "pen_release" is the hardware CPU ID, whereas
+ * "cpu" is Linux's internal ID.
+ */
+ pen_release = cpu;
+ flush_cache_all();
+ outer_flush_all();
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ smp_rmb();
+ if (pen_release == -1)
+ break;
+
+ udelay(10);
+ }
+
+ /*
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+ spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+ unsigned int i, ncores = scu_get_core_count(scu_base);
+
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
+ }
+
+ for (i = 0; i < ncores; i++)
+ set_cpu_possible(i, true);
+
+ set_smp_cross_call(gic_raise_softirq);
+}
+
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+
+ scu_enable(scu_base);
+
+ /*
+ * Write the address of secondary startup into the system-wide location
+ * (presently it is in SRAM). The BootMonitor waits until it receives a
+ * soft interrupt, and then the secondary CPU branches to this address.
+ */
+ __raw_writel(virt_to_phys(spear13xx_secondary_startup), SYS_LOCATION);
+}
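The holding-pen handshake is split between headsmp.S and boot_secondary() above, which can make the control flow hard to see. As C-style pseudo-code only (simplified, ignoring the position-independent address fixup done in the assembly; not part of this series), the secondary side amounts to:

/*
 * Pseudo-code for spear13xx_secondary_startup: busy-wait until the boot
 * CPU publishes this core's hardware ID in pen_release, re-enable SMP
 * coherency in ACTLR, then jump to the generic ARM secondary entry.
 */
void spear13xx_secondary_startup_sketch(void)
{
	unsigned int hwid = read_cpuid_mpidr() & 15;

	while (pen_release != hwid)
		;				/* spin in the holding pen */

	/* set ACTLR.FW and ACTLR.SMP, as the mrc/orr/mcr sequence does */

	secondary_startup();			/* common secondary entry */
}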
diff --git a/arch/arm/mach-spear13xx/spear1310.c b/arch/arm/mach-spear13xx/spear1310.c
new file mode 100644
index 000000000000..fefd15b2f380
--- /dev/null
+++ b/arch/arm/mach-spear13xx/spear1310.c
@@ -0,0 +1,88 @@
+/*
+ * arch/arm/mach-spear13xx/spear1310.c
+ *
+ * SPEAr1310 machine source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "SPEAr1310: " fmt
+
+#include <linux/amba/pl022.h>
+#include <linux/of_platform.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* Base addresses */
+#define SPEAR1310_SSP1_BASE UL(0x5D400000)
+#define SPEAR1310_SATA0_BASE UL(0xB1000000)
+#define SPEAR1310_SATA1_BASE UL(0xB1800000)
+#define SPEAR1310_SATA2_BASE UL(0xB4000000)
+
+/* ssp device registration */
+static struct pl022_ssp_controller ssp1_plat_data = {
+ .bus_id = 0,
+ .enable_dma = 0,
+ .num_chipselect = 3,
+};
+
+/* Add SPEAr1310 auxdata to pass platform data */
+static struct of_dev_auxdata spear1310_auxdata_lookup[] __initdata = {
+ OF_DEV_AUXDATA("arasan,cf-spear1340", MCIF_CF_BASE, NULL, &cf_dma_priv),
+ OF_DEV_AUXDATA("snps,dma-spear1340", DMAC0_BASE, NULL, &dmac_plat_data),
+ OF_DEV_AUXDATA("snps,dma-spear1340", DMAC1_BASE, NULL, &dmac_plat_data),
+ OF_DEV_AUXDATA("arm,pl022", SSP_BASE, NULL, &pl022_plat_data),
+
+ OF_DEV_AUXDATA("arm,pl022", SPEAR1310_SSP1_BASE, NULL, &ssp1_plat_data),
+ {}
+};
+
+static void __init spear1310_dt_init(void)
+{
+ of_platform_populate(NULL, of_default_bus_match_table,
+ spear1310_auxdata_lookup, NULL);
+}
+
+static const char * const spear1310_dt_board_compat[] = {
+ "st,spear1310",
+ "st,spear1310-evb",
+ NULL,
+};
+
+/*
+ * Following will create 16MB static virtual/physical mappings
+ * PHYSICAL VIRTUAL
+ * 0xD8000000 0xFA000000
+ */
+struct map_desc spear1310_io_desc[] __initdata = {
+ {
+ .virtual = VA_SPEAR1310_RAS_GRP1_BASE,
+ .pfn = __phys_to_pfn(SPEAR1310_RAS_GRP1_BASE),
+ .length = SZ_16M,
+ .type = MT_DEVICE
+ },
+};
+
+static void __init spear1310_map_io(void)
+{
+ iotable_init(spear1310_io_desc, ARRAY_SIZE(spear1310_io_desc));
+ spear13xx_map_io();
+}
+
+DT_MACHINE_START(SPEAR1310_DT, "ST SPEAr1310 SoC with Flattened Device Tree")
+ .map_io = spear1310_map_io,
+ .init_irq = spear13xx_dt_init_irq,
+ .handle_irq = gic_handle_irq,
+ .timer = &spear13xx_timer,
+ .init_machine = spear1310_dt_init,
+ .restart = spear_restart,
+ .dt_compat = spear1310_dt_board_compat,
+MACHINE_END
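The OF_DEV_AUXDATA table above only exists to attach legacy platform data to devices that are otherwise instantiated straight from the device tree by of_platform_populate(). As a purely hypothetical consumer (the probe function and its name are illustrative, not part of this series), a bound driver would read that data back like so:

/*
 * of_platform_populate() stores the auxdata pointer in dev.platform_data,
 * so the driver retrieves it with dev_get_platdata().
 */
static int example_ssp_probe(struct platform_device *pdev)
{
	struct pl022_ssp_controller *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata)
		return -ENODEV;		/* no auxdata matched this device */

	/* ... use pdata->num_chipselect, pdata->enable_dma, ... */
	return 0;
}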
diff --git a/arch/arm/mach-spear13xx/spear1340.c b/arch/arm/mach-spear13xx/spear1340.c
new file mode 100644
index 000000000000..ee38cbc56869
--- /dev/null
+++ b/arch/arm/mach-spear13xx/spear1340.c
@@ -0,0 +1,192 @@
+/*
+ * arch/arm/mach-spear13xx/spear1340.c
+ *
+ * SPEAr1340 machine source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "SPEAr1340: " fmt
+
+#include <linux/ahci_platform.h>
+#include <linux/amba/serial.h>
+#include <linux/delay.h>
+#include <linux/dw_dmac.h>
+#include <linux/of_platform.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/arch.h>
+#include <mach/dma.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* Base addresses */
+#define SPEAR1340_SATA_BASE UL(0xB1000000)
+#define SPEAR1340_UART1_BASE UL(0xB4100000)
+
+/* Power Management Registers */
+#define SPEAR1340_PCM_CFG (VA_MISC_BASE + 0x100)
+#define SPEAR1340_PCM_WKUP_CFG (VA_MISC_BASE + 0x104)
+#define SPEAR1340_SWITCH_CTR (VA_MISC_BASE + 0x108)
+
+#define SPEAR1340_PERIP1_SW_RST (VA_MISC_BASE + 0x318)
+#define SPEAR1340_PERIP2_SW_RST (VA_MISC_BASE + 0x31C)
+#define SPEAR1340_PERIP3_SW_RST (VA_MISC_BASE + 0x320)
+
+/* PCIE - SATA configuration registers */
+#define SPEAR1340_PCIE_SATA_CFG (VA_MISC_BASE + 0x424)
+ /* PCIE CFG Masks */
+ #define SPEAR1340_PCIE_CFG_DEVICE_PRESENT (1 << 11)
+ #define SPEAR1340_PCIE_CFG_POWERUP_RESET (1 << 10)
+ #define SPEAR1340_PCIE_CFG_CORE_CLK_EN (1 << 9)
+ #define SPEAR1340_PCIE_CFG_AUX_CLK_EN (1 << 8)
+ #define SPEAR1340_SATA_CFG_TX_CLK_EN (1 << 4)
+ #define SPEAR1340_SATA_CFG_RX_CLK_EN (1 << 3)
+ #define SPEAR1340_SATA_CFG_POWERUP_RESET (1 << 2)
+ #define SPEAR1340_SATA_CFG_PM_CLK_EN (1 << 1)
+ #define SPEAR1340_PCIE_SATA_SEL_PCIE (0)
+ #define SPEAR1340_PCIE_SATA_SEL_SATA (1)
+ #define SPEAR1340_SATA_PCIE_CFG_MASK 0xF1F
+ #define SPEAR1340_PCIE_CFG_VAL (SPEAR1340_PCIE_SATA_SEL_PCIE | \
+ SPEAR1340_PCIE_CFG_AUX_CLK_EN | \
+ SPEAR1340_PCIE_CFG_CORE_CLK_EN | \
+ SPEAR1340_PCIE_CFG_POWERUP_RESET | \
+ SPEAR1340_PCIE_CFG_DEVICE_PRESENT)
+ #define SPEAR1340_SATA_CFG_VAL (SPEAR1340_PCIE_SATA_SEL_SATA | \
+ SPEAR1340_SATA_CFG_PM_CLK_EN | \
+ SPEAR1340_SATA_CFG_POWERUP_RESET | \
+ SPEAR1340_SATA_CFG_RX_CLK_EN | \
+ SPEAR1340_SATA_CFG_TX_CLK_EN)
+
+#define SPEAR1340_PCIE_MIPHY_CFG (VA_MISC_BASE + 0x428)
+ #define SPEAR1340_MIPHY_OSC_BYPASS_EXT (1 << 31)
+ #define SPEAR1340_MIPHY_CLK_REF_DIV2 (1 << 27)
+ #define SPEAR1340_MIPHY_CLK_REF_DIV4 (2 << 27)
+ #define SPEAR1340_MIPHY_CLK_REF_DIV8 (3 << 27)
+ #define SPEAR1340_MIPHY_PLL_RATIO_TOP(x) (x << 0)
+ #define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA \
+ (SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
+ SPEAR1340_MIPHY_CLK_REF_DIV2 | \
+ SPEAR1340_MIPHY_PLL_RATIO_TOP(60))
+ #define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK \
+ (SPEAR1340_MIPHY_PLL_RATIO_TOP(120))
+ #define SPEAR1340_PCIE_SATA_MIPHY_CFG_PCIE \
+ (SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
+ SPEAR1340_MIPHY_PLL_RATIO_TOP(25))
+
+static struct dw_dma_slave uart1_dma_param[] = {
+ {
+ /* Tx */
+ .cfg_hi = DWC_CFGH_DST_PER(SPEAR1340_DMA_REQ_UART1_TX),
+ .cfg_lo = 0,
+ .src_master = DMA_MASTER_MEMORY,
+ .dst_master = SPEAR1340_DMA_MASTER_UART1,
+ }, {
+ /* Rx */
+ .cfg_hi = DWC_CFGH_SRC_PER(SPEAR1340_DMA_REQ_UART1_RX),
+ .cfg_lo = 0,
+ .src_master = SPEAR1340_DMA_MASTER_UART1,
+ .dst_master = DMA_MASTER_MEMORY,
+ }
+};
+
+static struct amba_pl011_data uart1_data = {
+ .dma_filter = dw_dma_filter,
+ .dma_tx_param = &uart1_dma_param[0],
+ .dma_rx_param = &uart1_dma_param[1],
+};
+
+/* SATA device registration */
+static int sata_miphy_init(struct device *dev, void __iomem *addr)
+{
+ writel(SPEAR1340_SATA_CFG_VAL, SPEAR1340_PCIE_SATA_CFG);
+ writel(SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK,
+ SPEAR1340_PCIE_MIPHY_CFG);
+ /* Switch on sata power domain */
+ writel((readl(SPEAR1340_PCM_CFG) | (0x800)), SPEAR1340_PCM_CFG);
+ msleep(20);
+ /* Disable PCIE SATA Controller reset */
+ writel((readl(SPEAR1340_PERIP1_SW_RST) & (~0x1000)),
+ SPEAR1340_PERIP1_SW_RST);
+ msleep(20);
+
+ return 0;
+}
+
+void sata_miphy_exit(struct device *dev)
+{
+ writel(0, SPEAR1340_PCIE_SATA_CFG);
+ writel(0, SPEAR1340_PCIE_MIPHY_CFG);
+
+ /* Enable PCIE SATA Controller reset */
+ writel((readl(SPEAR1340_PERIP1_SW_RST) | (0x1000)),
+ SPEAR1340_PERIP1_SW_RST);
+ msleep(20);
+ /* Switch off sata power domain */
+ writel((readl(SPEAR1340_PCM_CFG) & (~0x800)), SPEAR1340_PCM_CFG);
+ msleep(20);
+}
+
+int sata_suspend(struct device *dev)
+{
+ if (dev->power.power_state.event == PM_EVENT_FREEZE)
+ return 0;
+
+ sata_miphy_exit(dev);
+
+ return 0;
+}
+
+int sata_resume(struct device *dev)
+{
+ if (dev->power.power_state.event == PM_EVENT_THAW)
+ return 0;
+
+ return sata_miphy_init(dev, NULL);
+}
+
+static struct ahci_platform_data sata_pdata = {
+ .init = sata_miphy_init,
+ .exit = sata_miphy_exit,
+ .suspend = sata_suspend,
+ .resume = sata_resume,
+};
+
+/* Add SPEAr1340 auxdata to pass platform data */
+static struct of_dev_auxdata spear1340_auxdata_lookup[] __initdata = {
+ OF_DEV_AUXDATA("arasan,cf-spear1340", MCIF_CF_BASE, NULL, &cf_dma_priv),
+ OF_DEV_AUXDATA("snps,dma-spear1340", DMAC0_BASE, NULL, &dmac_plat_data),
+ OF_DEV_AUXDATA("snps,dma-spear1340", DMAC1_BASE, NULL, &dmac_plat_data),
+ OF_DEV_AUXDATA("arm,pl022", SSP_BASE, NULL, &pl022_plat_data),
+
+ OF_DEV_AUXDATA("snps,spear-ahci", SPEAR1340_SATA_BASE, NULL,
+ &sata_pdata),
+ OF_DEV_AUXDATA("arm,pl011", SPEAR1340_UART1_BASE, NULL, &uart1_data),
+ {}
+};
+
+static void __init spear1340_dt_init(void)
+{
+ of_platform_populate(NULL, of_default_bus_match_table,
+ spear1340_auxdata_lookup, NULL);
+}
+
+static const char * const spear1340_dt_board_compat[] = {
+ "st,spear1340",
+ "st,spear1340-evb",
+ NULL,
+};
+
+DT_MACHINE_START(SPEAR1340_DT, "ST SPEAr1340 SoC with Flattened Device Tree")
+ .map_io = spear13xx_map_io,
+ .init_irq = spear13xx_dt_init_irq,
+ .handle_irq = gic_handle_irq,
+ .timer = &spear13xx_timer,
+ .init_machine = spear1340_dt_init,
+ .restart = spear_restart,
+ .dt_compat = spear1340_dt_board_compat,
+MACHINE_END
diff --git a/arch/arm/mach-spear13xx/spear13xx.c b/arch/arm/mach-spear13xx/spear13xx.c
new file mode 100644
index 000000000000..50b349ae863d
--- /dev/null
+++ b/arch/arm/mach-spear13xx/spear13xx.c
@@ -0,0 +1,197 @@
+/*
+ * arch/arm/mach-spear13xx/spear13xx.c
+ *
+ * SPEAr13XX machines common source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "SPEAr13xx: " fmt
+
+#include <linux/amba/pl022.h>
+#include <linux/clk.h>
+#include <linux/dw_dmac.h>
+#include <linux/err.h>
+#include <linux/of_irq.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/map.h>
+#include <asm/smp_twd.h>
+#include <mach/dma.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* common dw_dma filter routine to be used by peripherals */
+bool dw_dma_filter(struct dma_chan *chan, void *slave)
+{
+ struct dw_dma_slave *dws = (struct dw_dma_slave *)slave;
+
+ if (chan->device->dev == dws->dma_dev) {
+ chan->private = slave;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/* ssp device registration */
+static struct dw_dma_slave ssp_dma_param[] = {
+ {
+ /* Tx */
+ .cfg_hi = DWC_CFGH_DST_PER(DMA_REQ_SSP0_TX),
+ .cfg_lo = 0,
+ .src_master = DMA_MASTER_MEMORY,
+ .dst_master = DMA_MASTER_SSP0,
+ }, {
+ /* Rx */
+ .cfg_hi = DWC_CFGH_SRC_PER(DMA_REQ_SSP0_RX),
+ .cfg_lo = 0,
+ .src_master = DMA_MASTER_SSP0,
+ .dst_master = DMA_MASTER_MEMORY,
+ }
+};
+
+struct pl022_ssp_controller pl022_plat_data = {
+ .bus_id = 0,
+ .enable_dma = 1,
+ .dma_filter = dw_dma_filter,
+ .dma_rx_param = &ssp_dma_param[1],
+ .dma_tx_param = &ssp_dma_param[0],
+ .num_chipselect = 3,
+};
+
+/* CF device registration */
+struct dw_dma_slave cf_dma_priv = {
+ .cfg_hi = 0,
+ .cfg_lo = 0,
+ .src_master = 0,
+ .dst_master = 0,
+};
+
+/* dmac device registration */
+struct dw_dma_platform_data dmac_plat_data = {
+ .nr_channels = 8,
+ .chan_allocation_order = CHAN_ALLOCATION_DESCENDING,
+ .chan_priority = CHAN_PRIORITY_DESCENDING,
+};
+
+void __init spear13xx_l2x0_init(void)
+{
+ /*
+ * 512KB (64KB/way), 8-way associativity, parity supported
+ *
+ * FIXME: the 9th bit of the Auxiliary Control register must be set
+ * for some spear13xx devices for stable L2 operation.
+ *
+ * Enable Early BRESP, L2 prefetch for Instruction and Data,
+ * write alloc and 'Full line of zero' options
+ *
+ */
+
+ writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL);
+
+ /*
+ * Program following latencies in order to make
+ * SPEAr1340 work at 600 MHz
+ */
+ writel_relaxed(0x221, VA_L2CC_BASE + L2X0_TAG_LATENCY_CTRL);
+ writel_relaxed(0x441, VA_L2CC_BASE + L2X0_DATA_LATENCY_CTRL);
+ l2x0_init(VA_L2CC_BASE, 0x70A60001, 0xfe00ffff);
+}
+
+/*
+ * Following will create 16MB static virtual/physical mappings
+ * PHYSICAL VIRTUAL
+ * 0xB3000000 0xFE000000
+ * 0xE0000000 0xFD000000
+ * 0xEC000000 0xFC000000
+ * 0xED000000 0xFB000000
+ */
+struct map_desc spear13xx_io_desc[] __initdata = {
+ {
+ .virtual = VA_PERIP_GRP2_BASE,
+ .pfn = __phys_to_pfn(PERIP_GRP2_BASE),
+ .length = SZ_16M,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_PERIP_GRP1_BASE,
+ .pfn = __phys_to_pfn(PERIP_GRP1_BASE),
+ .length = SZ_16M,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_A9SM_AND_MPMC_BASE,
+ .pfn = __phys_to_pfn(A9SM_AND_MPMC_BASE),
+ .length = SZ_16M,
+ .type = MT_DEVICE
+ }, {
+ .virtual = (unsigned long)VA_L2CC_BASE,
+ .pfn = __phys_to_pfn(L2CC_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE
+ },
+};
+
+/* This will create static memory mapping for selected devices */
+void __init spear13xx_map_io(void)
+{
+ iotable_init(spear13xx_io_desc, ARRAY_SIZE(spear13xx_io_desc));
+}
+
+static void __init spear13xx_clk_init(void)
+{
+ if (of_machine_is_compatible("st,spear1310"))
+ spear1310_clk_init();
+ else if (of_machine_is_compatible("st,spear1340"))
+ spear1340_clk_init();
+ else
+ pr_err("%s: Unknown machine\n", __func__);
+}
+
+static void __init spear13xx_timer_init(void)
+{
+ char pclk_name[] = "osc_24m_clk";
+ struct clk *gpt_clk, *pclk;
+
+ spear13xx_clk_init();
+
+ /* get the system timer clock */
+ gpt_clk = clk_get_sys("gpt0", NULL);
+ if (IS_ERR(gpt_clk)) {
+ pr_err("%s:couldn't get clk for gpt\n", __func__);
+ BUG();
+ }
+
+ /* get the suitable parent clock for timer */
+ pclk = clk_get(NULL, pclk_name);
+ if (IS_ERR(pclk)) {
+ pr_err("%s:couldn't get %s as parent for gpt\n", __func__,
+ pclk_name);
+ BUG();
+ }
+
+ clk_set_parent(gpt_clk, pclk);
+ clk_put(gpt_clk);
+ clk_put(pclk);
+
+ spear_setup_of_timer();
+ twd_local_timer_of_register();
+}
+
+struct sys_timer spear13xx_timer = {
+ .init = spear13xx_timer_init,
+};
+
+static const struct of_device_id gic_of_match[] __initconst = {
+ { .compatible = "arm,cortex-a9-gic", .data = gic_of_init },
+ { /* Sentinel */ }
+};
+
+void __init spear13xx_dt_init_irq(void)
+{
+ of_irq_init(gic_of_match);
+}
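dw_dma_filter() and the dw_dma_slave tables above are consumed through the standard dmaengine channel-request API. A minimal usage sketch (the wrapper function is illustrative, not part of this series):

/*
 * Claim a DW DMA channel for one of the slave configurations above;
 * dw_dma_filter() stores the dw_dma_slave pointer in chan->private
 * when the channel belongs to the matching DMA controller.
 */
static struct dma_chan *example_request_chan(struct dw_dma_slave *dws)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, dw_dma_filter, dws);
}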
diff --git a/arch/arm/mach-spear3xx/Makefile b/arch/arm/mach-spear3xx/Makefile
index 17b5d83cf2d5..8d12faa178fd 100644
--- a/arch/arm/mach-spear3xx/Makefile
+++ b/arch/arm/mach-spear3xx/Makefile
@@ -3,7 +3,7 @@
#
# common files
-obj-$(CONFIG_ARCH_SPEAR3XX) += spear3xx.o clock.o
+obj-$(CONFIG_ARCH_SPEAR3XX) += spear3xx.o
# spear300 specific files
obj-$(CONFIG_MACH_SPEAR300) += spear300.o
diff --git a/arch/arm/mach-spear3xx/clock.c b/arch/arm/mach-spear3xx/clock.c
deleted file mode 100644
index cd6c11099083..000000000000
--- a/arch/arm/mach-spear3xx/clock.c
+++ /dev/null
@@ -1,892 +0,0 @@
-/*
- * arch/arm/mach-spear3xx/clock.c
- *
- * SPEAr3xx machines clock framework source file
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/of_platform.h>
-#include <asm/mach-types.h>
-#include <plat/clock.h>
-#include <mach/misc_regs.h>
-#include <mach/spear.h>
-
-#define PLL1_CTR (MISC_BASE + 0x008)
-#define PLL1_FRQ (MISC_BASE + 0x00C)
-#define PLL1_MOD (MISC_BASE + 0x010)
-#define PLL2_CTR (MISC_BASE + 0x014)
-/* PLL_CTR register masks */
-#define PLL_ENABLE 2
-#define PLL_MODE_SHIFT 4
-#define PLL_MODE_MASK 0x3
-#define PLL_MODE_NORMAL 0
-#define PLL_MODE_FRACTION 1
-#define PLL_MODE_DITH_DSB 2
-#define PLL_MODE_DITH_SSB 3
-
-#define PLL2_FRQ (MISC_BASE + 0x018)
-/* PLL FRQ register masks */
-#define PLL_DIV_N_SHIFT 0
-#define PLL_DIV_N_MASK 0xFF
-#define PLL_DIV_P_SHIFT 8
-#define PLL_DIV_P_MASK 0x7
-#define PLL_NORM_FDBK_M_SHIFT 24
-#define PLL_NORM_FDBK_M_MASK 0xFF
-#define PLL_DITH_FDBK_M_SHIFT 16
-#define PLL_DITH_FDBK_M_MASK 0xFFFF
-
-#define PLL2_MOD (MISC_BASE + 0x01C)
-#define PLL_CLK_CFG (MISC_BASE + 0x020)
-#define CORE_CLK_CFG (MISC_BASE + 0x024)
-/* CORE CLK CFG register masks */
-#define PLL_HCLK_RATIO_SHIFT 10
-#define PLL_HCLK_RATIO_MASK 0x3
-#define HCLK_PCLK_RATIO_SHIFT 8
-#define HCLK_PCLK_RATIO_MASK 0x3
-
-#define PERIP_CLK_CFG (MISC_BASE + 0x028)
-/* PERIP_CLK_CFG register masks */
-#define UART_CLK_SHIFT 4
-#define UART_CLK_MASK 0x1
-#define FIRDA_CLK_SHIFT 5
-#define FIRDA_CLK_MASK 0x3
-#define GPT0_CLK_SHIFT 8
-#define GPT1_CLK_SHIFT 11
-#define GPT2_CLK_SHIFT 12
-#define GPT_CLK_MASK 0x1
-#define AUX_CLK_PLL3_VAL 0
-#define AUX_CLK_PLL1_VAL 1
-
-#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
-/* PERIP1_CLK_ENB register masks */
-#define UART_CLK_ENB 3
-#define SSP_CLK_ENB 5
-#define I2C_CLK_ENB 7
-#define JPEG_CLK_ENB 8
-#define FIRDA_CLK_ENB 10
-#define GPT1_CLK_ENB 11
-#define GPT2_CLK_ENB 12
-#define ADC_CLK_ENB 15
-#define RTC_CLK_ENB 17
-#define GPIO_CLK_ENB 18
-#define DMA_CLK_ENB 19
-#define SMI_CLK_ENB 21
-#define GMAC_CLK_ENB 23
-#define USBD_CLK_ENB 24
-#define USBH_CLK_ENB 25
-#define C3_CLK_ENB 31
-
-#define RAS_CLK_ENB (MISC_BASE + 0x034)
-
-#define PRSC1_CLK_CFG (MISC_BASE + 0x044)
-#define PRSC2_CLK_CFG (MISC_BASE + 0x048)
-#define PRSC3_CLK_CFG (MISC_BASE + 0x04C)
-/* gpt synthesizer register masks */
-#define GPT_MSCALE_SHIFT 0
-#define GPT_MSCALE_MASK 0xFFF
-#define GPT_NSCALE_SHIFT 12
-#define GPT_NSCALE_MASK 0xF
-
-#define AMEM_CLK_CFG (MISC_BASE + 0x050)
-#define EXPI_CLK_CFG (MISC_BASE + 0x054)
-#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
-#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
-#define UART_CLK_SYNT (MISC_BASE + 0x064)
-#define GMAC_CLK_SYNT (MISC_BASE + 0x068)
-#define RAS1_CLK_SYNT (MISC_BASE + 0x06C)
-#define RAS2_CLK_SYNT (MISC_BASE + 0x070)
-#define RAS3_CLK_SYNT (MISC_BASE + 0x074)
-#define RAS4_CLK_SYNT (MISC_BASE + 0x078)
-/* aux clk synthesiser register masks for irda to ras4 */
-#define AUX_SYNT_ENB 31
-#define AUX_EQ_SEL_SHIFT 30
-#define AUX_EQ_SEL_MASK 1
-#define AUX_EQ1_SEL 0
-#define AUX_EQ2_SEL 1
-#define AUX_XSCALE_SHIFT 16
-#define AUX_XSCALE_MASK 0xFFF
-#define AUX_YSCALE_SHIFT 0
-#define AUX_YSCALE_MASK 0xFFF
-
-/* root clks */
-/* 32 KHz oscillator clock */
-static struct clk osc_32k_clk = {
- .flags = ALWAYS_ENABLED,
- .rate = 32000,
-};
-
-/* 24 MHz oscillator clock */
-static struct clk osc_24m_clk = {
- .flags = ALWAYS_ENABLED,
- .rate = 24000000,
-};
-
-/* clock derived from 32 KHz osc clk */
-/* rtc clock */
-static struct clk rtc_clk = {
- .pclk = &osc_32k_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = RTC_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* clock derived from 24 MHz osc clk */
-/* pll masks structure */
-static struct pll_clk_masks pll1_masks = {
- .mode_mask = PLL_MODE_MASK,
- .mode_shift = PLL_MODE_SHIFT,
- .norm_fdbk_m_mask = PLL_NORM_FDBK_M_MASK,
- .norm_fdbk_m_shift = PLL_NORM_FDBK_M_SHIFT,
- .dith_fdbk_m_mask = PLL_DITH_FDBK_M_MASK,
- .dith_fdbk_m_shift = PLL_DITH_FDBK_M_SHIFT,
- .div_p_mask = PLL_DIV_P_MASK,
- .div_p_shift = PLL_DIV_P_SHIFT,
- .div_n_mask = PLL_DIV_N_MASK,
- .div_n_shift = PLL_DIV_N_SHIFT,
-};
-
-/* pll1 configuration structure */
-static struct pll_clk_config pll1_config = {
- .mode_reg = PLL1_CTR,
- .cfg_reg = PLL1_FRQ,
- .masks = &pll1_masks,
-};
-
-/* pll rate configuration table, in ascending order of rates */
-struct pll_rate_tbl pll_rtbl[] = {
- {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* 266 MHz */
- {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* 332 MHz */
-};
-
-/* PLL1 clock */
-static struct clk pll1_clk = {
- .flags = ENABLED_ON_INIT,
- .pclk = &osc_24m_clk,
- .en_reg = PLL1_CTR,
- .en_reg_bit = PLL_ENABLE,
- .calc_rate = &pll_calc_rate,
- .recalc = &pll_clk_recalc,
- .set_rate = &pll_clk_set_rate,
- .rate_config = {pll_rtbl, ARRAY_SIZE(pll_rtbl), 1},
- .private_data = &pll1_config,
-};
-
-/* PLL3 48 MHz clock */
-static struct clk pll3_48m_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &osc_24m_clk,
- .rate = 48000000,
-};
-
-/* watch dog timer clock */
-static struct clk wdt_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &osc_24m_clk,
- .recalc = &follow_parent,
-};
-
-/* clock derived from pll1 clk */
-/* cpu clock */
-static struct clk cpu_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .recalc = &follow_parent,
-};
-
-/* ahb masks structure */
-static struct bus_clk_masks ahb_masks = {
- .mask = PLL_HCLK_RATIO_MASK,
- .shift = PLL_HCLK_RATIO_SHIFT,
-};
-
-/* ahb configuration structure */
-static struct bus_clk_config ahb_config = {
- .reg = CORE_CLK_CFG,
- .masks = &ahb_masks,
-};
-
-/* ahb rate configuration table, in ascending order of rates */
-struct bus_rate_tbl bus_rtbl[] = {
- {.div = 3}, /* == parent divided by 4 */
- {.div = 2}, /* == parent divided by 3 */
- {.div = 1}, /* == parent divided by 2 */
- {.div = 0}, /* == parent divided by 1 */
-};
-
-/* ahb clock */
-static struct clk ahb_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &bus_calc_rate,
- .recalc = &bus_clk_recalc,
- .set_rate = &bus_clk_set_rate,
- .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
- .private_data = &ahb_config,
-};
-
-/* auxiliary synthesizers masks */
-static struct aux_clk_masks aux_masks = {
- .eq_sel_mask = AUX_EQ_SEL_MASK,
- .eq_sel_shift = AUX_EQ_SEL_SHIFT,
- .eq1_mask = AUX_EQ1_SEL,
- .eq2_mask = AUX_EQ2_SEL,
- .xscale_sel_mask = AUX_XSCALE_MASK,
- .xscale_sel_shift = AUX_XSCALE_SHIFT,
- .yscale_sel_mask = AUX_YSCALE_MASK,
- .yscale_sel_shift = AUX_YSCALE_SHIFT,
-};
-
-/* uart synth configurations */
-static struct aux_clk_config uart_synth_config = {
- .synth_reg = UART_CLK_SYNT,
- .masks = &aux_masks,
-};
-
-/* aux rate configuration table, in ascending order of rates */
-struct aux_rate_tbl aux_rtbl[] = {
- /* For PLL1 = 332 MHz */
- {.xscale = 1, .yscale = 8, .eq = 1}, /* 41.5 MHz */
- {.xscale = 1, .yscale = 4, .eq = 1}, /* 83 MHz */
- {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
-};
-
-/* uart synth clock */
-static struct clk uart_synth_clk = {
- .en_reg = UART_CLK_SYNT,
- .en_reg_bit = AUX_SYNT_ENB,
- .pclk = &pll1_clk,
- .calc_rate = &aux_calc_rate,
- .recalc = &aux_clk_recalc,
- .set_rate = &aux_clk_set_rate,
- .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 1},
- .private_data = &uart_synth_config,
-};
-
-/* uart parents */
-static struct pclk_info uart_pclk_info[] = {
- {
- .pclk = &uart_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* uart parent select structure */
-static struct pclk_sel uart_pclk_sel = {
- .pclk_info = uart_pclk_info,
- .pclk_count = ARRAY_SIZE(uart_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = UART_CLK_MASK,
-};
-
-/* uart clock */
-static struct clk uart_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = UART_CLK_ENB,
- .pclk_sel = &uart_pclk_sel,
- .pclk_sel_shift = UART_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* firda configurations */
-static struct aux_clk_config firda_synth_config = {
- .synth_reg = FIRDA_CLK_SYNT,
- .masks = &aux_masks,
-};
-
-/* firda synth clock */
-static struct clk firda_synth_clk = {
- .en_reg = FIRDA_CLK_SYNT,
- .en_reg_bit = AUX_SYNT_ENB,
- .pclk = &pll1_clk,
- .calc_rate = &aux_calc_rate,
- .recalc = &aux_clk_recalc,
- .set_rate = &aux_clk_set_rate,
- .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 1},
- .private_data = &firda_synth_config,
-};
-
-/* firda parents */
-static struct pclk_info firda_pclk_info[] = {
- {
- .pclk = &firda_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* firda parent select structure */
-static struct pclk_sel firda_pclk_sel = {
- .pclk_info = firda_pclk_info,
- .pclk_count = ARRAY_SIZE(firda_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = FIRDA_CLK_MASK,
-};
-
-/* firda clock */
-static struct clk firda_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = FIRDA_CLK_ENB,
- .pclk_sel = &firda_pclk_sel,
- .pclk_sel_shift = FIRDA_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* gpt synthesizer masks */
-static struct gpt_clk_masks gpt_masks = {
- .mscale_sel_mask = GPT_MSCALE_MASK,
- .mscale_sel_shift = GPT_MSCALE_SHIFT,
- .nscale_sel_mask = GPT_NSCALE_MASK,
- .nscale_sel_shift = GPT_NSCALE_SHIFT,
-};
-
-/* gpt rate configuration table, in ascending order of rates */
-struct gpt_rate_tbl gpt_rtbl[] = {
- /* For pll1 = 332 MHz */
- {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
- {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
- {.mscale = 1, .nscale = 0}, /* 83 MHz */
-};
-
-/* gpt0 synth clk config*/
-static struct gpt_clk_config gpt0_synth_config = {
- .synth_reg = PRSC1_CLK_CFG,
- .masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt0_synth_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &gpt_calc_rate,
- .recalc = &gpt_clk_recalc,
- .set_rate = &gpt_clk_set_rate,
- .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
- .private_data = &gpt0_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt0_pclk_info[] = {
- {
- .pclk = &gpt0_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt0_pclk_sel = {
- .pclk_info = gpt0_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt0_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt0 timer clock */
-static struct clk gpt0_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk_sel = &gpt0_pclk_sel,
- .pclk_sel_shift = GPT0_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* gpt1 synth clk configurations */
-static struct gpt_clk_config gpt1_synth_config = {
- .synth_reg = PRSC2_CLK_CFG,
- .masks = &gpt_masks,
-};
-
-/* gpt1 synth clock */
-static struct clk gpt1_synth_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &gpt_calc_rate,
- .recalc = &gpt_clk_recalc,
- .set_rate = &gpt_clk_set_rate,
- .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
- .private_data = &gpt1_synth_config,
-};
-
-static struct pclk_info gpt1_pclk_info[] = {
- {
- .pclk = &gpt1_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt1_pclk_sel = {
- .pclk_info = gpt1_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt1_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt1 timer clock */
-static struct clk gpt1_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GPT1_CLK_ENB,
- .pclk_sel = &gpt1_pclk_sel,
- .pclk_sel_shift = GPT1_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* gpt2 synth clk configurations */
-static struct gpt_clk_config gpt2_synth_config = {
- .synth_reg = PRSC3_CLK_CFG,
- .masks = &gpt_masks,
-};
-
-/* gpt2 synth clock */
-static struct clk gpt2_synth_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &gpt_calc_rate,
- .recalc = &gpt_clk_recalc,
- .set_rate = &gpt_clk_set_rate,
- .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
- .private_data = &gpt2_synth_config,
-};
-
-static struct pclk_info gpt2_pclk_info[] = {
- {
- .pclk = &gpt2_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt2_pclk_sel = {
- .pclk_info = gpt2_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt2_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt2 timer clock */
-static struct clk gpt2_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GPT2_CLK_ENB,
- .pclk_sel = &gpt2_pclk_sel,
- .pclk_sel_shift = GPT2_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* clock derived from pll3 clk */
-/* usbh clock */
-static struct clk usbh_clk = {
- .pclk = &pll3_48m_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = USBH_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* usbd clock */
-static struct clk usbd_clk = {
- .pclk = &pll3_48m_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = USBD_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* clock derived from usbh clk */
-/* usbh0 clock */
-static struct clk usbh0_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &usbh_clk,
- .recalc = &follow_parent,
-};
-
-/* usbh1 clock */
-static struct clk usbh1_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &usbh_clk,
- .recalc = &follow_parent,
-};
-
-/* clock derived from ahb clk */
-/* apb masks structure */
-static struct bus_clk_masks apb_masks = {
- .mask = HCLK_PCLK_RATIO_MASK,
- .shift = HCLK_PCLK_RATIO_SHIFT,
-};
-
-/* apb configuration structure */
-static struct bus_clk_config apb_config = {
- .reg = CORE_CLK_CFG,
- .masks = &apb_masks,
-};
-
-/* apb clock */
-static struct clk apb_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &ahb_clk,
- .calc_rate = &bus_calc_rate,
- .recalc = &bus_clk_recalc,
- .set_rate = &bus_clk_set_rate,
- .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
- .private_data = &apb_config,
-};
-
-/* i2c clock */
-static struct clk i2c_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = I2C_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* dma clock */
-static struct clk dma_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = DMA_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* jpeg clock */
-static struct clk jpeg_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = JPEG_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* gmac clock */
-static struct clk gmac_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GMAC_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* smi clock */
-static struct clk smi_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = SMI_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* c3 clock */
-static struct clk c3_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = C3_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* clock derived from apb clk */
-/* adc clock */
-static struct clk adc_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = ADC_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
-/* emi clock */
-static struct clk emi_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &ahb_clk,
- .recalc = &follow_parent,
-};
-#endif
-
-/* ssp clock */
-static struct clk ssp0_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = SSP_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* gpio clock */
-static struct clk gpio_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GPIO_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-static struct clk dummy_apb_pclk;
-
-#if defined(CONFIG_MACH_SPEAR300) || defined(CONFIG_MACH_SPEAR310) || \
- defined(CONFIG_MACH_SPEAR320)
-/* fsmc clock */
-static struct clk fsmc_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &ahb_clk,
- .recalc = &follow_parent,
-};
-#endif
-
-/* common clocks to spear310 and spear320 */
-#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
-/* uart1 clock */
-static struct clk uart1_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* uart2 clock */
-static struct clk uart2_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-#endif /* CONFIG_MACH_SPEAR310 || CONFIG_MACH_SPEAR320 */
-
-/* common clocks to spear300 and spear320 */
-#if defined(CONFIG_MACH_SPEAR300) || defined(CONFIG_MACH_SPEAR320)
-/* clcd clock */
-static struct clk clcd_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll3_48m_clk,
- .recalc = &follow_parent,
-};
-
-/* sdhci clock */
-static struct clk sdhci_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &ahb_clk,
- .recalc = &follow_parent,
-};
-#endif /* CONFIG_MACH_SPEAR300 || CONFIG_MACH_SPEAR320 */
-
-/* spear300 machine specific clock structures */
-#ifdef CONFIG_MACH_SPEAR300
-/* gpio1 clock */
-static struct clk gpio1_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* keyboard clock */
-static struct clk kbd_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-#endif
-
-/* spear310 machine specific clock structures */
-#ifdef CONFIG_MACH_SPEAR310
-/* uart3 clock */
-static struct clk uart3_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* uart4 clock */
-static struct clk uart4_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* uart5 clock */
-static struct clk uart5_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-#endif
-
-/* spear320 machine specific clock structures */
-#ifdef CONFIG_MACH_SPEAR320
-/* can0 clock */
-static struct clk can0_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* can1 clock */
-static struct clk can1_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* i2c1 clock */
-static struct clk i2c1_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &ahb_clk,
- .recalc = &follow_parent,
-};
-
-/* ssp1 clock */
-static struct clk ssp1_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* ssp2 clock */
-static struct clk ssp2_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* pwm clock */
-static struct clk pwm_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-#endif
-
-/* array of all spear 3xx clock lookups */
-static struct clk_lookup spear_clk_lookups[] = {
- CLKDEV_INIT(NULL, "apb_pclk", &dummy_apb_pclk),
- /* root clks */
- CLKDEV_INIT(NULL, "osc_32k_clk", &osc_32k_clk),
- CLKDEV_INIT(NULL, "osc_24m_clk", &osc_24m_clk),
- /* clock derived from 32 KHz osc clk */
- CLKDEV_INIT("fc900000.rtc", NULL, &rtc_clk),
- /* clock derived from 24 MHz osc clk */
- CLKDEV_INIT(NULL, "pll1_clk", &pll1_clk),
- CLKDEV_INIT(NULL, "pll3_48m_clk", &pll3_48m_clk),
- CLKDEV_INIT("fc880000.wdt", NULL, &wdt_clk),
- /* clock derived from pll1 clk */
- CLKDEV_INIT(NULL, "cpu_clk", &cpu_clk),
- CLKDEV_INIT(NULL, "ahb_clk", &ahb_clk),
- CLKDEV_INIT(NULL, "uart_synth_clk", &uart_synth_clk),
- CLKDEV_INIT(NULL, "firda_synth_clk", &firda_synth_clk),
- CLKDEV_INIT(NULL, "gpt0_synth_clk", &gpt0_synth_clk),
- CLKDEV_INIT(NULL, "gpt1_synth_clk", &gpt1_synth_clk),
- CLKDEV_INIT(NULL, "gpt2_synth_clk", &gpt2_synth_clk),
- CLKDEV_INIT("d0000000.serial", NULL, &uart_clk),
- CLKDEV_INIT("firda", NULL, &firda_clk),
- CLKDEV_INIT("gpt0", NULL, &gpt0_clk),
- CLKDEV_INIT("gpt1", NULL, &gpt1_clk),
- CLKDEV_INIT("gpt2", NULL, &gpt2_clk),
- /* clock derived from pll3 clk */
- CLKDEV_INIT("designware_udc", NULL, &usbd_clk),
- CLKDEV_INIT(NULL, "usbh_clk", &usbh_clk),
- /* clock derived from usbh clk */
- CLKDEV_INIT(NULL, "usbh.0_clk", &usbh0_clk),
- CLKDEV_INIT(NULL, "usbh.1_clk", &usbh1_clk),
- /* clock derived from ahb clk */
- CLKDEV_INIT(NULL, "apb_clk", &apb_clk),
- CLKDEV_INIT("d0180000.i2c", NULL, &i2c_clk),
- CLKDEV_INIT("fc400000.dma", NULL, &dma_clk),
- CLKDEV_INIT("jpeg", NULL, &jpeg_clk),
- CLKDEV_INIT("e0800000.eth", NULL, &gmac_clk),
- CLKDEV_INIT("fc000000.flash", NULL, &smi_clk),
- CLKDEV_INIT("c3", NULL, &c3_clk),
- /* clock derived from apb clk */
- CLKDEV_INIT("adc", NULL, &adc_clk),
- CLKDEV_INIT("d0100000.spi", NULL, &ssp0_clk),
- CLKDEV_INIT("fc980000.gpio", NULL, &gpio_clk),
-};
-
-/* array of all spear 300 clock lookups */
-#ifdef CONFIG_MACH_SPEAR300
-static struct clk_lookup spear300_clk_lookups[] = {
- CLKDEV_INIT("60000000.clcd", NULL, &clcd_clk),
- CLKDEV_INIT("94000000.flash", NULL, &fsmc_clk),
- CLKDEV_INIT("a9000000.gpio", NULL, &gpio1_clk),
- CLKDEV_INIT("a0000000.kbd", NULL, &kbd_clk),
- CLKDEV_INIT("70000000.sdhci", NULL, &sdhci_clk),
-};
-
-void __init spear300_clk_init(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
- clk_register(&spear_clk_lookups[i]);
-
- for (i = 0; i < ARRAY_SIZE(spear300_clk_lookups); i++)
- clk_register(&spear300_clk_lookups[i]);
-
- clk_init();
-}
-#endif
-
-/* array of all spear 310 clock lookups */
-#ifdef CONFIG_MACH_SPEAR310
-static struct clk_lookup spear310_clk_lookups[] = {
- CLKDEV_INIT("44000000.flash", NULL, &fsmc_clk),
- CLKDEV_INIT(NULL, "emi", &emi_clk),
- CLKDEV_INIT("b2000000.serial", NULL, &uart1_clk),
- CLKDEV_INIT("b2080000.serial", NULL, &uart2_clk),
- CLKDEV_INIT("b2100000.serial", NULL, &uart3_clk),
- CLKDEV_INIT("b2180000.serial", NULL, &uart4_clk),
- CLKDEV_INIT("b2200000.serial", NULL, &uart5_clk),
-};
-
-void __init spear310_clk_init(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
- clk_register(&spear_clk_lookups[i]);
-
- for (i = 0; i < ARRAY_SIZE(spear310_clk_lookups); i++)
- clk_register(&spear310_clk_lookups[i]);
-
- clk_init();
-}
-#endif
-
-/* array of all spear 320 clock lookups */
-#ifdef CONFIG_MACH_SPEAR320
-static struct clk_lookup spear320_clk_lookups[] = {
- CLKDEV_INIT("90000000.clcd", NULL, &clcd_clk),
- CLKDEV_INIT("4c000000.flash", NULL, &fsmc_clk),
- CLKDEV_INIT("a7000000.i2c", NULL, &i2c1_clk),
- CLKDEV_INIT(NULL, "emi", &emi_clk),
- CLKDEV_INIT("pwm", NULL, &pwm_clk),
- CLKDEV_INIT("70000000.sdhci", NULL, &sdhci_clk),
- CLKDEV_INIT("c_can_platform.0", NULL, &can0_clk),
- CLKDEV_INIT("c_can_platform.1", NULL, &can1_clk),
- CLKDEV_INIT("a5000000.spi", NULL, &ssp1_clk),
- CLKDEV_INIT("a6000000.spi", NULL, &ssp2_clk),
- CLKDEV_INIT("a3000000.serial", NULL, &uart1_clk),
- CLKDEV_INIT("a4000000.serial", NULL, &uart2_clk),
-};
-
-void __init spear320_clk_init(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
- clk_register(&spear_clk_lookups[i]);
-
- for (i = 0; i < ARRAY_SIZE(spear320_clk_lookups); i++)
- clk_register(&spear320_clk_lookups[i]);
-
- clk_init();
-}
-#endif
diff --git a/arch/arm/mach-spear3xx/include/mach/generic.h b/arch/arm/mach-spear3xx/include/mach/generic.h
index bdb304551caf..4a95b9453c2a 100644
--- a/arch/arm/mach-spear3xx/include/mach/generic.h
+++ b/arch/arm/mach-spear3xx/include/mach/generic.h
@@ -27,28 +27,11 @@ extern struct pl022_ssp_controller pl022_plat_data;
extern struct pl08x_platform_data pl080_plat_data;
/* Add spear3xx family function declarations here */
-void __init spear_setup_timer(resource_size_t base, int irq);
+void __init spear_setup_of_timer(void);
+void __init spear3xx_clk_init(void);
void __init spear3xx_map_io(void);
void __init spear3xx_dt_init_irq(void);
void spear_restart(char, const char *);
-/* spear300 declarations */
-#ifdef CONFIG_MACH_SPEAR300
-void __init spear300_clk_init(void);
-
-#endif /* CONFIG_MACH_SPEAR300 */
-
-/* spear310 declarations */
-#ifdef CONFIG_MACH_SPEAR310
-void __init spear310_clk_init(void);
-
-#endif /* CONFIG_MACH_SPEAR310 */
-
-/* spear320 declarations */
-#ifdef CONFIG_MACH_SPEAR320
-void __init spear320_clk_init(void);
-
-#endif /* CONFIG_MACH_SPEAR320 */
-
#endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/irqs.h b/arch/arm/mach-spear3xx/include/mach/irqs.h
index 319620a1afb4..51bd62a0254c 100644
--- a/arch/arm/mach-spear3xx/include/mach/irqs.h
+++ b/arch/arm/mach-spear3xx/include/mach/irqs.h
@@ -16,7 +16,6 @@
/* FIXME: probe all these from DT */
#define SPEAR3XX_IRQ_INTRCOMM_RAS_ARM 1
-#define SPEAR3XX_IRQ_CPU_GPT1_1 2
#define SPEAR3XX_IRQ_GEN_RAS_1 28
#define SPEAR3XX_IRQ_GEN_RAS_2 29
#define SPEAR3XX_IRQ_GEN_RAS_3 30
diff --git a/arch/arm/mach-spear3xx/include/mach/misc_regs.h b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
index e0ab72e61507..18e2ac576f25 100644
--- a/arch/arm/mach-spear3xx/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
@@ -14,6 +14,8 @@
#ifndef __MACH_MISC_REGS_H
#define __MACH_MISC_REGS_H
+#include <mach/spear.h>
+
#define MISC_BASE IOMEM(VA_SPEAR3XX_ICM3_MISC_REG_BASE)
#define DMA_CHN_CFG (MISC_BASE + 0x0A0)
diff --git a/arch/arm/mach-spear3xx/include/mach/spear.h b/arch/arm/mach-spear3xx/include/mach/spear.h
index 6d4dadc67633..51eb953148a9 100644
--- a/arch/arm/mach-spear3xx/include/mach/spear.h
+++ b/arch/arm/mach-spear3xx/include/mach/spear.h
@@ -26,7 +26,6 @@
/* ML1 - Multi Layer CPU Subsystem */
#define SPEAR3XX_ICM3_ML1_2_BASE UL(0xF0000000)
#define VA_SPEAR6XX_ML_CPU_BASE UL(0xF0000000)
-#define SPEAR3XX_CPU_TMR_BASE UL(0xF0000000)
/* ICM3 - Basic Subsystem */
#define SPEAR3XX_ICM3_SMI_CTRL_BASE UL(0xFC000000)
@@ -45,4 +44,17 @@
#define SPEAR_SYS_CTRL_BASE SPEAR3XX_ICM3_SYS_CTRL_BASE
#define VA_SPEAR_SYS_CTRL_BASE VA_SPEAR3XX_ICM3_SYS_CTRL_BASE
+/* SPEAr320 Macros */
+#define SPEAR320_SOC_CONFIG_BASE UL(0xB3000000)
+#define VA_SPEAR320_SOC_CONFIG_BASE UL(0xFE000000)
+#define SPEAR320_CONTROL_REG IOMEM(VA_SPEAR320_SOC_CONFIG_BASE)
+#define SPEAR320_EXT_CTRL_REG IOMEM(VA_SPEAR320_SOC_CONFIG_BASE + 0x0018)
+ #define SPEAR320_UARTX_PCLK_MASK 0x1
+ #define SPEAR320_UART2_PCLK_SHIFT 8
+ #define SPEAR320_UART3_PCLK_SHIFT 9
+ #define SPEAR320_UART4_PCLK_SHIFT 10
+ #define SPEAR320_UART5_PCLK_SHIFT 11
+ #define SPEAR320_UART6_PCLK_SHIFT 12
+ #define SPEAR320_RS485_PCLK_SHIFT 13
+
#endif /* __MACH_SPEAR3XX_H */
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
index f75fe25a620c..f74a05bdb829 100644
--- a/arch/arm/mach-spear3xx/spear300.c
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -337,7 +337,6 @@ static const char * const spear300_dt_board_compat[] = {
static void __init spear300_map_io(void)
{
spear3xx_map_io();
- spear300_clk_init();
}
DT_MACHINE_START(SPEAR300_DT, "ST SPEAr300 SoC with Flattened Device Tree")
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
index f0842a58dc02..84dfb0900747 100644
--- a/arch/arm/mach-spear3xx/spear310.c
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -478,7 +478,6 @@ static const char * const spear310_dt_board_compat[] = {
static void __init spear310_map_io(void)
{
spear3xx_map_io();
- spear310_clk_init();
}
DT_MACHINE_START(SPEAR310_DT, "ST SPEAr310 SoC with Flattened Device Tree")
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
index e8caeef50a5c..a88fa841d29d 100644
--- a/arch/arm/mach-spear3xx/spear320.c
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -27,7 +27,6 @@
#define SPEAR320_UART2_BASE UL(0xA4000000)
#define SPEAR320_SSP0_BASE UL(0xA5000000)
#define SPEAR320_SSP1_BASE UL(0xA6000000)
-#define SPEAR320_SOC_CONFIG_BASE UL(0xB3000000)
/* Interrupt registers offsets and masks */
#define SPEAR320_INT_STS_MASK_REG 0x04
@@ -481,10 +480,19 @@ static const char * const spear320_dt_board_compat[] = {
NULL,
};
+struct map_desc spear320_io_desc[] __initdata = {
+ {
+ .virtual = VA_SPEAR320_SOC_CONFIG_BASE,
+ .pfn = __phys_to_pfn(SPEAR320_SOC_CONFIG_BASE),
+ .length = SZ_16M,
+ .type = MT_DEVICE
+ },
+};
+
static void __init spear320_map_io(void)
{
+ iotable_init(spear320_io_desc, ARRAY_SIZE(spear320_io_desc));
spear3xx_map_io();
- spear320_clk_init();
}
DT_MACHINE_START(SPEAR320_DT, "ST SPEAr320 SoC with Flattened Device Tree")
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index 826ac20ef1e7..f22419ed74a8 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -90,6 +90,8 @@ static void __init spear3xx_timer_init(void)
char pclk_name[] = "pll3_48m_clk";
struct clk *gpt_clk, *pclk;
+ spear3xx_clk_init();
+
/* get the system timer clock */
gpt_clk = clk_get_sys("gpt0", NULL);
if (IS_ERR(gpt_clk)) {
@@ -109,7 +111,7 @@ static void __init spear3xx_timer_init(void)
clk_put(gpt_clk);
clk_put(pclk);
- spear_setup_timer(SPEAR3XX_CPU_TMR_BASE, SPEAR3XX_IRQ_CPU_GPT1_1);
+ spear_setup_of_timer();
}
struct sys_timer spear3xx_timer = {
diff --git a/arch/arm/mach-spear6xx/Makefile b/arch/arm/mach-spear6xx/Makefile
index 76e5750552fc..898831d93f37 100644
--- a/arch/arm/mach-spear6xx/Makefile
+++ b/arch/arm/mach-spear6xx/Makefile
@@ -3,4 +3,4 @@
#
# common files
-obj-y += clock.o spear6xx.o
+obj-y += spear6xx.o
diff --git a/arch/arm/mach-spear6xx/clock.c b/arch/arm/mach-spear6xx/clock.c
deleted file mode 100644
index bef77d43db87..000000000000
--- a/arch/arm/mach-spear6xx/clock.c
+++ /dev/null
@@ -1,789 +0,0 @@
-/*
- * arch/arm/mach-spear6xx/clock.c
- *
- * SPEAr6xx machines clock framework source file
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <plat/clock.h>
-#include <mach/misc_regs.h>
-#include <mach/spear.h>
-
-#define PLL1_CTR (MISC_BASE + 0x008)
-#define PLL1_FRQ (MISC_BASE + 0x00C)
-#define PLL1_MOD (MISC_BASE + 0x010)
-#define PLL2_CTR (MISC_BASE + 0x014)
-/* PLL_CTR register masks */
-#define PLL_ENABLE 2
-#define PLL_MODE_SHIFT 4
-#define PLL_MODE_MASK 0x3
-#define PLL_MODE_NORMAL 0
-#define PLL_MODE_FRACTION 1
-#define PLL_MODE_DITH_DSB 2
-#define PLL_MODE_DITH_SSB 3
-
-#define PLL2_FRQ (MISC_BASE + 0x018)
-/* PLL FRQ register masks */
-#define PLL_DIV_N_SHIFT 0
-#define PLL_DIV_N_MASK 0xFF
-#define PLL_DIV_P_SHIFT 8
-#define PLL_DIV_P_MASK 0x7
-#define PLL_NORM_FDBK_M_SHIFT 24
-#define PLL_NORM_FDBK_M_MASK 0xFF
-#define PLL_DITH_FDBK_M_SHIFT 16
-#define PLL_DITH_FDBK_M_MASK 0xFFFF
-
-#define PLL2_MOD (MISC_BASE + 0x01C)
-#define PLL_CLK_CFG (MISC_BASE + 0x020)
-#define CORE_CLK_CFG (MISC_BASE + 0x024)
-/* CORE CLK CFG register masks */
-#define PLL_HCLK_RATIO_SHIFT 10
-#define PLL_HCLK_RATIO_MASK 0x3
-#define HCLK_PCLK_RATIO_SHIFT 8
-#define HCLK_PCLK_RATIO_MASK 0x3
-
-#define PERIP_CLK_CFG (MISC_BASE + 0x028)
-/* PERIP_CLK_CFG register masks */
-#define CLCD_CLK_SHIFT 2
-#define CLCD_CLK_MASK 0x3
-#define UART_CLK_SHIFT 4
-#define UART_CLK_MASK 0x1
-#define FIRDA_CLK_SHIFT 5
-#define FIRDA_CLK_MASK 0x3
-#define GPT0_CLK_SHIFT 8
-#define GPT1_CLK_SHIFT 10
-#define GPT2_CLK_SHIFT 11
-#define GPT3_CLK_SHIFT 12
-#define GPT_CLK_MASK 0x1
-#define AUX_CLK_PLL3_VAL 0
-#define AUX_CLK_PLL1_VAL 1
-
-#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
-/* PERIP1_CLK_ENB register masks */
-#define UART0_CLK_ENB 3
-#define UART1_CLK_ENB 4
-#define SSP0_CLK_ENB 5
-#define SSP1_CLK_ENB 6
-#define I2C_CLK_ENB 7
-#define JPEG_CLK_ENB 8
-#define FSMC_CLK_ENB 9
-#define FIRDA_CLK_ENB 10
-#define GPT2_CLK_ENB 11
-#define GPT3_CLK_ENB 12
-#define GPIO2_CLK_ENB 13
-#define SSP2_CLK_ENB 14
-#define ADC_CLK_ENB 15
-#define GPT1_CLK_ENB 11
-#define RTC_CLK_ENB 17
-#define GPIO1_CLK_ENB 18
-#define DMA_CLK_ENB 19
-#define SMI_CLK_ENB 21
-#define CLCD_CLK_ENB 22
-#define GMAC_CLK_ENB 23
-#define USBD_CLK_ENB 24
-#define USBH0_CLK_ENB 25
-#define USBH1_CLK_ENB 26
-
-#define PRSC1_CLK_CFG (MISC_BASE + 0x044)
-#define PRSC2_CLK_CFG (MISC_BASE + 0x048)
-#define PRSC3_CLK_CFG (MISC_BASE + 0x04C)
-/* gpt synthesizer register masks */
-#define GPT_MSCALE_SHIFT 0
-#define GPT_MSCALE_MASK 0xFFF
-#define GPT_NSCALE_SHIFT 12
-#define GPT_NSCALE_MASK 0xF
-
-#define AMEM_CLK_CFG (MISC_BASE + 0x050)
-#define EXPI_CLK_CFG (MISC_BASE + 0x054)
-#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
-#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
-#define UART_CLK_SYNT (MISC_BASE + 0x064)
-#define GMAC_CLK_SYNT (MISC_BASE + 0x068)
-#define RAS1_CLK_SYNT (MISC_BASE + 0x06C)
-#define RAS2_CLK_SYNT (MISC_BASE + 0x070)
-#define RAS3_CLK_SYNT (MISC_BASE + 0x074)
-#define RAS4_CLK_SYNT (MISC_BASE + 0x078)
-/* aux clk synthesiser register masks for irda to ras4 */
-#define AUX_SYNT_ENB 31
-#define AUX_EQ_SEL_SHIFT 30
-#define AUX_EQ_SEL_MASK 1
-#define AUX_EQ1_SEL 0
-#define AUX_EQ2_SEL 1
-#define AUX_XSCALE_SHIFT 16
-#define AUX_XSCALE_MASK 0xFFF
-#define AUX_YSCALE_SHIFT 0
-#define AUX_YSCALE_MASK 0xFFF
-
-/* root clks */
-/* 32 KHz oscillator clock */
-static struct clk osc_32k_clk = {
- .flags = ALWAYS_ENABLED,
- .rate = 32000,
-};
-
-/* 30 MHz oscillator clock */
-static struct clk osc_30m_clk = {
- .flags = ALWAYS_ENABLED,
- .rate = 30000000,
-};
-
-/* clock derived from 32 KHz osc clk */
-/* rtc clock */
-static struct clk rtc_clk = {
- .pclk = &osc_32k_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = RTC_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* clock derived from 30 MHz osc clk */
-/* pll masks structure */
-static struct pll_clk_masks pll1_masks = {
- .mode_mask = PLL_MODE_MASK,
- .mode_shift = PLL_MODE_SHIFT,
- .norm_fdbk_m_mask = PLL_NORM_FDBK_M_MASK,
- .norm_fdbk_m_shift = PLL_NORM_FDBK_M_SHIFT,
- .dith_fdbk_m_mask = PLL_DITH_FDBK_M_MASK,
- .dith_fdbk_m_shift = PLL_DITH_FDBK_M_SHIFT,
- .div_p_mask = PLL_DIV_P_MASK,
- .div_p_shift = PLL_DIV_P_SHIFT,
- .div_n_mask = PLL_DIV_N_MASK,
- .div_n_shift = PLL_DIV_N_SHIFT,
-};
-
-/* pll1 configuration structure */
-static struct pll_clk_config pll1_config = {
- .mode_reg = PLL1_CTR,
- .cfg_reg = PLL1_FRQ,
- .masks = &pll1_masks,
-};
-
-/* pll rate configuration table, in ascending order of rates */
-struct pll_rate_tbl pll_rtbl[] = {
- {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* 266 MHz */
- {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* 332 MHz */
-};
-
-/* PLL1 clock */
-static struct clk pll1_clk = {
- .flags = ENABLED_ON_INIT,
- .pclk = &osc_30m_clk,
- .en_reg = PLL1_CTR,
- .en_reg_bit = PLL_ENABLE,
- .calc_rate = &pll_calc_rate,
- .recalc = &pll_clk_recalc,
- .set_rate = &pll_clk_set_rate,
- .rate_config = {pll_rtbl, ARRAY_SIZE(pll_rtbl), 1},
- .private_data = &pll1_config,
-};
-
-/* PLL3 48 MHz clock */
-static struct clk pll3_48m_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &osc_30m_clk,
- .rate = 48000000,
-};
-
-/* watch dog timer clock */
-static struct clk wdt_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &osc_30m_clk,
- .recalc = &follow_parent,
-};
-
-/* clock derived from pll1 clk */
-/* cpu clock */
-static struct clk cpu_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .recalc = &follow_parent,
-};
-
-/* ahb masks structure */
-static struct bus_clk_masks ahb_masks = {
- .mask = PLL_HCLK_RATIO_MASK,
- .shift = PLL_HCLK_RATIO_SHIFT,
-};
-
-/* ahb configuration structure */
-static struct bus_clk_config ahb_config = {
- .reg = CORE_CLK_CFG,
- .masks = &ahb_masks,
-};
-
-/* ahb rate configuration table, in ascending order of rates */
-struct bus_rate_tbl bus_rtbl[] = {
- {.div = 3}, /* == parent divided by 4 */
- {.div = 2}, /* == parent divided by 3 */
- {.div = 1}, /* == parent divided by 2 */
- {.div = 0}, /* == parent divided by 1 */
-};
-
-/* ahb clock */
-static struct clk ahb_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &bus_calc_rate,
- .recalc = &bus_clk_recalc,
- .set_rate = &bus_clk_set_rate,
- .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
- .private_data = &ahb_config,
-};
-
-/* auxiliary synthesizers masks */
-static struct aux_clk_masks aux_masks = {
- .eq_sel_mask = AUX_EQ_SEL_MASK,
- .eq_sel_shift = AUX_EQ_SEL_SHIFT,
- .eq1_mask = AUX_EQ1_SEL,
- .eq2_mask = AUX_EQ2_SEL,
- .xscale_sel_mask = AUX_XSCALE_MASK,
- .xscale_sel_shift = AUX_XSCALE_SHIFT,
- .yscale_sel_mask = AUX_YSCALE_MASK,
- .yscale_sel_shift = AUX_YSCALE_SHIFT,
-};
-
-/* uart configurations */
-static struct aux_clk_config uart_synth_config = {
- .synth_reg = UART_CLK_SYNT,
- .masks = &aux_masks,
-};
-
-/* aux rate configuration table, in ascending order of rates */
-struct aux_rate_tbl aux_rtbl[] = {
- /* For PLL1 = 332 MHz */
- {.xscale = 1, .yscale = 8, .eq = 1}, /* 41.5 MHz */
- {.xscale = 1, .yscale = 4, .eq = 1}, /* 83 MHz */
- {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
-};
-
-/* uart synth clock */
-static struct clk uart_synth_clk = {
- .en_reg = UART_CLK_SYNT,
- .en_reg_bit = AUX_SYNT_ENB,
- .pclk = &pll1_clk,
- .calc_rate = &aux_calc_rate,
- .recalc = &aux_clk_recalc,
- .set_rate = &aux_clk_set_rate,
- .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
- .private_data = &uart_synth_config,
-};
-
-/* uart parents */
-static struct pclk_info uart_pclk_info[] = {
- {
- .pclk = &uart_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* uart parent select structure */
-static struct pclk_sel uart_pclk_sel = {
- .pclk_info = uart_pclk_info,
- .pclk_count = ARRAY_SIZE(uart_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = UART_CLK_MASK,
-};
-
-/* uart0 clock */
-static struct clk uart0_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = UART0_CLK_ENB,
- .pclk_sel = &uart_pclk_sel,
- .pclk_sel_shift = UART_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* uart1 clock */
-static struct clk uart1_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = UART1_CLK_ENB,
- .pclk_sel = &uart_pclk_sel,
- .pclk_sel_shift = UART_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* firda configurations */
-static struct aux_clk_config firda_synth_config = {
- .synth_reg = FIRDA_CLK_SYNT,
- .masks = &aux_masks,
-};
-
-/* firda synth clock */
-static struct clk firda_synth_clk = {
- .en_reg = FIRDA_CLK_SYNT,
- .en_reg_bit = AUX_SYNT_ENB,
- .pclk = &pll1_clk,
- .calc_rate = &aux_calc_rate,
- .recalc = &aux_clk_recalc,
- .set_rate = &aux_clk_set_rate,
- .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
- .private_data = &firda_synth_config,
-};
-
-/* firda parents */
-static struct pclk_info firda_pclk_info[] = {
- {
- .pclk = &firda_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* firda parent select structure */
-static struct pclk_sel firda_pclk_sel = {
- .pclk_info = firda_pclk_info,
- .pclk_count = ARRAY_SIZE(firda_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = FIRDA_CLK_MASK,
-};
-
-/* firda clock */
-static struct clk firda_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = FIRDA_CLK_ENB,
- .pclk_sel = &firda_pclk_sel,
- .pclk_sel_shift = FIRDA_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* clcd configurations */
-static struct aux_clk_config clcd_synth_config = {
- .synth_reg = CLCD_CLK_SYNT,
- .masks = &aux_masks,
-};
-
-/* clcd synth clock */
-static struct clk clcd_synth_clk = {
- .en_reg = CLCD_CLK_SYNT,
- .en_reg_bit = AUX_SYNT_ENB,
- .pclk = &pll1_clk,
- .calc_rate = &aux_calc_rate,
- .recalc = &aux_clk_recalc,
- .set_rate = &aux_clk_set_rate,
- .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
- .private_data = &clcd_synth_config,
-};
-
-/* clcd parents */
-static struct pclk_info clcd_pclk_info[] = {
- {
- .pclk = &clcd_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* clcd parent select structure */
-static struct pclk_sel clcd_pclk_sel = {
- .pclk_info = clcd_pclk_info,
- .pclk_count = ARRAY_SIZE(clcd_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = CLCD_CLK_MASK,
-};
-
-/* clcd clock */
-static struct clk clcd_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = CLCD_CLK_ENB,
- .pclk_sel = &clcd_pclk_sel,
- .pclk_sel_shift = CLCD_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* gpt synthesizer masks */
-static struct gpt_clk_masks gpt_masks = {
- .mscale_sel_mask = GPT_MSCALE_MASK,
- .mscale_sel_shift = GPT_MSCALE_SHIFT,
- .nscale_sel_mask = GPT_NSCALE_MASK,
- .nscale_sel_shift = GPT_NSCALE_SHIFT,
-};
-
-/* gpt rate configuration table, in ascending order of rates */
-struct gpt_rate_tbl gpt_rtbl[] = {
- /* For pll1 = 332 MHz */
- {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
- {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
- {.mscale = 1, .nscale = 0}, /* 83 MHz */
-};
-
-/* gpt0 synth clk config*/
-static struct gpt_clk_config gpt0_synth_config = {
- .synth_reg = PRSC1_CLK_CFG,
- .masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt0_synth_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &gpt_calc_rate,
- .recalc = &gpt_clk_recalc,
- .set_rate = &gpt_clk_set_rate,
- .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
- .private_data = &gpt0_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt0_pclk_info[] = {
- {
- .pclk = &gpt0_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt0_pclk_sel = {
- .pclk_info = gpt0_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt0_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt0 ARM1 subsystem timer clock */
-static struct clk gpt0_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk_sel = &gpt0_pclk_sel,
- .pclk_sel_shift = GPT0_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-
-/* Note: gpt0 and gpt1 share same parent clocks */
-/* gpt parent select structure */
-static struct pclk_sel gpt1_pclk_sel = {
- .pclk_info = gpt0_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt0_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt1 timer clock */
-static struct clk gpt1_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk_sel = &gpt1_pclk_sel,
- .pclk_sel_shift = GPT1_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* gpt2 synth clk config*/
-static struct gpt_clk_config gpt2_synth_config = {
- .synth_reg = PRSC2_CLK_CFG,
- .masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt2_synth_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &gpt_calc_rate,
- .recalc = &gpt_clk_recalc,
- .set_rate = &gpt_clk_set_rate,
- .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
- .private_data = &gpt2_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt2_pclk_info[] = {
- {
- .pclk = &gpt2_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt2_pclk_sel = {
- .pclk_info = gpt2_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt2_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt2 timer clock */
-static struct clk gpt2_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk_sel = &gpt2_pclk_sel,
- .pclk_sel_shift = GPT2_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* gpt3 synth clk config*/
-static struct gpt_clk_config gpt3_synth_config = {
- .synth_reg = PRSC3_CLK_CFG,
- .masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt3_synth_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &gpt_calc_rate,
- .recalc = &gpt_clk_recalc,
- .set_rate = &gpt_clk_set_rate,
- .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
- .private_data = &gpt3_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt3_pclk_info[] = {
- {
- .pclk = &gpt3_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt3_pclk_sel = {
- .pclk_info = gpt3_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt3_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt3 timer clock */
-static struct clk gpt3_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk_sel = &gpt3_pclk_sel,
- .pclk_sel_shift = GPT3_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* clock derived from pll3 clk */
-/* usbh0 clock */
-static struct clk usbh0_clk = {
- .pclk = &pll3_48m_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = USBH0_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* usbh1 clock */
-static struct clk usbh1_clk = {
- .pclk = &pll3_48m_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = USBH1_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* usbd clock */
-static struct clk usbd_clk = {
- .pclk = &pll3_48m_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = USBD_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* clock derived from ahb clk */
-/* apb masks structure */
-static struct bus_clk_masks apb_masks = {
- .mask = HCLK_PCLK_RATIO_MASK,
- .shift = HCLK_PCLK_RATIO_SHIFT,
-};
-
-/* apb configuration structure */
-static struct bus_clk_config apb_config = {
- .reg = CORE_CLK_CFG,
- .masks = &apb_masks,
-};
-
-/* apb clock */
-static struct clk apb_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &ahb_clk,
- .calc_rate = &bus_calc_rate,
- .recalc = &bus_clk_recalc,
- .set_rate = &bus_clk_set_rate,
- .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
- .private_data = &apb_config,
-};
-
-/* i2c clock */
-static struct clk i2c_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = I2C_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* dma clock */
-static struct clk dma_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = DMA_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* jpeg clock */
-static struct clk jpeg_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = JPEG_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* gmac clock */
-static struct clk gmac_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GMAC_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* smi clock */
-static struct clk smi_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = SMI_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* fsmc clock */
-static struct clk fsmc_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = FSMC_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* clock derived from apb clk */
-/* adc clock */
-static struct clk adc_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = ADC_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* ssp0 clock */
-static struct clk ssp0_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = SSP0_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* ssp1 clock */
-static struct clk ssp1_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = SSP1_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* ssp2 clock */
-static struct clk ssp2_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = SSP2_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* gpio0 ARM subsystem clock */
-static struct clk gpio0_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* gpio1 clock */
-static struct clk gpio1_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GPIO1_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* gpio2 clock */
-static struct clk gpio2_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GPIO2_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-static struct clk dummy_apb_pclk;
-
-/* array of all spear 6xx clock lookups */
-static struct clk_lookup spear_clk_lookups[] = {
- CLKDEV_INIT(NULL, "apb_pclk", &dummy_apb_pclk),
- /* root clks */
- CLKDEV_INIT(NULL, "osc_32k_clk", &osc_32k_clk),
- CLKDEV_INIT(NULL, "osc_30m_clk", &osc_30m_clk),
- /* clock derived from 32 KHz os clk */
- CLKDEV_INIT("rtc-spear", NULL, &rtc_clk),
- /* clock derived from 30 MHz os clk */
- CLKDEV_INIT(NULL, "pll1_clk", &pll1_clk),
- CLKDEV_INIT(NULL, "pll3_48m_clk", &pll3_48m_clk),
- CLKDEV_INIT("wdt", NULL, &wdt_clk),
- /* clock derived from pll1 clk */
- CLKDEV_INIT(NULL, "cpu_clk", &cpu_clk),
- CLKDEV_INIT(NULL, "ahb_clk", &ahb_clk),
- CLKDEV_INIT(NULL, "uart_synth_clk", &uart_synth_clk),
- CLKDEV_INIT(NULL, "firda_synth_clk", &firda_synth_clk),
- CLKDEV_INIT(NULL, "clcd_synth_clk", &clcd_synth_clk),
- CLKDEV_INIT(NULL, "gpt0_synth_clk", &gpt0_synth_clk),
- CLKDEV_INIT(NULL, "gpt2_synth_clk", &gpt2_synth_clk),
- CLKDEV_INIT(NULL, "gpt3_synth_clk", &gpt3_synth_clk),
- CLKDEV_INIT("d0000000.serial", NULL, &uart0_clk),
- CLKDEV_INIT("d0080000.serial", NULL, &uart1_clk),
- CLKDEV_INIT("firda", NULL, &firda_clk),
- CLKDEV_INIT("clcd", NULL, &clcd_clk),
- CLKDEV_INIT("gpt0", NULL, &gpt0_clk),
- CLKDEV_INIT("gpt1", NULL, &gpt1_clk),
- CLKDEV_INIT("gpt2", NULL, &gpt2_clk),
- CLKDEV_INIT("gpt3", NULL, &gpt3_clk),
- /* clock derived from pll3 clk */
- CLKDEV_INIT("designware_udc", NULL, &usbd_clk),
- CLKDEV_INIT(NULL, "usbh.0_clk", &usbh0_clk),
- CLKDEV_INIT(NULL, "usbh.1_clk", &usbh1_clk),
- /* clock derived from ahb clk */
- CLKDEV_INIT(NULL, "apb_clk", &apb_clk),
- CLKDEV_INIT("d0200000.i2c", NULL, &i2c_clk),
- CLKDEV_INIT("fc400000.dma", NULL, &dma_clk),
- CLKDEV_INIT("jpeg", NULL, &jpeg_clk),
- CLKDEV_INIT("gmac", NULL, &gmac_clk),
- CLKDEV_INIT("fc000000.flash", NULL, &smi_clk),
- CLKDEV_INIT("d1800000.flash", NULL, &fsmc_clk),
- /* clock derived from apb clk */
- CLKDEV_INIT("adc", NULL, &adc_clk),
- CLKDEV_INIT("ssp-pl022.0", NULL, &ssp0_clk),
- CLKDEV_INIT("ssp-pl022.1", NULL, &ssp1_clk),
- CLKDEV_INIT("ssp-pl022.2", NULL, &ssp2_clk),
- CLKDEV_INIT("f0100000.gpio", NULL, &gpio0_clk),
- CLKDEV_INIT("fc980000.gpio", NULL, &gpio1_clk),
- CLKDEV_INIT("d8100000.gpio", NULL, &gpio2_clk),
-};
-
-void __init spear6xx_clk_init(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
- clk_register(&spear_clk_lookups[i]);
-
- clk_init();
-}
diff --git a/arch/arm/mach-spear6xx/include/mach/generic.h b/arch/arm/mach-spear6xx/include/mach/generic.h
index 7167fd331d86..65514b159370 100644
--- a/arch/arm/mach-spear6xx/include/mach/generic.h
+++ b/arch/arm/mach-spear6xx/include/mach/generic.h
@@ -16,7 +16,7 @@
#include <linux/init.h>
-void __init spear_setup_timer(resource_size_t base, int irq);
+void __init spear_setup_of_timer(void);
void spear_restart(char, const char *);
void __init spear6xx_clk_init(void);
diff --git a/arch/arm/mach-spear6xx/include/mach/irqs.h b/arch/arm/mach-spear6xx/include/mach/irqs.h
index 2b735389e74b..37a5c411a866 100644
--- a/arch/arm/mach-spear6xx/include/mach/irqs.h
+++ b/arch/arm/mach-spear6xx/include/mach/irqs.h
@@ -16,9 +16,6 @@
/* IRQ definitions */
/* VIC 1 */
-/* FIXME: probe this from DT */
-#define IRQ_CPU_GPT1_1 16
-
#define IRQ_VIC_END 64
/* GPIO pins virtual irqs */
diff --git a/arch/arm/mach-spear6xx/include/mach/misc_regs.h b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
index 2b9aaa6cdd11..179e45774b3a 100644
--- a/arch/arm/mach-spear6xx/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
@@ -14,6 +14,8 @@
#ifndef __MACH_MISC_REGS_H
#define __MACH_MISC_REGS_H
+#include <mach/spear.h>
+
#define MISC_BASE IOMEM(VA_SPEAR6XX_ICM3_MISC_REG_BASE)
#define DMA_CHN_CFG (MISC_BASE + 0x0A0)
diff --git a/arch/arm/mach-spear6xx/include/mach/spear.h b/arch/arm/mach-spear6xx/include/mach/spear.h
index d278ed047a53..cb8ed2f4dc85 100644
--- a/arch/arm/mach-spear6xx/include/mach/spear.h
+++ b/arch/arm/mach-spear6xx/include/mach/spear.h
@@ -25,7 +25,6 @@
/* ML-1, 2 - Multi Layer CPU Subsystem */
#define SPEAR6XX_ML_CPU_BASE UL(0xF0000000)
#define VA_SPEAR6XX_ML_CPU_BASE UL(0xF0000000)
-#define SPEAR6XX_CPU_TMR_BASE UL(0xF0000000)
/* ICM3 - Basic Subsystem */
#define SPEAR6XX_ICM3_SMI_CTRL_BASE UL(0xFC000000)
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
index de194dbb8371..2e2e3596583e 100644
--- a/arch/arm/mach-spear6xx/spear6xx.c
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -419,9 +419,6 @@ struct map_desc spear6xx_io_desc[] __initdata = {
void __init spear6xx_map_io(void)
{
iotable_init(spear6xx_io_desc, ARRAY_SIZE(spear6xx_io_desc));
-
- /* This will initialize clock framework */
- spear6xx_clk_init();
}
static void __init spear6xx_timer_init(void)
@@ -429,6 +426,8 @@ static void __init spear6xx_timer_init(void)
char pclk_name[] = "pll3_48m_clk";
struct clk *gpt_clk, *pclk;
+ spear6xx_clk_init();
+
/* get the system timer clock */
gpt_clk = clk_get_sys("gpt0", NULL);
if (IS_ERR(gpt_clk)) {
@@ -448,7 +447,7 @@ static void __init spear6xx_timer_init(void)
clk_put(gpt_clk);
clk_put(pclk);
- spear_setup_timer(SPEAR6XX_CPU_TMR_BASE, IRQ_CPU_GPT1_1);
+ spear_setup_of_timer();
}
struct sys_timer spear6xx_timer = {
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index d0f2546706ca..6a113a9bb87a 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -50,6 +50,14 @@ config TEGRA_PCI
depends on ARCH_TEGRA_2x_SOC
select PCI
+config TEGRA_AHB
+ bool "Enable AHB driver for NVIDIA Tegra SoCs"
+ default y
+ help
+ Adds AHB configuration functionality for NVIDIA Tegra SoCs,
+ which controls AHB bus master arbitration and some
+ performance parameters (priority, prefetch size).
+
comment "Tegra board type"
config MACH_HARMONY
@@ -111,7 +119,7 @@ config MACH_VENTANA
Support for the nVidia Ventana development platform
choice
- prompt "Low-level debug console UART"
+ prompt "Default low-level debug console UART"
default TEGRA_DEBUG_UART_NONE
config TEGRA_DEBUG_UART_NONE
@@ -134,6 +142,33 @@ config TEGRA_DEBUG_UARTE
endchoice
+choice
+ prompt "Automatic low-level debug console UART"
+ default TEGRA_DEBUG_UART_AUTO_NONE
+
+config TEGRA_DEBUG_UART_AUTO_NONE
+ bool "None"
+
+config TEGRA_DEBUG_UART_AUTO_ODMDATA
+ bool "Via ODMDATA"
+ help
+ Automatically determines which UART to use for low-level debug based
+ on the ODMDATA value. This value is part of the BCT, and is written
+ to the boot memory device using nvflash or another flashing tool.
+ When bits 19:18 are 3, then bits 17:15 indicate which UART to use;
+ 0/1/2/3/4 are UART A/B/C/D/E.
+
+config TEGRA_DEBUG_UART_AUTO_SCRATCH
+ bool "Via UART scratch register"
+ help
+ Automatically determines which UART to use for low-level debug based
+ on the UART scratch register value. Some bootloaders put ASCII 'D'
+ in this register when they initialize their own console UART output.
+ Using this option allows the kernel to automatically pick the same
+ UART.
+
+endchoice
+
config TEGRA_SYSTEM_DMA
bool "Enable system DMA driver for NVIDIA Tegra SoCs"
default y
diff --git a/arch/arm/mach-tegra/board-dt-tegra20.c b/arch/arm/mach-tegra/board-dt-tegra20.c
index fac3eb1af17e..eb7249db50a5 100644
--- a/arch/arm/mach-tegra/board-dt-tegra20.c
+++ b/arch/arm/mach-tegra/board-dt-tegra20.c
@@ -110,6 +110,7 @@ DT_MACHINE_START(TEGRA_DT, "nVidia Tegra20 (Flattened Device Tree)")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_dt_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
.dt_compat = tegra20_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-dt-tegra30.c b/arch/arm/mach-tegra/board-dt-tegra30.c
index 5f7c03e972f3..4f76fa7a5da3 100644
--- a/arch/arm/mach-tegra/board-dt-tegra30.c
+++ b/arch/arm/mach-tegra/board-dt-tegra30.c
@@ -51,12 +51,22 @@ struct of_dev_auxdata tegra30_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000C500, "tegra-i2c.2", NULL),
OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000C700, "tegra-i2c.3", NULL),
OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000D000, "tegra-i2c.4", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra30-ahub", 0x70080000, "tegra30-ahub", NULL),
{}
};
static __initdata struct tegra_clk_init_table tegra_dt_clk_init_table[] = {
/* name parent rate enabled */
{ "uarta", "pll_p", 408000000, true },
+ { "pll_a", "pll_p_out1", 564480000, true },
+ { "pll_a_out0", "pll_a", 11289600, true },
+ { "extern1", "pll_a_out0", 0, true },
+ { "clk_out_1", "extern1", 0, true },
+ { "i2s0", "pll_a_out0", 11289600, false},
+ { "i2s1", "pll_a_out0", 11289600, false},
+ { "i2s2", "pll_a_out0", 11289600, false},
+ { "i2s3", "pll_a_out0", 11289600, false},
+ { "i2s4", "pll_a_out0", 11289600, false},
{ NULL, NULL, 0, 0},
};
@@ -80,6 +90,7 @@ DT_MACHINE_START(TEGRA30_DT, "NVIDIA Tegra30 (Flattened Device Tree)")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra30_dt_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
.dt_compat = tegra30_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-harmony.c b/arch/arm/mach-tegra/board-harmony.c
index b906b3b6077b..e65e837f4013 100644
--- a/arch/arm/mach-tegra/board-harmony.c
+++ b/arch/arm/mach-tegra/board-harmony.c
@@ -192,5 +192,6 @@ MACHINE_START(HARMONY, "harmony")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_harmony_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index d0735c70d688..bbc1907e98a6 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -162,6 +162,8 @@ static void paz00_i2c_init(void)
static void paz00_usb_init(void)
{
+ tegra_ehci2_ulpi_phy_config.reset_gpio = TEGRA_ULPI_RST;
+
platform_device_register(&tegra_ehci2_device);
platform_device_register(&tegra_ehci3_device);
}
@@ -179,7 +181,6 @@ static __initdata struct tegra_clk_init_table paz00_clk_init_table[] = {
{ "uarta", "pll_p", 216000000, true },
{ "uartc", "pll_p", 216000000, true },
- { "pll_p_out4", "pll_p", 24000000, true },
{ "usbd", "clk_m", 12000000, false },
{ "usb2", "clk_m", 12000000, false },
{ "usb3", "clk_m", 12000000, false },
@@ -224,5 +225,6 @@ MACHINE_START(PAZ00, "Toshiba AC100 / Dynabook AZ")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_paz00_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-seaboard.c b/arch/arm/mach-tegra/board-seaboard.c
index 79064c7a7907..71e9f3fc7fba 100644
--- a/arch/arm/mach-tegra/board-seaboard.c
+++ b/arch/arm/mach-tegra/board-seaboard.c
@@ -277,6 +277,7 @@ MACHINE_START(SEABOARD, "seaboard")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_seaboard_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
MACHINE_END
@@ -288,6 +289,7 @@ MACHINE_START(KAEN, "kaen")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_kaen_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
MACHINE_END
@@ -299,5 +301,6 @@ MACHINE_START(WARIO, "wario")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_wario_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-trimslice.c b/arch/arm/mach-tegra/board-trimslice.c
index bc59b379c6fe..776aa9564d5d 100644
--- a/arch/arm/mach-tegra/board-trimslice.c
+++ b/arch/arm/mach-tegra/board-trimslice.c
@@ -118,6 +118,8 @@ static void trimslice_usb_init(void)
pdata = tegra_ehci1_device.dev.platform_data;
pdata->vbus_gpio = TRIMSLICE_GPIO_USB1_MODE;
+ tegra_ehci2_ulpi_phy_config.reset_gpio = TEGRA_GPIO_PV0;
+
platform_device_register(&tegra_ehci3_device);
platform_device_register(&tegra_ehci2_device);
platform_device_register(&tegra_ehci1_device);
@@ -176,5 +178,6 @@ MACHINE_START(TRIMSLICE, "trimslice")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_trimslice_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h
index 75d1543d77c0..65014968fc6c 100644
--- a/arch/arm/mach-tegra/board.h
+++ b/arch/arm/mach-tegra/board.h
@@ -32,5 +32,19 @@ void __init tegra_init_irq(void);
void __init tegra_dt_init_irq(void);
int __init tegra_pcie_init(bool init_port0, bool init_port1);
+void tegra_init_late(void);
+
+#ifdef CONFIG_DEBUG_FS
+int tegra_clk_debugfs_init(void);
+#else
+static inline int tegra_clk_debugfs_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC) && defined(CONFIG_DEBUG_FS)
+int __init tegra_powergate_debugfs_init(void);
+#else
+static inline int tegra_powergate_debugfs_init(void) { return 0; }
+#endif
+
extern struct sys_timer tegra_timer;
#endif
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index 8dad8d18cb49..58f981c0819c 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -642,7 +642,7 @@ static int clk_debugfs_register(struct clk *c)
return 0;
}
-static int __init clk_debugfs_init(void)
+int __init tegra_clk_debugfs_init(void)
{
struct clk *c;
struct dentry *d;
@@ -669,5 +669,4 @@ err_out:
return err;
}
-late_initcall(clk_debugfs_init);
#endif
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 22df10fb9972..204a5c8b0b57 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -82,10 +82,12 @@ static __initdata struct tegra_clk_init_table tegra20_clk_init_table[] = {
{ "pll_p_out1", "pll_p", 28800000, true },
{ "pll_p_out2", "pll_p", 48000000, true },
{ "pll_p_out3", "pll_p", 72000000, true },
- { "pll_p_out4", "pll_p", 108000000, true },
- { "sclk", "pll_p_out4", 108000000, true },
- { "hclk", "sclk", 108000000, true },
- { "pclk", "hclk", 54000000, true },
+ { "pll_p_out4", "pll_p", 24000000, true },
+ { "pll_c", "clk_m", 600000000, true },
+ { "pll_c_out1", "pll_c", 120000000, true },
+ { "sclk", "pll_c_out1", 120000000, true },
+ { "hclk", "sclk", 120000000, true },
+ { "pclk", "hclk", 60000000, true },
{ "csite", NULL, 0, true },
{ "emc", NULL, 0, true },
{ "cpu", NULL, 0, true },
@@ -93,6 +95,17 @@ static __initdata struct tegra_clk_init_table tegra20_clk_init_table[] = {
};
#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+static __initdata struct tegra_clk_init_table tegra30_clk_init_table[] = {
+ /* name parent rate enabled */
+ { "clk_m", NULL, 0, true },
+ { "pll_p", "clk_m", 408000000, true },
+ { "pll_p_out1", "pll_p", 9600000, true },
+ { NULL, NULL, 0, 0},
+};
+#endif
+
+
static void __init tegra_init_cache(u32 tag_latency, u32 data_latency)
{
#ifdef CONFIG_CACHE_L2X0
@@ -127,8 +140,15 @@ void __init tegra30_init_early(void)
{
tegra_init_fuse();
tegra30_init_clocks();
+ tegra_clk_init_from_table(tegra30_clk_init_table);
tegra_init_cache(0x441, 0x551);
tegra_pmc_init();
tegra_powergate_init();
}
#endif
+
+void __init tegra_init_late(void)
+{
+ tegra_clk_debugfs_init();
+ tegra_powergate_debugfs_init();
+}
diff --git a/arch/arm/mach-tegra/devices.c b/arch/arm/mach-tegra/devices.c
index 2d8dfa2faf8f..c70e65ffa36b 100644
--- a/arch/arm/mach-tegra/devices.c
+++ b/arch/arm/mach-tegra/devices.c
@@ -439,9 +439,8 @@ static struct resource tegra_usb3_resources[] = {
},
};
-static struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config = {
- /* All existing boards use GPIO PV0 for phy reset */
- .reset_gpio = TEGRA_GPIO_PV0,
+struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config = {
+ .reset_gpio = -1,
.clk = "cdev2",
};
diff --git a/arch/arm/mach-tegra/devices.h b/arch/arm/mach-tegra/devices.h
index 138c642e59f4..4f5052726495 100644
--- a/arch/arm/mach-tegra/devices.h
+++ b/arch/arm/mach-tegra/devices.h
@@ -22,6 +22,10 @@
#include <linux/platform_device.h>
#include <linux/platform_data/tegra_usb.h>
+#include <mach/usb_phy.h>
+
+extern struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config;
+
extern struct tegra_ehci_platform_data tegra_ehci1_pdata;
extern struct tegra_ehci_platform_data tegra_ehci2_pdata;
extern struct tegra_ehci_platform_data tegra_ehci3_pdata;
diff --git a/arch/arm/mach-tegra/include/mach/tegra-ahb.h b/arch/arm/mach-tegra/include/mach/tegra-ahb.h
new file mode 100644
index 000000000000..e0f8c84b1d8c
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/tegra-ahb.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MACH_TEGRA_AHB_H__
+#define __MACH_TEGRA_AHB_H__
+
+extern int tegra_ahb_enable_smmu(struct device_node *ahb);
+
+#endif /* __MACH_TEGRA_AHB_H__ */
diff --git a/arch/arm/mach-tegra/include/mach/uncompress.h b/arch/arm/mach-tegra/include/mach/uncompress.h
index 5a440f315e57..937c4c50219e 100644
--- a/arch/arm/mach-tegra/include/mach/uncompress.h
+++ b/arch/arm/mach-tegra/include/mach/uncompress.h
@@ -63,52 +63,86 @@ static inline void save_uart_address(void)
buf[0] = 0;
}
-/*
- * Setup before decompression. This is where we do UART selection for
- * earlyprintk and init the uart_base register.
- */
-static inline void arch_decomp_setup(void)
+static const struct {
+ u32 base;
+ u32 reset_reg;
+ u32 clock_reg;
+ u32 bit;
+} uarts[] = {
+ {
+ TEGRA_UARTA_BASE,
+ TEGRA_CLK_RESET_BASE + 0x04,
+ TEGRA_CLK_RESET_BASE + 0x10,
+ 6,
+ },
+ {
+ TEGRA_UARTB_BASE,
+ TEGRA_CLK_RESET_BASE + 0x04,
+ TEGRA_CLK_RESET_BASE + 0x10,
+ 7,
+ },
+ {
+ TEGRA_UARTC_BASE,
+ TEGRA_CLK_RESET_BASE + 0x08,
+ TEGRA_CLK_RESET_BASE + 0x14,
+ 23,
+ },
+ {
+ TEGRA_UARTD_BASE,
+ TEGRA_CLK_RESET_BASE + 0x0c,
+ TEGRA_CLK_RESET_BASE + 0x18,
+ 1,
+ },
+ {
+ TEGRA_UARTE_BASE,
+ TEGRA_CLK_RESET_BASE + 0x0c,
+ TEGRA_CLK_RESET_BASE + 0x18,
+ 2,
+ },
+};
+
+static inline bool uart_clocked(int i)
+{
+ if (*(u8 *)uarts[i].reset_reg & BIT(uarts[i].bit))
+ return false;
+
+ if (!(*(u8 *)uarts[i].clock_reg & BIT(uarts[i].bit)))
+ return false;
+
+ return true;
+}
+
+#ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA
+int auto_odmdata(void)
+{
+ volatile u32 *pmc = (volatile u32 *)TEGRA_PMC_BASE;
+ u32 odmdata = pmc[0xa0 / 4];
+
+ /*
+ * Bits 19:18 are the console type: 0=default, 1=none, 2=DCC, 3=UART.
+ * Some boards apparently swap the last two values, but we don't have
+ * any way of catering for that here, so we just accept either. If this
+ * doesn't make sense for your board, just don't enable this feature.
+ *
+ * Bits 17:15 indicate the UART to use, 0/1/2/3/4 are UART A/B/C/D/E.
+ */
+
+ switch ((odmdata >> 18) & 3) {
+ case 2:
+ case 3:
+ break;
+ default:
+ return -1;
+ }
+
+ return (odmdata >> 15) & 7;
+}
+#endif
+
+#ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_SCRATCH
+int auto_scratch(void)
{
- static const struct {
- u32 base;
- u32 reset_reg;
- u32 clock_reg;
- u32 bit;
- } uarts[] = {
- {
- TEGRA_UARTA_BASE,
- TEGRA_CLK_RESET_BASE + 0x04,
- TEGRA_CLK_RESET_BASE + 0x10,
- 6,
- },
- {
- TEGRA_UARTB_BASE,
- TEGRA_CLK_RESET_BASE + 0x04,
- TEGRA_CLK_RESET_BASE + 0x10,
- 7,
- },
- {
- TEGRA_UARTC_BASE,
- TEGRA_CLK_RESET_BASE + 0x08,
- TEGRA_CLK_RESET_BASE + 0x14,
- 23,
- },
- {
- TEGRA_UARTD_BASE,
- TEGRA_CLK_RESET_BASE + 0x0c,
- TEGRA_CLK_RESET_BASE + 0x18,
- 1,
- },
- {
- TEGRA_UARTE_BASE,
- TEGRA_CLK_RESET_BASE + 0x0c,
- TEGRA_CLK_RESET_BASE + 0x18,
- 2,
- },
- };
int i;
- volatile u32 *apb_misc = (volatile u32 *)TEGRA_APB_MISC_BASE;
- u32 chip, div;
/*
* Look for the first UART that:
@@ -125,20 +159,60 @@ static inline void arch_decomp_setup(void)
* back to what's specified in TEGRA_DEBUG_UART_BASE.
*/
for (i = 0; i < ARRAY_SIZE(uarts); i++) {
- if (*(u8 *)uarts[i].reset_reg & BIT(uarts[i].bit))
- continue;
-
- if (!(*(u8 *)uarts[i].clock_reg & BIT(uarts[i].bit)))
+ if (!uart_clocked(i))
continue;
uart = (volatile u8 *)uarts[i].base;
if (uart[UART_SCR << DEBUG_UART_SHIFT] != 'D')
continue;
- break;
+ return i;
}
- if (i == ARRAY_SIZE(uarts))
- uart = (volatile u8 *)TEGRA_DEBUG_UART_BASE;
+
+ return -1;
+}
+#endif
+
+/*
+ * Setup before decompression. This is where we do UART selection for
+ * earlyprintk and init the uart_base register.
+ */
+static inline void arch_decomp_setup(void)
+{
+ int uart_id, auto_uart_id;
+ volatile u32 *apb_misc = (volatile u32 *)TEGRA_APB_MISC_BASE;
+ u32 chip, div;
+
+#if defined(CONFIG_TEGRA_DEBUG_UARTA)
+ uart_id = 0;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTB)
+ uart_id = 1;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTC)
+ uart_id = 2;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTD)
+ uart_id = 3;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTE)
+ uart_id = 4;
+#else
+ uart_id = -1;
+#endif
+
+#if defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
+ auto_uart_id = auto_odmdata();
+#elif defined(CONFIG_TEGRA_DEBUG_UART_AUTO_SCRATCH)
+ auto_uart_id = auto_scratch();
+#else
+ auto_uart_id = -1;
+#endif
+ if (auto_uart_id != -1)
+ uart_id = auto_uart_id;
+
+ if (uart_id < 0 || uart_id >= ARRAY_SIZE(uarts) ||
+ !uart_clocked(uart_id))
+ uart = NULL;
+ else
+ uart = (volatile u8 *)uarts[uart_id].base;
+
save_uart_address();
if (uart == NULL)
return;
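
For reference, the ODMDATA-based selection added above is plain bit twiddling. A standalone sketch of the decode step, mirroring the layout documented in the comment (the helper name is ours, not part of the patch):

	/* bits 19:18 = console type (2 or 3 selects a UART),
	 * bits 17:15 = UART index, 0..4 => UART A..E */
	static int odmdata_to_uart_index(u32 odmdata)
	{
		unsigned int console = (odmdata >> 18) & 3;

		if (console != 2 && console != 3)
			return -1;	/* console is "default" or "none" */
		return (odmdata >> 15) & 7;
	}
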
diff --git a/arch/arm/mach-tegra/include/mach/usb_phy.h b/arch/arm/mach-tegra/include/mach/usb_phy.h
index de1a0f602b28..935ce9f65590 100644
--- a/arch/arm/mach-tegra/include/mach/usb_phy.h
+++ b/arch/arm/mach-tegra/include/mach/usb_phy.h
@@ -61,8 +61,8 @@ struct tegra_usb_phy {
struct usb_phy *ulpi;
};
-struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
- void *config, enum tegra_usb_phy_mode phy_mode);
+struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
+ void __iomem *regs, void *config, enum tegra_usb_phy_mode phy_mode);
int tegra_usb_phy_power_on(struct tegra_usb_phy *phy);
diff --git a/arch/arm/mach-tegra/powergate.c b/arch/arm/mach-tegra/powergate.c
index c238699ae86f..f5b12fb4ff12 100644
--- a/arch/arm/mach-tegra/powergate.c
+++ b/arch/arm/mach-tegra/powergate.c
@@ -234,7 +234,7 @@ static const struct file_operations powergate_fops = {
.release = single_release,
};
-static int __init powergate_debugfs_init(void)
+int __init tegra_powergate_debugfs_init(void)
{
struct dentry *d;
int err = -ENOMEM;
@@ -247,6 +247,4 @@ static int __init powergate_debugfs_init(void)
return err;
}
-late_initcall(powergate_debugfs_init);
-
#endif
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index bae09b859891..b59315ce3691 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -1486,6 +1486,10 @@ static struct clk tegra_clk_m = {
};
static struct clk_pll_freq_table tegra_pll_c_freq_table[] = {
+ { 12000000, 600000000, 600, 12, 1, 8 },
+ { 13000000, 600000000, 600, 13, 1, 8 },
+ { 19200000, 600000000, 500, 16, 1, 6 },
+ { 26000000, 600000000, 600, 26, 1, 8 },
{ 0, 0, 0, 0, 0, 0 },
};
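
The new pll_c rows use the usual {input rate, output rate, n, m, p, cpcon} layout of struct clk_pll_freq_table, and each one satisfies output = input * n / (m * p); for example the 19.2 MHz entry gives 19.2 MHz * 500 / (16 * 1) = 600 MHz. The last field (cpcon, the PLL charge-pump setting) does not enter the rate calculation.
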
diff --git a/arch/arm/mach-tegra/tegra30_clocks.c b/arch/arm/mach-tegra/tegra30_clocks.c
index 6d08b53f92d2..e33fe4b14a2a 100644
--- a/arch/arm/mach-tegra/tegra30_clocks.c
+++ b/arch/arm/mach-tegra/tegra30_clocks.c
@@ -3015,6 +3015,15 @@ struct clk_duplicate tegra_clk_duplicates[] = {
CLK_DUPLICATE("sbc6", "spi_slave_tegra.5", NULL),
CLK_DUPLICATE("twd", "smp_twd", NULL),
CLK_DUPLICATE("vcp", "nvavp", "vcp"),
+ CLK_DUPLICATE("i2s0", NULL, "i2s0"),
+ CLK_DUPLICATE("i2s1", NULL, "i2s1"),
+ CLK_DUPLICATE("i2s2", NULL, "i2s2"),
+ CLK_DUPLICATE("i2s3", NULL, "i2s3"),
+ CLK_DUPLICATE("i2s4", NULL, "i2s4"),
+ CLK_DUPLICATE("dam0", NULL, "dam0"),
+ CLK_DUPLICATE("dam1", NULL, "dam1"),
+ CLK_DUPLICATE("dam2", NULL, "dam2"),
+ CLK_DUPLICATE("spdif_in", NULL, "spdif_in"),
};
struct clk *tegra_ptr_clks[] = {
diff --git a/arch/arm/mach-tegra/usb_phy.c b/arch/arm/mach-tegra/usb_phy.c
index d71d2fed6721..54e353c8e304 100644
--- a/arch/arm/mach-tegra/usb_phy.c
+++ b/arch/arm/mach-tegra/usb_phy.c
@@ -26,6 +26,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
#include <asm/mach-types.h>
@@ -654,8 +655,8 @@ static void ulpi_phy_power_off(struct tegra_usb_phy *phy)
clk_disable(phy->clk);
}
-struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
- void *config, enum tegra_usb_phy_mode phy_mode)
+struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
+ void __iomem *regs, void *config, enum tegra_usb_phy_mode phy_mode)
{
struct tegra_usb_phy *phy;
struct tegra_ulpi_config *ulpi_config;
@@ -711,6 +712,16 @@ struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
err = -ENXIO;
goto err1;
}
+ if (!gpio_is_valid(ulpi_config->reset_gpio))
+ ulpi_config->reset_gpio =
+ of_get_named_gpio(dev->of_node,
+ "nvidia,phy-reset-gpio", 0);
+ if (!gpio_is_valid(ulpi_config->reset_gpio)) {
+ pr_err("%s: invalid reset gpio: %d\n", __func__,
+ ulpi_config->reset_gpio);
+ err = -EINVAL;
+ goto err1;
+ }
gpio_request(ulpi_config->reset_gpio, "ulpi_phy_reset_b");
gpio_direction_output(ulpi_config->reset_gpio, 0);
phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0);
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index f943687acaf0..fba8adea421e 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -206,7 +206,7 @@ static struct resource ab8500_resources[] = {
};
struct platform_device ab8500_device = {
- .name = "ab8500-i2c",
+ .name = "ab8500-core",
.id = 0,
.dev = {
.platform_data = &ab8500_platdata,
@@ -785,6 +785,7 @@ MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
.timer = &ux500_timer,
.handle_irq = gic_handle_irq,
.init_machine = mop500_init_machine,
+ .init_late = ux500_init_late,
MACHINE_END
MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
@@ -794,6 +795,7 @@ MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
.timer = &ux500_timer,
.handle_irq = gic_handle_irq,
.init_machine = hrefv60_init_machine,
+ .init_late = ux500_init_late,
MACHINE_END
MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
@@ -804,6 +806,7 @@ MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
.timer = &ux500_timer,
.handle_irq = gic_handle_irq,
.init_machine = snowball_init_machine,
+ .init_late = ux500_init_late,
MACHINE_END
#ifdef CONFIG_MACH_UX500_DT
@@ -918,6 +921,7 @@ DT_MACHINE_START(U8500_DT, "ST-Ericsson U8500 platform (Device Tree Support)")
.timer = &ux500_timer,
.handle_irq = gic_handle_irq,
.init_machine = u8500_init_machine,
+ .init_late = ux500_init_late,
.dt_compat = u8500_dt_board_compat,
MACHINE_END
#endif
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index 1762c4728f1e..8d73b066a18d 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -635,7 +635,7 @@ static int clk_debugfs_register(struct clk *c)
return 0;
}
-static int __init clk_debugfs_init(void)
+int __init clk_debugfs_init(void)
{
struct clk *c;
struct dentry *d;
@@ -657,7 +657,6 @@ err_out:
return err;
}
-late_initcall(clk_debugfs_init);
#endif /* defined(CONFIG_DEBUG_FS) */
unsigned long clk_smp_twd_rate = 500000000;
@@ -696,12 +695,11 @@ static struct notifier_block clk_twd_cpufreq_nb = {
.notifier_call = clk_twd_cpufreq_transition,
};
-static int clk_init_smp_twd_cpufreq(void)
+int clk_init_smp_twd_cpufreq(void)
{
return cpufreq_register_notifier(&clk_twd_cpufreq_nb,
CPUFREQ_TRANSITION_NOTIFIER);
}
-late_initcall(clk_init_smp_twd_cpufreq);
#endif
diff --git a/arch/arm/mach-ux500/clock.h b/arch/arm/mach-ux500/clock.h
index d776ada08dbf..65d27a13f46d 100644
--- a/arch/arm/mach-ux500/clock.h
+++ b/arch/arm/mach-ux500/clock.h
@@ -150,3 +150,15 @@ struct clk clk_##_name = { \
int __init clk_db8500_ed_fixup(void);
int __init clk_init(void);
+
+#ifdef CONFIG_DEBUG_FS
+int clk_debugfs_init(void);
+#else
+static inline int clk_debugfs_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_CPU_FREQ
+int clk_init_smp_twd_cpufreq(void);
+#else
+static inline int clk_init_smp_twd_cpufreq(void) { return 0; }
+#endif
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index a29a0e3adcf9..e2360e7c770d 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -73,6 +73,12 @@ void __init ux500_init_irq(void)
clk_init();
}
+void __init ux500_init_late(void)
+{
+ clk_debugfs_init();
+ clk_init_smp_twd_cpufreq();
+}
+
static const char * __init ux500_get_machine(void)
{
return kasprintf(GFP_KERNEL, "DB%4x", dbx500_partnumber());
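
Taken together, the ux500 hunks in this series convert two late_initcall() users into a machine-specific .init_late hook, so the work runs only when a ux500 machine actually boots rather than in every image that includes this code. The shape of the conversion, condensed from the hunks above:

	/* before: registered unconditionally */
	late_initcall(clk_debugfs_init);
	late_initcall(clk_init_smp_twd_cpufreq);

	/* after: called from the machine descriptor's .init_late hook */
	void __init ux500_init_late(void)
	{
		clk_debugfs_init();
		clk_init_smp_twd_cpufreq();
	}

	MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
		/* ... */
		.init_late	= ux500_init_late,
	MACHINE_END
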
diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
index 4e369f1645ec..8b7ed82a2866 100644
--- a/arch/arm/mach-ux500/include/mach/setup.h
+++ b/arch/arm/mach-ux500/include/mach/setup.h
@@ -20,6 +20,7 @@ extern void __init u8500_map_io(void);
extern struct device * __init u8500_init_devices(void);
extern void __init ux500_init_irq(void);
+extern void __init ux500_init_late(void);
extern struct device *ux500_soc_device_init(const char *soc_id);
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 04dd092211b8..fde26adaef32 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -14,7 +14,6 @@
#include <linux/ata_platform.h>
#include <linux/smsc911x.h>
#include <linux/spinlock.h>
-#include <linux/device.h>
#include <linux/usb/isp1760.h>
#include <linux/clkdev.h>
#include <linux/mtd/physmap.h>
@@ -31,7 +30,6 @@
#include <asm/hardware/gic.h>
#include <asm/hardware/timer-sp.h>
#include <asm/hardware/sp810.h>
-#include <asm/hardware/gic.h>
#include <mach/ct-ca9x4.h>
#include <mach/motherboard.h>
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index db23ae4aaaab..ea6b43154090 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,8 +17,12 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
#include <asm/memory.h>
#include <asm/highmem.h>
@@ -26,9 +30,112 @@
#include <asm/tlbflush.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
+#include <asm/dma-iommu.h>
+#include <asm/mach/map.h>
+#include <asm/system_info.h>
+#include <asm/dma-contiguous.h>
#include "mm.h"
+/*
+ * The DMA API is built upon the notion of "buffer ownership". A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device. These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches. We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ */
+static void __dma_page_cpu_to_dev(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+static void __dma_page_dev_to_cpu(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+
+/**
+ * arm_dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed. The CPU
+ * can regain ownership by calling dma_unmap_page().
+ */
+static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+ return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+/**
+ * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Unmap a page streaming mode DMA translation. The handle and size
+ * must match what was provided in the previous dma_map_page() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+ handle & ~PAGE_MASK, size, dir);
+}
+
+static void arm_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned int offset = handle & (PAGE_SIZE - 1);
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned int offset = handle & (PAGE_SIZE - 1);
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
+struct dma_map_ops arm_dma_ops = {
+ .alloc = arm_dma_alloc,
+ .free = arm_dma_free,
+ .mmap = arm_dma_mmap,
+ .map_page = arm_dma_map_page,
+ .unmap_page = arm_dma_unmap_page,
+ .map_sg = arm_dma_map_sg,
+ .unmap_sg = arm_dma_unmap_sg,
+ .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
+ .sync_single_for_device = arm_dma_sync_single_for_device,
+ .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_dma_sync_sg_for_device,
+ .set_dma_mask = arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_dma_ops);
+
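
Drivers never call arm_dma_ops directly; the generic dma-mapping wrappers look the table up via get_dma_ops(dev) and dispatch to the methods above (or to the IOMMU variants once a device has been attached to an IOMMU mapping, see further down). As an illustration only, a single streaming mapping from driver code, which now lands in arm_dma_map_page()/arm_dma_unmap_page():

	/* inside a driver, with 'dev' and 'page' in scope */
	dma_addr_t dma;

	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... hardware performs the transfer using 'dma' ... */

	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
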
static u64 get_coherent_dma_mask(struct device *dev)
{
u64 mask = (u64)arm_dma_limit;
@@ -56,6 +163,21 @@ static u64 get_coherent_dma_mask(struct device *dev)
return mask;
}
+static void __dma_clear_buffer(struct page *page, size_t size)
+{
+ void *ptr;
+ /*
+ * Ensure that the allocated pages are zeroed, and that any data
+ * lurking in the kernel direct-mapped region is invalidated.
+ */
+ ptr = page_address(page);
+ if (ptr) {
+ memset(ptr, 0, size);
+ dmac_flush_range(ptr, ptr + size);
+ outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ }
+}
+
/*
* Allocate a DMA buffer for 'dev' of size 'size' using the
* specified gfp mask. Note that 'size' must be page aligned.
@@ -64,23 +186,6 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
{
unsigned long order = get_order(size);
struct page *page, *p, *e;
- void *ptr;
- u64 mask = get_coherent_dma_mask(dev);
-
-#ifdef CONFIG_DMA_API_DEBUG
- u64 limit = (mask + 1) & ~mask;
- if (limit && size >= limit) {
- dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
- size, mask);
- return NULL;
- }
-#endif
-
- if (!mask)
- return NULL;
-
- if (mask < 0xffffffffULL)
- gfp |= GFP_DMA;
page = alloc_pages(gfp, order);
if (!page)
@@ -93,14 +198,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
__free_page(p);
- /*
- * Ensure that the allocated pages are zeroed, and that any data
- * lurking in the kernel direct-mapped region is invalidated.
- */
- ptr = page_address(page);
- memset(ptr, 0, size);
- dmac_flush_range(ptr, ptr + size);
- outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ __dma_clear_buffer(page, size);
return page;
}
@@ -170,6 +268,11 @@ static int __init consistent_init(void)
unsigned long base = consistent_base;
unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
+#ifndef CONFIG_ARM_DMA_USE_IOMMU
+ if (cpu_architecture() >= CPU_ARCH_ARMv6)
+ return 0;
+#endif
+
consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
if (!consistent_pte) {
pr_err("%s: no memory\n", __func__);
@@ -184,14 +287,14 @@ static int __init consistent_init(void)
pud = pud_alloc(&init_mm, pgd, base);
if (!pud) {
- printk(KERN_ERR "%s: no pud tables\n", __func__);
+ pr_err("%s: no pud tables\n", __func__);
ret = -ENOMEM;
break;
}
pmd = pmd_alloc(&init_mm, pud, base);
if (!pmd) {
- printk(KERN_ERR "%s: no pmd tables\n", __func__);
+ pr_err("%s: no pmd tables\n", __func__);
ret = -ENOMEM;
break;
}
@@ -199,7 +302,7 @@ static int __init consistent_init(void)
pte = pte_alloc_kernel(pmd, base);
if (!pte) {
- printk(KERN_ERR "%s: no pte tables\n", __func__);
+ pr_err("%s: no pte tables\n", __func__);
ret = -ENOMEM;
break;
}
@@ -210,9 +313,101 @@ static int __init consistent_init(void)
return ret;
}
-
core_initcall(consistent_init);
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+ pgprot_t prot, struct page **ret_page);
+
+static struct arm_vmregion_head coherent_head = {
+ .vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
+ .vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
+};
+
+size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+
+static int __init early_coherent_pool(char *p)
+{
+ coherent_pool_size = memparse(p, &p);
+ return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
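
Since early_coherent_pool() parses its argument with memparse(), the size of this atomic pool can be overridden on the kernel command line with the usual size suffixes, for example:

	coherent_pool=2M
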
+
+/*
+ * Initialise the coherent pool for atomic allocations.
+ */
+static int __init coherent_init(void)
+{
+ pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+ size_t size = coherent_pool_size;
+ struct page *page;
+ void *ptr;
+
+ if (cpu_architecture() < CPU_ARCH_ARMv6)
+ return 0;
+
+ ptr = __alloc_from_contiguous(NULL, size, prot, &page);
+ if (ptr) {
+ coherent_head.vm_start = (unsigned long) ptr;
+ coherent_head.vm_end = (unsigned long) ptr + size;
+ printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
+ (unsigned)size / 1024);
+ return 0;
+ }
+ printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
+ (unsigned)size / 1024);
+ return -ENOMEM;
+}
+/*
+ * CMA is activated by core_initcall, so we must be called after it.
+ */
+postcore_initcall(coherent_init);
+
+struct dma_contig_early_reserve {
+ phys_addr_t base;
+ unsigned long size;
+};
+
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
+
+static int dma_mmu_remap_num __initdata;
+
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+ dma_mmu_remap[dma_mmu_remap_num].base = base;
+ dma_mmu_remap[dma_mmu_remap_num].size = size;
+ dma_mmu_remap_num++;
+}
+
+void __init dma_contiguous_remap(void)
+{
+ int i;
+ for (i = 0; i < dma_mmu_remap_num; i++) {
+ phys_addr_t start = dma_mmu_remap[i].base;
+ phys_addr_t end = start + dma_mmu_remap[i].size;
+ struct map_desc map;
+ unsigned long addr;
+
+ if (end > arm_lowmem_limit)
+ end = arm_lowmem_limit;
+ if (start >= end)
+ return;
+
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY_DMA_READY;
+
+ /*
+ * Clear previous low-memory mapping
+ */
+ for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+ addr += PMD_SIZE)
+ pmd_clear(pmd_off_k(addr));
+
+ iotable_init(&map, 1);
+ }
+}
+
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
const void *caller)
@@ -222,7 +417,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
int bit;
if (!consistent_pte) {
- printk(KERN_ERR "%s: not initialised\n", __func__);
+ pr_err("%s: not initialised\n", __func__);
dump_stack();
return NULL;
}
@@ -249,7 +444,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
pte = consistent_pte[idx] + off;
- c->vm_pages = page;
+ c->priv = page;
do {
BUG_ON(!pte_none(*pte));
@@ -281,14 +476,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
if (!c) {
- printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
+ pr_err("%s: trying to free invalid coherent area: %p\n",
__func__, cpu_addr);
dump_stack();
return;
}
if ((c->vm_end - c->vm_start) != size) {
- printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+ pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
__func__, c->vm_end - c->vm_start, size);
dump_stack();
size = c->vm_end - c->vm_start;
@@ -310,8 +505,8 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
}
if (pte_none(pte) || !pte_present(pte))
- printk(KERN_CRIT "%s: bad page in kernel page table\n",
- __func__);
+ pr_crit("%s: bad page in kernel page table\n",
+ __func__);
} while (size -= PAGE_SIZE);
flush_tlb_kernel_range(c->vm_start, c->vm_end);
@@ -319,20 +514,182 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
arm_vmregion_free(&consistent_head, c);
}
+static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ struct page *page = virt_to_page(addr);
+ pgprot_t prot = *(pgprot_t *)data;
+
+ set_pte_ext(pte, mk_pte(page, prot), 0);
+ return 0;
+}
+
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+{
+ unsigned long start = (unsigned long) page_address(page);
+ unsigned end = start + size;
+
+ apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
+ dsb();
+ flush_tlb_kernel_range(start, end);
+}
+
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+ pgprot_t prot, struct page **ret_page,
+ const void *caller)
+{
+ struct page *page;
+ void *ptr;
+ page = __dma_alloc_buffer(dev, size, gfp);
+ if (!page)
+ return NULL;
+
+ ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
+ if (!ptr) {
+ __dma_free_buffer(page, size);
+ return NULL;
+ }
+
+ *ret_page = page;
+ return ptr;
+}
+
+static void *__alloc_from_pool(struct device *dev, size_t size,
+ struct page **ret_page, const void *caller)
+{
+ struct arm_vmregion *c;
+ size_t align;
+
+ if (!coherent_head.vm_start) {
+ printk(KERN_ERR "%s: coherent pool not initialised!\n",
+ __func__);
+ dump_stack();
+ return NULL;
+ }
+
+ /*
+ * Align the region allocation - allocations from pool are rather
+ * small, so align them to their order in pages, minimum is a page
+ * size. This helps reduce fragmentation of the DMA space.
+ */
+ align = PAGE_SIZE << get_order(size);
+ c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
+ if (c) {
+ void *ptr = (void *)c->vm_start;
+ struct page *page = virt_to_page(ptr);
+ *ret_page = page;
+ return ptr;
+ }
+ return NULL;
+}
+
+static int __free_from_pool(void *cpu_addr, size_t size)
+{
+ unsigned long start = (unsigned long)cpu_addr;
+ unsigned long end = start + size;
+ struct arm_vmregion *c;
+
+ if (start < coherent_head.vm_start || end > coherent_head.vm_end)
+ return 0;
+
+ c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start);
+
+ if ((c->vm_end - c->vm_start) != size) {
+ printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+ __func__, c->vm_end - c->vm_start, size);
+ dump_stack();
+ size = c->vm_end - c->vm_start;
+ }
+
+ arm_vmregion_free(&coherent_head, c);
+ return 1;
+}
+
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+ pgprot_t prot, struct page **ret_page)
+{
+ unsigned long order = get_order(size);
+ size_t count = size >> PAGE_SHIFT;
+ struct page *page;
+
+ page = dma_alloc_from_contiguous(dev, count, order);
+ if (!page)
+ return NULL;
+
+ __dma_clear_buffer(page, size);
+ __dma_remap(page, size, prot);
+
+ *ret_page = page;
+ return page_address(page);
+}
+
+static void __free_from_contiguous(struct device *dev, struct page *page,
+ size_t size)
+{
+ __dma_remap(page, size, pgprot_kernel);
+ dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+}
+
+static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+{
+ prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
+ pgprot_writecombine(prot) :
+ pgprot_dmacoherent(prot);
+ return prot;
+}
+
+#define nommu() 0
+
#else /* !CONFIG_MMU */
-#define __dma_alloc_remap(page, size, gfp, prot, c) page_address(page)
-#define __dma_free_remap(addr, size) do { } while (0)
+#define nommu() 1
+
+#define __get_dma_pgprot(attrs, prot) __pgprot(0)
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
+#define __alloc_from_pool(dev, size, ret_page, c) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret) NULL
+#define __free_from_pool(cpu_addr, size) 0
+#define __free_from_contiguous(dev, page, size) do { } while (0)
+#define __dma_free_remap(cpu_addr, size) do { } while (0)
#endif /* CONFIG_MMU */
-static void *
-__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
- pgprot_t prot, const void *caller)
+static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
+ struct page **ret_page)
+{
+ struct page *page;
+ page = __dma_alloc_buffer(dev, size, gfp);
+ if (!page)
+ return NULL;
+
+ *ret_page = page;
+ return page_address(page);
+}
+
+
+
+static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, pgprot_t prot, const void *caller)
{
+ u64 mask = get_coherent_dma_mask(dev);
struct page *page;
void *addr;
+#ifdef CONFIG_DMA_API_DEBUG
+ u64 limit = (mask + 1) & ~mask;
+ if (limit && size >= limit) {
+ dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
+ size, mask);
+ return NULL;
+ }
+#endif
+
+ if (!mask)
+ return NULL;
+
+ if (mask < 0xffffffffULL)
+ gfp |= GFP_DMA;
+
/*
* Following is a work-around (a.k.a. hack) to prevent pages
* with __GFP_COMP being passed to split_page() which cannot
@@ -342,22 +699,20 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
*/
gfp &= ~(__GFP_COMP);
- *handle = ~0;
+ *handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
- page = __dma_alloc_buffer(dev, size, gfp);
- if (!page)
- return NULL;
-
- if (!arch_is_coherent())
- addr = __dma_alloc_remap(page, size, gfp, prot, caller);
+ if (arch_is_coherent() || nommu())
+ addr = __alloc_simple_buffer(dev, size, gfp, &page);
+ else if (cpu_architecture() < CPU_ARCH_ARMv6)
+ addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+ else if (gfp & GFP_ATOMIC)
+ addr = __alloc_from_pool(dev, size, &page, caller);
else
- addr = page_address(page);
+ addr = __alloc_from_contiguous(dev, size, prot, &page);
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
- else
- __dma_free_buffer(page, size);
return addr;
}
@@ -366,138 +721,71 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
* Allocate DMA-coherent memory space and return both the kernel remapped
* virtual and bus address for that space.
*/
-void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, struct dma_attrs *attrs)
{
+ pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
return memory;
- return __dma_alloc(dev, size, handle, gfp,
- pgprot_dmacoherent(pgprot_kernel),
+ return __dma_alloc(dev, size, handle, gfp, prot,
__builtin_return_address(0));
}
-EXPORT_SYMBOL(dma_alloc_coherent);
/*
- * Allocate a writecombining region, in much the same way as
- * dma_alloc_coherent above.
+ * Create a userspace mapping for the DMA-coherent memory.
*/
-void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
-{
- return __dma_alloc(dev, size, handle, gfp,
- pgprot_writecombine(pgprot_kernel),
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(dma_alloc_writecombine);
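
The writecombine allocator does not disappear from a driver's point of view: with arm_dma_alloc() honouring DMA_ATTR_WRITE_COMBINE through __get_dma_pgprot(), the old behaviour is expressed via the attrs-based entry points. A sketch, assuming the dma_alloc_attrs() wrapper added on the dma-mapping.h side of this series:

	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t dma;
	void *buf;

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	buf = dma_alloc_attrs(dev, SZ_4K, &dma, GFP_KERNEL, &attrs);
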
-
-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
+int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
{
int ret = -ENXIO;
#ifdef CONFIG_MMU
- unsigned long user_size, kern_size;
- struct arm_vmregion *c;
+ unsigned long pfn = dma_to_pfn(dev, dma_addr);
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
- user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
- c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
- if (c) {
- unsigned long off = vma->vm_pgoff;
-
- kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
-
- if (off < kern_size &&
- user_size <= (kern_size - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- page_to_pfn(c->vm_pages) + off,
- user_size << PAGE_SHIFT,
- vma->vm_page_prot);
- }
- }
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
#endif /* CONFIG_MMU */
return ret;
}
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_coherent);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_writecombine);
-
/*
- * free a page as defined by the above mapping.
- * Must not be called with IRQs disabled.
+ * Free a buffer as defined by the above mapping.
*/
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
{
- WARN_ON(irqs_disabled());
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
return;
size = PAGE_ALIGN(size);
- if (!arch_is_coherent())
+ if (arch_is_coherent() || nommu()) {
+ __dma_free_buffer(page, size);
+ } else if (cpu_architecture() < CPU_ARCH_ARMv6) {
__dma_free_remap(cpu_addr, size);
-
- __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
-}
-EXPORT_SYMBOL(dma_free_coherent);
-
-/*
- * Make an area consistent for devices.
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
- */
-void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- unsigned long paddr;
-
- BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
- dmac_map_area(kaddr, size, dir);
-
- paddr = __pa(kaddr);
- if (dir == DMA_FROM_DEVICE) {
- outer_inv_range(paddr, paddr + size);
+ __dma_free_buffer(page, size);
} else {
- outer_clean_range(paddr, paddr + size);
- }
- /* FIXME: non-speculating: flush on bidirectional mappings? */
-}
-EXPORT_SYMBOL(___dma_single_cpu_to_dev);
-
-void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
- /* FIXME: non-speculating: not required */
- /* don't bother invalidating if DMA to device */
- if (dir != DMA_TO_DEVICE) {
- unsigned long paddr = __pa(kaddr);
- outer_inv_range(paddr, paddr + size);
+ if (__free_from_pool(cpu_addr, size))
+ return;
+ /*
+ * Non-atomic allocations cannot be freed with IRQs disabled
+ */
+ WARN_ON(irqs_disabled());
+ __free_from_contiguous(dev, page, size);
}
-
- dmac_unmap_area(kaddr, size, dir);
}
-EXPORT_SYMBOL(___dma_single_dev_to_cpu);
static void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
@@ -543,7 +831,13 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
} while (left);
}
-void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr;
@@ -558,9 +852,8 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
}
/* FIXME: non-speculating: flush on bidirectional mappings? */
}
-EXPORT_SYMBOL(___dma_page_cpu_to_dev);
-void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr = page_to_phys(page) + off;
@@ -578,10 +871,9 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
set_bit(PG_dcache_clean, &page->flags);
}
-EXPORT_SYMBOL(___dma_page_dev_to_cpu);
/**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map
@@ -596,32 +888,32 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
* Device ownership issues as mentioned for dma_map_single are the same
* here.
*/
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
+int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i, j;
- BUG_ON(!valid_dma_direction(dir));
-
for_each_sg(sg, s, nents, i) {
- s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
- s->length, dir);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ s->dma_length = s->length;
+#endif
+ s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
+ s->length, dir, attrs);
if (dma_mapping_error(dev, s->dma_address))
goto bad_mapping;
}
- debug_dma_map_sg(dev, sg, nents, nents, dir);
return nents;
bad_mapping:
for_each_sg(sg, s, i, j)
- __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
return 0;
}
-EXPORT_SYMBOL(dma_map_sg);
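
The EXPORT_SYMBOL goes away because dma_map_sg() is now a generic wrapper that dispatches to arm_dma_map_sg() through the device's dma_map_ops; existing driver code is unchanged. For illustration, the usual streaming scatter-gather pattern that ends up here:

	/* 'sgl' and 'count' are the driver's scatterlist and entry count */
	int nents = dma_map_sg(dev, sgl, count, DMA_FROM_DEVICE);

	if (!nents)
		return -EIO;

	/* ... device DMA into the buffers described by 'sgl' ... */

	dma_unmap_sg(dev, sgl, count, DMA_FROM_DEVICE);
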
/**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to unmap (same as was passed to dma_map_sg)
@@ -630,70 +922,55 @@ EXPORT_SYMBOL(dma_map_sg);
* Unmap a set of streaming mode DMA translations. Again, CPU access
* rules concerning calls here are the same as for dma_unmap_single().
*/
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
+void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
- int i;
- debug_dma_unmap_sg(dev, sg, nents, dir);
+ int i;
for_each_sg(sg, s, nents, i)
- __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}
-EXPORT_SYMBOL(dma_unmap_sg);
/**
- * dma_sync_sg_for_cpu
+ * arm_dma_sync_sg_for_cpu
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
- for_each_sg(sg, s, nents, i) {
- if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
- sg_dma_len(s), dir))
- continue;
-
- __dma_page_dev_to_cpu(sg_page(s), s->offset,
- s->length, dir);
- }
-
- debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+ for_each_sg(sg, s, nents, i)
+ ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
+ dir);
}
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
/**
- * dma_sync_sg_for_device
+ * arm_dma_sync_sg_for_device
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
- for_each_sg(sg, s, nents, i) {
- if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
- sg_dma_len(s), dir))
- continue;
-
- __dma_page_cpu_to_dev(sg_page(s), s->offset,
- s->length, dir);
- }
-
- debug_dma_sync_sg_for_device(dev, sg, nents, dir);
+ for_each_sg(sg, s, nents, i)
+ ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
+ dir);
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
/*
* Return whether the given device DMA address mask can be supported
@@ -709,18 +986,15 @@ int dma_supported(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(dma_supported);
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;
-#ifndef CONFIG_DMABOUNCE
*dev->dma_mask = dma_mask;
-#endif
return 0;
}
-EXPORT_SYMBOL(dma_set_mask);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
@@ -733,3 +1007,679 @@ static int __init dma_debug_do_init(void)
return 0;
}
fs_initcall(dma_debug_do_init);
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+
+/* IOMMU */
+
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
+ size_t size)
+{
+ unsigned int order = get_order(size);
+ unsigned int align = 0;
+ unsigned int count, start;
+ unsigned long flags;
+
+ count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+
+ if (order > mapping->order)
+ align = (1 << (order - mapping->order)) - 1;
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+ count, align);
+ if (start > mapping->bits) {
+ spin_unlock_irqrestore(&mapping->lock, flags);
+ return DMA_ERROR_CODE;
+ }
+
+ bitmap_set(mapping->bitmap, start, count);
+ spin_unlock_irqrestore(&mapping->lock, flags);
+
+ return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+}
+
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
+ dma_addr_t addr, size_t size)
+{
+ unsigned int start = (addr - mapping->base) >>
+ (mapping->order + PAGE_SHIFT);
+ unsigned int count = ((size >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ bitmap_clear(mapping->bitmap, start, count);
+ spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
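
The mapping->order field trades allocation granularity for bitmap size: each bit in mapping->bitmap covers 2^order pages. With 4 KiB pages and order = 4, for instance, one bit stands for 64 KiB, a 1 MiB request rounds up to 16 consecutive bits, and the address handed back is mapping->base + start * 64 KiB.
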
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+{
+ struct page **pages;
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i = 0;
+
+ if (array_size <= PAGE_SIZE)
+ pages = kzalloc(array_size, gfp);
+ else
+ pages = vzalloc(array_size);
+ if (!pages)
+ return NULL;
+
+ while (count) {
+ int j, order = __ffs(count);
+
+ pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+ while (!pages[i] && order)
+ pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+ if (!pages[i])
+ goto error;
+
+ if (order)
+ split_page(pages[i], order);
+ j = 1 << order;
+ while (--j)
+ pages[i + j] = pages[i] + j;
+
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order);
+ i += 1 << order;
+ count -= 1 << order;
+ }
+
+ return pages;
+error:
+ while (--i)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ if (array_size < PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+ return NULL;
+}
+
+static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+{
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i;
+ for (i = 0; i < count; i++)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ if (array_size < PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+ return 0;
+}
+
+/*
+ * Create a CPU mapping for the specified pages
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
+{
+ struct arm_vmregion *c;
+ size_t align;
+ size_t count = size >> PAGE_SHIFT;
+ int bit;
+
+ if (!consistent_pte[0]) {
+ pr_err("%s: not initialised\n", __func__);
+ dump_stack();
+ return NULL;
+ }
+
+ /*
+ * Align the virtual region allocation - maximum alignment is
+ * a section size, minimum is a page size. This helps reduce
+ * fragmentation of the DMA space, and also prevents allocations
+ * smaller than a section from crossing a section boundary.
+ */
+ bit = fls(size - 1);
+ if (bit > SECTION_SHIFT)
+ bit = SECTION_SHIFT;
+ align = 1 << bit;
+
+ /*
+ * Allocate a virtual address in the consistent mapping region.
+ */
+ c = arm_vmregion_alloc(&consistent_head, align, size,
+ gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
+ if (c) {
+ pte_t *pte;
+ int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+ int i = 0;
+ u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+
+ pte = consistent_pte[idx] + off;
+ c->priv = pages;
+
+ do {
+ BUG_ON(!pte_none(*pte));
+
+ set_pte_ext(pte, mk_pte(pages[i], prot), 0);
+ pte++;
+ off++;
+ i++;
+ if (off >= PTRS_PER_PTE) {
+ off = 0;
+ pte = consistent_pte[++idx];
+ }
+ } while (i < count);
+
+ dsb();
+
+ return (void *)c->vm_start;
+ }
+ return NULL;
+}
+
+/*
+ * Create a mapping in device IO address space for the specified pages
+ */
+static dma_addr_t
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ dma_addr_t dma_addr, iova;
+ int i, ret = DMA_ERROR_CODE;
+
+ dma_addr = __alloc_iova(mapping, size);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+
+ iova = dma_addr;
+ for (i = 0; i < count; ) {
+ unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+ phys_addr_t phys = page_to_phys(pages[i]);
+ unsigned int len, j;
+
+ for (j = i + 1; j < count; j++, next_pfn++)
+ if (page_to_pfn(pages[j]) != next_pfn)
+ break;
+
+ len = (j - i) << PAGE_SHIFT;
+ ret = iommu_map(mapping->domain, iova, phys, len, 0);
+ if (ret < 0)
+ goto fail;
+ iova += len;
+ i = j;
+ }
+ return dma_addr;
+fail:
+ iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
+ __free_iova(mapping, dma_addr, size);
+ return DMA_ERROR_CODE;
+}
+
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ /*
+ * add optional in-page offset from iova to size and align
+ * result to page size
+ */
+ size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+ iova &= PAGE_MASK;
+
+ iommu_unmap(mapping->domain, iova, size);
+ __free_iova(mapping, iova, size);
+ return 0;
+}
+
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+ pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+ struct page **pages;
+ void *addr = NULL;
+
+ *handle = DMA_ERROR_CODE;
+ size = PAGE_ALIGN(size);
+
+ pages = __iommu_alloc_buffer(dev, size, gfp);
+ if (!pages)
+ return NULL;
+
+ *handle = __iommu_create_mapping(dev, pages, size);
+ if (*handle == DMA_ERROR_CODE)
+ goto err_buffer;
+
+ addr = __iommu_alloc_remap(pages, size, gfp, prot);
+ if (!addr)
+ goto err_mapping;
+
+ return addr;
+
+err_mapping:
+ __iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+ __iommu_free_buffer(dev, pages, size);
+ return NULL;
+}
+
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ struct arm_vmregion *c;
+
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+ c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+
+ if (c) {
+ struct page **pages = c->priv;
+
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+ int i = 0;
+
+ do {
+ int ret;
+
+ ret = vm_insert_page(vma, uaddr, pages[i++]);
+ if (ret) {
+ pr_err("Remapping memory, error: %d\n", ret);
+ return ret;
+ }
+
+ uaddr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
+ }
+ return 0;
+}
+
+/*
+ * Free a buffer as defined by the above mapping.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
+{
+ struct arm_vmregion *c;
+ size = PAGE_ALIGN(size);
+
+ c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+ if (c) {
+ struct page **pages = c->priv;
+ __dma_free_remap(cpu_addr, size);
+ __iommu_remove_mapping(dev, handle, size);
+ __iommu_free_buffer(dev, pages, size);
+ }
+}
+
+/*
+ * Map a part of the scatter-gather list into contiguous io address space
+ */
+static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
+ size_t size, dma_addr_t *handle,
+ enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova, iova_base;
+ int ret = 0;
+ unsigned int count;
+ struct scatterlist *s;
+
+ size = PAGE_ALIGN(size);
+ *handle = DMA_ERROR_CODE;
+
+ iova_base = iova = __alloc_iova(mapping, size);
+ if (iova == DMA_ERROR_CODE)
+ return -ENOMEM;
+
+ for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
+ phys_addr_t phys = page_to_phys(sg_page(s));
+ unsigned int len = PAGE_ALIGN(s->offset + s->length);
+
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+
+ ret = iommu_map(mapping->domain, iova, phys, len, 0);
+ if (ret < 0)
+ goto fail;
+ count += len >> PAGE_SHIFT;
+ iova += len;
+ }
+ *handle = iova_base;
+
+ return 0;
+fail:
+ iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
+ __free_iova(mapping, iova_base, size);
+ return ret;
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct scatterlist *s = sg, *dma = sg, *start = sg;
+ int i, count = 0;
+ unsigned int offset = s->offset;
+ unsigned int size = s->offset + s->length;
+ unsigned int max = dma_get_max_seg_size(dev);
+
+ for (i = 1; i < nents; i++) {
+ s = sg_next(s);
+
+ s->dma_address = DMA_ERROR_CODE;
+ s->dma_length = 0;
+
+ if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
+ if (__map_sg_chunk(dev, start, size, &dma->dma_address,
+ dir) < 0)
+ goto bad_mapping;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ size = offset = s->offset;
+ start = s;
+ dma = sg_next(dma);
+ count += 1;
+ }
+ size += s->length;
+ }
+ if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
+ goto bad_mapping;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ return count+1;
+
+bad_mapping:
+ for_each_sg(sg, s, count, i)
+ __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
+ return 0;
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ if (sg_dma_len(s))
+ __iommu_remove_mapping(dev, sg_dma_address(s),
+ sg_dma_len(s));
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(sg_page(s), s->offset,
+ s->length, dir);
+ }
+}
+
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+
+}
+
+/**
+ * arm_iommu_sync_sg_for_device
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+}
+
+
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t dma_addr;
+ int ret, len = PAGE_ALIGN(size + offset);
+
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+
+ dma_addr = __alloc_iova(mapping, len);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+
+ ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
+ if (ret < 0)
+ goto fail;
+
+ return dma_addr + offset;
+fail:
+ __free_iova(mapping, dma_addr, len);
+ return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ int offset = handle & ~PAGE_MASK;
+ int len = PAGE_ALIGN(size + offset);
+
+ if (!iova)
+ return;
+
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+
+ iommu_unmap(mapping->domain, iova, len);
+ __free_iova(mapping, iova, len);
+}
+
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ unsigned int offset = handle & ~PAGE_MASK;
+
+ if (!iova)
+ return;
+
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_iommu_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ unsigned int offset = handle & ~PAGE_MASK;
+
+ if (!iova)
+ return;
+
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+struct dma_map_ops iommu_ops = {
+ .alloc = arm_iommu_alloc_attrs,
+ .free = arm_iommu_free_attrs,
+ .mmap = arm_iommu_mmap_attrs,
+
+ .map_page = arm_iommu_map_page,
+ .unmap_page = arm_iommu_unmap_page,
+ .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
+ .sync_single_for_device = arm_iommu_sync_single_for_device,
+
+ .map_sg = arm_iommu_map_sg,
+ .unmap_sg = arm_iommu_unmap_sg,
+ .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_iommu_sync_sg_for_device,
+};
+
+/**
+ * arm_iommu_create_mapping
+ * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @base: start address of the valid IO address space
+ * @size: size of the valid IO address space
+ * @order: granularity of the IO address allocations
+ *
+ * Creates a mapping structure which holds information about used/unused
+ * IO address ranges, which is required to perform memory allocation and
+ * mapping with IOMMU aware functions.
+ *
+ * The client device needs to be attached to the mapping with the
+ * arm_iommu_attach_device function.
+ */
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+ int order)
+{
+ unsigned int count = size >> (PAGE_SHIFT + order);
+ unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ struct dma_iommu_mapping *mapping;
+ int err = -ENOMEM;
+
+ if (!count)
+ return ERR_PTR(-EINVAL);
+
+ mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+ if (!mapping)
+ goto err;
+
+ mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!mapping->bitmap)
+ goto err2;
+
+ mapping->base = base;
+ mapping->bits = BITS_PER_BYTE * bitmap_size;
+ mapping->order = order;
+ spin_lock_init(&mapping->lock);
+
+ mapping->domain = iommu_domain_alloc(bus);
+ if (!mapping->domain)
+ goto err3;
+
+ kref_init(&mapping->kref);
+ return mapping;
+err3:
+ kfree(mapping->bitmap);
+err2:
+ kfree(mapping);
+err:
+ return ERR_PTR(err);
+}
+
+static void release_iommu_mapping(struct kref *kref)
+{
+ struct dma_iommu_mapping *mapping =
+ container_of(kref, struct dma_iommu_mapping, kref);
+
+ iommu_domain_free(mapping->domain);
+ kfree(mapping->bitmap);
+ kfree(mapping);
+}
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+ if (mapping)
+ kref_put(&mapping->kref, release_iommu_mapping);
+}
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ * arm_iommu_create_mapping)
+ *
+ * Attaches the specified io address space mapping to the provided device;
+ * this replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version. More than one client might be attached to
+ * the same io address space mapping.
+ */
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping)
+{
+ int err;
+
+ err = iommu_attach_device(mapping->domain, dev);
+ if (err)
+ return err;
+
+ kref_get(&mapping->kref);
+ dev->archdata.mapping = mapping;
+ set_dma_ops(dev, &iommu_ops);
+
+ pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
+ return 0;
+}
+
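
The expected call sequence for the bus or platform glue that owns the IOMMU, sketched with arbitrary example values for the base, size and order arguments (and platform_bus_type standing in for whatever bus the client devices live on):

	struct dma_iommu_mapping *mapping;
	int err;

	/* 128 MiB of IO virtual address space at 0x80000000,
	 * handed out in 2^4-page (64 KiB) chunks */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, SZ_128M, 4);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	err = arm_iommu_attach_device(dev, mapping);
	if (err) {
		arm_iommu_release_mapping(mapping);
		return err;
	}

	/* from here on, dma_alloc_coherent()/dma_map_sg() on 'dev' go
	 * through iommu_ops and use IO virtual addresses from 'mapping' */
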
+#endif
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8f5813bbffb5..c21d06c7dd7e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -20,6 +20,7 @@
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
+#include <linux/dma-contiguous.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
@@ -226,6 +227,17 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
}
#endif
+void __init setup_dma_zone(struct machine_desc *mdesc)
+{
+#ifdef CONFIG_ZONE_DMA
+ if (mdesc->dma_zone_size) {
+ arm_dma_zone_size = mdesc->dma_zone_size;
+ arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+ } else
+ arm_dma_limit = 0xffffffff;
+#endif
+}
+
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
unsigned long max_high)
{
@@ -273,12 +285,9 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
* Adjust the sizes according to any special requirements for
* this machine type.
*/
- if (arm_dma_zone_size) {
+ if (arm_dma_zone_size)
arm_adjust_dma_zone(zone_size, zhole_size,
arm_dma_zone_size >> PAGE_SHIFT);
- arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
- } else
- arm_dma_limit = 0xffffffff;
#endif
free_area_init_node(0, zone_size, min, zhole_size);
@@ -364,6 +373,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
if (mdesc->reserve)
mdesc->reserve();
+ /*
+ * Reserve memory for DMA contiguous allocations;
+ * it must come from the DMA area inside low memory.
+ */
+ dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
+
arm_memblock_steal_permitted = false;
memblock_allow_resize();
memblock_dump_all();
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 27f4a619b35d..93dc0c17cdcb 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -67,5 +67,8 @@ extern u32 arm_dma_limit;
#define arm_dma_limit ((u32)~0)
#endif
+extern phys_addr_t arm_lowmem_limit;
+
void __init bootmem_init(void);
void arm_mm_memblock_reserve(void);
+void dma_contiguous_remap(void);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index aa78de8bfdd3..e5dad60b558b 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -288,6 +288,11 @@ static struct mem_type mem_types[] = {
PMD_SECT_UNCACHED | PMD_SECT_XN,
.domain = DOMAIN_KERNEL,
},
+ [MT_MEMORY_DMA_READY] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .domain = DOMAIN_KERNEL,
+ },
};
const struct mem_type *get_mem_type(unsigned int type)
@@ -429,6 +434,7 @@ static void __init build_mem_type_table(void)
if (arch_is_coherent() && cpu_is_xsc3()) {
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
}
@@ -460,6 +466,7 @@ static void __init build_mem_type_table(void)
mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
}
@@ -512,6 +519,7 @@ static void __init build_mem_type_table(void)
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd;
@@ -596,7 +604,7 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
* L1 entries, whereas PGDs refer to a group of L1 entries making
* up one logical pointer to an L2 table.
*/
- if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+ if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
pmd_t *p = pmd;
#ifndef CONFIG_ARM_LPAE
@@ -814,7 +822,7 @@ static int __init early_vmalloc(char *arg)
}
early_param("vmalloc", early_vmalloc);
-static phys_addr_t lowmem_limit __initdata = 0;
+phys_addr_t arm_lowmem_limit __initdata = 0;
void __init sanity_check_meminfo(void)
{
@@ -897,8 +905,8 @@ void __init sanity_check_meminfo(void)
bank->size = newsize;
}
#endif
- if (!bank->highmem && bank->start + bank->size > lowmem_limit)
- lowmem_limit = bank->start + bank->size;
+ if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
+ arm_lowmem_limit = bank->start + bank->size;
j++;
}
@@ -923,8 +931,8 @@ void __init sanity_check_meminfo(void)
}
#endif
meminfo.nr_banks = j;
- high_memory = __va(lowmem_limit - 1) + 1;
- memblock_set_current_limit(lowmem_limit);
+ high_memory = __va(arm_lowmem_limit - 1) + 1;
+ memblock_set_current_limit(arm_lowmem_limit);
}
static inline void prepare_page_table(void)
@@ -949,8 +957,8 @@ static inline void prepare_page_table(void)
* Find the end of the first block of lowmem.
*/
end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
- if (end >= lowmem_limit)
- end = lowmem_limit;
+ if (end >= arm_lowmem_limit)
+ end = arm_lowmem_limit;
/*
* Clear out all the kernel space mappings, except for the first
@@ -1093,8 +1101,8 @@ static void __init map_lowmem(void)
phys_addr_t end = start + reg->size;
struct map_desc map;
- if (end > lowmem_limit)
- end = lowmem_limit;
+ if (end > arm_lowmem_limit)
+ end = arm_lowmem_limit;
if (start >= end)
break;
@@ -1115,11 +1123,12 @@ void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
- memblock_set_current_limit(lowmem_limit);
+ memblock_set_current_limit(arm_lowmem_limit);
build_mem_type_table();
prepare_page_table();
map_lowmem();
+ dma_contiguous_remap();
devicemaps_init(mdesc);
kmap_init();
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index 162be662c088..bf312c354a21 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -17,7 +17,7 @@ struct arm_vmregion {
struct list_head vm_list;
unsigned long vm_start;
unsigned long vm_end;
- struct page *vm_pages;
+ void *priv;
int vm_active;
const void *caller;
};
diff --git a/arch/arm/plat-mxc/clock.c b/arch/arm/plat-mxc/clock.c
index 2ed3ab173add..5079787273d2 100644
--- a/arch/arm/plat-mxc/clock.c
+++ b/arch/arm/plat-mxc/clock.c
@@ -41,6 +41,7 @@
#include <mach/clock.h>
#include <mach/hardware.h>
+#ifndef CONFIG_COMMON_CLK
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
@@ -200,6 +201,16 @@ struct clk *clk_get_parent(struct clk *clk)
}
EXPORT_SYMBOL(clk_get_parent);
+#else
+
+/*
+ * Lock to protect the clock module (ccm) registers. Used
+ * on all i.MXs
+ */
+DEFINE_SPINLOCK(imx_ccm_lock);
+
+#endif /* CONFIG_COMMON_CLK */
+
/*
* Get the resulting clock rate from a PLL register value and the input
* frequency. PLLs with this register layout can at least be found on
diff --git a/arch/arm/plat-mxc/include/mach/clock.h b/arch/arm/plat-mxc/include/mach/clock.h
index 753a5988d85c..bd940c795cbb 100644
--- a/arch/arm/plat-mxc/include/mach/clock.h
+++ b/arch/arm/plat-mxc/include/mach/clock.h
@@ -23,6 +23,7 @@
#ifndef __ASSEMBLY__
#include <linux/list.h>
+#ifndef CONFIG_COMMON_CLK
struct module;
struct clk {
@@ -59,6 +60,9 @@ struct clk {
int clk_register(struct clk *clk);
void clk_unregister(struct clk *clk);
+#endif /* CONFIG_COMMON_CLK */
+
+extern spinlock_t imx_ccm_lock;
unsigned long mxc_decode_pll(unsigned int pll, u32 f_ref);
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index 0319c4a0cafa..cf663d84e7c1 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -53,6 +53,7 @@ extern void imx35_soc_init(void);
extern void imx50_soc_init(void);
extern void imx51_soc_init(void);
extern void imx53_soc_init(void);
+extern void imx51_init_late(void);
extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq);
extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int);
extern int mx1_clocks_init(unsigned long fref);
@@ -149,4 +150,10 @@ extern void imx6q_pm_init(void);
static inline void imx6q_pm_init(void) {}
#endif
+#ifdef CONFIG_NEON
+extern int mx51_neon_fixup(void);
+#else
+static inline int mx51_neon_fixup(void) { return 0; }
+#endif
+
#endif
diff --git a/arch/arm/plat-mxc/include/mach/debug-macro.S b/arch/arm/plat-mxc/include/mach/debug-macro.S
index 8ddda365f1a0..761e45f9456f 100644
--- a/arch/arm/plat-mxc/include/mach/debug-macro.S
+++ b/arch/arm/plat-mxc/include/mach/debug-macro.S
@@ -24,6 +24,8 @@
#define UART_PADDR MX51_UART1_BASE_ADDR
#elif defined (CONFIG_DEBUG_IMX50_IMX53_UART)
#define UART_PADDR MX53_UART1_BASE_ADDR
+#elif defined (CONFIG_DEBUG_IMX6Q_UART2)
+#define UART_PADDR MX6Q_UART2_BASE_ADDR
#elif defined (CONFIG_DEBUG_IMX6Q_UART4)
#define UART_PADDR MX6Q_UART4_BASE_ADDR
#endif
diff --git a/arch/arm/plat-mxc/include/mach/mx2_cam.h b/arch/arm/plat-mxc/include/mach/mx2_cam.h
index 3c080a32dbf5..7ded6f1f74bc 100644
--- a/arch/arm/plat-mxc/include/mach/mx2_cam.h
+++ b/arch/arm/plat-mxc/include/mach/mx2_cam.h
@@ -23,7 +23,6 @@
#ifndef __MACH_MX2_CAM_H_
#define __MACH_MX2_CAM_H_
-#define MX2_CAMERA_SWAP16 (1 << 0)
#define MX2_CAMERA_EXT_VSYNC (1 << 1)
#define MX2_CAMERA_CCIR (1 << 2)
#define MX2_CAMERA_CCIR_INTERLACE (1 << 3)
@@ -31,7 +30,6 @@
#define MX2_CAMERA_GATED_CLOCK (1 << 5)
#define MX2_CAMERA_INV_DATA (1 << 6)
#define MX2_CAMERA_PCLK_SAMPLE_RISING (1 << 7)
-#define MX2_CAMERA_PACK_DIR_MSB (1 << 8)
/**
* struct mx2_camera_platform_data - optional platform data for mx2_camera
diff --git a/arch/arm/plat-mxc/include/mach/mx6q.h b/arch/arm/plat-mxc/include/mach/mx6q.h
index 254a561a2799..f7e7dbac8f4b 100644
--- a/arch/arm/plat-mxc/include/mach/mx6q.h
+++ b/arch/arm/plat-mxc/include/mach/mx6q.h
@@ -27,6 +27,8 @@
#define MX6Q_CCM_SIZE 0x4000
#define MX6Q_ANATOP_BASE_ADDR 0x020c8000
#define MX6Q_ANATOP_SIZE 0x1000
+#define MX6Q_UART2_BASE_ADDR 0x021e8000
+#define MX6Q_UART2_SIZE 0x4000
#define MX6Q_UART4_BASE_ADDR 0x021f0000
#define MX6Q_UART4_SIZE 0x4000
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index 7daf7c9a413b..99f958ca6cb8 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -25,6 +25,7 @@
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
+#include <linux/err.h>
#include <mach/hardware.h>
#include <asm/sched_clock.h>
@@ -282,6 +283,19 @@ static int __init mxc_clockevent_init(struct clk *timer_clk)
void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
{
uint32_t tctl_val;
+ struct clk *timer_ipg_clk;
+
+ if (!timer_clk) {
+ timer_clk = clk_get_sys("imx-gpt.0", "per");
+ if (IS_ERR(timer_clk)) {
+ pr_err("i.MX timer: unable to get clk\n");
+ return;
+ }
+
+ timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg");
+ if (!IS_ERR(timer_ipg_clk))
+ clk_prepare_enable(timer_ipg_clk);
+ }
clk_prepare_enable(timer_clk);
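
The NULL-clock fallback added above assumes the common-clock platform code has registered "per" and "ipg" lookups for the "imx-gpt.0" device. A minimal sketch of such a registration, assuming clk_register_clkdev() from <linux/clkdev.h> and caller-provided clocks (the helper name here is illustrative, not from this patch):

#include <linux/init.h>
#include <linux/clk.h>
#include <linux/clkdev.h>

/* Hypothetical i.MX clock setup: make the GPT clocks visible to clk_get_sys(). */
static void __init example_register_gpt_clocks(struct clk *per, struct clk *ipg)
{
	clk_register_clkdev(per, "per", "imx-gpt.0");
	clk_register_clkdev(ipg, "ipg", "imx-gpt.0");

	/* With these lookups in place, mxc_timer_init(NULL, base, irq) works. */
}
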
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index 44ae077dbc28..2132c4f389e1 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -28,19 +28,20 @@
#include <plat/clock.h>
+/* OMAP2_32KSYNCNT_CR_OFF: offset of 32ksync counter register */
+#define OMAP2_32KSYNCNT_CR_OFF 0x10
+
/*
* 32KHz clocksource ... always available, on pretty most chips except
* OMAP 730 and 1510. Other timers could be used as clocksources, with
* higher resolution in free-running counter modes (e.g. 12 MHz xtal),
* but systems won't necessarily want to spend resources that way.
*/
-static void __iomem *timer_32k_base;
-
-#define OMAP16XX_TIMER_32K_SYNCHRONIZED 0xfffbc410
+static void __iomem *sync32k_cnt_reg;
static u32 notrace omap_32k_read_sched_clock(void)
{
- return timer_32k_base ? __raw_readl(timer_32k_base) : 0;
+ return sync32k_cnt_reg ? __raw_readl(sync32k_cnt_reg) : 0;
}
/**
@@ -60,7 +61,7 @@ static void omap_read_persistent_clock(struct timespec *ts)
struct timespec *tsp = &persistent_ts;
last_cycles = cycles;
- cycles = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
+ cycles = sync32k_cnt_reg ? __raw_readl(sync32k_cnt_reg) : 0;
delta = cycles - last_cycles;
nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift);
@@ -69,55 +70,41 @@ static void omap_read_persistent_clock(struct timespec *ts)
*ts = *tsp;
}
-int __init omap_init_clocksource_32k(void)
+/**
+ * omap_init_clocksource_32k - setup and register counter 32k as a
+ * kernel clocksource
+ * @vbase: ioremapped (virtual) base address of the counter_32k module
+ *
+ * Returns 0 upon success or negative error code upon failure.
+ *
+ */
+int __init omap_init_clocksource_32k(void __iomem *vbase)
{
- static char err[] __initdata = KERN_ERR
- "%s: can't register clocksource!\n";
-
- if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
- u32 pbase;
- unsigned long size = SZ_4K;
- void __iomem *base;
- struct clk *sync_32k_ick;
-
- if (cpu_is_omap16xx()) {
- pbase = OMAP16XX_TIMER_32K_SYNCHRONIZED;
- size = SZ_1K;
- } else if (cpu_is_omap2420())
- pbase = OMAP2420_32KSYNCT_BASE + 0x10;
- else if (cpu_is_omap2430())
- pbase = OMAP2430_32KSYNCT_BASE + 0x10;
- else if (cpu_is_omap34xx())
- pbase = OMAP3430_32KSYNCT_BASE + 0x10;
- else if (cpu_is_omap44xx())
- pbase = OMAP4430_32KSYNCT_BASE + 0x10;
- else
- return -ENODEV;
-
- /* For this to work we must have a static mapping in io.c for this area */
- base = ioremap(pbase, size);
- if (!base)
- return -ENODEV;
-
- sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
- if (!IS_ERR(sync_32k_ick))
- clk_enable(sync_32k_ick);
-
- timer_32k_base = base;
-
- /*
- * 120000 rough estimate from the calculations in
- * __clocksource_updatefreq_scale.
- */
- clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
- 32768, NSEC_PER_SEC, 120000);
-
- if (clocksource_mmio_init(base, "32k_counter", 32768, 250, 32,
- clocksource_mmio_readl_up))
- printk(err, "32k_counter");
-
- setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
- register_persistent_clock(NULL, omap_read_persistent_clock);
+ int ret;
+
+ /*
+ * The 32k sync counter register lives at offset 0x10 in the module
+ */
+ sync32k_cnt_reg = vbase + OMAP2_32KSYNCNT_CR_OFF;
+
+ /*
+ * 120000 rough estimate from the calculations in
+ * __clocksource_updatefreq_scale.
+ */
+ clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
+ 32768, NSEC_PER_SEC, 120000);
+
+ ret = clocksource_mmio_init(sync32k_cnt_reg, "32k_counter", 32768,
+ 250, 32, clocksource_mmio_readl_up);
+ if (ret) {
+ pr_err("32k_counter: can't register clocksource\n");
+ return ret;
}
+
+ setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
+ register_persistent_clock(NULL, omap_read_persistent_clock);
+ pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
+
return 0;
}
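
With this change the 32k counter code no longer maps the hardware itself; whoever calls omap_init_clocksource_32k() is now responsible for ioremapping the counter_32k module and passing its virtual base. A hedged sketch of such a caller (the physical address argument and 4 KiB size are placeholders; the real lookup moves into the OMAP2+ timer code):

#include <linux/init.h>
#include <linux/io.h>
#include <plat/common.h>	/* omap_init_clocksource_32k() prototype */

/* Hypothetical caller: map the counter_32k module and register the clocksource. */
static int __init example_setup_32k_clocksource(unsigned long pbase)
{
	void __iomem *vbase = ioremap(pbase, 0x1000);

	if (!vbase)
		return -ENODEV;

	return omap_init_clocksource_32k(vbase);
}
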
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index 09b07d252892..1cba9273d2cb 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -28,54 +28,6 @@
#include <plat/menelaus.h>
#include <plat/omap44xx.h>
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
- defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
-
-#define OMAP_MMC_NR_RES 2
-
-/*
- * Register MMC devices. Called from mach-omap1 and mach-omap2 device init.
- */
-int __init omap_mmc_add(const char *name, int id, unsigned long base,
- unsigned long size, unsigned int irq,
- struct omap_mmc_platform_data *data)
-{
- struct platform_device *pdev;
- struct resource res[OMAP_MMC_NR_RES];
- int ret;
-
- pdev = platform_device_alloc(name, id);
- if (!pdev)
- return -ENOMEM;
-
- memset(res, 0, OMAP_MMC_NR_RES * sizeof(struct resource));
- res[0].start = base;
- res[0].end = base + size - 1;
- res[0].flags = IORESOURCE_MEM;
- res[1].start = res[1].end = irq;
- res[1].flags = IORESOURCE_IRQ;
-
- ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
- if (ret == 0)
- ret = platform_device_add_data(pdev, data, sizeof(*data));
- if (ret)
- goto fail;
-
- ret = platform_device_add(pdev);
- if (ret)
- goto fail;
-
- /* return device handle to board setup code */
- data->dev = &pdev->dev;
- return 0;
-
-fail:
- platform_device_put(pdev);
- return ret;
-}
-
-#endif
-
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_HW_RANDOM_OMAP) || defined(CONFIG_HW_RANDOM_OMAP_MODULE)
@@ -109,79 +61,6 @@ static void omap_init_rng(void)
static inline void omap_init_rng(void) {}
#endif
-/*-------------------------------------------------------------------------*/
-
-/* Numbering for the SPI-capable controllers when used for SPI:
- * spi = 1
- * uwire = 2
- * mmc1..2 = 3..4
- * mcbsp1..3 = 5..7
- */
-
-#if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE)
-
-#define OMAP_UWIRE_BASE 0xfffb3000
-
-static struct resource uwire_resources[] = {
- {
- .start = OMAP_UWIRE_BASE,
- .end = OMAP_UWIRE_BASE + 0x20,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device omap_uwire_device = {
- .name = "omap_uwire",
- .id = -1,
- .num_resources = ARRAY_SIZE(uwire_resources),
- .resource = uwire_resources,
-};
-
-static void omap_init_uwire(void)
-{
- /* FIXME define and use a boot tag; not all boards will be hooking
- * up devices to the microwire controller, and multi-board configs
- * mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway...
- */
-
- /* board-specific code must configure chipselects (only a few
- * are normally used) and SCLK/SDI/SDO (each has two choices).
- */
- (void) platform_device_register(&omap_uwire_device);
-}
-#else
-static inline void omap_init_uwire(void) {}
-#endif
-
-#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
-
-static phys_addr_t omap_dsp_phys_mempool_base;
-
-void __init omap_dsp_reserve_sdram_memblock(void)
-{
- phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
- phys_addr_t paddr;
-
- if (!size)
- return;
-
- paddr = arm_memblock_steal(size, SZ_1M);
- if (!paddr) {
- pr_err("%s: failed to reserve %llx bytes\n",
- __func__, (unsigned long long)size);
- return;
- }
-
- omap_dsp_phys_mempool_base = paddr;
-}
-
-phys_addr_t omap_dsp_get_mempool_base(void)
-{
- return omap_dsp_phys_mempool_base;
-}
-EXPORT_SYMBOL(omap_dsp_get_mempool_base);
-#endif
-
/*
* This gets called after board-specific INIT_MACHINE, and initializes most
* on-chip peripherals accessible on this board (except for few like USB):
@@ -208,7 +87,6 @@ static int __init omap_init_devices(void)
* in alphabetical order so they're easier to sort through.
*/
omap_init_rng();
- omap_init_uwire();
return 0;
}
arch_initcall(omap_init_devices);
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 987e6101267d..cb16ade437cb 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -852,7 +852,7 @@ omap_dma_set_prio_lch(int lch, unsigned char read_prio,
}
l = p->dma_read(CCR, lch);
l &= ~((1 << 6) | (1 << 26));
- if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
+ if (cpu_class_is_omap2() && !cpu_is_omap242x())
l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
else
l |= ((read_prio & 0x1) << 6);
@@ -2080,7 +2080,7 @@ static int __devinit omap_system_dma_probe(struct platform_device *pdev)
}
}
- if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
+ if (cpu_class_is_omap2() && !cpu_is_omap242x())
omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
DMA_DEFAULT_FIFO_DEPTH, 0);
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index c4ed35e89fbd..3b0cfeb33d05 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -82,8 +82,6 @@ static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
static void omap_timer_restore_context(struct omap_dm_timer *timer)
{
- __raw_writel(timer->context.tiocp_cfg,
- timer->io_base + OMAP_TIMER_OCP_CFG_OFFSET);
if (timer->revision == 1)
__raw_writel(timer->context.tistat, timer->sys_stat);
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index a557b8484e6c..d1cb6f527b7e 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -30,7 +30,7 @@
#include <plat/i2c.h>
#include <plat/omap_hwmod.h>
-extern int __init omap_init_clocksource_32k(void);
+extern int __init omap_init_clocksource_32k(void __iomem *vbase);
extern void __init omap_check_revision(void);
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 4bdf14ec6747..297245dba66e 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -121,6 +121,7 @@ IS_OMAP_CLASS(16xx, 0x16)
IS_OMAP_CLASS(24xx, 0x24)
IS_OMAP_CLASS(34xx, 0x34)
IS_OMAP_CLASS(44xx, 0x44)
+IS_AM_CLASS(35xx, 0x35)
IS_AM_CLASS(33xx, 0x33)
IS_TI_CLASS(81xx, 0x81)
@@ -148,6 +149,7 @@ IS_AM_SUBCLASS(335x, 0x335)
#define cpu_is_ti81xx() 0
#define cpu_is_ti816x() 0
#define cpu_is_ti814x() 0
+#define soc_is_am35xx() 0
#define cpu_is_am33xx() 0
#define cpu_is_am335x() 0
#define cpu_is_omap44xx() 0
@@ -357,6 +359,7 @@ IS_OMAP_TYPE(3517, 0x3517)
# undef cpu_is_ti81xx
# undef cpu_is_ti816x
# undef cpu_is_ti814x
+# undef soc_is_am35xx
# undef cpu_is_am33xx
# undef cpu_is_am335x
# define cpu_is_omap3430() is_omap3430()
@@ -378,6 +381,7 @@ IS_OMAP_TYPE(3517, 0x3517)
# define cpu_is_ti81xx() is_ti81xx()
# define cpu_is_ti816x() is_ti816x()
# define cpu_is_ti814x() is_ti814x()
+# define soc_is_am35xx() is_am35xx()
# define cpu_is_am33xx() is_am33xx()
# define cpu_is_am335x() is_am335x()
#endif
@@ -433,6 +437,10 @@ IS_OMAP_TYPE(3517, 0x3517)
#define TI8148_REV_ES2_0 (TI814X_CLASS | (0x1 << 8))
#define TI8148_REV_ES2_1 (TI814X_CLASS | (0x2 << 8))
+#define AM35XX_CLASS 0x35170034
+#define AM35XX_REV_ES1_0 AM35XX_CLASS
+#define AM35XX_REV_ES1_1 (AM35XX_CLASS | (0x1 << 8))
+
#define AM335X_CLASS 0x33500034
#define AM335X_REV_ES1_0 AM335X_CLASS
diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h
index 42afb4c45517..c5811d4409b0 100644
--- a/arch/arm/plat-omap/include/plat/dma.h
+++ b/arch/arm/plat-omap/include/plat/dma.h
@@ -312,6 +312,11 @@
#define CLEAR_CSR_ON_READ BIT(0xC)
#define IS_WORD_16 BIT(0xD)
+/* Defines for DMA Capabilities */
+#define DMA_HAS_TRANSPARENT_CAPS (0x1 << 18)
+#define DMA_HAS_CONSTANT_FILL_CAPS (0x1 << 19)
+#define DMA_HAS_DESCRIPTOR_CAPS (0x3 << 20)
+
enum omap_reg_offsets {
GCR, GSCR, GRST1, HW_ID,
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
index bdf871a84d62..5da73562e486 100644
--- a/arch/arm/plat-omap/include/plat/dmtimer.h
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h
@@ -75,7 +75,6 @@ struct clk;
struct timer_regs {
u32 tidr;
- u32 tiocp_cfg;
u32 tistat;
u32 tisr;
u32 tier;
diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h
index 2f6e9924a814..50fb7cc000ea 100644
--- a/arch/arm/plat-omap/include/plat/gpio.h
+++ b/arch/arm/plat-omap/include/plat/gpio.h
@@ -172,6 +172,8 @@ struct omap_gpio_reg_offs {
u16 clr_dataout;
u16 irqstatus;
u16 irqstatus2;
+ u16 irqstatus_raw0;
+ u16 irqstatus_raw1;
u16 irqenable;
u16 irqenable2;
u16 set_irqenable;
@@ -193,7 +195,6 @@ struct omap_gpio_reg_offs {
};
struct omap_gpio_platform_data {
- u16 virtual_irq_start;
int bank_type;
int bank_width; /* GPIO bank width */
int bank_stride; /* Only needed for omap1 MPUIO */
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index 3e7ae0f0215f..a7754a886d42 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -177,9 +177,6 @@ extern void omap_mmc_notify_cover_event(struct device *dev, int slot,
void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
int nr_controllers);
void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data);
-int omap_mmc_add(const char *name, int id, unsigned long base,
- unsigned long size, unsigned int irq,
- struct omap_mmc_platform_data *data);
#else
static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
int nr_controllers)
@@ -188,12 +185,6 @@ static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
{
}
-static inline int omap_mmc_add(const char *name, int id, unsigned long base,
- unsigned long size, unsigned int irq,
- struct omap_mmc_platform_data *data)
-{
- return 0;
-}
#endif
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 74daf5ed1432..61fd837624a8 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -14,15 +14,41 @@
#include <linux/dma-mapping.h>
#include <linux/serial_8250.h>
#include <linux/ata_platform.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
#include <linux/mv643xx_eth.h>
#include <linux/mv643xx_i2c.h>
#include <net/dsa.h>
-#include <linux/spi/orion_spi.h>
-#include <plat/orion_wdt.h>
#include <plat/mv_xor.h>
#include <plat/ehci-orion.h>
#include <mach/bridge-regs.h>
+/* Create a clkdev entry for a given device/clk */
+void __init orion_clkdev_add(const char *con_id, const char *dev_id,
+ struct clk *clk)
+{
+ struct clk_lookup *cl;
+
+ cl = clkdev_alloc(clk, con_id, dev_id);
+ if (cl)
+ clkdev_add(cl);
+}
+
+/* Create clkdev entries for all orion platforms except kirkwood.
+ Kirkwood has gated clocks for some of its peripherals, so it creates
+ its own clkdev entries. For all the other orion devices, create
+ clkdev entries pointing to the tclk. */
+void __init orion_clkdev_init(struct clk *tclk)
+{
+ orion_clkdev_add(NULL, "orion_spi.0", tclk);
+ orion_clkdev_add(NULL, "orion_spi.1", tclk);
+ orion_clkdev_add(NULL, MV643XX_ETH_NAME ".0", tclk);
+ orion_clkdev_add(NULL, MV643XX_ETH_NAME ".1", tclk);
+ orion_clkdev_add(NULL, MV643XX_ETH_NAME ".2", tclk);
+ orion_clkdev_add(NULL, MV643XX_ETH_NAME ".3", tclk);
+ orion_clkdev_add(NULL, "orion_wdt", tclk);
+}
+
/* Fill in the resources structure and link it into the platform
device structure. There is always a memory region, and nearly
always an interrupt.*/
@@ -49,6 +75,12 @@ static void fill_resources(struct platform_device *device,
/*****************************************************************************
* UART
****************************************************************************/
+static unsigned long __init uart_get_clk_rate(struct clk *clk)
+{
+ clk_prepare_enable(clk);
+ return clk_get_rate(clk);
+}
+
static void __init uart_complete(
struct platform_device *orion_uart,
struct plat_serial8250_port *data,
@@ -56,12 +88,12 @@ static void __init uart_complete(
unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk)
+ struct clk *clk)
{
data->mapbase = mapbase;
data->membase = (void __iomem *)membase;
data->irq = irq;
- data->uartclk = uartclk;
+ data->uartclk = uart_get_clk_rate(clk);
orion_uart->dev.platform_data = data;
fill_resources(orion_uart, resources, mapbase, 0xff, irq);
@@ -90,10 +122,10 @@ static struct platform_device orion_uart0 = {
void __init orion_uart0_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk)
+ struct clk *clk)
{
uart_complete(&orion_uart0, orion_uart0_data, orion_uart0_resources,
- membase, mapbase, irq, uartclk);
+ membase, mapbase, irq, clk);
}
/*****************************************************************************
@@ -118,10 +150,10 @@ static struct platform_device orion_uart1 = {
void __init orion_uart1_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk)
+ struct clk *clk)
{
uart_complete(&orion_uart1, orion_uart1_data, orion_uart1_resources,
- membase, mapbase, irq, uartclk);
+ membase, mapbase, irq, clk);
}
/*****************************************************************************
@@ -146,10 +178,10 @@ static struct platform_device orion_uart2 = {
void __init orion_uart2_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk)
+ struct clk *clk)
{
uart_complete(&orion_uart2, orion_uart2_data, orion_uart2_resources,
- membase, mapbase, irq, uartclk);
+ membase, mapbase, irq, clk);
}
/*****************************************************************************
@@ -174,10 +206,10 @@ static struct platform_device orion_uart3 = {
void __init orion_uart3_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk)
+ struct clk *clk)
{
uart_complete(&orion_uart3, orion_uart3_data, orion_uart3_resources,
- membase, mapbase, irq, uartclk);
+ membase, mapbase, irq, clk);
}
/*****************************************************************************
@@ -203,13 +235,11 @@ void __init orion_rtc_init(unsigned long mapbase,
****************************************************************************/
static __init void ge_complete(
struct mv643xx_eth_shared_platform_data *orion_ge_shared_data,
- int tclk,
struct resource *orion_ge_resource, unsigned long irq,
struct platform_device *orion_ge_shared,
struct mv643xx_eth_platform_data *eth_data,
struct platform_device *orion_ge)
{
- orion_ge_shared_data->t_clk = tclk;
orion_ge_resource->start = irq;
orion_ge_resource->end = irq;
eth_data->shared = orion_ge_shared;
@@ -260,12 +290,11 @@ static struct platform_device orion_ge00 = {
void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk)
+ unsigned long irq_err)
{
fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
- ge_complete(&orion_ge00_shared_data, tclk,
+ ge_complete(&orion_ge00_shared_data,
orion_ge00_resources, irq, &orion_ge00_shared,
eth_data, &orion_ge00);
}
@@ -313,12 +342,11 @@ static struct platform_device orion_ge01 = {
void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk)
+ unsigned long irq_err)
{
fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
- ge_complete(&orion_ge01_shared_data, tclk,
+ ge_complete(&orion_ge01_shared_data,
orion_ge01_resources, irq, &orion_ge01_shared,
eth_data, &orion_ge01);
}
@@ -366,12 +394,11 @@ static struct platform_device orion_ge10 = {
void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk)
+ unsigned long irq_err)
{
fill_resources(&orion_ge10_shared, orion_ge10_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
- ge_complete(&orion_ge10_shared_data, tclk,
+ ge_complete(&orion_ge10_shared_data,
orion_ge10_resources, irq, &orion_ge10_shared,
eth_data, &orion_ge10);
}
@@ -419,12 +446,11 @@ static struct platform_device orion_ge11 = {
void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk)
+ unsigned long irq_err)
{
fill_resources(&orion_ge11_shared, orion_ge11_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
- ge_complete(&orion_ge11_shared_data, tclk,
+ ge_complete(&orion_ge11_shared_data,
orion_ge11_resources, irq, &orion_ge11_shared,
eth_data, &orion_ge11);
}
@@ -521,44 +547,32 @@ void __init orion_i2c_1_init(unsigned long mapbase,
/*****************************************************************************
* SPI
****************************************************************************/
-static struct orion_spi_info orion_spi_plat_data;
static struct resource orion_spi_resources;
static struct platform_device orion_spi = {
.name = "orion_spi",
.id = 0,
- .dev = {
- .platform_data = &orion_spi_plat_data,
- },
};
-static struct orion_spi_info orion_spi_1_plat_data;
static struct resource orion_spi_1_resources;
static struct platform_device orion_spi_1 = {
.name = "orion_spi",
.id = 1,
- .dev = {
- .platform_data = &orion_spi_1_plat_data,
- },
};
/* Note: The SPI silicon core does have interrupts. However the
* current Linux software driver does not use interrupts. */
-void __init orion_spi_init(unsigned long mapbase,
- unsigned long tclk)
+void __init orion_spi_init(unsigned long mapbase)
{
- orion_spi_plat_data.tclk = tclk;
fill_resources(&orion_spi, &orion_spi_resources,
mapbase, SZ_512 - 1, NO_IRQ);
platform_device_register(&orion_spi);
}
-void __init orion_spi_1_init(unsigned long mapbase,
- unsigned long tclk)
+void __init orion_spi_1_init(unsigned long mapbase)
{
- orion_spi_1_plat_data.tclk = tclk;
fill_resources(&orion_spi_1, &orion_spi_1_resources,
mapbase, SZ_512 - 1, NO_IRQ);
platform_device_register(&orion_spi_1);
@@ -567,24 +581,18 @@ void __init orion_spi_1_init(unsigned long mapbase,
/*****************************************************************************
* Watchdog
****************************************************************************/
-static struct orion_wdt_platform_data orion_wdt_data;
-
static struct resource orion_wdt_resource =
DEFINE_RES_MEM(TIMER_VIRT_BASE, 0x28);
static struct platform_device orion_wdt_device = {
.name = "orion_wdt",
.id = -1,
- .dev = {
- .platform_data = &orion_wdt_data,
- },
- .resource = &orion_wdt_resource,
.num_resources = 1,
+ .resource = &orion_wdt_resource,
};
-void __init orion_wdt_init(unsigned long tclk)
+void __init orion_wdt_init(void)
{
- orion_wdt_data.tclk = tclk;
platform_device_register(&orion_wdt_device);
}
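
Taken together, these changes move the tclk rate out of the platform-data helpers: board or SoC code is now expected to register a tclk with the common clock framework, call orion_clkdev_init(), and hand struct clk pointers to the UART helpers. A rough sketch under those assumptions (the 166 MHz rate and the helper name are placeholders, not values from this patch):

#include <linux/init.h>
#include <linux/types.h>
#include <linux/clk-provider.h>
#include <plat/common.h>

/* Hypothetical SoC init: one fixed-rate tclk feeding the shared peripherals. */
static void __init example_orion_clk_init(unsigned int uart_membase,
					  resource_size_t uart_mapbase,
					  unsigned int uart_irq)
{
	struct clk *tclk;

	/* 166666667 Hz is a placeholder rate. */
	tclk = clk_register_fixed_rate(NULL, "tclk", NULL,
				       CLK_IS_ROOT, 166666667);
	orion_clkdev_init(tclk);

	/* The UART helpers now take the clk itself instead of a rate. */
	orion_uart0_init(uart_membase, uart_mapbase, uart_irq, tclk);
}
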
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index a7fa005a5a0e..e00fdb213609 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -16,22 +16,22 @@ struct dsa_platform_data;
void __init orion_uart0_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk);
+ struct clk *clk);
void __init orion_uart1_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk);
+ struct clk *clk);
void __init orion_uart2_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk);
+ struct clk *clk);
void __init orion_uart3_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk);
+ struct clk *clk);
void __init orion_rtc_init(unsigned long mapbase,
unsigned long irq);
@@ -39,29 +39,26 @@ void __init orion_rtc_init(unsigned long mapbase,
void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk);
+ unsigned long irq_err);
void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk);
+ unsigned long irq_err);
void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk);
+ unsigned long irq_err);
void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk);
+ unsigned long irq_err);
void __init orion_ge00_switch_init(struct dsa_platform_data *d,
int irq);
+
void __init orion_i2c_init(unsigned long mapbase,
unsigned long irq,
unsigned long freq_m);
@@ -70,13 +67,11 @@ void __init orion_i2c_1_init(unsigned long mapbase,
unsigned long irq,
unsigned long freq_m);
-void __init orion_spi_init(unsigned long mapbase,
- unsigned long tclk);
+void __init orion_spi_init(unsigned long mapbase);
-void __init orion_spi_1_init(unsigned long mapbase,
- unsigned long tclk);
+void __init orion_spi_1_init(unsigned long mapbase);
-void __init orion_wdt_init(unsigned long tclk);
+void __init orion_wdt_init(void);
void __init orion_xor0_init(unsigned long mapbase_low,
unsigned long mapbase_high,
@@ -106,4 +101,9 @@ void __init orion_crypto_init(unsigned long mapbase,
unsigned long srambase,
unsigned long sram_size,
unsigned long irq);
+
+void __init orion_clkdev_add(const char *con_id, const char *dev_id,
+ struct clk *clk);
+
+void __init orion_clkdev_init(struct clk *tclk);
#endif
diff --git a/arch/arm/plat-orion/include/plat/orion_wdt.h b/arch/arm/plat-orion/include/plat/orion_wdt.h
deleted file mode 100644
index 665c362a2fba..000000000000
--- a/arch/arm/plat-orion/include/plat/orion_wdt.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * arch/arm/plat-orion/include/plat/orion_wdt.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __PLAT_ORION_WDT_H
-#define __PLAT_ORION_WDT_H
-
-struct orion_wdt_platform_data {
- u32 tclk; /* no <linux/clk.h> support yet */
-};
-
-
-#endif
-
diff --git a/arch/arm/plat-orion/pcie.c b/arch/arm/plat-orion/pcie.c
index 86dbb5bdb172..f20a321088a2 100644
--- a/arch/arm/plat-orion/pcie.c
+++ b/arch/arm/plat-orion/pcie.c
@@ -52,12 +52,12 @@
#define PCIE_DEBUG_SOFT_RESET (1<<20)
-u32 __init orion_pcie_dev_id(void __iomem *base)
+u32 orion_pcie_dev_id(void __iomem *base)
{
return readl(base + PCIE_DEV_ID_OFF) >> 16;
}
-u32 __init orion_pcie_rev(void __iomem *base)
+u32 orion_pcie_rev(void __iomem *base)
{
return readl(base + PCIE_DEV_REV_OFF) & 0xff;
}
diff --git a/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h b/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h
index abcc36eb1242..5ce8d5e6ea51 100644
--- a/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h
+++ b/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h
@@ -44,6 +44,10 @@ struct pxa27x_keypad_platform_data {
/* direct keys */
int direct_key_num;
unsigned int direct_key_map[MAX_DIRECT_KEY_NUM];
+ /* the direct key output may be active-low */
+ int direct_key_low_active;
+ /* allow the board to choose the starting direct key */
+ unsigned int direct_key_mask;
/* rotary encoders 0 */
int enable_rotary0;
diff --git a/arch/arm/plat-s3c24xx/Makefile b/arch/arm/plat-s3c24xx/Makefile
index 2467b800cc76..9f60549c8da1 100644
--- a/arch/arm/plat-s3c24xx/Makefile
+++ b/arch/arm/plat-s3c24xx/Makefile
@@ -12,10 +12,7 @@ obj- :=
# Core files
-obj-y += cpu.o
obj-y += irq.o
-obj-y += dev-uart.o
-obj-y += clock.o
obj-$(CONFIG_S3C24XX_DCLK) += clock-dclk.o
obj-$(CONFIG_CPU_FREQ_S3C24XX) += cpu-freq.o
@@ -23,9 +20,6 @@ obj-$(CONFIG_CPU_FREQ_S3C24XX_DEBUGFS) += cpu-freq-debugfs.o
# Architecture dependent builds
-obj-$(CONFIG_PM) += pm.o
-obj-$(CONFIG_PM) += irq-pm.o
-obj-$(CONFIG_PM) += sleep.o
obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o
obj-$(CONFIG_S3C24XX_DMA) += dma.o
obj-$(CONFIG_S3C2410_IOTIMING) += s3c2410-iotiming.o
diff --git a/arch/arm/plat-s3c24xx/clock.c b/arch/arm/plat-s3c24xx/clock.c
deleted file mode 100644
index 931d26d1a54b..000000000000
--- a/arch/arm/plat-s3c24xx/clock.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/* linux/arch/arm/plat-s3c24xx/clock.c
- *
- * Copyright (c) 2004-2005 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX Core clock control support
- *
- * Based on, and code from linux/arch/arm/mach-versatile/clock.c
- **
- ** Copyright (C) 2004 ARM Limited.
- ** Written by Deep Blue Solutions Limited.
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <asm/irq.h>
-
-#include <mach/regs-clock.h>
-#include <mach/regs-gpio.h>
-
-#include <plat/cpu-freq.h>
-
-#include <plat/clock.h>
-#include <plat/cpu.h>
-#include <plat/pll.h>
-
-/* initialise all the clocks */
-
-void __init_or_cpufreq s3c24xx_setup_clocks(unsigned long fclk,
- unsigned long hclk,
- unsigned long pclk)
-{
- clk_upll.rate = s3c24xx_get_pll(__raw_readl(S3C2410_UPLLCON),
- clk_xtal.rate);
-
- clk_mpll.rate = fclk;
- clk_h.rate = hclk;
- clk_p.rate = pclk;
- clk_f.rate = fclk;
-}
diff --git a/arch/arm/plat-s3c24xx/dev-uart.c b/arch/arm/plat-s3c24xx/dev-uart.c
deleted file mode 100644
index 9ab22e662fff..000000000000
--- a/arch/arm/plat-s3c24xx/dev-uart.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/* linux/arch/arm/plat-s3c24xx/dev-uart.c
- *
- * Copyright (c) 2004 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * Base S3C24XX UART resource and platform device definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/serial_core.h>
-#include <linux/platform_device.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
-#include <mach/hardware.h>
-#include <mach/map.h>
-
-#include <plat/devs.h>
-#include <plat/regs-serial.h>
-
-/* Serial port registrations */
-
-static struct resource s3c2410_uart0_resource[] = {
- [0] = {
- .start = S3C2410_PA_UART0,
- .end = S3C2410_PA_UART0 + 0x3fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_S3CUART_RX0,
- .end = IRQ_S3CUART_ERR0,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct resource s3c2410_uart1_resource[] = {
- [0] = {
- .start = S3C2410_PA_UART1,
- .end = S3C2410_PA_UART1 + 0x3fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_S3CUART_RX1,
- .end = IRQ_S3CUART_ERR1,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct resource s3c2410_uart2_resource[] = {
- [0] = {
- .start = S3C2410_PA_UART2,
- .end = S3C2410_PA_UART2 + 0x3fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_S3CUART_RX2,
- .end = IRQ_S3CUART_ERR2,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct resource s3c2410_uart3_resource[] = {
- [0] = {
- .start = S3C2443_PA_UART3,
- .end = S3C2443_PA_UART3 + 0x3fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_S3CUART_RX3,
- .end = IRQ_S3CUART_ERR3,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct s3c24xx_uart_resources s3c2410_uart_resources[] __initdata = {
- [0] = {
- .resources = s3c2410_uart0_resource,
- .nr_resources = ARRAY_SIZE(s3c2410_uart0_resource),
- },
- [1] = {
- .resources = s3c2410_uart1_resource,
- .nr_resources = ARRAY_SIZE(s3c2410_uart1_resource),
- },
- [2] = {
- .resources = s3c2410_uart2_resource,
- .nr_resources = ARRAY_SIZE(s3c2410_uart2_resource),
- },
- [3] = {
- .resources = s3c2410_uart3_resource,
- .nr_resources = ARRAY_SIZE(s3c2410_uart3_resource),
- },
-};
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
deleted file mode 100644
index 96bea3202304..000000000000
--- a/arch/arm/plat-s5p/Kconfig
+++ /dev/null
@@ -1,140 +0,0 @@
-# arch/arm/plat-s5p/Kconfig
-#
-# Copyright (c) 2009 Samsung Electronics Co., Ltd.
-# http://www.samsung.com/
-#
-# Licensed under GPLv2
-
-config PLAT_S5P
- bool
- depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
- default y
- select ARM_VIC if !ARCH_EXYNOS
- select ARM_GIC if ARCH_EXYNOS
- select GIC_NON_BANKED if ARCH_EXYNOS4
- select NO_IOPORT
- select ARCH_REQUIRE_GPIOLIB
- select S3C_GPIO_TRACK
- select S5P_GPIO_DRVSTR
- select SAMSUNG_GPIOLIB_4BIT
- select PLAT_SAMSUNG
- select SAMSUNG_CLKSRC
- select SAMSUNG_IRQ_VIC_TIMER
- help
- Base platform code for Samsung's S5P series SoC.
-
-config S5P_EXT_INT
- bool
- help
- Use the external interrupts (other than GPIO interrupts.)
- Note: Do not choose this for S5P6440 and S5P6450.
-
-config S5P_GPIO_INT
- bool
- help
- Common code for the GPIO interrupts (other than external interrupts.)
-
-config S5P_HRT
- bool
- select SAMSUNG_DEV_PWM
- help
- Use the High Resolution timer support
-
-config S5P_DEV_UART
- def_bool y
- depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210)
-
-config S5P_PM
- bool
- help
- Common code for power management support on S5P and newer SoCs
- Note: Do not select this for S5P6440 and S5P6450.
-
-comment "System MMU"
-
-config S5P_SYSTEM_MMU
- bool "S5P SYSTEM MMU"
- depends on ARCH_EXYNOS4
- help
- Say Y here if you want to enable System MMU
-
-config S5P_SLEEP
- bool
- help
- Internal config node to apply common S5P sleep management code.
- Can be selected by S5P and newer SoCs with similar sleep procedure.
-
-config S5P_DEV_FIMC0
- bool
- help
- Compile in platform device definitions for FIMC controller 0
-
-config S5P_DEV_FIMC1
- bool
- help
- Compile in platform device definitions for FIMC controller 1
-
-config S5P_DEV_FIMC2
- bool
- help
- Compile in platform device definitions for FIMC controller 2
-
-config S5P_DEV_FIMC3
- bool
- help
- Compile in platform device definitions for FIMC controller 3
-
-config S5P_DEV_JPEG
- bool
- help
- Compile in platform device definitions for JPEG codec
-
-config S5P_DEV_G2D
- bool
- help
- Compile in platform device definitions for G2D device
-
-config S5P_DEV_FIMD0
- bool
- help
- Compile in platform device definitions for FIMD controller 0
-
-config S5P_DEV_I2C_HDMIPHY
- bool
- help
- Compile in platform device definitions for I2C HDMIPHY controller
-
-config S5P_DEV_MFC
- bool
- help
- Compile in platform device definitions for MFC
-
-config S5P_DEV_ONENAND
- bool
- help
- Compile in platform device definition for OneNAND controller
-
-config S5P_DEV_CSIS0
- bool
- help
- Compile in platform device definitions for MIPI-CSIS channel 0
-
-config S5P_DEV_CSIS1
- bool
- help
- Compile in platform device definitions for MIPI-CSIS channel 1
-
-config S5P_DEV_TV
- bool
- help
- Compile in platform device definition for TV interface
-
-config S5P_DEV_USB_EHCI
- bool
- help
- Compile in platform device definition for USB EHCI
-
-config S5P_SETUP_MIPIPHY
- bool
- help
- Compile in common setup code for MIPI-CSIS and MIPI-DSIM devices
diff --git a/arch/arm/plat-s5p/Makefile b/arch/arm/plat-s5p/Makefile
deleted file mode 100644
index 4bd824136659..000000000000
--- a/arch/arm/plat-s5p/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-# arch/arm/plat-s5p/Makefile
-#
-# Copyright (c) 2009 Samsung Electronics Co., Ltd.
-# http://www.samsung.com/
-#
-# Licensed under GPLv2
-
-obj-y :=
-obj-m :=
-obj-n := dummy.o
-obj- :=
-
-# Core files
-
-obj-y += clock.o
-obj-y += irq.o
-obj-$(CONFIG_S5P_EXT_INT) += irq-eint.o
-obj-$(CONFIG_S5P_GPIO_INT) += irq-gpioint.o
-obj-$(CONFIG_S5P_SYSTEM_MMU) += sysmmu.o
-obj-$(CONFIG_S5P_PM) += pm.o irq-pm.o
-obj-$(CONFIG_S5P_SLEEP) += sleep.o
-obj-$(CONFIG_S5P_HRT) += s5p-time.o
-
-# devices
-
-obj-$(CONFIG_S5P_DEV_UART) += dev-uart.o
-obj-$(CONFIG_S5P_DEV_MFC) += dev-mfc.o
-obj-$(CONFIG_S5P_SETUP_MIPIPHY) += setup-mipiphy.o
diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c
deleted file mode 100644
index c8bec9c7655d..000000000000
--- a/arch/arm/plat-s5p/sysmmu.c
+++ /dev/null
@@ -1,313 +0,0 @@
-/* linux/arch/arm/plat-s5p/sysmmu.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/export.h>
-
-#include <asm/pgtable.h>
-
-#include <mach/map.h>
-#include <mach/regs-sysmmu.h>
-#include <plat/sysmmu.h>
-
-#define CTRL_ENABLE 0x5
-#define CTRL_BLOCK 0x7
-#define CTRL_DISABLE 0x0
-
-static struct device *dev;
-
-static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
- S5P_PAGE_FAULT_ADDR,
- S5P_AR_FAULT_ADDR,
- S5P_AW_FAULT_ADDR,
- S5P_DEFAULT_SLAVE_ADDR,
- S5P_AR_FAULT_ADDR,
- S5P_AR_FAULT_ADDR,
- S5P_AW_FAULT_ADDR,
- S5P_AW_FAULT_ADDR
-};
-
-static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
- "PAGE FAULT",
- "AR MULTI-HIT FAULT",
- "AW MULTI-HIT FAULT",
- "BUS ERROR",
- "AR SECURITY PROTECTION FAULT",
- "AR ACCESS PROTECTION FAULT",
- "AW SECURITY PROTECTION FAULT",
- "AW ACCESS PROTECTION FAULT"
-};
-
-static int (*fault_handlers[S5P_SYSMMU_TOTAL_IPNUM])(
- enum S5P_SYSMMU_INTERRUPT_TYPE itype,
- unsigned long pgtable_base,
- unsigned long fault_addr);
-
-/*
- * If adjacent 2 bits are true, the system MMU is enabled.
- * The system MMU is disabled, otherwise.
- */
-static unsigned long sysmmu_states;
-
-static inline void set_sysmmu_active(sysmmu_ips ips)
-{
- sysmmu_states |= 3 << (ips * 2);
-}
-
-static inline void set_sysmmu_inactive(sysmmu_ips ips)
-{
- sysmmu_states &= ~(3 << (ips * 2));
-}
-
-static inline int is_sysmmu_active(sysmmu_ips ips)
-{
- return sysmmu_states & (3 << (ips * 2));
-}
-
-static void __iomem *sysmmusfrs[S5P_SYSMMU_TOTAL_IPNUM];
-
-static inline void sysmmu_block(sysmmu_ips ips)
-{
- __raw_writel(CTRL_BLOCK, sysmmusfrs[ips] + S5P_MMU_CTRL);
- dev_dbg(dev, "%s is blocked.\n", sysmmu_ips_name[ips]);
-}
-
-static inline void sysmmu_unblock(sysmmu_ips ips)
-{
- __raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
- dev_dbg(dev, "%s is unblocked.\n", sysmmu_ips_name[ips]);
-}
-
-static inline void __sysmmu_tlb_invalidate(sysmmu_ips ips)
-{
- __raw_writel(0x1, sysmmusfrs[ips] + S5P_MMU_FLUSH);
- dev_dbg(dev, "TLB of %s is invalidated.\n", sysmmu_ips_name[ips]);
-}
-
-static inline void __sysmmu_set_ptbase(sysmmu_ips ips, unsigned long pgd)
-{
- if (unlikely(pgd == 0)) {
- pgd = (unsigned long)ZERO_PAGE(0);
- __raw_writel(0x20, sysmmusfrs[ips] + S5P_MMU_CFG); /* 4KB LV1 */
- } else {
- __raw_writel(0x0, sysmmusfrs[ips] + S5P_MMU_CFG); /* 16KB LV1 */
- }
-
- __raw_writel(pgd, sysmmusfrs[ips] + S5P_PT_BASE_ADDR);
-
- dev_dbg(dev, "Page table base of %s is initialized with 0x%08lX.\n",
- sysmmu_ips_name[ips], pgd);
- __sysmmu_tlb_invalidate(ips);
-}
-
-void sysmmu_set_fault_handler(sysmmu_ips ips,
- int (*handler)(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
- unsigned long pgtable_base,
- unsigned long fault_addr))
-{
- BUG_ON(!((ips >= SYSMMU_MDMA) && (ips < S5P_SYSMMU_TOTAL_IPNUM)));
- fault_handlers[ips] = handler;
-}
-
-static irqreturn_t s5p_sysmmu_irq(int irq, void *dev_id)
-{
- /* SYSMMU is in blocked when interrupt occurred. */
- unsigned long base = 0;
- sysmmu_ips ips = (sysmmu_ips)dev_id;
- enum S5P_SYSMMU_INTERRUPT_TYPE itype;
-
- itype = (enum S5P_SYSMMU_INTERRUPT_TYPE)
- __ffs(__raw_readl(sysmmusfrs[ips] + S5P_INT_STATUS));
-
- BUG_ON(!((itype >= 0) && (itype < 8)));
-
- dev_alert(dev, "%s occurred by %s.\n", sysmmu_fault_name[itype],
- sysmmu_ips_name[ips]);
-
- if (fault_handlers[ips]) {
- unsigned long addr;
-
- base = __raw_readl(sysmmusfrs[ips] + S5P_PT_BASE_ADDR);
- addr = __raw_readl(sysmmusfrs[ips] + fault_reg_offset[itype]);
-
- if (fault_handlers[ips](itype, base, addr)) {
- __raw_writel(1 << itype,
- sysmmusfrs[ips] + S5P_INT_CLEAR);
- dev_notice(dev, "%s from %s is resolved."
- " Retrying translation.\n",
- sysmmu_fault_name[itype], sysmmu_ips_name[ips]);
- } else {
- base = 0;
- }
- }
-
- sysmmu_unblock(ips);
-
- if (!base)
- dev_notice(dev, "%s from %s is not handled.\n",
- sysmmu_fault_name[itype], sysmmu_ips_name[ips]);
-
- return IRQ_HANDLED;
-}
-
-void s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd)
-{
- if (is_sysmmu_active(ips)) {
- sysmmu_block(ips);
- __sysmmu_set_ptbase(ips, pgd);
- sysmmu_unblock(ips);
- } else {
- dev_dbg(dev, "%s is disabled. "
- "Skipping initializing page table base.\n",
- sysmmu_ips_name[ips]);
- }
-}
-
-void s5p_sysmmu_enable(sysmmu_ips ips, unsigned long pgd)
-{
- if (!is_sysmmu_active(ips)) {
- sysmmu_clk_enable(ips);
-
- __sysmmu_set_ptbase(ips, pgd);
-
- __raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
-
- set_sysmmu_active(ips);
- dev_dbg(dev, "%s is enabled.\n", sysmmu_ips_name[ips]);
- } else {
- dev_dbg(dev, "%s is already enabled.\n", sysmmu_ips_name[ips]);
- }
-}
-
-void s5p_sysmmu_disable(sysmmu_ips ips)
-{
- if (is_sysmmu_active(ips)) {
- __raw_writel(CTRL_DISABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
- set_sysmmu_inactive(ips);
- sysmmu_clk_disable(ips);
- dev_dbg(dev, "%s is disabled.\n", sysmmu_ips_name[ips]);
- } else {
- dev_dbg(dev, "%s is already disabled.\n", sysmmu_ips_name[ips]);
- }
-}
-
-void s5p_sysmmu_tlb_invalidate(sysmmu_ips ips)
-{
- if (is_sysmmu_active(ips)) {
- sysmmu_block(ips);
- __sysmmu_tlb_invalidate(ips);
- sysmmu_unblock(ips);
- } else {
- dev_dbg(dev, "%s is disabled. "
- "Skipping invalidating TLB.\n", sysmmu_ips_name[ips]);
- }
-}
-
-static int s5p_sysmmu_probe(struct platform_device *pdev)
-{
- int i, ret;
- struct resource *res, *mem;
-
- dev = &pdev->dev;
-
- for (i = 0; i < S5P_SYSMMU_TOTAL_IPNUM; i++) {
- int irq;
-
- sysmmu_clk_init(dev, i);
- sysmmu_clk_disable(i);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- dev_err(dev, "Failed to get the resource of %s.\n",
- sysmmu_ips_name[i]);
- ret = -ENODEV;
- goto err_res;
- }
-
- mem = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (!mem) {
- dev_err(dev, "Failed to request the memory region of %s.\n",
- sysmmu_ips_name[i]);
- ret = -EBUSY;
- goto err_res;
- }
-
- sysmmusfrs[i] = ioremap(res->start, resource_size(res));
- if (!sysmmusfrs[i]) {
- dev_err(dev, "Failed to ioremap() for %s.\n",
- sysmmu_ips_name[i]);
- ret = -ENXIO;
- goto err_reg;
- }
-
- irq = platform_get_irq(pdev, i);
- if (irq <= 0) {
- dev_err(dev, "Failed to get the IRQ resource of %s.\n",
- sysmmu_ips_name[i]);
- ret = -ENOENT;
- goto err_map;
- }
-
- if (request_irq(irq, s5p_sysmmu_irq, IRQF_DISABLED,
- pdev->name, (void *)i)) {
- dev_err(dev, "Failed to request IRQ for %s.\n",
- sysmmu_ips_name[i]);
- ret = -ENOENT;
- goto err_map;
- }
- }
-
- return 0;
-
-err_map:
- iounmap(sysmmusfrs[i]);
-err_reg:
- release_mem_region(mem->start, resource_size(mem));
-err_res:
- return ret;
-}
-
-static int s5p_sysmmu_remove(struct platform_device *pdev)
-{
- return 0;
-}
-int s5p_sysmmu_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
-int s5p_sysmmu_runtime_resume(struct device *dev)
-{
- return 0;
-}
-
-const struct dev_pm_ops s5p_sysmmu_pm_ops = {
- .runtime_suspend = s5p_sysmmu_runtime_suspend,
- .runtime_resume = s5p_sysmmu_runtime_resume,
-};
-
-static struct platform_driver s5p_sysmmu_driver = {
- .probe = s5p_sysmmu_probe,
- .remove = s5p_sysmmu_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = "s5p-sysmmu",
- .pm = &s5p_sysmmu_pm_ops,
- }
-};
-
-static int __init s5p_sysmmu_init(void)
-{
- return platform_driver_register(&s5p_sysmmu_driver);
-}
-arch_initcall(s5p_sysmmu_init);
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index a0ffc77da809..a2fae4ea0936 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -13,6 +13,24 @@ config PLAT_SAMSUNG
help
Base platform code for all Samsung SoC based systems
+config PLAT_S5P
+ bool
+ depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
+ default y
+ select ARM_VIC if !ARCH_EXYNOS
+ select ARM_GIC if ARCH_EXYNOS
+ select GIC_NON_BANKED if ARCH_EXYNOS4
+ select NO_IOPORT
+ select ARCH_REQUIRE_GPIOLIB
+ select S3C_GPIO_TRACK
+ select S5P_GPIO_DRVSTR
+ select SAMSUNG_GPIOLIB_4BIT
+ select PLAT_SAMSUNG
+ select SAMSUNG_CLKSRC
+ select SAMSUNG_IRQ_VIC_TIMER
+ help
+ Base platform code for Samsung's S5P series SoC.
+
if PLAT_SAMSUNG
# boot configurations
@@ -50,6 +68,14 @@ config S3C_LOWLEVEL_UART_PORT
this configuration should be between zero and two. The port
must have been initialised by the boot-loader before use.
+# timer options
+
+config S5P_HRT
+ bool
+ select SAMSUNG_DEV_PWM
+ help
+ Use the High Resolution timer support
+
# clock options
config SAMSUNG_CLKSRC
@@ -58,6 +84,11 @@ config SAMSUNG_CLKSRC
Select the clock code for the clksrc implementation
used by newer systems such as the S3C64XX.
+config S5P_CLOCK
+ def_bool (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
+ help
+ Support the common clock code for ARCH_S5P and ARCH_EXYNOS SoCs
+
# options for IRQ support
config SAMSUNG_IRQ_VIC_TIMER
@@ -65,6 +96,22 @@ config SAMSUNG_IRQ_VIC_TIMER
help
Internal configuration to build the VIC timer interrupt code.
+config S5P_IRQ
+ def_bool (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
+ help
+ Support the common interrupt code for ARCH_S5P and ARCH_EXYNOS SoCs
+
+config S5P_EXT_INT
+ bool
+ help
+ Use the external interrupts (other than GPIO interrupts.)
+ Note: Do not choose this for S5P6440 and S5P6450.
+
+config S5P_GPIO_INT
+ bool
+ help
+ Common code for the GPIO interrupts (other than external interrupts.)
+
# options for gpio configuration support
config SAMSUNG_GPIOLIB_4BIT
@@ -117,6 +164,12 @@ config S3C_GPIO_TRACK
Internal configuration option to enable the s3c specific gpio
chip tracking if the platform requires it.
+# uart options
+
+config S5P_DEV_UART
+ def_bool y
+ depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210)
+
# ADC driver
config S3C_ADC
@@ -274,6 +327,76 @@ config SAMSUNG_DEV_BACKLIGHT
help
Compile in platform device definition LCD backlight with PWM Timer
+config S5P_DEV_CSIS0
+ bool
+ help
+ Compile in platform device definitions for MIPI-CSIS channel 0
+
+config S5P_DEV_CSIS1
+ bool
+ help
+ Compile in platform device definitions for MIPI-CSIS channel 1
+
+config S5P_DEV_FIMC0
+ bool
+ help
+ Compile in platform device definitions for FIMC controller 0
+
+config S5P_DEV_FIMC1
+ bool
+ help
+ Compile in platform device definitions for FIMC controller 1
+
+config S5P_DEV_FIMC2
+ bool
+ help
+ Compile in platform device definitions for FIMC controller 2
+
+config S5P_DEV_FIMC3
+ bool
+ help
+ Compile in platform device definitions for FIMC controller 3
+
+config S5P_DEV_FIMD0
+ bool
+ help
+ Compile in platform device definitions for FIMD controller 0
+
+config S5P_DEV_G2D
+ bool
+ help
+ Compile in platform device definitions for G2D device
+
+config S5P_DEV_I2C_HDMIPHY
+ bool
+ help
+ Compile in platform device definitions for I2C HDMIPHY controller
+
+config S5P_DEV_JPEG
+ bool
+ help
+ Compile in platform device definitions for JPEG codec
+
+config S5P_DEV_MFC
+ bool
+ help
+ Compile in memory setup (init) code for MFC
+
+config S5P_DEV_ONENAND
+ bool
+ help
+ Compile in platform device definition for OneNAND controller
+
+config S5P_DEV_TV
+ bool
+ help
+ Compile in platform device definition for TV interface
+
+config S5P_DEV_USB_EHCI
+ bool
+ help
+ Compile in platform device definition for USB EHCI
+
config S3C24XX_PWM
bool "PWM device support"
select HAVE_PWM
@@ -281,6 +404,11 @@ config S3C24XX_PWM
Support for exporting the PWM timer blocks via the pwm device
system
+config S5P_SETUP_MIPIPHY
+ bool
+ help
+ Compile in common setup code for MIPI-CSIS and MIPI-DSIM devices
+
# DMA
config S3C_DMA
@@ -291,7 +419,7 @@ config S3C_DMA
config SAMSUNG_DMADEV
bool
select DMADEVICES
- select PL330_DMA if (CPU_EXYNOS4210 || CPU_S5PV210 || CPU_S5PC100 || \
+ select PL330_DMA if (ARCH_EXYNOS5 || ARCH_EXYNOS4 || CPU_S5PV210 || CPU_S5PC100 || \
CPU_S5P6450 || CPU_S5P6440)
select ARM_AMBA
help
@@ -351,6 +479,18 @@ config SAMSUNG_WAKEMASK
and above. This code allows a set of interrupt to wakeup-mask
mappings. See <plat/wakeup-mask.h>
+config S5P_PM
+ bool
+ help
+ Common code for power management support on S5P and newer SoCs
+ Note: Do not select this for S5P6440 and S5P6450.
+
+config S5P_SLEEP
+ bool
+ help
+ Internal config node to apply common S5P sleep management code.
+ Can be selected by S5P and newer SoCs with similar sleep procedure.
+
comment "Power Domain"
config SAMSUNG_PD
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 6012366f33cb..860b2db4db15 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -13,12 +13,18 @@ obj- :=
obj-y += init.o cpu.o
obj-$(CONFIG_ARCH_USES_GETTIMEOFFSET) += time.o
+obj-$(CONFIG_S5P_HRT) += s5p-time.o
+
obj-y += clock.o
obj-y += pwm-clock.o
obj-$(CONFIG_SAMSUNG_CLKSRC) += clock-clksrc.o
+obj-$(CONFIG_S5P_CLOCK) += s5p-clock.o
obj-$(CONFIG_SAMSUNG_IRQ_VIC_TIMER) += irq-vic-timer.o
+obj-$(CONFIG_S5P_IRQ) += s5p-irq.o
+obj-$(CONFIG_S5P_EXT_INT) += s5p-irq-eint.o
+obj-$(CONFIG_S5P_GPIO_INT) += s5p-irq-gpioint.o
# ADC
@@ -30,9 +36,13 @@ obj-y += platformdata.o
obj-y += devs.o
obj-y += dev-uart.o
+obj-$(CONFIG_S5P_DEV_MFC) += s5p-dev-mfc.o
+obj-$(CONFIG_S5P_DEV_UART) += s5p-dev-uart.o
obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT) += dev-backlight.o
+obj-$(CONFIG_S5P_SETUP_MIPIPHY) += setup-mipiphy.o
+
# DMA support
obj-$(CONFIG_S3C_DMA) += dma.o s3c-dma-ops.o
@@ -47,6 +57,9 @@ obj-$(CONFIG_SAMSUNG_PM_CHECK) += pm-check.o
obj-$(CONFIG_SAMSUNG_WAKEMASK) += wakeup-mask.o
+obj-$(CONFIG_S5P_PM) += s5p-pm.o s5p-irq-pm.o
+obj-$(CONFIG_S5P_SLEEP) += s5p-sleep.o
+
# PD support
obj-$(CONFIG_SAMSUNG_PD) += pd.o
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h
index 787ceaca0be8..0721293fad63 100644
--- a/arch/arm/plat-samsung/include/plat/cpu.h
+++ b/arch/arm/plat-samsung/include/plat/cpu.h
@@ -202,7 +202,7 @@ extern struct bus_type s3c2443_subsys;
extern struct bus_type s3c6410_subsys;
extern struct bus_type s5p64x0_subsys;
extern struct bus_type s5pv210_subsys;
-extern struct bus_type exynos4_subsys;
+extern struct bus_type exynos_subsys;
extern void (*s5pc1xx_idle)(void);
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 2155d4af62a3..61ca2f356c52 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -133,7 +133,8 @@ extern struct platform_device exynos4_device_pcm1;
extern struct platform_device exynos4_device_pcm2;
extern struct platform_device exynos4_device_pd[];
extern struct platform_device exynos4_device_spdif;
-extern struct platform_device exynos4_device_sysmmu;
+
+extern struct platform_device exynos_device_drm;
extern struct platform_device samsung_asoc_dma;
extern struct platform_device samsung_asoc_idma;
diff --git a/arch/arm/plat-samsung/include/plat/dma-pl330.h b/arch/arm/plat-samsung/include/plat/dma-pl330.h
index 0670f37aaaed..d384a8016b47 100644
--- a/arch/arm/plat-samsung/include/plat/dma-pl330.h
+++ b/arch/arm/plat-samsung/include/plat/dma-pl330.h
@@ -90,6 +90,7 @@ enum dma_ch {
DMACH_MIPI_HSI5,
DMACH_MIPI_HSI6,
DMACH_MIPI_HSI7,
+ DMACH_DISP1,
DMACH_MTOM_0,
DMACH_MTOM_1,
DMACH_MTOM_2,
diff --git a/arch/arm/plat-samsung/include/plat/s3c2416.h b/arch/arm/plat-samsung/include/plat/s3c2416.h
index de2b5bdc5ebd..7178e338e25e 100644
--- a/arch/arm/plat-samsung/include/plat/s3c2416.h
+++ b/arch/arm/plat-samsung/include/plat/s3c2416.h
@@ -24,6 +24,9 @@ extern void s3c2416_init_clocks(int xtal);
extern int s3c2416_baseclk_add(void);
extern void s3c2416_restart(char mode, const char *cmd);
+
+extern struct syscore_ops s3c2416_irq_syscore_ops;
+
#else
#define s3c2416_init_clocks NULL
#define s3c2416_init_uarts NULL
diff --git a/arch/arm/plat-samsung/include/plat/s5p-clock.h b/arch/arm/plat-samsung/include/plat/s5p-clock.h
index 1de4b32f98e9..8364b4bea8b8 100644
--- a/arch/arm/plat-samsung/include/plat/s5p-clock.h
+++ b/arch/arm/plat-samsung/include/plat/s5p-clock.h
@@ -32,8 +32,10 @@ extern struct clk clk_48m;
extern struct clk s5p_clk_27m;
extern struct clk clk_fout_apll;
extern struct clk clk_fout_bpll;
+extern struct clk clk_fout_bpll_div2;
extern struct clk clk_fout_cpll;
extern struct clk clk_fout_mpll;
+extern struct clk clk_fout_mpll_div2;
extern struct clk clk_fout_epll;
extern struct clk clk_fout_dpll;
extern struct clk clk_fout_vpll;
@@ -42,8 +44,10 @@ extern struct clk clk_vpll;
extern struct clksrc_sources clk_src_apll;
extern struct clksrc_sources clk_src_bpll;
+extern struct clksrc_sources clk_src_bpll_fout;
extern struct clksrc_sources clk_src_cpll;
extern struct clksrc_sources clk_src_mpll;
+extern struct clksrc_sources clk_src_mpll_fout;
extern struct clksrc_sources clk_src_epll;
extern struct clksrc_sources clk_src_dpll;
diff --git a/arch/arm/plat-samsung/include/plat/sysmmu.h b/arch/arm/plat-samsung/include/plat/sysmmu.h
deleted file mode 100644
index 5fe8ee01a5ba..000000000000
--- a/arch/arm/plat-samsung/include/plat/sysmmu.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/sysmmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Samsung System MMU driver for S5P platform
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __PLAT_SAMSUNG_SYSMMU_H
-#define __PLAT_SAMSUNG_SYSMMU_H __FILE__
-
-enum S5P_SYSMMU_INTERRUPT_TYPE {
- SYSMMU_PAGEFAULT,
- SYSMMU_AR_MULTIHIT,
- SYSMMU_AW_MULTIHIT,
- SYSMMU_BUSERROR,
- SYSMMU_AR_SECURITY,
- SYSMMU_AR_ACCESS,
- SYSMMU_AW_SECURITY,
- SYSMMU_AW_PROTECTION, /* 7 */
- SYSMMU_FAULTS_NUM
-};
-
-#ifdef CONFIG_S5P_SYSTEM_MMU
-
-#include <mach/sysmmu.h>
-
-/**
- * s5p_sysmmu_enable() - enable system mmu of ip
- * @ips: The ip connected system mmu.
- * #pgd: Base physical address of the 1st level page table
- *
- * This function enable system mmu to transfer address
- * from virtual address to physical address
- */
-void s5p_sysmmu_enable(sysmmu_ips ips, unsigned long pgd);
-
-/**
- * s5p_sysmmu_disable() - disable sysmmu mmu of ip
- * @ips: The ip connected system mmu.
- *
- * This function disable system mmu to transfer address
- * from virtual address to physical address
- */
-void s5p_sysmmu_disable(sysmmu_ips ips);
-
-/**
- * s5p_sysmmu_set_tablebase_pgd() - set page table base address to refer page table
- * @ips: The ip connected system mmu.
- * @pgd: The page table base address.
- *
- * This function set page table base address
- * When system mmu transfer address from virtaul address to physical address,
- * system mmu refer address information from page table
- */
-void s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd);
-
-/**
- * s5p_sysmmu_tlb_invalidate() - flush all TLB entry in system mmu
- * @ips: The ip connected system mmu.
- *
- * This function flush all TLB entry in system mmu
- */
-void s5p_sysmmu_tlb_invalidate(sysmmu_ips ips);
-
-/** s5p_sysmmu_set_fault_handler() - Fault handler for System MMUs
- * @itype: type of fault.
- * @pgtable_base: the physical address of page table base. This is 0 if @ips is
- * SYSMMU_BUSERROR.
- * @fault_addr: the device (virtual) address that the System MMU tried to
- * translated. This is 0 if @ips is SYSMMU_BUSERROR.
- * Called when interrupt occurred by the System MMUs
- * The device drivers of peripheral devices that has a System MMU can implement
- * a fault handler to resolve address translation fault by System MMU.
- * The meanings of return value and parameters are described below.
-
- * return value: non-zero if the fault is correctly resolved.
- * zero if the fault is not handled.
- */
-void s5p_sysmmu_set_fault_handler(sysmmu_ips ips,
- int (*handler)(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
- unsigned long pgtable_base,
- unsigned long fault_addr));
-#else
-#define s5p_sysmmu_enable(ips, pgd) do { } while (0)
-#define s5p_sysmmu_disable(ips) do { } while (0)
-#define s5p_sysmmu_set_tablebase_pgd(ips, pgd) do { } while (0)
-#define s5p_sysmmu_tlb_invalidate(ips) do { } while (0)
-#define s5p_sysmmu_set_fault_handler(ips, handler) do { } while (0)
-#endif
-#endif /* __ASM_PLAT_SYSMMU_H */
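For reference, the interface declared in the header deleted above was used roughly as sketched below. This is a hedged reconstruction from the declarations only: SYSMMU_MFC_L stands in for a sysmmu_ips value (the real identifiers come from the machine's <mach/sysmmu.h>), and the handler is illustrative rather than code taken from the tree.

#include <linux/printk.h>
#include <plat/sysmmu.h>	/* the header being removed in this hunk */

static int my_sysmmu_fault(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
			   unsigned long pgtable_base,
			   unsigned long fault_addr)
{
	pr_err("sysmmu fault %d at %#lx (page table %#lx)\n",
	       itype, fault_addr, pgtable_base);
	return 0;	/* zero: fault not resolved, let the core report it */
}

static void my_start_translation(unsigned long pgd_phys)
{
	/* SYSMMU_MFC_L is a hypothetical IP identifier */
	s5p_sysmmu_set_fault_handler(SYSMMU_MFC_L, my_sysmmu_fault);
	s5p_sysmmu_enable(SYSMMU_MFC_L, pgd_phys);	/* translate through pgd */
	/* ... hand device-virtual addresses to the peripheral ... */
	s5p_sysmmu_tlb_invalidate(SYSMMU_MFC_L);	/* after page-table updates */
	s5p_sysmmu_disable(SYSMMU_MFC_L);
}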
diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-samsung/s5p-clock.c
index f68a9bb11948..031a61899bef 100644
--- a/arch/arm/plat-s5p/clock.c
+++ b/arch/arm/plat-samsung/s5p-clock.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/clock.c
- *
+/*
* Copyright 2009 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
@@ -68,6 +67,11 @@ struct clk clk_fout_bpll = {
.id = -1,
};
+struct clk clk_fout_bpll_div2 = {
+ .name = "fout_bpll_div2",
+ .id = -1,
+};
+
/* CPLL clock output */
struct clk clk_fout_cpll = {
@@ -83,6 +87,11 @@ struct clk clk_fout_mpll = {
.id = -1,
};
+struct clk clk_fout_mpll_div2 = {
+ .name = "fout_mpll_div2",
+ .id = -1,
+};
+
/* EPLL clock output */
struct clk clk_fout_epll = {
.name = "fout_epll",
@@ -126,6 +135,16 @@ struct clksrc_sources clk_src_bpll = {
.nr_sources = ARRAY_SIZE(clk_src_bpll_list),
};
+static struct clk *clk_src_bpll_fout_list[] = {
+ [0] = &clk_fout_bpll_div2,
+ [1] = &clk_fout_bpll,
+};
+
+struct clksrc_sources clk_src_bpll_fout = {
+ .sources = clk_src_bpll_fout_list,
+ .nr_sources = ARRAY_SIZE(clk_src_bpll_fout_list),
+};
+
/* Possible clock sources for CPLL Mux */
static struct clk *clk_src_cpll_list[] = {
[0] = &clk_fin_cpll,
@@ -148,6 +167,16 @@ struct clksrc_sources clk_src_mpll = {
.nr_sources = ARRAY_SIZE(clk_src_mpll_list),
};
+static struct clk *clk_src_mpll_fout_list[] = {
+ [0] = &clk_fout_mpll_div2,
+ [1] = &clk_fout_mpll,
+};
+
+struct clksrc_sources clk_src_mpll_fout = {
+ .sources = clk_src_mpll_fout_list,
+ .nr_sources = ARRAY_SIZE(clk_src_mpll_fout_list),
+};
+
/* Possible clock sources for EPLL Mux */
static struct clk *clk_src_epll_list[] = {
[0] = &clk_fin_epll,
diff --git a/arch/arm/plat-s5p/dev-mfc.c b/arch/arm/plat-samsung/s5p-dev-mfc.c
index a30d36b7f61b..ad6089465e2a 100644
--- a/arch/arm/plat-s5p/dev-mfc.c
+++ b/arch/arm/plat-samsung/s5p-dev-mfc.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/dev-mfc.c
- *
+/*
* Copyright (C) 2010-2011 Samsung Electronics Co.Ltd
*
* Base S5P MFC resource and device definitions
@@ -9,7 +8,6 @@
* published by the Free Software Foundation.
*/
-
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
diff --git a/arch/arm/plat-s5p/dev-uart.c b/arch/arm/plat-samsung/s5p-dev-uart.c
index c9308db36183..cafa3deddcc1 100644
--- a/arch/arm/plat-s5p/dev-uart.c
+++ b/arch/arm/plat-samsung/s5p-dev-uart.c
@@ -1,6 +1,5 @@
-/* linux/arch/arm/plat-s5p/dev-uart.c
- *
- * Copyright (c) 2009 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2009,2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* Base S5P UART resource and device definitions
@@ -14,6 +13,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <asm/mach/arch.h>
@@ -26,86 +26,38 @@
/* Serial port registrations */
static struct resource s5p_uart0_resource[] = {
- [0] = {
- .start = S5P_PA_UART0,
- .end = S5P_PA_UART0 + S5P_SZ_UART - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_UART0,
- .end = IRQ_UART0,
- .flags = IORESOURCE_IRQ,
- },
+ [0] = DEFINE_RES_MEM(S5P_PA_UART0, S5P_SZ_UART),
+ [1] = DEFINE_RES_IRQ(IRQ_UART0),
};
static struct resource s5p_uart1_resource[] = {
- [0] = {
- .start = S5P_PA_UART1,
- .end = S5P_PA_UART1 + S5P_SZ_UART - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_UART1,
- .end = IRQ_UART1,
- .flags = IORESOURCE_IRQ,
- },
+ [0] = DEFINE_RES_MEM(S5P_PA_UART1, S5P_SZ_UART),
+ [1] = DEFINE_RES_IRQ(IRQ_UART1),
};
static struct resource s5p_uart2_resource[] = {
- [0] = {
- .start = S5P_PA_UART2,
- .end = S5P_PA_UART2 + S5P_SZ_UART - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_UART2,
- .end = IRQ_UART2,
- .flags = IORESOURCE_IRQ,
- },
+ [0] = DEFINE_RES_MEM(S5P_PA_UART2, S5P_SZ_UART),
+ [1] = DEFINE_RES_IRQ(IRQ_UART2),
};
static struct resource s5p_uart3_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
- [0] = {
- .start = S5P_PA_UART3,
- .end = S5P_PA_UART3 + S5P_SZ_UART - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_UART3,
- .end = IRQ_UART3,
- .flags = IORESOURCE_IRQ,
- },
+ [0] = DEFINE_RES_MEM(S5P_PA_UART3, S5P_SZ_UART),
+ [1] = DEFINE_RES_IRQ(IRQ_UART3),
#endif
};
static struct resource s5p_uart4_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 4
- [0] = {
- .start = S5P_PA_UART4,
- .end = S5P_PA_UART4 + S5P_SZ_UART - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_UART4,
- .end = IRQ_UART4,
- .flags = IORESOURCE_IRQ,
- },
+ [0] = DEFINE_RES_MEM(S5P_PA_UART4, S5P_SZ_UART),
+ [1] = DEFINE_RES_IRQ(IRQ_UART4),
#endif
};
static struct resource s5p_uart5_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 5
- [0] = {
- .start = S5P_PA_UART5,
- .end = S5P_PA_UART5 + S5P_SZ_UART - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_UART5,
- .end = IRQ_UART5,
- .flags = IORESOURCE_IRQ,
- },
+ [0] = DEFINE_RES_MEM(S5P_PA_UART5, S5P_SZ_UART),
+ [1] = DEFINE_RES_IRQ(IRQ_UART5),
#endif
};
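The conversion above leans on the resource helpers from <linux/ioport.h> (hence the new include): DEFINE_RES_MEM(start, size) expands to the same initializer as the open-coded form it replaces, and DEFINE_RES_IRQ(irq) does the same for interrupt resources, so the change is purely mechanical. A small equivalence sketch:

#include <linux/ioport.h>

/* open-coded form, as removed above */
static struct resource uart0_mem_open = {
	.start	= S5P_PA_UART0,
	.end	= S5P_PA_UART0 + S5P_SZ_UART - 1,
	.flags	= IORESOURCE_MEM,
};

/* equivalent helper form, as added above */
static struct resource uart0_mem_macro = DEFINE_RES_MEM(S5P_PA_UART0, S5P_SZ_UART);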
diff --git a/arch/arm/plat-s5p/irq-eint.c b/arch/arm/plat-samsung/s5p-irq-eint.c
index 139c050918c5..33bd3f3d20f5 100644
--- a/arch/arm/plat-s5p/irq-eint.c
+++ b/arch/arm/plat-samsung/s5p-irq-eint.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/irq-eint.c
- *
+/*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-samsung/s5p-irq-gpioint.c
index 82c7311017a2..f9431fe5b06e 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-samsung/s5p-irq-gpioint.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/irq-gpioint.c
- *
+/*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* Author: Kyungmin Park <kyungmin.park@samsung.com>
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
diff --git a/arch/arm/plat-s5p/irq-pm.c b/arch/arm/plat-samsung/s5p-irq-pm.c
index d1bfecae6c9f..7c1e3b7072fc 100644
--- a/arch/arm/plat-s5p/irq-pm.c
+++ b/arch/arm/plat-samsung/s5p-irq-pm.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/irq-pm.c
- *
+/*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
diff --git a/arch/arm/plat-s5p/irq.c b/arch/arm/plat-samsung/s5p-irq.c
index afdaa1082b9f..dfb47d638f03 100644
--- a/arch/arm/plat-s5p/irq.c
+++ b/arch/arm/plat-samsung/s5p-irq.c
@@ -1,5 +1,4 @@
-/* arch/arm/plat-s5p/irq.c
- *
+/*
* Copyright (c) 2009 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
diff --git a/arch/arm/plat-s5p/pm.c b/arch/arm/plat-samsung/s5p-pm.c
index d15dc47b0e3d..0747468f0936 100644
--- a/arch/arm/plat-s5p/pm.c
+++ b/arch/arm/plat-samsung/s5p-pm.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/pm.c
- *
+/*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
diff --git a/arch/arm/plat-s5p/sleep.S b/arch/arm/plat-samsung/s5p-sleep.S
index 006bd01eda02..bdf6dadf8790 100644
--- a/arch/arm/plat-s5p/sleep.S
+++ b/arch/arm/plat-samsung/s5p-sleep.S
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/sleep.S
- *
+/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
diff --git a/arch/arm/plat-s5p/s5p-time.c b/arch/arm/plat-samsung/s5p-time.c
index 17c0a2c58dfd..028b6e877eb9 100644
--- a/arch/arm/plat-s5p/s5p-time.c
+++ b/arch/arm/plat-samsung/s5p-time.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/s5p-time.c
- *
+/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
diff --git a/arch/arm/plat-s5p/setup-mipiphy.c b/arch/arm/plat-samsung/setup-mipiphy.c
index 683c466c0e6a..683c466c0e6a 100644
--- a/arch/arm/plat-s5p/setup-mipiphy.c
+++ b/arch/arm/plat-samsung/setup-mipiphy.c
diff --git a/arch/arm/plat-spear/Kconfig b/arch/arm/plat-spear/Kconfig
index 387655b5ce05..4404f82d5979 100644
--- a/arch/arm/plat-spear/Kconfig
+++ b/arch/arm/plat-spear/Kconfig
@@ -8,6 +8,17 @@ choice
prompt "ST SPEAr Family"
default ARCH_SPEAR3XX
+config ARCH_SPEAR13XX
+ bool "ST SPEAr13xx with Device Tree"
+ select ARM_GIC
+ select CPU_V7
+ select USE_OF
+ select HAVE_SMP
+ select MIGHT_HAVE_CACHE_L2X0
+ select PINCTRL
+ help
+ Support for ST's SPEAr13xx family of SoCs
+
config ARCH_SPEAR3XX
bool "ST SPEAr3xx with Device Tree"
select ARM_VIC
@@ -27,6 +38,7 @@ config ARCH_SPEAR6XX
endchoice
# Adding SPEAr machine specific configuration files
+source "arch/arm/mach-spear13xx/Kconfig"
source "arch/arm/mach-spear3xx/Kconfig"
source "arch/arm/mach-spear6xx/Kconfig"
diff --git a/arch/arm/plat-spear/Makefile b/arch/arm/plat-spear/Makefile
index 7744802c83e7..2607bd05c525 100644
--- a/arch/arm/plat-spear/Makefile
+++ b/arch/arm/plat-spear/Makefile
@@ -3,6 +3,7 @@
#
# Common support
-obj-y := clock.o restart.o time.o pl080.o
+obj-y := restart.o time.o
-obj-$(CONFIG_ARCH_SPEAR3XX) += shirq.o
+obj-$(CONFIG_ARCH_SPEAR3XX) += pl080.o shirq.o
+obj-$(CONFIG_ARCH_SPEAR6XX) += pl080.o
diff --git a/arch/arm/plat-spear/clock.c b/arch/arm/plat-spear/clock.c
deleted file mode 100644
index 67dd00381ea6..000000000000
--- a/arch/arm/plat-spear/clock.c
+++ /dev/null
@@ -1,1005 +0,0 @@
-/*
- * arch/arm/plat-spear/clock.c
- *
- * Clock framework for SPEAr platform
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/bug.h>
-#include <linux/clk.h>
-#include <linux/debugfs.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <plat/clock.h>
-
-static DEFINE_SPINLOCK(clocks_lock);
-static LIST_HEAD(root_clks);
-#ifdef CONFIG_DEBUG_FS
-static LIST_HEAD(clocks);
-#endif
-
-static void propagate_rate(struct clk *, int on_init);
-#ifdef CONFIG_DEBUG_FS
-static int clk_debugfs_reparent(struct clk *);
-#endif
-
-static int generic_clk_enable(struct clk *clk)
-{
- unsigned int val;
-
- if (!clk->en_reg)
- return -EFAULT;
-
- val = readl(clk->en_reg);
- if (unlikely(clk->flags & RESET_TO_ENABLE))
- val &= ~(1 << clk->en_reg_bit);
- else
- val |= 1 << clk->en_reg_bit;
-
- writel(val, clk->en_reg);
-
- return 0;
-}
-
-static void generic_clk_disable(struct clk *clk)
-{
- unsigned int val;
-
- if (!clk->en_reg)
- return;
-
- val = readl(clk->en_reg);
- if (unlikely(clk->flags & RESET_TO_ENABLE))
- val |= 1 << clk->en_reg_bit;
- else
- val &= ~(1 << clk->en_reg_bit);
-
- writel(val, clk->en_reg);
-}
-
-/* generic clk ops */
-static struct clkops generic_clkops = {
- .enable = generic_clk_enable,
- .disable = generic_clk_disable,
-};
-
-/* returns current programmed clocks clock info structure */
-static struct pclk_info *pclk_info_get(struct clk *clk)
-{
- unsigned int val, i;
- struct pclk_info *info = NULL;
-
- val = (readl(clk->pclk_sel->pclk_sel_reg) >> clk->pclk_sel_shift)
- & clk->pclk_sel->pclk_sel_mask;
-
- for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
- if (clk->pclk_sel->pclk_info[i].pclk_val == val)
- info = &clk->pclk_sel->pclk_info[i];
- }
-
- return info;
-}
-
-/*
- * Set Update pclk, and pclk_info of clk and add clock sibling node to current
- * parents children list
- */
-static void clk_reparent(struct clk *clk, struct pclk_info *pclk_info)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&clocks_lock, flags);
- list_del(&clk->sibling);
- list_add(&clk->sibling, &pclk_info->pclk->children);
-
- clk->pclk = pclk_info->pclk;
- spin_unlock_irqrestore(&clocks_lock, flags);
-
-#ifdef CONFIG_DEBUG_FS
- clk_debugfs_reparent(clk);
-#endif
-}
-
-static void do_clk_disable(struct clk *clk)
-{
- if (!clk)
- return;
-
- if (!clk->usage_count) {
- WARN_ON(1);
- return;
- }
-
- clk->usage_count--;
-
- if (clk->usage_count == 0) {
- /*
- * Surely, there are no active childrens or direct users
- * of this clock
- */
- if (clk->pclk)
- do_clk_disable(clk->pclk);
-
- if (clk->ops && clk->ops->disable)
- clk->ops->disable(clk);
- }
-}
-
-static int do_clk_enable(struct clk *clk)
-{
- int ret = 0;
-
- if (!clk)
- return -EFAULT;
-
- if (clk->usage_count == 0) {
- if (clk->pclk) {
- ret = do_clk_enable(clk->pclk);
- if (ret)
- goto err;
- }
- if (clk->ops && clk->ops->enable) {
- ret = clk->ops->enable(clk);
- if (ret) {
- if (clk->pclk)
- do_clk_disable(clk->pclk);
- goto err;
- }
- }
- /*
- * Since the clock is going to be used for the first
- * time please reclac
- */
- if (clk->recalc) {
- ret = clk->recalc(clk);
- if (ret)
- goto err;
- }
- }
- clk->usage_count++;
-err:
- return ret;
-}
-
-/*
- * clk_enable - inform the system when the clock source should be running.
- * @clk: clock source
- *
- * If the clock can not be enabled/disabled, this should return success.
- *
- * Returns success (0) or negative errno.
- */
-int clk_enable(struct clk *clk)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&clocks_lock, flags);
- ret = do_clk_enable(clk);
- spin_unlock_irqrestore(&clocks_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(clk_enable);
-
-/*
- * clk_disable - inform the system when the clock source is no longer required.
- * @clk: clock source
- *
- * Inform the system that a clock source is no longer required by
- * a driver and may be shut down.
- *
- * Implementation detail: if the clock source is shared between
- * multiple drivers, clk_enable() calls must be balanced by the
- * same number of clk_disable() calls for the clock source to be
- * disabled.
- */
-void clk_disable(struct clk *clk)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&clocks_lock, flags);
- do_clk_disable(clk);
- spin_unlock_irqrestore(&clocks_lock, flags);
-}
-EXPORT_SYMBOL(clk_disable);
-
-/**
- * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
- * This is only valid once the clock source has been enabled.
- * @clk: clock source
- */
-unsigned long clk_get_rate(struct clk *clk)
-{
- unsigned long flags, rate;
-
- spin_lock_irqsave(&clocks_lock, flags);
- rate = clk->rate;
- spin_unlock_irqrestore(&clocks_lock, flags);
-
- return rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-/**
- * clk_set_parent - set the parent clock source for this clock
- * @clk: clock source
- * @parent: parent clock source
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
- int i, found = 0, val = 0;
- unsigned long flags;
-
- if (!clk || !parent)
- return -EFAULT;
- if (clk->pclk == parent)
- return 0;
- if (!clk->pclk_sel)
- return -EPERM;
-
- /* check if requested parent is in clk parent list */
- for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
- if (clk->pclk_sel->pclk_info[i].pclk == parent) {
- found = 1;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- spin_lock_irqsave(&clocks_lock, flags);
- /* reflect parent change in hardware */
- val = readl(clk->pclk_sel->pclk_sel_reg);
- val &= ~(clk->pclk_sel->pclk_sel_mask << clk->pclk_sel_shift);
- val |= clk->pclk_sel->pclk_info[i].pclk_val << clk->pclk_sel_shift;
- writel(val, clk->pclk_sel->pclk_sel_reg);
- spin_unlock_irqrestore(&clocks_lock, flags);
-
- /* reflect parent change in software */
- clk_reparent(clk, &clk->pclk_sel->pclk_info[i]);
-
- propagate_rate(clk, 0);
- return 0;
-}
-EXPORT_SYMBOL(clk_set_parent);
-
-/**
- * clk_set_rate - set the clock rate for a clock source
- * @clk: clock source
- * @rate: desired clock rate in Hz
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long flags;
- int ret = -EINVAL;
-
- if (!clk || !rate)
- return -EFAULT;
-
- if (clk->set_rate) {
- spin_lock_irqsave(&clocks_lock, flags);
- ret = clk->set_rate(clk, rate);
- if (!ret)
- /* if successful -> propagate */
- propagate_rate(clk, 0);
- spin_unlock_irqrestore(&clocks_lock, flags);
- } else if (clk->pclk) {
- u32 mult = clk->div_factor ? clk->div_factor : 1;
- ret = clk_set_rate(clk->pclk, mult * rate);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-/* registers clock in platform clock framework */
-void clk_register(struct clk_lookup *cl)
-{
- struct clk *clk;
- unsigned long flags;
-
- if (!cl || !cl->clk)
- return;
- clk = cl->clk;
-
- spin_lock_irqsave(&clocks_lock, flags);
-
- INIT_LIST_HEAD(&clk->children);
- if (clk->flags & ALWAYS_ENABLED)
- clk->ops = NULL;
- else if (!clk->ops)
- clk->ops = &generic_clkops;
-
- /* root clock don't have any parents */
- if (!clk->pclk && !clk->pclk_sel) {
- list_add(&clk->sibling, &root_clks);
- } else if (clk->pclk && !clk->pclk_sel) {
- /* add clocks with only one parent to parent's children list */
- list_add(&clk->sibling, &clk->pclk->children);
- } else {
- /* clocks with more than one parent */
- struct pclk_info *pclk_info;
-
- pclk_info = pclk_info_get(clk);
- if (!pclk_info) {
- pr_err("CLKDEV: invalid pclk info of clk with"
- " %s dev_id and %s con_id\n",
- cl->dev_id, cl->con_id);
- } else {
- clk->pclk = pclk_info->pclk;
- list_add(&clk->sibling, &pclk_info->pclk->children);
- }
- }
-
- spin_unlock_irqrestore(&clocks_lock, flags);
-
- /* debugfs specific */
-#ifdef CONFIG_DEBUG_FS
- list_add(&clk->node, &clocks);
- clk->cl = cl;
-#endif
-
- /* add clock to arm clockdev framework */
- clkdev_add(cl);
-}
-
-/**
- * propagate_rate - recalculate and propagate all clocks to children
- * @pclk: parent clock required to be propogated
- * @on_init: flag for enabling clocks which are ENABLED_ON_INIT.
- *
- * Recalculates all children clocks
- */
-void propagate_rate(struct clk *pclk, int on_init)
-{
- struct clk *clk, *_temp;
- int ret = 0;
-
- list_for_each_entry_safe(clk, _temp, &pclk->children, sibling) {
- if (clk->recalc) {
- ret = clk->recalc(clk);
- /*
- * recalc will return error if clk out is not programmed
- * In this case configure default rate.
- */
- if (ret && clk->set_rate)
- clk->set_rate(clk, 0);
- }
- propagate_rate(clk, on_init);
-
- if (!on_init)
- continue;
-
- /* Enable clks enabled on init, in software view */
- if (clk->flags & ENABLED_ON_INIT)
- do_clk_enable(clk);
- }
-}
-
-/**
- * round_rate_index - return closest programmable rate index in rate_config tbl
- * @clk: ptr to clock structure
- * @drate: desired rate
- * @rate: final rate will be returned in this variable only.
- *
- * Finds index in rate_config for highest clk rate which is less than
- * requested rate. If there is no clk rate lesser than requested rate then
- * -EINVAL is returned. This routine assumes that rate_config is written
- * in incrementing order of clk rates.
- * If drate passed is zero then default rate is programmed.
- */
-static int
-round_rate_index(struct clk *clk, unsigned long drate, unsigned long *rate)
-{
- unsigned long tmp = 0, prev_rate = 0;
- int index;
-
- if (!clk->calc_rate)
- return -EFAULT;
-
- if (!drate)
- return -EINVAL;
-
- /*
- * This loops ends on two conditions:
- * - as soon as clk is found with rate greater than requested rate.
- * - if all clks in rate_config are smaller than requested rate.
- */
- for (index = 0; index < clk->rate_config.count; index++) {
- prev_rate = tmp;
- tmp = clk->calc_rate(clk, index);
- if (drate < tmp) {
- index--;
- break;
- }
- }
- /* return if can't find suitable clock */
- if (index < 0) {
- index = -EINVAL;
- *rate = 0;
- } else if (index == clk->rate_config.count) {
- /* program with highest clk rate possible */
- index = clk->rate_config.count - 1;
- *rate = tmp;
- } else
- *rate = prev_rate;
-
- return index;
-}
-
-/**
- * clk_round_rate - adjust a rate to the exact rate a clock can provide
- * @clk: clock source
- * @rate: desired clock rate in Hz
- *
- * Returns rounded clock rate in Hz, or negative errno.
- */
-long clk_round_rate(struct clk *clk, unsigned long drate)
-{
- long rate = 0;
- int index;
-
- /*
- * propagate call to parent who supports calc_rate. Similar approach is
- * used in clk_set_rate.
- */
- if (!clk->calc_rate) {
- u32 mult;
- if (!clk->pclk)
- return clk->rate;
-
- mult = clk->div_factor ? clk->div_factor : 1;
- return clk_round_rate(clk->pclk, mult * drate) / mult;
- }
-
- index = round_rate_index(clk, drate, &rate);
- if (index >= 0)
- return rate;
- else
- return index;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-/*All below functions are called with lock held */
-
-/*
- * Calculates pll clk rate for specific value of mode, m, n and p
- *
- * In normal mode
- * rate = (2 * M[15:8] * Fin)/(N * 2^P)
- *
- * In Dithered mode
- * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P)
- */
-unsigned long pll_calc_rate(struct clk *clk, int index)
-{
- unsigned long rate = clk->pclk->rate;
- struct pll_rate_tbl *tbls = clk->rate_config.tbls;
- unsigned int mode;
-
- mode = tbls[index].mode ? 256 : 1;
- return (((2 * rate / 10000) * tbls[index].m) /
- (mode * tbls[index].n * (1 << tbls[index].p))) * 10000;
-}
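/*
 * Worked example for the normal-mode equation above, using assumed values
 * that do not come from any of the removed rate tables:
 *	Fin = 24 MHz, M = 125, N = 3, P = 1
 *	rate = (2 * M * Fin) / (N * 2^P)
 *	     = (2 * 125 * 24 MHz) / (3 * 2)
 *	     = 6000 MHz / 6 = 1000 MHz
 */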
-
-/*
- * calculates current programmed rate of pll1
- *
- * In normal mode
- * rate = (2 * M[15:8] * Fin)/(N * 2^P)
- *
- * In Dithered mode
- * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P)
- */
-int pll_clk_recalc(struct clk *clk)
-{
- struct pll_clk_config *config = clk->private_data;
- unsigned int num = 2, den = 0, val, mode = 0;
-
- mode = (readl(config->mode_reg) >> config->masks->mode_shift) &
- config->masks->mode_mask;
-
- val = readl(config->cfg_reg);
- /* calculate denominator */
- den = (val >> config->masks->div_p_shift) & config->masks->div_p_mask;
- den = 1 << den;
- den *= (val >> config->masks->div_n_shift) & config->masks->div_n_mask;
-
- /* calculate numerator & denominator */
- if (!mode) {
- /* Normal mode */
- num *= (val >> config->masks->norm_fdbk_m_shift) &
- config->masks->norm_fdbk_m_mask;
- } else {
- /* Dithered mode */
- num *= (val >> config->masks->dith_fdbk_m_shift) &
- config->masks->dith_fdbk_m_mask;
- den *= 256;
- }
-
- if (!den)
- return -EINVAL;
-
- clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000;
- return 0;
-}
-
-/*
- * Configures new clock rate of pll
- */
-int pll_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
- struct pll_rate_tbl *tbls = clk->rate_config.tbls;
- struct pll_clk_config *config = clk->private_data;
- unsigned long val, rate;
- int i;
-
- i = round_rate_index(clk, desired_rate, &rate);
- if (i < 0)
- return i;
-
- val = readl(config->mode_reg) &
- ~(config->masks->mode_mask << config->masks->mode_shift);
- val |= (tbls[i].mode & config->masks->mode_mask) <<
- config->masks->mode_shift;
- writel(val, config->mode_reg);
-
- val = readl(config->cfg_reg) &
- ~(config->masks->div_p_mask << config->masks->div_p_shift);
- val |= (tbls[i].p & config->masks->div_p_mask) <<
- config->masks->div_p_shift;
- val &= ~(config->masks->div_n_mask << config->masks->div_n_shift);
- val |= (tbls[i].n & config->masks->div_n_mask) <<
- config->masks->div_n_shift;
- val &= ~(config->masks->dith_fdbk_m_mask <<
- config->masks->dith_fdbk_m_shift);
- if (tbls[i].mode)
- val |= (tbls[i].m & config->masks->dith_fdbk_m_mask) <<
- config->masks->dith_fdbk_m_shift;
- else
- val |= (tbls[i].m & config->masks->norm_fdbk_m_mask) <<
- config->masks->norm_fdbk_m_shift;
-
- writel(val, config->cfg_reg);
-
- clk->rate = rate;
-
- return 0;
-}
-
-/*
- * Calculates ahb, apb clk rate for specific value of div
- */
-unsigned long bus_calc_rate(struct clk *clk, int index)
-{
- unsigned long rate = clk->pclk->rate;
- struct bus_rate_tbl *tbls = clk->rate_config.tbls;
-
- return rate / (tbls[index].div + 1);
-}
-
-/* calculates current programmed rate of ahb or apb bus */
-int bus_clk_recalc(struct clk *clk)
-{
- struct bus_clk_config *config = clk->private_data;
- unsigned int div;
-
- div = ((readl(config->reg) >> config->masks->shift) &
- config->masks->mask) + 1;
-
- if (!div)
- return -EINVAL;
-
- clk->rate = (unsigned long)clk->pclk->rate / div;
- return 0;
-}
-
-/* Configures new clock rate of AHB OR APB bus */
-int bus_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
- struct bus_rate_tbl *tbls = clk->rate_config.tbls;
- struct bus_clk_config *config = clk->private_data;
- unsigned long val, rate;
- int i;
-
- i = round_rate_index(clk, desired_rate, &rate);
- if (i < 0)
- return i;
-
- val = readl(config->reg) &
- ~(config->masks->mask << config->masks->shift);
- val |= (tbls[i].div & config->masks->mask) << config->masks->shift;
- writel(val, config->reg);
-
- clk->rate = rate;
-
- return 0;
-}
-
-/*
- * gives rate for different values of eq, x and y
- *
- * Fout from synthesizer can be given from two equations:
- * Fout1 = (Fin * X/Y)/2 EQ1
- * Fout2 = Fin * X/Y EQ2
- */
-unsigned long aux_calc_rate(struct clk *clk, int index)
-{
- unsigned long rate = clk->pclk->rate;
- struct aux_rate_tbl *tbls = clk->rate_config.tbls;
- u8 eq = tbls[index].eq ? 1 : 2;
-
- return (((rate/10000) * tbls[index].xscale) /
- (tbls[index].yscale * eq)) * 10000;
-}
-
-/*
- * calculates current programmed rate of auxiliary synthesizers
- * used by: UART, FIRDA
- *
- * Fout from synthesizer can be given from two equations:
- * Fout1 = (Fin * X/Y)/2
- * Fout2 = Fin * X/Y
- *
- * Selection of eqn 1 or 2 is programmed in register
- */
-int aux_clk_recalc(struct clk *clk)
-{
- struct aux_clk_config *config = clk->private_data;
- unsigned int num = 1, den = 1, val, eqn;
-
- val = readl(config->synth_reg);
-
- eqn = (val >> config->masks->eq_sel_shift) &
- config->masks->eq_sel_mask;
- if (eqn == config->masks->eq1_mask)
- den *= 2;
-
- /* calculate numerator */
- num = (val >> config->masks->xscale_sel_shift) &
- config->masks->xscale_sel_mask;
-
- /* calculate denominator */
- den *= (val >> config->masks->yscale_sel_shift) &
- config->masks->yscale_sel_mask;
-
- if (!den)
- return -EINVAL;
-
- clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000;
- return 0;
-}
-
-/* Configures new clock rate of auxiliary synthesizers used by: UART, FIRDA*/
-int aux_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
- struct aux_rate_tbl *tbls = clk->rate_config.tbls;
- struct aux_clk_config *config = clk->private_data;
- unsigned long val, rate;
- int i;
-
- i = round_rate_index(clk, desired_rate, &rate);
- if (i < 0)
- return i;
-
- val = readl(config->synth_reg) &
- ~(config->masks->eq_sel_mask << config->masks->eq_sel_shift);
- val |= (tbls[i].eq & config->masks->eq_sel_mask) <<
- config->masks->eq_sel_shift;
- val &= ~(config->masks->xscale_sel_mask <<
- config->masks->xscale_sel_shift);
- val |= (tbls[i].xscale & config->masks->xscale_sel_mask) <<
- config->masks->xscale_sel_shift;
- val &= ~(config->masks->yscale_sel_mask <<
- config->masks->yscale_sel_shift);
- val |= (tbls[i].yscale & config->masks->yscale_sel_mask) <<
- config->masks->yscale_sel_shift;
- writel(val, config->synth_reg);
-
- clk->rate = rate;
-
- return 0;
-}
-
-/*
- * Calculates gpt clk rate for different values of mscale and nscale
- *
- * Fout= Fin/((2 ^ (N+1)) * (M+1))
- */
-unsigned long gpt_calc_rate(struct clk *clk, int index)
-{
- unsigned long rate = clk->pclk->rate;
- struct gpt_rate_tbl *tbls = clk->rate_config.tbls;
-
- return rate / ((1 << (tbls[index].nscale + 1)) *
- (tbls[index].mscale + 1));
-}
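/*
 * Worked example for the equation above, using assumed values (not taken
 * from any removed gpt rate table):
 *	Fin = 48 MHz, N = 2, M = 1
 *	Fout = Fin / ((2 ^ (N + 1)) * (M + 1))
 *	     = 48 MHz / (8 * 2) = 3 MHz
 */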
-
-/*
- * calculates current programmed rate of gpt synthesizers
- * Fout from synthesizer can be given from below equations:
- * Fout= Fin/((2 ^ (N+1)) * (M+1))
- */
-int gpt_clk_recalc(struct clk *clk)
-{
- struct gpt_clk_config *config = clk->private_data;
- unsigned int div = 1, val;
-
- val = readl(config->synth_reg);
- div += (val >> config->masks->mscale_sel_shift) &
- config->masks->mscale_sel_mask;
- div *= 1 << (((val >> config->masks->nscale_sel_shift) &
- config->masks->nscale_sel_mask) + 1);
-
- if (!div)
- return -EINVAL;
-
- clk->rate = (unsigned long)clk->pclk->rate / div;
- return 0;
-}
-
-/* Configures new clock rate of gptiliary synthesizers used by: UART, FIRDA*/
-int gpt_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
- struct gpt_rate_tbl *tbls = clk->rate_config.tbls;
- struct gpt_clk_config *config = clk->private_data;
- unsigned long val, rate;
- int i;
-
- i = round_rate_index(clk, desired_rate, &rate);
- if (i < 0)
- return i;
-
- val = readl(config->synth_reg) & ~(config->masks->mscale_sel_mask <<
- config->masks->mscale_sel_shift);
- val |= (tbls[i].mscale & config->masks->mscale_sel_mask) <<
- config->masks->mscale_sel_shift;
- val &= ~(config->masks->nscale_sel_mask <<
- config->masks->nscale_sel_shift);
- val |= (tbls[i].nscale & config->masks->nscale_sel_mask) <<
- config->masks->nscale_sel_shift;
- writel(val, config->synth_reg);
-
- clk->rate = rate;
-
- return 0;
-}
-
-/*
- * Calculates clcd clk rate for different values of div
- *
- * Fout from synthesizer can be given from below equation:
- * Fout= Fin/2*div (division factor)
- * div is 17 bits:-
- * 0-13 (fractional part)
- * 14-16 (integer part)
- * To calculate Fout we left shift val by 14 bits and divide Fin by
- * complete div (including fractional part) and then right shift the
- * result by 14 places.
- */
-unsigned long clcd_calc_rate(struct clk *clk, int index)
-{
- unsigned long rate = clk->pclk->rate;
- struct clcd_rate_tbl *tbls = clk->rate_config.tbls;
-
- rate /= 1000;
- rate <<= 12;
- rate /= (2 * tbls[index].div);
- rate >>= 12;
- rate *= 1000;
-
- return rate;
-}
-
-/*
- * calculates current programmed rate of clcd synthesizer
- * Fout from synthesizer can be given from below equation:
- * Fout= Fin/2*div (division factor)
- * div is 17 bits:-
- * 0-13 (fractional part)
- * 14-16 (integer part)
- * To calculate Fout we left shift val by 14 bits and divide Fin by
- * complete div (including fractional part) and then right shift the
- * result by 14 places.
- */
-int clcd_clk_recalc(struct clk *clk)
-{
- struct clcd_clk_config *config = clk->private_data;
- unsigned int div = 1;
- unsigned long prate;
- unsigned int val;
-
- val = readl(config->synth_reg);
- div = (val >> config->masks->div_factor_shift) &
- config->masks->div_factor_mask;
-
- if (!div)
- return -EINVAL;
-
- prate = clk->pclk->rate / 1000; /* first level division, make it KHz */
-
- clk->rate = (((unsigned long)prate << 12) / (2 * div)) >> 12;
- clk->rate *= 1000;
- return 0;
-}
-
-/* Configures new clock rate of auxiliary synthesizers used by: UART, FIRDA*/
-int clcd_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
- struct clcd_rate_tbl *tbls = clk->rate_config.tbls;
- struct clcd_clk_config *config = clk->private_data;
- unsigned long val, rate;
- int i;
-
- i = round_rate_index(clk, desired_rate, &rate);
- if (i < 0)
- return i;
-
- val = readl(config->synth_reg) & ~(config->masks->div_factor_mask <<
- config->masks->div_factor_shift);
- val |= (tbls[i].div & config->masks->div_factor_mask) <<
- config->masks->div_factor_shift;
- writel(val, config->synth_reg);
-
- clk->rate = rate;
-
- return 0;
-}
-
-/*
- * Used for clocks that always have value as the parent clock divided by a
- * fixed divisor
- */
-int follow_parent(struct clk *clk)
-{
- unsigned int div_factor = (clk->div_factor < 1) ? 1 : clk->div_factor;
-
- clk->rate = clk->pclk->rate/div_factor;
- return 0;
-}
-
-/**
- * recalc_root_clocks - recalculate and propagate all root clocks
- *
- * Recalculates all root clocks (clocks with no parent), which if the
- * clock's .recalc is set correctly, should also propagate their rates.
- */
-void recalc_root_clocks(void)
-{
- struct clk *pclk;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&clocks_lock, flags);
- list_for_each_entry(pclk, &root_clks, sibling) {
- if (pclk->recalc) {
- ret = pclk->recalc(pclk);
- /*
- * recalc will return error if clk out is not programmed
- * In this case configure default clock.
- */
- if (ret && pclk->set_rate)
- pclk->set_rate(pclk, 0);
- }
- propagate_rate(pclk, 1);
- /* Enable clks enabled on init, in software view */
- if (pclk->flags & ENABLED_ON_INIT)
- do_clk_enable(pclk);
- }
- spin_unlock_irqrestore(&clocks_lock, flags);
-}
-
-void __init clk_init(void)
-{
- recalc_root_clocks();
-}
-
-#ifdef CONFIG_DEBUG_FS
-/*
- * debugfs support to trace clock tree hierarchy and attributes
- */
-static struct dentry *clk_debugfs_root;
-static int clk_debugfs_register_one(struct clk *c)
-{
- int err;
- struct dentry *d;
- struct clk *pa = c->pclk;
- char s[255];
- char *p = s;
-
- if (c) {
- if (c->cl->con_id)
- p += sprintf(p, "%s", c->cl->con_id);
- if (c->cl->dev_id)
- p += sprintf(p, "%s", c->cl->dev_id);
- }
- d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
- if (!d)
- return -ENOMEM;
- c->dent = d;
-
- d = debugfs_create_u32("usage_count", S_IRUGO, c->dent,
- (u32 *)&c->usage_count);
- if (!d) {
- err = -ENOMEM;
- goto err_out;
- }
- d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
- if (!d) {
- err = -ENOMEM;
- goto err_out;
- }
- d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
- if (!d) {
- err = -ENOMEM;
- goto err_out;
- }
- return 0;
-
-err_out:
- debugfs_remove_recursive(c->dent);
- return err;
-}
-
-static int clk_debugfs_register(struct clk *c)
-{
- int err;
- struct clk *pa = c->pclk;
-
- if (pa && !pa->dent) {
- err = clk_debugfs_register(pa);
- if (err)
- return err;
- }
-
- if (!c->dent) {
- err = clk_debugfs_register_one(c);
- if (err)
- return err;
- }
- return 0;
-}
-
-static int __init clk_debugfs_init(void)
-{
- struct clk *c;
- struct dentry *d;
- int err;
-
- d = debugfs_create_dir("clock", NULL);
- if (!d)
- return -ENOMEM;
- clk_debugfs_root = d;
-
- list_for_each_entry(c, &clocks, node) {
- err = clk_debugfs_register(c);
- if (err)
- goto err_out;
- }
- return 0;
-err_out:
- debugfs_remove_recursive(clk_debugfs_root);
- return err;
-}
-late_initcall(clk_debugfs_init);
-
-static int clk_debugfs_reparent(struct clk *c)
-{
- debugfs_remove(c->dent);
- return clk_debugfs_register_one(c);
-}
-#endif /* CONFIG_DEBUG_FS */
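The framework deleted above keeps a per-clock usage_count: the first clk_enable() walks up the parent chain, enables each parent and recalculates the rate, and a matching clk_disable() drops the count again, as the removed kernel-doc spells out. A minimal consumer sketch under that contract follows; the "gpt0" id mirrors the clk_get_sys("gpt0", NULL) call in plat-spear/time.c further down, everything else is hypothetical.

#include <linux/clk.h>
#include <linux/err.h>

static int my_use_gpt_clock(void)
{
	struct clk *clk;
	int ret;

	clk = clk_get_sys("gpt0", NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);	/* count 0 -> 1: parents enabled, rate recalculated */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* ... clk_get_rate(clk) is only meaningful while the clock is enabled ... */

	clk_disable(clk);	/* every clk_enable() must be balanced by clk_disable() */
	clk_put(clk);
	return 0;
}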
diff --git a/arch/arm/plat-spear/include/plat/clock.h b/arch/arm/plat-spear/include/plat/clock.h
deleted file mode 100644
index 0062bafef12d..000000000000
--- a/arch/arm/plat-spear/include/plat/clock.h
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * arch/arm/plat-spear/include/plat/clock.h
- *
- * Clock framework definitions for SPEAr platform
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __PLAT_CLOCK_H
-#define __PLAT_CLOCK_H
-
-#include <linux/list.h>
-#include <linux/clkdev.h>
-#include <linux/types.h>
-
-/* clk structure flags */
-#define ALWAYS_ENABLED (1 << 0) /* clock always enabled */
-#define RESET_TO_ENABLE (1 << 1) /* reset register bit to enable clk */
-#define ENABLED_ON_INIT (1 << 2) /* clocks enabled at init */
-
-/**
- * struct clkops - clock operations
- * @enable: pointer to clock enable function
- * @disable: pointer to clock disable function
- */
-struct clkops {
- int (*enable) (struct clk *);
- void (*disable) (struct clk *);
-};
-
-/**
- * struct pclk_info - parents info
- * @pclk: pointer to parent clk
- * @pclk_val: value to be written for selecting this parent
- */
-struct pclk_info {
- struct clk *pclk;
- u8 pclk_val;
-};
-
-/**
- * struct pclk_sel - parents selection configuration
- * @pclk_info: pointer to array of parent clock info
- * @pclk_count: number of parents
- * @pclk_sel_reg: register for selecting a parent
- * @pclk_sel_mask: mask for selecting parent (can be used to clear bits also)
- */
-struct pclk_sel {
- struct pclk_info *pclk_info;
- u8 pclk_count;
- void __iomem *pclk_sel_reg;
- unsigned int pclk_sel_mask;
-};
-
-/**
- * struct rate_config - clk rate configurations
- * @tbls: array of device specific clk rate tables, in ascending order of rates
- * @count: size of tbls array
- * @default_index: default setting when originally disabled
- */
-struct rate_config {
- void *tbls;
- u8 count;
- u8 default_index;
-};
-
-/**
- * struct clk - clock structure
- * @usage_count: num of users who enabled this clock
- * @flags: flags for clock properties
- * @rate: programmed clock rate in Hz
- * @en_reg: clk enable/disable reg
- * @en_reg_bit: clk enable/disable bit
- * @ops: clk enable/disable ops - generic_clkops selected if NULL
- * @recalc: pointer to clock rate recalculate function
- * @set_rate: pointer to clock set rate function
- * @calc_rate: pointer to clock get rate function for index
- * @rate_config: rate configuration information, used by set_rate
- * @div_factor: division factor to parent clock.
- * @pclk: current parent clk
- * @pclk_sel: pointer to parent selection structure
- * @pclk_sel_shift: register shift for selecting parent of this clock
- * @children: list for childrens or this clock
- * @sibling: node for list of clocks having same parents
- * @private_data: clock specific private data
- * @node: list to maintain clocks linearly
- * @cl: clocklook up associated with this clock
- * @dent: object for debugfs
- */
-struct clk {
- unsigned int usage_count;
- unsigned int flags;
- unsigned long rate;
- void __iomem *en_reg;
- u8 en_reg_bit;
- const struct clkops *ops;
- int (*recalc) (struct clk *);
- int (*set_rate) (struct clk *, unsigned long rate);
- unsigned long (*calc_rate)(struct clk *, int index);
- struct rate_config rate_config;
- unsigned int div_factor;
-
- struct clk *pclk;
- struct pclk_sel *pclk_sel;
- unsigned int pclk_sel_shift;
-
- struct list_head children;
- struct list_head sibling;
- void *private_data;
-#ifdef CONFIG_DEBUG_FS
- struct list_head node;
- struct clk_lookup *cl;
- struct dentry *dent;
-#endif
-};
-
-/* pll configuration structure */
-struct pll_clk_masks {
- u32 mode_mask;
- u32 mode_shift;
-
- u32 norm_fdbk_m_mask;
- u32 norm_fdbk_m_shift;
- u32 dith_fdbk_m_mask;
- u32 dith_fdbk_m_shift;
- u32 div_p_mask;
- u32 div_p_shift;
- u32 div_n_mask;
- u32 div_n_shift;
-};
-
-struct pll_clk_config {
- void __iomem *mode_reg;
- void __iomem *cfg_reg;
- struct pll_clk_masks *masks;
-};
-
-/* pll clk rate config structure */
-struct pll_rate_tbl {
- u8 mode;
- u16 m;
- u8 n;
- u8 p;
-};
-
-/* ahb and apb bus configuration structure */
-struct bus_clk_masks {
- u32 mask;
- u32 shift;
-};
-
-struct bus_clk_config {
- void __iomem *reg;
- struct bus_clk_masks *masks;
-};
-
-/* ahb and apb clk bus rate config structure */
-struct bus_rate_tbl {
- u8 div;
-};
-
-/* Aux clk configuration structure: applicable to UART and FIRDA */
-struct aux_clk_masks {
- u32 eq_sel_mask;
- u32 eq_sel_shift;
- u32 eq1_mask;
- u32 eq2_mask;
- u32 xscale_sel_mask;
- u32 xscale_sel_shift;
- u32 yscale_sel_mask;
- u32 yscale_sel_shift;
-};
-
-struct aux_clk_config {
- void __iomem *synth_reg;
- struct aux_clk_masks *masks;
-};
-
-/* aux clk rate config structure */
-struct aux_rate_tbl {
- u16 xscale;
- u16 yscale;
- u8 eq;
-};
-
-/* GPT clk configuration structure */
-struct gpt_clk_masks {
- u32 mscale_sel_mask;
- u32 mscale_sel_shift;
- u32 nscale_sel_mask;
- u32 nscale_sel_shift;
-};
-
-struct gpt_clk_config {
- void __iomem *synth_reg;
- struct gpt_clk_masks *masks;
-};
-
-/* gpt clk rate config structure */
-struct gpt_rate_tbl {
- u16 mscale;
- u16 nscale;
-};
-
-/* clcd clk configuration structure */
-struct clcd_synth_masks {
- u32 div_factor_mask;
- u32 div_factor_shift;
-};
-
-struct clcd_clk_config {
- void __iomem *synth_reg;
- struct clcd_synth_masks *masks;
-};
-
-/* clcd clk rate config structure */
-struct clcd_rate_tbl {
- u16 div;
-};
-
-/* platform specific clock functions */
-void __init clk_init(void);
-void clk_register(struct clk_lookup *cl);
-void recalc_root_clocks(void);
-
-/* clock recalc & set rate functions */
-int follow_parent(struct clk *clk);
-unsigned long pll_calc_rate(struct clk *clk, int index);
-int pll_clk_recalc(struct clk *clk);
-int pll_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long bus_calc_rate(struct clk *clk, int index);
-int bus_clk_recalc(struct clk *clk);
-int bus_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long gpt_calc_rate(struct clk *clk, int index);
-int gpt_clk_recalc(struct clk *clk);
-int gpt_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long aux_calc_rate(struct clk *clk, int index);
-int aux_clk_recalc(struct clk *clk);
-int aux_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long clcd_calc_rate(struct clk *clk, int index);
-int clcd_clk_recalc(struct clk *clk);
-int clcd_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-
-#endif /* __PLAT_CLOCK_H */
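Tying the removed definitions together: a board file described each clock with a struct clk plus a struct clk_lookup, handed the pair to clk_register(), and ran clk_init() once to recalculate the root clocks. The sketch below is a hedged reconstruction from the deleted header only; the gate register address, the bit number and the "uart0" id are hypothetical, and pll1_clk is assumed to be registered elsewhere.

#include <linux/clkdev.h>
#include <linux/init.h>
#include <plat/clock.h>		/* the header being removed in this hunk */

static struct clk pll1_clk;		/* assumed parent, set up elsewhere */

static struct clk uart_clk = {
	.en_reg		= (void __iomem *)0xfc000000,	/* hypothetical gate register */
	.en_reg_bit	= 3,
	.pclk		= &pll1_clk,		/* single, fixed parent */
	.recalc		= &follow_parent,	/* rate = parent rate / div_factor */
	.div_factor	= 1,
};

static struct clk_lookup uart_cl = {
	.dev_id	= "uart0",			/* hypothetical device id */
	.clk	= &uart_clk,
};

static void __init my_board_clk_init(void)
{
	clk_register(&uart_cl);		/* hooks into the clock tree and clkdev */
	clk_init();			/* recalc_root_clocks() over all roots */
}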
diff --git a/arch/arm/plat-spear/restart.c b/arch/arm/plat-spear/restart.c
index 4471a232713a..ea0a61302b7e 100644
--- a/arch/arm/plat-spear/restart.c
+++ b/arch/arm/plat-spear/restart.c
@@ -16,6 +16,7 @@
#include <mach/spear.h>
#include <mach/generic.h>
+#define SPEAR13XX_SYS_SW_RES (VA_MISC_BASE + 0x204)
void spear_restart(char mode, const char *cmd)
{
if (mode == 's') {
@@ -23,6 +24,10 @@ void spear_restart(char mode, const char *cmd)
soft_restart(0);
} else {
/* hardware reset, Use on-chip reset capability */
+#ifdef CONFIG_ARCH_SPEAR13XX
+ writel_relaxed(0x01, SPEAR13XX_SYS_SW_RES);
+#else
sysctl_soft_reset((void __iomem *)VA_SPEAR_SYS_CTRL_BASE);
+#endif
}
}
diff --git a/arch/arm/plat-spear/time.c b/arch/arm/plat-spear/time.c
index a3164d1647fd..03321af5de9f 100644
--- a/arch/arm/plat-spear/time.c
+++ b/arch/arm/plat-spear/time.c
@@ -18,6 +18,8 @@
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
#include <linux/time.h>
#include <linux/irq.h>
#include <asm/mach/time.h>
@@ -197,19 +199,32 @@ static void __init spear_clockevent_init(int irq)
setup_irq(irq, &spear_timer_irq);
}
-void __init spear_setup_timer(resource_size_t base, int irq)
+static const struct of_device_id timer_of_match[] __initconst = {
+ { .compatible = "st,spear-timer", },
+ { },
+};
+
+void __init spear_setup_of_timer(void)
{
- int ret;
+ struct device_node *np;
+ int irq, ret;
+
+ np = of_find_matching_node(NULL, timer_of_match);
+ if (!np) {
+ pr_err("%s: No timer passed via DT\n", __func__);
+ return;
+ }
- if (!request_mem_region(base, SZ_1K, "gpt0")) {
- pr_err("%s:cannot get IO addr\n", __func__);
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ pr_err("%s: No irq passed for timer via DT\n", __func__);
return;
}
- gpt_base = ioremap(base, SZ_1K);
+ gpt_base = of_iomap(np, 0);
if (!gpt_base) {
- pr_err("%s:ioremap failed for gpt\n", __func__);
- goto err_mem;
+ pr_err("%s: of iomap failed\n", __func__);
+ return;
}
gpt_clk = clk_get_sys("gpt0", NULL);
@@ -218,10 +233,10 @@ void __init spear_setup_timer(resource_size_t base, int irq)
goto err_iomap;
}
- ret = clk_enable(gpt_clk);
+ ret = clk_prepare_enable(gpt_clk);
if (ret < 0) {
- pr_err("%s:couldn't enable gpt clock\n", __func__);
- goto err_clk;
+ pr_err("%s:couldn't prepare-enable gpt clock\n", __func__);
+ goto err_prepare_enable_clk;
}
spear_clockevent_init(irq);
@@ -229,10 +244,8 @@ void __init spear_setup_timer(resource_size_t base, int irq)
return;
-err_clk:
+err_prepare_enable_clk:
clk_put(gpt_clk);
err_iomap:
iounmap(gpt_base);
-err_mem:
- release_mem_region(base, SZ_1K);
}
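The switch above from clk_enable() to clk_prepare_enable() follows the common clock framework split: clk_prepare() may sleep while clk_enable() must not, and the combined helper, paired with clk_disable_unprepare(), does both halves. A minimal sketch of the pairing (my_start_gpt is a hypothetical caller, not part of the patch):

#include <linux/clk.h>

static int my_start_gpt(struct clk *gpt_clk)
{
	int ret;

	ret = clk_prepare_enable(gpt_clk);	/* clk_prepare() + clk_enable() */
	if (ret < 0)
		return ret;

	/* ... program the timer ... */

	clk_disable_unprepare(gpt_clk);		/* clk_disable() + clk_unprepare() */
	return 0;
}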
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 3dea7231f637..71d38c76726c 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -11,7 +11,9 @@ config AVR32
select GENERIC_ATOMIC64
select HARDIRQS_SW_RESEND
select GENERIC_IRQ_SHOW
+ select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ select GENERIC_CLOCKEVENTS
help
AVR32 is a high-performance 32-bit RISC microprocessor core,
designed for cost-sensitive embedded applications, with particular
@@ -35,9 +37,6 @@ config TRACE_IRQFLAGS_SUPPORT
config RWSEM_GENERIC_SPINLOCK
def_bool y
-config GENERIC_CLOCKEVENTS
- def_bool y
-
config RWSEM_XCHGADD_ALGORITHM
def_bool n
@@ -63,8 +62,6 @@ source "kernel/Kconfig.freezer"
menu "System Type and features"
-source "kernel/time/Kconfig"
-
config SUBARCH_AVR32B
bool
config MMU
diff --git a/arch/avr32/include/asm/kvm_para.h b/arch/avr32/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/avr32/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 79cfe2614bcc..fef96f47876c 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -31,6 +31,7 @@ config BLACKFIN
select HAVE_KERNEL_LZO if RAMKERNEL
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
+ select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_GENERIC_HARDIRQS
select GENERIC_ATOMIC64
@@ -38,6 +39,7 @@ config BLACKFIN
select IRQ_PER_CPU if SMP
select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
select GENERIC_SMP_IDLE_THREAD
+ select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
config GENERIC_CSUM
def_bool y
@@ -642,9 +644,10 @@ comment "Kernel Timer/Scheduler"
source kernel/Kconfig.hz
-config GENERIC_CLOCKEVENTS
+config SET_GENERIC_CLOCKEVENTS
bool "Generic clock events"
default y
+ select GENERIC_CLOCKEVENTS
menu "Clock event device"
depends on GENERIC_CLOCKEVENTS
@@ -678,12 +681,6 @@ config GPTMR0_CLOCKSOURCE
depends on !TICKSOURCE_GPTMR0
endmenu
-config ARCH_USES_GETTIMEOFFSET
- depends on !GENERIC_CLOCKEVENTS
- def_bool y
-
-source kernel/time/Kconfig
-
comment "Misc"
choice
diff --git a/arch/blackfin/include/asm/kvm_para.h b/arch/blackfin/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/blackfin/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c
index 44bbf2f564cb..f7f7a18abca9 100644
--- a/arch/blackfin/kernel/trace.c
+++ b/arch/blackfin/kernel/trace.c
@@ -10,6 +10,8 @@
#include <linux/hardirq.h>
#include <linux/thread_info.h>
#include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
@@ -27,8 +29,7 @@ void decode_address(char *buf, unsigned long address)
{
struct task_struct *p;
struct mm_struct *mm;
- unsigned long flags, offset;
- unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
+ unsigned long offset;
struct rb_node *n;
#ifdef CONFIG_KALLSYMS
@@ -112,17 +113,17 @@ void decode_address(char *buf, unsigned long address)
* mappings of all our processes and see if we can't be a whee
* bit more specific
*/
- write_lock_irqsave(&tasklist_lock, flags);
+ read_lock(&tasklist_lock);
for_each_process(p) {
- mm = (in_atomic ? p->mm : get_task_mm(p));
- if (!mm)
- continue;
+ struct task_struct *t;
- if (!down_read_trylock(&mm->mmap_sem)) {
- if (!in_atomic)
- mmput(mm);
+ t = find_lock_task_mm(p);
+ if (!t)
continue;
- }
+
+ mm = t->mm;
+ if (!down_read_trylock(&mm->mmap_sem))
+ goto __continue;
for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
struct vm_area_struct *vma;
@@ -131,7 +132,7 @@ void decode_address(char *buf, unsigned long address)
if (address >= vma->vm_start && address < vma->vm_end) {
char _tmpbuf[256];
- char *name = p->comm;
+ char *name = t->comm;
struct file *file = vma->vm_file;
if (file) {
@@ -164,8 +165,7 @@ void decode_address(char *buf, unsigned long address)
name, vma->vm_start, vma->vm_end);
up_read(&mm->mmap_sem);
- if (!in_atomic)
- mmput(mm);
+ task_unlock(t);
if (buf[0] == '\0')
sprintf(buf, "[ %s ] dynamic memory", name);
@@ -175,8 +175,8 @@ void decode_address(char *buf, unsigned long address)
}
up_read(&mm->mmap_sem);
- if (!in_atomic)
- mmput(mm);
+__continue:
+ task_unlock(t);
}
/*
@@ -186,7 +186,7 @@ void decode_address(char *buf, unsigned long address)
sprintf(buf, "/* kernel dynamic memory */");
done:
- write_unlock_irqrestore(&tasklist_lock, flags);
+ read_unlock(&tasklist_lock);
}
#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 1f15b88b537f..052f81a76239 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -15,6 +15,7 @@ config C6X
select IRQ_DOMAIN
select OF
select OF_EARLY_FLATTREE
+ select GENERIC_CLOCKEVENTS
config MMU
def_bool n
@@ -31,12 +32,6 @@ config GENERIC_CALIBRATE_DELAY
config GENERIC_HWEIGHT
def_bool y
-config GENERIC_CLOCKEVENTS
- def_bool y
-
-config GENERIC_CLOCKEVENTS_BROADCAST
- bool
-
config GENERIC_BUG
def_bool y
@@ -125,7 +120,6 @@ source "mm/Kconfig"
source "kernel/Kconfig.preempt"
source "kernel/Kconfig.hz"
-source "kernel/time/Kconfig"
endmenu
diff --git a/arch/c6x/include/asm/kvm_para.h b/arch/c6x/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/c6x/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 2995035812ec..bb344650a14f 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -13,12 +13,6 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
-config GENERIC_CMOS_UPDATE
- def_bool y
-
-config ARCH_USES_GETTIMEOFFSET
- def_bool n
-
config ARCH_HAS_ILOG2_U32
bool
default n
@@ -46,10 +40,12 @@ config CRIS
bool
default y
select HAVE_IDE
+ select GENERIC_ATOMIC64
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
select GENERIC_IOMAP
select GENERIC_SMP_IDLE_THREAD if ETRAX_ARCH_V32
+ select GENERIC_CMOS_UPDATE
config HZ
int
diff --git a/arch/cris/arch-v10/drivers/ds1302.c b/arch/cris/arch-v10/drivers/ds1302.c
deleted file mode 100644
index 74f99c688c8d..000000000000
--- a/arch/cris/arch-v10/drivers/ds1302.c
+++ /dev/null
@@ -1,515 +0,0 @@
-/*!***************************************************************************
-*!
-*! FILE NAME : ds1302.c
-*!
-*! DESCRIPTION: Implements an interface for the DS1302 RTC through Etrax I/O
-*!
-*! Functions exported: ds1302_readreg, ds1302_writereg, ds1302_init
-*!
-*! ---------------------------------------------------------------------------
-*!
-*! (C) Copyright 1999-2007 Axis Communications AB, LUND, SWEDEN
-*!
-*!***************************************************************************/
-
-
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/miscdevice.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/bcd.h>
-#include <linux/capability.h>
-
-#include <asm/uaccess.h>
-#include <arch/svinto.h>
-#include <asm/io.h>
-#include <asm/rtc.h>
-#include <arch/io_interface_mux.h>
-
-#include "i2c.h"
-
-#define RTC_MAJOR_NR 121 /* local major, change later */
-
-static DEFINE_MUTEX(ds1302_mutex);
-static const char ds1302_name[] = "ds1302";
-
-/* The DS1302 might be connected to different bits on different products.
- * It has three signals - SDA, SCL and RST. RST and SCL are always outputs,
- * but SDA can have a selected direction.
- * For now, only PORT_PB is hardcoded.
- */
-
-/* The RST bit may be on either the Generic Port or Port PB. */
-#ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT
-#define TK_RST_OUT(x) REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x)
-#define TK_RST_DIR(x)
-#else
-#define TK_RST_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x)
-#define TK_RST_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x)
-#endif
-
-
-#define TK_SDA_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_SDABIT, x)
-#define TK_SCL_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_SCLBIT, x)
-
-#define TK_SDA_IN() ((*R_PORT_PB_READ >> CONFIG_ETRAX_DS1302_SDABIT) & 1)
-/* 1 is out, 0 is in */
-#define TK_SDA_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, CONFIG_ETRAX_DS1302_SDABIT, x)
-#define TK_SCL_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, CONFIG_ETRAX_DS1302_SCLBIT, x)
-
-
-/*
- * The reason for tempudelay and not udelay is that loops_per_usec
- * (used in udelay) is not set when functions here are called from time.c
- */
-
-static void tempudelay(int usecs)
-{
- volatile int loops;
-
- for(loops = usecs * 12; loops > 0; loops--)
- /* nothing */;
-}
-
-
-/* Send 8 bits. */
-static void
-out_byte(unsigned char x)
-{
- int i;
- TK_SDA_DIR(1);
- for (i = 8; i--;) {
- /* The chip latches incoming bits on the rising edge of SCL. */
- TK_SCL_OUT(0);
- TK_SDA_OUT(x & 1);
- tempudelay(1);
- TK_SCL_OUT(1);
- tempudelay(1);
- x >>= 1;
- }
- TK_SDA_DIR(0);
-}
-
-static unsigned char
-in_byte(void)
-{
- unsigned char x = 0;
- int i;
-
- /* Read byte. Bits come LSB first, on the falling edge of SCL.
- * Assume SDA is in input direction already.
- */
- TK_SDA_DIR(0);
-
- for (i = 8; i--;) {
- TK_SCL_OUT(0);
- tempudelay(1);
- x >>= 1;
- x |= (TK_SDA_IN() << 7);
- TK_SCL_OUT(1);
- tempudelay(1);
- }
-
- return x;
-}
-
-/* Prepares for a transaction by de-activating RST (active-low). */
-
-static void
-start(void)
-{
- TK_SCL_OUT(0);
- tempudelay(1);
- TK_RST_OUT(0);
- tempudelay(5);
- TK_RST_OUT(1);
-}
-
-/* Ends a transaction by taking RST active again. */
-
-static void
-stop(void)
-{
- tempudelay(2);
- TK_RST_OUT(0);
-}
-
-/* Enable writing. */
-
-static void
-ds1302_wenable(void)
-{
- start();
- out_byte(0x8e); /* Write control register */
- out_byte(0x00); /* Disable write protect bit 7 = 0 */
- stop();
-}
-
-/* Disable writing. */
-
-static void
-ds1302_wdisable(void)
-{
- start();
- out_byte(0x8e); /* Write control register */
- out_byte(0x80); /* Disable write protect bit 7 = 0 */
- stop();
-}
-
-
-
-/* Read a byte from the selected register in the DS1302. */
-
-unsigned char
-ds1302_readreg(int reg)
-{
- unsigned char x;
-
- start();
- out_byte(0x81 | (reg << 1)); /* read register */
- x = in_byte();
- stop();
-
- return x;
-}
-
-/* Write a byte to the selected register. */
-
-void
-ds1302_writereg(int reg, unsigned char val)
-{
-#ifndef CONFIG_ETRAX_RTC_READONLY
- int do_writereg = 1;
-#else
- int do_writereg = 0;
-
- if (reg == RTC_TRICKLECHARGER)
- do_writereg = 1;
-#endif
-
- if (do_writereg) {
- ds1302_wenable();
- start();
- out_byte(0x80 | (reg << 1)); /* write register */
- out_byte(val);
- stop();
- ds1302_wdisable();
- }
-}
-
-void
-get_rtc_time(struct rtc_time *rtc_tm)
-{
- unsigned long flags;
-
- local_irq_save(flags);
-
- rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS);
- rtc_tm->tm_min = CMOS_READ(RTC_MINUTES);
- rtc_tm->tm_hour = CMOS_READ(RTC_HOURS);
- rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
- rtc_tm->tm_mon = CMOS_READ(RTC_MONTH);
- rtc_tm->tm_year = CMOS_READ(RTC_YEAR);
-
- local_irq_restore(flags);
-
- rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
- rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
- rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
- rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
- rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
- rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
-
- /*
- * Account for differences between how the RTC uses the values
- * and how they are defined in a struct rtc_time;
- */
-
- if (rtc_tm->tm_year <= 69)
- rtc_tm->tm_year += 100;
-
- rtc_tm->tm_mon--;
-}
-
-static unsigned char days_in_mo[] =
- {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-
-/* ioctl that supports RTC_RD_TIME and RTC_SET_TIME (read and set time/date). */
-
-static int rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- unsigned long flags;
-
- switch(cmd) {
- case RTC_RD_TIME: /* read the time/date from RTC */
- {
- struct rtc_time rtc_tm;
-
- memset(&rtc_tm, 0, sizeof (struct rtc_time));
- get_rtc_time(&rtc_tm);
- if (copy_to_user((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time)))
- return -EFAULT;
- return 0;
- }
-
- case RTC_SET_TIME: /* set the RTC */
- {
- struct rtc_time rtc_tm;
- unsigned char mon, day, hrs, min, sec, leap_yr;
- unsigned int yrs;
-
- if (!capable(CAP_SYS_TIME))
- return -EPERM;
-
- if (copy_from_user(&rtc_tm, (struct rtc_time*)arg, sizeof(struct rtc_time)))
- return -EFAULT;
-
- yrs = rtc_tm.tm_year + 1900;
- mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */
- day = rtc_tm.tm_mday;
- hrs = rtc_tm.tm_hour;
- min = rtc_tm.tm_min;
- sec = rtc_tm.tm_sec;
-
-
- if ((yrs < 1970) || (yrs > 2069))
- return -EINVAL;
-
- leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
-
- if ((mon > 12) || (day == 0))
- return -EINVAL;
-
- if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
- return -EINVAL;
-
- if ((hrs >= 24) || (min >= 60) || (sec >= 60))
- return -EINVAL;
-
- if (yrs >= 2000)
- yrs -= 2000; /* RTC (0, 1, ... 69) */
- else
- yrs -= 1900; /* RTC (70, 71, ... 99) */
-
- sec = bin2bcd(sec);
- min = bin2bcd(min);
- hrs = bin2bcd(hrs);
- day = bin2bcd(day);
- mon = bin2bcd(mon);
- yrs = bin2bcd(yrs);
-
- local_irq_save(flags);
- CMOS_WRITE(yrs, RTC_YEAR);
- CMOS_WRITE(mon, RTC_MONTH);
- CMOS_WRITE(day, RTC_DAY_OF_MONTH);
- CMOS_WRITE(hrs, RTC_HOURS);
- CMOS_WRITE(min, RTC_MINUTES);
- CMOS_WRITE(sec, RTC_SECONDS);
- local_irq_restore(flags);
-
- /* Notice that at this point, the RTC is updated but
- * the kernel is still running with the old time.
- * You need to set that separately with settimeofday
- * or adjtimex.
- */
- return 0;
- }
-
- case RTC_SET_CHARGE: /* set the RTC TRICKLE CHARGE register */
- {
- int tcs_val;
-
- if (!capable(CAP_SYS_TIME))
- return -EPERM;
-
- if(copy_from_user(&tcs_val, (int*)arg, sizeof(int)))
- return -EFAULT;
-
- tcs_val = RTC_TCR_PATTERN | (tcs_val & 0x0F);
- ds1302_writereg(RTC_TRICKLECHARGER, tcs_val);
- return 0;
- }
- case RTC_VL_READ:
- {
- /* TODO:
- * Implement voltage low detection support
- */
- printk(KERN_WARNING "DS1302: RTC Voltage Low detection"
- " is not supported\n");
- return 0;
- }
- case RTC_VL_CLR:
- {
- /* TODO:
- * Nothing to do since Voltage Low detection is not supported
- */
- return 0;
- }
- default:
- return -ENOIOCTLCMD;
- }
-}
-
-static long rtc_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&ds1302_mutex);
- ret = rtc_ioctl(file, cmd, arg);
- mutex_unlock(&ds1302_mutex);
-
- return ret;
-}
-
-static void
-print_rtc_status(void)
-{
- struct rtc_time tm;
-
- get_rtc_time(&tm);
-
- /*
- * There is no way to tell if the luser has the RTC set for local
- * time or for Universal Standard Time (GMT). Probably local though.
- */
-
- printk(KERN_INFO "rtc_time\t: %02d:%02d:%02d\n",
- tm.tm_hour, tm.tm_min, tm.tm_sec);
- printk(KERN_INFO "rtc_date\t: %04d-%02d-%02d\n",
- tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
-}
-
-/* The various file operations we support. */
-
-static const struct file_operations rtc_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = rtc_unlocked_ioctl,
- .llseek = noop_llseek,
-};
-
-/* Probe for the chip by writing something to its RAM and try reading it back. */
-
-#define MAGIC_PATTERN 0x42
-
-static int __init
-ds1302_probe(void)
-{
- int retval, res;
-
- TK_RST_DIR(1);
- TK_SCL_DIR(1);
- TK_SDA_DIR(0);
-
- /* Try to talk to timekeeper. */
-
- ds1302_wenable();
- start();
- out_byte(0xc0); /* write RAM byte 0 */
- out_byte(MAGIC_PATTERN); /* write something magic */
- start();
- out_byte(0xc1); /* read RAM byte 0 */
-
- if((res = in_byte()) == MAGIC_PATTERN) {
- stop();
- ds1302_wdisable();
- printk(KERN_INFO "%s: RTC found.\n", ds1302_name);
- printk(KERN_INFO "%s: SDA, SCL, RST on PB%i, PB%i, %s%i\n",
- ds1302_name,
- CONFIG_ETRAX_DS1302_SDABIT,
- CONFIG_ETRAX_DS1302_SCLBIT,
-#ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT
- "GENIO",
-#else
- "PB",
-#endif
- CONFIG_ETRAX_DS1302_RSTBIT);
- print_rtc_status();
- retval = 1;
- } else {
- stop();
- retval = 0;
- }
-
- return retval;
-}
-
-
-/* Just probe for the RTC and register the device to handle the ioctl needed. */
-
-int __init
-ds1302_init(void)
-{
-#ifdef CONFIG_ETRAX_I2C
- i2c_init();
-#endif
-
- if (!ds1302_probe()) {
-#ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT
-#if CONFIG_ETRAX_DS1302_RSTBIT == 27
- /*
- * The only way to set g27 to output is to enable ATA.
- *
- * Make sure that R_GEN_CONFIG is setup correct.
- */
- /* Allocating the ATA interface will grab almost all
- * pins in I/O groups a, b, c and d. A consequence of
- * allocating the ATA interface is that the fixed
- * interfaces shared RAM, parallel port 0, parallel
- * port 1, parallel port W, SCSI-8 port 0, SCSI-8 port
- * 1, SCSI-W, serial port 2, serial port 3,
- * synchronous serial port 3 and USB port 2 and almost
- * all GPIO pins on port g cannot be used.
- */
- if (cris_request_io_interface(if_ata, "ds1302/ATA")) {
- printk(KERN_WARNING "ds1302: Failed to get IO interface\n");
- return -1;
- }
-
-#elif CONFIG_ETRAX_DS1302_RSTBIT == 0
- if (cris_io_interface_allocate_pins(if_gpio_grp_a,
- 'g',
- CONFIG_ETRAX_DS1302_RSTBIT,
- CONFIG_ETRAX_DS1302_RSTBIT)) {
- printk(KERN_WARNING "ds1302: Failed to get IO interface\n");
- return -1;
- }
-
- /* Set the direction of this bit to out. */
- genconfig_shadow = ((genconfig_shadow &
- ~IO_MASK(R_GEN_CONFIG, g0dir)) |
- (IO_STATE(R_GEN_CONFIG, g0dir, out)));
- *R_GEN_CONFIG = genconfig_shadow;
-#endif
- if (!ds1302_probe()) {
- printk(KERN_WARNING "%s: RTC not found.\n", ds1302_name);
- return -1;
- }
-#else
- printk(KERN_WARNING "%s: RTC not found.\n", ds1302_name);
- return -1;
-#endif
- }
- /* Initialise trickle charger */
- ds1302_writereg(RTC_TRICKLECHARGER,
- RTC_TCR_PATTERN |(CONFIG_ETRAX_DS1302_TRICKLE_CHARGE & 0x0F));
- /* Start clock by resetting CLOCK_HALT */
- ds1302_writereg(RTC_SECONDS, (ds1302_readreg(RTC_SECONDS) & 0x7F));
- return 0;
-}
-
-static int __init ds1302_register(void)
-{
- ds1302_init();
- if (register_chrdev(RTC_MAJOR_NR, ds1302_name, &rtc_fops)) {
- printk(KERN_INFO "%s: unable to get major %d for rtc\n",
- ds1302_name, RTC_MAJOR_NR);
- return -1;
- }
- return 0;
-
-}
-
-module_init(ds1302_register);
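
Editorial aside (not part of the patch): the removed ds1302.c above bit-bangs the DS1302's 3-wire interface LSB-first and builds its command bytes as 0x80|(reg<<1) for writes and 0x81|(reg<<1) for reads. A minimal stand-alone sketch of that encoding and shifting, with a plain variable standing in for the Etrax GPIO shadow macros (TK_SDA_OUT and friends are hypothetical here):

/*
 * Editorial sketch, not kernel code: models the LSB-first shifting and
 * command-byte encoding used by the removed ds1302.c driver above.
 */
#include <stdio.h>

static unsigned char sda_wire;          /* stand-in for the SDA pin state */

static void set_sda(int bit) { sda_wire = bit & 1; }

/* Shift a byte out LSB first, as out_byte() in the removed driver does. */
static void shift_out_lsb_first(unsigned char x)
{
	for (int i = 0; i < 8; i++) {
		set_sda(x & 1);          /* present bit while SCL is low */
		/* ...raise SCL here on real hardware... */
		x >>= 1;
	}
}

/* DS1302 command bytes: bit0 = read flag, bits1..5 = register number. */
static unsigned char ds1302_read_cmd(int reg)  { return 0x81 | (reg << 1); }
static unsigned char ds1302_write_cmd(int reg) { return 0x80 | (reg << 1); }

int main(void)
{
	printf("read seconds cmd:  0x%02x\n", ds1302_read_cmd(0));  /* 0x81 */
	printf("write seconds cmd: 0x%02x\n", ds1302_write_cmd(0)); /* 0x80 */
	shift_out_lsb_first(ds1302_read_cmd(0));
	return 0;
}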
diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c
deleted file mode 100644
index 9da056860c92..000000000000
--- a/arch/cris/arch-v10/drivers/pcf8563.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * PCF8563 RTC
- *
- * From Phillips' datasheet:
- *
- * The PCF8563 is a CMOS real-time clock/calendar optimized for low power
- * consumption. A programmable clock output, interrupt output and voltage
- * low detector are also provided. All address and data are transferred
- * serially via two-line bidirectional I2C-bus. Maximum bus speed is
- * 400 kbits/s. The built-in word address register is incremented
- * automatically after each written or read byte.
- *
- * Copyright (c) 2002-2007, Axis Communications AB
- * All rights reserved.
- *
- * Author: Tobias Anderberg <tobiasa@axis.com>.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/ioctl.h>
-#include <linux/delay.h>
-#include <linux/bcd.h>
-#include <linux/mutex.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/rtc.h>
-
-#include "i2c.h"
-
-#define PCF8563_MAJOR 121 /* Local major number. */
-#define DEVICE_NAME "rtc" /* Name which is registered in /proc/devices. */
-#define PCF8563_NAME "PCF8563"
-#define DRIVER_VERSION "$Revision: 1.24 $"
-
-/* I2C bus slave registers. */
-#define RTC_I2C_READ 0xa3
-#define RTC_I2C_WRITE 0xa2
-
-/* Two simple wrapper macros, saves a few keystrokes. */
-#define rtc_read(x) i2c_readreg(RTC_I2C_READ, x)
-#define rtc_write(x,y) i2c_writereg(RTC_I2C_WRITE, x, y)
-
-static DEFINE_MUTEX(pcf8563_mutex);
-static DEFINE_MUTEX(rtc_lock); /* Protect state etc */
-
-static const unsigned char days_in_month[] =
- { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
-
-static long pcf8563_unlocked_ioctl(struct file *, unsigned int, unsigned long);
-
-/* Cache VL bit value read at driver init since writing the RTC_SECOND
- * register clears the VL status.
- */
-static int voltage_low;
-
-static const struct file_operations pcf8563_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = pcf8563_unlocked_ioctl,
- .llseek = noop_llseek,
-};
-
-unsigned char
-pcf8563_readreg(int reg)
-{
- unsigned char res = rtc_read(reg);
-
- /* The PCF8563 does not return 0 for unimplemented bits. */
- switch (reg) {
- case RTC_SECONDS:
- case RTC_MINUTES:
- res &= 0x7F;
- break;
- case RTC_HOURS:
- case RTC_DAY_OF_MONTH:
- res &= 0x3F;
- break;
- case RTC_WEEKDAY:
- res &= 0x07;
- break;
- case RTC_MONTH:
- res &= 0x1F;
- break;
- case RTC_CONTROL1:
- res &= 0xA8;
- break;
- case RTC_CONTROL2:
- res &= 0x1F;
- break;
- case RTC_CLOCKOUT_FREQ:
- case RTC_TIMER_CONTROL:
- res &= 0x83;
- break;
- }
- return res;
-}
-
-void
-pcf8563_writereg(int reg, unsigned char val)
-{
- rtc_write(reg, val);
-}
-
-void
-get_rtc_time(struct rtc_time *tm)
-{
- tm->tm_sec = rtc_read(RTC_SECONDS);
- tm->tm_min = rtc_read(RTC_MINUTES);
- tm->tm_hour = rtc_read(RTC_HOURS);
- tm->tm_mday = rtc_read(RTC_DAY_OF_MONTH);
- tm->tm_wday = rtc_read(RTC_WEEKDAY);
- tm->tm_mon = rtc_read(RTC_MONTH);
- tm->tm_year = rtc_read(RTC_YEAR);
-
- if (tm->tm_sec & 0x80) {
- printk(KERN_ERR "%s: RTC Voltage Low - reliable date/time "
- "information is no longer guaranteed!\n", PCF8563_NAME);
- }
-
- tm->tm_year = bcd2bin(tm->tm_year) +
- ((tm->tm_mon & 0x80) ? 100 : 0);
- tm->tm_sec &= 0x7F;
- tm->tm_min &= 0x7F;
- tm->tm_hour &= 0x3F;
- tm->tm_mday &= 0x3F;
- tm->tm_wday &= 0x07; /* Not coded in BCD. */
- tm->tm_mon &= 0x1F;
-
- tm->tm_sec = bcd2bin(tm->tm_sec);
- tm->tm_min = bcd2bin(tm->tm_min);
- tm->tm_hour = bcd2bin(tm->tm_hour);
- tm->tm_mday = bcd2bin(tm->tm_mday);
- tm->tm_mon = bcd2bin(tm->tm_mon);
- tm->tm_mon--; /* Month is 1..12 in RTC but 0..11 in linux */
-}
-
-int __init
-pcf8563_init(void)
-{
- static int res;
- static int first = 1;
-
- if (!first)
- return res;
- first = 0;
-
- /* Initiate the i2c protocol. */
- res = i2c_init();
- if (res < 0) {
- printk(KERN_CRIT "pcf8563_init: Failed to init i2c.\n");
- return res;
- }
-
- /*
- * First of all we need to reset the chip. This is done by
- * clearing control1, control2 and clk freq and resetting
- * all alarms.
- */
- if (rtc_write(RTC_CONTROL1, 0x00) < 0)
- goto err;
-
- if (rtc_write(RTC_CONTROL2, 0x00) < 0)
- goto err;
-
- if (rtc_write(RTC_CLOCKOUT_FREQ, 0x00) < 0)
- goto err;
-
- if (rtc_write(RTC_TIMER_CONTROL, 0x03) < 0)
- goto err;
-
- /* Reset the alarms. */
- if (rtc_write(RTC_MINUTE_ALARM, 0x80) < 0)
- goto err;
-
- if (rtc_write(RTC_HOUR_ALARM, 0x80) < 0)
- goto err;
-
- if (rtc_write(RTC_DAY_ALARM, 0x80) < 0)
- goto err;
-
- if (rtc_write(RTC_WEEKDAY_ALARM, 0x80) < 0)
- goto err;
-
- /* Check for low voltage, and warn about it. */
- if (rtc_read(RTC_SECONDS) & 0x80) {
- voltage_low = 1;
- printk(KERN_WARNING "%s: RTC Voltage Low - reliable "
- "date/time information is no longer guaranteed!\n",
- PCF8563_NAME);
- }
-
- return res;
-
-err:
- printk(KERN_INFO "%s: Error initializing chip.\n", PCF8563_NAME);
- res = -1;
- return res;
-}
-
-void __exit
-pcf8563_exit(void)
-{
- unregister_chrdev(PCF8563_MAJOR, DEVICE_NAME);
-}
-
-/*
- * ioctl calls for this driver. Why return -ENOTTY upon error? Because
- * POSIX says so!
- */
-static int pcf8563_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- /* Some sanity checks. */
- if (_IOC_TYPE(cmd) != RTC_MAGIC)
- return -ENOTTY;
-
- if (_IOC_NR(cmd) > RTC_MAX_IOCTL)
- return -ENOTTY;
-
- switch (cmd) {
- case RTC_RD_TIME:
- {
- struct rtc_time tm;
-
- mutex_lock(&rtc_lock);
- memset(&tm, 0, sizeof tm);
- get_rtc_time(&tm);
-
- if (copy_to_user((struct rtc_time *) arg, &tm,
- sizeof tm)) {
- mutex_unlock(&rtc_lock);
- return -EFAULT;
- }
-
- mutex_unlock(&rtc_lock);
-
- return 0;
- }
- case RTC_SET_TIME:
- {
- int leap;
- int year;
- int century;
- struct rtc_time tm;
-
- memset(&tm, 0, sizeof tm);
- if (!capable(CAP_SYS_TIME))
- return -EPERM;
-
- if (copy_from_user(&tm, (struct rtc_time *) arg, sizeof tm))
- return -EFAULT;
-
- /* Convert from struct tm to struct rtc_time. */
- tm.tm_year += 1900;
- tm.tm_mon += 1;
-
- /*
- * Check if tm.tm_year is a leap year. A year is a leap
- * year if it is divisible by 4 but not 100, except
- * that years divisible by 400 _are_ leap years.
- */
- year = tm.tm_year;
- leap = (tm.tm_mon == 2) &&
- ((year % 4 == 0 && year % 100 != 0) || year % 400 == 0);
-
- /* Perform some sanity checks. */
- if ((tm.tm_year < 1970) ||
- (tm.tm_mon > 12) ||
- (tm.tm_mday == 0) ||
- (tm.tm_mday > days_in_month[tm.tm_mon] + leap) ||
- (tm.tm_wday >= 7) ||
- (tm.tm_hour >= 24) ||
- (tm.tm_min >= 60) ||
- (tm.tm_sec >= 60))
- return -EINVAL;
-
- century = (tm.tm_year >= 2000) ? 0x80 : 0;
- tm.tm_year = tm.tm_year % 100;
-
- tm.tm_year = bin2bcd(tm.tm_year);
- tm.tm_mon = bin2bcd(tm.tm_mon);
- tm.tm_mday = bin2bcd(tm.tm_mday);
- tm.tm_hour = bin2bcd(tm.tm_hour);
- tm.tm_min = bin2bcd(tm.tm_min);
- tm.tm_sec = bin2bcd(tm.tm_sec);
- tm.tm_mon |= century;
-
- mutex_lock(&rtc_lock);
-
- rtc_write(RTC_YEAR, tm.tm_year);
- rtc_write(RTC_MONTH, tm.tm_mon);
- rtc_write(RTC_WEEKDAY, tm.tm_wday); /* Not coded in BCD. */
- rtc_write(RTC_DAY_OF_MONTH, tm.tm_mday);
- rtc_write(RTC_HOURS, tm.tm_hour);
- rtc_write(RTC_MINUTES, tm.tm_min);
- rtc_write(RTC_SECONDS, tm.tm_sec);
-
- mutex_unlock(&rtc_lock);
-
- return 0;
- }
- case RTC_VL_READ:
- if (voltage_low) {
- printk(KERN_ERR "%s: RTC Voltage Low - "
- "reliable date/time information is no "
- "longer guaranteed!\n", PCF8563_NAME);
- }
-
- if (copy_to_user((int *) arg, &voltage_low, sizeof(int)))
- return -EFAULT;
- return 0;
-
- case RTC_VL_CLR:
- {
- /* Clear the VL bit in the seconds register in case
- * the time has not been set already (which would
- * have cleared it). This does not really matter
- * because of the cached voltage_low value but do it
- * anyway for consistency. */
-
- int ret = rtc_read(RTC_SECONDS);
-
- rtc_write(RTC_SECONDS, (ret & 0x7F));
-
- /* Clear the cached value. */
- voltage_low = 0;
-
- return 0;
- }
- default:
- return -ENOTTY;
- }
-
- return 0;
-}
-
-static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&pcf8563_mutex);
- ret = pcf8563_ioctl(filp, cmd, arg);
- mutex_unlock(&pcf8563_mutex);
-
- return ret;
-}
-
-static int __init pcf8563_register(void)
-{
- if (pcf8563_init() < 0) {
- printk(KERN_INFO "%s: Unable to initialize Real-Time Clock "
- "Driver, %s\n", PCF8563_NAME, DRIVER_VERSION);
- return -1;
- }
-
- if (register_chrdev(PCF8563_MAJOR, DEVICE_NAME, &pcf8563_fops) < 0) {
- printk(KERN_INFO "%s: Unable to get major number %d for RTC device.\n",
- PCF8563_NAME, PCF8563_MAJOR);
- return -1;
- }
-
- printk(KERN_INFO "%s Real-Time Clock Driver, %s\n", PCF8563_NAME,
- DRIVER_VERSION);
-
- /* Check for low voltage, and warn about it. */
- if (voltage_low) {
- printk(KERN_WARNING "%s: RTC Voltage Low - reliable date/time "
- "information is no longer guaranteed!\n", PCF8563_NAME);
- }
-
- return 0;
-}
-
-module_init(pcf8563_register);
-module_exit(pcf8563_exit);
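
Editorial aside (not part of the patch): both removed CRIS RTC drivers carry their own date sanity checks before writing BCD values to the chip. The rule they apply — leap years plus a per-month day limit — is shown below as a stand-alone user-space sketch; it mirrors the checks in the ioctl handlers above but is not the kernel code itself.

/*
 * Editorial sketch of the date validation used by the removed RTC
 * ioctl handlers above (leap-year rule plus per-month day limit).
 */
#include <stdio.h>

static const unsigned char days_in_month[] =
	{ 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };

static int is_leap(int year)
{
	return (year % 4 == 0 && year % 100 != 0) || (year % 400 == 0);
}

/* Returns 1 when year/mon/day form a valid calendar date (mon is 1..12). */
static int valid_date(int year, int mon, int day)
{
	if (mon < 1 || mon > 12 || day < 1)
		return 0;
	return day <= days_in_month[mon] + ((mon == 2) && is_leap(year));
}

int main(void)
{
	printf("%d\n", valid_date(2000, 2, 29)); /* 1: 2000 is a leap year */
	printf("%d\n", valid_date(1900, 2, 29)); /* 0: 1900 is not         */
	printf("%d\n", valid_date(2012, 4, 31)); /* 0: April has 30 days   */
	return 0;
}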
diff --git a/arch/cris/arch-v10/kernel/fasttimer.c b/arch/cris/arch-v10/kernel/fasttimer.c
index 8a8196ee8ce8..082f1890bacb 100644
--- a/arch/cris/arch-v10/kernel/fasttimer.c
+++ b/arch/cris/arch-v10/kernel/fasttimer.c
@@ -21,8 +21,6 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>
-#include <asm/rtc.h>
-
#include <arch/svinto.h>
#include <asm/fasttimer.h>
diff --git a/arch/cris/arch-v10/kernel/kgdb.c b/arch/cris/arch-v10/kernel/kgdb.c
index b579dd02e098..37e6d2c50b76 100644
--- a/arch/cris/arch-v10/kernel/kgdb.c
+++ b/arch/cris/arch-v10/kernel/kgdb.c
@@ -264,7 +264,7 @@ static int write_register (int regno, char *val);
/* Write a value to a specified register in the stack of a thread other
than the current thread. */
-static write_stack_register (int thread_id, int regno, char *valptr);
+static int write_stack_register(int thread_id, int regno, char *valptr);
/* Read a value from a specified register in the register image. Returns the
status of the read operation. The register value is returned in valptr. */
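
Editorial aside (not part of the patch): the kgdb.c hunk above adds an explicit return type to a prototype that relied on C89 "implicit int", which newer compilers (and -Werror kernel builds) reject. A minimal illustration:

/* static foo(int x);      C89 implicit int -- an error in C99 and later */
static int foo(int x);      /* explicit return type, accepted everywhere  */

static int foo(int x)
{
	return x + 1;
}

int main(void)
{
	return foo(41) == 42 ? 0 : 1;
}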
diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c
index 20c85b5dc7d0..bcffcb6a9415 100644
--- a/arch/cris/arch-v10/kernel/time.c
+++ b/arch/cris/arch-v10/kernel/time.c
@@ -19,16 +19,12 @@
#include <asm/signal.h>
#include <asm/io.h>
#include <asm/delay.h>
-#include <asm/rtc.h>
#include <asm/irq_regs.h>
/* define this if you need to use print_timestamp */
/* it will make jiffies at 96 hz instead of 100 hz though */
#undef USE_CASCADE_TIMERS
-extern int set_rtc_mmss(unsigned long nowtime);
-extern int have_rtc;
-
unsigned long get_ns_in_jiffie(void)
{
unsigned char timer_count, t1;
@@ -203,11 +199,6 @@ time_init(void)
*/
loops_per_usec = 50;
- if(RTC_INIT() < 0)
- have_rtc = 0;
- else
- have_rtc = 1;
-
/* Setup the etrax timers
* Base frequency is 25000 hz, divider 250 -> 100 HZ
* In normal mode, we use timer0, so timer1 is free. In cascade
diff --git a/arch/cris/arch-v10/lib/Makefile b/arch/cris/arch-v10/lib/Makefile
index 36e9a9c5239b..725153edb764 100644
--- a/arch/cris/arch-v10/lib/Makefile
+++ b/arch/cris/arch-v10/lib/Makefile
@@ -2,8 +2,5 @@
# Makefile for Etrax-specific library files..
#
-
-EXTRA_AFLAGS := -traditional
-
lib-y = checksum.o checksumcopy.o string.o usercopy.o memset.o csumcpfruser.o
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index 642c6fed43d7..f8476d9e856b 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -1394,11 +1394,10 @@ static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char
if (padlen < MD5_MIN_PAD_LENGTH) padlen += MD5_BLOCK_LENGTH;
- p = kmalloc(padlen, alloc_flag);
+ p = kzalloc(padlen, alloc_flag);
if (!p) return -ENOMEM;
*p = 0x80;
- memset(p+1, 0, padlen - 1);
DEBUG(printk("create_md5_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
@@ -1426,11 +1425,10 @@ static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, cha
if (padlen < SHA1_MIN_PAD_LENGTH) padlen += SHA1_BLOCK_LENGTH;
- p = kmalloc(padlen, alloc_flag);
+ p = kzalloc(padlen, alloc_flag);
if (!p) return -ENOMEM;
*p = 0x80;
- memset(p+1, 0, padlen - 1);
DEBUG(printk("create_sha1_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f7ad9e8637df..f085229cf870 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -114,8 +114,6 @@ void user_disable_single_step(struct task_struct *child)
void
ptrace_disable(struct task_struct *child)
{
- unsigned long tmp;
-
/* Deconfigure SPC and S-bit. */
user_disable_single_step(child);
put_reg(child, PT_SPC, 0);
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index 6773fc83a670..8c4b45efd7b6 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -18,7 +18,6 @@
#include <asm/signal.h>
#include <asm/io.h>
#include <asm/delay.h>
-#include <asm/rtc.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
@@ -67,7 +66,6 @@ unsigned long timer_regs[NR_CPUS] =
};
extern int set_rtc_mmss(unsigned long nowtime);
-extern int have_rtc;
#ifdef CONFIG_CPU_FREQ
static int
@@ -265,11 +263,6 @@ void __init time_init(void)
*/
loops_per_usec = 50;
- if(RTC_INIT() < 0)
- have_rtc = 0;
- else
- have_rtc = 1;
-
/* Start CPU local timer. */
cris_timer_init();
diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
index 1de779f4f240..7caf25d58e6b 100644
--- a/arch/cris/include/arch-v32/arch/cache.h
+++ b/arch/cris/include/arch-v32/arch/cache.h
@@ -7,7 +7,7 @@
#define L1_CACHE_BYTES 32
#define L1_CACHE_SHIFT 5
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
void flush_dma_list(dma_descr_data *descr);
void flush_dma_descr(dma_descr_data *descr, int flush_buf);
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index 956eea246b97..04d02a51c5e9 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -6,5 +6,4 @@ header-y += arch-v32/
header-y += ethernet.h
header-y += etraxgpio.h
header-y += rs485.h
-header-y += rtc.h
header-y += sync_serial.h
diff --git a/arch/cris/include/asm/posix_types.h b/arch/cris/include/asm/posix_types.h
index 72b3cd6eda0b..234891c74e2b 100644
--- a/arch/cris/include/asm/posix_types.h
+++ b/arch/cris/include/asm/posix_types.h
@@ -33,4 +33,6 @@ typedef int __kernel_ptrdiff_t;
typedef unsigned short __kernel_old_dev_t;
#define __kernel_old_dev_t __kernel_old_dev_t
+#include <asm-generic/posix_types.h>
+
#endif /* __ARCH_CRIS_POSIX_TYPES_H */
diff --git a/arch/cris/include/asm/rtc.h b/arch/cris/include/asm/rtc.h
deleted file mode 100644
index 17d3019529e1..000000000000
--- a/arch/cris/include/asm/rtc.h
+++ /dev/null
@@ -1,107 +0,0 @@
-
-#ifndef __RTC_H__
-#define __RTC_H__
-
-#ifdef CONFIG_ETRAX_DS1302
- /* Dallas DS1302 clock/calendar register numbers. */
-# define RTC_SECONDS 0
-# define RTC_MINUTES 1
-# define RTC_HOURS 2
-# define RTC_DAY_OF_MONTH 3
-# define RTC_MONTH 4
-# define RTC_WEEKDAY 5
-# define RTC_YEAR 6
-# define RTC_CONTROL 7
-
- /* Bits in CONTROL register. */
-# define RTC_CONTROL_WRITEPROTECT 0x80
-# define RTC_TRICKLECHARGER 8
-
- /* Bits in TRICKLECHARGER register TCS TCS TCS TCS DS DS RS RS. */
-# define RTC_TCR_PATTERN 0xA0 /* 1010xxxx */
-# define RTC_TCR_1DIOD 0x04 /* xxxx01xx */
-# define RTC_TCR_2DIOD 0x08 /* xxxx10xx */
-# define RTC_TCR_DISABLED 0x00 /* xxxxxx00 Disabled */
-# define RTC_TCR_2KOHM 0x01 /* xxxxxx01 2KOhm */
-# define RTC_TCR_4KOHM 0x02 /* xxxxxx10 4kOhm */
-# define RTC_TCR_8KOHM 0x03 /* xxxxxx11 8kOhm */
-
-#elif defined(CONFIG_ETRAX_PCF8563)
- /* I2C bus slave registers. */
-# define RTC_I2C_READ 0xa3
-# define RTC_I2C_WRITE 0xa2
-
- /* Phillips PCF8563 registers. */
-# define RTC_CONTROL1 0x00 /* Control/Status register 1. */
-# define RTC_CONTROL2 0x01 /* Control/Status register 2. */
-# define RTC_CLOCKOUT_FREQ 0x0d /* CLKOUT frequency. */
-# define RTC_TIMER_CONTROL 0x0e /* Timer control. */
-# define RTC_TIMER_CNTDOWN 0x0f /* Timer countdown. */
-
- /* BCD encoded clock registers. */
-# define RTC_SECONDS 0x02
-# define RTC_MINUTES 0x03
-# define RTC_HOURS 0x04
-# define RTC_DAY_OF_MONTH 0x05
-# define RTC_WEEKDAY 0x06 /* Not coded in BCD! */
-# define RTC_MONTH 0x07
-# define RTC_YEAR 0x08
-# define RTC_MINUTE_ALARM 0x09
-# define RTC_HOUR_ALARM 0x0a
-# define RTC_DAY_ALARM 0x0b
-# define RTC_WEEKDAY_ALARM 0x0c
-
-#endif
-
-#ifdef CONFIG_ETRAX_DS1302
-extern unsigned char ds1302_readreg(int reg);
-extern void ds1302_writereg(int reg, unsigned char val);
-extern int ds1302_init(void);
-# define CMOS_READ(x) ds1302_readreg(x)
-# define CMOS_WRITE(val,reg) ds1302_writereg(reg,val)
-# define RTC_INIT() ds1302_init()
-#elif defined(CONFIG_ETRAX_PCF8563)
-extern unsigned char pcf8563_readreg(int reg);
-extern void pcf8563_writereg(int reg, unsigned char val);
-extern int pcf8563_init(void);
-# define CMOS_READ(x) pcf8563_readreg(x)
-# define CMOS_WRITE(val,reg) pcf8563_writereg(reg,val)
-# define RTC_INIT() pcf8563_init()
-#else
- /* No RTC configured so we shouldn't try to access any. */
-# define CMOS_READ(x) 42
-# define CMOS_WRITE(x,y)
-# define RTC_INIT() (-1)
-#endif
-
-/*
- * The struct used to pass data via the following ioctl. Similar to the
- * struct tm in <time.h>, but it needs to be here so that the kernel
- * source is self contained, allowing cross-compiles, etc. etc.
- */
-struct rtc_time {
- int tm_sec;
- int tm_min;
- int tm_hour;
- int tm_mday;
- int tm_mon;
- int tm_year;
- int tm_wday;
- int tm_yday;
- int tm_isdst;
-};
-
-/* ioctl() calls that are permitted to the /dev/rtc interface. */
-#define RTC_MAGIC 'p'
-/* Read RTC time. */
-#define RTC_RD_TIME _IOR(RTC_MAGIC, 0x09, struct rtc_time)
-/* Set RTC time. */
-#define RTC_SET_TIME _IOW(RTC_MAGIC, 0x0a, struct rtc_time)
-#define RTC_SET_CHARGE _IOW(RTC_MAGIC, 0x0b, int)
-/* Voltage low detector */
-#define RTC_VL_READ _IOR(RTC_MAGIC, 0x13, int)
-/* Clear voltage low information */
-#define RTC_VL_CLR _IO(RTC_MAGIC, 0x14)
-#define RTC_MAX_IOCTL 0x14
-
-#endif /* __RTC_H__ */
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index 4e73092e85c0..277ffc459e4b 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -21,7 +21,6 @@
*
*/
-#include <asm/rtc.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/param.h>
@@ -32,7 +31,8 @@
#include <linux/profile.h>
#include <linux/sched.h> /* just for sched_clock() - funny that */
-int have_rtc; /* used to remember if we have an RTC or not */;
+
+#define D(x)
#define TICK_SIZE tick
@@ -50,78 +50,16 @@ u32 arch_gettimeoffset(void)
}
#endif
-/*
- * BUG: This routine does not handle hour overflow properly; it just
- * sets the minutes. Usually you'll only notice that after reboot!
- */
-
int set_rtc_mmss(unsigned long nowtime)
{
- int retval = 0;
- int real_seconds, real_minutes, cmos_minutes;
-
- printk(KERN_DEBUG "set_rtc_mmss(%lu)\n", nowtime);
-
- if(!have_rtc)
- return 0;
-
- cmos_minutes = CMOS_READ(RTC_MINUTES);
- cmos_minutes = bcd2bin(cmos_minutes);
-
- /*
- * since we're only adjusting minutes and seconds,
- * don't interfere with hour overflow. This avoids
- * messing with unknown time zones but requires your
- * RTC not to be off by more than 15 minutes
- */
- real_seconds = nowtime % 60;
- real_minutes = nowtime / 60;
- if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
- real_minutes += 30; /* correct for half hour time zone */
- real_minutes %= 60;
-
- if (abs(real_minutes - cmos_minutes) < 30) {
- real_seconds = bin2bcd(real_seconds);
- real_minutes = bin2bcd(real_minutes);
- CMOS_WRITE(real_seconds,RTC_SECONDS);
- CMOS_WRITE(real_minutes,RTC_MINUTES);
- } else {
- printk_once(KERN_NOTICE
- "set_rtc_mmss: can't update from %d to %d\n",
- cmos_minutes, real_minutes);
- retval = -1;
- }
-
- return retval;
+ D(printk(KERN_DEBUG "set_rtc_mmss(%lu)\n", nowtime));
+ return 0;
}
/* grab the time from the RTC chip */
-
-unsigned long
-get_cmos_time(void)
+unsigned long get_cmos_time(void)
{
- unsigned int year, mon, day, hour, min, sec;
- if(!have_rtc)
- return 0;
-
- sec = CMOS_READ(RTC_SECONDS);
- min = CMOS_READ(RTC_MINUTES);
- hour = CMOS_READ(RTC_HOURS);
- day = CMOS_READ(RTC_DAY_OF_MONTH);
- mon = CMOS_READ(RTC_MONTH);
- year = CMOS_READ(RTC_YEAR);
-
- sec = bcd2bin(sec);
- min = bcd2bin(min);
- hour = bcd2bin(hour);
- day = bcd2bin(day);
- mon = bcd2bin(mon);
- year = bcd2bin(year);
-
- if ((year += 1900) < 1970)
- year += 100;
-
- return mktime(year, mon, day, hour, min, sec);
+ return 0;
}
@@ -132,7 +70,7 @@ int update_persistent_clock(struct timespec now)
void read_persistent_clock(struct timespec *ts)
{
- ts->tv_sec = get_cmos_time();
+ ts->tv_sec = 0;
ts->tv_nsec = 0;
}
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index a6990cb0f098..a68b983dcea1 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -52,6 +52,7 @@ SECTIONS
EXCEPTION_TABLE(4)
+ _sdata = .;
RODATA
. = ALIGN (4);
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index b4760d86e1bb..45fd542cf173 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -58,6 +58,8 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
struct vm_area_struct * vma;
siginfo_t info;
int fault;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ ((writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
D(printk(KERN_DEBUG
"Page fault for %lX on %X at %lX, prot %d write %d\n",
@@ -115,6 +117,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
if (in_atomic() || !mm)
goto no_context;
+retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
@@ -163,7 +166,11 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -171,10 +178,24 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
+
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+ /*
+ * No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
up_read(&mm->mmap_sem);
return;
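
Editorial aside (not part of the patch): the fault.c hunk above (and the microblaze one further down) adopts the common retry discipline — the first attempt sets FAULT_FLAG_ALLOW_RETRY, and if the handler reports VM_FAULT_RETRY the flag is cleared and the fault is redone exactly once. A condensed stand-alone sketch of that control flow; fake_handle_fault() and the flag values are stand-ins, not the kernel API:

#include <stdio.h>

#define FLAG_ALLOW_RETRY  0x1
#define FAULT_RETRY       0x2

static int fake_handle_fault(unsigned int flags)
{
	/* Pretend the first, retry-capable attempt had to drop the lock. */
	return (flags & FLAG_ALLOW_RETRY) ? FAULT_RETRY : 0;
}

int main(void)
{
	unsigned int flags = FLAG_ALLOW_RETRY;
	int fault;

retry:
	fault = fake_handle_fault(flags);
	if (fault & FAULT_RETRY) {
		flags &= ~FLAG_ALLOW_RETRY;   /* second pass must complete */
		printf("retrying fault once\n");
		goto retry;
	}
	printf("fault handled\n");
	return 0;
}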
diff --git a/arch/frv/include/asm/kvm_para.h b/arch/frv/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/frv/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu
index 15c22286ae79..321f3922728b 100644
--- a/arch/h8300/Kconfig.cpu
+++ b/arch/h8300/Kconfig.cpu
@@ -1,7 +1,5 @@
menu "Processor type and features"
-source "kernel/time/Kconfig"
-
choice
prompt "H8/300 platform"
default H8300H_GENERIC
diff --git a/arch/h8300/include/asm/kvm_para.h b/arch/h8300/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/h8300/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index bc979f770980..b2fdfb700f50 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -27,6 +27,9 @@ config HEXAGON
select GENERIC_IOMAP
select GENERIC_SMP_IDLE_THREAD
select STACKTRACE_SUPPORT
+ select KTIME_SCALAR
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CLOCKEVENTS_BROADCAST
---help---
Qualcomm Hexagon is a processor architecture designed for high
performance and low power across a wide variety of applications.
@@ -55,9 +58,6 @@ config PCI
config EARLY_PRINTK
def_bool y
-config KTIME_SCALAR
- def_bool y
-
config MMU
def_bool y
@@ -88,15 +88,6 @@ config GENERIC_FIND_NEXT_BIT
config GENERIC_HWEIGHT
def_bool y
-config GENERIC_TIME
- def_bool y
-
-config GENERIC_CLOCKEVENTS
- def_bool y
-
-config GENERIC_CLOCKEVENTS_BROADCAST
- def_bool y
-
config STACKTRACE_SUPPORT
def_bool y
select STACKTRACE
@@ -179,7 +170,6 @@ endchoice
source "mm/Kconfig"
source "kernel/Kconfig.hz"
-source "kernel/time/Kconfig"
config GENERIC_GPIO
def_bool n
diff --git a/arch/hexagon/include/asm/kvm_para.h b/arch/hexagon/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/hexagon/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ba667b60f32d..8186ec5ea151 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -37,6 +37,8 @@ config IA64
select ARCH_INIT_TASK
select ARCH_TASK_STRUCT_ALLOCATOR
select ARCH_THREAD_INFO_ALLOCATOR
+ select ARCH_CLOCKSOURCE_DATA
+ select GENERIC_TIME_VSYSCALL
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
@@ -92,10 +94,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
-config GENERIC_TIME_VSYSCALL
- bool
- default y
-
config HAVE_SETUP_PER_CPU_AREA
def_bool y
@@ -110,9 +108,6 @@ config EFI
bool
default y
-config ARCH_CLOCKSOURCE_DATA
- def_bool y
-
config SCHED_OMIT_FRAME_POINTER
bool
default y
diff --git a/arch/ia64/include/asm/gpio.h b/arch/ia64/include/asm/gpio.h
index 590a20debc4e..b3799d88ffcf 100644
--- a/arch/ia64/include/asm/gpio.h
+++ b/arch/ia64/include/asm/gpio.h
@@ -1,55 +1,4 @@
-/*
- * Generic GPIO API implementation for IA-64.
- *
- * A stright copy of that for PowerPC which was:
- *
- * Copyright (c) 2007-2008 MontaVista Software, Inc.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef _ASM_IA64_GPIO_H
-#define _ASM_IA64_GPIO_H
-
-#include <linux/errno.h>
-#include <asm-generic/gpio.h>
-
-#ifdef CONFIG_GPIOLIB
-
-/*
- * We don't (yet) implement inlined/rapid versions for on-chip gpios.
- * Just call gpiolib.
- */
-static inline int gpio_get_value(unsigned int gpio)
-{
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
-{
- __gpio_set_value(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned int gpio)
-{
- return __gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(unsigned int gpio)
-{
- return __gpio_to_irq(gpio);
-}
-
-static inline int irq_to_gpio(unsigned int irq)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_GPIOLIB */
-
-#endif /* _ASM_IA64_GPIO_H */
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index e35b3a84a40b..6d6a5ac48d85 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -365,6 +365,7 @@ struct thash_cb {
};
struct kvm_vcpu_stat {
+ u32 halt_wakeup;
};
struct kvm_vcpu_arch {
@@ -448,6 +449,8 @@ struct kvm_vcpu_arch {
char log_buf[VMM_LOG_LEN];
union context host;
union context guest;
+
+ char mmio_data[8];
};
struct kvm_vm_stat {
diff --git a/arch/ia64/include/asm/kvm_para.h b/arch/ia64/include/asm/kvm_para.h
index 1588aee781a2..2019cb99335e 100644
--- a/arch/ia64/include/asm/kvm_para.h
+++ b/arch/ia64/include/asm/kvm_para.h
@@ -26,6 +26,11 @@ static inline unsigned int kvm_arch_para_features(void)
return 0;
}
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
#endif
#endif
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 463fb3bbe11e..bd77cb507c1c 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -232,12 +232,12 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
goto mmio;
vcpu->mmio_needed = 1;
- vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
- vcpu->mmio_size = kvm_run->mmio.len = p->size;
+ vcpu->mmio_fragments[0].gpa = kvm_run->mmio.phys_addr = p->addr;
+ vcpu->mmio_fragments[0].len = kvm_run->mmio.len = p->size;
vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
if (vcpu->mmio_is_write)
- memcpy(vcpu->mmio_data, &p->data, p->size);
+ memcpy(vcpu->arch.mmio_data, &p->data, p->size);
memcpy(kvm_run->mmio.data, &p->data, p->size);
kvm_run->exit_reason = KVM_EXIT_MMIO;
return 0;
@@ -719,7 +719,7 @@ static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
if (!vcpu->mmio_is_write)
- memcpy(&p->data, vcpu->mmio_data, 8);
+ memcpy(&p->data, vcpu->arch.mmio_data, 8);
p->state = STATE_IORESP_READY;
}
@@ -739,7 +739,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
if (vcpu->mmio_needed) {
- memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
+ memcpy(vcpu->arch.mmio_data, kvm_run->mmio.data, 8);
kvm_set_mmio_data(vcpu);
vcpu->mmio_read_completed = 1;
vcpu->mmio_needed = 0;
@@ -1872,21 +1872,6 @@ void kvm_arch_hardware_unsetup(void)
{
}
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
-{
- int me;
- int cpu = vcpu->cpu;
-
- if (waitqueue_active(&vcpu->wq))
- wake_up_interruptible(&vcpu->wq);
-
- me = get_cpu();
- if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
- if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
- smp_send_reschedule(cpu);
- put_cpu();
-}
-
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
{
return __apic_accept_irq(vcpu, irq->vector);
@@ -1956,6 +1941,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
(kvm_highest_pending_irq(vcpu) != -1);
}
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+ return (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests));
+}
+
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index ef80a6546ff2..b638d5bfa14d 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -11,6 +11,7 @@ config M32R
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_ATOMIC64
+ select ARCH_USES_GETTIMEOFFSET
config SBUS
bool
@@ -33,9 +34,6 @@ config HZ
int
default 100
-config ARCH_USES_GETTIMEOFFSET
- def_bool y
-
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index d318c606c888..cac5b6be572a 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -8,6 +8,7 @@ config M68K
select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
select GENERIC_CPU_DEVICES
select FPU if MMU
+ select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
config RWSEM_GENERIC_SPINLOCK
bool
@@ -22,9 +23,6 @@ config ARCH_HAS_ILOG2_U32
config ARCH_HAS_ILOG2_U64
bool
-config GENERIC_CLOCKEVENTS
- bool
-
config GENERIC_GPIO
bool
@@ -43,9 +41,6 @@ config TIME_LOW_RES
bool
default y
-config ARCH_USES_GETTIMEOFFSET
- def_bool MMU && !COLDFIRE
-
config NO_IOPORT
def_bool y
@@ -111,10 +106,6 @@ if COLDFIRE
source "kernel/Kconfig.preempt"
endif
-if !MMU || COLDFIRE
-source "kernel/time/Kconfig"
-endif
-
source "mm/Kconfig"
endmenu
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 51b3274cbe71..2b53254ad994 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -24,6 +24,7 @@ config COLDFIRE
bool "Coldfire CPU family support"
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
+ select ARCH_HAVE_CUSTOM_GPIO_H
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_MULDIV64
select GENERIC_CSUM
diff --git a/arch/m68k/include/asm/kvm_para.h b/arch/m68k/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/m68k/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index ac22dc7f4cab..0bf44231aaf9 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -22,6 +22,7 @@ config MICROBLAZE
select GENERIC_PCI_IOMAP
select GENERIC_CPU_DEVICES
select GENERIC_ATOMIC64
+ select GENERIC_CLOCKEVENTS
config SWAP
def_bool n
@@ -50,14 +51,8 @@ config GENERIC_HWEIGHT
config GENERIC_CALIBRATE_DELAY
def_bool y
-config GENERIC_TIME_VSYSCALL
- def_bool n
-
-config GENERIC_CLOCKEVENTS
- def_bool y
-
config GENERIC_GPIO
- def_bool y
+ bool
config GENERIC_CSUM
def_bool y
@@ -79,8 +74,6 @@ source "arch/microblaze/platform/Kconfig.platform"
menu "Processor type and features"
-source "kernel/time/Kconfig"
-
source "kernel/Kconfig.preempt"
source "kernel/Kconfig.hz"
diff --git a/arch/microblaze/include/asm/gpio.h b/arch/microblaze/include/asm/gpio.h
index 2b2c18be71c6..b3799d88ffcf 100644
--- a/arch/microblaze/include/asm/gpio.h
+++ b/arch/microblaze/include/asm/gpio.h
@@ -1,53 +1,4 @@
-/*
- * Generic GPIO API implementation for PowerPC.
- *
- * Copyright (c) 2007-2008 MontaVista Software, Inc.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef _ASM_MICROBLAZE_GPIO_H
-#define _ASM_MICROBLAZE_GPIO_H
-
-#include <linux/errno.h>
-#include <asm-generic/gpio.h>
-
-#ifdef CONFIG_GPIOLIB
-
-/*
- * We don't (yet) implement inlined/rapid versions for on-chip gpios.
- * Just call gpiolib.
- */
-static inline int gpio_get_value(unsigned int gpio)
-{
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
-{
- __gpio_set_value(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned int gpio)
-{
- return __gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(unsigned int gpio)
-{
- return __gpio_to_irq(gpio);
-}
-
-static inline int irq_to_gpio(unsigned int irq)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_GPIOLIB */
-
-#endif /* _ASM_MICROBLAZE_GPIO_H */
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/arch/microblaze/include/asm/kvm_para.h b/arch/microblaze/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/microblaze/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index daff9e5e4a1f..03f7b8ce6b6b 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -492,10 +492,11 @@ C_ENTRY(sys_clone):
bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
lwi r6, r1, PT_R1; /* If so, use paret's stack ptr */
1: addik r7, r1, 0; /* Arg 2: parent context */
- add r8, r0, r0; /* Arg 3: (unused) */
- add r9, r0, r0; /* Arg 4: (unused) */
+ lwi r9, r1, PT_R8; /* parent tid. */
+ lwi r10, r1, PT_R9; /* child tid. */
+ /* do_fork will pick up TLS from regs->r10. */
brid do_fork /* Do real work (tail-call) */
- add r10, r0, r0; /* Arg 5: (unused) */
+ add r8, r0, r0; /* Arg 3: (unused) */
C_ENTRY(sys_execve):
brid microblaze_execve; /* Do real work (tail-call).*/
diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S
index e7eaa7a8cbd3..fc1e1322ce4c 100644
--- a/arch/microblaze/kernel/mcount.S
+++ b/arch/microblaze/kernel/mcount.S
@@ -138,7 +138,7 @@ NOALIGN_ENTRY(ftrace_call)
#endif /* CONFIG_DYNAMIC_FTRACE */
/* static normal trace */
lwi r6, r1, 120; /* MS: load parent addr */
- addik r5, r15, 0; /* MS: load current function addr */
+ addik r5, r15, -4; /* MS: load current function addr */
/* MS: here is dependency on previous code */
brald r15, r20; /* MS: jump to ftrace handler */
nop;
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 883b92789cdf..1944e00f07e1 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -182,8 +182,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#endif
ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;
+ /*
+ * r21 is the thread reg, r10 is 6th arg to clone
+ * which contains TLS area
+ */
if (clone_flags & CLONE_SETTLS)
- ;
+ childregs->r21 = childregs->r10;
return 0;
}
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index c38a265846de..eb365d6795fa 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -92,6 +92,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
int code = SEGV_MAPERR;
int is_write = error_code & ESR_S;
int fault;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (is_write ? FAULT_FLAG_WRITE : 0);
regs->ear = address;
regs->esr = error_code;
@@ -138,6 +140,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
if (kernel_mode(regs) && !search_exception_tables(regs->pc))
goto bad_area_nosemaphore;
+retry:
down_read(&mm->mmap_sem);
}
@@ -210,7 +213,11 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -218,11 +225,27 @@ good_area:
goto do_sigbus;
BUG();
}
- if (unlikely(fault & VM_FAULT_MAJOR))
- current->maj_flt++;
- else
- current->min_flt++;
+
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (unlikely(fault & VM_FAULT_MAJOR))
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+ /*
+ * No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
+
up_read(&mm->mmap_sem);
+
/*
* keep track of tlb+htab misses that are good addrs but
* just need pte's created via handle_mm_fault()
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 85aad0321397..09ab87ee6fef 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -8,6 +8,7 @@ config MIPS
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_ARCH_KGDB
+ select ARCH_HAVE_CUSTOM_GPIO_H
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_DYNAMIC_FTRACE
@@ -31,6 +32,8 @@ config MIPS
select ARCH_DISCARD_MEMBLOCK
select GENERIC_SMP_IDLE_THREAD
select BUILDTIME_EXTABLE_SORT
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CMOS_UPDATE
menu "Machine selection"
@@ -230,8 +233,9 @@ config LANTIQ
select ARCH_REQUIRE_GPIOLIB
select SWAP_IO_SPACE
select BOOT_RAW
- select HAVE_CLK
- select MIPS_MACHINE
+ select HAVE_MACH_CLKDEV
+ select CLKDEV_LOOKUP
+ select USE_OF
config LASAT
bool "LASAT Networks platforms"
@@ -858,14 +862,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
-config GENERIC_CLOCKEVENTS
- bool
- default y
-
-config GENERIC_CMOS_UPDATE
- bool
- default y
-
config SCHED_OMIT_FRAME_POINTER
bool
default y
@@ -1788,10 +1784,12 @@ endchoice
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
- range 13 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB
- default "13" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB
- range 12 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB
- default "12" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB
+ range 14 64 if HUGETLB_PAGE && PAGE_SIZE_64KB
+ default "14" if HUGETLB_PAGE && PAGE_SIZE_64KB
+ range 13 64 if HUGETLB_PAGE && PAGE_SIZE_32KB
+ default "13" if HUGETLB_PAGE && PAGE_SIZE_32KB
+ range 12 64 if HUGETLB_PAGE && PAGE_SIZE_16KB
+ default "12" if HUGETLB_PAGE && PAGE_SIZE_16KB
range 11 64
default "11"
help
@@ -2052,9 +2050,6 @@ config CPU_HAS_SYNC
depends on !CPU_R3000
default y
-config GENERIC_CLOCKEVENTS_BROADCAST
- bool
-
#
# CPU non-features
#
@@ -2216,8 +2211,6 @@ config NR_CPUS
performance should round up your number of processors to the next
power of two.
-source "kernel/time/Kconfig"
-
#
# Timer Interrupt Frequency Configuration
#
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 76017c25a9e6..764e37a9dbb3 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -219,8 +219,8 @@ endif
KBUILD_AFLAGS += $(cflags-y)
KBUILD_CFLAGS += $(cflags-y)
-KBUILD_CPPFLAGS += -D"VMLINUX_LOAD_ADDRESS=$(load-y)"
-KBUILD_CPPFLAGS += -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)"
+KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
+KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
LDFLAGS += -m $(ld-emul)
diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
index a83302b96c01..7dde01642d6b 100644
--- a/arch/mips/alchemy/devboards/db1200.c
+++ b/arch/mips/alchemy/devboards/db1200.c
@@ -22,6 +22,7 @@
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/leds.h>
diff --git a/arch/mips/ath79/Kconfig b/arch/mips/ath79/Kconfig
index e0fae8f4442b..f44feee2d67f 100644
--- a/arch/mips/ath79/Kconfig
+++ b/arch/mips/ath79/Kconfig
@@ -26,6 +26,18 @@ config ATH79_MACH_AP81
Say 'Y' here if you want your kernel to support the
Atheros AP81 reference board.
+config ATH79_MACH_DB120
+ bool "Atheros DB120 reference board"
+ select SOC_AR934X
+ select ATH79_DEV_GPIO_BUTTONS
+ select ATH79_DEV_LEDS_GPIO
+ select ATH79_DEV_SPI
+ select ATH79_DEV_USB
+ select ATH79_DEV_WMAC
+ help
+ Say 'Y' here if you want your kernel to support the
+ Atheros DB120 reference board.
+
config ATH79_MACH_PB44
bool "Atheros PB44 reference board"
select SOC_AR71XX
@@ -52,12 +64,14 @@ endmenu
config SOC_AR71XX
select USB_ARCH_HAS_EHCI
select USB_ARCH_HAS_OHCI
+ select HW_HAS_PCI
def_bool n
config SOC_AR724X
select USB_ARCH_HAS_EHCI
select USB_ARCH_HAS_OHCI
select HW_HAS_PCI
+ select PCI_AR724X if PCI
def_bool n
config SOC_AR913X
@@ -68,6 +82,15 @@ config SOC_AR933X
select USB_ARCH_HAS_EHCI
def_bool n
+config SOC_AR934X
+ select USB_ARCH_HAS_EHCI
+ select HW_HAS_PCI
+ select PCI_AR724X if PCI
+ def_bool n
+
+config PCI_AR724X
+ def_bool n
+
config ATH79_DEV_GPIO_BUTTONS
def_bool n
@@ -81,7 +104,7 @@ config ATH79_DEV_USB
def_bool n
config ATH79_DEV_WMAC
- depends on (SOC_AR913X || SOC_AR933X)
+ depends on (SOC_AR913X || SOC_AR933X || SOC_AR934X)
def_bool n
endif
diff --git a/arch/mips/ath79/Makefile b/arch/mips/ath79/Makefile
index 3b911e09dbec..2b54d98263f3 100644
--- a/arch/mips/ath79/Makefile
+++ b/arch/mips/ath79/Makefile
@@ -11,6 +11,7 @@
obj-y := prom.o setup.o irq.o common.o clock.o gpio.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_PCI) += pci.o
#
# Devices
@@ -27,5 +28,6 @@ obj-$(CONFIG_ATH79_DEV_WMAC) += dev-wmac.o
#
obj-$(CONFIG_ATH79_MACH_AP121) += mach-ap121.o
obj-$(CONFIG_ATH79_MACH_AP81) += mach-ap81.o
+obj-$(CONFIG_ATH79_MACH_DB120) += mach-db120.o
obj-$(CONFIG_ATH79_MACH_PB44) += mach-pb44.o
obj-$(CONFIG_ATH79_MACH_UBNT_XM) += mach-ubnt-xm.o
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index 54d0eb4db987..b91ad3efe29e 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -1,8 +1,11 @@
/*
* Atheros AR71XX/AR724X/AR913X common routines
*
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
* Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
*
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
@@ -163,6 +166,82 @@ static void __init ar933x_clocks_init(void)
ath79_uart_clk.rate = ath79_ref_clk.rate;
}
+static void __init ar934x_clocks_init(void)
+{
+ u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
+
+ bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & AR934X_BOOTSTRAP_REF_CLK_40)
+ ath79_ref_clk.rate = 40 * 1000 * 1000;
+ else
+ ath79_ref_clk.rate = 25 * 1000 * 1000;
+
+ pll = ath79_pll_rr(AR934X_PLL_CPU_CONFIG_REG);
+ out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+ AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+ AR934X_PLL_CPU_CONFIG_REFDIV_MASK;
+ nint = (pll >> AR934X_PLL_CPU_CONFIG_NINT_SHIFT) &
+ AR934X_PLL_CPU_CONFIG_NINT_MASK;
+ frac = (pll >> AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+ AR934X_PLL_CPU_CONFIG_NFRAC_MASK;
+
+ cpu_pll = nint * ath79_ref_clk.rate / ref_div;
+ cpu_pll += frac * ath79_ref_clk.rate / (ref_div * (2 << 6));
+ cpu_pll /= (1 << out_div);
+
+ pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG);
+ out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+ AR934X_PLL_DDR_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+ AR934X_PLL_DDR_CONFIG_REFDIV_MASK;
+ nint = (pll >> AR934X_PLL_DDR_CONFIG_NINT_SHIFT) &
+ AR934X_PLL_DDR_CONFIG_NINT_MASK;
+ frac = (pll >> AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+ AR934X_PLL_DDR_CONFIG_NFRAC_MASK;
+
+ ddr_pll = nint * ath79_ref_clk.rate / ref_div;
+ ddr_pll += frac * ath79_ref_clk.rate / (ref_div * (2 << 10));
+ ddr_pll /= (1 << out_div);
+
+ clk_ctrl = ath79_pll_rr(AR934X_PLL_CPU_DDR_CLK_CTRL_REG);
+
+ postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+ AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK;
+
+ if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_PLL_BYPASS)
+ ath79_cpu_clk.rate = ath79_ref_clk.rate;
+ else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_CPUCLK_FROM_CPUPLL)
+ ath79_cpu_clk.rate = cpu_pll / (postdiv + 1);
+ else
+ ath79_cpu_clk.rate = ddr_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+ AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_MASK;
+
+ if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_PLL_BYPASS)
+ ath79_ddr_clk.rate = ath79_ref_clk.rate;
+ else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL)
+ ath79_ddr_clk.rate = ddr_pll / (postdiv + 1);
+ else
+ ath79_ddr_clk.rate = cpu_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+ AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_MASK;
+
+ if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_PLL_BYPASS)
+ ath79_ahb_clk.rate = ath79_ref_clk.rate;
+ else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+ ath79_ahb_clk.rate = ddr_pll / (postdiv + 1);
+ else
+ ath79_ahb_clk.rate = cpu_pll / (postdiv + 1);
+
+ ath79_wdt_clk.rate = ath79_ref_clk.rate;
+ ath79_uart_clk.rate = ath79_ref_clk.rate;
+}
+
void __init ath79_clocks_init(void)
{
if (soc_is_ar71xx())
@@ -173,6 +252,8 @@ void __init ath79_clocks_init(void)
ar913x_clocks_init();
else if (soc_is_ar933x())
ar933x_clocks_init();
+ else if (soc_is_ar934x())
+ ar934x_clocks_init();
else
BUG();
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index f0fda982b965..5a4adfc9d79d 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -1,9 +1,12 @@
/*
* Atheros AR71XX/AR724X/AR913X common routines
*
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
@@ -67,6 +70,8 @@ void ath79_device_reset_set(u32 mask)
reg = AR913X_RESET_REG_RESET_MODULE;
else if (soc_is_ar933x())
reg = AR933X_RESET_REG_RESET_MODULE;
+ else if (soc_is_ar934x())
+ reg = AR934X_RESET_REG_RESET_MODULE;
else
BUG();
@@ -91,6 +96,8 @@ void ath79_device_reset_clear(u32 mask)
reg = AR913X_RESET_REG_RESET_MODULE;
else if (soc_is_ar933x())
reg = AR933X_RESET_REG_RESET_MODULE;
+ else if (soc_is_ar934x())
+ reg = AR934X_RESET_REG_RESET_MODULE;
else
BUG();
diff --git a/arch/mips/ath79/dev-common.c b/arch/mips/ath79/dev-common.c
index f4956f809072..45efc63b08b6 100644
--- a/arch/mips/ath79/dev-common.c
+++ b/arch/mips/ath79/dev-common.c
@@ -89,7 +89,8 @@ void __init ath79_register_uart(void)
if (soc_is_ar71xx() ||
soc_is_ar724x() ||
- soc_is_ar913x()) {
+ soc_is_ar913x() ||
+ soc_is_ar934x()) {
ath79_uart_data[0].uartclk = clk_get_rate(clk);
platform_device_register(&ath79_uart_device);
} else if (soc_is_ar933x()) {
diff --git a/arch/mips/ath79/dev-gpio-buttons.c b/arch/mips/ath79/dev-gpio-buttons.c
index 4b0168a11c01..366b35fb164d 100644
--- a/arch/mips/ath79/dev-gpio-buttons.c
+++ b/arch/mips/ath79/dev-gpio-buttons.c
@@ -25,12 +25,10 @@ void __init ath79_register_gpio_keys_polled(int id,
struct gpio_keys_button *p;
int err;
- p = kmalloc(nbuttons * sizeof(*p), GFP_KERNEL);
+ p = kmemdup(buttons, nbuttons * sizeof(*p), GFP_KERNEL);
if (!p)
return;
- memcpy(p, buttons, nbuttons * sizeof(*p));
-
pdev = platform_device_alloc("gpio-keys-polled", id);
if (!pdev)
goto err_free_buttons;
diff --git a/arch/mips/ath79/dev-leds-gpio.c b/arch/mips/ath79/dev-leds-gpio.c
index cdade68dcd17..dcb1debcefb8 100644
--- a/arch/mips/ath79/dev-leds-gpio.c
+++ b/arch/mips/ath79/dev-leds-gpio.c
@@ -24,12 +24,10 @@ void __init ath79_register_leds_gpio(int id,
struct gpio_led *p;
int err;
- p = kmalloc(num_leds * sizeof(*p), GFP_KERNEL);
+ p = kmemdup(leds, num_leds * sizeof(*p), GFP_KERNEL);
if (!p)
return;
- memcpy(p, leds, num_leds * sizeof(*p));
-
pdev = platform_device_alloc("leds-gpio", id);
if (!pdev)
goto err_free_leds;
diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c
index 9c717bf98ffe..d6d893c16ad4 100644
--- a/arch/mips/ath79/dev-wmac.c
+++ b/arch/mips/ath79/dev-wmac.c
@@ -1,9 +1,12 @@
/*
* Atheros AR913X/AR933X SoC built-in WMAC device support
*
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
+ * Parts of this file are based on Atheros 2.6.15/2.6.31 BSP
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
@@ -26,8 +29,7 @@ static struct resource ath79_wmac_resources[] = {
/* .start and .end fields are filled dynamically */
.flags = IORESOURCE_MEM,
}, {
- .start = ATH79_CPU_IRQ_IP2,
- .end = ATH79_CPU_IRQ_IP2,
+ /* .start and .end fields are filled dynamically */
.flags = IORESOURCE_IRQ,
},
};
@@ -53,6 +55,8 @@ static void __init ar913x_wmac_setup(void)
ath79_wmac_resources[0].start = AR913X_WMAC_BASE;
ath79_wmac_resources[0].end = AR913X_WMAC_BASE + AR913X_WMAC_SIZE - 1;
+ ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
+ ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
}
@@ -79,6 +83,8 @@ static void __init ar933x_wmac_setup(void)
ath79_wmac_resources[0].start = AR933X_WMAC_BASE;
ath79_wmac_resources[0].end = AR933X_WMAC_BASE + AR933X_WMAC_SIZE - 1;
+ ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
+ ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
if (t & AR933X_BOOTSTRAP_REF_CLK_40)
@@ -92,12 +98,32 @@ static void __init ar933x_wmac_setup(void)
ath79_wmac_data.external_reset = ar933x_wmac_reset;
}
+static void ar934x_wmac_setup(void)
+{
+ u32 t;
+
+ ath79_wmac_device.name = "ar934x_wmac";
+
+ ath79_wmac_resources[0].start = AR934X_WMAC_BASE;
+ ath79_wmac_resources[0].end = AR934X_WMAC_BASE + AR934X_WMAC_SIZE - 1;
+ ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
+ ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1);
+
+ t = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+ if (t & AR934X_BOOTSTRAP_REF_CLK_40)
+ ath79_wmac_data.is_clk_25mhz = false;
+ else
+ ath79_wmac_data.is_clk_25mhz = true;
+}
+
void __init ath79_register_wmac(u8 *cal_data)
{
if (soc_is_ar913x())
ar913x_wmac_setup();
else if (soc_is_ar933x())
ar933x_wmac_setup();
+ else if (soc_is_ar934x())
+ ar934x_wmac_setup();
else
BUG();
diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
index 6a51ced7a293..dc938cb2ba58 100644
--- a/arch/mips/ath79/early_printk.c
+++ b/arch/mips/ath79/early_printk.c
@@ -71,6 +71,9 @@ static void prom_putchar_init(void)
case REV_ID_MAJOR_AR7241:
case REV_ID_MAJOR_AR7242:
case REV_ID_MAJOR_AR913X:
+ case REV_ID_MAJOR_AR9341:
+ case REV_ID_MAJOR_AR9342:
+ case REV_ID_MAJOR_AR9344:
_prom_putchar = prom_putchar_ar71xx;
break;
diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c
index a2f8ca630ed6..29054f211832 100644
--- a/arch/mips/ath79/gpio.c
+++ b/arch/mips/ath79/gpio.c
@@ -1,9 +1,12 @@
/*
* Atheros AR71XX/AR724X/AR913X GPIO API support
*
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
@@ -89,6 +92,42 @@ static int ath79_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
+static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ void __iomem *base = ath79_gpio_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+ __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
+ base + AR71XX_GPIO_REG_OE);
+
+ spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+ return 0;
+}
+
+static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ void __iomem *base = ath79_gpio_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+ if (value)
+ __raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
+ else
+ __raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
+
+ __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
+ base + AR71XX_GPIO_REG_OE);
+
+ spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+ return 0;
+}
+
static struct gpio_chip ath79_gpio_chip = {
.label = "ath79",
.get = ath79_gpio_get_value,
@@ -155,11 +194,17 @@ void __init ath79_gpio_init(void)
ath79_gpio_count = AR913X_GPIO_COUNT;
else if (soc_is_ar933x())
ath79_gpio_count = AR933X_GPIO_COUNT;
+ else if (soc_is_ar934x())
+ ath79_gpio_count = AR934X_GPIO_COUNT;
else
BUG();
ath79_gpio_base = ioremap_nocache(AR71XX_GPIO_BASE, AR71XX_GPIO_SIZE);
ath79_gpio_chip.ngpio = ath79_gpio_count;
+ if (soc_is_ar934x()) {
+ ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
+ ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
+ }
err = gpiochip_add(&ath79_gpio_chip);
if (err)
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 1b073de44680..90d09fc15398 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -1,10 +1,11 @@
/*
* Atheros AR71xx/AR724x/AR913x specific interrupt handling
*
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
- * Parts of this file are based on Atheros' 2.6.15 BSP
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -23,8 +24,8 @@
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
-static unsigned int ath79_ip2_flush_reg;
-static unsigned int ath79_ip3_flush_reg;
+static void (*ath79_ip2_handler)(void);
+static void (*ath79_ip3_handler)(void);
static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
{
@@ -129,7 +130,7 @@ static void __init ath79_misc_irq_init(void)
if (soc_is_ar71xx() || soc_is_ar913x())
ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
- else if (soc_is_ar724x() || soc_is_ar933x())
+ else if (soc_is_ar724x() || soc_is_ar933x() || soc_is_ar934x())
ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
else
BUG();
@@ -143,6 +144,39 @@ static void __init ath79_misc_irq_init(void)
irq_set_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler);
}
+static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+{
+ u32 status;
+
+ disable_irq_nosync(irq);
+
+ status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS);
+
+ if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) {
+ ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_PCIE);
+ generic_handle_irq(ATH79_IP2_IRQ(0));
+ } else if (status & AR934X_PCIE_WMAC_INT_WMAC_ALL) {
+ ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_WMAC);
+ generic_handle_irq(ATH79_IP2_IRQ(1));
+ } else {
+ spurious_interrupt();
+ }
+
+ enable_irq(irq);
+}
+
+static void ar934x_ip2_irq_init(void)
+{
+ int i;
+
+ for (i = ATH79_IP2_IRQ_BASE;
+ i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
+ irq_set_chip_and_handler(i, &dummy_irq_chip,
+ handle_level_irq);
+
+ irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar934x_ip2_irq_dispatch);
+}
+
asmlinkage void plat_irq_dispatch(void)
{
unsigned long pending;
@@ -152,10 +186,8 @@ asmlinkage void plat_irq_dispatch(void)
if (pending & STATUSF_IP7)
do_IRQ(ATH79_CPU_IRQ_TIMER);
- else if (pending & STATUSF_IP2) {
- ath79_ddr_wb_flush(ath79_ip2_flush_reg);
- do_IRQ(ATH79_CPU_IRQ_IP2);
- }
+ else if (pending & STATUSF_IP2)
+ ath79_ip2_handler();
else if (pending & STATUSF_IP4)
do_IRQ(ATH79_CPU_IRQ_GE0);
@@ -163,10 +195,8 @@ asmlinkage void plat_irq_dispatch(void)
else if (pending & STATUSF_IP5)
do_IRQ(ATH79_CPU_IRQ_GE1);
- else if (pending & STATUSF_IP3) {
- ath79_ddr_wb_flush(ath79_ip3_flush_reg);
- do_IRQ(ATH79_CPU_IRQ_USB);
- }
+ else if (pending & STATUSF_IP3)
+ ath79_ip3_handler();
else if (pending & STATUSF_IP6)
do_IRQ(ATH79_CPU_IRQ_MISC);
@@ -175,24 +205,97 @@ asmlinkage void plat_irq_dispatch(void)
spurious_interrupt();
}
+/*
+ * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
+ * these devices typically allocate coherent DMA memory; however, the
+ * DMA controller may still have some unsynchronized data in the FIFO.
+ * Issue a flush in the handlers to ensure that the driver sees
+ * the update.
+ */
+static void ar71xx_ip2_handler(void)
+{
+ ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_PCI);
+ do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar724x_ip2_handler(void)
+{
+ ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_PCIE);
+ do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar913x_ip2_handler(void)
+{
+ ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_WMAC);
+ do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar933x_ip2_handler(void)
+{
+ ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_WMAC);
+ do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar934x_ip2_handler(void)
+{
+ do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar71xx_ip3_handler(void)
+{
+ ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_USB);
+ do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar724x_ip3_handler(void)
+{
+ ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_USB);
+ do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar913x_ip3_handler(void)
+{
+ ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_USB);
+ do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar933x_ip3_handler(void)
+{
+ ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_USB);
+ do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar934x_ip3_handler(void)
+{
+ ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_USB);
+ do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
void __init arch_init_irq(void)
{
if (soc_is_ar71xx()) {
- ath79_ip2_flush_reg = AR71XX_DDR_REG_FLUSH_PCI;
- ath79_ip3_flush_reg = AR71XX_DDR_REG_FLUSH_USB;
+ ath79_ip2_handler = ar71xx_ip2_handler;
+ ath79_ip3_handler = ar71xx_ip3_handler;
} else if (soc_is_ar724x()) {
- ath79_ip2_flush_reg = AR724X_DDR_REG_FLUSH_PCIE;
- ath79_ip3_flush_reg = AR724X_DDR_REG_FLUSH_USB;
+ ath79_ip2_handler = ar724x_ip2_handler;
+ ath79_ip3_handler = ar724x_ip3_handler;
} else if (soc_is_ar913x()) {
- ath79_ip2_flush_reg = AR913X_DDR_REG_FLUSH_WMAC;
- ath79_ip3_flush_reg = AR913X_DDR_REG_FLUSH_USB;
+ ath79_ip2_handler = ar913x_ip2_handler;
+ ath79_ip3_handler = ar913x_ip3_handler;
} else if (soc_is_ar933x()) {
- ath79_ip2_flush_reg = AR933X_DDR_REG_FLUSH_WMAC;
- ath79_ip3_flush_reg = AR933X_DDR_REG_FLUSH_USB;
- } else
+ ath79_ip2_handler = ar933x_ip2_handler;
+ ath79_ip3_handler = ar933x_ip3_handler;
+ } else if (soc_is_ar934x()) {
+ ath79_ip2_handler = ar934x_ip2_handler;
+ ath79_ip3_handler = ar934x_ip3_handler;
+ } else {
BUG();
+ }
cp0_perfcount_irq = ATH79_MISC_IRQ_PERFC;
mips_cpu_irq_init();
ath79_misc_irq_init();
+
+ if (soc_is_ar934x())
+ ar934x_ip2_irq_init();
}
diff --git a/arch/mips/ath79/mach-db120.c b/arch/mips/ath79/mach-db120.c
new file mode 100644
index 000000000000..1983e4d2af4b
--- /dev/null
+++ b/arch/mips/ath79/mach-db120.c
@@ -0,0 +1,134 @@
+/*
+ * Atheros DB120 reference board support
+ *
+ * Copyright (c) 2011 Qualcomm Atheros
+ * Copyright (c) 2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/ath9k_platform.h>
+
+#include "machtypes.h"
+#include "dev-gpio-buttons.h"
+#include "dev-leds-gpio.h"
+#include "dev-spi.h"
+#include "dev-wmac.h"
+#include "pci.h"
+
+#define DB120_GPIO_LED_WLAN_5G 12
+#define DB120_GPIO_LED_WLAN_2G 13
+#define DB120_GPIO_LED_STATUS 14
+#define DB120_GPIO_LED_WPS 15
+
+#define DB120_GPIO_BTN_WPS 16
+
+#define DB120_KEYS_POLL_INTERVAL 20 /* msecs */
+#define DB120_KEYS_DEBOUNCE_INTERVAL (3 * DB120_KEYS_POLL_INTERVAL)
+
+#define DB120_WMAC_CALDATA_OFFSET 0x1000
+#define DB120_PCIE_CALDATA_OFFSET 0x5000
+
+static struct gpio_led db120_leds_gpio[] __initdata = {
+ {
+ .name = "db120:green:status",
+ .gpio = DB120_GPIO_LED_STATUS,
+ .active_low = 1,
+ },
+ {
+ .name = "db120:green:wps",
+ .gpio = DB120_GPIO_LED_WPS,
+ .active_low = 1,
+ },
+ {
+ .name = "db120:green:wlan-5g",
+ .gpio = DB120_GPIO_LED_WLAN_5G,
+ .active_low = 1,
+ },
+ {
+ .name = "db120:green:wlan-2g",
+ .gpio = DB120_GPIO_LED_WLAN_2G,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_keys_button db120_gpio_keys[] __initdata = {
+ {
+ .desc = "WPS button",
+ .type = EV_KEY,
+ .code = KEY_WPS_BUTTON,
+ .debounce_interval = DB120_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = DB120_GPIO_BTN_WPS,
+ .active_low = 1,
+ },
+};
+
+static struct spi_board_info db120_spi_info[] = {
+ {
+ .bus_num = 0,
+ .chip_select = 0,
+ .max_speed_hz = 25000000,
+ .modalias = "s25sl064a",
+ }
+};
+
+static struct ath79_spi_platform_data db120_spi_data = {
+ .bus_num = 0,
+ .num_chipselect = 1,
+};
+
+#ifdef CONFIG_PCI
+static struct ath9k_platform_data db120_ath9k_data;
+
+static int db120_pci_plat_dev_init(struct pci_dev *dev)
+{
+ switch (PCI_SLOT(dev->devfn)) {
+ case 0:
+ dev->dev.platform_data = &db120_ath9k_data;
+ break;
+ }
+
+ return 0;
+}
+
+static void __init db120_pci_init(u8 *eeprom)
+{
+ memcpy(db120_ath9k_data.eeprom_data, eeprom,
+ sizeof(db120_ath9k_data.eeprom_data));
+
+ ath79_pci_set_plat_dev_init(db120_pci_plat_dev_init);
+ ath79_register_pci();
+}
+#else
+static inline void db120_pci_init(u8 *eeprom) {}
+#endif /* CONFIG_PCI */
+
+static void __init db120_setup(void)
+{
+ u8 *art = (u8 *) KSEG1ADDR(0x1fff0000);
+
+ ath79_register_leds_gpio(-1, ARRAY_SIZE(db120_leds_gpio),
+ db120_leds_gpio);
+ ath79_register_gpio_keys_polled(-1, DB120_KEYS_POLL_INTERVAL,
+ ARRAY_SIZE(db120_gpio_keys),
+ db120_gpio_keys);
+ ath79_register_spi(&db120_spi_data, db120_spi_info,
+ ARRAY_SIZE(db120_spi_info));
+ ath79_register_wmac(art + DB120_WMAC_CALDATA_OFFSET);
+ db120_pci_init(art + DB120_PCIE_CALDATA_OFFSET);
+}
+
+MIPS_MACHINE(ATH79_MACH_DB120, "DB120", "Atheros DB120 reference board",
+ db120_setup);
diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c
index fe9701a32291..c5f0ea5e00c3 100644
--- a/arch/mips/ath79/mach-pb44.c
+++ b/arch/mips/ath79/mach-pb44.c
@@ -19,6 +19,7 @@
#include "dev-leds-gpio.h"
#include "dev-spi.h"
#include "dev-usb.h"
+#include "pci.h"
#define PB44_GPIO_I2C_SCL 0
#define PB44_GPIO_I2C_SDA 1
@@ -114,6 +115,7 @@ static void __init pb44_init(void)
ath79_register_spi(&pb44_spi_data, pb44_spi_info,
ARRAY_SIZE(pb44_spi_info));
ath79_register_usb();
+ ath79_register_pci();
}
MIPS_MACHINE(ATH79_MACH_PB44, "PB44", "Atheros PB44 reference board",
diff --git a/arch/mips/ath79/mach-ubnt-xm.c b/arch/mips/ath79/mach-ubnt-xm.c
index 3c311a539347..4a3c60694c75 100644
--- a/arch/mips/ath79/mach-ubnt-xm.c
+++ b/arch/mips/ath79/mach-ubnt-xm.c
@@ -12,16 +12,15 @@
#include <linux/init.h>
#include <linux/pci.h>
-
-#ifdef CONFIG_PCI
#include <linux/ath9k_platform.h>
-#include <asm/mach-ath79/pci-ath724x.h>
-#endif /* CONFIG_PCI */
+
+#include <asm/mach-ath79/irq.h>
#include "machtypes.h"
#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"
#include "dev-spi.h"
+#include "pci.h"
#define UBNT_XM_GPIO_LED_L1 0
#define UBNT_XM_GPIO_LED_L2 1
@@ -33,7 +32,6 @@
#define UBNT_XM_KEYS_POLL_INTERVAL 20
#define UBNT_XM_KEYS_DEBOUNCE_INTERVAL (3 * UBNT_XM_KEYS_POLL_INTERVAL)
-#define UBNT_XM_PCI_IRQ 48
#define UBNT_XM_EEPROM_ADDR (u8 *) KSEG1ADDR(0x1fff1000)
static struct gpio_led ubnt_xm_leds_gpio[] __initdata = {
@@ -84,12 +82,27 @@ static struct ath79_spi_platform_data ubnt_xm_spi_data = {
#ifdef CONFIG_PCI
static struct ath9k_platform_data ubnt_xm_eeprom_data;
-static struct ath724x_pci_data ubnt_xm_pci_data[] = {
- {
- .irq = UBNT_XM_PCI_IRQ,
- .pdata = &ubnt_xm_eeprom_data,
- },
-};
+static int ubnt_xm_pci_plat_dev_init(struct pci_dev *dev)
+{
+ switch (PCI_SLOT(dev->devfn)) {
+ case 0:
+ dev->dev.platform_data = &ubnt_xm_eeprom_data;
+ break;
+ }
+
+ return 0;
+}
+
+static void __init ubnt_xm_pci_init(void)
+{
+ memcpy(ubnt_xm_eeprom_data.eeprom_data, UBNT_XM_EEPROM_ADDR,
+ sizeof(ubnt_xm_eeprom_data.eeprom_data));
+
+ ath79_pci_set_plat_dev_init(ubnt_xm_pci_plat_dev_init);
+ ath79_register_pci();
+}
+#else
+static inline void ubnt_xm_pci_init(void) {}
#endif /* CONFIG_PCI */
static void __init ubnt_xm_init(void)
@@ -104,13 +117,7 @@ static void __init ubnt_xm_init(void)
ath79_register_spi(&ubnt_xm_spi_data, ubnt_xm_spi_info,
ARRAY_SIZE(ubnt_xm_spi_info));
-#ifdef CONFIG_PCI
- memcpy(ubnt_xm_eeprom_data.eeprom_data, UBNT_XM_EEPROM_ADDR,
- sizeof(ubnt_xm_eeprom_data.eeprom_data));
-
- ath724x_pci_add_data(ubnt_xm_pci_data, ARRAY_SIZE(ubnt_xm_pci_data));
-#endif /* CONFIG_PCI */
-
+ ubnt_xm_pci_init();
}
MIPS_MACHINE(ATH79_MACH_UBNT_XM,
diff --git a/arch/mips/ath79/machtypes.h b/arch/mips/ath79/machtypes.h
index 9a1f3826626e..af92e5c30d66 100644
--- a/arch/mips/ath79/machtypes.h
+++ b/arch/mips/ath79/machtypes.h
@@ -18,6 +18,7 @@ enum ath79_mach_type {
ATH79_MACH_GENERIC = 0,
ATH79_MACH_AP121, /* Atheros AP121 reference board */
ATH79_MACH_AP81, /* Atheros AP81 reference board */
+ ATH79_MACH_DB120, /* Atheros DB120 reference board */
ATH79_MACH_PB44, /* Atheros PB44 reference board */
ATH79_MACH_UBNT_XM, /* Ubiquiti Networks XM board rev 1.0 */
};
diff --git a/arch/mips/ath79/pci.c b/arch/mips/ath79/pci.c
new file mode 100644
index 000000000000..ca83abd9d31e
--- /dev/null
+++ b/arch/mips/ath79/pci.c
@@ -0,0 +1,130 @@
+/*
+ * Atheros AR71XX/AR724X specific PCI setup code
+ *
+ * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15 BSP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/irq.h>
+#include <asm/mach-ath79/pci.h>
+#include "pci.h"
+
+static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev);
+static const struct ath79_pci_irq *ath79_pci_irq_map __initdata;
+static unsigned ath79_pci_nr_irqs __initdata;
+
+static const struct ath79_pci_irq ar71xx_pci_irq_map[] __initconst = {
+ {
+ .slot = 17,
+ .pin = 1,
+ .irq = ATH79_PCI_IRQ(0),
+ }, {
+ .slot = 18,
+ .pin = 1,
+ .irq = ATH79_PCI_IRQ(1),
+ }, {
+ .slot = 19,
+ .pin = 1,
+ .irq = ATH79_PCI_IRQ(2),
+ }
+};
+
+static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = {
+ {
+ .slot = 0,
+ .pin = 1,
+ .irq = ATH79_PCI_IRQ(0),
+ }
+};
+
+int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
+{
+ int irq = -1;
+ int i;
+
+ if (ath79_pci_nr_irqs == 0 ||
+ ath79_pci_irq_map == NULL) {
+ if (soc_is_ar71xx()) {
+ ath79_pci_irq_map = ar71xx_pci_irq_map;
+ ath79_pci_nr_irqs = ARRAY_SIZE(ar71xx_pci_irq_map);
+ } else if (soc_is_ar724x() ||
+ soc_is_ar9342() ||
+ soc_is_ar9344()) {
+ ath79_pci_irq_map = ar724x_pci_irq_map;
+ ath79_pci_nr_irqs = ARRAY_SIZE(ar724x_pci_irq_map);
+ } else {
+ pr_crit("pci %s: invalid irq map\n",
+ pci_name((struct pci_dev *) dev));
+ return irq;
+ }
+ }
+
+ for (i = 0; i < ath79_pci_nr_irqs; i++) {
+ const struct ath79_pci_irq *entry;
+
+ entry = &ath79_pci_irq_map[i];
+ if (entry->slot == slot && entry->pin == pin) {
+ irq = entry->irq;
+ break;
+ }
+ }
+
+ if (irq < 0)
+ pr_crit("pci %s: no irq found for pin %u\n",
+ pci_name((struct pci_dev *) dev), pin);
+ else
+ pr_info("pci %s: using irq %d for pin %u\n",
+ pci_name((struct pci_dev *) dev), irq, pin);
+
+ return irq;
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ if (ath79_pci_plat_dev_init)
+ return ath79_pci_plat_dev_init(dev);
+
+ return 0;
+}
+
+void __init ath79_pci_set_irq_map(unsigned nr_irqs,
+ const struct ath79_pci_irq *map)
+{
+ ath79_pci_nr_irqs = nr_irqs;
+ ath79_pci_irq_map = map;
+}
+
+void __init ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev))
+{
+ ath79_pci_plat_dev_init = func;
+}
+
+int __init ath79_register_pci(void)
+{
+ if (soc_is_ar71xx())
+ return ar71xx_pcibios_init();
+
+ if (soc_is_ar724x())
+ return ar724x_pcibios_init(ATH79_CPU_IRQ_IP2);
+
+ if (soc_is_ar9342() || soc_is_ar9344()) {
+ u32 bootstrap;
+
+ bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & AR934X_BOOTSTRAP_PCIE_RC)
+ return ar724x_pcibios_init(ATH79_IP2_IRQ(0));
+ }
+
+ return -ENODEV;
+}
diff --git a/arch/mips/ath79/pci.h b/arch/mips/ath79/pci.h
new file mode 100644
index 000000000000..51c6625dcc6d
--- /dev/null
+++ b/arch/mips/ath79/pci.h
@@ -0,0 +1,34 @@
+/*
+ * Atheros AR71XX/AR724X PCI support
+ *
+ * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _ATH79_PCI_H
+#define _ATH79_PCI_H
+
+struct ath79_pci_irq {
+ u8 slot;
+ u8 pin;
+ int irq;
+};
+
+#ifdef CONFIG_PCI
+void ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map);
+void ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev));
+int ath79_register_pci(void);
+#else
+static inline void
+ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map) {}
+static inline void
+ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *)) {}
+static inline int ath79_register_pci(void) { return 0; }
+#endif
+
+#endif /* _ATH79_PCI_H */
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 80a7d4023d7f..60d212ef8629 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -1,10 +1,11 @@
/*
* Atheros AR71XX/AR724X/AR913X specific setup
*
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
- * Parts of this file are based on Atheros' 2.6.15 BSP
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -116,18 +117,6 @@ static void __init ath79_detect_sys_type(void)
rev = id & AR724X_REV_ID_REVISION_MASK;
break;
- case REV_ID_MAJOR_AR9330:
- ath79_soc = ATH79_SOC_AR9330;
- chip = "9330";
- rev = id & AR933X_REV_ID_REVISION_MASK;
- break;
-
- case REV_ID_MAJOR_AR9331:
- ath79_soc = ATH79_SOC_AR9331;
- chip = "9331";
- rev = id & AR933X_REV_ID_REVISION_MASK;
- break;
-
case REV_ID_MAJOR_AR913X:
minor = id & AR913X_REV_ID_MINOR_MASK;
rev = id >> AR913X_REV_ID_REVISION_SHIFT;
@@ -145,6 +134,36 @@ static void __init ath79_detect_sys_type(void)
}
break;
+ case REV_ID_MAJOR_AR9330:
+ ath79_soc = ATH79_SOC_AR9330;
+ chip = "9330";
+ rev = id & AR933X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_AR9331:
+ ath79_soc = ATH79_SOC_AR9331;
+ chip = "9331";
+ rev = id & AR933X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_AR9341:
+ ath79_soc = ATH79_SOC_AR9341;
+ chip = "9341";
+ rev = id & AR934X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_AR9342:
+ ath79_soc = ATH79_SOC_AR9342;
+ chip = "9342";
+ rev = id & AR934X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_AR9344:
+ ath79_soc = ATH79_SOC_AR9344;
+ chip = "9344";
+ rev = id & AR934X_REV_ID_REVISION_MASK;
+ break;
+
default:
panic("ath79: unknown SoC, id:0x%08x", id);
}
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 19780aa91708..95bf4d7bac21 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -90,6 +90,7 @@ static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
char prefix[10];
if (bus->bustype == SSB_BUSTYPE_PCI) {
+ memset(out, 0, sizeof(struct ssb_sprom));
snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
bus->host_pci->bus->number + 1,
PCI_SLOT(bus->host_pci->devfn));
@@ -109,15 +110,9 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus,
/* Fill boardinfo structure */
memset(&(iv->boardinfo), 0 , sizeof(struct ssb_boardinfo));
- if (nvram_getenv("boardvendor", buf, sizeof(buf)) >= 0)
- iv->boardinfo.vendor = (u16)simple_strtoul(buf, NULL, 0);
- else
- iv->boardinfo.vendor = SSB_BOARDVENDOR_BCM;
- if (nvram_getenv("boardtype", buf, sizeof(buf)) >= 0)
- iv->boardinfo.type = (u16)simple_strtoul(buf, NULL, 0);
- if (nvram_getenv("boardrev", buf, sizeof(buf)) >= 0)
- iv->boardinfo.rev = (u16)simple_strtoul(buf, NULL, 0);
+ bcm47xx_fill_ssb_boardinfo(&iv->boardinfo, NULL);
+ memset(&iv->sprom, 0, sizeof(struct ssb_sprom));
bcm47xx_fill_sprom(&iv->sprom, NULL);
if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0)
@@ -166,12 +161,14 @@ static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
switch (bus->hosttype) {
case BCMA_HOSTTYPE_PCI:
+ memset(out, 0, sizeof(struct ssb_sprom));
snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
bus->host_pci->bus->number + 1,
PCI_SLOT(bus->host_pci->devfn));
bcm47xx_fill_sprom(out, prefix);
return 0;
case BCMA_HOSTTYPE_SOC:
+ memset(out, 0, sizeof(struct ssb_sprom));
bcm47xx_fill_sprom_ethernet(out, NULL);
core = bcma_find_core(bus, BCMA_CORE_80211);
if (core) {
@@ -197,6 +194,8 @@ static void __init bcm47xx_register_bcma(void)
err = bcma_host_soc_register(&bcm47xx_bus.bcma);
if (err)
panic("Failed to initialize BCMA bus (err %d)", err);
+
+ bcm47xx_fill_bcma_boardinfo(&bcm47xx_bus.bcma.bus.boardinfo, NULL);
}
#endif
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
index 5c8dcd2a8a93..d3a889745e20 100644
--- a/arch/mips/bcm47xx/sprom.c
+++ b/arch/mips/bcm47xx/sprom.c
@@ -165,6 +165,8 @@ static void bcm47xx_fill_sprom_r1234589(struct ssb_sprom *sprom,
const char *prefix)
{
nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0);
+ if (!sprom->board_rev)
+ nvram_read_u16(NULL, NULL, "boardrev", &sprom->board_rev, 0);
nvram_read_u16(prefix, NULL, "boardnum", &sprom->board_num, 0);
nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff);
nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff);
@@ -555,8 +557,6 @@ void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix)
void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix)
{
- memset(sprom, 0, sizeof(struct ssb_sprom));
-
bcm47xx_fill_sprom_ethernet(sprom, prefix);
nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0);
@@ -618,3 +618,27 @@ void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix)
bcm47xx_fill_sprom_r1(sprom, prefix);
}
}
+
+#ifdef CONFIG_BCM47XX_SSB
+void bcm47xx_fill_ssb_boardinfo(struct ssb_boardinfo *boardinfo,
+ const char *prefix)
+{
+ nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0);
+ if (!boardinfo->vendor)
+ boardinfo->vendor = SSB_BOARDVENDOR_BCM;
+
+ nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0);
+}
+#endif
+
+#ifdef CONFIG_BCM47XX_BCMA
+void bcm47xx_fill_bcma_boardinfo(struct bcma_boardinfo *boardinfo,
+ const char *prefix)
+{
+ nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0);
+ if (!boardinfo->vendor)
+ boardinfo->vendor = SSB_BOARDVENDOR_BCM;
+
+ nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0);
+}
+#endif
diff --git a/arch/mips/bcm63xx/boards/Makefile b/arch/mips/bcm63xx/boards/Makefile
index 9f64fb414077..af07c1aa202f 100644
--- a/arch/mips/bcm63xx/boards/Makefile
+++ b/arch/mips/bcm63xx/boards/Makefile
@@ -1,3 +1 @@
obj-$(CONFIG_BOARD_BCM963XX) += board_bcm963xx.o
-
-ccflags-y := -Werror
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index d3a9f012aa0a..260dc247c052 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/console.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/serial.h>
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 97e7ce9b50ed..4b93048044eb 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -257,8 +257,6 @@ DEFINE_PER_CPU(int, cpu_state);
extern void fixup_irqs(void);
-static DEFINE_SPINLOCK(smp_reserve_lock);
-
static int octeon_cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
@@ -266,8 +264,6 @@ static int octeon_cpu_disable(void)
if (cpu == 0)
return -EBUSY;
- spin_lock(&smp_reserve_lock);
-
set_cpu_online(cpu, false);
cpu_clear(cpu, cpu_callin_map);
local_irq_disable();
@@ -277,8 +273,6 @@ static int octeon_cpu_disable(void)
flush_cache_all();
local_flush_tlb_all();
- spin_unlock(&smp_reserve_lock);
-
return 0;
}
diff --git a/arch/mips/fw/arc/Makefile b/arch/mips/fw/arc/Makefile
index 5314b37aff2c..4f349ec1ea2d 100644
--- a/arch/mips/fw/arc/Makefile
+++ b/arch/mips/fw/arc/Makefile
@@ -8,5 +8,3 @@ lib-y += cmdline.o env.o file.o identify.o init.o \
lib-$(CONFIG_ARC_MEMORY) += memory.o
lib-$(CONFIG_ARC_CONSOLE) += arc_con.o
lib-$(CONFIG_ARC_PROMLIB) += promlib.o
-
-ccflags-y := -Werror
diff --git a/arch/mips/include/asm/clkdev.h b/arch/mips/include/asm/clkdev.h
new file mode 100644
index 000000000000..262475414e5f
--- /dev/null
+++ b/arch/mips/include/asm/clkdev.h
@@ -0,0 +1,25 @@
+/*
+ * based on arch/arm/include/asm/clkdev.h
+ *
+ * Copyright (C) 2008 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Helper for the clk API to assist looking up a struct clk.
+ */
+#ifndef __ASM_CLKDEV_H
+#define __ASM_CLKDEV_H
+
+#include <linux/slab.h>
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do { } while (0)
+
+static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
+{
+ return kzalloc(size, GFP_KERNEL);
+}
+
+#endif
diff --git a/arch/mips/include/asm/kvm_para.h b/arch/mips/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/mips/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index 2f0becb4ec8f..1caa78ad06d5 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -1,10 +1,11 @@
/*
* Atheros AR71XX/AR724X/AR913X SoC register definitions
*
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
* Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
- * Parts of this file are based on Atheros' 2.6.15 BSP
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -60,6 +61,9 @@
#define AR933X_EHCI_BASE 0x1b000000
#define AR933X_EHCI_SIZE 0x1000
+#define AR934X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+#define AR934X_WMAC_SIZE 0x20000
+
/*
* DDR_CTRL block
*/
@@ -91,6 +95,12 @@
#define AR933X_DDR_REG_FLUSH_USB 0x84
#define AR933X_DDR_REG_FLUSH_WMAC 0x88
+#define AR934X_DDR_REG_FLUSH_GE0 0x9c
+#define AR934X_DDR_REG_FLUSH_GE1 0xa0
+#define AR934X_DDR_REG_FLUSH_USB 0xa4
+#define AR934X_DDR_REG_FLUSH_PCIE 0xa8
+#define AR934X_DDR_REG_FLUSH_WMAC 0xac
+
/*
* PLL block
*/
@@ -150,6 +160,41 @@
#define AR933X_PLL_CLOCK_CTRL_AHB_DIV_SHIFT 15
#define AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK 0x7
+#define AR934X_PLL_CPU_CONFIG_REG 0x00
+#define AR934X_PLL_DDR_CONFIG_REG 0x04
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_REG 0x08
+
+#define AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
+#define AR934X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
+#define AR934X_PLL_CPU_CONFIG_NINT_SHIFT 6
+#define AR934X_PLL_CPU_CONFIG_NINT_MASK 0x3f
+#define AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT 12
+#define AR934X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
+#define AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19
+#define AR934X_PLL_CPU_CONFIG_OUTDIV_MASK 0x3
+
+#define AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT 0
+#define AR934X_PLL_DDR_CONFIG_NFRAC_MASK 0x3ff
+#define AR934X_PLL_DDR_CONFIG_NINT_SHIFT 10
+#define AR934X_PLL_DDR_CONFIG_NINT_MASK 0x3f
+#define AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT 16
+#define AR934X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f
+#define AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23
+#define AR934X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7
+
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_PLL_BYPASS BIT(2)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_PLL_BYPASS BIT(3)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_PLL_BYPASS BIT(4)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT 5
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK 0x1f
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_SHIFT 10
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_MASK 0x1f
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_SHIFT 15
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_MASK 0x1f
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPUCLK_FROM_CPUPLL BIT(20)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+
/*
* USB_CONFIG block
*/
@@ -185,6 +230,10 @@
#define AR933X_RESET_REG_RESET_MODULE 0x1c
#define AR933X_RESET_REG_BOOTSTRAP 0xac
+#define AR934X_RESET_REG_RESET_MODULE 0x1c
+#define AR934X_RESET_REG_BOOTSTRAP 0xb0
+#define AR934X_RESET_REG_PCIE_WMAC_INT_STATUS 0xac
+
#define MISC_INT_ETHSW BIT(12)
#define MISC_INT_TIMER4 BIT(10)
#define MISC_INT_TIMER3 BIT(9)
@@ -241,6 +290,40 @@
#define AR933X_BOOTSTRAP_REF_CLK_40 BIT(0)
+#define AR934X_BOOTSTRAP_SW_OPTION8 BIT(23)
+#define AR934X_BOOTSTRAP_SW_OPTION7 BIT(22)
+#define AR934X_BOOTSTRAP_SW_OPTION6 BIT(21)
+#define AR934X_BOOTSTRAP_SW_OPTION5 BIT(20)
+#define AR934X_BOOTSTRAP_SW_OPTION4 BIT(19)
+#define AR934X_BOOTSTRAP_SW_OPTION3 BIT(18)
+#define AR934X_BOOTSTRAP_SW_OPTION2 BIT(17)
+#define AR934X_BOOTSTRAP_SW_OPTION1 BIT(16)
+#define AR934X_BOOTSTRAP_USB_MODE_DEVICE BIT(7)
+#define AR934X_BOOTSTRAP_PCIE_RC BIT(6)
+#define AR934X_BOOTSTRAP_EJTAG_MODE BIT(5)
+#define AR934X_BOOTSTRAP_REF_CLK_40 BIT(4)
+#define AR934X_BOOTSTRAP_BOOT_FROM_SPI BIT(2)
+#define AR934X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
+#define AR934X_BOOTSTRAP_DDR1 BIT(0)
+
+#define AR934X_PCIE_WMAC_INT_WMAC_MISC BIT(0)
+#define AR934X_PCIE_WMAC_INT_WMAC_TX BIT(1)
+#define AR934X_PCIE_WMAC_INT_WMAC_RXLP BIT(2)
+#define AR934X_PCIE_WMAC_INT_WMAC_RXHP BIT(3)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC BIT(4)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC0 BIT(5)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC1 BIT(6)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC2 BIT(7)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC3 BIT(8)
+#define AR934X_PCIE_WMAC_INT_WMAC_ALL \
+ (AR934X_PCIE_WMAC_INT_WMAC_MISC | AR934X_PCIE_WMAC_INT_WMAC_TX | \
+ AR934X_PCIE_WMAC_INT_WMAC_RXLP | AR934X_PCIE_WMAC_INT_WMAC_RXHP)
+
+#define AR934X_PCIE_WMAC_INT_PCIE_ALL \
+ (AR934X_PCIE_WMAC_INT_PCIE_RC | AR934X_PCIE_WMAC_INT_PCIE_RC0 | \
+ AR934X_PCIE_WMAC_INT_PCIE_RC1 | AR934X_PCIE_WMAC_INT_PCIE_RC2 | \
+ AR934X_PCIE_WMAC_INT_PCIE_RC3)
+
#define REV_ID_MAJOR_MASK 0xfff0
#define REV_ID_MAJOR_AR71XX 0x00a0
#define REV_ID_MAJOR_AR913X 0x00b0
@@ -249,6 +332,9 @@
#define REV_ID_MAJOR_AR7242 0x1100
#define REV_ID_MAJOR_AR9330 0x0110
#define REV_ID_MAJOR_AR9331 0x1110
+#define REV_ID_MAJOR_AR9341 0x0120
+#define REV_ID_MAJOR_AR9342 0x1120
+#define REV_ID_MAJOR_AR9344 0x2120
#define AR71XX_REV_ID_MINOR_MASK 0x3
#define AR71XX_REV_ID_MINOR_AR7130 0x0
@@ -267,6 +353,8 @@
#define AR724X_REV_ID_REVISION_MASK 0x3
+#define AR934X_REV_ID_REVISION_MASK 0xf
+
/*
* SPI block
*/
@@ -308,5 +396,6 @@
#define AR724X_GPIO_COUNT 18
#define AR913X_GPIO_COUNT 22
#define AR933X_GPIO_COUNT 30
+#define AR934X_GPIO_COUNT 23
#endif /* __ASM_MACH_AR71XX_REGS_H */
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
index 6d0c6c9d5622..4f248c3d7b23 100644
--- a/arch/mips/include/asm/mach-ath79/ath79.h
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -29,6 +29,9 @@ enum ath79_soc_type {
ATH79_SOC_AR9132,
ATH79_SOC_AR9330,
ATH79_SOC_AR9331,
+ ATH79_SOC_AR9341,
+ ATH79_SOC_AR9342,
+ ATH79_SOC_AR9344,
};
extern enum ath79_soc_type ath79_soc;
@@ -75,6 +78,26 @@ static inline int soc_is_ar933x(void)
ath79_soc == ATH79_SOC_AR9331);
}
+static inline int soc_is_ar9341(void)
+{
+ return (ath79_soc == ATH79_SOC_AR9341);
+}
+
+static inline int soc_is_ar9342(void)
+{
+ return (ath79_soc == ATH79_SOC_AR9342);
+}
+
+static inline int soc_is_ar9344(void)
+{
+ return (ath79_soc == ATH79_SOC_AR9344);
+}
+
+static inline int soc_is_ar934x(void)
+{
+ return soc_is_ar9341() || soc_is_ar9342() || soc_is_ar9344();
+}
+
extern void __iomem *ath79_ddr_base;
extern void __iomem *ath79_pll_base;
extern void __iomem *ath79_reset_base;
diff --git a/arch/mips/include/asm/mach-ath79/irq.h b/arch/mips/include/asm/mach-ath79/irq.h
index 519958fe4e3c..0968f69e2018 100644
--- a/arch/mips/include/asm/mach-ath79/irq.h
+++ b/arch/mips/include/asm/mach-ath79/irq.h
@@ -10,11 +10,19 @@
#define __ASM_MACH_ATH79_IRQ_H
#define MIPS_CPU_IRQ_BASE 0
-#define NR_IRQS 40
+#define NR_IRQS 48
#define ATH79_MISC_IRQ_BASE 8
#define ATH79_MISC_IRQ_COUNT 32
+#define ATH79_PCI_IRQ_BASE (ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT)
+#define ATH79_PCI_IRQ_COUNT 6
+#define ATH79_PCI_IRQ(_x) (ATH79_PCI_IRQ_BASE + (_x))
+
+#define ATH79_IP2_IRQ_BASE (ATH79_PCI_IRQ_BASE + ATH79_PCI_IRQ_COUNT)
+#define ATH79_IP2_IRQ_COUNT 2
+#define ATH79_IP2_IRQ(_x) (ATH79_IP2_IRQ_BASE + (_x))
+
#define ATH79_CPU_IRQ_IP2 (MIPS_CPU_IRQ_BASE + 2)
#define ATH79_CPU_IRQ_USB (MIPS_CPU_IRQ_BASE + 3)
#define ATH79_CPU_IRQ_GE0 (MIPS_CPU_IRQ_BASE + 4)
diff --git a/arch/mips/include/asm/mach-ath79/pci-ath724x.h b/arch/mips/include/asm/mach-ath79/pci-ath724x.h
deleted file mode 100644
index 454885fa30c3..000000000000
--- a/arch/mips/include/asm/mach-ath79/pci-ath724x.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Atheros 724x PCI support
- *
- * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef __ASM_MACH_ATH79_PCI_ATH724X_H
-#define __ASM_MACH_ATH79_PCI_ATH724X_H
-
-struct ath724x_pci_data {
- int irq;
- void *pdata;
-};
-
-void ath724x_pci_add_data(struct ath724x_pci_data *data, int size);
-
-#endif /* __ASM_MACH_ATH79_PCI_ATH724X_H */
diff --git a/arch/mips/include/asm/mach-ath79/pci.h b/arch/mips/include/asm/mach-ath79/pci.h
new file mode 100644
index 000000000000..7868f7fa028f
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/pci.h
@@ -0,0 +1,28 @@
+/*
+ * Atheros AR71XX/AR724X PCI support
+ *
+ * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef __ASM_MACH_ATH79_PCI_H
+#define __ASM_MACH_ATH79_PCI_H
+
+#if defined(CONFIG_PCI) && defined(CONFIG_SOC_AR71XX)
+int ar71xx_pcibios_init(void);
+#else
+static inline int ar71xx_pcibios_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_PCI_AR724X)
+int ar724x_pcibios_init(int irq);
+#else
+static inline int ar724x_pcibios_init(int irq) { return 0; }
+#endif
+
+#endif /* __ASM_MACH_ATH79_PCI_H */
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
index 5ecaf47b34d2..26fdaf40b930 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
@@ -47,4 +47,13 @@ extern enum bcm47xx_bus_type bcm47xx_bus_type;
void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix);
void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix);
+#ifdef CONFIG_BCM47XX_SSB
+void bcm47xx_fill_ssb_boardinfo(struct ssb_boardinfo *boardinfo,
+ const char *prefix);
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+void bcm47xx_fill_bcma_boardinfo(struct bcma_boardinfo *boardinfo,
+ const char *prefix);
+#endif
+
#endif /* __ASM_BCM47XX_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
index 3d5de96d4036..1d7dd96aa460 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
@@ -2,6 +2,7 @@
#define BCM63XX_GPIO_H
#include <linux/init.h>
+#include <bcm63xx_cpu.h>
int __init bcm63xx_gpio_init(void);
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
new file mode 100644
index 000000000000..318f982f04ff
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
@@ -0,0 +1,23 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#ifndef _FALCON_IRQ__
+#define _FALCON_IRQ__
+
+#define INT_NUM_IRQ0 8
+#define INT_NUM_IM0_IRL0 (INT_NUM_IRQ0 + 0)
+#define INT_NUM_IM1_IRL0 (INT_NUM_IM0_IRL0 + 32)
+#define INT_NUM_IM2_IRL0 (INT_NUM_IM1_IRL0 + 32)
+#define INT_NUM_IM3_IRL0 (INT_NUM_IM2_IRL0 + 32)
+#define INT_NUM_IM4_IRL0 (INT_NUM_IM3_IRL0 + 32)
+#define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32)
+#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
+
+#define MIPS_CPU_TIMER_IRQ 7
+
+#endif /* _FALCON_IRQ__ */
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/irq.h b/arch/mips/include/asm/mach-lantiq/falcon/irq.h
new file mode 100644
index 000000000000..2caccd9f9dbc
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/irq.h
@@ -0,0 +1,18 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#ifndef __FALCON_IRQ_H
+#define __FALCON_IRQ_H
+
+#include <falcon_irq.h>
+
+#define NR_IRQS 328
+
+#include_next <irq.h>
+
+#endif
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
new file mode 100644
index 000000000000..b385252584ee
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
@@ -0,0 +1,67 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _LTQ_FALCON_H__
+#define _LTQ_FALCON_H__
+
+#ifdef CONFIG_SOC_FALCON
+
+#include <linux/pinctrl/pinctrl.h>
+#include <lantiq.h>
+
+/* Chip IDs */
+#define SOC_ID_FALCON 0x01B8
+
+/* SoC Types */
+#define SOC_TYPE_FALCON 0x01
+
+/*
+ * during early_printk no ioremap is possible at this early stage,
+ * so let's use KSEG1 instead
+ */
+#define LTQ_ASC0_BASE_ADDR 0x1E100C00
+#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC0_BASE_ADDR)
+
+/* WDT */
+#define LTQ_RST_CAUSE_WDTRST 0x0002
+
+/* CHIP ID */
+#define LTQ_STATUS_BASE_ADDR 0x1E802000
+
+#define FALCON_CHIPID ((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x0c))
+#define FALCON_CHIPTYPE ((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x38))
+#define FALCON_CHIPCONF ((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x40))
+
+/* SYSCTL - start/stop/restart/configure/... different parts of the SoC */
+#define SYSCTL_SYS1 0
+#define SYSCTL_SYSETH 1
+#define SYSCTL_SYSGPE 2
+
+/* BOOT_SEL - find what boot media we have */
+#define BS_FLASH 0x1
+#define BS_SPI 0x4
+
+/* global register ranges */
+extern __iomem void *ltq_ebu_membase;
+extern __iomem void *ltq_sys1_membase;
+#define ltq_ebu_w32(x, y) ltq_w32((x), ltq_ebu_membase + (y))
+#define ltq_ebu_r32(x) ltq_r32(ltq_ebu_membase + (x))
+
+#define ltq_sys1_w32(x, y) ltq_w32((x), ltq_sys1_membase + (y))
+#define ltq_sys1_r32(x) ltq_r32(ltq_sys1_membase + (x))
+#define ltq_sys1_w32_mask(clear, set, reg) \
+ ltq_sys1_w32((ltq_sys1_r32(reg) & ~(clear)) | (set), reg)
+
+/*
+ * to keep the irq code generic we need to define this to 0 as falcon
+ * has no EIU/EBU
+ */
+#define LTQ_EBU_PCC_ISTAT 0
+
+#endif /* CONFIG_SOC_FALCON */
+#endif /* _LTQ_FALCON_H__ */
diff --git a/arch/mips/include/asm/mach-lantiq/gpio.h b/arch/mips/include/asm/mach-lantiq/gpio.h
new file mode 100644
index 000000000000..f79505b43609
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/gpio.h
@@ -0,0 +1,16 @@
+#ifndef __ASM_MIPS_MACH_LANTIQ_GPIO_H
+#define __ASM_MIPS_MACH_LANTIQ_GPIO_H
+
+static inline int gpio_to_irq(unsigned int gpio)
+{
+ return -1;
+}
+
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+
+#define gpio_cansleep __gpio_cansleep
+
+#include <asm-generic/gpio.h>
+
+#endif
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h
index ce2f02929d22..5e8a6e965756 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq.h
@@ -9,6 +9,8 @@
#define _LANTIQ_H__
#include <linux/irq.h>
+#include <linux/device.h>
+#include <linux/clk.h>
/* generic reg access functions */
#define ltq_r32(reg) __raw_readl(reg)
@@ -21,25 +23,9 @@
/* register access macros for EBU and CGU */
#define ltq_ebu_w32(x, y) ltq_w32((x), ltq_ebu_membase + (y))
#define ltq_ebu_r32(x) ltq_r32(ltq_ebu_membase + (x))
-#define ltq_cgu_w32(x, y) ltq_w32((x), ltq_cgu_membase + (y))
-#define ltq_cgu_r32(x) ltq_r32(ltq_cgu_membase + (x))
-
+#define ltq_ebu_w32_mask(x, y, z) \
+ ltq_w32_mask(x, y, ltq_ebu_membase + (z))
extern __iomem void *ltq_ebu_membase;
-extern __iomem void *ltq_cgu_membase;
-
-extern unsigned int ltq_get_cpu_ver(void);
-extern unsigned int ltq_get_soc_type(void);
-
-/* clock speeds */
-#define CLOCK_60M 60000000
-#define CLOCK_83M 83333333
-#define CLOCK_111M 111111111
-#define CLOCK_133M 133333333
-#define CLOCK_167M 166666667
-#define CLOCK_200M 200000000
-#define CLOCK_266M 266666666
-#define CLOCK_333M 333333333
-#define CLOCK_400M 400000000
/* spinlock all ebu i/o */
extern spinlock_t ebu_lock;
@@ -49,15 +35,21 @@ extern void ltq_disable_irq(struct irq_data *data);
extern void ltq_mask_and_ack_irq(struct irq_data *data);
extern void ltq_enable_irq(struct irq_data *data);
+/* clock handling */
+extern int clk_activate(struct clk *clk);
+extern void clk_deactivate(struct clk *clk);
+extern struct clk *clk_get_cpu(void);
+extern struct clk *clk_get_fpi(void);
+extern struct clk *clk_get_io(void);
+
+/* find out what bootsource we have */
+extern unsigned char ltq_boot_select(void);
/* find out what caused the last cpu reset */
extern int ltq_reset_cause(void);
-#define LTQ_RST_CAUSE_WDTRST 0x20
#define IOPORT_RESOURCE_START 0x10000000
#define IOPORT_RESOURCE_END 0xffffffff
#define IOMEM_RESOURCE_START 0x10000000
#define IOMEM_RESOURCE_END 0xffffffff
-#define LTQ_FLASH_START 0x10000000
-#define LTQ_FLASH_MAX 0x04000000
#endif
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
index a305f1d0259e..e23bf7c9a2d0 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
@@ -9,41 +9,8 @@
#ifndef _LANTIQ_PLATFORM_H__
#define _LANTIQ_PLATFORM_H__
-#include <linux/mtd/partitions.h>
#include <linux/socket.h>
-/* struct used to pass info to the pci core */
-enum {
- PCI_CLOCK_INT = 0,
- PCI_CLOCK_EXT
-};
-
-#define PCI_EXIN0 0x0001
-#define PCI_EXIN1 0x0002
-#define PCI_EXIN2 0x0004
-#define PCI_EXIN3 0x0008
-#define PCI_EXIN4 0x0010
-#define PCI_EXIN5 0x0020
-#define PCI_EXIN_MAX 6
-
-#define PCI_GNT1 0x0040
-#define PCI_GNT2 0x0080
-#define PCI_GNT3 0x0100
-#define PCI_GNT4 0x0200
-
-#define PCI_REQ1 0x0400
-#define PCI_REQ2 0x0800
-#define PCI_REQ3 0x1000
-#define PCI_REQ4 0x2000
-#define PCI_REQ_SHIFT 10
-#define PCI_REQ_MASK 0xf
-
-struct ltq_pci_data {
- int clock;
- int gpio;
- int irq[16];
-};
-
/* struct used to pass info to network drivers */
struct ltq_eth_data {
struct sockaddr mac;
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
index b4465a888e20..aa0b3b866f84 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -17,50 +17,8 @@
#define INT_NUM_IM4_IRL0 (INT_NUM_IRQ0 + 128)
#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
-#define LTQ_ASC_TIR(x) (INT_NUM_IM3_IRL0 + (x * 8))
-#define LTQ_ASC_RIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 1)
-#define LTQ_ASC_EIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 2)
-
-#define LTQ_ASC_ASE_TIR INT_NUM_IM2_IRL0
-#define LTQ_ASC_ASE_RIR (INT_NUM_IM2_IRL0 + 2)
-#define LTQ_ASC_ASE_EIR (INT_NUM_IM2_IRL0 + 3)
-
-#define LTQ_SSC_TIR (INT_NUM_IM0_IRL0 + 15)
-#define LTQ_SSC_RIR (INT_NUM_IM0_IRL0 + 14)
-#define LTQ_SSC_EIR (INT_NUM_IM0_IRL0 + 16)
-
-#define LTQ_MEI_DYING_GASP_INT (INT_NUM_IM1_IRL0 + 21)
-#define LTQ_MEI_INT (INT_NUM_IM1_IRL0 + 23)
-
-#define LTQ_TIMER6_INT (INT_NUM_IM1_IRL0 + 23)
-#define LTQ_USB_INT (INT_NUM_IM1_IRL0 + 22)
-#define LTQ_USB_OC_INT (INT_NUM_IM4_IRL0 + 23)
-
-#define MIPS_CPU_TIMER_IRQ 7
-
#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0)
-#define LTQ_DMA_CH1_INT (INT_NUM_IM2_IRL0 + 1)
-#define LTQ_DMA_CH2_INT (INT_NUM_IM2_IRL0 + 2)
-#define LTQ_DMA_CH3_INT (INT_NUM_IM2_IRL0 + 3)
-#define LTQ_DMA_CH4_INT (INT_NUM_IM2_IRL0 + 4)
-#define LTQ_DMA_CH5_INT (INT_NUM_IM2_IRL0 + 5)
-#define LTQ_DMA_CH6_INT (INT_NUM_IM2_IRL0 + 6)
-#define LTQ_DMA_CH7_INT (INT_NUM_IM2_IRL0 + 7)
-#define LTQ_DMA_CH8_INT (INT_NUM_IM2_IRL0 + 8)
-#define LTQ_DMA_CH9_INT (INT_NUM_IM2_IRL0 + 9)
-#define LTQ_DMA_CH10_INT (INT_NUM_IM2_IRL0 + 10)
-#define LTQ_DMA_CH11_INT (INT_NUM_IM2_IRL0 + 11)
-#define LTQ_DMA_CH12_INT (INT_NUM_IM2_IRL0 + 25)
-#define LTQ_DMA_CH13_INT (INT_NUM_IM2_IRL0 + 26)
-#define LTQ_DMA_CH14_INT (INT_NUM_IM2_IRL0 + 27)
-#define LTQ_DMA_CH15_INT (INT_NUM_IM2_IRL0 + 28)
-#define LTQ_DMA_CH16_INT (INT_NUM_IM2_IRL0 + 29)
-#define LTQ_DMA_CH17_INT (INT_NUM_IM2_IRL0 + 30)
-#define LTQ_DMA_CH18_INT (INT_NUM_IM2_IRL0 + 16)
-#define LTQ_DMA_CH19_INT (INT_NUM_IM2_IRL0 + 21)
-
-#define LTQ_PPE_MBOX_INT (INT_NUM_IM2_IRL0 + 24)
-#define INT_NUM_IM4_IRL14 (INT_NUM_IM4_IRL0 + 14)
+#define MIPS_CPU_TIMER_IRQ 7
#endif
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
index 8a3c6be669d2..6a2df709c576 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
@@ -17,38 +17,56 @@
#define SOC_ID_DANUBE1 0x129
#define SOC_ID_DANUBE2 0x12B
#define SOC_ID_TWINPASS 0x12D
-#define SOC_ID_AMAZON_SE 0x152
+#define SOC_ID_AMAZON_SE_1 0x152 /* 50601 */
+#define SOC_ID_AMAZON_SE_2 0x153 /* 50600 */
#define SOC_ID_ARX188 0x16C
-#define SOC_ID_ARX168 0x16D
+#define SOC_ID_ARX168_1 0x16D
+#define SOC_ID_ARX168_2 0x16E
#define SOC_ID_ARX182 0x16F
-
-/* SoC Types */
+#define SOC_ID_GRX188 0x170
+#define SOC_ID_GRX168 0x171
+
+#define SOC_ID_VRX288 0x1C0 /* v1.1 */
+#define SOC_ID_VRX282 0x1C1 /* v1.1 */
+#define SOC_ID_VRX268 0x1C2 /* v1.1 */
+#define SOC_ID_GRX268 0x1C8 /* v1.1 */
+#define SOC_ID_GRX288 0x1C9 /* v1.1 */
+#define SOC_ID_VRX288_2 0x00B /* v1.2 */
+#define SOC_ID_VRX268_2 0x00C /* v1.2 */
+#define SOC_ID_GRX288_2 0x00D /* v1.2 */
+#define SOC_ID_GRX282_2 0x00E /* v1.2 */
+
+ /* SoC Types */
#define SOC_TYPE_DANUBE 0x01
#define SOC_TYPE_TWINPASS 0x02
#define SOC_TYPE_AR9 0x03
-#define SOC_TYPE_VR9 0x04
-#define SOC_TYPE_AMAZON_SE 0x05
+#define SOC_TYPE_VR9 0x04 /* v1.1 */
+#define SOC_TYPE_VR9_2 0x05 /* v1.2 */
+#define SOC_TYPE_AMAZON_SE 0x06
+
+/* BOOT_SEL - find what boot media we have */
+#define BS_EXT_ROM 0x0
+#define BS_FLASH 0x1
+#define BS_MII0 0x2
+#define BS_PCI 0x3
+#define BS_UART1 0x4
+#define BS_SPI 0x5
+#define BS_NAND 0x6
+#define BS_RMII0 0x7
+
+/* helpers used to access the cgu */
+#define ltq_cgu_w32(x, y) ltq_w32((x), ltq_cgu_membase + (y))
+#define ltq_cgu_r32(x) ltq_r32(ltq_cgu_membase + (x))
+extern __iomem void *ltq_cgu_membase;
-/* ASC0/1 - serial port */
-#define LTQ_ASC0_BASE_ADDR 0x1E100400
+/*
+ * during early_printk no ioremap is possible
+ * lets use KSEG1 instead
+ */
#define LTQ_ASC1_BASE_ADDR 0x1E100C00
-#define LTQ_ASC_SIZE 0x400
-
-/* RCU - reset control unit */
-#define LTQ_RCU_BASE_ADDR 0x1F203000
-#define LTQ_RCU_SIZE 0x1000
-
-/* GPTU - general purpose timer unit */
-#define LTQ_GPTU_BASE_ADDR 0x18000300
-#define LTQ_GPTU_SIZE 0x100
+#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
/* EBU - external bus unit */
-#define LTQ_EBU_GPIO_START 0x14000000
-#define LTQ_EBU_GPIO_SIZE 0x1000
-
-#define LTQ_EBU_BASE_ADDR 0x1E105300
-#define LTQ_EBU_SIZE 0x100
-
#define LTQ_EBU_BUSCON0 0x0060
#define LTQ_EBU_PCC_CON 0x0090
#define LTQ_EBU_PCC_IEN 0x00A4
@@ -57,85 +75,17 @@
#define LTQ_EBU_ADDRSEL1 0x0024
#define EBU_WRDIS 0x80000000
-/* CGU - clock generation unit */
-#define LTQ_CGU_BASE_ADDR 0x1F103000
-#define LTQ_CGU_SIZE 0x1000
-
-/* ICU - interrupt control unit */
-#define LTQ_ICU_BASE_ADDR 0x1F880200
-#define LTQ_ICU_SIZE 0x100
-
-/* EIU - external interrupt unit */
-#define LTQ_EIU_BASE_ADDR 0x1F101000
-#define LTQ_EIU_SIZE 0x1000
-
-/* PMU - power management unit */
-#define LTQ_PMU_BASE_ADDR 0x1F102000
-#define LTQ_PMU_SIZE 0x1000
-
-#define PMU_DMA 0x0020
-#define PMU_USB 0x8041
-#define PMU_LED 0x0800
-#define PMU_GPT 0x1000
-#define PMU_PPE 0x2000
-#define PMU_FPI 0x4000
-#define PMU_SWITCH 0x10000000
-
-/* ETOP - ethernet */
-#define LTQ_ETOP_BASE_ADDR 0x1E180000
-#define LTQ_ETOP_SIZE 0x40000
-
-/* DMA */
-#define LTQ_DMA_BASE_ADDR 0x1E104100
-#define LTQ_DMA_SIZE 0x800
-
-/* PCI */
-#define PCI_CR_BASE_ADDR 0x1E105400
-#define PCI_CR_SIZE 0x400
-
/* WDT */
-#define LTQ_WDT_BASE_ADDR 0x1F8803F0
-#define LTQ_WDT_SIZE 0x10
-
-/* STP - serial to parallel conversion unit */
-#define LTQ_STP_BASE_ADDR 0x1E100BB0
-#define LTQ_STP_SIZE 0x40
-
-/* GPIO */
-#define LTQ_GPIO0_BASE_ADDR 0x1E100B10
-#define LTQ_GPIO1_BASE_ADDR 0x1E100B40
-#define LTQ_GPIO2_BASE_ADDR 0x1E100B70
-#define LTQ_GPIO_SIZE 0x30
-
-/* SSC */
-#define LTQ_SSC_BASE_ADDR 0x1e100800
-#define LTQ_SSC_SIZE 0x100
-
-/* MEI - dsl core */
-#define LTQ_MEI_BASE_ADDR 0x1E116000
-
-/* DEU - data encryption unit */
-#define LTQ_DEU_BASE_ADDR 0x1E103100
+#define LTQ_RST_CAUSE_WDTRST 0x20
/* MPS - multi processor unit (voice) */
#define LTQ_MPS_BASE_ADDR (KSEG1 + 0x1F107000)
#define LTQ_MPS_CHIPID ((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344))
/* request a non-gpio and set the PIO config */
-extern int ltq_gpio_request(unsigned int pin, unsigned int alt0,
- unsigned int alt1, unsigned int dir, const char *name);
+#define PMU_PPE BIT(13)
extern void ltq_pmu_enable(unsigned int module);
extern void ltq_pmu_disable(unsigned int module);
-static inline int ltq_is_ar9(void)
-{
- return (ltq_get_soc_type() == SOC_TYPE_AR9);
-}
-
-static inline int ltq_is_vr9(void)
-{
- return (ltq_get_soc_type() == SOC_TYPE_VR9);
-}
-
#endif /* CONFIG_SOC_TYPE_XWAY */
#endif /* _LTQ_XWAY_H__ */
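Taken together, the BOOT_SEL values, the PMU helpers and the CGU accessors are what SoC setup code builds on; a sketch of the idiom, inside platform code where the lantiq headers are already pulled in and with the function name hypothetical:

static void __init example_xway_setup(void)
{
	ltq_pmu_enable(PMU_PPE);		/* ungate the packet engine */

	switch (ltq_boot_select()) {
	case BS_NAND:
		pr_info("booting from NAND\n");
		break;
	case BS_SPI:
		pr_info("booting from SPI flash\n");
		break;
	default:
		pr_info("booting from another medium\n");
		break;
	}
}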
diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h
index 46c08563e532..6e23ceb0ba8c 100644
--- a/arch/mips/include/asm/mips-boards/generic.h
+++ b/arch/mips/include/asm/mips-boards/generic.h
@@ -93,8 +93,4 @@ extern void mips_pcibios_init(void);
#define mips_pcibios_init() do { } while (0)
#endif
-#ifdef CONFIG_KGDB
-extern void kgdb_config(void);
-#endif
-
#endif /* __ASM_MIPS_BOARDS_GENERIC_H */
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 7467d1d933d5..530008048c62 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -2,6 +2,7 @@
#define _ASM_MODULE_H
#include <linux/list.h>
+#include <linux/elf.h>
#include <asm/uaccess.h>
struct mod_arch_specific {
diff --git a/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h b/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h
deleted file mode 100644
index d553f8e88df6..000000000000
--- a/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h
+++ /dev/null
@@ -1,1365 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_PCIEEP_DEFS_H__
-#define __CVMX_PCIEEP_DEFS_H__
-
-#define CVMX_PCIEEP_CFG000 \
- (0x0000000000000000ull)
-#define CVMX_PCIEEP_CFG001 \
- (0x0000000000000004ull)
-#define CVMX_PCIEEP_CFG002 \
- (0x0000000000000008ull)
-#define CVMX_PCIEEP_CFG003 \
- (0x000000000000000Cull)
-#define CVMX_PCIEEP_CFG004 \
- (0x0000000000000010ull)
-#define CVMX_PCIEEP_CFG004_MASK \
- (0x0000000080000010ull)
-#define CVMX_PCIEEP_CFG005 \
- (0x0000000000000014ull)
-#define CVMX_PCIEEP_CFG005_MASK \
- (0x0000000080000014ull)
-#define CVMX_PCIEEP_CFG006 \
- (0x0000000000000018ull)
-#define CVMX_PCIEEP_CFG006_MASK \
- (0x0000000080000018ull)
-#define CVMX_PCIEEP_CFG007 \
- (0x000000000000001Cull)
-#define CVMX_PCIEEP_CFG007_MASK \
- (0x000000008000001Cull)
-#define CVMX_PCIEEP_CFG008 \
- (0x0000000000000020ull)
-#define CVMX_PCIEEP_CFG008_MASK \
- (0x0000000080000020ull)
-#define CVMX_PCIEEP_CFG009 \
- (0x0000000000000024ull)
-#define CVMX_PCIEEP_CFG009_MASK \
- (0x0000000080000024ull)
-#define CVMX_PCIEEP_CFG010 \
- (0x0000000000000028ull)
-#define CVMX_PCIEEP_CFG011 \
- (0x000000000000002Cull)
-#define CVMX_PCIEEP_CFG012 \
- (0x0000000000000030ull)
-#define CVMX_PCIEEP_CFG012_MASK \
- (0x0000000080000030ull)
-#define CVMX_PCIEEP_CFG013 \
- (0x0000000000000034ull)
-#define CVMX_PCIEEP_CFG015 \
- (0x000000000000003Cull)
-#define CVMX_PCIEEP_CFG016 \
- (0x0000000000000040ull)
-#define CVMX_PCIEEP_CFG017 \
- (0x0000000000000044ull)
-#define CVMX_PCIEEP_CFG020 \
- (0x0000000000000050ull)
-#define CVMX_PCIEEP_CFG021 \
- (0x0000000000000054ull)
-#define CVMX_PCIEEP_CFG022 \
- (0x0000000000000058ull)
-#define CVMX_PCIEEP_CFG023 \
- (0x000000000000005Cull)
-#define CVMX_PCIEEP_CFG028 \
- (0x0000000000000070ull)
-#define CVMX_PCIEEP_CFG029 \
- (0x0000000000000074ull)
-#define CVMX_PCIEEP_CFG030 \
- (0x0000000000000078ull)
-#define CVMX_PCIEEP_CFG031 \
- (0x000000000000007Cull)
-#define CVMX_PCIEEP_CFG032 \
- (0x0000000000000080ull)
-#define CVMX_PCIEEP_CFG033 \
- (0x0000000000000084ull)
-#define CVMX_PCIEEP_CFG034 \
- (0x0000000000000088ull)
-#define CVMX_PCIEEP_CFG037 \
- (0x0000000000000094ull)
-#define CVMX_PCIEEP_CFG038 \
- (0x0000000000000098ull)
-#define CVMX_PCIEEP_CFG039 \
- (0x000000000000009Cull)
-#define CVMX_PCIEEP_CFG040 \
- (0x00000000000000A0ull)
-#define CVMX_PCIEEP_CFG041 \
- (0x00000000000000A4ull)
-#define CVMX_PCIEEP_CFG042 \
- (0x00000000000000A8ull)
-#define CVMX_PCIEEP_CFG064 \
- (0x0000000000000100ull)
-#define CVMX_PCIEEP_CFG065 \
- (0x0000000000000104ull)
-#define CVMX_PCIEEP_CFG066 \
- (0x0000000000000108ull)
-#define CVMX_PCIEEP_CFG067 \
- (0x000000000000010Cull)
-#define CVMX_PCIEEP_CFG068 \
- (0x0000000000000110ull)
-#define CVMX_PCIEEP_CFG069 \
- (0x0000000000000114ull)
-#define CVMX_PCIEEP_CFG070 \
- (0x0000000000000118ull)
-#define CVMX_PCIEEP_CFG071 \
- (0x000000000000011Cull)
-#define CVMX_PCIEEP_CFG072 \
- (0x0000000000000120ull)
-#define CVMX_PCIEEP_CFG073 \
- (0x0000000000000124ull)
-#define CVMX_PCIEEP_CFG074 \
- (0x0000000000000128ull)
-#define CVMX_PCIEEP_CFG448 \
- (0x0000000000000700ull)
-#define CVMX_PCIEEP_CFG449 \
- (0x0000000000000704ull)
-#define CVMX_PCIEEP_CFG450 \
- (0x0000000000000708ull)
-#define CVMX_PCIEEP_CFG451 \
- (0x000000000000070Cull)
-#define CVMX_PCIEEP_CFG452 \
- (0x0000000000000710ull)
-#define CVMX_PCIEEP_CFG453 \
- (0x0000000000000714ull)
-#define CVMX_PCIEEP_CFG454 \
- (0x0000000000000718ull)
-#define CVMX_PCIEEP_CFG455 \
- (0x000000000000071Cull)
-#define CVMX_PCIEEP_CFG456 \
- (0x0000000000000720ull)
-#define CVMX_PCIEEP_CFG458 \
- (0x0000000000000728ull)
-#define CVMX_PCIEEP_CFG459 \
- (0x000000000000072Cull)
-#define CVMX_PCIEEP_CFG460 \
- (0x0000000000000730ull)
-#define CVMX_PCIEEP_CFG461 \
- (0x0000000000000734ull)
-#define CVMX_PCIEEP_CFG462 \
- (0x0000000000000738ull)
-#define CVMX_PCIEEP_CFG463 \
- (0x000000000000073Cull)
-#define CVMX_PCIEEP_CFG464 \
- (0x0000000000000740ull)
-#define CVMX_PCIEEP_CFG465 \
- (0x0000000000000744ull)
-#define CVMX_PCIEEP_CFG466 \
- (0x0000000000000748ull)
-#define CVMX_PCIEEP_CFG467 \
- (0x000000000000074Cull)
-#define CVMX_PCIEEP_CFG468 \
- (0x0000000000000750ull)
-#define CVMX_PCIEEP_CFG490 \
- (0x00000000000007A8ull)
-#define CVMX_PCIEEP_CFG491 \
- (0x00000000000007ACull)
-#define CVMX_PCIEEP_CFG492 \
- (0x00000000000007B0ull)
-#define CVMX_PCIEEP_CFG516 \
- (0x0000000000000810ull)
-#define CVMX_PCIEEP_CFG517 \
- (0x0000000000000814ull)
-
-union cvmx_pcieep_cfg000 {
- uint32_t u32;
- struct cvmx_pcieep_cfg000_s {
- uint32_t devid:16;
- uint32_t vendid:16;
- } s;
- struct cvmx_pcieep_cfg000_s cn52xx;
- struct cvmx_pcieep_cfg000_s cn52xxp1;
- struct cvmx_pcieep_cfg000_s cn56xx;
- struct cvmx_pcieep_cfg000_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg001 {
- uint32_t u32;
- struct cvmx_pcieep_cfg001_s {
- uint32_t dpe:1;
- uint32_t sse:1;
- uint32_t rma:1;
- uint32_t rta:1;
- uint32_t sta:1;
- uint32_t devt:2;
- uint32_t mdpe:1;
- uint32_t fbb:1;
- uint32_t reserved_22_22:1;
- uint32_t m66:1;
- uint32_t cl:1;
- uint32_t i_stat:1;
- uint32_t reserved_11_18:8;
- uint32_t i_dis:1;
- uint32_t fbbe:1;
- uint32_t see:1;
- uint32_t ids_wcc:1;
- uint32_t per:1;
- uint32_t vps:1;
- uint32_t mwice:1;
- uint32_t scse:1;
- uint32_t me:1;
- uint32_t msae:1;
- uint32_t isae:1;
- } s;
- struct cvmx_pcieep_cfg001_s cn52xx;
- struct cvmx_pcieep_cfg001_s cn52xxp1;
- struct cvmx_pcieep_cfg001_s cn56xx;
- struct cvmx_pcieep_cfg001_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg002 {
- uint32_t u32;
- struct cvmx_pcieep_cfg002_s {
- uint32_t bcc:8;
- uint32_t sc:8;
- uint32_t pi:8;
- uint32_t rid:8;
- } s;
- struct cvmx_pcieep_cfg002_s cn52xx;
- struct cvmx_pcieep_cfg002_s cn52xxp1;
- struct cvmx_pcieep_cfg002_s cn56xx;
- struct cvmx_pcieep_cfg002_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg003 {
- uint32_t u32;
- struct cvmx_pcieep_cfg003_s {
- uint32_t bist:8;
- uint32_t mfd:1;
- uint32_t chf:7;
- uint32_t lt:8;
- uint32_t cls:8;
- } s;
- struct cvmx_pcieep_cfg003_s cn52xx;
- struct cvmx_pcieep_cfg003_s cn52xxp1;
- struct cvmx_pcieep_cfg003_s cn56xx;
- struct cvmx_pcieep_cfg003_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg004 {
- uint32_t u32;
- struct cvmx_pcieep_cfg004_s {
- uint32_t lbab:18;
- uint32_t reserved_4_13:10;
- uint32_t pf:1;
- uint32_t typ:2;
- uint32_t mspc:1;
- } s;
- struct cvmx_pcieep_cfg004_s cn52xx;
- struct cvmx_pcieep_cfg004_s cn52xxp1;
- struct cvmx_pcieep_cfg004_s cn56xx;
- struct cvmx_pcieep_cfg004_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg004_mask {
- uint32_t u32;
- struct cvmx_pcieep_cfg004_mask_s {
- uint32_t lmask:31;
- uint32_t enb:1;
- } s;
- struct cvmx_pcieep_cfg004_mask_s cn52xx;
- struct cvmx_pcieep_cfg004_mask_s cn52xxp1;
- struct cvmx_pcieep_cfg004_mask_s cn56xx;
- struct cvmx_pcieep_cfg004_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg005 {
- uint32_t u32;
- struct cvmx_pcieep_cfg005_s {
- uint32_t ubab:32;
- } s;
- struct cvmx_pcieep_cfg005_s cn52xx;
- struct cvmx_pcieep_cfg005_s cn52xxp1;
- struct cvmx_pcieep_cfg005_s cn56xx;
- struct cvmx_pcieep_cfg005_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg005_mask {
- uint32_t u32;
- struct cvmx_pcieep_cfg005_mask_s {
- uint32_t umask:32;
- } s;
- struct cvmx_pcieep_cfg005_mask_s cn52xx;
- struct cvmx_pcieep_cfg005_mask_s cn52xxp1;
- struct cvmx_pcieep_cfg005_mask_s cn56xx;
- struct cvmx_pcieep_cfg005_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg006 {
- uint32_t u32;
- struct cvmx_pcieep_cfg006_s {
- uint32_t lbab:6;
- uint32_t reserved_4_25:22;
- uint32_t pf:1;
- uint32_t typ:2;
- uint32_t mspc:1;
- } s;
- struct cvmx_pcieep_cfg006_s cn52xx;
- struct cvmx_pcieep_cfg006_s cn52xxp1;
- struct cvmx_pcieep_cfg006_s cn56xx;
- struct cvmx_pcieep_cfg006_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg006_mask {
- uint32_t u32;
- struct cvmx_pcieep_cfg006_mask_s {
- uint32_t lmask:31;
- uint32_t enb:1;
- } s;
- struct cvmx_pcieep_cfg006_mask_s cn52xx;
- struct cvmx_pcieep_cfg006_mask_s cn52xxp1;
- struct cvmx_pcieep_cfg006_mask_s cn56xx;
- struct cvmx_pcieep_cfg006_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg007 {
- uint32_t u32;
- struct cvmx_pcieep_cfg007_s {
- uint32_t ubab:32;
- } s;
- struct cvmx_pcieep_cfg007_s cn52xx;
- struct cvmx_pcieep_cfg007_s cn52xxp1;
- struct cvmx_pcieep_cfg007_s cn56xx;
- struct cvmx_pcieep_cfg007_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg007_mask {
- uint32_t u32;
- struct cvmx_pcieep_cfg007_mask_s {
- uint32_t umask:32;
- } s;
- struct cvmx_pcieep_cfg007_mask_s cn52xx;
- struct cvmx_pcieep_cfg007_mask_s cn52xxp1;
- struct cvmx_pcieep_cfg007_mask_s cn56xx;
- struct cvmx_pcieep_cfg007_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg008 {
- uint32_t u32;
- struct cvmx_pcieep_cfg008_s {
- uint32_t reserved_4_31:28;
- uint32_t pf:1;
- uint32_t typ:2;
- uint32_t mspc:1;
- } s;
- struct cvmx_pcieep_cfg008_s cn52xx;
- struct cvmx_pcieep_cfg008_s cn52xxp1;
- struct cvmx_pcieep_cfg008_s cn56xx;
- struct cvmx_pcieep_cfg008_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg008_mask {
- uint32_t u32;
- struct cvmx_pcieep_cfg008_mask_s {
- uint32_t lmask:31;
- uint32_t enb:1;
- } s;
- struct cvmx_pcieep_cfg008_mask_s cn52xx;
- struct cvmx_pcieep_cfg008_mask_s cn52xxp1;
- struct cvmx_pcieep_cfg008_mask_s cn56xx;
- struct cvmx_pcieep_cfg008_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg009 {
- uint32_t u32;
- struct cvmx_pcieep_cfg009_s {
- uint32_t ubab:25;
- uint32_t reserved_0_6:7;
- } s;
- struct cvmx_pcieep_cfg009_s cn52xx;
- struct cvmx_pcieep_cfg009_s cn52xxp1;
- struct cvmx_pcieep_cfg009_s cn56xx;
- struct cvmx_pcieep_cfg009_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg009_mask {
- uint32_t u32;
- struct cvmx_pcieep_cfg009_mask_s {
- uint32_t umask:32;
- } s;
- struct cvmx_pcieep_cfg009_mask_s cn52xx;
- struct cvmx_pcieep_cfg009_mask_s cn52xxp1;
- struct cvmx_pcieep_cfg009_mask_s cn56xx;
- struct cvmx_pcieep_cfg009_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg010 {
- uint32_t u32;
- struct cvmx_pcieep_cfg010_s {
- uint32_t cisp:32;
- } s;
- struct cvmx_pcieep_cfg010_s cn52xx;
- struct cvmx_pcieep_cfg010_s cn52xxp1;
- struct cvmx_pcieep_cfg010_s cn56xx;
- struct cvmx_pcieep_cfg010_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg011 {
- uint32_t u32;
- struct cvmx_pcieep_cfg011_s {
- uint32_t ssid:16;
- uint32_t ssvid:16;
- } s;
- struct cvmx_pcieep_cfg011_s cn52xx;
- struct cvmx_pcieep_cfg011_s cn52xxp1;
- struct cvmx_pcieep_cfg011_s cn56xx;
- struct cvmx_pcieep_cfg011_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg012 {
- uint32_t u32;
- struct cvmx_pcieep_cfg012_s {
- uint32_t eraddr:16;
- uint32_t reserved_1_15:15;
- uint32_t er_en:1;
- } s;
- struct cvmx_pcieep_cfg012_s cn52xx;
- struct cvmx_pcieep_cfg012_s cn52xxp1;
- struct cvmx_pcieep_cfg012_s cn56xx;
- struct cvmx_pcieep_cfg012_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg012_mask {
- uint32_t u32;
- struct cvmx_pcieep_cfg012_mask_s {
- uint32_t mask:31;
- uint32_t enb:1;
- } s;
- struct cvmx_pcieep_cfg012_mask_s cn52xx;
- struct cvmx_pcieep_cfg012_mask_s cn52xxp1;
- struct cvmx_pcieep_cfg012_mask_s cn56xx;
- struct cvmx_pcieep_cfg012_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg013 {
- uint32_t u32;
- struct cvmx_pcieep_cfg013_s {
- uint32_t reserved_8_31:24;
- uint32_t cp:8;
- } s;
- struct cvmx_pcieep_cfg013_s cn52xx;
- struct cvmx_pcieep_cfg013_s cn52xxp1;
- struct cvmx_pcieep_cfg013_s cn56xx;
- struct cvmx_pcieep_cfg013_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg015 {
- uint32_t u32;
- struct cvmx_pcieep_cfg015_s {
- uint32_t ml:8;
- uint32_t mg:8;
- uint32_t inta:8;
- uint32_t il:8;
- } s;
- struct cvmx_pcieep_cfg015_s cn52xx;
- struct cvmx_pcieep_cfg015_s cn52xxp1;
- struct cvmx_pcieep_cfg015_s cn56xx;
- struct cvmx_pcieep_cfg015_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg016 {
- uint32_t u32;
- struct cvmx_pcieep_cfg016_s {
- uint32_t pmes:5;
- uint32_t d2s:1;
- uint32_t d1s:1;
- uint32_t auxc:3;
- uint32_t dsi:1;
- uint32_t reserved_20_20:1;
- uint32_t pme_clock:1;
- uint32_t pmsv:3;
- uint32_t ncp:8;
- uint32_t pmcid:8;
- } s;
- struct cvmx_pcieep_cfg016_s cn52xx;
- struct cvmx_pcieep_cfg016_s cn52xxp1;
- struct cvmx_pcieep_cfg016_s cn56xx;
- struct cvmx_pcieep_cfg016_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg017 {
- uint32_t u32;
- struct cvmx_pcieep_cfg017_s {
- uint32_t pmdia:8;
- uint32_t bpccee:1;
- uint32_t bd3h:1;
- uint32_t reserved_16_21:6;
- uint32_t pmess:1;
- uint32_t pmedsia:2;
- uint32_t pmds:4;
- uint32_t pmeens:1;
- uint32_t reserved_4_7:4;
- uint32_t nsr:1;
- uint32_t reserved_2_2:1;
- uint32_t ps:2;
- } s;
- struct cvmx_pcieep_cfg017_s cn52xx;
- struct cvmx_pcieep_cfg017_s cn52xxp1;
- struct cvmx_pcieep_cfg017_s cn56xx;
- struct cvmx_pcieep_cfg017_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg020 {
- uint32_t u32;
- struct cvmx_pcieep_cfg020_s {
- uint32_t reserved_24_31:8;
- uint32_t m64:1;
- uint32_t mme:3;
- uint32_t mmc:3;
- uint32_t msien:1;
- uint32_t ncp:8;
- uint32_t msicid:8;
- } s;
- struct cvmx_pcieep_cfg020_s cn52xx;
- struct cvmx_pcieep_cfg020_s cn52xxp1;
- struct cvmx_pcieep_cfg020_s cn56xx;
- struct cvmx_pcieep_cfg020_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg021 {
- uint32_t u32;
- struct cvmx_pcieep_cfg021_s {
- uint32_t lmsi:30;
- uint32_t reserved_0_1:2;
- } s;
- struct cvmx_pcieep_cfg021_s cn52xx;
- struct cvmx_pcieep_cfg021_s cn52xxp1;
- struct cvmx_pcieep_cfg021_s cn56xx;
- struct cvmx_pcieep_cfg021_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg022 {
- uint32_t u32;
- struct cvmx_pcieep_cfg022_s {
- uint32_t umsi:32;
- } s;
- struct cvmx_pcieep_cfg022_s cn52xx;
- struct cvmx_pcieep_cfg022_s cn52xxp1;
- struct cvmx_pcieep_cfg022_s cn56xx;
- struct cvmx_pcieep_cfg022_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg023 {
- uint32_t u32;
- struct cvmx_pcieep_cfg023_s {
- uint32_t reserved_16_31:16;
- uint32_t msimd:16;
- } s;
- struct cvmx_pcieep_cfg023_s cn52xx;
- struct cvmx_pcieep_cfg023_s cn52xxp1;
- struct cvmx_pcieep_cfg023_s cn56xx;
- struct cvmx_pcieep_cfg023_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg028 {
- uint32_t u32;
- struct cvmx_pcieep_cfg028_s {
- uint32_t reserved_30_31:2;
- uint32_t imn:5;
- uint32_t si:1;
- uint32_t dpt:4;
- uint32_t pciecv:4;
- uint32_t ncp:8;
- uint32_t pcieid:8;
- } s;
- struct cvmx_pcieep_cfg028_s cn52xx;
- struct cvmx_pcieep_cfg028_s cn52xxp1;
- struct cvmx_pcieep_cfg028_s cn56xx;
- struct cvmx_pcieep_cfg028_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg029 {
- uint32_t u32;
- struct cvmx_pcieep_cfg029_s {
- uint32_t reserved_28_31:4;
- uint32_t cspls:2;
- uint32_t csplv:8;
- uint32_t reserved_16_17:2;
- uint32_t rber:1;
- uint32_t reserved_12_14:3;
- uint32_t el1al:3;
- uint32_t el0al:3;
- uint32_t etfs:1;
- uint32_t pfs:2;
- uint32_t mpss:3;
- } s;
- struct cvmx_pcieep_cfg029_s cn52xx;
- struct cvmx_pcieep_cfg029_s cn52xxp1;
- struct cvmx_pcieep_cfg029_s cn56xx;
- struct cvmx_pcieep_cfg029_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg030 {
- uint32_t u32;
- struct cvmx_pcieep_cfg030_s {
- uint32_t reserved_22_31:10;
- uint32_t tp:1;
- uint32_t ap_d:1;
- uint32_t ur_d:1;
- uint32_t fe_d:1;
- uint32_t nfe_d:1;
- uint32_t ce_d:1;
- uint32_t reserved_15_15:1;
- uint32_t mrrs:3;
- uint32_t ns_en:1;
- uint32_t ap_en:1;
- uint32_t pf_en:1;
- uint32_t etf_en:1;
- uint32_t mps:3;
- uint32_t ro_en:1;
- uint32_t ur_en:1;
- uint32_t fe_en:1;
- uint32_t nfe_en:1;
- uint32_t ce_en:1;
- } s;
- struct cvmx_pcieep_cfg030_s cn52xx;
- struct cvmx_pcieep_cfg030_s cn52xxp1;
- struct cvmx_pcieep_cfg030_s cn56xx;
- struct cvmx_pcieep_cfg030_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg031 {
- uint32_t u32;
- struct cvmx_pcieep_cfg031_s {
- uint32_t pnum:8;
- uint32_t reserved_22_23:2;
- uint32_t lbnc:1;
- uint32_t dllarc:1;
- uint32_t sderc:1;
- uint32_t cpm:1;
- uint32_t l1el:3;
- uint32_t l0el:3;
- uint32_t aslpms:2;
- uint32_t mlw:6;
- uint32_t mls:4;
- } s;
- struct cvmx_pcieep_cfg031_s cn52xx;
- struct cvmx_pcieep_cfg031_s cn52xxp1;
- struct cvmx_pcieep_cfg031_s cn56xx;
- struct cvmx_pcieep_cfg031_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg032 {
- uint32_t u32;
- struct cvmx_pcieep_cfg032_s {
- uint32_t reserved_30_31:2;
- uint32_t dlla:1;
- uint32_t scc:1;
- uint32_t lt:1;
- uint32_t reserved_26_26:1;
- uint32_t nlw:6;
- uint32_t ls:4;
- uint32_t reserved_10_15:6;
- uint32_t hawd:1;
- uint32_t ecpm:1;
- uint32_t es:1;
- uint32_t ccc:1;
- uint32_t rl:1;
- uint32_t ld:1;
- uint32_t rcb:1;
- uint32_t reserved_2_2:1;
- uint32_t aslpc:2;
- } s;
- struct cvmx_pcieep_cfg032_s cn52xx;
- struct cvmx_pcieep_cfg032_s cn52xxp1;
- struct cvmx_pcieep_cfg032_s cn56xx;
- struct cvmx_pcieep_cfg032_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg033 {
- uint32_t u32;
- struct cvmx_pcieep_cfg033_s {
- uint32_t ps_num:13;
- uint32_t nccs:1;
- uint32_t emip:1;
- uint32_t sp_ls:2;
- uint32_t sp_lv:8;
- uint32_t hp_c:1;
- uint32_t hp_s:1;
- uint32_t pip:1;
- uint32_t aip:1;
- uint32_t mrlsp:1;
- uint32_t pcp:1;
- uint32_t abp:1;
- } s;
- struct cvmx_pcieep_cfg033_s cn52xx;
- struct cvmx_pcieep_cfg033_s cn52xxp1;
- struct cvmx_pcieep_cfg033_s cn56xx;
- struct cvmx_pcieep_cfg033_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg034 {
- uint32_t u32;
- struct cvmx_pcieep_cfg034_s {
- uint32_t reserved_25_31:7;
- uint32_t dlls_c:1;
- uint32_t emis:1;
- uint32_t pds:1;
- uint32_t mrlss:1;
- uint32_t ccint_d:1;
- uint32_t pd_c:1;
- uint32_t mrls_c:1;
- uint32_t pf_d:1;
- uint32_t abp_d:1;
- uint32_t reserved_13_15:3;
- uint32_t dlls_en:1;
- uint32_t emic:1;
- uint32_t pcc:1;
- uint32_t pic:2;
- uint32_t aic:2;
- uint32_t hpint_en:1;
- uint32_t ccint_en:1;
- uint32_t pd_en:1;
- uint32_t mrls_en:1;
- uint32_t pf_en:1;
- uint32_t abp_en:1;
- } s;
- struct cvmx_pcieep_cfg034_s cn52xx;
- struct cvmx_pcieep_cfg034_s cn52xxp1;
- struct cvmx_pcieep_cfg034_s cn56xx;
- struct cvmx_pcieep_cfg034_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg037 {
- uint32_t u32;
- struct cvmx_pcieep_cfg037_s {
- uint32_t reserved_5_31:27;
- uint32_t ctds:1;
- uint32_t ctrs:4;
- } s;
- struct cvmx_pcieep_cfg037_s cn52xx;
- struct cvmx_pcieep_cfg037_s cn52xxp1;
- struct cvmx_pcieep_cfg037_s cn56xx;
- struct cvmx_pcieep_cfg037_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg038 {
- uint32_t u32;
- struct cvmx_pcieep_cfg038_s {
- uint32_t reserved_5_31:27;
- uint32_t ctd:1;
- uint32_t ctv:4;
- } s;
- struct cvmx_pcieep_cfg038_s cn52xx;
- struct cvmx_pcieep_cfg038_s cn52xxp1;
- struct cvmx_pcieep_cfg038_s cn56xx;
- struct cvmx_pcieep_cfg038_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg039 {
- uint32_t u32;
- struct cvmx_pcieep_cfg039_s {
- uint32_t reserved_0_31:32;
- } s;
- struct cvmx_pcieep_cfg039_s cn52xx;
- struct cvmx_pcieep_cfg039_s cn52xxp1;
- struct cvmx_pcieep_cfg039_s cn56xx;
- struct cvmx_pcieep_cfg039_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg040 {
- uint32_t u32;
- struct cvmx_pcieep_cfg040_s {
- uint32_t reserved_0_31:32;
- } s;
- struct cvmx_pcieep_cfg040_s cn52xx;
- struct cvmx_pcieep_cfg040_s cn52xxp1;
- struct cvmx_pcieep_cfg040_s cn56xx;
- struct cvmx_pcieep_cfg040_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg041 {
- uint32_t u32;
- struct cvmx_pcieep_cfg041_s {
- uint32_t reserved_0_31:32;
- } s;
- struct cvmx_pcieep_cfg041_s cn52xx;
- struct cvmx_pcieep_cfg041_s cn52xxp1;
- struct cvmx_pcieep_cfg041_s cn56xx;
- struct cvmx_pcieep_cfg041_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg042 {
- uint32_t u32;
- struct cvmx_pcieep_cfg042_s {
- uint32_t reserved_0_31:32;
- } s;
- struct cvmx_pcieep_cfg042_s cn52xx;
- struct cvmx_pcieep_cfg042_s cn52xxp1;
- struct cvmx_pcieep_cfg042_s cn56xx;
- struct cvmx_pcieep_cfg042_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg064 {
- uint32_t u32;
- struct cvmx_pcieep_cfg064_s {
- uint32_t nco:12;
- uint32_t cv:4;
- uint32_t pcieec:16;
- } s;
- struct cvmx_pcieep_cfg064_s cn52xx;
- struct cvmx_pcieep_cfg064_s cn52xxp1;
- struct cvmx_pcieep_cfg064_s cn56xx;
- struct cvmx_pcieep_cfg064_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg065 {
- uint32_t u32;
- struct cvmx_pcieep_cfg065_s {
- uint32_t reserved_21_31:11;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
- } s;
- struct cvmx_pcieep_cfg065_s cn52xx;
- struct cvmx_pcieep_cfg065_s cn52xxp1;
- struct cvmx_pcieep_cfg065_s cn56xx;
- struct cvmx_pcieep_cfg065_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg066 {
- uint32_t u32;
- struct cvmx_pcieep_cfg066_s {
- uint32_t reserved_21_31:11;
- uint32_t urem:1;
- uint32_t ecrcem:1;
- uint32_t mtlpm:1;
- uint32_t rom:1;
- uint32_t ucm:1;
- uint32_t cam:1;
- uint32_t ctm:1;
- uint32_t fcpem:1;
- uint32_t ptlpm:1;
- uint32_t reserved_6_11:6;
- uint32_t sdem:1;
- uint32_t dlpem:1;
- uint32_t reserved_0_3:4;
- } s;
- struct cvmx_pcieep_cfg066_s cn52xx;
- struct cvmx_pcieep_cfg066_s cn52xxp1;
- struct cvmx_pcieep_cfg066_s cn56xx;
- struct cvmx_pcieep_cfg066_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg067 {
- uint32_t u32;
- struct cvmx_pcieep_cfg067_s {
- uint32_t reserved_21_31:11;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
- } s;
- struct cvmx_pcieep_cfg067_s cn52xx;
- struct cvmx_pcieep_cfg067_s cn52xxp1;
- struct cvmx_pcieep_cfg067_s cn56xx;
- struct cvmx_pcieep_cfg067_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg068 {
- uint32_t u32;
- struct cvmx_pcieep_cfg068_s {
- uint32_t reserved_14_31:18;
- uint32_t anfes:1;
- uint32_t rtts:1;
- uint32_t reserved_9_11:3;
- uint32_t rnrs:1;
- uint32_t bdllps:1;
- uint32_t btlps:1;
- uint32_t reserved_1_5:5;
- uint32_t res:1;
- } s;
- struct cvmx_pcieep_cfg068_s cn52xx;
- struct cvmx_pcieep_cfg068_s cn52xxp1;
- struct cvmx_pcieep_cfg068_s cn56xx;
- struct cvmx_pcieep_cfg068_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg069 {
- uint32_t u32;
- struct cvmx_pcieep_cfg069_s {
- uint32_t reserved_14_31:18;
- uint32_t anfem:1;
- uint32_t rttm:1;
- uint32_t reserved_9_11:3;
- uint32_t rnrm:1;
- uint32_t bdllpm:1;
- uint32_t btlpm:1;
- uint32_t reserved_1_5:5;
- uint32_t rem:1;
- } s;
- struct cvmx_pcieep_cfg069_s cn52xx;
- struct cvmx_pcieep_cfg069_s cn52xxp1;
- struct cvmx_pcieep_cfg069_s cn56xx;
- struct cvmx_pcieep_cfg069_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg070 {
- uint32_t u32;
- struct cvmx_pcieep_cfg070_s {
- uint32_t reserved_9_31:23;
- uint32_t ce:1;
- uint32_t cc:1;
- uint32_t ge:1;
- uint32_t gc:1;
- uint32_t fep:5;
- } s;
- struct cvmx_pcieep_cfg070_s cn52xx;
- struct cvmx_pcieep_cfg070_s cn52xxp1;
- struct cvmx_pcieep_cfg070_s cn56xx;
- struct cvmx_pcieep_cfg070_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg071 {
- uint32_t u32;
- struct cvmx_pcieep_cfg071_s {
- uint32_t dword1:32;
- } s;
- struct cvmx_pcieep_cfg071_s cn52xx;
- struct cvmx_pcieep_cfg071_s cn52xxp1;
- struct cvmx_pcieep_cfg071_s cn56xx;
- struct cvmx_pcieep_cfg071_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg072 {
- uint32_t u32;
- struct cvmx_pcieep_cfg072_s {
- uint32_t dword2:32;
- } s;
- struct cvmx_pcieep_cfg072_s cn52xx;
- struct cvmx_pcieep_cfg072_s cn52xxp1;
- struct cvmx_pcieep_cfg072_s cn56xx;
- struct cvmx_pcieep_cfg072_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg073 {
- uint32_t u32;
- struct cvmx_pcieep_cfg073_s {
- uint32_t dword3:32;
- } s;
- struct cvmx_pcieep_cfg073_s cn52xx;
- struct cvmx_pcieep_cfg073_s cn52xxp1;
- struct cvmx_pcieep_cfg073_s cn56xx;
- struct cvmx_pcieep_cfg073_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg074 {
- uint32_t u32;
- struct cvmx_pcieep_cfg074_s {
- uint32_t dword4:32;
- } s;
- struct cvmx_pcieep_cfg074_s cn52xx;
- struct cvmx_pcieep_cfg074_s cn52xxp1;
- struct cvmx_pcieep_cfg074_s cn56xx;
- struct cvmx_pcieep_cfg074_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg448 {
- uint32_t u32;
- struct cvmx_pcieep_cfg448_s {
- uint32_t rtl:16;
- uint32_t rtltl:16;
- } s;
- struct cvmx_pcieep_cfg448_s cn52xx;
- struct cvmx_pcieep_cfg448_s cn52xxp1;
- struct cvmx_pcieep_cfg448_s cn56xx;
- struct cvmx_pcieep_cfg448_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg449 {
- uint32_t u32;
- struct cvmx_pcieep_cfg449_s {
- uint32_t omr:32;
- } s;
- struct cvmx_pcieep_cfg449_s cn52xx;
- struct cvmx_pcieep_cfg449_s cn52xxp1;
- struct cvmx_pcieep_cfg449_s cn56xx;
- struct cvmx_pcieep_cfg449_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg450 {
- uint32_t u32;
- struct cvmx_pcieep_cfg450_s {
- uint32_t lpec:8;
- uint32_t reserved_22_23:2;
- uint32_t link_state:6;
- uint32_t force_link:1;
- uint32_t reserved_8_14:7;
- uint32_t link_num:8;
- } s;
- struct cvmx_pcieep_cfg450_s cn52xx;
- struct cvmx_pcieep_cfg450_s cn52xxp1;
- struct cvmx_pcieep_cfg450_s cn56xx;
- struct cvmx_pcieep_cfg450_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg451 {
- uint32_t u32;
- struct cvmx_pcieep_cfg451_s {
- uint32_t reserved_30_31:2;
- uint32_t l1el:3;
- uint32_t l0el:3;
- uint32_t n_fts_cc:8;
- uint32_t n_fts:8;
- uint32_t ack_freq:8;
- } s;
- struct cvmx_pcieep_cfg451_s cn52xx;
- struct cvmx_pcieep_cfg451_s cn52xxp1;
- struct cvmx_pcieep_cfg451_s cn56xx;
- struct cvmx_pcieep_cfg451_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg452 {
- uint32_t u32;
- struct cvmx_pcieep_cfg452_s {
- uint32_t reserved_26_31:6;
- uint32_t eccrc:1;
- uint32_t reserved_22_24:3;
- uint32_t lme:6;
- uint32_t reserved_8_15:8;
- uint32_t flm:1;
- uint32_t reserved_6_6:1;
- uint32_t dllle:1;
- uint32_t reserved_4_4:1;
- uint32_t ra:1;
- uint32_t le:1;
- uint32_t sd:1;
- uint32_t omr:1;
- } s;
- struct cvmx_pcieep_cfg452_s cn52xx;
- struct cvmx_pcieep_cfg452_s cn52xxp1;
- struct cvmx_pcieep_cfg452_s cn56xx;
- struct cvmx_pcieep_cfg452_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg453 {
- uint32_t u32;
- struct cvmx_pcieep_cfg453_s {
- uint32_t dlld:1;
- uint32_t reserved_26_30:5;
- uint32_t ack_nak:1;
- uint32_t fcd:1;
- uint32_t ilst:24;
- } s;
- struct cvmx_pcieep_cfg453_s cn52xx;
- struct cvmx_pcieep_cfg453_s cn52xxp1;
- struct cvmx_pcieep_cfg453_s cn56xx;
- struct cvmx_pcieep_cfg453_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg454 {
- uint32_t u32;
- struct cvmx_pcieep_cfg454_s {
- uint32_t reserved_29_31:3;
- uint32_t tmfcwt:5;
- uint32_t tmanlt:5;
- uint32_t tmrt:5;
- uint32_t reserved_11_13:3;
- uint32_t nskps:3;
- uint32_t reserved_4_7:4;
- uint32_t ntss:4;
- } s;
- struct cvmx_pcieep_cfg454_s cn52xx;
- struct cvmx_pcieep_cfg454_s cn52xxp1;
- struct cvmx_pcieep_cfg454_s cn56xx;
- struct cvmx_pcieep_cfg454_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg455 {
- uint32_t u32;
- struct cvmx_pcieep_cfg455_s {
- uint32_t m_cfg0_filt:1;
- uint32_t m_io_filt:1;
- uint32_t msg_ctrl:1;
- uint32_t m_cpl_ecrc_filt:1;
- uint32_t m_ecrc_filt:1;
- uint32_t m_cpl_len_err:1;
- uint32_t m_cpl_attr_err:1;
- uint32_t m_cpl_tc_err:1;
- uint32_t m_cpl_fun_err:1;
- uint32_t m_cpl_rid_err:1;
- uint32_t m_cpl_tag_err:1;
- uint32_t m_lk_filt:1;
- uint32_t m_cfg1_filt:1;
- uint32_t m_bar_match:1;
- uint32_t m_pois_filt:1;
- uint32_t m_fun:1;
- uint32_t dfcwt:1;
- uint32_t reserved_11_14:4;
- uint32_t skpiv:11;
- } s;
- struct cvmx_pcieep_cfg455_s cn52xx;
- struct cvmx_pcieep_cfg455_s cn52xxp1;
- struct cvmx_pcieep_cfg455_s cn56xx;
- struct cvmx_pcieep_cfg455_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg456 {
- uint32_t u32;
- struct cvmx_pcieep_cfg456_s {
- uint32_t reserved_2_31:30;
- uint32_t m_vend1_drp:1;
- uint32_t m_vend0_drp:1;
- } s;
- struct cvmx_pcieep_cfg456_s cn52xx;
- struct cvmx_pcieep_cfg456_s cn52xxp1;
- struct cvmx_pcieep_cfg456_s cn56xx;
- struct cvmx_pcieep_cfg456_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg458 {
- uint32_t u32;
- struct cvmx_pcieep_cfg458_s {
- uint32_t dbg_info_l32:32;
- } s;
- struct cvmx_pcieep_cfg458_s cn52xx;
- struct cvmx_pcieep_cfg458_s cn52xxp1;
- struct cvmx_pcieep_cfg458_s cn56xx;
- struct cvmx_pcieep_cfg458_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg459 {
- uint32_t u32;
- struct cvmx_pcieep_cfg459_s {
- uint32_t dbg_info_u32:32;
- } s;
- struct cvmx_pcieep_cfg459_s cn52xx;
- struct cvmx_pcieep_cfg459_s cn52xxp1;
- struct cvmx_pcieep_cfg459_s cn56xx;
- struct cvmx_pcieep_cfg459_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg460 {
- uint32_t u32;
- struct cvmx_pcieep_cfg460_s {
- uint32_t reserved_20_31:12;
- uint32_t tphfcc:8;
- uint32_t tpdfcc:12;
- } s;
- struct cvmx_pcieep_cfg460_s cn52xx;
- struct cvmx_pcieep_cfg460_s cn52xxp1;
- struct cvmx_pcieep_cfg460_s cn56xx;
- struct cvmx_pcieep_cfg460_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg461 {
- uint32_t u32;
- struct cvmx_pcieep_cfg461_s {
- uint32_t reserved_20_31:12;
- uint32_t tchfcc:8;
- uint32_t tcdfcc:12;
- } s;
- struct cvmx_pcieep_cfg461_s cn52xx;
- struct cvmx_pcieep_cfg461_s cn52xxp1;
- struct cvmx_pcieep_cfg461_s cn56xx;
- struct cvmx_pcieep_cfg461_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg462 {
- uint32_t u32;
- struct cvmx_pcieep_cfg462_s {
- uint32_t reserved_20_31:12;
- uint32_t tchfcc:8;
- uint32_t tcdfcc:12;
- } s;
- struct cvmx_pcieep_cfg462_s cn52xx;
- struct cvmx_pcieep_cfg462_s cn52xxp1;
- struct cvmx_pcieep_cfg462_s cn56xx;
- struct cvmx_pcieep_cfg462_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg463 {
- uint32_t u32;
- struct cvmx_pcieep_cfg463_s {
- uint32_t reserved_3_31:29;
- uint32_t rqne:1;
- uint32_t trbne:1;
- uint32_t rtlpfccnr:1;
- } s;
- struct cvmx_pcieep_cfg463_s cn52xx;
- struct cvmx_pcieep_cfg463_s cn52xxp1;
- struct cvmx_pcieep_cfg463_s cn56xx;
- struct cvmx_pcieep_cfg463_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg464 {
- uint32_t u32;
- struct cvmx_pcieep_cfg464_s {
- uint32_t wrr_vc3:8;
- uint32_t wrr_vc2:8;
- uint32_t wrr_vc1:8;
- uint32_t wrr_vc0:8;
- } s;
- struct cvmx_pcieep_cfg464_s cn52xx;
- struct cvmx_pcieep_cfg464_s cn52xxp1;
- struct cvmx_pcieep_cfg464_s cn56xx;
- struct cvmx_pcieep_cfg464_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg465 {
- uint32_t u32;
- struct cvmx_pcieep_cfg465_s {
- uint32_t wrr_vc7:8;
- uint32_t wrr_vc6:8;
- uint32_t wrr_vc5:8;
- uint32_t wrr_vc4:8;
- } s;
- struct cvmx_pcieep_cfg465_s cn52xx;
- struct cvmx_pcieep_cfg465_s cn52xxp1;
- struct cvmx_pcieep_cfg465_s cn56xx;
- struct cvmx_pcieep_cfg465_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg466 {
- uint32_t u32;
- struct cvmx_pcieep_cfg466_s {
- uint32_t rx_queue_order:1;
- uint32_t type_ordering:1;
- uint32_t reserved_24_29:6;
- uint32_t queue_mode:3;
- uint32_t reserved_20_20:1;
- uint32_t header_credits:8;
- uint32_t data_credits:12;
- } s;
- struct cvmx_pcieep_cfg466_s cn52xx;
- struct cvmx_pcieep_cfg466_s cn52xxp1;
- struct cvmx_pcieep_cfg466_s cn56xx;
- struct cvmx_pcieep_cfg466_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg467 {
- uint32_t u32;
- struct cvmx_pcieep_cfg467_s {
- uint32_t reserved_24_31:8;
- uint32_t queue_mode:3;
- uint32_t reserved_20_20:1;
- uint32_t header_credits:8;
- uint32_t data_credits:12;
- } s;
- struct cvmx_pcieep_cfg467_s cn52xx;
- struct cvmx_pcieep_cfg467_s cn52xxp1;
- struct cvmx_pcieep_cfg467_s cn56xx;
- struct cvmx_pcieep_cfg467_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg468 {
- uint32_t u32;
- struct cvmx_pcieep_cfg468_s {
- uint32_t reserved_24_31:8;
- uint32_t queue_mode:3;
- uint32_t reserved_20_20:1;
- uint32_t header_credits:8;
- uint32_t data_credits:12;
- } s;
- struct cvmx_pcieep_cfg468_s cn52xx;
- struct cvmx_pcieep_cfg468_s cn52xxp1;
- struct cvmx_pcieep_cfg468_s cn56xx;
- struct cvmx_pcieep_cfg468_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg490 {
- uint32_t u32;
- struct cvmx_pcieep_cfg490_s {
- uint32_t reserved_26_31:6;
- uint32_t header_depth:10;
- uint32_t reserved_14_15:2;
- uint32_t data_depth:14;
- } s;
- struct cvmx_pcieep_cfg490_s cn52xx;
- struct cvmx_pcieep_cfg490_s cn52xxp1;
- struct cvmx_pcieep_cfg490_s cn56xx;
- struct cvmx_pcieep_cfg490_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg491 {
- uint32_t u32;
- struct cvmx_pcieep_cfg491_s {
- uint32_t reserved_26_31:6;
- uint32_t header_depth:10;
- uint32_t reserved_14_15:2;
- uint32_t data_depth:14;
- } s;
- struct cvmx_pcieep_cfg491_s cn52xx;
- struct cvmx_pcieep_cfg491_s cn52xxp1;
- struct cvmx_pcieep_cfg491_s cn56xx;
- struct cvmx_pcieep_cfg491_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg492 {
- uint32_t u32;
- struct cvmx_pcieep_cfg492_s {
- uint32_t reserved_26_31:6;
- uint32_t header_depth:10;
- uint32_t reserved_14_15:2;
- uint32_t data_depth:14;
- } s;
- struct cvmx_pcieep_cfg492_s cn52xx;
- struct cvmx_pcieep_cfg492_s cn52xxp1;
- struct cvmx_pcieep_cfg492_s cn56xx;
- struct cvmx_pcieep_cfg492_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg516 {
- uint32_t u32;
- struct cvmx_pcieep_cfg516_s {
- uint32_t phy_stat:32;
- } s;
- struct cvmx_pcieep_cfg516_s cn52xx;
- struct cvmx_pcieep_cfg516_s cn52xxp1;
- struct cvmx_pcieep_cfg516_s cn56xx;
- struct cvmx_pcieep_cfg516_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg517 {
- uint32_t u32;
- struct cvmx_pcieep_cfg517_s {
- uint32_t phy_ctrl:32;
- } s;
- struct cvmx_pcieep_cfg517_s cn52xx;
- struct cvmx_pcieep_cfg517_s cn52xxp1;
- struct cvmx_pcieep_cfg517_s cn56xx;
- struct cvmx_pcieep_cfg517_s cn56xxp1;
-};
-
-#endif
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index fcd4060f6421..90bf3b3fce19 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -17,6 +17,7 @@
*/
#include <linux/ioport.h>
+#include <linux/of.h>
/*
* Each pci channel is a top-level PCI bus seem by CPU. A machine with
@@ -26,6 +27,7 @@
struct pci_controller {
struct pci_controller *next;
struct pci_bus *bus;
+ struct device_node *of_node;
struct pci_ops *pci_ops;
struct resource *mem_resource;
@@ -142,4 +144,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
extern char * (*pcibios_plat_setup)(char *str);
+/* this function parses memory ranges from a device node */
+extern void __devinit pci_load_of_ranges(struct pci_controller *hose,
+ struct device_node *node);
+
#endif /* _ASM_PCI_H */
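Per the comment above, pci_load_of_ranges() parses a node's ranges into the controller's resources; a sketch of a platform probe using it together with the existing register_pci_controller(), where the structure and function names are hypothetical and the pci_ops body is omitted:

#include <linux/pci.h>
#include <linux/platform_device.h>

static struct pci_ops example_pci_ops;		/* read/write ops omitted in this sketch */
static struct resource example_mem_resource;	/* filled from the DT ranges */
static struct resource example_io_resource;

static struct pci_controller example_controller = {
	.pci_ops	= &example_pci_ops,
	.mem_resource	= &example_mem_resource,
	.io_resource	= &example_io_resource,
};

static int __devinit example_pci_probe(struct platform_device *pdev)
{
	example_controller.of_node = pdev->dev.of_node;
	pci_load_of_ranges(&example_controller, pdev->dev.of_node);
	register_pci_controller(&example_controller);
	return 0;
}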
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index 7a6e82ef449b..7206d445bab8 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -12,6 +12,9 @@
#define __ASM_PROM_H
#ifdef CONFIG_OF
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/types.h>
#include <asm/bootinfo.h>
extern int early_init_dt_scan_memory_arch(unsigned long node,
@@ -21,6 +24,29 @@ extern int reserve_mem_mach(unsigned long addr, unsigned long size);
extern void free_mem_mach(unsigned long addr, unsigned long size);
extern void device_tree_init(void);
+
+static inline unsigned long pci_address_to_pio(phys_addr_t address)
+{
+ /*
+ * The ioport address can be directly used by inX() / outX()
+ */
+ BUG_ON(address > IO_SPACE_LIMIT);
+
+ return (unsigned long) address;
+}
+#define pci_address_to_pio pci_address_to_pio
+
+struct boot_param_header;
+
+extern void __dt_setup_arch(struct boot_param_header *bph);
+
+#define dt_setup_arch(sym) \
+({ \
+ extern struct boot_param_header __dtb_##sym##_begin; \
+ \
+ __dt_setup_arch(&__dtb_##sym##_begin); \
+})
+
#else /* CONFIG_OF */
static inline void device_tree_init(void) { }
#endif /* CONFIG_OF */
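The dt_setup_arch() macro binds a DTB that was linked into the kernel (as __dtb_<name>_begin) to the new __dt_setup_arch() entry point, so a board's plat_mem_setup() reduces to something like the sketch below, where the blob name "easy50712" is only an example:

void __init plat_mem_setup(void)
{
	/* expands to __dt_setup_arch(&__dtb_easy50712_begin) */
	dt_setup_arch(easy50712);
}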
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index 6dce6d8d09ab..2560b6b6a7d8 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -14,7 +14,8 @@ extern void *set_vi_handler(int n, vi_handler_t addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
-extern void per_cpu_trap_init(void);
+extern void per_cpu_trap_init(bool);
+extern void cpu_cache_init(void);
#endif /* __KERNEL__ */
diff --git a/arch/mips/include/asm/sparsemem.h b/arch/mips/include/asm/sparsemem.h
index 7165333ad043..4461198361c9 100644
--- a/arch/mips/include/asm/sparsemem.h
+++ b/arch/mips/include/asm/sparsemem.h
@@ -6,7 +6,11 @@
* SECTION_SIZE_BITS 2^N: how big each section will be
* MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
*/
-#define SECTION_SIZE_BITS 28
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PAGE_SIZE_64KB)
+# define SECTION_SIZE_BITS 29
+#else
+# define SECTION_SIZE_BITS 28
+#endif
#define MAX_PHYSMEM_BITS 35
#endif /* CONFIG_SPARSEMEM */
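The widened section size follows from huge-page geometry: with 64 KB base pages a PMD-level huge page spans PAGE_SIZE * (PAGE_SIZE / sizeof(pte_t)) = 64 KB * 8192 = 512 MB = 2^29 bytes, which no longer fits in the previous 2^28 = 256 MB sections, so SECTION_SIZE_BITS is raised to 29 for that configuration (the PMD arithmetic is quoted here for illustration, not taken from this patch).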
diff --git a/arch/mips/include/asm/termios.h b/arch/mips/include/asm/termios.h
index 8f77f774a2a0..abdd87aaf609 100644
--- a/arch/mips/include/asm/termios.h
+++ b/arch/mips/include/asm/termios.h
@@ -60,7 +60,7 @@ struct termio {
};
#ifdef __KERNEL__
-#include <linux/module.h>
+#include <asm/uaccess.h>
/*
* intr=^C quit=^\ erase=del kill=^U
diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h
index ff74aec3561a..420ca06b2f42 100644
--- a/arch/mips/include/asm/traps.h
+++ b/arch/mips/include/asm/traps.h
@@ -25,6 +25,7 @@ extern void (*board_nmi_handler_setup)(void);
extern void (*board_ejtag_handler_setup)(void);
extern void (*board_bind_eic_interrupt)(int irq, int regset);
extern void (*board_ebase_setup)(void);
+extern void (*board_cache_error_setup)(void);
extern int register_nmi_notifier(struct notifier_block *nb);
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 504d40aedfae..440a21dab575 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -11,7 +11,7 @@
#include <linux/types.h>
#ifdef CONFIG_EXPORT_UASM
-#include <linux/module.h>
+#include <linux/export.h>
#define __uasminit
#define __uasminitdata
#define UASM_EXPORT_SYMBOL(sym) EXPORT_SYMBOL(sym)
diff --git a/arch/mips/jz4740/Makefile b/arch/mips/jz4740/Makefile
index a9dff3321251..e44abea9c209 100644
--- a/arch/mips/jz4740/Makefile
+++ b/arch/mips/jz4740/Makefile
@@ -16,5 +16,3 @@ obj-$(CONFIG_JZ4740_QI_LB60) += board-qi_lb60.o
# PM support
obj-$(CONFIG_PM) += pm.o
-
-ccflags-y := -Werror -Wall
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5099201fb7bc..6ae7ce4ac63e 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -340,7 +340,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R2000";
c->isa_level = MIPS_CPU_ISA_I;
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
- MIPS_CPU_NOFPUEX;
+ MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
@@ -361,7 +361,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
}
c->isa_level = MIPS_CPU_ISA_I;
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
- MIPS_CPU_NOFPUEX;
+ MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
@@ -387,8 +387,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_WATCH | MIPS_CPU_VCE |
- MIPS_CPU_LLSC;
+ MIPS_CPU_WATCH | MIPS_CPU_VCE |
+ MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_VR41XX:
@@ -434,7 +434,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R4300";
c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_R4600:
@@ -446,7 +446,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->tlbsize = 48;
break;
#if 0
- case PRID_IMP_R4650:
+ case PRID_IMP_R4650:
/*
* This processor doesn't have an MMU, so it's not
* "real easy" to run Linux on it. It is left purely
@@ -455,9 +455,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
*/
c->cputype = CPU_R4650;
__cpu_name[cpu] = "R4650";
- c->isa_level = MIPS_CPU_ISA_III;
+ c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
- c->tlbsize = 48;
+ c->tlbsize = 48;
break;
#endif
case PRID_IMP_TX39:
@@ -488,7 +488,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R4700";
c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_TX49:
@@ -505,7 +505,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R5000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R5432:
@@ -513,7 +513,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R5432";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+ MIPS_CPU_WATCH | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R5500:
@@ -521,7 +521,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R5500";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+ MIPS_CPU_WATCH | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_NEVADA:
@@ -529,7 +529,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "Nevada";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
+ MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R6000:
@@ -537,7 +537,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R6000";
c->isa_level = MIPS_CPU_ISA_II;
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_R6000A:
@@ -545,7 +545,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R6000A";
c->isa_level = MIPS_CPU_ISA_II;
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_RM7000:
@@ -553,7 +553,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "RM7000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
/*
* Undocumented RM7000: Bit 29 in the info register of
* the RM7000 v2.0 indicates if the TLB has 48 or 64
@@ -569,7 +569,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "RM9000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
/*
* Bit 29 in the info register of the RM9000
* indicates if the TLB has 48 or 64 entries.
@@ -584,8 +584,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "RM8000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_LLSC;
c->tlbsize = 384; /* has weird TLB: 3-way x 128 */
break;
case PRID_IMP_R10000:
@@ -593,9 +593,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R10000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_R12000:
@@ -603,9 +603,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R12000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_R14000:
@@ -613,9 +613,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R14000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_LOONGSON2:
@@ -739,7 +739,7 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
if (config3 & MIPS_CONF3_VEIC)
c->options |= MIPS_CPU_VEIC;
if (config3 & MIPS_CONF3_MT)
- c->ases |= MIPS_ASE_MIPSMT;
+ c->ases |= MIPS_ASE_MIPSMT;
if (config3 & MIPS_CONF3_ULRI)
c->options |= MIPS_CPU_ULRI;
@@ -767,7 +767,7 @@ static void __cpuinit decode_configs(struct cpuinfo_mips *c)
/* MIPS32 or MIPS64 compliant CPU. */
c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
- MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
+ MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
c->scache.flags = MIPS_CACHE_NOT_PRESENT;
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index ab73fa2fb9b5..f29099b104c4 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1532,7 +1532,8 @@ init_hw_perf_events(void)
irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
} else {
#endif
- if (cp0_perfcount_irq >= 0)
+ if ((cp0_perfcount_irq >= 0) &&
+ (cp0_compare_irq != cp0_perfcount_irq))
irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
else
irq = -1;
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index f8b2c592514d..5542817c1b49 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -41,27 +41,27 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "processor\t\t: %ld\n", n);
sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
- cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : "");
+ cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : "");
seq_printf(m, fmt, __cpu_name[n],
- (version >> 4) & 0x0f, version & 0x0f,
- (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
+ (version >> 4) & 0x0f, version & 0x0f,
+ (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
- cpu_data[n].udelay_val / (500000/HZ),
- (cpu_data[n].udelay_val / (5000/HZ)) % 100);
+ cpu_data[n].udelay_val / (500000/HZ),
+ (cpu_data[n].udelay_val / (5000/HZ)) % 100);
seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
seq_printf(m, "microsecond timers\t: %s\n",
- cpu_has_counter ? "yes" : "no");
+ cpu_has_counter ? "yes" : "no");
seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
seq_printf(m, "extra interrupt vector\t: %s\n",
- cpu_has_divec ? "yes" : "no");
+ cpu_has_divec ? "yes" : "no");
seq_printf(m, "hardware watchpoint\t: %s",
- cpu_has_watch ? "yes, " : "no\n");
+ cpu_has_watch ? "yes, " : "no\n");
if (cpu_has_watch) {
seq_printf(m, "count: %d, address/irw mask: [",
- cpu_data[n].watch_reg_count);
+ cpu_data[n].watch_reg_count);
for (i = 0; i < cpu_data[n].watch_reg_count; i++)
seq_printf(m, "%s0x%04x", i ? ", " : "" ,
- cpu_data[n].watch_reg_masks[i]);
+ cpu_data[n].watch_reg_masks[i]);
seq_printf(m, "]\n");
}
seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n",
@@ -73,13 +73,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
cpu_has_mipsmt ? " mt" : ""
);
seq_printf(m, "shadow register sets\t: %d\n",
- cpu_data[n].srsets);
+ cpu_data[n].srsets);
seq_printf(m, "kscratch registers\t: %d\n",
- hweight8(cpu_data[n].kscratch_mask));
+ hweight8(cpu_data[n].kscratch_mask));
seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
- cpu_has_vce ? "%u" : "not available");
+ cpu_has_vce ? "%u" : "not available");
seq_printf(m, fmt, 'D', vced_count);
seq_printf(m, fmt, 'I', vcei_count);
seq_printf(m, "\n");
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 558b5395795d..f11b2bbb826d 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -95,3 +95,16 @@ void __init device_tree_init(void)
/* free the space reserved for the dt blob */
free_mem_mach(base, size);
}
+
+void __init __dt_setup_arch(struct boot_param_header *bph)
+{
+ if (be32_to_cpu(bph->magic) != OF_DT_HEADER) {
+ pr_err("DTB has bad magic, ignoring builtin OF DTB\n");
+
+ return;
+ }
+
+ initial_boot_params = bph;
+
+ early_init_devtree(initial_boot_params);
+}
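
Editorial note: __dt_setup_arch() above rejects a built-in DTB whose header magic does not match before handing it to early_init_devtree(). A small, self-contained sketch of the same big-endian magic check; the helper name and test blobs are hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    #define OF_DT_HEADER 0xd00dfeed  /* flattened device tree magic */

    /* Illustrative check mirroring __dt_setup_arch(): the blob stores its
     * magic big-endian, so assemble it byte-wise before comparing. */
    static int dtb_looks_valid(const uint8_t *blob)
    {
        uint32_t magic = ((uint32_t)blob[0] << 24) | ((uint32_t)blob[1] << 16) |
                         ((uint32_t)blob[2] << 8) | blob[3];
        return magic == OF_DT_HEADER;
    }

    int main(void)
    {
        uint8_t good[4] = { 0xd0, 0x0d, 0xfe, 0xed };
        uint8_t bad[4]  = { 0x00, 0x00, 0x00, 0x00 };

        printf("%d %d\n", dtb_looks_valid(good), dtb_looks_valid(bad));
        return 0;
    }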
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index c504b212f8f3..a53f8ec37aac 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -605,6 +605,8 @@ void __init setup_arch(char **cmdline_p)
resource_init();
plat_smp_setup();
+
+ cpu_cache_init();
}
unsigned long kernelsp[NR_CPUS];
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 71a95f55a649..48650c818040 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -106,7 +106,7 @@ asmlinkage __cpuinit void start_secondary(void)
#endif /* CONFIG_MIPS_MT_SMTC */
cpu_probe();
cpu_report();
- per_cpu_trap_init();
+ per_cpu_trap_init(false);
mips_clockevent_init();
mp_ops->init_secondary();
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index cfdaaa4cffc0..2d0c2a277f52 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -15,6 +15,7 @@
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
@@ -91,7 +92,7 @@ void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
-
+void __cpuinitdata(*board_cache_error_setup)(void);
static void show_raw_backtrace(unsigned long reg29)
{
@@ -1490,7 +1491,6 @@ void *set_vi_handler(int n, vi_handler_t addr)
return set_vi_srs_handler(n, addr, 0);
}
-extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);
@@ -1517,7 +1517,7 @@ static int __init ulri_disable(char *s)
}
__setup("noulri", ulri_disable);
-void __cpuinit per_cpu_trap_init(void)
+void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
{
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
@@ -1616,7 +1616,9 @@ void __cpuinit per_cpu_trap_init(void)
#ifdef CONFIG_MIPS_MT_SMTC
if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
- cpu_cache_init();
+ /* Boot CPU's cache setup in setup_arch(). */
+ if (!is_boot_cpu)
+ cpu_cache_init();
tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
} else if (!secondaryTC) {
@@ -1632,7 +1634,7 @@ void __cpuinit per_cpu_trap_init(void)
}
/* Install CPU exception handler */
-void __init set_handler(unsigned long offset, void *addr, unsigned long size)
+void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
{
memcpy((void *)(ebase + offset), addr, size);
local_flush_icache_range(ebase + offset, ebase + offset + size);
@@ -1693,7 +1695,7 @@ void __init trap_init(void)
if (board_ebase_setup)
board_ebase_setup();
- per_cpu_trap_init();
+ per_cpu_trap_init(true);
/*
* Copy the generic exception handlers to their final destination.
@@ -1797,6 +1799,9 @@ void __init trap_init(void)
set_except_vector(26, handle_dsp);
+ if (board_cache_error_setup)
+ board_cache_error_setup();
+
if (cpu_has_vce)
/* Special exception: R4[04]00 uses also the divec space. */
memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
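
Editorial note: the traps.c changes split cache initialisation: the boot CPU now does it from setup_arch(), while per_cpu_trap_init() gained an is_boot_cpu flag so only secondary CPUs repeat it there. A toy sketch of that control flow; the function names are stand-ins, not kernel symbols.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for cpu_cache_init()/tlb_init(). */
    static void toy_cache_init(int cpu) { printf("cache init on cpu %d\n", cpu); }
    static void toy_tlb_init(int cpu)   { printf("tlb init on cpu %d\n", cpu); }

    /* Mirrors per_cpu_trap_init(bool is_boot_cpu): the boot CPU's cache was
     * already set up earlier (from setup_arch()), so it is skipped here. */
    static void toy_per_cpu_trap_init(int cpu, bool is_boot_cpu)
    {
        if (!is_boot_cpu)
            toy_cache_init(cpu);
        toy_tlb_init(cpu);
    }

    int main(void)
    {
        toy_cache_init(0);               /* boot CPU: done from setup_arch() */
        toy_per_cpu_trap_init(0, true);  /* trap_init() path */
        toy_per_cpu_trap_init(1, false); /* start_secondary() path */
        return 0;
    }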
diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig
index 3fccf2104513..20bdf40b3efa 100644
--- a/arch/mips/lantiq/Kconfig
+++ b/arch/mips/lantiq/Kconfig
@@ -16,8 +16,22 @@ config SOC_XWAY
bool "XWAY"
select SOC_TYPE_XWAY
select HW_HAS_PCI
+
+config SOC_FALCON
+ bool "FALCON"
+
+endchoice
+
+choice
+ prompt "Devicetree"
+
+config DT_EASY50712
+ bool "Easy50712"
+ depends on SOC_XWAY
endchoice
-source "arch/mips/lantiq/xway/Kconfig"
+config PCI_LANTIQ
+ bool "PCI Support"
+ depends on SOC_XWAY && PCI
endif
diff --git a/arch/mips/lantiq/Makefile b/arch/mips/lantiq/Makefile
index e5dae0e24b00..d6bdc579419f 100644
--- a/arch/mips/lantiq/Makefile
+++ b/arch/mips/lantiq/Makefile
@@ -4,8 +4,11 @@
# under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation.
-obj-y := irq.o setup.o clk.o prom.o devices.o
+obj-y := irq.o clk.o prom.o
+
+obj-y += dts/
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_SOC_TYPE_XWAY) += xway/
+obj-$(CONFIG_SOC_FALCON) += falcon/
diff --git a/arch/mips/lantiq/Platform b/arch/mips/lantiq/Platform
index f3dff05722de..b3ec49838fd7 100644
--- a/arch/mips/lantiq/Platform
+++ b/arch/mips/lantiq/Platform
@@ -6,3 +6,4 @@ platform-$(CONFIG_LANTIQ) += lantiq/
cflags-$(CONFIG_LANTIQ) += -I$(srctree)/arch/mips/include/asm/mach-lantiq
load-$(CONFIG_LANTIQ) = 0xffffffff80002000
cflags-$(CONFIG_SOC_TYPE_XWAY) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/xway
+cflags-$(CONFIG_SOC_FALCON) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/falcon
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
index 412814fdd3ee..d3bcc33f4699 100644
--- a/arch/mips/lantiq/clk.c
+++ b/arch/mips/lantiq/clk.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/clk.h>
+#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/list.h>
@@ -22,44 +23,32 @@
#include <lantiq_soc.h>
#include "clk.h"
+#include "prom.h"
-struct clk {
- const char *name;
- unsigned long rate;
- unsigned long (*get_rate) (void);
-};
+/* lantiq socs have 3 static clocks */
+static struct clk cpu_clk_generic[3];
-static struct clk *cpu_clk;
-static int cpu_clk_cnt;
+void clkdev_add_static(unsigned long cpu, unsigned long fpi, unsigned long io)
+{
+ cpu_clk_generic[0].rate = cpu;
+ cpu_clk_generic[1].rate = fpi;
+ cpu_clk_generic[2].rate = io;
+}
-/* lantiq socs have 3 static clocks */
-static struct clk cpu_clk_generic[] = {
- {
- .name = "cpu",
- .get_rate = ltq_get_cpu_hz,
- }, {
- .name = "fpi",
- .get_rate = ltq_get_fpi_hz,
- }, {
- .name = "io",
- .get_rate = ltq_get_io_region_clock,
- },
-};
-
-static struct resource ltq_cgu_resource = {
- .name = "cgu",
- .start = LTQ_CGU_BASE_ADDR,
- .end = LTQ_CGU_BASE_ADDR + LTQ_CGU_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-/* remapped clock register range */
-void __iomem *ltq_cgu_membase;
-
-void clk_init(void)
+struct clk *clk_get_cpu(void)
+{
+ return &cpu_clk_generic[0];
+}
+
+struct clk *clk_get_fpi(void)
+{
+ return &cpu_clk_generic[1];
+}
+EXPORT_SYMBOL_GPL(clk_get_fpi);
+
+struct clk *clk_get_io(void)
{
- cpu_clk = cpu_clk_generic;
- cpu_clk_cnt = ARRAY_SIZE(cpu_clk_generic);
+ return &cpu_clk_generic[2];
}
static inline int clk_good(struct clk *clk)
@@ -82,38 +71,71 @@ unsigned long clk_get_rate(struct clk *clk)
}
EXPORT_SYMBOL(clk_get_rate);
-struct clk *clk_get(struct device *dev, const char *id)
+int clk_set_rate(struct clk *clk, unsigned long rate)
{
- int i;
-
- for (i = 0; i < cpu_clk_cnt; i++)
- if (!strcmp(id, cpu_clk[i].name))
- return &cpu_clk[i];
- BUG();
- return ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
- /* not used */
+ if (unlikely(!clk_good(clk)))
+ return 0;
+ if (clk->rates && *clk->rates) {
+ unsigned long *r = clk->rates;
+
+ while (*r && (*r != rate))
+ r++;
+ if (!*r) {
+ pr_err("clk %s.%s: trying to set invalid rate %ld\n",
+ clk->cl.dev_id, clk->cl.con_id, rate);
+ return -1;
+ }
+ }
+ clk->rate = rate;
+ return 0;
}
-EXPORT_SYMBOL(clk_put);
+EXPORT_SYMBOL(clk_set_rate);
int clk_enable(struct clk *clk)
{
- /* not used */
- return 0;
+ if (unlikely(!clk_good(clk)))
+ return -1;
+
+ if (clk->enable)
+ return clk->enable(clk);
+
+ return -1;
}
EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
- /* not used */
+ if (unlikely(!clk_good(clk)))
+ return;
+
+ if (clk->disable)
+ clk->disable(clk);
}
EXPORT_SYMBOL(clk_disable);
-static inline u32 ltq_get_counter_resolution(void)
+int clk_activate(struct clk *clk)
+{
+ if (unlikely(!clk_good(clk)))
+ return -1;
+
+ if (clk->activate)
+ return clk->activate(clk);
+
+ return -1;
+}
+EXPORT_SYMBOL(clk_activate);
+
+void clk_deactivate(struct clk *clk)
+{
+ if (unlikely(!clk_good(clk)))
+ return;
+
+ if (clk->deactivate)
+ clk->deactivate(clk);
+}
+EXPORT_SYMBOL(clk_deactivate);
+
+static inline u32 get_counter_resolution(void)
{
u32 res;
@@ -133,21 +155,11 @@ void __init plat_time_init(void)
{
struct clk *clk;
- if (insert_resource(&iomem_resource, &ltq_cgu_resource) < 0)
- panic("Failed to insert cgu memory");
+ ltq_soc_init();
- if (request_mem_region(ltq_cgu_resource.start,
- resource_size(&ltq_cgu_resource), "cgu") < 0)
- panic("Failed to request cgu memory");
-
- ltq_cgu_membase = ioremap_nocache(ltq_cgu_resource.start,
- resource_size(&ltq_cgu_resource));
- if (!ltq_cgu_membase) {
- pr_err("Failed to remap cgu memory\n");
- unreachable();
- }
- clk = clk_get(0, "cpu");
- mips_hpt_frequency = clk_get_rate(clk) / ltq_get_counter_resolution();
+ clk = clk_get_cpu();
+ mips_hpt_frequency = clk_get_rate(clk) / get_counter_resolution();
write_c0_compare(read_c0_count());
+ pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
clk_put(clk);
}
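
Editorial note: the new clk_set_rate() above accepts a rate only if the clock either has no rate table or the requested value appears in its zero-terminated list. A compilable sketch of that check, using a toy struct rather than the kernel's struct clk:

    #include <stdio.h>

    /* Sketch of the rate-table check: a clock may carry a zero-terminated
     * list of valid rates; anything not in the list is rejected. */
    struct toy_clk {
        unsigned long rate;
        unsigned long *rates;   /* optional, zero-terminated */
    };

    static int toy_clk_set_rate(struct toy_clk *clk, unsigned long rate)
    {
        if (clk->rates && *clk->rates) {
            unsigned long *r = clk->rates;

            while (*r && *r != rate)
                r++;
            if (!*r)
                return -1;      /* rate not in the table */
        }
        clk->rate = rate;
        return 0;
    }

    int main(void)
    {
        unsigned long table[] = { 100000000, 200000000, 0 };
        struct toy_clk clk = { .rate = 0, .rates = table };

        printf("%d\n", toy_clk_set_rate(&clk, 200000000)); /* 0, accepted */
        printf("%d\n", toy_clk_set_rate(&clk, 150000000)); /* -1, rejected */
        return 0;
    }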
diff --git a/arch/mips/lantiq/clk.h b/arch/mips/lantiq/clk.h
index 3328925f2c3f..fa670602b91b 100644
--- a/arch/mips/lantiq/clk.h
+++ b/arch/mips/lantiq/clk.h
@@ -9,10 +9,70 @@
#ifndef _LTQ_CLK_H__
#define _LTQ_CLK_H__
-extern void clk_init(void);
+#include <linux/clkdev.h>
-extern unsigned long ltq_get_cpu_hz(void);
-extern unsigned long ltq_get_fpi_hz(void);
-extern unsigned long ltq_get_io_region_clock(void);
+/* clock speeds */
+#define CLOCK_33M 33333333
+#define CLOCK_60M 60000000
+#define CLOCK_62_5M 62500000
+#define CLOCK_83M 83333333
+#define CLOCK_83_5M 83500000
+#define CLOCK_98_304M 98304000
+#define CLOCK_100M 100000000
+#define CLOCK_111M 111111111
+#define CLOCK_125M 125000000
+#define CLOCK_133M 133333333
+#define CLOCK_150M 150000000
+#define CLOCK_166M 166666666
+#define CLOCK_167M 166666667
+#define CLOCK_196_608M 196608000
+#define CLOCK_200M 200000000
+#define CLOCK_250M 250000000
+#define CLOCK_266M 266666666
+#define CLOCK_300M 300000000
+#define CLOCK_333M 333333333
+#define CLOCK_393M 393215332
+#define CLOCK_400M 400000000
+#define CLOCK_500M 500000000
+#define CLOCK_600M 600000000
+
+/* clock out speeds */
+#define CLOCK_32_768K 32768
+#define CLOCK_1_536M 1536000
+#define CLOCK_2_5M 2500000
+#define CLOCK_12M 12000000
+#define CLOCK_24M 24000000
+#define CLOCK_25M 25000000
+#define CLOCK_30M 30000000
+#define CLOCK_40M 40000000
+#define CLOCK_48M 48000000
+#define CLOCK_50M 50000000
+#define CLOCK_60M 60000000
+
+struct clk {
+ struct clk_lookup cl;
+ unsigned long rate;
+ unsigned long *rates;
+ unsigned int module;
+ unsigned int bits;
+ unsigned long (*get_rate) (void);
+ int (*enable) (struct clk *clk);
+ void (*disable) (struct clk *clk);
+ int (*activate) (struct clk *clk);
+ void (*deactivate) (struct clk *clk);
+ void (*reboot) (struct clk *clk);
+};
+
+extern void clkdev_add_static(unsigned long cpu, unsigned long fpi,
+ unsigned long io);
+
+extern unsigned long ltq_danube_cpu_hz(void);
+extern unsigned long ltq_danube_fpi_hz(void);
+
+extern unsigned long ltq_ar9_cpu_hz(void);
+extern unsigned long ltq_ar9_fpi_hz(void);
+
+extern unsigned long ltq_vr9_cpu_hz(void);
+extern unsigned long ltq_vr9_fpi_hz(void);
#endif
diff --git a/arch/mips/lantiq/devices.c b/arch/mips/lantiq/devices.c
deleted file mode 100644
index de1cb2bcd79a..000000000000
--- a/arch/mips/lantiq/devices.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/reboot.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/etherdevice.h>
-#include <linux/time.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-
-#include <lantiq_soc.h>
-
-#include "devices.h"
-
-/* nor flash */
-static struct resource ltq_nor_resource = {
- .name = "nor",
- .start = LTQ_FLASH_START,
- .end = LTQ_FLASH_START + LTQ_FLASH_MAX - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device ltq_nor = {
- .name = "ltq_nor",
- .resource = &ltq_nor_resource,
- .num_resources = 1,
-};
-
-void __init ltq_register_nor(struct physmap_flash_data *data)
-{
- ltq_nor.dev.platform_data = data;
- platform_device_register(&ltq_nor);
-}
-
-/* watchdog */
-static struct resource ltq_wdt_resource = {
- .name = "watchdog",
- .start = LTQ_WDT_BASE_ADDR,
- .end = LTQ_WDT_BASE_ADDR + LTQ_WDT_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-void __init ltq_register_wdt(void)
-{
- platform_device_register_simple("ltq_wdt", 0, &ltq_wdt_resource, 1);
-}
-
-/* asc ports */
-static struct resource ltq_asc0_resources[] = {
- {
- .name = "asc0",
- .start = LTQ_ASC0_BASE_ADDR,
- .end = LTQ_ASC0_BASE_ADDR + LTQ_ASC_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- IRQ_RES(tx, LTQ_ASC_TIR(0)),
- IRQ_RES(rx, LTQ_ASC_RIR(0)),
- IRQ_RES(err, LTQ_ASC_EIR(0)),
-};
-
-static struct resource ltq_asc1_resources[] = {
- {
- .name = "asc1",
- .start = LTQ_ASC1_BASE_ADDR,
- .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- IRQ_RES(tx, LTQ_ASC_TIR(1)),
- IRQ_RES(rx, LTQ_ASC_RIR(1)),
- IRQ_RES(err, LTQ_ASC_EIR(1)),
-};
-
-void __init ltq_register_asc(int port)
-{
- switch (port) {
- case 0:
- platform_device_register_simple("ltq_asc", 0,
- ltq_asc0_resources, ARRAY_SIZE(ltq_asc0_resources));
- break;
- case 1:
- platform_device_register_simple("ltq_asc", 1,
- ltq_asc1_resources, ARRAY_SIZE(ltq_asc1_resources));
- break;
- default:
- break;
- }
-}
-
-#ifdef CONFIG_PCI
-/* pci */
-static struct platform_device ltq_pci = {
- .name = "ltq_pci",
- .num_resources = 0,
-};
-
-void __init ltq_register_pci(struct ltq_pci_data *data)
-{
- ltq_pci.dev.platform_data = data;
- platform_device_register(&ltq_pci);
-}
-#else
-void __init ltq_register_pci(struct ltq_pci_data *data)
-{
- pr_err("kernel is compiled without PCI support\n");
-}
-#endif
diff --git a/arch/mips/lantiq/devices.h b/arch/mips/lantiq/devices.h
deleted file mode 100644
index 2947bb19a528..000000000000
--- a/arch/mips/lantiq/devices.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#ifndef _LTQ_DEVICES_H__
-#define _LTQ_DEVICES_H__
-
-#include <lantiq_platform.h>
-#include <linux/mtd/physmap.h>
-
-#define IRQ_RES(resname, irq) \
- {.name = #resname, .start = (irq), .flags = IORESOURCE_IRQ}
-
-extern void ltq_register_nor(struct physmap_flash_data *data);
-extern void ltq_register_wdt(void);
-extern void ltq_register_asc(int port);
-extern void ltq_register_pci(struct ltq_pci_data *data);
-
-#endif
diff --git a/arch/mips/lantiq/dts/Makefile b/arch/mips/lantiq/dts/Makefile
new file mode 100644
index 000000000000..674fca45f72d
--- /dev/null
+++ b/arch/mips/lantiq/dts/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_DT_EASY50712) := easy50712.dtb.o
+
+$(obj)/%.dtb: $(obj)/%.dts
+ $(call if_changed,dtc)
diff --git a/arch/mips/lantiq/dts/danube.dtsi b/arch/mips/lantiq/dts/danube.dtsi
new file mode 100644
index 000000000000..3a4520f009cf
--- /dev/null
+++ b/arch/mips/lantiq/dts/danube.dtsi
@@ -0,0 +1,105 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "lantiq,xway", "lantiq,danube";
+
+ cpus {
+ cpu@0 {
+ compatible = "mips,mips24Kc";
+ };
+ };
+
+ biu@1F800000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "lantiq,biu", "simple-bus";
+ reg = <0x1F800000 0x800000>;
+ ranges = <0x0 0x1F800000 0x7FFFFF>;
+
+ icu0: icu@80200 {
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "lantiq,icu";
+ reg = <0x80200 0x120>;
+ };
+
+ watchdog@803F0 {
+ compatible = "lantiq,wdt";
+ reg = <0x803F0 0x10>;
+ };
+ };
+
+ sram@1F000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "lantiq,sram";
+ reg = <0x1F000000 0x800000>;
+ ranges = <0x0 0x1F000000 0x7FFFFF>;
+
+ eiu0: eiu@101000 {
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ interrupt-parent;
+ compatible = "lantiq,eiu-xway";
+ reg = <0x101000 0x1000>;
+ };
+
+ pmu0: pmu@102000 {
+ compatible = "lantiq,pmu-xway";
+ reg = <0x102000 0x1000>;
+ };
+
+ cgu0: cgu@103000 {
+ compatible = "lantiq,cgu-xway";
+ reg = <0x103000 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ rcu0: rcu@203000 {
+ compatible = "lantiq,rcu-xway";
+ reg = <0x203000 0x1000>;
+ };
+ };
+
+ fpi@10000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "lantiq,fpi", "simple-bus";
+ ranges = <0x0 0x10000000 0xEEFFFFF>;
+ reg = <0x10000000 0xEF00000>;
+
+ gptu@E100A00 {
+ compatible = "lantiq,gptu-xway";
+ reg = <0xE100A00 0x100>;
+ };
+
+ serial@E100C00 {
+ compatible = "lantiq,asc";
+ reg = <0xE100C00 0x400>;
+ interrupt-parent = <&icu0>;
+ interrupts = <112 113 114>;
+ };
+
+ dma0: dma@E104100 {
+ compatible = "lantiq,dma-xway";
+ reg = <0xE104100 0x800>;
+ };
+
+ ebu0: ebu@E105300 {
+ compatible = "lantiq,ebu-xway";
+ reg = <0xE105300 0x100>;
+ };
+
+ pci0: pci@E105400 {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ compatible = "lantiq,pci-xway";
+ bus-range = <0x0 0x0>;
+ ranges = <0x2000000 0 0x8000000 0x8000000 0 0x2000000 /* pci memory */
+ 0x1000000 0 0x00000000 0xAE00000 0 0x200000>; /* io space */
+ reg = <0x7000000 0x8000 /* config space */
+ 0xE105400 0x400>; /* pci bridge */
+ };
+ };
+};
diff --git a/arch/mips/lantiq/dts/easy50712.dts b/arch/mips/lantiq/dts/easy50712.dts
new file mode 100644
index 000000000000..68c17310bc82
--- /dev/null
+++ b/arch/mips/lantiq/dts/easy50712.dts
@@ -0,0 +1,113 @@
+/dts-v1/;
+
+/include/ "danube.dtsi"
+
+/ {
+ chosen {
+ bootargs = "console=ttyLTQ0,115200 init=/etc/preinit";
+ };
+
+ memory@0 {
+ reg = <0x0 0x2000000>;
+ };
+
+ fpi@10000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ localbus@0 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x0 0x3ffffff /* addrsel0 */
+ 1 0 0x4000000 0x4000010>; /* addsel1 */
+ compatible = "lantiq,localbus", "simple-bus";
+
+ nor-boot@0 {
+ compatible = "lantiq,nor";
+ bank-width = <2>;
+ reg = <0 0x0 0x2000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x00000 0x10000>; /* 64 KB */
+ };
+
+ partition@10000 {
+ label = "uboot_env";
+ reg = <0x10000 0x10000>; /* 64 KB */
+ };
+
+ partition@20000 {
+ label = "linux";
+ reg = <0x20000 0x3d0000>;
+ };
+
+ partition@400000 {
+ label = "rootfs";
+ reg = <0x400000 0x400000>;
+ };
+ };
+ };
+
+ gpio: pinmux@E100B10 {
+ compatible = "lantiq,pinctrl-xway";
+ pinctrl-names = "default";
+ pinctrl-0 = <&state_default>;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+ reg = <0xE100B10 0xA0>;
+
+ state_default: pinmux {
+ stp {
+ lantiq,groups = "stp";
+ lantiq,function = "stp";
+ };
+ exin {
+ lantiq,groups = "exin1";
+ lantiq,function = "exin";
+ };
+ pci {
+ lantiq,groups = "gnt1";
+ lantiq,function = "pci";
+ };
+ conf_out {
+ lantiq,pins = "io4", "io5", "io6"; /* stp */
+ lantiq,open-drain;
+ lantiq,pull = <0>;
+ };
+ };
+ };
+
+ etop@E180000 {
+ compatible = "lantiq,etop-xway";
+ reg = <0xE180000 0x40000>;
+ interrupt-parent = <&icu0>;
+ interrupts = <73 78>;
+ phy-mode = "rmii";
+ mac-address = [ 00 11 22 33 44 55 ];
+ };
+
+ stp0: stp@E100BB0 {
+ #gpio-cells = <2>;
+ compatible = "lantiq,gpio-stp-xway";
+ gpio-controller;
+ reg = <0xE100BB0 0x40>;
+
+ lantiq,shadow = <0xfff>;
+ lantiq,groups = <0x3>;
+ };
+
+ pci@E105400 {
+ lantiq,bus-clock = <33333333>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ 0x7000 0 0 1 &icu0 29 1 // slot 14, irq 29
+ >;
+ gpios-reset = <&gpio 21 0>;
+ req-mask = <0x1>; /* GNT1 */
+ };
+
+ };
+};
diff --git a/arch/mips/lantiq/early_printk.c b/arch/mips/lantiq/early_printk.c
index 972e05f87631..9b28d0940ef4 100644
--- a/arch/mips/lantiq/early_printk.c
+++ b/arch/mips/lantiq/early_printk.c
@@ -6,17 +6,16 @@
* Copyright (C) 2010 John Crispin <blogic@openwrt.org>
*/
-#include <linux/init.h>
#include <linux/cpu.h>
-
-#include <lantiq.h>
#include <lantiq_soc.h>
-/* no ioremap possible at this early stage, lets use KSEG1 instead */
-#define LTQ_ASC_BASE KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
#define ASC_BUF 1024
-#define LTQ_ASC_FSTAT ((u32 *)(LTQ_ASC_BASE + 0x0048))
-#define LTQ_ASC_TBUF ((u32 *)(LTQ_ASC_BASE + 0x0020))
+#define LTQ_ASC_FSTAT ((u32 *)(LTQ_EARLY_ASC + 0x0048))
+#ifdef __BIG_ENDIAN
+#define LTQ_ASC_TBUF ((u32 *)(LTQ_EARLY_ASC + 0x0020 + 3))
+#else
+#define LTQ_ASC_TBUF ((u32 *)(LTQ_EARLY_ASC + 0x0020))
+#endif
#define TXMASK 0x3F00
#define TXOFFSET 8
@@ -27,7 +26,7 @@ void prom_putchar(char c)
local_irq_save(flags);
do { } while ((ltq_r32(LTQ_ASC_FSTAT) & TXMASK) >> TXOFFSET);
if (c == '\n')
- ltq_w32('\r', LTQ_ASC_TBUF);
- ltq_w32(c, LTQ_ASC_TBUF);
+ ltq_w8('\r', LTQ_ASC_TBUF);
+ ltq_w8(c, LTQ_ASC_TBUF);
local_irq_restore(flags);
}
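
Editorial note: the early-printk change switches to byte writes because the ASC transmit buffer is a byte-wide FIFO behind a 32-bit register; on a big-endian bus the usable byte lane sits at offset +3. An illustrative address calculation follows; the base address is only an example, not a statement about LTQ_EARLY_ASC.

    #include <stdint.h>
    #include <stdio.h>

    /* Pick the byte lane of the 32-bit TBUF register that actually reaches
     * the FIFO, matching the #ifdef __BIG_ENDIAN adjustment in the patch. */
    static uintptr_t tbuf_addr(uintptr_t base, int big_endian)
    {
        uintptr_t reg = base + 0x20;    /* TBUF register offset */
        return big_endian ? reg + 3 : reg;
    }

    int main(void)
    {
        printf("LE: %#lx\n", (unsigned long)tbuf_addr(0x1e100c00, 0));
        printf("BE: %#lx\n", (unsigned long)tbuf_addr(0x1e100c00, 1));
        return 0;
    }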
diff --git a/arch/mips/lantiq/falcon/Makefile b/arch/mips/lantiq/falcon/Makefile
new file mode 100644
index 000000000000..ff220f97693d
--- /dev/null
+++ b/arch/mips/lantiq/falcon/Makefile
@@ -0,0 +1 @@
+obj-y := prom.o reset.o sysctrl.o
diff --git a/arch/mips/lantiq/falcon/prom.c b/arch/mips/lantiq/falcon/prom.c
new file mode 100644
index 000000000000..c1d278f05a3a
--- /dev/null
+++ b/arch/mips/lantiq/falcon/prom.c
@@ -0,0 +1,87 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <asm/io.h>
+
+#include <lantiq_soc.h>
+
+#include "../prom.h"
+
+#define SOC_FALCON "Falcon"
+#define SOC_FALCON_D "Falcon-D"
+#define SOC_FALCON_V "Falcon-V"
+#define SOC_FALCON_M "Falcon-M"
+
+#define COMP_FALCON "lantiq,falcon"
+
+#define PART_SHIFT 12
+#define PART_MASK 0x0FFFF000
+#define REV_SHIFT 28
+#define REV_MASK 0xF0000000
+#define SREV_SHIFT 22
+#define SREV_MASK 0x03C00000
+#define TYPE_SHIFT 26
+#define TYPE_MASK 0x3C000000
+
+/* reset, nmi and ejtag exception vectors */
+#define BOOT_REG_BASE (KSEG1 | 0x1F200000)
+#define BOOT_RVEC (BOOT_REG_BASE | 0x00)
+#define BOOT_NVEC (BOOT_REG_BASE | 0x04)
+#define BOOT_EVEC (BOOT_REG_BASE | 0x08)
+
+void __init ltq_soc_nmi_setup(void)
+{
+ extern void (*nmi_handler)(void);
+
+ ltq_w32((unsigned long)&nmi_handler, (void *)BOOT_NVEC);
+}
+
+void __init ltq_soc_ejtag_setup(void)
+{
+ extern void (*ejtag_debug_handler)(void);
+
+ ltq_w32((unsigned long)&ejtag_debug_handler, (void *)BOOT_EVEC);
+}
+
+void __init ltq_soc_detect(struct ltq_soc_info *i)
+{
+ u32 type;
+ i->partnum = (ltq_r32(FALCON_CHIPID) & PART_MASK) >> PART_SHIFT;
+ i->rev = (ltq_r32(FALCON_CHIPID) & REV_MASK) >> REV_SHIFT;
+ i->srev = ((ltq_r32(FALCON_CHIPCONF) & SREV_MASK) >> SREV_SHIFT);
+ i->compatible = COMP_FALCON;
+ i->type = SOC_TYPE_FALCON;
+ sprintf(i->rev_type, "%c%d%d", (i->srev & 0x4) ? ('B') : ('A'),
+ i->rev & 0x7, (i->srev & 0x3) + 1);
+
+ switch (i->partnum) {
+ case SOC_ID_FALCON:
+ type = (ltq_r32(FALCON_CHIPTYPE) & TYPE_MASK) >> TYPE_SHIFT;
+ switch (type) {
+ case 0:
+ i->name = SOC_FALCON_D;
+ break;
+ case 1:
+ i->name = SOC_FALCON_V;
+ break;
+ case 2:
+ i->name = SOC_FALCON_M;
+ break;
+ default:
+ i->name = SOC_FALCON;
+ break;
+ }
+ break;
+
+ default:
+ unreachable();
+ break;
+ }
+}
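
Editorial note: ltq_soc_detect() above decodes the part number and revision by masking and shifting fields out of the chip-ID register. A standalone sketch of that decoding; the register value below is invented purely for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define PART_SHIFT 12
    #define PART_MASK  0x0FFFF000
    #define REV_SHIFT  28
    #define REV_MASK   0xF0000000

    int main(void)
    {
        uint32_t chipid = 0x101b8083;   /* hypothetical FALCON_CHIPID value */
        uint32_t part = (chipid & PART_MASK) >> PART_SHIFT;
        uint32_t rev  = (chipid & REV_MASK) >> REV_SHIFT;

        printf("partnum %#x rev %u\n", (unsigned)part, (unsigned)rev);
        return 0;
    }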
diff --git a/arch/mips/lantiq/falcon/reset.c b/arch/mips/lantiq/falcon/reset.c
new file mode 100644
index 000000000000..568248253426
--- /dev/null
+++ b/arch/mips/lantiq/falcon/reset.c
@@ -0,0 +1,90 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
+#include <linux/export.h>
+
+#include <lantiq_soc.h>
+
+/* CPU0 Reset Source Register */
+#define SYS1_CPU0RS 0x0040
+/* reset cause mask */
+#define CPU0RS_MASK 0x0003
+/* CPU0 Boot Mode Register */
+#define SYS1_BM 0x00a0
+/* boot mode mask */
+#define BM_MASK 0x0005
+
+/* allow platform code to find out what source we booted from */
+unsigned char ltq_boot_select(void)
+{
+ return ltq_sys1_r32(SYS1_BM) & BM_MASK;
+}
+
+/* allow the watchdog driver to find out what the boot reason was */
+int ltq_reset_cause(void)
+{
+ return ltq_sys1_r32(SYS1_CPU0RS) & CPU0RS_MASK;
+}
+EXPORT_SYMBOL_GPL(ltq_reset_cause);
+
+#define BOOT_REG_BASE (KSEG1 | 0x1F200000)
+#define BOOT_PW1_REG (BOOT_REG_BASE | 0x20)
+#define BOOT_PW2_REG (BOOT_REG_BASE | 0x24)
+#define BOOT_PW1 0x4C545100
+#define BOOT_PW2 0x0051544C
+
+#define WDT_REG_BASE (KSEG1 | 0x1F8803F0)
+#define WDT_PW1 0x00BE0000
+#define WDT_PW2 0x00DC0000
+
+static void machine_restart(char *command)
+{
+ local_irq_disable();
+
+ /* reboot magic */
+ ltq_w32(BOOT_PW1, (void *)BOOT_PW1_REG); /* 'LTQ\0' */
+ ltq_w32(BOOT_PW2, (void *)BOOT_PW2_REG); /* '\0QTL' */
+ ltq_w32(0, (void *)BOOT_REG_BASE); /* reset Bootreg RVEC */
+
+ /* watchdog magic */
+ ltq_w32(WDT_PW1, (void *)WDT_REG_BASE);
+ ltq_w32(WDT_PW2 |
+ (0x3 << 26) | /* PWL */
+ (0x2 << 24) | /* CLKDIV */
+ (0x1 << 31) | /* enable */
+ (1), /* reload */
+ (void *)WDT_REG_BASE);
+ unreachable();
+}
+
+static void machine_halt(void)
+{
+ local_irq_disable();
+ unreachable();
+}
+
+static void machine_power_off(void)
+{
+ local_irq_disable();
+ unreachable();
+}
+
+static int __init mips_reboot_setup(void)
+{
+ _machine_restart = machine_restart;
+ _machine_halt = machine_halt;
+ pm_power_off = machine_power_off;
+ return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
new file mode 100644
index 000000000000..ba0123d13d40
--- /dev/null
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -0,0 +1,260 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+#include <asm/delay.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+
+/* infrastructure control register */
+#define SYS1_INFRAC 0x00bc
+/* Configuration fuses for drivers and pll */
+#define STATUS_CONFIG 0x0040
+
+/* GPE frequency selection */
+#define GPPC_OFFSET 24
+#define GPEFREQ_MASK 0x00000C0
+#define GPEFREQ_OFFSET 10
+/* Clock status register */
+#define SYSCTL_CLKS 0x0000
+/* Clock enable register */
+#define SYSCTL_CLKEN 0x0004
+/* Clock clear register */
+#define SYSCTL_CLKCLR 0x0008
+/* Activation Status Register */
+#define SYSCTL_ACTS 0x0020
+/* Activation Register */
+#define SYSCTL_ACT 0x0024
+/* Deactivation Register */
+#define SYSCTL_DEACT 0x0028
+/* reboot Register */
+#define SYSCTL_RBT 0x002c
+/* CPU0 Clock Control Register */
+#define SYS1_CPU0CC 0x0040
+/* HRST_OUT_N Control Register */
+#define SYS1_HRSTOUTC 0x00c0
+/* clock divider bit */
+#define CPU0CC_CPUDIV 0x0001
+
+/* Activation Status Register */
+#define ACTS_ASC1_ACT 0x00000800
+#define ACTS_I2C_ACT 0x00004000
+#define ACTS_P0 0x00010000
+#define ACTS_P1 0x00010000
+#define ACTS_P2 0x00020000
+#define ACTS_P3 0x00020000
+#define ACTS_P4 0x00040000
+#define ACTS_PADCTRL0 0x00100000
+#define ACTS_PADCTRL1 0x00100000
+#define ACTS_PADCTRL2 0x00200000
+#define ACTS_PADCTRL3 0x00200000
+#define ACTS_PADCTRL4 0x00400000
+
+#define sysctl_w32(m, x, y) ltq_w32((x), sysctl_membase[m] + (y))
+#define sysctl_r32(m, x) ltq_r32(sysctl_membase[m] + (x))
+#define sysctl_w32_mask(m, clear, set, reg) \
+ sysctl_w32(m, (sysctl_r32(m, reg) & ~(clear)) | (set), reg)
+
+#define status_w32(x, y) ltq_w32((x), status_membase + (y))
+#define status_r32(x) ltq_r32(status_membase + (x))
+
+static void __iomem *sysctl_membase[3], *status_membase;
+void __iomem *ltq_sys1_membase, *ltq_ebu_membase;
+
+void falcon_trigger_hrst(int level)
+{
+ sysctl_w32(SYSCTL_SYS1, level & 1, SYS1_HRSTOUTC);
+}
+
+static inline void sysctl_wait(struct clk *clk,
+ unsigned int test, unsigned int reg)
+{
+ int err = 1000000;
+
+ do {} while (--err && ((sysctl_r32(clk->module, reg)
+ & clk->bits) != test));
+ if (!err)
+ pr_err("module de/activation failed %d %08X %08X %08X\n",
+ clk->module, clk->bits, test,
+ sysctl_r32(clk->module, reg) & clk->bits);
+}
+
+static int sysctl_activate(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
+ sysctl_w32(clk->module, clk->bits, SYSCTL_ACT);
+ sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
+ return 0;
+}
+
+static void sysctl_deactivate(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
+ sysctl_w32(clk->module, clk->bits, SYSCTL_DEACT);
+ sysctl_wait(clk, 0, SYSCTL_ACTS);
+}
+
+static int sysctl_clken(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
+ sysctl_wait(clk, clk->bits, SYSCTL_CLKS);
+ return 0;
+}
+
+static void sysctl_clkdis(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
+ sysctl_wait(clk, 0, SYSCTL_CLKS);
+}
+
+static void sysctl_reboot(struct clk *clk)
+{
+ unsigned int act;
+ unsigned int bits;
+
+ act = sysctl_r32(clk->module, SYSCTL_ACT);
+ bits = ~act & clk->bits;
+ if (bits != 0) {
+ sysctl_w32(clk->module, bits, SYSCTL_CLKEN);
+ sysctl_w32(clk->module, bits, SYSCTL_ACT);
+ sysctl_wait(clk, bits, SYSCTL_ACTS);
+ }
+ sysctl_w32(clk->module, act & clk->bits, SYSCTL_RBT);
+ sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
+}
+
+/* enable the ONU core */
+static void falcon_gpe_enable(void)
+{
+ unsigned int freq;
+ unsigned int status;
+
+ /* check if the clock is already enabled */
+ status = sysctl_r32(SYSCTL_SYS1, SYS1_INFRAC);
+ if (status & (1 << (GPPC_OFFSET + 1)))
+ return;
+
+ if (status_r32(STATUS_CONFIG) == 0)
+ freq = 1; /* use 625MHz on unfused chip */
+ else
+ freq = (status_r32(STATUS_CONFIG) &
+ GPEFREQ_MASK) >>
+ GPEFREQ_OFFSET;
+
+ /* apply new frequency */
+ sysctl_w32_mask(SYSCTL_SYS1, 7 << (GPPC_OFFSET + 1),
+ freq << (GPPC_OFFSET + 2), SYS1_INFRAC);
+ udelay(1);
+
+ /* enable new frequency */
+ sysctl_w32_mask(SYSCTL_SYS1, 0, 1 << (GPPC_OFFSET + 1), SYS1_INFRAC);
+ udelay(1);
+}
+
+static inline void clkdev_add_sys(const char *dev, unsigned int module,
+ unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = NULL;
+ clk->cl.clk = clk;
+ clk->module = module;
+ clk->activate = sysctl_activate;
+ clk->deactivate = sysctl_deactivate;
+ clk->enable = sysctl_clken;
+ clk->disable = sysctl_clkdis;
+ clk->reboot = sysctl_reboot;
+ clkdev_add(&clk->cl);
+}
+
+void __init ltq_soc_init(void)
+{
+ struct device_node *np_status =
+ of_find_compatible_node(NULL, NULL, "lantiq,status-falcon");
+ struct device_node *np_ebu =
+ of_find_compatible_node(NULL, NULL, "lantiq,ebu-falcon");
+ struct device_node *np_sys1 =
+ of_find_compatible_node(NULL, NULL, "lantiq,sys1-falcon");
+ struct device_node *np_syseth =
+ of_find_compatible_node(NULL, NULL, "lantiq,syseth-falcon");
+ struct device_node *np_sysgpe =
+ of_find_compatible_node(NULL, NULL, "lantiq,sysgpe-falcon");
+ struct resource res_status, res_ebu, res_sys[3];
+ int i;
+
+ /* check if all the core register ranges are available */
+ if (!np_status || !np_ebu || !np_sys1 || !np_syseth || !np_sysgpe)
+ panic("Failed to load core nodes from devicetree");
+
+ if (of_address_to_resource(np_status, 0, &res_status) ||
+ of_address_to_resource(np_ebu, 0, &res_ebu) ||
+ of_address_to_resource(np_sys1, 0, &res_sys[0]) ||
+ of_address_to_resource(np_syseth, 0, &res_sys[1]) ||
+ of_address_to_resource(np_sysgpe, 0, &res_sys[2]))
+ panic("Failed to get core resources");
+
+ if ((request_mem_region(res_status.start, resource_size(&res_status),
+ res_status.name) < 0) ||
+ (request_mem_region(res_ebu.start, resource_size(&res_ebu),
+ res_ebu.name) < 0) ||
+ (request_mem_region(res_sys[0].start,
+ resource_size(&res_sys[0]),
+ res_sys[0].name) < 0) ||
+ (request_mem_region(res_sys[1].start,
+ resource_size(&res_sys[1]),
+ res_sys[1].name) < 0) ||
+ (request_mem_region(res_sys[2].start,
+ resource_size(&res_sys[2]),
+ res_sys[2].name) < 0))
+ pr_err("Failed to request core reources");
+
+ status_membase = ioremap_nocache(res_status.start,
+ resource_size(&res_status));
+ ltq_ebu_membase = ioremap_nocache(res_ebu.start,
+ resource_size(&res_ebu));
+
+ if (!status_membase || !ltq_ebu_membase)
+ panic("Failed to remap core resources");
+
+ for (i = 0; i < 3; i++) {
+ sysctl_membase[i] = ioremap_nocache(res_sys[i].start,
+ resource_size(&res_sys[i]));
+ if (!sysctl_membase[i])
+ panic("Failed to remap sysctrl resources");
+ }
+ ltq_sys1_membase = sysctl_membase[0];
+
+ falcon_gpe_enable();
+
+ /* get our 3 static rates for cpu, fpi and io clocks */
+ if (ltq_sys1_r32(SYS1_CPU0CC) & CPU0CC_CPUDIV)
+ clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M);
+ else
+ clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M);
+
+ /* add our clock domains */
+ clkdev_add_sys("1d810000.gpio", SYSCTL_SYSETH, ACTS_P0);
+ clkdev_add_sys("1d810100.gpio", SYSCTL_SYSETH, ACTS_P2);
+ clkdev_add_sys("1e800100.gpio", SYSCTL_SYS1, ACTS_P1);
+ clkdev_add_sys("1e800200.gpio", SYSCTL_SYS1, ACTS_P3);
+ clkdev_add_sys("1e800300.gpio", SYSCTL_SYS1, ACTS_P4);
+ clkdev_add_sys("1db01000.pad", SYSCTL_SYSETH, ACTS_PADCTRL0);
+ clkdev_add_sys("1db02000.pad", SYSCTL_SYSETH, ACTS_PADCTRL2);
+ clkdev_add_sys("1e800400.pad", SYSCTL_SYS1, ACTS_PADCTRL1);
+ clkdev_add_sys("1e800500.pad", SYSCTL_SYS1, ACTS_PADCTRL3);
+ clkdev_add_sys("1e800600.pad", SYSCTL_SYS1, ACTS_PADCTRL4);
+ clkdev_add_sys("1e100C00.serial", SYSCTL_SYS1, ACTS_ASC1_ACT);
+ clkdev_add_sys("1e200000.i2c", SYSCTL_SYS1, ACTS_I2C_ACT);
+}
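
Editorial note: sysctl_wait() above polls an activation/clock status register until the requested bits reach the expected value, giving up after a bounded number of retries so a dead module cannot hang the boot. A toy version of that pattern against a fake status word:

    #include <stdio.h>

    /* Bounded poll: spin until (*reg & bits) == want or the retry budget
     * runs out; returns 0 on success, -1 on timeout. */
    static int toy_wait_bits(volatile unsigned int *reg,
                             unsigned int bits, unsigned int want)
    {
        int retries = 1000000;

        while (--retries && ((*reg & bits) != want))
            ;
        return retries ? 0 : -1;
    }

    int main(void)
    {
        volatile unsigned int status = 0x4;     /* pretend the module is active */

        printf("%d\n", toy_wait_bits(&status, 0x4, 0x4));   /* 0: bits already set */
        printf("%d\n", toy_wait_bits(&status, 0x8, 0x8));   /* -1: never comes up */
        return 0;
    }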
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index d673731c538a..57c1a4e51408 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -9,6 +9,11 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/irqdomain.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>
@@ -16,7 +21,7 @@
#include <lantiq_soc.h>
#include <irq.h>
-/* register definitions */
+/* register definitions - internal irqs */
#define LTQ_ICU_IM0_ISR 0x0000
#define LTQ_ICU_IM0_IER 0x0008
#define LTQ_ICU_IM0_IOSR 0x0010
@@ -25,6 +30,7 @@
#define LTQ_ICU_IM1_ISR 0x0028
#define LTQ_ICU_OFFSET (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)
+/* register definitions - external irqs */
#define LTQ_EIU_EXIN_C 0x0000
#define LTQ_EIU_EXIN_INIC 0x0004
#define LTQ_EIU_EXIN_INEN 0x000C
@@ -37,10 +43,14 @@
#define LTQ_EIU_IR4 (INT_NUM_IM1_IRL0 + 1)
#define LTQ_EIU_IR5 (INT_NUM_IM1_IRL0 + 2)
#define LTQ_EIU_IR6 (INT_NUM_IM2_IRL0 + 30)
-
+#define XWAY_EXIN_COUNT 3
#define MAX_EIU 6
-/* irqs generated by device attached to the EBU need to be acked in
+/* the performance counter */
+#define LTQ_PERF_IRQ (INT_NUM_IM4_IRL0 + 31)
+
+/*
+ * irqs generated by devices attached to the EBU need to be acked in
* a special manner
*/
#define LTQ_ICU_EBU_IRQ 22
@@ -51,6 +61,17 @@
#define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x))
+/* our 2 ipi interrupts for VSMP */
+#define MIPS_CPU_IPI_RESCHED_IRQ 0
+#define MIPS_CPU_IPI_CALL_IRQ 1
+
+/* we have a cascade of 8 irqs */
+#define MIPS_CPU_IRQ_CASCADE 8
+
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+int gic_present;
+#endif
+
static unsigned short ltq_eiu_irq[MAX_EIU] = {
LTQ_EIU_IR0,
LTQ_EIU_IR1,
@@ -60,64 +81,51 @@ static unsigned short ltq_eiu_irq[MAX_EIU] = {
LTQ_EIU_IR5,
};
-static struct resource ltq_icu_resource = {
- .name = "icu",
- .start = LTQ_ICU_BASE_ADDR,
- .end = LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct resource ltq_eiu_resource = {
- .name = "eiu",
- .start = LTQ_EIU_BASE_ADDR,
- .end = LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
+static int exin_avail;
static void __iomem *ltq_icu_membase;
static void __iomem *ltq_eiu_membase;
void ltq_disable_irq(struct irq_data *d)
{
u32 ier = LTQ_ICU_IM0_IER;
- int irq_nr = d->irq - INT_NUM_IRQ0;
+ int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
- ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
- irq_nr %= INT_NUM_IM_OFFSET;
- ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
+ ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+ offset %= INT_NUM_IM_OFFSET;
+ ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
}
void ltq_mask_and_ack_irq(struct irq_data *d)
{
u32 ier = LTQ_ICU_IM0_IER;
u32 isr = LTQ_ICU_IM0_ISR;
- int irq_nr = d->irq - INT_NUM_IRQ0;
+ int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
- ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
- isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
- irq_nr %= INT_NUM_IM_OFFSET;
- ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
- ltq_icu_w32((1 << irq_nr), isr);
+ ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+ isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+ offset %= INT_NUM_IM_OFFSET;
+ ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
+ ltq_icu_w32(BIT(offset), isr);
}
static void ltq_ack_irq(struct irq_data *d)
{
u32 isr = LTQ_ICU_IM0_ISR;
- int irq_nr = d->irq - INT_NUM_IRQ0;
+ int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
- isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
- irq_nr %= INT_NUM_IM_OFFSET;
- ltq_icu_w32((1 << irq_nr), isr);
+ isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+ offset %= INT_NUM_IM_OFFSET;
+ ltq_icu_w32(BIT(offset), isr);
}
void ltq_enable_irq(struct irq_data *d)
{
u32 ier = LTQ_ICU_IM0_IER;
- int irq_nr = d->irq - INT_NUM_IRQ0;
+ int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
- ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
- irq_nr %= INT_NUM_IM_OFFSET;
- ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier);
+ ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+ offset %= INT_NUM_IM_OFFSET;
+ ltq_icu_w32(ltq_icu_r32(ier) | BIT(offset), ier);
}
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
@@ -126,15 +134,15 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
ltq_enable_irq(d);
for (i = 0; i < MAX_EIU; i++) {
- if (d->irq == ltq_eiu_irq[i]) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
/* low level - we should really handle set_type */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
(0x6 << (i * 4)), LTQ_EIU_EXIN_C);
/* clear all pending */
- ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i),
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~BIT(i),
LTQ_EIU_EXIN_INIC);
/* enable */
- ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i),
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
LTQ_EIU_EXIN_INEN);
break;
}
@@ -149,9 +157,9 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d)
ltq_disable_irq(d);
for (i = 0; i < MAX_EIU; i++) {
- if (d->irq == ltq_eiu_irq[i]) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
/* disable */
- ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i),
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
LTQ_EIU_EXIN_INEN);
break;
}
@@ -188,14 +196,15 @@ static void ltq_hw_irqdispatch(int module)
if (irq == 0)
return;
- /* silicon bug causes only the msb set to 1 to be valid. all
+ /*
+ * silicon bug causes only the msb set to 1 to be valid. all
* other bits might be bogus
*/
irq = __fls(irq);
- do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module));
+ do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
/* if this is a EBU irq, we need to ack it or get a deadlock */
- if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0))
+ if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
LTQ_EBU_PCC_ISTAT);
}
@@ -216,6 +225,47 @@ static void ltq_hw5_irqdispatch(void)
do_IRQ(MIPS_CPU_TIMER_IRQ);
}
+#ifdef CONFIG_MIPS_MT_SMP
+void __init arch_init_ipiirq(int irq, struct irqaction *action)
+{
+ setup_irq(irq, action);
+ irq_set_handler(irq, handle_percpu_irq);
+}
+
+static void ltq_sw0_irqdispatch(void)
+{
+ do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
+}
+
+static void ltq_sw1_irqdispatch(void)
+{
+ do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
+}
+static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
+{
+ scheduler_ipi();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
+{
+ smp_call_function_interrupt();
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq_resched = {
+ .handler = ipi_resched_interrupt,
+ .flags = IRQF_PERCPU,
+ .name = "IPI_resched"
+};
+
+static struct irqaction irq_call = {
+ .handler = ipi_call_interrupt,
+ .flags = IRQF_PERCPU,
+ .name = "IPI_call"
+};
+#endif
+
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
@@ -238,45 +288,75 @@ out:
return;
}
+static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ struct irq_chip *chip = &ltq_irq_type;
+ int i;
+
+ for (i = 0; i < exin_avail; i++)
+ if (hw == ltq_eiu_irq[i])
+ chip = &ltq_eiu_type;
+
+ irq_set_chip_and_handler(hw, chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops irq_domain_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = icu_map,
+};
+
static struct irqaction cascade = {
.handler = no_action,
.name = "cascade",
};
-void __init arch_init_irq(void)
+int __init icu_of_init(struct device_node *node, struct device_node *parent)
{
+ struct device_node *eiu_node;
+ struct resource res;
int i;
- if (insert_resource(&iomem_resource, &ltq_icu_resource) < 0)
- panic("Failed to insert icu memory");
+ if (of_address_to_resource(node, 0, &res))
+ panic("Failed to get icu memory range");
- if (request_mem_region(ltq_icu_resource.start,
- resource_size(&ltq_icu_resource), "icu") < 0)
- panic("Failed to request icu memory");
+ if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
+ pr_err("Failed to request icu memory");
- ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start,
- resource_size(&ltq_icu_resource));
+ ltq_icu_membase = ioremap_nocache(res.start, resource_size(&res));
if (!ltq_icu_membase)
panic("Failed to remap icu memory");
- if (insert_resource(&iomem_resource, &ltq_eiu_resource) < 0)
- panic("Failed to insert eiu memory");
-
- if (request_mem_region(ltq_eiu_resource.start,
- resource_size(&ltq_eiu_resource), "eiu") < 0)
- panic("Failed to request eiu memory");
-
- ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start,
- resource_size(&ltq_eiu_resource));
- if (!ltq_eiu_membase)
- panic("Failed to remap eiu memory");
+ /* the external interrupts are optional and xway only */
+ eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu");
+ if (eiu_node && of_address_to_resource(eiu_node, 0, &res)) {
+ /* find out how many external irq sources we have */
+ const __be32 *count = of_get_property(node,
+ "lantiq,count", NULL);
+
+ if (count)
+ exin_avail = *count;
+ if (exin_avail > MAX_EIU)
+ exin_avail = MAX_EIU;
+
+ if (request_mem_region(res.start, resource_size(&res),
+ res.name) < 0)
+ pr_err("Failed to request eiu memory");
+
+ ltq_eiu_membase = ioremap_nocache(res.start,
+ resource_size(&res));
+ if (!ltq_eiu_membase)
+ panic("Failed to remap eiu memory");
+ }
- /* make sure all irqs are turned off by default */
- for (i = 0; i < 5; i++)
+ /* turn off all irqs by default */
+ for (i = 0; i < 5; i++) {
+ /* make sure all irqs are turned off by default */
ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET));
-
- /* clear all possibly pending interrupts */
- ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
+ /* clear all possibly pending interrupts */
+ ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
+ }
mips_cpu_irq_init();
@@ -293,20 +373,19 @@ void __init arch_init_irq(void)
set_vi_handler(7, ltq_hw5_irqdispatch);
}
- for (i = INT_NUM_IRQ0;
- i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++)
- if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) ||
- (i == LTQ_EIU_IR2))
- irq_set_chip_and_handler(i, &ltq_eiu_type,
- handle_level_irq);
- /* EIU3-5 only exist on ar9 and vr9 */
- else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) ||
- (i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9()))
- irq_set_chip_and_handler(i, &ltq_eiu_type,
- handle_level_irq);
- else
- irq_set_chip_and_handler(i, &ltq_irq_type,
- handle_level_irq);
+ irq_domain_add_linear(node, 6 * INT_NUM_IM_OFFSET,
+ &irq_domain_ops, 0);
+
+#if defined(CONFIG_MIPS_MT_SMP)
+ if (cpu_has_vint) {
+ pr_info("Setting up IPI vectored interrupts\n");
+ set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ltq_sw0_irqdispatch);
+ set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ltq_sw1_irqdispatch);
+ }
+ arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ,
+ &irq_resched);
+ arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
+#endif
#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
@@ -315,9 +394,23 @@ void __init arch_init_irq(void)
set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#endif
+
+ /* tell oprofile which irq to use */
+ cp0_perfcount_irq = LTQ_PERF_IRQ;
+ return 0;
}
unsigned int __cpuinit get_c0_compare_int(void)
{
return CP0_LEGACY_COMPARE_IRQ;
}
+
+static struct of_device_id __initdata of_irq_ids[] = {
+ { .compatible = "lantiq,icu", .data = icu_of_init },
+ {},
+};
+
+void __init arch_init_irq(void)
+{
+ of_irq_init(of_irq_ids);
+}
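
Editorial note: icu_map() above assigns the EIU irq_chip to hardware interrupts that correspond to external interrupt lines and the plain ICU chip to everything else. A small sketch of that lookup; the hwirq numbers in the table are placeholders, not the real ltq_eiu_irq[] values.

    #include <stdio.h>

    /* Toy model of the chip selection in icu_map(). */
    static const int eiu_lines[] = { 30, 135, 167 };    /* hypothetical hwirqs */
    static const int exin_avail = 3;

    static const char *pick_chip(int hwirq)
    {
        int i;

        for (i = 0; i < exin_avail; i++)
            if (hwirq == eiu_lines[i])
                return "ltq_eiu_type";
        return "ltq_irq_type";
    }

    int main(void)
    {
        printf("hwirq 135 -> %s\n", pick_chip(135));
        printf("hwirq 22  -> %s\n", pick_chip(22));
        return 0;
    }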
diff --git a/arch/mips/lantiq/machtypes.h b/arch/mips/lantiq/machtypes.h
deleted file mode 100644
index 7e01b8c484eb..000000000000
--- a/arch/mips/lantiq/machtypes.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#ifndef _LANTIQ_MACH_H__
-#define _LANTIQ_MACH_H__
-
-#include <asm/mips_machine.h>
-
-enum lantiq_mach_type {
- LTQ_MACH_GENERIC = 0,
- LTQ_MACH_EASY50712, /* Danube evaluation board */
- LTQ_MACH_EASY50601, /* Amazon SE evaluation board */
-};
-
-#endif
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index e34fcfd0d5ca..d185e8477fdf 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -8,6 +8,7 @@
#include <linux/export.h>
#include <linux/clk.h>
+#include <linux/of_platform.h>
#include <asm/bootinfo.h>
#include <asm/time.h>
@@ -16,19 +17,15 @@
#include "prom.h"
#include "clk.h"
-static struct ltq_soc_info soc_info;
-
-unsigned int ltq_get_cpu_ver(void)
-{
- return soc_info.rev;
-}
-EXPORT_SYMBOL(ltq_get_cpu_ver);
+/* access to the ebu needs to be locked between different drivers */
+DEFINE_SPINLOCK(ebu_lock);
+EXPORT_SYMBOL_GPL(ebu_lock);
-unsigned int ltq_get_soc_type(void)
-{
- return soc_info.type;
-}
-EXPORT_SYMBOL(ltq_get_soc_type);
+/*
+ * this struct is filled by the soc specific detection code and holds
+ * information about the specific soc type, revision and name
+ */
+static struct ltq_soc_info soc_info;
const char *get_system_type(void)
{
@@ -45,27 +42,62 @@ static void __init prom_init_cmdline(void)
char **argv = (char **) KSEG1ADDR(fw_arg1);
int i;
+ arcs_cmdline[0] = '\0';
+
for (i = 0; i < argc; i++) {
- char *p = (char *) KSEG1ADDR(argv[i]);
+ char *p = (char *) KSEG1ADDR(argv[i]);
- if (p && *p) {
+ if (CPHYSADDR(p) && *p) {
strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
}
}
}
-void __init prom_init(void)
+void __init plat_mem_setup(void)
{
- struct clk *clk;
+ ioport_resource.start = IOPORT_RESOURCE_START;
+ ioport_resource.end = IOPORT_RESOURCE_END;
+ iomem_resource.start = IOMEM_RESOURCE_START;
+ iomem_resource.end = IOMEM_RESOURCE_END;
+
+ set_io_port_base((unsigned long) KSEG1);
+ /*
+ * Load the builtin devicetree. This causes the chosen node to be
+ * parsed resulting in our memory appearing
+ */
+ __dt_setup_arch(&__dtb_start);
+}
+
+void __init prom_init(void)
+{
+ /* call the soc specific detection code and get it to fill soc_info */
ltq_soc_detect(&soc_info);
- clk_init();
- clk = clk_get(0, "cpu");
- snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev1.%d",
- soc_info.name, soc_info.rev);
- clk_put(clk);
+ snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev %s",
+ soc_info.name, soc_info.rev_type);
soc_info.sys_type[LTQ_SYS_TYPE_LEN - 1] = '\0';
pr_info("SoC: %s\n", soc_info.sys_type);
prom_init_cmdline();
+
+#if defined(CONFIG_MIPS_MT_SMP)
+ if (register_vsmp_smp_ops())
+ panic("failed to register_vsmp_smp_ops()");
+#endif
}
+
+int __init plat_of_setup(void)
+{
+ static struct of_device_id of_ids[3];
+
+ if (!of_have_populated_dt())
+ panic("device tree not present");
+
+ strncpy(of_ids[0].compatible, soc_info.compatible,
+ sizeof(of_ids[0].compatible));
+ strncpy(of_ids[1].compatible, "simple-bus",
+ sizeof(of_ids[1].compatible));
+ return of_platform_bus_probe(NULL, of_ids, NULL);
+}
+
+arch_initcall(plat_of_setup);
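
Editorial note: plat_of_setup() above probes only the root buses whose compatible string matches either the detected SoC or "simple-bus". A user-space sketch of that match-table idea, with a toy struct in place of struct of_device_id; all strings are illustrative.

    #include <stdio.h>
    #include <string.h>

    #define COMPAT_LEN 128

    struct toy_of_device_id {
        char compatible[COMPAT_LEN];
    };

    /* Return 1 if compat appears in the (possibly partially filled) table. */
    static int matches(const struct toy_of_device_id *ids, int n, const char *compat)
    {
        int i;

        for (i = 0; i < n; i++)
            if (ids[i].compatible[0] && !strcmp(ids[i].compatible, compat))
                return 1;
        return 0;
    }

    int main(void)
    {
        struct toy_of_device_id ids[3] = { { "" }, { "" }, { "" } };

        strncpy(ids[0].compatible, "lantiq,xway", COMPAT_LEN - 1);
        strncpy(ids[1].compatible, "simple-bus", COMPAT_LEN - 1);

        printf("%d\n", matches(ids, 3, "simple-bus"));  /* 1 */
        printf("%d\n", matches(ids, 3, "lantiq,foo"));  /* 0 */
        return 0;
    }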
diff --git a/arch/mips/lantiq/prom.h b/arch/mips/lantiq/prom.h
index b4229d94280f..a3fa1a2bfaae 100644
--- a/arch/mips/lantiq/prom.h
+++ b/arch/mips/lantiq/prom.h
@@ -10,16 +10,22 @@
#define _LTQ_PROM_H__
#define LTQ_SYS_TYPE_LEN 0x100
+#define LTQ_SYS_REV_LEN 0x10
struct ltq_soc_info {
unsigned char *name;
unsigned int rev;
+ unsigned char rev_type[LTQ_SYS_REV_LEN];
+ unsigned int srev;
unsigned int partnum;
unsigned int type;
unsigned char sys_type[LTQ_SYS_TYPE_LEN];
+ unsigned char *compatible;
};
extern void ltq_soc_detect(struct ltq_soc_info *i);
-extern void ltq_soc_setup(void);
+extern void ltq_soc_init(void);
+
+extern struct boot_param_header __dtb_start;
#endif
diff --git a/arch/mips/lantiq/setup.c b/arch/mips/lantiq/setup.c
deleted file mode 100644
index 1ff6c9d6cb93..000000000000
--- a/arch/mips/lantiq/setup.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <asm/bootinfo.h>
-
-#include <lantiq_soc.h>
-
-#include "machtypes.h"
-#include "devices.h"
-#include "prom.h"
-
-void __init plat_mem_setup(void)
-{
- /* assume 16M as default incase uboot fails to pass proper ramsize */
- unsigned long memsize = 16;
- char **envp = (char **) KSEG1ADDR(fw_arg2);
-
- ioport_resource.start = IOPORT_RESOURCE_START;
- ioport_resource.end = IOPORT_RESOURCE_END;
- iomem_resource.start = IOMEM_RESOURCE_START;
- iomem_resource.end = IOMEM_RESOURCE_END;
-
- set_io_port_base((unsigned long) KSEG1);
-
- while (*envp) {
- char *e = (char *)KSEG1ADDR(*envp);
- if (!strncmp(e, "memsize=", 8)) {
- e += 8;
- if (strict_strtoul(e, 0, &memsize))
- pr_warn("bad memsize specified\n");
- }
- envp++;
- }
- memsize *= 1024 * 1024;
- add_memory_region(0x00000000, memsize, BOOT_MEM_RAM);
-}
-
-static int __init
-lantiq_setup(void)
-{
- ltq_soc_setup();
- mips_machine_setup();
- return 0;
-}
-
-arch_initcall(lantiq_setup);
-
-static void __init
-lantiq_generic_init(void)
-{
- /* Nothing to do */
-}
-
-MIPS_MACHINE(LTQ_MACH_GENERIC,
- "Generic",
- "Generic Lantiq based board",
- lantiq_generic_init);
diff --git a/arch/mips/lantiq/xway/Kconfig b/arch/mips/lantiq/xway/Kconfig
deleted file mode 100644
index 2b857de36620..000000000000
--- a/arch/mips/lantiq/xway/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
-if SOC_XWAY
-
-menu "MIPS Machine"
-
-config LANTIQ_MACH_EASY50712
- bool "Easy50712 - Danube"
- default y
-
-endmenu
-
-endif
-
-if SOC_AMAZON_SE
-
-menu "MIPS Machine"
-
-config LANTIQ_MACH_EASY50601
- bool "Easy50601 - Amazon SE"
- default y
-
-endmenu
-
-endif
diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile
index c517f2e77563..dc3194f6ee42 100644
--- a/arch/mips/lantiq/xway/Makefile
+++ b/arch/mips/lantiq/xway/Makefile
@@ -1,7 +1 @@
-obj-y := pmu.o ebu.o reset.o gpio.o gpio_stp.o gpio_ebu.o devices.o dma.o
-
-obj-$(CONFIG_SOC_XWAY) += clk-xway.o prom-xway.o setup-xway.o
-obj-$(CONFIG_SOC_AMAZON_SE) += clk-ase.o prom-ase.o setup-ase.o
-
-obj-$(CONFIG_LANTIQ_MACH_EASY50712) += mach-easy50712.o
-obj-$(CONFIG_LANTIQ_MACH_EASY50601) += mach-easy50601.o
+obj-y := prom.o sysctrl.o clk.o reset.o gpio.o dma.o
diff --git a/arch/mips/lantiq/xway/clk-ase.c b/arch/mips/lantiq/xway/clk-ase.c
deleted file mode 100644
index 652258309c9c..000000000000
--- a/arch/mips/lantiq/xway/clk-ase.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/io.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-
-#include <asm/time.h>
-#include <asm/irq.h>
-#include <asm/div64.h>
-
-#include <lantiq_soc.h>
-
-/* cgu registers */
-#define LTQ_CGU_SYS 0x0010
-
-unsigned int ltq_get_io_region_clock(void)
-{
- return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_io_region_clock);
-
-unsigned int ltq_get_fpi_bus_clock(int fpi)
-{
- return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_fpi_bus_clock);
-
-unsigned int ltq_get_cpu_hz(void)
-{
- if (ltq_cgu_r32(LTQ_CGU_SYS) & (1 << 5))
- return CLOCK_266M;
- else
- return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_cpu_hz);
-
-unsigned int ltq_get_fpi_hz(void)
-{
- return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_fpi_hz);
diff --git a/arch/mips/lantiq/xway/clk-xway.c b/arch/mips/lantiq/xway/clk-xway.c
deleted file mode 100644
index 696b1a3e0642..000000000000
--- a/arch/mips/lantiq/xway/clk-xway.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/io.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-
-#include <asm/time.h>
-#include <asm/irq.h>
-#include <asm/div64.h>
-
-#include <lantiq_soc.h>
-
-static unsigned int ltq_ram_clocks[] = {
- CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
-#define DDR_HZ ltq_ram_clocks[ltq_cgu_r32(LTQ_CGU_SYS) & 0x3]
-
-#define BASIC_FREQUENCY_1 35328000
-#define BASIC_FREQUENCY_2 36000000
-#define BASIS_REQUENCY_USB 12000000
-
-#define GET_BITS(x, msb, lsb) \
- (((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb))
-
-#define LTQ_CGU_PLL0_CFG 0x0004
-#define LTQ_CGU_PLL1_CFG 0x0008
-#define LTQ_CGU_PLL2_CFG 0x000C
-#define LTQ_CGU_SYS 0x0010
-#define LTQ_CGU_UPDATE 0x0014
-#define LTQ_CGU_IF_CLK 0x0018
-#define LTQ_CGU_OSC_CON 0x001C
-#define LTQ_CGU_SMD 0x0020
-#define LTQ_CGU_CT1SR 0x0028
-#define LTQ_CGU_CT2SR 0x002C
-#define LTQ_CGU_PCMCR 0x0030
-#define LTQ_CGU_PCI_CR 0x0034
-#define LTQ_CGU_PD_PC 0x0038
-#define LTQ_CGU_FMR 0x003C
-
-#define CGU_PLL0_PHASE_DIVIDER_ENABLE \
- (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 31))
-#define CGU_PLL0_BYPASS \
- (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 30))
-#define CGU_PLL0_CFG_DSMSEL \
- (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 28))
-#define CGU_PLL0_CFG_FRAC_EN \
- (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 27))
-#define CGU_PLL1_SRC \
- (ltq_cgu_r32(LTQ_CGU_PLL1_CFG) & (1 << 31))
-#define CGU_PLL2_PHASE_DIVIDER_ENABLE \
- (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & (1 << 20))
-#define CGU_SYS_FPI_SEL (1 << 6)
-#define CGU_SYS_DDR_SEL 0x3
-#define CGU_PLL0_SRC (1 << 29)
-
-#define CGU_PLL0_CFG_PLLK GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 26, 17)
-#define CGU_PLL0_CFG_PLLN GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 12, 6)
-#define CGU_PLL0_CFG_PLLM GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 5, 2)
-#define CGU_PLL2_SRC GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 18, 17)
-#define CGU_PLL2_CFG_INPUT_DIV GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 16, 13)
-
-static unsigned int ltq_get_pll0_fdiv(void);
-
-static inline unsigned int get_input_clock(int pll)
-{
- switch (pll) {
- case 0:
- if (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & CGU_PLL0_SRC)
- return BASIS_REQUENCY_USB;
- else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
- return BASIC_FREQUENCY_1;
- else
- return BASIC_FREQUENCY_2;
- case 1:
- if (CGU_PLL1_SRC)
- return BASIS_REQUENCY_USB;
- else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
- return BASIC_FREQUENCY_1;
- else
- return BASIC_FREQUENCY_2;
- case 2:
- switch (CGU_PLL2_SRC) {
- case 0:
- return ltq_get_pll0_fdiv();
- case 1:
- return CGU_PLL2_PHASE_DIVIDER_ENABLE ?
- BASIC_FREQUENCY_1 :
- BASIC_FREQUENCY_2;
- case 2:
- return BASIS_REQUENCY_USB;
- }
- default:
- return 0;
- }
-}
-
-static inline unsigned int cal_dsm(int pll, unsigned int num, unsigned int den)
-{
- u64 res, clock = get_input_clock(pll);
-
- res = num * clock;
- do_div(res, den);
- return res;
-}
-
-static inline unsigned int mash_dsm(int pll, unsigned int M, unsigned int N,
- unsigned int K)
-{
- unsigned int num = ((N + 1) << 10) + K;
- unsigned int den = (M + 1) << 10;
-
- return cal_dsm(pll, num, den);
-}
-
-static inline unsigned int ssff_dsm_1(int pll, unsigned int M, unsigned int N,
- unsigned int K)
-{
- unsigned int num = ((N + 1) << 11) + K + 512;
- unsigned int den = (M + 1) << 11;
-
- return cal_dsm(pll, num, den);
-}
-
-static inline unsigned int ssff_dsm_2(int pll, unsigned int M, unsigned int N,
- unsigned int K)
-{
- unsigned int num = K >= 512 ?
- ((N + 1) << 12) + K - 512 : ((N + 1) << 12) + K + 3584;
- unsigned int den = (M + 1) << 12;
-
- return cal_dsm(pll, num, den);
-}
-
-static inline unsigned int dsm(int pll, unsigned int M, unsigned int N,
- unsigned int K, unsigned int dsmsel, unsigned int phase_div_en)
-{
- if (!dsmsel)
- return mash_dsm(pll, M, N, K);
- else if (!phase_div_en)
- return mash_dsm(pll, M, N, K);
- else
- return ssff_dsm_2(pll, M, N, K);
-}
-
-static inline unsigned int ltq_get_pll0_fosc(void)
-{
- if (CGU_PLL0_BYPASS)
- return get_input_clock(0);
- else
- return !CGU_PLL0_CFG_FRAC_EN
- ? dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, 0,
- CGU_PLL0_CFG_DSMSEL,
- CGU_PLL0_PHASE_DIVIDER_ENABLE)
- : dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN,
- CGU_PLL0_CFG_PLLK, CGU_PLL0_CFG_DSMSEL,
- CGU_PLL0_PHASE_DIVIDER_ENABLE);
-}
-
-static unsigned int ltq_get_pll0_fdiv(void)
-{
- unsigned int div = CGU_PLL2_CFG_INPUT_DIV + 1;
-
- return (ltq_get_pll0_fosc() + (div >> 1)) / div;
-}
-
-unsigned int ltq_get_io_region_clock(void)
-{
- unsigned int ret = ltq_get_pll0_fosc();
-
- switch (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & CGU_SYS_DDR_SEL) {
- default:
- case 0:
- return (ret + 1) / 2;
- case 1:
- return (ret * 2 + 2) / 5;
- case 2:
- return (ret + 1) / 3;
- case 3:
- return (ret + 2) / 4;
- }
-}
-EXPORT_SYMBOL(ltq_get_io_region_clock);
-
-unsigned int ltq_get_fpi_bus_clock(int fpi)
-{
- unsigned int ret = ltq_get_io_region_clock();
-
- if ((fpi == 2) && (ltq_cgu_r32(LTQ_CGU_SYS) & CGU_SYS_FPI_SEL))
- ret >>= 1;
- return ret;
-}
-EXPORT_SYMBOL(ltq_get_fpi_bus_clock);
-
-unsigned int ltq_get_cpu_hz(void)
-{
- switch (ltq_cgu_r32(LTQ_CGU_SYS) & 0xc) {
- case 0:
- return CLOCK_333M;
- case 4:
- return DDR_HZ;
- case 8:
- return DDR_HZ << 1;
- default:
- return DDR_HZ >> 1;
- }
-}
-EXPORT_SYMBOL(ltq_get_cpu_hz);
-
-unsigned int ltq_get_fpi_hz(void)
-{
- unsigned int ddr_clock = DDR_HZ;
-
- if (ltq_cgu_r32(LTQ_CGU_SYS) & 0x40)
- return ddr_clock >> 1;
- return ddr_clock;
-}
-EXPORT_SYMBOL(ltq_get_fpi_hz);
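The clk-xway.c file removed above derived the PLL0 oscillator rate with a MASH delta-sigma modulator formula: fosc = fin * ((N + 1) * 2^10 + K) / ((M + 1) * 2^10), with fin selected by the PLL source bits. A minimal user-space sketch of that arithmetic, using made-up M/N/K values rather than real CGU register contents:

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch of the MASH DSM rate formula from the removed clk-xway.c.
     * The M/N/K values below are examples, not real register fields. */
    static uint64_t mash_dsm_hz(uint64_t fin, unsigned int m, unsigned int n,
                                unsigned int k)
    {
            uint64_t num = ((uint64_t)(n + 1) << 10) + k;
            uint64_t den = (uint64_t)(m + 1) << 10;

            return fin * num / den;
    }

    int main(void)
    {
            /* 36 MHz input, M = 0, N = 9, K = 0  ->  360 MHz */
            printf("%llu Hz\n",
                   (unsigned long long)mash_dsm_hz(36000000ULL, 0, 9, 0));
            return 0;
    }

The ssff_dsm_* variants in the removed file follow the same pattern with 2^11 and 2^12 denominators and a K offset.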
diff --git a/arch/mips/lantiq/xway/clk.c b/arch/mips/lantiq/xway/clk.c
new file mode 100644
index 000000000000..9aa17f79a742
--- /dev/null
+++ b/arch/mips/lantiq/xway/clk.c
@@ -0,0 +1,151 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+
+#include <asm/time.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+
+static unsigned int ram_clocks[] = {
+ CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
+#define DDR_HZ ram_clocks[ltq_cgu_r32(CGU_SYS) & 0x3]
+
+/* legacy xway clock */
+#define CGU_SYS 0x10
+
+/* vr9 clock */
+#define CGU_SYS_VR9 0x0c
+#define CGU_IF_CLK_VR9 0x24
+
+unsigned long ltq_danube_fpi_hz(void)
+{
+ unsigned long ddr_clock = DDR_HZ;
+
+ if (ltq_cgu_r32(CGU_SYS) & 0x40)
+ return ddr_clock >> 1;
+ return ddr_clock;
+}
+
+unsigned long ltq_danube_cpu_hz(void)
+{
+ switch (ltq_cgu_r32(CGU_SYS) & 0xc) {
+ case 0:
+ return CLOCK_333M;
+ case 4:
+ return DDR_HZ;
+ case 8:
+ return DDR_HZ << 1;
+ default:
+ return DDR_HZ >> 1;
+ }
+}
+
+unsigned long ltq_ar9_sys_hz(void)
+{
+ if (((ltq_cgu_r32(CGU_SYS) >> 3) & 0x3) == 0x2)
+ return CLOCK_393M;
+ return CLOCK_333M;
+}
+
+unsigned long ltq_ar9_fpi_hz(void)
+{
+ unsigned long sys = ltq_ar9_sys_hz();
+
+ if (ltq_cgu_r32(CGU_SYS) & BIT(0))
+ return sys;
+ return sys >> 1;
+}
+
+unsigned long ltq_ar9_cpu_hz(void)
+{
+ if (ltq_cgu_r32(CGU_SYS) & BIT(2))
+ return ltq_ar9_fpi_hz();
+ else
+ return ltq_ar9_sys_hz();
+}
+
+unsigned long ltq_vr9_cpu_hz(void)
+{
+ unsigned int cpu_sel;
+ unsigned long clk;
+
+ cpu_sel = (ltq_cgu_r32(CGU_SYS_VR9) >> 4) & 0xf;
+
+ switch (cpu_sel) {
+ case 0:
+ clk = CLOCK_600M;
+ break;
+ case 1:
+ clk = CLOCK_500M;
+ break;
+ case 2:
+ clk = CLOCK_393M;
+ break;
+ case 3:
+ clk = CLOCK_333M;
+ break;
+ case 5:
+ case 6:
+ clk = CLOCK_196_608M;
+ break;
+ case 7:
+ clk = CLOCK_167M;
+ break;
+ case 4:
+ case 8:
+ case 9:
+ clk = CLOCK_125M;
+ break;
+ default:
+ clk = 0;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_vr9_fpi_hz(void)
+{
+ unsigned int ocp_sel, cpu_clk;
+ unsigned long clk;
+
+ cpu_clk = ltq_vr9_cpu_hz();
+ ocp_sel = ltq_cgu_r32(CGU_SYS_VR9) & 0x3;
+
+ switch (ocp_sel) {
+ case 0:
+ /* OCP ratio 1 */
+ clk = cpu_clk;
+ break;
+ case 2:
+ /* OCP ratio 2 */
+ clk = cpu_clk / 2;
+ break;
+ case 3:
+ /* OCP ratio 2.5 */
+ clk = (cpu_clk * 2) / 5;
+ break;
+ case 4:
+ /* OCP ratio 3 */
+ clk = cpu_clk / 3;
+ break;
+ default:
+ clk = 0;
+ break;
+ }
+
+ return clk;
+}
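The replacement clk.c keeps only the per-SoC rate helpers; for VR9 the FPI rate is the CPU rate scaled by an OCP divider, with the 2.5 ratio expressed in integer math as (cpu * 2) / 5. A small sketch of that calculation, assuming a 500 MHz CPU clock (an example input, not a value read from CGU_SYS_VR9):

    #include <stdio.h>

    /* Sketch of the VR9 OCP ratio handling in clk.c above; ocp_sel and the
     * CPU rate are example inputs, not hardware readouts. */
    static unsigned long vr9_fpi_from_cpu(unsigned long cpu_hz, unsigned int ocp_sel)
    {
            switch (ocp_sel) {
            case 0:
                    return cpu_hz;          /* OCP ratio 1 */
            case 2:
                    return cpu_hz / 2;      /* OCP ratio 2 */
            case 3:
                    return cpu_hz * 2 / 5;  /* OCP ratio 2.5 */
            case 4:
                    return cpu_hz / 3;      /* OCP ratio 3 */
            default:
                    return 0;
            }
    }

    int main(void)
    {
            printf("%lu\n", vr9_fpi_from_cpu(500000000UL, 3)); /* 200000000 */
            return 0;
    }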
diff --git a/arch/mips/lantiq/xway/devices.c b/arch/mips/lantiq/xway/devices.c
deleted file mode 100644
index d614aa7ff07f..000000000000
--- a/arch/mips/lantiq/xway/devices.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/mtd/physmap.h>
-#include <linux/kernel.h>
-#include <linux/reboot.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/etherdevice.h>
-#include <linux/time.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-
-#include <lantiq_soc.h>
-#include <lantiq_irq.h>
-#include <lantiq_platform.h>
-
-#include "devices.h"
-
-/* gpio */
-static struct resource ltq_gpio_resource[] = {
- {
- .name = "gpio0",
- .start = LTQ_GPIO0_BASE_ADDR,
- .end = LTQ_GPIO0_BASE_ADDR + LTQ_GPIO_SIZE - 1,
- .flags = IORESOURCE_MEM,
- }, {
- .name = "gpio1",
- .start = LTQ_GPIO1_BASE_ADDR,
- .end = LTQ_GPIO1_BASE_ADDR + LTQ_GPIO_SIZE - 1,
- .flags = IORESOURCE_MEM,
- }, {
- .name = "gpio2",
- .start = LTQ_GPIO2_BASE_ADDR,
- .end = LTQ_GPIO2_BASE_ADDR + LTQ_GPIO_SIZE - 1,
- .flags = IORESOURCE_MEM,
- }
-};
-
-void __init ltq_register_gpio(void)
-{
- platform_device_register_simple("ltq_gpio", 0,
- &ltq_gpio_resource[0], 1);
- platform_device_register_simple("ltq_gpio", 1,
- &ltq_gpio_resource[1], 1);
-
- /* AR9 and VR9 have an extra gpio block */
- if (ltq_is_ar9() || ltq_is_vr9()) {
- platform_device_register_simple("ltq_gpio", 2,
- &ltq_gpio_resource[2], 1);
- }
-}
-
-/* serial to parallel conversion */
-static struct resource ltq_stp_resource = {
- .name = "stp",
- .start = LTQ_STP_BASE_ADDR,
- .end = LTQ_STP_BASE_ADDR + LTQ_STP_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-void __init ltq_register_gpio_stp(void)
-{
- platform_device_register_simple("ltq_stp", 0, &ltq_stp_resource, 1);
-}
-
-/* asc ports - amazon se has its own serial mapping */
-static struct resource ltq_ase_asc_resources[] = {
- {
- .name = "asc0",
- .start = LTQ_ASC1_BASE_ADDR,
- .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- IRQ_RES(tx, LTQ_ASC_ASE_TIR),
- IRQ_RES(rx, LTQ_ASC_ASE_RIR),
- IRQ_RES(err, LTQ_ASC_ASE_EIR),
-};
-
-void __init ltq_register_ase_asc(void)
-{
- platform_device_register_simple("ltq_asc", 0,
- ltq_ase_asc_resources, ARRAY_SIZE(ltq_ase_asc_resources));
-}
-
-/* ethernet */
-static struct resource ltq_etop_resources = {
- .name = "etop",
- .start = LTQ_ETOP_BASE_ADDR,
- .end = LTQ_ETOP_BASE_ADDR + LTQ_ETOP_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device ltq_etop = {
- .name = "ltq_etop",
- .resource = &ltq_etop_resources,
- .num_resources = 1,
-};
-
-void __init
-ltq_register_etop(struct ltq_eth_data *eth)
-{
- if (eth) {
- ltq_etop.dev.platform_data = eth;
- platform_device_register(&ltq_etop);
- }
-}
diff --git a/arch/mips/lantiq/xway/devices.h b/arch/mips/lantiq/xway/devices.h
deleted file mode 100644
index e90493471bc1..000000000000
--- a/arch/mips/lantiq/xway/devices.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#ifndef _LTQ_DEVICES_XWAY_H__
-#define _LTQ_DEVICES_XWAY_H__
-
-#include "../devices.h"
-#include <linux/phy.h>
-
-extern void ltq_register_gpio(void);
-extern void ltq_register_gpio_stp(void);
-extern void ltq_register_ase_asc(void);
-extern void ltq_register_etop(struct ltq_eth_data *eth);
-
-#endif
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index b210e936c7c3..55d2c4fa4714 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -19,7 +19,8 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
-#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/clk.h>
#include <lantiq_soc.h>
#include <xway_dma.h>
@@ -55,13 +56,6 @@
#define ltq_dma_w32_mask(x, y, z) ltq_w32_mask(x, y, \
ltq_dma_membase + (z))
-static struct resource ltq_dma_resource = {
- .name = "dma",
- .start = LTQ_DMA_BASE_ADDR,
- .end = LTQ_DMA_BASE_ADDR + LTQ_DMA_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
static void __iomem *ltq_dma_membase;
void
@@ -215,27 +209,28 @@ ltq_dma_init_port(int p)
}
EXPORT_SYMBOL_GPL(ltq_dma_init_port);
-int __init
-ltq_dma_init(void)
+static int __devinit
+ltq_dma_init(struct platform_device *pdev)
{
+ struct clk *clk;
+ struct resource *res;
int i;
- /* insert and request the memory region */
- if (insert_resource(&iomem_resource, &ltq_dma_resource) < 0)
- panic("Failed to insert dma memory");
-
- if (request_mem_region(ltq_dma_resource.start,
- resource_size(&ltq_dma_resource), "dma") < 0)
- panic("Failed to request dma memory");
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ panic("Failed to get dma resource");
/* remap dma register range */
- ltq_dma_membase = ioremap_nocache(ltq_dma_resource.start,
- resource_size(&ltq_dma_resource));
+ ltq_dma_membase = devm_request_and_ioremap(&pdev->dev, res);
if (!ltq_dma_membase)
- panic("Failed to remap dma memory");
+ panic("Failed to remap dma resource");
/* power up and reset the dma engine */
- ltq_pmu_enable(PMU_DMA);
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ panic("Failed to get dma clock");
+
+ clk_enable(clk);
ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL);
/* disable all interrupts */
@@ -248,7 +243,29 @@ ltq_dma_init(void)
ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
}
+ dev_info(&pdev->dev, "init done\n");
return 0;
}
-postcore_initcall(ltq_dma_init);
+static const struct of_device_id dma_match[] = {
+ { .compatible = "lantiq,dma-xway" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dma_match);
+
+static struct platform_driver dma_driver = {
+ .probe = ltq_dma_init,
+ .driver = {
+ .name = "dma-xway",
+ .owner = THIS_MODULE,
+ .of_match_table = dma_match,
+ },
+};
+
+int __init
+dma_init(void)
+{
+ return platform_driver_register(&dma_driver);
+}
+
+postcore_initcall(dma_init);
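The DMA block is now probed as a platform driver: the register window comes from the device's memory resource and the old ltq_pmu_enable(PMU_DMA) call is replaced by a clock gate looked up through clkdev (registered for "1e104100.dma" in sysctrl.c further down). A minimal probe skeleton in the same style, for a hypothetical "lantiq,example-xway" device; the compatible string and driver name are illustrative only, not a real binding:

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/of.h>
    #include <linux/clk.h>
    #include <linux/io.h>
    #include <linux/err.h>

    static void __iomem *example_membase;

    static int example_probe(struct platform_device *pdev)
    {
            struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            struct clk *clk;

            if (!res)
                    return -ENOENT;

            /* remap the register window described by the devicetree node */
            example_membase = devm_request_and_ioremap(&pdev->dev, res);
            if (!example_membase)
                    return -ENOMEM;

            /* ungate the block; resolved against a clkdev entry for this device */
            clk = clk_get(&pdev->dev, NULL);
            if (IS_ERR(clk))
                    return PTR_ERR(clk);
            clk_enable(clk);

            return 0;
    }

    static const struct of_device_id example_match[] = {
            { .compatible = "lantiq,example-xway" },        /* hypothetical */
            {},
    };

    static struct platform_driver example_driver = {
            .probe = example_probe,
            .driver = {
                    .name = "example-xway",
                    .owner = THIS_MODULE,
                    .of_match_table = example_match,
            },
    };

    module_platform_driver(example_driver);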
diff --git a/arch/mips/lantiq/xway/ebu.c b/arch/mips/lantiq/xway/ebu.c
deleted file mode 100644
index 862e3e830680..000000000000
--- a/arch/mips/lantiq/xway/ebu.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * EBU - the external bus unit attaches PCI, NOR and NAND
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-
-#include <lantiq_soc.h>
-
-/* all access to the ebu must be locked */
-DEFINE_SPINLOCK(ebu_lock);
-EXPORT_SYMBOL_GPL(ebu_lock);
-
-static struct resource ltq_ebu_resource = {
- .name = "ebu",
- .start = LTQ_EBU_BASE_ADDR,
- .end = LTQ_EBU_BASE_ADDR + LTQ_EBU_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-/* remapped base addr of the clock unit and external bus unit */
-void __iomem *ltq_ebu_membase;
-
-static int __init lantiq_ebu_init(void)
-{
- /* insert and request the memory region */
- if (insert_resource(&iomem_resource, &ltq_ebu_resource) < 0)
- panic("Failed to insert ebu memory");
-
- if (request_mem_region(ltq_ebu_resource.start,
- resource_size(&ltq_ebu_resource), "ebu") < 0)
- panic("Failed to request ebu memory");
-
- /* remap ebu register range */
- ltq_ebu_membase = ioremap_nocache(ltq_ebu_resource.start,
- resource_size(&ltq_ebu_resource));
- if (!ltq_ebu_membase)
- panic("Failed to remap ebu memory");
-
- /* make sure to unprotect the memory region where flash is located */
- ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
- return 0;
-}
-
-postcore_initcall(lantiq_ebu_init);
diff --git a/arch/mips/lantiq/xway/gpio.c b/arch/mips/lantiq/xway/gpio.c
index c429a5bc080f..2ab39e93d9be 100644
--- a/arch/mips/lantiq/xway/gpio.c
+++ b/arch/mips/lantiq/xway/gpio.c
@@ -36,18 +36,6 @@ struct ltq_gpio {
static struct ltq_gpio ltq_gpio_port[MAX_PORTS];
-int gpio_to_irq(unsigned int gpio)
-{
- return -EINVAL;
-}
-EXPORT_SYMBOL(gpio_to_irq);
-
-int irq_to_gpio(unsigned int gpio)
-{
- return -EINVAL;
-}
-EXPORT_SYMBOL(irq_to_gpio);
-
int ltq_gpio_request(unsigned int pin, unsigned int alt0,
unsigned int alt1, unsigned int dir, const char *name)
{
diff --git a/arch/mips/lantiq/xway/gpio_ebu.c b/arch/mips/lantiq/xway/gpio_ebu.c
deleted file mode 100644
index aae17170472f..000000000000
--- a/arch/mips/lantiq/xway/gpio_ebu.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-
-#include <lantiq_soc.h>
-
-/*
- * By attaching hardware latches to the EBU it is possible to create output
- * only gpios. This driver configures a special memory address, which when
- * written to outputs 16 bit to the latches.
- */
-
-#define LTQ_EBU_BUSCON 0x1e7ff /* 16 bit access, slowest timing */
-#define LTQ_EBU_WP 0x80000000 /* write protect bit */
-
-/* we keep a shadow value of the last value written to the ebu */
-static int ltq_ebu_gpio_shadow = 0x0;
-static void __iomem *ltq_ebu_gpio_membase;
-
-static void ltq_ebu_apply(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ebu_lock, flags);
- ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
- *((__u16 *)ltq_ebu_gpio_membase) = ltq_ebu_gpio_shadow;
- ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
- spin_unlock_irqrestore(&ebu_lock, flags);
-}
-
-static void ltq_ebu_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- if (value)
- ltq_ebu_gpio_shadow |= (1 << offset);
- else
- ltq_ebu_gpio_shadow &= ~(1 << offset);
- ltq_ebu_apply();
-}
-
-static int ltq_ebu_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- ltq_ebu_set(chip, offset, value);
-
- return 0;
-}
-
-static struct gpio_chip ltq_ebu_chip = {
- .label = "ltq_ebu",
- .direction_output = ltq_ebu_direction_output,
- .set = ltq_ebu_set,
- .base = 72,
- .ngpio = 16,
- .can_sleep = 1,
- .owner = THIS_MODULE,
-};
-
-static int ltq_ebu_probe(struct platform_device *pdev)
-{
- int ret = 0;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- if (!res) {
- dev_err(&pdev->dev, "failed to get memory resource\n");
- return -ENOENT;
- }
-
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "failed to request memory resource\n");
- return -EBUSY;
- }
-
- ltq_ebu_gpio_membase = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!ltq_ebu_gpio_membase) {
- dev_err(&pdev->dev, "Failed to ioremap mem region\n");
- return -ENOMEM;
- }
-
- /* grab the default shadow value passed form the platform code */
- ltq_ebu_gpio_shadow = (unsigned int) pdev->dev.platform_data;
-
- /* tell the ebu controller which memory address we will be using */
- ltq_ebu_w32(pdev->resource->start | 0x1, LTQ_EBU_ADDRSEL1);
-
- /* write protect the region */
- ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
-
- ret = gpiochip_add(&ltq_ebu_chip);
- if (!ret)
- ltq_ebu_apply();
- return ret;
-}
-
-static struct platform_driver ltq_ebu_driver = {
- .probe = ltq_ebu_probe,
- .driver = {
- .name = "ltq_ebu",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init ltq_ebu_init(void)
-{
- int ret = platform_driver_register(&ltq_ebu_driver);
-
- if (ret)
- pr_info("ltq_ebu : Error registering platform driver!");
- return ret;
-}
-
-postcore_initcall(ltq_ebu_init);
diff --git a/arch/mips/lantiq/xway/gpio_stp.c b/arch/mips/lantiq/xway/gpio_stp.c
deleted file mode 100644
index fd07d87adaa9..000000000000
--- a/arch/mips/lantiq/xway/gpio_stp.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2007 John Crispin <blogic@openwrt.org>
- *
- */
-
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <lantiq_soc.h>
-
-#define LTQ_STP_CON0 0x00
-#define LTQ_STP_CON1 0x04
-#define LTQ_STP_CPU0 0x08
-#define LTQ_STP_CPU1 0x0C
-#define LTQ_STP_AR 0x10
-
-#define LTQ_STP_CON_SWU (1 << 31)
-#define LTQ_STP_2HZ 0
-#define LTQ_STP_4HZ (1 << 23)
-#define LTQ_STP_8HZ (2 << 23)
-#define LTQ_STP_10HZ (3 << 23)
-#define LTQ_STP_SPEED_MASK (0xf << 23)
-#define LTQ_STP_UPD_FPI (1 << 31)
-#define LTQ_STP_UPD_MASK (3 << 30)
-#define LTQ_STP_ADSL_SRC (3 << 24)
-
-#define LTQ_STP_GROUP0 (1 << 0)
-
-#define LTQ_STP_RISING 0
-#define LTQ_STP_FALLING (1 << 26)
-#define LTQ_STP_EDGE_MASK (1 << 26)
-
-#define ltq_stp_r32(reg) __raw_readl(ltq_stp_membase + reg)
-#define ltq_stp_w32(val, reg) __raw_writel(val, ltq_stp_membase + reg)
-#define ltq_stp_w32_mask(clear, set, reg) \
- ltq_w32((ltq_r32(ltq_stp_membase + reg) & ~(clear)) | (set), \
- ltq_stp_membase + (reg))
-
-static int ltq_stp_shadow = 0xffff;
-static void __iomem *ltq_stp_membase;
-
-static void ltq_stp_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- if (value)
- ltq_stp_shadow |= (1 << offset);
- else
- ltq_stp_shadow &= ~(1 << offset);
- ltq_stp_w32(ltq_stp_shadow, LTQ_STP_CPU0);
-}
-
-static int ltq_stp_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- ltq_stp_set(chip, offset, value);
-
- return 0;
-}
-
-static struct gpio_chip ltq_stp_chip = {
- .label = "ltq_stp",
- .direction_output = ltq_stp_direction_output,
- .set = ltq_stp_set,
- .base = 48,
- .ngpio = 24,
- .can_sleep = 1,
- .owner = THIS_MODULE,
-};
-
-static int ltq_stp_hw_init(void)
-{
- /* the 3 pins used to control the external stp */
- ltq_gpio_request(4, 1, 0, 1, "stp-st");
- ltq_gpio_request(5, 1, 0, 1, "stp-d");
- ltq_gpio_request(6, 1, 0, 1, "stp-sh");
-
- /* sane defaults */
- ltq_stp_w32(0, LTQ_STP_AR);
- ltq_stp_w32(0, LTQ_STP_CPU0);
- ltq_stp_w32(0, LTQ_STP_CPU1);
- ltq_stp_w32(LTQ_STP_CON_SWU, LTQ_STP_CON0);
- ltq_stp_w32(0, LTQ_STP_CON1);
-
- /* rising or falling edge */
- ltq_stp_w32_mask(LTQ_STP_EDGE_MASK, LTQ_STP_FALLING, LTQ_STP_CON0);
-
- /* per default stp 15-0 are set */
- ltq_stp_w32_mask(0, LTQ_STP_GROUP0, LTQ_STP_CON1);
-
- /* stp are update periodically by the FPI bus */
- ltq_stp_w32_mask(LTQ_STP_UPD_MASK, LTQ_STP_UPD_FPI, LTQ_STP_CON1);
-
- /* set stp update speed */
- ltq_stp_w32_mask(LTQ_STP_SPEED_MASK, LTQ_STP_8HZ, LTQ_STP_CON1);
-
- /* tell the hardware that pin (led) 0 and 1 are controlled
- * by the dsl arc
- */
- ltq_stp_w32_mask(0, LTQ_STP_ADSL_SRC, LTQ_STP_CON0);
-
- ltq_pmu_enable(PMU_LED);
- return 0;
-}
-
-static int __devinit ltq_stp_probe(struct platform_device *pdev)
-{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- int ret = 0;
-
- if (!res)
- return -ENOENT;
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "failed to request STP memory\n");
- return -EBUSY;
- }
- ltq_stp_membase = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!ltq_stp_membase) {
- dev_err(&pdev->dev, "failed to remap STP memory\n");
- return -ENOMEM;
- }
- ret = gpiochip_add(&ltq_stp_chip);
- if (!ret)
- ret = ltq_stp_hw_init();
-
- return ret;
-}
-
-static struct platform_driver ltq_stp_driver = {
- .probe = ltq_stp_probe,
- .driver = {
- .name = "ltq_stp",
- .owner = THIS_MODULE,
- },
-};
-
-int __init ltq_stp_init(void)
-{
- int ret = platform_driver_register(&ltq_stp_driver);
-
- if (ret)
- pr_info("ltq_stp: error registering platform driver");
- return ret;
-}
-
-postcore_initcall(ltq_stp_init);
diff --git a/arch/mips/lantiq/xway/mach-easy50601.c b/arch/mips/lantiq/xway/mach-easy50601.c
deleted file mode 100644
index d5aaf637ab19..000000000000
--- a/arch/mips/lantiq/xway/mach-easy50601.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
-#include <linux/input.h>
-
-#include <lantiq.h>
-
-#include "../machtypes.h"
-#include "devices.h"
-
-static struct mtd_partition easy50601_partitions[] = {
- {
- .name = "uboot",
- .offset = 0x0,
- .size = 0x10000,
- },
- {
- .name = "uboot_env",
- .offset = 0x10000,
- .size = 0x10000,
- },
- {
- .name = "linux",
- .offset = 0x20000,
- .size = 0xE0000,
- },
- {
- .name = "rootfs",
- .offset = 0x100000,
- .size = 0x300000,
- },
-};
-
-static struct physmap_flash_data easy50601_flash_data = {
- .nr_parts = ARRAY_SIZE(easy50601_partitions),
- .parts = easy50601_partitions,
-};
-
-static void __init easy50601_init(void)
-{
- ltq_register_nor(&easy50601_flash_data);
-}
-
-MIPS_MACHINE(LTQ_MACH_EASY50601,
- "EASY50601",
- "EASY50601 Eval Board",
- easy50601_init);
diff --git a/arch/mips/lantiq/xway/mach-easy50712.c b/arch/mips/lantiq/xway/mach-easy50712.c
deleted file mode 100644
index ea5027b3239d..000000000000
--- a/arch/mips/lantiq/xway/mach-easy50712.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
-#include <linux/input.h>
-#include <linux/phy.h>
-
-#include <lantiq_soc.h>
-#include <irq.h>
-
-#include "../machtypes.h"
-#include "devices.h"
-
-static struct mtd_partition easy50712_partitions[] = {
- {
- .name = "uboot",
- .offset = 0x0,
- .size = 0x10000,
- },
- {
- .name = "uboot_env",
- .offset = 0x10000,
- .size = 0x10000,
- },
- {
- .name = "linux",
- .offset = 0x20000,
- .size = 0xe0000,
- },
- {
- .name = "rootfs",
- .offset = 0x100000,
- .size = 0x300000,
- },
-};
-
-static struct physmap_flash_data easy50712_flash_data = {
- .nr_parts = ARRAY_SIZE(easy50712_partitions),
- .parts = easy50712_partitions,
-};
-
-static struct ltq_pci_data ltq_pci_data = {
- .clock = PCI_CLOCK_INT,
- .gpio = PCI_GNT1 | PCI_REQ1,
- .irq = {
- [14] = INT_NUM_IM0_IRL0 + 22,
- },
-};
-
-static struct ltq_eth_data ltq_eth_data = {
- .mii_mode = PHY_INTERFACE_MODE_MII,
-};
-
-static void __init easy50712_init(void)
-{
- ltq_register_gpio_stp();
- ltq_register_nor(&easy50712_flash_data);
- ltq_register_pci(&ltq_pci_data);
- ltq_register_etop(&ltq_eth_data);
-}
-
-MIPS_MACHINE(LTQ_MACH_EASY50712,
- "EASY50712",
- "EASY50712 Eval Board",
- easy50712_init);
diff --git a/arch/mips/lantiq/xway/pmu.c b/arch/mips/lantiq/xway/pmu.c
deleted file mode 100644
index fe85361e032e..000000000000
--- a/arch/mips/lantiq/xway/pmu.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-
-#include <lantiq_soc.h>
-
-/* PMU - the power management unit allows us to turn part of the core
- * on and off
- */
-
-/* the enable / disable registers */
-#define LTQ_PMU_PWDCR 0x1C
-#define LTQ_PMU_PWDSR 0x20
-
-#define ltq_pmu_w32(x, y) ltq_w32((x), ltq_pmu_membase + (y))
-#define ltq_pmu_r32(x) ltq_r32(ltq_pmu_membase + (x))
-
-static struct resource ltq_pmu_resource = {
- .name = "pmu",
- .start = LTQ_PMU_BASE_ADDR,
- .end = LTQ_PMU_BASE_ADDR + LTQ_PMU_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static void __iomem *ltq_pmu_membase;
-
-void ltq_pmu_enable(unsigned int module)
-{
- int err = 1000000;
-
- ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) & ~module, LTQ_PMU_PWDCR);
- do {} while (--err && (ltq_pmu_r32(LTQ_PMU_PWDSR) & module));
-
- if (!err)
- panic("activating PMU module failed!");
-}
-EXPORT_SYMBOL(ltq_pmu_enable);
-
-void ltq_pmu_disable(unsigned int module)
-{
- ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) | module, LTQ_PMU_PWDCR);
-}
-EXPORT_SYMBOL(ltq_pmu_disable);
-
-int __init ltq_pmu_init(void)
-{
- if (insert_resource(&iomem_resource, &ltq_pmu_resource) < 0)
- panic("Failed to insert pmu memory");
-
- if (request_mem_region(ltq_pmu_resource.start,
- resource_size(&ltq_pmu_resource), "pmu") < 0)
- panic("Failed to request pmu memory");
-
- ltq_pmu_membase = ioremap_nocache(ltq_pmu_resource.start,
- resource_size(&ltq_pmu_resource));
- if (!ltq_pmu_membase)
- panic("Failed to remap pmu memory");
- return 0;
-}
-
-core_initcall(ltq_pmu_init);
diff --git a/arch/mips/lantiq/xway/prom-ase.c b/arch/mips/lantiq/xway/prom-ase.c
deleted file mode 100644
index ae4959ae865c..000000000000
--- a/arch/mips/lantiq/xway/prom-ase.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/export.h>
-#include <linux/clk.h>
-#include <asm/bootinfo.h>
-#include <asm/time.h>
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-
-#define SOC_AMAZON_SE "Amazon_SE"
-
-#define PART_SHIFT 12
-#define PART_MASK 0x0FFFFFFF
-#define REV_SHIFT 28
-#define REV_MASK 0xF0000000
-
-void __init ltq_soc_detect(struct ltq_soc_info *i)
-{
- i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
- i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
- switch (i->partnum) {
- case SOC_ID_AMAZON_SE:
- i->name = SOC_AMAZON_SE;
- i->type = SOC_TYPE_AMAZON_SE;
- break;
-
- default:
- unreachable();
- break;
- }
-}
diff --git a/arch/mips/lantiq/xway/prom-xway.c b/arch/mips/lantiq/xway/prom-xway.c
deleted file mode 100644
index 2228133ca356..000000000000
--- a/arch/mips/lantiq/xway/prom-xway.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/export.h>
-#include <linux/clk.h>
-#include <asm/bootinfo.h>
-#include <asm/time.h>
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-
-#define SOC_DANUBE "Danube"
-#define SOC_TWINPASS "Twinpass"
-#define SOC_AR9 "AR9"
-
-#define PART_SHIFT 12
-#define PART_MASK 0x0FFFFFFF
-#define REV_SHIFT 28
-#define REV_MASK 0xF0000000
-
-void __init ltq_soc_detect(struct ltq_soc_info *i)
-{
- i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
- i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
- switch (i->partnum) {
- case SOC_ID_DANUBE1:
- case SOC_ID_DANUBE2:
- i->name = SOC_DANUBE;
- i->type = SOC_TYPE_DANUBE;
- break;
-
- case SOC_ID_TWINPASS:
- i->name = SOC_TWINPASS;
- i->type = SOC_TYPE_DANUBE;
- break;
-
- case SOC_ID_ARX188:
- case SOC_ID_ARX168:
- case SOC_ID_ARX182:
- i->name = SOC_AR9;
- i->type = SOC_TYPE_AR9;
- break;
-
- default:
- unreachable();
- break;
- }
-}
diff --git a/arch/mips/lantiq/xway/prom.c b/arch/mips/lantiq/xway/prom.c
new file mode 100644
index 000000000000..248429ab2622
--- /dev/null
+++ b/arch/mips/lantiq/xway/prom.c
@@ -0,0 +1,115 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/export.h>
+#include <linux/clk.h>
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+
+#include <lantiq_soc.h>
+
+#include "../prom.h"
+
+#define SOC_DANUBE "Danube"
+#define SOC_TWINPASS "Twinpass"
+#define SOC_AMAZON_SE "Amazon_SE"
+#define SOC_AR9 "AR9"
+#define SOC_GR9 "GR9"
+#define SOC_VR9 "VR9"
+
+#define COMP_DANUBE "lantiq,danube"
+#define COMP_TWINPASS "lantiq,twinpass"
+#define COMP_AMAZON_SE "lantiq,ase"
+#define COMP_AR9 "lantiq,ar9"
+#define COMP_GR9 "lantiq,gr9"
+#define COMP_VR9 "lantiq,vr9"
+
+#define PART_SHIFT 12
+#define PART_MASK 0x0FFFFFFF
+#define REV_SHIFT 28
+#define REV_MASK 0xF0000000
+
+void __init ltq_soc_detect(struct ltq_soc_info *i)
+{
+ i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
+ i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
+ sprintf(i->rev_type, "1.%d", i->rev);
+ switch (i->partnum) {
+ case SOC_ID_DANUBE1:
+ case SOC_ID_DANUBE2:
+ i->name = SOC_DANUBE;
+ i->type = SOC_TYPE_DANUBE;
+ i->compatible = COMP_DANUBE;
+ break;
+
+ case SOC_ID_TWINPASS:
+ i->name = SOC_TWINPASS;
+ i->type = SOC_TYPE_DANUBE;
+ i->compatible = COMP_TWINPASS;
+ break;
+
+ case SOC_ID_ARX188:
+ case SOC_ID_ARX168_1:
+ case SOC_ID_ARX168_2:
+ case SOC_ID_ARX182:
+ i->name = SOC_AR9;
+ i->type = SOC_TYPE_AR9;
+ i->compatible = COMP_AR9;
+ break;
+
+ case SOC_ID_GRX188:
+ case SOC_ID_GRX168:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_AR9;
+ i->compatible = COMP_GR9;
+ break;
+
+ case SOC_ID_AMAZON_SE_1:
+ case SOC_ID_AMAZON_SE_2:
+#ifdef CONFIG_PCI
+ panic("ase is only supported for non pci kernels");
+#endif
+ i->name = SOC_AMAZON_SE;
+ i->type = SOC_TYPE_AMAZON_SE;
+ i->compatible = COMP_AMAZON_SE;
+ break;
+
+ case SOC_ID_VRX282:
+ case SOC_ID_VRX268:
+ case SOC_ID_VRX288:
+ i->name = SOC_VR9;
+ i->type = SOC_TYPE_VR9;
+ i->compatible = COMP_VR9;
+ break;
+
+ case SOC_ID_GRX268:
+ case SOC_ID_GRX288:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_VR9;
+ i->compatible = COMP_GR9;
+ break;
+
+ case SOC_ID_VRX268_2:
+ case SOC_ID_VRX288_2:
+ i->name = SOC_VR9;
+ i->type = SOC_TYPE_VR9_2;
+ i->compatible = COMP_VR9;
+ break;
+
+ case SOC_ID_GRX282_2:
+ case SOC_ID_GRX288_2:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_VR9_2;
+ i->compatible = COMP_GR9;
+ break;
+
+ default:
+ unreachable();
+ break;
+ }
+}
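SoC detection in the new prom.c reduces to two shift-and-mask operations on the LTQ_MPS_CHIPID word plus a table of known part numbers. A user-space sketch of the decode, using an invented chip-id value purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define PART_SHIFT      12
    #define PART_MASK       0x0FFFFFFF
    #define REV_SHIFT       28
    #define REV_MASK        0xF0000000

    int main(void)
    {
            uint32_t chipid = 0x1016C083;   /* example value, not a real dump */
            uint32_t partnum = (chipid & PART_MASK) >> PART_SHIFT;
            uint32_t rev = (chipid & REV_MASK) >> REV_SHIFT;

            printf("partnum=0x%x rev=1.%u\n", partnum, rev);
            return 0;
    }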
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c
index 8b66bd87f0c1..22c55f73aa9d 100644
--- a/arch/mips/lantiq/xway/reset.c
+++ b/arch/mips/lantiq/xway/reset.c
@@ -11,26 +11,31 @@
#include <linux/ioport.h>
#include <linux/pm.h>
#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
#include <asm/reboot.h>
#include <lantiq_soc.h>
+#include "../prom.h"
+
#define ltq_rcu_w32(x, y) ltq_w32((x), ltq_rcu_membase + (y))
#define ltq_rcu_r32(x) ltq_r32(ltq_rcu_membase + (x))
-/* register definitions */
-#define LTQ_RCU_RST 0x0010
-#define LTQ_RCU_RST_ALL 0x40000000
-
-#define LTQ_RCU_RST_STAT 0x0014
-#define LTQ_RCU_STAT_SHIFT 26
+/* reset request register */
+#define RCU_RST_REQ 0x0010
+/* reset status register */
+#define RCU_RST_STAT 0x0014
-static struct resource ltq_rcu_resource = {
- .name = "rcu",
- .start = LTQ_RCU_BASE_ADDR,
- .end = LTQ_RCU_BASE_ADDR + LTQ_RCU_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
+/* reboot bit */
+#define RCU_RD_SRST BIT(30)
+/* reset cause */
+#define RCU_STAT_SHIFT 26
+/* boot selection */
+#define RCU_BOOT_SEL_SHIFT 26
+#define RCU_BOOT_SEL_MASK 0x7
/* remapped base addr of the reset control unit */
static void __iomem *ltq_rcu_membase;
@@ -38,48 +43,64 @@ static void __iomem *ltq_rcu_membase;
/* This function is used by the watchdog driver */
int ltq_reset_cause(void)
{
- u32 val = ltq_rcu_r32(LTQ_RCU_RST_STAT);
- return val >> LTQ_RCU_STAT_SHIFT;
+ u32 val = ltq_rcu_r32(RCU_RST_STAT);
+ return val >> RCU_STAT_SHIFT;
}
EXPORT_SYMBOL_GPL(ltq_reset_cause);
+/* allow platform code to find out what source we booted from */
+unsigned char ltq_boot_select(void)
+{
+ u32 val = ltq_rcu_r32(RCU_RST_STAT);
+ return (val >> RCU_BOOT_SEL_SHIFT) & RCU_BOOT_SEL_MASK;
+}
+
+/* reset an io domain for u microseconds */
+void ltq_reset_once(unsigned int module, ulong u)
+{
+ ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | module, RCU_RST_REQ);
+ udelay(u);
+ ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) & ~module, RCU_RST_REQ);
+}
+
static void ltq_machine_restart(char *command)
{
- pr_notice("System restart\n");
local_irq_disable();
- ltq_rcu_w32(ltq_rcu_r32(LTQ_RCU_RST) | LTQ_RCU_RST_ALL, LTQ_RCU_RST);
+ ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | RCU_RD_SRST, RCU_RST_REQ);
unreachable();
}
static void ltq_machine_halt(void)
{
- pr_notice("System halted.\n");
local_irq_disable();
unreachable();
}
static void ltq_machine_power_off(void)
{
- pr_notice("Please turn off the power now.\n");
local_irq_disable();
unreachable();
}
static int __init mips_reboot_setup(void)
{
- /* insert and request the memory region */
- if (insert_resource(&iomem_resource, &ltq_rcu_resource) < 0)
- panic("Failed to insert rcu memory");
+ struct resource res;
+ struct device_node *np =
+ of_find_compatible_node(NULL, NULL, "lantiq,rcu-xway");
+
+ /* check if all the reset register range is available */
+ if (!np)
+ panic("Failed to load reset resources from devicetree");
+
+ if (of_address_to_resource(np, 0, &res))
+ panic("Failed to get rcu memory range");
- if (request_mem_region(ltq_rcu_resource.start,
- resource_size(&ltq_rcu_resource), "rcu") < 0)
- panic("Failed to request rcu memory");
+ if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
+ pr_err("Failed to request rcu memory");
- /* remap rcu register range */
- ltq_rcu_membase = ioremap_nocache(ltq_rcu_resource.start,
- resource_size(&ltq_rcu_resource));
+ ltq_rcu_membase = ioremap_nocache(res.start, resource_size(&res));
if (!ltq_rcu_membase)
- panic("Failed to remap rcu memory");
+ panic("Failed to remap core memory");
_machine_restart = ltq_machine_restart;
_machine_halt = ltq_machine_halt;
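Beyond the reboot hook, reset.c now exports ltq_boot_select() and ltq_reset_once() for platform code. A hedged usage sketch; the reset bit and delay below are placeholders, not documented RCU module assignments:

    /* Illustrative only: pulse a reset bit for 1 ms during board setup.
     * The bit position is a placeholder, not a documented RCU assignment. */
    extern void ltq_reset_once(unsigned int module, unsigned long u);

    static void example_reset_block(void)
    {
            ltq_reset_once(1 << 21, 1000);
    }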
diff --git a/arch/mips/lantiq/xway/setup-ase.c b/arch/mips/lantiq/xway/setup-ase.c
deleted file mode 100644
index f6f326798a39..000000000000
--- a/arch/mips/lantiq/xway/setup-ase.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
- */
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-#include "devices.h"
-
-void __init ltq_soc_setup(void)
-{
- ltq_register_ase_asc();
- ltq_register_gpio();
- ltq_register_wdt();
-}
diff --git a/arch/mips/lantiq/xway/setup-xway.c b/arch/mips/lantiq/xway/setup-xway.c
deleted file mode 100644
index c292f643a858..000000000000
--- a/arch/mips/lantiq/xway/setup-xway.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
- */
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-#include "devices.h"
-
-void __init ltq_soc_setup(void)
-{
- ltq_register_asc(0);
- ltq_register_asc(1);
- ltq_register_gpio();
- ltq_register_wdt();
-}
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
new file mode 100644
index 000000000000..83780f7c842b
--- /dev/null
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -0,0 +1,371 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011-2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+#include "../prom.h"
+
+/* clock control register */
+#define CGU_IFCCR 0x0018
+/* system clock register */
+#define CGU_SYS 0x0010
+/* pci control register */
+#define CGU_PCICR 0x0034
+/* ephy configuration register */
+#define CGU_EPHY 0x10
+/* power control register */
+#define PMU_PWDCR 0x1C
+/* power status register */
+#define PMU_PWDSR 0x20
+/* power control register */
+#define PMU_PWDCR1 0x24
+/* power status register */
+#define PMU_PWDSR1 0x28
+/* power control register */
+#define PWDCR(x) ((x) ? (PMU_PWDCR1) : (PMU_PWDCR))
+/* power status register */
+#define PWDSR(x) ((x) ? (PMU_PWDSR1) : (PMU_PWDSR))
+
+/* clock gates that we can en/disable */
+#define PMU_USB0_P BIT(0)
+#define PMU_PCI BIT(4)
+#define PMU_DMA BIT(5)
+#define PMU_USB0 BIT(6)
+#define PMU_ASC0 BIT(7)
+#define PMU_EPHY BIT(7) /* ase */
+#define PMU_SPI BIT(8)
+#define PMU_DFE BIT(9)
+#define PMU_EBU BIT(10)
+#define PMU_STP BIT(11)
+#define PMU_GPT BIT(12)
+#define PMU_AHBS BIT(13) /* vr9 */
+#define PMU_FPI BIT(14)
+#define PMU_AHBM BIT(15)
+#define PMU_ASC1 BIT(17)
+#define PMU_PPE_QSB BIT(18)
+#define PMU_PPE_SLL01 BIT(19)
+#define PMU_PPE_TC BIT(21)
+#define PMU_PPE_EMA BIT(22)
+#define PMU_PPE_DPLUM BIT(23)
+#define PMU_PPE_DPLUS BIT(24)
+#define PMU_USB1_P BIT(26)
+#define PMU_USB1 BIT(27)
+#define PMU_SWITCH BIT(28)
+#define PMU_PPE_TOP BIT(29)
+#define PMU_GPHY BIT(30)
+#define PMU_PCIE_CLK BIT(31)
+
+#define PMU1_PCIE_PHY BIT(0)
+#define PMU1_PCIE_CTL BIT(1)
+#define PMU1_PCIE_PDI BIT(4)
+#define PMU1_PCIE_MSI BIT(5)
+
+#define pmu_w32(x, y) ltq_w32((x), pmu_membase + (y))
+#define pmu_r32(x) ltq_r32(pmu_membase + (x))
+
+static void __iomem *pmu_membase;
+void __iomem *ltq_cgu_membase;
+void __iomem *ltq_ebu_membase;
+
+/* legacy function kept alive to ease clkdev transition */
+void ltq_pmu_enable(unsigned int module)
+{
+ int err = 1000000;
+
+ pmu_w32(pmu_r32(PMU_PWDCR) & ~module, PMU_PWDCR);
+ do {} while (--err && (pmu_r32(PMU_PWDSR) & module));
+
+ if (!err)
+ panic("activating PMU module failed!");
+}
+EXPORT_SYMBOL(ltq_pmu_enable);
+
+/* legacy function kept alive to ease clkdev transition */
+void ltq_pmu_disable(unsigned int module)
+{
+ pmu_w32(pmu_r32(PMU_PWDCR) | module, PMU_PWDCR);
+}
+EXPORT_SYMBOL(ltq_pmu_disable);
+
+/* enable a hw clock */
+static int cgu_enable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | clk->bits, CGU_IFCCR);
+ return 0;
+}
+
+/* disable a hw clock */
+static void cgu_disable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~clk->bits, CGU_IFCCR);
+}
+
+/* enable a clock gate */
+static int pmu_enable(struct clk *clk)
+{
+ int retry = 1000000;
+
+ pmu_w32(pmu_r32(PWDCR(clk->module)) & ~clk->bits,
+ PWDCR(clk->module));
+ do {} while (--retry && (pmu_r32(PWDSR(clk->module)) & clk->bits));
+
+ if (!retry)
+ panic("activating PMU module failed!\n");
+
+ return 0;
+}
+
+/* disable a clock gate */
+static void pmu_disable(struct clk *clk)
+{
+ pmu_w32(pmu_r32(PWDCR(clk->module)) | clk->bits,
+ PWDCR(clk->module));
+}
+
+/* the pci enable helper */
+static int pci_enable(struct clk *clk)
+{
+ unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR);
+ /* set bus clock speed */
+ if (of_machine_is_compatible("lantiq,ar9")) {
+ ifccr &= ~0x1f00000;
+ if (clk->rate == CLOCK_33M)
+ ifccr |= 0xe00000;
+ else
+ ifccr |= 0x700000; /* 62.5M */
+ } else {
+ ifccr &= ~0xf00000;
+ if (clk->rate == CLOCK_33M)
+ ifccr |= 0x800000;
+ else
+ ifccr |= 0x400000; /* 62.5M */
+ }
+ ltq_cgu_w32(ifccr, CGU_IFCCR);
+ pmu_enable(clk);
+ return 0;
+}
+
+/* enable the external clock as a source */
+static int pci_ext_enable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~(1 << 16),
+ CGU_IFCCR);
+ ltq_cgu_w32((1 << 30), CGU_PCICR);
+ return 0;
+}
+
+/* disable the external clock as a source */
+static void pci_ext_disable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | (1 << 16),
+ CGU_IFCCR);
+ ltq_cgu_w32((1 << 31) | (1 << 30), CGU_PCICR);
+}
+
+/* enable a clockout source */
+static int clkout_enable(struct clk *clk)
+{
+ int i;
+
+ /* get the correct rate */
+ for (i = 0; i < 4; i++) {
+ if (clk->rates[i] == clk->rate) {
+ int shift = 14 - (2 * clk->module);
+ unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR);
+
+ ifccr &= ~(3 << shift);
+ ifccr |= i << shift;
+ ltq_cgu_w32(ifccr, CGU_IFCCR);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/* manage the clock gates via PMU */
+static void clkdev_add_pmu(const char *dev, const char *con,
+ unsigned int module, unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = pmu_enable;
+ clk->disable = pmu_disable;
+ clk->module = module;
+ clk->bits = bits;
+ clkdev_add(&clk->cl);
+}
+
+/* manage the clock generator */
+static void clkdev_add_cgu(const char *dev, const char *con,
+ unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = cgu_enable;
+ clk->disable = cgu_disable;
+ clk->bits = bits;
+ clkdev_add(&clk->cl);
+}
+
+/* pci needs its own enable function as the setup is a bit more complex */
+static unsigned long valid_pci_rates[] = {CLOCK_33M, CLOCK_62_5M, 0};
+
+static void clkdev_add_pci(void)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ /* main pci clock */
+ clk->cl.dev_id = "17000000.pci";
+ clk->cl.con_id = NULL;
+ clk->cl.clk = clk;
+ clk->rate = CLOCK_33M;
+ clk->rates = valid_pci_rates;
+ clk->enable = pci_enable;
+ clk->disable = pmu_disable;
+ clk->module = 0;
+ clk->bits = PMU_PCI;
+ clkdev_add(&clk->cl);
+
+ /* use internal/external bus clock */
+ clk_ext->cl.dev_id = "17000000.pci";
+ clk_ext->cl.con_id = "external";
+ clk_ext->cl.clk = clk_ext;
+ clk_ext->enable = pci_ext_enable;
+ clk_ext->disable = pci_ext_disable;
+ clkdev_add(&clk_ext->cl);
+}
+
+/* xway socs can generate clocks on gpio pins */
+static unsigned long valid_clkout_rates[4][5] = {
+ {CLOCK_32_768K, CLOCK_1_536M, CLOCK_2_5M, CLOCK_12M, 0},
+ {CLOCK_40M, CLOCK_12M, CLOCK_24M, CLOCK_48M, 0},
+ {CLOCK_25M, CLOCK_40M, CLOCK_30M, CLOCK_60M, 0},
+ {CLOCK_12M, CLOCK_50M, CLOCK_32_768K, CLOCK_25M, 0},
+};
+
+static void clkdev_add_clkout(void)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ struct clk *clk;
+ char *name;
+
+ name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
+ sprintf(name, "clkout%d", i);
+
+ clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ clk->cl.dev_id = "1f103000.cgu";
+ clk->cl.con_id = name;
+ clk->cl.clk = clk;
+ clk->rate = 0;
+ clk->rates = valid_clkout_rates[i];
+ clk->enable = clkout_enable;
+ clk->module = i;
+ clkdev_add(&clk->cl);
+ }
+}
+
+/* bring up all register ranges that we need for basic system control */
+void __init ltq_soc_init(void)
+{
+ struct resource res_pmu, res_cgu, res_ebu;
+ struct device_node *np_pmu =
+ of_find_compatible_node(NULL, NULL, "lantiq,pmu-xway");
+ struct device_node *np_cgu =
+ of_find_compatible_node(NULL, NULL, "lantiq,cgu-xway");
+ struct device_node *np_ebu =
+ of_find_compatible_node(NULL, NULL, "lantiq,ebu-xway");
+
+ /* check if all the core register ranges are available */
+ if (!np_pmu || !np_cgu || !np_ebu)
+ panic("Failed to load core nodess from devicetree");
+
+ if (of_address_to_resource(np_pmu, 0, &res_pmu) ||
+ of_address_to_resource(np_cgu, 0, &res_cgu) ||
+ of_address_to_resource(np_ebu, 0, &res_ebu))
+ panic("Failed to get core resources");
+
+ if ((request_mem_region(res_pmu.start, resource_size(&res_pmu),
+ res_pmu.name) < 0) ||
+ (request_mem_region(res_cgu.start, resource_size(&res_cgu),
+ res_cgu.name) < 0) ||
+ (request_mem_region(res_ebu.start, resource_size(&res_ebu),
+ res_ebu.name) < 0))
+ pr_err("Failed to request core reources");
+
+ pmu_membase = ioremap_nocache(res_pmu.start, resource_size(&res_pmu));
+ ltq_cgu_membase = ioremap_nocache(res_cgu.start,
+ resource_size(&res_cgu));
+ ltq_ebu_membase = ioremap_nocache(res_ebu.start,
+ resource_size(&res_ebu));
+ if (!pmu_membase || !ltq_cgu_membase || !ltq_ebu_membase)
+ panic("Failed to remap core resources");
+
+ /* make sure to unprotect the memory region where flash is located */
+ ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
+
+ /* add our generic xway clocks */
+ clkdev_add_pmu("10000000.fpi", NULL, 0, PMU_FPI);
+ clkdev_add_pmu("1e100400.serial", NULL, 0, PMU_ASC0);
+ clkdev_add_pmu("1e100a00.gptu", NULL, 0, PMU_GPT);
+ clkdev_add_pmu("1e100bb0.stp", NULL, 0, PMU_STP);
+ clkdev_add_pmu("1e104100.dma", NULL, 0, PMU_DMA);
+ clkdev_add_pmu("1e100800.spi", NULL, 0, PMU_SPI);
+ clkdev_add_pmu("1e105300.ebu", NULL, 0, PMU_EBU);
+ clkdev_add_clkout();
+
+ /* add the soc dependent clocks */
+ if (!of_machine_is_compatible("lantiq,vr9"))
+ clkdev_add_pmu("1e180000.etop", NULL, 0, PMU_PPE);
+
+ if (!of_machine_is_compatible("lantiq,ase")) {
+ clkdev_add_pmu("1e100c00.serial", NULL, 0, PMU_ASC1);
+ clkdev_add_pci();
+ }
+
+ if (of_machine_is_compatible("lantiq,ase")) {
+ if (ltq_cgu_r32(CGU_SYS) & (1 << 5))
+ clkdev_add_static(CLOCK_266M, CLOCK_133M, CLOCK_133M);
+ else
+ clkdev_add_static(CLOCK_133M, CLOCK_133M, CLOCK_133M);
+ clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY),
+ clkdev_add_pmu("1e180000.etop", "ephy", 0, PMU_EPHY);
+ } else if (of_machine_is_compatible("lantiq,vr9")) {
+ clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(),
+ ltq_vr9_fpi_hz());
+ clkdev_add_pmu("1d900000.pcie", "phy", 1, PMU1_PCIE_PHY);
+ clkdev_add_pmu("1d900000.pcie", "bus", 0, PMU_PCIE_CLK);
+ clkdev_add_pmu("1d900000.pcie", "msi", 1, PMU1_PCIE_MSI);
+ clkdev_add_pmu("1d900000.pcie", "pdi", 1, PMU1_PCIE_PDI);
+ clkdev_add_pmu("1d900000.pcie", "ctl", 1, PMU1_PCIE_CTL);
+ clkdev_add_pmu("1d900000.pcie", "ahb", 0, PMU_AHBM | PMU_AHBS);
+ } else if (of_machine_is_compatible("lantiq,ar9")) {
+ clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
+ ltq_ar9_fpi_hz());
+ clkdev_add_pmu("1e180000.etop", "switch", 0, PMU_SWITCH);
+ } else {
+ clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
+ ltq_danube_fpi_hz());
+ }
+}
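The clkdev entries registered by ltq_soc_init() are keyed on device names, so drivers can ungate their blocks through the generic clk API instead of calling ltq_pmu_enable() directly. A minimal consumer sketch for a device matched by the "1e100bb0.stp" entry above (error handling trimmed to the essentials):

    #include <linux/platform_device.h>
    #include <linux/clk.h>
    #include <linux/err.h>

    static int example_stp_probe(struct platform_device *pdev)
    {
            /* resolved against the "1e100bb0.stp" clkdev entry registered above */
            struct clk *clk = clk_get(&pdev->dev, NULL);

            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            /* pmu_enable() clears the PMU_STP gate bit and polls the status reg */
            clk_enable(clk);
            return 0;
    }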
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 47037ec5589b..44e69e7a4519 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -21,6 +21,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
+#include <asm/traps.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
@@ -248,6 +249,11 @@ static void __cpuinit probe_octeon(void)
}
}
+static void __cpuinit octeon_cache_error_setup(void)
+{
+ extern char except_vec2_octeon;
+ set_handler(0x100, &except_vec2_octeon, 0x80);
+}
/**
* Setup the Octeon cache flush routines
@@ -255,12 +261,6 @@ static void __cpuinit probe_octeon(void)
*/
void __cpuinit octeon_cache_init(void)
{
- extern unsigned long ebase;
- extern char except_vec2_octeon;
-
- memcpy((void *)(ebase + 0x100), &except_vec2_octeon, 0x80);
- octeon_flush_cache_sigtramp(ebase + 0x100);
-
probe_octeon();
shm_align_mask = PAGE_SIZE - 1;
@@ -280,6 +280,8 @@ void __cpuinit octeon_cache_init(void)
build_clear_page();
build_copy_page();
+
+ board_cache_error_setup = octeon_cache_error_setup;
}
/**
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index bda8eb26ece7..5109be96d98d 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -32,7 +32,7 @@
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
-
+#include <asm/traps.h>
/*
* Special Variant of smp_call_function for use by cache functions:
@@ -1385,10 +1385,8 @@ static int __init setcoherentio(char *str)
__setup("coherentio", setcoherentio);
#endif
-void __cpuinit r4k_cache_init(void)
+static void __cpuinit r4k_cache_error_setup(void)
{
- extern void build_clear_page(void);
- extern void build_copy_page(void);
extern char __weak except_vec2_generic;
extern char __weak except_vec2_sb1;
struct cpuinfo_mips *c = &current_cpu_data;
@@ -1403,6 +1401,13 @@ void __cpuinit r4k_cache_init(void)
set_uncached_handler(0x100, &except_vec2_generic, 0x80);
break;
}
+}
+
+void __cpuinit r4k_cache_init(void)
+{
+ extern void build_clear_page(void);
+ extern void build_copy_page(void);
+ struct cpuinfo_mips *c = &current_cpu_data;
probe_pcache();
setup_scache();
@@ -1465,4 +1470,5 @@ void __cpuinit r4k_cache_init(void)
local_r4k___flush_cache_all(NULL);
#endif
coherency_setup();
+ board_cache_error_setup = r4k_cache_error_setup;
}
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
index 29f2f13eb31c..1208c280f77d 100644
--- a/arch/mips/oprofile/Makefile
+++ b/arch/mips/oprofile/Makefile
@@ -1,5 +1,3 @@
-ccflags-y := -Werror
-
obj-$(CONFIG_OPROFILE) += oprofile.o
DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 54759f1669d3..baba3bcaa3c2 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -298,6 +298,11 @@ static void reset_counters(void *arg)
}
}
+static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
+{
+ return mipsxx_perfcount_handler();
+}
+
static int __init mipsxx_init(void)
{
int counters;
@@ -374,6 +379,10 @@ static int __init mipsxx_init(void)
save_perf_irq = perf_irq;
perf_irq = mipsxx_perfcount_handler;
+ if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
+ return request_irq(cp0_perfcount_irq, mipsxx_perfcount_int,
+ 0, "Perfcounter", save_perf_irq);
+
return 0;
}
@@ -381,6 +390,9 @@ static void mipsxx_exit(void)
{
int counters = op_model_mipsxx_ops.num_counters;
+ if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
+ free_irq(cp0_perfcount_irq, save_perf_irq);
+
counters = counters_per_cpu_to_total(counters);
on_each_cpu(reset_counters, (void *)(long)counters, 1);
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index c3ac4b086eb2..c703f43a9914 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -19,7 +19,8 @@ obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o
obj-$(CONFIG_BCM63XX) += pci-bcm63xx.o fixup-bcm63xx.o \
ops-bcm63xx.o
obj-$(CONFIG_MIPS_ALCHEMY) += pci-alchemy.o
-obj-$(CONFIG_SOC_AR724X) += pci-ath724x.o
+obj-$(CONFIG_SOC_AR71XX) += pci-ar71xx.o
+obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o
#
# These are still pretty much in the old state, watch, go blind.
@@ -41,7 +42,8 @@ obj-$(CONFIG_SIBYTE_SB1250) += fixup-sb1250.o pci-sb1250.o
obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o
obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o
obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o
-obj-$(CONFIG_SOC_XWAY) += pci-lantiq.o ops-lantiq.o
+obj-$(CONFIG_LANTIQ) += fixup-lantiq.o
+obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o
obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o
obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o
obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o
diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c
new file mode 100644
index 000000000000..6c829df28dc7
--- /dev/null
+++ b/arch/mips/pci/fixup-lantiq.c
@@ -0,0 +1,40 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+
+int (*ltq_pci_plat_arch_init)(struct pci_dev *dev) = NULL;
+int (*ltq_pci_plat_dev_init)(struct pci_dev *dev) = NULL;
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ if (ltq_pci_plat_arch_init)
+ return ltq_pci_plat_arch_init(dev);
+
+ if (ltq_pci_plat_dev_init)
+ return ltq_pci_plat_dev_init(dev);
+
+ return 0;
+}
+
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct of_irq dev_irq;
+ int irq;
+
+ if (of_irq_map_pci(dev, &dev_irq)) {
+ dev_err(&dev->dev, "trying to map irq for unknown slot:%d pin:%d\n",
+ slot, pin);
+ return 0;
+ }
+ irq = irq_create_of_mapping(dev_irq.controller, dev_irq.specifier,
+ dev_irq.size);
+ dev_info(&dev->dev, "SLOT:%d PIN:%d IRQ:%d\n", slot, pin, irq);
+ return irq;
+}
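fixup-lantiq.c routes pcibios_plat_dev_init() through function pointers so board or PCI-controller code can install per-device fixups at runtime. A sketch of hooking one in; the fixup body is illustrative only:

    #include <linux/init.h>
    #include <linux/pci.h>

    extern int (*ltq_pci_plat_dev_init)(struct pci_dev *dev);

    /* Illustrative fixup: only logs the device; a real hook might flip
     * board-specific reset GPIOs or adjust latency timers. */
    static int example_pci_dev_init(struct pci_dev *dev)
    {
            dev_info(&dev->dev, "board PCI fixup ran\n");
            return 0;
    }

    static int __init example_pci_fixup_setup(void)
    {
            ltq_pci_plat_dev_init = example_pci_dev_init;
            return 0;
    }
    arch_initcall(example_pci_fixup_setup);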
diff --git a/arch/mips/pci/ops-loongson2.c b/arch/mips/pci/ops-loongson2.c
index d657ee0bc131..afd221122d22 100644
--- a/arch/mips/pci/ops-loongson2.c
+++ b/arch/mips/pci/ops-loongson2.c
@@ -15,6 +15,7 @@
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/export.h>
#include <loongson.h>
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
new file mode 100644
index 000000000000..1552522b8718
--- /dev/null
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -0,0 +1,375 @@
+/*
+ * Atheros AR71xx PCI host controller driver
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15 BSP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/pci.h>
+
+#define AR71XX_PCI_MEM_BASE 0x10000000
+#define AR71XX_PCI_MEM_SIZE 0x08000000
+
+#define AR71XX_PCI_WIN0_OFFS 0x10000000
+#define AR71XX_PCI_WIN1_OFFS 0x11000000
+#define AR71XX_PCI_WIN2_OFFS 0x12000000
+#define AR71XX_PCI_WIN3_OFFS 0x13000000
+#define AR71XX_PCI_WIN4_OFFS 0x14000000
+#define AR71XX_PCI_WIN5_OFFS 0x15000000
+#define AR71XX_PCI_WIN6_OFFS 0x16000000
+#define AR71XX_PCI_WIN7_OFFS 0x07000000
+
+#define AR71XX_PCI_CFG_BASE \
+ (AR71XX_PCI_MEM_BASE + AR71XX_PCI_WIN7_OFFS + 0x10000)
+#define AR71XX_PCI_CFG_SIZE 0x100
+
+#define AR71XX_PCI_REG_CRP_AD_CBE 0x00
+#define AR71XX_PCI_REG_CRP_WRDATA 0x04
+#define AR71XX_PCI_REG_CRP_RDDATA 0x08
+#define AR71XX_PCI_REG_CFG_AD 0x0c
+#define AR71XX_PCI_REG_CFG_CBE 0x10
+#define AR71XX_PCI_REG_CFG_WRDATA 0x14
+#define AR71XX_PCI_REG_CFG_RDDATA 0x18
+#define AR71XX_PCI_REG_PCI_ERR 0x1c
+#define AR71XX_PCI_REG_PCI_ERR_ADDR 0x20
+#define AR71XX_PCI_REG_AHB_ERR 0x24
+#define AR71XX_PCI_REG_AHB_ERR_ADDR 0x28
+
+#define AR71XX_PCI_CRP_CMD_WRITE 0x00010000
+#define AR71XX_PCI_CRP_CMD_READ 0x00000000
+#define AR71XX_PCI_CFG_CMD_READ 0x0000000a
+#define AR71XX_PCI_CFG_CMD_WRITE 0x0000000b
+
+#define AR71XX_PCI_INT_CORE BIT(4)
+#define AR71XX_PCI_INT_DEV2 BIT(2)
+#define AR71XX_PCI_INT_DEV1 BIT(1)
+#define AR71XX_PCI_INT_DEV0 BIT(0)
+
+#define AR71XX_PCI_IRQ_COUNT 5
+
+static DEFINE_SPINLOCK(ar71xx_pci_lock);
+static void __iomem *ar71xx_pcicfg_base;
+
+/* Byte lane enable bits */
+static const u8 ar71xx_pci_ble_table[4][4] = {
+ {0x0, 0xf, 0xf, 0xf},
+ {0xe, 0xd, 0xb, 0x7},
+ {0xc, 0xf, 0x3, 0xf},
+ {0xf, 0xf, 0xf, 0xf},
+};
+
+static const u32 ar71xx_pci_read_mask[8] = {
+ 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0
+};
+
+static inline u32 ar71xx_pci_get_ble(int where, int size, int local)
+{
+ u32 t;
+
+ t = ar71xx_pci_ble_table[size & 3][where & 3];
+ BUG_ON(t == 0xf);
+ t <<= (local) ? 20 : 4;
+
+ return t;
+}
+
+static inline u32 ar71xx_pci_bus_addr(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ u32 ret;
+
+ if (!bus->number) {
+ /* type 0 */
+ ret = (1 << PCI_SLOT(devfn)) | (PCI_FUNC(devfn) << 8) |
+ (where & ~3);
+ } else {
+ /* type 1 */
+ ret = (bus->number << 16) | (PCI_SLOT(devfn) << 11) |
+ (PCI_FUNC(devfn) << 8) | (where & ~3) | 1;
+ }
+
+ return ret;
+}
+
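For reference, the encoding produced by ar71xx_pci_bus_addr() above, worked through for two example targets (illustration only):

/*
 * bus 0, slot 3, func 0, reg 0x10 (type 0 cycle):
 *	(1 << 3) | (0 << 8) | (0x10 & ~3)		== 0x00000018
 * bus 1, slot 2, func 1, reg 0x04 (type 1 cycle):
 *	(1 << 16) | (2 << 11) | (1 << 8) | 0x04 | 1	== 0x00011105
 */
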
+static int ar71xx_pci_check_error(int quiet)
+{
+ void __iomem *base = ar71xx_pcicfg_base;
+ u32 pci_err;
+ u32 ahb_err;
+
+ pci_err = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR) & 3;
+ if (pci_err) {
+ if (!quiet) {
+ u32 addr;
+
+ addr = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR_ADDR);
+ pr_crit("ar71xx: %s bus error %d at addr 0x%x\n",
+ "PCI", pci_err, addr);
+ }
+
+ /* clear PCI error status */
+ __raw_writel(pci_err, base + AR71XX_PCI_REG_PCI_ERR);
+ }
+
+ ahb_err = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR) & 1;
+ if (ahb_err) {
+ if (!quiet) {
+ u32 addr;
+
+ addr = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR_ADDR);
+ pr_crit("ar71xx: %s bus error %d at addr 0x%x\n",
+ "AHB", ahb_err, addr);
+ }
+
+ /* clear AHB error status */
+ __raw_writel(ahb_err, base + AR71XX_PCI_REG_AHB_ERR);
+ }
+
+ return !!(ahb_err | pci_err);
+}
+
+static inline void ar71xx_pci_local_write(int where, int size, u32 value)
+{
+ void __iomem *base = ar71xx_pcicfg_base;
+ u32 ad_cbe;
+
+ value = value << (8 * (where & 3));
+
+ ad_cbe = AR71XX_PCI_CRP_CMD_WRITE | (where & ~3);
+ ad_cbe |= ar71xx_pci_get_ble(where, size, 1);
+
+ __raw_writel(ad_cbe, base + AR71XX_PCI_REG_CRP_AD_CBE);
+ __raw_writel(value, base + AR71XX_PCI_REG_CRP_WRDATA);
+}
+
+static inline int ar71xx_pci_set_cfgaddr(struct pci_bus *bus,
+ unsigned int devfn,
+ int where, int size, u32 cmd)
+{
+ void __iomem *base = ar71xx_pcicfg_base;
+ u32 addr;
+
+ addr = ar71xx_pci_bus_addr(bus, devfn, where);
+
+ __raw_writel(addr, base + AR71XX_PCI_REG_CFG_AD);
+ __raw_writel(cmd | ar71xx_pci_get_ble(where, size, 0),
+ base + AR71XX_PCI_REG_CFG_CBE);
+
+ return ar71xx_pci_check_error(1);
+}
+
+static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ void __iomem *base = ar71xx_pcicfg_base;
+ unsigned long flags;
+ u32 data;
+ int err;
+ int ret;
+
+ ret = PCIBIOS_SUCCESSFUL;
+ data = ~0;
+
+ spin_lock_irqsave(&ar71xx_pci_lock, flags);
+
+ err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
+ AR71XX_PCI_CFG_CMD_READ);
+ if (err)
+ ret = PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA);
+
+ spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
+
+ *value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7];
+
+ return ret;
+}
+
+static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ void __iomem *base = ar71xx_pcicfg_base;
+ unsigned long flags;
+ int err;
+ int ret;
+
+ value = value << (8 * (where & 3));
+ ret = PCIBIOS_SUCCESSFUL;
+
+ spin_lock_irqsave(&ar71xx_pci_lock, flags);
+
+ err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
+ AR71XX_PCI_CFG_CMD_WRITE);
+ if (err)
+ ret = PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ __raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA);
+
+ spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
+
+ return ret;
+}
+
+static struct pci_ops ar71xx_pci_ops = {
+ .read = ar71xx_pci_read_config,
+ .write = ar71xx_pci_write_config,
+};
+
+static struct resource ar71xx_pci_io_resource = {
+ .name = "PCI IO space",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_IO,
+};
+
+static struct resource ar71xx_pci_mem_resource = {
+ .name = "PCI memory space",
+ .start = AR71XX_PCI_MEM_BASE,
+ .end = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1,
+ .flags = IORESOURCE_MEM
+};
+
+static struct pci_controller ar71xx_pci_controller = {
+ .pci_ops = &ar71xx_pci_ops,
+ .mem_resource = &ar71xx_pci_mem_resource,
+ .io_resource = &ar71xx_pci_io_resource,
+};
+
+static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ void __iomem *base = ath79_reset_base;
+ u32 pending;
+
+ pending = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_STATUS) &
+ __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+
+ if (pending & AR71XX_PCI_INT_DEV0)
+ generic_handle_irq(ATH79_PCI_IRQ(0));
+
+ else if (pending & AR71XX_PCI_INT_DEV1)
+ generic_handle_irq(ATH79_PCI_IRQ(1));
+
+ else if (pending & AR71XX_PCI_INT_DEV2)
+ generic_handle_irq(ATH79_PCI_IRQ(2));
+
+ else if (pending & AR71XX_PCI_INT_CORE)
+ generic_handle_irq(ATH79_PCI_IRQ(4));
+
+ else
+ spurious_interrupt();
+}
+
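One point worth noting about the chained handler above: the else-if ladder services a single source per invocation, highest priority first, and apparently relies on the level-triggered parent line re-firing while other bits remain pending. The resulting dispatch, for reference (bit 3 is unused on this SoC):

/*
 * AR71XX_PCI_INT_DEV0 -> ATH79_PCI_IRQ(0)
 * AR71XX_PCI_INT_DEV1 -> ATH79_PCI_IRQ(1)
 * AR71XX_PCI_INT_DEV2 -> ATH79_PCI_IRQ(2)
 * AR71XX_PCI_INT_CORE -> ATH79_PCI_IRQ(4)
 * no bit pending      -> spurious_interrupt()
 */
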
+static void ar71xx_pci_irq_unmask(struct irq_data *d)
+{
+ unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
+ void __iomem *base = ath79_reset_base;
+ u32 t;
+
+ t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+ __raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+
+ /* flush write */
+ __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+}
+
+static void ar71xx_pci_irq_mask(struct irq_data *d)
+{
+ unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
+ void __iomem *base = ath79_reset_base;
+ u32 t;
+
+ t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+ __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+
+ /* flush write */
+ __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+}
+
+static struct irq_chip ar71xx_pci_irq_chip = {
+ .name = "AR71XX PCI",
+ .irq_mask = ar71xx_pci_irq_mask,
+ .irq_unmask = ar71xx_pci_irq_unmask,
+ .irq_mask_ack = ar71xx_pci_irq_mask,
+};
+
+static __init void ar71xx_pci_irq_init(void)
+{
+ void __iomem *base = ath79_reset_base;
+ int i;
+
+ __raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+ __raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_STATUS);
+
+ BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR71XX_PCI_IRQ_COUNT);
+
+ for (i = ATH79_PCI_IRQ_BASE;
+ i < ATH79_PCI_IRQ_BASE + AR71XX_PCI_IRQ_COUNT; i++)
+ irq_set_chip_and_handler(i, &ar71xx_pci_irq_chip,
+ handle_level_irq);
+
+ irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar71xx_pci_irq_handler);
+}
+
+static __init void ar71xx_pci_reset(void)
+{
+ void __iomem *ddr_base = ath79_ddr_base;
+
+ ath79_device_reset_set(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE);
+ mdelay(100);
+
+ ath79_device_reset_clear(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE);
+ mdelay(100);
+
+ __raw_writel(AR71XX_PCI_WIN0_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN0);
+ __raw_writel(AR71XX_PCI_WIN1_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN1);
+ __raw_writel(AR71XX_PCI_WIN2_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN2);
+ __raw_writel(AR71XX_PCI_WIN3_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN3);
+ __raw_writel(AR71XX_PCI_WIN4_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN4);
+ __raw_writel(AR71XX_PCI_WIN5_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN5);
+ __raw_writel(AR71XX_PCI_WIN6_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN6);
+ __raw_writel(AR71XX_PCI_WIN7_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN7);
+
+ mdelay(100);
+}
+
+__init int ar71xx_pcibios_init(void)
+{
+ u32 t;
+
+ ar71xx_pcicfg_base = ioremap(AR71XX_PCI_CFG_BASE, AR71XX_PCI_CFG_SIZE);
+ if (ar71xx_pcicfg_base == NULL)
+ return -ENOMEM;
+
+ ar71xx_pci_reset();
+
+ /* setup COMMAND register */
+ t = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE
+ | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
+ ar71xx_pci_local_write(PCI_COMMAND, 4, t);
+
+ /* clear bus errors */
+ ar71xx_pci_check_error(1);
+
+ ar71xx_pci_irq_init();
+
+ register_pci_controller(&ar71xx_pci_controller);
+
+ return 0;
+}
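
Worked example of the CRP ("local") configuration write that ar71xx_pcibios_init() issues above when it programs the COMMAND register; the values are derived from the code and shown only for illustration:

/*
 * t = MEMORY|MASTER|INVALIDATE|PARITY|SERR|FAST_BACK	    = 0x0356
 * ar71xx_pci_local_write(PCI_COMMAND, 4, t) then writes
 *	CRP_AD_CBE = AR71XX_PCI_CRP_CMD_WRITE | (0x04 & ~3) = 0x00010004
 *		     (byte-lane enables are 0 for a full 32-bit access)
 *	CRP_WRDATA = 0x0356
 */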
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
new file mode 100644
index 000000000000..414a7459858d
--- /dev/null
+++ b/arch/mips/pci/pci-ar724x.c
@@ -0,0 +1,292 @@
+/*
+ * Atheros AR724X PCI host controller driver
+ *
+ * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/pci.h>
+
+#define AR724X_PCI_CFG_BASE 0x14000000
+#define AR724X_PCI_CFG_SIZE 0x1000
+#define AR724X_PCI_CTRL_BASE (AR71XX_APB_BASE + 0x000f0000)
+#define AR724X_PCI_CTRL_SIZE 0x100
+
+#define AR724X_PCI_MEM_BASE 0x10000000
+#define AR724X_PCI_MEM_SIZE 0x08000000
+
+#define AR724X_PCI_REG_INT_STATUS 0x4c
+#define AR724X_PCI_REG_INT_MASK 0x50
+
+#define AR724X_PCI_INT_DEV0 BIT(14)
+
+#define AR724X_PCI_IRQ_COUNT 1
+
+#define AR7240_BAR0_WAR_VALUE 0xffff
+
+static DEFINE_SPINLOCK(ar724x_pci_lock);
+static void __iomem *ar724x_pci_devcfg_base;
+static void __iomem *ar724x_pci_ctrl_base;
+
+static u32 ar724x_pci_bar0_value;
+static bool ar724x_pci_bar0_is_cached;
+
+static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, uint32_t *value)
+{
+ unsigned long flags;
+ void __iomem *base;
+ u32 data;
+
+ if (devfn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ base = ar724x_pci_devcfg_base;
+
+ spin_lock_irqsave(&ar724x_pci_lock, flags);
+ data = __raw_readl(base + (where & ~3));
+
+ switch (size) {
+ case 1:
+ if (where & 1)
+ data >>= 8;
+ if (where & 2)
+ data >>= 16;
+ data &= 0xff;
+ break;
+ case 2:
+ if (where & 2)
+ data >>= 16;
+ data &= 0xffff;
+ break;
+ case 4:
+ break;
+ default:
+ spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+ if (where == PCI_BASE_ADDRESS_0 && size == 4 &&
+ ar724x_pci_bar0_is_cached) {
+ /* use the cached value */
+ *value = ar724x_pci_bar0_value;
+ } else {
+ *value = data;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
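
To make the sub-word handling above concrete: a 1-byte read at where = 0x0f fetches the aligned dword at 0x0c, shifts by 8 (offset bit 0) plus 16 (offset bit 1), 24 bits in total, and masks with 0xff, which selects the byte at offset 0x0f of the little-endian register. Worked example (illustration only):

/*
 * where = 0x0f, size = 1:
 *	data = dword at 0x0c;
 *	data >>= 8;	(where & 1)
 *	data >>= 16;	(where & 2)
 *	data &= 0xff;	-> byte at offset 0x0f
 */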
+
+static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, uint32_t value)
+{
+ unsigned long flags;
+ void __iomem *base;
+ u32 data;
+ int s;
+
+ if (devfn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (soc_is_ar7240() && where == PCI_BASE_ADDRESS_0 && size == 4) {
+ if (value != 0xffffffff) {
+ /*
+ * WAR for a hw issue. If the BAR0 register of the
+ * device is set to the proper base address, the
+ * memory space of the device is not accessible.
+ *
+ * Cache the intended value so it can be read back,
+ * and write a SoC specific constant value to the
+ * BAR0 register in order to make the device memory
+ * accessible.
+ */
+ ar724x_pci_bar0_is_cached = true;
+ ar724x_pci_bar0_value = value;
+
+ value = AR7240_BAR0_WAR_VALUE;
+ } else {
+ ar724x_pci_bar0_is_cached = false;
+ }
+ }
+
+ base = ar724x_pci_devcfg_base;
+
+ spin_lock_irqsave(&ar724x_pci_lock, flags);
+ data = __raw_readl(base + (where & ~3));
+
+ switch (size) {
+ case 1:
+ s = ((where & 3) * 8);
+ data &= ~(0xff << s);
+ data |= ((value & 0xff) << s);
+ break;
+ case 2:
+ s = ((where & 2) * 8);
+ data &= ~(0xffff << s);
+ data |= ((value & 0xffff) << s);
+ break;
+ case 4:
+ data = value;
+ break;
+ default:
+ spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ __raw_writel(data, base + (where & ~3));
+ /* flush write */
+ __raw_readl(base + (where & ~3));
+ spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+ return PCIBIOS_SUCCESSFUL;
+}
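
Sketch of the AR7240 BAR0 workaround round trip implemented by the read and write paths above (illustration only):

/*
 * write BAR0 = 0x10000000 -> value cached, hardware BAR0 set to 0xffff
 * read  BAR0              -> cached 0x10000000 returned, so the PCI core
 *			      still sees the address it assigned
 * write BAR0 = 0xffffffff -> size probe: cache dropped, value written to
 *			      the hardware register as usual
 */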
+
+static struct pci_ops ar724x_pci_ops = {
+ .read = ar724x_pci_read,
+ .write = ar724x_pci_write,
+};
+
+static struct resource ar724x_io_resource = {
+ .name = "PCI IO space",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_IO,
+};
+
+static struct resource ar724x_mem_resource = {
+ .name = "PCI memory space",
+ .start = AR724X_PCI_MEM_BASE,
+ .end = AR724X_PCI_MEM_BASE + AR724X_PCI_MEM_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct pci_controller ar724x_pci_controller = {
+ .pci_ops = &ar724x_pci_ops,
+ .io_resource = &ar724x_io_resource,
+ .mem_resource = &ar724x_mem_resource,
+};
+
+static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ void __iomem *base;
+ u32 pending;
+
+ base = ar724x_pci_ctrl_base;
+
+ pending = __raw_readl(base + AR724X_PCI_REG_INT_STATUS) &
+ __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+
+ if (pending & AR724X_PCI_INT_DEV0)
+ generic_handle_irq(ATH79_PCI_IRQ(0));
+
+ else
+ spurious_interrupt();
+}
+
+static void ar724x_pci_irq_unmask(struct irq_data *d)
+{
+ void __iomem *base;
+ u32 t;
+
+ base = ar724x_pci_ctrl_base;
+
+ switch (d->irq) {
+ case ATH79_PCI_IRQ(0):
+ t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+ __raw_writel(t | AR724X_PCI_INT_DEV0,
+ base + AR724X_PCI_REG_INT_MASK);
+ /* flush write */
+ __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+ }
+}
+
+static void ar724x_pci_irq_mask(struct irq_data *d)
+{
+ void __iomem *base;
+ u32 t;
+
+ base = ar724x_pci_ctrl_base;
+
+ switch (d->irq) {
+ case ATH79_PCI_IRQ(0):
+ t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+ __raw_writel(t & ~AR724X_PCI_INT_DEV0,
+ base + AR724X_PCI_REG_INT_MASK);
+
+ /* flush write */
+ __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+
+ t = __raw_readl(base + AR724X_PCI_REG_INT_STATUS);
+ __raw_writel(t | AR724X_PCI_INT_DEV0,
+ base + AR724X_PCI_REG_INT_STATUS);
+
+ /* flush write */
+ __raw_readl(base + AR724X_PCI_REG_INT_STATUS);
+ }
+}
+
+static struct irq_chip ar724x_pci_irq_chip = {
+ .name = "AR724X PCI ",
+ .irq_mask = ar724x_pci_irq_mask,
+ .irq_unmask = ar724x_pci_irq_unmask,
+ .irq_mask_ack = ar724x_pci_irq_mask,
+};
+
+static void __init ar724x_pci_irq_init(int irq)
+{
+ void __iomem *base;
+ int i;
+
+ base = ar724x_pci_ctrl_base;
+
+ __raw_writel(0, base + AR724X_PCI_REG_INT_MASK);
+ __raw_writel(0, base + AR724X_PCI_REG_INT_STATUS);
+
+ BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR724X_PCI_IRQ_COUNT);
+
+ for (i = ATH79_PCI_IRQ_BASE;
+ i < ATH79_PCI_IRQ_BASE + AR724X_PCI_IRQ_COUNT; i++)
+ irq_set_chip_and_handler(i, &ar724x_pci_irq_chip,
+ handle_level_irq);
+
+ irq_set_chained_handler(irq, ar724x_pci_irq_handler);
+}
+
+int __init ar724x_pcibios_init(int irq)
+{
+ int ret;
+
+ ret = -ENOMEM;
+
+ ar724x_pci_devcfg_base = ioremap(AR724X_PCI_CFG_BASE,
+ AR724X_PCI_CFG_SIZE);
+ if (ar724x_pci_devcfg_base == NULL)
+ goto err;
+
+ ar724x_pci_ctrl_base = ioremap(AR724X_PCI_CTRL_BASE,
+ AR724X_PCI_CTRL_SIZE);
+ if (ar724x_pci_ctrl_base == NULL)
+ goto err_unmap_devcfg;
+
+ ar724x_pci_irq_init(irq);
+ register_pci_controller(&ar724x_pci_controller);
+
+ return PCIBIOS_SUCCESSFUL;
+
+err_unmap_devcfg:
+ iounmap(ar724x_pci_devcfg_base);
+err:
+ return ret;
+}
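
A hypothetical caller, sketched only to show how the two init entry points might be selected at platform setup time; the helper names (soc_is_ar71xx(), soc_is_ar724x()), the header paths and the choice of cascade IRQ are assumptions, not something this patch adds:

#include <linux/errno.h>
#include <linux/init.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/irq.h>
#include <asm/mach-ath79/pci.h>

static int __init board_register_pci(void)
{
	if (soc_is_ar71xx())
		return ar71xx_pcibios_init();
	if (soc_is_ar724x())
		return ar724x_pcibios_init(ATH79_CPU_IRQ_IP2);
	return -ENODEV;
}
arch_initcall(board_register_pci);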
diff --git a/arch/mips/pci/pci-ath724x.c b/arch/mips/pci/pci-ath724x.c
deleted file mode 100644
index a4dd24a4130b..000000000000
--- a/arch/mips/pci/pci-ath724x.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Atheros 724x PCI support
- *
- * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include <linux/pci.h>
-#include <asm/mach-ath79/pci-ath724x.h>
-
-#define reg_read(_phys) (*(unsigned int *) KSEG1ADDR(_phys))
-#define reg_write(_phys, _val) ((*(unsigned int *) KSEG1ADDR(_phys)) = (_val))
-
-#define ATH724X_PCI_DEV_BASE 0x14000000
-#define ATH724X_PCI_MEM_BASE 0x10000000
-#define ATH724X_PCI_MEM_SIZE 0x08000000
-
-static DEFINE_SPINLOCK(ath724x_pci_lock);
-static struct ath724x_pci_data *pci_data;
-static int pci_data_size;
-
-static int ath724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
- int size, uint32_t *value)
-{
- unsigned long flags, addr, tval, mask;
-
- if (devfn)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (where & (size - 1))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- spin_lock_irqsave(&ath724x_pci_lock, flags);
-
- switch (size) {
- case 1:
- addr = where & ~3;
- mask = 0xff000000 >> ((where % 4) * 8);
- tval = reg_read(ATH724X_PCI_DEV_BASE + addr);
- tval = tval & ~mask;
- *value = (tval >> ((4 - (where % 4))*8));
- break;
- case 2:
- addr = where & ~3;
- mask = 0xffff0000 >> ((where % 4)*8);
- tval = reg_read(ATH724X_PCI_DEV_BASE + addr);
- tval = tval & ~mask;
- *value = (tval >> ((4 - (where % 4))*8));
- break;
- case 4:
- *value = reg_read(ATH724X_PCI_DEV_BASE + where);
- break;
- default:
- spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
-
- spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int ath724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
- int size, uint32_t value)
-{
- unsigned long flags, tval, addr, mask;
-
- if (devfn)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (where & (size - 1))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- spin_lock_irqsave(&ath724x_pci_lock, flags);
-
- switch (size) {
- case 1:
- addr = (ATH724X_PCI_DEV_BASE + where) & ~3;
- mask = 0xff000000 >> ((where % 4)*8);
- tval = reg_read(addr);
- tval = tval & ~mask;
- tval |= (value << ((4 - (where % 4))*8)) & mask;
- reg_write(addr, tval);
- break;
- case 2:
- addr = (ATH724X_PCI_DEV_BASE + where) & ~3;
- mask = 0xffff0000 >> ((where % 4)*8);
- tval = reg_read(addr);
- tval = tval & ~mask;
- tval |= (value << ((4 - (where % 4))*8)) & mask;
- reg_write(addr, tval);
- break;
- case 4:
- reg_write((ATH724X_PCI_DEV_BASE + where), value);
- break;
- default:
- spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
-
- spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops ath724x_pci_ops = {
- .read = ath724x_pci_read,
- .write = ath724x_pci_write,
-};
-
-static struct resource ath724x_io_resource = {
- .name = "PCI IO space",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_IO,
-};
-
-static struct resource ath724x_mem_resource = {
- .name = "PCI memory space",
- .start = ATH724X_PCI_MEM_BASE,
- .end = ATH724X_PCI_MEM_BASE + ATH724X_PCI_MEM_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct pci_controller ath724x_pci_controller = {
- .pci_ops = &ath724x_pci_ops,
- .io_resource = &ath724x_io_resource,
- .mem_resource = &ath724x_mem_resource,
-};
-
-void ath724x_pci_add_data(struct ath724x_pci_data *data, int size)
-{
- pci_data = data;
- pci_data_size = size;
-}
-
-int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
-{
- unsigned int devfn = dev->devfn;
- int irq = -1;
-
- if (devfn > pci_data_size - 1)
- return irq;
-
- irq = pci_data[devfn].irq;
-
- return irq;
-}
-
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- unsigned int devfn = dev->devfn;
-
- if (devfn > pci_data_size - 1)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- dev->dev.platform_data = pci_data[devfn].pdata;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int __init ath724x_pcibios_init(void)
-{
- register_pci_controller(&ath724x_pci_controller);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-arch_initcall(ath724x_pcibios_init);
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 030c77e7926e..ea453532a33c 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -13,8 +13,12 @@
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
#include <asm/pci.h>
#include <asm/gpio.h>
@@ -22,17 +26,9 @@
#include <lantiq_soc.h>
#include <lantiq_irq.h>
-#include <lantiq_platform.h>
#include "pci-lantiq.h"
-#define LTQ_PCI_CFG_BASE 0x17000000
-#define LTQ_PCI_CFG_SIZE 0x00008000
-#define LTQ_PCI_MEM_BASE 0x18000000
-#define LTQ_PCI_MEM_SIZE 0x02000000
-#define LTQ_PCI_IO_BASE 0x1AE00000
-#define LTQ_PCI_IO_SIZE 0x00200000
-
#define PCI_CR_FCI_ADDR_MAP0 0x00C0
#define PCI_CR_FCI_ADDR_MAP1 0x00C4
#define PCI_CR_FCI_ADDR_MAP2 0x00C8
@@ -68,79 +64,27 @@
#define ltq_pci_cfg_w32(x, y) ltq_w32((x), ltq_pci_mapped_cfg + (y))
#define ltq_pci_cfg_r32(x) ltq_r32(ltq_pci_mapped_cfg + (x))
-struct ltq_pci_gpio_map {
- int pin;
- int alt0;
- int alt1;
- int dir;
- char *name;
-};
-
-/* the pci core can make use of the following gpios */
-static struct ltq_pci_gpio_map ltq_pci_gpio_map[] = {
- { 0, 1, 0, 0, "pci-exin0" },
- { 1, 1, 0, 0, "pci-exin1" },
- { 2, 1, 0, 0, "pci-exin2" },
- { 39, 1, 0, 0, "pci-exin3" },
- { 10, 1, 0, 0, "pci-exin4" },
- { 9, 1, 0, 0, "pci-exin5" },
- { 30, 1, 0, 1, "pci-gnt1" },
- { 23, 1, 0, 1, "pci-gnt2" },
- { 19, 1, 0, 1, "pci-gnt3" },
- { 38, 1, 0, 1, "pci-gnt4" },
- { 29, 1, 0, 0, "pci-req1" },
- { 31, 1, 0, 0, "pci-req2" },
- { 3, 1, 0, 0, "pci-req3" },
- { 37, 1, 0, 0, "pci-req4" },
-};
-
__iomem void *ltq_pci_mapped_cfg;
static __iomem void *ltq_pci_membase;
-int (*ltqpci_plat_dev_init)(struct pci_dev *dev) = NULL;
-
-/* Since the PCI REQ pins can be reused for other functionality, make it
- possible to exclude those from interpretation by the PCI controller */
-static int ltq_pci_req_mask = 0xf;
-
-static int *ltq_pci_irq_map;
-
-struct pci_ops ltq_pci_ops = {
+static int reset_gpio;
+static struct clk *clk_pci, *clk_external;
+static struct resource pci_io_resource;
+static struct resource pci_mem_resource;
+static struct pci_ops pci_ops = {
.read = ltq_pci_read_config_dword,
.write = ltq_pci_write_config_dword
};
-static struct resource pci_io_resource = {
- .name = "pci io space",
- .start = LTQ_PCI_IO_BASE,
- .end = LTQ_PCI_IO_BASE + LTQ_PCI_IO_SIZE - 1,
- .flags = IORESOURCE_IO
-};
-
-static struct resource pci_mem_resource = {
- .name = "pci memory space",
- .start = LTQ_PCI_MEM_BASE,
- .end = LTQ_PCI_MEM_BASE + LTQ_PCI_MEM_SIZE - 1,
- .flags = IORESOURCE_MEM
-};
-
-static struct pci_controller ltq_pci_controller = {
- .pci_ops = &ltq_pci_ops,
+static struct pci_controller pci_controller = {
+ .pci_ops = &pci_ops,
.mem_resource = &pci_mem_resource,
.mem_offset = 0x00000000UL,
.io_resource = &pci_io_resource,
.io_offset = 0x00000000UL,
};
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- if (ltqpci_plat_dev_init)
- return ltqpci_plat_dev_init(dev);
-
- return 0;
-}
-
-static u32 ltq_calc_bar11mask(void)
+static inline u32 ltq_calc_bar11mask(void)
{
u32 mem, bar11mask;
@@ -151,48 +95,42 @@ static u32 ltq_calc_bar11mask(void)
return bar11mask;
}
-static void ltq_pci_setup_gpio(int gpio)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(ltq_pci_gpio_map); i++) {
- if (gpio & (1 << i)) {
- ltq_gpio_request(ltq_pci_gpio_map[i].pin,
- ltq_pci_gpio_map[i].alt0,
- ltq_pci_gpio_map[i].alt1,
- ltq_pci_gpio_map[i].dir,
- ltq_pci_gpio_map[i].name);
- }
- }
- ltq_gpio_request(21, 0, 0, 1, "pci-reset");
- ltq_pci_req_mask = (gpio >> PCI_REQ_SHIFT) & PCI_REQ_MASK;
-}
-
-static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
+static int __devinit ltq_pci_startup(struct platform_device *pdev)
{
+ struct device_node *node = pdev->dev.of_node;
+ const __be32 *req_mask, *bus_clk;
u32 temp_buffer;
- /* set clock to 33Mhz */
- if (ltq_is_ar9()) {
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0x1f00000, LTQ_CGU_IFCCR);
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0xe00000, LTQ_CGU_IFCCR);
- } else {
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR);
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR);
+ /* get our clocks */
+ clk_pci = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk_pci)) {
+ dev_err(&pdev->dev, "failed to get pci clock\n");
+ return PTR_ERR(clk_pci);
}
- /* external or internal clock ? */
- if (conf->clock) {
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~(1 << 16),
- LTQ_CGU_IFCCR);
- ltq_cgu_w32((1 << 30), LTQ_CGU_PCICR);
- } else {
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | (1 << 16),
- LTQ_CGU_IFCCR);
- ltq_cgu_w32((1 << 31) | (1 << 30), LTQ_CGU_PCICR);
+ clk_external = clk_get(&pdev->dev, "external");
+ if (IS_ERR(clk_external)) {
+ clk_put(clk_pci);
+ dev_err(&pdev->dev, "failed to get external pci clock\n");
+ return PTR_ERR(clk_external);
}
- /* setup pci clock and gpis used by pci */
- ltq_pci_setup_gpio(conf->gpio);
+ /* read the bus speed that we want */
+ bus_clk = of_get_property(node, "lantiq,bus-clock", NULL);
+ if (bus_clk)
+ clk_set_rate(clk_pci, *bus_clk);
+
+ /* and enable the clocks */
+ clk_enable(clk_pci);
+ if (of_find_property(node, "lantiq,external-clock", NULL))
+ clk_enable(clk_external);
+ else
+ clk_disable(clk_external);
+
+ /* setup reset gpio used by pci */
+ reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
+ if (reset_gpio > 0)
+ devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
/* enable auto-switching between PCI and EBU */
ltq_pci_w32(0xa, PCI_CR_CLK_CTRL);
@@ -205,7 +143,12 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
/* enable external 2 PCI masters */
temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB);
- temp_buffer &= (~(ltq_pci_req_mask << 16));
+ /* setup the request mask */
+ req_mask = of_get_property(node, "req-mask", NULL);
+ if (req_mask)
+ temp_buffer &= ~((*req_mask & 0xf) << 16);
+ else
+ temp_buffer &= ~0xf0000;
/* enable internal arbiter */
temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT);
	/* enable internal PCI master request */
@@ -249,47 +192,55 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
/* toggle reset pin */
- __gpio_set_value(21, 0);
- wmb();
- mdelay(1);
- __gpio_set_value(21, 1);
- return 0;
-}
-
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- if (ltq_pci_irq_map[slot])
- return ltq_pci_irq_map[slot];
- printk(KERN_ERR "lq_pci: trying to map irq for unknown slot %d\n",
- slot);
-
+ if (reset_gpio > 0) {
+ __gpio_set_value(reset_gpio, 0);
+ wmb();
+ mdelay(1);
+ __gpio_set_value(reset_gpio, 1);
+ }
return 0;
}
static int __devinit ltq_pci_probe(struct platform_device *pdev)
{
- struct ltq_pci_data *ltq_pci_data =
- (struct ltq_pci_data *) pdev->dev.platform_data;
+ struct resource *res_cfg, *res_bridge;
pci_clear_flags(PCI_PROBE_ONLY);
- ltq_pci_irq_map = ltq_pci_data->irq;
- ltq_pci_membase = ioremap_nocache(PCI_CR_BASE_ADDR, PCI_CR_SIZE);
- ltq_pci_mapped_cfg =
- ioremap_nocache(LTQ_PCI_CFG_BASE, LTQ_PCI_CFG_BASE);
- ltq_pci_controller.io_map_base =
- (unsigned long)ioremap(LTQ_PCI_IO_BASE, LTQ_PCI_IO_SIZE - 1);
- ltq_pci_startup(ltq_pci_data);
- register_pci_controller(&ltq_pci_controller);
+ res_cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res_bridge = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_cfg || !res_bridge) {
+		dev_err(&pdev->dev, "missing memory resources\n");
+ return -EINVAL;
+ }
+
+ ltq_pci_membase = devm_request_and_ioremap(&pdev->dev, res_bridge);
+ ltq_pci_mapped_cfg = devm_request_and_ioremap(&pdev->dev, res_cfg);
+
+ if (!ltq_pci_membase || !ltq_pci_mapped_cfg) {
+ dev_err(&pdev->dev, "failed to remap resources\n");
+ return -ENOMEM;
+ }
+
+ ltq_pci_startup(pdev);
+
+ pci_load_of_ranges(&pci_controller, pdev->dev.of_node);
+ register_pci_controller(&pci_controller);
return 0;
}
-static struct platform_driver
-ltq_pci_driver = {
+static const struct of_device_id ltq_pci_match[] = {
+ { .compatible = "lantiq,pci-xway" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_pci_match);
+
+static struct platform_driver ltq_pci_driver = {
.probe = ltq_pci_probe,
.driver = {
- .name = "ltq_pci",
+ .name = "pci-xway",
.owner = THIS_MODULE,
+ .of_match_table = ltq_pci_match,
},
};
@@ -297,7 +248,7 @@ int __init pcibios_init(void)
{
int ret = platform_driver_register(&ltq_pci_driver);
if (ret)
- printk(KERN_INFO "ltq_pci: Error registering platfom driver!");
+ pr_info("pci-xway: Error registering platform driver!");
return ret;
}
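
For reference, the device-tree properties the converted driver now consumes, gathered from the probe and startup paths above; the example values are invented and only the property names come from the code:

/*
 * pci@... {
 *	compatible = "lantiq,pci-xway";
 *	reg = <...>, <...>;		two IORESOURCE_MEM entries: config space, bridge regs
 *	ranges = <...>;			parsed by pci_load_of_ranges()
 *	lantiq,bus-clock = <33333333>;	optional, PCI bus clock rate
 *	lantiq,external-clock;		optional, use the external clock input
 *	req-mask = <0xf>;		optional, REQ lines handled by the arbiter
 *	gpio-reset = <&gpio 21 0>;	optional, replaces the old fixed GPIO 21
 * };
 */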
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 0514866fa925..271e8c4a54c7 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
+#include <linux/of_address.h>
#include <asm/cpu-info.h>
@@ -114,9 +115,63 @@ static void __devinit pcibios_scanbus(struct pci_controller *hose)
pci_bus_assign_resources(bus);
pci_enable_bridges(bus);
}
+ bus->dev.of_node = hose->of_node;
}
}
+#ifdef CONFIG_OF
+void __devinit pci_load_of_ranges(struct pci_controller *hose,
+ struct device_node *node)
+{
+ const __be32 *ranges;
+ int rlen;
+ int pna = of_n_addr_cells(node);
+ int np = pna + 5;
+
+ pr_info("PCI host bridge %s ranges:\n", node->full_name);
+ ranges = of_get_property(node, "ranges", &rlen);
+ if (ranges == NULL)
+ return;
+ hose->of_node = node;
+
+ while ((rlen -= np * 4) >= 0) {
+ u32 pci_space;
+ struct resource *res = NULL;
+ u64 addr, size;
+
+ pci_space = be32_to_cpup(&ranges[0]);
+ addr = of_translate_address(node, ranges + 3);
+ size = of_read_number(ranges + pna + 3, 2);
+ ranges += np;
+ switch ((pci_space >> 24) & 0x3) {
+ case 1: /* PCI IO space */
+ pr_info(" IO 0x%016llx..0x%016llx\n",
+ addr, addr + size - 1);
+ hose->io_map_base =
+ (unsigned long)ioremap(addr, size);
+ res = hose->io_resource;
+ res->flags = IORESOURCE_IO;
+ break;
+ case 2: /* PCI Memory space */
+ case 3: /* PCI 64 bits Memory space */
+ pr_info(" MEM 0x%016llx..0x%016llx\n",
+ addr, addr + size - 1);
+ res = hose->mem_resource;
+ res->flags = IORESOURCE_MEM;
+ break;
+ }
+ if (res != NULL) {
+ res->start = addr;
+ res->name = node->full_name;
+ res->end = res->start + size - 1;
+ res->parent = NULL;
+ res->sibling = NULL;
+ res->child = NULL;
+ }
+ }
+}
+#endif
+
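The loop above walks the standard PCI "ranges" triplets. With pna == 1, which is the usual value for the parent bus on these 32-bit MIPS SoCs, each entry occupies np == 6 cells laid out as sketched below (illustration only):

/*
 * cell 0	pci_space	bits 25:24: 1 = IO, 2/3 = 32/64-bit MEM
 * cells 1-2	64-bit PCI address
 * cell 3	parent (CPU) address, run through of_translate_address()
 * cells 4-5	64-bit size
 */
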
static DEFINE_MUTEX(pci_scan_mutex);
void __devinit register_pci_controller(struct pci_controller *hose)
diff --git a/arch/mips/pmc-sierra/yosemite/Makefile b/arch/mips/pmc-sierra/yosemite/Makefile
index 02f5fb94ea28..5af95ec3319d 100644
--- a/arch/mips/pmc-sierra/yosemite/Makefile
+++ b/arch/mips/pmc-sierra/yosemite/Makefile
@@ -5,5 +5,3 @@
obj-y += irq.o prom.o py-console.o setup.o
obj-$(CONFIG_SMP) += smp.o
-
-ccflags-y := -Werror
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c
index 3498ac9c35af..b6472fc88a99 100644
--- a/arch/mips/pmc-sierra/yosemite/setup.c
+++ b/arch/mips/pmc-sierra/yosemite/setup.c
@@ -27,6 +27,7 @@
#include <linux/bcd.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
diff --git a/arch/mips/powertv/Makefile b/arch/mips/powertv/Makefile
index 348d2e850ef5..39ca9f8d63ae 100644
--- a/arch/mips/powertv/Makefile
+++ b/arch/mips/powertv/Makefile
@@ -27,5 +27,3 @@ obj-y += init.o ioremap.o memory.o powertv_setup.o reset.o time.o \
asic/ pci/
obj-$(CONFIG_USB) += powertv-usb.o
-
-ccflags-y := -Wall
diff --git a/arch/mips/powertv/asic/Makefile b/arch/mips/powertv/asic/Makefile
index d810a33182a4..35dcc53eb25f 100644
--- a/arch/mips/powertv/asic/Makefile
+++ b/arch/mips/powertv/asic/Makefile
@@ -19,5 +19,3 @@
obj-y += asic-calliope.o asic-cronus.o asic-gaia.o asic-zeus.o \
asic_devices.o asic_int.o irq_asic.o prealloc-calliope.o \
prealloc-cronus.o prealloc-cronuslite.o prealloc-gaia.o prealloc-zeus.o
-
-ccflags-y := -Wall -Werror
diff --git a/arch/mips/powertv/pci/Makefile b/arch/mips/powertv/pci/Makefile
index 5783201cd2c8..2610a6af5b2c 100644
--- a/arch/mips/powertv/pci/Makefile
+++ b/arch/mips/powertv/pci/Makefile
@@ -17,5 +17,3 @@
#
obj-$(CONFIG_PCI) += fixup-powertv.o
-
-ccflags-y := -Wall -Werror
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index a969eb826634..ea774285e6c5 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -15,6 +15,7 @@
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/string.h>
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
index d16b462154c3..413f17f8e892 100644
--- a/arch/mips/sni/setup.c
+++ b/arch/mips/sni/setup.c
@@ -10,6 +10,7 @@
*/
#include <linux/eisa.h>
#include <linux/init.h>
+#include <linux/export.h>
#include <linux/console.h>
#include <linux/fb.h>
#include <linux/screen_info.h>
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 3aa3de017159..687f9b4a2ed6 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -6,6 +6,7 @@ config MN10300
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_KGDB
select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
+ select GENERIC_CLOCKEVENTS
config AM33_2
def_bool n
@@ -42,15 +43,9 @@ config RWSEM_XCHGADD_ALGORITHM
config GENERIC_CALIBRATE_DELAY
def_bool y
-config GENERIC_CMOS_UPDATE
- def_bool n
-
config GENERIC_HWEIGHT
def_bool y
-config GENERIC_CLOCKEVENTS
- def_bool y
-
config GENERIC_BUG
def_bool y
@@ -231,7 +226,6 @@ config MN10300_USING_JTAG
single-stepping, which are taken over completely by the JTAG unit.
source "kernel/Kconfig.hz"
-source "kernel/time/Kconfig"
config MN10300_RTC
bool "Using MN10300 RTC"
diff --git a/arch/mn10300/include/asm/kvm_para.h b/arch/mn10300/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/mn10300/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 297bd38f7c5d..49765b53f637 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -18,6 +18,9 @@ config OPENRISC
select GENERIC_IOMAP
select GENERIC_CPU_DEVICES
select GENERIC_ATOMIC64
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
config MMU
def_bool y
@@ -47,9 +50,6 @@ config NO_IOPORT
config GENERIC_GPIO
def_bool y
-config GENERIC_CLOCKEVENTS
- def_bool y
-
config TRACE_IRQFLAGS_SUPPORT
def_bool y
@@ -109,7 +109,6 @@ config OPENRISC_HAVE_INST_DIV
endmenu
-source "kernel/time/Kconfig"
source kernel/Kconfig.hz
source kernel/Kconfig.preempt
source "mm/Kconfig"
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index c936483bc8e2..3f35c38d7b64 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -66,3 +66,4 @@ generic-y += topology.h
generic-y += types.h
generic-y += ucontext.h
generic-y += user.h
+generic-y += word-at-a-time.h
diff --git a/arch/openrisc/include/asm/gpio.h b/arch/openrisc/include/asm/gpio.h
index 0b0d174f47cd..b3799d88ffcf 100644
--- a/arch/openrisc/include/asm/gpio.h
+++ b/arch/openrisc/include/asm/gpio.h
@@ -1,65 +1,4 @@
-/*
- * OpenRISC Linux
- *
- * Linux architectural port borrowing liberally from similar works of
- * others. All original copyrights apply as per the original source
- * declaration.
- *
- * OpenRISC implementation:
- * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
- * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- * et al.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __ASM_OPENRISC_GPIO_H
-#define __ASM_OPENRISC_GPIO_H
-
-#include <linux/errno.h>
-#include <asm-generic/gpio.h>
-
-#ifdef CONFIG_GPIOLIB
-
-/*
- * OpenRISC (or1k) does not have on-chip GPIO's so there is not really
- * any standardized implementation that makes sense here. If passing
- * through gpiolib becomes a bottleneck then it may make sense, on a
- * case-by-case basis, to implement these inlined/rapid versions.
- *
- * Just call gpiolib.
- */
-static inline int gpio_get_value(unsigned int gpio)
-{
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
-{
- __gpio_set_value(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned int gpio)
-{
- return __gpio_cansleep(gpio);
-}
-
-/*
- * Not implemented, yet.
- */
-static inline int gpio_to_irq(unsigned int gpio)
-{
- return -ENOSYS;
-}
-
-static inline int irq_to_gpio(unsigned int irq)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_GPIOLIB */
-
-#endif /* __ASM_OPENRISC_GPIO_H */
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/arch/openrisc/include/asm/kvm_para.h b/arch/openrisc/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/openrisc/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index f5abaa0ffc38..ab2e7a198a4c 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -313,42 +313,12 @@ clear_user(void *addr, unsigned long size)
return size;
}
-extern int __strncpy_from_user(char *dst, const char *src, long count);
+#define user_addr_max() \
+ (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
-static inline long strncpy_from_user(char *dst, const char *src, long count)
-{
- if (access_ok(VERIFY_READ, src, 1))
- return __strncpy_from_user(dst, src, count);
- return -EFAULT;
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 for error
- */
-
-extern int __strnlen_user(const char *str, long len, unsigned long top);
-
-/*
- * Returns the length of the string at str (including the null byte),
- * or 0 if we hit a page we can't access,
- * or something > len if we didn't find a null byte.
- *
- * The `top' parameter to __strnlen_user is to make sure that
- * we can never overflow from the user area into kernel space.
- */
-static inline long strnlen_user(const char __user *str, long len)
-{
- unsigned long top = (unsigned long)get_fs();
- unsigned long res = 0;
-
- if (__addr_ok(str))
- res = __strnlen_user(str, len, top);
-
- return res;
-}
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
-#define strlen_user(str) strnlen_user(str, TASK_SIZE-1)
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASM_OPENRISC_UACCESS_H */
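
With the conversion to the generic GENERIC_STRNCPY_FROM_USER / GENERIC_STRNLEN_USER helpers, callers keep the usual return convention. A minimal sketch of that convention, assuming nothing beyond the generic lib implementation (copy_name() is a made-up example, not part of this patch):

#include <linux/errno.h>
#include <linux/uaccess.h>

static long copy_name(char *dst, long len, const char __user *src)
{
	long n = strncpy_from_user(dst, src, len);

	if (n < 0)
		return n;		/* -EFAULT: faulted before a NUL */
	if (n == len)
		return -ENAMETOOLONG;	/* no NUL found within len bytes */
	return n;			/* string length, NUL excluded */
}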
diff --git a/arch/openrisc/lib/string.S b/arch/openrisc/lib/string.S
index 465f04bc7deb..c09fee7dec14 100644
--- a/arch/openrisc/lib/string.S
+++ b/arch/openrisc/lib/string.S
@@ -103,102 +103,3 @@ __clear_user:
.section __ex_table, "a"
.long 9b, 99b // write fault
.previous
-
-/*
- * long strncpy_from_user(char *dst, const char *src, long count)
- *
- *
- */
- .global __strncpy_from_user
-__strncpy_from_user:
- l.addi r1,r1,-16
- l.sw 0(r1),r6
- l.sw 4(r1),r5
- l.sw 8(r1),r4
- l.sw 12(r1),r3
-
- l.addi r11,r5,0
-2: l.sfeq r5,r0
- l.bf 1f
- l.addi r5,r5,-1
-8: l.lbz r6,0(r4)
- l.sfeq r6,r0
- l.bf 1f
-9: l.sb 0(r3),r6
- l.addi r3,r3,1
- l.j 2b
- l.addi r4,r4,1
-1:
- l.lwz r6,0(r1)
- l.addi r5,r5,1
- l.sub r11,r11,r5 // r11 holds the return value
-
- l.lwz r6,0(r1)
- l.lwz r5,4(r1)
- l.lwz r4,8(r1)
- l.lwz r3,12(r1)
- l.jr r9
- l.addi r1,r1,16
-
- .section .fixup, "ax"
-99:
- l.movhi r11,hi(-EFAULT)
- l.ori r11,r11,lo(-EFAULT)
-
- l.lwz r6,0(r1)
- l.lwz r5,4(r1)
- l.lwz r4,8(r1)
- l.lwz r3,12(r1)
- l.jr r9
- l.addi r1,r1,16
- .previous
-
- .section __ex_table, "a"
- .long 8b, 99b // read fault
- .previous
-
-/*
- * extern int __strnlen_user(const char *str, long len, unsigned long top);
- *
- *
- * RTRN: - length of a string including NUL termination character
- * - on page fault 0
- */
-
- .global __strnlen_user
-__strnlen_user:
- l.addi r1,r1,-8
- l.sw 0(r1),r6
- l.sw 4(r1),r3
-
- l.addi r11,r0,0
-2: l.sfeq r11,r4
- l.bf 1f
- l.addi r11,r11,1
-8: l.lbz r6,0(r3)
- l.sfeq r6,r0
- l.bf 1f
- l.sfgeu r3,r5 // are we over the top ?
- l.bf 99f
- l.j 2b
- l.addi r3,r3,1
-
-1:
- l.lwz r6,0(r1)
- l.lwz r3,4(r1)
- l.jr r9
- l.addi r1,r1,8
-
- .section .fixup, "ax"
-99:
- l.addi r11,r0,0
-
- l.lwz r6,0(r1)
- l.lwz r3,4(r1)
- l.jr r9
- l.addi r1,r1,8
- .previous
-
- .section __ex_table, "a"
- .long 8b, 99b // read fault
- .previous
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index ddb8b24b823d..3ff21b536f28 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,6 +18,7 @@ config PARISC
select IRQ_PER_CPU
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_STRNCPY_FROM_USER
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/include/asm/kvm_para.h b/arch/parisc/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/parisc/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index e8f8037d872b..a5dc9066c6d8 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -25,7 +25,6 @@ typedef unsigned long address_t;
#define cpu_number_map(cpu) (cpu)
#define cpu_logical_map(cpu) (cpu)
-extern void smp_send_reschedule(int cpu);
extern void smp_send_all_nop(void);
extern void arch_send_call_function_single_ipi(int cpu);
@@ -50,6 +49,5 @@ static inline void __cpu_die (unsigned int cpu) {
while(1)
;
}
-extern int __cpu_up (unsigned int cpu);
#endif /* __ASM_SMP_H */
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 9ac066086f03..4ba2c93770f1 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -218,15 +218,14 @@ struct exception_data {
extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
-extern long lstrncpy_from_user(char *, const char __user *, long);
+extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *,unsigned long);
extern long lstrnlen_user(const char __user *,long);
-
/*
* Complex access routines -- macros
*/
+#define user_addr_max() (~0UL)
-#define strncpy_from_user lstrncpy_from_user
#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 4f283eaf4907..c7fbc96472f3 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -552,7 +552,7 @@
* entry (identifying the physical page) and %r23 up with
* the from tlb entry (or nothing if only a to entry---for
* clear_user_page_asm) */
- .macro do_alias spc,tmp,tmp1,va,pte,prot,fault
+ .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
cmpib,COND(<>),n 0,\spc,\fault
ldil L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
@@ -581,11 +581,15 @@
*/
cmpiclr,= 0x01,\tmp,%r0
ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
-#ifdef CONFIG_64BIT
+.ifc \patype,20
depd,z \prot,8,7,\prot
-#else
+.else
+.ifc \patype,11
depw,z \prot,8,7,\prot
-#endif
+.else
+ .error "undefined PA type to do_alias"
+.endif
+.endif
/*
* OK, it is in the temp alias region, check whether "from" or "to".
* Check "subtle" note in pacache.S re: r23/r26.
@@ -1189,7 +1193,7 @@ dtlb_miss_20w:
nop
dtlb_check_alias_20w:
- do_alias spc,t0,t1,va,pte,prot,dtlb_fault
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
idtlbt pte,prot
@@ -1213,7 +1217,7 @@ nadtlb_miss_20w:
nop
nadtlb_check_alias_20w:
- do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
+ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
idtlbt pte,prot
@@ -1245,7 +1249,7 @@ dtlb_miss_11:
nop
dtlb_check_alias_11:
- do_alias spc,t0,t1,va,pte,prot,dtlb_fault
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11
idtlba pte,(va)
idtlbp prot,(va)
@@ -1277,7 +1281,7 @@ nadtlb_miss_11:
nop
nadtlb_check_alias_11:
- do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
+ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11
idtlba pte,(va)
idtlbp prot,(va)
@@ -1304,7 +1308,7 @@ dtlb_miss_20:
nop
dtlb_check_alias_20:
- do_alias spc,t0,t1,va,pte,prot,dtlb_fault
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
idtlbt pte,prot
@@ -1330,7 +1334,7 @@ nadtlb_miss_20:
nop
nadtlb_check_alias_20:
- do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
+ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
idtlbt pte,prot
@@ -1457,7 +1461,7 @@ naitlb_miss_20w:
nop
naitlb_check_alias_20w:
- do_alias spc,t0,t1,va,pte,prot,naitlb_fault
+ do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
iitlbt pte,prot
@@ -1511,7 +1515,7 @@ naitlb_miss_11:
nop
naitlb_check_alias_11:
- do_alias spc,t0,t1,va,pte,prot,itlb_fault
+ do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
iitlba pte,(%sr0, va)
iitlbp prot,(%sr0, va)
@@ -1557,7 +1561,7 @@ naitlb_miss_20:
nop
naitlb_check_alias_20:
- do_alias spc,t0,t1,va,pte,prot,naitlb_fault
+ do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
iitlbt pte,prot
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index a7bb757a5497..ceec85de6290 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -44,7 +44,6 @@ EXPORT_SYMBOL(__cmpxchg_u64);
#endif
#include <asm/uaccess.h>
-EXPORT_SYMBOL(lstrncpy_from_user);
EXPORT_SYMBOL(lclear_user);
EXPORT_SYMBOL(lstrnlen_user);
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index fa6f2b8163e0..64a999882e4f 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -50,8 +50,10 @@ SECTIONS
. = KERNEL_BINARY_TEXT_START;
_text = .; /* Text and read-only data */
- .text ALIGN(16) : {
+ .head ALIGN(16) : {
HEAD_TEXT
+ } = 0
+ .text ALIGN(16) : {
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
@@ -65,7 +67,7 @@ SECTIONS
*(.fixup)
*(.lock.text) /* out-of-line lock text */
*(.gnu.warning)
- } = 0
+ }
/* End of text section */
_etext = .;
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index 1bd23ccec17b..6f2d9355efe2 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -61,47 +61,6 @@
.endm
/*
- * long lstrncpy_from_user(char *dst, const char *src, long n)
- *
- * Returns -EFAULT if exception before terminator,
- * N if the entire buffer filled,
- * otherwise strlen (i.e. excludes zero byte)
- */
-
-ENTRY(lstrncpy_from_user)
- .proc
- .callinfo NO_CALLS
- .entry
- comib,= 0,%r24,$lsfu_done
- copy %r24,%r23
- get_sr
-1: ldbs,ma 1(%sr1,%r25),%r1
-$lsfu_loop:
- stbs,ma %r1,1(%r26)
- comib,=,n 0,%r1,$lsfu_done
- addib,<>,n -1,%r24,$lsfu_loop
-2: ldbs,ma 1(%sr1,%r25),%r1
-$lsfu_done:
- sub %r23,%r24,%r28
-$lsfu_exit:
- bv %r0(%r2)
- nop
- .exit
-ENDPROC(lstrncpy_from_user)
-
- .section .fixup,"ax"
-3: fixup_branch $lsfu_exit
- ldi -EFAULT,%r28
- .previous
-
- .section __ex_table,"aw"
- ASM_ULONG_INSN 1b,3b
- ASM_ULONG_INSN 2b,3b
- .previous
-
- .procend
-
- /*
* unsigned long lclear_user(void *to, unsigned long n)
*
* Returns 0 for success.
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 0a947bd9c076..050cb371a69e 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -27,15 +27,6 @@ config MMU
bool
default y
-config GENERIC_CMOS_UPDATE
- def_bool y
-
-config GENERIC_TIME_VSYSCALL
- def_bool y
-
-config GENERIC_CLOCKEVENTS
- def_bool y
-
config HAVE_SETUP_PER_CPU_AREA
def_bool PPC64
@@ -141,6 +132,11 @@ config PPC
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_CMOS_UPDATE
+ select GENERIC_TIME_VSYSCALL
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
config EARLY_PRINTK
bool
@@ -281,7 +277,6 @@ config HIGHMEM
bool "High memory support"
depends on PPC32
-source kernel/time/Kconfig
source kernel/Kconfig.hz
source kernel/Kconfig.preempt
source "fs/Kconfig.binfmt"
diff --git a/arch/powerpc/boot/dts/mpc8569mds.dts b/arch/powerpc/boot/dts/mpc8569mds.dts
index 7e283c891b7f..fe0d60935e9b 100644
--- a/arch/powerpc/boot/dts/mpc8569mds.dts
+++ b/arch/powerpc/boot/dts/mpc8569mds.dts
@@ -119,6 +119,7 @@
sdhc@2e000 {
status = "disabled";
sdhci,1-bit-only;
+ bus-width = <1>;
};
par_io@e0100 {
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index b9219e99bd2a..50d82c8a037f 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -168,6 +168,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000)
#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000010000000)
#define CPU_FTR_INDEXED_DCR ASM_CONST(0x0000000020000000)
+#define CPU_FTR_EMB_HV ASM_CONST(0x0000000040000000)
/*
* Add the 64-bit processor unique features in the top half of the word;
@@ -376,7 +377,8 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_47X (CPU_FTRS_440x6)
#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
- CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
+ CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_DEBUG_LVL_EXC)
#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
CPU_FTR_NOEXECUTE)
@@ -385,15 +387,15 @@ extern const char *powerpc_base_platform;
CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
- CPU_FTR_DBELL)
+ CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_DEBUG_LVL_EXC)
+ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
#define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_DEBUG_LVL_EXC)
+ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
@@ -486,8 +488,10 @@ enum {
CPU_FTRS_E200 |
#endif
#ifdef CONFIG_E500
- CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
- CPU_FTRS_E5500 | CPU_FTRS_E6500 |
+ CPU_FTRS_E500 | CPU_FTRS_E500_2 |
+#endif
+#ifdef CONFIG_PPC_E500MC
+ CPU_FTRS_E500MC | CPU_FTRS_E5500 | CPU_FTRS_E6500 |
#endif
0,
};
@@ -531,9 +535,12 @@ enum {
CPU_FTRS_E200 &
#endif
#ifdef CONFIG_E500
- CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
- CPU_FTRS_E5500 & CPU_FTRS_E6500 &
+ CPU_FTRS_E500 & CPU_FTRS_E500_2 &
+#endif
+#ifdef CONFIG_PPC_E500MC
+ CPU_FTRS_E500MC & CPU_FTRS_E5500 & CPU_FTRS_E6500 &
#endif
+ ~CPU_FTR_EMB_HV & /* can be removed at runtime */
CPU_FTRS_POSSIBLE,
};
#endif /* __powerpc64__ */
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index efa74ac44a35..154c067761b1 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -19,6 +19,9 @@
#define PPC_DBELL_MSG_BRDCAST (0x04000000)
#define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36))
+#define PPC_DBELL_TYPE_MASK PPC_DBELL_TYPE(0xf)
+#define PPC_DBELL_LPID(x) ((x) << (63 - 49))
+#define PPC_DBELL_PIR_MASK 0x3fff
enum ppc_dbell {
PPC_DBELL = 0, /* doorbell */
PPC_DBELL_CRIT = 1, /* critical doorbell */
diff --git a/arch/powerpc/include/asm/gpio.h b/arch/powerpc/include/asm/gpio.h
index 38762edb5e58..b3799d88ffcf 100644
--- a/arch/powerpc/include/asm/gpio.h
+++ b/arch/powerpc/include/asm/gpio.h
@@ -1,53 +1,4 @@
-/*
- * Generic GPIO API implementation for PowerPC.
- *
- * Copyright (c) 2007-2008 MontaVista Software, Inc.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __ASM_POWERPC_GPIO_H
-#define __ASM_POWERPC_GPIO_H
-
-#include <linux/errno.h>
-#include <asm-generic/gpio.h>
-
-#ifdef CONFIG_GPIOLIB
-
-/*
- * We don't (yet) implement inlined/rapid versions for on-chip gpios.
- * Just call gpiolib.
- */
-static inline int gpio_get_value(unsigned int gpio)
-{
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
-{
- __gpio_set_value(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned int gpio)
-{
- return __gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(unsigned int gpio)
-{
- return __gpio_to_irq(gpio);
-}
-
-static inline int irq_to_gpio(unsigned int irq)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_GPIOLIB */
-
-#endif /* __ASM_POWERPC_GPIO_H */
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 612252388190..423cf9eaf4a4 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -133,6 +133,16 @@
#define H_PP1 (1UL<<(63-62))
#define H_PP2 (1UL<<(63-63))
+/* Flags for H_REGISTER_VPA subfunction field */
+#define H_VPA_FUNC_SHIFT (63-18) /* Bit posn of subfunction code */
+#define H_VPA_FUNC_MASK 7UL
+#define H_VPA_REG_VPA 1UL /* Register Virtual Processor Area */
+#define H_VPA_REG_DTL 2UL /* Register Dispatch Trace Log */
+#define H_VPA_REG_SLB 3UL /* Register SLB shadow buffer */
+#define H_VPA_DEREG_VPA 5UL /* Deregister Virtual Processor Area */
+#define H_VPA_DEREG_DTL 6UL /* Deregister Dispatch Trace Log */
+#define H_VPA_DEREG_SLB 7UL /* Deregister SLB shadow buffer */
+
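The new H_VPA_* macros describe where the H_REGISTER_VPA subfunction code sits inside the flags argument. A small sketch of packing and unpacking that field, using only the macros above (not an in-tree helper):

static inline unsigned long h_vpa_pack(unsigned long subfunc)
{
	return (subfunc & H_VPA_FUNC_MASK) << H_VPA_FUNC_SHIFT;
}

static inline unsigned long h_vpa_unpack(unsigned long flags)
{
	return (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
}
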
/* VASI States */
#define H_VASI_INVALID 0
#define H_VASI_ENABLED 1
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 51010bfc792e..c9aac24b02e2 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -33,6 +33,7 @@
extern void __replay_interrupt(unsigned int vector);
extern void timer_interrupt(struct pt_regs *);
+extern void performance_monitor_exception(struct pt_regs *regs);
#ifdef CONFIG_PPC64
#include <asm/paca.h>
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index b921c3f48928..1bea4d8ea6f4 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -277,6 +277,7 @@ struct kvm_sync_regs {
#define KVM_CPU_E500V2 2
#define KVM_CPU_3S_32 3
#define KVM_CPU_3S_64 4
+#define KVM_CPU_E500MC 5
/* for KVM_CAP_SPAPR_TCE */
struct kvm_create_spapr_tce {
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 7b1f0e0fc653..76fdcfef0889 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -20,6 +20,16 @@
#ifndef __POWERPC_KVM_ASM_H__
#define __POWERPC_KVM_ASM_H__
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_64BIT
+#define PPC_STD(sreg, offset, areg) std sreg, (offset)(areg)
+#define PPC_LD(treg, offset, areg) ld treg, (offset)(areg)
+#else
+#define PPC_STD(sreg, offset, areg) stw sreg, (offset+4)(areg)
+#define PPC_LD(treg, offset, areg) lwz treg, (offset+4)(areg)
+#endif
+#endif
+
/* IVPR must be 64KiB-aligned. */
#define VCPU_SIZE_ORDER 4
#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12)
@@ -48,6 +58,14 @@
#define BOOKE_INTERRUPT_SPE_FP_DATA 33
#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
+#define BOOKE_INTERRUPT_DOORBELL 36
+#define BOOKE_INTERRUPT_DOORBELL_CRITICAL 37
+
+/* booke_hv */
+#define BOOKE_INTERRUPT_GUEST_DBELL 38
+#define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39
+#define BOOKE_INTERRUPT_HV_SYSCALL 40
+#define BOOKE_INTERRUPT_HV_PRIV 41
/* book3s */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index fd07f43d6622..f0e0c6a66d97 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -453,4 +453,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
#define INS_DCBZ 0x7c0007ec
+/* LPIDs we support with this build -- runtime limit may be lower */
+#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
+
#endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 1f2f5b6156bd..88609b23b775 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -79,6 +79,9 @@ struct kvmppc_host_state {
u8 napping;
#ifdef CONFIG_KVM_BOOK3S_64_HV
+ u8 hwthread_req;
+ u8 hwthread_state;
+
struct kvm_vcpu *kvm_vcpu;
struct kvmppc_vcore *kvm_vcore;
unsigned long xics_phys;
@@ -122,4 +125,9 @@ struct kvmppc_book3s_shadow_vcpu {
#endif /*__ASSEMBLY__ */
+/* Values for kvm_state */
+#define KVM_HWTHREAD_IN_KERNEL 0
+#define KVM_HWTHREAD_IN_NAP 1
+#define KVM_HWTHREAD_IN_KVM 2
+
#endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index a90e09188777..b7cd3356a532 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -23,6 +23,9 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
+/* LPIDs we support with this build -- runtime limit may be lower */
+#define KVMPPC_NR_LPIDS 64
+
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.gpr[num] = val;
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
new file mode 100644
index 000000000000..30a600fa1b6a
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_KVM_BOOKE_HV_ASM_H
+#define ASM_KVM_BOOKE_HV_ASM_H
+
+#ifdef __ASSEMBLY__
+
+/*
+ * All exceptions from guest state must go through KVM
+ * (except for those which are delivered directly to the guest) --
+ * there are no exceptions for which we fall through directly to
+ * the normal host handler.
+ *
+ * Expected inputs (normal exceptions):
+ * SCRATCH0 = saved r10
+ * r10 = thread struct
+ * r11 = appropriate SRR1 variant (currently used as scratch)
+ * r13 = saved CR
+ * *(r10 + THREAD_NORMSAVE(0)) = saved r11
+ * *(r10 + THREAD_NORMSAVE(2)) = saved r13
+ *
+ * Expected inputs (crit/mcheck/debug exceptions):
+ * appropriate SCRATCH = saved r8
+ * r8 = exception level stack frame
+ * r9 = *(r8 + _CCR) = saved CR
+ * r11 = appropriate SRR1 variant (currently used as scratch)
+ * *(r8 + GPR9) = saved r9
+ * *(r8 + GPR10) = saved r10 (r10 not yet clobbered)
+ * *(r8 + GPR11) = saved r11
+ */
+.macro DO_KVM intno srr1
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+ mtocrf 0x80, r11 /* check MSR[GS] without clobbering reg */
+ bf 3, kvmppc_resume_\intno\()_\srr1
+ b kvmppc_handler_\intno\()_\srr1
+kvmppc_resume_\intno\()_\srr1:
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
+.endm
+
+#endif /*__ASSEMBLY__ */
+#endif /* ASM_KVM_BOOKE_HV_ASM_H */
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
deleted file mode 100644
index 8cd50a514271..000000000000
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Author: Yu Liu, <yu.liu@freescale.com>
- *
- * Description:
- * This file is derived from arch/powerpc/include/asm/kvm_44x.h,
- * by Hollis Blanchard <hollisb@us.ibm.com>.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_KVM_E500_H__
-#define __ASM_KVM_E500_H__
-
-#include <linux/kvm_host.h>
-
-#define BOOKE_INTERRUPT_SIZE 36
-
-#define E500_PID_NUM 3
-#define E500_TLB_NUM 2
-
-#define E500_TLB_VALID 1
-#define E500_TLB_DIRTY 2
-
-struct tlbe_ref {
- pfn_t pfn;
- unsigned int flags; /* E500_TLB_* */
-};
-
-struct tlbe_priv {
- struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
-};
-
-struct vcpu_id_table;
-
-struct kvmppc_e500_tlb_params {
- int entries, ways, sets;
-};
-
-struct kvmppc_vcpu_e500 {
- /* Unmodified copy of the guest's TLB -- shared with host userspace. */
- struct kvm_book3e_206_tlb_entry *gtlb_arch;
-
- /* Starting entry number in gtlb_arch[] */
- int gtlb_offset[E500_TLB_NUM];
-
- /* KVM internal information associated with each guest TLB entry */
- struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
-
- struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];
-
- unsigned int gtlb_nv[E500_TLB_NUM];
-
- /*
- * information associated with each host TLB entry --
- * TLB1 only for now. If/when guest TLB1 entries can be
- * mapped with host TLB0, this will be used for that too.
- *
- * We don't want to use this for guest TLB0 because then we'd
- * have the overhead of doing the translation again even if
- * the entry is still in the guest TLB (e.g. we swapped out
- * and back, and our host TLB entries got evicted).
- */
- struct tlbe_ref *tlb_refs[E500_TLB_NUM];
- unsigned int host_tlb1_nv;
-
- u32 host_pid[E500_PID_NUM];
- u32 pid[E500_PID_NUM];
- u32 svr;
-
- /* vcpu id table */
- struct vcpu_id_table *idt;
-
- u32 l1csr0;
- u32 l1csr1;
- u32 hid0;
- u32 hid1;
- u32 tlb0cfg;
- u32 tlb1cfg;
- u64 mcar;
-
- struct page **shared_tlb_pages;
- int num_shared_tlb_pages;
-
- struct kvm_vcpu vcpu;
-};
-
-static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
-{
- return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
-}
-
-#endif /* __ASM_KVM_E500_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 52eb9c1f4fe0..d848cdc49715 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -82,7 +82,7 @@ struct kvm_vcpu;
struct lppaca;
struct slb_shadow;
-struct dtl;
+struct dtl_entry;
struct kvm_vm_stat {
u32 remote_tlb_flush;
@@ -106,6 +106,8 @@ struct kvm_vcpu_stat {
u32 dec_exits;
u32 ext_intr_exits;
u32 halt_wakeup;
+ u32 dbell_exits;
+ u32 gdbell_exits;
#ifdef CONFIG_PPC_BOOK3S
u32 pf_storage;
u32 pf_instruc;
@@ -140,6 +142,7 @@ enum kvm_exit_types {
EMULATED_TLBSX_EXITS,
EMULATED_TLBWE_EXITS,
EMULATED_RFI_EXITS,
+ EMULATED_RFCI_EXITS,
DEC_EXITS,
EXT_INTR_EXITS,
HALT_WAKEUP,
@@ -147,6 +150,8 @@ enum kvm_exit_types {
FP_UNAVAIL,
DEBUG_EXITS,
TIMEINGUEST,
+ DBELL_EXITS,
+ GDBELL_EXITS,
__NUMBER_OF_KVM_EXIT_TYPES
};
@@ -217,10 +222,10 @@ struct kvm_arch_memory_slot {
};
struct kvm_arch {
+ unsigned int lpid;
#ifdef CONFIG_KVM_BOOK3S_64_HV
unsigned long hpt_virt;
struct revmap_entry *revmap;
- unsigned int lpid;
unsigned int host_lpid;
unsigned long host_lpcr;
unsigned long sdr1;
@@ -232,7 +237,6 @@ struct kvm_arch {
unsigned long vrma_slb_v;
int rma_setup_done;
int using_mmu_notifiers;
- struct list_head spapr_tce_tables;
spinlock_t slot_phys_lock;
unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
int slot_npages[KVM_MEM_SLOTS_NUM];
@@ -240,6 +244,9 @@ struct kvm_arch {
struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
struct kvmppc_linear_info *hpt_li;
#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct list_head spapr_tce_tables;
+#endif
};
/*
@@ -263,6 +270,9 @@ struct kvmppc_vcore {
struct list_head runnable_threads;
spinlock_t lock;
wait_queue_head_t wq;
+ u64 stolen_tb;
+ u64 preempt_tb;
+ struct kvm_vcpu *runner;
};
#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
@@ -274,6 +284,19 @@ struct kvmppc_vcore {
#define VCORE_EXITING 2
#define VCORE_SLEEPING 3
+/*
+ * Struct used to manage memory for a virtual processor area
+ * registered by a PAPR guest. There are three types of area
+ * that a guest can register.
+ */
+struct kvmppc_vpa {
+ void *pinned_addr; /* Address in kernel linear mapping */
+ void *pinned_end; /* End of region */
+ unsigned long next_gpa; /* Guest phys addr for update */
+ unsigned long len; /* Number of bytes required */
+ u8 update_pending; /* 1 => update pinned_addr from next_gpa */
+};
+
struct kvmppc_pte {
ulong eaddr;
u64 vpage;
@@ -345,6 +368,17 @@ struct kvm_vcpu_arch {
u64 vsr[64];
#endif
+#ifdef CONFIG_KVM_BOOKE_HV
+ u32 host_mas4;
+ u32 host_mas6;
+ u32 shadow_epcr;
+ u32 epcr;
+ u32 shadow_msrp;
+ u32 eplc;
+ u32 epsc;
+ u32 oldpir;
+#endif
+
#ifdef CONFIG_PPC_BOOK3S
/* For Gekko paired singles */
u32 qpr[32];
@@ -370,6 +404,7 @@ struct kvm_vcpu_arch {
#endif
u32 vrsave; /* also USPRG0 */
u32 mmucr;
+ /* shadow_msr is unused for BookE HV */
ulong shadow_msr;
ulong csrr0;
ulong csrr1;
@@ -426,8 +461,12 @@ struct kvm_vcpu_arch {
ulong fault_esr;
ulong queued_dear;
ulong queued_esr;
+ u32 tlbcfg[4];
+ u32 mmucfg;
+ u32 epr;
#endif
gpa_t paddr_accessed;
+ gva_t vaddr_accessed;
u8 io_gpr; /* GPR used as IO source/target */
u8 mmio_is_bigendian;
@@ -453,11 +492,6 @@ struct kvm_vcpu_arch {
u8 prodded;
u32 last_inst;
- struct lppaca *vpa;
- struct slb_shadow *slb_shadow;
- struct dtl *dtl;
- struct dtl *dtl_end;
-
wait_queue_head_t *wqp;
struct kvmppc_vcore *vcore;
int ret;
@@ -482,6 +516,14 @@ struct kvm_vcpu_arch {
struct task_struct *run_task;
struct kvm_run *kvm_run;
pgd_t *pgdir;
+
+ spinlock_t vpa_update_lock;
+ struct kvmppc_vpa vpa;
+ struct kvmppc_vpa dtl;
+ struct dtl_entry *dtl_ptr;
+ unsigned long dtl_index;
+ u64 stolen_logged;
+ struct kvmppc_vpa slb_shadow;
#endif
};
@@ -498,4 +540,6 @@ struct kvm_vcpu_arch {
#define KVM_MMIO_REG_QPR 0x0040
#define KVM_MMIO_REG_FQPR 0x0060
+#define __KVM_HAVE_ARCH_WQP
+
#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 7b754e743003..c18916bff689 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -206,6 +206,11 @@ static inline unsigned int kvm_arch_para_features(void)
return r;
}
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
#endif /* __KERNEL__ */
#endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9d6dee0f7d48..f68c22fa2fce 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -95,7 +95,7 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
@@ -107,8 +107,10 @@ extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
+extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
+ ulong val);
+extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
+ ulong *val);
extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);
@@ -126,6 +128,8 @@ extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce *args);
+extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba, unsigned long tce);
extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
struct kvm_allocate_rma *rma);
extern struct kvmppc_linear_info *kvm_alloc_rma(void);
@@ -138,6 +142,11 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem);
+extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
+ struct kvm_ppc_smmu_info *info);
+
+extern int kvmppc_bookehv_init(void);
+extern void kvmppc_bookehv_exit(void);
/*
* Cuts out inst bits with ordering according to spec.
@@ -204,4 +213,9 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
struct kvm_dirty_tlb *cfg);
+long kvmppc_alloc_lpid(void);
+void kvmppc_claim_lpid(long lpid);
+void kvmppc_free_lpid(long lpid);
+void kvmppc_init_lpid(unsigned long nr_lpids);
+
#endif /* __POWERPC_KVM_PPC_H__ */
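
The kvmppc_*_lpid() prototypes added here replace the open-coded lpid_inuse bitmap that book3s_64_mmu_hv.c drops later in this patch, so Book3S HV and BookE HV can share one LPID allocator. The shared implementation lands in powerpc.c and is not part of this hunk; one plausible shape for it, matching these prototypes and the bitmap calls the old code used, is:

/* Sketch only, not the code added by this series. */
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr)
{
	nr_lpids = min_t(unsigned long, nr, KVMPPC_NR_LPIDS);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
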
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index cdb5421877e2..eeabcdbc30f7 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -104,6 +104,8 @@
#define MAS4_TSIZED_MASK 0x00000f80 /* Default TSIZE */
#define MAS4_TSIZED_SHIFT 7
+#define MAS5_SGS 0x80000000
+
#define MAS6_SPID0 0x3FFF0000
#define MAS6_SPID1 0x00007FFE
#define MAS6_ISIZE(x) MAS1_TSIZE(x)
@@ -118,6 +120,10 @@
#define MAS7_RPN 0xFFFFFFFF
+#define MAS8_TGS 0x80000000 /* Guest space */
+#define MAS8_VF 0x40000000 /* Virtualization Fault */
+#define MAS8_TLPID 0x000000ff
+
/* Bit definitions for MMUCFG */
#define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */
#define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 55e85631c42e..413a5eaef56c 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -240,6 +240,9 @@ struct thread_struct {
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
void* kvm_shadow_vcpu; /* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
+#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
+ struct kvm_vcpu *kvm_vcpu;
+#endif
#ifdef CONFIG_PPC64
unsigned long dscr;
int dscr_inherit;
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 9d7f0fb69028..f0cb7f461b9d 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -257,7 +257,9 @@
#define LPCR_LPES_SH 2
#define LPCR_RMI 0x00000002 /* real mode is cache inhibit */
#define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */
+#ifndef SPRN_LPID
#define SPRN_LPID 0x13F /* Logical Partition Identifier */
+#endif
#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */
#define SPRN_HMER 0x150 /* Hardware m? error recovery */
#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 8a97aa7289d3..2d916c4982c5 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -56,18 +56,30 @@
#define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */
#define SPRN_EPCR 0x133 /* Embedded Processor Control Register */
#define SPRN_DBCR2 0x136 /* Debug Control Register 2 */
+#define SPRN_MSRP 0x137 /* MSR Protect Register */
#define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */
#define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */
#define SPRN_DVC1 0x13E /* Data Value Compare Register 1 */
#define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */
+#define SPRN_LPID 0x152 /* Logical Partition ID */
#define SPRN_MAS8 0x155 /* MMU Assist Register 8 */
#define SPRN_TLB0PS 0x158 /* TLB 0 Page Size Register */
#define SPRN_TLB1PS 0x159 /* TLB 1 Page Size Register */
#define SPRN_MAS5_MAS6 0x15c /* MMU Assist Register 5 || 6 */
#define SPRN_MAS8_MAS1 0x15d /* MMU Assist Register 8 || 1 */
#define SPRN_EPTCFG 0x15e /* Embedded Page Table Config */
+#define SPRN_GSPRG0 0x170 /* Guest SPRG0 */
+#define SPRN_GSPRG1 0x171 /* Guest SPRG1 */
+#define SPRN_GSPRG2 0x172 /* Guest SPRG2 */
+#define SPRN_GSPRG3 0x173 /* Guest SPRG3 */
#define SPRN_MAS7_MAS3 0x174 /* MMU Assist Register 7 || 3 */
#define SPRN_MAS0_MAS1 0x175 /* MMU Assist Register 0 || 1 */
+#define SPRN_GSRR0 0x17A /* Guest SRR0 */
+#define SPRN_GSRR1 0x17B /* Guest SRR1 */
+#define SPRN_GEPR 0x17C /* Guest EPR */
+#define SPRN_GDEAR 0x17D /* Guest DEAR */
+#define SPRN_GPIR 0x17E /* Guest PIR */
+#define SPRN_GESR 0x17F /* Guest Exception Syndrome Register */
#define SPRN_IVOR0 0x190 /* Interrupt Vector Offset Register 0 */
#define SPRN_IVOR1 0x191 /* Interrupt Vector Offset Register 1 */
#define SPRN_IVOR2 0x192 /* Interrupt Vector Offset Register 2 */
@@ -88,6 +100,13 @@
#define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */
#define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */
#define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */
+#define SPRN_GIVOR2 0x1B8 /* Guest IVOR2 */
+#define SPRN_GIVOR3 0x1B9 /* Guest IVOR3 */
+#define SPRN_GIVOR4 0x1BA /* Guest IVOR4 */
+#define SPRN_GIVOR8 0x1BB /* Guest IVOR8 */
+#define SPRN_GIVOR13 0x1BC /* Guest IVOR13 */
+#define SPRN_GIVOR14 0x1BD /* Guest IVOR14 */
+#define SPRN_GIVPR 0x1BF /* Guest IVPR */
#define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */
#define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */
#define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */
@@ -240,6 +259,10 @@
#define MCSR_LDG 0x00002000UL /* Guarded Load */
#define MCSR_TLBSYNC 0x00000002UL /* Multiple tlbsyncs detected */
#define MCSR_BSL2_ERR 0x00000001UL /* Backside L2 cache error */
+
+#define MSRP_UCLEP 0x04000000 /* Protect MSR[UCLE] */
+#define MSRP_DEP 0x00000200 /* Protect MSR[DE] */
+#define MSRP_PMMP 0x00000004 /* Protect MSR[PMM] */
#endif
#ifdef CONFIG_E200
@@ -594,6 +617,17 @@
#define SPRN_EPCR_DMIUH 0x00400000 /* Disable MAS Interrupt updates
* for hypervisor */
+/* Bit definitions for EPLC/EPSC */
+#define EPC_EPR 0x80000000 /* 1 = user, 0 = kernel */
+#define EPC_EPR_SHIFT 31
+#define EPC_EAS 0x40000000 /* Address Space */
+#define EPC_EAS_SHIFT 30
+#define EPC_EGS 0x20000000 /* 1 = guest, 0 = hypervisor */
+#define EPC_EGS_SHIFT 29
+#define EPC_ELPID 0x00ff0000
+#define EPC_ELPID_SHIFT 16
+#define EPC_EPID 0x00003fff
+#define EPC_EPID_SHIFT 0
/*
* The IBM-403 is an even more odd special case, as it is much
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 1a6320290d26..200d763a0a67 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -17,6 +17,7 @@ extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
extern void giveup_fpu(struct task_struct *);
+extern void load_up_fpu(void);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 2136f58a54e8..3b4b4a8da922 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -23,6 +23,7 @@
extern unsigned long tb_ticks_per_jiffy;
extern unsigned long tb_ticks_per_usec;
extern unsigned long tb_ticks_per_sec;
+extern struct clock_event_device decrementer_clockevent;
struct rtc_time;
extern void to_tm(int tim, struct rtc_time * tm);
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index bd0fb8495154..17bb40cad5bf 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -40,6 +40,8 @@
#define segment_eq(a, b) ((a).seg == (b).seg)
+#define user_addr_max() (get_fs().seg)
+
#ifdef __powerpc64__
/*
* This check is sufficient because there is a large enough
@@ -453,42 +455,9 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
return size;
}
-extern int __strncpy_from_user(char *dst, const char __user *src, long count);
-
-static inline long strncpy_from_user(char *dst, const char __user *src,
- long count)
-{
- might_sleep();
- if (likely(access_ok(VERIFY_READ, src, 1)))
- return __strncpy_from_user(dst, src, count);
- return -EFAULT;
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 for error
- */
-extern int __strnlen_user(const char __user *str, long len, unsigned long top);
-
-/*
- * Returns the length of the string at str (including the null byte),
- * or 0 if we hit a page we can't access,
- * or something > len if we didn't find a null byte.
- *
- * The `top' parameter to __strnlen_user is to make sure that
- * we can never overflow from the user area into kernel space.
- */
-static inline int strnlen_user(const char __user *str, long len)
-{
- unsigned long top = current->thread.fs.seg;
-
- if ((unsigned long)str > top)
- return 0;
- return __strnlen_user(str, len, top);
-}
-
-#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..d0b6d4ac6dda
--- /dev/null
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * Word-at-a-time interfaces for PowerPC.
+ */
+
+#include <linux/kernel.h>
+#include <asm/asm-compat.h>
+
+struct word_at_a_time {
+ const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+ unsigned long mask = (val & c->low_bits) + c->low_bits;
+ return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+ long leading_zero_bits;
+
+ asm (PPC_CNTLZL "%0,%1" : "=r" (leading_zero_bits) : "r" (mask));
+ return leading_zero_bits >> 3;
+}
+
+static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+ unsigned long rhs = val | c->low_bits;
+ *data = rhs;
+ return (val + c->high_bits) & ~rhs;
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
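
These four helpers form the big-endian word-at-a-time contract consumed by the generic lib/strncpy_from_user.c and lib/strnlen_user.c code that the uaccess.h hunk below switches this architecture over to: has_zero() reports whether a word contains a zero byte, and prep_zero_mask(), create_zero_mask() and find_zero() then locate the first one. A minimal in-kernel sketch of that calling sequence (the wrapper function itself is illustrative):

#include <asm/word-at-a-time.h>

/* Sketch only: index of the first zero byte in @word, or -1 if none. */
static int first_zero_byte(unsigned long word)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	unsigned long data;

	if (!has_zero(word, &data, &constants))
		return -1;
	data = prep_zero_mask(word, data, &constants);
	data = create_zero_mask(data);
	return find_zero(data);		/* 0 is the most significant byte */
}
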
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 4554dc2fe857..52c7ad78242e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -116,6 +116,9 @@ int main(void)
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
#endif
+#ifdef CONFIG_KVM_BOOKE_HV
+ DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu));
+#endif
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
@@ -383,6 +386,7 @@ int main(void)
#ifdef CONFIG_KVM
DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
+ DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
@@ -425,9 +429,11 @@ int main(void)
DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
+ DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+ DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
+
/* book3s */
#ifdef CONFIG_KVM_BOOK3S_64_HV
- DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
@@ -440,9 +446,9 @@ int main(void)
DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
+ DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
#endif
#ifdef CONFIG_PPC_BOOK3S
- DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
@@ -457,7 +463,6 @@ int main(void)
DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded));
DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
- DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa));
DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
@@ -533,6 +538,8 @@ int main(void)
HSTATE_FIELD(HSTATE_NAPPING, napping);
#ifdef CONFIG_KVM_BOOK3S_64_HV
+ HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
+ HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
@@ -593,6 +600,12 @@ int main(void)
DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
#endif
+#ifdef CONFIG_KVM_BOOKE_HV
+ DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4));
+ DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6));
+ DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc));
+#endif
+
#ifdef CONFIG_KVM_EXIT_TIMING
DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
arch.timing_exit.tv32.tbu));
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 8053db02b85e..69fdd2322a66 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -73,6 +73,7 @@ _GLOBAL(__setup_cpu_e500v2)
mtlr r4
blr
_GLOBAL(__setup_cpu_e500mc)
+ mr r5, r4
mflr r4
bl __e500_icache_setup
bl __e500_dcache_setup
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index f7bed44ee165..1c06d2971545 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -63,11 +63,13 @@ BEGIN_FTR_SECTION
GET_PACA(r13)
#ifdef CONFIG_KVM_BOOK3S_64_HV
- lbz r0,PACAPROCSTART(r13)
- cmpwi r0,0x80
- bne 1f
- li r0,1
- stb r0,PACAPROCSTART(r13)
+ li r0,KVM_HWTHREAD_IN_KERNEL
+ stb r0,HSTATE_HWTHREAD_STATE(r13)
+ /* Order setting hwthread_state vs. testing hwthread_req */
+ sync
+ lbz r0,HSTATE_HWTHREAD_REQ(r13)
+ cmpwi r0,0
+ beq 1f
b kvm_start_guest
1:
#endif
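
The rewritten secondary-thread entry above is one half of a store/barrier/load handshake: the napping thread publishes KVM_HWTHREAD_IN_KERNEL in hwthread_state, the sync orders that store before the following load, and only then is hwthread_req sampled to decide whether to fall into kvm_start_guest. Expressed as a C sketch (the function is illustrative; kvm_start_guest() stands in for the asm branch, and the counterpart that sets hwthread_req lives elsewhere in the series):

/* Sketch only: the ordering the sync above provides, in C terms. */
static void secondary_thread_checkin(struct kvmppc_host_state *hstate)
{
	hstate->hwthread_state = KVM_HWTHREAD_IN_KERNEL;
	smp_mb();		/* the "sync" in the asm above */
	if (hstate->hwthread_req)
		kvm_start_guest();
}
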
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 22d608e8bb7d..7a2e5e421abf 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -248,10 +248,11 @@ _ENTRY(_start);
interrupt_base:
/* Critical Input Interrupt */
- CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
+ CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)
/* Machine Check Interrupt */
- CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+ CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
+ machine_check_exception)
MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)
/* Data Storage Interrupt */
@@ -261,7 +262,8 @@ interrupt_base:
INSTRUCTION_STORAGE_EXCEPTION
/* External Input Interrupt */
- EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
+ EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, \
+ do_IRQ, EXC_XFER_LITE)
/* Alignment Interrupt */
ALIGNMENT_EXCEPTION
@@ -273,29 +275,32 @@ interrupt_base:
#ifdef CONFIG_PPC_FPU
FP_UNAVAILABLE_EXCEPTION
#else
- EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
+ FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
/* System Call Interrupt */
START_EXCEPTION(SystemCall)
- NORMAL_EXCEPTION_PROLOG
+ NORMAL_EXCEPTION_PROLOG(BOOKE_INTERRUPT_SYSCALL)
EXC_XFER_EE_LITE(0x0c00, DoSyscall)
/* Auxiliary Processor Unavailable Interrupt */
- EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
+ AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
/* Decrementer Interrupt */
DECREMENTER_EXCEPTION
/* Fixed Internal Timer Interrupt */
/* TODO: Add FIT support */
- EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, \
+ unknown_exception, EXC_XFER_EE)
/* Watchdog Timer Interrupt */
/* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
- CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
+ CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException)
#else
- CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
+ CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception)
#endif
/* Data TLB Error Interrupt */
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 0e4175388f47..5f051eeb93a2 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -2,6 +2,9 @@
#define __HEAD_BOOKE_H__
#include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */
+#include <asm/kvm_asm.h>
+#include <asm/kvm_booke_hv_asm.h>
+
/*
* Macros used for common Book-e exception handling
*/
@@ -28,14 +31,15 @@
*/
#define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4))
-#define NORMAL_EXCEPTION_PROLOG \
+#define NORMAL_EXCEPTION_PROLOG(intno) \
mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
mfspr r10, SPRN_SPRG_THREAD; \
stw r11, THREAD_NORMSAVE(0)(r10); \
stw r13, THREAD_NORMSAVE(2)(r10); \
mfcr r13; /* save CR in r13 for now */\
- mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
- andi. r11,r11,MSR_PR; \
+ mfspr r11, SPRN_SRR1; \
+ DO_KVM BOOKE_INTERRUPT_##intno SPRN_SRR1; \
+ andi. r11, r11, MSR_PR; /* check whether user or kernel */\
mr r11, r1; \
beq 1f; \
/* if from user, start at top of this thread's kernel stack */ \
@@ -113,7 +117,7 @@
* registers as the normal prolog above. Instead we use a portion of the
* critical/machine check exception stack at low physical addresses.
*/
-#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, exc_level_srr0, exc_level_srr1) \
+#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, intno, exc_level_srr0, exc_level_srr1) \
mtspr SPRN_SPRG_WSCRATCH_##exc_level,r8; \
BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \
stw r9,GPR9(r8); /* save various registers */\
@@ -121,8 +125,9 @@
stw r10,GPR10(r8); \
stw r11,GPR11(r8); \
stw r9,_CCR(r8); /* save CR on stack */\
- mfspr r10,exc_level_srr1; /* check whether user or kernel */\
- andi. r10,r10,MSR_PR; \
+ mfspr r11,exc_level_srr1; /* check whether user or kernel */\
+ DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \
+ andi. r11,r11,MSR_PR; \
mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\
@@ -162,12 +167,30 @@
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
-#define CRITICAL_EXCEPTION_PROLOG \
- EXC_LEVEL_EXCEPTION_PROLOG(CRIT, SPRN_CSRR0, SPRN_CSRR1)
+#define CRITICAL_EXCEPTION_PROLOG(intno) \
+ EXC_LEVEL_EXCEPTION_PROLOG(CRIT, intno, SPRN_CSRR0, SPRN_CSRR1)
#define DEBUG_EXCEPTION_PROLOG \
- EXC_LEVEL_EXCEPTION_PROLOG(DBG, SPRN_DSRR0, SPRN_DSRR1)
+ EXC_LEVEL_EXCEPTION_PROLOG(DBG, DEBUG, SPRN_DSRR0, SPRN_DSRR1)
#define MCHECK_EXCEPTION_PROLOG \
- EXC_LEVEL_EXCEPTION_PROLOG(MC, SPRN_MCSRR0, SPRN_MCSRR1)
+ EXC_LEVEL_EXCEPTION_PROLOG(MC, MACHINE_CHECK, \
+ SPRN_MCSRR0, SPRN_MCSRR1)
+
+/*
+ * Guest Doorbell -- this is a bit odd in that it uses GSRR0/1 despite
+ * being delivered to the host. This exception can only happen
+ * inside a KVM guest -- so we just handle up to the DO_KVM rather
+ * than try to fit this into one of the existing prolog macros.
+ */
+#define GUEST_DOORBELL_EXCEPTION \
+ START_EXCEPTION(GuestDoorbell); \
+ mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
+ mfspr r10, SPRN_SPRG_THREAD; \
+ stw r11, THREAD_NORMSAVE(0)(r10); \
+ mfspr r11, SPRN_SRR1; \
+ stw r13, THREAD_NORMSAVE(2)(r10); \
+ mfcr r13; /* save CR in r13 for now */\
+ DO_KVM BOOKE_INTERRUPT_GUEST_DBELL SPRN_GSRR1; \
+ trap
/*
* Exception vectors.
@@ -181,16 +204,16 @@ label:
.long func; \
.long ret_from_except_full
-#define EXCEPTION(n, label, hdlr, xfer) \
+#define EXCEPTION(n, intno, label, hdlr, xfer) \
START_EXCEPTION(label); \
- NORMAL_EXCEPTION_PROLOG; \
+ NORMAL_EXCEPTION_PROLOG(intno); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
xfer(n, hdlr)
-#define CRITICAL_EXCEPTION(n, label, hdlr) \
- START_EXCEPTION(label); \
- CRITICAL_EXCEPTION_PROLOG; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
+#define CRITICAL_EXCEPTION(n, intno, label, hdlr) \
+ START_EXCEPTION(label); \
+ CRITICAL_EXCEPTION_PROLOG(intno); \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
NOCOPY, crit_transfer_to_handler, \
ret_from_crit_exc)
@@ -302,7 +325,7 @@ label:
#define DEBUG_CRIT_EXCEPTION \
START_EXCEPTION(DebugCrit); \
- CRITICAL_EXCEPTION_PROLOG; \
+ CRITICAL_EXCEPTION_PROLOG(DEBUG); \
\
/* \
* If there is a single step or branch-taken exception in an \
@@ -355,7 +378,7 @@ label:
#define DATA_STORAGE_EXCEPTION \
START_EXCEPTION(DataStorage) \
- NORMAL_EXCEPTION_PROLOG; \
+ NORMAL_EXCEPTION_PROLOG(DATA_STORAGE); \
mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
stw r5,_ESR(r11); \
mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \
@@ -363,7 +386,7 @@ label:
#define INSTRUCTION_STORAGE_EXCEPTION \
START_EXCEPTION(InstructionStorage) \
- NORMAL_EXCEPTION_PROLOG; \
+ NORMAL_EXCEPTION_PROLOG(INST_STORAGE); \
mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
stw r5,_ESR(r11); \
mr r4,r12; /* Pass SRR0 as arg2 */ \
@@ -372,7 +395,7 @@ label:
#define ALIGNMENT_EXCEPTION \
START_EXCEPTION(Alignment) \
- NORMAL_EXCEPTION_PROLOG; \
+ NORMAL_EXCEPTION_PROLOG(ALIGNMENT); \
mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \
stw r4,_DEAR(r11); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -380,7 +403,7 @@ label:
#define PROGRAM_EXCEPTION \
START_EXCEPTION(Program) \
- NORMAL_EXCEPTION_PROLOG; \
+ NORMAL_EXCEPTION_PROLOG(PROGRAM); \
mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \
stw r4,_ESR(r11); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -388,7 +411,7 @@ label:
#define DECREMENTER_EXCEPTION \
START_EXCEPTION(Decrementer) \
- NORMAL_EXCEPTION_PROLOG; \
+ NORMAL_EXCEPTION_PROLOG(DECREMENTER); \
lis r0,TSR_DIS@h; /* Setup the DEC interrupt mask */ \
mtspr SPRN_TSR,r0; /* Clear the DEC interrupt */ \
addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -396,7 +419,7 @@ label:
#define FP_UNAVAILABLE_EXCEPTION \
START_EXCEPTION(FloatingPointUnavailable) \
- NORMAL_EXCEPTION_PROLOG; \
+ NORMAL_EXCEPTION_PROLOG(FP_UNAVAIL); \
beq 1f; \
bl load_up_fpu; /* if from user, just load it up */ \
b fast_exception_return; \
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index de80e0f9a2bd..1f4434a38608 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -301,19 +301,20 @@ _ENTRY(__early_start)
interrupt_base:
/* Critical Input Interrupt */
- CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
+ CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)
/* Machine Check Interrupt */
#ifdef CONFIG_E200
/* no RFMCI, MCSRRs on E200 */
- CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+ CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
+ machine_check_exception)
#else
MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#endif
/* Data Storage Interrupt */
START_EXCEPTION(DataStorage)
- NORMAL_EXCEPTION_PROLOG
+ NORMAL_EXCEPTION_PROLOG(DATA_STORAGE)
mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
stw r5,_ESR(r11)
mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
@@ -328,7 +329,7 @@ interrupt_base:
INSTRUCTION_STORAGE_EXCEPTION
/* External Input Interrupt */
- EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
+ EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ, EXC_XFER_LITE)
/* Alignment Interrupt */
ALIGNMENT_EXCEPTION
@@ -342,32 +343,36 @@ interrupt_base:
#else
#ifdef CONFIG_E200
/* E200 treats 'normal' floating point instructions as FP Unavail exception */
- EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
+ EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
+ program_check_exception, EXC_XFER_EE)
#else
- EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
+ unknown_exception, EXC_XFER_EE)
#endif
#endif
/* System Call Interrupt */
START_EXCEPTION(SystemCall)
- NORMAL_EXCEPTION_PROLOG
+ NORMAL_EXCEPTION_PROLOG(SYSCALL)
EXC_XFER_EE_LITE(0x0c00, DoSyscall)
/* Auxiliary Processor Unavailable Interrupt */
- EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \
+ unknown_exception, EXC_XFER_EE)
/* Decrementer Interrupt */
DECREMENTER_EXCEPTION
/* Fixed Internal Timer Interrupt */
/* TODO: Add FIT support */
- EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x3100, FIT, FixedIntervalTimer, \
+ unknown_exception, EXC_XFER_EE)
/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
- CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
+ CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, WatchdogException)
#else
- CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
+ CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, unknown_exception)
#endif
/* Data TLB Error Interrupt */
@@ -375,10 +380,16 @@ interrupt_base:
mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
mfspr r10, SPRN_SPRG_THREAD
stw r11, THREAD_NORMSAVE(0)(r10)
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+ mfspr r11, SPRN_SRR1
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
stw r12, THREAD_NORMSAVE(1)(r10)
stw r13, THREAD_NORMSAVE(2)(r10)
mfcr r13
stw r13, THREAD_NORMSAVE(3)(r10)
+ DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -463,10 +474,16 @@ interrupt_base:
mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
mfspr r10, SPRN_SPRG_THREAD
stw r11, THREAD_NORMSAVE(0)(r10)
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+ mfspr r11, SPRN_SRR1
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
stw r12, THREAD_NORMSAVE(1)(r10)
stw r13, THREAD_NORMSAVE(2)(r10)
mfcr r13
stw r13, THREAD_NORMSAVE(3)(r10)
+ DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -538,36 +555,54 @@ interrupt_base:
#ifdef CONFIG_SPE
/* SPE Unavailable */
START_EXCEPTION(SPEUnavailable)
- NORMAL_EXCEPTION_PROLOG
+ NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
bne load_up_spe
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
- EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
+ unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */
/* SPE Floating Point Data */
#ifdef CONFIG_SPE
- EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
+ EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData, \
+ SPEFloatingPointException, EXC_XFER_EE);
/* SPE Floating Point Round */
- EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE)
+ EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
+ SPEFloatingPointRoundException, EXC_XFER_EE)
#else
- EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, \
+ unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
+ unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */
/* Performance Monitor */
- EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
+ EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
+ performance_monitor_exception, EXC_XFER_STD)
- EXCEPTION(0x2070, Doorbell, doorbell_exception, EXC_XFER_STD)
+ EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception, EXC_XFER_STD)
- CRITICAL_EXCEPTION(0x2080, CriticalDoorbell, unknown_exception)
+ CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \
+ CriticalDoorbell, unknown_exception)
/* Debug Interrupt */
DEBUG_DEBUG_EXCEPTION
DEBUG_CRIT_EXCEPTION
+ GUEST_DOORBELL_EXCEPTION
+
+ CRITICAL_EXCEPTION(0, GUEST_DBELL_CRIT, CriticalGuestDoorbell, \
+ unknown_exception)
+
+ /* Hypercall */
+ EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception, EXC_XFER_EE)
+
+ /* Embedded Hypervisor Privilege */
+ EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_EE)
+
/*
* Local functions
*/
@@ -871,8 +906,31 @@ _GLOBAL(__setup_e500mc_ivors)
mtspr SPRN_IVOR36,r3
li r3,CriticalDoorbell@l
mtspr SPRN_IVOR37,r3
+
+ /*
+ * We only want to touch IVOR38-41 if we're running on hardware
+ * that supports category E.HV. The architectural way to determine
+ * this is MMUCFG[LPIDSIZE].
+ */
+ mfspr r3, SPRN_MMUCFG
+ andis. r3, r3, MMUCFG_LPIDSIZE@h
+ beq no_hv
+ li r3,GuestDoorbell@l
+ mtspr SPRN_IVOR38,r3
+ li r3,CriticalGuestDoorbell@l
+ mtspr SPRN_IVOR39,r3
+ li r3,Hypercall@l
+ mtspr SPRN_IVOR40,r3
+ li r3,Ehvpriv@l
+ mtspr SPRN_IVOR41,r3
+skip_hv_ivors:
sync
blr
+no_hv:
+ lwz r3, CPU_SPEC_FEATURES(r5)
+ rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
+ stw r3, CPU_SPEC_FEATURES(r5)
+ b skip_hv_ivors
#ifdef CONFIG_SPE
/*
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 0cdc9a392839..7140d838339e 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -16,6 +16,7 @@
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
+#include <asm/kvm_book3s_asm.h>
#undef DEBUG
@@ -81,6 +82,12 @@ _GLOBAL(power7_idle)
std r9,_MSR(r1)
std r1,PACAR1(r13)
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ /* Tell KVM we're napping */
+ li r4,KVM_HWTHREAD_IN_NAP
+ stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
+
/* Magic NAP mode enter sequence */
std r0,0(r1)
ptesync
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 786a2700ec2d..3e4031581c65 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -85,8 +85,6 @@ EXPORT_SYMBOL(csum_tcpudp_magic);
EXPORT_SYMBOL(__copy_tofrom_user);
EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(copy_page);
#if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
@@ -190,3 +188,7 @@ EXPORT_SYMBOL(__arch_hweight16);
EXPORT_SYMBOL(__arch_hweight32);
EXPORT_SYMBOL(__arch_hweight64);
#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+EXPORT_SYMBOL_GPL(mmu_psize_defs);
+#endif
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 2c42cd72d0f5..99a995c2a3f2 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -100,7 +100,7 @@ static int decrementer_set_next_event(unsigned long evt,
static void decrementer_set_mode(enum clock_event_mode mode,
struct clock_event_device *dev);
-static struct clock_event_device decrementer_clockevent = {
+struct clock_event_device decrementer_clockevent = {
.name = "decrementer",
.rating = 200,
.irq = 0,
@@ -108,6 +108,7 @@ static struct clock_event_device decrementer_clockevent = {
.set_mode = decrementer_set_mode,
.features = CLOCK_EVT_FEAT_ONESHOT,
};
+EXPORT_SYMBOL(decrementer_clockevent);
DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 7b612a76c701..50e7dbc7356c 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -29,15 +29,18 @@
#include <asm/kvm_ppc.h>
#include "44x_tlb.h"
+#include "booke.h"
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
+ kvmppc_booke_vcpu_load(vcpu, cpu);
kvmppc_44x_tlb_load(vcpu);
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
kvmppc_44x_tlb_put(vcpu);
+ kvmppc_booke_vcpu_put(vcpu);
}
int kvmppc_core_check_processor_compat(void)
@@ -160,6 +163,15 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
}
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+}
+
static int __init kvmppc_44x_init(void)
{
int r;
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 549bb2c9a47a..c8c61578fdfc 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -37,22 +37,19 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
- int dcrn;
- int ra;
- int rb;
- int rc;
- int rs;
- int rt;
- int ws;
+ int dcrn = get_dcrn(inst);
+ int ra = get_ra(inst);
+ int rb = get_rb(inst);
+ int rc = get_rc(inst);
+ int rs = get_rs(inst);
+ int rt = get_rt(inst);
+ int ws = get_ws(inst);
switch (get_op(inst)) {
case 31:
switch (get_xop(inst)) {
case XOP_MFDCR:
- dcrn = get_dcrn(inst);
- rt = get_rt(inst);
-
/* The guest may access CPR0 registers to determine the timebase
* frequency, and it must know the real host frequency because it
* can directly access the timebase registers.
@@ -88,9 +85,6 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case XOP_MTDCR:
- dcrn = get_dcrn(inst);
- rs = get_rs(inst);
-
/* emulate some access in kernel */
switch (dcrn) {
case DCRN_CPR0_CONFIG_ADDR:
@@ -108,17 +102,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case XOP_TLBWE:
- ra = get_ra(inst);
- rs = get_rs(inst);
- ws = get_ws(inst);
emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
break;
case XOP_TLBSX:
- rt = get_rt(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
- rc = get_rc(inst);
emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
break;
@@ -141,41 +128,41 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_PID:
- kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
+ kvmppc_set_pid(vcpu, spr_val); break;
case SPRN_MMUCR:
- vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
+ vcpu->arch.mmucr = spr_val; break;
case SPRN_CCR0:
- vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
+ vcpu->arch.ccr0 = spr_val; break;
case SPRN_CCR1:
- vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
+ vcpu->arch.ccr1 = spr_val; break;
default:
- emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
+ emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
}
return emulated;
}
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_PID:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break;
+ *spr_val = vcpu->arch.pid; break;
case SPRN_MMUCR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break;
+ *spr_val = vcpu->arch.mmucr; break;
case SPRN_CCR0:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break;
+ *spr_val = vcpu->arch.ccr0; break;
case SPRN_CCR1:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break;
+ *spr_val = vcpu->arch.ccr1; break;
default:
- emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
+ emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
}
return emulated;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 8f64709ae331..f4dacb9c57fa 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -90,6 +90,9 @@ config KVM_BOOK3S_64_PR
depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
select KVM_BOOK3S_PR
+config KVM_BOOKE_HV
+ bool
+
config KVM_440
bool "KVM support for PowerPC 440 processors"
depends on EXPERIMENTAL && 44x
@@ -106,7 +109,7 @@ config KVM_440
config KVM_EXIT_TIMING
bool "Detailed exit timing"
- depends on KVM_440 || KVM_E500
+ depends on KVM_440 || KVM_E500V2 || KVM_E500MC
---help---
Calculate elapsed time for every exit/enter cycle. A per-vcpu
report is available in debugfs kvm/vm#_vcpu#_timing.
@@ -115,14 +118,29 @@ config KVM_EXIT_TIMING
If unsure, say N.
-config KVM_E500
- bool "KVM support for PowerPC E500 processors"
- depends on EXPERIMENTAL && E500
+config KVM_E500V2
+ bool "KVM support for PowerPC E500v2 processors"
+ depends on EXPERIMENTAL && E500 && !PPC_E500MC
select KVM
select KVM_MMIO
---help---
Support running unmodified E500 guest kernels in virtual machines on
- E500 host processors.
+ E500v2 host processors.
+
+ This module provides access to the hardware capabilities through
+ a character device node named /dev/kvm.
+
+ If unsure, say N.
+
+config KVM_E500MC
+ bool "KVM support for PowerPC E500MC/E5500 processors"
+ depends on EXPERIMENTAL && PPC_E500MC
+ select KVM
+ select KVM_MMIO
+ select KVM_BOOKE_HV
+ ---help---
+ Support running unmodified E500MC/E5500 (32-bit) guest kernels in
+ virtual machines on E500MC/E5500 host processors.
This module provides access to the hardware capabilities through
a character device node named /dev/kvm.
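
Both new options ultimately expose the same userspace interface mentioned in their help text, the /dev/kvm character device. A minimal userspace probe for it (this program is illustrative and not part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	/* KVM_GET_API_VERSION has returned 12 since the ABI was frozen. */
	printf("KVM API version: %d\n", ioctl(kvm, KVM_GET_API_VERSION, 0));
	close(kvm);
	return 0;
}
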
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 3688aeecc4b2..c2a08636e6d4 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -36,7 +36,17 @@ kvm-e500-objs := \
e500.o \
e500_tlb.o \
e500_emulate.o
-kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs)
+kvm-objs-$(CONFIG_KVM_E500V2) := $(kvm-e500-objs)
+
+kvm-e500mc-objs := \
+ $(common-objs-y) \
+ booke.o \
+ booke_emulate.o \
+ bookehv_interrupts.o \
+ e500mc.o \
+ e500_tlb.o \
+ e500_emulate.o
+kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
../../../virt/kvm/coalesced_mmio.o \
@@ -44,6 +54,7 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
book3s_paired_singles.o \
book3s_pr.o \
book3s_pr_papr.o \
+ book3s_64_vio_hv.o \
book3s_emulate.o \
book3s_interrupts.o \
book3s_mmu_hpte.o \
@@ -68,6 +79,7 @@ kvm-book3s_64-module-objs := \
powerpc.o \
emulate.o \
book3s.o \
+ book3s_64_vio.o \
$(kvm-book3s_64-objs-y)
kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
@@ -88,7 +100,8 @@ kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
obj-$(CONFIG_KVM_440) += kvm.o
-obj-$(CONFIG_KVM_E500) += kvm.o
+obj-$(CONFIG_KVM_E500V2) += kvm.o
+obj-$(CONFIG_KVM_E500MC) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 7d54f4ed6d96..3f2a8360c857 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -258,7 +258,7 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
return true;
}
-void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
+int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
unsigned long *pending = &vcpu->arch.pending_exceptions;
unsigned long old_pending = vcpu->arch.pending_exceptions;
@@ -283,12 +283,17 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
/* Tell the guest about our interrupt status */
kvmppc_update_int_pending(vcpu, *pending, old_pending);
+
+ return 0;
}
pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
ulong mp_pa = vcpu->arch.magic_page_pa;
+ if (!(vcpu->arch.shared->msr & MSR_SF))
+ mp_pa = (uint32_t)mp_pa;
+
/* Magic page override */
if (unlikely(mp_pa) &&
unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index c3beaeef3f60..80a577517584 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -36,13 +36,11 @@
/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970 63
-#define NR_LPIDS (LPID_RSVD + 1)
-unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];
long kvmppc_alloc_hpt(struct kvm *kvm)
{
unsigned long hpt;
- unsigned long lpid;
+ long lpid;
struct revmap_entry *rev;
struct kvmppc_linear_info *li;
@@ -72,14 +70,9 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
}
kvm->arch.revmap = rev;
- /* Allocate the guest's logical partition ID */
- do {
- lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
- if (lpid >= NR_LPIDS) {
- pr_err("kvm_alloc_hpt: No LPIDs free\n");
- goto out_freeboth;
- }
- } while (test_and_set_bit(lpid, lpid_inuse));
+ lpid = kvmppc_alloc_lpid();
+ if (lpid < 0)
+ goto out_freeboth;
kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
kvm->arch.lpid = lpid;
@@ -96,7 +89,7 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
void kvmppc_free_hpt(struct kvm *kvm)
{
- clear_bit(kvm->arch.lpid, lpid_inuse);
+ kvmppc_free_lpid(kvm->arch.lpid);
vfree(kvm->arch.revmap);
if (kvm->arch.hpt_li)
kvm_release_hpt(kvm->arch.hpt_li);
@@ -171,8 +164,7 @@ int kvmppc_mmu_hv_init(void)
if (!cpu_has_feature(CPU_FTR_HVMODE))
return -EINVAL;
- memset(lpid_inuse, 0, sizeof(lpid_inuse));
-
+ /* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
if (cpu_has_feature(CPU_FTR_ARCH_206)) {
host_lpid = mfspr(SPRN_LPID); /* POWER7 */
rsvd_lpid = LPID_RSVD;
@@ -181,9 +173,11 @@ int kvmppc_mmu_hv_init(void)
rsvd_lpid = MAX_LPID_970;
}
- set_bit(host_lpid, lpid_inuse);
+ kvmppc_init_lpid(rsvd_lpid + 1);
+
+ kvmppc_claim_lpid(host_lpid);
/* rsvd_lpid is reserved for use in partition switching */
- set_bit(rsvd_lpid, lpid_inuse);
+ kvmppc_claim_lpid(rsvd_lpid);
return 0;
}
@@ -452,7 +446,7 @@ static int instruction_is_store(unsigned int instr)
}
static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned long gpa, int is_store)
+ unsigned long gpa, gva_t ea, int is_store)
{
int ret;
u32 last_inst;
@@ -499,6 +493,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
vcpu->arch.paddr_accessed = gpa;
+ vcpu->arch.vaddr_accessed = ea;
return kvmppc_emulate_mmio(run, vcpu);
}
@@ -552,7 +547,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* No memslot means it's an emulated MMIO region */
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa,
+ return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
dsisr & DSISR_ISSTORE);
}
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index f2e6e48ea463..56b983e7b738 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -90,8 +90,6 @@ slb_exit_skip_ ## num:
or r10, r10, r12
slbie r10
- isync
-
/* Fill SLB with our shadow */
lbz r12, SVCPU_SLB_MAX(r3)
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
new file mode 100644
index 000000000000..72ffc899c082
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -0,0 +1,150 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/hugetlb.h>
+#include <linux/list.h>
+#include <linux/anon_inodes.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/hvcall.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+#include <asm/kvm_host.h>
+#include <asm/udbg.h>
+
+#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
+
+static long kvmppc_stt_npages(unsigned long window_size)
+{
+ return ALIGN((window_size >> SPAPR_TCE_SHIFT)
+ * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
+}
+
+static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
+{
+ struct kvm *kvm = stt->kvm;
+ int i;
+
+ mutex_lock(&kvm->lock);
+ list_del(&stt->list);
+ for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
+ __free_page(stt->pages[i]);
+ kfree(stt);
+ mutex_unlock(&kvm->lock);
+
+ kvm_put_kvm(kvm);
+}
+
+static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
+ struct page *page;
+
+ if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
+ return VM_FAULT_SIGBUS;
+
+ page = stt->pages[vmf->pgoff];
+ get_page(page);
+ vmf->page = page;
+ return 0;
+}
+
+static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
+ .fault = kvm_spapr_tce_fault,
+};
+
+static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &kvm_spapr_tce_vm_ops;
+ return 0;
+}
+
+static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
+{
+ struct kvmppc_spapr_tce_table *stt = filp->private_data;
+
+ release_spapr_tce_table(stt);
+ return 0;
+}
+
+static struct file_operations kvm_spapr_tce_fops = {
+ .mmap = kvm_spapr_tce_mmap,
+ .release = kvm_spapr_tce_release,
+};
+
+long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+ struct kvm_create_spapr_tce *args)
+{
+ struct kvmppc_spapr_tce_table *stt = NULL;
+ long npages;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Check this LIOBN hasn't been previously allocated */
+ list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+ if (stt->liobn == args->liobn)
+ return -EBUSY;
+ }
+
+ npages = kvmppc_stt_npages(args->window_size);
+
+ stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!stt)
+ goto fail;
+
+ stt->liobn = args->liobn;
+ stt->window_size = args->window_size;
+ stt->kvm = kvm;
+
+ for (i = 0; i < npages; i++) {
+ stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!stt->pages[i])
+ goto fail;
+ }
+
+ kvm_get_kvm(kvm);
+
+ mutex_lock(&kvm->lock);
+ list_add(&stt->list, &kvm->arch.spapr_tce_tables);
+
+ mutex_unlock(&kvm->lock);
+
+ return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+ stt, O_RDWR);
+
+fail:
+ if (stt) {
+ for (i = 0; i < npages; i++)
+ if (stt->pages[i])
+ __free_page(stt->pages[i]);
+
+ kfree(stt);
+ }
+ return ret;
+}
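
The sizing rule in kvmppc_stt_npages() above allocates one 64-bit TCE per IOMMU page in the requested DMA window and rounds the table up to whole pages. The stand-alone sketch below reproduces the computation; SPAPR_TCE_SHIFT and PAGE_SIZE are assumed to be 12 and 4096 here, whereas the kernel takes them from its own headers.

#include <stdio.h>

#define SPAPR_TCE_SHIFT	12		/* assumed: 4 KiB IOMMU pages */
#define PAGE_SIZE	4096UL		/* assumed: 4 KiB host pages */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static long stt_npages(unsigned long window_size)
{
        unsigned long ntces = window_size >> SPAPR_TCE_SHIFT;

        /* one u64 TCE per IOMMU page, rounded up to whole host pages */
        return ALIGN_UP(ntces * sizeof(unsigned long long), PAGE_SIZE) / PAGE_SIZE;
}

int main(void)
{
        /* a 256 MiB DMA window: 65536 TCEs -> 512 KiB of table -> 128 pages */
        printf("%ld\n", stt_npages(256UL << 20));
        return 0;
}
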
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index ea0f8c537c28..30c2f3b134c6 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -38,6 +38,9 @@
#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
+/* WARNING: This will be called in real-mode on HV KVM and virtual
+ * mode on PR KVM
+ */
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce)
{
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 135663a3e4fc..b9a989dc76cc 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -87,6 +87,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
+ int rt = get_rt(inst);
+ int rs = get_rs(inst);
+ int ra = get_ra(inst);
+ int rb = get_rb(inst);
switch (get_op(inst)) {
case 19:
@@ -106,21 +110,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
case 31:
switch (get_xop(inst)) {
case OP_31_XOP_MFMSR:
- kvmppc_set_gpr(vcpu, get_rt(inst),
- vcpu->arch.shared->msr);
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
break;
case OP_31_XOP_MTMSRD:
{
- ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
+ ulong rs_val = kvmppc_get_gpr(vcpu, rs);
if (inst & 0x10000) {
- vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
- vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
+ ulong new_msr = vcpu->arch.shared->msr;
+ new_msr &= ~(MSR_RI | MSR_EE);
+ new_msr |= rs_val & (MSR_RI | MSR_EE);
+ vcpu->arch.shared->msr = new_msr;
} else
- kvmppc_set_msr(vcpu, rs);
+ kvmppc_set_msr(vcpu, rs_val);
break;
}
case OP_31_XOP_MTMSR:
- kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
+ kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_MFSR:
{
@@ -130,7 +135,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (vcpu->arch.mmu.mfsrin) {
u32 sr;
sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
- kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+ kvmppc_set_gpr(vcpu, rt, sr);
}
break;
}
@@ -138,29 +143,29 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
{
int srnum;
- srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
+ srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
if (vcpu->arch.mmu.mfsrin) {
u32 sr;
sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
- kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+ kvmppc_set_gpr(vcpu, rt, sr);
}
break;
}
case OP_31_XOP_MTSR:
vcpu->arch.mmu.mtsrin(vcpu,
(inst >> 16) & 0xf,
- kvmppc_get_gpr(vcpu, get_rs(inst)));
+ kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_MTSRIN:
vcpu->arch.mmu.mtsrin(vcpu,
- (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
- kvmppc_get_gpr(vcpu, get_rs(inst)));
+ (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
+ kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_TLBIE:
case OP_31_XOP_TLBIEL:
{
bool large = (inst & 0x00200000) ? true : false;
- ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
+ ulong addr = kvmppc_get_gpr(vcpu, rb);
vcpu->arch.mmu.tlbie(vcpu, addr, large);
break;
}
@@ -171,15 +176,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL;
vcpu->arch.mmu.slbmte(vcpu,
- kvmppc_get_gpr(vcpu, get_rs(inst)),
- kvmppc_get_gpr(vcpu, get_rb(inst)));
+ kvmppc_get_gpr(vcpu, rs),
+ kvmppc_get_gpr(vcpu, rb));
break;
case OP_31_XOP_SLBIE:
if (!vcpu->arch.mmu.slbie)
return EMULATE_FAIL;
vcpu->arch.mmu.slbie(vcpu,
- kvmppc_get_gpr(vcpu, get_rb(inst)));
+ kvmppc_get_gpr(vcpu, rb));
break;
case OP_31_XOP_SLBIA:
if (!vcpu->arch.mmu.slbia)
@@ -191,22 +196,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (!vcpu->arch.mmu.slbmfee) {
emulated = EMULATE_FAIL;
} else {
- ulong t, rb;
+ ulong t, rb_val;
- rb = kvmppc_get_gpr(vcpu, get_rb(inst));
- t = vcpu->arch.mmu.slbmfee(vcpu, rb);
- kvmppc_set_gpr(vcpu, get_rt(inst), t);
+ rb_val = kvmppc_get_gpr(vcpu, rb);
+ t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
+ kvmppc_set_gpr(vcpu, rt, t);
}
break;
case OP_31_XOP_SLBMFEV:
if (!vcpu->arch.mmu.slbmfev) {
emulated = EMULATE_FAIL;
} else {
- ulong t, rb;
+ ulong t, rb_val;
- rb = kvmppc_get_gpr(vcpu, get_rb(inst));
- t = vcpu->arch.mmu.slbmfev(vcpu, rb);
- kvmppc_set_gpr(vcpu, get_rt(inst), t);
+ rb_val = kvmppc_get_gpr(vcpu, rb);
+ t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
+ kvmppc_set_gpr(vcpu, rt, t);
}
break;
case OP_31_XOP_DCBA:
@@ -214,17 +219,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case OP_31_XOP_DCBZ:
{
- ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
- ulong ra = 0;
+ ulong rb_val = kvmppc_get_gpr(vcpu, rb);
+ ulong ra_val = 0;
ulong addr, vaddr;
u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
u32 dsisr;
int r;
- if (get_ra(inst))
- ra = kvmppc_get_gpr(vcpu, get_ra(inst));
+ if (ra)
+ ra_val = kvmppc_get_gpr(vcpu, ra);
- addr = (ra + rb) & ~31ULL;
+ addr = (ra_val + rb_val) & ~31ULL;
if (!(vcpu->arch.shared->msr & MSR_SF))
addr &= 0xffffffff;
vaddr = addr;
@@ -313,10 +318,9 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
return bat;
}
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
- ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_SDR1:
@@ -428,7 +432,7 @@ unprivileged:
return emulated;
}
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
int emulated = EMULATE_DONE;
@@ -441,46 +445,46 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
if (sprn % 2)
- kvmppc_set_gpr(vcpu, rt, bat->raw >> 32);
+ *spr_val = bat->raw >> 32;
else
- kvmppc_set_gpr(vcpu, rt, bat->raw);
+ *spr_val = bat->raw;
break;
}
case SPRN_SDR1:
if (!spr_allowed(vcpu, PRIV_HYPER))
goto unprivileged;
- kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
+ *spr_val = to_book3s(vcpu)->sdr1;
break;
case SPRN_DSISR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr);
+ *spr_val = vcpu->arch.shared->dsisr;
break;
case SPRN_DAR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar);
+ *spr_val = vcpu->arch.shared->dar;
break;
case SPRN_HIOR:
- kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
+ *spr_val = to_book3s(vcpu)->hior;
break;
case SPRN_HID0:
- kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
+ *spr_val = to_book3s(vcpu)->hid[0];
break;
case SPRN_HID1:
- kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
+ *spr_val = to_book3s(vcpu)->hid[1];
break;
case SPRN_HID2:
case SPRN_HID2_GEKKO:
- kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
+ *spr_val = to_book3s(vcpu)->hid[2];
break;
case SPRN_HID4:
case SPRN_HID4_GEKKO:
- kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
+ *spr_val = to_book3s(vcpu)->hid[4];
break;
case SPRN_HID5:
- kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
+ *spr_val = to_book3s(vcpu)->hid[5];
break;
case SPRN_CFAR:
case SPRN_PURR:
- kvmppc_set_gpr(vcpu, rt, 0);
+ *spr_val = 0;
break;
case SPRN_GQR0:
case SPRN_GQR1:
@@ -490,8 +494,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
case SPRN_GQR5:
case SPRN_GQR6:
case SPRN_GQR7:
- kvmppc_set_gpr(vcpu, rt,
- to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
+ *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
break;
case SPRN_THRM1:
case SPRN_THRM2:
@@ -506,7 +509,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
case SPRN_PMC3_GEKKO:
case SPRN_PMC4_GEKKO:
case SPRN_WPAR_GEKKO:
- kvmppc_set_gpr(vcpu, rt, 0);
+ *spr_val = 0;
break;
default:
unprivileged:
@@ -565,23 +568,22 @@ u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
ulong dar = 0;
- ulong ra;
+ ulong ra = get_ra(inst);
+ ulong rb = get_rb(inst);
switch (get_op(inst)) {
case OP_LFS:
case OP_LFD:
case OP_STFD:
case OP_STFS:
- ra = get_ra(inst);
if (ra)
dar = kvmppc_get_gpr(vcpu, ra);
dar += (s32)((s16)inst);
break;
case 31:
- ra = get_ra(inst);
if (ra)
dar = kvmppc_get_gpr(vcpu, ra);
- dar += kvmppc_get_gpr(vcpu, get_rb(inst));
+ dar += kvmppc_get_gpr(vcpu, rb);
break;
default:
printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
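
kvmppc_alignment_dar() above reconstructs the data address of the faulting access: the D-form FP loads/stores add a sign-extended 16-bit displacement to the base register (or to zero when RA is 0), while the opcode-31 X-form variants add an index register instead. The sketch below mirrors that calculation outside the kernel; gpr[] and the example encodings are illustrative only.

#include <stdio.h>
#include <stdint.h>

static uint64_t gpr[32];			/* stand-in guest register file */

static int get_ra(uint32_t inst) { return (inst >> 16) & 0x1f; }
static int get_rb(uint32_t inst) { return (inst >> 11) & 0x1f; }

/* D-form (lfs/lfd/stfs/stfd): base register plus sign-extended displacement */
static uint64_t dform_ea(uint32_t inst)
{
        uint64_t ea = get_ra(inst) ? gpr[get_ra(inst)] : 0;

        return ea + (int16_t)(inst & 0xffff);
}

/* X-form (opcode 31): base register plus index register */
static uint64_t xform_ea(uint32_t inst)
{
        uint64_t ea = get_ra(inst) ? gpr[get_ra(inst)] : 0;

        return ea + gpr[get_rb(inst)];
}

int main(void)
{
        gpr[3] = 0x1000;
        gpr[4] = 0x20;
        /* lfd f1,-8(r3): RA=3, displacement -8 -> 0xff8 */
        printf("D-form: 0x%llx\n", (unsigned long long)dform_ea(0xc823fff8u));
        /* an X-form access with RA=3, RB=4 -> 0x1020 */
        printf("X-form: 0x%llx\n",
               (unsigned long long)xform_ea((31u << 26) | (3u << 16) | (4u << 11)));
        return 0;
}
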
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 108d1f580177..c6af1d623839 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -60,12 +60,20 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
local_paca->kvm_hstate.kvm_vcpu = vcpu;
- local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore;
+ local_paca->kvm_hstate.kvm_vcore = vc;
+ if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
+ vc->stolen_tb += mftb() - vc->preempt_tb;
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
+ vc->preempt_tb = mftb();
}
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
@@ -134,6 +142,22 @@ static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
vpa->yield_count = 1;
}
+/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
+struct reg_vpa {
+ u32 dummy;
+ union {
+ u16 hword;
+ u32 word;
+ } length;
+};
+
+static int vpa_is_registered(struct kvmppc_vpa *vpap)
+{
+ if (vpap->update_pending)
+ return vpap->next_gpa != 0;
+ return vpap->pinned_addr != NULL;
+}
+
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
unsigned long flags,
unsigned long vcpuid, unsigned long vpa)
@@ -142,88 +166,182 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
unsigned long len, nb;
void *va;
struct kvm_vcpu *tvcpu;
- int err = H_PARAMETER;
+ int err;
+ int subfunc;
+ struct kvmppc_vpa *vpap;
tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
if (!tvcpu)
return H_PARAMETER;
- flags >>= 63 - 18;
- flags &= 7;
- if (flags == 0 || flags == 4)
- return H_PARAMETER;
- if (flags < 4) {
- if (vpa & 0x7f)
+ subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
+ if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
+ subfunc == H_VPA_REG_SLB) {
+ /* Registering new area - address must be cache-line aligned */
+ if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
return H_PARAMETER;
- if (flags >= 2 && !tvcpu->arch.vpa)
- return H_RESOURCE;
- /* registering new area; convert logical addr to real */
+
+ /* convert logical addr to kernel addr and read length */
va = kvmppc_pin_guest_page(kvm, vpa, &nb);
if (va == NULL)
return H_PARAMETER;
- if (flags <= 1)
- len = *(unsigned short *)(va + 4);
+ if (subfunc == H_VPA_REG_VPA)
+ len = ((struct reg_vpa *)va)->length.hword;
else
- len = *(unsigned int *)(va + 4);
- if (len > nb)
- goto out_unpin;
- switch (flags) {
- case 1: /* register VPA */
- if (len < 640)
- goto out_unpin;
- if (tvcpu->arch.vpa)
- kvmppc_unpin_guest_page(kvm, vcpu->arch.vpa);
- tvcpu->arch.vpa = va;
- init_vpa(vcpu, va);
- break;
- case 2: /* register DTL */
- if (len < 48)
- goto out_unpin;
- len -= len % 48;
- if (tvcpu->arch.dtl)
- kvmppc_unpin_guest_page(kvm, vcpu->arch.dtl);
- tvcpu->arch.dtl = va;
- tvcpu->arch.dtl_end = va + len;
+ len = ((struct reg_vpa *)va)->length.word;
+ kvmppc_unpin_guest_page(kvm, va);
+
+ /* Check length */
+ if (len > nb || len < sizeof(struct reg_vpa))
+ return H_PARAMETER;
+ } else {
+ vpa = 0;
+ len = 0;
+ }
+
+ err = H_PARAMETER;
+ vpap = NULL;
+ spin_lock(&tvcpu->arch.vpa_update_lock);
+
+ switch (subfunc) {
+ case H_VPA_REG_VPA: /* register VPA */
+ if (len < sizeof(struct lppaca))
break;
- case 3: /* register SLB shadow buffer */
- if (len < 16)
- goto out_unpin;
- if (tvcpu->arch.slb_shadow)
- kvmppc_unpin_guest_page(kvm, vcpu->arch.slb_shadow);
- tvcpu->arch.slb_shadow = va;
+ vpap = &tvcpu->arch.vpa;
+ err = 0;
+ break;
+
+ case H_VPA_REG_DTL: /* register DTL */
+ if (len < sizeof(struct dtl_entry))
break;
- }
- } else {
- switch (flags) {
- case 5: /* unregister VPA */
- if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
- return H_RESOURCE;
- if (!tvcpu->arch.vpa)
- break;
- kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa);
- tvcpu->arch.vpa = NULL;
+ len -= len % sizeof(struct dtl_entry);
+
+ /* Check that they have previously registered a VPA */
+ err = H_RESOURCE;
+ if (!vpa_is_registered(&tvcpu->arch.vpa))
break;
- case 6: /* unregister DTL */
- if (!tvcpu->arch.dtl)
- break;
- kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl);
- tvcpu->arch.dtl = NULL;
+
+ vpap = &tvcpu->arch.dtl;
+ err = 0;
+ break;
+
+ case H_VPA_REG_SLB: /* register SLB shadow buffer */
+ /* Check that they have previously registered a VPA */
+ err = H_RESOURCE;
+ if (!vpa_is_registered(&tvcpu->arch.vpa))
break;
- case 7: /* unregister SLB shadow buffer */
- if (!tvcpu->arch.slb_shadow)
- break;
- kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow);
- tvcpu->arch.slb_shadow = NULL;
+
+ vpap = &tvcpu->arch.slb_shadow;
+ err = 0;
+ break;
+
+ case H_VPA_DEREG_VPA: /* deregister VPA */
+ /* Check they don't still have a DTL or SLB buf registered */
+ err = H_RESOURCE;
+ if (vpa_is_registered(&tvcpu->arch.dtl) ||
+ vpa_is_registered(&tvcpu->arch.slb_shadow))
break;
- }
+
+ vpap = &tvcpu->arch.vpa;
+ err = 0;
+ break;
+
+ case H_VPA_DEREG_DTL: /* deregister DTL */
+ vpap = &tvcpu->arch.dtl;
+ err = 0;
+ break;
+
+ case H_VPA_DEREG_SLB: /* deregister SLB shadow buffer */
+ vpap = &tvcpu->arch.slb_shadow;
+ err = 0;
+ break;
+ }
+
+ if (vpap) {
+ vpap->next_gpa = vpa;
+ vpap->len = len;
+ vpap->update_pending = 1;
}
- return H_SUCCESS;
- out_unpin:
- kvmppc_unpin_guest_page(kvm, va);
+ spin_unlock(&tvcpu->arch.vpa_update_lock);
+
return err;
}
+static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
+{
+ void *va;
+ unsigned long nb;
+
+ vpap->update_pending = 0;
+ va = NULL;
+ if (vpap->next_gpa) {
+ va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+ if (nb < vpap->len) {
+ /*
+ * If it's now too short, it must be that userspace
+ * has changed the mappings underlying guest memory,
+ * so unregister the region.
+ */
+ kvmppc_unpin_guest_page(kvm, va);
+ va = NULL;
+ }
+ }
+ if (vpap->pinned_addr)
+ kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
+ vpap->pinned_addr = va;
+ if (va)
+ vpap->pinned_end = va + vpap->len;
+}
+
+static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ spin_lock(&vcpu->arch.vpa_update_lock);
+ if (vcpu->arch.vpa.update_pending) {
+ kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
+ init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
+ }
+ if (vcpu->arch.dtl.update_pending) {
+ kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
+ vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
+ vcpu->arch.dtl_index = 0;
+ }
+ if (vcpu->arch.slb_shadow.update_pending)
+ kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
+ spin_unlock(&vcpu->arch.vpa_update_lock);
+}
+
+static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
+ struct kvmppc_vcore *vc)
+{
+ struct dtl_entry *dt;
+ struct lppaca *vpa;
+ unsigned long old_stolen;
+
+ dt = vcpu->arch.dtl_ptr;
+ vpa = vcpu->arch.vpa.pinned_addr;
+ old_stolen = vcpu->arch.stolen_logged;
+ vcpu->arch.stolen_logged = vc->stolen_tb;
+ if (!dt || !vpa)
+ return;
+ memset(dt, 0, sizeof(struct dtl_entry));
+ dt->dispatch_reason = 7;
+ dt->processor_id = vc->pcpu + vcpu->arch.ptid;
+ dt->timebase = mftb();
+ dt->enqueue_to_dispatch_time = vc->stolen_tb - old_stolen;
+ dt->srr0 = kvmppc_get_pc(vcpu);
+ dt->srr1 = vcpu->arch.shregs.msr;
+ ++dt;
+ if (dt == vcpu->arch.dtl.pinned_end)
+ dt = vcpu->arch.dtl.pinned_addr;
+ vcpu->arch.dtl_ptr = dt;
+ /* order writing *dt vs. writing vpa->dtl_idx */
+ smp_wmb();
+ vpa->dtl_idx = ++vcpu->arch.dtl_index;
+}
+
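
kvmppc_create_dtl_entry() above is a classic single-producer ring: write the dispatch-trace entry, wrap the pointer at the end of the pinned buffer, then bump the dtl_idx the consumer reads, with smp_wmb() making sure the entry is visible before the index that advertises it. Below is a minimal user-space rendering of the same pattern, using a C11 release store in place of smp_wmb(); the struct layout is a placeholder, not the real lppaca/dtl_entry.

#include <stdatomic.h>

struct dtl_entry { unsigned long long timebase, srr0, srr1; };

struct dtl_ring {
        struct dtl_entry *base, *end;	/* bounds of the pinned buffer */
        struct dtl_entry *next;		/* next slot to fill */
        _Atomic unsigned int idx;	/* index advertised to the consumer */
};

static void dtl_push(struct dtl_ring *r, const struct dtl_entry *e)
{
        unsigned int old = atomic_load_explicit(&r->idx, memory_order_relaxed);

        *r->next = *e;			/* write the entry itself first */
        if (++r->next == r->end)	/* wrap at the end of the buffer */
                r->next = r->base;
        /* publish the entry before the index that advertises it */
        atomic_store_explicit(&r->idx, old + 1, memory_order_release);
}

int main(void)
{
        static struct dtl_entry buf[4];
        struct dtl_ring r = { buf, buf + 4, buf, 0 };
        struct dtl_entry e = { 1234, 0x100, 0x8000000000009032ULL };

        dtl_push(&r, &e);
        return 0;
}
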
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
unsigned long req = kvmppc_get_gpr(vcpu, 3);
@@ -468,6 +586,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
/* default to host PVR, since we can't spoof it */
vcpu->arch.pvr = mfspr(SPRN_PVR);
kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+ spin_lock_init(&vcpu->arch.vpa_update_lock);
kvmppc_mmu_book3s_hv_init(vcpu);
@@ -486,6 +605,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
INIT_LIST_HEAD(&vcore->runnable_threads);
spin_lock_init(&vcore->lock);
init_waitqueue_head(&vcore->wq);
+ vcore->preempt_tb = mftb();
}
kvm->arch.vcores[core] = vcore;
}
@@ -498,6 +618,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
++vcore->num_threads;
spin_unlock(&vcore->lock);
vcpu->arch.vcore = vcore;
+ vcpu->arch.stolen_logged = vcore->stolen_tb;
vcpu->arch.cpu_type = KVM_CPU_3S_64;
kvmppc_sanity_check(vcpu);
@@ -512,12 +633,14 @@ out:
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.dtl)
- kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl);
- if (vcpu->arch.slb_shadow)
- kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow);
- if (vcpu->arch.vpa)
- kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa);
+ spin_lock(&vcpu->arch.vpa_update_lock);
+ if (vcpu->arch.dtl.pinned_addr)
+ kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr);
+ if (vcpu->arch.slb_shadow.pinned_addr)
+ kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr);
+ if (vcpu->arch.vpa.pinned_addr)
+ kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr);
+ spin_unlock(&vcpu->arch.vpa_update_lock);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu);
}
@@ -569,6 +692,45 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
list_del(&vcpu->arch.run_list);
}
+static int kvmppc_grab_hwthread(int cpu)
+{
+ struct paca_struct *tpaca;
+ long timeout = 1000;
+
+ tpaca = &paca[cpu];
+
+ /* Ensure the thread won't go into the kernel if it wakes */
+ tpaca->kvm_hstate.hwthread_req = 1;
+
+ /*
+ * If the thread is already executing in the kernel (e.g. handling
+ * a stray interrupt), wait for it to get back to nap mode.
+ * The smp_mb() is to ensure that our setting of hwthread_req
+ * is visible before we look at hwthread_state, so if this
+ * races with the code at system_reset_pSeries and the thread
+ * misses our setting of hwthread_req, we are sure to see its
+ * setting of hwthread_state, and vice versa.
+ */
+ smp_mb();
+ while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
+ if (--timeout <= 0) {
+ pr_err("KVM: couldn't grab cpu %d\n", cpu);
+ return -EBUSY;
+ }
+ udelay(1);
+ }
+ return 0;
+}
+
+static void kvmppc_release_hwthread(int cpu)
+{
+ struct paca_struct *tpaca;
+
+ tpaca = &paca[cpu];
+ tpaca->kvm_hstate.hwthread_req = 0;
+ tpaca->kvm_hstate.kvm_vcpu = NULL;
+}
+
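
The smp_mb() comment in kvmppc_grab_hwthread() above describes a standard two-flag handshake: each side stores its own flag, issues a full barrier, then reads the other side's flag, so at least one side is guaranteed to see the other's store. The sketch below (build with -pthread) demonstrates just that ordering property, with C11 seq_cst fences standing in for smp_mb(); it does not model the nap/poll loop itself.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int hwthread_req;		/* set by the grabbing CPU */
static atomic_int hwthread_in_kernel;	/* set by the napping thread */

static void *napping_thread(void *arg)
{
        (void)arg;
        atomic_store_explicit(&hwthread_in_kernel, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() equivalent */
        if (atomic_load_explicit(&hwthread_req, memory_order_relaxed))
                printf("thread: saw the grab request\n");
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, napping_thread, NULL);
        atomic_store_explicit(&hwthread_req, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() equivalent */
        if (atomic_load_explicit(&hwthread_in_kernel, memory_order_relaxed))
                printf("grabber: thread still busy, wait for it to nap\n");
        /* the fences guarantee at least one of the two messages prints */
        pthread_join(t, NULL);
        return 0;
}
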
static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
int cpu;
@@ -588,8 +750,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
if (vcpu->arch.ptid) {
- tpaca->cpu_start = 0x80;
- wmb();
+ kvmppc_grab_hwthread(cpu);
xics_wake_cpu(cpu);
++vc->n_woken;
}
@@ -639,7 +800,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
struct kvm_vcpu *vcpu, *vcpu0, *vnext;
long ret;
u64 now;
- int ptid;
+ int ptid, i;
/* don't start if any threads have a signal pending */
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
@@ -681,17 +842,29 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
vc->nap_count = 0;
vc->entry_exit_count = 0;
vc->vcore_state = VCORE_RUNNING;
+ vc->stolen_tb += mftb() - vc->preempt_tb;
vc->in_guest = 0;
vc->pcpu = smp_processor_id();
vc->napping_threads = 0;
- list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
kvmppc_start_thread(vcpu);
+ if (vcpu->arch.vpa.update_pending ||
+ vcpu->arch.slb_shadow.update_pending ||
+ vcpu->arch.dtl.update_pending)
+ kvmppc_update_vpas(vcpu);
+ kvmppc_create_dtl_entry(vcpu, vc);
+ }
+ /* Grab any remaining hw threads so they can't go into the kernel */
+ for (i = ptid; i < threads_per_core; ++i)
+ kvmppc_grab_hwthread(vc->pcpu + i);
preempt_disable();
spin_unlock(&vc->lock);
kvm_guest_enter();
__kvmppc_vcore_entry(NULL, vcpu0);
+ for (i = 0; i < threads_per_core; ++i)
+ kvmppc_release_hwthread(vc->pcpu + i);
spin_lock(&vc->lock);
/* disable sending of IPIs on virtual external irqs */
@@ -737,6 +910,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
spin_lock(&vc->lock);
out:
vc->vcore_state = VCORE_INACTIVE;
+ vc->preempt_tb = mftb();
list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
arch.run_list) {
if (vcpu->arch.ret != RESUME_GUEST) {
@@ -835,6 +1009,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
spin_lock(&vc->lock);
continue;
}
+ vc->runner = vcpu;
n_ceded = 0;
list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
n_ceded += v->arch.ceded;
@@ -854,6 +1029,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
wake_up(&v->arch.cpu_run);
}
}
+ vc->runner = NULL;
}
if (signal_pending(current)) {
@@ -917,115 +1093,6 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
return r;
}
-static long kvmppc_stt_npages(unsigned long window_size)
-{
- return ALIGN((window_size >> SPAPR_TCE_SHIFT)
- * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
-}
-
-static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
-{
- struct kvm *kvm = stt->kvm;
- int i;
-
- mutex_lock(&kvm->lock);
- list_del(&stt->list);
- for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
- __free_page(stt->pages[i]);
- kfree(stt);
- mutex_unlock(&kvm->lock);
-
- kvm_put_kvm(kvm);
-}
-
-static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
- struct page *page;
-
- if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
- return VM_FAULT_SIGBUS;
-
- page = stt->pages[vmf->pgoff];
- get_page(page);
- vmf->page = page;
- return 0;
-}
-
-static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
- .fault = kvm_spapr_tce_fault,
-};
-
-static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
-{
- vma->vm_ops = &kvm_spapr_tce_vm_ops;
- return 0;
-}
-
-static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
-{
- struct kvmppc_spapr_tce_table *stt = filp->private_data;
-
- release_spapr_tce_table(stt);
- return 0;
-}
-
-static struct file_operations kvm_spapr_tce_fops = {
- .mmap = kvm_spapr_tce_mmap,
- .release = kvm_spapr_tce_release,
-};
-
-long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
- struct kvm_create_spapr_tce *args)
-{
- struct kvmppc_spapr_tce_table *stt = NULL;
- long npages;
- int ret = -ENOMEM;
- int i;
-
- /* Check this LIOBN hasn't been previously allocated */
- list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
- if (stt->liobn == args->liobn)
- return -EBUSY;
- }
-
- npages = kvmppc_stt_npages(args->window_size);
-
- stt = kzalloc(sizeof(*stt) + npages* sizeof(struct page *),
- GFP_KERNEL);
- if (!stt)
- goto fail;
-
- stt->liobn = args->liobn;
- stt->window_size = args->window_size;
- stt->kvm = kvm;
-
- for (i = 0; i < npages; i++) {
- stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!stt->pages[i])
- goto fail;
- }
-
- kvm_get_kvm(kvm);
-
- mutex_lock(&kvm->lock);
- list_add(&stt->list, &kvm->arch.spapr_tce_tables);
-
- mutex_unlock(&kvm->lock);
-
- return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
- stt, O_RDWR);
-
-fail:
- if (stt) {
- for (i = 0; i < npages; i++)
- if (stt->pages[i])
- __free_page(stt->pages[i]);
-
- kfree(stt);
- }
- return ret;
-}
/* Work out RMLS (real mode limit selector) field value for a given RMA size.
Assumes POWER7 or PPC970. */
@@ -1108,6 +1175,38 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
return fd;
}
+static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
+ int linux_psize)
+{
+ struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];
+
+ if (!def->shift)
+ return;
+ (*sps)->page_shift = def->shift;
+ (*sps)->slb_enc = def->sllp;
+ (*sps)->enc[0].page_shift = def->shift;
+ (*sps)->enc[0].pte_enc = def->penc;
+ (*sps)++;
+}
+
+int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+{
+ struct kvm_ppc_one_seg_page_size *sps;
+
+ info->flags = KVM_PPC_PAGE_SIZES_REAL;
+ if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
+ info->flags |= KVM_PPC_1T_SEGMENTS;
+ info->slb_size = mmu_slb_size;
+
+ /* We only support these sizes for now, and no multi-size segments */
+ sps = &info->sps[0];
+ kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
+ kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
+ kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
+
+ return 0;
+}
+
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
@@ -1404,12 +1503,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL;
}
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
return EMULATE_FAIL;
}
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
return EMULATE_FAIL;
}
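
The H_REGISTER_VPA rework above replaces the open-coded "flags >>= 63 - 18; flags &= 7" with a named shift and mask. The sketch below shows the same decode as a stand-alone program; the shift and mask values follow the removed open-coded form, and the sub-function numbers follow the old switch (1-3 register the VPA/DTL/SLB shadow, 5-7 deregister them, 0 and 4 are invalid).

#include <stdio.h>

#define H_VPA_FUNC_SHIFT	(63 - 18)	/* = 45, as in the removed code */
#define H_VPA_FUNC_MASK		7ULL

enum {
        H_VPA_REG_VPA = 1, H_VPA_REG_DTL, H_VPA_REG_SLB,
        H_VPA_DEREG_VPA = 5, H_VPA_DEREG_DTL, H_VPA_DEREG_SLB,
};

static int vpa_subfunc(unsigned long long flags)
{
        return (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
}

int main(void)
{
        unsigned long long flags = 1ULL << H_VPA_FUNC_SHIFT;	/* register VPA */

        printf("subfunc %d, register-VPA? %d\n",
               vpa_subfunc(flags), vpa_subfunc(flags) == H_VPA_REG_VPA);
        return 0;
}
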
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index d3fb4df02c41..84035a528c80 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -68,19 +68,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
rotldi r10,r10,16
mtmsrd r10,1
- /* Save host PMU registers and load guest PMU registers */
+ /* Save host PMU registers */
/* R4 is live here (vcpu pointer) but not r3 or r5 */
li r3, 1
sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
mfspr r7, SPRN_MMCR0 /* save MMCR0 */
mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */
+ mfspr r6, SPRN_MMCRA
+BEGIN_FTR_SECTION
+ /* On P7, clear MMCRA in order to disable SDAR updates */
+ li r5, 0
+ mtspr SPRN_MMCRA, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
isync
ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
lbz r5, LPPACA_PMCINUSE(r3)
cmpwi r5, 0
beq 31f /* skip if not */
mfspr r5, SPRN_MMCR1
- mfspr r6, SPRN_MMCRA
std r7, HSTATE_MMCR(r13)
std r5, HSTATE_MMCR + 8(r13)
std r6, HSTATE_MMCR + 16(r13)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b70bf22a3ff3..a84aafce2a12 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -26,6 +26,7 @@
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
+#include <asm/kvm_book3s_asm.h>
/*****************************************************************************
* *
@@ -82,6 +83,7 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
#define XICS_XIRR 4
#define XICS_QIRR 0xc
+#define XICS_IPI 2 /* interrupt source # for IPIs */
/*
* We come in here when wakened from nap mode on a secondary hw thread.
@@ -94,26 +96,54 @@ kvm_start_guest:
subi r1,r1,STACK_FRAME_OVERHEAD
ld r2,PACATOC(r13)
- /* were we napping due to cede? */
- lbz r0,HSTATE_NAPPING(r13)
- cmpwi r0,0
- bne kvm_end_cede
+ li r0,KVM_HWTHREAD_IN_KVM
+ stb r0,HSTATE_HWTHREAD_STATE(r13)
- /* get vcpu pointer */
- ld r4, HSTATE_KVM_VCPU(r13)
+ /* NV GPR values from power7_idle() will no longer be valid */
+ li r0,1
+ stb r0,PACA_NAPSTATELOST(r13)
- /* We got here with an IPI; clear it */
- ld r5, HSTATE_XICS_PHYS(r13)
- li r0, 0xff
- li r6, XICS_QIRR
- li r7, XICS_XIRR
- lwzcix r8, r5, r7 /* ack the interrupt */
+ /* get vcpu pointer, NULL if we have no vcpu to run */
+ ld r4,HSTATE_KVM_VCPU(r13)
+ cmpdi cr1,r4,0
+
+ /* Check the wake reason in SRR1 to see why we got here */
+ mfspr r3,SPRN_SRR1
+ rlwinm r3,r3,44-31,0x7 /* extract wake reason field */
+ cmpwi r3,4 /* was it an external interrupt? */
+ bne 27f
+
+ /*
+ * External interrupt - for now assume it is an IPI, since we
+ * should never get any other interrupts sent to offline threads.
+ * Only do this for secondary threads.
+ */
+ beq cr1,25f
+ lwz r3,VCPU_PTID(r4)
+ cmpwi r3,0
+ beq 27f
+25: ld r5,HSTATE_XICS_PHYS(r13)
+ li r0,0xff
+ li r6,XICS_QIRR
+ li r7,XICS_XIRR
+ lwzcix r8,r5,r7 /* get and ack the interrupt */
sync
- stbcix r0, r5, r6 /* clear it */
- stwcix r8, r5, r7 /* EOI it */
+ clrldi. r9,r8,40 /* get interrupt source ID. */
+ beq 27f /* none there? */
+ cmpwi r9,XICS_IPI
+ bne 26f
+ stbcix r0,r5,r6 /* clear IPI */
+26: stwcix r8,r5,r7 /* EOI the interrupt */
- /* NV GPR values from power7_idle() will no longer be valid */
- stb r0, PACA_NAPSTATELOST(r13)
+27: /* XXX should handle hypervisor maintenance interrupts etc. here */
+
+ /* if we have no vcpu to run, go back to sleep */
+ beq cr1,kvm_no_guest
+
+ /* were we napping due to cede? */
+ lbz r0,HSTATE_NAPPING(r13)
+ cmpwi r0,0
+ bne kvm_end_cede
.global kvmppc_hv_entry
kvmppc_hv_entry:
@@ -129,24 +159,15 @@ kvmppc_hv_entry:
mflr r0
std r0, HSTATE_VMHANDLER(r13)
- ld r14, VCPU_GPR(r14)(r4)
- ld r15, VCPU_GPR(r15)(r4)
- ld r16, VCPU_GPR(r16)(r4)
- ld r17, VCPU_GPR(r17)(r4)
- ld r18, VCPU_GPR(r18)(r4)
- ld r19, VCPU_GPR(r19)(r4)
- ld r20, VCPU_GPR(r20)(r4)
- ld r21, VCPU_GPR(r21)(r4)
- ld r22, VCPU_GPR(r22)(r4)
- ld r23, VCPU_GPR(r23)(r4)
- ld r24, VCPU_GPR(r24)(r4)
- ld r25, VCPU_GPR(r25)(r4)
- ld r26, VCPU_GPR(r26)(r4)
- ld r27, VCPU_GPR(r27)(r4)
- ld r28, VCPU_GPR(r28)(r4)
- ld r29, VCPU_GPR(r29)(r4)
- ld r30, VCPU_GPR(r30)(r4)
- ld r31, VCPU_GPR(r31)(r4)
+ /* Set partition DABR */
+ /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
+ li r5,3
+ ld r6,VCPU_DABR(r4)
+ mtspr SPRN_DABRX,r5
+ mtspr SPRN_DABR,r6
+BEGIN_FTR_SECTION
+ isync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Load guest PMU registers */
/* R4 is live here (vcpu pointer) */
@@ -185,6 +206,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
/* Load up FP, VMX and VSX registers */
bl kvmppc_load_fp
+ ld r14, VCPU_GPR(r14)(r4)
+ ld r15, VCPU_GPR(r15)(r4)
+ ld r16, VCPU_GPR(r16)(r4)
+ ld r17, VCPU_GPR(r17)(r4)
+ ld r18, VCPU_GPR(r18)(r4)
+ ld r19, VCPU_GPR(r19)(r4)
+ ld r20, VCPU_GPR(r20)(r4)
+ ld r21, VCPU_GPR(r21)(r4)
+ ld r22, VCPU_GPR(r22)(r4)
+ ld r23, VCPU_GPR(r23)(r4)
+ ld r24, VCPU_GPR(r24)(r4)
+ ld r25, VCPU_GPR(r25)(r4)
+ ld r26, VCPU_GPR(r26)(r4)
+ ld r27, VCPU_GPR(r27)(r4)
+ ld r28, VCPU_GPR(r28)(r4)
+ ld r29, VCPU_GPR(r29)(r4)
+ ld r30, VCPU_GPR(r30)(r4)
+ ld r31, VCPU_GPR(r31)(r4)
+
BEGIN_FTR_SECTION
/* Switch DSCR to guest value */
ld r5, VCPU_DSCR(r4)
@@ -226,12 +266,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
mtspr SPRN_DAR, r5
mtspr SPRN_DSISR, r6
- /* Set partition DABR */
- li r5,3
- ld r6,VCPU_DABR(r4)
- mtspr SPRN_DABRX,r5
- mtspr SPRN_DABR,r6
-
BEGIN_FTR_SECTION
/* Restore AMR and UAMOR, set AMOR to all 1s */
ld r5,VCPU_AMR(r4)
@@ -925,12 +959,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
- /* Restore host DABR and DABRX */
- ld r5,HSTATE_DABR(r13)
- li r6,7
- mtspr SPRN_DABR,r5
- mtspr SPRN_DABRX,r6
-
/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
mfspr r8, SPRN_DSCR
@@ -969,6 +997,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r5, VCPU_SPRG2(r9)
std r6, VCPU_SPRG3(r9)
+ /* save FP state */
+ mr r3, r9
+ bl .kvmppc_save_fp
+
/* Increment yield count if they have a VPA */
ld r8, VCPU_VPA(r9) /* do they have a VPA? */
cmpdi r8, 0
@@ -983,6 +1015,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
mfspr r4, SPRN_MMCR0 /* save MMCR0 */
mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
+ mfspr r6, SPRN_MMCRA
+BEGIN_FTR_SECTION
+ /* On P7, clear MMCRA in order to disable SDAR updates */
+ li r7, 0
+ mtspr SPRN_MMCRA, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
isync
beq 21f /* if no VPA, save PMU stuff anyway */
lbz r7, LPPACA_PMCINUSE(r8)
@@ -991,7 +1029,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
b 22f
21: mfspr r5, SPRN_MMCR1
- mfspr r6, SPRN_MMCRA
std r4, VCPU_MMCR(r9)
std r5, VCPU_MMCR + 8(r9)
std r6, VCPU_MMCR + 16(r9)
@@ -1016,17 +1053,20 @@ BEGIN_FTR_SECTION
stw r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
- /* save FP state */
- mr r3, r9
- bl .kvmppc_save_fp
/* Secondary threads go off to take a nap on POWER7 */
BEGIN_FTR_SECTION
- lwz r0,VCPU_PTID(r3)
+ lwz r0,VCPU_PTID(r9)
cmpwi r0,0
bne secondary_nap
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+ /* Restore host DABR and DABRX */
+ ld r5,HSTATE_DABR(r13)
+ li r6,7
+ mtspr SPRN_DABR,r5
+ mtspr SPRN_DABRX,r6
+
/*
* Reload DEC. HDEC interrupts were disabled when
* we reloaded the host's LPCR value.
@@ -1363,7 +1403,12 @@ bounce_ext_interrupt:
_GLOBAL(kvmppc_h_set_dabr)
std r4,VCPU_DABR(r3)
- mtspr SPRN_DABR,r4
+ /* Work around P7 bug where DABR can get corrupted on mtspr */
+1: mtspr SPRN_DABR,r4
+ mfspr r5, SPRN_DABR
+ cmpd r4, r5
+ bne 1b
+ isync
li r3,0
blr
@@ -1445,8 +1490,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
* Take a nap until a decrementer or external interrupt occurs,
* with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
*/
- li r0,0x80
- stb r0,PACAPROCSTART(r13)
+ li r0,1
+ stb r0,HSTATE_HWTHREAD_REQ(r13)
mfspr r5,SPRN_LPCR
ori r5,r5,LPCR_PECE0 | LPCR_PECE1
mtspr SPRN_LPCR,r5
@@ -1463,26 +1508,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
kvm_end_cede:
/* Woken by external or decrementer interrupt */
ld r1, HSTATE_HOST_R1(r13)
- ld r2, PACATOC(r13)
- /* If we're a secondary thread and we got here by an IPI, ack it */
- ld r4,HSTATE_KVM_VCPU(r13)
- lwz r3,VCPU_PTID(r4)
- cmpwi r3,0
- beq 27f
- mfspr r3,SPRN_SRR1
- rlwinm r3,r3,44-31,0x7 /* extract wake reason field */
- cmpwi r3,4 /* was it an external interrupt? */
- bne 27f
- ld r5, HSTATE_XICS_PHYS(r13)
- li r0,0xff
- li r6,XICS_QIRR
- li r7,XICS_XIRR
- lwzcix r8,r5,r7 /* ack the interrupt */
- sync
- stbcix r0,r5,r6 /* clear it */
- stwcix r8,r5,r7 /* EOI it */
-27:
/* load up FP state */
bl kvmppc_load_fp
@@ -1580,12 +1606,17 @@ secondary_nap:
stwcx. r3, 0, r4
bne 51b
+kvm_no_guest:
+ li r0, KVM_HWTHREAD_IN_NAP
+ stb r0, HSTATE_HWTHREAD_STATE(r13)
+ li r0, 0
+ std r0, HSTATE_KVM_VCPU(r13)
+
li r3, LPCR_PECE0
mfspr r4, SPRN_LPCR
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
mtspr SPRN_LPCR, r4
isync
- li r0, 0
std r0, HSTATE_SCRATCH0(r13)
ptesync
ld r0, HSTATE_SCRATCH0(r13)
@@ -1599,8 +1630,8 @@ secondary_nap:
* r3 = vcpu pointer
*/
_GLOBAL(kvmppc_save_fp)
- mfmsr r9
- ori r8,r9,MSR_FP
+ mfmsr r5
+ ori r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
oris r8,r8,MSR_VEC@h
@@ -1649,7 +1680,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
mfspr r6,SPRN_VRSAVE
stw r6,VCPU_VRSAVE(r3)
- mtmsrd r9
+ mtmsrd r5
isync
blr
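
The kvmppc_h_set_dabr change above adds a write-and-verify loop to work around the POWER7 problem noted in the comment, where the DABR can be corrupted by mtspr. Rendered as plain C, the loop has the shape sketched below; mtspr_dabr()/mfspr_dabr() are stand-ins for the real SPR accessors, so this illustrates the control flow only.

static unsigned long shadow_dabr;	/* stands in for the SPR itself */

static void mtspr_dabr(unsigned long val) { shadow_dabr = val; }
static unsigned long mfspr_dabr(void) { return shadow_dabr; }

static void set_dabr_checked(unsigned long dabr)
{
        do {
                mtspr_dabr(dabr);		/* 1: mtspr SPRN_DABR,r4 */
        } while (mfspr_dabr() != dabr);		/*    mfspr/cmpd/bne 1b  */
        /* the real code finishes with an isync before returning success */
}

int main(void)
{
        set_dabr_checked(0x12345678);
        return 0;
}
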
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 7759053d391b..a1baec340f7e 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -120,6 +120,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
if (msr & MSR_POW) {
if (!vcpu->arch.pending_exceptions) {
kvm_vcpu_block(vcpu);
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu->stat.halt_wakeup++;
/* Unset POW bit after we woke up */
@@ -144,6 +145,21 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
}
}
+ /*
+ * When switching from 32-bit to 64-bit mode, we may have a stale
+ * 32-bit magic page around which we need to flush. Typically the
+ * 32-bit magic page will be instantiated when calling into RTAS.
+ * Note: we assume that such a transition only happens while in
+ * kernel mode, i.e. we never transition from user 32-bit to kernel
+ * 64-bit with a 32-bit magic page around.
+ */
+ if (vcpu->arch.magic_page_pa &&
+ !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
+ /* going from RTAS to normal kernel code */
+ kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
+ ~0xFFFUL);
+ }
+
/* Preload FPU if it's enabled */
if (vcpu->arch.shared->msr & MSR_FP)
kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
@@ -251,6 +267,9 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
ulong mp_pa = vcpu->arch.magic_page_pa;
+ if (!(vcpu->arch.shared->msr & MSR_SF))
+ mp_pa = (uint32_t)mp_pa;
+
if (unlikely(mp_pa) &&
unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
return 1;
@@ -351,6 +370,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* MMIO */
vcpu->stat.mmio_exits++;
vcpu->arch.paddr_accessed = pte.raddr;
+ vcpu->arch.vaddr_accessed = pte.eaddr;
r = kvmppc_emulate_mmio(run, vcpu);
if ( r == RESUME_HOST_NV )
r = RESUME_HOST;
@@ -528,6 +548,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->exit_reason = KVM_EXIT_UNKNOWN;
run->ready_for_interrupt_injection = 1;
+ /* We get here with MSR.EE=0, so enable it to be a nice citizen */
+ __hard_irq_enable();
+
trace_kvm_book3s_exit(exit_nr, vcpu);
preempt_enable();
kvm_resched(vcpu);
@@ -617,10 +640,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
/* We're good on these - the host merely wanted to get our attention */
case BOOK3S_INTERRUPT_DECREMENTER:
+ case BOOK3S_INTERRUPT_HV_DECREMENTER:
vcpu->stat.dec_exits++;
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_EXTERNAL:
+ case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
+ case BOOK3S_INTERRUPT_EXTERNAL_HV:
vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST;
break;
@@ -628,6 +654,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_PROGRAM:
+ case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
{
enum emulation_result er;
struct kvmppc_book3s_shadow_vcpu *svcpu;
@@ -1131,6 +1158,31 @@ out:
return r;
}
+#ifdef CONFIG_PPC64
+int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+{
+ /* No flags */
+ info->flags = 0;
+
+ /* SLB is always 64 entries */
+ info->slb_size = 64;
+
+ /* Standard 4k base page size segment */
+ info->sps[0].page_shift = 12;
+ info->sps[0].slb_enc = 0;
+ info->sps[0].enc[0].page_shift = 12;
+ info->sps[0].enc[0].pte_enc = 0;
+
+ /* Standard 16M large page size segment */
+ info->sps[1].page_shift = 24;
+ info->sps[1].slb_enc = SLB_VSID_L;
+ info->sps[1].enc[0].page_shift = 24;
+ info->sps[1].enc[0].pte_enc = 0;
+
+ return 0;
+}
+#endif /* CONFIG_PPC64 */
+
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem)
{
@@ -1144,11 +1196,18 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
int kvmppc_core_init_vm(struct kvm *kvm)
{
+#ifdef CONFIG_PPC64
+ INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+#endif
+
return 0;
}
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
+#ifdef CONFIG_PPC64
+ WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
+#endif
}
static int kvmppc_book3s_init(void)
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index b9589324797b..3ff9013d6e79 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -15,6 +15,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/anon_inodes.h>
+
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@@ -98,6 +100,83 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+/* Request defs for kvmppc_h_pr_bulk_remove() */
+#define H_BULK_REMOVE_TYPE 0xc000000000000000ULL
+#define H_BULK_REMOVE_REQUEST 0x4000000000000000ULL
+#define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL
+#define H_BULK_REMOVE_END 0xc000000000000000ULL
+#define H_BULK_REMOVE_CODE 0x3000000000000000ULL
+#define H_BULK_REMOVE_SUCCESS 0x0000000000000000ULL
+#define H_BULK_REMOVE_NOT_FOUND 0x1000000000000000ULL
+#define H_BULK_REMOVE_PARM 0x2000000000000000ULL
+#define H_BULK_REMOVE_HW 0x3000000000000000ULL
+#define H_BULK_REMOVE_RC 0x0c00000000000000ULL
+#define H_BULK_REMOVE_FLAGS 0x0300000000000000ULL
+#define H_BULK_REMOVE_ABSOLUTE 0x0000000000000000ULL
+#define H_BULK_REMOVE_ANDCOND 0x0100000000000000ULL
+#define H_BULK_REMOVE_AVPN 0x0200000000000000ULL
+#define H_BULK_REMOVE_PTEX 0x00ffffffffffffffULL
+#define H_BULK_REMOVE_MAX_BATCH 4
+
+static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
+{
+ int i;
+ int paramnr = 4;
+ int ret = H_SUCCESS;
+
+ for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
+ unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
+ unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
+ unsigned long pteg, rb, flags;
+ unsigned long pte[2];
+ unsigned long v = 0;
+
+ if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
+ break; /* Exit success */
+ } else if ((tsh & H_BULK_REMOVE_TYPE) !=
+ H_BULK_REMOVE_REQUEST) {
+ ret = H_PARAMETER;
+ break; /* Exit fail */
+ }
+
+ tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
+ tsh |= H_BULK_REMOVE_RESPONSE;
+
+ if ((tsh & H_BULK_REMOVE_ANDCOND) &&
+ (tsh & H_BULK_REMOVE_AVPN)) {
+ tsh |= H_BULK_REMOVE_PARM;
+ kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
+ ret = H_PARAMETER;
+ break; /* Exit fail */
+ }
+
+ pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
+ copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+
+ /* tsl = AVPN */
+ flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;
+
+ if ((pte[0] & HPTE_V_VALID) == 0 ||
+ ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
+ ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
+ tsh |= H_BULK_REMOVE_NOT_FOUND;
+ } else {
+ /* Splat the pteg in (userland) hpt */
+ copy_to_user((void __user *)pteg, &v, sizeof(v));
+
+ rb = compute_tlbie_rb(pte[0], pte[1],
+ tsh & H_BULK_REMOVE_PTEX);
+ vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
+ tsh |= H_BULK_REMOVE_SUCCESS;
+ tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
+ }
+ kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
+ }
+ kvmppc_set_gpr(vcpu, 3, ret);
+
+ return EMULATE_DONE;
+}
+
static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
{
unsigned long flags = kvmppc_get_gpr(vcpu, 4);
@@ -134,6 +213,20 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
+{
+ unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
+ unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
+ unsigned long tce = kvmppc_get_gpr(vcpu, 6);
+ long rc;
+
+ rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
+ if (rc == H_TOO_HARD)
+ return EMULATE_FAIL;
+ kvmppc_set_gpr(vcpu, 3, rc);
+ return EMULATE_DONE;
+}
+
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
switch (cmd) {
@@ -144,12 +237,12 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
case H_PROTECT:
return kvmppc_h_pr_protect(vcpu);
case H_BULK_REMOVE:
- /* We just flush all PTEs, so user space can
- handle the HPT modifications */
- kvmppc_mmu_pte_flush(vcpu, 0, 0);
- break;
+ return kvmppc_h_pr_bulk_remove(vcpu);
+ case H_PUT_TCE:
+ return kvmppc_h_pr_put_tce(vcpu);
case H_CEDE:
kvm_vcpu_block(vcpu);
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu->stat.halt_wakeup++;
return EMULATE_DONE;
}
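
Each parameter pair consumed by kvmppc_h_pr_bulk_remove() above packs the entry type, flags and PTE index into one 64-bit word, using the masks defined at the top of the function. The stand-alone sketch below decodes a single "tsh" word with those same masks; it shifts the flag field down by 56 just to show its value, whereas the function above shifts by 26 so the bits line up with the H_AVPN/H_ANDCOND tests.

#include <stdio.h>

#define H_BULK_REMOVE_TYPE	0xc000000000000000ULL
#define H_BULK_REMOVE_REQUEST	0x4000000000000000ULL
#define H_BULK_REMOVE_END	0xc000000000000000ULL
#define H_BULK_REMOVE_FLAGS	0x0300000000000000ULL
#define H_BULK_REMOVE_PTEX	0x00ffffffffffffffULL

int main(void)
{
        /* a request entry for PTE index 0x42 with neither AVPN nor ANDCOND */
        unsigned long long tsh = H_BULK_REMOVE_REQUEST | 0x42;

        if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END)
                printf("end of request list\n");
        else if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_REQUEST)
                printf("request: ptex=0x%llx flags=0x%llx\n",
                       tsh & H_BULK_REMOVE_PTEX,
                       (tsh & H_BULK_REMOVE_FLAGS) >> 56);
        else
                printf("malformed entry\n");
        return 0;
}
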
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 6e6e9cef34a8..798491a268b3 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -128,24 +128,25 @@ no_dcbz32_on:
/* First clear RI in our current MSR value */
li r0, MSR_RI
andc r6, r6, r0
- MTMSR_EERI(r6)
- mtsrr0 r9
- mtsrr1 r4
PPC_LL r0, SVCPU_R0(r3)
PPC_LL r1, SVCPU_R1(r3)
PPC_LL r2, SVCPU_R2(r3)
- PPC_LL r4, SVCPU_R4(r3)
PPC_LL r5, SVCPU_R5(r3)
- PPC_LL r6, SVCPU_R6(r3)
PPC_LL r7, SVCPU_R7(r3)
PPC_LL r8, SVCPU_R8(r3)
- PPC_LL r9, SVCPU_R9(r3)
PPC_LL r10, SVCPU_R10(r3)
PPC_LL r11, SVCPU_R11(r3)
PPC_LL r12, SVCPU_R12(r3)
PPC_LL r13, SVCPU_R13(r3)
+ MTMSR_EERI(r6)
+ mtsrr0 r9
+ mtsrr1 r4
+
+ PPC_LL r4, SVCPU_R4(r3)
+ PPC_LL r6, SVCPU_R6(r3)
+ PPC_LL r9, SVCPU_R9(r3)
PPC_LL r3, (SVCPU_R3)(r3)
RFI
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ee9e1ee9c858..72f13f4a06e0 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -17,6 +17,8 @@
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
* Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ * Scott Wood <scottwood@freescale.com>
+ * Varun Sethi <varun.sethi@freescale.com>
*/
#include <linux/errno.h>
@@ -30,9 +32,12 @@
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
-#include "timing.h"
#include <asm/cacheflush.h>
+#include <asm/dbell.h>
+#include <asm/hw_irq.h>
+#include <asm/irq.h>
+#include "timing.h"
#include "booke.h"
unsigned long kvmppc_booke_handlers;
@@ -55,6 +60,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "dec", VCPU_STAT(dec_exits) },
{ "ext_intr", VCPU_STAT(ext_intr_exits) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
+ { "doorbell", VCPU_STAT(dbell_exits) },
+ { "guest doorbell", VCPU_STAT(gdbell_exits) },
{ NULL }
};
@@ -121,6 +128,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
u32 old_msr = vcpu->arch.shared->msr;
+#ifdef CONFIG_KVM_BOOKE_HV
+ new_msr |= MSR_GS;
+#endif
+
vcpu->arch.shared->msr = new_msr;
kvmppc_mmu_msr_notify(vcpu, old_msr);
@@ -195,17 +206,87 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}
+static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_GSRR0, srr0);
+ mtspr(SPRN_GSRR1, srr1);
+#else
+ vcpu->arch.shared->srr0 = srr0;
+ vcpu->arch.shared->srr1 = srr1;
+#endif
+}
+
+static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+ vcpu->arch.csrr0 = srr0;
+ vcpu->arch.csrr1 = srr1;
+}
+
+static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+ if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
+ vcpu->arch.dsrr0 = srr0;
+ vcpu->arch.dsrr1 = srr1;
+ } else {
+ set_guest_csrr(vcpu, srr0, srr1);
+ }
+}
+
+static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+ vcpu->arch.mcsrr0 = srr0;
+ vcpu->arch.mcsrr1 = srr1;
+}
+
+static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ return mfspr(SPRN_GDEAR);
+#else
+ return vcpu->arch.shared->dar;
+#endif
+}
+
+static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_GDEAR, dear);
+#else
+ vcpu->arch.shared->dar = dear;
+#endif
+}
+
+static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ return mfspr(SPRN_GESR);
+#else
+ return vcpu->arch.shared->esr;
+#endif
+}
+
+static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_GESR, esr);
+#else
+ vcpu->arch.shared->esr = esr;
+#endif
+}
+
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
unsigned int priority)
{
int allowed = 0;
- ulong uninitialized_var(msr_mask);
+ ulong msr_mask = 0;
bool update_esr = false, update_dear = false;
ulong crit_raw = vcpu->arch.shared->critical;
ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
bool crit;
bool keep_irq = false;
+ enum int_class int_class;
/* Truncate crit indicators in 32 bit mode */
if (!(vcpu->arch.shared->msr & MSR_SF)) {
@@ -241,46 +322,85 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
case BOOKE_IRQPRIO_AP_UNAVAIL:
case BOOKE_IRQPRIO_ALIGNMENT:
allowed = 1;
- msr_mask = MSR_CE|MSR_ME|MSR_DE;
+ msr_mask = MSR_CE | MSR_ME | MSR_DE;
+ int_class = INT_CLASS_NONCRIT;
break;
case BOOKE_IRQPRIO_CRITICAL:
- case BOOKE_IRQPRIO_WATCHDOG:
+ case BOOKE_IRQPRIO_DBELL_CRIT:
allowed = vcpu->arch.shared->msr & MSR_CE;
+ allowed = allowed && !crit;
msr_mask = MSR_ME;
+ int_class = INT_CLASS_CRIT;
break;
case BOOKE_IRQPRIO_MACHINE_CHECK:
allowed = vcpu->arch.shared->msr & MSR_ME;
- msr_mask = 0;
+ allowed = allowed && !crit;
+ int_class = INT_CLASS_MC;
break;
case BOOKE_IRQPRIO_DECREMENTER:
case BOOKE_IRQPRIO_FIT:
keep_irq = true;
/* fall through */
case BOOKE_IRQPRIO_EXTERNAL:
+ case BOOKE_IRQPRIO_DBELL:
allowed = vcpu->arch.shared->msr & MSR_EE;
allowed = allowed && !crit;
- msr_mask = MSR_CE|MSR_ME|MSR_DE;
+ msr_mask = MSR_CE | MSR_ME | MSR_DE;
+ int_class = INT_CLASS_NONCRIT;
break;
case BOOKE_IRQPRIO_DEBUG:
allowed = vcpu->arch.shared->msr & MSR_DE;
+ allowed = allowed && !crit;
msr_mask = MSR_ME;
+ int_class = INT_CLASS_CRIT;
break;
}
if (allowed) {
- vcpu->arch.shared->srr0 = vcpu->arch.pc;
- vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
+ switch (int_class) {
+ case INT_CLASS_NONCRIT:
+ set_guest_srr(vcpu, vcpu->arch.pc,
+ vcpu->arch.shared->msr);
+ break;
+ case INT_CLASS_CRIT:
+ set_guest_csrr(vcpu, vcpu->arch.pc,
+ vcpu->arch.shared->msr);
+ break;
+ case INT_CLASS_DBG:
+ set_guest_dsrr(vcpu, vcpu->arch.pc,
+ vcpu->arch.shared->msr);
+ break;
+ case INT_CLASS_MC:
+ set_guest_mcsrr(vcpu, vcpu->arch.pc,
+ vcpu->arch.shared->msr);
+ break;
+ }
+
vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
if (update_esr == true)
- vcpu->arch.shared->esr = vcpu->arch.queued_esr;
+ set_guest_esr(vcpu, vcpu->arch.queued_esr);
if (update_dear == true)
- vcpu->arch.shared->dar = vcpu->arch.queued_dear;
+ set_guest_dear(vcpu, vcpu->arch.queued_dear);
kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
if (!keep_irq)
clear_bit(priority, &vcpu->arch.pending_exceptions);
}
+#ifdef CONFIG_KVM_BOOKE_HV
+ /*
+ * If an interrupt is pending but masked, raise a guest doorbell
+ * so that we are notified when the guest enables the relevant
+ * MSR bit.
+ */
+ if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
+ kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
+ if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
+ kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
+ if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
+ kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
+#endif
+
return allowed;
}
@@ -305,7 +425,7 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
}
priority = __ffs(*pending);
- while (priority <= BOOKE_IRQPRIO_MAX) {
+ while (priority < BOOKE_IRQPRIO_MAX) {
if (kvmppc_booke_irqprio_deliver(vcpu, priority))
break;
@@ -319,8 +439,9 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
}
/* Check pending exceptions and deliver one, if possible. */
-void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
+int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
+ int r = 0;
WARN_ON_ONCE(!irqs_disabled());
kvmppc_core_check_exceptions(vcpu);
@@ -328,16 +449,60 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
if (vcpu->arch.shared->msr & MSR_WE) {
local_irq_enable();
kvm_vcpu_block(vcpu);
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
local_irq_disable();
kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
- kvmppc_core_check_exceptions(vcpu);
+ r = 1;
};
+
+ return r;
+}
+
+/*
+ * Common checks before entering the guest world. Call with interrupts
+ * disabled.
+ *
+ * returns !0 if a signal is pending
+ */
+static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
+{
+ int r = 0;
+
+ WARN_ON_ONCE(!irqs_disabled());
+ while (true) {
+ if (need_resched()) {
+ local_irq_enable();
+ cond_resched();
+ local_irq_disable();
+ continue;
+ }
+
+ if (signal_pending(current)) {
+ r = 1;
+ break;
+ }
+
+ if (kvmppc_core_prepare_to_enter(vcpu)) {
+ /* interrupts got enabled in between, so we
+ are back at square 1 */
+ continue;
+ }
+
+ break;
+ }
+
+ return r;
}
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int ret;
+#ifdef CONFIG_PPC_FPU
+ unsigned int fpscr;
+ int fpexc_mode;
+ u64 fpr[32];
+#endif
if (!vcpu->arch.sane) {
kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -345,17 +510,53 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
}
local_irq_disable();
-
- kvmppc_core_prepare_to_enter(vcpu);
-
- if (signal_pending(current)) {
+ if (kvmppc_prepare_to_enter(vcpu)) {
kvm_run->exit_reason = KVM_EXIT_INTR;
ret = -EINTR;
goto out;
}
kvm_guest_enter();
+
+#ifdef CONFIG_PPC_FPU
+ /* Save userspace FPU state in stack */
+ enable_kernel_fp();
+ memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
+ fpscr = current->thread.fpscr.val;
+ fpexc_mode = current->thread.fpexc_mode;
+
+ /* Restore guest FPU state to thread */
+ memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
+ current->thread.fpscr.val = vcpu->arch.fpscr;
+
+ /*
+ * Since we can't trap on MSR_FP in GS-mode, we consider the guest
+ * as always using the FPU. Kernel usage of FP (via
+ * enable_kernel_fp()) in this thread must not occur while
+ * vcpu->fpu_active is set.
+ */
+ vcpu->fpu_active = 1;
+
+ kvmppc_load_guest_fp(vcpu);
+#endif
+
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+
+#ifdef CONFIG_PPC_FPU
+ kvmppc_save_guest_fp(vcpu);
+
+ vcpu->fpu_active = 0;
+
+ /* Save guest FPU state from thread */
+ memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
+ vcpu->arch.fpscr = current->thread.fpscr.val;
+
+ /* Restore userspace FPU state from stack */
+ memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
+ current->thread.fpscr.val = fpscr;
+ current->thread.fpexc_mode = fpexc_mode;
+#endif
+
kvm_guest_exit();
out:
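
The FPU handling added around __kvmppc_vcpu_run() above follows a simple save-swap-restore shape: stash the userspace FP state on the stack, load the guest's copy into the thread, run, then copy the (possibly modified) state back out and restore userspace's. Stripped of the kernel specifics, the pattern is the sketch below; struct fp_state and run_guest() are placeholders for the real thread/vcpu fields and guest entry.

struct fp_state { double fpr[32]; unsigned int fpscr; int fpexc_mode; };

static struct fp_state thread_fp;	/* stands in for current->thread */
static struct fp_state vcpu_fp;		/* stands in for vcpu->arch      */

static void run_guest(void) { /* __kvmppc_vcpu_run() would go here */ }

static void vcpu_run_with_guest_fp(void)
{
        struct fp_state saved = thread_fp;	/* save userspace FP state */

        thread_fp = vcpu_fp;			/* load guest FP state     */
        run_guest();
        vcpu_fp = thread_fp;			/* save guest state back   */
        thread_fp = saved;			/* restore userspace state */
}

int main(void)
{
        vcpu_run_with_guest_fp();
        return 0;
}
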
@@ -363,6 +564,84 @@ out:
return ret;
}
+static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er;
+
+ er = kvmppc_emulate_instruction(run, vcpu);
+ switch (er) {
+ case EMULATE_DONE:
+ /* don't overwrite subtypes, just account kvm_stats */
+ kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
+ /* Future optimization: only reload non-volatiles if
+ * they were actually modified by emulation. */
+ return RESUME_GUEST_NV;
+
+ case EMULATE_DO_DCR:
+ run->exit_reason = KVM_EXIT_DCR;
+ return RESUME_HOST;
+
+ case EMULATE_FAIL:
+ printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
+ __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+ /* For debugging, encode the failing instruction and
+ * report it to userspace. */
+ run->hw.hardware_exit_reason = ~0ULL << 32;
+ run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
+ kvmppc_core_queue_program(vcpu, ESR_PIL);
+ return RESUME_HOST;
+
+ default:
+ BUG();
+ }
+}
+
+static void kvmppc_fill_pt_regs(struct pt_regs *regs)
+{
+ ulong r1, ip, msr, lr;
+
+ asm("mr %0, 1" : "=r"(r1));
+ asm("mflr %0" : "=r"(lr));
+ asm("mfmsr %0" : "=r"(msr));
+ asm("bl 1f; 1: mflr %0" : "=r"(ip));
+
+ memset(regs, 0, sizeof(*regs));
+ regs->gpr[1] = r1;
+ regs->nip = ip;
+ regs->msr = msr;
+ regs->link = lr;
+}
+
+static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
+ unsigned int exit_nr)
+{
+ struct pt_regs regs;
+
+ switch (exit_nr) {
+ case BOOKE_INTERRUPT_EXTERNAL:
+ kvmppc_fill_pt_regs(&regs);
+ do_IRQ(&regs);
+ break;
+ case BOOKE_INTERRUPT_DECREMENTER:
+ kvmppc_fill_pt_regs(&regs);
+ timer_interrupt(&regs);
+ break;
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
+ case BOOKE_INTERRUPT_DOORBELL:
+ kvmppc_fill_pt_regs(&regs);
+ doorbell_exception(&regs);
+ break;
+#endif
+ case BOOKE_INTERRUPT_MACHINE_CHECK:
+ /* FIXME */
+ break;
+ case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
+ kvmppc_fill_pt_regs(&regs);
+ performance_monitor_exception(&regs);
+ break;
+ }
+}
+
/**
* kvmppc_handle_exit
*
@@ -371,12 +650,14 @@ out:
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int exit_nr)
{
- enum emulation_result er;
int r = RESUME_HOST;
/* update before a new last_exit_type is rewritten */
kvmppc_update_timing_stats(vcpu);
+ /* restart interrupts if they were meant for the host */
+ kvmppc_restart_interrupt(vcpu, exit_nr);
+
local_irq_enable();
run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -386,62 +667,74 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOKE_INTERRUPT_MACHINE_CHECK:
printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
kvmppc_dump_vcpu(vcpu);
+ /* For debugging, send invalid exit reason to user space */
+ run->hw.hardware_exit_reason = ~1ULL << 32;
+ run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
r = RESUME_HOST;
break;
case BOOKE_INTERRUPT_EXTERNAL:
kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
- if (need_resched())
- cond_resched();
r = RESUME_GUEST;
break;
case BOOKE_INTERRUPT_DECREMENTER:
- /* Since we switched IVPR back to the host's value, the host
- * handled this interrupt the moment we enabled interrupts.
- * Now we just offer it a chance to reschedule the guest. */
kvmppc_account_exit(vcpu, DEC_EXITS);
- if (need_resched())
- cond_resched();
r = RESUME_GUEST;
break;
+ case BOOKE_INTERRUPT_DOORBELL:
+ kvmppc_account_exit(vcpu, DBELL_EXITS);
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
+ kvmppc_account_exit(vcpu, GDBELL_EXITS);
+
+ /*
+ * We are here because there is a pending guest interrupt
+ * which could not be delivered as MSR_CE or MSR_ME was not
+ * set. Once we break from here we will retry delivery.
+ */
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_GUEST_DBELL:
+ kvmppc_account_exit(vcpu, GDBELL_EXITS);
+
+ /*
+ * We are here because there is a pending guest interrupt
+ * which could not be delivered as MSR_EE was not set. Once
+ * we break from here we will retry delivery.
+ */
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_HV_PRIV:
+ r = emulation_exit(run, vcpu);
+ break;
+
case BOOKE_INTERRUPT_PROGRAM:
- if (vcpu->arch.shared->msr & MSR_PR) {
- /* Program traps generated by user-level software must be handled
- * by the guest kernel. */
+ if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
+ /*
+ * Program traps generated by user-level software must
+ * be handled by the guest kernel.
+ *
+ * In GS mode, hypervisor privileged instructions trap
+ * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
+ * actual program interrupts, handled by the guest.
+ */
kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
r = RESUME_GUEST;
kvmppc_account_exit(vcpu, USR_PR_INST);
break;
}
- er = kvmppc_emulate_instruction(run, vcpu);
- switch (er) {
- case EMULATE_DONE:
- /* don't overwrite subtypes, just account kvm_stats */
- kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
- /* Future optimization: only reload non-volatiles if
- * they were actually modified by emulation. */
- r = RESUME_GUEST_NV;
- break;
- case EMULATE_DO_DCR:
- run->exit_reason = KVM_EXIT_DCR;
- r = RESUME_HOST;
- break;
- case EMULATE_FAIL:
- /* XXX Deliver Program interrupt to guest. */
- printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
- __func__, vcpu->arch.pc, vcpu->arch.last_inst);
- /* For debugging, encode the failing instruction and
- * report it to userspace. */
- run->hw.hardware_exit_reason = ~0ULL << 32;
- run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
- r = RESUME_HOST;
- break;
- default:
- BUG();
- }
+ r = emulation_exit(run, vcpu);
break;
case BOOKE_INTERRUPT_FP_UNAVAIL:
@@ -506,6 +799,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
+#ifdef CONFIG_KVM_BOOKE_HV
+ case BOOKE_INTERRUPT_HV_SYSCALL:
+ if (!(vcpu->arch.shared->msr & MSR_PR)) {
+ kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
+ } else {
+ /*
+ * hcall from guest userspace -- send privileged
+ * instruction program check.
+ */
+ kvmppc_core_queue_program(vcpu, ESR_PPR);
+ }
+
+ r = RESUME_GUEST;
+ break;
+#else
case BOOKE_INTERRUPT_SYSCALL:
if (!(vcpu->arch.shared->msr & MSR_PR) &&
(((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
@@ -519,6 +827,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_account_exit(vcpu, SYSCALL_EXITS);
r = RESUME_GUEST;
break;
+#endif
case BOOKE_INTERRUPT_DTLB_MISS: {
unsigned long eaddr = vcpu->arch.fault_dear;
@@ -526,7 +835,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
gpa_t gpaddr;
gfn_t gfn;
-#ifdef CONFIG_KVM_E500
+#ifdef CONFIG_KVM_E500V2
if (!(vcpu->arch.shared->msr & MSR_PR) &&
(eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
kvmppc_map_magic(vcpu);
@@ -567,6 +876,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Guest has mapped and accessed a page which is not
* actually RAM. */
vcpu->arch.paddr_accessed = gpaddr;
+ vcpu->arch.vaddr_accessed = eaddr;
r = kvmppc_emulate_mmio(run, vcpu);
kvmppc_account_exit(vcpu, MMIO_EXITS);
}
@@ -634,15 +944,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
BUG();
}
- local_irq_disable();
-
- kvmppc_core_prepare_to_enter(vcpu);
-
+ /*
+ * To avoid clobbering exit_reason, only check for signals if we
+ * aren't already exiting to userspace for some other reason.
+ */
if (!(r & RESUME_HOST)) {
- /* To avoid clobbering exit_reason, only check for signals if
- * we aren't already exiting to userspace for some other
- * reason. */
- if (signal_pending(current)) {
+ local_irq_disable();
+ if (kvmppc_prepare_to_enter(vcpu)) {
run->exit_reason = KVM_EXIT_INTR;
r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
kvmppc_account_exit(vcpu, SIGNAL_EXITS);
@@ -659,12 +967,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
int r;
vcpu->arch.pc = 0;
- vcpu->arch.shared->msr = 0;
- vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
vcpu->arch.shared->pir = vcpu->vcpu_id;
kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
+ kvmppc_set_msr(vcpu, 0);
+#ifndef CONFIG_KVM_BOOKE_HV
+ vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
vcpu->arch.shadow_pid = 1;
+ vcpu->arch.shared->msr = 0;
+#endif
/* Eye-catching numbers so we know if the guest takes an interrupt
* before it's programmed its own IVPR/IVORs. */
@@ -745,8 +1056,8 @@ static void get_sregs_base(struct kvm_vcpu *vcpu,
sregs->u.e.csrr0 = vcpu->arch.csrr0;
sregs->u.e.csrr1 = vcpu->arch.csrr1;
sregs->u.e.mcsr = vcpu->arch.mcsr;
- sregs->u.e.esr = vcpu->arch.shared->esr;
- sregs->u.e.dear = vcpu->arch.shared->dar;
+ sregs->u.e.esr = get_guest_esr(vcpu);
+ sregs->u.e.dear = get_guest_dear(vcpu);
sregs->u.e.tsr = vcpu->arch.tsr;
sregs->u.e.tcr = vcpu->arch.tcr;
sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
@@ -763,8 +1074,8 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
vcpu->arch.csrr0 = sregs->u.e.csrr0;
vcpu->arch.csrr1 = sregs->u.e.csrr1;
vcpu->arch.mcsr = sregs->u.e.mcsr;
- vcpu->arch.shared->esr = sregs->u.e.esr;
- vcpu->arch.shared->dar = sregs->u.e.dear;
+ set_guest_esr(vcpu, sregs->u.e.esr);
+ set_guest_dear(vcpu, sregs->u.e.dear);
vcpu->arch.vrsave = sregs->u.e.vrsave;
kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
@@ -932,15 +1243,6 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
{
}
-int kvmppc_core_init_vm(struct kvm *kvm)
-{
- return 0;
-}
-
-void kvmppc_core_destroy_vm(struct kvm *kvm)
-{
-}
-
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
vcpu->arch.tcr = new_tcr;
@@ -968,8 +1270,19 @@ void kvmppc_decrementer_func(unsigned long data)
kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}
+void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ current->thread.kvm_vcpu = vcpu;
+}
+
+void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ current->thread.kvm_vcpu = NULL;
+}
+
int __init kvmppc_booke_init(void)
{
+#ifndef CONFIG_KVM_BOOKE_HV
unsigned long ivor[16];
unsigned long max_ivor = 0;
int i;
@@ -1012,7 +1325,7 @@ int __init kvmppc_booke_init(void)
}
flush_icache_range(kvmppc_booke_handlers,
kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
-
+#endif /* !BOOKE_HV */
return 0;
}
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 2fe202705a3f..ba61974c1e20 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_ppc.h>
+#include <asm/switch_to.h>
#include "timing.h"
/* interrupt priority ordering */
@@ -48,7 +49,20 @@
#define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19
/* Internal pseudo-irqprio for level triggered externals */
#define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20
-#define BOOKE_IRQPRIO_MAX 20
+#define BOOKE_IRQPRIO_DBELL 21
+#define BOOKE_IRQPRIO_DBELL_CRIT 22
+#define BOOKE_IRQPRIO_MAX 23
+
+#define BOOKE_IRQMASK_EE ((1 << BOOKE_IRQPRIO_EXTERNAL_LEVEL) | \
+ (1 << BOOKE_IRQPRIO_PERFORMANCE_MONITOR) | \
+ (1 << BOOKE_IRQPRIO_DBELL) | \
+ (1 << BOOKE_IRQPRIO_DECREMENTER) | \
+ (1 << BOOKE_IRQPRIO_FIT) | \
+ (1 << BOOKE_IRQPRIO_EXTERNAL))
+
+#define BOOKE_IRQMASK_CE ((1 << BOOKE_IRQPRIO_DBELL_CRIT) | \
+ (1 << BOOKE_IRQPRIO_WATCHDOG) | \
+ (1 << BOOKE_IRQPRIO_CRITICAL))
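These masks group the interrupt priorities by the guest MSR bit that gates their delivery (MSR_EE for the first set, MSR_CE for the second). A minimal sketch, not taken from this patch, of how such a mask can be tested against the pending-exception bitmap:

    /* Illustrative only: is any MSR_EE-gated interrupt source pending? */
    if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE) {
            /* it becomes deliverable once the guest sets MSR_EE */
    }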
extern unsigned long kvmppc_booke_handlers;
@@ -61,8 +75,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
-int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
-int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
+int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
/* low-level asm code to transfer guest state */
void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
@@ -71,4 +85,46 @@ void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
/* high-level function, manages flags, host state */
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
+void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu);
+
+enum int_class {
+ INT_CLASS_NONCRIT,
+ INT_CLASS_CRIT,
+ INT_CLASS_MC,
+ INT_CLASS_DBG,
+};
+
+void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
+
+/*
+ * Load up guest vcpu FP state if it's needed.
+ * It also sets MSR_FP in the thread so that the host knows
+ * we're holding the FPU, and can save the guest vcpu FP state
+ * if other threads need to use the FPU.
+ * This simulates an FP unavailable fault.
+ *
+ * This must be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+ if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
+ load_up_fpu();
+ current->thread.regs->msr |= MSR_FP;
+ }
+#endif
+}
+
+/*
+ * Save guest vcpu FP state into the thread.
+ * This must be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+ if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
+ giveup_fpu(current);
+#endif
+}
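The call pattern these helpers are written for matches the kvmppc_vcpu_run() hunk earlier in this patch; in sketch form (preemption is already disabled around guest entry):

    vcpu->fpu_active = 1;
    kvmppc_load_guest_fp(vcpu);

    ret = __kvmppc_vcpu_run(kvm_run, vcpu);

    kvmppc_save_guest_fp(vcpu);
    vcpu->fpu_active = 0;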
#endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 3e652da36534..6c76397f2af4 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -40,8 +40,8 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
- int rs;
- int rt;
+ int rs = get_rs(inst);
+ int rt = get_rt(inst);
switch (get_op(inst)) {
case 19:
@@ -62,19 +62,16 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
switch (get_xop(inst)) {
case OP_31_XOP_MFMSR:
- rt = get_rt(inst);
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
break;
case OP_31_XOP_MTMSR:
- rs = get_rs(inst);
kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_WRTEE:
- rs = get_rs(inst);
vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
| (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
@@ -99,22 +96,32 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+/*
+ * NOTE: some of these registers are not emulated on BOOKE_HV (GS-mode).
+ * Their backing store is in real registers, and these functions
+ * will return the wrong result if called for them in another context
+ * (such as debugging).
+ */
+int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
- ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_DEAR:
- vcpu->arch.shared->dar = spr_val; break;
+ vcpu->arch.shared->dar = spr_val;
+ break;
case SPRN_ESR:
- vcpu->arch.shared->esr = spr_val; break;
+ vcpu->arch.shared->esr = spr_val;
+ break;
case SPRN_DBCR0:
- vcpu->arch.dbcr0 = spr_val; break;
+ vcpu->arch.dbcr0 = spr_val;
+ break;
case SPRN_DBCR1:
- vcpu->arch.dbcr1 = spr_val; break;
+ vcpu->arch.dbcr1 = spr_val;
+ break;
case SPRN_DBSR:
- vcpu->arch.dbsr &= ~spr_val; break;
+ vcpu->arch.dbsr &= ~spr_val;
+ break;
case SPRN_TSR:
kvmppc_clr_tsr_bits(vcpu, spr_val);
break;
@@ -122,20 +129,29 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
kvmppc_set_tcr(vcpu, spr_val);
break;
- /* Note: SPRG4-7 are user-readable. These values are
- * loaded into the real SPRGs when resuming the
- * guest. */
+ /*
+ * Note: SPRG4-7 are user-readable.
+ * These values are loaded into the real SPRGs when resuming the
+ * guest (PR-mode only).
+ */
case SPRN_SPRG4:
- vcpu->arch.shared->sprg4 = spr_val; break;
+ vcpu->arch.shared->sprg4 = spr_val;
+ break;
case SPRN_SPRG5:
- vcpu->arch.shared->sprg5 = spr_val; break;
+ vcpu->arch.shared->sprg5 = spr_val;
+ break;
case SPRN_SPRG6:
- vcpu->arch.shared->sprg6 = spr_val; break;
+ vcpu->arch.shared->sprg6 = spr_val;
+ break;
case SPRN_SPRG7:
- vcpu->arch.shared->sprg7 = spr_val; break;
+ vcpu->arch.shared->sprg7 = spr_val;
+ break;
case SPRN_IVPR:
vcpu->arch.ivpr = spr_val;
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_GIVPR, spr_val);
+#endif
break;
case SPRN_IVOR0:
vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
@@ -145,6 +161,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
break;
case SPRN_IVOR2:
vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_GIVOR2, spr_val);
+#endif
break;
case SPRN_IVOR3:
vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
@@ -163,6 +182,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
break;
case SPRN_IVOR8:
vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_GIVOR8, spr_val);
+#endif
break;
case SPRN_IVOR9:
vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
@@ -193,75 +215,83 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
return emulated;
}
-int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_IVPR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break;
+ *spr_val = vcpu->arch.ivpr;
+ break;
case SPRN_DEAR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break;
+ *spr_val = vcpu->arch.shared->dar;
+ break;
case SPRN_ESR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break;
+ *spr_val = vcpu->arch.shared->esr;
+ break;
case SPRN_DBCR0:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
+ *spr_val = vcpu->arch.dbcr0;
+ break;
case SPRN_DBCR1:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
+ *spr_val = vcpu->arch.dbcr1;
+ break;
case SPRN_DBSR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
+ *spr_val = vcpu->arch.dbsr;
+ break;
case SPRN_TSR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break;
+ *spr_val = vcpu->arch.tsr;
+ break;
case SPRN_TCR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break;
+ *spr_val = vcpu->arch.tcr;
+ break;
case SPRN_IVOR0:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
break;
case SPRN_IVOR1:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
break;
case SPRN_IVOR2:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
break;
case SPRN_IVOR3:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
break;
case SPRN_IVOR4:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
break;
case SPRN_IVOR5:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
break;
case SPRN_IVOR6:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
break;
case SPRN_IVOR7:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
break;
case SPRN_IVOR8:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
break;
case SPRN_IVOR9:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
break;
case SPRN_IVOR10:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
break;
case SPRN_IVOR11:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
break;
case SPRN_IVOR12:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
break;
case SPRN_IVOR13:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
break;
case SPRN_IVOR14:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
break;
case SPRN_IVOR15:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
break;
default:
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index c8c4b878795a..8feec2ff3928 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -419,13 +419,13 @@ lightweight_exit:
* written directly to the shared area, so we
* need to reload them here with the guest's values.
*/
- lwz r3, VCPU_SHARED_SPRG4(r5)
+ PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
mtspr SPRN_SPRG4W, r3
- lwz r3, VCPU_SHARED_SPRG5(r5)
+ PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
mtspr SPRN_SPRG5W, r3
- lwz r3, VCPU_SHARED_SPRG6(r5)
+ PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
mtspr SPRN_SPRG6W, r3
- lwz r3, VCPU_SHARED_SPRG7(r5)
+ PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
mtspr SPRN_SPRG7W, r3
#ifdef CONFIG_KVM_EXIT_TIMING
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
new file mode 100644
index 000000000000..6048a00515d7
--- /dev/null
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -0,0 +1,597 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * Author: Varun Sethi <varun.sethi@freescale.com>
+ * Author: Scott Wood <scotwood@freescale.com>
+ *
+ * This file is derived from arch/powerpc/kvm/booke_interrupts.S
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/mmu-44x.h>
+#include <asm/page.h>
+#include <asm/asm-compat.h>
+#include <asm/asm-offsets.h>
+#include <asm/bitsperlong.h>
+#include <asm/thread_info.h>
+
+#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
+
+#define GET_VCPU(vcpu, thread) \
+ PPC_LL vcpu, THREAD_KVM_VCPU(thread)
+
+#define LONGBYTES (BITS_PER_LONG / 8)
+
+#define VCPU_GPR(n) (VCPU_GPRS + (n * LONGBYTES))
+#define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES))
+
+/* The host stack layout: */
+#define HOST_R1 (0 * LONGBYTES) /* Implied by stwu. */
+#define HOST_CALLEE_LR (1 * LONGBYTES)
+#define HOST_RUN (2 * LONGBYTES) /* struct kvm_run */
+/*
+ * r2 is special: it holds 'current', and it is made nonvolatile in the
+ * kernel with the -ffixed-r2 gcc option.
+ */
+#define HOST_R2 (3 * LONGBYTES)
+#define HOST_CR (4 * LONGBYTES)
+#define HOST_NV_GPRS (5 * LONGBYTES)
+#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES))
+#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + LONGBYTES)
+#define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
+#define HOST_STACK_LR (HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */
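As a worked example (not part of the patch), a 64-bit build where LONGBYTES == 8 gives:

    HOST_NV_GPRS        = 5 * 8              =  40
    HOST_NV_GPR(31)     = 40 + (31 - 14) * 8 = 176
    HOST_MIN_STACK_SIZE = 176 + 8            = 184
    HOST_STACK_SIZE     = (184 + 15) & ~15   = 192   (96 when LONGBYTES == 4)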
+
+#define NEED_EMU 0x00000001 /* emulation -- save nv regs */
+#define NEED_DEAR 0x00000002 /* save faulting DEAR */
+#define NEED_ESR 0x00000004 /* save faulting ESR */
+
+/*
+ * On entry:
+ * r4 = vcpu, r5 = srr0, r6 = srr1
+ * saved in vcpu: cr, ctr, r3-r13
+ */
+.macro kvm_handler_common intno, srr0, flags
+ /* Restore host stack pointer */
+ PPC_STL r1, VCPU_GPR(r1)(r4)
+ PPC_STL r2, VCPU_GPR(r2)(r4)
+ PPC_LL r1, VCPU_HOST_STACK(r4)
+ PPC_LL r2, HOST_R2(r1)
+
+ mfspr r10, SPRN_PID
+ lwz r8, VCPU_HOST_PID(r4)
+ PPC_LL r11, VCPU_SHARED(r4)
+ PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */
+ li r14, \intno
+
+ stw r10, VCPU_GUEST_PID(r4)
+ mtspr SPRN_PID, r8
+
+#ifdef CONFIG_KVM_EXIT_TIMING
+ /* save exit time */
+1: mfspr r7, SPRN_TBRU
+ mfspr r8, SPRN_TBRL
+ mfspr r9, SPRN_TBRU
+ cmpw r9, r7
+ stw r8, VCPU_TIMING_EXIT_TBL(r4)
+ bne- 1b
+ stw r9, VCPU_TIMING_EXIT_TBU(r4)
+#endif
+
+ oris r8, r6, MSR_CE@h
+ PPC_STD(r6, VCPU_SHARED_MSR, r11)
+ ori r8, r8, MSR_ME | MSR_RI
+ PPC_STL r5, VCPU_PC(r4)
+
+ /*
+ * Make sure CE/ME/RI are set (if appropriate for exception type)
+ * whether or not the guest had it set. Since mfmsr/mtmsr are
+ * somewhat expensive, skip in the common case where the guest
+ * had all these bits set (and thus they're still set if
+ * appropriate for the exception type).
+ */
+ cmpw r6, r8
+ beq 1f
+ mfmsr r7
+ .if \srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0
+ oris r7, r7, MSR_CE@h
+ .endif
+ .if \srr0 != SPRN_MCSRR0
+ ori r7, r7, MSR_ME | MSR_RI
+ .endif
+ mtmsr r7
+1:
+
+ .if \flags & NEED_EMU
+ /*
+ * This assumes you have external PID support.
+ * To support a bookehv CPU without external PID, you'll
+ * need to look up the TLB entry and create a temporary mapping.
+ *
+ * FIXME: we don't currently handle if the lwepx faults. PR-mode
+ * booke doesn't handle it either. Since Linux doesn't use
+ * broadcast tlbivax anymore, the only way this should happen is
+ * if the guest maps its memory execute-but-not-read, or if we
+ * somehow take a TLB miss in the middle of this entry code and
+ * evict the relevant entry. On e500mc, all kernel lowmem is
+ * bolted into TLB1 large page mappings, and we don't use
+ * broadcast invalidates, so we should not take a TLB miss here.
+ *
+ * Later we'll need to deal with faults here. Disallowing guest
+ * mappings that are execute-but-not-read could be an option on
+ * e500mc, but not on chips with an LRAT if it is used.
+ */
+
+ mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */
+ PPC_STL r15, VCPU_GPR(r15)(r4)
+ PPC_STL r16, VCPU_GPR(r16)(r4)
+ PPC_STL r17, VCPU_GPR(r17)(r4)
+ PPC_STL r18, VCPU_GPR(r18)(r4)
+ PPC_STL r19, VCPU_GPR(r19)(r4)
+ mr r8, r3
+ PPC_STL r20, VCPU_GPR(r20)(r4)
+ rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
+ PPC_STL r21, VCPU_GPR(r21)(r4)
+ rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
+ PPC_STL r22, VCPU_GPR(r22)(r4)
+ rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID
+ PPC_STL r23, VCPU_GPR(r23)(r4)
+ PPC_STL r24, VCPU_GPR(r24)(r4)
+ PPC_STL r25, VCPU_GPR(r25)(r4)
+ PPC_STL r26, VCPU_GPR(r26)(r4)
+ PPC_STL r27, VCPU_GPR(r27)(r4)
+ PPC_STL r28, VCPU_GPR(r28)(r4)
+ PPC_STL r29, VCPU_GPR(r29)(r4)
+ PPC_STL r30, VCPU_GPR(r30)(r4)
+ PPC_STL r31, VCPU_GPR(r31)(r4)
+ mtspr SPRN_EPLC, r8
+
+ /* disable preemption, so we are sure we hit the fixup handler */
+#ifdef CONFIG_PPC64
+ clrrdi r8,r1,THREAD_SHIFT
+#else
+ rlwinm r8,r1,0,0,31-THREAD_SHIFT /* current thread_info */
+#endif
+ li r7, 1
+ stw r7, TI_PREEMPT(r8)
+
+ isync
+
+ /*
+ * In case the read goes wrong, we catch it and write an invalid value
+ * in LAST_INST instead.
+ */
+1: lwepx r9, 0, r5
+2:
+.section .fixup, "ax"
+3: li r9, KVM_INST_FETCH_FAILED
+ b 2b
+.previous
+.section __ex_table,"a"
+ PPC_LONG_ALIGN
+ PPC_LONG 1b,3b
+.previous
+
+ mtspr SPRN_EPLC, r3
+ li r7, 0
+ stw r7, TI_PREEMPT(r8)
+ stw r9, VCPU_LAST_INST(r4)
+ .endif
+
+ .if \flags & NEED_ESR
+ mfspr r8, SPRN_ESR
+ PPC_STL r8, VCPU_FAULT_ESR(r4)
+ .endif
+
+ .if \flags & NEED_DEAR
+ mfspr r9, SPRN_DEAR
+ PPC_STL r9, VCPU_FAULT_DEAR(r4)
+ .endif
+
+ b kvmppc_resume_host
+.endm
+
+/*
+ * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
+ */
+.macro kvm_handler intno srr0, srr1, flags
+_GLOBAL(kvmppc_handler_\intno\()_\srr1)
+ GET_VCPU(r11, r10)
+ PPC_STL r3, VCPU_GPR(r3)(r11)
+ mfspr r3, SPRN_SPRG_RSCRATCH0
+ PPC_STL r4, VCPU_GPR(r4)(r11)
+ PPC_LL r4, THREAD_NORMSAVE(0)(r10)
+ PPC_STL r5, VCPU_GPR(r5)(r11)
+ stw r13, VCPU_CR(r11)
+ mfspr r5, \srr0
+ PPC_STL r3, VCPU_GPR(r10)(r11)
+ PPC_LL r3, THREAD_NORMSAVE(2)(r10)
+ PPC_STL r6, VCPU_GPR(r6)(r11)
+ PPC_STL r4, VCPU_GPR(r11)(r11)
+ mfspr r6, \srr1
+ PPC_STL r7, VCPU_GPR(r7)(r11)
+ PPC_STL r8, VCPU_GPR(r8)(r11)
+ PPC_STL r9, VCPU_GPR(r9)(r11)
+ PPC_STL r3, VCPU_GPR(r13)(r11)
+ mfctr r7
+ PPC_STL r12, VCPU_GPR(r12)(r11)
+ PPC_STL r7, VCPU_CTR(r11)
+ mr r4, r11
+ kvm_handler_common \intno, \srr0, \flags
+.endm
+
+.macro kvm_lvl_handler intno scratch srr0, srr1, flags
+_GLOBAL(kvmppc_handler_\intno\()_\srr1)
+ mfspr r10, SPRN_SPRG_THREAD
+ GET_VCPU(r11, r10)
+ PPC_STL r3, VCPU_GPR(r3)(r11)
+ mfspr r3, \scratch
+ PPC_STL r4, VCPU_GPR(r4)(r11)
+ PPC_LL r4, GPR9(r8)
+ PPC_STL r5, VCPU_GPR(r5)(r11)
+ stw r9, VCPU_CR(r11)
+ mfspr r5, \srr0
+ PPC_STL r3, VCPU_GPR(r8)(r11)
+ PPC_LL r3, GPR10(r8)
+ PPC_STL r6, VCPU_GPR(r6)(r11)
+ PPC_STL r4, VCPU_GPR(r9)(r11)
+ mfspr r6, \srr1
+ PPC_LL r4, GPR11(r8)
+ PPC_STL r7, VCPU_GPR(r7)(r11)
+ PPC_STL r3, VCPU_GPR(r10)(r11)
+ mfctr r7
+ PPC_STL r12, VCPU_GPR(r12)(r11)
+ PPC_STL r13, VCPU_GPR(r13)(r11)
+ PPC_STL r4, VCPU_GPR(r11)(r11)
+ PPC_STL r7, VCPU_CTR(r11)
+ mr r4, r11
+ kvm_handler_common \intno, \srr0, \flags
+.endm
+
+kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \
+ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \
+ SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0
+kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \
+ SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR)
+kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
+kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
+ SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
+kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, NEED_ESR
+kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \
+ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \
+ SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
+kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \
+ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU
+kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \
+ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
+ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
+ SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0
+
+
+/* Registers:
+ * SPRG_SCRATCH0: guest r10
+ * r4: vcpu pointer
+ * r11: vcpu->arch.shared
+ * r14: KVM exit number
+ */
+_GLOBAL(kvmppc_resume_host)
+ /* Save remaining volatile guest register state to vcpu. */
+ mfspr r3, SPRN_VRSAVE
+ PPC_STL r0, VCPU_GPR(r0)(r4)
+ mflr r5
+ mfspr r6, SPRN_SPRG4
+ PPC_STL r5, VCPU_LR(r4)
+ mfspr r7, SPRN_SPRG5
+ stw r3, VCPU_VRSAVE(r4)
+ PPC_STD(r6, VCPU_SHARED_SPRG4, r11)
+ mfspr r8, SPRN_SPRG6
+ PPC_STD(r7, VCPU_SHARED_SPRG5, r11)
+ mfspr r9, SPRN_SPRG7
+ PPC_STD(r8, VCPU_SHARED_SPRG6, r11)
+ mfxer r3
+ PPC_STD(r9, VCPU_SHARED_SPRG7, r11)
+
+ /* save guest MAS registers and restore host mas4 & mas6 */
+ mfspr r5, SPRN_MAS0
+ PPC_STL r3, VCPU_XER(r4)
+ mfspr r6, SPRN_MAS1
+ stw r5, VCPU_SHARED_MAS0(r11)
+ mfspr r7, SPRN_MAS2
+ stw r6, VCPU_SHARED_MAS1(r11)
+ PPC_STD(r7, VCPU_SHARED_MAS2, r11)
+ mfspr r5, SPRN_MAS3
+ mfspr r6, SPRN_MAS4
+ stw r5, VCPU_SHARED_MAS7_3+4(r11)
+ mfspr r7, SPRN_MAS6
+ stw r6, VCPU_SHARED_MAS4(r11)
+ mfspr r5, SPRN_MAS7
+ lwz r6, VCPU_HOST_MAS4(r4)
+ stw r7, VCPU_SHARED_MAS6(r11)
+ lwz r8, VCPU_HOST_MAS6(r4)
+ mtspr SPRN_MAS4, r6
+ stw r5, VCPU_SHARED_MAS7_3+0(r11)
+ mtspr SPRN_MAS6, r8
+ /* Enable MAS register updates via exception */
+ mfspr r3, SPRN_EPCR
+ rlwinm r3, r3, 0, ~SPRN_EPCR_DMIUH
+ mtspr SPRN_EPCR, r3
+ isync
+
+ /* Switch to kernel stack and jump to handler. */
+ PPC_LL r3, HOST_RUN(r1)
+ mr r5, r14 /* intno */
+ mr r14, r4 /* Save vcpu pointer. */
+ bl kvmppc_handle_exit
+
+ /* Restore vcpu pointer and the nonvolatiles we used. */
+ mr r4, r14
+ PPC_LL r14, VCPU_GPR(r14)(r4)
+
+ andi. r5, r3, RESUME_FLAG_NV
+ beq skip_nv_load
+ PPC_LL r15, VCPU_GPR(r15)(r4)
+ PPC_LL r16, VCPU_GPR(r16)(r4)
+ PPC_LL r17, VCPU_GPR(r17)(r4)
+ PPC_LL r18, VCPU_GPR(r18)(r4)
+ PPC_LL r19, VCPU_GPR(r19)(r4)
+ PPC_LL r20, VCPU_GPR(r20)(r4)
+ PPC_LL r21, VCPU_GPR(r21)(r4)
+ PPC_LL r22, VCPU_GPR(r22)(r4)
+ PPC_LL r23, VCPU_GPR(r23)(r4)
+ PPC_LL r24, VCPU_GPR(r24)(r4)
+ PPC_LL r25, VCPU_GPR(r25)(r4)
+ PPC_LL r26, VCPU_GPR(r26)(r4)
+ PPC_LL r27, VCPU_GPR(r27)(r4)
+ PPC_LL r28, VCPU_GPR(r28)(r4)
+ PPC_LL r29, VCPU_GPR(r29)(r4)
+ PPC_LL r30, VCPU_GPR(r30)(r4)
+ PPC_LL r31, VCPU_GPR(r31)(r4)
+skip_nv_load:
+ /* Should we return to the guest? */
+ andi. r5, r3, RESUME_FLAG_HOST
+ beq lightweight_exit
+
+ srawi r3, r3, 2 /* Shift -ERR back down. */
+
+heavyweight_exit:
+ /* Not returning to guest. */
+ PPC_LL r5, HOST_STACK_LR(r1)
+ lwz r6, HOST_CR(r1)
+
+ /*
+ * We already saved guest volatile register state; now save the
+ * non-volatiles.
+ */
+
+ PPC_STL r15, VCPU_GPR(r15)(r4)
+ PPC_STL r16, VCPU_GPR(r16)(r4)
+ PPC_STL r17, VCPU_GPR(r17)(r4)
+ PPC_STL r18, VCPU_GPR(r18)(r4)
+ PPC_STL r19, VCPU_GPR(r19)(r4)
+ PPC_STL r20, VCPU_GPR(r20)(r4)
+ PPC_STL r21, VCPU_GPR(r21)(r4)
+ PPC_STL r22, VCPU_GPR(r22)(r4)
+ PPC_STL r23, VCPU_GPR(r23)(r4)
+ PPC_STL r24, VCPU_GPR(r24)(r4)
+ PPC_STL r25, VCPU_GPR(r25)(r4)
+ PPC_STL r26, VCPU_GPR(r26)(r4)
+ PPC_STL r27, VCPU_GPR(r27)(r4)
+ PPC_STL r28, VCPU_GPR(r28)(r4)
+ PPC_STL r29, VCPU_GPR(r29)(r4)
+ PPC_STL r30, VCPU_GPR(r30)(r4)
+ PPC_STL r31, VCPU_GPR(r31)(r4)
+
+ /* Load host non-volatile register state from host stack. */
+ PPC_LL r14, HOST_NV_GPR(r14)(r1)
+ PPC_LL r15, HOST_NV_GPR(r15)(r1)
+ PPC_LL r16, HOST_NV_GPR(r16)(r1)
+ PPC_LL r17, HOST_NV_GPR(r17)(r1)
+ PPC_LL r18, HOST_NV_GPR(r18)(r1)
+ PPC_LL r19, HOST_NV_GPR(r19)(r1)
+ PPC_LL r20, HOST_NV_GPR(r20)(r1)
+ PPC_LL r21, HOST_NV_GPR(r21)(r1)
+ PPC_LL r22, HOST_NV_GPR(r22)(r1)
+ PPC_LL r23, HOST_NV_GPR(r23)(r1)
+ PPC_LL r24, HOST_NV_GPR(r24)(r1)
+ PPC_LL r25, HOST_NV_GPR(r25)(r1)
+ PPC_LL r26, HOST_NV_GPR(r26)(r1)
+ PPC_LL r27, HOST_NV_GPR(r27)(r1)
+ PPC_LL r28, HOST_NV_GPR(r28)(r1)
+ PPC_LL r29, HOST_NV_GPR(r29)(r1)
+ PPC_LL r30, HOST_NV_GPR(r30)(r1)
+ PPC_LL r31, HOST_NV_GPR(r31)(r1)
+
+ /* Return to kvm_vcpu_run(). */
+ mtlr r5
+ mtcr r6
+ addi r1, r1, HOST_STACK_SIZE
+ /* r3 still contains the return code from kvmppc_handle_exit(). */
+ blr
+
+/* Registers:
+ * r3: kvm_run pointer
+ * r4: vcpu pointer
+ */
+_GLOBAL(__kvmppc_vcpu_run)
+ stwu r1, -HOST_STACK_SIZE(r1)
+ PPC_STL r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */
+
+ /* Save host state to stack. */
+ PPC_STL r3, HOST_RUN(r1)
+ mflr r3
+ mfcr r5
+ PPC_STL r3, HOST_STACK_LR(r1)
+
+ stw r5, HOST_CR(r1)
+
+ /* Save host non-volatile register state to stack. */
+ PPC_STL r14, HOST_NV_GPR(r14)(r1)
+ PPC_STL r15, HOST_NV_GPR(r15)(r1)
+ PPC_STL r16, HOST_NV_GPR(r16)(r1)
+ PPC_STL r17, HOST_NV_GPR(r17)(r1)
+ PPC_STL r18, HOST_NV_GPR(r18)(r1)
+ PPC_STL r19, HOST_NV_GPR(r19)(r1)
+ PPC_STL r20, HOST_NV_GPR(r20)(r1)
+ PPC_STL r21, HOST_NV_GPR(r21)(r1)
+ PPC_STL r22, HOST_NV_GPR(r22)(r1)
+ PPC_STL r23, HOST_NV_GPR(r23)(r1)
+ PPC_STL r24, HOST_NV_GPR(r24)(r1)
+ PPC_STL r25, HOST_NV_GPR(r25)(r1)
+ PPC_STL r26, HOST_NV_GPR(r26)(r1)
+ PPC_STL r27, HOST_NV_GPR(r27)(r1)
+ PPC_STL r28, HOST_NV_GPR(r28)(r1)
+ PPC_STL r29, HOST_NV_GPR(r29)(r1)
+ PPC_STL r30, HOST_NV_GPR(r30)(r1)
+ PPC_STL r31, HOST_NV_GPR(r31)(r1)
+
+ /* Load guest non-volatiles. */
+ PPC_LL r14, VCPU_GPR(r14)(r4)
+ PPC_LL r15, VCPU_GPR(r15)(r4)
+ PPC_LL r16, VCPU_GPR(r16)(r4)
+ PPC_LL r17, VCPU_GPR(r17)(r4)
+ PPC_LL r18, VCPU_GPR(r18)(r4)
+ PPC_LL r19, VCPU_GPR(r19)(r4)
+ PPC_LL r20, VCPU_GPR(r20)(r4)
+ PPC_LL r21, VCPU_GPR(r21)(r4)
+ PPC_LL r22, VCPU_GPR(r22)(r4)
+ PPC_LL r23, VCPU_GPR(r23)(r4)
+ PPC_LL r24, VCPU_GPR(r24)(r4)
+ PPC_LL r25, VCPU_GPR(r25)(r4)
+ PPC_LL r26, VCPU_GPR(r26)(r4)
+ PPC_LL r27, VCPU_GPR(r27)(r4)
+ PPC_LL r28, VCPU_GPR(r28)(r4)
+ PPC_LL r29, VCPU_GPR(r29)(r4)
+ PPC_LL r30, VCPU_GPR(r30)(r4)
+ PPC_LL r31, VCPU_GPR(r31)(r4)
+
+
+lightweight_exit:
+ PPC_STL r2, HOST_R2(r1)
+
+ mfspr r3, SPRN_PID
+ stw r3, VCPU_HOST_PID(r4)
+ lwz r3, VCPU_GUEST_PID(r4)
+ mtspr SPRN_PID, r3
+
+ PPC_LL r11, VCPU_SHARED(r4)
+ /* Disable MAS register updates via exception */
+ mfspr r3, SPRN_EPCR
+ oris r3, r3, SPRN_EPCR_DMIUH@h
+ mtspr SPRN_EPCR, r3
+ isync
+ /* Save host mas4 and mas6 and load guest MAS registers */
+ mfspr r3, SPRN_MAS4
+ stw r3, VCPU_HOST_MAS4(r4)
+ mfspr r3, SPRN_MAS6
+ stw r3, VCPU_HOST_MAS6(r4)
+ lwz r3, VCPU_SHARED_MAS0(r11)
+ lwz r5, VCPU_SHARED_MAS1(r11)
+ PPC_LD(r6, VCPU_SHARED_MAS2, r11)
+ lwz r7, VCPU_SHARED_MAS7_3+4(r11)
+ lwz r8, VCPU_SHARED_MAS4(r11)
+ mtspr SPRN_MAS0, r3
+ mtspr SPRN_MAS1, r5
+ mtspr SPRN_MAS2, r6
+ mtspr SPRN_MAS3, r7
+ mtspr SPRN_MAS4, r8
+ lwz r3, VCPU_SHARED_MAS6(r11)
+ lwz r5, VCPU_SHARED_MAS7_3+0(r11)
+ mtspr SPRN_MAS6, r3
+ mtspr SPRN_MAS7, r5
+
+ /*
+ * Host interrupt handlers may have clobbered these guest-readable
+ * SPRGs, so we need to reload them here with the guest's values.
+ */
+ lwz r3, VCPU_VRSAVE(r4)
+ PPC_LD(r5, VCPU_SHARED_SPRG4, r11)
+ mtspr SPRN_VRSAVE, r3
+ PPC_LD(r6, VCPU_SHARED_SPRG5, r11)
+ mtspr SPRN_SPRG4W, r5
+ PPC_LD(r7, VCPU_SHARED_SPRG6, r11)
+ mtspr SPRN_SPRG5W, r6
+ PPC_LD(r8, VCPU_SHARED_SPRG7, r11)
+ mtspr SPRN_SPRG6W, r7
+ mtspr SPRN_SPRG7W, r8
+
+ /* Load some guest volatiles. */
+ PPC_LL r3, VCPU_LR(r4)
+ PPC_LL r5, VCPU_XER(r4)
+ PPC_LL r6, VCPU_CTR(r4)
+ lwz r7, VCPU_CR(r4)
+ PPC_LL r8, VCPU_PC(r4)
+ PPC_LD(r9, VCPU_SHARED_MSR, r11)
+ PPC_LL r0, VCPU_GPR(r0)(r4)
+ PPC_LL r1, VCPU_GPR(r1)(r4)
+ PPC_LL r2, VCPU_GPR(r2)(r4)
+ PPC_LL r10, VCPU_GPR(r10)(r4)
+ PPC_LL r11, VCPU_GPR(r11)(r4)
+ PPC_LL r12, VCPU_GPR(r12)(r4)
+ PPC_LL r13, VCPU_GPR(r13)(r4)
+ mtlr r3
+ mtxer r5
+ mtctr r6
+ mtsrr0 r8
+ mtsrr1 r9
+
+#ifdef CONFIG_KVM_EXIT_TIMING
+ /* save enter time */
+1:
+ mfspr r6, SPRN_TBRU
+ mfspr r9, SPRN_TBRL
+ mfspr r8, SPRN_TBRU
+ cmpw r8, r6
+ stw r9, VCPU_TIMING_LAST_ENTER_TBL(r4)
+ bne 1b
+ stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
+#endif
+
+ /*
+ * Don't execute any instruction which can change CR after
+ * the mtcr instruction below.
+ */
+ mtcr r7
+
+ /* Finish loading guest volatiles and jump to guest. */
+ PPC_LL r5, VCPU_GPR(r5)(r4)
+ PPC_LL r6, VCPU_GPR(r6)(r4)
+ PPC_LL r7, VCPU_GPR(r7)(r4)
+ PPC_LL r8, VCPU_GPR(r8)(r4)
+ PPC_LL r9, VCPU_GPR(r9)(r4)
+
+ PPC_LL r3, VCPU_GPR(r3)(r4)
+ PPC_LL r4, VCPU_GPR(r4)(r4)
+ rfi
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index ddcd896fa2ff..b479ed77c515 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -20,11 +20,282 @@
#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/tlbflush.h>
-#include <asm/kvm_e500.h>
#include <asm/kvm_ppc.h>
+#include "../mm/mmu_decl.h"
#include "booke.h"
-#include "e500_tlb.h"
+#include "e500.h"
+
+struct id {
+ unsigned long val;
+ struct id **pentry;
+};
+
+#define NUM_TIDS 256
+
+/*
+ * This table provides mappings from:
+ * (guestAS,guestTID,guestPR) --> ID of physical cpu
+ * guestAS [0..1]
+ * guestTID [0..255]
+ * guestPR [0..1]
+ * ID [1..255]
+ * Each vcpu keeps one vcpu_id_table.
+ */
+struct vcpu_id_table {
+ struct id id[2][NUM_TIDS][2];
+};
+
+/*
+ * This table provides the reverse mapping of vcpu_id_table:
+ * ID --> address of vcpu_id_table item.
+ * Each physical core has one pcpu_id_table.
+ */
+struct pcpu_id_table {
+ struct id *entry[NUM_TIDS];
+};
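As an illustration of how the two tables are indexed (the variable names here are hypothetical, not from the patch): a guest kernel-mode access (PR = 0) with TID 5 in address space 0 uses one slot of the vcpu's table, cross-linked with the per-core reverse table:

    struct id *entry = &idt->id[0][5][0];  /* idt: this vcpu's vcpu_id_table */

    /* entry->val    -- shadow ID currently assigned on this core (0 = none)     */
    /* entry->pentry -- back-pointer into this core's pcpu_sids.entry[], letting */
    /*                  local_sid_lookup() confirm both sides still agree        */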
+
+static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
+
+/* This variable keeps the last used shadow ID on the local core.
+ * The valid range of a shadow ID is [1..255]. */
+static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
+
+/*
+ * Allocate a free shadow id and set up a valid sid mapping in the given entry.
+ * A mapping is only valid when the vcpu_id_table and pcpu_id_table entries match.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static inline int local_sid_setup_one(struct id *entry)
+{
+ unsigned long sid;
+ int ret = -1;
+
+ sid = ++(__get_cpu_var(pcpu_last_used_sid));
+ if (sid < NUM_TIDS) {
+ __get_cpu_var(pcpu_sids).entry[sid] = entry;
+ entry->val = sid;
+ entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
+ ret = sid;
+ }
+
+ /*
+ * If sid == NUM_TIDS, we've run out of sids. We return -1, and
+ * the caller will invalidate everything and start over.
+ *
+ * sid > NUM_TIDS indicates a race, which we disable preemption to
+ * avoid.
+ */
+ WARN_ON(sid > NUM_TIDS);
+
+ return ret;
+}
+
+/*
+ * Check if the given entry contains a valid shadow id mapping.
+ * An ID mapping is considered valid only if
+ * both the vcpu and the pcpu know this mapping.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static inline int local_sid_lookup(struct id *entry)
+{
+ if (entry && entry->val != 0 &&
+ __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
+ entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
+ return entry->val;
+ return -1;
+}
+
+/* Invalidate all id mappings on local core -- call with preempt disabled */
+static inline void local_sid_destroy_all(void)
+{
+ __get_cpu_var(pcpu_last_used_sid) = 0;
+ memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
+}
+
+static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
+ return vcpu_e500->idt;
+}
+
+static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ kfree(vcpu_e500->idt);
+ vcpu_e500->idt = NULL;
+}
+
+/* Map guest pid to shadow.
+ * We use PID to hold the shadow of the current guest non-zero PID,
+ * and PID1 to hold the shadow of the guest zero PID,
+ * so that a guest tlbe with TID=0 can be accessed at any time. */
+static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ preempt_disable();
+ vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
+ get_cur_as(&vcpu_e500->vcpu),
+ get_cur_pid(&vcpu_e500->vcpu),
+ get_cur_pr(&vcpu_e500->vcpu), 1);
+ vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
+ get_cur_as(&vcpu_e500->vcpu), 0,
+ get_cur_pr(&vcpu_e500->vcpu), 1);
+ preempt_enable();
+}
+
+/* Invalidate all mappings on vcpu */
+static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
+
+ /* Update shadow pid when mappings are changed */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+}
+
+/* Invalidate one ID mapping on vcpu */
+static inline void kvmppc_e500_id_table_reset_one(
+ struct kvmppc_vcpu_e500 *vcpu_e500,
+ int as, int pid, int pr)
+{
+ struct vcpu_id_table *idt = vcpu_e500->idt;
+
+ BUG_ON(as >= 2);
+ BUG_ON(pid >= NUM_TIDS);
+ BUG_ON(pr >= 2);
+
+ idt->id[as][pid][pr].val = 0;
+ idt->id[as][pid][pr].pentry = NULL;
+
+ /* Update shadow pid when mappings are changed */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+}
+
+/*
+ * Map guest (vcpu,AS,ID,PR) to a physical core shadow id.
+ * This function first looks up whether a valid mapping exists;
+ * if not, it creates a new one.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
+ unsigned int as, unsigned int gid,
+ unsigned int pr, int avoid_recursion)
+{
+ struct vcpu_id_table *idt = vcpu_e500->idt;
+ int sid;
+
+ BUG_ON(as >= 2);
+ BUG_ON(gid >= NUM_TIDS);
+ BUG_ON(pr >= 2);
+
+ sid = local_sid_lookup(&idt->id[as][gid][pr]);
+
+ while (sid <= 0) {
+ /* No mapping yet */
+ sid = local_sid_setup_one(&idt->id[as][gid][pr]);
+ if (sid <= 0) {
+ _tlbil_all();
+ local_sid_destroy_all();
+ }
+
+ /* Update shadow pid when mappings are changed */
+ if (!avoid_recursion)
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+ }
+
+ return sid;
+}
+
+unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
+ struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+ return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe),
+ get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0);
+}
+
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+ if (vcpu->arch.pid != pid) {
+ vcpu_e500->pid[0] = vcpu->arch.pid = pid;
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+ }
+}
+
+/* gtlbe must not be mapped by more than one host tlbe */
+void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+ struct vcpu_id_table *idt = vcpu_e500->idt;
+ unsigned int pr, tid, ts, pid;
+ u32 val, eaddr;
+ unsigned long flags;
+
+ ts = get_tlb_ts(gtlbe);
+ tid = get_tlb_tid(gtlbe);
+
+ preempt_disable();
+
+ /* One guest ID may be mapped to two shadow IDs */
+ for (pr = 0; pr < 2; pr++) {
+ /*
+ * The shadow PID can have a valid mapping on at most one
+ * host CPU. In the common case, it will be valid on this
+ * CPU, in which case we do a local invalidation of the
+ * specific address.
+ *
+ * If the shadow PID is not valid on the current host CPU,
+ * we invalidate the entire shadow PID.
+ */
+ pid = local_sid_lookup(&idt->id[ts][tid][pr]);
+ if (pid <= 0) {
+ kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
+ continue;
+ }
+
+ /*
+ * The guest is invalidating a 4K entry which is in a PID
+ * that has a valid shadow mapping on this host CPU. We
+ * search the host TLB to invalidate its shadow TLB entry,
+ * similar to __tlbil_va except that we need to look in AS1.
+ */
+ val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
+ eaddr = get_tlb_eaddr(gtlbe);
+
+ local_irq_save(flags);
+
+ mtspr(SPRN_MAS6, val);
+ asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
+ val = mfspr(SPRN_MAS1);
+ if (val & MAS1_VALID) {
+ mtspr(SPRN_MAS1, val & ~MAS1_VALID);
+ asm volatile("tlbwe");
+ }
+
+ local_irq_restore(flags);
+ }
+
+ preempt_enable();
+}
+
+void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ kvmppc_e500_id_table_reset_all(vcpu_e500);
+}
+
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
+{
+ /* Recalc shadow pid since MSR changes */
+ kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
+}
void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
@@ -36,17 +307,20 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- kvmppc_e500_tlb_load(vcpu, cpu);
+ kvmppc_booke_vcpu_load(vcpu, cpu);
+
+ /* The shadow PID may have expired on the local core */
+ kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
- kvmppc_e500_tlb_put(vcpu);
-
#ifdef CONFIG_SPE
if (vcpu->arch.shadow_msr & MSR_SPE)
kvmppc_vcpu_disable_spe(vcpu);
#endif
+
+ kvmppc_booke_vcpu_put(vcpu);
}
int kvmppc_core_check_processor_compat(void)
@@ -61,6 +335,23 @@ int kvmppc_core_check_processor_compat(void)
return r;
}
+static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ struct kvm_book3e_206_tlb_entry *tlbe;
+
+ /* Insert large initial mapping for guest. */
+ tlbe = get_entry(vcpu_e500, 1, 0);
+ tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
+ tlbe->mas2 = 0;
+ tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;
+
+ /* 4K map for serial output. Used by kernel wrapper. */
+ tlbe = get_entry(vcpu_e500, 1, 1);
+ tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
+ tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
+ tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
+}
+
int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -76,32 +367,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}
-/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
-int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
- struct kvm_translation *tr)
-{
- int index;
- gva_t eaddr;
- u8 pid;
- u8 as;
-
- eaddr = tr->linear_address;
- pid = (tr->linear_address >> 32) & 0xff;
- as = (tr->linear_address >> 40) & 0x1;
-
- index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
- if (index < 0) {
- tr->valid = 0;
- return 0;
- }
-
- tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
- /* XXX what does "writeable" and "usermode" even mean? */
- tr->valid = 1;
-
- return 0;
-}
-
void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -115,19 +380,6 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
- sregs->u.e.mas0 = vcpu->arch.shared->mas0;
- sregs->u.e.mas1 = vcpu->arch.shared->mas1;
- sregs->u.e.mas2 = vcpu->arch.shared->mas2;
- sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
- sregs->u.e.mas4 = vcpu->arch.shared->mas4;
- sregs->u.e.mas6 = vcpu->arch.shared->mas6;
-
- sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG);
- sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg;
- sregs->u.e.tlbcfg[1] = vcpu_e500->tlb1cfg;
- sregs->u.e.tlbcfg[2] = 0;
- sregs->u.e.tlbcfg[3] = 0;
-
sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
@@ -135,11 +387,13 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
kvmppc_get_sregs_ivor(vcpu, sregs);
+ kvmppc_get_sregs_e500_tlb(vcpu, sregs);
}
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ int ret;
if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
@@ -147,14 +401,9 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
}
- if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
- vcpu->arch.shared->mas0 = sregs->u.e.mas0;
- vcpu->arch.shared->mas1 = sregs->u.e.mas1;
- vcpu->arch.shared->mas2 = sregs->u.e.mas2;
- vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
- vcpu->arch.shared->mas4 = sregs->u.e.mas4;
- vcpu->arch.shared->mas6 = sregs->u.e.mas6;
- }
+ ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
+ if (ret < 0)
+ return ret;
if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
return 0;
@@ -193,9 +442,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
if (err)
goto free_vcpu;
+ if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
+ goto uninit_vcpu;
+
err = kvmppc_e500_tlb_init(vcpu_e500);
if (err)
- goto uninit_vcpu;
+ goto uninit_id;
vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
if (!vcpu->arch.shared)
@@ -205,6 +457,8 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
uninit_tlb:
kvmppc_e500_tlb_uninit(vcpu_e500);
+uninit_id:
+ kvmppc_e500_id_table_free(vcpu_e500);
uninit_vcpu:
kvm_vcpu_uninit(vcpu);
free_vcpu:
@@ -218,11 +472,21 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
free_page((unsigned long)vcpu->arch.shared);
- kvm_vcpu_uninit(vcpu);
kvmppc_e500_tlb_uninit(vcpu_e500);
+ kvmppc_e500_id_table_free(vcpu_e500);
+ kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+}
+
static int __init kvmppc_e500_init(void)
{
int r, i;
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
new file mode 100644
index 000000000000..aa8b81428bf4
--- /dev/null
+++ b/arch/powerpc/kvm/e500.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Yu Liu <yu.liu@freescale.com>
+ * Scott Wood <scottwood@freescale.com>
+ * Ashish Kalra <ashish.kalra@freescale.com>
+ * Varun Sethi <varun.sethi@freescale.com>
+ *
+ * Description:
+ * This file is based on arch/powerpc/kvm/44x_tlb.h and
+ * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <hollisb@us.ibm.com>,
+ * Copyright IBM Corp. 2007-2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef KVM_E500_H
+#define KVM_E500_H
+
+#include <linux/kvm_host.h>
+#include <asm/mmu-book3e.h>
+#include <asm/tlb.h>
+
+#define E500_PID_NUM 3
+#define E500_TLB_NUM 2
+
+#define E500_TLB_VALID 1
+#define E500_TLB_DIRTY 2
+#define E500_TLB_BITMAP 4
+
+struct tlbe_ref {
+ pfn_t pfn;
+ unsigned int flags; /* E500_TLB_* */
+};
+
+struct tlbe_priv {
+ struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
+};
+
+#ifdef CONFIG_KVM_E500V2
+struct vcpu_id_table;
+#endif
+
+struct kvmppc_e500_tlb_params {
+ int entries, ways, sets;
+};
+
+struct kvmppc_vcpu_e500 {
+ struct kvm_vcpu vcpu;
+
+ /* Unmodified copy of the guest's TLB -- shared with host userspace. */
+ struct kvm_book3e_206_tlb_entry *gtlb_arch;
+
+ /* Starting entry number in gtlb_arch[] */
+ int gtlb_offset[E500_TLB_NUM];
+
+ /* KVM internal information associated with each guest TLB entry */
+ struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
+
+ struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];
+
+ unsigned int gtlb_nv[E500_TLB_NUM];
+
+ /*
+ * information associated with each host TLB entry --
+ * TLB1 only for now. If/when guest TLB1 entries can be
+ * mapped with host TLB0, this will be used for that too.
+ *
+ * We don't want to use this for guest TLB0 because then we'd
+ * have the overhead of doing the translation again even if
+ * the entry is still in the guest TLB (e.g. we swapped out
+ * and back, and our host TLB entries got evicted).
+ */
+ struct tlbe_ref *tlb_refs[E500_TLB_NUM];
+ unsigned int host_tlb1_nv;
+
+ u32 svr;
+ u32 l1csr0;
+ u32 l1csr1;
+ u32 hid0;
+ u32 hid1;
+ u64 mcar;
+
+ struct page **shared_tlb_pages;
+ int num_shared_tlb_pages;
+
+ u64 *g2h_tlb1_map;
+ unsigned int *h2g_tlb1_rmap;
+
+ /* Minimum and maximum address mapped by TLB1 */
+ unsigned long tlb1_min_eaddr;
+ unsigned long tlb1_max_eaddr;
+
+#ifdef CONFIG_KVM_E500V2
+ u32 pid[E500_PID_NUM];
+
+ /* vcpu id table */
+ struct vcpu_id_table *idt;
+#endif
+};
+
+static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
+{
+ return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
+}
+
+
+/* This geometry is the legacy default -- can be overridden by userspace */
+#define KVM_E500_TLB0_WAY_SIZE 128
+#define KVM_E500_TLB0_WAY_NUM 2
+
+#define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
+#define KVM_E500_TLB1_SIZE 16
+
+#define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF))
+#define tlbsel_of(index) ((index) >> 16)
+#define esel_of(index) ((index) & 0xFFFF)
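A quick worked example of the index encoding above:

    index_of(1, 3)         == 0x00010003
    tlbsel_of(0x00010003)  == 1   /* TLB1    */
    esel_of(0x00010003)    == 3   /* entry 3 */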
+
+#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
+#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
+#define MAS2_ATTRIB_MASK \
+ (MAS2_X0 | MAS2_X1)
+#define MAS3_ATTRIB_MASK \
+ (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
+ | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
+
+int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
+ ulong value);
+int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
+int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
+int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb);
+int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb);
+int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb);
+int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
+void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);
+
+void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+
+
+#ifdef CONFIG_KVM_E500V2
+unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
+ unsigned int as, unsigned int gid,
+ unsigned int pr, int avoid_recursion);
+#endif
+
+/* TLB helper functions */
+static inline unsigned int
+get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return (tlbe->mas1 >> 7) & 0x1f;
+}
+
+static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return tlbe->mas2 & 0xfffff000;
+}
+
+static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ unsigned int pgsize = get_tlb_size(tlbe);
+ return 1ULL << 10 << pgsize;
+}
+
+static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ u64 bytes = get_tlb_bytes(tlbe);
+ return get_tlb_eaddr(tlbe) + bytes - 1;
+}
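A worked example of the size helpers, assuming the power-of-two TSIZE encoding that get_tlb_bytes() implements (bytes = 1 KiB << TSIZE); the field values are illustrative:

    /* entry with TSIZE = 2 and EPN 0xe0004000 */
    get_tlb_size()   == 2
    get_tlb_bytes()  == 1ULL << 10 << 2        == 4096 (4 KiB)
    get_tlb_end()    == 0xe0004000 + 4096 - 1  == 0xe0004fff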
+
+static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return tlbe->mas7_3 & ~0xfffULL;
+}
+
+static inline unsigned int
+get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return (tlbe->mas1 >> 16) & 0xff;
+}
+
+static inline unsigned int
+get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return (tlbe->mas1 >> 12) & 0x1;
+}
+
+static inline unsigned int
+get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return (tlbe->mas1 >> 31) & 0x1;
+}
+
+static inline unsigned int
+get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return (tlbe->mas1 >> 30) & 0x1;
+}
+
+static inline unsigned int
+get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
+}
+
+static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.pid & 0xff;
+}
+
+static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
+{
+ return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
+}
+
+static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
+{
+ return !!(vcpu->arch.shared->msr & MSR_PR);
+}
+
+static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.shared->mas6 >> 16) & 0xff;
+}
+
+static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.shared->mas6 & 0x1;
+}
+
+static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
+{
+ /*
+ * The manual says tlbsel is 2 bits wide.
+ * Since we only have two TLBs, only the lower bit is used.
+ */
+ return (vcpu->arch.shared->mas0 >> 28) & 0x1;
+}
+
+static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.shared->mas0 & 0xfff;
+}
+
+static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
+}
+
+static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
+ const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ gpa_t gpa;
+
+ if (!get_tlb_v(tlbe))
+ return 0;
+
+#ifndef CONFIG_KVM_BOOKE_HV
+ /* Does it match current guest AS? */
+ /* XXX what about IS != DS? */
+ if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
+ return 0;
+#endif
+
+ gpa = get_tlb_raddr(tlbe);
+ if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
+ /* Mapping is not for RAM. */
+ return 0;
+
+ return 1;
+}
+
+static inline struct kvm_book3e_206_tlb_entry *get_entry(
+ struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
+{
+ int offset = vcpu_e500->gtlb_offset[tlbsel];
+ return &vcpu_e500->gtlb_arch[offset + entry];
+}
+
+void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct kvm_book3e_206_tlb_entry *gtlbe);
+void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
+
+#ifdef CONFIG_KVM_BOOKE_HV
+#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe) get_tlb_tid(gtlbe)
+#define get_tlbmiss_tid(vcpu) get_cur_pid(vcpu)
+#define get_tlb_sts(gtlbe) (gtlbe->mas1 & MAS1_TS)
+#else
+unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
+ struct kvm_book3e_206_tlb_entry *gtlbe);
+
+static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf;
+
+ return vcpu_e500->pid[tidseld];
+}
+
+/* Force TS=1 for all guest mappings. */
+#define get_tlb_sts(gtlbe) (MAS1_TS)
+#endif /* !BOOKE_HV */
+
+#endif /* KVM_E500_H */
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 6d0b2bd54fb0..8b99e076dc81 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -14,27 +14,96 @@
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
-#include <asm/kvm_e500.h>
+#include <asm/dbell.h>
#include "booke.h"
-#include "e500_tlb.h"
+#include "e500.h"
+#define XOP_MSGSND 206
+#define XOP_MSGCLR 238
#define XOP_TLBIVAX 786
#define XOP_TLBSX 914
#define XOP_TLBRE 946
#define XOP_TLBWE 978
+#define XOP_TLBILX 18
+
+#ifdef CONFIG_KVM_E500MC
+static int dbell2prio(ulong param)
+{
+ int msg = param & PPC_DBELL_TYPE_MASK;
+ int prio = -1;
+
+ switch (msg) {
+ case PPC_DBELL_TYPE(PPC_DBELL):
+ prio = BOOKE_IRQPRIO_DBELL;
+ break;
+ case PPC_DBELL_TYPE(PPC_DBELL_CRIT):
+ prio = BOOKE_IRQPRIO_DBELL_CRIT;
+ break;
+ default:
+ break;
+ }
+
+ return prio;
+}
+
+static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
+{
+ ulong param = vcpu->arch.gpr[rb];
+ int prio = dbell2prio(param);
+
+ if (prio < 0)
+ return EMULATE_FAIL;
+
+ clear_bit(prio, &vcpu->arch.pending_exceptions);
+ return EMULATE_DONE;
+}
+
+static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
+{
+ ulong param = vcpu->arch.gpr[rb];
+ int prio = dbell2prio(param);
+ int pir = param & PPC_DBELL_PIR_MASK;
+ int i;
+ struct kvm_vcpu *cvcpu;
+
+ if (prio < 0)
+ return EMULATE_FAIL;
+
+ kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
+ int cpir = cvcpu->arch.shared->pir;
+ if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) {
+ set_bit(prio, &cvcpu->arch.pending_exceptions);
+ kvm_vcpu_kick(cvcpu);
+ }
+ }
+
+ return EMULATE_DONE;
+}
+#endif
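[editor's note] For illustration only, a user-space sketch of the decode performed by kvmppc_e500_emul_msgsnd() above; the DEMO_* masks are made-up placeholders standing in for the real PPC_DBELL_* definitions in asm/dbell.h:

#include <stdio.h>

/* Hypothetical field layout; the real masks are PPC_DBELL_TYPE_MASK,
 * PPC_DBELL_MSG_BRDCAST and PPC_DBELL_PIR_MASK. */
#define DEMO_TYPE_MASK	0xf8000000u
#define DEMO_BRDCAST	0x04000000u
#define DEMO_PIR_MASK	0x00003fffu

int main(void)
{
	unsigned int param = 0x04000007u;	/* made-up rb register value */
	unsigned int type = param & DEMO_TYPE_MASK;
	unsigned int pir = param & DEMO_PIR_MASK;
	int broadcast = !!(param & DEMO_BRDCAST);

	/* The emulation maps 'type' to a BOOKE_IRQPRIO_* priority, then
	 * sets that pending bit on every vcpu whose PIR matches 'pir'
	 * (or on all vcpus when the broadcast bit is set). */
	printf("type=%#x pir=%u broadcast=%d\n", type, pir, broadcast);
	return 0;
}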
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
- int ra;
- int rb;
+ int ra = get_ra(inst);
+ int rb = get_rb(inst);
+ int rt = get_rt(inst);
switch (get_op(inst)) {
case 31:
switch (get_xop(inst)) {
+#ifdef CONFIG_KVM_E500MC
+ case XOP_MSGSND:
+ emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
+ break;
+
+ case XOP_MSGCLR:
+ emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
+ break;
+#endif
+
case XOP_TLBRE:
emulated = kvmppc_e500_emul_tlbre(vcpu);
break;
@@ -44,13 +113,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case XOP_TLBSX:
- rb = get_rb(inst);
emulated = kvmppc_e500_emul_tlbsx(vcpu,rb);
break;
+ case XOP_TLBILX:
+ emulated = kvmppc_e500_emul_tlbilx(vcpu, rt, ra, rb);
+ break;
+
case XOP_TLBIVAX:
- ra = get_ra(inst);
- rb = get_rb(inst);
emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb);
break;
@@ -70,52 +140,63 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
- ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
+#ifndef CONFIG_KVM_BOOKE_HV
case SPRN_PID:
kvmppc_set_pid(vcpu, spr_val);
break;
case SPRN_PID1:
if (spr_val != 0)
return EMULATE_FAIL;
- vcpu_e500->pid[1] = spr_val; break;
+ vcpu_e500->pid[1] = spr_val;
+ break;
case SPRN_PID2:
if (spr_val != 0)
return EMULATE_FAIL;
- vcpu_e500->pid[2] = spr_val; break;
+ vcpu_e500->pid[2] = spr_val;
+ break;
case SPRN_MAS0:
- vcpu->arch.shared->mas0 = spr_val; break;
+ vcpu->arch.shared->mas0 = spr_val;
+ break;
case SPRN_MAS1:
- vcpu->arch.shared->mas1 = spr_val; break;
+ vcpu->arch.shared->mas1 = spr_val;
+ break;
case SPRN_MAS2:
- vcpu->arch.shared->mas2 = spr_val; break;
+ vcpu->arch.shared->mas2 = spr_val;
+ break;
case SPRN_MAS3:
vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
vcpu->arch.shared->mas7_3 |= spr_val;
break;
case SPRN_MAS4:
- vcpu->arch.shared->mas4 = spr_val; break;
+ vcpu->arch.shared->mas4 = spr_val;
+ break;
case SPRN_MAS6:
- vcpu->arch.shared->mas6 = spr_val; break;
+ vcpu->arch.shared->mas6 = spr_val;
+ break;
case SPRN_MAS7:
vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
break;
+#endif
case SPRN_L1CSR0:
vcpu_e500->l1csr0 = spr_val;
vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
break;
case SPRN_L1CSR1:
- vcpu_e500->l1csr1 = spr_val; break;
+ vcpu_e500->l1csr1 = spr_val;
+ break;
case SPRN_HID0:
- vcpu_e500->hid0 = spr_val; break;
+ vcpu_e500->hid0 = spr_val;
+ break;
case SPRN_HID1:
- vcpu_e500->hid1 = spr_val; break;
+ vcpu_e500->hid1 = spr_val;
+ break;
case SPRN_MMUCSR0:
emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
@@ -135,81 +216,112 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
case SPRN_IVOR35:
vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
break;
-
+#ifdef CONFIG_KVM_BOOKE_HV
+ case SPRN_IVOR36:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
+ break;
+ case SPRN_IVOR37:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
+ break;
+#endif
default:
- emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
+ emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
}
return emulated;
}
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
- unsigned long val;
switch (sprn) {
+#ifndef CONFIG_KVM_BOOKE_HV
case SPRN_PID:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break;
+ *spr_val = vcpu_e500->pid[0];
+ break;
case SPRN_PID1:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break;
+ *spr_val = vcpu_e500->pid[1];
+ break;
case SPRN_PID2:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
+ *spr_val = vcpu_e500->pid[2];
+ break;
case SPRN_MAS0:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break;
+ *spr_val = vcpu->arch.shared->mas0;
+ break;
case SPRN_MAS1:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break;
+ *spr_val = vcpu->arch.shared->mas1;
+ break;
case SPRN_MAS2:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break;
+ *spr_val = vcpu->arch.shared->mas2;
+ break;
case SPRN_MAS3:
- val = (u32)vcpu->arch.shared->mas7_3;
- kvmppc_set_gpr(vcpu, rt, val);
+ *spr_val = (u32)vcpu->arch.shared->mas7_3;
break;
case SPRN_MAS4:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break;
+ *spr_val = vcpu->arch.shared->mas4;
+ break;
case SPRN_MAS6:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break;
+ *spr_val = vcpu->arch.shared->mas6;
+ break;
case SPRN_MAS7:
- val = vcpu->arch.shared->mas7_3 >> 32;
- kvmppc_set_gpr(vcpu, rt, val);
+ *spr_val = vcpu->arch.shared->mas7_3 >> 32;
break;
+#endif
case SPRN_TLB0CFG:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break;
+ *spr_val = vcpu->arch.tlbcfg[0];
+ break;
case SPRN_TLB1CFG:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break;
+ *spr_val = vcpu->arch.tlbcfg[1];
+ break;
case SPRN_L1CSR0:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break;
+ *spr_val = vcpu_e500->l1csr0;
+ break;
case SPRN_L1CSR1:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break;
+ *spr_val = vcpu_e500->l1csr1;
+ break;
case SPRN_HID0:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
+ *spr_val = vcpu_e500->hid0;
+ break;
case SPRN_HID1:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
+ *spr_val = vcpu_e500->hid1;
+ break;
case SPRN_SVR:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->svr); break;
+ *spr_val = vcpu_e500->svr;
+ break;
case SPRN_MMUCSR0:
- kvmppc_set_gpr(vcpu, rt, 0); break;
+ *spr_val = 0;
+ break;
case SPRN_MMUCFG:
- kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break;
+ *spr_val = vcpu->arch.mmucfg;
+ break;
/* extra exceptions */
case SPRN_IVOR32:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
break;
case SPRN_IVOR33:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
break;
case SPRN_IVOR34:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
break;
case SPRN_IVOR35:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
+ break;
+#ifdef CONFIG_KVM_BOOKE_HV
+ case SPRN_IVOR36:
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
+ break;
+ case SPRN_IVOR37:
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
break;
+#endif
default:
- emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
+ emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
}
return emulated;
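[editor's note] Not part of the patch, but a sketch of the reworked interface above: the generic emulation layer now performs the single GPR access, while the backend only sees the SPR value (mtspr) or fills an out-parameter (mfspr). Roughly:

	ulong spr_val;

	/* mtspr rS: read the GPR once, hand the value down. */
	spr_val = kvmppc_get_gpr(vcpu, rs);
	emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, spr_val);

	/* mfspr rT: the backend fills spr_val, the caller writes the GPR. */
	emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, &spr_val);
	kvmppc_set_gpr(vcpu, rt, spr_val);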
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 6e53e4164de1..c510fc961302 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -2,6 +2,9 @@
* Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, yu.liu@freescale.com
+ * Scott Wood, scottwood@freescale.com
+ * Ashish Kalra, ashish.kalra@freescale.com
+ * Varun Sethi, varun.sethi@freescale.com
*
* Description:
* This file is based on arch/powerpc/kvm/44x_tlb.c,
@@ -26,210 +29,15 @@
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>
-#include <asm/kvm_e500.h>
-#include "../mm/mmu_decl.h"
-#include "e500_tlb.h"
+#include "e500.h"
#include "trace.h"
#include "timing.h"
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
-struct id {
- unsigned long val;
- struct id **pentry;
-};
-
-#define NUM_TIDS 256
-
-/*
- * This table provide mappings from:
- * (guestAS,guestTID,guestPR) --> ID of physical cpu
- * guestAS [0..1]
- * guestTID [0..255]
- * guestPR [0..1]
- * ID [1..255]
- * Each vcpu keeps one vcpu_id_table.
- */
-struct vcpu_id_table {
- struct id id[2][NUM_TIDS][2];
-};
-
-/*
- * This table provide reversed mappings of vcpu_id_table:
- * ID --> address of vcpu_id_table item.
- * Each physical core has one pcpu_id_table.
- */
-struct pcpu_id_table {
- struct id *entry[NUM_TIDS];
-};
-
-static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
-
-/* This variable keeps last used shadow ID on local core.
- * The valid range of shadow ID is [1..255] */
-static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
-
static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
-static struct kvm_book3e_206_tlb_entry *get_entry(
- struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
-{
- int offset = vcpu_e500->gtlb_offset[tlbsel];
- return &vcpu_e500->gtlb_arch[offset + entry];
-}
-
-/*
- * Allocate a free shadow id and setup a valid sid mapping in given entry.
- * A mapping is only valid when vcpu_id_table and pcpu_id_table are match.
- *
- * The caller must have preemption disabled, and keep it that way until
- * it has finished with the returned shadow id (either written into the
- * TLB or arch.shadow_pid, or discarded).
- */
-static inline int local_sid_setup_one(struct id *entry)
-{
- unsigned long sid;
- int ret = -1;
-
- sid = ++(__get_cpu_var(pcpu_last_used_sid));
- if (sid < NUM_TIDS) {
- __get_cpu_var(pcpu_sids).entry[sid] = entry;
- entry->val = sid;
- entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
- ret = sid;
- }
-
- /*
- * If sid == NUM_TIDS, we've run out of sids. We return -1, and
- * the caller will invalidate everything and start over.
- *
- * sid > NUM_TIDS indicates a race, which we disable preemption to
- * avoid.
- */
- WARN_ON(sid > NUM_TIDS);
-
- return ret;
-}
-
-/*
- * Check if given entry contain a valid shadow id mapping.
- * An ID mapping is considered valid only if
- * both vcpu and pcpu know this mapping.
- *
- * The caller must have preemption disabled, and keep it that way until
- * it has finished with the returned shadow id (either written into the
- * TLB or arch.shadow_pid, or discarded).
- */
-static inline int local_sid_lookup(struct id *entry)
-{
- if (entry && entry->val != 0 &&
- __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
- entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
- return entry->val;
- return -1;
-}
-
-/* Invalidate all id mappings on local core -- call with preempt disabled */
-static inline void local_sid_destroy_all(void)
-{
- __get_cpu_var(pcpu_last_used_sid) = 0;
- memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
-}
-
-static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
- return vcpu_e500->idt;
-}
-
-static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- kfree(vcpu_e500->idt);
-}
-
-/* Invalidate all mappings on vcpu */
-static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
-
- /* Update shadow pid when mappings are changed */
- kvmppc_e500_recalc_shadow_pid(vcpu_e500);
-}
-
-/* Invalidate one ID mapping on vcpu */
-static inline void kvmppc_e500_id_table_reset_one(
- struct kvmppc_vcpu_e500 *vcpu_e500,
- int as, int pid, int pr)
-{
- struct vcpu_id_table *idt = vcpu_e500->idt;
-
- BUG_ON(as >= 2);
- BUG_ON(pid >= NUM_TIDS);
- BUG_ON(pr >= 2);
-
- idt->id[as][pid][pr].val = 0;
- idt->id[as][pid][pr].pentry = NULL;
-
- /* Update shadow pid when mappings are changed */
- kvmppc_e500_recalc_shadow_pid(vcpu_e500);
-}
-
-/*
- * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
- * This function first lookup if a valid mapping exists,
- * if not, then creates a new one.
- *
- * The caller must have preemption disabled, and keep it that way until
- * it has finished with the returned shadow id (either written into the
- * TLB or arch.shadow_pid, or discarded).
- */
-static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
- unsigned int as, unsigned int gid,
- unsigned int pr, int avoid_recursion)
-{
- struct vcpu_id_table *idt = vcpu_e500->idt;
- int sid;
-
- BUG_ON(as >= 2);
- BUG_ON(gid >= NUM_TIDS);
- BUG_ON(pr >= 2);
-
- sid = local_sid_lookup(&idt->id[as][gid][pr]);
-
- while (sid <= 0) {
- /* No mapping yet */
- sid = local_sid_setup_one(&idt->id[as][gid][pr]);
- if (sid <= 0) {
- _tlbil_all();
- local_sid_destroy_all();
- }
-
- /* Update shadow pid when mappings are changed */
- if (!avoid_recursion)
- kvmppc_e500_recalc_shadow_pid(vcpu_e500);
- }
-
- return sid;
-}
-
-/* Map guest pid to shadow.
- * We use PID to keep shadow of current guest non-zero PID,
- * and use PID1 to keep shadow of guest zero PID.
- * So that guest tlbe with TID=0 can be accessed at any time */
-void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- preempt_disable();
- vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
- get_cur_as(&vcpu_e500->vcpu),
- get_cur_pid(&vcpu_e500->vcpu),
- get_cur_pr(&vcpu_e500->vcpu), 1);
- vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
- get_cur_as(&vcpu_e500->vcpu), 0,
- get_cur_pr(&vcpu_e500->vcpu), 1);
- preempt_enable();
-}
-
static inline unsigned int gtlb0_get_next_victim(
struct kvmppc_vcpu_e500 *vcpu_e500)
{
@@ -258,6 +66,7 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
/* Mask off reserved bits. */
mas3 &= MAS3_ATTRIB_MASK;
+#ifndef CONFIG_KVM_BOOKE_HV
if (!usermode) {
/* Guest is in supervisor mode,
* so we need to translate guest
@@ -265,8 +74,9 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
mas3 &= ~E500_TLB_USER_PERM_MASK;
mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
}
-
- return mas3 | E500_TLB_SUPER_PERM_MASK;
+ mas3 |= E500_TLB_SUPER_PERM_MASK;
+#endif
+ return mas3;
}
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
@@ -292,7 +102,16 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_MAS8, stlbe->mas8);
+#endif
asm volatile("isync; tlbwe" : : : "memory");
+
+#ifdef CONFIG_KVM_BOOKE_HV
+ /* Must clear mas8 for other host tlbwe's */
+ mtspr(SPRN_MAS8, 0);
+ isync();
+#endif
local_irq_restore(flags);
trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
@@ -337,6 +156,7 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
}
}
+#ifdef CONFIG_KVM_E500V2
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -361,75 +181,41 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
preempt_enable();
}
-
-void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
-{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-
- /* Shadow PID may be expired on local core */
- kvmppc_e500_recalc_shadow_pid(vcpu_e500);
-}
-
-void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
-{
-}
+#endif
static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
int tlbsel, int esel)
{
struct kvm_book3e_206_tlb_entry *gtlbe =
get_entry(vcpu_e500, tlbsel, esel);
- struct vcpu_id_table *idt = vcpu_e500->idt;
- unsigned int pr, tid, ts, pid;
- u32 val, eaddr;
- unsigned long flags;
-
- ts = get_tlb_ts(gtlbe);
- tid = get_tlb_tid(gtlbe);
-
- preempt_disable();
-
- /* One guest ID may be mapped to two shadow IDs */
- for (pr = 0; pr < 2; pr++) {
- /*
- * The shadow PID can have a valid mapping on at most one
- * host CPU. In the common case, it will be valid on this
- * CPU, in which case (for TLB0) we do a local invalidation
- * of the specific address.
- *
- * If the shadow PID is not valid on the current host CPU, or
- * if we're invalidating a TLB1 entry, we invalidate the
- * entire shadow PID.
- */
- if (tlbsel == 1 ||
- (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
- kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
- continue;
- }
- /*
- * The guest is invalidating a TLB0 entry which is in a PID
- * that has a valid shadow mapping on this host CPU. We
- * search host TLB0 to invalidate it's shadow TLB entry,
- * similar to __tlbil_va except that we need to look in AS1.
- */
- val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
- eaddr = get_tlb_eaddr(gtlbe);
+ if (tlbsel == 1 &&
+ vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) {
+ u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
+ int hw_tlb_indx;
+ unsigned long flags;
local_irq_save(flags);
-
- mtspr(SPRN_MAS6, val);
- asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
- val = mfspr(SPRN_MAS1);
- if (val & MAS1_VALID) {
- mtspr(SPRN_MAS1, val & ~MAS1_VALID);
+ while (tmp) {
+ hw_tlb_indx = __ilog2_u64(tmp & -tmp);
+ mtspr(SPRN_MAS0,
+ MAS0_TLBSEL(1) |
+ MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
+ mtspr(SPRN_MAS1, 0);
asm volatile("tlbwe");
+ vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
+ tmp &= tmp - 1;
}
-
+ mb();
+ vcpu_e500->g2h_tlb1_map[esel] = 0;
+ vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP;
local_irq_restore(flags);
+
+ return;
}
- preempt_enable();
+ /* Guest tlbe is backed by at most one host tlbe per shadow pid. */
+ kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
}
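[editor's note] A minimal standalone sketch of the bitmap walk used above: tmp & -tmp isolates the lowest set bit, its position is the host TLB1 slot to invalidate, and tmp &= tmp - 1 clears that bit. __builtin_ctzll stands in here for the kernel's __ilog2_u64 on a single-bit value, and the bitmap contents are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tmp = 0x29;	/* hypothetical g2h map: host slots 0, 3, 5 */

	while (tmp) {
		int slot = __builtin_ctzll(tmp & -tmp);	/* lowest set bit */

		printf("invalidate host TLB1 slot %d\n", slot);
		tmp &= tmp - 1;				/* clear that bit */
	}
	return 0;
}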
static int tlb0_set_base(gva_t addr, int sets, int ways)
@@ -475,6 +261,9 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
set_base = gtlb0_set_base(vcpu_e500, eaddr);
size = vcpu_e500->gtlb_params[0].ways;
} else {
+ if (eaddr < vcpu_e500->tlb1_min_eaddr ||
+ eaddr > vcpu_e500->tlb1_max_eaddr)
+ return -1;
set_base = 0;
}
@@ -530,6 +319,16 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
}
}
+static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ if (vcpu_e500->g2h_tlb1_map)
+ memset(vcpu_e500->g2h_tlb1_map, 0,
+ sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
+ if (vcpu_e500->h2g_tlb1_rmap)
+ memset(vcpu_e500->h2g_tlb1_rmap, 0,
+ sizeof(unsigned int) * host_tlb_params[1].entries);
+}
+
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
int tlbsel = 0;
@@ -547,7 +346,7 @@ static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
int stlbsel = 1;
int i;
- kvmppc_e500_id_table_reset_all(vcpu_e500);
+ kvmppc_e500_tlbil_all(vcpu_e500);
for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
struct tlbe_ref *ref =
@@ -562,19 +361,18 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
unsigned int eaddr, int as)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- unsigned int victim, pidsel, tsized;
+ unsigned int victim, tsized;
int tlbsel;
/* since we only have two TLBs, only lower bit is used. */
tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
- pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf;
tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;
vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
- | MAS1_TID(vcpu_e500->pid[pidsel])
+ | MAS1_TID(get_tlbmiss_tid(vcpu))
| MAS1_TSIZE(tsized);
vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
| (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
@@ -586,23 +384,26 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
/* TID must be supplied by the caller */
static inline void kvmppc_e500_setup_stlbe(
- struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct kvm_vcpu *vcpu,
struct kvm_book3e_206_tlb_entry *gtlbe,
int tsize, struct tlbe_ref *ref, u64 gvaddr,
struct kvm_book3e_206_tlb_entry *stlbe)
{
pfn_t pfn = ref->pfn;
+ u32 pr = vcpu->arch.shared->msr & MSR_PR;
BUG_ON(!(ref->flags & E500_TLB_VALID));
- /* Force TS=1 IPROT=0 for all guest mappings. */
- stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID;
- stlbe->mas2 = (gvaddr & MAS2_EPN)
- | e500_shadow_mas2_attrib(gtlbe->mas2,
- vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
- stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT)
- | e500_shadow_mas3_attrib(gtlbe->mas7_3,
- vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
+ /* Force IPROT=0 for all guest mappings. */
+ stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
+ stlbe->mas2 = (gvaddr & MAS2_EPN) |
+ e500_shadow_mas2_attrib(gtlbe->mas2, pr);
+ stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
+ e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
+
+#ifdef CONFIG_KVM_BOOKE_HV
+ stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
+#endif
}
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -736,7 +537,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
kvmppc_e500_ref_release(ref);
kvmppc_e500_ref_setup(ref, gtlbe, pfn);
- kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, ref, gvaddr, stlbe);
+ kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
+ ref, gvaddr, stlbe);
}
/* XXX only map the one-one case, for now use TLB0 */
@@ -760,7 +562,7 @@ static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
/* XXX for both one-one and one-to-many , for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
- struct kvm_book3e_206_tlb_entry *stlbe)
+ struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
struct tlbe_ref *ref;
unsigned int victim;
@@ -773,15 +575,74 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
ref = &vcpu_e500->tlb_refs[1][victim];
kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);
+ vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim;
+ vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+ if (vcpu_e500->h2g_tlb1_rmap[victim]) {
+ unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim];
+ vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim);
+ }
+ vcpu_e500->h2g_tlb1_rmap[victim] = esel;
+
return victim;
}
-void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
+static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ int size = vcpu_e500->gtlb_params[1].entries;
+ unsigned int offset;
+ gva_t eaddr;
+ int i;
+
+ vcpu_e500->tlb1_min_eaddr = ~0UL;
+ vcpu_e500->tlb1_max_eaddr = 0;
+ offset = vcpu_e500->gtlb_offset[1];
+
+ for (i = 0; i < size; i++) {
+ struct kvm_book3e_206_tlb_entry *tlbe =
+ &vcpu_e500->gtlb_arch[offset + i];
+
+ if (!get_tlb_v(tlbe))
+ continue;
+
+ eaddr = get_tlb_eaddr(tlbe);
+ vcpu_e500->tlb1_min_eaddr =
+ min(vcpu_e500->tlb1_min_eaddr, eaddr);
+
+ eaddr = get_tlb_end(tlbe);
+ vcpu_e500->tlb1_max_eaddr =
+ max(vcpu_e500->tlb1_max_eaddr, eaddr);
+ }
+}
+
+static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct kvm_book3e_206_tlb_entry *gtlbe)
{
+ unsigned long start, end, size;
+
+ size = get_tlb_bytes(gtlbe);
+ start = get_tlb_eaddr(gtlbe) & ~(size - 1);
+ end = start + size - 1;
+
+ return vcpu_e500->tlb1_min_eaddr == start ||
+ vcpu_e500->tlb1_max_eaddr == end;
+}
+
+/* This function is supposed to be called when adding a new valid tlb entry */
+static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
+ struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+ unsigned long start, end, size;
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- /* Recalc shadow pid since MSR changes */
- kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+ if (!get_tlb_v(gtlbe))
+ return;
+
+ size = get_tlb_bytes(gtlbe);
+ start = get_tlb_eaddr(gtlbe) & ~(size - 1);
+ end = start + size - 1;
+
+ vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
+ vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
}
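[editor's note] The two helpers above keep a conservative [tlb1_min_eaddr, tlb1_max_eaddr] window: every valid TLB1 entry widens it, lookups outside it can bail out early, and a full rescan is only needed when the entry that defined one of the bounds goes away. A standalone sketch of the idea, with arbitrary example addresses:

#include <stdint.h>
#include <stdio.h>

struct range {
	uint64_t min, max;
};

static void range_add(struct range *r, uint64_t start, uint64_t end)
{
	if (start < r->min)
		r->min = start;
	if (end > r->max)
		r->max = end;
}

static int range_may_hit(const struct range *r, uint64_t eaddr)
{
	return eaddr >= r->min && eaddr <= r->max;
}

int main(void)
{
	struct range r = { .min = UINT64_MAX, .max = 0 };

	range_add(&r, 0xc0000000ULL, 0xcfffffffULL);	/* hypothetical entry */
	printf("0xb0000000 may hit: %d\n", range_may_hit(&r, 0xb0000000ULL));
	printf("0xc1000000 may hit: %d\n", range_may_hit(&r, 0xc1000000ULL));
	return 0;
}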
static inline int kvmppc_e500_gtlbe_invalidate(
@@ -794,6 +655,9 @@ static inline int kvmppc_e500_gtlbe_invalidate(
if (unlikely(get_tlb_iprot(gtlbe)))
return -1;
+ if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
+ kvmppc_recalc_tlb1map_range(vcpu_e500);
+
gtlbe->mas1 = 0;
return 0;
@@ -811,7 +675,7 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
/* Invalidate all vcpu id mappings */
- kvmppc_e500_id_table_reset_all(vcpu_e500);
+ kvmppc_e500_tlbil_all(vcpu_e500);
return EMULATE_DONE;
}
@@ -844,7 +708,59 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
}
/* Invalidate all vcpu id mappings */
- kvmppc_e500_id_table_reset_all(vcpu_e500);
+ kvmppc_e500_tlbil_all(vcpu_e500);
+
+ return EMULATE_DONE;
+}
+
+static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
+ int pid, int rt)
+{
+ struct kvm_book3e_206_tlb_entry *tlbe;
+ int tid, esel;
+
+ /* invalidate all entries */
+ for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
+ tlbe = get_entry(vcpu_e500, tlbsel, esel);
+ tid = get_tlb_tid(tlbe);
+ if (rt == 0 || tid == pid) {
+ inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
+ kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
+ }
+ }
+}
+
+static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
+ int ra, int rb)
+{
+ int tlbsel, esel;
+ gva_t ea;
+
+ ea = kvmppc_get_gpr(&vcpu_e500->vcpu, rb);
+ if (ra)
+ ea += kvmppc_get_gpr(&vcpu_e500->vcpu, ra);
+
+ for (tlbsel = 0; tlbsel < 2; tlbsel++) {
+ esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
+ if (esel >= 0) {
+ inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
+ kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
+ break;
+ }
+ }
+}
+
+int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ int pid = get_cur_spid(vcpu);
+
+ if (rt == 0 || rt == 1) {
+ tlbilx_all(vcpu_e500, 0, pid, rt);
+ tlbilx_all(vcpu_e500, 1, pid, rt);
+ } else if (rt == 3) {
+ tlbilx_one(vcpu_e500, pid, ra, rb);
+ }
return EMULATE_DONE;
}
@@ -929,9 +845,7 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
int stid;
preempt_disable();
- stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
- get_tlb_tid(gtlbe),
- get_cur_pr(&vcpu_e500->vcpu), 0);
+ stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);
stlbe->mas1 |= MAS1_TID(stid);
write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
@@ -941,16 +855,21 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- struct kvm_book3e_206_tlb_entry *gtlbe;
- int tlbsel, esel;
+ struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
+ int tlbsel, esel, stlbsel, sesel;
+ int recal = 0;
tlbsel = get_tlb_tlbsel(vcpu);
esel = get_tlb_esel(vcpu, tlbsel);
gtlbe = get_entry(vcpu_e500, tlbsel, esel);
- if (get_tlb_v(gtlbe))
+ if (get_tlb_v(gtlbe)) {
inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
+ if ((tlbsel == 1) &&
+ kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
+ recal = 1;
+ }
gtlbe->mas1 = vcpu->arch.shared->mas1;
gtlbe->mas2 = vcpu->arch.shared->mas2;
@@ -959,10 +878,20 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
gtlbe->mas2, gtlbe->mas7_3);
+ if (tlbsel == 1) {
+ /*
+ * If a valid tlb1 entry is overwritten, recalculate the
+ * min/max TLB1 map address range; otherwise there is no
+ * need to scan the tlb1 array.
+ */
+ if (recal)
+ kvmppc_recalc_tlb1map_range(vcpu_e500);
+ else
+ kvmppc_set_tlb1map_range(vcpu, gtlbe);
+ }
+
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
if (tlbe_is_host_safe(vcpu, gtlbe)) {
- struct kvm_book3e_206_tlb_entry stlbe;
- int stlbsel, sesel;
u64 eaddr;
u64 raddr;
@@ -989,7 +918,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
* are mapped on the fly. */
stlbsel = 1;
sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
- raddr >> PAGE_SHIFT, gtlbe, &stlbe);
+ raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
break;
default:
@@ -1003,6 +932,48 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
+ gva_t eaddr, unsigned int pid, int as)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ int esel, tlbsel;
+
+ for (tlbsel = 0; tlbsel < 2; tlbsel++) {
+ esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
+ if (esel >= 0)
+ return index_of(tlbsel, esel);
+ }
+
+ return -1;
+}
+
+/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
+int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
+{
+ int index;
+ gva_t eaddr;
+ u8 pid;
+ u8 as;
+
+ eaddr = tr->linear_address;
+ pid = (tr->linear_address >> 32) & 0xff;
+ as = (tr->linear_address >> 40) & 0x1;
+
+ index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
+ if (index < 0) {
+ tr->valid = 0;
+ return 0;
+ }
+
+ tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
+ /* XXX what do "writeable" and "usermode" even mean? */
+ tr->valid = 1;
+
+ return 0;
+}
+
+
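[editor's note] A standalone sketch of the AS|PID|EADDR packing consumed by kvmppc_core_vcpu_translate() above; the field values are made up, and on a 32-bit build the effective address is simply the truncated low word:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical request: AS=1, PID=0x2a, EADDR=0x10002000. */
	uint64_t linear_address = (1ULL << 40) | (0x2aULL << 32) | 0x10002000ULL;

	uint32_t eaddr = (uint32_t)linear_address;
	unsigned int pid = (linear_address >> 32) & 0xff;
	unsigned int as = (linear_address >> 40) & 0x1;

	printf("eaddr=0x%08x pid=0x%02x as=%u\n", (unsigned int)eaddr, pid, as);
	return 0;
}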
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
@@ -1066,7 +1037,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
sesel = 0; /* unused */
priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
- kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
+ kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
&priv->ref, eaddr, &stlbe);
break;
@@ -1075,7 +1046,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
stlbsel = 1;
sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
- gtlbe, &stlbe);
+ gtlbe, &stlbe, esel);
break;
}
@@ -1087,52 +1058,13 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
}
-int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
- gva_t eaddr, unsigned int pid, int as)
-{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- int esel, tlbsel;
-
- for (tlbsel = 0; tlbsel < 2; tlbsel++) {
- esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
- if (esel >= 0)
- return index_of(tlbsel, esel);
- }
-
- return -1;
-}
-
-void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
-{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-
- if (vcpu->arch.pid != pid) {
- vcpu_e500->pid[0] = vcpu->arch.pid = pid;
- kvmppc_e500_recalc_shadow_pid(vcpu_e500);
- }
-}
-
-void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- struct kvm_book3e_206_tlb_entry *tlbe;
-
- /* Insert large initial mapping for guest. */
- tlbe = get_entry(vcpu_e500, 1, 0);
- tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
- tlbe->mas2 = 0;
- tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;
-
- /* 4K map for serial output. Used by kernel wrapper. */
- tlbe = get_entry(vcpu_e500, 1, 1);
- tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
- tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
- tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
-}
-
static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
int i;
+ clear_tlb1_bitmap(vcpu_e500);
+ kfree(vcpu_e500->g2h_tlb1_map);
+
clear_tlb_refs(vcpu_e500);
kfree(vcpu_e500->gtlb_priv[0]);
kfree(vcpu_e500->gtlb_priv[1]);
@@ -1155,6 +1087,36 @@ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500->gtlb_arch = NULL;
}
+void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ sregs->u.e.mas0 = vcpu->arch.shared->mas0;
+ sregs->u.e.mas1 = vcpu->arch.shared->mas1;
+ sregs->u.e.mas2 = vcpu->arch.shared->mas2;
+ sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
+ sregs->u.e.mas4 = vcpu->arch.shared->mas4;
+ sregs->u.e.mas6 = vcpu->arch.shared->mas6;
+
+ sregs->u.e.mmucfg = vcpu->arch.mmucfg;
+ sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
+ sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
+ sregs->u.e.tlbcfg[2] = 0;
+ sregs->u.e.tlbcfg[3] = 0;
+}
+
+int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
+ vcpu->arch.shared->mas0 = sregs->u.e.mas0;
+ vcpu->arch.shared->mas1 = sregs->u.e.mas1;
+ vcpu->arch.shared->mas2 = sregs->u.e.mas2;
+ vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
+ vcpu->arch.shared->mas4 = sregs->u.e.mas4;
+ vcpu->arch.shared->mas6 = sregs->u.e.mas6;
+ }
+
+ return 0;
+}
+
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
struct kvm_config_tlb *cfg)
{
@@ -1163,6 +1125,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
char *virt;
struct page **pages;
struct tlbe_priv *privs[2] = {};
+ u64 *g2h_bitmap = NULL;
size_t array_len;
u32 sets;
int num_pages, ret, i;
@@ -1224,10 +1187,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
if (!privs[0] || !privs[1])
goto err_put_page;
+ g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
+ GFP_KERNEL);
+ if (!g2h_bitmap)
+ goto err_put_page;
+
free_gtlb(vcpu_e500);
vcpu_e500->gtlb_priv[0] = privs[0];
vcpu_e500->gtlb_priv[1] = privs[1];
+ vcpu_e500->g2h_tlb1_map = g2h_bitmap;
vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
(virt + (cfg->array & (PAGE_SIZE - 1)));
@@ -1238,14 +1207,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
vcpu_e500->gtlb_offset[0] = 0;
vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];
- vcpu_e500->tlb0cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+ vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;
+
+ vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
if (params.tlb_sizes[0] <= 2048)
- vcpu_e500->tlb0cfg |= params.tlb_sizes[0];
- vcpu_e500->tlb0cfg |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
+ vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0];
+ vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
- vcpu_e500->tlb1cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
- vcpu_e500->tlb1cfg |= params.tlb_sizes[1];
- vcpu_e500->tlb1cfg |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
+ vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+ vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1];
+ vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
vcpu_e500->shared_tlb_pages = pages;
vcpu_e500->num_shared_tlb_pages = num_pages;
@@ -1256,6 +1227,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
vcpu_e500->gtlb_params[1].sets = 1;
+ kvmppc_recalc_tlb1map_range(vcpu_e500);
return 0;
err_put_page:
@@ -1274,13 +1246,14 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
struct kvm_dirty_tlb *dirty)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-
+ kvmppc_recalc_tlb1map_range(vcpu_e500);
clear_tlb_refs(vcpu_e500);
return 0;
}
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
+ struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;
@@ -1357,22 +1330,32 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
if (!vcpu_e500->gtlb_priv[1])
goto err;
- if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
+ vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
+ vcpu_e500->gtlb_params[1].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->g2h_tlb1_map)
+ goto err;
+
+ vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
+ host_tlb_params[1].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->h2g_tlb1_rmap)
goto err;
/* Init TLB configuration register */
- vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) &
+ vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
- vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[0].entries;
- vcpu_e500->tlb0cfg |=
+ vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries;
+ vcpu->arch.tlbcfg[0] |=
vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;
- vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) &
+ vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
- vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[1].entries;
- vcpu_e500->tlb0cfg |=
+ vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries;
+ vcpu->arch.tlbcfg[1] |=
vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;
+ kvmppc_recalc_tlb1map_range(vcpu_e500);
return 0;
err:
@@ -1385,8 +1368,7 @@ err:
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
free_gtlb(vcpu_e500);
- kvmppc_e500_id_table_free(vcpu_e500);
-
+ kfree(vcpu_e500->h2g_tlb1_rmap);
kfree(vcpu_e500->tlb_refs[0]);
kfree(vcpu_e500->tlb_refs[1]);
}
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
deleted file mode 100644
index 5c6d2d7bf058..000000000000
--- a/arch/powerpc/kvm/e500_tlb.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Author: Yu Liu, yu.liu@freescale.com
- *
- * Description:
- * This file is based on arch/powerpc/kvm/44x_tlb.h,
- * by Hollis Blanchard <hollisb@us.ibm.com>.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- */
-
-#ifndef __KVM_E500_TLB_H__
-#define __KVM_E500_TLB_H__
-
-#include <linux/kvm_host.h>
-#include <asm/mmu-book3e.h>
-#include <asm/tlb.h>
-#include <asm/kvm_e500.h>
-
-/* This geometry is the legacy default -- can be overridden by userspace */
-#define KVM_E500_TLB0_WAY_SIZE 128
-#define KVM_E500_TLB0_WAY_NUM 2
-
-#define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
-#define KVM_E500_TLB1_SIZE 16
-
-#define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF))
-#define tlbsel_of(index) ((index) >> 16)
-#define esel_of(index) ((index) & 0xFFFF)
-
-#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
-#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
-#define MAS2_ATTRIB_MASK \
- (MAS2_X0 | MAS2_X1)
-#define MAS3_ATTRIB_MASK \
- (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
- | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
-
-extern void kvmppc_dump_tlbs(struct kvm_vcpu *);
-extern int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *, ulong);
-extern int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *);
-extern int kvmppc_e500_emul_tlbre(struct kvm_vcpu *);
-extern int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *, int, int);
-extern int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *, int);
-extern int kvmppc_e500_tlb_search(struct kvm_vcpu *, gva_t, unsigned int, int);
-extern void kvmppc_e500_tlb_put(struct kvm_vcpu *);
-extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int);
-extern int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *);
-extern void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *);
-extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
-extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *);
-
-/* TLB helper functions */
-static inline unsigned int
-get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return (tlbe->mas1 >> 7) & 0x1f;
-}
-
-static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return tlbe->mas2 & 0xfffff000;
-}
-
-static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- unsigned int pgsize = get_tlb_size(tlbe);
- return 1ULL << 10 << pgsize;
-}
-
-static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- u64 bytes = get_tlb_bytes(tlbe);
- return get_tlb_eaddr(tlbe) + bytes - 1;
-}
-
-static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return tlbe->mas7_3 & ~0xfffULL;
-}
-
-static inline unsigned int
-get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return (tlbe->mas1 >> 16) & 0xff;
-}
-
-static inline unsigned int
-get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return (tlbe->mas1 >> 12) & 0x1;
-}
-
-static inline unsigned int
-get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return (tlbe->mas1 >> 31) & 0x1;
-}
-
-static inline unsigned int
-get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return (tlbe->mas1 >> 30) & 0x1;
-}
-
-static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.pid & 0xff;
-}
-
-static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
-{
- return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
-}
-
-static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
-{
- return !!(vcpu->arch.shared->msr & MSR_PR);
-}
-
-static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
-{
- return (vcpu->arch.shared->mas6 >> 16) & 0xff;
-}
-
-static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.shared->mas6 & 0x1;
-}
-
-static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
-{
- /*
- * Manual says that tlbsel has 2 bits wide.
- * Since we only have two TLBs, only lower bit is used.
- */
- return (vcpu->arch.shared->mas0 >> 28) & 0x1;
-}
-
-static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.shared->mas0 & 0xfff;
-}
-
-static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
-{
- return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
-}
-
-static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
- const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- gpa_t gpa;
-
- if (!get_tlb_v(tlbe))
- return 0;
-
- /* Does it match current guest AS? */
- /* XXX what about IS != DS? */
- if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
- return 0;
-
- gpa = get_tlb_raddr(tlbe);
- if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
- /* Mapping is not for RAM. */
- return 0;
-
- return 1;
-}
-
-#endif /* __KVM_E500_TLB_H__ */
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
new file mode 100644
index 000000000000..fe6c1de6b701
--- /dev/null
+++ b/arch/powerpc/kvm/e500mc.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (C) 2010 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Varun Sethi, <varun.sethi@freescale.com>
+ *
+ * Description:
+ * This file is derived from arch/powerpc/kvm/e500.c,
+ * by Yu Liu <yu.liu@freescale.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <asm/reg.h>
+#include <asm/cputable.h>
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/dbell.h>
+
+#include "booke.h"
+#include "e500.h"
+
+void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
+{
+ enum ppc_dbell dbell_type;
+ unsigned long tag;
+
+ switch (type) {
+ case INT_CLASS_NONCRIT:
+ dbell_type = PPC_G_DBELL;
+ break;
+ case INT_CLASS_CRIT:
+ dbell_type = PPC_G_DBELL_CRIT;
+ break;
+ case INT_CLASS_MC:
+ dbell_type = PPC_G_DBELL_MC;
+ break;
+ default:
+ WARN_ONCE(1, "%s: unknown int type %d\n", __func__, type);
+ return;
+ }
+
+
+ tag = PPC_DBELL_LPID(vcpu->kvm->arch.lpid) | vcpu->vcpu_id;
+ mb();
+ ppc_msgsnd(dbell_type, 0, tag);
+}
+
+/* gtlbe must not be mapped by more than one host tlb entry */
+void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+ unsigned int tid, ts;
+ u32 val, eaddr, lpid;
+ unsigned long flags;
+
+ ts = get_tlb_ts(gtlbe);
+ tid = get_tlb_tid(gtlbe);
+ lpid = vcpu_e500->vcpu.kvm->arch.lpid;
+
+ /* We search the host TLB to invalidate its shadow TLB entry */
+ val = (tid << 16) | ts;
+ eaddr = get_tlb_eaddr(gtlbe);
+
+ local_irq_save(flags);
+
+ mtspr(SPRN_MAS6, val);
+ mtspr(SPRN_MAS5, MAS5_SGS | lpid);
+
+ asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
+ val = mfspr(SPRN_MAS1);
+ if (val & MAS1_VALID) {
+ mtspr(SPRN_MAS1, val & ~MAS1_VALID);
+ asm volatile("tlbwe");
+ }
+ mtspr(SPRN_MAS5, 0);
+ /* NOTE: tlbsx also updates mas8, so clear it for host tlbwe */
+ mtspr(SPRN_MAS8, 0);
+ isync();
+
+ local_irq_restore(flags);
+}
+
+void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.kvm->arch.lpid);
+ asm volatile("tlbilxlpid");
+ mtspr(SPRN_MAS5, 0);
+ local_irq_restore(flags);
+}
+
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
+{
+ vcpu->arch.pid = pid;
+}
+
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
+{
+}
+
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+ kvmppc_booke_vcpu_load(vcpu, cpu);
+
+ mtspr(SPRN_LPID, vcpu->kvm->arch.lpid);
+ mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
+ mtspr(SPRN_GPIR, vcpu->vcpu_id);
+ mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
+ mtspr(SPRN_EPLC, vcpu->arch.eplc);
+ mtspr(SPRN_EPSC, vcpu->arch.epsc);
+
+ mtspr(SPRN_GIVPR, vcpu->arch.ivpr);
+ mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
+ mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
+ mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0);
+ mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1);
+ mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2);
+ mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3);
+
+ mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0);
+ mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1);
+
+ mtspr(SPRN_GEPR, vcpu->arch.epr);
+ mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
+ mtspr(SPRN_GESR, vcpu->arch.shared->esr);
+
+ if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
+ kvmppc_e500_tlbil_all(vcpu_e500);
+
+ kvmppc_load_guest_fp(vcpu);
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.eplc = mfspr(SPRN_EPLC);
+ vcpu->arch.epsc = mfspr(SPRN_EPSC);
+
+ vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0);
+ vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1);
+ vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2);
+ vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3);
+
+ vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0);
+ vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1);
+
+ vcpu->arch.epr = mfspr(SPRN_GEPR);
+ vcpu->arch.shared->dar = mfspr(SPRN_GDEAR);
+ vcpu->arch.shared->esr = mfspr(SPRN_GESR);
+
+ vcpu->arch.oldpir = mfspr(SPRN_PIR);
+
+ kvmppc_booke_vcpu_put(vcpu);
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+ int r;
+
+ if (strcmp(cur_cpu_spec->cpu_name, "e500mc") == 0)
+ r = 0;
+ else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
+ r = 0;
+ else
+ r = -ENOTSUPP;
+
+ return r;
+}
+
+int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+ vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
+ SPRN_EPCR_DUVD;
+ vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP;
+ vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
+ vcpu->arch.epsc = vcpu->arch.eplc;
+
+ vcpu->arch.pvr = mfspr(SPRN_PVR);
+ vcpu_e500->svr = mfspr(SPRN_SVR);
+
+ vcpu->arch.cpu_type = KVM_CPU_E500MC;
+
+ return 0;
+}
+
+void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+ sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_PM |
+ KVM_SREGS_E_PC;
+ sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;
+
+ sregs->u.e.impl.fsl.features = 0;
+ sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
+ sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
+ sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
+
+ kvmppc_get_sregs_e500_tlb(vcpu, sregs);
+
+ sregs->u.e.ivor_high[3] =
+ vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
+ sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
+ sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
+
+ kvmppc_get_sregs_ivor(vcpu, sregs);
+}
+
+int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ int ret;
+
+ if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
+ vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
+ vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
+ vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
+ }
+
+ ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
+ if (ret < 0)
+ return ret;
+
+ if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
+ return 0;
+
+ if (sregs->u.e.features & KVM_SREGS_E_PM) {
+ vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
+ sregs->u.e.ivor_high[3];
+ }
+
+ if (sregs->u.e.features & KVM_SREGS_E_PC) {
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] =
+ sregs->u.e.ivor_high[4];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] =
+ sregs->u.e.ivor_high[5];
+ }
+
+ return kvmppc_set_sregs_ivor(vcpu, sregs);
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500;
+ struct kvm_vcpu *vcpu;
+ int err;
+
+ vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ if (!vcpu_e500) {
+ err = -ENOMEM;
+ goto out;
+ }
+ vcpu = &vcpu_e500->vcpu;
+
+ /* Invalid PIR value -- this LPID doesn't have valid state on any cpu */
+ vcpu->arch.oldpir = 0xffffffff;
+
+ err = kvm_vcpu_init(vcpu, kvm, id);
+ if (err)
+ goto free_vcpu;
+
+ err = kvmppc_e500_tlb_init(vcpu_e500);
+ if (err)
+ goto uninit_vcpu;
+
+ vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ if (!vcpu->arch.shared)
+ goto uninit_tlb;
+
+ return vcpu;
+
+uninit_tlb:
+ kvmppc_e500_tlb_uninit(vcpu_e500);
+uninit_vcpu:
+ kvm_vcpu_uninit(vcpu);
+
+free_vcpu:
+ kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
+out:
+ return ERR_PTR(err);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+ free_page((unsigned long)vcpu->arch.shared);
+ kvmppc_e500_tlb_uninit(vcpu_e500);
+ kvm_vcpu_uninit(vcpu);
+ kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ int lpid;
+
+ lpid = kvmppc_alloc_lpid();
+ if (lpid < 0)
+ return lpid;
+
+ kvm->arch.lpid = lpid;
+ return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+ kvmppc_free_lpid(kvm->arch.lpid);
+}
+
+static int __init kvmppc_e500mc_init(void)
+{
+ int r;
+
+ r = kvmppc_booke_init();
+ if (r)
+ return r;
+
+ kvmppc_init_lpid(64);
+ kvmppc_claim_lpid(0); /* host */
+
+ return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+}
+
+static void __exit kvmppc_e500mc_exit(void)
+{
+ kvmppc_booke_exit();
+}
+
+module_init(kvmppc_e500mc_init);
+module_exit(kvmppc_e500mc_exit);
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 968f40101883..f90e86dea7a2 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
+#include <linux/clockchips.h>
#include <asm/reg.h>
#include <asm/time.h>
@@ -35,7 +36,9 @@
#define OP_TRAP 3
#define OP_TRAP_64 2
+#define OP_31_XOP_TRAP 4
#define OP_31_XOP_LWZX 23
+#define OP_31_XOP_TRAP_64 68
#define OP_31_XOP_LBZX 87
#define OP_31_XOP_STWX 151
#define OP_31_XOP_STBX 215
@@ -102,8 +105,12 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
*/
dec_time = vcpu->arch.dec;
- dec_time *= 1000;
- do_div(dec_time, tb_ticks_per_usec);
+ /*
+ * The guest timebase ticks at the same frequency as the host
+ * decrementer, so use the host decrementer scaling for
+ * decrementer emulation.
+ */
+ dec_time = dec_time << decrementer_clockevent.shift;
+ do_div(dec_time, decrementer_clockevent.mult);
dec_nsec = do_div(dec_time, NSEC_PER_SEC);
hrtimer_start(&vcpu->arch.dec_timer,
ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
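[editor's note] As a rough standalone sketch of the scaling above: the clockevent mult/shift pair converts nanoseconds to decrementer ticks as ticks = (ns * mult) >> shift, so the emulation inverts it with ns = (ticks << shift) / mult before splitting off whole seconds. The mult/shift values below are invented for a roughly 100 MHz timebase and are not read from decrementer_clockevent:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mult = 429496730, shift = 32;	/* hypothetical ~100 MHz scaling */
	uint64_t ticks = 500000000;		/* made-up DEC value */

	uint64_t ns = (ticks << shift) / mult;	/* ticks -> nanoseconds */
	uint64_t sec = ns / 1000000000ULL;
	uint64_t rem = ns % 1000000000ULL;

	printf("%llu ticks ~= %llu s + %llu ns\n",
	       (unsigned long long)ticks,
	       (unsigned long long)sec,
	       (unsigned long long)rem);
	return 0;
}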
@@ -141,14 +148,13 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
u32 inst = kvmppc_get_last_inst(vcpu);
- u32 ea;
- int ra;
- int rb;
- int rs;
- int rt;
- int sprn;
+ int ra = get_ra(inst);
+ int rs = get_rs(inst);
+ int rt = get_rt(inst);
+ int sprn = get_sprn(inst);
enum emulation_result emulated = EMULATE_DONE;
int advance = 1;
+ ulong spr_val = 0;
/* this default type might be overwritten by subcategories */
kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
@@ -170,173 +176,143 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case 31:
switch (get_xop(inst)) {
+ case OP_31_XOP_TRAP:
+#ifdef CONFIG_64BIT
+ case OP_31_XOP_TRAP_64:
+#endif
+#ifdef CONFIG_PPC_BOOK3S
+ kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
+#else
+ kvmppc_core_queue_program(vcpu,
+ vcpu->arch.shared->esr | ESR_PTR);
+#endif
+ advance = 0;
+ break;
case OP_31_XOP_LWZX:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break;
case OP_31_XOP_LBZX:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
break;
case OP_31_XOP_LBZUX:
- rt = get_rt(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
-
- ea = kvmppc_get_gpr(vcpu, rb);
- if (ra)
- ea += kvmppc_get_gpr(vcpu, ra);
-
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
- kvmppc_set_gpr(vcpu, ra, ea);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_STWX:
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 1);
break;
case OP_31_XOP_STBX:
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
break;
case OP_31_XOP_STBUX:
- rs = get_rs(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
-
- ea = kvmppc_get_gpr(vcpu, rb);
- if (ra)
- ea += kvmppc_get_gpr(vcpu, ra);
-
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
- kvmppc_set_gpr(vcpu, rs, ea);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_LHAX:
- rt = get_rt(inst);
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
break;
case OP_31_XOP_LHZX:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
break;
case OP_31_XOP_LHZUX:
- rt = get_rt(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
-
- ea = kvmppc_get_gpr(vcpu, rb);
- if (ra)
- ea += kvmppc_get_gpr(vcpu, ra);
-
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
- kvmppc_set_gpr(vcpu, ra, ea);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_MFSPR:
- sprn = get_sprn(inst);
- rt = get_rt(inst);
-
switch (sprn) {
case SPRN_SRR0:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
+ spr_val = vcpu->arch.shared->srr0;
break;
case SPRN_SRR1:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
+ spr_val = vcpu->arch.shared->srr1;
break;
case SPRN_PVR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
+ spr_val = vcpu->arch.pvr;
+ break;
case SPRN_PIR:
- kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
+ spr_val = vcpu->vcpu_id;
+ break;
case SPRN_MSSSR0:
- kvmppc_set_gpr(vcpu, rt, 0); break;
+ spr_val = 0;
+ break;
/* Note: mftb and TBRL/TBWL are user-accessible, so
* the guest can always access the real TB anyways.
* In fact, we probably will never see these traps. */
case SPRN_TBWL:
- kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
+ spr_val = get_tb() >> 32;
+ break;
case SPRN_TBWU:
- kvmppc_set_gpr(vcpu, rt, get_tb()); break;
+ spr_val = get_tb();
+ break;
case SPRN_SPRG0:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
+ spr_val = vcpu->arch.shared->sprg0;
break;
case SPRN_SPRG1:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
+ spr_val = vcpu->arch.shared->sprg1;
break;
case SPRN_SPRG2:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
+ spr_val = vcpu->arch.shared->sprg2;
break;
case SPRN_SPRG3:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
+ spr_val = vcpu->arch.shared->sprg3;
break;
/* Note: SPRG4-7 are user-readable, so we don't get
* a trap. */
case SPRN_DEC:
- {
- kvmppc_set_gpr(vcpu, rt,
- kvmppc_get_dec(vcpu, get_tb()));
+ spr_val = kvmppc_get_dec(vcpu, get_tb());
break;
- }
default:
- emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
- if (emulated == EMULATE_FAIL) {
- printk("mfspr: unknown spr %x\n", sprn);
- kvmppc_set_gpr(vcpu, rt, 0);
+ emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
+ &spr_val);
+ if (unlikely(emulated == EMULATE_FAIL)) {
+ printk(KERN_INFO "mfspr: unknown spr "
+ "0x%x\n", sprn);
}
break;
}
+ kvmppc_set_gpr(vcpu, rt, spr_val);
kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
break;
case OP_31_XOP_STHX:
- rs = get_rs(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
-
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
break;
case OP_31_XOP_STHUX:
- rs = get_rs(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
-
- ea = kvmppc_get_gpr(vcpu, rb);
- if (ra)
- ea += kvmppc_get_gpr(vcpu, ra);
-
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
- kvmppc_set_gpr(vcpu, ra, ea);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_MTSPR:
- sprn = get_sprn(inst);
- rs = get_rs(inst);
+ spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_SRR0:
- vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->srr0 = spr_val;
break;
case SPRN_SRR1:
- vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->srr1 = spr_val;
break;
/* XXX We need to context-switch the timebase for
@@ -347,27 +323,29 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case SPRN_MSSSR0: break;
case SPRN_DEC:
- vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.dec = spr_val;
kvmppc_emulate_dec(vcpu);
break;
case SPRN_SPRG0:
- vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->sprg0 = spr_val;
break;
case SPRN_SPRG1:
- vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->sprg1 = spr_val;
break;
case SPRN_SPRG2:
- vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->sprg2 = spr_val;
break;
case SPRN_SPRG3:
- vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->sprg3 = spr_val;
break;
default:
- emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
+ emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
+ spr_val);
if (emulated == EMULATE_FAIL)
- printk("mtspr: unknown spr %x\n", sprn);
+ printk(KERN_INFO "mtspr: unknown spr "
+ "0x%x\n", sprn);
break;
}
kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
@@ -382,7 +360,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_31_XOP_LWBRX:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
break;
@@ -390,25 +367,16 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_31_XOP_STWBRX:
- rs = get_rs(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
-
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 0);
break;
case OP_31_XOP_LHBRX:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
break;
case OP_31_XOP_STHBRX:
- rs = get_rs(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
-
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 0);
@@ -421,99 +389,78 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_LWZ:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break;
case OP_LWZU:
- ra = get_ra(inst);
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_LBZ:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
break;
case OP_LBZU:
- ra = get_ra(inst);
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_STW:
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 1);
break;
case OP_STWU:
- ra = get_ra(inst);
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_STB:
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
break;
case OP_STBU:
- ra = get_ra(inst);
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_LHZ:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
break;
case OP_LHZU:
- ra = get_ra(inst);
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_LHA:
- rt = get_rt(inst);
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
break;
case OP_LHAU:
- ra = get_ra(inst);
- rt = get_rt(inst);
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_STH:
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
break;
case OP_STHU:
- ra = get_ra(inst);
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
default:
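The emulate.c hunks above stop converting the guest DEC value via tb_ticks_per_usec and instead reuse the host decrementer clockevent's mult/shift pair: a clockevent converts nanoseconds to ticks as ticks = (ns * mult) >> shift, so the emulation inverts that to ns = (ticks << shift) / mult before splitting the result into seconds and nanoseconds for the hrtimer. A minimal user-space sketch of that conversion follows; the mult/shift values and the DEC value are invented for illustration and are not taken from any real device.

/* Sketch only: models the tick -> nanosecond conversion used for DEC emulation. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint32_t mult  = 214748365;	/* hypothetical clockevent mult */
	uint32_t shift = 30;		/* hypothetical clockevent shift */
	uint64_t dec_ticks = 500000000;	/* hypothetical guest DEC value */

	uint64_t ns  = (dec_ticks << shift) / mult;
	uint64_t sec = ns / NSEC_PER_SEC;	/* like the do_div() quotient */
	uint64_t rem = ns % NSEC_PER_SEC;	/* like the do_div() remainder */

	printf("DEC of %llu ticks expires in %llus %lluns\n",
	       (unsigned long long)dec_ticks,
	       (unsigned long long)sec, (unsigned long long)rem);
	return 0;
}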
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 00d7e345b3fe..1493c8de947b 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -43,6 +43,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
v->requests;
}
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+ return 1;
+}
+
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
int nr = kvmppc_get_gpr(vcpu, 11);
@@ -74,7 +79,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
}
case HC_VENDOR_KVM | KVM_HC_FEATURES:
r = HC_EV_SUCCESS;
-#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
+#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
/* XXX Missing magic page on 44x */
r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif
@@ -109,6 +114,11 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
goto out;
#endif
+#ifdef CONFIG_KVM_BOOKE_HV
+ if (!cpu_has_feature(CPU_FTR_EMB_HV))
+ goto out;
+#endif
+
r = true;
out:
@@ -225,7 +235,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_OSI:
case KVM_CAP_PPC_GET_PVINFO:
-#ifdef CONFIG_KVM_E500
+#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
case KVM_CAP_SW_TLB:
#endif
r = 1;
@@ -234,10 +244,12 @@ int kvm_dev_ioctl_check_extension(long ext)
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
#endif
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
r = 1;
break;
+#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
case KVM_CAP_PPC_SMT:
r = threads_per_core;
break;
@@ -267,6 +279,11 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
+#ifdef CONFIG_PPC_BOOK3S_64
+ case KVM_CAP_PPC_GET_SMMU_INFO:
+ r = 1;
+ break;
+#endif
default:
r = 0;
break;
@@ -588,21 +605,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
return r;
}
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
-{
- int me;
- int cpu = vcpu->cpu;
-
- me = get_cpu();
- if (waitqueue_active(vcpu->arch.wqp)) {
- wake_up_interruptible(vcpu->arch.wqp);
- vcpu->stat.halt_wakeup++;
- } else if (cpu != me && cpu != -1) {
- smp_send_reschedule(vcpu->cpu);
- }
- put_cpu();
-}
-
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
if (irq->irq == KVM_INTERRUPT_UNSET) {
@@ -611,6 +613,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
}
kvmppc_core_queue_external(vcpu, irq);
+
kvm_vcpu_kick(vcpu);
return 0;
@@ -633,7 +636,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
r = 0;
vcpu->arch.papr_enabled = true;
break;
-#ifdef CONFIG_KVM_E500
+#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
case KVM_CAP_SW_TLB: {
struct kvm_config_tlb cfg;
void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
@@ -710,7 +713,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
-#ifdef CONFIG_KVM_E500
+#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
case KVM_DIRTY_TLB: {
struct kvm_dirty_tlb dirty;
r = -EFAULT;
@@ -720,7 +723,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
#endif
-
default:
r = -EINVAL;
}
@@ -777,7 +779,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
break;
}
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CREATE_SPAPR_TCE: {
struct kvm_create_spapr_tce create_tce;
struct kvm *kvm = filp->private_data;
@@ -788,7 +790,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
goto out;
}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
case KVM_ALLOCATE_RMA: {
struct kvm *kvm = filp->private_data;
struct kvm_allocate_rma rma;
@@ -800,6 +804,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#ifdef CONFIG_PPC_BOOK3S_64
+ case KVM_PPC_GET_SMMU_INFO: {
+ struct kvm *kvm = filp->private_data;
+ struct kvm_ppc_smmu_info info;
+
+ memset(&info, 0, sizeof(info));
+ r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
+ if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
+ r = -EFAULT;
+ break;
+ }
+#endif /* CONFIG_PPC_BOOK3S_64 */
default:
r = -ENOTTY;
}
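The hunk above wires up the new KVM_PPC_GET_SMMU_INFO VM ioctl for Book3S-64: the kernel fills a struct kvm_ppc_smmu_info and copies it back to user space. A hedged user-space sketch of calling it follows; the vm_fd is assumed to come from the usual KVM_CREATE_VM sequence (not shown), and it only builds against powerpc KVM headers that define this ioctl.

/* Sketch only: query the host MMU description through the new VM ioctl. */
#include <linux/kvm.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

static int query_smmu(int vm_fd)
{
	struct kvm_ppc_smmu_info info;

	memset(&info, 0, sizeof(info));
	if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0) {
		perror("KVM_PPC_GET_SMMU_INFO");
		return -1;
	}
	printf("SLB size: %u\n", info.slb_size);	/* one field, for illustration */
	return 0;
}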
@@ -808,6 +824,40 @@ out:
return r;
}
+static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
+static unsigned long nr_lpids;
+
+long kvmppc_alloc_lpid(void)
+{
+ long lpid;
+
+ do {
+ lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
+ if (lpid >= nr_lpids) {
+ pr_err("%s: No LPIDs free\n", __func__);
+ return -ENOMEM;
+ }
+ } while (test_and_set_bit(lpid, lpid_inuse));
+
+ return lpid;
+}
+
+void kvmppc_claim_lpid(long lpid)
+{
+ set_bit(lpid, lpid_inuse);
+}
+
+void kvmppc_free_lpid(long lpid)
+{
+ clear_bit(lpid, lpid_inuse);
+}
+
+void kvmppc_init_lpid(unsigned long nr_lpids_param)
+{
+ nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
+ memset(lpid_inuse, 0, sizeof(lpid_inuse));
+}
+
int kvm_arch_init(void *opaque)
{
return 0;
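Together with the e500mc hooks earlier in the patch, this hunk adds a small global LPID allocator: kvmppc_init_lpid() sizes the bitmap, kvmppc_claim_lpid(0) reserves LPID 0 for the host, and each VM takes the lowest free ID from kvmppc_alloc_lpid() until kvmppc_core_destroy_vm() returns it via kvmppc_free_lpid(). Below is a rough user-space model of the same "find a clear bit, then atomically claim it" pattern, written with GCC atomic builtins instead of the kernel bitmap helpers; the single-word map and the function names are simplifications, not the kernel interface.

/* Sketch only: lock-free bitmap ID allocator in the style of kvmppc_alloc_lpid(). */
#include <stdio.h>

static unsigned long lpid_inuse = 1UL;		/* bit 0 pre-claimed for the host */

static long alloc_lpid(void)
{
	for (;;) {
		unsigned long map = __atomic_load_n(&lpid_inuse, __ATOMIC_RELAXED);
		long lpid;

		if (map == ~0UL)
			return -1;		/* no LPIDs free */
		lpid = __builtin_ctzl(~map);	/* lowest clear bit */
		/* Claim it atomically; retry if another thread raced us to it. */
		if (!(__atomic_fetch_or(&lpid_inuse, 1UL << lpid,
					__ATOMIC_ACQ_REL) & (1UL << lpid)))
			return lpid;
	}
}

static void free_lpid(long lpid)
{
	__atomic_fetch_and(&lpid_inuse, ~(1UL << lpid), __ATOMIC_RELEASE);
}

int main(void)
{
	long a = alloc_lpid();	/* first guest: LPID 1, since 0 is the host */
	long b = alloc_lpid();

	printf("allocated LPIDs %ld and %ld\n", a, b);
	free_lpid(b);
	free_lpid(a);
	return 0;
}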
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h
index 8167d42a776f..bf191e72b2d8 100644
--- a/arch/powerpc/kvm/timing.h
+++ b/arch/powerpc/kvm/timing.h
@@ -93,6 +93,12 @@ static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
case SIGNAL_EXITS:
vcpu->stat.signal_exits++;
break;
+ case DBELL_EXITS:
+ vcpu->stat.dbell_exits++;
+ break;
+ case GDBELL_EXITS:
+ vcpu->stat.gdbell_exits++;
+ break;
}
}
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index 455881a5563f..093d6316435c 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -160,48 +160,3 @@ _GLOBAL(__clear_user)
PPC_LONG 1b,91b
PPC_LONG 8b,92b
.text
-
-_GLOBAL(__strncpy_from_user)
- addi r6,r3,-1
- addi r4,r4,-1
- cmpwi 0,r5,0
- beq 2f
- mtctr r5
-1: lbzu r0,1(r4)
- cmpwi 0,r0,0
- stbu r0,1(r6)
- bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
- beq 3f
-2: addi r6,r6,1
-3: subf r3,r3,r6
- blr
-99: li r3,-EFAULT
- blr
-
- .section __ex_table,"a"
- PPC_LONG 1b,99b
- .text
-
-/* r3 = str, r4 = len (> 0), r5 = top (highest addr) */
-_GLOBAL(__strnlen_user)
- addi r7,r3,-1
- subf r6,r7,r5 /* top+1 - str */
- cmplw 0,r4,r6
- bge 0f
- mr r6,r4
-0: mtctr r6 /* ctr = min(len, top - str) */
-1: lbzu r0,1(r7) /* get next byte */
- cmpwi 0,r0,0
- bdnzf 2,1b /* loop if --ctr != 0 && byte != 0 */
- addi r7,r7,1
- subf r3,r3,r7 /* number of bytes we have looked at */
- beqlr /* return if we found a 0 byte */
- cmpw 0,r3,r4 /* did we look at all len bytes? */
- blt 99f /* if not, must have hit top */
- addi r3,r4,1 /* return len + 1 to indicate no null found */
- blr
-99: li r3,0 /* bad address, return 0 */
- blr
-
- .section __ex_table,"a"
- PPC_LONG 1b,99b
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 5b63bd3da4a9..e779642c25e5 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -333,9 +333,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned int)(long)hcpu;
-#ifdef CONFIG_HOTPLUG_CPU
- struct task_struct *p;
-#endif
+
/* We don't touch CPU 0 map, it's allocated at aboot and kept
Typo note aside, the comment should read "allocated at boot", not "at aboot".
* around forever
*/
@@ -358,12 +356,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
stale_map[cpu] = NULL;
/* We also clear the cpu_vm_mask bits of CPUs going away */
- read_lock(&tasklist_lock);
- for_each_process(p) {
- if (p->mm)
- cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
- }
- read_unlock(&tasklist_lock);
+ clear_tasks_mm_cpumask(cpu);
break;
#endif /* CONFIG_HOTPLUG_CPU */
}
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 1d75c92ea8fb..66519d263da7 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -151,7 +151,7 @@ static void
spufs_evict_inode(struct inode *inode)
{
struct spufs_inode_info *ei = SPUFS_I(inode);
- end_writeback(inode);
+ clear_inode(inode);
if (ei->i_ctx)
put_spu_context(ei->i_ctx);
if (ei->i_gang)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index e16390c0bca8..a39b4690c171 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -28,12 +28,6 @@ config ARCH_HAS_ILOG2_U64
config GENERIC_HWEIGHT
def_bool y
-config GENERIC_TIME_VSYSCALL
- def_bool y
-
-config GENERIC_CLOCKEVENTS
- def_bool y
-
config GENERIC_BUG
def_bool y if BUG
@@ -93,6 +87,7 @@ config S390
select ARCH_SAVE_PAGE_KEYS if HIBERNATION
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
+ select HAVE_CMPXCHG_LOCAL
select ARCH_DISCARD_MEMBLOCK
select ARCH_INLINE_SPIN_TRYLOCK
select ARCH_INLINE_SPIN_TRYLOCK_BH
@@ -123,6 +118,9 @@ config S390
select ARCH_INLINE_WRITE_UNLOCK_IRQ
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_TIME_VSYSCALL
+ select GENERIC_CLOCKEVENTS
+ select KTIME_SCALAR if 32BIT
config SCHED_OMIT_FRAME_POINTER
def_bool y
@@ -135,8 +133,6 @@ menu "Base setup"
comment "Processor type and features"
-source "kernel/time/Kconfig"
-
config 64BIT
def_bool y
prompt "64 bit kernel"
@@ -147,9 +143,6 @@ config 64BIT
config 32BIT
def_bool y if !64BIT
-config KTIME_SCALAR
- def_bool 32BIT
-
config SMP
def_bool y
prompt "Symmetric multi-processing support"
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 6a2cb560e968..73dae8b9b77a 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -115,7 +115,7 @@ static struct inode *hypfs_make_inode(struct super_block *sb, umode_t mode)
static void hypfs_evict_inode(struct inode *inode)
{
- end_writeback(inode);
+ clear_inode(inode);
kfree(inode->i_private);
}
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index e5beb490959b..a6ff5a83e227 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -13,8 +13,6 @@
*
*/
-#ifdef __KERNEL__
-
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
@@ -63,7 +61,7 @@ extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define __BITOPS_ALIGN 3
#define __BITOPS_WORDSIZE 32
@@ -83,7 +81,7 @@ extern const char _sb_findmap[];
: "d" (__val), "Q" (*(unsigned long *) __addr) \
: "cc");
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
#define __BITOPS_ALIGN 7
#define __BITOPS_WORDSIZE 64
@@ -103,7 +101,7 @@ extern const char _sb_findmap[];
: "d" (__val), "Q" (*(unsigned long *) __addr) \
: "cc");
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
@@ -412,7 +410,7 @@ static inline unsigned long __ffz_word_loop(const unsigned long *addr,
unsigned long bytes = 0;
asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
" ahi %1,-1\n"
" sra %1,5\n"
" jz 1f\n"
@@ -449,7 +447,7 @@ static inline unsigned long __ffs_word_loop(const unsigned long *addr,
unsigned long bytes = 0;
asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
" ahi %1,-1\n"
" sra %1,5\n"
" jz 1f\n"
@@ -481,7 +479,7 @@ static inline unsigned long __ffs_word_loop(const unsigned long *addr,
*/
static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
if ((word & 0xffffffff) == 0xffffffff) {
word >>= 32;
nr += 32;
@@ -505,7 +503,7 @@ static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
*/
static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
if ((word & 0xffffffff) == 0) {
word >>= 32;
nr += 32;
@@ -546,7 +544,7 @@ static inline unsigned long __load_ulong_le(const unsigned long *p,
unsigned long word;
p = (unsigned long *)((unsigned long) p + offset);
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
asm volatile(
" ic %0,%O1(%R1)\n"
" icm %0,2,%O1+1(%R1)\n"
@@ -834,7 +832,4 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
#include <asm-generic/bitops/ext2-atomic-setbit.h>
-
-#endif /* __KERNEL__ */
-
#endif /* _S390_BITOPS_H */
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index fc50a3342da3..4c8d4d5b8bd2 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -10,8 +10,6 @@
#include <linux/spinlock.h>
#include <asm/types.h>
-#ifdef __KERNEL__
-
#define LPM_ANYPATH 0xff
#define __MAX_CSSID 0
@@ -291,5 +289,3 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl);
int chsc_sstpi(void *page, void *result, size_t size);
#endif
-
-#endif
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 81d7908416cf..8d798e962b63 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -29,7 +29,7 @@ static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
" cs %0,0,%4\n"
" jl 0b\n"
: "=&d" (old), "=Q" (*(int *) addr)
- : "d" (x << shift), "d" (~(255 << shift)),
+ : "d" ((x & 0xff) << shift), "d" (~(0xff << shift)),
"Q" (*(int *) addr) : "memory", "cc", "0");
return old >> shift;
case 2:
@@ -44,7 +44,7 @@ static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
" cs %0,0,%4\n"
" jl 0b\n"
: "=&d" (old), "=Q" (*(int *) addr)
- : "d" (x << shift), "d" (~(65535 << shift)),
+ : "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)),
"Q" (*(int *) addr) : "memory", "cc", "0");
return old >> shift;
case 4:
@@ -113,9 +113,10 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
" nr %1,%5\n"
" jnz 0b\n"
"1:"
- : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
- : "d" (old << shift), "d" (new << shift),
- "d" (~(255 << shift)), "Q" (*(int *) ptr)
+ : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
+ : "d" ((old & 0xff) << shift),
+ "d" ((new & 0xff) << shift),
+ "d" (~(0xff << shift))
: "memory", "cc");
return prev >> shift;
case 2:
@@ -134,9 +135,10 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
" nr %1,%5\n"
" jnz 0b\n"
"1:"
- : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
- : "d" (old << shift), "d" (new << shift),
- "d" (~(65535 << shift)), "Q" (*(int *) ptr)
+ : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
+ : "d" ((old & 0xffff) << shift),
+ "d" ((new & 0xffff) << shift),
+ "d" (~(0xffff << shift))
: "memory", "cc");
return prev >> shift;
case 4:
@@ -160,9 +162,14 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
return old;
}
-#define cmpxchg(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n), \
+ sizeof(*(ptr))); \
+ __ret; \
+})
#ifdef CONFIG_64BIT
#define cmpxchg64(ptr, o, n) \
@@ -181,13 +188,19 @@ static inline unsigned long long __cmpxchg64(void *ptr,
" cds %0,%2,%1"
: "+&d" (rp_old), "=Q" (ptr)
: "d" (rp_new), "Q" (ptr)
- : "cc");
+ : "memory", "cc");
return rp_old.pair;
}
-#define cmpxchg64(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg64((ptr), \
- (unsigned long long)(o), \
- (unsigned long long)(n)))
+
+#define cmpxchg64(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __cmpxchg64((ptr), \
+ (unsigned long long)(o), \
+ (unsigned long long)(n)); \
+ __ret; \
+})
#endif /* CONFIG_64BIT */
#include <asm-generic/cmpxchg-local.h>
@@ -216,8 +229,13 @@ static inline unsigned long __cmpxchg_local(void *ptr,
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))); \
+ __ret; \
+})
#define cmpxchg64_local(ptr, o, n) cmpxchg64((ptr), (o), (n))
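Two things change in cmpxchg.h: the 1- and 2-byte variants now mask their operands to 8 or 16 bits before shifting (so high bits of a wider argument cannot spill into the neighbouring bytes of the word being operated on), and cmpxchg()/cmpxchg64()/cmpxchg_local() become GNU statement expressions that return the result through a temporary typed as *(ptr). A hedged user-space sketch of that macro pattern follows; __my_cmpxchg and __sync_val_compare_and_swap stand in for the real assembler helper and are not the s390 implementation.

/* Sketch only: statement-expression wrapper that preserves the pointee type. */
#include <stdio.h>

static unsigned long __my_cmpxchg(void *ptr, unsigned long old,
				  unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __sync_val_compare_and_swap((unsigned int *)ptr,
						   (unsigned int)old,
						   (unsigned int)new);
	default:
		return __sync_val_compare_and_swap((unsigned long *)ptr,
						   old, new);
	}
}

#define my_cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__my_cmpxchg((ptr), (unsigned long)(o),			\
			     (unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})

int main(void)
{
	unsigned int v = 1;
	unsigned int prev = my_cmpxchg(&v, 1, 2);	/* swap succeeds */

	printf("prev=%u now=%u\n", prev, v);		/* prev=1 now=2 */
	return 0;
}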
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 24ef186a1c4f..718374de9c7f 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -21,15 +21,15 @@ typedef unsigned long long __nocast cputime64_t;
static inline unsigned long __div(unsigned long long n, unsigned long base)
{
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
register_pair rp;
rp.pair = n >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
return rp.subreg.odd;
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
return n / base;
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
}
#define cputime_one_jiffy jiffies_to_cputime(1)
@@ -100,7 +100,7 @@ static inline void cputime_to_timespec(const cputime_t cputime,
struct timespec *value)
{
unsigned long long __cputime = (__force unsigned long long) cputime;
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
register_pair rp;
rp.pair = __cputime >> 1;
@@ -128,7 +128,7 @@ static inline void cputime_to_timeval(const cputime_t cputime,
struct timeval *value)
{
unsigned long long __cputime = (__force unsigned long long) cputime;
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
register_pair rp;
rp.pair = __cputime >> 1;
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index ecde9417d669..debfda33d1f8 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -7,7 +7,7 @@
#ifndef __ASM_CTL_REG_H
#define __ASM_CTL_REG_H
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
#define __ctl_load(array, low, high) ({ \
typedef struct { char _[sizeof(array)]; } addrtype; \
@@ -25,7 +25,7 @@
: "i" (low), "i" (high)); \
})
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
#define __ctl_load(array, low, high) ({ \
typedef struct { char _[sizeof(array)]; } addrtype; \
@@ -43,7 +43,7 @@
: "i" (low), "i" (high)); \
})
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
#define __ctl_set_bit(cr, bit) ({ \
unsigned long __dummy; \
diff --git a/arch/s390/include/asm/current.h b/arch/s390/include/asm/current.h
index 83cf36cde2da..7a68084ec2f0 100644
--- a/arch/s390/include/asm/current.h
+++ b/arch/s390/include/asm/current.h
@@ -11,13 +11,10 @@
#ifndef _S390_CURRENT_H
#define _S390_CURRENT_H
-#ifdef __KERNEL__
#include <asm/lowcore.h>
struct task_struct;
#define current ((struct task_struct *const)S390_lowcore.current_task)
-#endif
-
#endif /* !(_S390_CURRENT_H) */
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index c4ee39f7a4d6..06151e6a3098 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -107,11 +107,11 @@
/*
* These are used to set parameters in the core dumps.
*/
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define ELF_CLASS ELFCLASS32
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
#define ELF_CLASS ELFCLASS64
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_S390
@@ -181,9 +181,9 @@ extern unsigned long elf_hwcap;
extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
#define SET_PERSONALITY(ex) \
do { \
if (personality(current->personality) != PER_LINUX32) \
@@ -194,7 +194,7 @@ do { \
else \
clear_thread_flag(TIF_31BIT); \
} while (0)
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
#define STACK_RND_MASK 0x7ffUL
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 81cf36b691f1..96bc83ea5c90 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -1,8 +1,6 @@
#ifndef _ASM_S390_FUTEX_H
#define _ASM_S390_FUTEX_H
-#ifdef __KERNEL__
-
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
@@ -48,5 +46,4 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
}
-#endif /* __KERNEL__ */
#endif /* _ASM_S390_FUTEX_H */
diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h
index aae276d00383..aef0dde340d1 100644
--- a/arch/s390/include/asm/idals.h
+++ b/arch/s390/include/asm/idals.h
@@ -20,7 +20,7 @@
#include <asm/cio.h>
#include <asm/uaccess.h>
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
#else
#define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
@@ -33,7 +33,7 @@
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
return ((__pa(vaddr) + length - 1) >> 31) != 0;
#else
return 0;
@@ -78,7 +78,7 @@ static inline unsigned long *idal_create_words(unsigned long *idaws,
static inline int
set_normalized_cda(struct ccw1 * ccw, void *vaddr)
{
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
unsigned int nridaws;
unsigned long *idal;
@@ -105,7 +105,7 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
static inline void
clear_normalized_cda(struct ccw1 * ccw)
{
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
if (ccw->flags & CCW_FLAG_IDA) {
kfree((void *)(unsigned long) ccw->cda);
ccw->flags &= ~CCW_FLAG_IDA;
@@ -182,7 +182,7 @@ idal_buffer_free(struct idal_buffer *ib)
static inline int
__idal_buffer_is_needed(struct idal_buffer *ib)
{
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
return ib->size > (4096ul << ib->page_order) ||
idal_is_needed(ib->data[0], ib->size);
#else
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 27216d317991..f81a0975cbea 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -11,8 +11,6 @@
#ifndef _S390_IO_H
#define _S390_IO_H
-#ifdef __KERNEL__
-
#include <asm/page.h>
#define IO_SPACE_LIMIT 0xffffffff
@@ -46,6 +44,4 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
*/
#define xlate_dev_kmem_ptr(p) p
-#endif /* __KERNEL__ */
-
#endif
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 5289cacd4861..2b9d41899d21 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -17,7 +17,8 @@ enum interruption_class {
EXTINT_VRT,
EXTINT_SCP,
EXTINT_IUC,
- EXTINT_CPM,
+ EXTINT_CMS,
+ EXTINT_CMC,
IOINT_CIO,
IOINT_QAI,
IOINT_DAS,
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index 3f30dac804ea..f4f38826eebb 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -10,10 +10,8 @@
#ifndef _S390_KEXEC_H
#define _S390_KEXEC_H
-#ifdef __KERNEL__
-#include <asm/page.h>
-#endif
#include <asm/processor.h>
+#include <asm/page.h>
/*
* KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
* I.e. Maximum page that is mapped directly into kernel memory,
diff --git a/arch/s390/include/asm/kmap_types.h b/arch/s390/include/asm/kmap_types.h
index 94ec3ee07983..0a88622339ee 100644
--- a/arch/s390/include/asm/kmap_types.h
+++ b/arch/s390/include/asm/kmap_types.h
@@ -1,8 +1,6 @@
-#ifdef __KERNEL__
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H
#include <asm-generic/kmap_types.h>
#endif
-#endif /* __KERNEL__ */
diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h
index 96076676e224..bdcbe0f8dd7b 100644
--- a/arch/s390/include/asm/kvm.h
+++ b/arch/s390/include/asm/kvm.h
@@ -52,4 +52,9 @@ struct kvm_sync_regs {
__u32 acrs[16]; /* access registers */
__u64 crs[16]; /* control registers */
};
+
+#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
+#define KVM_REG_S390_EPOCHDIFF (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x2)
+#define KVM_REG_S390_CPU_TIMER (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x3)
+#define KVM_REG_S390_CLOCK_COMP (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x4)
#endif
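The kvm.h hunk exports four new s390 one-reg IDs (TOD programmable register, epoch difference, CPU timer, clock comparator) so user space can read and write them through the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG interface. A hedged sketch of reading one of them follows; the vcpu_fd is assumed to come from the usual KVM_CREATE_VM/KVM_CREATE_VCPU sequence (not shown), and the KVM_REG_S390_* constants are only present when building against s390 KVM headers that contain this change.

/* Sketch only: read the guest CPU timer through the one-reg interface. */
#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int read_cpu_timer(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (uint64_t)(unsigned long)val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
		perror("KVM_GET_ONE_REG");
		return -1;
	}
	return 0;
}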
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 7343872890a2..dd17537b9a9d 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -148,6 +148,7 @@ struct kvm_vcpu_stat {
u32 instruction_sigp_restart;
u32 diagnose_10;
u32 diagnose_44;
+ u32 diagnose_9c;
};
struct kvm_s390_io_info {
diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h
index 6964db226f83..a98832961035 100644
--- a/arch/s390/include/asm/kvm_para.h
+++ b/arch/s390/include/asm/kvm_para.h
@@ -149,6 +149,11 @@ static inline unsigned int kvm_arch_para_features(void)
return 0;
}
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
#endif
#endif /* __S390_KVM_PARA_H */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 5d09e405c54d..69bdf72e95ec 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -49,7 +49,7 @@ static inline int init_new_context(struct task_struct *tsk,
#define destroy_context(mm) do { } while (0)
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
diff --git a/arch/s390/include/asm/module.h b/arch/s390/include/asm/module.h
index 1cc1c5af705a..f0b6b26b6e59 100644
--- a/arch/s390/include/asm/module.h
+++ b/arch/s390/include/asm/module.h
@@ -28,7 +28,7 @@ struct mod_arch_specific
struct mod_arch_syminfo *syminfo;
};
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
#define ElfW(x) Elf64_ ## x
#define ELFW(x) ELF64_ ## x
#else
diff --git a/arch/s390/include/asm/os_info.h b/arch/s390/include/asm/os_info.h
index d07518af09ea..295f2c4f1c96 100644
--- a/arch/s390/include/asm/os_info.h
+++ b/arch/s390/include/asm/os_info.h
@@ -13,7 +13,6 @@
#define OS_INFO_VMCOREINFO 0
#define OS_INFO_REIPL_BLOCK 1
-#define OS_INFO_INIT_FN 2
struct os_info_entry {
u64 addr;
@@ -28,8 +27,8 @@ struct os_info {
u16 version_minor;
u64 crashkernel_addr;
u64 crashkernel_size;
- struct os_info_entry entry[3];
- u8 reserved[4004];
+ struct os_info_entry entry[2];
+ u8 reserved[4024];
} __packed;
void os_info_init(void);
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 0fbd1899c7b0..6537e72e0853 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -15,7 +15,7 @@
* per cpu area, use weak definitions to force the compiler to
* generate external references.
*/
-#if defined(CONFIG_SMP) && defined(__s390x__) && defined(MODULE)
+#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
#define ARCH_NEEDS_WEAK_PER_CPU
#endif
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 78e3041919de..43078c194394 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -48,7 +48,7 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
clear_table(crst, entry, sizeof(unsigned long)*2048);
}
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
@@ -64,7 +64,7 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
#define pgd_populate(mm, pgd, pud) BUG()
#define pud_populate(mm, pud, pmd) BUG()
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
@@ -106,7 +106,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 011358c1b18e..b3227415abda 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -74,15 +74,15 @@ static inline int is_zero_pfn(unsigned long pfn)
* table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
*/
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
# define PMD_SHIFT 20
# define PUD_SHIFT 20
# define PGDIR_SHIFT 20
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
# define PMD_SHIFT 20
# define PUD_SHIFT 31
# define PGDIR_SHIFT 42
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
@@ -98,13 +98,13 @@ static inline int is_zero_pfn(unsigned long pfn)
* that leads to 1024 pte per pgd
*/
#define PTRS_PER_PTE 256
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define PTRS_PER_PMD 1
#define PTRS_PER_PUD 1
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
#define PTRS_PER_PMD 2048
#define PTRS_PER_PUD 2048
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD 2048
#define FIRST_USER_ADDRESS 0
@@ -276,7 +276,7 @@ extern struct page *vmemmap;
* swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
*/
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */
@@ -308,7 +308,7 @@ extern struct page *vmemmap;
#define KVM_UR_BIT 0x00008000UL
#define KVM_UC_BIT 0x00004000UL
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */
@@ -363,7 +363,7 @@ extern struct page *vmemmap;
#define KVM_UR_BIT 0x0000800000000000UL
#define KVM_UC_BIT 0x0000400000000000UL
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
/*
* A user page table pointer has the space-switch-event bit, the
@@ -424,7 +424,7 @@ static inline int mm_has_pgste(struct mm_struct *mm)
/*
* pgd/pmd/pte query functions
*/
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd) { return 0; }
@@ -434,7 +434,7 @@ static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
static inline int pgd_present(pgd_t pgd)
{
@@ -490,7 +490,7 @@ static inline int pud_bad(pud_t pud)
return (pud_val(pud) & mask) != 0;
}
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
static inline int pmd_present(pmd_t pmd)
{
@@ -741,7 +741,7 @@ static inline int pte_young(pte_t pte)
static inline void pgd_clear(pgd_t *pgd)
{
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
@@ -749,7 +749,7 @@ static inline void pgd_clear(pgd_t *pgd)
static inline void pud_clear(pud_t *pud)
{
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
@@ -921,7 +921,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
/* pto must point to the start of the segment table */
pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
@@ -1116,7 +1116,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
@@ -1125,7 +1125,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
@@ -1147,7 +1147,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
return pmd + pmd_index(address);
}
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
@@ -1196,7 +1196,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
* 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
* 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
*/
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
@@ -1217,11 +1217,11 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS 26
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS 59
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
#define pte_to_pgoff(__pte) \
((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6cbf31311673..20d0585cf905 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -20,7 +20,6 @@
#include <asm/ptrace.h>
#include <asm/setup.h>
-#ifdef __KERNEL__
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
@@ -33,39 +32,33 @@ static inline void get_cpu_id(struct cpuid *ptr)
}
extern void s390_adjust_jiffies(void);
-extern int get_cpu_capability(unsigned int *);
extern const struct seq_operations cpuinfo_op;
extern int sysctl_ieee_emulation_warnings;
/*
* User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
*/
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define TASK_SIZE (1UL << 31)
#define TASK_UNMAPPED_BASE (1UL << 30)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
(1UL << 30) : (1UL << 41))
#define TASK_SIZE TASK_SIZE_OF(current)
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
-#ifdef __KERNEL__
-
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define STACK_TOP (1UL << 31)
#define STACK_TOP_MAX (1UL << 31)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
#define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
#define STACK_TOP_MAX (1UL << 42)
-#endif /* __s390x__ */
-
-
-#endif
+#endif /* CONFIG_64BIT */
#define HAVE_ARCH_PICK_MMAP_LAYOUT
@@ -182,7 +175,7 @@ static inline void psw_set_key(unsigned int key)
*/
static inline void __load_psw(psw_t psw)
{
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
asm volatile("lpsw %0" : : "Q" (psw) : "cc");
#else
asm volatile("lpswe %0" : : "Q" (psw) : "cc");
@@ -200,7 +193,7 @@ static inline void __load_psw_mask (unsigned long mask)
psw.mask = mask;
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
asm volatile(
" basr %0,0\n"
"0: ahi %0,1f-0b\n"
@@ -208,14 +201,14 @@ static inline void __load_psw_mask (unsigned long mask)
" lpsw %1\n"
"1:"
: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
asm volatile(
" larl %0,1f\n"
" stg %0,%O1+8(%R1)\n"
" lpswe %1\n"
"1:"
: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
}
/*
@@ -223,7 +216,7 @@ static inline void __load_psw_mask (unsigned long mask)
*/
static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
{
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
if (psw.addr & PSW_ADDR_AMODE)
/* 31 bit mode */
return (psw.addr - ilc) | PSW_ADDR_AMODE;
@@ -253,7 +246,7 @@ static inline void __noreturn disabled_wait(unsigned long code)
* Store status and then load disabled wait psw,
* the processor is dead afterwards
*/
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
asm volatile(
" stctl 0,0,0(%2)\n"
" ni 0(%2),0xef\n" /* switch off protection */
@@ -272,7 +265,7 @@ static inline void __noreturn disabled_wait(unsigned long code)
" lpsw 0(%1)"
: "=m" (ctl_buf)
: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc");
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
asm volatile(
" stctg 0,0,0(%2)\n"
" ni 4(%2),0xef\n" /* switch off protection */
@@ -305,7 +298,7 @@ static inline void __noreturn disabled_wait(unsigned long code)
" lpswe 0(%1)"
: "=m" (ctl_buf)
: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
while (1);
}
@@ -338,12 +331,10 @@ extern void (*s390_base_ext_handler_fn)(void);
#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
-#endif
-
/*
* Helper macro for exception table entries
*/
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define EX_TABLE(_fault,_target) \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index d0eb4653cebd..1ceee10264c3 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -41,19 +41,17 @@
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif
-#ifdef __KERNEL__
-
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
@@ -65,19 +63,19 @@ static inline void __down_read(struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" ahi %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
@@ -93,7 +91,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: ltr %1,%0\n"
" jm 1f\n"
@@ -101,7 +99,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
" cs %0,%1,%2\n"
" jl 0b\n"
"1:"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: ltgr %1,%0\n"
" jm 1f\n"
@@ -109,7 +107,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
" csg %0,%1,%2\n"
" jl 0b\n"
"1:"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
@@ -125,19 +123,19 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
tmp = RWSEM_ACTIVE_WRITE_BIAS;
asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" a %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
@@ -158,19 +156,19 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
signed long old;
asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
" l %0,%1\n"
"0: ltr %0,%0\n"
" jnz 1f\n"
" cs %0,%3,%1\n"
" jl 0b\n"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
" lg %0,%1\n"
"0: ltgr %0,%0\n"
" jnz 1f\n"
" csg %0,%3,%1\n"
" jl 0b\n"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
"1:"
: "=&d" (old), "=Q" (sem->count)
: "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
@@ -186,19 +184,19 @@ static inline void __up_read(struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" ahi %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
@@ -216,19 +214,19 @@ static inline void __up_write(struct rw_semaphore *sem)
tmp = -RWSEM_ACTIVE_WRITE_BIAS;
asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" a %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
@@ -246,19 +244,19 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
tmp = -RWSEM_WAITING_BIAS;
asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" a %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
@@ -274,19 +272,19 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" ar %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" agr %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "d" (delta)
: "cc", "memory");
@@ -300,24 +298,23 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
" l %0,%2\n"
"0: lr %1,%0\n"
" ar %1,%4\n"
" cs %0,%1,%2\n"
" jl 0b"
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" agr %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "d" (delta)
: "cc", "memory");
return new;
}
-#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index fed7bee650a0..bf238c55740b 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -48,6 +48,7 @@ int sclp_cpu_deconfigure(u8 cpu);
void sclp_facilities_detect(void);
unsigned long long sclp_get_rnmax(void);
unsigned long long sclp_get_rzm(void);
+u8 sclp_get_fac85(void);
int sclp_sdias_blk_count(void);
int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
int sclp_chp_configure(struct chp_id chpid);
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 7244e1f64126..40eb2ff88e9e 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -22,19 +22,19 @@
#include <asm/lowcore.h>
#include <asm/types.h>
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define IPL_DEVICE (*(unsigned long *) (0x10404))
#define INITRD_START (*(unsigned long *) (0x1040C))
#define INITRD_SIZE (*(unsigned long *) (0x10414))
#define OLDMEM_BASE (*(unsigned long *) (0x1041C))
#define OLDMEM_SIZE (*(unsigned long *) (0x10424))
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
#define IPL_DEVICE (*(unsigned long *) (0x10400))
#define INITRD_START (*(unsigned long *) (0x10408))
#define INITRD_SIZE (*(unsigned long *) (0x10410))
#define OLDMEM_BASE (*(unsigned long *) (0x10418))
#define OLDMEM_SIZE (*(unsigned long *) (0x10420))
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
#define COMMAND_LINE ((char *) (0x10480))
#define CHUNK_READ_WRITE 0
@@ -89,7 +89,7 @@ extern unsigned int user_mode;
#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define MACHINE_HAS_IEEE (S390_lowcore.machine_flags & MACHINE_FLAG_IEEE)
#define MACHINE_HAS_CSP (S390_lowcore.machine_flags & MACHINE_FLAG_CSP)
#define MACHINE_HAS_IDTE (0)
@@ -100,7 +100,7 @@ extern unsigned int user_mode;
#define MACHINE_HAS_PFMF (0)
#define MACHINE_HAS_SPP (0)
#define MACHINE_HAS_TOPOLOGY (0)
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
#define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1)
#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
@@ -111,7 +111,7 @@ extern unsigned int user_mode;
#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
#define ZFCPDUMP_HSA_SIZE (32UL<<20)
#define ZFCPDUMP_HSA_SIZE_MAX (64UL<<20)
@@ -153,19 +153,19 @@ extern void (*_machine_power_off)(void);
#else /* __ASSEMBLY__ */
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define IPL_DEVICE 0x10404
#define INITRD_START 0x1040C
#define INITRD_SIZE 0x10414
#define OLDMEM_BASE 0x1041C
#define OLDMEM_SIZE 0x10424
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
#define IPL_DEVICE 0x10400
#define INITRD_START 0x10408
#define INITRD_SIZE 0x10410
#define OLDMEM_BASE 0x10418
#define OLDMEM_SIZE 0x10420
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
#define COMMAND_LINE 0x10480
#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h
index ca3f8814e361..5959bfb3b693 100644
--- a/arch/s390/include/asm/sfp-util.h
+++ b/arch/s390/include/asm/sfp-util.h
@@ -51,7 +51,7 @@
wl = __wl; \
})
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
#define udiv_qrnnd(q, r, n1, n0, d) \
do { unsigned long __n; \
unsigned int __r, __d; \
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index cd0241db5a46..8cc160c9e1cb 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -9,8 +9,6 @@
#ifndef _S390_STRING_H_
#define _S390_STRING_H_
-#ifdef __KERNEL__
-
#ifndef _LINUX_TYPES_H
#include <linux/types.h>
#endif
@@ -152,6 +150,4 @@ size_t strlen(const char *s);
size_t strnlen(const char * s, size_t n);
#endif /* !IN_ARCH_STRING_C */
-#endif /* __KERNEL__ */
-
#endif /* __S390_STRING_H_ */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 003b04edcff6..4e40b25cd060 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -9,15 +9,13 @@
#ifndef _ASM_THREAD_INFO_H
#define _ASM_THREAD_INFO_H
-#ifdef __KERNEL__
-
/*
* Size of kernel stack for each process
*/
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define THREAD_ORDER 1
#define ASYNC_ORDER 1
-#else /* __s390x__ */
+#else /* CONFIG_64BIT */
#ifndef __SMALL_STACK
#define THREAD_ORDER 2
#define ASYNC_ORDER 2
@@ -25,7 +23,7 @@
#define THREAD_ORDER 1
#define ASYNC_ORDER 1
#endif
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
@@ -123,8 +121,6 @@ static inline struct thread_info *current_thread_info(void)
#define is_32bit_task() (1)
#endif
-#endif /* __KERNEL__ */
-
#define PREEMPT_ACTIVE 0x4000000
#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h
index e63069ba39e3..15d647901e5c 100644
--- a/arch/s390/include/asm/timer.h
+++ b/arch/s390/include/asm/timer.h
@@ -10,8 +10,6 @@
#ifndef _ASM_S390_TIMER_H
#define _ASM_S390_TIMER_H
-#ifdef __KERNEL__
-
#include <linux/timer.h>
#define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
@@ -50,6 +48,4 @@ extern void vtime_init(void);
extern void vtime_stop_cpu(void);
extern void vtime_start_leave(void);
-#endif /* __KERNEL__ */
-
#endif /* _ASM_S390_TIMER_H */
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 775a5eea8f9e..06e5acbc84bd 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -106,7 +106,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 31))
return;
if (!tlb->fullmm)
@@ -125,7 +125,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 42))
return;
if (!tlb->fullmm)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 1d8648cf2fea..9fde315f3a7c 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -27,12 +27,12 @@ static inline void __tlb_flush_global(void)
register unsigned long reg4 asm("4");
long dummy;
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
if (!MACHINE_HAS_CSP) {
smp_ptlb_all();
return;
}
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
dummy = 0;
reg2 = reg3 = 0;
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index 05ebbcdbbf6b..6c8c35f8df14 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -28,7 +28,7 @@ typedef __signed__ long saddr_t;
#ifndef __ASSEMBLY__
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
typedef union {
unsigned long long pair;
struct {
@@ -37,7 +37,7 @@ typedef union {
} subreg;
} register_pair;
-#endif /* ! __s390x__ */
+#endif /* ! CONFIG_64BIT */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _S390_TYPES_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 8f2cada4f7c9..1f3a79bcd262 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -50,10 +50,15 @@
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
-#define __access_ok(addr, size) \
-({ \
- __chk_user_ptr(addr); \
- 1; \
+static inline int __range_ok(unsigned long addr, unsigned long size)
+{
+ return 1;
+}
+
+#define __access_ok(addr, size) \
+({ \
+ __chk_user_ptr(addr); \
+ __range_ok((unsigned long)(addr), (size)); \
})
#define access_ok(type, addr, size) __access_ok(addr, size)
@@ -377,7 +382,7 @@ clear_user(void __user *to, unsigned long n)
}
extern int memcpy_real(void *, void *, size_t);
-extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
+extern void memcpy_absolute(void *, void *, size_t);
extern int copy_to_user_real(void __user *dest, void *src, size_t count);
extern int copy_from_user_real(void *dest, void __user *src, size_t count);
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index c4a11cfad3c8..a73eb2e1e918 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -1,8 +1,6 @@
#ifndef __S390_VDSO_H__
#define __S390_VDSO_H__
-#ifdef __KERNEL__
-
/* Default link addresses for the vDSOs */
#define VDSO32_LBASE 0
#define VDSO64_LBASE 0
@@ -45,7 +43,4 @@ void vdso_free_per_cpu(struct _lowcore *lowcore);
#endif
#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
#endif /* __S390_VDSO_H__ */
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 3aa4d00aaf50..c880ff72db44 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -88,6 +88,9 @@ ENTRY(diag308_reset)
stctg %c0,%c15,0(%r4)
larl %r4,.Lfpctl # Floating point control register
stfpc 0(%r4)
+ larl %r4,.Lcontinue_psw # Save PSW flags
+ epsw %r2,%r3
+ stm %r2,%r3,0(%r4)
larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0
lghi %r3,0
lg %r4,0(%r4) # Save PSW
@@ -103,11 +106,20 @@ ENTRY(diag308_reset)
lctlg %c0,%c15,0(%r4)
larl %r4,.Lfpctl # Restore floating point ctl register
lfpc 0(%r4)
+ larl %r4,.Lcontinue_psw # Restore PSW flags
+ lpswe 0(%r4)
+.Lcontinue:
br %r14
.align 16
.Lrestart_psw:
.long 0x00080000,0x80000000 + .Lrestart_part2
+ .section .data..nosave,"aw",@progbits
+.align 8
+.Lcontinue_psw:
+ .quad 0,.Lcontinue
+ .previous
+
.section .bss
.align 8
.Lctlregs:
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index d84181f1f5e8..6684fff17558 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -237,7 +237,7 @@ static noinline __init void detect_machine_type(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
-static __init void early_pgm_check_handler(void)
+static void early_pgm_check_handler(void)
{
unsigned long addr;
const struct exception_table_entry *fixup;
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S
index e1ac3893e972..796c976b5fdc 100644
--- a/arch/s390/kernel/head_kdump.S
+++ b/arch/s390/kernel/head_kdump.S
@@ -85,11 +85,6 @@ startup_kdump_relocated:
basr %r13,0
0:
mvc 0(8,%r0),.Lrestart_psw-0b(%r13) # Setup restart PSW
- mvc 464(16,%r0),.Lpgm_psw-0b(%r13) # Setup pgm check PSW
- lhi %r1,1 # Start new kernel
- diag %r1,%r1,0x308 # with diag 308
-
-.Lno_diag308: # No diag 308
sam31 # Switch to 31 bit addr mode
sr %r1,%r1 # Erase register r1
sr %r2,%r2 # Erase register r2
@@ -98,8 +93,6 @@ startup_kdump_relocated:
.align 8
.Lrestart_psw:
.long 0x00080000,0x80000000 + startup
-.Lpgm_psw:
- .quad 0x0000000180000000,0x0000000000000000 + .Lno_diag308
#else
.align 2
.Lep_startup_kdump:
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 8342e65a140d..2f6cfd460cb6 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1528,12 +1528,15 @@ static struct shutdown_action __refdata dump_action = {
static void dump_reipl_run(struct shutdown_trigger *trigger)
{
- u32 csum;
-
- csum = csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
- copy_to_absolute_zero(&S390_lowcore.ipib_checksum, &csum, sizeof(csum));
- copy_to_absolute_zero(&S390_lowcore.ipib, &reipl_block_actual,
- sizeof(reipl_block_actual));
+ struct {
+ void *addr;
+ __u32 csum;
+ } __packed ipib;
+
+ ipib.csum = csum_partial(reipl_block_actual,
+ reipl_block_actual->hdr.len, 0);
+ ipib.addr = reipl_block_actual;
+ memcpy_absolute(&S390_lowcore.ipib, &ipib, sizeof(ipib));
dump_run(trigger);
}
@@ -1750,6 +1753,7 @@ static struct kobj_attribute on_restart_attr =
static void __do_restart(void *ignore)
{
+ __arch_local_irq_stosm(0x04); /* enable DAT */
smp_send_stop();
#ifdef CONFIG_CRASH_DUMP
crash_kexec(NULL);
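Editor's note on dump_reipl_run() above: the single memcpy_absolute() of the __packed pair only works because the 4-byte checksum sits directly after the 8-byte pointer and the struct has no trailing padding, matching the adjacent ipib/ipib_checksum lowcore fields. A compile-time sketch of that layout constraint; the struct and field names here are illustrative, not from the patch, and the sizes assume 64-bit s390.

#include <stddef.h>

struct ipib_desc {
	void *addr;              /* lands in S390_lowcore.ipib */
	unsigned int csum;       /* lands in S390_lowcore.ipib_checksum */
} __attribute__((packed));

/* The checksum must directly follow the pointer ... */
_Static_assert(offsetof(struct ipib_desc, csum) == sizeof(void *),
	       "csum must directly follow addr");
/* ... and __packed drops the trailing padding, so the 12-byte copy ends
 * exactly at ipib_checksum instead of spilling 4 bytes past it. */
_Static_assert(sizeof(struct ipib_desc) == sizeof(void *) + sizeof(unsigned int),
	       "no trailing padding");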
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 8a22c27219dd..b4f4a7133fa1 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -42,7 +42,8 @@ static const struct irq_class intrclass_names[] = {
{.name = "VRT", .desc = "[EXT] Virtio" },
{.name = "SCP", .desc = "[EXT] Service Call" },
{.name = "IUC", .desc = "[EXT] IUCV" },
- {.name = "CPM", .desc = "[EXT] CPU Measurement" },
+ {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling" },
+ {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter" },
{.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt" },
{.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
{.name = "DAS", .desc = "[I/O] DASD" },
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index bdad47d54478..cdacf8f91b2d 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -24,6 +24,7 @@
#include <asm/ipl.h>
#include <asm/diag.h>
#include <asm/asm-offsets.h>
+#include <asm/os_info.h>
typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
@@ -79,8 +80,8 @@ static void __do_machine_kdump(void *image)
#ifdef CONFIG_CRASH_DUMP
int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
- __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
setup_regs();
+ __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
start_kdump(1);
#endif
}
@@ -114,8 +115,13 @@ static void crash_map_pages(int enable)
size % KEXEC_CRASH_MEM_ALIGN);
if (enable)
vmem_add_mapping(crashk_res.start, size);
- else
+ else {
vmem_remove_mapping(crashk_res.start, size);
+ if (size)
+ os_info_crashkernel_add(crashk_res.start, size);
+ else
+ os_info_crashkernel_add(0, 0);
+ }
}
/*
@@ -208,6 +214,7 @@ static void __machine_kexec(void *data)
{
struct kimage *image = data;
+ __arch_local_irq_stosm(0x04); /* enable DAT */
pfault_fini();
tracing_off();
debug_locks_off();
diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c
index e8d6c214d498..95fa5ac6c4ce 100644
--- a/arch/s390/kernel/os_info.c
+++ b/arch/s390/kernel/os_info.c
@@ -60,7 +60,7 @@ void __init os_info_init(void)
os_info.version_minor = OS_INFO_VERSION_MINOR;
os_info.magic = OS_INFO_MAGIC;
os_info.csum = os_info_csum(&os_info);
- copy_to_absolute_zero(&S390_lowcore.os_info, &ptr, sizeof(ptr));
+ memcpy_absolute(&S390_lowcore.os_info, &ptr, sizeof(ptr));
}
#ifdef CONFIG_CRASH_DUMP
@@ -138,7 +138,6 @@ static void os_info_old_init(void)
goto fail_free;
os_info_old_alloc(OS_INFO_VMCOREINFO, 1);
os_info_old_alloc(OS_INFO_REIPL_BLOCK, 1);
- os_info_old_alloc(OS_INFO_INIT_FN, PAGE_SIZE);
pr_info("crashkernel: addr=0x%lx size=%lu\n",
(unsigned long) os_info_old->crashkernel_addr,
(unsigned long) os_info_old->crashkernel_size);
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index cb019f429e88..9871b1971ed7 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -225,7 +225,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
if (!(alert & CPU_MF_INT_CF_MASK))
return;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++;
+ kstat_cpu(smp_processor_id()).irqs[EXTINT_CMC]++;
cpuhw = &__get_cpu_var(cpu_hw_events);
/* Measurement alerts are shared and might happen when the PMU
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 06264ae8ccd9..489d1d8d96b0 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -428,10 +428,12 @@ static void __init setup_lowcore(void)
lc->restart_fn = (unsigned long) do_restart;
lc->restart_data = 0;
lc->restart_source = -1UL;
- memcpy(&S390_lowcore.restart_stack, &lc->restart_stack,
- 4*sizeof(unsigned long));
- copy_to_absolute_zero(&S390_lowcore.restart_psw,
- &lc->restart_psw, sizeof(psw_t));
+
+ /* Setup absolute zero lowcore */
+ memcpy_absolute(&S390_lowcore.restart_stack, &lc->restart_stack,
+ 4 * sizeof(unsigned long));
+ memcpy_absolute(&S390_lowcore.restart_psw, &lc->restart_psw,
+ sizeof(lc->restart_psw));
set_prefix((u32)(unsigned long) lc);
lowcore_ptr[0] = lc;
@@ -598,7 +600,7 @@ static void __init setup_vmcoreinfo(void)
#ifdef CONFIG_KEXEC
unsigned long ptr = paddr_vmcoreinfo_note();
- copy_to_absolute_zero(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
+ memcpy_absolute(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
#endif
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 647ba9425893..15cca26ccb6c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -297,26 +297,27 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
void *data, unsigned long stack)
{
- struct _lowcore *lc = pcpu->lowcore;
- unsigned short this_cpu;
+ struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
+ struct {
+ unsigned long stack;
+ void *func;
+ void *data;
+ unsigned long source;
+ } restart = { stack, func, data, stap() };
__load_psw_mask(psw_kernel_bits);
- this_cpu = stap();
- if (pcpu->address == this_cpu)
+ if (pcpu->address == restart.source)
func(data); /* should not return */
/* Stop target cpu (if func returns this stops the current cpu). */
pcpu_sigp_retry(pcpu, sigp_stop, 0);
/* Restart func on the target cpu and stop the current cpu. */
- lc->restart_stack = stack;
- lc->restart_fn = (unsigned long) func;
- lc->restart_data = (unsigned long) data;
- lc->restart_source = (unsigned long) this_cpu;
+ memcpy_absolute(&lc->restart_stack, &restart, sizeof(restart));
asm volatile(
"0: sigp 0,%0,6 # sigp restart to target cpu\n"
" brc 2,0b # busy, try again\n"
"1: sigp 0,%1,5 # sigp stop to current cpu\n"
" brc 2,1b # busy, try again\n"
- : : "d" (pcpu->address), "d" (this_cpu) : "0", "1", "cc");
+ : : "d" (pcpu->address), "d" (restart.source) : "0", "1", "cc");
for (;;) ;
}
@@ -800,17 +801,6 @@ void __noreturn cpu_die(void)
#endif /* CONFIG_HOTPLUG_CPU */
-static void smp_call_os_info_init_fn(void)
-{
- int (*init_fn)(void);
- unsigned long size;
-
- init_fn = os_info_old_entry(OS_INFO_INIT_FN, &size);
- if (!init_fn)
- return;
- init_fn();
-}
-
void __init smp_prepare_cpus(unsigned int max_cpus)
{
/* request the 0x1201 emergency signal external interrupt */
@@ -819,7 +809,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/* request the 0x1202 external call external interrupt */
if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
panic("Couldn't request external interrupt 0x1202");
- smp_call_os_info_init_fn();
smp_detect_cpus();
}
@@ -943,19 +932,6 @@ static struct attribute_group cpu_common_attr_group = {
.attrs = cpu_common_attrs,
};
-static ssize_t show_capability(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned int capability;
- int rc;
-
- rc = get_cpu_capability(&capability);
- if (rc)
- return rc;
- return sprintf(buf, "%u\n", capability);
-}
-static DEVICE_ATTR(capability, 0444, show_capability, NULL);
-
static ssize_t show_idle_count(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -993,7 +969,6 @@ static ssize_t show_idle_time(struct device *dev,
static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
static struct attribute *cpu_online_attrs[] = {
- &dev_attr_capability.attr,
&dev_attr_idle_count.attr,
&dev_attr_idle_time_us.attr,
NULL,
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 2a94b774695c..fa0eb238dac7 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -393,27 +393,6 @@ static __init int create_proc_service_level(void)
subsys_initcall(create_proc_service_level);
/*
- * Bogomips calculation based on cpu capability.
- */
-int get_cpu_capability(unsigned int *capability)
-{
- struct sysinfo_1_2_2 *info;
- int rc;
-
- info = (void *) get_zeroed_page(GFP_KERNEL);
- if (!info)
- return -ENOMEM;
- rc = stsi(info, 1, 2, 2);
- if (rc == -ENOSYS)
- goto out;
- rc = 0;
- *capability = info->capability;
-out:
- free_page((unsigned long) info);
- return rc;
-}
-
-/*
* CPU capability might have changed. Therefore recalculate loops_per_jiffy.
*/
void s390_adjust_jiffies(void)
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index a353f0ea45c2..b23d9ac77dfc 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -47,9 +47,30 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
vcpu->stat.diagnose_44++;
- vcpu_put(vcpu);
- yield();
- vcpu_load(vcpu);
+ kvm_vcpu_on_spin(vcpu);
+ return 0;
+}
+
+static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_vcpu *tcpu;
+ int tid;
+ int i;
+
+ tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
+ vcpu->stat.diagnose_9c++;
+ VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
+
+ if (tid == vcpu->vcpu_id)
+ return 0;
+
+ kvm_for_each_vcpu(i, tcpu, kvm)
+ if (tcpu->vcpu_id == tid) {
+ kvm_vcpu_yield_to(tcpu);
+ break;
+ }
+
return 0;
}
@@ -89,6 +110,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
return diag_release_pages(vcpu);
case 0x44:
return __diag_time_slice_end(vcpu);
+ case 0x9c:
+ return __diag_time_slice_end_directed(vcpu);
case 0x308:
return __diag_ipl_functions(vcpu);
default:
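Editor's note on the directed-yield handler added above: the target CPU number is read from the general register named by the r1 field of the DIAGNOSE instruction, which the handler extracts from the intercepted instruction halfword (sie_block->ipa) as (ipa & 0xf0) >> 4. A standalone illustration of that decode; the sample opcode value is invented.

#include <stdio.h>

int main(void)
{
	unsigned short ipa = 0x8350;        /* DIAGNOSE with r1 = %r5 (example value) */
	int r1 = (ipa & 0xf0) >> 4;         /* same extraction as the handler */

	printf("target CPU id is taken from gpr %d\n", r1);   /* prints 5 */
	return 0;
}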
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 361456577c6f..979cbe55bf5e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -101,6 +101,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
}
static intercept_handler_t instruction_handlers[256] = {
+ [0x01] = kvm_s390_handle_01,
[0x83] = kvm_s390_handle_diag,
[0xae] = kvm_s390_handle_sigp,
[0xb2] = kvm_s390_handle_b2,
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 217ce44395a4..664766d0c83c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -28,6 +28,7 @@
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
+#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -74,6 +75,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
{ "diagnose_10", VCPU_STAT(diagnose_10) },
{ "diagnose_44", VCPU_STAT(diagnose_44) },
+ { "diagnose_9c", VCPU_STAT(diagnose_9c) },
{ NULL }
};
@@ -133,8 +135,16 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_S390_UCONTROL:
#endif
case KVM_CAP_SYNC_REGS:
+ case KVM_CAP_ONE_REG:
r = 1;
break;
+ case KVM_CAP_NR_VCPUS:
+ case KVM_CAP_MAX_VCPUS:
+ r = KVM_MAX_VCPUS;
+ break;
+ case KVM_CAP_S390_COW:
+ r = sclp_get_fac85() & 0x2;
+ break;
default:
r = 0;
}
@@ -423,6 +433,71 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
return 0;
}
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+ /* kvm common code refers to this, but never calls it */
+ BUG();
+ return 0;
+}
+
+static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
+ struct kvm_one_reg *reg)
+{
+ int r = -EINVAL;
+
+ switch (reg->id) {
+ case KVM_REG_S390_TODPR:
+ r = put_user(vcpu->arch.sie_block->todpr,
+ (u32 __user *)reg->addr);
+ break;
+ case KVM_REG_S390_EPOCHDIFF:
+ r = put_user(vcpu->arch.sie_block->epoch,
+ (u64 __user *)reg->addr);
+ break;
+ case KVM_REG_S390_CPU_TIMER:
+ r = put_user(vcpu->arch.sie_block->cputm,
+ (u64 __user *)reg->addr);
+ break;
+ case KVM_REG_S390_CLOCK_COMP:
+ r = put_user(vcpu->arch.sie_block->ckc,
+ (u64 __user *)reg->addr);
+ break;
+ default:
+ break;
+ }
+
+ return r;
+}
+
+static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
+ struct kvm_one_reg *reg)
+{
+ int r = -EINVAL;
+
+ switch (reg->id) {
+ case KVM_REG_S390_TODPR:
+ r = get_user(vcpu->arch.sie_block->todpr,
+ (u32 __user *)reg->addr);
+ break;
+ case KVM_REG_S390_EPOCHDIFF:
+ r = get_user(vcpu->arch.sie_block->epoch,
+ (u64 __user *)reg->addr);
+ break;
+ case KVM_REG_S390_CPU_TIMER:
+ r = get_user(vcpu->arch.sie_block->cputm,
+ (u64 __user *)reg->addr);
+ break;
+ case KVM_REG_S390_CLOCK_COMP:
+ r = get_user(vcpu->arch.sie_block->ckc,
+ (u64 __user *)reg->addr);
+ break;
+ default:
+ break;
+ }
+
+ return r;
+}
+
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
kvm_s390_vcpu_initial_reset(vcpu);
@@ -753,6 +828,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_S390_INITIAL_RESET:
r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
break;
+ case KVM_SET_ONE_REG:
+ case KVM_GET_ONE_REG: {
+ struct kvm_one_reg reg;
+ r = -EFAULT;
+ if (copy_from_user(&reg, argp, sizeof(reg)))
+ break;
+ if (ioctl == KVM_SET_ONE_REG)
+ r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
+ else
+ r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
+ break;
+ }
#ifdef CONFIG_KVM_S390_UCONTROL
case KVM_S390_UCAS_MAP: {
struct kvm_s390_ucas_mapping ucasmap;
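Editor's note on the KVM_GET_ONE_REG/KVM_SET_ONE_REG support above: userspace drives it through the generic one-reg ioctls. A hedged userspace sketch, assuming a vcpu file descriptor already obtained via KVM_CREATE_VCPU and headers that carry the s390 register IDs referenced by this series; error handling is trimmed.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the guest CPU timer of one vcpu through the new ONE_REG interface. */
static int read_cpu_timer(int vcpu_fd, uint64_t *value)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (uint64_t)(uintptr_t)value,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);   /* 0 on success */
}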
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index ff28f9d1c9eb..2294377975e8 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -79,6 +79,7 @@ int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
/* implemented in priv.c */
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index e5a45dbd26ac..68a6b2ed16bf 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -380,3 +380,34 @@ int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
+static int handle_sckpf(struct kvm_vcpu *vcpu)
+{
+ u32 value;
+
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu,
+ PGM_PRIVILEGED_OPERATION);
+
+ if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
+ return kvm_s390_inject_program_int(vcpu,
+ PGM_SPECIFICATION);
+
+ value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
+ vcpu->arch.sie_block->todpr = value;
+
+ return 0;
+}
+
+static intercept_handler_t x01_handlers[256] = {
+ [0x07] = handle_sckpf,
+};
+
+int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
+{
+ intercept_handler_t handler;
+
+ handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
+ if (handler)
+ return handler(vcpu);
+ return -EOPNOTSUPP;
+}
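Editor's note on handle_sckpf() above: the guest must not be in problem state, and only the low 16 bits of general register 0 may carry the new TOD programmable field; any bit set in bits 16-31 raises a specification exception. A standalone mock of the register check, with an invented sample value.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gpr0 = 0x1234;                          /* valid: fits in 16 bits */

	if (gpr0 & 0x00000000ffff0000ULL)
		puts("specification exception");
	else
		printf("todpr <- 0x%04x\n", (unsigned int)(gpr0 & 0xffff));
	return 0;
}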
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 60455f104ea3..58a75a8ae90c 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -14,7 +14,7 @@
#include <asm/futex.h>
#include "uaccess.h"
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define AHI "ahi"
#define ALR "alr"
#define CLR "clr"
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index bb1a7eed42ce..57e94298539b 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -15,7 +15,7 @@
#include <asm/futex.h>
#include "uaccess.h"
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
#define AHI "ahi"
#define ALR "alr"
#define CLR "clr"
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 795a0a9bb2eb..921fa541dc04 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -101,19 +101,27 @@ int memcpy_real(void *dest, void *src, size_t count)
}
/*
- * Copy memory to absolute zero
+ * Copy memory in absolute mode (kernel to kernel)
*/
-void copy_to_absolute_zero(void *dest, void *src, size_t count)
+void memcpy_absolute(void *dest, void *src, size_t count)
{
- unsigned long cr0;
+ unsigned long cr0, flags, prefix;
- BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore));
- preempt_disable();
+ flags = arch_local_irq_save();
__ctl_store(cr0, 0, 0);
__ctl_clear_bit(0, 28); /* disable lowcore protection */
- memcpy_real(dest + store_prefix(), src, count);
+ prefix = store_prefix();
+ if (prefix) {
+ local_mcck_disable();
+ set_prefix(0);
+ memcpy(dest, src, count);
+ set_prefix(prefix);
+ local_mcck_enable();
+ } else {
+ memcpy(dest, src, count);
+ }
__ctl_load(cr0, 0, 0);
- preempt_enable();
+ arch_local_irq_restore(flags);
}
/*
@@ -188,20 +196,6 @@ static int is_swapped(unsigned long addr)
}
/*
- * Return swapped prefix or zero page address
- */
-static unsigned long get_swapped(unsigned long addr)
-{
- unsigned long prefix = store_prefix();
-
- if (addr < sizeof(struct _lowcore))
- return addr + prefix;
- if (addr >= prefix && addr < prefix + sizeof(struct _lowcore))
- return addr - prefix;
- return addr;
-}
-
-/*
* Convert a physical pointer for /dev/mem access
*
* For swapped prefix pages a new buffer is returned that contains a copy of
@@ -218,7 +212,7 @@ void *xlate_dev_mem_ptr(unsigned long addr)
size = PAGE_SIZE - (addr & ~PAGE_MASK);
bounce = (void *) __get_free_page(GFP_ATOMIC);
if (bounce)
- memcpy_real(bounce, (void *) get_swapped(addr), size);
+ memcpy_absolute(bounce, (void *) addr, size);
}
preempt_enable();
put_online_cpus();
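Editor's note on the memcpy_absolute() rework and the removal of get_swapped() above: both revolve around s390 prefixing, where the first 8 KiB of real storage and the 8 KiB at the prefix address swap places, so setting the prefix to 0 lets a plain memcpy() reach absolute lowcore. A toy user-space model of that address swap, mirroring the logic of the deleted get_swapped() helper; the 8 KiB size matches the 64-bit lowcore, and nothing here is kernel code.

#include <stdio.h>

#define LOWCORE_SIZE 0x2000UL   /* 8 KiB, as on 64-bit s390 */

static unsigned long prefix_xlate(unsigned long addr, unsigned long prefix)
{
	if (addr < LOWCORE_SIZE)
		return addr + prefix;                    /* low addresses go to the prefix area */
	if (addr >= prefix && addr < prefix + LOWCORE_SIZE)
		return addr - prefix;                    /* the prefix area maps back to 0 */
	return addr;                                     /* everything else is untouched */
}

int main(void)
{
	unsigned long prefix = 0x10000;

	printf("%#lx\n", prefix_xlate(0x200, prefix));   /* 0x10200 */
	printf("%#lx\n", prefix_xlate(0x10200, prefix)); /* 0x200 */
	return 0;
}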
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 4799383e2df9..71ae20df674e 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -109,7 +109,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
pm_dir = pmd_offset(pu_dir, address);
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
(address + HPAGE_SIZE <= start + size) &&
(address >= HPAGE_SIZE)) {
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index c6646de07bf4..a4a89fa980d6 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -235,7 +235,7 @@ static void hws_ext_handler(struct ext_code ext_code,
if (!(param32 & CPU_MF_INT_SF_MASK))
return;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++;
+ kstat_cpu(smp_processor_id()).irqs[EXTINT_CMS]++;
atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
if (hws_wq)
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index 4b285779ac05..ba0f412920be 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -9,6 +9,7 @@ config SCORE
select HAVE_MEMBLOCK_NODE_MAP
select ARCH_DISCARD_MEMBLOCK
select GENERIC_CPU_DEVICES
+ select GENERIC_CLOCKEVENTS
choice
prompt "System type"
@@ -51,9 +52,6 @@ config GENERIC_HWEIGHT
config GENERIC_CALIBRATE_DELAY
def_bool y
-config GENERIC_CLOCKEVENTS
- def_bool y
-
menu "Kernel type"
config 32BIT
@@ -68,7 +66,6 @@ config MEMORY_START
hex
default 0xa0000000
-source "kernel/time/Kconfig"
source "kernel/Kconfig.hz"
source "kernel/Kconfig.preempt"
diff --git a/arch/score/include/asm/kvm_para.h b/arch/score/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/score/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 3e723aaa5e18..99bcd0ee838d 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -13,6 +13,7 @@ config SUPERH
select HAVE_DMA_ATTRS
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
+ select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A)
select PERF_USE_VMALLOC
select HAVE_KERNEL_GZIP
@@ -29,6 +30,8 @@ config SUPERH
select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
@@ -87,16 +90,6 @@ config GENERIC_GPIO
config GENERIC_CALIBRATE_DELAY
bool
-config GENERIC_CLOCKEVENTS
- def_bool y
-
-config GENERIC_CLOCKEVENTS_BROADCAST
- bool
-
-config GENERIC_CMOS_UPDATE
- def_bool y
- depends on SH_SH03 || SH_DREAMCAST
-
config GENERIC_LOCKBREAK
def_bool y
depends on SMP && PREEMPT
@@ -611,8 +604,6 @@ config SH_CLK_CPG_LEGACY
!CPU_SUBTYPE_SH7734 && !CPU_SUBTYPE_SH7264 && \
!CPU_SUBTYPE_SH7269
-source "kernel/time/Kconfig"
-
endmenu
menu "CPU Frequency scaling"
diff --git a/arch/sh/include/asm/kvm_para.h b/arch/sh/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/sh/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index b86e9ca79455..2062aa88af41 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -123,7 +123,6 @@ void native_play_dead(void)
int __cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
- struct task_struct *p;
int ret;
ret = mp_ops->cpu_disable(cpu);
@@ -153,11 +152,7 @@ int __cpu_disable(void)
flush_cache_all();
local_flush_tlb_all();
- read_lock(&tasklist_lock);
- for_each_process(p)
- if (p->mm)
- cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
- read_unlock(&tasklist_lock);
+ clear_tasks_mm_cpumask(cpu);
return 0;
}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 1ea3fd954756..e74ff1377626 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -32,12 +32,15 @@ config SPARC
select HAVE_NMI_WATCHDOG if SPARC64
select HAVE_BPF_JIT
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_CMOS_UPDATE
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
config SPARC32
def_bool !64BIT
select GENERIC_ATOMIC64
select CLZ_TAB
- select ARCH_THREAD_INFO_ALLOCATOR
config SPARC64
def_bool 64BIT
@@ -77,13 +80,6 @@ config BITS
default 32 if SPARC32
default 64 if SPARC64
-config GENERIC_CMOS_UPDATE
- bool
- default y
-
-config GENERIC_CLOCKEVENTS
- def_bool y
-
config IOMMU_HELPER
bool
default y if SPARC64
@@ -274,8 +270,6 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
-source "kernel/time/Kconfig"
-
if SPARC64
source "drivers/cpufreq/Kconfig"
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 2c2e38821f60..67f83e0a0d68 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -21,3 +21,4 @@ generic-y += div64.h
generic-y += local64.h
generic-y += irq_regs.h
generic-y += local.h
+generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/asi.h b/arch/sparc/include/asm/asi.h
index cbb93e5141de..61ebe7411ceb 100644
--- a/arch/sparc/include/asm/asi.h
+++ b/arch/sparc/include/asm/asi.h
@@ -40,11 +40,7 @@
#define ASI_M_UNA01 0x01 /* Same here... */
#define ASI_M_MXCC 0x02 /* Access to TI VIKING MXCC registers */
#define ASI_M_FLUSH_PROBE 0x03 /* Reference MMU Flush/Probe; rw, ss */
-#ifndef CONFIG_SPARC_LEON
#define ASI_M_MMUREGS 0x04 /* MMU Registers; rw, ss */
-#else
-#define ASI_M_MMUREGS 0x19
-#endif /* CONFIG_SPARC_LEON */
#define ASI_M_TLBDIAG 0x05 /* MMU TLB only Diagnostics */
#define ASI_M_DIAGS 0x06 /* Reference MMU Diagnostics */
#define ASI_M_IODIAG 0x07 /* MMU I/O TLB only Diagnostics */
diff --git a/arch/sparc/include/asm/asmmacro.h b/arch/sparc/include/asm/asmmacro.h
index 02a172fb193a..a0e28ef02558 100644
--- a/arch/sparc/include/asm/asmmacro.h
+++ b/arch/sparc/include/asm/asmmacro.h
@@ -20,4 +20,26 @@
/* All traps low-level code here must end with this macro. */
#define RESTORE_ALL b ret_trap_entry; clr %l6;
+/* Support for run-time patching of single instructions.
+ * This is used to handle the differences in the ASI for
+ * MMUREGS for LEON and SUN.
+ *
+ * Sample:
+ * LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %o0)
+ * SUN_PI_(lda [%g0] ASI_M_MMUREGS, %o0)

+ * PI == Patch Instruction
+ *
+ * For LEON we will use the first variant,
+ * and for all others we will use the SUN variant.
+ * The order is important.
+ */
+#define LEON_PI(...) \
+662: __VA_ARGS__
+
+#define SUN_PI_(...) \
+ .section .leon_1insn_patch, "ax"; \
+ .word 662b; \
+ __VA_ARGS__; \
+ .previous
+
#endif /* !(_SPARC_ASMMACRO_H) */
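Editor's note on the LEON_PI/SUN_PI_ pair above: it emits the LEON form of the instruction inline and records { address, SUN replacement } in the .leon_1insn_patch section, bounded by the __leon_1insn_patch[] symbols added to sections.h later in this diff; on non-LEON hardware the boot path walks that table and overwrites each recorded instruction. A rough C sketch of such a walker, using hypothetical names; the real fix-up lives in the sparc32 boot code and must also flush the instruction cache.

#include <stdint.h>

struct leon_1insn_patch_entry {
	uint32_t addr;      /* .word 662b: address of the LEON instruction */
	uint32_t insn;      /* the SUN replacement instruction word */
};

/* Hypothetical walker: replace every patched instruction with its SUN form. */
static void apply_sun_patches(struct leon_1insn_patch_entry *p,
			      struct leon_1insn_patch_entry *end)
{
	for (; p < end; p++)
		*(uint32_t *)(unsigned long)p->addr = p->insn;
	/* a real implementation flushes the I-cache before running the code */
}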
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 48a7c65731d2..8493fd3c7ba5 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -12,13 +12,18 @@ extern int dma_supported(struct device *dev, u64 mask);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
+extern struct dma_map_ops *dma_ops;
+extern struct dma_map_ops *leon_dma_ops;
+extern struct dma_map_ops pci32_dma_ops;
+
extern struct bus_type pci_bus_type;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
- if (dev->bus == &pci_bus_type)
+ if (sparc_cpu_model == sparc_leon)
+ return leon_dma_ops;
+ else if (dev->bus == &pci_bus_type)
return &pci32_dma_ops;
#endif
return dma_ops;
diff --git a/arch/sparc/include/asm/gpio.h b/arch/sparc/include/asm/gpio.h
index a0e3ac0af599..b3799d88ffcf 100644
--- a/arch/sparc/include/asm/gpio.h
+++ b/arch/sparc/include/asm/gpio.h
@@ -1,36 +1,4 @@
-#ifndef __ASM_SPARC_GPIO_H
-#define __ASM_SPARC_GPIO_H
-
-#include <linux/errno.h>
-#include <asm-generic/gpio.h>
-
-#ifdef CONFIG_GPIOLIB
-
-static inline int gpio_get_value(unsigned int gpio)
-{
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
-{
- __gpio_set_value(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned int gpio)
-{
- return __gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(unsigned int gpio)
-{
- return -ENOSYS;
-}
-
-static inline int irq_to_gpio(unsigned int irq)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_GPIOLIB */
-
-#endif /* __ASM_SPARC_GPIO_H */
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/arch/sparc/include/asm/kvm_para.h b/arch/sparc/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/sparc/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 07659124c140..3375c6293893 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -8,8 +8,6 @@
#ifndef LEON_H_INCLUDE
#define LEON_H_INCLUDE
-#ifdef CONFIG_SPARC_LEON
-
/* mmu register access, ASI_LEON_MMUREGS */
#define LEON_CNR_CTRL 0x000
#define LEON_CNR_CTXP 0x100
@@ -62,15 +60,6 @@
#ifndef __ASSEMBLY__
-/* do a virtual address read without cache */
-static inline unsigned long leon_readnobuffer_reg(unsigned long paddr)
-{
- unsigned long retval;
- __asm__ __volatile__("lda [%1] %2, %0\n\t" :
- "=r"(retval) : "r"(paddr), "i"(ASI_LEON_NOCACHE));
- return retval;
-}
-
/* do a physical address bypass write, i.e. for 0x80000000 */
static inline void leon_store_reg(unsigned long paddr, unsigned long value)
{
@@ -87,47 +76,16 @@ static inline unsigned long leon_load_reg(unsigned long paddr)
return retval;
}
-static inline void leon_srmmu_disabletlb(void)
-{
- unsigned int retval;
- __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
- "i"(ASI_LEON_MMUREGS));
- retval |= LEON_CNR_CTRL_TLBDIS;
- __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
- "i"(ASI_LEON_MMUREGS) : "memory");
-}
-
-static inline void leon_srmmu_enabletlb(void)
-{
- unsigned int retval;
- __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
- "i"(ASI_LEON_MMUREGS));
- retval = retval & ~LEON_CNR_CTRL_TLBDIS;
- __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
- "i"(ASI_LEON_MMUREGS) : "memory");
-}
-
/* macro access for leon_load_reg() and leon_store_reg() */
#define LEON3_BYPASS_LOAD_PA(x) (leon_load_reg((unsigned long)(x)))
#define LEON3_BYPASS_STORE_PA(x, v) (leon_store_reg((unsigned long)(x), (unsigned long)(v)))
-#define LEON3_BYPASS_ANDIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) & v)
-#define LEON3_BYPASS_ORIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) | v)
#define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x))
#define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v))
-#define LEON_REGLOAD_PA(x) leon_load_reg((unsigned long)(x)+LEON_PREGS)
-#define LEON_REGSTORE_PA(x, v) leon_store_reg((unsigned long)(x)+LEON_PREGS, (unsigned long)(v))
-#define LEON_REGSTORE_OR_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) | (unsigned long)(v))
-#define LEON_REGSTORE_AND_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) & (unsigned long)(v))
-
-/* macro access for leon_readnobuffer_reg() */
-#define LEON_BYPASSCACHE_LOAD_VA(x) leon_readnobuffer_reg((unsigned long)(x))
extern void leon_init(void);
extern void leon_switch_mm(void);
extern void leon_init_IRQ(void);
-extern unsigned long last_valid_pfn;
-
static inline unsigned long sparc_leon3_get_dcachecfg(void)
{
unsigned int retval;
@@ -230,9 +188,6 @@ static inline int sparc_leon3_cpuid(void)
#error cannot determine LEON_PAGE_SIZE_LEON
#endif
-#define PAGE_MIN_SHIFT (12)
-#define PAGE_MIN_SIZE (1UL << PAGE_MIN_SHIFT)
-
#define LEON3_XCCR_SETS_MASK 0x07000000UL
#define LEON3_XCCR_SSIZE_MASK 0x00f00000UL
@@ -242,7 +197,7 @@ static inline int sparc_leon3_cpuid(void)
#ifndef __ASSEMBLY__
struct vm_area_struct;
-extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
+extern unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr);
extern void leon_flush_icache_all(void);
extern void leon_flush_dcache_all(void);
extern void leon_flush_cache_all(void);
@@ -258,15 +213,7 @@ struct leon3_cacheregs {
unsigned long dccr; /* 0x0c - Data Cache Configuration Register */
};
-/* struct that hold LEON2 cache configuration register
- * & configuration register
- */
-struct leon2_cacheregs {
- unsigned long ccr, cfg;
-};
-
-#ifdef __KERNEL__
-
+#include <linux/irq.h>
#include <linux/interrupt.h>
struct device_node;
@@ -292,24 +239,15 @@ extern void leon_smp_done(void);
extern void leon_boot_cpus(void);
extern int leon_boot_one_cpu(int i, struct task_struct *);
void leon_init_smp(void);
-extern void cpu_idle(void);
-extern void init_IRQ(void);
-extern void cpu_panic(void);
-extern int __leon_processor_id(void);
void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
extern irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused);
-extern unsigned int real_irq_entry[];
extern unsigned int smpleon_ipi[];
-extern unsigned int patchme_maybe_smp_msg[];
-extern unsigned int t_nmi[], linux_trap_ipi15_leon[];
-extern unsigned int linux_trap_ipi15_sun4m[];
+extern unsigned int linux_trap_ipi15_leon[];
extern int leon_ipi_irq;
#endif /* CONFIG_SMP */
-#endif /* __KERNEL__ */
-
#endif /* __ASSEMBLY__ */
/* macros used in leon_mm.c */
@@ -317,18 +255,4 @@ extern int leon_ipi_irq;
#define _pfn_valid(pfn) ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base)))
#define _SRMMU_PTE_PMASK_LEON 0xffffffff
-#else /* defined(CONFIG_SPARC_LEON) */
-
-/* nop definitions for !LEON case */
-#define leon_init() do {} while (0)
-#define leon_switch_mm() do {} while (0)
-#define leon_init_IRQ() do {} while (0)
-#define init_leon() do {} while (0)
-#define leon_smp_done() do {} while (0)
-#define leon_boot_cpus() do {} while (0)
-#define leon_boot_one_cpu(i, t) 1
-#define leon_init_smp() do {} while (0)
-
-#endif /* !defined(CONFIG_SPARC_LEON) */
-
#endif
diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h
index e50f326e71bd..f3034eddf468 100644
--- a/arch/sparc/include/asm/leon_amba.h
+++ b/arch/sparc/include/asm/leon_amba.h
@@ -87,8 +87,6 @@ struct amba_prom_registers {
#define LEON3_GPTIMER_CONFIG_NRTIMERS(c) ((c)->config & 0x7)
#define LEON3_GPTIMER_CTRL_ISPENDING(r) (((r)&LEON3_GPTIMER_CTRL_PENDING) ? 1 : 0)
-#ifdef CONFIG_SPARC_LEON
-
#ifndef __ASSEMBLY__
struct leon3_irqctrl_regs_map {
@@ -264,6 +262,4 @@ extern unsigned int sparc_leon_eirq;
#define amba_device(x) (((x) >> 12) & 0xfff)
-#endif /* !defined(CONFIG_SPARC_LEON) */
-
#endif
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index cb828703a63a..79da17866fa8 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -139,6 +139,7 @@
restore %g0, %g0, %g0;
#ifndef __ASSEMBLY__
+extern unsigned long last_valid_pfn;
/* This makes sense. Honest it does - Anton */
/* XXX Yes but it's ugly as sin. FIXME. -KMW */
@@ -148,67 +149,13 @@ extern void *srmmu_nocache_pool;
#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
/* Accessing the MMU control register. */
-static inline unsigned int srmmu_get_mmureg(void)
-{
- unsigned int retval;
- __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
- "=r" (retval) :
- "i" (ASI_M_MMUREGS));
- return retval;
-}
-
-static inline void srmmu_set_mmureg(unsigned long regval)
-{
- __asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
- "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
-
-}
-
-static inline void srmmu_set_ctable_ptr(unsigned long paddr)
-{
- paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
- __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
- "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
- "i" (ASI_M_MMUREGS) :
- "memory");
-}
-
-static inline void srmmu_set_context(int context)
-{
- __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
- "r" (context), "r" (SRMMU_CTX_REG),
- "i" (ASI_M_MMUREGS) : "memory");
-}
-
-static inline int srmmu_get_context(void)
-{
- register int retval;
- __asm__ __volatile__("lda [%1] %2, %0\n\t" :
- "=r" (retval) :
- "r" (SRMMU_CTX_REG),
- "i" (ASI_M_MMUREGS));
- return retval;
-}
-
-static inline unsigned int srmmu_get_fstatus(void)
-{
- unsigned int retval;
-
- __asm__ __volatile__("lda [%1] %2, %0\n\t" :
- "=r" (retval) :
- "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
- return retval;
-}
-
-static inline unsigned int srmmu_get_faddr(void)
-{
- unsigned int retval;
-
- __asm__ __volatile__("lda [%1] %2, %0\n\t" :
- "=r" (retval) :
- "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
- return retval;
-}
+unsigned int srmmu_get_mmureg(void);
+void srmmu_set_mmureg(unsigned long regval);
+void srmmu_set_ctable_ptr(unsigned long paddr);
+void srmmu_set_context(int context);
+int srmmu_get_context(void);
+unsigned int srmmu_get_fstatus(void);
+unsigned int srmmu_get_faddr(void);
/* This is guaranteed on all SRMMU's. */
static inline void srmmu_flush_whole_tlb(void)
@@ -219,23 +166,6 @@ static inline void srmmu_flush_whole_tlb(void)
}
-/* These flush types are not available on all chips... */
-#ifndef CONFIG_SPARC_LEON
-static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
-{
- unsigned long retval;
-
- vaddr &= PAGE_MASK;
- __asm__ __volatile__("lda [%1] %2, %0\n\t" :
- "=r" (retval) :
- "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
-
- return retval;
-}
-#else
-#define srmmu_hwprobe(addr) srmmu_swprobe(addr, 0)
-#endif
-
static inline int
srmmu_get_pte (unsigned long addr)
{
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 67df5cc10011..4e5a483122a0 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -42,7 +42,9 @@
#define TASK_SIZE_OF(tsk) \
(test_tsk_thread_flag(tsk,TIF_32BIT) ? \
(1UL << 32UL) : ((unsigned long)-VPTE_SIZE))
-#define TASK_SIZE TASK_SIZE_OF(current)
+#define TASK_SIZE \
+ (test_thread_flag(TIF_32BIT) ? \
+ (1UL << 32UL) : ((unsigned long)-VPTE_SIZE))
#ifdef __KERNEL__
#define STACK_TOP32 ((1UL << 32UL) - PAGE_SIZE)
diff --git a/arch/sparc/include/asm/psr.h b/arch/sparc/include/asm/psr.h
index b8c0e5f0a66b..cee7ed9c927d 100644
--- a/arch/sparc/include/asm/psr.h
+++ b/arch/sparc/include/asm/psr.h
@@ -35,6 +35,14 @@
#define PSR_VERS 0x0f000000 /* cpu-version field */
#define PSR_IMPL 0xf0000000 /* cpu-implementation field */
+#define PSR_VERS_SHIFT 24
+#define PSR_IMPL_SHIFT 28
+#define PSR_VERS_SHIFTED_MASK 0xf
+#define PSR_IMPL_SHIFTED_MASK 0xf
+
+#define PSR_IMPL_TI 0x4
+#define PSR_IMPL_LEON 0xf
+
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/sections.h b/arch/sparc/include/asm/sections.h
index 0b0553bbd8a0..f300d1a9b2b6 100644
--- a/arch/sparc/include/asm/sections.h
+++ b/arch/sparc/include/asm/sections.h
@@ -7,4 +7,7 @@
/* sparc entry point */
extern char _start[];
+extern char __leon_1insn_patch[];
+extern char __leon_1insn_patch_end[];
+
#endif
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index 21a38946541d..5af664932452 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -77,18 +77,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
/*
* thread information allocation
*/
-#define THREAD_INFO_ORDER 1
-
-struct thread_info * alloc_thread_info_node(struct task_struct *tsk, int node);
-void free_thread_info(struct thread_info *);
+#define THREAD_SIZE_ORDER 1
#endif /* __ASSEMBLY__ */
-/*
- * Size of kernel stack for each process.
- * Observe the order of get_free_pages() in alloc_thread_info_node().
- * The sun4 has 8K stack too, because it's short on memory, and 16K is a waste.
- */
+/* Size of kernel stack for each process */
#define THREAD_SIZE (2 * PAGE_SIZE)
/*
diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
index e88fbe5c0457..0167d26d0d1d 100644
--- a/arch/sparc/include/asm/uaccess.h
+++ b/arch/sparc/include/asm/uaccess.h
@@ -5,4 +5,10 @@
#else
#include <asm/uaccess_32.h>
#endif
+
+#define user_addr_max() \
+ (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+
#endif
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index d50c310f5d38..53a28dd59f59 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -16,6 +16,8 @@
#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE
@@ -304,34 +306,8 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n)
return n;
}
-extern long __strncpy_from_user(char *dest, const char __user *src, long count);
-
-static inline long strncpy_from_user(char *dest, const char __user *src, long count)
-{
- if (__access_ok((unsigned long) src, count))
- return __strncpy_from_user(dest, src, count);
- else
- return -EFAULT;
-}
-
-extern long __strlen_user(const char __user *);
-extern long __strnlen_user(const char __user *, long len);
-
-static inline long strlen_user(const char __user *str)
-{
- if (!access_ok(VERIFY_READ, str, 0))
- return 0;
- else
- return __strlen_user(str);
-}
-
-static inline long strnlen_user(const char __user *str, long len)
-{
- if (!access_ok(VERIFY_READ, str, 0))
- return 0;
- else
- return __strnlen_user(str, len);
-}
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASSEMBLY__ */
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index a1091afb8831..7c831d848b4e 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -17,6 +17,8 @@
#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+
/*
* Sparc64 is segmented, though more like the M68K than the I386.
* We use the secondary ASI to address user memory, which references a
@@ -257,15 +259,9 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);
#define clear_user __clear_user
-extern long __must_check __strncpy_from_user(char *dest, const char __user *src, long count);
-
-#define strncpy_from_user __strncpy_from_user
-
-extern long __strlen_user(const char __user *);
-extern long __strnlen_user(const char __user *, long len);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
-#define strlen_user __strlen_user
-#define strnlen_user __strnlen_user
#define __copy_to_user_inatomic ___copy_to_user
#define __copy_from_user_inatomic ___copy_from_user
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 72308f9b0096..6cf591b7e1c6 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -51,8 +51,8 @@ obj-y += of_device_common.o
obj-y += of_device_$(BITS).o
obj-$(CONFIG_SPARC64) += prom_irqtrans.o
-obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o
-obj-$(CONFIG_SPARC_LEON)+= leon_pmc.o
+obj-$(CONFIG_SPARC32) += leon_kernel.o
+obj-$(CONFIG_SPARC32) += leon_pmc.o
obj-$(CONFIG_SPARC64) += reboot.o
obj-$(CONFIG_SPARC64) += sysfs.o
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 2d1819641769..a6c94a2bf9d4 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -121,7 +121,7 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
FPU(-1, NULL)
}
},{
- 4,
+ PSR_IMPL_TI,
.cpu_info = {
CPU(0, "Texas Instruments, Inc. - SuperSparc-(II)"),
/* SparcClassic -- borned STP1010TAB-50*/
@@ -191,7 +191,7 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
FPU(-1, NULL)
}
},{
- 0xF, /* Aeroflex Gaisler */
+ PSR_IMPL_LEON, /* Aeroflex Gaisler */
.cpu_info = {
CPU(3, "LEON"),
CPU(-1, NULL)
@@ -440,16 +440,16 @@ static int __init cpu_type_probe(void)
int psr_impl, psr_vers, fpu_vers;
int psr;
- psr_impl = ((get_psr() >> 28) & 0xf);
- psr_vers = ((get_psr() >> 24) & 0xf);
+ psr_impl = ((get_psr() >> PSR_IMPL_SHIFT) & PSR_IMPL_SHIFTED_MASK);
+ psr_vers = ((get_psr() >> PSR_VERS_SHIFT) & PSR_VERS_SHIFTED_MASK);
psr = get_psr();
put_psr(psr | PSR_EF);
-#ifdef CONFIG_SPARC_LEON
- fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
-#else
- fpu_vers = ((get_fsr() >> 17) & 0x7);
-#endif
+
+ if (psr_impl == PSR_IMPL_LEON)
+ fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
+ else
+ fpu_vers = ((get_fsr() >> 17) & 0x7);
put_psr(psr);
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 2dbe1806e530..dcaa1cf0de40 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -393,7 +393,6 @@ linux_trap_ipi15_sun4d:
/* FIXME */
1: b,a 1b
-#ifdef CONFIG_SPARC_LEON
.globl smpleon_ipi
.extern leon_ipi_interrupt
/* SMP per-cpu IPI interrupts are handled specially. */
@@ -424,8 +423,6 @@ linux_trap_ipi15_leon:
b ret_trap_lockless_ipi
clr %l6
-#endif /* CONFIG_SPARC_LEON */
-
#endif /* CONFIG_SMP */
/* This routine handles illegal instructions and privileged
@@ -770,8 +767,11 @@ srmmu_fault:
mov 0x400, %l5
mov 0x300, %l4
- lda [%l5] ASI_M_MMUREGS, %l6 ! read sfar first
- lda [%l4] ASI_M_MMUREGS, %l5 ! read sfsr last
+LEON_PI(lda [%l5] ASI_LEON_MMUREGS, %l6) ! read sfar first
+SUN_PI_(lda [%l5] ASI_M_MMUREGS, %l6) ! read sfar first
+
+LEON_PI(lda [%l4] ASI_LEON_MMUREGS, %l5) ! read sfsr last
+SUN_PI_(lda [%l4] ASI_M_MMUREGS, %l5) ! read sfsr last
andn %l6, 0xfff, %l6
srl %l5, 6, %l5 ! and encode all info into l7
diff --git a/arch/sparc/kernel/etrap_32.S b/arch/sparc/kernel/etrap_32.S
index 84b5f0d2afde..e3e80d65e39a 100644
--- a/arch/sparc/kernel/etrap_32.S
+++ b/arch/sparc/kernel/etrap_32.S
@@ -234,7 +234,8 @@ tsetup_srmmu_stackchk:
cmp %glob_tmp, %sp
bleu,a 1f
- lda [%g0] ASI_M_MMUREGS, %glob_tmp ! read MMU control
+LEON_PI( lda [%g0] ASI_LEON_MMUREGS, %glob_tmp) ! read MMU control
+SUN_PI_( lda [%g0] ASI_M_MMUREGS, %glob_tmp) ! read MMU control
trap_setup_user_stack_is_bolixed:
/* From user/kernel into invalid window w/bad user
@@ -249,18 +250,25 @@ trap_setup_user_stack_is_bolixed:
1:
/* Clear the fault status and turn on the no_fault bit. */
or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
- sta %glob_tmp, [%g0] ASI_M_MMUREGS ! set it
+LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS) ! set it
+SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS) ! set it
/* Dump the registers and cross fingers. */
STORE_WINDOW(sp)
/* Clear the no_fault bit and check the status. */
andn %glob_tmp, 0x2, %glob_tmp
- sta %glob_tmp, [%g0] ASI_M_MMUREGS
+LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS)
+
mov AC_M_SFAR, %glob_tmp
- lda [%glob_tmp] ASI_M_MMUREGS, %g0
+LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0)
+
mov AC_M_SFSR, %glob_tmp
- lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp ! save away status of winstore
+LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)! save away status of winstore
+SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp) ! save away status of winstore
+
andcc %glob_tmp, 0x2, %g0 ! did we fault?
bne trap_setup_user_stack_is_bolixed ! failure
nop
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index a0f5c20e4b9c..afeb1d770303 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -30,10 +30,6 @@
* the cpu-type
*/
.align 4
-cputyp:
- .word 1
-
- .align 4
.globl cputypval
cputypval:
.asciz "sun4m"
@@ -46,8 +42,8 @@ cputypvar:
.align 4
-sun4c_notsup:
- .asciz "Sparc-Linux sun4/sun4c support does no longer exist.\n\n"
+notsup:
+ .asciz "Sparc-Linux sun4/sun4c or MMU-less not supported\n\n"
.align 4
sun4e_notsup:
@@ -123,7 +119,7 @@ current_pc:
tst %o0
be no_sun4u_here
mov %g4, %o7 /* Previous %o7. */
-
+
mov %o0, %l0 ! stash away romvec
mov %o0, %g7 ! put it here too
mov %o1, %l1 ! stash away debug_vec too
@@ -132,7 +128,7 @@ current_pc:
set current_pc, %g5
cmp %g3, %g5
be already_mapped
- nop
+ nop
/* %l6 will hold the offset we have to subtract
* from absolute symbols in order to access areas
@@ -192,9 +188,9 @@ copy_prom_done:
bne not_a_sun4
nop
-halt_sun4_or_sun4c:
+halt_notsup:
ld [%g7 + 0x68], %o1
- set sun4c_notsup, %o0
+ set notsup, %o0
sub %o0, %l6, %o0
call %o1
nop
@@ -202,18 +198,31 @@ halt_sun4_or_sun4c:
nop
not_a_sun4:
+ /* It looks like this is a machine we support.
+ * Now find out what MMU we are dealing with
+ * LEON - identified by the psr.impl field
+ * Viking - identified by the psr.impl field
+ * In all other cases a sun4m srmmu.
+ * We check that the MMU is enabled in all cases.
+ */
+
+ /* Check if this is a LEON CPU */
+ rd %psr, %g3
+ srl %g3, PSR_IMPL_SHIFT, %g3
+ and %g3, PSR_IMPL_SHIFTED_MASK, %g3
+ cmp %g3, PSR_IMPL_LEON
+ be leon_remap /* It is a LEON - jump */
+ nop
+
+ /* Sanity-check, is MMU enabled */
lda [%g0] ASI_M_MMUREGS, %g1
andcc %g1, 1, %g0
- be halt_sun4_or_sun4c
+ be halt_notsup
nop
-srmmu_remap:
- /* First, check for a viking (TI) module. */
- set 0x40000000, %g2
- rd %psr, %g3
- and %g2, %g3, %g3
- subcc %g3, 0x0, %g0
- bz srmmu_nviking
+ /* Check for a viking (TI) module. */
+ cmp %g3, PSR_IMPL_TI
+ bne srmmu_not_viking
nop
/* Figure out what kind of viking we are on.
@@ -228,14 +237,14 @@ srmmu_remap:
lda [%g0] ASI_M_MMUREGS, %g3 ! peek in the control reg
and %g2, %g3, %g3
subcc %g3, 0x0, %g0
- bnz srmmu_nviking ! is in mbus mode
+ bnz srmmu_not_viking ! is in mbus mode
nop
-
+
rd %psr, %g3 ! DO NOT TOUCH %g3
andn %g3, PSR_ET, %g2
wr %g2, 0x0, %psr
WRITE_PAUSE
-
+
/* Get context table pointer, then convert to
* a physical address, which is 36 bits.
*/
@@ -258,12 +267,12 @@ srmmu_remap:
lda [%g4] ASI_M_BYPASS, %o1 ! This is a level 1 ptr
srl %o1, 0x4, %o1 ! Clear low 4 bits
sll %o1, 0x8, %o1 ! Make physical
-
+
/* Ok, pull in the PTD. */
lda [%o1] ASI_M_BYPASS, %o2 ! This is the 0x0 16MB pgd
/* Calculate to KERNBASE entry. */
- add %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3
+ add %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3
/* Poke the entry into the calculated address. */
sta %o2, [%o3] ASI_M_BYPASS
@@ -293,12 +302,12 @@ srmmu_remap:
b go_to_highmem
nop
+srmmu_not_viking:
/* This works on viking's in Mbus mode and all
* other MBUS modules. It is virtually the same as
* the above madness sans turning traps off and flipping
* the AC bit.
*/
-srmmu_nviking:
set AC_M_CTPR, %g1
lda [%g1] ASI_M_MMUREGS, %g1 ! get ctx table ptr
sll %g1, 0x4, %g1 ! make physical addr
@@ -313,6 +322,29 @@ srmmu_nviking:
nop ! wheee....
+leon_remap:
+ /* Sanity-check, is MMU enabled */
+ lda [%g0] ASI_LEON_MMUREGS, %g1
+ andcc %g1, 1, %g0
+ be halt_notsup
+ nop
+
+ /* Same code as in the srmmu_not_viking case,
+ * with the LEON ASI for mmuregs
+ */
+ set AC_M_CTPR, %g1
+ lda [%g1] ASI_LEON_MMUREGS, %g1 ! get ctx table ptr
+ sll %g1, 0x4, %g1 ! make physical addr
+ lda [%g1] ASI_M_BYPASS, %g1 ! ptr to level 1 pg_table
+ srl %g1, 0x4, %g1
+ sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
+
+ lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
+ add %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
+ sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
+ b go_to_highmem
+ nop ! wheee....
+
/* Now do a non-relative jump so that PC is in high-memory */
go_to_highmem:
set execute_in_high_mem, %g1
@@ -336,8 +368,9 @@ execute_in_high_mem:
sethi %hi(linux_dbvec), %g1
st %o1, [%g1 + %lo(linux_dbvec)]
-/* Get the machine type via the mysterious romvec node operations. */
-
+ /* Get the machine type via the romvec
+ * getprops node operation
+ */
add %g7, 0x1c, %l1
ld [%l1], %l0
ld [%l0], %l0
@@ -356,9 +389,42 @@ execute_in_high_mem:
! to a buf where above string
! will get stored by the prom.
-#ifdef CONFIG_SPARC_LEON
- /* no cpu-type check is needed, it is a SPARC-LEON */
+ /* Check value of "compatible" property.
+ * "value" => "model"
+ * leon => sparc_leon
+ * sun4m => sun4m
+ * sun4s => sun4m
+ * sun4d => sun4d
+ * sun4e => "no_sun4e_here"
+ * '*' => "no_sun4u_here"
+ * Check single letters only
+ */
+
+ set cputypval, %o2
+ /* If cputypval[0] == 'l' (lower case letter L) this is leon */
+ ldub [%o2], %l1
+ cmp %l1, 'l'
+ be leon_init
+ nop
+
+ /* Check cputypval[4] to find the sun model */
+ ldub [%o2 + 0x4], %l1
+
+ cmp %l1, 'm'
+ be sun4m_init
+ cmp %l1, 's'
+ be sun4m_init
+ cmp %l1, 'd'
+ be sun4d_init
+ cmp %l1, 'e'
+ be no_sun4e_here ! Could be a sun4e.
+ nop
+ b no_sun4u_here ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
+ nop
+
+leon_init:
+ /* LEON CPU - set boot_cpu_id */
sethi %hi(boot_cpu_id), %g2 ! boot-cpu index
#ifdef CONFIG_SMP
@@ -376,26 +442,6 @@ execute_in_high_mem:
ba continue_boot
nop
-#endif
-
-/* Check to cputype. We may be booted on a sun4u (64 bit box),
- * and sun4d needs special treatment.
- */
-
- set cputypval, %o2
- ldub [%o2 + 0x4], %l1
-
- cmp %l1, 'm'
- be sun4m_init
- cmp %l1, 's'
- be sun4m_init
- cmp %l1, 'd'
- be sun4d_init
- cmp %l1, 'e'
- be no_sun4e_here ! Could be a sun4e.
- nop
- b no_sun4u_here ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
- nop
/* CPUID in bootbus can be found at PA 0xff0140000 */
#define SUN4D_BOOTBUS_CPUID 0xf0140000
@@ -431,9 +477,9 @@ sun4m_init:
/* This sucks, apparently this makes Vikings call prom panic, will fix later */
2:
rd %psr, %o1
- srl %o1, 28, %o1 ! Get a type of the CPU
+ srl %o1, PSR_IMPL_SHIFT, %o1 ! Get a type of the CPU
- subcc %o1, 4, %g0 ! TI: Viking or MicroSPARC
+ subcc %o1, PSR_IMPL_TI, %g0 ! TI: Viking or MicroSPARC
be continue_boot
nop
@@ -459,10 +505,6 @@ continue_boot:
/* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's
* show-time!
*/
-
- sethi %hi(cputyp), %o0
- st %g4, [%o0 + %lo(cputyp)]
-
/* Turn on Supervisor, EnableFloating, and all the PIL bits.
* Also puts us in register window zero with traps off.
*/
@@ -480,7 +522,7 @@ continue_boot:
set __bss_start , %o0 ! First address of BSS
set _end , %o1 ! Last address of BSS
add %o0, 0x1, %o0
-1:
+1:
stb %g0, [%o0]
subcc %o0, %o1, %g0
bl 1b
@@ -546,7 +588,7 @@ continue_boot:
set dest, %g2; \
ld [%g5], %g4; \
st %g4, [%g2];
-
+
/* Patch for window spills... */
PATCH_INSN(spnwin_patch1_7win, spnwin_patch1)
PATCH_INSN(spnwin_patch2_7win, spnwin_patch2)
@@ -597,7 +639,7 @@ continue_boot:
st %g4, [%g5 + 0x18]
st %g4, [%g5 + 0x1c]
-2:
+2:
sethi %hi(nwindows), %g4
st %g3, [%g4 + %lo(nwindows)] ! store final value
sub %g3, 0x1, %g3
@@ -617,18 +659,12 @@ continue_boot:
wr %g3, PSR_ET, %psr
WRITE_PAUSE
- /* First we call prom_init() to set up PROMLIB, then
- * off to start_kernel().
- */
-
+ /* Call sparc32_start_kernel(struct linux_romvec *rp) */
sethi %hi(prom_vector_p), %g5
ld [%g5 + %lo(prom_vector_p)], %o0
- call prom_init
+ call sparc32_start_kernel
nop
- call start_kernel
- nop
-
/* We should not get here. */
call halt_me
nop
@@ -659,7 +695,7 @@ sun4u_5:
.asciz "write"
.align 4
sun4u_6:
- .asciz "\n\rOn sun4u you have to use UltraLinux (64bit) kernel\n\rand not a 32bit sun4[cdem] version\n\r\n\r"
+ .asciz "\n\rOn sun4u you have to use sparc64 kernel\n\rand not a sparc32 version\n\r\n\r"
sun4u_6e:
.align 4
sun4u_7:
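Editor's note on the new cputypval dispatch in head_32.S above: it reduces to a small decision on the PROM "compatible" string, where a leading 'l' means LEON and otherwise the fifth character selects sun4m/sun4s (both handled as sun4m), sun4d, sun4e (unsupported) or anything else (treated as sun4u and refused). A C rendering of that logic, purely for illustration; the names below are not from the patch.

#include <stdio.h>

enum boot_model { M_LEON, M_SUN4M, M_SUN4D, M_SUN4E_UNSUPPORTED, M_OTHER_UNSUPPORTED };

static enum boot_model classify(const char *cputypval)
{
	if (cputypval[0] == 'l')                /* "leon" */
		return M_LEON;

	switch (cputypval[4]) {                 /* "sun4m", "sun4s", "sun4d", "sun4e", ... */
	case 'm':
	case 's':
		return M_SUN4M;                 /* sun4s is handled like sun4m */
	case 'd':
		return M_SUN4D;
	case 'e':
		return M_SUN4E_UNSUPPORTED;     /* halts with a message in the real code */
	default:
		return M_OTHER_UNSUPPORTED;     /* e.g. a sun4u needs the sparc64 kernel */
	}
}

int main(void)
{
	printf("%d %d\n", classify("leon"), classify("sun4m"));   /* prints 0 1 */
	return 0;
}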
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index a2846f5e32d8..0f094db918c7 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -55,17 +55,13 @@ const struct sparc32_dma_ops *sparc32_dma_ops;
/* This function must make sure that caches and memory are coherent after DMA
* On LEON systems without cache snooping it flushes the entire D-CACHE.
*/
-#ifndef CONFIG_SPARC_LEON
static inline void dma_make_coherent(unsigned long pa, unsigned long len)
{
+ if (sparc_cpu_model == sparc_leon) {
+ if (!sparc_leon3_snooping_enabled())
+ leon_flush_dcache_all();
+ }
}
-#else
-static inline void dma_make_coherent(unsigned long pa, unsigned long len)
-{
- if (!sparc_leon3_snooping_enabled())
- leon_flush_dcache_all();
-}
-#endif
static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
@@ -427,9 +423,6 @@ arch_initcall(sparc_register_ioport);
#endif /* CONFIG_SBUS */
-/* LEON reuses PCI DMA ops */
-#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
-
/* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be valid struct pci_dev pointer for PCI devices.
*/
@@ -657,14 +650,11 @@ struct dma_map_ops pci32_dma_ops = {
};
EXPORT_SYMBOL(pci32_dma_ops);
-#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */
+/* leon re-uses pci32_dma_ops */
+struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
+EXPORT_SYMBOL(leon_dma_ops);
-#ifdef CONFIG_SPARC_LEON
-struct dma_map_ops *dma_ops = &pci32_dma_ops;
-#elif defined(CONFIG_SBUS)
struct dma_map_ops *dma_ops = &sbus_dma_ops;
-#endif
-
EXPORT_SYMBOL(dma_ops);
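The ioport.c change follows the same theme: the two #ifdef'ed copies of dma_make_coherent() collapse into one function that tests sparc_cpu_model at run time, so a single image can serve LEON and non-LEON machines. A minimal standalone sketch of the pattern; all names below are placeholders, not the kernel's API.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for sparc_cpu_model and the LEON cache helpers. */
    enum cpu_model { MODEL_SUN4M, MODEL_LEON };
    static enum cpu_model cpu_model = MODEL_LEON;

    static bool snooping_enabled(void) { return false; }
    static void flush_dcache_all(void) { puts("flushing D-cache"); }

    /* One definition for every build: the decision moves from #ifdef to runtime. */
    static void make_coherent_after_dma(void)
    {
        if (cpu_model == MODEL_LEON) {
            if (!snooping_enabled())
                flush_dcache_all();
        }
        /* Non-LEON SRMMU systems need no extra work here. */
    }

    int main(void)
    {
        make_coherent_after_dma();
        return 0;
    }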
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index ae04914f7774..c145f6fd123b 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -241,9 +241,6 @@ int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler)
unsigned int cpu_irq;
int err;
-#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
- struct tt_entry *trap_table;
-#endif
err = request_irq(irq, irq_handler, 0, "floppy", NULL);
if (err)
@@ -264,13 +261,18 @@ int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler)
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
INSTANTIATE(sparc_ttable)
-#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
- trap_table = &trapbase_cpu1;
- INSTANTIATE(trap_table)
- trap_table = &trapbase_cpu2;
- INSTANTIATE(trap_table)
- trap_table = &trapbase_cpu3;
- INSTANTIATE(trap_table)
+
+#if defined CONFIG_SMP
+ if (sparc_cpu_model != sparc_leon) {
+ struct tt_entry *trap_table;
+
+ trap_table = &trapbase_cpu1;
+ INSTANTIATE(trap_table)
+ trap_table = &trapbase_cpu2;
+ INSTANTIATE(trap_table)
+ trap_table = &trapbase_cpu3;
+ INSTANTIATE(trap_table)
+ }
#endif
#undef INSTANTIATE
/*
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index a86372d34587..291bb5de9ce0 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -26,6 +26,9 @@ static inline unsigned long kimage_addr_to_ra(const char *p)
#endif
#ifdef CONFIG_SPARC32
+/* setup_32.c */
+void sparc32_start_kernel(struct linux_romvec *rp);
+
/* cpu.c */
extern void cpu_probe(void);
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 77c1b916e4dd..e34e2c40c060 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -23,6 +23,7 @@
#include <asm/smp.h>
#include <asm/setup.h>
+#include "kernel.h"
#include "prom.h"
#include "irq.h"
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index 519ca923f59f..4e174321097d 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -7,6 +7,7 @@
#include <linux/pm.h>
#include <asm/leon_amba.h>
+#include <asm/cpu_type.h>
#include <asm/leon.h>
/* List of Systems that need fixup instructions around power-down instruction */
@@ -65,13 +66,15 @@ void pmc_leon_idle(void)
/* Install LEON Power Down function */
static int __init leon_pmc_install(void)
{
- /* Assign power management IDLE handler */
- if (pmc_leon_need_fixup())
- pm_idle = pmc_leon_idle_fixup;
- else
- pm_idle = pmc_leon_idle;
+ if (sparc_cpu_model == sparc_leon) {
+ /* Assign power management IDLE handler */
+ if (pmc_leon_need_fixup())
+ pm_idle = pmc_leon_idle_fixup;
+ else
+ pm_idle = pmc_leon_idle;
- printk(KERN_INFO "leon: power management initialized\n");
+ printk(KERN_INFO "leon: power management initialized\n");
+ }
return 0;
}
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index a469090faf9f..0f3fb6d9c8ef 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -48,15 +48,13 @@
#include "kernel.h"
-#ifdef CONFIG_SPARC_LEON
-
#include "irq.h"
extern ctxd_t *srmmu_ctx_table_phys;
static int smp_processors_ready;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpumask_t smp_commenced_mask;
-void __init leon_configure_cache_smp(void);
+void __cpuinit leon_configure_cache_smp(void);
static void leon_ipi_init(void);
/* IRQ number of LEON IPIs */
@@ -123,7 +121,7 @@ void __cpuinit leon_callin(void)
extern struct linux_prom_registers smp_penguin_ctable;
-void __init leon_configure_cache_smp(void)
+void __cpuinit leon_configure_cache_smp(void)
{
unsigned long cfg = sparc_leon3_get_dcachecfg();
int me = smp_processor_id();
@@ -507,5 +505,3 @@ void __init leon_init_smp(void)
sparc32_ipi_ops = &leon_ipi_ops;
}
-
-#endif /* CONFIG_SPARC_LEON */
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index fe6787cc62fc..cb36e82dcd5d 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -65,50 +65,25 @@ extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
struct task_struct *last_task_used_math = NULL;
struct thread_info *current_set[NR_CPUS];
-#ifndef CONFIG_SMP
-
/*
* the idle loop on a Sparc... ;)
*/
void cpu_idle(void)
{
- /* endless idle loop with no priority at all */
- for (;;) {
- if (pm_idle) {
- while (!need_resched())
- (*pm_idle)();
- } else {
- while (!need_resched())
- cpu_relax();
- }
- schedule_preempt_disabled();
- }
-}
-
-#else
+ set_thread_flag(TIF_POLLING_NRFLAG);
-/* This is being executed in task 0 'user space'. */
-void cpu_idle(void)
-{
- set_thread_flag(TIF_POLLING_NRFLAG);
/* endless idle loop with no priority at all */
- while(1) {
-#ifdef CONFIG_SPARC_LEON
- if (pm_idle) {
- while (!need_resched())
+ for (;;) {
+ while (!need_resched()) {
+ if (pm_idle)
(*pm_idle)();
- } else
-#endif
- {
- while (!need_resched())
+ else
cpu_relax();
}
schedule_preempt_disabled();
}
}
-#endif
-
/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
void machine_halt(void)
{
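With the SMP/UP and LEON variants folded together, cpu_idle() is reduced to one loop that calls the optional pm_idle hook when a platform (such as LEON power management) has installed one, and otherwise falls back to cpu_relax(). A small sketch of that function-pointer fallback, with invented names:

    #include <stdio.h>

    /* pm_idle-style hook: optional low-power routine installed at runtime. */
    static void (*pm_idle_hook)(void);

    static void default_relax(void)    { puts("cpu_relax"); }
    static void leon_power_down(void)  { puts("enter LEON power-down"); }

    static void idle_once(void)
    {
        if (pm_idle_hook)
            (*pm_idle_hook)();      /* platform-specific idle routine */
        else
            default_relax();        /* generic fallback */
    }

    int main(void)
    {
        idle_once();                    /* no hook installed yet: relax */
        pm_idle_hook = leon_power_down; /* e.g. set by a pmc-install initcall */
        idle_once();                    /* now uses the platform hook */
        return 0;
    }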
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 741df916c124..1303021748c8 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -23,7 +23,6 @@
#include <linux/of_pdt.h>
#include <asm/prom.h>
#include <asm/oplib.h>
-#include <asm/leon.h>
#include "prom.h"
diff --git a/arch/sparc/kernel/rtrap_32.S b/arch/sparc/kernel/rtrap_32.S
index 7abc24e2bf1a..6c34de0c2abd 100644
--- a/arch/sparc/kernel/rtrap_32.S
+++ b/arch/sparc/kernel/rtrap_32.S
@@ -231,11 +231,14 @@ srmmu_rett_stackchk:
cmp %g1, %fp
bleu ret_trap_user_stack_is_bolixed
mov AC_M_SFSR, %g1
- lda [%g1] ASI_M_MMUREGS, %g0
+LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g0)
- lda [%g0] ASI_M_MMUREGS, %g1
+LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %g1)
+SUN_PI_(lda [%g0] ASI_M_MMUREGS, %g1)
or %g1, 0x2, %g1
- sta %g1, [%g0] ASI_M_MMUREGS
+LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS)
restore %g0, %g0, %g0
@@ -244,13 +247,16 @@ srmmu_rett_stackchk:
save %g0, %g0, %g0
andn %g1, 0x2, %g1
- sta %g1, [%g0] ASI_M_MMUREGS
+LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS)
mov AC_M_SFAR, %g2
- lda [%g2] ASI_M_MMUREGS, %g2
+LEON_PI(lda [%g2] ASI_LEON_MMUREGS, %g2)
+SUN_PI_(lda [%g2] ASI_M_MMUREGS, %g2)
mov AC_M_SFSR, %g1
- lda [%g1] ASI_M_MMUREGS, %g1
+LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g1)
+SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g1)
andcc %g1, 0x2, %g0
be ret_trap_userwins_ok
nop
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index c052313f4dc5..efe3e64bba38 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -32,6 +32,7 @@
#include <linux/cpu.h>
#include <linux/kdebug.h>
#include <linux/export.h>
+#include <linux/start_kernel.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -45,6 +46,7 @@
#include <asm/cpudata.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
+#include <asm/sections.h>
#include "kernel.h"
@@ -237,28 +239,42 @@ static void __init per_cpu_patch(void)
}
}
+struct leon_1insn_patch_entry {
+ unsigned int addr;
+ unsigned int insn;
+};
+
enum sparc_cpu sparc_cpu_model;
EXPORT_SYMBOL(sparc_cpu_model);
-struct tt_entry *sparc_ttable;
+static __init void leon_patch(void)
+{
+ struct leon_1insn_patch_entry *start = (void *)__leon_1insn_patch;
+ struct leon_1insn_patch_entry *end = (void *)__leon_1insn_patch_end;
-struct pt_regs fake_swapper_regs;
+ /* Default instruction is leon - no patching */
+ if (sparc_cpu_model == sparc_leon)
+ return;
-void __init setup_arch(char **cmdline_p)
-{
- int i;
- unsigned long highest_paddr;
+ while (start < end) {
+ unsigned long addr = start->addr;
- sparc_ttable = (struct tt_entry *) &trapbase;
+ *(unsigned int *)(addr) = start->insn;
+ flushi(addr);
- /* Initialize PROM console and command line. */
- *cmdline_p = prom_getbootargs();
- strcpy(boot_command_line, *cmdline_p);
- parse_early_param();
+ start++;
+ }
+}
- boot_flags_init(*cmdline_p);
+struct tt_entry *sparc_ttable;
+struct pt_regs fake_swapper_regs;
- register_console(&prom_early_console);
+/* Called from head_32.S - before we have setup anything
+ * in the kernel. Be very careful with what you do here.
+ */
+void __init sparc32_start_kernel(struct linux_romvec *rp)
+{
+ prom_init(rp);
/* Set sparc_cpu_model */
sparc_cpu_model = sun_unknown;
@@ -275,6 +291,26 @@ void __init setup_arch(char **cmdline_p)
if (!strncmp(&cputypval[0], "leon" , 4))
sparc_cpu_model = sparc_leon;
+ leon_patch();
+ start_kernel();
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ int i;
+ unsigned long highest_paddr;
+
+ sparc_ttable = (struct tt_entry *) &trapbase;
+
+ /* Initialize PROM console and command line. */
+ *cmdline_p = prom_getbootargs();
+ strcpy(boot_command_line, *cmdline_p);
+ parse_early_param();
+
+ boot_flags_init(*cmdline_p);
+
+ register_console(&prom_early_console);
+
printk("ARCH: ");
switch(sparc_cpu_model) {
case sun4m:
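sparc32_start_kernel() above runs leon_patch() before start_kernel(): the image is assembled with LEON encodings by default, and on non-LEON machines every entry of the .leon_1insn_patch table overwrites one instruction word with the Sun encoding. Below is a hedged standalone sketch of that single-instruction patch-table idea; the struct, array, and values are illustrative, not the kernel's.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative patch table, mirroring the leon_1insn_patch_entry idea:
     * each entry names a target slot and the replacement instruction word
     * to install when we are not running on LEON.
     */
    struct patch_entry {
        unsigned int slot;   /* index of the instruction word to rewrite */
        uint32_t insn;       /* replacement encoding */
    };

    static uint32_t text[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };

    static const struct patch_entry patches[] = {
        { 1, 0xaaaaaaaa },
        { 3, 0xbbbbbbbb },
    };

    static void apply_patches(int is_leon)
    {
        /* Default encoding in the image is the LEON one - nothing to do. */
        if (is_leon)
            return;
        for (unsigned i = 0; i < sizeof(patches) / sizeof(patches[0]); i++)
            text[patches[i].slot] = patches[i].insn;
        /* The kernel additionally flushes the I-cache (flushi) per patched word. */
    }

    int main(void)
    {
        apply_patches(0);
        for (unsigned i = 0; i < 4; i++)
            printf("text[%u] = 0x%08x\n", i, (unsigned)text[i]);
        return 0;
    }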
diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S
index 7364ddc9e5aa..af27acab4486 100644
--- a/arch/sparc/kernel/trampoline_32.S
+++ b/arch/sparc/kernel/trampoline_32.S
@@ -149,8 +149,6 @@ sun4d_cpu_startup:
b,a smp_do_cpu_idle
-#ifdef CONFIG_SPARC_LEON
-
__CPUINIT
.align 4
.global leon_smp_cpu_startup, smp_penguin_ctable
@@ -161,7 +159,7 @@ leon_smp_cpu_startup:
ld [%g1+4],%g1
srl %g1,4,%g1
set 0x00000100,%g5 /* SRMMU_CTXTBL_PTR */
- sta %g1, [%g5] ASI_M_MMUREGS
+ sta %g1, [%g5] ASI_LEON_MMUREGS
/* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
set (PSR_PIL | PSR_S | PSR_PS), %g1
@@ -207,5 +205,3 @@ leon_smp_cpu_startup:
nop
b,a smp_do_cpu_idle
-
-#endif
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index c72fdf55e1c1..3b05e6697710 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2054,7 +2054,7 @@ void do_fpieee(struct pt_regs *regs)
do_fpe_common(regs);
}
-extern int do_mathemu(struct pt_regs *, struct fpustate *);
+extern int do_mathemu(struct pt_regs *, struct fpustate *, bool);
void do_fpother(struct pt_regs *regs)
{
@@ -2068,7 +2068,7 @@ void do_fpother(struct pt_regs *regs)
switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
case (2 << 14): /* unfinished_FPop */
case (3 << 14): /* unimplemented_FPop */
- ret = do_mathemu(regs, f);
+ ret = do_mathemu(regs, f, false);
break;
}
if (ret)
@@ -2308,10 +2308,12 @@ void do_illegal_instruction(struct pt_regs *regs)
} else {
struct fpustate *f = FPUSTATE;
- /* XXX maybe verify XFSR bits like
- * XXX do_fpother() does?
+ /* On UltraSPARC T2 and later, FPU insns which
+ * are not implemented in HW signal an illegal
+ * instruction trap and do not set the FP Trap
+ * Trap in the %fsr to unimplemented_FPop.
*/
- if (do_mathemu(regs, f))
+ if (do_mathemu(regs, f, true))
return;
}
}
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 0e1605697b49..89c2c29f154b 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -107,6 +107,11 @@ SECTIONS
*(.sun4v_2insn_patch)
__sun4v_2insn_patch_end = .;
}
+ .leon_1insn_patch : {
+ __leon_1insn_patch = .;
+ *(.leon_1insn_patch)
+ __leon_1insn_patch_end = .;
+ }
.swapper_tsb_phys_patch : {
__swapper_tsb_phys_patch = .;
*(.swapper_tsb_phys_patch)
diff --git a/arch/sparc/kernel/wof.S b/arch/sparc/kernel/wof.S
index 4c2de3cf309b..28a7bc69f82b 100644
--- a/arch/sparc/kernel/wof.S
+++ b/arch/sparc/kernel/wof.S
@@ -332,24 +332,30 @@ spwin_srmmu_stackchk:
mov AC_M_SFSR, %glob_tmp
/* Clear the fault status and turn on the no_fault bit. */
- lda [%glob_tmp] ASI_M_MMUREGS, %g0 ! eat SFSR
+LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0) ! eat SFSR
+SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0) ! eat SFSR
- lda [%g0] ASI_M_MMUREGS, %glob_tmp ! read MMU control
+LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %glob_tmp) ! read MMU control
+SUN_PI_(lda [%g0] ASI_M_MMUREGS, %glob_tmp) ! read MMU control
or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
- sta %glob_tmp, [%g0] ASI_M_MMUREGS ! set it
+LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS) ! set it
+SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS) ! set it
/* Dump the registers and cross fingers. */
STORE_WINDOW(sp)
/* Clear the no_fault bit and check the status. */
andn %glob_tmp, 0x2, %glob_tmp
- sta %glob_tmp, [%g0] ASI_M_MMUREGS
+LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS)
mov AC_M_SFAR, %glob_tmp
- lda [%glob_tmp] ASI_M_MMUREGS, %g0
+LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0)
mov AC_M_SFSR, %glob_tmp
- lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp
+LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)
+SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp)
andcc %glob_tmp, 0x2, %g0 ! did we fault?
be,a spwin_finish_up + 0x4 ! cool beans, success
restore %g0, %g0, %g0
diff --git a/arch/sparc/kernel/wuf.S b/arch/sparc/kernel/wuf.S
index 9fde91a249e0..2c21cc59683e 100644
--- a/arch/sparc/kernel/wuf.S
+++ b/arch/sparc/kernel/wuf.S
@@ -254,16 +254,19 @@ srmmu_fwin_stackchk:
mov AC_M_SFSR, %l4
cmp %l5, %sp
bleu fwin_user_stack_is_bolixed
- lda [%l4] ASI_M_MMUREGS, %g0 ! clear fault status
+LEON_PI( lda [%l4] ASI_LEON_MMUREGS, %g0) ! clear fault status
+SUN_PI_( lda [%l4] ASI_M_MMUREGS, %g0) ! clear fault status
/* The technique is, turn off faults on this processor,
* just let the load rip, then check the sfsr to see if
* a fault did occur. Then we turn on fault traps again
* and branch conditionally based upon what happened.
*/
- lda [%g0] ASI_M_MMUREGS, %l5 ! read mmu-ctrl reg
+LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %l5) ! read mmu-ctrl reg
+SUN_PI_(lda [%g0] ASI_M_MMUREGS, %l5) ! read mmu-ctrl reg
or %l5, 0x2, %l5 ! turn on no-fault bit
- sta %l5, [%g0] ASI_M_MMUREGS ! store it
+LEON_PI(sta %l5, [%g0] ASI_LEON_MMUREGS) ! store it
+SUN_PI_(sta %l5, [%g0] ASI_M_MMUREGS) ! store it
/* Cross fingers and go for it. */
LOAD_WINDOW(sp)
@@ -275,18 +278,22 @@ srmmu_fwin_stackchk:
/* LOCATION: Window 'T' */
- lda [%g0] ASI_M_MMUREGS, %twin_tmp1 ! load mmu-ctrl again
- andn %twin_tmp1, 0x2, %twin_tmp1 ! clear no-fault bit
- sta %twin_tmp1, [%g0] ASI_M_MMUREGS ! store it
+LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
+SUN_PI_(lda [%g0] ASI_M_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
+ andn %twin_tmp1, 0x2, %twin_tmp1 ! clear no-fault bit
+LEON_PI(sta %twin_tmp1, [%g0] ASI_LEON_MMUREGS) ! store it
+SUN_PI_(sta %twin_tmp1, [%g0] ASI_M_MMUREGS) ! store it
mov AC_M_SFAR, %twin_tmp2
- lda [%twin_tmp2] ASI_M_MMUREGS, %g0 ! read fault address
+LEON_PI(lda [%twin_tmp2] ASI_LEON_MMUREGS, %g0) ! read fault address
+SUN_PI_(lda [%twin_tmp2] ASI_M_MMUREGS, %g0) ! read fault address
mov AC_M_SFSR, %twin_tmp2
- lda [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2 ! read fault status
- andcc %twin_tmp2, 0x2, %g0 ! did fault occur?
+LEON_PI(lda [%twin_tmp2] ASI_LEON_MMUREGS, %twin_tmp2) ! read fault status
+SUN_PI_(lda [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2) ! read fault status
+ andcc %twin_tmp2, 0x2, %g0 ! did fault occur?
- bne 1f ! yep, cleanup
+ bne 1f ! yep, cleanup
nop
wr %t_psr, 0x0, %psr
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 389628f50a15..dff4096f3dec 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -10,7 +10,6 @@ lib-y += strlen.o
lib-y += checksum_$(BITS).o
lib-$(CONFIG_SPARC32) += blockops.o
lib-y += memscan_$(BITS).o memcmp.o strncmp_$(BITS).o
-lib-y += strncpy_from_user_$(BITS).o strlen_user_$(BITS).o
lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
lib-$(CONFIG_SPARC32) += copy_user.o locks.o
lib-$(CONFIG_SPARC64) += atomic_64.o
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 2dc30875c8bc..3b31218cafc6 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -15,8 +15,6 @@
/* string functions */
EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(__strlen_user);
-EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(strncmp);
/* mem* functions */
@@ -33,9 +31,6 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__bzero);
-/* Moving data to/from/in userspace. */
-EXPORT_SYMBOL(__strncpy_from_user);
-
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial);
diff --git a/arch/sparc/lib/strlen_user_32.S b/arch/sparc/lib/strlen_user_32.S
deleted file mode 100644
index 8c8a371df3c9..000000000000
--- a/arch/sparc/lib/strlen_user_32.S
+++ /dev/null
@@ -1,109 +0,0 @@
-/* strlen_user.S: Sparc optimized strlen_user code
- *
- * Return length of string in userspace including terminating 0
- * or 0 for error
- *
- * Copyright (C) 1991,1996 Free Software Foundation
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#define LO_MAGIC 0x01010101
-#define HI_MAGIC 0x80808080
-
-10:
- ldub [%o0], %o5
- cmp %o5, 0
- be 1f
- add %o0, 1, %o0
- andcc %o0, 3, %g0
- be 4f
- or %o4, %lo(HI_MAGIC), %o3
-11:
- ldub [%o0], %o5
- cmp %o5, 0
- be 2f
- add %o0, 1, %o0
- andcc %o0, 3, %g0
- be 5f
- sethi %hi(LO_MAGIC), %o4
-12:
- ldub [%o0], %o5
- cmp %o5, 0
- be 3f
- add %o0, 1, %o0
- b 13f
- or %o4, %lo(LO_MAGIC), %o2
-1:
- retl
- mov 1, %o0
-2:
- retl
- mov 2, %o0
-3:
- retl
- mov 3, %o0
-
- .align 4
- .global __strlen_user, __strnlen_user
-__strlen_user:
- sethi %hi(32768), %o1
-__strnlen_user:
- mov %o1, %g1
- mov %o0, %o1
- andcc %o0, 3, %g0
- bne 10b
- sethi %hi(HI_MAGIC), %o4
- or %o4, %lo(HI_MAGIC), %o3
-4:
- sethi %hi(LO_MAGIC), %o4
-5:
- or %o4, %lo(LO_MAGIC), %o2
-13:
- ld [%o0], %o5
-2:
- sub %o5, %o2, %o4
- andcc %o4, %o3, %g0
- bne 82f
- add %o0, 4, %o0
- sub %o0, %o1, %g2
-81: cmp %g2, %g1
- blu 13b
- mov %o0, %o4
- ba,a 1f
-
- /* Check every byte. */
-82: srl %o5, 24, %g5
- andcc %g5, 0xff, %g0
- be 1f
- add %o0, -3, %o4
- srl %o5, 16, %g5
- andcc %g5, 0xff, %g0
- be 1f
- add %o4, 1, %o4
- srl %o5, 8, %g5
- andcc %g5, 0xff, %g0
- be 1f
- add %o4, 1, %o4
- andcc %o5, 0xff, %g0
- bne 81b
- sub %o0, %o1, %g2
-
- add %o4, 1, %o4
-1:
- retl
- sub %o4, %o1, %o0
-
- .section .fixup,#alloc,#execinstr
- .align 4
-9:
- retl
- clr %o0
-
- .section __ex_table,#alloc
- .align 4
-
- .word 10b, 9b
- .word 11b, 9b
- .word 12b, 9b
- .word 13b, 9b
diff --git a/arch/sparc/lib/strlen_user_64.S b/arch/sparc/lib/strlen_user_64.S
deleted file mode 100644
index c3df71fa4928..000000000000
--- a/arch/sparc/lib/strlen_user_64.S
+++ /dev/null
@@ -1,97 +0,0 @@
-/* strlen_user.S: Sparc64 optimized strlen_user code
- *
- * Return length of string in userspace including terminating 0
- * or 0 for error
- *
- * Copyright (C) 1991,1996 Free Software Foundation
- * Copyright (C) 1996,1999 David S. Miller (davem@redhat.com)
- * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#include <linux/linkage.h>
-#include <asm/asi.h>
-
-#define LO_MAGIC 0x01010101
-#define HI_MAGIC 0x80808080
-
- .align 4
-ENTRY(__strlen_user)
- sethi %hi(32768), %o1
-ENTRY(__strnlen_user)
- mov %o1, %g1
- mov %o0, %o1
- andcc %o0, 3, %g0
- be,pt %icc, 9f
- sethi %hi(HI_MAGIC), %o4
-10: lduba [%o0] %asi, %o5
- brz,pn %o5, 21f
- add %o0, 1, %o0
- andcc %o0, 3, %g0
- be,pn %icc, 4f
- or %o4, %lo(HI_MAGIC), %o3
-11: lduba [%o0] %asi, %o5
- brz,pn %o5, 22f
- add %o0, 1, %o0
- andcc %o0, 3, %g0
- be,pt %icc, 13f
- srl %o3, 7, %o2
-12: lduba [%o0] %asi, %o5
- brz,pn %o5, 23f
- add %o0, 1, %o0
- ba,pt %icc, 2f
-15: lda [%o0] %asi, %o5
-9: or %o4, %lo(HI_MAGIC), %o3
-4: srl %o3, 7, %o2
-13: lda [%o0] %asi, %o5
-2: sub %o5, %o2, %o4
- andcc %o4, %o3, %g0
- bne,pn %icc, 82f
- add %o0, 4, %o0
- sub %o0, %o1, %g2
-81: cmp %g2, %g1
- blu,pt %icc, 13b
- mov %o0, %o4
- ba,a,pt %xcc, 1f
-
- /* Check every byte. */
-82: srl %o5, 24, %g7
- andcc %g7, 0xff, %g0
- be,pn %icc, 1f
- add %o0, -3, %o4
- srl %o5, 16, %g7
- andcc %g7, 0xff, %g0
- be,pn %icc, 1f
- add %o4, 1, %o4
- srl %o5, 8, %g7
- andcc %g7, 0xff, %g0
- be,pn %icc, 1f
- add %o4, 1, %o4
- andcc %o5, 0xff, %g0
- bne,pt %icc, 81b
- sub %o0, %o1, %g2
- add %o4, 1, %o4
-1: retl
- sub %o4, %o1, %o0
-21: retl
- mov 1, %o0
-22: retl
- mov 2, %o0
-23: retl
- mov 3, %o0
-ENDPROC(__strlen_user)
-ENDPROC(__strnlen_user)
-
- .section .fixup,#alloc,#execinstr
- .align 4
-30:
- retl
- clr %o0
-
- .section __ex_table,"a"
- .align 4
-
- .word 10b, 30b
- .word 11b, 30b
- .word 12b, 30b
- .word 15b, 30b
- .word 13b, 30b
diff --git a/arch/sparc/lib/strncpy_from_user_32.S b/arch/sparc/lib/strncpy_from_user_32.S
deleted file mode 100644
index db0ed2964bdb..000000000000
--- a/arch/sparc/lib/strncpy_from_user_32.S
+++ /dev/null
@@ -1,47 +0,0 @@
-/* strncpy_from_user.S: Sparc strncpy from userspace.
- *
- * Copyright(C) 1996 David S. Miller
- */
-
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm/errno.h>
-
- .text
-
- /* Must return:
- *
- * -EFAULT for an exception
- * count if we hit the buffer limit
- * bytes copied if we hit a null byte
- */
-
-ENTRY(__strncpy_from_user)
- /* %o0=dest, %o1=src, %o2=count */
- mov %o2, %o3
-1:
- subcc %o2, 1, %o2
- bneg 2f
- nop
-10:
- ldub [%o1], %o4
- add %o0, 1, %o0
- cmp %o4, 0
- add %o1, 1, %o1
- bne 1b
- stb %o4, [%o0 - 1]
-2:
- add %o2, 1, %o0
- retl
- sub %o3, %o0, %o0
-ENDPROC(__strncpy_from_user)
-
- .section .fixup,#alloc,#execinstr
- .align 4
-4:
- retl
- mov -EFAULT, %o0
-
- .section __ex_table,#alloc
- .align 4
- .word 10b, 4b
diff --git a/arch/sparc/lib/strncpy_from_user_64.S b/arch/sparc/lib/strncpy_from_user_64.S
deleted file mode 100644
index d1246b713077..000000000000
--- a/arch/sparc/lib/strncpy_from_user_64.S
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * strncpy_from_user.S: Sparc64 strncpy from userspace.
- *
- * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
- */
-
-#include <linux/linkage.h>
-#include <asm/asi.h>
-#include <asm/errno.h>
-
- .data
- .align 8
-0: .xword 0x0101010101010101
-
- .text
-
- /* Must return:
- *
- * -EFAULT for an exception
- * count if we hit the buffer limit
- * bytes copied if we hit a null byte
- * (without the null byte)
- *
- * This implementation assumes:
- * %o1 is 8 aligned => !(%o2 & 7)
- * %o0 is 8 aligned (if not, it will be slooooow, but will work)
- *
- * This is optimized for the common case:
- * in my stats, 90% of src are 8 aligned (even on sparc32)
- * and average length is 18 or so.
- */
-
-ENTRY(__strncpy_from_user)
- /* %o0=dest, %o1=src, %o2=count */
- andcc %o1, 7, %g0 ! IEU1 Group
- bne,pn %icc, 30f ! CTI
- add %o0, %o2, %g3 ! IEU0
-60: ldxa [%o1] %asi, %g1 ! Load Group
- brlez,pn %o2, 10f ! CTI
- mov %o0, %o3 ! IEU0
-50: sethi %hi(0b), %o4 ! IEU0 Group
- ldx [%o4 + %lo(0b)], %o4 ! Load
- sllx %o4, 7, %o5 ! IEU1 Group
-1: sub %g1, %o4, %g2 ! IEU0 Group
- stx %g1, [%o0] ! Store
- add %o0, 8, %o0 ! IEU1
- andcc %g2, %o5, %g0 ! IEU1 Group
- bne,pn %xcc, 5f ! CTI
- add %o1, 8, %o1 ! IEU0
- cmp %o0, %g3 ! IEU1 Group
- bl,a,pt %xcc, 1b ! CTI
-61: ldxa [%o1] %asi, %g1 ! Load
-10: retl ! CTI Group
- mov %o2, %o0 ! IEU0
-5: srlx %g2, 32, %g7 ! IEU0 Group
- sethi %hi(0xff00), %o4 ! IEU1
- andcc %g7, %o5, %g0 ! IEU1 Group
- be,pn %icc, 2f ! CTI
- or %o4, %lo(0xff00), %o4 ! IEU0
- srlx %g1, 48, %g7 ! IEU0 Group
- andcc %g7, %o4, %g0 ! IEU1 Group
- be,pn %icc, 50f ! CTI
- andcc %g7, 0xff, %g0 ! IEU1 Group
- be,pn %icc, 51f ! CTI
- srlx %g1, 32, %g7 ! IEU0
- andcc %g7, %o4, %g0 ! IEU1 Group
- be,pn %icc, 52f ! CTI
- andcc %g7, 0xff, %g0 ! IEU1 Group
- be,pn %icc, 53f ! CTI
-2: andcc %g2, %o5, %g0 ! IEU1 Group
- be,pn %icc, 2f ! CTI
- srl %g1, 16, %g7 ! IEU0
- andcc %g7, %o4, %g0 ! IEU1 Group
- be,pn %icc, 54f ! CTI
- andcc %g7, 0xff, %g0 ! IEU1 Group
- be,pn %icc, 55f ! CTI
- andcc %g1, %o4, %g0 ! IEU1 Group
- be,pn %icc, 56f ! CTI
- andcc %g1, 0xff, %g0 ! IEU1 Group
- be,a,pn %icc, 57f ! CTI
- sub %o0, %o3, %o0 ! IEU0
-2: cmp %o0, %g3 ! IEU1 Group
- bl,a,pt %xcc, 50b ! CTI
-62: ldxa [%o1] %asi, %g1 ! Load
- retl ! CTI Group
- mov %o2, %o0 ! IEU0
-50: sub %o0, %o3, %o0
- retl
- sub %o0, 8, %o0
-51: sub %o0, %o3, %o0
- retl
- sub %o0, 7, %o0
-52: sub %o0, %o3, %o0
- retl
- sub %o0, 6, %o0
-53: sub %o0, %o3, %o0
- retl
- sub %o0, 5, %o0
-54: sub %o0, %o3, %o0
- retl
- sub %o0, 4, %o0
-55: sub %o0, %o3, %o0
- retl
- sub %o0, 3, %o0
-56: sub %o0, %o3, %o0
- retl
- sub %o0, 2, %o0
-57: retl
- sub %o0, 1, %o0
-30: brlez,pn %o2, 3f
- sub %g0, %o2, %o3
- add %o0, %o2, %o0
-63: lduba [%o1] %asi, %o4
-1: add %o1, 1, %o1
- brz,pn %o4, 2f
- stb %o4, [%o0 + %o3]
- addcc %o3, 1, %o3
- bne,pt %xcc, 1b
-64: lduba [%o1] %asi, %o4
-3: retl
- mov %o2, %o0
-2: retl
- add %o2, %o3, %o0
-ENDPROC(__strncpy_from_user)
-
- .section __ex_table,"a"
- .align 4
- .word 60b, __retl_efault
- .word 61b, __retl_efault
- .word 62b, __retl_efault
- .word 63b, __retl_efault
- .word 64b, __retl_efault
- .previous
diff --git a/arch/sparc/lib/usercopy.c b/arch/sparc/lib/usercopy.c
index 14b363fec8a2..5c4284ce1c03 100644
--- a/arch/sparc/lib/usercopy.c
+++ b/arch/sparc/lib/usercopy.c
@@ -1,4 +1,5 @@
#include <linux/module.h>
+#include <linux/kernel.h>
#include <linux/bug.h>
void copy_from_user_overflow(void)
diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c
index 2bbe2f28ad23..1704068da928 100644
--- a/arch/sparc/math-emu/math_64.c
+++ b/arch/sparc/math-emu/math_64.c
@@ -163,7 +163,7 @@ typedef union {
u64 q[2];
} *argp;
-int do_mathemu(struct pt_regs *regs, struct fpustate *f)
+int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap)
{
unsigned long pc = regs->tpc;
unsigned long tstate = regs->tstate;
@@ -218,7 +218,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
case FSQRTS: {
unsigned long x = current_thread_info()->xfsr[0];
- x = (x >> 14) & 0xf;
+ x = (x >> 14) & 0x7;
TYPE(x,1,1,1,1,0,0);
break;
}
@@ -226,7 +226,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
case FSQRTD: {
unsigned long x = current_thread_info()->xfsr[0];
- x = (x >> 14) & 0xf;
+ x = (x >> 14) & 0x7;
TYPE(x,2,1,2,1,0,0);
break;
}
@@ -357,9 +357,17 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
if (type) {
argp rs1 = NULL, rs2 = NULL, rd = NULL;
- freg = (current_thread_info()->xfsr[0] >> 14) & 0xf;
- if (freg != (type >> 9))
- goto err;
+ /* Starting with UltraSPARC-T2, the cpu does not set the FP Trap
+ * Type field in the %fsr to unimplemented_FPop. Nor does it
+ * use the fp_exception_other trap. Instead it signals an
+ * illegal instruction and leaves the FP trap type field of
+ * the %fsr unchanged.
+ */
+ if (!illegal_insn_trap) {
+ int ftt = (current_thread_info()->xfsr[0] >> 14) & 0x7;
+ if (ftt != (type >> 9))
+ goto err;
+ }
current_thread_info()->xfsr[0] &= ~0x1c000;
freg = ((insn >> 14) & 0x1f);
switch (type & 0x3) {
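The math_64.c hunk narrows the FP-trap-type extraction from (x >> 14) & 0xf to & 0x7 and, when emulation is entered via the illegal-instruction trap, skips the ftt consistency check entirely. The ftt field is the 3-bit field at %fsr bits 16:14 (the same bits the code clears with ~0x1c000), so masking with 0xf would pull in an extra, unrelated bit. A tiny worked example of the extraction:

    #include <stdio.h>

    /* ftt (FP trap type) occupies %fsr bits 16:14, i.e. a 3-bit field:
     * shift right by 14 and mask with 0x7. (0x1c000 is exactly bits 16:14.)
     */
    static unsigned int fsr_ftt(unsigned long fsr)
    {
        return (fsr >> 14) & 0x7;
    }

    int main(void)
    {
        unsigned long fsr = 3UL << 14;          /* unimplemented_FPop */
        printf("ftt = %u\n", fsr_ftt(fsr));     /* prints 3 */
        return 0;
    }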
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 69ffd3112fed..30c3eccfdf5a 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -8,8 +8,9 @@ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
obj-y += fault_$(BITS).o
obj-y += init_$(BITS).o
obj-$(CONFIG_SPARC32) += extable.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32) += srmmu_access.o
obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
-obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
+obj-$(CONFIG_SPARC32) += leon_mm.o
# Only used by sparc64
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index 4c67ae6e5023..5bed085a2c17 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -32,7 +32,7 @@ static inline unsigned long leon_get_ctable_ptr(void)
}
-unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
+unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
{
unsigned int ctxtbl;
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 8e97e0305b01..62e3f5773303 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -467,33 +467,6 @@ void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
flush_tlb_all();
}
-/*
- * On the SRMMU we do not have the problems with limited tlb entries
- * for mapping kernel pages, so we just take things from the free page
- * pool. As a side effect we are putting a little too much pressure
- * on the gfp() subsystem. This setup also makes the logic of the
- * iommu mapping code a lot easier as we can transparently handle
- * mappings on the kernel stack without any special code.
- */
-struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
-{
- struct thread_info *ret;
-
- ret = (struct thread_info *)__get_free_pages(GFP_KERNEL,
- THREAD_INFO_ORDER);
-#ifdef CONFIG_DEBUG_STACK_USAGE
- if (ret)
- memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER);
-#endif /* DEBUG_STACK_USAGE */
-
- return ret;
-}
-
-void free_thread_info(struct thread_info *ti)
-{
- free_pages((unsigned long)ti, THREAD_INFO_ORDER);
-}
-
/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
@@ -673,6 +646,23 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
}
}
+/* These flush types are not available on all chips... */
+static inline unsigned long srmmu_probe(unsigned long vaddr)
+{
+ unsigned long retval;
+
+ if (sparc_cpu_model != sparc_leon) {
+
+ vaddr &= PAGE_MASK;
+ __asm__ __volatile__("lda [%1] %2, %0\n\t" :
+ "=r" (retval) :
+ "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
+ } else {
+ retval = leon_swprobe(vaddr, 0);
+ }
+ return retval;
+}
+
/*
* This is much cleaner than poking around physical address space
* looking at the prom's page table directly which is what most
@@ -692,7 +682,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
break; /* probably wrap around */
if(start == 0xfef00000)
start = KADB_DEBUGGER_BEGVM;
- if(!(prompte = srmmu_hwprobe(start))) {
+ if(!(prompte = srmmu_probe(start))) {
start += PAGE_SIZE;
continue;
}
@@ -701,12 +691,12 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
what = 0;
if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
- if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
+ if(srmmu_probe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
what = 1;
}
if(!(start & ~(SRMMU_PGDIR_MASK))) {
- if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
+ if(srmmu_probe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
prompte)
what = 2;
}
@@ -1183,7 +1173,7 @@ static void turbosparc_flush_page_to_ram(unsigned long page)
#ifdef TURBOSPARC_WRITEBACK
volatile unsigned long clear;
- if (srmmu_hwprobe(page))
+ if (srmmu_probe(page))
turbosparc_flush_page_cache(page);
clear = srmmu_get_fstatus();
#endif
diff --git a/arch/sparc/mm/srmmu_access.S b/arch/sparc/mm/srmmu_access.S
new file mode 100644
index 000000000000..d0a67b2c2383
--- /dev/null
+++ b/arch/sparc/mm/srmmu_access.S
@@ -0,0 +1,82 @@
+/* Assembler variants of srmmu access functions.
+ * Implemented in assembler to allow run-time patching.
+ * LEON uses a different ASI for MMUREGS than SUN.
+ *
+ * The leon_1insn_patch infrastructure is used
+ * for the run-time patching.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asmmacro.h>
+#include <asm/pgtsrmmu.h>
+#include <asm/asi.h>
+
+/* unsigned int srmmu_get_mmureg(void) */
+ENTRY(srmmu_get_mmureg)
+LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda [%g0] ASI_M_MMUREGS, %o0)
+ retl
+ nop
+ENDPROC(srmmu_get_mmureg)
+
+/* void srmmu_set_mmureg(unsigned long regval) */
+ENTRY(srmmu_set_mmureg)
+LEON_PI(sta %o0, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta %o0, [%g0] ASI_M_MMUREGS)
+ retl
+ nop
+ENDPROC(srmmu_set_mmureg)
+
+/* void srmmu_set_ctable_ptr(unsigned long paddr) */
+ENTRY(srmmu_set_ctable_ptr)
+ /* paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); */
+ srl %o0, 4, %g1
+ and %g1, SRMMU_CTX_PMASK, %g1
+
+ mov SRMMU_CTXTBL_PTR, %g2
+LEON_PI(sta %g1, [%g2] ASI_LEON_MMUREGS)
+SUN_PI_(sta %g1, [%g2] ASI_M_MMUREGS)
+ retl
+ nop
+ENDPROC(srmmu_set_ctable_ptr)
+
+
+/* void srmmu_set_context(int context) */
+ENTRY(srmmu_set_context)
+ mov SRMMU_CTX_REG, %g1
+LEON_PI(sta %o0, [%g1] ASI_LEON_MMUREGS)
+SUN_PI_(sta %o0, [%g1] ASI_M_MMUREGS)
+ retl
+ nop
+ENDPROC(srmmu_set_context)
+
+
+/* int srmmu_get_context(void) */
+ENTRY(srmmu_get_context)
+ mov SRMMU_CTX_REG, %o0
+LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0)
+ retl
+ nop
+ENDPROC(srmmu_get_context)
+
+
+/* unsigned int srmmu_get_fstatus(void) */
+ENTRY(srmmu_get_fstatus)
+ mov SRMMU_FAULT_STATUS, %o0
+LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0)
+ retl
+ nop
+ENDPROC(srmmu_get_fstatus)
+
+
+/* unsigned int srmmu_get_faddr(void) */
+ENTRY(srmmu_get_faddr)
+ mov SRMMU_FAULT_ADDR, %o0
+LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0)
+ retl
+ nop
+ENDPROC(srmmu_get_faddr)
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 74239dd77e06..fe128816c448 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -14,6 +14,7 @@ config TILE
select HAVE_SYSCALL_WRAPPERS if TILEGX
select SYS_HYPERVISOR
select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ select GENERIC_CLOCKEVENTS
# FIXME: investigate whether we need/want these options.
# select HAVE_IOREMAP_PROT
@@ -47,7 +48,12 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
config SYS_SUPPORTS_HUGETLBFS
def_bool y
-config GENERIC_CLOCKEVENTS
+# Support for additional huge page sizes besides HPAGE_SIZE.
+# The software support is currently only present in the TILE-Gx
+# hypervisor. TILEPro in any case does not support page sizes
+# larger than the default HPAGE_SIZE.
+config HUGETLB_SUPER_PAGES
+ depends on HUGETLB_PAGE && TILEGX
def_bool y
# FIXME: tilegx can implement a more efficient rwsem.
@@ -109,16 +115,14 @@ config HVC_TILE
select HVC_DRIVER
def_bool y
-# Please note: TILE-Gx support is not yet finalized; this is
-# the preliminary support. TILE-Gx drivers are only provided
-# with the alpha or beta test versions for Tilera customers.
config TILEGX
- depends on EXPERIMENTAL
bool "Building with TILE-Gx (64-bit) compiler and toolchain"
+config TILEPRO
+ def_bool !TILEGX
+
config 64BIT
- depends on TILEGX
- def_bool y
+ def_bool TILEGX
config ARCH_DEFCONFIG
string
@@ -139,7 +143,30 @@ config NR_CPUS
smaller kernel memory footprint results from using a smaller
value on chips with fewer tiles.
-source "kernel/time/Kconfig"
+if TILEGX
+
+choice
+ prompt "Kernel page size"
+ default PAGE_SIZE_64KB
+ help
+ This lets you select the page size of the kernel. For best
+ performance on memory-intensive applications, a page size of 64KB
+ is recommended. For workloads involving many small files, many
+ connections, etc., it may be better to select 16KB, which uses
+ memory more efficiently at some cost in TLB performance.
+
+ Note that this option is TILE-Gx specific; currently
+ TILEPro page size is set by rebuilding the hypervisor.
+
+config PAGE_SIZE_16KB
+ bool "16KB"
+
+config PAGE_SIZE_64KB
+ bool "64KB"
+
+endchoice
+
+endif
source "kernel/Kconfig.hz"
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index 9520bc5a4b7f..e20b0a0b64a1 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -34,7 +34,12 @@ LIBGCC_PATH := \
$(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
# Provide the path to use for "make defconfig".
-KBUILD_DEFCONFIG := $(ARCH)_defconfig
+# We default to the newer TILE-Gx architecture if only "tile" is given.
+ifeq ($(ARCH),tile)
+ KBUILD_DEFCONFIG := tilegx_defconfig
+else
+ KBUILD_DEFCONFIG := $(ARCH)_defconfig
+endif
# Used as a file extension when useful, e.g. head_$(BITS).o
# Not needed for (e.g.) "$(CC) -m32" since the compiler automatically
diff --git a/arch/tile/include/arch/spr_def_32.h b/arch/tile/include/arch/spr_def_32.h
index bbc1f4c924ee..78bbce2fb19a 100644
--- a/arch/tile/include/arch/spr_def_32.h
+++ b/arch/tile/include/arch/spr_def_32.h
@@ -65,6 +65,31 @@
#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
#define SPR_FAIL 0x4e09
+#define SPR_IDN_AVAIL_EN 0x3e05
+#define SPR_IDN_CA_DATA 0x0b00
+#define SPR_IDN_DATA_AVAIL 0x0b03
+#define SPR_IDN_DEADLOCK_TIMEOUT 0x3406
+#define SPR_IDN_DEMUX_CA_COUNT 0x0a05
+#define SPR_IDN_DEMUX_COUNT_0 0x0a06
+#define SPR_IDN_DEMUX_COUNT_1 0x0a07
+#define SPR_IDN_DEMUX_CTL 0x0a08
+#define SPR_IDN_DEMUX_QUEUE_SEL 0x0a0a
+#define SPR_IDN_DEMUX_STATUS 0x0a0b
+#define SPR_IDN_DEMUX_WRITE_FIFO 0x0a0c
+#define SPR_IDN_DIRECTION_PROTECT 0x2e05
+#define SPR_IDN_PENDING 0x0a0e
+#define SPR_IDN_REFILL_EN 0x0e05
+#define SPR_IDN_SP_FIFO_DATA 0x0a0f
+#define SPR_IDN_SP_FIFO_SEL 0x0a10
+#define SPR_IDN_SP_FREEZE 0x0a11
+#define SPR_IDN_SP_FREEZE__SP_FRZ_MASK 0x1
+#define SPR_IDN_SP_FREEZE__DEMUX_FRZ_MASK 0x2
+#define SPR_IDN_SP_FREEZE__NON_DEST_EXT_MASK 0x4
+#define SPR_IDN_SP_STATE 0x0a12
+#define SPR_IDN_TAG_0 0x0a13
+#define SPR_IDN_TAG_1 0x0a14
+#define SPR_IDN_TAG_VALID 0x0a15
+#define SPR_IDN_TILE_COORD 0x0a16
#define SPR_INTCTRL_0_STATUS 0x4a07
#define SPR_INTCTRL_1_STATUS 0x4807
#define SPR_INTCTRL_2_STATUS 0x4607
@@ -87,12 +112,36 @@
#define SPR_INTERRUPT_MASK_SET_1_1 0x480e
#define SPR_INTERRUPT_MASK_SET_2_0 0x460c
#define SPR_INTERRUPT_MASK_SET_2_1 0x460d
+#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x6000
+#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x6001
+#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x6002
#define SPR_MPL_DMA_CPL_SET_0 0x5800
#define SPR_MPL_DMA_CPL_SET_1 0x5801
#define SPR_MPL_DMA_CPL_SET_2 0x5802
#define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
#define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
#define SPR_MPL_DMA_NOTIFY_SET_2 0x3802
+#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
+#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
+#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
+#define SPR_MPL_IDN_AVAIL_SET_0 0x3e00
+#define SPR_MPL_IDN_AVAIL_SET_1 0x3e01
+#define SPR_MPL_IDN_AVAIL_SET_2 0x3e02
+#define SPR_MPL_IDN_CA_SET_0 0x3a00
+#define SPR_MPL_IDN_CA_SET_1 0x3a01
+#define SPR_MPL_IDN_CA_SET_2 0x3a02
+#define SPR_MPL_IDN_COMPLETE_SET_0 0x1200
+#define SPR_MPL_IDN_COMPLETE_SET_1 0x1201
+#define SPR_MPL_IDN_COMPLETE_SET_2 0x1202
+#define SPR_MPL_IDN_FIREWALL_SET_0 0x2e00
+#define SPR_MPL_IDN_FIREWALL_SET_1 0x2e01
+#define SPR_MPL_IDN_FIREWALL_SET_2 0x2e02
+#define SPR_MPL_IDN_REFILL_SET_0 0x0e00
+#define SPR_MPL_IDN_REFILL_SET_1 0x0e01
+#define SPR_MPL_IDN_REFILL_SET_2 0x0e02
+#define SPR_MPL_IDN_TIMER_SET_0 0x3400
+#define SPR_MPL_IDN_TIMER_SET_1 0x3401
+#define SPR_MPL_IDN_TIMER_SET_2 0x3402
#define SPR_MPL_INTCTRL_0_SET_0 0x4a00
#define SPR_MPL_INTCTRL_0_SET_1 0x4a01
#define SPR_MPL_INTCTRL_0_SET_2 0x4a02
@@ -102,6 +151,9 @@
#define SPR_MPL_INTCTRL_2_SET_0 0x4600
#define SPR_MPL_INTCTRL_2_SET_1 0x4601
#define SPR_MPL_INTCTRL_2_SET_2 0x4602
+#define SPR_MPL_PERF_COUNT_SET_0 0x4200
+#define SPR_MPL_PERF_COUNT_SET_1 0x4201
+#define SPR_MPL_PERF_COUNT_SET_2 0x4202
#define SPR_MPL_SN_ACCESS_SET_0 0x0800
#define SPR_MPL_SN_ACCESS_SET_1 0x0801
#define SPR_MPL_SN_ACCESS_SET_2 0x0802
@@ -181,6 +233,7 @@
#define SPR_UDN_DEMUX_STATUS 0x0c0d
#define SPR_UDN_DEMUX_WRITE_FIFO 0x0c0e
#define SPR_UDN_DIRECTION_PROTECT 0x3005
+#define SPR_UDN_PENDING 0x0c10
#define SPR_UDN_REFILL_EN 0x1005
#define SPR_UDN_SP_FIFO_DATA 0x0c11
#define SPR_UDN_SP_FIFO_SEL 0x0c12
@@ -195,6 +248,9 @@
#define SPR_UDN_TAG_3 0x0c18
#define SPR_UDN_TAG_VALID 0x0c19
#define SPR_UDN_TILE_COORD 0x0c1a
+#define SPR_WATCH_CTL 0x4209
+#define SPR_WATCH_MASK 0x420a
+#define SPR_WATCH_VAL 0x420b
#endif /* !defined(__ARCH_SPR_DEF_H__) */
diff --git a/arch/tile/include/arch/spr_def_64.h b/arch/tile/include/arch/spr_def_64.h
index cd3e5f95d5fd..0da86faa3370 100644
--- a/arch/tile/include/arch/spr_def_64.h
+++ b/arch/tile/include/arch/spr_def_64.h
@@ -52,6 +52,13 @@
#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
#define SPR_FAIL 0x2707
+#define SPR_IDN_AVAIL_EN 0x1a05
+#define SPR_IDN_DATA_AVAIL 0x0a80
+#define SPR_IDN_DEADLOCK_TIMEOUT 0x1806
+#define SPR_IDN_DEMUX_COUNT_0 0x0a05
+#define SPR_IDN_DEMUX_COUNT_1 0x0a06
+#define SPR_IDN_DIRECTION_PROTECT 0x1405
+#define SPR_IDN_PENDING 0x0a08
#define SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK 0x1
#define SPR_INTCTRL_0_STATUS 0x2505
#define SPR_INTCTRL_1_STATUS 0x2405
@@ -88,9 +95,27 @@
#define SPR_IPI_MASK_SET_0 0x1f0a
#define SPR_IPI_MASK_SET_1 0x1e0a
#define SPR_IPI_MASK_SET_2 0x1d0a
+#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x2100
+#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x2101
+#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x2102
#define SPR_MPL_AUX_TILE_TIMER_SET_0 0x1700
#define SPR_MPL_AUX_TILE_TIMER_SET_1 0x1701
#define SPR_MPL_AUX_TILE_TIMER_SET_2 0x1702
+#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
+#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
+#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
+#define SPR_MPL_IDN_AVAIL_SET_0 0x1a00
+#define SPR_MPL_IDN_AVAIL_SET_1 0x1a01
+#define SPR_MPL_IDN_AVAIL_SET_2 0x1a02
+#define SPR_MPL_IDN_COMPLETE_SET_0 0x0500
+#define SPR_MPL_IDN_COMPLETE_SET_1 0x0501
+#define SPR_MPL_IDN_COMPLETE_SET_2 0x0502
+#define SPR_MPL_IDN_FIREWALL_SET_0 0x1400
+#define SPR_MPL_IDN_FIREWALL_SET_1 0x1401
+#define SPR_MPL_IDN_FIREWALL_SET_2 0x1402
+#define SPR_MPL_IDN_TIMER_SET_0 0x1800
+#define SPR_MPL_IDN_TIMER_SET_1 0x1801
+#define SPR_MPL_IDN_TIMER_SET_2 0x1802
#define SPR_MPL_INTCTRL_0_SET_0 0x2500
#define SPR_MPL_INTCTRL_0_SET_1 0x2501
#define SPR_MPL_INTCTRL_0_SET_2 0x2502
@@ -100,6 +125,21 @@
#define SPR_MPL_INTCTRL_2_SET_0 0x2300
#define SPR_MPL_INTCTRL_2_SET_1 0x2301
#define SPR_MPL_INTCTRL_2_SET_2 0x2302
+#define SPR_MPL_IPI_0 0x1f04
+#define SPR_MPL_IPI_0_SET_0 0x1f00
+#define SPR_MPL_IPI_0_SET_1 0x1f01
+#define SPR_MPL_IPI_0_SET_2 0x1f02
+#define SPR_MPL_IPI_1 0x1e04
+#define SPR_MPL_IPI_1_SET_0 0x1e00
+#define SPR_MPL_IPI_1_SET_1 0x1e01
+#define SPR_MPL_IPI_1_SET_2 0x1e02
+#define SPR_MPL_IPI_2 0x1d04
+#define SPR_MPL_IPI_2_SET_0 0x1d00
+#define SPR_MPL_IPI_2_SET_1 0x1d01
+#define SPR_MPL_IPI_2_SET_2 0x1d02
+#define SPR_MPL_PERF_COUNT_SET_0 0x2000
+#define SPR_MPL_PERF_COUNT_SET_1 0x2001
+#define SPR_MPL_PERF_COUNT_SET_2 0x2002
#define SPR_MPL_UDN_ACCESS_SET_0 0x0b00
#define SPR_MPL_UDN_ACCESS_SET_1 0x0b01
#define SPR_MPL_UDN_ACCESS_SET_2 0x0b02
@@ -167,6 +207,9 @@
#define SPR_UDN_DEMUX_COUNT_2 0x0b07
#define SPR_UDN_DEMUX_COUNT_3 0x0b08
#define SPR_UDN_DIRECTION_PROTECT 0x1505
+#define SPR_UDN_PENDING 0x0b0a
+#define SPR_WATCH_MASK 0x200a
+#define SPR_WATCH_VAL 0x200b
#endif /* !defined(__ARCH_SPR_DEF_H__) */
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 0bb42642343a..143473e3a0bb 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -2,6 +2,7 @@ include include/asm-generic/Kbuild.asm
header-y += ../arch/
+header-y += cachectl.h
header-y += ucontext.h
header-y += hardwall.h
@@ -21,7 +22,6 @@ generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
-generic-y += module.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += param.h
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 54d1da826f93..e7fb5cfb9597 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -303,7 +303,14 @@ void __init_atomic_per_cpu(void);
void __atomic_fault_unlock(int *lock_ptr);
#endif
+/* Return a pointer to the lock for the given address. */
+int *__atomic_hashed_lock(volatile void *v);
+
/* Private helper routines in lib/atomic_asm_32.S */
+struct __get_user {
+ unsigned long val;
+ int err;
+};
extern struct __get_user __atomic_cmpxchg(volatile int *p,
int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
@@ -319,6 +326,9 @@ extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
int *lock, u64 o, u64 n);
+/* Return failure from the atomic wrappers. */
+struct __get_user __atomic_bad_address(int __user *addr);
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_ATOMIC_32_H */
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index 16f1fa51fea1..bd186c4eaa50 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -77,6 +77,11 @@ static inline int ffs(int x)
return __builtin_ffs(x);
}
+static inline int fls64(__u64 w)
+{
+ return (sizeof(__u64) * 8) - __builtin_clzll(w);
+}
+
/**
* fls - find last set bit in word
* @x: the word to search
@@ -90,12 +95,7 @@ static inline int ffs(int x)
*/
static inline int fls(int x)
{
- return (sizeof(int) * 8) - __builtin_clz(x);
-}
-
-static inline int fls64(__u64 w)
-{
- return (sizeof(__u64) * 8) - __builtin_clzll(w);
+ return fls64((unsigned int) x);
}
static inline unsigned int __arch_hweight32(unsigned int w)
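The bitops.h reordering defines fls() in terms of fls64(), so both share one count-leading-zeros implementation. A standalone sketch of the same construction follows; my_fls/my_fls64 are illustrative names, and, like the kernel helpers here, the clz builtin is only meaningful for a non-zero argument.

    #include <stdio.h>

    /* clz-based "find last set": 1-based index of the highest set bit.
     * Caller must pass a non-zero value (__builtin_clzll(0) is undefined).
     */
    static int my_fls64(unsigned long long w)
    {
        return 64 - __builtin_clzll(w);
    }

    /* 32-bit variant expressed through the 64-bit one, as in the patch:
     * zero-extending first keeps the bit numbering identical.
     */
    static int my_fls(int x)
    {
        return my_fls64((unsigned int)x);
    }

    int main(void)
    {
        printf("fls(1)          = %d\n", my_fls(1));            /* 1  */
        printf("fls(0x40000000) = %d\n", my_fls(0x40000000));   /* 31 */
        printf("fls64(1 << 40)  = %d\n", my_fls64(1ULL << 40)); /* 41 */
        return 0;
    }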
diff --git a/arch/tile/include/asm/byteorder.h b/arch/tile/include/asm/byteorder.h
index 9558416d578b..fb72ecf49218 100644
--- a/arch/tile/include/asm/byteorder.h
+++ b/arch/tile/include/asm/byteorder.h
@@ -1 +1,21 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#if defined (__BIG_ENDIAN__)
+#include <linux/byteorder/big_endian.h>
+#elif defined (__LITTLE_ENDIAN__)
#include <linux/byteorder/little_endian.h>
+#else
+#error "__BIG_ENDIAN__ or __LITTLE_ENDIAN__ must be defined."
+#endif
diff --git a/arch/tile/include/asm/cachectl.h b/arch/tile/include/asm/cachectl.h
new file mode 100644
index 000000000000..af4c9f9154d1
--- /dev/null
+++ b/arch/tile/include/asm/cachectl.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_CACHECTL_H
+#define _ASM_TILE_CACHECTL_H
+
+/*
+ * Options for cacheflush system call.
+ *
+ * The ICACHE flush is performed on all cores currently running the
+ * current process's address space. The intent is for user
+ * applications to be able to modify code, invoke the system call,
+ * then allow arbitrary other threads in the same address space to see
+ * the newly-modified code. Passing a length of CHIP_L1I_CACHE_SIZE()
+ * or more invalidates the entire icache on all cores in the address
+ * spaces. (Note: currently this option invalidates the entire icache
+ * regardless of the requested address and length, but we may choose
+ * to honor the arguments at some point.)
+ *
+ * Flush and invalidation of memory can normally be performed with the
+ * __insn_flush(), __insn_inv(), and __insn_finv() instructions from
+ * userspace. The DCACHE option to the system call allows userspace
+ * to flush the entire L1+L2 data cache from the core. In this case,
+ * the address and length arguments are not used. The DCACHE flush is
+ * restricted to the current core, not all cores in the address space.
+ */
+#define ICACHE (1<<0) /* invalidate L1 instruction cache */
+#define DCACHE (1<<1) /* flush and invalidate data cache */
+#define BCACHE (ICACHE|DCACHE) /* flush both caches */
+
+#endif /* _ASM_TILE_CACHECTL_H */
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 4b4b28969a65..69adc08d36a5 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -242,9 +242,6 @@ long compat_sys_fallocate(int fd, int mode,
long compat_sys_sched_rr_get_interval(compat_pid_t pid,
struct compat_timespec __user *interval);
-/* Tilera Linux syscalls that don't have "compat" versions. */
-#define compat_sys_flush_cache sys_flush_cache
-
/* These are the intvec_64.S trampolines. */
long _compat_sys_execve(const char __user *path,
const compat_uptr_t __user *argv,
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index 623a6bb741c1..d16d006d660e 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -44,7 +44,11 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#else
#define ELF_CLASS ELFCLASS32
#endif
+#ifdef __BIG_ENDIAN__
+#define ELF_DATA ELFDATA2MSB
+#else
#define ELF_DATA ELFDATA2LSB
+#endif
/*
* There seems to be a bug in how compat_binfmt_elf.c works: it
@@ -59,6 +63,7 @@ enum { ELF_ARCH = CHIP_ELF_TYPE() };
*/
#define elf_check_arch(x) \
((x)->e_ident[EI_CLASS] == ELF_CLASS && \
+ (x)->e_ident[EI_DATA] == ELF_DATA && \
(x)->e_machine == CHIP_ELF_TYPE())
/* The module loader only handles a few relocation types. */
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index d03ec124a598..5909ac3d7218 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -28,29 +28,81 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
+#include <asm/atomic.h>
-extern struct __get_user futex_set(u32 __user *v, int i);
-extern struct __get_user futex_add(u32 __user *v, int n);
-extern struct __get_user futex_or(u32 __user *v, int n);
-extern struct __get_user futex_andn(u32 __user *v, int n);
-extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);
+/*
+ * Support macros for futex operations. Do not use these macros directly.
+ * They assume "ret", "val", "oparg", and "uaddr" in the lexical context.
+ * __futex_cmpxchg() additionally assumes "oldval".
+ */
+
+#ifdef __tilegx__
+
+#define __futex_asm(OP) \
+ asm("1: {" #OP " %1, %3, %4; movei %0, 0 }\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "0: { movei %0, %5; j 9f }\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".quad 1b, 0b\n" \
+ ".popsection\n" \
+ "9:" \
+ : "=r" (ret), "=r" (val), "+m" (*(uaddr)) \
+ : "r" (uaddr), "r" (oparg), "i" (-EFAULT))
+
+#define __futex_set() __futex_asm(exch4)
+#define __futex_add() __futex_asm(fetchadd4)
+#define __futex_or() __futex_asm(fetchor4)
+#define __futex_andn() ({ oparg = ~oparg; __futex_asm(fetchand4); })
+#define __futex_cmpxchg() \
+ ({ __insn_mtspr(SPR_CMPEXCH_VALUE, oldval); __futex_asm(cmpexch4); })
+
+#define __futex_xor() \
+ ({ \
+ u32 oldval, n = oparg; \
+ if ((ret = __get_user(oldval, uaddr)) == 0) { \
+ do { \
+ oparg = oldval ^ n; \
+ __futex_cmpxchg(); \
+ } while (ret == 0 && oldval != val); \
+ } \
+ })
+
+/* No need to prefetch, since the atomic ops go to the home cache anyway. */
+#define __futex_prolog()
-#ifndef __tilegx__
-extern struct __get_user futex_xor(u32 __user *v, int n);
#else
-static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
-{
- struct __get_user asm_ret = __get_user_4(uaddr);
- if (!asm_ret.err) {
- int oldval, newval;
- do {
- oldval = asm_ret.val;
- newval = oldval ^ n;
- asm_ret = futex_cmpxchg(uaddr, oldval, newval);
- } while (asm_ret.err == 0 && oldval != asm_ret.val);
+
+#define __futex_call(FN) \
+ { \
+ struct __get_user gu = FN((u32 __force *)uaddr, lock, oparg); \
+ val = gu.val; \
+ ret = gu.err; \
}
- return asm_ret;
-}
+
+#define __futex_set() __futex_call(__atomic_xchg)
+#define __futex_add() __futex_call(__atomic_xchg_add)
+#define __futex_or() __futex_call(__atomic_or)
+#define __futex_andn() __futex_call(__atomic_andn)
+#define __futex_xor() __futex_call(__atomic_xor)
+
+#define __futex_cmpxchg() \
+ { \
+ struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
+ lock, oldval, oparg); \
+ val = gu.val; \
+ ret = gu.err; \
+ }
+
+/*
+ * Find the lock pointer for the atomic calls to use, and issue a
+ * prefetch to the user address to bring it into cache. Similar to
+ * __atomic_setup(), but we can't do a read into the L1 since it might
+ * fault; instead we do a prefetch into the L2.
+ */
+#define __futex_prolog() \
+ int *lock; \
+ __insn_prefetch(uaddr); \
+ lock = __atomic_hashed_lock((int __force *)uaddr)
#endif
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
@@ -59,8 +111,12 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
- int ret;
- struct __get_user asm_ret;
+ int uninitialized_var(val), ret;
+
+ __futex_prolog();
+
+ /* The 32-bit futex code makes this assumption, so validate it here. */
+ BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
@@ -71,46 +127,45 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
- asm_ret = futex_set(uaddr, oparg);
+ __futex_set();
break;
case FUTEX_OP_ADD:
- asm_ret = futex_add(uaddr, oparg);
+ __futex_add();
break;
case FUTEX_OP_OR:
- asm_ret = futex_or(uaddr, oparg);
+ __futex_or();
break;
case FUTEX_OP_ANDN:
- asm_ret = futex_andn(uaddr, oparg);
+ __futex_andn();
break;
case FUTEX_OP_XOR:
- asm_ret = futex_xor(uaddr, oparg);
+ __futex_xor();
break;
default:
- asm_ret.err = -ENOSYS;
+ ret = -ENOSYS;
+ break;
}
pagefault_enable();
- ret = asm_ret.err;
-
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ:
- ret = (asm_ret.val == cmparg);
+ ret = (val == cmparg);
break;
case FUTEX_OP_CMP_NE:
- ret = (asm_ret.val != cmparg);
+ ret = (val != cmparg);
break;
case FUTEX_OP_CMP_LT:
- ret = (asm_ret.val < cmparg);
+ ret = (val < cmparg);
break;
case FUTEX_OP_CMP_GE:
- ret = (asm_ret.val >= cmparg);
+ ret = (val >= cmparg);
break;
case FUTEX_OP_CMP_LE:
- ret = (asm_ret.val <= cmparg);
+ ret = (val <= cmparg);
break;
case FUTEX_OP_CMP_GT:
- ret = (asm_ret.val > cmparg);
+ ret = (val > cmparg);
break;
default:
ret = -ENOSYS;
@@ -120,22 +175,20 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
}
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+ u32 oldval, u32 oparg)
{
- struct __get_user asm_ret;
+ int ret, val;
+
+ __futex_prolog();
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
- asm_ret = futex_cmpxchg(uaddr, oldval, newval);
- *uval = asm_ret.val;
- return asm_ret.err;
-}
+ __futex_cmpxchg();
-#ifndef __tilegx__
-/* Return failure from the atomic wrappers. */
-struct __get_user __atomic_bad_address(int __user *addr);
-#endif
+ *uval = val;
+ return ret;
+}
#endif /* !__ASSEMBLY__ */
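futex_atomic_op_inuser() above unpacks everything from the single encoded_op word; the (encoded_op << 8) >> 20 and (encoded_op << 20) >> 20 expressions sign-extend the two 12-bit argument fields. The small decoder below reproduces that arithmetic for illustration only: the op extraction from the top nibble is assumed from the standard futex encoding, and, like the kernel expression it mirrors, the shifts rely on the compiler's two's-complement arithmetic-shift behaviour.

    #include <stdio.h>

    /* Assumed encoded_op layout: bits 31:28 op, 27:24 cmp,
     * 23:12 oparg, 11:0 cmparg (both argument fields are signed).
     */
    static void decode(int encoded_op)
    {
        int op     = (encoded_op >> 28) & 7;
        int cmp    = (encoded_op >> 24) & 15;
        int oparg  = (encoded_op << 8) >> 20;   /* sign-extend 12-bit field */
        int cmparg = (encoded_op << 20) >> 20;  /* sign-extend 12-bit field */

        printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
    }

    int main(void)
    {
        /* Expect: op=1 (ADD), cmp=0 (EQ), oparg=5, cmparg=-1 (0xfff sign-extends). */
        decode((1 << 28) | (0 << 24) | (5 << 12) | 0xfff);
        return 0;
    }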
diff --git a/arch/tile/include/asm/hardwall.h b/arch/tile/include/asm/hardwall.h
index 2ac422848c7d..47514a58d685 100644
--- a/arch/tile/include/asm/hardwall.h
+++ b/arch/tile/include/asm/hardwall.h
@@ -11,12 +11,14 @@
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
- * Provide methods for the HARDWALL_FILE for accessing the UDN.
+ * Provide methods for access control of per-cpu resources like
+ * UDN, IDN, or IPI.
*/
#ifndef _ASM_TILE_HARDWALL_H
#define _ASM_TILE_HARDWALL_H
+#include <arch/chip.h>
#include <linux/ioctl.h>
#define HARDWALL_IOCTL_BASE 0xa2
@@ -24,8 +26,9 @@
/*
* The HARDWALL_CREATE() ioctl is a macro with a "size" argument.
* The resulting ioctl value is passed to the kernel in conjunction
- * with a pointer to a little-endian bitmask of cpus, which must be
- * physically in a rectangular configuration on the chip.
+ * with a pointer to a standard kernel bitmask of cpus.
+ * For network resources (UDN or IDN) the bitmask must physically
+ * represent a rectangular configuration on the chip.
* The "size" is the number of bytes of cpu mask data.
*/
#define _HARDWALL_CREATE 1
@@ -44,13 +47,7 @@
#define HARDWALL_GET_ID \
_IO(HARDWALL_IOCTL_BASE, _HARDWALL_GET_ID)
-#ifndef __KERNEL__
-
-/* This is the canonical name expected by userspace. */
-#define HARDWALL_FILE "/dev/hardwall"
-
-#else
-
+#ifdef __KERNEL__
/* /proc hooks for hardwall. */
struct proc_dir_entry;
#ifdef CONFIG_HARDWALL
@@ -59,7 +56,6 @@ int proc_pid_hardwall(struct task_struct *task, char *buffer);
#else
static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {}
#endif
-
#endif
#endif /* _ASM_TILE_HARDWALL_H */
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h
index d396d1805163..b2042380a5aa 100644
--- a/arch/tile/include/asm/hugetlb.h
+++ b/arch/tile/include/asm/hugetlb.h
@@ -106,4 +106,25 @@ static inline void arch_release_hugepage(struct page *page)
{
}
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+ struct page *page, int writable)
+{
+ size_t pagesize = huge_page_size(hstate_vma(vma));
+ if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)
+ entry = pte_mksuper(entry);
+ return entry;
+}
+#define arch_make_huge_pte arch_make_huge_pte
+
+/* Sizes to scale up page size for PTEs with HV_PTE_SUPER bit. */
+enum {
+ HUGE_SHIFT_PGDIR = 0,
+ HUGE_SHIFT_PMD = 1,
+ HUGE_SHIFT_PAGE = 2,
+ HUGE_SHIFT_ENTRIES
+};
+extern int huge_shift[HUGE_SHIFT_ENTRIES];
+#endif
+
#endif /* _ASM_TILE_HUGETLB_H */
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 5db0ce54284d..b4e96fef2cf8 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -28,10 +28,10 @@
*/
#if CHIP_HAS_AUX_PERF_COUNTERS()
#define LINUX_MASKABLE_INTERRUPTS_HI \
- (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
+ (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
#else
#define LINUX_MASKABLE_INTERRUPTS_HI \
- (~(INT_MASK_HI(INT_PERF_COUNT)))
+ (~(INT_MASK_HI(INT_PERF_COUNT)))
#endif
#else
@@ -90,6 +90,14 @@
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
} while (0)
+#define interrupt_mask_save_mask() \
+ (__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
+ (((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
+#define interrupt_mask_restore_mask(mask) do { \
+ unsigned long long __m = (mask); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
+} while (0)
#else
#define interrupt_mask_set(n) \
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
@@ -101,6 +109,10 @@
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
#define interrupt_mask_reset_mask(mask) \
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
+#define interrupt_mask_save_mask() \
+ __insn_mfspr(SPR_INTERRUPT_MASK_K)
+#define interrupt_mask_restore_mask(mask) \
+ __insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
#endif
/*
@@ -122,7 +134,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
/* Disable all interrupts, including NMIs. */
#define arch_local_irq_disable_all() \
- interrupt_mask_set_mask(-1UL)
+ interrupt_mask_set_mask(-1ULL)
/* Re-enable all maskable interrupts. */
#define arch_local_irq_enable() \
@@ -179,7 +191,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#ifdef __tilegx__
#if INT_MEM_ERROR != 0
-# error Fix IRQ_DISABLED() macro
+# error Fix IRQS_DISABLED() macro
#endif
/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
@@ -207,9 +219,10 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
mtspr SPR_INTERRUPT_MASK_SET_K, tmp
/* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1) \
+#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
- ld tmp0, tmp0; \
+ ld tmp0, tmp0
+#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
mtspr SPR_INTERRUPT_MASK_RESET_K, tmp0
#else /* !__tilegx__ */
@@ -253,17 +266,22 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp
/* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1) \
+#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
{ \
lw tmp0, tmp0; \
addi tmp1, tmp0, 4 \
}; \
- lw tmp1, tmp1; \
+ lw tmp1, tmp1
+#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
mtspr SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
mtspr SPR_INTERRUPT_MASK_RESET_K_1, tmp1
#endif
+#define IRQ_ENABLE(tmp0, tmp1) \
+ IRQ_ENABLE_LOAD(tmp0, tmp1); \
+ IRQ_ENABLE_APPLY(tmp0, tmp1)
+
/*
* Do the CPU's IRQ-state tracing from assembly code. We call a
* C function, but almost everywhere we do, we don't mind clobbering
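The interrupt_mask_save_mask()/interrupt_mask_restore_mask() pair added above just splits a 64-bit mask across the two 32-bit mask SPRs on the 32-bit chips. A minimal sketch of that split, with plain variables standing in for the SPRs (the real code uses __insn_mfspr/__insn_mtspr, not memory):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for SPR_INTERRUPT_MASK_K_0/1 (an assumption for illustration). */
    static uint32_t mask_k_0, mask_k_1;

    static void restore_mask(uint64_t mask)
    {
        mask_k_0 = (uint32_t)mask;           /* low 32 bits  */
        mask_k_1 = (uint32_t)(mask >> 32);   /* high 32 bits */
    }

    static uint64_t save_mask(void)
    {
        return (uint64_t)mask_k_0 | ((uint64_t)mask_k_1 << 32);
    }

    int main(void)
    {
        restore_mask(0x123456789abcdef0ULL);
        printf("round-trip mask: 0x%llx\n", (unsigned long long)save_mask());
        return 0;
    }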
diff --git a/arch/tile/include/asm/kexec.h b/arch/tile/include/asm/kexec.h
index c11a6cc73bb8..fc98ccfc98ac 100644
--- a/arch/tile/include/asm/kexec.h
+++ b/arch/tile/include/asm/kexec.h
@@ -19,12 +19,24 @@
#include <asm/page.h>
+#ifndef __tilegx__
/* Maximum physical address we can use pages from. */
#define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE
/* Maximum address we can reach in physical address mode. */
#define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE
/* Maximum address we can use for the control code buffer. */
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+#else
+/* We need to limit the memory to below PGDIR_SIZE, since we only
+ * set up page tables for [0, PGDIR_SIZE) before the final kexec.
+ */
+/* Maximum physical address we can use pages from. */
+#define KEXEC_SOURCE_MEMORY_LIMIT PGDIR_SIZE
+/* Maximum address we can reach in physical address mode. */
+#define KEXEC_DESTINATION_MEMORY_LIMIT PGDIR_SIZE
+/* Maximum address we can use for the control code buffer. */
+#define KEXEC_CONTROL_MEMORY_LIMIT PGDIR_SIZE
+#endif
#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE
diff --git a/arch/tile/include/asm/kvm_para.h b/arch/tile/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/tile/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/tile/include/asm/mmu.h b/arch/tile/include/asm/mmu.h
index 92f94c77b6e4..e2c789096795 100644
--- a/arch/tile/include/asm/mmu.h
+++ b/arch/tile/include/asm/mmu.h
@@ -21,7 +21,7 @@ struct mm_context {
* Written under the mmap_sem semaphore; read without the
* semaphore but atomically, though it is set conservatively.
*/
- unsigned int priority_cached;
+ unsigned long priority_cached;
};
typedef struct mm_context mm_context_t;
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 15fb24641120..37f0b741dee7 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -30,11 +30,15 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}
-/* Note that arch/tile/kernel/head.S also calls hv_install_context() */
+/*
+ * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
+ * also call hv_install_context().
+ */
static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
{
/* FIXME: DIRECTIO should not always be set. FIXME. */
- int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO);
+ int rc = hv_install_context(__pa(pgdir), prot, asid,
+ HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
if (rc < 0)
panic("hv_install_context failed: %d", rc);
}
diff --git a/arch/tile/include/asm/module.h b/arch/tile/include/asm/module.h
new file mode 100644
index 000000000000..44ed07ccd3d2
--- /dev/null
+++ b/arch/tile/include/asm/module.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_MODULE_H
+#define _ASM_TILE_MODULE_H
+
+#include <arch/chip.h>
+
+#include <asm-generic/module.h>
+
+/* We can't use modules built with different page sizes. */
+#if defined(CONFIG_PAGE_SIZE_16KB)
+# define MODULE_PGSZ " 16KB"
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+# define MODULE_PGSZ " 64KB"
+#else
+# define MODULE_PGSZ ""
+#endif
+
+/* We don't really support non-SMP builds, so tag the vermagic if someone tries one. */
+#ifdef CONFIG_SMP
+#define MODULE_NOSMP ""
+#else
+#define MODULE_NOSMP " nosmp"
+#endif
+
+#define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP
+
+#endif /* _ASM_TILE_MODULE_H */
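MODULE_ARCH_VERMAGIC relies on C string-literal concatenation, so a 64KB-page SMP kernel ends up with a vermagic component such as "tilegx 64KB". A small sketch; the CHIP_ARCH_NAME value is an assumption for the example (it really comes from <arch/chip.h>):

    #include <stdio.h>

    #define CHIP_ARCH_NAME "tilegx"   /* assumed value for the example */
    #define MODULE_PGSZ    " 64KB"    /* the CONFIG_PAGE_SIZE_64KB case */
    #define MODULE_NOSMP   ""         /* the CONFIG_SMP case */

    #define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP

    int main(void)
    {
        /* Adjacent string literals are concatenated at compile time. */
        printf("%s\n", MODULE_ARCH_VERMAGIC);   /* prints: tilegx 64KB */
        return 0;
    }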
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index db93518fac03..9d9131e5c552 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -20,8 +20,17 @@
#include <arch/chip.h>
/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
-#define PAGE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
-#define HPAGE_SHIFT HV_LOG2_PAGE_SIZE_LARGE
+#if defined(CONFIG_PAGE_SIZE_16KB)
+#define PAGE_SHIFT 14
+#define CTX_PAGE_FLAG HV_CTX_PG_SM_16K
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PAGE_SHIFT 16
+#define CTX_PAGE_FLAG HV_CTX_PG_SM_64K
+#else
+#define PAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_SMALL
+#define CTX_PAGE_FLAG 0
+#endif
+#define HPAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_LARGE
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
@@ -78,8 +87,7 @@ typedef HV_PTE pgprot_t;
/*
* User L2 page tables are managed as one L2 page table per page,
* because we use the page allocator for them. This keeps the allocation
- * simple and makes it potentially useful to implement HIGHPTE at some point.
- * However, it's also inefficient, since L2 page tables are much smaller
+ * simple, but it's also inefficient, since L2 page tables are much smaller
* than pages (currently 2KB vs 64KB). So we should revisit this.
*/
typedef struct page *pgtable_t;
@@ -128,7 +136,7 @@ static inline __attribute_const__ int get_order(unsigned long size)
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#define HUGE_MAX_HSTATE 2
+#define HUGE_MAX_HSTATE 6
#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
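With PAGE_SHIFT now configurable, the resulting sizes are just 1 << shift. A quick arithmetic check using the shift values from the #if ladder above and the default huge-page shift of 24:

    #include <stdio.h>

    int main(void)
    {
        int page_shifts[] = { 14, 16 };   /* CONFIG_PAGE_SIZE_16KB / _64KB */
        int hpage_shift = 24;             /* HV_LOG2_DEFAULT_PAGE_SIZE_LARGE */
        unsigned int i;

        for (i = 0; i < sizeof(page_shifts) / sizeof(page_shifts[0]); i++)
            printf("PAGE_SHIFT %d -> %lu KB pages\n",
                   page_shifts[i], (1UL << page_shifts[i]) >> 10);
        printf("HPAGE_SHIFT %d -> %lu MB huge pages\n",
               hpage_shift, (1UL << hpage_shift) >> 20);
        return 0;
    }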
diff --git a/arch/tile/include/asm/pgalloc.h b/arch/tile/include/asm/pgalloc.h
index e919c0bdc22d..1b902508b664 100644
--- a/arch/tile/include/asm/pgalloc.h
+++ b/arch/tile/include/asm/pgalloc.h
@@ -19,24 +19,24 @@
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
+#include <asm/page.h>
#include <hv/hypervisor.h>
/* Bits for the size of the second-level page table. */
-#define L2_KERNEL_PGTABLE_SHIFT \
- (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE)
+#define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
+
+/* How big is a kernel L2 page table? */
+#define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT)
/* We currently allocate user L2 page tables by page (unlike kernel L2s). */
-#if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL
-#define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
+#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
#else
#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
#endif
/* How many pages do we need, as an "order", for a user L2 page table? */
-#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL)
-
-/* How big is a kernel L2 page table? */
-#define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT)
+#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -50,14 +50,14 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *ptep)
{
- set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN,
+ set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)),
__pgprot(_PAGE_PRESENT)));
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pgtable_t page)
{
- set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)),
+ set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))),
__pgprot(_PAGE_PRESENT)));
}
@@ -68,8 +68,20 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
-extern void pte_free(struct mm_struct *mm, struct page *pte);
+extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+ int order);
+extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+ pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
+}
#define pmd_pgtable(pmd) pmd_page(pmd)
@@ -85,8 +97,13 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
pte_free(mm, virt_to_page(pte));
}
-extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
- unsigned long address);
+extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+ unsigned long address, int order);
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
+ unsigned long address)
+{
+ __pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
+}
#define check_pgt_cache() do { } while (0)
@@ -104,19 +121,44 @@ void shatter_pmd(pmd_t *pmd);
void shatter_huge_page(unsigned long addr);
#ifdef __tilegx__
-/* We share a single page allocator for both L1 and L2 page tables. */
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-#define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER
+
#define pud_populate(mm, pud, pmd) \
pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
-#define pmd_alloc_one(mm, addr) \
- ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr))))
-#define pmd_free(mm, pmdp) \
- pte_free((mm), virt_to_page(pmdp))
-#define __pmd_free_tlb(tlb, pmdp, address) \
- __pte_free_tlb((tlb), virt_to_page(pmdp), (address))
+
+/* Bits for the size of the L1 (intermediate) page table. */
+#define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT)
+
+/* How big is a kernel L1 page table? */
+#define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT)
+
+/* We currently allocate L1 page tables by page. */
+#if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
+#else
+#define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT
#endif
+/* How many pages do we need, as an "order", for an L1 page table? */
+#define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER);
+ return (pmd_t *)page_to_virt(p);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
+{
+ pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER);
+}
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
+ unsigned long address)
+{
+ __pgtable_free_tlb(tlb, virt_to_page(pmdp), address,
+ L1_USER_PGTABLE_ORDER);
+}
+
+#endif /* __tilegx__ */
+
#endif /* _ASM_TILE_PGALLOC_H */
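The new _HV_LOG2_L2_SIZE()-based sizing makes the "2KB vs 64KB" remark in page.h concrete. A sketch of the arithmetic, assuming 8-byte PTEs, 64KB small pages, and 16MB huge pages (the PTE size is an assumption, not stated in this patch):

    #include <stdio.h>

    #define LOG2_PTE_SIZE 3   /* assumption: sizeof(pte_t) == 8 */

    int main(void)
    {
        int hpage_shift = 24, page_shift = 16;

        /* _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT) */
        int l2_kernel_shift = LOG2_PTE_SIZE + (hpage_shift - page_shift);

        /* User L2 tables are rounded up to at least a full page. */
        int l2_user_shift = l2_kernel_shift < page_shift ? page_shift
                                                         : l2_kernel_shift;

        printf("kernel L2 table: %d bytes\n", 1 << l2_kernel_shift);   /* 2048 */
        printf("user L2 order:   %d\n", l2_user_shift - page_shift);   /* 0    */
        return 0;
    }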
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index 67490910774d..73b1a4c9ad03 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -27,8 +27,10 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/pfn.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
+#include <asm/page.h>
struct mm_struct;
struct vm_area_struct;
@@ -69,6 +71,7 @@ extern void set_page_homes(void);
#define _PAGE_PRESENT HV_PTE_PRESENT
#define _PAGE_HUGE_PAGE HV_PTE_PAGE
+#define _PAGE_SUPER_PAGE HV_PTE_SUPER
#define _PAGE_READABLE HV_PTE_READABLE
#define _PAGE_WRITABLE HV_PTE_WRITABLE
#define _PAGE_EXECUTABLE HV_PTE_EXECUTABLE
@@ -85,6 +88,7 @@ extern void set_page_homes(void);
#define _PAGE_ALL (\
_PAGE_PRESENT | \
_PAGE_HUGE_PAGE | \
+ _PAGE_SUPER_PAGE | \
_PAGE_READABLE | \
_PAGE_WRITABLE | \
_PAGE_EXECUTABLE | \
@@ -162,7 +166,7 @@ extern void set_page_homes(void);
(pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }
/* Just setting the PFN to zero suffices. */
-#define pte_pgprot(x) hv_pte_set_pfn((x), 0)
+#define pte_pgprot(x) hv_pte_set_pa((x), 0)
/*
* For PTEs and PDEs, we must clear the Present bit first when
@@ -187,6 +191,7 @@ static inline void __pte_clear(pte_t *ptep)
* Undefined behaviour if not..
*/
#define pte_present hv_pte_get_present
+#define pte_mknotpresent hv_pte_clear_present
#define pte_user hv_pte_get_user
#define pte_read hv_pte_get_readable
#define pte_dirty hv_pte_get_dirty
@@ -194,6 +199,7 @@ static inline void __pte_clear(pte_t *ptep)
#define pte_write hv_pte_get_writable
#define pte_exec hv_pte_get_executable
#define pte_huge hv_pte_get_page
+#define pte_super hv_pte_get_super
#define pte_rdprotect hv_pte_clear_readable
#define pte_exprotect hv_pte_clear_executable
#define pte_mkclean hv_pte_clear_dirty
@@ -206,6 +212,7 @@ static inline void __pte_clear(pte_t *ptep)
#define pte_mkyoung hv_pte_set_accessed
#define pte_mkwrite hv_pte_set_writable
#define pte_mkhuge hv_pte_set_page
+#define pte_mksuper hv_pte_set_super
#define pte_special(pte) 0
#define pte_mkspecial(pte) (pte)
@@ -261,7 +268,7 @@ static inline int pte_none(pte_t pte)
static inline unsigned long pte_pfn(pte_t pte)
{
- return hv_pte_get_pfn(pte);
+ return PFN_DOWN(hv_pte_get_pa(pte));
}
/* Set or get the remote cache cpu in a pgprot with remote caching. */
@@ -270,7 +277,7 @@ extern int get_remote_cache_cpu(pgprot_t prot);
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
- return hv_pte_set_pfn(prot, pfn);
+ return hv_pte_set_pa(prot, PFN_PHYS(pfn));
}
/* Support for priority mappings. */
@@ -312,7 +319,7 @@ extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);
*/
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- return pfn_pte(hv_pte_get_pfn(pte), newprot);
+ return pfn_pte(pte_pfn(pte), newprot);
}
/*
@@ -335,13 +342,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*/
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-#if defined(CONFIG_HIGHPTE)
-extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
-#define pte_unmap(pte) kunmap_atomic(pte)
-#else
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte) do { } while (0)
-#endif
/* Clear a non-executable kernel PTE and flush it from the TLB. */
#define kpte_clear_flush(ptep, vaddr) \
@@ -410,6 +412,46 @@ static inline unsigned long pmd_index(unsigned long address)
return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ return ptep_test_and_clear_young(vma, address, pmdp_ptep(pmdp));
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ ptep_set_wrprotect(mm, address, pmdp_ptep(pmdp));
+}
+
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ return pte_pmd(ptep_get_and_clear(mm, address, pmdp_ptep(pmdp)));
+}
+
+static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+ set_pte(pmdp_ptep(pmdp), pmd_pte(pmdval));
+}
+
+#define set_pmd_at(mm, addr, pmdp, pmdval) __set_pmd(pmdp, pmdval)
+
+/* Create a pmd from a PTFN. */
+static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
+{
+ return pte_pmd(hv_pte_set_ptfn(prot, ptfn));
+}
+
+/* Return the page-table frame number (ptfn) that a pmd_t points at. */
+#define pmd_ptfn(pmd) hv_pte_get_ptfn(pmd_pte(pmd))
+
/*
* A given kernel pmd_t maps to a specific virtual address (either a
* kernel huge page or a kernel pte_t table). Since kernel pte_t
@@ -430,7 +472,48 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
* OK for pte_lockptr(), since we just end up with potentially one
* lock being used for several pte_t arrays.
*/
-#define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd)))
+#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ __pte_clear(pmdp_ptep(pmdp));
+}
+
+#define pmd_mknotpresent(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
+#define pmd_young(pmd) pte_young(pmd_pte(pmd))
+#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_huge_page(pmd) pte_huge(pmd_pte(pmd))
+#define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd)))
+#define __HAVE_ARCH_PMD_WRITE
+
+#define pfn_pmd(pfn, pgprot) pte_pmd(pfn_pte((pfn), (pgprot)))
+#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
+#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ return pfn_pmd(pmd_pfn(pmd), newprot);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#define pmd_trans_huge pmd_huge_page
+
+static inline pmd_t pmd_mksplitting(pmd_t pmd)
+{
+ return pte_pmd(hv_pte_set_client2(pmd_pte(pmd)));
+}
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+ return hv_pte_get_client2(pmd_pte(pmd));
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
* The pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
@@ -448,17 +531,13 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
-static inline int pmd_huge_page(pmd_t pmd)
-{
- return pmd_val(pmd) & _PAGE_HUGE_PAGE;
-}
-
#include <asm-generic/pgtable.h>
/* Support /proc/NN/pgtable API. */
struct seq_file;
int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
- unsigned long vaddr, pte_t *ptep, void **datap);
+ unsigned long vaddr, unsigned long pagesize,
+ pte_t *ptep, void **datap);
#endif /* !__ASSEMBLY__ */
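Routing pte_pfn()/pfn_pte() through the client physical address means the PTE encoding no longer assumes a fixed small-page size: the PTFN is scaled by the page-table alignment and the PFN by whatever PAGE_SHIFT the kernel was built with. A standalone sketch of the conversions; the alignment value below is an assumption for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define LOG2_PAGE_TABLE_ALIGN 11   /* assumed HV_LOG2_PAGE_TABLE_ALIGN */
    #define PAGE_SHIFT            16   /* 64KB pages in this example */

    /* pa <-> ptfn, as in HV_CPA_TO_PTFN()/HV_PTFN_TO_CPA(). */
    static uint64_t pa_to_ptfn(uint64_t pa)   { return pa >> LOG2_PAGE_TABLE_ALIGN; }
    static uint64_t ptfn_to_pa(uint64_t ptfn) { return ptfn << LOG2_PAGE_TABLE_ALIGN; }

    int main(void)
    {
        uint64_t pa = 0x12340000ULL;       /* a page-aligned physical address */
        uint64_t ptfn = pa_to_ptfn(pa);    /* what the PTE actually stores    */
        uint64_t pfn = ptfn_to_pa(ptfn) >> PAGE_SHIFT;   /* pte_pfn() analogue */

        printf("pa=0x%llx ptfn=0x%llx pfn=0x%llx\n",
               (unsigned long long)pa, (unsigned long long)ptfn,
               (unsigned long long)pfn);
        return 0;
    }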
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index 9f98529761fd..4ce4a7a99c24 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -20,11 +20,12 @@
* The level-1 index is defined by the huge page size. A PGD is composed
* of PTRS_PER_PGD pgd_t's and is the top level of the page table.
*/
-#define PGDIR_SHIFT HV_LOG2_PAGE_SIZE_LARGE
-#define PGDIR_SIZE HV_PAGE_SIZE_LARGE
+#define PGDIR_SHIFT HPAGE_SHIFT
+#define PGDIR_SIZE HPAGE_SIZE
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
-#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t))
+#define PTRS_PER_PGD _HV_L1_ENTRIES(HPAGE_SHIFT)
+#define PGD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
+#define SIZEOF_PGD _HV_L1_SIZE(HPAGE_SHIFT)
/*
* The level-2 index is defined by the difference between the huge
@@ -33,8 +34,9 @@
* Note that the hypervisor docs use PTE for what we call pte_t, so
* this nomenclature is somewhat confusing.
*/
-#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
-#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t))
+#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
+#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
+#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
#ifndef __ASSEMBLY__
@@ -111,24 +113,14 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
return pte;
}
-static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
- set_pte(&pmdp->pud.pgd, pmdval.pud.pgd);
-}
-
-/* Create a pmd from a PTFN. */
-static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
-{
- return (pmd_t){ { hv_pte_set_ptfn(prot, ptfn) } };
-}
-
-/* Return the page-table frame number (ptfn) that a pmd_t points at. */
-#define pmd_ptfn(pmd) hv_pte_get_ptfn((pmd).pud.pgd)
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
- __pte_clear(&pmdp->pud.pgd);
-}
+/*
+ * pmds are wrappers around pgds, which are the same as ptes.
+ * It's often convenient to "cast" back and forth and use the pte methods,
+ * which are the methods supplied by the hypervisor.
+ */
+#define pmd_pte(pmd) ((pmd).pud.pgd)
+#define pmdp_ptep(pmdp) (&(pmdp)->pud.pgd)
+#define pte_pmd(pte) ((pmd_t){ { (pte) } })
#endif /* __ASSEMBLY__ */
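On the 32-bit kernel the table geometry now falls directly out of HPAGE_SHIFT and PAGE_SHIFT. A sketch of the arithmetic, assuming a 32-bit level-1 span and the straightforward shift-and-mask index formulas (assumptions for illustration):

    #include <stdio.h>

    int main(void)
    {
        int log2_l1_span = 32;   /* assumed HV_LOG2_L1_SPAN on the 32-bit chip */
        int hpage_shift = 24, page_shift = 16;

        int ptrs_per_pgd = 1 << (log2_l1_span - hpage_shift);   /* _HV_L1_ENTRIES */
        int ptrs_per_pte = 1 << (hpage_shift - page_shift);     /* _HV_L2_ENTRIES */

        unsigned long va = 0x12345678UL;
        int pgd_index = va >> hpage_shift;                        /* _HV_L1_INDEX */
        int pte_index = (va >> page_shift) & (ptrs_per_pte - 1);  /* _HV_L2_INDEX */

        printf("PTRS_PER_PGD=%d PTRS_PER_PTE=%d\n", ptrs_per_pgd, ptrs_per_pte);
        printf("va=0x%lx -> pgd[%d], pte[%d]\n", va, pgd_index, pte_index);
        return 0;
    }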
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index fd80328523b4..2492fa5478e7 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -21,17 +21,19 @@
#define PGDIR_SIZE HV_L1_SPAN
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD HV_L0_ENTRIES
-#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t))
+#define PGD_INDEX(va) HV_L0_INDEX(va)
+#define SIZEOF_PGD HV_L0_SIZE
/*
* The level-1 index is defined by the huge page size. A PMD is composed
* of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
*/
-#define PMD_SHIFT HV_LOG2_PAGE_SIZE_LARGE
-#define PMD_SIZE HV_PAGE_SIZE_LARGE
+#define PMD_SHIFT HPAGE_SHIFT
+#define PMD_SIZE HPAGE_SIZE
#define PMD_MASK (~(PMD_SIZE-1))
-#define PTRS_PER_PMD (1 << (PGDIR_SHIFT - PMD_SHIFT))
-#define SIZEOF_PMD (PTRS_PER_PMD * sizeof(pmd_t))
+#define PTRS_PER_PMD _HV_L1_ENTRIES(HPAGE_SHIFT)
+#define PMD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
+#define SIZEOF_PMD _HV_L1_SIZE(HPAGE_SHIFT)
/*
* The level-2 index is defined by the difference between the huge
@@ -40,17 +42,19 @@
* Note that the hypervisor docs use PTE for what we call pte_t, so
* this nomenclature is somewhat confusing.
*/
-#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
-#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t))
+#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
+#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
+#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
/*
- * Align the vmalloc area to an L2 page table, and leave a guard page
- * at the beginning and end. The vmalloc code also puts in an internal
+ * Align the vmalloc area to an L2 page table. Omit guard pages at
+ * the beginning and end for simplicity (particularly in the per-cpu
+ * memory allocation code). The vmalloc code puts in an internal
* guard page between each allocation.
*/
#define _VMALLOC_END HUGE_VMAP_BASE
-#define VMALLOC_END (_VMALLOC_END - PAGE_SIZE)
-#define VMALLOC_START (_VMALLOC_START + PAGE_SIZE)
+#define VMALLOC_END _VMALLOC_END
+#define VMALLOC_START _VMALLOC_START
#define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE)
@@ -98,7 +102,7 @@ static inline int pud_bad(pud_t pud)
* A pud_t points to a pmd_t array. Since we can have multiple per
* page, we don't have a one-to-one mapping of pud_t's to pages.
*/
-#define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud)))
+#define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud))))
static inline unsigned long pud_index(unsigned long address)
{
@@ -108,28 +112,6 @@ static inline unsigned long pud_index(unsigned long address)
#define pmd_offset(pud, address) \
((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
-static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
- set_pte(pmdp, pmdval);
-}
-
-/* Create a pmd from a PTFN and pgprot. */
-static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
-{
- return hv_pte_set_ptfn(prot, ptfn);
-}
-
-/* Return the page-table frame number (ptfn) that a pmd_t points at. */
-static inline unsigned long pmd_ptfn(pmd_t pmd)
-{
- return hv_pte_get_ptfn(pmd);
-}
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
- __pte_clear(pmdp);
-}
-
/* Normalize an address to having the correct high bits set. */
#define pgd_addr_normalize pgd_addr_normalize
static inline unsigned long pgd_addr_normalize(unsigned long addr)
@@ -170,6 +152,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
return hv_pte(__insn_exch(&ptep->val, 0UL));
}
+/*
+ * pmds are the same as pgds and ptes, so converting is a no-op.
+ */
+#define pmd_pte(pmd) (pmd)
+#define pmdp_ptep(pmdp) (pmdp)
+#define pte_pmd(pte) (pte)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_TILE_PGTABLE_64_H */
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 15cd8a4a06ce..8c4dd9ff91eb 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -76,6 +76,17 @@ struct async_tlb {
#ifdef CONFIG_HARDWALL
struct hardwall_info;
+struct hardwall_task {
+ /* Which hardwall is this task tied to? (or NULL if none) */
+ struct hardwall_info *info;
+ /* Chains this task into the list at info->task_head. */
+ struct list_head list;
+};
+#ifdef __tilepro__
+#define HARDWALL_TYPES 1 /* udn */
+#else
+#define HARDWALL_TYPES 3 /* udn, idn, and ipi */
+#endif
#endif
struct thread_struct {
@@ -116,10 +127,8 @@ struct thread_struct {
unsigned long dstream_pf;
#endif
#ifdef CONFIG_HARDWALL
- /* Is this task tied to an activated hardwall? */
- struct hardwall_info *hardwall;
- /* Chains this task into the list at hardwall->list. */
- struct list_head hardwall_list;
+ /* Hardwall information for various resources. */
+ struct hardwall_task hardwall[HARDWALL_TYPES];
#endif
#if CHIP_HAS_TILE_DMA()
/* Async DMA TLB fault information */
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h
index e58613e0752f..c67eb70ea78e 100644
--- a/arch/tile/include/asm/setup.h
+++ b/arch/tile/include/asm/setup.h
@@ -41,15 +41,15 @@ void restrict_dma_mpls(void);
#ifdef CONFIG_HARDWALL
/* User-level network management functions */
void reset_network_state(void);
-void grant_network_mpls(void);
-void restrict_network_mpls(void);
struct task_struct;
-int hardwall_deactivate(struct task_struct *task);
+void hardwall_switch_tasks(struct task_struct *prev, struct task_struct *next);
+void hardwall_deactivate_all(struct task_struct *task);
+int hardwall_ipi_valid(int cpu);
/* Hook hardwall code into changes in affinity. */
#define arch_set_cpus_allowed(p, new_mask) do { \
- if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
- hardwall_deactivate(p); \
+ if (!cpumask_equal(&p->cpus_allowed, new_mask)) \
+ hardwall_deactivate_all(p); \
} while (0)
#endif
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h
index 3b5507c31eae..06f0464cfed9 100644
--- a/arch/tile/include/asm/syscalls.h
+++ b/arch/tile/include/asm/syscalls.h
@@ -43,7 +43,8 @@ long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
u32 len, int advice);
int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
u32 len_lo, u32 len_hi, int advice);
-long sys_flush_cache(void);
+long sys_cacheflush(unsigned long addr, unsigned long len,
+ unsigned long flags);
#ifndef __tilegx__ /* No mmap() in the 32-bit kernel. */
#define sys_mmap sys_mmap
#endif
diff --git a/arch/tile/include/asm/tlbflush.h b/arch/tile/include/asm/tlbflush.h
index 96199d214fb8..dcf91b25a1e5 100644
--- a/arch/tile/include/asm/tlbflush.h
+++ b/arch/tile/include/asm/tlbflush.h
@@ -38,16 +38,11 @@ DECLARE_PER_CPU(int, current_asid);
/* The hypervisor tells us what ASIDs are available to us. */
extern int min_asid, max_asid;
-static inline unsigned long hv_page_size(const struct vm_area_struct *vma)
-{
- return (vma->vm_flags & VM_HUGETLB) ? HPAGE_SIZE : PAGE_SIZE;
-}
-
/* Pass as vma pointer for non-executable mapping, if no vma available. */
-#define FLUSH_NONEXEC ((const struct vm_area_struct *)-1UL)
+#define FLUSH_NONEXEC ((struct vm_area_struct *)-1UL)
/* Flush a single user page on this cpu. */
-static inline void local_flush_tlb_page(const struct vm_area_struct *vma,
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr,
unsigned long page_size)
{
@@ -60,7 +55,7 @@ static inline void local_flush_tlb_page(const struct vm_area_struct *vma,
}
/* Flush range of user pages on this cpu. */
-static inline void local_flush_tlb_pages(const struct vm_area_struct *vma,
+static inline void local_flush_tlb_pages(struct vm_area_struct *vma,
unsigned long addr,
unsigned long page_size,
unsigned long len)
@@ -117,10 +112,10 @@ extern void flush_tlb_all(void);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(const struct vm_area_struct *, unsigned long);
-extern void flush_tlb_page_mm(const struct vm_area_struct *,
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_page_mm(struct vm_area_struct *,
struct mm_struct *, unsigned long);
-extern void flush_tlb_range(const struct vm_area_struct *,
+extern void flush_tlb_range(struct vm_area_struct *,
unsigned long start, unsigned long end);
#define flush_tlb() flush_tlb_current_task()
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index ef34d2caa5b1..c3dd275f25e2 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -114,45 +114,75 @@ struct exception_table_entry {
extern int fixup_exception(struct pt_regs *regs);
/*
- * We return the __get_user_N function results in a structure,
- * thus in r0 and r1. If "err" is zero, "val" is the result
- * of the read; otherwise, "err" is -EFAULT.
- *
- * We rarely need 8-byte values on a 32-bit architecture, but
- * we size the structure to accommodate. In practice, for the
- * the smaller reads, we can zero the high word for free, and
- * the caller will ignore it by virtue of casting anyway.
+ * Support macros for __get_user().
+ *
+ * Implementation note: The "case 8" logic of casting to the type of
+ * the result of subtracting the value from itself is basically a way
+ * of keeping all integer types the same, but casting any pointers to
+ * ptrdiff_t, i.e. also an integer type. This way there are no
+ * questionable casts seen by the compiler on an ILP32 platform.
+ *
+ * Note that __get_user() and __put_user() assume proper alignment.
*/
-struct __get_user {
- unsigned long long val;
- int err;
-};
-/*
- * FIXME: we should express these as inline extended assembler, since
- * they're fundamentally just a variable dereference and some
- * supporting exception_table gunk. Note that (a la i386) we can
- * extend the copy_to_user and copy_from_user routines to call into
- * such extended assembler routines, though we will have to use a
- * different return code in that case (1, 2, or 4, rather than -EFAULT).
- */
-extern struct __get_user __get_user_1(const void __user *);
-extern struct __get_user __get_user_2(const void __user *);
-extern struct __get_user __get_user_4(const void __user *);
-extern struct __get_user __get_user_8(const void __user *);
-extern int __put_user_1(long, void __user *);
-extern int __put_user_2(long, void __user *);
-extern int __put_user_4(long, void __user *);
-extern int __put_user_8(long long, void __user *);
-
-/* Unimplemented routines to cause linker failures */
-extern struct __get_user __get_user_bad(void);
-extern int __put_user_bad(void);
+#ifdef __LP64__
+#define _ASM_PTR ".quad"
+#else
+#define _ASM_PTR ".long"
+#endif
+
+#define __get_user_asm(OP, x, ptr, ret) \
+ asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "0: { movei %1, 0; movei %0, %3 }\n" \
+ "j 9f\n" \
+ ".section __ex_table,\"a\"\n" \
+ _ASM_PTR " 1b, 0b\n" \
+ ".popsection\n" \
+ "9:" \
+ : "=r" (ret), "=r" (x) \
+ : "r" (ptr), "i" (-EFAULT))
+
+#ifdef __tilegx__
+#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
+#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
+#define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret)
+#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
+#else
+#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
+#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
+#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
+#ifdef __LITTLE_ENDIAN
+#define __lo32(a, b) a
+#define __hi32(a, b) b
+#else
+#define __lo32(a, b) b
+#define __hi32(a, b) a
+#endif
+#define __get_user_8(x, ptr, ret) \
+ ({ \
+ unsigned int __a, __b; \
+ asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n" \
+ "2: { lw %2, %2; movei %0, 0 }\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "0: { movei %1, 0; movei %2, 0 }\n" \
+ "{ movei %0, %4; j 9f }\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".word 1b, 0b\n" \
+ ".word 2b, 0b\n" \
+ ".popsection\n" \
+ "9:" \
+ : "=r" (ret), "=r" (__a), "=&r" (__b) \
+ : "r" (ptr), "i" (-EFAULT)); \
+ (x) = (__typeof(x))(__typeof((x)-(x))) \
+ (((u64)__hi32(__a, __b) << 32) | \
+ __lo32(__a, __b)); \
+ })
+#endif
+
+extern int __get_user_bad(void)
+ __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));
-/*
- * Careful: we have to cast the result to the type of the pointer
- * for sign reasons.
- */
/**
* __get_user: - Get a simple variable from user space, with less checking.
* @x: Variable to store result.
@@ -174,30 +204,62 @@ extern int __put_user_bad(void);
* function.
*/
#define __get_user(x, ptr) \
-({ struct __get_user __ret; \
- __typeof__(*(ptr)) const __user *__gu_addr = (ptr); \
- __chk_user_ptr(__gu_addr); \
- switch (sizeof(*(__gu_addr))) { \
- case 1: \
- __ret = __get_user_1(__gu_addr); \
- break; \
- case 2: \
- __ret = __get_user_2(__gu_addr); \
- break; \
- case 4: \
- __ret = __get_user_4(__gu_addr); \
- break; \
- case 8: \
- __ret = __get_user_8(__gu_addr); \
- break; \
- default: \
- __ret = __get_user_bad(); \
- break; \
- } \
- (x) = (__typeof__(*__gu_addr)) (__typeof__(*__gu_addr - *__gu_addr)) \
- __ret.val; \
- __ret.err; \
-})
+ ({ \
+ int __ret; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: __get_user_1(x, ptr, __ret); break; \
+ case 2: __get_user_2(x, ptr, __ret); break; \
+ case 4: __get_user_4(x, ptr, __ret); break; \
+ case 8: __get_user_8(x, ptr, __ret); break; \
+ default: __ret = __get_user_bad(); break; \
+ } \
+ __ret; \
+ })
+
+/* Support macros for __put_user(). */
+
+#define __put_user_asm(OP, x, ptr, ret) \
+ asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "0: { movei %0, %3; j 9f }\n" \
+ ".section __ex_table,\"a\"\n" \
+ _ASM_PTR " 1b, 0b\n" \
+ ".popsection\n" \
+ "9:" \
+ : "=r" (ret) \
+ : "r" (ptr), "r" (x), "i" (-EFAULT))
+
+#ifdef __tilegx__
+#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
+#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
+#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
+#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
+#else
+#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
+#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
+#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
+#define __put_user_8(x, ptr, ret) \
+ ({ \
+ u64 __x = (__typeof((x)-(x)))(x); \
+ int __lo = (int) __x, __hi = (int) (__x >> 32); \
+ asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n" \
+ "2: { sw %0, %3; movei %0, 0 }\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "0: { movei %0, %4; j 9f }\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".word 1b, 0b\n" \
+ ".word 2b, 0b\n" \
+ ".popsection\n" \
+ "9:" \
+ : "=&r" (ret) \
+ : "r" (ptr), "r" (__lo32(__lo, __hi)), \
+ "r" (__hi32(__lo, __hi)), "i" (-EFAULT)); \
+ })
+#endif
+
+extern int __put_user_bad(void)
+ __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));
/**
* __put_user: - Write a simple value into user space, with less checking.
@@ -217,39 +279,19 @@ extern int __put_user_bad(void);
* function.
*
* Returns zero on success, or -EFAULT on error.
- *
- * Implementation note: The "case 8" logic of casting to the type of
- * the result of subtracting the value from itself is basically a way
- * of keeping all integer types the same, but casting any pointers to
- * ptrdiff_t, i.e. also an integer type. This way there are no
- * questionable casts seen by the compiler on an ILP32 platform.
*/
#define __put_user(x, ptr) \
({ \
- int __pu_err = 0; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- typeof(*__pu_addr) __pu_val = (x); \
- __chk_user_ptr(__pu_addr); \
- switch (sizeof(__pu_val)) { \
- case 1: \
- __pu_err = __put_user_1((long)__pu_val, __pu_addr); \
- break; \
- case 2: \
- __pu_err = __put_user_2((long)__pu_val, __pu_addr); \
- break; \
- case 4: \
- __pu_err = __put_user_4((long)__pu_val, __pu_addr); \
- break; \
- case 8: \
- __pu_err = \
- __put_user_8((__typeof__(__pu_val - __pu_val))__pu_val,\
- __pu_addr); \
- break; \
- default: \
- __pu_err = __put_user_bad(); \
- break; \
+ int __ret; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: __put_user_1(x, ptr, __ret); break; \
+ case 2: __put_user_2(x, ptr, __ret); break; \
+ case 4: __put_user_4(x, ptr, __ret); break; \
+ case 8: __put_user_8(x, ptr, __ret); break; \
+ default: __ret = __put_user_bad(); break; \
} \
- __pu_err; \
+ __ret; \
})
/*
@@ -378,7 +420,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
/**
* __copy_in_user() - copy data within user space, with less checking.
* @to: Destination address, in user space.
- * @from: Source address, in kernel space.
+ * @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
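The "subtract the value from itself" cast described in the comment block above is easy to see in isolation: for integer targets the intermediate type is just that integer type, while for pointer targets it becomes ptrdiff_t, so the compiler never sees a questionable cast on an ILP32 build. A minimal demonstration of the typing trick (not kernel code; the macro name is made up for the example):

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical helper showing the (x)-(x) cast idiom. */
    #define ASSIGN_FROM_U64(x, v) \
        ((x) = (__typeof(x))(__typeof((x) - (x)))(v))

    int main(void)
    {
        uint64_t raw = 0x12345678ULL;
        long n;
        char *p;

        ASSIGN_FROM_U64(n, raw);   /* integer target: cast stays integral    */
        ASSIGN_FROM_U64(p, raw);   /* pointer target: goes through ptrdiff_t */

        printf("%ld %p\n", n, (void *)p);
        return 0;
    }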
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index f70bf1c541f1..a017246ca0ce 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -24,8 +24,8 @@
#include <asm-generic/unistd.h>
/* Additional Tilera-specific syscalls. */
-#define __NR_flush_cache (__NR_arch_specific_syscall + 1)
-__SYSCALL(__NR_flush_cache, sys_flush_cache)
+#define __NR_cacheflush (__NR_arch_specific_syscall + 1)
+__SYSCALL(__NR_cacheflush, sys_cacheflush)
#ifndef __tilegx__
/* "Fast" syscalls provide atomic support for 32-bit chips. */
diff --git a/arch/tile/include/hv/drv_xgbe_intf.h b/arch/tile/include/hv/drv_xgbe_intf.h
index f13188ac281a..2a20b266d944 100644
--- a/arch/tile/include/hv/drv_xgbe_intf.h
+++ b/arch/tile/include/hv/drv_xgbe_intf.h
@@ -460,7 +460,7 @@ typedef void* lepp_comp_t;
* linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for
* our page size of exactly 65536. We add one for a "body" fragment.
*/
-#define LEPP_MAX_FRAGS (65536 / HV_PAGE_SIZE_SMALL + 2 + 1)
+#define LEPP_MAX_FRAGS (65536 / HV_DEFAULT_PAGE_SIZE_SMALL + 2 + 1)
/** Total number of bytes needed for an lepp_tso_cmd_t. */
#define LEPP_TSO_CMD_SIZE(num_frags, header_size) \
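Only the constant name changes here, but the arithmetic is easy to sanity-check: with the default 64KB small pages the expression still evaluates to 4 fragments.

    #include <stdio.h>

    #define HV_DEFAULT_PAGE_SIZE_SMALL 65536   /* 1 << 16 */
    #define LEPP_MAX_FRAGS (65536 / HV_DEFAULT_PAGE_SIZE_SMALL + 2 + 1)

    int main(void)
    {
        printf("LEPP_MAX_FRAGS = %d\n", LEPP_MAX_FRAGS);   /* prints 4 */
        return 0;
    }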
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index 72ec1e972f15..ccd847e2347f 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -17,8 +17,8 @@
* The hypervisor's public API.
*/
-#ifndef _TILE_HV_H
-#define _TILE_HV_H
+#ifndef _HV_HV_H
+#define _HV_HV_H
#include <arch/chip.h>
@@ -42,25 +42,45 @@
*/
#define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN)
-/** The log2 of the size of small pages, in bytes. This value should
- * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
+/** The log2 of the initial size of small pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_SMALL.
*/
-#define HV_LOG2_PAGE_SIZE_SMALL 16
+#define HV_LOG2_DEFAULT_PAGE_SIZE_SMALL 16
-/** The size of small pages, in bytes. This value should be verified
+/** The initial size of small pages, in bytes. This value should be verified
* at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
+ * It may also be modified when installing a new context.
*/
-#define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL)
+#define HV_DEFAULT_PAGE_SIZE_SMALL \
+ (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_SMALL)
-/** The log2 of the size of large pages, in bytes. This value should be
- * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
+/** The log2 of the initial size of large pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_LARGE.
*/
-#define HV_LOG2_PAGE_SIZE_LARGE 24
+#define HV_LOG2_DEFAULT_PAGE_SIZE_LARGE 24
-/** The size of large pages, in bytes. This value should be verified
+/** The initial size of large pages, in bytes. This value should be verified
* at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
+ * It may also be modified when installing a new context.
*/
-#define HV_PAGE_SIZE_LARGE (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_LARGE)
+#define HV_DEFAULT_PAGE_SIZE_LARGE \
+ (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_LARGE)
+
+#if CHIP_VA_WIDTH() > 32
+
+/** The log2 of the initial size of jumbo pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_JUMBO.
+ */
+#define HV_LOG2_DEFAULT_PAGE_SIZE_JUMBO 32
+
+/** The initial size of jumbo pages, in bytes. This value should
+ * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO).
+ * It may also be modified when installing a new context.
+ */
+#define HV_DEFAULT_PAGE_SIZE_JUMBO \
+ (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_JUMBO)
+
+#endif
/** The log2 of the granularity at which page tables must be aligned;
* in other words, the CPA for a page table must have this many zero
@@ -280,8 +300,11 @@
#define HV_DISPATCH_GET_IPI_PTE 56
#endif
+/** hv_set_pte_super_shift */
+#define HV_DISPATCH_SET_PTE_SUPER_SHIFT 57
+
/** One more than the largest dispatch value */
-#define _HV_DISPATCH_END 57
+#define _HV_DISPATCH_END 58
#ifndef __ASSEMBLER__
@@ -401,7 +424,18 @@ typedef enum {
* that the temperature has hit an upper limit and is no longer being
* accurately tracked.
*/
- HV_SYSCONF_BOARD_TEMP = 6
+ HV_SYSCONF_BOARD_TEMP = 6,
+
+ /** Legal page size bitmask for hv_install_context().
+ * For example, if 16KB and 64KB small pages are supported,
+ * it would return "HV_CTX_PG_SM_16K | HV_CTX_PG_SM_64K".
+ */
+ HV_SYSCONF_VALID_PAGE_SIZES = 7,
+
+ /** The size of jumbo pages, in bytes.
+ * If no jumbo pages are available, zero will be returned.
+ */
+ HV_SYSCONF_PAGE_SIZE_JUMBO = 8,
} HV_SysconfQuery;
@@ -474,7 +508,19 @@ typedef enum {
HV_CONFSTR_SWITCH_CONTROL = 14,
/** Chip revision level. */
- HV_CONFSTR_CHIP_REV = 15
+ HV_CONFSTR_CHIP_REV = 15,
+
+ /** CPU module part number. */
+ HV_CONFSTR_CPUMOD_PART_NUM = 16,
+
+ /** CPU module serial number. */
+ HV_CONFSTR_CPUMOD_SERIAL_NUM = 17,
+
+ /** CPU module revision level. */
+ HV_CONFSTR_CPUMOD_REV = 18,
+
+ /** Human-readable CPU module description. */
+ HV_CONFSTR_CPUMOD_DESC = 19
} HV_ConfstrQuery;
@@ -494,11 +540,16 @@ int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len);
/** Tile coordinate */
typedef struct
{
+#ifndef __BIG_ENDIAN__
/** X coordinate, relative to supervisor's top-left coordinate */
int x;
/** Y coordinate, relative to supervisor's top-left coordinate */
int y;
+#else
+ int y;
+ int x;
+#endif
} HV_Coord;
@@ -649,6 +700,12 @@ void hv_set_rtc(HV_RTCTime time);
* new page table does not need to contain any mapping for the
* hv_install_context address itself.
*
+ * At most one HV_CTX_PG_SM_* flag may be specified in "flags";
+ * if multiple flags are specified, HV_EINVAL is returned.
+ * Specifying none of the flags results in using the default page size.
+ * All cores participating in a given client must request the same
+ * page size, or the results are undefined.
+ *
* @param page_table Root of the page table.
* @param access PTE providing info on how to read the page table. This
* value must be consistent between multiple tiles sharing a page table,
@@ -667,8 +724,36 @@ int hv_install_context(HV_PhysAddr page_table, HV_PTE access, HV_ASID asid,
#define HV_CTX_DIRECTIO 0x1 /**< Direct I/O requests are accepted from
PL0. */
+#define HV_CTX_PG_SM_4K 0x10 /**< Use 4K small pages, if available. */
+#define HV_CTX_PG_SM_16K 0x20 /**< Use 16K small pages, if available. */
+#define HV_CTX_PG_SM_64K 0x40 /**< Use 64K small pages, if available. */
+#define HV_CTX_PG_SM_MASK 0xf0 /**< Mask of all possible small pages. */
+
#ifndef __ASSEMBLER__
+
+/** Set the number of pages ganged together by HV_PTE_SUPER at a
+ * particular level of the page table.
+ *
+ * The current TILE-Gx hardware only supports powers of four
+ * (i.e. log2_count must be a multiple of two), and the requested
+ * "super" page size must be less than the span of the next level in
+ * the page table. The largest size that can be requested is 64GB.
+ *
+ * The shift value is initially "0" for all page table levels,
+ * indicating that the HV_PTE_SUPER bit is effectively ignored.
+ *
+ * If you change the count from one non-zero value to another, the
+ * hypervisor will flush the entire TLB and TSB to avoid confusion.
+ *
+ * @param level Page table level (0, 1, or 2)
+ * @param log2_count Base-2 log of the number of pages to gang together,
+ * i.e. how far to shift the base page size left to obtain the super page size.
+ * @return Zero on success, or a hypervisor error code on failure.
+ */
+int hv_set_pte_super_shift(int level, int log2_count);
+
+
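To make the constraints above concrete: log2_count must be a multiple of two, and the resulting super-page size must stay below the span of the next page-table level. A quick sketch with assumed sizes (64KB base pages under a 16MB level-1 span):

    #include <stdio.h>

    int main(void)
    {
        int page_shift = 16;    /* 64KB level-2 base pages (assumed) */
        int hpage_shift = 24;   /* span of the next level up: 16MB (assumed) */
        int log2_count;

        /* Powers of four only, and stay below the next level's span. */
        for (log2_count = 2; page_shift + log2_count < hpage_shift; log2_count += 2)
            printf("log2_count=%d -> %lu KB super pages\n", log2_count,
                   (1UL << (page_shift + log2_count)) >> 10);
        return 0;
    }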
/** Value returned from hv_inquire_context(). */
typedef struct
{
@@ -986,8 +1071,13 @@ HV_VirtAddrRange hv_inquire_virtual(int idx);
/** A range of ASID values. */
typedef struct
{
+#ifndef __BIG_ENDIAN__
HV_ASID start; /**< First ASID in the range. */
unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */
+#else
+ unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */
+ HV_ASID start; /**< First ASID in the range. */
+#endif
} HV_ASIDRange;
/** Returns information about a range of ASIDs.
@@ -1238,11 +1328,14 @@ HV_Errno hv_set_command_line(HV_VirtAddr buf, int length);
* with the existing priority pages) or "red/black" (if they don't).
* The bitmask provides information on which parts of the cache
* have been used for pinned pages so far on this tile; if (1 << N)
- * appears in the bitmask, that indicates that a page has been marked
- * "priority" whose PFN equals N, mod 8.
+ * appears in the bitmask, that indicates that a 4KB region of the
+ * cache starting at (N * 4KB) is in use by a "priority" page.
+ * The portion of cache used by a particular page can be computed
+ * by taking the page's PA, modulo CHIP_L2_CACHE_SIZE(), and setting
+ * all the "4KB" bits corresponding to the actual page size.
* @param bitmask A bitmap of priority page set values
*/
-void hv_set_caching(unsigned int bitmask);
+void hv_set_caching(unsigned long bitmask);
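The reworded hv_set_caching() comment describes the bitmask in terms of 4KB cache regions rather than PFNs mod 8. A sketch of how the bits for one pinned page could be computed under that scheme; the cache size and page size below are assumptions for the example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long l2_cache_size = 256 * 1024;   /* assumed CHIP_L2_CACHE_SIZE() */
        unsigned long page_size = 16 * 1024;        /* a 16KB priority page */
        uint64_t pa = 0x1234c000ULL;                /* page-aligned physical address */

        unsigned long offset = pa % l2_cache_size;  /* where the page lands in cache */
        unsigned long first = offset / 4096;        /* first 4KB region it covers */
        unsigned long nbits = page_size / 4096;     /* one bit per 4KB of the page */
        unsigned long bitmask = 0, i;

        for (i = 0; i < nbits; i++)
            bitmask |= 1UL << (first + i);

        printf("bitmask = 0x%lx\n", bitmask);       /* 0xf000 for this example */
        return 0;
    }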
/** Zero out a specified number of pages.
@@ -1308,6 +1401,7 @@ typedef enum
/** Message recipient. */
typedef struct
{
+#ifndef __BIG_ENDIAN__
/** X coordinate, relative to supervisor's top-left coordinate */
unsigned int x:11;
@@ -1316,6 +1410,11 @@ typedef struct
/** Status of this recipient */
HV_Recip_State state:10;
+#else /* __BIG_ENDIAN__ */
+ HV_Recip_State state:10;
+ unsigned int y:11;
+ unsigned int x:11;
+#endif
} HV_Recipient;
/** Send a message to a set of recipients.
@@ -1851,12 +1950,12 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
#define HV_PTE_INDEX_USER 10 /**< Page is user-accessible */
#define HV_PTE_INDEX_ACCESSED 11 /**< Page has been accessed */
#define HV_PTE_INDEX_DIRTY 12 /**< Page has been written */
- /* Bits 13-15 are reserved for
+ /* Bits 13-14 are reserved for
future use. */
+#define HV_PTE_INDEX_SUPER 15 /**< Pages ganged together for TLB */
#define HV_PTE_INDEX_MODE 16 /**< Page mode; see HV_PTE_MODE_xxx */
#define HV_PTE_MODE_BITS 3 /**< Number of bits in mode */
- /* Bit 19 is reserved for
- future use. */
+#define HV_PTE_INDEX_CLIENT2 19 /**< Page client state 2 */
#define HV_PTE_INDEX_LOTAR 20 /**< Page's LOTAR; must be high bits
of word */
#define HV_PTE_LOTAR_BITS 12 /**< Number of bits in a LOTAR */
@@ -1869,15 +1968,6 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
of word */
#define HV_PTE_PTFN_BITS 29 /**< Number of bits in a PTFN */
-/** Position of the PFN field within the PTE (subset of the PTFN). */
-#define HV_PTE_INDEX_PFN (HV_PTE_INDEX_PTFN + (HV_LOG2_PAGE_SIZE_SMALL - \
- HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Length of the PFN field within the PTE (subset of the PTFN). */
-#define HV_PTE_INDEX_PFN_BITS (HV_PTE_INDEX_PTFN_BITS - \
- (HV_LOG2_PAGE_SIZE_SMALL - \
- HV_LOG2_PAGE_TABLE_ALIGN))
-
/*
* Legal values for the PTE's mode field
*/
@@ -1957,7 +2047,10 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
/** Does this PTE map a page?
*
- * If this bit is set in the level-1 page table, the entry should be
+ * If this bit is set in a level-0 page table, the entry should be
+ * interpreted as a level-2 page table entry mapping a jumbo page.
+ *
+ * If this bit is set in a level-1 page table, the entry should be
* interpreted as a level-2 page table entry mapping a large page.
*
* This bit should not be modified by the client while PRESENT is set, as
@@ -1967,6 +2060,18 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
*/
#define HV_PTE_PAGE (__HV_PTE_ONE << HV_PTE_INDEX_PAGE)
+/** Does this PTE implicitly reference multiple pages?
+ *
+ * If this bit is set in the page table (either in the level-2 page table,
+ * or in a higher level page table in conjunction with the PAGE bit)
+ * then the PTE specifies a range of contiguous pages, not a single page.
+ * The hv_set_pte_super_shift() call allows you to specify the count
+ * for each level of the page table.
+ *
+ * Note: this bit is not supported on TILEPro systems.
+ */
+#define HV_PTE_SUPER (__HV_PTE_ONE << HV_PTE_INDEX_SUPER)
+
/** Is this a global (non-ASID) mapping?
*
* If this bit is set, the translations established by this PTE will
@@ -2046,6 +2151,13 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
*/
#define HV_PTE_CLIENT1 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT1)
+/** Client-private bit in PTE.
+ *
+ * This bit is guaranteed not to be inspected or modified by the
+ * hypervisor.
+ */
+#define HV_PTE_CLIENT2 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT2)
+
/** Non-coherent (NC) bit in PTE.
*
* If this bit is set, the mapping that is set up will be non-coherent
@@ -2178,8 +2290,10 @@ hv_pte_clear_##name(HV_PTE pte) \
*/
_HV_BIT(present, PRESENT)
_HV_BIT(page, PAGE)
+_HV_BIT(super, SUPER)
_HV_BIT(client0, CLIENT0)
_HV_BIT(client1, CLIENT1)
+_HV_BIT(client2, CLIENT2)
_HV_BIT(migrating, MIGRATING)
_HV_BIT(nc, NC)
_HV_BIT(readable, READABLE)
@@ -2222,40 +2336,11 @@ hv_pte_set_mode(HV_PTE pte, unsigned int val)
*
* This field contains the upper bits of the CPA (client physical
* address) of the target page; the complete CPA is this field with
- * HV_LOG2_PAGE_SIZE_SMALL zero bits appended to it.
- *
- * For PTEs in a level-1 page table where the Page bit is set, the
- * CPA must be aligned modulo the large page size.
- */
-static __inline unsigned int
-hv_pte_get_pfn(const HV_PTE pte)
-{
- return pte.val >> HV_PTE_INDEX_PFN;
-}
-
-
-/** Set the page frame number into a PTE. See hv_pte_get_pfn. */
-static __inline HV_PTE
-hv_pte_set_pfn(HV_PTE pte, unsigned int val)
-{
- /*
- * Note that the use of "PTFN" in the next line is intentional; we
- * don't want any garbage lower bits left in that field.
- */
- pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS) - 1) << HV_PTE_INDEX_PTFN);
- pte.val |= (__hv64) val << HV_PTE_INDEX_PFN;
- return pte;
-}
-
-/** Get the page table frame number from the PTE.
- *
- * This field contains the upper bits of the CPA (client physical
- * address) of the target page table; the complete CPA is this field with
- * with HV_PAGE_TABLE_ALIGN zero bits appended to it.
+ * HV_LOG2_PAGE_TABLE_ALIGN zero bits appended to it.
*
- * For PTEs in a level-1 page table when the Page bit is not set, the
- * CPA must be aligned modulo the sticter of HV_PAGE_TABLE_ALIGN and
- * the level-2 page table size.
+ * For all PTEs in the lowest-level page table, and for all PTEs with
+ * the Page bit set in all page tables, the CPA must be aligned modulo
+ * the relevant page size.
*/
static __inline unsigned long
hv_pte_get_ptfn(const HV_PTE pte)
@@ -2263,7 +2348,6 @@ hv_pte_get_ptfn(const HV_PTE pte)
return pte.val >> HV_PTE_INDEX_PTFN;
}
-
/** Set the page table frame number into a PTE. See hv_pte_get_ptfn. */
static __inline HV_PTE
hv_pte_set_ptfn(HV_PTE pte, unsigned long val)
@@ -2273,6 +2357,20 @@ hv_pte_set_ptfn(HV_PTE pte, unsigned long val)
return pte;
}
+/** Get the client physical address from the PTE. See hv_pte_set_ptfn. */
+static __inline HV_PhysAddr
+hv_pte_get_pa(const HV_PTE pte)
+{
+ return (__hv64) hv_pte_get_ptfn(pte) << HV_LOG2_PAGE_TABLE_ALIGN;
+}
+
+/** Set the client physical address into a PTE. See hv_pte_get_ptfn. */
+static __inline HV_PTE
+hv_pte_set_pa(HV_PTE pte, HV_PhysAddr pa)
+{
+ return hv_pte_set_ptfn(pte, pa >> HV_LOG2_PAGE_TABLE_ALIGN);
+}
+
/** Get the remote tile caching this page.
*
@@ -2308,28 +2406,20 @@ hv_pte_set_lotar(HV_PTE pte, unsigned int val)
#endif /* !__ASSEMBLER__ */
-/** Converts a client physical address to a pfn. */
-#define HV_CPA_TO_PFN(p) ((p) >> HV_LOG2_PAGE_SIZE_SMALL)
-
-/** Converts a pfn to a client physical address. */
-#define HV_PFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_SIZE_SMALL)
-
/** Converts a client physical address to a ptfn. */
#define HV_CPA_TO_PTFN(p) ((p) >> HV_LOG2_PAGE_TABLE_ALIGN)
/** Converts a ptfn to a client physical address. */
#define HV_PTFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_TABLE_ALIGN)
-/** Converts a ptfn to a pfn. */
-#define HV_PTFN_TO_PFN(p) \
- ((p) >> (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Converts a pfn to a ptfn. */
-#define HV_PFN_TO_PTFN(p) \
- ((p) << (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
-
#if CHIP_VA_WIDTH() > 32
+/*
+ * Note that we currently do not allow customizing the page size
+ * of the L0 pages, but fix them at 4GB, so we do not use the
+ * "_HV_xxx" nomenclature for the L0 macros.
+ */
+
/** Log number of HV_PTE entries in L0 page table */
#define HV_LOG2_L0_ENTRIES (CHIP_VA_WIDTH() - HV_LOG2_L1_SPAN)
@@ -2359,69 +2449,104 @@ hv_pte_set_lotar(HV_PTE pte, unsigned int val)
#endif /* CHIP_VA_WIDTH() > 32 */
/** Log number of HV_PTE entries in L1 page table */
-#define HV_LOG2_L1_ENTRIES (HV_LOG2_L1_SPAN - HV_LOG2_PAGE_SIZE_LARGE)
+#define _HV_LOG2_L1_ENTRIES(log2_page_size_large) \
+ (HV_LOG2_L1_SPAN - log2_page_size_large)
/** Number of HV_PTE entries in L1 page table */
-#define HV_L1_ENTRIES (1 << HV_LOG2_L1_ENTRIES)
+#define _HV_L1_ENTRIES(log2_page_size_large) \
+ (1 << _HV_LOG2_L1_ENTRIES(log2_page_size_large))
/** Log size of L1 page table in bytes */
-#define HV_LOG2_L1_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L1_ENTRIES)
+#define _HV_LOG2_L1_SIZE(log2_page_size_large) \
+ (HV_LOG2_PTE_SIZE + _HV_LOG2_L1_ENTRIES(log2_page_size_large))
/** Size of L1 page table in bytes */
-#define HV_L1_SIZE (1 << HV_LOG2_L1_SIZE)
+#define _HV_L1_SIZE(log2_page_size_large) \
+ (1 << _HV_LOG2_L1_SIZE(log2_page_size_large))
/** Log number of HV_PTE entries in level-2 page table */
-#define HV_LOG2_L2_ENTRIES (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)
+#define _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
+ (log2_page_size_large - log2_page_size_small)
/** Number of HV_PTE entries in level-2 page table */
-#define HV_L2_ENTRIES (1 << HV_LOG2_L2_ENTRIES)
+#define _HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
+ (1 << _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
/** Log size of level-2 page table in bytes */
-#define HV_LOG2_L2_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L2_ENTRIES)
+#define _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small) \
+ (HV_LOG2_PTE_SIZE + \
+ _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
/** Size of level-2 page table in bytes */
-#define HV_L2_SIZE (1 << HV_LOG2_L2_SIZE)
+#define _HV_L2_SIZE(log2_page_size_large, log2_page_size_small) \
+ (1 << _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small))
#ifdef __ASSEMBLER__
#if CHIP_VA_WIDTH() > 32
/** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
- (((va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+ (((va) >> log2_page_size_large) & (_HV_L1_ENTRIES(log2_page_size_large) - 1))
#else /* CHIP_VA_WIDTH() > 32 */
/** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
- (((va) >> HV_LOG2_PAGE_SIZE_LARGE))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+ (((va) >> log2_page_size_large))
#endif /* CHIP_VA_WIDTH() > 32 */
/** Index in level-2 page table for a specific VA */
-#define HV_L2_INDEX(va) \
- (((va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))
+#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
+ (((va) >> log2_page_size_small) & \
+ (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
#else /* __ASSEMBLER __ */
#if CHIP_VA_WIDTH() > 32
/** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
- (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+ (((HV_VirtAddr)(va) >> log2_page_size_large) & \
+ (_HV_L1_ENTRIES(log2_page_size_large) - 1))
#else /* CHIP_VA_WIDTH() > 32 */
/** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
- (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+ (((HV_VirtAddr)(va) >> log2_page_size_large))
#endif /* CHIP_VA_WIDTH() > 32 */
/** Index in level-2 page table for a specific VA */
-#define HV_L2_INDEX(va) \
- (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))
+#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
+ (((HV_VirtAddr)(va) >> log2_page_size_small) & \
+ (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
#endif /* __ASSEMBLER __ */
-#endif /* _TILE_HV_H */
+/** Position of the PFN field within the PTE (subset of the PTFN). */
+#define _HV_PTE_INDEX_PFN(log2_page_size) \
+ (HV_PTE_INDEX_PTFN + (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Length of the PFN field within the PTE (subset of the PTFN). */
+#define _HV_PTE_INDEX_PFN_BITS(log2_page_size) \
+ (HV_PTE_INDEX_PTFN_BITS - (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Converts a client physical address to a pfn. */
+#define _HV_CPA_TO_PFN(p, log2_page_size) ((p) >> log2_page_size)
+
+/** Converts a pfn to a client physical address. */
+#define _HV_PFN_TO_CPA(p, log2_page_size) \
+ (((HV_PhysAddr)(p)) << log2_page_size)
+
+/** Converts a ptfn to a pfn. */
+#define _HV_PTFN_TO_PFN(p, log2_page_size) \
+ ((p) >> (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Converts a pfn to a ptfn. */
+#define _HV_PFN_TO_PTFN(p, log2_page_size) \
+ ((p) << (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+#endif /* _HV_HV_H */
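(A short illustration, not part of the patch: the new conversions are parameterized by a log2 page size rather than hard-coding HV_LOG2_PAGE_SIZE_SMALL/LARGE. The minimal user-space sketch below mirrors those macros to show how a CPA, PFN, and PTFN relate; the HV_LOG2_PAGE_TABLE_ALIGN value and the 64KB page size are assumptions chosen only for the demonstration.)

#include <stdio.h>
#include <stdint.h>

typedef uint64_t HV_PhysAddr;

#define HV_LOG2_PAGE_TABLE_ALIGN 13     /* assumed value, illustration only */

/* Local mirrors of the parameterized macros added above. */
#define _HV_CPA_TO_PFN(p, log2_page_size)  ((p) >> (log2_page_size))
#define _HV_PFN_TO_CPA(p, log2_page_size)  (((HV_PhysAddr)(p)) << (log2_page_size))
#define _HV_PFN_TO_PTFN(p, log2_page_size) \
        ((p) << ((log2_page_size) - HV_LOG2_PAGE_TABLE_ALIGN))
#define HV_PTFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_TABLE_ALIGN)

int main(void)
{
        unsigned int log2_small = 16;           /* assume 64KB small pages */
        HV_PhysAddr cpa = 0x12340000ULL;        /* already page-aligned */

        uint64_t pfn  = _HV_CPA_TO_PFN(cpa, log2_small);
        uint64_t ptfn = _HV_PFN_TO_PTFN(pfn, log2_small);

        /* The PFN and the PTFN both name the same physical page. */
        printf("cpa=%#llx pfn=%#llx ptfn=%#llx back=%#llx\n",
               (unsigned long long)cpa, (unsigned long long)pfn,
               (unsigned long long)ptfn,
               (unsigned long long)HV_PTFN_TO_CPA(ptfn));
        return 0;
}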
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index 0d826faf8f35..5de99248d8df 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -9,10 +9,9 @@ obj-y := backtrace.o entry.o irq.o messaging.o \
intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o
obj-$(CONFIG_HARDWALL) += hardwall.o
-obj-$(CONFIG_TILEGX) += futex_64.o
obj-$(CONFIG_COMPAT) += compat.o compat_signal.o
obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel_$(BITS).o
obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index ec91568df880..133c4b56a99e 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -100,8 +100,9 @@ STD_ENTRY(smp_nap)
*/
STD_ENTRY(_cpu_idle)
movei r1, 1
+ IRQ_ENABLE_LOAD(r2, r3)
mtspr INTERRUPT_CRITICAL_SECTION, r1
- IRQ_ENABLE(r2, r3) /* unmask, but still with ICS set */
+ IRQ_ENABLE_APPLY(r2, r3) /* unmask, but still with ICS set */
mtspr INTERRUPT_CRITICAL_SECTION, zero
.global _cpu_idle_nap
_cpu_idle_nap:
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 8c41891aab34..20273ee37deb 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -33,59 +33,157 @@
/*
- * This data structure tracks the rectangle data, etc., associated
- * one-to-one with a "struct file *" from opening HARDWALL_FILE.
+ * Implement a per-cpu "hardwall" resource class such as UDN or IPI.
+ * We use "hardwall" nomenclature throughout for historical reasons.
+ * The lock here controls access to the list data structure as well as
+ * to the items on the list.
+ */
+struct hardwall_type {
+ int index;
+ int is_xdn;
+ int is_idn;
+ int disabled;
+ const char *name;
+ struct list_head list;
+ spinlock_t lock;
+ struct proc_dir_entry *proc_dir;
+};
+
+enum hardwall_index {
+ HARDWALL_UDN = 0,
+#ifndef __tilepro__
+ HARDWALL_IDN = 1,
+ HARDWALL_IPI = 2,
+#endif
+ _HARDWALL_TYPES
+};
+
+static struct hardwall_type hardwall_types[] = {
+ { /* user-space access to UDN */
+ 0,
+ 1,
+ 0,
+ 0,
+ "udn",
+ LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
+ __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
+ NULL
+ },
+#ifndef __tilepro__
+ { /* user-space access to IDN */
+ 1,
+ 1,
+ 1,
+ 1, /* disabled pending hypervisor support */
+ "idn",
+ LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
+ __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
+ NULL
+ },
+ { /* access to user-space IPI */
+ 2,
+ 0,
+ 0,
+ 0,
+ "ipi",
+ LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
+ __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
+ NULL
+ },
+#endif
+};
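(For readability, not part of the patch: the positional initializers above fill the struct hardwall_type fields in declaration order -- index, is_xdn, is_idn, disabled, name, list, lock, proc_dir. The stand-alone sketch below restates the UDN entry with designated initializers so the field meanings are explicit; the kernel-only list/lock/proc_dir members are elided and the struct name is invented for the example.)

#include <stdio.h>

struct hardwall_type_fields {   /* scalar subset of struct hardwall_type */
        int index;
        int is_xdn;
        int is_idn;
        int disabled;
        const char *name;
};

static const struct hardwall_type_fields udn_example = {
        .index    = 0,
        .is_xdn   = 1,          /* the UDN is a user-network ("XDN") resource */
        .is_idn   = 0,
        .disabled = 0,          /* enabled unless "noudn" is passed at boot */
        .name     = "udn",
};

int main(void)
{
        printf("%s: xdn=%d idn=%d disabled=%d\n", udn_example.name,
               udn_example.is_xdn, udn_example.is_idn, udn_example.disabled);
        return 0;
}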
+
+/*
+ * This data structure tracks the cpu data, etc., associated
+ * one-to-one with a "struct file *" from opening a hardwall device file.
* Note that the file's private data points back to this structure.
*/
struct hardwall_info {
- struct list_head list; /* "rectangles" list */
+ struct list_head list; /* for hardwall_types.list */
struct list_head task_head; /* head of tasks in this hardwall */
- struct cpumask cpumask; /* cpus in the rectangle */
+ struct hardwall_type *type; /* type of this resource */
+ struct cpumask cpumask; /* cpus reserved */
+ int id; /* integer id for this hardwall */
+ int teardown_in_progress; /* are we tearing this one down? */
+
+ /* Remaining fields only valid for user-network resources. */
int ulhc_x; /* upper left hand corner x coord */
int ulhc_y; /* upper left hand corner y coord */
int width; /* rectangle width */
int height; /* rectangle height */
- int id; /* integer id for this hardwall */
- int teardown_in_progress; /* are we tearing this one down? */
+#if CHIP_HAS_REV1_XDN()
+ atomic_t xdn_pending_count; /* cores in phase 1 of drain */
+#endif
};
-/* Currently allocated hardwall rectangles */
-static LIST_HEAD(rectangles);
/* /proc/tile/hardwall */
static struct proc_dir_entry *hardwall_proc_dir;
/* Functions to manage files in /proc/tile/hardwall. */
-static void hardwall_add_proc(struct hardwall_info *rect);
-static void hardwall_remove_proc(struct hardwall_info *rect);
-
-/*
- * Guard changes to the hardwall data structures.
- * This could be finer grained (e.g. one lock for the list of hardwall
- * rectangles, then separate embedded locks for each one's list of tasks),
- * but there are subtle correctness issues when trying to start with
- * a task's "hardwall" pointer and lock the correct rectangle's embedded
- * lock in the presence of a simultaneous deactivation, so it seems
- * easier to have a single lock, given that none of these data
- * structures are touched very frequently during normal operation.
- */
-static DEFINE_SPINLOCK(hardwall_lock);
+static void hardwall_add_proc(struct hardwall_info *);
+static void hardwall_remove_proc(struct hardwall_info *);
/* Allow disabling UDN access. */
-static int udn_disabled;
static int __init noudn(char *str)
{
pr_info("User-space UDN access is disabled\n");
- udn_disabled = 1;
+ hardwall_types[HARDWALL_UDN].disabled = 1;
return 0;
}
early_param("noudn", noudn);
+#ifndef __tilepro__
+/* Allow disabling IDN access. */
+static int __init noidn(char *str)
+{
+ pr_info("User-space IDN access is disabled\n");
+ hardwall_types[HARDWALL_IDN].disabled = 1;
+ return 0;
+}
+early_param("noidn", noidn);
+
+/* Allow disabling IPI access. */
+static int __init noipi(char *str)
+{
+ pr_info("User-space IPI access is disabled\n");
+ hardwall_types[HARDWALL_IPI].disabled = 1;
+ return 0;
+}
+early_param("noipi", noipi);
+#endif
+
/*
- * Low-level primitives
+ * Low-level primitives for UDN/IDN
*/
+#ifdef __tilepro__
+#define mtspr_XDN(hwt, name, val) \
+ do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0)
+#define mtspr_MPL_XDN(hwt, name, val) \
+ do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0)
+#define mfspr_XDN(hwt, name) \
+ ((void)(hwt), __insn_mfspr(SPR_UDN_##name))
+#else
+#define mtspr_XDN(hwt, name, val) \
+ do { \
+ if ((hwt)->is_idn) \
+ __insn_mtspr(SPR_IDN_##name, (val)); \
+ else \
+ __insn_mtspr(SPR_UDN_##name, (val)); \
+ } while (0)
+#define mtspr_MPL_XDN(hwt, name, val) \
+ do { \
+ if ((hwt)->is_idn) \
+ __insn_mtspr(SPR_MPL_IDN_##name, (val)); \
+ else \
+ __insn_mtspr(SPR_MPL_UDN_##name, (val)); \
+ } while (0)
+#define mfspr_XDN(hwt, name) \
+ ((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name))
+#endif
+
/* Set a CPU bit if the CPU is online. */
#define cpu_online_set(cpu, dst) do { \
if (cpu_online(cpu)) \
@@ -101,7 +199,7 @@ static int contains(struct hardwall_info *r, int x, int y)
}
/* Compute the rectangle parameters and validate the cpumask. */
-static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
+static int check_rectangle(struct hardwall_info *r, struct cpumask *mask)
{
int x, y, cpu, ulhc, lrhc;
@@ -114,8 +212,6 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
r->ulhc_y = cpu_y(ulhc);
r->width = cpu_x(lrhc) - r->ulhc_x + 1;
r->height = cpu_y(lrhc) - r->ulhc_y + 1;
- cpumask_copy(&r->cpumask, mask);
- r->id = ulhc; /* The ulhc cpu id can be the hardwall id. */
/* Width and height must be positive */
if (r->width <= 0 || r->height <= 0)
@@ -128,7 +224,7 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
return -EINVAL;
/*
- * Note that offline cpus can't be drained when this UDN
+ * Note that offline cpus can't be drained when this user network
* rectangle eventually closes. We used to detect this
* situation and print a warning, but it annoyed users and
* they ignored it anyway, so now we just return without a
@@ -137,16 +233,6 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
return 0;
}
-/* Do the two given rectangles overlap on any cpu? */
-static int overlaps(struct hardwall_info *a, struct hardwall_info *b)
-{
- return a->ulhc_x + a->width > b->ulhc_x && /* A not to the left */
- b->ulhc_x + b->width > a->ulhc_x && /* B not to the left */
- a->ulhc_y + a->height > b->ulhc_y && /* A not above */
- b->ulhc_y + b->height > a->ulhc_y; /* B not above */
-}
-
-
/*
* Hardware management of hardwall setup, teardown, trapping,
* and enabling/disabling PL0 access to the networks.
@@ -157,23 +243,35 @@ enum direction_protect {
N_PROTECT = (1 << 0),
E_PROTECT = (1 << 1),
S_PROTECT = (1 << 2),
- W_PROTECT = (1 << 3)
+ W_PROTECT = (1 << 3),
+ C_PROTECT = (1 << 4),
};
-static void enable_firewall_interrupts(void)
+static inline int xdn_which_interrupt(struct hardwall_type *hwt)
+{
+#ifndef __tilepro__
+ if (hwt->is_idn)
+ return INT_IDN_FIREWALL;
+#endif
+ return INT_UDN_FIREWALL;
+}
+
+static void enable_firewall_interrupts(struct hardwall_type *hwt)
{
- arch_local_irq_unmask_now(INT_UDN_FIREWALL);
+ arch_local_irq_unmask_now(xdn_which_interrupt(hwt));
}
-static void disable_firewall_interrupts(void)
+static void disable_firewall_interrupts(struct hardwall_type *hwt)
{
- arch_local_irq_mask_now(INT_UDN_FIREWALL);
+ arch_local_irq_mask_now(xdn_which_interrupt(hwt));
}
/* Set up hardwall on this cpu based on the passed hardwall_info. */
-static void hardwall_setup_ipi_func(void *info)
+static void hardwall_setup_func(void *info)
{
struct hardwall_info *r = info;
+ struct hardwall_type *hwt = r->type;
+
int cpu = smp_processor_id();
int x = cpu % smp_width;
int y = cpu / smp_width;
@@ -187,13 +285,12 @@ static void hardwall_setup_ipi_func(void *info)
if (y == r->ulhc_y + r->height - 1)
bits |= S_PROTECT;
BUG_ON(bits == 0);
- __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits);
- enable_firewall_interrupts();
-
+ mtspr_XDN(hwt, DIRECTION_PROTECT, bits);
+ enable_firewall_interrupts(hwt);
}
/* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
-static void hardwall_setup(struct hardwall_info *r)
+static void hardwall_protect_rectangle(struct hardwall_info *r)
{
int x, y, cpu, delta;
struct cpumask rect_cpus;
@@ -217,37 +314,50 @@ static void hardwall_setup(struct hardwall_info *r)
}
/* Then tell all the cpus to set up their protection SPR */
- on_each_cpu_mask(&rect_cpus, hardwall_setup_ipi_func, r, 1);
+ on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
}
void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
{
struct hardwall_info *rect;
+ struct hardwall_type *hwt;
struct task_struct *p;
struct siginfo info;
- int x, y;
int cpu = smp_processor_id();
int found_processes;
unsigned long flags;
-
struct pt_regs *old_regs = set_irq_regs(regs);
+
irq_enter();
+ /* Figure out which network trapped. */
+ switch (fault_num) {
+#ifndef __tilepro__
+ case INT_IDN_FIREWALL:
+ hwt = &hardwall_types[HARDWALL_IDN];
+ break;
+#endif
+ case INT_UDN_FIREWALL:
+ hwt = &hardwall_types[HARDWALL_UDN];
+ break;
+ default:
+ BUG();
+ }
+ BUG_ON(hwt->disabled);
+
/* This tile trapped a network access; find the rectangle. */
- x = cpu % smp_width;
- y = cpu / smp_width;
- spin_lock_irqsave(&hardwall_lock, flags);
- list_for_each_entry(rect, &rectangles, list) {
- if (contains(rect, x, y))
+ spin_lock_irqsave(&hwt->lock, flags);
+ list_for_each_entry(rect, &hwt->list, list) {
+ if (cpumask_test_cpu(cpu, &rect->cpumask))
break;
}
/*
* It shouldn't be possible not to find this cpu on the
* rectangle list, since only cpus in rectangles get hardwalled.
- * The hardwall is only removed after the UDN is drained.
+ * The hardwall is only removed after the user network is drained.
*/
- BUG_ON(&rect->list == &rectangles);
+ BUG_ON(&rect->list == &hwt->list);
/*
* If we already started teardown on this hardwall, don't worry;
@@ -255,30 +365,32 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
* to quiesce.
*/
if (rect->teardown_in_progress) {
- pr_notice("cpu %d: detected hardwall violation %#lx"
+ pr_notice("cpu %d: detected %s hardwall violation %#lx"
" while teardown already in progress\n",
- cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
+ cpu, hwt->name,
+ (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
goto done;
}
/*
* Kill off any process that is activated in this rectangle.
* We bypass security to deliver the signal, since it must be
- * one of the activated processes that generated the UDN
+ * one of the activated processes that generated the user network
* message that caused this trap, and all the activated
* processes shared a single open file so are pretty tightly
* bound together from a security point of view to begin with.
*/
rect->teardown_in_progress = 1;
wmb(); /* Ensure visibility of rectangle before notifying processes. */
- pr_notice("cpu %d: detected hardwall violation %#lx...\n",
- cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
+ pr_notice("cpu %d: detected %s hardwall violation %#lx...\n",
+ cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_HARDWALL;
found_processes = 0;
- list_for_each_entry(p, &rect->task_head, thread.hardwall_list) {
- BUG_ON(p->thread.hardwall != rect);
+ list_for_each_entry(p, &rect->task_head,
+ thread.hardwall[hwt->index].list) {
+ BUG_ON(p->thread.hardwall[hwt->index].info != rect);
if (!(p->flags & PF_EXITING)) {
found_processes = 1;
pr_notice("hardwall: killing %d\n", p->pid);
@@ -289,7 +401,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
pr_notice("hardwall: no associated processes!\n");
done:
- spin_unlock_irqrestore(&hardwall_lock, flags);
+ spin_unlock_irqrestore(&hwt->lock, flags);
/*
* We have to disable firewall interrupts now, or else when we
@@ -298,48 +410,87 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
* haven't yet drained the network, and that would allow packets
* to cross out of the hardwall region.
*/
- disable_firewall_interrupts();
+ disable_firewall_interrupts(hwt);
irq_exit();
set_irq_regs(old_regs);
}
-/* Allow access from user space to the UDN. */
-void grant_network_mpls(void)
+/* Allow access from user space to the user network. */
+void grant_hardwall_mpls(struct hardwall_type *hwt)
{
- __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_0, 1);
- __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_0, 1);
- __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_0, 1);
- __insn_mtspr(SPR_MPL_UDN_TIMER_SET_0, 1);
+#ifndef __tilepro__
+ if (!hwt->is_xdn) {
+ __insn_mtspr(SPR_MPL_IPI_0_SET_0, 1);
+ return;
+ }
+#endif
+ mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1);
+ mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1);
+ mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1);
+ mtspr_MPL_XDN(hwt, TIMER_SET_0, 1);
#if !CHIP_HAS_REV1_XDN()
- __insn_mtspr(SPR_MPL_UDN_REFILL_SET_0, 1);
- __insn_mtspr(SPR_MPL_UDN_CA_SET_0, 1);
+ mtspr_MPL_XDN(hwt, REFILL_SET_0, 1);
+ mtspr_MPL_XDN(hwt, CA_SET_0, 1);
#endif
}
-/* Deny access from user space to the UDN. */
-void restrict_network_mpls(void)
+/* Deny access from user space to the user network. */
+void restrict_hardwall_mpls(struct hardwall_type *hwt)
{
- __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_1, 1);
- __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_1, 1);
- __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_1, 1);
- __insn_mtspr(SPR_MPL_UDN_TIMER_SET_1, 1);
+#ifndef __tilepro__
+ if (!hwt->is_xdn) {
+ __insn_mtspr(SPR_MPL_IPI_0_SET_1, 1);
+ return;
+ }
+#endif
+ mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1);
+ mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1);
+ mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1);
+ mtspr_MPL_XDN(hwt, TIMER_SET_1, 1);
#if !CHIP_HAS_REV1_XDN()
- __insn_mtspr(SPR_MPL_UDN_REFILL_SET_1, 1);
- __insn_mtspr(SPR_MPL_UDN_CA_SET_1, 1);
+ mtspr_MPL_XDN(hwt, REFILL_SET_1, 1);
+ mtspr_MPL_XDN(hwt, CA_SET_1, 1);
#endif
}
+/* Restrict or deny as necessary for the task we're switching to. */
+void hardwall_switch_tasks(struct task_struct *prev,
+ struct task_struct *next)
+{
+ int i;
+ for (i = 0; i < HARDWALL_TYPES; ++i) {
+ if (prev->thread.hardwall[i].info != NULL) {
+ if (next->thread.hardwall[i].info == NULL)
+ restrict_hardwall_mpls(&hardwall_types[i]);
+ } else if (next->thread.hardwall[i].info != NULL) {
+ grant_hardwall_mpls(&hardwall_types[i]);
+ }
+ }
+}
+
+/* Does this task have the right to IPI the given cpu? */
+int hardwall_ipi_valid(int cpu)
+{
+#ifdef __tilegx__
+ struct hardwall_info *info =
+ current->thread.hardwall[HARDWALL_IPI].info;
+ return info && cpumask_test_cpu(cpu, &info->cpumask);
+#else
+ return 0;
+#endif
+}
/*
- * Code to create, activate, deactivate, and destroy hardwall rectangles.
+ * Code to create, activate, deactivate, and destroy hardwall resources.
*/
-/* Create a hardwall for the given rectangle */
-static struct hardwall_info *hardwall_create(
- size_t size, const unsigned char __user *bits)
+/* Create a hardwall for the given resource */
+static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
+ size_t size,
+ const unsigned char __user *bits)
{
- struct hardwall_info *iter, *rect;
+ struct hardwall_info *iter, *info;
struct cpumask mask;
unsigned long flags;
int rc;
@@ -370,55 +521,62 @@ static struct hardwall_info *hardwall_create(
}
}
- /* Allocate a new rectangle optimistically. */
- rect = kmalloc(sizeof(struct hardwall_info),
+ /* Allocate a new hardwall_info optimistically. */
+ info = kmalloc(sizeof(struct hardwall_info),
GFP_KERNEL | __GFP_ZERO);
- if (rect == NULL)
+ if (info == NULL)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&rect->task_head);
+ INIT_LIST_HEAD(&info->task_head);
+ info->type = hwt;
/* Compute the rectangle size and validate that it's plausible. */
- rc = setup_rectangle(rect, &mask);
- if (rc != 0) {
- kfree(rect);
- return ERR_PTR(rc);
+ cpumask_copy(&info->cpumask, &mask);
+ info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits);
+ if (hwt->is_xdn) {
+ rc = check_rectangle(info, &mask);
+ if (rc != 0) {
+ kfree(info);
+ return ERR_PTR(rc);
+ }
}
/* Confirm it doesn't overlap and add it to the list. */
- spin_lock_irqsave(&hardwall_lock, flags);
- list_for_each_entry(iter, &rectangles, list) {
- if (overlaps(iter, rect)) {
- spin_unlock_irqrestore(&hardwall_lock, flags);
- kfree(rect);
+ spin_lock_irqsave(&hwt->lock, flags);
+ list_for_each_entry(iter, &hwt->list, list) {
+ if (cpumask_intersects(&iter->cpumask, &info->cpumask)) {
+ spin_unlock_irqrestore(&hwt->lock, flags);
+ kfree(info);
return ERR_PTR(-EBUSY);
}
}
- list_add_tail(&rect->list, &rectangles);
- spin_unlock_irqrestore(&hardwall_lock, flags);
+ list_add_tail(&info->list, &hwt->list);
+ spin_unlock_irqrestore(&hwt->lock, flags);
/* Set up appropriate hardwalling on all affected cpus. */
- hardwall_setup(rect);
+ if (hwt->is_xdn)
+ hardwall_protect_rectangle(info);
/* Create a /proc/tile/hardwall entry. */
- hardwall_add_proc(rect);
+ hardwall_add_proc(info);
- return rect;
+ return info;
}
/* Activate a given hardwall on this cpu for this process. */
-static int hardwall_activate(struct hardwall_info *rect)
+static int hardwall_activate(struct hardwall_info *info)
{
- int cpu, x, y;
+ int cpu;
unsigned long flags;
struct task_struct *p = current;
struct thread_struct *ts = &p->thread;
+ struct hardwall_type *hwt;
- /* Require a rectangle. */
- if (rect == NULL)
+ /* Require a hardwall. */
+ if (info == NULL)
return -ENODATA;
- /* Not allowed to activate a rectangle that is being torn down. */
- if (rect->teardown_in_progress)
+ /* Not allowed to activate a hardwall that is being torn down. */
+ if (info->teardown_in_progress)
return -EINVAL;
/*
@@ -428,78 +586,87 @@ static int hardwall_activate(struct hardwall_info *rect)
if (cpumask_weight(&p->cpus_allowed) != 1)
return -EPERM;
- /* Make sure we are bound to a cpu in this rectangle. */
+ /* Make sure we are bound to a cpu assigned to this resource. */
cpu = smp_processor_id();
BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
- x = cpu_x(cpu);
- y = cpu_y(cpu);
- if (!contains(rect, x, y))
+ if (!cpumask_test_cpu(cpu, &info->cpumask))
return -EINVAL;
/* If we are already bound to this hardwall, it's a no-op. */
- if (ts->hardwall) {
- BUG_ON(ts->hardwall != rect);
+ hwt = info->type;
+ if (ts->hardwall[hwt->index].info) {
+ BUG_ON(ts->hardwall[hwt->index].info != info);
return 0;
}
- /* Success! This process gets to use the user networks on this cpu. */
- ts->hardwall = rect;
- spin_lock_irqsave(&hardwall_lock, flags);
- list_add(&ts->hardwall_list, &rect->task_head);
- spin_unlock_irqrestore(&hardwall_lock, flags);
- grant_network_mpls();
- printk(KERN_DEBUG "Pid %d (%s) activated for hardwall: cpu %d\n",
- p->pid, p->comm, cpu);
+ /* Success! This process gets to use the resource on this cpu. */
+ ts->hardwall[hwt->index].info = info;
+ spin_lock_irqsave(&hwt->lock, flags);
+ list_add(&ts->hardwall[hwt->index].list, &info->task_head);
+ spin_unlock_irqrestore(&hwt->lock, flags);
+ grant_hardwall_mpls(hwt);
+ printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n",
+ p->pid, p->comm, hwt->name, cpu);
return 0;
}
/*
- * Deactivate a task's hardwall. Must hold hardwall_lock.
+ * Deactivate a task's hardwall. Must hold lock for hardwall_type.
* This method may be called from free_task(), so we don't want to
* rely on too many fields of struct task_struct still being valid.
* We assume the cpus_allowed, pid, and comm fields are still valid.
*/
-static void _hardwall_deactivate(struct task_struct *task)
+static void _hardwall_deactivate(struct hardwall_type *hwt,
+ struct task_struct *task)
{
struct thread_struct *ts = &task->thread;
if (cpumask_weight(&task->cpus_allowed) != 1) {
- pr_err("pid %d (%s) releasing networks with"
+ pr_err("pid %d (%s) releasing %s hardwall with"
" an affinity mask containing %d cpus!\n",
- task->pid, task->comm,
+ task->pid, task->comm, hwt->name,
cpumask_weight(&task->cpus_allowed));
BUG();
}
- BUG_ON(ts->hardwall == NULL);
- ts->hardwall = NULL;
- list_del(&ts->hardwall_list);
+ BUG_ON(ts->hardwall[hwt->index].info == NULL);
+ ts->hardwall[hwt->index].info = NULL;
+ list_del(&ts->hardwall[hwt->index].list);
if (task == current)
- restrict_network_mpls();
+ restrict_hardwall_mpls(hwt);
}
/* Deactivate a task's hardwall. */
-int hardwall_deactivate(struct task_struct *task)
+static int hardwall_deactivate(struct hardwall_type *hwt,
+ struct task_struct *task)
{
unsigned long flags;
int activated;
- spin_lock_irqsave(&hardwall_lock, flags);
- activated = (task->thread.hardwall != NULL);
+ spin_lock_irqsave(&hwt->lock, flags);
+ activated = (task->thread.hardwall[hwt->index].info != NULL);
if (activated)
- _hardwall_deactivate(task);
- spin_unlock_irqrestore(&hardwall_lock, flags);
+ _hardwall_deactivate(hwt, task);
+ spin_unlock_irqrestore(&hwt->lock, flags);
if (!activated)
return -EINVAL;
- printk(KERN_DEBUG "Pid %d (%s) deactivated for hardwall: cpu %d\n",
- task->pid, task->comm, smp_processor_id());
+ printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
+ task->pid, task->comm, hwt->name, smp_processor_id());
return 0;
}
-/* Stop a UDN switch before draining the network. */
-static void stop_udn_switch(void *ignored)
+void hardwall_deactivate_all(struct task_struct *task)
+{
+ int i;
+ for (i = 0; i < HARDWALL_TYPES; ++i)
+ if (task->thread.hardwall[i].info)
+ hardwall_deactivate(&hardwall_types[i], task);
+}
+
+/* Stop the switch before draining the network. */
+static void stop_xdn_switch(void *arg)
{
#if !CHIP_HAS_REV1_XDN()
/* Freeze the switch and the demux. */
@@ -507,13 +674,71 @@ static void stop_udn_switch(void *ignored)
SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
+#else
+ /*
+ * Drop all packets bound for the core or off the edge.
+ * We rely on the normal hardwall protection setup code
+ * to have set the low four bits to trigger firewall interrupts,
+ * and shift those bits up to trigger "drop on send" semantics,
+ * plus adding "drop on send to core" for all switches.
+ * In practice it seems the switches latch the DIRECTION_PROTECT
+ * SPR so they won't start dropping if they're already
+ * delivering the last message to the core, but it doesn't
+ * hurt to enable it here.
+ */
+ struct hardwall_type *hwt = arg;
+ unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT);
+ mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5);
#endif
}
+static void empty_xdn_demuxes(struct hardwall_type *hwt)
+{
+#ifndef __tilepro__
+ if (hwt->is_idn) {
+ while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0))
+ (void) __tile_idn0_receive();
+ while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1))
+ (void) __tile_idn1_receive();
+ return;
+ }
+#endif
+ while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
+ (void) __tile_udn0_receive();
+ while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
+ (void) __tile_udn1_receive();
+ while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
+ (void) __tile_udn2_receive();
+ while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
+ (void) __tile_udn3_receive();
+}
+
/* Drain all the state from a stopped switch. */
-static void drain_udn_switch(void *ignored)
+static void drain_xdn_switch(void *arg)
{
-#if !CHIP_HAS_REV1_XDN()
+ struct hardwall_info *info = arg;
+ struct hardwall_type *hwt = info->type;
+
+#if CHIP_HAS_REV1_XDN()
+ /*
+ * The switches have been configured to drop any messages
+ * destined for cores (or off the edge of the rectangle).
+ * But the current message may continue to be delivered,
+ * so we wait until all the cores have finished any pending
+ * messages before we stop draining.
+ */
+ int pending = mfspr_XDN(hwt, PENDING);
+ while (pending--) {
+ empty_xdn_demuxes(hwt);
+ if (hwt->is_idn)
+ __tile_idn_send(0);
+ else
+ __tile_udn_send(0);
+ }
+ atomic_dec(&info->xdn_pending_count);
+ while (atomic_read(&info->xdn_pending_count))
+ empty_xdn_demuxes(hwt);
+#else
int i;
int from_tile_words, ca_count;
@@ -533,15 +758,7 @@ static void drain_udn_switch(void *ignored)
(void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);
/* Empty out demuxes. */
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
- (void) __tile_udn0_receive();
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
- (void) __tile_udn1_receive();
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
- (void) __tile_udn2_receive();
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
- (void) __tile_udn3_receive();
- BUG_ON((__insn_mfspr(SPR_UDN_DATA_AVAIL) & 0xF) != 0);
+ empty_xdn_demuxes(hwt);
/* Empty out catch all. */
ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
@@ -563,21 +780,25 @@ static void drain_udn_switch(void *ignored)
#endif
}
-/* Reset random UDN state registers at boot up and during hardwall teardown. */
-void reset_network_state(void)
+/* Reset random XDN state registers at boot up and during hardwall teardown. */
+static void reset_xdn_network_state(struct hardwall_type *hwt)
{
-#if !CHIP_HAS_REV1_XDN()
- /* Reset UDN coordinates to their standard value */
- unsigned int cpu = smp_processor_id();
- unsigned int x = cpu % smp_width;
- unsigned int y = cpu / smp_width;
-#endif
-
- if (udn_disabled)
+ if (hwt->disabled)
return;
+ /* Clear out other random registers so we have a clean slate. */
+ mtspr_XDN(hwt, DIRECTION_PROTECT, 0);
+ mtspr_XDN(hwt, AVAIL_EN, 0);
+ mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0);
+
#if !CHIP_HAS_REV1_XDN()
- __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
+ /* Reset UDN coordinates to their standard value */
+ {
+ unsigned int cpu = smp_processor_id();
+ unsigned int x = cpu % smp_width;
+ unsigned int y = cpu / smp_width;
+ __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
+ }
/* Set demux tags to predefined values and enable them. */
__insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
@@ -585,56 +806,50 @@ void reset_network_state(void)
__insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
__insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
__insn_mtspr(SPR_UDN_TAG_3, (1 << 3));
-#endif
- /* Clear out other random registers so we have a clean slate. */
- __insn_mtspr(SPR_UDN_AVAIL_EN, 0);
- __insn_mtspr(SPR_UDN_DEADLOCK_TIMEOUT, 0);
-#if !CHIP_HAS_REV1_XDN()
+ /* Set other rev0 random registers to a clean state. */
__insn_mtspr(SPR_UDN_REFILL_EN, 0);
__insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
__insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);
-#endif
/* Start the switch and demux. */
-#if !CHIP_HAS_REV1_XDN()
__insn_mtspr(SPR_UDN_SP_FREEZE, 0);
#endif
}
-/* Restart a UDN switch after draining. */
-static void restart_udn_switch(void *ignored)
+void reset_network_state(void)
{
- reset_network_state();
-
- /* Disable firewall interrupts. */
- __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, 0);
- disable_firewall_interrupts();
+ reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]);
+#ifndef __tilepro__
+ reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]);
+#endif
}
-/* Build a struct cpumask containing all valid tiles in bounding rectangle. */
-static void fill_mask(struct hardwall_info *r, struct cpumask *result)
+/* Restart an XDN switch after draining. */
+static void restart_xdn_switch(void *arg)
{
- int x, y, cpu;
+ struct hardwall_type *hwt = arg;
- cpumask_clear(result);
+#if CHIP_HAS_REV1_XDN()
+ /* One last drain step to avoid races with injection and draining. */
+ empty_xdn_demuxes(hwt);
+#endif
- cpu = r->ulhc_y * smp_width + r->ulhc_x;
- for (y = 0; y < r->height; ++y, cpu += smp_width - r->width) {
- for (x = 0; x < r->width; ++x, ++cpu)
- cpu_online_set(cpu, result);
- }
+ reset_xdn_network_state(hwt);
+
+ /* Disable firewall interrupts. */
+ disable_firewall_interrupts(hwt);
}
/* Last reference to a hardwall is gone, so clear the network. */
-static void hardwall_destroy(struct hardwall_info *rect)
+static void hardwall_destroy(struct hardwall_info *info)
{
struct task_struct *task;
+ struct hardwall_type *hwt;
unsigned long flags;
- struct cpumask mask;
- /* Make sure this file actually represents a rectangle. */
- if (rect == NULL)
+ /* Make sure this file actually represents a hardwall. */
+ if (info == NULL)
return;
/*
@@ -644,39 +859,53 @@ static void hardwall_destroy(struct hardwall_info *rect)
* deactivate any remaining tasks before freeing the
* hardwall_info object itself.
*/
- spin_lock_irqsave(&hardwall_lock, flags);
- list_for_each_entry(task, &rect->task_head, thread.hardwall_list)
- _hardwall_deactivate(task);
- spin_unlock_irqrestore(&hardwall_lock, flags);
-
- /* Drain the UDN. */
- printk(KERN_DEBUG "Clearing hardwall rectangle %dx%d %d,%d\n",
- rect->width, rect->height, rect->ulhc_x, rect->ulhc_y);
- fill_mask(rect, &mask);
- on_each_cpu_mask(&mask, stop_udn_switch, NULL, 1);
- on_each_cpu_mask(&mask, drain_udn_switch, NULL, 1);
+ hwt = info->type;
+ info->teardown_in_progress = 1;
+ spin_lock_irqsave(&hwt->lock, flags);
+ list_for_each_entry(task, &info->task_head,
+ thread.hardwall[hwt->index].list)
+ _hardwall_deactivate(hwt, task);
+ spin_unlock_irqrestore(&hwt->lock, flags);
+
+ if (hwt->is_xdn) {
+ /* Configure the switches for draining the user network. */
+ printk(KERN_DEBUG
+ "Clearing %s hardwall rectangle %dx%d %d,%d\n",
+ hwt->name, info->width, info->height,
+ info->ulhc_x, info->ulhc_y);
+ on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1);
+
+ /* Drain the network. */
+#if CHIP_HAS_REV1_XDN()
+ atomic_set(&info->xdn_pending_count,
+ cpumask_weight(&info->cpumask));
+ on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0);
+#else
+ on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1);
+#endif
- /* Restart switch and disable firewall. */
- on_each_cpu_mask(&mask, restart_udn_switch, NULL, 1);
+ /* Restart switch and disable firewall. */
+ on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1);
+ }
/* Remove the /proc/tile/hardwall entry. */
- hardwall_remove_proc(rect);
-
- /* Now free the rectangle from the list. */
- spin_lock_irqsave(&hardwall_lock, flags);
- BUG_ON(!list_empty(&rect->task_head));
- list_del(&rect->list);
- spin_unlock_irqrestore(&hardwall_lock, flags);
- kfree(rect);
+ hardwall_remove_proc(info);
+
+ /* Now free the hardwall from the list. */
+ spin_lock_irqsave(&hwt->lock, flags);
+ BUG_ON(!list_empty(&info->task_head));
+ list_del(&info->list);
+ spin_unlock_irqrestore(&hwt->lock, flags);
+ kfree(info);
}
static int hardwall_proc_show(struct seq_file *sf, void *v)
{
- struct hardwall_info *rect = sf->private;
+ struct hardwall_info *info = sf->private;
char buf[256];
- int rc = cpulist_scnprintf(buf, sizeof(buf), &rect->cpumask);
+ int rc = cpulist_scnprintf(buf, sizeof(buf), &info->cpumask);
buf[rc++] = '\n';
seq_write(sf, buf, rc);
return 0;
@@ -695,31 +924,45 @@ static const struct file_operations hardwall_proc_fops = {
.release = single_release,
};
-static void hardwall_add_proc(struct hardwall_info *rect)
+static void hardwall_add_proc(struct hardwall_info *info)
{
char buf[64];
- snprintf(buf, sizeof(buf), "%d", rect->id);
- proc_create_data(buf, 0444, hardwall_proc_dir,
- &hardwall_proc_fops, rect);
+ snprintf(buf, sizeof(buf), "%d", info->id);
+ proc_create_data(buf, 0444, info->type->proc_dir,
+ &hardwall_proc_fops, info);
}
-static void hardwall_remove_proc(struct hardwall_info *rect)
+static void hardwall_remove_proc(struct hardwall_info *info)
{
char buf[64];
- snprintf(buf, sizeof(buf), "%d", rect->id);
- remove_proc_entry(buf, hardwall_proc_dir);
+ snprintf(buf, sizeof(buf), "%d", info->id);
+ remove_proc_entry(buf, info->type->proc_dir);
}
int proc_pid_hardwall(struct task_struct *task, char *buffer)
{
- struct hardwall_info *rect = task->thread.hardwall;
- return rect ? sprintf(buffer, "%d\n", rect->id) : 0;
+ int i;
+ int n = 0;
+ for (i = 0; i < HARDWALL_TYPES; ++i) {
+ struct hardwall_info *info = task->thread.hardwall[i].info;
+ if (info)
+ n += sprintf(&buffer[n], "%s: %d\n",
+ info->type->name, info->id);
+ }
+ return n;
}
void proc_tile_hardwall_init(struct proc_dir_entry *root)
{
- if (!udn_disabled)
- hardwall_proc_dir = proc_mkdir("hardwall", root);
+ int i;
+ for (i = 0; i < HARDWALL_TYPES; ++i) {
+ struct hardwall_type *hwt = &hardwall_types[i];
+ if (hwt->disabled)
+ continue;
+ if (hardwall_proc_dir == NULL)
+ hardwall_proc_dir = proc_mkdir("hardwall", root);
+ hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir);
+ }
}
@@ -729,34 +972,45 @@ void proc_tile_hardwall_init(struct proc_dir_entry *root)
static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
{
- struct hardwall_info *rect = file->private_data;
+ struct hardwall_info *info = file->private_data;
+ int minor = iminor(file->f_mapping->host);
+ struct hardwall_type* hwt;
if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
return -EINVAL;
+ BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES);
+ BUILD_BUG_ON(HARDWALL_TYPES !=
+ sizeof(hardwall_types)/sizeof(hardwall_types[0]));
+
+ if (minor < 0 || minor >= HARDWALL_TYPES)
+ return -EINVAL;
+ hwt = &hardwall_types[minor];
+ WARN_ON(info && hwt != info->type);
+
switch (_IOC_NR(a)) {
case _HARDWALL_CREATE:
- if (udn_disabled)
+ if (hwt->disabled)
return -ENOSYS;
- if (rect != NULL)
+ if (info != NULL)
return -EALREADY;
- rect = hardwall_create(_IOC_SIZE(a),
- (const unsigned char __user *)b);
- if (IS_ERR(rect))
- return PTR_ERR(rect);
- file->private_data = rect;
+ info = hardwall_create(hwt, _IOC_SIZE(a),
+ (const unsigned char __user *)b);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+ file->private_data = info;
return 0;
case _HARDWALL_ACTIVATE:
- return hardwall_activate(rect);
+ return hardwall_activate(info);
case _HARDWALL_DEACTIVATE:
- if (current->thread.hardwall != rect)
+ if (current->thread.hardwall[hwt->index].info != info)
return -EINVAL;
- return hardwall_deactivate(current);
+ return hardwall_deactivate(hwt, current);
case _HARDWALL_GET_ID:
- return rect ? rect->id : -EINVAL;
+ return info ? info->id : -EINVAL;
default:
return -EINVAL;
@@ -775,26 +1029,28 @@ static long hardwall_compat_ioctl(struct file *file,
/* The user process closed the file; revoke access to user networks. */
static int hardwall_flush(struct file *file, fl_owner_t owner)
{
- struct hardwall_info *rect = file->private_data;
+ struct hardwall_info *info = file->private_data;
struct task_struct *task, *tmp;
unsigned long flags;
- if (rect) {
+ if (info) {
/*
* NOTE: if multiple threads are activated on this hardwall
* file, the other threads will continue having access to the
- * UDN until they are context-switched out and back in again.
+ * user network until they are context-switched out and back
+ * in again.
*
* NOTE: A NULL files pointer means the task is being torn
* down, so in that case we also deactivate it.
*/
- spin_lock_irqsave(&hardwall_lock, flags);
- list_for_each_entry_safe(task, tmp, &rect->task_head,
- thread.hardwall_list) {
+ struct hardwall_type *hwt = info->type;
+ spin_lock_irqsave(&hwt->lock, flags);
+ list_for_each_entry_safe(task, tmp, &info->task_head,
+ thread.hardwall[hwt->index].list) {
if (task->files == owner || task->files == NULL)
- _hardwall_deactivate(task);
+ _hardwall_deactivate(hwt, task);
}
- spin_unlock_irqrestore(&hardwall_lock, flags);
+ spin_unlock_irqrestore(&hwt->lock, flags);
}
return 0;
@@ -824,11 +1080,11 @@ static int __init dev_hardwall_init(void)
int rc;
dev_t dev;
- rc = alloc_chrdev_region(&dev, 0, 1, "hardwall");
+ rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall");
if (rc < 0)
return rc;
cdev_init(&hardwall_dev, &dev_hardwall_fops);
- rc = cdev_add(&hardwall_dev, dev, 1);
+ rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES);
if (rc < 0)
return rc;
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 1a39b7c1c87e..f71bfeeaf1a9 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -69,7 +69,7 @@ ENTRY(_start)
}
{
moveli lr, lo16(1f)
- move r5, zero
+ moveli r5, CTX_PAGE_FLAG
}
{
auli lr, lr, ha16(1f)
@@ -141,11 +141,11 @@ ENTRY(empty_zero_page)
.macro PTE va, cpa, bits1, no_org=0
.ifeq \no_org
- .org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE
+ .org swapper_pg_dir + PGD_INDEX(\va) * HV_PTE_SIZE
.endif
.word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
(HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
- .word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32))
+ .word (\bits1) | (HV_CPA_TO_PTFN(\cpa) << (HV_PTE_INDEX_PTFN - 32))
.endm
__PAGE_ALIGNED_DATA
@@ -166,7 +166,7 @@ ENTRY(swapper_pg_dir)
/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
(1 << (HV_PTE_INDEX_EXECUTABLE - 32))
- .org swapper_pg_dir + HV_L1_SIZE
+ .org swapper_pg_dir + PGDIR_SIZE
END(swapper_pg_dir)
/*
diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
index 6bc3a932fe45..f9a2734f7b82 100644
--- a/arch/tile/kernel/head_64.S
+++ b/arch/tile/kernel/head_64.S
@@ -114,7 +114,7 @@ ENTRY(_start)
shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
}
{
- move r3, zero
+ moveli r3, CTX_PAGE_FLAG
j hv_install_context
}
1:
@@ -210,19 +210,19 @@ ENTRY(empty_zero_page)
.macro PTE cpa, bits1
.quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
- (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
+ (\bits1) | (HV_CPA_TO_PTFN(\cpa) << HV_PTE_INDEX_PTFN)
.endm
__PAGE_ALIGNED_DATA
.align PAGE_SIZE
ENTRY(swapper_pg_dir)
- .org swapper_pg_dir + HV_L0_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
+ .org swapper_pg_dir + PGD_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
.Lsv_data_pmd:
.quad 0 /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
- .org swapper_pg_dir + HV_L0_INDEX(MEM_SV_START) * HV_PTE_SIZE
+ .org swapper_pg_dir + PGD_INDEX(MEM_SV_START) * HV_PTE_SIZE
.Lsv_code_pmd:
.quad 0 /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
- .org swapper_pg_dir + HV_L0_SIZE
+ .org swapper_pg_dir + SIZEOF_PGD
END(swapper_pg_dir)
.align HV_PAGE_TABLE_ALIGN
@@ -233,11 +233,11 @@ ENTRY(temp_data_pmd)
* permissions later.
*/
.set addr, 0
- .rept HV_L1_ENTRIES
+ .rept PTRS_PER_PMD
PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
- .set addr, addr + HV_PAGE_SIZE_LARGE
+ .set addr, addr + HPAGE_SIZE
.endr
- .org temp_data_pmd + HV_L1_SIZE
+ .org temp_data_pmd + SIZEOF_PMD
END(temp_data_pmd)
.align HV_PAGE_TABLE_ALIGN
@@ -248,11 +248,11 @@ ENTRY(temp_code_pmd)
* permissions later.
*/
.set addr, 0
- .rept HV_L1_ENTRIES
+ .rept PTRS_PER_PMD
PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
- .set addr, addr + HV_PAGE_SIZE_LARGE
+ .set addr, addr + HPAGE_SIZE
.endr
- .org temp_code_pmd + HV_L1_SIZE
+ .org temp_code_pmd + SIZEOF_PMD
END(temp_code_pmd)
/*
diff --git a/arch/tile/kernel/hvglue.lds b/arch/tile/kernel/hvglue.lds
index 2b7cd0a659a9..d44c5a67a1ed 100644
--- a/arch/tile/kernel/hvglue.lds
+++ b/arch/tile/kernel/hvglue.lds
@@ -55,4 +55,5 @@ hv_store_mapping = TEXT_OFFSET + 0x106a0;
hv_inquire_realpa = TEXT_OFFSET + 0x106c0;
hv_flush_all = TEXT_OFFSET + 0x106e0;
hv_get_ipi_pte = TEXT_OFFSET + 0x10700;
-hv_glue_internals = TEXT_OFFSET + 0x10720;
+hv_set_pte_super_shift = TEXT_OFFSET + 0x10720;
+hv_glue_internals = TEXT_OFFSET + 0x10740;
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 30ae76e50c44..7c06d597ffd0 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -220,7 +220,9 @@ intvec_\vecname:
* This routine saves just the first four registers, plus the
* stack context so we can do proper backtracing right away,
* and defers to handle_interrupt to save the rest.
- * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
+ * The backtracer needs pc, ex1, lr, sp, r52, and faultnum,
+ * and needs sp set to its final location at the bottom of
+ * the stack frame.
*/
addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
wh64 r0 /* cache line 7 */
@@ -450,23 +452,6 @@ intvec_\vecname:
push_reg r5, r52
st r52, r4
- /* Load tp with our per-cpu offset. */
-#ifdef CONFIG_SMP
- {
- mfspr r20, SPR_SYSTEM_SAVE_K_0
- moveli r21, hw2_last(__per_cpu_offset)
- }
- {
- shl16insli r21, r21, hw1(__per_cpu_offset)
- bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
- }
- shl16insli r21, r21, hw0(__per_cpu_offset)
- shl3add r20, r20, r21
- ld tp, r20
-#else
- move tp, zero
-#endif
-
/*
* If we will be returning to the kernel, we will need to
* reset the interrupt masks to the state they had before.
@@ -489,6 +474,44 @@ intvec_\vecname:
.endif
st r21, r32
+ /*
+ * we've captured enough state to the stack (including in
+ * particular our EX_CONTEXT state) that we can now release
+ * the interrupt critical section and replace it with our
+ * standard "interrupts disabled" mask value. This allows
+ * synchronous interrupts (and profile interrupts) to punch
+ * through from this point onwards.
+ *
+ * It's important that no code before this point touch memory
+ * other than our own stack (to keep the invariant that this
+ * is all that gets touched under ICS), and that no code after
+ * this point reference any interrupt-specific SPR, in particular
+ * the EX_CONTEXT_K_ values.
+ */
+ .ifc \function,handle_nmi
+ IRQ_DISABLE_ALL(r20)
+ .else
+ IRQ_DISABLE(r20, r21)
+ .endif
+ mtspr INTERRUPT_CRITICAL_SECTION, zero
+
+ /* Load tp with our per-cpu offset. */
+#ifdef CONFIG_SMP
+ {
+ mfspr r20, SPR_SYSTEM_SAVE_K_0
+ moveli r21, hw2_last(__per_cpu_offset)
+ }
+ {
+ shl16insli r21, r21, hw1(__per_cpu_offset)
+ bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
+ }
+ shl16insli r21, r21, hw0(__per_cpu_offset)
+ shl3add r20, r20, r21
+ ld tp, r20
+#else
+ move tp, zero
+#endif
+
#ifdef __COLLECT_LINKER_FEEDBACK__
/*
* Notify the feedback routines that we were in the
@@ -513,21 +536,6 @@ intvec_\vecname:
#endif
/*
- * we've captured enough state to the stack (including in
- * particular our EX_CONTEXT state) that we can now release
- * the interrupt critical section and replace it with our
- * standard "interrupts disabled" mask value. This allows
- * synchronous interrupts (and profile interrupts) to punch
- * through from this point onwards.
- */
- .ifc \function,handle_nmi
- IRQ_DISABLE_ALL(r20)
- .else
- IRQ_DISABLE(r20, r21)
- .endif
- mtspr INTERRUPT_CRITICAL_SECTION, zero
-
- /*
* Prepare the first 256 stack bytes to be rapidly accessible
* without having to fetch the background data.
*/
@@ -736,9 +744,10 @@ STD_ENTRY(interrupt_return)
beqzt r30, .Lrestore_regs
j 3f
2: TRACE_IRQS_ON
+ IRQ_ENABLE_LOAD(r20, r21)
movei r0, 1
mtspr INTERRUPT_CRITICAL_SECTION, r0
- IRQ_ENABLE(r20, r21)
+ IRQ_ENABLE_APPLY(r20, r21)
beqzt r30, .Lrestore_regs
3:
@@ -755,7 +764,6 @@ STD_ENTRY(interrupt_return)
* that will save some cycles if this turns out to be a syscall.
*/
.Lrestore_regs:
- FEEDBACK_REENTER(interrupt_return) /* called from elsewhere */
/*
* Rotate so we have one high bit and one low bit to test.
@@ -1249,7 +1257,7 @@ STD_ENTRY(fill_ra_stack)
int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
- int_hand INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
+ int_hand INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap
int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index 6255f2eab112..f0b54a934712 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -31,6 +31,8 @@
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/checksum.h>
+#include <asm/tlbflush.h>
+#include <asm/homecache.h>
#include <hv/hypervisor.h>
@@ -222,11 +224,22 @@ struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order)
return alloc_pages_node(0, gfp_mask, order);
}
+/*
+ * Address range in which pa=va mapping is set in setup_quasi_va_is_pa().
+ * For tilepro, PAGE_OFFSET is used since this is the largest possible value
+ * for tilepro, while for tilegx, we limit it to the entire middle-level page
+ * table, which we assume has been allocated and is undoubtedly large enough.
+ */
+#ifndef __tilegx__
+#define QUASI_VA_IS_PA_ADDR_RANGE PAGE_OFFSET
+#else
+#define QUASI_VA_IS_PA_ADDR_RANGE PGDIR_SIZE
+#endif
+
static void setup_quasi_va_is_pa(void)
{
- HV_PTE *pgtable;
HV_PTE pte;
- int i;
+ unsigned long i;
/*
* Flush our TLB to prevent conflicts between the previous contents
@@ -234,16 +247,22 @@ static void setup_quasi_va_is_pa(void)
*/
local_flush_tlb_all();
- /* setup VA is PA, at least up to PAGE_OFFSET */
-
- pgtable = (HV_PTE *)current->mm->pgd;
+ /*
+ * Set up VA == PA, at least up to QUASI_VA_IS_PA_ADDR_RANGE.
+ * Note that here we assume the level-1 page table is defined by
+ * HPAGE_SIZE.
+ */
pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
-
- for (i = 0; i < pgd_index(PAGE_OFFSET); i++) {
+ for (i = 0; i < (QUASI_VA_IS_PA_ADDR_RANGE >> HPAGE_SHIFT); i++) {
+ unsigned long vaddr = i << HPAGE_SHIFT;
+ pgd_t *pgd = pgd_offset(current->mm, vaddr);
+ pud_t *pud = pud_offset(pgd, vaddr);
+ pte_t *ptep = (pte_t *) pmd_offset(pud, vaddr);
unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
+
if (pfn_valid(pfn))
- __set_pte(&pgtable[i], pfn_pte(pfn, pte));
+ __set_pte(ptep, pfn_pte(pfn, pte));
}
}
@@ -251,6 +270,7 @@ static void setup_quasi_va_is_pa(void)
void machine_kexec(struct kimage *image)
{
void *reboot_code_buffer;
+ pte_t *ptep;
void (*rnk)(unsigned long, void *, unsigned long)
__noreturn;
@@ -266,8 +286,10 @@ void machine_kexec(struct kimage *image)
*/
homecache_change_page_home(image->control_code_page, 0,
smp_processor_id());
- reboot_code_buffer = vmap(&image->control_code_page, 1, 0,
- __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE));
+ reboot_code_buffer = page_address(image->control_code_page);
+ BUG_ON(reboot_code_buffer == NULL);
+ ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer);
+ __set_pte(ptep, pte_mkexec(*ptep));
memcpy(reboot_code_buffer, relocate_new_kernel,
relocate_new_kernel_size);
__flush_icache_range(
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 98d476920106..001cbfa10ac6 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -159,7 +159,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
switch (ELF_R_TYPE(rel[i].r_info)) {
-#define MUNGE(func) (*location = ((*location & ~func(-1)) | func(value)))
+#ifdef __LITTLE_ENDIAN
+# define MUNGE(func) \
+ (*location = ((*location & ~func(-1)) | func(value)))
+#else
+/*
+ * Instructions are always little-endian, so when we read them as data,
+ * we have to swap them around before and after modifying them.
+ */
+# define MUNGE(func) \
+ (*location = swab64((swab64(*location) & ~func(-1)) | func(value)))
+#endif
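(A rough illustration, not part of the patch: on a big-endian host the instruction words are still stored little-endian, so MUNGE swaps the loaded word, patches the relocation field, and swaps back before storing. The user-space sketch below shows that shape; the 16-bit "field" encoder and the sample words are invented, and __builtin_bswap64 stands in for the kernel's swab64().)

#include <stdio.h>
#include <stdint.h>

/* Toy relocation encoder: the field occupies the low 16 bits. */
static uint64_t field(uint64_t v)
{
        return v & 0xffffULL;
}

int main(void)
{
        uint64_t stored = 0x1122334455667788ULL; /* word as a big-endian load sees it */
        uint64_t value  = 0xabcdULL;             /* relocation value to insert */

        /* Swap to the instruction's logical value, patch the field, swap back. */
        uint64_t logical = __builtin_bswap64(stored);
        logical = (logical & ~field(~0ULL)) | field(value);
        uint64_t patched = __builtin_bswap64(logical);

        printf("stored=%#llx patched=%#llx\n",
               (unsigned long long)stored, (unsigned long long)patched);
        return 0;
}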
#ifndef __tilegx__
case R_TILE_32:
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index 446a7f52cc11..dafc447b5125 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -22,6 +22,7 @@
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/hardirq.h>
+#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <asm/unaligned.h>
#include <asm/pgtable.h>
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 32817ab6062a..6be799150501 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -128,10 +128,10 @@ void arch_release_thread_info(struct thread_info *info)
* Calling deactivate here just frees up the data structures.
* If the task we're freeing held the last reference to a
* hardwall fd, it would have been released prior to this point
- * anyway via exit_files(), and "hardwall" would be NULL by now.
+ * anyway via exit_files(), and the hardwall_task.info pointers
+ * would be NULL by now.
*/
- if (info->task->thread.hardwall)
- hardwall_deactivate(info->task);
+ hardwall_deactivate_all(info->task);
#endif
if (step_state) {
@@ -245,7 +245,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
#ifdef CONFIG_HARDWALL
/* New thread does not own any networks. */
- p->thread.hardwall = NULL;
+ memset(&p->thread.hardwall[0], 0,
+ sizeof(struct hardwall_task) * HARDWALL_TYPES);
#endif
@@ -515,12 +516,7 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
#ifdef CONFIG_HARDWALL
/* Enable or disable access to the network registers appropriately. */
- if (prev->thread.hardwall != NULL) {
- if (next->thread.hardwall == NULL)
- restrict_network_mpls();
- } else if (next->thread.hardwall != NULL) {
- grant_network_mpls();
- }
+ hardwall_switch_tasks(prev, next);
#endif
/*
diff --git a/arch/tile/kernel/relocate_kernel.S b/arch/tile/kernel/relocate_kernel_32.S
index 010b418515f8..010b418515f8 100644
--- a/arch/tile/kernel/relocate_kernel.S
+++ b/arch/tile/kernel/relocate_kernel_32.S
diff --git a/arch/tile/kernel/relocate_kernel_64.S b/arch/tile/kernel/relocate_kernel_64.S
new file mode 100644
index 000000000000..1c09a4f5a4ea
--- /dev/null
+++ b/arch/tile/kernel/relocate_kernel_64.S
@@ -0,0 +1,260 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * copy new kernel into place and then call hv_reexec
+ *
+ */
+
+#include <linux/linkage.h>
+#include <arch/chip.h>
+#include <asm/page.h>
+#include <hv/hypervisor.h>
+
+#undef RELOCATE_NEW_KERNEL_VERBOSE
+
+STD_ENTRY(relocate_new_kernel)
+
+ move r30, r0 /* page list */
+ move r31, r1 /* address of page we are on */
+ move r32, r2 /* start address of new kernel */
+
+ shrui r1, r1, PAGE_SHIFT
+ addi r1, r1, 1
+ shli sp, r1, PAGE_SHIFT
+ addi sp, sp, -8
+ /* we now have a stack (whether we need one or not) */
+
+ moveli r40, hw2_last(hv_console_putc)
+ shl16insli r40, r40, hw1(hv_console_putc)
+ shl16insli r40, r40, hw0(hv_console_putc)
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+ moveli r0, 'r'
+ jalr r40
+
+ moveli r0, '_'
+ jalr r40
+
+ moveli r0, 'n'
+ jalr r40
+
+ moveli r0, '_'
+ jalr r40
+
+ moveli r0, 'k'
+ jalr r40
+
+ moveli r0, '\n'
+ jalr r40
+#endif
+
+ /*
+ * Throughout this code r30 is pointer to the element of page
+ * list we are working on.
+ *
+ * Normally we get to the next element of the page list by
+ * incrementing r30 by eight. The exception is if the element
+ * on the page list is an IND_INDIRECTION in which case we use
+ * the element with the low bits masked off as the new value
+ * of r30.
+ *
+ * To get this started, we need the value passed to us (which
+ * will always be an IND_INDIRECTION) in memory somewhere with
+ * r30 pointing at it. To do that, we push the value passed
+ * to us on the stack and make r30 point to it.
+ */
+
+ st sp, r30
+ move r30, sp
+ addi sp, sp, -16
+
+#if CHIP_HAS_CBOX_HOME_MAP()
+ /*
+ * On TILE-GX, we need to flush all tiles' caches, since we may
+ * have been doing hash-for-home caching there. Note that we
+ * must do this _after_ we're completely done modifying any memory
+ * other than our output buffer (which we know is locally cached).
+ * We want the caches to be fully clean when we do the reexec,
+ * because the hypervisor is going to do this flush again at that
+ * point, and we don't want that second flush to overwrite any memory.
+ */
+ {
+ move r0, zero /* cache_pa */
+ moveli r1, hw2_last(HV_FLUSH_EVICT_L2)
+ }
+ {
+ shl16insli r1, r1, hw1(HV_FLUSH_EVICT_L2)
+ movei r2, -1 /* cache_cpumask; -1 means all client tiles */
+ }
+ {
+ shl16insli r1, r1, hw0(HV_FLUSH_EVICT_L2) /* cache_control */
+ move r3, zero /* tlb_va */
+ }
+ {
+ move r4, zero /* tlb_length */
+ move r5, zero /* tlb_pgsize */
+ }
+ {
+ move r6, zero /* tlb_cpumask */
+ move r7, zero /* asids */
+ }
+ {
+ moveli r20, hw2_last(hv_flush_remote)
+ move r8, zero /* asidcount */
+ }
+ shl16insli r20, r20, hw1(hv_flush_remote)
+ shl16insli r20, r20, hw0(hv_flush_remote)
+
+ jalr r20
+#endif
+
+ /* r33 is destination pointer, default to zero */
+
+ moveli r33, 0
+
+.Lloop: ld r10, r30
+
+ andi r9, r10, 0xf /* low 4 bits tell us what type it is */
+ xor r10, r10, r9 /* r10 is now value with low 4 bits stripped */
+
+ cmpeqi r0, r9, 0x1 /* IND_DESTINATION */
+ beqzt r0, .Ltry2
+
+ move r33, r10
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+ moveli r0, 'd'
+ jalr r40
+#endif
+
+ addi r30, r30, 8
+ j .Lloop
+
+.Ltry2:
+ cmpeqi r0, r9, 0x2 /* IND_INDIRECTION */
+ beqzt r0, .Ltry4
+
+ move r30, r10
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+ moveli r0, 'i'
+ jalr r40
+#endif
+
+ j .Lloop
+
+.Ltry4:
+ cmpeqi r0, r9, 0x4 /* IND_DONE */
+ beqzt r0, .Ltry8
+
+ mf
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+ moveli r0, 'D'
+ jalr r40
+ moveli r0, '\n'
+ jalr r40
+#endif
+
+ move r0, r32
+
+ moveli r41, hw2_last(hv_reexec)
+ shl16insli r41, r41, hw1(hv_reexec)
+ shl16insli r41, r41, hw0(hv_reexec)
+
+ jalr r41
+
+ /* we should not get here */
+
+ moveli r0, '?'
+ jalr r40
+ moveli r0, '\n'
+ jalr r40
+
+ j .Lhalt
+
+.Ltry8: cmpeqi r0, r9, 0x8 /* IND_SOURCE */
+ beqz r0, .Lerr /* unknown type */
+
+ /* copy page at r10 to page at r33 */
+
+ move r11, r33
+
+ moveli r0, hw2_last(PAGE_SIZE)
+ shl16insli r0, r0, hw1(PAGE_SIZE)
+ shl16insli r0, r0, hw0(PAGE_SIZE)
+ add r33, r33, r0
+
+ /* copy word at r10 to word at r11 until r11 equals r33 */
+
+ /* We know page size must be multiple of 8, so we can unroll
+ * 8 times safely without any edge case checking.
+ *
+ * Issue a flush of the destination every 8 words to avoid
+ * incoherence when starting the new kernel. (Now this is
+ * just good paranoia because the hv_reexec call will also
+ * take care of this.)
+ */
+
+1:
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0; addi r11, r11, 8 }
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0; addi r11, r11, 8 }
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0; addi r11, r11, 8 }
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0; addi r11, r11, 8 }
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0; addi r11, r11, 8 }
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0; addi r11, r11, 8 }
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0; addi r11, r11, 8 }
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0 }
+ { flush r11 ; addi r11, r11, 8 }
+
+ cmpeq r0, r33, r11
+ beqzt r0, 1b
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+ moveli r0, 's'
+ jalr r40
+#endif
+
+ addi r30, r30, 8
+ j .Lloop
+
+
+.Lerr: moveli r0, 'e'
+ jalr r40
+ moveli r0, 'r'
+ jalr r40
+ moveli r0, 'r'
+ jalr r40
+ moveli r0, '\n'
+ jalr r40
+.Lhalt:
+ moveli r41, hw2_last(hv_halt)
+ shl16insli r41, r41, hw1(hv_halt)
+ shl16insli r41, r41, hw0(hv_halt)
+
+ jalr r41
+ STD_ENDPROC(relocate_new_kernel)
+
+ .section .rodata,"a"
+
+ .globl relocate_new_kernel_size
+relocate_new_kernel_size:
+ .long .Lend_relocate_new_kernel - relocate_new_kernel
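
As a reading aid, here is a minimal C rendering of the page-list walk that the assembly above performs. The IND_* tag values come from the asm comments; the page size, the 16-byte alignment of the mock pages, and the final hand-off are assumptions of this sketch, and the real code finishes by calling hv_reexec rather than returning.

#include <string.h>

#define IND_DESTINATION	0x1
#define IND_INDIRECTION	0x2
#define IND_DONE	0x4
#define IND_SOURCE	0x8
#define MOCK_PAGE_SIZE	4096

static void walk_page_list(unsigned long *entry)
{
	unsigned char *dest = NULL;

	for (;;) {
		unsigned long e = *entry;
		unsigned long addr = e & ~0xfUL;

		switch (e & 0xf) {
		case IND_DESTINATION:	/* set the copy destination */
			dest = (unsigned char *)addr;
			entry++;
			break;
		case IND_INDIRECTION:	/* continue on another list page */
			entry = (unsigned long *)addr;
			break;
		case IND_DONE:		/* asm flushes caches and calls hv_reexec */
			return;
		case IND_SOURCE:	/* copy one page, advance destination */
			memcpy(dest, (void *)addr, MOCK_PAGE_SIZE);
			dest += MOCK_PAGE_SIZE;
			entry++;
			break;
		default:		/* asm prints "err" and halts */
			return;
		}
	}
}

int main(void)
{
	static _Alignas(16) unsigned char src1[MOCK_PAGE_SIZE] = { 'A' };
	static _Alignas(16) unsigned char src2[MOCK_PAGE_SIZE] = { 'B' };
	static _Alignas(16) unsigned char dst[2 * MOCK_PAGE_SIZE];
	unsigned long list[4];

	list[0] = (unsigned long)dst  | IND_DESTINATION;
	list[1] = (unsigned long)src1 | IND_SOURCE;
	list[2] = (unsigned long)src2 | IND_SOURCE;
	list[3] = IND_DONE;
	walk_page_list(list);
	return (dst[0] == 'A' && dst[MOCK_PAGE_SIZE] == 'B') ? 0 : 1;
}
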
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 98d80eb49ddb..6098ccc59be2 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -28,6 +28,7 @@
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/timex.h>
+#include <linux/hugetlb.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
@@ -49,9 +50,6 @@ char chip_model[64] __write_once;
struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
-/* We only create bootmem data on node 0. */
-static bootmem_data_t __initdata node0_bdata;
-
/* Information on the NUMA nodes that we compute early */
unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
@@ -534,37 +532,96 @@ static void __init setup_memory(void)
#endif
}
-static void __init setup_bootmem_allocator(void)
+/*
+ * On 32-bit machines, we only put bootmem on the low controller,
+ * since PAs > 4GB can't be used in bootmem. In principle one could
+ * imagine, e.g., multiple 1 GB controllers all of which could support
+ * bootmem, but in practice using controllers this small isn't a
+ * particularly interesting scenario, so we just keep it simple and
+ * use only the first controller for bootmem on 32-bit machines.
+ */
+static inline int node_has_bootmem(int nid)
{
- unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn;
+#ifdef CONFIG_64BIT
+ return 1;
+#else
+ return nid == 0;
+#endif
+}
- /* Provide a node 0 bdata. */
- NODE_DATA(0)->bdata = &node0_bdata;
+static inline unsigned long alloc_bootmem_pfn(int nid,
+ unsigned long size,
+ unsigned long goal)
+{
+ void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
+ PAGE_SIZE, goal);
+ unsigned long pfn = kaddr_to_pfn(kva);
+ BUG_ON(goal && PFN_PHYS(pfn) != goal);
+ return pfn;
+}
-#ifdef CONFIG_PCI
- /* Don't let boot memory alias the PCI region. */
- last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn);
+static void __init setup_bootmem_allocator_node(int i)
+{
+ unsigned long start, end, mapsize, mapstart;
+
+ if (node_has_bootmem(i)) {
+ NODE_DATA(i)->bdata = &bootmem_node_data[i];
+ } else {
+ /* Share controller zero's bdata for now. */
+ NODE_DATA(i)->bdata = &bootmem_node_data[0];
+ return;
+ }
+
+ /* Skip up to after the bss in node 0. */
+ start = (i == 0) ? min_low_pfn : node_start_pfn[i];
+
+ /* Only lowmem, if we're a HIGHMEM build. */
+#ifdef CONFIG_HIGHMEM
+ end = node_lowmem_end_pfn[i];
#else
- last_alloc_pfn = max_low_pfn;
+ end = node_end_pfn[i];
#endif
- /*
- * Initialize the boot-time allocator (with low memory only):
- * The first argument says where to put the bitmap, and the
- * second says where the end of allocatable memory is.
- */
- bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn);
+ /* No memory here. */
+ if (end == start)
+ return;
+
+ /* Figure out where the bootmem bitmap is located. */
+ mapsize = bootmem_bootmap_pages(end - start);
+ if (i == 0) {
+ /* Use some space right before the heap on node 0. */
+ mapstart = start;
+ start += mapsize;
+ } else {
+ /* Allocate bitmap on node 0 to avoid page table issues. */
+ mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
+ }
+ /* Initialize a node. */
+ init_bootmem_node(NODE_DATA(i), mapstart, start, end);
+
+ /* Free all the space back into the allocator. */
+ free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));
+
+#if defined(CONFIG_PCI)
/*
- * Let the bootmem allocator use all the space we've given it
- * except for its own bitmap.
+ * Throw away any memory aliased by the PCI region. FIXME: this
+ * is a temporary hack to work around bug 10502, and needs to be
+ * fixed properly.
*/
- first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size);
- if (first_alloc_pfn >= last_alloc_pfn)
- early_panic("Not enough memory on controller 0 for bootmem\n");
+ if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start)
+ reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn),
+ PFN_PHYS(pci_reserve_end_pfn -
+ pci_reserve_start_pfn),
+ BOOTMEM_EXCLUSIVE);
+#endif
+}
- free_bootmem(PFN_PHYS(first_alloc_pfn),
- PFN_PHYS(last_alloc_pfn - first_alloc_pfn));
+static void __init setup_bootmem_allocator(void)
+{
+ int i;
+ for (i = 0; i < MAX_NUMNODES; ++i)
+ setup_bootmem_allocator_node(i);
#ifdef CONFIG_KEXEC
if (crashk_res.start != crashk_res.end)
@@ -595,14 +652,6 @@ static int __init percpu_size(void)
return size;
}
-static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
-{
- void *kva = __alloc_bootmem(size, PAGE_SIZE, goal);
- unsigned long pfn = kaddr_to_pfn(kva);
- BUG_ON(goal && PFN_PHYS(pfn) != goal);
- return pfn;
-}
-
static void __init zone_sizes_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = { 0 };
@@ -640,21 +689,22 @@ static void __init zone_sizes_init(void)
* though, there'll be no lowmem, so we just alloc_bootmem
* the memmap. There will be no percpu memory either.
*/
- if (__pfn_to_highbits(start) == 0) {
- /* In low PAs, allocate via bootmem. */
+ if (i != 0 && cpu_isset(i, isolnodes)) {
+ node_memmap_pfn[i] =
+ alloc_bootmem_pfn(0, memmap_size, 0);
+ BUG_ON(node_percpu[i] != 0);
+ } else if (node_has_bootmem(start)) {
unsigned long goal = 0;
node_memmap_pfn[i] =
- alloc_bootmem_pfn(memmap_size, goal);
+ alloc_bootmem_pfn(i, memmap_size, 0);
if (kdata_huge)
goal = PFN_PHYS(lowmem_end) - node_percpu[i];
if (node_percpu[i])
node_percpu_pfn[i] =
- alloc_bootmem_pfn(node_percpu[i], goal);
- } else if (cpu_isset(i, isolnodes)) {
- node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0);
- BUG_ON(node_percpu[i] != 0);
+ alloc_bootmem_pfn(i, node_percpu[i],
+ goal);
} else {
- /* In high PAs, just reserve some pages. */
+ /* In non-bootmem zones, just reserve some pages. */
node_memmap_pfn[i] = node_free_pfn[i];
node_free_pfn[i] += PFN_UP(memmap_size);
if (!kdata_huge) {
@@ -678,16 +728,9 @@ static void __init zone_sizes_init(void)
zones_size[ZONE_NORMAL] = end - start;
#endif
- /*
- * Everyone shares node 0's bootmem allocator, but
- * we use alloc_remap(), above, to put the actual
- * struct page array on the individual controllers,
- * which is most of the data that we actually care about.
- * We can't place bootmem allocators on the other
- * controllers since the bootmem allocator can only
- * operate on 32-bit physical addresses.
- */
- NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;
+ /* Take zone metadata from controller 0 if we're isolnode. */
+ if (node_isset(i, isolnodes))
+ NODE_DATA(i)->bdata = &bootmem_node_data[0];
free_area_init_node(i, zones_size, start, NULL);
printk(KERN_DEBUG " Normal zone: %ld per-cpu pages\n",
@@ -870,6 +913,22 @@ subsys_initcall(topology_init);
#endif /* CONFIG_NUMA */
+/*
+ * Initialize hugepage support on this cpu. We do this on all cores
+ * early in boot: before argument parsing for the boot cpu, and after
+ * argument parsing but before the init functions run on the secondaries.
+ * So the values we set up here in the hypervisor may be overridden on
+ * the boot cpu as arguments are parsed.
+ */
+static __cpuinit void init_super_pages(void)
+{
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+ int i;
+ for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
+ hv_set_pte_super_shift(i, huge_shift[i]);
+#endif
+}
+
/**
* setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
* @boot: Is this the boot cpu?
@@ -924,6 +983,8 @@ void __cpuinit setup_cpu(int boot)
/* Reset the network state on this cpu. */
reset_network_state();
#endif
+
+ init_super_pages();
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -1412,13 +1473,13 @@ void __init setup_per_cpu_areas(void)
for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {
/* Update the vmalloc mapping and page home. */
- pte_t *ptep =
- virt_to_pte(NULL, (unsigned long)ptr + i);
+ unsigned long addr = (unsigned long)ptr + i;
+ pte_t *ptep = virt_to_pte(NULL, addr);
pte_t pte = *ptep;
BUG_ON(pfn != pte_pfn(pte));
pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
pte = set_remote_cache_cpu(pte, cpu);
- set_pte(ptep, pte);
+ set_pte_at(&init_mm, addr, ptep, pte);
/* Update the lowmem mapping for consistency. */
lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
@@ -1431,7 +1492,7 @@ void __init setup_per_cpu_areas(void)
BUG_ON(pte_huge(*ptep));
}
BUG_ON(pfn != pte_pfn(*ptep));
- set_pte(ptep, pte);
+ set_pte_at(&init_mm, lowmem_va, ptep, pte);
}
}
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 89529c9f0605..27742e87e255 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -172,9 +172,6 @@ static tile_bundle_bits rewrite_load_store_unaligned(
return (tilepro_bundle_bits) 0;
}
-#ifndef __LITTLE_ENDIAN
-# error We assume little-endian representation with copy_xx_user size 2 here
-#endif
/* Handle unaligned load/store */
if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
unsigned short val_16;
@@ -195,8 +192,19 @@ static tile_bundle_bits rewrite_load_store_unaligned(
state->update = 1;
}
} else {
+ unsigned short val_16;
val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
- err = copy_to_user(addr, &val, size);
+ switch (size) {
+ case 2:
+ val_16 = val;
+ err = copy_to_user(addr, &val_16, sizeof(val_16));
+ break;
+ case 4:
+ err = copy_to_user(addr, &val, sizeof(val));
+ break;
+ default:
+ BUG();
+ }
}
if (err) {
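
A tiny userspace demonstration of why the hunk above stops copying `size` raw bytes of the full-width value: on a big-endian host the first bytes of a long are the high-order bytes, so the value must be narrowed before the copy. Plain C, no kernel API involved.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long val = 0x1234;	/* full-width register value */
	uint16_t dst = 0;

	/* What the old code effectively did for a 2-byte store: copy the
	 * first two bytes of the long.  On big-endian those are the
	 * high-order bytes, so dst would end up 0x0000. */
	memcpy(&dst, &val, 2);
	printf("raw 2-byte copy:  0x%04x\n", dst);

	/* What the patched code does: narrow to the store width first. */
	uint16_t val16 = (uint16_t)val;
	memcpy(&dst, &val16, sizeof(val16));
	printf("narrowed copy:    0x%04x\n", dst);	/* 0x1234 everywhere */
	return 0;
}
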
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 91da0f721958..cbc73a8b8fe1 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -203,7 +203,7 @@ void __init ipi_init(void)
if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
panic("Failed to initialize IPI for cpu %d\n", cpu);
- offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
+ offset = PFN_PHYS(pte_pfn(pte));
ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
}
#endif
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index cb44ba7ccd2d..b08095b402d6 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -32,11 +32,17 @@
#include <asm/syscalls.h>
#include <asm/pgtable.h>
#include <asm/homecache.h>
+#include <asm/cachectl.h>
#include <arch/chip.h>
-SYSCALL_DEFINE0(flush_cache)
+SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
+ unsigned long, flags)
{
- homecache_evict(cpumask_of(smp_processor_id()));
+ if (flags & DCACHE)
+ homecache_evict(cpumask_of(smp_processor_id()));
+ if (flags & ICACHE)
+ flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(current->mm),
+ 0, 0, 0, NULL, NULL, 0);
return 0;
}
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
index 71ae728e9d0b..e25b0a89c18f 100644
--- a/arch/tile/kernel/sysfs.c
+++ b/arch/tile/kernel/sysfs.c
@@ -93,6 +93,10 @@ HV_CONF_ATTR(mezz_part, HV_CONFSTR_MEZZ_PART_NUM)
HV_CONF_ATTR(mezz_serial, HV_CONFSTR_MEZZ_SERIAL_NUM)
HV_CONF_ATTR(mezz_revision, HV_CONFSTR_MEZZ_REV)
HV_CONF_ATTR(mezz_description, HV_CONFSTR_MEZZ_DESC)
+HV_CONF_ATTR(cpumod_part, HV_CONFSTR_CPUMOD_PART_NUM)
+HV_CONF_ATTR(cpumod_serial, HV_CONFSTR_CPUMOD_SERIAL_NUM)
+HV_CONF_ATTR(cpumod_revision, HV_CONFSTR_CPUMOD_REV)
+HV_CONF_ATTR(cpumod_description,HV_CONFSTR_CPUMOD_DESC)
HV_CONF_ATTR(switch_control, HV_CONFSTR_SWITCH_CONTROL)
static struct attribute *board_attrs[] = {
@@ -104,6 +108,10 @@ static struct attribute *board_attrs[] = {
&dev_attr_mezz_serial.attr,
&dev_attr_mezz_revision.attr,
&dev_attr_mezz_description.attr,
+ &dev_attr_cpumod_part.attr,
+ &dev_attr_cpumod_serial.attr,
+ &dev_attr_cpumod_revision.attr,
+ &dev_attr_cpumod_description.attr,
&dev_attr_switch_control.attr,
NULL
};
diff --git a/arch/tile/kernel/tlb.c b/arch/tile/kernel/tlb.c
index a5f241c24cac..3fd54d5bbd4c 100644
--- a/arch/tile/kernel/tlb.c
+++ b/arch/tile/kernel/tlb.c
@@ -15,6 +15,7 @@
#include <linux/cpumask.h>
#include <linux/module.h>
+#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <hv/hypervisor.h>
@@ -49,25 +50,25 @@ void flush_tlb_current_task(void)
flush_tlb_mm(current->mm);
}
-void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm,
+void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
unsigned long va)
{
- unsigned long size = hv_page_size(vma);
+ unsigned long size = vma_kernel_pagesize(vma);
int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
flush_remote(0, cache, mm_cpumask(mm),
va, size, size, mm_cpumask(mm), NULL, 0);
}
-void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va)
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
flush_tlb_page_mm(vma, vma->vm_mm, va);
}
EXPORT_SYMBOL(flush_tlb_page);
-void flush_tlb_range(const struct vm_area_struct *vma,
+void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- unsigned long size = hv_page_size(vma);
+ unsigned long size = vma_kernel_pagesize(vma);
struct mm_struct *mm = vma->vm_mm;
int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 73cff814ac57..5b19a23c8908 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -195,6 +195,25 @@ static int special_ill(bundle_bits bundle, int *sigp, int *codep)
return 1;
}
+static const char *const int_name[] = {
+ [INT_MEM_ERROR] = "Memory error",
+ [INT_ILL] = "Illegal instruction",
+ [INT_GPV] = "General protection violation",
+ [INT_UDN_ACCESS] = "UDN access",
+ [INT_IDN_ACCESS] = "IDN access",
+#if CHIP_HAS_SN()
+ [INT_SN_ACCESS] = "SN access",
+#endif
+ [INT_SWINT_3] = "Software interrupt 3",
+ [INT_SWINT_2] = "Software interrupt 2",
+ [INT_SWINT_0] = "Software interrupt 0",
+ [INT_UNALIGN_DATA] = "Unaligned data",
+ [INT_DOUBLE_FAULT] = "Double fault",
+#ifdef __tilegx__
+ [INT_ILL_TRANS] = "Illegal virtual address",
+#endif
+};
+
void __kprobes do_trap(struct pt_regs *regs, int fault_num,
unsigned long reason)
{
@@ -211,10 +230,17 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
* current process and hope for the best.
*/
if (!user_mode(regs)) {
+ const char *name;
if (fixup_exception(regs)) /* only UNALIGN_DATA in practice */
return;
- pr_alert("Kernel took bad trap %d at PC %#lx\n",
- fault_num, regs->pc);
+ if (fault_num >= 0 &&
+ fault_num < sizeof(int_name)/sizeof(int_name[0]) &&
+ int_name[fault_num] != NULL)
+ name = int_name[fault_num];
+ else
+ name = "Unknown interrupt";
+ pr_alert("Kernel took bad trap %d (%s) at PC %#lx\n",
+ fault_num, name, regs->pc);
if (fault_num == INT_GPV)
pr_alert("GPV_REASON is %#lx\n", reason);
show_regs(regs);
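
A hedged miniature of the bounds-checked name lookup added above: index into a sparse string table and fall back to a default when the index is out of range or the slot was never filled. The table entries here are made up for illustration.

#include <stdio.h>

static const char *const int_name[] = {
	[0] = "Memory error",
	[2] = "Illegal instruction",	/* sparse: slot 1 left NULL on purpose */
};

static const char *trap_name(int fault_num)
{
	if (fault_num >= 0 &&
	    fault_num < (int)(sizeof(int_name) / sizeof(int_name[0])) &&
	    int_name[fault_num] != NULL)
		return int_name[fault_num];
	return "Unknown interrupt";
}

int main(void)
{
	for (int i = -1; i < 4; i++)
		printf("trap %d (%s)\n", i, trap_name(i));
	return 0;
}
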
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 771b251b409d..f5cada70c3c8 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -18,7 +18,6 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
-#include <asm/futex.h>
#include <arch/chip.h>
/* See <asm/atomic_32.h> */
@@ -50,7 +49,7 @@ int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-static inline int *__atomic_hashed_lock(volatile void *v)
+int *__atomic_hashed_lock(volatile void *v)
{
/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
@@ -191,47 +190,6 @@ u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
EXPORT_SYMBOL(_atomic64_cmpxchg);
-static inline int *__futex_setup(int __user *v)
-{
- /*
- * Issue a prefetch to the counter to bring it into cache.
- * As for __atomic_setup, but we can't do a read into the L1
- * since it might fault; instead we do a prefetch into the L2.
- */
- __insn_prefetch(v);
- return __atomic_hashed_lock((int __force *)v);
-}
-
-struct __get_user futex_set(u32 __user *v, int i)
-{
- return __atomic_xchg((int __force *)v, __futex_setup(v), i);
-}
-
-struct __get_user futex_add(u32 __user *v, int n)
-{
- return __atomic_xchg_add((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_or(u32 __user *v, int n)
-{
- return __atomic_or((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_andn(u32 __user *v, int n)
-{
- return __atomic_andn((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_xor(u32 __user *v, int n)
-{
- return __atomic_xor((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_cmpxchg(u32 __user *v, int o, int n)
-{
- return __atomic_cmpxchg((int __force *)v, __futex_setup(v), o, n);
-}
-
/*
* If any of the atomic or futex routines hit a bad address (not in
* the page tables at kernel PL) this routine is called. The futex
@@ -323,7 +281,4 @@ void __init __init_atomic_per_cpu(void)
BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-
- /* The futex code makes this assumption, so we validate it here. */
- BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
}
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index 2a81d32de0da..dd5f0a33fdaf 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -18,14 +18,6 @@
/* arch/tile/lib/usercopy.S */
#include <linux/uaccess.h>
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
EXPORT_SYMBOL(strnlen_user_asm);
EXPORT_SYMBOL(strncpy_from_user_asm);
EXPORT_SYMBOL(clear_user_asm);
diff --git a/arch/tile/lib/memchr_64.c b/arch/tile/lib/memchr_64.c
index 84fdc8d8e735..6f867dbf7c56 100644
--- a/arch/tile/lib/memchr_64.c
+++ b/arch/tile/lib/memchr_64.c
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
+#include "string-endian.h"
void *memchr(const void *s, int c, size_t n)
{
@@ -39,11 +40,8 @@ void *memchr(const void *s, int c, size_t n)
/* Read the first word, but munge it so that bytes before the array
* will not match goal.
- *
- * Note that this shift count expression works because we know
- * shift counts are taken mod 64.
*/
- before_mask = (1ULL << (s_int << 3)) - 1;
+ before_mask = MASK(s_int);
v = (*p | before_mask) ^ (goal & before_mask);
/* Compute the address of the last byte. */
@@ -65,7 +63,7 @@ void *memchr(const void *s, int c, size_t n)
/* We found a match, but it might be in a byte past the end
* of the array.
*/
- ret = ((char *)p) + (__insn_ctz(bits) >> 3);
+ ret = ((char *)p) + (CFZ(bits) >> 3);
return (ret <= last_byte_ptr) ? ret : NULL;
}
EXPORT_SYMBOL(memchr);
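
The before_mask idea above generalizes beyond tile. Below is a little-endian-only userspace sketch of the same word-at-a-time scan; the tile byte-compare intrinsic (__insn_v1cmpeqi) is replaced by the standard zero-byte bit trick, and, like the original, the loop may read a few bytes past the buffer within the final aligned word.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void *memchr_word(const void *s, int c, size_t n)
{
	const uintptr_t s_int = (uintptr_t)s;
	const uint64_t *p = (const uint64_t *)(s_int & ~(uintptr_t)7);
	const char *last_byte = (const char *)s + n - 1;
	const uint64_t goal = 0x0101010101010101ULL * (uint8_t)c;

	if (n == 0)
		return NULL;

	/* Munge bytes before the buffer so they cannot match the goal. */
	uint64_t before_mask = (1ULL << ((s_int & 7) * 8)) - 1;
	uint64_t v = (*p | before_mask) ^ (goal & before_mask);

	for (;;) {
		uint64_t x = v ^ goal;	/* bytes equal to c become zero */
		uint64_t hit = (x - 0x0101010101010101ULL) & ~x &
			       0x8080808080808080ULL;
		if (hit) {
			/* Lowest flagged byte is the first genuine match. */
			char *ret = (char *)p + (__builtin_ctzll(hit) >> 3);
			return ret <= last_byte ? ret : NULL;
		}
		if ((const char *)++p > last_byte)
			return NULL;
		v = *p;
	}
}

int main(void)
{
	const char buf[] = "word-at-a-time memchr demo";
	char *hit = memchr_word(buf + 3, 'm', sizeof(buf) - 3);
	printf("found '%c' at offset %td\n", *hit, hit - buf);
	return 0;
}
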
diff --git a/arch/tile/lib/memcpy_64.c b/arch/tile/lib/memcpy_64.c
index 3fab9a6a2bbe..c79b8e7c6828 100644
--- a/arch/tile/lib/memcpy_64.c
+++ b/arch/tile/lib/memcpy_64.c
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
-#define __memcpy memcpy
/* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */
/* Must be 8 bytes in size. */
@@ -188,6 +187,7 @@ int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
/* n != 0 if we get here. Write out any trailing bytes. */
dst1 = (char *)dst8;
+#ifndef __BIG_ENDIAN__
if (n & 4) {
ST4((uint32_t *)dst1, final);
dst1 += 4;
@@ -202,11 +202,30 @@ int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
}
if (n)
ST1((uint8_t *)dst1, final);
+#else
+ if (n & 4) {
+ ST4((uint32_t *)dst1, final >> 32);
+ dst1 += 4;
+ }
+ else
+ {
+ final >>= 32;
+ }
+ if (n & 2) {
+ ST2((uint16_t *)dst1, final >> 16);
+ dst1 += 2;
+ }
+ else
+ {
+ final >>= 16;
+ }
+ if (n & 1)
+ ST1((uint8_t *)dst1, final >> 8);
+#endif
return RETVAL;
}
-
#ifdef USERCOPY_FUNC
#undef ST1
#undef ST2
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
index b2fe15e01075..3bc4b4e40d93 100644
--- a/arch/tile/lib/memcpy_tile64.c
+++ b/arch/tile/lib/memcpy_tile64.c
@@ -160,7 +160,7 @@ retry_source:
break;
if (get_remote_cache_cpu(src_pte) == smp_processor_id())
break;
- src_page = pfn_to_page(hv_pte_get_pfn(src_pte));
+ src_page = pfn_to_page(pte_pfn(src_pte));
get_page(src_page);
if (pte_val(src_pte) != pte_val(*src_ptep)) {
put_page(src_page);
@@ -168,7 +168,7 @@ retry_source:
}
if (pte_huge(src_pte)) {
/* Adjust the PTE to correspond to a small page */
- int pfn = hv_pte_get_pfn(src_pte);
+ int pfn = pte_pfn(src_pte);
pfn += (((unsigned long)source & (HPAGE_SIZE-1))
>> PAGE_SHIFT);
src_pte = pfn_pte(pfn, src_pte);
@@ -188,7 +188,7 @@ retry_dest:
put_page(src_page);
break;
}
- dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte));
+ dst_page = pfn_to_page(pte_pfn(dst_pte));
if (dst_page == src_page) {
/*
* Source and dest are on the same page; this
@@ -206,7 +206,7 @@ retry_dest:
}
if (pte_huge(dst_pte)) {
/* Adjust the PTE to correspond to a small page */
- int pfn = hv_pte_get_pfn(dst_pte);
+ int pfn = pte_pfn(dst_pte);
pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
>> PAGE_SHIFT);
dst_pte = pfn_pte(pfn, dst_pte);
diff --git a/arch/tile/lib/strchr_64.c b/arch/tile/lib/strchr_64.c
index 617a9273aaa8..f39f9dc422b0 100644
--- a/arch/tile/lib/strchr_64.c
+++ b/arch/tile/lib/strchr_64.c
@@ -15,8 +15,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
-
-#undef strchr
+#include "string-endian.h"
char *strchr(const char *s, int c)
{
@@ -33,13 +32,9 @@ char *strchr(const char *s, int c)
* match neither zero nor goal (we make sure the high bit of each
* byte is 1, and the low 7 bits are all the opposite of the goal
* byte).
- *
- * Note that this shift count expression works because we know shift
- * counts are taken mod 64.
*/
- const uint64_t before_mask = (1ULL << (s_int << 3)) - 1;
- uint64_t v = (*p | before_mask) ^
- (goal & __insn_v1shrsi(before_mask, 1));
+ const uint64_t before_mask = MASK(s_int);
+ uint64_t v = (*p | before_mask) ^ (goal & __insn_v1shrui(before_mask, 1));
uint64_t zero_matches, goal_matches;
while (1) {
@@ -55,8 +50,8 @@ char *strchr(const char *s, int c)
v = *++p;
}
- z = __insn_ctz(zero_matches);
- g = __insn_ctz(goal_matches);
+ z = CFZ(zero_matches);
+ g = CFZ(goal_matches);
/* If we found c before '\0' we got a match. Note that if c == '\0'
* then g == z, and we correctly return the address of the '\0'
diff --git a/arch/tile/lib/string-endian.h b/arch/tile/lib/string-endian.h
new file mode 100644
index 000000000000..c0eed7ce69c3
--- /dev/null
+++ b/arch/tile/lib/string-endian.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * Provide a mask based on the pointer alignment that
+ * sets up non-zero bytes before the beginning of the string.
+ * The MASK expression works because shift counts are taken mod 64.
+ * Also, specify how to count "first" and "last" bits
+ * when the bits have been read as a word.
+ */
+
+#include <asm/byteorder.h>
+
+#ifdef __LITTLE_ENDIAN
+#define MASK(x) (__insn_shl(1ULL, (x << 3)) - 1)
+#define NULMASK(x) ((2ULL << x) - 1)
+#define CFZ(x) __insn_ctz(x)
+#define REVCZ(x) __insn_clz(x)
+#else
+#define MASK(x) (__insn_shl(-2LL, ((-x << 3) - 1)))
+#define NULMASK(x) (-2LL << (63 - x))
+#define CFZ(x) __insn_clz(x)
+#define REVCZ(x) __insn_ctz(x)
+#endif
diff --git a/arch/tile/lib/strlen_64.c b/arch/tile/lib/strlen_64.c
index 1c92d46202a8..9583fc3361fa 100644
--- a/arch/tile/lib/strlen_64.c
+++ b/arch/tile/lib/strlen_64.c
@@ -15,8 +15,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
-
-#undef strlen
+#include "string-endian.h"
size_t strlen(const char *s)
{
@@ -24,15 +23,13 @@ size_t strlen(const char *s)
const uintptr_t s_int = (uintptr_t) s;
const uint64_t *p = (const uint64_t *)(s_int & -8);
- /* Read the first word, but force bytes before the string to be nonzero.
- * This expression works because we know shift counts are taken mod 64.
- */
- uint64_t v = *p | ((1ULL << (s_int << 3)) - 1);
+ /* Read and MASK the first word. */
+ uint64_t v = *p | MASK(s_int);
uint64_t bits;
while ((bits = __insn_v1cmpeqi(v, 0)) == 0)
v = *++p;
- return ((const char *)p) + (__insn_ctz(bits) >> 3) - s;
+ return ((const char *)p) + (CFZ(bits) >> 3) - s;
}
EXPORT_SYMBOL(strlen);
diff --git a/arch/tile/lib/usercopy_32.S b/arch/tile/lib/usercopy_32.S
index 979f76d83746..b62d002af009 100644
--- a/arch/tile/lib/usercopy_32.S
+++ b/arch/tile/lib/usercopy_32.S
@@ -19,82 +19,6 @@
/* Access user memory, but use MMU to avoid propagating kernel exceptions. */
- .pushsection .fixup,"ax"
-
-get_user_fault:
- { move r0, zero; move r1, zero }
- { movei r2, -EFAULT; jrp lr }
- ENDPROC(get_user_fault)
-
-put_user_fault:
- { movei r0, -EFAULT; jrp lr }
- ENDPROC(put_user_fault)
-
- .popsection
-
-/*
- * __get_user_N functions take a pointer in r0, and return 0 in r2
- * on success, with the value in r0; or else -EFAULT in r2.
- */
-#define __get_user_N(bytes, LOAD) \
- STD_ENTRY(__get_user_##bytes); \
-1: { LOAD r0, r0; move r1, zero; move r2, zero }; \
- jrp lr; \
- STD_ENDPROC(__get_user_##bytes); \
- .pushsection __ex_table,"a"; \
- .word 1b, get_user_fault; \
- .popsection
-
-__get_user_N(1, lb_u)
-__get_user_N(2, lh_u)
-__get_user_N(4, lw)
-
-/*
- * __get_user_8 takes a pointer in r0, and returns 0 in r2
- * on success, with the value in r0/r1; or else -EFAULT in r2.
- */
- STD_ENTRY(__get_user_8);
-1: { lw r0, r0; addi r1, r0, 4 };
-2: { lw r1, r1; move r2, zero };
- jrp lr;
- STD_ENDPROC(__get_user_8);
- .pushsection __ex_table,"a";
- .word 1b, get_user_fault;
- .word 2b, get_user_fault;
- .popsection
-
-/*
- * __put_user_N functions take a value in r0 and a pointer in r1,
- * and return 0 in r0 on success or -EFAULT on failure.
- */
-#define __put_user_N(bytes, STORE) \
- STD_ENTRY(__put_user_##bytes); \
-1: { STORE r1, r0; move r0, zero }; \
- jrp lr; \
- STD_ENDPROC(__put_user_##bytes); \
- .pushsection __ex_table,"a"; \
- .word 1b, put_user_fault; \
- .popsection
-
-__put_user_N(1, sb)
-__put_user_N(2, sh)
-__put_user_N(4, sw)
-
-/*
- * __put_user_8 takes a value in r0/r1 and a pointer in r2,
- * and returns 0 in r0 on success or -EFAULT on failure.
- */
-STD_ENTRY(__put_user_8)
-1: { sw r2, r0; addi r2, r2, 4 }
-2: { sw r2, r1; move r0, zero }
- jrp lr
- STD_ENDPROC(__put_user_8)
- .pushsection __ex_table,"a"
- .word 1b, put_user_fault
- .word 2b, put_user_fault
- .popsection
-
-
/*
* strnlen_user_asm takes the pointer in r0, and the length bound in r1.
* It returns the length, including the terminating NUL, or zero on exception.
diff --git a/arch/tile/lib/usercopy_64.S b/arch/tile/lib/usercopy_64.S
index 2ff44f87b78e..adb2dbbc70cd 100644
--- a/arch/tile/lib/usercopy_64.S
+++ b/arch/tile/lib/usercopy_64.S
@@ -19,55 +19,6 @@
/* Access user memory, but use MMU to avoid propagating kernel exceptions. */
- .pushsection .fixup,"ax"
-
-get_user_fault:
- { movei r1, -EFAULT; move r0, zero }
- jrp lr
- ENDPROC(get_user_fault)
-
-put_user_fault:
- { movei r0, -EFAULT; jrp lr }
- ENDPROC(put_user_fault)
-
- .popsection
-
-/*
- * __get_user_N functions take a pointer in r0, and return 0 in r1
- * on success, with the value in r0; or else -EFAULT in r1.
- */
-#define __get_user_N(bytes, LOAD) \
- STD_ENTRY(__get_user_##bytes); \
-1: { LOAD r0, r0; move r1, zero }; \
- jrp lr; \
- STD_ENDPROC(__get_user_##bytes); \
- .pushsection __ex_table,"a"; \
- .quad 1b, get_user_fault; \
- .popsection
-
-__get_user_N(1, ld1u)
-__get_user_N(2, ld2u)
-__get_user_N(4, ld4u)
-__get_user_N(8, ld)
-
-/*
- * __put_user_N functions take a value in r0 and a pointer in r1,
- * and return 0 in r0 on success or -EFAULT on failure.
- */
-#define __put_user_N(bytes, STORE) \
- STD_ENTRY(__put_user_##bytes); \
-1: { STORE r1, r0; move r0, zero }; \
- jrp lr; \
- STD_ENDPROC(__put_user_##bytes); \
- .pushsection __ex_table,"a"; \
- .quad 1b, put_user_fault; \
- .popsection
-
-__put_user_N(1, st1)
-__put_user_N(2, st2)
-__put_user_N(4, st4)
-__put_user_N(8, st)
-
/*
* strnlen_user_asm takes the pointer in r0, and the length bound in r1.
* It returns the length, including the terminating NUL, or zero on exception.
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 22e58f51ed23..84ce7abbf5af 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -187,7 +187,7 @@ static pgd_t *get_current_pgd(void)
HV_Context ctx = hv_inquire_context();
unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
struct page *pgd_page = pfn_to_page(pgd_pfn);
- BUG_ON(PageHighMem(pgd_page)); /* oops, HIGHPTE? */
+ BUG_ON(PageHighMem(pgd_page));
return (pgd_t *) __va(ctx.page_table);
}
@@ -273,11 +273,15 @@ static int handle_page_fault(struct pt_regs *regs,
int si_code;
int is_kernel_mode;
pgd_t *pgd;
+ unsigned int flags;
/* on TILE, protection faults are always writes */
if (!is_page_fault)
write = 1;
+ flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (write ? FAULT_FLAG_WRITE : 0));
+
is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);
tsk = validate_current();
@@ -382,6 +386,8 @@ static int handle_page_fault(struct pt_regs *regs,
vma = NULL; /* happy compiler */
goto bad_area_nosemaphore;
}
+
+retry:
down_read(&mm->mmap_sem);
}
@@ -429,7 +435,11 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, write);
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return 0;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -437,10 +447,22 @@ good_area:
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+ /*
+ * No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+ goto retry;
+ }
+ }
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
/*
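
Both this fault.c hunk and the UML trap.c hunk later in this series adopt the same retry protocol. A minimal userspace mock of the control flow follows; the flag values and the fake fault handler are stand-ins, and the mmap_sem handling is only sketched in comments.

#include <stdio.h>

#define FAULT_FLAG_ALLOW_RETRY	0x1
#define FAULT_FLAG_KILLABLE	0x2
#define FAULT_FLAG_WRITE	0x4
#define VM_FAULT_MAJOR		0x10
#define VM_FAULT_RETRY		0x20

/* Pretend the first attempt had to drop the lock and wait for I/O. */
static int fake_handle_mm_fault(unsigned int flags)
{
	static int calls;
	if (calls++ == 0 && (flags & FAULT_FLAG_ALLOW_RETRY))
		return VM_FAULT_RETRY;
	return VM_FAULT_MAJOR;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     FAULT_FLAG_WRITE;
	int attempts = 0, fault;

retry:
	attempts++;
	/* down_read(&mm->mmap_sem) would be taken here. */
	fault = fake_handle_mm_fault(flags);
	if ((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
		/* __lock_page_or_retry already dropped mmap_sem for us;
		 * clear the flag so at most one retry happens. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}
	/* maj_flt/min_flt accounting goes here, as in the patch. */
	printf("fault handled after %d attempt(s)\n", attempts);
	return 0;
}
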
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 499f73770b05..dbcbdf7b8aa8 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -30,6 +30,7 @@
#include <linux/cache.h>
#include <linux/smp.h>
#include <linux/module.h>
+#include <linux/hugetlb.h>
#include <asm/page.h>
#include <asm/sections.h>
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 42cfcba4e1ef..812e2d037972 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -27,85 +27,161 @@
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
+#include <asm/setup.h>
+
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+
+/*
+ * Provide an additional huge page size (in addition to the regular default
+ * huge page size) if no "hugepagesz" arguments are specified.
+ * Note that it must be smaller than the default huge page size so
+ * that it's possible to allocate them on demand from the buddy allocator.
+ * You can change this to 64K (on a 16K build), 256K, 1M, or 4M,
+ * or not define it at all.
+ */
+#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)
+
+/* "Extra" page-size multipliers, one per level of the page table. */
+int huge_shift[HUGE_SHIFT_ENTRIES] = {
+#ifdef ADDITIONAL_HUGE_SIZE
+#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
+ [HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
+#endif
+};
+
+/*
+ * This routine is a hybrid of pte_alloc_map() and pte_alloc_kernel().
+ * It assumes that L2 PTEs are never in HIGHMEM (we don't support that).
+ * It locks the user pagetable, and bumps up the mm->nr_ptes field,
+ * but otherwise allocate the page table using the kernel versions.
+ */
+static pte_t *pte_alloc_hugetlb(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address)
+{
+ pte_t *new;
+
+ if (pmd_none(*pmd)) {
+ new = pte_alloc_one_kernel(mm, address);
+ if (!new)
+ return NULL;
+
+ smp_wmb(); /* See comment in __pte_alloc */
+
+ spin_lock(&mm->page_table_lock);
+ if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
+ mm->nr_ptes++;
+ pmd_populate_kernel(mm, pmd, new);
+ new = NULL;
+ } else
+ VM_BUG_ON(pmd_trans_splitting(*pmd));
+ spin_unlock(&mm->page_table_lock);
+ if (new)
+ pte_free_kernel(mm, new);
+ }
+
+ return pte_offset_kernel(pmd, address);
+}
+#endif
pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
- pte_t *pte = NULL;
- /* We do not yet support multiple huge page sizes. */
- BUG_ON(sz != PMD_SIZE);
+ addr &= -sz; /* Mask off any low bits in the address. */
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
- if (pud)
- pte = (pte_t *) pmd_alloc(mm, pud, addr);
- BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
- return pte;
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+ if (sz >= PGDIR_SIZE) {
+ BUG_ON(sz != PGDIR_SIZE &&
+ sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
+ return (pte_t *)pud;
+ } else {
+ pmd_t *pmd = pmd_alloc(mm, pud, addr);
+ if (sz >= PMD_SIZE) {
+ BUG_ON(sz != PMD_SIZE &&
+ sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
+ return (pte_t *)pmd;
+ }
+ else {
+ if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
+ panic("Unexpected page size %#lx\n", sz);
+ return pte_alloc_hugetlb(mm, pmd, addr);
+ }
+ }
+#else
+ BUG_ON(sz != PMD_SIZE);
+ return (pte_t *) pmd_alloc(mm, pud, addr);
+#endif
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+static pte_t *get_pte(pte_t *base, int index, int level)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd = NULL;
-
- pgd = pgd_offset(mm, addr);
- if (pgd_present(*pgd)) {
- pud = pud_offset(pgd, addr);
- if (pud_present(*pud))
- pmd = pmd_offset(pud, addr);
+ pte_t *ptep = base + index;
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+ if (!pte_present(*ptep) && huge_shift[level] != 0) {
+ unsigned long mask = -1UL << huge_shift[level];
+ pte_t *super_ptep = base + (index & mask);
+ pte_t pte = *super_ptep;
+ if (pte_present(pte) && pte_super(pte))
+ ptep = super_ptep;
}
- return (pte_t *) pmd;
+#endif
+ return ptep;
}
-#ifdef HUGETLB_TEST
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write)
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
- unsigned long start = address;
- int length = 1;
- int nr;
- struct page *page;
- struct vm_area_struct *vma;
-
- vma = find_vma(mm, addr);
- if (!vma || !is_vm_hugetlb_page(vma))
- return ERR_PTR(-EINVAL);
-
- pte = huge_pte_offset(mm, address);
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+ pte_t *pte;
+#endif
- /* hugetlb should be locked, and hence, prefaulted */
- WARN_ON(!pte || pte_none(*pte));
+ /* Get the top-level page table entry. */
+ pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);
+ if (!pgd_present(*pgd))
+ return NULL;
- page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
+ /* We don't have four levels. */
+ pud = pud_offset(pgd, addr);
+#ifndef __PAGETABLE_PUD_FOLDED
+# error support fourth page table level
+#endif
- WARN_ON(!PageHead(page));
+ /* Check for an L0 huge PTE, if we have three levels. */
+#ifndef __PAGETABLE_PMD_FOLDED
+ if (pud_huge(*pud))
+ return (pte_t *)pud;
- return page;
-}
-
-int pmd_huge(pmd_t pmd)
-{
- return 0;
-}
+ pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
+ pmd_index(addr), 1);
+ if (!pmd_present(*pmd))
+ return NULL;
+#else
+ pmd = pmd_offset(pud, addr);
+#endif
-int pud_huge(pud_t pud)
-{
- return 0;
-}
+ /* Check for an L1 huge PTE. */
+ if (pmd_huge(*pmd))
+ return (pte_t *)pmd;
+
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+ /* Check for an L2 huge PTE. */
+ pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
+ if (!pte_present(*pte))
+ return NULL;
+ if (pte_super(*pte))
+ return pte;
+#endif
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
return NULL;
}
-#else
-
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write)
{
@@ -149,8 +225,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
return 0;
}
-#endif
-
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
unsigned long addr, unsigned long len,
@@ -322,21 +396,102 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return hugetlb_get_unmapped_area_topdown(file, addr, len,
pgoff, flags);
}
+#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
-static __init int setup_hugepagesz(char *opt)
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+static __init int __setup_hugepagesz(unsigned long ps)
{
- unsigned long ps = memparse(opt, &opt);
- if (ps == PMD_SIZE) {
- hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
- } else if (ps == PUD_SIZE) {
- hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+ int log_ps = __builtin_ctzl(ps);
+ int level, base_shift;
+
+ if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
+ pr_warn("Not enabling %ld byte huge pages;"
+ " must be a power of four.\n", ps);
+ return -EINVAL;
+ }
+
+ if (ps > 64*1024*1024*1024UL) {
+ pr_warn("Not enabling %ld MB huge pages;"
+ " largest legal value is 64 GB .\n", ps >> 20);
+ return -EINVAL;
+ } else if (ps >= PUD_SIZE) {
+ static long hv_jpage_size;
+ if (hv_jpage_size == 0)
+ hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
+ if (hv_jpage_size != PUD_SIZE) {
+ pr_warn("Not enabling >= %ld MB huge pages:"
+ " hypervisor reports size %ld\n",
+ PUD_SIZE >> 20, hv_jpage_size);
+ return -EINVAL;
+ }
+ level = 0;
+ base_shift = PUD_SHIFT;
+ } else if (ps >= PMD_SIZE) {
+ level = 1;
+ base_shift = PMD_SHIFT;
+ } else if (ps > PAGE_SIZE) {
+ level = 2;
+ base_shift = PAGE_SHIFT;
} else {
- pr_err("hugepagesz: Unsupported page size %lu M\n",
- ps >> 20);
- return 0;
+ pr_err("hugepagesz: huge page size %ld too small\n", ps);
+ return -EINVAL;
}
- return 1;
+
+ if (log_ps != base_shift) {
+ int shift_val = log_ps - base_shift;
+ if (huge_shift[level] != 0) {
+ int old_shift = base_shift + huge_shift[level];
+ pr_warn("Not enabling %ld MB huge pages;"
+ " already have size %ld MB.\n",
+ ps >> 20, (1UL << old_shift) >> 20);
+ return -EINVAL;
+ }
+ if (hv_set_pte_super_shift(level, shift_val) != 0) {
+ pr_warn("Not enabling %ld MB huge pages;"
+ " no hypervisor support.\n", ps >> 20);
+ return -EINVAL;
+ }
+ printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
+ huge_shift[level] = shift_val;
+ }
+
+ hugetlb_add_hstate(log_ps - PAGE_SHIFT);
+
+ return 0;
+}
+
+static bool saw_hugepagesz;
+
+static __init int setup_hugepagesz(char *opt)
+{
+ if (!saw_hugepagesz) {
+ saw_hugepagesz = true;
+ memset(huge_shift, 0, sizeof(huge_shift));
+ }
+ return __setup_hugepagesz(memparse(opt, NULL));
}
__setup("hugepagesz=", setup_hugepagesz);
-#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
+#ifdef ADDITIONAL_HUGE_SIZE
+/*
+ * Provide an additional huge page size if no "hugepagesz" args are given.
+ * In that case, all the cores have properly set up their hv super_shift
+ * already, but we need to notify the hugetlb code to enable the
+ * new huge page size from the Linux point of view.
+ */
+static __init int add_default_hugepagesz(void)
+{
+ if (!saw_hugepagesz) {
+ BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
+ ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
+ BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
+ ADDITIONAL_HUGE_SIZE);
+ BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
+ hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
+ }
+ return 0;
+}
+arch_initcall(add_default_hugepagesz);
+#endif
+
+#endif /* CONFIG_HUGETLB_SUPER_PAGES */
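
To make the size/level arithmetic in __setup_hugepagesz() above concrete, here is a standalone sketch: the requested size must be a power of four, log2(size) selects the page-table level, and the distance from that level's base shift becomes huge_shift[level]. The PAGE/PMD/PUD shift values below are illustrative placeholders, not the real tile-gx constants.

#include <stdio.h>

#define PAGE_SHIFT	16	/* placeholder: 64 KB base pages */
#define PMD_SHIFT	28	/* placeholder */
#define PUD_SHIFT	40	/* placeholder */

/* Pick the page-table level for a huge page size and the extra shift
 * (huge_shift[level]) beyond that level's natural size. */
static int pick_level(unsigned long ps, int *level, int *shift_val)
{
	int log_ps = __builtin_ctzl(ps);

	if ((1UL << log_ps) != ps || (log_ps & 1) != 0)
		return -1;		/* must be a power of four */
	if (log_ps >= PUD_SHIFT) {
		*level = 0;
		*shift_val = log_ps - PUD_SHIFT;
	} else if (log_ps >= PMD_SHIFT) {
		*level = 1;
		*shift_val = log_ps - PMD_SHIFT;
	} else if (log_ps > PAGE_SHIFT) {
		*level = 2;
		*shift_val = log_ps - PAGE_SHIFT;
	} else {
		return -1;		/* not larger than a base page */
	}
	return 0;
}

int main(void)
{
	unsigned long sizes[] = { 1UL << 20, 1UL << 24, 1UL << 28 };
	for (int i = 0; i < 3; i++) {
		int level, shift;
		if (pick_level(sizes[i], &level, &shift) == 0)
			printf("%9lu KB -> level %d, extra shift %d\n",
			       sizes[i] >> 10, level, shift);
	}
	return 0;
}
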
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 6a9d20ddc34f..630dd2ce2afe 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -82,7 +82,7 @@ static int num_l2_ptes[MAX_NUMNODES];
static void init_prealloc_ptes(int node, int pages)
{
- BUG_ON(pages & (HV_L2_ENTRIES-1));
+ BUG_ON(pages & (PTRS_PER_PTE - 1));
if (pages) {
num_l2_ptes[node] = pages;
l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
@@ -131,14 +131,9 @@ static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
#ifdef __tilegx__
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-
-/* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */
static inline pmd_t *alloc_pmd(void)
{
- return (pmd_t *)alloc_pte();
+ return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}
static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
@@ -444,6 +439,7 @@ static pgd_t pgtables[PTRS_PER_PGD]
*/
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
+ unsigned long long irqmask;
unsigned long address, pfn;
pmd_t *pmd;
pte_t *pte;
@@ -633,10 +629,13 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
* - install pgtables[] as the real page table
* - flush the TLB so the new page table takes effect
*/
+ irqmask = interrupt_mask_save_mask();
+ interrupt_mask_set_mask(-1ULL);
rc = flush_and_install_context(__pa(pgtables),
init_pgprot((unsigned long)pgtables),
__get_cpu_var(current_asid),
cpumask_bits(my_cpu_mask));
+ interrupt_mask_restore_mask(irqmask);
BUG_ON(rc != 0);
/* Copy the page table back to the normal swapper_pg_dir. */
@@ -699,6 +698,7 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
#endif /* CONFIG_HIGHMEM */
+#ifndef CONFIG_64BIT
static void __init init_free_pfn_range(unsigned long start, unsigned long end)
{
unsigned long pfn;
@@ -771,6 +771,7 @@ static void __init set_non_bootmem_pages_init(void)
init_free_pfn_range(start, end);
}
}
+#endif
/*
* paging_init() sets up the page tables - note that all of lowmem is
@@ -807,7 +808,7 @@ void __init paging_init(void)
* changing init_mm once we get up and running, and there's no
* need for e.g. vmalloc_sync_all().
*/
- BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END));
+ BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1));
pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
assign_pmd(pud, alloc_pmd());
#endif
@@ -859,8 +860,10 @@ void __init mem_init(void)
/* this will put all bootmem onto the freelists */
totalram_pages += free_all_bootmem();
+#ifndef CONFIG_64BIT
/* count all remaining LOWMEM and give all HIGHMEM to page allocator */
set_non_bootmem_pages_init();
+#endif
codesize = (unsigned long)&_etext - (unsigned long)&_text;
datasize = (unsigned long)&_end - (unsigned long)&_sdata;
diff --git a/arch/tile/mm/migrate.h b/arch/tile/mm/migrate.h
index cd45a0837fa6..91683d97917e 100644
--- a/arch/tile/mm/migrate.h
+++ b/arch/tile/mm/migrate.h
@@ -24,6 +24,9 @@
/*
* This function is used as a helper when setting up the initial
* page table (swapper_pg_dir).
+ *
+ * You must mask ALL interrupts prior to invoking this code, since
+ * you can't legally touch the stack during the cache flush.
*/
extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access,
HV_ASID asid,
@@ -39,6 +42,9 @@ extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access,
*
* Note that any non-NULL pointers must not point to the page that
* is handled by the stack_pte itself.
+ *
+ * You must mask ALL interrupts prior to invoking this code, since
+ * you can't legally touch the stack during the cache flush.
*/
extern int homecache_migrate_stack_and_flush(pte_t stack_pte, unsigned long va,
size_t length, pte_t *stack_ptep,
diff --git a/arch/tile/mm/migrate_32.S b/arch/tile/mm/migrate_32.S
index ac01a7cdf77f..5305814bf187 100644
--- a/arch/tile/mm/migrate_32.S
+++ b/arch/tile/mm/migrate_32.S
@@ -40,8 +40,7 @@
#define FRAME_R32 16
#define FRAME_R33 20
#define FRAME_R34 24
-#define FRAME_R35 28
-#define FRAME_SIZE 32
+#define FRAME_SIZE 28
@@ -66,12 +65,11 @@
#define r_my_cpumask r5
/* Locals (callee-save); must not be more than FRAME_xxx above. */
-#define r_save_ics r30
-#define r_context_lo r31
-#define r_context_hi r32
-#define r_access_lo r33
-#define r_access_hi r34
-#define r_asid r35
+#define r_context_lo r30
+#define r_context_hi r31
+#define r_access_lo r32
+#define r_access_hi r33
+#define r_asid r34
STD_ENTRY(flush_and_install_context)
/*
@@ -104,11 +102,7 @@ STD_ENTRY(flush_and_install_context)
sw r_tmp, r33
addi r_tmp, sp, FRAME_R34
}
- {
- sw r_tmp, r34
- addi r_tmp, sp, FRAME_R35
- }
- sw r_tmp, r35
+ sw r_tmp, r34
/* Move some arguments to callee-save registers. */
{
@@ -121,13 +115,6 @@ STD_ENTRY(flush_and_install_context)
}
move r_asid, r_asid_in
- /* Disable interrupts, since we can't use our stack. */
- {
- mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION
- movei r_tmp, 1
- }
- mtspr INTERRUPT_CRITICAL_SECTION, r_tmp
-
/* First, flush our L2 cache. */
{
move r0, zero /* cache_pa */
@@ -163,7 +150,7 @@ STD_ENTRY(flush_and_install_context)
}
{
move r4, r_asid
- movei r5, HV_CTX_DIRECTIO
+ moveli r5, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
}
jal hv_install_context
bnz r0, .Ldone
@@ -175,9 +162,6 @@ STD_ENTRY(flush_and_install_context)
}
.Ldone:
- /* Reset interrupts back how they were before. */
- mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics
-
/* Restore the callee-saved registers and return. */
addli lr, sp, FRAME_SIZE
{
@@ -202,10 +186,6 @@ STD_ENTRY(flush_and_install_context)
}
{
lw r34, r_tmp
- addli r_tmp, sp, FRAME_R35
- }
- {
- lw r35, r_tmp
addi sp, sp, FRAME_SIZE
}
jrp lr
diff --git a/arch/tile/mm/migrate_64.S b/arch/tile/mm/migrate_64.S
index e76fea688beb..1d15b10833d1 100644
--- a/arch/tile/mm/migrate_64.S
+++ b/arch/tile/mm/migrate_64.S
@@ -38,8 +38,7 @@
#define FRAME_R30 16
#define FRAME_R31 24
#define FRAME_R32 32
-#define FRAME_R33 40
-#define FRAME_SIZE 48
+#define FRAME_SIZE 40
@@ -60,10 +59,9 @@
#define r_my_cpumask r3
/* Locals (callee-save); must not be more than FRAME_xxx above. */
-#define r_save_ics r30
-#define r_context r31
-#define r_access r32
-#define r_asid r33
+#define r_context r30
+#define r_access r31
+#define r_asid r32
/*
* Caller-save locals and frame constants are the same as
@@ -93,11 +91,7 @@ STD_ENTRY(flush_and_install_context)
st r_tmp, r31
addi r_tmp, sp, FRAME_R32
}
- {
- st r_tmp, r32
- addi r_tmp, sp, FRAME_R33
- }
- st r_tmp, r33
+ st r_tmp, r32
/* Move some arguments to callee-save registers. */
{
@@ -106,13 +100,6 @@ STD_ENTRY(flush_and_install_context)
}
move r_asid, r_asid_in
- /* Disable interrupts, since we can't use our stack. */
- {
- mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION
- movei r_tmp, 1
- }
- mtspr INTERRUPT_CRITICAL_SECTION, r_tmp
-
/* First, flush our L2 cache. */
{
move r0, zero /* cache_pa */
@@ -147,7 +134,7 @@ STD_ENTRY(flush_and_install_context)
}
{
move r2, r_asid
- movei r3, HV_CTX_DIRECTIO
+ moveli r3, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
}
jal hv_install_context
bnez r0, 1f
@@ -158,10 +145,7 @@ STD_ENTRY(flush_and_install_context)
jal hv_flush_all
}
-1: /* Reset interrupts back how they were before. */
- mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics
-
- /* Restore the callee-saved registers and return. */
+1: /* Restore the callee-saved registers and return. */
addli lr, sp, FRAME_SIZE
{
ld lr, lr
@@ -177,10 +161,6 @@ STD_ENTRY(flush_and_install_context)
}
{
ld r32, r_tmp
- addli r_tmp, sp, FRAME_R33
- }
- {
- ld r33, r_tmp
addi sp, sp, FRAME_SIZE
}
jrp lr
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 2410aa899b3e..345edfed9fcd 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -132,15 +132,6 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}
-#if defined(CONFIG_HIGHPTE)
-pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
-{
- pte_t *pte = kmap_atomic(pmd_page(*dir)) +
- (pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
- return &pte[pte_index(address)];
-}
-#endif
-
/**
* shatter_huge_page() - ensure a given address is mapped by a small page.
*
@@ -289,33 +280,26 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+ int order)
{
gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
struct page *p;
-#if L2_USER_PGTABLE_ORDER > 0
int i;
-#endif
-
-#ifdef CONFIG_HIGHPTE
- flags |= __GFP_HIGHMEM;
-#endif
p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
if (p == NULL)
return NULL;
-#if L2_USER_PGTABLE_ORDER > 0
/*
* Make every page have a page_count() of one, not just the first.
* We don't use __GFP_COMP since it doesn't look like it works
* correctly with tlb_remove_page().
*/
- for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+ for (i = 1; i < order; ++i) {
init_page_count(p+i);
inc_zone_page_state(p+i, NR_PAGETABLE);
}
-#endif
pgtable_page_ctor(p);
return p;
@@ -326,28 +310,28 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
* process). We have to correct whatever pte_alloc_one() did before
* returning the pages to the allocator.
*/
-void pte_free(struct mm_struct *mm, struct page *p)
+void pgtable_free(struct mm_struct *mm, struct page *p, int order)
{
int i;
pgtable_page_dtor(p);
__free_page(p);
- for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+ for (i = 1; i < order; ++i) {
__free_page(p+i);
dec_zone_page_state(p+i, NR_PAGETABLE);
}
}
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
- unsigned long address)
+void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+ unsigned long address, int order)
{
int i;
pgtable_page_dtor(pte);
tlb_remove_page(tlb, pte);
- for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+ for (i = 1; i < order; ++i) {
tlb_remove_page(tlb, pte + i);
dec_zone_page_state(pte + i, NR_PAGETABLE);
}
@@ -490,7 +474,7 @@ void set_pte(pte_t *ptep, pte_t pte)
/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
- return mm->context.priority_cached;
+ return mm->context.priority_cached != 0;
}
/*
@@ -500,8 +484,8 @@ static inline int mm_is_priority_cached(struct mm_struct *mm)
void start_mm_caching(struct mm_struct *mm)
{
if (!mm_is_priority_cached(mm)) {
- mm->context.priority_cached = -1U;
- hv_set_caching(-1U);
+ mm->context.priority_cached = -1UL;
+ hv_set_caching(-1UL);
}
}
@@ -516,7 +500,7 @@ void start_mm_caching(struct mm_struct *mm)
* Presumably we'll come back later and have more luck and clear
* the value then; for now we'll just keep the cache marked for priority.
*/
-static unsigned int update_priority_cached(struct mm_struct *mm)
+static unsigned long update_priority_cached(struct mm_struct *mm)
{
if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
struct vm_area_struct *vm;
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 43ef890d292c..cb837c223922 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -10,6 +10,7 @@ config UML
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select GENERIC_IO
+ select GENERIC_CLOCKEVENTS
config MMU
bool
@@ -52,10 +53,6 @@ config GENERIC_BUG
default y
depends on BUG
-config GENERIC_CLOCKEVENTS
- bool
- default y
-
config HZ
int
default 100
diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um
index 70fd690964e4..bf87f25eb2de 100644
--- a/arch/um/Kconfig.um
+++ b/arch/um/Kconfig.um
@@ -10,7 +10,6 @@ config STATIC_LINK
2.75G) for UML.
source "mm/Kconfig"
-source "kernel/time/Kconfig"
config LD_SCRIPT_STATIC
bool
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 55c0661e2b5d..097091059aaa 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -121,15 +121,8 @@ LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))
-CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
-define cmd_vmlinux__
- $(CC) $(CFLAGS_vmlinux) -o $@ \
- -Wl,-T,$(vmlinux-lds) $(vmlinux-init) \
- -Wl,--start-group $(vmlinux-main) -Wl,--end-group \
- -lutil \
- $(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o \
- FORCE ,$^) ; rm -f linux
-endef
+# Used by link-vmlinux.sh which has special support for um link
+export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
# When cleaning we don't include .config, so we don't include
# TT or skas makefiles and don't clean skas_ptregs.h.
diff --git a/arch/um/include/asm/kvm_para.h b/arch/um/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/um/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index 4d93dff6b371..3d15243ce692 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -4,7 +4,9 @@
*/
#include "linux/sched.h"
+#include "linux/spinlock.h"
#include "linux/slab.h"
+#include "linux/oom.h"
#include "kern_util.h"
#include "os.h"
#include "skas.h"
@@ -22,13 +24,18 @@ static void kill_off_processes(void)
struct task_struct *p;
int pid;
+ read_lock(&tasklist_lock);
for_each_process(p) {
- if (p->mm == NULL)
- continue;
+ struct task_struct *t;
- pid = p->mm->context.id.u.pid;
+ t = find_lock_task_mm(p);
+ if (!t)
+ continue;
+ pid = t->mm->context.id.u.pid;
+ task_unlock(t);
os_kill_ptraced_process(pid, 1);
}
+ read_unlock(&tasklist_lock);
}
}
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index dafc94715950..3be60765c0e2 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -30,6 +30,8 @@ int handle_page_fault(unsigned long address, unsigned long ip,
pmd_t *pmd;
pte_t *pte;
int err = -EFAULT;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (is_write ? FAULT_FLAG_WRITE : 0);
*code_out = SEGV_MAPERR;
@@ -40,6 +42,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
if (in_atomic())
goto out_nosemaphore;
+retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
@@ -65,7 +68,11 @@ good_area:
do {
int fault;
- fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ goto out_nosemaphore;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
@@ -75,10 +82,17 @@ good_area:
}
BUG();
}
- if (fault & VM_FAULT_MAJOR)
- current->maj_flt++;
- else
- current->min_flt++;
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+ goto retry;
+ }
+ }
pgd = pgd_offset(mm, address);
pud = pud_offset(pgd, address);
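With these hunks UML follows the common page-fault retry protocol: when handle_mm_fault() returns VM_FAULT_RETRY it has already dropped mmap_sem itself, so the handler clears FAULT_FLAG_ALLOW_RETRY (only one retry is permitted) and repeats the lookup. A condensed sketch of that control flow, as an illustration rather than the exact UML code:

        retry:
                down_read(&mm->mmap_sem);
                vma = find_vma(mm, address);
                /* ... access checks elided ... */
                fault = handle_mm_fault(mm, vma, address, flags);
                if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                        goto out_nosemaphore;   /* mmap_sem already released */
                if (flags & FAULT_FLAG_ALLOW_RETRY) {
                        /* maj_flt/min_flt accounting only on the first pass */
                        if (fault & VM_FAULT_RETRY) {
                                flags &= ~FAULT_FLAG_ALLOW_RETRY;
                                goto retry;     /* lock was dropped by the mm core */
                        }
                }
                up_read(&mm->mmap_sem);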
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index eeb8054c7cd8..03c9ff808b5a 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -8,6 +8,7 @@ config UNICORE32
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
+ select ARCH_HAVE_CUSTOM_GPIO_H
select GENERIC_FIND_FIRST_BIT
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
@@ -25,9 +26,6 @@ config HAVE_PWM
config GENERIC_GPIO
def_bool y
-config GENERIC_CLOCKEVENTS
- bool
-
config GENERIC_CSUM
def_bool y
@@ -146,8 +144,6 @@ endmenu
menu "Kernel Features"
-source "kernel/time/Kconfig"
-
source "kernel/Kconfig.preempt"
source "kernel/Kconfig.hz"
diff --git a/arch/unicore32/include/asm/kvm_para.h b/arch/unicore32/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/unicore32/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index 0e9dec6cadd1..e5287d8517aa 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -1,4 +1,3 @@
-
obj-$(CONFIG_KVM) += kvm/
# Xen paravirtualization support
@@ -7,6 +6,7 @@ obj-$(CONFIG_XEN) += xen/
# lguest paravirtualization support
obj-$(CONFIG_LGUEST_GUEST) += lguest/
+obj-y += realmode/
obj-y += kernel/
obj-y += mm/
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d6168994e115..d700811785ea 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -32,6 +32,7 @@ config X86
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_FRAME_POINTERS
select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS if !SWIOTLB
select HAVE_KRETPROBES
select HAVE_OPTPROBES
select HAVE_FTRACE_MCOUNT_RECORD
@@ -85,9 +86,18 @@ config X86
select GENERIC_SMP_IDLE_THREAD
select HAVE_ARCH_SECCOMP_FILTER
select BUILDTIME_EXTABLE_SORT
+ select GENERIC_CMOS_UPDATE
+ select CLOCKSOURCE_WATCHDOG
+ select GENERIC_CLOCKEVENTS
+ select ARCH_CLOCKSOURCE_DATA if X86_64
+ select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
+ select GENERIC_TIME_VSYSCALL if X86_64
+ select KTIME_SCALAR if X86_32
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
config INSTRUCTION_DECODER
- def_bool (KPROBES || PERF_EVENTS)
+ def_bool (KPROBES || PERF_EVENTS || UPROBES)
config OUTPUT_FORMAT
string
@@ -99,23 +109,6 @@ config ARCH_DEFCONFIG
default "arch/x86/configs/i386_defconfig" if X86_32
default "arch/x86/configs/x86_64_defconfig" if X86_64
-config GENERIC_CMOS_UPDATE
- def_bool y
-
-config CLOCKSOURCE_WATCHDOG
- def_bool y
-
-config GENERIC_CLOCKEVENTS
- def_bool y
-
-config ARCH_CLOCKSOURCE_DATA
- def_bool y
- depends on X86_64
-
-config GENERIC_CLOCKEVENTS_BROADCAST
- def_bool y
- depends on X86_64 || (X86_32 && X86_LOCAL_APIC)
-
config LOCKDEP_SUPPORT
def_bool y
@@ -166,10 +159,6 @@ config RWSEM_XCHGADD_ALGORITHM
config GENERIC_CALIBRATE_DELAY
def_bool y
-config GENERIC_TIME_VSYSCALL
- bool
- default X86_64
-
config ARCH_HAS_CPU_RELAX
def_bool y
@@ -236,13 +225,13 @@ config ARCH_HWEIGHT_CFLAGS
default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
-config KTIME_SCALAR
- def_bool X86_32
-
config ARCH_CPU_PROBE_RELEASE
def_bool y
depends on HOTPLUG_CPU
+config ARCH_SUPPORTS_UPROBES
+ def_bool y
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -258,8 +247,6 @@ config ZONE_DMA
If unsure, say Y.
-source "kernel/time/Kconfig"
-
config SMP
bool "Symmetric multi-processing support"
---help---
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 610001d385dd..0c44630d1789 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -29,7 +29,7 @@
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mpspec.h>
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
#define COMPILER_DEPENDENT_INT64 long long
#define COMPILER_DEPENDENT_UINT64 unsigned long long
@@ -117,11 +117,8 @@ static inline void acpi_disable_pci(void)
/* Low-level suspend routine. */
extern int acpi_suspend_lowlevel(void);
-extern const unsigned char acpi_wakeup_code[];
-#define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code)))
-
-/* early initialization routine */
-extern void acpi_reserve_wakeup_memory(void);
+/* Physical address to resume after wakeup */
+#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))
/*
* Check if the CPU can handle C2 and deeper
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index b97596e2b68c..a6983b277220 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -15,6 +15,8 @@
#include <linux/compiler.h>
#include <asm/alternative.h>
+#define BIT_64(n) (U64_C(1) << (n))
+
/*
* These have to be done with inline assembly: that way the bit-setting
* is guaranteed to be atomic. All bit operations return 0 if the bit
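BIT_64() exists because BIT(n) expands to (1UL << n), and on a 32-bit build shifting a 32-bit unsigned long by 62 is undefined; the mce.c hunk later in this series switches its CntP test from BIT(62) to BIT_64(62) for exactly that reason. A small illustration (my example, not part of the patch):

        u64 mask = BIT_64(62);  /* always 0x4000000000000000ULL */
        /* whereas BIT(62) == (1UL << 62) would shift a 32-bit 1UL by 62
         * on i386 builds, which is undefined behaviour. */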
diff --git a/arch/x86/include/asm/dma-contiguous.h b/arch/x86/include/asm/dma-contiguous.h
new file mode 100644
index 000000000000..c09241659971
--- /dev/null
+++ b/arch/x86/include/asm/dma-contiguous.h
@@ -0,0 +1,13 @@
+#ifndef ASMX86_DMA_CONTIGUOUS_H
+#define ASMX86_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
+static inline void
+dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+
+#endif
+#endif
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 61c0bd25845a..f7b4c7903e7e 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -13,6 +13,7 @@
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>
+#include <linux/dma-contiguous.h>
#ifdef CONFIG_ISA
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
@@ -62,6 +63,10 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
struct dma_attrs *attrs);
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs);
+
#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
diff --git a/arch/x86/include/asm/gpio.h b/arch/x86/include/asm/gpio.h
index 91d915a65259..b3799d88ffcf 100644
--- a/arch/x86/include/asm/gpio.h
+++ b/arch/x86/include/asm/gpio.h
@@ -1,53 +1,4 @@
-/*
- * Generic GPIO API implementation for x86.
- *
- * Derived from the generic GPIO API for powerpc:
- *
- * Copyright (c) 2007-2008 MontaVista Software, Inc.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef _ASM_X86_GPIO_H
-#define _ASM_X86_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#ifdef CONFIG_GPIOLIB
-
-/*
- * Just call gpiolib.
- */
-static inline int gpio_get_value(unsigned int gpio)
-{
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
-{
- __gpio_set_value(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned int gpio)
-{
- return __gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(unsigned int gpio)
-{
- return __gpio_to_irq(gpio);
-}
-
-static inline int irq_to_gpio(unsigned int irq)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_GPIOLIB */
-
-#endif /* _ASM_X86_GPIO_H */
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index c222e1a1b12a..1ac46c22dd50 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -200,7 +200,7 @@ typedef u32 __attribute__((vector_size(16))) sse128_t;
/* Type, address-of, and value of an instruction's operand. */
struct operand {
- enum { OP_REG, OP_MEM, OP_IMM, OP_XMM, OP_NONE } type;
+ enum { OP_REG, OP_MEM, OP_IMM, OP_XMM, OP_MM, OP_NONE } type;
unsigned int bytes;
union {
unsigned long orig_val;
@@ -213,12 +213,14 @@ struct operand {
unsigned seg;
} mem;
unsigned xmm;
+ unsigned mm;
} addr;
union {
unsigned long val;
u64 val64;
char valptr[sizeof(unsigned long) + 2];
sse128_t vec_val;
+ u64 mm_val;
};
};
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e5b97be12d2a..db7c1f2709a2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -173,6 +173,9 @@ enum {
#define DR7_FIXED_1 0x00000400
#define DR7_VOLATILE 0xffff23ff
+/* apic attention bits */
+#define KVM_APIC_CHECK_VAPIC 0
+
/*
* We don't want allocation failures within the mmu code, so we preallocate
* enough memory for a single page fault in a cache.
@@ -238,8 +241,6 @@ struct kvm_mmu_page {
#endif
int write_flooding_count;
-
- struct rcu_head rcu;
};
struct kvm_pio_request {
@@ -338,6 +339,7 @@ struct kvm_vcpu_arch {
u64 efer;
u64 apic_base;
struct kvm_lapic *apic; /* kernel irqchip context */
+ unsigned long apic_attention;
int32_t apic_arb_prio;
int mp_state;
int sipi_vector;
@@ -537,8 +539,6 @@ struct kvm_arch {
u64 hv_guest_os_id;
u64 hv_hypercall;
- atomic_t reader_counter;
-
#ifdef CONFIG_KVM_MMU_AUDIT
int audit_point;
#endif
@@ -713,8 +713,9 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
- struct kvm_memory_slot *slot);
+void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 183922e13de1..63ab1661d00e 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -95,6 +95,14 @@ struct kvm_vcpu_pv_apf_data {
extern void kvmclock_init(void);
extern int kvm_register_clock(char *txt);
+#ifdef CONFIG_KVM_CLOCK
+bool kvm_check_and_clear_guest_paused(void);
+#else
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+#endif /* CONFIG_KVM_CLOCK */

/* This instruction is vmcall. On non-VT architectures, it will generate a
* trap that we will then rewrite to the appropriate instruction.
@@ -173,14 +181,16 @@ static inline int kvm_para_available(void)
if (boot_cpu_data.cpuid_level < 0)
return 0; /* So we don't blow up on old processors */
- cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
- memcpy(signature + 0, &ebx, 4);
- memcpy(signature + 4, &ecx, 4);
- memcpy(signature + 8, &edx, 4);
- signature[12] = 0;
+ if (cpu_has_hypervisor) {
+ cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
+ memcpy(signature + 0, &ebx, 4);
+ memcpy(signature + 4, &ecx, 4);
+ memcpy(signature + 8, &edx, 4);
+ signature[12] = 0;
- if (strcmp(signature, "KVMKVMKVM") == 0)
- return 1;
+ if (strcmp(signature, "KVMKVMKVM") == 0)
+ return 1;
+ }
return 0;
}
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index effff47a3c82..43876f16caf1 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
ptep->pte_low = pte.pte_low;
}
+#define pmd_read_atomic pmd_read_atomic
+/*
+ * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
+ * a "*pmdp" dereference done by gcc. Problem is, in certain places
+ * where pte_offset_map_lock is called, concurrent page faults are
+ * allowed, if the mmap_sem is held for reading. An example is mincore
+ * vs page faults vs MADV_DONTNEED. On the page fault side
+ * pmd_populate rightfully does a set_64bit, but if we're reading the
+ * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
+ * because gcc will not read the 64 bits of the pmd atomically. To fix
+ * this, all places running pte_offset_map_lock() while holding the
+ * mmap_sem in read mode shall read the pmdp pointer using this
+ * function to know if the pmd is null or not, and in turn to know if
+ * they can run pte_offset_map_lock() or pmd_trans_huge() or other pmd
+ * operations.
+ *
+ * Without THP, if the mmap_sem is held for reading, the
+ * pmd can only transition from null to not null while pmd_read_atomic runs.
+ * So there is no need to literally read it atomically.
+ *
+ * With THP, if the mmap_sem is held for reading, the pmd can become
+ * THP or null or point to a pte (and in turn become "stable") at any
+ * time under pmd_read_atomic, so it's mandatory to read it atomically
+ * with cmpxchg8b.
+ */
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+ pmdval_t ret;
+ u32 *tmp = (u32 *)pmdp;
+
+ ret = (pmdval_t) (*tmp);
+ if (ret) {
+ /*
+ * If the low part is null, we must not read the high part
+ * or we can end up with a partial pmd.
+ */
+ smp_rmb();
+ ret |= ((pmdval_t)*(tmp + 1)) << 32;
+ }
+
+ return (pmd_t) { ret };
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+ return (pmd_t) { atomic64_read((atomic64_t *)pmdp) };
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
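Callers that walk page tables with mmap_sem held only for reading are expected to snapshot the pmd through this helper instead of dereferencing the pointer directly. A hypothetical caller pattern, sketched for illustration (the function name and exact checks are mine, not from this patch):

        static int example_can_walk_ptes(pmd_t *pmd)
        {
                pmd_t pmdval = pmd_read_atomic(pmd);

                barrier();      /* use the snapshot, never re-read *pmd */
                if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
                        return 0;       /* empty or huge: no pte page to map */
                return 1;       /* stable pte page: pte_offset_map() is safe */
        }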
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 7745b257f035..39bc5777211a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -544,13 +544,16 @@ static inline void load_sp0(struct tss_struct *tss,
* enable), so that any CPU's that boot up
* after us can get the correct flags.
*/
-extern unsigned long mmu_cr4_features;
+extern unsigned long mmu_cr4_features;
+extern u32 *trampoline_cr4_features;
static inline void set_in_cr4(unsigned long mask)
{
unsigned long cr4;
mmu_cr4_features |= mask;
+ if (trampoline_cr4_features)
+ *trampoline_cr4_features = mmu_cr4_features;
cr4 = read_cr4();
cr4 |= mask;
write_cr4(cr4);
@@ -561,6 +564,8 @@ static inline void clear_in_cr4(unsigned long mask)
unsigned long cr4;
mmu_cr4_features &= ~mask;
+ if (trampoline_cr4_features)
+ *trampoline_cr4_features = mmu_cr4_features;
cr4 = read_cr4();
cr4 &= ~mask;
write_cr4(cr4);
diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h
index 35f2d1948ada..6167fd798188 100644
--- a/arch/x86/include/asm/pvclock-abi.h
+++ b/arch/x86/include/asm/pvclock-abi.h
@@ -40,5 +40,6 @@ struct pvclock_wall_clock {
} __attribute__((__packed__));
#define PVCLOCK_TSC_STABLE_BIT (1 << 0)
+#define PVCLOCK_GUEST_STOPPED (1 << 1)
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PVCLOCK_ABI_H */
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
new file mode 100644
index 000000000000..fce3f4ae5bd6
--- /dev/null
+++ b/arch/x86/include/asm/realmode.h
@@ -0,0 +1,62 @@
+#ifndef _ARCH_X86_REALMODE_H
+#define _ARCH_X86_REALMODE_H
+
+#include <linux/types.h>
+#include <asm/io.h>
+
+/* This must match data at realmode.S */
+struct real_mode_header {
+ u32 text_start;
+ u32 ro_end;
+ /* SMP trampoline */
+ u32 trampoline_start;
+ u32 trampoline_status;
+ u32 trampoline_header;
+#ifdef CONFIG_X86_64
+ u32 trampoline_pgd;
+#endif
+ /* ACPI S3 wakeup */
+#ifdef CONFIG_ACPI_SLEEP
+ u32 wakeup_start;
+ u32 wakeup_header;
+#endif
+ /* APM/BIOS reboot */
+#ifdef CONFIG_X86_32
+ u32 machine_real_restart_asm;
+#endif
+};
+
+/* This must match data at trampoline_32/64.S */
+struct trampoline_header {
+#ifdef CONFIG_X86_32
+ u32 start;
+ u16 gdt_pad;
+ u16 gdt_limit;
+ u32 gdt_base;
+#else
+ u64 start;
+ u64 efer;
+ u32 cr4;
+#endif
+};
+
+extern struct real_mode_header *real_mode_header;
+extern unsigned char real_mode_blob_end[];
+
+extern unsigned long init_rsp;
+extern unsigned long initial_code;
+extern unsigned long initial_gs;
+
+extern unsigned char real_mode_blob[];
+extern unsigned char real_mode_relocs[];
+
+#ifdef CONFIG_X86_32
+extern unsigned char startup_32_smp[];
+extern unsigned char boot_gdt[];
+#else
+extern unsigned char secondary_startup_64[];
+#endif
+
+extern void __init setup_real_mode(void);
+
+#endif /* _ARCH_X86_REALMODE_H */
diff --git a/arch/x86/include/asm/sta2x11.h b/arch/x86/include/asm/sta2x11.h
new file mode 100644
index 000000000000..e9d32df89ccc
--- /dev/null
+++ b/arch/x86/include/asm/sta2x11.h
@@ -0,0 +1,12 @@
+/*
+ * Header file for STMicroelectronics ConneXt (STA2X11) IOHub
+ */
+#ifndef __ASM_STA2X11_H
+#define __ASM_STA2X11_H
+
+#include <linux/pci.h>
+
+/* This needs to be called from the MFD to configure its sub-devices */
+struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev);
+
+#endif /* __ASM_STA2X11_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 3c9aebc00d39..5c25de07cba8 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -85,6 +85,7 @@ struct thread_info {
#define TIF_SECCOMP 8 /* secure computing */
#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
+#define TIF_UPROBE 12 /* breakpointed or singlestepping */
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
#define TIF_IA32 17 /* IA32 compatibility process */
#define TIF_FORK 18 /* ret_from_fork */
@@ -109,6 +110,7 @@ struct thread_info {
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_NOTSC (1 << TIF_NOTSC)
#define _TIF_IA32 (1 << TIF_IA32)
#define _TIF_FORK (1 << TIF_FORK)
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
deleted file mode 100644
index feca3118a73b..000000000000
--- a/arch/x86/include/asm/trampoline.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef _ASM_X86_TRAMPOLINE_H
-#define _ASM_X86_TRAMPOLINE_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <asm/io.h>
-
-/*
- * Trampoline 80x86 program as an array. These are in the init rodata
- * segment, but that's okay, because we only care about the relative
- * addresses of the symbols.
- */
-extern const unsigned char x86_trampoline_start [];
-extern const unsigned char x86_trampoline_end [];
-extern unsigned char *x86_trampoline_base;
-
-extern unsigned long init_rsp;
-extern unsigned long initial_code;
-extern unsigned long initial_gs;
-
-extern void __init setup_trampolines(void);
-
-extern const unsigned char trampoline_data[];
-extern const unsigned char trampoline_status[];
-
-#define TRAMPOLINE_SYM(x) \
- ((void *)(x86_trampoline_base + \
- ((const unsigned char *)(x) - x86_trampoline_start)))
-
-/* Address of the SMP trampoline */
-static inline unsigned long trampoline_address(void)
-{
- return virt_to_phys(TRAMPOLINE_SYM(trampoline_data));
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_X86_TRAMPOLINE_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 851fe0dc13bc..04cd6882308e 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -32,6 +32,7 @@
#define segment_eq(a, b) ((a).seg == (b).seg)
+#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr) \
((unsigned long __force)(addr) < \
(current_thread_info()->addr_limit.seg))
@@ -565,6 +566,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
/*
* movsl can be slow when source and dest are not both 8-byte aligned
*/
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 8084bc73b18c..576e39bca6ad 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -213,23 +213,6 @@ static inline unsigned long __must_check copy_from_user(void *to,
return n;
}
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-#define strlen_user(str) strnlen_user(str, LONG_MAX)
-
-long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index fcd4b6f3ef02..8e796fbbf9c6 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -208,9 +208,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
}
}
-__must_check long strnlen_user(const char __user *str, long n);
-__must_check long __strnlen_user(const char __user *str, long n);
-__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
new file mode 100644
index 000000000000..1e9bed14f7ae
--- /dev/null
+++ b/arch/x86/include/asm/uprobes.h
@@ -0,0 +1,57 @@
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+/*
+ * User-space Probes (UProbes) for x86
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2008-2011
+ * Authors:
+ * Srikar Dronamraju
+ * Jim Keniston
+ */
+
+#include <linux/notifier.h>
+
+typedef u8 uprobe_opcode_t;
+
+#define MAX_UINSN_BYTES 16
+#define UPROBE_XOL_SLOT_BYTES 128 /* to keep it cache aligned */
+
+#define UPROBE_SWBP_INSN 0xcc
+#define UPROBE_SWBP_INSN_SIZE 1
+
+struct arch_uprobe {
+ u16 fixups;
+ u8 insn[MAX_UINSN_BYTES];
+#ifdef CONFIG_X86_64
+ unsigned long rip_rela_target_address;
+#endif
+};
+
+struct arch_uprobe_task {
+ unsigned long saved_trap_nr;
+#ifdef CONFIG_X86_64
+ unsigned long saved_scratch_register;
+#endif
+};
+
+extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm);
+extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
+extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
+extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+#endif /* _ASM_UPROBES_H */
diff --git a/arch/x86/include/asm/vga.h b/arch/x86/include/asm/vga.h
index c4b9dc2f67c5..44282fbf7bf9 100644
--- a/arch/x86/include/asm/vga.h
+++ b/arch/x86/include/asm/vga.h
@@ -17,4 +17,10 @@
#define vga_readb(x) (*(x))
#define vga_writeb(x, y) (*(y) = (x))
+#ifdef CONFIG_FB_EFI
+#define __ARCH_HAS_VGA_DEFAULT_DEVICE
+extern struct pci_dev *vga_default_device(void);
+extern void vga_set_default_device(struct pci_dev *pdev);
+#endif
+
#endif /* _ASM_X86_VGA_H */
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
index e58f03b206c3..5b238981542a 100644
--- a/arch/x86/include/asm/word-at-a-time.h
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -1,6 +1,8 @@
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H
+#include <linux/kernel.h>
+
/*
* This is largely generic for little-endian machines, but the
* optimal byte mask counting is probably going to be something
@@ -8,6 +10,11 @@
* bit count instruction, that might be better than the multiply
* and shift, for example.
*/
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
#ifdef CONFIG_64BIT
@@ -35,12 +42,31 @@ static inline long count_masked_bytes(long mask)
#endif
-#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
-/* Return the high bit set in the first byte that is a zero */
-static inline unsigned long has_zero(unsigned long a)
+static inline unsigned long find_zero(unsigned long mask)
{
- return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
+ return count_masked_bytes(mask);
}
/*
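The helpers above implement the classic "find a zero byte in a word" trick used by the generic strncpy_from_user()/strnlen_user() implementations that this series switches x86 to (note the GENERIC_STRNCPY_FROM_USER/GENERIC_STRNLEN_USER selects earlier in the diff): subtract 0x01 from every byte, AND with the complement of the word, keep only the 0x80 bits, then turn the surviving bit into a byte index. A standalone user-space demonstration of the same arithmetic (my test harness, assuming a 64-bit little-endian build; not kernel code):

        #include <stdio.h>
        #include <string.h>

        #define REPEAT_BYTE(x)  ((~0ul / 0xff) * (x))

        int main(void)
        {
                unsigned long a, mask;

                memcpy(&a, "abc\0defg", sizeof(a));     /* word containing a NUL */

                /* has_zero(): 0x80 survives only in bytes that were zero */
                mask = ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
                /* create_zero_mask(): 0xff in every byte below the first zero */
                mask = ((mask - 1) & ~mask) >> 7;
                /* find_zero()/count_masked_bytes() on 64-bit: multiply + shift */
                printf("first NUL at byte %lu\n",
                       (mask * 0x0001020304050608ul) >> 56);    /* prints 3 */
                return 0;
        }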
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index 1df35417c412..cc146d51449e 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -6,6 +6,7 @@ enum ipi_vector {
XEN_CALL_FUNCTION_VECTOR,
XEN_CALL_FUNCTION_SINGLE_VECTOR,
XEN_SPIN_UNLOCK_VECTOR,
+ XEN_IRQ_WORK_VECTOR,
XEN_NR_IPIS,
};
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index c34f96c2f7a0..93971e841dd5 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -44,6 +44,7 @@ extern unsigned long machine_to_phys_nr;
extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e);
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index bb8529275aab..8215e5652d97 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -35,7 +35,6 @@ obj-y += tsc.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
-obj-y += trampoline.o trampoline_$(BITS).o
obj-y += process.o
obj-y += i387.o xsave.o
obj-y += ptrace.o
@@ -48,7 +47,6 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += cpu/
obj-y += acpi/
obj-y += reboot.o
-obj-$(CONFIG_X86_32) += reboot_32.o
obj-$(CONFIG_X86_MSR) += msr.o
obj-$(CONFIG_X86_CPUID) += cpuid.o
obj-$(CONFIG_PCI) += early-quirks.o
@@ -100,6 +98,7 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
obj-$(CONFIG_OF) += devicetree.o
+obj-$(CONFIG_UPROBES) += uprobes.o
###
# 64 bit specific files
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index 6f35260bb3ef..163b22581472 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -1,14 +1,7 @@
-subdir- := realmode
-
obj-$(CONFIG_ACPI) += boot.o
-obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_rm.o wakeup_$(BITS).o
+obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += cstate.o
endif
-$(obj)/wakeup_rm.o: $(obj)/realmode/wakeup.bin
-
-$(obj)/realmode/wakeup.bin: FORCE
- $(Q)$(MAKE) $(build)=$(obj)/realmode
-
diff --git a/arch/x86/kernel/acpi/realmode/.gitignore b/arch/x86/kernel/acpi/realmode/.gitignore
deleted file mode 100644
index 58f1f48a58f8..000000000000
--- a/arch/x86/kernel/acpi/realmode/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-wakeup.bin
-wakeup.elf
-wakeup.lds
diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
deleted file mode 100644
index 6a564ac67ef5..000000000000
--- a/arch/x86/kernel/acpi/realmode/Makefile
+++ /dev/null
@@ -1,59 +0,0 @@
-#
-# arch/x86/kernel/acpi/realmode/Makefile
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-
-always := wakeup.bin
-targets := wakeup.elf wakeup.lds
-
-wakeup-y += wakeup.o wakemain.o video-mode.o copy.o bioscall.o regs.o
-
-# The link order of the video-*.o modules can matter. In particular,
-# video-vga.o *must* be listed first, followed by video-vesa.o.
-# Hardware-specific drivers should follow in the order they should be
-# probed, and video-bios.o should typically be last.
-wakeup-y += video-vga.o
-wakeup-y += video-vesa.o
-wakeup-y += video-bios.o
-
-targets += $(wakeup-y)
-
-bootsrc := $(src)/../../../boot
-
-# ---------------------------------------------------------------------------
-
-# How to compile the 16-bit code. Note we always compile for -march=i386,
-# that way we can complain to the user if the CPU is insufficient.
-# Compile with _SETUP since this is similar to the boot-time setup code.
-KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
- -I$(srctree)/$(bootsrc) \
- $(cflags-y) \
- -Wall -Wstrict-prototypes \
- -march=i386 -mregparm=3 \
- -include $(srctree)/$(bootsrc)/code16gcc.h \
- -fno-strict-aliasing -fomit-frame-pointer \
- $(call cc-option, -ffreestanding) \
- $(call cc-option, -fno-toplevel-reorder,\
- $(call cc-option, -fno-unit-at-a-time)) \
- $(call cc-option, -fno-stack-protector) \
- $(call cc-option, -mpreferred-stack-boundary=2)
-KBUILD_CFLAGS += $(call cc-option, -m32)
-KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
-GCOV_PROFILE := n
-
-WAKEUP_OBJS = $(addprefix $(obj)/,$(wakeup-y))
-
-LDFLAGS_wakeup.elf := -T
-
-CPPFLAGS_wakeup.lds += -P -C
-
-$(obj)/wakeup.elf: $(obj)/wakeup.lds $(WAKEUP_OBJS) FORCE
- $(call if_changed,ld)
-
-OBJCOPYFLAGS_wakeup.bin := -O binary
-
-$(obj)/wakeup.bin: $(obj)/wakeup.elf FORCE
- $(call if_changed,objcopy)
diff --git a/arch/x86/kernel/acpi/realmode/bioscall.S b/arch/x86/kernel/acpi/realmode/bioscall.S
deleted file mode 100644
index f51eb0bb56ce..000000000000
--- a/arch/x86/kernel/acpi/realmode/bioscall.S
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/bioscall.S"
diff --git a/arch/x86/kernel/acpi/realmode/copy.S b/arch/x86/kernel/acpi/realmode/copy.S
deleted file mode 100644
index dc59ebee69d8..000000000000
--- a/arch/x86/kernel/acpi/realmode/copy.S
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/copy.S"
diff --git a/arch/x86/kernel/acpi/realmode/regs.c b/arch/x86/kernel/acpi/realmode/regs.c
deleted file mode 100644
index 6206033ba202..000000000000
--- a/arch/x86/kernel/acpi/realmode/regs.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/regs.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-bios.c b/arch/x86/kernel/acpi/realmode/video-bios.c
deleted file mode 100644
index 7deabc144a27..000000000000
--- a/arch/x86/kernel/acpi/realmode/video-bios.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-bios.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-mode.c b/arch/x86/kernel/acpi/realmode/video-mode.c
deleted file mode 100644
index 328ad209f113..000000000000
--- a/arch/x86/kernel/acpi/realmode/video-mode.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-mode.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-vesa.c b/arch/x86/kernel/acpi/realmode/video-vesa.c
deleted file mode 100644
index 9dbb9672226a..000000000000
--- a/arch/x86/kernel/acpi/realmode/video-vesa.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-vesa.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-vga.c b/arch/x86/kernel/acpi/realmode/video-vga.c
deleted file mode 100644
index bcc81255f374..000000000000
--- a/arch/x86/kernel/acpi/realmode/video-vga.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-vga.c"
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
deleted file mode 100644
index d4f8010a5b1b..000000000000
--- a/arch/x86/kernel/acpi/realmode/wakeup.lds.S
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * wakeup.ld
- *
- * Linker script for the real-mode wakeup code
- */
-#undef i386
-#include "wakeup.h"
-
-OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
-OUTPUT_ARCH(i386)
-ENTRY(_start)
-
-SECTIONS
-{
- . = 0;
- .jump : {
- *(.jump)
- } = 0x90909090
-
- . = WAKEUP_HEADER_OFFSET;
- .header : {
- *(.header)
- }
-
- . = ALIGN(16);
- .text : {
- *(.text*)
- } = 0x90909090
-
- . = ALIGN(16);
- .rodata : {
- *(.rodata*)
- }
-
- .videocards : {
- video_cards = .;
- *(.videocards)
- video_cards_end = .;
- }
-
- . = ALIGN(16);
- .data : {
- *(.data*)
- }
-
- . = ALIGN(16);
- .bss : {
- __bss_start = .;
- *(.bss)
- __bss_end = .;
- }
-
- .signature : {
- *(.signature)
- }
-
- _end = .;
-
- /DISCARD/ : {
- *(.note*)
- }
-}
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 146a49c763a4..95bf99de9058 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -14,8 +14,9 @@
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
+#include <asm/realmode.h>
-#include "realmode/wakeup.h"
+#include "../../realmode/rm/wakeup.h"
#include "sleep.h"
unsigned long acpi_realmode_flags;
@@ -36,13 +37,9 @@ asmlinkage void acpi_enter_s3(void)
*/
int acpi_suspend_lowlevel(void)
{
- struct wakeup_header *header;
- /* address in low memory of the wakeup routine. */
- char *acpi_realmode;
+ struct wakeup_header *header =
+ (struct wakeup_header *) __va(real_mode_header->wakeup_header);
- acpi_realmode = TRAMPOLINE_SYM(acpi_wakeup_code);
-
- header = (struct wakeup_header *)(acpi_realmode + WAKEUP_HEADER_OFFSET);
if (header->signature != WAKEUP_HEADER_SIGNATURE) {
printk(KERN_ERR "wakeup header does not match\n");
return -EINVAL;
@@ -50,27 +47,6 @@ int acpi_suspend_lowlevel(void)
header->video_mode = saved_video_mode;
- header->wakeup_jmp_seg = acpi_wakeup_address >> 4;
-
- /*
- * Set up the wakeup GDT. We set these up as Big Real Mode,
- * that is, with limits set to 4 GB. At least the Lenovo
- * Thinkpad X61 is known to need this for the video BIOS
- * initialization quirk to work; this is likely to also
- * be the case for other laptops or integrated video devices.
- */
-
- /* GDT[0]: GDT self-pointer */
- header->wakeup_gdt[0] =
- (u64)(sizeof(header->wakeup_gdt) - 1) +
- ((u64)__pa(&header->wakeup_gdt) << 16);
- /* GDT[1]: big real mode-like code segment */
- header->wakeup_gdt[1] =
- GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff);
- /* GDT[2]: big real mode-like data segment */
- header->wakeup_gdt[2] =
- GDT_ENTRY(0x8093, acpi_wakeup_address, 0xfffff);
-
#ifndef CONFIG_64BIT
store_gdt((struct desc_ptr *)&header->pmode_gdt);
@@ -95,7 +71,6 @@ int acpi_suspend_lowlevel(void)
header->pmode_cr3 = (u32)__pa(&initial_page_table);
saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
- header->trampoline_segment = trampoline_address() >> 4;
#ifdef CONFIG_SMP
stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
early_gdt_descr.address =
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index d68677a2a010..5653a5791ec9 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -2,8 +2,8 @@
* Variables and functions used by the code in sleep.c
*/
-#include <asm/trampoline.h>
#include <linux/linkage.h>
+#include <asm/realmode.h>
extern unsigned long saved_video_mode;
extern long saved_magic;
diff --git a/arch/x86/kernel/acpi/wakeup_rm.S b/arch/x86/kernel/acpi/wakeup_rm.S
deleted file mode 100644
index 63b8ab524f2c..000000000000
--- a/arch/x86/kernel/acpi/wakeup_rm.S
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Wrapper script for the realmode binary as a transport object
- * before copying to low memory.
- */
-#include <asm/page_types.h>
-
- .section ".x86_trampoline","a"
- .balign PAGE_SIZE
- .globl acpi_wakeup_code
-acpi_wakeup_code:
- .incbin "arch/x86/kernel/acpi/realmode/wakeup.bin"
- .size acpi_wakeup_code, .-acpi_wakeup_code
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c
index 507ea58688e2..cd8b166a1735 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-apei.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c
@@ -42,7 +42,8 @@ void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
struct mce m;
/* Only corrected MC is reported */
- if (!corrected)
+ if (!corrected || !(mem_err->validation_bits &
+ CPER_MEM_VALID_PHYSICAL_ADDRESS))
return;
mce_setup(&m);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 0c82091b1652..413c2ced887c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -126,6 +126,16 @@ static struct severity {
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
USER
),
+ MCESEV(
+ KEEP, "HT thread notices Action required: instruction fetch error",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
+ MCGMASK(MCG_STATUS_EIPV, 0)
+ ),
+ MCESEV(
+ AR, "Action required: instruction fetch error",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
+ USER
+ ),
#endif
MCESEV(
PANIC, "Action required: unknown MCACOD",
@@ -165,15 +175,19 @@ static struct severity {
};
/*
- * If the EIPV bit is set, it means the saved IP is the
- * instruction which caused the MCE.
+ * If mcgstatus indicated that ip/cs on the stack were
+ * no good, then "m->cs" will be zero and we will have
+ * to assume the worst case (IN_KERNEL) as we actually
+ * have no idea what we were executing when the machine
+ * check hit.
+ * If we do have a good "m->cs" (or a faked one in the
+ * case we were executing in VM86 mode) we can use it to
+ * distinguish an exception taken in user mode from one
+ * taken in the kernel.
*/
static int error_context(struct mce *m)
{
- if (m->mcgstatus & MCG_STATUS_EIPV)
- return (m->ip && (m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
- /* Unknown, assume kernel */
- return IN_KERNEL;
+ return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
}
int mce_severity(struct mce *m, int tolerant, char **msg)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 2afcbd253e1d..0a687fd185e6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -437,6 +437,14 @@ static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
m->ip = regs->ip;
m->cs = regs->cs;
+
+ /*
+ * When in VM86 mode make the cs look like ring 3
+ * always. This is a lie, but it's better than passing
+ * the additional vm86 bit around everywhere.
+ */
+ if (v8086_mode(regs))
+ m->cs |= 3;
}
/* Use accurate RIP reporting if available. */
if (rip_msr)
@@ -641,16 +649,18 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
* Do a quick check if any of the events requires a panic.
* This decides if we keep the events around or clear them.
*/
-static int mce_no_way_out(struct mce *m, char **msg)
+static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp)
{
- int i;
+ int i, ret = 0;
for (i = 0; i < banks; i++) {
m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
+ if (m->status & MCI_STATUS_VAL)
+ __set_bit(i, validp);
if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
- return 1;
+ ret = 1;
}
- return 0;
+ return ret;
}
/*
@@ -1013,6 +1023,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
*/
int kill_it = 0;
DECLARE_BITMAP(toclear, MAX_NR_BANKS);
+ DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
char *msg = "Unknown";
atomic_inc(&mce_entry);
@@ -1027,7 +1038,8 @@ void do_machine_check(struct pt_regs *regs, long error_code)
final = &__get_cpu_var(mces_seen);
*final = m;
- no_way_out = mce_no_way_out(&m, &msg);
+ memset(valid_banks, 0, sizeof(valid_banks));
+ no_way_out = mce_no_way_out(&m, &msg, valid_banks);
barrier();
@@ -1047,6 +1059,8 @@ void do_machine_check(struct pt_regs *regs, long error_code)
order = mce_start(&no_way_out);
for (i = 0; i < banks; i++) {
__clear_bit(i, toclear);
+ if (!test_bit(i, valid_banks))
+ continue;
if (!mce_banks[i].ctl)
continue;
@@ -1237,15 +1251,15 @@ void mce_log_therm_throt_event(__u64 status)
* poller finds an MCE, poll 2x faster. When the poller finds no more
* errors, poll 2x slower (up to check_interval seconds).
*/
-static int check_interval = 5 * 60; /* 5 minutes */
+static unsigned long check_interval = 5 * 60; /* 5 minutes */
-static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
+static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);
-static void mce_start_timer(unsigned long data)
+static void mce_timer_fn(unsigned long data)
{
- struct timer_list *t = &per_cpu(mce_timer, data);
- int *n;
+ struct timer_list *t = &__get_cpu_var(mce_timer);
+ unsigned long iv;
WARN_ON(smp_processor_id() != data);
@@ -1258,13 +1272,14 @@ static void mce_start_timer(unsigned long data)
* Alert userspace if needed. If we logged an MCE, reduce the
* polling interval, otherwise increase the polling interval.
*/
- n = &__get_cpu_var(mce_next_interval);
+ iv = __this_cpu_read(mce_next_interval);
if (mce_notify_irq())
- *n = max(*n/2, HZ/100);
+ iv = max(iv, (unsigned long) HZ/100);
else
- *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
+ iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
+ __this_cpu_write(mce_next_interval, iv);
- t->expires = jiffies + *n;
+ t->expires = jiffies + iv;
add_timer_on(t, smp_processor_id());
}
@@ -1458,9 +1473,9 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
rdmsrl(msrs[i], val);
/* CntP bit set? */
- if (val & BIT(62)) {
- val &= ~BIT(62);
- wrmsrl(msrs[i], val);
+ if (val & BIT_64(62)) {
+ val &= ~BIT_64(62);
+ wrmsrl(msrs[i], val);
}
}
@@ -1542,17 +1557,17 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
static void __mcheck_cpu_init_timer(void)
{
struct timer_list *t = &__get_cpu_var(mce_timer);
- int *n = &__get_cpu_var(mce_next_interval);
+ unsigned long iv = __this_cpu_read(mce_next_interval);
- setup_timer(t, mce_start_timer, smp_processor_id());
+ setup_timer(t, mce_timer_fn, smp_processor_id());
if (mce_ignore_ce)
return;
- *n = check_interval * HZ;
- if (!*n)
+ __this_cpu_write(mce_next_interval, iv);
+ if (!iv)
return;
- t->expires = round_jiffies(jiffies + *n);
+ t->expires = round_jiffies(jiffies + iv);
add_timer_on(t, smp_processor_id());
}
@@ -2262,7 +2277,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_DOWN_FAILED_FROZEN:
if (!mce_ignore_ce && check_interval) {
t->expires = round_jiffies(jiffies +
- __get_cpu_var(mce_next_interval));
+ per_cpu(mce_next_interval, cpu));
add_timer_on(t, cpu);
}
smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index ac140c7be396..bdda2e6c673b 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -266,7 +266,7 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
if (align > max_align)
align = max_align;
- sizek = 1 << align;
+ sizek = 1UL << align;
if (debug_print) {
char start_factor = 'K', size_factor = 'K';
unsigned long start_base, size_base;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 62d61e9976eb..41857970517f 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -113,7 +113,9 @@ static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
int x = e820x->nr_map;
if (x >= ARRAY_SIZE(e820x->map)) {
- printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+ printk(KERN_ERR "e820: too many entries; ignoring [mem %#010llx-%#010llx]\n",
+ (unsigned long long) start,
+ (unsigned long long) (start + size - 1));
return;
}
@@ -133,19 +135,19 @@ static void __init e820_print_type(u32 type)
switch (type) {
case E820_RAM:
case E820_RESERVED_KERN:
- printk(KERN_CONT "(usable)");
+ printk(KERN_CONT "usable");
break;
case E820_RESERVED:
- printk(KERN_CONT "(reserved)");
+ printk(KERN_CONT "reserved");
break;
case E820_ACPI:
- printk(KERN_CONT "(ACPI data)");
+ printk(KERN_CONT "ACPI data");
break;
case E820_NVS:
- printk(KERN_CONT "(ACPI NVS)");
+ printk(KERN_CONT "ACPI NVS");
break;
case E820_UNUSABLE:
- printk(KERN_CONT "(unusable)");
+ printk(KERN_CONT "unusable");
break;
default:
printk(KERN_CONT "type %u", type);
@@ -158,10 +160,10 @@ void __init e820_print_map(char *who)
int i;
for (i = 0; i < e820.nr_map; i++) {
- printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
+ printk(KERN_INFO "%s: [mem %#018Lx-%#018Lx] ", who,
(unsigned long long) e820.map[i].addr,
(unsigned long long)
- (e820.map[i].addr + e820.map[i].size));
+ (e820.map[i].addr + e820.map[i].size - 1));
e820_print_type(e820.map[i].type);
printk(KERN_CONT "\n");
}
@@ -428,9 +430,8 @@ static u64 __init __e820_update_range(struct e820map *e820x, u64 start,
size = ULLONG_MAX - start;
end = start + size;
- printk(KERN_DEBUG "e820 update range: %016Lx - %016Lx ",
- (unsigned long long) start,
- (unsigned long long) end);
+ printk(KERN_DEBUG "e820: update [mem %#010Lx-%#010Lx] ",
+ (unsigned long long) start, (unsigned long long) (end - 1));
e820_print_type(old_type);
printk(KERN_CONT " ==> ");
e820_print_type(new_type);
@@ -509,9 +510,8 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
size = ULLONG_MAX - start;
end = start + size;
- printk(KERN_DEBUG "e820 remove range: %016Lx - %016Lx ",
- (unsigned long long) start,
- (unsigned long long) end);
+ printk(KERN_DEBUG "e820: remove [mem %#010Lx-%#010Lx] ",
+ (unsigned long long) start, (unsigned long long) (end - 1));
if (checktype)
e820_print_type(old_type);
printk(KERN_CONT "\n");
@@ -567,7 +567,7 @@ void __init update_e820(void)
if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
return;
e820.nr_map = nr_map;
- printk(KERN_INFO "modified physical RAM map:\n");
+ printk(KERN_INFO "e820: modified physical RAM map:\n");
e820_print_map("modified");
}
static void __init update_e820_saved(void)
@@ -637,8 +637,8 @@ __init void e820_setup_gap(void)
if (!found) {
gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
printk(KERN_ERR
- "PCI: Warning: Cannot find a gap in the 32bit address range\n"
- "PCI: Unassigned devices with 32bit resource registers may break!\n");
+ "e820: cannot find a gap in the 32bit address range\n"
+ "e820: PCI devices with unassigned 32bit BARs may break!\n");
}
#endif
@@ -648,8 +648,8 @@ __init void e820_setup_gap(void)
pci_mem_start = gapstart;
printk(KERN_INFO
- "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
- pci_mem_start, gapstart, gapsize);
+ "e820: [mem %#010lx-%#010lx] available for PCI devices\n",
+ gapstart, gapstart + gapsize - 1);
}
/**
@@ -667,7 +667,7 @@ void __init parse_e820_ext(struct setup_data *sdata)
extmap = (struct e820entry *)(sdata->data);
__append_e820_map(extmap, entries);
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- printk(KERN_INFO "extended physical RAM map:\n");
+ printk(KERN_INFO "e820: extended physical RAM map:\n");
e820_print_map("extended");
}
@@ -734,7 +734,7 @@ u64 __init early_reserve_e820(u64 size, u64 align)
addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
if (addr) {
e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED);
- printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
+ printk(KERN_INFO "e820: update e820_saved for early_reserve_e820\n");
update_e820_saved();
}
@@ -784,7 +784,7 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
if (last_pfn > max_arch_pfn)
last_pfn = max_arch_pfn;
- printk(KERN_INFO "last_pfn = %#lx max_arch_pfn = %#lx\n",
+ printk(KERN_INFO "e820: last_pfn = %#lx max_arch_pfn = %#lx\n",
last_pfn, max_arch_pfn);
return last_pfn;
}
@@ -888,7 +888,7 @@ void __init finish_e820_parsing(void)
early_panic("Invalid user supplied memory map");
e820.nr_map = nr;
- printk(KERN_INFO "user-defined physical RAM map:\n");
+ printk(KERN_INFO "e820: user-defined physical RAM map:\n");
e820_print_map("user");
}
}
@@ -996,8 +996,9 @@ void __init e820_reserve_resources_late(void)
end = MAX_RESOURCE_SIZE;
if (start >= end)
continue;
- printk(KERN_DEBUG "reserve RAM buffer: %016llx - %016llx ",
- start, end);
+ printk(KERN_DEBUG
+ "e820: reserve RAM buffer [mem %#010llx-%#010llx]\n",
+ start, end);
reserve_region_with_split(&iomem_resource, start, end,
"RAM buffer");
}
@@ -1047,7 +1048,7 @@ void __init setup_memory_map(void)
who = x86_init.resources.memory_setup();
memcpy(&e820_saved, &e820, sizeof(struct e820map));
- printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+ printk(KERN_INFO "e820: BIOS-provided physical RAM map:\n");
e820_print_map(who);
}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 51ff18616d50..c18f59d10101 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -14,7 +14,6 @@
#include <asm/sections.h>
#include <asm/e820.h>
#include <asm/page.h>
-#include <asm/trampoline.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/bios_ebda.h>
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 3a3b779f41d3..037df57a99ac 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -24,7 +24,6 @@
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820.h>
-#include <asm/trampoline.h>
#include <asm/bios_ebda.h>
static void __init zap_identity_mappings(void)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 463c9797ca6a..d42ab17b7397 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -274,10 +274,7 @@ num_subarch_entries = (. - subarch_entries) / 4
* If cpu hotplug is not supported then this code can go in init section
* which will be freed later
*/
-
__CPUINIT
-
-#ifdef CONFIG_SMP
ENTRY(startup_32_smp)
cld
movl $(__BOOT_DS),%eax
@@ -288,7 +285,7 @@ ENTRY(startup_32_smp)
movl pa(stack_start),%ecx
movl %eax,%ss
leal -__PAGE_OFFSET(%ecx),%esp
-#endif /* CONFIG_SMP */
+
default_entry:
/*
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 7a40f2447321..94bf9cc2c7ee 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -139,10 +139,6 @@ ident_complete:
/* Fixup phys_base */
addq %rbp, phys_base(%rip)
- /* Fixup trampoline */
- addq %rbp, trampoline_level4_pgt + 0(%rip)
- addq %rbp, trampoline_level4_pgt + (511*8)(%rip)
-
/* Due to ENTRY(), sometimes the empty space gets filled with
* zeros. Better take a jmp than relying on empty space being
* filled with 0x90 (nop)
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index ad0de0c2714e..1460a5df92f7 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -94,13 +94,18 @@ static int hpet_verbose;
static int __init hpet_setup(char *str)
{
- if (str) {
+ while (str) {
+ char *next = strchr(str, ',');
+
+ if (next)
+ *next++ = 0;
if (!strncmp("disable", str, 7))
boot_hpet_disable = 1;
if (!strncmp("force", str, 5))
hpet_force_user = 1;
if (!strncmp("verbose", str, 7))
hpet_verbose = 1;
+ str = next;
}
return 1;
}
@@ -319,8 +324,6 @@ static void hpet_set_mode(enum clock_event_mode mode,
now = hpet_readl(HPET_COUNTER);
cmp = now + (unsigned int) delta;
cfg = hpet_readl(HPET_Tn_CFG(timer));
- /* Make sure we use edge triggered interrupts */
- cfg &= ~HPET_TN_LEVEL;
cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
HPET_TN_SETVAL | HPET_TN_32BIT;
hpet_writel(cfg, HPET_Tn_CFG(timer));
@@ -787,15 +790,16 @@ static int hpet_clocksource_register(void)
return 0;
}
+static u32 *hpet_boot_cfg;
+
/**
* hpet_enable - Try to setup the HPET timer. Returns 1 on success.
*/
int __init hpet_enable(void)
{
- unsigned long hpet_period;
- unsigned int id;
+ u32 hpet_period, cfg, id;
u64 freq;
- int i;
+ unsigned int i, last;
if (!is_hpet_capable())
return 0;
@@ -847,15 +851,45 @@ int __init hpet_enable(void)
id = hpet_readl(HPET_ID);
hpet_print_config();
+ last = (id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
+
#ifdef CONFIG_HPET_EMULATE_RTC
/*
* The legacy routing mode needs at least two channels, tick timer
* and the rtc emulation channel.
*/
- if (!(id & HPET_ID_NUMBER))
+ if (!last)
goto out_nohpet;
#endif
+ cfg = hpet_readl(HPET_CFG);
+ hpet_boot_cfg = kmalloc((last + 2) * sizeof(*hpet_boot_cfg),
+ GFP_KERNEL);
+ if (hpet_boot_cfg)
+ *hpet_boot_cfg = cfg;
+ else
+ pr_warn("HPET initial state will not be saved\n");
+ cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
+ hpet_writel(cfg, HPET_CFG);
+ if (cfg)
+ pr_warn("HPET: Unrecognized bits %#x set in global cfg\n",
+ cfg);
+
+ for (i = 0; i <= last; ++i) {
+ cfg = hpet_readl(HPET_Tn_CFG(i));
+ if (hpet_boot_cfg)
+ hpet_boot_cfg[i + 1] = cfg;
+ cfg &= ~(HPET_TN_ENABLE | HPET_TN_LEVEL | HPET_TN_FSB);
+ hpet_writel(cfg, HPET_Tn_CFG(i));
+ cfg &= ~(HPET_TN_PERIODIC | HPET_TN_PERIODIC_CAP
+ | HPET_TN_64BIT_CAP | HPET_TN_32BIT | HPET_TN_ROUTE
+ | HPET_TN_FSB | HPET_TN_FSB_CAP);
+ if (cfg)
+ pr_warn("HPET: Unrecognized bits %#x set in cfg#%u\n",
+ cfg, i);
+ }
+ hpet_print_config();
+
if (hpet_clocksource_register())
goto out_nohpet;
@@ -923,14 +957,28 @@ fs_initcall(hpet_late_init);
void hpet_disable(void)
{
if (is_hpet_capable() && hpet_virt_address) {
- unsigned int cfg = hpet_readl(HPET_CFG);
+ unsigned int cfg = hpet_readl(HPET_CFG), id, last;
- if (hpet_legacy_int_enabled) {
+ if (hpet_boot_cfg)
+ cfg = *hpet_boot_cfg;
+ else if (hpet_legacy_int_enabled) {
cfg &= ~HPET_CFG_LEGACY;
hpet_legacy_int_enabled = 0;
}
cfg &= ~HPET_CFG_ENABLE;
hpet_writel(cfg, HPET_CFG);
+
+ if (!hpet_boot_cfg)
+ return;
+
+ id = hpet_readl(HPET_ID);
+ last = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
+
+ for (id = 0; id <= last; ++id)
+ hpet_writel(hpet_boot_cfg[id + 1], HPET_Tn_CFG(id));
+
+ if (*hpet_boot_cfg & HPET_CFG_ENABLE)
+ hpet_writel(*hpet_boot_cfg, HPET_CFG);
}
}
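
Editorial note: the reworked hpet_setup() at the top of the hpet.c changes now walks a comma-separated option list instead of matching a single keyword, so a boot parameter such as hpet=force,verbose can set several flags at once. A minimal user-space sketch of the same split-and-match loop (stand-alone, not part of the patch):

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the kernel's hpet_setup(): split a
 * comma-separated option string and match each token by prefix. */
static int parse_opts(char *str)
{
	int disable = 0, force = 0, verbose = 0;

	while (str) {
		char *next = strchr(str, ',');

		if (next)
			*next++ = 0;		/* terminate the current token */
		if (!strncmp("disable", str, 7))
			disable = 1;
		if (!strncmp("force", str, 5))
			force = 1;
		if (!strncmp("verbose", str, 7))
			verbose = 1;
		str = next;			/* NULL after the last token */
	}
	printf("disable=%d force=%d verbose=%d\n", disable, force, verbose);
	return 1;
}

int main(void)
{
	char arg[] = "force,verbose";	/* as passed via hpet=force,verbose */

	return !parse_opts(arg);
}
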
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index f8492da65bfc..086eb58c6e80 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -22,6 +22,7 @@
#include <asm/msr.h>
#include <asm/apic.h>
#include <linux/percpu.h>
+#include <linux/hardirq.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
@@ -114,6 +115,25 @@ static void kvm_get_preset_lpj(void)
preset_lpj = lpj;
}
+bool kvm_check_and_clear_guest_paused(void)
+{
+ bool ret = false;
+ struct pvclock_vcpu_time_info *src;
+
+ /*
+ * per_cpu() is safe here because this function is only called from
+ * timer functions where preemption is already disabled.
+ */
+ WARN_ON(!in_atomic());
+ src = &__get_cpu_var(hv_clock);
+ if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
+ __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
+ ret = true;
+ }
+
+ return ret;
+}
+
static struct clocksource kvm_clock = {
.name = "kvm-clock",
.read = kvm_clock_get_cycles,
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index b02d4dd6b8a3..d2b56489d70f 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -27,7 +27,6 @@
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820.h>
-#include <asm/trampoline.h>
#include <asm/setup.h>
#include <asm/smp.h>
@@ -568,8 +567,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
struct mpf_intel *mpf;
unsigned long mem;
- apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
- bp, length);
+ apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
+ base, base + length - 1);
BUILD_BUG_ON(sizeof(*mpf) != 16);
while (length > 0) {
@@ -584,8 +583,10 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
#endif
mpf_found = mpf;
- printk(KERN_INFO "found SMP MP-table at [%p] %llx\n",
- mpf, (u64)virt_to_phys(mpf));
+ printk(KERN_INFO "found SMP MP-table at [mem %#010llx-%#010llx] mapped at [%p]\n",
+ (unsigned long long) virt_to_phys(mpf),
+ (unsigned long long) virt_to_phys(mpf) +
+ sizeof(*mpf) - 1, mpf);
mem = virt_to_phys(mpf);
memblock_reserve(mem, sizeof(*mpf));
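
Editorial note: the printk strings above switch to a uniform "[mem start-end]" notation built on %#010llx: the '#' flag adds the 0x prefix and the zero-padded width of 10 keeps low addresses aligned in the log. A quick stand-alone check of the format:

#include <stdio.h>

int main(void)
{
	unsigned long long base = 0x000fd000ULL, length = 0x400ULL;

	/* Same range notation the patch uses for its printk() messages. */
	printf("Scan for SMP in [mem %#010llx-%#010llx]\n",
	       base, base + length - 1);
	return 0;
}
/* prints: Scan for SMP in [mem 0x000fd000-0x000fd3ff] */
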
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 3003250ac51d..62c9457ccd2f 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -100,14 +100,18 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
struct dma_attrs *attrs)
{
unsigned long dma_mask;
- struct page *page;
+ struct page *page = NULL;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
dma_addr_t addr;
dma_mask = dma_alloc_coherent_mask(dev, flag);
flag |= __GFP_ZERO;
again:
- page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
+ if (!(flag & GFP_ATOMIC))
+ page = dma_alloc_from_contiguous(dev, count, get_order(size));
+ if (!page)
+ page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
if (!page)
return NULL;
@@ -127,6 +131,16 @@ again:
return page_address(page);
}
+void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_addr, struct dma_attrs *attrs)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct page *page = virt_to_page(vaddr);
+
+ if (!dma_release_from_contiguous(dev, page, count))
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+
/*
* See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
* parameter documentation.
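
Editorial note: dma_generic_alloc_coherent() above now tries the contiguous-memory allocator first, skips it for atomic callers (CMA may block), and falls back to the regular page allocator. A hedged sketch of that control flow, with hypothetical try_cma_alloc()/buddy_alloc() stand-ins for the real allocators:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096UL
#define PAGE_SHIFT	12
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Hypothetical stand-ins for dma_alloc_from_contiguous() and
 * alloc_pages_node(); here they only report what was asked for. */
static void *try_cma_alloc(size_t pages)
{
	printf("CMA request for %zu page(s)\n", pages);
	return NULL;			/* pretend CMA has no free area */
}

static void *buddy_alloc(size_t pages)
{
	printf("page-allocator fallback for %zu page(s)\n", pages);
	return malloc(pages * PAGE_SIZE);
}

static void *alloc_coherent_sketch(size_t size, bool atomic)
{
	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;	/* bytes -> pages */
	void *buf = NULL;

	if (!atomic)			/* CMA may block, skip it for atomic callers */
		buf = try_cma_alloc(count);
	if (!buf)			/* fall back to the normal page allocator */
		buf = buddy_alloc(count);
	return buf;
}

int main(void)
{
	free(alloc_coherent_sketch(10000, false));	/* 10000 bytes -> 3 pages */
	return 0;
}
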
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index f96050685b46..871be4a84c7d 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -74,12 +74,6 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
return nents;
}
-static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, struct dma_attrs *attrs)
-{
- free_pages((unsigned long)vaddr, get_order(size));
-}
-
static void nommu_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size,
enum dma_data_direction dir)
@@ -97,7 +91,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
struct dma_map_ops nommu_dma_ops = {
.alloc = dma_generic_alloc_coherent,
- .free = nommu_free_coherent,
+ .free = dma_generic_free_coherent,
.map_sg = nommu_map_sg,
.map_page = nommu_map_page,
.sync_single_for_device = nommu_sync_single_for_device,
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 77215c23fba1..79c45af81604 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -24,6 +24,7 @@
#ifdef CONFIG_X86_32
# include <linux/ctype.h>
# include <linux/mc146818rtc.h>
+# include <asm/realmode.h>
#else
# include <asm/x86_init.h>
#endif
@@ -156,15 +157,10 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
return 0;
}
-extern const unsigned char machine_real_restart_asm[];
-extern const u64 machine_real_restart_gdt[3];
-
void machine_real_restart(unsigned int type)
{
- void *restart_va;
- unsigned long restart_pa;
- void (*restart_lowmem)(unsigned int);
- u64 *lowmem_gdt;
+ void (*restart_lowmem)(unsigned int) = (void (*)(unsigned int))
+ real_mode_header->machine_real_restart_asm;
local_irq_disable();
@@ -195,21 +191,6 @@ void machine_real_restart(unsigned int type)
* too. */
*((unsigned short *)0x472) = reboot_mode;
- /* Patch the GDT in the low memory trampoline */
- lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
-
- restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
- restart_pa = virt_to_phys(restart_va);
- restart_lowmem = (void (*)(unsigned int))restart_pa;
-
- /* GDT[0]: GDT self-pointer */
- lowmem_gdt[0] =
- (u64)(sizeof(machine_real_restart_gdt) - 1) +
- ((u64)virt_to_phys(lowmem_gdt) << 16);
- /* GDT[1]: 64K real mode code segment */
- lowmem_gdt[1] =
- GDT_ENTRY(0x009b, restart_pa, 0xffff);
-
/* Jump to the identity-mapped low memory code */
restart_lowmem(type);
}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 366c688d619e..16be6dc14db1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -49,6 +49,7 @@
#include <asm/pci-direct.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>
+#include <linux/dma-contiguous.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -72,7 +73,7 @@
#include <asm/mtrr.h>
#include <asm/apic.h>
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
@@ -333,8 +334,8 @@ static void __init relocate_initrd(void)
memblock_reserve(ramdisk_here, area_size);
initrd_start = ramdisk_here + PAGE_OFFSET;
initrd_end = initrd_start + ramdisk_size;
- printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
- ramdisk_here, ramdisk_here + ramdisk_size);
+ printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
+ ramdisk_here, ramdisk_here + ramdisk_size - 1);
q = (char *)initrd_start;
@@ -365,8 +366,8 @@ static void __init relocate_initrd(void)
/* high pages is not converted by early_res_to_bootmem */
ramdisk_image = boot_params.hdr.ramdisk_image;
ramdisk_size = boot_params.hdr.ramdisk_size;
- printk(KERN_INFO "Move RAMDISK from %016llx - %016llx to"
- " %08llx - %08llx\n",
+ printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
+ " [mem %#010llx-%#010llx]\n",
ramdisk_image, ramdisk_image + ramdisk_size - 1,
ramdisk_here, ramdisk_here + ramdisk_size - 1);
}
@@ -391,8 +392,8 @@ static void __init reserve_initrd(void)
ramdisk_size, end_of_lowmem>>1);
}
- printk(KERN_INFO "RAMDISK: %08llx - %08llx\n", ramdisk_image,
- ramdisk_end);
+ printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
+ ramdisk_end - 1);
if (ramdisk_end <= end_of_lowmem) {
@@ -905,10 +906,10 @@ void __init setup_arch(char **cmdline_p)
setup_bios_corruption_check();
#endif
- printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n",
- max_pfn_mapped<<PAGE_SHIFT);
+ printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
+ (max_pfn_mapped<<PAGE_SHIFT) - 1);
- setup_trampolines();
+ setup_real_mode();
init_gbpages();
@@ -925,6 +926,7 @@ void __init setup_arch(char **cmdline_p)
}
#endif
memblock.current_limit = get_max_mapped();
+ dma_contiguous_reserve(0);
/*
* NOTE: On x86-32, only from this point on, fixmaps are ready for use.
@@ -966,6 +968,8 @@ void __init setup_arch(char **cmdline_p)
if (boot_cpu_data.cpuid_level >= 0) {
/* A CPU has %cr4 if and only if it has CPUID */
mmu_cr4_features = read_cr4();
+ if (trampoline_cr4_features)
+ *trampoline_cr4_features = mmu_cr4_features;
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 9363b58b967c..2e937a5ad531 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -18,6 +18,7 @@
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/user-return-notifier.h>
+#include <linux/uprobes.h>
#include <asm/processor.h>
#include <asm/ucontext.h>
@@ -814,6 +815,11 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
mce_notify_process();
#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
+ if (thread_info_flags & _TIF_UPROBE) {
+ clear_thread_flag(TIF_UPROBE);
+ uprobe_notify_resume(regs);
+ }
+
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 433529e29be4..f56f96da77f5 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -57,7 +57,7 @@
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/idle.h>
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
@@ -73,6 +73,8 @@
#include <asm/smpboot_hooks.h>
#include <asm/i8259.h>
+#include <asm/realmode.h>
+
/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
@@ -660,8 +662,12 @@ static void __cpuinit announce_cpu(int cpu, int apicid)
*/
static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
{
+ volatile u32 *trampoline_status =
+ (volatile u32 *) __va(real_mode_header->trampoline_status);
+ /* start_ip had better be page-aligned! */
+ unsigned long start_ip = real_mode_header->trampoline_start;
+
unsigned long boot_error = 0;
- unsigned long start_ip;
int timeout;
alternatives_smp_switch(1);
@@ -684,9 +690,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
initial_code = (unsigned long)start_secondary;
stack_start = idle->thread.sp;
- /* start_ip had better be page-aligned! */
- start_ip = trampoline_address();
-
/* So we see what's up */
announce_cpu(cpu, apicid);
@@ -749,8 +752,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
pr_debug("CPU%d: has booted.\n", cpu);
} else {
boot_error = 1;
- if (*(volatile u32 *)TRAMPOLINE_SYM(trampoline_status)
- == 0xA5A5A5A5)
+ if (*trampoline_status == 0xA5A5A5A5)
/* trampoline started but...? */
pr_err("CPU%d: Stuck ??\n", cpu);
else
@@ -776,7 +778,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
}
/* mark "stuck" area as not stuck */
- *(volatile u32 *)TRAMPOLINE_SYM(trampoline_status) = 0;
+ *trampoline_status = 0;
if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
/*
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 6410744ac5cb..f84fe00fad48 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -32,7 +32,7 @@
#include <linux/mm.h>
#include <linux/tboot.h>
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
#include <asm/processor.h>
#include <asm/bootparam.h>
#include <asm/pgtable.h>
@@ -44,7 +44,7 @@
#include <asm/e820.h>
#include <asm/io.h>
-#include "acpi/realmode/wakeup.h"
+#include "../realmode/rm/wakeup.h"
/* Global pointer to shared data; NULL means no measured launch. */
struct tboot *tboot __read_mostly;
@@ -201,7 +201,8 @@ static int tboot_setup_sleep(void)
add_mac_region(e820.map[i].addr, e820.map[i].size);
}
- tboot->acpi_sinfo.kernel_s3_resume_vector = acpi_wakeup_address;
+ tboot->acpi_sinfo.kernel_s3_resume_vector =
+ real_mode_header->wakeup_start;
return 0;
}
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
deleted file mode 100644
index a73b61055ad6..000000000000
--- a/arch/x86/kernel/trampoline.c
+++ /dev/null
@@ -1,42 +0,0 @@
-#include <linux/io.h>
-#include <linux/memblock.h>
-
-#include <asm/trampoline.h>
-#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
-
-unsigned char *x86_trampoline_base;
-
-void __init setup_trampolines(void)
-{
- phys_addr_t mem;
- size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start);
-
- /* Has to be in very low memory so we can execute real-mode AP code. */
- mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
- if (!mem)
- panic("Cannot allocate trampoline\n");
-
- x86_trampoline_base = __va(mem);
- memblock_reserve(mem, size);
-
- printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
- x86_trampoline_base, (unsigned long long)mem, size);
-
- memcpy(x86_trampoline_base, x86_trampoline_start, size);
-}
-
-/*
- * setup_trampolines() gets called very early, to guarantee the
- * availability of low memory. This is before the proper kernel page
- * tables are set up, so we cannot set page permissions in that
- * function. Thus, we use an arch_initcall instead.
- */
-static int __init configure_trampolines(void)
-{
- size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start);
-
- set_memory_x((unsigned long)x86_trampoline_base, size >> PAGE_SHIFT);
- return 0;
-}
-arch_initcall(configure_trampolines);
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
deleted file mode 100644
index 451c0a7ef7fd..000000000000
--- a/arch/x86/kernel/trampoline_32.S
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- *
- * Trampoline.S Derived from Setup.S by Linus Torvalds
- *
- * 4 Jan 1997 Michael Chastain: changed to gnu as.
- *
- * This is only used for booting secondary CPUs in SMP machine
- *
- * Entry: CS:IP point to the start of our code, we are
- * in real mode with no stack, but the rest of the
- * trampoline page to make our stack and everything else
- * is a mystery.
- *
- * We jump into arch/x86/kernel/head_32.S.
- *
- * On entry to trampoline_data, the processor is in real mode
- * with 16-bit addressing and 16-bit data. CS has some value
- * and IP is zero. Thus, data addresses need to be absolute
- * (no relocation) and are taken with regard to r_base.
- *
- * If you work on this file, check the object module with
- * objdump --reloc to make sure there are no relocation
- * entries except for:
- *
- * TYPE VALUE
- * R_386_32 startup_32_smp
- * R_386_32 boot_gdt
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/segment.h>
-#include <asm/page_types.h>
-
-#ifdef CONFIG_SMP
-
- .section ".x86_trampoline","a"
- .balign PAGE_SIZE
- .code16
-
-ENTRY(trampoline_data)
-r_base = .
- wbinvd # Needed for NUMA-Q should be harmless for others
- mov %cs, %ax # Code and data in the same place
- mov %ax, %ds
-
- cli # We should be safe anyway
-
- movl $0xA5A5A5A5, trampoline_status - r_base
- # write marker for master knows we're running
-
- /* GDT tables in non default location kernel can be beyond 16MB and
- * lgdt will not be able to load the address as in real mode default
- * operand size is 16bit. Use lgdtl instead to force operand size
- * to 32 bit.
- */
-
- lidtl boot_idt_descr - r_base # load idt with 0, 0
- lgdtl boot_gdt_descr - r_base # load gdt with whatever is appropriate
-
- xor %ax, %ax
- inc %ax # protected mode (PE) bit
- lmsw %ax # into protected mode
- # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
-
- # These need to be in the same 64K segment as the above;
- # hence we don't use the boot_gdt_descr defined in head.S
-boot_gdt_descr:
- .word __BOOT_DS + 7 # gdt limit
- .long boot_gdt - __PAGE_OFFSET # gdt base
-
-boot_idt_descr:
- .word 0 # idt limit = 0
- .long 0 # idt base = 0L
-
-ENTRY(trampoline_status)
- .long 0
-
-.globl trampoline_end
-trampoline_end:
-
-#endif /* CONFIG_SMP */
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
new file mode 100644
index 000000000000..dc4e910a7d96
--- /dev/null
+++ b/arch/x86/kernel/uprobes.c
@@ -0,0 +1,674 @@
+/*
+ * User-space Probes (UProbes) for x86
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2008-2011
+ * Authors:
+ * Srikar Dronamraju
+ * Jim Keniston
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/uprobes.h>
+#include <linux/uaccess.h>
+
+#include <linux/kdebug.h>
+#include <asm/processor.h>
+#include <asm/insn.h>
+
+/* Post-execution fixups. */
+
+/* No fixup needed */
+#define UPROBE_FIX_NONE 0x0
+
+/* Adjust IP back to vicinity of actual insn */
+#define UPROBE_FIX_IP 0x1
+
+/* Adjust the return address of a call insn */
+#define UPROBE_FIX_CALL 0x2
+
+#define UPROBE_FIX_RIP_AX 0x8000
+#define UPROBE_FIX_RIP_CX 0x4000
+
+#define UPROBE_TRAP_NR UINT_MAX
+
+/* Adaptations for mhiramat x86 decoder v14. */
+#define OPCODE1(insn) ((insn)->opcode.bytes[0])
+#define OPCODE2(insn) ((insn)->opcode.bytes[1])
+#define OPCODE3(insn) ((insn)->opcode.bytes[2])
+#define MODRM_REG(insn) X86_MODRM_REG(insn->modrm.value)
+
+#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
+ (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
+ (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
+ (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
+ (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
+ << (row % 32))
+
+/*
+ * Good-instruction tables for 32-bit apps. This is non-const and volatile
+ * to keep gcc from statically optimizing it out, as variable_test_bit makes
+ * some versions of gcc think only *(unsigned long*) is used.
+ */
+static volatile u32 good_insns_32[256 / 32] = {
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ /* ---------------------------------------------- */
+ W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */
+ W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
+ W(0x20, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* 20 */
+ W(0x30, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) , /* 30 */
+ W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
+ W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
+ W(0x60, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
+ W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
+ W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
+ W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
+ W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
+ W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
+ W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
+ W(0xd0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
+ W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
+ W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */
+ /* ---------------------------------------------- */
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+};
+
+/* Using this for both 64-bit and 32-bit apps */
+static volatile u32 good_2byte_insns[256 / 32] = {
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ /* ---------------------------------------------- */
+ W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */
+ W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
+ W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
+ W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
+ W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
+ W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
+ W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
+ W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
+ W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
+ W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
+ W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
+ W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
+ W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
+ W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
+ W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
+ W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* f0 */
+ /* ---------------------------------------------- */
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+};
+
+#ifdef CONFIG_X86_64
+/* Good-instruction tables for 64-bit apps */
+static volatile u32 good_insns_64[256 / 32] = {
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ /* ---------------------------------------------- */
+ W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */
+ W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
+ W(0x20, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 20 */
+ W(0x30, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 30 */
+ W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
+ W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
+ W(0x60, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
+ W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
+ W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
+ W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
+ W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
+ W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
+ W(0xc0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
+ W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
+ W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
+ W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */
+ /* ---------------------------------------------- */
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+};
+#endif
+#undef W
+
+/*
+ * opcodes we'll probably never support:
+ *
+ * 6c-6d, e4-e5, ec-ed - in
+ * 6e-6f, e6-e7, ee-ef - out
+ * cc, cd - int3, int
+ * cf - iret
+ * d6 - illegal instruction
+ * f1 - int1/icebp
+ * f4 - hlt
+ * fa, fb - cli, sti
+ * 0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2
+ *
+ * invalid opcodes in 64-bit mode:
+ *
+ * 06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5
+ * 63 - we support this opcode in x86_64 but not in i386.
+ *
+ * opcodes we may need to refine support for:
+ *
+ * 0f - 2-byte instructions: For many of these instructions, the validity
+ * depends on the prefix and/or the reg field. On such instructions, we
+ * just consider the opcode combination valid if it corresponds to any
+ * valid instruction.
+ *
+ * 8f - Group 1 - only reg = 0 is OK
+ * c6-c7 - Group 11 - only reg = 0 is OK
+ * d9-df - fpu insns with some illegal encodings
+ * f2, f3 - repnz, repz prefixes. These are also the first byte for
+ * certain floating-point instructions, such as addsd.
+ *
+ * fe - Group 4 - only reg = 0 or 1 is OK
+ * ff - Group 5 - only reg = 0-6 is OK
+ *
+ * others -- Do we need to support these?
+ *
+ * 0f - (floating-point?) prefetch instructions
+ * 07, 17, 1f - pop es, pop ss, pop ds
+ * 26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
+ * but 64 and 65 (fs: and gs:) seem to be used, so we support them
+ * 67 - addr16 prefix
+ * ce - into
+ * f0 - lock prefix
+ */
+
+/*
+ * TODO:
+ * - Where necessary, examine the modrm byte and allow only valid instructions
+ * in the different Groups and fpu instructions.
+ */
+
+static bool is_prefix_bad(struct insn *insn)
+{
+ int i;
+
+ for (i = 0; i < insn->prefixes.nbytes; i++) {
+ switch (insn->prefixes.bytes[i]) {
+ case 0x26: /* INAT_PFX_ES */
+ case 0x2E: /* INAT_PFX_CS */
+ case 0x36: /* INAT_PFX_DS */
+ case 0x3E: /* INAT_PFX_SS */
+ case 0xF0: /* INAT_PFX_LOCK */
+ return true;
+ }
+ }
+ return false;
+}
+
+static int validate_insn_32bits(struct arch_uprobe *auprobe, struct insn *insn)
+{
+ insn_init(insn, auprobe->insn, false);
+
+ /* Skip good instruction prefixes; reject "bad" ones. */
+ insn_get_opcode(insn);
+ if (is_prefix_bad(insn))
+ return -ENOTSUPP;
+
+ if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_32))
+ return 0;
+
+ if (insn->opcode.nbytes == 2) {
+ if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+/*
+ * Figure out which fixups arch_uprobe_post_xol() will need to perform, and
+ * annotate arch_uprobe->fixups accordingly. To start with,
+ * arch_uprobe->fixups is either zero or it reflects rip-related fixups.
+ */
+static void prepare_fixups(struct arch_uprobe *auprobe, struct insn *insn)
+{
+ bool fix_ip = true, fix_call = false; /* defaults */
+ int reg;
+
+ insn_get_opcode(insn); /* should be a nop */
+
+ switch (OPCODE1(insn)) {
+ case 0xc3: /* ret/lret */
+ case 0xcb:
+ case 0xc2:
+ case 0xca:
+ /* ip is correct */
+ fix_ip = false;
+ break;
+ case 0xe8: /* call relative - Fix return addr */
+ fix_call = true;
+ break;
+ case 0x9a: /* call absolute - Fix return addr, not ip */
+ fix_call = true;
+ fix_ip = false;
+ break;
+ case 0xff:
+ insn_get_modrm(insn);
+ reg = MODRM_REG(insn);
+ if (reg == 2 || reg == 3) {
+ /* call or lcall, indirect */
+ /* Fix return addr; ip is correct. */
+ fix_call = true;
+ fix_ip = false;
+ } else if (reg == 4 || reg == 5) {
+ /* jmp or ljmp, indirect */
+ /* ip is correct. */
+ fix_ip = false;
+ }
+ break;
+ case 0xea: /* jmp absolute -- ip is correct */
+ fix_ip = false;
+ break;
+ default:
+ break;
+ }
+ if (fix_ip)
+ auprobe->fixups |= UPROBE_FIX_IP;
+ if (fix_call)
+ auprobe->fixups |= UPROBE_FIX_CALL;
+}
+
+#ifdef CONFIG_X86_64
+/*
+ * If arch_uprobe->insn doesn't use rip-relative addressing, return
+ * immediately. Otherwise, rewrite the instruction so that it accesses
+ * its memory operand indirectly through a scratch register. Set
+ * arch_uprobe->fixups and arch_uprobe->rip_rela_target_address
+ * accordingly. (The contents of the scratch register will be saved
+ * before we single-step the modified instruction, and restored
+ * afterward.)
+ *
+ * We do this because a rip-relative instruction can access only a
+ * relatively small area (+/- 2 GB from the instruction), and the XOL
+ * area typically lies beyond that area. At least for instructions
+ * that store to memory, we can't execute the original instruction
+ * and "fix things up" later, because the misdirected store could be
+ * disastrous.
+ *
+ * Some useful facts about rip-relative instructions:
+ *
+ * - There's always a modrm byte.
+ * - There's never a SIB byte.
+ * - The displacement is always 4 bytes.
+ */
+static void
+handle_riprel_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
+{
+ u8 *cursor;
+ u8 reg;
+
+ if (mm->context.ia32_compat)
+ return;
+
+ auprobe->rip_rela_target_address = 0x0;
+ if (!insn_rip_relative(insn))
+ return;
+
+ /*
+ * insn_rip_relative() would have decoded rex_prefix, modrm.
+ * Clear REX.b bit (extension of MODRM.rm field):
+ * we want to encode rax/rcx, not r8/r9.
+ */
+ if (insn->rex_prefix.nbytes) {
+ cursor = auprobe->insn + insn_offset_rex_prefix(insn);
+ *cursor &= 0xfe; /* Clearing REX.B bit */
+ }
+
+ /*
+ * Point cursor at the modrm byte. The next 4 bytes are the
+ * displacement. Beyond the displacement, for some instructions,
+ * is the immediate operand.
+ */
+ cursor = auprobe->insn + insn_offset_modrm(insn);
+ insn_get_length(insn);
+
+ /*
+ * Convert from rip-relative addressing to indirect addressing
+ * via a scratch register. Change the r/m field from 0x5 (%rip)
+ * to 0x0 (%rax) or 0x1 (%rcx), and squeeze out the offset field.
+ */
+ reg = MODRM_REG(insn);
+ if (reg == 0) {
+ /*
+ * The register operand (if any) is either the A register
+ * (%rax, %eax, etc.) or (if the 0x4 bit is set in the
+ * REX prefix) %r8. In any case, we know the C register
+ * is NOT the register operand, so we use %rcx (register
+ * #1) for the scratch register.
+ */
+ auprobe->fixups = UPROBE_FIX_RIP_CX;
+ /* Change modrm from 00 000 101 to 00 000 001. */
+ *cursor = 0x1;
+ } else {
+ /* Use %rax (register #0) for the scratch register. */
+ auprobe->fixups = UPROBE_FIX_RIP_AX;
+ /* Change modrm from 00 xxx 101 to 00 xxx 000 */
+ *cursor = (reg << 3);
+ }
+
+ /* Target address = address of next instruction + (signed) offset */
+ auprobe->rip_rela_target_address = (long)insn->length + insn->displacement.value;
+
+ /* Displacement field is gone; slide immediate field (if any) over. */
+ if (insn->immediate.nbytes) {
+ cursor++;
+ memmove(cursor, cursor + insn->displacement.nbytes, insn->immediate.nbytes);
+ }
+ return;
+}
+
+static int validate_insn_64bits(struct arch_uprobe *auprobe, struct insn *insn)
+{
+ insn_init(insn, auprobe->insn, true);
+
+ /* Skip good instruction prefixes; reject "bad" ones. */
+ insn_get_opcode(insn);
+ if (is_prefix_bad(insn))
+ return -ENOTSUPP;
+
+ if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_64))
+ return 0;
+
+ if (insn->opcode.nbytes == 2) {
+ if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
+ return 0;
+ }
+ return -ENOTSUPP;
+}
+
+static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
+{
+ if (mm->context.ia32_compat)
+ return validate_insn_32bits(auprobe, insn);
+ return validate_insn_64bits(auprobe, insn);
+}
+#else /* 32-bit: */
+static void handle_riprel_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
+{
+ /* No RIP-relative addressing on 32-bit */
+}
+
+static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
+{
+ return validate_insn_32bits(auprobe, insn);
+}
+#endif /* CONFIG_X86_64 */
+
+/**
+ * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
+ * @mm: the probed address space.
+ * @auprobe: the probepoint information.
+ * Return 0 on success or a -ve number on error.
+ */
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm)
+{
+ int ret;
+ struct insn insn;
+
+ auprobe->fixups = 0;
+ ret = validate_insn_bits(auprobe, mm, &insn);
+ if (ret != 0)
+ return ret;
+
+ handle_riprel_insn(auprobe, mm, &insn);
+ prepare_fixups(auprobe, &insn);
+
+ return 0;
+}
+
+#ifdef CONFIG_X86_64
+/*
+ * If we're emulating a rip-relative instruction, save the contents
+ * of the scratch register and store the target address in that register.
+ */
+static void
+pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
+ struct arch_uprobe_task *autask)
+{
+ if (auprobe->fixups & UPROBE_FIX_RIP_AX) {
+ autask->saved_scratch_register = regs->ax;
+ regs->ax = current->utask->vaddr;
+ regs->ax += auprobe->rip_rela_target_address;
+ } else if (auprobe->fixups & UPROBE_FIX_RIP_CX) {
+ autask->saved_scratch_register = regs->cx;
+ regs->cx = current->utask->vaddr;
+ regs->cx += auprobe->rip_rela_target_address;
+ }
+}
+#else
+static void
+pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
+ struct arch_uprobe_task *autask)
+{
+ /* No RIP-relative addressing on 32-bit */
+}
+#endif
+
+/*
+ * arch_uprobe_pre_xol - prepare to execute out of line.
+ * @auprobe: the probepoint information.
+ * @regs: reflects the saved user state of current task.
+ */
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct arch_uprobe_task *autask;
+
+ autask = &current->utask->autask;
+ autask->saved_trap_nr = current->thread.trap_nr;
+ current->thread.trap_nr = UPROBE_TRAP_NR;
+ regs->ip = current->utask->xol_vaddr;
+ pre_xol_rip_insn(auprobe, regs, autask);
+
+ return 0;
+}
+
+/*
+ * This function is called by arch_uprobe_post_xol() to adjust the return
+ * address pushed by a call instruction executed out of line.
+ */
+static int adjust_ret_addr(unsigned long sp, long correction)
+{
+ int rasize, ncopied;
+ long ra = 0;
+
+ if (is_ia32_task())
+ rasize = 4;
+ else
+ rasize = 8;
+
+ ncopied = copy_from_user(&ra, (void __user *)sp, rasize);
+ if (unlikely(ncopied))
+ return -EFAULT;
+
+ ra += correction;
+ ncopied = copy_to_user((void __user *)sp, &ra, rasize);
+ if (unlikely(ncopied))
+ return -EFAULT;
+
+ return 0;
+}
+
+#ifdef CONFIG_X86_64
+static bool is_riprel_insn(struct arch_uprobe *auprobe)
+{
+ return ((auprobe->fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) != 0);
+}
+
+static void
+handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
+{
+ if (is_riprel_insn(auprobe)) {
+ struct arch_uprobe_task *autask;
+
+ autask = &current->utask->autask;
+ if (auprobe->fixups & UPROBE_FIX_RIP_AX)
+ regs->ax = autask->saved_scratch_register;
+ else
+ regs->cx = autask->saved_scratch_register;
+
+ /*
+ * The original instruction includes a displacement, and so
+ * is 4 bytes longer than what we've just single-stepped.
+ * Fall through to handle stuff like "jmpq *...(%rip)" and
+ * "callq *...(%rip)".
+ */
+ if (correction)
+ *correction += 4;
+ }
+}
+#else
+static void
+handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
+{
+ /* No RIP-relative addressing on 32-bit */
+}
+#endif
+
+/*
+ * If the xol insn itself traps and generates a signal (say,
+ * SIGILL/SIGSEGV/etc.), then detect the case where a single-stepped
+ * instruction jumps back to its own address. It is assumed that anything
+ * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
+ *
+ * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
+ * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
+ * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
+ */
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+ if (t->thread.trap_nr != UPROBE_TRAP_NR)
+ return true;
+
+ return false;
+}
+
+/*
+ * Called after single-stepping. To avoid the SMP problems that can
+ * occur when we temporarily put back the original opcode to
+ * single-step, we single-stepped a copy of the instruction.
+ *
+ * This function prepares to resume execution after the single-step.
+ * We have to fix things up as follows:
+ *
+ * Typically, the new ip is relative to the copied instruction. We need
+ * to make it relative to the original instruction (FIX_IP). Exceptions
+ * are return instructions and absolute or indirect jump or call instructions.
+ *
+ * If the single-stepped instruction was a call, the return address that
+ * is atop the stack is the address following the copied instruction. We
+ * need to make it the address following the original instruction (FIX_CALL).
+ *
+ * If the original instruction was a rip-relative instruction such as
+ * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
+ * instruction using a scratch register -- e.g., "movl %edx,(%rax)".
+ * We need to restore the contents of the scratch register and adjust
+ * the ip, keeping in mind that the instruction we executed is 4 bytes
+ * shorter than the original instruction (since we squeezed out the offset
+ * field). (FIX_RIP_AX or FIX_RIP_CX)
+ */
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask;
+ long correction;
+ int result = 0;
+
+ WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
+
+ utask = current->utask;
+ current->thread.trap_nr = utask->autask.saved_trap_nr;
+ correction = (long)(utask->vaddr - utask->xol_vaddr);
+ handle_riprel_post_xol(auprobe, regs, &correction);
+ if (auprobe->fixups & UPROBE_FIX_IP)
+ regs->ip += correction;
+
+ if (auprobe->fixups & UPROBE_FIX_CALL)
+ result = adjust_ret_addr(regs->sp, correction);
+
+ return result;
+}
+
+/* callback routine for handling exceptions. */
+int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
+{
+ struct die_args *args = data;
+ struct pt_regs *regs = args->regs;
+ int ret = NOTIFY_DONE;
+
+ /* We are only interested in userspace traps */
+ if (regs && !user_mode_vm(regs))
+ return NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_INT3:
+ if (uprobe_pre_sstep_notifier(regs))
+ ret = NOTIFY_STOP;
+
+ break;
+
+ case DIE_DEBUG:
+ if (uprobe_post_sstep_notifier(regs))
+ ret = NOTIFY_STOP;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * This function gets called when XOL instruction either gets trapped or
+ * the thread has a fatal signal, so reset the instruction pointer to its
+ * probed address.
+ */
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ current->thread.trap_nr = utask->autask.saved_trap_nr;
+ handle_riprel_post_xol(auprobe, regs, NULL);
+ instruction_pointer_set(regs, utask->vaddr);
+}
+
+/*
+ * Skip these instructions as per the currently known x86 ISA.
+ * 0x66* { 0x90 | 0x0f 0x1f | 0x0f 0x19 | 0x87 0xc0 }
+ */
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ int i;
+
+ for (i = 0; i < MAX_UINSN_BYTES; i++) {
+ if ((auprobe->insn[i] == 0x66))
+ continue;
+
+ if (auprobe->insn[i] == 0x90)
+ return true;
+
+ if (i == (MAX_UINSN_BYTES - 1))
+ break;
+
+ if ((auprobe->insn[i] == 0x0f) && (auprobe->insn[i+1] == 0x1f))
+ return true;
+
+ if ((auprobe->insn[i] == 0x0f) && (auprobe->insn[i+1] == 0x19))
+ return true;
+
+ if ((auprobe->insn[i] == 0x87) && (auprobe->insn[i+1] == 0xc0))
+ return true;
+
+ break;
+ }
+ return false;
+}
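
Editorial note: the good-instruction tables near the top of the new uprobes.c pack one validity bit per opcode into u32 words -- each W(row, ...) line encodes 16 opcodes, two rows share a word, and validate_insn_*bits() probes the result with test_bit(opcode). A small stand-alone sketch of the same packing and lookup, reduced to a 32-opcode toy table:

#include <stdio.h>

/* Same packing idea as the kernel's W() macro: bit i of a row is opcode
 * (row + i); two 16-opcode rows are OR'd together into one 32-bit word. */
#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))

/* Toy table covering opcodes 0x00-0x1f only. */
static const unsigned int good_insns_demo[32 / 32] = {
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) |
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0)
};
#undef W

/* Open-coded equivalent of test_bit() on the packed table. */
static int opcode_ok(unsigned int op)
{
	return (good_insns_demo[op / 32] >> (op % 32)) & 1;
}

int main(void)
{
	printf("0x06 ok? %d\n", opcode_ok(0x06));	/* 1: bit set in row 0x00 */
	printf("0x17 ok? %d\n", opcode_ok(0x17));	/* 0: bit clear in row 0x10 */
	return 0;
}
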
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 0f703f10901a..22a1530146a8 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -197,18 +197,6 @@ SECTIONS
INIT_DATA_SECTION(16)
- /*
- * Code and data for a variety of lowlevel trampolines, to be
- * copied into base memory (< 1 MiB) during initialization.
- * Since it is copied early, the main copy can be discarded
- * afterwards.
- */
- .x86_trampoline : AT(ADDR(.x86_trampoline) - LOAD_OFFSET) {
- x86_trampoline_start = .;
- *(.x86_trampoline)
- x86_trampoline_end = .;
- }
-
.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
__x86_cpu_dev_start = .;
*(.x86_cpu_dev.init)
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 1a7fe868f375..a28f338843ea 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -36,6 +36,7 @@ config KVM
select TASKSTATS
select TASK_DELAY_ACCT
select PERF_EVENTS
+ select HAVE_KVM_MSI
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 9fed5bedaad6..7df1c6d839fb 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -247,7 +247,8 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 7.0.ebx */
const u32 kvm_supported_word9_x86_features =
- F(FSGSBASE) | F(BMI1) | F(AVX2) | F(SMEP) | F(BMI2) | F(ERMS);
+ F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
+ F(BMI2) | F(ERMS) | F(RTM);
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
@@ -397,7 +398,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
case KVM_CPUID_SIGNATURE: {
char signature[12] = "KVMKVMKVM\0\0";
u32 *sigptr = (u32 *)signature;
- entry->eax = 0;
+ entry->eax = KVM_CPUID_FEATURES;
entry->ebx = sigptr[0];
entry->ecx = sigptr[1];
entry->edx = sigptr[2];
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 83756223f8aa..f95d242ee9f7 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -142,6 +142,10 @@
#define Src2FS (OpFS << Src2Shift)
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
+#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
+#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
+#define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
+#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
#define X2(x...) x, x
#define X3(x...) X2(x), x
@@ -557,6 +561,29 @@ static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
+/*
+ * x86 defines three classes of vector instructions: explicitly
+ * aligned, explicitly unaligned, and the rest, which change behaviour
+ * depending on whether they're AVX encoded or not.
+ *
+ * Also included is CMPXCHG16B which is not a vector instruction, yet it is
+ * subject to the same check.
+ */
+static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
+{
+ if (likely(size < 16))
+ return false;
+
+ if (ctxt->d & Aligned)
+ return true;
+ else if (ctxt->d & Unaligned)
+ return false;
+ else if (ctxt->d & Avx)
+ return false;
+ else
+ return true;
+}
+
static int __linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned size, bool write, bool fetch,
@@ -621,6 +648,8 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
}
if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
la &= (u32)-1;
+ if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
+ return emulate_gp(ctxt, 0);
*linear = la;
return X86EMUL_CONTINUE;
bad:
@@ -859,6 +888,40 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
ctxt->ops->put_fpu(ctxt);
}
+static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
+{
+ ctxt->ops->get_fpu(ctxt);
+ switch (reg) {
+ case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
+ case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
+ case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
+ case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
+ case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
+ case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
+ case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
+ case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
+ default: BUG();
+ }
+ ctxt->ops->put_fpu(ctxt);
+}
+
+static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
+{
+ ctxt->ops->get_fpu(ctxt);
+ switch (reg) {
+ case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
+ case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
+ case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
+ case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
+ case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
+ case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
+ case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
+ case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
+ default: BUG();
+ }
+ ctxt->ops->put_fpu(ctxt);
+}
+
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
@@ -875,6 +938,13 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
read_sse_reg(ctxt, &op->vec_val, reg);
return;
}
+ if (ctxt->d & Mmx) {
+ reg &= 7;
+ op->type = OP_MM;
+ op->bytes = 8;
+ op->addr.mm = reg;
+ return;
+ }
op->type = OP_REG;
if (ctxt->d & ByteOp) {
@@ -902,7 +972,6 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */
}
- ctxt->modrm = insn_fetch(u8, ctxt);
ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
ctxt->modrm_rm |= (ctxt->modrm & 0x07);
@@ -920,6 +989,12 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
return rc;
}
+ if (ctxt->d & Mmx) {
+ op->type = OP_MM;
+ op->bytes = 8;
+ op->addr.xmm = ctxt->modrm_rm & 7;
+ return rc;
+ }
fetch_register_operand(op);
return rc;
}
@@ -1387,6 +1462,9 @@ static int writeback(struct x86_emulate_ctxt *ctxt)
case OP_XMM:
write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
break;
+ case OP_MM:
+ write_mmx_reg(ctxt, &ctxt->dst.mm_val, ctxt->dst.addr.mm);
+ break;
case OP_NONE:
/* no writeback */
break;
@@ -2790,7 +2868,7 @@ static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
- ctxt->dst.val = ctxt->src.val;
+ memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes);
return X86EMUL_CONTINUE;
}
@@ -2870,12 +2948,6 @@ static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}
-static int em_movdqu(struct x86_emulate_ctxt *ctxt)
-{
- memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
- return X86EMUL_CONTINUE;
-}
-
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
int rc;
@@ -3061,35 +3133,13 @@ static int em_btc(struct x86_emulate_ctxt *ctxt)
static int em_bsf(struct x86_emulate_ctxt *ctxt)
{
- u8 zf;
-
- __asm__ ("bsf %2, %0; setz %1"
- : "=r"(ctxt->dst.val), "=q"(zf)
- : "r"(ctxt->src.val));
-
- ctxt->eflags &= ~X86_EFLAGS_ZF;
- if (zf) {
- ctxt->eflags |= X86_EFLAGS_ZF;
- /* Disable writeback. */
- ctxt->dst.type = OP_NONE;
- }
+ emulate_2op_SrcV_nobyte(ctxt, "bsf");
return X86EMUL_CONTINUE;
}
static int em_bsr(struct x86_emulate_ctxt *ctxt)
{
- u8 zf;
-
- __asm__ ("bsr %2, %0; setz %1"
- : "=r"(ctxt->dst.val), "=q"(zf)
- : "r"(ctxt->src.val));
-
- ctxt->eflags &= ~X86_EFLAGS_ZF;
- if (zf) {
- ctxt->eflags |= X86_EFLAGS_ZF;
- /* Disable writeback. */
- ctxt->dst.type = OP_NONE;
- }
+ emulate_2op_SrcV_nobyte(ctxt, "bsr");
return X86EMUL_CONTINUE;
}
@@ -3286,8 +3336,8 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
.check_perm = (_p) }
#define N D(0)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
-#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
-#define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
+#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
+#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define II(_f, _e, _i) \
{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
@@ -3307,25 +3357,25 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
static struct opcode group7_rm1[] = {
- DI(SrcNone | ModRM | Priv, monitor),
- DI(SrcNone | ModRM | Priv, mwait),
+ DI(SrcNone | Priv, monitor),
+ DI(SrcNone | Priv, mwait),
N, N, N, N, N, N,
};
static struct opcode group7_rm3[] = {
- DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
- II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
- DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
- DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
- DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
- DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
- DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
- DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
+ DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
+ II(SrcNone | Prot | VendorSpecific, em_vmmcall, vmmcall),
+ DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
+ DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
+ DIP(SrcNone | Prot | Priv, stgi, check_svme),
+ DIP(SrcNone | Prot | Priv, clgi, check_svme),
+ DIP(SrcNone | Prot | Priv, skinit, check_svme),
+ DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};
static struct opcode group7_rm7[] = {
N,
- DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
+ DIP(SrcNone, rdtscp, check_rdtsc),
N, N, N, N, N, N,
};
@@ -3341,81 +3391,86 @@ static struct opcode group1[] = {
};
static struct opcode group1A[] = {
- I(DstMem | SrcNone | ModRM | Mov | Stack, em_pop), N, N, N, N, N, N, N,
+ I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
};
static struct opcode group3[] = {
- I(DstMem | SrcImm | ModRM, em_test),
- I(DstMem | SrcImm | ModRM, em_test),
- I(DstMem | SrcNone | ModRM | Lock, em_not),
- I(DstMem | SrcNone | ModRM | Lock, em_neg),
- I(SrcMem | ModRM, em_mul_ex),
- I(SrcMem | ModRM, em_imul_ex),
- I(SrcMem | ModRM, em_div_ex),
- I(SrcMem | ModRM, em_idiv_ex),
+ I(DstMem | SrcImm, em_test),
+ I(DstMem | SrcImm, em_test),
+ I(DstMem | SrcNone | Lock, em_not),
+ I(DstMem | SrcNone | Lock, em_neg),
+ I(SrcMem, em_mul_ex),
+ I(SrcMem, em_imul_ex),
+ I(SrcMem, em_div_ex),
+ I(SrcMem, em_idiv_ex),
};
static struct opcode group4[] = {
- I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
- I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
+ I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
+ I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
N, N, N, N, N, N,
};
static struct opcode group5[] = {
- I(DstMem | SrcNone | ModRM | Lock, em_grp45),
- I(DstMem | SrcNone | ModRM | Lock, em_grp45),
- I(SrcMem | ModRM | Stack, em_grp45),
- I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
- I(SrcMem | ModRM | Stack, em_grp45),
- I(SrcMemFAddr | ModRM | ImplicitOps, em_grp45),
- I(SrcMem | ModRM | Stack, em_grp45), N,
+ I(DstMem | SrcNone | Lock, em_grp45),
+ I(DstMem | SrcNone | Lock, em_grp45),
+ I(SrcMem | Stack, em_grp45),
+ I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
+ I(SrcMem | Stack, em_grp45),
+ I(SrcMemFAddr | ImplicitOps, em_grp45),
+ I(SrcMem | Stack, em_grp45), N,
};
static struct opcode group6[] = {
- DI(ModRM | Prot, sldt),
- DI(ModRM | Prot, str),
- DI(ModRM | Prot | Priv, lldt),
- DI(ModRM | Prot | Priv, ltr),
+ DI(Prot, sldt),
+ DI(Prot, str),
+ DI(Prot | Priv, lldt),
+ DI(Prot | Priv, ltr),
N, N, N, N,
};
static struct group_dual group7 = { {
- DI(ModRM | Mov | DstMem | Priv, sgdt),
- DI(ModRM | Mov | DstMem | Priv, sidt),
- II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
- II(ModRM | SrcMem | Priv, em_lidt, lidt),
- II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
- II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
- II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
+ DI(Mov | DstMem | Priv, sgdt),
+ DI(Mov | DstMem | Priv, sidt),
+ II(SrcMem | Priv, em_lgdt, lgdt),
+ II(SrcMem | Priv, em_lidt, lidt),
+ II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
+ II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
+ II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
- I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
+ I(SrcNone | Priv | VendorSpecific, em_vmcall),
EXT(0, group7_rm1),
N, EXT(0, group7_rm3),
- II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
- II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
+ II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
+ II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
+ EXT(0, group7_rm7),
} };
static struct opcode group8[] = {
N, N, N, N,
- I(DstMem | SrcImmByte | ModRM, em_bt),
- I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_bts),
- I(DstMem | SrcImmByte | ModRM | Lock, em_btr),
- I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_btc),
+ I(DstMem | SrcImmByte, em_bt),
+ I(DstMem | SrcImmByte | Lock | PageTable, em_bts),
+ I(DstMem | SrcImmByte | Lock, em_btr),
+ I(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};
static struct group_dual group9 = { {
- N, I(DstMem64 | ModRM | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
+ N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
N, N, N, N, N, N, N, N,
} };
static struct opcode group11[] = {
- I(DstMem | SrcImm | ModRM | Mov | PageTable, em_mov),
+ I(DstMem | SrcImm | Mov | PageTable, em_mov),
X7(D(Undefined)),
};
static struct gprefix pfx_0f_6f_0f_7f = {
- N, N, N, I(Sse, em_movdqu),
+ I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
+};
+
+static struct gprefix pfx_vmovntpx = {
+ I(0, em_mov), N, N, N,
};
static struct opcode opcode_table[256] = {
@@ -3464,10 +3519,10 @@ static struct opcode opcode_table[256] = {
/* 0x70 - 0x7F */
X16(D(SrcImmByte)),
/* 0x80 - 0x87 */
- G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
- G(DstMem | SrcImm | ModRM | Group, group1),
- G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
- G(DstMem | SrcImmByte | ModRM | Group, group1),
+ G(ByteOp | DstMem | SrcImm, group1),
+ G(DstMem | SrcImm, group1),
+ G(ByteOp | DstMem | SrcImm | No64, group1),
+ G(DstMem | SrcImmByte, group1),
I2bv(DstMem | SrcReg | ModRM, em_test),
I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
/* 0x88 - 0x8F */
@@ -3549,7 +3604,8 @@ static struct opcode twobyte_table[256] = {
IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write),
IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write),
N, N, N, N,
- N, N, N, N, N, N, N, N,
+ N, N, N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx),
+ N, N, N, N,
/* 0x30 - 0x3F */
II(ImplicitOps | Priv, em_wrmsr, wrmsr),
IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
@@ -3897,17 +3953,16 @@ done_prefixes:
}
ctxt->d = opcode.flags;
+ if (ctxt->d & ModRM)
+ ctxt->modrm = insn_fetch(u8, ctxt);
+
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
- ctxt->modrm = insn_fetch(u8, ctxt);
- --ctxt->_eip;
goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
- ctxt->modrm = insn_fetch(u8, ctxt);
- --ctxt->_eip;
goffset = (ctxt->modrm >> 3) & 7;
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
@@ -3960,6 +4015,8 @@ done_prefixes:
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
+ else if (ctxt->d & Mmx)
+ ctxt->op_bytes = 8;
/* ModRM and SIB bytes. */
if (ctxt->d & ModRM) {
@@ -4030,6 +4087,35 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
return false;
}
+static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
+{
+ bool fault = false;
+
+ ctxt->ops->get_fpu(ctxt);
+ asm volatile("1: fwait \n\t"
+ "2: \n\t"
+ ".pushsection .fixup,\"ax\" \n\t"
+ "3: \n\t"
+ "movb $1, %[fault] \n\t"
+ "jmp 2b \n\t"
+ ".popsection \n\t"
+ _ASM_EXTABLE(1b, 3b)
+ : [fault]"+qm"(fault));
+ ctxt->ops->put_fpu(ctxt);
+
+ if (unlikely(fault))
+ return emulate_exception(ctxt, MF_VECTOR, 0, false);
+
+ return X86EMUL_CONTINUE;
+}
+
+static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
+ struct operand *op)
+{
+ if (op->type == OP_MM)
+ read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
+}
+
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
struct x86_emulate_ops *ops = ctxt->ops;
@@ -4054,18 +4140,31 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
goto done;
}
- if ((ctxt->d & Sse)
- && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
- || !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
+ if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
+ || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
rc = emulate_ud(ctxt);
goto done;
}
- if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
+ if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
rc = emulate_nm(ctxt);
goto done;
}
+ if (ctxt->d & Mmx) {
+ rc = flush_pending_x87_faults(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ goto done;
+ /*
+ * Now that we know the fpu is exception safe, we can fetch
+ * operands from it.
+ */
+ fetch_possible_mmx_operand(ctxt, &ctxt->src);
+ fetch_possible_mmx_operand(ctxt, &ctxt->src2);
+ if (!(ctxt->d & Mov))
+ fetch_possible_mmx_operand(ctxt, &ctxt->dst);
+ }
+
if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_PRE_EXCEPT);
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index d68f99df690c..adba28f88d1a 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -34,7 +34,6 @@
#include <linux/kvm_host.h>
#include <linux/slab.h>
-#include <linux/workqueue.h>
#include "irq.h"
#include "i8254.h"
@@ -249,7 +248,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
/* in this case, we had multiple outstanding pit interrupts
* that we needed to inject. Reinject
*/
- queue_work(ps->pit->wq, &ps->pit->expired);
+ queue_kthread_work(&ps->pit->worker, &ps->pit->expired);
ps->irq_ack = 1;
spin_unlock(&ps->inject_lock);
}
@@ -270,7 +269,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
static void destroy_pit_timer(struct kvm_pit *pit)
{
hrtimer_cancel(&pit->pit_state.pit_timer.timer);
- cancel_work_sync(&pit->expired);
+ flush_kthread_work(&pit->expired);
}
static bool kpit_is_periodic(struct kvm_timer *ktimer)
@@ -284,7 +283,7 @@ static struct kvm_timer_ops kpit_ops = {
.is_periodic = kpit_is_periodic,
};
-static void pit_do_work(struct work_struct *work)
+static void pit_do_work(struct kthread_work *work)
{
struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
struct kvm *kvm = pit->kvm;
@@ -328,7 +327,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
atomic_inc(&ktimer->pending);
- queue_work(pt->wq, &pt->expired);
+ queue_kthread_work(&pt->worker, &pt->expired);
}
if (ktimer->t_ops->is_periodic(ktimer)) {
@@ -353,7 +352,7 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
/* TODO The new value only takes effect after the timer is retriggered */
hrtimer_cancel(&pt->timer);
- cancel_work_sync(&ps->pit->expired);
+ flush_kthread_work(&ps->pit->expired);
pt->period = interval;
ps->is_periodic = is_period;
@@ -669,6 +668,8 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
struct kvm_pit *pit;
struct kvm_kpit_state *pit_state;
+ struct pid *pid;
+ pid_t pid_nr;
int ret;
pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
@@ -685,14 +686,20 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
mutex_lock(&pit->pit_state.lock);
spin_lock_init(&pit->pit_state.inject_lock);
- pit->wq = create_singlethread_workqueue("kvm-pit-wq");
- if (!pit->wq) {
+ pid = get_pid(task_tgid(current));
+ pid_nr = pid_vnr(pid);
+ put_pid(pid);
+
+ init_kthread_worker(&pit->worker);
+ pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
+ "kvm-pit/%d", pid_nr);
+ if (IS_ERR(pit->worker_task)) {
mutex_unlock(&pit->pit_state.lock);
kvm_free_irq_source_id(kvm, pit->irq_source_id);
kfree(pit);
return NULL;
}
- INIT_WORK(&pit->expired, pit_do_work);
+ init_kthread_work(&pit->expired, pit_do_work);
kvm->arch.vpit = pit;
pit->kvm = kvm;
@@ -736,7 +743,7 @@ fail:
kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
kvm_free_irq_source_id(kvm, pit->irq_source_id);
- destroy_workqueue(pit->wq);
+ kthread_stop(pit->worker_task);
kfree(pit);
return NULL;
}
@@ -756,10 +763,10 @@ void kvm_free_pit(struct kvm *kvm)
mutex_lock(&kvm->arch.vpit->pit_state.lock);
timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
hrtimer_cancel(timer);
- cancel_work_sync(&kvm->arch.vpit->expired);
+ flush_kthread_work(&kvm->arch.vpit->expired);
+ kthread_stop(kvm->arch.vpit->worker_task);
kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
- destroy_workqueue(kvm->arch.vpit->wq);
kfree(kvm->arch.vpit);
}
}
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index 51a97426e791..fdf40425ea1d 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -1,6 +1,8 @@
#ifndef __I8254_H
#define __I8254_H
+#include <linux/kthread.h>
+
#include "iodev.h"
struct kvm_kpit_channel_state {
@@ -39,8 +41,9 @@ struct kvm_pit {
struct kvm_kpit_state pit_state;
int irq_source_id;
struct kvm_irq_mask_notifier mask_notifier;
- struct workqueue_struct *wq;
- struct work_struct expired;
+ struct kthread_worker worker;
+ struct task_struct *worker_task;
+ struct kthread_work expired;
};
#define KVM_PIT_BASE_ADDRESS 0x40
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 858432287ab6..93c15743f1ee 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -92,6 +92,11 @@ static inline int apic_test_and_clear_vector(int vec, void *bitmap)
return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
+static inline int apic_test_vector(int vec, void *bitmap)
+{
+ return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
+}
+
static inline void apic_set_vector(int vec, void *bitmap)
{
set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
@@ -480,7 +485,6 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
static void apic_set_eoi(struct kvm_lapic *apic)
{
int vector = apic_find_highest_isr(apic);
- int trigger_mode;
/*
* Not every EOI write has a corresponding ISR bit;
* one example is when the kernel checks the timer on setup_IO_APIC
@@ -491,12 +495,15 @@ static void apic_set_eoi(struct kvm_lapic *apic)
apic_clear_vector(vector, apic->regs + APIC_ISR);
apic_update_ppr(apic);
- if (apic_test_and_clear_vector(vector, apic->regs + APIC_TMR))
- trigger_mode = IOAPIC_LEVEL_TRIG;
- else
- trigger_mode = IOAPIC_EDGE_TRIG;
- if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI))
+ if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
+ kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
+ int trigger_mode;
+ if (apic_test_vector(vector, apic->regs + APIC_TMR))
+ trigger_mode = IOAPIC_LEVEL_TRIG;
+ else
+ trigger_mode = IOAPIC_EDGE_TRIG;
kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
+ }
kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
@@ -1081,6 +1088,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
apic_update_ppr(apic);
vcpu->arch.apic_arb_prio = 0;
+ vcpu->arch.apic_attention = 0;
apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
"0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
@@ -1280,7 +1288,7 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
u32 data;
void *vapic;
- if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
+ if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
@@ -1297,7 +1305,7 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
struct kvm_lapic *apic;
void *vapic;
- if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
+ if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
apic = vcpu->arch.apic;
@@ -1317,10 +1325,11 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
- if (!irqchip_in_kernel(vcpu->kvm))
- return;
-
vcpu->arch.apic->vapic_addr = vapic_addr;
+ if (vapic_addr)
+ __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
+ else
+ __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
}
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4cb164268846..be3cea4407ff 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -135,8 +135,6 @@ module_param(dbg, bool, 0644);
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
| PT64_NX_MASK)
-#define PTE_LIST_EXT 4
-
#define ACC_EXEC_MASK 1
#define ACC_WRITE_MASK PT_WRITABLE_MASK
#define ACC_USER_MASK PT_USER_MASK
@@ -151,6 +149,9 @@ module_param(dbg, bool, 0644);
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
+/* make pte_list_desc fit well in cache line */
+#define PTE_LIST_EXT 3
+
struct pte_list_desc {
u64 *sptes[PTE_LIST_EXT];
struct pte_list_desc *more;
@@ -550,19 +551,29 @@ static u64 mmu_spte_get_lockless(u64 *sptep)
static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
- rcu_read_lock();
- atomic_inc(&vcpu->kvm->arch.reader_counter);
-
- /* Increase the counter before walking shadow page table */
- smp_mb__after_atomic_inc();
+ /*
+ * Prevent page table teardown by making any free-er wait during
+ * kvm_flush_remote_tlbs() IPI to all active vcpus.
+ */
+ local_irq_disable();
+ vcpu->mode = READING_SHADOW_PAGE_TABLES;
+ /*
+ * Make sure a following spte read is not reordered ahead of the write
+ * to vcpu->mode.
+ */
+ smp_mb();
}
static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
- /* Decrease the counter after walking shadow page table finished */
- smp_mb__before_atomic_dec();
- atomic_dec(&vcpu->kvm->arch.reader_counter);
- rcu_read_unlock();
+ /*
+ * Make sure the write to vcpu->mode is not reordered in front of
+ * reads to sptes. If it is, kvm_mmu_commit_zap_page() can see us
+ * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
+ */
+ smp_mb();
+ vcpu->mode = OUTSIDE_GUEST_MODE;
+ local_irq_enable();
}
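/*
 * Editor's sketch (standalone, not part of the patch) of the ordering argument
 * behind walk_shadow_page_lockless_begin()/end() above and the matching barrier
 * in kvm_mmu_commit_zap_page() further down: the walker publishes that it is
 * reading before it checks whether the page was retired, and the freer retires
 * first and then waits for walkers, with full fences on both sides so at least
 * one of them observes the other.  The busy-wait stands in for
 * kvm_flush_remote_tlbs(); names and types here are toys.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum { OUTSIDE, READING };

static atomic_int mode = OUTSIDE;
static atomic_bool retired = false;
static int *table;                          /* stands in for the shadow page table */

static void *walker(void *arg)
{
	(void)arg;
	atomic_store(&mode, READING);                       /* lockless_begin() */
	atomic_thread_fence(memory_order_seq_cst);

	if (!atomic_load(&retired))
		printf("walker saw %d\n", table[0]);        /* safe: freer must wait for us */

	atomic_thread_fence(memory_order_seq_cst);          /* lockless_end() */
	atomic_store(&mode, OUTSIDE);
	return NULL;
}

static void *freer(void *arg)
{
	(void)arg;
	atomic_store(&retired, true);                       /* unlink the page */
	atomic_thread_fence(memory_order_seq_cst);
	while (atomic_load(&mode) == READING)
		;                                           /* plays the role of the TLB-flush IPI */
	free(table);                                        /* now no walker can be using it */
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	table = calloc(1, sizeof(*table));
	table[0] = 42;
	pthread_create(&t1, NULL, walker, NULL);
	pthread_create(&t2, NULL, freer, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}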
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -841,32 +852,6 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
return count;
}
-static u64 *pte_list_next(unsigned long *pte_list, u64 *spte)
-{
- struct pte_list_desc *desc;
- u64 *prev_spte;
- int i;
-
- if (!*pte_list)
- return NULL;
- else if (!(*pte_list & 1)) {
- if (!spte)
- return (u64 *)*pte_list;
- return NULL;
- }
- desc = (struct pte_list_desc *)(*pte_list & ~1ul);
- prev_spte = NULL;
- while (desc) {
- for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
- if (prev_spte == spte)
- return desc->sptes[i];
- prev_spte = desc->sptes[i];
- }
- desc = desc->more;
- }
- return NULL;
-}
-
static void
pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
int i, struct pte_list_desc *prev_desc)
@@ -987,11 +972,6 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
return pte_list_add(vcpu, spte, rmapp);
}
-static u64 *rmap_next(unsigned long *rmapp, u64 *spte)
-{
- return pte_list_next(rmapp, spte);
-}
-
static void rmap_remove(struct kvm *kvm, u64 *spte)
{
struct kvm_mmu_page *sp;
@@ -1004,106 +984,201 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
pte_list_remove(spte, rmapp);
}
+/*
+ * Used by the following functions to iterate through the sptes linked by a
+ * rmap. All fields are private and not assumed to be used outside.
+ */
+struct rmap_iterator {
+ /* private fields */
+ struct pte_list_desc *desc; /* holds the sptep if not NULL */
+ int pos; /* index of the sptep */
+};
+
+/*
+ * Iteration must be started by this function. This should also be used after
+ * removing/dropping sptes from the rmap link because in such cases the
+ * information in the iterator may not be valid.
+ *
+ * Returns sptep if found, NULL otherwise.
+ */
+static u64 *rmap_get_first(unsigned long rmap, struct rmap_iterator *iter)
+{
+ if (!rmap)
+ return NULL;
+
+ if (!(rmap & 1)) {
+ iter->desc = NULL;
+ return (u64 *)rmap;
+ }
+
+ iter->desc = (struct pte_list_desc *)(rmap & ~1ul);
+ iter->pos = 0;
+ return iter->desc->sptes[iter->pos];
+}
+
+/*
+ * Must be used with a valid iterator: e.g. after rmap_get_first().
+ *
+ * Returns sptep if found, NULL otherwise.
+ */
+static u64 *rmap_get_next(struct rmap_iterator *iter)
+{
+ if (iter->desc) {
+ if (iter->pos < PTE_LIST_EXT - 1) {
+ u64 *sptep;
+
+ ++iter->pos;
+ sptep = iter->desc->sptes[iter->pos];
+ if (sptep)
+ return sptep;
+ }
+
+ iter->desc = iter->desc->more;
+
+ if (iter->desc) {
+ iter->pos = 0;
+ /* desc->sptes[0] cannot be NULL */
+ return iter->desc->sptes[iter->pos];
+ }
+ }
+
+ return NULL;
+}
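/*
 * Editor's sketch (standalone, not part of the patch) of the tagged-pointer
 * iteration scheme used by rmap_get_first()/rmap_get_next() above: a head word
 * is either NULL, a direct pointer to a single entry (low bit clear), or a
 * pointer to a descriptor chain with the low bit set.  Names and sizes here
 * are illustrative, not the kernel's.
 */
#include <stdio.h>

#define LIST_EXT 3                              /* entries per descriptor */

struct list_desc {
	unsigned long *slots[LIST_EXT];         /* NULL-terminated within a desc */
	struct list_desc *more;
};

struct list_iter {
	struct list_desc *desc;                 /* NULL when head held one entry */
	int pos;
};

static unsigned long *iter_first(unsigned long head, struct list_iter *it)
{
	if (!head)
		return NULL;
	if (!(head & 1)) {                      /* single entry stored directly */
		it->desc = NULL;
		return (unsigned long *)head;
	}
	it->desc = (struct list_desc *)(head & ~1ul);
	it->pos = 0;
	return it->desc->slots[0];
}

static unsigned long *iter_next(struct list_iter *it)
{
	if (!it->desc)
		return NULL;
	if (it->pos < LIST_EXT - 1 && it->desc->slots[it->pos + 1])
		return it->desc->slots[++it->pos];
	it->desc = it->desc->more;              /* move to the next descriptor */
	it->pos = 0;
	return it->desc ? it->desc->slots[0] : NULL;
}

int main(void)
{
	unsigned long vals[4] = { 10, 20, 30, 40 };
	struct list_desc d2 = { { &vals[3], NULL, NULL }, NULL };
	struct list_desc d1 = { { &vals[0], &vals[1], &vals[2] }, &d2 };
	unsigned long head = (unsigned long)&d1 | 1;    /* tagged chain head */
	struct list_iter it;
	unsigned long *p;

	for (p = iter_first(head, &it); p; p = iter_next(&it))
		printf("%lu\n", *p);                    /* prints 10 20 30 40 */
	return 0;
}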
+
static void drop_spte(struct kvm *kvm, u64 *sptep)
{
if (mmu_spte_clear_track_bits(sptep))
rmap_remove(kvm, sptep);
}
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
- struct kvm_memory_slot *slot)
+static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
{
- unsigned long *rmapp;
- u64 *spte;
- int i, write_protected = 0;
-
- rmapp = __gfn_to_rmap(gfn, PT_PAGE_TABLE_LEVEL, slot);
- spte = rmap_next(rmapp, NULL);
- while (spte) {
- BUG_ON(!(*spte & PT_PRESENT_MASK));
- rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
- if (is_writable_pte(*spte)) {
- mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK);
- write_protected = 1;
+ u64 *sptep;
+ struct rmap_iterator iter;
+ int write_protected = 0;
+
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+ BUG_ON(!(*sptep & PT_PRESENT_MASK));
+ rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
+
+ if (!is_writable_pte(*sptep)) {
+ sptep = rmap_get_next(&iter);
+ continue;
}
- spte = rmap_next(rmapp, spte);
- }
- /* check for huge page mappings */
- for (i = PT_DIRECTORY_LEVEL;
- i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
- rmapp = __gfn_to_rmap(gfn, i, slot);
- spte = rmap_next(rmapp, NULL);
- while (spte) {
- BUG_ON(!(*spte & PT_PRESENT_MASK));
- BUG_ON(!is_large_pte(*spte));
- pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
- if (is_writable_pte(*spte)) {
- drop_spte(kvm, spte);
- --kvm->stat.lpages;
- spte = NULL;
- write_protected = 1;
- }
- spte = rmap_next(rmapp, spte);
+ if (level == PT_PAGE_TABLE_LEVEL) {
+ mmu_spte_update(sptep, *sptep & ~PT_WRITABLE_MASK);
+ sptep = rmap_get_next(&iter);
+ } else {
+ BUG_ON(!is_large_pte(*sptep));
+ drop_spte(kvm, sptep);
+ --kvm->stat.lpages;
+ sptep = rmap_get_first(*rmapp, &iter);
}
+
+ write_protected = 1;
}
return write_protected;
}
+/**
+ * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
+ * @kvm: kvm instance
+ * @slot: slot to protect
+ * @gfn_offset: start of the BITS_PER_LONG pages we care about
+ * @mask: indicates which pages we should protect
+ *
+ * Used when we do not need to care about huge page mappings: e.g. during dirty
+ * logging we do not have any such mappings.
+ */
+void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask)
+{
+ unsigned long *rmapp;
+
+ while (mask) {
+ rmapp = &slot->rmap[gfn_offset + __ffs(mask)];
+ __rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL);
+
+ /* clear the first set bit */
+ mask &= mask - 1;
+ }
+}
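/*
 * Editor's sketch (standalone, not part of the patch) of the bit-iteration
 * idiom in kvm_mmu_write_protect_pt_masked() above: pick the lowest set bit
 * with a count-trailing-zeros (the kernel's __ffs), then clear it with
 * mask &= mask - 1.  Purely illustrative.
 */
#include <stdio.h>

static void for_each_set_bit_in(unsigned long gfn_offset, unsigned long mask)
{
	while (mask) {
		unsigned long bit = __builtin_ctzl(mask);   /* lowest set bit */

		printf("protect gfn %lu\n", gfn_offset + bit);
		mask &= mask - 1;                           /* clear that bit */
	}
}

int main(void)
{
	for_each_set_bit_in(64, 0x15);      /* bits 0, 2, 4 -> gfns 64, 66, 68 */
	return 0;
}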
+
static int rmap_write_protect(struct kvm *kvm, u64 gfn)
{
struct kvm_memory_slot *slot;
+ unsigned long *rmapp;
+ int i;
+ int write_protected = 0;
slot = gfn_to_memslot(kvm, gfn);
- return kvm_mmu_rmap_write_protect(kvm, gfn, slot);
+
+ for (i = PT_PAGE_TABLE_LEVEL;
+ i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+ rmapp = __gfn_to_rmap(gfn, i, slot);
+ write_protected |= __rmap_write_protect(kvm, rmapp, i);
+ }
+
+ return write_protected;
}
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long data)
{
- u64 *spte;
+ u64 *sptep;
+ struct rmap_iterator iter;
int need_tlb_flush = 0;
- while ((spte = rmap_next(rmapp, NULL))) {
- BUG_ON(!(*spte & PT_PRESENT_MASK));
- rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
- drop_spte(kvm, spte);
+ while ((sptep = rmap_get_first(*rmapp, &iter))) {
+ BUG_ON(!(*sptep & PT_PRESENT_MASK));
+ rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", sptep, *sptep);
+
+ drop_spte(kvm, sptep);
need_tlb_flush = 1;
}
+
return need_tlb_flush;
}
static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long data)
{
+ u64 *sptep;
+ struct rmap_iterator iter;
int need_flush = 0;
- u64 *spte, new_spte;
+ u64 new_spte;
pte_t *ptep = (pte_t *)data;
pfn_t new_pfn;
WARN_ON(pte_huge(*ptep));
new_pfn = pte_pfn(*ptep);
- spte = rmap_next(rmapp, NULL);
- while (spte) {
- BUG_ON(!is_shadow_present_pte(*spte));
- rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
+
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+ BUG_ON(!is_shadow_present_pte(*sptep));
+ rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", sptep, *sptep);
+
need_flush = 1;
+
if (pte_write(*ptep)) {
- drop_spte(kvm, spte);
- spte = rmap_next(rmapp, NULL);
+ drop_spte(kvm, sptep);
+ sptep = rmap_get_first(*rmapp, &iter);
} else {
- new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
+ new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
new_spte |= (u64)new_pfn << PAGE_SHIFT;
new_spte &= ~PT_WRITABLE_MASK;
new_spte &= ~SPTE_HOST_WRITEABLE;
new_spte &= ~shadow_accessed_mask;
- mmu_spte_clear_track_bits(spte);
- mmu_spte_set(spte, new_spte);
- spte = rmap_next(rmapp, spte);
+
+ mmu_spte_clear_track_bits(sptep);
+ mmu_spte_set(sptep, new_spte);
+ sptep = rmap_get_next(&iter);
}
}
+
if (need_flush)
kvm_flush_remote_tlbs(kvm);
@@ -1162,7 +1237,8 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long data)
{
- u64 *spte;
+ u64 *sptep;
+ struct rmap_iterator iter;
int young = 0;
/*
@@ -1175,25 +1251,24 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
if (!shadow_accessed_mask)
return kvm_unmap_rmapp(kvm, rmapp, data);
- spte = rmap_next(rmapp, NULL);
- while (spte) {
- int _young;
- u64 _spte = *spte;
- BUG_ON(!(_spte & PT_PRESENT_MASK));
- _young = _spte & PT_ACCESSED_MASK;
- if (_young) {
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;
+ sptep = rmap_get_next(&iter)) {
+ BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+ if (*sptep & PT_ACCESSED_MASK) {
young = 1;
- clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+ clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)sptep);
}
- spte = rmap_next(rmapp, spte);
}
+
return young;
}
static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long data)
{
- u64 *spte;
+ u64 *sptep;
+ struct rmap_iterator iter;
int young = 0;
/*
@@ -1204,16 +1279,14 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
if (!shadow_accessed_mask)
goto out;
- spte = rmap_next(rmapp, NULL);
- while (spte) {
- u64 _spte = *spte;
- BUG_ON(!(_spte & PT_PRESENT_MASK));
- young = _spte & PT_ACCESSED_MASK;
- if (young) {
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;
+ sptep = rmap_get_next(&iter)) {
+ BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+ if (*sptep & PT_ACCESSED_MASK) {
young = 1;
break;
}
- spte = rmap_next(rmapp, spte);
}
out:
return young;
@@ -1865,10 +1938,11 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
- u64 *parent_pte;
+ u64 *sptep;
+ struct rmap_iterator iter;
- while ((parent_pte = pte_list_next(&sp->parent_ptes, NULL)))
- drop_parent_pte(sp, parent_pte);
+ while ((sptep = rmap_get_first(sp->parent_ptes, &iter)))
+ drop_parent_pte(sp, sptep);
}
static int mmu_zap_unsync_children(struct kvm *kvm,
@@ -1925,30 +1999,6 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
return ret;
}
-static void kvm_mmu_isolate_pages(struct list_head *invalid_list)
-{
- struct kvm_mmu_page *sp;
-
- list_for_each_entry(sp, invalid_list, link)
- kvm_mmu_isolate_page(sp);
-}
-
-static void free_pages_rcu(struct rcu_head *head)
-{
- struct kvm_mmu_page *next, *sp;
-
- sp = container_of(head, struct kvm_mmu_page, rcu);
- while (sp) {
- if (!list_empty(&sp->link))
- next = list_first_entry(&sp->link,
- struct kvm_mmu_page, link);
- else
- next = NULL;
- kvm_mmu_free_page(sp);
- sp = next;
- }
-}
-
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list)
{
@@ -1957,17 +2007,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
if (list_empty(invalid_list))
return;
- kvm_flush_remote_tlbs(kvm);
-
- if (atomic_read(&kvm->arch.reader_counter)) {
- kvm_mmu_isolate_pages(invalid_list);
- sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
- list_del_init(invalid_list);
+ /*
+ * wmb: make sure everyone sees our modifications to the page tables
+ * rmb: make sure we see changes to vcpu->mode
+ */
+ smp_mb();
- trace_kvm_mmu_delay_free_pages(sp);
- call_rcu(&sp->rcu, free_pages_rcu);
- return;
- }
+ /*
+ * Wait for all vcpus to exit guest mode and/or lockless shadow
+ * page table walks.
+ */
+ kvm_flush_remote_tlbs(kvm);
do {
sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
@@ -1975,7 +2025,6 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
kvm_mmu_isolate_page(sp);
kvm_mmu_free_page(sp);
} while (!list_empty(invalid_list));
-
}
/*
@@ -2546,8 +2595,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
*gfnp = gfn;
kvm_release_pfn_clean(pfn);
pfn &= ~mask;
- if (!get_page_unless_zero(pfn_to_page(pfn)))
- BUG();
+ kvm_get_pfn(pfn);
*pfnp = pfn;
}
}
@@ -3554,7 +3602,7 @@ static bool detect_write_flooding(struct kvm_mmu_page *sp)
* Skip write-flooding detected for the sp whose level is 1, because
* it can become unsync, then the guest page is not write-protected.
*/
- if (sp->role.level == 1)
+ if (sp->role.level == PT_PAGE_TABLE_LEVEL)
return false;
return ++sp->write_flooding_count >= 3;
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 715da5a19a5b..7d7d0b9e23eb 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -192,7 +192,8 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
struct kvm_memory_slot *slot;
unsigned long *rmapp;
- u64 *spte;
+ u64 *sptep;
+ struct rmap_iterator iter;
if (sp->role.direct || sp->unsync || sp->role.invalid)
return;
@@ -200,13 +201,12 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
slot = gfn_to_memslot(kvm, sp->gfn);
rmapp = &slot->rmap[sp->gfn - slot->base_gfn];
- spte = rmap_next(rmapp, NULL);
- while (spte) {
- if (is_writable_pte(*spte))
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;
+ sptep = rmap_get_next(&iter)) {
+ if (is_writable_pte(*sptep))
audit_printk(kvm, "shadow page has writable "
"mappings: gfn %llx role %x\n",
sp->gfn, sp->role.word);
- spte = rmap_next(rmapp, spte);
}
}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index df5a70311be8..34f970937ef1 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -658,7 +658,7 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
int offset = 0;
- WARN_ON(sp->role.level != 1);
+ WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
if (PTTYPE == 32)
offset = sp->role.quadrant << PT64_LEVEL_BITS;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e334389e1c75..f75af406b268 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -22,6 +22,7 @@
#include "x86.h"
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
@@ -42,6 +43,12 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+static const struct x86_cpu_id svm_cpu_id[] = {
+ X86_FEATURE_MATCH(X86_FEATURE_SVM),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
+
#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1
@@ -3240,6 +3247,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
svm_clear_vintr(svm);
svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
mark_dirty(svm->vmcb, VMCB_INTR);
+ ++svm->vcpu.stat.irq_window_exits;
/*
* If the user space waits to inject interrupts, exit as soon as
* possible
@@ -3247,7 +3255,6 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
if (!irqchip_in_kernel(svm->vcpu.kvm) &&
kvm_run->request_interrupt_window &&
!kvm_cpu_has_interrupt(&svm->vcpu)) {
- ++svm->vcpu.stat.irq_window_exits;
kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
return 0;
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4ff0ab9bc3c8..32eb58866292 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -27,6 +27,7 @@
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>
#include <linux/tboot.h>
@@ -51,6 +52,12 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+static const struct x86_cpu_id vmx_cpu_id[] = {
+ X86_FEATURE_MATCH(X86_FEATURE_VMX),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+
static bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);
@@ -386,6 +393,9 @@ struct vcpu_vmx {
struct {
int loaded;
u16 fs_sel, gs_sel, ldt_sel;
+#ifdef CONFIG_X86_64
+ u16 ds_sel, es_sel;
+#endif
int gs_ldt_reload_needed;
int fs_reload_needed;
} host_state;
@@ -1411,6 +1421,11 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
}
#ifdef CONFIG_X86_64
+ savesegment(ds, vmx->host_state.ds_sel);
+ savesegment(es, vmx->host_state.es_sel);
+#endif
+
+#ifdef CONFIG_X86_64
vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
@@ -1450,6 +1465,19 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
}
if (vmx->host_state.fs_reload_needed)
loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+ if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
+ loadsegment(ds, vmx->host_state.ds_sel);
+ loadsegment(es, vmx->host_state.es_sel);
+ }
+#else
+ /*
+ * The sysexit path does not restore ds/es, so we must set them to
+ * a reasonable value ourselves.
+ */
+ loadsegment(ds, __USER_DS);
+ loadsegment(es, __USER_DS);
+#endif
reload_tss();
#ifdef CONFIG_X86_64
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
@@ -3633,8 +3661,18 @@ static void vmx_set_constant_host_state(void)
vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
+#ifdef CONFIG_X86_64
+ /*
+ * Load null selectors, so we can avoid reloading them in
+ * __vmx_load_host_state(), in case userspace uses the null selectors
+ * too (the expected case).
+ */
+ vmcs_write16(HOST_DS_SELECTOR, 0);
+ vmcs_write16(HOST_ES_SELECTOR, 0);
+#else
vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+#endif
vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
@@ -6256,7 +6294,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
}
}
- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
vmx->loaded_vmcs->launched = 1;
vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
@@ -6343,7 +6380,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
return &vmx->vcpu;
free_vmcs:
- free_vmcs(vmx->loaded_vmcs->vmcs);
+ free_loaded_vmcs(vmx->loaded_vmcs);
free_msrs:
kfree(vmx->guest_msrs);
uninit_vcpu:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 185a2b823a2d..be6d54929fa7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2147,6 +2147,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_ASYNC_PF:
case KVM_CAP_GET_TSC_KHZ:
case KVM_CAP_PCI_2_3:
+ case KVM_CAP_KVMCLOCK_CTRL:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -2597,6 +2598,23 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
return r;
}
+/*
+ * kvm_set_guest_paused() indicates to the guest kernel that it has been
+ * stopped by the hypervisor. This function will be called from the host only.
+ * EINVAL is returned when the host attempts to set the flag for a guest that
+ * does not support pv clocks.
+ */
+static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
+{
+ struct pvclock_vcpu_time_info *src = &vcpu->arch.hv_clock;
+ if (!vcpu->arch.time_page)
+ return -EINVAL;
+ src->flags |= PVCLOCK_GUEST_STOPPED;
+ mark_page_dirty(vcpu->kvm, vcpu->arch.time >> PAGE_SHIFT);
+ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+ return 0;
+}
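/*
 * Editor's sketch (standalone, not part of the patch) of how a userspace VMM
 * might use the KVM_KVMCLOCK_CTRL ioctl added here, after resuming a vCPU it
 * had stopped; vcpu_fd is assumed to be an already-created KVM vCPU file
 * descriptor and the headers are assumed to carry the new define.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void mark_vcpu_paused(int vcpu_fd)
{
	if (ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0) < 0)
		perror("KVM_KVMCLOCK_CTRL");    /* e.g. EINVAL: guest has no pv clock */
}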
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2873,6 +2891,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = vcpu->arch.virtual_tsc_khz;
goto out;
}
+ case KVM_KVMCLOCK_CTRL: {
+ r = kvm_set_guest_paused(vcpu);
+ goto out;
+ }
default:
r = -EINVAL;
}
@@ -3045,57 +3067,32 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
}
/**
- * write_protect_slot - write protect a slot for dirty logging
- * @kvm: the kvm instance
- * @memslot: the slot we protect
- * @dirty_bitmap: the bitmap indicating which pages are dirty
- * @nr_dirty_pages: the number of dirty pages
+ * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
+ * @kvm: kvm instance
+ * @log: slot id and address to which we copy the log
*
- * We have two ways to find all sptes to protect:
- * 1. Use kvm_mmu_slot_remove_write_access() which walks all shadow pages and
- * checks ones that have a spte mapping a page in the slot.
- * 2. Use kvm_mmu_rmap_write_protect() for each gfn found in the bitmap.
+ * We need to keep in mind that VCPU threads can write to the bitmap
+ * concurrently. So, to avoid losing data, we keep the following order for
+ * each bit:
*
- * Generally speaking, if there are not so many dirty pages compared to the
- * number of shadow pages, we should use the latter.
+ * 1. Take a snapshot of the bit and clear it if needed.
+ * 2. Write protect the corresponding page.
+ * 3. Flush TLB's if needed.
+ * 4. Copy the snapshot to the userspace.
*
- * Note that letting others write into a page marked dirty in the old bitmap
- * by using the remaining tlb entry is not a problem. That page will become
- * write protected again when we flush the tlb and then be reported dirty to
- * the user space by copying the old bitmap.
- */
-static void write_protect_slot(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- unsigned long *dirty_bitmap,
- unsigned long nr_dirty_pages)
-{
- spin_lock(&kvm->mmu_lock);
-
- /* Not many dirty pages compared to # of shadow pages. */
- if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
- unsigned long gfn_offset;
-
- for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
- unsigned long gfn = memslot->base_gfn + gfn_offset;
-
- kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
- }
- kvm_flush_remote_tlbs(kvm);
- } else
- kvm_mmu_slot_remove_write_access(kvm, memslot->id);
-
- spin_unlock(&kvm->mmu_lock);
-}
-
-/*
- * Get (and clear) the dirty memory log for a memory slot.
+ * Between 2 and 3, the guest may write to the page using the remaining TLB
+ * entry. This is not a problem because the page will be reported dirty at
+ * step 4 using the snapshot taken before and step 3 ensures that successive
+ * writes will be logged for the next call.
*/
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log)
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
int r;
struct kvm_memory_slot *memslot;
- unsigned long n, nr_dirty_pages;
+ unsigned long n, i;
+ unsigned long *dirty_bitmap;
+ unsigned long *dirty_bitmap_buffer;
+ bool is_dirty = false;
mutex_lock(&kvm->slots_lock);
@@ -3104,49 +3101,42 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
goto out;
memslot = id_to_memslot(kvm->memslots, log->slot);
+
+ dirty_bitmap = memslot->dirty_bitmap;
r = -ENOENT;
- if (!memslot->dirty_bitmap)
+ if (!dirty_bitmap)
goto out;
n = kvm_dirty_bitmap_bytes(memslot);
- nr_dirty_pages = memslot->nr_dirty_pages;
- /* If nothing is dirty, don't bother messing with page tables. */
- if (nr_dirty_pages) {
- struct kvm_memslots *slots, *old_slots;
- unsigned long *dirty_bitmap, *dirty_bitmap_head;
+ dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
+ memset(dirty_bitmap_buffer, 0, n);
- dirty_bitmap = memslot->dirty_bitmap;
- dirty_bitmap_head = memslot->dirty_bitmap_head;
- if (dirty_bitmap == dirty_bitmap_head)
- dirty_bitmap_head += n / sizeof(long);
- memset(dirty_bitmap_head, 0, n);
+ spin_lock(&kvm->mmu_lock);
- r = -ENOMEM;
- slots = kmemdup(kvm->memslots, sizeof(*kvm->memslots), GFP_KERNEL);
- if (!slots)
- goto out;
+ for (i = 0; i < n / sizeof(long); i++) {
+ unsigned long mask;
+ gfn_t offset;
- memslot = id_to_memslot(slots, log->slot);
- memslot->nr_dirty_pages = 0;
- memslot->dirty_bitmap = dirty_bitmap_head;
- update_memslots(slots, NULL);
+ if (!dirty_bitmap[i])
+ continue;
- old_slots = kvm->memslots;
- rcu_assign_pointer(kvm->memslots, slots);
- synchronize_srcu_expedited(&kvm->srcu);
- kfree(old_slots);
+ is_dirty = true;
- write_protect_slot(kvm, memslot, dirty_bitmap, nr_dirty_pages);
+ mask = xchg(&dirty_bitmap[i], 0);
+ dirty_bitmap_buffer[i] = mask;
- r = -EFAULT;
- if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
- goto out;
- } else {
- r = -EFAULT;
- if (clear_user(log->dirty_bitmap, n))
- goto out;
+ offset = i * BITS_PER_LONG;
+ kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
}
+ if (is_dirty)
+ kvm_flush_remote_tlbs(kvm);
+
+ spin_unlock(&kvm->mmu_lock);
+
+ r = -EFAULT;
+ if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
+ goto out;
r = 0;
out:
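/*
 * Editor's sketch (standalone, not part of the patch) of the snapshot-and-clear
 * step in the rewritten kvm_vm_ioctl_get_dirty_log() above: each bitmap word is
 * atomically exchanged with 0, so bits set concurrently by writers are either
 * captured now or left for the next call.  The write-protection and the copy to
 * userspace are elided; buffer sizes are toys.
 */
#include <stdatomic.h>
#include <stdio.h>

#define WORDS 4

static _Atomic unsigned long dirty_bitmap[WORDS];
static unsigned long dirty_bitmap_buffer[WORDS];

static int snapshot_dirty_log(void)
{
	int is_dirty = 0;

	for (int i = 0; i < WORDS; i++) {
		unsigned long mask = atomic_exchange(&dirty_bitmap[i], 0);

		if (!mask)
			continue;
		is_dirty = 1;
		dirty_bitmap_buffer[i] = mask;
		/* here the kernel write-protects the pages named by mask */
	}
	return is_dirty;
}

int main(void)
{
	atomic_fetch_or(&dirty_bitmap[1], 0x5UL);       /* simulate dirtying */
	if (snapshot_dirty_log())
		printf("word 1 snapshot: %#lx\n", dirty_bitmap_buffer[1]);
	return 0;
}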
@@ -3728,9 +3718,8 @@ struct read_write_emulator_ops {
static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
{
if (vcpu->mmio_read_completed) {
- memcpy(val, vcpu->mmio_data, bytes);
trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
- vcpu->mmio_phys_addr, *(u64 *)val);
+ vcpu->mmio_fragments[0].gpa, *(u64 *)val);
vcpu->mmio_read_completed = 0;
return 1;
}
@@ -3766,8 +3755,9 @@ static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{
- memcpy(vcpu->mmio_data, val, bytes);
- memcpy(vcpu->run->mmio.data, vcpu->mmio_data, 8);
+ struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
+
+ memcpy(vcpu->run->mmio.data, frag->data, frag->len);
return X86EMUL_CONTINUE;
}
@@ -3794,10 +3784,7 @@ static int emulator_read_write_onepage(unsigned long addr, void *val,
gpa_t gpa;
int handled, ret;
bool write = ops->write;
-
- if (ops->read_write_prepare &&
- ops->read_write_prepare(vcpu, val, bytes))
- return X86EMUL_CONTINUE;
+ struct kvm_mmio_fragment *frag;
ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
@@ -3823,15 +3810,19 @@ mmio:
bytes -= handled;
val += handled;
- vcpu->mmio_needed = 1;
- vcpu->run->exit_reason = KVM_EXIT_MMIO;
- vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
- vcpu->mmio_size = bytes;
- vcpu->run->mmio.len = min(vcpu->mmio_size, 8);
- vcpu->run->mmio.is_write = vcpu->mmio_is_write = write;
- vcpu->mmio_index = 0;
+ while (bytes) {
+ unsigned now = min(bytes, 8U);
- return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
+ frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
+ frag->gpa = gpa;
+ frag->data = val;
+ frag->len = now;
+
+ gpa += now;
+ val += now;
+ bytes -= now;
+ }
+ return X86EMUL_CONTINUE;
}
int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
@@ -3840,10 +3831,18 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
struct read_write_emulator_ops *ops)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ gpa_t gpa;
+ int rc;
+
+ if (ops->read_write_prepare &&
+ ops->read_write_prepare(vcpu, val, bytes))
+ return X86EMUL_CONTINUE;
+
+ vcpu->mmio_nr_fragments = 0;
/* Crossing a page boundary? */
if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
- int rc, now;
+ int now;
now = -addr & ~PAGE_MASK;
rc = emulator_read_write_onepage(addr, val, now, exception,
@@ -3856,8 +3855,25 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
bytes -= now;
}
- return emulator_read_write_onepage(addr, val, bytes, exception,
- vcpu, ops);
+ rc = emulator_read_write_onepage(addr, val, bytes, exception,
+ vcpu, ops);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ if (!vcpu->mmio_nr_fragments)
+ return rc;
+
+ gpa = vcpu->mmio_fragments[0].gpa;
+
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_cur_fragment = 0;
+
+ vcpu->run->mmio.len = vcpu->mmio_fragments[0].len;
+ vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
+ vcpu->run->exit_reason = KVM_EXIT_MMIO;
+ vcpu->run->mmio.phys_addr = gpa;
+
+ return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
}
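/*
 * Editor's sketch (standalone, not part of the patch) of the fragment split
 * introduced above: an MMIO access of arbitrary length is chopped into pieces
 * of at most eight bytes, the size one KVM_EXIT_MMIO round trip can carry.
 * Structure and names are illustrative only.
 */
#include <stdio.h>

struct mmio_frag {
	unsigned long gpa;
	unsigned char *data;
	unsigned int len;
};

static int split_mmio(unsigned long gpa, unsigned char *val, unsigned int bytes,
		      struct mmio_frag *frags, int max_frags)
{
	int nr = 0;

	while (bytes) {
		unsigned int now = bytes < 8 ? bytes : 8;

		if (nr == max_frags)
			return -1;              /* would overflow, caller's problem */
		frags[nr].gpa = gpa;
		frags[nr].data = val;
		frags[nr].len = now;
		nr++;

		gpa += now;
		val += now;
		bytes -= now;
	}
	return nr;
}

int main(void)
{
	unsigned char buf[20];
	struct mmio_frag frags[4];
	int nr = split_mmio(0xfee00000, buf, sizeof(buf), frags, 4);

	for (int i = 0; i < nr; i++)
		printf("frag %d: gpa=%#lx len=%u\n", i, frags[i].gpa, frags[i].len);
	return 0;                               /* 20 bytes -> fragments of 8, 8, 4 */
}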
static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
@@ -5263,10 +5279,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_deliver_pmi(vcpu);
}
- r = kvm_mmu_reload(vcpu);
- if (unlikely(r))
- goto out;
-
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
inject_pending_event(vcpu);
@@ -5282,6 +5294,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
}
+ r = kvm_mmu_reload(vcpu);
+ if (unlikely(r)) {
+ kvm_x86_ops->cancel_injection(vcpu);
+ goto out;
+ }
+
preempt_disable();
kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -5456,33 +5474,55 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return r;
}
+/*
+ * Implements the following, as a state machine:
+ *
+ * read:
+ * for each fragment
+ * write gpa, len
+ * exit
+ * copy data
+ * execute insn
+ *
+ * write:
+ * for each fragment
+ * write gpa, len
+ * copy data
+ * exit
+ */
static int complete_mmio(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
+ struct kvm_mmio_fragment *frag;
int r;
if (!(vcpu->arch.pio.count || vcpu->mmio_needed))
return 1;
if (vcpu->mmio_needed) {
- vcpu->mmio_needed = 0;
+ /* Complete previous fragment */
+ frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];
if (!vcpu->mmio_is_write)
- memcpy(vcpu->mmio_data + vcpu->mmio_index,
- run->mmio.data, 8);
- vcpu->mmio_index += 8;
- if (vcpu->mmio_index < vcpu->mmio_size) {
- run->exit_reason = KVM_EXIT_MMIO;
- run->mmio.phys_addr = vcpu->mmio_phys_addr + vcpu->mmio_index;
- memcpy(run->mmio.data, vcpu->mmio_data + vcpu->mmio_index, 8);
- run->mmio.len = min(vcpu->mmio_size - vcpu->mmio_index, 8);
- run->mmio.is_write = vcpu->mmio_is_write;
- vcpu->mmio_needed = 1;
- return 0;
+ memcpy(frag->data, run->mmio.data, frag->len);
+ if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
+ vcpu->mmio_needed = 0;
+ if (vcpu->mmio_is_write)
+ return 1;
+ vcpu->mmio_read_completed = 1;
+ goto done;
}
+ /* Initiate next fragment */
+ ++frag;
+ run->exit_reason = KVM_EXIT_MMIO;
+ run->mmio.phys_addr = frag->gpa;
if (vcpu->mmio_is_write)
- return 1;
- vcpu->mmio_read_completed = 1;
+ memcpy(run->mmio.data, frag->data, frag->len);
+ run->mmio.len = frag->len;
+ run->mmio.is_write = vcpu->mmio_is_write;
+ return 0;
+
}
+done:
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
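/*
 * Editor's sketch (standalone, not part of the patch) of the userspace half of
 * the state machine described above complete_mmio(): each fragment surfaces as
 * one KVM_EXIT_MMIO, and the VMM services whatever gpa/len the kernel
 * advertises before calling KVM_RUN again.  vcpu_fd and the mmapped struct
 * kvm_run are assumed to exist already; device_read()/device_write() stand in
 * for the VMM's device model.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void device_write(unsigned long long gpa, const void *data, unsigned int len)
{
	(void)data;
	printf("mmio write %u bytes @ %#llx\n", len, gpa);
}

static void device_read(unsigned long long gpa, void *data, unsigned int len)
{
	printf("mmio read %u bytes @ %#llx\n", len, gpa);
	memset(data, 0, len);                   /* pretend the device returns zeros */
}

static int run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;

		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			if (run->mmio.is_write)
				device_write(run->mmio.phys_addr,
					     run->mmio.data, run->mmio.len);
			else
				device_read(run->mmio.phys_addr,
					     run->mmio.data, run->mmio.len);
			break;                  /* next KVM_RUN picks up the next fragment */
		default:
			return 0;               /* hand any other exit to the caller */
		}
	}
}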
@@ -6399,21 +6439,9 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
kvm_cpu_has_interrupt(vcpu));
}
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
- int me;
- int cpu = vcpu->cpu;
-
- if (waitqueue_active(&vcpu->wq)) {
- wake_up_interruptible(&vcpu->wq);
- ++vcpu->stat.halt_wakeup;
- }
-
- me = get_cpu();
- if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
- if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
- smp_send_reschedule(cpu);
- put_cpu();
+ return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index cb80c293cdd8..3d1134ddb885 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -64,7 +64,7 @@ static inline int is_pse(struct kvm_vcpu *vcpu)
static inline int is_paging(struct kvm_vcpu *vcpu)
{
- return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
+ return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}
static inline u32 bit(int bitno)
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index 2e4e4b02c37a..f61ee67ec00f 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -43,100 +43,3 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
return len;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
-
-/*
- * Do a strncpy, return length of string without final '\0'.
- * 'count' is the user-supplied count (return 'count' if we
- * hit it), 'max' is the address space maximum (and we return
- * -EFAULT if we hit it).
- */
-static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
-{
- long res = 0;
-
- /*
- * Truncate 'max' to the user-specified limit, so that
- * we only have one limit we need to check in the loop
- */
- if (max > count)
- max = count;
-
- while (max >= sizeof(unsigned long)) {
- unsigned long c, mask;
-
- /* Fall back to byte-at-a-time if we get a page fault */
- if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
- break;
- mask = has_zero(c);
- if (mask) {
- mask = (mask - 1) & ~mask;
- mask >>= 7;
- *(unsigned long *)(dst+res) = c & mask;
- return res + count_masked_bytes(mask);
- }
- *(unsigned long *)(dst+res) = c;
- res += sizeof(unsigned long);
- max -= sizeof(unsigned long);
- }
-
- while (max) {
- char c;
-
- if (unlikely(__get_user(c,src+res)))
- return -EFAULT;
- dst[res] = c;
- if (!c)
- return res;
- res++;
- max--;
- }
-
- /*
- * Uhhuh. We hit 'max'. But was that the user-specified maximum
- * too? If so, that's ok - we got as much as the user asked for.
- */
- if (res >= count)
- return res;
-
- /*
- * Nope: we hit the address space limit, and we still had more
- * characters the caller would have wanted. That's an EFAULT.
- */
- return -EFAULT;
-}
-
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst: Destination address, in kernel space. This buffer must be at
- * least @count bytes long.
- * @src: Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
- unsigned long max_addr, src_addr;
-
- if (unlikely(count <= 0))
- return 0;
-
- max_addr = current_thread_info()->addr_limit.seg;
- src_addr = (unsigned long)src;
- if (likely(src_addr < max_addr)) {
- unsigned long max = max_addr - src_addr;
- return do_strncpy_from_user(dst, src, count, max);
- }
- return -EFAULT;
-}
-EXPORT_SYMBOL(strncpy_from_user);
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 883b216c60b2..1781b2f950e2 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -95,47 +95,6 @@ __clear_user(void __user *to, unsigned long n)
}
EXPORT_SYMBOL(__clear_user);
-/**
- * strnlen_user: - Get the size of a string in user space.
- * @s: The string to measure.
- * @n: The maximum valid length
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- * If the string is too long, returns a value greater than @n.
- */
-long strnlen_user(const char __user *s, long n)
-{
- unsigned long mask = -__addr_ok(s);
- unsigned long res, tmp;
-
- might_fault();
-
- __asm__ __volatile__(
- " testl %0, %0\n"
- " jz 3f\n"
- " andl %0,%%ecx\n"
- "0: repne; scasb\n"
- " setne %%al\n"
- " subl %%ecx,%0\n"
- " addl %0,%%eax\n"
- "1:\n"
- ".section .fixup,\"ax\"\n"
- "2: xorl %%eax,%%eax\n"
- " jmp 1b\n"
- "3: movb $1,%%al\n"
- " jmp 1b\n"
- ".previous\n"
- _ASM_EXTABLE(0b,2b)
- :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
- :"0" (n), "1" (s), "2" (0), "3" (mask)
- :"cc");
- return res & mask;
-}
-EXPORT_SYMBOL(strnlen_user);
-
#ifdef CONFIG_X86_INTEL_USERCOPY
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 0d0326f388c0..e5b130bc2d0e 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -52,54 +52,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
}
EXPORT_SYMBOL(clear_user);
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-
-long __strnlen_user(const char __user *s, long n)
-{
- long res = 0;
- char c;
-
- while (1) {
- if (res>n)
- return n+1;
- if (__get_user(c, s))
- return 0;
- if (!c)
- return res+1;
- res++;
- s++;
- }
-}
-EXPORT_SYMBOL(__strnlen_user);
-
-long strnlen_user(const char __user *s, long n)
-{
- if (!access_ok(VERIFY_READ, s, 1))
- return 0;
- return __strnlen_user(s, n);
-}
-EXPORT_SYMBOL(strnlen_user);
-
-long strlen_user(const char __user *s)
-{
- long res = 0;
- char c;
-
- for (;;) {
- if (get_user(c, s))
- return 0;
- if (!c)
- return res+1;
- res++;
- s++;
- }
-}
-EXPORT_SYMBOL(strlen_user);
-
unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
{
if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 319b6f2fb8b9..97141c26a13a 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -84,8 +84,9 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
pgt_buf_end = pgt_buf_start;
pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
- printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
- end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
+ printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
+ end - 1, pgt_buf_start << PAGE_SHIFT,
+ (pgt_buf_top << PAGE_SHIFT) - 1);
}
void __init native_pagetable_reserve(u64 start, u64 end)
@@ -132,7 +133,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
int nr_range, i;
int use_pse, use_gbpages;
- printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
+ printk(KERN_INFO "init_memory_mapping: [mem %#010lx-%#010lx]\n",
+ start, end - 1);
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
/*
@@ -251,8 +253,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
}
for (i = 0; i < nr_range; i++)
- printk(KERN_DEBUG " %010lx - %010lx page %s\n",
- mr[i].start, mr[i].end,
+ printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
+ mr[i].start, mr[i].end - 1,
(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
(mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
@@ -350,8 +352,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
* create a kernel page fault:
*/
#ifdef CONFIG_DEBUG_PAGEALLOC
- printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
- begin, end);
+ printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n",
+ begin, end - 1);
set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
/*
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 19d3fa08b119..2d125be1bae9 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -141,8 +141,8 @@ static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
/* whine about and ignore invalid blks */
if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
- pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
- nid, start, end);
+ pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
+ nid, start, end - 1);
return 0;
}
@@ -210,8 +210,8 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
start = roundup(start, ZONE_ALIGN);
- printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n",
- nid, start, end);
+ printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
+ nid, start, end - 1);
/*
* Allocate node data. Try remap allocator first, node-local
@@ -232,7 +232,7 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
}
/* report and initialize */
- printk(KERN_INFO " NODE_DATA [%016Lx - %016Lx]%s\n",
+ printk(KERN_INFO " NODE_DATA [mem %#010Lx-%#010Lx]%s\n",
nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
if (!remapped && tnid != nid)
@@ -291,14 +291,14 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
*/
if (bi->end > bj->start && bi->start < bj->end) {
if (bi->nid != bj->nid) {
- pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
- bi->nid, bi->start, bi->end,
- bj->nid, bj->start, bj->end);
+ pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
+ bi->nid, bi->start, bi->end - 1,
+ bj->nid, bj->start, bj->end - 1);
return -EINVAL;
}
- pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
- bi->nid, bi->start, bi->end,
- bj->start, bj->end);
+ pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
+ bi->nid, bi->start, bi->end - 1,
+ bj->start, bj->end - 1);
}
/*
@@ -320,9 +320,9 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
}
if (k < mi->nr_blks)
continue;
- printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%Lx,%Lx)\n",
- bi->nid, bi->start, bi->end, bj->start, bj->end,
- start, end);
+ printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
+ bi->nid, bi->start, bi->end - 1, bj->start,
+ bj->end - 1, start, end - 1);
bi->start = start;
bi->end = end;
numa_remove_memblk_from(j--, mi);
@@ -616,8 +616,8 @@ static int __init dummy_numa_init(void)
{
printk(KERN_INFO "%s\n",
numa_off ? "NUMA turned off" : "No NUMA configuration found");
- printk(KERN_INFO "Faking a node at %016Lx-%016Lx\n",
- 0LLU, PFN_PHYS(max_pfn));
+ printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
+ 0LLU, PFN_PHYS(max_pfn) - 1);
node_set(0, numa_nodes_parsed);
numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 871dd8868170..dbbbb47260cc 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -68,8 +68,8 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
numa_remove_memblk_from(phys_blk, pi);
}
- printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
- eb->start, eb->end, (eb->end - eb->start) >> 20);
+ printk(KERN_INFO "Faking node %d at [mem %#018Lx-%#018Lx] (%LuMB)\n",
+ nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20);
return 0;
}
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index f6ff57b7efa5..3d68ef6d2266 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -158,31 +158,47 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
return req_type;
}
+struct pagerange_state {
+ unsigned long cur_pfn;
+ int ram;
+ int not_ram;
+};
+
+static int
+pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
+{
+ struct pagerange_state *state = arg;
+
+ state->not_ram |= initial_pfn > state->cur_pfn;
+ state->ram |= total_nr_pages > 0;
+ state->cur_pfn = initial_pfn + total_nr_pages;
+
+ return state->ram && state->not_ram;
+}
+
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
- int ram_page = 0, not_rampage = 0;
- unsigned long page_nr;
+ int ret = 0;
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ struct pagerange_state state = {start_pfn, 0, 0};
- for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
- ++page_nr) {
- /*
- * For legacy reasons, physical address range in the legacy ISA
- * region is tracked as non-RAM. This will allow users of
- * /dev/mem to map portions of legacy ISA region, even when
- * some of those portions are listed(or not even listed) with
- * different e820 types(RAM/reserved/..)
- */
- if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
- page_is_ram(page_nr))
- ram_page = 1;
- else
- not_rampage = 1;
-
- if (ram_page == not_rampage)
- return -1;
+ /*
+ * For legacy reasons, physical address range in the legacy ISA
+ * region is tracked as non-RAM. This will allow users of
+ * /dev/mem to map portions of legacy ISA region, even when
+ * some of those portions are listed(or not even listed) with
+ * different e820 types(RAM/reserved/..)
+ */
+ if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
+ start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;
+
+ if (start_pfn < end_pfn) {
+ ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
+ &state, pagerange_is_ram_callback);
}
- return ram_page;
+ return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}
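/*
 * Editor's sketch (standalone, not part of the patch) of the state machine used
 * by pat_pagerange_is_ram() above: flag "ram" when a RAM chunk is seen and
 * "not_ram" when a gap precedes one, stopping early once both are set (a mixed
 * range).  The toy region list below stands in for walk_system_ram_range().
 */
#include <stdio.h>

struct pagerange_state {
	unsigned long cur_pfn;
	int ram;
	int not_ram;
};

static int is_ram_callback(unsigned long initial_pfn,
			   unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram |= initial_pfn > state->cur_pfn;
	state->ram |= total_nr_pages > 0;
	state->cur_pfn = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;    /* nonzero stops the walk */
}

/* Toy RAM map: two chunks with a hole between them (in pfns). */
static const struct { unsigned long start, nr; } ram_chunks[] = {
	{ 0x100, 0x40 }, { 0x180, 0x80 },
};

static int pagerange_is_ram(unsigned long start_pfn, unsigned long end_pfn)
{
	struct pagerange_state state = { start_pfn, 0, 0 };
	int stopped = 0;

	for (unsigned int i = 0; i < 2 && !stopped; i++) {
		unsigned long s = ram_chunks[i].start, e = s + ram_chunks[i].nr;

		if (e <= start_pfn || s >= end_pfn)
			continue;               /* chunk outside the queried range */
		if (s < start_pfn)
			s = start_pfn;
		if (e > end_pfn)
			e = end_pfn;
		stopped = is_ram_callback(s, e - s, &state);
	}
	return stopped ? -1 : (state.ram ? 1 : 0);
}

int main(void)
{
	printf("%d\n", pagerange_is_ram(0x100, 0x140));   /* 1: all RAM */
	printf("%d\n", pagerange_is_ram(0x100, 0x200));   /* -1: mixed */
	return 0;
}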
/*
@@ -209,9 +225,8 @@ static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
page = pfn_to_page(pfn);
type = get_page_memtype(page);
if (type != -1) {
- printk(KERN_INFO "reserve_ram_pages_type failed "
- "0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
- start, end, type, req_type);
+ printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
+ start, end - 1, type, req_type);
if (new_type)
*new_type = type;
@@ -314,9 +329,9 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
err = rbt_memtype_check_insert(new, new_type);
if (err) {
- printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
- "track %s, req %s\n",
- start, end, cattr_name(new->type), cattr_name(req_type));
+ printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
+ start, end - 1,
+ cattr_name(new->type), cattr_name(req_type));
kfree(new);
spin_unlock(&memtype_lock);
@@ -325,8 +340,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
spin_unlock(&memtype_lock);
- dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
- start, end, cattr_name(new->type), cattr_name(req_type),
+ dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
+ start, end - 1, cattr_name(new->type), cattr_name(req_type),
new_type ? cattr_name(*new_type) : "-");
return err;
@@ -360,14 +375,14 @@ int free_memtype(u64 start, u64 end)
spin_unlock(&memtype_lock);
if (!entry) {
- printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
- current->comm, current->pid, start, end);
+ printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
+ current->comm, current->pid, start, end - 1);
return -EINVAL;
}
kfree(entry);
- dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
+ dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);
return 0;
}
@@ -491,9 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
while (cursor < to) {
if (!devmem_is_allowed(pfn)) {
- printk(KERN_INFO
- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
- current->comm, from, to);
+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
+ current->comm, from, to - 1);
return 0;
}
cursor += PAGE_SIZE;
@@ -554,12 +568,11 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
size;
if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
- printk(KERN_INFO
- "%s:%d ioremap_change_attr failed %s "
- "for %Lx-%Lx\n",
+ printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
+ "for [mem %#010Lx-%#010Lx]\n",
current->comm, current->pid,
cattr_name(flags),
- base, (unsigned long long)(base + size));
+ base, (unsigned long long)(base + size-1));
return -EINVAL;
}
return 0;
@@ -591,12 +604,11 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
flags = lookup_memtype(paddr);
if (want_flags != flags) {
- printk(KERN_WARNING
- "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
+ printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
current->comm, current->pid,
cattr_name(want_flags),
(unsigned long long)paddr,
- (unsigned long long)(paddr + size),
+ (unsigned long long)(paddr + size - 1),
cattr_name(flags));
*vma_prot = __pgprot((pgprot_val(*vma_prot) &
(~_PAGE_CACHE_MASK)) |
@@ -614,11 +626,11 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
!is_new_memtype_allowed(paddr, size, want_flags, flags)) {
free_memtype(paddr, paddr + size);
printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
- " for %Lx-%Lx, got %s\n",
+ " for [mem %#010Lx-%#010Lx], got %s\n",
current->comm, current->pid,
cattr_name(want_flags),
(unsigned long long)paddr,
- (unsigned long long)(paddr + size),
+ (unsigned long long)(paddr + size - 1),
cattr_name(flags));
return -EINVAL;
}
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index efb5b4b93711..732af3a96183 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -176,8 +176,9 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
return;
}
- printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
- start, end);
+ printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
+ node, pxm,
+ (unsigned long long) start, (unsigned long long) end - 1);
}
void __init acpi_numa_arch_fixup(void) {}
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 5dd467bd6121..af8a224db216 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -6,6 +6,7 @@
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/vgaarb.h>
#include <asm/pci_x86.h>
static void __devinit pci_fixup_i450nx(struct pci_dev *d)
@@ -348,6 +349,8 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n");
+ if (!vga_default_device())
+ vga_set_default_device(pdev);
}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 7415aa927913..56ab74989cf1 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -64,6 +64,10 @@ static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
int shareable = 0;
char *name;
+ irq = xen_irq_from_gsi(gsi);
+ if (irq > 0)
+ return irq;
+
if (set_pirq)
pirq = gsi;
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
new file mode 100644
index 000000000000..94f7fbe97b08
--- /dev/null
+++ b/arch/x86/realmode/Makefile
@@ -0,0 +1,18 @@
+#
+# arch/x86/realmode/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+subdir- := rm
+
+obj-y += init.o
+obj-y += rmpiggy.o
+
+$(obj)/rmpiggy.o: $(obj)/rm/realmode.bin
+
+$(obj)/rm/realmode.bin: FORCE
+ $(Q)$(MAKE) $(build)=$(obj)/rm $@
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
new file mode 100644
index 000000000000..cbca565af5bd
--- /dev/null
+++ b/arch/x86/realmode/init.c
@@ -0,0 +1,115 @@
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <asm/realmode.h>
+
+struct real_mode_header *real_mode_header;
+u32 *trampoline_cr4_features;
+
+void __init setup_real_mode(void)
+{
+ phys_addr_t mem;
+ u16 real_mode_seg;
+ u32 *rel;
+ u32 count;
+ u32 *ptr;
+ u16 *seg;
+ int i;
+ unsigned char *base;
+ struct trampoline_header *trampoline_header;
+ size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+#ifdef CONFIG_X86_64
+ u64 *trampoline_pgd;
+ u64 efer;
+#endif
+
+ /* Has to be in very low memory so we can execute real-mode AP code. */
+ mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
+ if (!mem)
+ panic("Cannot allocate trampoline\n");
+
+ base = __va(mem);
+ memblock_reserve(mem, size);
+ real_mode_header = (struct real_mode_header *) base;
+ printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
+ base, (unsigned long long)mem, size);
+
+ memcpy(base, real_mode_blob, size);
+
+ real_mode_seg = __pa(base) >> 4;
+ rel = (u32 *) real_mode_relocs;
+
+ /* 16-bit segment relocations. */
+ count = rel[0];
+ rel = &rel[1];
+ for (i = 0; i < count; i++) {
+ seg = (u16 *) (base + rel[i]);
+ *seg = real_mode_seg;
+ }
+
+ /* 32-bit linear relocations. */
+ count = rel[i];
+ rel = &rel[i + 1];
+ for (i = 0; i < count; i++) {
+ ptr = (u32 *) (base + rel[i]);
+ *ptr += __pa(base);
+ }
+
+ /* Must be performed *after* relocation. */
+ trampoline_header = (struct trampoline_header *)
+ __va(real_mode_header->trampoline_header);
+
+#ifdef CONFIG_X86_32
+ trampoline_header->start = __pa(startup_32_smp);
+ trampoline_header->gdt_limit = __BOOT_DS + 7;
+ trampoline_header->gdt_base = __pa(boot_gdt);
+#else
+ /*
+ * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
+ * so we need to mask it out.
+ */
+ rdmsrl(MSR_EFER, efer);
+ trampoline_header->efer = efer & ~EFER_LMA;
+
+ trampoline_header->start = (u64) secondary_startup_64;
+ trampoline_cr4_features = &trampoline_header->cr4;
+ *trampoline_cr4_features = read_cr4();
+
+ trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+ trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
+ trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
+#endif
+}
+
+/*
+ * setup_real_mode() gets called very early, to guarantee the
+ * availability of low memory. This is before the proper kernel page
+ * tables are set up, so we cannot set page permissions there; instead,
+ * set_real_mode_permissions() runs later as an arch_initcall.
+ */
+static int __init set_real_mode_permissions(void)
+{
+ unsigned char *base = (unsigned char *) real_mode_header;
+ size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+
+ size_t ro_size =
+ PAGE_ALIGN(real_mode_header->ro_end) -
+ __pa(base);
+
+ size_t text_size =
+ PAGE_ALIGN(real_mode_header->ro_end) -
+ real_mode_header->text_start;
+
+ unsigned long text_start =
+ (unsigned long) __va(real_mode_header->text_start);
+
+ set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
+ set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
+ set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
+
+ return 0;
+}
+
+arch_initcall(set_real_mode_permissions);
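setup_real_mode() above copies the real-mode blob into low memory and then walks a relocation table that, going by the loop, is a 32-bit count of 16-bit segment fixups followed by their offsets, then a second count and the 32-bit linear fixups. A compilable sketch of that walk; the function name, parameters and any blob-layout details beyond what the loop implies are assumptions, not kernel API:

#include <stdint.h>

/* Hypothetical helper mirroring the relocation walk in setup_real_mode(). */
static void apply_realmode_relocs(uint8_t *base, const uint32_t *rel,
				  uint16_t seg, uint32_t phys_base)
{
	uint32_t i, count;

	count = *rel++;				/* 16-bit segment relocations */
	for (i = 0; i < count; i++)
		*(uint16_t *)(base + rel[i]) = seg;
	rel += count;

	count = *rel++;				/* 32-bit linear relocations */
	for (i = 0; i < count; i++)
		*(uint32_t *)(base + rel[i]) += phys_base;
}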
diff --git a/arch/x86/realmode/rm/.gitignore b/arch/x86/realmode/rm/.gitignore
new file mode 100644
index 000000000000..b6ed3a2555cb
--- /dev/null
+++ b/arch/x86/realmode/rm/.gitignore
@@ -0,0 +1,3 @@
+pasyms.h
+realmode.lds
+realmode.relocs
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
new file mode 100644
index 000000000000..5b84a2d30888
--- /dev/null
+++ b/arch/x86/realmode/rm/Makefile
@@ -0,0 +1,82 @@
+#
+# arch/x86/realmode/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+always := realmode.bin realmode.relocs
+
+wakeup-objs := wakeup_asm.o wakemain.o video-mode.o
+wakeup-objs += copy.o bioscall.o regs.o
+# The link order of the video-*.o modules can matter. In particular,
+# video-vga.o *must* be listed first, followed by video-vesa.o.
+# Hardware-specific drivers should follow in the order they should be
+# probed, and video-bios.o should typically be last.
+wakeup-objs += video-vga.o
+wakeup-objs += video-vesa.o
+wakeup-objs += video-bios.o
+
+realmode-y += header.o
+realmode-y += trampoline_$(BITS).o
+realmode-y += stack.o
+realmode-$(CONFIG_X86_32) += reboot_32.o
+realmode-$(CONFIG_ACPI_SLEEP) += $(wakeup-objs)
+
+targets += $(realmode-y)
+
+REALMODE_OBJS = $(addprefix $(obj)/,$(realmode-y))
+
+sed-pasyms := -n -r -e 's/^([0-9a-fA-F]+) [ABCDGRSTVW] (.+)$$/pa_\2 = \2;/p'
+
+quiet_cmd_pasyms = PASYMS $@
+ cmd_pasyms = $(NM) $(filter-out FORCE,$^) | \
+ sed $(sed-pasyms) | sort | uniq > $@
+
+targets += pasyms.h
+$(obj)/pasyms.h: $(REALMODE_OBJS) FORCE
+ $(call if_changed,pasyms)
+
+targets += realmode.lds
+$(obj)/realmode.lds: $(obj)/pasyms.h
+
+LDFLAGS_realmode.elf := --emit-relocs -T
+CPPFLAGS_realmode.lds += -P -C -I$(obj)
+
+targets += realmode.elf
+$(obj)/realmode.elf: $(obj)/realmode.lds $(REALMODE_OBJS) FORCE
+ $(call if_changed,ld)
+
+OBJCOPYFLAGS_realmode.bin := -O binary
+
+targets += realmode.bin
+$(obj)/realmode.bin: $(obj)/realmode.elf $(obj)/realmode.relocs
+ $(call if_changed,objcopy)
+
+quiet_cmd_relocs = RELOCS $@
+ cmd_relocs = arch/x86/tools/relocs --realmode $< > $@
+
+targets += realmode.relocs
+$(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
+ $(call if_changed,relocs)
+
+# ---------------------------------------------------------------------------
+
+# How to compile the 16-bit code. Note we always compile for -march=i386,
+# that way we can complain to the user if the CPU is insufficient.
+KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
+ -I$(srctree)/arch/x86/boot \
+ -DDISABLE_BRANCH_PROFILING \
+ -Wall -Wstrict-prototypes \
+ -march=i386 -mregparm=3 \
+ -include $(srctree)/$(src)/../../boot/code16gcc.h \
+ -fno-strict-aliasing -fomit-frame-pointer \
+ $(call cc-option, -ffreestanding) \
+ $(call cc-option, -fno-toplevel-reorder,\
+ $(call cc-option, -fno-unit-at-a-time)) \
+ $(call cc-option, -fno-stack-protector) \
+ $(call cc-option, -mpreferred-stack-boundary=2)
+KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+GCOV_PROFILE := n
diff --git a/arch/x86/realmode/rm/bioscall.S b/arch/x86/realmode/rm/bioscall.S
new file mode 100644
index 000000000000..16162d197918
--- /dev/null
+++ b/arch/x86/realmode/rm/bioscall.S
@@ -0,0 +1 @@
+#include "../../boot/bioscall.S"
diff --git a/arch/x86/realmode/rm/copy.S b/arch/x86/realmode/rm/copy.S
new file mode 100644
index 000000000000..b785e6f38fdd
--- /dev/null
+++ b/arch/x86/realmode/rm/copy.S
@@ -0,0 +1 @@
+#include "../../boot/copy.S"
diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
new file mode 100644
index 000000000000..fadf48378ada
--- /dev/null
+++ b/arch/x86/realmode/rm/header.S
@@ -0,0 +1,41 @@
+/*
+ * Real-mode blob header; this should match realmode.h and be
+ * readonly; for mutable data instead add pointers into the .data
+ * or .bss sections as appropriate.
+ */
+
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+
+#include "realmode.h"
+
+ .section ".header", "a"
+
+ .balign 16
+GLOBAL(real_mode_header)
+ .long pa_text_start
+ .long pa_ro_end
+ /* SMP trampoline */
+ .long pa_trampoline_start
+ .long pa_trampoline_status
+ .long pa_trampoline_header
+#ifdef CONFIG_X86_64
+ .long pa_trampoline_pgd;
+#endif
+ /* ACPI S3 wakeup */
+#ifdef CONFIG_ACPI_SLEEP
+ .long pa_wakeup_start
+ .long pa_wakeup_header
+#endif
+ /* APM/BIOS reboot */
+#ifdef CONFIG_X86_32
+ .long pa_machine_real_restart_asm
+#endif
+END(real_mode_header)
+
+ /* End signature, used to verify integrity */
+ .section ".signature","a"
+ .balign 4
+GLOBAL(end_signature)
+ .long REALMODE_END_SIGNATURE
+END(end_signature)
diff --git a/arch/x86/realmode/rm/realmode.h b/arch/x86/realmode/rm/realmode.h
new file mode 100644
index 000000000000..d74cff6350ed
--- /dev/null
+++ b/arch/x86/realmode/rm/realmode.h
@@ -0,0 +1,21 @@
+#ifndef ARCH_X86_REALMODE_RM_REALMODE_H
+#define ARCH_X86_REALMODE_RM_REALMODE_H
+
+#ifdef __ASSEMBLY__
+
+/*
+ * 16-bit ljmpw to the real_mode_seg
+ *
+ * This must be open-coded since gas will choke on using a
+ * relocatable symbol for the segment portion.
+ */
+#define LJMPW_RM(to) .byte 0xea ; .word (to), real_mode_seg
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Signature at the end of the realmode region
+ */
+#define REALMODE_END_SIGNATURE 0x65a22c82
+
+#endif /* ARCH_X86_REALMODE_RM_REALMODE_H */
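LJMPW_RM() above open-codes a far jump because gas will not accept a relocatable symbol for the segment operand of ljmpw. The packed struct below is only a sketch of the five bytes the macro emits; the 16-bit segment field is what the segment relocations in setup_real_mode() later patch to the real-mode base:

#include <stdint.h>

/* Sketch of the encoding produced by LJMPW_RM(to); values are illustrative. */
struct ljmpw_rm {
	uint8_t  opcode;	/* 0xea: jmp ptr16:16 */
	uint16_t offset;	/* the 'to' label */
	uint16_t segment;	/* real_mode_seg, patched at boot */
} __attribute__((packed));

static const struct ljmpw_rm example = { 0xea, 0x0010, 0x0000 };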
diff --git a/arch/x86/realmode/rm/realmode.lds.S b/arch/x86/realmode/rm/realmode.lds.S
new file mode 100644
index 000000000000..86b2e8d6b1f1
--- /dev/null
+++ b/arch/x86/realmode/rm/realmode.lds.S
@@ -0,0 +1,76 @@
+/*
+ * realmode.lds.S
+ *
+ * Linker script for the real-mode code
+ */
+
+#include <asm/page_types.h>
+
+#undef i386
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+
+SECTIONS
+{
+ real_mode_seg = 0;
+
+ . = 0;
+ .header : {
+ pa_real_mode_base = .;
+ *(.header)
+ }
+
+ . = ALIGN(4);
+ .rodata : {
+ *(.rodata)
+ *(.rodata.*)
+ . = ALIGN(16);
+ video_cards = .;
+ *(.videocards)
+ video_cards_end = .;
+ }
+
+ . = ALIGN(PAGE_SIZE);
+ pa_text_start = .;
+ .text : {
+ *(.text)
+ *(.text.*)
+ }
+
+ .text32 : {
+ *(.text32)
+ *(.text32.*)
+ }
+
+ .text64 : {
+ *(.text64)
+ *(.text64.*)
+ }
+ pa_ro_end = .;
+
+ . = ALIGN(PAGE_SIZE);
+ .data : {
+ *(.data)
+ *(.data.*)
+ }
+
+ . = ALIGN(128);
+ .bss : {
+ *(.bss*)
+ }
+
+ /* End signature for integrity checking */
+ . = ALIGN(4);
+ .signature : {
+ *(.signature)
+ }
+
+ /DISCARD/ : {
+ *(.note*)
+ *(.debug*)
+ *(.eh_frame*)
+ }
+
+#include "pasyms.h"
+}
diff --git a/arch/x86/kernel/reboot_32.S b/arch/x86/realmode/rm/reboot_32.S
index 1d5c46df0d78..114044876b3d 100644
--- a/arch/x86/kernel/reboot_32.S
+++ b/arch/x86/realmode/rm/reboot_32.S
@@ -2,6 +2,7 @@
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/page_types.h>
+#include "realmode.h"
/*
* The following code and data reboots the machine by switching to real
@@ -13,34 +14,20 @@
*
* This code is called with the restart type (0 = BIOS, 1 = APM) in %eax.
*/
- .section ".x86_trampoline","a"
- .balign 16
+ .section ".text32", "ax"
.code32
-ENTRY(machine_real_restart_asm)
-r_base = .
- /* Get our own relocated address */
- call 1f
-1: popl %ebx
- subl $(1b - r_base), %ebx
-
- /* Compute the equivalent real-mode segment */
- movl %ebx, %ecx
- shrl $4, %ecx
-
- /* Patch post-real-mode segment jump */
- movw (dispatch_table - r_base)(%ebx,%eax,2),%ax
- movw %ax, (101f - r_base)(%ebx)
- movw %cx, (102f - r_base)(%ebx)
+ .balign 16
+ENTRY(machine_real_restart_asm)
/* Set up the IDT for real mode. */
- lidtl (machine_real_restart_idt - r_base)(%ebx)
+ lidtl pa_machine_real_restart_idt
/*
* Set up a GDT from which we can load segment descriptors for real
* mode. The GDT is not used in real mode; it is just needed here to
* prepare the descriptors.
*/
- lgdtl (machine_real_restart_gdt - r_base)(%ebx)
+ lgdtl pa_machine_real_restart_gdt
/*
* Load the data segment registers with 16-bit compatible values
@@ -51,7 +38,7 @@ r_base = .
movl %ecx, %fs
movl %ecx, %gs
movl %ecx, %ss
- ljmpl $8, $1f - r_base
+ ljmpw $8, $1f
/*
* This is 16-bit protected mode code to disable paging and the cache,
@@ -76,27 +63,29 @@ r_base = .
*
* Most of this work is probably excessive, but it is what is tested.
*/
+ .text
.code16
+
+ .balign 16
+machine_real_restart_asm16:
1:
xorl %ecx, %ecx
- movl %cr0, %eax
- andl $0x00000011, %eax
- orl $0x60000000, %eax
- movl %eax, %cr0
+ movl %cr0, %edx
+ andl $0x00000011, %edx
+ orl $0x60000000, %edx
+ movl %edx, %cr0
movl %ecx, %cr3
movl %cr0, %edx
- andl $0x60000000, %edx /* If no cache bits -> no wbinvd */
+ testl $0x60000000, %edx /* If no cache bits -> no wbinvd */
jz 2f
wbinvd
2:
- andb $0x10, %al
- movl %eax, %cr0
- .byte 0xea /* ljmpw */
-101: .word 0 /* Offset */
-102: .word 0 /* Segment */
-
-bios:
- ljmpw $0xf000, $0xfff0
+ andb $0x10, %dl
+ movl %edx, %cr0
+ LJMPW_RM(3f)
+3:
+ andw %ax, %ax
+ jz bios
apm:
movw $0x1000, %ax
@@ -106,26 +95,34 @@ apm:
movw $0x0001, %bx
movw $0x0003, %cx
int $0x15
+ /* This should never return... */
-END(machine_real_restart_asm)
+bios:
+ ljmpw $0xf000, $0xfff0
- .balign 16
- /* These must match <asm/reboot.h */
-dispatch_table:
- .word bios - r_base
- .word apm - r_base
-END(dispatch_table)
+ .section ".rodata", "a"
- .balign 16
-machine_real_restart_idt:
+ .balign 16
+GLOBAL(machine_real_restart_idt)
.word 0xffff /* Length - real mode default value */
.long 0 /* Base - real mode default value */
END(machine_real_restart_idt)
- .balign 16
-ENTRY(machine_real_restart_gdt)
- .quad 0 /* Self-pointer, filled in by PM code */
- .quad 0 /* 16-bit code segment, filled in by PM code */
+ .balign 16
+GLOBAL(machine_real_restart_gdt)
+ /* Self-pointer */
+ .word 0xffff /* Length - real mode default value */
+ .long pa_machine_real_restart_gdt
+ .word 0
+
+ /*
+ * 16-bit code segment pointing to real_mode_seg
+ * Selector value 8
+ */
+ .word 0xffff /* Limit */
+ .long 0x9b000000 + pa_real_mode_base
+ .word 0
+
/*
* 16-bit data segment with the selector value 16 = 0x10 and
* base value 0x100; since this is consistent with real mode
diff --git a/arch/x86/realmode/rm/regs.c b/arch/x86/realmode/rm/regs.c
new file mode 100644
index 000000000000..fbb15b9f9ca9
--- /dev/null
+++ b/arch/x86/realmode/rm/regs.c
@@ -0,0 +1 @@
+#include "../../boot/regs.c"
diff --git a/arch/x86/realmode/rm/stack.S b/arch/x86/realmode/rm/stack.S
new file mode 100644
index 000000000000..867ae87adfae
--- /dev/null
+++ b/arch/x86/realmode/rm/stack.S
@@ -0,0 +1,19 @@
+/*
+ * Common heap and stack allocations
+ */
+
+#include <linux/linkage.h>
+
+ .data
+GLOBAL(HEAP)
+ .long rm_heap
+GLOBAL(heap_end)
+ .long rm_stack
+
+ .bss
+ .balign 16
+GLOBAL(rm_heap)
+ .space 2048
+GLOBAL(rm_stack)
+ .space 2048
+GLOBAL(rm_stack_end)
diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
new file mode 100644
index 000000000000..c1b2791183e7
--- /dev/null
+++ b/arch/x86/realmode/rm/trampoline_32.S
@@ -0,0 +1,74 @@
+/*
+ *
+ * Trampoline.S Derived from Setup.S by Linus Torvalds
+ *
+ * 4 Jan 1997 Michael Chastain: changed to gnu as.
+ *
+ * This is only used for booting secondary CPUs in an SMP machine
+ *
+ * Entry: CS:IP point to the start of our code, we are
+ * in real mode with no stack, but the rest of the
+ * trampoline page to make our stack and everything else
+ * is a mystery.
+ *
+ * We jump into arch/x86/kernel/head_32.S.
+ *
+ * On entry to trampoline_start, the processor is in real mode
+ * with 16-bit addressing and 16-bit data. CS has some value
+ * and IP is zero. Thus, we load CS to the physical segment
+ * of the real mode code before doing anything further.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/segment.h>
+#include <asm/page_types.h>
+#include "realmode.h"
+
+ .text
+ .code16
+
+ .balign PAGE_SIZE
+ENTRY(trampoline_start)
+ wbinvd # Needed for NUMA-Q; should be harmless for others
+
+ LJMPW_RM(1f)
+1:
+ mov %cs, %ax # Code and data in the same place
+ mov %ax, %ds
+
+ cli # We should be safe anyway
+
+ movl tr_start, %eax # where we need to go
+
+ movl $0xA5A5A5A5, trampoline_status
+ # write marker so the master knows we're running
+
+ /*
+ * The GDT may be in a non-default location and the kernel may sit
+ * beyond 16MB, so lgdt cannot load the address: the default operand
+ * size in real mode is 16 bits. Use lgdtl instead to force a 32-bit
+ * operand size.
+ */
+ lidtl tr_idt # load idt with 0, 0
+ lgdtl tr_gdt # load gdt with whatever is appropriate
+
+ movw $1, %dx # protected mode (PE) bit
+ lmsw %dx # into protected mode
+
+ ljmpl $__BOOT_CS, $pa_startup_32
+
+ .section ".text32","ax"
+ .code32
+ENTRY(startup_32) # note: also used from wakeup_asm.S
+ jmp *%eax
+
+ .bss
+ .balign 8
+GLOBAL(trampoline_header)
+ tr_start: .space 4
+ tr_gdt_pad: .space 2
+ tr_gdt: .space 6
+END(trampoline_header)
+
+#include "trampoline_common.S"
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index 09ff51799e96..bb360dc39d21 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -5,12 +5,12 @@
* 4 Jan 1997 Michael Chastain: changed to gnu as.
* 15 Sept 2005 Eric Biederman: 64bit PIC support
*
- * Entry: CS:IP point to the start of our code, we are
- * in real mode with no stack, but the rest of the
+ * Entry: CS:IP point to the start of our code, we are
+ * in real mode with no stack, but the rest of the
* trampoline page to make our stack and everything else
* is a mystery.
*
- * On entry to trampoline_data, the processor is in real mode
+ * On entry to trampoline_start, the processor is in real mode
* with 16-bit addressing and 16-bit data. CS has some value
* and IP is zero. Thus, data addresses need to be absolute
* (no relocation) and are taken with regard to r_base.
@@ -31,43 +31,33 @@
#include <asm/msr.h>
#include <asm/segment.h>
#include <asm/processor-flags.h>
+#include "realmode.h"
- .section ".x86_trampoline","a"
- .balign PAGE_SIZE
+ .text
.code16
-ENTRY(trampoline_data)
-r_base = .
+ .balign PAGE_SIZE
+ENTRY(trampoline_start)
cli # We should be safe anyway
wbinvd
+
+ LJMPW_RM(1f)
+1:
mov %cs, %ax # Code and data in the same place
mov %ax, %ds
mov %ax, %es
mov %ax, %ss
+ movl $0xA5A5A5A5, trampoline_status
+ # write marker so the master knows we're running
- movl $0xA5A5A5A5, trampoline_status - r_base
- # write marker for master knows we're running
-
- # Setup stack
- movw $(trampoline_stack_end - r_base), %sp
+ # Setup stack
+ movl $rm_stack_end, %esp
call verify_cpu # Verify the cpu supports long mode
testl %eax, %eax # Check for return code
jnz no_longmode
- mov %cs, %ax
- movzx %ax, %esi # Find the 32bit trampoline location
- shll $4, %esi
-
- # Fixup the absolute vectors
- leal (startup_32 - r_base)(%esi), %eax
- movl %eax, startup_32_vector - r_base
- leal (startup_64 - r_base)(%esi), %eax
- movl %eax, startup_64_vector - r_base
- leal (tgdt - r_base)(%esi), %eax
- movl %eax, (tgdt + 2 - r_base)
-
/*
* GDT tables in non default location kernel can be beyond 16MB and
* lgdt will not be able to load the address as in real mode default
@@ -75,36 +65,49 @@ r_base = .
* to 32 bit.
*/
- lidtl tidt - r_base # load idt with 0, 0
- lgdtl tgdt - r_base # load gdt with whatever is appropriate
+ lidtl tr_idt # load idt with 0, 0
+ lgdtl tr_gdt # load gdt with whatever is appropriate
+
+ movw $__KERNEL_DS, %dx # Data segment descriptor
- mov $X86_CR0_PE, %ax # protected mode (PE) bit
- lmsw %ax # into protected mode
+ # Enable protected mode
+ movl $X86_CR0_PE, %eax # protected mode (PE) bit
+ movl %eax, %cr0 # into protected mode
# flush prefetch and jump to startup_32
- ljmpl *(startup_32_vector - r_base)
+ ljmpl $__KERNEL32_CS, $pa_startup_32
+no_longmode:
+ hlt
+ jmp no_longmode
+#include "../kernel/verify_cpu.S"
+
+ .section ".text32","ax"
.code32
.balign 4
-startup_32:
- movl $__KERNEL_DS, %eax # Initialize the %ds segment register
- movl %eax, %ds
-
- movl $X86_CR4_PAE, %eax
+ENTRY(startup_32)
+ movl %edx, %ss
+ addl $pa_real_mode_base, %esp
+ movl %edx, %ds
+ movl %edx, %es
+ movl %edx, %fs
+ movl %edx, %gs
+
+ movl pa_tr_cr4, %eax
movl %eax, %cr4 # Enable PAE mode
- # Setup trampoline 4 level pagetables
- leal (trampoline_level4_pgt - r_base)(%esi), %eax
+ # Setup trampoline 4 level pagetables
+ movl $pa_trampoline_pgd, %eax
movl %eax, %cr3
+ # Set up EFER
+ movl pa_tr_efer, %eax
+ movl pa_tr_efer + 4, %edx
movl $MSR_EFER, %ecx
- movl $(1 << _EFER_LME), %eax # Enable Long Mode
- xorl %edx, %edx
wrmsr
# Enable paging and in turn activate Long Mode
- # Enable protected mode
- movl $(X86_CR0_PG | X86_CR0_PE), %eax
+ movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
movl %eax, %cr0
/*
@@ -113,59 +116,38 @@ startup_32:
* EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
* the new gdt/idt that has __KERNEL_CS with CS.L = 1.
*/
- ljmp *(startup_64_vector - r_base)(%esi)
+ ljmpl $__KERNEL_CS, $pa_startup_64
+ .section ".text64","ax"
.code64
.balign 4
-startup_64:
+ENTRY(startup_64)
# Now jump into the kernel using virtual addresses
- movq $secondary_startup_64, %rax
- jmp *%rax
-
- .code16
-no_longmode:
- hlt
- jmp no_longmode
-#include "verify_cpu.S"
-
- .balign 4
- # Careful these need to be in the same 64K segment as the above;
-tidt:
- .word 0 # idt limit = 0
- .word 0, 0 # idt base = 0L
+ jmpq *tr_start(%rip)
+ .section ".rodata","a"
# Duplicate the global descriptor table
# so the kernel can live anywhere
- .balign 4
-tgdt:
- .short tgdt_end - tgdt # gdt limit
- .long tgdt - r_base
- .short 0
+ .balign 16
+ .globl tr_gdt
+tr_gdt:
+ .short tr_gdt_end - tr_gdt - 1 # gdt limit
+ .long pa_tr_gdt
+ .short 0
.quad 0x00cf9b000000ffff # __KERNEL32_CS
.quad 0x00af9b000000ffff # __KERNEL_CS
.quad 0x00cf93000000ffff # __KERNEL_DS
-tgdt_end:
+tr_gdt_end:
- .balign 4
-startup_32_vector:
- .long startup_32 - r_base
- .word __KERNEL32_CS, 0
+ .bss
+ .balign PAGE_SIZE
+GLOBAL(trampoline_pgd) .space PAGE_SIZE
- .balign 4
-startup_64_vector:
- .long startup_64 - r_base
- .word __KERNEL_CS, 0
+ .balign 8
+GLOBAL(trampoline_header)
+ tr_start: .space 8
+ GLOBAL(tr_efer) .space 8
+ GLOBAL(tr_cr4) .space 4
+END(trampoline_header)
- .balign 4
-ENTRY(trampoline_status)
- .long 0
-
-trampoline_stack:
- .org 0x1000
-trampoline_stack_end:
-ENTRY(trampoline_level4_pgt)
- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
- .fill 510,8,0
- .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
-
-ENTRY(trampoline_end)
+#include "trampoline_common.S"
diff --git a/arch/x86/realmode/rm/trampoline_common.S b/arch/x86/realmode/rm/trampoline_common.S
new file mode 100644
index 000000000000..b1ecdb9692ad
--- /dev/null
+++ b/arch/x86/realmode/rm/trampoline_common.S
@@ -0,0 +1,7 @@
+ .section ".rodata","a"
+ .balign 16
+tr_idt: .fill 1, 6, 0
+
+ .bss
+ .balign 4
+GLOBAL(trampoline_status) .space 4
diff --git a/arch/x86/realmode/rm/video-bios.c b/arch/x86/realmode/rm/video-bios.c
new file mode 100644
index 000000000000..848b25aaf11b
--- /dev/null
+++ b/arch/x86/realmode/rm/video-bios.c
@@ -0,0 +1 @@
+#include "../../boot/video-bios.c"
diff --git a/arch/x86/realmode/rm/video-mode.c b/arch/x86/realmode/rm/video-mode.c
new file mode 100644
index 000000000000..2a98b7e2368b
--- /dev/null
+++ b/arch/x86/realmode/rm/video-mode.c
@@ -0,0 +1 @@
+#include "../../boot/video-mode.c"
diff --git a/arch/x86/realmode/rm/video-vesa.c b/arch/x86/realmode/rm/video-vesa.c
new file mode 100644
index 000000000000..413edddb51e5
--- /dev/null
+++ b/arch/x86/realmode/rm/video-vesa.c
@@ -0,0 +1 @@
+#include "../../boot/video-vesa.c"
diff --git a/arch/x86/realmode/rm/video-vga.c b/arch/x86/realmode/rm/video-vga.c
new file mode 100644
index 000000000000..3085f5c9d288
--- /dev/null
+++ b/arch/x86/realmode/rm/video-vga.c
@@ -0,0 +1 @@
+#include "../../boot/video-vga.c"
diff --git a/arch/x86/kernel/acpi/realmode/wakemain.c b/arch/x86/realmode/rm/wakemain.c
index 883962d9eef2..91405d515ec6 100644
--- a/arch/x86/kernel/acpi/realmode/wakemain.c
+++ b/arch/x86/realmode/rm/wakemain.c
@@ -65,7 +65,8 @@ void main(void)
{
/* Kill machine if structures are wrong */
if (wakeup_header.real_magic != 0x12345678)
- while (1);
+ while (1)
+ ;
if (wakeup_header.realmode_flags & 4)
send_morse("...-");
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/realmode/rm/wakeup.h
index 97a29e1430e3..9317e0042f24 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.h
+++ b/arch/x86/realmode/rm/wakeup.h
@@ -12,9 +12,8 @@
/* This must match data at wakeup.S */
struct wakeup_header {
u16 video_mode; /* Video mode number */
- u16 _jmp1; /* ljmpl opcode, 32-bit only */
u32 pmode_entry; /* Protected mode resume point, 32-bit only */
- u16 _jmp2; /* CS value, 32-bit only */
+ u16 pmode_cs;
u32 pmode_cr0; /* Protected mode cr0 */
u32 pmode_cr3; /* Protected mode cr3 */
u32 pmode_cr4; /* Protected mode cr4 */
@@ -26,12 +25,6 @@ struct wakeup_header {
u32 pmode_behavior; /* Wakeup routine behavior flags */
u32 realmode_flags;
u32 real_magic;
- u16 trampoline_segment; /* segment with trampoline code, 64-bit only */
- u8 _pad1;
- u8 wakeup_jmp;
- u16 wakeup_jmp_off;
- u16 wakeup_jmp_seg;
- u64 wakeup_gdt[3];
u32 signature; /* To check we have correct structure */
} __attribute__((__packed__));
@@ -40,7 +33,6 @@ extern struct wakeup_header wakeup_header;
#define WAKEUP_HEADER_OFFSET 8
#define WAKEUP_HEADER_SIGNATURE 0x51ee1111
-#define WAKEUP_END_SIGNATURE 0x65a22c82
/* Wakeup behavior bits */
#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE 0
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/realmode/rm/wakeup_asm.S
index b4fd836e4053..8905166b0bbb 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/realmode/rm/wakeup_asm.S
@@ -1,50 +1,47 @@
/*
* ACPI wakeup real mode startup stub
*/
+#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/msr-index.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/processor-flags.h>
+#include "realmode.h"
#include "wakeup.h"
.code16
- .section ".jump", "ax"
- .globl _start
-_start:
- cli
- jmp wakeup_code
/* This should match the structure in wakeup.h */
- .section ".header", "a"
- .globl wakeup_header
-wakeup_header:
-video_mode: .short 0 /* Video mode number */
-pmode_return: .byte 0x66, 0xea /* ljmpl */
- .long 0 /* offset goes here */
- .short __KERNEL_CS
-pmode_cr0: .long 0 /* Saved %cr0 */
-pmode_cr3: .long 0 /* Saved %cr3 */
-pmode_cr4: .long 0 /* Saved %cr4 */
-pmode_efer: .quad 0 /* Saved EFER */
-pmode_gdt: .quad 0
-pmode_misc_en: .quad 0 /* Saved MISC_ENABLE MSR */
-pmode_behavior: .long 0 /* Wakeup behavior flags */
-realmode_flags: .long 0
-real_magic: .long 0
-trampoline_segment: .word 0
-_pad1: .byte 0
-wakeup_jmp: .byte 0xea /* ljmpw */
-wakeup_jmp_off: .word 3f
-wakeup_jmp_seg: .word 0
-wakeup_gdt: .quad 0, 0, 0
-signature: .long WAKEUP_HEADER_SIGNATURE
+ .section ".data", "aw"
+
+ .balign 16
+GLOBAL(wakeup_header)
+ video_mode: .short 0 /* Video mode number */
+ pmode_entry: .long 0
+ pmode_cs: .short __KERNEL_CS
+ pmode_cr0: .long 0 /* Saved %cr0 */
+ pmode_cr3: .long 0 /* Saved %cr3 */
+ pmode_cr4: .long 0 /* Saved %cr4 */
+ pmode_efer: .quad 0 /* Saved EFER */
+ pmode_gdt: .quad 0
+ pmode_misc_en: .quad 0 /* Saved MISC_ENABLE MSR */
+ pmode_behavior: .long 0 /* Wakeup behavior flags */
+ realmode_flags: .long 0
+ real_magic: .long 0
+ signature: .long WAKEUP_HEADER_SIGNATURE
+END(wakeup_header)
.text
.code16
-wakeup_code:
+
+ .balign 16
+ENTRY(wakeup_start)
+ cli
cld
+ LJMPW_RM(3f)
+3:
/* Apparently some dimwit BIOS programmers don't know how to
program a PM to RM transition, and we might end up here with
junk in the data segment descriptor registers. The only way
@@ -54,8 +51,7 @@ wakeup_code:
movl %cr0, %eax
orb $X86_CR0_PE, %al
movl %eax, %cr0
- jmp 1f
-1: ljmpw $8, $2f
+ ljmpw $8, $2f
2:
movw %cx, %ds
movw %cx, %es
@@ -65,16 +61,18 @@ wakeup_code:
andb $~X86_CR0_PE, %al
movl %eax, %cr0
- jmp wakeup_jmp
+ LJMPW_RM(3f)
3:
/* Set up segments */
movw %cs, %ax
+ movw %ax, %ss
+ movl $rm_stack_end, %esp
movw %ax, %ds
movw %ax, %es
- movw %ax, %ss
- lidtl wakeup_idt
+ movw %ax, %fs
+ movw %ax, %gs
- movl $wakeup_stack_end, %esp
+ lidtl wakeup_idt
/* Clear the EFLAGS */
pushl $0
@@ -87,7 +85,7 @@ wakeup_code:
/* Check we really have everything... */
movl end_signature, %eax
- cmpl $WAKEUP_END_SIGNATURE, %eax
+ cmpl $REALMODE_END_SIGNATURE, %eax
jne bogus_real_magic
/* Call the C code */
@@ -128,14 +126,13 @@ wakeup_code:
lgdtl pmode_gdt
/* This really couldn't... */
- movl pmode_cr0, %eax
- movl %eax, %cr0
- jmp pmode_return
+ movl pmode_entry, %eax
+ movl pmode_cr0, %ecx
+ movl %ecx, %cr0
+ ljmpl $__KERNEL_CS, $pa_startup_32
+ /* -> jmp *%eax in trampoline_32.S */
#else
- pushw $0
- pushw trampoline_segment
- pushw $0
- lret
+ jmp trampoline_start
#endif
bogus_real_magic:
@@ -143,28 +140,38 @@ bogus_real_magic:
hlt
jmp 1b
- .data
+ .section ".rodata","a"
+
+ /*
+ * Set up the wakeup GDT. We set these up as Big Real Mode,
+ * that is, with limits set to 4 GB. At least the Lenovo
+ * Thinkpad X61 is known to need this for the video BIOS
+ * initialization quirk to work; this is likely to also
+ * be the case for other laptops or integrated video devices.
+ */
+
+ .balign 16
+GLOBAL(wakeup_gdt)
+ .word 3*8-1 /* Self-descriptor */
+ .long pa_wakeup_gdt
+ .word 0
+
+ .word 0xffff /* 16-bit code segment @ real_mode_base */
+ .long 0x9b000000 + pa_real_mode_base
+ .word 0x008f /* big real mode */
+
+ .word 0xffff /* 16-bit data segment @ real_mode_base */
+ .long 0x93000000 + pa_real_mode_base
+ .word 0x008f /* big real mode */
+END(wakeup_gdt)
+
+ .section ".rodata","a"
.balign 8
/* This is the standard real-mode IDT */
-wakeup_idt:
+ .balign 16
+GLOBAL(wakeup_idt)
.word 0xffff /* limit */
.long 0 /* address */
.word 0
-
- .globl HEAP, heap_end
-HEAP:
- .long wakeup_heap
-heap_end:
- .long wakeup_stack
-
- .bss
-wakeup_heap:
- .space 2048
-wakeup_stack:
- .space 2048
-wakeup_stack_end:
-
- .section ".signature","a"
-end_signature:
- .long WAKEUP_END_SIGNATURE
+END(wakeup_idt)
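wakeup_gdt above builds big-real-mode segments: base pa_real_mode_base, 4 GB limit (granularity bit set), access byte 0x9b for code and 0x93 for data. The struct below sketches how each .word/.long/.word triplet decodes into the standard descriptor fields; the field names are illustrative, not kernel definitions:

#include <stdint.h>

/* Decoding of one wakeup_gdt entry, e.g. 0xffff / 0x9b000000 + base / 0x008f. */
struct gdt_entry {
	uint16_t limit_lo;	/* 0xffff */
	uint16_t base_lo;	/* pa_real_mode_base & 0xffff */
	uint8_t  base_mid;	/* (pa_real_mode_base >> 16) & 0xff */
	uint8_t  access;	/* 0x9b code, 0x93 data */
	uint8_t  flags_limit;	/* 0x8f: G=1, limit[19:16] = 0xf -> 4 GB */
	uint8_t  base_hi;	/* (pa_real_mode_base >> 24) & 0xff */
} __attribute__((packed));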
diff --git a/arch/x86/realmode/rmpiggy.S b/arch/x86/realmode/rmpiggy.S
new file mode 100644
index 000000000000..204c6ece0e97
--- /dev/null
+++ b/arch/x86/realmode/rmpiggy.S
@@ -0,0 +1,20 @@
+/*
+ * Wrapper script for the realmode binary as a transport object
+ * before copying to low memory.
+ */
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+
+ .section ".init.data","aw"
+
+ .balign PAGE_SIZE
+
+GLOBAL(real_mode_blob)
+ .incbin "arch/x86/realmode/rm/realmode.bin"
+END(real_mode_blob)
+
+GLOBAL(real_mode_blob_end);
+
+GLOBAL(real_mode_relocs)
+ .incbin "arch/x86/realmode/rm/realmode.relocs"
+END(real_mode_relocs)
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index 29f9f0554f7d..7a35a6e71d44 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -355,3 +355,4 @@
346 i386 setns sys_setns
347 i386 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+349 i386 kcmp sys_kcmp
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index dd29a9ea27c5..51171aeff0dc 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -318,6 +318,8 @@
309 common getcpu sys_getcpu
310 64 process_vm_readv sys_process_vm_readv
311 64 process_vm_writev sys_process_vm_writev
+312 64 kcmp sys_kcmp
+
#
# x32-specific system call numbers start at 512 to avoid cache impact
# for native 64-bit operation.
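The new table entries wire up the kcmp() system call (349 on i386, 312 on x86-64). There is no glibc wrapper, so it is reached through syscall(2); the sketch below compares two descriptors of the calling process and assumes the KCMP_FILE value from <linux/kcmp.h>:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

#define KCMP_FILE 0	/* from <linux/kcmp.h>; repeated here in case the header is absent */

int main(void)
{
	pid_t self = getpid();
	/* 312 is __NR_kcmp on x86-64, matching the table entry above. */
	long ret = syscall(312, self, self, KCMP_FILE, 0, 1);

	if (ret < 0)
		perror("kcmp");
	else
		printf("fd 0 and fd 1 %s the same open file description\n",
		       ret == 0 ? "share" : "do not share");
	return 0;
}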
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index b43cfcd9bf40..5a1847d61930 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -60,12 +60,31 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
"__x86_cpu_dev_(start|end)|"
"(__parainstructions|__alt_instructions)(|_end)|"
"(__iommu_table|__apicdrivers|__smp_locks)(|_end)|"
+ "__(start|end)_pci_.*|"
+ "__(start|end)_builtin_fw|"
+ "__(start|stop)___ksymtab(|_gpl|_unused|_unused_gpl|_gpl_future)|"
+ "__(start|stop)___kcrctab(|_gpl|_unused|_unused_gpl|_gpl_future)|"
+ "__(start|stop)___param|"
+ "__(start|stop)___modver|"
+ "__(start|stop)___bug_table|"
+ "__tracedata_(start|end)|"
+ "__(start|stop)_notes|"
+ "__end_rodata|"
+ "__initramfs_start|"
+ "(jiffies|jiffies_64)|"
"_end)$"
};
static const char * const sym_regex_realmode[S_NSYMTYPES] = {
/*
+ * These symbols are known to be relative, even if the linker marks them
+ * as absolute (typically defined outside any section in the linker script.)
+ */
+ [S_REL] =
+ "^pa_",
+
+/*
* These are 16-bit segment symbols when compiling 16-bit code.
*/
[S_SEG] =
diff --git a/arch/x86/video/fbdev.c b/arch/x86/video/fbdev.c
index c5ffb6ac8707..d5644bbe8cba 100644
--- a/arch/x86/video/fbdev.c
+++ b/arch/x86/video/fbdev.c
@@ -9,24 +9,34 @@
#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/module.h>
+#include <linux/vgaarb.h>
int fb_is_primary_device(struct fb_info *info)
{
struct device *device = info->device;
struct pci_dev *pci_dev = NULL;
+ struct pci_dev *default_device = vga_default_device();
struct resource *res = NULL;
- int retval = 0;
if (device)
pci_dev = to_pci_dev(device);
- if (pci_dev)
- res = &pci_dev->resource[PCI_ROM_RESOURCE];
+ if (!pci_dev)
+ return 0;
+
+ if (default_device) {
+ if (pci_dev == default_device)
+ return 1;
+ else
+ return 0;
+ }
+
+ res = &pci_dev->resource[PCI_ROM_RESOURCE];
if (res && res->flags & IORESOURCE_ROM_SHADOW)
- retval = 1;
+ return 1;
- return retval;
+ return 0;
}
EXPORT_SYMBOL(fb_is_primary_device);
MODULE_LICENSE("GPL");
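fb_is_primary_device() now trusts the VGA arbiter's notion of the default device when one has been recorded (see the vga_set_default_device() call added to pci_fixup_video() above) and only falls back to the ROM-shadow heuristic otherwise. A hedged usage sketch; the driver and helper names are hypothetical:

#include <linux/fb.h>

/* Hypothetical driver helper: is this framebuffer the boot display? */
static bool examplefb_is_boot_display(struct fb_info *info)
{
	return fb_is_primary_device(info) != 0;
}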
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
index ef1db1900d86..c8377fb26cdf 100644
--- a/arch/x86/xen/debugfs.c
+++ b/arch/x86/xen/debugfs.c
@@ -19,107 +19,3 @@ struct dentry * __init xen_init_debugfs(void)
return d_xen_debug;
}
-struct array_data
-{
- void *array;
- unsigned elements;
-};
-
-static int u32_array_open(struct inode *inode, struct file *file)
-{
- file->private_data = NULL;
- return nonseekable_open(inode, file);
-}
-
-static size_t format_array(char *buf, size_t bufsize, const char *fmt,
- u32 *array, unsigned array_size)
-{
- size_t ret = 0;
- unsigned i;
-
- for(i = 0; i < array_size; i++) {
- size_t len;
-
- len = snprintf(buf, bufsize, fmt, array[i]);
- len++; /* ' ' or '\n' */
- ret += len;
-
- if (buf) {
- buf += len;
- bufsize -= len;
- buf[-1] = (i == array_size-1) ? '\n' : ' ';
- }
- }
-
- ret++; /* \0 */
- if (buf)
- *buf = '\0';
-
- return ret;
-}
-
-static char *format_array_alloc(const char *fmt, u32 *array, unsigned array_size)
-{
- size_t len = format_array(NULL, 0, fmt, array, array_size);
- char *ret;
-
- ret = kmalloc(len, GFP_KERNEL);
- if (ret == NULL)
- return NULL;
-
- format_array(ret, len, fmt, array, array_size);
- return ret;
-}
-
-static ssize_t u32_array_read(struct file *file, char __user *buf, size_t len,
- loff_t *ppos)
-{
- struct inode *inode = file->f_path.dentry->d_inode;
- struct array_data *data = inode->i_private;
- size_t size;
-
- if (*ppos == 0) {
- if (file->private_data) {
- kfree(file->private_data);
- file->private_data = NULL;
- }
-
- file->private_data = format_array_alloc("%u", data->array, data->elements);
- }
-
- size = 0;
- if (file->private_data)
- size = strlen(file->private_data);
-
- return simple_read_from_buffer(buf, len, ppos, file->private_data, size);
-}
-
-static int xen_array_release(struct inode *inode, struct file *file)
-{
- kfree(file->private_data);
-
- return 0;
-}
-
-static const struct file_operations u32_array_fops = {
- .owner = THIS_MODULE,
- .open = u32_array_open,
- .release= xen_array_release,
- .read = u32_array_read,
- .llseek = no_llseek,
-};
-
-struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
- struct dentry *parent,
- u32 *array, unsigned elements)
-{
- struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
-
- if (data == NULL)
- return NULL;
-
- data->array = array;
- data->elements = elements;
-
- return debugfs_create_file(name, mode, parent, data, &u32_array_fops);
-}
diff --git a/arch/x86/xen/debugfs.h b/arch/x86/xen/debugfs.h
index 78d25499be5b..12ebf3325c7b 100644
--- a/arch/x86/xen/debugfs.h
+++ b/arch/x86/xen/debugfs.h
@@ -3,8 +3,4 @@
struct dentry * __init xen_init_debugfs(void);
-struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
- struct dentry *parent,
- u32 *array, unsigned elements);
-
#endif /* _XEN_DEBUGFS_H */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c0f5facdb10c..e74df9548a02 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -42,6 +42,7 @@
#include <xen/page.h>
#include <xen/hvm.h>
#include <xen/hvc-console.h>
+#include <xen/acpi.h>
#include <asm/paravirt.h>
#include <asm/apic.h>
@@ -75,6 +76,7 @@
#include "xen-ops.h"
#include "mmu.h"
+#include "smp.h"
#include "multicalls.h"
EXPORT_SYMBOL_GPL(hypercall_page);
@@ -883,6 +885,14 @@ static void set_xen_basic_apic_ops(void)
apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
apic->set_apic_id = xen_set_apic_id;
apic->get_apic_id = xen_get_apic_id;
+
+#ifdef CONFIG_SMP
+ apic->send_IPI_allbutself = xen_send_IPI_allbutself;
+ apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
+ apic->send_IPI_mask = xen_send_IPI_mask;
+ apic->send_IPI_all = xen_send_IPI_all;
+ apic->send_IPI_self = xen_send_IPI_self;
+#endif
}
#endif
@@ -1106,7 +1116,10 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.wbinvd = native_wbinvd,
.read_msr = native_read_msr_safe,
+ .rdmsr_regs = native_rdmsr_safe_regs,
.write_msr = xen_write_msr_safe,
+ .wrmsr_regs = native_wrmsr_safe_regs,
+
.read_tsc = native_read_tsc,
.read_pmc = native_read_pmc,
@@ -1340,7 +1353,6 @@ asmlinkage void __init xen_start_kernel(void)
xen_raw_console_write("mapping kernel into physical memory\n");
pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
- xen_ident_map_ISA();
/* Allocate and initialize top and mid mfn levels for p2m structure */
xen_build_mfn_list_list();
@@ -1400,6 +1412,8 @@ asmlinkage void __init xen_start_kernel(void)
/* Make sure ACS will be enabled */
pci_request_acs();
+
+ xen_acpi_sleep_register();
}
#ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3506cd4f9a43..3a73785631ce 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1933,29 +1933,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
#endif
}
-void __init xen_ident_map_ISA(void)
-{
- unsigned long pa;
-
- /*
- * If we're dom0, then linear map the ISA machine addresses into
- * the kernel's address space.
- */
- if (!xen_initial_domain())
- return;
-
- xen_raw_printk("Xen: setup ISA identity maps\n");
-
- for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
- pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
-
- if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
- BUG();
- }
-
- xen_flush_tlb();
-}
-
static void __init xen_post_allocator_init(void)
{
pv_mmu_ops.set_pte = xen_set_pte;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 1b267e75158d..ffd08c414e91 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -499,16 +499,18 @@ static bool alloc_p2m(unsigned long pfn)
return true;
}
-static bool __init __early_alloc_p2m(unsigned long pfn)
+static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary)
{
unsigned topidx, mididx, idx;
+ unsigned long *p2m;
+ unsigned long *mid_mfn_p;
topidx = p2m_top_index(pfn);
mididx = p2m_mid_index(pfn);
idx = p2m_index(pfn);
/* Pfff.. No boundary cross-over, lets get out. */
- if (!idx)
+ if (!idx && check_boundary)
return false;
WARN(p2m_top[topidx][mididx] == p2m_identity,
@@ -522,24 +524,66 @@ static bool __init __early_alloc_p2m(unsigned long pfn)
return false;
/* Boundary cross-over for the edges: */
- if (idx) {
- unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
- unsigned long *mid_mfn_p;
+ p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_init(p2m);
+ p2m_init(p2m);
- p2m_top[topidx][mididx] = p2m;
+ p2m_top[topidx][mididx] = p2m;
- /* For save/restore we need to MFN of the P2M saved */
-
- mid_mfn_p = p2m_top_mfn_p[topidx];
- WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
- "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
- topidx, mididx);
- mid_mfn_p[mididx] = virt_to_mfn(p2m);
+ /* For save/restore we need the MFN of the saved P2M */
+
+ mid_mfn_p = p2m_top_mfn_p[topidx];
+ WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
+ "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
+ topidx, mididx);
+ mid_mfn_p[mididx] = virt_to_mfn(p2m);
+
+ return true;
+}
+
+static bool __init early_alloc_p2m(unsigned long pfn)
+{
+ unsigned topidx = p2m_top_index(pfn);
+ unsigned long *mid_mfn_p;
+ unsigned long **mid;
+
+ mid = p2m_top[topidx];
+ mid_mfn_p = p2m_top_mfn_p[topidx];
+ if (mid == p2m_mid_missing) {
+ mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+
+ p2m_mid_init(mid);
+
+ p2m_top[topidx] = mid;
+ BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
}
- return idx != 0;
+ /* And the save/restore P2M tables.. */
+ if (mid_mfn_p == p2m_mid_missing_mfn) {
+ mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_mid_mfn_init(mid_mfn_p);
+
+ p2m_top_mfn_p[topidx] = mid_mfn_p;
+ p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+ /* Note: we don't set mid_mfn_p[mididx] here,
+ * look in early_alloc_p2m_middle */
+ }
+ return true;
+}
+bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+ if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
+ if (!early_alloc_p2m(pfn))
+ return false;
+
+ if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
+ return false;
+
+ if (!__set_phys_to_machine(pfn, mfn))
+ return false;
+ }
+
+ return true;
}
unsigned long __init set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e)
@@ -559,35 +603,11 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
{
- unsigned topidx = p2m_top_index(pfn);
- unsigned long *mid_mfn_p;
- unsigned long **mid;
-
- mid = p2m_top[topidx];
- mid_mfn_p = p2m_top_mfn_p[topidx];
- if (mid == p2m_mid_missing) {
- mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
- p2m_mid_init(mid);
-
- p2m_top[topidx] = mid;
-
- BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
- }
- /* And the save/restore P2M tables.. */
- if (mid_mfn_p == p2m_mid_missing_mfn) {
- mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_mid_mfn_init(mid_mfn_p);
-
- p2m_top_mfn_p[topidx] = mid_mfn_p;
- p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
- /* Note: we don't set mid_mfn_p[midix] here,
- * look in __early_alloc_p2m */
- }
+ WARN_ON(!early_alloc_p2m(pfn));
}
- __early_alloc_p2m(pfn_s);
- __early_alloc_p2m(pfn_e);
+ early_alloc_p2m_middle(pfn_s, true);
+ early_alloc_p2m_middle(pfn_e, true);
for (pfn = pfn_s; pfn < pfn_e; pfn++)
if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1ba8dff26753..3ebba0753d38 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -26,7 +26,6 @@
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
-
#include "xen-ops.h"
#include "vdso.h"
@@ -84,8 +83,8 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
-static unsigned long __init xen_release_chunk(unsigned long start,
- unsigned long end)
+static unsigned long __init xen_do_chunk(unsigned long start,
+ unsigned long end, bool release)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
@@ -96,30 +95,138 @@ static unsigned long __init xen_release_chunk(unsigned long start,
unsigned long pfn;
int ret;
- for(pfn = start; pfn < end; pfn++) {
+ for (pfn = start; pfn < end; pfn++) {
+ unsigned long frame;
unsigned long mfn = pfn_to_mfn(pfn);
- /* Make sure pfn exists to start with */
- if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
- continue;
-
- set_xen_guest_handle(reservation.extent_start, &mfn);
+ if (release) {
+ /* Make sure pfn exists to start with */
+ if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
+ continue;
+ frame = mfn;
+ } else {
+ if (mfn != INVALID_P2M_ENTRY)
+ continue;
+ frame = pfn;
+ }
+ set_xen_guest_handle(reservation.extent_start, &frame);
reservation.nr_extents = 1;
- ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+ ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
&reservation);
- WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
+ WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
+ release ? "release" : "populate", pfn, ret);
+
if (ret == 1) {
- __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
+ if (release)
+ break;
+ set_xen_guest_handle(reservation.extent_start, &frame);
+ reservation.nr_extents = 1;
+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+ &reservation);
+ break;
+ }
len++;
- }
+ } else
+ break;
}
- printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
- start, end, len);
+ if (len)
+ printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
+ release ? "Freeing" : "Populating",
+ start, end, len,
+ release ? "freed" : "added");
return len;
}
+static unsigned long __init xen_release_chunk(unsigned long start,
+ unsigned long end)
+{
+ return xen_do_chunk(start, end, true);
+}
+
+static unsigned long __init xen_populate_chunk(
+ const struct e820entry *list, size_t map_size,
+ unsigned long max_pfn, unsigned long *last_pfn,
+ unsigned long credits_left)
+{
+ const struct e820entry *entry;
+ unsigned int i;
+ unsigned long done = 0;
+ unsigned long dest_pfn;
+
+ for (i = 0, entry = list; i < map_size; i++, entry++) {
+ unsigned long credits = credits_left;
+ unsigned long s_pfn;
+ unsigned long e_pfn;
+ unsigned long pfns;
+ long capacity;
+
+ if (credits <= 0)
+ break;
+
+ if (entry->type != E820_RAM)
+ continue;
+
+ e_pfn = PFN_UP(entry->addr + entry->size);
+
+ /* We only care about E820 after the xen_start_info->nr_pages */
+ if (e_pfn <= max_pfn)
+ continue;
+
+ s_pfn = PFN_DOWN(entry->addr);
+ /* If the E820 falls within the nr_pages, we want to start
+ * at the nr_pages PFN.
+ * If that would mean going past the E820 entry, skip it
+ */
+ if (s_pfn <= max_pfn) {
+ capacity = e_pfn - max_pfn;
+ dest_pfn = max_pfn;
+ } else {
+ /* last_pfn MUST be within E820_RAM regions */
+ if (*last_pfn && e_pfn >= *last_pfn)
+ s_pfn = *last_pfn;
+ capacity = e_pfn - s_pfn;
+ dest_pfn = s_pfn;
+ }
+ /* If we had filled this E820_RAM entry, go to the next one. */
+ if (capacity <= 0)
+ continue;
+
+ if (credits > capacity)
+ credits = capacity;
+
+ pfns = xen_do_chunk(dest_pfn, dest_pfn + credits, false);
+ done += pfns;
+ credits_left -= pfns;
+ *last_pfn = (dest_pfn + pfns);
+ }
+ return done;
+}
+
+static void __init xen_set_identity_and_release_chunk(
+ unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+ unsigned long *released, unsigned long *identity)
+{
+ unsigned long pfn;
+
+ /*
+ * If the PFNs are currently mapped, the VA mapping also needs
+ * to be updated to be 1:1.
+ */
+ for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
+ (void)HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ mfn_pte(pfn, PAGE_KERNEL_IO), 0);
+
+ if (start_pfn < nr_pages)
+ *released += xen_release_chunk(
+ start_pfn, min(end_pfn, nr_pages));
+
+ *identity += set_phys_range_identity(start_pfn, end_pfn);
+}
+
static unsigned long __init xen_set_identity_and_release(
const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
@@ -142,7 +249,6 @@ static unsigned long __init xen_set_identity_and_release(
*/
for (i = 0, entry = list; i < map_size; i++, entry++) {
phys_addr_t end = entry->addr + entry->size;
-
if (entry->type == E820_RAM || i == map_size - 1) {
unsigned long start_pfn = PFN_DOWN(start);
unsigned long end_pfn = PFN_UP(end);
@@ -150,20 +256,19 @@ static unsigned long __init xen_set_identity_and_release(
if (entry->type == E820_RAM)
end_pfn = PFN_UP(entry->addr);
- if (start_pfn < end_pfn) {
- if (start_pfn < nr_pages)
- released += xen_release_chunk(
- start_pfn, min(end_pfn, nr_pages));
+ if (start_pfn < end_pfn)
+ xen_set_identity_and_release_chunk(
+ start_pfn, end_pfn, nr_pages,
+ &released, &identity);
- identity += set_phys_range_identity(
- start_pfn, end_pfn);
- }
start = end;
}
}
- printk(KERN_INFO "Released %lu pages of unused memory\n", released);
- printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+ if (released)
+ printk(KERN_INFO "Released %lu pages of unused memory\n", released);
+ if (identity)
+ printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
return released;
}
@@ -217,7 +322,9 @@ char * __init xen_memory_setup(void)
int rc;
struct xen_memory_map memmap;
unsigned long max_pages;
+ unsigned long last_pfn = 0;
unsigned long extra_pages = 0;
+ unsigned long populated;
int i;
int op;
@@ -257,9 +364,20 @@ char * __init xen_memory_setup(void)
*/
xen_released_pages = xen_set_identity_and_release(
map, memmap.nr_entries, max_pfn);
- extra_pages += xen_released_pages;
/*
+ * Populate back the non-RAM pages and E820 gaps that had been
+ * released. */
+ populated = xen_populate_chunk(map, memmap.nr_entries,
+ max_pfn, &last_pfn, xen_released_pages);
+
+ extra_pages += (xen_released_pages - populated);
+
+ if (last_pfn > max_pfn) {
+ max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
+ mem_end = PFN_PHYS(max_pfn);
+ }
+ /*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO
* factor the base size. On non-highmem systems, the base
* size is the full initial memory allocation; on highmem it
@@ -272,7 +390,6 @@ char * __init xen_memory_setup(void)
*/
extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
extra_pages);
-
i = 0;
while (i < memmap.nr_entries) {
u64 addr = map[i].addr;
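The net effect in xen_memory_setup() is bookkeeping: pages released from non-RAM regions may be populated back into E820 RAM above nr_pages, only the remainder counts toward extra_pages, and last_pfn can raise max_pfn. A toy calculation with made-up numbers:

#include <stdio.h>

/* Illustration only: the arithmetic behind extra_pages += (xen_released_pages - populated). */
int main(void)
{
	unsigned long xen_released_pages = 4096;	/* freed by xen_set_identity_and_release() */
	unsigned long populated          = 3072;	/* added back by xen_populate_chunk() */
	unsigned long extra_pages        = 0;

	extra_pages += xen_released_pages - populated;
	printf("extra_pages grows by %lu pages\n", extra_pages);	/* 1024 */
	return 0;
}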
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 3700945ed0d5..afb250d22a6b 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
+#include <linux/irq_work.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
@@ -41,10 +42,12 @@ cpumask_var_t xen_cpu_initialized_map;
static DEFINE_PER_CPU(int, xen_resched_irq);
static DEFINE_PER_CPU(int, xen_callfunc_irq);
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
+static DEFINE_PER_CPU(int, xen_irq_work);
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
+static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
/*
* Reschedule call back.
@@ -143,6 +146,17 @@ static int xen_smp_intr_init(unsigned int cpu)
goto fail;
per_cpu(xen_callfuncsingle_irq, cpu) = rc;
+ callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
+ rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
+ cpu,
+ xen_irq_work_interrupt,
+ IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+ callfunc_name,
+ NULL);
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_irq_work, cpu) = rc;
+
return 0;
fail:
@@ -155,6 +169,8 @@ static int xen_smp_intr_init(unsigned int cpu)
if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
NULL);
+ if (per_cpu(xen_irq_work, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
return rc;
}
@@ -407,6 +423,7 @@ static void xen_cpu_die(unsigned int cpu)
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
xen_uninit_lock_cpu(cpu);
xen_teardown_timer(cpu);
@@ -469,8 +486,8 @@ static void xen_smp_send_reschedule(int cpu)
xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
-static void xen_send_IPI_mask(const struct cpumask *mask,
- enum ipi_vector vector)
+static void __xen_send_IPI_mask(const struct cpumask *mask,
+ int vector)
{
unsigned cpu;
@@ -482,7 +499,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
int cpu;
- xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+ __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
/* Make sure other vcpus get a chance to run if they need to. */
for_each_cpu(cpu, mask) {
@@ -495,10 +512,86 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
static void xen_smp_send_call_function_single_ipi(int cpu)
{
- xen_send_IPI_mask(cpumask_of(cpu),
+ __xen_send_IPI_mask(cpumask_of(cpu),
XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
+static inline int xen_map_vector(int vector)
+{
+ int xen_vector;
+
+ switch (vector) {
+ case RESCHEDULE_VECTOR:
+ xen_vector = XEN_RESCHEDULE_VECTOR;
+ break;
+ case CALL_FUNCTION_VECTOR:
+ xen_vector = XEN_CALL_FUNCTION_VECTOR;
+ break;
+ case CALL_FUNCTION_SINGLE_VECTOR:
+ xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
+ break;
+ case IRQ_WORK_VECTOR:
+ xen_vector = XEN_IRQ_WORK_VECTOR;
+ break;
+ default:
+ xen_vector = -1;
+ printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
+ vector);
+ }
+
+ return xen_vector;
+}
+
+void xen_send_IPI_mask(const struct cpumask *mask,
+ int vector)
+{
+ int xen_vector = xen_map_vector(vector);
+
+ if (xen_vector >= 0)
+ __xen_send_IPI_mask(mask, xen_vector);
+}
+
+void xen_send_IPI_all(int vector)
+{
+ int xen_vector = xen_map_vector(vector);
+
+ if (xen_vector >= 0)
+ __xen_send_IPI_mask(cpu_online_mask, xen_vector);
+}
+
+void xen_send_IPI_self(int vector)
+{
+ int xen_vector = xen_map_vector(vector);
+
+ if (xen_vector >= 0)
+ xen_send_IPI_one(smp_processor_id(), xen_vector);
+}
+
+void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
+ int vector)
+{
+ unsigned cpu;
+ unsigned int this_cpu = smp_processor_id();
+
+ if (!(num_online_cpus() > 1))
+ return;
+
+ for_each_cpu_and(cpu, mask, cpu_online_mask) {
+ if (this_cpu == cpu)
+ continue;
+
+ xen_smp_send_call_function_single_ipi(cpu);
+ }
+}
+
+void xen_send_IPI_allbutself(int vector)
+{
+ int xen_vector = xen_map_vector(vector);
+
+ if (xen_vector >= 0)
+ xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
+}
+
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
irq_enter();
@@ -519,6 +612,16 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
+{
+ irq_enter();
+ irq_work_run();
+ inc_irq_stat(apic_irq_work_irqs);
+ irq_exit();
+
+ return IRQ_HANDLED;
+}
+
static const struct smp_ops xen_smp_ops __initconst = {
.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
.smp_prepare_cpus = xen_smp_prepare_cpus,
@@ -565,6 +668,7 @@ static void xen_hvm_cpu_die(unsigned int cpu)
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
native_cpu_die(cpu);
}
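With the apic->send_IPI_* hooks installed in enlighten.c, a generic IPI request is translated by xen_map_vector() into the matching paravirtual vector and delivered over event channels instead of the hardware APIC. A sketch of the resulting call path for irq_work kicks; this is kernel-context code, not a standalone program, and the helper name is hypothetical:

#include <linux/cpumask.h>
#include <asm/irq_vectors.h>
#include "smp.h"	/* xen_send_IPI_mask(), as declared in the new header below */

/* Hypothetical helper: kick irq_work on another CPU under Xen. */
static void example_kick_remote_irq_work(int cpu)
{
	/* generic:  apic->send_IPI_mask(cpumask_of(cpu), IRQ_WORK_VECTOR)       */
	/* xen hook: xen_send_IPI_mask() -> xen_map_vector() ->
	 *           __xen_send_IPI_mask(mask, XEN_IRQ_WORK_VECTOR)              */
	xen_send_IPI_mask(cpumask_of(cpu), IRQ_WORK_VECTOR);
}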
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
new file mode 100644
index 000000000000..8981a76d081a
--- /dev/null
+++ b/arch/x86/xen/smp.h
@@ -0,0 +1,12 @@
+#ifndef _XEN_SMP_H
+
+extern void xen_send_IPI_mask(const struct cpumask *mask,
+ int vector);
+extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
+ int vector);
+extern void xen_send_IPI_allbutself(int vector);
+extern void physflat_send_IPI_allbutself(int vector);
+extern void xen_send_IPI_all(int vector);
+extern void xen_send_IPI_self(int vector);
+
+#endif
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index d69cc6c3f808..83e866d714ce 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -440,12 +440,12 @@ static int __init xen_spinlock_debugfs(void)
debugfs_create_u64("time_total", 0444, d_spin_debug,
&spinlock_stats.time_total);
- xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
- spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
- xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
- spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
- xen_debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
- spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
+ debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
+ spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
+ debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
+ spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
+ debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
+ spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
return 0;
}
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 45c0c0667bd9..202d4c150154 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -28,7 +28,6 @@ void xen_setup_shared_info(void);
void xen_build_mfn_list_list(void);
void xen_setup_machphys_mapping(void);
pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
-void xen_ident_map_ISA(void);
void xen_reserve_top(void);
extern unsigned long xen_max_p2m_pfn;
diff --git a/arch/xtensa/include/asm/gpio.h b/arch/xtensa/include/asm/gpio.h
index a8c9fc46c790..b3799d88ffcf 100644
--- a/arch/xtensa/include/asm/gpio.h
+++ b/arch/xtensa/include/asm/gpio.h
@@ -1,56 +1,4 @@
-/*
- * Generic GPIO API implementation for xtensa.
- *
- * Stolen from x86, which is derived from the generic GPIO API for powerpc:
- *
- * Copyright (c) 2007-2008 MontaVista Software, Inc.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef _ASM_XTENSA_GPIO_H
-#define _ASM_XTENSA_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#ifdef CONFIG_GPIOLIB
-
-/*
- * Just call gpiolib.
- */
-static inline int gpio_get_value(unsigned int gpio)
-{
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
-{
- __gpio_set_value(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned int gpio)
-{
- return __gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(unsigned int gpio)
-{
- return __gpio_to_irq(gpio);
-}
-
-/*
- * Not implemented, yet.
- */
-static inline int irq_to_gpio(unsigned int irq)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_GPIOLIB */
-
-#endif /* _ASM_XTENSA_GPIO_H */
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/arch/xtensa/include/asm/kvm_para.h b/arch/xtensa/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/xtensa/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 3199b76f795d..421bef9c4c48 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -23,8 +23,6 @@ config IOSCHED_DEADLINE
config IOSCHED_CFQ
tristate "CFQ I/O scheduler"
- # If BLK_CGROUP is a module, CFQ has to be built as module.
- depends on (BLK_CGROUP=m && m) || !BLK_CGROUP || BLK_CGROUP=y
default y
---help---
The CFQ I/O scheduler tries to distribute bandwidth equally
@@ -34,8 +32,6 @@ config IOSCHED_CFQ
This is the default I/O scheduler.
- Note: If BLK_CGROUP=m, then CFQ can be built only as module.
-
config CFQ_GROUP_IOSCHED
bool "CFQ Group Scheduling support"
depends on IOSCHED_CFQ && BLK_CGROUP
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 126c341955de..02cf6335e9bd 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -11,1570 +11,612 @@
* Nauman Rafique <nauman@google.com>
*/
#include <linux/ioprio.h>
-#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
-#include "blk-cgroup.h"
#include <linux/genhd.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include "blk-cgroup.h"
+#include "blk.h"
#define MAX_KEY_LEN 100
-static DEFINE_SPINLOCK(blkio_list_lock);
-static LIST_HEAD(blkio_list);
+static DEFINE_MUTEX(blkcg_pol_mutex);
-struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
-EXPORT_SYMBOL_GPL(blkio_root_cgroup);
+struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
+EXPORT_SYMBOL_GPL(blkcg_root);
-/* for encoding cft->private value on file */
-#define BLKIOFILE_PRIVATE(x, val) (((x) << 16) | (val))
-/* What policy owns the file, proportional or throttle */
-#define BLKIOFILE_POLICY(val) (((val) >> 16) & 0xffff)
-#define BLKIOFILE_ATTR(val) ((val) & 0xffff)
+static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
-static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
- struct blkio_policy_node *pn)
+struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
- list_add(&pn->node, &blkcg->policy_list);
+ return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
+ struct blkcg, css);
}
+EXPORT_SYMBOL_GPL(cgroup_to_blkcg);
-static inline bool cftype_blkg_same_policy(struct cftype *cft,
- struct blkio_group *blkg)
+static struct blkcg *task_blkcg(struct task_struct *tsk)
{
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-
- if (blkg->plid == plid)
- return 1;
-
- return 0;
+ return container_of(task_subsys_state(tsk, blkio_subsys_id),
+ struct blkcg, css);
}
-/* Determines if policy node matches cgroup file being accessed */
-static inline bool pn_matches_cftype(struct cftype *cft,
- struct blkio_policy_node *pn)
+struct blkcg *bio_blkcg(struct bio *bio)
{
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int fileid = BLKIOFILE_ATTR(cft->private);
-
- return (plid == pn->plid && fileid == pn->fileid);
+ if (bio && bio->bi_css)
+ return container_of(bio->bi_css, struct blkcg, css);
+ return task_blkcg(current);
}
+EXPORT_SYMBOL_GPL(bio_blkcg);
-/* Must be called with blkcg->lock held */
-static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
+static bool blkcg_policy_enabled(struct request_queue *q,
+ const struct blkcg_policy *pol)
{
- list_del(&pn->node);
+ return pol && test_bit(pol->plid, q->blkcg_pols);
}
-/* Must be called with blkcg->lock held */
-static struct blkio_policy_node *
-blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
- enum blkio_policy_id plid, int fileid)
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkcg_gq *blkg)
{
- struct blkio_policy_node *pn;
-
- list_for_each_entry(pn, &blkcg->policy_list, node) {
- if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
- return pn;
- }
+ int i;
- return NULL;
-}
+ if (!blkg)
+ return;
-struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
-{
- return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
- struct blkio_cgroup, css);
-}
-EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+ struct blkg_policy_data *pd = blkg->pd[i];
-struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
-{
- return container_of(task_subsys_state(tsk, blkio_subsys_id),
- struct blkio_cgroup, css);
-}
-EXPORT_SYMBOL_GPL(task_blkio_cgroup);
+ if (!pd)
+ continue;
-static inline void
-blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
-{
- struct blkio_policy_type *blkiop;
+ if (pol && pol->pd_exit_fn)
+ pol->pd_exit_fn(blkg);
- list_for_each_entry(blkiop, &blkio_list, list) {
- /* If this policy does not own the blkg, do not send updates */
- if (blkiop->plid != blkg->plid)
- continue;
- if (blkiop->ops.blkio_update_group_weight_fn)
- blkiop->ops.blkio_update_group_weight_fn(blkg->key,
- blkg, weight);
+ kfree(pd);
}
+
+ kfree(blkg);
}
-static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
- int fileid)
+/**
+ * blkg_alloc - allocate a blkg
+ * @blkcg: block cgroup the new blkg is associated with
+ * @q: request_queue the new blkg is associated with
+ *
+ * Allocate a new blkg associating @blkcg and @q.
+ */
+static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
{
- struct blkio_policy_type *blkiop;
-
- list_for_each_entry(blkiop, &blkio_list, list) {
-
- /* If this policy does not own the blkg, do not send updates */
- if (blkiop->plid != blkg->plid)
- continue;
-
- if (fileid == BLKIO_THROTL_read_bps_device
- && blkiop->ops.blkio_update_group_read_bps_fn)
- blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
- blkg, bps);
+ struct blkcg_gq *blkg;
+ int i;
- if (fileid == BLKIO_THROTL_write_bps_device
- && blkiop->ops.blkio_update_group_write_bps_fn)
- blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
- blkg, bps);
- }
-}
+ /* alloc and init base part */
+ blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
+ if (!blkg)
+ return NULL;
-static inline void blkio_update_group_iops(struct blkio_group *blkg,
- unsigned int iops, int fileid)
-{
- struct blkio_policy_type *blkiop;
+ blkg->q = q;
+ INIT_LIST_HEAD(&blkg->q_node);
+ blkg->blkcg = blkcg;
+ blkg->refcnt = 1;
- list_for_each_entry(blkiop, &blkio_list, list) {
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+ struct blkg_policy_data *pd;
- /* If this policy does not own the blkg, do not send updates */
- if (blkiop->plid != blkg->plid)
+ if (!blkcg_policy_enabled(q, pol))
continue;
- if (fileid == BLKIO_THROTL_read_iops_device
- && blkiop->ops.blkio_update_group_read_iops_fn)
- blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
- blkg, iops);
+ /* alloc per-policy data and attach it to blkg */
+ pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node);
+ if (!pd) {
+ blkg_free(blkg);
+ return NULL;
+ }
- if (fileid == BLKIO_THROTL_write_iops_device
- && blkiop->ops.blkio_update_group_write_iops_fn)
- blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
- blkg,iops);
+ blkg->pd[i] = pd;
+ pd->blkg = blkg;
}
-}
-/*
- * Add to the appropriate stat variable depending on the request type.
- * This should be called with the blkg->stats_lock held.
- */
-static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
- bool sync)
-{
- if (direction)
- stat[BLKIO_STAT_WRITE] += add;
- else
- stat[BLKIO_STAT_READ] += add;
- if (sync)
- stat[BLKIO_STAT_SYNC] += add;
- else
- stat[BLKIO_STAT_ASYNC] += add;
-}
+ /* invoke per-policy init */
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
-/*
- * Decrements the appropriate stat variable if non-zero depending on the
- * request type. Panics on value being zero.
- * This should be called with the blkg->stats_lock held.
- */
-static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
-{
- if (direction) {
- BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
- stat[BLKIO_STAT_WRITE]--;
- } else {
- BUG_ON(stat[BLKIO_STAT_READ] == 0);
- stat[BLKIO_STAT_READ]--;
- }
- if (sync) {
- BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
- stat[BLKIO_STAT_SYNC]--;
- } else {
- BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
- stat[BLKIO_STAT_ASYNC]--;
+ if (blkcg_policy_enabled(blkg->q, pol))
+ pol->pd_init_fn(blkg);
}
-}
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
- struct blkio_group *curr_blkg)
-{
- if (blkio_blkg_waiting(&blkg->stats))
- return;
- if (blkg == curr_blkg)
- return;
- blkg->stats.start_group_wait_time = sched_clock();
- blkio_mark_blkg_waiting(&blkg->stats);
+ return blkg;
}
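/*
 * Illustrative sketch (not part of the patch): the per-policy data layout
 * blkg_alloc() above implies.  Each pd slot is a bare kzalloc(pol->pd_size)
 * stored as a struct blkg_policy_data pointer, so a policy embeds
 * blkg_policy_data at the head of its private struct and reports the full
 * size via pd_size.  All "example_*" names are hypothetical; "struct
 * blkg_stat" is assumed to be the stat type declared in blk-cgroup.h.
 */
struct example_pd {
	struct blkg_policy_data pd;	/* must come first; blkg->pd[] points here */
	struct blkg_stat serviced;	/* policy-private state follows */
};

static struct blkcg_policy example_policy = {
	.pd_size	= sizeof(struct example_pd),
	/* .pd_init_fn / .pd_exit_fn / .pd_reset_stats_fn as needed */
};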
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
+static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
+ struct request_queue *q)
{
- unsigned long long now;
+ struct blkcg_gq *blkg;
- if (!blkio_blkg_waiting(stats))
- return;
+ blkg = rcu_dereference(blkcg->blkg_hint);
+ if (blkg && blkg->q == q)
+ return blkg;
- now = sched_clock();
- if (time_after64(now, stats->start_group_wait_time))
- stats->group_wait_time += now - stats->start_group_wait_time;
- blkio_clear_blkg_waiting(stats);
+ /*
+ * Hint didn't match. Look up from the radix tree. Note that we
+ * may not be holding queue_lock and thus are not sure whether
+ * @blkg from blkg_tree has already been removed or not, so we
+ * can't update hint to the lookup result. Leave it to the caller.
+ */
+ blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
+ if (blkg && blkg->q == q)
+ return blkg;
+
+ return NULL;
}
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_end_empty_time(struct blkio_group_stats *stats)
+/**
+ * blkg_lookup - lookup blkg for the specified blkcg - q pair
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for the @blkcg - @q pair. This function should be called
+ * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
+ * - see blk_queue_bypass_start() for details.
+ */
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
- unsigned long long now;
-
- if (!blkio_blkg_empty(stats))
- return;
+ WARN_ON_ONCE(!rcu_read_lock_held());
- now = sched_clock();
- if (time_after64(now, stats->start_empty_time))
- stats->empty_time += now - stats->start_empty_time;
- blkio_clear_blkg_empty(stats);
+ if (unlikely(blk_queue_bypass(q)))
+ return NULL;
+ return __blkg_lookup(blkcg, q);
}
+EXPORT_SYMBOL_GPL(blkg_lookup);
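/*
 * Illustrative sketch (not part of the patch): the calling convention the
 * comment above documents.  blkg_lookup() must run under the RCU read lock,
 * and a NULL return means either that no blkg exists yet or that @q is
 * bypassing.  "example_has_blkg" is a hypothetical caller.
 */
static bool example_has_blkg(struct blkcg *blkcg, struct request_queue *q)
{
	bool found;

	rcu_read_lock();
	found = blkg_lookup(blkcg, q) != NULL;
	rcu_read_unlock();

	return found;
}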
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
+ __releases(q->queue_lock) __acquires(q->queue_lock)
{
- unsigned long flags;
+ struct blkcg_gq *blkg;
+ int ret;
- spin_lock_irqsave(&blkg->stats_lock, flags);
- BUG_ON(blkio_blkg_idling(&blkg->stats));
- blkg->stats.start_idle_time = sched_clock();
- blkio_mark_blkg_idling(&blkg->stats);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ lockdep_assert_held(q->queue_lock);
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
- unsigned long flags;
- unsigned long long now;
- struct blkio_group_stats *stats;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
- if (blkio_blkg_idling(stats)) {
- now = sched_clock();
- if (time_after64(now, stats->start_idle_time))
- stats->idle_time += now - stats->start_idle_time;
- blkio_clear_blkg_idling(stats);
+ /* lookup and update hint on success, see __blkg_lookup() for details */
+ blkg = __blkg_lookup(blkcg, q);
+ if (blkg) {
+ rcu_assign_pointer(blkcg->blkg_hint, blkg);
+ return blkg;
}
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
-{
- unsigned long flags;
- struct blkio_group_stats *stats;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
- stats->avg_queue_size_sum +=
- stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
- stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
- stats->avg_queue_size_samples++;
- blkio_update_group_wait_time(stats);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
+ /* blkg holds a reference to blkcg */
+ if (!css_tryget(&blkcg->css))
+ return ERR_PTR(-EINVAL);
-void blkiocg_set_start_empty_time(struct blkio_group *blkg)
-{
- unsigned long flags;
- struct blkio_group_stats *stats;
+ /* allocate */
+ ret = -ENOMEM;
+ blkg = blkg_alloc(blkcg, q);
+ if (unlikely(!blkg))
+ goto err_put;
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
+ /* insert */
+ ret = radix_tree_preload(GFP_ATOMIC);
+ if (ret)
+ goto err_free;
- if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
- stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
- return;
+ spin_lock(&blkcg->lock);
+ ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
+ if (likely(!ret)) {
+ hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
+ list_add(&blkg->q_node, &q->blkg_list);
}
+ spin_unlock(&blkcg->lock);
- /*
- * group is already marked empty. This can happen if cfqq got new
- * request in parent group and moved to this group while being added
- * to service tree. Just ignore the event and move on.
- */
- if(blkio_blkg_empty(stats)) {
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
- return;
- }
+ radix_tree_preload_end();
- stats->start_empty_time = sched_clock();
- blkio_mark_blkg_empty(stats);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ if (!ret)
+ return blkg;
+err_free:
+ blkg_free(blkg);
+err_put:
+ css_put(&blkcg->css);
+ return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
-void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue)
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
{
- blkg->stats.dequeue += dequeue;
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
-#else
-static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
- struct blkio_group *curr_blkg) {}
-static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
-#endif
-
-void blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction,
- bool sync)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
- sync);
- blkio_end_empty_time(&blkg->stats);
- blkio_set_start_group_wait_time(blkg, curr_blkg);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ /*
+ * This could be the first entry point of blkcg implementation and
+ * we shouldn't allow anything to go through for a bypassing queue.
+ */
+ if (unlikely(blk_queue_bypass(q)))
+ return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+ return __blkg_lookup_create(blkcg, q);
}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
+EXPORT_SYMBOL_GPL(blkg_lookup_create);
-void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync)
+static void blkg_destroy(struct blkcg_gq *blkg)
{
- unsigned long flags;
+ struct request_queue *q = blkg->q;
+ struct blkcg *blkcg = blkg->blkcg;
- spin_lock_irqsave(&blkg->stats_lock, flags);
- blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
- direction, sync);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
+ lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&blkcg->lock);
-void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
- unsigned long unaccounted_time)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- blkg->stats.time += time;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- blkg->stats.unaccounted_time += unaccounted_time;
-#endif
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
+ /* Something wrong if we are trying to remove same group twice */
+ WARN_ON_ONCE(list_empty(&blkg->q_node));
+ WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
-/*
- * should be called under rcu read lock or queue lock to make sure blkg pointer
- * is valid.
- */
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync)
-{
- struct blkio_group_stats_cpu *stats_cpu;
- unsigned long flags;
+ radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
+ list_del_init(&blkg->q_node);
+ hlist_del_init_rcu(&blkg->blkcg_node);
/*
- * Disabling interrupts to provide mutual exclusion between two
- * writes on same cpu. It probably is not needed for 64bit. Not
- * optimizing that case yet.
+ * Both setting lookup hint to and clearing it from @blkg are done
+ * under queue_lock. If it's not pointing to @blkg now, it never
+ * will. Hint assignment itself can race safely.
*/
- local_irq_save(flags);
-
- stats_cpu = this_cpu_ptr(blkg->stats_cpu);
-
- u64_stats_update_begin(&stats_cpu->syncp);
- stats_cpu->sectors += bytes >> 9;
- blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
- 1, direction, sync);
- blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
- bytes, direction, sync);
- u64_stats_update_end(&stats_cpu->syncp);
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
-
-void blkiocg_update_completion_stats(struct blkio_group *blkg,
- uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
-{
- struct blkio_group_stats *stats;
- unsigned long flags;
- unsigned long long now = sched_clock();
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
- if (time_after64(now, io_start_time))
- blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
- now - io_start_time, direction, sync);
- if (time_after64(io_start_time, start_time))
- blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
- io_start_time - start_time, direction, sync);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
-
-/* Merged stats are per cpu. */
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
- bool sync)
-{
- struct blkio_group_stats_cpu *stats_cpu;
- unsigned long flags;
+ if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
+ rcu_assign_pointer(blkcg->blkg_hint, NULL);
/*
- * Disabling interrupts to provide mutual exclusion between two
- * writes on same cpu. It probably is not needed for 64bit. Not
- * optimizing that case yet.
+ * Put the reference taken at the time of creation so that when all
+ * queues are gone, group can be destroyed.
*/
- local_irq_save(flags);
-
- stats_cpu = this_cpu_ptr(blkg->stats_cpu);
-
- u64_stats_update_begin(&stats_cpu->syncp);
- blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
- direction, sync);
- u64_stats_update_end(&stats_cpu->syncp);
- local_irq_restore(flags);
+ blkg_put(blkg);
}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
-/*
- * This function allocates the per cpu stats for blkio_group. Should be called
- * from sleepable context as alloc_per_cpu() requires that.
+/**
+ * blkg_destroy_all - destroy all blkgs associated with a request_queue
+ * @q: request_queue of interest
+ *
+ * Destroy all blkgs associated with @q.
*/
-int blkio_alloc_blkg_stats(struct blkio_group *blkg)
+static void blkg_destroy_all(struct request_queue *q)
{
- /* Allocate memory for per cpu stats */
- blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
- if (!blkg->stats_cpu)
- return -ENOMEM;
- return 0;
-}
-EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
+ struct blkcg_gq *blkg, *n;
-void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev,
- enum blkio_policy_id plid)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- spin_lock_init(&blkg->stats_lock);
- rcu_assign_pointer(blkg->key, key);
- blkg->blkcg_id = css_id(&blkcg->css);
- hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
- blkg->plid = plid;
- spin_unlock_irqrestore(&blkcg->lock, flags);
- /* Need to take css reference ? */
- cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
- blkg->dev = dev;
-}
-EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
+ lockdep_assert_held(q->queue_lock);
-static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
- hlist_del_init_rcu(&blkg->blkcg_node);
- blkg->blkcg_id = 0;
-}
+ list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
+ struct blkcg *blkcg = blkg->blkcg;
-/*
- * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
- * indicating that blk_group was unhashed by the time we got to it.
- */
-int blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
- struct blkio_cgroup *blkcg;
- unsigned long flags;
- struct cgroup_subsys_state *css;
- int ret = 1;
-
- rcu_read_lock();
- css = css_lookup(&blkio_subsys, blkg->blkcg_id);
- if (css) {
- blkcg = container_of(css, struct blkio_cgroup, css);
- spin_lock_irqsave(&blkcg->lock, flags);
- if (!hlist_unhashed(&blkg->blkcg_node)) {
- __blkiocg_del_blkio_group(blkg);
- ret = 0;
- }
- spin_unlock_irqrestore(&blkcg->lock, flags);
+ spin_lock(&blkcg->lock);
+ blkg_destroy(blkg);
+ spin_unlock(&blkcg->lock);
}
-
- rcu_read_unlock();
- return ret;
}
-EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
-/* called under rcu_read_lock(). */
-struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
+static void blkg_rcu_free(struct rcu_head *rcu_head)
{
- struct blkio_group *blkg;
- struct hlist_node *n;
- void *__key;
-
- hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
- __key = blkg->key;
- if (__key == key)
- return blkg;
- }
-
- return NULL;
+ blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}
-EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
-static void blkio_reset_stats_cpu(struct blkio_group *blkg)
+void __blkg_release(struct blkcg_gq *blkg)
{
- struct blkio_group_stats_cpu *stats_cpu;
- int i, j, k;
+ /* release the extra blkcg reference this blkg has been holding */
+ css_put(&blkg->blkcg->css);
+
/*
- * Note: On 64 bit arch this should not be an issue. This has the
- * possibility of returning some inconsistent value on 32bit arch
- * as 64bit update on 32bit is non atomic. Taking care of this
- * corner case makes code very complicated, like sending IPIs to
- * cpus, taking care of stats of offline cpus etc.
+ * A group is freed in an RCU manner, but holding an RCU read lock does not
+ * mean that one can access all the fields of blkg and assume these
+ * are valid. For example, don't try to follow throtl_data and
+ * request queue links.
*
- * reset stats is anyway more of a debug feature and this sounds a
- * corner case. So I am not complicating the code yet until and
- * unless this becomes a real issue.
+ * Having a reference to blkg under an RCU read lock allows access only
+ * to values local to groups, like group stats and group rate limits.
*/
- for_each_possible_cpu(i) {
- stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
- stats_cpu->sectors = 0;
- for(j = 0; j < BLKIO_STAT_CPU_NR; j++)
- for (k = 0; k < BLKIO_STAT_TOTAL; k++)
- stats_cpu->stat_arr_cpu[j][k] = 0;
- }
+ call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
+EXPORT_SYMBOL_GPL(__blkg_release);
-static int
-blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
+static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
+ u64 val)
{
- struct blkio_cgroup *blkcg;
- struct blkio_group *blkg;
- struct blkio_group_stats *stats;
+ struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+ struct blkcg_gq *blkg;
struct hlist_node *n;
- uint64_t queued[BLKIO_STAT_TOTAL];
int i;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- bool idling, waiting, empty;
- unsigned long long now = sched_clock();
-#endif
- blkcg = cgroup_to_blkio_cgroup(cgroup);
+ mutex_lock(&blkcg_pol_mutex);
spin_lock_irq(&blkcg->lock);
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
- spin_lock(&blkg->stats_lock);
- stats = &blkg->stats;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- idling = blkio_blkg_idling(stats);
- waiting = blkio_blkg_waiting(stats);
- empty = blkio_blkg_empty(stats);
-#endif
- for (i = 0; i < BLKIO_STAT_TOTAL; i++)
- queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
- memset(stats, 0, sizeof(struct blkio_group_stats));
- for (i = 0; i < BLKIO_STAT_TOTAL; i++)
- stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- if (idling) {
- blkio_mark_blkg_idling(stats);
- stats->start_idle_time = now;
- }
- if (waiting) {
- blkio_mark_blkg_waiting(stats);
- stats->start_group_wait_time = now;
- }
- if (empty) {
- blkio_mark_blkg_empty(stats);
- stats->start_empty_time = now;
- }
-#endif
- spin_unlock(&blkg->stats_lock);
-
- /* Reset Per cpu stats which don't take blkg->stats_lock */
- blkio_reset_stats_cpu(blkg);
- }
-
- spin_unlock_irq(&blkcg->lock);
- return 0;
-}
-
-static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
- int chars_left, bool diskname_only)
-{
- snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
- chars_left -= strlen(str);
- if (chars_left <= 0) {
- printk(KERN_WARNING
- "Possibly incorrect cgroup stat display format");
- return;
- }
- if (diskname_only)
- return;
- switch (type) {
- case BLKIO_STAT_READ:
- strlcat(str, " Read", chars_left);
- break;
- case BLKIO_STAT_WRITE:
- strlcat(str, " Write", chars_left);
- break;
- case BLKIO_STAT_SYNC:
- strlcat(str, " Sync", chars_left);
- break;
- case BLKIO_STAT_ASYNC:
- strlcat(str, " Async", chars_left);
- break;
- case BLKIO_STAT_TOTAL:
- strlcat(str, " Total", chars_left);
- break;
- default:
- strlcat(str, " Invalid", chars_left);
- }
-}
-
-static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
- struct cgroup_map_cb *cb, dev_t dev)
-{
- blkio_get_key_name(0, dev, str, chars_left, true);
- cb->fill(cb, str, val);
- return val;
-}
-
-
-static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
- enum stat_type_cpu type, enum stat_sub_type sub_type)
-{
- int cpu;
- struct blkio_group_stats_cpu *stats_cpu;
- u64 val = 0, tval;
-
- for_each_possible_cpu(cpu) {
- unsigned int start;
- stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);
-
- do {
- start = u64_stats_fetch_begin(&stats_cpu->syncp);
- if (type == BLKIO_STAT_CPU_SECTORS)
- tval = stats_cpu->sectors;
- else
- tval = stats_cpu->stat_arr_cpu[type][sub_type];
- } while(u64_stats_fetch_retry(&stats_cpu->syncp, start));
-
- val += tval;
- }
-
- return val;
-}
-
-static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
- struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
-{
- uint64_t disk_total, val;
- char key_str[MAX_KEY_LEN];
- enum stat_sub_type sub_type;
- if (type == BLKIO_STAT_CPU_SECTORS) {
- val = blkio_read_stat_cpu(blkg, type, 0);
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
- }
-
- for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
- sub_type++) {
- blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
- val = blkio_read_stat_cpu(blkg, type, sub_type);
- cb->fill(cb, key_str, val);
- }
-
- disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
- blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
-
- blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
- cb->fill(cb, key_str, disk_total);
- return disk_total;
-}
-
-/* This should be called with blkg->stats_lock held */
-static uint64_t blkio_get_stat(struct blkio_group *blkg,
- struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
-{
- uint64_t disk_total;
- char key_str[MAX_KEY_LEN];
- enum stat_sub_type sub_type;
-
- if (type == BLKIO_STAT_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.time, cb, dev);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- if (type == BLKIO_STAT_UNACCOUNTED_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.unaccounted_time, cb, dev);
- if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
- uint64_t sum = blkg->stats.avg_queue_size_sum;
- uint64_t samples = blkg->stats.avg_queue_size_samples;
- if (samples)
- do_div(sum, samples);
- else
- sum = 0;
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
- }
- if (type == BLKIO_STAT_GROUP_WAIT_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.group_wait_time, cb, dev);
- if (type == BLKIO_STAT_IDLE_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.idle_time, cb, dev);
- if (type == BLKIO_STAT_EMPTY_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.empty_time, cb, dev);
- if (type == BLKIO_STAT_DEQUEUE)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.dequeue, cb, dev);
-#endif
-
- for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
- sub_type++) {
- blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
- cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
- }
- disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
- blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
- blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
- cb->fill(cb, key_str, disk_total);
- return disk_total;
-}
-
-static int blkio_policy_parse_and_set(char *buf,
- struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
-{
- struct gendisk *disk = NULL;
- char *s[4], *p, *major_s = NULL, *minor_s = NULL;
- unsigned long major, minor;
- int i = 0, ret = -EINVAL;
- int part;
- dev_t dev;
- u64 temp;
-
- memset(s, 0, sizeof(s));
-
- while ((p = strsep(&buf, " ")) != NULL) {
- if (!*p)
- continue;
-
- s[i++] = p;
-
- /* Prevent from inputing too many things */
- if (i == 3)
- break;
- }
-
- if (i != 2)
- goto out;
-
- p = strsep(&s[0], ":");
- if (p != NULL)
- major_s = p;
- else
- goto out;
-
- minor_s = s[0];
- if (!minor_s)
- goto out;
-
- if (strict_strtoul(major_s, 10, &major))
- goto out;
-
- if (strict_strtoul(minor_s, 10, &minor))
- goto out;
-
- dev = MKDEV(major, minor);
-
- if (strict_strtoull(s[1], 10, &temp))
- goto out;
-
- /* For rule removal, do not check for device presence. */
- if (temp) {
- disk = get_gendisk(dev, &part);
- if (!disk || part) {
- ret = -ENODEV;
- goto out;
- }
- }
-
- newpn->dev = dev;
-
- switch (plid) {
- case BLKIO_POLICY_PROP:
- if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
- temp > BLKIO_WEIGHT_MAX)
- goto out;
-
- newpn->plid = plid;
- newpn->fileid = fileid;
- newpn->val.weight = temp;
- break;
- case BLKIO_POLICY_THROTL:
- switch(fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- newpn->plid = plid;
- newpn->fileid = fileid;
- newpn->val.bps = temp;
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- if (temp > THROTL_IOPS_MAX)
- goto out;
-
- newpn->plid = plid;
- newpn->fileid = fileid;
- newpn->val.iops = (unsigned int)temp;
- break;
- }
- break;
- default:
- BUG();
- }
- ret = 0;
-out:
- put_disk(disk);
- return ret;
-}
-
-unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
- dev_t dev)
-{
- struct blkio_policy_node *pn;
- unsigned long flags;
- unsigned int weight;
-
- spin_lock_irqsave(&blkcg->lock, flags);
-
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
- BLKIO_PROP_weight_device);
- if (pn)
- weight = pn->val.weight;
- else
- weight = blkcg->weight;
-
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return weight;
-}
-EXPORT_SYMBOL_GPL(blkcg_get_weight);
-
-uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
-{
- struct blkio_policy_node *pn;
- unsigned long flags;
- uint64_t bps = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_bps_device);
- if (pn)
- bps = pn->val.bps;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return bps;
-}
-
-uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
-{
- struct blkio_policy_node *pn;
- unsigned long flags;
- uint64_t bps = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_bps_device);
- if (pn)
- bps = pn->val.bps;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return bps;
-}
-
-unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
-{
- struct blkio_policy_node *pn;
- unsigned long flags;
- unsigned int iops = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_iops_device);
- if (pn)
- iops = pn->val.iops;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return iops;
-}
-
-unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
-{
- struct blkio_policy_node *pn;
- unsigned long flags;
- unsigned int iops = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_iops_device);
- if (pn)
- iops = pn->val.iops;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return iops;
-}
+ /*
+ * Note that stat reset is racy - it doesn't synchronize against
+ * stat updates. This is a debug feature which shouldn't exist
+ * anyway. If you get hit by a race, retry.
+ */
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
-/* Checks whether user asked for deleting a policy rule */
-static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
-{
- switch(pn->plid) {
- case BLKIO_POLICY_PROP:
- if (pn->val.weight == 0)
- return 1;
- break;
- case BLKIO_POLICY_THROTL:
- switch(pn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- if (pn->val.bps == 0)
- return 1;
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- if (pn->val.iops == 0)
- return 1;
+ if (blkcg_policy_enabled(blkg->q, pol) &&
+ pol->pd_reset_stats_fn)
+ pol->pd_reset_stats_fn(blkg);
}
- break;
- default:
- BUG();
}
+ spin_unlock_irq(&blkcg->lock);
+ mutex_unlock(&blkcg_pol_mutex);
return 0;
}
-static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
- struct blkio_policy_node *newpn)
-{
- switch(oldpn->plid) {
- case BLKIO_POLICY_PROP:
- oldpn->val.weight = newpn->val.weight;
- break;
- case BLKIO_POLICY_THROTL:
- switch(newpn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- oldpn->val.bps = newpn->val.bps;
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- oldpn->val.iops = newpn->val.iops;
- }
- break;
- default:
- BUG();
- }
-}
-
-/*
- * Some rules/values in blkg have changed. Propagate those to respective
- * policies.
- */
-static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, struct blkio_policy_node *pn)
+static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
- unsigned int weight, iops;
- u64 bps;
-
- switch(pn->plid) {
- case BLKIO_POLICY_PROP:
- weight = pn->val.weight ? pn->val.weight :
- blkcg->weight;
- blkio_update_group_weight(blkg, weight);
- break;
- case BLKIO_POLICY_THROTL:
- switch(pn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- bps = pn->val.bps ? pn->val.bps : (-1);
- blkio_update_group_bps(blkg, bps, pn->fileid);
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- iops = pn->val.iops ? pn->val.iops : (-1);
- blkio_update_group_iops(blkg, iops, pn->fileid);
- break;
- }
- break;
- default:
- BUG();
- }
+ /* some drivers (floppy) instantiate a queue w/o disk registered */
+ if (blkg->q->backing_dev_info.dev)
+ return dev_name(blkg->q->backing_dev_info.dev);
+ return NULL;
}
-/*
- * A policy node rule has been updated. Propagate this update to all the
- * block groups which might be affected by this update.
+/**
+ * blkcg_print_blkgs - helper for printing per-blkg data
+ * @sf: seq_file to print to
+ * @blkcg: blkcg of interest
+ * @prfill: fill function to print out a blkg
+ * @pol: policy in question
+ * @data: data to be passed to @prfill
+ * @show_total: to print out sum of prfill return values or not
+ *
+ * This function invokes @prfill on each blkg of @blkcg if pd for the
+ * policy specified by @pol exists. @prfill is invoked with @sf, the
+ * policy data and @data. If @show_total is %true, the sum of the return
+ * values from @prfill is printed with "Total" label at the end.
+ *
+ * This is to be used to construct print functions for the
+ * cftype->read_seq_string method.
*/
-static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
- struct blkio_policy_node *pn)
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+ u64 (*prfill)(struct seq_file *,
+ struct blkg_policy_data *, int),
+ const struct blkcg_policy *pol, int data,
+ bool show_total)
{
- struct blkio_group *blkg;
+ struct blkcg_gq *blkg;
struct hlist_node *n;
+ u64 total = 0;
- spin_lock(&blkio_list_lock);
spin_lock_irq(&blkcg->lock);
-
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
- if (pn->dev != blkg->dev || pn->plid != blkg->plid)
- continue;
- blkio_update_blkg_policy(blkcg, blkg, pn);
- }
-
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
+ if (blkcg_policy_enabled(blkg->q, pol))
+ total += prfill(sf, blkg->pd[pol->plid], data);
spin_unlock_irq(&blkcg->lock);
- spin_unlock(&blkio_list_lock);
+
+ if (show_total)
+ seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
+EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
-static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
- const char *buffer)
+/**
+ * __blkg_prfill_u64 - prfill helper for a single u64 value
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @v: value to print
+ *
+ * Print @v to @sf for the device associated with @pd.
+ */
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
- int ret = 0;
- char *buf;
- struct blkio_policy_node *newpn, *pn;
- struct blkio_cgroup *blkcg;
- int keep_newpn = 0;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int fileid = BLKIOFILE_ATTR(cft->private);
-
- buf = kstrdup(buffer, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
- if (!newpn) {
- ret = -ENOMEM;
- goto free_buf;
- }
-
- ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
- if (ret)
- goto free_newpn;
-
- blkcg = cgroup_to_blkio_cgroup(cgrp);
-
- spin_lock_irq(&blkcg->lock);
-
- pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
- if (!pn) {
- if (!blkio_delete_rule_command(newpn)) {
- blkio_policy_insert_node(blkcg, newpn);
- keep_newpn = 1;
- }
- spin_unlock_irq(&blkcg->lock);
- goto update_io_group;
- }
-
- if (blkio_delete_rule_command(newpn)) {
- blkio_policy_delete_node(pn);
- kfree(pn);
- spin_unlock_irq(&blkcg->lock);
- goto update_io_group;
- }
- spin_unlock_irq(&blkcg->lock);
+ const char *dname = blkg_dev_name(pd->blkg);
- blkio_update_policy_rule(pn, newpn);
+ if (!dname)
+ return 0;
-update_io_group:
- blkio_update_policy_node_blkg(blkcg, newpn);
-
-free_newpn:
- if (!keep_newpn)
- kfree(newpn);
-free_buf:
- kfree(buf);
- return ret;
+ seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
+ return v;
}
+EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
-static void
-blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
-{
- switch(pn->plid) {
- case BLKIO_POLICY_PROP:
- if (pn->fileid == BLKIO_PROP_weight_device)
- seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
- MINOR(pn->dev), pn->val.weight);
- break;
- case BLKIO_POLICY_THROTL:
- switch(pn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
- MINOR(pn->dev), pn->val.bps);
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
- MINOR(pn->dev), pn->val.iops);
- break;
- }
- break;
- default:
- BUG();
- }
-}
+/**
+ * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @rwstat: rwstat to print
+ *
+ * Print @rwstat to @sf for the device associated with @pd.
+ */
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ const struct blkg_rwstat *rwstat)
+{
+ static const char *rwstr[] = {
+ [BLKG_RWSTAT_READ] = "Read",
+ [BLKG_RWSTAT_WRITE] = "Write",
+ [BLKG_RWSTAT_SYNC] = "Sync",
+ [BLKG_RWSTAT_ASYNC] = "Async",
+ };
+ const char *dname = blkg_dev_name(pd->blkg);
+ u64 v;
+ int i;
-/* cgroup files which read their data from policy nodes end up here */
-static void blkio_read_policy_node_files(struct cftype *cft,
- struct blkio_cgroup *blkcg, struct seq_file *m)
-{
- struct blkio_policy_node *pn;
-
- if (!list_empty(&blkcg->policy_list)) {
- spin_lock_irq(&blkcg->lock);
- list_for_each_entry(pn, &blkcg->policy_list, node) {
- if (!pn_matches_cftype(cft, pn))
- continue;
- blkio_print_policy_node(m, pn);
- }
- spin_unlock_irq(&blkcg->lock);
- }
-}
+ if (!dname)
+ return 0;
-static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *m)
-{
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
-
- blkcg = cgroup_to_blkio_cgroup(cgrp);
-
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_weight_device:
- blkio_read_policy_node_files(cft, blkcg, m);
- return 0;
- default:
- BUG();
- }
- break;
- case BLKIO_POLICY_THROTL:
- switch(name){
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- blkio_read_policy_node_files(cft, blkcg, m);
- return 0;
- default:
- BUG();
- }
- break;
- default:
- BUG();
- }
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
+ (unsigned long long)rwstat->cnt[i]);
- return 0;
+ v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
+ seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
+ return v;
}
-static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
- struct cftype *cft, struct cgroup_map_cb *cb,
- enum stat_type type, bool show_total, bool pcpu)
+/**
+ * blkg_prfill_stat - prfill callback for blkg_stat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_stat in @pd
+ *
+ * prfill callback for printing a blkg_stat.
+ */
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
- struct blkio_group *blkg;
- struct hlist_node *n;
- uint64_t cgroup_total = 0;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
- if (blkg->dev) {
- if (!cftype_blkg_same_policy(cft, blkg))
- continue;
- if (pcpu)
- cgroup_total += blkio_get_stat_cpu(blkg, cb,
- blkg->dev, type);
- else {
- spin_lock_irq(&blkg->stats_lock);
- cgroup_total += blkio_get_stat(blkg, cb,
- blkg->dev, type);
- spin_unlock_irq(&blkg->stats_lock);
- }
- }
- }
- if (show_total)
- cb->fill(cb, "Total", cgroup_total);
- rcu_read_unlock();
- return 0;
+ return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
+EXPORT_SYMBOL_GPL(blkg_prfill_stat);
-/* All map kind of cgroup file get serviced by this function */
-static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
- struct cgroup_map_cb *cb)
+/**
+ * blkg_prfill_rwstat - prfill callback for blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_rwstat in @pd
+ *
+ * prfill callback for printing a blkg_rwstat.
+ */
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
{
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
-
- blkcg = cgroup_to_blkio_cgroup(cgrp);
-
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_TIME, 0, 0);
- case BLKIO_PROP_sectors:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SECTORS, 0, 1);
- case BLKIO_PROP_io_service_bytes:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
- case BLKIO_PROP_io_serviced:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICED, 1, 1);
- case BLKIO_PROP_io_service_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_SERVICE_TIME, 1, 0);
- case BLKIO_PROP_io_wait_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_WAIT_TIME, 1, 0);
- case BLKIO_PROP_io_merged:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_MERGED, 1, 1);
- case BLKIO_PROP_io_queued:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_QUEUED, 1, 0);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- case BLKIO_PROP_unaccounted_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
- case BLKIO_PROP_dequeue:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_DEQUEUE, 0, 0);
- case BLKIO_PROP_avg_queue_size:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
- case BLKIO_PROP_group_wait_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
- case BLKIO_PROP_idle_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_IDLE_TIME, 0, 0);
- case BLKIO_PROP_empty_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_EMPTY_TIME, 0, 0);
-#endif
- default:
- BUG();
- }
- break;
- case BLKIO_POLICY_THROTL:
- switch(name){
- case BLKIO_THROTL_io_service_bytes:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
- case BLKIO_THROTL_io_serviced:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICED, 1, 1);
- default:
- BUG();
- }
- break;
- default:
- BUG();
- }
+ struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
- return 0;
+ return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
+EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
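/*
 * Illustrative sketch (not part of the patch): a read_seq_string handler
 * built from blkcg_print_blkgs() and the prfill helpers above, continuing
 * the hypothetical "example_policy" / "struct example_pd" sketch from
 * earlier.  Real users are the cfq and throttle policies.  The offset is
 * taken from the policy data pointer, i.e. from the blkg_policy_data
 * embedded at the head of the policy's private struct.
 */
static int example_print_serviced(struct cgroup *cgrp, struct cftype *cft,
				  struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &example_policy,
			  offsetof(struct example_pd, serviced), false);
	return 0;
}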
-static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
+/**
+ * blkg_conf_prep - parse and prepare for per-blkg config update
+ * @blkcg: target block cgroup
+ * @pol: target policy
+ * @input: input string
+ * @ctx: blkg_conf_ctx to be filled
+ *
+ * Parse per-blkg config update from @input and initialize @ctx with the
+ * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new
+ * value. This function returns with RCU read lock and queue lock held and
+ * must be paired with blkg_conf_finish().
+ */
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ const char *input, struct blkg_conf_ctx *ctx)
+ __acquires(rcu) __acquires(disk->queue->queue_lock)
{
- struct blkio_group *blkg;
- struct hlist_node *n;
- struct blkio_policy_node *pn;
+ struct gendisk *disk;
+ struct blkcg_gq *blkg;
+ unsigned int major, minor;
+ unsigned long long v;
+ int part, ret;
- if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
+ if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
return -EINVAL;
- spin_lock(&blkio_list_lock);
- spin_lock_irq(&blkcg->lock);
- blkcg->weight = (unsigned int)val;
-
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
- pn = blkio_policy_search_node(blkcg, blkg->dev,
- BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
- if (pn)
- continue;
-
- blkio_update_group_weight(blkg, blkcg->weight);
- }
- spin_unlock_irq(&blkcg->lock);
- spin_unlock(&blkio_list_lock);
- return 0;
-}
+ disk = get_gendisk(MKDEV(major, minor), &part);
+ if (!disk || part)
+ return -EINVAL;
-static u64 blkiocg_file_read_u64 (struct cgroup *cgrp, struct cftype *cft) {
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
+ rcu_read_lock();
+ spin_lock_irq(disk->queue->queue_lock);
- blkcg = cgroup_to_blkio_cgroup(cgrp);
+ if (blkcg_policy_enabled(disk->queue, pol))
+ blkg = blkg_lookup_create(blkcg, disk->queue);
+ else
+ blkg = ERR_PTR(-EINVAL);
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_weight:
- return (u64)blkcg->weight;
+ if (IS_ERR(blkg)) {
+ ret = PTR_ERR(blkg);
+ rcu_read_unlock();
+ spin_unlock_irq(disk->queue->queue_lock);
+ put_disk(disk);
+ /*
+ * If queue was bypassing, we should retry. Do so after a
+ * short msleep(). It isn't strictly necessary but queue
+ * can be bypassing for some time and it's always nice to
+ * avoid busy looping.
+ */
+ if (ret == -EBUSY) {
+ msleep(10);
+ ret = restart_syscall();
}
- break;
- default:
- BUG();
+ return ret;
}
+
+ ctx->disk = disk;
+ ctx->blkg = blkg;
+ ctx->v = v;
return 0;
}
+EXPORT_SYMBOL_GPL(blkg_conf_prep);
-static int
-blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
+/**
+ * blkg_conf_finish - finish up per-blkg config update
+ * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
+ *
+ * Finish up after per-blkg config update. This function must be paired
+ * with blkg_conf_prep().
+ */
+void blkg_conf_finish(struct blkg_conf_ctx *ctx)
+ __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
-
- blkcg = cgroup_to_blkio_cgroup(cgrp);
-
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_weight:
- return blkio_weight_write(blkcg, val);
- }
- break;
- default:
- BUG();
- }
-
- return 0;
+ spin_unlock_irq(ctx->disk->queue->queue_lock);
+ rcu_read_unlock();
+ put_disk(ctx->disk);
}
+EXPORT_SYMBOL_GPL(blkg_conf_finish);
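/*
 * Illustrative sketch (not part of the patch): how a policy's cgroup write
 * handler is expected to pair blkg_conf_prep() with blkg_conf_finish(), per
 * the comments above.  "example_policy" and the way ctx.v is applied are
 * hypothetical; the real users are the cfq weight and throttle limit files.
 */
static int example_write_limit(struct blkcg *blkcg, const char *buf)
{
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
	if (ret)
		return ret;

	/* the RCU read lock and the queue lock are held until _finish() */
	/* ... apply ctx.v to ctx.blkg->pd[example_policy.plid] here ... */

	blkg_conf_finish(&ctx);
	return 0;
}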
-struct cftype blkio_files[] = {
- {
- .name = "weight_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_weight_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
- {
- .name = "weight",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_weight),
- .read_u64 = blkiocg_file_read_u64,
- .write_u64 = blkiocg_file_write_u64,
- },
- {
- .name = "time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "sectors",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_sectors),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_service_bytes",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_service_bytes),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_serviced",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_serviced),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_service_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_service_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_wait_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_wait_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_merged",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_merged),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_queued",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_queued),
- .read_map = blkiocg_file_read_map,
- },
+struct cftype blkcg_files[] = {
{
.name = "reset_stats",
- .write_u64 = blkiocg_reset_stats,
- },
-#ifdef CONFIG_BLK_DEV_THROTTLING
- {
- .name = "throttle.read_bps_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_bps_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
-
- {
- .name = "throttle.write_bps_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_bps_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
-
- {
- .name = "throttle.read_iops_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_iops_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
-
- {
- .name = "throttle.write_iops_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_iops_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
- {
- .name = "throttle.io_service_bytes",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_io_service_bytes),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "throttle.io_serviced",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_io_serviced),
- .read_map = blkiocg_file_read_map,
- },
-#endif /* CONFIG_BLK_DEV_THROTTLING */
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- {
- .name = "avg_queue_size",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_avg_queue_size),
- .read_map = blkiocg_file_read_map,
+ .write_u64 = blkcg_reset_stats,
},
- {
- .name = "group_wait_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_group_wait_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "idle_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_idle_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "empty_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_empty_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "dequeue",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_dequeue),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "unaccounted_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_unaccounted_time),
- .read_map = blkiocg_file_read_map,
- },
-#endif
{ } /* terminate */
};
-static void blkiocg_destroy(struct cgroup *cgroup)
+/**
+ * blkcg_pre_destroy - cgroup pre_destroy callback
+ * @cgroup: cgroup of interest
+ *
+ * This function is called when @cgroup is about to go away and is responsible
+ * for shooting down all blkgs associated with @cgroup. blkgs should be
+ * removed while holding both q and blkcg locks. As blkcg lock is nested
+ * inside q lock, this function performs reverse double lock dancing.
+ *
+ * This is the blkcg counterpart of ioc_release_fn().
+ */
+static int blkcg_pre_destroy(struct cgroup *cgroup)
{
- struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
- unsigned long flags;
- struct blkio_group *blkg;
- void *key;
- struct blkio_policy_type *blkiop;
- struct blkio_policy_node *pn, *pntmp;
+ struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
- rcu_read_lock();
- do {
- spin_lock_irqsave(&blkcg->lock, flags);
+ spin_lock_irq(&blkcg->lock);
- if (hlist_empty(&blkcg->blkg_list)) {
- spin_unlock_irqrestore(&blkcg->lock, flags);
- break;
+ while (!hlist_empty(&blkcg->blkg_list)) {
+ struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
+ struct blkcg_gq, blkcg_node);
+ struct request_queue *q = blkg->q;
+
+ if (spin_trylock(q->queue_lock)) {
+ blkg_destroy(blkg);
+ spin_unlock(q->queue_lock);
+ } else {
+ spin_unlock_irq(&blkcg->lock);
+ cpu_relax();
+ spin_lock_irq(&blkcg->lock);
}
+ }
- blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
- blkcg_node);
- key = rcu_dereference(blkg->key);
- __blkiocg_del_blkio_group(blkg);
-
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- /*
- * This blkio_group is being unlinked as associated cgroup is
- * going away. Let all the IO controlling policies know about
- * this event.
- */
- spin_lock(&blkio_list_lock);
- list_for_each_entry(blkiop, &blkio_list, list) {
- if (blkiop->plid != blkg->plid)
- continue;
- blkiop->ops.blkio_unlink_group_fn(key, blkg);
- }
- spin_unlock(&blkio_list_lock);
- } while (1);
+ spin_unlock_irq(&blkcg->lock);
+ return 0;
+}
- list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
- blkio_policy_delete_node(pn);
- kfree(pn);
- }
+static void blkcg_destroy(struct cgroup *cgroup)
+{
+ struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
- free_css_id(&blkio_subsys, &blkcg->css);
- rcu_read_unlock();
- if (blkcg != &blkio_root_cgroup)
+ if (blkcg != &blkcg_root)
kfree(blkcg);
}
-static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
+static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
{
- struct blkio_cgroup *blkcg;
+ static atomic64_t id_seq = ATOMIC64_INIT(0);
+ struct blkcg *blkcg;
struct cgroup *parent = cgroup->parent;
if (!parent) {
- blkcg = &blkio_root_cgroup;
+ blkcg = &blkcg_root;
goto done;
}
@@ -1582,22 +624,68 @@ static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
if (!blkcg)
return ERR_PTR(-ENOMEM);
- blkcg->weight = BLKIO_WEIGHT_DEFAULT;
+ blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
+ blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
spin_lock_init(&blkcg->lock);
+ INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
INIT_HLIST_HEAD(&blkcg->blkg_list);
- INIT_LIST_HEAD(&blkcg->policy_list);
return &blkcg->css;
}
+/**
+ * blkcg_init_queue - initialize blkcg part of request queue
+ * @q: request_queue to initialize
+ *
+ * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
+ * part of new request_queue @q.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int blkcg_init_queue(struct request_queue *q)
+{
+ might_sleep();
+
+ return blk_throtl_init(q);
+}
+
+/**
+ * blkcg_drain_queue - drain blkcg part of request_queue
+ * @q: request_queue to drain
+ *
+ * Called from blk_drain_queue(). Responsible for draining blkcg part.
+ */
+void blkcg_drain_queue(struct request_queue *q)
+{
+ lockdep_assert_held(q->queue_lock);
+
+ blk_throtl_drain(q);
+}
+
+/**
+ * blkcg_exit_queue - exit and release blkcg part of request_queue
+ * @q: request_queue being released
+ *
+ * Called from blk_release_queue(). Responsible for exiting blkcg part.
+ */
+void blkcg_exit_queue(struct request_queue *q)
+{
+ spin_lock_irq(q->queue_lock);
+ blkg_destroy_all(q);
+ spin_unlock_irq(q->queue_lock);
+
+ blk_throtl_exit(q);
+}
+
/*
 * We cannot support shared io contexts, as we have no means to support
* two tasks with the same ioc in two different groups without major rework
* of the main cic data structures. For now we allow a task to change
* its cgroup only if it's the only owner of its ioc.
*/
-static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
struct task_struct *task;
struct io_context *ioc;
@@ -1616,63 +704,213 @@ static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
return ret;
}
-static void blkiocg_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
-{
- struct task_struct *task;
- struct io_context *ioc;
-
- cgroup_taskset_for_each(task, cgrp, tset) {
- /* we don't lose anything even if ioc allocation fails */
- ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
- if (ioc) {
- ioc_cgroup_changed(ioc);
- put_io_context(ioc);
- }
- }
-}
-
struct cgroup_subsys blkio_subsys = {
.name = "blkio",
- .create = blkiocg_create,
- .can_attach = blkiocg_can_attach,
- .attach = blkiocg_attach,
- .destroy = blkiocg_destroy,
-#ifdef CONFIG_BLK_CGROUP
- /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
+ .create = blkcg_create,
+ .can_attach = blkcg_can_attach,
+ .pre_destroy = blkcg_pre_destroy,
+ .destroy = blkcg_destroy,
.subsys_id = blkio_subsys_id,
-#endif
- .base_cftypes = blkio_files,
- .use_id = 1,
+ .base_cftypes = blkcg_files,
.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
-void blkio_policy_register(struct blkio_policy_type *blkiop)
+/**
+ * blkcg_activate_policy - activate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to activate
+ *
+ * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through
+ * bypass mode to populate its blkgs with policy_data for @pol.
+ *
+ * Activation happens with @q bypassed, so nobody would be accessing blkgs
+ * from IO path. Update of each blkg is protected by both queue and blkcg
+ * locks so that holding either lock and testing blkcg_policy_enabled() is
+ * always enough for dereferencing policy data.
+ *
+ * The caller is responsible for synchronizing [de]activations and policy
+ * [un]registrations. Returns 0 on success, -errno on failure.
+ */
+int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol)
{
- spin_lock(&blkio_list_lock);
- list_add_tail(&blkiop->list, &blkio_list);
- spin_unlock(&blkio_list_lock);
+ LIST_HEAD(pds);
+ struct blkcg_gq *blkg;
+ struct blkg_policy_data *pd, *n;
+ int cnt = 0, ret;
+
+ if (blkcg_policy_enabled(q, pol))
+ return 0;
+
+ blk_queue_bypass_start(q);
+
+ /* make sure the root blkg exists and count the existing blkgs */
+ spin_lock_irq(q->queue_lock);
+
+ rcu_read_lock();
+ blkg = __blkg_lookup_create(&blkcg_root, q);
+ rcu_read_unlock();
+
+ if (IS_ERR(blkg)) {
+ ret = PTR_ERR(blkg);
+ goto out_unlock;
+ }
+ q->root_blkg = blkg;
+
+ list_for_each_entry(blkg, &q->blkg_list, q_node)
+ cnt++;
+
+ spin_unlock_irq(q->queue_lock);
+
+ /* allocate policy_data for all existing blkgs */
+ while (cnt--) {
+ pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ list_add_tail(&pd->alloc_node, &pds);
+ }
+
+ /*
+ * Install the allocated pds. With @q bypassing, no new blkg
+ * should have been created while the queue lock was dropped.
+ */
+ spin_lock_irq(q->queue_lock);
+
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ if (WARN_ON(list_empty(&pds))) {
+ /* umm... this shouldn't happen, just abort */
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
+ list_del_init(&pd->alloc_node);
+
+ /* grab blkcg lock too while installing @pd on @blkg */
+ spin_lock(&blkg->blkcg->lock);
+
+ blkg->pd[pol->plid] = pd;
+ pd->blkg = blkg;
+ pol->pd_init_fn(blkg);
+
+ spin_unlock(&blkg->blkcg->lock);
+ }
+
+ __set_bit(pol->plid, q->blkcg_pols);
+ ret = 0;
+out_unlock:
+ spin_unlock_irq(q->queue_lock);
+out_free:
+ blk_queue_bypass_end(q);
+ list_for_each_entry_safe(pd, n, &pds, alloc_node)
+ kfree(pd);
+ return ret;
}
-EXPORT_SYMBOL_GPL(blkio_policy_register);
+EXPORT_SYMBOL_GPL(blkcg_activate_policy);
-void blkio_policy_unregister(struct blkio_policy_type *blkiop)
+/**
+ * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to deactivate
+ *
+ * Deactivate @pol on @q. Follows the same synchronization rules as
+ * blkcg_activate_policy().
+ */
+void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol)
{
- spin_lock(&blkio_list_lock);
- list_del_init(&blkiop->list);
- spin_unlock(&blkio_list_lock);
+ struct blkcg_gq *blkg;
+
+ if (!blkcg_policy_enabled(q, pol))
+ return;
+
+ blk_queue_bypass_start(q);
+ spin_lock_irq(q->queue_lock);
+
+ __clear_bit(pol->plid, q->blkcg_pols);
+
+ /* if no policy is left, no need for blkgs - shoot them down */
+ if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
+ blkg_destroy_all(q);
+
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ /* grab blkcg lock too while removing @pd from @blkg */
+ spin_lock(&blkg->blkcg->lock);
+
+ if (pol->pd_exit_fn)
+ pol->pd_exit_fn(blkg);
+
+ kfree(blkg->pd[pol->plid]);
+ blkg->pd[pol->plid] = NULL;
+
+ spin_unlock(&blkg->blkcg->lock);
+ }
+
+ spin_unlock_irq(q->queue_lock);
+ blk_queue_bypass_end(q);
}
-EXPORT_SYMBOL_GPL(blkio_policy_unregister);
+EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
-static int __init init_cgroup_blkio(void)
+/**
+ * blkcg_policy_register - register a blkcg policy
+ * @pol: blkcg policy to register
+ *
+ * Register @pol with blkcg core. Might sleep and @pol may be modified on
+ * successful registration. Returns 0 on success and -errno on failure.
+ */
+int blkcg_policy_register(struct blkcg_policy *pol)
{
- return cgroup_load_subsys(&blkio_subsys);
+ int i, ret;
+
+ if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
+ return -EINVAL;
+
+ mutex_lock(&blkcg_pol_mutex);
+
+ /* find an empty slot */
+ ret = -ENOSPC;
+ for (i = 0; i < BLKCG_MAX_POLS; i++)
+ if (!blkcg_policy[i])
+ break;
+ if (i >= BLKCG_MAX_POLS)
+ goto out_unlock;
+
+ /* register and update blkgs */
+ pol->plid = i;
+ blkcg_policy[i] = pol;
+
+ /* everything is in place, add intf files for the new policy */
+ if (pol->cftypes)
+ WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
+ ret = 0;
+out_unlock:
+ mutex_unlock(&blkcg_pol_mutex);
+ return ret;
}
+EXPORT_SYMBOL_GPL(blkcg_policy_register);
-static void __exit exit_cgroup_blkio(void)
+/**
+ * blkcg_policy_unregister - unregister a blkcg policy
+ * @pol: blkcg policy to unregister
+ *
+ * Undo blkcg_policy_register(@pol). Might sleep.
+ */
+void blkcg_policy_unregister(struct blkcg_policy *pol)
{
- cgroup_unload_subsys(&blkio_subsys);
-}
+ mutex_lock(&blkcg_pol_mutex);
-module_init(init_cgroup_blkio);
-module_exit(exit_cgroup_blkio);
-MODULE_LICENSE("GPL");
+ if (WARN_ON(blkcg_policy[pol->plid] != pol))
+ goto out_unlock;
+
+ /* kill the intf files first */
+ if (pol->cftypes)
+ cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);
+
+ /* unregister and update blkgs */
+ blkcg_policy[pol->plid] = NULL;
+out_unlock:
+ mutex_unlock(&blkcg_pol_mutex);
+}
+EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
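
As a hypothetical sketch (not part of the patch itself), here is roughly what a minimal policy module built on the registration API above could look like. Every example_* identifier is invented for illustration; only the blkcg_*/blkg_* symbols and the "pd must be the first member" rule come from this series.

#include <linux/module.h>
#include <linux/blkdev.h>
#include "blk-cgroup.h"

/* per-blkg private data: struct blkg_policy_data must be the first member */
struct example_grp {
	struct blkg_policy_data	pd;
	u64			limit;
};

static struct blkcg_policy example_policy;

static inline struct example_grp *blkg_to_eg(struct blkcg_gq *blkg)
{
	struct blkg_policy_data *pd = blkg_to_pd(blkg, &example_policy);

	return pd ? container_of(pd, struct example_grp, pd) : NULL;
}

static void example_pd_init(struct blkcg_gq *blkg)
{
	blkg_to_eg(blkg)->limit = -1;	/* unlimited by default */
}

static struct blkcg_policy example_policy = {
	.pd_size	= sizeof(struct example_grp),
	.pd_init_fn	= example_pd_init,
	/* .cftypes would list the policy's cgroup files */
};

static int __init example_init(void)
{
	/* assigns example_policy.plid and adds .cftypes, if any */
	return blkcg_policy_register(&example_policy);
}

static void __exit example_exit(void)
{
	blkcg_policy_unregister(&example_policy);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

A queue would then opt in with blkcg_activate_policy(q, &example_policy) from %GFP_KERNEL context (typically an elevator or driver init path) and opt out with blkcg_deactivate_policy() before the queue goes away, as documented above.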
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 6f3ace7e792f..8ac457ce7783 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -15,350 +15,371 @@
#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
-
-enum blkio_policy_id {
- BLKIO_POLICY_PROP = 0, /* Proportional Bandwidth division */
- BLKIO_POLICY_THROTL, /* Throttling */
-};
+#include <linux/seq_file.h>
+#include <linux/radix-tree.h>
/* Max limits for throttle policy */
#define THROTL_IOPS_MAX UINT_MAX
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-
-#ifndef CONFIG_BLK_CGROUP
-/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
-extern struct cgroup_subsys blkio_subsys;
-#define blkio_subsys_id blkio_subsys.subsys_id
-#endif
-
-enum stat_type {
- /* Total time spent (in ns) between request dispatch to the driver and
- * request completion for IOs doen by this cgroup. This may not be
- * accurate when NCQ is turned on. */
- BLKIO_STAT_SERVICE_TIME = 0,
- /* Total time spent waiting in scheduler queue in ns */
- BLKIO_STAT_WAIT_TIME,
- /* Number of IOs queued up */
- BLKIO_STAT_QUEUED,
- /* All the single valued stats go below this */
- BLKIO_STAT_TIME,
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- /* Time not charged to this cgroup */
- BLKIO_STAT_UNACCOUNTED_TIME,
- BLKIO_STAT_AVG_QUEUE_SIZE,
- BLKIO_STAT_IDLE_TIME,
- BLKIO_STAT_EMPTY_TIME,
- BLKIO_STAT_GROUP_WAIT_TIME,
- BLKIO_STAT_DEQUEUE
-#endif
-};
+/* CFQ specific, out here for blkcg->cfq_weight */
+#define CFQ_WEIGHT_MIN 10
+#define CFQ_WEIGHT_MAX 1000
+#define CFQ_WEIGHT_DEFAULT 500
-/* Per cpu stats */
-enum stat_type_cpu {
- BLKIO_STAT_CPU_SECTORS,
- /* Total bytes transferred */
- BLKIO_STAT_CPU_SERVICE_BYTES,
- /* Total IOs serviced, post merge */
- BLKIO_STAT_CPU_SERVICED,
- /* Number of IOs merged */
- BLKIO_STAT_CPU_MERGED,
- BLKIO_STAT_CPU_NR
-};
+#ifdef CONFIG_BLK_CGROUP
-enum stat_sub_type {
- BLKIO_STAT_READ = 0,
- BLKIO_STAT_WRITE,
- BLKIO_STAT_SYNC,
- BLKIO_STAT_ASYNC,
- BLKIO_STAT_TOTAL
-};
+enum blkg_rwstat_type {
+ BLKG_RWSTAT_READ,
+ BLKG_RWSTAT_WRITE,
+ BLKG_RWSTAT_SYNC,
+ BLKG_RWSTAT_ASYNC,
-/* blkg state flags */
-enum blkg_state_flags {
- BLKG_waiting = 0,
- BLKG_idling,
- BLKG_empty,
+ BLKG_RWSTAT_NR,
+ BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};
-/* cgroup files owned by proportional weight policy */
-enum blkcg_file_name_prop {
- BLKIO_PROP_weight = 1,
- BLKIO_PROP_weight_device,
- BLKIO_PROP_io_service_bytes,
- BLKIO_PROP_io_serviced,
- BLKIO_PROP_time,
- BLKIO_PROP_sectors,
- BLKIO_PROP_unaccounted_time,
- BLKIO_PROP_io_service_time,
- BLKIO_PROP_io_wait_time,
- BLKIO_PROP_io_merged,
- BLKIO_PROP_io_queued,
- BLKIO_PROP_avg_queue_size,
- BLKIO_PROP_group_wait_time,
- BLKIO_PROP_idle_time,
- BLKIO_PROP_empty_time,
- BLKIO_PROP_dequeue,
-};
+struct blkcg_gq;
-/* cgroup files owned by throttle policy */
-enum blkcg_file_name_throtl {
- BLKIO_THROTL_read_bps_device,
- BLKIO_THROTL_write_bps_device,
- BLKIO_THROTL_read_iops_device,
- BLKIO_THROTL_write_iops_device,
- BLKIO_THROTL_io_service_bytes,
- BLKIO_THROTL_io_serviced,
-};
+struct blkcg {
+ struct cgroup_subsys_state css;
+ spinlock_t lock;
-struct blkio_cgroup {
- struct cgroup_subsys_state css;
- unsigned int weight;
- spinlock_t lock;
- struct hlist_head blkg_list;
- struct list_head policy_list; /* list of blkio_policy_node */
-};
+ struct radix_tree_root blkg_tree;
+ struct blkcg_gq *blkg_hint;
+ struct hlist_head blkg_list;
+
+ /* for policies to test whether associated blkcg has changed */
+ uint64_t id;
-struct blkio_group_stats {
- /* total disk time and nr sectors dispatched by this group */
- uint64_t time;
- uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- /* Time not charged to this cgroup */
- uint64_t unaccounted_time;
-
- /* Sum of number of IOs queued across all samples */
- uint64_t avg_queue_size_sum;
- /* Count of samples taken for average */
- uint64_t avg_queue_size_samples;
- /* How many times this group has been removed from service tree */
- unsigned long dequeue;
-
- /* Total time spent waiting for it to be assigned a timeslice. */
- uint64_t group_wait_time;
- uint64_t start_group_wait_time;
-
- /* Time spent idling for this blkio_group */
- uint64_t idle_time;
- uint64_t start_idle_time;
- /*
- * Total time when we have requests queued and do not contain the
- * current active queue.
- */
- uint64_t empty_time;
- uint64_t start_empty_time;
- uint16_t flags;
-#endif
+ /* TODO: per-policy storage in blkcg */
+ unsigned int cfq_weight; /* belongs to cfq */
};
-/* Per cpu blkio group stats */
-struct blkio_group_stats_cpu {
- uint64_t sectors;
- uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
- struct u64_stats_sync syncp;
+struct blkg_stat {
+ struct u64_stats_sync syncp;
+ uint64_t cnt;
};
-struct blkio_group {
- /* An rcu protected unique identifier for the group */
- void *key;
- struct hlist_node blkcg_node;
- unsigned short blkcg_id;
- /* Store cgroup path */
- char path[128];
- /* The device MKDEV(major, minor), this group has been created for */
- dev_t dev;
- /* policy which owns this blk group */
- enum blkio_policy_id plid;
-
- /* Need to serialize the stats in the case of reset/update */
- spinlock_t stats_lock;
- struct blkio_group_stats stats;
- /* Per cpu stats pointer */
- struct blkio_group_stats_cpu __percpu *stats_cpu;
+struct blkg_rwstat {
+ struct u64_stats_sync syncp;
+ uint64_t cnt[BLKG_RWSTAT_NR];
};
-struct blkio_policy_node {
- struct list_head node;
- dev_t dev;
- /* This node belongs to max bw policy or porportional weight policy */
- enum blkio_policy_id plid;
- /* cgroup file to which this rule belongs to */
- int fileid;
-
- union {
- unsigned int weight;
- /*
- * Rate read/write in terms of bytes per second
- * Whether this rate represents read or write is determined
- * by file type "fileid".
- */
- u64 bps;
- unsigned int iops;
- } val;
+/*
+ * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
+ * request_queue (q). This is used by blkcg policies which need to track
+ * information per blkcg - q pair.
+ *
+ * There can be multiple active blkcg policies and each has its private
+ * data on each blkg, the size of which is determined by
+ * blkcg_policy->pd_size. blkcg core allocates and frees such areas
+ * together with blkg and invokes pd_init/exit_fn() methods.
+ *
+ * Such private data must embed struct blkg_policy_data (pd) at the
+ * beginning and pd_size can't be smaller than pd.
+ */
+struct blkg_policy_data {
+ /* the blkg this per-policy data belongs to */
+ struct blkcg_gq *blkg;
+
+ /* used during policy activation */
+ struct list_head alloc_node;
};
-extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg,
- dev_t dev);
-
-typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
-
-typedef void (blkio_update_group_weight_fn) (void *key,
- struct blkio_group *blkg, unsigned int weight);
-typedef void (blkio_update_group_read_bps_fn) (void * key,
- struct blkio_group *blkg, u64 read_bps);
-typedef void (blkio_update_group_write_bps_fn) (void *key,
- struct blkio_group *blkg, u64 write_bps);
-typedef void (blkio_update_group_read_iops_fn) (void *key,
- struct blkio_group *blkg, unsigned int read_iops);
-typedef void (blkio_update_group_write_iops_fn) (void *key,
- struct blkio_group *blkg, unsigned int write_iops);
-
-struct blkio_policy_ops {
- blkio_unlink_group_fn *blkio_unlink_group_fn;
- blkio_update_group_weight_fn *blkio_update_group_weight_fn;
- blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
- blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
- blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
- blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
+/* association between a blk cgroup and a request queue */
+struct blkcg_gq {
+ /* Pointer to the associated request_queue */
+ struct request_queue *q;
+ struct list_head q_node;
+ struct hlist_node blkcg_node;
+ struct blkcg *blkcg;
+ /* reference count */
+ int refcnt;
+
+ struct blkg_policy_data *pd[BLKCG_MAX_POLS];
+
+ struct rcu_head rcu_head;
};
-struct blkio_policy_type {
- struct list_head list;
- struct blkio_policy_ops ops;
- enum blkio_policy_id plid;
+typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
+
+struct blkcg_policy {
+ int plid;
+ /* policy specific private data size */
+ size_t pd_size;
+ /* cgroup files for the policy */
+ struct cftype *cftypes;
+
+ /* operations */
+ blkcg_pol_init_pd_fn *pd_init_fn;
+ blkcg_pol_exit_pd_fn *pd_exit_fn;
+ blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
};
+extern struct blkcg blkcg_root;
+
+struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup);
+struct blkcg *bio_blkcg(struct bio *bio);
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q);
+int blkcg_init_queue(struct request_queue *q);
+void blkcg_drain_queue(struct request_queue *q);
+void blkcg_exit_queue(struct request_queue *q);
+
/* Blkio controller policy registration */
-extern void blkio_policy_register(struct blkio_policy_type *);
-extern void blkio_policy_unregister(struct blkio_policy_type *);
+int blkcg_policy_register(struct blkcg_policy *pol);
+void blkcg_policy_unregister(struct blkcg_policy *pol);
+int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol);
+void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol);
+
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+ u64 (*prfill)(struct seq_file *,
+ struct blkg_policy_data *, int),
+ const struct blkcg_policy *pol, int data,
+ bool show_total);
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ const struct blkg_rwstat *rwstat);
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off);
+
+struct blkg_conf_ctx {
+ struct gendisk *disk;
+ struct blkcg_gq *blkg;
+ u64 v;
+};
+
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ const char *input, struct blkg_conf_ctx *ctx);
+void blkg_conf_finish(struct blkg_conf_ctx *ctx);
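
A rough sketch of the intended consumer of blkg_conf_prep()/blkg_conf_finish(): a policy's per-device limit file, reusing the hypothetical example policy sketched earlier. The handler signature follows the cftype ->write_string convention used by the files this patch removes; exactly what prep()/finish() pin and lock is defined in blk-cgroup.c.

static int example_set_limit(struct cgroup *cgrp, struct cftype *cft,
			     const char *buf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkg_conf_ctx ctx;
	int ret;

	/* parse "MAJ:MIN VAL" and look up the matching blkg */
	ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
	if (ret)
		return ret;

	/* ctx.blkg is the target group, ctx.v the parsed value */
	blkg_to_eg(ctx.blkg)->limit = ctx.v;

	/* releases whatever blkg_conf_prep() acquired */
	blkg_conf_finish(&ctx);
	return 0;
}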
+
+
+/**
+ * blkg_to_pd - get policy private data
+ * @blkg: blkg of interest
+ * @pol: policy of interest
+ *
+ * Return pointer to private data associated with the @blkg-@pol pair.
+ */
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol)
+{
+ return blkg ? blkg->pd[pol->plid] : NULL;
+}
+
+/**
+ * pd_to_blkg - get blkg associated with policy private data
+ * @pd: policy private data of interest
+ *
+ * @pd is policy private data. Determine the blkg it's associated with.
+ */
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
+{
+ return pd ? pd->blkg : NULL;
+}
+
+/**
+ * blkg_path - format cgroup path of blkg
+ * @blkg: blkg of interest
+ * @buf: target buffer
+ * @buflen: target buffer length
+ *
+ * Format the path of the cgroup of @blkg into @buf.
+ */
+static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
+{
+ int ret;
+
+ rcu_read_lock();
+ ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
+ rcu_read_unlock();
+ if (ret)
+ strncpy(buf, "<unavailable>", buflen);
+ return ret;
+}
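
blkg_path() replaces the old cached blkg->path; a small hypothetical sketch of the logging use it is meant for, mirroring the throtl_log_tg() macro further down in this patch:

static void example_log_blkg(struct request_queue *q, struct blkcg_gq *blkg,
			     const char *msg)
{
	char path[128];

	blkg_path(blkg, path, sizeof(path));
	blk_add_trace_msg(q, "example %s %s", path, msg);
}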
-static inline char *blkg_path(struct blkio_group *blkg)
+/**
+ * blkg_get - get a blkg reference
+ * @blkg: blkg to get
+ *
+ * The caller should be holding queue_lock and an existing reference.
+ */
+static inline void blkg_get(struct blkcg_gq *blkg)
{
- return blkg->path;
+ lockdep_assert_held(blkg->q->queue_lock);
+ WARN_ON_ONCE(!blkg->refcnt);
+ blkg->refcnt++;
}
-#else
+void __blkg_release(struct blkcg_gq *blkg);
-struct blkio_group {
+/**
+ * blkg_put - put a blkg reference
+ * @blkg: blkg to put
+ *
+ * The caller should be holding queue_lock.
+ */
+static inline void blkg_put(struct blkcg_gq *blkg)
+{
+ lockdep_assert_held(blkg->q->queue_lock);
+ WARN_ON_ONCE(blkg->refcnt <= 0);
+ if (!--blkg->refcnt)
+ __blkg_release(blkg);
+}
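
A short, purely illustrative sketch of the reference contract above: both helpers are called with queue_lock held, and the final blkg_put() frees the group via __blkg_release().

static void example_use_blkg(struct request_queue *q, struct blkcg_gq *blkg)
{
	lockdep_assert_held(q->queue_lock);

	blkg_get(blkg);		/* pin while we keep a pointer around */
	/* ... stash blkg somewhere, do work ... */
	blkg_put(blkg);		/* last put hands off to __blkg_release() */
}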
+
+/**
+ * blkg_stat_add - add a value to a blkg_stat
+ * @stat: target blkg_stat
+ * @val: value to add
+ *
+ * Add @val to @stat. The caller is responsible for synchronizing calls to
+ * this function.
+ */
+static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
+{
+ u64_stats_update_begin(&stat->syncp);
+ stat->cnt += val;
+ u64_stats_update_end(&stat->syncp);
+}
+
+/**
+ * blkg_stat_read - read the current value of a blkg_stat
+ * @stat: blkg_stat to read
+ *
+ * Read the current value of @stat. This function can be called without
+ * synchronization and takes care of u64 atomicity.
+ */
+static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
+{
+ unsigned int start;
+ uint64_t v;
+
+ do {
+ start = u64_stats_fetch_begin(&stat->syncp);
+ v = stat->cnt;
+ } while (u64_stats_fetch_retry(&stat->syncp, start));
+
+ return v;
+}
+
+/**
+ * blkg_stat_reset - reset a blkg_stat
+ * @stat: blkg_stat to reset
+ */
+static inline void blkg_stat_reset(struct blkg_stat *stat)
+{
+ stat->cnt = 0;
+}
+
+/**
+ * blkg_rwstat_add - add a value to a blkg_rwstat
+ * @rwstat: target blkg_rwstat
+ * @rw: mask of REQ_{WRITE|SYNC}
+ * @val: value to add
+ *
+ * Add @val to @rwstat. The counters are chosen according to @rw. The
+ * caller is responsible for synchronizing calls to this function.
+ */
+static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
+ int rw, uint64_t val)
+{
+ u64_stats_update_begin(&rwstat->syncp);
+
+ if (rw & REQ_WRITE)
+ rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
+ else
+ rwstat->cnt[BLKG_RWSTAT_READ] += val;
+ if (rw & REQ_SYNC)
+ rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
+ else
+ rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
+
+ u64_stats_update_end(&rwstat->syncp);
+}
+
+/**
+ * blkg_rwstat_read - read the current values of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Read and return the current snapshot of @rwstat.
+ * This function can be called without synchronization and takes care of
+ * u64 atomicity.
+ */
+static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
+{
+ unsigned int start;
+ struct blkg_rwstat tmp;
+
+ do {
+ start = u64_stats_fetch_begin(&rwstat->syncp);
+ tmp = *rwstat;
+ } while (u64_stats_fetch_retry(&rwstat->syncp, start));
+
+ return tmp;
+}
+
+/**
+ * blkg_rwstat_sum - read the total count of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Return the total count of @rwstat regardless of the IO direction. This
+ * function can be called without synchronization and takes care of u64
+ * atomicity.
+ */
+static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
+{
+ struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
+
+ return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
+}
+
+/**
+ * blkg_rwstat_reset - reset a blkg_rwstat
+ * @rwstat: blkg_rwstat to reset
+ */
+static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
+{
+ memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
+}
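
A small illustrative sketch of a policy feeding these counters; struct example_stats and the function names are invented, the blkg_stat/blkg_rwstat helpers are the ones defined above.

struct example_stats {
	struct blkg_stat	time;		/* e.g. total service time */
	struct blkg_rwstat	serviced;	/* split by READ/WRITE, SYNC/ASYNC */
};

static void example_account_completion(struct example_stats *st, int rw,
					u64 service_time)
{
	/* updates must be serialized by the caller (per-cpu or under a lock) */
	blkg_stat_add(&st->time, service_time);
	blkg_rwstat_add(&st->serviced, rw, 1);
}

static u64 example_total_serviced(struct example_stats *st)
{
	/* readers need no serialization against the updater */
	return blkg_rwstat_sum(&st->serviced);
}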
+
+#else /* CONFIG_BLK_CGROUP */
+
+struct cgroup;
+
+struct blkg_policy_data {
};
-struct blkio_policy_type {
+struct blkcg_gq {
};
-static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
-static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
-
-static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
-
-#endif
-
-#define BLKIO_WEIGHT_MIN 10
-#define BLKIO_WEIGHT_MAX 1000
-#define BLKIO_WEIGHT_DEFAULT 500
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
-void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue);
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
-void blkiocg_set_start_empty_time(struct blkio_group *blkg);
-
-#define BLKG_FLAG_FNS(name) \
-static inline void blkio_mark_blkg_##name( \
- struct blkio_group_stats *stats) \
-{ \
- stats->flags |= (1 << BLKG_##name); \
-} \
-static inline void blkio_clear_blkg_##name( \
- struct blkio_group_stats *stats) \
-{ \
- stats->flags &= ~(1 << BLKG_##name); \
-} \
-static inline int blkio_blkg_##name(struct blkio_group_stats *stats) \
-{ \
- return (stats->flags & (1 << BLKG_##name)) != 0; \
-} \
-
-BLKG_FLAG_FNS(waiting)
-BLKG_FLAG_FNS(idling)
-BLKG_FLAG_FNS(empty)
-#undef BLKG_FLAG_FNS
-#else
-static inline void blkiocg_update_avg_queue_size_stats(
- struct blkio_group *blkg) {}
-static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue) {}
-static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{}
-static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
-static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
-#endif
-
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-extern struct blkio_cgroup blkio_root_cgroup;
-extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
-extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
-extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev,
- enum blkio_policy_id plid);
-extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
-extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
-extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
- void *key);
-void blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time,
- unsigned long unaccounted_time);
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
- bool direction, bool sync);
-void blkiocg_update_completion_stats(struct blkio_group *blkg,
- uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
- bool sync);
-void blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync);
-void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync);
-#else
-struct cgroup;
-static inline struct blkio_cgroup *
-cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
-static inline struct blkio_cgroup *
-task_blkio_cgroup(struct task_struct *tsk) { return NULL; }
-
-static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev,
- enum blkio_policy_id plid) {}
-
-static inline int blkio_alloc_blkg_stats(struct blkio_group *blkg) { return 0; }
-
-static inline int
-blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
-
-static inline struct blkio_group *
-blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
-static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time,
- unsigned long unaccounted_time)
-{}
-static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync) {}
-static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
- uint64_t start_time, uint64_t io_start_time, bool direction,
- bool sync) {}
-static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync) {}
-static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-#endif
-#endif /* _BLK_CGROUP_H */
+struct blkcg_policy {
+};
+
+static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
+static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
+static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
+static inline void blkcg_drain_queue(struct request_queue *q) { }
+static inline void blkcg_exit_queue(struct request_queue *q) { }
+static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
+static inline int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol) { }
+
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol) { return NULL; }
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
+static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
+static inline void blkg_get(struct blkcg_gq *blkg) { }
+static inline void blkg_put(struct blkcg_gq *blkg) { }
+
+#endif /* CONFIG_BLK_CGROUP */
+#endif /* _BLK_CGROUP_H */
diff --git a/block/blk-core.c b/block/blk-core.c
index 1f61b74867e4..3c923a7aeb56 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -29,11 +29,13 @@
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
+#include <linux/ratelimit.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
#include "blk.h"
+#include "blk-cgroup.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -280,7 +282,7 @@ EXPORT_SYMBOL(blk_stop_queue);
*
* This function does not cancel any asynchronous activity arising
 * out of elevator or throttling code. That would require elevator_exit()
- * and blk_throtl_exit() to be called with queue lock initialized.
+ * and blkcg_exit_queue() to be called with queue lock initialized.
*
*/
void blk_sync_queue(struct request_queue *q)
@@ -365,17 +367,23 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
spin_lock_irq(q->queue_lock);
- elv_drain_elevator(q);
- if (drain_all)
- blk_throtl_drain(q);
+ /*
+ * The caller might be trying to drain @q before its
+ * elevator is initialized.
+ */
+ if (q->elevator)
+ elv_drain_elevator(q);
+
+ blkcg_drain_queue(q);
/*
* This function might be called on a queue which failed
- * driver init after queue creation. Some drivers
- * (e.g. fd) get unhappy in such cases. Kick queue iff
- * dispatch queue has something on it.
+ * driver init after queue creation or is not yet fully
+ * active. Some drivers (e.g. fd and loop) get unhappy
+ * in such cases. Kick queue iff dispatch queue has
+ * something on it and @q has request_fn set.
*/
- if (!list_empty(&q->queue_head))
+ if (!list_empty(&q->queue_head) && q->request_fn)
__blk_run_queue(q);
drain |= q->rq.elvpriv;
@@ -403,6 +411,49 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
}
/**
+ * blk_queue_bypass_start - enter queue bypass mode
+ * @q: queue of interest
+ *
+ * In bypass mode, only the dispatch FIFO queue of @q is used. This
+ * function makes @q enter bypass mode and drains all requests which were
+ * throttled or issued before. On return, it's guaranteed that no request
+ * is being throttled or has ELVPRIV set, and that blk_queue_bypass() is %true
+ * inside queue or RCU read lock.
+ */
+void blk_queue_bypass_start(struct request_queue *q)
+{
+ bool drain;
+
+ spin_lock_irq(q->queue_lock);
+ drain = !q->bypass_depth++;
+ queue_flag_set(QUEUE_FLAG_BYPASS, q);
+ spin_unlock_irq(q->queue_lock);
+
+ if (drain) {
+ blk_drain_queue(q, false);
+ /* ensure blk_queue_bypass() is %true inside RCU read lock */
+ synchronize_rcu();
+ }
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
+
+/**
+ * blk_queue_bypass_end - leave queue bypass mode
+ * @q: queue of interest
+ *
+ * Leave bypass mode and restore the normal queueing behavior.
+ */
+void blk_queue_bypass_end(struct request_queue *q)
+{
+ spin_lock_irq(q->queue_lock);
+ if (!--q->bypass_depth)
+ queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+ WARN_ON_ONCE(q->bypass_depth < 0);
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
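
The two helpers are meant to bracket queue-wide reconfiguration; a hypothetical sketch of the pattern (blkcg_activate_policy() earlier in this patch is one in-tree user):

static void example_reconfigure_queue(struct request_queue *q)
{
	blk_queue_bypass_start(q);	/* drain; blk_queue_bypass() now true */

	/* ... safe here: no request is throttled or carries ELVPRIV ... */

	blk_queue_bypass_end(q);	/* resume normal queueing */
}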
+
+/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
*
@@ -418,6 +469,19 @@ void blk_cleanup_queue(struct request_queue *q)
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
spin_lock_irq(lock);
+
+ /*
+ * Dead queue is permanently in bypass mode till released. Note
+ * that, unlike blk_queue_bypass_start(), we aren't performing
+ * synchronize_rcu() after entering bypass mode to avoid the delay
+ * as some drivers create and destroy a lot of queues while
+ * probing. This is still safe because blk_release_queue() will be
+ * called only after the queue refcnt drops to zero and nothing,
+ * RCU or not, would be traversing the queue by then.
+ */
+ q->bypass_depth++;
+ queue_flag_set(QUEUE_FLAG_BYPASS, q);
+
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
queue_flag_set(QUEUE_FLAG_DEAD, q);
@@ -428,13 +492,8 @@ void blk_cleanup_queue(struct request_queue *q)
spin_unlock_irq(lock);
mutex_unlock(&q->sysfs_lock);
- /*
- * Drain all requests queued before DEAD marking. The caller might
- * be trying to tear down @q before its elevator is initialized, in
- * which case we don't want to call into draining.
- */
- if (q->elevator)
- blk_drain_queue(q, true);
+ /* drain all requests queued before DEAD marking */
+ blk_drain_queue(q, true);
/* @q won't process any more request, flush async actions */
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
@@ -498,14 +557,15 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (err)
goto fail_id;
- if (blk_throtl_init(q))
- goto fail_id;
-
setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
laptop_mode_timer_fn, (unsigned long) q);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+ INIT_LIST_HEAD(&q->queue_head);
INIT_LIST_HEAD(&q->timeout_list);
INIT_LIST_HEAD(&q->icq_list);
+#ifdef CONFIG_BLK_CGROUP
+ INIT_LIST_HEAD(&q->blkg_list);
+#endif
INIT_LIST_HEAD(&q->flush_queue[0]);
INIT_LIST_HEAD(&q->flush_queue[1]);
INIT_LIST_HEAD(&q->flush_data_in_flight);
@@ -522,6 +582,18 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
*/
q->queue_lock = &q->__queue_lock;
+ /*
+ * A queue starts its life with bypass turned on to avoid
+ * unnecessary bypass on/off overhead and nasty surprises during
+ * init. The initial bypass will be finished at the end of
+ * blk_init_allocated_queue().
+ */
+ q->bypass_depth = 1;
+ __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
+ if (blkcg_init_queue(q))
+ goto fail_id;
+
return q;
fail_id:
@@ -614,15 +686,15 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
q->sg_reserved_size = INT_MAX;
- /*
- * all done
- */
- if (!elevator_init(q, NULL)) {
- blk_queue_congestion_threshold(q);
- return q;
- }
+ /* init elevator */
+ if (elevator_init(q, NULL))
+ return NULL;
- return NULL;
+ blk_queue_congestion_threshold(q);
+
+ /* all done, end the initial bypass */
+ blk_queue_bypass_end(q);
+ return q;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -648,33 +720,6 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
mempool_free(rq, q->rq.rq_pool);
}
-static struct request *
-blk_alloc_request(struct request_queue *q, struct io_cq *icq,
- unsigned int flags, gfp_t gfp_mask)
-{
- struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
-
- if (!rq)
- return NULL;
-
- blk_rq_init(q, rq);
-
- rq->cmd_flags = flags | REQ_ALLOCED;
-
- if (flags & REQ_ELVPRIV) {
- rq->elv.icq = icq;
- if (unlikely(elv_set_request(q, rq, gfp_mask))) {
- mempool_free(rq, q->rq.rq_pool);
- return NULL;
- }
- /* @rq->elv.icq holds on to io_context until @rq is freed */
- if (icq)
- get_io_context(icq->ioc);
- }
-
- return rq;
-}
-
/*
* ioc_batching returns true if the ioc is a valid batching request and
* should be given priority access to a request.
@@ -763,6 +808,22 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
}
/**
+ * rq_ioc - determine io_context for request allocation
+ * @bio: request being allocated is for this bio (can be %NULL)
+ *
+ * Determine io_context to use for request allocation for @bio. May return
+ * %NULL if %current->io_context doesn't exist.
+ */
+static struct io_context *rq_ioc(struct bio *bio)
+{
+#ifdef CONFIG_BLK_CGROUP
+ if (bio && bio->bi_ioc)
+ return bio->bi_ioc;
+#endif
+ return current->io_context;
+}
+
+/**
* get_request - get a free request
* @q: request_queue to allocate request from
* @rw_flags: RW and SYNC flags
@@ -779,7 +840,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
static struct request *get_request(struct request_queue *q, int rw_flags,
struct bio *bio, gfp_t gfp_mask)
{
- struct request *rq = NULL;
+ struct request *rq;
struct request_list *rl = &q->rq;
struct elevator_type *et;
struct io_context *ioc;
@@ -789,7 +850,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
int may_queue;
retry:
et = q->elevator->type;
- ioc = current->io_context;
+ ioc = rq_ioc(bio);
if (unlikely(blk_queue_dead(q)))
return NULL;
@@ -808,7 +869,7 @@ retry:
*/
if (!ioc && !retried) {
spin_unlock_irq(q->queue_lock);
- create_io_context(current, gfp_mask, q->node);
+ create_io_context(gfp_mask, q->node);
spin_lock_irq(q->queue_lock);
retried = true;
goto retry;
@@ -831,7 +892,7 @@ retry:
* process is not a "batcher", and not
* exempted by the IO scheduler
*/
- goto out;
+ return NULL;
}
}
}
@@ -844,7 +905,7 @@ retry:
* allocated with any setting of ->nr_requests
*/
if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
- goto out;
+ return NULL;
rl->count[is_sync]++;
rl->starved[is_sync] = 0;
@@ -859,8 +920,7 @@ retry:
* Also, lookup icq while holding queue_lock. If it doesn't exist,
* it will be created after releasing queue_lock.
*/
- if (blk_rq_should_init_elevator(bio) &&
- !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+ if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
rw_flags |= REQ_ELVPRIV;
rl->elvpriv++;
if (et->icq_cache && ioc)
@@ -871,41 +931,36 @@ retry:
rw_flags |= REQ_IO_STAT;
spin_unlock_irq(q->queue_lock);
- /* create icq if missing */
- if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
- icq = ioc_create_icq(q, gfp_mask);
- if (!icq)
- goto fail_icq;
- }
-
- rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+ /* allocate and init request */
+ rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+ if (!rq)
+ goto fail_alloc;
-fail_icq:
- if (unlikely(!rq)) {
- /*
- * Allocation failed presumably due to memory. Undo anything
- * we might have messed up.
- *
- * Allocating task should really be put onto the front of the
- * wait queue, but this is pretty rare.
- */
- spin_lock_irq(q->queue_lock);
- freed_request(q, rw_flags);
+ blk_rq_init(q, rq);
+ rq->cmd_flags = rw_flags | REQ_ALLOCED;
+
+ /* init elvpriv */
+ if (rw_flags & REQ_ELVPRIV) {
+ if (unlikely(et->icq_cache && !icq)) {
+ create_io_context(gfp_mask, q->node);
+ ioc = rq_ioc(bio);
+ if (!ioc)
+ goto fail_elvpriv;
+
+ icq = ioc_create_icq(ioc, q, gfp_mask);
+ if (!icq)
+ goto fail_elvpriv;
+ }
- /*
- * in the very unlikely event that allocation failed and no
- * requests for this direction was pending, mark us starved
- * so that freeing of a request in the other direction will
- * notice us. another possible fix would be to split the
- * rq mempool into READ and WRITE
- */
-rq_starved:
- if (unlikely(rl->count[is_sync] == 0))
- rl->starved[is_sync] = 1;
+ rq->elv.icq = icq;
+ if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
+ goto fail_elvpriv;
- goto out;
+ /* @rq->elv.icq holds io_context until @rq is freed */
+ if (icq)
+ get_io_context(icq->ioc);
}
-
+out:
/*
* ioc may be NULL here, and ioc_batching will be false. That's
* OK, if the queue is under the request limit then requests need
@@ -916,8 +971,48 @@ rq_starved:
ioc->nr_batch_requests--;
trace_block_getrq(q, bio, rw_flags & 1);
-out:
return rq;
+
+fail_elvpriv:
+ /*
+ * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
+ * and may fail indefinitely under memory pressure and thus
+ * shouldn't stall IO. Treat this request as !elvpriv. This will
+ * disturb iosched and blkcg but weird is better than dead.
+ */
+ printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
+ dev_name(q->backing_dev_info.dev));
+
+ rq->cmd_flags &= ~REQ_ELVPRIV;
+ rq->elv.icq = NULL;
+
+ spin_lock_irq(q->queue_lock);
+ rl->elvpriv--;
+ spin_unlock_irq(q->queue_lock);
+ goto out;
+
+fail_alloc:
+ /*
+ * Allocation failed presumably due to memory. Undo anything we
+ * might have messed up.
+ *
+ * Allocating task should really be put onto the front of the wait
+ * queue, but this is pretty rare.
+ */
+ spin_lock_irq(q->queue_lock);
+ freed_request(q, rw_flags);
+
+ /*
+ * in the very unlikely event that allocation failed and no
+ * requests for this direction was pending, mark us starved so that
+ * freeing of a request in the other direction will notice
+ * us. another possible fix would be to split the rq mempool into
+ * READ and WRITE
+ */
+rq_starved:
+ if (unlikely(rl->count[is_sync] == 0))
+ rl->starved[is_sync] = 1;
+ return NULL;
}
/**
@@ -961,7 +1056,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
* up to a big batch of them for a small period time.
* See ioc_batching, ioc_set_batching
*/
- create_io_context(current, GFP_NOIO, q->node);
+ create_io_context(GFP_NOIO, q->node);
ioc_set_batching(q, current->io_context);
spin_lock_irq(q->queue_lock);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fb95dd2f889a..893b8007c657 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -155,20 +155,20 @@ void put_io_context(struct io_context *ioc)
}
EXPORT_SYMBOL(put_io_context);
-/* Called by the exiting task */
-void exit_io_context(struct task_struct *task)
+/**
+ * put_io_context_active - put active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Undo get_io_context_active(). If active reference reaches zero after
+ * put, @ioc can never issue further IOs and ioscheds are notified.
+ */
+void put_io_context_active(struct io_context *ioc)
{
- struct io_context *ioc;
- struct io_cq *icq;
struct hlist_node *n;
unsigned long flags;
+ struct io_cq *icq;
- task_lock(task);
- ioc = task->io_context;
- task->io_context = NULL;
- task_unlock(task);
-
- if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+ if (!atomic_dec_and_test(&ioc->active_ref)) {
put_io_context(ioc);
return;
}
@@ -197,6 +197,20 @@ retry:
put_io_context(ioc);
}
+/* Called by the exiting task */
+void exit_io_context(struct task_struct *task)
+{
+ struct io_context *ioc;
+
+ task_lock(task);
+ ioc = task->io_context;
+ task->io_context = NULL;
+ task_unlock(task);
+
+ atomic_dec(&ioc->nr_tasks);
+ put_io_context_active(ioc);
+}
+
/**
* ioc_clear_queue - break any ioc association with the specified queue
* @q: request_queue being cleared
@@ -218,19 +232,19 @@ void ioc_clear_queue(struct request_queue *q)
}
}
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
- int node)
+int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
struct io_context *ioc;
+ int ret;
ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
node);
if (unlikely(!ioc))
- return;
+ return -ENOMEM;
/* initialize */
atomic_long_set(&ioc->refcount, 1);
- atomic_set(&ioc->nr_tasks, 1);
+ atomic_set(&ioc->active_ref, 1);
spin_lock_init(&ioc->lock);
INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
INIT_HLIST_HEAD(&ioc->icq_list);
@@ -249,7 +263,12 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
task->io_context = ioc;
else
kmem_cache_free(iocontext_cachep, ioc);
+
+ ret = task->io_context ? 0 : -EBUSY;
+
task_unlock(task);
+
+ return ret;
}
/**
@@ -281,7 +300,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
return ioc;
}
task_unlock(task);
- } while (create_io_context(task, gfp_flags, node));
+ } while (!create_task_io_context(task, gfp_flags, node));
return NULL;
}
@@ -325,26 +344,23 @@ EXPORT_SYMBOL(ioc_lookup_icq);
/**
* ioc_create_icq - create and link io_cq
+ * @ioc: io_context of interest
* @q: request_queue of interest
* @gfp_mask: allocation mask
*
- * Make sure io_cq linking %current->io_context and @q exists. If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ * Make sure io_cq linking @ioc and @q exists. If icq doesn't exist, it
+ * will be created using @gfp_mask.
*
* The caller is responsible for ensuring @ioc won't go away and @q is
* alive and will stay alive until this function returns.
*/
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+ gfp_t gfp_mask)
{
struct elevator_type *et = q->elevator->type;
- struct io_context *ioc;
struct io_cq *icq;
/* allocate stuff */
- ioc = create_io_context(current, gfp_mask, q->node);
- if (!ioc)
- return NULL;
-
icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
q->node);
if (!icq)
@@ -382,74 +398,6 @@ struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
return icq;
}
-void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
-{
- struct io_cq *icq;
- struct hlist_node *n;
-
- hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
- icq->flags |= flags;
-}
-
-/**
- * ioc_ioprio_changed - notify ioprio change
- * @ioc: io_context of interest
- * @ioprio: new ioprio
- *
- * @ioc's ioprio has changed to @ioprio. Set %ICQ_IOPRIO_CHANGED for all
- * icq's. iosched is responsible for checking the bit and applying it on
- * request issue path.
- */
-void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->lock, flags);
- ioc->ioprio = ioprio;
- ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
- spin_unlock_irqrestore(&ioc->lock, flags);
-}
-
-/**
- * ioc_cgroup_changed - notify cgroup change
- * @ioc: io_context of interest
- *
- * @ioc's cgroup has changed. Set %ICQ_CGROUP_CHANGED for all icq's.
- * iosched is responsible for checking the bit and applying it on request
- * issue path.
- */
-void ioc_cgroup_changed(struct io_context *ioc)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->lock, flags);
- ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
- spin_unlock_irqrestore(&ioc->lock, flags);
-}
-EXPORT_SYMBOL(ioc_cgroup_changed);
-
-/**
- * icq_get_changed - fetch and clear icq changed mask
- * @icq: icq of interest
- *
- * Fetch and clear ICQ_*_CHANGED bits from @icq. Grabs and releases
- * @icq->ioc->lock.
- */
-unsigned icq_get_changed(struct io_cq *icq)
-{
- unsigned int changed = 0;
- unsigned long flags;
-
- if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
- spin_lock_irqsave(&icq->ioc->lock, flags);
- changed = icq->flags & ICQ_CHANGED_MASK;
- icq->flags &= ~ICQ_CHANGED_MASK;
- spin_unlock_irqrestore(&icq->ioc->lock, flags);
- }
- return changed;
-}
-EXPORT_SYMBOL(icq_get_changed);
-
static int __init blk_ioc_init(void)
{
iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index cf150011d808..aa41b47c22d2 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -9,6 +9,7 @@
#include <linux/blktrace_api.h>
#include "blk.h"
+#include "blk-cgroup.h"
struct queue_sysfs_entry {
struct attribute attr;
@@ -479,6 +480,8 @@ static void blk_release_queue(struct kobject *kobj)
blk_sync_queue(q);
+ blkcg_exit_queue(q);
+
if (q->elevator) {
spin_lock_irq(q->queue_lock);
ioc_clear_queue(q);
@@ -486,15 +489,12 @@ static void blk_release_queue(struct kobject *kobj)
elevator_exit(q->elevator);
}
- blk_throtl_exit(q);
-
if (rl->rq_pool)
mempool_destroy(rl->rq_pool);
if (q->queue_tags)
__blk_queue_free_tags(q);
- blk_throtl_release(q);
blk_trace_shutdown(q);
bdi_destroy(&q->backing_dev_info);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index f2ddb94626bd..5b0659512047 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -21,6 +21,8 @@ static int throtl_quantum = 32;
/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10; /* 100 ms */
+static struct blkcg_policy blkcg_policy_throtl;
+
/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
@@ -38,9 +40,17 @@ struct throtl_rb_root {
#define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
+/* Per-cpu group stats */
+struct tg_stats_cpu {
+ /* total bytes transferred */
+ struct blkg_rwstat service_bytes;
+ /* total IOs serviced, post merge */
+ struct blkg_rwstat serviced;
+};
+
struct throtl_grp {
- /* List of throtl groups on the request queue*/
- struct hlist_node tg_node;
+ /* must be the first member */
+ struct blkg_policy_data pd;
/* active throtl group service_tree member */
struct rb_node rb_node;
@@ -52,8 +62,6 @@ struct throtl_grp {
*/
unsigned long disptime;
- struct blkio_group blkg;
- atomic_t ref;
unsigned int flags;
/* Two lists for READ and WRITE */
@@ -80,18 +88,18 @@ struct throtl_grp {
/* Some throttle limits got updated for the group */
int limits_changed;
- struct rcu_head rcu_head;
+ /* Per cpu stats pointer */
+ struct tg_stats_cpu __percpu *stats_cpu;
+
+ /* List of tgs waiting for per cpu stats memory to be allocated */
+ struct list_head stats_alloc_node;
};
struct throtl_data
{
- /* List of throtl groups */
- struct hlist_head tg_list;
-
/* service tree for active throtl groups */
struct throtl_rb_root tg_service_tree;
- struct throtl_grp *root_tg;
struct request_queue *queue;
/* Total Number of queued bios on READ and WRITE lists */
@@ -108,6 +116,33 @@ struct throtl_data
int limits_changed;
};
+/* list and work item to allocate percpu group stats */
+static DEFINE_SPINLOCK(tg_stats_alloc_lock);
+static LIST_HEAD(tg_stats_alloc_list);
+
+static void tg_stats_alloc_fn(struct work_struct *);
+static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
+
+static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
+{
+ return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
+}
+
+static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
+{
+ return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
+}
+
+static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
+{
+ return pd_to_blkg(&tg->pd);
+}
+
+static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
+{
+ return blkg_to_tg(td->queue->root_blkg);
+}
+
enum tg_state_flags {
THROTL_TG_FLAG_on_rr = 0, /* on round-robin busy list */
};
@@ -128,244 +163,150 @@ static inline int throtl_tg_##name(const struct throtl_grp *tg) \
THROTL_TG_FNS(on_rr);
-#define throtl_log_tg(td, tg, fmt, args...) \
- blk_add_trace_msg((td)->queue, "throtl %s " fmt, \
- blkg_path(&(tg)->blkg), ##args); \
+#define throtl_log_tg(td, tg, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf)); \
+ blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
+} while (0)
#define throtl_log(td, fmt, args...) \
blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
-static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
-{
- if (blkg)
- return container_of(blkg, struct throtl_grp, blkg);
-
- return NULL;
-}
-
static inline unsigned int total_nr_queued(struct throtl_data *td)
{
return td->nr_queued[0] + td->nr_queued[1];
}
-static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
-{
- atomic_inc(&tg->ref);
- return tg;
-}
-
-static void throtl_free_tg(struct rcu_head *head)
+/*
+ * Worker for allocating per cpu stat for tgs. This is scheduled on the
+ * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * allocation.
+ */
+static void tg_stats_alloc_fn(struct work_struct *work)
{
- struct throtl_grp *tg;
+ static struct tg_stats_cpu *stats_cpu; /* this fn is non-reentrant */
+ struct delayed_work *dwork = to_delayed_work(work);
+ bool empty = false;
+
+alloc_stats:
+ if (!stats_cpu) {
+ stats_cpu = alloc_percpu(struct tg_stats_cpu);
+ if (!stats_cpu) {
+ /* allocation failed, try again after some time */
+ queue_delayed_work(system_nrt_wq, dwork,
+ msecs_to_jiffies(10));
+ return;
+ }
+ }
- tg = container_of(head, struct throtl_grp, rcu_head);
- free_percpu(tg->blkg.stats_cpu);
- kfree(tg);
-}
+ spin_lock_irq(&tg_stats_alloc_lock);
-static void throtl_put_tg(struct throtl_grp *tg)
-{
- BUG_ON(atomic_read(&tg->ref) <= 0);
- if (!atomic_dec_and_test(&tg->ref))
- return;
+ if (!list_empty(&tg_stats_alloc_list)) {
+ struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
+ struct throtl_grp,
+ stats_alloc_node);
+ swap(tg->stats_cpu, stats_cpu);
+ list_del_init(&tg->stats_alloc_node);
+ }
- /*
- * A group is freed in rcu manner. But having an rcu lock does not
- * mean that one can access all the fields of blkg and assume these
- * are valid. For example, don't try to follow throtl_data and
- * request queue links.
- *
- * Having a reference to blkg under an rcu allows access to only
- * values local to groups like group stats and group rate limits
- */
- call_rcu(&tg->rcu_head, throtl_free_tg);
+ empty = list_empty(&tg_stats_alloc_list);
+ spin_unlock_irq(&tg_stats_alloc_lock);
+ if (!empty)
+ goto alloc_stats;
}
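The worker above exists because alloc_percpu() can sleep while throtl_pd_init() runs from the IO path, so groups are parked on tg_stats_alloc_list and the allocation happens later from process context. A hedged sketch of the same allocate-outside-the-lock, hand-off-under-the-lock shape (illustrative only; unlike the real worker, which keeps an unused buffer cached in its static local for the next run, this version simply frees the spare):

static void stats_alloc_drain_one(void)
{
        struct tg_stats_cpu *buf = alloc_percpu(struct tg_stats_cpu);

        if (!buf)
                return;         /* caller would reschedule the work item */

        spin_lock_irq(&tg_stats_alloc_lock);
        if (!list_empty(&tg_stats_alloc_list)) {
                struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
                                                         struct throtl_grp,
                                                         stats_alloc_node);
                swap(tg->stats_cpu, buf);       /* tg takes ownership */
                list_del_init(&tg->stats_alloc_node);
        }
        spin_unlock_irq(&tg_stats_alloc_lock);

        free_percpu(buf);       /* NULL (tg had none) or the unused spare */
}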
-static void throtl_init_group(struct throtl_grp *tg)
+static void throtl_pd_init(struct blkcg_gq *blkg)
{
- INIT_HLIST_NODE(&tg->tg_node);
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+ unsigned long flags;
+
RB_CLEAR_NODE(&tg->rb_node);
bio_list_init(&tg->bio_lists[0]);
bio_list_init(&tg->bio_lists[1]);
tg->limits_changed = false;
- /* Practically unlimited BW */
- tg->bps[0] = tg->bps[1] = -1;
- tg->iops[0] = tg->iops[1] = -1;
+ tg->bps[READ] = -1;
+ tg->bps[WRITE] = -1;
+ tg->iops[READ] = -1;
+ tg->iops[WRITE] = -1;
/*
- * Take the initial reference that will be released on destroy
- * This can be thought of a joint reference by cgroup and
- * request queue which will be dropped by either request queue
- * exit or cgroup deletion path depending on who is exiting first.
+ * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
+ * but percpu allocator can't be called from IO path. Queue tg on
+ * tg_stats_alloc_list and allocate from work item.
*/
- atomic_set(&tg->ref, 1);
+ spin_lock_irqsave(&tg_stats_alloc_lock, flags);
+ list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
+ queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+ spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}
-/* Should be called with rcu read lock held (needed for blkcg) */
-static void
-throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
+static void throtl_pd_exit(struct blkcg_gq *blkg)
{
- hlist_add_head(&tg->tg_node, &td->tg_list);
- td->nr_undestroyed_grps++;
-}
-
-static void
-__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
-{
- struct backing_dev_info *bdi = &td->queue->backing_dev_info;
- unsigned int major, minor;
-
- if (!tg || tg->blkg.dev)
- return;
-
- /*
- * Fill in device details for a group which might not have been
- * filled at group creation time as queue was being instantiated
- * and driver had not attached a device yet
- */
- if (bdi->dev && dev_name(bdi->dev)) {
- sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
- tg->blkg.dev = MKDEV(major, minor);
- }
-}
-
-/*
- * Should be called without queue lock held. Here the queue lock will be
- * taken rarely. It will be taken only once during the lifetime of a group,
- * if need be
- */
-static void
-throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
-{
- if (!tg || tg->blkg.dev)
- return;
-
- spin_lock_irq(td->queue->queue_lock);
- __throtl_tg_fill_dev_details(td, tg);
- spin_unlock_irq(td->queue->queue_lock);
-}
-
-static void throtl_init_add_tg_lists(struct throtl_data *td,
- struct throtl_grp *tg, struct blkio_cgroup *blkcg)
-{
- __throtl_tg_fill_dev_details(td, tg);
-
- /* Add group onto cgroup list */
- blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
- tg->blkg.dev, BLKIO_POLICY_THROTL);
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+ unsigned long flags;
- tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
- tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
- tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
- tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
+ spin_lock_irqsave(&tg_stats_alloc_lock, flags);
+ list_del_init(&tg->stats_alloc_node);
+ spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
- throtl_add_group_to_td_list(td, tg);
+ free_percpu(tg->stats_cpu);
}
-/* Should be called without queue lock and outside of rcu period */
-static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
+static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
{
- struct throtl_grp *tg = NULL;
- int ret;
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+ int cpu;
- tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
- if (!tg)
- return NULL;
+ if (tg->stats_cpu == NULL)
+ return;
- ret = blkio_alloc_blkg_stats(&tg->blkg);
+ for_each_possible_cpu(cpu) {
+ struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
- if (ret) {
- kfree(tg);
- return NULL;
+ blkg_rwstat_reset(&sc->service_bytes);
+ blkg_rwstat_reset(&sc->serviced);
}
-
- throtl_init_group(tg);
- return tg;
}
-static struct
-throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
+static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
+ struct blkcg *blkcg)
{
- struct throtl_grp *tg = NULL;
- void *key = td;
-
/*
- * This is the common case when there are no blkio cgroups.
- * Avoid lookup in this case
- */
- if (blkcg == &blkio_root_cgroup)
- tg = td->root_tg;
- else
- tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+ * This is the common case when there are no blkcgs. Avoid lookup
+ * in this case
+ */
+ if (blkcg == &blkcg_root)
+ return td_root_tg(td);
- __throtl_tg_fill_dev_details(td, tg);
- return tg;
+ return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}
-static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
+ struct blkcg *blkcg)
{
- struct throtl_grp *tg = NULL, *__tg = NULL;
- struct blkio_cgroup *blkcg;
struct request_queue *q = td->queue;
-
- /* no throttling for dead queue */
- if (unlikely(blk_queue_dead(q)))
- return NULL;
-
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
- tg = throtl_find_tg(td, blkcg);
- if (tg) {
- rcu_read_unlock();
- return tg;
- }
-
- /*
- * Need to allocate a group. Allocation of group also needs allocation
- * of per cpu stats which in turn takes a mutex() and can block. Hence
- * we need to drop rcu lock and queue_lock before we call alloc.
- */
- rcu_read_unlock();
- spin_unlock_irq(q->queue_lock);
-
- tg = throtl_alloc_tg(td);
-
- /* Group allocated and queue is still alive. take the lock */
- spin_lock_irq(q->queue_lock);
-
- /* Make sure @q is still alive */
- if (unlikely(blk_queue_dead(q))) {
- kfree(tg);
- return NULL;
- }
-
- /*
- * Initialize the new group. After sleeping, read the blkcg again.
- */
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
+ struct throtl_grp *tg = NULL;
/*
- * If some other thread already allocated the group while we were
- * not holding queue lock, free up the group
+ * This is the common case when there are no blkcgs. Avoid lookup
+ * in this case
*/
- __tg = throtl_find_tg(td, blkcg);
-
- if (__tg) {
- kfree(tg);
- rcu_read_unlock();
- return __tg;
- }
-
- /* Group allocation failed. Account the IO to root group */
- if (!tg) {
- tg = td->root_tg;
- return tg;
+ if (blkcg == &blkcg_root) {
+ tg = td_root_tg(td);
+ } else {
+ struct blkcg_gq *blkg;
+
+ blkg = blkg_lookup_create(blkcg, q);
+
+ /* if %NULL and @q is alive, fall back to root_tg */
+ if (!IS_ERR(blkg))
+ tg = blkg_to_tg(blkg);
+ else if (!blk_queue_dead(q))
+ tg = td_root_tg(td);
}
- throtl_init_add_tg_lists(td, tg, blkcg);
- rcu_read_unlock();
return tg;
}
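blkg_lookup_create() returns an ERR_PTR() on failure, so a failed creation on a live queue degrades to accounting against the root group instead of failing the bio. A caller-side sketch of how this is meant to be used (it mirrors the blk_throtl_bio() hunk later in this patch; the error handling shown is illustrative):

        rcu_read_lock();
        blkcg = bio_blkcg(bio);

        spin_lock_irq(q->queue_lock);
        tg = throtl_lookup_create_tg(td, blkcg);  /* may fall back to root_tg */
        if (unlikely(!tg)) {                      /* only if the queue is dead */
                spin_unlock_irq(q->queue_lock);
                rcu_read_unlock();
                return false;                     /* don't throttle */
        }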
@@ -734,16 +675,41 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
return 0;
}
+static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
+ int rw)
+{
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+ struct tg_stats_cpu *stats_cpu;
+ unsigned long flags;
+
+ /* If per cpu stats are not allocated yet, don't do any accounting. */
+ if (tg->stats_cpu == NULL)
+ return;
+
+ /*
+ * Disabling interrupts to provide mutual exclusion between two
+ * writes on the same cpu. It probably is not needed for 64-bit. Not
+ * optimizing that case yet.
+ */
+ local_irq_save(flags);
+
+ stats_cpu = this_cpu_ptr(tg->stats_cpu);
+
+ blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
+ blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
+
+ local_irq_restore(flags);
+}
+
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
bool rw = bio_data_dir(bio);
- bool sync = rw_is_sync(bio->bi_rw);
/* Charge the bio to the group */
tg->bytes_disp[rw] += bio->bi_size;
tg->io_disp[rw]++;
- blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
+ throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
}
static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
@@ -753,7 +719,7 @@ static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
bio_list_add(&tg->bio_lists[rw], bio);
/* Take a bio reference on tg */
- throtl_ref_get_tg(tg);
+ blkg_get(tg_to_blkg(tg));
tg->nr_queued[rw]++;
td->nr_queued[rw]++;
throtl_enqueue_tg(td, tg);
@@ -786,8 +752,8 @@ static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
bio = bio_list_pop(&tg->bio_lists[rw]);
tg->nr_queued[rw]--;
- /* Drop bio reference on tg */
- throtl_put_tg(tg);
+ /* Drop bio reference on blkg */
+ blkg_put(tg_to_blkg(tg));
BUG_ON(td->nr_queued[rw] <= 0);
td->nr_queued[rw]--;
@@ -865,8 +831,8 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
static void throtl_process_limit_change(struct throtl_data *td)
{
- struct throtl_grp *tg;
- struct hlist_node *pos, *n;
+ struct request_queue *q = td->queue;
+ struct blkcg_gq *blkg, *n;
if (!td->limits_changed)
return;
@@ -875,7 +841,9 @@ static void throtl_process_limit_change(struct throtl_data *td)
throtl_log(td, "limits changed");
- hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
+ list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+
if (!tg->limits_changed)
continue;
@@ -973,120 +941,159 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
}
}
-static void
-throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
+static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
- /* Something wrong if we are trying to remove same group twice */
- BUG_ON(hlist_unhashed(&tg->tg_node));
+ struct throtl_grp *tg = pd_to_tg(pd);
+ struct blkg_rwstat rwstat = { }, tmp;
+ int i, cpu;
- hlist_del_init(&tg->tg_node);
+ for_each_possible_cpu(cpu) {
+ struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
- /*
- * Put the reference taken at the time of creation so that when all
- * queues are gone, group can be destroyed.
- */
- throtl_put_tg(tg);
- td->nr_undestroyed_grps--;
+ tmp = blkg_rwstat_read((void *)sc + off);
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ rwstat.cnt[i] += tmp.cnt[i];
+ }
+
+ return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
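tg_prfill_cpu_rwstat() folds the per-cpu counters into one struct blkg_rwstat at read time, which keeps the hot update path lock-free. A self-contained userspace sketch of that fold, with plain arrays standing in for per_cpu_ptr()/blkg_rwstat_read() and four slots mirroring BLKG_RWSTAT_NR:

#include <stdio.h>

#define NR_CPUS         4
#define NR_SLOTS        4       /* read, write, sync, async */

struct rwstat { unsigned long long cnt[NR_SLOTS]; };

int main(void)
{
        struct rwstat percpu[NR_CPUS] = { { { 1, 2, 0, 3 } }, { { 4, 0, 1, 0 } } };
        struct rwstat total = { { 0 } };
        int cpu, i;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                for (i = 0; i < NR_SLOTS; i++)
                        total.cnt[i] += percpu[cpu].cnt[i];

        printf("%llu %llu %llu %llu\n",
               total.cnt[0], total.cnt[1], total.cnt[2], total.cnt[3]);
        return 0;
}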
-static void throtl_release_tgs(struct throtl_data *td)
+static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
{
- struct hlist_node *pos, *n;
- struct throtl_grp *tg;
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
- hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
- /*
- * If cgroup removal path got to blk_group first and removed
- * it from cgroup list, then it will take care of destroying
- * cfqg also.
- */
- if (!blkiocg_del_blkio_group(&tg->blkg))
- throtl_destroy_tg(td, tg);
- }
+ blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
+ cft->private, true);
+ return 0;
}
-/*
- * Blk cgroup controller notification saying that blkio_group object is being
- * delinked as associated cgroup object is going away. That also means that
- * no new IO will come in this group. So get rid of this group as soon as
- * any pending IO in the group is finished.
- *
- * This function is called under rcu_read_lock(). key is the rcu protected
- * pointer. That means "key" is a valid throtl_data pointer as long as we
- * are holding the rcu read lock.
- *
- * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
- * it should not be NULL as even if queue was going away, cgroup deletion
- * path got to it first.
- */
-void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
+static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
{
- unsigned long flags;
- struct throtl_data *td = key;
+ struct throtl_grp *tg = pd_to_tg(pd);
+ u64 v = *(u64 *)((void *)tg + off);
- spin_lock_irqsave(td->queue->queue_lock, flags);
- throtl_destroy_tg(td, tg_of_blkg(blkg));
- spin_unlock_irqrestore(td->queue->queue_lock, flags);
+ if (v == -1)
+ return 0;
+ return __blkg_prfill_u64(sf, pd, v);
}
-static void throtl_update_blkio_group_common(struct throtl_data *td,
- struct throtl_grp *tg)
+static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
{
- xchg(&tg->limits_changed, true);
- xchg(&td->limits_changed, true);
- /* Schedule a work now to process the limit change */
- throtl_schedule_delayed_work(td, 0);
+ struct throtl_grp *tg = pd_to_tg(pd);
+ unsigned int v = *(unsigned int *)((void *)tg + off);
+
+ if (v == -1)
+ return 0;
+ return __blkg_prfill_u64(sf, pd, v);
}
-/*
- * For all update functions, key should be a valid pointer because these
- * update functions are called under blkcg_lock, that means, blkg is
- * valid and in turn key is valid. queue exit path can not race because
- * of blkcg_lock
- *
- * Can not take queue lock in update functions as queue lock under blkcg_lock
- * is not allowed. Under other paths we take blkcg_lock under queue_lock.
- */
-static void throtl_update_blkio_group_read_bps(void *key,
- struct blkio_group *blkg, u64 read_bps)
+static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
-
- tg->bps[READ] = read_bps;
- throtl_update_blkio_group_common(td, tg);
+ blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
+ &blkcg_policy_throtl, cft->private, false);
+ return 0;
}
-static void throtl_update_blkio_group_write_bps(void *key,
- struct blkio_group *blkg, u64 write_bps)
+static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
-
- tg->bps[WRITE] = write_bps;
- throtl_update_blkio_group_common(td, tg);
+ blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
+ &blkcg_policy_throtl, cft->private, false);
+ return 0;
}
-static void throtl_update_blkio_group_read_iops(void *key,
- struct blkio_group *blkg, unsigned int read_iops)
+static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
+ bool is_u64)
{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkg_conf_ctx ctx;
+ struct throtl_grp *tg;
+ struct throtl_data *td;
+ int ret;
+
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
+ if (ret)
+ return ret;
+
+ tg = blkg_to_tg(ctx.blkg);
+ td = ctx.blkg->q->td;
+
+ if (!ctx.v)
+ ctx.v = -1;
+
+ if (is_u64)
+ *(u64 *)((void *)tg + cft->private) = ctx.v;
+ else
+ *(unsigned int *)((void *)tg + cft->private) = ctx.v;
+
+ /* XXX: we don't need the following deferred processing */
+ xchg(&tg->limits_changed, true);
+ xchg(&td->limits_changed, true);
+ throtl_schedule_delayed_work(td, 0);
- tg->iops[READ] = read_iops;
- throtl_update_blkio_group_common(td, tg);
+ blkg_conf_finish(&ctx);
+ return 0;
}
-static void throtl_update_blkio_group_write_iops(void *key,
- struct blkio_group *blkg, unsigned int write_iops)
+static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+ const char *buf)
{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
+ return tg_set_conf(cgrp, cft, buf, true);
+}
- tg->iops[WRITE] = write_iops;
- throtl_update_blkio_group_common(td, tg);
+static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
+ const char *buf)
+{
+ return tg_set_conf(cgrp, cft, buf, false);
}
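tg_set_conf() and its two wrappers funnel all four limit files through blkg_conf_prep()/blkg_conf_finish(), which parse a "MAJ:MIN value" line, look up the blkcg_gq for that device, and hand back the parsed value in ctx.v (0 is then mapped to -1, i.e. unlimited). A minimal userspace sketch of just the line format, assuming the real helper does the device and blkg lookup on top of this:

#include <stdio.h>

int main(void)
{
        const char *buf = "8:16 1048576";       /* e.g. limit dev 8:16 to 1 MiB/s */
        unsigned int major, minor;
        unsigned long long v;

        if (sscanf(buf, "%u:%u %llu", &major, &minor, &v) != 3)
                return 1;
        printf("dev %u:%u -> %llu\n", major, minor, v);
        return 0;
}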
+static struct cftype throtl_files[] = {
+ {
+ .name = "throttle.read_bps_device",
+ .private = offsetof(struct throtl_grp, bps[READ]),
+ .read_seq_string = tg_print_conf_u64,
+ .write_string = tg_set_conf_u64,
+ .max_write_len = 256,
+ },
+ {
+ .name = "throttle.write_bps_device",
+ .private = offsetof(struct throtl_grp, bps[WRITE]),
+ .read_seq_string = tg_print_conf_u64,
+ .write_string = tg_set_conf_u64,
+ .max_write_len = 256,
+ },
+ {
+ .name = "throttle.read_iops_device",
+ .private = offsetof(struct throtl_grp, iops[READ]),
+ .read_seq_string = tg_print_conf_uint,
+ .write_string = tg_set_conf_uint,
+ .max_write_len = 256,
+ },
+ {
+ .name = "throttle.write_iops_device",
+ .private = offsetof(struct throtl_grp, iops[WRITE]),
+ .read_seq_string = tg_print_conf_uint,
+ .write_string = tg_set_conf_uint,
+ .max_write_len = 256,
+ },
+ {
+ .name = "throttle.io_service_bytes",
+ .private = offsetof(struct tg_stats_cpu, service_bytes),
+ .read_seq_string = tg_print_cpu_rwstat,
+ },
+ {
+ .name = "throttle.io_serviced",
+ .private = offsetof(struct tg_stats_cpu, serviced),
+ .read_seq_string = tg_print_cpu_rwstat,
+ },
+ { } /* terminate */
+};
+
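Each cftype above stores an offsetof() into its ->private field, letting one print helper and one set helper service all four limit files by doing pointer arithmetic on the throtl_grp (see tg_prfill_conf_u64() and tg_set_conf() earlier in this hunk). A self-contained sketch of that offsetof dispatch, using illustrative struct and field names:

#include <stddef.h>
#include <stdio.h>

struct grp {
        unsigned long long bps[2];      /* READ, WRITE */
        unsigned int iops[2];
};

static unsigned long long read_u64_at(struct grp *g, size_t off)
{
        return *(unsigned long long *)((char *)g + off);
}

int main(void)
{
        struct grp g = { .bps = { 100, 200 }, .iops = { 10, 20 } };

        /* same trick as .private = offsetof(struct throtl_grp, bps[WRITE]) */
        printf("%llu\n", read_u64_at(&g, offsetof(struct grp, bps[1])));
        return 0;
}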
static void throtl_shutdown_wq(struct request_queue *q)
{
struct throtl_data *td = q->td;
@@ -1094,19 +1101,13 @@ static void throtl_shutdown_wq(struct request_queue *q)
cancel_delayed_work_sync(&td->throtl_work);
}
-static struct blkio_policy_type blkio_policy_throtl = {
- .ops = {
- .blkio_unlink_group_fn = throtl_unlink_blkio_group,
- .blkio_update_group_read_bps_fn =
- throtl_update_blkio_group_read_bps,
- .blkio_update_group_write_bps_fn =
- throtl_update_blkio_group_write_bps,
- .blkio_update_group_read_iops_fn =
- throtl_update_blkio_group_read_iops,
- .blkio_update_group_write_iops_fn =
- throtl_update_blkio_group_write_iops,
- },
- .plid = BLKIO_POLICY_THROTL,
+static struct blkcg_policy blkcg_policy_throtl = {
+ .pd_size = sizeof(struct throtl_grp),
+ .cftypes = throtl_files,
+
+ .pd_init_fn = throtl_pd_init,
+ .pd_exit_fn = throtl_pd_exit,
+ .pd_reset_stats_fn = throtl_pd_reset_stats,
};
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
@@ -1114,7 +1115,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
struct throtl_data *td = q->td;
struct throtl_grp *tg;
bool rw = bio_data_dir(bio), update_disptime = true;
- struct blkio_cgroup *blkcg;
+ struct blkcg *blkcg;
bool throttled = false;
if (bio->bi_rw & REQ_THROTTLED) {
@@ -1122,33 +1123,31 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
goto out;
}
+ /* bio_associate_current() needs ioc, try creating */
+ create_io_context(GFP_ATOMIC, q->node);
+
/*
* A throtl_grp pointer retrieved under rcu can be used to access
* basic fields like stats and io rates. If a group has no rules,
* just update the dispatch stats in lockless manner and return.
*/
-
rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
- tg = throtl_find_tg(td, blkcg);
+ blkcg = bio_blkcg(bio);
+ tg = throtl_lookup_tg(td, blkcg);
if (tg) {
- throtl_tg_fill_dev_details(td, tg);
-
if (tg_no_rule_group(tg, rw)) {
- blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
- rw, rw_is_sync(bio->bi_rw));
- rcu_read_unlock();
- goto out;
+ throtl_update_dispatch_stats(tg_to_blkg(tg),
+ bio->bi_size, bio->bi_rw);
+ goto out_unlock_rcu;
}
}
- rcu_read_unlock();
/*
* Either group has not been allocated yet or it is not an unlimited
* IO group
*/
spin_lock_irq(q->queue_lock);
- tg = throtl_get_tg(td);
+ tg = throtl_lookup_create_tg(td, blkcg);
if (unlikely(!tg))
goto out_unlock;
@@ -1189,6 +1188,7 @@ queue_bio:
tg->io_disp[rw], tg->iops[rw],
tg->nr_queued[READ], tg->nr_queued[WRITE]);
+ bio_associate_current(bio);
throtl_add_bio_tg(q->td, tg, bio);
throttled = true;
@@ -1199,6 +1199,8 @@ queue_bio:
out_unlock:
spin_unlock_irq(q->queue_lock);
+out_unlock_rcu:
+ rcu_read_unlock();
out:
return throttled;
}
@@ -1241,79 +1243,31 @@ void blk_throtl_drain(struct request_queue *q)
int blk_throtl_init(struct request_queue *q)
{
struct throtl_data *td;
- struct throtl_grp *tg;
+ int ret;
td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
if (!td)
return -ENOMEM;
- INIT_HLIST_HEAD(&td->tg_list);
td->tg_service_tree = THROTL_RB_ROOT;
td->limits_changed = false;
INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
- /* alloc and Init root group. */
+ q->td = td;
td->queue = q;
- tg = throtl_alloc_tg(td);
- if (!tg) {
+ /* activate policy */
+ ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
+ if (ret)
kfree(td);
- return -ENOMEM;
- }
-
- td->root_tg = tg;
-
- rcu_read_lock();
- throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
- rcu_read_unlock();
-
- /* Attach throtl data to request queue */
- q->td = td;
- return 0;
+ return ret;
}
void blk_throtl_exit(struct request_queue *q)
{
- struct throtl_data *td = q->td;
- bool wait = false;
-
- BUG_ON(!td);
-
- throtl_shutdown_wq(q);
-
- spin_lock_irq(q->queue_lock);
- throtl_release_tgs(td);
-
- /* If there are other groups */
- if (td->nr_undestroyed_grps > 0)
- wait = true;
-
- spin_unlock_irq(q->queue_lock);
-
- /*
- * Wait for tg->blkg->key accessors to exit their grace periods.
- * Do this wait only if there are other undestroyed groups out
- * there (other than root group). This can happen if cgroup deletion
- * path claimed the responsibility of cleaning up a group before
- * queue cleanup code get to the group.
- *
- * Do not call synchronize_rcu() unconditionally as there are drivers
- * which create/delete request queue hundreds of times during scan/boot
- * and synchronize_rcu() can take significant time and slow down boot.
- */
- if (wait)
- synchronize_rcu();
-
- /*
- * Just being safe to make sure that if, after the previous flush, somebody
- * updated limits through the cgroup and another work got queued, we cancel
- * it.
- */
+ BUG_ON(!q->td);
throtl_shutdown_wq(q);
-}
-
-void blk_throtl_release(struct request_queue *q)
-{
+ blkcg_deactivate_policy(q, &blkcg_policy_throtl);
kfree(q->td);
}
@@ -1323,8 +1277,7 @@ static int __init throtl_init(void)
if (!kthrotld_workqueue)
panic("Failed to create kthrotld\n");
- blkio_policy_register(&blkio_policy_throtl);
- return 0;
+ return blkcg_policy_register(&blkcg_policy_throtl);
}
module_init(throtl_init);
diff --git a/block/blk.h b/block/blk.h
index d45be871329e..85f6ae42f7d3 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -23,7 +23,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio);
-void blk_drain_queue(struct request_queue *q, bool drain_all);
+void blk_queue_bypass_start(struct request_queue *q);
+void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
@@ -144,9 +145,6 @@ void blk_queue_congestion_threshold(struct request_queue *q);
int blk_dev_init(void);
-void elv_quiesce_start(struct request_queue *q);
-void elv_quiesce_end(struct request_queue *q);
-
/*
* Return the threshold (number of used requests) at which the queue is
@@ -186,32 +184,30 @@ static inline int blk_do_io_stat(struct request *rq)
*/
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+ gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
- int node);
+int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
/**
* create_io_context - try to create task->io_context
- * @task: target task
* @gfp_mask: allocation mask
* @node: allocation node
*
- * If @task->io_context is %NULL, allocate a new io_context and install it.
- * Returns the current @task->io_context which may be %NULL if allocation
- * failed.
+ * If %current->io_context is %NULL, allocate a new io_context and install
+ * it. Returns the current %current->io_context which may be %NULL if
+ * allocation failed.
*
* Note that this function can't be called with IRQ disabled because
- * task_lock which protects @task->io_context is IRQ-unsafe.
+ * task_lock which protects %current->io_context is IRQ-unsafe.
*/
-static inline struct io_context *create_io_context(struct task_struct *task,
- gfp_t gfp_mask, int node)
+static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
WARN_ON_ONCE(irqs_disabled());
- if (unlikely(!task->io_context))
- create_io_context_slowpath(task, gfp_mask, node);
- return task->io_context;
+ if (unlikely(!current->io_context))
+ create_task_io_context(current, gfp_mask, node);
+ return current->io_context;
}
/*
@@ -222,7 +218,6 @@ extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
-extern void blk_throtl_release(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
@@ -231,7 +226,6 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
-static inline void blk_throtl_release(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#endif /* BLK_INTERNAL_H */
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3c38536bd52c..673c977cc2bf 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -15,7 +15,9 @@
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
-#include "cfq.h"
+#include "blk-cgroup.h"
+
+static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
/*
* tunables
@@ -171,8 +173,53 @@ enum wl_type_t {
SYNC_WORKLOAD = 2
};
+struct cfqg_stats {
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ /* total bytes transferred */
+ struct blkg_rwstat service_bytes;
+ /* total IOs serviced, post merge */
+ struct blkg_rwstat serviced;
+ /* number of ios merged */
+ struct blkg_rwstat merged;
+ /* total time spent on device in ns, may not be accurate w/ queueing */
+ struct blkg_rwstat service_time;
+ /* total time spent waiting in scheduler queue in ns */
+ struct blkg_rwstat wait_time;
+ /* number of IOs queued up */
+ struct blkg_rwstat queued;
+ /* total sectors transferred */
+ struct blkg_stat sectors;
+ /* total disk time and nr sectors dispatched by this group */
+ struct blkg_stat time;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ /* time not charged to this cgroup */
+ struct blkg_stat unaccounted_time;
+ /* sum of number of ios queued across all samples */
+ struct blkg_stat avg_queue_size_sum;
+ /* count of samples taken for average */
+ struct blkg_stat avg_queue_size_samples;
+ /* how many times this group has been removed from service tree */
+ struct blkg_stat dequeue;
+ /* total time spent waiting for it to be assigned a timeslice. */
+ struct blkg_stat group_wait_time;
+ /* time spent idling for this blkcg_gq */
+ struct blkg_stat idle_time;
+ /* total time with empty current active q with other requests queued */
+ struct blkg_stat empty_time;
+ /* fields after this shouldn't be cleared on stat reset */
+ uint64_t start_group_wait_time;
+ uint64_t start_idle_time;
+ uint64_t start_empty_time;
+ uint16_t flags;
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
+#endif /* CONFIG_CFQ_GROUP_IOSCHED */
+};
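struct cfqg_stats replaces the old blkio_group_stats: plain counters use struct blkg_stat and split read/write counters use struct blkg_rwstat, which is why the print helpers further down can be shared across fields. Roughly, as a hedged sketch (the real definitions live in blk-cgroup.h of this series), the two primitives look like:

struct blkg_stat {
        struct u64_stats_sync   syncp;  /* 32-bit readers need the seqcount */
        uint64_t                cnt;
};

struct blkg_rwstat {
        struct u64_stats_sync   syncp;
        uint64_t                cnt[BLKG_RWSTAT_NR];    /* READ/WRITE/SYNC/ASYNC */
};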
+
/* This is per cgroup per device grouping structure */
struct cfq_group {
+ /* must be the first member */
+ struct blkg_policy_data pd;
+
/* group service_tree member */
struct rb_node rb_node;
@@ -180,7 +227,7 @@ struct cfq_group {
u64 vdisktime;
unsigned int weight;
unsigned int new_weight;
- bool needs_update;
+ unsigned int dev_weight;
/* number of cfqq currently on this group */
int nr_cfqq;
@@ -206,20 +253,21 @@ struct cfq_group {
unsigned long saved_workload_slice;
enum wl_type_t saved_workload;
enum wl_prio_t saved_serving_prio;
- struct blkio_group blkg;
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- struct hlist_node cfqd_node;
- int ref;
-#endif
+
/* number of requests that are on the dispatch list or inside driver */
int dispatched;
struct cfq_ttime ttime;
+ struct cfqg_stats stats;
};
struct cfq_io_cq {
struct io_cq icq; /* must be the first member */
struct cfq_queue *cfqq[2];
struct cfq_ttime ttime;
+ int ioprio; /* the current ioprio */
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ uint64_t blkcg_id; /* the current blkcg ID */
+#endif
};
/*
@@ -229,7 +277,7 @@ struct cfq_data {
struct request_queue *queue;
/* Root service tree for cfq_groups */
struct cfq_rb_root grp_service_tree;
- struct cfq_group root_group;
+ struct cfq_group *root_group;
/*
* The priority currently being served
@@ -303,12 +351,6 @@ struct cfq_data {
struct cfq_queue oom_cfqq;
unsigned long last_delayed_sync;
-
- /* List of cfq groups being managed on this device*/
- struct hlist_head cfqg_list;
-
- /* Number of groups which are on blkcg->blkg_list */
- unsigned int nr_blkcg_linked_grps;
};
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
@@ -371,21 +413,284 @@ CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
+static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
+{
+ return pd ? container_of(pd, struct cfq_group, pd) : NULL;
+}
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+ return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
+static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
+{
+ return pd_to_blkg(&cfqg->pd);
+}
+
+#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+
+/* cfqg stats flags */
+enum cfqg_stats_flags {
+ CFQG_stats_waiting = 0,
+ CFQG_stats_idling,
+ CFQG_stats_empty,
+};
+
+#define CFQG_FLAG_FNS(name) \
+static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats) \
+{ \
+ stats->flags |= (1 << CFQG_stats_##name); \
+} \
+static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats) \
+{ \
+ stats->flags &= ~(1 << CFQG_stats_##name); \
+} \
+static inline int cfqg_stats_##name(struct cfqg_stats *stats) \
+{ \
+ return (stats->flags & (1 << CFQG_stats_##name)) != 0; \
+} \
+
+CFQG_FLAG_FNS(waiting)
+CFQG_FLAG_FNS(idling)
+CFQG_FLAG_FNS(empty)
+#undef CFQG_FLAG_FNS
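For reference, CFQG_FLAG_FNS(waiting) above expands to roughly the following three accessors; the other two invocations produce the idling and empty variants:

static inline void cfqg_stats_mark_waiting(struct cfqg_stats *stats)
{
        stats->flags |= (1 << CFQG_stats_waiting);
}
static inline void cfqg_stats_clear_waiting(struct cfqg_stats *stats)
{
        stats->flags &= ~(1 << CFQG_stats_waiting);
}
static inline int cfqg_stats_waiting(struct cfqg_stats *stats)
{
        return (stats->flags & (1 << CFQG_stats_waiting)) != 0;
}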
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
+{
+ unsigned long long now;
+
+ if (!cfqg_stats_waiting(stats))
+ return;
+
+ now = sched_clock();
+ if (time_after64(now, stats->start_group_wait_time))
+ blkg_stat_add(&stats->group_wait_time,
+ now - stats->start_group_wait_time);
+ cfqg_stats_clear_waiting(stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
+ struct cfq_group *curr_cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ if (cfqg_stats_waiting(stats))
+ return;
+ if (cfqg == curr_cfqg)
+ return;
+ stats->start_group_wait_time = sched_clock();
+ cfqg_stats_mark_waiting(stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
+{
+ unsigned long long now;
+
+ if (!cfqg_stats_empty(stats))
+ return;
+
+ now = sched_clock();
+ if (time_after64(now, stats->start_empty_time))
+ blkg_stat_add(&stats->empty_time,
+ now - stats->start_empty_time);
+ cfqg_stats_clear_empty(stats);
+}
+
+static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
+{
+ blkg_stat_add(&cfqg->stats.dequeue, 1);
+}
+
+static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ if (blkg_rwstat_sum(&stats->queued))
+ return;
+
+ /*
+ * group is already marked empty. This can happen if cfqq got new
+ * request in parent group and moved to this group while being added
+ * to service tree. Just ignore the event and move on.
+ */
+ if (cfqg_stats_empty(stats))
+ return;
+
+ stats->start_empty_time = sched_clock();
+ cfqg_stats_mark_empty(stats);
+}
+
+static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ if (cfqg_stats_idling(stats)) {
+ unsigned long long now = sched_clock();
+
+ if (time_after64(now, stats->start_idle_time))
+ blkg_stat_add(&stats->idle_time,
+ now - stats->start_idle_time);
+ cfqg_stats_clear_idling(stats);
+ }
+}
+
+static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ BUG_ON(cfqg_stats_idling(stats));
+
+ stats->start_idle_time = sched_clock();
+ cfqg_stats_mark_idling(stats);
+}
+
+static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ blkg_stat_add(&stats->avg_queue_size_sum,
+ blkg_rwstat_sum(&stats->queued));
+ blkg_stat_add(&stats->avg_queue_size_samples, 1);
+ cfqg_stats_update_group_wait_time(stats);
+}
+
+#else /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
+static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
+static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
+static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
+
+#endif /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
+
+static inline void cfqg_get(struct cfq_group *cfqg)
+{
+ return blkg_get(cfqg_to_blkg(cfqg));
+}
+
+static inline void cfqg_put(struct cfq_group *cfqg)
+{
+ return blkg_put(cfqg_to_blkg(cfqg));
+}
+
+#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \
blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
- cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
- blkg_path(&(cfqq)->cfqg->blkg), ##args)
+ cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
+ __pbuf, ##args); \
+} while (0)
-#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
- blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
- blkg_path(&(cfqg)->blkg), ##args) \
+#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf)); \
+ blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args); \
+} while (0)
+
+static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
+ struct cfq_group *curr_cfqg, int rw)
+{
+ blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
+ cfqg_stats_end_empty_time(&cfqg->stats);
+ cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
+}
+
+static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
+ unsigned long time, unsigned long unaccounted_time)
+{
+ blkg_stat_add(&cfqg->stats.time, time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
+#endif
+}
+
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
+{
+ blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
+}
+
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
+{
+ blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
+}
+
+static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
+ uint64_t bytes, int rw)
+{
+ blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
+ blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
+ blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
+}
+
+static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
+ uint64_t start_time, uint64_t io_start_time, int rw)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+ unsigned long long now = sched_clock();
+
+ if (time_after64(now, io_start_time))
+ blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
+ if (time_after64(io_start_time, start_time))
+ blkg_rwstat_add(&stats->wait_time, rw,
+ io_start_time - start_time);
+}
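As a worked example for cfqg_stats_update_completion(): with start_time = 100, io_start_time = 140 and now = 200 (all from sched_clock(), in ns), the group's wait_time gains 40 (140 - 100, the time the request sat in the scheduler queue) and its service_time gains 60 (200 - 140, the time spent on the device); the time_after64() checks simply skip a sample whose clock readings went backwards.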
+
+static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
+{
+ struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ /* queued stats shouldn't be cleared */
+ blkg_rwstat_reset(&stats->service_bytes);
+ blkg_rwstat_reset(&stats->serviced);
+ blkg_rwstat_reset(&stats->merged);
+ blkg_rwstat_reset(&stats->service_time);
+ blkg_rwstat_reset(&stats->wait_time);
+ blkg_stat_reset(&stats->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ blkg_stat_reset(&stats->unaccounted_time);
+ blkg_stat_reset(&stats->avg_queue_size_sum);
+ blkg_stat_reset(&stats->avg_queue_size_samples);
+ blkg_stat_reset(&stats->dequeue);
+ blkg_stat_reset(&stats->group_wait_time);
+ blkg_stat_reset(&stats->idle_time);
+ blkg_stat_reset(&stats->empty_time);
+#endif
+}
+
+#else /* CONFIG_CFQ_GROUP_IOSCHED */
+
+static inline void cfqg_get(struct cfq_group *cfqg) { }
+static inline void cfqg_put(struct cfq_group *cfqg) { }
-#else
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
-#endif
+
+static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
+ struct cfq_group *curr_cfqg, int rw) { }
+static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
+ unsigned long time, unsigned long unaccounted_time) { }
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
+ uint64_t bytes, int rw) { }
+static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
+ uint64_t start_time, uint64_t io_start_time, int rw) { }
+
+#endif /* CONFIG_CFQ_GROUP_IOSCHED */
+
#define cfq_log(cfqd, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
@@ -466,8 +771,9 @@ static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
}
static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
- struct io_context *, gfp_t);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
+ struct cfq_io_cq *cic, struct bio *bio,
+ gfp_t gfp_mask);
static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
@@ -545,7 +851,7 @@ static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
{
u64 d = delta << CFQ_SERVICE_SHIFT;
- d = d * BLKIO_WEIGHT_DEFAULT;
+ d = d * CFQ_WEIGHT_DEFAULT;
do_div(d, cfqg->weight);
return d;
}
@@ -872,9 +1178,9 @@ static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
- if (cfqg->needs_update) {
+ if (cfqg->new_weight) {
cfqg->weight = cfqg->new_weight;
- cfqg->needs_update = false;
+ cfqg->new_weight = 0;
}
}
@@ -936,7 +1242,7 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
cfq_group_service_tree_del(st, cfqg);
cfqg->saved_workload_slice = 0;
- cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
+ cfqg_stats_update_dequeue(cfqg);
}
static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
@@ -1008,178 +1314,59 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
"sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
used_sl, cfqq->slice_dispatch, charge,
iops_mode(cfqd), cfqq->nr_sectors);
- cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
- unaccounted_sl);
- cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
+ cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
+ cfqg_stats_set_start_empty_time(cfqg);
}
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
-{
- if (blkg)
- return container_of(blkg, struct cfq_group, blkg);
- return NULL;
-}
-
-static void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
- unsigned int weight)
-{
- struct cfq_group *cfqg = cfqg_of_blkg(blkg);
- cfqg->new_weight = weight;
- cfqg->needs_update = true;
-}
-
-static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
- struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
-{
- struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
- unsigned int major, minor;
-
- /*
- * Add group onto cgroup list. It might happen that bdi->dev is
- * not initialized yet. Initialize this new group without major
- * and minor info and this info will be filled in once a new thread
- * comes for IO.
- */
- if (bdi->dev) {
- sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
- cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
- (void *)cfqd, MKDEV(major, minor));
- } else
- cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
- (void *)cfqd, 0);
-
- cfqd->nr_blkcg_linked_grps++;
- cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
-
- /* Add group on cfqd list */
- hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
-}
-
-/*
- * Should be called from sleepable context. No request queue lock as per
- * cpu stats are allocated dynamically and alloc_percpu needs to be called
- * from sleepable context.
+/**
+ * cfq_init_cfqg_base - initialize base part of a cfq_group
+ * @cfqg: cfq_group to initialize
+ *
+ * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
+ * is enabled or not.
*/
-static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
+static void cfq_init_cfqg_base(struct cfq_group *cfqg)
{
- struct cfq_group *cfqg = NULL;
- int i, j, ret;
struct cfq_rb_root *st;
-
- cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
- if (!cfqg)
- return NULL;
+ int i, j;
for_each_cfqg_st(cfqg, i, j, st)
*st = CFQ_RB_ROOT;
RB_CLEAR_NODE(&cfqg->rb_node);
cfqg->ttime.last_end_request = jiffies;
-
- /*
- * Take the initial reference that will be released on destroy
- * This can be thought of a joint reference by cgroup and
- * elevator which will be dropped by either elevator exit
- * or cgroup deletion path depending on who is exiting first.
- */
- cfqg->ref = 1;
-
- ret = blkio_alloc_blkg_stats(&cfqg->blkg);
- if (ret) {
- kfree(cfqg);
- return NULL;
- }
-
- return cfqg;
}
-static struct cfq_group *
-cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static void cfq_pd_init(struct blkcg_gq *blkg)
{
- struct cfq_group *cfqg = NULL;
- void *key = cfqd;
- struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
- unsigned int major, minor;
-
- /*
- * This is the common case when there are no blkio cgroups.
- * Avoid lookup in this case
- */
- if (blkcg == &blkio_root_cgroup)
- cfqg = &cfqd->root_group;
- else
- cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
-
- if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
- sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
- cfqg->blkg.dev = MKDEV(major, minor);
- }
+ struct cfq_group *cfqg = blkg_to_cfqg(blkg);
- return cfqg;
+ cfq_init_cfqg_base(cfqg);
+ cfqg->weight = blkg->blkcg->cfq_weight;
}
/*
* Search for the cfq group current task belongs to. request_queue lock must
* be held.
*/
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+ struct blkcg *blkcg)
{
- struct blkio_cgroup *blkcg;
- struct cfq_group *cfqg = NULL, *__cfqg = NULL;
struct request_queue *q = cfqd->queue;
+ struct cfq_group *cfqg = NULL;
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
- cfqg = cfq_find_cfqg(cfqd, blkcg);
- if (cfqg) {
- rcu_read_unlock();
- return cfqg;
- }
-
- /*
- * Need to allocate a group. Allocation of group also needs allocation
- * of per cpu stats which in turn takes a mutex() and can block. Hence
- * we need to drop rcu lock and queue_lock before we call alloc.
- *
- * Not taking any queue reference here and assuming that queue is
- * around by the time we return. CFQ queue allocation code does
- * the same. It might be racy though.
- */
-
- rcu_read_unlock();
- spin_unlock_irq(q->queue_lock);
-
- cfqg = cfq_alloc_cfqg(cfqd);
-
- spin_lock_irq(q->queue_lock);
-
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
-
- /*
- * If some other thread already allocated the group while we were
- * not holding queue lock, free up the group
- */
- __cfqg = cfq_find_cfqg(cfqd, blkcg);
+ /* avoid lookup for the common case where there's no blkcg */
+ if (blkcg == &blkcg_root) {
+ cfqg = cfqd->root_group;
+ } else {
+ struct blkcg_gq *blkg;
- if (__cfqg) {
- kfree(cfqg);
- rcu_read_unlock();
- return __cfqg;
+ blkg = blkg_lookup_create(blkcg, q);
+ if (!IS_ERR(blkg))
+ cfqg = blkg_to_cfqg(blkg);
}
- if (!cfqg)
- cfqg = &cfqd->root_group;
-
- cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
- rcu_read_unlock();
- return cfqg;
-}
-
-static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
-{
- cfqg->ref++;
return cfqg;
}
@@ -1187,94 +1374,224 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
/* Currently, all async queues are mapped to root group */
if (!cfq_cfqq_sync(cfqq))
- cfqg = &cfqq->cfqd->root_group;
+ cfqg = cfqq->cfqd->root_group;
cfqq->cfqg = cfqg;
/* cfqq reference on cfqg */
- cfqq->cfqg->ref++;
+ cfqg_get(cfqg);
}
-static void cfq_put_cfqg(struct cfq_group *cfqg)
+static u64 cfqg_prfill_weight_device(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
- struct cfq_rb_root *st;
- int i, j;
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
- BUG_ON(cfqg->ref <= 0);
- cfqg->ref--;
- if (cfqg->ref)
- return;
- for_each_cfqg_st(cfqg, i, j, st)
- BUG_ON(!RB_EMPTY_ROOT(&st->rb));
- free_percpu(cfqg->blkg.stats_cpu);
- kfree(cfqg);
+ if (!cfqg->dev_weight)
+ return 0;
+ return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
}
-static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
+static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
{
- /* Something wrong if we are trying to remove same group twice */
- BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
+ blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
+ cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
+ false);
+ return 0;
+}
- hlist_del_init(&cfqg->cfqd_node);
+static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
+{
+ seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
+ return 0;
+}
- BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
- cfqd->nr_blkcg_linked_grps--;
+static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
+ const char *buf)
+{
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkg_conf_ctx ctx;
+ struct cfq_group *cfqg;
+ int ret;
- /*
- * Put the reference taken at the time of creation so that when all
- * queues are gone, group can be destroyed.
- */
- cfq_put_cfqg(cfqg);
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
+ if (ret)
+ return ret;
+
+ ret = -EINVAL;
+ cfqg = blkg_to_cfqg(ctx.blkg);
+ if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
+ cfqg->dev_weight = ctx.v;
+ cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
+ ret = 0;
+ }
+
+ blkg_conf_finish(&ctx);
+ return ret;
}
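The validation above accepts either 0 or a value in [CFQ_WEIGHT_MIN, CFQ_WEIGHT_MAX]: writing, say, "8:16 300" to the per-device weight file pins that group's weight to 300, while writing "8:16 0" clears the override so the group falls back to the cgroup-wide cfq_weight, which is what the `cfqg->dev_weight ?: blkcg->cfq_weight` expression encodes.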
-static void cfq_release_cfq_groups(struct cfq_data *cfqd)
+static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
- struct hlist_node *pos, *n;
- struct cfq_group *cfqg;
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg_gq *blkg;
+ struct hlist_node *n;
- hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
- /*
- * If cgroup removal path got to blk_group first and removed
- * it from cgroup list, then it will take care of destroying
- * cfqg also.
- */
- if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
- cfq_destroy_cfqg(cfqd, cfqg);
+ if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
+ return -EINVAL;
+
+ spin_lock_irq(&blkcg->lock);
+ blkcg->cfq_weight = (unsigned int)val;
+
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+ if (cfqg && !cfqg->dev_weight)
+ cfqg->new_weight = blkcg->cfq_weight;
}
+
+ spin_unlock_irq(&blkcg->lock);
+ return 0;
}
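cfq_set_weight() is the cgroup-wide counterpart: under blkcg->lock it updates blkcg->cfq_weight and then walks every blkcg_gq on the cgroup's blkg_list, refreshing new_weight only for groups that have no per-device override. For example, after writing 500 to the cgroup-wide weight file, a group configured with a device weight of 300 keeps 300, while all other groups converge to 500 the next time cfq_update_group_weight() runs for them.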
-/*
- * Blk cgroup controller notification saying that blkio_group object is being
- * delinked as associated cgroup object is going away. That also means that
- * no new IO will come in this group. So get rid of this group as soon as
- * any pending IO in the group is finished.
- *
- * This function is called under rcu_read_lock(). key is the rcu protected
- * pointer. That means "key" is a valid cfq_data pointer as long as we are
- * holding the rcu read lock.
- *
- * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
- * it should not be NULL as even if elevator was exiting, cgroup deletion
- * path got to it first.
- */
-static void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
+static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
{
- unsigned long flags;
- struct cfq_data *cfqd = key;
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
- spin_lock_irqsave(cfqd->queue->queue_lock, flags);
- cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
- spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+ blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
+ cft->private, false);
+ return 0;
}
-#else /* GROUP_IOSCHED */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
{
- return &cfqd->root_group;
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+ blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
+ cft->private, true);
+ return 0;
}
-static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
- return cfqg;
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
+ u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
+ u64 v = 0;
+
+ if (samples) {
+ v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
+ do_div(v, samples);
+ }
+ __blkg_prfill_u64(sf, pd, v);
+ return 0;
+}
+
+/* print avg_queue_size */
+static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
+{
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+ blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
+ &blkcg_policy_cfq, 0, false);
+ return 0;
+}
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
+
+static struct cftype cfq_blkcg_files[] = {
+ {
+ .name = "weight_device",
+ .read_seq_string = cfqg_print_weight_device,
+ .write_string = cfqg_set_weight_device,
+ .max_write_len = 256,
+ },
+ {
+ .name = "weight",
+ .read_seq_string = cfq_print_weight,
+ .write_u64 = cfq_set_weight,
+ },
+ {
+ .name = "time",
+ .private = offsetof(struct cfq_group, stats.time),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "sectors",
+ .private = offsetof(struct cfq_group, stats.sectors),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "io_service_bytes",
+ .private = offsetof(struct cfq_group, stats.service_bytes),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_serviced",
+ .private = offsetof(struct cfq_group, stats.serviced),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_service_time",
+ .private = offsetof(struct cfq_group, stats.service_time),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_wait_time",
+ .private = offsetof(struct cfq_group, stats.wait_time),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_merged",
+ .private = offsetof(struct cfq_group, stats.merged),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_queued",
+ .private = offsetof(struct cfq_group, stats.queued),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ {
+ .name = "avg_queue_size",
+ .read_seq_string = cfqg_print_avg_queue_size,
+ },
+ {
+ .name = "group_wait_time",
+ .private = offsetof(struct cfq_group, stats.group_wait_time),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "idle_time",
+ .private = offsetof(struct cfq_group, stats.idle_time),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "empty_time",
+ .private = offsetof(struct cfq_group, stats.empty_time),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "dequeue",
+ .private = offsetof(struct cfq_group, stats.dequeue),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "unaccounted_time",
+ .private = offsetof(struct cfq_group, stats.unaccounted_time),
+ .read_seq_string = cfqg_print_stat,
+ },
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
+ { } /* terminate */
+};
+#else /* GROUP_IOSCHED */
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+ struct blkcg *blkcg)
+{
+ return cfqd->root_group;
}
static inline void
@@ -1282,9 +1599,6 @@ cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
cfqq->cfqg = cfqg;
}
-static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
-static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
-
#endif /* GROUP_IOSCHED */
/*
@@ -1551,12 +1865,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
elv_rb_del(&cfqq->sort_list, rq);
cfqq->queued[rq_is_sync(rq)]--;
- cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
cfq_add_rq_rb(rq);
- cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
- &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
- rq_is_sync(rq));
+ cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
+ rq->cmd_flags);
}
static struct request *
@@ -1612,8 +1924,7 @@ static void cfq_remove_request(struct request *rq)
cfq_del_rq_rb(rq);
cfqq->cfqd->rq_queued--;
- cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
if (rq->cmd_flags & REQ_PRIO) {
WARN_ON(!cfqq->prio_pending);
cfqq->prio_pending--;
@@ -1648,8 +1959,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
static void cfq_bio_merged(struct request_queue *q, struct request *req,
struct bio *bio)
{
- cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
- bio_data_dir(bio), cfq_bio_sync(bio));
+ cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
}
static void
@@ -1671,8 +1981,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
if (cfqq->next_rq == next)
cfqq->next_rq = rq;
cfq_remove_request(next);
- cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
- rq_data_dir(next), rq_is_sync(next));
+ cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
cfqq = RQ_CFQQ(next);
/*
@@ -1713,7 +2022,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
del_timer(&cfqd->idle_slice_timer);
- cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+ cfqg_stats_update_idle_time(cfqq->cfqg);
}
static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -1722,7 +2031,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
if (cfqq) {
cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
cfqd->serving_prio, cfqd->serving_type);
- cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
+ cfqg_stats_update_avg_queue_size(cfqq->cfqg);
cfqq->slice_start = 0;
cfqq->dispatch_start = jiffies;
cfqq->allocated_slice = 0;
@@ -2043,7 +2352,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
* task has exited, don't wait
*/
cic = cfqd->active_cic;
- if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
+ if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
return;
/*
@@ -2070,7 +2379,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
sl = cfqd->cfq_slice_idle;
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
- cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
+ cfqg_stats_set_start_idle_time(cfqq->cfqg);
cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
group_idle ? 1 : 0);
}
@@ -2093,8 +2402,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
cfqq->nr_sectors += blk_rq_sectors(rq);
- cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
}
/*
@@ -2677,7 +2985,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
BUG_ON(cfq_cfqq_on_rr(cfqq));
kmem_cache_free(cfq_pool, cfqq);
- cfq_put_cfqg(cfqg);
+ cfqg_put(cfqg);
}
static void cfq_put_cooperator(struct cfq_queue *cfqq)
@@ -2736,7 +3044,7 @@ static void cfq_exit_icq(struct io_cq *icq)
}
}
-static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
+static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
{
struct task_struct *tsk = current;
int ioprio_class;
@@ -2744,7 +3052,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
if (!cfq_cfqq_prio_changed(cfqq))
return;
- ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
+ ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
switch (ioprio_class) {
default:
printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
@@ -2756,11 +3064,11 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
cfqq->ioprio_class = task_nice_ioclass(tsk);
break;
case IOPRIO_CLASS_RT:
- cfqq->ioprio = task_ioprio(ioc);
+ cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
cfqq->ioprio_class = IOPRIO_CLASS_RT;
break;
case IOPRIO_CLASS_BE:
- cfqq->ioprio = task_ioprio(ioc);
+ cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
cfqq->ioprio_class = IOPRIO_CLASS_BE;
break;
case IOPRIO_CLASS_IDLE:
@@ -2778,19 +3086,24 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
cfq_clear_cfqq_prio_changed(cfqq);
}
-static void changed_ioprio(struct cfq_io_cq *cic)
+static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
{
+ int ioprio = cic->icq.ioc->ioprio;
struct cfq_data *cfqd = cic_to_cfqd(cic);
struct cfq_queue *cfqq;
- if (unlikely(!cfqd))
+ /*
+ * Check whether ioprio has changed. The condition may trigger
+ * spuriously on a newly created cic but there's no harm.
+ */
+ if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
return;
cfqq = cic->cfqq[BLK_RW_ASYNC];
if (cfqq) {
struct cfq_queue *new_cfqq;
- new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
- GFP_ATOMIC);
+ new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
+ GFP_ATOMIC);
if (new_cfqq) {
cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
cfq_put_queue(cfqq);
@@ -2800,6 +3113,8 @@ static void changed_ioprio(struct cfq_io_cq *cic)
cfqq = cic->cfqq[BLK_RW_SYNC];
if (cfqq)
cfq_mark_cfqq_prio_changed(cfqq);
+
+ cic->ioprio = ioprio;
}
static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
@@ -2823,17 +3138,24 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void changed_cgroup(struct cfq_io_cq *cic)
+static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
{
- struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
struct cfq_data *cfqd = cic_to_cfqd(cic);
- struct request_queue *q;
+ struct cfq_queue *sync_cfqq;
+ uint64_t id;
- if (unlikely(!cfqd))
- return;
+ rcu_read_lock();
+ id = bio_blkcg(bio)->id;
+ rcu_read_unlock();
- q = cfqd->queue;
+ /*
+ * Check whether blkcg has changed. The condition may trigger
+ * spuriously on a newly created cic but there's no harm.
+ */
+ if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
+ return;
+ sync_cfqq = cic_to_cfqq(cic, 1);
if (sync_cfqq) {
/*
* Drop reference to sync queue. A new sync queue will be
@@ -2843,21 +3165,26 @@ static void changed_cgroup(struct cfq_io_cq *cic)
cic_set_cfqq(cic, NULL, 1);
cfq_put_queue(sync_cfqq);
}
+
+ cic->blkcg_id = id;
}
+#else
+static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
- struct io_context *ioc, gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+ struct bio *bio, gfp_t gfp_mask)
{
+ struct blkcg *blkcg;
struct cfq_queue *cfqq, *new_cfqq = NULL;
- struct cfq_io_cq *cic;
struct cfq_group *cfqg;
retry:
- cfqg = cfq_get_cfqg(cfqd);
- cic = cfq_cic_lookup(cfqd, ioc);
- /* cic always exists here */
+ rcu_read_lock();
+
+ blkcg = bio_blkcg(bio);
+ cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
cfqq = cic_to_cfqq(cic, is_sync);
/*
@@ -2870,6 +3197,7 @@ retry:
cfqq = new_cfqq;
new_cfqq = NULL;
} else if (gfp_mask & __GFP_WAIT) {
+ rcu_read_unlock();
spin_unlock_irq(cfqd->queue->queue_lock);
new_cfqq = kmem_cache_alloc_node(cfq_pool,
gfp_mask | __GFP_ZERO,
@@ -2885,7 +3213,7 @@ retry:
if (cfqq) {
cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
- cfq_init_prio_data(cfqq, ioc);
+ cfq_init_prio_data(cfqq, cic);
cfq_link_cfqq_cfqg(cfqq, cfqg);
cfq_log_cfqq(cfqd, cfqq, "alloced");
} else
@@ -2895,6 +3223,7 @@ retry:
if (new_cfqq)
kmem_cache_free(cfq_pool, new_cfqq);
+ rcu_read_unlock();
return cfqq;
}
@@ -2904,6 +3233,9 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
switch (ioprio_class) {
case IOPRIO_CLASS_RT:
return &cfqd->async_cfqq[0][ioprio];
+ case IOPRIO_CLASS_NONE:
+ ioprio = IOPRIO_NORM;
+ /* fall through */
case IOPRIO_CLASS_BE:
return &cfqd->async_cfqq[1][ioprio];
case IOPRIO_CLASS_IDLE:
@@ -2914,11 +3246,11 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
}
static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
- gfp_t gfp_mask)
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+ struct bio *bio, gfp_t gfp_mask)
{
- const int ioprio = task_ioprio(ioc);
- const int ioprio_class = task_ioprio_class(ioc);
+ const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+ const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
struct cfq_queue **async_cfqq = NULL;
struct cfq_queue *cfqq = NULL;
@@ -2928,7 +3260,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
}
if (!cfqq)
- cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
+ cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
/*
* pin the queue now that it's allocated, scheduler exit will prune it
@@ -3010,7 +3342,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
enable_idle = 0;
- else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
+ else if (!atomic_read(&cic->icq.ioc->active_ref) ||
!cfqd->cfq_slice_idle ||
(!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
enable_idle = 0;
@@ -3174,8 +3506,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_clear_cfqq_wait_request(cfqq);
__blk_run_queue(cfqd->queue);
} else {
- cfq_blkiocg_update_idle_time_stats(
- &cfqq->cfqg->blkg);
+ cfqg_stats_update_idle_time(cfqq->cfqg);
cfq_mark_cfqq_must_dispatch(cfqq);
}
}
@@ -3197,14 +3528,13 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
struct cfq_queue *cfqq = RQ_CFQQ(rq);
cfq_log_cfqq(cfqd, cfqq, "insert_request");
- cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
+ cfq_init_prio_data(cfqq, RQ_CIC(rq));
rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
- cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
- &cfqd->serving_group->blkg, rq_data_dir(rq),
- rq_is_sync(rq));
+ cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
+ rq->cmd_flags);
cfq_rq_enqueued(cfqd, cfqq, rq);
}
@@ -3300,9 +3630,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfqd->rq_in_driver--;
cfqq->dispatched--;
(RQ_CFQG(rq))->dispatched--;
- cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
- rq_start_time_ns(rq), rq_io_start_time_ns(rq),
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
+ rq_io_start_time_ns(rq), rq->cmd_flags);
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
@@ -3399,7 +3728,7 @@ static int cfq_may_queue(struct request_queue *q, int rw)
cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
if (cfqq) {
- cfq_init_prio_data(cfqq, cic->icq.ioc);
+ cfq_init_prio_data(cfqq, cic);
return __cfq_may_queue(cfqq);
}
@@ -3421,7 +3750,7 @@ static void cfq_put_request(struct request *rq)
cfqq->allocated[rw]--;
/* Put down rq reference on cfqg */
- cfq_put_cfqg(RQ_CFQG(rq));
+ cfqg_put(RQ_CFQG(rq));
rq->elv.priv[0] = NULL;
rq->elv.priv[1] = NULL;
@@ -3465,32 +3794,25 @@ split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
* Allocate cfq data structures associated with this request.
*/
static int
-cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
+ gfp_t gfp_mask)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
const int rw = rq_data_dir(rq);
const bool is_sync = rq_is_sync(rq);
struct cfq_queue *cfqq;
- unsigned int changed;
might_sleep_if(gfp_mask & __GFP_WAIT);
spin_lock_irq(q->queue_lock);
- /* handle changed notifications */
- changed = icq_get_changed(&cic->icq);
- if (unlikely(changed & ICQ_IOPRIO_CHANGED))
- changed_ioprio(cic);
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- if (unlikely(changed & ICQ_CGROUP_CHANGED))
- changed_cgroup(cic);
-#endif
-
+ check_ioprio_changed(cic, bio);
+ check_blkcg_changed(cic, bio);
new_queue:
cfqq = cic_to_cfqq(cic, is_sync);
if (!cfqq || cfqq == &cfqd->oom_cfqq) {
- cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
+ cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
cic_set_cfqq(cic, cfqq, is_sync);
} else {
/*
@@ -3516,8 +3838,9 @@ new_queue:
cfqq->allocated[rw]++;
cfqq->ref++;
+ cfqg_get(cfqq->cfqg);
rq->elv.priv[0] = cfqq;
- rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
+ rq->elv.priv[1] = cfqq->cfqg;
spin_unlock_irq(q->queue_lock);
return 0;
}
@@ -3614,7 +3937,6 @@ static void cfq_exit_queue(struct elevator_queue *e)
{
struct cfq_data *cfqd = e->elevator_data;
struct request_queue *q = cfqd->queue;
- bool wait = false;
cfq_shutdown_timer_wq(cfqd);
@@ -3624,89 +3946,52 @@ static void cfq_exit_queue(struct elevator_queue *e)
__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
cfq_put_async_queues(cfqd);
- cfq_release_cfq_groups(cfqd);
-
- /*
- * If there are groups which we could not unlink from blkcg list,
- * wait for a rcu period for them to be freed.
- */
- if (cfqd->nr_blkcg_linked_grps)
- wait = true;
spin_unlock_irq(q->queue_lock);
cfq_shutdown_timer_wq(cfqd);
- /*
- * Wait for cfqg->blkg->key accessors to exit their grace periods.
- * Do this wait only if there are other unlinked groups out
- * there. This can happen if cgroup deletion path claimed the
- * responsibility of cleaning up a group before queue cleanup code
- * get to the group.
- *
- * Do not call synchronize_rcu() unconditionally as there are drivers
- * which create/delete request queue hundreds of times during scan/boot
- * and synchronize_rcu() can take significant time and slow down boot.
- */
- if (wait)
- synchronize_rcu();
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- /* Free up per cpu stats for root group */
- free_percpu(cfqd->root_group.blkg.stats_cpu);
+#ifndef CONFIG_CFQ_GROUP_IOSCHED
+ kfree(cfqd->root_group);
#endif
+ blkcg_deactivate_policy(q, &blkcg_policy_cfq);
kfree(cfqd);
}
-static void *cfq_init_queue(struct request_queue *q)
+static int cfq_init_queue(struct request_queue *q)
{
struct cfq_data *cfqd;
- int i, j;
- struct cfq_group *cfqg;
- struct cfq_rb_root *st;
+ struct blkcg_gq *blkg __maybe_unused;
+ int i, ret;
cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
if (!cfqd)
- return NULL;
+ return -ENOMEM;
+
+ cfqd->queue = q;
+ q->elevator->elevator_data = cfqd;
/* Init root service tree */
cfqd->grp_service_tree = CFQ_RB_ROOT;
- /* Init root group */
- cfqg = &cfqd->root_group;
- for_each_cfqg_st(cfqg, i, j, st)
- *st = CFQ_RB_ROOT;
- RB_CLEAR_NODE(&cfqg->rb_node);
-
- /* Give preference to root group over other groups */
- cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
-
+ /* Init root group and prefer root group over other groups by default */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
- /*
- * Set root group reference to 2. One reference will be dropped when
- * all groups on cfqd->cfqg_list are being deleted during queue exit.
- * Other reference will remain there as we don't want to delete this
- * group as it is statically allocated and gets destroyed when
- * throtl_data goes away.
- */
- cfqg->ref = 2;
-
- if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
- kfree(cfqg);
- kfree(cfqd);
- return NULL;
- }
-
- rcu_read_lock();
+ ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
+ if (ret)
+ goto out_free;
- cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
- (void *)cfqd, 0);
- rcu_read_unlock();
- cfqd->nr_blkcg_linked_grps++;
+ cfqd->root_group = blkg_to_cfqg(q->root_blkg);
+#else
+ ret = -ENOMEM;
+ cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
+ GFP_KERNEL, cfqd->queue->node);
+ if (!cfqd->root_group)
+ goto out_free;
- /* Add group on cfqd->cfqg_list */
- hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
+ cfq_init_cfqg_base(cfqd->root_group);
#endif
+ cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
+
/*
* Not strictly needed (since RB_ROOT just clears the node and we
* zeroed cfqd on alloc), but better be safe in case someone decides
@@ -3718,13 +4003,17 @@ static void *cfq_init_queue(struct request_queue *q)
/*
* Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
* Grab a permanent reference to it, so that the normal code flow
- * will not attempt to free it.
+ * will not attempt to free it. oom_cfqq is linked to root_group
+ * but shouldn't hold a reference as it'll never be unlinked. Lose
+ * the reference from linking right away.
*/
cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
cfqd->oom_cfqq.ref++;
- cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
- cfqd->queue = q;
+ spin_lock_irq(q->queue_lock);
+ cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
+ cfqg_put(cfqd->root_group);
+ spin_unlock_irq(q->queue_lock);
init_timer(&cfqd->idle_slice_timer);
cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
@@ -3750,7 +4039,11 @@ static void *cfq_init_queue(struct request_queue *q)
* second, in order to have larger depth for async operations.
*/
cfqd->last_delayed_sync = jiffies - HZ;
- return cfqd;
+ return 0;
+
+out_free:
+ kfree(cfqd);
+ return ret;
}
/*
@@ -3877,15 +4170,13 @@ static struct elevator_type iosched_cfq = {
};
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static struct blkio_policy_type blkio_policy_cfq = {
- .ops = {
- .blkio_unlink_group_fn = cfq_unlink_blkio_group,
- .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
- },
- .plid = BLKIO_POLICY_PROP,
+static struct blkcg_policy blkcg_policy_cfq = {
+ .pd_size = sizeof(struct cfq_group),
+ .cftypes = cfq_blkcg_files,
+
+ .pd_init_fn = cfq_pd_init,
+ .pd_reset_stats_fn = cfq_pd_reset_stats,
};
-#else
-static struct blkio_policy_type blkio_policy_cfq;
#endif
static int __init cfq_init(void)
@@ -3906,24 +4197,31 @@ static int __init cfq_init(void)
#else
cfq_group_idle = 0;
#endif
+
+ ret = blkcg_policy_register(&blkcg_policy_cfq);
+ if (ret)
+ return ret;
+
cfq_pool = KMEM_CACHE(cfq_queue, 0);
if (!cfq_pool)
- return -ENOMEM;
+ goto err_pol_unreg;
ret = elv_register(&iosched_cfq);
- if (ret) {
- kmem_cache_destroy(cfq_pool);
- return ret;
- }
-
- blkio_policy_register(&blkio_policy_cfq);
+ if (ret)
+ goto err_free_pool;
return 0;
+
+err_free_pool:
+ kmem_cache_destroy(cfq_pool);
+err_pol_unreg:
+ blkcg_policy_unregister(&blkcg_policy_cfq);
+ return ret;
}
static void __exit cfq_exit(void)
{
- blkio_policy_unregister(&blkio_policy_cfq);
+ blkcg_policy_unregister(&blkcg_policy_cfq);
elv_unregister(&iosched_cfq);
kmem_cache_destroy(cfq_pool);
}
diff --git a/block/cfq.h b/block/cfq.h
deleted file mode 100644
index 2a155927e37c..000000000000
--- a/block/cfq.h
+++ /dev/null
@@ -1,115 +0,0 @@
-#ifndef _CFQ_H
-#define _CFQ_H
-#include "blk-cgroup.h"
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync)
-{
- blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue)
-{
- blkiocg_update_dequeue_stats(blkg, dequeue);
-}
-
-static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time, unsigned long unaccounted_time)
-{
- blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
-}
-
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
-{
- blkiocg_set_start_empty_time(blkg);
-}
-
-static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync)
-{
- blkiocg_update_io_remove_stats(blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
- bool direction, bool sync)
-{
- blkiocg_update_io_merged_stats(blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
- blkiocg_update_idle_time_stats(blkg);
-}
-
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
-{
- blkiocg_update_avg_queue_size_stats(blkg);
-}
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{
- blkiocg_update_set_idle_time_stats(blkg);
-}
-
-static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync)
-{
- blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
-{
- blkiocg_update_completion_stats(blkg, start_time, io_start_time,
- direction, sync);
-}
-
-static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev) {
- blkiocg_add_blkio_group(blkcg, blkg, key, dev, BLKIO_POLICY_PROP);
-}
-
-static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
- return blkiocg_del_blkio_group(blkg);
-}
-
-#else /* CFQ_GROUP_IOSCHED */
-static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync) {}
-
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue) {}
-
-static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time, unsigned long unaccounted_time) {}
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
-static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
-}
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
-
-static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
-
-static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev) {}
-static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
- return 0;
-}
-
-#endif /* CFQ_GROUP_IOSCHED */
-#endif
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 7bf12d793fcd..599b12e5380f 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -337,13 +337,13 @@ static void deadline_exit_queue(struct elevator_queue *e)
/*
* initialize elevator private data (deadline_data).
*/
-static void *deadline_init_queue(struct request_queue *q)
+static int deadline_init_queue(struct request_queue *q)
{
struct deadline_data *dd;
dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
if (!dd)
- return NULL;
+ return -ENOMEM;
INIT_LIST_HEAD(&dd->fifo_list[READ]);
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -354,7 +354,9 @@ static void *deadline_init_queue(struct request_queue *q)
dd->writes_starved = writes_starved;
dd->front_merges = 1;
dd->fifo_batch = fifo_batch;
- return dd;
+
+ q->elevator->elevator_data = dd;
+ return 0;
}
/*
diff --git a/block/elevator.c b/block/elevator.c
index f016855a46b0..6a55d418896f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -38,6 +38,7 @@
#include <trace/events/block.h>
#include "blk.h"
+#include "blk-cgroup.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
@@ -121,15 +122,6 @@ static struct elevator_type *elevator_get(const char *name)
return e;
}
-static int elevator_init_queue(struct request_queue *q,
- struct elevator_queue *eq)
-{
- eq->elevator_data = eq->type->ops.elevator_init_fn(q);
- if (eq->elevator_data)
- return 0;
- return -ENOMEM;
-}
-
static char chosen_elevator[ELV_NAME_MAX];
static int __init elevator_setup(char *str)
@@ -188,7 +180,6 @@ static void elevator_release(struct kobject *kobj)
int elevator_init(struct request_queue *q, char *name)
{
struct elevator_type *e = NULL;
- struct elevator_queue *eq;
int err;
if (unlikely(q->elevator))
@@ -222,17 +213,16 @@ int elevator_init(struct request_queue *q, char *name)
}
}
- eq = elevator_alloc(q, e);
- if (!eq)
+ q->elevator = elevator_alloc(q, e);
+ if (!q->elevator)
return -ENOMEM;
- err = elevator_init_queue(q, eq);
+ err = e->ops.elevator_init_fn(q);
if (err) {
- kobject_put(&eq->kobj);
+ kobject_put(&q->elevator->kobj);
return err;
}
- q->elevator = eq;
return 0;
}
EXPORT_SYMBOL(elevator_init);
@@ -564,25 +554,6 @@ void elv_drain_elevator(struct request_queue *q)
}
}
-void elv_quiesce_start(struct request_queue *q)
-{
- if (!q->elevator)
- return;
-
- spin_lock_irq(q->queue_lock);
- queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
- spin_unlock_irq(q->queue_lock);
-
- blk_drain_queue(q, false);
-}
-
-void elv_quiesce_end(struct request_queue *q)
-{
- spin_lock_irq(q->queue_lock);
- queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
- spin_unlock_irq(q->queue_lock);
-}
-
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
trace_block_rq_insert(q, rq);
@@ -692,12 +663,13 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
return NULL;
}
-int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+int elv_set_request(struct request_queue *q, struct request *rq,
+ struct bio *bio, gfp_t gfp_mask)
{
struct elevator_queue *e = q->elevator;
if (e->type->ops.elevator_set_req_fn)
- return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask);
+ return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
return 0;
}
@@ -801,8 +773,9 @@ static struct kobj_type elv_ktype = {
.release = elevator_release,
};
-int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
+int elv_register_queue(struct request_queue *q)
{
+ struct elevator_queue *e = q->elevator;
int error;
error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
@@ -820,11 +793,6 @@ int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
}
return error;
}
-
-int elv_register_queue(struct request_queue *q)
-{
- return __elv_register_queue(q, q->elevator);
-}
EXPORT_SYMBOL(elv_register_queue);
void elv_unregister_queue(struct request_queue *q)
@@ -907,53 +875,60 @@ EXPORT_SYMBOL_GPL(elv_unregister);
*/
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
- struct elevator_queue *old_elevator, *e;
+ struct elevator_queue *old = q->elevator;
+ bool registered = old->registered;
int err;
- /* allocate new elevator */
- e = elevator_alloc(q, new_e);
- if (!e)
- return -ENOMEM;
+ /*
+ * Turn on BYPASS and drain all requests w/ elevator private data.
+ * Block layer doesn't call into a quiesced elevator - all requests
+ * are directly put on the dispatch list without elevator data
+ * using INSERT_BACK. All requests have SOFTBARRIER set and no
+ * merge happens either.
+ */
+ blk_queue_bypass_start(q);
+
+ /* unregister and clear all auxiliary data of the old elevator */
+ if (registered)
+ elv_unregister_queue(q);
+
+ spin_lock_irq(q->queue_lock);
+ ioc_clear_queue(q);
+ spin_unlock_irq(q->queue_lock);
- err = elevator_init_queue(q, e);
+ /* allocate, init and register new elevator */
+ err = -ENOMEM;
+ q->elevator = elevator_alloc(q, new_e);
+ if (!q->elevator)
+ goto fail_init;
+
+ err = new_e->ops.elevator_init_fn(q);
if (err) {
- kobject_put(&e->kobj);
- return err;
+ kobject_put(&q->elevator->kobj);
+ goto fail_init;
}
- /* turn on BYPASS and drain all requests w/ elevator private data */
- elv_quiesce_start(q);
-
- /* unregister old queue, register new one and kill old elevator */
- if (q->elevator->registered) {
- elv_unregister_queue(q);
- err = __elv_register_queue(q, e);
+ if (registered) {
+ err = elv_register_queue(q);
if (err)
goto fail_register;
}
- /* done, clear io_cq's, switch elevators and turn off BYPASS */
- spin_lock_irq(q->queue_lock);
- ioc_clear_queue(q);
- old_elevator = q->elevator;
- q->elevator = e;
- spin_unlock_irq(q->queue_lock);
-
- elevator_exit(old_elevator);
- elv_quiesce_end(q);
+ /* done, kill the old one and finish */
+ elevator_exit(old);
+ blk_queue_bypass_end(q);
- blk_add_trace_msg(q, "elv switch: %s", e->type->elevator_name);
+ blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
return 0;
fail_register:
- /*
- * switch failed, exit the new io scheduler and reattach the old
- * one again (along with re-adding the sysfs dir)
- */
- elevator_exit(e);
+ elevator_exit(q->elevator);
+fail_init:
+ /* switch failed, restore and re-register old elevator */
+ q->elevator = old;
elv_register_queue(q);
- elv_quiesce_end(q);
+ blk_queue_bypass_end(q);
return err;
}
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 413a0b1d788c..5d1bf70e33d5 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -59,15 +59,17 @@ noop_latter_request(struct request_queue *q, struct request *rq)
return list_entry(rq->queuelist.next, struct request, queuelist);
}
-static void *noop_init_queue(struct request_queue *q)
+static int noop_init_queue(struct request_queue *q)
{
struct noop_data *nd;
nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
if (!nd)
- return NULL;
+ return -ENOMEM;
+
INIT_LIST_HEAD(&nd->queue);
- return nd;
+ q->elevator->elevator_data = nd;
+ return 0;
}
static void noop_exit_queue(struct elevator_queue *e)
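For reference, the elevator_init_fn conversion above (cfq, deadline, noop) changes the hook from returning the private data pointer to returning an errno and installing the data itself. A minimal sketch of a scheduler init under the new contract; the "foo" names are hypothetical:

struct foo_data {
	struct list_head queue;
};

static int foo_init_queue(struct request_queue *q)
{
	struct foo_data *fd;

	fd = kmalloc_node(sizeof(*fd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!fd)
		return -ENOMEM;

	INIT_LIST_HEAD(&fd->queue);
	q->elevator->elevator_data = fd;	/* previously "return fd;" */
	return 0;
}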
diff --git a/drivers/Makefile b/drivers/Makefile
index 0ee98d50f975..2ba29ffef2cb 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_SFI) += sfi/
# PnP must come after ACPI since it will eventually need to check if acpi
# was used and do nothing if so
obj-$(CONFIG_PNP) += pnp/
-obj-$(CONFIG_ARM_AMBA) += amba/
+obj-y += amba/
# Many drivers will want to use DMA so this has to be made available
# really early.
obj-$(CONFIG_DMA_ENGINE) += dma/
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index 8cf6c46e99fb..6680df36b963 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/sysfs.h>
+#include <linux/io.h>
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 06527c526618..74ee4ab577b6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -93,11 +93,9 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
/* do we have a wakeup address for S2 and S3? */
if (acpi_state == ACPI_STATE_S3) {
- if (!acpi_wakeup_address) {
+ if (!acpi_wakeup_address)
return -EFAULT;
- }
- acpi_set_firmware_waking_vector(
- (acpi_physical_address)acpi_wakeup_address);
+ acpi_set_firmware_waking_vector(acpi_wakeup_address);
}
ACPI_FLUSH_CPU_CACHE();
diff --git a/drivers/amba/Makefile b/drivers/amba/Makefile
index 40fe74097be2..66e81c2f1e3c 100644
--- a/drivers/amba/Makefile
+++ b/drivers/amba/Makefile
@@ -1,2 +1,2 @@
-obj-y += bus.o
-
+obj-$(CONFIG_ARM_AMBA) += bus.o
+obj-$(CONFIG_TEGRA_AHB) += tegra-ahb.o
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
new file mode 100644
index 000000000000..aa0b1f160528
--- /dev/null
+++ b/drivers/amba/tegra-ahb.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Jay Cheng <jacheng@nvidia.com>
+ * James Wylder <james.wylder@motorola.com>
+ * Benoit Goby <benoit@android.com>
+ * Colin Cross <ccross@android.com>
+ * Hiroshi DOYU <hdoyu@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#define DRV_NAME "tegra-ahb"
+
+#define AHB_ARBITRATION_DISABLE 0x00
+#define AHB_ARBITRATION_PRIORITY_CTRL 0x04
+#define AHB_PRIORITY_WEIGHT(x) (((x) & 0x7) << 29)
+#define PRIORITY_SELECT_USB BIT(6)
+#define PRIORITY_SELECT_USB2 BIT(18)
+#define PRIORITY_SELECT_USB3 BIT(17)
+
+#define AHB_GIZMO_AHB_MEM 0x0c
+#define ENB_FAST_REARBITRATE BIT(2)
+#define DONT_SPLIT_AHB_WR BIT(7)
+
+#define AHB_GIZMO_APB_DMA 0x10
+#define AHB_GIZMO_IDE 0x18
+#define AHB_GIZMO_USB 0x1c
+#define AHB_GIZMO_AHB_XBAR_BRIDGE 0x20
+#define AHB_GIZMO_CPU_AHB_BRIDGE 0x24
+#define AHB_GIZMO_COP_AHB_BRIDGE 0x28
+#define AHB_GIZMO_XBAR_APB_CTLR 0x2c
+#define AHB_GIZMO_VCP_AHB_BRIDGE 0x30
+#define AHB_GIZMO_NAND 0x3c
+#define AHB_GIZMO_SDMMC4 0x44
+#define AHB_GIZMO_XIO 0x48
+#define AHB_GIZMO_BSEV 0x60
+#define AHB_GIZMO_BSEA 0x70
+#define AHB_GIZMO_NOR 0x74
+#define AHB_GIZMO_USB2 0x78
+#define AHB_GIZMO_USB3 0x7c
+#define IMMEDIATE BIT(18)
+
+#define AHB_GIZMO_SDMMC1 0x80
+#define AHB_GIZMO_SDMMC2 0x84
+#define AHB_GIZMO_SDMMC3 0x88
+#define AHB_MEM_PREFETCH_CFG_X 0xd8
+#define AHB_ARBITRATION_XBAR_CTRL 0xdc
+#define AHB_MEM_PREFETCH_CFG3 0xe0
+#define AHB_MEM_PREFETCH_CFG4 0xe4
+#define AHB_MEM_PREFETCH_CFG1 0xec
+#define AHB_MEM_PREFETCH_CFG2 0xf0
+#define PREFETCH_ENB BIT(31)
+#define MST_ID(x) (((x) & 0x1f) << 26)
+#define AHBDMA_MST_ID MST_ID(5)
+#define USB_MST_ID MST_ID(6)
+#define USB2_MST_ID MST_ID(18)
+#define USB3_MST_ID MST_ID(17)
+#define ADDR_BNDRY(x) (((x) & 0xf) << 21)
+#define INACTIVITY_TIMEOUT(x) (((x) & 0xffff) << 0)
+
+#define AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID 0xf8
+
+#define AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE BIT(17)
+
+static struct platform_driver tegra_ahb_driver;
+
+static const u32 tegra_ahb_gizmo[] = {
+ AHB_ARBITRATION_DISABLE,
+ AHB_ARBITRATION_PRIORITY_CTRL,
+ AHB_GIZMO_AHB_MEM,
+ AHB_GIZMO_APB_DMA,
+ AHB_GIZMO_IDE,
+ AHB_GIZMO_USB,
+ AHB_GIZMO_AHB_XBAR_BRIDGE,
+ AHB_GIZMO_CPU_AHB_BRIDGE,
+ AHB_GIZMO_COP_AHB_BRIDGE,
+ AHB_GIZMO_XBAR_APB_CTLR,
+ AHB_GIZMO_VCP_AHB_BRIDGE,
+ AHB_GIZMO_NAND,
+ AHB_GIZMO_SDMMC4,
+ AHB_GIZMO_XIO,
+ AHB_GIZMO_BSEV,
+ AHB_GIZMO_BSEA,
+ AHB_GIZMO_NOR,
+ AHB_GIZMO_USB2,
+ AHB_GIZMO_USB3,
+ AHB_GIZMO_SDMMC1,
+ AHB_GIZMO_SDMMC2,
+ AHB_GIZMO_SDMMC3,
+ AHB_MEM_PREFETCH_CFG_X,
+ AHB_ARBITRATION_XBAR_CTRL,
+ AHB_MEM_PREFETCH_CFG3,
+ AHB_MEM_PREFETCH_CFG4,
+ AHB_MEM_PREFETCH_CFG1,
+ AHB_MEM_PREFETCH_CFG2,
+ AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID,
+};
+
+struct tegra_ahb {
+ void __iomem *regs;
+ struct device *dev;
+ u32 ctx[0];
+};
+
+static inline u32 gizmo_readl(struct tegra_ahb *ahb, u32 offset)
+{
+ return readl(ahb->regs + offset);
+}
+
+static inline void gizmo_writel(struct tegra_ahb *ahb, u32 value, u32 offset)
+{
+ writel(value, ahb->regs + offset);
+}
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+static int tegra_ahb_match_by_smmu(struct device *dev, void *data)
+{
+ struct tegra_ahb *ahb = dev_get_drvdata(dev);
+ struct device_node *dn = data;
+
+ return (ahb->dev->of_node == dn) ? 1 : 0;
+}
+
+int tegra_ahb_enable_smmu(struct device_node *dn)
+{
+ struct device *dev;
+ u32 val;
+ struct tegra_ahb *ahb;
+
+ dev = driver_find_device(&tegra_ahb_driver.driver, NULL, dn,
+ tegra_ahb_match_by_smmu);
+ if (!dev)
+ return -EPROBE_DEFER;
+ ahb = dev_get_drvdata(dev);
+ val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL);
+ val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE;
+ gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL);
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ahb_enable_smmu);
+#endif
+
+static int tegra_ahb_suspend(struct device *dev)
+{
+ int i;
+ struct tegra_ahb *ahb = dev_get_drvdata(dev);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
+ ahb->ctx[i] = gizmo_readl(ahb, tegra_ahb_gizmo[i]);
+ return 0;
+}
+
+static int tegra_ahb_resume(struct device *dev)
+{
+ int i;
+ struct tegra_ahb *ahb = dev_get_drvdata(dev);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
+ gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
+ tegra_ahb_suspend,
+ tegra_ahb_resume, NULL);
+
+static void tegra_ahb_gizmo_init(struct tegra_ahb *ahb)
+{
+ u32 val;
+
+ val = gizmo_readl(ahb, AHB_GIZMO_AHB_MEM);
+ val |= ENB_FAST_REARBITRATE | IMMEDIATE | DONT_SPLIT_AHB_WR;
+ gizmo_writel(ahb, val, AHB_GIZMO_AHB_MEM);
+
+ val = gizmo_readl(ahb, AHB_GIZMO_USB);
+ val |= IMMEDIATE;
+ gizmo_writel(ahb, val, AHB_GIZMO_USB);
+
+ val = gizmo_readl(ahb, AHB_GIZMO_USB2);
+ val |= IMMEDIATE;
+ gizmo_writel(ahb, val, AHB_GIZMO_USB2);
+
+ val = gizmo_readl(ahb, AHB_GIZMO_USB3);
+ val |= IMMEDIATE;
+ gizmo_writel(ahb, val, AHB_GIZMO_USB3);
+
+ val = gizmo_readl(ahb, AHB_ARBITRATION_PRIORITY_CTRL);
+ val |= PRIORITY_SELECT_USB |
+ PRIORITY_SELECT_USB2 |
+ PRIORITY_SELECT_USB3 |
+ AHB_PRIORITY_WEIGHT(7);
+ gizmo_writel(ahb, val, AHB_ARBITRATION_PRIORITY_CTRL);
+
+ val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG1);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB |
+ AHBDMA_MST_ID |
+ ADDR_BNDRY(0xc) |
+ INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG1);
+
+ val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG2);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB |
+ USB_MST_ID |
+ ADDR_BNDRY(0xc) |
+ INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG2);
+
+ val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG3);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB |
+ USB3_MST_ID |
+ ADDR_BNDRY(0xc) |
+ INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG3);
+
+ val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG4);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB |
+ USB2_MST_ID |
+ ADDR_BNDRY(0xc) |
+ INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG4);
+}
+
+static int __devinit tegra_ahb_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct tegra_ahb *ahb;
+ size_t bytes;
+
+ bytes = sizeof(*ahb) + sizeof(u32) * ARRAY_SIZE(tegra_ahb_gizmo);
+ ahb = devm_kzalloc(&pdev->dev, bytes, GFP_KERNEL);
+ if (!ahb)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ ahb->regs = devm_request_and_ioremap(&pdev->dev, res);
+ if (!ahb->regs)
+ return -EBUSY;
+
+ ahb->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ahb);
+ tegra_ahb_gizmo_init(ahb);
+ return 0;
+}
+
+static int __devexit tegra_ahb_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id tegra_ahb_of_match[] __devinitconst = {
+ { .compatible = "nvidia,tegra30-ahb", },
+ { .compatible = "nvidia,tegra20-ahb", },
+ {},
+};
+
+static struct platform_driver tegra_ahb_driver = {
+ .probe = tegra_ahb_probe,
+ .remove = __devexit_p(tegra_ahb_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = tegra_ahb_of_match,
+ .pm = &tegra_ahb_pm,
+ },
+};
+module_platform_driver(tegra_ahb_driver);
+
+MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
+MODULE_DESCRIPTION("Tegra AHB driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
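The tegra_ahb_enable_smmu() export above is intended to be called by the Tegra SMMU driver once its own setup is done. A hedged sketch of such a caller; the "nvidia,ahb" phandle property and the smmu_* names are assumptions, not part of this patch:

/* Hypothetical SMMU probe fragment: flag the AHB once SMMU init is complete. */
static int smmu_mark_ahb_ready(struct platform_device *pdev)
{
	struct device_node *ahb_np;
	int err;

	ahb_np = of_parse_phandle(pdev->dev.of_node, "nvidia,ahb", 0);
	if (!ahb_np)
		return -ENODEV;

	err = tegra_ahb_enable_smmu(ahb_np);	/* -EPROBE_DEFER if AHB not probed yet */
	of_node_put(ahb_np);
	return err;
}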
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 7336d4a7ab31..24712adf69df 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -553,6 +553,7 @@ struct mv_host_priv {
#if defined(CONFIG_HAVE_CLK)
struct clk *clk;
+ struct clk **port_clks;
#endif
/*
* These consistent DMA memory pools give us guaranteed
@@ -4027,6 +4028,9 @@ static int mv_platform_probe(struct platform_device *pdev)
struct resource *res;
int n_ports = 0;
int rc;
+#if defined(CONFIG_HAVE_CLK)
+ int port;
+#endif
ata_print_version_once(&pdev->dev, DRV_VERSION);
@@ -4054,6 +4058,13 @@ static int mv_platform_probe(struct platform_device *pdev)
if (!host || !hpriv)
return -ENOMEM;
+#if defined(CONFIG_HAVE_CLK)
+ hpriv->port_clks = devm_kzalloc(&pdev->dev,
+ sizeof(struct clk *) * n_ports,
+ GFP_KERNEL);
+ if (!hpriv->port_clks)
+ return -ENOMEM;
+#endif
host->private_data = hpriv;
hpriv->n_ports = n_ports;
hpriv->board_idx = chip_soc;
@@ -4066,9 +4077,17 @@ static int mv_platform_probe(struct platform_device *pdev)
#if defined(CONFIG_HAVE_CLK)
hpriv->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(hpriv->clk))
- dev_notice(&pdev->dev, "cannot get clkdev\n");
+ dev_notice(&pdev->dev, "cannot get optional clkdev\n");
else
- clk_enable(hpriv->clk);
+ clk_prepare_enable(hpriv->clk);
+
+ for (port = 0; port < n_ports; port++) {
+ char port_number[16];
+ sprintf(port_number, "%d", port);
+ hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
+ if (!IS_ERR(hpriv->port_clks[port]))
+ clk_prepare_enable(hpriv->port_clks[port]);
+ }
#endif
/*
@@ -4098,9 +4117,15 @@ static int mv_platform_probe(struct platform_device *pdev)
err:
#if defined(CONFIG_HAVE_CLK)
if (!IS_ERR(hpriv->clk)) {
- clk_disable(hpriv->clk);
+ clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
+ for (port = 0; port < n_ports; port++) {
+ if (!IS_ERR(hpriv->port_clks[port])) {
+ clk_disable_unprepare(hpriv->port_clks[port]);
+ clk_put(hpriv->port_clks[port]);
+ }
+ }
#endif
return rc;
@@ -4119,14 +4144,21 @@ static int __devexit mv_platform_remove(struct platform_device *pdev)
struct ata_host *host = platform_get_drvdata(pdev);
#if defined(CONFIG_HAVE_CLK)
struct mv_host_priv *hpriv = host->private_data;
+ int port;
#endif
ata_host_detach(host);
#if defined(CONFIG_HAVE_CLK)
if (!IS_ERR(hpriv->clk)) {
- clk_disable(hpriv->clk);
+ clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
+ for (port = 0; port < host->n_ports; port++) {
+ if (!IS_ERR(hpriv->port_clks[port])) {
+ clk_disable_unprepare(hpriv->port_clks[port]);
+ clk_put(hpriv->port_clks[port]);
+ }
+ }
#endif
return 0;
}
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index e8cd652d2017..98510931c815 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -984,6 +984,7 @@ static uint32_t fpga_tx(struct solos_card *card)
} else if (skb && card->using_dma) {
SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
skb->len, PCI_DMA_TODEVICE);
+ card->tx_skb[port] = skb;
iowrite32(SKB_CB(skb)->dma_addr,
card->config_regs + TX_DMA_ADDR(port));
}
@@ -1152,7 +1153,8 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
db_fpga_upgrade = db_firmware_upgrade = 0;
}
- if (card->fpga_version >= DMA_SUPPORTED){
+ if (card->fpga_version >= DMA_SUPPORTED) {
+ pci_set_master(dev);
card->using_dma = 1;
} else {
card->using_dma = 0;
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 9aa618acfe97..9b21469482ae 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -192,4 +192,93 @@ config DMA_SHARED_BUFFER
APIs extension; the file's descriptor can then be passed on to other
driver.
+config CMA
+ bool "Contiguous Memory Allocator (EXPERIMENTAL)"
+ depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
+ select MIGRATION
+ help
+ This enables the Contiguous Memory Allocator which allows drivers
+ to allocate big physically-contiguous blocks of memory for use with
+	  hardware components that do not support I/O mapping or scatter-gather.
+
+ For more information see <include/linux/dma-contiguous.h>.
+ If unsure, say "n".
+
+if CMA
+
+config CMA_DEBUG
+ bool "CMA debug messages (DEVELOPMENT)"
+ depends on DEBUG_KERNEL
+ help
+ Turns on debug messages in CMA. This produces KERN_DEBUG
+ messages for every CMA call as well as various messages while
+ processing calls such as dma_alloc_from_contiguous().
+ This option does not affect warning and error messages.
+
+comment "Default contiguous memory area size:"
+
+config CMA_SIZE_MBYTES
+ int "Size in Mega Bytes"
+ depends on !CMA_SIZE_SEL_PERCENTAGE
+ default 16
+ help
+ Defines the size (in MiB) of the default memory area for Contiguous
+ Memory Allocator.
+
+config CMA_SIZE_PERCENTAGE
+ int "Percentage of total memory"
+ depends on !CMA_SIZE_SEL_MBYTES
+ default 10
+ help
+ Defines the size of the default memory area for Contiguous Memory
+ Allocator as a percentage of the total memory in the system.
+
+choice
+ prompt "Selected region size"
+ default CMA_SIZE_SEL_ABSOLUTE
+
+config CMA_SIZE_SEL_MBYTES
+ bool "Use mega bytes value only"
+
+config CMA_SIZE_SEL_PERCENTAGE
+ bool "Use percentage value only"
+
+config CMA_SIZE_SEL_MIN
+ bool "Use lower value (minimum)"
+
+config CMA_SIZE_SEL_MAX
+ bool "Use higher value (maximum)"
+
+endchoice
+
+config CMA_ALIGNMENT
+ int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
+ range 4 9
+ default 8
+ help
+ DMA mapping framework by default aligns all buffers to the smallest
+ PAGE_SIZE order which is greater than or equal to the requested buffer
+	  size. This works well for buffers up to a few hundred kilobytes, but
+	  for larger buffers it is just a memory waste. With this parameter you can
+ specify the maximum PAGE_SIZE order for contiguous buffers. Larger
+ buffers will be aligned only to this specified order. The order is
+ expressed as a power of two multiplied by the PAGE_SIZE.
+
+ For example, if your system defaults to 4KiB pages, the order value
+ of 8 means that the buffers will be aligned up to 1MiB only.
+
+ If unsure, leave the default value "8".
+
+config CMA_AREAS
+ int "Maximum count of the CMA device-private areas"
+ default 7
+ help
+	  CMA allows creating CMA areas for particular devices. This parameter
+ sets the maximum number of such device private CMA areas in the
+ system.
+
+ If unsure, leave the default value "7".
+
+endif
+
endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index b6d1b9c4200c..5aa2d703d19f 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,6 +6,7 @@ obj-y := core.o bus.o dd.o syscore.o \
attribute_container.o transport_class.o \
topology.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
+obj-$(CONFIG_CMA) += dma-contiguous.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 05c64c11bad2..24e88fe29ec1 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -44,8 +44,26 @@ static int dma_buf_release(struct inode *inode, struct file *file)
return 0;
}
+static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
+{
+ struct dma_buf *dmabuf;
+
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ dmabuf = file->private_data;
+
+ /* check for overflowing the buffer's size */
+ if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+ dmabuf->size >> PAGE_SHIFT)
+ return -EINVAL;
+
+ return dmabuf->ops->mmap(dmabuf, vma);
+}
+
static const struct file_operations dma_buf_fops = {
.release = dma_buf_release,
+ .mmap = dma_buf_mmap_internal,
};
/*
@@ -82,7 +100,8 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
|| !ops->unmap_dma_buf
|| !ops->release
|| !ops->kmap_atomic
- || !ops->kmap)) {
+ || !ops->kmap
+ || !ops->mmap)) {
return ERR_PTR(-EINVAL);
}
@@ -406,3 +425,81 @@ void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
+
+
+/**
+ * dma_buf_mmap - Set up a userspace mmap with the given vma
+ * @dmabuf: [in] buffer that should back the vma
+ * @vma: [in] vma for the mmap
+ * @pgoff: [in] offset in pages where this mmap should start within the
+ * dma-buf buffer.
+ *
+ * This function adjusts the passed in vma so that it points at the file of the
+ * dma_buf operation. It also adjusts the starting pgoff and does bounds
+ * checking on the size of the vma. Then it calls the exporter's mmap function to
+ * set up the mapping.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
+ unsigned long pgoff)
+{
+ if (WARN_ON(!dmabuf || !vma))
+ return -EINVAL;
+
+ /* check for offset overflow */
+ if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+ return -EOVERFLOW;
+
+ /* check for overflowing the buffer's size */
+ if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+ dmabuf->size >> PAGE_SHIFT)
+ return -EINVAL;
+
+ /* readjust the vma */
+ if (vma->vm_file)
+ fput(vma->vm_file);
+
+ vma->vm_file = dmabuf->file;
+ get_file(vma->vm_file);
+
+ vma->vm_pgoff = pgoff;
+
+ return dmabuf->ops->mmap(dmabuf, vma);
+}
+EXPORT_SYMBOL_GPL(dma_buf_mmap);
+
+/**
+ * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @dmabuf: [in] buffer to vmap
+ *
+ * This call may fail due to lack of virtual mapping address space.
+ * These calls are optional in drivers. The intended use is mapping
+ * objects linearly into kernel address space for frequently accessed objects.
+ * Please attempt to use kmap/kunmap before thinking about these interfaces.
+ */
+void *dma_buf_vmap(struct dma_buf *dmabuf)
+{
+ if (WARN_ON(!dmabuf))
+ return NULL;
+
+ if (dmabuf->ops->vmap)
+ return dmabuf->ops->vmap(dmabuf);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dma_buf_vmap);
+
+/**
+ * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
+ * @dmabuf: [in] buffer to vunmap
+ */
+void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+ if (WARN_ON(!dmabuf))
+ return;
+
+ if (dmabuf->ops->vunmap)
+ dmabuf->ops->vunmap(dmabuf, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_vunmap);
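A hedged sketch of how an importing driver might use the new entry points; only dma_buf_mmap(), dma_buf_vmap() and dma_buf_vunmap() come from this patch, while the my_importer_* names and the private-data layout are assumptions:

/* Forward a userspace mmap() straight to the exporter. */
static int my_importer_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf = file->private_data;	/* assumed layout */

	/* dma_buf_mmap() bounds-checks and fixes up vm_file/vm_pgoff itself. */
	return dma_buf_mmap(dmabuf, vma, 0);
}

/* Optional linear kernel mapping for frequently touched buffers. */
static void my_importer_cpu_access(struct dma_buf *dmabuf)
{
	void *vaddr = dma_buf_vmap(dmabuf);

	if (!vaddr)
		return;		/* exporter may not implement vmap */
	/* ... access the buffer through vaddr ... */
	dma_buf_vunmap(dmabuf, vaddr);
}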
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index bb0025c510b3..1b85949e3d2f 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -10,6 +10,7 @@
struct dma_coherent_mem {
void *virt_base;
dma_addr_t device_base;
+ phys_addr_t pfn_base;
int size;
int flags;
unsigned long *bitmap;
@@ -44,6 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dev->dma_mem->virt_base = mem_base;
dev->dma_mem->device_base = device_addr;
+ dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
dev->dma_mem->size = pages;
dev->dma_mem->flags = flags;
@@ -176,3 +178,43 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
+
+/**
+ * dma_mmap_from_coherent() - try to mmap the memory allocated from
+ * per-device coherent memory pool to userspace
+ * @dev: device from which the memory was allocated
+ * @vma: vm_area for the userspace memory
+ * @vaddr: cpu address returned by dma_alloc_from_coherent
+ * @size: size of the memory buffer allocated by dma_alloc_from_coherent
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if
+ * dma_release_coherent() should proceed with mapping memory from
+ * generic pools.
+ */
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *vaddr, size_t size, int *ret)
+{
+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+ if (mem && vaddr >= mem->virt_base && vaddr + size <=
+ (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+ unsigned long off = vma->vm_pgoff;
+ int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+ int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int count = size >> PAGE_SHIFT;
+
+ *ret = -ENXIO;
+ if (off < count && user_count <= count - off) {
+ unsigned pfn = mem->pfn_base + start + off;
+ *ret = remap_pfn_range(vma, vma->vm_start, pfn,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+ }
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(dma_mmap_from_coherent);
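A minimal sketch of the intended caller of dma_mmap_from_coherent(), assuming an arch-level dma mmap helper; the surrounding function and its fallback path are assumptions, not part of the patch:

static int my_arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, size_t size)
{
	int ret = -ENXIO;

	/* The per-device coherent pool gets first shot at the mapping. */
	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* ... otherwise remap pages obtained from the generic allocator ... */
	return ret;
}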
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
new file mode 100644
index 000000000000..78efb0306a44
--- /dev/null
+++ b/drivers/base/dma-contiguous.c
@@ -0,0 +1,401 @@
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ * Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your option) any later version of the license.
+ */
+
+#define pr_fmt(fmt) "cma: " fmt
+
+#ifdef CONFIG_CMA_DEBUG
+#ifndef DEBUG
+# define DEBUG
+#endif
+#endif
+
+#include <asm/page.h>
+#include <asm/dma-contiguous.h>
+
+#include <linux/memblock.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-isolation.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/mm_types.h>
+#include <linux/dma-contiguous.h>
+
+#ifndef SZ_1M
+#define SZ_1M (1 << 20)
+#endif
+
+struct cma {
+ unsigned long base_pfn;
+ unsigned long count;
+ unsigned long *bitmap;
+};
+
+struct cma *dma_contiguous_default_area;
+
+#ifdef CONFIG_CMA_SIZE_MBYTES
+#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
+#else
+#define CMA_SIZE_MBYTES 0
+#endif
+
+/*
+ * Default global CMA area size can be defined in kernel's .config.
+ * This is useful mainly for distro maintainers to create a kernel
+ * that works correctly for most supported systems.
+ * The size can be set in bytes or as a percentage of the total memory
+ * in the system.
+ *
+ * Users who want to set the size of the global CMA area for their system
+ * should use the cma= kernel parameter.
+ */
+static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static long size_cmdline = -1;
+
+static int __init early_cma(char *p)
+{
+ pr_debug("%s(%s)\n", __func__, p);
+ size_cmdline = memparse(p, &p);
+ return 0;
+}
+early_param("cma", early_cma);
+
+#ifdef CONFIG_CMA_SIZE_PERCENTAGE
+
+static unsigned long __init __maybe_unused cma_early_percent_memory(void)
+{
+ struct memblock_region *reg;
+ unsigned long total_pages = 0;
+
+ /*
+ * We cannot use memblock_phys_mem_size() here, because
+ * memblock_analyze() has not been called yet.
+ */
+ for_each_memblock(memory, reg)
+ total_pages += memblock_region_memory_end_pfn(reg) -
+ memblock_region_memory_base_pfn(reg);
+
+ return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
+}
+
+#else
+
+static inline __maybe_unused unsigned long cma_early_percent_memory(void)
+{
+ return 0;
+}
+
+#endif
+
+/**
+ * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory.
+ */
+void __init dma_contiguous_reserve(phys_addr_t limit)
+{
+ unsigned long selected_size = 0;
+
+ pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
+ if (size_cmdline != -1) {
+ selected_size = size_cmdline;
+ } else {
+#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
+ selected_size = size_bytes;
+#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
+ selected_size = cma_early_percent_memory();
+#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
+ selected_size = min(size_bytes, cma_early_percent_memory());
+#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
+ selected_size = max(size_bytes, cma_early_percent_memory());
+#endif
+ }
+
+ if (selected_size) {
+ pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+ selected_size / SZ_1M);
+
+ dma_declare_contiguous(NULL, selected_size, 0, limit);
+ }
+};
+
+static DEFINE_MUTEX(cma_mutex);
+
+static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+{
+ unsigned long pfn = base_pfn;
+ unsigned i = count >> pageblock_order;
+ struct zone *zone;
+
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ zone = page_zone(pfn_to_page(pfn));
+
+ do {
+ unsigned j;
+ base_pfn = pfn;
+ for (j = pageblock_nr_pages; j; --j, pfn++) {
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ if (page_zone(pfn_to_page(pfn)) != zone)
+ return -EINVAL;
+ }
+ init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+ } while (--i);
+ return 0;
+}
+
+static __init struct cma *cma_create_area(unsigned long base_pfn,
+ unsigned long count)
+{
+ int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ struct cma *cma;
+ int ret = -ENOMEM;
+
+ pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
+
+ cma = kmalloc(sizeof *cma, GFP_KERNEL);
+ if (!cma)
+ return ERR_PTR(-ENOMEM);
+
+ cma->base_pfn = base_pfn;
+ cma->count = count;
+ cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+ if (!cma->bitmap)
+ goto no_mem;
+
+ ret = cma_activate_area(base_pfn, count);
+ if (ret)
+ goto error;
+
+ pr_debug("%s: returned %p\n", __func__, (void *)cma);
+ return cma;
+
+error:
+ kfree(cma->bitmap);
+no_mem:
+ kfree(cma);
+ return ERR_PTR(ret);
+}
+
+static struct cma_reserved {
+ phys_addr_t start;
+ unsigned long size;
+ struct device *dev;
+} cma_reserved[MAX_CMA_AREAS] __initdata;
+static unsigned cma_reserved_count __initdata;
+
+static int __init cma_init_reserved_areas(void)
+{
+ struct cma_reserved *r = cma_reserved;
+ unsigned i = cma_reserved_count;
+
+ pr_debug("%s()\n", __func__);
+
+ for (; i; --i, ++r) {
+ struct cma *cma;
+ cma = cma_create_area(PFN_DOWN(r->start),
+ r->size >> PAGE_SHIFT);
+ if (!IS_ERR(cma))
+ dev_set_cma_area(r->dev, cma);
+ }
+ return 0;
+}
+core_initcall(cma_init_reserved_areas);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ * for particular device
+ * @dev: Pointer to device structure.
+ * @size: Size of the reserved memory.
+ * @base: Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for the specified device. It should be
+ * called by board specific code while the early allocator (memblock or
+ * bootmem) is still active.
+ */
+int __init dma_declare_contiguous(struct device *dev, unsigned long size,
+ phys_addr_t base, phys_addr_t limit)
+{
+ struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+ unsigned long alignment;
+
+ pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
+ (unsigned long)size, (unsigned long)base,
+ (unsigned long)limit);
+
+ /* Sanity checks */
+ if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+ pr_err("Not enough slots for CMA reserved regions!\n");
+ return -ENOSPC;
+ }
+
+ if (!size)
+ return -EINVAL;
+
+ /* Sanitise input arguments */
+ alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+ base = ALIGN(base, alignment);
+ size = ALIGN(size, alignment);
+ limit &= ~(alignment - 1);
+
+ /* Reserve memory */
+ if (base) {
+ if (memblock_is_region_reserved(base, size) ||
+ memblock_reserve(base, size) < 0) {
+ base = -EBUSY;
+ goto err;
+ }
+ } else {
+ /*
+ * Use __memblock_alloc_base() since
+ * memblock_alloc_base() panic()s.
+ */
+ phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
+ if (!addr) {
+ base = -ENOMEM;
+ goto err;
+ } else if (addr + size > ~(unsigned long)0) {
+ memblock_free(addr, size);
+ base = -EINVAL;
+ goto err;
+ } else {
+ base = addr;
+ }
+ }
+
+ /*
+ * Each reserved area must be initialised later, when more kernel
+ * subsystems (like slab allocator) are available.
+ */
+ r->start = base;
+ r->size = size;
+ r->dev = dev;
+ cma_reserved_count++;
+ pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
+ (unsigned long)base);
+
+ /* Architecture specific contiguous memory fixup. */
+ dma_contiguous_early_fixup(base, size);
+ return 0;
+err:
+ pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
+ return base;
+}
+
+/**
+ * dma_alloc_from_contiguous() - allocate pages from contiguous area
+ * @dev: Pointer to device for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
+ *
+ * This function allocates a memory buffer for the specified device. It uses
+ * the device-specific contiguous memory area if available, or the default
+ * global one. Requires architecture specific get_dev_cma_area() helper
+ * function.
+ */
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int align)
+{
+ unsigned long mask, pfn, pageno, start = 0;
+ struct cma *cma = dev_get_cma_area(dev);
+ int ret;
+
+ if (!cma || !cma->count)
+ return NULL;
+
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+ pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
+ count, align);
+
+ if (!count)
+ return NULL;
+
+ mask = (1 << align) - 1;
+
+ mutex_lock(&cma_mutex);
+
+ for (;;) {
+ pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
+ start, count, mask);
+ if (pageno >= cma->count) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ pfn = cma->base_pfn + pageno;
+ ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+ if (ret == 0) {
+ bitmap_set(cma->bitmap, pageno, count);
+ break;
+ } else if (ret != -EBUSY) {
+ goto error;
+ }
+ pr_debug("%s(): memory range at %p is busy, retrying\n",
+ __func__, pfn_to_page(pfn));
+ /* try again with a bit different memory target */
+ start = pageno + mask + 1;
+ }
+
+ mutex_unlock(&cma_mutex);
+
+ pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
+ return pfn_to_page(pfn);
+error:
+ mutex_unlock(&cma_mutex);
+ return NULL;
+}
+
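A hedged sketch of a caller allocating a physically contiguous 1 MiB buffer with the helper above; count is in pages and align is a PAGE_SIZE order, as documented. The fragment assumes it sits inside a hypothetical allocation helper:

	int count = SZ_1M >> PAGE_SHIFT;
	struct page *pages;

	pages = dma_alloc_from_contiguous(dev, count, get_order(SZ_1M));
	if (!pages)
		return NULL;	/* caller falls back to another allocator */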
+/**
+ * dma_release_from_contiguous() - release allocated pages
+ * @dev: Pointer to device for which the pages were allocated.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_from_contiguous().
+ * It returns false when the provided pages do not belong to a contiguous
+ * area, and true otherwise.
+ */
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count)
+{
+ struct cma *cma = dev_get_cma_area(dev);
+ unsigned long pfn;
+
+ if (!cma || !pages)
+ return false;
+
+ pr_debug("%s(page %p)\n", __func__, (void *)pages);
+
+ pfn = page_to_pfn(pages);
+
+ if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+ return false;
+
+ VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
+
+ mutex_lock(&cma_mutex);
+ bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+ free_contig_range(pfn, count);
+ mutex_unlock(&cma_mutex);
+
+ return true;
+}
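Continuing the sketch above: when a caller mixes CMA and normal page allocations (as architecture DMA code typically does), a false return tells it the pages were not CMA pages and should be freed through the page allocator instead. The order passed to __free_pages() here is only an illustration:

	if (!dma_release_from_contiguous(dev, pages, count))
		__free_pages(pages, get_order(count << PAGE_SHIFT));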
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 90aa2a11a933..af1a177216f1 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -592,11 +592,9 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
{
int n;
- n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
- if (n > 0 && PAGE_SIZE > n + 1) {
- *(buf + n++) = '\n';
- *(buf + n++) = '\0';
- }
+ n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
+ buf[n++] = '\n';
+ buf[n] = '\0';
return n;
}
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 5f6b2478bf17..fa6bf5279d28 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -42,7 +42,7 @@ static int regmap_i2c_gather_write(void *context,
/* If the I2C controller can't do a gather tell the core, it
* will substitute in a linear write for us.
*/
- if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_PROTOCOL_MANGLING))
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_NOSTART))
return -ENOTSUPP;
xfer[0].addr = i2c->addr;
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index 893f6e0c759f..bc6e89212ad3 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -30,6 +30,7 @@ void bcma_core_disable(struct bcma_device *core, u32 flags)
udelay(10);
bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
+ bcma_aread32(core, BCMA_RESET_CTL);
udelay(1);
}
EXPORT_SYMBOL_GPL(bcma_core_disable);
@@ -77,7 +78,7 @@ void bcma_core_set_clockmode(struct bcma_device *core,
pr_err("HT force timeout\n");
break;
case BCMA_CLKMODE_DYNAMIC:
- pr_warn("Dynamic clockmode not supported yet!\n");
+ bcma_set32(core, BCMA_CLKCTLST, ~BCMA_CLKCTLST_FORCEHT);
break;
}
}
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index 4d38ae179b48..9a96f14c8f47 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -24,14 +24,12 @@ u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
return pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_DATA);
}
-#if 0
static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
{
pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
}
-#endif
static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
{
@@ -170,13 +168,50 @@ static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
}
+static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
+{
+ struct bcma_device *core = pc->core;
+ u16 val16, core_index;
+ uint regoff;
+
+ regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
+ core_index = (u16)core->core_index;
+
+ val16 = pcicore_read16(pc, regoff);
+ if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
+ != core_index) {
+ val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
+ (val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
+ pcicore_write16(pc, regoff, val16);
+ }
+}
+
+/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
+/* Needs to happen when coming out of 'standby'/'hibernate' */
+static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
+{
+ u16 val16;
+ uint regoff;
+
+ regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_MISC_CONFIG);
+
+ val16 = pcicore_read16(pc, regoff);
+
+ if (!(val16 & BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST)) {
+ val16 |= BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST;
+ pcicore_write16(pc, regoff, val16);
+ }
+}
+
/**************************************************
* Init.
**************************************************/
static void __devinit bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
+ bcma_core_pci_fixcfg(pc);
bcma_pcicore_serdes_workaround(pc);
+ bcma_core_pci_config_fixup(pc);
}
void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
@@ -224,3 +259,17 @@ out:
return err;
}
EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
+
+void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
+{
+ u32 w;
+
+ w = bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
+ if (extend)
+ w |= BCMA_CORE_PCI_ASPMTIMER_EXTEND;
+ else
+ w &= ~BCMA_CORE_PCI_ASPMTIMER_EXTEND;
+ bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w);
+ bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_extend_L1timer);
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index d2097a11c3c7..b9a86edfec39 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -119,7 +119,7 @@ static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, len);
+ mmio = ioremap_nocache(addr, sizeof(val));
if (!mmio)
goto out;
@@ -171,7 +171,7 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
addr = pc->core->addr + BCMA_CORE_PCI_PCICFG0;
addr |= (func << 8);
addr |= (off & 0xfc);
- mmio = ioremap_nocache(addr, len);
+ mmio = ioremap_nocache(addr, sizeof(val));
if (!mmio)
goto out;
}
@@ -180,7 +180,7 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, len);
+ mmio = ioremap_nocache(addr, sizeof(val));
if (!mmio)
goto out;
@@ -491,8 +491,8 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
/* Ok, ready to run, register it to the system.
* The following needs change, if we want to port hostmode
* to non-MIPS platform. */
- io_map_base = (unsigned long)ioremap_nocache(BCMA_SOC_PCI_MEM,
- 0x04000000);
+ io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start,
+ resource_size(&pc_host->mem_resource));
pc_host->pci_controller.io_map_base = io_map_base;
set_io_port_base(pc_host->pci_controller.io_map_base);
/* Give some time to the PCI controller to configure itself with the new
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index e3928d68802b..6c05cf470f96 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -201,6 +201,9 @@ static int __devinit bcma_host_pci_probe(struct pci_dev *dev,
bus->hosttype = BCMA_HOSTTYPE_PCI;
bus->ops = &bcma_host_pci_ops;
+ bus->boardinfo.vendor = bus->host_pci->subsystem_vendor;
+ bus->boardinfo.type = bus->host_pci->subsystem_device;
+
/* Register */
err = bcma_bus_register(bus);
if (err)
@@ -222,7 +225,7 @@ err_kfree_bus:
return err;
}
-static void bcma_host_pci_remove(struct pci_dev *dev)
+static void __devexit bcma_host_pci_remove(struct pci_dev *dev)
{
struct bcma_bus *bus = pci_get_drvdata(dev);
@@ -277,7 +280,7 @@ static struct pci_driver bcma_pci_bridge_driver = {
.name = "bcma-pci-bridge",
.id_table = bcma_pci_bridge_tbl,
.probe = bcma_host_pci_probe,
- .remove = bcma_host_pci_remove,
+ .remove = __devexit_p(bcma_host_pci_remove),
.driver.pm = BCMA_PM_OPS,
};
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 3bea7fe25b20..5ed0718fc660 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -19,7 +19,14 @@ struct bcma_device_id_name {
u16 id;
const char *name;
};
-struct bcma_device_id_name bcma_device_names[] = {
+
+static const struct bcma_device_id_name bcma_arm_device_names[] = {
+ { BCMA_CORE_ARM_1176, "ARM 1176" },
+ { BCMA_CORE_ARM_7TDMI, "ARM 7TDMI" },
+ { BCMA_CORE_ARM_CM3, "ARM CM3" },
+};
+
+static const struct bcma_device_id_name bcma_bcm_device_names[] = {
{ BCMA_CORE_OOB_ROUTER, "OOB Router" },
{ BCMA_CORE_INVALID, "Invalid" },
{ BCMA_CORE_CHIPCOMMON, "ChipCommon" },
@@ -27,7 +34,6 @@ struct bcma_device_id_name bcma_device_names[] = {
{ BCMA_CORE_SRAM, "SRAM" },
{ BCMA_CORE_SDRAM, "SDRAM" },
{ BCMA_CORE_PCI, "PCI" },
- { BCMA_CORE_MIPS, "MIPS" },
{ BCMA_CORE_ETHERNET, "Fast Ethernet" },
{ BCMA_CORE_V90, "V90" },
{ BCMA_CORE_USB11_HOSTDEV, "USB 1.1 Hostdev" },
@@ -44,7 +50,6 @@ struct bcma_device_id_name bcma_device_names[] = {
{ BCMA_CORE_PHY_A, "PHY A" },
{ BCMA_CORE_PHY_B, "PHY B" },
{ BCMA_CORE_PHY_G, "PHY G" },
- { BCMA_CORE_MIPS_3302, "MIPS 3302" },
{ BCMA_CORE_USB11_HOST, "USB 1.1 Host" },
{ BCMA_CORE_USB11_DEV, "USB 1.1 Device" },
{ BCMA_CORE_USB20_HOST, "USB 2.0 Host" },
@@ -58,15 +63,11 @@ struct bcma_device_id_name bcma_device_names[] = {
{ BCMA_CORE_PHY_N, "PHY N" },
{ BCMA_CORE_SRAM_CTL, "SRAM Controller" },
{ BCMA_CORE_MINI_MACPHY, "Mini MACPHY" },
- { BCMA_CORE_ARM_1176, "ARM 1176" },
- { BCMA_CORE_ARM_7TDMI, "ARM 7TDMI" },
{ BCMA_CORE_PHY_LP, "PHY LP" },
{ BCMA_CORE_PMU, "PMU" },
{ BCMA_CORE_PHY_SSN, "PHY SSN" },
{ BCMA_CORE_SDIO_DEV, "SDIO Device" },
- { BCMA_CORE_ARM_CM3, "ARM CM3" },
{ BCMA_CORE_PHY_HT, "PHY HT" },
- { BCMA_CORE_MIPS_74K, "MIPS 74K" },
{ BCMA_CORE_MAC_GBIT, "GBit MAC" },
{ BCMA_CORE_DDR12_MEM_CTL, "DDR1/DDR2 Memory Controller" },
{ BCMA_CORE_PCIE_RC, "PCIe Root Complex" },
@@ -79,16 +80,41 @@ struct bcma_device_id_name bcma_device_names[] = {
{ BCMA_CORE_SHIM, "SHIM" },
{ BCMA_CORE_DEFAULT, "Default" },
};
-const char *bcma_device_name(struct bcma_device_id *id)
+
+static const struct bcma_device_id_name bcma_mips_device_names[] = {
+ { BCMA_CORE_MIPS, "MIPS" },
+ { BCMA_CORE_MIPS_3302, "MIPS 3302" },
+ { BCMA_CORE_MIPS_74K, "MIPS 74K" },
+};
+
+static const char *bcma_device_name(const struct bcma_device_id *id)
{
- int i;
+ const struct bcma_device_id_name *names;
+ int size, i;
+
+ /* search manufacturer specific names */
+ switch (id->manuf) {
+ case BCMA_MANUF_ARM:
+ names = bcma_arm_device_names;
+ size = ARRAY_SIZE(bcma_arm_device_names);
+ break;
+ case BCMA_MANUF_BCM:
+ names = bcma_bcm_device_names;
+ size = ARRAY_SIZE(bcma_bcm_device_names);
+ break;
+ case BCMA_MANUF_MIPS:
+ names = bcma_mips_device_names;
+ size = ARRAY_SIZE(bcma_mips_device_names);
+ break;
+ default:
+ return "UNKNOWN";
+ }
- if (id->manuf == BCMA_MANUF_BCM) {
- for (i = 0; i < ARRAY_SIZE(bcma_device_names); i++) {
- if (bcma_device_names[i].id == id->id)
- return bcma_device_names[i].name;
- }
+ for (i = 0; i < size; i++) {
+ if (names[i].id == id->id)
+ return names[i].name;
}
+
return "UNKNOWN";
}
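A hedged sketch of how the scan code can use the lookup above when reporting a discovered core; the log format is illustrative, and core->id is assumed to be the core's struct bcma_device_id:

	pr_info("Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X)\n",
		core->core_index, bcma_device_name(&core->id),
		core->id.manuf, core->id.id, core->id.rev);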
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 3e2a6002aae6..c7f93359acb0 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -181,6 +181,22 @@ static int bcma_sprom_valid(const u16 *sprom)
#define SPEX(_field, _offset, _mask, _shift) \
bus->sprom._field = ((sprom[SPOFF(_offset)] & (_mask)) >> (_shift))
+#define SPEX32(_field, _offset, _mask, _shift) \
+ bus->sprom._field = ((((u32)sprom[SPOFF((_offset)+2)] << 16 | \
+ sprom[SPOFF(_offset)]) & (_mask)) >> (_shift))
+
+#define SPEX_ARRAY8(_field, _offset, _mask, _shift) \
+ do { \
+ SPEX(_field[0], _offset + 0, _mask, _shift); \
+ SPEX(_field[1], _offset + 2, _mask, _shift); \
+ SPEX(_field[2], _offset + 4, _mask, _shift); \
+ SPEX(_field[3], _offset + 6, _mask, _shift); \
+ SPEX(_field[4], _offset + 8, _mask, _shift); \
+ SPEX(_field[5], _offset + 10, _mask, _shift); \
+ SPEX(_field[6], _offset + 12, _mask, _shift); \
+ SPEX(_field[7], _offset + 14, _mask, _shift); \
+ } while (0)
+
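For reference (not part of the patch), a SPEX32() invocation such as the ofdm2gpo one below expands roughly to the following, i.e. two adjacent 16-bit SPROM words are combined into one 32-bit value before mask and shift are applied:

	bus->sprom.ofdm2gpo =
		((((u32)sprom[SPOFF(SSB_SPROM8_OFDM2GPO + 2)] << 16 |
		   sprom[SPOFF(SSB_SPROM8_OFDM2GPO)]) & ~0) >> 0);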
static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
{
u16 v, o;
@@ -243,7 +259,8 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
SPEX(boardflags2_lo, SSB_SPROM8_BFL2LO, ~0, 0);
SPEX(boardflags2_hi, SSB_SPROM8_BFL2HI, ~0, 0);
- SPEX(country_code, SSB_SPROM8_CCODE, ~0, 0);
+ SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8);
+ SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0);
/* Extract core's power info */
for (i = 0; i < ARRAY_SIZE(pwr_info_offset); i++) {
@@ -298,6 +315,136 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
SSB_SROM8_FEM_TR_ISO_SHIFT);
SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_ANTSWLUT,
SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
+ SPEX(ant_available_a, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_A,
+ SSB_SPROM8_ANTAVAIL_A_SHIFT);
+ SPEX(ant_available_bg, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_BG,
+ SSB_SPROM8_ANTAVAIL_BG_SHIFT);
+ SPEX(maxpwr_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_MAXP_BG_MASK, 0);
+ SPEX(itssi_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_ITSSI_BG,
+ SSB_SPROM8_ITSSI_BG_SHIFT);
+ SPEX(maxpwr_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_MAXP_A_MASK, 0);
+ SPEX(itssi_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_ITSSI_A,
+ SSB_SPROM8_ITSSI_A_SHIFT);
+ SPEX(maxpwr_ah, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AH_MASK, 0);
+ SPEX(maxpwr_al, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AL_MASK,
+ SSB_SPROM8_MAXP_AL_SHIFT);
+ SPEX(gpio0, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P0, 0);
+ SPEX(gpio1, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P1,
+ SSB_SPROM8_GPIOA_P1_SHIFT);
+ SPEX(gpio2, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P2, 0);
+ SPEX(gpio3, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P3,
+ SSB_SPROM8_GPIOB_P3_SHIFT);
+ SPEX(tri2g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI2G, 0);
+ SPEX(tri5g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI5G,
+ SSB_SPROM8_TRI5G_SHIFT);
+ SPEX(tri5gl, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GL, 0);
+ SPEX(tri5gh, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GH,
+ SSB_SPROM8_TRI5GH_SHIFT);
+ SPEX(rxpo2g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO2G,
+ SSB_SPROM8_RXPO2G_SHIFT);
+ SPEX(rxpo5g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO5G,
+ SSB_SPROM8_RXPO5G_SHIFT);
+ SPEX(rssismf2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMF2G, 0);
+ SPEX(rssismc2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMC2G,
+ SSB_SPROM8_RSSISMC2G_SHIFT);
+ SPEX(rssisav2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISAV2G,
+ SSB_SPROM8_RSSISAV2G_SHIFT);
+ SPEX(bxa2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_BXA2G,
+ SSB_SPROM8_BXA2G_SHIFT);
+ SPEX(rssismf5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMF5G, 0);
+ SPEX(rssismc5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMC5G,
+ SSB_SPROM8_RSSISMC5G_SHIFT);
+ SPEX(rssisav5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISAV5G,
+ SSB_SPROM8_RSSISAV5G_SHIFT);
+ SPEX(bxa5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_BXA5G,
+ SSB_SPROM8_BXA5G_SHIFT);
+
+ SPEX(pa0b0, SSB_SPROM8_PA0B0, ~0, 0);
+ SPEX(pa0b1, SSB_SPROM8_PA0B1, ~0, 0);
+ SPEX(pa0b2, SSB_SPROM8_PA0B2, ~0, 0);
+ SPEX(pa1b0, SSB_SPROM8_PA1B0, ~0, 0);
+ SPEX(pa1b1, SSB_SPROM8_PA1B1, ~0, 0);
+ SPEX(pa1b2, SSB_SPROM8_PA1B2, ~0, 0);
+ SPEX(pa1lob0, SSB_SPROM8_PA1LOB0, ~0, 0);
+ SPEX(pa1lob1, SSB_SPROM8_PA1LOB1, ~0, 0);
+ SPEX(pa1lob2, SSB_SPROM8_PA1LOB2, ~0, 0);
+ SPEX(pa1hib0, SSB_SPROM8_PA1HIB0, ~0, 0);
+ SPEX(pa1hib1, SSB_SPROM8_PA1HIB1, ~0, 0);
+ SPEX(pa1hib2, SSB_SPROM8_PA1HIB2, ~0, 0);
+ SPEX(cck2gpo, SSB_SPROM8_CCK2GPO, ~0, 0);
+ SPEX32(ofdm2gpo, SSB_SPROM8_OFDM2GPO, ~0, 0);
+ SPEX32(ofdm5glpo, SSB_SPROM8_OFDM5GLPO, ~0, 0);
+ SPEX32(ofdm5gpo, SSB_SPROM8_OFDM5GPO, ~0, 0);
+ SPEX32(ofdm5ghpo, SSB_SPROM8_OFDM5GHPO, ~0, 0);
+
+ /* Extract the antenna gain values. */
+ SPEX(antenna_gain.a0, SSB_SPROM8_AGAIN01,
+ SSB_SPROM8_AGAIN0, SSB_SPROM8_AGAIN0_SHIFT);
+ SPEX(antenna_gain.a1, SSB_SPROM8_AGAIN01,
+ SSB_SPROM8_AGAIN1, SSB_SPROM8_AGAIN1_SHIFT);
+ SPEX(antenna_gain.a2, SSB_SPROM8_AGAIN23,
+ SSB_SPROM8_AGAIN2, SSB_SPROM8_AGAIN2_SHIFT);
+ SPEX(antenna_gain.a3, SSB_SPROM8_AGAIN23,
+ SSB_SPROM8_AGAIN3, SSB_SPROM8_AGAIN3_SHIFT);
+
+ SPEX(leddc_on_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_ON,
+ SSB_SPROM8_LEDDC_ON_SHIFT);
+ SPEX(leddc_off_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_OFF,
+ SSB_SPROM8_LEDDC_OFF_SHIFT);
+
+ SPEX(txchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_TXCHAIN,
+ SSB_SPROM8_TXRXC_TXCHAIN_SHIFT);
+ SPEX(rxchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_RXCHAIN,
+ SSB_SPROM8_TXRXC_RXCHAIN_SHIFT);
+ SPEX(antswitch, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_SWITCH,
+ SSB_SPROM8_TXRXC_SWITCH_SHIFT);
+
+ SPEX(opo, SSB_SPROM8_OFDM2GPO, 0x00ff, 0);
+
+ SPEX_ARRAY8(mcs2gpo, SSB_SPROM8_2G_MCSPO, ~0, 0);
+ SPEX_ARRAY8(mcs5gpo, SSB_SPROM8_5G_MCSPO, ~0, 0);
+ SPEX_ARRAY8(mcs5glpo, SSB_SPROM8_5GL_MCSPO, ~0, 0);
+ SPEX_ARRAY8(mcs5ghpo, SSB_SPROM8_5GH_MCSPO, ~0, 0);
+
+ SPEX(rawtempsense, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_RAWTEMP,
+ SSB_SPROM8_RAWTS_RAWTEMP_SHIFT);
+ SPEX(measpower, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_MEASPOWER,
+ SSB_SPROM8_RAWTS_MEASPOWER_SHIFT);
+ SPEX(tempsense_slope, SSB_SPROM8_OPT_CORRX,
+ SSB_SPROM8_OPT_CORRX_TEMP_SLOPE,
+ SSB_SPROM8_OPT_CORRX_TEMP_SLOPE_SHIFT);
+ SPEX(tempcorrx, SSB_SPROM8_OPT_CORRX, SSB_SPROM8_OPT_CORRX_TEMPCORRX,
+ SSB_SPROM8_OPT_CORRX_TEMPCORRX_SHIFT);
+ SPEX(tempsense_option, SSB_SPROM8_OPT_CORRX,
+ SSB_SPROM8_OPT_CORRX_TEMP_OPTION,
+ SSB_SPROM8_OPT_CORRX_TEMP_OPTION_SHIFT);
+ SPEX(freqoffset_corr, SSB_SPROM8_HWIQ_IQSWP,
+ SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR,
+ SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR_SHIFT);
+ SPEX(iqcal_swp_dis, SSB_SPROM8_HWIQ_IQSWP,
+ SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP,
+ SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT);
+ SPEX(hw_iqcal_en, SSB_SPROM8_HWIQ_IQSWP, SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL,
+ SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT);
+
+ SPEX(bw40po, SSB_SPROM8_BW40PO, ~0, 0);
+ SPEX(cddpo, SSB_SPROM8_CDDPO, ~0, 0);
+ SPEX(stbcpo, SSB_SPROM8_STBCPO, ~0, 0);
+ SPEX(bwduppo, SSB_SPROM8_BWDUPPO, ~0, 0);
+
+ SPEX(tempthresh, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_TRESH,
+ SSB_SPROM8_THERMAL_TRESH_SHIFT);
+ SPEX(tempoffset, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_OFFSET,
+ SSB_SPROM8_THERMAL_OFFSET_SHIFT);
+ SPEX(phycal_tempdelta, SSB_SPROM8_TEMPDELTA,
+ SSB_SPROM8_TEMPDELTA_PHYCAL,
+ SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT);
+ SPEX(temps_period, SSB_SPROM8_TEMPDELTA, SSB_SPROM8_TEMPDELTA_PERIOD,
+ SSB_SPROM8_TEMPDELTA_PERIOD_SHIFT);
+ SPEX(temps_hysteresis, SSB_SPROM8_TEMPDELTA,
+ SSB_SPROM8_TEMPDELTA_HYSTERESIS,
+ SSB_SPROM8_TEMPDELTA_HYSTERESIS_SHIFT);
}
/*
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index cf0e63dd97da..e54e31b02b88 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -65,39 +65,80 @@ struct drbd_atodb_wait {
int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);
+void *drbd_md_get_buffer(struct drbd_conf *mdev)
+{
+ int r;
+
+ wait_event(mdev->misc_wait,
+ (r = atomic_cmpxchg(&mdev->md_io_in_use, 0, 1)) == 0 ||
+ mdev->state.disk <= D_FAILED);
+
+ return r ? NULL : page_address(mdev->md_io_page);
+}
+
+void drbd_md_put_buffer(struct drbd_conf *mdev)
+{
+ if (atomic_dec_and_test(&mdev->md_io_in_use))
+ wake_up(&mdev->misc_wait);
+}
+
+static bool md_io_allowed(struct drbd_conf *mdev)
+{
+ enum drbd_disk_state ds = mdev->state.disk;
+ return ds >= D_NEGOTIATING || ds == D_ATTACHING;
+}
+
+void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+ unsigned int *done)
+{
+ long dt = bdev->dc.disk_timeout * HZ / 10;
+ if (dt == 0)
+ dt = MAX_SCHEDULE_TIMEOUT;
+
+ dt = wait_event_timeout(mdev->misc_wait, *done || !md_io_allowed(mdev), dt);
+ if (dt == 0)
+ dev_err(DEV, "meta-data IO operation timed out\n");
+}
+
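A hedged sketch of the intended usage pattern for the buffer helpers above (it mirrors the w_al_write_transaction() change later in this patch); error handling is abbreviated:

	buffer = drbd_md_get_buffer(mdev);	/* NULL if the disk failed */
	if (!buffer)
		return 0;
	/* ... fill and submit the one-page meta-data buffer ... */
	drbd_md_put_buffer(mdev);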
static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev,
struct page *page, sector_t sector,
int rw, int size)
{
struct bio *bio;
- struct drbd_md_io md_io;
int ok;
- md_io.mdev = mdev;
- init_completion(&md_io.event);
- md_io.error = 0;
+ mdev->md_io.done = 0;
+ mdev->md_io.error = -ENODEV;
if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC;
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
bio->bi_sector = sector;
ok = (bio_add_page(bio, page, size, 0) == size);
if (!ok)
goto out;
- bio->bi_private = &md_io;
+ bio->bi_private = &mdev->md_io;
bio->bi_end_io = drbd_md_io_complete;
bio->bi_rw = rw;
+ if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* Corresponding put_ldev in drbd_md_io_complete() */
+ dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
+ ok = 0;
+ goto out;
+ }
+
+ bio_get(bio); /* one bio_put() is in the completion handler */
+ atomic_inc(&mdev->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */
if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
bio_endio(bio, -EIO);
else
submit_bio(rw, bio);
- wait_for_completion(&md_io.event);
- ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
+ wait_until_done_or_disk_failure(mdev, bdev, &mdev->md_io.done);
+ ok = bio_flagged(bio, BIO_UPTODATE) && mdev->md_io.error == 0;
out:
bio_put(bio);
@@ -111,7 +152,7 @@ int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
int offset = 0;
struct page *iop = mdev->md_io_page;
- D_ASSERT(mutex_is_locked(&mdev->md_io_mutex));
+ D_ASSERT(atomic_read(&mdev->md_io_in_use) == 1);
BUG_ON(!bdev->md_bdev);
@@ -328,8 +369,13 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
return 1;
}
- mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
- buffer = (struct al_transaction *)page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev); /* protects md_io_buffer, al_tr_cycle, ... */
+ if (!buffer) {
+ dev_err(DEV, "disk failed while waiting for md_io buffer\n");
+ complete(&((struct update_al_work *)w)->event);
+ put_ldev(mdev);
+ return 1;
+ }
buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
buffer->tr_number = cpu_to_be32(mdev->al_tr_number);
@@ -374,7 +420,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
mdev->al_tr_number++;
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
complete(&((struct update_al_work *)w)->event);
put_ldev(mdev);
@@ -443,8 +489,9 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
/* lock out all other meta data io for now,
* and make sure the page is mapped.
*/
- mutex_lock(&mdev->md_io_mutex);
- buffer = page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev);
+ if (!buffer)
+ return 0;
/* Find the valid transaction in the log */
for (i = 0; i <= mx; i++) {
@@ -452,7 +499,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (rv == 0)
continue;
if (rv == -1) {
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
return 0;
}
cnr = be32_to_cpu(buffer->tr_number);
@@ -478,7 +525,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (!found_valid) {
dev_warn(DEV, "No usable activity log found.\n");
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
return 1;
}
@@ -493,7 +540,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
rv = drbd_al_read_tr(mdev, bdev, buffer, i);
ERR_IF(rv == 0) goto cancel;
if (rv == -1) {
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
return 0;
}
@@ -534,7 +581,7 @@ cancel:
mdev->al_tr_pos = 0;
/* ok, we are done with it */
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
transactions, active_extents);
@@ -671,16 +718,20 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
else
ext->rs_failed += count;
if (ext->rs_left < ext->rs_failed) {
- dev_err(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
- "rs_failed=%d count=%d\n",
+ dev_warn(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
+ "rs_failed=%d count=%d cstate=%s\n",
(unsigned long long)sector,
ext->lce.lc_number, ext->rs_left,
- ext->rs_failed, count);
- dump_stack();
-
- lc_put(mdev->resync, &ext->lce);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return;
+ ext->rs_failed, count,
+ drbd_conn_str(mdev->state.conn));
+
+ /* We don't expect to be able to clear more bits
+ * than have been set when we originally counted
+ * the set bits to cache that value in ext->rs_left.
+ * Whatever the reason (disconnect during resync,
+ * delayed local completion of an application write),
+ * try to fix it up by recounting here. */
+ ext->rs_left = drbd_bm_e_weight(mdev, enr);
}
} else {
/* Normally this element should be in the cache,
@@ -1192,6 +1243,7 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
put_ldev(mdev);
}
spin_unlock_irq(&mdev->al_lock);
+ wake_up(&mdev->al_wait);
return 0;
}
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 3030201c69d8..b5c5ff53cb57 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -205,7 +205,7 @@ void drbd_bm_unlock(struct drbd_conf *mdev)
static void bm_store_page_idx(struct page *page, unsigned long idx)
{
BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
- page_private(page) |= idx;
+ set_page_private(page, idx);
}
static unsigned long bm_page_to_idx(struct page *page)
@@ -886,12 +886,21 @@ void drbd_bm_clear_all(struct drbd_conf *mdev)
struct bm_aio_ctx {
struct drbd_conf *mdev;
atomic_t in_flight;
- struct completion done;
+ unsigned int done;
unsigned flags;
#define BM_AIO_COPY_PAGES 1
int error;
+ struct kref kref;
};
+static void bm_aio_ctx_destroy(struct kref *kref)
+{
+ struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);
+
+ put_ldev(ctx->mdev);
+ kfree(ctx);
+}
+
/* bv_page may be a copy, or may be the original */
static void bm_async_io_complete(struct bio *bio, int error)
{
@@ -930,20 +939,21 @@ static void bm_async_io_complete(struct bio *bio, int error)
bm_page_unlock_io(mdev, idx);
- /* FIXME give back to page pool */
if (ctx->flags & BM_AIO_COPY_PAGES)
- put_page(bio->bi_io_vec[0].bv_page);
+ mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);
bio_put(bio);
- if (atomic_dec_and_test(&ctx->in_flight))
- complete(&ctx->done);
+ if (atomic_dec_and_test(&ctx->in_flight)) {
+ ctx->done = 1;
+ wake_up(&mdev->misc_wait);
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+ }
}
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
- /* we are process context. we always get a bio */
- struct bio *bio = bio_alloc(GFP_KERNEL, 1);
+ struct bio *bio = bio_alloc_drbd(GFP_NOIO);
struct drbd_conf *mdev = ctx->mdev;
struct drbd_bitmap *b = mdev->bitmap;
struct page *page;
@@ -966,10 +976,8 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bm_set_page_unchanged(b->bm_pages[page_nr]);
if (ctx->flags & BM_AIO_COPY_PAGES) {
- /* FIXME alloc_page is good enough for now, but actually needs
- * to use pre-allocated page pool */
void *src, *dest;
- page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
+ page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
dest = kmap_atomic(page);
src = kmap_atomic(b->bm_pages[page_nr]);
memcpy(dest, src, PAGE_SIZE);
@@ -981,6 +989,8 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bio->bi_bdev = mdev->ldev->md_bdev;
bio->bi_sector = on_disk_sector;
+ /* bio_add_page of a single page to an empty bio will always succeed,
+ * according to the API. Do we want to assert that? */
bio_add_page(bio, page, len, 0);
bio->bi_private = ctx;
bio->bi_end_io = bm_async_io_complete;
@@ -999,14 +1009,9 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
/*
* bm_rw: read/write the whole bitmap from/to its on disk location.
*/
-static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local)
+static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
- struct bm_aio_ctx ctx = {
- .mdev = mdev,
- .in_flight = ATOMIC_INIT(1),
- .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
- .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0,
- };
+ struct bm_aio_ctx *ctx;
struct drbd_bitmap *b = mdev->bitmap;
int num_pages, i, count = 0;
unsigned long now;
@@ -1021,7 +1026,27 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
* For lazy writeout, we don't care for ongoing changes to the bitmap,
* as we submit copies of pages anyways.
*/
- if (!ctx.flags)
+
+ ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
+ if (!ctx)
+ return -ENOMEM;
+
+ *ctx = (struct bm_aio_ctx) {
+ .mdev = mdev,
+ .in_flight = ATOMIC_INIT(1),
+ .done = 0,
+ .flags = flags,
+ .error = 0,
+ .kref = { ATOMIC_INIT(2) },
+ };
+
+ if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
+ dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
+ kfree(ctx);
+ return -ENODEV;
+ }
+
+ if (!ctx->flags)
WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));
num_pages = b->bm_number_of_pages;
@@ -1046,29 +1071,38 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
continue;
}
}
- atomic_inc(&ctx.in_flight);
- bm_page_io_async(&ctx, i, rw);
+ atomic_inc(&ctx->in_flight);
+ bm_page_io_async(ctx, i, rw);
++count;
cond_resched();
}
/*
- * We initialize ctx.in_flight to one to make sure bm_async_io_complete
- * will not complete() early, and decrement / test it here. If there
+ * We initialize ctx->in_flight to one to make sure bm_async_io_complete
+ * will not set ctx->done early, and decrement / test it here. If there
* are still some bios in flight, we need to wait for them here.
+ * If all IO is done already (or nothing had been submitted), there is
+ * no need to wait. Still, we need to put the kref associated with the
+ * "in_flight reached zero, all done" event.
*/
- if (!atomic_dec_and_test(&ctx.in_flight))
- wait_for_completion(&ctx.done);
+ if (!atomic_dec_and_test(&ctx->in_flight))
+ wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+ else
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+
dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
rw == WRITE ? "WRITE" : "READ",
count, jiffies - now);
- if (ctx.error) {
+ if (ctx->error) {
dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
drbd_chk_io_error(mdev, 1, true);
- err = -EIO; /* ctx.error ? */
+ err = -EIO; /* ctx->error ? */
}
+ if (atomic_read(&ctx->in_flight))
+ err = -EIO; /* Disk failed during IO... */
+
now = jiffies;
if (rw == WRITE) {
drbd_md_flush(mdev);
@@ -1082,6 +1116,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
return err;
}
@@ -1091,7 +1126,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
*/
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
- return bm_rw(mdev, READ, 0);
+ return bm_rw(mdev, READ, 0, 0);
}
/**
@@ -1102,7 +1137,7 @@ int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
*/
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
- return bm_rw(mdev, WRITE, 0);
+ return bm_rw(mdev, WRITE, 0, 0);
}
/**
@@ -1112,7 +1147,23 @@ int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
*/
int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
{
- return bm_rw(mdev, WRITE, upper_idx);
+ return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, upper_idx);
+}
+
+/**
+ * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
+ * @mdev: DRBD device.
+ *
+ * Will only write pages that have changed since last IO.
+ * In contrast to drbd_bm_write(), this will copy the bitmap pages
+ * to temporary writeout pages. It is intended to trigger a full write-out
+ * while still allowing the bitmap to change, for example if a resync or online
+ * verify is aborted due to a failed peer disk, while local IO continues, or
+ * pending resync acks are still being processed.
+ */
+int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
+{
+ return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
}
@@ -1130,28 +1181,45 @@ int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(l
*/
int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
{
- struct bm_aio_ctx ctx = {
+ struct bm_aio_ctx *ctx;
+ int err;
+
+ if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
+ dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
+ return 0;
+ }
+
+ ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
+ if (!ctx)
+ return -ENOMEM;
+
+ *ctx = (struct bm_aio_ctx) {
.mdev = mdev,
.in_flight = ATOMIC_INIT(1),
- .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
+ .done = 0,
.flags = BM_AIO_COPY_PAGES,
+ .error = 0,
+ .kref = { ATOMIC_INIT(2) },
};
- if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
- dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
- return 0;
+ if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
+ dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
+ kfree(ctx);
+ return -ENODEV;
}
- bm_page_io_async(&ctx, idx, WRITE_SYNC);
- wait_for_completion(&ctx.done);
+ bm_page_io_async(ctx, idx, WRITE_SYNC);
+ wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
- if (ctx.error)
+ if (ctx->error)
drbd_chk_io_error(mdev, 1, true);
/* that should force detach, so the in memory bitmap will be
* gone in a moment as well. */
mdev->bm_writ_cnt++;
- return ctx.error;
+ err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error;
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+ return err;
}
/* NOTE
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 8d680562ba73..02f013a073a7 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -712,7 +712,6 @@ struct drbd_request {
struct list_head tl_requests; /* ring list in the transfer log */
struct bio *master_bio; /* master bio pointer */
unsigned long rq_state; /* see comments above _req_mod() */
- int seq_num;
unsigned long start_time;
};
@@ -851,6 +850,7 @@ enum {
NEW_CUR_UUID, /* Create new current UUID when thawing IO */
AL_SUSPENDED, /* Activity logging is currently suspended. */
AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */
+ STATE_SENT, /* Do not change state/UUIDs while this is set */
};
struct drbd_bitmap; /* opaque for drbd_conf */
@@ -862,31 +862,30 @@ enum bm_flag {
BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */
/* currently locked for bulk operation */
- BM_LOCKED_MASK = 0x7,
+ BM_LOCKED_MASK = 0xf,
/* in detail, that is: */
BM_DONT_CLEAR = 0x1,
BM_DONT_SET = 0x2,
BM_DONT_TEST = 0x4,
+ /* so we can mark it locked for bulk operation,
+ * and still allow all non-bulk operations */
+ BM_IS_LOCKED = 0x8,
+
/* (test bit, count bit) allowed (common case) */
- BM_LOCKED_TEST_ALLOWED = 0x3,
+ BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,
/* testing bits, as well as setting new bits allowed, but clearing bits
* would be unexpected. Used during bitmap receive. Setting new bits
* requires sending of "out-of-sync" information, though. */
- BM_LOCKED_SET_ALLOWED = 0x1,
+ BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,
- /* clear is not expected while bitmap is locked for bulk operation */
+ /* for drbd_bm_write_copy_pages, everything is allowed,
+ * only concurrent bulk operations are locked out. */
+ BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};
-
-/* TODO sort members for performance
- * MAYBE group them further */
-
-/* THINK maybe we actually want to use the default "event/%s" worker threads
- * or similar in linux 2.6, which uses per cpu data and threads.
- */
struct drbd_work_queue {
struct list_head q;
struct semaphore s; /* producers up it, worker down()s it */
@@ -938,8 +937,7 @@ struct drbd_backing_dev {
};
struct drbd_md_io {
- struct drbd_conf *mdev;
- struct completion event;
+ unsigned int done;
int error;
};
@@ -1022,6 +1020,7 @@ struct drbd_conf {
struct drbd_tl_epoch *newest_tle;
struct drbd_tl_epoch *oldest_tle;
struct list_head out_of_sequence_requests;
+ struct list_head barrier_acked_requests;
struct hlist_head *tl_hash;
unsigned int tl_hash_s;
@@ -1056,6 +1055,8 @@ struct drbd_conf {
struct crypto_hash *csums_tfm;
struct crypto_hash *verify_tfm;
+ unsigned long last_reattach_jif;
+ unsigned long last_reconnect_jif;
struct drbd_thread receiver;
struct drbd_thread worker;
struct drbd_thread asender;
@@ -1094,7 +1095,8 @@ struct drbd_conf {
wait_queue_head_t ee_wait;
struct page *md_io_page; /* one page buffer for md_io */
struct page *md_io_tmpp; /* for logical_block_size != 512 */
- struct mutex md_io_mutex; /* protects the md_io_buffer */
+ struct drbd_md_io md_io;
+ atomic_t md_io_in_use; /* protects the md_io, md_io_page and md_io_tmpp */
spinlock_t al_lock;
wait_queue_head_t al_wait;
struct lru_cache *act_log; /* activity log */
@@ -1228,8 +1230,8 @@ extern int drbd_send_uuids(struct drbd_conf *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
-extern int _drbd_send_state(struct drbd_conf *mdev);
-extern int drbd_send_state(struct drbd_conf *mdev);
+extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s);
+extern int drbd_send_current_state(struct drbd_conf *mdev);
extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
enum drbd_packets cmd, struct p_header80 *h,
size_t size, unsigned msg_flags);
@@ -1461,6 +1463,7 @@ extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
+extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
unsigned long al_enr);
extern size_t drbd_bm_words(struct drbd_conf *mdev);
@@ -1493,11 +1496,38 @@ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
extern mempool_t *drbd_request_mempool;
extern mempool_t *drbd_ee_mempool;
-extern struct page *drbd_pp_pool; /* drbd's page pool */
+/* drbd's page pool, used to buffer data received from the peer,
+ * or data requested by the peer.
+ *
+ * This does not have an emergency reserve.
+ *
+ * When allocating from this pool, the allocator first takes pages from the
+ * pool. Only if the pool is depleted will it try to allocate from the system.
+ *
+ * The assumption is that pages taken from this pool will be processed and
+ * given back "quickly", and can then be recycled, so we avoid frequent
+ * calls to alloc_page() and will still be able to make progress even
+ * under memory pressure.
+ */
+extern struct page *drbd_pp_pool;
extern spinlock_t drbd_pp_lock;
extern int drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;
+/* We also need a standard (emergency-reserve backed) page pool
+ * for meta data IO (activity log, bitmap).
+ * We can keep it global, as long as it is used as "N pages at a time".
+ * 128 should be plenty; currently we could probably get away with as few as 1.
+ */
+#define DRBD_MIN_POOL_PAGES 128
+extern mempool_t *drbd_md_io_page_pool;
+
+/* We also need to make sure we get a bio
+ * when we need it for housekeeping purposes */
+extern struct bio_set *drbd_md_io_bio_set;
+/* to allocate from that set */
+extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
+
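A hedged sketch of how such a pool could be set up at module init time (order-0 pages, DRBD_MIN_POOL_PAGES kept in reserve); the actual creation call is not shown in this excerpt:

	drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
	if (!drbd_md_io_page_pool)
		return -ENOMEM;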
extern rwlock_t global_state_lock;
extern struct drbd_conf *drbd_new_device(unsigned int minor);
@@ -1536,8 +1566,12 @@ extern void resume_next_sg(struct drbd_conf *mdev);
extern void suspend_other_sg(struct drbd_conf *mdev);
extern int drbd_resync_finished(struct drbd_conf *mdev);
/* maybe rather drbd_main.c ? */
+extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
+extern void drbd_md_put_buffer(struct drbd_conf *mdev);
extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
- struct drbd_backing_dev *bdev, sector_t sector, int rw);
+ struct drbd_backing_dev *bdev, sector_t sector, int rw);
+extern void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+ unsigned int *done);
extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
@@ -1754,19 +1788,6 @@ static inline struct page *page_chain_next(struct page *page)
#define page_chain_for_each_safe(page, n) \
for (; page && ({ n = page_chain_next(page); 1; }); page = n)
-static inline int drbd_bio_has_active_page(struct bio *bio)
-{
- struct bio_vec *bvec;
- int i;
-
- __bio_for_each_segment(bvec, bio, i, 0) {
- if (page_count(bvec->bv_page) > 1)
- return 1;
- }
-
- return 0;
-}
-
static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
{
struct page *page = e->pages;
@@ -1777,7 +1798,6 @@ static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
return 0;
}
-
static inline void drbd_state_lock(struct drbd_conf *mdev)
{
wait_event(mdev->misc_wait,
@@ -2230,7 +2250,7 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
* Note: currently we don't support such large bitmaps on 32bit
* arch anyways, but no harm done to be prepared for it here.
*/
- unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10;
+ unsigned int shift = mdev->rs_total > UINT_MAX ? 16 : 10;
unsigned long left = *bits_left >> shift;
unsigned long total = 1UL + (mdev->rs_total >> shift);
unsigned long tmp = 1000UL - left * 1000UL/total;
@@ -2306,12 +2326,12 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
case D_OUTDATED:
case D_CONSISTENT:
case D_UP_TO_DATE:
+ case D_FAILED:
/* disk state is stable as well. */
break;
/* no new io accepted during transitional states
case D_ATTACHING:
- case D_FAILED:
case D_NEGOTIATING:
case D_UNKNOWN:
case D_MASK:
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 211fc44f84be..920ede2829d6 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -139,6 +139,8 @@ struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
+mempool_t *drbd_md_io_page_pool;
+struct bio_set *drbd_md_io_bio_set;
/* I do not use a standard mempool, because:
1) I want to hand out the pre-allocated objects first.
@@ -159,7 +161,24 @@ static const struct block_device_operations drbd_ops = {
.release = drbd_release,
};
-#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
+static void bio_destructor_drbd(struct bio *bio)
+{
+ bio_free(bio, drbd_md_io_bio_set);
+}
+
+struct bio *bio_alloc_drbd(gfp_t gfp_mask)
+{
+ struct bio *bio;
+
+ if (!drbd_md_io_bio_set)
+ return bio_alloc(gfp_mask, 1);
+
+ bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
+ if (!bio)
+ return NULL;
+ bio->bi_destructor = bio_destructor_drbd;
+ return bio;
+}
#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
@@ -208,6 +227,7 @@ static int tl_init(struct drbd_conf *mdev)
mdev->oldest_tle = b;
mdev->newest_tle = b;
INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
+ INIT_LIST_HEAD(&mdev->barrier_acked_requests);
mdev->tl_hash = NULL;
mdev->tl_hash_s = 0;
@@ -246,9 +266,7 @@ void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
new->n_writes = 0;
newest_before = mdev->newest_tle;
- /* never send a barrier number == 0, because that is special-cased
- * when using TCQ for our write ordering code */
- new->br_number = (newest_before->br_number+1) ?: 1;
+ new->br_number = newest_before->br_number+1;
if (mdev->newest_tle != new) {
mdev->newest_tle->next = new;
mdev->newest_tle = new;
@@ -311,7 +329,7 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
These have been list_move'd to the out_of_sequence_requests list in
_req_mod(, barrier_acked) above.
*/
- list_del_init(&b->requests);
+ list_splice_init(&b->requests, &mdev->barrier_acked_requests);
nob = b->next;
if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
@@ -411,6 +429,23 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
b = tmp;
list_splice(&carry_reads, &b->requests);
}
+
+ /* Actions operating on the disk state also want to work on
+ requests that got barrier-acked. */
+ switch (what) {
+ case fail_frozen_disk_io:
+ case restart_frozen_disk_io:
+ list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
+ req = list_entry(le, struct drbd_request, tl_requests);
+ _req_mod(req, what);
+ }
+
+ case connection_lost_while_pending:
+ case resend:
+ break;
+ default:
+ dev_err(DEV, "what = %d in _tl_restart()\n", what);
+ }
}
@@ -458,6 +493,38 @@ void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
}
/**
+ * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
+ * @mdev: DRBD device.
+ */
+void tl_abort_disk_io(struct drbd_conf *mdev)
+{
+ struct drbd_tl_epoch *b;
+ struct list_head *le, *tle;
+ struct drbd_request *req;
+
+ spin_lock_irq(&mdev->req_lock);
+ b = mdev->oldest_tle;
+ while (b) {
+ list_for_each_safe(le, tle, &b->requests) {
+ req = list_entry(le, struct drbd_request, tl_requests);
+ if (!(req->rq_state & RQ_LOCAL_PENDING))
+ continue;
+ _req_mod(req, abort_disk_io);
+ }
+ b = b->next;
+ }
+
+ list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
+ req = list_entry(le, struct drbd_request, tl_requests);
+ if (!(req->rq_state & RQ_LOCAL_PENDING))
+ continue;
+ _req_mod(req, abort_disk_io);
+ }
+
+ spin_unlock_irq(&mdev->req_lock);
+}
+
+/**
* cl_wide_st_chg() - true if the state change is a cluster wide one
* @mdev: DRBD device.
* @os: old (current) state.
@@ -470,7 +537,7 @@ static int cl_wide_st_chg(struct drbd_conf *mdev,
((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
(os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
(os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
- (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
+ (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}
@@ -509,8 +576,16 @@ static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
union drbd_state,
union drbd_state);
+enum sanitize_state_warnings {
+ NO_WARNING,
+ ABORTED_ONLINE_VERIFY,
+ ABORTED_RESYNC,
+ CONNECTION_LOST_NEGOTIATING,
+ IMPLICITLY_UPGRADED_DISK,
+ IMPLICITLY_UPGRADED_PDSK,
+};
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, const char **warn_sync_abort);
+ union drbd_state ns, enum sanitize_state_warnings *warn);
int drbd_send_state_req(struct drbd_conf *,
union drbd_state, union drbd_state);
@@ -785,6 +860,13 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
rv = SS_IN_TRANSIENT_STATE;
+ /* While establishing a connection, only allow cstate to change.
+ Delay/refuse role changes, detach/attach, etc. */
+ if (test_bit(STATE_SENT, &mdev->flags) &&
+ !(os.conn == C_WF_REPORT_PARAMS ||
+ (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
+ rv = SS_IN_TRANSIENT_STATE;
+
if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
rv = SS_NEED_CONNECTION;
@@ -803,6 +885,21 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
return rv;
}
+static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
+{
+ static const char *msg_table[] = {
+ [NO_WARNING] = "",
+ [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
+ [ABORTED_RESYNC] = "Resync aborted.",
+ [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
+ [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
+ [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
+ };
+
+ if (warn != NO_WARNING)
+ dev_warn(DEV, "%s\n", msg_table[warn]);
+}
+
/**
* sanitize_state() - Resolves implicitly necessary additional changes to a state transition
* @mdev: DRBD device.
@@ -814,11 +911,14 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
* to D_UNKNOWN. This rule and many more along those lines are in this function.
*/
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, const char **warn_sync_abort)
+ union drbd_state ns, enum sanitize_state_warnings *warn)
{
enum drbd_fencing_p fp;
enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
+ if (warn)
+ *warn = NO_WARNING;
+
fp = FP_DONT_CARE;
if (get_ldev(mdev)) {
fp = mdev->ldev->dc.fencing;
@@ -833,18 +933,13 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
/* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
* If you try to go into some Sync* state, that shall fail (elsewhere). */
if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
- ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
+ ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_CONNECTED)
ns.conn = os.conn;
/* we cannot fail (again) if we already detached */
if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
ns.disk = D_DISKLESS;
- /* if we are only D_ATTACHING yet,
- * we can (and should) go directly to D_DISKLESS. */
- if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
- ns.disk = D_DISKLESS;
-
/* After C_DISCONNECTING only C_STANDALONE may follow */
if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
ns.conn = os.conn;
@@ -863,10 +958,9 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
/* Abort resync if a disk fails/detaches */
if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
(ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
- if (warn_sync_abort)
- *warn_sync_abort =
- os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
- "Online-verify" : "Resync";
+ if (warn)
+ *warn = os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
+ ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
ns.conn = C_CONNECTED;
}
@@ -877,7 +971,8 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
ns.disk = mdev->new_state_tmp.disk;
ns.pdsk = mdev->new_state_tmp.pdsk;
} else {
- dev_alert(DEV, "Connection lost while negotiating, no data!\n");
+ if (warn)
+ *warn = CONNECTION_LOST_NEGOTIATING;
ns.disk = D_DISKLESS;
ns.pdsk = D_UNKNOWN;
}
@@ -959,16 +1054,16 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
ns.disk = disk_max;
if (ns.disk < disk_min) {
- dev_warn(DEV, "Implicitly set disk from %s to %s\n",
- drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
+ if (warn)
+ *warn = IMPLICITLY_UPGRADED_DISK;
ns.disk = disk_min;
}
if (ns.pdsk > pdsk_max)
ns.pdsk = pdsk_max;
if (ns.pdsk < pdsk_min) {
- dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
- drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
+ if (warn)
+ *warn = IMPLICITLY_UPGRADED_PDSK;
ns.pdsk = pdsk_min;
}
@@ -1045,12 +1140,12 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
{
union drbd_state os;
enum drbd_state_rv rv = SS_SUCCESS;
- const char *warn_sync_abort = NULL;
+ enum sanitize_state_warnings ssw;
struct after_state_chg_work *ascw;
os = mdev->state;
- ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
+ ns = sanitize_state(mdev, os, ns, &ssw);
if (ns.i == os.i)
return SS_NOTHING_TO_DO;
@@ -1076,8 +1171,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
return rv;
}
- if (warn_sync_abort)
- dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
+ print_sanitize_warnings(mdev, ssw);
{
char *pbp, pb[300];
@@ -1243,7 +1337,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
drbd_thread_stop_nowait(&mdev->receiver);
/* Upon network failure, we need to restart the receiver. */
- if (os.conn > C_TEAR_DOWN &&
+ if (os.conn > C_WF_CONNECTION &&
ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
drbd_thread_restart_nowait(&mdev->receiver);
@@ -1251,6 +1345,15 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
drbd_resume_al(mdev);
+ /* remember last connect and attach times so request_timer_fn() won't
+ * kill newly established sessions while we are still trying to thaw
+ * previously frozen IO */
+ if (os.conn != C_WF_REPORT_PARAMS && ns.conn == C_WF_REPORT_PARAMS)
+ mdev->last_reconnect_jif = jiffies;
+ if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+ ns.disk > D_NEGOTIATING)
+ mdev->last_reattach_jif = jiffies;
+
ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
if (ascw) {
ascw->os = os;
@@ -1354,12 +1457,16 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* Here we have the actions that are performed after a
state change. This function might sleep */
+ if (os.disk <= D_NEGOTIATING && ns.disk > D_NEGOTIATING)
+ mod_timer(&mdev->request_timer, jiffies + HZ);
+
nsm.i = -1;
if (ns.susp_nod) {
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
what = resend;
- if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
+ if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+ ns.disk > D_NEGOTIATING)
what = restart_frozen_disk_io;
if (what != nothing)
@@ -1408,7 +1515,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* Do not change the order of the if above and the two below... */
if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
}
/* No point in queuing send_bitmap if we don't have a connection
* anymore, so check also the _current_ state, not only the new state
@@ -1441,11 +1548,11 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
}
if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
- if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
+ if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
+ mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
drbd_uuid_new_current(mdev);
drbd_send_uuids(mdev);
}
-
/* D_DISKLESS Peer becomes secondary */
if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
/* We may still be Primary ourselves.
@@ -1473,14 +1580,14 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
drbd_send_sizes(mdev, 0, 0); /* to start sync... */
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
}
/* We want to pause/continue resync, tell peer. */
if (ns.conn >= C_CONNECTED &&
((os.aftr_isp != ns.aftr_isp) ||
(os.user_isp != ns.user_isp)))
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
/* In case one of the isp bits got set, suspend other devices. */
if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
@@ -1490,10 +1597,10 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* Make sure the peer gets informed about eventual state
changes (ISP bits) while we were in WFReportParams. */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
/* We are in the progress to start a full sync... */
if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
@@ -1513,33 +1620,38 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* first half of local IO error, failure to attach,
* or administrative detach */
if (os.disk != D_FAILED && ns.disk == D_FAILED) {
- enum drbd_io_error_p eh;
- int was_io_error;
+ enum drbd_io_error_p eh = EP_PASS_ON;
+ int was_io_error = 0;
/* corresponding get_ldev was in __drbd_set_state, to serialize
- * our cleanup here with the transition to D_DISKLESS,
- * so it is safe to dreference ldev here. */
- eh = mdev->ldev->dc.on_io_error;
- was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
-
- /* current state still has to be D_FAILED,
- * there is only one way out: to D_DISKLESS,
- * and that may only happen after our put_ldev below. */
- if (mdev->state.disk != D_FAILED)
- dev_err(DEV,
- "ASSERT FAILED: disk is %s during detach\n",
- drbd_disk_str(mdev->state.disk));
-
- if (drbd_send_state(mdev))
- dev_warn(DEV, "Notified peer that I am detaching my disk\n");
- else
- dev_err(DEV, "Sending state for detaching disk failed\n");
-
- drbd_rs_cancel_all(mdev);
-
- /* In case we want to get something to stable storage still,
- * this may be the last chance.
- * Following put_ldev may transition to D_DISKLESS. */
- drbd_md_sync(mdev);
+ * our cleanup here with the transition to D_DISKLESS.
+	 * But it is still not safe to dereference ldev here, since
+	 * we might come from a failed Attach before ldev was set. */
+ if (mdev->ldev) {
+ eh = mdev->ldev->dc.on_io_error;
+ was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+ /* Immediately allow completion of all application IO, that waits
+ for completion from the local disk. */
+ tl_abort_disk_io(mdev);
+
+ /* current state still has to be D_FAILED,
+ * there is only one way out: to D_DISKLESS,
+ * and that may only happen after our put_ldev below. */
+ if (mdev->state.disk != D_FAILED)
+ dev_err(DEV,
+ "ASSERT FAILED: disk is %s during detach\n",
+ drbd_disk_str(mdev->state.disk));
+
+ if (ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
+ drbd_rs_cancel_all(mdev);
+
+ /* In case we want to get something to stable storage still,
+ * this may be the last chance.
+ * Following put_ldev may transition to D_DISKLESS. */
+ drbd_md_sync(mdev);
+ }
put_ldev(mdev);
if (was_io_error && eh == EP_CALL_HELPER)
@@ -1561,16 +1673,17 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
mdev->rs_failed = 0;
atomic_set(&mdev->rs_pending_cnt, 0);
- if (drbd_send_state(mdev))
- dev_warn(DEV, "Notified peer that I'm now diskless.\n");
+ if (ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
/* corresponding get_ldev in __drbd_set_state
* this may finally trigger drbd_ldev_destroy. */
put_ldev(mdev);
}
/* Notify peer that I had a local IO error, and did not detached.. */
- if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
- drbd_send_state(mdev);
+ if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
/* Disks got bigger while they were detached */
if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
@@ -1588,7 +1701,13 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* sync target done with resync. Explicitly notify peer, even though
* it should (at least for non-empty resyncs) already know itself. */
if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
+
+ /* Wake up role changes, that were delayed because of connection establishing */
+ if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS) {
+ clear_bit(STATE_SENT, &mdev->flags);
+ wake_up(&mdev->state_wait);
+ }
/* This triggers bitmap writeout of potentially still unwritten pages
* if the resync finished cleanly, or aborted because of peer disk
@@ -1598,8 +1717,8 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
* No harm done if some bits change during this phase.
*/
if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
- drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
- "write from resync_finished", BM_LOCKED_SET_ALLOWED);
+ drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
+ "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
put_ldev(mdev);
}
@@ -2057,7 +2176,11 @@ int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
- uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
+ uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ if (uuid && uuid != UUID_JUST_CREATED)
+ uuid = uuid + UUID_NEW_BM_OFFSET;
+ else
+ get_random_bytes(&uuid, sizeof(u64));
drbd_uuid_set(mdev, UI_BITMAP, uuid);
drbd_print_uuids(mdev, "updated sync UUID");
drbd_md_sync(mdev);
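The hunk above changes drbd_gen_and_send_sync_uuid() so the new bitmap UUID is only derived from the old one when that value is usable; otherwise a random UUID is drawn. The rule in isolation, as a plain-C sketch (random_u64() stands in for get_random_bytes() and is an assumption, not a kernel API):

#include <stdint.h>

static uint64_t next_bitmap_uuid(uint64_t old, uint64_t just_created,
				 uint64_t offset, uint64_t (*random_u64)(void))
{
	if (old && old != just_created)
		return old + offset;		/* derive from the previous UUID */
	return random_u64();			/* no usable UUID: pick a random one */
}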
@@ -2089,6 +2212,10 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
}
+ /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
+ if (mdev->agreed_pro_version <= 94)
+ max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+
p.d_size = cpu_to_be64(d_size);
p.u_size = cpu_to_be64(u_size);
p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
@@ -2102,10 +2229,10 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
}
/**
- * drbd_send_state() - Sends the drbd state to the peer
+ * drbd_send_current_state() - Sends the drbd state to the peer
* @mdev: DRBD device.
*/
-int drbd_send_state(struct drbd_conf *mdev)
+int drbd_send_current_state(struct drbd_conf *mdev)
{
struct socket *sock;
struct p_state p;
@@ -2131,6 +2258,37 @@ int drbd_send_state(struct drbd_conf *mdev)
return ok;
}
+/**
+ * drbd_send_state() - After a state change, sends the new state to the peer
+ * @mdev: DRBD device.
+ * @state: the state to send, not necessarily the current state.
+ *
+ * Each state change queues an "after_state_ch" work, which will eventually
+ * send the resulting new state to the peer. If more state changes happen
+ * between queuing and processing of the after_state_ch work, we still
+ * want to send each intermediary state in the order it occurred.
+ */
+int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
+{
+ struct socket *sock;
+ struct p_state p;
+ int ok = 0;
+
+ mutex_lock(&mdev->data.mutex);
+
+ p.state = cpu_to_be32(state.i);
+ sock = mdev->data.socket;
+
+ if (likely(sock != NULL)) {
+ ok = _drbd_send_cmd(mdev, sock, P_STATE,
+ (struct p_header80 *)&p, sizeof(p), 0);
+ }
+
+ mutex_unlock(&mdev->data.mutex);
+
+ return ok;
+}
+
int drbd_send_state_req(struct drbd_conf *mdev,
union drbd_state mask, union drbd_state val)
{
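The split above leaves two senders: drbd_send_current_state() for paths with no state-change context (e.g. right after connect), and drbd_send_state(mdev, ns) for after_state_ch(), which must report the state captured at change time even if mdev->state has moved on since the work was queued. A minimal sketch of how a caller is assumed to choose between them; example_notify_peer() is hypothetical and not part of the patch:

static void example_notify_peer(struct drbd_conf *mdev,
				union drbd_state *ns_or_null)
{
	if (ns_or_null)
		/* inside after_state_ch(): report the captured state */
		drbd_send_state(mdev, *ns_or_null);
	else
		/* no state-change context: report whatever is current */
		drbd_send_current_state(mdev);
}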
@@ -2615,7 +2773,7 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
struct bio_vec *bvec;
int i;
/* hint all but last page with MSG_MORE */
- __bio_for_each_segment(bvec, bio, i, 0) {
+ bio_for_each_segment(bvec, bio, i) {
if (!_drbd_no_send_page(mdev, bvec->bv_page,
bvec->bv_offset, bvec->bv_len,
i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
@@ -2629,7 +2787,7 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
struct bio_vec *bvec;
int i;
/* hint all but last page with MSG_MORE */
- __bio_for_each_segment(bvec, bio, i, 0) {
+ bio_for_each_segment(bvec, bio, i) {
if (!_drbd_send_page(mdev, bvec->bv_page,
bvec->bv_offset, bvec->bv_len,
i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
@@ -2695,8 +2853,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
p.sector = cpu_to_be64(req->sector);
p.block_id = (unsigned long)req;
- p.seq_num = cpu_to_be32(req->seq_num =
- atomic_add_return(1, &mdev->packet_seq));
+ p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
@@ -2987,8 +3144,8 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
atomic_set(&mdev->rs_sect_in, 0);
atomic_set(&mdev->rs_sect_ev, 0);
atomic_set(&mdev->ap_in_flight, 0);
+ atomic_set(&mdev->md_io_in_use, 0);
- mutex_init(&mdev->md_io_mutex);
mutex_init(&mdev->data.mutex);
mutex_init(&mdev->meta.mutex);
sema_init(&mdev->data.work.s, 0);
@@ -3126,6 +3283,10 @@ static void drbd_destroy_mempools(void)
/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
+ if (drbd_md_io_bio_set)
+ bioset_free(drbd_md_io_bio_set);
+ if (drbd_md_io_page_pool)
+ mempool_destroy(drbd_md_io_page_pool);
if (drbd_ee_mempool)
mempool_destroy(drbd_ee_mempool);
if (drbd_request_mempool)
@@ -3139,6 +3300,8 @@ static void drbd_destroy_mempools(void)
if (drbd_al_ext_cache)
kmem_cache_destroy(drbd_al_ext_cache);
+ drbd_md_io_bio_set = NULL;
+ drbd_md_io_page_pool = NULL;
drbd_ee_mempool = NULL;
drbd_request_mempool = NULL;
drbd_ee_cache = NULL;
@@ -3162,6 +3325,8 @@ static int drbd_create_mempools(void)
drbd_bm_ext_cache = NULL;
drbd_al_ext_cache = NULL;
drbd_pp_pool = NULL;
+ drbd_md_io_page_pool = NULL;
+ drbd_md_io_bio_set = NULL;
/* caches */
drbd_request_cache = kmem_cache_create(
@@ -3185,6 +3350,16 @@ static int drbd_create_mempools(void)
goto Enomem;
/* mempools */
+#ifdef COMPAT_HAVE_BIOSET_CREATE
+ drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
+ if (drbd_md_io_bio_set == NULL)
+ goto Enomem;
+#endif
+
+ drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
+ if (drbd_md_io_page_pool == NULL)
+ goto Enomem;
+
drbd_request_mempool = mempool_create(number,
mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
if (drbd_request_mempool == NULL)
@@ -3262,6 +3437,8 @@ static void drbd_delete_device(unsigned int minor)
if (!mdev)
return;
+ del_timer_sync(&mdev->request_timer);
+
/* paranoia asserts */
if (mdev->open_cnt != 0)
dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
@@ -3666,8 +3843,10 @@ void drbd_md_sync(struct drbd_conf *mdev)
if (!get_ldev_if_state(mdev, D_FAILED))
return;
- mutex_lock(&mdev->md_io_mutex);
- buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev);
+ if (!buffer)
+ goto out;
+
memset(buffer, 0, 512);
buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
@@ -3698,7 +3877,8 @@ void drbd_md_sync(struct drbd_conf *mdev)
* since we updated it on metadata. */
mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
+out:
put_ldev(mdev);
}
@@ -3718,8 +3898,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (!get_ldev_if_state(mdev, D_ATTACHING))
return ERR_IO_MD_DISK;
- mutex_lock(&mdev->md_io_mutex);
- buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev);
+ if (!buffer)
+ goto out;
if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
/* NOTE: can't do normal error processing here as this is
@@ -3780,7 +3961,8 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
mdev->sync_conf.al_extents = 127;
err:
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
+ out:
put_ldev(mdev);
return rv;
@@ -4183,12 +4365,11 @@ const char *drbd_buildtag(void)
static char buildtag[38] = "\0uilt-in";
if (buildtag[0] == 0) {
-#ifdef CONFIG_MODULES
- if (THIS_MODULE != NULL)
- sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
- else
+#ifdef MODULE
+ sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
+#else
+ buildtag[0] = 'b';
#endif
- buildtag[0] = 'b';
}
return buildtag;
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 946166e13953..6d4de6a72e80 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -289,7 +289,7 @@ static int _try_outdate_peer_async(void *data)
*/
spin_lock_irq(&mdev->req_lock);
ns = mdev->state;
- if (ns.conn < C_WF_REPORT_PARAMS) {
+ if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
ns.pdsk = nps;
_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
}
@@ -432,7 +432,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
/* if this was forced, we should consider sync */
if (forced)
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_current_state(mdev);
}
drbd_md_sync(mdev);
@@ -845,9 +845,10 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
Because new from 8.3.8 onwards the peer can use multiple
BIOs for a single peer_request */
if (mdev->state.conn >= C_CONNECTED) {
- if (mdev->agreed_pro_version < 94)
- peer = mdev->peer_max_bio_size;
- else if (mdev->agreed_pro_version == 94)
+ if (mdev->agreed_pro_version < 94) {
+ peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
+ } else if (mdev->agreed_pro_version == 94)
peer = DRBD_MAX_SIZE_H80_PACKET;
else /* drbd 8.3.8 onwards */
peer = DRBD_MAX_BIO_SIZE;
@@ -1032,7 +1033,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
(unsigned long long) drbd_get_max_capacity(nbc),
(unsigned long long) nbc->dc.disk_size);
- retcode = ERR_DISK_TO_SMALL;
+ retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
@@ -1046,7 +1047,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
- retcode = ERR_MD_DISK_TO_SMALL;
+ retcode = ERR_MD_DISK_TOO_SMALL;
dev_warn(DEV, "refusing attach: md-device too small, "
"at least %llu sectors needed for this meta-disk type\n",
(unsigned long long) min_md_device_sectors);
@@ -1057,7 +1058,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
* (we may currently be R_PRIMARY with no local disk...) */
if (drbd_get_max_capacity(nbc) <
drbd_get_capacity(mdev->this_bdev)) {
- retcode = ERR_DISK_TO_SMALL;
+ retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
@@ -1138,7 +1139,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
dev_warn(DEV, "refusing to truncate a consistent device\n");
- retcode = ERR_DISK_TO_SMALL;
+ retcode = ERR_DISK_TOO_SMALL;
goto force_diskless_dec;
}
@@ -1336,17 +1337,34 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
{
enum drbd_ret_code retcode;
int ret;
+ struct detach dt = {};
+
+ if (!detach_from_tags(mdev, nlp->tag_list, &dt)) {
+ reply->ret_code = ERR_MANDATORY_TAG;
+ goto out;
+ }
+
+ if (dt.detach_force) {
+ drbd_force_state(mdev, NS(disk, D_FAILED));
+ reply->ret_code = SS_SUCCESS;
+ goto out;
+ }
+
drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
+ drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
+ drbd_md_put_buffer(mdev);
/* D_FAILED will transition to DISKLESS. */
ret = wait_event_interruptible(mdev->misc_wait,
mdev->state.disk != D_FAILED);
drbd_resume_io(mdev);
+
if ((int)retcode == (int)SS_IS_DISKLESS)
retcode = SS_NOTHING_TO_DO;
if (ret)
retcode = ERR_INTR;
reply->ret_code = retcode;
+out:
return 0;
}
@@ -1711,7 +1729,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
if (rs.no_resync && mdev->agreed_pro_version < 93) {
retcode = ERR_NEED_APV_93;
- goto fail;
+ goto fail_ldev;
}
if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
@@ -1738,6 +1756,10 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
fail:
reply->ret_code = retcode;
return 0;
+
+ fail_ldev:
+ put_ldev(mdev);
+ goto fail;
}
static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
@@ -1941,6 +1963,7 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync. */
+ drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
@@ -1959,6 +1982,7 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
}
+ drbd_resume_io(mdev);
reply->ret_code = retcode;
return 0;
@@ -1980,6 +2004,7 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync. */
+ drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
@@ -1998,6 +2023,7 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
} else
retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
}
+ drbd_resume_io(mdev);
reply->ret_code = retcode;
return 0;
@@ -2170,11 +2196,13 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
/* If there is still bitmap IO pending, e.g. previous resync or verify
* just being finished, wait for it before requesting a new resync. */
+ drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
/* w_make_ov_request expects position to be aligned */
mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
+ drbd_resume_io(mdev);
return 0;
}
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 2959cdfb77f5..869bada2ed06 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -52,7 +52,7 @@ void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
if (unlikely(v >= 1000000)) {
/* cool: > GiByte/s */
seq_printf(seq, "%ld,", v / 1000000);
- v /= 1000000;
+ v %= 1000000;
seq_printf(seq, "%03ld,%03ld", v/1000, v % 1000);
} else if (likely(v >= 1000))
seq_printf(seq, "%ld,%03ld", v/1000, v % 1000);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 436f519bed1c..ea4836e0ae98 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -466,6 +466,7 @@ static int drbd_accept(struct drbd_conf *mdev, const char **what,
goto out;
}
(*newsock)->ops = sock->ops;
+ __module_get((*newsock)->ops->owner);
out:
return err;
@@ -750,6 +751,7 @@ static int drbd_connect(struct drbd_conf *mdev)
{
struct socket *s, *sock, *msock;
int try, h, ok;
+ enum drbd_state_rv rv;
D_ASSERT(!mdev->data.socket);
@@ -888,25 +890,32 @@ retry:
}
}
- if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
- return 0;
-
sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
atomic_set(&mdev->packet_seq, 0);
mdev->peer_seq = 0;
- drbd_thread_start(&mdev->asender);
-
if (drbd_send_protocol(mdev) == -1)
return -1;
+ set_bit(STATE_SENT, &mdev->flags);
drbd_send_sync_param(mdev, &mdev->sync_conf);
drbd_send_sizes(mdev, 0, 0);
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_current_state(mdev);
clear_bit(USE_DEGR_WFC_T, &mdev->flags);
clear_bit(RESIZE_PENDING, &mdev->flags);
+
+ spin_lock_irq(&mdev->req_lock);
+ rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
+ if (mdev->state.conn != C_WF_REPORT_PARAMS)
+ clear_bit(STATE_SENT, &mdev->flags);
+ spin_unlock_irq(&mdev->req_lock);
+
+ if (rv < SS_SUCCESS)
+ return 0;
+
+ drbd_thread_start(&mdev->asender);
mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
return 1;
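The reordered connect path above sets STATE_SENT before the initial packets go out and only clears it once the transition to C_WF_REPORT_PARAMS has been decided under req_lock; _try_outdate_peer_async() checks the bit and after_state_ch() clears it and wakes state_wait. A hedged sketch of the waiting side, assuming a caller that must not race with the handshake (the function name is hypothetical):

static void example_wait_for_handshake(struct drbd_conf *mdev)
{
	/* blocks until after_state_ch() clears STATE_SENT and wakes us */
	wait_event(mdev->state_wait, !test_bit(STATE_SENT, &mdev->flags));
}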
@@ -957,7 +966,7 @@ static void drbd_flush(struct drbd_conf *mdev)
rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
NULL);
if (rv) {
- dev_err(DEV, "local disk flush failed with status %d\n", rv);
+ dev_info(DEV, "local disk flush failed with status %d\n", rv);
/* would rather check on EOPNOTSUPP, but that is not reliable.
* don't try again for ANY return value != 0
* if (rv == -EOPNOTSUPP) */
@@ -1001,13 +1010,14 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
if (epoch_size != 0 &&
atomic_read(&epoch->active) == 0 &&
- test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
+ (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
if (!(ev & EV_CLEANUP)) {
spin_unlock(&mdev->epoch_lock);
drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
spin_lock(&mdev->epoch_lock);
}
- dec_unacked(mdev);
+ if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
+ dec_unacked(mdev);
if (mdev->current_epoch != epoch) {
next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
@@ -1096,7 +1106,11 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
/* In most cases, we will only need one bio. But in case the lower
* level restrictions happen to be different at this offset on this
* side than those of the sending peer, we may need to submit the
- * request in more than one bio. */
+ * request in more than one bio.
+ *
+	 * Plain bio_alloc is good enough here; this is not a DRBD-internally
+	 * generated bio, but one allocated on behalf of the peer.
+ */
next_bio:
bio = bio_alloc(GFP_NOIO, nr_pages);
if (!bio) {
@@ -1583,6 +1597,24 @@ static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int u
return ok;
}
+static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_entry *data_e)
+{
+
+ struct drbd_epoch_entry *rs_e;
+ bool rv = 0;
+
+ spin_lock_irq(&mdev->req_lock);
+ list_for_each_entry(rs_e, &mdev->sync_ee, w.list) {
+ if (overlaps(data_e->sector, data_e->size, rs_e->sector, rs_e->size)) {
+ rv = 1;
+ break;
+ }
+ }
+ spin_unlock_irq(&mdev->req_lock);
+
+ return rv;
+}
+
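overlapping_resync_write() above walks mdev->sync_ee under req_lock and reports whether the incoming application write intersects an in-flight resync write, so a SyncTarget can stall the application write until the resync write has completed. A small, userspace-style sketch of the interval test that overlaps() is assumed to perform (512-byte sectors, sizes in bytes):

#include <stdbool.h>

static bool sectors_overlap(unsigned long long s1, unsigned int size1,
			    unsigned long long s2, unsigned int size2)
{
	unsigned long long e1 = s1 + (size1 >> 9);	/* end sector, exclusive */
	unsigned long long e2 = s2 + (size2 >> 9);

	/* two half-open sector ranges intersect iff neither ends first */
	return s1 < e2 && s2 < e1;
}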
/* Called from receive_Data.
* Synchronize packets on sock with packets on msock.
*
@@ -1826,6 +1858,9 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
list_add(&e->w.list, &mdev->active_ee);
spin_unlock_irq(&mdev->req_lock);
+ if (mdev->state.conn == C_SYNC_TARGET)
+ wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, e));
+
switch (mdev->net_conf->wire_protocol) {
case DRBD_PROT_C:
inc_unacked(mdev);
@@ -2420,7 +2455,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
- dev_info(DEV, "Did not got last syncUUID packet, corrected:\n");
+ dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
return -1;
@@ -2806,10 +2841,10 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
if (apv >= 88) {
if (apv == 88) {
- if (data_size > SHARED_SECRET_MAX) {
- dev_err(DEV, "verify-alg too long, "
- "peer wants %u, accepting only %u byte\n",
- data_size, SHARED_SECRET_MAX);
+ if (data_size > SHARED_SECRET_MAX || data_size == 0) {
+ dev_err(DEV, "verify-alg of wrong size, "
+ "peer wants %u, accepting only up to %u byte\n",
+ data_size, SHARED_SECRET_MAX);
return false;
}
@@ -3168,9 +3203,20 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
os = ns = mdev->state;
spin_unlock_irq(&mdev->req_lock);
- /* peer says his disk is uptodate, while we think it is inconsistent,
- * and this happens while we think we have a sync going on. */
- if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
+ /* If some other part of the code (asender thread, timeout)
+ * already decided to close the connection again,
+ * we must not "re-establish" it here. */
+ if (os.conn <= C_TEAR_DOWN)
+ return false;
+
+ /* If this is the "end of sync" confirmation, usually the peer disk
+ * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
+ * set) resync started in PausedSyncT, or if the timing of pause-/
+ * unpause-sync events has been "just right", the peer disk may
+ * transition from D_CONSISTENT to D_UP_TO_DATE as well.
+ */
+ if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
+ real_peer_disk == D_UP_TO_DATE &&
os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
/* If we are (becoming) SyncSource, but peer is still in sync
* preparation, ignore its uptodate-ness to avoid flapping, it
@@ -3288,7 +3334,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
/* Nowadays only used when forcing a node into primary role and
setting its disk to UpToDate with that */
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_current_state(mdev);
}
}
@@ -3776,6 +3822,13 @@ static void drbd_disconnect(struct drbd_conf *mdev)
if (mdev->state.conn == C_STANDALONE)
return;
+ /* We are about to start the cleanup after connection loss.
+ * Make sure drbd_make_request knows about that.
+ * Usually we should be in some network failure state already,
+ * but just in case we are not, we fix it up here.
+ */
+ drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
+
/* asender does not clean up anything. it must not interfere, either */
drbd_thread_stop(&mdev->asender);
drbd_free_sock(mdev);
@@ -3803,8 +3856,6 @@ static void drbd_disconnect(struct drbd_conf *mdev)
atomic_set(&mdev->rs_pending_cnt, 0);
wake_up(&mdev->misc_wait);
- del_timer(&mdev->request_timer);
-
/* make sure syncer is stopped and w_resume_next_sg queued */
del_timer_sync(&mdev->resync_timer);
resync_timer_fn((unsigned long)mdev);
@@ -4433,7 +4484,7 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
if (mdev->state.conn == C_AHEAD &&
atomic_read(&mdev->ap_in_flight) == 0 &&
- !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
+ !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
mdev->start_resync_timer.expires = jiffies + HZ;
add_timer(&mdev->start_resync_timer);
}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 4a0f314086e5..9c5c84946b05 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -37,6 +37,7 @@ static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req
const int rw = bio_data_dir(bio);
int cpu;
cpu = part_stat_lock();
+ part_round_stats(cpu, &mdev->vdisk->part0);
part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
part_inc_in_flight(&mdev->vdisk->part0, rw);
@@ -214,8 +215,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
{
const unsigned long s = req->rq_state;
struct drbd_conf *mdev = req->mdev;
- /* only WRITES may end up here without a master bio (on barrier ack) */
- int rw = req->master_bio ? bio_data_dir(req->master_bio) : WRITE;
+ int rw = req->rq_state & RQ_WRITE ? WRITE : READ;
/* we must not complete the master bio, while it is
* still being processed by _drbd_send_zc_bio (drbd_send_dblock)
@@ -230,7 +230,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
return;
if (s & RQ_NET_PENDING)
return;
- if (s & RQ_LOCAL_PENDING)
+ if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
return;
if (req->master_bio) {
@@ -277,6 +277,9 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
req->master_bio = NULL;
}
+ if (s & RQ_LOCAL_PENDING)
+ return;
+
if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
/* this is disconnected (local only) operation,
* or protocol C P_WRITE_ACK,
@@ -429,7 +432,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
case completed_ok:
- if (bio_data_dir(req->master_bio) == WRITE)
+ if (req->rq_state & RQ_WRITE)
mdev->writ_cnt += req->size>>9;
else
mdev->read_cnt += req->size>>9;
@@ -438,7 +441,14 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state &= ~RQ_LOCAL_PENDING;
_req_may_be_done_not_susp(req, m);
- put_ldev(mdev);
+ break;
+
+ case abort_disk_io:
+ req->rq_state |= RQ_LOCAL_ABORTED;
+ if (req->rq_state & RQ_WRITE)
+ _req_may_be_done_not_susp(req, m);
+ else
+ goto goto_queue_for_net_read;
break;
case write_completed_with_error:
@@ -447,7 +457,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
__drbd_chk_io_error(mdev, false);
_req_may_be_done_not_susp(req, m);
- put_ldev(mdev);
break;
case read_ahead_completed_with_error:
@@ -455,7 +464,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
_req_may_be_done_not_susp(req, m);
- put_ldev(mdev);
break;
case read_completed_with_error:
@@ -467,7 +475,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
D_ASSERT(!(req->rq_state & RQ_NET_MASK));
__drbd_chk_io_error(mdev, false);
- put_ldev(mdev);
+
+ goto_queue_for_net_read:
/* no point in retrying if there is no good remote data,
* or we have no connection. */
@@ -556,10 +565,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
drbd_queue_work(&mdev->data.work, &req->w);
break;
- case oos_handed_to_network:
- /* actually the same */
+ case read_retry_remote_canceled:
case send_canceled:
- /* treat it the same */
case send_failed:
/* real cleanup will be done from tl_clear. just update flags
* so it is no longer marked as on the worker queue */
@@ -589,17 +596,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
}
req->rq_state &= ~RQ_NET_QUEUED;
req->rq_state |= RQ_NET_SENT;
- /* because _drbd_send_zc_bio could sleep, and may want to
- * dereference the bio even after the "write_acked_by_peer" and
- * "completed_ok" events came in, once we return from
- * _drbd_send_zc_bio (drbd_send_dblock), we have to check
- * whether it is done already, and end it. */
_req_may_be_done_not_susp(req, m);
break;
- case read_retry_remote_canceled:
+ case oos_handed_to_network:
+ /* Was not set PENDING, no longer QUEUED, so is now DONE
+ * as far as this connection is concerned. */
req->rq_state &= ~RQ_NET_QUEUED;
- /* fall through, in case we raced with drbd_disconnect */
+ req->rq_state |= RQ_NET_DONE;
+ _req_may_be_done_not_susp(req, m);
+ break;
+
case connection_lost_while_pending:
/* transfer log cleanup after connection loss */
/* assert something? */
@@ -616,8 +623,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
_req_may_be_done(req, m); /* Allowed while state.susp */
break;
- case write_acked_by_peer_and_sis:
- req->rq_state |= RQ_NET_SIS;
case conflict_discarded_by_peer:
/* for discarded conflicting writes of multiple primaries,
* there is no need to keep anything in the tl, potential
@@ -628,18 +633,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
(unsigned long long)req->sector, req->size);
req->rq_state |= RQ_NET_DONE;
/* fall through */
+ case write_acked_by_peer_and_sis:
case write_acked_by_peer:
+ if (what == write_acked_by_peer_and_sis)
+ req->rq_state |= RQ_NET_SIS;
/* protocol C; successfully written on peer.
- * Nothing to do here.
+ * Nothing more to do here.
* We want to keep the tl in place for all protocols, to cater
- * for volatile write-back caches on lower level devices.
- *
- * A barrier request is expected to have forced all prior
- * requests onto stable storage, so completion of a barrier
- * request could set NET_DONE right here, and not wait for the
- * P_BARRIER_ACK, but that is an unnecessary optimization. */
+ * for volatile write-back caches on lower level devices. */
- /* this makes it effectively the same as for: */
case recv_acked_by_peer:
/* protocol B; pretends to be successfully written on peer.
* see also notes above in handed_over_to_network about
@@ -773,6 +775,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
int local, remote, send_oos = 0;
int err = -EIO;
int ret = 0;
+ union drbd_state s;
/* allocate outside of all locks; */
req = drbd_req_new(mdev, bio);
@@ -834,8 +837,9 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
drbd_al_begin_io(mdev, sector);
}
- remote = remote && drbd_should_do_remote(mdev->state);
- send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
+ s = mdev->state;
+ remote = remote && drbd_should_do_remote(s);
+ send_oos = rw == WRITE && drbd_should_send_oos(s);
D_ASSERT(!(remote && send_oos));
if (!(local || remote) && !is_susp(mdev->state)) {
@@ -867,7 +871,7 @@ allocate_barrier:
if (is_susp(mdev->state)) {
/* If we got suspended, use the retry mechanism of
- generic_make_request() to restart processing of this
+ drbd_make_request() to restart processing of this
bio. In the next call to drbd_make_request
we sleep in inc_ap_bio() */
ret = 1;
@@ -1091,7 +1095,6 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
*/
D_ASSERT(bio->bi_size > 0);
D_ASSERT((bio->bi_size & 0x1ff) == 0);
- D_ASSERT(bio->bi_idx == 0);
/* to make some things easier, force alignment of requests within the
* granularity of our hash tables */
@@ -1099,8 +1102,9 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT;
if (likely(s_enr == e_enr)) {
- inc_ap_bio(mdev, 1);
- drbd_make_request_common(mdev, bio, start_time);
+ do {
+ inc_ap_bio(mdev, 1);
+ } while (drbd_make_request_common(mdev, bio, start_time));
return;
}
@@ -1196,36 +1200,66 @@ void request_timer_fn(unsigned long data)
struct drbd_conf *mdev = (struct drbd_conf *) data;
struct drbd_request *req; /* oldest request */
struct list_head *le;
- unsigned long et = 0; /* effective timeout = ko_count * timeout */
+ unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
+ unsigned long now;
if (get_net_conf(mdev)) {
- et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count;
+ if (mdev->state.conn >= C_WF_REPORT_PARAMS)
+ ent = mdev->net_conf->timeout*HZ/10
+ * mdev->net_conf->ko_count;
put_net_conf(mdev);
}
- if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
+ if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */
+ dt = mdev->ldev->dc.disk_timeout * HZ / 10;
+ put_ldev(mdev);
+ }
+ et = min_not_zero(dt, ent);
+
+ if (!et)
return; /* Recurring timer stopped */
+ now = jiffies;
+
spin_lock_irq(&mdev->req_lock);
le = &mdev->oldest_tle->requests;
if (list_empty(le)) {
spin_unlock_irq(&mdev->req_lock);
- mod_timer(&mdev->request_timer, jiffies + et);
+ mod_timer(&mdev->request_timer, now + et);
return;
}
le = le->prev;
req = list_entry(le, struct drbd_request, tl_requests);
- if (time_is_before_eq_jiffies(req->start_time + et)) {
- if (req->rq_state & RQ_NET_PENDING) {
- dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
- _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE, NULL);
- } else {
- dev_warn(DEV, "Local backing block device frozen?\n");
- mod_timer(&mdev->request_timer, jiffies + et);
- }
- } else {
- mod_timer(&mdev->request_timer, req->start_time + et);
- }
+ /* The request is considered timed out, if
+ * - we have some effective timeout from the configuration,
+ * with above state restrictions applied,
+ * - the oldest request is waiting for a response from the network
+ * resp. the local disk,
+ * - the oldest request is in fact older than the effective timeout,
+ * - the connection was established (resp. disk was attached)
+ * for longer than the timeout already.
+ * Note that for 32bit jiffies and very stable connections/disks,
+ * we may have a wrap around, which is catched by
+	 * we may have a wrap-around, which is caught by
+ *
+ * Side effect: once per 32bit wrap-around interval, which means every
+ * ~198 days with 250 HZ, we have a window where the timeout would need
+ * to expire twice (worst case) to become effective. Good enough.
+ */
+ if (ent && req->rq_state & RQ_NET_PENDING &&
+ time_after(now, req->start_time + ent) &&
+ !time_in_range(now, mdev->last_reconnect_jif, mdev->last_reconnect_jif + ent)) {
+ dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
+ _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
+ }
+ if (dt && req->rq_state & RQ_LOCAL_PENDING &&
+ time_after(now, req->start_time + dt) &&
+ !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
+ dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
+ __drbd_chk_io_error(mdev, 1);
+ }
+ nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
spin_unlock_irq(&mdev->req_lock);
+ mod_timer(&mdev->request_timer, nt);
}
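request_timer_fn() now derives the effective timeout as min_not_zero(disk-timeout, ko-count * net-timeout) and only escalates when the oldest request is older than that timeout and the connection (or disk attach) has also been up for longer than the timeout, so freshly re-established sessions are not killed immediately. The shape of that decision, as a userspace-style sketch that ignores jiffies wrap-around (the real code uses time_after()/time_in_range() for that):

#include <stdbool.h>

static bool request_timed_out(unsigned long now, unsigned long req_start,
			      unsigned long last_established,
			      unsigned long timeout)
{
	if (!timeout)
		return false;		/* nothing configured, timer effectively off */
	if (now <= req_start + timeout)
		return false;		/* oldest request is not old enough yet */
	if (now <= last_established + timeout)
		return false;		/* connection/disk came (back) up recently */
	return true;
}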
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 68a234a5fdc5..3d2111919486 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -105,6 +105,7 @@ enum drbd_req_event {
read_completed_with_error,
read_ahead_completed_with_error,
write_completed_with_error,
+ abort_disk_io,
completed_ok,
resend,
fail_frozen_disk_io,
@@ -118,18 +119,21 @@ enum drbd_req_event {
* same time, so we should hold the request lock anyways.
*/
enum drbd_req_state_bits {
- /* 210
- * 000: no local possible
- * 001: to be submitted
+ /* 3210
+ * 0000: no local possible
+ * 0001: to be submitted
* UNUSED, we could map: 011: submitted, completion still pending
- * 110: completed ok
- * 010: completed with error
+ * 0110: completed ok
+ * 0010: completed with error
+ * 1001: Aborted (before completion)
+ * 1x10: Aborted and completed -> free
*/
__RQ_LOCAL_PENDING,
__RQ_LOCAL_COMPLETED,
__RQ_LOCAL_OK,
+ __RQ_LOCAL_ABORTED,
- /* 76543
+ /* 87654
* 00000: no network possible
* 00001: to be send
* 00011: to be send, on worker queue
@@ -199,8 +203,9 @@ enum drbd_req_state_bits {
#define RQ_LOCAL_PENDING (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK (1UL << __RQ_LOCAL_OK)
+#define RQ_LOCAL_ABORTED (1UL << __RQ_LOCAL_ABORTED)
-#define RQ_LOCAL_MASK ((RQ_LOCAL_OK << 1)-1) /* 0x07 */
+#define RQ_LOCAL_MASK ((RQ_LOCAL_ABORTED << 1)-1)
#define RQ_NET_PENDING (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED (1UL << __RQ_NET_QUEUED)
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 4d3e6f6213ba..620c70ff2231 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -70,11 +70,29 @@ rwlock_t global_state_lock;
void drbd_md_io_complete(struct bio *bio, int error)
{
struct drbd_md_io *md_io;
+ struct drbd_conf *mdev;
md_io = (struct drbd_md_io *)bio->bi_private;
+ mdev = container_of(md_io, struct drbd_conf, md_io);
+
md_io->error = error;
- complete(&md_io->event);
+ /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
+ * to timeout on the lower level device, and eventually detach from it.
+ * If this io completion runs after that timeout expired, this
+ * drbd_md_put_buffer() may allow us to finally try and re-attach.
+ * During normal operation, this only puts that extra reference
+ * down to 1 again.
+ * Make sure we first drop the reference, and only then signal
+ * completion, or we may (in drbd_al_read_log()) cycle so fast into the
+ * next drbd_md_sync_page_io(), that we trigger the
+ * ASSERT(atomic_read(&mdev->md_io_in_use) == 1) there.
+ */
+ drbd_md_put_buffer(mdev);
+ md_io->done = 1;
+ wake_up(&mdev->misc_wait);
+ bio_put(bio);
+ put_ldev(mdev);
}
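The completion handler above drops its md_io reference before setting ->done and waking misc_wait, so a submitter that already gave up after a timeout can later re-attach once the late completion releases the buffer. A hedged sketch of what the waiting side is assumed to look like; this is not the actual _drbd_md_sync_page_io() body:

static int example_md_io_wait(struct drbd_conf *mdev, long timeout_jiffies)
{
	wait_event_timeout(mdev->misc_wait, mdev->md_io.done, timeout_jiffies);
	if (!mdev->md_io.done)
		return -EIO;		/* lower-level device did not answer in time */
	return mdev->md_io.error;	/* 0 on success, else the bio error */
}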
/* reads on behalf of the partner,
@@ -226,6 +244,7 @@ void drbd_endio_pri(struct bio *bio, int error)
spin_lock_irqsave(&mdev->req_lock, flags);
__req_mod(req, what, &m);
spin_unlock_irqrestore(&mdev->req_lock, flags);
+ put_ldev(mdev);
if (m.bio)
complete_master_bio(mdev, &m);
@@ -290,7 +309,7 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
sg_init_table(&sg, 1);
crypto_hash_init(&desc);
- __bio_for_each_segment(bvec, bio, i, 0) {
+ bio_for_each_segment(bvec, bio, i) {
sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
crypto_hash_update(&desc, &sg, sg.length);
}
@@ -728,7 +747,7 @@ int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
drbd_start_resync(mdev, C_SYNC_SOURCE);
- clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
+ clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
return 1;
}
@@ -1519,14 +1538,14 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
}
drbd_state_lock(mdev);
-
+ write_lock_irq(&global_state_lock);
if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
+ write_unlock_irq(&global_state_lock);
drbd_state_unlock(mdev);
return;
}
- write_lock_irq(&global_state_lock);
- ns = mdev->state;
+ ns.i = mdev->state.i;
ns.aftr_isp = !_drbd_may_sync_now(mdev);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b0b00d70c166..cce7df367b79 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -551,7 +551,7 @@ static void floppy_ready(void);
static void floppy_start(void);
static void process_fd_request(void);
static void recalibrate_floppy(void);
-static void floppy_shutdown(unsigned long);
+static void floppy_shutdown(struct work_struct *);
static int floppy_request_regions(int);
static void floppy_release_regions(int);
@@ -588,6 +588,8 @@ static int buffer_max = -1;
static struct floppy_fdc_state fdc_state[N_FDC];
static int fdc; /* current fdc */
+static struct workqueue_struct *floppy_wq;
+
static struct floppy_struct *_floppy = floppy_type;
static unsigned char current_drive;
static long current_count_sectors;
@@ -629,16 +631,15 @@ static inline void set_debugt(void) { }
static inline void debugt(const char *func, const char *msg) { }
#endif /* DEBUGT */
-typedef void (*timeout_fn)(unsigned long);
-static DEFINE_TIMER(fd_timeout, floppy_shutdown, 0, 0);
+static DECLARE_DELAYED_WORK(fd_timeout, floppy_shutdown);
static const char *timeout_message;
static void is_alive(const char *func, const char *message)
{
/* this routine checks whether the floppy driver is "alive" */
if (test_bit(0, &fdc_busy) && command_status < 2 &&
- !timer_pending(&fd_timeout)) {
+ !delayed_work_pending(&fd_timeout)) {
DPRINT("%s: timeout handler died. %s\n", func, message);
}
}
@@ -666,15 +667,18 @@ static int output_log_pos;
static void __reschedule_timeout(int drive, const char *message)
{
+ unsigned long delay;
+
if (drive == current_reqD)
drive = current_drive;
- del_timer(&fd_timeout);
+
if (drive < 0 || drive >= N_DRIVE) {
- fd_timeout.expires = jiffies + 20UL * HZ;
+ delay = 20UL * HZ;
drive = 0;
} else
- fd_timeout.expires = jiffies + UDP->timeout;
- add_timer(&fd_timeout);
+ delay = UDP->timeout;
+
+ queue_delayed_work(floppy_wq, &fd_timeout, delay);
if (UDP->flags & FD_DEBUG)
DPRINT("reschedule timeout %s\n", message);
timeout_message = message;
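This file-wide conversion replaces the bare fd_timeout/fd_timer kernel timers with delayed work queued on a dedicated ordered workqueue (floppy_wq), so the timeout handler and the driver bottom half run serialized in process context. A minimal, self-contained sketch of that pattern with hypothetical names (my_wq, my_timeout_fn), not the floppy code itself:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *my_wq;
static void my_timeout_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_timeout, my_timeout_fn);

static void my_timeout_fn(struct work_struct *work)
{
	/* runs in process context, serialized on my_wq */
}

static int my_setup(void)
{
	my_wq = alloc_ordered_workqueue("mydrv", 0);
	if (!my_wq)
		return -ENOMEM;
	queue_delayed_work(my_wq, &my_timeout, 20UL * HZ);	/* arm the "timer" */
	return 0;
}

static void my_teardown(void)
{
	cancel_delayed_work_sync(&my_timeout);	/* replaces del_timer_sync() */
	destroy_workqueue(my_wq);
}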
@@ -872,7 +876,7 @@ static int lock_fdc(int drive, bool interruptible)
command_status = FD_COMMAND_NONE;
- __reschedule_timeout(drive, "lock fdc");
+ reschedule_timeout(drive, "lock fdc");
set_fdc(drive);
return 0;
}
@@ -880,23 +884,15 @@ static int lock_fdc(int drive, bool interruptible)
/* unlocks the driver */
static void unlock_fdc(void)
{
- unsigned long flags;
-
- raw_cmd = NULL;
if (!test_bit(0, &fdc_busy))
DPRINT("FDC access conflict!\n");
- if (do_floppy)
- DPRINT("device interrupt still active at FDC release: %pf!\n",
- do_floppy);
+ raw_cmd = NULL;
command_status = FD_COMMAND_NONE;
- spin_lock_irqsave(&floppy_lock, flags);
- del_timer(&fd_timeout);
+ __cancel_delayed_work(&fd_timeout);
+ do_floppy = NULL;
cont = NULL;
clear_bit(0, &fdc_busy);
- if (current_req || set_next_request())
- do_fd_request(current_req->q);
- spin_unlock_irqrestore(&floppy_lock, flags);
wake_up(&fdc_wait);
}
@@ -968,26 +964,24 @@ static DECLARE_WORK(floppy_work, NULL);
static void schedule_bh(void (*handler)(void))
{
+ WARN_ON(work_pending(&floppy_work));
+
PREPARE_WORK(&floppy_work, (work_func_t)handler);
- schedule_work(&floppy_work);
+ queue_work(floppy_wq, &floppy_work);
}
-static DEFINE_TIMER(fd_timer, NULL, 0, 0);
+static DECLARE_DELAYED_WORK(fd_timer, NULL);
static void cancel_activity(void)
{
- unsigned long flags;
-
- spin_lock_irqsave(&floppy_lock, flags);
do_floppy = NULL;
- PREPARE_WORK(&floppy_work, (work_func_t)empty);
- del_timer(&fd_timer);
- spin_unlock_irqrestore(&floppy_lock, flags);
+ cancel_delayed_work_sync(&fd_timer);
+ cancel_work_sync(&floppy_work);
}
/* this function makes sure that the disk stays in the drive during the
* transfer */
-static void fd_watchdog(void)
+static void fd_watchdog(struct work_struct *arg)
{
debug_dcl(DP->flags, "calling disk change from watchdog\n");
@@ -997,21 +991,20 @@ static void fd_watchdog(void)
cont->done(0);
reset_fdc();
} else {
- del_timer(&fd_timer);
- fd_timer.function = (timeout_fn)fd_watchdog;
- fd_timer.expires = jiffies + HZ / 10;
- add_timer(&fd_timer);
+ cancel_delayed_work(&fd_timer);
+ PREPARE_DELAYED_WORK(&fd_timer, fd_watchdog);
+ queue_delayed_work(floppy_wq, &fd_timer, HZ / 10);
}
}
static void main_command_interrupt(void)
{
- del_timer(&fd_timer);
+ cancel_delayed_work(&fd_timer);
cont->interrupt();
}
/* waits for a delay (spinup or select) to pass */
-static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
+static int fd_wait_for_completion(unsigned long expires, work_func_t function)
{
if (FDCS->reset) {
reset_fdc(); /* do the reset during sleep to win time
@@ -1020,11 +1013,10 @@ static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
return 1;
}
- if (time_before(jiffies, delay)) {
- del_timer(&fd_timer);
- fd_timer.function = function;
- fd_timer.expires = delay;
- add_timer(&fd_timer);
+ if (time_before(jiffies, expires)) {
+ cancel_delayed_work(&fd_timer);
+ PREPARE_DELAYED_WORK(&fd_timer, function);
+ queue_delayed_work(floppy_wq, &fd_timer, expires - jiffies);
return 1;
}
return 0;
@@ -1342,7 +1334,7 @@ static int fdc_dtr(void)
*/
FDCS->dtr = raw_cmd->rate & 3;
return fd_wait_for_completion(jiffies + 2UL * HZ / 100,
- (timeout_fn)floppy_ready);
+ (work_func_t)floppy_ready);
} /* fdc_dtr */
static void tell_sector(void)
@@ -1447,7 +1439,7 @@ static void setup_rw_floppy(void)
int flags;
int dflags;
unsigned long ready_date;
- timeout_fn function;
+ work_func_t function;
flags = raw_cmd->flags;
if (flags & (FD_RAW_READ | FD_RAW_WRITE))
@@ -1461,9 +1453,9 @@ static void setup_rw_floppy(void)
*/
if (time_after(ready_date, jiffies + DP->select_delay)) {
ready_date -= DP->select_delay;
- function = (timeout_fn)floppy_start;
+ function = (work_func_t)floppy_start;
} else
- function = (timeout_fn)setup_rw_floppy;
+ function = (work_func_t)setup_rw_floppy;
/* wait until the floppy is spinning fast enough */
if (fd_wait_for_completion(ready_date, function))
@@ -1493,7 +1485,7 @@ static void setup_rw_floppy(void)
inr = result();
cont->interrupt();
} else if (flags & FD_RAW_NEED_DISK)
- fd_watchdog();
+ fd_watchdog(NULL);
}
static int blind_seek;
@@ -1802,20 +1794,22 @@ static void show_floppy(void)
pr_info("do_floppy=%pf\n", do_floppy);
if (work_pending(&floppy_work))
pr_info("floppy_work.func=%pf\n", floppy_work.func);
- if (timer_pending(&fd_timer))
- pr_info("fd_timer.function=%pf\n", fd_timer.function);
- if (timer_pending(&fd_timeout)) {
- pr_info("timer_function=%pf\n", fd_timeout.function);
- pr_info("expires=%lu\n", fd_timeout.expires - jiffies);
- pr_info("now=%lu\n", jiffies);
- }
+ if (delayed_work_pending(&fd_timer))
+ pr_info("delayed work.function=%p expires=%ld\n",
+ fd_timer.work.func,
+ fd_timer.timer.expires - jiffies);
+ if (delayed_work_pending(&fd_timeout))
+ pr_info("timer_function=%p expires=%ld\n",
+ fd_timeout.work.func,
+ fd_timeout.timer.expires - jiffies);
+
pr_info("cont=%p\n", cont);
pr_info("current_req=%p\n", current_req);
pr_info("command_status=%d\n", command_status);
pr_info("\n");
}
-static void floppy_shutdown(unsigned long data)
+static void floppy_shutdown(struct work_struct *arg)
{
unsigned long flags;
@@ -1868,7 +1862,7 @@ static int start_motor(void (*function)(void))
/* wait_for_completion also schedules reset if needed. */
return fd_wait_for_completion(DRS->select_date + DP->select_delay,
- (timeout_fn)function);
+ (work_func_t)function);
}
static void floppy_ready(void)
@@ -2821,7 +2815,6 @@ do_request:
spin_lock_irq(&floppy_lock);
pending = set_next_request();
spin_unlock_irq(&floppy_lock);
-
if (!pending) {
do_floppy = NULL;
unlock_fdc();
@@ -2898,13 +2891,15 @@ static void do_fd_request(struct request_queue *q)
current_req->cmd_flags))
return;
- if (test_bit(0, &fdc_busy)) {
+ if (test_and_set_bit(0, &fdc_busy)) {
/* fdc busy, this new request will be treated when the
current one is done */
is_alive(__func__, "old request running");
return;
}
- lock_fdc(MAXTIMEOUT, false);
+ command_status = FD_COMMAND_NONE;
+ __reschedule_timeout(MAXTIMEOUT, "fd_request");
+ set_fdc(0);
process_fd_request();
is_alive(__func__, "");
}
@@ -3612,9 +3607,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
mutex_lock(&floppy_mutex);
mutex_lock(&open_lock);
- if (UDRS->fd_ref < 0)
- UDRS->fd_ref = 0;
- else if (!UDRS->fd_ref--) {
+ if (!UDRS->fd_ref--) {
DPRINT("floppy_release with fd_ref == 0");
UDRS->fd_ref = 0;
}
@@ -3650,13 +3643,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
set_bit(FD_VERIFY_BIT, &UDRS->flags);
}
- if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (mode & FMODE_EXCL)))
- goto out2;
-
- if (mode & FMODE_EXCL)
- UDRS->fd_ref = -1;
- else
- UDRS->fd_ref++;
+ UDRS->fd_ref++;
opened_bdev[drive] = bdev;
@@ -3719,10 +3706,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
mutex_unlock(&floppy_mutex);
return 0;
out:
- if (UDRS->fd_ref < 0)
- UDRS->fd_ref = 0;
- else
- UDRS->fd_ref--;
+ UDRS->fd_ref--;
+
if (!UDRS->fd_ref)
opened_bdev[drive] = NULL;
out2:
@@ -4159,10 +4144,16 @@ static int __init floppy_init(void)
goto out_put_disk;
}
+ floppy_wq = alloc_ordered_workqueue("floppy", 0);
+ if (!floppy_wq) {
+ err = -ENOMEM;
+ goto out_put_disk;
+ }
+
disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock);
if (!disks[dr]->queue) {
err = -ENOMEM;
- goto out_put_disk;
+ goto out_destroy_workq;
}
blk_queue_max_hw_sectors(disks[dr]->queue, 64);
@@ -4213,7 +4204,7 @@ static int __init floppy_init(void)
use_virtual_dma = can_use_virtual_dma & 1;
fdc_state[0].address = FDC1;
if (fdc_state[0].address == -1) {
- del_timer_sync(&fd_timeout);
+ cancel_delayed_work(&fd_timeout);
err = -ENODEV;
goto out_unreg_region;
}
@@ -4224,7 +4215,7 @@ static int __init floppy_init(void)
fdc = 0; /* reset fdc in case of unexpected interrupt */
err = floppy_grab_irq_and_dma();
if (err) {
- del_timer_sync(&fd_timeout);
+ cancel_delayed_work(&fd_timeout);
err = -EBUSY;
goto out_unreg_region;
}
@@ -4281,13 +4272,13 @@ static int __init floppy_init(void)
user_reset_fdc(-1, FD_RESET_ALWAYS, false);
}
fdc = 0;
- del_timer_sync(&fd_timeout);
+ cancel_delayed_work(&fd_timeout);
current_drive = 0;
initialized = true;
if (have_no_fdc) {
DPRINT("no floppy controllers found\n");
err = have_no_fdc;
- goto out_flush_work;
+ goto out_release_dma;
}
for (drive = 0; drive < N_DRIVE; drive++) {
@@ -4302,7 +4293,7 @@ static int __init floppy_init(void)
err = platform_device_register(&floppy_device[drive]);
if (err)
- goto out_flush_work;
+ goto out_release_dma;
err = device_create_file(&floppy_device[drive].dev,
&dev_attr_cmos);
@@ -4320,13 +4311,14 @@ static int __init floppy_init(void)
out_unreg_platform_dev:
platform_device_unregister(&floppy_device[drive]);
-out_flush_work:
- flush_work_sync(&floppy_work);
+out_release_dma:
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
out_unreg_region:
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
platform_driver_unregister(&floppy_driver);
+out_destroy_workq:
+ destroy_workqueue(floppy_wq);
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
@@ -4397,7 +4389,7 @@ static int floppy_grab_irq_and_dma(void)
* We might have scheduled a free_irq(), wait it to
* drain first:
*/
- flush_work_sync(&floppy_work);
+ flush_workqueue(floppy_wq);
if (fd_request_irq()) {
DPRINT("Unable to grab IRQ%d for the floppy driver\n",
@@ -4488,9 +4480,9 @@ static void floppy_release_irq_and_dma(void)
pr_info("motor off timer %d still active\n", drive);
#endif
- if (timer_pending(&fd_timeout))
+ if (delayed_work_pending(&fd_timeout))
pr_info("floppy timer still active:%s\n", timeout_message);
- if (timer_pending(&fd_timer))
+ if (delayed_work_pending(&fd_timer))
pr_info("auxiliary floppy timer still active\n");
if (work_pending(&floppy_work))
pr_info("work still pending\n");
@@ -4560,8 +4552,9 @@ static void __exit floppy_module_exit(void)
put_disk(disks[drive]);
}
- del_timer_sync(&fd_timeout);
- del_timer_sync(&fd_timer);
+ cancel_delayed_work_sync(&fd_timeout);
+ cancel_delayed_work_sync(&fd_timer);
+ destroy_workqueue(floppy_wq);
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 304000c3d433..264bc77dcb91 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -294,18 +294,16 @@ static int hba_reset_nosleep(struct driver_data *dd)
*/
static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
{
- unsigned long flags = 0;
-
atomic_set(&port->commands[tag].active, 1);
- spin_lock_irqsave(&port->cmd_issue_lock, flags);
+ spin_lock(&port->cmd_issue_lock);
writel((1 << MTIP_TAG_BIT(tag)),
port->s_active[MTIP_TAG_INDEX(tag)]);
writel((1 << MTIP_TAG_BIT(tag)),
port->cmd_issue[MTIP_TAG_INDEX(tag)]);
- spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
+ spin_unlock(&port->cmd_issue_lock);
/* Set the command's timeout value.*/
port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
@@ -436,8 +434,7 @@ static void mtip_init_port(struct mtip_port *port)
writel(0xFFFFFFFF, port->completed[i]);
/* Clear any pending interrupts for this port */
- writel(readl(port->dd->mmio + PORT_IRQ_STAT),
- port->dd->mmio + PORT_IRQ_STAT);
+ writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
/* Clear any pending interrupts on the HBA. */
writel(readl(port->dd->mmio + HOST_IRQ_STAT),
@@ -782,13 +779,24 @@ static void mtip_handle_tfe(struct driver_data *dd)
/* Stop the timer to prevent command timeouts. */
del_timer(&port->cmd_timer);
+ set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
+
+ if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
+ test_bit(MTIP_TAG_INTERNAL, port->allocated)) {
+ cmd = &port->commands[MTIP_TAG_INTERNAL];
+ dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
+
+ atomic_inc(&cmd->active); /* active > 1 indicates error */
+ if (cmd->comp_data && cmd->comp_func) {
+ cmd->comp_func(port, MTIP_TAG_INTERNAL,
+ cmd->comp_data, PORT_IRQ_TF_ERR);
+ }
+ goto handle_tfe_exit;
+ }
/* clear the tag accumulator */
memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
- /* Set eh_active */
- set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-
/* Loop through all the groups */
for (group = 0; group < dd->slot_groups; group++) {
completed = readl(port->completed[group]);
@@ -940,6 +948,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
}
print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
+handle_tfe_exit:
/* clear eh_active */
clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
wake_up_interruptible(&port->svc_wait);
@@ -961,6 +970,8 @@ static inline void mtip_process_sdbf(struct driver_data *dd)
/* walk all bits in all slot groups */
for (group = 0; group < dd->slot_groups; group++) {
completed = readl(port->completed[group]);
+ if (!completed)
+ continue;
/* clear completed status register in the hardware.*/
writel(completed, port->completed[group]);
@@ -1329,22 +1340,6 @@ static int mtip_exec_internal_command(struct mtip_port *port,
}
rv = -EAGAIN;
}
-
- if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
- & (1 << MTIP_TAG_INTERNAL)) {
- dev_warn(&port->dd->pdev->dev,
- "Retiring internal command but CI is 1.\n");
- if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &port->dd->dd_flag)) {
- hba_reset_nosleep(port->dd);
- rv = -ENXIO;
- } else {
- mtip_restart_port(port);
- rv = -EAGAIN;
- }
- goto exec_ic_exit;
- }
-
} else {
/* Spin for <timeout> checking if command still outstanding */
timeout = jiffies + msecs_to_jiffies(timeout);
@@ -1361,21 +1356,25 @@ static int mtip_exec_internal_command(struct mtip_port *port,
rv = -ENXIO;
goto exec_ic_exit;
}
+ if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) {
+ atomic_inc(&int_cmd->active); /* error */
+ break;
+ }
}
+ }
- if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+ if (atomic_read(&int_cmd->active) > 1) {
+ dev_err(&port->dd->pdev->dev,
+ "Internal command [%02X] failed\n", fis->command);
+ rv = -EIO;
+ }
+ if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
& (1 << MTIP_TAG_INTERNAL)) {
- dev_err(&port->dd->pdev->dev,
- "Internal command did not complete [atomic]\n");
+ rv = -ENXIO;
+ if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &port->dd->dd_flag)) {
+ mtip_restart_port(port);
rv = -EAGAIN;
- if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &port->dd->dd_flag)) {
- hba_reset_nosleep(port->dd);
- rv = -ENXIO;
- } else {
- mtip_restart_port(port);
- rv = -EAGAIN;
- }
}
}
exec_ic_exit:
@@ -1893,13 +1892,33 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
void __user *user_buffer)
{
struct host_to_dev_fis fis;
- struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
+ struct host_to_dev_fis *reply;
+ u8 *buf = NULL;
+ dma_addr_t dma_addr = 0;
+ int rv = 0, xfer_sz = command[3];
+
+ if (xfer_sz) {
+ if (!user_buffer)
+ return -EFAULT;
+
+ buf = dmam_alloc_coherent(&port->dd->pdev->dev,
+ ATA_SECT_SIZE * xfer_sz,
+ &dma_addr,
+ GFP_KERNEL);
+ if (!buf) {
+ dev_err(&port->dd->pdev->dev,
+ "Memory allocation failed (%d bytes)\n",
+ ATA_SECT_SIZE * xfer_sz);
+ return -ENOMEM;
+ }
+ memset(buf, 0, ATA_SECT_SIZE * xfer_sz);
+ }
/* Build the FIS. */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
- fis.type = 0x27;
- fis.opts = 1 << 7;
- fis.command = command[0];
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = command[0];
fis.features = command[2];
fis.sect_count = command[3];
if (fis.command == ATA_CMD_SMART) {
@@ -1908,6 +1927,11 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
fis.cyl_hi = 0xC2;
}
+ if (xfer_sz)
+ reply = (port->rxfis + RX_FIS_PIO_SETUP);
+ else
+ reply = (port->rxfis + RX_FIS_D2H_REG);
+
dbg_printk(MTIP_DRV_NAME
" %s: User Command: cmd %x, sect %x, "
"feat %x, sectcnt %x\n",
@@ -1917,43 +1941,46 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
command[2],
command[3]);
- memset(port->sector_buffer, 0x00, ATA_SECT_SIZE);
-
/* Execute the command. */
if (mtip_exec_internal_command(port,
&fis,
5,
- port->sector_buffer_dma,
- (command[3] != 0) ? ATA_SECT_SIZE : 0,
+ (xfer_sz ? dma_addr : 0),
+ (xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
0,
GFP_KERNEL,
MTIP_IOCTL_COMMAND_TIMEOUT_MS)
< 0) {
- return -1;
+ rv = -EFAULT;
+ goto exit_drive_command;
}
/* Collect the completion status. */
command[0] = reply->command; /* Status*/
command[1] = reply->features; /* Error*/
- command[2] = command[3];
+ command[2] = reply->sect_count;
dbg_printk(MTIP_DRV_NAME
" %s: Completion Status: stat %x, "
- "err %x, cmd %x\n",
+ "err %x, nsect %x\n",
__func__,
command[0],
command[1],
command[2]);
- if (user_buffer && command[3]) {
+ if (xfer_sz) {
if (copy_to_user(user_buffer,
- port->sector_buffer,
+ buf,
ATA_SECT_SIZE * command[3])) {
- return -EFAULT;
+ rv = -EFAULT;
+ goto exit_drive_command;
}
}
-
- return 0;
+exit_drive_command:
+ if (buf)
+ dmam_free_coherent(&port->dd->pdev->dev,
+ ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
+ return rv;
}
/*
@@ -2003,6 +2030,32 @@ static unsigned int implicit_sector(unsigned char command,
return rv;
}
+static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
+{
+ switch (fis->command) {
+ case ATA_CMD_DOWNLOAD_MICRO:
+ *timeout = 120000; /* 2 minutes */
+ break;
+ case ATA_CMD_SEC_ERASE_UNIT:
+ case 0xFC:
+ *timeout = 240000; /* 4 minutes */
+ break;
+ case ATA_CMD_STANDBYNOW1:
+ *timeout = 10000; /* 10 seconds */
+ break;
+ case 0xF7:
+ case 0xFA:
+ *timeout = 60000; /* 60 seconds */
+ break;
+ case ATA_CMD_SMART:
+ *timeout = 15000; /* 15 seconds */
+ break;
+ default:
+ *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+ break;
+ }
+}
+
/*
* Executes a taskfile
* See ide_taskfile_ioctl() for derivation
@@ -2023,7 +2076,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
unsigned int taskin = 0;
unsigned int taskout = 0;
u8 nsect = 0;
- unsigned int timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+ unsigned int timeout;
unsigned int force_single_sector;
unsigned int transfer_size;
unsigned long task_file_data;
@@ -2153,32 +2206,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
fis.lba_hi,
fis.device);
- switch (fis.command) {
- case ATA_CMD_DOWNLOAD_MICRO:
- /* Change timeout for Download Microcode to 2 minutes */
- timeout = 120000;
- break;
- case ATA_CMD_SEC_ERASE_UNIT:
- /* Change timeout for Security Erase Unit to 4 minutes.*/
- timeout = 240000;
- break;
- case ATA_CMD_STANDBYNOW1:
- /* Change timeout for standby immediate to 10 seconds.*/
- timeout = 10000;
- break;
- case 0xF7:
- case 0xFA:
- /* Change timeout for vendor unique command to 10 secs */
- timeout = 10000;
- break;
- case ATA_CMD_SMART:
- /* Change timeout for vendor unique command to 15 secs */
- timeout = 15000;
- break;
- default:
- timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
- break;
- }
+ mtip_set_timeout(&fis, &timeout);
/* Determine the correct transfer size.*/
if (force_single_sector)
@@ -2295,13 +2323,12 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
{
switch (cmd) {
case HDIO_GET_IDENTITY:
- if (mtip_get_identify(dd->port, (void __user *) arg) < 0) {
- dev_warn(&dd->pdev->dev,
- "Unable to read identity\n");
- return -EIO;
- }
-
+ {
+ if (copy_to_user((void __user *)arg, dd->port->identify,
+ sizeof(u16) * ATA_ID_WORDS))
+ return -EFAULT;
break;
+ }
case HDIO_DRIVE_CMD:
{
u8 drive_command[4];
@@ -2537,40 +2564,58 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
int size = 0;
int n;
- size += sprintf(&buf[size], "S ACTive:\n");
+ size += sprintf(&buf[size], "Hardware\n--------\n");
+ size += sprintf(&buf[size], "S ACTive : [ 0x");
- for (n = 0; n < dd->slot_groups; n++)
- size += sprintf(&buf[size], "0x%08x\n",
+ for (n = dd->slot_groups-1; n >= 0; n--)
+ size += sprintf(&buf[size], "%08X ",
readl(dd->port->s_active[n]));
- size += sprintf(&buf[size], "Command Issue:\n");
+ size += sprintf(&buf[size], "]\n");
+ size += sprintf(&buf[size], "Command Issue : [ 0x");
- for (n = 0; n < dd->slot_groups; n++)
- size += sprintf(&buf[size], "0x%08x\n",
+ for (n = dd->slot_groups-1; n >= 0; n--)
+ size += sprintf(&buf[size], "%08X ",
readl(dd->port->cmd_issue[n]));
- size += sprintf(&buf[size], "Allocated:\n");
+ size += sprintf(&buf[size], "]\n");
+ size += sprintf(&buf[size], "Completed : [ 0x");
+
+ for (n = dd->slot_groups-1; n >= 0; n--)
+ size += sprintf(&buf[size], "%08X ",
+ readl(dd->port->completed[n]));
+
+ size += sprintf(&buf[size], "]\n");
+ size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
+ readl(dd->port->mmio + PORT_IRQ_STAT));
+ size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
+ readl(dd->mmio + HOST_IRQ_STAT));
+ size += sprintf(&buf[size], "\n");
- for (n = 0; n < dd->slot_groups; n++) {
+ size += sprintf(&buf[size], "Local\n-----\n");
+ size += sprintf(&buf[size], "Allocated : [ 0x");
+
+ for (n = dd->slot_groups-1; n >= 0; n--) {
if (sizeof(long) > sizeof(u32))
group_allocated =
dd->port->allocated[n/2] >> (32*(n&1));
else
group_allocated = dd->port->allocated[n];
- size += sprintf(&buf[size], "0x%08x\n",
- group_allocated);
+ size += sprintf(&buf[size], "%08X ", group_allocated);
}
+ size += sprintf(&buf[size], "]\n");
- size += sprintf(&buf[size], "Completed:\n");
-
- for (n = 0; n < dd->slot_groups; n++)
- size += sprintf(&buf[size], "0x%08x\n",
- readl(dd->port->completed[n]));
+ size += sprintf(&buf[size], "Commands in Q: [ 0x");
- size += sprintf(&buf[size], "PORT IRQ STAT : 0x%08x\n",
- readl(dd->port->mmio + PORT_IRQ_STAT));
- size += sprintf(&buf[size], "HOST IRQ STAT : 0x%08x\n",
- readl(dd->mmio + HOST_IRQ_STAT));
+ for (n = dd->slot_groups-1; n >= 0; n--) {
+ if (sizeof(long) > sizeof(u32))
+ group_allocated =
+ dd->port->cmds_to_issue[n/2] >> (32*(n&1));
+ else
+ group_allocated = dd->port->cmds_to_issue[n];
+ size += sprintf(&buf[size], "%08X ", group_allocated);
+ }
+ size += sprintf(&buf[size], "]\n");
return size;
}
@@ -2592,8 +2637,24 @@ static ssize_t mtip_hw_show_status(struct device *dev,
return size;
}
+static ssize_t mtip_hw_show_flags(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct driver_data *dd = dev_to_disk(dev)->private_data;
+ int size = 0;
+
+ size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
+ dd->port->flags);
+ size += sprintf(&buf[size], "Flag in dd struct : [ %08lX ]\n",
+ dd->dd_flag);
+
+ return size;
+}
+
static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
/*
* Create the sysfs related attributes.
@@ -2616,6 +2677,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
if (sysfs_create_file(kobj, &dev_attr_status.attr))
dev_warn(&dd->pdev->dev,
"Error creating 'status' sysfs entry\n");
+ if (sysfs_create_file(kobj, &dev_attr_flags.attr))
+ dev_warn(&dd->pdev->dev,
+ "Error creating 'flags' sysfs entry\n");
return 0;
}
@@ -2636,6 +2700,7 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
sysfs_remove_file(kobj, &dev_attr_registers.attr);
sysfs_remove_file(kobj, &dev_attr_status.attr);
+ sysfs_remove_file(kobj, &dev_attr_flags.attr);
return 0;
}
@@ -3634,7 +3699,10 @@ skip_create_disk:
set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
blk_queue_physical_block_size(dd->queue, 4096);
+ blk_queue_max_hw_sectors(dd->queue, 0xffff);
+ blk_queue_max_segment_size(dd->queue, 0x400000);
blk_queue_io_min(dd->queue, 4096);
+
/*
* write back cache is not supported in the device. FUA depends on
* write back cache support, hence setting flush support to zero.
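
The new mtip_set_timeout() helper above centralizes the per-command timeout selection that exec_drive_taskfile() used to compute with an inline switch. A user-space sketch of the same mapping follows; the ATA opcode values mirror those in <linux/ata.h>, and the 5000 ms fallback is an assumed stand-in for MTIP_IOCTL_COMMAND_TIMEOUT_MS, used here for illustration only.

#include <stdio.h>

/* Opcode values as defined in <linux/ata.h>. */
#define ATA_CMD_DOWNLOAD_MICRO	0x92
#define ATA_CMD_SMART		0xB0
#define ATA_CMD_STANDBYNOW1	0xE0
#define ATA_CMD_SEC_ERASE_UNIT	0xF4
/* Assumed default, standing in for MTIP_IOCTL_COMMAND_TIMEOUT_MS. */
#define DEFAULT_TIMEOUT_MS	5000

/* Same mapping as mtip_set_timeout(), returning the timeout instead of
 * writing it through a pointer. */
static unsigned int timeout_for_command(unsigned char command)
{
	switch (command) {
	case ATA_CMD_DOWNLOAD_MICRO:
		return 120000;			/* 2 minutes */
	case ATA_CMD_SEC_ERASE_UNIT:
	case 0xFC:
		return 240000;			/* 4 minutes */
	case ATA_CMD_STANDBYNOW1:
		return 10000;			/* 10 seconds */
	case 0xF7:
	case 0xFA:
		return 60000;			/* vendor unique: 60 seconds */
	case ATA_CMD_SMART:
		return 15000;			/* 15 seconds */
	default:
		return DEFAULT_TIMEOUT_MS;
	}
}

int main(void)
{
	printf("SMART:        %u ms\n", timeout_for_command(ATA_CMD_SMART));
	printf("secure erase: %u ms\n", timeout_for_command(ATA_CMD_SEC_ERASE_UNIT));
	printf("identify:     %u ms\n", timeout_for_command(0xEC));
	return 0;
}
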
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 4ef58336310a..b2c88da26b2a 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -113,33 +113,35 @@
#define __force_bit2int (unsigned int __force)
-/* below are bit numbers in 'flags' defined in mtip_port */
-#define MTIP_PF_IC_ACTIVE_BIT 0 /* pio/ioctl */
-#define MTIP_PF_EH_ACTIVE_BIT 1 /* error handling */
-#define MTIP_PF_SE_ACTIVE_BIT 2 /* secure erase */
-#define MTIP_PF_DM_ACTIVE_BIT 3 /* download microcde */
-#define MTIP_PF_PAUSE_IO ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
+enum {
+ /* below are bit numbers in 'flags' defined in mtip_port */
+ MTIP_PF_IC_ACTIVE_BIT = 0, /* pio/ioctl */
+ MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
+ MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
+ MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcode */
+ MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
(1 << MTIP_PF_EH_ACTIVE_BIT) | \
(1 << MTIP_PF_SE_ACTIVE_BIT) | \
- (1 << MTIP_PF_DM_ACTIVE_BIT))
-
-#define MTIP_PF_SVC_THD_ACTIVE_BIT 4
-#define MTIP_PF_ISSUE_CMDS_BIT 5
-#define MTIP_PF_REBUILD_BIT 6
-#define MTIP_PF_SVC_THD_STOP_BIT 8
-
-/* below are bit numbers in 'dd_flag' defined in driver_data */
-#define MTIP_DDF_REMOVE_PENDING_BIT 1
-#define MTIP_DDF_OVER_TEMP_BIT 2
-#define MTIP_DDF_WRITE_PROTECT_BIT 3
-#define MTIP_DDF_STOP_IO ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
+ (1 << MTIP_PF_DM_ACTIVE_BIT)),
+
+ MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
+ MTIP_PF_ISSUE_CMDS_BIT = 5,
+ MTIP_PF_REBUILD_BIT = 6,
+ MTIP_PF_SVC_THD_STOP_BIT = 8,
+
+ /* below are bit numbers in 'dd_flag' defined in driver_data */
+ MTIP_DDF_REMOVE_PENDING_BIT = 1,
+ MTIP_DDF_OVER_TEMP_BIT = 2,
+ MTIP_DDF_WRITE_PROTECT_BIT = 3,
+ MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
(1 << MTIP_DDF_OVER_TEMP_BIT) | \
- (1 << MTIP_DDF_WRITE_PROTECT_BIT))
+ (1 << MTIP_DDF_WRITE_PROTECT_BIT)),
-#define MTIP_DDF_CLEANUP_BIT 5
-#define MTIP_DDF_RESUME_BIT 6
-#define MTIP_DDF_INIT_DONE_BIT 7
-#define MTIP_DDF_REBUILD_FAILED_BIT 8
+ MTIP_DDF_CLEANUP_BIT = 5,
+ MTIP_DDF_RESUME_BIT = 6,
+ MTIP_DDF_INIT_DONE_BIT = 7,
+ MTIP_DDF_REBUILD_FAILED_BIT = 8,
+};
__packed struct smart_attr{
u8 attr_id;
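
The flag macros converted to an enum in the hunk above keep the same bit numbers; MTIP_PF_PAUSE_IO is simply the OR of the four activity bits. The small sketch below reproduces that composition with plain bit operations (the kernel code manipulates these bits with set_bit()/test_bit() on the unsigned long flags word instead).

#include <stdio.h>

enum {
	MTIP_PF_IC_ACTIVE_BIT = 0,	/* pio/ioctl */
	MTIP_PF_EH_ACTIVE_BIT = 1,	/* error handling */
	MTIP_PF_SE_ACTIVE_BIT = 2,	/* secure erase */
	MTIP_PF_DM_ACTIVE_BIT = 3,	/* download microcode */
	MTIP_PF_PAUSE_IO = (1 << MTIP_PF_IC_ACTIVE_BIT) |
			   (1 << MTIP_PF_EH_ACTIVE_BIT) |
			   (1 << MTIP_PF_SE_ACTIVE_BIT) |
			   (1 << MTIP_PF_DM_ACTIVE_BIT),
};

int main(void)
{
	unsigned long flags = 0;

	flags |= 1UL << MTIP_PF_EH_ACTIVE_BIT;	/* error handler is running */

	printf("MTIP_PF_PAUSE_IO = 0x%x\n", MTIP_PF_PAUSE_IO);	/* 0xf */
	printf("pause I/O now?   %s\n",
	       (flags & MTIP_PF_PAUSE_IO) ? "yes" : "no");
	return 0;
}
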
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 013c7a549fb6..65665c9c42c6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -141,7 +141,7 @@ struct rbd_request {
struct rbd_snap {
struct device dev;
const char *name;
- size_t size;
+ u64 size;
struct list_head node;
u64 id;
};
@@ -175,8 +175,7 @@ struct rbd_device {
/* protects updating the header */
struct rw_semaphore header_rwsem;
char snap_name[RBD_MAX_SNAP_NAME_LEN];
- u32 cur_snap; /* index+1 of current snapshot within snap context
- 0 - for the head */
+ u64 snap_id; /* current snapshot id */
int read_only;
struct list_head node;
@@ -241,7 +240,7 @@ static void rbd_put_dev(struct rbd_device *rbd_dev)
put_device(&rbd_dev->dev);
}
-static int __rbd_update_snaps(struct rbd_device *rbd_dev);
+static int __rbd_refresh_header(struct rbd_device *rbd_dev);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
@@ -450,7 +449,9 @@ static void rbd_client_release(struct kref *kref)
struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
dout("rbd_release_client %p\n", rbdc);
+ spin_lock(&rbd_client_list_lock);
list_del(&rbdc->node);
+ spin_unlock(&rbd_client_list_lock);
ceph_destroy_client(rbdc->client);
kfree(rbdc->rbd_opts);
@@ -463,9 +464,7 @@ static void rbd_client_release(struct kref *kref)
*/
static void rbd_put_client(struct rbd_device *rbd_dev)
{
- spin_lock(&rbd_client_list_lock);
kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
- spin_unlock(&rbd_client_list_lock);
rbd_dev->rbd_client = NULL;
}
@@ -487,16 +486,18 @@ static void rbd_coll_release(struct kref *kref)
*/
static int rbd_header_from_disk(struct rbd_image_header *header,
struct rbd_image_header_ondisk *ondisk,
- int allocated_snaps,
+ u32 allocated_snaps,
gfp_t gfp_flags)
{
- int i;
- u32 snap_count;
+ u32 i, snap_count;
if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT)))
return -ENXIO;
snap_count = le32_to_cpu(ondisk->snap_count);
+ if (snap_count > (UINT_MAX - sizeof(struct ceph_snap_context))
+ / sizeof (*ondisk))
+ return -EINVAL;
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
snap_count * sizeof (*ondisk),
gfp_flags);
@@ -506,11 +507,11 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
if (snap_count) {
header->snap_names = kmalloc(header->snap_names_len,
- GFP_KERNEL);
+ gfp_flags);
if (!header->snap_names)
goto err_snapc;
header->snap_sizes = kmalloc(snap_count * sizeof(u64),
- GFP_KERNEL);
+ gfp_flags);
if (!header->snap_sizes)
goto err_names;
} else {
@@ -552,21 +553,6 @@ err_snapc:
return -ENOMEM;
}
-static int snap_index(struct rbd_image_header *header, int snap_num)
-{
- return header->total_snaps - snap_num;
-}
-
-static u64 cur_snap_id(struct rbd_device *rbd_dev)
-{
- struct rbd_image_header *header = &rbd_dev->header;
-
- if (!rbd_dev->cur_snap)
- return 0;
-
- return header->snapc->snaps[snap_index(header, rbd_dev->cur_snap)];
-}
-
static int snap_by_name(struct rbd_image_header *header, const char *snap_name,
u64 *seq, u64 *size)
{
@@ -605,7 +591,7 @@ static int rbd_header_set_snap(struct rbd_device *dev, u64 *size)
snapc->seq = header->snap_seq;
else
snapc->seq = 0;
- dev->cur_snap = 0;
+ dev->snap_id = CEPH_NOSNAP;
dev->read_only = 0;
if (size)
*size = header->image_size;
@@ -613,8 +599,7 @@ static int rbd_header_set_snap(struct rbd_device *dev, u64 *size)
ret = snap_by_name(header, dev->snap_name, &snapc->seq, size);
if (ret < 0)
goto done;
-
- dev->cur_snap = header->total_snaps - ret;
+ dev->snap_id = snapc->seq;
dev->read_only = 1;
}
@@ -935,7 +920,6 @@ static int rbd_do_request(struct request *rq,
layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
layout->fl_stripe_count = cpu_to_le32(1);
layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- layout->fl_pg_preferred = cpu_to_le32(-1);
layout->fl_pg_pool = cpu_to_le32(dev->poolid);
ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
req, ops);
@@ -1168,7 +1152,7 @@ static int rbd_req_read(struct request *rq,
int coll_index)
{
return rbd_do_op(rq, rbd_dev, NULL,
- (snapid ? snapid : CEPH_NOSNAP),
+ snapid,
CEPH_OSD_OP_READ,
CEPH_OSD_FLAG_READ,
2,
@@ -1187,7 +1171,7 @@ static int rbd_req_sync_read(struct rbd_device *dev,
u64 *ver)
{
return rbd_req_sync_op(dev, NULL,
- (snapid ? snapid : CEPH_NOSNAP),
+ snapid,
CEPH_OSD_OP_READ,
CEPH_OSD_FLAG_READ,
NULL,
@@ -1238,7 +1222,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
notify_id, (int)opcode);
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rc = __rbd_update_snaps(dev);
+ rc = __rbd_refresh_header(dev);
mutex_unlock(&ctl_mutex);
if (rc)
pr_warning(RBD_DRV_NAME "%d got notification but failed to "
@@ -1521,7 +1505,7 @@ static void rbd_rq_fn(struct request_queue *q)
coll, cur_seg);
else
rbd_req_read(rq, rbd_dev,
- cur_snap_id(rbd_dev),
+ rbd_dev->snap_id,
ofs,
op_size, bio,
coll, cur_seg);
@@ -1592,7 +1576,7 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
{
ssize_t rc;
struct rbd_image_header_ondisk *dh;
- int snap_count = 0;
+ u32 snap_count = 0;
u64 ver;
size_t len;
@@ -1656,7 +1640,7 @@ static int rbd_header_add_snap(struct rbd_device *dev,
struct ceph_mon_client *monc;
/* we should create a snapshot only if we're pointing at the head */
- if (dev->cur_snap)
+ if (dev->snap_id != CEPH_NOSNAP)
return -EINVAL;
monc = &dev->rbd_client->client->monc;
@@ -1683,7 +1667,9 @@ static int rbd_header_add_snap(struct rbd_device *dev,
if (ret < 0)
return ret;
- dev->header.snapc->seq = new_snapid;
+ down_write(&dev->header_rwsem);
+ dev->header.snapc->seq = new_snapid;
+ up_write(&dev->header_rwsem);
return 0;
bad:
@@ -1703,7 +1689,7 @@ static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
/*
* only read the first part of the ondisk header, without the snaps info
*/
-static int __rbd_update_snaps(struct rbd_device *rbd_dev)
+static int __rbd_refresh_header(struct rbd_device *rbd_dev)
{
int ret;
struct rbd_image_header h;
@@ -1890,7 +1876,7 @@ static ssize_t rbd_image_refresh(struct device *dev,
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rc = __rbd_update_snaps(rbd_dev);
+ rc = __rbd_refresh_header(rbd_dev);
if (rc < 0)
ret = rc;
@@ -1949,7 +1935,7 @@ static ssize_t rbd_snap_size_show(struct device *dev,
{
struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
- return sprintf(buf, "%zd\n", snap->size);
+ return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
}
static ssize_t rbd_snap_id_show(struct device *dev,
@@ -1958,7 +1944,7 @@ static ssize_t rbd_snap_id_show(struct device *dev,
{
struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
- return sprintf(buf, "%llu\n", (unsigned long long) snap->id);
+ return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
}
static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
@@ -2173,7 +2159,7 @@ static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
rbd_dev->header.obj_version);
if (ret == -ERANGE) {
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rc = __rbd_update_snaps(rbd_dev);
+ rc = __rbd_refresh_header(rbd_dev);
mutex_unlock(&ctl_mutex);
if (rc < 0)
return rc;
@@ -2558,7 +2544,7 @@ static ssize_t rbd_snap_add(struct device *dev,
if (ret < 0)
goto err_unlock;
- ret = __rbd_update_snaps(rbd_dev);
+ ret = __rbd_refresh_header(rbd_dev);
if (ret < 0)
goto err_unlock;
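
The added snap_count bound in rbd_header_from_disk() keeps the kmalloc() size, sizeof(struct ceph_snap_context) + snap_count * sizeof(*ondisk), from wrapping around when snap_count comes from an untrusted on-disk header. A generic user-space sketch of the same guard is below; the two byte counts are placeholders, not the real ceph structure sizes.

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define HEADER_BYTES	64u	/* placeholder for sizeof(struct ceph_snap_context) */
#define PER_SNAP_BYTES	112u	/* placeholder for sizeof(*ondisk) */

/* Return the allocation size, or 0 if the computation would overflow. */
static unsigned int snap_alloc_size(uint32_t snap_count)
{
	if (snap_count > (UINT_MAX - HEADER_BYTES) / PER_SNAP_BYTES)
		return 0;	/* header + count * per-snap would wrap */
	return HEADER_BYTES + snap_count * PER_SNAP_BYTES;
}

int main(void)
{
	printf("10 snapshots     -> %u bytes\n", snap_alloc_size(10));
	printf("bogus 0xffffffff -> %u (rejected)\n",
	       snap_alloc_size(0xffffffffu));
	return 0;
}
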
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 4e86393a09cf..60eed4bdd2e4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -526,6 +526,14 @@ static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
return 0;
}
+static char *encode_disk_name(char *ptr, unsigned int n)
+{
+ if (n >= 26)
+ ptr = encode_disk_name(ptr, n / 26 - 1);
+ *ptr = 'a' + n % 26;
+ return ptr + 1;
+}
+
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
struct blkfront_info *info,
u16 vdisk_info, u16 sector_size)
@@ -536,6 +544,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
unsigned int offset;
int minor;
int nr_parts;
+ char *ptr;
BUG_ON(info->gd != NULL);
BUG_ON(info->rq != NULL);
@@ -560,7 +569,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
"emulated IDE disks,\n\t choose an xvd device name"
"from xvde on\n", info->vdevice);
}
- err = -ENODEV;
+ if (minor >> MINORBITS) {
+ pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
+ info->vdevice, minor);
+ return -ENODEV;
+ }
if ((minor % nr_parts) == 0)
nr_minors = nr_parts;
@@ -574,23 +587,14 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
if (gd == NULL)
goto release;
- if (nr_minors > 1) {
- if (offset < 26)
- sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
- else
- sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
- 'a' + ((offset / 26)-1), 'a' + (offset % 26));
- } else {
- if (offset < 26)
- sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
- 'a' + offset,
- minor & (nr_parts - 1));
- else
- sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
- 'a' + ((offset / 26) - 1),
- 'a' + (offset % 26),
- minor & (nr_parts - 1));
- }
+ strcpy(gd->disk_name, DEV_NAME);
+ ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
+ BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
+ if (nr_minors > 1)
+ *ptr = 0;
+ else
+ snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
+ "%d", minor & (nr_parts - 1));
gd->major = XENVBD_MAJOR;
gd->first_minor = minor;
@@ -1496,7 +1500,9 @@ module_init(xlblk_init);
static void __exit xlblk_exit(void)
{
- return xenbus_unregister_driver(&blkfront_driver);
+ xenbus_unregister_driver(&blkfront_driver);
+ unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
+ kfree(minors);
}
module_exit(xlblk_exit);
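
encode_disk_name() in the hunk above replaces the nested sprintf() cases with a recursive base-26 encoding of the device offset ('a'..'z', then 'aa'..'az', and so on). The user-space copy below shows the names it produces for a handful of offsets.

#include <stdio.h>
#include <string.h>

#define DEV_NAME "xvd"

/* Same encoding as the xen-blkfront helper. */
static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}

int main(void)
{
	static const unsigned int offsets[] = { 0, 25, 26, 27, 701, 702 };
	char name[32];
	unsigned int i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		char *end;

		strcpy(name, DEV_NAME);
		end = encode_disk_name(name + strlen(DEV_NAME), offsets[i]);
		*end = '\0';
		printf("offset %4u -> %s\n", offsets[i], name);
	}
	return 0;	/* xvda, xvdz, xvdaa, xvdab, xvdzz, xvdaaa */
}
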
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 2812b152d6e9..ad591bd240ec 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -81,6 +81,9 @@ static struct usb_device_id ath3k_table[] = {
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
+ /* Atheros AR5BBU22 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE03C) },
+
{ } /* Terminating entry */
};
@@ -99,6 +102,9 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ /* Atheros AR5BBU22 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
+
{ } /* Terminating entry */
};
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 90bda50dc446..94f2d65131c4 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -67,6 +67,7 @@ struct btmrvl_adapter {
u8 wakeup_tries;
wait_queue_head_t cmd_wait_q;
u8 cmd_complete;
+ bool is_suspended;
};
struct btmrvl_private {
@@ -139,8 +140,10 @@ void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
+int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv);
int btmrvl_enable_ps(struct btmrvl_private *priv);
int btmrvl_prepare_command(struct btmrvl_private *priv);
+int btmrvl_enable_hs(struct btmrvl_private *priv);
#ifdef CONFIG_DEBUG_FS
void btmrvl_debugfs_init(struct hci_dev *hdev);
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index d1209adc882d..681ca9d18e12 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -200,6 +200,36 @@ int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
}
EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd);
+int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv)
+{
+ struct sk_buff *skb;
+ struct btmrvl_cmd *cmd;
+
+ skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!skb) {
+ BT_ERR("No free skb");
+ return -ENOMEM;
+ }
+
+ cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
+ cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
+ BT_CMD_HOST_SLEEP_CONFIG));
+ cmd->length = 2;
+ cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
+ cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
+
+ bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
+
+ skb->dev = (void *) priv->btmrvl_dev.hcidev;
+ skb_queue_head(&priv->adapter->tx_queue, skb);
+
+ BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x", cmd->data[0],
+ cmd->data[1]);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btmrvl_send_hscfg_cmd);
+
int btmrvl_enable_ps(struct btmrvl_private *priv)
{
struct sk_buff *skb;
@@ -232,7 +262,7 @@ int btmrvl_enable_ps(struct btmrvl_private *priv)
}
EXPORT_SYMBOL_GPL(btmrvl_enable_ps);
-static int btmrvl_enable_hs(struct btmrvl_private *priv)
+int btmrvl_enable_hs(struct btmrvl_private *priv)
{
struct sk_buff *skb;
struct btmrvl_cmd *cmd;
@@ -268,35 +298,15 @@ static int btmrvl_enable_hs(struct btmrvl_private *priv)
return ret;
}
+EXPORT_SYMBOL_GPL(btmrvl_enable_hs);
int btmrvl_prepare_command(struct btmrvl_private *priv)
{
- struct sk_buff *skb = NULL;
- struct btmrvl_cmd *cmd;
int ret = 0;
if (priv->btmrvl_dev.hscfgcmd) {
priv->btmrvl_dev.hscfgcmd = 0;
-
- skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
- if (skb == NULL) {
- BT_ERR("No free skb");
- return -ENOMEM;
- }
-
- cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
- cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_CONFIG));
- cmd->length = 2;
- cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
- cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
-
- bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
-
- skb->dev = (void *) priv->btmrvl_dev.hcidev;
- skb_queue_head(&priv->adapter->tx_queue, skb);
-
- BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x",
- cmd->data[0], cmd->data[1]);
+ btmrvl_send_hscfg_cmd(priv);
}
if (priv->btmrvl_dev.pscmd) {
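
btmrvl_send_hscfg_cmd() packs the 16-bit gpio_gap value into the two HSCFG payload bytes: data[0] carries the GPIO number (high byte) and data[1] the gap (low byte), matching the labels in the BT_DBG message above. The split is a plain shift-and-mask, shown here with the 0xffff value that btmrvl_sdio_probe() now sends by default.

#include <stdio.h>

int main(void)
{
	unsigned int gpio_gap = 0xffff;			/* probe-time default */
	unsigned char gpio = (gpio_gap & 0xff00) >> 8;	/* HSCFG data[0] */
	unsigned char gap  = gpio_gap & 0x00ff;		/* HSCFG data[1] */

	printf("gpio=0x%02x gap=0x%02x\n", gpio, gap);
	return 0;
}
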
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 27b74b0d547b..a853244e7fd7 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -339,9 +339,7 @@ static int btmrvl_sdio_download_helper(struct btmrvl_sdio_card *card)
done:
kfree(tmphlprbuf);
- if (fw_helper)
- release_firmware(fw_helper);
-
+ release_firmware(fw_helper);
return ret;
}
@@ -484,10 +482,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
done:
kfree(tmpfwbuf);
-
- if (fw_firmware)
- release_firmware(fw_firmware);
-
+ release_firmware(fw_firmware);
return ret;
}
@@ -1013,6 +1008,9 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
priv->btmrvl_dev.psmode = 1;
btmrvl_enable_ps(priv);
+ priv->btmrvl_dev.gpio_gap = 0xffff;
+ btmrvl_send_hscfg_cmd(priv);
+
return 0;
disable_host_int:
@@ -1048,11 +1046,111 @@ static void btmrvl_sdio_remove(struct sdio_func *func)
}
}
+static int btmrvl_sdio_suspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct btmrvl_sdio_card *card;
+ struct btmrvl_private *priv;
+ mmc_pm_flag_t pm_flags;
+ struct hci_dev *hcidev;
+
+ if (func) {
+ pm_flags = sdio_get_host_pm_caps(func);
+ BT_DBG("%s: suspend: PM flags = 0x%x", sdio_func_id(func),
+ pm_flags);
+ if (!(pm_flags & MMC_PM_KEEP_POWER)) {
+ BT_ERR("%s: cannot remain alive while suspended",
+ sdio_func_id(func));
+ return -ENOSYS;
+ }
+ card = sdio_get_drvdata(func);
+ if (!card || !card->priv) {
+ BT_ERR("card or priv structure is not valid");
+ return 0;
+ }
+ } else {
+ BT_ERR("sdio_func is not specified");
+ return 0;
+ }
+
+ priv = card->priv;
+
+ if (priv->adapter->hs_state != HS_ACTIVATED) {
+ if (btmrvl_enable_hs(priv)) {
+ BT_ERR("HS not actived, suspend failed!");
+ return -EBUSY;
+ }
+ }
+ hcidev = priv->btmrvl_dev.hcidev;
+ BT_DBG("%s: SDIO suspend", hcidev->name);
+ hci_suspend_dev(hcidev);
+ skb_queue_purge(&priv->adapter->tx_queue);
+
+ priv->adapter->is_suspended = true;
+
+ /* We will keep the power when hs enabled successfully */
+ if (priv->adapter->hs_state == HS_ACTIVATED) {
+ BT_DBG("suspend with MMC_PM_KEEP_POWER");
+ return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ } else {
+ BT_DBG("suspend without MMC_PM_KEEP_POWER");
+ return 0;
+ }
+}
+
+static int btmrvl_sdio_resume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct btmrvl_sdio_card *card;
+ struct btmrvl_private *priv;
+ mmc_pm_flag_t pm_flags;
+ struct hci_dev *hcidev;
+
+ if (func) {
+ pm_flags = sdio_get_host_pm_caps(func);
+ BT_DBG("%s: resume: PM flags = 0x%x", sdio_func_id(func),
+ pm_flags);
+ card = sdio_get_drvdata(func);
+ if (!card || !card->priv) {
+ BT_ERR("card or priv structure is not valid");
+ return 0;
+ }
+ } else {
+ BT_ERR("sdio_func is not specified");
+ return 0;
+ }
+ priv = card->priv;
+
+ if (!priv->adapter->is_suspended) {
+ BT_DBG("device already resumed");
+ return 0;
+ }
+
+ priv->adapter->is_suspended = false;
+ hcidev = priv->btmrvl_dev.hcidev;
+ BT_DBG("%s: SDIO resume", hcidev->name);
+ hci_resume_dev(hcidev);
+ priv->hw_wakeup_firmware(priv);
+ priv->adapter->hs_state = HS_DEACTIVATED;
+ BT_DBG("%s: HS DEACTIVATED in resume!", hcidev->name);
+
+ return 0;
+}
+
+static const struct dev_pm_ops btmrvl_sdio_pm_ops = {
+ .suspend = btmrvl_sdio_suspend,
+ .resume = btmrvl_sdio_resume,
+};
+
static struct sdio_driver bt_mrvl_sdio = {
.name = "btmrvl_sdio",
.id_table = btmrvl_sdio_ids,
.probe = btmrvl_sdio_probe,
.remove = btmrvl_sdio_remove,
+ .drv = {
+ .owner = THIS_MODULE,
+ .pm = &btmrvl_sdio_pm_ops,
+ }
};
static int __init btmrvl_sdio_init_module(void)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 461c68bc4dd7..c9463af8e564 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -143,6 +143,9 @@ static struct usb_device_id blacklist_table[] = {
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
+
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -855,6 +858,7 @@ static void btusb_work(struct work_struct *work)
{
struct btusb_data *data = container_of(work, struct btusb_data, work);
struct hci_dev *hdev = data->hdev;
+ int new_alts;
int err;
if (hdev->conn_hash.sco_num > 0) {
@@ -868,11 +872,19 @@ static void btusb_work(struct work_struct *work)
set_bit(BTUSB_DID_ISO_RESUME, &data->flags);
}
- if (data->isoc_altsetting != 2) {
+
+ if (hdev->voice_setting & 0x0020) {
+ static const int alts[3] = { 2, 4, 5 };
+ new_alts = alts[hdev->conn_hash.sco_num - 1];
+ } else {
+ new_alts = hdev->conn_hash.sco_num;
+ }
+
+ if (data->isoc_altsetting != new_alts) {
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->isoc_anchor);
- if (__set_isoc_interface(hdev, 2) < 0)
+ if (__set_isoc_interface(hdev, new_alts) < 0)
return;
}
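
The btusb_work() change above selects the isochronous alternate setting from the number of active SCO links instead of hard-coding 2, with a separate table used when bit 0x0020 of voice_setting is set. The sketch below reproduces only that selection; it assumes, as the three-entry table does, that sco_num stays between 1 and 3.

#include <stdio.h>

/* Mirror of the alternate-setting choice added to btusb_work(). */
static int pick_isoc_alt(unsigned int voice_setting, int sco_num)
{
	if (voice_setting & 0x0020) {
		static const int alts[3] = { 2, 4, 5 };
		return alts[sco_num - 1];	/* assumes 1 <= sco_num <= 3 */
	}
	return sco_num;
}

int main(void)
{
	printf("voice 0x0000, 1 SCO link  -> alt %d\n", pick_isoc_alt(0x0000, 1));
	printf("voice 0x0000, 2 SCO links -> alt %d\n", pick_isoc_alt(0x0000, 2));
	printf("voice 0x0020, 1 SCO link  -> alt %d\n", pick_isoc_alt(0x0020, 1));
	printf("voice 0x0020, 3 SCO links -> alt %d\n", pick_isoc_alt(0x0020, 3));
	return 0;
}
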
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 98a8c05d4f23..e564579a6115 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -388,7 +388,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
hdev->close = hci_uart_close;
hdev->flush = hci_uart_flush;
hdev->send = hci_uart_send_frame;
- hdev->parent = hu->tty->dev;
+ SET_HCIDEV_DEV(hdev, hu->tty->dev);
if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 158bfe507da7..3f72595a6017 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -252,8 +252,9 @@ static int vhci_open(struct inode *inode, struct file *file)
}
file->private_data = data;
+ nonseekable_open(inode, file);
- return nonseekable_open(inode, file);
+ return 0;
}
static int vhci_release(struct inode *inode, struct file *file)
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 17e05d1076b3..a0df182f6f7d 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -958,7 +958,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
if (set_memory_uc((unsigned long)table, 1 << page_order))
printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
- bridge->gatt_table = (void *)table;
+ bridge->gatt_table = (u32 __iomem *)table;
#else
bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
(PAGE_SIZE * (1 << page_order)));
@@ -1010,7 +1010,6 @@ int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
case LVL2_APER_SIZE:
/* The generic routines can't deal with 2 level gatt's */
return -EINVAL;
- break;
default:
page_order = 0;
break;
@@ -1077,7 +1076,6 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
case LVL2_APER_SIZE:
/* The generic routines can't deal with 2 level gatt's */
return -EINVAL;
- break;
default:
num_entries = 0;
break;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 962e75dc4781..764f70c5e690 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -907,6 +907,11 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
+ ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB),
+ ID(PCI_DEVICE_ID_INTEL_HASWELL_HB),
+ ID(PCI_DEVICE_ID_INTEL_HASWELL_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_HASWELL_S_HB),
+ ID(PCI_DEVICE_ID_INTEL_HASWELL_E_HB),
{ }
};
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 7ea18a5fe71c..c0091753a0d1 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -96,6 +96,7 @@
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
+#define GFX_FLSH_CNTL_VLV 0x101008
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
@@ -235,6 +236,19 @@
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
+#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
+#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
+#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV 0x0c16 /* SDV */
+#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
int intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 7f025fb620de..1237e7575c3f 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1179,6 +1179,20 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
writel(addr | pte_flags, intel_private.gtt + entry);
}
+static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
+{
+ u32 pte_flags;
+
+ pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+
+ /* gen6 has bit11-4 for physical addr bit39-32 */
+ addr |= (addr >> 28) & 0xff0;
+ writel(addr | pte_flags, intel_private.gtt + entry);
+
+ writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
+}
+
static void gen6_cleanup(void)
{
}
@@ -1205,12 +1219,16 @@ static inline int needs_idle_maps(void)
static int i9xx_setup(void)
{
u32 reg_addr;
+ int size = KB(512);
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
reg_addr &= 0xfff80000;
- intel_private.registers = ioremap(reg_addr, 128 * 4096);
+ if (INTEL_GTT_GEN >= 7)
+ size = MB(2);
+
+ intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
return -ENOMEM;
@@ -1354,6 +1372,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
+static const struct intel_gtt_driver valleyview_gtt_driver = {
+ .gen = 7,
+ .setup = i9xx_setup,
+ .cleanup = gen6_cleanup,
+ .write_entry = valleyview_write_entry,
+ .dma_mask_size = 40,
+ .check_flags = gen6_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
* driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1460,6 +1487,22 @@ static const struct intel_gtt_driver_description {
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
"Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
+ "ValleyView", &valleyview_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV,
+ "Haswell", &sandybridge_gtt_driver },
{ 0, NULL, NULL }
};
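
valleyview_write_entry() keeps the gen6 PTE layout, where the low 32 bits of the address stay in place and address bits 39:32 are folded into PTE bits 11:4 before the 32-bit writel(); that is what `addr |= (addr >> 28) & 0xff0` does. A small sketch of the packing on a sample 40-bit address follows.

#include <stdio.h>
#include <stdint.h>

/* Fold address bits 39:32 into bits 11:4 of a 32-bit PTE, then add flags.
 * A right shift by 28 moves addr bit 32 to bit 4 and bit 39 to bit 11;
 * the 0xff0 mask keeps exactly that field. */
static uint32_t pack_gtt_pte(uint64_t addr, uint32_t pte_flags)
{
	addr |= (addr >> 28) & 0xff0;
	return (uint32_t)addr | pte_flags;
}

int main(void)
{
	uint64_t addr = 0x1234567000ULL;	/* page-aligned 40-bit address */
	uint32_t pte = pack_gtt_pte(addr, 0x1);	/* 0x1 plays the role of I810_PTE_VALID */

	printf("addr = 0x%010llx\n", (unsigned long long)addr);
	printf("pte  = 0x%08x\n", pte);		/* 0x34567121: bits 11:4 now hold 0x12 */
	return 0;
}
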
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index ffa888cd1c88..192000377737 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -158,7 +158,6 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
break;
case LVL2_APER_SIZE:
return -EINVAL;
- break;
default:
num_entries = 0;
break;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 165e1febae53..4864407e3fc4 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -12,6 +12,7 @@ config HAVE_MACH_CLKDEV
config COMMON_CLK
bool
select HAVE_CLK_PREPARE
+ select CLKDEV_LOOKUP
---help---
The common clock framework is a single definition of struct
clk, useful across many platforms, as well as an
@@ -22,17 +23,6 @@ config COMMON_CLK
menu "Common Clock Framework"
depends on COMMON_CLK
-config COMMON_CLK_DISABLE_UNUSED
- bool "Disabled unused clocks at boot"
- depends on COMMON_CLK
- ---help---
- Traverses the entire clock tree and disables any clocks that are
- enabled in hardware but have not been enabled by any device drivers.
- This saves power and keeps the software model of the clock in line
- with reality.
-
- If in doubt, say "N".
-
config COMMON_CLK_DEBUG
bool "DebugFS representation of clock tree"
depends on COMMON_CLK
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 1f736bc11c4b..b9a5158a30b1 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,4 +1,7 @@
obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \
- clk-mux.o clk-divider.o
+ clk-mux.o clk-divider.o clk-fixed-factor.o
+# SoCs specific
+obj-$(CONFIG_ARCH_MXS) += mxs/
+obj-$(CONFIG_PLAT_SPEAR) += spear/
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index d5ac6a75ea57..8ea11b444528 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -45,7 +45,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
return parent_rate / div;
}
-EXPORT_SYMBOL_GPL(clk_divider_recalc_rate);
/*
* The reverse of DIV_ROUND_UP: The maximum number which
@@ -68,8 +67,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_ONE_BASED)
maxdiv--;
- if (!best_parent_rate) {
- parent_rate = __clk_get_rate(__clk_get_parent(hw->clk));
+ if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
+ parent_rate = *best_parent_rate;
bestdiv = DIV_ROUND_UP(parent_rate, rate);
bestdiv = bestdiv == 0 ? 1 : bestdiv;
bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
@@ -109,24 +108,18 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
int div;
div = clk_divider_bestdiv(hw, rate, prate);
- if (prate)
- return *prate / div;
- else {
- unsigned long r;
- r = __clk_get_rate(__clk_get_parent(hw->clk));
- return r / div;
- }
+ return *prate / div;
}
-EXPORT_SYMBOL_GPL(clk_divider_round_rate);
-static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate)
+static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
struct clk_divider *divider = to_clk_divider(hw);
unsigned int div;
unsigned long flags = 0;
u32 val;
- div = __clk_get_rate(__clk_get_parent(hw->clk)) / rate;
+ div = parent_rate / rate;
if (!(divider->flags & CLK_DIVIDER_ONE_BASED))
div--;
@@ -147,15 +140,26 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate)
return 0;
}
-EXPORT_SYMBOL_GPL(clk_divider_set_rate);
-struct clk_ops clk_divider_ops = {
+const struct clk_ops clk_divider_ops = {
.recalc_rate = clk_divider_recalc_rate,
.round_rate = clk_divider_round_rate,
.set_rate = clk_divider_set_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ops);
+/**
+ * clk_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
struct clk *clk_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
@@ -163,38 +167,34 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
{
struct clk_divider *div;
struct clk *clk;
+ struct clk_init_data init;
+ /* allocate the divider */
div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
-
if (!div) {
pr_err("%s: could not allocate divider clk\n", __func__);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
+ init.name = name;
+ init.ops = &clk_divider_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name: NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
/* struct clk_divider assignments */
div->reg = reg;
div->shift = shift;
div->width = width;
div->flags = clk_divider_flags;
div->lock = lock;
+ div->hw.init = &init;
- if (parent_name) {
- div->parent[0] = kstrdup(parent_name, GFP_KERNEL);
- if (!div->parent[0])
- goto out;
- }
-
- clk = clk_register(dev, name,
- &clk_divider_ops, &div->hw,
- div->parent,
- (parent_name ? 1 : 0),
- flags);
- if (clk)
- return clk;
+ /* register the clock */
+ clk = clk_register(dev, &div->hw);
-out:
- kfree(div->parent[0]);
- kfree(div);
+ if (IS_ERR(clk))
+ kfree(div);
- return NULL;
+ return clk;
}
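
When CLK_SET_RATE_PARENT is not set, clk_divider_bestdiv() in the hunk above no longer needs a round trip through __clk_get_rate(): the parent rate is taken from *best_parent_rate, the divider is parent/rate rounded up, and the result is clamped to the field's maximum. A user-space sketch of that fixed-parent path follows; maxdiv is passed in directly because the register-width computation sits outside this hunk.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Fixed-parent path of clk_divider_bestdiv(): round the divider up so the
 * produced rate never exceeds the requested one, then clamp to maxdiv. */
static unsigned int bestdiv_fixed_parent(unsigned long parent_rate,
					 unsigned long rate,
					 unsigned int maxdiv)
{
	unsigned int div = DIV_ROUND_UP(parent_rate, rate);

	if (div == 0)
		div = 1;
	if (div > maxdiv)
		div = maxdiv;
	return div;
}

int main(void)
{
	unsigned long parent = 48000000;	/* 48 MHz parent */
	unsigned int div = bestdiv_fixed_parent(parent, 10000000, 255);

	/* Asking for 10 MHz picks divider 5, i.e. 9.6 MHz. */
	printf("div = %u, rate = %lu Hz\n", div, parent / div);
	return 0;
}
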
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
new file mode 100644
index 000000000000..c8c003e217ad
--- /dev/null
+++ b/drivers/clk/clk-fixed-factor.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Standard functionality for the common clock API.
+ */
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+/*
+ * DOC: basic fixed multiplier and divider clock that cannot gate
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is fixed. clk->rate = parent->rate / div * mult
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
+
+static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
+
+ return parent_rate * fix->mult / fix->div;
+}
+
+static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
+
+ if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+ unsigned long best_parent;
+
+ best_parent = (rate / fix->mult) * fix->div;
+ *prate = __clk_round_rate(__clk_get_parent(hw->clk),
+ best_parent);
+ }
+
+ return (*prate / fix->div) * fix->mult;
+}
+
+static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+struct clk_ops clk_fixed_factor_ops = {
+ .round_rate = clk_factor_round_rate,
+ .set_rate = clk_factor_set_rate,
+ .recalc_rate = clk_factor_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
+
+struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div)
+{
+ struct clk_fixed_factor *fix;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ fix = kmalloc(sizeof(*fix), GFP_KERNEL);
+ if (!fix) {
+ pr_err("%s: could not allocate fixed factor clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* struct clk_fixed_factor assignments */
+ fix->mult = mult;
+ fix->div = div;
+ fix->hw.init = &init;
+
+ init.name = name;
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(dev, &fix->hw);
+
+ if (IS_ERR(clk))
+ kfree(fix);
+
+ return clk;
+}
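
A fixed-factor clock only scales its parent: recalc_rate() returns parent_rate * mult / div, and round_rate() asks the parent for (rate / mult) * div when CLK_SET_RATE_PARENT allows propagation. The arithmetic is shown below on made-up rates and a hypothetical clock name; the clock itself has no hardware state, which is why set_rate() above is an empty stub.

#include <stdio.h>

/* Rate seen by children, as in clk_factor_recalc_rate(). */
static unsigned long fixed_factor_rate(unsigned long parent_rate,
				       unsigned int mult, unsigned int div)
{
	return parent_rate * mult / div;
}

/* Parent rate requested by clk_factor_round_rate() for a target rate. */
static unsigned long fixed_factor_best_parent(unsigned long rate,
					      unsigned int mult, unsigned int div)
{
	return (rate / mult) * div;
}

int main(void)
{
	/* hypothetical "pll_div2": mult = 1, div = 2 under a 996 MHz parent */
	printf("child rate  = %lu Hz\n", fixed_factor_rate(996000000UL, 1, 2));
	/* to get a 100 MHz child, the parent would have to run at ... */
	printf("parent rate = %lu Hz\n", fixed_factor_best_parent(100000000UL, 1, 2));
	return 0;
}
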
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 90c79fb5d1bd..cbd246229786 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -32,51 +32,50 @@ static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
{
return to_clk_fixed_rate(hw)->fixed_rate;
}
-EXPORT_SYMBOL_GPL(clk_fixed_rate_recalc_rate);
-struct clk_ops clk_fixed_rate_ops = {
+const struct clk_ops clk_fixed_rate_ops = {
.recalc_rate = clk_fixed_rate_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
+/**
+ * clk_register_fixed_rate - register fixed-rate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate)
{
struct clk_fixed_rate *fixed;
- char **parent_names = NULL;
- u8 len;
+ struct clk *clk;
+ struct clk_init_data init;
+ /* allocate fixed-rate clock */
fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
-
if (!fixed) {
pr_err("%s: could not allocate fixed clk\n", __func__);
return ERR_PTR(-ENOMEM);
}
+ init.name = name;
+ init.ops = &clk_fixed_rate_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name: NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
/* struct clk_fixed_rate assignments */
fixed->fixed_rate = fixed_rate;
+ fixed->hw.init = &init;
- if (parent_name) {
- parent_names = kmalloc(sizeof(char *), GFP_KERNEL);
-
- if (! parent_names)
- goto out;
+ /* register the clock */
+ clk = clk_register(dev, &fixed->hw);
- len = sizeof(char) * strlen(parent_name);
-
- parent_names[0] = kmalloc(len, GFP_KERNEL);
-
- if (!parent_names[0])
- goto out;
-
- strncpy(parent_names[0], parent_name, len);
- }
+ if (IS_ERR(clk))
+ kfree(fixed);
-out:
- return clk_register(dev, name,
- &clk_fixed_rate_ops, &fixed->hw,
- parent_names,
- (parent_name ? 1 : 0),
- flags);
+ return clk;
}
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index b5902e2ef2fd..578465e04be6 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -28,32 +28,38 @@
#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
-static void clk_gate_set_bit(struct clk_gate *gate)
+/*
+ * It works on following logic:
+ *
+ * For enabling clock, enable = 1
+ * set2dis = 1 -> clear bit -> set = 0
+ * set2dis = 0 -> set bit -> set = 1
+ *
+ * For disabling clock, enable = 0
+ * set2dis = 1 -> set bit -> set = 1
+ * set2dis = 0 -> clear bit -> set = 0
+ *
+ * So, result is always: enable xor set2dis.
+ */
+static void clk_gate_endisable(struct clk_hw *hw, int enable)
{
- u32 reg;
+ struct clk_gate *gate = to_clk_gate(hw);
+ int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
unsigned long flags = 0;
+ u32 reg;
+
+ set ^= enable;
if (gate->lock)
spin_lock_irqsave(gate->lock, flags);
reg = readl(gate->reg);
- reg |= BIT(gate->bit_idx);
- writel(reg, gate->reg);
-
- if (gate->lock)
- spin_unlock_irqrestore(gate->lock, flags);
-}
-
-static void clk_gate_clear_bit(struct clk_gate *gate)
-{
- u32 reg;
- unsigned long flags = 0;
- if (gate->lock)
- spin_lock_irqsave(gate->lock, flags);
+ if (set)
+ reg |= BIT(gate->bit_idx);
+ else
+ reg &= ~BIT(gate->bit_idx);
- reg = readl(gate->reg);
- reg &= ~BIT(gate->bit_idx);
writel(reg, gate->reg);
if (gate->lock)
@@ -62,27 +68,15 @@ static void clk_gate_clear_bit(struct clk_gate *gate)
static int clk_gate_enable(struct clk_hw *hw)
{
- struct clk_gate *gate = to_clk_gate(hw);
-
- if (gate->flags & CLK_GATE_SET_TO_DISABLE)
- clk_gate_clear_bit(gate);
- else
- clk_gate_set_bit(gate);
+ clk_gate_endisable(hw, 1);
return 0;
}
-EXPORT_SYMBOL_GPL(clk_gate_enable);
static void clk_gate_disable(struct clk_hw *hw)
{
- struct clk_gate *gate = to_clk_gate(hw);
-
- if (gate->flags & CLK_GATE_SET_TO_DISABLE)
- clk_gate_set_bit(gate);
- else
- clk_gate_clear_bit(gate);
+ clk_gate_endisable(hw, 0);
}
-EXPORT_SYMBOL_GPL(clk_gate_disable);
static int clk_gate_is_enabled(struct clk_hw *hw)
{
@@ -99,15 +93,25 @@ static int clk_gate_is_enabled(struct clk_hw *hw)
return reg ? 1 : 0;
}
-EXPORT_SYMBOL_GPL(clk_gate_is_enabled);
-struct clk_ops clk_gate_ops = {
+const struct clk_ops clk_gate_ops = {
.enable = clk_gate_enable,
.disable = clk_gate_disable,
.is_enabled = clk_gate_is_enabled,
};
EXPORT_SYMBOL_GPL(clk_gate_ops);
+/**
+ * clk_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
@@ -115,36 +119,32 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
{
struct clk_gate *gate;
struct clk *clk;
+ struct clk_init_data init;
+ /* allocate the gate */
gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
-
if (!gate) {
pr_err("%s: could not allocate gated clk\n", __func__);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
+ init.name = name;
+ init.ops = &clk_gate_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name: NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
/* struct clk_gate assignments */
gate->reg = reg;
gate->bit_idx = bit_idx;
gate->flags = clk_gate_flags;
gate->lock = lock;
+ gate->hw.init = &init;
- if (parent_name) {
- gate->parent[0] = kstrdup(parent_name, GFP_KERNEL);
- if (!gate->parent[0])
- goto out;
- }
+ clk = clk_register(dev, &gate->hw);
+
+ if (IS_ERR(clk))
+ kfree(gate);
- clk = clk_register(dev, name,
- &clk_gate_ops, &gate->hw,
- gate->parent,
- (parent_name ? 1 : 0),
- flags);
- if (clk)
- return clk;
-out:
- kfree(gate->parent[0]);
- kfree(gate);
-
- return NULL;
+ return clk;
}
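
clk_gate_endisable() collapses the old set/clear helpers into one function by XOR-ing the requested enable state with the CLK_GATE_SET_TO_DISABLE polarity, exactly as the truth table in the comment above describes. The sketch below prints that table.

#include <stdio.h>

/* Returns 1 if the gate bit must be set in the register, 0 if cleared. */
static int gate_bit_value(int enable, int set_to_disable)
{
	int set = set_to_disable ? 1 : 0;

	set ^= enable;	/* result is always: enable xor set2dis */
	return set;
}

int main(void)
{
	int enable, set2dis;

	for (set2dis = 0; set2dis <= 1; set2dis++)
		for (enable = 0; enable <= 1; enable++)
			printf("enable=%d set2dis=%d -> bit=%d\n",
			       enable, set2dis, gate_bit_value(enable, set2dis));
	return 0;
}
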
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index c71ad1f41a97..fd36a8ea73d9 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -55,7 +55,6 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
return val;
}
-EXPORT_SYMBOL_GPL(clk_mux_get_parent);
static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
@@ -82,35 +81,47 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
return 0;
}
-EXPORT_SYMBOL_GPL(clk_mux_set_parent);
-struct clk_ops clk_mux_ops = {
+const struct clk_ops clk_mux_ops = {
.get_parent = clk_mux_get_parent,
.set_parent = clk_mux_set_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
struct clk *clk_register_mux(struct device *dev, const char *name,
- char **parent_names, u8 num_parents, unsigned long flags,
+ const char **parent_names, u8 num_parents, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_mux_flags, spinlock_t *lock)
{
struct clk_mux *mux;
+ struct clk *clk;
+ struct clk_init_data init;
- mux = kmalloc(sizeof(struct clk_mux), GFP_KERNEL);
-
+ /* allocate the mux */
+ mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
if (!mux) {
pr_err("%s: could not allocate mux clk\n", __func__);
return ERR_PTR(-ENOMEM);
}
+ init.name = name;
+ init.ops = &clk_mux_ops;
+ init.flags = flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
/* struct clk_mux assignments */
mux->reg = reg;
mux->shift = shift;
mux->width = width;
mux->flags = clk_mux_flags;
mux->lock = lock;
+ mux->hw.init = &init;
+
+ clk = clk_register(dev, &mux->hw);
+
+ if (IS_ERR(clk))
+ kfree(mux);
- return clk_register(dev, name, &clk_mux_ops, &mux->hw,
- parent_names, num_parents, flags);
+ return clk;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9cf6f59e3e19..687b00d67c8a 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -194,9 +194,8 @@ static int __init clk_debug_init(void)
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
-#endif /* CONFIG_COMMON_CLK_DEBUG */
+#endif
-#ifdef CONFIG_COMMON_CLK_DISABLE_UNUSED
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
@@ -246,9 +245,6 @@ static int clk_disable_unused(void)
return 0;
}
late_initcall(clk_disable_unused);
-#else
-static inline int clk_disable_unused(struct clk *clk) { return 0; }
-#endif /* CONFIG_COMMON_CLK_DISABLE_UNUSED */
/*** helper functions ***/
@@ -287,7 +283,7 @@ unsigned long __clk_get_rate(struct clk *clk)
unsigned long ret;
if (!clk) {
- ret = -EINVAL;
+ ret = 0;
goto out;
}
@@ -297,7 +293,7 @@ unsigned long __clk_get_rate(struct clk *clk)
goto out;
if (!clk->parent)
- ret = -ENODEV;
+ ret = 0;
out:
return ret;
@@ -562,7 +558,7 @@ EXPORT_SYMBOL_GPL(clk_enable);
* @clk: the clk whose rate is being returned
*
* Simply returns the cached rate of the clk. Does not query the hardware. If
- * clk is NULL then returns -EINVAL.
+ * clk is NULL then returns 0.
*/
unsigned long clk_get_rate(struct clk *clk)
{
@@ -584,18 +580,22 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
*/
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
- unsigned long unused;
+ unsigned long parent_rate = 0;
if (!clk)
return -EINVAL;
- if (!clk->ops->round_rate)
- return clk->rate;
+ if (!clk->ops->round_rate) {
+ if (clk->flags & CLK_SET_RATE_PARENT)
+ return __clk_round_rate(clk->parent, rate);
+ else
+ return clk->rate;
+ }
- if (clk->flags & CLK_SET_RATE_PARENT)
- return clk->ops->round_rate(clk->hw, rate, &unused);
- else
- return clk->ops->round_rate(clk->hw, rate, NULL);
+ if (clk->parent)
+ parent_rate = clk->parent->rate;
+
+ return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}
/**
@@ -765,25 +765,41 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
struct clk *top = clk;
- unsigned long best_parent_rate = clk->parent->rate;
+ unsigned long best_parent_rate = 0;
unsigned long new_rate;
- if (!clk->ops->round_rate && !(clk->flags & CLK_SET_RATE_PARENT)) {
- clk->new_rate = clk->rate;
+ /* sanity */
+ if (IS_ERR_OR_NULL(clk))
+ return NULL;
+
+ /* save parent rate, if it exists */
+ if (clk->parent)
+ best_parent_rate = clk->parent->rate;
+
+ /* never propagate up to the parent */
+ if (!(clk->flags & CLK_SET_RATE_PARENT)) {
+ if (!clk->ops->round_rate) {
+ clk->new_rate = clk->rate;
+ return NULL;
+ }
+ new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
+ goto out;
+ }
+
+ /* need clk->parent from here on out */
+ if (!clk->parent) {
+ pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
return NULL;
}
- if (!clk->ops->round_rate && (clk->flags & CLK_SET_RATE_PARENT)) {
+ if (!clk->ops->round_rate) {
top = clk_calc_new_rates(clk->parent, rate);
- new_rate = clk->new_rate = clk->parent->new_rate;
+ new_rate = clk->parent->new_rate;
goto out;
}
- if (clk->flags & CLK_SET_RATE_PARENT)
- new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
- else
- new_rate = clk->ops->round_rate(clk->hw, rate, NULL);
+ new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
if (best_parent_rate != clk->parent->rate) {
top = clk_calc_new_rates(clk->parent, best_parent_rate);
@@ -839,7 +855,7 @@ static void clk_change_rate(struct clk *clk)
old_rate = clk->rate;
if (clk->ops->set_rate)
- clk->ops->set_rate(clk->hw, clk->new_rate);
+ clk->ops->set_rate(clk->hw, clk->new_rate, clk->parent->rate);
if (clk->ops->recalc_rate)
clk->rate = clk->ops->recalc_rate(clk->hw,
@@ -859,38 +875,19 @@ static void clk_change_rate(struct clk *clk)
* @clk: the clk whose rate is being changed
* @rate: the new rate for clk
*
- * In the simplest case clk_set_rate will only change the rate of clk.
- *
- * If clk has the CLK_SET_RATE_GATE flag set and it is enabled this call
- * will fail; only when the clk is disabled will it be able to change
- * its rate.
+ * In the simplest case clk_set_rate will only adjust the rate of clk.
*
- * Setting the CLK_SET_RATE_PARENT flag allows clk_set_rate to
- * recursively propagate up to clk's parent; whether or not this happens
- * depends on the outcome of clk's .round_rate implementation. If
- * *parent_rate is 0 after calling .round_rate then upstream parent
- * propagation is ignored. If *parent_rate comes back with a new rate
- * for clk's parent then we propagate up to clk's parent and set it's
- * rate. Upward propagation will continue until either a clk does not
- * support the CLK_SET_RATE_PARENT flag or .round_rate stops requesting
- * changes to clk's parent_rate. If there is a failure during upstream
- * propagation then clk_set_rate will unwind and restore each clk's rate
- * that had been successfully changed. Afterwards a rate change abort
- * notification will be propagated downstream, starting from the clk
- * that failed.
+ * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
+ * propagate up to clk's parent; whether or not this happens depends on the
+ * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
+ * after calling .round_rate then upstream parent propagation is ignored. If
+ * *parent_rate comes back with a new rate for clk's parent then we propagate
+ * up to clk's parent and set its rate. Upward propagation will continue
+ * until either a clk does not support the CLK_SET_RATE_PARENT flag or
+ * .round_rate stops requesting changes to clk's parent_rate.
*
- * At the end of all of the rate setting, clk_set_rate internally calls
- * __clk_recalc_rates and propagates the rate changes downstream,
- * starting from the highest clk whose rate was changed. This has the
- * added benefit of propagating post-rate change notifiers.
- *
- * Note that while post-rate change and rate change abort notifications
- * are guaranteed to be sent to a clk only once per call to
- * clk_set_rate, pre-change notifications will be sent for every clk
- * whose rate is changed. Stacking pre-change notifications is noisy
- * for the drivers subscribed to them, but this allows drivers to react
- * to intermediate clk rate changes up until the point where the final
- * rate is achieved at the end of upstream propagation.
+ * Rate changes are accomplished via tree traversal that also recalculates the
+ * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
*
 * Returns 0 on success, a negative error code otherwise.
*/
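Seen from a consumer, the CLK_SET_RATE_GATE check added in the hunk below means a prepared clock refuses rate changes with -EBUSY. A hypothetical driver sequence (foo_clk and the rates are illustrative only):

	ret = clk_set_rate(foo_clk, 48000000);	/* ok: clk not yet prepared */
	if (ret)
		return ret;

	ret = clk_prepare_enable(foo_clk);
	if (ret)
		return ret;

	/* with CLK_SET_RATE_GATE set this now fails with -EBUSY */
	ret = clk_set_rate(foo_clk, 96000000);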
@@ -906,6 +903,11 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
if (rate == clk->rate)
goto out;
+ if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
+ ret = -EBUSY;
+ goto out;
+ }
+
/* calculate new rates and get the topmost changed clock */
top = clk_calc_new_rates(clk, rate);
if (!top) {
@@ -1175,40 +1177,41 @@ EXPORT_SYMBOL_GPL(clk_set_parent);
*
* Initializes the lists in struct clk, queries the hardware for the
* parent and rate and sets them both.
- *
- * Any struct clk passed into __clk_init must have the following members
- * populated:
- * .name
- * .ops
- * .hw
- * .parent_names
- * .num_parents
- * .flags
- *
- * Essentially, everything that would normally be passed into clk_register is
- * assumed to be initialized already in __clk_init. The other members may be
- * populated, but are optional.
- *
- * __clk_init is only exposed via clk-private.h and is intended for use with
- * very large numbers of clocks that need to be statically initialized. It is
- * a layering violation to include clk-private.h from any code which implements
- * a clock's .ops; as such any statically initialized clock data MUST be in a
- * separate C file from the logic that implements it's operations.
*/
-void __clk_init(struct device *dev, struct clk *clk)
+int __clk_init(struct device *dev, struct clk *clk)
{
- int i;
+ int i, ret = 0;
struct clk *orphan;
struct hlist_node *tmp, *tmp2;
if (!clk)
- return;
+ return -EINVAL;
mutex_lock(&prepare_lock);
/* check to see if a clock with this name is already registered */
- if (__clk_lookup(clk->name))
+ if (__clk_lookup(clk->name)) {
+ pr_debug("%s: clk %s already initialized\n",
+ __func__, clk->name);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ /* check that clk_ops are sane. See Documentation/clk.txt */
+ if (clk->ops->set_rate &&
+ !(clk->ops->round_rate && clk->ops->recalc_rate)) {
+ pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
+ __func__, clk->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (clk->ops->set_parent && !clk->ops->get_parent) {
+ pr_warning("%s: %s must implement .get_parent & .set_parent\n",
+ __func__, clk->name);
+ ret = -EINVAL;
goto out;
+ }
/* throw a WARN if any entries in parent_names are NULL */
for (i = 0; i < clk->num_parents; i++)
@@ -1302,48 +1305,130 @@ void __clk_init(struct device *dev, struct clk *clk)
out:
mutex_unlock(&prepare_lock);
- return;
+ return ret;
}
/**
+ * __clk_register - register a clock and return a cookie.
+ *
+ * Same as clk_register, except that the .clk field inside hw shall point to a
+ * preallocated (generally statically allocated) struct clk. None of the fields
+ * of the struct clk need to be initialized.
+ *
+ * The data pointed to by .init and by the .clk field shall NOT be marked as
+ * init data.
+ *
+ * __clk_register is only exposed via clk-private.h and is intended for use with
+ * very large numbers of clocks that need to be statically initialized. It is
+ * a layering violation to include clk-private.h from any code which implements
+ * a clock's .ops; as such any statically initialized clock data MUST be in a
+ * separate C file from the logic that implements its operations. Returns the
+ * registered struct clk on success, or an ERR_PTR() encoded error on failure.
+ */
+struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
+{
+ int ret;
+ struct clk *clk;
+
+ clk = hw->clk;
+ clk->name = hw->init->name;
+ clk->ops = hw->init->ops;
+ clk->hw = hw;
+ clk->flags = hw->init->flags;
+ clk->parent_names = hw->init->parent_names;
+ clk->num_parents = hw->init->num_parents;
+
+ ret = __clk_init(dev, clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk;
+}
+EXPORT_SYMBOL_GPL(__clk_register);
+
+/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
- * @name: clock name
- * @ops: operations this clock supports
* @hw: link to hardware-specific clock data
- * @parent_names: array of string names for all possible parents
- * @num_parents: number of possible parents
- * @flags: framework-level hints and quirks
*
* clk_register is the primary interface for populating the clock tree with new
* clock nodes. It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
- * rest of the clock API.
+ * rest of the clock API. In the event of an error clk_register will return an
+ * error pointer; drivers must check the result with IS_ERR() after calling
+ * clk_register.
*/
-struct clk *clk_register(struct device *dev, const char *name,
- const struct clk_ops *ops, struct clk_hw *hw,
- char **parent_names, u8 num_parents, unsigned long flags)
+struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
+ int i, ret;
struct clk *clk;
clk = kzalloc(sizeof(*clk), GFP_KERNEL);
- if (!clk)
- return NULL;
+ if (!clk) {
+ pr_err("%s: could not allocate clk\n", __func__);
+ ret = -ENOMEM;
+ goto fail_out;
+ }
- clk->name = name;
- clk->ops = ops;
+ clk->name = kstrdup(hw->init->name, GFP_KERNEL);
+ if (!clk->name) {
+ pr_err("%s: could not allocate clk->name\n", __func__);
+ ret = -ENOMEM;
+ goto fail_name;
+ }
+ clk->ops = hw->init->ops;
clk->hw = hw;
- clk->flags = flags;
- clk->parent_names = parent_names;
- clk->num_parents = num_parents;
+ clk->flags = hw->init->flags;
+ clk->num_parents = hw->init->num_parents;
hw->clk = clk;
- __clk_init(dev, clk);
+ /* allocate local copy in case parent_names is __initdata */
+ clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
+ GFP_KERNEL);
- return clk;
+ if (!clk->parent_names) {
+ pr_err("%s: could not allocate clk->parent_names\n", __func__);
+ ret = -ENOMEM;
+ goto fail_parent_names;
+ }
+
+ /* copy each string name in case parent_names is __initdata */
+ for (i = 0; i < clk->num_parents; i++) {
+ clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
+ GFP_KERNEL);
+ if (!clk->parent_names[i]) {
+ pr_err("%s: could not copy parent_names\n", __func__);
+ ret = -ENOMEM;
+ goto fail_parent_names_copy;
+ }
+ }
+
+ ret = __clk_init(dev, clk);
+ if (!ret)
+ return clk;
+
+fail_parent_names_copy:
+ while (--i >= 0)
+ kfree(clk->parent_names[i]);
+ kfree(clk->parent_names);
+fail_parent_names:
+ kfree(clk->name);
+fail_name:
+ kfree(clk);
+fail_out:
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
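The registration pattern under the new signature, sketched with hypothetical foo_* names (the mxs and SPEAr providers added later in this patch follow the same shape): fill a clk_init_data, point hw.init at it, then check the result with IS_ERR().

	struct clk_init_data init = {
		.name		= "foo",
		.ops		= &foo_ops,
		.parent_names	= foo_parents,
		.num_parents	= ARRAY_SIZE(foo_parents),
		.flags		= 0,
	};

	foo->hw.init = &init;

	clk = clk_register(dev, &foo->hw);
	if (IS_ERR(clk))
		return PTR_ERR(clk);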
+/**
+ * clk_unregister - unregister a currently registered clock
+ * @clk: clock to unregister
+ *
+ * Currently unimplemented.
+ */
+void clk_unregister(struct clk *clk) {}
+EXPORT_SYMBOL_GPL(clk_unregister);
+
/*** clk rate change notifiers ***/
/**
diff --git a/drivers/clk/mxs/Makefile b/drivers/clk/mxs/Makefile
new file mode 100644
index 000000000000..7bedeec08524
--- /dev/null
+++ b/drivers/clk/mxs/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for mxs specific clk
+#
+
+obj-y += clk.o clk-pll.o clk-ref.o clk-div.o clk-frac.o
+
+obj-$(CONFIG_SOC_IMX23) += clk-imx23.o
+obj-$(CONFIG_SOC_IMX28) += clk-imx28.o
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
new file mode 100644
index 000000000000..90e1da93877e
--- /dev/null
+++ b/drivers/clk/mxs/clk-div.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_div - mxs integer divider clock
+ * @divider: the parent class
+ * @ops: pointer to clk_ops of parent class
+ * @reg: register address
+ * @busy: busy bit shift
+ *
+ * The mxs divider clock is a subclass of basic clk_divider with an
+ * additional busy bit.
+ */
+struct clk_div {
+ struct clk_divider divider;
+ const struct clk_ops *ops;
+ void __iomem *reg;
+ u8 busy;
+};
+
+static inline struct clk_div *to_clk_div(struct clk_hw *hw)
+{
+ struct clk_divider *divider = container_of(hw, struct clk_divider, hw);
+
+ return container_of(divider, struct clk_div, divider);
+}
+
+static unsigned long clk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_div *div = to_clk_div(hw);
+
+ return div->ops->recalc_rate(&div->divider.hw, parent_rate);
+}
+
+static long clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_div *div = to_clk_div(hw);
+
+ return div->ops->round_rate(&div->divider.hw, rate, prate);
+}
+
+static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_div *div = to_clk_div(hw);
+ int ret;
+
+ ret = div->ops->set_rate(&div->divider.hw, rate, parent_rate);
+ if (!ret)
+ ret = mxs_clk_wait(div->reg, div->busy);
+
+ return ret;
+}
+
+static struct clk_ops clk_div_ops = {
+ .recalc_rate = clk_div_recalc_rate,
+ .round_rate = clk_div_round_rate,
+ .set_rate = clk_div_set_rate,
+};
+
+struct clk *mxs_clk_div(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width, u8 busy)
+{
+ struct clk_div *div;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_div_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ div->reg = reg;
+ div->busy = busy;
+
+ div->divider.reg = reg;
+ div->divider.shift = shift;
+ div->divider.width = width;
+ div->divider.flags = CLK_DIVIDER_ONE_BASED;
+ div->divider.lock = &mxs_lock;
+ div->divider.hw.init = &init;
+ div->ops = &clk_divider_ops;
+
+ clk = clk_register(NULL, &div->divider.hw);
+ if (IS_ERR(clk))
+ kfree(div);
+
+ return clk;
+}
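As a usage note, clk-imx23.c later in this patch registers its dividers through this helper, for example

	clks[ssp_div] = mxs_clk_div("ssp_div", "ssp_sel", SSP, 0, 9, 29);

i.e. a 9-bit divider at bit 0 of the SSP register with the busy flag at bit 29; clk_div_set_rate() above only returns once mxs_clk_wait() has seen that busy bit clear.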
diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c
new file mode 100644
index 000000000000..e6aa6b567d68
--- /dev/null
+++ b/drivers/clk/mxs/clk-frac.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_frac - mxs fractional divider clock
+ * @hw: clk_hw for the fractional divider clock
+ * @reg: register address
+ * @shift: the divider bit shift
+ * @width: the divider bit width
+ * @busy: busy bit shift
+ *
+ * The clock is an adjustable fractional divider with a busy bit to wait
+ * when the divider is adjusted.
+ */
+struct clk_frac {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 shift;
+ u8 width;
+ u8 busy;
+};
+
+#define to_clk_frac(_hw) container_of(_hw, struct clk_frac, hw)
+
+static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ u32 div;
+
+ div = readl_relaxed(frac->reg) >> frac->shift;
+ div &= (1 << frac->width) - 1;
+
+ return (parent_rate >> frac->width) * div;
+}
+
+static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ unsigned long parent_rate = *prate;
+ u32 div;
+ u64 tmp;
+
+ if (rate > parent_rate)
+ return -EINVAL;
+
+ tmp = rate;
+ tmp <<= frac->width;
+ do_div(tmp, parent_rate);
+ div = tmp;
+
+ if (!div)
+ return -EINVAL;
+
+ return (parent_rate >> frac->width) * div;
+}
+
+static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ unsigned long flags;
+ u32 div, val;
+ u64 tmp;
+
+ if (rate > parent_rate)
+ return -EINVAL;
+
+ tmp = rate;
+ tmp <<= frac->width;
+ do_div(tmp, parent_rate);
+ div = tmp;
+
+ if (!div)
+ return -EINVAL;
+
+ spin_lock_irqsave(&mxs_lock, flags);
+
+ val = readl_relaxed(frac->reg);
+ val &= ~(((1 << frac->width) - 1) << frac->shift);
+ val |= div << frac->shift;
+ writel_relaxed(val, frac->reg);
+
+ spin_unlock_irqrestore(&mxs_lock, flags);
+
+ return mxs_clk_wait(frac->reg, frac->busy);
+}
+
+static struct clk_ops clk_frac_ops = {
+ .recalc_rate = clk_frac_recalc_rate,
+ .round_rate = clk_frac_round_rate,
+ .set_rate = clk_frac_set_rate,
+};
+
+struct clk *mxs_clk_frac(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width, u8 busy)
+{
+ struct clk_frac *frac;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ frac = kzalloc(sizeof(*frac), GFP_KERNEL);
+ if (!frac)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_frac_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ frac->reg = reg;
+ frac->shift = shift;
+ frac->width = width;
+ frac->busy = busy;
+ frac->hw.init = &init;
+
+ clk = clk_register(NULL, &frac->hw);
+ if (IS_ERR(clk))
+ kfree(frac);
+
+ return clk;
+}
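A worked example of the fractional-divider math above, using illustrative numbers and width = 16 as the SAIF dividers registered in the SoC files below use: requesting 240 MHz from a 480 MHz parent gives

	div  = (240000000 << 16) / 480000000 = 32768
	rate = (480000000 >> 16) * 32768 = 7324 * 32768 = 239992832 Hz

so the achievable rates are multiples of parent_rate >> width, and the result lands just below the request.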
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
new file mode 100644
index 000000000000..f7be225f544c
--- /dev/null
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <mach/common.h>
+#include <mach/mx23.h>
+#include "clk.h"
+
+#define DIGCTRL MX23_IO_ADDRESS(MX23_DIGCTL_BASE_ADDR)
+#define CLKCTRL MX23_IO_ADDRESS(MX23_CLKCTRL_BASE_ADDR)
+#define PLLCTRL0 (CLKCTRL + 0x0000)
+#define CPU (CLKCTRL + 0x0020)
+#define HBUS (CLKCTRL + 0x0030)
+#define XBUS (CLKCTRL + 0x0040)
+#define XTAL (CLKCTRL + 0x0050)
+#define PIX (CLKCTRL + 0x0060)
+#define SSP (CLKCTRL + 0x0070)
+#define GPMI (CLKCTRL + 0x0080)
+#define SPDIF (CLKCTRL + 0x0090)
+#define EMI (CLKCTRL + 0x00a0)
+#define SAIF (CLKCTRL + 0x00c0)
+#define TV (CLKCTRL + 0x00d0)
+#define ETM (CLKCTRL + 0x00e0)
+#define FRAC (CLKCTRL + 0x00f0)
+#define CLKSEQ (CLKCTRL + 0x0110)
+
+#define BP_CPU_INTERRUPT_WAIT 12
+#define BP_CLKSEQ_BYPASS_SAIF 0
+#define BP_CLKSEQ_BYPASS_SSP 5
+#define BP_SAIF_DIV_FRAC_EN 16
+#define BP_FRAC_IOFRAC 24
+
+static void __init clk_misc_init(void)
+{
+ u32 val;
+
+ /* Gate off cpu clock in WFI for power saving */
+ __mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);
+
+ /* Clear BYPASS for SAIF */
+ __mxs_clrl(1 << BP_CLKSEQ_BYPASS_SAIF, CLKSEQ);
+
+ /* SAIF has to use frac div for functional operation */
+ val = readl_relaxed(SAIF);
+ val |= 1 << BP_SAIF_DIV_FRAC_EN;
+ writel_relaxed(val, SAIF);
+
+ /*
+ * Source the ssp clock from ref_io rather than ref_xtal,
+ * as ref_xtal provides only 24 MHz at maximum.
+ */
+ __mxs_clrl(1 << BP_CLKSEQ_BYPASS_SSP, CLKSEQ);
+
+ /*
+ * 480 MHz seems too high to be used as the ssp clock source directly,
+ * so set frac to get a 288 MHz ref_io.
+ */
+ __mxs_clrl(0x3f << BP_FRAC_IOFRAC, FRAC);
+ __mxs_setl(30 << BP_FRAC_IOFRAC, FRAC);
+}
+
+static struct clk_lookup uart_lookups[] __initdata = {
+ { .dev_id = "duart", },
+ { .dev_id = "mxs-auart.0", },
+ { .dev_id = "mxs-auart.1", },
+ { .dev_id = "8006c000.serial", },
+ { .dev_id = "8006e000.serial", },
+ { .dev_id = "80070000.serial", },
+};
+
+static struct clk_lookup hbus_lookups[] __initdata = {
+ { .dev_id = "imx23-dma-apbh", },
+ { .dev_id = "80004000.dma-apbh", },
+};
+
+static struct clk_lookup xbus_lookups[] __initdata = {
+ { .dev_id = "duart", .con_id = "apb_pclk"},
+ { .dev_id = "80070000.serial", .con_id = "apb_pclk"},
+ { .dev_id = "imx23-dma-apbx", },
+ { .dev_id = "80024000.dma-apbx", },
+};
+
+static struct clk_lookup ssp_lookups[] __initdata = {
+ { .dev_id = "imx23-mmc.0", },
+ { .dev_id = "imx23-mmc.1", },
+ { .dev_id = "80010000.ssp", },
+ { .dev_id = "80034000.ssp", },
+};
+
+static struct clk_lookup lcdif_lookups[] __initdata = {
+ { .dev_id = "imx23-fb", },
+ { .dev_id = "80030000.lcdif", },
+};
+
+static struct clk_lookup gpmi_lookups[] __initdata = {
+ { .dev_id = "imx23-gpmi-nand", },
+ { .dev_id = "8000c000.gpmi", },
+};
+
+static const char *sel_pll[] __initconst = { "pll", "ref_xtal", };
+static const char *sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", };
+static const char *sel_pix[] __initconst = { "ref_pix", "ref_xtal", };
+static const char *sel_io[] __initconst = { "ref_io", "ref_xtal", };
+static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
+static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", };
+
+enum imx23_clk {
+ ref_xtal, pll, ref_cpu, ref_emi, ref_pix, ref_io, saif_sel,
+ lcdif_sel, gpmi_sel, ssp_sel, emi_sel, cpu, etm_sel, cpu_pll,
+ cpu_xtal, hbus, xbus, lcdif_div, ssp_div, gpmi_div, emi_pll,
+ emi_xtal, etm_div, saif_div, clk32k_div, rtc, adc, spdif_div,
+ clk32k, dri, pwm, filt, uart, ssp, gpmi, spdif, emi, saif,
+ lcdif, etm, usb, usb_pwr,
+ clk_max
+};
+
+static struct clk *clks[clk_max];
+
+static enum imx23_clk clks_init_on[] __initdata = {
+ cpu, hbus, xbus, emi, uart,
+};
+
+int __init mx23_clocks_init(void)
+{
+ int i;
+
+ clk_misc_init();
+
+ clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000);
+ clks[pll] = mxs_clk_pll("pll", "ref_xtal", PLLCTRL0, 16, 480000000);
+ clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll", FRAC, 0);
+ clks[ref_emi] = mxs_clk_ref("ref_emi", "pll", FRAC, 1);
+ clks[ref_pix] = mxs_clk_ref("ref_pix", "pll", FRAC, 2);
+ clks[ref_io] = mxs_clk_ref("ref_io", "pll", FRAC, 3);
+ clks[saif_sel] = mxs_clk_mux("saif_sel", CLKSEQ, 0, 1, sel_pll, ARRAY_SIZE(sel_pll));
+ clks[lcdif_sel] = mxs_clk_mux("lcdif_sel", CLKSEQ, 1, 1, sel_pix, ARRAY_SIZE(sel_pix));
+ clks[gpmi_sel] = mxs_clk_mux("gpmi_sel", CLKSEQ, 4, 1, sel_io, ARRAY_SIZE(sel_io));
+ clks[ssp_sel] = mxs_clk_mux("ssp_sel", CLKSEQ, 5, 1, sel_io, ARRAY_SIZE(sel_io));
+ clks[emi_sel] = mxs_clk_mux("emi_sel", CLKSEQ, 6, 1, emi_sels, ARRAY_SIZE(emi_sels));
+ clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 7, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
+ clks[etm_sel] = mxs_clk_mux("etm_sel", CLKSEQ, 8, 1, sel_cpu, ARRAY_SIZE(sel_cpu));
+ clks[cpu_pll] = mxs_clk_div("cpu_pll", "ref_cpu", CPU, 0, 6, 28);
+ clks[cpu_xtal] = mxs_clk_div("cpu_xtal", "ref_xtal", CPU, 16, 10, 29);
+ clks[hbus] = mxs_clk_div("hbus", "cpu", HBUS, 0, 5, 29);
+ clks[xbus] = mxs_clk_div("xbus", "ref_xtal", XBUS, 0, 10, 31);
+ clks[lcdif_div] = mxs_clk_div("lcdif_div", "lcdif_sel", PIX, 0, 12, 29);
+ clks[ssp_div] = mxs_clk_div("ssp_div", "ssp_sel", SSP, 0, 9, 29);
+ clks[gpmi_div] = mxs_clk_div("gpmi_div", "gpmi_sel", GPMI, 0, 10, 29);
+ clks[emi_pll] = mxs_clk_div("emi_pll", "ref_emi", EMI, 0, 6, 28);
+ clks[emi_xtal] = mxs_clk_div("emi_xtal", "ref_xtal", EMI, 8, 4, 29);
+ clks[etm_div] = mxs_clk_div("etm_div", "etm_sel", ETM, 0, 6, 29);
+ clks[saif_div] = mxs_clk_frac("saif_div", "saif_sel", SAIF, 0, 16, 29);
+ clks[clk32k_div] = mxs_clk_fixed_factor("clk32k_div", "ref_xtal", 1, 750);
+ clks[rtc] = mxs_clk_fixed_factor("rtc", "ref_xtal", 1, 768);
+ clks[adc] = mxs_clk_fixed_factor("adc", "clk32k", 1, 16);
+ clks[spdif_div] = mxs_clk_fixed_factor("spdif_div", "pll", 1, 4);
+ clks[clk32k] = mxs_clk_gate("clk32k", "clk32k_div", XTAL, 26);
+ clks[dri] = mxs_clk_gate("dri", "ref_xtal", XTAL, 28);
+ clks[pwm] = mxs_clk_gate("pwm", "ref_xtal", XTAL, 29);
+ clks[filt] = mxs_clk_gate("filt", "ref_xtal", XTAL, 30);
+ clks[uart] = mxs_clk_gate("uart", "ref_xtal", XTAL, 31);
+ clks[ssp] = mxs_clk_gate("ssp", "ssp_div", SSP, 31);
+ clks[gpmi] = mxs_clk_gate("gpmi", "gpmi_div", GPMI, 31);
+ clks[spdif] = mxs_clk_gate("spdif", "spdif_div", SPDIF, 31);
+ clks[emi] = mxs_clk_gate("emi", "emi_sel", EMI, 31);
+ clks[saif] = mxs_clk_gate("saif", "saif_div", SAIF, 31);
+ clks[lcdif] = mxs_clk_gate("lcdif", "lcdif_div", PIX, 31);
+ clks[etm] = mxs_clk_gate("etm", "etm_div", ETM, 31);
+ clks[usb] = mxs_clk_gate("usb", "usb_pwr", DIGCTRL, 2);
+ clks[usb_pwr] = clk_register_gate(NULL, "usb_pwr", "pll", 0, PLLCTRL0, 18, 0, &mxs_lock);
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ if (IS_ERR(clks[i])) {
+ pr_err("i.MX23 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ return PTR_ERR(clks[i]);
+ }
+
+ clk_register_clkdev(clks[clk32k], NULL, "timrot");
+ clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
+ clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
+ clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
+ clk_register_clkdevs(clks[ssp], ssp_lookups, ARRAY_SIZE(ssp_lookups));
+ clk_register_clkdevs(clks[gpmi], gpmi_lookups, ARRAY_SIZE(gpmi_lookups));
+ clk_register_clkdevs(clks[lcdif], lcdif_lookups, ARRAY_SIZE(lcdif_lookups));
+
+ for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+ clk_prepare_enable(clks[clks_init_on[i]]);
+
+ mxs_timer_init(MX23_INT_TIMER0);
+
+ return 0;
+}
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
new file mode 100644
index 000000000000..2826a2606a29
--- /dev/null
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <mach/common.h>
+#include <mach/mx28.h>
+#include "clk.h"
+
+#define CLKCTRL MX28_IO_ADDRESS(MX28_CLKCTRL_BASE_ADDR)
+#define PLL0CTRL0 (CLKCTRL + 0x0000)
+#define PLL1CTRL0 (CLKCTRL + 0x0020)
+#define PLL2CTRL0 (CLKCTRL + 0x0040)
+#define CPU (CLKCTRL + 0x0050)
+#define HBUS (CLKCTRL + 0x0060)
+#define XBUS (CLKCTRL + 0x0070)
+#define XTAL (CLKCTRL + 0x0080)
+#define SSP0 (CLKCTRL + 0x0090)
+#define SSP1 (CLKCTRL + 0x00a0)
+#define SSP2 (CLKCTRL + 0x00b0)
+#define SSP3 (CLKCTRL + 0x00c0)
+#define GPMI (CLKCTRL + 0x00d0)
+#define SPDIF (CLKCTRL + 0x00e0)
+#define EMI (CLKCTRL + 0x00f0)
+#define SAIF0 (CLKCTRL + 0x0100)
+#define SAIF1 (CLKCTRL + 0x0110)
+#define LCDIF (CLKCTRL + 0x0120)
+#define ETM (CLKCTRL + 0x0130)
+#define ENET (CLKCTRL + 0x0140)
+#define FLEXCAN (CLKCTRL + 0x0160)
+#define FRAC0 (CLKCTRL + 0x01b0)
+#define FRAC1 (CLKCTRL + 0x01c0)
+#define CLKSEQ (CLKCTRL + 0x01d0)
+
+#define BP_CPU_INTERRUPT_WAIT 12
+#define BP_SAIF_DIV_FRAC_EN 16
+#define BP_ENET_DIV_TIME 21
+#define BP_ENET_SLEEP 31
+#define BP_CLKSEQ_BYPASS_SAIF0 0
+#define BP_CLKSEQ_BYPASS_SSP0 3
+#define BP_FRAC0_IO1FRAC 16
+#define BP_FRAC0_IO0FRAC 24
+
+#define DIGCTRL MX28_IO_ADDRESS(MX28_DIGCTL_BASE_ADDR)
+#define BP_SAIF_CLKMUX 10
+
+/*
+ * HW_SAIF_CLKMUX_SEL:
+ * DIRECT(0x0): SAIF0 clock pins selected for SAIF0 input clocks, and SAIF1
+ * clock pins selected for SAIF1 input clocks.
+ * CROSSINPUT(0x1): SAIF1 clock inputs selected for SAIF0 input clocks, and
+ * SAIF0 clock inputs selected for SAIF1 input clocks.
+ * EXTMSTR0(0x2): SAIF0 clock pin selected for both SAIF0 and SAIF1 input
+ * clocks.
+ * EXTMSTR1(0x3): SAIF1 clock pin selected for both SAIF0 and SAIF1 input
+ * clocks.
+ */
+int mxs_saif_clkmux_select(unsigned int clkmux)
+{
+ if (clkmux > 0x3)
+ return -EINVAL;
+
+ __mxs_clrl(0x3 << BP_SAIF_CLKMUX, DIGCTRL);
+ __mxs_setl(clkmux << BP_SAIF_CLKMUX, DIGCTRL);
+
+ return 0;
+}
+
+static void __init clk_misc_init(void)
+{
+ u32 val;
+
+ /* Gate off cpu clock in WFI for power saving */
+ __mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);
+
+ /* 0 is a bad default value for a divider */
+ __mxs_setl(1 << BP_ENET_DIV_TIME, ENET);
+
+ /* Clear BYPASS for SAIF */
+ __mxs_clrl(0x3 << BP_CLKSEQ_BYPASS_SAIF0, CLKSEQ);
+
+ /* SAIF has to use frac div for functional operation */
+ val = readl_relaxed(SAIF0);
+ val |= 1 << BP_SAIF_DIV_FRAC_EN;
+ writel_relaxed(val, SAIF0);
+
+ val = readl_relaxed(SAIF1);
+ val |= 1 << BP_SAIF_DIV_FRAC_EN;
+ writel_relaxed(val, SAIF1);
+
+ /* Extra fec clock setting */
+ val = readl_relaxed(ENET);
+ val &= ~(1 << BP_ENET_SLEEP);
+ writel_relaxed(val, ENET);
+
+ /*
+ * Source the ssp clock from ref_io rather than ref_xtal,
+ * as ref_xtal provides only 24 MHz at maximum.
+ */
+ __mxs_clrl(0xf << BP_CLKSEQ_BYPASS_SSP0, CLKSEQ);
+
+ /*
+ * 480 MHz seems too high to be used as the ssp clock source directly,
+ * so set frac0 to get a 288 MHz ref_io0.
+ */
+ val = readl_relaxed(FRAC0);
+ val &= ~(0x3f << BP_FRAC0_IO0FRAC);
+ val |= 30 << BP_FRAC0_IO0FRAC;
+ writel_relaxed(val, FRAC0);
+}
+
+static struct clk_lookup uart_lookups[] __initdata = {
+ { .dev_id = "duart", },
+ { .dev_id = "mxs-auart.0", },
+ { .dev_id = "mxs-auart.1", },
+ { .dev_id = "mxs-auart.2", },
+ { .dev_id = "mxs-auart.3", },
+ { .dev_id = "mxs-auart.4", },
+ { .dev_id = "8006a000.serial", },
+ { .dev_id = "8006c000.serial", },
+ { .dev_id = "8006e000.serial", },
+ { .dev_id = "80070000.serial", },
+ { .dev_id = "80072000.serial", },
+ { .dev_id = "80074000.serial", },
+};
+
+static struct clk_lookup hbus_lookups[] __initdata = {
+ { .dev_id = "imx28-dma-apbh", },
+ { .dev_id = "80004000.dma-apbh", },
+};
+
+static struct clk_lookup xbus_lookups[] __initdata = {
+ { .dev_id = "duart", .con_id = "apb_pclk"},
+ { .dev_id = "80074000.serial", .con_id = "apb_pclk"},
+ { .dev_id = "imx28-dma-apbx", },
+ { .dev_id = "80024000.dma-apbx", },
+};
+
+static struct clk_lookup ssp0_lookups[] __initdata = {
+ { .dev_id = "imx28-mmc.0", },
+ { .dev_id = "80010000.ssp", },
+};
+
+static struct clk_lookup ssp1_lookups[] __initdata = {
+ { .dev_id = "imx28-mmc.1", },
+ { .dev_id = "80012000.ssp", },
+};
+
+static struct clk_lookup ssp2_lookups[] __initdata = {
+ { .dev_id = "imx28-mmc.2", },
+ { .dev_id = "80014000.ssp", },
+};
+
+static struct clk_lookup ssp3_lookups[] __initdata = {
+ { .dev_id = "imx28-mmc.3", },
+ { .dev_id = "80016000.ssp", },
+};
+
+static struct clk_lookup lcdif_lookups[] __initdata = {
+ { .dev_id = "imx28-fb", },
+ { .dev_id = "80030000.lcdif", },
+};
+
+static struct clk_lookup gpmi_lookups[] __initdata = {
+ { .dev_id = "imx28-gpmi-nand", },
+ { .dev_id = "8000c000.gpmi", },
+};
+
+static struct clk_lookup fec_lookups[] __initdata = {
+ { .dev_id = "imx28-fec.0", },
+ { .dev_id = "imx28-fec.1", },
+ { .dev_id = "800f0000.ethernet", },
+ { .dev_id = "800f4000.ethernet", },
+};
+
+static struct clk_lookup can0_lookups[] __initdata = {
+ { .dev_id = "flexcan.0", },
+ { .dev_id = "80032000.can", },
+};
+
+static struct clk_lookup can1_lookups[] __initdata = {
+ { .dev_id = "flexcan.1", },
+ { .dev_id = "80034000.can", },
+};
+
+static struct clk_lookup saif0_lookups[] __initdata = {
+ { .dev_id = "mxs-saif.0", },
+ { .dev_id = "80042000.saif", },
+};
+
+static struct clk_lookup saif1_lookups[] __initdata = {
+ { .dev_id = "mxs-saif.1", },
+ { .dev_id = "80046000.saif", },
+};
+
+static const char *sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", };
+static const char *sel_io0[] __initconst = { "ref_io0", "ref_xtal", };
+static const char *sel_io1[] __initconst = { "ref_io1", "ref_xtal", };
+static const char *sel_pix[] __initconst = { "ref_pix", "ref_xtal", };
+static const char *sel_gpmi[] __initconst = { "ref_gpmi", "ref_xtal", };
+static const char *sel_pll0[] __initconst = { "pll0", "ref_xtal", };
+static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
+static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", };
+static const char *ptp_sels[] __initconst = { "ref_xtal", "pll0", };
+
+enum imx28_clk {
+ ref_xtal, pll0, pll1, pll2, ref_cpu, ref_emi, ref_io0, ref_io1,
+ ref_pix, ref_hsadc, ref_gpmi, saif0_sel, saif1_sel, gpmi_sel,
+ ssp0_sel, ssp1_sel, ssp2_sel, ssp3_sel, emi_sel, etm_sel,
+ lcdif_sel, cpu, ptp_sel, cpu_pll, cpu_xtal, hbus, xbus,
+ ssp0_div, ssp1_div, ssp2_div, ssp3_div, gpmi_div, emi_pll,
+ emi_xtal, lcdif_div, etm_div, ptp, saif0_div, saif1_div,
+ clk32k_div, rtc, lradc, spdif_div, clk32k, pwm, uart, ssp0,
+ ssp1, ssp2, ssp3, gpmi, spdif, emi, saif0, saif1, lcdif, etm,
+ fec, can0, can1, usb0, usb1, usb0_pwr, usb1_pwr, enet_out,
+ clk_max
+};
+
+static struct clk *clks[clk_max];
+
+static enum imx28_clk clks_init_on[] __initdata = {
+ cpu, hbus, xbus, emi, uart,
+};
+
+int __init mx28_clocks_init(void)
+{
+ int i;
+
+ clk_misc_init();
+
+ clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000);
+ clks[pll0] = mxs_clk_pll("pll0", "ref_xtal", PLL0CTRL0, 17, 480000000);
+ clks[pll1] = mxs_clk_pll("pll1", "ref_xtal", PLL1CTRL0, 17, 480000000);
+ clks[pll2] = mxs_clk_pll("pll2", "ref_xtal", PLL2CTRL0, 23, 50000000);
+ clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll0", FRAC0, 0);
+ clks[ref_emi] = mxs_clk_ref("ref_emi", "pll0", FRAC0, 1);
+ clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 2);
+ clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 3);
+ clks[ref_pix] = mxs_clk_ref("ref_pix", "pll0", FRAC1, 0);
+ clks[ref_hsadc] = mxs_clk_ref("ref_hsadc", "pll0", FRAC1, 1);
+ clks[ref_gpmi] = mxs_clk_ref("ref_gpmi", "pll0", FRAC1, 2);
+ clks[saif0_sel] = mxs_clk_mux("saif0_sel", CLKSEQ, 0, 1, sel_pll0, ARRAY_SIZE(sel_pll0));
+ clks[saif1_sel] = mxs_clk_mux("saif1_sel", CLKSEQ, 1, 1, sel_pll0, ARRAY_SIZE(sel_pll0));
+ clks[gpmi_sel] = mxs_clk_mux("gpmi_sel", CLKSEQ, 2, 1, sel_gpmi, ARRAY_SIZE(sel_gpmi));
+ clks[ssp0_sel] = mxs_clk_mux("ssp0_sel", CLKSEQ, 3, 1, sel_io0, ARRAY_SIZE(sel_io0));
+ clks[ssp1_sel] = mxs_clk_mux("ssp1_sel", CLKSEQ, 4, 1, sel_io0, ARRAY_SIZE(sel_io0));
+ clks[ssp2_sel] = mxs_clk_mux("ssp2_sel", CLKSEQ, 5, 1, sel_io1, ARRAY_SIZE(sel_io1));
+ clks[ssp3_sel] = mxs_clk_mux("ssp3_sel", CLKSEQ, 6, 1, sel_io1, ARRAY_SIZE(sel_io1));
+ clks[emi_sel] = mxs_clk_mux("emi_sel", CLKSEQ, 7, 1, emi_sels, ARRAY_SIZE(emi_sels));
+ clks[etm_sel] = mxs_clk_mux("etm_sel", CLKSEQ, 8, 1, sel_cpu, ARRAY_SIZE(sel_cpu));
+ clks[lcdif_sel] = mxs_clk_mux("lcdif_sel", CLKSEQ, 14, 1, sel_pix, ARRAY_SIZE(sel_pix));
+ clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 18, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
+ clks[ptp_sel] = mxs_clk_mux("ptp_sel", ENET, 19, 1, ptp_sels, ARRAY_SIZE(ptp_sels));
+ clks[cpu_pll] = mxs_clk_div("cpu_pll", "ref_cpu", CPU, 0, 6, 28);
+ clks[cpu_xtal] = mxs_clk_div("cpu_xtal", "ref_xtal", CPU, 16, 10, 29);
+ clks[hbus] = mxs_clk_div("hbus", "cpu", HBUS, 0, 5, 31);
+ clks[xbus] = mxs_clk_div("xbus", "ref_xtal", XBUS, 0, 10, 31);
+ clks[ssp0_div] = mxs_clk_div("ssp0_div", "ssp0_sel", SSP0, 0, 9, 29);
+ clks[ssp1_div] = mxs_clk_div("ssp1_div", "ssp1_sel", SSP1, 0, 9, 29);
+ clks[ssp2_div] = mxs_clk_div("ssp2_div", "ssp2_sel", SSP2, 0, 9, 29);
+ clks[ssp3_div] = mxs_clk_div("ssp3_div", "ssp3_sel", SSP3, 0, 9, 29);
+ clks[gpmi_div] = mxs_clk_div("gpmi_div", "gpmi_sel", GPMI, 0, 10, 29);
+ clks[emi_pll] = mxs_clk_div("emi_pll", "ref_emi", EMI, 0, 6, 28);
+ clks[emi_xtal] = mxs_clk_div("emi_xtal", "ref_xtal", EMI, 8, 4, 29);
+ clks[lcdif_div] = mxs_clk_div("lcdif_div", "lcdif_sel", LCDIF, 0, 13, 29);
+ clks[etm_div] = mxs_clk_div("etm_div", "etm_sel", ETM, 0, 7, 29);
+ clks[ptp] = mxs_clk_div("ptp", "ptp_sel", ENET, 21, 6, 27);
+ clks[saif0_div] = mxs_clk_frac("saif0_div", "saif0_sel", SAIF0, 0, 16, 29);
+ clks[saif1_div] = mxs_clk_frac("saif1_div", "saif1_sel", SAIF1, 0, 16, 29);
+ clks[clk32k_div] = mxs_clk_fixed_factor("clk32k_div", "ref_xtal", 1, 750);
+ clks[rtc] = mxs_clk_fixed_factor("rtc", "ref_xtal", 1, 768);
+ clks[lradc] = mxs_clk_fixed_factor("lradc", "clk32k", 1, 16);
+ clks[spdif_div] = mxs_clk_fixed_factor("spdif_div", "pll0", 1, 4);
+ clks[clk32k] = mxs_clk_gate("clk32k", "clk32k_div", XTAL, 26);
+ clks[pwm] = mxs_clk_gate("pwm", "ref_xtal", XTAL, 29);
+ clks[uart] = mxs_clk_gate("uart", "ref_xtal", XTAL, 31);
+ clks[ssp0] = mxs_clk_gate("ssp0", "ssp0_div", SSP0, 31);
+ clks[ssp1] = mxs_clk_gate("ssp1", "ssp1_div", SSP1, 31);
+ clks[ssp2] = mxs_clk_gate("ssp2", "ssp2_div", SSP2, 31);
+ clks[ssp3] = mxs_clk_gate("ssp3", "ssp3_div", SSP3, 31);
+ clks[gpmi] = mxs_clk_gate("gpmi", "gpmi_div", GPMI, 31);
+ clks[spdif] = mxs_clk_gate("spdif", "spdif_div", SPDIF, 31);
+ clks[emi] = mxs_clk_gate("emi", "emi_sel", EMI, 31);
+ clks[saif0] = mxs_clk_gate("saif0", "saif0_div", SAIF0, 31);
+ clks[saif1] = mxs_clk_gate("saif1", "saif1_div", SAIF1, 31);
+ clks[lcdif] = mxs_clk_gate("lcdif", "lcdif_div", LCDIF, 31);
+ clks[etm] = mxs_clk_gate("etm", "etm_div", ETM, 31);
+ clks[fec] = mxs_clk_gate("fec", "hbus", ENET, 30);
+ clks[can0] = mxs_clk_gate("can0", "ref_xtal", FLEXCAN, 30);
+ clks[can1] = mxs_clk_gate("can1", "ref_xtal", FLEXCAN, 28);
+ clks[usb0] = mxs_clk_gate("usb0", "usb0_pwr", DIGCTRL, 2);
+ clks[usb1] = mxs_clk_gate("usb1", "usb1_pwr", DIGCTRL, 16);
+ clks[usb0_pwr] = clk_register_gate(NULL, "usb0_pwr", "pll0", 0, PLL0CTRL0, 18, 0, &mxs_lock);
+ clks[usb1_pwr] = clk_register_gate(NULL, "usb1_pwr", "pll1", 0, PLL1CTRL0, 18, 0, &mxs_lock);
+ clks[enet_out] = clk_register_gate(NULL, "enet_out", "pll2", 0, ENET, 18, 0, &mxs_lock);
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ if (IS_ERR(clks[i])) {
+ pr_err("i.MX28 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ return PTR_ERR(clks[i]);
+ }
+
+ clk_register_clkdev(clks[clk32k], NULL, "timrot");
+ clk_register_clkdev(clks[enet_out], NULL, "enet_out");
+ clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
+ clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
+ clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
+ clk_register_clkdevs(clks[ssp0], ssp0_lookups, ARRAY_SIZE(ssp0_lookups));
+ clk_register_clkdevs(clks[ssp1], ssp1_lookups, ARRAY_SIZE(ssp1_lookups));
+ clk_register_clkdevs(clks[ssp2], ssp2_lookups, ARRAY_SIZE(ssp2_lookups));
+ clk_register_clkdevs(clks[ssp3], ssp3_lookups, ARRAY_SIZE(ssp3_lookups));
+ clk_register_clkdevs(clks[gpmi], gpmi_lookups, ARRAY_SIZE(gpmi_lookups));
+ clk_register_clkdevs(clks[saif0], saif0_lookups, ARRAY_SIZE(saif0_lookups));
+ clk_register_clkdevs(clks[saif1], saif1_lookups, ARRAY_SIZE(saif1_lookups));
+ clk_register_clkdevs(clks[lcdif], lcdif_lookups, ARRAY_SIZE(lcdif_lookups));
+ clk_register_clkdevs(clks[fec], fec_lookups, ARRAY_SIZE(fec_lookups));
+ clk_register_clkdevs(clks[can0], can0_lookups, ARRAY_SIZE(can0_lookups));
+ clk_register_clkdevs(clks[can1], can1_lookups, ARRAY_SIZE(can1_lookups));
+
+ for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+ clk_prepare_enable(clks[clks_init_on[i]]);
+
+ mxs_timer_init(MX28_INT_TIMER0);
+
+ return 0;
+}
diff --git a/drivers/clk/mxs/clk-pll.c b/drivers/clk/mxs/clk-pll.c
new file mode 100644
index 000000000000..fadae41833ec
--- /dev/null
+++ b/drivers/clk/mxs/clk-pll.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_pll - mxs pll clock
+ * @hw: clk_hw for the pll
+ * @base: base address of the pll
+ * @power: the shift of power bit
+ * @rate: the clock rate of the pll
+ *
+ * The mxs pll is a fixed rate clock with power and gate control,
+ * and the shift of gate bit is always 31.
+ */
+struct clk_pll {
+ struct clk_hw hw;
+ void __iomem *base;
+ u8 power;
+ unsigned long rate;
+};
+
+#define to_clk_pll(_hw) container_of(_hw, struct clk_pll, hw)
+
+static int clk_pll_prepare(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ writel_relaxed(1 << pll->power, pll->base + SET);
+
+ udelay(10);
+
+ return 0;
+}
+
+static void clk_pll_unprepare(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ writel_relaxed(1 << pll->power, pll->base + CLR);
+}
+
+static int clk_pll_enable(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ writel_relaxed(1 << 31, pll->base + CLR);
+
+ return 0;
+}
+
+static void clk_pll_disable(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ writel_relaxed(1 << 31, pll->base + SET);
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ return pll->rate;
+}
+
+static const struct clk_ops clk_pll_ops = {
+ .prepare = clk_pll_prepare,
+ .unprepare = clk_pll_unprepare,
+ .enable = clk_pll_enable,
+ .disable = clk_pll_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+};
+
+struct clk *mxs_clk_pll(const char *name, const char *parent_name,
+ void __iomem *base, u8 power, unsigned long rate)
+{
+ struct clk_pll *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_pll_ops;
+ init.flags = 0;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ pll->base = base;
+ pll->rate = rate;
+ pll->power = power;
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
diff --git a/drivers/clk/mxs/clk-ref.c b/drivers/clk/mxs/clk-ref.c
new file mode 100644
index 000000000000..4adeed6c2f94
--- /dev/null
+++ b/drivers/clk/mxs/clk-ref.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_ref - mxs reference clock
+ * @hw: clk_hw for the reference clock
+ * @reg: register address
+ * @idx: the index of the reference clock within the same register
+ *
+ * The mxs reference clock sources from pll. Every 4 reference clocks share
+ * one register space, and @idx is used to identify them. Each reference
+ * clock has a gate control and a fractional divider. The rate is calculated
+ * as pll rate * (18 / FRAC), where FRAC = 18 ~ 35.
+ */
+struct clk_ref {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 idx;
+};
+
+#define to_clk_ref(_hw) container_of(_hw, struct clk_ref, hw)
+
+static int clk_ref_enable(struct clk_hw *hw)
+{
+ struct clk_ref *ref = to_clk_ref(hw);
+
+ writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + CLR);
+
+ return 0;
+}
+
+static void clk_ref_disable(struct clk_hw *hw)
+{
+ struct clk_ref *ref = to_clk_ref(hw);
+
+ writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + SET);
+}
+
+static unsigned long clk_ref_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_ref *ref = to_clk_ref(hw);
+ u64 tmp = parent_rate;
+ u8 frac = (readl_relaxed(ref->reg) >> (ref->idx * 8)) & 0x3f;
+
+ tmp *= 18;
+ do_div(tmp, frac);
+
+ return tmp;
+}
+
+static long clk_ref_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ unsigned long parent_rate = *prate;
+ u64 tmp = parent_rate;
+ u8 frac;
+
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+
+ if (frac < 18)
+ frac = 18;
+ else if (frac > 35)
+ frac = 35;
+
+ tmp = parent_rate;
+ tmp *= 18;
+ do_div(tmp, frac);
+
+ return tmp;
+}
+
+static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_ref *ref = to_clk_ref(hw);
+ unsigned long flags;
+ u64 tmp = parent_rate;
+ u32 val;
+ u8 frac, shift = ref->idx * 8;
+
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+
+ if (frac < 18)
+ frac = 18;
+ else if (frac > 35)
+ frac = 35;
+
+ spin_lock_irqsave(&mxs_lock, flags);
+
+ val = readl_relaxed(ref->reg);
+ val &= ~(0x3f << shift);
+ val |= frac << shift;
+ writel_relaxed(val, ref->reg);
+
+ spin_unlock_irqrestore(&mxs_lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops clk_ref_ops = {
+ .enable = clk_ref_enable,
+ .disable = clk_ref_disable,
+ .recalc_rate = clk_ref_recalc_rate,
+ .round_rate = clk_ref_round_rate,
+ .set_rate = clk_ref_set_rate,
+};
+
+struct clk *mxs_clk_ref(const char *name, const char *parent_name,
+ void __iomem *reg, u8 idx)
+{
+ struct clk_ref *ref;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_ref_ops;
+ init.flags = 0;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ ref->reg = reg;
+ ref->idx = idx;
+ ref->hw.init = &init;
+
+ clk = clk_register(NULL, &ref->hw);
+ if (IS_ERR(clk))
+ kfree(ref);
+
+ return clk;
+}
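Plugging in the values the SoC files above actually program: with the 480 MHz pll and FRAC = 30 (the 30 << BP_FRAC_IOFRAC write in clk_misc_init()), ref_io = 480 MHz * 18 / 30 = 288 MHz, which is exactly the "288 MHz ref_io" those comments ask for. The 18..35 clamp in round_rate/set_rate therefore bounds a ref clock to between roughly pll * 18 / 35 (about 247 MHz for a 480 MHz pll) and the full pll rate.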
diff --git a/drivers/clk/mxs/clk.c b/drivers/clk/mxs/clk.c
new file mode 100644
index 000000000000..b24d56067c80
--- /dev/null
+++ b/drivers/clk/mxs/clk.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+
+DEFINE_SPINLOCK(mxs_lock);
+
+int mxs_clk_wait(void __iomem *reg, u8 shift)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(10);
+
+ while (readl_relaxed(reg) & (1 << shift))
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
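mxs_clk_wait() is what makes the busy-bit clocks above synchronous: clk_div_set_rate() and clk_frac_set_rate() call it right after writing the new divider, e.g.

	ret = mxs_clk_wait(div->reg, div->busy);

and it gives up with -ETIMEDOUT if the bit has not cleared within 10 ms.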
diff --git a/drivers/clk/mxs/clk.h b/drivers/clk/mxs/clk.h
new file mode 100644
index 000000000000..81421e28e69c
--- /dev/null
+++ b/drivers/clk/mxs/clk.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef __MXS_CLK_H
+#define __MXS_CLK_H
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#define SET 0x4
+#define CLR 0x8
+
+extern spinlock_t mxs_lock;
+
+int mxs_clk_wait(void __iomem *reg, u8 shift);
+
+struct clk *mxs_clk_pll(const char *name, const char *parent_name,
+ void __iomem *base, u8 power, unsigned long rate);
+
+struct clk *mxs_clk_ref(const char *name, const char *parent_name,
+ void __iomem *reg, u8 idx);
+
+struct clk *mxs_clk_div(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width, u8 busy);
+
+struct clk *mxs_clk_frac(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width, u8 busy);
+
+static inline struct clk *mxs_clk_fixed(const char *name, int rate)
+{
+ return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+}
+
+static inline struct clk *mxs_clk_gate(const char *name,
+ const char *parent_name, void __iomem *reg, u8 shift)
+{
+ return clk_register_gate(NULL, name, parent_name, CLK_SET_RATE_PARENT,
+ reg, shift, CLK_GATE_SET_TO_DISABLE,
+ &mxs_lock);
+}
+
+static inline struct clk *mxs_clk_mux(const char *name, void __iomem *reg,
+ u8 shift, u8 width, const char **parent_names, int num_parents)
+{
+ return clk_register_mux(NULL, name, parent_names, num_parents,
+ CLK_SET_RATE_PARENT, reg, shift, width,
+ 0, &mxs_lock);
+}
+
+static inline struct clk *mxs_clk_fixed_factor(const char *name,
+ const char *parent_name, unsigned int mult, unsigned int div)
+{
+ return clk_register_fixed_factor(NULL, name, parent_name,
+ CLK_SET_RATE_PARENT, mult, div);
+}
+
+#endif /* __MXS_CLK_H */
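The static inline wrappers above bake in the shared mxs_lock and the framework flags, so the SoC files can build the tree with one line per clock; for example the XTAL-domain gates in clk-imx23.c:

	clks[uart] = mxs_clk_gate("uart", "ref_xtal", XTAL, 31);

where CLK_GATE_SET_TO_DISABLE tells the basic gate that a set bit means the clock is gated off.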
diff --git a/drivers/clk/spear/Makefile b/drivers/clk/spear/Makefile
new file mode 100644
index 000000000000..cdb425d3b8ee
--- /dev/null
+++ b/drivers/clk/spear/Makefile
@@ -0,0 +1,10 @@
+#
+# SPEAr Clock specific Makefile
+#
+
+obj-y += clk.o clk-aux-synth.o clk-frac-synth.o clk-gpt-synth.o clk-vco-pll.o
+
+obj-$(CONFIG_ARCH_SPEAR3XX) += spear3xx_clock.o
+obj-$(CONFIG_ARCH_SPEAR6XX) += spear6xx_clock.o
+obj-$(CONFIG_MACH_SPEAR1310) += spear1310_clock.o
+obj-$(CONFIG_MACH_SPEAR1340) += spear1340_clock.o
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
new file mode 100644
index 000000000000..af34074e702b
--- /dev/null
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Auxiliary Synthesizer clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-aux-synth: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+/*
+ * DOC: Auxiliary Synthesizer clock
+ *
+ * The aux synth derives its output rate from the eq, x and y values
+ *
+ * Fout from the synthesizer is given by one of two equations:
+ * Fout1 = (Fin * X/Y)/2 EQ1
+ * Fout2 = Fin * X/Y EQ2
+ */
+
+#define to_clk_aux(_hw) container_of(_hw, struct clk_aux, hw)
+
+static struct aux_clk_masks default_aux_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = AUX_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = AUX_XSCALE_MASK,
+ .xscale_sel_shift = AUX_XSCALE_SHIFT,
+ .yscale_sel_mask = AUX_YSCALE_MASK,
+ .yscale_sel_shift = AUX_YSCALE_SHIFT,
+ .enable_bit = AUX_SYNT_ENB,
+};
+
+static unsigned long aux_calc_rate(struct clk_hw *hw, unsigned long prate,
+ int index)
+{
+ struct clk_aux *aux = to_clk_aux(hw);
+ struct aux_rate_tbl *rtbl = aux->rtbl;
+ u8 eq = rtbl[index].eq ? 1 : 2;
+
+ return (((prate / 10000) * rtbl[index].xscale) /
+ (rtbl[index].yscale * eq)) * 10000;
+}
+
+static long clk_aux_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ struct clk_aux *aux = to_clk_aux(hw);
+ int unused;
+
+ return clk_round_rate_index(hw, drate, *prate, aux_calc_rate,
+ aux->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_aux_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_aux *aux = to_clk_aux(hw);
+ unsigned int num = 1, den = 1, val, eqn;
+ unsigned long flags = 0;
+
+ if (aux->lock)
+ spin_lock_irqsave(aux->lock, flags);
+
+ val = readl_relaxed(aux->reg);
+
+ if (aux->lock)
+ spin_unlock_irqrestore(aux->lock, flags);
+
+ eqn = (val >> aux->masks->eq_sel_shift) & aux->masks->eq_sel_mask;
+ if (eqn == aux->masks->eq1_mask)
+ den = 2;
+
+ /* calculate numerator */
+ num = (val >> aux->masks->xscale_sel_shift) &
+ aux->masks->xscale_sel_mask;
+
+ /* calculate denominator */
+ den *= (val >> aux->masks->yscale_sel_shift) &
+ aux->masks->yscale_sel_mask;
+
+ if (!den)
+ return 0;
+
+ return (((parent_rate / 10000) * num) / den) * 10000;
+}
+
+/* Configures new clock rate of aux */
+static int clk_aux_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_aux *aux = to_clk_aux(hw);
+ struct aux_rate_tbl *rtbl = aux->rtbl;
+ unsigned long val, flags = 0;
+ int i;
+
+ clk_round_rate_index(hw, drate, prate, aux_calc_rate, aux->rtbl_cnt,
+ &i);
+
+ if (aux->lock)
+ spin_lock_irqsave(aux->lock, flags);
+
+ val = readl_relaxed(aux->reg) &
+ ~(aux->masks->eq_sel_mask << aux->masks->eq_sel_shift);
+ val |= (rtbl[i].eq & aux->masks->eq_sel_mask) <<
+ aux->masks->eq_sel_shift;
+ val &= ~(aux->masks->xscale_sel_mask << aux->masks->xscale_sel_shift);
+ val |= (rtbl[i].xscale & aux->masks->xscale_sel_mask) <<
+ aux->masks->xscale_sel_shift;
+ val &= ~(aux->masks->yscale_sel_mask << aux->masks->yscale_sel_shift);
+ val |= (rtbl[i].yscale & aux->masks->yscale_sel_mask) <<
+ aux->masks->yscale_sel_shift;
+ writel_relaxed(val, aux->reg);
+
+ if (aux->lock)
+ spin_unlock_irqrestore(aux->lock, flags);
+
+ return 0;
+}
+
+static struct clk_ops clk_aux_ops = {
+ .recalc_rate = clk_aux_recalc_rate,
+ .round_rate = clk_aux_round_rate,
+ .set_rate = clk_aux_set_rate,
+};
+
+struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
+ const char *parent_name, unsigned long flags, void __iomem *reg,
+ struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+ u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk)
+{
+ struct clk_aux *aux;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ if (!aux_name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
+ pr_err("Invalid arguments passed");
+ return ERR_PTR(-EINVAL);
+ }
+
+ aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+ if (!aux) {
+ pr_err("could not allocate aux clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* struct clk_aux assignments */
+ if (!masks)
+ aux->masks = &default_aux_masks;
+ else
+ aux->masks = masks;
+
+ aux->reg = reg;
+ aux->rtbl = rtbl;
+ aux->rtbl_cnt = rtbl_cnt;
+ aux->lock = lock;
+ aux->hw.init = &init;
+
+ init.name = aux_name;
+ init.ops = &clk_aux_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(NULL, &aux->hw);
+ if (IS_ERR_OR_NULL(clk))
+ goto free_aux;
+
+ if (gate_name) {
+ struct clk *tgate_clk;
+
+ tgate_clk = clk_register_gate(NULL, gate_name, aux_name, 0, reg,
+ aux->masks->enable_bit, 0, lock);
+ if (IS_ERR_OR_NULL(tgate_clk))
+ goto free_aux;
+
+ if (gate_clk)
+ *gate_clk = tgate_clk;
+ }
+
+ return clk;
+
+free_aux:
+ kfree(aux);
+ pr_err("clk register failed\n");
+
+ return NULL;
+}
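A worked example of the two equations, with purely illustrative values that are not taken from any SPEAr rate table (Fin = 48 MHz, X = 12, Y = 25): with EQ2 selected, aux_calc_rate() yields ((48000000 / 10000) * 12) / (25 * 1) * 10000 = 23040000, i.e. 23.04 MHz; selecting EQ1 halves that to 11.52 MHz. The divide-by-10000 scaling presumably keeps the intermediate product within 32 bits, at the cost of 10 kHz granularity.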
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
new file mode 100644
index 000000000000..4dbdb3fe18e0
--- /dev/null
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Fractional Synthesizer clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-frac-synth: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+#define DIV_FACTOR_MASK 0x1FFFF
+
+/*
+ * DOC: Fractional Synthesizer clock
+ *
+ * Fout from the synthesizer is given by the equation below:
+ *
+ * Fout = Fin / (2 * div), where div is the division factor
+ * div is 17 bits:
+ * bits 0-13: fractional part
+ * bits 14-16: integer part
+ * i.e. div is (bits 16-14).(bits 13-0) in binary
+ *
+ * Fout = Fin/(2 * div)
+ * Fout = ((Fin / 10000)/(2 * div)) * 10000
+ * Fout = (2^14 * (Fin / 10000)/(2^14 * (2 * div))) * 10000
+ * Fout = (((Fin / 10000) << 14)/(2 * (div << 14))) * 10000
+ *
+ * div << 14 is simply the 17-bit value written to the register.
+ * The maximum error due to scaling down by 10000 is 10 kHz.
+ */
+
+#define to_clk_frac(_hw) container_of(_hw, struct clk_frac, hw)
+
+static unsigned long frac_calc_rate(struct clk_hw *hw, unsigned long prate,
+ int index)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ struct frac_rate_tbl *rtbl = frac->rtbl;
+
+ prate /= 10000;
+ prate <<= 14;
+ prate /= (2 * rtbl[index].div);
+ prate *= 10000;
+
+ return prate;
+}
+
+static long clk_frac_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ int unused;
+
+ return clk_round_rate_index(hw, drate, *prate, frac_calc_rate,
+ frac->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ unsigned long flags = 0;
+ unsigned int div = 1, val;
+
+ if (frac->lock)
+ spin_lock_irqsave(frac->lock, flags);
+
+ val = readl_relaxed(frac->reg);
+
+ if (frac->lock)
+ spin_unlock_irqrestore(frac->lock, flags);
+
+ div = val & DIV_FACTOR_MASK;
+
+ if (!div)
+ return 0;
+
+ parent_rate = parent_rate / 10000;
+
+ parent_rate = (parent_rate << 14) / (2 * div);
+ return parent_rate * 10000;
+}
+
+/* Configures new clock rate of frac */
+static int clk_frac_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ struct frac_rate_tbl *rtbl = frac->rtbl;
+ unsigned long flags = 0, val;
+ int i;
+
+ clk_round_rate_index(hw, drate, prate, frac_calc_rate, frac->rtbl_cnt,
+ &i);
+
+ if (frac->lock)
+ spin_lock_irqsave(frac->lock, flags);
+
+ val = readl_relaxed(frac->reg) & ~DIV_FACTOR_MASK;
+ val |= rtbl[i].div & DIV_FACTOR_MASK;
+ writel_relaxed(val, frac->reg);
+
+ if (frac->lock)
+ spin_unlock_irqrestore(frac->lock, flags);
+
+ return 0;
+}
+
+struct clk_ops clk_frac_ops = {
+ .recalc_rate = clk_frac_recalc_rate,
+ .round_rate = clk_frac_round_rate,
+ .set_rate = clk_frac_set_rate,
+};
+
+struct clk *clk_register_frac(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg,
+ struct frac_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock)
+{
+ struct clk_init_data init;
+ struct clk_frac *frac;
+ struct clk *clk;
+
+ if (!name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
+ pr_err("Invalid arguments passed");
+ return ERR_PTR(-EINVAL);
+ }
+
+ frac = kzalloc(sizeof(*frac), GFP_KERNEL);
+ if (!frac) {
+ pr_err("could not allocate frac clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* struct clk_frac assignments */
+ frac->reg = reg;
+ frac->rtbl = rtbl;
+ frac->rtbl_cnt = rtbl_cnt;
+ frac->lock = lock;
+ frac->hw.init = &init;
+
+ init.name = name;
+ init.ops = &clk_frac_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(NULL, &frac->hw);
+ if (!IS_ERR_OR_NULL(clk))
+ return clk;
+
+ pr_err("clk register failed\n");
+ kfree(frac);
+
+ return NULL;
+}
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
new file mode 100644
index 000000000000..b471c9762a97
--- /dev/null
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * General Purpose Timer Synthesizer clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-gpt-synth: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+#define GPT_MSCALE_MASK 0xFFF
+#define GPT_NSCALE_SHIFT 12
+#define GPT_NSCALE_MASK 0xF
+
+/*
+ * DOC: General Purpose Timer Synthesizer clock
+ *
+ * Calculates the gpt synth clock rate for different values of mscale (M)
+ * and nscale (N):
+ *
+ * Fout = Fin / ((2^(N+1)) * (M+1))
+ */
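+/*
+ * Worked example (illustrative, with hypothetical values): for
+ * Fin = 48 MHz, nscale N = 1 and mscale M = 2, the equation gives
+ * Fout = 48 MHz / ((2^(1+1)) * (2+1)) = 48 MHz / 12 = 4 MHz.
+ */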
+
+#define to_clk_gpt(_hw) container_of(_hw, struct clk_gpt, hw)
+
+static unsigned long gpt_calc_rate(struct clk_hw *hw, unsigned long prate,
+ int index)
+{
+ struct clk_gpt *gpt = to_clk_gpt(hw);
+ struct gpt_rate_tbl *rtbl = gpt->rtbl;
+
+ prate /= ((1 << (rtbl[index].nscale + 1)) * (rtbl[index].mscale + 1));
+
+ return prate;
+}
+
+static long clk_gpt_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ struct clk_gpt *gpt = to_clk_gpt(hw);
+ int unused;
+
+ return clk_round_rate_index(hw, drate, *prate, gpt_calc_rate,
+ gpt->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_gpt_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_gpt *gpt = to_clk_gpt(hw);
+ unsigned long flags = 0;
+ unsigned int div = 1, val;
+
+ if (gpt->lock)
+ spin_lock_irqsave(gpt->lock, flags);
+
+ val = readl_relaxed(gpt->reg);
+
+ if (gpt->lock)
+ spin_unlock_irqrestore(gpt->lock, flags);
+
+ div += val & GPT_MSCALE_MASK;
+ div *= 1 << (((val >> GPT_NSCALE_SHIFT) & GPT_NSCALE_MASK) + 1);
+
+ if (!div)
+ return 0;
+
+ return parent_rate / div;
+}
+
+/* Configures new clock rate of gpt */
+static int clk_gpt_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_gpt *gpt = to_clk_gpt(hw);
+ struct gpt_rate_tbl *rtbl = gpt->rtbl;
+ unsigned long flags = 0, val;
+ int i;
+
+ clk_round_rate_index(hw, drate, prate, gpt_calc_rate, gpt->rtbl_cnt,
+ &i);
+
+ if (gpt->lock)
+ spin_lock_irqsave(gpt->lock, flags);
+
+	val = readl_relaxed(gpt->reg) & ~GPT_MSCALE_MASK;
+ val &= ~(GPT_NSCALE_MASK << GPT_NSCALE_SHIFT);
+
+ val |= rtbl[i].mscale & GPT_MSCALE_MASK;
+ val |= (rtbl[i].nscale & GPT_NSCALE_MASK) << GPT_NSCALE_SHIFT;
+
+ writel_relaxed(val, gpt->reg);
+
+ if (gpt->lock)
+ spin_unlock_irqrestore(gpt->lock, flags);
+
+ return 0;
+}
+
+static struct clk_ops clk_gpt_ops = {
+ .recalc_rate = clk_gpt_recalc_rate,
+ .round_rate = clk_gpt_round_rate,
+ .set_rate = clk_gpt_set_rate,
+};
+
+struct clk *clk_register_gpt(const char *name, const char *parent_name,
+		unsigned long flags, void __iomem *reg,
+		struct gpt_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock)
+{
+ struct clk_init_data init;
+ struct clk_gpt *gpt;
+ struct clk *clk;
+
+ if (!name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
+		pr_err("Invalid arguments passed\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ gpt = kzalloc(sizeof(*gpt), GFP_KERNEL);
+ if (!gpt) {
+ pr_err("could not allocate gpt clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* struct clk_gpt assignments */
+ gpt->reg = reg;
+ gpt->rtbl = rtbl;
+ gpt->rtbl_cnt = rtbl_cnt;
+ gpt->lock = lock;
+ gpt->hw.init = &init;
+
+ init.name = name;
+ init.ops = &clk_gpt_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(NULL, &gpt->hw);
+ if (!IS_ERR_OR_NULL(clk))
+ return clk;
+
+ pr_err("clk register failed\n");
+ kfree(gpt);
+
+ return NULL;
+}
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
new file mode 100644
index 000000000000..dcd4bdf4b0d9
--- /dev/null
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * VCO-PLL clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-vco-pll: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+/*
+ * DOC: VCO-PLL clock
+ *
+ * VCO and PLL rates are derived from the following equations:
+ *
+ * In normal mode
+ * vco = (2 * M[15:8] * Fin)/N
+ *
+ * In Dithered mode
+ * vco = (2 * M[15:0] * Fin)/(256 * N)
+ *
+ * pll_rate = vco / 2^p
+ *
+ * vco and pll are very closely bound to each other: the vco programs the
+ * mode, m and n values while the pll programs p, and both share common
+ * enable/disable logic.
+ *
+ * clk_register_vco_pll() registers instances of both vco & pll.
+ * CLK_SET_RATE_PARENT flag is forced for pll, as it will always pass its
+ * set_rate to vco. A single rate table exists for both the clocks, which
+ * configures m, n and p.
+ */
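+/*
+ * Worked example (illustrative): the pll rate table later in this patch
+ * has a normal-mode entry with m = 0x7D (125), n = 0x06 and p = 0x3.
+ * With Fin = 24 MHz, vco = (2 * 125 * 24 MHz) / 6 = 1000 MHz and
+ * pll_rate = 1000 MHz / 2^3 = 125 MHz, matching that entry's comment.
+ */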
+
+/* PLL_CTR register masks */
+#define PLL_MODE_NORMAL 0
+#define PLL_MODE_FRACTION 1
+#define PLL_MODE_DITH_DSM 2
+#define PLL_MODE_DITH_SSM 3
+#define PLL_MODE_MASK 3
+#define PLL_MODE_SHIFT 3
+#define PLL_ENABLE 2
+
+#define PLL_LOCK_SHIFT 0
+#define PLL_LOCK_MASK 1
+
+/* PLL FRQ register masks */
+#define PLL_NORM_FDBK_M_MASK 0xFF
+#define PLL_NORM_FDBK_M_SHIFT 24
+#define PLL_DITH_FDBK_M_MASK 0xFFFF
+#define PLL_DITH_FDBK_M_SHIFT 16
+#define PLL_DIV_P_MASK 0x7
+#define PLL_DIV_P_SHIFT 8
+#define PLL_DIV_N_MASK 0xFF
+#define PLL_DIV_N_SHIFT 0
+
+#define to_clk_vco(_hw) container_of(_hw, struct clk_vco, hw)
+#define to_clk_pll(_hw) container_of(_hw, struct clk_pll, hw)
+
+/* Calculates pll clk rate for specific value of mode, m, n and p */
+static unsigned long pll_calc_rate(struct pll_rate_tbl *rtbl,
+ unsigned long prate, int index, unsigned long *pll_rate)
+{
+ unsigned long rate = prate;
+ unsigned int mode;
+
+ mode = rtbl[index].mode ? 256 : 1;
+ rate = (((2 * rate / 10000) * rtbl[index].m) / (mode * rtbl[index].n));
+
+ if (pll_rate)
+ *pll_rate = (rate / (1 << rtbl[index].p)) * 10000;
+
+ return rate * 10000;
+}
+
+static long clk_pll_round_rate_index(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate, int *index)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ unsigned long prev_rate, vco_prev_rate, rate = 0;
+ unsigned long vco_parent_rate =
+ __clk_get_rate(__clk_get_parent(__clk_get_parent(hw->clk)));
+
+ if (!prate) {
+		pr_err("%s: prate is required for pll clk\n", __func__);
+ return -EINVAL;
+ }
+
+ for (*index = 0; *index < pll->vco->rtbl_cnt; (*index)++) {
+ prev_rate = rate;
+ vco_prev_rate = *prate;
+ *prate = pll_calc_rate(pll->vco->rtbl, vco_parent_rate, *index,
+ &rate);
+ if (drate < rate) {
+ /* previous clock was best */
+ if (*index) {
+ rate = prev_rate;
+ *prate = vco_prev_rate;
+ (*index)--;
+ }
+ break;
+ }
+ }
+
+ return rate;
+}
+
+static long clk_pll_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ int unused;
+
+ return clk_pll_round_rate_index(hw, drate, prate, &unused);
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+ unsigned int p;
+
+ if (pll->vco->lock)
+ spin_lock_irqsave(pll->vco->lock, flags);
+
+ p = readl_relaxed(pll->vco->cfg_reg);
+
+ if (pll->vco->lock)
+ spin_unlock_irqrestore(pll->vco->lock, flags);
+
+ p = (p >> PLL_DIV_P_SHIFT) & PLL_DIV_P_MASK;
+
+ return parent_rate / (1 << p);
+}
+
+static int clk_pll_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ struct pll_rate_tbl *rtbl = pll->vco->rtbl;
+ unsigned long flags = 0, val;
+ int i;
+
+ clk_pll_round_rate_index(hw, drate, NULL, &i);
+
+ if (pll->vco->lock)
+ spin_lock_irqsave(pll->vco->lock, flags);
+
+ val = readl_relaxed(pll->vco->cfg_reg);
+ val &= ~(PLL_DIV_P_MASK << PLL_DIV_P_SHIFT);
+ val |= (rtbl[i].p & PLL_DIV_P_MASK) << PLL_DIV_P_SHIFT;
+ writel_relaxed(val, pll->vco->cfg_reg);
+
+ if (pll->vco->lock)
+ spin_unlock_irqrestore(pll->vco->lock, flags);
+
+ return 0;
+}
+
+static struct clk_ops clk_pll_ops = {
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_round_rate,
+ .set_rate = clk_pll_set_rate,
+};
+
+static inline unsigned long vco_calc_rate(struct clk_hw *hw,
+ unsigned long prate, int index)
+{
+ struct clk_vco *vco = to_clk_vco(hw);
+
+ return pll_calc_rate(vco->rtbl, prate, index, NULL);
+}
+
+static long clk_vco_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ struct clk_vco *vco = to_clk_vco(hw);
+ int unused;
+
+ return clk_round_rate_index(hw, drate, *prate, vco_calc_rate,
+ vco->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_vco_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_vco *vco = to_clk_vco(hw);
+ unsigned long flags = 0;
+ unsigned int num = 2, den = 0, val, mode = 0;
+
+ if (vco->lock)
+ spin_lock_irqsave(vco->lock, flags);
+
+ mode = (readl_relaxed(vco->mode_reg) >> PLL_MODE_SHIFT) & PLL_MODE_MASK;
+
+ val = readl_relaxed(vco->cfg_reg);
+
+ if (vco->lock)
+ spin_unlock_irqrestore(vco->lock, flags);
+
+ den = (val >> PLL_DIV_N_SHIFT) & PLL_DIV_N_MASK;
+
+ /* calculate numerator & denominator */
+ if (!mode) {
+ /* Normal mode */
+ num *= (val >> PLL_NORM_FDBK_M_SHIFT) & PLL_NORM_FDBK_M_MASK;
+ } else {
+ /* Dithered mode */
+ num *= (val >> PLL_DITH_FDBK_M_SHIFT) & PLL_DITH_FDBK_M_MASK;
+ den *= 256;
+ }
+
+ if (!den) {
+ WARN(1, "%s: denominator can't be zero\n", __func__);
+ return 0;
+ }
+
+ return (((parent_rate / 10000) * num) / den) * 10000;
+}
+
+/* Configures new clock rate of vco */
+static int clk_vco_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_vco *vco = to_clk_vco(hw);
+ struct pll_rate_tbl *rtbl = vco->rtbl;
+ unsigned long flags = 0, val;
+ int i;
+
+ clk_round_rate_index(hw, drate, prate, vco_calc_rate, vco->rtbl_cnt,
+ &i);
+
+ if (vco->lock)
+ spin_lock_irqsave(vco->lock, flags);
+
+ val = readl_relaxed(vco->mode_reg);
+ val &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
+ val |= (rtbl[i].mode & PLL_MODE_MASK) << PLL_MODE_SHIFT;
+ writel_relaxed(val, vco->mode_reg);
+
+ val = readl_relaxed(vco->cfg_reg);
+ val &= ~(PLL_DIV_N_MASK << PLL_DIV_N_SHIFT);
+ val |= (rtbl[i].n & PLL_DIV_N_MASK) << PLL_DIV_N_SHIFT;
+
+ val &= ~(PLL_DITH_FDBK_M_MASK << PLL_DITH_FDBK_M_SHIFT);
+ if (rtbl[i].mode)
+ val |= (rtbl[i].m & PLL_DITH_FDBK_M_MASK) <<
+ PLL_DITH_FDBK_M_SHIFT;
+ else
+ val |= (rtbl[i].m & PLL_NORM_FDBK_M_MASK) <<
+ PLL_NORM_FDBK_M_SHIFT;
+
+ writel_relaxed(val, vco->cfg_reg);
+
+ if (vco->lock)
+ spin_unlock_irqrestore(vco->lock, flags);
+
+ return 0;
+}
+
+static struct clk_ops clk_vco_ops = {
+ .recalc_rate = clk_vco_recalc_rate,
+ .round_rate = clk_vco_round_rate,
+ .set_rate = clk_vco_set_rate,
+};
+
+struct clk *clk_register_vco_pll(const char *vco_name, const char *pll_name,
+ const char *vco_gate_name, const char *parent_name,
+		unsigned long flags, void __iomem *mode_reg,
+		void __iomem *cfg_reg, struct pll_rate_tbl *rtbl, u8 rtbl_cnt,
+ spinlock_t *lock, struct clk **pll_clk,
+ struct clk **vco_gate_clk)
+{
+ struct clk_vco *vco;
+ struct clk_pll *pll;
+ struct clk *vco_clk, *tpll_clk, *tvco_gate_clk;
+ struct clk_init_data vco_init, pll_init;
+ const char **vco_parent_name;
+
+ if (!vco_name || !pll_name || !parent_name || !mode_reg || !cfg_reg ||
+ !rtbl || !rtbl_cnt) {
+		pr_err("Invalid arguments passed\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ vco = kzalloc(sizeof(*vco), GFP_KERNEL);
+ if (!vco) {
+ pr_err("could not allocate vco clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll) {
+ pr_err("could not allocate pll clk\n");
+ goto free_vco;
+ }
+
+ /* struct clk_vco assignments */
+ vco->mode_reg = mode_reg;
+ vco->cfg_reg = cfg_reg;
+ vco->rtbl = rtbl;
+ vco->rtbl_cnt = rtbl_cnt;
+ vco->lock = lock;
+ vco->hw.init = &vco_init;
+
+ pll->vco = vco;
+ pll->hw.init = &pll_init;
+
+ if (vco_gate_name) {
+ tvco_gate_clk = clk_register_gate(NULL, vco_gate_name,
+ parent_name, 0, mode_reg, PLL_ENABLE, 0, lock);
+ if (IS_ERR_OR_NULL(tvco_gate_clk))
+ goto free_pll;
+
+ if (vco_gate_clk)
+ *vco_gate_clk = tvco_gate_clk;
+ vco_parent_name = &vco_gate_name;
+ } else {
+ vco_parent_name = &parent_name;
+ }
+
+ vco_init.name = vco_name;
+ vco_init.ops = &clk_vco_ops;
+ vco_init.flags = flags;
+ vco_init.parent_names = vco_parent_name;
+ vco_init.num_parents = 1;
+
+ pll_init.name = pll_name;
+ pll_init.ops = &clk_pll_ops;
+ pll_init.flags = CLK_SET_RATE_PARENT;
+ pll_init.parent_names = &vco_name;
+ pll_init.num_parents = 1;
+
+ vco_clk = clk_register(NULL, &vco->hw);
+ if (IS_ERR_OR_NULL(vco_clk))
+ goto free_pll;
+
+ tpll_clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR_OR_NULL(tpll_clk))
+ goto free_pll;
+
+ if (pll_clk)
+ *pll_clk = tpll_clk;
+
+ return vco_clk;
+
+free_pll:
+ kfree(pll);
+free_vco:
+ kfree(vco);
+
+ pr_err("Failed to register vco pll clock\n");
+
+ return ERR_PTR(-ENOMEM);
+}
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
new file mode 100644
index 000000000000..376d4e5ff326
--- /dev/null
+++ b/drivers/clk/spear/clk.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * SPEAr clk - Common routines
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/types.h>
+#include "clk.h"
+
+long clk_round_rate_index(struct clk_hw *hw, unsigned long drate,
+ unsigned long parent_rate, clk_calc_rate calc_rate, u8 rtbl_cnt,
+ int *index)
+{
+ unsigned long prev_rate, rate = 0;
+
+ for (*index = 0; *index < rtbl_cnt; (*index)++) {
+ prev_rate = rate;
+ rate = calc_rate(hw, parent_rate, *index);
+ if (drate < rate) {
+ /* previous clock was best */
+ if (*index) {
+ rate = prev_rate;
+ (*index)--;
+ }
+ break;
+ }
+ }
+
+ return rate;
+}
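+/*
+ * Usage note (illustrative): the synthesizer drivers in this patch wrap
+ * this helper in their .round_rate callbacks, e.g. clk_frac_round_rate()
+ * passes frac_calc_rate and its rate table, so the highest table rate not
+ * exceeding the requested rate is picked (falling back to the first or
+ * last entry at the boundaries of the table).
+ */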
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
new file mode 100644
index 000000000000..3321c46a071c
--- /dev/null
+++ b/drivers/clk/spear/clk.h
@@ -0,0 +1,134 @@
+/*
+ * Clock framework definitions for SPEAr platform
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __SPEAR_CLK_H
+#define __SPEAR_CLK_H
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+/* Auxiliary Synth clk */
+/* Default masks */
+#define AUX_EQ_SEL_SHIFT 30
+#define AUX_EQ_SEL_MASK 1
+#define AUX_EQ1_SEL 0
+#define AUX_EQ2_SEL 1
+#define AUX_XSCALE_SHIFT 16
+#define AUX_XSCALE_MASK 0xFFF
+#define AUX_YSCALE_SHIFT 0
+#define AUX_YSCALE_MASK 0xFFF
+#define AUX_SYNT_ENB 31
+
+struct aux_clk_masks {
+ u32 eq_sel_mask;
+ u32 eq_sel_shift;
+ u32 eq1_mask;
+ u32 eq2_mask;
+ u32 xscale_sel_mask;
+ u32 xscale_sel_shift;
+ u32 yscale_sel_mask;
+ u32 yscale_sel_shift;
+ u32 enable_bit;
+};
+
+struct aux_rate_tbl {
+ u16 xscale;
+ u16 yscale;
+ u8 eq;
+};
+
+struct clk_aux {
+ struct clk_hw hw;
+ void __iomem *reg;
+ struct aux_clk_masks *masks;
+ struct aux_rate_tbl *rtbl;
+ u8 rtbl_cnt;
+ spinlock_t *lock;
+};
+
+/* Fractional Synth clk */
+struct frac_rate_tbl {
+ u32 div;
+};
+
+struct clk_frac {
+ struct clk_hw hw;
+ void __iomem *reg;
+ struct frac_rate_tbl *rtbl;
+ u8 rtbl_cnt;
+ spinlock_t *lock;
+};
+
+/* GPT clk */
+struct gpt_rate_tbl {
+ u16 mscale;
+ u16 nscale;
+};
+
+struct clk_gpt {
+ struct clk_hw hw;
+ void __iomem *reg;
+ struct gpt_rate_tbl *rtbl;
+ u8 rtbl_cnt;
+ spinlock_t *lock;
+};
+
+/* VCO-PLL clk */
+struct pll_rate_tbl {
+ u8 mode;
+ u16 m;
+ u8 n;
+ u8 p;
+};
+
+struct clk_vco {
+ struct clk_hw hw;
+ void __iomem *mode_reg;
+ void __iomem *cfg_reg;
+ struct pll_rate_tbl *rtbl;
+ u8 rtbl_cnt;
+ spinlock_t *lock;
+};
+
+struct clk_pll {
+ struct clk_hw hw;
+ struct clk_vco *vco;
+ const char *parent[1];
+ spinlock_t *lock;
+};
+
+typedef unsigned long (*clk_calc_rate)(struct clk_hw *hw, unsigned long prate,
+ int index);
+
+/* clk register routines */
+struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
+ const char *parent_name, unsigned long flags, void __iomem *reg,
+ struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+ u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk);
+struct clk *clk_register_frac(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg,
+ struct frac_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock);
+struct clk *clk_register_gpt(const char *name, const char *parent_name,
+		unsigned long flags, void __iomem *reg,
+		struct gpt_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock);
+struct clk *clk_register_vco_pll(const char *vco_name, const char *pll_name,
+ const char *vco_gate_name, const char *parent_name,
+		unsigned long flags, void __iomem *mode_reg,
+		void __iomem *cfg_reg, struct pll_rate_tbl *rtbl, u8 rtbl_cnt,
+ spinlock_t *lock, struct clk **pll_clk,
+ struct clk **vco_gate_clk);
+
+long clk_round_rate_index(struct clk_hw *hw, unsigned long drate,
+ unsigned long parent_rate, clk_calc_rate calc_rate, u8 rtbl_cnt,
+ int *index);
+
+#endif /* __SPEAR_CLK_H */
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
new file mode 100644
index 000000000000..42b68df9aeef
--- /dev/null
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -0,0 +1,1106 @@
+/*
+ * arch/arm/mach-spear13xx/spear1310_clock.c
+ *
+ * SPEAr1310 machine clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <mach/spear.h>
+#include "clk.h"
+
+/* PLL related registers and bit values */
+#define SPEAR1310_PLL_CFG (VA_MISC_BASE + 0x210)
+ /* PLL_CFG bit values */
+ #define SPEAR1310_CLCD_SYNT_CLK_MASK 1
+ #define SPEAR1310_CLCD_SYNT_CLK_SHIFT 31
+ #define SPEAR1310_RAS_SYNT2_3_CLK_MASK 2
+ #define SPEAR1310_RAS_SYNT2_3_CLK_SHIFT 29
+ #define SPEAR1310_RAS_SYNT_CLK_MASK 2
+ #define SPEAR1310_RAS_SYNT0_1_CLK_SHIFT 27
+ #define SPEAR1310_PLL_CLK_MASK 2
+ #define SPEAR1310_PLL3_CLK_SHIFT 24
+ #define SPEAR1310_PLL2_CLK_SHIFT 22
+ #define SPEAR1310_PLL1_CLK_SHIFT 20
+
+#define SPEAR1310_PLL1_CTR (VA_MISC_BASE + 0x214)
+#define SPEAR1310_PLL1_FRQ (VA_MISC_BASE + 0x218)
+#define SPEAR1310_PLL2_CTR (VA_MISC_BASE + 0x220)
+#define SPEAR1310_PLL2_FRQ (VA_MISC_BASE + 0x224)
+#define SPEAR1310_PLL3_CTR (VA_MISC_BASE + 0x22C)
+#define SPEAR1310_PLL3_FRQ (VA_MISC_BASE + 0x230)
+#define SPEAR1310_PLL4_CTR (VA_MISC_BASE + 0x238)
+#define SPEAR1310_PLL4_FRQ (VA_MISC_BASE + 0x23C)
+#define SPEAR1310_PERIP_CLK_CFG (VA_MISC_BASE + 0x244)
+ /* PERIP_CLK_CFG bit values */
+ #define SPEAR1310_GPT_OSC24_VAL 0
+ #define SPEAR1310_GPT_APB_VAL 1
+ #define SPEAR1310_GPT_CLK_MASK 1
+ #define SPEAR1310_GPT3_CLK_SHIFT 11
+ #define SPEAR1310_GPT2_CLK_SHIFT 10
+ #define SPEAR1310_GPT1_CLK_SHIFT 9
+ #define SPEAR1310_GPT0_CLK_SHIFT 8
+ #define SPEAR1310_UART_CLK_PLL5_VAL 0
+ #define SPEAR1310_UART_CLK_OSC24_VAL 1
+ #define SPEAR1310_UART_CLK_SYNT_VAL 2
+ #define SPEAR1310_UART_CLK_MASK 2
+ #define SPEAR1310_UART_CLK_SHIFT 4
+
+ #define SPEAR1310_AUX_CLK_PLL5_VAL 0
+ #define SPEAR1310_AUX_CLK_SYNT_VAL 1
+ #define SPEAR1310_CLCD_CLK_MASK 2
+ #define SPEAR1310_CLCD_CLK_SHIFT 2
+ #define SPEAR1310_C3_CLK_MASK 1
+ #define SPEAR1310_C3_CLK_SHIFT 1
+
+#define SPEAR1310_GMAC_CLK_CFG (VA_MISC_BASE + 0x248)
+ #define SPEAR1310_GMAC_PHY_IF_SEL_MASK 3
+ #define SPEAR1310_GMAC_PHY_IF_SEL_SHIFT 4
+ #define SPEAR1310_GMAC_PHY_CLK_MASK 1
+ #define SPEAR1310_GMAC_PHY_CLK_SHIFT 3
+ #define SPEAR1310_GMAC_PHY_INPUT_CLK_MASK 2
+ #define SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT 1
+
+#define SPEAR1310_I2S_CLK_CFG (VA_MISC_BASE + 0x24C)
+ /* I2S_CLK_CFG register mask */
+ #define SPEAR1310_I2S_SCLK_X_MASK 0x1F
+ #define SPEAR1310_I2S_SCLK_X_SHIFT 27
+ #define SPEAR1310_I2S_SCLK_Y_MASK 0x1F
+ #define SPEAR1310_I2S_SCLK_Y_SHIFT 22
+ #define SPEAR1310_I2S_SCLK_EQ_SEL_SHIFT 21
+ #define SPEAR1310_I2S_SCLK_SYNTH_ENB 20
+ #define SPEAR1310_I2S_PRS1_CLK_X_MASK 0xFF
+ #define SPEAR1310_I2S_PRS1_CLK_X_SHIFT 12
+ #define SPEAR1310_I2S_PRS1_CLK_Y_MASK 0xFF
+ #define SPEAR1310_I2S_PRS1_CLK_Y_SHIFT 4
+ #define SPEAR1310_I2S_PRS1_EQ_SEL_SHIFT 3
+ #define SPEAR1310_I2S_REF_SEL_MASK 1
+ #define SPEAR1310_I2S_REF_SHIFT 2
+ #define SPEAR1310_I2S_SRC_CLK_MASK 2
+ #define SPEAR1310_I2S_SRC_CLK_SHIFT 0
+
+#define SPEAR1310_C3_CLK_SYNT (VA_MISC_BASE + 0x250)
+#define SPEAR1310_UART_CLK_SYNT (VA_MISC_BASE + 0x254)
+#define SPEAR1310_GMAC_CLK_SYNT (VA_MISC_BASE + 0x258)
+#define SPEAR1310_SDHCI_CLK_SYNT (VA_MISC_BASE + 0x25C)
+#define SPEAR1310_CFXD_CLK_SYNT (VA_MISC_BASE + 0x260)
+#define SPEAR1310_ADC_CLK_SYNT (VA_MISC_BASE + 0x264)
+#define SPEAR1310_AMBA_CLK_SYNT (VA_MISC_BASE + 0x268)
+#define SPEAR1310_CLCD_CLK_SYNT (VA_MISC_BASE + 0x270)
+#define SPEAR1310_RAS_CLK_SYNT0 (VA_MISC_BASE + 0x280)
+#define SPEAR1310_RAS_CLK_SYNT1 (VA_MISC_BASE + 0x288)
+#define SPEAR1310_RAS_CLK_SYNT2 (VA_MISC_BASE + 0x290)
+#define SPEAR1310_RAS_CLK_SYNT3 (VA_MISC_BASE + 0x298)
+ /* Check Fractional synthesizer reg masks */
+
+#define SPEAR1310_PERIP1_CLK_ENB (VA_MISC_BASE + 0x300)
+ /* PERIP1_CLK_ENB register masks */
+ #define SPEAR1310_RTC_CLK_ENB 31
+ #define SPEAR1310_ADC_CLK_ENB 30
+ #define SPEAR1310_C3_CLK_ENB 29
+ #define SPEAR1310_JPEG_CLK_ENB 28
+ #define SPEAR1310_CLCD_CLK_ENB 27
+ #define SPEAR1310_DMA_CLK_ENB 25
+ #define SPEAR1310_GPIO1_CLK_ENB 24
+ #define SPEAR1310_GPIO0_CLK_ENB 23
+ #define SPEAR1310_GPT1_CLK_ENB 22
+ #define SPEAR1310_GPT0_CLK_ENB 21
+ #define SPEAR1310_I2S0_CLK_ENB 20
+ #define SPEAR1310_I2S1_CLK_ENB 19
+ #define SPEAR1310_I2C0_CLK_ENB 18
+ #define SPEAR1310_SSP_CLK_ENB 17
+ #define SPEAR1310_UART_CLK_ENB 15
+ #define SPEAR1310_PCIE_SATA_2_CLK_ENB 14
+ #define SPEAR1310_PCIE_SATA_1_CLK_ENB 13
+ #define SPEAR1310_PCIE_SATA_0_CLK_ENB 12
+ #define SPEAR1310_UOC_CLK_ENB 11
+ #define SPEAR1310_UHC1_CLK_ENB 10
+ #define SPEAR1310_UHC0_CLK_ENB 9
+ #define SPEAR1310_GMAC_CLK_ENB 8
+ #define SPEAR1310_CFXD_CLK_ENB 7
+ #define SPEAR1310_SDHCI_CLK_ENB 6
+ #define SPEAR1310_SMI_CLK_ENB 5
+ #define SPEAR1310_FSMC_CLK_ENB 4
+ #define SPEAR1310_SYSRAM0_CLK_ENB 3
+ #define SPEAR1310_SYSRAM1_CLK_ENB 2
+ #define SPEAR1310_SYSROM_CLK_ENB 1
+ #define SPEAR1310_BUS_CLK_ENB 0
+
+#define SPEAR1310_PERIP2_CLK_ENB (VA_MISC_BASE + 0x304)
+ /* PERIP2_CLK_ENB register masks */
+ #define SPEAR1310_THSENS_CLK_ENB 8
+ #define SPEAR1310_I2S_REF_PAD_CLK_ENB 7
+ #define SPEAR1310_ACP_CLK_ENB 6
+ #define SPEAR1310_GPT3_CLK_ENB 5
+ #define SPEAR1310_GPT2_CLK_ENB 4
+ #define SPEAR1310_KBD_CLK_ENB 3
+ #define SPEAR1310_CPU_DBG_CLK_ENB 2
+ #define SPEAR1310_DDR_CORE_CLK_ENB 1
+ #define SPEAR1310_DDR_CTRL_CLK_ENB 0
+
+#define SPEAR1310_RAS_CLK_ENB (VA_MISC_BASE + 0x310)
+ /* RAS_CLK_ENB register masks */
+ #define SPEAR1310_SYNT3_CLK_ENB 17
+ #define SPEAR1310_SYNT2_CLK_ENB 16
+ #define SPEAR1310_SYNT1_CLK_ENB 15
+ #define SPEAR1310_SYNT0_CLK_ENB 14
+ #define SPEAR1310_PCLK3_CLK_ENB 13
+ #define SPEAR1310_PCLK2_CLK_ENB 12
+ #define SPEAR1310_PCLK1_CLK_ENB 11
+ #define SPEAR1310_PCLK0_CLK_ENB 10
+ #define SPEAR1310_PLL3_CLK_ENB 9
+ #define SPEAR1310_PLL2_CLK_ENB 8
+ #define SPEAR1310_C125M_PAD_CLK_ENB 7
+ #define SPEAR1310_C30M_CLK_ENB 6
+ #define SPEAR1310_C48M_CLK_ENB 5
+ #define SPEAR1310_OSC_25M_CLK_ENB 4
+ #define SPEAR1310_OSC_32K_CLK_ENB 3
+ #define SPEAR1310_OSC_24M_CLK_ENB 2
+ #define SPEAR1310_PCLK_CLK_ENB 1
+ #define SPEAR1310_ACLK_CLK_ENB 0
+
+/* RAS Area Control Register */
+#define SPEAR1310_RAS_CTRL_REG0 (VA_SPEAR1310_RAS_BASE + 0x000)
+ #define SPEAR1310_SSP1_CLK_MASK 3
+ #define SPEAR1310_SSP1_CLK_SHIFT 26
+ #define SPEAR1310_TDM_CLK_MASK 1
+ #define SPEAR1310_TDM2_CLK_SHIFT 24
+ #define SPEAR1310_TDM1_CLK_SHIFT 23
+ #define SPEAR1310_I2C_CLK_MASK 1
+ #define SPEAR1310_I2C7_CLK_SHIFT 22
+ #define SPEAR1310_I2C6_CLK_SHIFT 21
+ #define SPEAR1310_I2C5_CLK_SHIFT 20
+ #define SPEAR1310_I2C4_CLK_SHIFT 19
+ #define SPEAR1310_I2C3_CLK_SHIFT 18
+ #define SPEAR1310_I2C2_CLK_SHIFT 17
+ #define SPEAR1310_I2C1_CLK_SHIFT 16
+ #define SPEAR1310_GPT64_CLK_MASK 1
+ #define SPEAR1310_GPT64_CLK_SHIFT 15
+ #define SPEAR1310_RAS_UART_CLK_MASK 1
+ #define SPEAR1310_UART5_CLK_SHIFT 14
+ #define SPEAR1310_UART4_CLK_SHIFT 13
+ #define SPEAR1310_UART3_CLK_SHIFT 12
+ #define SPEAR1310_UART2_CLK_SHIFT 11
+ #define SPEAR1310_UART1_CLK_SHIFT 10
+ #define SPEAR1310_PCI_CLK_MASK 1
+ #define SPEAR1310_PCI_CLK_SHIFT 0
+
+#define SPEAR1310_RAS_CTRL_REG1 (VA_SPEAR1310_RAS_BASE + 0x004)
+ #define SPEAR1310_PHY_CLK_MASK 0x3
+ #define SPEAR1310_RMII_PHY_CLK_SHIFT 0
+ #define SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT 2
+
+#define SPEAR1310_RAS_SW_CLK_CTRL (VA_SPEAR1310_RAS_BASE + 0x0148)
+ #define SPEAR1310_CAN1_CLK_ENB 25
+ #define SPEAR1310_CAN0_CLK_ENB 24
+ #define SPEAR1310_GPT64_CLK_ENB 23
+ #define SPEAR1310_SSP1_CLK_ENB 22
+ #define SPEAR1310_I2C7_CLK_ENB 21
+ #define SPEAR1310_I2C6_CLK_ENB 20
+ #define SPEAR1310_I2C5_CLK_ENB 19
+ #define SPEAR1310_I2C4_CLK_ENB 18
+ #define SPEAR1310_I2C3_CLK_ENB 17
+ #define SPEAR1310_I2C2_CLK_ENB 16
+ #define SPEAR1310_I2C1_CLK_ENB 15
+ #define SPEAR1310_UART5_CLK_ENB 14
+ #define SPEAR1310_UART4_CLK_ENB 13
+ #define SPEAR1310_UART3_CLK_ENB 12
+ #define SPEAR1310_UART2_CLK_ENB 11
+ #define SPEAR1310_UART1_CLK_ENB 10
+ #define SPEAR1310_RS485_1_CLK_ENB 9
+ #define SPEAR1310_RS485_0_CLK_ENB 8
+ #define SPEAR1310_TDM2_CLK_ENB 7
+ #define SPEAR1310_TDM1_CLK_ENB 6
+ #define SPEAR1310_PCI_CLK_ENB 5
+ #define SPEAR1310_GMII_CLK_ENB 4
+ #define SPEAR1310_MII2_CLK_ENB 3
+ #define SPEAR1310_MII1_CLK_ENB 2
+ #define SPEAR1310_MII0_CLK_ENB 1
+ #define SPEAR1310_ESRAM_CLK_ENB 0
+
+static DEFINE_SPINLOCK(_lock);
+
+/* pll rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+ /* PCLK 24MHz */
+ {.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */
+ {.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */
+ {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */
+	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1000 MHz */
+};
+
+/* vco-pll4 rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll4_rtbl[] = {
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */
+ {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */
+	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1000 MHz */
+};
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+ /* For VCO1div2 = 500 MHz */
+ {.xscale = 10, .yscale = 204, .eq = 0}, /* 12.29 MHz */
+ {.xscale = 4, .yscale = 21, .eq = 0}, /* 48 MHz */
+ {.xscale = 2, .yscale = 6, .eq = 0}, /* 83 MHz */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* 125 MHz */
+ {.xscale = 1, .yscale = 3, .eq = 1}, /* 166 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* 250 MHz */
+};
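+/*
+ * Illustrative note: the auxiliary synthesizer itself is registered through
+ * clk_register_aux(), implemented separately in this patch. Assuming
+ * .eq = 0 selects the eq1 form Fout = Fin * X / (2 * Y) and .eq = 1 the
+ * eq2 form Fout = Fin * X / Y, the entries above check out for
+ * Fin = vco1div2 = 500 MHz, e.g. 500 * 2 / (2 * 4) = 125 MHz and
+ * 500 * 1 / 3 ~= 166 MHz.
+ */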
+
+/* gmac rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl gmac_rtbl[] = {
+ /* For gmac phy input clk */
+ {.xscale = 2, .yscale = 6, .eq = 0}, /* divided by 6 */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* divided by 4 */
+ {.xscale = 1, .yscale = 3, .eq = 1}, /* divided by 3 */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* divided by 2 */
+};
+
+/* clcd rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl clcd_rtbl[] = {
+	{.div = 0x14000}, /* 25 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x1284B}, /* 27 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x0D8D3}, /* 58 MHz, for vco1div4 = 393 MHz */
+	{.div = 0x0B72C}, /* 58 MHz, for vco1div4 = 332 MHz */
+	{.div = 0x089EE}, /* 58 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x06f1C}, /* 72 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x06E58}, /* 58 MHz, for vco1div4 = 200 MHz */
+	{.div = 0x06c1B}, /* 74 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x04A12}, /* 108 MHz, for vco1div4 = 250 MHz */
+	{.div = 0x0378E}, /* 144 MHz, for vco1div4 = 250 MHz */
+};
+
+/* i2s prescaler1 masks */
+static struct aux_clk_masks i2s_prs1_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = SPEAR1310_I2S_PRS1_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = SPEAR1310_I2S_PRS1_CLK_X_MASK,
+ .xscale_sel_shift = SPEAR1310_I2S_PRS1_CLK_X_SHIFT,
+ .yscale_sel_mask = SPEAR1310_I2S_PRS1_CLK_Y_MASK,
+ .yscale_sel_shift = SPEAR1310_I2S_PRS1_CLK_Y_SHIFT,
+};
+
+/* i2s sclk (bit clock) synthesizer masks */
+static struct aux_clk_masks i2s_sclk_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = SPEAR1310_I2S_SCLK_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = SPEAR1310_I2S_SCLK_X_MASK,
+ .xscale_sel_shift = SPEAR1310_I2S_SCLK_X_SHIFT,
+ .yscale_sel_mask = SPEAR1310_I2S_SCLK_Y_MASK,
+ .yscale_sel_shift = SPEAR1310_I2S_SCLK_Y_SHIFT,
+ .enable_bit = SPEAR1310_I2S_SCLK_SYNTH_ENB,
+};
+
+/* i2s prs1 aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_prs1_rtbl[] = {
+ /* For parent clk = 49.152 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 0}, /* 12.288 MHz */
+};
+
+/* i2s sclk aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_sclk_rtbl[] = {
+ /* For i2s_ref_clk = 12.288MHz */
+ {.xscale = 1, .yscale = 4, .eq = 0}, /* 1.53 MHz */
+	{.xscale = 1, .yscale = 2, .eq = 0}, /* 3.07 MHz */
+};
+
+/* adc rate configuration table, in ascending order of rates */
+/* possible adc range is 2.5 MHz to 20 MHz. */
+static struct aux_rate_tbl adc_rtbl[] = {
+ /* For ahb = 166.67 MHz */
+ {.xscale = 1, .yscale = 31, .eq = 0}, /* 2.68 MHz */
+ {.xscale = 2, .yscale = 21, .eq = 0}, /* 7.94 MHz */
+ {.xscale = 4, .yscale = 21, .eq = 0}, /* 15.87 MHz */
+ {.xscale = 10, .yscale = 42, .eq = 0}, /* 19.84 MHz */
+};
+
+/* General synth rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl gen_rtbl[] = {
+ /* For vco1div4 = 250 MHz */
+ {.div = 0x14000}, /* 25 MHz */
+ {.div = 0x0A000}, /* 50 MHz */
+ {.div = 0x05000}, /* 100 MHz */
+ {.div = 0x02000}, /* 250 MHz */
+};
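+/*
+ * Worked example (illustrative): with vco1div4 = 250 MHz, the entry
+ * div = 0x0A000 corresponds to 0x0A000 / 2^14 = 2.5, giving
+ * 250 MHz / (2 * 2.5) = 50 MHz per the fractional synthesizer equation,
+ * in line with the comment above.
+ */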
+
+/* clock parents */
+static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
+static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
+static const char *uart0_parents[] = { "pll5_clk", "uart_synth_gate_clk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
+static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+ "osc_25m_clk", };
+static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
+ "gmac_phy_synth_gate_clk", };
+static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *i2s_src_parents[] = { "vco1div2_clk", "none", "pll3_clk",
+ "i2s_src_pad_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
+static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
+ "pll3_clk", };
+static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
+ "pll2_clk", };
+static const char *rmii_phy_parents[] = { "ras_tx50_clk", "none",
+ "ras_pll2_clk", "ras_synth0_clk", };
+static const char *smii_rgmii_phy_parents[] = { "none", "ras_tx125_clk",
+ "ras_pll2_clk", "ras_synth0_clk", };
+static const char *uart_parents[] = { "ras_apb_clk", "gen_synth3_clk", };
+static const char *i2c_parents[] = { "ras_apb_clk", "gen_synth1_clk", };
+static const char *ssp1_parents[] = { "ras_apb_clk", "gen_synth1_clk",
+ "ras_plclk0_clk", };
+static const char *pci_parents[] = { "ras_pll3_clk", "gen_synth2_clk", };
+static const char *tdm_parents[] = { "ras_pll3_clk", "gen_synth1_clk", };
+
+void __init spear1310_clk_init(void)
+{
+ struct clk *clk, *clk1;
+
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk_register_clkdev(clk, "apb_pclk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+ 32000);
+ clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
+ 24000000);
+ clk_register_clkdev(clk, "osc_24m_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
+ 25000000);
+ clk_register_clkdev(clk, "osc_25m_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
+ CLK_IS_ROOT, 125000000);
+ clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
+ CLK_IS_ROOT, 12288000);
+ clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);
+
+ /* clock derived from 32 KHz osc clk */
+ clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_RTC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "fc900000.rtc");
+
+ /* clock derived from 24 or 25 MHz osc clk */
+ /* vco-pll */
+ clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
+ SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco1_mux_clk", NULL);
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
+ 0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco1_clk", NULL);
+ clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
+ SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco2_mux_clk", NULL);
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
+ 0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco2_clk", NULL);
+ clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+ clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
+ SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco3_mux_clk", NULL);
+ clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
+ 0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco3_clk", NULL);
+ clk_register_clkdev(clk1, "pll3_clk", NULL);
+
+ clk = clk_register_vco_pll("vco4_clk", "pll4_clk", NULL, "osc_24m_clk",
+ 0, SPEAR1310_PLL4_CTR, SPEAR1310_PLL4_FRQ, pll4_rtbl,
+ ARRAY_SIZE(pll4_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco4_clk", NULL);
+ clk_register_clkdev(clk1, "pll4_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "pll5_clk", "osc_24m_clk", 0,
+ 48000000);
+ clk_register_clkdev(clk, "pll5_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "pll6_clk", "osc_25m_clk", 0,
+ 25000000);
+ clk_register_clkdev(clk, "pll6_clk", NULL);
+
+ /* vco div n clocks */
+ clk = clk_register_fixed_factor(NULL, "vco1div2_clk", "vco1_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco1div2_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco1div4_clk", "vco1_clk", 0, 1,
+ 4);
+ clk_register_clkdev(clk, "vco1div4_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco2div2_clk", "vco2_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco2div2_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco3div2_clk", "vco3_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco3div2_clk", NULL);
+
+ /* peripherals */
+ clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
+ 128);
+ clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+ SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_THSENS_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_thermal");
+
+ /* clock derived from pll4 clk */
+ clk = clk_register_fixed_factor(NULL, "ddr_clk", "pll4_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "ddr_clk", NULL);
+
+ /* clock derived from pll1 clk */
+ clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 2);
+ clk_register_clkdev(clk, "cpu_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "wdt_clk", "cpu_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, NULL, "ec800620.wdt");
+
+ clk = clk_register_fixed_factor(NULL, "ahb_clk", "pll1_clk", 0, 1,
+ 6);
+ clk_register_clkdev(clk, "ahb_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "apb_clk", "pll1_clk", 0, 1,
+ 12);
+ clk_register_clkdev(clk, "apb_clk", NULL);
+
+ /* gpt clocks */
+ clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt0");
+
+ clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt1");
+
+ clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt2");
+
+ clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+ SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt3");
+
+ /* others */
+ clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1310_UART_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "uart_synth_clk", NULL);
+ clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+ ARRAY_SIZE(uart0_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_UART_CLK_SHIFT, SPEAR1310_UART_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UART_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0000000.serial");
+
+ clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1310_SDHCI_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
+ clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SDHCI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b3000000.sdhci");
+
+ clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1310_CFXD_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
+ clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CFXD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b2800000.cf");
+ clk_register_clkdev(clk, NULL, "arasan_xd");
+
+ clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1310_C3_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "c3_synth_clk", NULL);
+ clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+ ARRAY_SIZE(c3_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_C3_CLK_SHIFT, SPEAR1310_C3_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "c3_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_C3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "c3");
+
+ /* gmac */
+ clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
+ gmac_phy_input_parents,
+ ARRAY_SIZE(gmac_phy_input_parents), 0,
+ SPEAR1310_GMAC_CLK_CFG,
+ SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT,
+ SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+
+ clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
+ "gmac_phy_input_mux_clk", 0, SPEAR1310_GMAC_CLK_SYNT,
+ NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
+ clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+ ARRAY_SIZE(gmac_phy_parents), 0,
+ SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT,
+ SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "stmmacphy.0");
+
+ /* clcd */
+ clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+ ARRAY_SIZE(clcd_synth_parents), 0,
+ SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT,
+ SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+
+ clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+ SPEAR1310_CLCD_CLK_SYNT, clcd_rtbl,
+ ARRAY_SIZE(clcd_rtbl), &_lock);
+ clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+
+ clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+ ARRAY_SIZE(clcd_pixel_parents), 0,
+ SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT,
+ SPEAR1310_CLCD_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
+
+ clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CLCD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "clcd_clk", NULL);
+
+ /* i2s */
+ clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+ ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG,
+ SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "i2s_src_clk", NULL);
+
+ clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+ SPEAR1310_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
+ ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
+ clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+ ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1310_I2S_CLK_CFG,
+ SPEAR1310_I2S_REF_SHIFT, SPEAR1310_I2S_REF_SEL_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2s_ref_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+ SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_I2S_REF_PAD_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
+
+ clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
+ "i2s_ref_pad_clk", 0, SPEAR1310_I2S_CLK_CFG,
+ &i2s_sclk_masks, i2s_sclk_rtbl,
+ ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
+ clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+
+ /* clock derived from ahb clk */
+ clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2C0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0280000.i2c");
+
+ clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_DMA_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "ea800000.dma");
+ clk_register_clkdev(clk, NULL, "eb000000.dma");
+
+ clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_JPEG_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b2000000.jpeg");
+
+ clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GMAC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e2000000.eth");
+
+ clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_FSMC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b0000000.flash");
+
+ clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SMI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "ea000000.flash");
+
+ clk = clk_register_gate(NULL, "usbh0_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UHC0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "usbh.0_clk", NULL);
+
+ clk = clk_register_gate(NULL, "usbh1_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UHC1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "usbh.1_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uoc_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UOC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "uoc");
+
+ clk = clk_register_gate(NULL, "pcie_sata_0_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_0_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "dw_pcie.0");
+ clk_register_clkdev(clk, NULL, "ahci.0");
+
+ clk = clk_register_gate(NULL, "pcie_sata_1_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_1_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "dw_pcie.1");
+ clk_register_clkdev(clk, NULL, "ahci.1");
+
+ clk = clk_register_gate(NULL, "pcie_sata_2_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_2_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "dw_pcie.2");
+ clk_register_clkdev(clk, NULL, "ahci.2");
+
+ clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SYSRAM0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "sysram0_clk", NULL);
+
+ clk = clk_register_gate(NULL, "sysram1_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SYSRAM1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "sysram1_clk", NULL);
+
+ clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+ 0, SPEAR1310_ADC_CLK_SYNT, NULL, adc_rtbl,
+ ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "adc_synth_clk", NULL);
+ clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_ADC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "adc_clk");
+
+ /* clock derived from apb clk */
+ clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SSP_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0100000.spi");
+
+ clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPIO0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0600000.gpio");
+
+ clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPIO1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0680000.gpio");
+
+ clk = clk_register_gate(NULL, "i2s0_clk", "apb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2S0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0180000.i2s");
+
+ clk = clk_register_gate(NULL, "i2s1_clk", "apb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2S1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0200000.i2s");
+
+ clk = clk_register_gate(NULL, "kbd_clk", "apb_clk", 0,
+ SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_KBD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0300000.kbd");
+
+ /* RAS clks */
+ clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
+ gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
+ 0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
+ SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
+ gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
+ 0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
+ SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+
+ clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+ SPEAR1310_RAS_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+
+ clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+ SPEAR1310_RAS_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+
+ clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+ SPEAR1310_RAS_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+
+ clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+ SPEAR1310_RAS_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_osc_24m_clk", "osc_24m_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_24M_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_osc_24m_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_osc_25m_clk", "osc_25m_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_25M_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_osc_25m_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_osc_32k_clk", "osc_32k_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_32K_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_osc_32k_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_pll2_clk", "pll2_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_PLL2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_pll2_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_PLL3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_pll3_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_125m_pad_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_C125M_PAD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_tx125_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "ras_30m_fixed_clk", "pll5_clk", 0,
+ 30000000);
+ clk = clk_register_gate(NULL, "ras_30m_clk", "ras_30m_fixed_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_C30M_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_30m_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "ras_48m_fixed_clk", "pll5_clk", 0,
+ 48000000);
+ clk = clk_register_gate(NULL, "ras_48m_clk", "ras_48m_fixed_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_C48M_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_48m_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_ahb_clk", "ahb_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_ACLK_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_ahb_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_PCLK_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_apb_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "ras_plclk0_clk", NULL, CLK_IS_ROOT,
+ 50000000);
+
+ clk = clk_register_fixed_rate(NULL, "ras_tx50_clk", NULL, CLK_IS_ROOT,
+ 50000000);
+
+ clk = clk_register_gate(NULL, "can0_clk", "apb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_CAN0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "c_can_platform.0");
+
+ clk = clk_register_gate(NULL, "can1_clk", "apb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_CAN1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "c_can_platform.1");
+
+ clk = clk_register_gate(NULL, "ras_smii0_clk", "ras_ahb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c400000.eth");
+
+ clk = clk_register_gate(NULL, "ras_smii1_clk", "ras_ahb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c500000.eth");
+
+ clk = clk_register_gate(NULL, "ras_smii2_clk", "ras_ahb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c600000.eth");
+
+ clk = clk_register_gate(NULL, "ras_rgmii_clk", "ras_ahb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_GMII_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c700000.eth");
+
+ clk = clk_register_mux(NULL, "smii_rgmii_phy_mux_clk",
+ smii_rgmii_phy_parents,
+ ARRAY_SIZE(smii_rgmii_phy_parents), 0,
+ SPEAR1310_RAS_CTRL_REG1,
+ SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT,
+ SPEAR1310_PHY_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "stmmacphy.1");
+ clk_register_clkdev(clk, NULL, "stmmacphy.2");
+ clk_register_clkdev(clk, NULL, "stmmacphy.4");
+
+ clk = clk_register_mux(NULL, "rmii_phy_mux_clk", rmii_phy_parents,
+ ARRAY_SIZE(rmii_phy_parents), 0,
+ SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT,
+ SPEAR1310_PHY_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "stmmacphy.3");
+
+ clk = clk_register_mux(NULL, "uart1_mux_clk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c800000.serial");
+
+ clk = clk_register_mux(NULL, "uart2_mux_clk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "uart2_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart2_clk", "uart2_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c900000.serial");
+
+ clk = clk_register_mux(NULL, "uart3_mux_clk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "uart3_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart3_clk", "uart3_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5ca00000.serial");
+
+ clk = clk_register_mux(NULL, "uart4_mux_clk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "uart4_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart4_clk", "uart4_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART4_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5cb00000.serial");
+
+ clk = clk_register_mux(NULL, "uart5_mux_clk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "uart5_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart5_clk", "uart5_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART5_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5cc00000.serial");
+
+ clk = clk_register_mux(NULL, "i2c1_mux_clk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c1_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5cd00000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c2_mux_clk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c2_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5ce00000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c3_mux_clk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c3_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5cf00000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c4_mux_clk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c4_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C4_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5d000000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c5_mux_clk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c5_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C5_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5d100000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c6_mux_clk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c6_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C6_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5d200000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c7_mux_clk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c7_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C7_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5d300000.i2c");
+
+ clk = clk_register_mux(NULL, "ssp1_mux_clk", ssp1_parents,
+ ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ssp1_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_SSP1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5d400000.spi");
+
+ clk = clk_register_mux(NULL, "pci_mux_clk", pci_parents,
+ ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "pci_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "pci_clk", "pci_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_PCI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "pci");
+
+ clk = clk_register_mux(NULL, "tdm1_mux_clk", tdm_parents,
+ ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "tdm1_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "tdm_hdlc.0");
+
+ clk = clk_register_mux(NULL, "tdm2_mux_clk", tdm_parents,
+ ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "tdm2_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "tdm_hdlc.1");
+}
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
new file mode 100644
index 000000000000..f130919d5bf8
--- /dev/null
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -0,0 +1,964 @@
+/*
+ * drivers/clk/spear/spear1340_clock.c
+ *
+ * SPEAr1340 machine clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <mach/spear.h>
+#include "clk.h"
+
+/* Clock Configuration Registers */
+#define SPEAR1340_SYS_CLK_CTRL (VA_MISC_BASE + 0x200)
+ #define SPEAR1340_HCLK_SRC_SEL_SHIFT 27
+ #define SPEAR1340_HCLK_SRC_SEL_MASK 1
+ #define SPEAR1340_SCLK_SRC_SEL_SHIFT 23
+ #define SPEAR1340_SCLK_SRC_SEL_MASK 3
+
+/* PLL related registers and bit values */
+#define SPEAR1340_PLL_CFG (VA_MISC_BASE + 0x210)
+ /* PLL_CFG bit values */
+ #define SPEAR1340_CLCD_SYNT_CLK_MASK 1
+ #define SPEAR1340_CLCD_SYNT_CLK_SHIFT 31
+ #define SPEAR1340_GEN_SYNT2_3_CLK_SHIFT 29
+ #define SPEAR1340_GEN_SYNT_CLK_MASK 2
+ #define SPEAR1340_GEN_SYNT0_1_CLK_SHIFT 27
+ #define SPEAR1340_PLL_CLK_MASK 2
+ #define SPEAR1340_PLL3_CLK_SHIFT 24
+ #define SPEAR1340_PLL2_CLK_SHIFT 22
+ #define SPEAR1340_PLL1_CLK_SHIFT 20
+
+#define SPEAR1340_PLL1_CTR (VA_MISC_BASE + 0x214)
+#define SPEAR1340_PLL1_FRQ (VA_MISC_BASE + 0x218)
+#define SPEAR1340_PLL2_CTR (VA_MISC_BASE + 0x220)
+#define SPEAR1340_PLL2_FRQ (VA_MISC_BASE + 0x224)
+#define SPEAR1340_PLL3_CTR (VA_MISC_BASE + 0x22C)
+#define SPEAR1340_PLL3_FRQ (VA_MISC_BASE + 0x230)
+#define SPEAR1340_PLL4_CTR (VA_MISC_BASE + 0x238)
+#define SPEAR1340_PLL4_FRQ (VA_MISC_BASE + 0x23C)
+#define SPEAR1340_PERIP_CLK_CFG (VA_MISC_BASE + 0x244)
+ /* PERIP_CLK_CFG bit values */
+ #define SPEAR1340_SPDIF_CLK_MASK 1
+ #define SPEAR1340_SPDIF_OUT_CLK_SHIFT 15
+ #define SPEAR1340_SPDIF_IN_CLK_SHIFT 14
+ #define SPEAR1340_GPT3_CLK_SHIFT 13
+ #define SPEAR1340_GPT2_CLK_SHIFT 12
+ #define SPEAR1340_GPT_CLK_MASK 1
+ #define SPEAR1340_GPT1_CLK_SHIFT 9
+ #define SPEAR1340_GPT0_CLK_SHIFT 8
+ #define SPEAR1340_UART_CLK_MASK 2
+ #define SPEAR1340_UART1_CLK_SHIFT 6
+ #define SPEAR1340_UART0_CLK_SHIFT 4
+ #define SPEAR1340_CLCD_CLK_MASK 2
+ #define SPEAR1340_CLCD_CLK_SHIFT 2
+ #define SPEAR1340_C3_CLK_MASK 1
+ #define SPEAR1340_C3_CLK_SHIFT 1
+
+#define SPEAR1340_GMAC_CLK_CFG (VA_MISC_BASE + 0x248)
+ #define SPEAR1340_GMAC_PHY_CLK_MASK 1
+ #define SPEAR1340_GMAC_PHY_CLK_SHIFT 2
+ #define SPEAR1340_GMAC_PHY_INPUT_CLK_MASK 2
+ #define SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT 0
+
+#define SPEAR1340_I2S_CLK_CFG (VA_MISC_BASE + 0x24C)
+ /* I2S_CLK_CFG register mask */
+ #define SPEAR1340_I2S_SCLK_X_MASK 0x1F
+ #define SPEAR1340_I2S_SCLK_X_SHIFT 27
+ #define SPEAR1340_I2S_SCLK_Y_MASK 0x1F
+ #define SPEAR1340_I2S_SCLK_Y_SHIFT 22
+ #define SPEAR1340_I2S_SCLK_EQ_SEL_SHIFT 21
+ #define SPEAR1340_I2S_SCLK_SYNTH_ENB 20
+ #define SPEAR1340_I2S_PRS1_CLK_X_MASK 0xFF
+ #define SPEAR1340_I2S_PRS1_CLK_X_SHIFT 12
+ #define SPEAR1340_I2S_PRS1_CLK_Y_MASK 0xFF
+ #define SPEAR1340_I2S_PRS1_CLK_Y_SHIFT 4
+ #define SPEAR1340_I2S_PRS1_EQ_SEL_SHIFT 3
+ #define SPEAR1340_I2S_REF_SEL_MASK 1
+ #define SPEAR1340_I2S_REF_SHIFT 2
+ #define SPEAR1340_I2S_SRC_CLK_MASK 2
+ #define SPEAR1340_I2S_SRC_CLK_SHIFT 0
+
+#define SPEAR1340_C3_CLK_SYNT (VA_MISC_BASE + 0x250)
+#define SPEAR1340_UART0_CLK_SYNT (VA_MISC_BASE + 0x254)
+#define SPEAR1340_UART1_CLK_SYNT (VA_MISC_BASE + 0x258)
+#define SPEAR1340_GMAC_CLK_SYNT (VA_MISC_BASE + 0x25C)
+#define SPEAR1340_SDHCI_CLK_SYNT (VA_MISC_BASE + 0x260)
+#define SPEAR1340_CFXD_CLK_SYNT (VA_MISC_BASE + 0x264)
+#define SPEAR1340_ADC_CLK_SYNT (VA_MISC_BASE + 0x270)
+#define SPEAR1340_AMBA_CLK_SYNT (VA_MISC_BASE + 0x274)
+#define SPEAR1340_CLCD_CLK_SYNT (VA_MISC_BASE + 0x27C)
+#define SPEAR1340_SYS_CLK_SYNT (VA_MISC_BASE + 0x284)
+#define SPEAR1340_GEN_CLK_SYNT0 (VA_MISC_BASE + 0x28C)
+#define SPEAR1340_GEN_CLK_SYNT1 (VA_MISC_BASE + 0x294)
+#define SPEAR1340_GEN_CLK_SYNT2 (VA_MISC_BASE + 0x29C)
+#define SPEAR1340_GEN_CLK_SYNT3 (VA_MISC_BASE + 0x304)
+#define SPEAR1340_PERIP1_CLK_ENB (VA_MISC_BASE + 0x30C)
+ #define SPEAR1340_RTC_CLK_ENB 31
+ #define SPEAR1340_ADC_CLK_ENB 30
+ #define SPEAR1340_C3_CLK_ENB 29
+ #define SPEAR1340_CLCD_CLK_ENB 27
+ #define SPEAR1340_DMA_CLK_ENB 25
+ #define SPEAR1340_GPIO1_CLK_ENB 24
+ #define SPEAR1340_GPIO0_CLK_ENB 23
+ #define SPEAR1340_GPT1_CLK_ENB 22
+ #define SPEAR1340_GPT0_CLK_ENB 21
+ #define SPEAR1340_I2S_PLAY_CLK_ENB 20
+ #define SPEAR1340_I2S_REC_CLK_ENB 19
+ #define SPEAR1340_I2C0_CLK_ENB 18
+ #define SPEAR1340_SSP_CLK_ENB 17
+ #define SPEAR1340_UART0_CLK_ENB 15
+ #define SPEAR1340_PCIE_SATA_CLK_ENB 12
+ #define SPEAR1340_UOC_CLK_ENB 11
+ #define SPEAR1340_UHC1_CLK_ENB 10
+ #define SPEAR1340_UHC0_CLK_ENB 9
+ #define SPEAR1340_GMAC_CLK_ENB 8
+ #define SPEAR1340_CFXD_CLK_ENB 7
+ #define SPEAR1340_SDHCI_CLK_ENB 6
+ #define SPEAR1340_SMI_CLK_ENB 5
+ #define SPEAR1340_FSMC_CLK_ENB 4
+ #define SPEAR1340_SYSRAM0_CLK_ENB 3
+ #define SPEAR1340_SYSRAM1_CLK_ENB 2
+ #define SPEAR1340_SYSROM_CLK_ENB 1
+ #define SPEAR1340_BUS_CLK_ENB 0
+
+#define SPEAR1340_PERIP2_CLK_ENB (VA_MISC_BASE + 0x310)
+ #define SPEAR1340_THSENS_CLK_ENB 8
+ #define SPEAR1340_I2S_REF_PAD_CLK_ENB 7
+ #define SPEAR1340_ACP_CLK_ENB 6
+ #define SPEAR1340_GPT3_CLK_ENB 5
+ #define SPEAR1340_GPT2_CLK_ENB 4
+ #define SPEAR1340_KBD_CLK_ENB 3
+ #define SPEAR1340_CPU_DBG_CLK_ENB 2
+ #define SPEAR1340_DDR_CORE_CLK_ENB 1
+ #define SPEAR1340_DDR_CTRL_CLK_ENB 0
+
+#define SPEAR1340_PERIP3_CLK_ENB (VA_MISC_BASE + 0x314)
+ #define SPEAR1340_PLGPIO_CLK_ENB 18
+ #define SPEAR1340_VIDEO_DEC_CLK_ENB 16
+ #define SPEAR1340_VIDEO_ENC_CLK_ENB 15
+ #define SPEAR1340_SPDIF_OUT_CLK_ENB 13
+ #define SPEAR1340_SPDIF_IN_CLK_ENB 12
+ #define SPEAR1340_VIDEO_IN_CLK_ENB 11
+ #define SPEAR1340_CAM0_CLK_ENB 10
+ #define SPEAR1340_CAM1_CLK_ENB 9
+ #define SPEAR1340_CAM2_CLK_ENB 8
+ #define SPEAR1340_CAM3_CLK_ENB 7
+ #define SPEAR1340_MALI_CLK_ENB 6
+ #define SPEAR1340_CEC0_CLK_ENB 5
+ #define SPEAR1340_CEC1_CLK_ENB 4
+ #define SPEAR1340_PWM_CLK_ENB 3
+ #define SPEAR1340_I2C1_CLK_ENB 2
+ #define SPEAR1340_UART1_CLK_ENB 1
+
+static DEFINE_SPINLOCK(_lock);
+
+/* pll rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+ /* PCLK 24MHz */
+ {.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */
+ {.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */
+ {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1000 MHz */
+ {.mode = 0, .m = 0x96, .n = 0x06, .p = 0x0}, /* vco 1200, pll 1200 MHz */
+};
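
The vco/pll rates quoted in the comments above are consistent with vco = 2 * fin * m / n and pll = vco / 2^p for the 24 MHz input. The standalone sketch below is only an illustration of that assumed relation (it is not part of the driver, and the helper name is hypothetical); it reproduces a few of the table rows.

#include <stdio.h>

/* Illustrative sketch, assuming vco = 2 * fin * m / n and pll = vco >> p. */
static void pll_rate_sketch(unsigned long fin_hz, unsigned int m,
			    unsigned int n, unsigned int p)
{
	unsigned long long vco = 2ULL * fin_hz * m / n;

	printf("m=0x%02X n=0x%02X p=%u -> vco %llu MHz, pll %.3f MHz\n",
	       m, n, p, vco / 1000000, (vco >> p) / 1000000.0);
}

int main(void)
{
	pll_rate_sketch(24000000, 0x83, 0x04, 0x5); /* vco 1572, pll 49.125 */
	pll_rate_sketch(24000000, 0x64, 0x06, 0x1); /* vco 800, pll 400 */
	pll_rate_sketch(24000000, 0x96, 0x06, 0x0); /* vco 1200, pll 1200 */
	return 0;
}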
+
+/* vco-pll4 rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll4_rtbl[] = {
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */
+ {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1000 MHz */
+};
+
+/*
+ * All the entries below generate 166 MHz for
+ * different values of vco1div2
+ */
+static struct frac_rate_tbl amba_synth_rtbl[] = {
+ {.div = 0x06062}, /* for vco1div2 = 500 MHz */
+ {.div = 0x04D1B}, /* for vco1div2 = 400 MHz */
+ {.div = 0x04000}, /* for vco1div2 = 332 MHz */
+ {.div = 0x03031}, /* for vco1div2 = 250 MHz */
+ {.div = 0x0268D}, /* for vco1div2 = 200 MHz */
+};
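
The divisors above are consistent with the fractional synthesizer treating .div as a 2.14 fixed-point value, i.e. fout = fin * 2^14 / (2 * div). The snippet below is only a hedged check of that assumption against the 166 MHz entries; the helper name is illustrative, not driver API.

#include <stdio.h>

/* Sketch: assumed fractional-synth relation fout = fin * 2^14 / (2 * div). */
static double frac_rate(double fin_hz, unsigned int div)
{
	return fin_hz * (1 << 14) / (2.0 * div);
}

int main(void)
{
	printf("%.1f MHz\n", frac_rate(500e6, 0x06062) / 1e6); /* ~166 MHz */
	printf("%.1f MHz\n", frac_rate(400e6, 0x04D1B) / 1e6); /* ~166 MHz */
	printf("%.1f MHz\n", frac_rate(250e6, 0x03031) / 1e6); /* ~166 MHz */
	return 0;
}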
+
+/*
+ * Synthesizer clock derived from vco1div2. This clock is one of the
+ * possible clocks used to feed the CPU directly.
+ * We can program this synthesizer to run the CPU at different clock
+ * frequencies.
+ * The following table provides configuration values to let the CPU run
+ * at 200, 250, 332, 400 or 500 MHz, considering the different possible
+ * input (vco1div2) clocks.
+ *
+ * --------------------------------------------------------------------
+ * vco1div2(MHz)	fout(MHz)	cpuclk = fout/2(MHz)	div
+ * --------------------------------------------------------------------
+ * 400 200 100 0x04000
+ * 400 250 125 0x03333
+ * 400 332 166 0x0268D
+ * 400 400 200 0x02000
+ * --------------------------------------------------------------------
+ * 500 200 100 0x05000
+ * 500 250 125 0x04000
+ * 500 332 166 0x03031
+ * 500 400 200 0x02800
+ * 500 500 250 0x02000
+ * --------------------------------------------------------------------
+ * 664 200 100 0x06a38
+ * 664 250 125 0x054FD
+ * 664 332 166 0x04000
+ * 664 400 200 0x0351E
+ * 664 500 250 0x02A7E
+ * --------------------------------------------------------------------
+ * 800 200 100 0x08000
+ * 800 250 125 0x06666
+ * 800 332 166 0x04D18
+ * 800 400 200 0x04000
+ * 800 500 250 0x03333
+ * --------------------------------------------------------------------
+ * sys rate configuration table is in descending order of divisor.
+ */
+static struct frac_rate_tbl sys_synth_rtbl[] = {
+ {.div = 0x08000},
+ {.div = 0x06a38},
+ {.div = 0x06666},
+ {.div = 0x054FD},
+ {.div = 0x05000},
+ {.div = 0x04D18},
+ {.div = 0x04000},
+ {.div = 0x0351E},
+ {.div = 0x03333},
+ {.div = 0x03031},
+ {.div = 0x02A7E},
+ {.div = 0x02800},
+ {.div = 0x0268D},
+ {.div = 0x02000},
+};
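
Conversely, under the same fixed-point assumption the .div entries above can be derived from the target fout and the vco1div2 input as div = vco1div2 * 2^14 / (2 * fout), with cpuclk = fout / 2. The minimal helper below is hypothetical and only spells out that arithmetic for two rows of the table.

#include <stdio.h>

/* Sketch only: div = vco1div2 * 2^14 / (2 * fout), cpuclk = fout / 2. */
static unsigned int sys_synth_div(unsigned long long vco1div2_hz,
				  unsigned long long fout_hz)
{
	return (unsigned int)((vco1div2_hz << 14) / (2 * fout_hz));
}

int main(void)
{
	/* vco1div2 = 500 MHz, fout = 332 MHz -> cpuclk 166 MHz */
	printf("0x%05X\n", sys_synth_div(500000000ULL, 332000000ULL)); /* 0x03031 */
	/* vco1div2 = 500 MHz, fout = 200 MHz -> cpuclk 100 MHz */
	printf("0x%05X\n", sys_synth_div(500000000ULL, 200000000ULL)); /* 0x05000 */
	return 0;
}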
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+ /* For VCO1div2 = 500 MHz */
+ {.xscale = 10, .yscale = 204, .eq = 0}, /* 12.29 MHz */
+ {.xscale = 4, .yscale = 21, .eq = 0}, /* 48 MHz */
+ {.xscale = 2, .yscale = 6, .eq = 0}, /* 83 MHz */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* 125 MHz */
+ {.xscale = 1, .yscale = 3, .eq = 1}, /* 166 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* 250 MHz */
+};
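
The rates noted in the comments above match an auxiliary synthesizer of the form fout = fin * xscale / (2 * yscale) when .eq is 0 and fout = fin * xscale / yscale when .eq is 1, for the 500 MHz vco1div2 input. The short check below only illustrates that assumed relation; the function name is illustrative.

#include <stdio.h>

/*
 * Sketch of the assumed aux-synth relation:
 *   eq == 0: fout = fin * x / (2 * y)
 *   eq == 1: fout = fin * x / y
 */
static double aux_rate(double fin_hz, unsigned int x, unsigned int y, int eq)
{
	return eq ? fin_hz * x / y : fin_hz * x / (2.0 * y);
}

int main(void)
{
	printf("%.2f MHz\n", aux_rate(500e6, 4, 21, 0) / 1e6); /* ~48 MHz */
	printf("%.2f MHz\n", aux_rate(500e6, 2, 4, 0) / 1e6);  /* 125 MHz */
	printf("%.2f MHz\n", aux_rate(500e6, 1, 3, 1) / 1e6);  /* ~166 MHz */
	return 0;
}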
+
+/* gmac rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl gmac_rtbl[] = {
+ /* For gmac phy input clk */
+ {.xscale = 2, .yscale = 6, .eq = 0}, /* divided by 6 */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* divided by 4 */
+ {.xscale = 1, .yscale = 3, .eq = 1}, /* divided by 3 */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* divided by 2 */
+};
+
+/* clcd rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl clcd_rtbl[] = {
+ {.div = 0x14000}, /* 25 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x1284B}, /* 27 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x0D8D3}, /* 58 MHz, for vco1div4 = 393 MHz */
+ {.div = 0x0B72C}, /* 58 MHz, for vco1div4 = 332 MHz */
+ {.div = 0x089EE}, /* 58 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x07BA0}, /* 65 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x06f1C}, /* 72 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x06E58}, /* 58 MHz, for vco1div4 = 200 MHz */
+ {.div = 0x06c1B}, /* 74 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x04A12}, /* 108 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x0378E}, /* 144 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x0360D}, /* 148 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x035E0}, /* 148.5 MHz, for vco1div4 = 250 MHz */
+};
+
+/* i2s prescaler1 masks */
+static struct aux_clk_masks i2s_prs1_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = SPEAR1340_I2S_PRS1_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = SPEAR1340_I2S_PRS1_CLK_X_MASK,
+ .xscale_sel_shift = SPEAR1340_I2S_PRS1_CLK_X_SHIFT,
+ .yscale_sel_mask = SPEAR1340_I2S_PRS1_CLK_Y_MASK,
+ .yscale_sel_shift = SPEAR1340_I2S_PRS1_CLK_Y_SHIFT,
+};
+
+/* i2s sclk (bit clock) synthesizer masks */
+static struct aux_clk_masks i2s_sclk_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = SPEAR1340_I2S_SCLK_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = SPEAR1340_I2S_SCLK_X_MASK,
+ .xscale_sel_shift = SPEAR1340_I2S_SCLK_X_SHIFT,
+ .yscale_sel_mask = SPEAR1340_I2S_SCLK_Y_MASK,
+ .yscale_sel_shift = SPEAR1340_I2S_SCLK_Y_SHIFT,
+ .enable_bit = SPEAR1340_I2S_SCLK_SYNTH_ENB,
+};
+
+/* i2s prs1 aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_prs1_rtbl[] = {
+ /* For parent clk = 49.152 MHz */
+ {.xscale = 1, .yscale = 12, .eq = 0}, /* 2.048 MHz, smp freq = 8 kHz */
+ {.xscale = 11, .yscale = 96, .eq = 0}, /* 2.816 MHz, smp freq = 11 kHz */
+ {.xscale = 1, .yscale = 6, .eq = 0}, /* 4.096 MHz, smp freq = 16 kHz */
+ {.xscale = 11, .yscale = 48, .eq = 0}, /* 5.632 MHz, smp freq = 22 kHz */
+
+ /*
+  * with parent clk = 49.152 MHz, the generated clock is 8.192 MHz,
+  * smp freq = 32 kHz
+  * with parent clk = 12.288 MHz, the generated clock is 2.048 MHz,
+  * smp freq = 8 kHz
+  */
+ {.xscale = 1, .yscale = 3, .eq = 0},
+
+ /* For parent clk = 49.152 MHz */
+ {.xscale = 17, .yscale = 37, .eq = 0}, /* 11.289 MHz, smp freq = 44 kHz */
+ {.xscale = 1, .yscale = 2, .eq = 0}, /* 12.288 MHz, smp freq = 48 kHz */
+};
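
Each prescaler rate quoted above appears to be 256 times the corresponding sample frequency (the common 256*Fs master-clock ratio), e.g. 2.048 MHz / 256 = 8 kHz and 12.288 MHz / 256 = 48 kHz. The small loop below is only a worked check of that observation, not driver code.

#include <stdio.h>

/* Sketch: relate the prescaler rates above to 256 x sample frequency. */
int main(void)
{
	const double prs1_hz[] = { 2048000, 4096000, 8192000, 12288000 };
	unsigned int i;

	for (i = 0; i < sizeof(prs1_hz) / sizeof(prs1_hz[0]); i++)
		printf("%.3f MHz / 256 = %.1f kHz\n",
		       prs1_hz[i] / 1e6, prs1_hz[i] / 256 / 1e3);
	return 0;
}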
+
+/* i2s sclk aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_sclk_rtbl[] = {
+ /* For sclk = ref_clk * x/2/y */
+ {.xscale = 1, .yscale = 4, .eq = 0},
+ {.xscale = 1, .yscale = 2, .eq = 0},
+};
+
+/* adc rate configuration table, in ascending order of rates */
+/* possible adc range is 2.5 MHz to 20 MHz. */
+static struct aux_rate_tbl adc_rtbl[] = {
+ /* For ahb = 166.67 MHz */
+ {.xscale = 1, .yscale = 31, .eq = 0}, /* 2.68 MHz */
+ {.xscale = 2, .yscale = 21, .eq = 0}, /* 7.94 MHz */
+ {.xscale = 4, .yscale = 21, .eq = 0}, /* 15.87 MHz */
+ {.xscale = 10, .yscale = 42, .eq = 0}, /* 19.84 MHz */
+};
+
+/* General synth rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl gen_rtbl[] = {
+ /* For vco1div4 = 250 MHz */
+ {.div = 0x1624E}, /* 22.5792 MHz */
+ {.div = 0x14585}, /* 24.576 MHz */
+ {.div = 0x14000}, /* 25 MHz */
+ {.div = 0x0B127}, /* 45.1584 MHz */
+ {.div = 0x0A000}, /* 50 MHz */
+ {.div = 0x061A8}, /* 81.92 MHz */
+ {.div = 0x05000}, /* 100 MHz */
+ {.div = 0x02800}, /* 200 MHz */
+ {.div = 0x02620}, /* 210 MHz */
+ {.div = 0x02460}, /* 220 MHz */
+ {.div = 0x022C0}, /* 230 MHz */
+ {.div = 0x02160}, /* 240 MHz */
+ {.div = 0x02000}, /* 250 MHz */
+};
+
+/* clock parents */
+static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
+static const char *sys_parents[] = { "none", "pll1_clk", "none", "none",
+ "sys_synth_clk", "none", "pll2_clk", "pll3_clk", };
+static const char *ahb_parents[] = { "cpu_div3_clk", "amba_synth_clk", };
+static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
+static const char *uart0_parents[] = { "pll5_clk", "osc_24m_clk",
+ "uart0_synth_gate_clk", };
+static const char *uart1_parents[] = { "pll5_clk", "osc_24m_clk",
+ "uart1_synth_gate_clk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
+static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+ "osc_25m_clk", };
+static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
+ "gmac_phy_synth_gate_clk", };
+static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *i2s_src_parents[] = { "vco1div2_clk", "pll2_clk", "pll3_clk",
+ "i2s_src_pad_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
+static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_synth2_clk",
+};
+static const char *spdif_in_parents[] = { "pll2_clk", "gen_synth3_clk", };
+
+static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
+ "pll3_clk", };
+static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
+ "pll2_clk", };
+
+void __init spear1340_clk_init(void)
+{
+ struct clk *clk, *clk1;
+
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk_register_clkdev(clk, "apb_pclk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+ 32000);
+ clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
+ 24000000);
+ clk_register_clkdev(clk, "osc_24m_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
+ 25000000);
+ clk_register_clkdev(clk, "osc_25m_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
+ CLK_IS_ROOT, 125000000);
+ clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
+ CLK_IS_ROOT, 12288000);
+ clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);
+
+ /* clock derived from 32 KHz osc clk */
+ clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_RTC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "fc900000.rtc");
+
+ /* clock derived from 24 or 25 MHz osc clk */
+ /* vco-pll */
+ clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
+ SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco1_mux_clk", NULL);
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
+ 0, SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco1_clk", NULL);
+ clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
+ SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco2_mux_clk", NULL);
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
+ 0, SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco2_clk", NULL);
+ clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+ clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
+ SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco3_mux_clk", NULL);
+ clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
+ 0, SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco3_clk", NULL);
+ clk_register_clkdev(clk1, "pll3_clk", NULL);
+
+ clk = clk_register_vco_pll("vco4_clk", "pll4_clk", NULL, "osc_24m_clk",
+ 0, SPEAR1340_PLL4_CTR, SPEAR1340_PLL4_FRQ, pll4_rtbl,
+ ARRAY_SIZE(pll4_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco4_clk", NULL);
+ clk_register_clkdev(clk1, "pll4_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "pll5_clk", "osc_24m_clk", 0,
+ 48000000);
+ clk_register_clkdev(clk, "pll5_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "pll6_clk", "osc_25m_clk", 0,
+ 25000000);
+ clk_register_clkdev(clk, "pll6_clk", NULL);
+
+ /* vco div n clocks */
+ clk = clk_register_fixed_factor(NULL, "vco1div2_clk", "vco1_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco1div2_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco1div4_clk", "vco1_clk", 0, 1,
+ 4);
+ clk_register_clkdev(clk, "vco1div4_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco2div2_clk", "vco2_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco2div2_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco3div2_clk", "vco3_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco3div2_clk", NULL);
+
+ /* peripherals */
+ clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
+ 128);
+ clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_THSENS_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_thermal");
+
+ /* clock derived from pll4 clk */
+ clk = clk_register_fixed_factor(NULL, "ddr_clk", "pll4_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "ddr_clk", NULL);
+
+ /* clock derived from pll1 clk */
+ clk = clk_register_frac("sys_synth_clk", "vco1div2_clk", 0,
+ SPEAR1340_SYS_CLK_SYNT, sys_synth_rtbl,
+ ARRAY_SIZE(sys_synth_rtbl), &_lock);
+ clk_register_clkdev(clk, "sys_synth_clk", NULL);
+
+ clk = clk_register_frac("amba_synth_clk", "vco1div2_clk", 0,
+ SPEAR1340_AMBA_CLK_SYNT, amba_synth_rtbl,
+ ARRAY_SIZE(amba_synth_rtbl), &_lock);
+ clk_register_clkdev(clk, "amba_synth_clk", NULL);
+
+ clk = clk_register_mux(NULL, "sys_mux_clk", sys_parents,
+ ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL,
+ SPEAR1340_SCLK_SRC_SEL_SHIFT,
+ SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "sys_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mux_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "cpu_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "cpu_div3_clk", "cpu_clk", 0, 1,
+ 3);
+ clk_register_clkdev(clk, "cpu_div3_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "wdt_clk", "cpu_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, NULL, "ec800620.wdt");
+
+ clk = clk_register_mux(NULL, "ahb_clk", ahb_parents,
+ ARRAY_SIZE(ahb_parents), 0, SPEAR1340_SYS_CLK_CTRL,
+ SPEAR1340_HCLK_SRC_SEL_SHIFT,
+ SPEAR1340_HCLK_SRC_SEL_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "ahb_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "apb_clk", NULL);
+
+ /* gpt clocks */
+ clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt0");
+
+ clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt1");
+
+ clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt2");
+
+ clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt3");
+
+ /* others */
+ clk = clk_register_aux("uart0_synth_clk", "uart0_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1340_UART0_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "uart0_synth_clk", NULL);
+ clk_register_clkdev(clk1, "uart0_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+ ARRAY_SIZE(uart0_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_UART0_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0000000.serial");
+
+ clk = clk_register_aux("uart1_synth_clk", "uart1_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1340_UART1_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "uart1_synth_clk", NULL);
+ clk_register_clkdev(clk1, "uart1_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "uart1_mux_clk", uart1_parents,
+ ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b4100000.serial");
+
+ clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1340_SDHCI_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
+ clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SDHCI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b3000000.sdhci");
+
+ clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1340_CFXD_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
+ clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CFXD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b2800000.cf");
+ clk_register_clkdev(clk, NULL, "arasan_xd");
+
+ clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1340_C3_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "c3_synth_clk", NULL);
+ clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+ ARRAY_SIZE(c3_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_C3_CLK_SHIFT, SPEAR1340_C3_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "c3_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_C3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "c3");
+
+ /* gmac */
+ clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
+ gmac_phy_input_parents,
+ ARRAY_SIZE(gmac_phy_input_parents), 0,
+ SPEAR1340_GMAC_CLK_CFG,
+ SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT,
+ SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+
+ clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
+ "gmac_phy_input_mux_clk", 0, SPEAR1340_GMAC_CLK_SYNT,
+ NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
+ clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+ ARRAY_SIZE(gmac_phy_parents), 0,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT,
+ SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "stmmacphy.0");
+
+ /* clcd */
+ clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+ ARRAY_SIZE(clcd_synth_parents), 0,
+ SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT,
+ SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+
+ clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+ SPEAR1340_CLCD_CLK_SYNT, clcd_rtbl,
+ ARRAY_SIZE(clcd_rtbl), &_lock);
+ clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+
+ clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+ ARRAY_SIZE(clcd_pixel_parents), 0,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT,
+ SPEAR1340_CLCD_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
+
+ clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CLCD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "clcd_clk", NULL);
+
+ /* i2s */
+ clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+ ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG,
+ SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "i2s_src_clk", NULL);
+
+ clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+ SPEAR1340_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
+ ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
+ clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+ ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1340_I2S_CLK_CFG,
+ SPEAR1340_I2S_REF_SHIFT, SPEAR1340_I2S_REF_SEL_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2s_ref_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_I2S_REF_PAD_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
+
+ clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
+ "i2s_ref_mux_clk", 0, SPEAR1340_I2S_CLK_CFG,
+ &i2s_sclk_masks, i2s_sclk_rtbl,
+ ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
+ clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+
+ /* clock derived from ahb clk */
+ clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0280000.i2c");
+
+ clk = clk_register_gate(NULL, "i2c1_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b4000000.i2c");
+
+ clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_DMA_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "ea800000.dma");
+ clk_register_clkdev(clk, NULL, "eb000000.dma");
+
+ clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GMAC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e2000000.eth");
+
+ clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_FSMC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b0000000.flash");
+
+ clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SMI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "ea000000.flash");
+
+ clk = clk_register_gate(NULL, "usbh0_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UHC0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "usbh.0_clk", NULL);
+
+ clk = clk_register_gate(NULL, "usbh1_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UHC1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "usbh.1_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uoc_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UOC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "uoc");
+
+ clk = clk_register_gate(NULL, "pcie_sata_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_PCIE_SATA_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "dw_pcie");
+ clk_register_clkdev(clk, NULL, "ahci");
+
+ clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SYSRAM0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "sysram0_clk", NULL);
+
+ clk = clk_register_gate(NULL, "sysram1_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SYSRAM1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "sysram1_clk", NULL);
+
+ clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+ 0, SPEAR1340_ADC_CLK_SYNT, NULL, adc_rtbl,
+ ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "adc_synth_clk", NULL);
+ clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_ADC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "adc_clk");
+
+ /* clock derived from apb clk */
+ clk = clk_register_gate(NULL, "ssp_clk", "apb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SSP_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0100000.spi");
+
+ clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPIO0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0600000.gpio");
+
+ clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPIO1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0680000.gpio");
+
+ clk = clk_register_gate(NULL, "i2s_play_clk", "apb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2S_PLAY_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b2400000.i2s");
+
+ clk = clk_register_gate(NULL, "i2s_rec_clk", "apb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2S_REC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b2000000.i2s");
+
+ clk = clk_register_gate(NULL, "kbd_clk", "apb_clk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_KBD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0300000.kbd");
+
+ /* RAS clks */
+ clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
+ gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
+ 0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
+ SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
+ gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
+ 0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
+ SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+
+ clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+ SPEAR1340_GEN_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+
+ clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+ SPEAR1340_GEN_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+
+ clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+ SPEAR1340_GEN_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+
+ clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+ SPEAR1340_GEN_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+
+ clk = clk_register_gate(NULL, "mali_clk", "gen_synth3_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_MALI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "mali");
+
+ clk = clk_register_gate(NULL, "cec0_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CEC0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_cec.0");
+
+ clk = clk_register_gate(NULL, "cec1_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CEC1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_cec.1");
+
+ clk = clk_register_mux(NULL, "spdif_out_mux_clk", spdif_out_parents,
+ ARRAY_SIZE(spdif_out_parents), 0,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT,
+ SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "spdif_out_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_OUT_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "spdif-out");
+
+ clk = clk_register_mux(NULL, "spdif_in_mux_clk", spdif_in_parents,
+ ARRAY_SIZE(spdif_in_parents), 0,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT,
+ SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "spdif_in_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_IN_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spdif-in");
+
+ clk = clk_register_gate(NULL, "acp_clk", "acp_mux_clk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "acp_clk");
+
+ clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "plgpio");
+
+ clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "video_dec");
+
+ clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "video_enc");
+
+ clk = clk_register_gate(NULL, "video_in_clk", "video_in_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_vip");
+
+ clk = clk_register_gate(NULL, "cam0_clk", "cam0_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_camif.0");
+
+ clk = clk_register_gate(NULL, "cam1_clk", "cam1_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_camif.1");
+
+ clk = clk_register_gate(NULL, "cam2_clk", "cam2_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_camif.2");
+
+ clk = clk_register_gate(NULL, "cam3_clk", "cam3_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_camif.3");
+
+ clk = clk_register_gate(NULL, "pwm_clk", "pwm_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PWM_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "pwm");
+}
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
new file mode 100644
index 000000000000..440bb3e4c971
--- /dev/null
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -0,0 +1,612 @@
+/*
+ * SPEAr3xx machines clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <mach/misc_regs.h>
+#include "clk.h"
+
+static DEFINE_SPINLOCK(_lock);
+
+#define PLL1_CTR (MISC_BASE + 0x008)
+#define PLL1_FRQ (MISC_BASE + 0x00C)
+#define PLL2_CTR (MISC_BASE + 0x014)
+#define PLL2_FRQ (MISC_BASE + 0x018)
+#define PLL_CLK_CFG (MISC_BASE + 0x020)
+ /* PLL_CLK_CFG register masks */
+ #define MCTR_CLK_SHIFT 28
+ #define MCTR_CLK_MASK 3
+
+#define CORE_CLK_CFG (MISC_BASE + 0x024)
+ /* CORE CLK CFG register masks */
+ #define GEN_SYNTH2_3_CLK_SHIFT 18
+ #define GEN_SYNTH2_3_CLK_MASK 1
+
+ #define HCLK_RATIO_SHIFT 10
+ #define HCLK_RATIO_MASK 2
+ #define PCLK_RATIO_SHIFT 8
+ #define PCLK_RATIO_MASK 2
+
+#define PERIP_CLK_CFG (MISC_BASE + 0x028)
+ /* PERIP_CLK_CFG register masks */
+ #define UART_CLK_SHIFT 4
+ #define UART_CLK_MASK 1
+ #define FIRDA_CLK_SHIFT 5
+ #define FIRDA_CLK_MASK 2
+ #define GPT0_CLK_SHIFT 8
+ #define GPT1_CLK_SHIFT 11
+ #define GPT2_CLK_SHIFT 12
+ #define GPT_CLK_MASK 1
+
+#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
+ /* PERIP1_CLK_ENB register masks */
+ #define UART_CLK_ENB 3
+ #define SSP_CLK_ENB 5
+ #define I2C_CLK_ENB 7
+ #define JPEG_CLK_ENB 8
+ #define FIRDA_CLK_ENB 10
+ #define GPT1_CLK_ENB 11
+ #define GPT2_CLK_ENB 12
+ #define ADC_CLK_ENB 15
+ #define RTC_CLK_ENB 17
+ #define GPIO_CLK_ENB 18
+ #define DMA_CLK_ENB 19
+ #define SMI_CLK_ENB 21
+ #define GMAC_CLK_ENB 23
+ #define USBD_CLK_ENB 24
+ #define USBH_CLK_ENB 25
+ #define C3_CLK_ENB 31
+
+#define RAS_CLK_ENB (MISC_BASE + 0x034)
+ #define RAS_AHB_CLK_ENB 0
+ #define RAS_PLL1_CLK_ENB 1
+ #define RAS_APB_CLK_ENB 2
+ #define RAS_32K_CLK_ENB 3
+ #define RAS_24M_CLK_ENB 4
+ #define RAS_48M_CLK_ENB 5
+ #define RAS_PLL2_CLK_ENB 7
+ #define RAS_SYNT0_CLK_ENB 8
+ #define RAS_SYNT1_CLK_ENB 9
+ #define RAS_SYNT2_CLK_ENB 10
+ #define RAS_SYNT3_CLK_ENB 11
+
+#define PRSC0_CLK_CFG (MISC_BASE + 0x044)
+#define PRSC1_CLK_CFG (MISC_BASE + 0x048)
+#define PRSC2_CLK_CFG (MISC_BASE + 0x04C)
+#define AMEM_CLK_CFG (MISC_BASE + 0x050)
+ #define AMEM_CLK_ENB 0
+
+#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
+#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
+#define UART_CLK_SYNT (MISC_BASE + 0x064)
+#define GMAC_CLK_SYNT (MISC_BASE + 0x068)
+#define GEN0_CLK_SYNT (MISC_BASE + 0x06C)
+#define GEN1_CLK_SYNT (MISC_BASE + 0x070)
+#define GEN2_CLK_SYNT (MISC_BASE + 0x074)
+#define GEN3_CLK_SYNT (MISC_BASE + 0x078)
+
+/* pll rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+ {.mode = 0, .m = 0x53, .n = 0x0C, .p = 0x1}, /* vco 332 & pll 166 MHz */
+ {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* vco 532 & pll 266 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* vco 664 & pll 332 MHz */
+};
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+ /* For PLL1 = 332 MHz */
+ {.xscale = 2, .yscale = 27, .eq = 0}, /* 12.296 MHz */
+ {.xscale = 2, .yscale = 8, .eq = 0}, /* 41.5 MHz */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* 83 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
+};
+
+/* gpt rate configuration table, in ascending order of rates */
+static struct gpt_rate_tbl gpt_rtbl[] = {
+ /* For pll1 = 332 MHz */
+ {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
+ {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
+ {.mscale = 1, .nscale = 0}, /* 83 MHz */
+};
+
+/* clock parents */
+static const char *uart0_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
+static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
+};
+static const char *gpt0_parents[] = { "pll3_48m_clk", "gpt0_synth_clk", };
+static const char *gpt1_parents[] = { "pll3_48m_clk", "gpt1_synth_clk", };
+static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
+static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", };
+static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
+ "pll2_clk", };
+
+#ifdef CONFIG_MACH_SPEAR300
+static void __init spear300_clk_init(void)
+{
+ struct clk *clk;
+
+ clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+ 1, 1);
+ clk_register_clkdev(clk, NULL, "60000000.clcd");
+
+ clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "94000000.flash");
+
+ clk = clk_register_fixed_factor(NULL, "sdhci_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "70000000.sdhci");
+
+ clk = clk_register_fixed_factor(NULL, "gpio1_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "a9000000.gpio");
+
+ clk = clk_register_fixed_factor(NULL, "kbd_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "a0000000.kbd");
+}
+#endif
+
+/* SPEAr310 machine specific clock registrations */
+#ifdef CONFIG_MACH_SPEAR310
+static void __init spear310_clk_init(void)
+{
+ struct clk *clk;
+
+ clk = clk_register_fixed_factor(NULL, "emi_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "emi", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "44000000.flash");
+
+ clk = clk_register_fixed_factor(NULL, "tdm_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "tdm");
+
+ clk = clk_register_fixed_factor(NULL, "uart1_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "b2000000.serial");
+
+ clk = clk_register_fixed_factor(NULL, "uart2_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "b2080000.serial");
+
+ clk = clk_register_fixed_factor(NULL, "uart3_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "b2100000.serial");
+
+ clk = clk_register_fixed_factor(NULL, "uart4_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "b2180000.serial");
+
+ clk = clk_register_fixed_factor(NULL, "uart5_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "b2200000.serial");
+}
+#endif
+
+/* SPEAr320 machine specific clock registrations */
+#ifdef CONFIG_MACH_SPEAR320
+ #define SMII_PCLK_SHIFT 18
+ #define SMII_PCLK_MASK 2
+ #define SMII_PCLK_VAL_PAD 0x0
+ #define SMII_PCLK_VAL_PLL2 0x1
+ #define SMII_PCLK_VAL_SYNTH0 0x2
+ #define SDHCI_PCLK_SHIFT 15
+ #define SDHCI_PCLK_MASK 1
+ #define SDHCI_PCLK_VAL_48M 0x0
+ #define SDHCI_PCLK_VAL_SYNTH3 0x1
+ #define I2S_REF_PCLK_SHIFT 8
+ #define I2S_REF_PCLK_MASK 1
+ #define I2S_REF_PCLK_SYNTH_VAL 0x1
+ #define I2S_REF_PCLK_PLL2_VAL 0x0
+ #define UART1_PCLK_SHIFT 6
+ #define UART1_PCLK_MASK 1
+ #define SPEAR320_UARTX_PCLK_VAL_SYNTH1 0x0
+ #define SPEAR320_UARTX_PCLK_VAL_APB 0x1
+
+static const char *i2s_ref_parents[] = { "ras_pll2_clk",
+ "ras_gen2_synth_gate_clk", };
+static const char *sdhci_parents[] = { "ras_pll3_48m_clk",
+ "ras_gen3_synth_gate_clk",
+};
+static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
+ "ras_gen0_synth_gate_clk", };
+static const char *uartx_parents[] = { "ras_gen1_synth_gate_clk", "ras_apb_clk",
+};
+
+static void __init spear320_clk_init(void)
+{
+ struct clk *clk;
+
+ clk = clk_register_fixed_rate(NULL, "smii_125m_pad_clk", NULL,
+ CLK_IS_ROOT, 125000000);
+ clk_register_clkdev(clk, "smii_125m_pad", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+ 1, 1);
+ clk_register_clkdev(clk, NULL, "90000000.clcd");
+
+ clk = clk_register_fixed_factor(NULL, "emi_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "emi", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "4c000000.flash");
+
+ clk = clk_register_fixed_factor(NULL, "i2c1_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "a7000000.i2c");
+
+ clk = clk_register_fixed_factor(NULL, "pwm_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "pwm", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "ssp1_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "a5000000.spi");
+
+ clk = clk_register_fixed_factor(NULL, "ssp2_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "a6000000.spi");
+
+ clk = clk_register_fixed_factor(NULL, "can0_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "c_can_platform.0");
+
+ clk = clk_register_fixed_factor(NULL, "can1_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "c_can_platform.1");
+
+ clk = clk_register_fixed_factor(NULL, "i2s_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "i2s");
+
+ clk = clk_register_mux(NULL, "i2s_ref_clk", i2s_ref_parents,
+ ARRAY_SIZE(i2s_ref_parents), 0, SPEAR320_CONTROL_REG,
+ I2S_REF_PCLK_SHIFT, I2S_REF_PCLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "i2s_ref_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "i2s_sclk", "i2s_ref_clk", 0, 1,
+ 4);
+ clk_register_clkdev(clk, "i2s_sclk", NULL);
+
+ clk = clk_register_mux(NULL, "rs485_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_RS485_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "a9300000.serial");
+
+ clk = clk_register_mux(NULL, "sdhci_clk", sdhci_parents,
+ ARRAY_SIZE(sdhci_parents), 0, SPEAR320_CONTROL_REG,
+ SDHCI_PCLK_SHIFT, SDHCI_PCLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "70000000.sdhci");
+
+ clk = clk_register_mux(NULL, "smii_pclk", smii0_parents,
+ ARRAY_SIZE(smii0_parents), 0, SPEAR320_CONTROL_REG,
+ SMII_PCLK_SHIFT, SMII_PCLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "smii_pclk");
+
+ clk = clk_register_fixed_factor(NULL, "smii_clk", "smii_pclk", 0, 1, 1);
+ clk_register_clkdev(clk, NULL, "smii");
+
+ clk = clk_register_mux(NULL, "uart1_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_CONTROL_REG,
+ UART1_PCLK_SHIFT, UART1_PCLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "a3000000.serial");
+
+ clk = clk_register_mux(NULL, "uart2_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_UART2_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "a4000000.serial");
+
+ clk = clk_register_mux(NULL, "uart3_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_UART3_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "a9100000.serial");
+
+ clk = clk_register_mux(NULL, "uart4_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_UART4_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "a9200000.serial");
+
+ clk = clk_register_mux(NULL, "uart5_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_UART5_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "60000000.serial");
+
+ clk = clk_register_mux(NULL, "uart6_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_UART6_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "60100000.serial");
+}
+#endif
+
+void __init spear3xx_clk_init(void)
+{
+ struct clk *clk, *clk1;
+
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk_register_clkdev(clk, "apb_pclk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+ 32000);
+ clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
+ 24000000);
+ clk_register_clkdev(clk, "osc_24m_clk", NULL);
+
+ /* clock derived from 32 KHz osc clk */
+ clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
+ PERIP1_CLK_ENB, RTC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc900000.rtc");
+
+ /* clock derived from 24 MHz osc clk */
+ clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+ 48000000);
+ clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "fc880000.wdt");
+
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL,
+ "osc_24m_clk", 0, PLL1_CTR, PLL1_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco1_clk", NULL);
+ clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
+ "osc_24m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco2_clk", NULL);
+ clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+ /* clock derived from pll1 clk */
+ clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 1);
+ clk_register_clkdev(clk, "cpu_clk", NULL);
+
+ clk = clk_register_divider(NULL, "ahb_clk", "pll1_clk",
+ CLK_SET_RATE_PARENT, CORE_CLK_CFG, HCLK_RATIO_SHIFT,
+ HCLK_RATIO_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "ahb_clk", NULL);
+
+ clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
+ "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "uart_synth_clk", NULL);
+ clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+ ARRAY_SIZE(uart0_parents), 0, PERIP_CLK_CFG,
+ UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart0", "uart0_mux_clk", 0,
+ PERIP1_CLK_ENB, UART_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0000000.serial");
+
+ clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
+ "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "firda_synth_clk", NULL);
+ clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+ ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
+ FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "firda_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+ PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "firda");
+
+ /* gpt clocks */
+ clk_register_gpt("gpt0_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
+ gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents,
+ ARRAY_SIZE(gpt0_parents), 0, PERIP_CLK_CFG,
+ GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt0");
+
+ clk_register_gpt("gpt1_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
+ gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt1_parents,
+ ARRAY_SIZE(gpt1_parents), 0, PERIP_CLK_CFG,
+ GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt1");
+
+ clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
+ gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+ ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
+ GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt2");
+
+ /* general synth clocks */
+ clk = clk_register_aux("gen0_synth_clk", "gen0_synth_gate_clk",
+ "pll1_clk", 0, GEN0_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "gen0_synth_clk", NULL);
+ clk_register_clkdev(clk1, "gen0_synth_gate_clk", NULL);
+
+ clk = clk_register_aux("gen1_synth_clk", "gen1_synth_gate_clk",
+ "pll1_clk", 0, GEN1_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "gen1_synth_clk", NULL);
+ clk_register_clkdev(clk1, "gen1_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gen2_3_parent_clk", gen2_3_parents,
+ ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG,
+ GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gen2_3_parent_clk", NULL);
+
+ clk = clk_register_aux("gen2_synth_clk", "gen2_synth_gate_clk",
+ "gen2_3_parent_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "gen2_synth_clk", NULL);
+ clk_register_clkdev(clk1, "gen2_synth_gate_clk", NULL);
+
+ clk = clk_register_aux("gen3_synth_clk", "gen3_synth_gate_clk",
+ "gen2_3_parent_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "gen3_synth_clk", NULL);
+ clk_register_clkdev(clk1, "gen3_synth_gate_clk", NULL);
+
+ /* clock derived from pll3 clk */
+ clk = clk_register_gate(NULL, "usbh_clk", "pll3_48m_clk", 0,
+ PERIP1_CLK_ENB, USBH_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "usbh_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "usbh.0_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "usbh.1_clk", "usbh_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "usbh.1_clk", NULL);
+
+ clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
+ PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "designware_udc");
+
+ /* clock derived from ahb clk */
+ clk = clk_register_fixed_factor(NULL, "ahbmult2_clk", "ahb_clk", 0, 2,
+ 1);
+ clk_register_clkdev(clk, "ahbmult2_clk", NULL);
+
+ clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
+ ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
+ MCTR_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "ddr_clk", NULL);
+
+ clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
+ CLK_SET_RATE_PARENT, CORE_CLK_CFG, PCLK_RATIO_SHIFT,
+ PCLK_RATIO_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "apb_clk", NULL);
+
+ clk = clk_register_gate(NULL, "amem_clk", "ahb_clk", 0, AMEM_CLK_CFG,
+ AMEM_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "amem_clk", NULL);
+
+ clk = clk_register_gate(NULL, "c3_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ C3_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "c3_clk");
+
+ clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ DMA_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc400000.dma");
+
+ clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ GMAC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "e0800000.eth");
+
+ clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ I2C_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0180000.i2c");
+
+ clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ JPEG_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "jpeg");
+
+ clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ SMI_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc000000.flash");
+
+ /* clock derived from apb clk */
+ clk = clk_register_gate(NULL, "adc_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ ADC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "adc");
+
+ clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ GPIO_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc980000.gpio");
+
+ clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ SSP_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0100000.spi");
+
+ /* RAS clk enable */
+ clk = clk_register_gate(NULL, "ras_ahb_clk", "ahb_clk", 0, RAS_CLK_ENB,
+ RAS_AHB_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_ahb_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB,
+ RAS_APB_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_apb_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0,
+ RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_32k_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_24m_clk", "osc_24m_clk", 0,
+ RAS_CLK_ENB, RAS_24M_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_24m_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_pll1_clk", "pll1_clk", 0,
+ RAS_CLK_ENB, RAS_PLL1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_pll1_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_pll2_clk", "pll2_clk", 0,
+ RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_pll2_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_pll3_48m_clk", "pll3_48m_clk", 0,
+ RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_pll3_48m_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_gen0_synth_gate_clk",
+ "gen0_synth_gate_clk", 0, RAS_CLK_ENB,
+ RAS_SYNT0_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_gen0_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_gen1_synth_gate_clk",
+ "gen1_synth_gate_clk", 0, RAS_CLK_ENB,
+ RAS_SYNT1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_gen1_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_gen2_synth_gate_clk",
+ "gen2_synth_gate_clk", 0, RAS_CLK_ENB,
+ RAS_SYNT2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_gen2_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_gen3_synth_gate_clk",
+ "gen3_synth_gate_clk", 0, RAS_CLK_ENB,
+ RAS_SYNT3_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_gen3_synth_gate_clk", NULL);
+
+ if (of_machine_is_compatible("st,spear300"))
+ spear300_clk_init();
+ else if (of_machine_is_compatible("st,spear310"))
+ spear310_clk_init();
+ else if (of_machine_is_compatible("st,spear320"))
+ spear320_clk_init();
+}
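
Aside, not part of the patch: the init code above pairs every clk_register_*() call with clk_register_clkdev(). Entries registered with a con_id and a NULL dev_id (for example "ahb_clk") are matched on the connection name, while entries with a NULL con_id and a dev_id string (for example "d0000000.serial") are matched on the device name, so a peripheral driver only needs its own struct device. A minimal consumer sketch under that assumption; the function name is hypothetical:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Illustrative only: enables the clock that spear3xx_clk_init() bound to
 * this device, e.g. via clk_register_clkdev(clk, NULL, "d0000000.serial"). */
static int example_enable_periph_clk(struct device *dev)
{
        struct clk *clk = clk_get(dev, NULL);   /* matched by dev_id */

        if (IS_ERR(clk))
                return PTR_ERR(clk);

        return clk_prepare_enable(clk);
}
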
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
new file mode 100644
index 000000000000..f9a20b382304
--- /dev/null
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -0,0 +1,342 @@
+/*
+ * SPEAr6xx machines clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <mach/misc_regs.h>
+#include "clk.h"
+
+static DEFINE_SPINLOCK(_lock);
+
+#define PLL1_CTR (MISC_BASE + 0x008)
+#define PLL1_FRQ (MISC_BASE + 0x00C)
+#define PLL2_CTR (MISC_BASE + 0x014)
+#define PLL2_FRQ (MISC_BASE + 0x018)
+#define PLL_CLK_CFG (MISC_BASE + 0x020)
+ /* PLL_CLK_CFG register masks */
+ #define MCTR_CLK_SHIFT 28
+ #define MCTR_CLK_MASK 3
+
+#define CORE_CLK_CFG (MISC_BASE + 0x024)
+ /* CORE CLK CFG register masks */
+ #define HCLK_RATIO_SHIFT 10
+ #define HCLK_RATIO_MASK 2
+ #define PCLK_RATIO_SHIFT 8
+ #define PCLK_RATIO_MASK 2
+
+#define PERIP_CLK_CFG (MISC_BASE + 0x028)
+ /* PERIP_CLK_CFG register masks */
+ #define CLCD_CLK_SHIFT 2
+ #define CLCD_CLK_MASK 2
+ #define UART_CLK_SHIFT 4
+ #define UART_CLK_MASK 1
+ #define FIRDA_CLK_SHIFT 5
+ #define FIRDA_CLK_MASK 2
+ #define GPT0_CLK_SHIFT 8
+ #define GPT1_CLK_SHIFT 10
+ #define GPT2_CLK_SHIFT 11
+ #define GPT3_CLK_SHIFT 12
+ #define GPT_CLK_MASK 1
+
+#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
+ /* PERIP1_CLK_ENB register masks */
+ #define UART0_CLK_ENB 3
+ #define UART1_CLK_ENB 4
+ #define SSP0_CLK_ENB 5
+ #define SSP1_CLK_ENB 6
+ #define I2C_CLK_ENB 7
+ #define JPEG_CLK_ENB 8
+ #define FSMC_CLK_ENB 9
+ #define FIRDA_CLK_ENB 10
+ #define GPT2_CLK_ENB 11
+ #define GPT3_CLK_ENB 12
+ #define GPIO2_CLK_ENB 13
+ #define SSP2_CLK_ENB 14
+ #define ADC_CLK_ENB 15
+ #define GPT1_CLK_ENB 11
+ #define RTC_CLK_ENB 17
+ #define GPIO1_CLK_ENB 18
+ #define DMA_CLK_ENB 19
+ #define SMI_CLK_ENB 21
+ #define CLCD_CLK_ENB 22
+ #define GMAC_CLK_ENB 23
+ #define USBD_CLK_ENB 24
+ #define USBH0_CLK_ENB 25
+ #define USBH1_CLK_ENB 26
+
+#define PRSC0_CLK_CFG (MISC_BASE + 0x044)
+#define PRSC1_CLK_CFG (MISC_BASE + 0x048)
+#define PRSC2_CLK_CFG (MISC_BASE + 0x04C)
+
+#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
+#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
+#define UART_CLK_SYNT (MISC_BASE + 0x064)
+
+/* vco rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+ {.mode = 0, .m = 0x53, .n = 0x0F, .p = 0x1}, /* vco 332 & pll 166 MHz */
+ {.mode = 0, .m = 0x85, .n = 0x0F, .p = 0x1}, /* vco 532 & pll 266 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x0F, .p = 0x1}, /* vco 664 & pll 332 MHz */
+};
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+ /* For PLL1 = 332 MHz */
+ {.xscale = 2, .yscale = 8, .eq = 0}, /* 41.5 MHz */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* 83 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
+};
+
+static const char *clcd_parents[] = { "pll3_48m_clk", "clcd_synth_gate_clk", };
+static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
+};
+static const char *uart_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
+static const char *gpt0_1_parents[] = { "pll3_48m_clk", "gpt0_1_synth_clk", };
+static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
+static const char *gpt3_parents[] = { "pll3_48m_clk", "gpt3_synth_clk", };
+static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
+ "pll2_clk", };
+
+/* gpt rate configuration table, in ascending order of rates */
+static struct gpt_rate_tbl gpt_rtbl[] = {
+ /* For pll1 = 332 MHz */
+ {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
+ {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
+ {.mscale = 1, .nscale = 0}, /* 83 MHz */
+};
+
+void __init spear6xx_clk_init(void)
+{
+ struct clk *clk, *clk1;
+
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk_register_clkdev(clk, "apb_pclk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+ 32000);
+ clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_30m_clk", NULL, CLK_IS_ROOT,
+ 30000000);
+ clk_register_clkdev(clk, "osc_30m_clk", NULL);
+
+ /* clock derived from 32 KHz osc clk */
+ clk = clk_register_gate(NULL, "rtc_spear", "osc_32k_clk", 0,
+ PERIP1_CLK_ENB, RTC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "rtc-spear");
+
+ /* clock derived from 30 MHz osc clk */
+ clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+ 48000000);
+ clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_30m_clk",
+ 0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
+ &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco1_clk", NULL);
+ clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
+ "osc_30m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco2_clk", NULL);
+ clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_30m_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "wdt");
+
+ /* clock derived from pll1 clk */
+ clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 1);
+ clk_register_clkdev(clk, "cpu_clk", NULL);
+
+ clk = clk_register_divider(NULL, "ahb_clk", "pll1_clk",
+ CLK_SET_RATE_PARENT, CORE_CLK_CFG, HCLK_RATIO_SHIFT,
+ HCLK_RATIO_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "ahb_clk", NULL);
+
+ clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
+ "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "uart_synth_clk", NULL);
+ clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "uart_mux_clk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG,
+ UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "uart_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart0", "uart_mux_clk", 0,
+ PERIP1_CLK_ENB, UART0_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0000000.serial");
+
+ clk = clk_register_gate(NULL, "uart1", "uart_mux_clk", 0,
+ PERIP1_CLK_ENB, UART1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0080000.serial");
+
+ clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
+ "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "firda_synth_clk", NULL);
+ clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+ ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
+ FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "firda_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+ PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "firda");
+
+ clk = clk_register_aux("clcd_synth_clk", "clcd_synth_gate_clk",
+ "pll1_clk", 0, CLCD_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+ clk_register_clkdev(clk1, "clcd_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "clcd_mux_clk", clcd_parents,
+ ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG,
+ CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "clcd_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "clcd_clk", "clcd_mux_clk", 0,
+ PERIP1_CLK_ENB, CLCD_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "clcd");
+
+ /* gpt clocks */
+ clk = clk_register_gpt("gpt0_1_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
+ gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk_register_clkdev(clk, "gpt0_1_synth_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt0_1_parents,
+ ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
+ GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt0");
+
+ clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt0_1_parents,
+ ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
+ GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt1");
+
+ clk = clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
+ gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk_register_clkdev(clk, "gpt2_synth_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+ ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
+ GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt2");
+
+ clk = clk_register_gpt("gpt3_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
+ gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk_register_clkdev(clk, "gpt3_synth_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt3_parents,
+ ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG,
+ GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+ PERIP1_CLK_ENB, GPT3_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt3");
+
+ /* clock derived from pll3 clk */
+ clk = clk_register_gate(NULL, "usbh0_clk", "pll3_48m_clk", 0,
+ PERIP1_CLK_ENB, USBH0_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "usbh.0_clk");
+
+ clk = clk_register_gate(NULL, "usbh1_clk", "pll3_48m_clk", 0,
+ PERIP1_CLK_ENB, USBH1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "usbh.1_clk");
+
+ clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
+ PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "designware_udc");
+
+ /* clock derived from ahb clk */
+ clk = clk_register_fixed_factor(NULL, "ahbmult2_clk", "ahb_clk", 0, 2,
+ 1);
+ clk_register_clkdev(clk, "ahbmult2_clk", NULL);
+
+ clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
+ ARRAY_SIZE(ddr_parents),
+ 0, PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ddr_clk", NULL);
+
+ clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
+ CLK_SET_RATE_PARENT, CORE_CLK_CFG, PCLK_RATIO_SHIFT,
+ PCLK_RATIO_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "apb_clk", NULL);
+
+ clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ DMA_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc400000.dma");
+
+ clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ FSMC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d1800000.flash");
+
+ clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ GMAC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gmac");
+
+ clk = clk_register_gate(NULL, "i2c_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ I2C_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0200000.i2c");
+
+ clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ JPEG_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "jpeg");
+
+ clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ SMI_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc000000.flash");
+
+ /* clock derived from apb clk */
+ clk = clk_register_gate(NULL, "adc_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ ADC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "adc");
+
+ clk = clk_register_fixed_factor(NULL, "gpio0_clk", "apb_clk", 0, 1, 1);
+ clk_register_clkdev(clk, NULL, "f0100000.gpio");
+
+ clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ GPIO1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc980000.gpio");
+
+ clk = clk_register_gate(NULL, "gpio2_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ GPIO2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d8100000.gpio");
+
+ clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ SSP0_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "ssp-pl022.0");
+
+ clk = clk_register_gate(NULL, "ssp1_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ SSP1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "ssp-pl022.1");
+
+ clk = clk_register_gate(NULL, "ssp2_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ SSP2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "ssp-pl022.2");
+}
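
Aside, not part of the patch: the rate comments in aux_rtbl above follow from the SPEAr aux synthesizer equations, Fout = Fin * X / Y when eq = 1 and Fout = Fin * X / (2 * Y) when eq = 0 - an assumption inferred from the table entries themselves. A small illustrative helper (hypothetical name) that reproduces those rates for Fin = 332 MHz:

/* Example: example_aux_rate(332000000, 2, 8, 0) == 41500000 (41.5 MHz). */
static unsigned long example_aux_rate(unsigned long fin, unsigned int xscale,
                                      unsigned int yscale, unsigned int eq)
{
        unsigned long rate = (fin / yscale) * xscale;

        return eq ? rate : rate / 2;
}
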
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index e6ecc5f23943..1cc6b3f3e262 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -16,6 +16,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/clk.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
@@ -79,6 +80,7 @@ struct crypto_priv {
void __iomem *reg;
void __iomem *sram;
int irq;
+ struct clk *clk;
struct task_struct *queue_th;
/* the lock protects queue and eng_st */
@@ -1053,6 +1055,12 @@ static int mv_probe(struct platform_device *pdev)
if (ret)
goto err_thread;
+ /* Not all platforms can gate the clock, so it is not
+ an error if the clock does not exist. */
+ cp->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(cp->clk))
+ clk_prepare_enable(cp->clk);
+
writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
@@ -1118,6 +1126,12 @@ static int mv_remove(struct platform_device *pdev)
memset(cp->sram, 0, cp->sram_size);
iounmap(cp->sram);
iounmap(cp->reg);
+
+ if (!IS_ERR(cp->clk)) {
+ clk_disable_unprepare(cp->clk);
+ clk_put(cp->clk);
+ }
+
kfree(cp);
cpg = NULL;
return 0;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ef378b5b17e4..aadeb5be9dba 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -238,6 +238,7 @@ config IMX_DMA
config MXS_DMA
bool "MXS DMA support"
depends on SOC_IMX23 || SOC_IMX28
+ select STMP_DEVICE
select DMA_ENGINE
help
Support the MXS DMA engine. This engine including APBH-DMA
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 3d704abd7912..49ecbbb8932d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -95,10 +95,14 @@ static struct amba_driver pl08x_amba_driver;
* struct vendor_data - vendor-specific config parameters for PL08x derivatives
* @channels: the number of channels available in this variant
* @dualmaster: whether this version supports dual AHB masters or not.
+ * @nomadik: whether the channels have Nomadik security extension bits
+ * that need to be checked for permission before use, and whether some
+ * registers are missing
*/
struct vendor_data {
u8 channels;
bool dualmaster;
+ bool nomadik;
};
/*
@@ -385,7 +389,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
spin_lock_irqsave(&ch->lock, flags);
- if (!ch->serving) {
+ if (!ch->locked && !ch->serving) {
ch->serving = virt_chan;
ch->signal = -1;
spin_unlock_irqrestore(&ch->lock, flags);
@@ -1324,7 +1328,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
int ret, tmp;
dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
- __func__, sgl->length, plchan->name);
+ __func__, sg_dma_len(sgl), plchan->name);
txd = pl08x_get_txd(plchan, flags);
if (!txd) {
@@ -1378,11 +1382,11 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
dsg->len = sg_dma_len(sg);
if (direction == DMA_MEM_TO_DEV) {
- dsg->src_addr = sg_phys(sg);
+ dsg->src_addr = sg_dma_address(sg);
dsg->dst_addr = slave_addr;
} else {
dsg->src_addr = slave_addr;
- dsg->dst_addr = sg_phys(sg);
+ dsg->dst_addr = sg_dma_address(sg);
}
}
@@ -1484,6 +1488,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
*/
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
+ /* The Nomadik variant does not have the config register */
+ if (pl08x->vd->nomadik)
+ return;
writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}
@@ -1616,7 +1623,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
__func__, err);
writel(err, pl08x->base + PL080_ERR_CLEAR);
}
- tc = readl(pl08x->base + PL080_INT_STATUS);
+ tc = readl(pl08x->base + PL080_TC_STATUS);
if (tc)
writel(tc, pl08x->base + PL080_TC_CLEAR);
@@ -1773,8 +1780,10 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
spin_lock_irqsave(&ch->lock, flags);
virt_chan = ch->serving;
- seq_printf(s, "%d\t\t%s\n",
- ch->id, virt_chan ? virt_chan->name : "(none)");
+ seq_printf(s, "%d\t\t%s%s\n",
+ ch->id,
+ virt_chan ? virt_chan->name : "(none)",
+ ch->locked ? " LOCKED" : "");
spin_unlock_irqrestore(&ch->lock, flags);
}
@@ -1918,7 +1927,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
}
/* Initialize physical channels */
- pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
+ pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
GFP_KERNEL);
if (!pl08x->phy_chans) {
dev_err(&adev->dev, "%s failed to allocate "
@@ -1933,8 +1942,23 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
ch->id = i;
ch->base = pl08x->base + PL080_Cx_BASE(i);
spin_lock_init(&ch->lock);
- ch->serving = NULL;
ch->signal = -1;
+
+ /*
+ * Nomadik variants can have channels that are locked
+ * down for the secure world only. Lock up these channels
+ * by perpetually serving a dummy virtual channel.
+ */
+ if (vd->nomadik) {
+ u32 val;
+
+ val = readl(ch->base + PL080_CH_CONFIG);
+ if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
+ dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
+ ch->locked = true;
+ }
+ }
+
dev_dbg(&adev->dev, "physical channel %d is %s\n",
i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
}
@@ -2017,6 +2041,12 @@ static struct vendor_data vendor_pl080 = {
.dualmaster = true,
};
+static struct vendor_data vendor_nomadik = {
+ .channels = 8,
+ .dualmaster = true,
+ .nomadik = true,
+};
+
static struct vendor_data vendor_pl081 = {
.channels = 2,
.dualmaster = false,
@@ -2037,9 +2067,9 @@ static struct amba_id pl08x_ids[] = {
},
/* Nomadik 8815 PL080 variant */
{
- .id = 0x00280880,
+ .id = 0x00280080,
.mask = 0x00ffffff,
- .data = &vendor_pl080,
+ .data = &vendor_nomadik,
},
{ 0, 0 },
};
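
Aside, not part of the patch: this driver and several of the ones below switch from sg_phys()/sg->length to sg_dma_address()/sg_dma_len(). After dma_map_sg() an IOMMU or bounce buffering may remap or merge entries, so only the sg_dma_* accessors give the addresses and lengths the device must actually use. A minimal sketch under that assumption; names are illustrative and the descriptor-programming step is a placeholder:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int example_fill_hw_descs(struct device *dev, struct scatterlist *sgl,
                                 int nents)
{
        struct scatterlist *sg;
        int i, mapped;

        mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
        if (!mapped)
                return -ENOMEM;

        for_each_sg(sgl, sg, mapped, i) {
                dma_addr_t addr = sg_dma_address(sg);   /* not sg_phys(sg) */
                unsigned int len = sg_dma_len(sg);      /* not sg->length */

                /* program addr/len into a hardware descriptor here */
                (void)addr;
                (void)len;
        }

        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
        return 0;
}
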
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index bf0d7e4e345b..7292aa87b2dd 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -39,7 +39,6 @@
*/
#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
-#define ATC_DEFAULT_CTRLA (0)
#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
|ATC_DIF(AT_DMA_MEM_IF))
@@ -574,7 +573,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
return NULL;
}
- ctrla = ATC_DEFAULT_CTRLA;
ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
| ATC_SRC_ADDR_MODE_INCR
| ATC_DST_ADDR_MODE_INCR
@@ -585,13 +583,13 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
* of the most common optimization.
*/
if (!((src | dest | len) & 3)) {
- ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
+ ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
src_width = dst_width = 2;
} else if (!((src | dest | len) & 1)) {
- ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
+ ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
src_width = dst_width = 1;
} else {
- ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
+ ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
src_width = dst_width = 0;
}
@@ -668,7 +666,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
- ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
+ ctrla = ATC_SCSIZE(sconfig->src_maxburst)
+ | ATC_DCSIZE(sconfig->dst_maxburst);
ctrlb = ATC_IEN;
switch (direction) {
@@ -796,12 +795,12 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
enum dma_transfer_direction direction)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
- struct at_dma_slave *atslave = chan->private;
struct dma_slave_config *sconfig = &atchan->dma_sconfig;
u32 ctrla;
/* prepare common CRTLA value */
- ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla
+ ctrla = ATC_SCSIZE(sconfig->src_maxburst)
+ | ATC_DCSIZE(sconfig->dst_maxburst)
| ATC_DST_WIDTH(reg_width)
| ATC_SRC_WIDTH(reg_width)
| period_len >> reg_width;
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 897a8bcaec90..8a6c8e8b2940 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -87,7 +87,26 @@
/* Bitfields in CTRLA */
#define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */
#define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */
-/* Chunck Tranfer size definitions are in at_hdmac.h */
+#define ATC_SCSIZE_MASK (0x7 << 16) /* Source Chunk Transfer Size */
+#define ATC_SCSIZE(x) (ATC_SCSIZE_MASK & ((x) << 16))
+#define ATC_SCSIZE_1 (0x0 << 16)
+#define ATC_SCSIZE_4 (0x1 << 16)
+#define ATC_SCSIZE_8 (0x2 << 16)
+#define ATC_SCSIZE_16 (0x3 << 16)
+#define ATC_SCSIZE_32 (0x4 << 16)
+#define ATC_SCSIZE_64 (0x5 << 16)
+#define ATC_SCSIZE_128 (0x6 << 16)
+#define ATC_SCSIZE_256 (0x7 << 16)
+#define ATC_DCSIZE_MASK (0x7 << 20) /* Destination Chunk Transfer Size */
+#define ATC_DCSIZE(x) (ATC_DCSIZE_MASK & ((x) << 20))
+#define ATC_DCSIZE_1 (0x0 << 20)
+#define ATC_DCSIZE_4 (0x1 << 20)
+#define ATC_DCSIZE_8 (0x2 << 20)
+#define ATC_DCSIZE_16 (0x3 << 20)
+#define ATC_DCSIZE_32 (0x4 << 20)
+#define ATC_DCSIZE_64 (0x5 << 20)
+#define ATC_DCSIZE_128 (0x6 << 20)
+#define ATC_DCSIZE_256 (0x7 << 20)
#define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */
#define ATC_SRC_WIDTH(x) ((x) << 24)
#define ATC_SRC_WIDTH_BYTE (0x0 << 24)
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 750925f9638b..e67b4e06a918 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1033,7 +1033,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!sgl)
goto out;
- if (sgl->length == 0)
+ if (sg_dma_len(sgl) == 0)
goto out;
spin_lock_irqsave(&cohc->lock, flg);
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 6c0e2d4c6682..780e0429b38c 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -270,10 +270,10 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
if (dir == DMA_MEM_TO_DEV)
/* increment source address */
- src = sg_phys(sg);
+ src = sg_dma_address(sg);
else
/* increment destination address */
- dst = sg_phys(sg);
+ dst = sg_dma_address(sg);
bytes_to_transfer = sg_dma_len(sg);
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 7439079f5eed..e23dc82d43ac 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -742,7 +743,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dw_desc *desc;
u32 len, dlen, mem;
- mem = sg_phys(sg);
+ mem = sg_dma_address(sg);
len = sg_dma_len(sg);
if (!((mem | len) & 7))
@@ -809,7 +810,7 @@ slave_sg_todev_fill_desc:
struct dw_desc *desc;
u32 len, dlen, mem;
- mem = sg_phys(sg);
+ mem = sg_dma_address(sg);
len = sg_dma_len(sg);
if (!((mem | len) & 7))
@@ -1429,7 +1430,7 @@ static int __init dw_probe(struct platform_device *pdev)
err = PTR_ERR(dw->clk);
goto err_clk;
}
- clk_enable(dw->clk);
+ clk_prepare_enable(dw->clk);
/* force dma off, just in case */
dw_dma_off(dw);
@@ -1510,7 +1511,7 @@ static int __init dw_probe(struct platform_device *pdev)
return 0;
err_irq:
- clk_disable(dw->clk);
+ clk_disable_unprepare(dw->clk);
clk_put(dw->clk);
err_clk:
iounmap(dw->regs);
@@ -1540,7 +1541,7 @@ static int __exit dw_remove(struct platform_device *pdev)
channel_clear_bit(dw, CH_EN, dwc->mask);
}
- clk_disable(dw->clk);
+ clk_disable_unprepare(dw->clk);
clk_put(dw->clk);
iounmap(dw->regs);
@@ -1559,7 +1560,7 @@ static void dw_shutdown(struct platform_device *pdev)
struct dw_dma *dw = platform_get_drvdata(pdev);
dw_dma_off(platform_get_drvdata(pdev));
- clk_disable(dw->clk);
+ clk_disable_unprepare(dw->clk);
}
static int dw_suspend_noirq(struct device *dev)
@@ -1568,7 +1569,7 @@ static int dw_suspend_noirq(struct device *dev)
struct dw_dma *dw = platform_get_drvdata(pdev);
dw_dma_off(platform_get_drvdata(pdev));
- clk_disable(dw->clk);
+ clk_disable_unprepare(dw->clk);
return 0;
}
@@ -1578,7 +1579,7 @@ static int dw_resume_noirq(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct dw_dma *dw = platform_get_drvdata(pdev);
- clk_enable(dw->clk);
+ clk_prepare_enable(dw->clk);
dma_writel(dw, CFG, DW_CFG_DMA_EN);
return 0;
}
@@ -1592,12 +1593,21 @@ static const struct dev_pm_ops dw_dev_pm_ops = {
.poweroff_noirq = dw_suspend_noirq,
};
+#ifdef CONFIG_OF
+static const struct of_device_id dw_dma_id_table[] = {
+ { .compatible = "snps,dma-spear1340" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dw_dma_id_table);
+#endif
+
static struct platform_driver dw_driver = {
.remove = __exit_p(dw_remove),
.shutdown = dw_shutdown,
.driver = {
.name = "dw_dmac",
.pm = &dw_dev_pm_ops,
+ .of_match_table = of_match_ptr(dw_dma_id_table),
},
};
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index f6e9b572b998..c64917ec313d 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -71,6 +71,7 @@
#define M2M_CONTROL_TM_SHIFT 13
#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_NFBINT BIT(21)
#define M2M_CONTROL_RSS_SHIFT 22
#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
@@ -79,7 +80,22 @@
#define M2M_CONTROL_PWSC_SHIFT 25
#define M2M_INTERRUPT 0x0004
-#define M2M_INTERRUPT_DONEINT BIT(1)
+#define M2M_INTERRUPT_MASK 6
+
+#define M2M_STATUS 0x000c
+#define M2M_STATUS_CTL_SHIFT 1
+#define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_BUF_SHIFT 4
+#define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_DONE BIT(6)
#define M2M_BCR0 0x0010
#define M2M_BCR1 0x0014
@@ -426,15 +442,6 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
/*
* M2M DMA implementation
- *
- * For the M2M transfers we don't use NFB at all. This is because it simply
- * doesn't work well with memcpy transfers. When you submit both buffers it is
- * extremely unlikely that you get an NFB interrupt, but it instead reports
- * DONE interrupt and both buffers are already transferred which means that we
- * weren't able to update the next buffer.
- *
- * So for now we "simulate" NFB by just submitting buffer after buffer
- * without double buffering.
*/
static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
@@ -543,6 +550,11 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
m2m_fill_desc(edmac);
control |= M2M_CONTROL_DONEINT;
+ if (ep93xx_dma_advance_active(edmac)) {
+ m2m_fill_desc(edmac);
+ control |= M2M_CONTROL_NFBINT;
+ }
+
/*
* Now we can finally enable the channel. For M2M channel this must be
* done _after_ the BCRx registers are programmed.
@@ -560,32 +572,89 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
}
}
+/*
+ * According to the EP93xx User's Guide, we should receive the DONE interrupt
+ * when all M2M DMA controller transactions complete normally. This is not
+ * always the case - sometimes the EP93xx M2M DMA asserts DONE while the DMA
+ * channel is still running (channel Buffer FSM in DMA_BUF_ON state, channel
+ * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
+ * In effect, disabling the channel when only the DONE bit is set could stop
+ * a currently running DMA transfer. To avoid this, we use the Buffer FSM and
+ * Control FSM to check the current state of the DMA channel.
+ */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
+ u32 status = readl(edmac->regs + M2M_STATUS);
+ u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
+ u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
+ bool done = status & M2M_STATUS_DONE;
+ bool last_done;
u32 control;
+ struct ep93xx_dma_desc *desc;
- if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
+ /* Accept only DONE and NFB interrupts */
+ if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
return INTERRUPT_UNKNOWN;
- /* Clear the DONE bit */
- writel(0, edmac->regs + M2M_INTERRUPT);
+ if (done) {
+ /* Clear the DONE bit */
+ writel(0, edmac->regs + M2M_INTERRUPT);
+ }
- /* Disable interrupts and the channel */
- control = readl(edmac->regs + M2M_CONTROL);
- control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
- writel(control, edmac->regs + M2M_CONTROL);
+ /*
+ * Check whether we are done with descriptors or not. This, together with
+ * the DMA channel state, determines the action to take in the interrupt.
+ */
+ desc = ep93xx_dma_get_active(edmac);
+ last_done = !desc || desc->txd.cookie;
/*
- * Since we only get DONE interrupt we have to find out ourselves
- * whether there still is something to process. So we try to advance
- * the chain an see whether it succeeds.
+ * Use the M2M DMA Buffer FSM and Control FSM to check the current state of
+ * the DMA channel. Using the DONE and NFB bits from the channel status
+ * register or bits from the channel interrupt register is not reliable.
*/
- if (ep93xx_dma_advance_active(edmac)) {
- edmac->edma->hw_submit(edmac);
- return INTERRUPT_NEXT_BUFFER;
+ if (!last_done &&
+ (buf_fsm == M2M_STATUS_BUF_NO ||
+ buf_fsm == M2M_STATUS_BUF_ON)) {
+ /*
+ * Two buffers are ready for update when Buffer FSM is in
+ * DMA_NO_BUF state. Only one buffer can be prepared without
+ * disabling the channel or polling the DONE bit.
+ * To simplify things, always prepare only one buffer.
+ */
+ if (ep93xx_dma_advance_active(edmac)) {
+ m2m_fill_desc(edmac);
+ if (done && !edmac->chan.private) {
+ /* Software trigger for memcpy channel */
+ control = readl(edmac->regs + M2M_CONTROL);
+ control |= M2M_CONTROL_START;
+ writel(control, edmac->regs + M2M_CONTROL);
+ }
+ return INTERRUPT_NEXT_BUFFER;
+ } else {
+ last_done = true;
+ }
+ }
+
+ /*
+ * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
+ * and Control FSM is in DMA_STALL state.
+ */
+ if (last_done &&
+ buf_fsm == M2M_STATUS_BUF_NO &&
+ ctl_fsm == M2M_STATUS_CTL_STALL) {
+ /* Disable interrupts and the channel */
+ control = readl(edmac->regs + M2M_CONTROL);
+ control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
+ | M2M_CONTROL_ENABLE);
+ writel(control, edmac->regs + M2M_CONTROL);
+ return INTERRUPT_DONE;
}
- return INTERRUPT_DONE;
+ /*
+ * Nothing to do this time.
+ */
+ return INTERRUPT_NEXT_BUFFER;
}
/*
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index bb787d8e1529..fcfeb3cd8d31 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -227,7 +227,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
struct scatterlist *sg = d->sg;
unsigned long now;
- now = min(d->len, sg->length);
+ now = min(d->len, sg_dma_len(sg));
if (d->len != IMX_DMA_LENGTH_LOOP)
d->len -= now;
@@ -763,16 +763,16 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
for_each_sg(sgl, sg, sg_len, i) {
- dma_length += sg->length;
+ dma_length += sg_dma_len(sg);
}
switch (imxdmac->word_size) {
case DMA_SLAVE_BUSWIDTH_4_BYTES:
- if (sgl->length & 3 || sgl->dma_address & 3)
+ if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
return NULL;
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
- if (sgl->length & 1 || sgl->dma_address & 1)
+ if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
return NULL;
break;
case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -831,13 +831,13 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
imxdmac->sg_list[i].page_link = 0;
imxdmac->sg_list[i].offset = 0;
imxdmac->sg_list[i].dma_address = dma_addr;
- imxdmac->sg_list[i].length = period_len;
+ sg_dma_len(&imxdmac->sg_list[i]) = period_len;
dma_addr += period_len;
}
/* close the loop */
imxdmac->sg_list[periods].offset = 0;
- imxdmac->sg_list[periods].length = 0;
+ sg_dma_len(&imxdmac->sg_list[periods]) = 0;
imxdmac->sg_list[periods].page_link =
((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d3e38e28bb6b..fb4f4990f5eb 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,7 +24,7 @@
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
-#include <linux/wait.h>
+#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
@@ -271,6 +271,7 @@ struct sdma_channel {
enum dma_status status;
unsigned int chn_count;
unsigned int chn_real_count;
+ struct tasklet_struct tasklet;
};
#define IMX_DMA_SG_LOOP BIT(0)
@@ -322,8 +323,9 @@ struct sdma_engine {
struct sdma_context_data *context;
dma_addr_t context_phys;
struct dma_device dma_device;
- struct clk *clk;
- struct mutex channel_0_lock;
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+ spinlock_t channel_0_lock;
struct sdma_script_start_addrs *script_addrs;
};
@@ -401,19 +403,27 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
}
/*
- * sdma_run_channel - run a channel and wait till it's done
+ * sdma_run_channel0 - run a channel and wait till it's done
*/
-static int sdma_run_channel(struct sdma_channel *sdmac)
+static int sdma_run_channel0(struct sdma_engine *sdma)
{
- struct sdma_engine *sdma = sdmac->sdma;
- int channel = sdmac->channel;
int ret;
+ unsigned long timeout = 500;
- init_completion(&sdmac->done);
+ sdma_enable_channel(sdma, 0);
- sdma_enable_channel(sdma, channel);
+ while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
+ if (timeout-- <= 0)
+ break;
+ udelay(1);
+ }
- ret = wait_for_completion_timeout(&sdmac->done, HZ);
+ if (ret) {
+ /* Clear the interrupt status */
+ writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
+ } else {
+ dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
+ }
return ret ? 0 : -ETIMEDOUT;
}
@@ -425,17 +435,17 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
void *buf_virt;
dma_addr_t buf_phys;
int ret;
-
- mutex_lock(&sdma->channel_0_lock);
+ unsigned long flags;
buf_virt = dma_alloc_coherent(NULL,
size,
&buf_phys, GFP_KERNEL);
if (!buf_virt) {
- ret = -ENOMEM;
- goto err_out;
+ return -ENOMEM;
}
+ spin_lock_irqsave(&sdma->channel_0_lock, flags);
+
bd0->mode.command = C0_SETPM;
bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
bd0->mode.count = size / 2;
@@ -444,12 +454,11 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
memcpy(buf_virt, buf, size);
- ret = sdma_run_channel(&sdma->channel[0]);
+ ret = sdma_run_channel0(sdma);
- dma_free_coherent(NULL, size, buf_virt, buf_phys);
+ spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
-err_out:
- mutex_unlock(&sdma->channel_0_lock);
+ dma_free_coherent(NULL, size, buf_virt, buf_phys);
return ret;
}
@@ -534,13 +543,11 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
sdmac->desc.callback(sdmac->desc.callback_param);
}
-static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
+static void sdma_tasklet(unsigned long data)
{
- complete(&sdmac->done);
+ struct sdma_channel *sdmac = (struct sdma_channel *) data;
- /* not interested in channel 0 interrupts */
- if (sdmac->channel == 0)
- return;
+ complete(&sdmac->done);
if (sdmac->flags & IMX_DMA_SG_LOOP)
sdma_handle_channel_loop(sdmac);
@@ -554,13 +561,15 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
unsigned long stat;
stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+ /* not interested in channel 0 interrupts */
+ stat &= ~1;
writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
while (stat) {
int channel = fls(stat) - 1;
struct sdma_channel *sdmac = &sdma->channel[channel];
- mxc_sdma_handle_channel(sdmac);
+ tasklet_schedule(&sdmac->tasklet);
__clear_bit(channel, &stat);
}
@@ -659,6 +668,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
struct sdma_context_data *context = sdma->context;
struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
int ret;
+ unsigned long flags;
if (sdmac->direction == DMA_DEV_TO_MEM) {
load_address = sdmac->pc_from_device;
@@ -676,7 +686,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
- mutex_lock(&sdma->channel_0_lock);
+ spin_lock_irqsave(&sdma->channel_0_lock, flags);
memset(context, 0, sizeof(*context));
context->channel_state.pc = load_address;
@@ -695,10 +705,9 @@ static int sdma_load_context(struct sdma_channel *sdmac)
bd0->mode.count = sizeof(*context) / 4;
bd0->buffer_addr = sdma->context_phys;
bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
+ ret = sdma_run_channel0(sdma);
- ret = sdma_run_channel(&sdma->channel[0]);
-
- mutex_unlock(&sdma->channel_0_lock);
+ spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
return ret;
}
@@ -859,7 +868,8 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
sdmac->peripheral_type = data->peripheral_type;
sdmac->event_id0 = data->dma_request;
- clk_enable(sdmac->sdma->clk);
+ clk_enable(sdmac->sdma->clk_ipg);
+ clk_enable(sdmac->sdma->clk_ahb);
ret = sdma_request_channel(sdmac);
if (ret)
@@ -896,7 +906,8 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
- clk_disable(sdma->clk);
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
}
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
@@ -938,7 +949,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
bd->buffer_addr = sg->dma_address;
- count = sg->length;
+ count = sg_dma_len(sg);
if (count > 0xffff) {
dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
@@ -1169,12 +1180,14 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
addr = (void *)header + header->script_addrs_start;
ram_code = (void *)header + header->ram_code_start;
- clk_enable(sdma->clk);
+ clk_enable(sdma->clk_ipg);
+ clk_enable(sdma->clk_ahb);
/* download the RAM image for SDMA */
sdma_load_script(sdma, ram_code,
header->ram_code_size,
addr->ram_code_start_addr);
- clk_disable(sdma->clk);
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
sdma_add_scripts(sdma, addr);
@@ -1216,7 +1229,8 @@ static int __init sdma_init(struct sdma_engine *sdma)
return -ENODEV;
}
- clk_enable(sdma->clk);
+ clk_enable(sdma->clk_ipg);
+ clk_enable(sdma->clk_ahb);
/* Be sure SDMA has not started yet */
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
@@ -1269,12 +1283,14 @@ static int __init sdma_init(struct sdma_engine *sdma)
/* Initializes channel's priorities */
sdma_set_channel_priority(&sdma->channel[0], 7);
- clk_disable(sdma->clk);
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
return 0;
err_dma_alloc:
- clk_disable(sdma->clk);
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
dev_err(sdma->dev, "initialisation failed with %d\n", ret);
return ret;
}
@@ -1297,7 +1313,7 @@ static int __init sdma_probe(struct platform_device *pdev)
if (!sdma)
return -ENOMEM;
- mutex_init(&sdma->channel_0_lock);
+ spin_lock_init(&sdma->channel_0_lock);
sdma->dev = &pdev->dev;
@@ -1313,12 +1329,21 @@ static int __init sdma_probe(struct platform_device *pdev)
goto err_request_region;
}
- sdma->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(sdma->clk)) {
- ret = PTR_ERR(sdma->clk);
+ sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(sdma->clk_ipg)) {
+ ret = PTR_ERR(sdma->clk_ipg);
goto err_clk;
}
+ sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sdma->clk_ahb)) {
+ ret = PTR_ERR(sdma->clk_ahb);
+ goto err_clk;
+ }
+
+ clk_prepare(sdma->clk_ipg);
+ clk_prepare(sdma->clk_ahb);
+
sdma->regs = ioremap(iores->start, resource_size(iores));
if (!sdma->regs) {
ret = -ENOMEM;
@@ -1359,6 +1384,8 @@ static int __init sdma_probe(struct platform_device *pdev)
dma_cookie_init(&sdmac->chan);
sdmac->channel = i;
+ tasklet_init(&sdmac->tasklet, sdma_tasklet,
+ (unsigned long) sdmac);
/*
* Add the channel to the DMAC list. Do not add channel 0 though
* because we need it internally in the SDMA driver. This also means
@@ -1426,7 +1453,6 @@ err_alloc:
err_request_irq:
iounmap(sdma->regs);
err_ioremap:
- clk_put(sdma->clk);
err_clk:
release_mem_region(iores->start, resource_size(iores));
err_request_region:
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index c900ca7aaec4..222e907bfaaa 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -394,11 +394,11 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
}
}
/*Populate CTL_HI values*/
- ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
+ ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
desc->width,
midc->dma->block_size);
/*Populate SAR and DAR values*/
- sg_phy_addr = sg_phys(sg);
+ sg_phy_addr = sg_dma_address(sg);
if (desc->dirn == DMA_MEM_TO_DEV) {
lli_bloc_desc->sar = sg_phy_addr;
lli_bloc_desc->dar = mids->dma_slave.dst_addr;
@@ -747,7 +747,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
txd = intel_mid_dma_prep_memcpy(chan,
mids->dma_slave.dst_addr,
mids->dma_slave.src_addr,
- sgl->length,
+ sg_dma_len(sgl),
flags);
return txd;
} else {
@@ -759,7 +759,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
sg_len, direction, flags);
- txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
+ txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
if (NULL == txd) {
pr_err("MDMA: Prep memcpy failed\n");
return NULL;
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 62e3f8ec2461..5ec72044ea4c 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1715,7 +1715,7 @@ static int __init ipu_probe(struct platform_device *pdev)
}
/* Make sure IPU HSP clock is running */
- clk_enable(ipu_data.ipu_clk);
+ clk_prepare_enable(ipu_data.ipu_clk);
/* Disable all interrupts */
idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1);
@@ -1747,7 +1747,7 @@ static int __init ipu_probe(struct platform_device *pdev)
err_idmac_init:
err_attach_irq:
ipu_irq_detach_irq(&ipu_data, pdev);
- clk_disable(ipu_data.ipu_clk);
+ clk_disable_unprepare(ipu_data.ipu_clk);
clk_put(ipu_data.ipu_clk);
err_clk_get:
iounmap(ipu_data.reg_ic);
@@ -1765,7 +1765,7 @@ static int __exit ipu_remove(struct platform_device *pdev)
ipu_idmac_exit(ipu);
ipu_irq_detach_irq(ipu, pdev);
- clk_disable(ipu->ipu_clk);
+ clk_disable_unprepare(ipu->ipu_clk);
clk_put(ipu->ipu_clk);
iounmap(ipu->reg_ic);
iounmap(ipu->reg_ipu);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fa5d55fea46c..0b12e68bf79c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
+#include <linux/clk.h>
#include <plat/mv_xor.h>
#include "dmaengine.h"
@@ -1307,11 +1308,25 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
if (dram)
mv_xor_conf_mbus_windows(msp, dram);
+ /* Not all platforms can gate the clock, so it is not
+ * an error if the clock does not exist.
+ */
+ msp->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(msp->clk))
+ clk_prepare_enable(msp->clk);
+
return 0;
}
static int mv_xor_shared_remove(struct platform_device *pdev)
{
+ struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
+
+ if (!IS_ERR(msp->clk)) {
+ clk_disable_unprepare(msp->clk);
+ clk_put(msp->clk);
+ }
+
return 0;
}
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 654876b7ba1d..a5b422f5a8ab 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -55,6 +55,7 @@
struct mv_xor_shared_private {
void __iomem *xor_base;
void __iomem *xor_high_base;
+ struct clk *clk;
};
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 655d4ce6ed0d..c96ab15319f2 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -22,11 +22,14 @@
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/fsl/mxs-dma.h>
+#include <linux/stmp_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <asm/irq.h>
#include <mach/mxs.h>
-#include <mach/common.h>
#include "dmaengine.h"
@@ -36,12 +39,8 @@
* dma can program the controller registers of peripheral devices.
*/
-#define MXS_DMA_APBH 0
-#define MXS_DMA_APBX 1
-#define dma_is_apbh() (mxs_dma->dev_id == MXS_DMA_APBH)
-
-#define APBH_VERSION_LATEST 3
-#define apbh_is_old() (mxs_dma->version < APBH_VERSION_LATEST)
+#define dma_is_apbh(mxs_dma) ((mxs_dma)->type == MXS_DMA_APBH)
+#define apbh_is_old(mxs_dma) ((mxs_dma)->dev_id == IMX23_DMA)
#define HW_APBHX_CTRL0 0x000
#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
@@ -51,13 +50,14 @@
#define HW_APBHX_CTRL2 0x020
#define HW_APBHX_CHANNEL_CTRL 0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16
-#define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800)
-#define HW_APBX_VERSION 0x800
-#define BP_APBHX_VERSION_MAJOR 24
-#define HW_APBHX_CHn_NXTCMDAR(n) \
- (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70)
-#define HW_APBHX_CHn_SEMA(n) \
- (((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70)
+/*
+ * The offset of the NXTCMDAR register differs with both DMA type and version,
+ * while the per-channel stride is always 0x70.
+ */
+#define HW_APBHX_CHn_NXTCMDAR(d, n) \
+ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
+#define HW_APBHX_CHn_SEMA(d, n) \
+ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
/*
* ccw bits definitions
@@ -121,9 +121,19 @@ struct mxs_dma_chan {
#define MXS_DMA_CHANNELS 16
#define MXS_DMA_CHANNELS_MASK 0xffff
+enum mxs_dma_devtype {
+ MXS_DMA_APBH,
+ MXS_DMA_APBX,
+};
+
+enum mxs_dma_id {
+ IMX23_DMA,
+ IMX28_DMA,
+};
+
struct mxs_dma_engine {
- int dev_id;
- unsigned int version;
+ enum mxs_dma_id dev_id;
+ enum mxs_dma_devtype type;
void __iomem *base;
struct clk *clk;
struct dma_device dma_device;
@@ -131,17 +141,86 @@ struct mxs_dma_engine {
struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
};
+struct mxs_dma_type {
+ enum mxs_dma_id id;
+ enum mxs_dma_devtype type;
+};
+
+static struct mxs_dma_type mxs_dma_types[] = {
+ {
+ .id = IMX23_DMA,
+ .type = MXS_DMA_APBH,
+ }, {
+ .id = IMX23_DMA,
+ .type = MXS_DMA_APBX,
+ }, {
+ .id = IMX28_DMA,
+ .type = MXS_DMA_APBH,
+ }, {
+ .id = IMX28_DMA,
+ .type = MXS_DMA_APBX,
+ }
+};
+
+static struct platform_device_id mxs_dma_ids[] = {
+ {
+ .name = "imx23-dma-apbh",
+ .driver_data = (kernel_ulong_t) &mxs_dma_types[0],
+ }, {
+ .name = "imx23-dma-apbx",
+ .driver_data = (kernel_ulong_t) &mxs_dma_types[1],
+ }, {
+ .name = "imx28-dma-apbh",
+ .driver_data = (kernel_ulong_t) &mxs_dma_types[2],
+ }, {
+ .name = "imx28-dma-apbx",
+ .driver_data = (kernel_ulong_t) &mxs_dma_types[3],
+ }, {
+ /* end of list */
+ }
+};
+
+static const struct of_device_id mxs_dma_dt_ids[] = {
+ { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
+ { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
+ { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
+ { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);
+
+static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct mxs_dma_chan, chan);
+}
+
+int mxs_dma_is_apbh(struct dma_chan *chan)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+
+ return dma_is_apbh(mxs_dma);
+}
+
+int mxs_dma_is_apbx(struct dma_chan *chan)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+
+ return !dma_is_apbh(mxs_dma);
+}
+
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
- if (dma_is_apbh() && apbh_is_old())
+ if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
else
writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
- mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
}
static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
@@ -151,10 +230,10 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
/* set cmd_addr up */
writel(mxs_chan->ccw_phys,
- mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
+ mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));
/* write 1 to SEMA to kick off the channel */
- writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
+ writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
}
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
@@ -168,12 +247,12 @@ static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
int chan_id = mxs_chan->chan.chan_id;
/* freeze the channel */
- if (dma_is_apbh() && apbh_is_old())
+ if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
else
writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
mxs_chan->status = DMA_PAUSED;
}
@@ -184,21 +263,16 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
int chan_id = mxs_chan->chan.chan_id;
/* unfreeze the channel */
- if (dma_is_apbh() && apbh_is_old())
+ if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
else
writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR);
+ mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);
mxs_chan->status = DMA_IN_PROGRESS;
}
-static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct mxs_dma_chan, chan);
-}
-
static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
return dma_cookie_assign(tx);
@@ -220,11 +294,11 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
/* completion status */
stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
stat1 &= MXS_DMA_CHANNELS_MASK;
- writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR);
+ writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
/* error status */
stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
- writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR);
+ writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
/*
* When both completion and error of termination bits set at the
@@ -415,9 +489,9 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
} else {
for_each_sg(sgl, sg, sg_len, i) {
- if (sg->length > MAX_XFER_BYTES) {
+ if (sg_dma_len(sg) > MAX_XFER_BYTES) {
dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
- sg->length, MAX_XFER_BYTES);
+ sg_dma_len(sg), MAX_XFER_BYTES);
goto err_out;
}
@@ -425,7 +499,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
ccw->bufaddr = sg->dma_address;
- ccw->xfer_bytes = sg->length;
+ ccw->xfer_bytes = sg_dma_len(sg);
ccw->bits = 0;
ccw->bits |= CCW_CHAIN;
@@ -567,27 +641,21 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
if (ret)
return ret;
- ret = mxs_reset_block(mxs_dma->base);
+ ret = stmp_reset_block(mxs_dma->base);
if (ret)
goto err_out;
- /* only major version matters */
- mxs_dma->version = readl(mxs_dma->base +
- ((mxs_dma->dev_id == MXS_DMA_APBX) ?
- HW_APBX_VERSION : HW_APBH_VERSION)) >>
- BP_APBHX_VERSION_MAJOR;
-
/* enable apbh burst */
- if (dma_is_apbh()) {
+ if (dma_is_apbh(mxs_dma)) {
writel(BM_APBH_CTRL0_APB_BURST_EN,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
writel(BM_APBH_CTRL0_APB_BURST8_EN,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
}
/* enable irq for all the channels */
writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
- mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);
err_out:
clk_disable_unprepare(mxs_dma->clk);
@@ -596,8 +664,9 @@ err_out:
static int __init mxs_dma_probe(struct platform_device *pdev)
{
- const struct platform_device_id *id_entry =
- platform_get_device_id(pdev);
+ const struct platform_device_id *id_entry;
+ const struct of_device_id *of_id;
+ const struct mxs_dma_type *dma_type;
struct mxs_dma_engine *mxs_dma;
struct resource *iores;
int ret, i;
@@ -606,7 +675,15 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
if (!mxs_dma)
return -ENOMEM;
- mxs_dma->dev_id = id_entry->driver_data;
+ of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
+ if (of_id)
+ id_entry = of_id->data;
+ else
+ id_entry = platform_get_device_id(pdev);
+
+ dma_type = (struct mxs_dma_type *)id_entry->driver_data;
+ mxs_dma->type = dma_type->type;
+ mxs_dma->dev_id = dma_type->id;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -689,23 +766,12 @@ err_request_region:
return ret;
}
-static struct platform_device_id mxs_dma_type[] = {
- {
- .name = "mxs-dma-apbh",
- .driver_data = MXS_DMA_APBH,
- }, {
- .name = "mxs-dma-apbx",
- .driver_data = MXS_DMA_APBX,
- }, {
- /* end of list */
- }
-};
-
static struct platform_driver mxs_dma_driver = {
.driver = {
.name = "mxs-dma",
+ .of_match_table = mxs_dma_dt_ids,
},
- .id_table = mxs_dma_type,
+ .id_table = mxs_dma_ids,
};
static int __init mxs_dma_module_init(void)
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 65c0495a6d40..987ab5cd2617 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -621,7 +621,7 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
goto err_desc_get;
desc->regs.dev_addr = reg;
- desc->regs.mem_addr = sg_phys(sg);
+ desc->regs.mem_addr = sg_dma_address(sg);
desc->regs.size = sg_dma_len(sg);
desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fa3fb21e60be..cbcc28e79be6 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -21,7 +21,6 @@
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 2ed1ac3513f3..000d309602b2 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2362,7 +2362,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
}
sg[periods].offset = 0;
- sg[periods].length = 0;
+ sg_dma_len(&sg[periods]) = 0;
sg[periods].page_link =
((unsigned long)sg | 0x01) & ~0x02;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 7ef73c919c5d..7be9b7288e90 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -715,25 +715,6 @@ static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
input_addr_to_dram_addr(mci, input_addr));
}
-/*
- * Find the minimum and maximum InputAddr values that map to the given @csrow.
- * Pass back these values in *input_addr_min and *input_addr_max.
- */
-static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
- u64 *input_addr_min, u64 *input_addr_max)
-{
- struct amd64_pvt *pvt;
- u64 base, mask;
-
- pvt = mci->pvt_info;
- BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
-
- get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
-
- *input_addr_min = base & ~mask;
- *input_addr_max = base | mask;
-}
-
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
u32 *page, u32 *offset)
@@ -1058,6 +1039,37 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
int channel, csrow;
u32 page, offset;
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
+
+ /*
+ * Find out which node the error address belongs to. This may be
+ * different from the node that detected the error.
+ */
+ src_mci = find_mc_by_sys_addr(mci, sys_addr);
+ if (!src_mci) {
+ amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
+ (unsigned long)sys_addr);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "failed to map error addr to a node",
+ NULL);
+ return;
+ }
+
+ /* Now map the sys_addr to a CSROW */
+ csrow = sys_addr_to_csrow(src_mci, sys_addr);
+ if (csrow < 0) {
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "failed to map error addr to a csrow",
+ NULL);
+ return;
+ }
+
/* CHIPKILL enabled */
if (pvt->nbcfg & NBCFG_CHIPKILL) {
channel = get_channel_from_ecc_syndrome(mci, syndrome);
@@ -1067,9 +1079,15 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
* 2 DIMMs is in error. So we need to ID 'both' of them
* as suspect.
*/
- amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
- "error reporting race\n", syndrome);
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
+ "possible error reporting race\n",
+ syndrome);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ csrow, -1, -1,
+ EDAC_MOD_STR,
+ "unknown syndrome - possible error reporting race",
+ NULL);
return;
}
} else {
@@ -1084,28 +1102,10 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
channel = ((sys_addr & BIT(3)) != 0);
}
- /*
- * Find out which node the error address belongs to. This may be
- * different from the node that detected the error.
- */
- src_mci = find_mc_by_sys_addr(mci, sys_addr);
- if (!src_mci) {
- amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
- (unsigned long)sys_addr);
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
- return;
- }
-
- /* Now map the sys_addr to a CSROW */
- csrow = sys_addr_to_csrow(src_mci, sys_addr);
- if (csrow < 0) {
- edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
- } else {
- error_address_to_page_and_offset(sys_addr, &page, &offset);
-
- edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
- channel, EDAC_MOD_STR);
- }
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci,
+ page, offset, syndrome,
+ csrow, channel, -1,
+ EDAC_MOD_STR, "", NULL);
}
static int ddr2_cs_size(unsigned i, bool dct_width)
@@ -1611,15 +1611,20 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
u32 page, offset;
int nid, csrow, chan = 0;
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
+
csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
if (csrow < 0) {
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "failed to map error addr to a csrow",
+ NULL);
return;
}
- error_address_to_page_and_offset(sys_addr, &page, &offset);
-
/*
* We need the syndromes for channel detection only when we're
* ganged. Otherwise @chan should already contain the channel at
@@ -1628,16 +1633,10 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
if (dct_ganging_enabled(pvt))
chan = get_channel_from_ecc_syndrome(mci, syndrome);
- if (chan >= 0)
- edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
- EDAC_MOD_STR);
- else
- /*
- * Channel unknown, report all channels on this CSROW as failed.
- */
- for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
- edac_mc_handle_ce(mci, page, offset, syndrome,
- csrow, chan, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ csrow, chan, -1,
+ EDAC_MOD_STR, "", NULL);
}
/*
@@ -1918,7 +1917,12 @@ static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
/* Ensure that the Error Address is VALID */
if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "HW has no ERROR_ADDRESS available",
+ NULL);
return;
}
@@ -1942,11 +1946,17 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "HW has no ERROR_ADDRESS available",
+ NULL);
return;
}
sys_addr = get_error_address(m);
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
/*
* Find out which node the error address belongs to. This may be
@@ -1956,7 +1966,11 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
if (!src_mci) {
amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
(unsigned long)sys_addr);
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "ERROR ADDRESS NOT mapped to a MC", NULL);
return;
}
@@ -1966,10 +1980,17 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
if (csrow < 0) {
amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
(unsigned long)sys_addr);
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "ERROR ADDRESS NOT mapped to CS",
+ NULL);
} else {
- error_address_to_page_and_offset(sys_addr, &page, &offset);
- edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ csrow, -1, -1,
+ EDAC_MOD_STR, "", NULL);
}
}
@@ -2171,7 +2192,7 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
- debugf0(" nr_pages= %u channel-count = %d\n",
+ debugf0(" nr_pages/channel= %u channel-count = %d\n",
nr_pages, pvt->channel_count);
return nr_pages;
@@ -2185,9 +2206,12 @@ static int init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow;
struct amd64_pvt *pvt = mci->pvt_info;
- u64 input_addr_min, input_addr_max, sys_addr, base, mask;
+ u64 base, mask;
u32 val;
- int i, empty = 1;
+ int i, j, empty = 1;
+ enum mem_type mtype;
+ enum edac_type edac_mode;
+ int nr_pages = 0;
amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
@@ -2211,41 +2235,32 @@ static int init_csrows(struct mem_ctl_info *mci)
empty = 0;
if (csrow_enabled(i, 0, pvt))
- csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
+ nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
if (csrow_enabled(i, 1, pvt))
- csrow->nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
- find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
- sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
- csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
- sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
- csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
+ nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
get_cs_base_and_mask(pvt, i, 0, &base, &mask);
- csrow->page_mask = ~mask;
/* 8 bytes of resolution */
- csrow->mtype = amd64_determine_memory_type(pvt, i);
+ mtype = amd64_determine_memory_type(pvt, i);
debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
- debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
- (unsigned long)input_addr_min,
- (unsigned long)input_addr_max);
- debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
- (unsigned long)sys_addr, csrow->page_mask);
- debugf1(" nr_pages: %u first_page: 0x%lx "
- "last_page: 0x%lx\n",
- (unsigned)csrow->nr_pages,
- csrow->first_page, csrow->last_page);
+ debugf1(" nr_pages: %u\n", nr_pages * pvt->channel_count);
/*
* determine whether CHIPKILL or JUST ECC or NO ECC is operating
*/
if (pvt->nbcfg & NBCFG_ECC_ENABLE)
- csrow->edac_mode =
- (pvt->nbcfg & NBCFG_CHIPKILL) ?
- EDAC_S4ECD4ED : EDAC_SECDED;
+ edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
+ EDAC_S4ECD4ED : EDAC_SECDED;
else
- csrow->edac_mode = EDAC_NONE;
+ edac_mode = EDAC_NONE;
+
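+ /* every channel of this csrow gets the same type, mode and page count */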
+ for (j = 0; j < pvt->channel_count; j++) {
+ csrow->channels[j].dimm->mtype = mtype;
+ csrow->channels[j].dimm->edac_mode = edac_mode;
+ csrow->channels[j].dimm->nr_pages = nr_pages;
+ }
}
return empty;
@@ -2540,6 +2555,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
struct amd64_pvt *pvt = NULL;
struct amd64_family_type *fam_type = NULL;
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
int err = 0, ret;
u8 nid = get_node_id(F2);
@@ -2574,7 +2590,13 @@ static int amd64_init_one_instance(struct pci_dev *F2)
goto err_siblings;
ret = -ENOMEM;
- mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
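+ /* describe the controller as csrows x channels for the layer-based allocator */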
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = pvt->csels[0].b_cnt;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = pvt->channel_count;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
if (!mci)
goto err_siblings;
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index f8fd3c807bde..9774d443fa57 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -29,7 +29,6 @@
edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
#define AMD76X_NR_CSROWS 8
-#define AMD76X_NR_CHANS 1
#define AMD76X_NR_DIMMS 4
/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
@@ -146,8 +145,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
if (handle_errors) {
row = (info->ecc_mode_status >> 4) & 0xf;
- edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
- row, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ mci->csrows[row].first_page, 0, 0,
+ row, 0, -1,
+ mci->ctl_name, "", NULL);
}
}
@@ -159,8 +160,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
if (handle_errors) {
row = info->ecc_mode_status & 0xf;
- edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
- 0, row, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ mci->csrows[row].first_page, 0, 0,
+ row, 0, -1,
+ mci->ctl_name, "", NULL);
}
}
@@ -186,11 +189,13 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
enum edac_type edac_mode)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
u32 mba, mba_base, mba_mask, dms;
int index;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
/* find the DRAM Chip Select Base address and mask */
pci_read_config_dword(pdev,
@@ -203,13 +208,13 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
csrow->first_page = mba_base >> PAGE_SHIFT;
- csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ dimm->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
csrow->page_mask = mba_mask >> PAGE_SHIFT;
- csrow->grain = csrow->nr_pages << PAGE_SHIFT;
- csrow->mtype = MEM_RDDR;
- csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
- csrow->edac_mode = edac_mode;
+ dimm->grain = dimm->nr_pages << PAGE_SHIFT;
+ dimm->mtype = MEM_RDDR;
+ dimm->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
+ dimm->edac_mode = edac_mode;
}
}
@@ -230,7 +235,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
EDAC_SECDED,
EDAC_SECDED
};
- struct mem_ctl_info *mci = NULL;
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
u32 ems;
u32 ems_mode;
struct amd76x_error_info discard;
@@ -238,11 +244,17 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
debugf0("%s()\n", __func__);
pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
ems_mode = (ems >> 10) & 0x3;
- mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS, 0);
- if (mci == NULL) {
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = AMD76X_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
+
+ if (mci == NULL)
return -ENOMEM;
- }
debugf0("%s(): mci = %p\n", __func__, mci);
mci->dev = &pdev->dev;
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index 9a6a274e6925..69ee6aab5c71 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -48,8 +48,9 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
syndrome = (ar & 0x000000001fe00000ul) >> 21;
/* TODO: Decoding of the error address */
- edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
- syndrome, 0, chan, "");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ csrow->first_page + pfn, offset, syndrome,
+ 0, chan, -1, "", "", NULL);
}
static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
@@ -69,7 +70,9 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
offset = address & ~PAGE_MASK;
/* TODO: Decoding of the error address */
- edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ csrow->first_page + pfn, offset, 0,
+ 0, chan, -1, "", "", NULL);
}
static void cell_edac_check(struct mem_ctl_info *mci)
@@ -124,8 +127,11 @@ static void cell_edac_check(struct mem_ctl_info *mci)
static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow = &mci->csrows[0];
+ struct dimm_info *dimm;
struct cell_edac_priv *priv = mci->pvt_info;
struct device_node *np;
+ int j;
+ u32 nr_pages;
for (np = NULL;
(np = of_find_node_by_name(np, "memory")) != NULL;) {
@@ -140,15 +146,20 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
if (of_node_to_nid(np) != priv->node)
continue;
csrow->first_page = r.start >> PAGE_SHIFT;
- csrow->nr_pages = resource_size(&r) >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->mtype = MEM_XDR;
- csrow->edac_mode = EDAC_SECDED;
+ nr_pages = resource_size(&r) >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + nr_pages - 1;
+
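+ /* split the row's pages evenly across the per-channel DIMM entries */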
+ for (j = 0; j < csrow->nr_channels; j++) {
+ dimm = csrow->channels[j].dimm;
+ dimm->mtype = MEM_XDR;
+ dimm->edac_mode = EDAC_SECDED;
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ }
dev_dbg(mci->dev,
"Initialized on node %d, chanmask=0x%x,"
" first_page=0x%lx, nr_pages=0x%x\n",
priv->node, priv->chanmask,
- csrow->first_page, csrow->nr_pages);
+ csrow->first_page, nr_pages);
break;
}
}
@@ -157,9 +168,10 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
{
struct cbe_mic_tm_regs __iomem *regs;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct cell_edac_priv *priv;
u64 reg;
- int rc, chanmask;
+ int rc, chanmask, num_chans;
regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
if (regs == NULL)
@@ -184,8 +196,16 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
in_be64(&regs->mic_fir));
/* Allocate & init EDAC MC data structure */
- mci = edac_mc_alloc(sizeof(struct cell_edac_priv), 1,
- chanmask == 3 ? 2 : 1, pdev->id);
+ num_chans = chanmask == 3 ? 2 : 1;
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = num_chans;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
+ sizeof(struct cell_edac_priv));
if (mci == NULL)
return -ENOMEM;
priv = mci->pvt_info;
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index a774c0ddaf5b..e22030a9de66 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -329,9 +329,10 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
{
struct cpc925_mc_pdata *pdata = mci->pvt_info;
struct csrow_info *csrow;
- int index;
+ struct dimm_info *dimm;
+ int index, j;
u32 mbmr, mbbar, bba;
- unsigned long row_size, last_nr_pages = 0;
+ unsigned long row_size, nr_pages, last_nr_pages = 0;
get_total_mem(pdata);
@@ -350,36 +351,41 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
row_size = bba * (1UL << 28); /* 256M */
csrow->first_page = last_nr_pages;
- csrow->nr_pages = row_size >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ nr_pages = row_size >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + nr_pages - 1;
last_nr_pages = csrow->last_page + 1;
- csrow->mtype = MEM_RDDR;
- csrow->edac_mode = EDAC_SECDED;
-
- switch (csrow->nr_channels) {
- case 1: /* Single channel */
- csrow->grain = 32; /* four-beat burst of 32 bytes */
- break;
- case 2: /* Dual channel */
- default:
- csrow->grain = 64; /* four-beat burst of 64 bytes */
- break;
- }
-
- switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
- case 6: /* 0110, no way to differentiate X8 VS X16 */
- case 5: /* 0101 */
- case 8: /* 1000 */
- csrow->dtype = DEV_X16;
- break;
- case 7: /* 0111 */
- case 9: /* 1001 */
- csrow->dtype = DEV_X8;
- break;
- default:
- csrow->dtype = DEV_UNKNOWN;
- break;
+ for (j = 0; j < csrow->nr_channels; j++) {
+ dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ dimm->mtype = MEM_RDDR;
+ dimm->edac_mode = EDAC_SECDED;
+
+ switch (csrow->nr_channels) {
+ case 1: /* Single channel */
+ dimm->grain = 32; /* four-beat burst of 32 bytes */
+ break;
+ case 2: /* Dual channel */
+ default:
+ dimm->grain = 64; /* four-beat burst of 64 bytes */
+ break;
+ }
+
+ switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
+ case 6: /* 0110, no way to differentiate X8 VS X16 */
+ case 5: /* 0101 */
+ case 8: /* 1000 */
+ dimm->dtype = DEV_X16;
+ break;
+ case 7: /* 0111 */
+ case 9: /* 1001 */
+ dimm->dtype = DEV_X8;
+ break;
+ default:
+ dimm->dtype = DEV_UNKNOWN;
+ break;
+ }
}
}
}
@@ -549,13 +555,18 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
if (apiexcp & CECC_EXCP_DETECTED) {
cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
channel = cpc925_mc_find_channel(mci, syndrome);
- edac_mc_handle_ce(mci, pfn, offset, syndrome,
- csrow, channel, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ pfn, offset, syndrome,
+ csrow, channel, -1,
+ mci->ctl_name, "", NULL);
}
if (apiexcp & UECC_EXCP_DETECTED) {
cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
- edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ pfn, offset, 0,
+ csrow, -1, -1,
+ mci->ctl_name, "", NULL);
}
cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
@@ -927,6 +938,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
{
static int edac_mc_idx;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
void __iomem *vbase;
struct cpc925_mc_pdata *pdata;
struct resource *r;
@@ -962,9 +974,16 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
goto err2;
}
- nr_channels = cpc925_mc_get_channels(vbase);
- mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata),
- CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx);
+ nr_channels = cpc925_mc_get_channels(vbase) + 1;
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = CPC925_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_channels;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+ sizeof(struct cpc925_mc_pdata));
if (!mci) {
cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
res = -ENOMEM;
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 41223261ede9..3186512c9739 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -4,7 +4,11 @@
* This file may be distributed under the terms of the
* GNU General Public License.
*
- * See "enum e752x_chips" below for supported chipsets
+ * Implement support for the e7520, E7525, e7320 and i3100 memory controllers.
+ *
+ * Datasheets:
+ * http://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
+ * ftp://download.intel.com/design/intarch/datashts/31345803.pdf
*
* Written by Tom Zimmerman
*
@@ -13,8 +17,6 @@
* Wang Zhenyu at intel.com
* Dave Jiang at mvista.com
*
- * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
- *
*/
#include <linux/module.h>
@@ -187,6 +189,25 @@ enum e752x_chips {
I3100 = 3
};
+/*
+ * Those chips support single-rank and dual-rank memories only.
+ *
+ * On e752x chips, the odd rows are present only on dual-rank memories.
+ * Dividing the rank by two will provide the dimm#
+ *
+ * i3100 MC has a different mapping: it supports only 4 ranks.
+ *
+ * The mapping is (from 1 to n):
+ * slot single-ranked double-ranked
+ * dimm #1 -> rank #4 NA
+ * dimm #2 -> rank #3 NA
+ * dimm #3 -> rank #2 Ranks 2 and 3
+ * dimm #4 -> rank #1 Ranks 1 and 4
+ *
+ * FIXME: The current mapping for i3100 considers that it supports up to 8
+ * ranks/channel, but the datasheet says that the MC supports only 4 ranks.
+ */
+
struct e752x_pvt {
struct pci_dev *bridge_ck;
struct pci_dev *dev_d0f0;
@@ -350,8 +371,10 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
channel = !(error_one & 1);
/* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
- sec1_syndrome, row, channel, "e752x CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset_in_page(sec1_add << 4), sec1_syndrome,
+ row, channel, -1,
+ "e752x CE", "", NULL);
}
static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
@@ -385,9 +408,12 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_ue(mci, block_page,
- offset_in_page(error_2b << 4),
- row, "e752x UE from Read");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ block_page,
+ offset_in_page(error_2b << 4), 0,
+ row, -1, -1,
+ "e752x UE from Read", "", NULL);
+
}
if (error_one & 0x0404) {
error_2b = scrb_add;
@@ -401,9 +427,11 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_ue(mci, block_page,
- offset_in_page(error_2b << 4),
- row, "e752x UE from Scruber");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ block_page,
+ offset_in_page(error_2b << 4), 0,
+ row, -1, -1,
+ "e752x UE from Scrubber", "", NULL);
}
}
@@ -426,7 +454,9 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
return;
debugf3("%s()\n", __func__);
- edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "e752x UE log memory write", "", NULL);
}
static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
@@ -1044,7 +1074,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
u8 value;
- u32 dra, drc, cumul_size;
+ u32 dra, drc, cumul_size, i, nr_pages;
dra = 0;
for (index = 0; index < 4; index++) {
@@ -1053,7 +1083,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
dra |= dra_reg << (index * 8);
}
pci_read_config_dword(pdev, E752X_DRC, &drc);
- drc_chan = dual_channel_active(ddrcsr);
+ drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
drc_ddim = (drc >> 20) & 0x3;
@@ -1078,26 +1108,33 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
- csrow->mtype = MEM_RDDR; /* only one type supported */
- csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
-
- /*
- * if single channel or x8 devices then SECDED
- * if dual channel and x4 then S4ECD4ED
- */
- if (drc_ddim) {
- if (drc_chan && mem_dev) {
- csrow->edac_mode = EDAC_S4ECD4ED;
- mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
- } else {
- csrow->edac_mode = EDAC_SECDED;
- mci->edac_cap |= EDAC_FLAG_SECDED;
- }
- } else
- csrow->edac_mode = EDAC_NONE;
+
+ for (i = 0; i < csrow->nr_channels; i++) {
+ struct dimm_info *dimm = csrow->channels[i].dimm;
+
+ debugf3("Initializing rank at (%i,%i)\n", index, i);
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ dimm->mtype = MEM_RDDR; /* only one type supported */
+ dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ dimm->edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ dimm->edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ dimm->edac_mode = EDAC_NONE;
+ }
}
}
@@ -1226,6 +1263,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
u16 pci_data;
u8 stat8;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct e752x_pvt *pvt;
u16 ddrcsr;
int drc_chan; /* Number of channels 0=1chan,1=2chan */
@@ -1252,11 +1290,15 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
/* Dual channel = 1, Single channel = 0 */
drc_chan = dual_channel_active(ddrcsr);
- mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
-
- if (mci == NULL) {
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = E752X_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = drc_chan + 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
+ if (mci == NULL)
return -ENOMEM;
- }
debugf3("%s(): init mci\n", __func__);
mci->mtype_cap = MEM_FLAG_RDDR;
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 68dea87b72e6..9a9c1a546797 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -10,6 +10,9 @@
* Based on work by Dan Hollis <goemon at anime dot net> and others.
* http://www.anime.net/~goemon/linux-ecc/
*
+ * Datasheet:
+ * http://www.intel.com/content/www/us/en/chipsets/e7501-chipset-memory-controller-hub-datasheet.html
+ *
* Contributors:
* Eric Biederman (Linux Networx)
* Tom Zimmerman (Linux Networx)
@@ -71,7 +74,7 @@
#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
#define E7XXX_NR_CSROWS 8 /* number of csrows */
-#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */
+#define E7XXX_NR_DIMMS 8 /* 2 channels, 4 dimms/channel */
/* E7XXX register addresses - device 0 function 0 */
#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
@@ -216,13 +219,15 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
row = edac_mc_find_csrow_by_page(mci, page);
/* convert syndrome to channel */
channel = e7xxx_find_channel(syndrome);
- edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, page, 0, syndrome,
+ row, channel, -1, "e7xxx CE", "", NULL);
}
static void process_ce_no_info(struct mem_ctl_info *mci)
{
debugf3("%s()\n", __func__);
- edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, -1, -1, -1,
+ "e7xxx CE log register overflow", "", NULL);
}
static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
@@ -236,13 +241,17 @@ static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
/* FIXME - should use PAGE_SHIFT */
block_page = error_2b >> 6; /* convert to 4k address */
row = edac_mc_find_csrow_by_page(mci, block_page);
- edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, block_page, 0, 0,
+ row, -1, -1, "e7xxx UE", "", NULL);
}
static void process_ue_no_info(struct mem_ctl_info *mci)
{
debugf3("%s()\n", __func__);
- edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
+ "e7xxx UE log register overflow", "", NULL);
}
static void e7xxx_get_error_info(struct mem_ctl_info *mci,
@@ -347,11 +356,12 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
int dev_idx, u32 drc)
{
unsigned long last_cumul_size;
- int index;
+ int index, j;
u8 value;
- u32 dra, cumul_size;
+ u32 dra, cumul_size, nr_pages;
int drc_chan, drc_drbg, drc_ddim, mem_dev;
struct csrow_info *csrow;
+ struct dimm_info *dimm;
pci_read_config_dword(pdev, E7XXX_DRA, &dra);
drc_chan = dual_channel_active(drc, dev_idx);
@@ -379,26 +389,32 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
- csrow->mtype = MEM_RDDR; /* only one type supported */
- csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
-
- /*
- * if single channel or x8 devices then SECDED
- * if dual channel and x4 then S4ECD4ED
- */
- if (drc_ddim) {
- if (drc_chan && mem_dev) {
- csrow->edac_mode = EDAC_S4ECD4ED;
- mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
- } else {
- csrow->edac_mode = EDAC_SECDED;
- mci->edac_cap |= EDAC_FLAG_SECDED;
- }
- } else
- csrow->edac_mode = EDAC_NONE;
+
+ for (j = 0; j < drc_chan + 1; j++) {
+ dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / (drc_chan + 1);
+ dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ dimm->mtype = MEM_RDDR; /* only one type supported */
+ dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ dimm->edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ dimm->edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ dimm->edac_mode = EDAC_NONE;
+ }
}
}
@@ -406,6 +422,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
{
u16 pci_data;
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
struct e7xxx_pvt *pvt = NULL;
u32 drc;
int drc_chan;
@@ -416,8 +433,21 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
pci_read_config_dword(pdev, E7XXX_DRC, &drc);
drc_chan = dual_channel_active(drc, dev_idx);
- mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0);
-
+ /*
+ * According to the datasheet, this device has a maximum of
+ * 4 DIMMs per channel, either single-rank or dual-rank. So, the
+ * total number of DIMMs is 8 (E7XXX_NR_DIMMS).
+ * That means that DIMMs are mapped as CSROWs and the channel maps
+ * the rank, so an error on either channel should be attributed to
+ * the same DIMM.
+ */
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = E7XXX_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = drc_chan + 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 5b739411d62f..117490d4f835 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -447,8 +447,10 @@ static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
#endif /* CONFIG_PCI */
-extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
- unsigned nr_chans, int edac_index);
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+ unsigned n_layers,
+ struct edac_mc_layer *layers,
+ unsigned sz_pvt);
extern int edac_mc_add_mc(struct mem_ctl_info *mci);
extern void edac_mc_free(struct mem_ctl_info *mci);
extern struct mem_ctl_info *edac_mc_find(int idx);
@@ -456,35 +458,17 @@ extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
unsigned long page);
-
-/*
- * The no info errors are used when error overflows are reported.
- * There are a limited number of error logging registers that can
- * be exausted. When all registers are exhausted and an additional
- * error occurs then an error overflow register records that an
- * error occurred and the type of error, but doesn't have any
- * further information. The ce/ue versions make for cleaner
- * reporting logic and function interface - reduces conditional
- * statement clutter and extra function arguments.
- */
-extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page,
- unsigned long syndrome, int row, int channel,
- const char *msg);
-extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
- const char *msg);
-extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, int row,
- const char *msg);
-extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
- const char *msg);
-extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow,
- unsigned int channel0, unsigned int channel1,
- char *msg);
-extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow,
- unsigned int channel, char *msg);
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ const unsigned long page_frame_number,
+ const unsigned long offset_in_page,
+ const unsigned long syndrome,
+ const int layer0,
+ const int layer1,
+ const int layer2,
+ const char *msg,
+ const char *other_detail,
+ const void *mcelog);
/*
* edac_device APIs
@@ -496,6 +480,7 @@ extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
int inst_nr, int block_nr, const char *msg);
extern int edac_device_alloc_index(void);
+extern const char *edac_layer_name[];
/*
* edac_pci APIs
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 45b8f4bdd773..ee3f1f810c1e 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -79,7 +79,7 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
unsigned total_size;
unsigned count;
unsigned instance, block, attr;
- void *pvt;
+ void *pvt, *p;
int err;
debugf4("%s() instances=%d blocks=%d\n",
@@ -92,35 +92,30 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
* to be at least as stringent as what the compiler would
* provide if we could simply hardcode everything into a single struct.
*/
- dev_ctl = (struct edac_device_ctl_info *)NULL;
+ p = NULL;
+ dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1);
/* Calc the 'end' offset past end of ONE ctl_info structure
* which will become the start of the 'instance' array
*/
- dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));
+ dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances);
/* Calc the 'end' offset past the instance array within the ctl_info
* which will become the start of the block array
*/
- dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));
+ count = nr_instances * nr_blocks;
+ dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count);
/* Calc the 'end' offset past the dev_blk array
* which will become the start of the attrib array, if any.
*/
- count = nr_instances * nr_blocks;
- dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));
-
- /* Check for case of when an attribute array is specified */
- if (nr_attrib > 0) {
- /* calc how many nr_attrib we need */
+ /* calc how many nr_attrib we need */
+ if (nr_attrib > 0)
count *= nr_attrib;
+ dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count);
- /* Calc the 'end' offset past the attributes array */
- pvt = edac_align_ptr(&dev_attrib[count], sz_private);
- } else {
- /* no attribute array specified */
- pvt = edac_align_ptr(dev_attrib, sz_private);
- }
+ /* Calc the 'end' offset past the attributes array */
+ pvt = edac_align_ptr(&p, sz_private, 1);
/* 'pvt' now points to where the private data area is.
* At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index feef7733fae7..10f375032e96 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -43,9 +43,26 @@ static void edac_mc_dump_channel(struct rank_info *chan)
{
debugf4("\tchannel = %p\n", chan);
debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
- debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
- debugf4("\tchannel->label = '%s'\n", chan->label);
debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
+ debugf4("\tchannel->dimm = %p\n", chan->dimm);
+}
+
+static void edac_mc_dump_dimm(struct dimm_info *dimm)
+{
+ int i;
+
+ debugf4("\tdimm = %p\n", dimm);
+ debugf4("\tdimm->label = '%s'\n", dimm->label);
+ debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
+ debugf4("\tdimm location ");
+ for (i = 0; i < dimm->mci->n_layers; i++) {
+ printk(KERN_CONT "%d", dimm->location[i]);
+ if (i < dimm->mci->n_layers - 1)
+ printk(KERN_CONT ".");
+ }
+ printk(KERN_CONT "\n");
+ debugf4("\tdimm->grain = %d\n", dimm->grain);
+ debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
}
static void edac_mc_dump_csrow(struct csrow_info *csrow)
@@ -55,7 +72,6 @@ static void edac_mc_dump_csrow(struct csrow_info *csrow)
debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
- debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
debugf4("\tcsrow->channels = %p\n", csrow->channels);
debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
@@ -70,6 +86,8 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci)
debugf4("\tmci->edac_check = %p\n", mci->edac_check);
debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
mci->nr_csrows, mci->csrows);
+ debugf3("\tmci->nr_dimms = %d, dimms = %p\n",
+ mci->tot_dimms, mci->dimms);
debugf3("\tdev = %p\n", mci->dev);
debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
@@ -101,18 +119,37 @@ const char *edac_mem_types[] = {
};
EXPORT_SYMBOL_GPL(edac_mem_types);
-/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
- * Adjust 'ptr' so that its alignment is at least as stringent as what the
- * compiler would provide for X and return the aligned result.
+/**
+ * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
+ * @p: pointer to a pointer with the memory offset to be used. At
+ * return, this will be incremented to point to the next offset
+ * @size: Size of the data structure to be reserved
+ * @n_elems: Number of elements that should be reserved
*
* If 'size' is a constant, the compiler will optimize this whole function
- * down to either a no-op or the addition of a constant to the value of 'ptr'.
+ * down to either a no-op or the addition of a constant to the value of '*p'.
+ *
+ * The 'p' pointer is needed to keep advancing through memory to the
+ * proper offsets when allocating a struct together with its embedded
+ * structs, as edac_device_alloc_ctl_info() above does, for example.
+ *
+ * At return, the pointer 'p' will have been incremented, ready to be
+ * used on the next call to this function.
*/
-void *edac_align_ptr(void *ptr, unsigned size)
+void *edac_align_ptr(void **p, unsigned size, int n_elems)
{
unsigned align, r;
+ void *ptr = *p;
+
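+ /* reserve room for the n_elems items being placed at the current offset */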
+ *p += size * n_elems;
- /* Here we assume that the alignment of a "long long" is the most
+ /*
+ * 'p' can possibly be an unaligned item X such that sizeof(X) is
+ * 'size'. Adjust 'p' so that its alignment is at least as
+ * stringent as what the compiler would provide for X and return
+ * the aligned result.
+ * Here we assume that the alignment of a "long long" is the most
* stringent alignment that the compiler will ever provide by default.
* As far as I know, this is a reasonable assumption.
*/
@@ -132,14 +169,18 @@ void *edac_align_ptr(void *ptr, unsigned size)
if (r == 0)
return (char *)ptr;
+ *p += align - r;
+
return (void *)(((unsigned long)ptr) + align - r);
}
/**
- * edac_mc_alloc: Allocate a struct mem_ctl_info structure
- * @size_pvt: size of private storage needed
- * @nr_csrows: Number of CWROWS needed for this MC
- * @nr_chans: Number of channels for the MC
+ * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
+ * @mc_num: Memory controller number
+ * @n_layers: Number of MC hierarchy layers
+ * @layers: Describes each layer as seen by the Memory Controller
+ * @sz_pvt: size of private storage needed
+ *
* Everything is kmalloc'ed as one big chunk - more efficient.
* Only can be used if all structures have the same lifetime - otherwise
@@ -147,32 +188,77 @@ void *edac_align_ptr(void *ptr, unsigned size)
*
* Use edac_mc_free() to free mc structures allocated by this function.
*
+ * NOTE: drivers handle multi-rank memories in different ways: in some
+ * drivers, one multi-rank memory stick is mapped as one entry, while, in
+ * others, a single multi-rank memory stick would be mapped into several
+ * entries. Currently, this function allocates multiple struct dimm_info
+ * in such scenarios, as grouping the multiple ranks requires driver changes.
+ *
* Returns:
- * NULL allocation failed
- * struct mem_ctl_info pointer
+ * On failure: NULL
+ * On success: struct mem_ctl_info pointer
*/
-struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
- unsigned nr_chans, int edac_index)
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+ unsigned n_layers,
+ struct edac_mc_layer *layers,
+ unsigned sz_pvt)
{
struct mem_ctl_info *mci;
- struct csrow_info *csi, *csrow;
+ struct edac_mc_layer *layer;
+ struct csrow_info *csi, *csr;
struct rank_info *chi, *chp, *chan;
- void *pvt;
- unsigned size;
- int row, chn;
- int err;
+ struct dimm_info *dimm;
+ u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
+ unsigned pos[EDAC_MAX_LAYERS];
+ unsigned size, tot_dimms = 1, count = 1;
+ unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
+ void *pvt, *p, *ptr = NULL;
+ int i, j, err, row, chn, n, len;
+ bool per_rank = false;
+
+ BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
+ /*
+ * Calculate the total amount of dimms and csrows/cschannels while
+ * in the old API emulation mode
+ */
+ for (i = 0; i < n_layers; i++) {
+ tot_dimms *= layers[i].size;
+ if (layers[i].is_virt_csrow)
+ tot_csrows *= layers[i].size;
+ else
+ tot_channels *= layers[i].size;
+
+ if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
+ per_rank = true;
+ }
/* Figure out the offsets of the various items from the start of an mc
* structure. We want the alignment of each item to be at least as
* stringent as what the compiler would provide if we could simply
* hardcode everything into a single struct.
*/
- mci = (struct mem_ctl_info *)0;
- csi = edac_align_ptr(&mci[1], sizeof(*csi));
- chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
- pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
+ mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
+ layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
+ csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows);
+ chi = edac_align_ptr(&ptr, sizeof(*chi), tot_csrows * tot_channels);
+ dimm = edac_align_ptr(&ptr, sizeof(*dimm), tot_dimms);
+ for (i = 0; i < n_layers; i++) {
+ count *= layers[i].size;
+ debugf4("%s: errcount layer %d size %d\n", __func__, i, count);
+ ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
+ ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
+ tot_errcount += 2 * count;
+ }
+
+ debugf4("%s: allocating %d error counters\n", __func__, tot_errcount);
+ pvt = edac_align_ptr(&ptr, sz_pvt, 1);
size = ((unsigned long)pvt) + sz_pvt;
+ debugf1("%s(): allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
+ __func__, size,
+ tot_dimms,
+ per_rank ? "ranks" : "dimms",
+ tot_csrows * tot_channels);
mci = kzalloc(size, GFP_KERNEL);
if (mci == NULL)
return NULL;
@@ -180,28 +266,103 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
/* Adjust pointers so they point within the memory we just allocated
* rather than an imaginary chunk of memory located at address 0.
*/
+ layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi));
+ dimm = (struct dimm_info *)(((char *)mci) + ((unsigned long)dimm));
+ for (i = 0; i < n_layers; i++) {
+ mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
+ mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
+ }
pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
/* setup index and various internal pointers */
- mci->mc_idx = edac_index;
+ mci->mc_idx = mc_num;
mci->csrows = csi;
+ mci->dimms = dimm;
+ mci->tot_dimms = tot_dimms;
mci->pvt_info = pvt;
- mci->nr_csrows = nr_csrows;
-
- for (row = 0; row < nr_csrows; row++) {
- csrow = &csi[row];
- csrow->csrow_idx = row;
- csrow->mci = mci;
- csrow->nr_channels = nr_chans;
- chp = &chi[row * nr_chans];
- csrow->channels = chp;
+ mci->n_layers = n_layers;
+ mci->layers = layer;
+ memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
+ mci->nr_csrows = tot_csrows;
+ mci->num_cschannel = tot_channels;
+ mci->mem_is_per_rank = per_rank;
- for (chn = 0; chn < nr_chans; chn++) {
+ /*
+ * Fill the csrow struct
+ */
+ for (row = 0; row < tot_csrows; row++) {
+ csr = &csi[row];
+ csr->csrow_idx = row;
+ csr->mci = mci;
+ csr->nr_channels = tot_channels;
+ chp = &chi[row * tot_channels];
+ csr->channels = chp;
+
+ for (chn = 0; chn < tot_channels; chn++) {
chan = &chp[chn];
chan->chan_idx = chn;
- chan->csrow = csrow;
+ chan->csrow = csr;
+ }
+ }
+
+ /*
+ * Fill the dimm struct
+ */
+ memset(&pos, 0, sizeof(pos));
+ row = 0;
+ chn = 0;
+ debugf4("%s: initializing %d %s\n", __func__, tot_dimms,
+ per_rank ? "ranks" : "dimms");
+ for (i = 0; i < tot_dimms; i++) {
+ chan = &csi[row].channels[chn];
+ dimm = EDAC_DIMM_PTR(layer, mci->dimms, n_layers,
+ pos[0], pos[1], pos[2]);
+ dimm->mci = mci;
+
+ debugf2("%s: %d: %s%zd (%d:%d:%d): row %d, chan %d\n", __func__,
+ i, per_rank ? "rank" : "dimm", (dimm - mci->dimms),
+ pos[0], pos[1], pos[2], row, chn);
+
+ /*
+ * Copy DIMM location and initialize it.
+ */
+ len = sizeof(dimm->label);
+ p = dimm->label;
+ n = snprintf(p, len, "mc#%u", mc_num);
+ p += n;
+ len -= n;
+ for (j = 0; j < n_layers; j++) {
+ n = snprintf(p, len, "%s#%u",
+ edac_layer_name[layers[j].type],
+ pos[j]);
+ p += n;
+ len -= n;
+ dimm->location[j] = pos[j];
+
+ if (len <= 0)
+ break;
+ }
+
+ /* Link it to the csrows old API data */
+ chan->dimm = dimm;
+ dimm->csrow = row;
+ dimm->cschannel = chn;
+
+ /* Increment csrow location */
+ row++;
+ if (row == tot_csrows) {
+ row = 0;
+ chn++;
+ }
+
+ /* Increment dimm location */
+ for (j = n_layers - 1; j >= 0; j--) {
+ pos[j]++;
+ if (pos[j] < layers[j].size)
+ break;
+ pos[j] = 0;
}
}
@@ -490,7 +651,6 @@ EXPORT_SYMBOL(edac_mc_find);
* edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
* create sysfs entries associated with mci structure
* @mci: pointer to the mci structure to be added to the list
- * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure.
*
* Return:
* 0 Success
@@ -517,6 +677,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
edac_mc_dump_channel(&mci->csrows[i].
channels[j]);
}
+ for (i = 0; i < mci->tot_dimms; i++)
+ edac_mc_dump_dimm(&mci->dimms[i]);
}
#endif
mutex_lock(&mem_ctls_mutex);
@@ -636,15 +798,19 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
struct csrow_info *csrows = mci->csrows;
- int row, i;
+ int row, i, j, n;
debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
row = -1;
for (i = 0; i < mci->nr_csrows; i++) {
struct csrow_info *csrow = &csrows[i];
-
- if (csrow->nr_pages == 0)
+ n = 0;
+ for (j = 0; j < csrow->nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
+ n += dimm->nr_pages;
+ }
+ if (n == 0)
continue;
debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
@@ -670,249 +836,307 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
-/* FIXME - setable log (warning/emerg) levels */
-/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
-void edac_mc_handle_ce(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, unsigned long syndrome,
- int row, int channel, const char *msg)
-{
- unsigned long remapped_page;
+const char *edac_layer_name[] = {
+ [EDAC_MC_LAYER_BRANCH] = "branch",
+ [EDAC_MC_LAYER_CHANNEL] = "channel",
+ [EDAC_MC_LAYER_SLOT] = "slot",
+ [EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
+};
+EXPORT_SYMBOL_GPL(edac_layer_name);
- debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
+static void edac_inc_ce_error(struct mem_ctl_info *mci,
+ bool enable_per_layer_report,
+ const int pos[EDAC_MAX_LAYERS])
+{
+ int i, index = 0;
- /* FIXME - maybe make panic on INTERNAL ERROR an option */
- if (row >= mci->nr_csrows || row < 0) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range "
- "(%d >= %d)\n", row, mci->nr_csrows);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
- }
+ mci->ce_mc++;
- if (channel >= mci->csrows[row].nr_channels || channel < 0) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel out of range "
- "(%d >= %d)\n", channel,
- mci->csrows[row].nr_channels);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+ if (!enable_per_layer_report) {
+ mci->ce_noinfo_count++;
return;
}
- if (edac_mc_get_log_ce())
- /* FIXME - put in DIMM location */
- edac_mc_printk(mci, KERN_WARNING,
- "CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
- "0x%lx, row %d, channel %d, label \"%s\": %s\n",
- page_frame_number, offset_in_page,
- mci->csrows[row].grain, syndrome, row, channel,
- mci->csrows[row].channels[channel].label, msg);
-
- mci->ce_count++;
- mci->csrows[row].ce_count++;
- mci->csrows[row].channels[channel].ce_count++;
-
- if (mci->scrub_mode & SCRUB_SW_SRC) {
- /*
- * Some MC's can remap memory so that it is still available
- * at a different address when PCI devices map into memory.
- * MC's that can't do this lose the memory where PCI devices
- * are mapped. This mapping is MC dependent and so we call
- * back into the MC driver for it to map the MC page to
- * a physical (CPU) page which can then be mapped to a virtual
- * page - which can then be scrubbed.
- */
- remapped_page = mci->ctl_page_to_phys ?
- mci->ctl_page_to_phys(mci, page_frame_number) :
- page_frame_number;
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] < 0)
+ break;
+ index += pos[i];
+ mci->ce_per_layer[i][index]++;
- edac_mc_scrub_block(remapped_page, offset_in_page,
- mci->csrows[row].grain);
+ if (i < mci->n_layers - 1)
+ index *= mci->layers[i + 1].size;
}
}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
-void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
+static void edac_inc_ue_error(struct mem_ctl_info *mci,
+ bool enable_per_layer_report,
+ const int pos[EDAC_MAX_LAYERS])
{
- if (edac_mc_get_log_ce())
- edac_mc_printk(mci, KERN_WARNING,
- "CE - no information available: %s\n", msg);
+ int i, index = 0;
- mci->ce_noinfo_count++;
- mci->ce_count++;
-}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
+ mci->ue_mc++;
-void edac_mc_handle_ue(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, int row, const char *msg)
-{
- int len = EDAC_MC_LABEL_LEN * 4;
- char labels[len + 1];
- char *pos = labels;
- int chan;
- int chars;
-
- debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
-
- /* FIXME - maybe make panic on INTERNAL ERROR an option */
- if (row >= mci->nr_csrows || row < 0) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range "
- "(%d >= %d)\n", row, mci->nr_csrows);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+ if (!enable_per_layer_report) {
+ mci->ce_noinfo_count++;
return;
}
- chars = snprintf(pos, len + 1, "%s",
- mci->csrows[row].channels[0].label);
- len -= chars;
- pos += chars;
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] < 0)
+ break;
+ index += pos[i];
+ mci->ue_per_layer[i][index]++;
- for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
- chan++) {
- chars = snprintf(pos, len + 1, ":%s",
- mci->csrows[row].channels[chan].label);
- len -= chars;
- pos += chars;
+ if (i < mci->n_layers - 1)
+ index *= mci->layers[i + 1].size;
}
+}
- if (edac_mc_get_log_ue())
- edac_mc_printk(mci, KERN_EMERG,
- "UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
- "labels \"%s\": %s\n", page_frame_number,
- offset_in_page, mci->csrows[row].grain, row,
- labels, msg);
+static void edac_ce_error(struct mem_ctl_info *mci,
+ const int pos[EDAC_MAX_LAYERS],
+ const char *msg,
+ const char *location,
+ const char *label,
+ const char *detail,
+ const char *other_detail,
+ const bool enable_per_layer_report,
+ const unsigned long page_frame_number,
+ const unsigned long offset_in_page,
+ u32 grain)
+{
+ unsigned long remapped_page;
- if (edac_mc_get_panic_on_ue())
- panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
- "row %d, labels \"%s\": %s\n", mci->mc_idx,
- page_frame_number, offset_in_page,
- mci->csrows[row].grain, row, labels, msg);
+ if (edac_mc_get_log_ce()) {
+ if (other_detail && *other_detail)
+ edac_mc_printk(mci, KERN_WARNING,
+ "CE %s on %s (%s%s - %s)\n",
+ msg, label, location,
+ detail, other_detail);
+ else
+ edac_mc_printk(mci, KERN_WARNING,
+ "CE %s on %s (%s%s)\n",
+ msg, label, location,
+ detail);
+ }
+ edac_inc_ce_error(mci, enable_per_layer_report, pos);
- mci->ue_count++;
- mci->csrows[row].ue_count++;
+ if (mci->scrub_mode & SCRUB_SW_SRC) {
+ /*
+ * Some memory controllers (called MCs below) can remap
+ * memory so that it is still available at a different
+ * address when PCI devices map into memory.
+ * MCs that can't do this lose the memory where PCI
+ * devices are mapped. This mapping is MC-dependent
+ * and so we call back into the MC driver for it to
+ * map the MC page to a physical (CPU) page which can
+ * then be mapped to a virtual page - which can then
+ * be scrubbed.
+ */
+ remapped_page = mci->ctl_page_to_phys ?
+ mci->ctl_page_to_phys(mci, page_frame_number) :
+ page_frame_number;
+
+ edac_mc_scrub_block(remapped_page,
+ offset_in_page, grain);
+ }
}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
-void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
+static void edac_ue_error(struct mem_ctl_info *mci,
+ const int pos[EDAC_MAX_LAYERS],
+ const char *msg,
+ const char *location,
+ const char *label,
+ const char *detail,
+ const char *other_detail,
+ const bool enable_per_layer_report)
{
- if (edac_mc_get_panic_on_ue())
- panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
+ if (edac_mc_get_log_ue()) {
+ if (other_detail && *other_detail)
+ edac_mc_printk(mci, KERN_WARNING,
+ "UE %s on %s (%s%s - %s)\n",
+ msg, label, location, detail,
+ other_detail);
+ else
+ edac_mc_printk(mci, KERN_WARNING,
+ "UE %s on %s (%s%s)\n",
+ msg, label, location, detail);
+ }
- if (edac_mc_get_log_ue())
- edac_mc_printk(mci, KERN_WARNING,
- "UE - no information available: %s\n", msg);
- mci->ue_noinfo_count++;
- mci->ue_count++;
+ if (edac_mc_get_panic_on_ue()) {
+ if (other_detail && *other_detail)
+ panic("UE %s on %s (%s%s - %s)\n",
+ msg, label, location, detail, other_detail);
+ else
+ panic("UE %s on %s (%s%s)\n",
+ msg, label, location, detail);
+ }
+
+ edac_inc_ue_error(mci, enable_per_layer_report, pos);
}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
-/*************************************************************
- * On Fully Buffered DIMM modules, this help function is
- * called to process UE events
- */
-void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
- unsigned int csrow,
- unsigned int channela,
- unsigned int channelb, char *msg)
+#define OTHER_LABEL " or "
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ const unsigned long page_frame_number,
+ const unsigned long offset_in_page,
+ const unsigned long syndrome,
+ const int layer0,
+ const int layer1,
+ const int layer2,
+ const char *msg,
+ const char *other_detail,
+ const void *mcelog)
{
- int len = EDAC_MC_LABEL_LEN * 4;
- char labels[len + 1];
- char *pos = labels;
- int chars;
+ /* FIXME: too much for stack: move it to some pre-allocated area */
+ char detail[80], location[80];
+ char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
+ char *p;
+ int row = -1, chan = -1;
+ int pos[EDAC_MAX_LAYERS] = { layer0, layer1, layer2 };
+ int i;
+ u32 grain;
+ bool enable_per_layer_report = false;
- if (csrow >= mci->nr_csrows) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range (%d >= %d)\n",
- csrow, mci->nr_csrows);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
- }
+ debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
- if (channela >= mci->csrows[csrow].nr_channels) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel-a out of range "
- "(%d >= %d)\n",
- channela, mci->csrows[csrow].nr_channels);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
+ /*
+ * Check if the event report is consistent and if the memory
+ * location is known. If it is known, enable_per_layer_report will be
+ * true, the DIMM(s) label info will be filled and the per-layer
+ * error counters will be incremented.
+ */
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] >= (int)mci->layers[i].size) {
+ if (type == HW_EVENT_ERR_CORRECTED)
+ p = "CE";
+ else
+ p = "UE";
+
+ edac_mc_printk(mci, KERN_ERR,
+ "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
+ edac_layer_name[mci->layers[i].type],
+ pos[i], mci->layers[i].size);
+ /*
+ * Instead of just returning, let's use what's known
+ * about the error. The increment routines and the
+ * DIMM filter logic will do the right thing by
+ * pointing to the likely damaged DIMMs.
+ */
+ pos[i] = -1;
+ }
+ if (pos[i] >= 0)
+ enable_per_layer_report = true;
}
- if (channelb >= mci->csrows[csrow].nr_channels) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel-b out of range "
- "(%d >= %d)\n",
- channelb, mci->csrows[csrow].nr_channels);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
- }
+ /*
+ * Get the dimm label/grain that applies to the match criteria.
+ * As the error algorithm may not be able to point to just one memory
+ * stick, the logic here will get all possible labels that could
+ * potentially be affected by the error.
+ * On FB-DIMM memory controllers, for uncorrected errors, it is common
+ * to have only the MC channel and the MC dimm (also called "branch")
+ * but the channel is not known, as the memory is arranged in pairs,
+ * where each memory belongs to a separate channel within the same
+ * branch.
+ */
+ grain = 0;
+ p = label;
+ *p = '\0';
+ for (i = 0; i < mci->tot_dimms; i++) {
+ struct dimm_info *dimm = &mci->dimms[i];
- mci->ue_count++;
- mci->csrows[csrow].ue_count++;
+ if (layer0 >= 0 && layer0 != dimm->location[0])
+ continue;
+ if (layer1 >= 0 && layer1 != dimm->location[1])
+ continue;
+ if (layer2 >= 0 && layer2 != dimm->location[2])
+ continue;
- /* Generate the DIMM labels from the specified channels */
- chars = snprintf(pos, len + 1, "%s",
- mci->csrows[csrow].channels[channela].label);
- len -= chars;
- pos += chars;
- chars = snprintf(pos, len + 1, "-%s",
- mci->csrows[csrow].channels[channelb].label);
+ /* get the max grain, over the error match range */
+ if (dimm->grain > grain)
+ grain = dimm->grain;
- if (edac_mc_get_log_ue())
- edac_mc_printk(mci, KERN_EMERG,
- "UE row %d, channel-a= %d channel-b= %d "
- "labels \"%s\": %s\n", csrow, channela, channelb,
- labels, msg);
+ /*
+ * If the error is memory-controller wide, there's no need to
+ * search for the affected DIMMs because the whole
+ * channel/memory controller/... may be affected.
+ * Also, don't show errors for empty DIMM slots.
+ */
+ if (enable_per_layer_report && dimm->nr_pages) {
+ if (p != label) {
+ strcpy(p, OTHER_LABEL);
+ p += strlen(OTHER_LABEL);
+ }
+ strcpy(p, dimm->label);
+ p += strlen(p);
+ *p = '\0';
+
+ /*
+ * get csrow/channel of the DIMM, in order to allow
+ * incrementing the compat API counters
+ */
+ debugf4("%s: %s csrows map: (%d,%d)\n",
+ __func__,
+ mci->mem_is_per_rank ? "rank" : "dimm",
+ dimm->csrow, dimm->cschannel);
+
+ if (row == -1)
+ row = dimm->csrow;
+ else if (row >= 0 && row != dimm->csrow)
+ row = -2;
+
+ if (chan == -1)
+ chan = dimm->cschannel;
+ else if (chan >= 0 && chan != dimm->cschannel)
+ chan = -2;
+ }
+ }
- if (edac_mc_get_panic_on_ue())
- panic("UE row %d, channel-a= %d channel-b= %d "
- "labels \"%s\": %s\n", csrow, channela,
- channelb, labels, msg);
-}
-EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
+ if (!enable_per_layer_report) {
+ strcpy(label, "any memory");
+ } else {
+ debugf4("%s: csrow/channel to increment: (%d,%d)\n",
+ __func__, row, chan);
+ if (p == label)
+ strcpy(label, "unknown memory");
+ if (type == HW_EVENT_ERR_CORRECTED) {
+ if (row >= 0) {
+ mci->csrows[row].ce_count++;
+ if (chan >= 0)
+ mci->csrows[row].channels[chan].ce_count++;
+ }
+ } else
+ if (row >= 0)
+ mci->csrows[row].ue_count++;
+ }
-/*************************************************************
- * On Fully Buffered DIMM modules, this help function is
- * called to process CE events
- */
-void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
- unsigned int csrow, unsigned int channel, char *msg)
-{
+ /* Fill the RAM location data */
+ p = location;
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] < 0)
+ continue;
- /* Ensure boundary values */
- if (csrow >= mci->nr_csrows) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range (%d >= %d)\n",
- csrow, mci->nr_csrows);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
- }
- if (channel >= mci->csrows[csrow].nr_channels) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel out of range (%d >= %d)\n",
- channel, mci->csrows[csrow].nr_channels);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
+ p += sprintf(p, "%s:%d ",
+ edac_layer_name[mci->layers[i].type],
+ pos[i]);
}
- if (edac_mc_get_log_ce())
- /* FIXME - put in DIMM location */
- edac_mc_printk(mci, KERN_WARNING,
- "CE row %d, channel %d, label \"%s\": %s\n",
- csrow, channel,
- mci->csrows[csrow].channels[channel].label, msg);
+ /* Memory type dependent details about the error */
+ if (type == HW_EVENT_ERR_CORRECTED) {
+ snprintf(detail, sizeof(detail),
+ "page:0x%lx offset:0x%lx grain:%d syndrome:0x%lx",
+ page_frame_number, offset_in_page,
+ grain, syndrome);
+ edac_ce_error(mci, pos, msg, location, label, detail,
+ other_detail, enable_per_layer_report,
+ page_frame_number, offset_in_page, grain);
+ } else {
+ snprintf(detail, sizeof(detail),
+ "page:0x%lx offset:0x%lx grain:%d",
+ page_frame_number, offset_in_page, grain);
- mci->ce_count++;
- mci->csrows[csrow].ce_count++;
- mci->csrows[csrow].channels[channel].ce_count++;
+ edac_ue_error(mci, pos, msg, location, label, detail,
+ other_detail, enable_per_layer_report);
+ }
}
-EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
+EXPORT_SYMBOL_GPL(edac_mc_handle_error);
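Note on edac_inc_ce_error()/edac_inc_ue_error() above: the per-layer counters are kept in flat arrays, and the index for layer i is the accumulated position of the upper layers rescaled by the size of each lower layer. A stand-alone sketch of that arithmetic follows; the 2x2x4 geometry and the position are made-up values, not taken from any particular controller:

#include <stdio.h>

/* Simplified stand-in for the layer descriptor used by the EDAC core. */
struct layer { int size; };

static void show_layer_indexes(const struct layer *layers, int n_layers,
                               const int *pos)
{
        int i, index = 0;

        for (i = 0; i < n_layers; i++) {
                if (pos[i] < 0)         /* unknown position: stop, as the core does */
                        break;
                index += pos[i];
                printf("layer %d -> counter index %d\n", i, index);
                if (i < n_layers - 1)
                        index *= layers[i + 1].size;
        }
}

int main(void)
{
        /* hypothetical branch/channel/slot geometry: 2 x 2 x 4 */
        struct layer layers[] = { { 2 }, { 2 }, { 4 } };
        int pos[] = { 1, 0, 3 };        /* branch 1, channel 0, slot 3 */

        show_layer_indexes(layers, 3, pos);
        return 0;
}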
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index e9a28f576d14..f6a29b0eedc8 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -144,25 +144,31 @@ static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
+ int i;
+ u32 nr_pages = 0;
+
+ for (i = 0; i < csrow->nr_channels; i++)
+ nr_pages += csrow->channels[i].dimm->nr_pages;
+
+ return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
}
static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%s\n", mem_types[csrow->mtype]);
+ return sprintf(data, "%s\n", mem_types[csrow->channels[0].dimm->mtype]);
}
static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%s\n", dev_types[csrow->dtype]);
+ return sprintf(data, "%s\n", dev_types[csrow->channels[0].dimm->dtype]);
}
static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]);
+ return sprintf(data, "%s\n", edac_caps[csrow->channels[0].dimm->edac_mode]);
}
/* show/store functions for DIMM Label attributes */
@@ -170,11 +176,11 @@ static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
char *data, int channel)
{
/* if field has not been initialized, there is nothing to send */
- if (!csrow->channels[channel].label[0])
+ if (!csrow->channels[channel].dimm->label[0])
return 0;
return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
- csrow->channels[channel].label);
+ csrow->channels[channel].dimm->label);
}
static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
@@ -184,8 +190,8 @@ static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
ssize_t max_size = 0;
max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
- strncpy(csrow->channels[channel].label, data, max_size);
- csrow->channels[channel].label[max_size] = '\0';
+ strncpy(csrow->channels[channel].dimm->label, data, max_size);
+ csrow->channels[channel].dimm->label[max_size] = '\0';
return max_size;
}
@@ -419,8 +425,8 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
mci->ue_noinfo_count = 0;
mci->ce_noinfo_count = 0;
- mci->ue_count = 0;
- mci->ce_count = 0;
+ mci->ue_mc = 0;
+ mci->ce_mc = 0;
for (row = 0; row < mci->nr_csrows; row++) {
struct csrow_info *ri = &mci->csrows[row];
@@ -489,12 +495,12 @@ static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
/* default attribute files for the MCI object */
static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
{
- return sprintf(data, "%d\n", mci->ue_count);
+ return sprintf(data, "%d\n", mci->ue_mc);
}
static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
{
- return sprintf(data, "%d\n", mci->ce_count);
+ return sprintf(data, "%d\n", mci->ce_mc);
}
static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
@@ -519,16 +525,16 @@ static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
{
- int total_pages, csrow_idx;
+ int total_pages = 0, csrow_idx, j;
- for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
- csrow_idx++) {
+ for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
struct csrow_info *csrow = &mci->csrows[csrow_idx];
- if (!csrow->nr_pages)
- continue;
+ for (j = 0; j < csrow->nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
- total_pages += csrow->nr_pages;
+ total_pages += dimm->nr_pages;
+ }
}
return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
@@ -900,7 +906,7 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
*/
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
{
- int i;
+ int i, j;
int err;
struct csrow_info *csrow;
struct kobject *kobj_mci = &mci->edac_mci_kobj;
@@ -934,10 +940,13 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
/* Make directories for each CSROW object under the mc<id> kobject
*/
for (i = 0; i < mci->nr_csrows; i++) {
+ int nr_pages = 0;
+
csrow = &mci->csrows[i];
+ for (j = 0; j < csrow->nr_channels; j++)
+ nr_pages += csrow->channels[j].dimm->nr_pages;
- /* Only expose populated CSROWs */
- if (csrow->nr_pages > 0) {
+ if (nr_pages > 0) {
err = edac_create_csrow_object(mci, csrow, i);
if (err) {
debugf1("%s() failure: create csrow %d obj\n",
@@ -949,12 +958,15 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
return 0;
- /* CSROW error: backout what has already been registered, */
fail1:
for (i--; i >= 0; i--) {
- if (csrow->nr_pages > 0) {
+ int nr_pages = 0;
+
+ csrow = &mci->csrows[i];
+ for (j = 0; j < csrow->nr_channels; j++)
+ nr_pages += csrow->channels[j].dimm->nr_pages;
+ if (nr_pages > 0)
kobject_put(&mci->csrows[i].kobj);
- }
}
/* remove the mci instance's attributes, if any */
@@ -973,14 +985,20 @@ fail0:
*/
void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
- int i;
+ struct csrow_info *csrow;
+ int i, j;
debugf0("%s()\n", __func__);
/* remove all csrow kobjects */
debugf4("%s() unregister this mci kobj\n", __func__);
for (i = 0; i < mci->nr_csrows; i++) {
- if (mci->csrows[i].nr_pages > 0) {
+ int nr_pages = 0;
+
+ csrow = &mci->csrows[i];
+ for (j = 0; j < csrow->nr_channels; j++)
+ nr_pages += csrow->channels[j].dimm->nr_pages;
+ if (nr_pages > 0) {
debugf0("%s() unreg csrow-%d\n", __func__, i);
kobject_put(&mci->csrows[i].kobj);
}
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 00f81b47a51f..0ea7d14cb930 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -50,7 +50,7 @@ extern void edac_device_reset_delay_period(struct edac_device_ctl_info
*edac_dev, unsigned long value);
extern void edac_mc_reset_delay_period(int value);
-extern void *edac_align_ptr(void *ptr, unsigned size);
+extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
/*
* EDAC PCI functions
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 63af1c5673d1..f1ac86649886 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -42,13 +42,13 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
const char *edac_pci_name)
{
struct edac_pci_ctl_info *pci;
- void *pvt;
+ void *p = NULL, *pvt;
unsigned int size;
debugf1("%s()\n", __func__);
- pci = (struct edac_pci_ctl_info *)0;
- pvt = edac_align_ptr(&pci[1], sz_pvt);
+ pci = edac_align_ptr(&p, sizeof(*pci), 1);
+ pvt = edac_align_ptr(&p, 1, sz_pvt);
size = ((unsigned long)pvt) + sz_pvt;
/* Alloc the needed control struct memory */
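Note on the edac_pci_alloc_ctl_info() hunk above: the control structure and the driver-private area are now laid out inside a single allocation by walking an offset pointer through the layout (first against a NULL base to size it, then against the real buffer). The helper below is only a rough user-space model of that pattern; it is not the kernel's edac_align_ptr(), and its alignment rule is simplified to pointer alignment:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Toy offset reservation: return the aligned offset, advance past it. */
static void *reserve(void **offset, size_t size, size_t n_elems, size_t align)
{
        uintptr_t cur = (uintptr_t)*offset;

        cur = (cur + align - 1) & ~(uintptr_t)(align - 1);
        *offset = (void *)(cur + size * n_elems);
        return (void *)cur;
}

struct ctl { int dev_idx; };

int main(void)
{
        void *offset = NULL, *base;
        size_t sz_pvt = 40;             /* hypothetical private-data size */
        struct ctl *ctl;
        char *pvt;
        size_t total;

        /* First pass: compute offsets only (offset starts at NULL, i.e. zero). */
        ctl = reserve(&offset, sizeof(*ctl), 1, sizeof(void *));
        pvt = reserve(&offset, 1, sz_pvt, sizeof(void *));
        total = (size_t)(uintptr_t)pvt + sz_pvt;

        /* Second pass: allocate once, then rebase the offsets. */
        base = calloc(1, total);
        if (!base)
                return 1;
        ctl = (struct ctl *)((char *)base + (uintptr_t)ctl);
        pvt = (char *)base + (uintptr_t)pvt;

        printf("total %zu bytes: ctl at +%zu, pvt at +%zu\n", total,
               (size_t)((char *)ctl - (char *)base),
               (size_t)(pvt - (char *)base));
        free(base);
        return 0;
}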
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 277689a68841..8ad1744faacd 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -245,7 +245,9 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
@@ -256,10 +258,15 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
row = edac_mc_find_csrow_by_page(mci, pfn);
if (info->errsts & I3000_ERRSTS_UE)
- edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ pfn, offset, 0,
+ row, -1, -1,
+ "i3000 UE", "", NULL);
else
- edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row,
- multi_chan ? channel : 0, "i3000 CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ pfn, offset, info->derrsyn,
+ row, multi_chan ? channel : 0, -1,
+ "i3000 CE", "", NULL);
return 1;
}
@@ -304,9 +311,10 @@ static int i3000_is_interleaved(const unsigned char *c0dra,
static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
- int i;
+ int i, j;
struct mem_ctl_info *mci = NULL;
- unsigned long last_cumul_size;
+ struct edac_mc_layer layers[2];
+ unsigned long last_cumul_size, nr_pages;
int interleaved, nr_channels;
unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
@@ -347,7 +355,14 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
*/
interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
nr_channels = interleaved ? 2 : 1;
- mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0);
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I3000_RANKS / nr_channels;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_channels;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
@@ -386,19 +401,23 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
cumul_size <<= 1;
debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
__func__, i, cumul_size);
- if (cumul_size == last_cumul_size) {
- csrow->mtype = MEM_EMPTY;
+ if (cumul_size == last_cumul_size)
continue;
- }
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = I3000_DEAP_GRAIN;
- csrow->mtype = MEM_DDR2;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = EDAC_UNKNOWN;
+
+ for (j = 0; j < nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / nr_channels;
+ dimm->grain = I3000_DEAP_GRAIN;
+ dimm->mtype = MEM_DDR2;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
}
/*
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 046808c6357d..bbe43ef71823 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -23,6 +23,7 @@
#define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0
+#define I3200_DIMMS 4
#define I3200_RANKS 8
#define I3200_RANKS_PER_CHANNEL 4
#define I3200_CHANNELS 2
@@ -217,21 +218,25 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
return;
if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
for (channel = 0; channel < nr_channels; channel++) {
log = info->eccerrlog[channel];
if (log & I3200_ECCERRLOG_UE) {
- edac_mc_handle_ue(mci, 0, 0,
- eccerrlog_row(channel, log),
- "i3200 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, 0,
+ eccerrlog_row(channel, log),
+ -1, -1,
+ "i3000 UE", "", NULL);
} else if (log & I3200_ECCERRLOG_CE) {
- edac_mc_handle_ce(mci, 0, 0,
- eccerrlog_syndrome(log),
- eccerrlog_row(channel, log), 0,
- "i3200 CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, eccerrlog_syndrome(log),
+ eccerrlog_row(channel, log),
+ -1, -1,
+ "i3200 CE", "", NULL);
}
}
}
@@ -319,9 +324,9 @@ static unsigned long drb_to_nr_pages(
static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
- int i;
+ int i, j;
struct mem_ctl_info *mci = NULL;
- unsigned long last_page;
+ struct edac_mc_layer layers[2];
u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
bool stacked;
void __iomem *window;
@@ -336,8 +341,14 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
i3200_get_drbs(window, drbs);
nr_channels = how_many_channels(pdev);
- mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS,
- nr_channels, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I3200_DIMMS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_channels;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct i3200_priv));
if (!mci)
return -ENOMEM;
@@ -366,7 +377,6 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
* cumulative; the last one will contain the total memory
* contained in all ranks.
*/
- last_page = -1UL;
for (i = 0; i < mci->nr_csrows; i++) {
unsigned long nr_pages;
struct csrow_info *csrow = &mci->csrows[i];
@@ -375,20 +385,18 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
i / I3200_RANKS_PER_CHANNEL,
i % I3200_RANKS_PER_CHANNEL);
- if (nr_pages == 0) {
- csrow->mtype = MEM_EMPTY;
+ if (nr_pages == 0)
continue;
- }
- csrow->first_page = last_page + 1;
- last_page += nr_pages;
- csrow->last_page = last_page;
- csrow->nr_pages = nr_pages;
+ for (j = 0; j < nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
- csrow->grain = nr_pages << PAGE_SHIFT;
- csrow->mtype = MEM_DDR2;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = EDAC_UNKNOWN;
+ dimm->nr_pages = nr_pages / nr_channels;
+ dimm->grain = nr_pages << PAGE_SHIFT;
+ dimm->mtype = MEM_DDR2;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
}
i3200_clear_error_info(mci);
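Note on the i3000/i3200 conversions above: a driver now describes its geometry as an array of struct edac_mc_layer and lets edac_mc_alloc() derive the totals. The sketch below uses local stand-in types and assumes, as the tot_dimms/nr_csrows/num_cschannel assignments earlier in this patch suggest, that the DIMM count is the product of all layer sizes while the compat csrow and channel counts come from the layers with and without is_virt_csrow respectively:

#include <stdio.h>
#include <stdbool.h>

/* Local stand-ins for the EDAC layer descriptor, only for this sketch. */
enum layer_type { LAYER_CHIP_SELECT, LAYER_CHANNEL };

struct mc_layer {
        enum layer_type type;
        unsigned size;
        bool is_virt_csrow;
};

int main(void)
{
        /* Geometry similar to the i3000 conversion: ranks x channels. */
        struct mc_layer layers[] = {
                { LAYER_CHIP_SELECT, 8 / 2, true },     /* I3000_RANKS / nr_channels */
                { LAYER_CHANNEL,     2,     false },    /* nr_channels */
        };
        unsigned i, tot_dimms = 1, tot_csrows = 1, tot_channels = 1;

        for (i = 0; i < sizeof(layers) / sizeof(layers[0]); i++) {
                tot_dimms *= layers[i].size;
                if (layers[i].is_virt_csrow)
                        tot_csrows *= layers[i].size;
                else
                        tot_channels *= layers[i].size;
        }

        printf("dimms=%u csrows=%u channels=%u\n",
               tot_dimms, tot_csrows, tot_channels);
        return 0;
}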
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index a2680d8e744b..11ea835f155a 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -270,7 +270,8 @@
#define MTR3 0x8C
#define NUM_MTRS 4
-#define CHANNELS_PER_BRANCH (2)
+#define CHANNELS_PER_BRANCH 2
+#define MAX_BRANCHES 2
/* Defines to extract the vaious fields from the
* MTRx - Memory Technology Registers
@@ -473,7 +474,6 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
char msg[EDAC_MC_LABEL_LEN + 1 + 160];
char *specific = NULL;
u32 allErrors;
- int branch;
int channel;
int bank;
int rank;
@@ -485,8 +485,7 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
if (!allErrors)
return; /* if no error, return now */
- branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
- channel = branch;
+ channel = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
/* Use the NON-Recoverable macros to extract data */
bank = NREC_BANK(info->nrecmema);
@@ -495,9 +494,9 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
ras = NREC_RAS(info->nrecmemb);
cas = NREC_CAS(info->nrecmemb);
- debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
- "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, channel + 1, branch >> 1, bank,
+ debugf0("\t\tCSROW= %d Channel= %d "
+ "(DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, bank,
rdwr ? "Write" : "Read", ras, cas);
/* Only 1 bit will be on */
@@ -533,13 +532,14 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d "
- "FATAL Err=0x%x (%s))",
- branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
- allErrors, specific);
+ "Bank=%d RAS=%d CAS=%d FATAL Err=0x%x (%s)",
+ bank, ras, cas, allErrors, specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
+ channel >> 1, channel & 1, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
/*
@@ -633,13 +633,14 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
- "CAS=%d, UE Err=0x%x (%s))",
- branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
- ue_errors, specific);
+ "Rank=%d Bank=%d RAS=%d CAS=%d, UE Err=0x%x (%s)",
+ rank, bank, ras, cas, ue_errors, specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ channel >> 1, -1, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
/* Check correctable errors */
@@ -685,13 +686,16 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
+ "Rank=%d Bank=%d RDWR=%s RAS=%d "
"CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank,
rdwr ? "Write" : "Read", ras, cas, ce_errors,
specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ channel >> 1, channel % 2, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
if (!misc_messages)
@@ -731,11 +735,12 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d Err=%#x (%s))", branch >> 1,
- misc_errors, specific);
+ "Err=%#x (%s)", misc_errors, specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ce(mci, 0, 0, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ branch >> 1, -1, -1,
+ "Misc error", msg, NULL);
}
}
@@ -956,14 +961,14 @@ static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel)
*
* return the proper MTR register as determine by the csrow and channel desired
*/
-static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel)
+static int determine_mtr(struct i5000_pvt *pvt, int slot, int channel)
{
int mtr;
if (channel < CHANNELS_PER_BRANCH)
- mtr = pvt->b0_mtr[csrow >> 1];
+ mtr = pvt->b0_mtr[slot];
else
- mtr = pvt->b1_mtr[csrow >> 1];
+ mtr = pvt->b1_mtr[slot];
return mtr;
}
@@ -988,37 +993,34 @@ static void decode_mtr(int slot_row, u16 mtr)
debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
}
-static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
+static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
struct i5000_dimm_info *dinfo)
{
int mtr;
int amb_present_reg;
int addrBits;
- mtr = determine_mtr(pvt, csrow, channel);
+ mtr = determine_mtr(pvt, slot, channel);
if (MTR_DIMMS_PRESENT(mtr)) {
amb_present_reg = determine_amb_present_reg(pvt, channel);
- /* Determine if there is a DIMM present in this DIMM slot */
- if (amb_present_reg & (1 << (csrow >> 1))) {
+ /* Determine if there is a DIMM present in this DIMM slot */
+ if (amb_present_reg) {
dinfo->dual_rank = MTR_DIMM_RANK(mtr);
- if (!((dinfo->dual_rank == 0) &&
- ((csrow & 0x1) == 0x1))) {
- /* Start with the number of bits for a Bank
- * on the DRAM */
- addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
- /* Add thenumber of ROW bits */
- addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
- /* add the number of COLUMN bits */
- addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
-
- addrBits += 6; /* add 64 bits per DIMM */
- addrBits -= 20; /* divide by 2^^20 */
- addrBits -= 3; /* 8 bits per bytes */
-
- dinfo->megabytes = 1 << addrBits;
- }
+ /* Start with the number of bits for a Bank
+ * on the DRAM */
+ addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
+ /* Add the number of ROW bits */
+ addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
+ /* add the number of COLUMN bits */
+ addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
+
+ addrBits += 6; /* add 64 bits per DIMM */
+ addrBits -= 20; /* divide by 2^^20 */
+ addrBits -= 3; /* 8 bits per byte */
+
+ dinfo->megabytes = 1 << addrBits;
}
}
}
@@ -1032,10 +1034,9 @@ static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
static void calculate_dimm_size(struct i5000_pvt *pvt)
{
struct i5000_dimm_info *dinfo;
- int csrow, max_csrows;
+ int slot, channel, branch;
char *p, *mem_buffer;
int space, n;
- int channel;
/* ================= Generate some debug output ================= */
space = PAGE_SIZE;
@@ -1046,22 +1047,17 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
return;
}
- n = snprintf(p, space, "\n");
- p += n;
- space -= n;
-
- /* Scan all the actual CSROWS (which is # of DIMMS * 2)
+ /* Scan all the actual slots
* and calculate the information for each DIMM
- * Start with the highest csrow first, to display it first
- * and work toward the 0th csrow
+ * Start with the highest slot first, to display it first
+ * and work toward the 0th slot
*/
- max_csrows = pvt->maxdimmperch * 2;
- for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+ for (slot = pvt->maxdimmperch - 1; slot >= 0; slot--) {
- /* on an odd csrow, first output a 'boundary' marker,
+ /* on an odd slot, first output a 'boundary' marker,
* then reset the message buffer */
- if (csrow & 0x1) {
- n = snprintf(p, space, "---------------------------"
+ if (slot & 0x1) {
+ n = snprintf(p, space, "--------------------------"
"--------------------------------");
p += n;
space -= n;
@@ -1069,30 +1065,39 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
p = mem_buffer;
space = PAGE_SIZE;
}
- n = snprintf(p, space, "csrow %2d ", csrow);
+ n = snprintf(p, space, "slot %2d ", slot);
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
- dinfo = &pvt->dimm_info[csrow][channel];
- handle_channel(pvt, csrow, channel, dinfo);
- n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
+ dinfo = &pvt->dimm_info[slot][channel];
+ handle_channel(pvt, slot, channel, dinfo);
+ if (dinfo->megabytes)
+ n = snprintf(p, space, "%4d MB %dR| ",
+ dinfo->megabytes, dinfo->dual_rank + 1);
+ else
+ n = snprintf(p, space, "%4d MB | ", 0);
p += n;
space -= n;
}
- n = snprintf(p, space, "\n");
p += n;
space -= n;
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
}
/* Output the last bottom 'boundary' marker */
- n = snprintf(p, space, "---------------------------"
- "--------------------------------\n");
+ n = snprintf(p, space, "--------------------------"
+ "--------------------------------");
p += n;
space -= n;
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
/* now output the 'channel' labels */
- n = snprintf(p, space, " ");
+ n = snprintf(p, space, " ");
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1100,9 +1105,17 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
p += n;
space -= n;
}
- n = snprintf(p, space, "\n");
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
+
+ n = snprintf(p, space, " ");
p += n;
- space -= n;
+ for (branch = 0; branch < MAX_BRANCHES; branch++) {
+ n = snprintf(p, space, " branch %d | ", branch);
+ p += n;
+ space -= n;
+ }
/* output the last message and free buffer */
debugf2("%s\n", mem_buffer);
@@ -1235,13 +1248,13 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
static int i5000_init_csrows(struct mem_ctl_info *mci)
{
struct i5000_pvt *pvt;
- struct csrow_info *p_csrow;
+ struct dimm_info *dimm;
int empty, channel_count;
int max_csrows;
- int mtr, mtr1;
+ int mtr;
int csrow_megs;
int channel;
- int csrow;
+ int slot;
pvt = mci->pvt_info;
@@ -1250,43 +1263,40 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
empty = 1; /* Assume NO memory */
- for (csrow = 0; csrow < max_csrows; csrow++) {
- p_csrow = &mci->csrows[csrow];
-
- p_csrow->csrow_idx = csrow;
-
- /* use branch 0 for the basis */
- mtr = pvt->b0_mtr[csrow >> 1];
- mtr1 = pvt->b1_mtr[csrow >> 1];
-
- /* if no DIMMS on this row, continue */
- if (!MTR_DIMMS_PRESENT(mtr) && !MTR_DIMMS_PRESENT(mtr1))
- continue;
+ /*
+ * FIXME: The memory layout used to map slot/channel into the
+ * real memory architecture is weird: branch+slot are "csrows"
+ * and channel is channel. That required an extra array (dimm_info)
+ * to map the dimms. A good cleanup would be to remove this array,
+ * and do a loop here with branch, channel, slot
+ */
+ for (slot = 0; slot < max_csrows; slot++) {
+ for (channel = 0; channel < pvt->maxch; channel++) {
- /* FAKE OUT VALUES, FIXME */
- p_csrow->first_page = 0 + csrow * 20;
- p_csrow->last_page = 9 + csrow * 20;
- p_csrow->page_mask = 0xFFF;
+ mtr = determine_mtr(pvt, slot, channel);
- p_csrow->grain = 8;
+ if (!MTR_DIMMS_PRESENT(mtr))
+ continue;
- csrow_megs = 0;
- for (channel = 0; channel < pvt->maxch; channel++) {
- csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
- }
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ channel / MAX_BRANCHES,
+ channel % MAX_BRANCHES, slot);
- p_csrow->nr_pages = csrow_megs << 8;
+ csrow_megs = pvt->dimm_info[slot][channel].megabytes;
+ dimm->grain = 8;
- /* Assume DDR2 for now */
- p_csrow->mtype = MEM_FB_DDR2;
+ /* Assume DDR2 for now */
+ dimm->mtype = MEM_FB_DDR2;
- /* ask what device type on this row */
- if (MTR_DRAM_WIDTH(mtr))
- p_csrow->dtype = DEV_X8;
- else
- p_csrow->dtype = DEV_X4;
+ /* ask what device type on this row */
+ if (MTR_DRAM_WIDTH(mtr))
+ dimm->dtype = DEV_X8;
+ else
+ dimm->dtype = DEV_X4;
- p_csrow->edac_mode = EDAC_S8ECD8ED;
+ dimm->edac_mode = EDAC_S8ECD8ED;
+ dimm->nr_pages = csrow_megs << 8;
+ }
empty = 0;
}
@@ -1317,7 +1327,7 @@ static void i5000_enable_error_reporting(struct mem_ctl_info *mci)
}
/*
- * i5000_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels)
+ * i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, &num_channels)
*
* ask the device how many channels are present and how many CSROWS
* as well
@@ -1332,7 +1342,7 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
* supported on this memory controller
*/
pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
- *num_dimms_per_channel = (int)value *2;
+ *num_dimms_per_channel = (int)value;
pci_read_config_byte(pdev, MAXCH, &value);
*num_channels = (int)value;
@@ -1348,10 +1358,10 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[3];
struct i5000_pvt *pvt;
int num_channels;
int num_dimms_per_channel;
- int num_csrows;
debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
__FILE__, __func__,
@@ -1377,14 +1387,22 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
*/
i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
&num_channels);
- num_csrows = num_dimms_per_channel * 2;
- debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
- __func__, num_channels, num_dimms_per_channel, num_csrows);
+ debugf0("MC: %s(): Number of Branches=2 Channels= %d DIMMS= %d\n",
+ __func__, num_channels, num_dimms_per_channel);
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
+ layers[0].type = EDAC_MC_LAYER_BRANCH;
+ layers[0].size = MAX_BRANCHES;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = num_channels / MAX_BRANCHES;
+ layers[1].is_virt_csrow = false;
+ layers[2].type = EDAC_MC_LAYER_SLOT;
+ layers[2].size = num_dimms_per_channel;
+ layers[2].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
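Note on i5000_init_csrows() above: the loop walks a flat channel index and a slot, then places each DIMM in the (branch, channel, slot) grid through EDAC_DIMM_PTR(). Because MAX_BRANCHES and CHANNELS_PER_BRANCH are both 2 on this controller, dividing the flat index by MAX_BRANCHES is numerically the same as dividing by CHANNELS_PER_BRANCH. A plain-C sketch of the mapping; the four slots per channel are a made-up value:

#include <stdio.h>

#define MAX_BRANCHES            2
#define CHANNELS_PER_BRANCH     2
#define SLOTS_PER_CHANNEL       4       /* hypothetical, read from hardware in the driver */

int main(void)
{
        int channel, slot;

        /* Walk the flat channel index the way the init loop does. */
        for (channel = 0; channel < MAX_BRANCHES * CHANNELS_PER_BRANCH; channel++) {
                for (slot = 0; slot < SLOTS_PER_CHANNEL; slot++) {
                        int branch = channel / MAX_BRANCHES;    /* == /CHANNELS_PER_BRANCH here */
                        int ch = channel % MAX_BRANCHES;

                        printf("flat channel %d, slot %d -> layer pos (%d,%d,%d)\n",
                               channel, slot, branch, ch, slot);
                }
        }
        return 0;
}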
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index d500749464ea..e9e7c2a29dc3 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -14,6 +14,11 @@
* rows for each respective channel are laid out one after another,
* the first half belonging to channel 0, the second half belonging
* to channel 1.
+ *
+ * This driver is for DDR2 DIMMs, and it uses chip select to address the
+ * individual ranks. However, instead of reporting memory as ranks, it
+ * reports it as DIMMs. An internal table creates the association between
+ * ranks and DIMMs.
*/
#include <linux/module.h>
#include <linux/init.h>
@@ -410,14 +415,6 @@ static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
return csrow / priv->ranksperchan;
}
-static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
- int chan, int rank)
-{
- const struct i5100_priv *priv = mci->pvt_info;
-
- return chan * priv->ranksperchan + rank;
-}
-
static void i5100_handle_ce(struct mem_ctl_info *mci,
int chan,
unsigned bank,
@@ -427,17 +424,17 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, chan, rank);
+ char detail[80];
- printk(KERN_ERR
- "CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
- "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- chan, bank, rank, syndrome, cas, ras,
- csrow, mci->csrows[csrow].channels[0].label, msg);
+ /* Form out message */
+ snprintf(detail, sizeof(detail),
+ "bank %u, cas %u, ras %u\n",
+ bank, cas, ras);
- mci->ce_count++;
- mci->csrows[csrow].ce_count++;
- mci->csrows[csrow].channels[0].ce_count++;
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, syndrome,
+ chan, rank, -1,
+ msg, detail, NULL);
}
static void i5100_handle_ue(struct mem_ctl_info *mci,
@@ -449,16 +446,17 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, chan, rank);
+ char detail[80];
- printk(KERN_ERR
- "UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
- "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- chan, bank, rank, syndrome, cas, ras,
- csrow, mci->csrows[csrow].channels[0].label, msg);
+ /* Form out message */
+ snprintf(detail, sizeof(detail),
+ "bank %u, cas %u, ras %u\n",
+ bank, cas, ras);
- mci->ue_count++;
- mci->csrows[csrow].ue_count++;
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, syndrome,
+ chan, rank, -1,
+ msg, detail, NULL);
}
static void i5100_read_log(struct mem_ctl_info *mci, int chan,
@@ -835,10 +833,10 @@ static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
{
int i;
- unsigned long total_pages = 0UL;
struct i5100_priv *priv = mci->pvt_info;
- for (i = 0; i < mci->nr_csrows; i++) {
+ for (i = 0; i < mci->tot_dimms; i++) {
+ struct dimm_info *dimm;
const unsigned long npages = i5100_npages(mci, i);
const unsigned chan = i5100_csrow_to_chan(mci, i);
const unsigned rank = i5100_csrow_to_rank(mci, i);
@@ -846,33 +844,23 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
if (!npages)
continue;
- /*
- * FIXME: these two are totally bogus -- I don't see how to
- * map them correctly to this structure...
- */
- mci->csrows[i].first_page = total_pages;
- mci->csrows[i].last_page = total_pages + npages - 1;
- mci->csrows[i].page_mask = 0UL;
-
- mci->csrows[i].nr_pages = npages;
- mci->csrows[i].grain = 32;
- mci->csrows[i].csrow_idx = i;
- mci->csrows[i].dtype =
- (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
- mci->csrows[i].ue_count = 0;
- mci->csrows[i].ce_count = 0;
- mci->csrows[i].mtype = MEM_RDDR2;
- mci->csrows[i].edac_mode = EDAC_SECDED;
- mci->csrows[i].mci = mci;
- mci->csrows[i].nr_channels = 1;
- mci->csrows[i].channels[0].chan_idx = 0;
- mci->csrows[i].channels[0].ce_count = 0;
- mci->csrows[i].channels[0].csrow = mci->csrows + i;
- snprintf(mci->csrows[i].channels[0].label,
- sizeof(mci->csrows[i].channels[0].label),
- "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
-
- total_pages += npages;
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ chan, rank, 0);
+
+ dimm->nr_pages = npages;
+ if (npages) {
+ dimm->grain = 32;
+ dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
+ DEV_X4 : DEV_X8;
+ dimm->mtype = MEM_RDDR2;
+ dimm->edac_mode = EDAC_SECDED;
+ snprintf(dimm->label, sizeof(dimm->label),
+ "DIMM%u",
+ i5100_rank_to_slot(mci, chan, rank));
+ }
+
+ debugf2("dimm channel %d, rank %d, size %ld\n",
+ chan, rank, (long)PAGES_TO_MiB(npages));
}
}
@@ -881,6 +869,7 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
{
int rc;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i5100_priv *priv;
struct pci_dev *ch0mm, *ch1mm;
int ret = 0;
@@ -941,7 +930,14 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
goto bail_ch1;
}
- mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 2;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = ranksperch;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(*priv));
if (!mci) {
ret = -ENOMEM;
goto bail_disable_ch1;
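Note on the i5100 conversion above: chip-select rows are laid out linearly, the first half belonging to channel 0 and the second half to channel 1, so i5100_csrow_to_chan() divides by ranksperchan; the matching rank lookup is assumed here to be the remainder, since its body is not in this hunk. A tiny sketch of that split with a made-up ranksperchan:

#include <stdio.h>

int main(void)
{
        const unsigned ranksperchan = 4;        /* hypothetical value */
        unsigned i;

        for (i = 0; i < 2 * ranksperchan; i++) {
                unsigned chan = i / ranksperchan;       /* as i5100_csrow_to_chan() */
                unsigned rank = i % ranksperchan;       /* assumed rank lookup */

                printf("csrow %u -> channel %u, rank %u\n", i, chan, rank);
        }
        return 0;
}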
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 1869a1018fb5..6640c29e1885 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -18,6 +18,10 @@
* Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet
* http://developer.intel.com/design/chipsets/datashts/313070.htm
*
+ * This Memory Controller manages DDR2 FB-DIMMs. It has 2 branches, each with
+ * 2 channels operating in lockstep no-mirror mode. Each channel can have up to
+ * 4 DIMMs, each with up to 8 GB.
+ *
*/
#include <linux/module.h>
@@ -44,12 +48,10 @@
edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)
/* Limits for i5400 */
-#define NUM_MTRS_PER_BRANCH 4
+#define MAX_BRANCHES 2
#define CHANNELS_PER_BRANCH 2
-#define MAX_DIMMS_PER_CHANNEL NUM_MTRS_PER_BRANCH
-#define MAX_CHANNELS 4
-/* max possible csrows per channel */
-#define MAX_CSROWS (MAX_DIMMS_PER_CHANNEL)
+#define DIMMS_PER_CHANNEL 4
+#define MAX_CHANNELS (MAX_BRANCHES * CHANNELS_PER_BRANCH)
/* Device 16,
* Function 0: System Address
@@ -347,16 +349,16 @@ struct i5400_pvt {
u16 mir0, mir1;
- u16 b0_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technlogy Reg */
+ u16 b0_mtr[DIMMS_PER_CHANNEL]; /* Memory Technlogy Reg */
u16 b0_ambpresent0; /* Branch 0, Channel 0 */
u16 b0_ambpresent1; /* Brnach 0, Channel 1 */
- u16 b1_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technlogy Reg */
+ u16 b1_mtr[DIMMS_PER_CHANNEL]; /* Memory Technlogy Reg */
u16 b1_ambpresent0; /* Branch 1, Channel 8 */
u16 b1_ambpresent1; /* Branch 1, Channel 1 */
/* DIMM information matrix, allocating architecture maximums */
- struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
+ struct i5400_dimm_info dimm_info[DIMMS_PER_CHANNEL][MAX_CHANNELS];
/* Actual values for this controller */
int maxch; /* Max channels */
@@ -532,13 +534,15 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
int ras, cas;
int errnum;
char *type = NULL;
+ enum hw_event_mc_err_type tp_event = HW_EVENT_ERR_UNCORRECTED;
if (!allErrors)
return; /* if no error, return now */
- if (allErrors & ERROR_FAT_MASK)
+ if (allErrors & ERROR_FAT_MASK) {
type = "FATAL";
- else if (allErrors & FERR_NF_UNCORRECTABLE)
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else if (allErrors & FERR_NF_UNCORRECTABLE)
type = "NON-FATAL uncorrected";
else
type = "NON-FATAL recoverable";
@@ -556,7 +560,7 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
ras = nrec_ras(info);
cas = nrec_cas(info);
- debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
+ debugf0("\t\tDIMM= %d Channels= %d,%d (Branch= %d "
"DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
rank, channel, channel + 1, branch >> 1, bank,
buf_id, rdwr_str(rdwr), ras, cas);
@@ -566,13 +570,13 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s "
- "RAS=%d CAS=%d %s Err=0x%lx (%s))",
- type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas,
- type, allErrors, error_name[errnum]);
+ "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)",
+ bank, buf_id, ras, cas, allErrors, error_name[errnum]);
- /* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+ edac_mc_handle_error(tp_event, mci, 0, 0, 0,
+ branch >> 1, -1, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
/*
@@ -630,7 +634,7 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Only 1 bit will be on */
errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
- debugf0("\t\tCSROW= %d Channel= %d (Branch %d "
+ debugf0("\t\tDIMM= %d Channel= %d (Branch %d "
"DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
rank, channel, branch >> 1, bank,
rdwr_str(rdwr), ras, cas);
@@ -642,8 +646,10 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
branch >> 1, bank, rdwr_str(rdwr), ras, cas,
allErrors, error_name[errnum]);
- /* Call the helper to output message */
- edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ branch >> 1, channel % 2, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
return;
}
@@ -831,8 +837,8 @@ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
/*
* determine_amb_present
*
- * the information is contained in NUM_MTRS_PER_BRANCH different
- * registers determining which of the NUM_MTRS_PER_BRANCH requires
+ * the information is contained in DIMMS_PER_CHANNEL different
+ * registers determining which of the DIMMS_PER_CHANNEL requires
* knowing which channel is in question
*
* 2 branches, each with 2 channels
@@ -861,11 +867,11 @@ static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel)
}
/*
- * determine_mtr(pvt, csrow, channel)
+ * determine_mtr(pvt, dimm, channel)
*
- * return the proper MTR register as determine by the csrow and desired channel
+ * return the proper MTR register as determine by the dimm and desired channel
*/
-static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
+static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel)
{
int mtr;
int n;
@@ -873,11 +879,11 @@ static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
/* There is one MTR for each slot pair of FB-DIMMs,
Each slot pair may be at branch 0 or branch 1.
*/
- n = csrow;
+ n = dimm;
- if (n >= NUM_MTRS_PER_BRANCH) {
- debugf0("ERROR: trying to access an invalid csrow: %d\n",
- csrow);
+ if (n >= DIMMS_PER_CHANNEL) {
+ debugf0("ERROR: trying to access an invalid dimm: %d\n",
+ dimm);
return 0;
}
@@ -913,19 +919,19 @@ static void decode_mtr(int slot_row, u16 mtr)
debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
}
-static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
+static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel,
struct i5400_dimm_info *dinfo)
{
int mtr;
int amb_present_reg;
int addrBits;
- mtr = determine_mtr(pvt, csrow, channel);
+ mtr = determine_mtr(pvt, dimm, channel);
if (MTR_DIMMS_PRESENT(mtr)) {
amb_present_reg = determine_amb_present_reg(pvt, channel);
/* Determine if there is a DIMM present in this DIMM slot */
- if (amb_present_reg & (1 << csrow)) {
+ if (amb_present_reg & (1 << dimm)) {
/* Start with the number of bits for a Bank
* on the DRAM */
addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
@@ -954,10 +960,10 @@ static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
static void calculate_dimm_size(struct i5400_pvt *pvt)
{
struct i5400_dimm_info *dinfo;
- int csrow, max_csrows;
+ int dimm, max_dimms;
char *p, *mem_buffer;
int space, n;
- int channel;
+ int channel, branch;
/* ================= Generate some debug output ================= */
space = PAGE_SIZE;
@@ -968,32 +974,32 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
return;
}
- /* Scan all the actual CSROWS
+ /* Scan all the actual DIMMS
* and calculate the information for each DIMM
- * Start with the highest csrow first, to display it first
- * and work toward the 0th csrow
+ * Start with the highest dimm first, to display it first
+ * and work toward the 0th dimm
*/
- max_csrows = pvt->maxdimmperch;
- for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+ max_dimms = pvt->maxdimmperch;
+ for (dimm = max_dimms - 1; dimm >= 0; dimm--) {
- /* on an odd csrow, first output a 'boundary' marker,
+ /* on an odd dimm, first output a 'boundary' marker,
* then reset the message buffer */
- if (csrow & 0x1) {
+ if (dimm & 0x1) {
n = snprintf(p, space, "---------------------------"
- "--------------------------------");
+ "-------------------------------");
p += n;
space -= n;
debugf2("%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
- n = snprintf(p, space, "csrow %2d ", csrow);
+ n = snprintf(p, space, "dimm %2d ", dimm);
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
- dinfo = &pvt->dimm_info[csrow][channel];
- handle_channel(pvt, csrow, channel, dinfo);
+ dinfo = &pvt->dimm_info[dimm][channel];
+ handle_channel(pvt, dimm, channel, dinfo);
n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
p += n;
space -= n;
@@ -1005,7 +1011,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
/* Output the last bottom 'boundary' marker */
n = snprintf(p, space, "---------------------------"
- "--------------------------------");
+ "-------------------------------");
p += n;
space -= n;
debugf2("%s\n", mem_buffer);
@@ -1013,7 +1019,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
space = PAGE_SIZE;
/* now output the 'channel' labels */
- n = snprintf(p, space, " ");
+ n = snprintf(p, space, " ");
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1022,6 +1028,19 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
space -= n;
}
+ space -= n;
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
+
+ n = snprintf(p, space, " ");
+ p += n;
+ for (branch = 0; branch < MAX_BRANCHES; branch++) {
+ n = snprintf(p, space, " branch %d | ", branch);
+ p += n;
+ space -= n;
+ }
+
/* output the last message and free buffer */
debugf2("%s\n", mem_buffer);
kfree(mem_buffer);
@@ -1080,7 +1099,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
/* Get the set of MTR[0-3] regs by each branch */
- for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) {
+ for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) {
int where = MTR0 + (slot_row * sizeof(u16));
/* Branch 0 set of MTR registers */
@@ -1105,7 +1124,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
/* Read and dump branch 0's MTRs */
debugf2("\nMemory Technology Registers:\n");
debugf2(" Branch 0:\n");
- for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
+ for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
@@ -1122,7 +1141,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
} else {
/* Read and dump branch 1's MTRs */
debugf2(" Branch 1:\n");
- for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
+ for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
@@ -1141,7 +1160,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
}
/*
- * i5400_init_csrows Initialize the 'csrows' table within
+ * i5400_init_dimms Initialize the 'dimms' table within
* the mci control structure with the
* addressing of memory.
*
@@ -1149,64 +1168,68 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
* 0 success
* 1 no actual memory found on this MC
*/
-static int i5400_init_csrows(struct mem_ctl_info *mci)
+static int i5400_init_dimms(struct mem_ctl_info *mci)
{
struct i5400_pvt *pvt;
- struct csrow_info *p_csrow;
- int empty, channel_count;
- int max_csrows;
+ struct dimm_info *dimm;
+ int ndimms, channel_count;
+ int max_dimms;
int mtr;
- int csrow_megs;
- int channel;
- int csrow;
+ int size_mb;
+ int channel, slot;
pvt = mci->pvt_info;
channel_count = pvt->maxch;
- max_csrows = pvt->maxdimmperch;
+ max_dimms = pvt->maxdimmperch;
- empty = 1; /* Assume NO memory */
+ ndimms = 0;
- for (csrow = 0; csrow < max_csrows; csrow++) {
- p_csrow = &mci->csrows[csrow];
-
- p_csrow->csrow_idx = csrow;
-
- /* use branch 0 for the basis */
- mtr = determine_mtr(pvt, csrow, 0);
-
- /* if no DIMMS on this row, continue */
- if (!MTR_DIMMS_PRESENT(mtr))
- continue;
-
- /* FAKE OUT VALUES, FIXME */
- p_csrow->first_page = 0 + csrow * 20;
- p_csrow->last_page = 9 + csrow * 20;
- p_csrow->page_mask = 0xFFF;
-
- p_csrow->grain = 8;
-
- csrow_megs = 0;
- for (channel = 0; channel < pvt->maxch; channel++)
- csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
-
- p_csrow->nr_pages = csrow_megs << 8;
-
- /* Assume DDR2 for now */
- p_csrow->mtype = MEM_FB_DDR2;
-
- /* ask what device type on this row */
- if (MTR_DRAM_WIDTH(mtr))
- p_csrow->dtype = DEV_X8;
- else
- p_csrow->dtype = DEV_X4;
-
- p_csrow->edac_mode = EDAC_S8ECD8ED;
-
- empty = 0;
+ /*
+ * FIXME: remove pvt->dimm_info[slot][channel] and use the 3
+ * layers here.
+ */
+ for (channel = 0; channel < mci->layers[0].size * mci->layers[1].size;
+ channel++) {
+ for (slot = 0; slot < mci->layers[2].size; slot++) {
+ mtr = determine_mtr(pvt, slot, channel);
+
+ /* if no DIMMS on this slot, continue */
+ if (!MTR_DIMMS_PRESENT(mtr))
+ continue;
+
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ channel / 2, channel % 2, slot);
+
+ size_mb = pvt->dimm_info[slot][channel].megabytes;
+
+ debugf2("%s: dimm%zd (branch %d channel %d slot %d): %d.%03d GB\n",
+ __func__, dimm - mci->dimms,
+ channel / 2, channel % 2, slot,
+ size_mb / 1000, size_mb % 1000);
+
+ dimm->nr_pages = size_mb << 8;
+ dimm->grain = 8;
+ dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
+ dimm->mtype = MEM_FB_DDR2;
+ /*
+ * The ECC mechanism is SDDC (aka SECC), which
+ * is similar to Chipkill.
+ */
+ dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
+ EDAC_S8ECD8ED : EDAC_S4ECD4ED;
+ ndimms++;
+ }
}
- return empty;
+ /*
+ * When just one DIMM is present, it should be at location (0,0,0).
+ * In such single-DIMM mode, the SDDC algorithm degrades to SECDED+.
+ */
+ if (ndimms == 1)
+ mci->dimms[0].edac_mode = EDAC_SECDED;
+
+ return (ndimms == 0);
}
/*
@@ -1242,9 +1265,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
struct i5400_pvt *pvt;
- int num_channels;
- int num_dimms_per_channel;
- int num_csrows;
+ struct edac_mc_layer layers[3];
if (dev_idx >= ARRAY_SIZE(i5400_devs))
return -EINVAL;
@@ -1258,23 +1279,21 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
if (PCI_FUNC(pdev->devfn) != 0)
return -ENODEV;
- /* As we don't have a motherboard identification routine to determine
- * actual number of slots/dimms per channel, we thus utilize the
- * resource as specified by the chipset. Thus, we might have
- * have more DIMMs per channel than actually on the mobo, but this
- * allows the driver to support up to the chipset max, without
- * some fancy mobo determination.
+ /*
+ * allocate a new MC control structure
+ *
+ * This driver uses the DIMM slot as "csrow" and the rest as "channel".
*/
- num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL;
- num_channels = MAX_CHANNELS;
- num_csrows = num_dimms_per_channel;
-
- debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
- __func__, num_channels, num_dimms_per_channel, num_csrows);
-
- /* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
-
+ layers[0].type = EDAC_MC_LAYER_BRANCH;
+ layers[0].size = MAX_BRANCHES;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = CHANNELS_PER_BRANCH;
+ layers[1].is_virt_csrow = false;
+ layers[2].type = EDAC_MC_LAYER_SLOT;
+ layers[2].size = DIMMS_PER_CHANNEL;
+ layers[2].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
@@ -1284,8 +1303,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
pvt = mci->pvt_info;
pvt->system_address = pdev; /* Record this device in our private */
- pvt->maxch = num_channels;
- pvt->maxdimmperch = num_dimms_per_channel;
+ pvt->maxch = MAX_CHANNELS;
+ pvt->maxdimmperch = DIMMS_PER_CHANNEL;
/* 'get' the pci devices we want to reserve for our use */
if (i5400_get_devices(mci, dev_idx))
@@ -1307,13 +1326,13 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
/* Set the function pointer to an actual operation function */
mci->edac_check = i5400_check_error;
- /* initialize the MC control structure 'csrows' table
+ /* initialize the MC control structure 'dimms' table
* with the mapping and control information */
- if (i5400_init_csrows(mci)) {
+ if (i5400_init_dimms(mci)) {
debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
- " because i5400_init_csrows() returned nonzero "
+ " because i5400_init_dimms() returned nonzero "
"value\n");
- mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
+ mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */
} else {
debugf1("MC: Enable error reporting now\n");
i5400_enable_error_reporting(mci);
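For reference, the layer-based allocation adopted above follows one pattern across these drivers. The fragment below is a minimal sketch of that pattern, not code from i5400_edac.c; the MY_* constants and struct my_pvt are illustrative placeholders, and the headers are assumed to be the ones these drivers already include.

#include <linux/edac.h>
#include "edac_core.h"

/* Illustrative placeholders, not constants from this driver. */
#define MY_BRANCHES		2
#define MY_CHANS_PER_BRANCH	2
#define MY_DIMMS_PER_CHANNEL	4

struct my_pvt { int placeholder; };

static struct mem_ctl_info *my_alloc_mci(void)
{
	struct edac_mc_layer layers[3];

	/* Describe the controller topology; the EDAC core sizes the
	 * dimms[] array from the product of the layer sizes. */
	layers[0].type = EDAC_MC_LAYER_BRANCH;
	layers[0].size = MY_BRANCHES;
	layers[0].is_virt_csrow = false;	/* branches are not csrows */
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = MY_CHANS_PER_BRANCH;
	layers[1].is_virt_csrow = false;
	layers[2].type = EDAC_MC_LAYER_SLOT;
	layers[2].size = MY_DIMMS_PER_CHANNEL;
	layers[2].is_virt_csrow = true;		/* the slot plays the csrow role */

	/* edac_mc_alloc(mc index, number of layers, layers, private size) */
	return edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
			     sizeof(struct my_pvt));
}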
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 3bafa3bca148..97c22fd650ee 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -464,17 +464,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
FERR_FAT_FBD, error_reg);
snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
- "FATAL (Branch=%d DRAM-Bank=%d %s "
- "RAS=%d CAS=%d Err=0x%lx (%s))",
- branch, bank,
- is_wr ? "RDWR" : "RD",
- ras, cas,
- errors, specific);
-
- /* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, branch << 1,
- (branch << 1) + 1,
- pvt->tmp_prt_buffer);
+ "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
+ bank, ras, cas, errors, specific);
+
+ edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
+ branch, -1, rank,
+ is_wr ? "Write error" : "Read error",
+ pvt->tmp_prt_buffer, NULL);
+
}
/* read in the 1st NON-FATAL error register */
@@ -513,23 +510,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
/* Form out message */
snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
- "Corrected error (Branch=%d, Channel %d), "
- " DRAM-Bank=%d %s "
- "RAS=%d CAS=%d, CE Err=0x%lx, Syndrome=0x%08x(%s))",
- branch, channel,
- bank,
- is_wr ? "RDWR" : "RD",
- ras, cas,
- errors, syndrome, specific);
-
- /*
- * Call the helper to output message
- * NOTE: Errors are reported per-branch, and not per-channel
- * Currently, we don't know how to identify the right
- * channel.
- */
- edac_mc_handle_fbd_ce(mci, rank, channel,
- pvt->tmp_prt_buffer);
+ "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
+ bank, ras, cas, errors, specific);
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0,
+ syndrome,
+ branch >> 1, channel % 2, rank,
+ is_wr ? "Write error" : "Read error",
+ pvt->tmp_prt_buffer, NULL);
}
return;
}
@@ -617,8 +605,7 @@ static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
static int decode_mtr(struct i7300_pvt *pvt,
int slot, int ch, int branch,
struct i7300_dimm_info *dinfo,
- struct csrow_info *p_csrow,
- u32 *nr_pages)
+ struct dimm_info *dimm)
{
int mtr, ans, addrBits, channel;
@@ -650,7 +637,6 @@ static int decode_mtr(struct i7300_pvt *pvt,
addrBits -= 3; /* 8 bits per bytes */
dinfo->megabytes = 1 << addrBits;
- *nr_pages = dinfo->megabytes << 8;
debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
@@ -663,11 +649,6 @@ static int decode_mtr(struct i7300_pvt *pvt,
debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes);
- p_csrow->grain = 8;
- p_csrow->mtype = MEM_FB_DDR2;
- p_csrow->csrow_idx = slot;
- p_csrow->page_mask = 0;
-
/*
* The type of error detection actually depends of the
* mode of operation. When it is just one single memory chip, at
@@ -677,15 +658,18 @@ static int decode_mtr(struct i7300_pvt *pvt,
* See datasheet Sections 7.3.6 to 7.3.8
*/
+ dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
+ dimm->grain = 8;
+ dimm->mtype = MEM_FB_DDR2;
if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
- p_csrow->edac_mode = EDAC_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
} else {
debugf2("\t\tECC code is on Lockstep mode\n");
if (MTR_DRAM_WIDTH(mtr) == 8)
- p_csrow->edac_mode = EDAC_S8ECD8ED;
+ dimm->edac_mode = EDAC_S8ECD8ED;
else
- p_csrow->edac_mode = EDAC_S4ECD4ED;
+ dimm->edac_mode = EDAC_S4ECD4ED;
}
/* ask what device type on this row */
@@ -694,9 +678,9 @@ static int decode_mtr(struct i7300_pvt *pvt,
IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
"enhanced" : "normal");
- p_csrow->dtype = DEV_X8;
+ dimm->dtype = DEV_X8;
} else
- p_csrow->dtype = DEV_X4;
+ dimm->dtype = DEV_X4;
return mtr;
}
@@ -774,11 +758,10 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt;
struct i7300_dimm_info *dinfo;
- struct csrow_info *p_csrow;
int rc = -ENODEV;
int mtr;
int ch, branch, slot, channel;
- u32 last_page = 0, nr_pages;
+ struct dimm_info *dimm;
pvt = mci->pvt_info;
@@ -809,25 +792,23 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
where,
&pvt->mtr[slot][branch]);
- for (ch = 0; ch < MAX_BRANCHES; ch++) {
+ for (ch = 0; ch < MAX_CH_PER_BRANCH; ch++) {
int channel = to_channel(ch, branch);
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+ mci->n_layers, branch, ch, slot);
+
dinfo = &pvt->dimm_info[slot][channel];
- p_csrow = &mci->csrows[slot];
mtr = decode_mtr(pvt, slot, ch, branch,
- dinfo, p_csrow, &nr_pages);
+ dinfo, dimm);
+
/* if no DIMMS on this row, continue */
if (!MTR_DIMMS_PRESENT(mtr))
continue;
- /* Update per_csrow memory count */
- p_csrow->nr_pages += nr_pages;
- p_csrow->first_page = last_page;
- last_page += nr_pages;
- p_csrow->last_page = last_page;
-
rc = 0;
+
}
}
}
@@ -1042,10 +1023,8 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[3];
struct i7300_pvt *pvt;
- int num_channels;
- int num_dimms_per_channel;
- int num_csrows;
int rc;
/* wake up device */
@@ -1062,23 +1041,17 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
if (PCI_FUNC(pdev->devfn) != 0)
return -ENODEV;
- /* As we don't have a motherboard identification routine to determine
- * actual number of slots/dimms per channel, we thus utilize the
- * resource as specified by the chipset. Thus, we might have
- * have more DIMMs per channel than actually on the mobo, but this
- * allows the driver to support up to the chipset max, without
- * some fancy mobo determination.
- */
- num_dimms_per_channel = MAX_SLOTS;
- num_channels = MAX_CHANNELS;
- num_csrows = MAX_SLOTS * MAX_CHANNELS;
-
- debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
- __func__, num_channels, num_dimms_per_channel, num_csrows);
-
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
-
+ layers[0].type = EDAC_MC_LAYER_BRANCH;
+ layers[0].size = MAX_BRANCHES;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = MAX_CH_PER_BRANCH;
+ layers[1].is_virt_csrow = true;
+ layers[2].type = EDAC_MC_LAYER_SLOT;
+ layers[2].size = MAX_SLOTS;
+ layers[2].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
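The converted error paths above report positions by layer rather than by csrow, which makes the positional arguments of edac_mc_handle_error() easy to misread. The annotated call below is a sketch of the argument order as used by the calls in this diff; the variable names are illustrative.

/* Sketch of the argument order, matching the calls above. */
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED,	/* severity of the event */
		     mci,
		     page,		/* page frame number of the error */
		     offset,		/* offset inside that page */
		     syndrome,		/* ECC syndrome, 0 when unknown */
		     branch,		/* top layer position */
		     channel,		/* middle layer position */
		     rank,		/* lowest layer position, -1 if unknown */
		     "Read error",	/* short error message */
		     pvt->tmp_prt_buffer,	/* driver-specific detail string */
		     NULL);		/* optional arch log, e.g. a struct mce */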
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 7f1dfcc4e597..d27778f65a5d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -221,7 +221,9 @@ struct i7core_inject {
};
struct i7core_channel {
- u32 ranks;
+ bool is_3dimms_present;
+ bool is_single_4rank;
+ bool has_4rank;
u32 dimms;
};
@@ -257,7 +259,6 @@ struct i7core_pvt {
struct i7core_channel channel[NUM_CHANS];
int ce_count_available;
- int csrow_map[NUM_CHANS][MAX_DIMMS];
/* ECC corrected errors counts per udimm */
unsigned long udimm_ce_count[MAX_DIMMS];
@@ -492,116 +493,15 @@ static void free_i7core_dev(struct i7core_dev *i7core_dev)
/****************************************************************************
Memory check routines
****************************************************************************/
-static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
- unsigned func)
-{
- struct i7core_dev *i7core_dev = get_i7core_dev(socket);
- int i;
-
- if (!i7core_dev)
- return NULL;
-
- for (i = 0; i < i7core_dev->n_devs; i++) {
- if (!i7core_dev->pdev[i])
- continue;
-
- if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
- PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
- return i7core_dev->pdev[i];
- }
- }
-
- return NULL;
-}
-
-/**
- * i7core_get_active_channels() - gets the number of channels and csrows
- * @socket: Quick Path Interconnect socket
- * @channels: Number of channels that will be returned
- * @csrows: Number of csrows found
- *
- * Since EDAC core needs to know in advance the number of available channels
- * and csrows, in order to allocate memory for csrows/channels, it is needed
- * to run two similar steps. At the first step, implemented on this function,
- * it checks the number of csrows/channels present at one socket.
- * this is used in order to properly allocate the size of mci components.
- *
- * It should be noticed that none of the current available datasheets explain
- * or even mention how csrows are seen by the memory controller. So, we need
- * to add a fake description for csrows.
- * So, this driver is attributing one DIMM memory for one csrow.
- */
-static int i7core_get_active_channels(const u8 socket, unsigned *channels,
- unsigned *csrows)
-{
- struct pci_dev *pdev = NULL;
- int i, j;
- u32 status, control;
-
- *channels = 0;
- *csrows = 0;
-
- pdev = get_pdev_slot_func(socket, 3, 0);
- if (!pdev) {
- i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
- socket);
- return -ENODEV;
- }
-
- /* Device 3 function 0 reads */
- pci_read_config_dword(pdev, MC_STATUS, &status);
- pci_read_config_dword(pdev, MC_CONTROL, &control);
-
- for (i = 0; i < NUM_CHANS; i++) {
- u32 dimm_dod[3];
- /* Check if the channel is active */
- if (!(control & (1 << (8 + i))))
- continue;
-
- /* Check if the channel is disabled */
- if (status & (1 << i))
- continue;
-
- pdev = get_pdev_slot_func(socket, i + 4, 1);
- if (!pdev) {
- i7core_printk(KERN_ERR, "Couldn't find socket %d "
- "fn %d.%d!!!\n",
- socket, i + 4, 1);
- return -ENODEV;
- }
- /* Devices 4-6 function 1 */
- pci_read_config_dword(pdev,
- MC_DOD_CH_DIMM0, &dimm_dod[0]);
- pci_read_config_dword(pdev,
- MC_DOD_CH_DIMM1, &dimm_dod[1]);
- pci_read_config_dword(pdev,
- MC_DOD_CH_DIMM2, &dimm_dod[2]);
- (*channels)++;
-
- for (j = 0; j < 3; j++) {
- if (!DIMM_PRESENT(dimm_dod[j]))
- continue;
- (*csrows)++;
- }
- }
-
- debugf0("Number of active channels on socket %d: %d\n",
- socket, *channels);
-
- return 0;
-}
-
-static int get_dimm_config(const struct mem_ctl_info *mci)
+static int get_dimm_config(struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
- struct csrow_info *csr;
struct pci_dev *pdev;
int i, j;
- int csrow = 0;
- unsigned long last_page = 0;
enum edac_type mode;
enum mem_type mtype;
+ struct dimm_info *dimm;
/* Get data from the MC register, function 0 */
pdev = pvt->pci_mcr[0];
@@ -657,21 +557,20 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
pci_read_config_dword(pvt->pci_ch[i][0],
MC_CHANNEL_DIMM_INIT_PARAMS, &data);
- pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
- 4 : 2;
+
+ if (data & THREE_DIMMS_PRESENT)
+ pvt->channel[i].is_3dimms_present = true;
+
+ if (data & SINGLE_QUAD_RANK_PRESENT)
+ pvt->channel[i].is_single_4rank = true;
+
+ if (data & QUAD_RANK_PRESENT)
+ pvt->channel[i].has_4rank = true;
if (data & REGISTERED_DIMM)
mtype = MEM_RDDR3;
else
mtype = MEM_DDR3;
-#if 0
- if (data & THREE_DIMMS_PRESENT)
- pvt->channel[i].dimms = 3;
- else if (data & SINGLE_QUAD_RANK_PRESENT)
- pvt->channel[i].dimms = 1;
- else
- pvt->channel[i].dimms = 2;
-#endif
/* Devices 4-6 function 1 */
pci_read_config_dword(pvt->pci_ch[i][1],
@@ -682,11 +581,13 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
MC_DOD_CH_DIMM2, &dimm_dod[2]);
debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
- "%d ranks, %cDIMMs\n",
+ "%s%s%s%cDIMMs\n",
i,
RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
data,
- pvt->channel[i].ranks,
+ pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
+ pvt->channel[i].is_single_4rank ? "SINGLE_4R " : "",
+ pvt->channel[i].has_4rank ? "HAS_4R " : "",
(data & REGISTERED_DIMM) ? 'R' : 'U');
for (j = 0; j < 3; j++) {
@@ -696,6 +597,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
if (!DIMM_PRESENT(dimm_dod[j]))
continue;
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ i, j, 0);
banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
@@ -704,8 +607,6 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
/* DDR3 has 8 I/O banks */
size = (rows * cols * banks * ranks) >> (20 - 3);
- pvt->channel[i].dimms++;
-
debugf0("\tdimm %d %d Mb offset: %x, "
"bank: %d, rank: %d, row: %#x, col: %#x\n",
j, size,
@@ -714,44 +615,28 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
npages = MiB_TO_PAGES(size);
- csr = &mci->csrows[csrow];
- csr->first_page = last_page + 1;
- last_page += npages;
- csr->last_page = last_page;
- csr->nr_pages = npages;
-
- csr->page_mask = 0;
- csr->grain = 8;
- csr->csrow_idx = csrow;
- csr->nr_channels = 1;
-
- csr->channels[0].chan_idx = i;
- csr->channels[0].ce_count = 0;
-
- pvt->csrow_map[i][j] = csrow;
+ dimm->nr_pages = npages;
switch (banks) {
case 4:
- csr->dtype = DEV_X4;
+ dimm->dtype = DEV_X4;
break;
case 8:
- csr->dtype = DEV_X8;
+ dimm->dtype = DEV_X8;
break;
case 16:
- csr->dtype = DEV_X16;
+ dimm->dtype = DEV_X16;
break;
default:
- csr->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
}
- csr->edac_mode = mode;
- csr->mtype = mtype;
- snprintf(csr->channels[0].label,
- sizeof(csr->channels[0].label),
- "CPU#%uChannel#%u_DIMM#%u",
- pvt->i7core_dev->socket, i, j);
-
- csrow++;
+ snprintf(dimm->label, sizeof(dimm->label),
+ "CPU#%uChannel#%u_DIMM#%u",
+ pvt->i7core_dev->socket, i, j);
+ dimm->grain = 8;
+ dimm->edac_mode = mode;
+ dimm->mtype = mtype;
}
pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
@@ -1567,22 +1452,16 @@ error:
/****************************************************************************
Error check routines
****************************************************************************/
-static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
+static void i7core_rdimm_update_errcount(struct mem_ctl_info *mci,
const int chan,
const int dimm,
const int add)
{
- char *msg;
- struct i7core_pvt *pvt = mci->pvt_info;
- int row = pvt->csrow_map[chan][dimm], i;
+ int i;
for (i = 0; i < add; i++) {
- msg = kasprintf(GFP_KERNEL, "Corrected error "
- "(Socket=%d channel=%d dimm=%d)",
- pvt->i7core_dev->socket, chan, dimm);
-
- edac_mc_handle_fbd_ce(mci, row, 0, msg);
- kfree (msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ chan, dimm, -1, "error", "", NULL);
}
}
@@ -1623,11 +1502,11 @@ static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
/*updated the edac core */
if (add0 != 0)
- i7core_rdimm_update_csrow(mci, chan, 0, add0);
+ i7core_rdimm_update_errcount(mci, chan, 0, add0);
if (add1 != 0)
- i7core_rdimm_update_csrow(mci, chan, 1, add1);
+ i7core_rdimm_update_errcount(mci, chan, 1, add1);
if (add2 != 0)
- i7core_rdimm_update_csrow(mci, chan, 2, add2);
+ i7core_rdimm_update_errcount(mci, chan, 2, add2);
}
@@ -1747,20 +1626,30 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
const struct mce *m)
{
struct i7core_pvt *pvt = mci->pvt_info;
- char *type, *optype, *err, *msg;
+ char *type, *optype, *err, msg[80];
+ enum hw_event_mc_err_type tp_event;
unsigned long error = m->status & 0x1ff0000l;
+ bool uncorrected_error = m->mcgstatus & 1ll << 61;
+ bool ripv = m->mcgstatus & 1;
u32 optypenum = (m->status >> 4) & 0x07;
u32 core_err_cnt = (m->status >> 38) & 0x7fff;
u32 dimm = (m->misc >> 16) & 0x3;
u32 channel = (m->misc >> 18) & 0x3;
u32 syndrome = m->misc >> 32;
u32 errnum = find_first_bit(&error, 32);
- int csrow;
- if (m->mcgstatus & 1)
- type = "FATAL";
- else
- type = "NON_FATAL";
+ if (uncorrected_error) {
+ if (ripv) {
+ type = "FATAL";
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else {
+ type = "NON_FATAL";
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
+ }
+ } else {
+ type = "CORRECTED";
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
switch (optypenum) {
case 0:
@@ -1815,27 +1704,20 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
err = "unknown";
}
- /* FIXME: should convert addr into bank and rank information */
- msg = kasprintf(GFP_ATOMIC,
- "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
- "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
- type, (long long) m->addr, m->cpu, dimm, channel,
- syndrome, core_err_cnt, (long long)m->status,
- (long long)m->misc, optype, err);
-
- debugf0("%s", msg);
-
- csrow = pvt->csrow_map[channel][dimm];
+ snprintf(msg, sizeof(msg), "count=%d %s", core_err_cnt, optype);
- /* Call the helper to output message */
- if (m->mcgstatus & 1)
- edac_mc_handle_fbd_ue(mci, csrow, 0,
- 0 /* FIXME: should be channel here */, msg);
- else if (!pvt->is_registered)
- edac_mc_handle_fbd_ce(mci, csrow,
- 0 /* FIXME: should be channel here */, msg);
-
- kfree(msg);
+ /*
+ * Call the helper to output message
+ * FIXME: what to do if core_err_cnt > 1? Currently, it generates
+ * only one event
+ */
+ if (uncorrected_error || !pvt->is_registered)
+ edac_mc_handle_error(tp_event, mci,
+ m->addr >> PAGE_SHIFT,
+ m->addr & ~PAGE_MASK,
+ syndrome,
+ channel, dimm, -1,
+ err, msg, m);
}
/*
@@ -2252,15 +2134,19 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
{
struct mem_ctl_info *mci;
struct i7core_pvt *pvt;
- int rc, channels, csrows;
-
- /* Check the number of active and not disabled channels */
- rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
- if (unlikely(rc < 0))
- return rc;
+ int rc;
+ struct edac_mc_layer layers[2];
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
+
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = NUM_CHANS;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = MAX_DIMMS;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(i7core_dev->socket, ARRAY_SIZE(layers), layers,
+ sizeof(*pvt));
if (unlikely(!mci))
return -ENOMEM;
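get_dimm_config() above shows the other half of the conversion: per-DIMM properties now live in dimm_info objects located by layer coordinates instead of in csrow_info. The fragment below is a sketch of that lookup-and-fill step, with chan, slot, npages and socket as illustrative locals.

struct dimm_info *dimm;

/* Coordinates follow the layer order passed to edac_mc_alloc(). */
dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
		     chan, slot, 0);

dimm->nr_pages = npages;		/* DIMM size in PAGE_SIZE units */
dimm->grain = 8;			/* error address resolution, in bytes */
dimm->dtype = DEV_X8;			/* DRAM device width */
dimm->mtype = MEM_DDR3;			/* memory technology */
dimm->edac_mode = EDAC_SECDED;		/* ECC scheme */
snprintf(dimm->label, sizeof(dimm->label),
	 "CPU#%uChannel#%u_DIMM#%u", socket, chan, slot);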
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 3bf2b2f490e7..52072c28a8a6 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -12,7 +12,7 @@
* 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>.
*
* Written with reference to 82443BX Host Bridge Datasheet:
- * http://download.intel.com/design/chipsets/datashts/29063301.pdf
+ * http://download.intel.com/design/chipsets/datashts/29063301.pdf
* references to this document given in [].
*
* This module doesn't support the 440LX, but it may be possible to
@@ -156,19 +156,19 @@ static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
error_found = 1;
if (handle_errors)
- edac_mc_handle_ce(mci, page, pageoffset,
- /* 440BX/GX don't make syndrome information
- * available */
- 0, edac_mc_find_csrow_by_page(mci, page), 0,
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, pageoffset, 0,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1, mci->ctl_name, "", NULL);
}
if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
error_found = 1;
if (handle_errors)
- edac_mc_handle_ue(mci, page, pageoffset,
- edac_mc_find_csrow_by_page(mci, page),
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, pageoffset, 0,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1, mci->ctl_name, "", NULL);
}
return error_found;
@@ -189,6 +189,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
enum mem_type mtype)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
int index;
u8 drbar, dramc;
u32 row_base, row_high_limit, row_high_limit_last;
@@ -197,6 +198,8 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
row_high_limit_last = 0;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
+
pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
mci->mc_idx, __FILE__, __func__, index, drbar);
@@ -217,14 +220,14 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
row_base = row_high_limit_last;
csrow->first_page = row_base >> PAGE_SHIFT;
csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
- csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+ dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
/* EAP reports in 4kilobyte granularity [61] */
- csrow->grain = 1 << 12;
- csrow->mtype = mtype;
+ dimm->grain = 1 << 12;
+ dimm->mtype = mtype;
/* I don't think 440BX can tell you device type? FIXME? */
- csrow->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
/* Mode is global to all rows on 440BX */
- csrow->edac_mode = edac_mode;
+ dimm->edac_mode = edac_mode;
row_high_limit_last = row_high_limit;
}
}
@@ -232,6 +235,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
u8 dramc;
u32 nbxcfg, ecc_mode;
enum mem_type mtype;
@@ -245,8 +249,13 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
return -EIO;
- mci = edac_mc_alloc(0, I82443BXGX_NR_CSROWS, I82443BXGX_NR_CHANS, 0);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I82443BXGX_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = I82443BXGX_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index c779092d18d1..08045059d10b 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -99,6 +99,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
struct i82860_error_info *info,
int handle_errors)
{
+ struct dimm_info *dimm;
int row;
if (!(info->errsts2 & 0x0003))
@@ -108,18 +109,25 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & 0x0003) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
info->eap >>= PAGE_SHIFT;
row = edac_mc_find_csrow_by_page(mci, info->eap);
+ dimm = mci->csrows[row].channels[0].dimm;
if (info->errsts & 0x0002)
- edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ info->eap, 0, 0,
+ dimm->location[0], dimm->location[1], -1,
+ "i82860 UE", "", NULL);
else
- edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0,
- "i82860 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ info->eap, 0, info->derrsyn,
+ dimm->location[0], dimm->location[1], -1,
+ "i82860 CE", "", NULL);
return 1;
}
@@ -140,6 +148,7 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
u16 value;
u32 cumul_size;
struct csrow_info *csrow;
+ struct dimm_info *dimm;
int index;
pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
@@ -153,6 +162,8 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
*/
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
+
pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
cumul_size = (value & I82860_GBA_MASK) <<
(I82860_GBA_SHIFT - PAGE_SHIFT);
@@ -164,30 +175,38 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ dimm->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */
- csrow->mtype = MEM_RMBS;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
+ dimm->grain = 1 << 12; /* I82860_EAP has 4KiB resolution */
+ dimm->mtype = MEM_RMBS;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
}
}
static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i82860_error_info discard;
- /* RDRAM has channels but these don't map onto the abstractions that
- edac uses.
- The device groups from the GRA registers seem to map reasonably
- well onto the notion of a chip select row.
- There are 16 GRA registers and since the name is associated with
- the channel and the GRA registers map to physical devices so we are
- going to make 1 channel for group.
+ /*
+ * RDRAM has channels but these don't map onto the csrow abstraction.
+ * According to the datasheet, there are 2 Rambus channels, supporting
+ * up to 16 direct RDRAM devices.
+ * The device groups from the GRA registers seem to map reasonably
+ * well onto the notion of a chip select row.
+ * There are 16 GRA registers; since the name is associated with
+ * the channel and the GRA registers map to physical devices, we
+ * make one channel per group.
*/
- mci = edac_mc_alloc(0, 16, 1, 0);
-
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 2;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = 8;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 10f15d85fb5e..b613e31c16e5 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -38,7 +38,8 @@
#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
/* four csrows in dual channel, eight in single channel */
-#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
+#define I82875P_NR_DIMMS 8
+#define I82875P_NR_CSROWS(nr_chans) (I82875P_NR_DIMMS / (nr_chans))
/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
@@ -235,7 +236,9 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & 0x0081) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
@@ -243,11 +246,15 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
row = edac_mc_find_csrow_by_page(mci, info->eap);
if (info->errsts & 0x0080)
- edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ info->eap, 0, 0,
+ row, -1, -1,
+ "i82875p UE", "", NULL);
else
- edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
- multi_chan ? (info->des & 0x1) : 0,
- "i82875p CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ info->eap, 0, info->derrsyn,
+ row, multi_chan ? (info->des & 0x1) : 0,
+ -1, "i82875p CE", "", NULL);
return 1;
}
@@ -342,11 +349,13 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
void __iomem * ovrfl_window, u32 drc)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
+ unsigned nr_chans = dual_channel_active(drc) + 1;
unsigned long last_cumul_size;
u8 value;
u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
- u32 cumul_size;
- int index;
+ u32 cumul_size, nr_pages;
+ int index, j;
drc_ddim = (drc >> 18) & 0x1;
last_cumul_size = 0;
@@ -369,12 +378,18 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */
- csrow->mtype = MEM_DDR;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+
+ for (j = 0; j < nr_chans; j++) {
+ dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / nr_chans;
+ dimm->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */
+ dimm->mtype = MEM_DDR;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+ }
}
}
@@ -382,6 +397,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc = -ENODEV;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i82875p_pvt *pvt;
struct pci_dev *ovrfl_pdev;
void __iomem *ovrfl_window;
@@ -397,9 +413,14 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
return -ENODEV;
drc = readl(ovrfl_window + I82875P_DRC);
nr_chans = dual_channel_active(drc) + 1;
- mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
- nr_chans, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I82875P_NR_CSROWS(nr_chans);
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_chans;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (!mci) {
rc = -ENOMEM;
goto fail0;
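For chipsets where the chip-select row is the real hardware unit, the row size is now carried by the per-channel dimm_info objects, so i82875p_init_csrows() above splits the page count evenly across the channels. A short sketch of that split, with index, nr_pages, nr_chans and drc_ddim as illustrative locals:

struct csrow_info *csrow = &mci->csrows[index];
struct dimm_info *dimm;
int j;

for (j = 0; j < nr_chans; j++) {
	dimm = csrow->channels[j].dimm;

	dimm->nr_pages = nr_pages / nr_chans;	/* share the row between channels */
	dimm->grain = 1 << 12;			/* 4 KiB error address resolution */
	dimm->mtype = MEM_DDR;
	dimm->dtype = DEV_UNKNOWN;
	dimm->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
}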
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 0cd8368f88f8..433332c7cdba 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -29,7 +29,8 @@
#define PCI_DEVICE_ID_INTEL_82975_0 0x277c
#endif /* PCI_DEVICE_ID_INTEL_82975_0 */
-#define I82975X_NR_CSROWS(nr_chans) (8/(nr_chans))
+#define I82975X_NR_DIMMS 8
+#define I82975X_NR_CSROWS(nr_chans) (I82975X_NR_DIMMS / (nr_chans))
/* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */
#define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b)
@@ -287,7 +288,8 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & 0x0003) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
@@ -309,13 +311,18 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1;
offst = info->eap
& ((1 << PAGE_SHIFT) -
- (1 << mci->csrows[row].grain));
+ (1 << mci->csrows[row].channels[chan].dimm->grain));
if (info->errsts & 0x0002)
- edac_mc_handle_ue(mci, page, offst , row, "i82975x UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offst, 0,
+ row, -1, -1,
+ "i82975x UE", "", NULL);
else
- edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
- chan, "i82975x CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offst, info->derrsyn,
+ row, chan ? chan : 0, -1,
+ "i82975x CE", "", NULL);
return 1;
}
@@ -370,8 +377,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
struct csrow_info *csrow;
unsigned long last_cumul_size;
u8 value;
- u32 cumul_size;
+ u32 cumul_size, nr_pages;
int index, chan;
+ struct dimm_info *dimm;
+ enum dev_type dtype;
last_cumul_size = 0;
@@ -400,28 +409,33 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
cumul_size);
+ nr_pages = cumul_size - last_cumul_size;
+ if (!nr_pages)
+ continue;
+
/*
* Initialise dram labels
* index values:
* [0-7] for single-channel; i.e. csrow->nr_channels = 1
* [0-3] for dual-channel; i.e. csrow->nr_channels = 2
*/
- for (chan = 0; chan < csrow->nr_channels; chan++)
- strncpy(csrow->channels[chan].label,
+ dtype = i82975x_dram_type(mch_window, index);
+ for (chan = 0; chan < csrow->nr_channels; chan++) {
+ dimm = mci->csrows[index].channels[chan].dimm;
+
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ strncpy(csrow->channels[chan].dimm->label,
labels[(index >> 1) + (chan * 2)],
EDAC_MC_LABEL_LEN);
-
- if (cumul_size == last_cumul_size)
- continue; /* not populated */
+ dimm->grain = 1 << 7; /* 128Byte cache-line resolution */
+ dimm->dtype = i82975x_dram_type(mch_window, index);
+ dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
+ dimm->edac_mode = EDAC_SECDED; /* only supported */
+ }
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 7; /* 128Byte cache-line resolution */
- csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
- csrow->dtype = i82975x_dram_type(mch_window, index);
- csrow->edac_mode = EDAC_SECDED; /* only supported */
}
}
@@ -463,6 +477,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc = -ENODEV;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i82975x_pvt *pvt;
void __iomem *mch_window;
u32 mchbar;
@@ -531,8 +546,13 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
chans = dual_channel_active(mch_window) + 1;
/* assuming only one controller, index thus is 0 */
- mci = edac_mc_alloc(sizeof(*pvt), I82975X_NR_CSROWS(chans),
- chans, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I82975X_NR_DIMMS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = I82975X_NR_CSROWS(chans);
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (!mci) {
rc = -ENOMEM;
goto fail1;
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index c6074c5cd1ef..8c87a5e87057 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -5,8 +5,6 @@
#include <asm/mce.h>
-#define BIT_64(n) (U64_C(1) << (n))
-
#define EC(x) ((x) & 0xffff)
#define XEC(x, mask) (((x) >> 16) & mask)
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 73464a62adf7..4c402353ba98 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -854,12 +854,16 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
if (err_detect & DDR_EDE_SBE)
- edac_mc_handle_ce(mci, pfn, err_addr & ~PAGE_MASK,
- syndrome, row_index, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ pfn, err_addr & ~PAGE_MASK, syndrome,
+ row_index, 0, -1,
+ mci->ctl_name, "", NULL);
if (err_detect & DDR_EDE_MBE)
- edac_mc_handle_ue(mci, pfn, err_addr & ~PAGE_MASK,
- row_index, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ pfn, err_addr & ~PAGE_MASK, syndrome,
+ row_index, 0, -1,
+ mci->ctl_name, "", NULL);
out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
}
@@ -883,6 +887,7 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
{
struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
struct csrow_info *csrow;
+ struct dimm_info *dimm;
u32 sdram_ctl;
u32 sdtype;
enum mem_type mtype;
@@ -929,6 +934,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
u32 end;
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
+
cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
(index * MPC85XX_MC_CS_BNDS_OFS));
@@ -944,19 +951,21 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
csrow->first_page = start;
csrow->last_page = end;
- csrow->nr_pages = end + 1 - start;
- csrow->grain = 8;
- csrow->mtype = mtype;
- csrow->dtype = DEV_UNKNOWN;
+
+ dimm->nr_pages = end + 1 - start;
+ dimm->grain = 8;
+ dimm->mtype = mtype;
+ dimm->dtype = DEV_UNKNOWN;
if (sdram_ctl & DSC_X32_EN)
- csrow->dtype = DEV_X32;
- csrow->edac_mode = EDAC_SECDED;
+ dimm->dtype = DEV_X32;
+ dimm->edac_mode = EDAC_SECDED;
}
}
static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct mpc85xx_mc_pdata *pdata;
struct resource r;
u32 sdram_ctl;
@@ -965,7 +974,13 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
return -ENOMEM;
- mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 4;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers, sizeof(*pdata));
if (!mci) {
devres_release_group(&op->dev, mpc85xx_mc_err_probe);
return -ENOMEM;
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 7e5ff367705c..b0bb5a3d2527 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -611,12 +611,17 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci)
/* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
if (!(reg & 0x1))
- edac_mc_handle_ce(mci, err_addr >> PAGE_SHIFT,
- err_addr & PAGE_MASK, syndrome, 0, 0,
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ err_addr >> PAGE_SHIFT,
+ err_addr & PAGE_MASK, syndrome,
+ 0, 0, -1,
+ mci->ctl_name, "", NULL);
else /* 2 bit error, UE */
- edac_mc_handle_ue(mci, err_addr >> PAGE_SHIFT,
- err_addr & PAGE_MASK, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ err_addr >> PAGE_SHIFT,
+ err_addr & PAGE_MASK, 0,
+ 0, 0, -1,
+ mci->ctl_name, "", NULL);
/* clear the error */
out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
@@ -656,6 +661,8 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
struct mv64x60_mc_pdata *pdata)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
+
u32 devtype;
u32 ctl;
@@ -664,35 +671,36 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
csrow = &mci->csrows[0];
- csrow->first_page = 0;
- csrow->nr_pages = pdata->total_mem >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->grain = 8;
+ dimm = csrow->channels[0].dimm;
+
+ dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
+ dimm->grain = 8;
- csrow->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
+ dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
devtype = (ctl >> 20) & 0x3;
switch (devtype) {
case 0x0:
- csrow->dtype = DEV_X32;
+ dimm->dtype = DEV_X32;
break;
case 0x2: /* could be X8 too, but no way to tell */
- csrow->dtype = DEV_X16;
+ dimm->dtype = DEV_X16;
break;
case 0x3:
- csrow->dtype = DEV_X4;
+ dimm->dtype = DEV_X4;
break;
default:
- csrow->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
break;
}
- csrow->edac_mode = EDAC_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
}
static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct mv64x60_mc_pdata *pdata;
struct resource *r;
u32 ctl;
@@ -701,7 +709,14 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
return -ENOMEM;
- mci = edac_mc_alloc(sizeof(struct mv64x60_mc_pdata), 1, 1, edac_mc_idx);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+ sizeof(struct mv64x60_mc_pdata));
if (!mci) {
printk(KERN_ERR "%s: No memory for CPU err\n", __func__);
devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index 7f71ee436744..b095a906a994 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -110,15 +110,16 @@ static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
/* uncorrectable/multi-bit errors */
if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
MCDEBUG_ERRSTA_RFL_STATUS)) {
- edac_mc_handle_ue(mci, mci->csrows[cs].first_page, 0,
- cs, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ mci->csrows[cs].first_page, 0, 0,
+ cs, 0, -1, mci->ctl_name, "", NULL);
}
/* correctable/single-bit errors */
- if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) {
- edac_mc_handle_ce(mci, mci->csrows[cs].first_page, 0,
- 0, cs, 0, mci->ctl_name);
- }
+ if (errsta & MCDEBUG_ERRSTA_SBE_STATUS)
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ mci->csrows[cs].first_page, 0, 0,
+ cs, 0, -1, mci->ctl_name, "", NULL);
}
static void pasemi_edac_check(struct mem_ctl_info *mci)
@@ -135,11 +136,13 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
enum edac_type edac_mode)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
u32 rankcfg;
int index;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
pci_read_config_dword(pdev,
MCDRAM_RANKCFG + (index * 12),
@@ -151,20 +154,20 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >>
MCDRAM_RANKCFG_TYPE_SIZE_S) {
case 0:
- csrow->nr_pages = 128 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 128 << (20 - PAGE_SHIFT);
break;
case 1:
- csrow->nr_pages = 256 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 256 << (20 - PAGE_SHIFT);
break;
case 2:
case 3:
- csrow->nr_pages = 512 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 512 << (20 - PAGE_SHIFT);
break;
case 4:
- csrow->nr_pages = 1024 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 1024 << (20 - PAGE_SHIFT);
break;
case 5:
- csrow->nr_pages = 2048 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 2048 << (20 - PAGE_SHIFT);
break;
default:
edac_mc_printk(mci, KERN_ERR,
@@ -174,13 +177,13 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
}
csrow->first_page = last_page_in_mmc;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- last_page_in_mmc += csrow->nr_pages;
+ csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
+ last_page_in_mmc += dimm->nr_pages;
csrow->page_mask = 0;
- csrow->grain = PASEMI_EDAC_ERROR_GRAIN;
- csrow->mtype = MEM_DDR;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = edac_mode;
+ dimm->grain = PASEMI_EDAC_ERROR_GRAIN;
+ dimm->mtype = MEM_DDR;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = edac_mode;
}
return 0;
}
@@ -189,6 +192,7 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
u32 errctl1, errcor, scrub, mcen;
pci_read_config_dword(pdev, MCCFG_MCEN, &mcen);
@@ -205,9 +209,14 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
MCDEBUG_ERRCTL1_RFL_LOG_EN;
pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1);
- mci = edac_mc_alloc(0, PASEMI_EDAC_NR_CSROWS, PASEMI_EDAC_NR_CHANS,
- system_mmc_id++);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = PASEMI_EDAC_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = PASEMI_EDAC_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers,
+ 0);
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index d427c69bb8b1..f3f9fed06ad7 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -727,7 +727,10 @@ ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
for (row = 0; row < mci->nr_csrows; row++)
if (ppc4xx_edac_check_bank_error(status, row))
- edac_mc_handle_ce_no_info(mci, message);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, 0,
+ row, 0, -1,
+ message, "", NULL);
}
/**
@@ -755,7 +758,10 @@ ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
for (row = 0; row < mci->nr_csrows; row++)
if (ppc4xx_edac_check_bank_error(status, row))
- edac_mc_handle_ue(mci, page, offset, row, message);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ row, 0, -1,
+ message, "", NULL);
}
/**
@@ -895,9 +901,8 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
enum mem_type mtype;
enum dev_type dtype;
enum edac_type edac_mode;
- int row;
- u32 mbxcf, size;
- static u32 ppc4xx_last_page;
+ int row, j;
+ u32 mbxcf, size, nr_pages;
/* Establish the memory type and width */
@@ -948,7 +953,7 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
case SDRAM_MBCF_SZ_2GB:
case SDRAM_MBCF_SZ_4GB:
case SDRAM_MBCF_SZ_8GB:
- csi->nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
+ nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
break;
default:
ppc4xx_edac_mc_printk(KERN_ERR, mci,
@@ -959,10 +964,6 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
goto done;
}
- csi->first_page = ppc4xx_last_page;
- csi->last_page = csi->first_page + csi->nr_pages - 1;
- csi->page_mask = 0;
-
/*
* It's unclear exactly what grain should be set to
* here. The SDRAM_ECCES register allows resolution of
@@ -975,15 +976,17 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
* possible values would be the PLB width (16), the
* page size (PAGE_SIZE) or the memory width (2 or 4).
*/
+ for (j = 0; j < csi->nr_channels; j++) {
+ struct dimm_info *dimm = csi->channels[j].dimm;
- csi->grain = 1;
-
- csi->mtype = mtype;
- csi->dtype = dtype;
+ dimm->nr_pages = nr_pages / csi->nr_channels;
+ dimm->grain = 1;
- csi->edac_mode = edac_mode;
+ dimm->mtype = mtype;
+ dimm->dtype = dtype;
- ppc4xx_last_page += csi->nr_pages;
+ dimm->edac_mode = edac_mode;
+ }
}
done:
@@ -1236,6 +1239,7 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
dcr_host_t dcr_host;
const struct device_node *np = op->dev.of_node;
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
static int ppc4xx_edac_instance;
/*
@@ -1281,12 +1285,14 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
* controller instance and perform the appropriate
* initialization.
*/
-
- mci = edac_mc_alloc(sizeof(struct ppc4xx_edac_pdata),
- ppc4xx_edac_nr_csrows,
- ppc4xx_edac_nr_chans,
- ppc4xx_edac_instance);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = ppc4xx_edac_nr_csrows;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = ppc4xx_edac_nr_chans;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(ppc4xx_edac_instance, ARRAY_SIZE(layers), layers,
+ sizeof(struct ppc4xx_edac_pdata));
if (mci == NULL) {
ppc4xx_edac_printk(KERN_ERR, "%s: "
"Failed to allocate EDAC MC instance!\n",
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 6d908ad72d64..e1cacd164f31 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -179,10 +179,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
error_found = 1;
if (handle_errors)
- edac_mc_handle_ce(mci, page, 0, /* not avail */
- syndrome,
- edac_mc_find_csrow_by_page(mci, page),
- 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, 0, syndrome,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1,
+ mci->ctl_name, "", NULL);
}
if (info->eapr & BIT(1)) { /* UE? */
@@ -190,9 +191,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
if (handle_errors)
/* 82600 doesn't give enough info */
- edac_mc_handle_ue(mci, page, 0,
- edac_mc_find_csrow_by_page(mci, page),
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, 0, 0,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1,
+ mci->ctl_name, "", NULL);
}
return error_found;
@@ -216,6 +219,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
u8 dramcr)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
int index;
u8 drbar; /* SDRAM Row Boundary Address Register */
u32 row_high_limit, row_high_limit_last;
@@ -227,6 +231,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
/* find the DRAM Chip Select Base address and mask */
pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
@@ -247,16 +252,17 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
csrow->first_page = row_base >> PAGE_SHIFT;
csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
- csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+
+ dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
/* Error address is top 19 bits - so granularity is *
* 14 bits */
- csrow->grain = 1 << 14;
- csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
+ dimm->grain = 1 << 14;
+ dimm->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
/* FIXME - check that this is unknowable with this chipset */
- csrow->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
/* Mode is global on 82600 */
- csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
+ dimm->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
row_high_limit_last = row_high_limit;
}
}
@@ -264,6 +270,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
u8 dramcr;
u32 eapr;
u32 scrub_disabled;
@@ -278,8 +285,13 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
sdram_refresh_rate);
debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
- mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = R82600_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = R82600_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 123204f8e23b..4adaf4b7da99 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -314,8 +314,6 @@ struct sbridge_pvt {
struct sbridge_info info;
struct sbridge_channel channel[NUM_CHANNELS];
- int csrow_map[NUM_CHANNELS][MAX_DIMMS];
-
/* Memory type detection */
bool is_mirrored, is_lockstep, is_close_pg;
@@ -487,29 +485,14 @@ static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot,
}
/**
- * sbridge_get_active_channels() - gets the number of channels and csrows
+ * check_if_ecc_is_active() - Checks if ECC is active
* bus: Device bus
- * @channels: Number of channels that will be returned
- * @csrows: Number of csrows found
- *
- * Since EDAC core needs to know in advance the number of available channels
- * and csrows, in order to allocate memory for csrows/channels, it is needed
- * to run two similar steps. At the first step, implemented on this function,
- * it checks the number of csrows/channels present at one socket, identified
- * by the associated PCI bus.
- * this is used in order to properly allocate the size of mci components.
- * Note: one csrow is one dimm.
*/
-static int sbridge_get_active_channels(const u8 bus, unsigned *channels,
- unsigned *csrows)
+static int check_if_ecc_is_active(const u8 bus)
{
struct pci_dev *pdev = NULL;
- int i, j;
u32 mcmtr;
- *channels = 0;
- *csrows = 0;
-
pdev = get_pdev_slot_func(bus, 15, 0);
if (!pdev) {
sbridge_printk(KERN_ERR, "Couldn't find PCI device "
@@ -523,41 +506,14 @@ static int sbridge_get_active_channels(const u8 bus, unsigned *channels,
sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
return -ENODEV;
}
-
- for (i = 0; i < NUM_CHANNELS; i++) {
- u32 mtr;
-
- /* Device 15 functions 2 - 5 */
- pdev = get_pdev_slot_func(bus, 15, 2 + i);
- if (!pdev) {
- sbridge_printk(KERN_ERR, "Couldn't find PCI device "
- "%2x.%02d.%d!!!\n",
- bus, 15, 2 + i);
- return -ENODEV;
- }
- (*channels)++;
-
- for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
- pci_read_config_dword(pdev, mtr_regs[j], &mtr);
- debugf1("Bus#%02x channel #%d MTR%d = %x\n", bus, i, j, mtr);
- if (IS_DIMM_PRESENT(mtr))
- (*csrows)++;
- }
- }
-
- debugf0("Number of active channels: %d, number of active dimms: %d\n",
- *channels, *csrows);
-
return 0;
}
-static int get_dimm_config(const struct mem_ctl_info *mci)
+static int get_dimm_config(struct mem_ctl_info *mci)
{
struct sbridge_pvt *pvt = mci->pvt_info;
- struct csrow_info *csr;
+ struct dimm_info *dimm;
int i, j, banks, ranks, rows, cols, size, npages;
- int csrow = 0;
- unsigned long last_page = 0;
u32 reg;
enum edac_type mode;
enum mem_type mtype;
@@ -616,6 +572,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
u32 mtr;
for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ i, j, 0);
pci_read_config_dword(pvt->pci_tad[i],
mtr_regs[j], &mtr);
debugf4("Channel #%d MTR%d = %x\n", i, j, mtr);
@@ -634,29 +592,15 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
pvt->sbridge_dev->mc, i, j,
size, npages,
banks, ranks, rows, cols);
- csr = &mci->csrows[csrow];
-
- csr->first_page = last_page;
- csr->last_page = last_page + npages - 1;
- csr->page_mask = 0UL; /* Unused */
- csr->nr_pages = npages;
- csr->grain = 32;
- csr->csrow_idx = csrow;
- csr->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
- csr->ce_count = 0;
- csr->ue_count = 0;
- csr->mtype = mtype;
- csr->edac_mode = mode;
- csr->nr_channels = 1;
- csr->channels[0].chan_idx = i;
- csr->channels[0].ce_count = 0;
- pvt->csrow_map[i][j] = csrow;
- snprintf(csr->channels[0].label,
- sizeof(csr->channels[0].label),
+
+ dimm->nr_pages = npages;
+ dimm->grain = 32;
+ dimm->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
+ dimm->mtype = mtype;
+ dimm->edac_mode = mode;
+ snprintf(dimm->label, sizeof(dimm->label),
"CPU_SrcID#%u_Channel#%u_DIMM#%u",
pvt->sbridge_dev->source_id, i, j);
- last_page += npages;
- csrow++;
}
}
}
@@ -844,11 +788,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
u8 *socket,
long *channel_mask,
u8 *rank,
- char *area_type)
+ char **area_type, char *msg)
{
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
- char msg[256];
int n_rir, n_sads, n_tads, sad_way, sck_xch;
int sad_interl, idx, base_ch;
int interleave_mode;
@@ -870,12 +813,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
*/
if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
if (addr >= (u64)pvt->tohm) {
sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
@@ -892,7 +833,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
limit = SAD_LIMIT(reg);
if (limit <= prv) {
sprintf(msg, "Can't discover the memory socket");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
if (addr <= limit)
@@ -901,10 +841,9 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
if (n_sads == MAX_SAD) {
sprintf(msg, "Can't discover the memory socket");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
- area_type = get_dram_attr(reg);
+ *area_type = get_dram_attr(reg);
interleave_mode = INTERLEAVE_MODE(reg);
pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
@@ -942,7 +881,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
break;
default:
sprintf(msg, "Can't discover socket interleave");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
*socket = sad_interleave[idx];
@@ -957,7 +895,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
if (!new_mci) {
sprintf(msg, "Struct for socket #%u wasn't initialized",
*socket);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
mci = new_mci;
@@ -973,7 +910,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
limit = TAD_LIMIT(reg);
if (limit <= prv) {
sprintf(msg, "Can't discover the memory channel");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
if (addr <= limit)
@@ -1013,7 +949,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
break;
default:
sprintf(msg, "Can't discover the TAD target");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
*channel_mask = 1 << base_ch;
@@ -1027,7 +962,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
break;
default:
sprintf(msg, "Invalid mirror set. Can't decode addr");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
} else
@@ -1055,7 +989,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
if (offset > addr) {
sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
offset, addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
addr -= offset;
@@ -1095,7 +1028,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
if (n_rir == MAX_RIR_RANGES) {
sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
ch_addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
rir_way = RIR_WAY(reg);
@@ -1409,7 +1341,8 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
{
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
- char *type, *optype, *msg, *recoverable_msg;
+ enum hw_event_mc_err_type tp_event;
+ char *type, *optype, msg[256];
bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
@@ -1421,13 +1354,21 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
u32 optypenum = GET_BITFIELD(m->status, 4, 6);
long channel_mask, first_channel;
u8 rank, socket;
- int csrow, rc, dimm;
- char *area_type = "Unknown";
-
- if (ripv)
- type = "NON_FATAL";
- else
- type = "FATAL";
+ int rc, dimm;
+ char *area_type = NULL;
+
+ if (uncorrected_error) {
+ if (ripv) {
+ type = "FATAL";
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else {
+ type = "NON_FATAL";
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
+ }
+ } else {
+ type = "CORRECTED";
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
/*
* According to Table 15-9 of the Intel Architecture spec vol 3A,
@@ -1445,19 +1386,19 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
} else {
switch (optypenum) {
case 0:
- optype = "generic undef request";
+ optype = "generic undef request error";
break;
case 1:
- optype = "memory read";
+ optype = "memory read error";
break;
case 2:
- optype = "memory write";
+ optype = "memory write error";
break;
case 3:
- optype = "addr/cmd";
+ optype = "addr/cmd error";
break;
case 4:
- optype = "memory scrubbing";
+ optype = "memory scrubbing error";
break;
default:
optype = "reserved";
@@ -1466,13 +1407,13 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
}
rc = get_memory_error_data(mci, m->addr, &socket,
- &channel_mask, &rank, area_type);
+ &channel_mask, &rank, &area_type, msg);
if (rc < 0)
- return;
+ goto err_parsing;
new_mci = get_mci_for_node_id(socket);
if (!new_mci) {
- edac_mc_handle_ce_no_info(mci, "Error: socket got corrupted!");
- return;
+ strcpy(msg, "Error: socket got corrupted!");
+ goto err_parsing;
}
mci = new_mci;
pvt = mci->pvt_info;
@@ -1486,45 +1427,39 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
else
dimm = 2;
- csrow = pvt->csrow_map[first_channel][dimm];
-
- if (uncorrected_error && recoverable)
- recoverable_msg = " recoverable";
- else
- recoverable_msg = "";
/*
- * FIXME: What should we do with "channel" information on mcelog?
- * Probably, we can just discard it, as the channel information
- * comes from the get_memory_error_data() address decoding
+ * FIXME: On some memory configurations (mirror, lockstep), the
+ * Memory Controller can't point the error to a single DIMM. The
+ * EDAC core should be handling the channel mask, in order to point
+ * to the group of DIMMs where the error may be happening.
*/
- msg = kasprintf(GFP_ATOMIC,
- "%d %s error(s): %s on %s area %s%s: cpu=%d Err=%04x:%04x (ch=%d), "
- "addr = 0x%08llx => socket=%d, Channel=%ld(mask=%ld), rank=%d\n",
- core_err_cnt,
- area_type,
- optype,
- type,
- recoverable_msg,
- overflow ? "OVERFLOW" : "",
- m->cpu,
- mscod, errcode,
- channel, /* 1111b means not specified */
- (long long) m->addr,
- socket,
- first_channel, /* This is the real channel on SB */
- channel_mask,
- rank);
+ snprintf(msg, sizeof(msg),
+ "count:%d%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
+ core_err_cnt,
+ overflow ? " OVERFLOW" : "",
+ (uncorrected_error && recoverable) ? " recoverable" : "",
+ area_type,
+ mscod, errcode,
+ socket,
+ channel_mask,
+ rank);
debugf0("%s", msg);
+ /* FIXME: need support for channel mask */
+
/* Call the helper to output message */
- if (uncorrected_error)
- edac_mc_handle_fbd_ue(mci, csrow, 0, 0, msg);
- else
- edac_mc_handle_fbd_ce(mci, csrow, 0, msg);
+ edac_mc_handle_error(tp_event, mci,
+ m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
+ channel, dimm, -1,
+ optype, msg, m);
+ return;
+err_parsing:
+ edac_mc_handle_error(tp_event, mci, 0, 0, 0,
+ -1, -1, -1,
+ msg, "", m);
- kfree(msg);
}
/*
@@ -1683,16 +1618,25 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct sbridge_pvt *pvt;
- int rc, channels, csrows;
+ int rc;
/* Check the number of active and not disabled channels */
- rc = sbridge_get_active_channels(sbridge_dev->bus, &channels, &csrows);
+ rc = check_if_ecc_is_active(sbridge_dev->bus);
if (unlikely(rc < 0))
return rc;
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, sbridge_dev->mc);
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = NUM_CHANNELS;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = MAX_DIMMS;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
+ sizeof(*pvt));
+
if (unlikely(!mci))
return -ENOMEM;
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index e99d00976189..7bb4614730db 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -71,7 +71,10 @@ static void tile_edac_check(struct mem_ctl_info *mci)
if (mem_error.sbe_count != priv->ce_count) {
dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node);
priv->ce_count = mem_error.sbe_count;
- edac_mc_handle_ce(mci, 0, 0, 0, 0, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, 0,
+ 0, 0, -1,
+ mci->ctl_name, "", NULL);
}
}
@@ -84,6 +87,7 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
struct csrow_info *csrow = &mci->csrows[0];
struct tile_edac_priv *priv = mci->pvt_info;
struct mshim_mem_info mem_info;
+ struct dimm_info *dimm = csrow->channels[0].dimm;
if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
@@ -93,27 +97,25 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
}
if (mem_info.mem_ecc)
- csrow->edac_mode = EDAC_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
else
- csrow->edac_mode = EDAC_NONE;
+ dimm->edac_mode = EDAC_NONE;
switch (mem_info.mem_type) {
case DDR2:
- csrow->mtype = MEM_DDR2;
+ dimm->mtype = MEM_DDR2;
break;
case DDR3:
- csrow->mtype = MEM_DDR3;
+ dimm->mtype = MEM_DDR3;
break;
default:
return -1;
}
- csrow->first_page = 0;
- csrow->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->grain = TILE_EDAC_ERROR_GRAIN;
- csrow->dtype = DEV_UNKNOWN;
+ dimm->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
+ dimm->grain = TILE_EDAC_ERROR_GRAIN;
+ dimm->dtype = DEV_UNKNOWN;
return 0;
}
@@ -123,6 +125,7 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
char hv_file[32];
int hv_devhdl;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct tile_edac_priv *priv;
int rc;
@@ -132,8 +135,14 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
return -EINVAL;
/* A TILE MC has a single channel and one chip-select row. */
- mci = edac_mc_alloc(sizeof(struct tile_edac_priv),
- TILE_EDAC_NR_CSROWS, TILE_EDAC_NR_CHANS, pdev->id);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = TILE_EDAC_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = TILE_EDAC_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
+ sizeof(struct tile_edac_priv));
if (mci == NULL)
return -ENOMEM;
priv = mci->pvt_info;
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index a438297389e5..1ac7962d63ea 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -215,19 +215,26 @@ static void x38_process_error_info(struct mem_ctl_info *mci,
return;
if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
for (channel = 0; channel < x38_channel_num; channel++) {
log = info->eccerrlog[channel];
if (log & X38_ECCERRLOG_UE) {
- edac_mc_handle_ue(mci, 0, 0,
- eccerrlog_row(channel, log), "x38 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, 0,
+ eccerrlog_row(channel, log),
+ -1, -1,
+ "x38 UE", "", NULL);
} else if (log & X38_ECCERRLOG_CE) {
- edac_mc_handle_ce(mci, 0, 0,
- eccerrlog_syndrome(log),
- eccerrlog_row(channel, log), 0, "x38 CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, eccerrlog_syndrome(log),
+ eccerrlog_row(channel, log),
+ -1, -1,
+ "x38 CE", "", NULL);
}
}
}
@@ -317,9 +324,9 @@ static unsigned long drb_to_nr_pages(
static int x38_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
- int i;
+ int i, j;
struct mem_ctl_info *mci = NULL;
- unsigned long last_page;
+ struct edac_mc_layer layers[2];
u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
bool stacked;
void __iomem *window;
@@ -335,7 +342,13 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
how_many_channel(pdev);
/* FIXME: unconventional pvt_info usage */
- mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = X38_RANKS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = x38_channel_num;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
@@ -363,7 +376,6 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
* cumulative; the last one will contain the total memory
* contained in all ranks.
*/
- last_page = -1UL;
for (i = 0; i < mci->nr_csrows; i++) {
unsigned long nr_pages;
struct csrow_info *csrow = &mci->csrows[i];
@@ -372,20 +384,18 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
i / X38_RANKS_PER_CHANNEL,
i % X38_RANKS_PER_CHANNEL);
- if (nr_pages == 0) {
- csrow->mtype = MEM_EMPTY;
+ if (nr_pages == 0)
continue;
- }
- csrow->first_page = last_page + 1;
- last_page += nr_pages;
- csrow->last_page = last_page;
- csrow->nr_pages = nr_pages;
+ for (j = 0; j < x38_channel_num; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
- csrow->grain = nr_pages << PAGE_SHIFT;
- csrow->mtype = MEM_DDR2;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = EDAC_UNKNOWN;
+ dimm->nr_pages = nr_pages / x38_channel_num;
+ dimm->grain = nr_pages << PAGE_SHIFT;
+ dimm->mtype = MEM_DDR2;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
}
x38_clear_error_info(mci);
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index f5552b362efc..57ea7f464178 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -421,8 +421,8 @@ static void bm_work(struct work_struct *work)
* root, and thus, IRM.
*/
new_root_id = local_id;
- fw_notice(card, "%s, making local node (%02x) root\n",
- "BM lock failed", new_root_id);
+ fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
+ fw_rcode_string(rcode), new_root_id);
goto pick_me;
}
} else if (card->bm_generation != generation) {
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 2e6b24547e2a..2783f69dada6 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -22,6 +22,7 @@
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
@@ -70,6 +71,7 @@ struct client {
u64 iso_closure;
struct fw_iso_buffer buffer;
unsigned long vm_start;
+ bool buffer_is_mapped;
struct list_head phy_receiver_link;
u64 phy_receiver_closure;
@@ -959,11 +961,20 @@ static void iso_mc_callback(struct fw_iso_context *context,
sizeof(e->interrupt), NULL, 0);
}
+static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
+{
+ if (context->type == FW_ISO_CONTEXT_TRANSMIT)
+ return DMA_TO_DEVICE;
+ else
+ return DMA_FROM_DEVICE;
+}
+
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
struct fw_iso_context *context;
fw_iso_callback_t cb;
+ int ret;
BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
@@ -1004,8 +1015,21 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
if (client->iso_context != NULL) {
spin_unlock_irq(&client->lock);
fw_iso_context_destroy(context);
+
return -EBUSY;
}
+ if (!client->buffer_is_mapped) {
+ ret = fw_iso_buffer_map_dma(&client->buffer,
+ client->device->card,
+ iso_dma_direction(context));
+ if (ret < 0) {
+ spin_unlock_irq(&client->lock);
+ fw_iso_context_destroy(context);
+
+ return ret;
+ }
+ client->buffer_is_mapped = true;
+ }
client->iso_closure = a->closure;
client->iso_context = context;
spin_unlock_irq(&client->lock);
@@ -1651,7 +1675,6 @@ static long fw_device_op_compat_ioctl(struct file *file,
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
struct client *client = file->private_data;
- enum dma_data_direction direction;
unsigned long size;
int page_count, ret;
@@ -1674,20 +1697,28 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
if (size & ~PAGE_MASK)
return -EINVAL;
- if (vma->vm_flags & VM_WRITE)
- direction = DMA_TO_DEVICE;
- else
- direction = DMA_FROM_DEVICE;
-
- ret = fw_iso_buffer_init(&client->buffer, client->device->card,
- page_count, direction);
+ ret = fw_iso_buffer_alloc(&client->buffer, page_count);
if (ret < 0)
return ret;
- ret = fw_iso_buffer_map(&client->buffer, vma);
+ spin_lock_irq(&client->lock);
+ if (client->iso_context) {
+ ret = fw_iso_buffer_map_dma(&client->buffer,
+ client->device->card,
+ iso_dma_direction(client->iso_context));
+ client->buffer_is_mapped = (ret == 0);
+ }
+ spin_unlock_irq(&client->lock);
if (ret < 0)
- fw_iso_buffer_destroy(&client->buffer, client->device->card);
+ goto fail;
+ ret = fw_iso_buffer_map_vma(&client->buffer, vma);
+ if (ret < 0)
+ goto fail;
+
+ return 0;
+ fail:
+ fw_iso_buffer_destroy(&client->buffer, client->device->card);
return ret;
}
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 68109e9bb04e..4d460ef87161 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -481,6 +481,7 @@ static int read_rom(struct fw_device *device,
* generation changes under us, read_config_rom will fail and get retried.
* It's better to start all over in this case because the node from which we
* are reading the ROM may have changed the ROM during the reset.
+ * Returns either a result code or a negative error code.
*/
static int read_config_rom(struct fw_device *device, int generation)
{
@@ -488,7 +489,7 @@ static int read_config_rom(struct fw_device *device, int generation)
const u32 *old_rom, *new_rom;
u32 *rom, *stack;
u32 sp, key;
- int i, end, length, ret = -1;
+ int i, end, length, ret;
rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
@@ -502,18 +503,21 @@ static int read_config_rom(struct fw_device *device, int generation)
/* First read the bus info block. */
for (i = 0; i < 5; i++) {
- if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
+ ret = read_rom(device, generation, i, &rom[i]);
+ if (ret != RCODE_COMPLETE)
goto out;
/*
- * As per IEEE1212 7.2, during power-up, devices can
+ * As per IEEE1212 7.2, during initialization, devices can
* reply with a 0 for the first quadlet of the config
* rom to indicate that they are booting (for example,
* if the firmware is on the disk of an external
* hard disk). In that case we just fail, and the
* retry mechanism will try again later.
*/
- if (i == 0 && rom[i] == 0)
+ if (i == 0 && rom[i] == 0) {
+ ret = RCODE_BUSY;
goto out;
+ }
}
device->max_speed = device->node->max_speed;
@@ -563,11 +567,14 @@ static int read_config_rom(struct fw_device *device, int generation)
*/
key = stack[--sp];
i = key & 0xffffff;
- if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE))
+ if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE)) {
+ ret = -ENXIO;
goto out;
+ }
/* Read header quadlet for the block to get the length. */
- if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
+ ret = read_rom(device, generation, i, &rom[i]);
+ if (ret != RCODE_COMPLETE)
goto out;
end = i + (rom[i] >> 16) + 1;
if (end > MAX_CONFIG_ROM_SIZE) {
@@ -590,8 +597,8 @@ static int read_config_rom(struct fw_device *device, int generation)
* it references another block, and push it in that case.
*/
for (; i < end; i++) {
- if (read_rom(device, generation, i, &rom[i]) !=
- RCODE_COMPLETE)
+ ret = read_rom(device, generation, i, &rom[i]);
+ if (ret != RCODE_COMPLETE)
goto out;
if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
@@ -619,8 +626,10 @@ static int read_config_rom(struct fw_device *device, int generation)
old_rom = device->config_rom;
new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
- if (new_rom == NULL)
+ if (new_rom == NULL) {
+ ret = -ENOMEM;
goto out;
+ }
down_write(&fw_device_rwsem);
device->config_rom = new_rom;
@@ -628,7 +637,7 @@ static int read_config_rom(struct fw_device *device, int generation)
up_write(&fw_device_rwsem);
kfree(old_rom);
- ret = 0;
+ ret = RCODE_COMPLETE;
device->max_rec = rom[2] >> 12 & 0xf;
device->cmc = rom[2] >> 30 & 1;
device->irmc = rom[2] >> 31 & 1;
@@ -967,15 +976,17 @@ static void fw_device_init(struct work_struct *work)
* device.
*/
- if (read_config_rom(device, device->generation) < 0) {
+ ret = read_config_rom(device, device->generation);
+ if (ret != RCODE_COMPLETE) {
if (device->config_rom_retries < MAX_RETRIES &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
fw_schedule_device_work(device, RETRY_DELAY);
} else {
if (device->node->link_on)
- fw_notice(card, "giving up on Config ROM for node id %x\n",
- device->node_id);
+ fw_notice(card, "giving up on node %x: reading config rom failed: %s\n",
+ device->node_id,
+ fw_rcode_string(ret));
if (device->node == card->root_node)
fw_schedule_bm_work(card, 0);
fw_device_release(&device->device);
@@ -1069,31 +1080,30 @@ static void fw_device_init(struct work_struct *work)
put_device(&device->device); /* our reference */
}
-enum {
- REREAD_BIB_ERROR,
- REREAD_BIB_GONE,
- REREAD_BIB_UNCHANGED,
- REREAD_BIB_CHANGED,
-};
-
/* Reread and compare bus info block and header of root directory */
-static int reread_config_rom(struct fw_device *device, int generation)
+static int reread_config_rom(struct fw_device *device, int generation,
+ bool *changed)
{
u32 q;
- int i;
+ int i, rcode;
for (i = 0; i < 6; i++) {
- if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
- return REREAD_BIB_ERROR;
+ rcode = read_rom(device, generation, i, &q);
+ if (rcode != RCODE_COMPLETE)
+ return rcode;
if (i == 0 && q == 0)
- return REREAD_BIB_GONE;
+ /* inaccessible (see read_config_rom); retry later */
+ return RCODE_BUSY;
- if (q != device->config_rom[i])
- return REREAD_BIB_CHANGED;
+ if (q != device->config_rom[i]) {
+ *changed = true;
+ return RCODE_COMPLETE;
+ }
}
- return REREAD_BIB_UNCHANGED;
+ *changed = false;
+ return RCODE_COMPLETE;
}
static void fw_device_refresh(struct work_struct *work)
@@ -1101,23 +1111,14 @@ static void fw_device_refresh(struct work_struct *work)
struct fw_device *device =
container_of(work, struct fw_device, work.work);
struct fw_card *card = device->card;
- int node_id = device->node_id;
-
- switch (reread_config_rom(device, device->generation)) {
- case REREAD_BIB_ERROR:
- if (device->config_rom_retries < MAX_RETRIES / 2 &&
- atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
- device->config_rom_retries++;
- fw_schedule_device_work(device, RETRY_DELAY / 2);
-
- return;
- }
- goto give_up;
+ int ret, node_id = device->node_id;
+ bool changed;
- case REREAD_BIB_GONE:
- goto gone;
+ ret = reread_config_rom(device, device->generation, &changed);
+ if (ret != RCODE_COMPLETE)
+ goto failed_config_rom;
- case REREAD_BIB_UNCHANGED:
+ if (!changed) {
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
@@ -1126,9 +1127,6 @@ static void fw_device_refresh(struct work_struct *work)
fw_device_update(work);
device->config_rom_retries = 0;
goto out;
-
- case REREAD_BIB_CHANGED:
- break;
}
/*
@@ -1137,16 +1135,9 @@ static void fw_device_refresh(struct work_struct *work)
*/
device_for_each_child(&device->device, NULL, shutdown_unit);
- if (read_config_rom(device, device->generation) < 0) {
- if (device->config_rom_retries < MAX_RETRIES &&
- atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
- device->config_rom_retries++;
- fw_schedule_device_work(device, RETRY_DELAY);
-
- return;
- }
- goto give_up;
- }
+ ret = read_config_rom(device, device->generation);
+ if (ret != RCODE_COMPLETE)
+ goto failed_config_rom;
fw_device_cdev_update(device);
create_units(device);
@@ -1163,9 +1154,16 @@ static void fw_device_refresh(struct work_struct *work)
device->config_rom_retries = 0;
goto out;
- give_up:
- fw_notice(card, "giving up on refresh of device %s\n",
- dev_name(&device->device));
+ failed_config_rom:
+ if (device->config_rom_retries < MAX_RETRIES &&
+ atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
+ device->config_rom_retries++;
+ fw_schedule_device_work(device, RETRY_DELAY);
+ return;
+ }
+
+ fw_notice(card, "giving up on refresh of device %s: %s\n",
+ dev_name(&device->device), fw_rcode_string(ret));
gone:
atomic_set(&device->state, FW_DEVICE_GONE);
PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index d1565828ae2c..8382e27e9a27 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -39,52 +39,73 @@
* Isochronous DMA context management
*/
-int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
- int page_count, enum dma_data_direction direction)
+int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
{
- int i, j;
- dma_addr_t address;
-
- buffer->page_count = page_count;
- buffer->direction = direction;
+ int i;
+ buffer->page_count = 0;
+ buffer->page_count_mapped = 0;
buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
GFP_KERNEL);
if (buffer->pages == NULL)
- goto out;
+ return -ENOMEM;
- for (i = 0; i < buffer->page_count; i++) {
+ for (i = 0; i < page_count; i++) {
buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (buffer->pages[i] == NULL)
- goto out_pages;
+ break;
+ }
+ buffer->page_count = i;
+ if (i < page_count) {
+ fw_iso_buffer_destroy(buffer, NULL);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
+ enum dma_data_direction direction)
+{
+ dma_addr_t address;
+ int i;
+
+ buffer->direction = direction;
+
+ for (i = 0; i < buffer->page_count; i++) {
address = dma_map_page(card->device, buffer->pages[i],
0, PAGE_SIZE, direction);
- if (dma_mapping_error(card->device, address)) {
- __free_page(buffer->pages[i]);
- goto out_pages;
- }
+ if (dma_mapping_error(card->device, address))
+ break;
+
set_page_private(buffer->pages[i], address);
}
+ buffer->page_count_mapped = i;
+ if (i < buffer->page_count)
+ return -ENOMEM;
return 0;
+}
- out_pages:
- for (j = 0; j < i; j++) {
- address = page_private(buffer->pages[j]);
- dma_unmap_page(card->device, address,
- PAGE_SIZE, direction);
- __free_page(buffer->pages[j]);
- }
- kfree(buffer->pages);
- out:
- buffer->pages = NULL;
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+ int page_count, enum dma_data_direction direction)
+{
+ int ret;
+
+ ret = fw_iso_buffer_alloc(buffer, page_count);
+ if (ret < 0)
+ return ret;
+
+ ret = fw_iso_buffer_map_dma(buffer, card, direction);
+ if (ret < 0)
+ fw_iso_buffer_destroy(buffer, card);
- return -ENOMEM;
+ return ret;
}
EXPORT_SYMBOL(fw_iso_buffer_init);
-int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
+int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
+ struct vm_area_struct *vma)
{
unsigned long uaddr;
int i, err;
@@ -107,15 +128,18 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
int i;
dma_addr_t address;
- for (i = 0; i < buffer->page_count; i++) {
+ for (i = 0; i < buffer->page_count_mapped; i++) {
address = page_private(buffer->pages[i]);
dma_unmap_page(card->device, address,
PAGE_SIZE, buffer->direction);
- __free_page(buffer->pages[i]);
}
+ for (i = 0; i < buffer->page_count; i++)
+ __free_page(buffer->pages[i]);
kfree(buffer->pages);
buffer->pages = NULL;
+ buffer->page_count = 0;
+ buffer->page_count_mapped = 0;
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index db8a965cf712..780708dc6e25 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -1003,6 +1003,32 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
}
EXPORT_SYMBOL(fw_core_handle_response);
+/**
+ * fw_rcode_string - convert a firewire result code to an error description
+ * @rcode: the result code
+ */
+const char *fw_rcode_string(int rcode)
+{
+ static const char *const names[] = {
+ [RCODE_COMPLETE] = "no error",
+ [RCODE_CONFLICT_ERROR] = "conflict error",
+ [RCODE_DATA_ERROR] = "data error",
+ [RCODE_TYPE_ERROR] = "type error",
+ [RCODE_ADDRESS_ERROR] = "address error",
+ [RCODE_SEND_ERROR] = "send error",
+ [RCODE_CANCELLED] = "timeout",
+ [RCODE_BUSY] = "busy",
+ [RCODE_GENERATION] = "bus reset",
+ [RCODE_NO_ACK] = "no ack",
+ };
+
+ if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode])
+ return names[rcode];
+ else
+ return "unknown";
+}
+EXPORT_SYMBOL(fw_rcode_string);
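/*
 * Editorial usage sketch, not part of this patch: a caller that already
 * holds an rcode can turn it into a log-friendly string, much as the
 * config-ROM and bus-manager code above now does.  The helper name below
 * is illustrative.
 */
static void report_failed_read(struct fw_card *card, int rcode)
{
	if (rcode != RCODE_COMPLETE)
		fw_notice(card, "read failed: %s\n", fw_rcode_string(rcode));
}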
+
static const struct fw_address_region topology_map_region =
{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
.end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index b5a2f6197053..515a42c786d0 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -3,6 +3,7 @@
#include <linux/compiler.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/idr.h>
@@ -154,7 +155,11 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
/* -iso */
-int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
+int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
+int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
+ enum dma_data_direction direction);
+int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
+ struct vm_area_struct *vma);
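/*
 * Editorial note, not part of this patch: these three helpers replace the
 * old fw_iso_buffer_init()/fw_iso_buffer_map() pair.  The call order used
 * by core-cdev.c above is, roughly:
 *
 *	fw_iso_buffer_alloc()    at mmap time, before the DMA direction
 *	                         is known
 *	fw_iso_buffer_map_dma()  once an iso context exists and therefore
 *	                         the direction is known
 *	fw_iso_buffer_map_vma()  to expose the pages to user space
 *	fw_iso_buffer_destroy()  on failure or teardown
 */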
/* -topology */
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index a7c4422a688e..4ebfb2273672 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -693,6 +693,8 @@ static struct pci_device_id pci_table[] __devinitdata = {
{ } /* Terminating entry */
};
+MODULE_DEVICE_TABLE(pci, pci_table);
+
static struct pci_driver lynx_pci_driver = {
.name = driver_name,
.id_table = pci_table,
@@ -700,22 +702,8 @@ static struct pci_driver lynx_pci_driver = {
.remove = remove_card,
};
+module_pci_driver(lynx_pci_driver);
+
MODULE_AUTHOR("Kristian Hoegsberg");
MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, pci_table);
-
-static int __init nosy_init(void)
-{
- return pci_register_driver(&lynx_pci_driver);
-}
-
-static void __exit nosy_cleanup(void)
-{
- pci_unregister_driver(&lynx_pci_driver);
-
- pr_info("Unloaded %s\n", driver_name);
-}
-
-module_init(nosy_init);
-module_exit(nosy_cleanup);
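/*
 * Editorial note, not part of this patch: module_pci_driver() generates
 * the same boilerplate that is removed above, i.e. a module_init() that
 * calls pci_register_driver(&lynx_pci_driver) and a module_exit() that
 * calls pci_unregister_driver(&lynx_pci_driver); only the pr_info() on
 * unload is dropped.
 */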
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 2b5460075a9f..c1af05e834b6 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1821,9 +1821,8 @@ static void bus_reset_work(struct work_struct *work)
{
struct fw_ohci *ohci =
container_of(work, struct fw_ohci, bus_reset_work);
- int self_id_count, i, j, reg;
- int generation, new_generation;
- unsigned long flags;
+ int self_id_count, generation, new_generation, i, j;
+ u32 reg;
void *free_rom = NULL;
dma_addr_t free_rom_bus = 0;
bool is_new_root;
@@ -1930,13 +1929,13 @@ static void bus_reset_work(struct work_struct *work)
}
/* FIXME: Document how the locking works. */
- spin_lock_irqsave(&ohci->lock, flags);
+ spin_lock_irq(&ohci->lock);
ohci->generation = -1; /* prevent AT packet queueing */
context_stop(&ohci->at_request_ctx);
context_stop(&ohci->at_response_ctx);
- spin_unlock_irqrestore(&ohci->lock, flags);
+ spin_unlock_irq(&ohci->lock);
/*
* Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
@@ -1946,7 +1945,7 @@ static void bus_reset_work(struct work_struct *work)
at_context_flush(&ohci->at_request_ctx);
at_context_flush(&ohci->at_response_ctx);
- spin_lock_irqsave(&ohci->lock, flags);
+ spin_lock_irq(&ohci->lock);
ohci->generation = generation;
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
@@ -1990,7 +1989,7 @@ static void bus_reset_work(struct work_struct *work)
reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
#endif
- spin_unlock_irqrestore(&ohci->lock, flags);
+ spin_unlock_irq(&ohci->lock);
if (free_rom)
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
@@ -2402,7 +2401,6 @@ static int ohci_set_config_rom(struct fw_card *card,
const __be32 *config_rom, size_t length)
{
struct fw_ohci *ohci;
- unsigned long flags;
__be32 *next_config_rom;
dma_addr_t uninitialized_var(next_config_rom_bus);
@@ -2441,7 +2439,7 @@ static int ohci_set_config_rom(struct fw_card *card,
if (next_config_rom == NULL)
return -ENOMEM;
- spin_lock_irqsave(&ohci->lock, flags);
+ spin_lock_irq(&ohci->lock);
/*
* If there is not an already pending config_rom update,
@@ -2467,7 +2465,7 @@ static int ohci_set_config_rom(struct fw_card *card,
reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
- spin_unlock_irqrestore(&ohci->lock, flags);
+ spin_unlock_irq(&ohci->lock);
/* If we didn't use the DMA allocation, delete it. */
if (next_config_rom != NULL)
@@ -2891,10 +2889,9 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
descriptor_callback_t uninitialized_var(callback);
u64 *uninitialized_var(channels);
u32 *uninitialized_var(mask), uninitialized_var(regs);
- unsigned long flags;
int index, ret = -EBUSY;
- spin_lock_irqsave(&ohci->lock, flags);
+ spin_lock_irq(&ohci->lock);
switch (type) {
case FW_ISO_CONTEXT_TRANSMIT:
@@ -2938,7 +2935,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
ret = -ENOSYS;
}
- spin_unlock_irqrestore(&ohci->lock, flags);
+ spin_unlock_irq(&ohci->lock);
if (index < 0)
return ERR_PTR(ret);
@@ -2964,7 +2961,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
out_with_header:
free_page((unsigned long)ctx->header);
out:
- spin_lock_irqsave(&ohci->lock, flags);
+ spin_lock_irq(&ohci->lock);
switch (type) {
case FW_ISO_CONTEXT_RECEIVE:
@@ -2977,7 +2974,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
}
*mask |= 1 << index;
- spin_unlock_irqrestore(&ohci->lock, flags);
+ spin_unlock_irq(&ohci->lock);
return ERR_PTR(ret);
}
@@ -3789,6 +3786,8 @@ static struct pci_driver fw_ohci_pci_driver = {
#endif
};
+module_pci_driver(fw_ohci_pci_driver);
+
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");
@@ -3797,16 +3796,3 @@ MODULE_LICENSE("GPL");
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif
-
-static int __init fw_ohci_init(void)
-{
- return pci_register_driver(&fw_ohci_pci_driver);
-}
-
-static void __exit fw_ohci_cleanup(void)
-{
- pci_unregister_driver(&fw_ohci_pci_driver);
-}
-
-module_init(fw_ohci_init);
-module_exit(fw_ohci_cleanup);
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index b7e65d7eab64..1162d6b3bf85 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -207,9 +207,8 @@ static const struct device *lu_dev(const struct sbp2_logical_unit *lu)
#define SBP2_MAX_CDB_SIZE 16
/*
- * The default maximum s/g segment size of a FireWire controller is
- * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
- * be quadlet-aligned, we set the length limit to 0xffff & ~3.
+ * The maximum SBP-2 data buffer size is 0xffff. We quadlet-align this
+ * for compatibility with earlier versions of this driver.
*/
#define SBP2_MAX_SEG_SIZE 0xfffc
@@ -1163,7 +1162,8 @@ static int sbp2_probe(struct device *dev)
shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
- if (scsi_add_host(shost, &unit->device) < 0)
+ if (scsi_add_host_with_dma(shost, &unit->device,
+ device->card->device) < 0)
goto fail_shost_put;
/* implicit directory ID */
@@ -1295,10 +1295,7 @@ static struct fw_driver sbp2_driver = {
static void sbp2_unmap_scatterlist(struct device *card_device,
struct sbp2_command_orb *orb)
{
- if (scsi_sg_count(orb->cmd))
- dma_unmap_sg(card_device, scsi_sglist(orb->cmd),
- scsi_sg_count(orb->cmd),
- orb->cmd->sc_data_direction);
+ scsi_dma_unmap(orb->cmd);
if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT))
dma_unmap_single(card_device, orb->page_table_bus,
@@ -1404,9 +1401,8 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
struct scatterlist *sg = scsi_sglist(orb->cmd);
int i, n;
- n = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
- orb->cmd->sc_data_direction);
- if (n == 0)
+ n = scsi_dma_map(orb->cmd);
+ if (n <= 0)
goto fail;
/*
@@ -1452,8 +1448,7 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
return 0;
fail_page_table:
- dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
- scsi_sg_count(orb->cmd), orb->cmd->sc_data_direction);
+ scsi_dma_unmap(orb->cmd);
fail:
return -ENOMEM;
}
@@ -1534,7 +1529,10 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
sdev->allow_restart = 1;
- /* SBP-2 requires quadlet alignment of the data buffers. */
+ /*
+ * SBP-2 does not require any alignment, but we set it anyway
+ * for compatibility with earlier versions of this driver.
+ */
blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
@@ -1568,8 +1566,6 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
- blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
-
return 0;
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index eb80ba300452..c4067d0141f7 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -2,6 +2,14 @@
# GPIO infrastructure and drivers
#
+config ARCH_HAVE_CUSTOM_GPIO_H
+ bool
+ help
+ Selecting this config option from the architecture Kconfig allows
+ the architecture to provide a custom asm/gpio.h implementation
+ overriding the default implementations. New uses of this are
+ strongly discouraged.
+
config ARCH_WANT_OPTIONAL_GPIOLIB
bool
help
@@ -37,6 +45,10 @@ menuconfig GPIOLIB
if GPIOLIB
+config OF_GPIO
+ def_bool y
+ depends on OF && !SPARC
+
config DEBUG_GPIO
bool "Debug GPIO calls"
depends on DEBUG_KERNEL
@@ -102,6 +114,14 @@ config GPIO_EP93XX
depends on ARCH_EP93XX
select GPIO_GENERIC
+config GPIO_MM_LANTIQ
+ bool "Lantiq Memory mapped GPIOs"
+ depends on LANTIQ && SOC_XWAY
+ help
+ This enables support for memory-mapped GPIOs on the External Bus Unit
+ (EBU) found on Lantiq SoCs. The GPIOs are output-only as they are
+ created by attaching a 16-bit latch to the bus.
+
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
@@ -155,6 +175,14 @@ config GPIO_PXA
help
Say yes here to support the PXA GPIO device
+config GPIO_STA2X11
+ bool "STA2x11/ConneXt GPIO support"
+ depends on MFD_STA2X11
+ select GENERIC_IRQ_CHIP
+ help
+ Say yes here to support the STA2x11/ConneXt GPIO device.
+ The GPIO module has 128 GPIO pins with alternate functions.
+
config GPIO_XILINX
bool "Xilinx GPIO support"
depends on PPC_OF || MICROBLAZE
@@ -168,13 +196,13 @@ config GPIO_VR41XX
Say yes here to support the NEC VR4100 series General-purpose I/O Unit
config GPIO_SCH
- tristate "Intel SCH/TunnelCreek GPIO"
+ tristate "Intel SCH/TunnelCreek/Centerton GPIO"
depends on PCI && X86
select MFD_CORE
select LPC_SCH
help
- Say yes here to support GPIO interface on Intel Poulsbo SCH
- or Intel Tunnel Creek processor.
+ Say yes here to support GPIO interface on Intel Poulsbo SCH,
+ Intel Tunnel Creek processor or Intel Centerton processor.
The Intel SCH contains a total of 14 GPIO pins. Ten GPIOs are
powered by the core power rail and are turned off during sleep
modes (S3 and higher). The remaining four GPIOs are powered by
@@ -183,6 +211,22 @@ config GPIO_SCH
system from the Suspend-to-RAM state.
The Intel Tunnel Creek processor has 5 GPIOs powered by the
core power rail and 9 from suspend power supply.
+ The Intel Centerton processor has a total of 30 GPIO pins.
+ Twenty-one are powered by the core power rail and 9 from the
+ suspend power supply.
+
+config GPIO_ICH
+ tristate "Intel ICH GPIO"
+ depends on PCI && X86
+ select MFD_CORE
+ select LPC_ICH
+ help
+ Say yes here to support the GPIO functionality of a number of Intel
+ ICH-based chipsets. Currently supported devices: ICH6, ICH7, ICH8,
+ ICH9, ICH10, Series 5/3400 (eg Ibex Peak), Series 6/C200 (eg
+ Cougar Point), NM10 (Tiger Point), and 3100 (Whitmore Lake).
+
+ If unsure, say N.
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
@@ -249,7 +293,7 @@ config GPIO_MC9S08DZ60
Select this to enable the MC9S08DZ60 GPIO driver
config GPIO_PCA953X
- tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports"
+ tristate "PCA953x, PCA955x, PCA957x, TCA64xx, and MAX7310 I/O ports"
depends on I2C
help
Say yes here to provide access to several register-oriented
@@ -258,10 +302,11 @@ config GPIO_PCA953X
4 bits: pca9536, pca9537
- 8 bits: max7310, pca9534, pca9538, pca9554, pca9557,
- tca6408
+ 8 bits: max7310, max7315, pca6107, pca9534, pca9538, pca9554,
+ pca9556, pca9557, pca9574, tca6408
- 16 bits: pca9535, pca9539, pca9555, tca6416
+ 16 bits: max7312, max7313, pca9535, pca9539, pca9555, pca9575,
+ tca6416
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
@@ -294,6 +339,15 @@ config GPIO_PCF857X
This driver provides an in-kernel interface to those GPIOs using
platform-neutral GPIO calls.
+config GPIO_RC5T583
+ bool "RICOH RC5T583 GPIO"
+ depends on MFD_RC5T583
+ help
+ Select this option to enable GPIO driver for the Ricoh RC5T583
+ chip family.
+ This driver provides the support for driving/reading the gpio pins
+ of RC5T583 device through standard gpio library.
+
config GPIO_SX150X
bool "Semtech SX150x I2C GPIO expander"
depends on I2C=y
@@ -312,6 +366,16 @@ config GPIO_STMPE
This enables support for the GPIOs found on the STMPE I/O
Expanders.
+config GPIO_STP_XWAY
+ bool "XWAY STP GPIOs"
+ depends on SOC_XWAY
+ help
+ This enables support for the Serial To Parallel (STP) unit found on
+ XWAY SoCs. The STP allows the SoC to drive a shift-register cascade
+ of up to 24 bits. This peripheral is aimed at driving LEDs. Some of
+ the GPIOs/LEDs can be auto-updated by the SoC with DSL and PHY
+ status.
+
config GPIO_TC3589X
bool "TC3589X GPIOs"
depends on MFD_TC3589X
@@ -405,6 +469,7 @@ config GPIO_BT8XX
config GPIO_LANGWELL
bool "Intel Langwell/Penwell GPIO support"
depends on PCI && X86
+ select IRQ_DOMAIN
help
Say Y here to support Intel Langwell/Penwell GPIO.
@@ -520,4 +585,12 @@ config GPIO_TPS65910
help
Select this option to enable GPIO driver for the TPS65910
chip family.
+
+config GPIO_MSIC
+ bool "Intel MSIC mixed signal gpio support"
+ depends on MFD_INTEL_MSIC
+ help
+ Enable support for GPIO on Intel MSIC controllers found in
+ Intel MID devices.
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 708ffb2165ea..0f55662002c3 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -3,6 +3,7 @@
ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
obj-$(CONFIG_GPIOLIB) += gpiolib.o devres.o
+obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
# Device drivers. Generally keep list sorted alphabetically
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
@@ -18,6 +19,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_EM) += gpio-em.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
+obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
@@ -31,8 +33,10 @@ obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
+obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
+obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
obj-$(CONFIG_GPIO_MSM_V1) += gpio-msm-v1.o
obj-$(CONFIG_GPIO_MSM_V2) += gpio-msm-v2.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
@@ -43,12 +47,15 @@ obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
+obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
obj-$(CONFIG_PLAT_SAMSUNG) += gpio-samsung.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
+obj-$(CONFIG_GPIO_STA2X11) += gpio-sta2x11.o
obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
+obj-$(CONFIG_GPIO_STP_XWAY) += gpio-stp-xway.o
obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o
obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 8950f6261bbb..9e9947cb86a3 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -71,6 +71,35 @@ int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
EXPORT_SYMBOL(devm_gpio_request);
/**
+ * devm_gpio_request_one - request a single GPIO with initial setup
+ * @dev: device to request for
+ * @gpio: the GPIO number
+ * @flags: GPIO configuration as specified by GPIOF_*
+ * @label: a literal description string of this GPIO
+ */
+int devm_gpio_request_one(struct device *dev, unsigned gpio,
+ unsigned long flags, const char *label)
+{
+ unsigned *dr;
+ int rc;
+
+ dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ rc = gpio_request_one(gpio, flags, label);
+ if (rc) {
+ devres_free(dr);
+ return rc;
+ }
+
+ *dr = gpio;
+ devres_add(dev, dr);
+
+ return 0;
+}
+
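/*
 * Editorial usage sketch, not part of this patch: the GPIO number, flags
 * and probe function are illustrative only.  Because the request is
 * device-managed, the GPIO is released automatically when the device is
 * unbound, so no explicit gpio_free() is needed in error or remove paths.
 */
static int foo_probe(struct platform_device *pdev)
{
	int err;

	err = devm_gpio_request_one(&pdev->dev, 42 /* hypothetical GPIO */,
				    GPIOF_OUT_INIT_LOW, "foo-reset");
	if (err)
		return err;

	return 0;
}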
+/**
* devm_gpio_free - free a GPIO
* @dev: device to free gpio for
* @gpio: gpio to free
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index 5ca4098ba092..e4cc7eb69bb2 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -328,17 +328,7 @@ static struct pci_driver bt8xxgpio_pci_driver = {
.resume = bt8xxgpio_resume,
};
-static int __init bt8xxgpio_init(void)
-{
- return pci_register_driver(&bt8xxgpio_pci_driver);
-}
-module_init(bt8xxgpio_init)
-
-static void __exit bt8xxgpio_exit(void)
-{
- pci_unregister_driver(&bt8xxgpio_pci_driver);
-}
-module_exit(bt8xxgpio_exit)
+module_pci_driver(bt8xxgpio_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Buesch");
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 776b772523e5..9fe5b8fe9be8 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -325,7 +325,7 @@ static int ep93xx_gpio_add_bank(struct bgpio_chip *bgc, struct device *dev,
void __iomem *dir = mmio_base + bank->dir;
int err;
- err = bgpio_init(bgc, dev, 1, data, NULL, NULL, dir, NULL, false);
+ err = bgpio_init(bgc, dev, 1, data, NULL, NULL, dir, NULL, 0);
if (err)
return err;
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index e38dd0c31973..82e2e4fe599e 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -364,7 +364,7 @@ EXPORT_SYMBOL_GPL(bgpio_remove);
int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
unsigned long sz, void __iomem *dat, void __iomem *set,
void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
- bool big_endian)
+ unsigned long flags)
{
int ret;
@@ -385,7 +385,7 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
if (ret)
return ret;
- ret = bgpio_setup_accessors(dev, bgc, big_endian);
+ ret = bgpio_setup_accessors(dev, bgc, flags & BGPIOF_BIG_ENDIAN);
if (ret)
return ret;
@@ -394,6 +394,11 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
return ret;
bgc->data = bgc->read_reg(bgc->reg_dat);
+ if (bgc->gc.set == bgpio_set_set &&
+ !(flags & BGPIOF_UNREADABLE_REG_SET))
+ bgc->data = bgc->read_reg(bgc->reg_set);
+ if (bgc->reg_dir && !(flags & BGPIOF_UNREADABLE_REG_DIR))
+ bgc->dir = bgc->read_reg(bgc->reg_dir);
return ret;
}
@@ -449,7 +454,7 @@ static int __devinit bgpio_pdev_probe(struct platform_device *pdev)
void __iomem *dirout;
void __iomem *dirin;
unsigned long sz;
- bool be;
+ unsigned long flags = 0;
int err;
struct bgpio_chip *bgc;
struct bgpio_pdata *pdata = dev_get_platdata(dev);
@@ -480,13 +485,14 @@ static int __devinit bgpio_pdev_probe(struct platform_device *pdev)
if (err)
return err;
- be = !strcmp(platform_get_device_id(pdev)->name, "basic-mmio-gpio-be");
+ if (!strcmp(platform_get_device_id(pdev)->name, "basic-mmio-gpio-be"))
+ flags |= BGPIOF_BIG_ENDIAN;
bgc = devm_kzalloc(&pdev->dev, sizeof(*bgc), GFP_KERNEL);
if (!bgc)
return -ENOMEM;
- err = bgpio_init(bgc, dev, sz, dat, set, clr, dirout, dirin, be);
+ err = bgpio_init(bgc, dev, sz, dat, set, clr, dirout, dirin, flags);
if (err)
return err;
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
new file mode 100644
index 000000000000..b7c06517403d
--- /dev/null
+++ b/drivers/gpio/gpio-ich.c
@@ -0,0 +1,419 @@
+/*
+ * Intel ICH6-10, Series 5 and 6 GPIO driver
+ *
+ * Copyright (C) 2010 Extreme Engineering Solutions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/lpc_ich.h>
+
+#define DRV_NAME "gpio_ich"
+
+/*
+ * GPIO register offsets in GPIO I/O space.
+ * Each chunk of 32 GPIOs is manipulated via its own USE_SELx, IO_SELx, and
+ * LVLx registers. Logic in the read/write functions takes a register and
+ * an absolute bit number and determines the proper register offset and bit
+ * number in that register. For example, to read the value of GPIO bit 50
+ * the code would access offset ichx_regs[2(=GPIO_LVL)][1(=50/32)],
+ * bit 18 (50%32).
+ */
+enum GPIO_REG {
+ GPIO_USE_SEL = 0,
+ GPIO_IO_SEL,
+ GPIO_LVL,
+};
+
+static const u8 ichx_regs[3][3] = {
+ {0x00, 0x30, 0x40}, /* USE_SEL[1-3] offsets */
+ {0x04, 0x34, 0x44}, /* IO_SEL[1-3] offsets */
+ {0x0c, 0x38, 0x48}, /* LVL[1-3] offsets */
+};
+
+#define ICHX_WRITE(val, reg, base_res) outl(val, (reg) + (base_res)->start)
+#define ICHX_READ(reg, base_res) inl((reg) + (base_res)->start)
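/*
 * Editorial worked example, not part of this patch: for GPIO 50 the
 * helpers below compute reg_nr = 50 / 32 = 1 and bit = 50 & 0x1f = 18,
 * so reading its level does
 *
 *	inl(ichx_priv.gpio_base->start + ichx_regs[GPIO_LVL][1])
 *
 * i.e. offset 0x38, and tests bit 18 of the value returned, matching
 * the example given in the comment above.
 */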
+
+struct ichx_desc {
+ /* Max GPIO pins the chipset can have */
+ uint ngpio;
+
+ /* Whether the chipset has GPIO in GPE0_STS in the PM IO region */
+ bool uses_gpe0;
+
+ /* USE_SEL is bogus on some chipsets, eg 3100 */
+ u32 use_sel_ignore[3];
+
+ /* Some chipsets have quirks, let these use their own request/get */
+ int (*request)(struct gpio_chip *chip, unsigned offset);
+ int (*get)(struct gpio_chip *chip, unsigned offset);
+};
+
+static struct {
+ spinlock_t lock;
+ struct platform_device *dev;
+ struct gpio_chip chip;
+ struct resource *gpio_base; /* GPIO IO base */
+ struct resource *pm_base; /* Power Management IO base */
+ struct ichx_desc *desc; /* Pointer to chipset-specific description */
+ u32 orig_gpio_ctrl; /* Orig CTRL value, used to restore on exit */
+} ichx_priv;
+
+static int modparam_gpiobase = -1; /* dynamic */
+module_param_named(gpiobase, modparam_gpiobase, int, 0444);
+MODULE_PARM_DESC(gpiobase, "The GPIO number base. -1 means dynamic, "
+ "which is the default.");
+
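/*
 * Editorial note, not part of this patch: the parameter above lets the
 * GPIO number base be pinned at load time, e.g.
 *
 *	modprobe gpio_ich gpiobase=256
 *
 * otherwise gpiolib picks a dynamic base (module and parameter names as
 * defined in this file; the value 256 is illustrative).
 */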
+static int ichx_write_bit(int reg, unsigned nr, int val, int verify)
+{
+ unsigned long flags;
+ u32 data, tmp;
+ int reg_nr = nr / 32;
+ int bit = nr & 0x1f;
+ int ret = 0;
+
+ spin_lock_irqsave(&ichx_priv.lock, flags);
+
+ data = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+ if (val)
+ data |= 1 << bit;
+ else
+ data &= ~(1 << bit);
+ ICHX_WRITE(data, ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+ tmp = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+ if (verify && data != tmp)
+ ret = -EPERM;
+
+ spin_unlock_irqrestore(&ichx_priv.lock, flags);
+
+ return ret;
+}
+
+static int ichx_read_bit(int reg, unsigned nr)
+{
+ unsigned long flags;
+ u32 data;
+ int reg_nr = nr / 32;
+ int bit = nr & 0x1f;
+
+ spin_lock_irqsave(&ichx_priv.lock, flags);
+
+ data = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+
+ spin_unlock_irqrestore(&ichx_priv.lock, flags);
+
+ return data & (1 << bit) ? 1 : 0;
+}
+
+static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+ /*
+ * Try setting pin as an input and verify it worked since many pins
+ * are output-only.
+ */
+ if (ichx_write_bit(GPIO_IO_SEL, nr, 1, 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ichx_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+ int val)
+{
+ /* Set GPIO output value. */
+ ichx_write_bit(GPIO_LVL, nr, val, 0);
+
+ /*
+ * Try setting pin as an output and verify it worked since many pins
+ * are input-only.
+ */
+ if (ichx_write_bit(GPIO_IO_SEL, nr, 0, 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ichx_gpio_get(struct gpio_chip *chip, unsigned nr)
+{
+ return ichx_read_bit(GPIO_LVL, nr);
+}
+
+static int ich6_gpio_get(struct gpio_chip *chip, unsigned nr)
+{
+ unsigned long flags;
+ u32 data;
+
+ /*
+ * GPI 0 - 15 need to be read from the power management registers on
+ * an ICH6/3100 bridge.
+ */
+ if (nr < 16) {
+ if (!ichx_priv.pm_base)
+ return -ENXIO;
+
+ spin_lock_irqsave(&ichx_priv.lock, flags);
+
+ /* GPI 0 - 15 are latched, write 1 to clear */
+ ICHX_WRITE(1 << (16 + nr), 0, ichx_priv.pm_base);
+ data = ICHX_READ(0, ichx_priv.pm_base);
+
+ spin_unlock_irqrestore(&ichx_priv.lock, flags);
+
+ return (data >> 16) & (1 << nr) ? 1 : 0;
+ } else {
+ return ichx_gpio_get(chip, nr);
+ }
+}
+
+static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr)
+{
+ /*
+ * Note we assume the BIOS properly set a bridge's USE value. Some
+ * chips (eg Intel 3100) have bogus USE values though, so first see if
+ * the chipset's USE value can be trusted for this specific bit.
+ * If it can't be trusted, assume that the pin can be used as a GPIO.
+ */
+ if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f)))
+ return 1;
+
+ return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV;
+}
+
+static int ich6_gpio_request(struct gpio_chip *chip, unsigned nr)
+{
+ /*
+ * Fixups for bits 16 and 17 are necessary on the Intel ICH6/3100
+ * bridge as they are controlled by USE register bits 0 and 1. See
+ * "Table 704 GPIO_USE_SEL1 register" in the i3100 datasheet for
+ * additional info.
+ */
+ if (nr == 16 || nr == 17)
+ nr -= 16;
+
+ return ichx_gpio_request(chip, nr);
+}
+
+static void ichx_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
+{
+ ichx_write_bit(GPIO_LVL, nr, val, 0);
+}
+
+static void __devinit ichx_gpiolib_setup(struct gpio_chip *chip)
+{
+ chip->owner = THIS_MODULE;
+ chip->label = DRV_NAME;
+ chip->dev = &ichx_priv.dev->dev;
+
+ /* Allow chip-specific overrides of request()/get() */
+ chip->request = ichx_priv.desc->request ?
+ ichx_priv.desc->request : ichx_gpio_request;
+ chip->get = ichx_priv.desc->get ?
+ ichx_priv.desc->get : ichx_gpio_get;
+
+ chip->set = ichx_gpio_set;
+ chip->direction_input = ichx_gpio_direction_input;
+ chip->direction_output = ichx_gpio_direction_output;
+ chip->base = modparam_gpiobase;
+ chip->ngpio = ichx_priv.desc->ngpio;
+ chip->can_sleep = 0;
+ chip->dbg_show = NULL;
+}
+
+/* ICH6-based, 631xesb-based */
+static struct ichx_desc ich6_desc = {
+ /* Bridges using the ICH6 controller need fixups for GPIO 0 - 17 */
+ .request = ich6_gpio_request,
+ .get = ich6_gpio_get,
+
+ /* GPIO 0-15 are read in the GPE0_STS PM register */
+ .uses_gpe0 = true,
+
+ .ngpio = 50,
+};
+
+/* Intel 3100 */
+static struct ichx_desc i3100_desc = {
+ /*
+ * Bits 16,17, 20 of USE_SEL and bit 16 of USE_SEL2 always read 0 on
+ * the Intel 3100. See "Table 712. GPIO Summary Table" of 3100
+ * Datasheet for more info.
+ */
+ .use_sel_ignore = {0x00130000, 0x00010000, 0x0},
+
+ /* The 3100 needs fixups for GPIO 0 - 17 */
+ .request = ich6_gpio_request,
+ .get = ich6_gpio_get,
+
+ /* GPIO 0-15 are read in the GPE0_STS PM register */
+ .uses_gpe0 = true,
+
+ .ngpio = 50,
+};
+
+/* ICH7 and ICH8-based */
+static struct ichx_desc ich7_desc = {
+ .ngpio = 50,
+};
+
+/* ICH9-based */
+static struct ichx_desc ich9_desc = {
+ .ngpio = 61,
+};
+
+/* ICH10-based - Consumer/corporate versions have different numbers of GPIOs */
+static struct ichx_desc ich10_cons_desc = {
+ .ngpio = 61,
+};
+static struct ichx_desc ich10_corp_desc = {
+ .ngpio = 72,
+};
+
+/* Intel 5 series, 6 series, 3400 series, and C200 series */
+static struct ichx_desc intel5_desc = {
+ .ngpio = 76,
+};
+
+static int __devinit ichx_gpio_probe(struct platform_device *pdev)
+{
+ struct resource *res_base, *res_pm;
+ int err;
+ struct lpc_ich_info *ich_info = pdev->dev.platform_data;
+
+ if (!ich_info)
+ return -ENODEV;
+
+ ichx_priv.dev = pdev;
+
+ switch (ich_info->gpio_version) {
+ case ICH_I3100_GPIO:
+ ichx_priv.desc = &i3100_desc;
+ break;
+ case ICH_V5_GPIO:
+ ichx_priv.desc = &intel5_desc;
+ break;
+ case ICH_V6_GPIO:
+ ichx_priv.desc = &ich6_desc;
+ break;
+ case ICH_V7_GPIO:
+ ichx_priv.desc = &ich7_desc;
+ break;
+ case ICH_V9_GPIO:
+ ichx_priv.desc = &ich9_desc;
+ break;
+ case ICH_V10CORP_GPIO:
+ ichx_priv.desc = &ich10_corp_desc;
+ break;
+ case ICH_V10CONS_GPIO:
+ ichx_priv.desc = &ich10_cons_desc;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ res_base = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPIO);
+ if (!res_base || !res_base->start || !res_base->end)
+ return -ENODEV;
+
+ if (!request_region(res_base->start, resource_size(res_base),
+ pdev->name))
+ return -EBUSY;
+
+ ichx_priv.gpio_base = res_base;
+
+ /*
+ * If necessary, determine the I/O address of ACPI/power management
+ * registers, which are needed to read the GPE0 register for GPI pins
+ * 0 - 15 on some chipsets.
+ */
+ if (!ichx_priv.desc->uses_gpe0)
+ goto init;
+
+ res_pm = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPE0);
+ if (!res_pm) {
+ pr_warn("ACPI BAR is unavailable, GPI 0 - 15 unavailable\n");
+ goto init;
+ }
+
+ if (!request_region(res_pm->start, resource_size(res_pm),
+ pdev->name)) {
+ pr_warn("ACPI BAR is busy, GPI 0 - 15 unavailable\n");
+ goto init;
+ }
+
+ ichx_priv.pm_base = res_pm;
+
+init:
+ ichx_gpiolib_setup(&ichx_priv.chip);
+ err = gpiochip_add(&ichx_priv.chip);
+ if (err) {
+ pr_err("Failed to register GPIOs\n");
+ goto add_err;
+ }
+
+ pr_info("GPIO from %d to %d on %s\n", ichx_priv.chip.base,
+ ichx_priv.chip.base + ichx_priv.chip.ngpio - 1, DRV_NAME);
+
+ return 0;
+
+add_err:
+ release_region(ichx_priv.gpio_base->start,
+ resource_size(ichx_priv.gpio_base));
+ if (ichx_priv.pm_base)
+ release_region(ichx_priv.pm_base->start,
+ resource_size(ichx_priv.pm_base));
+ return err;
+}
+
+static int __devexit ichx_gpio_remove(struct platform_device *pdev)
+{
+ int err;
+
+ err = gpiochip_remove(&ichx_priv.chip);
+ if (err) {
+ dev_err(&pdev->dev, "%s failed, %d\n",
+ "gpiochip_remove()", err);
+ return err;
+ }
+
+ release_region(ichx_priv.gpio_base->start,
+ resource_size(ichx_priv.gpio_base));
+ if (ichx_priv.pm_base)
+ release_region(ichx_priv.pm_base->start,
+ resource_size(ichx_priv.pm_base));
+
+ return 0;
+}
+
+static struct platform_driver ichx_gpio_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ },
+ .probe = ichx_gpio_probe,
+ .remove = __devexit_p(ichx_gpio_remove),
+};
+
+module_platform_driver(ichx_gpio_driver);
+
+MODULE_AUTHOR("Peter Tyser <ptyser@xes-inc.com>");
+MODULE_DESCRIPTION("GPIO interface for Intel ICH series");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:"DRV_NAME);
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
index 00692e89ef87..a1c8754f52cf 100644
--- a/drivers/gpio/gpio-langwell.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -36,6 +36,7 @@
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/irqdomain.h>
/*
* Langwell chip has 64 pins and thus there are 2 32bit registers to control
@@ -66,8 +67,8 @@ struct lnw_gpio {
struct gpio_chip chip;
void *reg_base;
spinlock_t lock;
- unsigned irq_base;
struct pci_dev *pdev;
+ struct irq_domain *domain;
};
static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
@@ -176,13 +177,13 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
static int lnw_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
- return lnw->irq_base + offset;
+ return irq_create_mapping(lnw->domain, offset);
}
static int lnw_irq_type(struct irq_data *d, unsigned type)
{
struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
- u32 gpio = d->irq - lnw->irq_base;
+ u32 gpio = irqd_to_hwirq(d);
unsigned long flags;
u32 value;
void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
@@ -249,20 +250,55 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
/* check GPIO controller to check which pin triggered the interrupt */
for (base = 0; base < lnw->chip.ngpio; base += 32) {
gedr = gpio_reg(&lnw->chip, base, GEDR);
- pending = readl(gedr);
- while (pending) {
+ while ((pending = readl(gedr))) {
gpio = __ffs(pending);
mask = BIT(gpio);
- pending &= ~mask;
/* Clear before handling so we can't lose an edge */
writel(mask, gedr);
- generic_handle_irq(lnw->irq_base + base + gpio);
+ generic_handle_irq(irq_find_mapping(lnw->domain,
+ base + gpio));
}
}
chip->irq_eoi(data);
}
+static void lnw_irq_init_hw(struct lnw_gpio *lnw)
+{
+ void __iomem *reg;
+ unsigned base;
+
+ for (base = 0; base < lnw->chip.ngpio; base += 32) {
+ /* Clear the rising-edge detect register */
+ reg = gpio_reg(&lnw->chip, base, GRER);
+ writel(0, reg);
+ /* Clear the falling-edge detect register */
+ reg = gpio_reg(&lnw->chip, base, GFER);
+ writel(0, reg);
+ /* Clear the edge detect status register */
+ reg = gpio_reg(&lnw->chip, base, GEDR);
+ writel(~0, reg);
+ }
+}
+
+static int lnw_gpio_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct lnw_gpio *lnw = d->host_data;
+
+ irq_set_chip_and_handler_name(virq, &lnw_irqchip, handle_simple_irq,
+ "demux");
+ irq_set_chip_data(virq, lnw);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static const struct irq_domain_ops lnw_gpio_irq_ops = {
+ .map = lnw_gpio_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
#ifdef CONFIG_PM
static int lnw_gpio_runtime_resume(struct device *dev)
{
@@ -300,23 +336,22 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
void *base;
- int i;
resource_size_t start, len;
struct lnw_gpio *lnw;
- u32 irq_base;
u32 gpio_base;
int retval = 0;
+ int ngpio = id->driver_data;
retval = pci_enable_device(pdev);
if (retval)
- goto done;
+ return retval;
retval = pci_request_regions(pdev, "langwell_gpio");
if (retval) {
dev_err(&pdev->dev, "error requesting resources\n");
goto err2;
}
- /* get the irq_base from bar1 */
+ /* get the gpio_base from bar1 */
start = pci_resource_start(pdev, 1);
len = pci_resource_len(pdev, 1);
base = ioremap_nocache(start, len);
@@ -324,28 +359,32 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "error mapping bar1\n");
goto err3;
}
- irq_base = *(u32 *)base;
gpio_base = *((u32 *)base + 1);
/* release the IO mapping, since we already get the info from bar1 */
iounmap(base);
/* get the register base from bar0 */
start = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
- base = ioremap_nocache(start, len);
+ base = devm_ioremap_nocache(&pdev->dev, start, len);
if (!base) {
dev_err(&pdev->dev, "error mapping bar0\n");
retval = -EFAULT;
goto err3;
}
- lnw = kzalloc(sizeof(struct lnw_gpio), GFP_KERNEL);
+ lnw = devm_kzalloc(&pdev->dev, sizeof(struct lnw_gpio), GFP_KERNEL);
if (!lnw) {
dev_err(&pdev->dev, "can't allocate langwell_gpio chip data\n");
retval = -ENOMEM;
- goto err4;
+ goto err3;
}
+
+ lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
+ &lnw_gpio_irq_ops, lnw);
+ if (!lnw->domain) {
+ retval = -ENOMEM;
+ goto err3;
+ }
+
lnw->reg_base = base;
- lnw->irq_base = irq_base;
lnw->chip.label = dev_name(&pdev->dev);
lnw->chip.request = lnw_gpio_request;
lnw->chip.direction_input = lnw_gpio_direction_input;
@@ -354,38 +393,32 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
lnw->chip.set = lnw_gpio_set;
lnw->chip.to_irq = lnw_gpio_to_irq;
lnw->chip.base = gpio_base;
- lnw->chip.ngpio = id->driver_data;
+ lnw->chip.ngpio = ngpio;
lnw->chip.can_sleep = 0;
lnw->pdev = pdev;
pci_set_drvdata(pdev, lnw);
retval = gpiochip_add(&lnw->chip);
if (retval) {
dev_err(&pdev->dev, "langwell gpiochip_add error %d\n", retval);
- goto err5;
+ goto err3;
}
+
+ lnw_irq_init_hw(lnw);
+
irq_set_handler_data(pdev->irq, lnw);
irq_set_chained_handler(pdev->irq, lnw_irq_handler);
- for (i = 0; i < lnw->chip.ngpio; i++) {
- irq_set_chip_and_handler_name(i + lnw->irq_base, &lnw_irqchip,
- handle_simple_irq, "demux");
- irq_set_chip_data(i + lnw->irq_base, lnw);
- }
spin_lock_init(&lnw->lock);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
- goto done;
-err5:
- kfree(lnw);
-err4:
- iounmap(base);
+ return 0;
+
err3:
pci_release_regions(pdev);
err2:
pci_disable_device(pdev);
-done:
return retval;
}
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 61c2d08d37b6..c2199beca98a 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -21,6 +21,9 @@
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
#include <mach/hardware.h>
#include <mach/platform.h>
@@ -454,10 +457,57 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
},
};
+/* Empty now, can be removed later when mach-lpc32xx is finally switched over
+ * to DT support
+ */
void __init lpc32xx_gpio_init(void)
{
+}
+
+static int lpc32xx_of_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec, u32 *flags)
+{
+ /* Is this the correct bank? */
+ u32 bank = gpiospec->args[0];
+ if ((bank >= ARRAY_SIZE(lpc32xx_gpiochip) ||
+ (gc != &lpc32xx_gpiochip[bank].chip)))
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpiospec->args[2];
+ return gpiospec->args[1];
+}
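+
+/*
+ * Illustrative sketch, not part of this patch: with the three-cell binding
+ * registered in lpc32xx_gpio_probe() below, a hypothetical consumer property
+ *
+ *	gpios = <&gpio 3 5 0>;
+ *
+ * (the phandle label is made up) selects bank 3, pin 5, flags 0:
+ * lpc32xx_of_xlate() checks that bank 3 matches the gpio_chip doing the
+ * lookup, stores the flags cell and returns pin number 5 within that bank.
+ */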
+
+static int __devinit lpc32xx_gpio_probe(struct platform_device *pdev)
+{
int i;
- for (i = 0; i < ARRAY_SIZE(lpc32xx_gpiochip); i++)
+ for (i = 0; i < ARRAY_SIZE(lpc32xx_gpiochip); i++) {
+ if (pdev->dev.of_node) {
+ lpc32xx_gpiochip[i].chip.of_xlate = lpc32xx_of_xlate;
+ lpc32xx_gpiochip[i].chip.of_gpio_n_cells = 3;
+ lpc32xx_gpiochip[i].chip.of_node = pdev->dev.of_node;
+ }
gpiochip_add(&lpc32xx_gpiochip[i].chip);
+ }
+
+ return 0;
}
+
+#ifdef CONFIG_OF
+static struct of_device_id lpc32xx_gpio_of_match[] __devinitdata = {
+ { .compatible = "nxp,lpc3220-gpio", },
+ { },
+};
+#endif
+
+static struct platform_driver lpc32xx_gpio_driver = {
+ .driver = {
+ .name = "lpc32xx-gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(lpc32xx_gpio_of_match),
+ },
+ .probe = lpc32xx_gpio_probe,
+};
+
+module_platform_driver(lpc32xx_gpio_driver);
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index c5d83a8a91c2..0f425189de11 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -353,7 +353,7 @@ static void mcp23s08_dbg_show(struct seq_file *s, struct gpio_chip *chip)
chip->base + t, bank, t, label,
(mcp->cache[MCP_IODIR] & mask) ? "in " : "out",
(mcp->cache[MCP_GPIO] & mask) ? "hi" : "lo",
- (mcp->cache[MCP_GPPU] & mask) ? " " : "up");
+ (mcp->cache[MCP_GPPU] & mask) ? "up" : " ");
/* NOTE: ignoring the irq-related registers */
seq_printf(s, "\n");
}
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index f0febe5b8221..db01f151d41c 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -611,17 +611,7 @@ static struct pci_driver ioh_gpio_driver = {
.resume = ioh_gpio_resume
};
-static int __init ioh_gpio_pci_init(void)
-{
- return pci_register_driver(&ioh_gpio_driver);
-}
-module_init(ioh_gpio_pci_init);
-
-static void __exit ioh_gpio_pci_exit(void)
-{
- pci_unregister_driver(&ioh_gpio_driver);
-}
-module_exit(ioh_gpio_pci_exit);
+module_pci_driver(ioh_gpio_driver);
MODULE_DESCRIPTION("OKI SEMICONDUCTOR ML-IOH series GPIO Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
new file mode 100644
index 000000000000..2983dfbd0668
--- /dev/null
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -0,0 +1,158 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * By attaching hardware latches to the EBU it is possible to create
+ * output-only gpios. This driver configures a special memory address, which,
+ * when written to, outputs 16 bits to the latches.
+ */
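+
+/*
+ * A minimal consumer-side sketch (assumed typical usage, not from this
+ * patch): once the chip is registered, the latched outputs behave like any
+ * other GPIO through the legacy integer API, e.g.
+ *
+ *	int gpio = 42;	(hypothetical number assigned to this bank)
+ *	if (!gpio_request(gpio, "led") && !gpio_direction_output(gpio, 1))
+ *		gpio_set_value(gpio, 0);
+ *
+ * each call ends up in ltq_mm_dir_out()/ltq_mm_set() below, which update the
+ * 16 bit shadow word and write it out through ltq_mm_apply().
+ */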
+
+#define LTQ_EBU_BUSCON 0x1e7ff /* 16 bit access, slowest timing */
+#define LTQ_EBU_WP 0x80000000 /* write protect bit */
+
+struct ltq_mm {
+ struct of_mm_gpio_chip mmchip;
+ u16 shadow; /* shadow the latches state */
+};
+
+/**
+ * ltq_mm_apply() - write the shadow value to the ebu address.
+ * @chip: Pointer to our private data structure.
+ *
+ * Write the shadow value to the EBU to set the gpios. We need to take the
+ * global EBU lock to make sure that PCI/MTD don't break.
+ */
+static void ltq_mm_apply(struct ltq_mm *chip)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+ ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
+ __raw_writew(chip->shadow, chip->mmchip.regs);
+ ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
+ spin_unlock_irqrestore(&ebu_lock, flags);
+}
+
+/**
+ * ltq_mm_set() - gpio_chip->set - set gpios.
+ * @gc: Pointer to gpio_chip device structure.
+ * @offset: GPIO signal number.
+ * @value: Value to be written to specified signal.
+ *
+ * Set the shadow value and call ltq_mm_apply.
+ */
+static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct ltq_mm *chip =
+ container_of(mm_gc, struct ltq_mm, mmchip);
+
+ if (value)
+ chip->shadow |= (1 << offset);
+ else
+ chip->shadow &= ~(1 << offset);
+ ltq_mm_apply(chip);
+}
+
+/**
+ * ltq_mm_dir_out() - gpio_chip->dir_out - set gpio direction.
+ * @gc: Pointer to gpio_chip device structure.
+ * @offset: GPIO signal number.
+ * @value: Value to be written to specified signal.
+ *
+ * Same as ltq_mm_set, always returns 0.
+ */
+static int ltq_mm_dir_out(struct gpio_chip *gc, unsigned offset, int value)
+{
+ ltq_mm_set(gc, offset, value);
+
+ return 0;
+}
+
+/**
+ * ltq_mm_save_regs() - Set initial values of GPIO pins
+ * @mm_gc: pointer to memory mapped GPIO chip structure
+ */
+static void ltq_mm_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+ struct ltq_mm *chip =
+ container_of(mm_gc, struct ltq_mm, mmchip);
+
+ /* tell the ebu controller which memory address we will be using */
+ ltq_ebu_w32(CPHYSADDR(chip->mmchip.regs) | 0x1, LTQ_EBU_ADDRSEL1);
+
+ ltq_mm_apply(chip);
+}
+
+static int ltq_mm_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct ltq_mm *chip;
+ const __be32 *shadow;
+ int ret = 0;
+
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get memory resource\n");
+ return -ENOENT;
+ }
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->mmchip.gc.ngpio = 16;
+ chip->mmchip.gc.label = "gpio-mm-ltq";
+ chip->mmchip.gc.direction_output = ltq_mm_dir_out;
+ chip->mmchip.gc.set = ltq_mm_set;
+ chip->mmchip.save_regs = ltq_mm_save_regs;
+
+ /* store the shadow value if one was passed by the devicetree */
+ shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
+ if (shadow)
+ chip->shadow = be32_to_cpu(*shadow);
+
+ ret = of_mm_gpiochip_add(pdev->dev.of_node, &chip->mmchip);
+ if (ret)
+ kfree(chip);
+ return ret;
+}
+
+static const struct of_device_id ltq_mm_match[] = {
+ { .compatible = "lantiq,gpio-mm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_mm_match);
+
+static struct platform_driver ltq_mm_driver = {
+ .probe = ltq_mm_probe,
+ .driver = {
+ .name = "gpio-mm-ltq",
+ .owner = THIS_MODULE,
+ .of_match_table = ltq_mm_match,
+ },
+};
+
+static int __init ltq_mm_init(void)
+{
+ return platform_driver_register(&ltq_mm_driver);
+}
+
+subsys_initcall(ltq_mm_init);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index e6568c19c939..5a1817eedd1b 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -163,7 +163,8 @@ static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
if (mask)
generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq,
32 - ffs(mask)));
- chip->irq_eoi(&desc->irq_data);
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
}
static void mpc8xxx_irq_unmask(struct irq_data *d)
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
new file mode 100644
index 000000000000..71a838f44501
--- /dev/null
+++ b/drivers/gpio/gpio-msic.c
@@ -0,0 +1,339 @@
+/*
+ * Intel Medfield MSIC GPIO driver
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * Author: Mathias Nyman <mathias.nyman@linux.intel.com>
+ * Based on intel_pmic_gpio.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_msic.h>
+
+/* the offset for the mapping of global gpio pin to irq */
+#define MSIC_GPIO_IRQ_OFFSET 0x100
+
+#define MSIC_GPIO_DIR_IN 0
+#define MSIC_GPIO_DIR_OUT BIT(5)
+#define MSIC_GPIO_TRIG_FALL BIT(1)
+#define MSIC_GPIO_TRIG_RISE BIT(2)
+
+/* masks for msic gpio output GPIOxxxxCTLO registers */
+#define MSIC_GPIO_DIR_MASK BIT(5)
+#define MSIC_GPIO_DRV_MASK BIT(4)
+#define MSIC_GPIO_REN_MASK BIT(3)
+#define MSIC_GPIO_RVAL_MASK (BIT(2) | BIT(1))
+#define MSIC_GPIO_DOUT_MASK BIT(0)
+
+/* masks for msic gpio input GPIOxxxxCTLI registers */
+#define MSIC_GPIO_GLBYP_MASK BIT(5)
+#define MSIC_GPIO_DBNC_MASK (BIT(4) | BIT(3))
+#define MSIC_GPIO_INTCNT_MASK (BIT(2) | BIT(1))
+#define MSIC_GPIO_DIN_MASK BIT(0)
+
+#define MSIC_NUM_GPIO 24
+
+struct msic_gpio {
+ struct platform_device *pdev;
+ struct mutex buslock;
+ struct gpio_chip chip;
+ int irq;
+ unsigned irq_base;
+ unsigned long trig_change_mask;
+ unsigned trig_type;
+};
+
+/*
+ * MSIC has 24 gpios, 16 low voltage (1.2-1.8v) and 8 high voltage (3v).
+ * Both the high and low voltage gpios are divided in two banks.
+ * GPIOs are numbered with GPIO0LV0 as gpio_base in the following order:
+ * GPIO0LV0..GPIO0LV7: low voltage, bank 0, gpio_base
+ * GPIO1LV0..GPIO1LV7: low voltage, bank 1, gpio_base + 8
+ * GPIO0HV0..GPIO0HV3: high voltage, bank 0, gpio_base + 16
+ * GPIO1HV0..GPIO1HV3: high voltage, bank 1, gpio_base + 20
+ */
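+
+/*
+ * Worked example of the numbering above (illustrative only): offset 10 is
+ * GPIO1LV2, so msic_gpio_to_ireg() below yields
+ * INTEL_MSIC_GPIO1LV0CTLI - 10 + 8, and msic_gpio_to_oreg() picks the
+ * matching CTLO register; offsets 16..19 and 20..23 select the two
+ * high-voltage banks in the same way.
+ */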
+
+static int msic_gpio_to_ireg(unsigned offset)
+{
+ if (offset >= MSIC_NUM_GPIO)
+ return -EINVAL;
+
+ if (offset < 8)
+ return INTEL_MSIC_GPIO0LV0CTLI - offset;
+ if (offset < 16)
+ return INTEL_MSIC_GPIO1LV0CTLI - offset + 8;
+ if (offset < 20)
+ return INTEL_MSIC_GPIO0HV0CTLI - offset + 16;
+
+ return INTEL_MSIC_GPIO1HV0CTLI - offset + 20;
+}
+
+static int msic_gpio_to_oreg(unsigned offset)
+{
+ if (offset >= MSIC_NUM_GPIO)
+ return -EINVAL;
+
+ if (offset < 8)
+ return INTEL_MSIC_GPIO0LV0CTLO - offset;
+ if (offset < 16)
+ return INTEL_MSIC_GPIO1LV0CTLO - offset + 8;
+ if (offset < 20)
+ return INTEL_MSIC_GPIO0HV0CTLO - offset + 16;
+
+ return INTEL_MSIC_GPIO1HV0CTLO - offset + 20;
+}
+
+static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ int reg;
+
+ reg = msic_gpio_to_oreg(offset);
+ if (reg < 0)
+ return reg;
+
+ return intel_msic_reg_update(reg, MSIC_GPIO_DIR_IN, MSIC_GPIO_DIR_MASK);
+}
+
+static int msic_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int reg;
+ unsigned mask;
+
+ value = (!!value) | MSIC_GPIO_DIR_OUT;
+ mask = MSIC_GPIO_DIR_MASK | MSIC_GPIO_DOUT_MASK;
+
+ reg = msic_gpio_to_oreg(offset);
+ if (reg < 0)
+ return reg;
+
+ return intel_msic_reg_update(reg, value, mask);
+}
+
+static int msic_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ u8 r;
+ int ret;
+ int reg;
+
+ reg = msic_gpio_to_ireg(offset);
+ if (reg < 0)
+ return reg;
+
+ ret = intel_msic_reg_read(reg, &r);
+ if (ret < 0)
+ return ret;
+
+ return r & MSIC_GPIO_DIN_MASK;
+}
+
+static void msic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ int reg;
+
+ reg = msic_gpio_to_oreg(offset);
+ if (reg < 0)
+ return;
+
+ intel_msic_reg_update(reg, !!value, MSIC_GPIO_DOUT_MASK);
+}
+
+/*
+ * This is called from genirq with mg->buslock locked and
+ * irq_desc->lock held. We cannot access the SCU bus here, so we
+ * record the change and apply it in the bus_sync_unlock() callback below.
+ */
+static int msic_irq_type(struct irq_data *data, unsigned type)
+{
+ struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
+ u32 gpio = data->irq - mg->irq_base;
+
+ if (gpio >= mg->chip.ngpio)
+ return -EINVAL;
+
+ /* mark for which gpio the trigger changed, protected by buslock */
+ mg->trig_change_mask |= (1 << gpio);
+ mg->trig_type = type;
+
+ return 0;
+}
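+
+/*
+ * Flow sketch (assumed typical usage, not part of this driver): a consumer
+ * doing, for a hypothetical MSIC GPIO irq,
+ *
+ *	irq_set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
+ *
+ * is wrapped by genirq in msic_bus_lock()/msic_bus_sync_unlock(), so
+ * msic_irq_type() above only records the request in trig_change_mask and the
+ * actual GPIOxxxxCTLI update happens once the bus lock is released.
+ */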
+
+static int msic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct msic_gpio *mg = container_of(chip, struct msic_gpio, chip);
+ return mg->irq_base + offset;
+}
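+
+/*
+ * For example (numbers are hypothetical): with pdata->gpio_base = 200,
+ * msic_gpio_to_irq() maps GPIO offset 5 to irq 200 + 0x100 + 5 = 461, the
+ * same scheme platform_msic_gpio_probe() uses when it installs the per-pin
+ * handlers below.
+ */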
+
+static void msic_bus_lock(struct irq_data *data)
+{
+ struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
+ mutex_lock(&mg->buslock);
+}
+
+static void msic_bus_sync_unlock(struct irq_data *data)
+{
+ struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
+ int offset;
+ int reg;
+ u8 trig = 0;
+
+ /*
+  * We can only get one change at a time as the buslock covers the
+  * entire transaction. The irq_desc->lock is dropped before we are
+  * called but that is fine.
+  */
+ if (mg->trig_change_mask) {
+ offset = __ffs(mg->trig_change_mask);
+
+ reg = msic_gpio_to_ireg(offset);
+ if (reg < 0)
+ goto out;
+
+ if (mg->trig_type & IRQ_TYPE_EDGE_RISING)
+ trig |= MSIC_GPIO_TRIG_RISE;
+ if (mg->trig_type & IRQ_TYPE_EDGE_FALLING)
+ trig |= MSIC_GPIO_TRIG_FALL;
+
+ intel_msic_reg_update(reg, trig, MSIC_GPIO_INTCNT_MASK);
+ mg->trig_change_mask = 0;
+ }
+out:
+ mutex_unlock(&mg->buslock);
+}
+
+/* Firmware does all the masking and unmasking for us, no masking here. */
+static void msic_irq_unmask(struct irq_data *data) { }
+
+static void msic_irq_mask(struct irq_data *data) { }
+
+static struct irq_chip msic_irqchip = {
+ .name = "MSIC-GPIO",
+ .irq_mask = msic_irq_mask,
+ .irq_unmask = msic_irq_unmask,
+ .irq_set_type = msic_irq_type,
+ .irq_bus_lock = msic_bus_lock,
+ .irq_bus_sync_unlock = msic_bus_sync_unlock,
+};
+
+static void msic_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct irq_data *data = irq_desc_get_irq_data(desc);
+ struct msic_gpio *mg = irq_data_get_irq_handler_data(data);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ struct intel_msic *msic = pdev_to_intel_msic(mg->pdev);
+ int i;
+ int bitnr;
+ u8 pin;
+ unsigned long pending = 0;
+
+ for (i = 0; i < (mg->chip.ngpio / BITS_PER_BYTE); i++) {
+ intel_msic_irq_read(msic, INTEL_MSIC_GPIO0LVIRQ + i, &pin);
+ pending = pin;
+
+ if (pending) {
+ for_each_set_bit(bitnr, &pending, BITS_PER_BYTE)
+ generic_handle_irq(mg->irq_base +
+ (i * BITS_PER_BYTE) + bitnr);
+ }
+ }
+ chip->irq_eoi(data);
+}
+
+static int __devinit platform_msic_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_msic_gpio_pdata *pdata = dev->platform_data;
+ struct msic_gpio *mg;
+ int irq = platform_get_irq(pdev, 0);
+ int retval;
+ int i;
+
+ if (irq < 0) {
+ dev_err(dev, "no IRQ line\n");
+ return -EINVAL;
+ }
+
+ if (!pdata || !pdata->gpio_base) {
+ dev_err(dev, "incorrect or missing platform data\n");
+ return -EINVAL;
+ }
+
+ mg = kzalloc(sizeof(*mg), GFP_KERNEL);
+ if (!mg)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, mg);
+
+ mg->pdev = pdev;
+ mg->irq = irq;
+ mg->irq_base = pdata->gpio_base + MSIC_GPIO_IRQ_OFFSET;
+ mg->chip.label = "msic_gpio";
+ mg->chip.direction_input = msic_gpio_direction_input;
+ mg->chip.direction_output = msic_gpio_direction_output;
+ mg->chip.get = msic_gpio_get;
+ mg->chip.set = msic_gpio_set;
+ mg->chip.to_irq = msic_gpio_to_irq;
+ mg->chip.base = pdata->gpio_base;
+ mg->chip.ngpio = MSIC_NUM_GPIO;
+ mg->chip.can_sleep = 1;
+ mg->chip.dev = dev;
+
+ mutex_init(&mg->buslock);
+
+ retval = gpiochip_add(&mg->chip);
+ if (retval) {
+ dev_err(dev, "Adding MSIC gpio chip failed\n");
+ goto err;
+ }
+
+ for (i = 0; i < mg->chip.ngpio; i++) {
+ irq_set_chip_data(i + mg->irq_base, mg);
+ irq_set_chip_and_handler_name(i + mg->irq_base,
+ &msic_irqchip,
+ handle_simple_irq,
+ "demux");
+ }
+ irq_set_chained_handler(mg->irq, msic_gpio_irq_handler);
+ irq_set_handler_data(mg->irq, mg);
+
+ return 0;
+err:
+ kfree(mg);
+ return retval;
+}
+
+static struct platform_driver platform_msic_gpio_driver = {
+ .driver = {
+ .name = "msic_gpio",
+ .owner = THIS_MODULE,
+ },
+ .probe = platform_msic_gpio_probe,
+};
+
+static int __init platform_msic_gpio_init(void)
+{
+ return platform_driver_register(&platform_msic_gpio_driver);
+}
+
+subsys_initcall(platform_msic_gpio_init);
+
+MODULE_AUTHOR("Mathias Nyman <mathias.nyman@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Medfield MSIC GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index e79147634573..c337143b18f8 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -417,7 +417,7 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
err = bgpio_init(&port->bgc, &pdev->dev, 4,
port->base + GPIO_PSR,
port->base + GPIO_DR, NULL,
- port->base + GPIO_GDIR, NULL, false);
+ port->base + GPIO_GDIR, NULL, 0);
if (err)
goto out_iounmap;
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 385c58e8405b..39e495669961 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -25,23 +25,25 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/basic_mmio_gpio.h>
#include <linux/module.h>
-#include <mach/mxs.h>
#define MXS_SET 0x4
#define MXS_CLR 0x8
-#define PINCTRL_DOUT(n) ((cpu_is_mx23() ? 0x0500 : 0x0700) + (n) * 0x10)
-#define PINCTRL_DIN(n) ((cpu_is_mx23() ? 0x0600 : 0x0900) + (n) * 0x10)
-#define PINCTRL_DOE(n) ((cpu_is_mx23() ? 0x0700 : 0x0b00) + (n) * 0x10)
-#define PINCTRL_PIN2IRQ(n) ((cpu_is_mx23() ? 0x0800 : 0x1000) + (n) * 0x10)
-#define PINCTRL_IRQEN(n) ((cpu_is_mx23() ? 0x0900 : 0x1100) + (n) * 0x10)
-#define PINCTRL_IRQLEV(n) ((cpu_is_mx23() ? 0x0a00 : 0x1200) + (n) * 0x10)
-#define PINCTRL_IRQPOL(n) ((cpu_is_mx23() ? 0x0b00 : 0x1300) + (n) * 0x10)
-#define PINCTRL_IRQSTAT(n) ((cpu_is_mx23() ? 0x0c00 : 0x1400) + (n) * 0x10)
+#define PINCTRL_DOUT(p) ((is_imx23_gpio(p) ? 0x0500 : 0x0700) + (p->id) * 0x10)
+#define PINCTRL_DIN(p) ((is_imx23_gpio(p) ? 0x0600 : 0x0900) + (p->id) * 0x10)
+#define PINCTRL_DOE(p) ((is_imx23_gpio(p) ? 0x0700 : 0x0b00) + (p->id) * 0x10)
+#define PINCTRL_PIN2IRQ(p) ((is_imx23_gpio(p) ? 0x0800 : 0x1000) + (p->id) * 0x10)
+#define PINCTRL_IRQEN(p) ((is_imx23_gpio(p) ? 0x0900 : 0x1100) + (p->id) * 0x10)
+#define PINCTRL_IRQLEV(p) ((is_imx23_gpio(p) ? 0x0a00 : 0x1200) + (p->id) * 0x10)
+#define PINCTRL_IRQPOL(p) ((is_imx23_gpio(p) ? 0x0b00 : 0x1300) + (p->id) * 0x10)
+#define PINCTRL_IRQSTAT(p) ((is_imx23_gpio(p) ? 0x0c00 : 0x1400) + (p->id) * 0x10)
#define GPIO_INT_FALL_EDGE 0x0
#define GPIO_INT_LOW_LEV 0x1
@@ -52,14 +54,30 @@
#define irq_to_gpio(irq) ((irq) - MXS_GPIO_IRQ_START)
+enum mxs_gpio_id {
+ IMX23_GPIO,
+ IMX28_GPIO,
+};
+
struct mxs_gpio_port {
void __iomem *base;
int id;
int irq;
int virtual_irq_start;
struct bgpio_chip bgc;
+ enum mxs_gpio_id devid;
};
+static inline int is_imx23_gpio(struct mxs_gpio_port *port)
+{
+ return port->devid == IMX23_GPIO;
+}
+
+static inline int is_imx28_gpio(struct mxs_gpio_port *port)
+{
+ return port->devid == IMX28_GPIO;
+}
+
/* Note: This driver assumes 32 GPIOs are handled in one register */
static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
@@ -89,21 +107,21 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
}
/* set level or edge */
- pin_addr = port->base + PINCTRL_IRQLEV(port->id);
+ pin_addr = port->base + PINCTRL_IRQLEV(port);
if (edge & GPIO_INT_LEV_MASK)
writel(pin_mask, pin_addr + MXS_SET);
else
writel(pin_mask, pin_addr + MXS_CLR);
/* set polarity */
- pin_addr = port->base + PINCTRL_IRQPOL(port->id);
+ pin_addr = port->base + PINCTRL_IRQPOL(port);
if (edge & GPIO_INT_POL_MASK)
writel(pin_mask, pin_addr + MXS_SET);
else
writel(pin_mask, pin_addr + MXS_CLR);
writel(1 << (gpio & 0x1f),
- port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);
+ port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
return 0;
}
@@ -117,8 +135,8 @@ static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
desc->irq_data.chip->irq_ack(&desc->irq_data);
- irq_stat = readl(port->base + PINCTRL_IRQSTAT(port->id)) &
- readl(port->base + PINCTRL_IRQEN(port->id));
+ irq_stat = readl(port->base + PINCTRL_IRQSTAT(port)) &
+ readl(port->base + PINCTRL_IRQEN(port));
while (irq_stat != 0) {
int irqoffset = fls(irq_stat) - 1;
@@ -164,8 +182,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port)
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_set_type = mxs_gpio_set_irq_type;
ct->chip.irq_set_wake = mxs_gpio_set_wake_irq;
- ct->regs.ack = PINCTRL_IRQSTAT(port->id) + MXS_CLR;
- ct->regs.mask = PINCTRL_IRQEN(port->id);
+ ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR;
+ ct->regs.mask = PINCTRL_IRQEN(port);
irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
}
@@ -179,60 +197,83 @@ static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
return port->virtual_irq_start + offset;
}
+static struct platform_device_id mxs_gpio_ids[] = {
+ {
+ .name = "imx23-gpio",
+ .driver_data = IMX23_GPIO,
+ }, {
+ .name = "imx28-gpio",
+ .driver_data = IMX28_GPIO,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxs_gpio_ids);
+
+static const struct of_device_id mxs_gpio_dt_ids[] = {
+ { .compatible = "fsl,imx23-gpio", .data = (void *) IMX23_GPIO, },
+ { .compatible = "fsl,imx28-gpio", .data = (void *) IMX28_GPIO, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_gpio_dt_ids);
+
static int __devinit mxs_gpio_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(mxs_gpio_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *parent;
static void __iomem *base;
struct mxs_gpio_port *port;
struct resource *iores = NULL;
int err;
- port = kzalloc(sizeof(struct mxs_gpio_port), GFP_KERNEL);
+ port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
- port->id = pdev->id;
+ if (np) {
+ port->id = of_alias_get_id(np, "gpio");
+ if (port->id < 0)
+ return port->id;
+ port->devid = (enum mxs_gpio_id) of_id->data;
+ } else {
+ port->id = pdev->id;
+ port->devid = pdev->id_entry->driver_data;
+ }
port->virtual_irq_start = MXS_GPIO_IRQ_START + port->id * 32;
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return port->irq;
+
/*
* map memory region only once, as all the gpio ports
* share the same one
*/
if (!base) {
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores) {
- err = -ENODEV;
- goto out_kfree;
- }
-
- if (!request_mem_region(iores->start, resource_size(iores),
- pdev->name)) {
- err = -EBUSY;
- goto out_kfree;
- }
-
- base = ioremap(iores->start, resource_size(iores));
- if (!base) {
- err = -ENOMEM;
- goto out_release_mem;
+ if (np) {
+ parent = of_get_parent(np);
+ base = of_iomap(parent, 0);
+ of_node_put(parent);
+ } else {
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_request_and_ioremap(&pdev->dev, iores);
}
+ if (!base)
+ return -EADDRNOTAVAIL;
}
port->base = base;
- port->irq = platform_get_irq(pdev, 0);
- if (port->irq < 0) {
- err = -EINVAL;
- goto out_iounmap;
- }
-
/*
* select the pin interrupt functionality but initially
* disable the interrupts
*/
- writel(~0U, port->base + PINCTRL_PIN2IRQ(port->id));
- writel(0, port->base + PINCTRL_IRQEN(port->id));
+ writel(~0U, port->base + PINCTRL_PIN2IRQ(port));
+ writel(0, port->base + PINCTRL_IRQEN(port));
/* clear address has to be used to clear IRQSTAT bits */
- writel(~0U, port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);
+ writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
/* gpio-mxs can be a generic irq chip */
mxs_gpio_init_gc(port);
@@ -242,41 +283,32 @@ static int __devinit mxs_gpio_probe(struct platform_device *pdev)
irq_set_handler_data(port->irq, port);
err = bgpio_init(&port->bgc, &pdev->dev, 4,
- port->base + PINCTRL_DIN(port->id),
- port->base + PINCTRL_DOUT(port->id), NULL,
- port->base + PINCTRL_DOE(port->id), NULL, false);
+ port->base + PINCTRL_DIN(port),
+ port->base + PINCTRL_DOUT(port), NULL,
+ port->base + PINCTRL_DOE(port), NULL, 0);
if (err)
- goto out_iounmap;
+ return err;
port->bgc.gc.to_irq = mxs_gpio_to_irq;
port->bgc.gc.base = port->id * 32;
err = gpiochip_add(&port->bgc.gc);
- if (err)
- goto out_bgpio_remove;
+ if (err) {
+ bgpio_remove(&port->bgc);
+ return err;
+ }
return 0;
-
-out_bgpio_remove:
- bgpio_remove(&port->bgc);
-out_iounmap:
- if (iores)
- iounmap(port->base);
-out_release_mem:
- if (iores)
- release_mem_region(iores->start, resource_size(iores));
-out_kfree:
- kfree(port);
- dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err);
- return err;
}
static struct platform_driver mxs_gpio_driver = {
.driver = {
.name = "gpio-mxs",
.owner = THIS_MODULE,
+ .of_match_table = mxs_gpio_dt_ids,
},
.probe = mxs_gpio_probe,
+ .id_table = mxs_gpio_ids,
};
static int __init mxs_gpio_init(void)
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 4461540653a8..c4ed1722734c 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -57,14 +57,10 @@ struct gpio_bank {
u16 irq;
int irq_base;
struct irq_domain *domain;
- u32 suspend_wakeup;
- u32 saved_wakeup;
u32 non_wakeup_gpios;
u32 enabled_non_wakeup_gpios;
struct gpio_regs context;
u32 saved_datain;
- u32 saved_fallingdetect;
- u32 saved_risingdetect;
u32 level_mask;
u32 toggle_mask;
spinlock_t lock;
@@ -516,11 +512,11 @@ static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
spin_lock_irqsave(&bank->lock, flags);
if (enable)
- bank->suspend_wakeup |= gpio_bit;
+ bank->context.wake_en |= gpio_bit;
else
- bank->suspend_wakeup &= ~gpio_bit;
+ bank->context.wake_en &= ~gpio_bit;
- __raw_writel(bank->suspend_wakeup, bank->base + bank->regs->wkup_en);
+ __raw_writel(bank->context.wake_en, bank->base + bank->regs->wkup_en);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -640,7 +636,6 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
u32 isr;
unsigned int gpio_irq, gpio_index;
struct gpio_bank *bank;
- u32 retrigger = 0;
int unmasked = 0;
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -677,8 +672,6 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
- isr |= retrigger;
- retrigger = 0;
if (!isr)
break;
@@ -789,8 +782,7 @@ static int omap_mpuio_suspend_noirq(struct device *dev)
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
- bank->saved_wakeup = __raw_readl(mask_reg);
- __raw_writel(0xffff & ~bank->suspend_wakeup, mask_reg);
+ __raw_writel(0xffff & ~bank->context.wake_en, mask_reg);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -805,7 +797,7 @@ static int omap_mpuio_resume_noirq(struct device *dev)
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
- __raw_writel(bank->saved_wakeup, mask_reg);
+ __raw_writel(bank->context.wake_en, mask_reg);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -1152,54 +1144,6 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_OMAP2PLUS
-#if defined(CONFIG_PM_SLEEP)
-static int omap_gpio_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct gpio_bank *bank = platform_get_drvdata(pdev);
- void __iomem *base = bank->base;
- void __iomem *wakeup_enable;
- unsigned long flags;
-
- if (!bank->mod_usage || !bank->loses_context)
- return 0;
-
- if (!bank->regs->wkup_en || !bank->suspend_wakeup)
- return 0;
-
- wakeup_enable = bank->base + bank->regs->wkup_en;
-
- spin_lock_irqsave(&bank->lock, flags);
- bank->saved_wakeup = __raw_readl(wakeup_enable);
- _gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
- _gpio_rmw(base, bank->regs->wkup_en, bank->suspend_wakeup, 1);
- spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
-}
-
-static int omap_gpio_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct gpio_bank *bank = platform_get_drvdata(pdev);
- void __iomem *base = bank->base;
- unsigned long flags;
-
- if (!bank->mod_usage || !bank->loses_context)
- return 0;
-
- if (!bank->regs->wkup_en || !bank->saved_wakeup)
- return 0;
-
- spin_lock_irqsave(&bank->lock, flags);
- _gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
- _gpio_rmw(base, bank->regs->wkup_en, bank->saved_wakeup, 1);
- spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
#if defined(CONFIG_PM_RUNTIME)
static void omap_gpio_restore_context(struct gpio_bank *bank);
@@ -1233,6 +1177,9 @@ static int omap_gpio_runtime_suspend(struct device *dev)
__raw_writel(wake_hi | bank->context.risingdetect,
bank->base + bank->regs->risingdetect);
+ if (!bank->enabled_non_wakeup_gpios)
+ goto update_gpio_context_count;
+
if (bank->power_mode != OFF_MODE) {
bank->power_mode = 0;
goto update_gpio_context_count;
@@ -1244,11 +1191,9 @@ static int omap_gpio_runtime_suspend(struct device *dev)
*/
bank->saved_datain = __raw_readl(bank->base +
bank->regs->datain);
- l1 = __raw_readl(bank->base + bank->regs->fallingdetect);
- l2 = __raw_readl(bank->base + bank->regs->risingdetect);
+ l1 = bank->context.fallingdetect;
+ l2 = bank->context.risingdetect;
- bank->saved_fallingdetect = l1;
- bank->saved_risingdetect = l2;
l1 &= ~bank->enabled_non_wakeup_gpios;
l2 &= ~bank->enabled_non_wakeup_gpios;
@@ -1290,16 +1235,10 @@ static int omap_gpio_runtime_resume(struct device *dev)
__raw_writel(bank->context.risingdetect,
bank->base + bank->regs->risingdetect);
- if (!bank->workaround_enabled) {
- spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
- }
-
if (bank->get_context_loss_count) {
context_lost_cnt_after =
bank->get_context_loss_count(bank->dev);
- if (context_lost_cnt_after != bank->context_loss_count ||
- !context_lost_cnt_after) {
+ if (context_lost_cnt_after != bank->context_loss_count) {
omap_gpio_restore_context(bank);
} else {
spin_unlock_irqrestore(&bank->lock, flags);
@@ -1307,9 +1246,14 @@ static int omap_gpio_runtime_resume(struct device *dev)
}
}
- __raw_writel(bank->saved_fallingdetect,
+ if (!bank->workaround_enabled) {
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+ }
+
+ __raw_writel(bank->context.fallingdetect,
bank->base + bank->regs->fallingdetect);
- __raw_writel(bank->saved_risingdetect,
+ __raw_writel(bank->context.risingdetect,
bank->base + bank->regs->risingdetect);
l = __raw_readl(bank->base + bank->regs->datain);
@@ -1326,14 +1270,15 @@ static int omap_gpio_runtime_resume(struct device *dev)
* No need to generate IRQs for the rising edge for gpio IRQs
* configured with falling edge only; and vice versa.
*/
- gen0 = l & bank->saved_fallingdetect;
+ gen0 = l & bank->context.fallingdetect;
gen0 &= bank->saved_datain;
- gen1 = l & bank->saved_risingdetect;
+ gen1 = l & bank->context.risingdetect;
gen1 &= ~(bank->saved_datain);
/* FIXME: Consider GPIO IRQs with level detections properly! */
- gen = l & (~(bank->saved_fallingdetect) & ~(bank->saved_risingdetect));
+ gen = l & (~(bank->context.fallingdetect) &
+ ~(bank->context.risingdetect));
/* Consider all GPIO IRQs needed to be updated */
gen |= gen0 | gen1;
@@ -1343,14 +1288,14 @@ static int omap_gpio_runtime_resume(struct device *dev)
old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
old1 = __raw_readl(bank->base + bank->regs->leveldetect1);
- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+ if (!bank->regs->irqstatus_raw0) {
__raw_writel(old0 | gen, bank->base +
bank->regs->leveldetect0);
__raw_writel(old1 | gen, bank->base +
bank->regs->leveldetect1);
}
- if (cpu_is_omap44xx()) {
+ if (bank->regs->irqstatus_raw0) {
__raw_writel(old0 | l, bank->base +
bank->regs->leveldetect0);
__raw_writel(old1 | l, bank->base +
@@ -1429,14 +1374,11 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
}
#endif /* CONFIG_PM_RUNTIME */
#else
-#define omap_gpio_suspend NULL
-#define omap_gpio_resume NULL
#define omap_gpio_runtime_suspend NULL
#define omap_gpio_runtime_resume NULL
#endif
static const struct dev_pm_ops gpio_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
NULL)
};
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d3f3e8f54561..1c313c710be3 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -28,6 +28,8 @@
#define PCA953X_INVERT 2
#define PCA953X_DIRECTION 3
+#define REG_ADDR_AI 0x80
+
#define PCA957X_IN 0
#define PCA957X_INVRT 1
#define PCA957X_BKEN 2
@@ -63,15 +65,15 @@ static const struct i2c_device_id pca953x_id[] = {
{ "pca6107", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
- /* NYET: { "tca6424", 24, }, */
+ { "tca6424", 24 | PCA953X_TYPE | PCA_INT, },
{ }
};
MODULE_DEVICE_TABLE(i2c, pca953x_id);
struct pca953x_chip {
unsigned gpio_start;
- uint16_t reg_output;
- uint16_t reg_direction;
+ u32 reg_output;
+ u32 reg_direction;
struct mutex i2c_lock;
#ifdef CONFIG_GPIO_PCA953X_IRQ
@@ -89,12 +91,20 @@ struct pca953x_chip {
int chip_type;
};
-static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
+static int pca953x_write_reg(struct pca953x_chip *chip, int reg, u32 val)
{
int ret = 0;
if (chip->gpio_chip.ngpio <= 8)
ret = i2c_smbus_write_byte_data(chip->client, reg, val);
+ else if (chip->gpio_chip.ngpio == 24) {
+ ret = i2c_smbus_write_word_data(chip->client,
+ (reg << 2) | REG_ADDR_AI,
+ val & 0xffff);
+ ret = i2c_smbus_write_byte_data(chip->client,
+ (reg << 2) + 2,
+ (val & 0xff0000) >> 16);
+ }
else {
switch (chip->chip_type) {
case PCA953X_TYPE:
@@ -121,12 +131,17 @@ static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
return 0;
}
-static int pca953x_read_reg(struct pca953x_chip *chip, int reg, uint16_t *val)
+static int pca953x_read_reg(struct pca953x_chip *chip, int reg, u32 *val)
{
int ret;
if (chip->gpio_chip.ngpio <= 8)
ret = i2c_smbus_read_byte_data(chip->client, reg);
+ else if (chip->gpio_chip.ngpio == 24) {
+ ret = i2c_smbus_read_word_data(chip->client, reg << 2);
+ ret |= (i2c_smbus_read_byte_data(chip->client,
+ (reg << 2) + 2)<<16);
+ }
else
ret = i2c_smbus_read_word_data(chip->client, reg << 1);
@@ -135,14 +150,14 @@ static int pca953x_read_reg(struct pca953x_chip *chip, int reg, uint16_t *val)
return ret;
}
- *val = (uint16_t)ret;
+ *val = (u32)ret;
return 0;
}
static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip;
- uint16_t reg_val;
+ uint reg_val;
int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -173,7 +188,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
unsigned off, int val)
{
struct pca953x_chip *chip;
- uint16_t reg_val;
+ uint reg_val;
int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -223,7 +238,7 @@ exit:
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip;
- uint16_t reg_val;
+ u32 reg_val;
int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -253,7 +268,7 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
struct pca953x_chip *chip;
- uint16_t reg_val;
+ u32 reg_val;
int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -386,7 +401,7 @@ static struct irq_chip pca953x_irq_chip = {
static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
{
- uint16_t cur_stat;
+ u32 cur_stat;
uint16_t old_stat;
uint16_t pending;
uint16_t trigger;
@@ -449,6 +464,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
{
struct i2c_client *client = chip->client;
int ret, offset = 0;
+ u32 temporary;
if (irq_base != -1
&& (id->driver_data & PCA_INT)) {
@@ -462,7 +478,8 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
offset = PCA957X_IN;
break;
}
- ret = pca953x_read_reg(chip, offset, &chip->irq_stat);
+ ret = pca953x_read_reg(chip, offset, &temporary);
+ chip->irq_stat = temporary;
if (ret)
goto out_failed;
@@ -603,7 +620,7 @@ out:
static int __devinit device_pca957x_init(struct pca953x_chip *chip, int invert)
{
int ret;
- uint16_t val = 0;
+ u32 val = 0;
/* Let every port in proper state, that could save power */
pca953x_write_reg(chip, PCA957X_PUPD, 0x0);
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 2cd958e0b822..139ad3e20011 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -538,17 +538,7 @@ static struct pci_driver pch_gpio_driver = {
.resume = pch_gpio_resume
};
-static int __init pch_gpio_pci_init(void)
-{
- return pci_register_driver(&pch_gpio_driver);
-}
-module_init(pch_gpio_pci_init);
-
-static void __exit pch_gpio_pci_exit(void)
-{
- pci_unregister_driver(&pch_gpio_driver);
-}
-module_exit(pch_gpio_pci_exit);
+module_pci_driver(pch_gpio_driver);
MODULE_DESCRIPTION("PCH GPIO PCI Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
new file mode 100644
index 000000000000..08428bf17718
--- /dev/null
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -0,0 +1,180 @@
+/*
+ * GPIO driver for RICOH583 power management chip.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * Based on code
+ * Copyright (C) 2011 RICOH COMPANY,LTD
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/mfd/rc5t583.h>
+
+struct rc5t583_gpio {
+ struct gpio_chip gpio_chip;
+ struct rc5t583 *rc5t583;
+};
+
+static inline struct rc5t583_gpio *to_rc5t583_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct rc5t583_gpio, gpio_chip);
+}
+
+static int rc5t583_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc);
+ struct device *parent = rc5t583_gpio->rc5t583->dev;
+ uint8_t val = 0;
+ int ret;
+
+ ret = rc5t583_read(parent, RC5T583_GPIO_MON_IOIN, &val);
+ if (ret < 0)
+ return ret;
+
+ return !!(val & BIT(offset));
+}
+
+static void rc5t583_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+{
+ struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc);
+ struct device *parent = rc5t583_gpio->rc5t583->dev;
+ if (val)
+ rc5t583_set_bits(parent, RC5T583_GPIO_IOOUT, BIT(offset));
+ else
+ rc5t583_clear_bits(parent, RC5T583_GPIO_IOOUT, BIT(offset));
+}
+
+static int rc5t583_gpio_dir_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc);
+ struct device *parent = rc5t583_gpio->rc5t583->dev;
+ int ret;
+
+ ret = rc5t583_clear_bits(parent, RC5T583_GPIO_IOSEL, BIT(offset));
+ if (ret < 0)
+ return ret;
+
+ /* Set pin to gpio mode */
+ return rc5t583_clear_bits(parent, RC5T583_GPIO_PGSEL, BIT(offset));
+}
+
+static int rc5t583_gpio_dir_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc);
+ struct device *parent = rc5t583_gpio->rc5t583->dev;
+ int ret;
+
+ rc5t583_gpio_set(gc, offset, value);
+ ret = rc5t583_set_bits(parent, RC5T583_GPIO_IOSEL, BIT(offset));
+ if (ret < 0)
+ return ret;
+
+ /* Set pin to gpio mode */
+ return rc5t583_clear_bits(parent, RC5T583_GPIO_PGSEL, BIT(offset));
+}
+
+static int rc5t583_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc);
+
+ if ((offset >= 0) && (offset < 8))
+ return rc5t583_gpio->rc5t583->irq_base +
+ RC5T583_IRQ_GPIO0 + offset;
+ return -EINVAL;
+}
+
+static void rc5t583_gpio_free(struct gpio_chip *gc, unsigned offset)
+{
+ struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc);
+ struct device *parent = rc5t583_gpio->rc5t583->dev;
+
+ rc5t583_set_bits(parent, RC5T583_GPIO_PGSEL, BIT(offset));
+}
+
+static int __devinit rc5t583_gpio_probe(struct platform_device *pdev)
+{
+ struct rc5t583 *rc5t583 = dev_get_drvdata(pdev->dev.parent);
+ struct rc5t583_platform_data *pdata = dev_get_platdata(rc5t583->dev);
+ struct rc5t583_gpio *rc5t583_gpio;
+
+ rc5t583_gpio = devm_kzalloc(&pdev->dev, sizeof(*rc5t583_gpio),
+ GFP_KERNEL);
+ if (!rc5t583_gpio) {
+ dev_warn(&pdev->dev, "Mem allocation for rc5t583_gpio failed");
+ return -ENOMEM;
+ }
+
+ rc5t583_gpio->gpio_chip.label = "gpio-rc5t583";
+ rc5t583_gpio->gpio_chip.owner = THIS_MODULE;
+ rc5t583_gpio->gpio_chip.free = rc5t583_gpio_free;
+ rc5t583_gpio->gpio_chip.direction_input = rc5t583_gpio_dir_input;
+ rc5t583_gpio->gpio_chip.direction_output = rc5t583_gpio_dir_output;
+ rc5t583_gpio->gpio_chip.set = rc5t583_gpio_set;
+ rc5t583_gpio->gpio_chip.get = rc5t583_gpio_get;
+ rc5t583_gpio->gpio_chip.to_irq = rc5t583_gpio_to_irq;
+ rc5t583_gpio->gpio_chip.ngpio = RC5T583_MAX_GPIO;
+ rc5t583_gpio->gpio_chip.can_sleep = 1;
+ rc5t583_gpio->gpio_chip.dev = &pdev->dev;
+ rc5t583_gpio->gpio_chip.base = -1;
+ rc5t583_gpio->rc5t583 = rc5t583;
+
+ if (pdata && pdata->gpio_base)
+ rc5t583_gpio->gpio_chip.base = pdata->gpio_base;
+
+ platform_set_drvdata(pdev, rc5t583_gpio);
+
+ return gpiochip_add(&rc5t583_gpio->gpio_chip);
+}
+
+static int __devexit rc5t583_gpio_remove(struct platform_device *pdev)
+{
+ struct rc5t583_gpio *rc5t583_gpio = platform_get_drvdata(pdev);
+
+ return gpiochip_remove(&rc5t583_gpio->gpio_chip);
+}
+
+static struct platform_driver rc5t583_gpio_driver = {
+ .driver = {
+ .name = "rc5t583-gpio",
+ .owner = THIS_MODULE,
+ },
+ .probe = rc5t583_gpio_probe,
+ .remove = __devexit_p(rc5t583_gpio_remove),
+};
+
+static int __init rc5t583_gpio_init(void)
+{
+ return platform_driver_register(&rc5t583_gpio_driver);
+}
+subsys_initcall(rc5t583_gpio_init);
+
+static void __exit rc5t583_gpio_exit(void)
+{
+ platform_driver_unregister(&rc5t583_gpio_driver);
+}
+module_exit(rc5t583_gpio_exit);
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("GPIO interface for RC5T583");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:rc5t583-gpio");
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index e991d9171961..7bb00448e13d 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2454,6 +2454,12 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
},
}, {
.chip = {
+ .base = EXYNOS5_GPC4(0),
+ .ngpio = EXYNOS5_GPIO_C4_NR,
+ .label = "GPC4",
+ },
+ }, {
+ .chip = {
.base = EXYNOS5_GPD0(0),
.ngpio = EXYNOS5_GPIO_D0_NR,
.label = "GPD0",
@@ -2716,14 +2722,227 @@ static __init void exynos_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip,
}
#endif /* defined(CONFIG_ARCH_EXYNOS) && defined(CONFIG_OF) */
+static __init void exynos4_gpiolib_init(void)
+{
+#ifdef CONFIG_CPU_EXYNOS4210
+ struct samsung_gpio_chip *chip;
+ int i, nr_chips;
+ void __iomem *gpio_base1, *gpio_base2, *gpio_base3;
+ int group = 0;
+ void __iomem *gpx_base;
+
+ /* gpio part1 */
+ gpio_base1 = ioremap(EXYNOS4_PA_GPIO1, SZ_4K);
+ if (gpio_base1 == NULL) {
+ pr_err("unable to ioremap for gpio_base1\n");
+ goto err_ioremap1;
+ }
+
+ chip = exynos4_gpios_1;
+ nr_chips = ARRAY_SIZE(exynos4_gpios_1);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS4_PA_GPIO1, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos4_gpios_1,
+ nr_chips, gpio_base1);
+
+ /* gpio part2 */
+ gpio_base2 = ioremap(EXYNOS4_PA_GPIO2, SZ_4K);
+ if (gpio_base2 == NULL) {
+ pr_err("unable to ioremap for gpio_base2\n");
+ goto err_ioremap2;
+ }
+
+ /* need to set base address for gpx */
+ chip = &exynos4_gpios_2[16];
+ gpx_base = gpio_base2 + 0xC00;
+ for (i = 0; i < 4; i++, chip++, gpx_base += 0x20)
+ chip->base = gpx_base;
+
+ chip = exynos4_gpios_2;
+ nr_chips = ARRAY_SIZE(exynos4_gpios_2);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS4_PA_GPIO2, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos4_gpios_2,
+ nr_chips, gpio_base2);
+
+ /* gpio part3 */
+ gpio_base3 = ioremap(EXYNOS4_PA_GPIO3, SZ_256);
+ if (gpio_base3 == NULL) {
+ pr_err("unable to ioremap for gpio_base3\n");
+ goto err_ioremap3;
+ }
+
+ chip = exynos4_gpios_3;
+ nr_chips = ARRAY_SIZE(exynos4_gpios_3);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS4_PA_GPIO3, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos4_gpios_3,
+ nr_chips, gpio_base3);
+
+#if defined(CONFIG_CPU_EXYNOS4210) && defined(CONFIG_S5P_GPIO_INT)
+ s5p_register_gpioint_bank(IRQ_GPIO_XA, 0, IRQ_GPIO1_NR_GROUPS);
+ s5p_register_gpioint_bank(IRQ_GPIO_XB, IRQ_GPIO1_NR_GROUPS, IRQ_GPIO2_NR_GROUPS);
+#endif
+
+ return;
+
+err_ioremap3:
+ iounmap(gpio_base2);
+err_ioremap2:
+ iounmap(gpio_base1);
+err_ioremap1:
+ return;
+#endif /* CONFIG_CPU_EXYNOS4210 */
+}
+
+static __init void exynos5_gpiolib_init(void)
+{
+#ifdef CONFIG_SOC_EXYNOS5250
+ struct samsung_gpio_chip *chip;
+ int i, nr_chips;
+ void __iomem *gpio_base1, *gpio_base2, *gpio_base3, *gpio_base4;
+ int group = 0;
+ void __iomem *gpx_base;
+
+ /* gpio part1 */
+ gpio_base1 = ioremap(EXYNOS5_PA_GPIO1, SZ_4K);
+ if (gpio_base1 == NULL) {
+ pr_err("unable to ioremap for gpio_base1\n");
+ goto err_ioremap1;
+ }
+
+ /* need to set base address for gpc4 */
+ exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
+
+ /* need to set base address for gpx */
+ chip = &exynos5_gpios_1[21];
+ gpx_base = gpio_base1 + 0xC00;
+ for (i = 0; i < 4; i++, chip++, gpx_base += 0x20)
+ chip->base = gpx_base;
+
+ chip = exynos5_gpios_1;
+ nr_chips = ARRAY_SIZE(exynos5_gpios_1);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS5_PA_GPIO1, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos5_gpios_1,
+ nr_chips, gpio_base1);
+
+ /* gpio part2 */
+ gpio_base2 = ioremap(EXYNOS5_PA_GPIO2, SZ_4K);
+ if (gpio_base2 == NULL) {
+ pr_err("unable to ioremap for gpio_base2\n");
+ goto err_ioremap2;
+ }
+
+ chip = exynos5_gpios_2;
+ nr_chips = ARRAY_SIZE(exynos5_gpios_2);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS5_PA_GPIO2, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos5_gpios_2,
+ nr_chips, gpio_base2);
+
+ /* gpio part3 */
+ gpio_base3 = ioremap(EXYNOS5_PA_GPIO3, SZ_4K);
+ if (gpio_base3 == NULL) {
+ pr_err("unable to ioremap for gpio_base3\n");
+ goto err_ioremap3;
+ }
+
+ /* need to set base address for gpv */
+ exynos5_gpios_3[0].base = gpio_base3;
+ exynos5_gpios_3[1].base = gpio_base3 + 0x20;
+ exynos5_gpios_3[2].base = gpio_base3 + 0x60;
+ exynos5_gpios_3[3].base = gpio_base3 + 0x80;
+ exynos5_gpios_3[4].base = gpio_base3 + 0xC0;
+
+ chip = exynos5_gpios_3;
+ nr_chips = ARRAY_SIZE(exynos5_gpios_3);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS5_PA_GPIO3, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos5_gpios_3,
+ nr_chips, gpio_base3);
+
+ /* gpio part4 */
+ gpio_base4 = ioremap(EXYNOS5_PA_GPIO4, SZ_4K);
+ if (gpio_base4 == NULL) {
+ pr_err("unable to ioremap for gpio_base4\n");
+ goto err_ioremap4;
+ }
+
+ chip = exynos5_gpios_4;
+ nr_chips = ARRAY_SIZE(exynos5_gpios_4);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS5_PA_GPIO4, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos5_gpios_4,
+ nr_chips, gpio_base4);
+ return;
+
+err_ioremap4:
+ iounmap(gpio_base3);
+err_ioremap3:
+ iounmap(gpio_base2);
+err_ioremap2:
+ iounmap(gpio_base1);
+err_ioremap1:
+ return;
+
+#endif /* CONFIG_SOC_EXYNOS5250 */
+}
+
/* TODO: cleanup soc_is_* */
static __init int samsung_gpiolib_init(void)
{
struct samsung_gpio_chip *chip;
int i, nr_chips;
-#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250)
- void __iomem *gpio_base1, *gpio_base2, *gpio_base3, *gpio_base4;
-#endif
int group = 0;
samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs));
@@ -2789,202 +3008,15 @@ static __init int samsung_gpiolib_init(void)
s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);
#endif
} else if (soc_is_exynos4210()) {
-#ifdef CONFIG_CPU_EXYNOS4210
- void __iomem *gpx_base;
-
- /* gpio part1 */
- gpio_base1 = ioremap(EXYNOS4_PA_GPIO1, SZ_4K);
- if (gpio_base1 == NULL) {
- pr_err("unable to ioremap for gpio_base1\n");
- goto err_ioremap1;
- }
-
- chip = exynos4_gpios_1;
- nr_chips = ARRAY_SIZE(exynos4_gpios_1);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &exynos_gpio_cfg;
- chip->group = group++;
- }
- exynos_gpiolib_attach_ofnode(chip,
- EXYNOS4_PA_GPIO1, i * 0x20);
- }
- samsung_gpiolib_add_4bit_chips(exynos4_gpios_1,
- nr_chips, gpio_base1);
-
- /* gpio part2 */
- gpio_base2 = ioremap(EXYNOS4_PA_GPIO2, SZ_4K);
- if (gpio_base2 == NULL) {
- pr_err("unable to ioremap for gpio_base2\n");
- goto err_ioremap2;
- }
-
- /* need to set base address for gpx */
- chip = &exynos4_gpios_2[16];
- gpx_base = gpio_base2 + 0xC00;
- for (i = 0; i < 4; i++, chip++, gpx_base += 0x20)
- chip->base = gpx_base;
-
- chip = exynos4_gpios_2;
- nr_chips = ARRAY_SIZE(exynos4_gpios_2);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &exynos_gpio_cfg;
- chip->group = group++;
- }
- exynos_gpiolib_attach_ofnode(chip,
- EXYNOS4_PA_GPIO2, i * 0x20);
- }
- samsung_gpiolib_add_4bit_chips(exynos4_gpios_2,
- nr_chips, gpio_base2);
-
- /* gpio part3 */
- gpio_base3 = ioremap(EXYNOS4_PA_GPIO3, SZ_256);
- if (gpio_base3 == NULL) {
- pr_err("unable to ioremap for gpio_base3\n");
- goto err_ioremap3;
- }
-
- chip = exynos4_gpios_3;
- nr_chips = ARRAY_SIZE(exynos4_gpios_3);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &exynos_gpio_cfg;
- chip->group = group++;
- }
- exynos_gpiolib_attach_ofnode(chip,
- EXYNOS4_PA_GPIO3, i * 0x20);
- }
- samsung_gpiolib_add_4bit_chips(exynos4_gpios_3,
- nr_chips, gpio_base3);
-
-#if defined(CONFIG_CPU_EXYNOS4210) && defined(CONFIG_S5P_GPIO_INT)
- s5p_register_gpioint_bank(IRQ_GPIO_XA, 0, IRQ_GPIO1_NR_GROUPS);
- s5p_register_gpioint_bank(IRQ_GPIO_XB, IRQ_GPIO1_NR_GROUPS, IRQ_GPIO2_NR_GROUPS);
-#endif
-
-#endif /* CONFIG_CPU_EXYNOS4210 */
+ exynos4_gpiolib_init();
} else if (soc_is_exynos5250()) {
-#ifdef CONFIG_SOC_EXYNOS5250
- void __iomem *gpx_base;
-
- /* gpio part1 */
- gpio_base1 = ioremap(EXYNOS5_PA_GPIO1, SZ_4K);
- if (gpio_base1 == NULL) {
- pr_err("unable to ioremap for gpio_base1\n");
- goto err_ioremap1;
- }
-
- /* need to set base address for gpx */
- chip = &exynos5_gpios_1[20];
- gpx_base = gpio_base1 + 0xC00;
- for (i = 0; i < 4; i++, chip++, gpx_base += 0x20)
- chip->base = gpx_base;
-
- chip = exynos5_gpios_1;
- nr_chips = ARRAY_SIZE(exynos5_gpios_1);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &exynos_gpio_cfg;
- chip->group = group++;
- }
- exynos_gpiolib_attach_ofnode(chip,
- EXYNOS5_PA_GPIO1, i * 0x20);
- }
- samsung_gpiolib_add_4bit_chips(exynos5_gpios_1,
- nr_chips, gpio_base1);
-
- /* gpio part2 */
- gpio_base2 = ioremap(EXYNOS5_PA_GPIO2, SZ_4K);
- if (gpio_base2 == NULL) {
- pr_err("unable to ioremap for gpio_base2\n");
- goto err_ioremap2;
- }
-
- chip = exynos5_gpios_2;
- nr_chips = ARRAY_SIZE(exynos5_gpios_2);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &exynos_gpio_cfg;
- chip->group = group++;
- }
- exynos_gpiolib_attach_ofnode(chip,
- EXYNOS5_PA_GPIO2, i * 0x20);
- }
- samsung_gpiolib_add_4bit_chips(exynos5_gpios_2,
- nr_chips, gpio_base2);
-
- /* gpio part3 */
- gpio_base3 = ioremap(EXYNOS5_PA_GPIO3, SZ_4K);
- if (gpio_base3 == NULL) {
- pr_err("unable to ioremap for gpio_base3\n");
- goto err_ioremap3;
- }
-
- /* need to set base address for gpv */
- exynos5_gpios_3[0].base = gpio_base3;
- exynos5_gpios_3[1].base = gpio_base3 + 0x20;
- exynos5_gpios_3[2].base = gpio_base3 + 0x60;
- exynos5_gpios_3[3].base = gpio_base3 + 0x80;
- exynos5_gpios_3[4].base = gpio_base3 + 0xC0;
-
- chip = exynos5_gpios_3;
- nr_chips = ARRAY_SIZE(exynos5_gpios_3);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &exynos_gpio_cfg;
- chip->group = group++;
- }
- exynos_gpiolib_attach_ofnode(chip,
- EXYNOS5_PA_GPIO3, i * 0x20);
- }
- samsung_gpiolib_add_4bit_chips(exynos5_gpios_3,
- nr_chips, gpio_base3);
-
- /* gpio part4 */
- gpio_base4 = ioremap(EXYNOS5_PA_GPIO4, SZ_4K);
- if (gpio_base4 == NULL) {
- pr_err("unable to ioremap for gpio_base4\n");
- goto err_ioremap4;
- }
-
- chip = exynos5_gpios_4;
- nr_chips = ARRAY_SIZE(exynos5_gpios_4);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &exynos_gpio_cfg;
- chip->group = group++;
- }
- exynos_gpiolib_attach_ofnode(chip,
- EXYNOS5_PA_GPIO4, i * 0x20);
- }
- samsung_gpiolib_add_4bit_chips(exynos5_gpios_4,
- nr_chips, gpio_base4);
-#endif /* CONFIG_SOC_EXYNOS5250 */
+ exynos5_gpiolib_init();
} else {
WARN(1, "Unknown SoC in gpio-samsung, no GPIOs added\n");
return -ENODEV;
}
return 0;
-
-#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250)
-err_ioremap4:
- iounmap(gpio_base3);
-err_ioremap3:
- iounmap(gpio_base2);
-err_ioremap2:
- iounmap(gpio_base1);
-err_ioremap1:
- return -ENOMEM;
-#endif
}
core_initcall(samsung_gpiolib_init);
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 8cadf4d683a8..424dce8e3f30 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -232,6 +232,14 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev)
sch_gpio_resume.ngpio = 9;
break;
+ case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
+ sch_gpio_core.base = 0;
+ sch_gpio_core.ngpio = 21;
+
+ sch_gpio_resume.base = 21;
+ sch_gpio_resume.ngpio = 9;
+ break;
+
default:
return -ENODEV;
}
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index 031e5d24837d..9d9891f7a607 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -224,7 +224,7 @@ static int __devinit sdv_gpio_probe(struct pci_dev *pdev,
ret = bgpio_init(&sd->bgpio, &pdev->dev, 4,
sd->gpio_pub_base + GPINR, sd->gpio_pub_base + GPOUTR,
- NULL, sd->gpio_pub_base + GPOER, NULL, false);
+ NULL, sd->gpio_pub_base + GPOER, NULL, 0);
if (ret)
goto unmap;
sd->bgpio.gc.ngpio = SDV_NUM_PUB_GPIOS;
@@ -282,17 +282,7 @@ static struct pci_driver sdv_gpio_driver = {
.remove = sdv_gpio_remove,
};
-static int __init sdv_gpio_init(void)
-{
- return pci_register_driver(&sdv_gpio_driver);
-}
-module_init(sdv_gpio_init);
-
-static void __exit sdv_gpio_exit(void)
-{
- pci_unregister_driver(&sdv_gpio_driver);
-}
-module_exit(sdv_gpio_exit);
+module_pci_driver(sdv_gpio_driver);
MODULE_AUTHOR("Hans J. Koch <hjk@linutronix.de>");
MODULE_DESCRIPTION("GPIO interface for Intel Sodaville SoCs");
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
new file mode 100644
index 000000000000..38416be8ba11
--- /dev/null
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -0,0 +1,435 @@
+/*
+ * STMicroelectronics ConneXt (STA2X11) GPIO driver
+ *
+ * Copyright 2012 ST Microelectronics (Alessandro Rubini)
+ * Based on gpio-ml-ioh.c, Copyright 2010 OKI Semiconductors Ltd.
+ * Also based on previous sta2x11 work, Copyright 2011 Wind River Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/sta2x11-mfd.h>
+
+struct gsta_regs {
+ u32 dat; /* 0x00 */
+ u32 dats;
+ u32 datc;
+ u32 pdis;
+ u32 dir; /* 0x10 */
+ u32 dirs;
+ u32 dirc;
+ u32 unused_1c;
+ u32 afsela; /* 0x20 */
+ u32 unused_24[7];
+ u32 rimsc; /* 0x40 */
+ u32 fimsc;
+ u32 is;
+ u32 ic;
+};
+
+struct gsta_gpio {
+ spinlock_t lock;
+ struct device *dev;
+ void __iomem *reg_base;
+ struct gsta_regs __iomem *regs[GSTA_NR_BLOCKS];
+ struct gpio_chip gpio;
+ int irq_base;
+ /* FIXME: save the whole config here (AF, ...) */
+ unsigned irq_type[GSTA_NR_GPIO];
+};
+
+static inline struct gsta_regs __iomem *__regs(struct gsta_gpio *chip, int nr)
+{
+ return chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+}
+
+static inline u32 __bit(int nr)
+{
+ return 1U << (nr % GSTA_GPIO_PER_BLOCK);
+}
+
+/*
+ * gpio methods
+ */
+
+static void gsta_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+{
+ struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+
+ if (val)
+ writel(bit, &regs->dats);
+ else
+ writel(bit, &regs->datc);
+}
+
+static int gsta_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+ struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+
+ return readl(&regs->dat) & bit;
+}
+
+static int gsta_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+ int val)
+{
+ struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+
+ writel(bit, &regs->dirs);
+ /* Data register after direction, otherwise pullup/down is selected */
+ if (val)
+ writel(bit, &regs->dats);
+ else
+ writel(bit, &regs->datc);
+ return 0;
+}
+
+static int gsta_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+ struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+
+ writel(bit, &regs->dirc);
+ return 0;
+}
+
+static int gsta_gpio_to_irq(struct gpio_chip *gpio, unsigned offset)
+{
+ struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+ return chip->irq_base + offset;
+}
+
+static void gsta_gpio_setup(struct gsta_gpio *chip) /* called from probe */
+{
+ struct gpio_chip *gpio = &chip->gpio;
+
+ /*
+ * ARCH_NR_GPIOS is currently 256 and dynamic allocation starts
+ * from the end. However, for compatibility, we need the first
+ * ConneXt device to start from gpio 0: it's the main chipset
+ * on most boards so documents and drivers assume gpio0..gpio127
+ */
+ static int gpio_base;
+
+ gpio->label = dev_name(chip->dev);
+ gpio->owner = THIS_MODULE;
+ gpio->direction_input = gsta_gpio_direction_input;
+ gpio->get = gsta_gpio_get;
+ gpio->direction_output = gsta_gpio_direction_output;
+ gpio->set = gsta_gpio_set;
+ gpio->dbg_show = NULL;
+ gpio->base = gpio_base;
+ gpio->ngpio = GSTA_NR_GPIO;
+ gpio->can_sleep = 0;
+ gpio->to_irq = gsta_gpio_to_irq;
+
+ /*
+ * After the first device, turn to dynamic gpio numbers.
+ * For example, with ARCH_NR_GPIOS = 256 we can fit two cards
+ */
+ if (!gpio_base)
+ gpio_base = -1;
+}
+
+/*
+ * Special method: alternate functions and pullup/pulldown. This is only
+ * invoked on startup to configure gpio's according to platform data.
+ * FIXME: this functionality should be managed (and exported to other
+ * drivers) via the pin control subsystem.
+ */
+static void gsta_set_config(struct gsta_gpio *chip, int nr, unsigned cfg)
+{
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ unsigned long flags;
+ u32 bit = __bit(nr);
+ u32 val;
+ int err = 0;
+
+ pr_info("%s: %p %i %i\n", __func__, chip, nr, cfg);
+
+ if (cfg == PINMUX_TYPE_NONE)
+ return;
+
+ /* Alternate function or not? */
+ spin_lock_irqsave(&chip->lock, flags);
+ val = readl(&regs->afsela);
+ if (cfg == PINMUX_TYPE_FUNCTION)
+ val |= bit;
+ else
+ val &= ~bit;
+ writel(val | bit, &regs->afsela);
+ if (cfg == PINMUX_TYPE_FUNCTION) {
+ spin_unlock_irqrestore(&chip->lock, flags);
+ return;
+ }
+
+ /* not alternate function: set details */
+ switch (cfg) {
+ case PINMUX_TYPE_OUTPUT_LOW:
+ writel(bit, &regs->dirs);
+ writel(bit, &regs->datc);
+ break;
+ case PINMUX_TYPE_OUTPUT_HIGH:
+ writel(bit, &regs->dirs);
+ writel(bit, &regs->dats);
+ break;
+ case PINMUX_TYPE_INPUT:
+ writel(bit, &regs->dirc);
+ val = readl(&regs->pdis) | bit;
+ writel(val, &regs->pdis);
+ break;
+ case PINMUX_TYPE_INPUT_PULLUP:
+ writel(bit, &regs->dirc);
+ val = readl(&regs->pdis) & ~bit;
+ writel(val, &regs->pdis);
+ writel(bit, &regs->dats);
+ break;
+ case PINMUX_TYPE_INPUT_PULLDOWN:
+ writel(bit, &regs->dirc);
+ val = readl(&regs->pdis) & ~bit;
+ writel(val, &regs->pdis);
+ writel(bit, &regs->datc);
+ break;
+ default:
+ err = 1;
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+ if (err)
+ pr_err("%s: chip %p, pin %i, cfg %i is invalid\n",
+ __func__, chip, nr, cfg);
+}
+
+/*
+ * Irq methods
+ */
+
+static void gsta_irq_disable(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct gsta_gpio *chip = gc->private;
+ int nr = data->irq - chip->irq_base;
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ if (chip->irq_type[nr] & IRQ_TYPE_EDGE_RISING) {
+ val = readl(&regs->rimsc) & ~bit;
+ writel(val, &regs->rimsc);
+ }
+ if (chip->irq_type[nr] & IRQ_TYPE_EDGE_FALLING) {
+ val = readl(&regs->fimsc) & ~bit;
+ writel(val, &regs->fimsc);
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+ return;
+}
+
+static void gsta_irq_enable(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct gsta_gpio *chip = gc->private;
+ int nr = data->irq - chip->irq_base;
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+ u32 val;
+ int type;
+ unsigned long flags;
+
+ type = chip->irq_type[nr];
+
+ spin_lock_irqsave(&chip->lock, flags);
+ val = readl(&regs->rimsc);
+ if (type & IRQ_TYPE_EDGE_RISING)
+ writel(val | bit, &regs->rimsc);
+ else
+ writel(val & ~bit, &regs->rimsc);
+ val = readl(&regs->fimsc);
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ writel(val | bit, &regs->fimsc);
+ else
+ writel(val & ~bit, &regs->fimsc);
+ spin_unlock_irqrestore(&chip->lock, flags);
+ return;
+}
+
+static int gsta_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct gsta_gpio *chip = gc->private;
+ int nr = d->irq - chip->irq_base;
+
+ /* We only support edge interrupts */
+ if (!(type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))) {
+ pr_debug("%s: unsupported type 0x%x\n", __func__, type);
+ return -EINVAL;
+ }
+
+ chip->irq_type[nr] = type; /* used for enable/disable */
+
+ gsta_irq_enable(d);
+ return 0;
+}
+
+static irqreturn_t gsta_gpio_handler(int irq, void *dev_id)
+{
+ struct gsta_gpio *chip = dev_id;
+ struct gsta_regs __iomem *regs;
+ u32 is;
+ int i, nr, base;
+ irqreturn_t ret = IRQ_NONE;
+
+ for (i = 0; i < GSTA_NR_BLOCKS; i++) {
+ regs = chip->regs[i];
+ base = chip->irq_base + i * GSTA_GPIO_PER_BLOCK;
+ while ((is = readl(&regs->is))) {
+ nr = __ffs(is);
+ irq = base + nr;
+ generic_handle_irq(irq);
+ writel(1 << nr, &regs->ic);
+ ret = IRQ_HANDLED;
+ }
+ }
+ return ret;
+}
+
+static __devinit void gsta_alloc_irq_chip(struct gsta_gpio *chip)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_alloc_generic_chip(KBUILD_MODNAME, 1, chip->irq_base,
+ chip->reg_base, handle_simple_irq);
+ gc->private = chip;
+ ct = gc->chip_types;
+
+ ct->chip.irq_set_type = gsta_irq_type;
+ ct->chip.irq_disable = gsta_irq_disable;
+ ct->chip.irq_enable = gsta_irq_enable;
+
+ /* FIXME: this makes at most 32 interrupts. Request 0 for now */
+ irq_setup_generic_chip(gc, 0 /* IRQ_MSK(GSTA_GPIO_PER_BLOCK) */, 0,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+
+ /* Set up all 128 interrupts: code from setup_generic_chip */
+ {
+ struct irq_chip_type *ct = gc->chip_types;
+ int i, j;
+ for (j = 0; j < GSTA_NR_GPIO; j++) {
+ i = chip->irq_base + j;
+ irq_set_chip_and_handler(i, &ct->chip, ct->handler);
+ irq_set_chip_data(i, gc);
+ irq_modify_status(i, IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+ }
+ gc->irq_cnt = i - gc->irq_base;
+ }
+}
+
+/* The platform device used here is instantiated by the MFD device */
+static int __devinit gsta_probe(struct platform_device *dev)
+{
+ int i, err;
+ struct pci_dev *pdev;
+ struct sta2x11_gpio_pdata *gpio_pdata;
+ struct gsta_gpio *chip;
+ struct resource *res;
+
+ pdev = *(struct pci_dev **)(dev->dev.platform_data);
+ gpio_pdata = dev_get_platdata(&pdev->dev);
+
+ if (gpio_pdata == NULL) {
+ dev_err(&dev->dev, "no gpio config\n");
+ return -EINVAL;
+ }
+ pr_debug("gpio config: %p\n", gpio_pdata);
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+
+ chip = devm_kzalloc(&dev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+ chip->dev = &dev->dev;
+ chip->reg_base = devm_request_and_ioremap(&dev->dev, res);
+ if (!chip->reg_base)
+ return -ENOMEM;
+
+ for (i = 0; i < GSTA_NR_BLOCKS; i++) {
+ chip->regs[i] = chip->reg_base + i * 4096;
+ /* disable all irqs */
+ writel(0, &chip->regs[i]->rimsc);
+ writel(0, &chip->regs[i]->fimsc);
+ writel(~0, &chip->regs[i]->ic);
+ }
+ spin_lock_init(&chip->lock);
+ gsta_gpio_setup(chip);
+ for (i = 0; i < GSTA_NR_GPIO; i++)
+ gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
+
+ /* 384 was used in previous code: stay compatible with other drivers */
+ err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
+ if (err < 0) {
+ dev_warn(&dev->dev, "sta2x11 gpio: Can't get irq base (%i)\n",
+ -err);
+ return err;
+ }
+ chip->irq_base = err;
+ gsta_alloc_irq_chip(chip);
+
+ err = request_irq(pdev->irq, gsta_gpio_handler,
+ IRQF_SHARED, KBUILD_MODNAME, chip);
+ if (err < 0) {
+ dev_err(&dev->dev, "sta2x11 gpio: Can't request irq (%i)\n",
+ -err);
+ goto err_free_descs;
+ }
+
+ err = gpiochip_add(&chip->gpio);
+ if (err < 0) {
+ dev_err(&dev->dev, "sta2x11 gpio: Can't register (%i)\n",
+ -err);
+ goto err_free_irq;
+ }
+
+ platform_set_drvdata(dev, chip);
+ return 0;
+
+err_free_irq:
+ free_irq(pdev->irq, chip);
+err_free_descs:
+ irq_free_descs(chip->irq_base, GSTA_NR_GPIO);
+ return err;
+}
+
+static struct platform_driver sta2x11_gpio_platform_driver = {
+ .driver = {
+ .name = "sta2x11-gpio",
+ .owner = THIS_MODULE,
+ },
+ .probe = gsta_probe,
+};
+
+module_platform_driver(sta2x11_gpio_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("sta2x11_gpio GPIO driver");
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
new file mode 100644
index 000000000000..e35096bf3cfb
--- /dev/null
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -0,0 +1,301 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/of_platform.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * The Serial To Parallel (STP) unit is found on MIPS-based Lantiq SoCs. It is
+ * a peripheral controller used to drive external shift-register cascades. At
+ * most 3 groups of 8 bits can be driven. The hardware can let the DSL modem
+ * drive the 2 LSBs of the cascade automatically.
+ */
+
+/* control register 0 */
+#define XWAY_STP_CON0 0x00
+/* control register 1 */
+#define XWAY_STP_CON1 0x04
+/* data register 0 */
+#define XWAY_STP_CPU0 0x08
+/* data register 1 */
+#define XWAY_STP_CPU1 0x0C
+/* access register */
+#define XWAY_STP_AR 0x10
+
+/* software or hardware update select bit */
+#define XWAY_STP_CON_SWU BIT(31)
+
+/* automatic update rates */
+#define XWAY_STP_2HZ 0
+#define XWAY_STP_4HZ BIT(23)
+#define XWAY_STP_8HZ BIT(24)
+#define XWAY_STP_10HZ (BIT(24) | BIT(23))
+#define XWAY_STP_SPEED_MASK (0xf << 23)
+
+/* clock source for automatic update */
+#define XWAY_STP_UPD_FPI BIT(31)
+#define XWAY_STP_UPD_MASK (BIT(31) | BIT(30))
+
+/* let the adsl core drive the 2 LSBs */
+#define XWAY_STP_ADSL_SHIFT 24
+#define XWAY_STP_ADSL_MASK 0x3
+
+/* 2 groups of 3 bits can be driven by the phys */
+#define XWAY_STP_PHY_MASK 0x3
+#define XWAY_STP_PHY1_SHIFT 27
+#define XWAY_STP_PHY2_SHIFT 15
+
+/* STP has 3 groups of 8 bits */
+#define XWAY_STP_GROUP0 BIT(0)
+#define XWAY_STP_GROUP1 BIT(1)
+#define XWAY_STP_GROUP2 BIT(2)
+#define XWAY_STP_GROUP_MASK (0x7)
+
+/* Edge configuration bits */
+#define XWAY_STP_FALLING BIT(26)
+#define XWAY_STP_EDGE_MASK BIT(26)
+
+#define xway_stp_r32(m, reg) __raw_readl(m + reg)
+#define xway_stp_w32(m, val, reg) __raw_writel(val, m + reg)
+#define xway_stp_w32_mask(m, clear, set, reg) \
+ ltq_w32((ltq_r32(m + reg) & ~(clear)) | (set), \
+ m + reg)
+
+struct xway_stp {
+ struct gpio_chip gc;
+ void __iomem *virt;
+ u32 edge; /* rising or falling edge triggered shift register */
+ u16 shadow; /* shadow the shift registers state */
+ u8 groups; /* we can drive 1-3 groups of 8bit each */
+ u8 dsl; /* the 2 LSBs can be driven by the dsl core */
+ u8 phy1; /* 3 bits can be driven by phy1 */
+ u8 phy2; /* 3 bits can be driven by phy2 */
+ u8 reserved; /* mask out the hw driven bits in gpio_request */
+};
+
+/**
+ * xway_stp_set() - gpio_chip->set - set gpios.
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpio: GPIO signal number.
+ * @val: Value to be written to specified signal.
+ *
+ * Store the value in the shadow register and latch it into the shift register.
+ */
+static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
+{
+ struct xway_stp *chip =
+ container_of(gc, struct xway_stp, gc);
+
+ if (val)
+ chip->shadow |= BIT(gpio);
+ else
+ chip->shadow &= ~BIT(gpio);
+ xway_stp_w32(chip->virt, chip->shadow, XWAY_STP_CPU0);
+ xway_stp_w32_mask(chip->virt, 0, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+}
+
+/**
+ * xway_stp_dir_out() - gpio_chip->dir_out - set gpio direction.
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpio: GPIO signal number.
+ * @val: Value to be written to specified signal.
+ *
+ * Sets the value via xway_stp_set(); the pins are output-only, so this always returns 0.
+ */
+static int xway_stp_dir_out(struct gpio_chip *gc, unsigned gpio, int val)
+{
+ xway_stp_set(gc, gpio, val);
+
+ return 0;
+}
+
+/**
+ * xway_stp_request() - gpio_chip->request
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpio: GPIO signal number.
+ *
+ * We mask out the HW driven pins
+ */
+static int xway_stp_request(struct gpio_chip *gc, unsigned gpio)
+{
+ struct xway_stp *chip =
+ container_of(gc, struct xway_stp, gc);
+
+ if ((gpio < 8) && (chip->reserved & BIT(gpio))) {
+ dev_err(gc->dev, "GPIO %d is driven by hardware\n", gpio);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * xway_stp_hw_init() - Configure the STP unit
+ * @chip: Pointer to the xway_stp chip structure
+ */
+static int xway_stp_hw_init(struct xway_stp *chip)
+{
+ /* sane defaults */
+ xway_stp_w32(chip->virt, 0, XWAY_STP_AR);
+ xway_stp_w32(chip->virt, 0, XWAY_STP_CPU0);
+ xway_stp_w32(chip->virt, 0, XWAY_STP_CPU1);
+ xway_stp_w32(chip->virt, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+ xway_stp_w32(chip->virt, 0, XWAY_STP_CON1);
+
+ /* apply edge trigger settings for the shift register */
+ xway_stp_w32_mask(chip->virt, XWAY_STP_EDGE_MASK,
+ chip->edge, XWAY_STP_CON0);
+
+ /* apply led group settings */
+ xway_stp_w32_mask(chip->virt, XWAY_STP_GROUP_MASK,
+ chip->groups, XWAY_STP_CON1);
+
+ /* tell the hardware which pins are controlled by the dsl modem */
+ xway_stp_w32_mask(chip->virt,
+ XWAY_STP_ADSL_MASK << XWAY_STP_ADSL_SHIFT,
+ chip->dsl << XWAY_STP_ADSL_SHIFT,
+ XWAY_STP_CON0);
+
+ /* tell the hardware which pins are controlled by the phys */
+ xway_stp_w32_mask(chip->virt,
+ XWAY_STP_PHY_MASK << XWAY_STP_PHY1_SHIFT,
+ chip->phy1 << XWAY_STP_PHY1_SHIFT,
+ XWAY_STP_CON0);
+ xway_stp_w32_mask(chip->virt,
+ XWAY_STP_PHY_MASK << XWAY_STP_PHY2_SHIFT,
+ chip->phy2 << XWAY_STP_PHY2_SHIFT,
+ XWAY_STP_CON1);
+
+ /* mask out the hw driven bits in gpio_request */
+ chip->reserved = (chip->phy2 << 5) | (chip->phy1 << 2) | chip->dsl;
+
+ /*
+ * if we have pins that are driven by hw, we need to tell the stp what
+ * clock to use as a timer.
+ */
+ if (chip->reserved)
+ xway_stp_w32_mask(chip->virt, XWAY_STP_UPD_MASK,
+ XWAY_STP_UPD_FPI, XWAY_STP_CON1);
+
+ return 0;
+}
+
+static int __devinit xway_stp_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ const __be32 *shadow, *groups, *dsl, *phy;
+ struct xway_stp *chip;
+ struct clk *clk;
+ int ret = 0;
+
+ if (!res) {
+ dev_err(&pdev->dev, "failed to request STP resource\n");
+ return -ENOENT;
+ }
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->virt = devm_request_and_ioremap(&pdev->dev, res);
+ if (!chip->virt) {
+ dev_err(&pdev->dev, "failed to remap STP memory\n");
+ return -ENOMEM;
+ }
+ chip->gc.dev = &pdev->dev;
+ chip->gc.label = "stp-xway";
+ chip->gc.direction_output = xway_stp_dir_out;
+ chip->gc.set = xway_stp_set;
+ chip->gc.request = xway_stp_request;
+ chip->gc.base = -1;
+ chip->gc.owner = THIS_MODULE;
+
+ /* store the shadow value if one was passed by the devicetree */
+ shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
+ if (shadow)
+ chip->shadow = be32_to_cpu(*shadow);
+
+ /* find out which gpio groups should be enabled */
+ groups = of_get_property(pdev->dev.of_node, "lantiq,groups", NULL);
+ if (groups)
+ chip->groups = be32_to_cpu(*groups) & XWAY_STP_GROUP_MASK;
+ else
+ chip->groups = XWAY_STP_GROUP0;
+ chip->gc.ngpio = fls(chip->groups) * 8;
+
+ /* find out which gpios are controlled by the dsl core */
+ dsl = of_get_property(pdev->dev.of_node, "lantiq,dsl", NULL);
+ if (dsl)
+ chip->dsl = be32_to_cpu(*dsl) & XWAY_STP_ADSL_MASK;
+
+ /* find out which gpios are controlled by the phys */
+ if (of_machine_is_compatible("lantiq,ar9") ||
+ of_machine_is_compatible("lantiq,gr9") ||
+ of_machine_is_compatible("lantiq,vr9")) {
+ phy = of_get_property(pdev->dev.of_node, "lantiq,phy1", NULL);
+ if (phy)
+ chip->phy1 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK;
+ phy = of_get_property(pdev->dev.of_node, "lantiq,phy2", NULL);
+ if (phy)
+ chip->phy2 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK;
+ }
+
+ /* check which edge trigger we should use, default to a falling edge */
+ if (!of_find_property(pdev->dev.of_node, "lantiq,rising", NULL))
+ chip->edge = XWAY_STP_FALLING;
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+ clk_enable(clk);
+
+ ret = xway_stp_hw_init(chip);
+ if (!ret)
+ ret = gpiochip_add(&chip->gc);
+
+ if (!ret)
+ dev_info(&pdev->dev, "Init done\n");
+
+ return ret;
+}
+
+static const struct of_device_id xway_stp_match[] = {
+ { .compatible = "lantiq,gpio-stp-xway" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xway_stp_match);
+
+static struct platform_driver xway_stp_driver = {
+ .probe = xway_stp_probe,
+ .driver = {
+ .name = "gpio-stp-xway",
+ .owner = THIS_MODULE,
+ .of_match_table = xway_stp_match,
+ },
+};
+
+int __init xway_stp_init(void)
+{
+ return platform_driver_register(&xway_stp_driver);
+}
+
+subsys_initcall(xway_stp_init);
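Editor's note: once the chip above is registered, its shift-register outputs behave like ordinary GPIO lines; each gpio_set_value() updates the shadow word and latches it out via the SWU bit in CON0. A minimal consumer-side sketch (the GPIO number and label are hypothetical):

#include <linux/gpio.h>

static int example_drive_stp_output(unsigned stp_gpio)
{
	int err = gpio_request_one(stp_gpio, GPIOF_OUT_INIT_LOW, "stp-demo");
	if (err)
		return err;
	gpio_set_value(stp_gpio, 1);	/* shifted out on the next update */
	gpio_free(stp_gpio);
	return 0;
}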
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 7eef648a3351..c1ad2884f2ed 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -18,14 +18,27 @@
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
+#include <linux/platform_device.h>
#include <linux/mfd/tps65910.h>
+#include <linux/of_device.h>
+
+struct tps65910_gpio {
+ struct gpio_chip gpio_chip;
+ struct tps65910 *tps65910;
+};
+
+static inline struct tps65910_gpio *to_tps65910_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct tps65910_gpio, gpio_chip);
+}
static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
{
- struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
- uint8_t val;
+ struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+ struct tps65910 *tps65910 = tps65910_gpio->tps65910;
+ unsigned int val;
- tps65910->read(tps65910, TPS65910_GPIO0 + offset, 1, &val);
+ tps65910_reg_read(tps65910, TPS65910_GPIO0 + offset, &val);
if (val & GPIO_STS_MASK)
return 1;
@@ -36,83 +49,170 @@ static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
int value)
{
- struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+ struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+ struct tps65910 *tps65910 = tps65910_gpio->tps65910;
if (value)
- tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+ tps65910_reg_set_bits(tps65910, TPS65910_GPIO0 + offset,
GPIO_SET_MASK);
else
- tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+ tps65910_reg_clear_bits(tps65910, TPS65910_GPIO0 + offset,
GPIO_SET_MASK);
}
static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
int value)
{
- struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+ struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+ struct tps65910 *tps65910 = tps65910_gpio->tps65910;
/* Set the initial value */
tps65910_gpio_set(gc, offset, value);
- return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+ return tps65910_reg_set_bits(tps65910, TPS65910_GPIO0 + offset,
GPIO_CFG_MASK);
}
static int tps65910_gpio_input(struct gpio_chip *gc, unsigned offset)
{
- struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+ struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+ struct tps65910 *tps65910 = tps65910_gpio->tps65910;
- return tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+ return tps65910_reg_clear_bits(tps65910, TPS65910_GPIO0 + offset,
GPIO_CFG_MASK);
}
-void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base)
+#ifdef CONFIG_OF
+static struct tps65910_board *tps65910_parse_dt_for_gpio(struct device *dev,
+ struct tps65910 *tps65910, int chip_ngpio)
{
+ struct tps65910_board *tps65910_board = tps65910->of_plat_data;
+ unsigned int prop_array[TPS6591X_MAX_NUM_GPIO];
+ int ngpio = min(chip_ngpio, TPS6591X_MAX_NUM_GPIO);
int ret;
- struct tps65910_board *board_data;
+ int idx;
+
+ tps65910_board->gpio_base = -1;
+ ret = of_property_read_u32_array(tps65910->dev->of_node,
+ "ti,en-gpio-sleep", prop_array, ngpio);
+ if (ret < 0) {
+ dev_dbg(dev, "ti,en-gpio-sleep not specified\n");
+ return tps65910_board;
+ }
- if (!gpio_base)
- return;
+ for (idx = 0; idx < ngpio; idx++)
+ tps65910_board->en_gpio_sleep[idx] = (prop_array[idx] != 0);
- tps65910->gpio.owner = THIS_MODULE;
- tps65910->gpio.label = tps65910->i2c_client->name;
- tps65910->gpio.dev = tps65910->dev;
- tps65910->gpio.base = gpio_base;
+ return tps65910_board;
+}
+#else
+static struct tps65910_board *tps65910_parse_dt_for_gpio(struct device *dev,
+ struct tps65910 *tps65910, int chip_ngpio)
+{
+ return NULL;
+}
+#endif
+
+static int __devinit tps65910_gpio_probe(struct platform_device *pdev)
+{
+ struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
+ struct tps65910_board *pdata = dev_get_platdata(tps65910->dev);
+ struct tps65910_gpio *tps65910_gpio;
+ int ret;
+ int i;
+
+ tps65910_gpio = devm_kzalloc(&pdev->dev,
+ sizeof(*tps65910_gpio), GFP_KERNEL);
+ if (!tps65910_gpio) {
+ dev_err(&pdev->dev, "Could not allocate tps65910_gpio\n");
+ return -ENOMEM;
+ }
+
+ tps65910_gpio->tps65910 = tps65910;
+
+ tps65910_gpio->gpio_chip.owner = THIS_MODULE;
+ tps65910_gpio->gpio_chip.label = tps65910->i2c_client->name;
switch(tps65910_chip_id(tps65910)) {
case TPS65910:
- tps65910->gpio.ngpio = TPS65910_NUM_GPIO;
+ tps65910_gpio->gpio_chip.ngpio = TPS65910_NUM_GPIO;
break;
case TPS65911:
- tps65910->gpio.ngpio = TPS65911_NUM_GPIO;
+ tps65910_gpio->gpio_chip.ngpio = TPS65911_NUM_GPIO;
break;
default:
- return;
+ return -EINVAL;
+ }
+ tps65910_gpio->gpio_chip.can_sleep = 1;
+ tps65910_gpio->gpio_chip.direction_input = tps65910_gpio_input;
+ tps65910_gpio->gpio_chip.direction_output = tps65910_gpio_output;
+ tps65910_gpio->gpio_chip.set = tps65910_gpio_set;
+ tps65910_gpio->gpio_chip.get = tps65910_gpio_get;
+ tps65910_gpio->gpio_chip.dev = &pdev->dev;
+ if (pdata && pdata->gpio_base)
+ tps65910_gpio->gpio_chip.base = pdata->gpio_base;
+ else
+ tps65910_gpio->gpio_chip.base = -1;
+
+ if (!pdata && tps65910->dev->of_node)
+ pdata = tps65910_parse_dt_for_gpio(&pdev->dev, tps65910,
+ tps65910_gpio->gpio_chip.ngpio);
+
+ if (!pdata)
+ goto skip_init;
+
+ /* Configure sleep control for gpios if provided */
+ for (i = 0; i < tps65910_gpio->gpio_chip.ngpio; ++i) {
+ if (!pdata->en_gpio_sleep[i])
+ continue;
+
+ ret = tps65910_reg_set_bits(tps65910,
+ TPS65910_GPIO0 + i, GPIO_SLEEP_MASK);
+ if (ret < 0)
+ dev_warn(tps65910->dev,
+ "GPIO Sleep setting failed with err %d\n", ret);
}
- tps65910->gpio.can_sleep = 1;
-
- tps65910->gpio.direction_input = tps65910_gpio_input;
- tps65910->gpio.direction_output = tps65910_gpio_output;
- tps65910->gpio.set = tps65910_gpio_set;
- tps65910->gpio.get = tps65910_gpio_get;
-
- /* Configure sleep control for gpios */
- board_data = dev_get_platdata(tps65910->dev);
- if (board_data) {
- int i;
- for (i = 0; i < tps65910->gpio.ngpio; ++i) {
- if (board_data->en_gpio_sleep[i]) {
- ret = tps65910_set_bits(tps65910,
- TPS65910_GPIO0 + i, GPIO_SLEEP_MASK);
- if (ret < 0)
- dev_warn(tps65910->dev,
- "GPIO Sleep setting failed\n");
- }
- }
+
+skip_init:
+ ret = gpiochip_add(&tps65910_gpio->gpio_chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ return ret;
}
- ret = gpiochip_add(&tps65910->gpio);
+ platform_set_drvdata(pdev, tps65910_gpio);
+
+ return ret;
+}
+
+static int __devexit tps65910_gpio_remove(struct platform_device *pdev)
+{
+ struct tps65910_gpio *tps65910_gpio = platform_get_drvdata(pdev);
- if (ret)
- dev_warn(tps65910->dev, "GPIO registration failed: %d\n", ret);
+ return gpiochip_remove(&tps65910_gpio->gpio_chip);
}
+
+static struct platform_driver tps65910_gpio_driver = {
+ .driver.name = "tps65910-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = tps65910_gpio_probe,
+ .remove = __devexit_p(tps65910_gpio_remove),
+};
+
+static int __init tps65910_gpio_init(void)
+{
+ return platform_driver_register(&tps65910_gpio_driver);
+}
+subsys_initcall(tps65910_gpio_init);
+
+static void __exit tps65910_gpio_exit(void)
+{
+ platform_driver_unregister(&tps65910_gpio_driver);
+}
+module_exit(tps65910_gpio_exit);
+
+MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+MODULE_AUTHOR("Jorge Eduardo Candelaria jedu@slimlogic.co.uk>");
+MODULE_DESCRIPTION("GPIO interface for TPS65910/TPS6511 PMICs");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65910-gpio");
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index deb949e75ec1..e56a2165641c 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -102,10 +102,8 @@ static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
- if (!wm831x->irq_base)
- return -EINVAL;
-
- return wm831x->irq_base + WM831X_IRQ_GPIO_1 + offset;
+ return irq_create_mapping(wm831x->irq_domain,
+ WM831X_IRQ_GPIO_1 + offset);
}
static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
diff --git a/drivers/of/gpio.c b/drivers/gpio/gpiolib-of.c
index bf984b6dc477..d18068a9f3ec 100644
--- a/drivers/of/gpio.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -15,11 +15,39 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
+/* Private data structure for of_gpiochip_is_match */
+struct gg_data {
+ enum of_gpio_flags *flags;
+ struct of_phandle_args gpiospec;
+
+ int out_gpio;
+};
+
+/* Private function for resolving node pointer to gpio_chip */
+static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
+{
+ struct gg_data *gg_data = data;
+ int ret;
+
+ if ((gc->of_node != gg_data->gpiospec.np) ||
+ (gc->of_gpio_n_cells != gg_data->gpiospec.args_count) ||
+ (!gc->of_xlate))
+ return false;
+
+ ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
+ if (ret < 0)
+ return false;
+
+ gg_data->out_gpio = ret + gc->base;
+ return true;
+}
+
/**
* of_get_named_gpio_flags() - Get a GPIO number and flags to use with GPIO API
* @np: device node to get GPIO from
@@ -34,46 +62,25 @@
int of_get_named_gpio_flags(struct device_node *np, const char *propname,
int index, enum of_gpio_flags *flags)
{
+ struct gg_data gg_data = { .flags = flags, .out_gpio = -ENODEV };
int ret;
- struct gpio_chip *gc;
- struct of_phandle_args gpiospec;
+
+ /* .of_xlate might decide to not fill in the flags, so clear it. */
+ if (flags)
+ *flags = 0;
ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index,
- &gpiospec);
+ &gg_data.gpiospec);
if (ret) {
pr_debug("%s: can't parse gpios property\n", __func__);
- goto err0;
- }
-
- gc = of_node_to_gpiochip(gpiospec.np);
- if (!gc) {
- pr_debug("%s: gpio controller %s isn't registered\n",
- np->full_name, gpiospec.np->full_name);
- ret = -ENODEV;
- goto err1;
- }
-
- if (gpiospec.args_count != gc->of_gpio_n_cells) {
- pr_debug("%s: wrong #gpio-cells for %s\n",
- np->full_name, gpiospec.np->full_name);
- ret = -EINVAL;
- goto err1;
+ return -EINVAL;
}
- /* .xlate might decide to not fill in the flags, so clear it. */
- if (flags)
- *flags = 0;
-
- ret = gc->of_xlate(gc, &gpiospec, flags);
- if (ret < 0)
- goto err1;
+ gpiochip_find(&gg_data, of_gpiochip_find_and_xlate);
- ret += gc->base;
-err1:
- of_node_put(gpiospec.np);
-err0:
+ of_node_put(gg_data.gpiospec.np);
pr_debug("%s exited with status %d\n", __func__, ret);
- return ret;
+ return gg_data.out_gpio;
}
EXPORT_SYMBOL(of_get_named_gpio_flags);
@@ -227,14 +234,3 @@ void of_gpiochip_remove(struct gpio_chip *chip)
if (chip->of_node)
of_node_put(chip->of_node);
}
-
-/* Private function for resolving node pointer to gpio_chip */
-static int of_gpiochip_is_match(struct gpio_chip *chip, const void *data)
-{
- return chip->of_node == data;
-}
-
-struct gpio_chip *of_node_to_gpiochip(struct device_node *np)
-{
- return gpiochip_find(np, of_gpiochip_is_match);
-}
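Editor's note: the refactor above resolves DT gpio specifiers by iterating with gpiochip_find() and letting the per-chip callback run of_xlate. A minimal sketch of the same callback pattern against the updated gpiochip_find() signature (names here are illustrative, not part of the patch):

#include <linux/gpio.h>
#include <linux/string.h>

/* return true to stop the scan; gpiochip_find() hands back the chip */
static int example_match_by_label(struct gpio_chip *chip, void *data)
{
	return chip->label && !strcmp(chip->label, data);
}

static struct gpio_chip *example_find_chip(const char *label)
{
	return gpiochip_find((void *)label, example_match_by_label);
}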
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 5a75510d66bb..120b2a0e3167 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1093,7 +1093,7 @@ unlock:
if (status)
goto fail;
- pr_info("gpiochip_add: registered GPIOs %d to %d on device: %s\n",
+ pr_debug("gpiochip_add: registered GPIOs %d to %d on device: %s\n",
chip->base, chip->base + chip->ngpio - 1,
chip->label ? : "generic");
@@ -1154,9 +1154,9 @@ EXPORT_SYMBOL_GPL(gpiochip_remove);
* non-zero, this function will return to the caller and not iterate over any
* more gpio_chips.
*/
-struct gpio_chip *gpiochip_find(const void *data,
+struct gpio_chip *gpiochip_find(void *data,
int (*match)(struct gpio_chip *chip,
- const void *data))
+ void *data))
{
struct gpio_chip *chip = NULL;
unsigned long flags;
@@ -1302,8 +1302,18 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
(flags & GPIOF_INIT_HIGH) ? 1 : 0);
if (err)
- gpio_free(gpio);
+ goto free_gpio;
+
+ if (flags & GPIOF_EXPORT) {
+ err = gpio_export(gpio, flags & GPIOF_EXPORT_CHANGEABLE);
+ if (err)
+ goto free_gpio;
+ }
+
+ return 0;
+ free_gpio:
+ gpio_free(gpio);
return err;
}
EXPORT_SYMBOL_GPL(gpio_request_one);
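Editor's note: with the change above, gpio_request_one() can also export the line to sysfs and releases it again if the export fails. A hedged usage sketch (the GPIO number and label are made up for illustration):

#include <linux/gpio.h>

static int example_claim_and_export(void)
{
	return gpio_request_one(42,	/* hypothetical GPIO number */
			GPIOF_OUT_INIT_LOW | GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE,
			"demo-led");
}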
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e354bc0b052a..23120c00a881 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -186,3 +186,9 @@ source "drivers/gpu/drm/vmwgfx/Kconfig"
source "drivers/gpu/drm/gma500/Kconfig"
source "drivers/gpu/drm/udl/Kconfig"
+
+source "drivers/gpu/drm/ast/Kconfig"
+
+source "drivers/gpu/drm/mgag200/Kconfig"
+
+source "drivers/gpu/drm/cirrus/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index c20da5bda355..f65f65ed0ddf 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -34,6 +34,8 @@ obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
obj-$(CONFIG_DRM_I915) += i915/
+obj-$(CONFIG_DRM_MGAG200) += mgag200/
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
@@ -42,4 +44,5 @@ obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
+obj-$(CONFIG_DRM_AST) += ast/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
new file mode 100644
index 000000000000..a277b1257888
--- /dev/null
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -0,0 +1,16 @@
+config DRM_AST
+ tristate "AST server chips"
+ depends on DRM && PCI && EXPERIMENTAL
+ select DRM_TTM
+ select FB_SYS_COPYAREA
+ select FB_SYS_FILLRECT
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ help
+ Say yes for the experimental AST GPU driver. Do not enable
+ this driver without a working userspace -modesetting driver,
+ and a version of the AST X.org driver that knows to fail if
+ KMS is bound to the device. These GPUs are commonly found
+ in server chipsets.
+
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
new file mode 100644
index 000000000000..8df4f284ee24
--- /dev/null
+++ b/drivers/gpu/drm/ast/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o
+
+obj-$(CONFIG_DRM_AST) := ast.o
\ No newline at end of file
diff --git a/drivers/gpu/drm/ast/ast_dram_tables.h b/drivers/gpu/drm/ast/ast_dram_tables.h
new file mode 100644
index 000000000000..cc04539c0ff3
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_dram_tables.h
@@ -0,0 +1,144 @@
+#ifndef AST_DRAM_TABLES_H
+#define AST_DRAM_TABLES_H
+
+/* DRAM timing tables */
+struct ast_dramstruct {
+ u16 index;
+ u32 data;
+};
+
+static const struct ast_dramstruct ast2000_dram_table_data[] = {
+ { 0x0108, 0x00000000 },
+ { 0x0120, 0x00004a21 },
+ { 0xFF00, 0x00000043 },
+ { 0x0000, 0xFFFFFFFF },
+ { 0x0004, 0x00000089 },
+ { 0x0008, 0x22331353 },
+ { 0x000C, 0x0d07000b },
+ { 0x0010, 0x11113333 },
+ { 0x0020, 0x00110350 },
+ { 0x0028, 0x1e0828f0 },
+ { 0x0024, 0x00000001 },
+ { 0x001C, 0x00000000 },
+ { 0x0014, 0x00000003 },
+ { 0xFF00, 0x00000043 },
+ { 0x0018, 0x00000131 },
+ { 0x0014, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x0018, 0x00000031 },
+ { 0x0014, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x0028, 0x1e0828f1 },
+ { 0x0024, 0x00000003 },
+ { 0x002C, 0x1f0f28fb },
+ { 0x0030, 0xFFFFFE01 },
+ { 0xFFFF, 0xFFFFFFFF }
+};
+
+static const struct ast_dramstruct ast1100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x000041f0 },
+ { 0xFF00, 0x00000043 },
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00050000 },
+ { 0x0004, 0x00000585 },
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x22201724 },
+ { 0x0018, 0x1e29011a },
+ { 0x0020, 0x00c82222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x032aa02a },
+ { 0x0064, 0x002d3000 },
+ { 0x0068, 0x00000000 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x002C, 0x00000732 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000632 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00004c41 },
+ { 0xffff, 0xffffffff },
+};
+
+static const struct ast_dramstruct ast2100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x00004120 },
+ { 0xFF00, 0x00000043 },
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00070000 },
+ { 0x0004, 0x00000489 },
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x32302926 },
+ { 0x0018, 0x274c0122 },
+ { 0x0020, 0x00ce2222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x0f2aa02a },
+ { 0x0064, 0x003f3005 },
+ { 0x0068, 0x02020202 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x002C, 0x00000942 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000842 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00005061 },
+ { 0xffff, 0xffffffff },
+};
+
+#endif
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
new file mode 100644
index 000000000000..d0c4574ef49c
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "ast_drv.h"
+
+int ast_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, ast_modeset, int, 0400);
+
+#define PCI_VENDOR_ASPEED 0x1a03
+
+static struct drm_driver driver;
+
+#define AST_VGA_DEVICE(id, info) { \
+ .class = PCI_BASE_CLASS_DISPLAY << 16, \
+ .class_mask = 0xff0000, \
+ .vendor = PCI_VENDOR_ASPEED, \
+ .device = id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .driver_data = (unsigned long) info }
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL),
+ AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL),
+ /* AST_VGA_DEVICE(PCI_CHIP_AST1180, NULL), - don't bind to 1180 for now */
+ {0, 0, 0},
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int __devinit
+ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void
+ast_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+
+
+static int ast_drm_freeze(struct drm_device *dev)
+{
+ drm_kms_helper_poll_disable(dev);
+
+ pci_save_state(dev->pdev);
+
+ console_lock();
+ ast_fbdev_set_suspend(dev, 1);
+ console_unlock();
+ return 0;
+}
+
+static int ast_drm_thaw(struct drm_device *dev)
+{
+ int error = 0;
+
+ ast_post_gpu(dev);
+
+ drm_mode_config_reset(dev);
+ mutex_lock(&dev->mode_config.mutex);
+ drm_helper_resume_force_mode(dev);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ console_lock();
+ ast_fbdev_set_suspend(dev, 0);
+ console_unlock();
+ return error;
+}
+
+static int ast_drm_resume(struct drm_device *dev)
+{
+ int ret;
+
+ if (pci_enable_device(dev->pdev))
+ return -EIO;
+
+ ret = ast_drm_thaw(dev);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_enable(dev);
+ return 0;
+}
+
+static int ast_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ int error;
+
+ error = ast_drm_freeze(ddev);
+ if (error)
+ return error;
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+static int ast_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ return ast_drm_resume(ddev);
+}
+
+static int ast_pm_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ if (!ddev || !ddev->dev_private)
+ return -ENODEV;
+ return ast_drm_freeze(ddev);
+
+}
+
+static int ast_pm_thaw(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ return ast_drm_thaw(ddev);
+}
+
+static int ast_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ return ast_drm_freeze(ddev);
+}
+
+static const struct dev_pm_ops ast_pm_ops = {
+ .suspend = ast_pm_suspend,
+ .resume = ast_pm_resume,
+ .freeze = ast_pm_freeze,
+ .thaw = ast_pm_thaw,
+ .poweroff = ast_pm_poweroff,
+ .restore = ast_pm_resume,
+};
+
+static struct pci_driver ast_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = ast_pci_probe,
+ .remove = ast_pci_remove,
+ .driver.pm = &ast_pm_ops,
+};
+
+static const struct file_operations ast_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = ast_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_USE_MTRR | DRIVER_MODESET | DRIVER_GEM,
+ .dev_priv_size = 0,
+
+ .load = ast_driver_load,
+ .unload = ast_driver_unload,
+
+ .fops = &ast_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .gem_init_object = ast_gem_init_object,
+ .gem_free_object = ast_gem_free_object,
+ .dumb_create = ast_dumb_create,
+ .dumb_map_offset = ast_dumb_mmap_offset,
+ .dumb_destroy = ast_dumb_destroy,
+
+};
+
+static int __init ast_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && ast_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (ast_modeset == 0)
+ return -EINVAL;
+ return drm_pci_init(&driver, &ast_pci_driver);
+}
+static void __exit ast_exit(void)
+{
+ drm_pci_exit(&driver, &ast_pci_driver);
+}
+
+module_init(ast_init);
+module_exit(ast_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
new file mode 100644
index 000000000000..d4af9edcbb97
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#ifndef __AST_DRV_H__
+#define __AST_DRV_H__
+
+#include "drm_fb_helper.h"
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#define DRIVER_AUTHOR "Dave Airlie"
+
+#define DRIVER_NAME "ast"
+#define DRIVER_DESC "AST"
+#define DRIVER_DATE "20120228"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
+
+#define PCI_CHIP_AST2000 0x2000
+#define PCI_CHIP_AST2100 0x2010
+#define PCI_CHIP_AST1180 0x1180
+
+
+enum ast_chip {
+ AST2000,
+ AST2100,
+ AST1100,
+ AST2200,
+ AST2150,
+ AST2300,
+ AST1180,
+};
+
+#define AST_DRAM_512Mx16 0
+#define AST_DRAM_1Gx16 1
+#define AST_DRAM_512Mx32 2
+#define AST_DRAM_1Gx32 3
+#define AST_DRAM_2Gx16 6
+#define AST_DRAM_4Gx16 7
+
+struct ast_fbdev;
+
+struct ast_private {
+ struct drm_device *dev;
+
+ void __iomem *regs;
+ void __iomem *ioregs;
+
+ enum ast_chip chip;
+ bool vga2_clone;
+ uint32_t dram_bus_width;
+ uint32_t dram_type;
+ uint32_t mclk;
+ uint32_t vram_size;
+
+ struct ast_fbdev *fbdev;
+
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ atomic_t validate_sequence;
+ } ttm;
+
+ struct drm_gem_object *cursor_cache;
+ uint64_t cursor_cache_gpu_addr;
+ struct ttm_bo_kmap_obj cache_kmap;
+ int next_cursor;
+};
+
+int ast_driver_load(struct drm_device *dev, unsigned long flags);
+int ast_driver_unload(struct drm_device *dev);
+
+struct ast_gem_object;
+
+#define AST_IO_AR_PORT_WRITE (0x40)
+#define AST_IO_MISC_PORT_WRITE (0x42)
+#define AST_IO_SEQ_PORT (0x44)
+#define AST_DAC_INDEX_READ (0x3c7)
+#define AST_IO_DAC_INDEX_WRITE (0x48)
+#define AST_IO_DAC_DATA (0x49)
+#define AST_IO_GR_PORT (0x4E)
+#define AST_IO_CRTC_PORT (0x54)
+#define AST_IO_INPUT_STATUS1_READ (0x5A)
+#define AST_IO_MISC_PORT_READ (0x4C)
+
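+/*
+ * The __ast_read/__ast_write and __ast_io_read/__ast_io_write macros below
+ * generate small inline 8/16/32-bit MMIO accessors for the two register
+ * apertures mapped at load time (ast->regs and ast->ioregs respectively).
+ */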
+#define __ast_read(x) \
+static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->regs + reg); \
+return val;\
+}
+
+__ast_read(8);
+__ast_read(16);
+__ast_read(32);
+
+#define __ast_io_read(x) \
+static inline u##x ast_io_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->ioregs + reg); \
+return val;\
+}
+
+__ast_io_read(8);
+__ast_io_read(16);
+__ast_io_read(32);
+
+#define __ast_write(x) \
+static inline void ast_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+ iowrite##x(val, ast->regs + reg);\
+ }
+
+__ast_write(8);
+__ast_write(16);
+__ast_write(32);
+
+#define __ast_io_write(x) \
+static inline void ast_io_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+ iowrite##x(val, ast->ioregs + reg);\
+ }
+
+__ast_io_write(8);
+__ast_io_write(16);
+#undef __ast_io_write
+
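+/*
+ * The extended registers use the classic VGA index/data pair: a single
+ * 16-bit write to the index port carries the index in the low byte and
+ * the value in the high byte (i.e. the data port at base + 1).
+ */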
+static inline void ast_set_index_reg(struct ast_private *ast,
+ uint32_t base, uint8_t index,
+ uint8_t val)
+{
+ ast_io_write16(ast, base, ((u16)val << 8) | index);
+}
+
+void ast_set_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index,
+ uint8_t mask, uint8_t val);
+uint8_t ast_get_index_reg(struct ast_private *ast,
+ uint32_t base, uint8_t index);
+uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index, uint8_t mask);
+
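+/*
+ * Set bit 2 of extended CRTC register 0xA1 ("open key"); the driver calls
+ * this before it touches the extended registers.
+ */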
+static inline void ast_open_key(struct ast_private *ast)
+{
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04);
+}
+
+#define AST_VIDMEM_SIZE_8M 0x00800000
+#define AST_VIDMEM_SIZE_16M 0x01000000
+#define AST_VIDMEM_SIZE_32M 0x02000000
+#define AST_VIDMEM_SIZE_64M 0x04000000
+#define AST_VIDMEM_SIZE_128M 0x08000000
+
+#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
+
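+/*
+ * Hardware cursor cache layout: AST_DEFAULT_HWC_NUM slots, each holding a
+ * 64x64, 2-byte-per-pixel cursor image followed by a signature block whose
+ * fields are laid out by the AST_HWC_SIGNATURE_* offsets below.
+ */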
+#define AST_MAX_HWC_WIDTH 64
+#define AST_MAX_HWC_HEIGHT 64
+
+#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH*AST_MAX_HWC_HEIGHT*2)
+#define AST_HWC_SIGNATURE_SIZE 32
+
+#define AST_DEFAULT_HWC_NUM 2
+/* define for signature structure */
+#define AST_HWC_SIGNATURE_CHECKSUM 0x00
+#define AST_HWC_SIGNATURE_SizeX 0x04
+#define AST_HWC_SIGNATURE_SizeY 0x08
+#define AST_HWC_SIGNATURE_X 0x0C
+#define AST_HWC_SIGNATURE_Y 0x10
+#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
+#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+
+
+struct ast_i2c_chan {
+ struct i2c_adapter adapter;
+ struct drm_device *dev;
+ struct i2c_algo_bit_data bit;
+};
+
+struct ast_connector {
+ struct drm_connector base;
+ struct ast_i2c_chan *i2c;
+};
+
+struct ast_crtc {
+ struct drm_crtc base;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ struct drm_gem_object *cursor_bo;
+ uint64_t cursor_addr;
+ int cursor_width, cursor_height;
+ u8 offset_x, offset_y;
+};
+
+struct ast_encoder {
+ struct drm_encoder base;
+};
+
+struct ast_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct ast_fbdev {
+ struct drm_fb_helper helper;
+ struct ast_framebuffer afb;
+ struct list_head fbdev_list;
+ void *sysram;
+ int size;
+ struct ttm_bo_kmap_obj mapping;
+};
+
+#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
+#define to_ast_connector(x) container_of(x, struct ast_connector, base)
+#define to_ast_encoder(x) container_of(x, struct ast_encoder, base)
+#define to_ast_framebuffer(x) container_of(x, struct ast_framebuffer, base)
+
+struct ast_vbios_stdtable {
+ u8 misc;
+ u8 seq[4];
+ u8 crtc[25];
+ u8 ar[20];
+ u8 gr[9];
+};
+
+struct ast_vbios_enhtable {
+ u32 ht;
+ u32 hde;
+ u32 hfp;
+ u32 hsync;
+ u32 vt;
+ u32 vde;
+ u32 vfp;
+ u32 vsync;
+ u32 dclk_index;
+ u32 flags;
+ u32 refresh_rate;
+ u32 refresh_rate_index;
+ u32 mode_id;
+};
+
+struct ast_vbios_dclk_info {
+ u8 param1;
+ u8 param2;
+ u8 param3;
+};
+
+struct ast_vbios_mode_info {
+ struct ast_vbios_stdtable *std_table;
+ struct ast_vbios_enhtable *enh_table;
+};
+
+extern int ast_mode_init(struct drm_device *dev);
+extern void ast_mode_fini(struct drm_device *dev);
+
+int ast_framebuffer_init(struct drm_device *dev,
+ struct ast_framebuffer *ast_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+int ast_fbdev_init(struct drm_device *dev);
+void ast_fbdev_fini(struct drm_device *dev);
+void ast_fbdev_set_suspend(struct drm_device *dev, int state);
+
+struct ast_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+#define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem)
+
+static inline struct ast_bo *
+ast_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct ast_bo, bo);
+}
+
+
+#define to_ast_obj(x) container_of(x, struct ast_gem_object, base)
+
+#define AST_MM_ALIGN_SHIFT 4
+#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
+
+extern int ast_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+extern int ast_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
+
+extern int ast_gem_init_object(struct drm_gem_object *obj);
+extern void ast_gem_free_object(struct drm_gem_object *obj);
+extern int ast_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset);
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+int ast_mm_init(struct ast_private *ast);
+void ast_mm_fini(struct ast_private *ast);
+
+int ast_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct ast_bo **pastbo);
+
+int ast_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+
+int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int ast_bo_unpin(struct ast_bo *bo);
+
+int ast_bo_reserve(struct ast_bo *bo, bool no_wait);
+void ast_bo_unreserve(struct ast_bo *bo);
+void ast_ttm_placement(struct ast_bo *bo, int domain);
+int ast_bo_push_sysram(struct ast_bo *bo);
+int ast_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* ast post */
+void ast_post_gpu(struct drm_device *dev);
+#endif
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
new file mode 100644
index 000000000000..2fc8e9e860b1
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+#include "ast_drv.h"
+
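+/*
+ * Flush a damaged rectangle from the vmalloc'ed shadow buffer
+ * (afbdev->sysram) into the framebuffer object in video memory,
+ * kmapping the object on demand.
+ */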
+static void ast_dirty_update(struct ast_fbdev *afbdev,
+ int x, int y, int width, int height)
+{
+ int i;
+ struct drm_gem_object *obj;
+ struct ast_bo *bo;
+ int src_offset, dst_offset;
+ int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
+ int ret;
+ bool unmap = false;
+
+ obj = afbdev->afb.obj;
+ bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("failed to reserve fb bo\n");
+ return;
+ }
+
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ ast_bo_unreserve(bo);
+ return;
+ }
+ unmap = true;
+ }
+ for (i = y; i < y + height; i++) {
+ /* assume equal stride for now */
+ src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
+ memcpy_toio(bo->kmap.virtual + dst_offset, afbdev->sysram + src_offset, width * bpp);
+ }
+ if (unmap)
+ ttm_bo_kunmap(&bo->kmap);
+
+ ast_bo_unreserve(bo);
+}
+
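+/*
+ * fbdev drawing hooks: render with the generic sys_* helpers into the
+ * shadow buffer, then push the touched region to VRAM.
+ */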
+static void ast_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct ast_fbdev *afbdev = info->par;
+ sys_fillrect(info, rect);
+ ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void ast_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct ast_fbdev *afbdev = info->par;
+ sys_copyarea(info, area);
+ ast_dirty_update(afbdev, area->dx, area->dy, area->width,
+ area->height);
+}
+
+static void ast_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct ast_fbdev *afbdev = info->par;
+ sys_imageblit(info, image);
+ ast_dirty_update(afbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+static struct fb_ops astfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = ast_fillrect,
+ .fb_copyarea = ast_copyarea,
+ .fb_imageblit = ast_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int astfb_create_object(struct ast_fbdev *afbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ u32 bpp, depth;
+ u32 size;
+ struct drm_gem_object *gobj;
+
+ int ret = 0;
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = ast_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int astfb_create(struct ast_fbdev *afbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_framebuffer *fb;
+ struct fb_info *info;
+ int size, ret;
+ struct device *device = &dev->pdev->dev;
+ void *sysram;
+ struct drm_gem_object *gobj = NULL;
+ struct ast_bo *bo = NULL;
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7)/8);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ ret = astfb_create_object(afbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+ bo = gem_to_ast_bo(gobj);
+
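+ /*
+ * The console renders into this system-memory shadow buffer;
+ * ast_dirty_update() copies damaged regions into the VRAM object.
+ */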
+ sysram = vmalloc(size);
+ if (!sysram)
+ return -ENOMEM;
+
+ info = framebuffer_alloc(0, device);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ info->par = afbdev;
+
+ ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
+ if (ret)
+ goto out;
+
+ afbdev->sysram = sysram;
+ afbdev->size = size;
+
+ fb = &afbdev->afb.base;
+ afbdev->helper.fb = fb;
+ afbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "astdrmfb");
+
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &astfb_ops;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height);
+
+ info->screen_base = sysram;
+ info->screen_size = size;
+
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ DRM_DEBUG_KMS("allocated %dx%d\n",
+ fb->width, fb->height);
+
+ return 0;
+out:
+ return ret;
+}
+
+static void ast_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ ast_crtc->lut_r[regno] = red >> 8;
+ ast_crtc->lut_g[regno] = green >> 8;
+ ast_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ *red = ast_crtc->lut_r[regno] << 8;
+ *green = ast_crtc->lut_g[regno] << 8;
+ *blue = ast_crtc->lut_b[regno] << 8;
+}
+
+static int ast_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = astfb_create(afbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
+ .gamma_set = ast_fb_gamma_set,
+ .gamma_get = ast_fb_gamma_get,
+ .fb_probe = ast_find_or_create_single,
+};
+
+static void ast_fbdev_destroy(struct drm_device *dev,
+ struct ast_fbdev *afbdev)
+{
+ struct fb_info *info;
+ struct ast_framebuffer *afb = &afbdev->afb;
+ if (afbdev->helper.fbdev) {
+ info = afbdev->helper.fbdev;
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (afb->obj) {
+ drm_gem_object_unreference_unlocked(afb->obj);
+ afb->obj = NULL;
+ }
+ drm_fb_helper_fini(&afbdev->helper);
+
+ vfree(afbdev->sysram);
+ drm_framebuffer_cleanup(&afb->base);
+}
+
+int ast_fbdev_init(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast_fbdev *afbdev;
+ int ret;
+
+ afbdev = kzalloc(sizeof(struct ast_fbdev), GFP_KERNEL);
+ if (!afbdev)
+ return -ENOMEM;
+
+ ast->fbdev = afbdev;
+ afbdev->helper.funcs = &ast_fb_helper_funcs;
+ ret = drm_fb_helper_init(dev, &afbdev->helper,
+ 1, 1);
+ if (ret) {
+ kfree(afbdev);
+ return ret;
+ }
+
+ drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+ drm_fb_helper_initial_config(&afbdev->helper, 32);
+ return 0;
+}
+
+void ast_fbdev_fini(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (!ast->fbdev)
+ return;
+
+ ast_fbdev_destroy(dev, ast->fbdev);
+ kfree(ast->fbdev);
+ ast->fbdev = NULL;
+}
+
+void ast_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (!ast->fbdev)
+ return;
+
+ fb_set_suspend(ast->fbdev->helper.fbdev, state);
+}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
new file mode 100644
index 000000000000..95ae55b8214b
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -0,0 +1,527 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "ast_drv.h"
+
+
+#include "drm_fb_helper.h"
+#include "drm_crtc_helper.h"
+
+#include "ast_dram_tables.h"
+
+void ast_set_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index,
+ uint8_t mask, uint8_t val)
+{
+ u8 tmp;
+ ast_io_write8(ast, base, index);
+ tmp = (ast_io_read8(ast, base + 1) & mask) | val;
+ ast_set_index_reg(ast, base, index, tmp);
+}
+
+uint8_t ast_get_index_reg(struct ast_private *ast,
+ uint32_t base, uint8_t index)
+{
+ uint8_t ret;
+ ast_io_write8(ast, base, index);
+ ret = ast_io_read8(ast, base + 1);
+ return ret;
+}
+
+uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index, uint8_t mask)
+{
+ uint8_t ret;
+ ast_io_write8(ast, base, index);
+ ret = ast_io_read8(ast, base + 1) & mask;
+ return ret;
+}
+
+
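+/*
+ * Work out which chip generation we are driving: AST1180 by PCI device ID,
+ * AST2300 and newer by PCI revision, and the AST2100-era parts by the
+ * value read back at 0x1207c after programming 0xf004/0xf000.
+ */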
+static int ast_detect_chip(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (dev->pdev->device == PCI_CHIP_AST1180) {
+ ast->chip = AST1100;
+ DRM_INFO("AST 1180 detected\n");
+ } else {
+ if (dev->pdev->revision >= 0x20) {
+ ast->chip = AST2300;
+ DRM_INFO("AST 2300 detected\n");
+ } else if (dev->pdev->revision >= 0x10) {
+ uint32_t data;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+
+ data = ast_read32(ast, 0x1207c);
+ switch (data & 0x0300) {
+ case 0x0200:
+ ast->chip = AST1100;
+ DRM_INFO("AST 1100 detected\n");
+ break;
+ case 0x0100:
+ ast->chip = AST2200;
+ DRM_INFO("AST 2200 detected\n");
+ break;
+ case 0x0000:
+ ast->chip = AST2150;
+ DRM_INFO("AST 2150 detected\n");
+ break;
+ default:
+ ast->chip = AST2100;
+ DRM_INFO("AST 2100 detected\n");
+ break;
+ }
+ ast->vga2_clone = false;
+ } else {
+ ast->chip = AST2000;
+ DRM_INFO("AST 2000 detected\n");
+ }
+ }
+ return 0;
+}
+
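+/*
+ * Read back the DRAM configuration: bus width, DRAM type, and the memory
+ * clock in MHz derived from the PLL numerator/denominator/post-divider.
+ */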
+static int ast_get_dram_info(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ uint32_t data, data2;
+ uint32_t denum, num, div, ref_pll;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x01);
+ data = ast_read32(ast, 0x10004);
+
+ if (data & 0x400)
+ ast->dram_bus_width = 16;
+ else
+ ast->dram_bus_width = 32;
+
+ if (ast->chip == AST2300) {
+ switch (data & 0x03) {
+ case 0:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ default:
+ case 1:
+ ast->dram_type = AST_DRAM_1Gx16;
+ break;
+ case 2:
+ ast->dram_type = AST_DRAM_2Gx16;
+ break;
+ case 3:
+ ast->dram_type = AST_DRAM_4Gx16;
+ break;
+ }
+ } else {
+ switch (data & 0x0c) {
+ case 0:
+ case 4:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ case 8:
+ if (data & 0x40)
+ ast->dram_type = AST_DRAM_1Gx16;
+ else
+ ast->dram_type = AST_DRAM_512Mx32;
+ break;
+ case 0xc:
+ ast->dram_type = AST_DRAM_1Gx32;
+ break;
+ }
+ }
+
+ data = ast_read32(ast, 0x10120);
+ data2 = ast_read32(ast, 0x10170);
+ if (data2 & 0x2000)
+ ref_pll = 14318;
+ else
+ ref_pll = 12000;
+
+ denum = data & 0x1f;
+ num = (data & 0x3fe0) >> 5;
+ data = (data & 0xc000) >> 14;
+ switch (data) {
+ case 3:
+ div = 0x4;
+ break;
+ case 2:
+ case 1:
+ div = 0x2;
+ break;
+ default:
+ div = 0x1;
+ break;
+ }
+ ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000));
+ return 0;
+}
+
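+/*
+ * Maximum pixel clock (MHz) the memory interface can sustain for a given
+ * bpp: raw bandwidth is mclk * bus_width * 2 / 8, derated by a per-chip
+ * efficiency factor, then capped at 200 MHz (AST2100/2200/2300/1180) or
+ * 165 MHz for the older parts.
+ */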
+uint32_t ast_get_max_dclk(struct drm_device *dev, int bpp)
+{
+ struct ast_private *ast = dev->dev_private;
+ uint32_t dclk, jreg;
+ uint32_t dram_bus_width, mclk, dram_bandwidth, actual_dram_bandwidth, dram_efficiency = 500;
+
+ dram_bus_width = ast->dram_bus_width;
+ mclk = ast->mclk;
+
+ if (ast->chip == AST2100 ||
+ ast->chip == AST1100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2150 ||
+ ast->dram_bus_width == 16)
+ dram_efficiency = 600;
+ else if (ast->chip == AST2300)
+ dram_efficiency = 400;
+
+ dram_bandwidth = mclk * dram_bus_width * 2 / 8;
+ actual_dram_bandwidth = dram_bandwidth * dram_efficiency / 1000;
+
+ if (ast->chip == AST1180)
+ dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
+ else {
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ if ((jreg & 0x08) && (ast->chip == AST2000))
+ dclk = actual_dram_bandwidth / ((bpp + 1 + 16) / 8);
+ else if ((jreg & 0x08) && (bpp == 8))
+ dclk = actual_dram_bandwidth / ((bpp + 1 + 24) / 8);
+ else
+ dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
+ }
+
+ if (ast->chip == AST2100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2300 ||
+ ast->chip == AST1180) {
+ if (dclk > 200)
+ dclk = 200;
+ } else {
+ if (dclk > 165)
+ dclk = 165;
+ }
+
+ return dclk;
+}
+
+static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
+ if (ast_fb->obj)
+ drm_gem_object_unreference_unlocked(ast_fb->obj);
+
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static int ast_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned int *handle)
+{
+ return -EINVAL;
+}
+
+static const struct drm_framebuffer_funcs ast_fb_funcs = {
+ .destroy = ast_user_framebuffer_destroy,
+ .create_handle = ast_user_framebuffer_create_handle,
+};
+
+
+int ast_framebuffer_init(struct drm_device *dev,
+ struct ast_framebuffer *ast_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
+ if (ret) {
+ DRM_ERROR("framebuffer init failed %d\n", ret);
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
+ ast_fb->obj = obj;
+ return 0;
+}
+
+static struct drm_framebuffer *
+ast_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct ast_framebuffer *ast_fb;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL);
+ if (!ast_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(ast_fb);
+ return ERR_PTR(ret);
+ }
+ return &ast_fb->base;
+}
+
+static const struct drm_mode_config_funcs ast_mode_funcs = {
+ .fb_create = ast_user_framebuffer_create,
+};
+
+static u32 ast_get_vram_info(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 jreg;
+
+ ast_open_key(ast);
+
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
+ switch (jreg & 3) {
+ case 0: return AST_VIDMEM_SIZE_8M;
+ case 1: return AST_VIDMEM_SIZE_16M;
+ case 2: return AST_VIDMEM_SIZE_32M;
+ case 3: return AST_VIDMEM_SIZE_64M;
+ }
+ return AST_VIDMEM_DEFAULT_SIZE;
+}
+
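+/*
+ * Main load path: map the MMIO and I/O register BARs, detect the chip and
+ * memory configuration, bring up the TTM-managed VRAM, then register the
+ * KMS objects and the fbdev console.
+ */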
+int ast_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct ast_private *ast;
+ int ret = 0;
+
+ ast = kzalloc(sizeof(struct ast_private), GFP_KERNEL);
+ if (!ast)
+ return -ENOMEM;
+
+ dev->dev_private = ast;
+ ast->dev = dev;
+
+ ast->regs = pci_iomap(dev->pdev, 1, 0);
+ if (!ast->regs) {
+ ret = -EIO;
+ goto out_free;
+ }
+ ast->ioregs = pci_iomap(dev->pdev, 2, 0);
+ if (!ast->ioregs) {
+ ret = -EIO;
+ goto out_free;
+ }
+
+ ast_detect_chip(dev);
+
+ if (ast->chip != AST1180) {
+ ast_get_dram_info(dev);
+ ast->vram_size = ast_get_vram_info(dev);
+ DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ goto out_free;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.funcs = (void *)&ast_mode_funcs;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
+ if (ast->chip == AST2100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2300 ||
+ ast->chip == AST1180) {
+ dev->mode_config.max_width = 1920;
+ dev->mode_config.max_height = 2048;
+ } else {
+ dev->mode_config.max_width = 1600;
+ dev->mode_config.max_height = 1200;
+ }
+
+ ret = ast_mode_init(dev);
+ if (ret)
+ goto out_free;
+
+ ret = ast_fbdev_init(dev);
+ if (ret)
+ goto out_free;
+
+ return 0;
+out_free:
+ kfree(ast);
+ dev->dev_private = NULL;
+ return ret;
+}
+
+int ast_driver_unload(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ ast_mode_fini(dev);
+ ast_fbdev_fini(dev);
+ drm_mode_config_cleanup(dev);
+
+ ast_mm_fini(ast);
+ pci_iounmap(dev->pdev, ast->ioregs);
+ pci_iounmap(dev->pdev, ast->regs);
+ kfree(ast);
+ return 0;
+}
+
+int ast_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct ast_bo *astbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = ast_bo_create(dev, size, 0, 0, &astbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &astbo->gem;
+ return 0;
+}
+
+int ast_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = ast_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
+
+int ast_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+int ast_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
+ return 0;
+}
+
+void ast_bo_unref(struct ast_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+}
+
+void ast_gem_free_object(struct drm_gem_object *obj)
+{
+ struct ast_bo *ast_bo = gem_to_ast_bo(obj);
+
+ if (!ast_bo)
+ return;
+ ast_bo_unref(&ast_bo);
+}
+
+
+static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
+{
+ return bo->bo.addr_space_offset;
+}
+
+int
+ast_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct ast_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_ast_bo(obj);
+ *offset = ast_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
new file mode 100644
index 000000000000..65f9d231af14
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -0,0 +1,1160 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ * Parts based on xf86-video-ast
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/export.h>
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "ast_drv.h"
+
+#include "ast_tables.h"
+
+static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
+static void ast_i2c_destroy(struct ast_i2c_chan *i2c);
+static int ast_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height);
+static int ast_cursor_move(struct drm_crtc *crtc,
+ int x, int y);
+
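+/*
+ * Program one palette entry through the DAC index/data ports; a dummy read
+ * of the SEQ port is interleaved after each data write.
+ */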
+static inline void ast_load_palette_index(struct ast_private *ast,
+ u8 index, u8 red, u8 green,
+ u8 blue)
+{
+ ast_io_write8(ast, AST_IO_DAC_INDEX_WRITE, index);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+ ast_io_write8(ast, AST_IO_DAC_DATA, red);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+ ast_io_write8(ast, AST_IO_DAC_DATA, green);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+ ast_io_write8(ast, AST_IO_DAC_DATA, blue);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+}
+
+static void ast_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ int i;
+
+ if (!crtc->enabled)
+ return;
+
+ for (i = 0; i < 256; i++)
+ ast_load_palette_index(ast, i, ast_crtc->lut_r[i],
+ ast_crtc->lut_g[i], ast_crtc->lut_b[i]);
+}
+
+static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
+ u32 hborder, vborder;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
+ color_index = VGAModeIndex - 1;
+ break;
+ case 16:
+ vbios_mode->std_table = &vbios_stdtable[HiCModeIndex];
+ color_index = HiCModeIndex;
+ break;
+ case 24:
+ case 32:
+ vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex];
+ color_index = TrueCModeIndex;
+ break;
+ default:
+ return false;
+ }
+
+ switch (crtc->mode.crtc_hdisplay) {
+ case 640:
+ vbios_mode->enh_table = &res_640x480[refresh_rate_index];
+ break;
+ case 800:
+ vbios_mode->enh_table = &res_800x600[refresh_rate_index];
+ break;
+ case 1024:
+ vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
+ break;
+ case 1280:
+ if (crtc->mode.crtc_vdisplay == 800)
+ vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
+ else
+ vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
+ break;
+ case 1440:
+ vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
+ break;
+ case 1600:
+ vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
+ break;
+ case 1680:
+ vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
+ break;
+ case 1920:
+ if (crtc->mode.crtc_vdisplay == 1080)
+ vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
+ else
+ vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
+ break;
+ default:
+ return false;
+ }
+
+ refresh_rate = drm_mode_vrefresh(mode);
+ while (vbios_mode->enh_table->refresh_rate < refresh_rate) {
+ vbios_mode->enh_table++;
+ if ((vbios_mode->enh_table->refresh_rate > refresh_rate) ||
+ (vbios_mode->enh_table->refresh_rate == 0xff)) {
+ vbios_mode->enh_table--;
+ break;
+ }
+ }
+
+ hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
+ vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
+
+ adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
+ adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
+ adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
+ adjusted_mode->crtc_hsync_start = vbios_mode->enh_table->hde + hborder +
+ vbios_mode->enh_table->hfp;
+ adjusted_mode->crtc_hsync_end = (vbios_mode->enh_table->hde + hborder +
+ vbios_mode->enh_table->hfp +
+ vbios_mode->enh_table->hsync);
+
+ adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
+ adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
+ adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;
+ adjusted_mode->crtc_vsync_start = vbios_mode->enh_table->vde + vborder +
+ vbios_mode->enh_table->vfp;
+ adjusted_mode->crtc_vsync_end = (vbios_mode->enh_table->vde + vborder +
+ vbios_mode->enh_table->vfp +
+ vbios_mode->enh_table->vsync);
+
+ refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
+ mode_id = vbios_mode->enh_table->mode_id;
+
+ if (ast->chip == AST1180) {
+ /* TODO 1180 */
+ } else {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0xf) << 4));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->fb->bits_per_pixel);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
+ }
+
+ return true;
+}
+
+static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_vbios_stdtable *stdtable;
+ u32 i;
+ u8 jreg;
+
+ stdtable = vbios_mode->std_table;
+
+ jreg = stdtable->misc;
+ ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
+
+ /* Set SEQ */
+ ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03);
+ for (i = 0; i < 4; i++) {
+ jreg = stdtable->seq[i];
+ if (!i)
+ jreg |= 0x20;
+ ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1) , jreg);
+ }
+
+ /* Set CRTC */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
+ for (i = 0; i < 25; i++)
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
+
+ /* set AR */
+ jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+ for (i = 0; i < 20; i++) {
+ jreg = stdtable->ar[i];
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, (u8)i);
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, jreg);
+ }
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x14);
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x00);
+
+ jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x20);
+
+ /* Set GR */
+ for (i = 0; i < 9; i++)
+ ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
+}
+
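+/*
+ * Program the legacy VGA CRTC timing registers; overflow bits that no
+ * longer fit the 8-bit fields are gathered in jreg07/jreg09 and in the
+ * extended registers 0xAC, 0xAD and 0xAE.
+ */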
+static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
+ u16 temp;
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
+
+ temp = (mode->crtc_htotal >> 3) - 5;
+ if (temp & 0x100)
+ jregAC |= 0x01; /* HT D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x00, 0x00, temp);
+
+ temp = (mode->crtc_hdisplay >> 3) - 1;
+ if (temp & 0x100)
+ jregAC |= 0x04; /* HDE D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x01, 0x00, temp);
+
+ temp = (mode->crtc_hblank_start >> 3) - 1;
+ if (temp & 0x100)
+ jregAC |= 0x10; /* HBS D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x02, 0x00, temp);
+
+ temp = ((mode->crtc_hblank_end >> 3) - 1) & 0x7f;
+ if (temp & 0x20)
+ jreg05 |= 0x80; /* HBE D[5] */
+ if (temp & 0x40)
+ jregAD |= 0x01; /* HBE D[5] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x03, 0xE0, (temp & 0x1f));
+
+ temp = (mode->crtc_hsync_start >> 3) - 1;
+ if (temp & 0x100)
+ jregAC |= 0x40; /* HRS D[5] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x04, 0x00, temp);
+
+ temp = ((mode->crtc_hsync_end >> 3) - 1) & 0x3f;
+ if (temp & 0x20)
+ jregAD |= 0x04; /* HRE D[5] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05));
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD);
+
+ /* vert timings */
+ temp = (mode->crtc_vtotal) - 2;
+ if (temp & 0x100)
+ jreg07 |= 0x01;
+ if (temp & 0x200)
+ jreg07 |= 0x20;
+ if (temp & 0x400)
+ jregAE |= 0x01;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x06, 0x00, temp);
+
+ temp = (mode->crtc_vsync_start) - 1;
+ if (temp & 0x100)
+ jreg07 |= 0x04;
+ if (temp & 0x200)
+ jreg07 |= 0x80;
+ if (temp & 0x400)
+ jregAE |= 0x08;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x10, 0x00, temp);
+
+ temp = (mode->crtc_vsync_end - 1) & 0x3f;
+ if (temp & 0x10)
+ jregAE |= 0x20;
+ if (temp & 0x20)
+ jregAE |= 0x40;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x70, temp & 0xf);
+
+ temp = mode->crtc_vdisplay - 1;
+ if (temp & 0x100)
+ jreg07 |= 0x02;
+ if (temp & 0x200)
+ jreg07 |= 0x40;
+ if (temp & 0x400)
+ jregAE |= 0x02;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x12, 0x00, temp);
+
+ temp = mode->crtc_vblank_start - 1;
+ if (temp & 0x100)
+ jreg07 |= 0x08;
+ if (temp & 0x200)
+ jreg09 |= 0x20;
+ if (temp & 0x400)
+ jregAE |= 0x04;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x15, 0x00, temp);
+
+ temp = mode->crtc_vblank_end - 1;
+ if (temp & 0x100)
+ jregAE |= 0x10;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x16, 0x00, temp);
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x07, 0x00, jreg07);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x09, 0xdf, jreg09);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAE, 0x00, (jregAE | 0x80));
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
+}
+
+static void ast_set_offset_reg(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+
+ u16 offset;
+
+ offset = crtc->fb->pitches[0] >> 3;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
+}
+
+static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast_vbios_dclk_info *clk_info;
+
+ clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc0, 0x00, clk_info->param1);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc1, 0x00, clk_info->param2);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xbb, 0x0f,
+ (clk_info->param3 & 0x80) | ((clk_info->param3 & 0x3) << 4));
+}
+
+static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ jregA0 = 0x70;
+ jregA3 = 0x01;
+ jregA8 = 0x00;
+ break;
+ case 15:
+ case 16:
+ jregA0 = 0x70;
+ jregA3 = 0x04;
+ jregA8 = 0x02;
+ break;
+ case 32:
+ jregA0 = 0x70;
+ jregA3 = 0x08;
+ jregA8 = 0x02;
+ break;
+ }
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, jregA0);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
+
+ /* Set Threshold */
+ if (ast->chip == AST2300) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
+ } else if (ast->chip == AST2100 ||
+ ast->chip == AST1100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2150) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f);
+ } else {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x2f);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x1f);
+ }
+}
+
+void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 jreg;
+
+ jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
+ jreg |= (vbios_mode->enh_table->flags & SyncNN);
+ ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
+}
+
+bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u32 addr;
+
+ addr = offset >> 2;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0d, (u8)(addr & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0c, (u8)((addr >> 8) & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xaf, (u8)((addr >> 16) & 0xff));
+}
+
+static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+
+ if (ast->chip == AST1180)
+ return;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+ ast_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_OFF:
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
+ break;
+ }
+}
+
+static bool ast_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/* ast is different - we will force move buffers out of VRAM */
+static int ast_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct ast_framebuffer *ast_fb;
+ struct ast_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* push the previous fb to system ram */
+ if (!atomic && fb) {
+ ast_fb = to_ast_framebuffer(fb);
+ obj = ast_fb->obj;
+ bo = gem_to_ast_bo(obj);
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+ ast_bo_push_sysram(bo);
+ ast_bo_unreserve(bo);
+ }
+
+ ast_fb = to_ast_framebuffer(crtc->fb);
+ obj = ast_fb->obj;
+ bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ ast_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&ast->fbdev->afb == ast_fb) {
+ /* if pushing console in kmap it */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret)
+ DRM_ERROR("failed to kmap fbcon\n");
+ }
+ ast_bo_unreserve(bo);
+
+ ast_set_start_address_crt1(crtc, (u32)gpu_addr);
+
+ return 0;
+}
+
+static int ast_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return ast_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int ast_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_vbios_mode_info vbios_mode;
+ bool ret;
+ if (ast->chip == AST1180) {
+ DRM_ERROR("AST 1180 modesetting not supported\n");
+ return -EINVAL;
+ }
+
+ ret = ast_get_vbios_mode_info(crtc, mode, adjusted_mode, &vbios_mode);
+ if (ret == false)
+ return -EINVAL;
+ ast_open_key(ast);
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+
+ ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_offset_reg(crtc);
+ ast_set_dclk_reg(dev, adjusted_mode, &vbios_mode);
+ ast_set_ext_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_sync_reg(dev, adjusted_mode, &vbios_mode);
+ ast_set_dac_reg(crtc, adjusted_mode, &vbios_mode);
+
+ ast_crtc_mode_set_base(crtc, x, y, old_fb);
+
+ return 0;
+}
+
+static void ast_crtc_disable(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_prepare(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_commit(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+}
+
+
+static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
+ .dpms = ast_crtc_dpms,
+ .mode_fixup = ast_crtc_mode_fixup,
+ .mode_set = ast_crtc_mode_set,
+ .mode_set_base = ast_crtc_mode_set_base,
+ .disable = ast_crtc_disable,
+ .load_lut = ast_crtc_load_lut,
+ .prepare = ast_crtc_prepare,
+ .commit = ast_crtc_commit,
+};
+
+static void ast_crtc_reset(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ int end = (start + size > 256) ? 256 : start + size, i;
+
+ /* userspace palettes are always correct as is */
+ for (i = start; i < end; i++) {
+ ast_crtc->lut_r[i] = red[i] >> 8;
+ ast_crtc->lut_g[i] = green[i] >> 8;
+ ast_crtc->lut_b[i] = blue[i] >> 8;
+ }
+ ast_crtc_load_lut(crtc);
+}
+
+
+static void ast_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static const struct drm_crtc_funcs ast_crtc_funcs = {
+ .cursor_set = ast_cursor_set,
+ .cursor_move = ast_cursor_move,
+ .reset = ast_crtc_reset,
+ .set_config = drm_crtc_helper_set_config,
+ .gamma_set = ast_crtc_gamma_set,
+ .destroy = ast_crtc_destroy,
+};
+
+int ast_crtc_init(struct drm_device *dev)
+{
+ struct ast_crtc *crtc;
+ int i;
+
+ crtc = kzalloc(sizeof(struct ast_crtc), GFP_KERNEL);
+ if (!crtc)
+ return -ENOMEM;
+
+ drm_crtc_init(dev, &crtc->base, &ast_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(&crtc->base, 256);
+ drm_crtc_helper_add(&crtc->base, &ast_crtc_helper_funcs);
+
+ for (i = 0; i < 256; i++) {
+ crtc->lut_r[i] = i;
+ crtc->lut_g[i] = i;
+ crtc->lut_b[i] = i;
+ }
+ return 0;
+}
+
+static void ast_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+
+static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+
+static const struct drm_encoder_funcs ast_enc_funcs = {
+ .destroy = ast_encoder_destroy,
+};
+
+static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool ast_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void ast_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void ast_encoder_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void ast_encoder_commit(struct drm_encoder *encoder)
+{
+
+}
+
+
+static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
+ .dpms = ast_encoder_dpms,
+ .mode_fixup = ast_mode_fixup,
+ .prepare = ast_encoder_prepare,
+ .commit = ast_encoder_commit,
+ .mode_set = ast_encoder_mode_set,
+};
+
+int ast_encoder_init(struct drm_device *dev)
+{
+ struct ast_encoder *ast_encoder;
+
+ ast_encoder = kzalloc(sizeof(struct ast_encoder), GFP_KERNEL);
+ if (!ast_encoder)
+ return -ENOMEM;
+
+ drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs);
+
+ ast_encoder->base.possible_crtcs = 1;
+ return 0;
+}
+
+static int ast_get_modes(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(&ast_connector->base, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ return ret;
+ } else {
+ drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
+ }
+ return 0;
+}
+
+static int ast_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void ast_connector_destroy(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ ast_i2c_destroy(ast_connector->i2c);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static enum drm_connector_status
+ast_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
+ .mode_valid = ast_mode_valid,
+ .get_modes = ast_get_modes,
+ .best_encoder = ast_best_single_encoder,
+};
+
+static const struct drm_connector_funcs ast_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = ast_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = ast_connector_destroy,
+};
+
+int ast_connector_init(struct drm_device *dev)
+{
+ struct ast_connector *ast_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ ast_connector = kzalloc(sizeof(struct ast_connector), GFP_KERNEL);
+ if (!ast_connector)
+ return -ENOMEM;
+
+ connector = &ast_connector->base;
+ drm_connector_init(dev, connector, &ast_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_connector_helper_add(connector, &ast_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_sysfs_connector_add(connector);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ ast_connector->i2c = ast_i2c_create(dev);
+ if (!ast_connector->i2c)
+ DRM_ERROR("failed to add ddc bus for connector\n");
+
+ return 0;
+}
+
+/* allocate cursor cache and pin at start of VRAM */
+int ast_cursor_init(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ int size;
+ int ret;
+ struct drm_gem_object *obj;
+ struct ast_bo *bo;
+ uint64_t gpu_addr;
+
+ size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM;
+
+ ret = ast_gem_create(dev, size, true, &obj);
+ if (ret)
+ return ret;
+ bo = gem_to_ast_bo(obj);
+ ret = ast_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ goto fail;
+
+ ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ ast_bo_unreserve(bo);
+ if (ret)
+ goto fail;
+
+ /* kmap the object */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &ast->cache_kmap);
+ if (ret)
+ goto fail;
+
+ ast->cursor_cache = obj;
+ ast->cursor_cache_gpu_addr = gpu_addr;
+ DRM_ERROR("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
+ return 0;
+fail:
+ return ret;
+}
+
+void ast_cursor_fini(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ ttm_bo_kunmap(&ast->cache_kmap);
+ drm_gem_object_unreference_unlocked(ast->cursor_cache);
+}
+
+int ast_mode_init(struct drm_device *dev)
+{
+ ast_cursor_init(dev);
+ ast_crtc_init(dev);
+ ast_encoder_init(dev);
+ ast_connector_init(dev);
+ return 0;
+}
+
+void ast_mode_fini(struct drm_device *dev)
+{
+ ast_cursor_fini(dev);
+}
+
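+/*
+ * DDC is bit-banged through extended CRTC register 0xB7: bits 4 and 5 read
+ * back SCL and SDA, bits 0 and 2 drive them, and the set_* helpers poll
+ * until the written value is reflected in the readback.
+ */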
+static int get_clock(void *i2c_priv)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ uint32_t val;
+
+ val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
+ return val & 1 ? 1 : 0;
+}
+
+static int get_data(void *i2c_priv)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ uint32_t val;
+
+ val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
+ return val & 1 ? 1 : 0;
+}
+
+static void set_clock(void *i2c_priv, int clock)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ int i;
+ u8 ujcrb7, jtemp;
+
+ for (i = 0; i < 0x10000; i++) {
+ ujcrb7 = ((clock & 0x01) ? 0 : 1);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
+ if (ujcrb7 == jtemp)
+ break;
+ }
+}
+
+static void set_data(void *i2c_priv, int data)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ int i;
+ u8 ujcrb7, jtemp;
+
+ for (i = 0; i < 0x10000; i++) {
+ ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
+ if (ujcrb7 == jtemp)
+ break;
+ }
+}
+
+static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
+{
+ struct ast_i2c_chan *i2c;
+ int ret;
+
+ i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "AST i2c bit bus");
+ i2c->adapter.algo_data = &i2c->bit;
+
+ i2c->bit.udelay = 20;
+ i2c->bit.timeout = 2;
+ i2c->bit.data = i2c;
+ i2c->bit.setsda = set_data;
+ i2c->bit.setscl = set_clock;
+ i2c->bit.getsda = get_data;
+ i2c->bit.getscl = get_clock;
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ DRM_ERROR("Failed to register bit i2c\n");
+ goto out_free;
+ }
+
+ return i2c;
+out_free:
+ kfree(i2c);
+ return NULL;
+}
+
+static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
+{
+ if (!i2c)
+ return;
+ i2c_del_adapter(&i2c->adapter);
+ kfree(i2c);
+}
+
+void ast_show_cursor(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u8 jreg;
+
+ jreg = 0x2;
+ /* enable ARGB cursor */
+ jreg |= 1;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+}
+
+void ast_hide_cursor(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
+}
+
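+/*
+ * Convert an ARGB8888 cursor image into the packed 16-bit-per-pixel
+ * format used by the hardware cursor (top nibble of each channel, i.e.
+ * ARGB4444), place it bottom/right aligned in the cursor cache slot and
+ * accumulate a checksum over the converted data.
+ */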
+static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
+{
+ union {
+ u32 ul;
+ u8 b[4];
+ } srcdata32[2], data32;
+ union {
+ u16 us;
+ u8 b[2];
+ } data16;
+ u32 csum = 0;
+ s32 alpha_dst_delta, last_alpha_dst_delta;
+ u8 *srcxor, *dstxor;
+ int i, j;
+ u32 per_pixel_copy, two_pixel_copy;
+
+ alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
+ last_alpha_dst_delta = alpha_dst_delta - (width << 1);
+
+ srcxor = src;
+ dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
+ per_pixel_copy = width & 1;
+ two_pixel_copy = width >> 1;
+
+ for (j = 0; j < height; j++) {
+ for (i = 0; i < two_pixel_copy; i++) {
+ srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+ srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
+ data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+ data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+ data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
+ data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
+
+ writel(data32.ul, dstxor);
+ csum += data32.ul;
+
+ dstxor += 4;
+ srcxor += 8;
+
+ }
+
+ for (i = 0; i < per_pixel_copy; i++) {
+ srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+ data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+ data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+ writew(data16.us, dstxor);
+ csum += (u32)data16.us;
+
+ dstxor += 2;
+ srcxor += 4;
+ }
+ dstxor += last_alpha_dst_delta;
+ }
+ return csum;
+}
+
+static int ast_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ struct drm_gem_object *obj;
+ struct ast_bo *bo;
+ uint64_t gpu_addr;
+ u32 csum;
+ int ret;
+ struct ttm_bo_kmap_obj uobj_map;
+ u8 *src, *dst;
+ bool src_isiomem, dst_isiomem;
+
+ if (!handle) {
+ ast_hide_cursor(crtc);
+ return 0;
+ }
+
+ if (width > AST_MAX_HWC_WIDTH || height > AST_MAX_HWC_HEIGHT)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
+ return -ENOENT;
+ }
+ bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ goto fail;
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
+ if (ret) {
+ ast_bo_unreserve(bo);
+ goto fail;
+ }
+
+ src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
+ dst = ttm_kmap_obj_virtual(&ast->cache_kmap, &dst_isiomem);
+
+ if (src_isiomem)
+ DRM_ERROR("src cursor bo should be in main memory\n");
+ if (!dst_isiomem)
+ DRM_ERROR("dst bo should be in VRAM\n");
+
+ dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
+
+ /* do data transfer to cursor cache */
+ csum = copy_cursor_image(src, dst, width, height);
+
+ /* write checksum + signature */
+ ttm_bo_kunmap(&uobj_map);
+ ast_bo_unreserve(bo);
+ {
+ u8 *dst = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ writel(csum, dst);
+ writel(width, dst + AST_HWC_SIGNATURE_SizeX);
+ writel(height, dst + AST_HWC_SIGNATURE_SizeY);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
+
+ /* set pattern offset */
+ gpu_addr = ast->cursor_cache_gpu_addr;
+ gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
+ gpu_addr >>= 3;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, gpu_addr & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff);
+ }
+ ast_crtc->cursor_width = width;
+ ast_crtc->cursor_height = height;
+ ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
+ ast_crtc->offset_y = AST_MAX_HWC_HEIGHT - height;
+
+ ast->next_cursor = (ast->next_cursor + 1) % AST_DEFAULT_HWC_NUM;
+
+ ast_show_cursor(crtc);
+
+ drm_gem_object_unreference_unlocked(obj);
+ return 0;
+fail:
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+}
+
+static int ast_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ struct ast_private *ast = crtc->dev->dev_private;
+ int x_offset, y_offset;
+ u8 *sig;
+
+ sig = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ writel(x, sig + AST_HWC_SIGNATURE_X);
+ writel(y, sig + AST_HWC_SIGNATURE_Y);
+
+ x_offset = ast_crtc->offset_x;
+ y_offset = ast_crtc->offset_y;
+ if (x < 0) {
+ x_offset = (-x) + ast_crtc->offset_x;
+ x = 0;
+ }
+
+ if (y < 0) {
+ y_offset = (-y) + ast_crtc->offset_y;
+ y = 0;
+ }
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, (x & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, ((x >> 8) & 0x0f));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, (y & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
+
+ /* dummy write to fire HWC */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
new file mode 100644
index 000000000000..6edbee63b0cb
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -0,0 +1,1780 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include "drmP.h"
+#include "ast_drv.h"
+
+#include "ast_dram_tables.h"
+
+static void ast_init_dram_2300(struct drm_device *dev);
+
+static void
+ast_enable_vga(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ ast_io_write8(ast, 0x43, 0x01);
+ ast_io_write8(ast, 0x42, 0x01);
+}
+
+#if 0 /* will use later */
+static bool
+ast_is_vga_enabled(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 ch;
+
+ if (ast->chip == AST1180) {
+ /* TODO 1180 */
+ } else {
+ ch = ast_io_read8(ast, 0x43);
+ if (ch) {
+ ast_open_key(ast);
+ ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
+ return ch & 0x04;
+ }
+ }
+ return 0;
+}
+#endif
+
+static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
+
+static void
+ast_set_def_ext_reg(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 i, index, reg;
+ const u8 *ext_reg_info;
+
+ /* reset scratch */
+ for (i = 0x81; i <= 0x8f; i++)
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);
+
+ if (ast->chip == AST2300) {
+ if (dev->pdev->revision >= 0x20)
+ ext_reg_info = extreginfo_ast2300;
+ else
+ ext_reg_info = extreginfo_ast2300a0;
+ } else
+ ext_reg_info = extreginfo;
+
+ index = 0xa0;
+ while (*ext_reg_info != 0xff) {
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, index, 0x00, *ext_reg_info);
+ index++;
+ ext_reg_info++;
+ }
+
+ /* disable standard IO/MEM decode if secondary */
+ /* ast_set_index_reg-mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x3); */
+
+ /* Set Ext. Default */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x8c, 0x00, 0x01);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x00, 0x00);
+
+ /* Enable RAMDAC for A1 */
+ reg = 0x04;
+ if (ast->chip == AST2300)
+ reg |= 0x20;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
+}
+
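+/*
+ * mindwm()/moutdwm() read and write the SoC address space through an
+ * indirect window: the upper 16 bits of the target address are latched
+ * via 0xf004, the window is enabled via 0xf000, and the low 16 bits then
+ * index into the 0x10000 aperture.
+ */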
+static inline u32 mindwm(struct ast_private *ast, u32 r)
+{
+ ast_write32(ast, 0xf004, r & 0xffff0000);
+ ast_write32(ast, 0xf000, 0x1);
+
+ return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
+}
+
+static inline void moutdwm(struct ast_private *ast, u32 r, u32 v)
+{
+ ast_write32(ast, 0xf004, r & 0xffff0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
+}
+
+/*
+ * AST2100/2150 DLL CBR Setting
+ */
+#define CBR_SIZE_AST2150 ((16 << 10) - 1)
+#define CBR_PASSNUM_AST2150 5
+#define CBR_THRESHOLD_AST2150 10
+#define CBR_THRESHOLD2_AST2150 10
+#define TIMEOUT_AST2150 5000000
+
+#define CBR_PATNUM_AST2150 8
+
+static const u32 pattern_AST2150[14] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0xFFFE0001,
+ 0x683501FE,
+ 0x0F1929B0,
+ 0x2D0B4346,
+ 0x60767F02,
+ 0x6FBE36A6,
+ 0x3A253035,
+ 0x3019686D,
+ 0x41C6167E,
+ 0x620152BF,
+ 0x20F050E0
+};
+
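+/*
+ * Kick the memory controller's built-in burst test engine (0x1e6e0070)
+ * with the given data generator; returns the engine's result bit (treated
+ * as a failure indication by the callers) or 0xffffffff on timeout.
+ */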
+static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+
+#if 0 /* unused in DDX driver - here for completeness */
+static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+#endif
+
+static int cbrtest_ast2150(struct ast_private *ast)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ if (mmctestburst2_ast2150(ast, i))
+ return 0;
+ return 1;
+}
+
+static int cbrscan_ast2150(struct ast_private *ast, int busw)
+{
+ u32 patcnt, loop;
+
+ for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
+ moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
+ if (cbrtest_ast2150(ast))
+ break;
+ }
+ if (loop == CBR_PASSNUM_AST2150)
+ return 0;
+ }
+ return 1;
+}
+
+
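+/*
+ * Sweep the DQS DLL delay setting (0-99), record the window of values
+ * that pass the calibration burst test and program a point roughly 7/16
+ * into that window; the scan restarts if no wide enough window is found.
+ */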
+static void cbrdlli_ast2150(struct ast_private *ast, int busw)
+{
+ u32 dll_min[4], dll_max[4], dlli, data, passcnt;
+
+cbr_start:
+ dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff;
+ dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0;
+ passcnt = 0;
+
+ for (dlli = 0; dlli < 100; dlli++) {
+ moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+ data = cbrscan_ast2150(ast, busw);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dll_min[0] > dlli)
+ dll_min[0] = dlli;
+ if (dll_max[0] < dlli)
+ dll_max[0] = dlli;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD_AST2150)
+ goto cbr_start;
+ }
+ if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150)
+ goto cbr_start;
+
+ dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
+ moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+}
+
+static void ast_init_dram_reg(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 j;
+ u32 data, temp, i;
+ const struct ast_dramstruct *dram_reg_info;
+
+ j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+
+ if ((j & 0x80) == 0) { /* VGA only */
+ if (ast->chip == AST2000) {
+ dram_reg_info = ast2000_dram_table_data;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x10100, 0xa8);
+
+ do {
+ ;
+ } while (ast_read32(ast, 0x10100) != 0xa8);
+ } else {/* AST2100/1100 */
+ if (ast->chip == AST2100 || ast->chip == AST2200)
+ dram_reg_info = ast2100_dram_table_data;
+ else
+ dram_reg_info = ast1100_dram_table_data;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688A8A8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x01);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x01);
+ }
+
+ while (dram_reg_info->index != 0xffff) {
+ if (dram_reg_info->index == 0xff00) {/* delay fn */
+ for (i = 0; i < 15; i++)
+ udelay(dram_reg_info->data);
+ } else if (dram_reg_info->index == 0x4 && ast->chip != AST2000) {
+ data = dram_reg_info->data;
+ if (ast->dram_type == AST_DRAM_1Gx16)
+ data = 0x00000d89;
+ else if (ast->dram_type == AST_DRAM_1Gx32)
+ data = 0x00000c8d;
+
+ temp = ast_read32(ast, 0x12070);
+ temp &= 0xc;
+ temp <<= 2;
+ ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
+ } else
+ ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data);
+ dram_reg_info++;
+ }
+
+ /* AST 2100/2150 DRAM calibration */
+ data = ast_read32(ast, 0x10120);
+ if (data == 0x5061) { /* 266Mhz */
+ data = ast_read32(ast, 0x10004);
+ if (data & 0x40)
+ cbrdlli_ast2150(ast, 16); /* 16 bits */
+ else
+ cbrdlli_ast2150(ast, 32); /* 32 bits */
+ }
+
+ switch (ast->chip) {
+ case AST2000:
+ temp = ast_read32(ast, 0x10140);
+ ast_write32(ast, 0x10140, temp | 0x40);
+ break;
+ case AST1100:
+ case AST2100:
+ case AST2200:
+ case AST2150:
+ temp = ast_read32(ast, 0x1200c);
+ ast_write32(ast, 0x1200c, temp & 0xfffffffd);
+ temp = ast_read32(ast, 0x12040);
+ ast_write32(ast, 0x12040, temp | 0x40);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* wait ready */
+ do {
+ j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ } while ((j & 0x40) == 0);
+}
+
+void ast_post_gpu(struct drm_device *dev)
+{
+ u32 reg;
+ struct ast_private *ast = dev->dev_private;
+
+ pci_read_config_dword(ast->dev->pdev, 0x04, &reg);
+ reg |= 0x3;
+ pci_write_config_dword(ast->dev->pdev, 0x04, reg);
+
+ ast_enable_vga(dev);
+ ast_open_key(ast);
+ ast_set_def_ext_reg(dev);
+
+ if (ast->chip == AST2300)
+ ast_init_dram_2300(dev);
+ else
+ ast_init_dram_reg(dev);
+}
+
+/* AST 2300 DRAM settings */
+#define AST_DDR3 0
+#define AST_DDR2 1
+
+struct ast2300_dram_param {
+ u32 dram_type;
+ u32 dram_chipid;
+ u32 dram_freq;
+ u32 vram_size;
+ u32 odt;
+ u32 wodt;
+ u32 rodt;
+ u32 dram_config;
+ u32 reg_PERIOD;
+ u32 reg_MADJ;
+ u32 reg_SADJ;
+ u32 reg_MRS;
+ u32 reg_EMRS;
+ u32 reg_AC1;
+ u32 reg_AC2;
+ u32 reg_DQSIC;
+ u32 reg_DRV;
+ u32 reg_IOZ;
+ u32 reg_DQIDLY;
+ u32 reg_FREQ;
+ u32 madj_max;
+ u32 dll2_finetune_step;
+};
+
+/*
+ * DQSI DLL CBR Setting
+ */
+#define CBR_SIZE1 ((4 << 10) - 1)
+#define CBR_SIZE2 ((64 << 10) - 1)
+#define CBR_PASSNUM 5
+#define CBR_PASSNUM2 5
+#define CBR_THRESHOLD 10
+#define CBR_THRESHOLD2 10
+#define TIMEOUT 5000000
+#define CBR_PATNUM 8
+
+static const u32 pattern[8] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0x88778877,
+ 0x92CC4D6E,
+ 0x543D3CDE,
+ 0xF1E843C7,
+ 0x7C61D253
+};
+
+#if 0 /* unused in DDX, included for completeness */
+static int mmc_test_burst(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x3000;
+ if (data & 0x2000) {
+ return 0;
+ }
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0;
+ }
+ } while (!data);
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 1;
+}
+#endif
+
+static int mmc_test_burst2(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000041 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x1000;
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return -1;
+ }
+ } while (!data);
+ data = mindwm(ast, 0x1e6e0078);
+ data = (data | (data >> 16)) & 0xffff;
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return data;
+}
+
+#if 0 /* Unused in DDX here for completeness */
+static int mmc_test_single(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x3000;
+ if (data & 0x2000)
+ return 0;
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return 0;
+ }
+ } while (!data);
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return 1;
+}
+#endif
+
+static int mmc_test_single2(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x1000;
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return -1;
+ }
+ } while (!data);
+ data = mindwm(ast, 0x1e6e0078);
+ data = (data | (data >> 16)) & 0xffff;
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return data;
+}
+
+static int cbr_test(struct ast_private *ast)
+{
+ u32 data;
+ int i;
+ data = mmc_test_single2(ast, 0);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ for (i = 0; i < 8; i++) {
+ data = mmc_test_burst2(ast, i);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ }
+ if (!data)
+ return 3;
+ else if (data & 0xff)
+ return 2;
+ return 1;
+}
+
+static int cbr_scan(struct ast_private *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 3;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ if ((data = cbr_test(ast)) != 0) {
+ data2 &= data;
+ if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+static u32 cbr_test2(struct ast_private *ast)
+{
+ u32 data;
+
+ data = mmc_test_burst2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+ data |= mmc_test_single2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+
+ return ~data & 0xffff;
+}
+
+static u32 cbr_scan2(struct ast_private *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 0xffff;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ if ((data = cbr_test2(ast)) != 0) {
+ data2 &= data;
+ if (!data)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+#if 0 /* unused in DDX - added for completeness */
+static void finetuneDQI(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
+
+ gold_sadj[0] = (mindwm(ast, 0x1E6E0024) >> 16) & 0xffff;
+ gold_sadj[1] = gold_sadj[0] >> 8;
+ gold_sadj[0] = gold_sadj[0] & 0xff;
+ gold_sadj[0] = (gold_sadj[0] + gold_sadj[1]) >> 1;
+ gold_sadj[1] = gold_sadj[0];
+
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli) {
+ dllmin[cnt] = dlli;
+ }
+ if (dllmax[cnt] < dlli) {
+ dllmax[cnt] = dlli;
+ }
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD) {
+ break;
+ }
+ }
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[0] >= dlli) {
+ dlli = (gold_sadj[0] - dlli) >> 1;
+ if (dlli > 3) {
+ dlli = 3;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[0]) >> 1;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0080, data);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[1] >= dlli) {
+ dlli = (gold_sadj[1] - dlli) >> 1;
+ if (dlli > 3) {
+ dlli = 3;
+ } else {
+ dlli = (dlli - 1) & 0x7;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[1]) >> 1;
+ dlli += 1;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI */
+#endif
+
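+/*
+ * Per-byte-lane DQS input delay fine tuning: sweep the delay, collect the
+ * passing window for each of the 16 lanes and derive the per-lane adjust
+ * values written to 0x1E6E0080/0x1E6E0084.
+ */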
+static void finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
+
+FINETUNE_START:
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli) {
+ dllmin[cnt] = dlli;
+ }
+ if (dllmax[cnt] < dlli) {
+ dllmax[cnt] = dlli;
+ }
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD2) {
+ break;
+ }
+ }
+ gold_sadj[0] = 0x0;
+ passcnt = 0;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ gold_sadj[0] += dllmin[cnt];
+ passcnt++;
+ }
+ }
+ if (passcnt != 16) {
+ goto FINETUNE_START;
+ }
+ gold_sadj[0] = gold_sadj[0] >> 4;
+ gold_sadj[1] = gold_sadj[0];
+
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[0] >= dlli) {
+ dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
+ if (dlli > 3) {
+ dlli = 3;
+ }
+ } else {
+ dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0080, data);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[1] >= dlli) {
+ dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
+ if (dlli > 3) {
+ dlli = 3;
+ } else {
+ dlli = (dlli - 1) & 0x7;
+ }
+ } else {
+ dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
+ dlli += 1;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI_L */
+
+static void finetuneDQI_L2(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, data2;
+
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli) {
+ dllmin[cnt] = dlli;
+ }
+ if (dllmax[cnt] < dlli) {
+ dllmax[cnt] = dlli;
+ }
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD2) {
+ break;
+ }
+ }
+ gold_sadj[0] = 0x0;
+ gold_sadj[1] = 0xFF;
+ for (cnt = 0; cnt < 8; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ if (gold_sadj[0] < dllmin[cnt]) {
+ gold_sadj[0] = dllmin[cnt];
+ }
+ if (gold_sadj[1] > dllmax[cnt]) {
+ gold_sadj[1] = dllmax[cnt];
+ }
+ }
+ }
+ gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
+ gold_sadj[1] = mindwm(ast, 0x1E6E0080);
+
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ data2 = gold_sadj[1] & 0x7;
+ gold_sadj[1] >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[0] >= dlli) {
+ dlli = (gold_sadj[0] - dlli) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 3) {
+ data2 = (data2 + dlli) & 0x7;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[0]) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 4) {
+ data2 = (data2 - dlli) & 0x7;
+ }
+ }
+ }
+ data |= data2 << 21;
+ }
+ moutdwm(ast, 0x1E6E0080, data);
+
+ gold_sadj[0] = 0x0;
+ gold_sadj[1] = 0xFF;
+ for (cnt = 8; cnt < 16; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ if (gold_sadj[0] < dllmin[cnt]) {
+ gold_sadj[0] = dllmin[cnt];
+ }
+ if (gold_sadj[1] > dllmax[cnt]) {
+ gold_sadj[1] = dllmax[cnt];
+ }
+ }
+ }
+ gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
+ gold_sadj[1] = mindwm(ast, 0x1E6E0084);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ data2 = gold_sadj[1] & 0x7;
+ gold_sadj[1] >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[0] >= dlli) {
+ dlli = (gold_sadj[0] - dlli) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 3) {
+ data2 = (data2 + dlli) & 0x7;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[0]) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 4) {
+ data2 = (data2 - dlli) & 0x7;
+ }
+ }
+ }
+ data |= data2 << 21;
+ }
+ moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI_L2 */
+
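+/*
+ * Full DQS calibration: run the per-lane fine tuning above, then sweep
+ * the common DQS delay, program the midpoint of the passing window and
+ * re-run the latch phase calibration.
+ */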
+static void cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 dllmin[2], dllmax[2], dlli, data, data2, passcnt;
+
+ finetuneDQI_L(ast, param);
+ finetuneDQI_L2(ast, param);
+
+CBR_START2:
+ dllmin[0] = dllmin[1] = 0xff;
+ dllmax[0] = dllmax[1] = 0x0;
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+ data = cbr_scan(ast);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dllmin[0] > dlli) {
+ dllmin[0] = dlli;
+ }
+ if (dllmax[0] < dlli) {
+ dllmax[0] = dlli;
+ }
+ }
+ if (data & 0x2) {
+ if (dllmin[1] > dlli) {
+ dllmin[1] = dlli;
+ }
+ if (dllmax[1] < dlli) {
+ dllmax[1] = dlli;
+ }
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD) {
+ break;
+ }
+ }
+ if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
+ goto CBR_START2;
+ }
+ if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
+ goto CBR_START2;
+ }
+ dlli = (dllmin[1] + dllmax[1]) >> 1;
+ dlli <<= 8;
+ dlli += (dllmin[0] + dllmax[0]) >> 1;
+ moutdwm(ast, 0x1E6E0068, (mindwm(ast, 0x1E6E0068) & 0xFFFF) | (dlli << 16));
+
+ data = (mindwm(ast, 0x1E6E0080) >> 24) & 0x1F;
+ data2 = (mindwm(ast, 0x1E6E0018) & 0xff80ffff) | (data << 16);
+ moutdwm(ast, 0x1E6E0018, data2);
+ moutdwm(ast, 0x1E6E0024, 0x8001 | (data << 1) | (param->dll2_finetune_step << 8));
+
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+} /* CBRDLL2 */
+
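+/*
+ * Build the DDR3 controller parameters: read the board strap bits from
+ * SCU 0x1E6E2070 and fill in the per-frequency AC timing, MRS/EMRS,
+ * drive strength/ODT and DRAM size configuration values.
+ */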
+static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+ /* Get trap info */
+ trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = 0x00020000 + (trap << 16);
+ trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
+ trap_MRS = 0x00000010 + (trap << 4);
+ trap_MRS |= ((trap & 0x2) << 18);
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 336:
+ moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 0;
+ param->reg_AC1 = 0x22202725;
+ param->reg_AC2 = 0xAA007613 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x04001400 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ break;
+ default:
+ case 396:
+ moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+ break;
+
+ case 408:
+ moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xCD44961A;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00081830;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 4;
+ break;
+ case 504:
+ moutdwm(ast, 0x1E6E2020, 0x0270);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xDE44A61D;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x070000BB;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 4;
+ break;
+ case 528:
+ moutdwm(ast, 0x1E6E2020, 0x0290);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xEF44B61E;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000088;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302A37;
+ param->reg_AC2 = 0xEF56B61E;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 136;
+ param->dll2_finetune_step = 3;
+ break;
+ case 600:
+ moutdwm(ast, 0x1E6E2020, 0x02E1);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xDF56B61F;
+ param->reg_DQSIC = 0x0000014D;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000058C0;
+ param->madj_max = 132;
+ param->dll2_finetune_step = 3;
+ break;
+ case 624:
+ moutdwm(ast, 0x1E6E2020, 0x0160);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xEF56B621;
+ param->reg_DQSIC = 0x0000015A;
+ param->reg_MRS = 0x02101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000059C0;
+ param->madj_max = 128;
+ param->dll2_finetune_step = 3;
+ break;
+ } /* switch freq */
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x130;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x131;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x132;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x133;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case AST_VIDMEM_SIZE_8M:
+ param->dram_config |= 0x00;
+ break;
+ case AST_VIDMEM_SIZE_16M:
+ param->dram_config |= 0x04;
+ break;
+ case AST_VIDMEM_SIZE_32M:
+ param->dram_config |= 0x08;
+ break;
+ case AST_VIDMEM_SIZE_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+
+}
+
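+/*
+ * DDR3 init sequence: program the MCR timing registers, wait for the
+ * MCLK2X PLL to lock (stepping MADJ up until the reported phase looks
+ * sane), issue the DDR3 mode-register set sequence, enable ODT and
+ * finally run the DQS calibration via cbr_dll2().
+ */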
+static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2;
+
+ moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ moutdwm(ast, 0x1E6E0018, 0x00000100);
+ moutdwm(ast, 0x1E6E0024, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ moutdwm(ast, 0x1E6E0004, param->dram_config);
+ moutdwm(ast, 0x1E6E0008, 0x90040f);
+ moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ moutdwm(ast, 0x1E6E0080, 0x00000000);
+ moutdwm(ast, 0x1E6E0084, 0x00000000);
+ moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ moutdwm(ast, 0x1E6E0018, 0x4040A170);
+ moutdwm(ast, 0x1E6E0018, 0x20402370);
+ moutdwm(ast, 0x1E6E0038, 0x00000000);
+ moutdwm(ast, 0x1E6E0040, 0xFF444444);
+ moutdwm(ast, 0x1E6E0044, 0x22222222);
+ moutdwm(ast, 0x1E6E0048, 0x22222222);
+ moutdwm(ast, 0x1E6E004C, 0x00000002);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+ moutdwm(ast, 0x1E6E0054, 0);
+ moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0074, 0x00000000);
+ moutdwm(ast, 0x1E6E0078, 0x00000000);
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+ /* Wait MCLK2X lock to MCLK */
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max) {
+ break;
+ }
+ moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000) {
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ } else {
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ }
+ data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ data = mindwm(ast, 0x1E6E0018) | 0xC00;
+ moutdwm(ast, 0x1E6E0018, data);
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00000040);
+ udelay(50);
+ /* Mode Register Setting */
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000005);
+ moutdwm(ast, 0x1E6E0028, 0x00000007);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+ data = 0;
+ if (param->wodt) {
+ data = 0x300;
+ }
+ if (param->rodt) {
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ }
+ moutdwm(ast, 0x1E6E0034, data | 0x3);
+
+ /* Wait DQI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0080);
+ } while (!(data & 0x40000000));
+ /* Wait DQSI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0020);
+ } while (!(data & 0x00000800));
+ /* Calibrate the DQSI delay */
+ cbr_dll2(ast, param);
+
+ moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+ /* ECC Memory Initialization */
+#ifdef ECC
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+ moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+}
+
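+/* DDR2 counterpart of get_ddr3_info(): same structure, DDR2 timing values. */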
+static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+ /* Get trap info */
+ trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = (trap << 20) | (trap << 16);
+ trap_AC2 += 0x00110000;
+ trap_MRS = 0x00000040 | (trap << 4);
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 264:
+ moutdwm(ast, 0x1E6E2020, 0x0130);
+ param->wodt = 0;
+ param->reg_AC1 = 0x11101513;
+ param->reg_AC2 = 0x78117011;
+ param->reg_DQSIC = 0x00000092;
+ param->reg_MRS = 0x00000842;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x000000F0;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x0000005A;
+ param->reg_FREQ = 0x00004AC0;
+ param->madj_max = 138;
+ param->dll2_finetune_step = 3;
+ break;
+ case 336:
+ moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 1;
+ param->reg_AC1 = 0x22202613;
+ param->reg_AC2 = 0xAA009016 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x00000A02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ break;
+ default:
+ case 396:
+ moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+
+ case 408:
+ moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xCD44B01E;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 3;
+ break;
+ case 504:
+ moutdwm(ast, 0x1E6E2020, 0x0261);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xDE44C022;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 3;
+ break;
+ case 528:
+ moutdwm(ast, 0x1E6E2020, 0x0120);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xEF44D024;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F9;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A7;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 552:
+ moutdwm(ast, 0x1E6E2020, 0x02A1);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E025;
+ param->reg_DQSIC = 0x00000132;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000AD;
+ param->reg_FREQ = 0x000056C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E027;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000B3;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ }
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x100;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x121;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x122;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x123;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case AST_VIDMEM_SIZE_8M:
+ param->dram_config |= 0x00;
+ break;
+ case AST_VIDMEM_SIZE_16M:
+ param->dram_config |= 0x04;
+ break;
+ case AST_VIDMEM_SIZE_32M:
+ param->dram_config |= 0x08;
+ break;
+ case AST_VIDMEM_SIZE_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+}
+
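+/*
+ * DDR2 init sequence: mirrors ddr3_init() but uses the DDR2 MRS/EMRS
+ * programming (the reg_EMRS | 0x380 write looks like the usual DDR2 OCD
+ * calibration default step) before running cbr_dll2().
+ */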
+static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2;
+
+ moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ moutdwm(ast, 0x1E6E0018, 0x00000100);
+ moutdwm(ast, 0x1E6E0024, 0x00000000);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ moutdwm(ast, 0x1E6E0004, param->dram_config);
+ moutdwm(ast, 0x1E6E0008, 0x90040f);
+ moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ moutdwm(ast, 0x1E6E0080, 0x00000000);
+ moutdwm(ast, 0x1E6E0084, 0x00000000);
+ moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ moutdwm(ast, 0x1E6E0018, 0x4040A130);
+ moutdwm(ast, 0x1E6E0018, 0x20402330);
+ moutdwm(ast, 0x1E6E0038, 0x00000000);
+ moutdwm(ast, 0x1E6E0040, 0xFF808000);
+ moutdwm(ast, 0x1E6E0044, 0x88848466);
+ moutdwm(ast, 0x1E6E0048, 0x44440008);
+ moutdwm(ast, 0x1E6E004C, 0x00000000);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+ moutdwm(ast, 0x1E6E0054, 0);
+ moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0074, 0x00000000);
+ moutdwm(ast, 0x1E6E0078, 0x00000000);
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+
+ /* Wait MCLK2X lock to MCLK */
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max) {
+ break;
+ }
+ moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000) {
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ } else {
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ }
+ data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ data = mindwm(ast, 0x1E6E0018) | 0xC00;
+ moutdwm(ast, 0x1E6E0018, data);
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ udelay(50);
+ /* Mode Register Setting */
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000005);
+ moutdwm(ast, 0x1E6E0028, 0x00000007);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+
+ moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+ data = 0;
+ if (param->wodt) {
+ data = 0x500;
+ }
+ if (param->rodt) {
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ }
+ moutdwm(ast, 0x1E6E0034, data | 0x3);
+ moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+
+ /* Wait DQI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0080);
+ } while (!(data & 0x40000000));
+ /* Wait DQSI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0020);
+ } while (!(data & 0x00000800));
+ /* Calibrate the DQSI delay */
+ cbr_dll2(ast, param);
+
+ /* ECC Memory Initialization */
+#ifdef ECC
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+ moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+
+}
+
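+/*
+ * AST2300 DRAM init entry point: the full init only runs when the scratch
+ * register reports VGA-only mode; DDR2 vs DDR3 is selected from bit 24 of
+ * the clock register read at offset 0x12008, then the matching
+ * get_ddr2/ddr3_info() and ddr2/ddr3_init() pair is invoked. The final
+ * loop polls scratch register 0xd0 until bit 0x40 ("ready") is set.
+ */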
+static void ast_init_dram_2300(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast2300_dram_param param;
+ u32 temp;
+ u8 reg;
+
+ reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ if ((reg & 0x80) == 0) {/* vga only */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688a8a8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x1);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x1);
+
+ /* Slow down CPU/AHB CLK in VGA only mode */
+ temp = ast_read32(ast, 0x12008);
+ temp |= 0x73;
+ ast_write32(ast, 0x12008, temp);
+
+ param.dram_type = AST_DDR3;
+ if (temp & 0x01000000)
+ param.dram_type = AST_DDR2;
+ param.dram_chipid = ast->dram_type;
+ param.dram_freq = ast->mclk;
+ param.vram_size = ast->vram_size;
+
+ if (param.dram_type == AST_DDR3) {
+ get_ddr3_info(ast, &param);
+ ddr3_init(ast, &param);
+ } else {
+ get_ddr2_info(ast, &param);
+ ddr2_init(ast, &param);
+ }
+
+ temp = mindwm(ast, 0x1e6e2040);
+ moutdwm(ast, 0x1e6e2040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ } while ((reg & 0x40) == 0);
+}
+
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
new file mode 100644
index 000000000000..95fa6aba26bc
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of the authors not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission. The authors makes no representations
+ * about the suitability of this software for any purpose. It is provided
+ * "as is" without express or implied warranty.
+ *
+ * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Ported from xf86-video-ast driver */
+
+#ifndef AST_TABLES_H
+#define AST_TABLES_H
+
+/* Std. Table Index Definition */
+#define TextModeIndex 0
+#define EGAModeIndex 1
+#define VGAModeIndex 2
+#define HiCModeIndex 3
+#define TrueCModeIndex 4
+
+#define Charx8Dot 0x00000001
+#define HalfDCLK 0x00000002
+#define DoubleScanMode 0x00000004
+#define LineCompareOff 0x00000008
+#define SyncPP 0x00000000
+#define SyncPN 0x00000040
+#define SyncNP 0x00000080
+#define SyncNN 0x000000C0
+#define HBorder 0x00000020
+#define VBorder 0x00000010
+#define WideScreenMode 0x00000100
+
+
+/* DCLK Index */
+#define VCLK25_175 0x00
+#define VCLK28_322 0x01
+#define VCLK31_5 0x02
+#define VCLK36 0x03
+#define VCLK40 0x04
+#define VCLK49_5 0x05
+#define VCLK50 0x06
+#define VCLK56_25 0x07
+#define VCLK65 0x08
+#define VCLK75 0x09
+#define VCLK78_75 0x0A
+#define VCLK94_5 0x0B
+#define VCLK108 0x0C
+#define VCLK135 0x0D
+#define VCLK157_5 0x0E
+#define VCLK162 0x0F
+/* #define VCLK193_25 0x10 */
+#define VCLK154 0x10
+#define VCLK83_5 0x11
+#define VCLK106_5 0x12
+#define VCLK146_25 0x13
+#define VCLK148_5 0x14
+
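+/*
+ * Pixel clock PLL parameters, indexed by the VCLK_* defines above; the
+ * three bytes per entry are presumably the PLL divider values programmed
+ * by the mode-setting code (struct ast_vbios_dclk_info is declared
+ * elsewhere in the driver).
+ */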
+static struct ast_vbios_dclk_info dclk_table[] = {
+ {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
+ {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
+ {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
+ {0x76, 0x63, 0x01}, /* 03: VCLK36 */
+ {0xEE, 0x67, 0x01}, /* 04: VCLK40 */
+ {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
+ {0xC6, 0x64, 0x01}, /* 06: VCLK50 */
+ {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
+ {0x80, 0x64, 0x00}, /* 08: VCLK65 */
+ {0x7B, 0x63, 0x00}, /* 09: VCLK75 */
+ {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */
+ {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */
+ {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */
+ {0x85, 0x24, 0x00}, /* 0D: VCLK135 */
+ {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
+ {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
+ {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
+ {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */
+ {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
+ {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
+ {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
+};
+
+static struct ast_vbios_stdtable vbios_stdtable[] = {
+ /* MD_2_3_400 */
+ {
+ 0x67,
+ {0x00,0x03,0x00,0x02},
+ {0x5f,0x4f,0x50,0x82,0x55,0x81,0xbf,0x1f,
+ 0x00,0x4f,0x0d,0x0e,0x00,0x00,0x00,0x00,
+ 0x9c,0x8e,0x8f,0x28,0x1f,0x96,0xb9,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
+ 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
+ 0x0c,0x00,0x0f,0x08},
+ {0x00,0x00,0x00,0x00,0x00,0x10,0x0e,0x00,
+ 0xff}
+ },
+ /* Mode12/ExtEGATable */
+ {
+ 0xe3,
+ {0x01,0x0f,0x00,0x06},
+ {0x5f,0x4f,0x50,0x82,0x55,0x81,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xe9,0x8b,0xdf,0x28,0x00,0xe7,0x04,0xe3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
+ 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
+ 0x01,0x00,0x0f,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+ 0xff}
+ },
+ /* ExtVGATable */
+ {
+ 0x2f,
+ {0x01,0x0f,0x00,0x0e},
+ {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+ 0x01,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x40,0x05,0x0f,
+ 0xff}
+ },
+ /* ExtHiCTable */
+ {
+ 0x2f,
+ {0x01,0x0f,0x00,0x0e},
+ {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+ 0x01,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+ 0xff}
+ },
+ /* ExtTrueCTable */
+ {
+ 0x2f,
+ {0x01,0x0f,0x00,0x0e},
+ {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+ 0x01,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+ 0xff}
+ },
+};
+
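+/*
+ * Per-resolution mode tables. Each row appears to carry the horizontal
+ * and vertical total/active/porch/sync values, the VCLK index, the flag
+ * bits defined above, the refresh rate, a rate index and a mode ID; every
+ * table ends with a sentinel row whose refresh rate field is 0xFF.
+ */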
+static struct ast_vbios_enhtable res_640x480[] = {
+ { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E },
+ { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E },
+ { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75Hz */
+ (SyncNN | Charx8Dot) , 75, 3, 0x2E },
+ { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */
+ (SyncNN | Charx8Dot) , 85, 4, 0x2E },
+ { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */
+ (SyncNN | Charx8Dot) , 0xFF, 4, 0x2E },
+};
+
+static struct ast_vbios_enhtable res_800x600[] = {
+ {1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */
+ (SyncPP | Charx8Dot), 56, 1, 0x30 },
+ {1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60Hz */
+ (SyncPP | Charx8Dot), 60, 2, 0x30 },
+ {1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72Hz */
+ (SyncPP | Charx8Dot), 72, 3, 0x30 },
+ {1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75Hz */
+ (SyncPP | Charx8Dot), 75, 4, 0x30 },
+ {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85Hz */
+ (SyncPP | Charx8Dot), 84, 5, 0x30 },
+ {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 5, 0x30 },
+};
+
+
+static struct ast_vbios_enhtable res_1024x768[] = {
+ {1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60Hz */
+ (SyncNN | Charx8Dot), 60, 1, 0x31 },
+ {1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70Hz */
+ (SyncNN | Charx8Dot), 70, 2, 0x31 },
+ {1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75Hz */
+ (SyncPP | Charx8Dot), 75, 3, 0x31 },
+ {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* 85Hz */
+ (SyncPP | Charx8Dot), 84, 4, 0x31 },
+ {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 4, 0x31 },
+};
+
+static struct ast_vbios_enhtable res_1280x1024[] = {
+ {1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x32 },
+ {1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75Hz */
+ (SyncPP | Charx8Dot), 75, 2, 0x32 },
+ {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85Hz */
+ (SyncPP | Charx8Dot), 85, 3, 0x32 },
+ {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 3, 0x32 },
+};
+
+static struct ast_vbios_enhtable res_1600x1200[] = {
+ {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x33 },
+ {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
+};
+
+static struct ast_vbios_enhtable res_1920x1200[] = {
+ {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
+ (SyncNP | Charx8Dot), 60, 1, 0x34 },
+ {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* end */
+ (SyncNP | Charx8Dot), 0xFF, 1, 0x34 },
+};
+
+/* 16:10 */
+static struct ast_vbios_enhtable res_1280x800[] = {
+ {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x35 },
+ {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* end */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x35 },
+
+};
+
+static struct ast_vbios_enhtable res_1440x900[] = {
+ {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x36 },
+ {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* end */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x36 },
+};
+
+static struct ast_vbios_enhtable res_1680x1050[] = {
+ {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x37 },
+ {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* end */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x37 },
+};
+
+/* HDTV */
+static struct ast_vbios_enhtable res_1920x1080[] = {
+ {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x38 },
+ {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* end */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x38 },
+};
+#endif
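
Each res_* table ends with a sentinel row whose refresh-rate field is 0xFF, so a mode-setting path can walk a table until it matches the requested refresh rate or reaches that terminator. A sketch, assuming struct ast_vbios_enhtable exposes a refresh_rate member (declared earlier in this header):

/* Sketch only: return the row matching the requested refresh rate, or the
 * 0xFF terminator as a default. */
static const struct ast_vbios_enhtable *
example_find_entry(const struct ast_vbios_enhtable *table, u32 refresh)
{
	while (table->refresh_rate != 0xFF && table->refresh_rate != refresh)
		table++;
	return table;	/* e.g. res_1024x768 + 75 -> the VCLK78_75 row */
}
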
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
new file mode 100644
index 000000000000..6cf2adea66bc
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "ast_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct ast_private *
+ast_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct ast_private, ttm.bdev);
+}
+
+static int
+ast_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+ast_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int ast_ttm_global_init(struct ast_private *ast)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &ast->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &ast_ttm_mem_global_init;
+ global_ref->release = &ast_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ ast->ttm.bo_global_ref.mem_glob =
+ ast->ttm.mem_global_ref.object;
+ global_ref = &ast->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&ast->ttm.mem_global_ref);
+ return r;
+ }
+ return 0;
+}
+
+void
+ast_ttm_global_release(struct ast_private *ast)
+{
+ if (ast->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&ast->ttm.mem_global_ref);
+ ast->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct ast_bo *bo;
+
+ bo = container_of(tbo, struct ast_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &ast_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int
+ast_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct ast_bo *astbo = ast_bo(bo);
+
+ if (!ast_ttm_bo_is_ast_bo(bo))
+ return;
+
+ ast_ttm_placement(astbo, TTM_PL_FLAG_SYSTEM);
+ *pl = astbo->placement;
+}
+
+static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct ast_private *ast = ast_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(ast->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ return 0;
+}
+
+static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int ast_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ int r;
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ return r;
+}
+
+
+static void ast_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func ast_tt_backend_func = {
+ .destroy = &ast_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &ast_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int ast_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver ast_bo_driver = {
+ .ttm_tt_create = ast_ttm_tt_create,
+ .ttm_tt_populate = ast_ttm_tt_populate,
+ .ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
+ .init_mem_type = ast_bo_init_mem_type,
+ .evict_flags = ast_bo_evict_flags,
+ .move = ast_bo_move,
+ .verify_access = ast_bo_verify_access,
+ .io_mem_reserve = &ast_ttm_io_mem_reserve,
+ .io_mem_free = &ast_ttm_io_mem_free,
+};
+
+int ast_mm_init(struct ast_private *ast)
+{
+ int ret;
+ struct drm_device *dev = ast->dev;
+ struct ttm_bo_device *bdev = &ast->ttm.bdev;
+
+ ret = ast_ttm_global_init(ast);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&ast->ttm.bdev,
+ ast->ttm.bo_global_ref.ref.object,
+ &ast_bo_driver, DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ ast->vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ ast->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+
+ return 0;
+}
+
+void ast_mm_fini(struct ast_private *ast)
+{
+ struct drm_device *dev = ast->dev;
+ ttm_bo_device_release(&ast->ttm.bdev);
+
+ ast_ttm_global_release(ast);
+
+ if (ast->fb_mtrr >= 0) {
+ drm_mtrr_del(ast->fb_mtrr,
+ pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+ ast->fb_mtrr = -1;
+ }
+}
+
+void ast_ttm_placement(struct ast_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+void ast_bo_unreserve(struct ast_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+int ast_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct ast_bo **pastbo)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast_bo *astbo;
+ size_t acc_size;
+ int ret;
+
+ astbo = kzalloc(sizeof(struct ast_bo), GFP_KERNEL);
+ if (!astbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &astbo->gem, size);
+ if (ret) {
+ kfree(astbo);
+ return ret;
+ }
+
+ astbo->gem.driver_private = NULL;
+ astbo->bo.bdev = &ast->ttm.bdev;
+
+ ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size,
+ sizeof(struct ast_bo));
+
+ ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
+ ttm_bo_type_device, &astbo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ NULL, ast_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pastbo = astbo;
+ return 0;
+}
+
+static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = ast_bo_gpu_offset(bo);
+ return 0;
+ }
+
+ ast_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = ast_bo_gpu_offset(bo);
+ return 0;
+}
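
ast_bo_create(), ast_bo_reserve() and ast_bo_pin() above form the path a scanout buffer takes into VRAM. A usage sketch with error handling trimmed (dev and size come from the hypothetical caller):

/* Sketch: create a BO, pin it into VRAM and obtain its GPU offset. */
struct ast_bo *bo;
u64 gpu_addr;

if (ast_bo_create(dev, size, PAGE_SIZE, 0, &bo) == 0 &&
    ast_bo_reserve(bo, false) == 0) {
	ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
	ast_bo_unreserve(bo);
	/* gpu_addr is the VRAM offset to program as the scanout base */
}
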
+
+int ast_bo_unpin(struct ast_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int ast_bo_push_sysram(struct ast_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret) {
+ DRM_ERROR("pushing to VRAM failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+int ast_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct ast_private *ast;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ ast = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
+}
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
new file mode 100644
index 000000000000..fc154dd75296
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -0,0 +1,12 @@
+config DRM_CIRRUS_QEMU
+ tristate "Cirrus driver for QEMU emulated device"
+ depends on DRM && PCI && EXPERIMENTAL
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ help
+ This is a KMS driver for the Cirrus device emulated by QEMU.
+ It is *NOT* intended for real Cirrus hardware. It requires
+ the modesetting userspace X.org driver.
diff --git a/drivers/gpu/drm/cirrus/Makefile b/drivers/gpu/drm/cirrus/Makefile
new file mode 100644
index 000000000000..69ffe7006d55
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -Iinclude/drm
+cirrus-y := cirrus_main.o cirrus_mode.o \
+ cirrus_drv.o cirrus_fbdev.o cirrus_ttm.o
+
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
new file mode 100644
index 000000000000..d7038230b71e
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2012 Red Hat <mjg@redhat.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include "drmP.h"
+#include "drm.h"
+
+#include "cirrus_drv.h"
+
+int cirrus_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, cirrus_modeset, int, 0400);
+
+/*
+ * This is the generic driver code. This binds the driver to the drm core,
+ * which then performs further device association and calls our graphics init
+ * functions
+ */
+
+static struct drm_driver driver;
+
+/* only bind to the cirrus chip in qemu */
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
+ 0, 0 },
+ {0,}
+};
+
+static int __devinit
+cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void cirrus_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static const struct file_operations cirrus_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = cirrus_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+};
+static struct drm_driver driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR,
+ .load = cirrus_driver_load,
+ .unload = cirrus_driver_unload,
+ .fops = &cirrus_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+ .gem_init_object = cirrus_gem_init_object,
+ .gem_free_object = cirrus_gem_free_object,
+ .dumb_create = cirrus_dumb_create,
+ .dumb_map_offset = cirrus_dumb_mmap_offset,
+ .dumb_destroy = cirrus_dumb_destroy,
+};
+
+static struct pci_driver cirrus_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = cirrus_pci_probe,
+ .remove = cirrus_pci_remove,
+};
+
+static int __init cirrus_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && cirrus_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (cirrus_modeset == 0)
+ return -EINVAL;
+ return drm_pci_init(&driver, &cirrus_pci_driver);
+}
+
+static void __exit cirrus_exit(void)
+{
+ drm_pci_exit(&driver, &cirrus_pci_driver);
+}
+
+module_init(cirrus_init);
+module_exit(cirrus_exit);
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
new file mode 100644
index 000000000000..21bdfa8836f7
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#ifndef __CIRRUS_DRV_H__
+#define __CIRRUS_DRV_H__
+
+#include <video/vga.h>
+
+#include <drm/drm_fb_helper.h>
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#define DRIVER_AUTHOR "Matthew Garrett"
+
+#define DRIVER_NAME "cirrus"
+#define DRIVER_DESC "qemu Cirrus emulation"
+#define DRIVER_DATE "20110418"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#define CIRRUSFB_CONN_LIMIT 1
+
+#define RREG8(reg) ioread8(((void __iomem *)cdev->rmmio) + (reg))
+#define WREG8(reg, v) iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg))
+#define RREG32(reg) ioread32(((void __iomem *)cdev->rmmio) + (reg))
+#define WREG32(reg, v) iowrite32(v, ((void __iomem *)cdev->rmmio) + (reg))
+
+#define SEQ_INDEX 4
+#define SEQ_DATA 5
+
+#define WREG_SEQ(reg, v) \
+ do { \
+ WREG8(SEQ_INDEX, reg); \
+ WREG8(SEQ_DATA, v); \
+ } while (0) \
+
+#define CRT_INDEX 0x14
+#define CRT_DATA 0x15
+
+#define WREG_CRT(reg, v) \
+ do { \
+ WREG8(CRT_INDEX, reg); \
+ WREG8(CRT_DATA, v); \
+ } while (0) \
+
+#define GFX_INDEX 0xe
+#define GFX_DATA 0xf
+
+#define WREG_GFX(reg, v) \
+ do { \
+ WREG8(GFX_INDEX, reg); \
+ WREG8(GFX_DATA, v); \
+ } while (0) \
+
+/*
+ * Cirrus has a "hidden" DAC register that is reached by reading the pixel
+ * mask register four times in a row. The next write then goes to the
+ * hidden register instead of the pixel mask
+ */
+#define VGA_DAC_MASK 0x6
+
+#define WREG_HDR(v) \
+ do { \
+ RREG8(VGA_DAC_MASK); \
+ RREG8(VGA_DAC_MASK); \
+ RREG8(VGA_DAC_MASK); \
+ RREG8(VGA_DAC_MASK); \
+ WREG8(VGA_DAC_MASK, v); \
+ } while (0) \
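
WREG_HDR() is how the mode-setting code reaches that hidden register; a usage sketch with the values cirrus_crtc_mode_set() (added later in this series) writes for the two colour depths:

/* Sketch: 0xc0 is the value the driver uses for 16bpp, 0xc5 for 24/32bpp. */
WREG_HDR(0xc0);
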
+
+
+#define CIRRUS_MAX_FB_HEIGHT 4096
+#define CIRRUS_MAX_FB_WIDTH 4096
+
+#define CIRRUS_DPMS_CLEARED (-1)
+
+#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
+#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
+#define to_cirrus_framebuffer(x) container_of(x, struct cirrus_framebuffer, base)
+
+struct cirrus_crtc {
+ struct drm_crtc base;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ int last_dpms;
+ bool enabled;
+};
+
+struct cirrus_fbdev;
+struct cirrus_mode_info {
+ bool mode_config_initialized;
+ struct cirrus_crtc *crtc;
+ /* pointer to fbdev info structure */
+ struct cirrus_fbdev *gfbdev;
+};
+
+struct cirrus_encoder {
+ struct drm_encoder base;
+ int last_dpms;
+};
+
+struct cirrus_connector {
+ struct drm_connector base;
+};
+
+struct cirrus_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct cirrus_mc {
+ resource_size_t vram_size;
+ resource_size_t vram_base;
+};
+
+struct cirrus_device {
+ struct drm_device *dev;
+ unsigned long flags;
+
+ resource_size_t rmmio_base;
+ resource_size_t rmmio_size;
+ void __iomem *rmmio;
+
+ struct cirrus_mc mc;
+ struct cirrus_mode_info mode_info;
+
+ int num_crtc;
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ atomic_t validate_sequence;
+ } ttm;
+
+};
+
+
+struct cirrus_fbdev {
+ struct drm_fb_helper helper;
+ struct cirrus_framebuffer gfb;
+ struct list_head fbdev_list;
+ void *sysram;
+ int size;
+};
+
+struct cirrus_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
+
+static inline struct cirrus_bo *
+cirrus_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct cirrus_bo, bo);
+}
+
+
+#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+ /* cirrus_mode.c */
+void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+
+
+ /* cirrus_main.c */
+int cirrus_device_init(struct cirrus_device *cdev,
+ struct drm_device *ddev,
+ struct pci_dev *pdev,
+ uint32_t flags);
+void cirrus_device_fini(struct cirrus_device *cdev);
+int cirrus_gem_init_object(struct drm_gem_object *obj);
+void cirrus_gem_free_object(struct drm_gem_object *obj);
+int cirrus_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset);
+int cirrus_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+int cirrus_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int cirrus_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
+
+int cirrus_framebuffer_init(struct drm_device *dev,
+ struct cirrus_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+ /* cirrus_display.c */
+int cirrus_modeset_init(struct cirrus_device *cdev);
+void cirrus_modeset_fini(struct cirrus_device *cdev);
+
+ /* cirrus_fbdev.c */
+int cirrus_fbdev_init(struct cirrus_device *cdev);
+void cirrus_fbdev_fini(struct cirrus_device *cdev);
+
+
+
+ /* cirrus_irq.c */
+void cirrus_driver_irq_preinstall(struct drm_device *dev);
+int cirrus_driver_irq_postinstall(struct drm_device *dev);
+void cirrus_driver_irq_uninstall(struct drm_device *dev);
+irqreturn_t cirrus_driver_irq_handler(DRM_IRQ_ARGS);
+
+ /* cirrus_kms.c */
+int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
+int cirrus_driver_unload(struct drm_device *dev);
+extern struct drm_ioctl_desc cirrus_ioctls[];
+extern int cirrus_max_ioctl;
+
+int cirrus_mm_init(struct cirrus_device *cirrus);
+void cirrus_mm_fini(struct cirrus_device *cirrus);
+void cirrus_ttm_placement(struct cirrus_bo *bo, int domain);
+int cirrus_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct cirrus_bo **pcirrusbo);
+int cirrus_mmap(struct file *filp, struct vm_area_struct *vma);
+int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait);
+void cirrus_bo_unreserve(struct cirrus_bo *bo);
+int cirrus_bo_push_sysram(struct cirrus_bo *bo);
+int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
+#endif /* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
new file mode 100644
index 000000000000..9a276a536992
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_fb_helper.h"
+
+#include <linux/fb.h>
+
+#include "cirrus_drv.h"
+
+static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
+ int x, int y, int width, int height)
+{
+ int i;
+ struct drm_gem_object *obj;
+ struct cirrus_bo *bo;
+ int src_offset, dst_offset;
+ int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
+ int ret;
+ bool unmap = false;
+
+ obj = afbdev->gfb.obj;
+ bo = gem_to_cirrus_bo(obj);
+
+ ret = cirrus_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("failed to reserve fb bo\n");
+ return;
+ }
+
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ cirrus_bo_unreserve(bo);
+ return;
+ }
+ unmap = true;
+ }
+ for (i = y; i < y + height; i++) {
+ /* assume equal stride for now */
+ src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+
+ }
+ if (unmap)
+ ttm_bo_kunmap(&bo->kmap);
+
+ cirrus_bo_unreserve(bo);
+}
+
+static void cirrus_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct cirrus_fbdev *afbdev = info->par;
+ sys_fillrect(info, rect);
+ cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void cirrus_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct cirrus_fbdev *afbdev = info->par;
+ sys_copyarea(info, area);
+ cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
+ area->height);
+}
+
+static void cirrus_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct cirrus_fbdev *afbdev = info->par;
+ sys_imageblit(info, image);
+ cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+
+static struct fb_ops cirrusfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = cirrus_fillrect,
+ .fb_copyarea = cirrus_copyarea,
+ .fb_imageblit = cirrus_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ u32 bpp, depth;
+ u32 size;
+ struct drm_gem_object *gobj;
+
+ int ret = 0;
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ if (bpp > 24)
+ return -EINVAL;
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = cirrus_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int cirrusfb_create(struct cirrus_fbdev *gfbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = gfbdev->helper.dev;
+ struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct device *device = &dev->pdev->dev;
+ void *sysram;
+ struct drm_gem_object *gobj = NULL;
+ struct cirrus_bo *bo = NULL;
+ int size, ret;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+
+ bo = gem_to_cirrus_bo(gobj);
+
+ sysram = vmalloc(size);
+ if (!sysram)
+ return -ENOMEM;
+
+ info = framebuffer_alloc(0, device);
+ if (info == NULL)
+ return -ENOMEM;
+
+ info->par = gfbdev;
+
+ ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ gfbdev->sysram = sysram;
+ gfbdev->size = size;
+
+ fb = &gfbdev->gfb.base;
+ if (!fb) {
+ DRM_INFO("fb is NULL\n");
+ return -EINVAL;
+ }
+
+ /* setup helper */
+ gfbdev->helper.fb = fb;
+ gfbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "cirrusdrmfb");
+
+
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &cirrusfb_ops;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ /* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+ info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = cdev->mc.vram_size;
+
+ info->screen_base = sysram;
+ info->screen_size = size;
+
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
+ DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
+ DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
+ DRM_INFO("fb depth is %d\n", fb->depth);
+ DRM_INFO(" pitch is %d\n", fb->pitches[0]);
+
+ return 0;
+out_iounmap:
+ return ret;
+}
+
+static int cirrus_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size
+ *sizes)
+{
+ struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = cirrusfb_create(gfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static int cirrus_fbdev_destroy(struct drm_device *dev,
+ struct cirrus_fbdev *gfbdev)
+{
+ struct fb_info *info;
+ struct cirrus_framebuffer *gfb = &gfbdev->gfb;
+
+ if (gfbdev->helper.fbdev) {
+ info = gfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (gfb->obj) {
+ drm_gem_object_unreference_unlocked(gfb->obj);
+ gfb->obj = NULL;
+ }
+
+ vfree(gfbdev->sysram);
+ drm_fb_helper_fini(&gfbdev->helper);
+ drm_framebuffer_cleanup(&gfb->base);
+
+ return 0;
+}
+
+static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
+ .gamma_set = cirrus_crtc_fb_gamma_set,
+ .gamma_get = cirrus_crtc_fb_gamma_get,
+ .fb_probe = cirrus_fb_find_or_create_single,
+};
+
+int cirrus_fbdev_init(struct cirrus_device *cdev)
+{
+ struct cirrus_fbdev *gfbdev;
+ int ret;
+ int bpp_sel = 24;
+
+ /*bpp_sel = 8;*/
+ gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
+ if (!gfbdev)
+ return -ENOMEM;
+
+ cdev->mode_info.gfbdev = gfbdev;
+ gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
+ cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
+ if (ret) {
+ kfree(gfbdev);
+ return ret;
+ }
+ drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
+ drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
+
+ return 0;
+}
+
+void cirrus_fbdev_fini(struct cirrus_device *cdev)
+{
+ if (!cdev->mode_info.gfbdev)
+ return;
+
+ cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
+ kfree(cdev->mode_info.gfbdev);
+ cdev->mode_info.gfbdev = NULL;
+}
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
new file mode 100644
index 000000000000..e3c122578417
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "cirrus_drv.h"
+
+
+static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
+ if (cirrus_fb->obj)
+ drm_gem_object_unreference_unlocked(cirrus_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static int cirrus_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
+ .destroy = cirrus_user_framebuffer_destroy,
+ .create_handle = cirrus_user_framebuffer_create_handle,
+};
+
+int cirrus_framebuffer_init(struct drm_device *dev,
+ struct cirrus_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ return 0;
+}
+
+static struct drm_framebuffer *
+cirrus_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct cirrus_framebuffer *cirrus_fb;
+ int ret;
+ u32 bpp, depth;
+
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ /* cirrus can't handle > 24bpp framebuffers at all */
+ if (bpp > 24)
+ return ERR_PTR(-EINVAL);
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
+ if (!cirrus_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(cirrus_fb);
+ return ERR_PTR(ret);
+ }
+ return &cirrus_fb->base;
+}
+
+static const struct drm_mode_config_funcs cirrus_mode_funcs = {
+ .fb_create = cirrus_user_framebuffer_create,
+};
+
+/* Unmap the framebuffer from the core and release the memory */
+static void cirrus_vram_fini(struct cirrus_device *cdev)
+{
+ iounmap(cdev->rmmio);
+ cdev->rmmio = NULL;
+ if (cdev->mc.vram_base)
+ release_mem_region(cdev->mc.vram_base, cdev->mc.vram_size);
+}
+
+/* Map the framebuffer from the card and configure the core */
+static int cirrus_vram_init(struct cirrus_device *cdev)
+{
+ /* BAR 0 is VRAM */
+ cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
+ /* We have 4MB of VRAM */
+ cdev->mc.vram_size = 4 * 1024 * 1024;
+
+ if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
+ "cirrusdrmfb_vram")) {
+ DRM_ERROR("can't reserve VRAM\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Our emulated hardware has two sets of memory. One is video RAM and can
+ * simply be used as a linear framebuffer - the other provides mmio access
+ * to the display registers. The latter can also be accessed via IO port
+ * access, but we map the range and use mmio to program them instead
+ */
+
+int cirrus_device_init(struct cirrus_device *cdev,
+ struct drm_device *ddev,
+ struct pci_dev *pdev, uint32_t flags)
+{
+ int ret;
+
+ cdev->dev = ddev;
+ cdev->flags = flags;
+
+ /* Hardcode the number of CRTCs to 1 */
+ cdev->num_crtc = 1;
+
+ /* BAR 0 is the framebuffer, BAR 1 contains registers */
+ cdev->rmmio_base = pci_resource_start(cdev->dev->pdev, 1);
+ cdev->rmmio_size = pci_resource_len(cdev->dev->pdev, 1);
+
+ if (!request_mem_region(cdev->rmmio_base, cdev->rmmio_size,
+ "cirrusdrmfb_mmio")) {
+ DRM_ERROR("can't reserve mmio registers\n");
+ return -ENOMEM;
+ }
+
+ cdev->rmmio = ioremap(cdev->rmmio_base, cdev->rmmio_size);
+
+ if (cdev->rmmio == NULL)
+ return -ENOMEM;
+
+ ret = cirrus_vram_init(cdev);
+ if (ret) {
+ release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+ return ret;
+ }
+
+ return 0;
+}
+
+void cirrus_device_fini(struct cirrus_device *cdev)
+{
+ release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+ cirrus_vram_fini(cdev);
+}
+
+/*
+ * Functions here will be called by the core once it's bound the driver to
+ * a PCI device
+ */
+
+int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct cirrus_device *cdev;
+ int r;
+
+ cdev = kzalloc(sizeof(struct cirrus_device), GFP_KERNEL);
+ if (cdev == NULL)
+ return -ENOMEM;
+ dev->dev_private = (void *)cdev;
+
+ r = cirrus_device_init(cdev, dev, dev->pdev, flags);
+ if (r) {
+ dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
+ goto out;
+ }
+
+ r = cirrus_mm_init(cdev);
+ if (r)
+ dev_err(&dev->pdev->dev, "fatal err on mm init\n");
+
+ r = cirrus_modeset_init(cdev);
+ if (r)
+ dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+
+ dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
+out:
+ if (r)
+ cirrus_driver_unload(dev);
+ return r;
+}
+
+int cirrus_driver_unload(struct drm_device *dev)
+{
+ struct cirrus_device *cdev = dev->dev_private;
+
+ if (cdev == NULL)
+ return 0;
+ cirrus_modeset_fini(cdev);
+ cirrus_mm_fini(cdev);
+ cirrus_device_fini(cdev);
+ kfree(cdev);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int cirrus_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct cirrus_bo *cirrusbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = cirrus_bo_create(dev, size, 0, 0, &cirrusbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &cirrusbo->gem;
+ return 0;
+}
+
+int cirrus_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = cirrus_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
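
cirrus_dumb_create() services the dumb-buffer ioctl; the same pitch and size rules can be seen from userspace with a sketch like the following (assumes an open DRM file descriptor fd and libdrm's drmIoctl() wrapper; not part of this patch):

/* Userspace-side sketch: allocate a dumb buffer for scanout. */
struct drm_mode_create_dumb creq = {
	.width = 1024, .height = 768, .bpp = 24,
};
if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq) == 0) {
	/* the driver filled in creq.pitch (width * bytes per pixel),
	 * creq.size (pitch * height) and creq.handle */
}
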
+
+int cirrus_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+int cirrus_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
+ return 0;
+}
+
+void cirrus_bo_unref(struct cirrus_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+
+}
+
+void cirrus_gem_free_object(struct drm_gem_object *obj)
+{
+ struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);
+
+ if (!cirrus_bo)
+ return;
+ cirrus_bo_unref(&cirrus_bo);
+}
+
+
+static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
+{
+ return bo->bo.addr_space_offset;
+}
+
+int
+cirrus_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct cirrus_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_cirrus_bo(obj);
+ *offset = cirrus_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+}
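
cirrus_dumb_mmap_offset() returns the fake offset userspace must pass to mmap(); continuing the userspace sketch above (assumes <sys/mman.h> and the creq filled in by the earlier sketch):

/* Userspace-side sketch: map the dumb buffer created earlier. */
struct drm_mode_map_dumb mreq = { .handle = creq.handle };
if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq) == 0) {
	void *fb = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, mreq.offset);
	/* writes to fb land in the BO backing the framebuffer */
}
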
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
new file mode 100644
index 000000000000..100f6308c509
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -0,0 +1,629 @@
+
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ *
+ * Portions of this code derived from cirrusfb.c:
+ * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
+ *
+ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include <video/cirrus.h>
+
+#include "cirrus_drv.h"
+
+#define CIRRUS_LUT_SIZE 256
+
+#define PALETTE_INDEX 0x8
+#define PALETTE_DATA 0x9
+
+/*
+ * This file contains setup code for the CRTC.
+ */
+
+static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ int i;
+
+ if (!crtc->enabled)
+ return;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ /* VGA registers */
+ WREG8(PALETTE_INDEX, i);
+ WREG8(PALETTE_DATA, cirrus_crtc->lut_r[i]);
+ WREG8(PALETTE_DATA, cirrus_crtc->lut_g[i]);
+ WREG8(PALETTE_DATA, cirrus_crtc->lut_b[i]);
+ }
+}
+
+/*
+ * The DRM core requires DPMS functions; on this hardware they just toggle
+ * the VGA blanking bits in SR01 and GR0E to approximate the requested state
+ */
+
+static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ u8 sr01, gr0e;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ sr01 = 0x00;
+ gr0e = 0x00;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ sr01 = 0x20;
+ gr0e = 0x02;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ sr01 = 0x20;
+ gr0e = 0x04;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ sr01 = 0x20;
+ gr0e = 0x06;
+ break;
+ default:
+ return;
+ }
+
+ WREG8(SEQ_INDEX, 0x1);
+ sr01 |= RREG8(SEQ_DATA) & ~0x20;
+ WREG_SEQ(0x1, sr01);
+
+ WREG8(GFX_INDEX, 0xe);
+ gr0e |= RREG8(GFX_DATA) & ~0x06;
+ WREG_GFX(0xe, gr0e);
+}
+
+/*
+ * The core passes the desired mode to the CRTC code to see whether any
+ * CRTC-specific modifications need to be made to it. We're in a position
+ * to just pass that straight through, so this does nothing
+ */
+static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
+{
+ struct cirrus_device *cdev = crtc->dev->dev_private;
+ u32 addr;
+ u8 tmp;
+
+ addr = offset >> 2;
+ WREG_CRT(0x0c, (u8)((addr >> 8) & 0xff));
+ WREG_CRT(0x0d, (u8)(addr & 0xff));
+
+ WREG8(CRT_INDEX, 0x1b);
+ tmp = RREG8(CRT_DATA);
+ tmp &= 0xf2;
+ tmp |= (addr >> 16) & 0x01;
+ tmp |= (addr >> 15) & 0x0c;
+ WREG_CRT(0x1b, tmp);
+ WREG8(CRT_INDEX, 0x1d);
+ tmp = RREG8(CRT_DATA);
+ tmp &= 0x7f;
+ tmp |= (addr >> 12) & 0x80;
+ WREG_CRT(0x1d, tmp);
+}
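
The start address is programmed as a quad-word address spread over several CRT registers; a worked example (sketch, values chosen for illustration) makes the bit layout concrete:

/*
 * Example: a framebuffer at VRAM offset 0x100000 gives addr = 0x40000.
 * Bits [7:0] go to CR0D and bits [15:8] to CR0C (both zero here); bit 16
 * lands in CR1B[0], bits 18:17 in CR1B[3:2] (so CR1B gains 0x08 for bit
 * 18) and bit 19 in CR1D[7].
 */
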
+
+/* cirrus is different - we will force move buffers out of VRAM */
+static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct cirrus_device *cdev = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct cirrus_framebuffer *cirrus_fb;
+ struct cirrus_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* push the previous fb to system ram */
+ if (!atomic && fb) {
+ cirrus_fb = to_cirrus_framebuffer(fb);
+ obj = cirrus_fb->obj;
+ bo = gem_to_cirrus_bo(obj);
+ ret = cirrus_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+ cirrus_bo_push_sysram(bo);
+ cirrus_bo_unreserve(bo);
+ }
+
+ cirrus_fb = to_cirrus_framebuffer(crtc->fb);
+ obj = cirrus_fb->obj;
+ bo = gem_to_cirrus_bo(obj);
+
+ ret = cirrus_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = cirrus_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ cirrus_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&cdev->mode_info.gfbdev->gfb == cirrus_fb) {
+ /* if pushing console in kmap it */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret)
+ DRM_ERROR("failed to kmap fbcon\n");
+ }
+ cirrus_bo_unreserve(bo);
+
+ cirrus_set_start_address(crtc, (u32)gpu_addr);
+ return 0;
+}
+
+static int cirrus_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+/*
+ * The meat of this driver. The core passes us a mode and we have to program
+ * it. The modesetting here is the bare minimum required to satisfy the qemu
+ * emulation of this hardware, and running this against a real device is
+ * likely to result in an inadequately programmed mode. We've already had
+ * the opportunity to modify the mode, so whatever we receive here should
+ * be something that can be correctly programmed and displayed
+ */
+static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ int hsyncstart, hsyncend, htotal, hdispend;
+ int vtotal, vdispend;
+ int tmp;
+ int sr07 = 0, hdr = 0;
+
+ htotal = mode->htotal / 8;
+ hsyncend = mode->hsync_end / 8;
+ hsyncstart = mode->hsync_start / 8;
+ hdispend = mode->hdisplay / 8;
+
+ vtotal = mode->vtotal;
+ vdispend = mode->vdisplay;
+
+ vdispend -= 1;
+ vtotal -= 2;
+
+ htotal -= 5;
+ hdispend -= 1;
+ hsyncstart += 1;
+ hsyncend += 1;
+
+ WREG_CRT(VGA_CRTC_V_SYNC_END, 0x20);
+ WREG_CRT(VGA_CRTC_H_TOTAL, htotal);
+ WREG_CRT(VGA_CRTC_H_DISP, hdispend);
+ WREG_CRT(VGA_CRTC_H_SYNC_START, hsyncstart);
+ WREG_CRT(VGA_CRTC_H_SYNC_END, hsyncend);
+ WREG_CRT(VGA_CRTC_V_TOTAL, vtotal & 0xff);
+ WREG_CRT(VGA_CRTC_V_DISP_END, vdispend & 0xff);
+
+ tmp = 0x40;
+ if ((vdispend + 1) & 512)
+ tmp |= 0x20;
+ WREG_CRT(VGA_CRTC_MAX_SCAN, tmp);
+
+ /*
+ * Overflow bits for values that don't fit in the standard registers
+ */
+ tmp = 16;
+ if (vtotal & 256)
+ tmp |= 1;
+ if (vdispend & 256)
+ tmp |= 2;
+ if ((vdispend + 1) & 256)
+ tmp |= 8;
+ if (vtotal & 512)
+ tmp |= 32;
+ if (vdispend & 512)
+ tmp |= 64;
+ WREG_CRT(VGA_CRTC_OVERFLOW, tmp);
+
+ tmp = 0;
+
+ /* More overflow bits */
+
+ if ((htotal + 5) & 64)
+ tmp |= 16;
+ if ((htotal + 5) & 128)
+ tmp |= 32;
+ if (vtotal & 256)
+ tmp |= 64;
+ if (vtotal & 512)
+ tmp |= 128;
+
+ WREG_CRT(CL_CRT1A, tmp);
+
+ /* Disable Hercules/CGA compatibility */
+ WREG_CRT(VGA_CRTC_MODE, 0x03);
+
+ WREG8(SEQ_INDEX, 0x7);
+ sr07 = RREG8(SEQ_DATA);
+ sr07 &= 0xe0;
+ hdr = 0;
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ sr07 |= 0x11;
+ break;
+ case 16:
+ sr07 |= 0xc1;
+ hdr = 0xc0;
+ break;
+ case 24:
+ sr07 |= 0x15;
+ hdr = 0xc5;
+ break;
+ case 32:
+ sr07 |= 0x19;
+ hdr = 0xc5;
+ break;
+ default:
+ return -1;
+ }
+
+ WREG_SEQ(0x7, sr07);
+
+ /* Program the pitch */
+ tmp = crtc->fb->pitches[0] / 8;
+ WREG_CRT(VGA_CRTC_OFFSET, tmp);
+
+ /* Enable extended blanking and pitch bits, and enable full memory */
+ tmp = 0x22;
+ tmp |= (crtc->fb->pitches[0] >> 7) & 0x10;
+ tmp |= (crtc->fb->pitches[0] >> 6) & 0x40;
+ WREG_CRT(0x1b, tmp);
+
+ /* Enable high-colour modes */
+ WREG_GFX(VGA_GFX_MODE, 0x40);
+
+ /* And set graphics mode */
+ WREG_GFX(VGA_GFX_MISC, 0x01);
+
+ WREG_HDR(hdr);
+ cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
+ return 0;
+}
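
To see the timing arithmetic above in action, consider the standard 640x480@60 mode (a sketch; the numbers are the usual VESA timings, not taken from this patch):

/*
 * Example: htotal = 800/8 - 5 = 95 (0x5f), hdispend = 640/8 - 1 = 79
 * (0x4f), vtotal = 525 - 2 = 523 (0x20b), vdispend = 480 - 1 = 479
 * (0x1df).  The low bytes go to the CRTC total/display-end registers;
 * vtotal bit 9 sets overflow bit 5, vdispend bit 8 sets overflow bit 1,
 * and (vdispend + 1) bit 8 sets overflow bit 3.
 */
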
+
+/*
+ * This is called before a mode is programmed. A typical use might be to
+ * enable DPMS during the programming to avoid seeing intermediate stages,
+ * but that's not relevant to us
+ */
+static void cirrus_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void cirrus_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode so can't perform smooth fades on deeper modes,
+ * but it's a requirement that we provide the function
+ */
+static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+ int i;
+
+ if (size != CIRRUS_LUT_SIZE)
+ return;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ cirrus_crtc->lut_r[i] = red[i];
+ cirrus_crtc->lut_g[i] = green[i];
+ cirrus_crtc->lut_b[i] = blue[i];
+ }
+ cirrus_crtc_load_lut(crtc);
+}
+
+/* Simple cleanup function */
+static void cirrus_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(cirrus_crtc);
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs cirrus_crtc_funcs = {
+ .gamma_set = cirrus_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = cirrus_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
+ .dpms = cirrus_crtc_dpms,
+ .mode_fixup = cirrus_crtc_mode_fixup,
+ .mode_set = cirrus_crtc_mode_set,
+ .mode_set_base = cirrus_crtc_mode_set_base,
+ .prepare = cirrus_crtc_prepare,
+ .commit = cirrus_crtc_commit,
+ .load_lut = cirrus_crtc_load_lut,
+};
+
+/* CRTC setup */
+static void cirrus_crtc_init(struct drm_device *dev)
+{
+ struct cirrus_device *cdev = dev->dev_private;
+ struct cirrus_crtc *cirrus_crtc;
+ int i;
+
+ cirrus_crtc = kzalloc(sizeof(struct cirrus_crtc) +
+ (CIRRUSFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ GFP_KERNEL);
+
+ if (cirrus_crtc == NULL)
+ return;
+
+ drm_crtc_init(dev, &cirrus_crtc->base, &cirrus_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&cirrus_crtc->base, CIRRUS_LUT_SIZE);
+ cdev->mode_info.crtc = cirrus_crtc;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ cirrus_crtc->lut_r[i] = i;
+ cirrus_crtc->lut_g[i] = i;
+ cirrus_crtc->lut_b[i] = i;
+ }
+
+ drm_crtc_helper_add(&cirrus_crtc->base, &cirrus_helper_funcs);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+ cirrus_crtc->lut_r[regno] = red;
+ cirrus_crtc->lut_g[regno] = green;
+ cirrus_crtc->lut_b[regno] = blue;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+ *red = cirrus_crtc->lut_r[regno];
+ *green = cirrus_crtc->lut_g[regno];
+ *blue = cirrus_crtc->lut_b[regno];
+}
+
+
+static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void cirrus_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void cirrus_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+ return;
+}
+
+static void cirrus_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void cirrus_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+void cirrus_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(cirrus_encoder);
+}
+
+static const struct drm_encoder_helper_funcs cirrus_encoder_helper_funcs = {
+ .dpms = cirrus_encoder_dpms,
+ .mode_fixup = cirrus_encoder_mode_fixup,
+ .mode_set = cirrus_encoder_mode_set,
+ .prepare = cirrus_encoder_prepare,
+ .commit = cirrus_encoder_commit,
+};
+
+static const struct drm_encoder_funcs cirrus_encoder_encoder_funcs = {
+ .destroy = cirrus_encoder_destroy,
+};
+
+static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct cirrus_encoder *cirrus_encoder;
+
+ cirrus_encoder = kzalloc(sizeof(struct cirrus_encoder), GFP_KERNEL);
+ if (!cirrus_encoder)
+ return NULL;
+
+ encoder = &cirrus_encoder->base;
+ encoder->possible_crtcs = 0x1;
+
+ drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs);
+
+ return encoder;
+}
+
+
+int cirrus_vga_get_modes(struct drm_connector *connector)
+{
+ /* Just add a static list of modes */
+ drm_add_modes_noedid(connector, 640, 480);
+ drm_add_modes_noedid(connector, 800, 600);
+ drm_add_modes_noedid(connector, 1024, 768);
+ drm_add_modes_noedid(connector, 1280, 1024);
+
+ return 4;
+}
+
+static int cirrus_vga_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* Any mode we've added is valid */
+ return MODE_OK;
+}
+
+struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
+ *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj =
+ drm_mode_object_find(connector->dev, enc_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static enum drm_connector_status cirrus_vga_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void cirrus_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
+ .get_modes = cirrus_vga_get_modes,
+ .mode_valid = cirrus_vga_mode_valid,
+ .best_encoder = cirrus_connector_best_encoder,
+};
+
+struct drm_connector_funcs cirrus_vga_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = cirrus_vga_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = cirrus_connector_destroy,
+};
+
+static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct cirrus_connector *cirrus_connector;
+
+ cirrus_connector = kzalloc(sizeof(struct cirrus_connector), GFP_KERNEL);
+ if (!cirrus_connector)
+ return NULL;
+
+ connector = &cirrus_connector->base;
+
+ drm_connector_init(dev, connector,
+ &cirrus_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
+
+ return connector;
+}
+
+
+int cirrus_modeset_init(struct cirrus_device *cdev)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ drm_mode_config_init(cdev->dev);
+ cdev->mode_info.mode_config_initialized = true;
+
+ cdev->dev->mode_config.max_width = CIRRUS_MAX_FB_WIDTH;
+ cdev->dev->mode_config.max_height = CIRRUS_MAX_FB_HEIGHT;
+
+ cdev->dev->mode_config.fb_base = cdev->mc.vram_base;
+ cdev->dev->mode_config.preferred_depth = 24;
+ /* don't prefer a shadow on virt GPU */
+ cdev->dev->mode_config.prefer_shadow = 0;
+
+ cirrus_crtc_init(cdev->dev);
+
+ encoder = cirrus_encoder_init(cdev->dev);
+ if (!encoder) {
+ DRM_ERROR("cirrus_encoder_init failed\n");
+ return -1;
+ }
+
+ connector = cirrus_vga_init(cdev->dev);
+ if (!connector) {
+ DRM_ERROR("cirrus_vga_init failed\n");
+ return -ENOMEM;
+ }
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ ret = cirrus_fbdev_init(cdev);
+ if (ret) {
+ DRM_ERROR("cirrus_fbdev_init failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void cirrus_modeset_fini(struct cirrus_device *cdev)
+{
+ cirrus_fbdev_fini(cdev);
+
+ if (cdev->mode_info.mode_config_initialized) {
+ drm_mode_config_cleanup(cdev->dev);
+ cdev->mode_info.mode_config_initialized = false;
+ }
+}
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
new file mode 100644
index 000000000000..2ebcd11a5023
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "cirrus_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct cirrus_device *
+cirrus_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct cirrus_device, ttm.bdev);
+}
+
+static int
+cirrus_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+cirrus_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &cirrus->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &cirrus_ttm_mem_global_init;
+ global_ref->release = &cirrus_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ cirrus->ttm.bo_global_ref.mem_glob =
+ cirrus->ttm.mem_global_ref.object;
+ global_ref = &cirrus->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&cirrus->ttm.mem_global_ref);
+ return r;
+ }
+ return 0;
+}
+
+void
+cirrus_ttm_global_release(struct cirrus_device *cirrus)
+{
+ if (cirrus->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&cirrus->ttm.mem_global_ref);
+ cirrus->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct cirrus_bo *bo;
+
+ bo = container_of(tbo, struct cirrus_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &cirrus_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int
+cirrus_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct cirrus_bo *cirrusbo = cirrus_bo(bo);
+
+ if (!cirrus_ttm_bo_is_cirrus_bo(bo))
+ return;
+
+ cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_SYSTEM);
+ *pl = cirrusbo->placement;
+}
+
+static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct cirrus_device *cirrus = cirrus_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(cirrus->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int cirrus_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ int r;
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ return r;
+}
+
+
+static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func cirrus_tt_backend_func = {
+ .destroy = &cirrus_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &cirrus_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver cirrus_bo_driver = {
+ .ttm_tt_create = cirrus_ttm_tt_create,
+ .ttm_tt_populate = cirrus_ttm_tt_populate,
+ .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
+ .init_mem_type = cirrus_bo_init_mem_type,
+ .evict_flags = cirrus_bo_evict_flags,
+ .move = cirrus_bo_move,
+ .verify_access = cirrus_bo_verify_access,
+ .io_mem_reserve = &cirrus_ttm_io_mem_reserve,
+ .io_mem_free = &cirrus_ttm_io_mem_free,
+};
+
+int cirrus_mm_init(struct cirrus_device *cirrus)
+{
+ int ret;
+ struct drm_device *dev = cirrus->dev;
+ struct ttm_bo_device *bdev = &cirrus->ttm.bdev;
+
+ ret = cirrus_ttm_global_init(cirrus);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&cirrus->ttm.bdev,
+ cirrus->ttm.bo_global_ref.ref.object,
+ &cirrus_bo_driver, DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ cirrus->mc.vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ cirrus->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+
+ return 0;
+}
+
+void cirrus_mm_fini(struct cirrus_device *cirrus)
+{
+ struct drm_device *dev = cirrus->dev;
+ ttm_bo_device_release(&cirrus->ttm.bdev);
+
+ cirrus_ttm_global_release(cirrus);
+
+ if (cirrus->fb_mtrr >= 0) {
+ drm_mtrr_del(cirrus->fb_mtrr,
+ pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+ cirrus->fb_mtrr = -1;
+ }
+}
+
+void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+void cirrus_bo_unreserve(struct cirrus_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+int cirrus_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct cirrus_bo **pcirrusbo)
+{
+ struct cirrus_device *cirrus = dev->dev_private;
+ struct cirrus_bo *cirrusbo;
+ size_t acc_size;
+ int ret;
+
+ cirrusbo = kzalloc(sizeof(struct cirrus_bo), GFP_KERNEL);
+ if (!cirrusbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &cirrusbo->gem, size);
+ if (ret) {
+ kfree(cirrusbo);
+ return ret;
+ }
+
+ cirrusbo->gem.driver_private = NULL;
+ cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+
+ cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&cirrus->ttm.bdev, size,
+ sizeof(struct cirrus_bo));
+
+ ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
+ ttm_bo_type_device, &cirrusbo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ NULL, cirrus_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pcirrusbo = cirrusbo;
+ return 0;
+}
+
+static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = cirrus_bo_gpu_offset(bo);
+ return 0;
+ }
+
+ cirrus_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = cirrus_bo_gpu_offset(bo);
+ return 0;
+}
+
+int cirrus_bo_unpin(struct cirrus_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int cirrus_bo_push_sysram(struct cirrus_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret) {
+ DRM_ERROR("pushing to VRAM failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct cirrus_device *cirrus;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ cirrus = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev);
+}
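
As a rough usage sketch of the buffer-object helpers above (not part of this patch; the size, alignment and placement flags are arbitrary), a scanout buffer would be created, reserved, pinned into VRAM and unreserved like so:

    static int cirrus_pin_example(struct drm_device *dev)
    {
        struct cirrus_bo *bo;
        u64 gpu_addr;
        int ret;

        /* 1024x768 at 32bpp, page aligned, no extra placement flags */
        ret = cirrus_bo_create(dev, 1024 * 768 * 4, PAGE_SIZE, 0, &bo);
        if (ret)
            return ret;

        ret = cirrus_bo_reserve(bo, false);
        if (ret)
            return ret;

        ret = cirrus_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
        cirrus_bo_unreserve(bo);
        return ret;
    }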
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 4b8653b932f9..08758e061478 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -98,3 +98,26 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
+
+void
+drm_clflush_virt_range(char *addr, unsigned long length)
+{
+#if defined(CONFIG_X86)
+ if (cpu_has_clflush) {
+ char *end = addr + length;
+ mb();
+ for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
+ clflush(addr);
+ clflush(end - 1);
+ mb();
+ return;
+ }
+
+ if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+ printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+ WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_virt_range);
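
A minimal sketch of how a driver might use the new export (the wrapper name and its caller are assumed, not taken from this patch): after the CPU writes through a kernel virtual mapping that a non-snooping engine will read, flush the written range.

    static void example_flush_cpu_writes(char *vaddr, unsigned long len)
    {
        /* write back every cache line covering [vaddr, vaddr + len);
         * on x86 the helper brackets the flush with memory barriers */
        drm_clflush_virt_range(vaddr, len);
    }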
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 325365f6d355..affa629589ac 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -85,11 +85,12 @@ again:
mutex_lock(&dev->struct_mutex);
ret = idr_get_new_above(&dev->ctx_idr, NULL,
DRM_RESERVED_CONTEXTS, &new_id);
- if (ret == -EAGAIN) {
- mutex_unlock(&dev->struct_mutex);
- goto again;
- }
mutex_unlock(&dev->struct_mutex);
+ if (ret == -EAGAIN)
+ goto again;
+ else if (ret)
+ return ret;
+
return new_id;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index c79870a75c2f..08a7aa722d6b 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -227,7 +227,7 @@ static int drm_mode_object_get(struct drm_device *dev,
again:
if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Ran out memory getting a mode number\n");
- return -EINVAL;
+ return -ENOMEM;
}
mutex_lock(&dev->mode_config.idr_mutex);
@@ -235,6 +235,8 @@ again:
mutex_unlock(&dev->mode_config.idr_mutex);
if (ret == -EAGAIN)
goto again;
+ else if (ret)
+ return ret;
obj->id = new_id;
obj->type = obj_type;
@@ -361,7 +363,7 @@ EXPORT_SYMBOL(drm_framebuffer_cleanup);
* @funcs: callbacks for the new CRTC
*
* LOCKING:
- * Caller must hold mode config lock.
+ * Takes mode_config lock.
*
* Inits a new object created as base part of an driver crtc object.
*
@@ -382,6 +384,8 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
if (ret)
goto out;
+ crtc->base.properties = &crtc->properties;
+
list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
dev->mode_config.num_crtc++;
@@ -481,6 +485,7 @@ int drm_connector_init(struct drm_device *dev,
if (ret)
goto out;
+ connector->base.properties = &connector->properties;
connector->dev = dev;
connector->funcs = funcs;
connector->connector_type = connector_type;
@@ -603,6 +608,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (ret)
goto out;
+ plane->base.properties = &plane->properties;
plane->dev = dev;
plane->funcs = funcs;
plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
@@ -1422,11 +1428,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
}
connector = obj_to_connector(obj);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- props_count++;
- }
- }
+ props_count = connector->properties.count;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
@@ -1479,21 +1481,19 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
copied = 0;
prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- if (put_user(connector->property_ids[i],
- prop_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
+ for (i = 0; i < connector->properties.count; i++) {
+ if (put_user(connector->properties.ids[i],
+ prop_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
- if (put_user(connector->property_values[i],
- prop_values + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
+ if (put_user(connector->properties.values[i],
+ prop_values + copied)) {
+ ret = -EFAULT;
+ goto out;
}
+ copied++;
}
}
out_resp->count_props = props_count;
@@ -1830,7 +1830,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_display_mode *mode = NULL;
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
- int ret = 0;
+ int ret;
int i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -2102,7 +2102,7 @@ int drm_mode_addfb(struct drm_device *dev,
fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
if (IS_ERR(fb)) {
- DRM_ERROR("could not create framebuffer\n");
+ DRM_DEBUG_KMS("could not create framebuffer\n");
ret = PTR_ERR(fb);
goto out;
}
@@ -2116,7 +2116,7 @@ out:
return ret;
}
-static int format_check(struct drm_mode_fb_cmd2 *r)
+static int format_check(const struct drm_mode_fb_cmd2 *r)
{
uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
@@ -2185,6 +2185,47 @@ static int format_check(struct drm_mode_fb_cmd2 *r)
}
}
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
+{
+ int ret, hsub, vsub, num_planes, i;
+
+ ret = format_check(r);
+ if (ret) {
+ DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format);
+ return ret;
+ }
+
+ hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
+ num_planes = drm_format_num_planes(r->pixel_format);
+
+ if (r->width == 0 || r->width % hsub) {
+ DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
+ return -EINVAL;
+ }
+
+ if (r->height == 0 || r->height % vsub) {
+ DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_planes; i++) {
+ unsigned int width = r->width / (i != 0 ? hsub : 1);
+
+ if (!r->handles[i]) {
+ DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+ return -EINVAL;
+ }
+
+ if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
+ DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
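
As a worked example of these checks (the values are illustrative and bo_handle is a hypothetical GEM handle): NV12 has two planes, 2x2 chroma subsampling and 1- and 2-byte samples, so a 1920x1080 request must look roughly like this to pass.

    struct drm_mode_fb_cmd2 req = {
        .width        = 1920,                     /* 1920 % hsub(2) == 0 */
        .height       = 1080,                     /* 1080 % vsub(2) == 0 */
        .pixel_format = DRM_FORMAT_NV12,          /* num_planes == 2 */
        .handles      = { bo_handle, bo_handle }, /* every plane needs a handle */
        .pitches      = { 1920,                   /* plane 0: >= 1920 * 1 */
                          1920 },                 /* plane 1: >= (1920 / 2) * 2 */
        .offsets      = { 0, 1920 * 1080 },
    };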
+
/**
* drm_mode_addfb2 - add an FB to the graphics configuration
* @inode: inode from the ioctl
@@ -2208,33 +2249,31 @@ int drm_mode_addfb2(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r = data;
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
- int ret = 0;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
if ((config->min_width > r->width) || (r->width > config->max_width)) {
- DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n",
+ DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
return -EINVAL;
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
- DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n",
+ DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
r->height, config->min_height, config->max_height);
return -EINVAL;
}
- ret = format_check(r);
- if (ret) {
- DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
+ ret = framebuffer_check(r);
+ if (ret)
return ret;
- }
mutex_lock(&dev->mode_config.mutex);
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
- DRM_ERROR("could not create framebuffer\n");
+ DRM_DEBUG_KMS("could not create framebuffer\n");
ret = PTR_ERR(fb);
goto out;
}
@@ -2365,7 +2404,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
struct drm_framebuffer *fb;
unsigned flags;
int num_clips;
- int ret = 0;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -2564,7 +2603,7 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
struct drm_display_mode *mode;
struct drm_mode_object *obj;
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret = 0;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -2618,7 +2657,7 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
struct drm_connector *connector;
struct drm_display_mode mode;
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret = 0;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -2710,6 +2749,34 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
}
EXPORT_SYMBOL(drm_property_create_enum);
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values)
+{
+ struct drm_property *property;
+ int i, ret;
+
+ flags |= DRM_MODE_PROP_BITMASK;
+
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
+
+ for (i = 0; i < num_values; i++) {
+ ret = drm_property_add_enum(property, i,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+ }
+
+ return property;
+}
+EXPORT_SYMBOL(drm_property_create_bitmask);
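
A hypothetical use of the new helper (the property name, bit assignments and the plane variable are illustrative, not from this patch): each entry's .type is the bit position, so the initial value below sets bit 0. The attach helper used here, drm_object_attach_property(), is added further down in this same patch.

    static const struct drm_prop_enum_list rotation_bits[] = {
        { 0, "rotate-0" },
        { 1, "rotate-90" },
        { 2, "rotate-180" },
        { 3, "rotate-270" },
    };
    struct drm_property *prop;

    prop = drm_property_create_bitmask(dev, 0, "rotation", rotation_bits,
                                       ARRAY_SIZE(rotation_bits));
    if (prop)
        drm_object_attach_property(&plane->base, prop, 1 << 0);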
+
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max)
@@ -2734,7 +2801,14 @@ int drm_property_add_enum(struct drm_property *property, int index,
{
struct drm_property_enum *prop_enum;
- if (!(property->flags & DRM_MODE_PROP_ENUM))
+ if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
+ return -EINVAL;
+
+ /*
+ * Bitmask enum properties have the additional constraint of values
+ * from 0 to 63
+ */
+ if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
return -EINVAL;
if (!list_empty(&property->enum_blob_list)) {
@@ -2778,60 +2852,78 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
}
EXPORT_SYMBOL(drm_property_destroy);
-int drm_connector_attach_property(struct drm_connector *connector,
+void drm_connector_attach_property(struct drm_connector *connector,
struct drm_property *property, uint64_t init_val)
{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == 0) {
- connector->property_ids[i] = property->base.id;
- connector->property_values[i] = init_val;
- break;
- }
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
+ drm_object_attach_property(&connector->base, property, init_val);
}
EXPORT_SYMBOL(drm_connector_attach_property);
int drm_connector_property_set_value(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
+ return drm_object_property_set_value(&connector->base, property, value);
+}
+EXPORT_SYMBOL(drm_connector_property_set_value);
+
+int drm_connector_property_get_value(struct drm_connector *connector,
+ struct drm_property *property, uint64_t *val)
+{
+ return drm_object_property_get_value(&connector->base, property, val);
+}
+EXPORT_SYMBOL(drm_connector_property_get_value);
+
+void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val)
+{
+ int count = obj->properties->count;
+
+ if (count == DRM_OBJECT_MAX_PROPERTY) {
+ WARN(1, "Failed to attach object property (type: 0x%x). Please "
+ "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+ "you see this message on the same object type.\n",
+ obj->type);
+ return;
+ }
+
+ obj->properties->ids[count] = property->base.id;
+ obj->properties->values[count] = init_val;
+ obj->properties->count++;
+}
+EXPORT_SYMBOL(drm_object_attach_property);
+
+int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t val)
+{
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- connector->property_values[i] = value;
- break;
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ obj->properties->values[i] = val;
+ return 0;
}
}
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
+ return -EINVAL;
}
-EXPORT_SYMBOL(drm_connector_property_set_value);
+EXPORT_SYMBOL(drm_object_property_set_value);
-int drm_connector_property_get_value(struct drm_connector *connector,
+int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- *val = connector->property_values[i];
- break;
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ *val = obj->properties->values[i];
+ return 0;
}
}
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
+ return -EINVAL;
}
-EXPORT_SYMBOL(drm_connector_property_get_value);
+EXPORT_SYMBOL(drm_object_property_get_value);
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -2862,7 +2954,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
}
property = obj_to_property(obj);
- if (property->flags & DRM_MODE_PROP_ENUM) {
+ if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head)
enum_count++;
} else if (property->flags & DRM_MODE_PROP_BLOB) {
@@ -2887,7 +2979,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
}
out_resp->count_values = value_count;
- if (property->flags & DRM_MODE_PROP_ENUM) {
+ if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
@@ -3009,7 +3101,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid)
{
struct drm_device *dev = connector->dev;
- int ret = 0, size;
+ int ret, size;
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -3033,75 +3125,202 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+static bool drm_property_change_is_valid(struct drm_property *property,
+ uint64_t value)
+{
+ if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ return false;
+ if (property->flags & DRM_MODE_PROP_RANGE) {
+ if (value < property->values[0] || value > property->values[1])
+ return false;
+ return true;
+ } else if (property->flags & DRM_MODE_PROP_BITMASK) {
+ int i;
+ uint64_t valid_mask = 0;
+ for (i = 0; i < property->num_values; i++)
+ valid_mask |= (1ULL << property->values[i]);
+ return !(value & ~valid_mask);
+ } else {
+ int i;
+ for (i = 0; i < property->num_values; i++)
+ if (property->values[i] == value)
+ return true;
+ return false;
+ }
+}
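
To make the bitmask branch concrete: a property registered with bit positions {0, 1, 2} yields valid_mask = (1 << 0) | (1 << 1) | (1 << 2) = 0x7, so a requested value of 0x5 (bits 0 and 2) is accepted because 0x5 & ~0x7 == 0, while 0x9 (bit 3 set) is rejected because 0x9 & ~0x7 == 0x8.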
+
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_connector_set_property *out_resp = data;
- struct drm_mode_object *obj;
- struct drm_property *property;
- struct drm_connector *connector;
+ struct drm_mode_connector_set_property *conn_set_prop = data;
+ struct drm_mode_obj_set_property obj_set_prop = {
+ .value = conn_set_prop->value,
+ .prop_id = conn_set_prop->prop_id,
+ .obj_id = conn_set_prop->connector_id,
+ .obj_type = DRM_MODE_OBJECT_CONNECTOR
+ };
+
+ /* It does all the locking and checking we need */
+ return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
+}
+
+static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
int ret = -EINVAL;
+ struct drm_connector *connector = obj_to_connector(obj);
+
+ /* Do DPMS ourselves */
+ if (property == connector->dev->mode_config.dpms_property) {
+ if (connector->funcs->dpms)
+ (*connector->funcs->dpms)(connector, (int)value);
+ ret = 0;
+ } else if (connector->funcs->set_property)
+ ret = connector->funcs->set_property(connector, property, value);
+
+ /* store the property value if successful */
+ if (!ret)
+ drm_connector_property_set_value(connector, property, value);
+ return ret;
+}
+
+static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+
+ if (crtc->funcs->set_property)
+ ret = crtc->funcs->set_property(crtc, property, value);
+ if (!ret)
+ drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+
+static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_plane *plane = obj_to_plane(obj);
+
+ if (plane->funcs->set_property)
+ ret = plane->funcs->set_property(plane, property, value);
+ if (!ret)
+ drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_obj_get_properties *arg = data;
+ struct drm_mode_object *obj;
+ int ret = 0;
int i;
+ int copied = 0;
+ int props_count = 0;
+ uint32_t __user *props_ptr;
+ uint64_t __user *prop_values_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+ obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!obj->properties) {
+ ret = -EINVAL;
goto out;
}
- connector = obj_to_connector(obj);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == out_resp->prop_id)
- break;
+ props_count = obj->properties->count;
+
+ /* This ioctl is called twice, once to determine how much space is
+ * needed, and a second time to fill it. */
+ if ((arg->count_props >= props_count) && props_count) {
+ copied = 0;
+ props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+ prop_values_ptr = (uint64_t __user *)(unsigned long)
+ (arg->prop_values_ptr);
+ for (i = 0; i < props_count; i++) {
+ if (put_user(obj->properties->ids[i],
+ props_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (put_user(obj->properties->values[i],
+ prop_values_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
}
+ arg->count_props = props_count;
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
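
From userspace, the two-pass pattern noted in the comment above looks roughly like the sketch below (assuming an open DRM fd, a known CRTC id, and that DRM_MODE_OBJECT_CRTC and the ioctl definitions are visible from the DRM headers; error handling is trimmed):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>
    #include <drm/drm_mode.h>

    static int dump_crtc_properties(int fd, uint32_t crtc_id)
    {
        struct drm_mode_obj_get_properties arg = {
            .obj_id   = crtc_id,
            .obj_type = DRM_MODE_OBJECT_CRTC,
        };
        uint32_t *ids;
        uint64_t *values;

        /* pass 1: only count_props comes back filled */
        if (ioctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg))
            return -1;

        ids = calloc(arg.count_props, sizeof(*ids));
        values = calloc(arg.count_props, sizeof(*values));
        if (!ids || !values)
            return -1;

        arg.props_ptr = (uintptr_t)ids;
        arg.prop_values_ptr = (uintptr_t)values;

        /* pass 2: the kernel copies out ids[] and values[] */
        if (ioctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg))
            return -1;

        /* ... inspect ids[i] / values[i] here ... */
        free(ids);
        free(values);
        return 0;
    }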
+
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_obj_set_property *arg = data;
+ struct drm_mode_object *arg_obj;
+ struct drm_mode_object *prop_obj;
+ struct drm_property *property;
+ int ret = -EINVAL;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
- if (i == DRM_CONNECTOR_MAX_PROPERTY) {
+ arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ if (!arg_obj)
+ goto out;
+ if (!arg_obj->properties)
goto out;
- }
- obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
- if (!obj) {
+ for (i = 0; i < arg_obj->properties->count; i++)
+ if (arg_obj->properties->ids[i] == arg->prop_id)
+ break;
+
+ if (i == arg_obj->properties->count)
goto out;
- }
- property = obj_to_property(obj);
- if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ prop_obj = drm_mode_object_find(dev, arg->prop_id,
+ DRM_MODE_OBJECT_PROPERTY);
+ if (!prop_obj)
goto out;
+ property = obj_to_property(prop_obj);
- if (property->flags & DRM_MODE_PROP_RANGE) {
- if (out_resp->value < property->values[0])
- goto out;
+ if (!drm_property_change_is_valid(property, arg->value))
+ goto out;
- if (out_resp->value > property->values[1])
- goto out;
- } else {
- int found = 0;
- for (i = 0; i < property->num_values; i++) {
- if (property->values[i] == out_resp->value) {
- found = 1;
- break;
- }
- }
- if (!found) {
- goto out;
- }
+ switch (arg_obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR:
+ ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+ arg->value);
+ break;
+ case DRM_MODE_OBJECT_CRTC:
+ ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+ break;
+ case DRM_MODE_OBJECT_PLANE:
+ ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
+ break;
}
- /* Do DPMS ourselves */
- if (property == connector->dev->mode_config.dpms_property) {
- if (connector->funcs->dpms)
- (*connector->funcs->dpms)(connector, (int) out_resp->value);
- ret = 0;
- } else if (connector->funcs->set_property)
- ret = connector->funcs->set_property(connector, property, out_resp->value);
-
- /* store the property value if successful */
- if (!ret)
- drm_connector_property_set_value(connector, property, out_resp->value);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
@@ -3173,6 +3392,11 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
}
crtc = obj_to_crtc(obj);
+ if (crtc->funcs->gamma_set == NULL) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
/* memcpy into gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size) {
ret = -EINVAL;
@@ -3468,3 +3692,140 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
}
}
EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 3;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_num_planes);
+
+/**
+ * drm_format_plane_cpp - determine the bytes per pixel value
+ * @format: pixel format (DRM_FORMAT_*)
+ * @plane: plane index
+ *
+ * RETURNS:
+ * The bytes per pixel value for the specified plane.
+ */
+int drm_format_plane_cpp(uint32_t format, int plane)
+{
+ unsigned int depth;
+ int bpp;
+
+ if (plane >= drm_format_num_planes(format))
+ return 0;
+
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ return 2;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ return plane ? 2 : 1;
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 1;
+ default:
+ drm_fb_get_bpp_depth(format, &depth, &bpp);
+ return bpp >> 3;
+ }
+}
+EXPORT_SYMBOL(drm_format_plane_cpp);
+
+/**
+ * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The horizontal chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_horz_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
+
+/**
+ * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The vertical chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_vert_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
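
Taken together, these helpers let a driver size each plane of a planar buffer; a minimal sketch (the function is illustrative, not part of the patch):

    static size_t example_plane_size(uint32_t format, int plane,
                                     int width, int height)
    {
        int hsub = drm_format_horz_chroma_subsampling(format);
        int vsub = drm_format_vert_chroma_subsampling(format);
        /* only the chroma planes are subsampled */
        int w = plane ? width / hsub : width;
        int h = plane ? height / vsub : height;

        return (size_t)w * h * drm_format_plane_cpp(format, plane);
    }

For DRM_FORMAT_YUV420 at 640x480 this gives 640 * 480 bytes for plane 0 and 320 * 240 bytes for each of the two chroma planes.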
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 81118893264c..3252e7067d8b 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -518,7 +518,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
int count = 0, ro, fail = 0;
struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_mode_set save_set;
- int ret = 0;
+ int ret;
int i;
DRM_DEBUG_KMS("\n");
@@ -1023,36 +1023,3 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
-
-
-/**
- * drm_format_num_planes - get the number of planes for format
- * @format: pixel format (DRM_FORMAT_*)
- *
- * RETURNS:
- * The number of planes used by the specified pixel format.
- */
-int drm_format_num_planes(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 3;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- return 2;
- default:
- return 1;
- }
-}
-EXPORT_SYMBOL(drm_format_num_planes);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 6116e3b75393..8a9d0792e4ec 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -163,7 +163,9 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5a18b0df8285..c3b5139eba7f 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -66,6 +66,8 @@
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -81,7 +83,7 @@ struct detailed_mode_closure {
#define LEVEL_CVT 3
static struct edid_quirk {
- char *vendor;
+ char vendor[4];
int product_id;
u32 quirks;
} edid_quirk_list[] = {
@@ -120,6 +122,9 @@ static struct edid_quirk {
/* Samsung SyncMaster 22[5-6]BW */
{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* ViewSonic VA2026w */
+ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
};
/*** DDC fetch and block validation ***/
@@ -149,13 +154,13 @@ EXPORT_SYMBOL(drm_edid_header_is_valid);
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
*/
-bool drm_edid_block_valid(u8 *raw_edid)
+bool drm_edid_block_valid(u8 *raw_edid, int block)
{
int i;
u8 csum = 0;
struct edid *edid = (struct edid *)raw_edid;
- if (raw_edid[0] == 0x00) {
+ if (block == 0) {
int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ;
else if (score >= 6) {
@@ -219,7 +224,7 @@ bool drm_edid_is_valid(struct edid *edid)
return false;
for (i = 0; i <= edid->extensions; i++)
- if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i))
return false;
return true;
@@ -299,7 +304,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block))
+ if (drm_edid_block_valid(block, 0))
break;
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
connector->null_edid_counter++;
@@ -324,7 +329,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+ if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j)) {
valid_extensions++;
break;
}
@@ -486,23 +491,47 @@ static void edid_fixup_preferred(struct drm_connector *connector,
preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
+static bool
+mode_is_rb(const struct drm_display_mode *mode)
+{
+ return (mode->htotal - mode->hdisplay == 160) &&
+ (mode->hsync_end - mode->hdisplay == 80) &&
+ (mode->hsync_end - mode->hsync_start == 32) &&
+ (mode->vsync_start - mode->vdisplay == 3);
+}
+
+/*
+ * drm_mode_find_dmt - Create a copy of a mode if present in DMT
+ * @dev: Device to duplicate against
+ * @hsize: Mode width
+ * @vsize: Mode height
+ * @fresh: Mode refresh rate
+ * @rb: Mode reduced-blanking-ness
+ *
+ * Walk the DMT mode list looking for a match for the given parameters.
+ * Return a newly allocated copy of the mode, or NULL if not found.
+ */
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
- int hsize, int vsize, int fresh)
+ int hsize, int vsize, int fresh,
+ bool rb)
{
- struct drm_display_mode *mode = NULL;
int i;
for (i = 0; i < drm_num_dmt_modes; i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
- if (hsize == ptr->hdisplay &&
- vsize == ptr->vdisplay &&
- fresh == drm_mode_vrefresh(ptr)) {
- /* get the expected default mode */
- mode = drm_mode_duplicate(dev, ptr);
- break;
- }
+ if (hsize != ptr->hdisplay)
+ continue;
+ if (vsize != ptr->vdisplay)
+ continue;
+ if (fresh != drm_mode_vrefresh(ptr))
+ continue;
+ if (rb != mode_is_rb(ptr))
+ continue;
+
+ return drm_mode_duplicate(dev, ptr);
}
- return mode;
+
+ return NULL;
}
EXPORT_SYMBOL(drm_mode_find_dmt);
@@ -731,10 +760,17 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
}
/* check whether it can be found in default mode table */
- mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
+ if (drm_monitor_supports_rb(edid)) {
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
+ true);
+ if (mode)
+ return mode;
+ }
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false);
if (mode)
return mode;
+ /* okay, generate it */
switch (timing_level) {
case LEVEL_DMT:
break;
@@ -748,6 +784,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
* secondary GTF curve. Please don't do that.
*/
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ if (!mode)
+ return NULL;
if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
drm_mode_destroy(dev, mode);
mode = drm_gtf_mode_complex(dev, hsize, vsize,
@@ -852,12 +890,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
"Wrong Hsync/Vsync pulse width\n");
return NULL;
}
+
+ if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+ mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+ if (!mode)
+ return NULL;
+
+ goto set_size;
+ }
+
mode = drm_mode_create(dev);
if (!mode)
return NULL;
- mode->type = DRM_MODE_TYPE_DRIVER;
-
if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
timing->pixel_clock = cpu_to_le16(1088);
@@ -881,8 +926,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
drm_mode_do_interlace_quirk(mode, pt);
- drm_mode_set_name(mode);
-
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
}
@@ -892,6 +935,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+set_size:
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
@@ -905,16 +949,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->height_mm = edid->height_cm * 10;
}
- return mode;
-}
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_name(mode);
-static bool
-mode_is_rb(const struct drm_display_mode *mode)
-{
- return (mode->htotal - mode->hdisplay == 160) &&
- (mode->hsync_end - mode->hdisplay == 80) &&
- (mode->hsync_end - mode->hsync_start == 32) &&
- (mode->vsync_start - mode->vdisplay == 3);
+ return mode;
}
static bool
@@ -994,12 +1032,8 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
return true;
}
-/*
- * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
- * need to account for them.
- */
static int
-drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
{
int i, modes = 0;
@@ -1019,17 +1053,110 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;
}
+/* fix up 1366x768 mode from 1368x768;
+ * GTF/CVT can't express a 1366 width, which isn't divisible by 8
+ */
+static void fixup_mode_1366x768(struct drm_display_mode *mode)
+{
+ if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
+ mode->hdisplay = 1366;
+ mode->hsync_start--;
+ mode->hsync_end--;
+ drm_mode_set_name(mode);
+ }
+}
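
Concretely, the range-probing helpers that follow ask GTF/CVT for 1368x768 and then narrow the result; a sketch of that flow (dev and newmode come from the surrounding helper):

    newmode = drm_gtf_mode(dev, 1368, 768, 60, 0, 0);
    if (newmode) {
        fixup_mode_1366x768(newmode);
        /* hdisplay is now 1366, hsync_start/hsync_end each moved back
         * by one pixel, and the mode name was regenerated */
    }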
+
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < num_extra_modes; i++) {
+ const struct minimode *m = &extra_modes[i];
+ newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
+ if (!newmode)
+ return modes;
+
+ fixup_mode_1366x768(newmode);
+ if (!mode_in_range(newmode, edid, timing)) {
+ drm_mode_destroy(dev, newmode);
+ continue;
+ }
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ return modes;
+}
+
+static int
+drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ bool rb = drm_monitor_supports_rb(edid);
+
+ for (i = 0; i < num_extra_modes; i++) {
+ const struct minimode *m = &extra_modes[i];
+ newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
+ if (!newmode)
+ return modes;
+
+ fixup_mode_1366x768(newmode);
+ if (!mode_in_range(newmode, edid, timing)) {
+ drm_mode_destroy(dev, newmode);
+ continue;
+ }
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ return modes;
+}
+
static void
do_inferred_modes(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
struct detailed_non_pixel *data = &timing->data.other_data;
- int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+ struct detailed_data_monitor_range *range = &data->data.range;
+
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ return;
+
+ closure->modes += drm_dmt_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+
+ if (!version_greater(closure->edid, 1, 1))
+ return; /* GTF not defined yet */
- if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE)
+ switch (range->flags) {
+ case 0x02: /* secondary gtf, XXX could do more */
+ case 0x00: /* default gtf */
closure->modes += drm_gtf_modes_for_range(closure->connector,
closure->edid,
timing);
+ break;
+ case 0x04: /* cvt, only in 1.4+ */
+ if (!version_greater(closure->edid, 1, 3))
+ break;
+
+ closure->modes += drm_cvt_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+ break;
+ case 0x01: /* just the ranges, no formula */
+ default:
+ break;
+ }
}
static int
@@ -1062,8 +1189,8 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
mode = drm_mode_find_dmt(connector->dev,
est3_modes[m].w,
est3_modes[m].h,
- est3_modes[m].r
- /*, est3_modes[m].rb */);
+ est3_modes[m].r,
+ est3_modes[m].rb);
if (mode) {
drm_mode_probed_add(connector, mode);
modes++;
@@ -1312,6 +1439,8 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
#define EDID_BASIC_AUDIO (1 << 6)
+#define EDID_CEA_YCRCB444 (1 << 5)
+#define EDID_CEA_YCRCB422 (1 << 4)
/**
* Search EDID for CEA extension block.
@@ -1666,13 +1795,29 @@ static void drm_add_display_info(struct edid *edid,
info->bpc = 0;
info->color_formats = 0;
- /* Only defined for 1.4 with digital displays */
- if (edid->revision < 4)
+ if (edid->revision < 3)
return;
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
return;
+ /* Get data from CEA blocks if present */
+ edid_ext = drm_find_cea_extension(edid);
+ if (edid_ext) {
+ info->cea_rev = edid_ext[1];
+
+ /* The existence of a CEA block should imply RGB support */
+ info->color_formats = DRM_COLOR_FORMAT_RGB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB422)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+ }
+
+ /* Only defined for 1.4 with digital displays */
+ if (edid->revision < 4)
+ return;
+
switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
case DRM_EDID_DIGITAL_DEPTH_6:
info->bpc = 6;
@@ -1698,18 +1843,11 @@ static void drm_add_display_info(struct edid *edid,
break;
}
- info->color_formats = DRM_COLOR_FORMAT_RGB444;
- if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB444)
- info->color_formats = DRM_COLOR_FORMAT_YCRCB444;
- if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422)
- info->color_formats = DRM_COLOR_FORMAT_YCRCB422;
-
- /* Get data from CEA blocks if present */
- edid_ext = drm_find_cea_extension(edid);
- if (!edid_ext)
- return;
-
- info->cea_rev = edid_ext[1];
+ info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
/**
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index da9acba2dd6c..66d4a28ad5a2 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -173,7 +173,7 @@ static int edid_load(struct drm_connector *connector, char *name,
}
memcpy(edid, fwdata, fwsize);
- if (!drm_edid_block_valid(edid)) {
+ if (!drm_edid_block_valid(edid, 0)) {
DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
name);
kfree(edid);
@@ -185,7 +185,7 @@ static int edid_load(struct drm_connector *connector, char *name,
if (i != valid_extensions + 1)
memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
edid + i * EDID_LENGTH, EDID_LENGTH);
- if (drm_edid_block_valid(edid + i * EDID_LENGTH))
+ if (drm_edid_block_valid(edid + i * EDID_LENGTH, i))
valid_extensions++;
}
@@ -220,18 +220,18 @@ int drm_load_edid_firmware(struct drm_connector *connector)
{
char *connector_name = drm_get_connector_name(connector);
char *edidname = edid_firmware, *last, *colon;
- int ret = 0;
+ int ret;
if (*edidname == '\0')
- return ret;
+ return 0;
colon = strchr(edidname, ':');
if (colon != NULL) {
if (strncmp(connector_name, edidname, colon - edidname))
- return ret;
+ return 0;
edidname = colon + 1;
if (*edidname == '\0')
- return ret;
+ return 0;
}
last = edidname + strlen(edidname) - 1;
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index a91ffb117220..ff98a7eb38dd 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -30,7 +30,6 @@
/*
* Autogenerated from the DMT spec.
* This table is copied from xfree86/modes/xf86EdidModes.c.
- * But the mode with Reduced blank feature is deleted.
*/
static const struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
@@ -81,6 +80,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
896, 1048, 0, 600, 601, 604, 631, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@120Hz RB */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
+ 880, 960, 0, 600, 603, 607, 636, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 848x480@60Hz */
{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
976, 1088, 0, 480, 486, 494, 517, 0,
@@ -106,10 +109,18 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@120Hz RB */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
+ 1104, 1184, 0, 768, 771, 775, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 790, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
@@ -122,6 +133,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
1496, 1712, 0, 768, 771, 778, 809, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@120Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 823, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@60Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1480, 1680, 0, 800, 803, 809, 831, 0,
@@ -134,6 +153,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
1496, 1712, 0, 800, 803, 809, 843, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@120Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 847, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x960@60Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1488, 1800, 0, 960, 961, 964, 1000, 0,
@@ -142,6 +165,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
1504, 1728, 0, 960, 961, 964, 1011, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@120Hz RB */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
+ 1360, 1440, 0, 960, 963, 967, 1017, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x1024@60Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
@@ -154,22 +181,42 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@120Hz RB */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
+ 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1360x768@60Hz */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1536, 1792, 0, 768, 771, 777, 795, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@60Hz */
+ /* 1360x768@120Hz RB */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
+ 1440, 1520, 0, 768, 771, 776, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@75Hz */
+ /* 1400x1050@75Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@85Hz */
+ /* 1400x1050@85Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@120Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 926, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1440x900@60Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1672, 1904, 0, 900, 903, 909, 934, 0,
@@ -182,6 +229,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
1696, 1952, 0, 900, 903, 909, 948, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@120Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 953, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1600x1200@60Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
@@ -202,6 +253,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@120Hz RB */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
+ 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1680x1050@60Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
@@ -214,15 +273,23 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@120Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1792x1344@60Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1729x1344@75Hz */
+ /* 1792x1344@75Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1853x1392@60Hz */
+ /* 1792x1344@120Hz RB */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
+ 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
@@ -230,6 +297,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@120Hz RB */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
+ 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
@@ -242,6 +317,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@120Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1440@60Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
@@ -250,6 +329,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@120Hz RB */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
+ 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
@@ -262,6 +349,11 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@120Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+
};
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
@@ -320,12 +412,14 @@ static const struct drm_display_mode edid_est_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
-static const struct {
+struct minimode {
short w;
short h;
short r;
short rb;
-} est3_modes[] = {
+};
+
+static const struct minimode est3_modes[] = {
/* byte 6 */
{ 640, 350, 85, 0 },
{ 640, 400, 85, 0 },
@@ -377,288 +471,304 @@ static const struct {
{ 1920, 1440, 60, 0 },
{ 1920, 1440, 75, 0 },
};
-static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+static const int num_est3_modes = ARRAY_SIZE(est3_modes);
+
+static const struct minimode extra_modes[] = {
+ { 1024, 576, 60, 0 },
+ { 1366, 768, 60, 0 },
+ { 1600, 900, 60, 0 },
+ { 1680, 945, 60, 0 },
+ { 1920, 1080, 60, 0 },
+ { 2048, 1152, 60, 0 },
+ { 2048, 1536, 60, 0 },
+};
+static const int num_extra_modes = ARRAY_SIZE(extra_modes);
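/*
 * A sketch (assumed usage, not shown in this hunk; the helper name is
 * illustrative only) of how a minimode entry such as the extra_modes
 * above can be expanded into a full drm_display_mode with the CVT
 * timing generator the DRM core already exports as drm_cvt_mode():
 */
static struct drm_display_mode *
example_mode_from_minimode(struct drm_device *dev, const struct minimode *m)
{
	/* m->rb selects CVT reduced blanking; every extra_modes entry
	 * above has rb == 0, so ordinary CVT timings are produced. */
	return drm_cvt_mode(dev, m->w, m->h, m->r, m->rb != 0, false, false);
}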
/*
* Probably taken from CEA-861 spec.
* This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
*/
static const struct drm_display_mode edid_cea_modes[] = {
- /* 640x480@60Hz */
+ /* 1 - 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x480@60Hz */
+ /* 2 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x480@60Hz */
+ /* 3 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x720@60Hz */
+ /* 4 - 1280x720@60Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080i@60Hz */
+ /* 5 - 1920x1080i@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 1440x480i@60Hz */
+ /* 6 - 1440x480i@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x480i@60Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 7 - 1440x480i@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x240@60Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 8 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x240@60Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 9 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x480i@60Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 10 - 2880x480i@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 2880x480i@60Hz */
+ /* 11 - 2880x480i@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 2880x240@60Hz */
+ /* 12 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x240@60Hz */
+ /* 13 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x480@60Hz */
+ /* 14 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x480@60Hz */
+ /* 15 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1080@60Hz */
+ /* 16 - 1920x1080@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x576@50Hz */
+ /* 17 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x576@50Hz */
+ /* 18 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x720@50Hz */
+ /* 19 - 1280x720@50Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080i@50Hz */
+ /* 20 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 1440x576i@50Hz */
+ /* 21 - 1440x576i@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x576i@50Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 22 - 1440x576i@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x288@50Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 23 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x288@50Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 24 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x576i@50Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 25 - 2880x576i@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 2880x576i@50Hz */
+ /* 26 - 2880x576i@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 2880x288@50Hz */
+ /* 27 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x288@50Hz */
+ /* 28 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x576@50Hz */
+ /* 29 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x576@50Hz */
+ /* 30 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1080@50Hz */
+ /* 31 - 1920x1080@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@24Hz */
+ /* 32 - 1920x1080@24Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@25Hz */
+ /* 33 - 1920x1080@25Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@30Hz */
+ /* 34 - 1920x1080@30Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2880x480@60Hz */
+ /* 35 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x480@60Hz */
+ /* 36 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x576@50Hz */
+ /* 37 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x576@50Hz */
+ /* 38 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1080i@50Hz */
+ /* 39 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 1920x1080i@100Hz */
+ /* 40 - 1920x1080i@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 1280x720@100Hz */
+ /* 41 - 1280x720@100Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x576@100Hz */
+ /* 42 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x576@100Hz */
+ /* 43 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x576i@100Hz */
+ /* 44 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x576i@100Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 45 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1080i@120Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 46 - 1920x1080i@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 1280x720@120Hz */
+ /* 47 - 1280x720@120Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x480@120Hz */
+ /* 48 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x480@120Hz */
+ /* 49 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x480i@120Hz */
+ /* 50 - 1440x480i@120Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x480i@120Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 51 - 1440x480i@120Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 720x576@200Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 52 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x576@200Hz */
+ /* 53 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x576i@200Hz */
+ /* 54 - 1440x576i@200Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x576i@200Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 55 - 1440x576i@200Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 720x480@240Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 56 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x480@240Hz */
+ /* 57 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x480i@240 */
+ /* 58 - 1440x480i@240Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x480i@240 */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 59 - 1440x480i@240Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1280x720@24Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 60 - 1280x720@24Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x720@25Hz */
+ /* 61 - 1280x720@25Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x720@30Hz */
+ /* 62 - 1280x720@30Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@120Hz */
+ /* 63 - 1920x1080@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@100Hz */
+ /* 64 - 1920x1080@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
-static const int drm_num_cea_modes =
- sizeof (edid_cea_modes) / sizeof (edid_cea_modes[0]);
+static const int drm_num_cea_modes = ARRAY_SIZE(edid_cea_modes);
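/*
 * The sizeof()/sizeof() conversions above are behavior-preserving:
 * ARRAY_SIZE() is essentially the same division, plus a compile-time
 * check that its argument really is an array.  A minimal sketch of the
 * idiom (simplified; not the exact include/linux/kernel.h definition):
 */
#define EXAMPLE_ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

static const struct minimode example_modes[] = {
	{ 1366,  768, 60, 0 },
	{ 1920, 1080, 60, 0 },
};
/* Evaluates to 2 at compile time, independent of the element size. */
static const int num_example_modes = EXAMPLE_ARRAY_SIZE(example_modes);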
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a0d6e894d97c..5683b7fdd746 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -136,6 +136,9 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
{
uint16_t *r_base, *g_base, *b_base;
+ if (crtc->funcs->gamma_set == NULL)
+ return;
+
r_base = crtc->gamma_store;
g_base = r_base + crtc->gamma_size;
b_base = g_base + crtc->gamma_size;
@@ -383,7 +386,6 @@ int drm_fb_helper_init(struct drm_device *dev,
int crtc_count, int max_conn_count)
{
struct drm_crtc *crtc;
- int ret = 0;
int i;
fb_helper->dev = dev;
@@ -408,10 +410,8 @@ int drm_fb_helper_init(struct drm_device *dev,
sizeof(struct drm_connector *),
GFP_KERNEL);
- if (!fb_helper->crtc_info[i].mode_set.connectors) {
- ret = -ENOMEM;
+ if (!fb_helper->crtc_info[i].mode_set.connectors)
goto out_free;
- }
fb_helper->crtc_info[i].mode_set.num_connectors = 0;
}
@@ -1083,7 +1083,7 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
/* try and find a 1024x768 mode on each connector */
can_clone = true;
- dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
+ dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false);
for (i = 0; i < fb_helper->connector_count; i++) {
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 83114b5e3cee..d58e69da1fb5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -201,6 +201,19 @@ free:
}
EXPORT_SYMBOL(drm_gem_object_alloc);
+static void
+drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
+{
+ if (obj->import_attach) {
+ drm_prime_remove_imported_buf_handle(&filp->prime,
+ obj->import_attach->dmabuf);
+ }
+ if (obj->export_dma_buf) {
+ drm_prime_remove_imported_buf_handle(&filp->prime,
+ obj->export_dma_buf);
+ }
+}
+
/**
* Removes the mapping from handle to filp for this object.
*/
@@ -233,9 +246,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
- if (obj->import_attach)
- drm_prime_remove_imported_buf_handle(&filp->prime,
- obj->import_attach->dmabuf);
+ drm_gem_remove_prime_handles(obj, filp);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, filp);
@@ -272,8 +283,7 @@ again:
spin_unlock(&file_priv->table_lock);
if (ret == -EAGAIN)
goto again;
-
- if (ret != 0)
+ else if (ret)
return ret;
drm_gem_object_handle_reference(obj);
@@ -329,7 +339,7 @@ drm_gem_create_mmap_offset(struct drm_gem_object *obj)
struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list;
struct drm_local_map *map;
- int ret = 0;
+ int ret;
/* Set the object up for mmap'ing */
list = &obj->map_list;
@@ -456,8 +466,7 @@ again:
if (ret == -EAGAIN)
goto again;
-
- if (ret != 0)
+ else if (ret)
goto err;
/* Allocate a reference for the name table. */
@@ -532,9 +541,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
- if (obj->import_attach)
- drm_prime_remove_imported_buf_handle(&file_priv->prime,
- obj->import_attach->dmabuf);
+ drm_gem_remove_prime_handles(obj, file_priv);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
@@ -628,7 +635,7 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
drm_gem_object_reference(obj);
mutex_lock(&obj->dev->struct_mutex);
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(obj->dev, vma);
mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
@@ -639,7 +646,7 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
struct drm_device *dev = obj->dev;
mutex_lock(&dev->struct_mutex);
- drm_vm_close_locked(vma);
+ drm_vm_close_locked(obj->dev, vma);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
}
@@ -712,7 +719,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
*/
drm_gem_object_reference(obj);
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
out_unlock:
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index cf85155da2a0..64a62c697313 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -283,6 +283,10 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CAP_DUMB_PREFER_SHADOW:
req->value = dev->mode_config.prefer_shadow;
break;
+ case DRM_CAP_PRIME:
+ req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
+ req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
+ break;
default:
return -EINVAL;
}
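/*
 * A userspace sketch (assuming libdrm's drmGetCap() and an fd already
 * opened on the DRM device; not part of this patch) of how the new
 * DRM_CAP_PRIME import/export bits can be queried:
 */
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>

static void print_prime_caps(int fd)
{
	uint64_t value = 0;

	if (drmGetCap(fd, DRM_CAP_PRIME, &value))
		return;		/* older kernel: no PRIME capability */

	printf("PRIME import: %s\n",
	       (value & DRM_PRIME_CAP_IMPORT) ? "yes" : "no");
	printf("PRIME export: %s\n",
	       (value & DRM_PRIME_CAP_EXPORT) ? "yes" : "no");
}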
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index c869436e238a..c798eeae0a03 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -189,7 +189,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
if (dev->num_crtcs == 0)
return;
- del_timer(&dev->vblank_disable_timer);
+ del_timer_sync(&dev->vblank_disable_timer);
vblank_disable_fn((unsigned long)dev);
@@ -310,7 +310,7 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
*/
int drm_irq_install(struct drm_device *dev)
{
- int ret = 0;
+ int ret;
unsigned long sh_flags = 0;
char *irqname;
@@ -731,7 +731,7 @@ EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
struct timeval *tvblank, unsigned flags)
{
- int ret = 0;
+ int ret;
/* Define requested maximum error on timestamps (nanoseconds). */
int max_error = (int) drm_timestamp_precision * 1000;
@@ -1031,18 +1031,15 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- int ret = 0;
unsigned int crtc;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
- goto out;
+ return 0;
crtc = modeset->crtc;
- if (crtc >= dev->num_crtcs) {
- ret = -EINVAL;
- goto out;
- }
+ if (crtc >= dev->num_crtcs)
+ return -EINVAL;
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
@@ -1052,12 +1049,10 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
drm_vblank_post_modeset(dev, crtc);
break;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-out:
- return ret;
+ return 0;
}
static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
@@ -1154,7 +1149,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
union drm_wait_vblank *vblwait = data;
- int ret = 0;
+ int ret;
unsigned int flags, seq, crtc, high_crtc;
if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index c79c713eeba0..521152041691 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -331,7 +331,7 @@ static int drm_notifier(void *priv)
void drm_idlelock_take(struct drm_lock_data *lock_data)
{
- int ret = 0;
+ int ret;
spin_lock_bh(&lock_data->spinlock);
lock_data->kernel_waiters++;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1bdf2b54eaf6..f546ff98a114 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -68,6 +68,7 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
{
struct drm_gem_object *obj;
void *buf;
+ int ret;
obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj)
@@ -100,6 +101,17 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
obj->export_dma_buf = buf;
*prime_fd = dma_buf_fd(buf, flags);
}
+ /* if we've exported this buffer then cheat and add it to the import list
+ * so we get the correct handle back
+ */
+ ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+ obj->export_dma_buf, handle);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ mutex_unlock(&file_priv->prime.lock);
+ return ret;
+ }
+
mutex_unlock(&file_priv->prime.lock);
return 0;
}
@@ -227,6 +239,42 @@ out:
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
+/* export an sg table into an array of pages and addresses
+ * this is currently required by the TTM driver in order to do correct fault
+ * handling */
+int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
+ dma_addr_t *addrs, int max_pages)
+{
+ unsigned count;
+ struct scatterlist *sg;
+ struct page *page;
+ u32 len, offset;
+ int pg_index;
+ dma_addr_t addr;
+
+ pg_index = 0;
+ for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+ len = sg->length;
+ offset = sg->offset;
+ page = sg_page(sg);
+ addr = sg_dma_address(sg);
+
+ while (len > 0) {
+ if (WARN_ON(pg_index >= max_pages))
+ return -1;
+ pages[pg_index] = page;
+ if (addrs)
+ addrs[pg_index] = addr;
+
+ page++;
+ addr += PAGE_SIZE;
+ len -= PAGE_SIZE;
+ pg_index++;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
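/*
 * A sketch of a caller (hypothetical wrapper, not in this patch): a
 * TTM-backed driver that imported a dma-buf can flatten the sg table
 * into the per-page arrays its fault handling expects.
 */
static int example_populate_from_sgt(struct sg_table *sgt,
				     struct page **pages,
				     dma_addr_t *dma_addrs,
				     int num_pages)
{
	/* Fills pages[] (and dma_addrs[], if non-NULL) one PAGE_SIZE
	 * entry at a time; returns -1 if sgt describes more than
	 * num_pages pages. */
	return drm_prime_sg_to_page_addr_arrays(sgt, pages, dma_addrs,
						num_pages);
}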
/* helper function to cleanup a GEM/prime object */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index aa454f80e109..21bcd4a555d8 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -122,11 +122,10 @@ again:
ret = idr_get_new_above(&drm_minors_idr, NULL,
base, &new_id);
mutex_unlock(&dev->struct_mutex);
- if (ret == -EAGAIN) {
+ if (ret == -EAGAIN)
goto again;
- } else if (ret) {
+ else if (ret)
return ret;
- }
if (new_id >= limit) {
idr_remove(&drm_minors_idr, new_id);
@@ -211,7 +210,7 @@ EXPORT_SYMBOL(drm_master_put);
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- int ret = 0;
+ int ret;
if (file_priv->is_master)
return 0;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 5a7bd51fc3d8..45cf1dd3eb9c 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -347,17 +347,17 @@ static struct bin_attribute edid_attr = {
};
/**
- * drm_sysfs_connector_add - add an connector to sysfs
+ * drm_sysfs_connector_add - add a connector to sysfs
* @connector: connector to add
*
- * Create an connector device in sysfs, along with its associated connector
+ * Create a connector device in sysfs, along with its associated connector
* properties (so far, connection status, dpms, mode list & edid) and
* generate a hotplug event so userspace knows there's a new connector
* available.
*
* Note:
- * This routine should only be called *once* for each DRM minor registered.
- * A second call for an already registered device will trigger the BUG_ON
+ * This routine should only be called *once* for each registered connector.
+ * A second call for an already registered connector will trigger the BUG_ON
* below.
*/
int drm_sysfs_connector_add(struct drm_connector *connector)
@@ -366,7 +366,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
int attr_cnt = 0;
int opt_cnt = 0;
int i;
- int ret = 0;
+ int ret;
/* We shouldn't get called more than once for the same connector */
BUG_ON(device_is_registered(&connector->kdev));
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 149561818349..961ee08927fe 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -406,10 +406,9 @@ static const struct vm_operations_struct drm_vm_sg_ops = {
* Create a new drm_vma_entry structure as the \p vma private data entry and
* add it to drm_device::vmalist.
*/
-void drm_vm_open_locked(struct vm_area_struct *vma)
+void drm_vm_open_locked(struct drm_device *dev,
+ struct vm_area_struct *vma)
{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *vma_entry;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -430,14 +429,13 @@ static void drm_vm_open(struct vm_area_struct *vma)
struct drm_device *dev = priv->minor->dev;
mutex_lock(&dev->struct_mutex);
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
mutex_unlock(&dev->struct_mutex);
}
-void drm_vm_close_locked(struct vm_area_struct *vma)
+void drm_vm_close_locked(struct drm_device *dev,
+ struct vm_area_struct *vma)
{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *pt, *temp;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -467,7 +465,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
struct drm_device *dev = priv->minor->dev;
mutex_lock(&dev->struct_mutex);
- drm_vm_close_locked(vma);
+ drm_vm_close_locked(dev, vma);
mutex_unlock(&dev->struct_mutex);
}
@@ -519,7 +517,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_flags |= VM_DONTEXPAND;
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
return 0;
}
@@ -670,7 +668,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_flags |= VM_DONTEXPAND;
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 3343ac437fe5..7f5096763b7d 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
+config DRM_EXYNOS_DMABUF
+ bool "EXYNOS DRM DMABUF"
+ depends on DRM_EXYNOS
+ help
+ Choose this option if you want to use DMABUF feature for DRM.
+
config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
depends on DRM_EXYNOS && !FB_S3C
@@ -27,3 +33,9 @@ config DRM_EXYNOS_VIDI
depends on DRM_EXYNOS
help
Choose this option if you want to use Exynos VIDI for DRM.
+
+config DRM_EXYNOS_G2D
+ bool "Exynos DRM G2D"
+ depends on DRM_EXYNOS
+ help
+ Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 9e0bff8badf9..eb651ca8e2a8 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,10 +8,12 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
exynos_ddc.o exynos_hdmiphy.o \
exynos_drm_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index de8d2090bce3..b3cb0a69fbf2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -35,7 +35,7 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
unsigned int flags, struct exynos_drm_gem_buf *buf)
{
dma_addr_t start_addr;
- unsigned int npages, page_size, i = 0;
+ unsigned int npages, i = 0;
struct scatterlist *sgl;
int ret = 0;
@@ -53,13 +53,13 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
if (buf->size >= SZ_1M) {
npages = buf->size >> SECTION_SHIFT;
- page_size = SECTION_SIZE;
+ buf->page_size = SECTION_SIZE;
} else if (buf->size >= SZ_64K) {
npages = buf->size >> 16;
- page_size = SZ_64K;
+ buf->page_size = SZ_64K;
} else {
npages = buf->size >> PAGE_SHIFT;
- page_size = PAGE_SIZE;
+ buf->page_size = PAGE_SIZE;
}
buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
@@ -96,9 +96,9 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
while (i < npages) {
buf->pages[i] = phys_to_page(start_addr);
- sg_set_page(sgl, buf->pages[i], page_size, 0);
+ sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
sg_dma_address(sgl) = start_addr;
- start_addr += page_size;
+ start_addr += buf->page_size;
sgl = sg_next(sgl);
i++;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 3486ffed0bf0..4afb625128d7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -105,6 +105,8 @@ int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
overlay->fb_y = pos->fb_y;
overlay->fb_width = fb->width;
overlay->fb_height = fb->height;
+ overlay->src_width = pos->src_w;
+ overlay->src_height = pos->src_h;
overlay->bpp = fb->bits_per_pixel;
overlay->pitch = fb->pitches[0];
overlay->pixel_format = fb->pixel_format;
@@ -153,6 +155,8 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
pos.crtc_y = 0;
pos.crtc_w = fb->width - crtc->x;
pos.crtc_h = fb->height - crtc->y;
+ pos.src_w = pos.crtc_w;
+ pos.src_h = pos.crtc_h;
return exynos_drm_overlay_update(overlay, crtc->fb, mode, &pos);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 25f72a62cb88..16b8e2195a0d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -42,6 +42,8 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
* - the unit is screen coordinates.
* @fb_y: offset y on a framebuffer to be displayed
* - the unit is screen coordinates.
+ * @src_w: width of source area to be displayed from a framebuffer.
+ * @src_h: height of source area to be displayed from a framebuffer.
* @crtc_x: offset x on hardware screen.
* @crtc_y: offset y on hardware screen.
* @crtc_w: width of hardware screen.
@@ -50,6 +52,8 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
struct exynos_drm_crtc_pos {
unsigned int fb_x;
unsigned int fb_y;
+ unsigned int src_w;
+ unsigned int src_h;
unsigned int crtc_x;
unsigned int crtc_y;
unsigned int crtc_w;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
new file mode 100644
index 000000000000..274909271c36
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -0,0 +1,272 @@
+/* exynos_drm_dmabuf.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+
+#include <linux/dma-buf.h>
+
+static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
+ unsigned int page_size)
+{
+ struct sg_table *sgt = NULL;
+ struct scatterlist *sgl;
+ int i, ret;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ goto out;
+
+ ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
+ if (ret)
+ goto err_free_sgt;
+
+ if (page_size < PAGE_SIZE)
+ page_size = PAGE_SIZE;
+
+ for_each_sg(sgt->sgl, sgl, nr_pages, i)
+ sg_set_page(sgl, pages[i], page_size, 0);
+
+ return sgt;
+
+err_free_sgt:
+ kfree(sgt);
+ sgt = NULL;
+out:
+ return NULL;
+}
+
+static struct sg_table *
+ exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
+ struct drm_device *dev = gem_obj->base.dev;
+ struct exynos_drm_gem_buf *buf;
+ struct sg_table *sgt = NULL;
+ unsigned int npages;
+ int nents;
+
+ DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+ mutex_lock(&dev->struct_mutex);
+
+ buf = gem_obj->buffer;
+
+ /* there should always be pages allocated. */
+ if (!buf->pages) {
+ DRM_ERROR("pages is null.\n");
+ goto err_unlock;
+ }
+
+ npages = buf->size / buf->page_size;
+
+ sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
+ nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
+ DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
+ npages, buf->size, buf->page_size);
+
+err_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return sgt;
+}
+
+static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = NULL;
+}
+
+static void exynos_dmabuf_release(struct dma_buf *dmabuf)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
+
+ DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+ /*
+ * exynos_dmabuf_release() being called means that the file object's
+ * f_count has reached 0, so drop the references that were taken when
+ * this buffer was exported in drm_prime_handle_to_fd().
+ */
+ if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
+ exynos_gem_obj->base.export_dma_buf = NULL;
+
+ /*
+ * drop this gem object refcount to release allocated buffer
+ * and resources.
+ */
+ drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
+ }
+}
+
+static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ /* TODO */
+
+ return NULL;
+}
+
+static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num,
+ void *addr)
+{
+ /* TODO */
+}
+
+static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ /* TODO */
+
+ return NULL;
+}
+
+static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+ /* TODO */
+}
+
+static struct dma_buf_ops exynos_dmabuf_ops = {
+ .map_dma_buf = exynos_gem_map_dma_buf,
+ .unmap_dma_buf = exynos_gem_unmap_dma_buf,
+ .kmap = exynos_gem_dmabuf_kmap,
+ .kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
+ .kunmap = exynos_gem_dmabuf_kunmap,
+ .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
+ .release = exynos_dmabuf_release,
+};
+
+struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
+ struct drm_gem_object *obj, int flags)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
+ exynos_gem_obj->base.size, 0600);
+}
+
+struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct scatterlist *sgl;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_buf *buffer;
+ struct page *page;
+ int ret, i = 0;
+
+ DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+ /* is this one of our own objects? */
+ if (dma_buf->ops == &exynos_dmabuf_ops) {
+ struct drm_gem_object *obj;
+
+ exynos_gem_obj = dma_buf->priv;
+ obj = &exynos_gem_obj->base;
+
+ /* is it from our device? */
+ if (obj->dev == drm_dev) {
+ drm_gem_object_reference(obj);
+ return obj;
+ }
+ }
+
+ attach = dma_buf_attach(dma_buf, drm_dev->dev);
+ if (IS_ERR(attach))
+ return ERR_PTR(-EINVAL);
+
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_buf_detach;
+ }
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
+ ret = -ENOMEM;
+ goto err_unmap_attach;
+ }
+
+ buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
+ if (!buffer->pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ ret = -ENOMEM;
+ goto err_free_buffer;
+ }
+
+ exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
+ if (!exynos_gem_obj) {
+ ret = -ENOMEM;
+ goto err_free_pages;
+ }
+
+ sgl = sgt->sgl;
+ buffer->dma_addr = sg_dma_address(sgl);
+
+ while (i < sgt->nents) {
+ buffer->pages[i] = sg_page(sgl);
+ buffer->size += sg_dma_len(sgl);
+ sgl = sg_next(sgl);
+ i++;
+ }
+
+ exynos_gem_obj->buffer = buffer;
+ buffer->sgt = sgt;
+ exynos_gem_obj->base.import_attach = attach;
+
+ DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
+ buffer->size);
+
+ return &exynos_gem_obj->base;
+
+err_free_pages:
+ kfree(buffer->pages);
+ buffer->pages = NULL;
+err_free_buffer:
+ kfree(buffer);
+ buffer = NULL;
+err_unmap_attach:
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+err_buf_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
+MODULE_LICENSE("GPL");
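/*
 * A userspace sketch of the export/import round trip this file enables
 * (libdrm's drmPrimeHandleToFD()/drmPrimeFDToHandle() are assumed; the
 * helper name and handle values are illustrative): a GEM handle becomes
 * a dma-buf fd on one device and a handle again on another, possibly in
 * a different process.
 */
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

static int example_share_buffer(int export_fd, uint32_t handle,
				int import_fd, uint32_t *out_handle)
{
	int prime_fd;
	int ret;

	ret = drmPrimeHandleToFD(export_fd, handle, DRM_CLOEXEC, &prime_fd);
	if (ret)
		return ret;

	ret = drmPrimeFDToHandle(import_fd, prime_fd, out_handle);
	close(prime_fd);	/* the GEM handle keeps the buffer alive */
	return ret;
}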
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
new file mode 100644
index 000000000000..662a8f98ccdb
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
@@ -0,0 +1,39 @@
+/* exynos_drm_dmabuf.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_DMABUF_H_
+#define _EXYNOS_DRM_DMABUF_H_
+
+#ifdef CONFIG_DRM_EXYNOS_DMABUF
+struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
+ struct drm_gem_object *obj, int flags);
+
+struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
+ struct dma_buf *dma_buf);
+#else
+#define exynos_dmabuf_prime_export NULL
+#define exynos_dmabuf_prime_import NULL
+#endif
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index a6819b5f8428..420953197d0a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -39,6 +39,8 @@
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
+#include "exynos_drm_dmabuf.h"
+#include "exynos_drm_g2d.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -147,8 +149,17 @@ static int exynos_drm_unload(struct drm_device *dev)
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
+ struct drm_exynos_file_private *file_priv;
+
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ drm_prime_init_file_private(&file->prime);
+ file->driver_priv = file_priv;
+
return exynos_drm_subdrv_open(dev, file);
}
@@ -170,6 +181,7 @@ static void exynos_drm_preclose(struct drm_device *dev,
e->base.destroy(&e->base);
}
}
+ drm_prime_destroy_file_private(&file->prime);
spin_unlock_irqrestore(&dev->event_lock, flags);
exynos_drm_subdrv_close(dev, file);
@@ -193,7 +205,7 @@ static void exynos_drm_lastclose(struct drm_device *dev)
exynos_drm_fbdev_restore_mode(dev);
}
-static struct vm_operations_struct exynos_drm_gem_vm_ops = {
+static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
.fault = exynos_drm_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -207,10 +219,18 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
+ exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER,
+ exynos_g2d_get_ver_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST,
+ exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
+ exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -225,7 +245,7 @@ static const struct file_operations exynos_drm_driver_fops = {
static struct drm_driver exynos_drm_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM |
- DRIVER_MODESET | DRIVER_GEM,
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = exynos_drm_load,
.unload = exynos_drm_unload,
.open = exynos_drm_open,
@@ -241,6 +261,10 @@ static struct drm_driver exynos_drm_driver = {
.dumb_create = exynos_drm_gem_dumb_create,
.dumb_map_offset = exynos_drm_gem_dumb_map_offset,
.dumb_destroy = exynos_drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = exynos_dmabuf_prime_export,
+ .gem_prime_import = exynos_dmabuf_prime_import,
.ioctls = exynos_ioctls,
.fops = &exynos_drm_driver_fops,
.name = DRIVER_NAME,
@@ -307,6 +331,12 @@ static int __init exynos_drm_init(void)
goto out_vidi;
#endif
+#ifdef CONFIG_DRM_EXYNOS_G2D
+ ret = platform_driver_register(&g2d_driver);
+ if (ret < 0)
+ goto out_g2d;
+#endif
+
ret = platform_driver_register(&exynos_drm_platform_driver);
if (ret < 0)
goto out;
@@ -314,6 +344,11 @@ static int __init exynos_drm_init(void)
return 0;
out:
+#ifdef CONFIG_DRM_EXYNOS_G2D
+ platform_driver_unregister(&g2d_driver);
+out_g2d:
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_VIDI
out_vidi:
platform_driver_unregister(&vidi_driver);
@@ -341,6 +376,10 @@ static void __exit exynos_drm_exit(void)
platform_driver_unregister(&exynos_drm_platform_driver);
+#ifdef CONFIG_DRM_EXYNOS_G2D
+ platform_driver_unregister(&g2d_driver);
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_HDMI
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
platform_driver_unregister(&mixer_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 1d814175cd49..c82c90c443e7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -77,6 +77,8 @@ struct exynos_drm_overlay_ops {
* - the unit is screen coordinates.
* @fb_width: width of a framebuffer.
* @fb_height: height of a framebuffer.
+ * @src_width: width of a partial image to be displayed from framebuffer.
+ * @src_height: height of a partial image to be displayed from framebuffer.
* @crtc_x: offset x on hardware screen.
* @crtc_y: offset y on hardware screen.
* @crtc_width: window width to be displayed (hardware screen).
@@ -108,6 +110,8 @@ struct exynos_drm_overlay {
unsigned int fb_y;
unsigned int fb_width;
unsigned int fb_height;
+ unsigned int src_width;
+ unsigned int src_height;
unsigned int crtc_x;
unsigned int crtc_y;
unsigned int crtc_width;
@@ -205,6 +209,18 @@ struct exynos_drm_manager {
struct exynos_drm_display_ops *display_ops;
};
+struct exynos_drm_g2d_private {
+ struct device *dev;
+ struct list_head inuse_cmdlist;
+ struct list_head event_list;
+ struct list_head gem_list;
+ unsigned int gem_nr;
+};
+
+struct drm_exynos_file_private {
+ struct exynos_drm_g2d_private *g2d_priv;
+};
+
/*
* Exynos drm private structure.
*/
@@ -287,4 +303,5 @@ extern struct platform_driver hdmi_driver;
extern struct platform_driver mixer_driver;
extern struct platform_driver exynos_drm_common_hdmi_driver;
extern struct platform_driver vidi_driver;
+extern struct platform_driver g2d_driver;
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index c38c8f468fa3..f82a299553fb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -191,7 +191,7 @@ static void exynos_drm_output_poll_changed(struct drm_device *dev)
drm_fb_helper_hotplug_event(fb_helper);
}
-static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
+static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
.output_poll_changed = exynos_drm_output_poll_changed,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
new file mode 100644
index 000000000000..d2d88f22a037
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -0,0 +1,937 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "drmP.h"
+#include "exynos_drm.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+
+#define G2D_HW_MAJOR_VER 4
+#define G2D_HW_MINOR_VER 1
+
+/* valid register range set from user: 0x0104 ~ 0x0880 */
+#define G2D_VALID_START 0x0104
+#define G2D_VALID_END 0x0880
+
+/* general registers */
+#define G2D_SOFT_RESET 0x0000
+#define G2D_INTEN 0x0004
+#define G2D_INTC_PEND 0x000C
+#define G2D_DMA_SFR_BASE_ADDR 0x0080
+#define G2D_DMA_COMMAND 0x0084
+#define G2D_DMA_STATUS 0x008C
+#define G2D_DMA_HOLD_CMD 0x0090
+
+/* command registers */
+#define G2D_BITBLT_START 0x0100
+
+/* registers for base address */
+#define G2D_SRC_BASE_ADDR 0x0304
+#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
+#define G2D_DST_BASE_ADDR 0x0404
+#define G2D_DST_PLANE2_BASE_ADDR 0x0418
+#define G2D_PAT_BASE_ADDR 0x0500
+#define G2D_MSK_BASE_ADDR 0x0520
+
+/* G2D_SOFT_RESET */
+#define G2D_SFRCLEAR (1 << 1)
+#define G2D_R (1 << 0)
+
+/* G2D_INTEN */
+#define G2D_INTEN_ACF (1 << 3)
+#define G2D_INTEN_UCF (1 << 2)
+#define G2D_INTEN_GCF (1 << 1)
+#define G2D_INTEN_SCF (1 << 0)
+
+/* G2D_INTC_PEND */
+#define G2D_INTP_ACMD_FIN (1 << 3)
+#define G2D_INTP_UCMD_FIN (1 << 2)
+#define G2D_INTP_GCMD_FIN (1 << 1)
+#define G2D_INTP_SCMD_FIN (1 << 0)
+
+/* G2D_DMA_COMMAND */
+#define G2D_DMA_HALT (1 << 2)
+#define G2D_DMA_CONTINUE (1 << 1)
+#define G2D_DMA_START (1 << 0)
+
+/* G2D_DMA_STATUS */
+#define G2D_DMA_LIST_DONE_COUNT (0xFF << 17)
+#define G2D_DMA_BITBLT_DONE_COUNT (0xFFFF << 1)
+#define G2D_DMA_DONE (1 << 0)
+#define G2D_DMA_LIST_DONE_COUNT_OFFSET 17
+
+/* G2D_DMA_HOLD_CMD */
+#define G2D_USET_HOLD (1 << 2)
+#define G2D_LIST_HOLD (1 << 1)
+#define G2D_BITBLT_HOLD (1 << 0)
+
+/* G2D_BITBLT_START */
+#define G2D_START_CASESEL (1 << 2)
+#define G2D_START_NHOLT (1 << 1)
+#define G2D_START_BITBLT (1 << 0)
+
+#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4)
+#define G2D_CMDLIST_NUM 64
+#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
+#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
+
+/* cmdlist data structure */
+struct g2d_cmdlist {
+ u32 head;
+ u32 data[G2D_CMDLIST_DATA_NUM];
+ u32 last; /* last data offset */
+};
+
+struct drm_exynos_pending_g2d_event {
+ struct drm_pending_event base;
+ struct drm_exynos_g2d_event event;
+};
+
+struct g2d_gem_node {
+ struct list_head list;
+ unsigned int handle;
+};
+
+struct g2d_cmdlist_node {
+ struct list_head list;
+ struct g2d_cmdlist *cmdlist;
+ unsigned int gem_nr;
+ dma_addr_t dma_addr;
+
+ struct drm_exynos_pending_g2d_event *event;
+};
+
+struct g2d_runqueue_node {
+ struct list_head list;
+ struct list_head run_cmdlist;
+ struct list_head event_list;
+ struct completion complete;
+ int async;
+};
+
+struct g2d_data {
+ struct device *dev;
+ struct clk *gate_clk;
+ struct resource *regs_res;
+ void __iomem *regs;
+ int irq;
+ struct workqueue_struct *g2d_workq;
+ struct work_struct runqueue_work;
+ struct exynos_drm_subdrv subdrv;
+ bool suspended;
+
+ /* cmdlist */
+ struct g2d_cmdlist_node *cmdlist_node;
+ struct list_head free_cmdlist;
+ struct mutex cmdlist_mutex;
+ dma_addr_t cmdlist_pool;
+ void *cmdlist_pool_virt;
+
+	/* runqueue */
+ struct g2d_runqueue_node *runqueue_node;
+ struct list_head runqueue;
+ struct mutex runqueue_mutex;
+ struct kmem_cache *runqueue_slab;
+};
+
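+/*
+ * Allocate one DMA-coherent pool for all command lists, carve it up into
+ * G2D_CMDLIST_NUM fixed-size slots and put a node for each slot on the
+ * free list.
+ */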
+static int g2d_init_cmdlist(struct g2d_data *g2d)
+{
+ struct device *dev = g2d->dev;
+ struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+ int nr;
+ int ret;
+
+ g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
+ &g2d->cmdlist_pool, GFP_KERNEL);
+ if (!g2d->cmdlist_pool_virt) {
+ dev_err(dev, "failed to allocate dma memory\n");
+ return -ENOMEM;
+ }
+
+	node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
+	if (!node) {
+		dev_err(dev, "failed to allocate memory\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* keep the node array so that g2d_fini_cmdlist() can free it */
+	g2d->cmdlist_node = node;
+
+ for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
+ node[nr].cmdlist =
+ g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
+ node[nr].dma_addr =
+ g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
+
+ list_add_tail(&node[nr].list, &g2d->free_cmdlist);
+ }
+
+ return 0;
+
+err:
+ dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool);
+ return ret;
+}
+
+static void g2d_fini_cmdlist(struct g2d_data *g2d)
+{
+ struct device *dev = g2d->dev;
+
+ kfree(g2d->cmdlist_node);
+ dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool);
+}
+
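+/* Take a command list node off the free list; returns NULL if none is left. */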
+static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
+{
+ struct device *dev = g2d->dev;
+ struct g2d_cmdlist_node *node;
+
+ mutex_lock(&g2d->cmdlist_mutex);
+ if (list_empty(&g2d->free_cmdlist)) {
+ dev_err(dev, "there is no free cmdlist\n");
+ mutex_unlock(&g2d->cmdlist_mutex);
+ return NULL;
+ }
+
+ node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
+ list);
+ list_del_init(&node->list);
+ mutex_unlock(&g2d->cmdlist_mutex);
+
+ return node;
+}
+
+static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
+{
+ mutex_lock(&g2d->cmdlist_mutex);
+ list_move_tail(&node->list, &g2d->free_cmdlist);
+ mutex_unlock(&g2d->cmdlist_mutex);
+}
+
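+/*
+ * Queue a cmdlist on the file's in-use list. The tail slot of the previous
+ * cmdlist is patched with the DMA address of the new one so the hardware can
+ * chain them, and a pending completion event, if any, is queued as well.
+ */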
+static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
+ struct g2d_cmdlist_node *node)
+{
+ struct g2d_cmdlist_node *lnode;
+
+ if (list_empty(&g2d_priv->inuse_cmdlist))
+ goto add_to_list;
+
+ /* this links to base address of new cmdlist */
+ lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
+ struct g2d_cmdlist_node, list);
+ lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
+
+add_to_list:
+ list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);
+
+ if (node->event)
+ list_add_tail(&node->event->base.link, &g2d_priv->event_list);
+}
+
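+/*
+ * Resolve the GEM handles stored at the end of the cmdlist into device
+ * addresses and remember each handle so that it can be released later.
+ */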
+static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct g2d_cmdlist_node *node)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_cmdlist *cmdlist = node->cmdlist;
+ dma_addr_t *addr;
+ int offset;
+ int i;
+
+ for (i = 0; i < node->gem_nr; i++) {
+ struct g2d_gem_node *gem_node;
+
+ gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
+ if (!gem_node) {
+ dev_err(g2d_priv->dev, "failed to allocate gem node\n");
+ return -ENOMEM;
+ }
+
+ offset = cmdlist->last - (i * 2 + 1);
+ gem_node->handle = cmdlist->data[offset];
+
+ addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
+ file);
+ if (IS_ERR(addr)) {
+ node->gem_nr = i;
+ kfree(gem_node);
+ return PTR_ERR(addr);
+ }
+
+ cmdlist->data[offset] = *addr;
+ list_add_tail(&gem_node->list, &g2d_priv->gem_list);
+ g2d_priv->gem_nr++;
+ }
+
+ return 0;
+}
+
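+/* Unmap and release the @nr most recently mapped GEM buffers of this file. */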
+static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
+ struct drm_file *file,
+ unsigned int nr)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_gem_node *node, *n;
+
+ list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
+ if (!nr)
+ break;
+
+ exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
+ list_del_init(&node->list);
+ kfree(node);
+ nr--;
+ }
+}
+
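+/*
+ * Power up the engine, enable its interrupts and start list DMA at the first
+ * cmdlist of the given runqueue node.
+ */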
+static void g2d_dma_start(struct g2d_data *g2d,
+ struct g2d_runqueue_node *runqueue_node)
+{
+ struct g2d_cmdlist_node *node =
+ list_first_entry(&runqueue_node->run_cmdlist,
+ struct g2d_cmdlist_node, list);
+
+ pm_runtime_get_sync(g2d->dev);
+ clk_enable(g2d->gate_clk);
+
+ /* interrupt enable */
+ writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF,
+ g2d->regs + G2D_INTEN);
+
+ writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
+ writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
+}
+
+static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
+{
+ struct g2d_runqueue_node *runqueue_node;
+
+ if (list_empty(&g2d->runqueue))
+ return NULL;
+
+ runqueue_node = list_first_entry(&g2d->runqueue,
+ struct g2d_runqueue_node, list);
+ list_del_init(&runqueue_node->list);
+ return runqueue_node;
+}
+
+static void g2d_free_runqueue_node(struct g2d_data *g2d,
+ struct g2d_runqueue_node *runqueue_node)
+{
+ if (!runqueue_node)
+ return;
+
+ mutex_lock(&g2d->cmdlist_mutex);
+ list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
+ mutex_unlock(&g2d->cmdlist_mutex);
+
+ kmem_cache_free(g2d->runqueue_slab, runqueue_node);
+}
+
+static void g2d_exec_runqueue(struct g2d_data *g2d)
+{
+ g2d->runqueue_node = g2d_get_runqueue_node(g2d);
+ if (g2d->runqueue_node)
+ g2d_dma_start(g2d, g2d->runqueue_node);
+}
+
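+/*
+ * Runs once the engine has finished all cmdlists of the current runqueue
+ * node: drop the clock and runtime PM reference, complete the node (freeing
+ * it if it was asynchronous) and start the next node unless suspending.
+ */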
+static void g2d_runqueue_worker(struct work_struct *work)
+{
+ struct g2d_data *g2d = container_of(work, struct g2d_data,
+ runqueue_work);
+
+ mutex_lock(&g2d->runqueue_mutex);
+ clk_disable(g2d->gate_clk);
+ pm_runtime_put_sync(g2d->dev);
+
+ complete(&g2d->runqueue_node->complete);
+ if (g2d->runqueue_node->async)
+ g2d_free_runqueue_node(g2d, g2d->runqueue_node);
+
+ if (g2d->suspended)
+ g2d->runqueue_node = NULL;
+ else
+ g2d_exec_runqueue(g2d);
+ mutex_unlock(&g2d->runqueue_mutex);
+}
+
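+/* Fill in and deliver the oldest pending event of the current runqueue node. */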
+static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
+{
+ struct drm_device *drm_dev = g2d->subdrv.drm_dev;
+ struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
+ struct drm_exynos_pending_g2d_event *e;
+ struct timeval now;
+ unsigned long flags;
+
+ if (list_empty(&runqueue_node->event_list))
+ return;
+
+ e = list_first_entry(&runqueue_node->event_list,
+ struct drm_exynos_pending_g2d_event, base.link);
+
+ do_gettimeofday(&now);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ e->event.cmdlist_no = cmdlist_no;
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+}
+
+static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
+{
+ struct g2d_data *g2d = dev_id;
+ u32 pending;
+
+ pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
+ if (pending)
+ writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);
+
+ if (pending & G2D_INTP_GCMD_FIN) {
+ u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);
+
+ cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
+ G2D_DMA_LIST_DONE_COUNT_OFFSET;
+
+ g2d_finish_event(g2d, cmdlist_no);
+
+ writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
+ if (!(pending & G2D_INTP_ACMD_FIN)) {
+ writel_relaxed(G2D_DMA_CONTINUE,
+ g2d->regs + G2D_DMA_COMMAND);
+ }
+ }
+
+ if (pending & G2D_INTP_ACMD_FIN)
+ queue_work(g2d->g2d_workq, &g2d->runqueue_work);
+
+ return IRQ_HANDLED;
+}
+
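+/*
+ * Validate the register offsets supplied by userspace: each must be word
+ * aligned and inside the valid SFR range, and the base address registers are
+ * only accepted when for_addr is set (all other registers only when it is
+ * not).
+ */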
+static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
+ int nr, bool for_addr)
+{
+ int reg_offset;
+ int index;
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ index = cmdlist->last - 2 * (i + 1);
+ reg_offset = cmdlist->data[index] & ~0xfffff000;
+
+ if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
+ goto err;
+ if (reg_offset % 4)
+ goto err;
+
+ switch (reg_offset) {
+ case G2D_SRC_BASE_ADDR:
+ case G2D_SRC_PLANE2_BASE_ADDR:
+ case G2D_DST_BASE_ADDR:
+ case G2D_DST_PLANE2_BASE_ADDR:
+ case G2D_PAT_BASE_ADDR:
+ case G2D_MSK_BASE_ADDR:
+ if (!for_addr)
+ goto err;
+ break;
+ default:
+ if (for_addr)
+ goto err;
+ break;
+ }
+ }
+
+ return 0;
+
+err:
+ dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
+ return -EINVAL;
+}
+
+/* ioctl functions */
+int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_g2d_get_ver *ver = data;
+
+ ver->major = G2D_HW_MAJOR_VER;
+ ver->minor = G2D_HW_MINOR_VER;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
+
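+/*
+ * Build one command list from the user request: an SFR clear preamble, the
+ * user supplied register/value pairs, the GEM-backed base address commands
+ * and a final bitblt start, then queue it on the caller's in-use list. An
+ * optional completion event is registered as well.
+ */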
+int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct device *dev = g2d_priv->dev;
+ struct g2d_data *g2d;
+ struct drm_exynos_g2d_set_cmdlist *req = data;
+ struct drm_exynos_g2d_cmd *cmd;
+ struct drm_exynos_pending_g2d_event *e;
+ struct g2d_cmdlist_node *node;
+ struct g2d_cmdlist *cmdlist;
+ unsigned long flags;
+ int size;
+ int ret;
+
+ if (!dev)
+ return -ENODEV;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
+ node = g2d_get_cmdlist(g2d);
+ if (!node)
+ return -ENOMEM;
+
+ node->event = NULL;
+
+ if (req->event_type != G2D_EVENT_NOT) {
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ if (file->event_space < sizeof(e->event)) {
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ ret = -ENOMEM;
+ goto err;
+ }
+ file->event_space -= sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ e = kzalloc(sizeof(*node->event), GFP_KERNEL);
+ if (!e) {
+ dev_err(dev, "failed to allocate event\n");
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ file->event_space += sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ e->event.base.type = DRM_EXYNOS_G2D_EVENT;
+ e->event.base.length = sizeof(e->event);
+ e->event.user_data = req->user_data;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file;
+ e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+ node->event = e;
+ }
+
+ cmdlist = node->cmdlist;
+
+ cmdlist->last = 0;
+
+	/*
+	 * If the SFR registers are not cleared, this cmdlist is affected by
+	 * register values left over from the previous cmdlist. The G2D
+	 * hardware executes the SFR clear command and the command right
+	 * after it at the same time; that next command would therefore be
+	 * skipped and execution would only resume at the one after it, so a
+	 * dummy command is inserted directly after the SFR clear command.
+	 */
+ cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
+ cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
+ cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
+ cmdlist->data[cmdlist->last++] = 0;
+
+ if (node->event) {
+ cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
+ cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
+ }
+
+	/* check the cmdlist size: the final two entries hold G2D_BITBLT_START */
+ size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
+ if (size > G2D_CMDLIST_DATA_NUM) {
+ dev_err(dev, "cmdlist size is too big\n");
+ ret = -EINVAL;
+ goto err_free_event;
+ }
+
+ cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd;
+
+ if (copy_from_user(cmdlist->data + cmdlist->last,
+ (void __user *)cmd,
+ sizeof(*cmd) * req->cmd_nr)) {
+ ret = -EFAULT;
+ goto err_free_event;
+ }
+ cmdlist->last += req->cmd_nr * 2;
+
+ ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
+ if (ret < 0)
+ goto err_free_event;
+
+ node->gem_nr = req->cmd_gem_nr;
+ if (req->cmd_gem_nr) {
+ struct drm_exynos_g2d_cmd *cmd_gem;
+
+ cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
+
+ if (copy_from_user(cmdlist->data + cmdlist->last,
+ (void __user *)cmd_gem,
+ sizeof(*cmd_gem) * req->cmd_gem_nr)) {
+ ret = -EFAULT;
+ goto err_free_event;
+ }
+ cmdlist->last += req->cmd_gem_nr * 2;
+
+ ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
+ if (ret < 0)
+ goto err_free_event;
+
+ ret = g2d_get_cmdlist_gem(drm_dev, file, node);
+ if (ret < 0)
+ goto err_unmap;
+ }
+
+ cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
+ cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;
+
+ /* head */
+ cmdlist->head = cmdlist->last / 2;
+
+ /* tail */
+ cmdlist->data[cmdlist->last] = 0;
+
+ g2d_add_cmdlist_to_inuse(g2d_priv, node);
+
+ return 0;
+
+err_unmap:
+ g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
+err_free_event:
+ if (node->event) {
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ file->event_space += sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ kfree(node->event);
+ }
+err:
+ g2d_put_cmdlist(g2d, node);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
+
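+/*
+ * Move the caller's queued cmdlists into a new runqueue node, start the
+ * engine if it is idle and, unless asynchronous execution was requested,
+ * wait for the node to complete.
+ */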
+int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct device *dev = g2d_priv->dev;
+ struct g2d_data *g2d;
+ struct drm_exynos_g2d_exec *req = data;
+ struct g2d_runqueue_node *runqueue_node;
+ struct list_head *run_cmdlist;
+ struct list_head *event_list;
+
+ if (!dev)
+ return -ENODEV;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
+ runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
+ if (!runqueue_node) {
+ dev_err(dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+ run_cmdlist = &runqueue_node->run_cmdlist;
+ event_list = &runqueue_node->event_list;
+ INIT_LIST_HEAD(run_cmdlist);
+ INIT_LIST_HEAD(event_list);
+ init_completion(&runqueue_node->complete);
+ runqueue_node->async = req->async;
+
+ list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
+ list_splice_init(&g2d_priv->event_list, event_list);
+
+ if (list_empty(run_cmdlist)) {
+ dev_err(dev, "there is no inuse cmdlist\n");
+ kmem_cache_free(g2d->runqueue_slab, runqueue_node);
+ return -EPERM;
+ }
+
+ mutex_lock(&g2d->runqueue_mutex);
+ list_add_tail(&runqueue_node->list, &g2d->runqueue);
+ if (!g2d->runqueue_node)
+ g2d_exec_runqueue(g2d);
+ mutex_unlock(&g2d->runqueue_mutex);
+
+ if (runqueue_node->async)
+ goto out;
+
+ wait_for_completion(&runqueue_node->complete);
+ g2d_free_runqueue_node(g2d, runqueue_node);
+
+out:
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
+
+static int g2d_open(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv;
+
+ g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
+ if (!g2d_priv) {
+ dev_err(dev, "failed to allocate g2d private data\n");
+ return -ENOMEM;
+ }
+
+ g2d_priv->dev = dev;
+ file_priv->g2d_priv = g2d_priv;
+
+ INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
+ INIT_LIST_HEAD(&g2d_priv->event_list);
+ INIT_LIST_HEAD(&g2d_priv->gem_list);
+
+ return 0;
+}
+
+static void g2d_close(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_data *g2d;
+ struct g2d_cmdlist_node *node, *n;
+
+ if (!dev)
+ return;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return;
+
+ mutex_lock(&g2d->cmdlist_mutex);
+ list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
+ list_move_tail(&node->list, &g2d->free_cmdlist);
+ mutex_unlock(&g2d->cmdlist_mutex);
+
+ g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
+
+ kfree(file_priv->g2d_priv);
+}
+
+static int __devinit g2d_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct g2d_data *g2d;
+ struct exynos_drm_subdrv *subdrv;
+ int ret;
+
+ g2d = kzalloc(sizeof(*g2d), GFP_KERNEL);
+ if (!g2d) {
+ dev_err(dev, "failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
+ sizeof(struct g2d_runqueue_node), 0, 0, NULL);
+ if (!g2d->runqueue_slab) {
+ ret = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ g2d->dev = dev;
+
+ g2d->g2d_workq = create_singlethread_workqueue("g2d");
+ if (!g2d->g2d_workq) {
+ dev_err(dev, "failed to create workqueue\n");
+ ret = -EINVAL;
+ goto err_destroy_slab;
+ }
+
+ INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
+ INIT_LIST_HEAD(&g2d->free_cmdlist);
+ INIT_LIST_HEAD(&g2d->runqueue);
+
+ mutex_init(&g2d->cmdlist_mutex);
+ mutex_init(&g2d->runqueue_mutex);
+
+ ret = g2d_init_cmdlist(g2d);
+ if (ret < 0)
+ goto err_destroy_workqueue;
+
+ g2d->gate_clk = clk_get(dev, "fimg2d");
+ if (IS_ERR(g2d->gate_clk)) {
+ dev_err(dev, "failed to get gate clock\n");
+ ret = PTR_ERR(g2d->gate_clk);
+ goto err_fini_cmdlist;
+ }
+
+ pm_runtime_enable(dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get I/O memory\n");
+ ret = -ENOENT;
+ goto err_put_clk;
+ }
+
+ g2d->regs_res = request_mem_region(res->start, resource_size(res),
+ dev_name(dev));
+ if (!g2d->regs_res) {
+ dev_err(dev, "failed to request I/O memory\n");
+ ret = -ENOENT;
+ goto err_put_clk;
+ }
+
+ g2d->regs = ioremap(res->start, resource_size(res));
+ if (!g2d->regs) {
+ dev_err(dev, "failed to remap I/O memory\n");
+ ret = -ENXIO;
+ goto err_release_res;
+ }
+
+ g2d->irq = platform_get_irq(pdev, 0);
+ if (g2d->irq < 0) {
+ dev_err(dev, "failed to get irq\n");
+ ret = g2d->irq;
+ goto err_unmap_base;
+ }
+
+ ret = request_irq(g2d->irq, g2d_irq_handler, 0, "drm_g2d", g2d);
+ if (ret < 0) {
+ dev_err(dev, "irq request failed\n");
+ goto err_unmap_base;
+ }
+
+ platform_set_drvdata(pdev, g2d);
+
+ subdrv = &g2d->subdrv;
+ subdrv->dev = dev;
+ subdrv->open = g2d_open;
+ subdrv->close = g2d_close;
+
+ ret = exynos_drm_subdrv_register(subdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm g2d device\n");
+ goto err_free_irq;
+ }
+
+	dev_info(dev, "The exynos g2d (ver %d.%d) successfully probed\n",
+ G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
+
+ return 0;
+
+err_free_irq:
+ free_irq(g2d->irq, g2d);
+err_unmap_base:
+ iounmap(g2d->regs);
+err_release_res:
+ release_resource(g2d->regs_res);
+ kfree(g2d->regs_res);
+err_put_clk:
+ pm_runtime_disable(dev);
+ clk_put(g2d->gate_clk);
+err_fini_cmdlist:
+ g2d_fini_cmdlist(g2d);
+err_destroy_workqueue:
+ destroy_workqueue(g2d->g2d_workq);
+err_destroy_slab:
+ kmem_cache_destroy(g2d->runqueue_slab);
+err_free_mem:
+ kfree(g2d);
+ return ret;
+}
+
+static int __devexit g2d_remove(struct platform_device *pdev)
+{
+ struct g2d_data *g2d = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&g2d->runqueue_work);
+ exynos_drm_subdrv_unregister(&g2d->subdrv);
+ free_irq(g2d->irq, g2d);
+
+ while (g2d->runqueue_node) {
+ g2d_free_runqueue_node(g2d, g2d->runqueue_node);
+ g2d->runqueue_node = g2d_get_runqueue_node(g2d);
+ }
+
+ iounmap(g2d->regs);
+ release_resource(g2d->regs_res);
+ kfree(g2d->regs_res);
+
+ pm_runtime_disable(&pdev->dev);
+ clk_put(g2d->gate_clk);
+
+ g2d_fini_cmdlist(g2d);
+ destroy_workqueue(g2d->g2d_workq);
+ kmem_cache_destroy(g2d->runqueue_slab);
+ kfree(g2d);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int g2d_suspend(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+
+ mutex_lock(&g2d->runqueue_mutex);
+ g2d->suspended = true;
+ mutex_unlock(&g2d->runqueue_mutex);
+
+ while (g2d->runqueue_node)
+ /* FIXME: good range? */
+ usleep_range(500, 1000);
+
+ flush_work_sync(&g2d->runqueue_work);
+
+ return 0;
+}
+
+static int g2d_resume(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+
+ g2d->suspended = false;
+ g2d_exec_runqueue(g2d);
+
+ return 0;
+}
+#endif
+
+SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+
+struct platform_driver g2d_driver = {
+ .probe = g2d_probe,
+ .remove = __devexit_p(g2d_remove),
+ .driver = {
+ .name = "s5p-g2d",
+ .owner = THIS_MODULE,
+ .pm = &g2d_pm_ops,
+ },
+};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
new file mode 100644
index 000000000000..1a9c7ca8c15b
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_DRM_EXYNOS_G2D
+extern int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+#else
+static inline int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 1dffa8359f88..fc91293c4560 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -66,6 +66,22 @@ static int check_gem_flags(unsigned int flags)
return 0;
}
+static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
+ struct vm_area_struct *vma)
+{
+ DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+
+	/* non-cacheable by default. */
+ if (obj->flags & EXYNOS_BO_CACHABLE)
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ else if (obj->flags & EXYNOS_BO_WC)
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ else
+ vma->vm_page_prot =
+ pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+}
+
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
if (!IS_NONCONTIG_BUFFER(flags)) {
@@ -80,7 +96,7 @@ out:
return roundup(size, PAGE_SIZE);
}
-static struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
+struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
{
struct inode *inode;
@@ -180,6 +196,7 @@ static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
}
npages = obj->size >> PAGE_SHIFT;
+ buf->page_size = PAGE_SIZE;
buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!buf->sgt) {
@@ -262,24 +279,24 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
struct drm_gem_object *obj;
+ struct exynos_drm_gem_buf *buf;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (!exynos_gem_obj)
- return;
-
obj = &exynos_gem_obj->base;
+ buf = exynos_gem_obj->buffer;
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
- if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
- exynos_gem_obj->buffer->pages)
+ if (!buf->pages)
+ return;
+
+ if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
exynos_drm_gem_put_pages(obj);
else
- exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags,
- exynos_gem_obj->buffer);
+ exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
- exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer);
+ exynos_drm_fini_buf(obj->dev, buf);
exynos_gem_obj->buffer = NULL;
if (obj->map_list.map)
@@ -292,7 +309,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
exynos_gem_obj = NULL;
}
-static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
@@ -493,8 +510,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
vma->vm_flags |= (VM_IO | VM_RESERVED);
- /* in case of direct mapping, always having non-cachable attribute */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ update_vm_cache_attr(exynos_gem_obj, vma);
vm_size = usize = vma->vm_end - vma->vm_start;
@@ -588,6 +604,32 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
return 0;
}
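+/* Report a GEM object's flags and size back to userspace. */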
+int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_exynos_gem_info *args = data;
+ struct drm_gem_object *obj;
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ args->flags = exynos_gem_obj->flags;
+ args->size = exynos_gem_obj->size;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -597,8 +639,17 @@ int exynos_drm_gem_init_object(struct drm_gem_object *obj)
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_buf *buf;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+ buf = exynos_gem_obj->buffer;
+
+ if (obj->import_attach)
+ drm_prime_gem_destroy(obj, buf->sgt);
+
exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
@@ -724,6 +775,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -735,8 +788,20 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
+ obj = vma->vm_private_data;
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_gem_flags(exynos_gem_obj->flags);
+ if (ret) {
+ drm_gem_vm_close(vma);
+ drm_gem_free_mmap_offset(obj);
+ return ret;
+ }
+
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
+ update_vm_cache_attr(exynos_gem_obj, vma);
+
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 4ed842039505..14d038b6cb02 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -40,6 +40,7 @@
* device address with IOMMU.
* @sgt: sg table to transfer page data.
* @pages: contain all pages to allocated memory region.
+ * @page_size: could be 4K, 64K or 1MB.
* @size: size of allocated memory region.
*/
struct exynos_drm_gem_buf {
@@ -47,6 +48,7 @@ struct exynos_drm_gem_buf {
dma_addr_t dma_addr;
struct sg_table *sgt;
struct page **pages;
+ unsigned long page_size;
unsigned long size;
};
@@ -74,9 +76,15 @@ struct exynos_drm_gem_obj {
unsigned int flags;
};
+struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+
/* destroy a buffer with gem object */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
+/* create a private gem object and initialize it. */
+struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+ unsigned long size);
+
/* create a new buffer with gem object */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
@@ -119,6 +127,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/* get buffer information to memory region allocated by gem. */
+int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 3424463676e0..5d9d2c2f8f3f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -37,6 +37,8 @@ struct drm_hdmi_context {
struct exynos_drm_subdrv subdrv;
struct exynos_drm_hdmi_context *hdmi_ctx;
struct exynos_drm_hdmi_context *mixer_ctx;
+
+ bool enabled[MIXER_WIN_NR];
};
void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
@@ -189,23 +191,34 @@ static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
DRM_DEBUG_KMS("%s\n", __FILE__);
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- if (hdmi_ops && hdmi_ops->disable)
- hdmi_ops->disable(ctx->hdmi_ctx->ctx);
- break;
- default:
- DRM_DEBUG_KMS("unkown dps mode: %d\n", mode);
- break;
+ if (mixer_ops && mixer_ops->dpms)
+ mixer_ops->dpms(ctx->mixer_ctx->ctx, mode);
+
+ if (hdmi_ops && hdmi_ops->dpms)
+ hdmi_ops->dpms(ctx->hdmi_ctx->ctx, mode);
+}
+
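+/* Commit every mixer window that is currently enabled and then the HDMI config. */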
+static void drm_hdmi_apply(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ if (!ctx->enabled[i])
+ continue;
+ if (mixer_ops && mixer_ops->win_commit)
+ mixer_ops->win_commit(ctx->mixer_ctx->ctx, i);
}
+
+ if (hdmi_ops && hdmi_ops->commit)
+ hdmi_ops->commit(ctx->hdmi_ctx->ctx);
}
static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
.dpms = drm_hdmi_dpms,
+ .apply = drm_hdmi_apply,
.enable_vblank = drm_hdmi_enable_vblank,
.disable_vblank = drm_hdmi_disable_vblank,
.mode_fixup = drm_hdmi_mode_fixup,
@@ -228,21 +241,37 @@ static void drm_mixer_mode_set(struct device *subdrv_dev,
static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+ int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (win < 0 || win > MIXER_WIN_NR) {
+ DRM_ERROR("mixer window[%d] is wrong\n", win);
+ return;
+ }
+
if (mixer_ops && mixer_ops->win_commit)
- mixer_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
+ mixer_ops->win_commit(ctx->mixer_ctx->ctx, win);
+
+ ctx->enabled[win] = true;
}
static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+ int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (win < 0 || win > MIXER_WIN_NR) {
+ DRM_ERROR("mixer window[%d] is wrong\n", win);
+ return;
+ }
+
if (mixer_ops && mixer_ops->win_disable)
- mixer_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
+ mixer_ops->win_disable(ctx->mixer_ctx->ctx, win);
+
+ ctx->enabled[win] = false;
}
static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
@@ -335,25 +364,6 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
return 0;
}
-static int hdmi_runtime_suspend(struct device *dev)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
- .runtime_suspend = hdmi_runtime_suspend,
- .runtime_resume = hdmi_runtime_resume,
-};
-
static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
{
struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
@@ -372,6 +382,5 @@ struct platform_driver exynos_drm_common_hdmi_driver = {
.driver = {
.name = "exynos-drm-hdmi",
.owner = THIS_MODULE,
- .pm = &hdmi_pm_ops,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index f3ae192c8dcf..bd8126996e52 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -26,6 +26,9 @@
#ifndef _EXYNOS_DRM_HDMI_H_
#define _EXYNOS_DRM_HDMI_H_
+#define MIXER_WIN_NR 3
+#define MIXER_DEFAULT_WIN 0
+
/*
* exynos hdmi common context structure.
*
@@ -54,13 +57,14 @@ struct exynos_hdmi_ops {
void (*get_max_resol)(void *ctx, unsigned int *width,
unsigned int *height);
void (*commit)(void *ctx);
- void (*disable)(void *ctx);
+ void (*dpms)(void *ctx, int mode);
};
struct exynos_mixer_ops {
/* manager */
int (*enable_vblank)(void *ctx, int pipe);
void (*disable_vblank)(void *ctx);
+ void (*dpms)(void *ctx, int mode);
/* overlay */
void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index f92fe4c6174a..c4c6525d4653 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -41,8 +41,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
container_of(plane, struct exynos_plane, base);
struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
struct exynos_drm_crtc_pos pos;
- unsigned int x = src_x >> 16;
- unsigned int y = src_y >> 16;
int ret;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -53,10 +51,12 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
pos.crtc_w = crtc_w;
pos.crtc_h = crtc_h;
- pos.fb_x = x;
- pos.fb_y = y;
+ /* considering 16.16 fixed point of source values */
+ pos.fb_x = src_x >> 16;
+ pos.fb_y = src_y >> 16;
+ pos.src_w = src_w >> 16;
+ pos.src_h = src_h >> 16;
- /* TODO: scale feature */
ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index b00353876458..a137e9e39a33 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -57,18 +57,16 @@ struct hdmi_resources {
struct hdmi_context {
struct device *dev;
struct drm_device *drm_dev;
- struct fb_videomode *default_timing;
- unsigned int is_v13:1;
- unsigned int default_win;
- unsigned int default_bpp;
- bool hpd_handle;
- bool enabled;
+ bool hpd;
+ bool powered;
+ bool is_v13;
+ bool dvi_mode;
+ struct mutex hdmi_mutex;
struct resource *regs_res;
void __iomem *regs;
- unsigned int irq;
- struct workqueue_struct *wq;
- struct work_struct hotplug_work;
+ unsigned int external_irq;
+ unsigned int internal_irq;
struct i2c_client *ddc_port;
struct i2c_client *hdmiphy_port;
@@ -78,6 +76,9 @@ struct hdmi_context {
struct hdmi_resources res;
void *parent_ctx;
+
+ void (*cfg_hpd)(bool external);
+ int (*get_hpd)(void);
};
/* HDMI Version 1.3 */
@@ -361,6 +362,13 @@ static const u8 hdmiphy_conf27_027[32] = {
0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
};
+static const u8 hdmiphy_conf74_176[32] = {
+ 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x5b, 0xef, 0x08,
+ 0x81, 0xa0, 0xb9, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+};
+
static const u8 hdmiphy_conf74_25[32] = {
0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
@@ -750,6 +758,63 @@ static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
},
};
+static const struct hdmi_preset_conf hdmi_conf_1080p30 = {
+ .core = {
+ .h_blank = {0x18, 0x01},
+ .v2_blank = {0x65, 0x04},
+ .v1_blank = {0x2d, 0x00},
+ .v_line = {0x65, 0x04},
+ .h_line = {0x98, 0x08},
+ .hsync_pol = {0x00},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f0 = {0xff, 0xff},
+ .v_blank_f1 = {0xff, 0xff},
+ .h_sync_start = {0x56, 0x00},
+ .h_sync_end = {0x82, 0x00},
+ .v_sync_line_bef_2 = {0x09, 0x00},
+ .v_sync_line_bef_1 = {0x04, 0x00},
+ .v_sync_line_aft_2 = {0xff, 0xff},
+ .v_sync_line_aft_1 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_2 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_1 = {0xff, 0xff},
+ .v_blank_f2 = {0xff, 0xff},
+ .v_blank_f3 = {0xff, 0xff},
+ .v_blank_f4 = {0xff, 0xff},
+ .v_blank_f5 = {0xff, 0xff},
+ .v_sync_line_aft_3 = {0xff, 0xff},
+ .v_sync_line_aft_4 = {0xff, 0xff},
+ .v_sync_line_aft_5 = {0xff, 0xff},
+ .v_sync_line_aft_6 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_3 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_4 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_5 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_6 = {0xff, 0xff},
+ .vact_space_1 = {0xff, 0xff},
+ .vact_space_2 = {0xff, 0xff},
+ .vact_space_3 = {0xff, 0xff},
+ .vact_space_4 = {0xff, 0xff},
+ .vact_space_5 = {0xff, 0xff},
+ .vact_space_6 = {0xff, 0xff},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x98, 0x08, /* h_fsz */
+ 0x18, 0x01, 0x80, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0x38, 0x04, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x00, 0x00, /* vact_st3 */
+ 0x00, 0x00, /* vact_st4 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ 0x00, /* 3d FP */
+ },
+};
+
static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
.core = {
.h_blank = {0xd0, 0x02},
@@ -864,6 +929,7 @@ static const struct hdmi_conf hdmi_confs[] = {
{ 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
{ 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
{ 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+ { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
{ 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
{ 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
};
@@ -1194,12 +1260,8 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
static bool hdmi_is_connected(void *ctx)
{
struct hdmi_context *hdata = ctx;
- u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
-
- if (val)
- return true;
- return false;
+ return hdata->hpd;
}
static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
@@ -1215,10 +1277,12 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
if (raw_edid) {
+ hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
* EDID_LENGTH, len));
- DRM_DEBUG_KMS("width[%d] x height[%d]\n",
- raw_edid->width_cm, raw_edid->height_cm);
+ DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
+ (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
+ raw_edid->width_cm, raw_edid->height_cm);
} else {
return -ENODEV;
}
@@ -1289,28 +1353,6 @@ static int hdmi_check_timing(void *ctx, void *timing)
return hdmi_v14_check_timing(check_timing);
}
-static int hdmi_display_power_on(void *ctx, int mode)
-{
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- DRM_DEBUG_KMS("hdmi [on]\n");
- break;
- case DRM_MODE_DPMS_STANDBY:
- break;
- case DRM_MODE_DPMS_SUSPEND:
- break;
- case DRM_MODE_DPMS_OFF:
- DRM_DEBUG_KMS("hdmi [off]\n");
- break;
- default:
- break;
- }
-
- return 0;
-}
-
static void hdmi_set_acr(u32 freq, u8 *acr)
{
u32 n, cts;
@@ -1463,10 +1505,7 @@ static void hdmi_audio_init(struct hdmi_context *hdata)
static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff)
{
- u32 mod;
-
- mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
- if (mod & HDMI_DVI_MODE_EN)
+ if (hdata->dvi_mode)
return;
hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0);
@@ -1478,9 +1517,6 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
{
u32 reg;
- /* disable hpd handle for drm */
- hdata->hpd_handle = false;
-
if (hdata->is_v13)
reg = HDMI_V13_CORE_RSTOUT;
else
@@ -1491,16 +1527,10 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
mdelay(10);
hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
mdelay(10);
-
- /* enable hpd handle for drm */
- hdata->hpd_handle = true;
}
static void hdmi_conf_init(struct hdmi_context *hdata)
{
- /* disable hpd handle for drm */
- hdata->hpd_handle = false;
-
/* enable HPD interrupts */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1514,6 +1544,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
/* disable bluescreen */
hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
+ if (hdata->dvi_mode) {
+ /* choose DVI mode */
+ hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
+ HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
+ hdmi_reg_writeb(hdata, HDMI_CON_2,
+ HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS);
+ }
+
if (hdata->is_v13) {
/* choose bluescreen (fecal) color */
hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
@@ -1535,9 +1573,6 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
}
-
- /* enable hpd handle for drm */
- hdata->hpd_handle = true;
}
static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
@@ -1890,8 +1925,11 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
hdmiphy_conf_reset(hdata);
hdmiphy_conf_apply(hdata);
+ mutex_lock(&hdata->hdmi_mutex);
hdmi_conf_reset(hdata);
hdmi_conf_init(hdata);
+ mutex_unlock(&hdata->hdmi_mutex);
+
hdmi_audio_init(hdata);
/* setting core registers */
@@ -1971,20 +2009,86 @@ static void hdmi_commit(void *ctx)
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
hdmi_conf_apply(hdata);
+}
+
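+/*
+ * Power up the HDMI block: take a runtime PM reference and switch on the
+ * regulators and clocks. Does nothing if the block is already powered.
+ */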
+static void hdmi_poweron(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&hdata->hdmi_mutex);
+ if (hdata->powered) {
+ mutex_unlock(&hdata->hdmi_mutex);
+ return;
+ }
+
+ hdata->powered = true;
+
+ if (hdata->cfg_hpd)
+ hdata->cfg_hpd(true);
+ mutex_unlock(&hdata->hdmi_mutex);
+
+ pm_runtime_get_sync(hdata->dev);
+
+ regulator_bulk_enable(res->regul_count, res->regul_bulk);
+ clk_enable(res->hdmiphy);
+ clk_enable(res->hdmi);
+ clk_enable(res->sclk_hdmi);
+}
+
+static void hdmi_poweroff(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&hdata->hdmi_mutex);
+ if (!hdata->powered)
+ goto out;
+ mutex_unlock(&hdata->hdmi_mutex);
+
+	/*
+	 * The TV power domain can only be turned off once the hdmiphy is in
+	 * a suitable state, and the hdmiphy reset state appears to satisfy
+	 * that condition.
+	 */
+ hdmiphy_conf_reset(hdata);
+
+ clk_disable(res->sclk_hdmi);
+ clk_disable(res->hdmi);
+ clk_disable(res->hdmiphy);
+ regulator_bulk_disable(res->regul_count, res->regul_bulk);
+
+ pm_runtime_put_sync(hdata->dev);
- hdata->enabled = true;
+ mutex_lock(&hdata->hdmi_mutex);
+ if (hdata->cfg_hpd)
+ hdata->cfg_hpd(false);
+
+ hdata->powered = false;
+
+out:
+ mutex_unlock(&hdata->hdmi_mutex);
}
-static void hdmi_disable(void *ctx)
+static void hdmi_dpms(void *ctx, int mode)
{
struct hdmi_context *hdata = ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- if (hdata->enabled) {
- hdmi_audio_control(hdata, false);
- hdmiphy_conf_reset(hdata);
- hdmi_conf_reset(hdata);
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ hdmi_poweron(hdata);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ hdmi_poweroff(hdata);
+ break;
+ default:
+ DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+ break;
}
}
@@ -1993,30 +2097,35 @@ static struct exynos_hdmi_ops hdmi_ops = {
.is_connected = hdmi_is_connected,
.get_edid = hdmi_get_edid,
.check_timing = hdmi_check_timing,
- .power_on = hdmi_display_power_on,
/* manager */
.mode_fixup = hdmi_mode_fixup,
.mode_set = hdmi_mode_set,
.get_max_resol = hdmi_get_max_resol,
.commit = hdmi_commit,
- .disable = hdmi_disable,
+ .dpms = hdmi_dpms,
};
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
-static void hdmi_hotplug_func(struct work_struct *work)
+static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
{
- struct hdmi_context *hdata =
- container_of(work, struct hdmi_context, hotplug_work);
- struct exynos_drm_hdmi_context *ctx =
- (struct exynos_drm_hdmi_context *)hdata->parent_ctx;
+ struct exynos_drm_hdmi_context *ctx = arg;
+ struct hdmi_context *hdata = ctx->ctx;
+
+ if (!hdata->get_hpd)
+ goto out;
+
+ mutex_lock(&hdata->hdmi_mutex);
+ hdata->hpd = hdata->get_hpd();
+ mutex_unlock(&hdata->hdmi_mutex);
- drm_helper_hpd_irq_event(ctx->drm_dev);
+ if (ctx->drm_dev)
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+
+out:
+ return IRQ_HANDLED;
}
-static irqreturn_t hdmi_irq_handler(int irq, void *arg)
+static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg)
{
struct exynos_drm_hdmi_context *ctx = arg;
struct hdmi_context *hdata = ctx->ctx;
@@ -2025,19 +2134,28 @@ static irqreturn_t hdmi_irq_handler(int irq, void *arg)
intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
/* clearing flags for HPD plug/unplug */
if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
- DRM_DEBUG_KMS("unplugged, handling:%d\n", hdata->hpd_handle);
+ DRM_DEBUG_KMS("unplugged\n");
hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
HDMI_INTC_FLAG_HPD_UNPLUG);
}
if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
- DRM_DEBUG_KMS("plugged, handling:%d\n", hdata->hpd_handle);
+ DRM_DEBUG_KMS("plugged\n");
hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
HDMI_INTC_FLAG_HPD_PLUG);
}
- if (ctx->drm_dev && hdata->hpd_handle)
- queue_work(hdata->wq, &hdata->hotplug_work);
+ mutex_lock(&hdata->hdmi_mutex);
+ hdata->hpd = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
+ if (hdata->powered && hdata->hpd) {
+ mutex_unlock(&hdata->hdmi_mutex);
+ goto out;
+ }
+ mutex_unlock(&hdata->hdmi_mutex);
+
+ if (ctx->drm_dev)
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+out:
return IRQ_HANDLED;
}
@@ -2131,68 +2249,6 @@ static int hdmi_resources_cleanup(struct hdmi_context *hdata)
return 0;
}
-static void hdmi_resource_poweron(struct hdmi_context *hdata)
-{
- struct hdmi_resources *res = &hdata->res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- /* turn HDMI power on */
- regulator_bulk_enable(res->regul_count, res->regul_bulk);
- /* power-on hdmi physical interface */
- clk_enable(res->hdmiphy);
- /* turn clocks on */
- clk_enable(res->hdmi);
- clk_enable(res->sclk_hdmi);
-
- hdmiphy_conf_reset(hdata);
- hdmi_conf_reset(hdata);
- hdmi_conf_init(hdata);
- hdmi_audio_init(hdata);
-}
-
-static void hdmi_resource_poweroff(struct hdmi_context *hdata)
-{
- struct hdmi_resources *res = &hdata->res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- /* turn clocks off */
- clk_disable(res->sclk_hdmi);
- clk_disable(res->hdmi);
- /* power-off hdmiphy */
- clk_disable(res->hdmiphy);
- /* turn HDMI power off */
- regulator_bulk_disable(res->regul_count, res->regul_bulk);
-}
-
-static int hdmi_runtime_suspend(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
-
- DRM_DEBUG_KMS("%s\n", __func__);
-
- hdmi_resource_poweroff(ctx->ctx);
-
- return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
-
- DRM_DEBUG_KMS("%s\n", __func__);
-
- hdmi_resource_poweron(ctx->ctx);
-
- return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
- .runtime_suspend = hdmi_runtime_suspend,
- .runtime_resume = hdmi_runtime_resume,
-};
-
static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2237,15 +2293,16 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ mutex_init(&hdata->hdmi_mutex);
+
drm_hdmi_ctx->ctx = (void *)hdata;
hdata->parent_ctx = (void *)drm_hdmi_ctx;
platform_set_drvdata(pdev, drm_hdmi_ctx);
hdata->is_v13 = pdata->is_v13;
- hdata->default_win = pdata->default_win;
- hdata->default_timing = &pdata->timing;
- hdata->default_bpp = pdata->bpp;
+ hdata->cfg_hpd = pdata->cfg_hpd;
+ hdata->get_hpd = pdata->get_hpd;
hdata->dev = dev;
ret = hdmi_resources_init(hdata);
@@ -2294,41 +2351,49 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
hdata->hdmiphy_port = hdmi_hdmiphy;
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- DRM_ERROR("get interrupt resource failed.\n");
- ret = -ENXIO;
+ hdata->external_irq = platform_get_irq_byname(pdev, "external_irq");
+ if (hdata->external_irq < 0) {
+ DRM_ERROR("failed to get platform irq\n");
+ ret = hdata->external_irq;
goto err_hdmiphy;
}
- /* create workqueue and hotplug work */
- hdata->wq = alloc_workqueue("exynos-drm-hdmi",
- WQ_UNBOUND | WQ_NON_REENTRANT, 1);
- if (hdata->wq == NULL) {
- DRM_ERROR("Failed to create workqueue.\n");
- ret = -ENOMEM;
+ hdata->internal_irq = platform_get_irq_byname(pdev, "internal_irq");
+ if (hdata->internal_irq < 0) {
+ DRM_ERROR("failed to get platform internal irq\n");
+ ret = hdata->internal_irq;
goto err_hdmiphy;
}
- INIT_WORK(&hdata->hotplug_work, hdmi_hotplug_func);
- /* register hpd interrupt */
- ret = request_irq(res->start, hdmi_irq_handler, 0, "drm_hdmi",
- drm_hdmi_ctx);
+ ret = request_threaded_irq(hdata->external_irq, NULL,
+ hdmi_external_irq_thread, IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "hdmi_external", drm_hdmi_ctx);
if (ret) {
- DRM_ERROR("request interrupt failed.\n");
- goto err_workqueue;
+		DRM_ERROR("failed to register hdmi external interrupt\n");
+ goto err_hdmiphy;
+ }
+
+ if (hdata->cfg_hpd)
+ hdata->cfg_hpd(false);
+
+ ret = request_threaded_irq(hdata->internal_irq, NULL,
+ hdmi_internal_irq_thread, IRQF_ONESHOT,
+ "hdmi_internal", drm_hdmi_ctx);
+ if (ret) {
+ DRM_ERROR("failed to register hdmi internal interrupt\n");
+ goto err_free_irq;
}
- hdata->irq = res->start;
/* register specific callbacks to common hdmi. */
exynos_hdmi_ops_register(&hdmi_ops);
- hdmi_resource_poweron(hdata);
+ pm_runtime_enable(dev);
return 0;
-err_workqueue:
- destroy_workqueue(hdata->wq);
+err_free_irq:
+ free_irq(hdata->external_irq, drm_hdmi_ctx);
err_hdmiphy:
i2c_del_driver(&hdmiphy_driver);
err_ddc:
@@ -2348,18 +2413,15 @@ err_data:
static int __devexit hdmi_remove(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
struct hdmi_context *hdata = ctx->ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- hdmi_resource_poweroff(hdata);
+ pm_runtime_disable(dev);
- disable_irq(hdata->irq);
- free_irq(hdata->irq, hdata);
-
- cancel_work_sync(&hdata->hotplug_work);
- destroy_workqueue(hdata->wq);
+ free_irq(hdata->internal_irq, hdata);
hdmi_resources_cleanup(hdata);
@@ -2378,12 +2440,43 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int hdmi_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+
+ disable_irq(hdata->internal_irq);
+ disable_irq(hdata->external_irq);
+
+ hdata->hpd = false;
+ if (ctx->drm_dev)
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+
+ hdmi_poweroff(hdata);
+
+ return 0;
+}
+
+static int hdmi_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+
+ enable_irq(hdata->external_irq);
+ enable_irq(hdata->internal_irq);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume);
+
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
.remove = __devexit_p(hdmi_remove),
.driver = {
.name = "exynos4-hdmi",
.owner = THIS_MODULE,
- .pm = &hdmi_pm_ops,
+ .pm = &hdmi_pm_ops,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e15438c01129..68ef01028375 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -37,9 +37,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_hdmi.h"
-#define MIXER_WIN_NR 3
-#define MIXER_DEFAULT_WIN 0
-
#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
struct hdmi_win_data {
@@ -57,13 +54,14 @@ struct hdmi_win_data {
unsigned int fb_y;
unsigned int fb_width;
unsigned int fb_height;
+ unsigned int src_width;
+ unsigned int src_height;
unsigned int mode_width;
unsigned int mode_height;
unsigned int scan_flags;
};
struct mixer_resources {
- struct device *dev;
int irq;
void __iomem *mixer_regs;
void __iomem *vp_regs;
@@ -76,10 +74,13 @@ struct mixer_resources {
};
struct mixer_context {
- unsigned int irq;
+ struct device *dev;
int pipe;
bool interlace;
+ bool powered;
+ u32 int_en;
+ struct mutex mixer_mutex;
struct mixer_resources mixer_res;
struct hdmi_win_data win_data[MIXER_WIN_NR];
};
@@ -352,10 +353,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
struct hdmi_win_data *win_data;
- unsigned int full_width, full_height, width, height;
unsigned int x_ratio, y_ratio;
- unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
- unsigned int mode_width, mode_height;
unsigned int buf_num;
dma_addr_t luma_addr[2], chroma_addr[2];
bool tiled_mode = false;
@@ -382,21 +380,9 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
return;
}
- full_width = win_data->fb_width;
- full_height = win_data->fb_height;
- width = win_data->crtc_width;
- height = win_data->crtc_height;
- mode_width = win_data->mode_width;
- mode_height = win_data->mode_height;
-
/* scaling feature: (src << 16) / dst */
- x_ratio = (width << 16) / width;
- y_ratio = (height << 16) / height;
-
- src_x_offset = win_data->fb_x;
- src_y_offset = win_data->fb_y;
- dst_x_offset = win_data->crtc_x;
- dst_y_offset = win_data->crtc_y;
+ x_ratio = (win_data->src_width << 16) / win_data->crtc_width;
+ y_ratio = (win_data->src_height << 16) / win_data->crtc_height;
if (buf_num == 2) {
luma_addr[0] = win_data->dma_addr;
@@ -404,7 +390,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
} else {
luma_addr[0] = win_data->dma_addr;
chroma_addr[0] = win_data->dma_addr
- + (full_width * full_height);
+ + (win_data->fb_width * win_data->fb_height);
}
if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
@@ -413,8 +399,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
- luma_addr[1] = luma_addr[0] + full_width;
- chroma_addr[1] = chroma_addr[0] + full_width;
+ luma_addr[1] = luma_addr[0] + win_data->fb_width;
+ chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
}
} else {
ctx->interlace = false;
@@ -435,26 +421,26 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
- vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(full_width) |
- VP_IMG_VSIZE(full_height));
+ vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
+ VP_IMG_VSIZE(win_data->fb_height));
/* chroma height has to reduced by 2 to avoid chroma distorions */
- vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(full_width) |
- VP_IMG_VSIZE(full_height / 2));
+ vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
+ VP_IMG_VSIZE(win_data->fb_height / 2));
- vp_reg_write(res, VP_SRC_WIDTH, width);
- vp_reg_write(res, VP_SRC_HEIGHT, height);
+ vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
+ vp_reg_write(res, VP_SRC_HEIGHT, win_data->src_height);
vp_reg_write(res, VP_SRC_H_POSITION,
- VP_SRC_H_POSITION_VAL(src_x_offset));
- vp_reg_write(res, VP_SRC_V_POSITION, src_y_offset);
+ VP_SRC_H_POSITION_VAL(win_data->fb_x));
+ vp_reg_write(res, VP_SRC_V_POSITION, win_data->fb_y);
- vp_reg_write(res, VP_DST_WIDTH, width);
- vp_reg_write(res, VP_DST_H_POSITION, dst_x_offset);
+ vp_reg_write(res, VP_DST_WIDTH, win_data->crtc_width);
+ vp_reg_write(res, VP_DST_H_POSITION, win_data->crtc_x);
if (ctx->interlace) {
- vp_reg_write(res, VP_DST_HEIGHT, height / 2);
- vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset / 2);
+ vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height / 2);
+ vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y / 2);
} else {
- vp_reg_write(res, VP_DST_HEIGHT, height);
- vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset);
+ vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height);
+ vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y);
}
vp_reg_write(res, VP_H_RATIO, x_ratio);
@@ -468,8 +454,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
- mixer_cfg_scan(ctx, mode_height);
- mixer_cfg_rgb_fmt(ctx, mode_height);
+ mixer_cfg_scan(ctx, win_data->mode_height);
+ mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
mixer_cfg_layer(ctx, win, true);
mixer_run(ctx);
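The x_ratio/y_ratio values programmed above are 16.16 fixed-point scale factors, (src << 16) / dst, so 0x10000 means no scaling. A minimal standalone C sketch of just that arithmetic (not driver code; all register programming is omitted):

#include <stdint.h>
#include <stdio.h>

/* 16.16 fixed-point scale factor: (src << 16) / dst, 0x10000 == 1:1 */
static uint32_t scale_ratio_16_16(uint32_t src, uint32_t dst)
{
	return (src << 16) / dst;	/* dst must be non-zero */
}

int main(void)
{
	/* e.g. a 1920-wide source shown in a 1280-wide window: 1.5x */
	uint32_t r = scale_ratio_16_16(1920, 1280);

	printf("ratio = 0x%05x (%.3f)\n", r, r / 65536.0);
	return 0;
}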
@@ -484,10 +470,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
struct hdmi_win_data *win_data;
- unsigned int full_width, width, height;
unsigned int x_ratio, y_ratio;
unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
- unsigned int mode_width, mode_height;
dma_addr_t dma_addr;
unsigned int fmt;
u32 val;
@@ -510,26 +494,17 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
fmt = ARGB8888;
}
- dma_addr = win_data->dma_addr;
- full_width = win_data->fb_width;
- width = win_data->crtc_width;
- height = win_data->crtc_height;
- mode_width = win_data->mode_width;
- mode_height = win_data->mode_height;
-
/* 2x scaling feature */
x_ratio = 0;
y_ratio = 0;
- src_x_offset = win_data->fb_x;
- src_y_offset = win_data->fb_y;
dst_x_offset = win_data->crtc_x;
dst_y_offset = win_data->crtc_y;
/* converting dma address base and source offset */
- dma_addr = dma_addr
- + (src_x_offset * win_data->bpp >> 3)
- + (src_y_offset * full_width * win_data->bpp >> 3);
+ dma_addr = win_data->dma_addr
+ + (win_data->fb_x * win_data->bpp >> 3)
+ + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
src_x_offset = 0;
src_y_offset = 0;
@@ -546,10 +521,10 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
/* setup geometry */
- mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), full_width);
+ mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
- val = MXR_GRP_WH_WIDTH(width);
- val |= MXR_GRP_WH_HEIGHT(height);
+ val = MXR_GRP_WH_WIDTH(win_data->crtc_width);
+ val |= MXR_GRP_WH_HEIGHT(win_data->crtc_height);
val |= MXR_GRP_WH_H_SCALE(x_ratio);
val |= MXR_GRP_WH_V_SCALE(y_ratio);
mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -567,8 +542,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
/* set buffer address to mixer */
mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
- mixer_cfg_scan(ctx, mode_height);
- mixer_cfg_rgb_fmt(ctx, mode_height);
+ mixer_cfg_scan(ctx, win_data->mode_height);
+ mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
mixer_cfg_layer(ctx, win, true);
mixer_run(ctx);
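The dma_addr computation above folds the framebuffer origin (fb_x, fb_y) into a byte offset so the hardware source offsets can then be programmed as zero. A small sketch of the arithmetic, assuming a linear buffer whose pitch is fb_width * bpp / 8 (the names here are illustrative):

#include <stdint.h>

/* byte offset of pixel (x, y) in a linear framebuffer */
uint64_t fb_byte_offset(uint32_t x, uint32_t y,
			uint32_t fb_width, uint32_t bpp)
{
	uint64_t pitch = (uint64_t)fb_width * bpp / 8;	/* bytes per line */

	return y * pitch + (uint64_t)x * bpp / 8;
}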
@@ -591,6 +566,116 @@ static void vp_win_reset(struct mixer_context *ctx)
WARN(tries == 0, "failed to reset Video Processor\n");
}
+static void mixer_win_reset(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ unsigned long flags;
+ u32 val; /* value stored to register */
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(ctx, false);
+
+ mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
+
+ /* set output in RGB888 mode */
+ mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
+
+ /* 16 beat burst in DMA */
+ mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
+ MXR_STATUS_BURST_MASK);
+
+ /* setting default layer priority: layer1 > layer0 > video
+ * because typical usage scenario would be
+ * layer1 - OSD
+ * layer0 - framebuffer
+ * video - video overlay
+ */
+ val = MXR_LAYER_CFG_GRP1_VAL(3);
+ val |= MXR_LAYER_CFG_GRP0_VAL(2);
+ val |= MXR_LAYER_CFG_VP_VAL(1);
+ mixer_reg_write(res, MXR_LAYER_CFG, val);
+
+ /* setting background color */
+ mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
+ mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
+ mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
+
+ /* setting graphical layers */
+
+ val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
+ val |= MXR_GRP_CFG_WIN_BLEND_EN;
+ val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
+
+ /* the same configuration for both layers */
+ mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
+
+ val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+ val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
+ mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
+
+ /* configuration of Video Processor Registers */
+ vp_win_reset(ctx);
+ vp_default_filter(res);
+
+ /* disable all layers */
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+
+ mixer_vsync_set_update(ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
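MXR_LAYER_CFG above is built by OR-ing one small priority field per layer (higher value wins, giving layer1 > layer0 > video). A sketch of that kind of field packing; the shift positions below are made up for illustration and are not the real mixer layout:

#include <stdint.h>

#define LAYER_VP(p)	((uint32_t)(p) << 0)	/* video plane priority */
#define LAYER_GRP0(p)	((uint32_t)(p) << 4)	/* graphics layer 0 */
#define LAYER_GRP1(p)	((uint32_t)(p) << 8)	/* graphics layer 1 */

/* higher value = higher priority, e.g. layer_cfg(1, 2, 3) */
uint32_t layer_cfg(unsigned int vp, unsigned int grp0, unsigned int grp1)
{
	return LAYER_VP(vp) | LAYER_GRP0(grp0) | LAYER_GRP1(grp1);
}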
+static void mixer_poweron(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (ctx->powered) {
+ mutex_unlock(&ctx->mixer_mutex);
+ return;
+ }
+ ctx->powered = true;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ pm_runtime_get_sync(ctx->dev);
+
+ clk_enable(res->mixer);
+ clk_enable(res->vp);
+ clk_enable(res->sclk_mixer);
+
+ mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+ mixer_win_reset(ctx);
+}
+
+static void mixer_poweroff(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (!ctx->powered)
+ goto out;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+
+ clk_disable(res->mixer);
+ clk_disable(res->vp);
+ clk_disable(res->sclk_mixer);
+
+ pm_runtime_put_sync(ctx->dev);
+
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = false;
+
+out:
+ mutex_unlock(&ctx->mixer_mutex);
+}
+
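mixer_poweron()/mixer_poweroff() above make repeated DPMS calls idempotent by keeping a 'powered' flag under a mutex and only touching clocks and runtime PM on a real state change. A userspace sketch of the same pattern (pthreads stand in for the kernel mutex; clock and PM calls are stubbed as comments):

#include <pthread.h>
#include <stdbool.h>

struct power_ctx {
	pthread_mutex_t lock;
	bool powered;
};

void power_on(struct power_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	if (ctx->powered) {		/* already on: nothing to do */
		pthread_mutex_unlock(&ctx->lock);
		return;
	}
	ctx->powered = true;
	pthread_mutex_unlock(&ctx->lock);

	/* ... enable clocks, restore interrupt mask, reset hardware ... */
}

void power_off(struct power_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	if (!ctx->powered) {		/* already off: nothing to do */
		pthread_mutex_unlock(&ctx->lock);
		return;
	}
	pthread_mutex_unlock(&ctx->lock);

	/* ... save interrupt mask, disable clocks, drop PM reference ... */

	pthread_mutex_lock(&ctx->lock);
	ctx->powered = false;
	pthread_mutex_unlock(&ctx->lock);
}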
static int mixer_enable_vblank(void *ctx, int pipe)
{
struct mixer_context *mixer_ctx = ctx;
@@ -618,6 +703,27 @@ static void mixer_disable_vblank(void *ctx)
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
+static void mixer_dpms(void *ctx, int mode)
+{
+ struct mixer_context *mixer_ctx = ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ mixer_poweron(mixer_ctx);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ mixer_poweroff(mixer_ctx);
+ break;
+ default:
+ DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+ break;
+ }
+}
+
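mixer_dpms() collapses the four DRM DPMS modes into a simple on/off decision: only DPMS_ON powers the mixer up, while standby, suspend and off all power it down. A tiny sketch of that mapping (the enum here is illustrative, not the DRM one):

#include <stdbool.h>

enum dpms_mode { DPMS_ON, DPMS_STANDBY, DPMS_SUSPEND, DPMS_OFF };

/* only full "on" keeps the block powered; everything else is off */
bool dpms_wants_power(enum dpms_mode mode)
{
	return mode == DPMS_ON;
}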
static void mixer_win_mode_set(void *ctx,
struct exynos_drm_overlay *overlay)
{
@@ -643,7 +749,7 @@ static void mixer_win_mode_set(void *ctx,
win = MIXER_DEFAULT_WIN;
if (win < 0 || win > MIXER_WIN_NR) {
- DRM_ERROR("overlay plane[%d] is wrong\n", win);
+ DRM_ERROR("mixer window[%d] is wrong\n", win);
return;
}
@@ -665,6 +771,8 @@ static void mixer_win_mode_set(void *ctx,
win_data->fb_y = overlay->fb_y;
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
+ win_data->src_width = overlay->src_width;
+ win_data->src_height = overlay->src_height;
win_data->mode_width = overlay->mode_width;
win_data->mode_height = overlay->mode_height;
@@ -672,44 +780,26 @@ static void mixer_win_mode_set(void *ctx,
win_data->scan_flags = overlay->scan_flag;
}
-static void mixer_win_commit(void *ctx, int zpos)
+static void mixer_win_commit(void *ctx, int win)
{
struct mixer_context *mixer_ctx = ctx;
- int win = zpos;
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
- if (win == DEFAULT_ZPOS)
- win = MIXER_DEFAULT_WIN;
-
- if (win < 0 || win > MIXER_WIN_NR) {
- DRM_ERROR("overlay plane[%d] is wrong\n", win);
- return;
- }
-
if (win > 1)
vp_video_buffer(mixer_ctx, win);
else
mixer_graph_buffer(mixer_ctx, win);
}
-static void mixer_win_disable(void *ctx, int zpos)
+static void mixer_win_disable(void *ctx, int win)
{
struct mixer_context *mixer_ctx = ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
unsigned long flags;
- int win = zpos;
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
- if (win == DEFAULT_ZPOS)
- win = MIXER_DEFAULT_WIN;
-
- if (win < 0 || win > MIXER_WIN_NR) {
- DRM_ERROR("overlay plane[%d] is wrong\n", win);
- return;
- }
-
spin_lock_irqsave(&res->reg_slock, flags);
mixer_vsync_set_update(mixer_ctx, false);
@@ -723,6 +813,7 @@ static struct exynos_mixer_ops mixer_ops = {
/* manager */
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
+ .dpms = mixer_dpms,
/* overlay */
.win_mode_set = mixer_win_mode_set,
@@ -773,7 +864,7 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
struct mixer_context *ctx = drm_hdmi_ctx->ctx;
struct mixer_resources *res = &ctx->mixer_res;
- u32 val, val_base;
+ u32 val, base, shadow;
spin_lock(&res->reg_slock);
@@ -784,12 +875,14 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
if (val & MXR_INT_STATUS_VSYNC) {
/* interlace scan need to check shadow register */
if (ctx->interlace) {
- val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
- if (ctx->win_data[0].dma_addr != val_base)
+ base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
+ shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
+ if (base != shadow)
goto out;
- val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
- if (ctx->win_data[1].dma_addr != val_base)
+ base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
+ shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
+ if (base != shadow)
goto out;
}
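In the interlaced case above, the IRQ handler now compares each live MXR_GRAPHIC_BASE register with its shadow copy and only signals completion once they match, that is, once the hardware has actually latched the new scan-out address. The check itself is just an equality test over the two register values, roughly:

#include <stdbool.h>
#include <stdint.h>

/* pending-update test: done only when the shadow copy has caught up */
bool scanout_update_latched(uint32_t base_reg, uint32_t shadow_reg)
{
	return base_reg == shadow_reg;
}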
@@ -811,117 +904,6 @@ out:
return IRQ_HANDLED;
}
-static void mixer_win_reset(struct mixer_context *ctx)
-{
- struct mixer_resources *res = &ctx->mixer_res;
- unsigned long flags;
- u32 val; /* value stored to register */
-
- spin_lock_irqsave(&res->reg_slock, flags);
- mixer_vsync_set_update(ctx, false);
-
- mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
-
- /* set output in RGB888 mode */
- mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
-
- /* 16 beat burst in DMA */
- mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
- MXR_STATUS_BURST_MASK);
-
- /* setting default layer priority: layer1 > layer0 > video
- * because typical usage scenario would be
- * layer1 - OSD
- * layer0 - framebuffer
- * video - video overlay
- */
- val = MXR_LAYER_CFG_GRP1_VAL(3);
- val |= MXR_LAYER_CFG_GRP0_VAL(2);
- val |= MXR_LAYER_CFG_VP_VAL(1);
- mixer_reg_write(res, MXR_LAYER_CFG, val);
-
- /* setting background color */
- mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
- mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
- mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
-
- /* setting graphical layers */
-
- val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
- val |= MXR_GRP_CFG_WIN_BLEND_EN;
- val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
-
- /* the same configuration for both layers */
- mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
-
- val |= MXR_GRP_CFG_BLEND_PRE_MUL;
- val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
- mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
-
- /* configuration of Video Processor Registers */
- vp_win_reset(ctx);
- vp_default_filter(res);
-
- /* disable all layers */
- mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
- mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
- mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
-
- mixer_vsync_set_update(ctx, true);
- spin_unlock_irqrestore(&res->reg_slock, flags);
-}
-
-static void mixer_resource_poweron(struct mixer_context *ctx)
-{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- clk_enable(res->mixer);
- clk_enable(res->vp);
- clk_enable(res->sclk_mixer);
-
- mixer_win_reset(ctx);
-}
-
-static void mixer_resource_poweroff(struct mixer_context *ctx)
-{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- clk_disable(res->mixer);
- clk_disable(res->vp);
- clk_disable(res->sclk_mixer);
-}
-
-static int mixer_runtime_resume(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
-
- DRM_DEBUG_KMS("resume - start\n");
-
- mixer_resource_poweron(ctx->ctx);
-
- return 0;
-}
-
-static int mixer_runtime_suspend(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
-
- DRM_DEBUG_KMS("suspend - start\n");
-
- mixer_resource_poweroff(ctx->ctx);
-
- return 0;
-}
-
-static const struct dev_pm_ops mixer_pm_ops = {
- .runtime_suspend = mixer_runtime_suspend,
- .runtime_resume = mixer_runtime_resume,
-};
-
static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
struct platform_device *pdev)
{
@@ -931,7 +913,6 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
struct resource *res;
int ret;
- mixer_res->dev = dev;
spin_lock_init(&mixer_res->reg_slock);
mixer_res->mixer = clk_get(dev, "mixer");
@@ -1027,7 +1008,6 @@ fail:
clk_put(mixer_res->vp);
if (!IS_ERR_OR_NULL(mixer_res->mixer))
clk_put(mixer_res->mixer);
- mixer_res->dev = NULL;
return ret;
}
@@ -1035,7 +1015,6 @@ static void mixer_resources_cleanup(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
- disable_irq(res->irq);
free_irq(res->irq, ctx);
iounmap(res->vp_regs);
@@ -1064,6 +1043,9 @@ static int __devinit mixer_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ mutex_init(&ctx->mixer_mutex);
+
+ ctx->dev = &pdev->dev;
drm_hdmi_ctx->ctx = (void *)ctx;
platform_set_drvdata(pdev, drm_hdmi_ctx);
@@ -1076,7 +1058,7 @@ static int __devinit mixer_probe(struct platform_device *pdev)
/* register specific callback point to common hdmi. */
exynos_mixer_ops_register(&mixer_ops);
- mixer_resource_poweron(ctx);
+ pm_runtime_enable(dev);
return 0;
@@ -1095,12 +1077,27 @@ static int mixer_remove(struct platform_device *pdev)
dev_info(dev, "remove successful\n");
- mixer_resource_poweroff(ctx);
+ pm_runtime_disable(&pdev->dev);
+
mixer_resources_cleanup(ctx);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int mixer_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ mixer_poweroff(ctx);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL);
+
struct platform_driver mixer_driver = {
.driver = {
.name = "s5p-mixer",
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 3c04bea842ce..9cc7c5e9718c 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -138,14 +138,16 @@
#define HDMI_ASP_MASK (1 << 2)
#define HDMI_EN (1 << 0)
+/* HDMI_CON_2 */
+#define HDMI_VID_PREAMBLE_DIS (1 << 5)
+#define HDMI_GUARD_BAND_DIS (1 << 1)
+
/* HDMI_PHY_STATUS */
#define HDMI_PHY_STATUS_READY (1 << 0)
/* HDMI_MODE_SEL */
#define HDMI_MODE_HDMI_EN (1 << 1)
#define HDMI_MODE_DVI_EN (1 << 0)
-#define HDMI_DVI_MODE_EN (1)
-#define HDMI_DVI_MODE_DIS (0)
#define HDMI_MODE_MASK (3 << 0)
/* HDMI_TG_CMD */
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 1583982917ce..abfa2a93f0d0 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -1,7 +1,7 @@
#
# KMS driver for the GMA500
#
-ccflags-y += -Iinclude/drm
+ccflags-y += -I$(srctree)/include/drm
gma500_gfx-y += gem_glue.o \
accel_2d.o \
@@ -12,7 +12,6 @@ gma500_gfx-y += gem_glue.o \
intel_bios.o \
intel_i2c.o \
intel_gmbus.o \
- intel_opregion.o \
mmu.o \
power.o \
psb_drv.o \
@@ -25,6 +24,8 @@ gma500_gfx-y += gem_glue.o \
psb_device.o \
mid_bios.o
+gma500_gfx-$(CONFIG_ACPI) += opregion.o
+
gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
cdv_intel_crt.o \
cdv_intel_display.o \
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index a54cc738926a..9764045428ce 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -49,13 +49,15 @@ static void cdv_disable_vga(struct drm_device *dev)
static int cdv_output_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+
+ drm_mode_create_scaling_mode_property(dev);
+
cdv_disable_vga(dev);
cdv_intel_crt_init(dev, &dev_priv->mode_dev);
cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
- /* These bits indicate HDMI not SDVO on CDV, but we don't yet support
- the HDMI interface */
+ /* These bits indicate HDMI not SDVO on CDV */
if (REG_READ(SDVOB) & SDVO_DETECTED)
cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
if (REG_READ(SDVOC) & SDVO_DETECTED)
@@ -66,76 +68,71 @@ static int cdv_output_init(struct drm_device *dev)
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
/*
- * Poulsbo Backlight Interfaces
+ * Cedartrail Backlight Interfaces
*/
-#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
-#define BLC_PWM_FREQ_CALC_CONSTANT 32
-#define MHz 1000000
-
-#define PSB_BLC_PWM_PRECISION_FACTOR 10
-#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
-#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
-
-#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
-#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
-
-static int cdv_brightness;
static struct backlight_device *cdv_backlight_device;
-static int cdv_get_brightness(struct backlight_device *bd)
+static int cdv_backlight_combination_mode(struct drm_device *dev)
{
- /* return locally cached var instead of HW read (due to DPST etc.) */
- /* FIXME: ideally return actual value in case firmware fiddled with
- it */
- return cdv_brightness;
+ return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
}
-
-static int cdv_backlight_setup(struct drm_device *dev)
+static int cdv_get_brightness(struct backlight_device *bd)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
- unsigned long core_clock;
- /* u32 bl_max_freq; */
- /* unsigned long value; */
- u16 bl_max_freq;
- uint32_t value;
- uint32_t blc_pwm_precision_factor;
-
- /* get bl_max_freq and pol from dev_priv*/
- if (!dev_priv->lvds_bl) {
- dev_err(dev->dev, "Has no valid LVDS backlight info\n");
- return -ENOENT;
- }
- bl_max_freq = dev_priv->lvds_bl->freq;
- blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+ struct drm_device *dev = bl_get_data(bd);
+ u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- core_clock = dev_priv->core_freq;
+ if (cdv_backlight_combination_mode(dev)) {
+ u8 lbpc;
- value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
- value *= blc_pwm_precision_factor;
- value /= bl_max_freq;
- value /= blc_pwm_precision_factor;
+ val &= ~1;
+ pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
+ val *= lbpc;
+ }
+ return val;
+}
- if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
- value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
- return -ERANGE;
- else {
- /* FIXME */
+static u32 cdv_get_max_backlight(struct drm_device *dev)
+{
+ u32 max = REG_READ(BLC_PWM_CTL);
+
+ if (max == 0) {
+ DRM_DEBUG_KMS("LVDS Panel PWM value is 0!\n");
+ /* i915 does this, which I believe means we should not smash
+ * PWM control, as firmware will take control of it. */
+ return 1;
}
- return 0;
+
+ max >>= 16;
+ if (cdv_backlight_combination_mode(dev))
+ max *= 0xff;
+ return max;
}
static int cdv_set_brightness(struct backlight_device *bd)
{
+ struct drm_device *dev = bl_get_data(bd);
int level = bd->props.brightness;
+ u32 blc_pwm_ctl;
/* Percentage 1-100% being valid */
if (level < 1)
level = 1;
- /*cdv_intel_lvds_set_brightness(dev, level); FIXME */
- cdv_brightness = level;
+ if (cdv_backlight_combination_mode(dev)) {
+ u32 max = cdv_get_max_backlight(dev);
+ u8 lbpc;
+
+ lbpc = level * 0xfe / max + 1;
+ level /= lbpc;
+
+ pci_write_config_byte(dev->pdev, 0xF4, lbpc);
+ }
+
+ blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
return 0;
}
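In combination mode the requested brightness above is split into a coarse legacy backlight byte (LBPC, written through PCI config space at offset 0xF4) and a PWM duty cycle, so that lbpc * duty roughly reproduces the requested level. A standalone sketch of just that arithmetic, with no register or PCI access (max is the scaled maximum read from BLC_PWM_CTL):

#include <stdint.h>
#include <stdio.h>

/* split 'level' (0..max) into a coarse LBPC byte and a PWM duty cycle */
static void split_brightness(uint32_t level, uint32_t max,
			     uint8_t *lbpc, uint32_t *duty)
{
	*lbpc = level * 0xfe / max + 1;	/* coarse part, 1..0xff */
	*duty = level / *lbpc;		/* fine part for the PWM field */
}

int main(void)
{
	uint8_t lbpc;
	uint32_t duty;

	split_brightness(10000, 25500, &lbpc, &duty);
	printf("lbpc=%u duty=%u (~%u)\n", (unsigned)lbpc, duty, lbpc * duty);
	return 0;
}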
@@ -147,7 +144,6 @@ static const struct backlight_ops cdv_ops = {
static int cdv_backlight_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- int ret;
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -159,14 +155,9 @@ static int cdv_backlight_init(struct drm_device *dev)
if (IS_ERR(cdv_backlight_device))
return PTR_ERR(cdv_backlight_device);
- ret = cdv_backlight_setup(dev);
- if (ret < 0) {
- backlight_device_unregister(cdv_backlight_device);
- cdv_backlight_device = NULL;
- return ret;
- }
- cdv_backlight_device->props.brightness = 100;
- cdv_backlight_device->props.max_brightness = 100;
+ cdv_backlight_device->props.brightness =
+ cdv_get_brightness(cdv_backlight_device);
+ cdv_backlight_device->props.max_brightness = cdv_get_max_backlight(dev);
backlight_update_status(cdv_backlight_device);
dev_priv->backlight_device = cdv_backlight_device;
return 0;
@@ -238,6 +229,19 @@ static void cdv_init_pm(struct drm_device *dev)
dev_err(dev->dev, "GPU: power management timed out.\n");
}
+static void cdv_errata(struct drm_device *dev)
+{
+ /* Disable bonus launch.
+ * The CPU and GPU compete for memory, and the display misses
+ * updates and flickers. It is worst with dual core, dual displays.
+ *
+ * The Windows 7 graphics driver was fixed to disable a feature
+ * called Bonus Launch to work around the issue, at the cost of
+ * some performance.
+ */
+ CDV_MSG_WRITE32(3, 0x30, 0x08027108);
+}
+
/**
* cdv_save_display_registers - save registers lost on suspend
* @dev: our DRM device
@@ -251,7 +255,7 @@ static int cdv_save_display_registers(struct drm_device *dev)
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
- dev_info(dev->dev, "Saving GPU registers.\n");
+ dev_dbg(dev->dev, "Saving GPU registers.\n");
pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB);
@@ -355,7 +359,7 @@ static int cdv_restore_display_registers(struct drm_device *dev)
REG_WRITE(PSB_INT_MASK_R, regs->cdv.saveIMR);
/* Fix arbitration bug */
- CDV_MSG_WRITE32(3, 0x30, 0x08027108);
+ cdv_errata(dev);
drm_mode_config_reset(dev);
@@ -447,13 +451,106 @@ static void cdv_get_core_freq(struct drm_device *dev)
}
}
+static void cdv_hotplug_work_func(struct work_struct *work)
+{
+ struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
+ hotplug_work);
+ struct drm_device *dev = dev_priv->dev;
+
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(dev);
+}
+
+/* The core driver has received a hotplug IRQ. We are in IRQ context
+ so extract the needed information and kick off queued processing */
+
+static int cdv_hotplug_event(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ schedule_work(&dev_priv->hotplug_work);
+ REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+ return 1;
+}
+
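cdv_hotplug_event() runs in IRQ context, so it only acknowledges the status register and schedules cdv_hotplug_work_func(), which later re-probes connectors from process context via drm_helper_hpd_irq_event(). A minimal sketch of that split, with a plain flag standing in for schedule_work():

#include <stdbool.h>

struct hotplug_state {
	volatile bool pending;	/* set in "IRQ" context, cleared later */
};

/* called from the interrupt path: cheap, no sleeping allowed */
void hotplug_irq(struct hotplug_state *st)
{
	st->pending = true;	/* stands in for schedule_work() */
	/* ... write-to-clear the hotplug status register here ... */
}

/* called later from process context */
void hotplug_worker(struct hotplug_state *st)
{
	if (!st->pending)
		return;
	st->pending = false;
	/* ... re-probe connectors and notify userspace ... */
}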
+static void cdv_hotplug_enable(struct drm_device *dev, bool on)
+{
+ if (on) {
+ u32 hotplug = REG_READ(PORT_HOTPLUG_EN);
+ hotplug |= HDMIB_HOTPLUG_INT_EN | HDMIC_HOTPLUG_INT_EN |
+ HDMID_HOTPLUG_INT_EN | CRT_HOTPLUG_INT_EN;
+ REG_WRITE(PORT_HOTPLUG_EN, hotplug);
+ } else {
+ REG_WRITE(PORT_HOTPLUG_EN, 0);
+ REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+ }
+}
+
+/* Cedarview */
+static const struct psb_offset cdv_regmap[2] = {
+ {
+ .fp0 = FPA0,
+ .fp1 = FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = DPLL_A,
+ .dpll_md = DPLL_A_MD,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .base = DSPABASE,
+ .surf = DSPASURF,
+ .addr = DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = FPB0,
+ .fp1 = FPB1,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = DPLL_B,
+ .dpll_md = DPLL_B_MD,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .base = DSPBBASE,
+ .surf = DSPBSURF,
+ .addr = DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ }
+};
+
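The cdv_regmap table above replaces the scattered "pipe == 0 ? REG_A : REG_B" ternaries with one per-pipe structure of register offsets that the rest of the driver indexes by pipe number. A reduced sketch of the idea; the offsets below are placeholders, not the real Cedarview values:

#include <stdint.h>

struct pipe_regs {
	uint32_t conf;
	uint32_t src;
	uint32_t stride;
	uint32_t base;
};

static const struct pipe_regs regmap[2] = {
	{ .conf = 0x1000, .src = 0x1004, .stride = 0x1008, .base = 0x100c },
	{ .conf = 0x2000, .src = 0x2004, .stride = 0x2008, .base = 0x200c },
};

/* one lookup replaces a per-register ternary on the pipe number */
uint32_t pipe_conf_reg(int pipe)
{
	return regmap[pipe].conf;	/* caller guarantees pipe is 0 or 1 */
}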
static int cdv_chip_setup(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
+
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->regmap = cdv_regmap;
cdv_get_core_freq(dev);
- gma_intel_opregion_init(dev);
+ psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
- REG_WRITE(PORT_HOTPLUG_EN, 0);
- REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+ cdv_hotplug_enable(dev, false);
return 0;
}
@@ -464,13 +561,19 @@ const struct psb_ops cdv_chip_ops = {
.accel_2d = 0,
.pipes = 2,
.crtcs = 2,
+ .hdmi_mask = (1 << 0) | (1 << 1),
+ .lvds_mask = (1 << 1),
+ .cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
.chip_setup = cdv_chip_setup,
+ .errata = cdv_errata,
.crtc_helper = &cdv_intel_helper_funcs,
.crtc_funcs = &cdv_intel_crtc_funcs,
.output_init = cdv_output_init,
+ .hotplug = cdv_hotplug_event,
+ .hotplug_enable = cdv_hotplug_enable,
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
.backlight_init = cdv_backlight_init,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index a71a6cd95bdd..187422018601 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -67,8 +67,6 @@ static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
- int max_clock = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -77,18 +75,9 @@ static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_LOW;
/* The max clock for CDV is 355 MHz instead of 400 MHz */
- max_clock = 355000;
- if (mode->clock > max_clock)
+ if (mode->clock > 355000)
return MODE_CLOCK_HIGH;
- if (mode->hdisplay > 1680 || mode->vdisplay > 1050)
- return MODE_PANEL;
-
- /* We assume worst case scenario of 32 bpp here, since we don't know */
- if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
- dev_priv->vram_stolen_size)
- return MODE_MEM;
-
return MODE_OK;
}
@@ -156,13 +145,7 @@ static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
u32 hotplug_en;
int i, tries = 0, ret = false;
- u32 adpa_orig;
-
- /* disable the DAC when doing the hotplug detection */
-
- adpa_orig = REG_READ(ADPA);
-
- REG_WRITE(ADPA, adpa_orig & ~(ADPA_DAC_ENABLE));
+ u32 orig;
/*
* On CDV, the CRT detect sequence needs to be done twice
@@ -170,7 +153,7 @@ static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
*/
tries = 2;
- hotplug_en = REG_READ(PORT_HOTPLUG_EN);
+ orig = hotplug_en = REG_READ(PORT_HOTPLUG_EN);
hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
@@ -195,8 +178,11 @@ static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
CRT_HOTPLUG_MONITOR_NONE)
ret = true;
- /* Restore the saved ADPA */
- REG_WRITE(ADPA, adpa_orig);
+ /* clear the interrupt we just generated, if any */
+ REG_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+
+ /* and put the bits back */
+ REG_WRITE(PORT_HOTPLUG_EN, orig);
return ret;
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index be8455919b33..c3e9a0f701df 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -216,22 +216,22 @@ static void cdv_sb_reset(struct drm_device *dev)
*/
static int
cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
- struct cdv_intel_clock_t *clock)
+ struct cdv_intel_clock_t *clock, bool is_lvds)
{
- struct psb_intel_crtc *psb_crtc =
- to_psb_intel_crtc(crtc);
+ struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_crtc->pipe;
u32 m, n_vco, p;
int ret = 0;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int ref_sfr = (pipe == 0) ? SB_REF_DPLLA : SB_REF_DPLLB;
u32 ref_value;
+ u32 lane_reg, lane_value;
cdv_sb_reset(dev);
- if ((REG_READ(dpll_reg) & DPLL_SYNCLOCK_ENABLE) == 0) {
- DRM_ERROR("Attempting to set DPLL with refclk disabled\n");
- return -EBUSY;
- }
+ REG_WRITE(dpll_reg, DPLL_SYNCLOCK_ENABLE | DPLL_VGA_MODE_DIS);
+
+ udelay(100);
/* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
ref_value = 0x68A701;
@@ -241,6 +241,35 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
/* We don't know what the other fields of these regs are, so
* leave them in place.
*/
+ /*
+ * Bits 14:13 of 0x8010/0x8030 select the reference clock for
+ * pipe A/B. Display spec 1.06 has the wrong definition; the
+ * correct one is:
+ *
+ * refclka means "use the clock from the same PLL"
+ *
+ * if DPLLA is set to 01 and DPLLB to 01, each uses its own PLL's clock
+ *
+ * if DPLLA is set to 01 and DPLLB to 02, both use the clock from DPLLA
+ *
+ */
+ ret = cdv_sb_read(dev, ref_sfr, &ref_value);
+ if (ret)
+ return ret;
+ ref_value &= ~(REF_CLK_MASK);
+
+ /* use DPLL_A for pipeB on CRT/HDMI */
+ if (pipe == 1 && !is_lvds) {
+ DRM_DEBUG_KMS("use DPLLA for pipe B\n");
+ ref_value |= REF_CLK_DPLLA;
+ } else {
+ DRM_DEBUG_KMS("use their DPLL for pipe A/B\n");
+ ref_value |= REF_CLK_DPLL;
+ }
+ ret = cdv_sb_write(dev, ref_sfr, ref_value);
+ if (ret)
+ return ret;
+
ret = cdv_sb_read(dev, SB_M(pipe), &m);
if (ret)
return ret;
@@ -307,36 +336,29 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
if (ret)
return ret;
- /* always Program the Lane Register for the Pipe A*/
- if (pipe == 0) {
- /* Program the Lane0/1 for HDMI B */
- u32 lane_reg, lane_value;
-
- lane_reg = PSB_LANE0;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE;
- cdv_sb_write(dev, lane_reg, lane_value);
-
- lane_reg = PSB_LANE1;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE;
- cdv_sb_write(dev, lane_reg, lane_value);
-
- /* Program the Lane2/3 for HDMI C */
- lane_reg = PSB_LANE2;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE;
- cdv_sb_write(dev, lane_reg, lane_value);
-
- lane_reg = PSB_LANE3;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE;
- cdv_sb_write(dev, lane_reg, lane_value);
- }
+ lane_reg = PSB_LANE0;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE1;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE2;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE3;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
return 0;
}
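The four lane writes above repeat one read-modify-write sequence per lane register, now also selecting the pipe via LANE_PLL_PIPE(pipe). The repeated pattern could equally be expressed as a loop; a sketch over cached lane values (not the sideband bus), with illustrative mask/enable constants:

#include <stdint.h>
#include <stddef.h>

#define LANE_PLL_MASK	(0x7u << 20)	/* illustrative field position */
#define LANE_PLL_ENABLE	(0x1u << 20)

/* apply the same enable + pipe-select update to each cached lane value */
void enable_lane_plls(uint32_t *lane_vals, size_t n, uint32_t pipe_sel)
{
	for (size_t i = 0; i < n; i++) {
		lane_vals[i] &= ~LANE_PLL_MASK;
		lane_vals[i] |= LANE_PLL_ENABLE | pipe_sel;
	}
}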
@@ -480,14 +502,12 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
int ret = 0;
@@ -509,9 +529,9 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
start = psbfb->gtt->offset;
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -533,15 +553,15 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
ret = -EINVAL;
goto psb_intel_pipe_set_base_exit;
}
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
dev_dbg(dev->dev,
"Writing base %08lX %08lX %d %d\n", start, offset, x, y);
- REG_WRITE(dspbase, offset);
- REG_READ(dspbase);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
+ REG_WRITE(map->base, offset);
+ REG_READ(map->base);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
psb_intel_pipe_cleaner:
/* If there was a previous display we can now unpin it */
@@ -553,6 +573,199 @@ psb_intel_pipe_set_base_exit:
return ret;
}
+#define FIFO_PIPEA (1 << 0)
+#define FIFO_PIPEB (1 << 1)
+
+static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
+{
+ struct drm_crtc *crtc;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = NULL;
+
+ crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+ if (crtc->fb == NULL || !psb_intel_crtc->active)
+ return false;
+ return true;
+}
+
+static bool cdv_intel_single_pipe_active(struct drm_device *dev)
+{
+ uint32_t pipe_enabled = 0;
+
+ if (cdv_intel_pipe_enabled(dev, 0))
+ pipe_enabled |= FIFO_PIPEA;
+
+ if (cdv_intel_pipe_enabled(dev, 1))
+ pipe_enabled |= FIFO_PIPEB;
+
+ DRM_DEBUG_KMS("pipe enabled %x\n", pipe_enabled);
+
+ if (pipe_enabled == FIFO_PIPEA || pipe_enabled == FIFO_PIPEB)
+ return true;
+ else
+ return false;
+}
+
+static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ if (psb_intel_crtc->pipe != 1)
+ return false;
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+
+ if (!connector->encoder
+ || connector->encoder->crtc != crtc)
+ continue;
+
+ if (psb_intel_encoder->type == INTEL_OUTPUT_LVDS)
+ return true;
+ }
+
+ return false;
+}
+
+static void cdv_intel_disable_self_refresh(struct drm_device *dev)
+{
+ if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
+
+ /* Disable self-refresh before adjust WM */
+ REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN));
+ REG_READ(FW_BLC_SELF);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ /* Cedarview workaround: writing the overlay plane forces the
+ * hardware to leave MAX_FIFO state.
+ */
+ REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/);
+ REG_READ(OV_OVADD);
+
+ cdv_intel_wait_for_vblank(dev);
+ }
+
+}
+
+static void cdv_intel_update_watermark(struct drm_device *dev, struct drm_crtc *crtc)
+{
+
+ if (cdv_intel_single_pipe_active(dev)) {
+ u32 fw;
+
+ fw = REG_READ(DSPFW1);
+ fw &= ~DSP_FIFO_SR_WM_MASK;
+ fw |= (0x7e << DSP_FIFO_SR_WM_SHIFT);
+ fw &= ~CURSOR_B_FIFO_WM_MASK;
+ fw |= (0x4 << CURSOR_B_FIFO_WM_SHIFT);
+ REG_WRITE(DSPFW1, fw);
+
+ fw = REG_READ(DSPFW2);
+ fw &= ~CURSOR_A_FIFO_WM_MASK;
+ fw |= (0x6 << CURSOR_A_FIFO_WM_SHIFT);
+ fw &= ~DSP_PLANE_C_FIFO_WM_MASK;
+ fw |= (0x8 << DSP_PLANE_C_FIFO_WM_SHIFT);
+ REG_WRITE(DSPFW2, fw);
+
+ REG_WRITE(DSPFW3, 0x36000000);
+
+ /* ignore FW4 */
+
+ if (is_pipeb_lvds(dev, crtc)) {
+ REG_WRITE(DSPFW5, 0x00040330);
+ } else {
+ fw = (3 << DSP_PLANE_B_FIFO_WM1_SHIFT) |
+ (4 << DSP_PLANE_A_FIFO_WM1_SHIFT) |
+ (3 << CURSOR_B_FIFO_WM1_SHIFT) |
+ (4 << CURSOR_FIFO_SR_WM1_SHIFT);
+ REG_WRITE(DSPFW5, fw);
+ }
+
+ REG_WRITE(DSPFW6, 0x10);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ /* enable self-refresh for single pipe active */
+ REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ REG_READ(FW_BLC_SELF);
+ cdv_intel_wait_for_vblank(dev);
+
+ } else {
+
+ /* HW team suggested values... */
+ REG_WRITE(DSPFW1, 0x3f880808);
+ REG_WRITE(DSPFW2, 0x0b020202);
+ REG_WRITE(DSPFW3, 0x24000000);
+ REG_WRITE(DSPFW4, 0x08030202);
+ REG_WRITE(DSPFW5, 0x01010101);
+ REG_WRITE(DSPFW6, 0x1d0);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ cdv_intel_disable_self_refresh(dev);
+
+ }
+}
+
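The watermark update above only enables memory self-refresh when exactly one of the two pipes is scanning out; with both pipes active it falls back to the fixed "HW team suggested" values and keeps self-refresh off. The gating test reduces to "exactly one bit set", roughly:

#include <stdbool.h>
#include <stdint.h>

#define FIFO_PIPEA (1u << 0)
#define FIFO_PIPEB (1u << 1)

/* true when precisely one pipe is active (A alone or B alone) */
bool single_pipe_active(bool pipe_a_on, bool pipe_b_on)
{
	uint32_t enabled = 0;

	if (pipe_a_on)
		enabled |= FIFO_PIPEA;
	if (pipe_b_on)
		enabled |= FIFO_PIPEB;

	return enabled == FIFO_PIPEA || enabled == FIFO_PIPEB;
}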
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int palreg = PALETTE_A;
+ int i;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled)
+ return;
+
+ switch (psb_intel_crtc->pipe) {
+ case 0:
+ break;
+ case 1:
+ palreg = PALETTE_B;
+ break;
+ case 2:
+ palreg = PALETTE_C;
+ break;
+ default:
+ dev_err(dev->dev, "Illegal Pipe Number.\n");
+ return;
+ }
+
+ if (gma_power_begin(dev, false)) {
+ for (i = 0; i < 256; i++) {
+ REG_WRITE(palreg + 4 * i,
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]));
+ }
+ gma_power_end(dev);
+ } else {
+ for (i = 0; i < 256; i++) {
+ dev_priv->regs.pipe[0].palette[i] =
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]);
+ }
+
+ }
+}
+
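cdv_intel_crtc_load_lut() above packs each gamma entry into one 32-bit word as 0x00RRGGBB, adding a per-entry adjustment to every channel (no clamping is applied, mirroring the driver). The packing itself:

#include <stdint.h>

/* pack one LUT entry as 0x00RRGGBB with a per-entry adjustment added */
uint32_t pack_lut_entry(uint8_t r, uint8_t g, uint8_t b, uint8_t adj)
{
	return ((uint32_t)(r + adj) << 16) |
	       ((uint32_t)(g + adj) << 8) |
		(uint32_t)(b + adj);
}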
/**
* Sets the power management mode of the pipe and plane.
*
@@ -562,62 +775,80 @@ psb_intel_pipe_set_base_exit:
static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
*/
+ cdv_intel_disable_self_refresh(dev);
+
switch (mode) {
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
+ if (psb_intel_crtc->active)
+ return;
+
+ psb_intel_crtc->active = true;
+
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
}
/* Jim Bish - switch plan and pipe per scott */
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
udelay(150);
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+ REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
+
+ temp = REG_READ(map->status);
+ temp &= ~(0xFFFF);
+ temp |= PIPE_FIFO_UNDERRUN;
+ REG_WRITE(map->status, temp);
+ REG_READ(map->status);
- psb_intel_crtc_load_lut(crtc);
+ cdv_intel_update_watermark(dev, crtc);
+ cdv_intel_crtc_load_lut(crtc);
/* Give the overlay scaler a chance to enable
* if it's on this pipe */
/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+ psb_intel_crtc->crtc_enable = true;
break;
case DRM_MODE_DPMS_OFF:
+ if (!psb_intel_crtc->active)
+ return;
+
+ psb_intel_crtc->active = false;
+
/* Give the overlay scaler a chance to disable
* if it's on this pipe */
/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
@@ -627,14 +858,15 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Jim Bish - changed pipe/plane here as well. */
+ drm_vblank_off(dev, pipe);
/* Wait for vblank for the disable to take effect */
cdv_intel_wait_for_vblank(dev);
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+ REG_READ(map->conf);
}
/* Wait for vblank for the disable to take effect. */
@@ -643,23 +875,25 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(150);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
}
/* Wait for the clocks to turn off. */
udelay(150);
+ cdv_intel_update_watermark(dev, crtc);
+ psb_intel_crtc->crtc_enable = false;
break;
}
/*Set FIFO Watermarks*/
@@ -709,21 +943,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk;
struct cdv_intel_clock_t clock;
u32 dpll = 0, dspcntr, pipeconf;
@@ -757,13 +980,18 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
- refclk = 96000;
-
- /* Hack selection about ref clk for CRT */
- /* Select 27MHz as the reference clk for HDMI */
- if (is_crt || is_hdmi)
+ if (dev_priv->dplla_96mhz)
+ /* low-end SKU, 96/100 MHz */
+ refclk = 96000;
+ else
+ /* high-end SKU, 27/100 MHz */
refclk = 27000;
+ if (is_lvds && dev_priv->lvds_use_ssc) {
+ refclk = dev_priv->lvds_ssc_freq * 1000;
+ DRM_DEBUG_KMS("Use SSC reference clock %d Mhz\n", dev_priv->lvds_ssc_freq);
+ }
+
drm_mode_debug_printmodeline(adjusted_mode);
ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
@@ -779,18 +1007,17 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
}
- dpll |= PLL_REF_INPUT_DREFCLK;
+/* dpll |= PLL_REF_INPUT_DREFCLK; */
dpll |= DPLL_SYNCLOCK_ENABLE;
- dpll |= DPLL_VGA_MODE_DIS;
- if (is_lvds)
+/* if (is_lvds)
dpll |= DPLLB_MODE_LVDS;
else
- dpll |= DPLLB_MODE_DAC_SERIAL;
+ dpll |= DPLLB_MODE_DAC_SERIAL; */
/* dpll |= (2 << 11); */
/* setup pipeconf */
- pipeconf = REG_READ(pipeconf_reg);
+ pipeconf = REG_READ(map->conf);
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -803,10 +1030,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
dspcntr |= DISPLAY_PLANE_ENABLE;
pipeconf |= PIPEACONF_ENABLE;
- REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
+ REG_READ(map->dpll);
- cdv_dpll_set_clock_cdv(dev, crtc, &clock);
+ cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds);
udelay(150);
@@ -848,48 +1075,48 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
- REG_WRITE(dpll_reg,
- (REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll,
+ (REG_READ(map->dpll) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150); /* 42 usec w/o calibration, 110 with. rounded up. */
- if (!(REG_READ(dpll_reg) & DPLL_LOCK)) {
+ if (!(REG_READ(map->dpll) & DPLL_LOCK)) {
dev_err(dev->dev, "Failed to get DPLL lock\n");
return -EBUSY;
}
{
int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- REG_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+ REG_WRITE(map->dpll_md, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
}
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
- REG_WRITE(dspsize_reg,
+ REG_WRITE(map->size,
((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
- REG_WRITE(dsppos_reg, 0);
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->pos, 0);
+ REG_WRITE(map->src,
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, pipeconf);
+ REG_READ(map->conf);
cdv_intel_wait_for_vblank(dev);
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
/* Flush the plane changes */
{
@@ -903,58 +1130,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
return 0;
}
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int palreg = PALETTE_A;
- int i;
-
- /* The clocks have to be on to load the palette. */
- if (!crtc->enabled)
- return;
-
- switch (psb_intel_crtc->pipe) {
- case 0:
- break;
- case 1:
- palreg = PALETTE_B;
- break;
- case 2:
- palreg = PALETTE_C;
- break;
- default:
- dev_err(dev->dev, "Illegal Pipe Number.\n");
- return;
- }
-
- if (gma_power_begin(dev, false)) {
- for (i = 0; i < 256; i++) {
- REG_WRITE(palreg + 4 * i,
- ((psb_intel_crtc->lut_r[i] +
- psb_intel_crtc->lut_adj[i]) << 16) |
- ((psb_intel_crtc->lut_g[i] +
- psb_intel_crtc->lut_adj[i]) << 8) |
- (psb_intel_crtc->lut_b[i] +
- psb_intel_crtc->lut_adj[i]));
- }
- gma_power_end(dev);
- } else {
- for (i = 0; i < 256; i++) {
- dev_priv->regs.psb.save_palette_a[i] =
- ((psb_intel_crtc->lut_r[i] +
- psb_intel_crtc->lut_adj[i]) << 16) |
- ((psb_intel_crtc->lut_g[i] +
- psb_intel_crtc->lut_adj[i]) << 8) |
- (psb_intel_crtc->lut_b[i] +
- psb_intel_crtc->lut_adj[i]);
- }
-
- }
-}
/**
* Save HW states of giving crtc
@@ -962,11 +1137,10 @@ static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
static void cdv_intel_crtc_save(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -975,25 +1149,25 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
return;
}
- crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
- crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
- crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
- crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
- crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
- crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
- crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
- crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
- crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
- crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
- crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
- crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
- crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+ crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+ crtc_state->savePIPECONF = REG_READ(map->conf);
+ crtc_state->savePIPESRC = REG_READ(map->src);
+ crtc_state->saveFP0 = REG_READ(map->fp0);
+ crtc_state->saveFP1 = REG_READ(map->fp1);
+ crtc_state->saveDPLL = REG_READ(map->dpll);
+ crtc_state->saveHTOTAL = REG_READ(map->htotal);
+ crtc_state->saveHBLANK = REG_READ(map->hblank);
+ crtc_state->saveHSYNC = REG_READ(map->hsync);
+ crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+ crtc_state->saveVBLANK = REG_READ(map->vblank);
+ crtc_state->saveVSYNC = REG_READ(map->vsync);
+ crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
/*NOTE: DSPSIZE DSPPOS only for psb*/
- crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
- crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+ crtc_state->saveDSPSIZE = REG_READ(map->size);
+ crtc_state->saveDSPPOS = REG_READ(map->pos);
- crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+ crtc_state->saveDSPBASE = REG_READ(map->base);
DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
crtc_state->saveDSPCNTR,
@@ -1014,7 +1188,7 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
crtc_state->saveDSPBASE
);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
}
@@ -1025,12 +1199,10 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private * dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -1041,23 +1213,23 @@ static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
DRM_DEBUG(
"current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
- REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
- REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
- REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
- REG_READ(pipeA ? FPA0 : FPB0),
- REG_READ(pipeA ? FPA1 : FPB1),
- REG_READ(pipeA ? DPLL_A : DPLL_B),
- REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
- REG_READ(pipeA ? HBLANK_A : HBLANK_B),
- REG_READ(pipeA ? HSYNC_A : HSYNC_B),
- REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
- REG_READ(pipeA ? VBLANK_A : VBLANK_B),
- REG_READ(pipeA ? VSYNC_A : VSYNC_B),
- REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
- REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
- REG_READ(pipeA ? DSPAPOS : DSPBPOS),
- REG_READ(pipeA ? DSPABASE : DSPBBASE)
- );
+ REG_READ(map->cntr),
+ REG_READ(map->conf),
+ REG_READ(map->src),
+ REG_READ(map->fp0),
+ REG_READ(map->fp1),
+ REG_READ(map->dpll),
+ REG_READ(map->htotal),
+ REG_READ(map->hblank),
+ REG_READ(map->hsync),
+ REG_READ(map->vtotal),
+ REG_READ(map->vblank),
+ REG_READ(map->vsync),
+ REG_READ(map->stride),
+ REG_READ(map->size),
+ REG_READ(map->pos),
+ REG_READ(map->base)
+ );
DRM_DEBUG(
"saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
@@ -1077,51 +1249,51 @@ static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
crtc_state->saveDSPSIZE,
crtc_state->saveDSPPOS,
crtc_state->saveDSPBASE
- );
+ );
if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
- REG_WRITE(pipeA ? DPLL_A : DPLL_B,
- crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_WRITE(map->dpll,
+ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
DRM_DEBUG("write dpll: %x\n",
- REG_READ(pipeA ? DPLL_A : DPLL_B));
+ REG_READ(map->dpll));
udelay(150);
}
- REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
- REG_READ(pipeA ? FPA0 : FPB0);
+ REG_WRITE(map->fp0, crtc_state->saveFP0);
+ REG_READ(map->fp0);
- REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
- REG_READ(pipeA ? FPA1 : FPB1);
+ REG_WRITE(map->fp1, crtc_state->saveFP1);
+ REG_READ(map->fp1);
- REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_WRITE(map->dpll, crtc_state->saveDPLL);
+ REG_READ(map->dpll);
udelay(150);
- REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
- REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
- REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
- REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
- REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
- REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
- REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+ REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+ REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+ REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+ REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+ REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+ REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+ REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
- REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
- REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+ REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+ REG_WRITE(map->pos, crtc_state->saveDSPPOS);
- REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
- REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+ REG_WRITE(map->src, crtc_state->savePIPESRC);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
+ REG_WRITE(map->conf, crtc_state->savePIPECONF);
cdv_intel_wait_for_vblank(dev);
- REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+ REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
cdv_intel_wait_for_vblank(dev);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
}
@@ -1296,35 +1468,30 @@ static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
static int cdv_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
u32 fp;
struct cdv_intel_clock_t clock;
bool is_lvds;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
if (gma_power_begin(dev, false)) {
- dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ dpll = REG_READ(map->dpll);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+ fp = REG_READ(map->fp0);
else
- fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+ fp = REG_READ(map->fp1);
is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
gma_power_end(dev);
} else {
- dpll = (pipe == 0) ?
- dev_priv->regs.psb.saveDPLL_A :
- dev_priv->regs.psb.saveDPLL_B;
-
+ dpll = p->dpll;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA0 :
- dev_priv->regs.psb.saveFPB0;
+ fp = p->fp0;
else
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA1 :
- dev_priv->regs.psb.saveFPB1;
+ fp = p->fp1;
is_lvds = (pipe == 1) &&
(dev_priv->regs.psb.saveLVDS & LVDS_PORT_EN);
@@ -1382,32 +1549,26 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
{
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
struct drm_display_mode *mode;
int htot;
int hsync;
int vtot;
int vsync;
- struct drm_psb_private *dev_priv = dev->dev_private;
if (gma_power_begin(dev, false)) {
- htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
- hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
- vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
- vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ htot = REG_READ(map->htotal);
+ hsync = REG_READ(map->hsync);
+ vtot = REG_READ(map->vtotal);
+ vsync = REG_READ(map->vsync);
gma_power_end(dev);
} else {
- htot = (pipe == 0) ?
- dev_priv->regs.psb.saveHTOTAL_A :
- dev_priv->regs.psb.saveHTOTAL_B;
- hsync = (pipe == 0) ?
- dev_priv->regs.psb.saveHSYNC_A :
- dev_priv->regs.psb.saveHSYNC_B;
- vtot = (pipe == 0) ?
- dev_priv->regs.psb.saveVTOTAL_A :
- dev_priv->regs.psb.saveVTOTAL_B;
- vsync = (pipe == 0) ?
- dev_priv->regs.psb.saveVSYNC_A :
- dev_priv->regs.psb.saveVSYNC_B;
+ htot = p->htotal;
+ hsync = p->hsync;
+ vtot = p->vtotal;
+ vsync = p->vsync;
}
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
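The hunks above repeatedly replace open-coded `pipe ? REG_A : REG_B` selections with a single per-pipe offset table lookup. A minimal standalone sketch of that lookup pattern follows; the struct layout and register offsets are illustrative stand-ins, not the driver's real psb_offset definition.

#include <stdio.h>

/* Illustrative stand-in for the driver's per-pipe register map */
struct pipe_map {
	unsigned int dpll;
	unsigned int htotal;
	unsigned int conf;
};

/* Hypothetical MMIO offsets for pipes A and B */
static const struct pipe_map regmap[2] = {
	{ .dpll = 0x06014, .htotal = 0x60000, .conf = 0x70008 },
	{ .dpll = 0x06018, .htotal = 0x61000, .conf = 0x71008 },
};

int main(void)
{
	for (int pipe = 0; pipe < 2; pipe++) {
		const struct pipe_map *map = &regmap[pipe];

		/* One table lookup replaces chains of (pipe == 0) ? REG_A : REG_B */
		printf("pipe %d: dpll=%#x htotal=%#x conf=%#x\n",
		       pipe, map->dpll, map->htotal, map->conf);
	}
	return 0;
}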
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 8d5269555005..88b59d4a7b7f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -242,8 +242,6 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
static int cdv_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
-
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
@@ -257,11 +255,6 @@ static int cdv_hdmi_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
- /* We assume worst case scenario of 32 bpp here, since we don't know */
- if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
- dev_priv->vram_stolen_size)
- return MODE_MEM;
-
return MODE_OK;
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 8359c1a3f45f..ff5b58eb878c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -356,6 +356,8 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(
+ encoder->crtc);
u32 pfit_control;
/*
@@ -377,6 +379,8 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
else
pfit_control = 0;
+ pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
+
if (dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
@@ -552,10 +556,60 @@ static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
drm_encoder_cleanup(encoder);
}
-const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
+static const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
.destroy = cdv_intel_lvds_enc_destroy,
};
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the LVDS is present.
+ * If it is present, return true.
+ * If it is not present, return false.
+ * If no child dev is parsed from VBT, it assumes that the LVDS is present.
+ */
+static bool lvds_is_present_in_vbt(struct drm_device *dev,
+ u8 *i2c_pin)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (!dev_priv->child_dev_num)
+ return true;
+
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ struct child_device_config *child = dev_priv->child_dev + i;
+
+ /* If the device type is not LFP, continue.
+ * We have to check both the new identifiers as well as the
+ * old for compatibility with some BIOSes.
+ */
+ if (child->device_type != DEVICE_TYPE_INT_LFP &&
+ child->device_type != DEVICE_TYPE_LFP)
+ continue;
+
+ if (child->i2c_pin)
+ *i2c_pin = child->i2c_pin;
+
+ /* However, we cannot trust the BIOS writers to populate
+ * the VBT correctly. Since LVDS requires additional
+ * information from AIM blocks, a non-zero addin offset is
+ * a good indicator that the LVDS is actually present.
+ */
+ if (child->addin_offset)
+ return true;
+
+ /* But even then some BIOS writers perform some black magic
+ * and instantiate the device without reference to any
+ * additional data. Trust that if the VBT was written into
+ * the OpRegion then they have validated the LVDS's existence.
+ */
+ if (dev_priv->opregion.vbt)
+ return true;
+ }
+
+ return false;
+}
+
/**
* cdv_intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
@@ -576,6 +630,13 @@ void cdv_intel_lvds_init(struct drm_device *dev,
struct drm_psb_private *dev_priv = dev->dev_private;
u32 lvds;
int pipe;
+ u8 pin;
+
+ pin = GMBUS_PORT_PANEL;
+ if (!lvds_is_present_in_vbt(dev, &pin)) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ return;
+ }
psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
GFP_KERNEL);
@@ -710,6 +771,19 @@ void cdv_intel_lvds_init(struct drm_device *dev,
goto failed_find;
}
+ /* setup PWM */
+ {
+ u32 pwm;
+
+ pwm = REG_READ(BLC_PWM_CTL2);
+ if (pipe == 1)
+ pwm |= PWM_PIPE_B;
+ else
+ pwm &= ~PWM_PIPE_B;
+ pwm |= PWM_ENABLE;
+ REG_WRITE(BLC_PWM_CTL2, pwm);
+ }
+
out:
drm_sysfs_connector_add(connector);
return;
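The LVDS presence check added above walks the VBT child device list and applies a few heuristics in order. A self-contained sketch of the same decision order follows; the struct is a simplified stand-in for child_device_config, not the real VBT layout, and only the two device-type values are taken from the patch.

#include <stdbool.h>
#include <stdio.h>

struct fake_child {		/* simplified stand-in for child_device_config */
	unsigned short device_type;
	unsigned short addin_offset;
};

#define TYPE_INT_LFP	0x1022	/* values copied from the patch */
#define TYPE_LFP	0x22

static bool lvds_present(const struct fake_child *devs, int n, bool opregion_vbt)
{
	if (n == 0)		/* no child list parsed: assume LVDS exists */
		return true;

	for (int i = 0; i < n; i++) {
		if (devs[i].device_type != TYPE_INT_LFP &&
		    devs[i].device_type != TYPE_LFP)
			continue;
		if (devs[i].addin_offset)	/* AIM block present */
			return true;
		if (opregion_vbt)		/* VBT came from the OpRegion */
			return true;
	}
	return false;
}

int main(void)
{
	struct fake_child devs[] = {
		{ .device_type = 0x01, .addin_offset = 0 },	/* CRT */
		{ .device_type = TYPE_LFP, .addin_offset = 0 },	/* LFP, no AIM */
	};
	printf("LVDS present: %d\n", lvds_present(devs, 2, false));
	return 0;
}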
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 8ea202f1ba50..5732b5702e1c 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -153,7 +153,7 @@ static void psbfb_vm_close(struct vm_area_struct *vma)
{
}
-static struct vm_operations_struct psbfb_vm_ops = {
+static const struct vm_operations_struct psbfb_vm_ops = {
.fault = psbfb_vm_fault,
.open = psbfb_vm_open,
.close = psbfb_vm_close
@@ -408,6 +408,8 @@ static int psbfb_create(struct psb_fbdev *fbdev,
return -ENOMEM;
}
+ memset(dev_priv->vram_addr + backing->offset, 0, size);
+
mutex_lock(&dev->struct_mutex);
info = framebuffer_alloc(0, device);
@@ -453,8 +455,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->fix.ypanstep = 0;
/* Accessed stolen memory directly */
- info->screen_base = (char *)dev_priv->vram_addr +
- backing->offset;
+ info->screen_base = dev_priv->vram_addr + backing->offset;
info->screen_size = size;
if (dev_priv->gtt.stolen_size) {
@@ -475,7 +476,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- dev_info(dev->dev, "allocated %dx%d fb\n",
+ dev_dbg(dev->dev, "allocated %dx%d fb\n",
psbfb->base.width, psbfb->base.height);
mutex_unlock(&dev->struct_mutex);
@@ -543,9 +544,25 @@ static int psbfb_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
+ struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
int new_fb = 0;
+ int bytespp;
int ret;
+ bytespp = sizes->surface_bpp / 8;
+ if (bytespp == 3) /* no 24bit packed */
+ bytespp = 4;
+
+ /* If the mode will not fit at 32bpp then switch to 16bpp to get
+ a console at full resolution. The X mode setting server will
+ allocate its own 32bpp GEM framebuffer */
+ if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
+ dev_priv->vram_stolen_size) {
+ sizes->surface_bpp = 16;
+ sizes->surface_depth = 16;
+ }
+
if (!helper->fb) {
ret = psbfb_create(psb_fbdev, sizes);
if (ret)
@@ -555,7 +572,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
return new_fb;
}
-struct drm_fb_helper_funcs psb_fb_helper_funcs = {
+static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
.gamma_set = psbfb_gamma_set,
.gamma_get = psbfb_gamma_get,
.fb_probe = psbfb_probe,
@@ -732,10 +749,7 @@ static void psb_setup_outputs(struct drm_device *dev)
clone_mask = (1 << INTEL_OUTPUT_SDVO);
break;
case INTEL_OUTPUT_LVDS:
- if (IS_MRST(dev))
- crtc_mask = (1 << 0);
- else
- crtc_mask = (1 << 1);
+ crtc_mask = dev_priv->ops->lvds_mask;
clone_mask = (1 << INTEL_OUTPUT_LVDS);
break;
case INTEL_OUTPUT_MIPI:
@@ -747,10 +761,7 @@ static void psb_setup_outputs(struct drm_device *dev)
clone_mask = (1 << INTEL_OUTPUT_MIPI2);
break;
case INTEL_OUTPUT_HDMI:
- if (IS_MFLD(dev))
- crtc_mask = (1 << 1);
- else
- crtc_mask = (1 << 0);
+ crtc_mask = dev_priv->ops->hdmi_mask;
clone_mask = (1 << INTEL_OUTPUT_HDMI);
break;
}
@@ -771,7 +782,7 @@ void psb_modeset_init(struct drm_device *dev)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- dev->mode_config.funcs = (void *) &psb_mode_funcs;
+ dev->mode_config.funcs = &psb_mode_funcs;
/* set memory base */
/* Oaktrail and Poulsbo should use BAR 2*/
@@ -786,15 +797,23 @@ void psb_modeset_init(struct drm_device *dev)
dev->mode_config.max_height = 2048;
psb_setup_outputs(dev);
+
+ if (dev_priv->ops->errata)
+ dev_priv->ops->errata(dev);
+
+ dev_priv->modeset = true;
}
void psb_modeset_cleanup(struct drm_device *dev)
{
- mutex_lock(&dev->struct_mutex);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->modeset) {
+ mutex_lock(&dev->struct_mutex);
- drm_kms_helper_poll_fini(dev);
- psb_fbdev_fini(dev);
- drm_mode_config_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
+ psb_fbdev_fini(dev);
+ drm_mode_config_cleanup(dev);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
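The psbfb_probe() change above falls back to a 16bpp console when a 32bpp framebuffer of the requested size would not fit in stolen memory. A standalone sketch of that size check follows; the resolution and stolen size are arbitrary example numbers.

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long width = 1920, height = 1080, bpp = 32;
	unsigned long vram_stolen_size = 4UL * 1024 * 1024;	/* example: 4 MiB */

	unsigned long bytespp = bpp / 8;
	if (bytespp == 3)		/* no 24bit packed */
		bytespp = 4;

	/* Stride is aligned to 64 bytes, as in the patch */
	unsigned long need = ALIGN_UP(width * bytespp, 64) * height;
	if (need > vram_stolen_size)
		bpp = 16;		/* fall back so the console still fits */

	printf("need %lu bytes, stolen %lu -> using %lu bpp\n",
	       need, vram_stolen_size, bpp);
	return 0;
}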
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 9fbb86868e2e..fc7d144bc2d3 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -124,6 +124,8 @@ static int psb_gem_create(struct drm_file *file,
dev_err(dev->dev, "GEM init failed for %lld\n", size);
return -ENOMEM;
}
+ /* Limit the object to 32bit mappings */
+ mapping_set_gfp_mask(r->gem.filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
/* Give the object a handle so we can carry it more easily */
ret = drm_gem_handle_create(file, &r->gem, &handle);
if (ret) {
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index c6465b40090f..04a371aceb34 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -39,6 +39,10 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
uint32_t mask = PSB_PTE_VALID;
+ /* Ensure we blow up rather than put an invalid, truncated mapping
+ of a page above 4GiB into the gtt */
+ BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));
+
if (type & PSB_MMU_CACHED_MEMORY)
mask |= PSB_PTE_CACHED;
if (type & PSB_MMU_RO_MEMORY)
@@ -57,7 +61,7 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
* Given a gtt_range object return the GTT offset of the page table
* entries for this gtt_range
*/
-static u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
+static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long offset;
@@ -78,7 +82,8 @@ static u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
*/
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
{
- u32 *gtt_slot, pte;
+ u32 __iomem *gtt_slot;
+ u32 pte;
struct page **pages;
int i;
@@ -93,7 +98,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
pages = r->pages;
/* Make sure changes are visible to the GPU */
- set_pages_array_uc(pages, r->npage);
+ set_pages_array_wc(pages, r->npage);
/* Write our page entries into the GTT itself */
for (i = r->roll; i < r->npage; i++) {
@@ -122,7 +127,8 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- u32 *gtt_slot, pte;
+ u32 __iomem *gtt_slot;
+ u32 pte;
int i;
WARN_ON(r->stolen);
@@ -148,7 +154,8 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
*/
void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
{
- u32 *gtt_slot, pte;
+ u32 __iomem *gtt_slot;
+ u32 pte;
int i;
if (roll >= r->npage) {
@@ -409,8 +416,6 @@ int psb_gtt_init(struct drm_device *dev, int resume)
unsigned long stolen_size, vram_stolen_size;
unsigned i, num_pages;
unsigned pfn_base;
- uint32_t vram_pages;
- uint32_t dvmt_mode = 0;
struct psb_gtt *pg;
int ret = 0;
@@ -483,13 +488,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
stolen_size = vram_stolen_size;
- printk(KERN_INFO "Stolen memory information\n");
- printk(KERN_INFO " base in RAM: 0x%x\n", dev_priv->stolen_base);
- printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
- vram_stolen_size/1024);
- dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
- printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n",
- (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
+ dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
+ dev_priv->stolen_base, vram_stolen_size / 1024);
if (resume && (gtt_pages != pg->gtt_pages) &&
(stolen_size != pg->stolen_size)) {
@@ -525,8 +525,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
*/
pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
- vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
- printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
+ num_pages = vram_stolen_size >> PAGE_SHIFT;
+ dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
num_pages, pfn_base << PAGE_SHIFT, 0);
for (i = 0; i < num_pages; ++i) {
pte = psb_gtt_mask_pte(pfn_base + i, 0);
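The GTT changes above add a sanity check that a page frame number still fits in a 32-bit PTE once shifted by PAGE_SHIFT. A userspace sketch of that construction follows; the flag bits are illustrative, only the overflow check and the shift mirror the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PTE_VALID	(1u << 0)	/* illustrative flag bits */
#define PTE_CACHED	(1u << 1)

static uint32_t mask_pte(uint64_t pfn, int cached)
{
	uint32_t mask = PTE_VALID;

	/* PTEs are 32 bits, so the page must live below 4GiB */
	assert(!(pfn & ~(0xFFFFFFFFull >> PAGE_SHIFT)));

	if (cached)
		mask |= PTE_CACHED;
	return (uint32_t)(pfn << PAGE_SHIFT) | mask;
}

int main(void)
{
	printf("pte for pfn 0x12345 = %#x\n", (unsigned int)mask_pte(0x12345, 1));
	return 0;
}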
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index d4d0c5b8bf91..973d7f6d66b7 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -26,6 +26,8 @@
#include "psb_intel_reg.h"
#include "intel_bios.h"
+#define SLAVE_ADDR1 0x70
+#define SLAVE_ADDR2 0x72
static void *find_section(struct bdb_header *bdb, int section_id)
{
@@ -52,6 +54,16 @@ static void *find_section(struct bdb_header *bdb, int section_id)
return NULL;
}
+static u16
+get_blocksize(void *p)
+{
+ u16 *block_ptr, block_size;
+
+ block_ptr = (u16 *)((char *)p - 2);
+ block_size = *block_ptr;
+ return block_size;
+}
+
static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
struct lvds_dvo_timing *dvo_timing)
{
@@ -75,6 +87,16 @@ static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
panel_fixed_mode->clock = dvo_timing->clock * 10;
panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+ if (dvo_timing->hsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dvo_timing->vsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
/* Some VBTs have bogus h/vtotal values */
if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
@@ -217,6 +239,180 @@ static void parse_general_features(struct drm_psb_private *dev_priv,
}
}
+static void
+parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct sdvo_device_mapping *p_mapping;
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
+ return;
+ }
+ /* Check whether the child device size meets the requirement.
+ * If the child device size obtained from the general definition
+ * block differs from sizeof(struct child_device_config), skip
+ * parsing the SDVO device info.
+ */
+ if (p_defs->child_dev_size != sizeof(*p_child)) {
+ /* different child dev size. Ignore it. */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* get the number of child device */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ if (p_child->slave_addr != SLAVE_ADDR1 &&
+ p_child->slave_addr != SLAVE_ADDR2) {
+ /*
+ * If the slave address is neither 0x70 nor 0x72,
+ * it is not a SDVO device. Skip it.
+ */
+ continue;
+ }
+ if (p_child->dvo_port != DEVICE_PORT_DVOB &&
+ p_child->dvo_port != DEVICE_PORT_DVOC) {
+ /* skip the incorrect SDVO port */
+ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+ continue;
+ }
+ DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
+ " %s port\n",
+ p_child->slave_addr,
+ (p_child->dvo_port == DEVICE_PORT_DVOB) ?
+ "SDVOB" : "SDVOC");
+ p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+ if (!p_mapping->initialized) {
+ p_mapping->dvo_port = p_child->dvo_port;
+ p_mapping->slave_addr = p_child->slave_addr;
+ p_mapping->dvo_wiring = p_child->dvo_wiring;
+ p_mapping->ddc_pin = p_child->ddc_pin;
+ p_mapping->i2c_pin = p_child->i2c_pin;
+ p_mapping->initialized = 1;
+ DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+ p_mapping->dvo_port,
+ p_mapping->slave_addr,
+ p_mapping->dvo_wiring,
+ p_mapping->ddc_pin,
+ p_mapping->i2c_pin);
+ } else {
+ DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
+ "two SDVO device.\n");
+ }
+ if (p_child->slave2_addr) {
+ /* Maybe this is an SDVO device with multiple inputs */
+ /* And the mapping info is not added */
+ DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
+ " is a SDVO device with multiple inputs.\n");
+ }
+ count++;
+ }
+
+ if (!count) {
+ /* No SDVO device info is found */
+ DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
+ }
+ return;
+}
+
+
+static void
+parse_driver_features(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_driver_features *driver;
+
+ driver = find_section(bdb, BDB_DRIVER_FEATURES);
+ if (!driver)
+ return;
+
+ /* This bit indicates whether to use 96 MHz for DPLL_A */
+ if (driver->primary_lfp_id)
+ dev_priv->dplla_96mhz = true;
+ else
+ dev_priv->dplla_96mhz = false;
+}
+
+static void
+parse_device_mapping(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child, *child_dev_ptr;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
+ return;
+ }
+ /* Check whether the child device size meets the requirement.
+ * If the child device size obtained from the general definition
+ * block differs from sizeof(struct child_device_config), skip
+ * parsing the child device info.
+ */
+ if (p_defs->child_dev_size != sizeof(*p_child)) {
+ /* different child dev size. Ignore it. */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* get the number of child device */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ /* get the number of child devices that are present */
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ count++;
+ }
+ if (!count) {
+ DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
+ return;
+ }
+ dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
+ if (!dev_priv->child_dev) {
+ DRM_DEBUG_KMS("No memory space for child devices\n");
+ return;
+ }
+
+ dev_priv->child_dev_num = count;
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ child_dev_ptr = dev_priv->child_dev + count;
+ count++;
+ memcpy((void *)child_dev_ptr, (void *)p_child,
+ sizeof(*p_child));
+ }
+ return;
+}
+
+
/**
* psb_intel_init_bios - initialize VBIOS settings & find VBT
* @dev: DRM device
@@ -236,38 +432,54 @@ bool psb_intel_init_bios(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
struct vbt_header *vbt = NULL;
- struct bdb_header *bdb;
- u8 __iomem *bios;
+ struct bdb_header *bdb = NULL;
+ u8 __iomem *bios = NULL;
size_t size;
int i;
- bios = pci_map_rom(pdev, &size);
- if (!bios)
- return -1;
+ /* XXX Should this validation be moved to intel_opregion.c? */
+ if (dev_priv->opregion.vbt) {
+ struct vbt_header *vbt = dev_priv->opregion.vbt;
+ if (memcmp(vbt->signature, "$VBT", 4) == 0) {
+ DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
+ vbt->signature);
+ bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
+ } else
+ dev_priv->opregion.vbt = NULL;
+ }
- /* Scour memory looking for the VBT signature */
- for (i = 0; i + 4 < size; i++) {
- if (!memcmp(bios + i, "$VBT", 4)) {
- vbt = (struct vbt_header *)(bios + i);
- break;
+ if (bdb == NULL) {
+ bios = pci_map_rom(pdev, &size);
+ if (!bios)
+ return -1;
+
+ /* Scour memory looking for the VBT signature */
+ for (i = 0; i + 4 < size; i++) {
+ if (!memcmp(bios + i, "$VBT", 4)) {
+ vbt = (struct vbt_header *)(bios + i);
+ break;
+ }
}
- }
- if (!vbt) {
- dev_err(dev->dev, "VBT signature missing\n");
- pci_unmap_rom(pdev, bios);
- return -1;
+ if (!vbt) {
+ dev_err(dev->dev, "VBT signature missing\n");
+ pci_unmap_rom(pdev, bios);
+ return -1;
+ }
+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
}
- bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
-
- /* Grab useful general definitions */
+ /* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
+ parse_driver_features(dev_priv, bdb);
parse_lfp_panel_data(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
+ parse_sdvo_device_mapping(dev_priv, bdb);
+ parse_device_mapping(dev_priv, bdb);
parse_backlight_data(dev_priv, bdb);
- pci_unmap_rom(pdev, bios);
+ if (bios)
+ pci_unmap_rom(pdev, bios);
return 0;
}
@@ -278,26 +490,8 @@ bool psb_intel_init_bios(struct drm_device *dev)
void psb_intel_destroy_bios(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct drm_display_mode *sdvo_lvds_vbt_mode =
- dev_priv->sdvo_lvds_vbt_mode;
- struct drm_display_mode *lfp_lvds_vbt_mode =
- dev_priv->lfp_lvds_vbt_mode;
- struct bdb_lvds_backlight *lvds_bl =
- dev_priv->lvds_bl;
-
- /*free sdvo panel mode*/
- if (sdvo_lvds_vbt_mode) {
- dev_priv->sdvo_lvds_vbt_mode = NULL;
- kfree(sdvo_lvds_vbt_mode);
- }
- if (lfp_lvds_vbt_mode) {
- dev_priv->lfp_lvds_vbt_mode = NULL;
- kfree(lfp_lvds_vbt_mode);
- }
-
- if (lvds_bl) {
- dev_priv->lvds_bl = NULL;
- kfree(lvds_bl);
- }
+ kfree(dev_priv->sdvo_lvds_vbt_mode);
+ kfree(dev_priv->lfp_lvds_vbt_mode);
+ kfree(dev_priv->lvds_bl);
}
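parse_sdvo_device_mapping() and parse_device_mapping() above both derive the number of child device entries from the block size stored just before the section pointer. A toy illustration of that arithmetic follows; the struct sizes and the block size are placeholders, not the real BDB layout.

#include <stdio.h>

struct fake_general_defs { unsigned char header[5]; };	/* placeholder */
struct fake_child_dev    { unsigned char blob[33]; };	/* placeholder */

int main(void)
{
	unsigned int block_size = 203;	/* example value from get_blocksize() */
	int child_device_num = (block_size - sizeof(struct fake_general_defs)) /
			       sizeof(struct fake_child_dev);

	printf("block of %u bytes holds %d child device entries\n",
	       block_size, child_device_num);
	return 0;
}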
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 70f1bf018183..0a738663eb5a 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -127,9 +127,93 @@ struct bdb_general_features {
/* bits 5 */
u8 int_crt_support:1;
u8 int_tv_support:1;
- u8 rsvd11:6; /* finish byte */
+ u8 int_efp_support:1;
+ u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
+ u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
+ u8 rsvd11:3; /* finish byte */
} __attribute__((packed));
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE 0x00
+#define DEVICE_TYPE_CRT 0x01
+#define DEVICE_TYPE_TV 0x09
+#define DEVICE_TYPE_EFP 0x12
+#define DEVICE_TYPE_LFP 0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS 0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
+#define DEVICE_TYPE_TV_COMPOSITE 0x0209
+#define DEVICE_TYPE_TV_MACROVISION 0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
+#define DEVICE_TYPE_TV_SCART 0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
+#define DEVICE_TYPE_EFP_DVI_I 0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
+#define DEVICE_TYPE_LFP_PANELLINK 0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
+
+#define DEVICE_CFG_NONE 0x00
+#define DEVICE_CFG_12BIT_DVOB 0x01
+#define DEVICE_CFG_12BIT_DVOC 0x02
+#define DEVICE_CFG_24BIT_DVOBC 0x09
+#define DEVICE_CFG_24BIT_DVOCB 0x0a
+#define DEVICE_CFG_DUAL_DVOB 0x11
+#define DEVICE_CFG_DUAL_DVOC 0x12
+#define DEVICE_CFG_DUAL_DVOBC 0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
+
+#define DEVICE_WIRE_NONE 0x00
+#define DEVICE_WIRE_DVOB 0x01
+#define DEVICE_WIRE_DVOC 0x02
+#define DEVICE_WIRE_DVOBC 0x03
+#define DEVICE_WIRE_DVOBB 0x05
+#define DEVICE_WIRE_DVOCC 0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB 0x01
+#define DEVICE_PORT_DVOC 0x02
+
+struct child_device_config {
+ u16 handle;
+ u16 device_type;
+ u8 device_id[10]; /* ascii string */
+ u16 addin_offset;
+ u8 dvo_port; /* See DEVICE_PORT_* above */
+ u8 i2c_pin;
+ u8 slave_addr;
+ u8 ddc_pin;
+ u16 edid_ptr;
+ u8 dvo_cfg; /* See DEVICE_CFG_* above */
+ u8 dvo2_port;
+ u8 i2c2_pin;
+ u8 slave2_addr;
+ u8 ddc2_pin;
+ u8 capabilities;
+ u8 dvo_wiring;/* See DEVICE_WIRE_* above */
+ u8 dvo2_wiring;
+ u16 extended_type;
+ u8 dvo_function;
+} __attribute__((packed));
+
+
struct bdb_general_definitions {
/* DDC GPIO */
u8 crt_ddc_gmbus_pin;
@@ -144,13 +228,18 @@ struct bdb_general_definitions {
u8 boot_display[2];
u8 child_dev_size;
- /* device info */
- u8 tv_or_lvds_info[33];
- u8 dev1[33];
- u8 dev2[33];
- u8 dev3[33];
- u8 dev4[33];
- /* may be another device block here on some platforms */
+ /*
+ * Device info:
+ * If TV is present, it'll be at devices[0].
+ * LVDS will be next, either devices[0] or [1], if present.
+ * On some platforms the number of devices is 6, but it can be as few
+ * as 4 if both TV and LVDS are missing.
+ * The number of devices depends on the size of the general definition
+ * block and is obtained with the following formula:
+ * number = (block_size - sizeof(bdb_general_definitions))/
+ * sizeof(child_device_config);
+ */
+ struct child_device_config devices[0];
};
struct bdb_lvds_options {
@@ -302,6 +391,45 @@ struct bdb_sdvo_lvds_options {
u8 panel_misc_bits_4;
} __attribute__((packed));
+struct bdb_driver_features {
+ u8 boot_dev_algorithm:1;
+ u8 block_display_switch:1;
+ u8 allow_display_switch:1;
+ u8 hotplug_dvo:1;
+ u8 dual_view_zoom:1;
+ u8 int15h_hook:1;
+ u8 sprite_in_clone:1;
+ u8 primary_lfp_id:1;
+
+ u16 boot_mode_x;
+ u16 boot_mode_y;
+ u8 boot_mode_bpp;
+ u8 boot_mode_refresh;
+
+ u16 enable_lfp_primary:1;
+ u16 selective_mode_pruning:1;
+ u16 dual_frequency:1;
+ u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
+ u16 nt_clone_support:1;
+ u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
+ u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
+ u16 cui_aspect_scaling:1;
+ u16 preserve_aspect_ratio:1;
+ u16 sdvo_device_power_down:1;
+ u16 crt_hotplug:1;
+ u16 lvds_config:2;
+ u16 tv_hotplug:1;
+ u16 hdmi_config:2;
+
+ u8 static_display:1;
+ u8 reserved2:7;
+ u16 legacy_crt_max_x;
+ u16 legacy_crt_max_y;
+ u8 legacy_crt_max_refresh;
+
+ u8 hdmi_termination;
+ u8 custom_vbt_version;
+} __attribute__((packed));
extern bool psb_intel_init_bios(struct drm_device *dev);
extern void psb_intel_destroy_bios(struct drm_device *dev);
@@ -427,4 +555,21 @@ extern void psb_intel_destroy_bios(struct drm_device *dev);
#define SWF14_APM_STANDBY 0x1
#define SWF14_APM_RESTORE 0x0
+/* Add the device class for LFP, TV, HDMI */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_eDP 0x78C6
+
+/* define the DVO port for HDMI output type */
+#define DVO_B 1
+#define DVO_C 2
+#define DVO_D 3
+
+/* define the PORT for DP output type */
+#define PORT_IDPB 7
+#define PORT_IDPC 8
+#define PORT_IDPD 9
+
#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
index af656787db0f..265ad0de44a6 100644
--- a/drivers/gpu/drm/gma500/mdfld_device.c
+++ b/drivers/gpu/drm/gma500/mdfld_device.c
@@ -163,142 +163,30 @@ struct backlight_device *mdfld_get_backlight_device(void)
*
* Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
*/
-static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
+static int mdfld_save_display_registers(struct drm_device *dev, int pipenum)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct medfield_state *regs = &dev_priv->regs.mdfld;
+ struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
+ const struct psb_offset *map = &dev_priv->regmap[pipenum];
int i;
+ u32 *mipi_val;
/* register */
- u32 dpll_reg = MRST_DPLL_A;
- u32 fp_reg = MRST_FPA0;
- u32 pipeconf_reg = PIPEACONF;
- u32 htot_reg = HTOTAL_A;
- u32 hblank_reg = HBLANK_A;
- u32 hsync_reg = HSYNC_A;
- u32 vtot_reg = VTOTAL_A;
- u32 vblank_reg = VBLANK_A;
- u32 vsync_reg = VSYNC_A;
- u32 pipesrc_reg = PIPEASRC;
- u32 dspstride_reg = DSPASTRIDE;
- u32 dsplinoff_reg = DSPALINOFF;
- u32 dsptileoff_reg = DSPATILEOFF;
- u32 dspsize_reg = DSPASIZE;
- u32 dsppos_reg = DSPAPOS;
- u32 dspsurf_reg = DSPASURF;
u32 mipi_reg = MIPI;
- u32 dspcntr_reg = DSPACNTR;
- u32 dspstatus_reg = PIPEASTAT;
- u32 palette_reg = PALETTE_A;
-
- /* pointer to values */
- u32 *dpll_val = &regs->saveDPLL_A;
- u32 *fp_val = &regs->saveFPA0;
- u32 *pipeconf_val = &regs->savePIPEACONF;
- u32 *htot_val = &regs->saveHTOTAL_A;
- u32 *hblank_val = &regs->saveHBLANK_A;
- u32 *hsync_val = &regs->saveHSYNC_A;
- u32 *vtot_val = &regs->saveVTOTAL_A;
- u32 *vblank_val = &regs->saveVBLANK_A;
- u32 *vsync_val = &regs->saveVSYNC_A;
- u32 *pipesrc_val = &regs->savePIPEASRC;
- u32 *dspstride_val = &regs->saveDSPASTRIDE;
- u32 *dsplinoff_val = &regs->saveDSPALINOFF;
- u32 *dsptileoff_val = &regs->saveDSPATILEOFF;
- u32 *dspsize_val = &regs->saveDSPASIZE;
- u32 *dsppos_val = &regs->saveDSPAPOS;
- u32 *dspsurf_val = &regs->saveDSPASURF;
- u32 *mipi_val = &regs->saveMIPI;
- u32 *dspcntr_val = &regs->saveDSPACNTR;
- u32 *dspstatus_val = &regs->saveDSPASTATUS;
- u32 *palette_val = regs->save_palette_a;
-
- switch (pipe) {
+
+ switch (pipenum) {
case 0:
+ mipi_val = &regs->saveMIPI;
break;
case 1:
- /* regester */
- dpll_reg = MDFLD_DPLL_B;
- fp_reg = MDFLD_DPLL_DIV0;
- pipeconf_reg = PIPEBCONF;
- htot_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- hsync_reg = HSYNC_B;
- vtot_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- pipesrc_reg = PIPEBSRC;
- dspstride_reg = DSPBSTRIDE;
- dsplinoff_reg = DSPBLINOFF;
- dsptileoff_reg = DSPBTILEOFF;
- dspsize_reg = DSPBSIZE;
- dsppos_reg = DSPBPOS;
- dspsurf_reg = DSPBSURF;
- dspcntr_reg = DSPBCNTR;
- dspstatus_reg = PIPEBSTAT;
- palette_reg = PALETTE_B;
-
- /* values */
- dpll_val = &regs->saveDPLL_B;
- fp_val = &regs->saveFPB0;
- pipeconf_val = &regs->savePIPEBCONF;
- htot_val = &regs->saveHTOTAL_B;
- hblank_val = &regs->saveHBLANK_B;
- hsync_val = &regs->saveHSYNC_B;
- vtot_val = &regs->saveVTOTAL_B;
- vblank_val = &regs->saveVBLANK_B;
- vsync_val = &regs->saveVSYNC_B;
- pipesrc_val = &regs->savePIPEBSRC;
- dspstride_val = &regs->saveDSPBSTRIDE;
- dsplinoff_val = &regs->saveDSPBLINOFF;
- dsptileoff_val = &regs->saveDSPBTILEOFF;
- dspsize_val = &regs->saveDSPBSIZE;
- dsppos_val = &regs->saveDSPBPOS;
- dspsurf_val = &regs->saveDSPBSURF;
- dspcntr_val = &regs->saveDSPBCNTR;
- dspstatus_val = &regs->saveDSPBSTATUS;
- palette_val = regs->save_palette_b;
+ mipi_val = &regs->saveMIPI;
break;
case 2:
/* register */
- pipeconf_reg = PIPECCONF;
- htot_reg = HTOTAL_C;
- hblank_reg = HBLANK_C;
- hsync_reg = HSYNC_C;
- vtot_reg = VTOTAL_C;
- vblank_reg = VBLANK_C;
- vsync_reg = VSYNC_C;
- pipesrc_reg = PIPECSRC;
- dspstride_reg = DSPCSTRIDE;
- dsplinoff_reg = DSPCLINOFF;
- dsptileoff_reg = DSPCTILEOFF;
- dspsize_reg = DSPCSIZE;
- dsppos_reg = DSPCPOS;
- dspsurf_reg = DSPCSURF;
mipi_reg = MIPI_C;
- dspcntr_reg = DSPCCNTR;
- dspstatus_reg = PIPECSTAT;
- palette_reg = PALETTE_C;
-
/* pointer to values */
- pipeconf_val = &regs->savePIPECCONF;
- htot_val = &regs->saveHTOTAL_C;
- hblank_val = &regs->saveHBLANK_C;
- hsync_val = &regs->saveHSYNC_C;
- vtot_val = &regs->saveVTOTAL_C;
- vblank_val = &regs->saveVBLANK_C;
- vsync_val = &regs->saveVSYNC_C;
- pipesrc_val = &regs->savePIPECSRC;
- dspstride_val = &regs->saveDSPCSTRIDE;
- dsplinoff_val = &regs->saveDSPCLINOFF;
- dsptileoff_val = &regs->saveDSPCTILEOFF;
- dspsize_val = &regs->saveDSPCSIZE;
- dsppos_val = &regs->saveDSPCPOS;
- dspsurf_val = &regs->saveDSPCSURF;
mipi_val = &regs->saveMIPI_C;
- dspcntr_val = &regs->saveDSPCCNTR;
- dspstatus_val = &regs->saveDSPCSTATUS;
- palette_val = regs->save_palette_c;
break;
default:
DRM_ERROR("%s, invalid pipe number.\n", __func__);
@@ -306,30 +194,30 @@ static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
}
/* Pipe & plane A info */
- *dpll_val = PSB_RVDC32(dpll_reg);
- *fp_val = PSB_RVDC32(fp_reg);
- *pipeconf_val = PSB_RVDC32(pipeconf_reg);
- *htot_val = PSB_RVDC32(htot_reg);
- *hblank_val = PSB_RVDC32(hblank_reg);
- *hsync_val = PSB_RVDC32(hsync_reg);
- *vtot_val = PSB_RVDC32(vtot_reg);
- *vblank_val = PSB_RVDC32(vblank_reg);
- *vsync_val = PSB_RVDC32(vsync_reg);
- *pipesrc_val = PSB_RVDC32(pipesrc_reg);
- *dspstride_val = PSB_RVDC32(dspstride_reg);
- *dsplinoff_val = PSB_RVDC32(dsplinoff_reg);
- *dsptileoff_val = PSB_RVDC32(dsptileoff_reg);
- *dspsize_val = PSB_RVDC32(dspsize_reg);
- *dsppos_val = PSB_RVDC32(dsppos_reg);
- *dspsurf_val = PSB_RVDC32(dspsurf_reg);
- *dspcntr_val = PSB_RVDC32(dspcntr_reg);
- *dspstatus_val = PSB_RVDC32(dspstatus_reg);
+ pipe->dpll = PSB_RVDC32(map->dpll);
+ pipe->fp0 = PSB_RVDC32(map->fp0);
+ pipe->conf = PSB_RVDC32(map->conf);
+ pipe->htotal = PSB_RVDC32(map->htotal);
+ pipe->hblank = PSB_RVDC32(map->hblank);
+ pipe->hsync = PSB_RVDC32(map->hsync);
+ pipe->vtotal = PSB_RVDC32(map->vtotal);
+ pipe->vblank = PSB_RVDC32(map->vblank);
+ pipe->vsync = PSB_RVDC32(map->vsync);
+ pipe->src = PSB_RVDC32(map->src);
+ pipe->stride = PSB_RVDC32(map->stride);
+ pipe->linoff = PSB_RVDC32(map->linoff);
+ pipe->tileoff = PSB_RVDC32(map->tileoff);
+ pipe->size = PSB_RVDC32(map->size);
+ pipe->pos = PSB_RVDC32(map->pos);
+ pipe->surf = PSB_RVDC32(map->surf);
+ pipe->cntr = PSB_RVDC32(map->cntr);
+ pipe->status = PSB_RVDC32(map->status);
/*save palette (gamma) */
for (i = 0; i < 256; i++)
- palette_val[i] = PSB_RVDC32(palette_reg + (i << 2));
+ pipe->palette[i] = PSB_RVDC32(map->palette + (i << 2));
- if (pipe == 1) {
+ if (pipenum == 1) {
regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
@@ -349,7 +237,7 @@ static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
*
* Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
*/
-static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
+static int mdfld_restore_display_registers(struct drm_device *dev, int pipenum)
{
/* To get panel out of ULPS mode. */
u32 temp = 0;
@@ -357,142 +245,30 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
struct drm_psb_private *dev_priv = dev->dev_private;
struct mdfld_dsi_config *dsi_config = NULL;
struct medfield_state *regs = &dev_priv->regs.mdfld;
- u32 i = 0;
- u32 dpll = 0;
+ struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
+ const struct psb_offset *map = &dev_priv->regmap[pipenum];
+ u32 i;
+ u32 dpll;
u32 timeout = 0;
- /* regester */
- u32 dpll_reg = MRST_DPLL_A;
- u32 fp_reg = MRST_FPA0;
- u32 pipeconf_reg = PIPEACONF;
- u32 htot_reg = HTOTAL_A;
- u32 hblank_reg = HBLANK_A;
- u32 hsync_reg = HSYNC_A;
- u32 vtot_reg = VTOTAL_A;
- u32 vblank_reg = VBLANK_A;
- u32 vsync_reg = VSYNC_A;
- u32 pipesrc_reg = PIPEASRC;
- u32 dspstride_reg = DSPASTRIDE;
- u32 dsplinoff_reg = DSPALINOFF;
- u32 dsptileoff_reg = DSPATILEOFF;
- u32 dspsize_reg = DSPASIZE;
- u32 dsppos_reg = DSPAPOS;
- u32 dspsurf_reg = DSPASURF;
- u32 dspstatus_reg = PIPEASTAT;
+ /* register */
u32 mipi_reg = MIPI;
- u32 dspcntr_reg = DSPACNTR;
- u32 palette_reg = PALETTE_A;
/* values */
- u32 dpll_val = regs->saveDPLL_A & ~DPLL_VCO_ENABLE;
- u32 fp_val = regs->saveFPA0;
- u32 pipeconf_val = regs->savePIPEACONF;
- u32 htot_val = regs->saveHTOTAL_A;
- u32 hblank_val = regs->saveHBLANK_A;
- u32 hsync_val = regs->saveHSYNC_A;
- u32 vtot_val = regs->saveVTOTAL_A;
- u32 vblank_val = regs->saveVBLANK_A;
- u32 vsync_val = regs->saveVSYNC_A;
- u32 pipesrc_val = regs->savePIPEASRC;
- u32 dspstride_val = regs->saveDSPASTRIDE;
- u32 dsplinoff_val = regs->saveDSPALINOFF;
- u32 dsptileoff_val = regs->saveDSPATILEOFF;
- u32 dspsize_val = regs->saveDSPASIZE;
- u32 dsppos_val = regs->saveDSPAPOS;
- u32 dspsurf_val = regs->saveDSPASURF;
- u32 dspstatus_val = regs->saveDSPASTATUS;
+ u32 dpll_val = pipe->dpll;
u32 mipi_val = regs->saveMIPI;
- u32 dspcntr_val = regs->saveDSPACNTR;
- u32 *palette_val = regs->save_palette_a;
- switch (pipe) {
+ switch (pipenum) {
case 0:
+ dpll_val &= ~DPLL_VCO_ENABLE;
dsi_config = dev_priv->dsi_configs[0];
break;
case 1:
- /* regester */
- dpll_reg = MDFLD_DPLL_B;
- fp_reg = MDFLD_DPLL_DIV0;
- pipeconf_reg = PIPEBCONF;
- htot_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- hsync_reg = HSYNC_B;
- vtot_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- pipesrc_reg = PIPEBSRC;
- dspstride_reg = DSPBSTRIDE;
- dsplinoff_reg = DSPBLINOFF;
- dsptileoff_reg = DSPBTILEOFF;
- dspsize_reg = DSPBSIZE;
- dsppos_reg = DSPBPOS;
- dspsurf_reg = DSPBSURF;
- dspcntr_reg = DSPBCNTR;
- dspstatus_reg = PIPEBSTAT;
- palette_reg = PALETTE_B;
-
- /* values */
- dpll_val = regs->saveDPLL_B & ~DPLL_VCO_ENABLE;
- fp_val = regs->saveFPB0;
- pipeconf_val = regs->savePIPEBCONF;
- htot_val = regs->saveHTOTAL_B;
- hblank_val = regs->saveHBLANK_B;
- hsync_val = regs->saveHSYNC_B;
- vtot_val = regs->saveVTOTAL_B;
- vblank_val = regs->saveVBLANK_B;
- vsync_val = regs->saveVSYNC_B;
- pipesrc_val = regs->savePIPEBSRC;
- dspstride_val = regs->saveDSPBSTRIDE;
- dsplinoff_val = regs->saveDSPBLINOFF;
- dsptileoff_val = regs->saveDSPBTILEOFF;
- dspsize_val = regs->saveDSPBSIZE;
- dsppos_val = regs->saveDSPBPOS;
- dspsurf_val = regs->saveDSPBSURF;
- dspcntr_val = regs->saveDSPBCNTR;
- dspstatus_val = regs->saveDSPBSTATUS;
- palette_val = regs->save_palette_b;
+ dpll_val &= ~DPLL_VCO_ENABLE;
break;
case 2:
- /* regester */
- pipeconf_reg = PIPECCONF;
- htot_reg = HTOTAL_C;
- hblank_reg = HBLANK_C;
- hsync_reg = HSYNC_C;
- vtot_reg = VTOTAL_C;
- vblank_reg = VBLANK_C;
- vsync_reg = VSYNC_C;
- pipesrc_reg = PIPECSRC;
- dspstride_reg = DSPCSTRIDE;
- dsplinoff_reg = DSPCLINOFF;
- dsptileoff_reg = DSPCTILEOFF;
- dspsize_reg = DSPCSIZE;
- dsppos_reg = DSPCPOS;
- dspsurf_reg = DSPCSURF;
mipi_reg = MIPI_C;
- dspcntr_reg = DSPCCNTR;
- dspstatus_reg = PIPECSTAT;
- palette_reg = PALETTE_C;
-
- /* values */
- pipeconf_val = regs->savePIPECCONF;
- htot_val = regs->saveHTOTAL_C;
- hblank_val = regs->saveHBLANK_C;
- hsync_val = regs->saveHSYNC_C;
- vtot_val = regs->saveVTOTAL_C;
- vblank_val = regs->saveVBLANK_C;
- vsync_val = regs->saveVSYNC_C;
- pipesrc_val = regs->savePIPECSRC;
- dspstride_val = regs->saveDSPCSTRIDE;
- dsplinoff_val = regs->saveDSPCLINOFF;
- dsptileoff_val = regs->saveDSPCTILEOFF;
- dspsize_val = regs->saveDSPCSIZE;
- dsppos_val = regs->saveDSPCPOS;
- dspsurf_val = regs->saveDSPCSURF;
mipi_val = regs->saveMIPI_C;
- dspcntr_val = regs->saveDSPCCNTR;
- dspstatus_val = regs->saveDSPCSTATUS;
- palette_val = regs->save_palette_c;
-
dsi_config = dev_priv->dsi_configs[1];
break;
default:
@@ -503,14 +279,14 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
/*make sure VGA plane is off. it initializes to on after reset!*/
PSB_WVDC32(0x80000000, VGACNTRL);
- if (pipe == 1) {
- PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, dpll_reg);
- PSB_RVDC32(dpll_reg);
+ if (pipenum == 1) {
+ PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, map->dpll);
+ PSB_RVDC32(map->dpll);
- PSB_WVDC32(fp_val, fp_reg);
+ PSB_WVDC32(pipe->fp0, map->fp0);
} else {
- dpll = PSB_RVDC32(dpll_reg);
+ dpll = PSB_RVDC32(map->dpll);
if (!(dpll & DPLL_VCO_ENABLE)) {
@@ -518,23 +294,23 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
before enable the VCO */
if (dpll & MDFLD_PWR_GATE_EN) {
dpll &= ~MDFLD_PWR_GATE_EN;
- PSB_WVDC32(dpll, dpll_reg);
+ PSB_WVDC32(dpll, map->dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
- PSB_WVDC32(fp_val, fp_reg);
- PSB_WVDC32(dpll_val, dpll_reg);
+ PSB_WVDC32(pipe->fp0, map->fp0);
+ PSB_WVDC32(dpll_val, map->dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
dpll_val |= DPLL_VCO_ENABLE;
- PSB_WVDC32(dpll_val, dpll_reg);
- PSB_RVDC32(dpll_reg);
+ PSB_WVDC32(dpll_val, map->dpll);
+ PSB_RVDC32(map->dpll);
/* wait for DSI PLL to lock */
while (timeout < 20000 &&
- !(PSB_RVDC32(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ !(PSB_RVDC32(map->conf) & PIPECONF_DSIPLL_LOCK)) {
udelay(150);
timeout++;
}
@@ -547,28 +323,28 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
}
}
/* Restore mode */
- PSB_WVDC32(htot_val, htot_reg);
- PSB_WVDC32(hblank_val, hblank_reg);
- PSB_WVDC32(hsync_val, hsync_reg);
- PSB_WVDC32(vtot_val, vtot_reg);
- PSB_WVDC32(vblank_val, vblank_reg);
- PSB_WVDC32(vsync_val, vsync_reg);
- PSB_WVDC32(pipesrc_val, pipesrc_reg);
- PSB_WVDC32(dspstatus_val, dspstatus_reg);
+ PSB_WVDC32(pipe->htotal, map->htotal);
+ PSB_WVDC32(pipe->hblank, map->hblank);
+ PSB_WVDC32(pipe->hsync, map->hsync);
+ PSB_WVDC32(pipe->vtotal, map->vtotal);
+ PSB_WVDC32(pipe->vblank, map->vblank);
+ PSB_WVDC32(pipe->vsync, map->vsync);
+ PSB_WVDC32(pipe->src, map->src);
+ PSB_WVDC32(pipe->status, map->status);
/*set up the plane*/
- PSB_WVDC32(dspstride_val, dspstride_reg);
- PSB_WVDC32(dsplinoff_val, dsplinoff_reg);
- PSB_WVDC32(dsptileoff_val, dsptileoff_reg);
- PSB_WVDC32(dspsize_val, dspsize_reg);
- PSB_WVDC32(dsppos_val, dsppos_reg);
- PSB_WVDC32(dspsurf_val, dspsurf_reg);
-
- if (pipe == 1) {
+ PSB_WVDC32(pipe->stride, map->stride);
+ PSB_WVDC32(pipe->linoff, map->linoff);
+ PSB_WVDC32(pipe->tileoff, map->tileoff);
+ PSB_WVDC32(pipe->size, map->size);
+ PSB_WVDC32(pipe->pos, map->pos);
+ PSB_WVDC32(pipe->surf, map->surf);
+
+ if (pipenum == 1) {
/* restore palette (gamma) */
/*DRM_UDELAY(50000); */
for (i = 0; i < 256; i++)
- PSB_WVDC32(palette_val[i], palette_reg + (i << 2));
+ PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL);
PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
@@ -578,7 +354,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
/*TODO: resume pipe*/
/*enable the plane*/
- PSB_WVDC32(dspcntr_val & ~DISPLAY_PLANE_ENABLE, dspcntr_reg);
+ PSB_WVDC32(pipe->cntr & ~DISPLAY_PLANE_ENABLE, map->cntr);
return 0;
}
@@ -588,7 +364,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
/*setup MIPI adapter + MIPI IP registers*/
if (dsi_config)
- mdfld_dsi_controller_init(dsi_config, pipe);
+ mdfld_dsi_controller_init(dsi_config, pipenum);
if (in_atomic() || in_interrupt())
mdelay(20);
@@ -596,7 +372,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
msleep(20);
/*enable the plane*/
- PSB_WVDC32(dspcntr_val, dspcntr_reg);
+ PSB_WVDC32(pipe->cntr, map->cntr);
if (in_atomic() || in_interrupt())
mdelay(20);
@@ -625,12 +401,12 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
mdelay(1);
/*enable the pipe*/
- PSB_WVDC32(pipeconf_val, pipeconf_reg);
+ PSB_WVDC32(pipe->conf, map->conf);
/* restore palette (gamma) */
/*DRM_UDELAY(50000); */
for (i = 0; i < 256; i++)
- PSB_WVDC32(palette_val[i], palette_reg + (i << 2));
+ PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
return 0;
}
@@ -667,14 +443,98 @@ static int mdfld_power_up(struct drm_device *dev)
return 0;
}
+/* Medfield */
+static const struct psb_offset mdfld_regmap[3] = {
+ {
+ .fp0 = MRST_FPA0,
+ .fp1 = MRST_FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = MRST_DPLL_A,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .surf = DSPASURF,
+ .addr = MRST_DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = MDFLD_DPLL_DIV0,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = MDFLD_DPLL_B,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .surf = DSPBSURF,
+ .addr = MRST_DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ },
+ {
+ .fp0 = MRST_FPA0, /* This is what the old code did ?? */
+ .cntr = DSPCCNTR,
+ .conf = PIPECCONF,
+ .src = PIPECSRC,
+ /* No DPLL_C */
+ .dpll = MRST_DPLL_A,
+ .htotal = HTOTAL_C,
+ .hblank = HBLANK_C,
+ .hsync = HSYNC_C,
+ .vtotal = VTOTAL_C,
+ .vblank = VBLANK_C,
+ .vsync = VSYNC_C,
+ .stride = DSPCSTRIDE,
+ .size = DSPCSIZE,
+ .pos = DSPCPOS,
+ .surf = DSPCSURF,
+ .addr = MDFLD_DSPCBASE,
+ .status = PIPECSTAT,
+ .linoff = DSPCLINOFF,
+ .tileoff = DSPCTILEOFF,
+ .palette = PALETTE_C,
+ },
+};
+
+static int mdfld_chip_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->regmap = mdfld_regmap;
+ return mid_chip_setup(dev);
+}
+
const struct psb_ops mdfld_chip_ops = {
.name = "mdfld",
.accel_2d = 0,
.pipes = 3,
.crtcs = 3,
+ .lvds_mask = (1 << 1),
+ .hdmi_mask = (1 << 1),
+ .cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
- .chip_setup = mid_chip_setup,
+ .chip_setup = mdfld_chip_setup,
.crtc_helper = &mdfld_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index d52358b744a0..b34ff097b979 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -869,7 +869,6 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
mdfld_set_pipe_timing(dsi_config, pipe);
REG_WRITE(DSPABASE, 0x00);
- REG_WRITE(DSPASTRIDE, (mode->hdisplay * 4));
REG_WRITE(DSPASIZE,
((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index baa0e14165e0..489ffd2c66e5 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -605,6 +605,8 @@ int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
struct mdfld_dsi_config *dsi_config =
mdfld_dsi_get_config(dsi_connector);
struct drm_device *dev = dsi_config->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 mipi_val = 0;
if (!dsi_connector) {
@@ -632,21 +634,13 @@ int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
/*init regs*/
- if (pipe == 0) {
- pkg_sender->dpll_reg = MRST_DPLL_A;
- pkg_sender->dspcntr_reg = DSPACNTR;
- pkg_sender->pipeconf_reg = PIPEACONF;
- pkg_sender->dsplinoff_reg = DSPALINOFF;
- pkg_sender->dspsurf_reg = DSPASURF;
- pkg_sender->pipestat_reg = PIPEASTAT;
- } else if (pipe == 2) {
- pkg_sender->dpll_reg = MRST_DPLL_A;
- pkg_sender->dspcntr_reg = DSPCCNTR;
- pkg_sender->pipeconf_reg = PIPECCONF;
- pkg_sender->dsplinoff_reg = DSPCLINOFF;
- pkg_sender->dspsurf_reg = DSPCSURF;
- pkg_sender->pipestat_reg = PIPECSTAT;
- }
+ /* FIXME: should just copy the regmap ptr ? */
+ pkg_sender->dpll_reg = map->dpll;
+ pkg_sender->dspcntr_reg = map->cntr;
+ pkg_sender->pipeconf_reg = map->conf;
+ pkg_sender->dsplinoff_reg = map->linoff;
+ pkg_sender->dspsurf_reg = map->surf;
+ pkg_sender->pipestat_reg = map->status;
pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe);
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index a35a2921bdf7..3f3cd619c79f 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -50,17 +50,14 @@ struct mrst_clock_t {
void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int count, temp;
- u32 pipeconf_reg = PIPEACONF;
switch (pipe) {
case 0:
- break;
case 1:
- pipeconf_reg = PIPEBCONF;
- break;
case 2:
- pipeconf_reg = PIPECCONF;
break;
default:
DRM_ERROR("Illegal Pipe Number.\n");
@@ -73,7 +70,7 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
/* Wait for for the pipe disable to take effect. */
for (count = 0; count < COUNT_MAX; count++) {
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_PIPE_STATE) == 0)
break;
}
@@ -81,17 +78,14 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int count, temp;
- u32 pipeconf_reg = PIPEACONF;
switch (pipe) {
case 0:
- break;
case 1:
- pipeconf_reg = PIPEBCONF;
- break;
case 2:
- pipeconf_reg = PIPECCONF;
break;
default:
DRM_ERROR("Illegal Pipe Number.\n");
@@ -104,7 +98,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
/* Wait for for the pipe enable to take effect. */
for (count = 0; count < COUNT_MAX; count++) {
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_PIPE_STATE) == 1)
break;
}
@@ -189,15 +183,12 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_i915_master_private *master_priv; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dsplinoff = DSPALINOFF;
- int dspsurf = DSPASURF;
- int dspstride = DSPASTRIDE;
- int dspcntr_reg = DSPACNTR;
u32 dspcntr;
int ret;
@@ -215,23 +206,7 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (ret)
return ret;
- switch (pipe) {
- case 0:
- dsplinoff = DSPALINOFF;
- break;
- case 1:
- dsplinoff = DSPBLINOFF;
- dspsurf = DSPBSURF;
- dspstride = DSPBSTRIDE;
- dspcntr_reg = DSPBCNTR;
- break;
- case 2:
- dsplinoff = DSPCLINOFF;
- dspsurf = DSPCSURF;
- dspstride = DSPCSTRIDE;
- dspcntr_reg = DSPCCNTR;
- break;
- default:
+ if (pipe > 2) {
DRM_ERROR("Illegal Pipe Number.\n");
return -EINVAL;
}
@@ -242,8 +217,8 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
start = psbfb->gtt->offset;
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -261,14 +236,14 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
}
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
start, offset, x, y);
- REG_WRITE(dsplinoff, offset);
- REG_READ(dsplinoff);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
+ REG_WRITE(map->linoff, offset);
+ REG_READ(map->linoff);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
gma_power_end(dev);
@@ -281,78 +256,56 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
*/
void mdfld_disable_crtc(struct drm_device *dev, int pipe)
{
- int dpll_reg = MRST_DPLL_A;
- int dspcntr_reg = DSPACNTR;
- int dspbase_reg = MRST_DSPABASE;
- int pipeconf_reg = PIPEACONF;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
dev_dbg(dev->dev, "pipe = %d\n", pipe);
- switch (pipe) {
- case 0:
- break;
- case 1:
- dpll_reg = MDFLD_DPLL_B;
- dspcntr_reg = DSPBCNTR;
- dspbase_reg = DSPBSURF;
- pipeconf_reg = PIPEBCONF;
- break;
- case 2:
- dpll_reg = MRST_DPLL_A;
- dspcntr_reg = DSPCCNTR;
- dspbase_reg = MDFLD_DSPCBASE;
- pipeconf_reg = PIPECCONF;
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return;
- }
-
if (pipe != 1)
mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe),
HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* FIXME_JLIU7 MDFLD_PO revisit */
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
temp &= ~PIPEACONF_ENABLE;
temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
- REG_WRITE(pipeconf_reg, temp);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp);
+ REG_READ(map->conf);
/* Wait for for the pipe disable to take effect. */
mdfldWaitForPipeDisable(dev, pipe);
}
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if (temp & DPLL_VCO_ENABLE) {
if ((pipe != 1 &&
!((REG_READ(PIPEACONF) | REG_READ(PIPECCONF))
& PIPEACONF_ENABLE)) || pipe == 1) {
temp &= ~(DPLL_VCO_ENABLE);
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to turn off. */
/* FIXME_MDFLD PO may need more delay */
udelay(500);
if (!(temp & MDFLD_PWR_GATE_EN)) {
/* gating power of DPLL */
- REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
+ REG_WRITE(map->dpll, temp | MDFLD_PWR_GATE_EN);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(5000);
}
@@ -373,41 +326,15 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = MRST_DPLL_A;
- int dspcntr_reg = DSPACNTR;
- int dspbase_reg = MRST_DSPABASE;
- int pipeconf_reg = PIPEACONF;
- u32 pipestat_reg = PIPEASTAT;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 pipeconf = dev_priv->pipeconf[pipe];
u32 temp;
int timeout = 0;
dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe);
-/* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */
-/* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */
-
- switch (pipe) {
- case 0:
- break;
- case 1:
- dpll_reg = DPLL_B;
- dspcntr_reg = DSPBCNTR;
- dspbase_reg = MRST_DSPBBASE;
- pipeconf_reg = PIPEBCONF;
- dpll_reg = MDFLD_DPLL_B;
- break;
- case 2:
- dpll_reg = MRST_DPLL_A;
- dspcntr_reg = DSPCCNTR;
- dspbase_reg = MDFLD_DSPCBASE;
- pipeconf_reg = PIPECCONF;
- pipestat_reg = PIPECSTAT;
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return;
- }
+ /* Note: The old code used the pipe A status register for pipe B, but
that appears to be a bug */
if (!gma_power_begin(dev, true))
return;
@@ -420,25 +347,25 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
/* When ungating the DPLL power, we need to wait 0.5us
before enabling the VCO */
if (temp & MDFLD_PWR_GATE_EN) {
temp &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(dpll_reg, temp);
+ REG_WRITE(map->dpll, temp);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/**
* wait for DSI PLL to lock
@@ -446,25 +373,25 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
* since both MIPI pipes share the same PLL.
*/
while ((pipe != 2) && (timeout < 20000) &&
- !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
udelay(150);
timeout++;
}
}
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0) {
- REG_WRITE(pipeconf_reg, pipeconf);
+ REG_WRITE(map->conf, pipeconf);
/* Wait for the pipe enable to take effect. */
mdfldWaitForPipeEnable(dev, pipe);
@@ -473,39 +400,39 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
/*workaround for sighting 3741701 Random X blank display*/
/*perform w/a in video mode only on pipe A or C*/
if (pipe == 0 || pipe == 2) {
- REG_WRITE(pipestat_reg, REG_READ(pipestat_reg));
+ REG_WRITE(map->status, REG_READ(map->status));
msleep(100);
- if (PIPE_VBLANK_STATUS & REG_READ(pipestat_reg))
+ if (PIPE_VBLANK_STATUS & REG_READ(map->status))
dev_dbg(dev->dev, "OK");
else {
dev_dbg(dev->dev, "STUCK!!!!");
/*shutdown controller*/
- temp = REG_READ(dspcntr_reg);
- REG_WRITE(dspcntr_reg,
+ temp = REG_READ(map->cntr);
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
/*mdfld_dsi_dpi_shut_down(dev, pipe);*/
REG_WRITE(0xb048, 1);
msleep(100);
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
temp &= ~PIPEACONF_ENABLE;
- REG_WRITE(pipeconf_reg, temp);
+ REG_WRITE(map->conf, temp);
msleep(100); /*wait for pipe disable*/
REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0);
msleep(100);
REG_WRITE(0xb004, REG_READ(0xb004));
/* try to bring the controller back up again*/
REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1);
- temp = REG_READ(dspcntr_reg);
- REG_WRITE(dspcntr_reg,
+ temp = REG_READ(map->cntr);
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
/*mdfld_dsi_dpi_turn_on(dev, pipe);*/
REG_WRITE(0xb048, 2);
msleep(100);
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
temp |= PIPEACONF_ENABLE;
- REG_WRITE(pipeconf_reg, temp);
+ REG_WRITE(map->conf, temp);
}
}
@@ -529,35 +456,35 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
temp &= ~PIPEACONF_ENABLE;
temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
- REG_WRITE(pipeconf_reg, temp);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp);
+ REG_READ(map->conf);
/* Wait for the pipe disable to take effect. */
mdfldWaitForPipeDisable(dev, pipe);
}
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if (temp & DPLL_VCO_ENABLE) {
if ((pipe != 1 && !((REG_READ(PIPEACONF)
| REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
|| pipe == 1) {
temp &= ~(DPLL_VCO_ENABLE);
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to turn off. */
/* FIXME_MDFLD PO may need more delay */
udelay(500);
@@ -764,21 +691,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = psb_intel_crtc->pipe;
- int fp_reg = MRST_FPA0;
- int dpll_reg = MRST_DPLL_A;
- int dspcntr_reg = DSPACNTR;
- int pipeconf_reg = PIPEACONF;
- int htot_reg = HTOTAL_A;
- int hblank_reg = HBLANK_A;
- int hsync_reg = HSYNC_A;
- int vtot_reg = VTOTAL_A;
- int vblank_reg = VBLANK_A;
- int vsync_reg = VSYNC_A;
- int dspsize_reg = DSPASIZE;
- int dsppos_reg = DSPAPOS;
- int pipesrc_reg = PIPEASRC;
- u32 *pipeconf = &dev_priv->pipeconf[pipe];
- u32 *dspcntr = &dev_priv->dspcntr[pipe];
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
clk_tmp = 0;
@@ -806,45 +719,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
}
#endif
- switch (pipe) {
- case 0:
- break;
- case 1:
- fp_reg = FPB0;
- dpll_reg = DPLL_B;
- dspcntr_reg = DSPBCNTR;
- pipeconf_reg = PIPEBCONF;
- htot_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- hsync_reg = HSYNC_B;
- vtot_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- dspsize_reg = DSPBSIZE;
- dsppos_reg = DSPBPOS;
- pipesrc_reg = PIPEBSRC;
- fp_reg = MDFLD_DPLL_DIV0;
- dpll_reg = MDFLD_DPLL_B;
- break;
- case 2:
- dpll_reg = MRST_DPLL_A;
- dspcntr_reg = DSPCCNTR;
- pipeconf_reg = PIPECCONF;
- htot_reg = HTOTAL_C;
- hblank_reg = HBLANK_C;
- hsync_reg = HSYNC_C;
- vtot_reg = VTOTAL_C;
- vblank_reg = VBLANK_C;
- vsync_reg = VSYNC_C;
- dspsize_reg = DSPCSIZE;
- dsppos_reg = DSPCPOS;
- pipesrc_reg = PIPECSRC;
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return 0;
- }
-
ret = check_fb(crtc->fb);
if (ret)
return ret;
@@ -929,21 +803,21 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
* contained within the displayable area of the screen image
* (frame buffer).
*/
- REG_WRITE(dspsize_reg, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
+ REG_WRITE(map->size, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
| (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
/* Set the CRTC with encoder mode. */
- REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16)
+ REG_WRITE(map->src, ((mode->crtc_hdisplay - 1) << 16)
| (mode->crtc_vdisplay - 1));
} else {
- REG_WRITE(dspsize_reg,
+ REG_WRITE(map->size,
((mode->crtc_vdisplay - 1) << 16) |
(mode->crtc_hdisplay - 1));
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->src,
((mode->crtc_hdisplay - 1) << 16) |
(mode->crtc_vdisplay - 1));
}
- REG_WRITE(dsppos_reg, 0);
+ REG_WRITE(map->pos, 0);
if (psb_intel_encoder)
drm_connector_property_get_value(connector,
@@ -961,34 +835,34 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
offsetY = (adjusted_mode->crtc_vdisplay -
mode->crtc_vdisplay) / 2;
- REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start -
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start -
offsetX - 1) |
((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start -
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start -
offsetX - 1) |
((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start -
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start -
offsetY - 1) |
((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start -
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start -
offsetY - 1) |
((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
} else {
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
}
@@ -1000,12 +874,12 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
}
/* setup pipeconf */
- *pipeconf = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
+ dev_priv->pipeconf[pipe] = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
/* Set up the display plane register */
- *dspcntr = REG_READ(dspcntr_reg);
- *dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS;
- *dspcntr |= DISPLAY_PLANE_ENABLE;
+ dev_priv->dspcntr[pipe] = REG_READ(map->cntr);
+ dev_priv->dspcntr[pipe] |= pipe << DISPPLANE_SEL_PIPE_POS;
+ dev_priv->dspcntr[pipe] |= DISPLAY_PLANE_ENABLE;
if (is_mipi2)
goto mrst_crtc_mode_set_exit;
@@ -1070,21 +944,21 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
clock.p1, m_conv);
}
- dpll = REG_READ(dpll_reg);
+ dpll = REG_READ(map->dpll);
if (dpll & DPLL_VCO_ENABLE) {
dpll &= ~DPLL_VCO_ENABLE;
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
/* reset M1, N1 & P1 */
- REG_WRITE(fp_reg, 0);
+ REG_WRITE(map->fp0, 0);
dpll &= ~MDFLD_P1_MASK;
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->dpll, dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
@@ -1093,7 +967,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
* enable the VCO */
if (dpll & MDFLD_PWR_GATE_EN) {
dpll &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->dpll, dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
@@ -1134,18 +1008,18 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
fp = 0x000000c1;
}
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
dpll |= DPLL_VCO_ENABLE;
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* wait for DSI PLL to lock */
while (timeout < 20000 &&
- !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
udelay(150);
timeout++;
}
@@ -1155,11 +1029,11 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi);
- REG_WRITE(pipeconf_reg, *pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, dev_priv->pipeconf[pipe]);
+ REG_READ(map->conf);
/* Wait for the pipe enable to take effect. */
- REG_WRITE(dspcntr_reg, *dspcntr);
+ REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
psb_intel_wait_for_vblank(dev);
mrst_crtc_mode_set_exit:
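A minimal sketch, not part of the patch, of the pattern the conversion above settles on: per-pipe registers now come from a single regmap lookup instead of a switch in every function. The helper name below is invented for illustration; it assumes the psb_offset/regmap fields and the REG_READ/REG_WRITE accessors used throughout this driver.

/* Illustrative only: disable a pipe's display plane via the regmap table */
static int example_disable_plane(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	const struct psb_offset *map;
	u32 cntr;

	if (pipe < 0 || pipe > 2) {
		DRM_ERROR("Illegal Pipe Number.\n");
		return -EINVAL;
	}
	/* One table lookup replaces the old per-function switch (pipe) */
	map = &dev_priv->regmap[pipe];

	cntr = REG_READ(map->cntr);
	if (cntr & DISPLAY_PLANE_ENABLE) {
		REG_WRITE(map->cntr, cntr & ~DISPLAY_PLANE_ENABLE);
		/* Flush the plane change with a write and posting read of the base */
		REG_WRITE(map->base, REG_READ(map->base));
		REG_READ(map->base);
	}
	return 0;
}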
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 5eee9ad80da4..b2a790bd9899 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -118,139 +118,214 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
dev_priv->platform_rev_id);
}
+struct vbt_header {
+ u32 signature;
+ u8 revision;
+} __packed;
+
+/* The same layout is used for revisions 0 and 1 */
+struct vbt_r0 {
+ struct vbt_header vbt_header;
+ u8 size;
+ u8 checksum;
+} __packed;
+
+struct vbt_r10 {
+ struct vbt_header vbt_header;
+ u8 checksum;
+ u16 size;
+ u8 panel_count;
+ u8 primary_panel_idx;
+ u8 secondary_panel_idx;
+ u8 __reserved[5];
+} __packed;
+
+static int read_vbt_r0(u32 addr, struct vbt_r0 *vbt)
+{
+ void __iomem *vbt_virtual;
+
+ vbt_virtual = ioremap(addr, sizeof(*vbt));
+ if (vbt_virtual == NULL)
+ return -1;
+
+ memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
+ iounmap(vbt_virtual);
+
+ return 0;
+}
+
+static int read_vbt_r10(u32 addr, struct vbt_r10 *vbt)
+{
+ void __iomem *vbt_virtual;
+
+ vbt_virtual = ioremap(addr, sizeof(*vbt));
+ if (!vbt_virtual)
+ return -1;
+
+ memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
+ iounmap(vbt_virtual);
+
+ return 0;
+}
+
+static int mid_get_vbt_data_r0(struct drm_psb_private *dev_priv, u32 addr)
+{
+ struct vbt_r0 vbt;
+ void __iomem *gct_virtual;
+ struct gct_r0 gct;
+ u8 bpi;
+
+ if (read_vbt_r0(addr, &vbt))
+ return -1;
+
+ gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
+ if (!gct_virtual)
+ return -1;
+ memcpy_fromio(&gct, gct_virtual, sizeof(gct));
+ iounmap(gct_virtual);
+
+ bpi = gct.PD.BootPanelIndex;
+ dev_priv->gct_data.bpi = bpi;
+ dev_priv->gct_data.pt = gct.PD.PanelType;
+ dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
+ dev_priv->gct_data.Panel_Port_Control =
+ gct.panel[bpi].Panel_Port_Control;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ gct.panel[bpi].Panel_MIPI_Display_Descriptor;
+
+ return 0;
+}
+
+static int mid_get_vbt_data_r1(struct drm_psb_private *dev_priv, u32 addr)
+{
+ struct vbt_r0 vbt;
+ void __iomem *gct_virtual;
+ struct gct_r1 gct;
+ u8 bpi;
+
+ if (read_vbt_r0(addr, &vbt))
+ return -1;
+
+ gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
+ if (!gct_virtual)
+ return -1;
+ memcpy_fromio(&gct, gct_virtual, sizeof(gct));
+ iounmap(gct_virtual);
+
+ bpi = gct.PD.BootPanelIndex;
+ dev_priv->gct_data.bpi = bpi;
+ dev_priv->gct_data.pt = gct.PD.PanelType;
+ dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
+ dev_priv->gct_data.Panel_Port_Control =
+ gct.panel[bpi].Panel_Port_Control;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ gct.panel[bpi].Panel_MIPI_Display_Descriptor;
+
+ return 0;
+}
+
+static int mid_get_vbt_data_r10(struct drm_psb_private *dev_priv, u32 addr)
+{
+ struct vbt_r10 vbt;
+ void __iomem *gct_virtual;
+ struct gct_r10 *gct;
+ struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
+ struct gct_r10_timing_info *ti;
+ int ret = -1;
+
+ if (read_vbt_r10(addr, &vbt))
+ return -1;
+
+ gct = kmalloc(sizeof(*gct) * vbt.panel_count, GFP_KERNEL);
+ if (!gct)
+ return -1;
+
+ gct_virtual = ioremap(addr + sizeof(vbt),
+ sizeof(*gct) * vbt.panel_count);
+ if (!gct_virtual)
+ goto out;
+ memcpy_fromio(gct, gct_virtual, sizeof(*gct) * vbt.panel_count);
+ iounmap(gct_virtual);
+
+ dev_priv->gct_data.bpi = vbt.primary_panel_idx;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ gct[vbt.primary_panel_idx].Panel_MIPI_Display_Descriptor;
+
+ ti = &gct[vbt.primary_panel_idx].DTD;
+ dp_ti->pixel_clock = ti->pixel_clock;
+ dp_ti->hactive_hi = ti->hactive_hi;
+ dp_ti->hactive_lo = ti->hactive_lo;
+ dp_ti->hblank_hi = ti->hblank_hi;
+ dp_ti->hblank_lo = ti->hblank_lo;
+ dp_ti->hsync_offset_hi = ti->hsync_offset_hi;
+ dp_ti->hsync_offset_lo = ti->hsync_offset_lo;
+ dp_ti->hsync_pulse_width_hi = ti->hsync_pulse_width_hi;
+ dp_ti->hsync_pulse_width_lo = ti->hsync_pulse_width_lo;
+ dp_ti->vactive_hi = ti->vactive_hi;
+ dp_ti->vactive_lo = ti->vactive_lo;
+ dp_ti->vblank_hi = ti->vblank_hi;
+ dp_ti->vblank_lo = ti->vblank_lo;
+ dp_ti->vsync_offset_hi = ti->vsync_offset_hi;
+ dp_ti->vsync_offset_lo = ti->vsync_offset_lo;
+ dp_ti->vsync_pulse_width_hi = ti->vsync_pulse_width_hi;
+ dp_ti->vsync_pulse_width_lo = ti->vsync_pulse_width_lo;
+
+ ret = 0;
+out:
+ kfree(gct);
+ return ret;
+}
+
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
u32 addr;
- u16 new_size;
- u8 *vbt_virtual;
- u8 bpi;
- u8 number_desc = 0;
- struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
- struct gct_r10_timing_info ti;
- void *pGCT;
+ u8 __iomem *vbt_virtual;
+ struct vbt_header vbt_header;
struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+ int ret = -1;
- /* Get the address of the platform config vbt, B0:D2:F0;0xFC */
+ /* Get the address of the platform config vbt */
pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
pci_dev_put(pci_gfx_root);
dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
- /* check for platform config address == 0. */
- /* this means fw doesn't support vbt */
-
- if (addr == 0) {
- vbt->size = 0;
- return;
- }
+ if (!addr)
+ goto out;
/* get the virtual address of the vbt */
- vbt_virtual = ioremap(addr, sizeof(*vbt));
- if (vbt_virtual == NULL) {
- vbt->size = 0;
- return;
- }
+ vbt_virtual = ioremap(addr, sizeof(vbt_header));
+ if (!vbt_virtual)
+ goto out;
- memcpy(vbt, vbt_virtual, sizeof(*vbt));
- iounmap(vbt_virtual); /* Free virtual address space */
+ memcpy_fromio(&vbt_header, vbt_virtual, sizeof(vbt_header));
+ iounmap(vbt_virtual);
- /* No matching signature don't process the data */
- if (memcmp(vbt->signature, "$GCT", 4)) {
- vbt->size = 0;
- return;
- }
+ if (memcmp(&vbt_header.signature, "$GCT", 4))
+ goto out;
+
+ dev_dbg(dev->dev, "GCT revision is %02x\n", vbt_header.revision);
- dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);
-
- switch (vbt->revision) {
- case 0:
- vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
- vbt->size - sizeof(*vbt) + 4);
- pGCT = vbt->oaktrail_gct;
- bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
- dev_priv->gct_data.bpi = bpi;
- dev_priv->gct_data.pt =
- ((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
- memcpy(&dev_priv->gct_data.DTD,
- &((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
- sizeof(struct oaktrail_timing_info));
- dev_priv->gct_data.Panel_Port_Control =
- ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
- ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+ switch (vbt_header.revision) {
+ case 0x00:
+ ret = mid_get_vbt_data_r0(dev_priv, addr);
break;
- case 1:
- vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
- vbt->size - sizeof(*vbt) + 4);
- pGCT = vbt->oaktrail_gct;
- bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
- dev_priv->gct_data.bpi = bpi;
- dev_priv->gct_data.pt =
- ((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
- memcpy(&dev_priv->gct_data.DTD,
- &((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
- sizeof(struct oaktrail_timing_info));
- dev_priv->gct_data.Panel_Port_Control =
- ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
- ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+ case 0x01:
+ ret = mid_get_vbt_data_r1(dev_priv, addr);
break;
case 0x10:
- /*header definition changed from rev 01 (v2) to rev 10h. */
- /*so, some values have changed location*/
- new_size = vbt->checksum; /*checksum contains lo size byte*/
- /*LSB of oaktrail_gct contains hi size byte*/
- new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;
-
- vbt->checksum = vbt->size; /*size contains the checksum*/
- if (new_size > 0xff)
- vbt->size = 0xff; /*restrict size to 255*/
- else
- vbt->size = new_size;
-
- /* number of descriptors defined in the GCT */
- number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
- bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
- vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
- GCT_R10_DISPLAY_DESC_SIZE * number_desc);
- pGCT = vbt->oaktrail_gct;
- pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
- dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
-
- /*copy the GCT display timings into a temp structure*/
- memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
-
- /*now copy the temp struct into the dev_priv->gct_data*/
- dp_ti->pixel_clock = ti.pixel_clock;
- dp_ti->hactive_hi = ti.hactive_hi;
- dp_ti->hactive_lo = ti.hactive_lo;
- dp_ti->hblank_hi = ti.hblank_hi;
- dp_ti->hblank_lo = ti.hblank_lo;
- dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
- dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
- dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
- dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
- dp_ti->vactive_hi = ti.vactive_hi;
- dp_ti->vactive_lo = ti.vactive_lo;
- dp_ti->vblank_hi = ti.vblank_hi;
- dp_ti->vblank_lo = ti.vblank_lo;
- dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
- dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
- dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
- dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
-
- /* Move the MIPI_Display_Descriptor data from GCT to dev priv */
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
- *((u8 *)pGCT + 0x0d);
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
- (*((u8 *)pGCT + 0x0e)) << 8;
+ ret = mid_get_vbt_data_r10(dev_priv, addr);
break;
default:
dev_err(dev->dev, "Unknown revision of GCT!\n");
- vbt->size = 0;
}
+
+out:
+ if (ret)
+ dev_err(dev->dev, "Unable to read GCT!");
+ else
+ dev_priv->has_gct = true;
}
int mid_chip_setup(struct drm_device *dev)
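For reference, a condensed sketch of the probe flow the rewritten mid_get_vbt_data() follows; the helper name is made up, and it assumes the vbt_header layout introduced above plus standard ioremap/memcpy_fromio semantics.

/* Illustrative only: locate and sanity-check the firmware GCT header */
static int example_probe_gct(struct pci_dev *gfx, u32 *addr_out,
			     struct vbt_header *hdr)
{
	void __iomem *va;
	u32 addr;

	/* B0:D2:F0 config offset 0xFC holds the table's physical address */
	pci_read_config_dword(gfx, 0xFC, &addr);
	if (!addr)
		return -ENODEV;	/* firmware does not provide a GCT */

	va = ioremap(addr, sizeof(*hdr));
	if (!va)
		return -ENOMEM;
	memcpy_fromio(hdr, va, sizeof(*hdr));
	iounmap(va);

	/* Only tables carrying the "$GCT" signature are parsed further */
	if (memcmp(&hdr->signature, "$GCT", 4))
		return -EINVAL;

	*addr_out = addr;
	return 0;	/* caller dispatches on hdr->revision (0x00/0x01/0x10) */
}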
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
index 2da1f368f14e..f2f9f38a5362 100644
--- a/drivers/gpu/drm/gma500/oaktrail.h
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -19,14 +19,6 @@
/* MID device specific descriptors */
-struct oaktrail_vbt {
- s8 signature[4]; /*4 bytes,"$GCT" */
- u8 revision;
- u8 size;
- u8 checksum;
- void *oaktrail_gct;
-} __packed;
-
struct oaktrail_timing_info {
u16 pixel_clock;
u8 hactive_lo;
@@ -161,7 +153,7 @@ union oaktrail_panel_rx {
u16 panel_receiver;
} __packed;
-struct oaktrail_gct_v1 {
+struct gct_r0 {
union { /*8 bits,Defined as follows: */
struct {
u8 PanelType:4; /*4 bits, Bit field for panels*/
@@ -178,7 +170,7 @@ struct oaktrail_gct_v1 {
union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
} __packed;
-struct oaktrail_gct_v2 {
+struct gct_r1 {
union { /*8 bits,Defined as follows: */
struct {
u8 PanelType:4; /*4 bits, Bit field for panels*/
@@ -195,6 +187,16 @@ struct oaktrail_gct_v2 {
union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
} __packed;
+struct gct_r10 {
+ struct gct_r10_timing_info DTD;
+ u16 Panel_MIPI_Display_Descriptor;
+ u16 Panel_MIPI_Receiver_Descriptor;
+ u16 Panel_Backlight_Inverter_Descriptor;
+ u8 Panel_Initial_Brightness;
+ u32 MIPI_Ctlr_Init_ptr;
+ u32 MIPI_Panel_Init_ptr;
+} __packed;
+
struct oaktrail_gct_data {
u8 bpi; /* boot panel index, number of panel used during boot */
u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
@@ -213,9 +215,6 @@ struct oaktrail_gct_data {
#define MODE_SETTING_IN_DSR 0x4
#define MODE_SETTING_ENCODER_DONE 0x8
-#define GCT_R10_HEADER_SIZE 16
-#define GCT_R10_DISPLAY_DESC_SIZE 28
-
/*
* Moorestown HDMI interfaces
*/
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index a39b0d0d680f..f821c835ca90 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -162,12 +162,10 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
if (!gma_power_begin(dev, true))
@@ -181,32 +179,32 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
}
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+ REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
psb_intel_crtc_load_lut(crtc);
@@ -223,28 +221,28 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Disable the VGA plane that we never use */
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+ REG_READ(map->conf);
}
/* Wait for the pipe disable to take effect. */
psb_intel_wait_for_vblank(dev);
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
}
/* Wait for the clocks to turn off. */
@@ -292,17 +290,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = psb_intel_crtc->pipe;
- int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
- int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
struct oaktrail_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
@@ -350,7 +338,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
if (oaktrail_panel_fitter_pipe(dev) == pipe)
REG_WRITE(PFIT_CONTROL, 0);
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->src,
((mode->crtc_hdisplay - 1) << 16) |
(mode->crtc_vdisplay - 1));
@@ -369,34 +357,34 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
offsetY = (adjusted_mode->crtc_vdisplay -
mode->crtc_vdisplay) / 2;
- REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg,
+ REG_WRITE(map->hblank,
(adjusted_mode->crtc_hblank_start - offsetX - 1) |
((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
- REG_WRITE(hsync_reg,
+ REG_WRITE(map->hsync,
(adjusted_mode->crtc_hsync_start - offsetX - 1) |
((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
- REG_WRITE(vblank_reg,
+ REG_WRITE(map->vblank,
(adjusted_mode->crtc_vblank_start - offsetY - 1) |
((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
- REG_WRITE(vsync_reg,
+ REG_WRITE(map->vsync,
(adjusted_mode->crtc_vsync_start - offsetY - 1) |
((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
} else {
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
}
@@ -408,10 +396,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
}
/* setup pipeconf */
- pipeconf = REG_READ(pipeconf_reg);
+ pipeconf = REG_READ(map->conf);
/* Set up the display plane register */
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr |= DISPPLANE_GAMMA_ENABLE;
if (pipe == 0)
@@ -467,30 +455,30 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
mrstPrintPll("chosen", &clock);
if (dpll & DPLL_VCO_ENABLE) {
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Check the DPLLA lock bit PIPEACONF[29] */
udelay(150);
}
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
/* write it again -- the BIOS does, after all */
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, pipeconf);
+ REG_READ(map->conf);
psb_intel_wait_for_vblank(dev);
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
psb_intel_wait_for_vblank(dev);
oaktrail_crtc_mode_set_exit:
@@ -509,15 +497,13 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
int ret = 0;
@@ -533,9 +519,9 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
start = psbfb->gtt->offset;
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -557,12 +543,12 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
ret = -EINVAL;
goto pipe_set_base_exit;
}
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
- REG_WRITE(dspbase, offset);
- REG_READ(dspbase);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
+ REG_WRITE(map->base, offset);
+ REG_READ(map->base);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
pipe_set_base_exit:
gma_power_end(dev);
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 41d1924ea31e..0f9b7db80f6b 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -187,6 +187,7 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_save_area *regs = &dev_priv->regs;
+ struct psb_pipe *p = &regs->pipe[0];
int i;
u32 pp_stat;
@@ -201,24 +202,24 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
regs->psb.saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
/* Pipe & plane A info */
- regs->psb.savePIPEACONF = PSB_RVDC32(PIPEACONF);
- regs->psb.savePIPEASRC = PSB_RVDC32(PIPEASRC);
- regs->psb.saveFPA0 = PSB_RVDC32(MRST_FPA0);
- regs->psb.saveFPA1 = PSB_RVDC32(MRST_FPA1);
- regs->psb.saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
- regs->psb.saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
- regs->psb.saveHBLANK_A = PSB_RVDC32(HBLANK_A);
- regs->psb.saveHSYNC_A = PSB_RVDC32(HSYNC_A);
- regs->psb.saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
- regs->psb.saveVBLANK_A = PSB_RVDC32(VBLANK_A);
- regs->psb.saveVSYNC_A = PSB_RVDC32(VSYNC_A);
+ p->conf = PSB_RVDC32(PIPEACONF);
+ p->src = PSB_RVDC32(PIPEASRC);
+ p->fp0 = PSB_RVDC32(MRST_FPA0);
+ p->fp1 = PSB_RVDC32(MRST_FPA1);
+ p->dpll = PSB_RVDC32(MRST_DPLL_A);
+ p->htotal = PSB_RVDC32(HTOTAL_A);
+ p->hblank = PSB_RVDC32(HBLANK_A);
+ p->hsync = PSB_RVDC32(HSYNC_A);
+ p->vtotal = PSB_RVDC32(VTOTAL_A);
+ p->vblank = PSB_RVDC32(VBLANK_A);
+ p->vsync = PSB_RVDC32(VSYNC_A);
regs->psb.saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
- regs->psb.saveDSPACNTR = PSB_RVDC32(DSPACNTR);
- regs->psb.saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
- regs->psb.saveDSPAADDR = PSB_RVDC32(DSPABASE);
- regs->psb.saveDSPASURF = PSB_RVDC32(DSPASURF);
- regs->psb.saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
- regs->psb.saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
+ p->cntr = PSB_RVDC32(DSPACNTR);
+ p->stride = PSB_RVDC32(DSPASTRIDE);
+ p->addr = PSB_RVDC32(DSPABASE);
+ p->surf = PSB_RVDC32(DSPASURF);
+ p->linoff = PSB_RVDC32(DSPALINOFF);
+ p->tileoff = PSB_RVDC32(DSPATILEOFF);
/* Save cursor regs */
regs->psb.saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
@@ -227,7 +228,7 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
/* Save palette (gamma) */
for (i = 0; i < 256; i++)
- regs->psb.save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
+ p->palette[i] = PSB_RVDC32(PALETTE_A + (i << 2));
if (dev_priv->hdmi_priv)
oaktrail_hdmi_save(dev);
@@ -300,6 +301,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_save_area *regs = &dev_priv->regs;
+ struct psb_pipe *p = &regs->pipe[0];
u32 pp_stat;
int i;
@@ -317,21 +319,21 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
PSB_WVDC32(0x80000000, VGACNTRL);
/* set the plls */
- PSB_WVDC32(regs->psb.saveFPA0, MRST_FPA0);
- PSB_WVDC32(regs->psb.saveFPA1, MRST_FPA1);
+ PSB_WVDC32(p->fp0, MRST_FPA0);
+ PSB_WVDC32(p->fp1, MRST_FPA1);
/* Actually enable it */
- PSB_WVDC32(regs->psb.saveDPLL_A, MRST_DPLL_A);
+ PSB_WVDC32(p->dpll, MRST_DPLL_A);
DRM_UDELAY(150);
/* Restore mode */
- PSB_WVDC32(regs->psb.saveHTOTAL_A, HTOTAL_A);
- PSB_WVDC32(regs->psb.saveHBLANK_A, HBLANK_A);
- PSB_WVDC32(regs->psb.saveHSYNC_A, HSYNC_A);
- PSB_WVDC32(regs->psb.saveVTOTAL_A, VTOTAL_A);
- PSB_WVDC32(regs->psb.saveVBLANK_A, VBLANK_A);
- PSB_WVDC32(regs->psb.saveVSYNC_A, VSYNC_A);
- PSB_WVDC32(regs->psb.savePIPEASRC, PIPEASRC);
+ PSB_WVDC32(p->htotal, HTOTAL_A);
+ PSB_WVDC32(p->hblank, HBLANK_A);
+ PSB_WVDC32(p->hsync, HSYNC_A);
+ PSB_WVDC32(p->vtotal, VTOTAL_A);
+ PSB_WVDC32(p->vblank, VBLANK_A);
+ PSB_WVDC32(p->vsync, VSYNC_A);
+ PSB_WVDC32(p->src, PIPEASRC);
PSB_WVDC32(regs->psb.saveBCLRPAT_A, BCLRPAT_A);
/* Restore performance mode*/
@@ -339,16 +341,16 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
/* Enable the pipe*/
if (dev_priv->iLVDS_enable)
- PSB_WVDC32(regs->psb.savePIPEACONF, PIPEACONF);
+ PSB_WVDC32(p->conf, PIPEACONF);
/* Set up the plane*/
- PSB_WVDC32(regs->psb.saveDSPALINOFF, DSPALINOFF);
- PSB_WVDC32(regs->psb.saveDSPASTRIDE, DSPASTRIDE);
- PSB_WVDC32(regs->psb.saveDSPATILEOFF, DSPATILEOFF);
+ PSB_WVDC32(p->linoff, DSPALINOFF);
+ PSB_WVDC32(p->stride, DSPASTRIDE);
+ PSB_WVDC32(p->tileoff, DSPATILEOFF);
/* Enable the plane */
- PSB_WVDC32(regs->psb.saveDSPACNTR, DSPACNTR);
- PSB_WVDC32(regs->psb.saveDSPASURF, DSPASURF);
+ PSB_WVDC32(p->cntr, DSPACNTR);
+ PSB_WVDC32(p->surf, DSPASURF);
/* Enable Cursor A */
PSB_WVDC32(regs->psb.saveDSPACURSOR_CTRL, CURACNTR);
@@ -357,7 +359,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
/* Restore palette (gamma) */
for (i = 0; i < 256; i++)
- PSB_WVDC32(regs->psb.save_palette_a[i], PALETTE_A + (i << 2));
+ PSB_WVDC32(p->palette[i], PALETTE_A + (i << 2));
if (dev_priv->hdmi_priv)
oaktrail_hdmi_restore(dev);
@@ -454,31 +456,84 @@ static int oaktrail_power_up(struct drm_device *dev)
return 0;
}
+/* Oaktrail */
+static const struct psb_offset oaktrail_regmap[2] = {
+ {
+ .fp0 = MRST_FPA0,
+ .fp1 = MRST_FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = MRST_DPLL_A,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .surf = DSPASURF,
+ .addr = MRST_DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = FPB0,
+ .fp1 = FPB1,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = DPLL_B,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .surf = DSPBSURF,
+ .addr = DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ },
+};
static int oaktrail_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
int ret;
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+
+ dev_priv->regmap = oaktrail_regmap;
+
ret = mid_chip_setup(dev);
if (ret < 0)
return ret;
- if (vbt->size == 0) {
+ if (!dev_priv->has_gct) {
/* Now pull the BIOS data */
- gma_intel_opregion_init(dev);
+ psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
}
+ oaktrail_hdmi_setup(dev);
return 0;
}
static void oaktrail_teardown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
oaktrail_hdmi_teardown(dev);
- if (vbt->size == 0)
+ if (!dev_priv->has_gct)
psb_intel_destroy_bios(dev);
}
@@ -487,6 +542,9 @@ const struct psb_ops oaktrail_chip_ops = {
.accel_2d = 1,
.pipes = 2,
.crtcs = 2,
+ .hdmi_mask = (1 << 0),
+ .lvds_mask = (1 << 0),
+ .cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
.chip_setup = oaktrail_chip_setup,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f8b367b45f66..c10899c953b9 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -179,7 +179,6 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
@@ -188,11 +187,6 @@ static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- /* We assume worst case scenario of 32 bpp here, since we don't know */
- if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
- dev_priv->vram_stolen_size)
- return MODE_MEM;
-
return MODE_OK;
}
@@ -440,6 +434,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct psb_state *regs = &dev_priv->regs.psb;
+ struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
int i;
/* dpll */
@@ -450,14 +445,14 @@ void oaktrail_hdmi_save(struct drm_device *dev)
hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
/* pipe B */
- regs->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
- regs->savePIPEBSRC = PSB_RVDC32(PIPEBSRC);
- regs->saveHTOTAL_B = PSB_RVDC32(HTOTAL_B);
- regs->saveHBLANK_B = PSB_RVDC32(HBLANK_B);
- regs->saveHSYNC_B = PSB_RVDC32(HSYNC_B);
- regs->saveVTOTAL_B = PSB_RVDC32(VTOTAL_B);
- regs->saveVBLANK_B = PSB_RVDC32(VBLANK_B);
- regs->saveVSYNC_B = PSB_RVDC32(VSYNC_B);
+ pipeb->conf = PSB_RVDC32(PIPEBCONF);
+ pipeb->src = PSB_RVDC32(PIPEBSRC);
+ pipeb->htotal = PSB_RVDC32(HTOTAL_B);
+ pipeb->hblank = PSB_RVDC32(HBLANK_B);
+ pipeb->hsync = PSB_RVDC32(HSYNC_B);
+ pipeb->vtotal = PSB_RVDC32(VTOTAL_B);
+ pipeb->vblank = PSB_RVDC32(VBLANK_B);
+ pipeb->vsync = PSB_RVDC32(VSYNC_B);
hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
@@ -469,12 +464,12 @@ void oaktrail_hdmi_save(struct drm_device *dev)
hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B);
/* plane */
- regs->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
- regs->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
- regs->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
- regs->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
- regs->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
- regs->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
+ pipeb->cntr = PSB_RVDC32(DSPBCNTR);
+ pipeb->stride = PSB_RVDC32(DSPBSTRIDE);
+ pipeb->addr = PSB_RVDC32(DSPBBASE);
+ pipeb->surf = PSB_RVDC32(DSPBSURF);
+ pipeb->linoff = PSB_RVDC32(DSPBLINOFF);
+ pipeb->tileoff = PSB_RVDC32(DSPBTILEOFF);
/* cursor B */
regs->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
@@ -483,7 +478,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
/* save palette */
for (i = 0; i < 256; i++)
- regs->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
+ pipeb->palette[i] = PSB_RVDC32(PALETTE_B + (i << 2));
}
/* restore HDMI register state */
@@ -492,6 +487,7 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct psb_state *regs = &dev_priv->regs.psb;
+ struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
int i;
/* dpll */
@@ -503,13 +499,13 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
DRM_UDELAY(150);
/* pipe */
- PSB_WVDC32(regs->savePIPEBSRC, PIPEBSRC);
- PSB_WVDC32(regs->saveHTOTAL_B, HTOTAL_B);
- PSB_WVDC32(regs->saveHBLANK_B, HBLANK_B);
- PSB_WVDC32(regs->saveHSYNC_B, HSYNC_B);
- PSB_WVDC32(regs->saveVTOTAL_B, VTOTAL_B);
- PSB_WVDC32(regs->saveVBLANK_B, VBLANK_B);
- PSB_WVDC32(regs->saveVSYNC_B, VSYNC_B);
+ PSB_WVDC32(pipeb->src, PIPEBSRC);
+ PSB_WVDC32(pipeb->htotal, HTOTAL_B);
+ PSB_WVDC32(pipeb->hblank, HBLANK_B);
+ PSB_WVDC32(pipeb->hsync, HSYNC_B);
+ PSB_WVDC32(pipeb->vtotal, VTOTAL_B);
+ PSB_WVDC32(pipeb->vblank, VBLANK_B);
+ PSB_WVDC32(pipeb->vsync, VSYNC_B);
PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
@@ -519,15 +515,15 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B);
- PSB_WVDC32(regs->savePIPEBCONF, PIPEBCONF);
+ PSB_WVDC32(pipeb->conf, PIPEBCONF);
PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
/* plane */
- PSB_WVDC32(regs->saveDSPBLINOFF, DSPBLINOFF);
- PSB_WVDC32(regs->saveDSPBSTRIDE, DSPBSTRIDE);
- PSB_WVDC32(regs->saveDSPBTILEOFF, DSPBTILEOFF);
- PSB_WVDC32(regs->saveDSPBCNTR, DSPBCNTR);
- PSB_WVDC32(regs->saveDSPBSURF, DSPBSURF);
+ PSB_WVDC32(pipeb->linoff, DSPBLINOFF);
+ PSB_WVDC32(pipeb->stride, DSPBSTRIDE);
+ PSB_WVDC32(pipeb->tileoff, DSPBTILEOFF);
+ PSB_WVDC32(pipeb->cntr, DSPBCNTR);
+ PSB_WVDC32(pipeb->surf, DSPBSURF);
/* cursor B */
PSB_WVDC32(regs->saveDSPBCURSOR_CTRL, CURBCNTR);
@@ -536,5 +532,5 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
/* restore palette */
for (i = 0; i < 256; i++)
- PSB_WVDC32(regs->save_palette_b[i], PALETTE_B + (i << 2));
+ PSB_WVDC32(pipeb->palette[i], PALETTE_B + (i << 2));
}
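The save/restore rework above keeps per-pipe state in regs->pipe[n] instead of individually named save* fields. A sketch of the idea, with an invented helper name; it assumes the psb_pipe fields used above and the regmap table, so the same code can serve any pipe.

/* Illustrative only: save one pipe's timing registers into regs->pipe[pipe] */
static void example_save_pipe_timings(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
	const struct psb_offset *map = &dev_priv->regmap[pipe];

	p->conf = PSB_RVDC32(map->conf);
	p->src = PSB_RVDC32(map->src);
	p->htotal = PSB_RVDC32(map->htotal);
	p->hblank = PSB_RVDC32(map->hblank);
	p->hsync = PSB_RVDC32(map->hsync);
	p->vtotal = PSB_RVDC32(map->vtotal);
	p->vblank = PSB_RVDC32(map->vblank);
	p->vsync = PSB_RVDC32(map->vsync);
}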
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index 5e84fbde749b..88627e3ba1e3 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -250,7 +250,7 @@ static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
*/
static void oaktrail_hdmi_i2c_gpio_fix(void)
{
- void *base;
+ void __iomem *base;
unsigned int gpio_base = 0xff12c000;
int gpio_len = 0x1000;
u32 temp;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 654f32b22b21..558c77fb55ec 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -257,7 +257,7 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
mode_dev->panel_fixed_mode = NULL;
/* Use the firmware provided data on Moorestown */
- if (dev_priv->vbt_data.size != 0x00) { /*if non-zero, then use vbt*/
+ if (dev_priv->has_gct) {
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
return;
@@ -371,7 +371,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
BRIGHTNESS_MAX_LEVEL);
mode_dev->panel_wants_dither = false;
- if (dev_priv->vbt_data.size != 0x00)
+ if (dev_priv->has_gct)
mode_dev->panel_wants_dither = (dev_priv->gct_data.
Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
if (dev_priv->lvds_dither)
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
new file mode 100644
index 000000000000..4f186eca3a30
--- /dev/null
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -0,0 +1,344 @@
+/*
+ * Copyright 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+#define PCI_ASLE 0xe4
+#define PCI_ASLS 0xfc
+
+#define OPREGION_HEADER_OFFSET 0
+#define OPREGION_ACPI_OFFSET 0x100
+#define ACPI_CLID 0x01ac /* current lid state indicator */
+#define ACPI_CDCK 0x01b0 /* current docking state indicator */
+#define OPREGION_SWSCI_OFFSET 0x200
+#define OPREGION_ASLE_OFFSET 0x300
+#define OPREGION_VBT_OFFSET 0x400
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_ACPI (1<<0)
+#define MBOX_SWSCI (1<<1)
+#define MBOX_ASLE (1<<2)
+
+struct opregion_header {
+ u8 signature[16];
+ u32 size;
+ u32 opregion_ver;
+ u8 bios_ver[32];
+ u8 vbios_ver[16];
+ u8 driver_ver[16];
+ u32 mboxes;
+ u8 reserved[164];
+} __packed;
+
+/* OpRegion mailbox #1: public ACPI methods */
+struct opregion_acpi {
+ u32 drdy; /* driver readiness */
+ u32 csts; /* notification status */
+ u32 cevt; /* current event */
+ u8 rsvd1[20];
+ u32 didl[8]; /* supported display devices ID list */
+ u32 cpdl[8]; /* currently presented display list */
+ u32 cadl[8]; /* currently active display list */
+ u32 nadl[8]; /* next active devices list */
+ u32 aslp; /* ASL sleep time-out */
+ u32 tidx; /* toggle table index */
+ u32 chpd; /* current hotplug enable indicator */
+ u32 clid; /* current lid state*/
+ u32 cdck; /* current docking state */
+ u32 sxsw; /* Sx state resume */
+ u32 evts; /* ASL supported events */
+ u32 cnot; /* current OS notification */
+ u32 nrdy; /* driver status */
+ u8 rsvd2[60];
+} __packed;
+
+/* OpRegion mailbox #2: SWSCI */
+struct opregion_swsci {
+ /*FIXME: add it later*/
+} __packed;
+
+/* OpRegion mailbox #3: ASLE */
+struct opregion_asle {
+ u32 ardy; /* driver readiness */
+ u32 aslc; /* ASLE interrupt command */
+ u32 tche; /* technology enabled indicator */
+ u32 alsi; /* current ALS illuminance reading */
+ u32 bclp; /* backlight brightness to set */
+ u32 pfit; /* panel fitting state */
+ u32 cblv; /* current brightness level */
+ u16 bclm[20]; /* backlight level duty cycle mapping table */
+ u32 cpfm; /* current panel fitting mode */
+ u32 epfm; /* enabled panel fitting modes */
+ u8 plut[74]; /* panel LUT and identifier */
+ u32 pfmb; /* PWM freq and min brightness */
+ u8 rsvd[102];
+} __packed;
+
+/* ASLE irq request bits */
+#define ASLE_SET_ALS_ILLUM (1 << 0)
+#define ASLE_SET_BACKLIGHT (1 << 1)
+#define ASLE_SET_PFIT (1 << 2)
+#define ASLE_SET_PWM_FREQ (1 << 3)
+#define ASLE_REQ_MSK 0xf
+
+/* response bits of ASLE irq request */
+#define ASLE_ALS_ILLUM_FAILED (1<<10)
+#define ASLE_BACKLIGHT_FAILED (1<<12)
+#define ASLE_PFIT_FAILED (1<<14)
+#define ASLE_PWM_FREQ_FAILED (1<<16)
+
+/* ASLE backlight brightness to set */
+#define ASLE_BCLP_VALID (1<<31)
+#define ASLE_BCLP_MSK (~(1<<31))
+
+/* ASLE panel fitting request */
+#define ASLE_PFIT_VALID (1<<31)
+#define ASLE_PFIT_CENTER (1<<0)
+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
+#define ASLE_PFIT_STRETCH_GFX (1<<2)
+
+/* PWM frequency and minimum brightness */
+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+#define ASLE_PFMB_PWM_VALID (1<<31)
+
+#define ASLE_CBLV_VALID (1<<31)
+
+static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct backlight_device *bd = dev_priv->backlight_device;
+
+ DRM_DEBUG_DRIVER("asle set backlight %x\n", bclp);
+
+ if (!(bclp & ASLE_BCLP_VALID))
+ return ASLE_BACKLIGHT_FAILED;
+
+ if (bd == NULL)
+ return ASLE_BACKLIGHT_FAILED;
+
+ bclp &= ASLE_BCLP_MSK;
+ if (bclp > 255)
+ return ASLE_BACKLIGHT_FAILED;
+
+ if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
+ int max = bd->props.max_brightness;
+ bd->props.brightness = bclp * max / 255;
+ backlight_update_status(bd);
+ }
+
+ asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
+
+ return 0;
+}
+
+void psb_intel_opregion_asle_intr(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ u32 asle_stat = 0;
+ u32 asle_req;
+
+ if (!asle)
+ return;
+
+ asle_req = asle->aslc & ASLE_REQ_MSK;
+ if (!asle_req) {
+ DRM_DEBUG_DRIVER("non asle set request??\n");
+ return;
+ }
+
+ if (asle_req & ASLE_SET_BACKLIGHT)
+ asle_stat |= asle_set_backlight(dev, asle->bclp);
+
+ asle->aslc = asle_stat;
+}
+
+#define ASLE_ALS_EN (1<<0)
+#define ASLE_BLC_EN (1<<1)
+#define ASLE_PFIT_EN (1<<2)
+#define ASLE_PFMB_EN (1<<3)
+
+void psb_intel_opregion_enable_asle(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+
+ if (asle) {
+ /* Don't do this on Medfield or other non-PC-like devices; they
+ use the bit for something different altogether */
+ psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
+ psb_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
+
+ asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN
+ | ASLE_PFMB_EN;
+ asle->ardy = 1;
+ }
+}
+
+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#define ACPI_EV_LID (1<<1)
+#define ACPI_EV_DOCK (1<<2)
+
+static struct psb_intel_opregion *system_opregion;
+
+static int psb_intel_opregion_video_event(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ /* The only video events relevant to opregion are 0x80. These indicate
+ either a docking event, lid switch or display switch request. In
+ Linux, these are handled by the dock, button and video drivers.
+ We might want to fix the video driver to be opregion-aware in
+ future, but right now we just indicate to the firmware that the
+ request has been handled */
+
+ struct opregion_acpi *acpi;
+
+ if (!system_opregion)
+ return NOTIFY_DONE;
+
+ acpi = system_opregion->acpi;
+ acpi->csts = 0;
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block psb_intel_opregion_notifier = {
+ .notifier_call = psb_intel_opregion_video_event,
+};
+
+void psb_intel_opregion_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ /* Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video
+ * module. We don't actually need to do anything with them. */
+ opregion->acpi->csts = 0;
+ opregion->acpi->drdy = 1;
+
+ system_opregion = opregion;
+ register_acpi_notifier(&psb_intel_opregion_notifier);
+ }
+
+ if (opregion->asle)
+ psb_intel_opregion_enable_asle(dev);
+}
+
+void psb_intel_opregion_fini(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ opregion->acpi->drdy = 0;
+
+ system_opregion = NULL;
+ unregister_acpi_notifier(&psb_intel_opregion_notifier);
+ }
+
+ /* just clear all opregion memory pointers now */
+ iounmap(opregion->header);
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->vbt = NULL;
+}
+
+int psb_intel_opregion_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_opregion *opregion = &dev_priv->opregion;
+ u32 opregion_phy, mboxes;
+ void __iomem *base;
+ int err = 0;
+
+ pci_read_config_dword(dev->pdev, PCI_ASLS, &opregion_phy);
+ if (opregion_phy == 0) {
+ DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
+ return -ENOTSUPP;
+ }
+ DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
+ base = acpi_os_ioremap(opregion_phy, 8*1024);
+ if (!base)
+ return -ENOMEM;
+
+ if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+ DRM_DEBUG_DRIVER("opregion signature mismatch\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ opregion->header = base;
+ opregion->vbt = base + OPREGION_VBT_OFFSET;
+
+ opregion->lid_state = base + ACPI_CLID;
+
+ mboxes = opregion->header->mboxes;
+ if (mboxes & MBOX_ACPI) {
+ DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+ opregion->acpi = base + OPREGION_ACPI_OFFSET;
+ }
+
+ if (mboxes & MBOX_ASLE) {
+ DRM_DEBUG_DRIVER("ASLE supported\n");
+ opregion->asle = base + OPREGION_ASLE_OFFSET;
+ }
+
+ return 0;
+
+err_out:
+ iounmap(base);
+ return err;
+}
+
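[Editor's note: setup follows the usual OpRegion discovery sequence: read the ASLS dword from PCI config space, ioremap the 8 KiB region, verify the signature, then consult the mailbox bitfield to see which sections the firmware actually provides. A sketch of the mailbox decode; the EX_MBOX_* values below are assumptions for illustration, mirroring what the driver defines elsewhere rather than copying it.]

#define EX_MBOX_ACPI  (1 << 0)  /* public ACPI methods */
#define EX_MBOX_SWSCI (1 << 1)  /* software SCI */
#define EX_MBOX_ASLE  (1 << 2)  /* ASLE backlight/ALS */

static void example_decode_mboxes(u32 mboxes)
{
        if (mboxes & EX_MBOX_ACPI)
                pr_debug("opregion: ACPI mailbox present\n");
        if (mboxes & EX_MBOX_SWSCI)
                pr_debug("opregion: SWSCI mailbox present\n");
        if (mboxes & EX_MBOX_ASLE)
                pr_debug("opregion: ASLE mailbox present\n");
}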
diff --git a/drivers/gpu/drm/gma500/intel_opregion.c b/drivers/gpu/drm/gma500/opregion.h
index d946bc1b17bf..72dc6b921265 100644
--- a/drivers/gpu/drm/gma500/intel_opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2010 Intel Corporation
+ * Copyright 2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,62 +20,30 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
- * FIXME: resolve with the i915 version
*/
-#include "psb_drv.h"
+#if defined(CONFIG_ACPI)
+extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
+extern void psb_intel_opregion_init(struct drm_device *dev);
+extern void psb_intel_opregion_fini(struct drm_device *dev);
+extern int psb_intel_opregion_setup(struct drm_device *dev);
-struct opregion_header {
- u8 signature[16];
- u32 size;
- u32 opregion_ver;
- u8 bios_ver[32];
- u8 vbios_ver[16];
- u8 driver_ver[16];
- u32 mboxes;
- u8 reserved[164];
-} __packed;
+#else
-struct opregion_apci {
- /*FIXME: add it later*/
-} __packed;
-
-struct opregion_swsci {
- /*FIXME: add it later*/
-} __packed;
-
-struct opregion_acpi {
- /*FIXME: add it later*/
-} __packed;
-
-int gma_intel_opregion_init(struct drm_device *dev)
+extern inline void psb_intel_opregion_asle_intr(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
- u32 opregion_phy;
- void *base;
- u32 *lid_state;
-
- dev_priv->lid_state = NULL;
-
- pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
- if (opregion_phy == 0)
- return -ENOTSUPP;
-
- base = ioremap(opregion_phy, 8*1024);
- if (!base)
- return -ENOMEM;
+}
- lid_state = base + 0x01ac;
+extern inline void psb_intel_opregion_init(struct drm_device *dev)
+{
+}
- dev_priv->lid_state = lid_state;
- dev_priv->lid_last_state = readl(lid_state);
- return 0;
+extern inline void psb_intel_opregion_fini(struct drm_device *dev)
+{
}
-int gma_intel_opregion_exit(struct drm_device *dev)
+extern inline int psb_intel_opregion_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
- if (dev_priv->lid_state)
- iounmap(dev_priv->lid_state);
return 0;
}
+#endif
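[Editor's note: the replacement header keeps callers free of #ifdef CONFIG_ACPI by providing no-op fallbacks when ACPI is compiled out (declared as extern inline in this patch). The same effect is more commonly written with static inline stubs; a sketch under that assumption, with an illustrative function name.]

#ifdef CONFIG_ACPI
int example_opregion_setup(struct drm_device *dev);
#else
static inline int example_opregion_setup(struct drm_device *dev)
{
        return 0;       /* nothing to do without ACPI */
}
#endif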
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 95d163e4f1f4..eff039bf92d4 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -197,7 +197,8 @@ static int psb_save_display_registers(struct drm_device *dev)
}
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- connector->funcs->save(connector);
+ if (connector->funcs->save)
+ connector->funcs->save(connector);
mutex_unlock(&dev->mode_config.mutex);
return 0;
@@ -235,7 +236,8 @@ static int psb_restore_display_registers(struct drm_device *dev)
crtc->funcs->restore(crtc);
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- connector->funcs->restore(connector);
+ if (connector->funcs->restore)
+ connector->funcs->restore(connector);
mutex_unlock(&dev->mode_config.mutex);
return 0;
@@ -289,17 +291,80 @@ static void psb_get_core_freq(struct drm_device *dev)
}
}
+/* Poulsbo */
+static const struct psb_offset psb_regmap[2] = {
+ {
+ .fp0 = FPA0,
+ .fp1 = FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = DPLL_A,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .base = DSPABASE,
+ .surf = DSPASURF,
+ .addr = DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = FPB0,
+ .fp1 = FPB1,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = DPLL_B,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .base = DSPBBASE,
+ .surf = DSPBSURF,
+ .addr = DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ }
+};
+
static int psb_chip_setup(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->regmap = psb_regmap;
psb_get_core_freq(dev);
gma_intel_setup_gmbus(dev);
- gma_intel_opregion_init(dev);
+ psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
return 0;
}
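[Editor's note: with the regmap table installed by chip_setup, pipe-dependent code can index a single psb_offset entry instead of open-coding (pipe == 0) ? REG_A : REG_B ternaries for every register, which is exactly the conversion the later display hunks perform. A small illustrative accessor; the structure and macro names are assumed from the surrounding context.]

static u32 example_pipe_dpll(struct drm_device *dev, int pipe)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        const struct psb_offset *map = &dev_priv->regmap[pipe];

        return REG_READ(map->dpll);     /* was: REG_READ(pipe == 0 ? DPLL_A : DPLL_B) */
}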
+/* Not exactly an erratum, more an irritation */
+static void psb_chip_errata(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ psb_lid_timer_init(dev_priv);
+}
+
static void psb_chip_teardown(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ psb_lid_timer_takedown(dev_priv);
gma_intel_teardown_gmbus(dev);
}
@@ -308,9 +373,13 @@ const struct psb_ops psb_chip_ops = {
.accel_2d = 1,
.pipes = 2,
.crtcs = 2,
+ .hdmi_mask = (1 << 0),
+ .lvds_mask = (1 << 1),
+ .cursor_needs_phys = 1,
.sgx_offset = PSB_SGX_OFFSET,
.chip_setup = psb_chip_setup,
.chip_teardown = psb_chip_teardown,
+ .errata = psb_chip_errata,
.crtc_helper = &psb_intel_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index c34adf9d910a..caba6e08693c 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -79,6 +79,14 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
{ 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
{ 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
{ 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
#endif
{ 0, }
};
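[Editor's note: the eight new Cedarview IDs (0x0be8-0x0bef) all carry a pointer to cdv_chip_ops in driver_data, so probe-time code can recover the per-chip operations without switching on the device ID. The function below is illustrative only and not part of the driver.]

static void example_report_chip(struct pci_dev *pdev,
                                const struct pci_device_id *ent)
{
        const struct psb_ops *ops = (const struct psb_ops *)ent->driver_data;

        dev_info(&pdev->dev, "gma500 chip with %d pipe(s), %d CRTC(s)\n",
                 ops->pipes, ops->crtcs);
}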
@@ -144,10 +152,6 @@ static void psb_lastclose(struct drm_device *dev)
return;
}
-static void psb_do_takedown(struct drm_device *dev)
-{
-}
-
static int psb_do_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -172,24 +176,6 @@ static int psb_do_init(struct drm_device *dev)
dev_priv->gatt_free_offset = pg->mmu_gatt_start +
(stolen_gtt << PAGE_SHIFT) * 1024;
- if (1 || drm_debug) {
- uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
- uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
- DRM_INFO("SGX core id = 0x%08x\n", core_id);
- DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
- (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
- _PSB_CC_REVISION_MAJOR_SHIFT,
- (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
- _PSB_CC_REVISION_MINOR_SHIFT);
- DRM_INFO
- ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
- (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
- _PSB_CC_REVISION_MAINTENANCE_SHIFT,
- (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
- _PSB_CC_REVISION_DESIGNER_SHIFT);
- }
-
-
spin_lock_init(&dev_priv->irqmask_lock);
spin_lock_init(&dev_priv->lock_2d);
@@ -204,7 +190,6 @@ static int psb_do_init(struct drm_device *dev)
PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
return 0;
out_err:
- psb_do_takedown(dev);
return ret;
}
@@ -214,18 +199,16 @@ static int psb_driver_unload(struct drm_device *dev)
/* Kill vblank etc here */
- gma_backlight_exit(dev);
-
- psb_modeset_cleanup(dev);
if (dev_priv) {
- psb_lid_timer_takedown(dev_priv);
- gma_intel_opregion_exit(dev);
+ if (dev_priv->backlight_device)
+ gma_backlight_exit(dev);
+ psb_modeset_cleanup(dev);
if (dev_priv->ops->chip_teardown)
dev_priv->ops->chip_teardown(dev);
- psb_do_takedown(dev);
+ psb_intel_opregion_fini(dev);
if (dev_priv->pf_pd) {
psb_mmu_free_pagedir(dev_priv->pf_pd);
@@ -246,6 +229,7 @@ static int psb_driver_unload(struct drm_device *dev)
}
psb_gtt_takedown(dev);
if (dev_priv->scratch_page) {
+ set_pages_wb(dev_priv->scratch_page, 1);
__free_page(dev_priv->scratch_page);
dev_priv->scratch_page = NULL;
}
@@ -258,15 +242,13 @@ static int psb_driver_unload(struct drm_device *dev)
dev_priv->sgx_reg = NULL;
}
+ /* Destroy VBT data */
+ psb_intel_destroy_bios(dev);
+
kfree(dev_priv);
dev->dev_private = NULL;
-
- /*destroy VBT data*/
- psb_intel_destroy_bios(dev);
}
-
gma_power_uninit(dev);
-
return 0;
}
@@ -290,11 +272,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
pci_set_master(dev->pdev);
- if (!IS_PSB(dev)) {
- if (pci_enable_msi(dev->pdev))
- dev_warn(dev->dev, "Enabling MSI failed!\n");
- }
-
dev_priv->num_pipe = dev_priv->ops->pipes;
resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
@@ -309,6 +286,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (!dev_priv->sgx_reg)
goto out_err;
+ psb_intel_opregion_setup(dev);
+
ret = dev_priv->ops->chip_setup(dev);
if (ret)
goto out_err;
@@ -348,10 +327,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
-/* igd_opregion_init(&dev_priv->opregion_dev); */
acpi_video_register();
- if (dev_priv->lid_state)
- psb_lid_timer_init(dev_priv);
ret = drm_vblank_init(dev, dev_priv->num_pipe);
if (ret)
@@ -370,8 +346,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
- if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
- drm_irq_install(dev);
+
+ drm_irq_install(dev);
dev->vblank_disable_allowed = 1;
@@ -619,7 +595,7 @@ static const struct dev_pm_ops psb_pm_ops = {
.runtime_idle = psb_runtime_idle,
};
-static struct vm_operations_struct psb_gem_vm_ops = {
+static const struct vm_operations_struct psb_gem_vm_ops = {
.fault = psb_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 40ce2c9bc2e4..1bd115ecefe1 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -30,6 +30,7 @@
#include "psb_intel_drv.h"
#include "gtt.h"
#include "power.h"
+#include "opregion.h"
#include "oaktrail.h"
/* Append new drm mode definition here, align with libdrm definition */
@@ -120,6 +121,7 @@ enum {
#define PSB_HWSTAM 0x2098
#define PSB_INSTPM 0x20C0
#define PSB_INT_IDENTITY_R 0x20A4
+#define _PSB_IRQ_ASLE (1<<0)
#define _MDFLD_PIPEC_EVENT_FLAG (1<<2)
#define _MDFLD_PIPEC_VBLANK_FLAG (1<<3)
#define _PSB_DPST_PIPEB_FLAG (1<<4)
@@ -130,6 +132,7 @@ enum {
#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
#define _MDFLD_MIPIA_FLAG (1<<16)
#define _MDFLD_MIPIC_FLAG (1<<17)
+#define _PSB_IRQ_DISP_HOTSYNC (1<<17)
#define _PSB_IRQ_SGX_FLAG (1<<18)
#define _PSB_IRQ_MSVDX_FLAG (1<<19)
#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
@@ -257,7 +260,8 @@ struct psb_intel_opregion {
struct opregion_acpi *acpi;
struct opregion_swsci *swsci;
struct opregion_asle *asle;
- int enabled;
+ void *vbt;
+ u32 __iomem *lid_state;
};
struct sdvo_device_mapping {
@@ -277,50 +281,72 @@ struct intel_gmbus {
};
/*
+ * Register offset maps
+ */
+
+struct psb_offset {
+ u32 fp0;
+ u32 fp1;
+ u32 cntr;
+ u32 conf;
+ u32 src;
+ u32 dpll;
+ u32 dpll_md;
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 surf;
+ u32 addr;
+ u32 base;
+ u32 status;
+ u32 linoff;
+ u32 tileoff;
+ u32 palette;
+};
+
+/*
* Register save state. This is used to hold the context when the
* device is powered off. In the case of Oaktrail this can (but does not
* yet) include screen blank. Operations occurring during the save
* update the register cache instead.
*/
+
+/*
+ * Common status for pipes.
+ */
+struct psb_pipe {
+ u32 fp0;
+ u32 fp1;
+ u32 cntr;
+ u32 conf;
+ u32 src;
+ u32 dpll;
+ u32 dpll_md;
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 base;
+ u32 surf;
+ u32 addr;
+ u32 status;
+ u32 linoff;
+ u32 tileoff;
+ u32 palette[256];
+};
+
struct psb_state {
- uint32_t saveDSPACNTR;
- uint32_t saveDSPBCNTR;
- uint32_t savePIPEACONF;
- uint32_t savePIPEBCONF;
- uint32_t savePIPEASRC;
- uint32_t savePIPEBSRC;
- uint32_t saveFPA0;
- uint32_t saveFPA1;
- uint32_t saveDPLL_A;
- uint32_t saveDPLL_A_MD;
- uint32_t saveHTOTAL_A;
- uint32_t saveHBLANK_A;
- uint32_t saveHSYNC_A;
- uint32_t saveVTOTAL_A;
- uint32_t saveVBLANK_A;
- uint32_t saveVSYNC_A;
- uint32_t saveDSPASTRIDE;
- uint32_t saveDSPASIZE;
- uint32_t saveDSPAPOS;
- uint32_t saveDSPABASE;
- uint32_t saveDSPASURF;
- uint32_t saveDSPASTATUS;
- uint32_t saveFPB0;
- uint32_t saveFPB1;
- uint32_t saveDPLL_B;
- uint32_t saveDPLL_B_MD;
- uint32_t saveHTOTAL_B;
- uint32_t saveHBLANK_B;
- uint32_t saveHSYNC_B;
- uint32_t saveVTOTAL_B;
- uint32_t saveVBLANK_B;
- uint32_t saveVSYNC_B;
- uint32_t saveDSPBSTRIDE;
- uint32_t saveDSPBSIZE;
- uint32_t saveDSPBPOS;
- uint32_t saveDSPBBASE;
- uint32_t saveDSPBSURF;
- uint32_t saveDSPBSTATUS;
uint32_t saveVCLK_DIVISOR_VGA0;
uint32_t saveVCLK_DIVISOR_VGA1;
uint32_t saveVCLK_POST_DIV;
@@ -335,14 +361,8 @@ struct psb_state {
uint32_t savePP_CONTROL;
uint32_t savePP_CYCLE;
uint32_t savePFIT_CONTROL;
- uint32_t savePaletteA[256];
- uint32_t savePaletteB[256];
uint32_t saveCLOCKGATING;
uint32_t saveDSPARB;
- uint32_t saveDSPATILEOFF;
- uint32_t saveDSPBTILEOFF;
- uint32_t saveDSPAADDR;
- uint32_t saveDSPBADDR;
uint32_t savePFIT_AUTO_RATIOS;
uint32_t savePFIT_PGM_RATIOS;
uint32_t savePP_ON_DELAYS;
@@ -350,8 +370,6 @@ struct psb_state {
uint32_t savePP_DIVISOR;
uint32_t saveBCLRPAT_A;
uint32_t saveBCLRPAT_B;
- uint32_t saveDSPALINOFF;
- uint32_t saveDSPBLINOFF;
uint32_t savePERF_MODE;
uint32_t saveDSPFW1;
uint32_t saveDSPFW2;
@@ -366,8 +384,6 @@ struct psb_state {
uint32_t saveDSPBCURSOR_BASE;
uint32_t saveDSPACURSOR_POS;
uint32_t saveDSPBCURSOR_POS;
- uint32_t save_palette_a[256];
- uint32_t save_palette_b[256];
uint32_t saveOV_OVADD;
uint32_t saveOV_OGAMC0;
uint32_t saveOV_OGAMC1;
@@ -390,64 +406,7 @@ struct psb_state {
};
struct medfield_state {
- uint32_t saveDPLL_A;
- uint32_t saveFPA0;
- uint32_t savePIPEACONF;
- uint32_t saveHTOTAL_A;
- uint32_t saveHBLANK_A;
- uint32_t saveHSYNC_A;
- uint32_t saveVTOTAL_A;
- uint32_t saveVBLANK_A;
- uint32_t saveVSYNC_A;
- uint32_t savePIPEASRC;
- uint32_t saveDSPASTRIDE;
- uint32_t saveDSPALINOFF;
- uint32_t saveDSPATILEOFF;
- uint32_t saveDSPASIZE;
- uint32_t saveDSPAPOS;
- uint32_t saveDSPASURF;
- uint32_t saveDSPACNTR;
- uint32_t saveDSPASTATUS;
- uint32_t save_palette_a[256];
uint32_t saveMIPI;
-
- uint32_t saveDPLL_B;
- uint32_t saveFPB0;
- uint32_t savePIPEBCONF;
- uint32_t saveHTOTAL_B;
- uint32_t saveHBLANK_B;
- uint32_t saveHSYNC_B;
- uint32_t saveVTOTAL_B;
- uint32_t saveVBLANK_B;
- uint32_t saveVSYNC_B;
- uint32_t savePIPEBSRC;
- uint32_t saveDSPBSTRIDE;
- uint32_t saveDSPBLINOFF;
- uint32_t saveDSPBTILEOFF;
- uint32_t saveDSPBSIZE;
- uint32_t saveDSPBPOS;
- uint32_t saveDSPBSURF;
- uint32_t saveDSPBCNTR;
- uint32_t saveDSPBSTATUS;
- uint32_t save_palette_b[256];
-
- uint32_t savePIPECCONF;
- uint32_t saveHTOTAL_C;
- uint32_t saveHBLANK_C;
- uint32_t saveHSYNC_C;
- uint32_t saveVTOTAL_C;
- uint32_t saveVBLANK_C;
- uint32_t saveVSYNC_C;
- uint32_t savePIPECSRC;
- uint32_t saveDSPCSTRIDE;
- uint32_t saveDSPCLINOFF;
- uint32_t saveDSPCTILEOFF;
- uint32_t saveDSPCSIZE;
- uint32_t saveDSPCPOS;
- uint32_t saveDSPCSURF;
- uint32_t saveDSPCCNTR;
- uint32_t saveDSPCSTATUS;
- uint32_t save_palette_c[256];
uint32_t saveMIPI_C;
uint32_t savePFIT_CONTROL;
@@ -476,6 +435,7 @@ struct cdv_state {
};
struct psb_save_area {
+ struct psb_pipe pipe[3];
uint32_t saveBSM;
uint32_t saveVBT;
union {
@@ -494,15 +454,19 @@ struct psb_ops;
struct drm_psb_private {
struct drm_device *dev;
const struct psb_ops *ops;
+ const struct psb_offset *regmap;
+
+ struct child_device_config *child_dev;
+ int child_dev_num;
struct psb_gtt gtt;
/* GTT Memory manager */
struct psb_gtt_mm *gtt_mm;
struct page *scratch_page;
- u32 *gtt_map;
+ u32 __iomem *gtt_map;
uint32_t stolen_base;
- void *vram_addr;
+ u8 __iomem *vram_addr;
unsigned long vram_stolen_size;
int gtt_initialized;
u16 gmch_ctrl; /* Saved GTT setup */
@@ -518,8 +482,8 @@ struct drm_psb_private {
* Register base
*/
- uint8_t *sgx_reg;
- uint8_t *vdc_reg;
+ uint8_t __iomem *sgx_reg;
+ uint8_t __iomem *vdc_reg;
uint32_t gatt_free_offset;
/*
@@ -543,6 +507,7 @@ struct drm_psb_private {
* Modesetting
*/
struct psb_intel_mode_device mode_dev;
+ bool modeset; /* true if we have done the mode_device setup */
struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
@@ -605,7 +570,7 @@ struct drm_psb_private {
int rpm_enabled;
/* MID specific */
- struct oaktrail_vbt vbt_data;
+ bool has_gct;
struct oaktrail_gct_data gct_data;
/* Oaktrail HDMI state */
@@ -621,6 +586,11 @@ struct drm_psb_private {
uint32_t msi_addr;
uint32_t msi_data;
+ /*
+ * Hotplug handling
+ */
+
+ struct work_struct hotplug_work;
/*
* LID-Switch
@@ -628,7 +598,6 @@ struct drm_psb_private {
spinlock_t lid_lock;
struct timer_list lid_timer;
struct psb_intel_opregion opregion;
- u32 *lid_state;
u32 lid_last_state;
/*
@@ -669,6 +638,8 @@ struct drm_psb_private {
u32 dspcntr[3];
int mdfld_panel_id;
+
+ bool dplla_96mhz; /* DPLL data from the VBT */
};
@@ -682,6 +653,9 @@ struct psb_ops {
int pipes; /* Number of output pipes */
int crtcs; /* Number of CRTCs */
int sgx_offset; /* Base offset of SGX device */
+ int hdmi_mask; /* Mask of HDMI CRTCs */
+ int lvds_mask; /* Mask of LVDS CRTCs */
+ int cursor_needs_phys; /* If the cursor base reg needs a physical address */
/* Sub functions */
struct drm_crtc_helper_funcs const *crtc_helper;
@@ -690,9 +664,13 @@ struct psb_ops {
/* Setup hooks */
int (*chip_setup)(struct drm_device *dev);
void (*chip_teardown)(struct drm_device *dev);
+ /* Optional helper called after modeset */
+ void (*errata)(struct drm_device *dev);
/* Display management hooks */
int (*output_init)(struct drm_device *dev);
+ int (*hotplug)(struct drm_device *dev);
+ void (*hotplug_enable)(struct drm_device *dev, bool on);
/* Power management hooks */
void (*init_pm)(struct drm_device *dev);
int (*save_regs)(struct drm_device *dev);
@@ -789,12 +767,6 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
/*
- * intel_opregion.c
- */
-extern int gma_intel_opregion_init(struct drm_device *dev);
-extern int gma_intel_opregion_exit(struct drm_device *dev);
-
-/*
* framebuffer.c
*/
extern int psbfb_probed(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 2616558457c8..36c3c99612f6 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -337,15 +337,12 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_i915_master_private *master_priv; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
int ret = 0;
@@ -367,9 +364,9 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -392,18 +389,10 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
psb_gtt_unpin(psbfb->gtt);
goto psb_intel_pipe_set_base_exit;
}
- REG_WRITE(dspcntr_reg, dspcntr);
-
+ REG_WRITE(map->cntr, dspcntr);
- if (0 /* FIXMEAC - check what PSB needs */) {
- REG_WRITE(dspbase, offset);
- REG_READ(dspbase);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
- } else {
- REG_WRITE(dspbase, start + offset);
- REG_READ(dspbase);
- }
+ REG_WRITE(map->base, start + offset);
+ REG_READ(map->base);
psb_intel_pipe_cleaner:
/* If there was a previous display we can now unpin it */
@@ -424,14 +413,10 @@ psb_intel_pipe_set_base_exit:
static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_i915_master_private *master_priv; */
- /* struct drm_i915_private *dev_priv = dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
/* XXX: When our outputs are all unaware of DPMS modes other than off
@@ -442,34 +427,34 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
}
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+ REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
psb_intel_crtc_load_lut(crtc);
@@ -487,29 +472,29 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+ REG_READ(map->conf);
}
/* Wait for vblank for the disable to take effect. */
psb_intel_wait_for_vblank(dev);
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
}
/* Wait for the clocks to turn off. */
@@ -589,22 +574,11 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
int pipe = psb_intel_crtc->pipe;
- int fp_reg = (pipe == 0) ? FPA0 : FPB0;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk;
struct psb_intel_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
@@ -690,7 +664,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
/* setup pipeconf */
- pipeconf = REG_READ(pipeconf_reg);
+ pipeconf = REG_READ(map->conf);
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -712,9 +686,9 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
drm_mode_debug_printmodeline(mode);
if (dpll & DPLL_VCO_ENABLE) {
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
udelay(150);
}
@@ -747,45 +721,45 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
REG_READ(LVDS);
}
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
/* write it again -- the BIOS does, after all */
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->dpll, dpll);
- REG_READ(dpll_reg);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
- REG_WRITE(dspsize_reg,
+ REG_WRITE(map->size,
((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
- REG_WRITE(dsppos_reg, 0);
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->pos, 0);
+ REG_WRITE(map->src,
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, pipeconf);
+ REG_READ(map->conf);
psb_intel_wait_for_vblank(dev);
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
/* Flush the plane changes */
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
@@ -799,10 +773,10 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int palreg = PALETTE_A;
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
+ int palreg = map->palette;
int i;
/* The clocks have to be on to load the palette. */
@@ -811,12 +785,7 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
switch (psb_intel_crtc->pipe) {
case 0:
- break;
case 1:
- palreg = PALETTE_B;
- break;
- case 2:
- palreg = PALETTE_C;
break;
default:
dev_err(dev->dev, "Illegal Pipe Number.\n");
@@ -836,7 +805,7 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
gma_power_end(dev);
} else {
for (i = 0; i < 256; i++) {
- dev_priv->regs.psb.save_palette_a[i] =
+ dev_priv->regs.pipe[0].palette[i] =
((psb_intel_crtc->lut_r[i] +
psb_intel_crtc->lut_adj[i]) << 16) |
((psb_intel_crtc->lut_g[i] +
@@ -854,11 +823,10 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
static void psb_intel_crtc_save(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -867,27 +835,27 @@ static void psb_intel_crtc_save(struct drm_crtc *crtc)
return;
}
- crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
- crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
- crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
- crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
- crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
- crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
- crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
- crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
- crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
- crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
- crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
- crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
- crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+ crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+ crtc_state->savePIPECONF = REG_READ(map->conf);
+ crtc_state->savePIPESRC = REG_READ(map->src);
+ crtc_state->saveFP0 = REG_READ(map->fp0);
+ crtc_state->saveFP1 = REG_READ(map->fp1);
+ crtc_state->saveDPLL = REG_READ(map->dpll);
+ crtc_state->saveHTOTAL = REG_READ(map->htotal);
+ crtc_state->saveHBLANK = REG_READ(map->hblank);
+ crtc_state->saveHSYNC = REG_READ(map->hsync);
+ crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+ crtc_state->saveVBLANK = REG_READ(map->vblank);
+ crtc_state->saveVSYNC = REG_READ(map->vsync);
+ crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
/*NOTE: DSPSIZE DSPPOS only for psb*/
- crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
- crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+ crtc_state->saveDSPSIZE = REG_READ(map->size);
+ crtc_state->saveDSPPOS = REG_READ(map->pos);
- crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+ crtc_state->saveDSPBASE = REG_READ(map->base);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
}
@@ -898,12 +866,10 @@ static void psb_intel_crtc_save(struct drm_crtc *crtc)
static void psb_intel_crtc_restore(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private * dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -913,45 +879,45 @@ static void psb_intel_crtc_restore(struct drm_crtc *crtc)
}
if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
- REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+ REG_WRITE(map->dpll,
crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_READ(map->dpll);
udelay(150);
}
- REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
- REG_READ(pipeA ? FPA0 : FPB0);
+ REG_WRITE(map->fp0, crtc_state->saveFP0);
+ REG_READ(map->fp0);
- REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
- REG_READ(pipeA ? FPA1 : FPB1);
+ REG_WRITE(map->fp1, crtc_state->saveFP1);
+ REG_READ(map->fp1);
- REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_WRITE(map->dpll, crtc_state->saveDPLL);
+ REG_READ(map->dpll);
udelay(150);
- REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
- REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
- REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
- REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
- REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
- REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
- REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+ REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+ REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+ REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+ REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+ REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+ REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+ REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
- REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
- REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+ REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+ REG_WRITE(map->pos, crtc_state->saveDSPPOS);
- REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
- REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+ REG_WRITE(map->src, crtc_state->savePIPESRC);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
+ REG_WRITE(map->conf, crtc_state->savePIPECONF);
psb_intel_wait_for_vblank(dev);
- REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+ REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
psb_intel_wait_for_vblank(dev);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
}
@@ -962,6 +928,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
@@ -969,8 +936,10 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t temp;
size_t addr = 0;
struct gtt_range *gt;
+ struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
struct drm_gem_object *obj;
- int ret;
+ void *tmp_dst, *tmp_src;
+ int ret, i, cursor_pages;
/* if we want to turn off the cursor, ignore width and height */
if (!handle) {
@@ -1019,10 +988,32 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
return ret;
}
+ if (dev_priv->ops->cursor_needs_phys) {
+ if (cursor_gt == NULL) {
+ dev_err(dev->dev, "No hardware cursor mem available");
+ return -ENOMEM;
+ }
- addr = gt->offset; /* Or resource.start ??? */
+ /* Prevent overflow */
+ if (gt->npage > 4)
+ cursor_pages = 4;
+ else
+ cursor_pages = gt->npage;
+
+ /* Copy the cursor to cursor mem */
+ tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
+ for (i = 0; i < cursor_pages; i++) {
+ tmp_src = kmap(gt->pages[i]);
+ memcpy(tmp_dst, tmp_src, PAGE_SIZE);
+ kunmap(gt->pages[i]);
+ tmp_dst += PAGE_SIZE;
+ }
- psb_intel_crtc->cursor_addr = addr;
+ addr = psb_intel_crtc->cursor_addr;
+ } else {
+ addr = gt->offset; /* Or resource.start ??? */
+ psb_intel_crtc->cursor_addr = addr;
+ }
temp = 0;
/* set the pipe for the cursor */
@@ -1115,34 +1106,30 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
u32 fp;
struct psb_intel_clock_t clock;
bool is_lvds;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
if (gma_power_begin(dev, false)) {
- dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ dpll = REG_READ(map->dpll);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+ fp = REG_READ(map->fp0);
else
- fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+ fp = REG_READ(map->fp1);
is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
gma_power_end(dev);
} else {
- dpll = (pipe == 0) ?
- dev_priv->regs.psb.saveDPLL_A :
- dev_priv->regs.psb.saveDPLL_B;
+ dpll = p->dpll;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA0 :
- dev_priv->regs.psb.saveFPB0;
+ fp = p->fp0;
else
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA1 :
- dev_priv->regs.psb.saveFPB1;
+ fp = p->fp1;
is_lvds = (pipe == 1) && (dev_priv->regs.psb.saveLVDS &
LVDS_PORT_EN);
@@ -1202,26 +1189,20 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
int vtot;
int vsync;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
if (gma_power_begin(dev, false)) {
- htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
- hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
- vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
- vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ htot = REG_READ(map->htotal);
+ hsync = REG_READ(map->hsync);
+ vtot = REG_READ(map->vtotal);
+ vsync = REG_READ(map->vsync);
gma_power_end(dev);
} else {
- htot = (pipe == 0) ?
- dev_priv->regs.psb.saveHTOTAL_A :
- dev_priv->regs.psb.saveHTOTAL_B;
- hsync = (pipe == 0) ?
- dev_priv->regs.psb.saveHSYNC_A :
- dev_priv->regs.psb.saveHSYNC_B;
- vtot = (pipe == 0) ?
- dev_priv->regs.psb.saveVTOTAL_A :
- dev_priv->regs.psb.saveVTOTAL_B;
- vsync = (pipe == 0) ?
- dev_priv->regs.psb.saveVSYNC_A :
- dev_priv->regs.psb.saveVSYNC_B;
+ htot = p->htotal;
+ hsync = p->hsync;
+ vtot = p->vtotal;
+ vsync = p->vsync;
}
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@@ -1257,6 +1238,9 @@ void psb_intel_crtc_destroy(struct drm_crtc *crtc)
drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
psb_intel_crtc->cursor_obj = NULL;
}
+
+ if (psb_intel_crtc->cursor_gt != NULL)
+ psb_gtt_free_range(crtc->dev, psb_intel_crtc->cursor_gt);
kfree(psb_intel_crtc->crtc_state);
drm_crtc_cleanup(crtc);
kfree(psb_intel_crtc);
@@ -1285,13 +1269,33 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
* Set the default value of cursor control and base register
* to zero. This is a workaround for h/w defect on Oaktrail
*/
-static void psb_intel_cursor_init(struct drm_device *dev, int pipe)
+static void psb_intel_cursor_init(struct drm_device *dev,
+ struct psb_intel_crtc *psb_intel_crtc)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
+ struct gtt_range *cursor_gt;
+
+ if (dev_priv->ops->cursor_needs_phys) {
+ /* Allocate 4 pages of stolen mem for a hardware cursor. That
+ * is enough for the 64 x 64 ARGB cursors we support.
+ */
+ cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1);
+ if (!cursor_gt) {
+ psb_intel_crtc->cursor_gt = NULL;
+ goto out;
+ }
+ psb_intel_crtc->cursor_gt = cursor_gt;
+ psb_intel_crtc->cursor_addr = dev_priv->stolen_base +
+ cursor_gt->offset;
+ } else {
+ psb_intel_crtc->cursor_gt = NULL;
+ }
- REG_WRITE(control[pipe], 0);
- REG_WRITE(base[pipe], 0);
+out:
+ REG_WRITE(control[psb_intel_crtc->pipe], 0);
+ REG_WRITE(base[psb_intel_crtc->pipe], 0);
}
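[Editor's note: the "4 pages of stolen mem" figure above is simply the cursor footprint: a 64 x 64 cursor at 4 bytes per ARGB pixel needs 64 * 64 * 4 = 16384 bytes, i.e. four 4 KiB pages. As a sanity check, with illustrative constant names.]

#define EX_CURSOR_WIDTH   64
#define EX_CURSOR_HEIGHT  64
#define EX_CURSOR_BPP     4     /* bytes per ARGB pixel */
#define EX_CURSOR_BYTES   (EX_CURSOR_WIDTH * EX_CURSOR_HEIGHT * EX_CURSOR_BPP)  /* 16384 = 4 pages */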
void psb_intel_crtc_init(struct drm_device *dev, int pipe,
@@ -1357,7 +1361,7 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
psb_intel_crtc->mode_set.connectors =
(struct drm_connector **) (psb_intel_crtc + 1);
psb_intel_crtc->mode_set.num_connectors = 0;
- psb_intel_cursor_init(dev, pipe);
+ psb_intel_cursor_init(dev, psb_intel_crtc);
}
int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index f40535e56689..2515f83248cb 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -106,11 +106,6 @@ struct psb_intel_mode_device {
size_t(*bo_offset) (struct drm_device *dev, void *bo);
/*
- * Cursor (Can go ?)
- */
- int cursor_needs_physical;
-
- /*
* LVDS info
*/
int backlight_duty_cycle; /* restore backlight to this value */
@@ -176,6 +171,7 @@ struct psb_intel_crtc {
int pipe;
int plane;
uint32_t cursor_addr;
+ struct gtt_range *cursor_gt;
u8 lut_r[256], lut_g[256], lut_b[256];
u8 lut_adj[256];
struct psb_intel_framebuffer *fbdev_fb;
@@ -193,6 +189,9 @@ struct psb_intel_crtc {
/*crtc mode setting flags*/
u32 mode_flags;
+ bool active;
+ bool crtc_enable;
+
/* Saved Crtc HW states */
struct psb_intel_crtc_state *crtc_state;
};
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index e89d3a2e8fdc..8e8c8efb0a89 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -91,6 +91,9 @@
#define BLC_PWM_CTL 0x61254
#define BLC_PWM_CTL2 0x61250
+#define PWM_ENABLE (1 << 31)
+#define PWM_LEGACY_MODE (1 << 30)
+#define PWM_PIPE_B (1 << 29)
#define BLC_PWM_CTL_C 0x62254
#define BLC_PWM_CTL2_C 0x62250
#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
@@ -216,7 +219,7 @@
#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
-#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+#define DPLL_FPA0h1_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_LOCK (1 << 15) /* CDV */
/*
@@ -343,6 +346,9 @@
#define FP_M2_DIV_SHIFT 0
#define PORT_HOTPLUG_EN 0x61110
+#define HDMIB_HOTPLUG_INT_EN (1 << 29)
+#define HDMIC_HOTPLUG_INT_EN (1 << 28)
+#define HDMID_HOTPLUG_INT_EN (1 << 27)
#define SDVOB_HOTPLUG_INT_EN (1 << 26)
#define SDVOC_HOTPLUG_INT_EN (1 << 25)
#define TV_HOTPLUG_INT_EN (1 << 18)
@@ -501,10 +507,12 @@
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18)
#define PIPE_TE_ENABLE (1UL << 22)
+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
#define PIPE_VSYNC_ENABL (1UL << 25)
#define PIPE_HDMI_AUDIO_UNDERRUN (1UL << 26)
#define PIPE_HDMI_AUDIO_BUFFER_DONE (1UL << 27)
+#define PIPE_FIFO_UNDERRUN (1UL << 31)
#define PIPE_HDMI_AUDIO_INT_MASK (PIPE_HDMI_AUDIO_UNDERRUN | \
PIPE_HDMI_AUDIO_BUFFER_DONE)
#define PIPE_EVENT_MASK ((1 << 29)|(1 << 28)|(1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)|(1 << 22)|(1 << 21)|(1 << 20)|(1 << 16))
@@ -569,12 +577,27 @@ struct dpst_guardband {
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
+#define FW_BLC_SELF 0x20e0
+#define FW_BLC_SELF_EN (1<<15)
+
#define DSPARB 0x70030
#define DSPFW1 0x70034
+#define DSP_FIFO_SR_WM_MASK 0xFF800000
+#define DSP_FIFO_SR_WM_SHIFT 23
+#define CURSOR_B_FIFO_WM_MASK 0x003F0000
+#define CURSOR_B_FIFO_WM_SHIFT 16
#define DSPFW2 0x70038
+#define CURSOR_A_FIFO_WM_MASK 0x3F00
+#define CURSOR_A_FIFO_WM_SHIFT 8
+#define DSP_PLANE_C_FIFO_WM_MASK 0x7F
+#define DSP_PLANE_C_FIFO_WM_SHIFT 0
#define DSPFW3 0x7003c
#define DSPFW4 0x70050
#define DSPFW5 0x70054
+#define DSP_PLANE_B_FIFO_WM1_SHIFT 24
+#define DSP_PLANE_A_FIFO_WM1_SHIFT 16
+#define CURSOR_B_FIFO_WM1_SHIFT 8
+#define CURSOR_FIFO_SR_WM1_SHIFT 0
#define DSPFW6 0x70058
#define DSPCHICKENBIT 0x70400
#define DSPACNTR 0x70180
@@ -1290,6 +1313,15 @@ No status bits are changed.
#define SB_N_CB_TUNE_MASK PSB_MASK(25, 24)
#define SB_N_CB_TUNE_SHIFT 24
+/* bits 14:13 select between the different reference clocks for Pipe A/B */
+#define SB_REF_DPLLA 0x8010
+#define SB_REF_DPLLB 0x8030
+#define REF_CLK_MASK (0x3 << 13)
+#define REF_CLK_CORE (0 << 13)
+#define REF_CLK_DPLL (1 << 13)
+#define REF_CLK_DPLLA (2 << 13)
+/* For DPLL B, the reference clock from DPLL A is used when (2 << 13) is selected */
+
#define _SB_REF_A 0x8018
#define _SB_REF_B 0x8038
#define SB_REF_SFR(pipe) _PIPE(pipe, _SB_REF_A, _SB_REF_B)
@@ -1313,6 +1345,7 @@ No status bits are changed.
#define LANE_PLL_MASK (0x7 << 20)
#define LANE_PLL_ENABLE (0x3 << 20)
+#define LANE_PLL_PIPE(p) (((p) == 0) ? (1 << 21) : (0 << 21))
#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 36330cabcea2..d39b15be7649 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1141,7 +1141,6 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1161,11 +1160,6 @@ static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- /* We assume worst case scenario of 32 bpp here, since we don't know */
- if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
- dev_priv->vram_stolen_size)
- return MODE_MEM;
-
return MODE_OK;
}
@@ -2044,8 +2038,7 @@ psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
struct drm_device *dev = connector->base.base.dev;
intel_attach_force_audio_property(&connector->base.base);
- if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
- intel_attach_broadcast_rgb_property(&connector->base.base);
+ intel_attach_broadcast_rgb_property(&connector->base.base);
*/
}
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 1869586457b1..8652cdf3f03f 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -190,6 +190,9 @@ static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
*/
static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
{
+ if (vdc_stat & _PSB_IRQ_ASLE)
+ psb_intel_opregion_asle_intr(dev);
+
if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
mid_pipe_event_handler(dev, 0);
@@ -199,11 +202,9 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
{
- struct drm_device *dev = (struct drm_device *) arg;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
-
- uint32_t vdc_stat, dsp_int = 0, sgx_int = 0;
+ struct drm_device *dev = arg;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
int handled = 0;
spin_lock(&dev_priv->irqmask_lock);
@@ -220,6 +221,8 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
if (vdc_stat & _PSB_IRQ_SGX_FLAG)
sgx_int = 1;
+ if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
+ hotplug_int = 1;
vdc_stat &= dev_priv->vdc_irq_mask;
spin_unlock(&dev_priv->irqmask_lock);
@@ -241,6 +244,13 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
handled = 1;
}
+ /* Note: this bit has other meanings on some devices, so we will
+ need to address that later if it ever matters */
+ if (hotplug_int && dev_priv->ops->hotplug) {
+ handled = dev_priv->ops->hotplug(dev);
+ REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+ }
+
PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
DRM_READMEMORYBARRIER();
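[Editor's note: when the display hot-sync bit fires, the handler calls the chip's optional hotplug hook and then acks PORT_HOTPLUG_STAT by writing the status back to itself. A sketch of what such a hook might look like, assuming the ops->hotplug(dev) signature used above; the body is illustrative, not the driver's implementation.]

static int example_hotplug(struct drm_device *dev)
{
        /* re-probe connectors and notify userspace that outputs changed */
        drm_helper_hpd_irq_event(dev);
        return 1;       /* handled */
}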
@@ -273,6 +283,11 @@ void psb_irq_preinstall(struct drm_device *dev)
dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
*/
+ /* Revisit this area - do we want per-device masks? */
+ if (dev_priv->ops->hotplug)
+ dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
+ dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE;
+
/* This register is safe even if display island is off */
PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
@@ -305,18 +320,23 @@ int psb_irq_postinstall(struct drm_device *dev)
else
psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+ if (dev_priv->ops->hotplug_enable)
+ dev_priv->ops->hotplug_enable(dev, true);
+
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
return 0;
}
void psb_irq_uninstall(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+ if (dev_priv->ops->hotplug_enable)
+ dev_priv->ops->hotplug_enable(dev, false);
+
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
if (dev->vblank_enabled[0])
@@ -406,7 +426,7 @@ void psb_irq_turn_off_dpst(struct drm_device *dev)
psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
- PSB_WVDC32(pwm_reg & !(PWM_PHASEIN_INT_ENABLE),
+ PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
PWM_CONTROL_LOGIC);
pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
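[Editor's note: the one-character change above is a genuine bug fix: '!' is logical negation, so pwm_reg & !(PWM_PHASEIN_INT_ENABLE) evaluates to pwm_reg & 0 and wipes every bit, whereas pwm_reg & ~PWM_PHASEIN_INT_ENABLE clears only the intended bit. In isolation:]

static u32 example_clear_bits(u32 reg, u32 mask)
{
        /* wrong: reg & !mask is reg & 0 for any non-zero mask */
        /* right: clear only the bits in mask, keep the rest */
        return reg & ~mask;
}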
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
index b867aabe6bf3..1d2ebb5e530f 100644
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ b/drivers/gpu/drm/gma500/psb_lid.c
@@ -29,7 +29,7 @@ static void psb_lid_timer_func(unsigned long data)
struct drm_device *dev = (struct drm_device *)dev_priv->dev;
struct timer_list *lid_timer = &dev_priv->lid_timer;
unsigned long irq_flags;
- u32 *lid_state = dev_priv->lid_state;
+ u32 __iomem *lid_state = dev_priv->opregion.lid_state;
u32 pp_status;
if (readl(lid_state) == dev_priv->lid_last_state)
@@ -40,10 +40,16 @@ static void psb_lid_timer_func(unsigned long data)
REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
do {
pp_status = REG_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0);
+ } while ((pp_status & PP_ON) == 0 &&
+ (pp_status & PP_SEQUENCE_MASK) != 0);
- /*FIXME: should be backlight level before*/
- psb_intel_lvds_set_brightness(dev, 100);
+ if (REG_READ(PP_STATUS) & PP_ON) {
+ /*FIXME: should be backlight level before*/
+ psb_intel_lvds_set_brightness(dev, 100);
+ } else {
+ DRM_DEBUG("LVDS panel never powered up");
+ return;
+ }
} else {
psb_intel_lvds_set_brightness(dev, 0);
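[Editor's note: the old loop could spin forever if the panel never reached PP_ON; the new condition also gives up once the power sequencer goes idle. The exit logic, restated as a predicate; the helper is illustrative and not in the driver.]

static inline bool example_panel_still_powering_up(u32 pp_status)
{
        /* keep polling only while not yet on AND the sequencer is still busy */
        return (pp_status & PP_ON) == 0 &&
               (pp_status & PP_SEQUENCE_MASK) != 0;
}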
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index ce7fc77678b4..2e9268da58d8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -11,17 +11,21 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
+ i915_gem_stolen.o \
i915_gem_tiling.o \
+ i915_sysfs.o \
i915_trace_points.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
intel_bios.o \
+ intel_ddi.o \
intel_dp.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
intel_panel.o \
+ intel_pm.o \
intel_i2c.o \
intel_fb.o \
intel_tv.o \
@@ -34,7 +38,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
dvo_ch7017.o \
dvo_ivch.o \
dvo_tfp410.o \
- dvo_sil164.o
+ dvo_sil164.o \
+ i915_gem_dmabuf.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e6162a1681f0..5363e9c66c27 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -47,7 +47,6 @@ enum {
FLUSHING_LIST,
INACTIVE_LIST,
PINNED_LIST,
- DEFERRED_FREE_LIST,
};
static const char *yesno(int v)
@@ -178,18 +177,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
seq_printf(m, "Inactive:\n");
head = &dev_priv->mm.inactive_list;
break;
- case PINNED_LIST:
- seq_printf(m, "Pinned:\n");
- head = &dev_priv->mm.pinned_list;
- break;
case FLUSHING_LIST:
seq_printf(m, "Flushing:\n");
head = &dev_priv->mm.flushing_list;
break;
- case DEFERRED_FREE_LIST:
- seq_printf(m, "Deferred free:\n");
- head = &dev_priv->mm.deferred_free_list;
- break;
default:
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
@@ -252,21 +243,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.pinned_list, mm_list);
- seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
- count, mappable_count, size, mappable_size);
-
- size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.inactive_list, mm_list);
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.deferred_free_list, mm_list);
- seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
- count, mappable_count, size, mappable_size);
-
- size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
if (obj->fault_mappable) {
size += obj->gtt_space->size;
@@ -294,6 +275,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
+ uintptr_t list = (uintptr_t) node->info_ent->data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
@@ -305,6 +287,9 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ if (list == PINNED_LIST && obj->pin_count == 0)
+ continue;
+
seq_printf(m, " ");
describe_obj(m, obj);
seq_printf(m, "\n");
@@ -321,7 +306,6 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
return 0;
}
-
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -430,10 +414,6 @@ static void i915_ring_seqno_info(struct seq_file *m,
if (ring->get_seqno) {
seq_printf(m, "Current sequence (%s): %d\n",
ring->name, ring->get_seqno(ring));
- seq_printf(m, "Waiter sequence (%s): %d\n",
- ring->name, ring->waiting_seqno);
- seq_printf(m, "IRQ sequence (%s): %d\n",
- ring->name, ring->irq_seqno);
}
}
@@ -468,7 +448,45 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
if (ret)
return ret;
- if (!HAS_PCH_SPLIT(dev)) {
+ if (IS_VALLEYVIEW(dev)) {
+ seq_printf(m, "Display IER:\t%08x\n",
+ I915_READ(VLV_IER));
+ seq_printf(m, "Display IIR:\t%08x\n",
+ I915_READ(VLV_IIR));
+ seq_printf(m, "Display IIR_RW:\t%08x\n",
+ I915_READ(VLV_IIR_RW));
+ seq_printf(m, "Display IMR:\t%08x\n",
+ I915_READ(VLV_IMR));
+ for_each_pipe(pipe)
+ seq_printf(m, "Pipe %c stat:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(PIPESTAT(pipe)));
+
+ seq_printf(m, "Master IER:\t%08x\n",
+ I915_READ(VLV_MASTER_IER));
+
+ seq_printf(m, "Render IER:\t%08x\n",
+ I915_READ(GTIER));
+ seq_printf(m, "Render IIR:\t%08x\n",
+ I915_READ(GTIIR));
+ seq_printf(m, "Render IMR:\t%08x\n",
+ I915_READ(GTIMR));
+
+ seq_printf(m, "PM IER:\t\t%08x\n",
+ I915_READ(GEN6_PMIER));
+ seq_printf(m, "PM IIR:\t\t%08x\n",
+ I915_READ(GEN6_PMIIR));
+ seq_printf(m, "PM IMR:\t\t%08x\n",
+ I915_READ(GEN6_PMIMR));
+
+ seq_printf(m, "Port hotplug:\t%08x\n",
+ I915_READ(PORT_HOTPLUG_EN));
+ seq_printf(m, "DPFLIPSTAT:\t%08x\n",
+ I915_READ(VLV_DPFLIPSTAT));
+ seq_printf(m, "DPINVGTT:\t%08x\n",
+ I915_READ(DPINVGTT));
+
+ } else if (!HAS_PCH_SPLIT(dev)) {
seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
seq_printf(m, "Interrupt identity: %08x\n",
@@ -564,69 +582,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
-static int i915_ringbuffer_data(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- if (!ring->obj) {
- seq_printf(m, "No ringbuffer setup\n");
- } else {
- const u8 __iomem *virt = ring->virtual_start;
- uint32_t off;
-
- for (off = 0; off < ring->size; off += 4) {
- uint32_t *ptr = (uint32_t *)(virt + off);
- seq_printf(m, "%08x : %08x\n", off, *ptr);
- }
- }
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static int i915_ringbuffer_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- int ret;
-
- ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- if (ring->size == 0)
- return 0;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- seq_printf(m, "Ring %s:\n", ring->name);
- seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
- seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
- seq_printf(m, " Size : %08x\n", ring->size);
- seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
- seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
- seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
- seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
- }
- seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
- seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static const char *ring_str(int ring)
{
switch (ring) {
@@ -704,6 +659,7 @@ static void i915_ring_error_state(struct seq_file *m,
struct drm_i915_error_state *error,
unsigned ring)
{
+ BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
seq_printf(m, "%s command stream:\n", ring_str(ring));
seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
@@ -718,8 +674,8 @@ static void i915_ring_error_state(struct seq_file *m,
if (INTEL_INFO(dev)->gen >= 4)
seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
+ seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
if (INTEL_INFO(dev)->gen >= 6) {
- seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
seq_printf(m, " SYNC_0: 0x%08x\n",
error->semaphore_mboxes[ring][0]);
@@ -727,31 +683,35 @@ static void i915_ring_error_state(struct seq_file *m,
error->semaphore_mboxes[ring][1]);
}
seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
+ seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}
+struct i915_error_state_file_priv {
+ struct drm_device *dev;
+ struct drm_i915_error_state *error;
+};
+
static int i915_error_state(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ struct i915_error_state_file_priv *error_priv = m->private;
+ struct drm_device *dev = error_priv->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_error_state *error;
- unsigned long flags;
+ struct drm_i915_error_state *error = error_priv->error;
+ struct intel_ring_buffer *ring;
int i, j, page, offset, elt;
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- if (!dev_priv->first_error) {
+ if (!error) {
seq_printf(m, "no error state collected\n");
- goto out;
+ return 0;
}
- error = dev_priv->first_error;
-
seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
+ seq_printf(m, "IER: 0x%08x\n", error->ier);
seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -762,11 +722,8 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- i915_ring_error_state(m, dev, error, RCS);
- if (HAS_BLT(dev))
- i915_ring_error_state(m, dev, error, BCS);
- if (HAS_BSD(dev))
- i915_ring_error_state(m, dev, error, VCS);
+ for_each_ring(ring, dev_priv, i)
+ i915_ring_error_state(m, dev, error, i);
if (error->active_bo)
print_error_buffers(m, "Active",
@@ -828,12 +785,71 @@ static int i915_error_state(struct seq_file *m, void *unused)
if (error->display)
intel_display_print_error_state(m, dev, error->display);
-out:
+ return 0;
+}
+
+static ssize_t
+i915_error_state_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct i915_error_state_file_priv *error_priv = m->private;
+ struct drm_device *dev = error_priv->dev;
+
+ DRM_DEBUG_DRIVER("Resetting error state\n");
+
+ mutex_lock(&dev->struct_mutex);
+ i915_destroy_error_state(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return cnt;
+}
+
+static int i915_error_state_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_error_state_file_priv *error_priv;
+ unsigned long flags;
+
+ error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
+ if (!error_priv)
+ return -ENOMEM;
+
+ error_priv->dev = dev;
+
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
+ error_priv->error = dev_priv->first_error;
+ if (error_priv->error)
+ kref_get(&error_priv->error->ref);
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
- return 0;
+ return single_open(file, i915_error_state, error_priv);
+}
+
+static int i915_error_state_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+ struct i915_error_state_file_priv *error_priv = m->private;
+
+ if (error_priv->error)
+ kref_put(&error_priv->error->ref, i915_error_state_free);
+ kfree(error_priv);
+
+ return single_release(inode, file);
}
+static const struct file_operations i915_error_state_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_error_state_open,
+ .read = seq_read,
+ .write = i915_error_state_write,
+ .llseek = default_llseek,
+ .release = i915_error_state_release,
+};
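The new error-state file keeps the captured snapshot alive across the read with a plain kref: open takes a reference under error_lock, release drops it, and the final put frees the state via i915_error_state_free. A minimal sketch of that lifetime rule outside the driver — the names below (snapshot, snapshot_create, ...) are illustrative only, not i915 symbols:

	#include <linux/gfp.h>
	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct snapshot {
		struct kref ref;
		/* ... captured state ... */
	};

	static void snapshot_free(struct kref *ref)
	{
		kfree(container_of(ref, struct snapshot, ref));
	}

	/* creation: one reference owned by the producer */
	static struct snapshot *snapshot_create(void)
	{
		struct snapshot *s = kzalloc(sizeof(*s), GFP_KERNEL);

		if (s)
			kref_init(&s->ref);
		return s;
	}

	/* ->open(): hand the reader its own reference */
	static struct snapshot *snapshot_get(struct snapshot *s)
	{
		if (s)
			kref_get(&s->ref);
		return s;
	}

	/* ->release(): drop it; the last put frees the snapshot */
	static void snapshot_put(struct snapshot *s)
	{
		if (s)
			kref_put(&s->ref, snapshot_free);
	}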
+
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1132,6 +1148,17 @@ static int gen6_drpc_info(struct seq_file *m)
seq_printf(m, "Core Power Down: %s\n",
yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+
+ /* Not exactly sure what this is */
+ seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6_LOCKED));
+ seq_printf(m, "RC6 residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6));
+ seq_printf(m, "RC6+ residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6p));
+ seq_printf(m, "RC6++ residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6pp));
+
return 0;
}
@@ -1306,17 +1333,25 @@ static int i915_opregion(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
+ void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
int ret;
+ if (data == NULL)
+ return -ENOMEM;
+
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
- return ret;
+ goto out;
- if (opregion->header)
- seq_write(m, opregion->header, OPREGION_SIZE);
+ if (opregion->header) {
+ memcpy_fromio(data, opregion->header, OPREGION_SIZE);
+ seq_write(m, data, OPREGION_SIZE);
+ }
mutex_unlock(&dev->struct_mutex);
+out:
+ kfree(data);
return 0;
}
@@ -1505,6 +1540,53 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
return 0;
}
+static int i915_dpio_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+
+ if (!IS_VALLEYVIEW(dev)) {
+ seq_printf(m, "unsupported\n");
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
+
+ seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_DIV_A));
+ seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_DIV_B));
+
+ seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
+ seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_REFSFR_B));
+
+ seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+ seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+
+ seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
+ seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
+
+ seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
+ intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+
static ssize_t
i915_wedged_read(struct file *filp,
char __user *ubuf,
@@ -1562,6 +1644,65 @@ static const struct file_operations i915_wedged_fops = {
};
static ssize_t
+i915_ring_stop_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[20];
+ int len;
+
+ len = snprintf(buf, sizeof(buf),
+ "0x%08x\n", dev_priv->stop_rings);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_ring_stop_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ char buf[20];
+ int val = 0;
+
+ if (cnt > 0) {
+ if (cnt > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
+
+ mutex_lock(&dev->struct_mutex);
+ dev_priv->stop_rings = val;
+ mutex_unlock(&dev->struct_mutex);
+
+ return cnt;
+}
+
+static const struct file_operations i915_ring_stop_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i915_ring_stop_read,
+ .write = i915_ring_stop_write,
+ .llseek = default_llseek,
+};
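The write handler above copies the user buffer, NUL-terminates it, and parses it with simple_strtoul() before storing the value in dev_priv->stop_rings, so the knob can be driven by anything that writes the debugfs file. A hedged userspace illustration only — the path assumes debugfs mounted at /sys/kernel/debug and DRI minor 0, and the value's meaning is whatever the driver assigns to stop_rings:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* path is an assumption: debugfs mount point, DRI minor 0 */
		const char *path = "/sys/kernel/debug/dri/0/i915_ring_stop";
		const char buf[] = "0x1\n";	/* text value; parsed by simple_strtoul() */
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, buf, strlen(buf)) < 0)
			perror("write");
		close(fd);
		return 0;
	}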
+
+static ssize_t
i915_max_freq_read(struct file *filp,
char __user *ubuf,
size_t max,
@@ -1738,7 +1879,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
return 0;
}
-int i915_forcewake_release(struct inode *inode, struct file *file)
+static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1803,11 +1944,10 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
+ {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
- {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
- {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -1816,13 +1956,6 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
- {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
- {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
- {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
- {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
- {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
- {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
- {"i915_error_state", i915_error_state, 0},
{"i915_rstdby_delays", i915_rstdby_delays, 0},
{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
{"i915_delayfreq_table", i915_delayfreq_table, 0},
@@ -1839,6 +1972,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
+ {"i915_dpio", i915_dpio_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -1867,6 +2001,17 @@ int i915_debugfs_init(struct drm_minor *minor)
&i915_cache_sharing_fops);
if (ret)
return ret;
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_ring_stop",
+ &i915_ring_stop_fops);
+ if (ret)
+ return ret;
+
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_error_state",
+ &i915_error_state_fops);
+ if (ret)
+ return ret;
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
@@ -1885,6 +2030,10 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
+ 1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
+ 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ba60f3c8f911..f94792626b94 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -26,6 +26,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
@@ -34,15 +36,62 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
-#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <acpi/video.h>
+#include <asm/pat.h>
+
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
+
+#define BEGIN_LP_RING(n) \
+ intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+ intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+ intel_ring_advance(LP_RING(dev_priv))
+
+/**
+ * Lock test for when it's just for synchronization of ring access.
+ *
+ * In that case, we don't need to do it when GEM is initialized as nobody else
+ * has access to the ring.
+ */
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
+ if (LP_RING(dev->dev_private)->obj == NULL) \
+ LOCK_TEST_WITH_RETURN(dev, file); \
+} while (0)
+
+static inline u32
+intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
+{
+ if (I915_NEED_GFX_HWS(dev_priv->dev))
+ return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
+ else
+ return intel_read_status_page(LP_RING(dev_priv), reg);
+}
+
+#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
+#define I915_BREADCRUMB_INDEX 0x21
+
+void i915_update_dri1_breadcrumb(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
+}
static void i915_write_hws_pga(struct drm_device *dev)
{
@@ -97,7 +146,7 @@ static void i915_free_hws(struct drm_device *dev)
if (ring->status_page.gfx_addr) {
ring->status_page.gfx_addr = 0;
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
}
/* Need to rewrite hardware status page */
@@ -195,7 +244,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
/* Allow hardware batchbuffers unless told otherwise.
*/
- dev_priv->allow_batchbuffer = 1;
+ dev_priv->dri1.allow_batchbuffer = 1;
return 0;
}
@@ -207,7 +256,7 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_DEBUG_DRIVER("%s\n", __func__);
- if (ring->map.handle == NULL) {
+ if (ring->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
@@ -236,6 +285,9 @@ static int i915_dma_init(struct drm_device *dev, void *data,
drm_i915_init_t *init = data;
int retcode = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
switch (init->func) {
case I915_INIT_DMA:
retcode = i915_initialize(dev, init);
@@ -578,6 +630,9 @@ static int i915_flush_ioctl(struct drm_device *dev, void *data,
{
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
mutex_lock(&dev->struct_mutex);
@@ -598,7 +653,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
int ret;
struct drm_clip_rect *cliprects = NULL;
- if (!dev_priv->allow_batchbuffer) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv->dri1.allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
return -EINVAL;
}
@@ -655,6 +713,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (cmdbuf->num_cliprects < 0)
@@ -706,11 +767,166 @@ fail_batch_free:
return ret;
}
+static int i915_emit_irq(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+
+ i915_kernel_lost_context(dev);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ dev_priv->counter++;
+ if (dev_priv->counter > 0x7FFFFFFFUL)
+ dev_priv->counter = 1;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
+
+ return dev_priv->counter;
+}
+
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ int ret = 0;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+ DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
+ READ_BREADCRUMB(dev_priv));
+
+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ return 0;
+ }
+
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+ if (ring->irq_get(ring)) {
+ DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+ READ_BREADCRUMB(dev_priv) >= irq_nr);
+ ring->irq_put(ring);
+ } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+ ret = -EBUSY;
+
+ if (ret == -EBUSY) {
+ DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
+ READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+ }
+
+ return ret;
+}
+
+/* Needs the lock as it touches the ring.
+ */
+static int i915_irq_emit(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_irq_emit_t *emit = data;
+ int result;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ mutex_lock(&dev->struct_mutex);
+ result = i915_emit_irq(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+ DRM_ERROR("copy_to_user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+static int i915_irq_wait(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_irq_wait_t *irqwait = data;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ return i915_wait_irq(dev, irqwait->irq_seq);
+}
+
+static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_vblank_pipe_t *pipe = data;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+ return 0;
+}
+
+/**
+ * Schedule buffer swap at given vertical blank.
+ */
+static int i915_vblank_swap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ /* The delayed swap mechanism was fundamentally racy, and has been
+ * removed. The model was that the client requested a delayed flip/swap
+ * from the kernel, then waited for vblank before continuing to perform
+ * rendering. The problem was that the kernel might wake the client
+ * up before it dispatched the vblank swap (since the lock has to be
+ * held while touching the ringbuffer), in which case the client would
+ * clear and start the next frame before the swap occurred, and
+ * flicker would occur in addition to likely missing the vblank.
+ *
+ * In the absence of this ioctl, userland falls back to a correct path
+ * of waiting for a vblank, then dispatching the swap on its own.
+ * Context switching to userland and back is plenty fast enough for
+ * meeting the requirements of vblank swapping.
+ */
+ return -EINVAL;
+}
+
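The comment above describes the userspace fallback in prose; in libdrm terms it amounts to a drmWaitVBlank() for the next vblank followed by the client's own swap. A minimal sketch of that path, assuming fd is an open DRM device node — the swap itself is left abstract since it is whatever present path the client normally uses:

	#include <string.h>
	#include <xf86drm.h>

	static int wait_vblank_then_swap(int fd)
	{
		drmVBlank vbl;

		memset(&vbl, 0, sizeof(vbl));
		vbl.request.type = DRM_VBLANK_RELATIVE;
		vbl.request.sequence = 1;	/* the next vblank */

		if (drmWaitVBlank(fd, &vbl))	/* 0 on success */
			return -1;

		/* now dispatch the swap/flip from userspace; the kernel
		 * no longer schedules it on the client's behalf. */
		return 0;
	}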
static int i915_flip_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
DRM_DEBUG_DRIVER("%s\n", __func__);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -739,7 +955,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev->pdev->irq ? 1 : 0;
break;
case I915_PARAM_ALLOW_BATCHBUFFER:
- value = dev_priv->allow_batchbuffer ? 1 : 0;
+ value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
break;
case I915_PARAM_LAST_DISPATCH:
value = READ_BREADCRUMB(dev_priv);
@@ -748,7 +964,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev->pci_device;
break;
case I915_PARAM_HAS_GEM:
- value = dev_priv->has_gem;
+ value = 1;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
@@ -761,13 +977,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
break;
case I915_PARAM_HAS_EXECBUF2:
/* depends on GEM */
- value = dev_priv->has_gem;
+ value = 1;
break;
case I915_PARAM_HAS_BSD:
- value = HAS_BSD(dev);
+ value = intel_ring_initialized(&dev_priv->ring[VCS]);
break;
case I915_PARAM_HAS_BLT:
- value = HAS_BLT(dev);
+ value = intel_ring_initialized(&dev_priv->ring[BCS]);
break;
case I915_PARAM_HAS_RELAXED_FENCING:
value = 1;
@@ -787,6 +1003,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev);
break;
+ case I915_PARAM_HAS_ALIASING_PPGTT:
+ value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -816,10 +1035,9 @@ static int i915_setparam(struct drm_device *dev, void *data,
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
- dev_priv->tex_lru_log_granularity = param->value;
break;
case I915_SETPARAM_ALLOW_BATCHBUFFER:
- dev_priv->allow_batchbuffer = param->value;
+ dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
break;
case I915_SETPARAM_NUM_USED_FENCES:
if (param->value > dev_priv->num_fence_regs ||
@@ -844,6 +1062,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
drm_i915_hws_addr_t *hws = data;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
@@ -861,23 +1082,17 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
- dev_priv->hws_map.offset = dev->agp->base + hws->addr;
- dev_priv->hws_map.size = 4*1024;
- dev_priv->hws_map.type = 0;
- dev_priv->hws_map.flags = 0;
- dev_priv->hws_map.mtrr = 0;
-
- drm_core_ioremap_wc(&dev_priv->hws_map, dev);
- if (dev_priv->hws_map.handle == NULL) {
+ dev_priv->dri1.gfx_hws_cpu_addr = ioremap_wc(dev->agp->base + hws->addr,
+ 4096);
+ if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
i915_dma_cleanup(dev);
ring->status_page.gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return -ENOMEM;
}
- ring->status_page.page_addr =
- (void __force __iomem *)dev_priv->hws_map.handle;
- memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
@@ -1013,133 +1228,6 @@ intel_teardown_mchbar(struct drm_device *dev)
release_resource(&dev_priv->mch_res);
}
-#define PTE_ADDRESS_MASK 0xfffff000
-#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
-#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
-#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
-#define PTE_MAPPING_TYPE_CACHED (3 << 1)
-#define PTE_MAPPING_TYPE_MASK (3 << 1)
-#define PTE_VALID (1 << 0)
-
-/**
- * i915_stolen_to_phys - take an offset into stolen memory and turn it into
- * a physical one
- * @dev: drm device
- * @offset: address to translate
- *
- * Some chip functions require allocations from stolen space and need the
- * physical address of the memory in question.
- */
-static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct pci_dev *pdev = dev_priv->bridge_dev;
- u32 base;
-
-#if 0
- /* On the machines I have tested the Graphics Base of Stolen Memory
- * is unreliable, so compute the base by subtracting the stolen memory
- * from the Top of Low Usable DRAM which is where the BIOS places
- * the graphics stolen memory.
- */
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- /* top 32bits are reserved = 0 */
- pci_read_config_dword(pdev, 0xA4, &base);
- } else {
- /* XXX presume 8xx is the same as i915 */
- pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
- }
-#else
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- u16 val;
- pci_read_config_word(pdev, 0xb0, &val);
- base = val >> 4 << 20;
- } else {
- u8 val;
- pci_read_config_byte(pdev, 0x9c, &val);
- base = val >> 3 << 27;
- }
- base -= dev_priv->mm.gtt->stolen_size;
-#endif
-
- return base + offset;
-}
-
-static void i915_warn_stolen(struct drm_device *dev)
-{
- DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
- DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
-}
-
-static void i915_setup_compression(struct drm_device *dev, int size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
- unsigned long cfb_base;
- unsigned long ll_base = 0;
-
- /* Just in case the BIOS is doing something questionable. */
- intel_disable_fbc(dev);
-
- compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
- if (compressed_fb)
- compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
- if (!compressed_fb)
- goto err;
-
- cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
- if (!cfb_base)
- goto err_fb;
-
- if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
- compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
- 4096, 4096, 0);
- if (compressed_llb)
- compressed_llb = drm_mm_get_block(compressed_llb,
- 4096, 4096);
- if (!compressed_llb)
- goto err_fb;
-
- ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
- if (!ll_base)
- goto err_llb;
- }
-
- dev_priv->cfb_size = size;
-
- dev_priv->compressed_fb = compressed_fb;
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
- else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
- } else {
- I915_WRITE(FBC_CFB_BASE, cfb_base);
- I915_WRITE(FBC_LL_BASE, ll_base);
- dev_priv->compressed_llb = compressed_llb;
- }
-
- DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
- cfb_base, ll_base, size >> 20);
- return;
-
-err_llb:
- drm_mm_put_block(compressed_llb);
-err_fb:
- drm_mm_put_block(compressed_fb);
-err:
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- i915_warn_stolen(dev);
-}
-
-static void i915_cleanup_compression(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- drm_mm_put_block(dev_priv->compressed_fb);
- if (dev_priv->compressed_llb)
- drm_mm_put_block(dev_priv->compressed_llb);
-}
-
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
@@ -1158,14 +1246,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
- printk(KERN_INFO "i915: switched on\n");
+ pr_info("switched on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
i915_resume(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
- printk(KERN_ERR "i915: switched off\n");
+ pr_err("switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
i915_suspend(dev, pmm);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
@@ -1183,88 +1271,11 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
- if (i915_enable_ppgtt >= 0)
- return i915_enable_ppgtt;
-
-#ifdef CONFIG_INTEL_IOMMU
- /* Disable ppgtt on SNB if VT-d is on. */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
- return false;
-#endif
-
- return true;
-}
-
-static int i915_load_gem_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long prealloc_size, gtt_size, mappable_size;
- int ret;
-
- prealloc_size = dev_priv->mm.gtt->stolen_size;
- gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
- mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
- /* Basic memrange allocator for stolen space */
- drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
-
- mutex_lock(&dev->struct_mutex);
- if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
- /* PPGTT pdes are stolen from global gtt ptes, so shrink the
- * aperture accordingly when using aliasing ppgtt. */
- gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
- /* For paranoia keep the guard page in between. */
- gtt_size -= PAGE_SIZE;
-
- i915_gem_do_init(dev, 0, mappable_size, gtt_size);
-
- ret = i915_gem_init_aliasing_ppgtt(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- } else {
- /* Let GEM Manage all of the aperture.
- *
- * However, leave one page at the end still bound to the scratch
- * page. There are a number of places where the hardware
- * apparently prefetches past the end of the object, and we've
- * seen multiple hangs with the GPU head pointer stuck in a
- * batchbuffer bound at the last page of the aperture. One page
- * should be enough to keep any prefetching inside of the
- * aperture.
- */
- i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
- }
-
- ret = i915_gem_init_hw(dev);
- mutex_unlock(&dev->struct_mutex);
- if (ret) {
- i915_gem_cleanup_aliasing_ppgtt(dev);
- return ret;
- }
-
- /* Try to set up FBC with a reasonable compressed buffer size */
- if (I915_HAS_FBC(dev) && i915_powersave) {
- int cfb_size;
-
- /* Leave 1M for line length buffer & misc. */
-
- /* Try to get a 32M buffer... */
- if (prealloc_size > (36*1024*1024))
- cfb_size = 32*1024*1024;
- else /* fall back to 7/8 of the stolen space */
- cfb_size = prealloc_size * 7 / 8;
- i915_setup_compression(dev, cfb_size);
- }
-
- /* Allow hardware batchbuffers unless told otherwise. */
- dev_priv->allow_batchbuffer = 1;
- return 0;
-}
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+ .set_gpu_state = i915_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = i915_switcheroo_can_switch,
+};
static int i915_load_modeset_init(struct drm_device *dev)
{
@@ -1288,22 +1299,22 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_register_dsm_handler();
- ret = vga_switcheroo_register_client(dev->pdev,
- i915_switcheroo_set_state,
- NULL,
- i915_switcheroo_can_switch);
+ ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
if (ret)
goto cleanup_vga_client;
- /* IIR "flip pending" bit means done if this bit is set */
- if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
- dev_priv->flip_pending_is_done = true;
+ /* Initialise stolen first so that we may reserve preallocated
+ * objects for the BIOS to KMS transition.
+ */
+ ret = i915_gem_init_stolen(dev);
+ if (ret)
+ goto cleanup_vga_switcheroo;
intel_modeset_init(dev);
- ret = i915_load_gem_init(dev);
+ ret = i915_gem_init(dev);
if (ret)
- goto cleanup_vga_switcheroo;
+ goto cleanup_gem_stolen;
intel_modeset_gem_init(dev);
@@ -1333,6 +1344,8 @@ cleanup_gem:
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
+cleanup_gem_stolen:
+ i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
@@ -1365,572 +1378,26 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
-static void i915_pineview_get_mem_freq(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 tmp;
-
- tmp = I915_READ(CLKCFG);
-
- switch (tmp & CLKCFG_FSB_MASK) {
- case CLKCFG_FSB_533:
- dev_priv->fsb_freq = 533; /* 133*4 */
- break;
- case CLKCFG_FSB_800:
- dev_priv->fsb_freq = 800; /* 200*4 */
- break;
- case CLKCFG_FSB_667:
- dev_priv->fsb_freq = 667; /* 167*4 */
- break;
- case CLKCFG_FSB_400:
- dev_priv->fsb_freq = 400; /* 100*4 */
- break;
- }
-
- switch (tmp & CLKCFG_MEM_MASK) {
- case CLKCFG_MEM_533:
- dev_priv->mem_freq = 533;
- break;
- case CLKCFG_MEM_667:
- dev_priv->mem_freq = 667;
- break;
- case CLKCFG_MEM_800:
- dev_priv->mem_freq = 800;
- break;
- }
-
- /* detect pineview DDR3 setting */
- tmp = I915_READ(CSHRDDR3CTL);
- dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
-}
-
-static void i915_ironlake_get_mem_freq(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u16 ddrpll, csipll;
-
- ddrpll = I915_READ16(DDRMPLL1);
- csipll = I915_READ16(CSIPLL0);
-
- switch (ddrpll & 0xff) {
- case 0xc:
- dev_priv->mem_freq = 800;
- break;
- case 0x10:
- dev_priv->mem_freq = 1066;
- break;
- case 0x14:
- dev_priv->mem_freq = 1333;
- break;
- case 0x18:
- dev_priv->mem_freq = 1600;
- break;
- default:
- DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
- ddrpll & 0xff);
- dev_priv->mem_freq = 0;
- break;
- }
-
- dev_priv->r_t = dev_priv->mem_freq;
-
- switch (csipll & 0x3ff) {
- case 0x00c:
- dev_priv->fsb_freq = 3200;
- break;
- case 0x00e:
- dev_priv->fsb_freq = 3733;
- break;
- case 0x010:
- dev_priv->fsb_freq = 4266;
- break;
- case 0x012:
- dev_priv->fsb_freq = 4800;
- break;
- case 0x014:
- dev_priv->fsb_freq = 5333;
- break;
- case 0x016:
- dev_priv->fsb_freq = 5866;
- break;
- case 0x018:
- dev_priv->fsb_freq = 6400;
- break;
- default:
- DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
- csipll & 0x3ff);
- dev_priv->fsb_freq = 0;
- break;
- }
-
- if (dev_priv->fsb_freq == 3200) {
- dev_priv->c_m = 0;
- } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
- dev_priv->c_m = 1;
- } else {
- dev_priv->c_m = 2;
- }
-}
-
-static const struct cparams {
- u16 i;
- u16 t;
- u16 m;
- u16 c;
-} cparams[] = {
- { 1, 1333, 301, 28664 },
- { 1, 1066, 294, 24460 },
- { 1, 800, 294, 25192 },
- { 0, 1333, 276, 27605 },
- { 0, 1066, 276, 27605 },
- { 0, 800, 231, 23784 },
-};
-
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
-{
- u64 total_count, diff, ret;
- u32 count1, count2, count3, m = 0, c = 0;
- unsigned long now = jiffies_to_msecs(jiffies), diff1;
- int i;
-
- diff1 = now - dev_priv->last_time1;
-
- /* Prevent division-by-zero if we are asking too fast.
- * Also, we don't get interesting results if we are polling
- * faster than once in 10ms, so just return the saved value
- * in such cases.
- */
- if (diff1 <= 10)
- return dev_priv->chipset_power;
-
- count1 = I915_READ(DMIEC);
- count2 = I915_READ(DDREC);
- count3 = I915_READ(CSIEC);
-
- total_count = count1 + count2 + count3;
-
- /* FIXME: handle per-counter overflow */
- if (total_count < dev_priv->last_count1) {
- diff = ~0UL - dev_priv->last_count1;
- diff += total_count;
- } else {
- diff = total_count - dev_priv->last_count1;
- }
-
- for (i = 0; i < ARRAY_SIZE(cparams); i++) {
- if (cparams[i].i == dev_priv->c_m &&
- cparams[i].t == dev_priv->r_t) {
- m = cparams[i].m;
- c = cparams[i].c;
- break;
- }
- }
-
- diff = div_u64(diff, diff1);
- ret = ((m * diff) + c);
- ret = div_u64(ret, 10);
-
- dev_priv->last_count1 = total_count;
- dev_priv->last_time1 = now;
-
- dev_priv->chipset_power = ret;
-
- return ret;
-}
-
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
-{
- unsigned long m, x, b;
- u32 tsfs;
-
- tsfs = I915_READ(TSFS);
-
- m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
- x = I915_READ8(TR1);
-
- b = tsfs & TSFS_INTR_MASK;
-
- return ((m * x) / 127) - b;
-}
-
-static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
-{
- static const struct v_table {
- u16 vd; /* in .1 mil */
- u16 vm; /* in .1 mil */
- } v_table[] = {
- { 0, 0, },
- { 375, 0, },
- { 500, 0, },
- { 625, 0, },
- { 750, 0, },
- { 875, 0, },
- { 1000, 0, },
- { 1125, 0, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4250, 3125, },
- { 4375, 3250, },
- { 4500, 3375, },
- { 4625, 3500, },
- { 4750, 3625, },
- { 4875, 3750, },
- { 5000, 3875, },
- { 5125, 4000, },
- { 5250, 4125, },
- { 5375, 4250, },
- { 5500, 4375, },
- { 5625, 4500, },
- { 5750, 4625, },
- { 5875, 4750, },
- { 6000, 4875, },
- { 6125, 5000, },
- { 6250, 5125, },
- { 6375, 5250, },
- { 6500, 5375, },
- { 6625, 5500, },
- { 6750, 5625, },
- { 6875, 5750, },
- { 7000, 5875, },
- { 7125, 6000, },
- { 7250, 6125, },
- { 7375, 6250, },
- { 7500, 6375, },
- { 7625, 6500, },
- { 7750, 6625, },
- { 7875, 6750, },
- { 8000, 6875, },
- { 8125, 7000, },
- { 8250, 7125, },
- { 8375, 7250, },
- { 8500, 7375, },
- { 8625, 7500, },
- { 8750, 7625, },
- { 8875, 7750, },
- { 9000, 7875, },
- { 9125, 8000, },
- { 9250, 8125, },
- { 9375, 8250, },
- { 9500, 8375, },
- { 9625, 8500, },
- { 9750, 8625, },
- { 9875, 8750, },
- { 10000, 8875, },
- { 10125, 9000, },
- { 10250, 9125, },
- { 10375, 9250, },
- { 10500, 9375, },
- { 10625, 9500, },
- { 10750, 9625, },
- { 10875, 9750, },
- { 11000, 9875, },
- { 11125, 10000, },
- { 11250, 10125, },
- { 11375, 10250, },
- { 11500, 10375, },
- { 11625, 10500, },
- { 11750, 10625, },
- { 11875, 10750, },
- { 12000, 10875, },
- { 12125, 11000, },
- { 12250, 11125, },
- { 12375, 11250, },
- { 12500, 11375, },
- { 12625, 11500, },
- { 12750, 11625, },
- { 12875, 11750, },
- { 13000, 11875, },
- { 13125, 12000, },
- { 13250, 12125, },
- { 13375, 12250, },
- { 13500, 12375, },
- { 13625, 12500, },
- { 13750, 12625, },
- { 13875, 12750, },
- { 14000, 12875, },
- { 14125, 13000, },
- { 14250, 13125, },
- { 14375, 13250, },
- { 14500, 13375, },
- { 14625, 13500, },
- { 14750, 13625, },
- { 14875, 13750, },
- { 15000, 13875, },
- { 15125, 14000, },
- { 15250, 14125, },
- { 15375, 14250, },
- { 15500, 14375, },
- { 15625, 14500, },
- { 15750, 14625, },
- { 15875, 14750, },
- { 16000, 14875, },
- { 16125, 15000, },
- };
- if (dev_priv->info->is_mobile)
- return v_table[pxvid].vm;
- else
- return v_table[pxvid].vd;
-}
-
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+static void
+i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
+ unsigned long size)
{
- struct timespec now, diff1;
- u64 diff;
- unsigned long diffms;
- u32 count;
-
- if (dev_priv->info->gen != 5)
- return;
-
- getrawmonotonic(&now);
- diff1 = timespec_sub(now, dev_priv->last_time2);
+ dev_priv->mm.gtt_mtrr = -1;
- /* Don't divide by 0 */
- diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
- if (!diffms)
+#if defined(CONFIG_X86_PAT)
+ if (cpu_has_pat)
return;
+#endif
- count = I915_READ(GFXEC);
-
- if (count < dev_priv->last_count2) {
- diff = ~0UL - dev_priv->last_count2;
- diff += count;
- } else {
- diff = count - dev_priv->last_count2;
- }
-
- dev_priv->last_count2 = count;
- dev_priv->last_time2 = now;
-
- /* More magic constants... */
- diff = diff * 1181;
- diff = div_u64(diff, diffms * 10);
- dev_priv->gfx_power = diff;
-}
-
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
-{
- unsigned long t, corr, state1, corr2, state2;
- u32 pxvid, ext_v;
-
- pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
- pxvid = (pxvid >> 24) & 0x7f;
- ext_v = pvid_to_extvid(dev_priv, pxvid);
-
- state1 = ext_v;
-
- t = i915_mch_val(dev_priv);
-
- /* Revel in the empirically derived constants */
-
- /* Correction factor in 1/100000 units */
- if (t > 80)
- corr = ((t * 2349) + 135940);
- else if (t >= 50)
- corr = ((t * 964) + 29317);
- else /* < 50 */
- corr = ((t * 301) + 1004);
-
- corr = corr * ((150142 * state1) / 10000 - 78642);
- corr /= 100000;
- corr2 = (corr * dev_priv->corr);
-
- state2 = (corr2 * state1) / 10000;
- state2 /= 100; /* convert to mW */
-
- i915_update_gfx_val(dev_priv);
-
- return dev_priv->gfx_power + state2;
-}
-
-/* Global for IPS driver to get at the current i915 device */
-static struct drm_i915_private *i915_mch_dev;
-/*
- * Lock protecting IPS related data structures
- * - i915_mch_dev
- * - dev_priv->max_delay
- * - dev_priv->min_delay
- * - dev_priv->fmax
- * - dev_priv->gpu_busy
- */
-static DEFINE_SPINLOCK(mchdev_lock);
-
-/**
- * i915_read_mch_val - return value for IPS use
- *
- * Calculate and return a value for the IPS driver to use when deciding whether
- * we have thermal and power headroom to increase CPU or GPU power budget.
- */
-unsigned long i915_read_mch_val(void)
-{
- struct drm_i915_private *dev_priv;
- unsigned long chipset_val, graphics_val, ret = 0;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev)
- goto out_unlock;
- dev_priv = i915_mch_dev;
-
- chipset_val = i915_chipset_val(dev_priv);
- graphics_val = i915_gfx_val(dev_priv);
-
- ret = chipset_val + graphics_val;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_read_mch_val);
-
-/**
- * i915_gpu_raise - raise GPU frequency limit
- *
- * Raise the limit; IPS indicates we have thermal headroom.
- */
-bool i915_gpu_raise(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- if (dev_priv->max_delay > dev_priv->fmax)
- dev_priv->max_delay--;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_raise);
-
-/**
- * i915_gpu_lower - lower GPU frequency limit
- *
- * IPS indicates we're close to a thermal limit, so throttle back the GPU
- * frequency maximum.
- */
-bool i915_gpu_lower(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- if (dev_priv->max_delay < dev_priv->min_delay)
- dev_priv->max_delay++;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_lower);
-
-/**
- * i915_gpu_busy - indicate GPU business to IPS
- *
- * Tell the IPS driver whether or not the GPU is busy.
- */
-bool i915_gpu_busy(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = false;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev)
- goto out_unlock;
- dev_priv = i915_mch_dev;
-
- ret = dev_priv->busy;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_busy);
-
-/**
- * i915_gpu_turbo_disable - disable graphics turbo
- *
- * Disable graphics turbo by resetting the max frequency and setting the
- * current frequency to the default.
- */
-bool i915_gpu_turbo_disable(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- dev_priv->max_delay = dev_priv->fstart;
-
- if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
- ret = false;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
-
-/**
- * Tells the intel_ips driver that the i915 driver is now loaded, if
- * IPS got loaded first.
- *
- * This awkward dance is so that neither module has to depend on the
- * other in order for IPS to do the appropriate communication of
- * GPU turbo limits to i915.
- */
-static void
-ips_ping_for_i915_load(void)
-{
- void (*link)(void);
-
- link = symbol_get(ips_link_to_i915_driver);
- if (link) {
- link();
- symbol_put(ips_link_to_i915_driver);
+ /* Set up a WC MTRR for non-PAT systems. This is more common than
+ * one would think, because the kernel disables PAT on first
+ * generation Core chips because WC PAT gets overridden by a UC
+ * MTRR if present. Even if a UC MTRR isn't present.
+ */
+ dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1);
+ if (dev_priv->mm.gtt_mtrr < 0) {
+ DRM_INFO("MTRR allocation failed. Graphics "
+ "performance may suffer.\n");
}
}
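mtrr_add() returns the MTRR register index on success or a negative errno, which is why gtt_mtrr starts at -1 and failure is only a performance warning. The unload path is expected to undo this symmetrically with mtrr_del() using the same base and size; a sketch under that assumption, with an illustrative function name and relying on the driver's own headers (i915_drv.h, asm/mtrr.h):

	static void example_mtrr_teardown(struct drm_i915_private *dev_priv,
					  unsigned long base, unsigned long size)
	{
		if (dev_priv->mm.gtt_mtrr >= 0) {
			mtrr_del(dev_priv->mm.gtt_mtrr, base, size);
			dev_priv->mm.gtt_mtrr = -1;
		}
	}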
@@ -1948,8 +1415,16 @@ ips_ping_for_i915_load(void)
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
+ struct intel_device_info *info;
int ret = 0, mmio_bar;
- uint32_t agp_size;
+ uint32_t aperture_size;
+
+ info = (struct intel_device_info *) flags;
+
+ /* Refuse to load on gen6+ without kms enabled. */
+ if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
/* i915 has 4 more counters */
dev->counters += 4;
@@ -1964,7 +1439,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
- dev_priv->info = (struct intel_device_info *) flags;
+ dev_priv->info = info;
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
@@ -2003,27 +1478,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_rmmap;
}
- agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
dev_priv->mm.gtt_mapping =
- io_mapping_create_wc(dev->agp->base, agp_size);
+ io_mapping_create_wc(dev->agp->base, aperture_size);
if (dev_priv->mm.gtt_mapping == NULL) {
ret = -EIO;
goto out_rmmap;
}
- /* Set up a WC MTRR for non-PAT systems. This is more common than
- * one would think, because the kernel disables PAT on first
- * generation Core chips because WC PAT gets overridden by a UC
- * MTRR if present. Even if a UC MTRR isn't present.
- */
- dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
- agp_size,
- MTRR_TYPE_WRCOMB, 1);
- if (dev_priv->mm.gtt_mtrr < 0) {
- DRM_INFO("MTRR allocation failed. Graphics "
- "performance may suffer.\n");
- }
+ i915_mtrr_setup(dev_priv, dev->agp->base, aperture_size);
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
@@ -2047,9 +1511,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
- /* enable GEM by default */
- dev_priv->has_gem = 1;
-
intel_irq_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
@@ -2069,11 +1530,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gem_unload;
}
- if (IS_PINEVIEW(dev))
- i915_pineview_get_mem_freq(dev);
- else if (IS_GEN5(dev))
- i915_ironlake_get_mem_freq(dev);
-
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
@@ -2093,7 +1549,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
@@ -2117,6 +1573,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
}
+ i915_setup_sysfs(dev);
+
/* Must be done after probing outputs */
intel_opregion_init(dev);
acpi_video_register();
@@ -2124,14 +1582,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
- if (IS_GEN5(dev)) {
- spin_lock(&mchdev_lock);
- i915_mch_dev = dev_priv;
- dev_priv->mchdev_lock = &mchdev_lock;
- spin_unlock(&mchdev_lock);
-
- ips_ping_for_i915_load();
- }
+ if (IS_GEN5(dev))
+ intel_gpu_ips_init(dev_priv);
return 0;
@@ -2166,17 +1618,18 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- spin_lock(&mchdev_lock);
- i915_mch_dev = NULL;
- spin_unlock(&mchdev_lock);
+ intel_gpu_ips_teardown();
+
+ i915_teardown_sysfs(dev);
if (dev_priv->mm.inactive_shrinker.shrink)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
+ i915_gem_retire_requests(dev);
mutex_unlock(&dev->struct_mutex);
/* Cancel the retire work handler, which should be idle now. */
@@ -2228,8 +1681,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
- if (I915_HAS_FBC(dev) && i915_powersave)
- i915_cleanup_compression(dev);
+ i915_gem_cleanup_stolen(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
intel_cleanup_overlay(dev);
@@ -2277,7 +1729,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
* mode setting case, we want to restore the kernel's initial mode (just
* in case the last client left us in a bad state).
*
- * Additionally, in the non-mode setting case, we'll tear down the AGP
+ * Additionally, in the non-mode setting case, we'll tear down the GTT
* and DMA structures, since the kernel won't be using them, and clean
* up any GEM state.
*/
@@ -2322,7 +1774,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -2355,16 +1807,10 @@ struct drm_ioctl_desc i915_ioctls[] = {
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
-/**
- * Determine if the device really is AGP or not.
- *
- * All Intel graphics chipsets are treated as AGP, even if they are really
- * PCI-e.
- *
- * \param dev The device to be tested.
- *
- * \returns
- * A value of 1 is always retured to indictate every i9x5 is AGP.
+/*
+ * This is really ugly: Because old userspace abused the linux agp interface to
+ * manage the gtt, we need to claim that all intel devices are agp. For
+ * otherwise the drm core refuses to initialize the agp support code.
*/
int i915_driver_device_is_agp(struct drm_device * dev)
{
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ae8a64f9f845..238a52165833 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -84,6 +84,12 @@ MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");
+int i915_lvds_channel_mode __read_mostly;
+module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
+MODULE_PARM_DESC(lvds_channel_mode,
+ "Specify LVDS channel mode "
+ "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
+
int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
@@ -93,8 +99,8 @@ MODULE_PARM_DESC(lvds_use_ssc,
int i915_vbt_sdvo_panel_type __read_mostly = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
- "Override selection of SDVO panel mode in the VBT "
- "(default: auto)");
+ "Override/Ignore selection of SDVO panel mode in the VBT "
+ "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
static bool i915_try_reset __read_mostly = true;
module_param_named(reset, i915_try_reset, bool, 0600);
@@ -209,6 +215,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
@@ -216,6 +223,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.has_bsd_ring = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
@@ -224,6 +232,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
@@ -233,6 +242,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_ivybridge_d_info = {
@@ -241,6 +251,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
@@ -250,6 +261,43 @@ static const struct intel_device_info intel_ivybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
+};
+
+static const struct intel_device_info intel_valleyview_m_info = {
+ .gen = 7, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 0,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .is_valleyview = 1,
+};
+
+static const struct intel_device_info intel_valleyview_d_info = {
+ .gen = 7,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 0,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .is_valleyview = 1,
+};
+
+static const struct intel_device_info intel_haswell_d_info = {
+ .is_haswell = 1, .gen = 7,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_pch_split = 1,
+};
+
+static const struct intel_device_info intel_haswell_m_info = {
+ .is_haswell = 1, .gen = 7, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_pch_split = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -297,6 +345,13 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
+ INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
+ INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
+ INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
+ INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
+ INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
+ INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
+ INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
{0, 0, 0}
};
@@ -308,6 +363,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
void intel_detect_pch(struct drm_device *dev)
{
@@ -328,20 +384,45 @@ void intel_detect_pch(struct drm_device *dev)
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
+ dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
+ dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
+ dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+ } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_LPT;
+ dev_priv->num_pch_pll = 0;
+ DRM_DEBUG_KMS("Found LynxPoint PCH\n");
}
+ BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
pci_dev_put(pch);
}
}
+bool i915_semaphore_is_enabled(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ return 0;
+
+ if (i915_semaphores >= 0)
+ return i915_semaphores;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Enable semaphores on SNB when IO remapping is off */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return 1;
+}
+
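i915_semaphore_is_enabled() above follows the usual tri-state module-parameter convention: a negative value means "auto", and any non-negative value is an explicit user override that beats the hardware heuristics. A minimal standalone sketch of that convention, with illustrative names and an illustrative auto policy rather than the driver's real ones:

#include <stdbool.h>

static int feature_param = -1;          /* -1 = auto, 0 = force off, 1 = force on */

static bool feature_enabled(int hw_gen)
{
        if (hw_gen < 6)                 /* hardware without the feature */
                return false;
        if (feature_param >= 0)         /* explicit user override wins */
                return feature_param;
        return true;                    /* auto: enable wherever supported */
}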
void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -366,7 +447,7 @@ void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
udelay(10);
- I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
POSTING_READ(FORCEWAKE_MT);
count = 0;
@@ -408,7 +489,7 @@ void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
@@ -446,6 +527,31 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret;
}
+void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ count = 0;
+
+ /* Already awake? */
+ if ((I915_READ(0x130094) & 0xa1) == 0xa1)
+ return;
+
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
+ POSTING_READ(FORCEWAKE_VLV);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
+ udelay(10);
+}
+
+void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
+ /* FIXME: confirm VLV behavior with Punit folks */
+ POSTING_READ(FORCEWAKE_VLV);
+}
+
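The Valleyview force-wake helpers reuse the bounded poll-for-ack idiom of the gen6 variants above: write the wake request, then spin on the ack register with a short delay and an iteration cap instead of waiting forever. A generic sketch of that idiom; the accessor names and timings are placeholders, not the driver's API:

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for I915_WRITE_NOTRACE/I915_READ_NOTRACE/udelay. */
extern void     mmio_write32(uint32_t reg, uint32_t val);
extern uint32_t mmio_read32(uint32_t reg);
extern void     delay_us(unsigned int us);

/* Assert a wake request and poll the ack bit, giving up after ~500us. */
static bool request_and_wait_ack(uint32_t req_reg, uint32_t ack_reg, uint32_t ack_bit)
{
        int tries = 0;

        mmio_write32(req_reg, 0xffffffff);
        while (tries++ < 50) {
                if (mmio_read32(ack_reg) & ack_bit)
                        return true;    /* hardware acknowledged the request */
                delay_us(10);
        }
        return false;                   /* timed out */
}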
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -525,15 +631,16 @@ static int i915_drm_thaw(struct drm_device *dev)
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_init_pch_refclk(dev);
+
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
- if (HAS_PCH_SPLIT(dev))
- ironlake_init_pch_refclk(dev);
-
+ intel_modeset_init_hw(dev);
drm_mode_config_reset(dev);
drm_irq_install(dev);
@@ -541,9 +648,6 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_lock(&dev->mode_config.mutex);
drm_helper_resume_force_mode(dev);
mutex_unlock(&dev->mode_config.mutex);
-
- if (IS_IRONLAKE_M(dev))
- ironlake_enable_rc6(dev);
}
intel_opregion_init(dev);
@@ -576,7 +680,7 @@ int i915_resume(struct drm_device *dev)
return 0;
}
-static int i8xx_do_reset(struct drm_device *dev, u8 flags)
+static int i8xx_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -610,11 +714,12 @@ static int i965_reset_complete(struct drm_device *dev)
{
u8 gdrst;
pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- return gdrst & 0x1;
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
-static int i965_do_reset(struct drm_device *dev, u8 flags)
+static int i965_do_reset(struct drm_device *dev)
{
+ int ret;
u8 gdrst;
/*
@@ -623,20 +728,43 @@ static int i965_do_reset(struct drm_device *dev, u8 flags)
* triggers the reset; when done, the hardware will clear it.
*/
pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ gdrst | GRDOM_RENDER |
+ GRDOM_RESET_ENABLE);
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render&media without also resetting display ... */
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ gdrst | GRDOM_MEDIA |
+ GRDOM_RESET_ENABLE);
return wait_for(i965_reset_complete(dev), 500);
}
-static int ironlake_do_reset(struct drm_device *dev, u8 flags)
+static int ironlake_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
- I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
+ u32 gdrst;
+ int ret;
+
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render&media without also resetting display ... */
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
-static int gen6_do_reset(struct drm_device *dev, u8 flags)
+static int gen6_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -671,10 +799,44 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
return ret;
}
+static int intel_gpu_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = -ENODEV;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ ret = gen6_do_reset(dev);
+ break;
+ case 5:
+ ret = ironlake_do_reset(dev);
+ break;
+ case 4:
+ ret = i965_do_reset(dev);
+ break;
+ case 2:
+ ret = i8xx_do_reset(dev);
+ break;
+ }
+
+ /* Also reset the gpu hangman. */
+ if (dev_priv->stop_rings) {
+ DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
+ dev_priv->stop_rings = 0;
+ if (ret == -ENODEV) {
+ DRM_ERROR("Reset not implemented, but ignoring "
+ "error for simulated gpu hangs\n");
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
- * @flags: reset domains
*
* Reset the chip. Useful if a hang is detected. Returns zero on successful
* reset or otherwise an error code.
@@ -687,14 +849,9 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
* - re-init interrupt state
* - re-init display
*/
-int i915_reset(struct drm_device *dev, u8 flags)
+int i915_reset(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- /*
- * We really should only reset the display subsystem if we actually
- * need to
- */
- bool need_display = true;
int ret;
if (!i915_try_reset)
@@ -703,26 +860,16 @@ int i915_reset(struct drm_device *dev, u8 flags)
if (!mutex_trylock(&dev->struct_mutex))
return -EBUSY;
+ dev_priv->stop_rings = 0;
+
i915_gem_reset(dev);
ret = -ENODEV;
- if (get_seconds() - dev_priv->last_gpu_reset < 5) {
+ if (get_seconds() - dev_priv->last_gpu_reset < 5)
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
- } else switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- ret = gen6_do_reset(dev, flags);
- break;
- case 5:
- ret = ironlake_do_reset(dev, flags);
- break;
- case 4:
- ret = i965_do_reset(dev, flags);
- break;
- case 2:
- ret = i8xx_do_reset(dev, flags);
- break;
- }
+ else
+ ret = intel_gpu_reset(dev);
+
dev_priv->last_gpu_reset = get_seconds();
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
@@ -746,36 +893,27 @@ int i915_reset(struct drm_device *dev, u8 flags)
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->mm.suspended) {
+ struct intel_ring_buffer *ring;
+ int i;
+
dev_priv->mm.suspended = 0;
i915_gem_init_swizzling(dev);
- dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
- if (HAS_BSD(dev))
- dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
- if (HAS_BLT(dev))
- dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+ for_each_ring(ring, dev_priv, i)
+ ring->init(ring);
i915_gem_init_ppgtt(dev);
mutex_unlock(&dev->struct_mutex);
- drm_irq_uninstall(dev);
- drm_mode_config_reset(dev);
- drm_irq_install(dev);
- mutex_lock(&dev->struct_mutex);
- }
- mutex_unlock(&dev->struct_mutex);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_modeset_init_hw(dev);
- /*
- * Perform a full modeset as on later generations, e.g. Ironlake, we may
- * need to retrain the display link and cannot just restore the register
- * values.
- */
- if (need_display) {
- mutex_lock(&dev->mode_config.mutex);
- drm_helper_resume_force_mode(dev);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_irq_uninstall(dev);
+ drm_irq_install(dev);
+ } else {
+ mutex_unlock(&dev->struct_mutex);
}
return 0;
@@ -874,7 +1012,7 @@ static const struct dev_pm_ops i915_pm_ops = {
.restore = i915_pm_resume,
};
-static struct vm_operations_struct i915_gem_vm_ops = {
+static const struct vm_operations_struct i915_gem_vm_ops = {
.fault = i915_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -901,7 +1039,7 @@ static struct drm_driver driver = {
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
.load = i915_driver_load,
.unload = i915_driver_unload,
.open = i915_driver_open,
@@ -924,6 +1062,12 @@ static struct drm_driver driver = {
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = i915_gem_prime_export,
+ .gem_prime_import = i915_gem_prime_import,
+
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = i915_gem_dumb_destroy,
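With DRIVER_PRIME set and the four prime hooks above wired up, userspace can turn a GEM handle into a shareable dma-buf file descriptor through the generic DRM prime ioctls. A hedged userspace-side sketch using libdrm's wrapper (the function and flag names are libdrm's; everything else is illustrative):

#include <stdint.h>
#include <xf86drm.h>    /* drmPrimeHandleToFD(), DRM_CLOEXEC */

/* Export a GEM handle on an open DRM fd as a dma-buf fd that another
 * process or driver can import. Returns 0 on success, negative on error. */
static int export_gem_handle(int drm_fd, uint32_t gem_handle, int *dmabuf_fd)
{
        return drmPrimeHandleToFD(drm_fd, gem_handle, DRM_CLOEXEC, dmabuf_fd);
}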
@@ -993,6 +1137,13 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+ (((dev_priv)->info->gen >= 6) && \
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE)) && \
+ (!IS_VALLEYVIEW((dev_priv)->dev))
+
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5fabc6c31fec..c9cfc67c2cf5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -38,6 +38,8 @@
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
+#include <linux/intel-iommu.h>
+#include <linux/kref.h>
/* General customization:
*/
@@ -63,10 +65,30 @@ enum plane {
};
#define plane_name(p) ((p) + 'A')
+enum port {
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ I915_MAX_PORTS
+};
+#define port_name(p) ((p) + 'A')
+
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+struct intel_pch_pll {
+ int refcount; /* count of number of CRTCs sharing this PLL */
+ int active; /* count of number of active CRTCs (i.e. DPMS on) */
+ bool on; /* is the PLL actually active? Disabled during modeset */
+ int pll_reg;
+ int fp0_reg;
+ int fp1_reg;
+};
+#define I915_NUM_PLLS 2
+
/* Interface history:
*
* 1.1: Original.
@@ -111,11 +133,11 @@ struct opregion_asle;
struct drm_i915_private;
struct intel_opregion {
- struct opregion_header *header;
- struct opregion_acpi *acpi;
- struct opregion_swsci *swsci;
- struct opregion_asle *asle;
- void *vbt;
+ struct opregion_header __iomem *header;
+ struct opregion_acpi __iomem *acpi;
+ struct opregion_swsci __iomem *swsci;
+ struct opregion_asle __iomem *asle;
+ void __iomem *vbt;
u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)
@@ -135,7 +157,6 @@ struct drm_i915_master_private {
struct drm_i915_fence_reg {
struct list_head lru_list;
struct drm_i915_gem_object *obj;
- uint32_t setup_seqno;
int pin_count;
};
@@ -151,8 +172,11 @@ struct sdvo_device_mapping {
struct intel_display_error_state;
struct drm_i915_error_state {
+ struct kref ref;
u32 eir;
u32 pgtbl_er;
+ u32 ier;
+ bool waiting[I915_NUM_RINGS];
u32 pipestat[I915_MAX_PIPES];
u32 tail[I915_NUM_RINGS];
u32 head[I915_NUM_RINGS];
@@ -218,11 +242,15 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size);
+ void (*sanitize_pm)(struct drm_device *dev);
+ void (*update_linetime_wm)(struct drm_device *dev, int pipe,
+ struct drm_display_mode *mode);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
+ void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
@@ -255,6 +283,9 @@ struct intel_device_info {
u8 is_broadwater:1;
u8 is_crestline:1;
u8 is_ivybridge:1;
+ u8 is_valleyview:1;
+ u8 has_pch_split:1;
+ u8 is_haswell:1;
u8 has_fbc:1;
u8 has_pipe_cxsr:1;
u8 has_hotplug:1;
@@ -291,10 +322,12 @@ enum no_fbc_reason {
enum intel_pch {
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
+ PCH_LPT, /* Lynxpoint PCH */
};
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
+#define QUIRK_INVERT_BRIGHTNESS (1<<2)
struct intel_fbdev;
struct intel_fbc_work;
@@ -302,7 +335,6 @@ struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
bool force_bit;
- bool has_gpio;
u32 reg0;
u32 gpio_reg;
struct i2c_algo_bit_data bit_algo;
@@ -314,7 +346,6 @@ typedef struct drm_i915_private {
const struct intel_device_info *info;
- int has_gem;
int relative_constants_mode;
void __iomem *regs;
@@ -326,19 +357,23 @@ typedef struct drm_i915_private {
/** gt_lock is also taken in irq contexts. */
struct spinlock gt_lock;
- struct intel_gmbus *gmbus;
+ struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
+ /**
+ * Base address of the gmbus and gpio block.
+ */
+ uint32_t gpio_mmio_base;
+
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
uint32_t counter;
- drm_local_map_t hws_map;
struct drm_i915_gem_object *pwrctx;
struct drm_i915_gem_object *renderctx;
@@ -354,6 +389,10 @@ typedef struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
+
+ /* DPIO indirect register protection */
+ spinlock_t dpio_lock;
+
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
@@ -363,22 +402,20 @@ typedef struct drm_i915_private {
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
- int tex_lru_log_granularity;
- int allow_batchbuffer;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
- int vblank_pipe;
int num_pipe;
+ int num_pch_pll;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
struct timer_list hangcheck_timer;
int hangcheck_count;
- uint32_t last_acthd;
- uint32_t last_acthd_bsd;
- uint32_t last_acthd_blt;
+ uint32_t last_acthd[I915_NUM_RINGS];
uint32_t last_instdone;
uint32_t last_instdone1;
+ unsigned int stop_rings;
+
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
@@ -405,6 +442,8 @@ typedef struct drm_i915_private {
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
@@ -428,6 +467,7 @@ typedef struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
spinlock_t error_lock;
+ /* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
struct completion error_completion;
@@ -652,24 +692,10 @@ typedef struct drm_i915_private {
*/
struct list_head inactive_list;
- /**
- * LRU list of objects which are not in the ringbuffer but
- * are still pinned in the GTT.
- */
- struct list_head pinned_list;
-
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
/**
- * List of objects currently pending being freed.
- *
- * These objects are no longer in use, but due to a signal
- * we were prevented from freeing them at the appointed time.
- */
- struct list_head deferred_free_list;
-
- /**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
@@ -717,6 +743,16 @@ typedef struct drm_i915_private {
size_t object_memory;
u32 object_count;
} mm;
+
+ /* Old dri1 support infrastructure, beware the dragons ya fools entering
+ * here! */
+ struct {
+ unsigned allow_batchbuffer : 1;
+ u32 __iomem *gfx_hws_cpu_addr;
+ } dri1;
+
+ /* Kernel Modesetting */
+
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
@@ -726,7 +762,8 @@ typedef struct drm_i915_private {
struct drm_crtc *plane_to_crtc_mapping[3];
struct drm_crtc *pipe_to_crtc_mapping[3];
wait_queue_head_t pending_flip_queue;
- bool flip_pending_is_done;
+
+ struct intel_pch_pll pch_plls[I915_NUM_PLLS];
/* Reclocking support */
bool render_reclock_avail;
@@ -781,6 +818,11 @@ typedef struct drm_i915_private {
struct drm_property *force_audio_property;
} drm_i915_private_t;
+/* Iterate over initialised rings */
+#define for_each_ring(ring__, dev_priv__, i__) \
+ for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+ if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
+
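As used in the reworked i915_reset() hunk earlier in this diff, the iterator replaces the open-coded RCS/VCS/BCS checks:

        struct intel_ring_buffer *ring;
        int i;

        for_each_ring(ring, dev_priv, i)
                ring->init(ring);       /* runs only for rings intel_ring_initialized() reports as set up */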
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
HDMI_AUDIO_OFF, /* force turn off HDMI audio */
@@ -844,7 +886,14 @@ struct drm_i915_gem_object {
* Current tiling mode for the object.
*/
unsigned int tiling_mode:2;
- unsigned int tiling_changed:1;
+ /**
+ * Whether the tiling parameters for the currently associated fence
+ * register have changed. Note that for the purposes of tracking
+ * tiling changes we also treat the unfenced register, the register
+ * slot that the object occupies whilst it executes a fenced
+ * command (such as BLT on gen2/3), as a "fence".
+ */
+ unsigned int fence_dirty:1;
/** How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -881,6 +930,7 @@ struct drm_i915_gem_object {
unsigned int cache_level:2;
unsigned int has_aliasing_ppgtt_mapping:1;
+ unsigned int has_global_gtt_mapping:1;
struct page **pages;
@@ -890,6 +940,11 @@ struct drm_i915_gem_object {
struct scatterlist *sg_list;
int num_sg;
+ /* prime dma-buf support */
+ struct sg_table *sg_table;
+ void *dma_buf_vmapping;
+ int vmapping_count;
+
/**
* Used for performing relocations during execbuffer insertion.
*/
@@ -904,13 +959,12 @@ struct drm_i915_gem_object {
*/
uint32_t gtt_offset;
- /** Breadcrumb of last rendering to the buffer. */
- uint32_t last_rendering_seqno;
struct intel_ring_buffer *ring;
+ /** Breadcrumb of last rendering to the buffer. */
+ uint32_t last_rendering_seqno;
/** Breadcrumb of last fenced GPU access to the buffer. */
uint32_t last_fenced_seqno;
- struct intel_ring_buffer *last_fenced_ring;
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
@@ -918,13 +972,6 @@ struct drm_i915_gem_object {
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
-
- /**
- * If present, while GEM_DOMAIN_CPU is in the read domain this array
- * flags which individual pages are valid.
- */
- uint8_t *page_cpu_valid;
-
/** User space pin count and filp owning the pin */
uint32_t user_pin_count;
struct drm_file *pin_filp;
@@ -1001,6 +1048,8 @@ struct drm_i915_file_private {
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
+#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
+#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
/*
@@ -1044,10 +1093,11 @@ struct drm_i915_file_private {
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
+#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@@ -1081,6 +1131,7 @@ extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
+extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
@@ -1094,6 +1145,7 @@ extern int i915_master_create(struct drm_device *dev, struct drm_master *master)
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
/* i915_dma.c */
+void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
@@ -1104,12 +1156,14 @@ extern void i915_driver_preclose(struct drm_device *dev,
extern void i915_driver_postclose(struct drm_device *dev,
struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
+#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+#endif
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *box,
int DR1, int DR4);
-extern int i915_reset(struct drm_device *dev, u8 flags);
+extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -1119,19 +1173,10 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
-extern int i915_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
extern void intel_irq_init(struct drm_device *dev);
-extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_vblank_swap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+void i915_error_state_free(struct kref *error_ref);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1205,8 +1250,12 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
+int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+ gfp_t gfpmask);
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
+int i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
u32 seqno);
@@ -1229,17 +1278,18 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined);
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-static inline void
+static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
dev_priv->fence_regs[obj->fence_reg].pin_count++;
- }
+ return true;
+ } else
+ return false;
}
static inline void
@@ -1260,27 +1310,25 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-void i915_gem_do_init(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end);
-int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
+int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int __must_check i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno,
- bool do_retire);
+ uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
int __must_check
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined);
@@ -1301,6 +1349,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gem_obj, int flags);
+
+
/* i915_gem_gtt.c */
int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
@@ -1311,18 +1366,24 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+void i915_gem_init_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, bool mappable);
-int __must_check i915_gem_evict_everything(struct drm_device *dev,
- bool purgeable_only);
-int __must_check i915_gem_evict_inactive(struct drm_device *dev,
- bool purgeable_only);
+int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+
+/* i915_gem_stolen.c */
+int i915_gem_init_stolen(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_device *dev);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -1354,9 +1415,20 @@ extern int i915_restore_state(struct drm_device *dev);
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
+/* i915_sysfs.c */
+void i915_setup_sysfs(struct drm_device *dev_priv);
+void i915_teardown_sysfs(struct drm_device *dev_priv);
+
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
+extern inline bool intel_gmbus_is_port_valid(unsigned port)
+{
+ return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
+}
+
+extern struct i2c_adapter *intel_gmbus_get_adapter(
+ struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
@@ -1391,6 +1463,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */
/* modesetting */
+extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1403,12 +1476,17 @@ extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern int intel_enable_rc6(const struct drm_device *dev);
+extern bool i915_semaphore_is_enabled(struct drm_device *dev);
extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
+extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
+
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1420,28 +1498,6 @@ extern void intel_display_print_error_state(struct seq_file *m,
struct intel_display_error_state *error);
#endif
-#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
-
-#define BEGIN_LP_RING(n) \
- intel_ring_begin(LP_RING(dev_priv), (n))
-
-#define OUT_RING(x) \
- intel_ring_emit(LP_RING(dev_priv), x)
-
-#define ADVANCE_LP_RING() \
- intel_ring_advance(LP_RING(dev_priv))
-
-/**
- * Lock test for when it's just for synchronization of ring access.
- *
- * In that case, we don't need to do it when GEM is initialized as nobody else
- * has access to the ring.
- */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
- if (LP_RING(dev->dev_private)->obj == NULL) \
- LOCK_TEST_WITH_RETURN(dev, file); \
-} while (0)
-
/* On SNB platform, before reading ring registers forcewake bit
* must be set to prevent GT core from power down and stale values being
* returned.
@@ -1450,12 +1506,6 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
- (((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
-
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0d1e4b7b4b99..288d7b8f49ae 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,31 +35,41 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
+#include <linux/dma-buf.h>
static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
- bool write);
-static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
- uint64_t offset,
- uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
bool map_and_fenceable);
-static void i915_gem_clear_fence_reg(struct drm_device *dev,
- struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file);
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj);
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable);
static int i915_gem_inactive_shrink(struct shrinker *shrinker,
struct shrink_control *sc);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ /* As we do not have an associated fence register, we will force
+ * a tiling change if we ever need to acquire one.
+ */
+ obj->fence_dirty = false;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
size_t size)
@@ -122,26 +132,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return obj->gtt_space && !obj->active && obj->pin_count == 0;
-}
-
-void i915_gem_do_init(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
-
- dev_priv->mm.gtt_start = start;
- dev_priv->mm.gtt_mappable_end = mappable_end;
- dev_priv->mm.gtt_end = end;
- dev_priv->mm.gtt_total = end - start;
- dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
-
- /* Take over this portion of the GTT */
- intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+ return !obj->active;
}
int
@@ -150,12 +141,20 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_init *args = data;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
if (args->gtt_start >= args->gtt_end ||
(args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
return -EINVAL;
+ /* GEM with user mode setting was never supported on ilk and later. */
+ if (INTEL_INFO(dev)->gen >= 5)
+ return -ENODEV;
+
mutex_lock(&dev->struct_mutex);
- i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
+ i915_gem_init_global_gtt(dev, args->gtt_start,
+ args->gtt_end, args->gtt_end);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -170,13 +169,11 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
size_t pinned;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
pinned = 0;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
- pinned += obj->gtt_space->size;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+ if (obj->pin_count)
+ pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
args->aper_size = dev_priv->mm.gtt_total;
@@ -247,6 +244,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_create *args = data;
+
return i915_gem_create(file, dev,
args->size, &args->handle);
}
@@ -259,66 +257,6 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
obj->tiling_mode != I915_TILING_NONE;
}
-/**
- * This is the fast shmem pread path, which attempts to copy_from_user directly
- * from the backing pages of the object to the user's address space. On a
- * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
- */
-static int
-i915_gem_shmem_pread_fast(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pread *args,
- struct drm_file *file)
-{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- ssize_t remain;
- loff_t offset;
- char __user *user_data;
- int page_offset, page_length;
-
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- remain = args->size;
-
- offset = args->offset;
-
- while (remain > 0) {
- struct page *page;
- char *vaddr;
- int ret;
-
- /* Operation in this page
- *
- * page_offset = offset within page
- * page_length = bytes to copy for this page
- */
- page_offset = offset_in_page(offset);
- page_length = remain;
- if ((page_offset + remain) > PAGE_SIZE)
- page_length = PAGE_SIZE - page_offset;
-
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page))
- return PTR_ERR(page);
-
- vaddr = kmap_atomic(page);
- ret = __copy_to_user_inatomic(user_data,
- vaddr + page_offset,
- page_length);
- kunmap_atomic(vaddr);
-
- mark_page_accessed(page);
- page_cache_release(page);
- if (ret)
- return -EFAULT;
-
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
-
- return 0;
-}
-
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
const char *gpu_vaddr, int gpu_offset,
@@ -346,8 +284,8 @@ __copy_to_user_swizzled(char __user *cpu_vaddr,
}
static inline int
-__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
- const char *cpu_vaddr,
+__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
+ const char __user *cpu_vaddr,
int length)
{
int ret, cpu_offset = 0;
@@ -371,37 +309,121 @@ __copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
return 0;
}
-/**
- * This is the fallback shmem pread path, which allocates temporary storage
- * in kernel space to copy_to_user into outside of the struct_mutex, so we
- * can copy out of the object's backing pages while holding the struct mutex
- * and not take page faults.
- */
+/* Per-page copy function for the shmem pread fastpath.
+ * Flushes invalid cachelines before reading the target if
+ * needs_clflush is set. */
static int
-i915_gem_shmem_pread_slow(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pread *args,
- struct drm_file *file)
+shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling, bool needs_clflush)
+{
+ char *vaddr;
+ int ret;
+
+ if (unlikely(page_do_bit17_swizzling))
+ return -EINVAL;
+
+ vaddr = kmap_atomic(page);
+ if (needs_clflush)
+ drm_clflush_virt_range(vaddr + shmem_page_offset,
+ page_length);
+ ret = __copy_to_user_inatomic(user_data,
+ vaddr + shmem_page_offset,
+ page_length);
+ kunmap_atomic(vaddr);
+
+ return ret;
+}
+
+static void
+shmem_clflush_swizzled_range(char *addr, unsigned long length,
+ bool swizzled)
+{
+ if (unlikely(swizzled)) {
+ unsigned long start = (unsigned long) addr;
+ unsigned long end = (unsigned long) addr + length;
+
+ /* For swizzling simply ensure that we always flush both
+ * channels. Lame, but simple and it works. Swizzled
+ * pwrite/pread is far from a hotpath - current userspace
+ * doesn't use it at all. */
+ start = round_down(start, 128);
+ end = round_up(end, 128);
+
+ drm_clflush_virt_range((void *)start, end - start);
+ } else {
+ drm_clflush_virt_range(addr, length);
+ }
+
+}
+
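In the swizzled case the helper widens the flush to 128-byte boundaries so that both interleaved channels are covered. A small standalone example of that rounding; the macros below mimic the kernel's power-of-two round_down/round_up purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define round_down(x, a)        ((x) & ~((uintptr_t)(a) - 1))
#define round_up(x, a)          round_down((x) + (a) - 1, a)

int main(void)
{
        uintptr_t addr = 0x1050, len = 0x20;            /* 32-byte request */
        uintptr_t start = round_down(addr, 128);        /* 0x1000 */
        uintptr_t end   = round_up(addr + len, 128);    /* 0x1080 */

        /* Prints: flush 0x1000..0x1080 (128 bytes) */
        printf("flush 0x%lx..0x%lx (%lu bytes)\n",
               (unsigned long)start, (unsigned long)end,
               (unsigned long)(end - start));
        return 0;
}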
+/* Only difference to the fast-path function is that this can handle bit17
+ * and uses non-atomic copy and kmap functions. */
+static int
+shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling, bool needs_clflush)
+{
+ char *vaddr;
+ int ret;
+
+ vaddr = kmap(page);
+ if (needs_clflush)
+ shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+ page_length,
+ page_do_bit17_swizzling);
+
+ if (page_do_bit17_swizzling)
+ ret = __copy_to_user_swizzled(user_data,
+ vaddr, shmem_page_offset,
+ page_length);
+ else
+ ret = __copy_to_user(user_data,
+ vaddr + shmem_page_offset,
+ page_length);
+ kunmap(page);
+
+ return ret;
+}
+
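shmem_pread_fast() and shmem_pread_slow() above form a two-tier copy scheme that i915_gem_shmem_pread() below drives: attempt the atomic, non-faulting copy while struct_mutex is held, and only when that fails drop the lock, prefault the user buffer and retry with the sleeping copy. A generic sketch of that control flow; the helper names are placeholders, not the driver's API:

#include <stddef.h>

/* Stand-ins for the atomic/non-atomic copy pair and the lock handling above. */
extern int copy_chunk_atomic(void *dst, const void *src, size_t n);    /* may fail, never sleeps */
extern int copy_chunk_sleeping(void *dst, const void *src, size_t n);  /* may fault and sleep */
extern void lock_state(void);
extern void unlock_state(void);

/* Try the cheap non-faulting path under the lock; fall back to the slow,
 * faultable path only after dropping the lock. */
static int copy_two_tier(void *dst, const void *src, size_t n)
{
        int ret = copy_chunk_atomic(dst, src, n);
        if (ret == 0)
                return 0;

        unlock_state();                 /* cannot take a page fault with the lock held */
        ret = copy_chunk_sleeping(dst, src, n);
        lock_state();
        return ret;
}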
+static int
+i915_gem_shmem_pread(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pread *args,
+ struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
char __user *user_data;
ssize_t remain;
loff_t offset;
- int shmem_page_offset, page_length, ret;
+ int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ int hit_slowpath = 0;
+ int prefaulted = 0;
+ int needs_clflush = 0;
+ int release_page;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- offset = args->offset;
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+ /* If we're not in the cpu read domain, set ourself into the gtt
+ * read domain and manually flush cachelines (if required). This
+ * optimizes for the case when the gpu will dirty the data
+ * anyway again before the next pread happens. */
+ if (obj->cache_level == I915_CACHE_NONE)
+ needs_clflush = 1;
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret)
+ return ret;
+ }
- mutex_unlock(&dev->struct_mutex);
+ offset = args->offset;
while (remain > 0) {
struct page *page;
- char *vaddr;
/* Operation in this page
*
@@ -413,28 +435,51 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
+ if (obj->pages) {
+ page = obj->pages[offset >> PAGE_SHIFT];
+ release_page = 0;
+ } else {
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto out;
+ }
+ release_page = 1;
}
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
- vaddr = kmap(page);
- if (page_do_bit17_swizzling)
- ret = __copy_to_user_swizzled(user_data,
- vaddr, shmem_page_offset,
- page_length);
- else
- ret = __copy_to_user(user_data,
- vaddr + shmem_page_offset,
- page_length);
- kunmap(page);
+ ret = shmem_pread_fast(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ needs_clflush);
+ if (ret == 0)
+ goto next_page;
- mark_page_accessed(page);
+ hit_slowpath = 1;
+ page_cache_get(page);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (!prefaulted) {
+ ret = fault_in_multipages_writeable(user_data, remain);
+ /* Userspace is tricking us, but we've already clobbered
+ * its pages with the prefault and promised to write the
+ * data up to the first fault. Hence ignore any errors
+ * and just continue. */
+ (void)ret;
+ prefaulted = 1;
+ }
+
+ ret = shmem_pread_slow(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ needs_clflush);
+
+ mutex_lock(&dev->struct_mutex);
page_cache_release(page);
+next_page:
+ mark_page_accessed(page);
+ if (release_page)
+ page_cache_release(page);
if (ret) {
ret = -EFAULT;
@@ -447,10 +492,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
}
out:
- mutex_lock(&dev->struct_mutex);
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
+ if (hit_slowpath) {
+ /* Fixup: Kill any reinstated backing storage pages */
+ if (obj->madv == __I915_MADV_PURGED)
+ i915_gem_object_truncate(obj);
+ }
return ret;
}
@@ -476,11 +522,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
- args->size);
- if (ret)
- return -EFAULT;
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -498,19 +539,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
goto out;
}
- trace_i915_gem_object_pread(obj, args->offset, args->size);
-
- ret = i915_gem_object_set_cpu_read_domain_range(obj,
- args->offset,
- args->size);
- if (ret)
+ /* prime objects have no backing filp to GEM pread/pwrite
+ * pages from.
+ */
+ if (!obj->base.filp) {
+ ret = -EINVAL;
goto out;
+ }
- ret = -EFAULT;
- if (!i915_gem_object_needs_bit17_swizzle(obj))
- ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
- if (ret == -EFAULT)
- ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
+ trace_i915_gem_object_pread(obj, args->offset, args->size);
+
+ ret = i915_gem_shmem_pread(dev, obj, args, file);
out:
drm_gem_object_unreference(&obj->base);
@@ -529,40 +568,19 @@ fast_user_write(struct io_mapping *mapping,
char __user *user_data,
int length)
{
- char *vaddr_atomic;
+ void __iomem *vaddr_atomic;
+ void *vaddr;
unsigned long unwritten;
vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
- unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
+ /* We can use the cpu mem copy function because this is X86. */
+ vaddr = (void __force*)vaddr_atomic + page_offset;
+ unwritten = __copy_from_user_inatomic_nocache(vaddr,
user_data, length);
io_mapping_unmap_atomic(vaddr_atomic);
return unwritten;
}
-/* Here's the write path which can sleep for
- * page faults
- */
-
-static inline void
-slow_kernel_write(struct io_mapping *mapping,
- loff_t gtt_base, int gtt_offset,
- struct page *user_page, int user_offset,
- int length)
-{
- char __iomem *dst_vaddr;
- char *src_vaddr;
-
- dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
- src_vaddr = kmap(user_page);
-
- memcpy_toio(dst_vaddr + gtt_offset,
- src_vaddr + user_offset,
- length);
-
- kunmap(user_page);
- io_mapping_unmap(dst_vaddr);
-}
-
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
@@ -577,7 +595,19 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
- int page_offset, page_length;
+ int page_offset, page_length, ret;
+
+ ret = i915_gem_object_pin(obj, 0, true);
+ if (ret)
+ goto out;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto out_unpin;
+
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto out_unpin;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -602,214 +632,133 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
* retry in the slow path.
*/
if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
- page_offset, user_data, page_length))
- return -EFAULT;
+ page_offset, user_data, page_length)) {
+ ret = -EFAULT;
+ goto out_unpin;
+ }
remain -= page_length;
user_data += page_length;
offset += page_length;
}
- return 0;
+out_unpin:
+ i915_gem_object_unpin(obj);
+out:
+ return ret;
}
-/**
- * This is the fallback GTT pwrite path, which uses get_user_pages to pin
- * the memory and maps it using kmap_atomic for copying.
- *
- * This code resulted in x11perf -rgb10text consuming about 10% more CPU
- * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
- */
+/* Per-page copy function for the shmem pwrite fastpath.
+ * Flushes invalid cachelines before writing to the target if
+ * needs_clflush_before is set and flushes out any written cachelines after
+ * writing if needs_clflush is set. */
static int
-i915_gem_gtt_pwrite_slow(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling,
+ bool needs_clflush_before,
+ bool needs_clflush_after)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- ssize_t remain;
- loff_t gtt_page_base, offset;
- loff_t first_data_page, last_data_page, num_pages;
- loff_t pinned_pages, i;
- struct page **user_pages;
- struct mm_struct *mm = current->mm;
- int gtt_page_offset, data_page_offset, data_page_index, page_length;
+ char *vaddr;
int ret;
- uint64_t data_ptr = args->data_ptr;
-
- remain = args->size;
-
- /* Pin the user pages containing the data. We can't fault while
- * holding the struct mutex, and all of the pwrite implementations
- * want to hold it while dereferencing the user data.
- */
- first_data_page = data_ptr / PAGE_SIZE;
- last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
- num_pages = last_data_page - first_data_page + 1;
-
- user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
- if (user_pages == NULL)
- return -ENOMEM;
-
- mutex_unlock(&dev->struct_mutex);
- down_read(&mm->mmap_sem);
- pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- num_pages, 0, 0, user_pages, NULL);
- up_read(&mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- if (pinned_pages < num_pages) {
- ret = -EFAULT;
- goto out_unpin_pages;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- goto out_unpin_pages;
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto out_unpin_pages;
-
- offset = obj->gtt_offset + args->offset;
-
- while (remain > 0) {
- /* Operation in this page
- *
- * gtt_page_base = page offset within aperture
- * gtt_page_offset = offset within page in aperture
- * data_page_index = page number in get_user_pages return
- * data_page_offset = offset with data_page_index page.
- * page_length = bytes to copy for this page
- */
- gtt_page_base = offset & PAGE_MASK;
- gtt_page_offset = offset_in_page(offset);
- data_page_index = data_ptr / PAGE_SIZE - first_data_page;
- data_page_offset = offset_in_page(data_ptr);
-
- page_length = remain;
- if ((gtt_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - gtt_page_offset;
- if ((data_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - data_page_offset;
-
- slow_kernel_write(dev_priv->mm.gtt_mapping,
- gtt_page_base, gtt_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length);
-
- remain -= page_length;
- offset += page_length;
- data_ptr += page_length;
- }
+ if (unlikely(page_do_bit17_swizzling))
+ return -EINVAL;
-out_unpin_pages:
- for (i = 0; i < pinned_pages; i++)
- page_cache_release(user_pages[i]);
- drm_free_large(user_pages);
+ vaddr = kmap_atomic(page);
+ if (needs_clflush_before)
+ drm_clflush_virt_range(vaddr + shmem_page_offset,
+ page_length);
+ ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
+ user_data,
+ page_length);
+ if (needs_clflush_after)
+ drm_clflush_virt_range(vaddr + shmem_page_offset,
+ page_length);
+ kunmap_atomic(vaddr);
return ret;
}
-/**
- * This is the fast shmem pwrite path, which attempts to directly
- * copy_from_user into the kmapped pages backing the object.
- */
+/* Only difference to the fast-path function is that this can handle bit17
+ * and uses non-atomic copy and kmap functions. */
static int
-i915_gem_shmem_pwrite_fast(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling,
+ bool needs_clflush_before,
+ bool needs_clflush_after)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- ssize_t remain;
- loff_t offset;
- char __user *user_data;
- int page_offset, page_length;
-
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- remain = args->size;
-
- offset = args->offset;
- obj->dirty = 1;
-
- while (remain > 0) {
- struct page *page;
- char *vaddr;
- int ret;
-
- /* Operation in this page
- *
- * page_offset = offset within page
- * page_length = bytes to copy for this page
- */
- page_offset = offset_in_page(offset);
- page_length = remain;
- if ((page_offset + remain) > PAGE_SIZE)
- page_length = PAGE_SIZE - page_offset;
-
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ char *vaddr;
+ int ret;
- vaddr = kmap_atomic(page);
- ret = __copy_from_user_inatomic(vaddr + page_offset,
+ vaddr = kmap(page);
+ if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
+ shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+ page_length,
+ page_do_bit17_swizzling);
+ if (page_do_bit17_swizzling)
+ ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
user_data,
page_length);
- kunmap_atomic(vaddr);
-
- set_page_dirty(page);
- mark_page_accessed(page);
- page_cache_release(page);
-
- /* If we get a fault while copying data, then (presumably) our
- * source page isn't available. Return the error and we'll
- * retry in the slow path.
- */
- if (ret)
- return -EFAULT;
-
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
+ else
+ ret = __copy_from_user(vaddr + shmem_page_offset,
+ user_data,
+ page_length);
+ if (needs_clflush_after)
+ shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+ page_length,
+ page_do_bit17_swizzling);
+ kunmap(page);
- return 0;
+ return ret;
}
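
Both pwrite helpers above operate on one page's worth of data at a time; the caller clamps each copy so it never crosses a page boundary. A minimal standalone sketch of that clamping, with PAGE_SIZE, the offset_in_page() helper and the sample values assumed purely for illustration:

/* Illustrative sketch only: split a linear write of `remain` bytes starting
 * at `offset` into per-page chunks, mirroring the page_offset/page_length
 * clamping used by the pwrite paths. Not driver code. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096u
#define offset_in_page(p) ((unsigned int)((p) & (PAGE_SIZE - 1)))

int main(void)
{
	size_t offset = 4000;   /* starting byte offset into the object */
	size_t remain = 10000;  /* total bytes to copy */

	while (remain > 0) {
		unsigned int page_offset = offset_in_page(offset);
		size_t page_length = remain;

		/* Never cross a page boundary in a single copy. */
		if (page_offset + page_length > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		printf("page %zu: copy %zu bytes at offset %u\n",
		       offset / PAGE_SIZE, page_length, page_offset);

		remain -= page_length;
		offset += page_length;
	}
	return 0;
}
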
-/**
- * This is the fallback shmem pwrite path, which uses get_user_pages to pin
- * the memory and maps it using kmap_atomic for copying.
- *
- * This avoids taking mmap_sem for faulting on the user's address while the
- * struct_mutex is held.
- */
static int
-i915_gem_shmem_pwrite_slow(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+i915_gem_shmem_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
loff_t offset;
char __user *user_data;
- int shmem_page_offset, page_length, ret;
+ int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ int hit_slowpath = 0;
+ int needs_clflush_after = 0;
+ int needs_clflush_before = 0;
+ int release_page;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+		/* If we're not in the CPU write domain, move into the GTT
+		 * write domain and manually flush cachelines (if required).
+		 * This optimizes for the case when the GPU will use the data
+		 * right away, in which case we have to clflush anyway. */
+ if (obj->cache_level == I915_CACHE_NONE)
+ needs_clflush_after = 1;
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+ }
+	/* The same trick applies to invalidating partially written
+	 * cachelines before writing. */
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
+ && obj->cache_level == I915_CACHE_NONE)
+ needs_clflush_before = 1;
+
offset = args->offset;
obj->dirty = 1;
- mutex_unlock(&dev->struct_mutex);
-
while (remain > 0) {
struct page *page;
- char *vaddr;
+ int partial_cacheline_write;
/* Operation in this page
*
@@ -822,29 +771,51 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
+		/* If we don't overwrite a cacheline completely we need to be
+		 * careful to have up-to-date data by first clflushing. Don't
+		 * overcomplicate things and flush the entire page. */
+ partial_cacheline_write = needs_clflush_before &&
+ ((shmem_page_offset | page_length)
+ & (boot_cpu_data.x86_clflush_size - 1));
+
+ if (obj->pages) {
+ page = obj->pages[offset >> PAGE_SHIFT];
+ release_page = 0;
+ } else {
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto out;
+ }
+ release_page = 1;
}
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
- vaddr = kmap(page);
- if (page_do_bit17_swizzling)
- ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
- user_data,
- page_length);
- else
- ret = __copy_from_user(vaddr + shmem_page_offset,
- user_data,
- page_length);
- kunmap(page);
+ ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ partial_cacheline_write,
+ needs_clflush_after);
+ if (ret == 0)
+ goto next_page;
+
+ hit_slowpath = 1;
+ page_cache_get(page);
+ mutex_unlock(&dev->struct_mutex);
+ ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ partial_cacheline_write,
+ needs_clflush_after);
+
+ mutex_lock(&dev->struct_mutex);
+ page_cache_release(page);
+next_page:
set_page_dirty(page);
mark_page_accessed(page);
- page_cache_release(page);
+ if (release_page)
+ page_cache_release(page);
if (ret) {
ret = -EFAULT;
@@ -857,17 +828,21 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
}
out:
- mutex_lock(&dev->struct_mutex);
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
- /* and flush dirty cachelines in case the object isn't in the cpu write
- * domain anymore. */
- if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj);
- intel_gtt_chipset_flush();
+ if (hit_slowpath) {
+ /* Fixup: Kill any reinstated backing storage pages */
+ if (obj->madv == __I915_MADV_PURGED)
+ i915_gem_object_truncate(obj);
+ /* and flush dirty cachelines in case the object isn't in the cpu write
+ * domain anymore. */
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ i915_gem_clflush_object(obj);
+ intel_gtt_chipset_flush();
+ }
}
+ if (needs_clflush_after)
+ intel_gtt_chipset_flush();
+
return ret;
}
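
The partial_cacheline_write test used above reduces to a single mask check: a pre-write clflush is only needed when the copy's start offset or length does not cover whole cachelines. A standalone sketch, with the 64-byte clflush size assumed purely for illustration:

/* Illustrative sketch only, not driver code. */
#include <stdio.h>

static int partial_cacheline_write(unsigned int offset, unsigned int len,
				   unsigned int clflush_size)
{
	/* Any low bit set in offset or len means a partially covered line. */
	return ((offset | len) & (clflush_size - 1)) != 0;
}

int main(void)
{
	printf("%d\n", partial_cacheline_write(0, 128, 64));  /* 0: whole lines */
	printf("%d\n", partial_cacheline_write(32, 128, 64)); /* 1: unaligned start */
	printf("%d\n", partial_cacheline_write(0, 100, 64));  /* 1: ragged tail */
	return 0;
}
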
@@ -892,8 +867,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
- args->size);
+ ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
+ args->size);
if (ret)
return -EFAULT;
@@ -914,8 +889,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
+	/* prime objects have no backing filp for GEM to pread/pwrite
+	 * pages from.
+	 */
+ if (!obj->base.filp) {
+ ret = -EINVAL;
+ goto out;
+ }
+
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+ ret = -EFAULT;
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
* different detiling behavior between reading and writing.
@@ -928,42 +912,18 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
if (obj->gtt_space &&
+ obj->cache_level == I915_CACHE_NONE &&
+ obj->tiling_mode == I915_TILING_NONE &&
+ obj->map_and_fenceable &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- ret = i915_gem_object_pin(obj, 0, true);
- if (ret)
- goto out;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- goto out_unpin;
-
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto out_unpin;
-
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
- if (ret == -EFAULT)
- ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
-
-out_unpin:
- i915_gem_object_unpin(obj);
-
- if (ret != -EFAULT)
- goto out;
- /* Fall through to the shmfs paths because the gtt paths might
- * fail with non-page-backed user pointers (e.g. gtt mappings
- * when moving data between textures). */
+		/* Note that the gtt paths might fail with non-page-backed user
+		 * pointers (e.g. gtt mappings when moving data between
+		 * textures). Fall back to the shmem path in that case. */
}
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret)
- goto out;
-
- ret = -EFAULT;
- if (!i915_gem_object_needs_bit17_swizzle(obj))
- ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
if (ret == -EFAULT)
- ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+ ret = i915_gem_shmem_pwrite(dev, obj, args, file);
out:
drm_gem_object_unreference(&obj->base);
@@ -986,9 +946,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
uint32_t write_domain = args->write_domain;
int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
/* Only handle setting domains to types used by the CPU. */
if (write_domain & I915_GEM_GPU_DOMAINS)
return -EINVAL;
@@ -1042,9 +999,6 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret = 0;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -1080,13 +1034,18 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
unsigned long addr;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
return -ENOENT;
+	/* prime objects have no backing filp for GEM to mmap
+	 * pages from.
+	 */
+ if (!obj->filp) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -EINVAL;
+ }
+
addr = vm_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
@@ -1151,10 +1110,10 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
}
- if (obj->tiling_mode == I915_TILING_NONE)
- ret = i915_gem_object_put_fence(obj);
- else
- ret = i915_gem_object_get_fence(obj, NULL);
+ if (!obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
+ ret = i915_gem_object_get_fence(obj);
if (ret)
goto unlock;
@@ -1308,9 +1267,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
struct drm_i915_gem_object *obj;
int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -1368,14 +1324,10 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_mmap_gtt *args = data;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
-
-static int
+int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
gfp_t gfpmask)
{
@@ -1384,6 +1336,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
struct inode *inode;
struct page *page;
+ if (obj->pages || obj->sg_table)
+ return 0;
+
/* Get the list of pages out of our struct file. They'll be pinned
* at this point until we release them.
*/
@@ -1425,6 +1380,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
int page_count = obj->base.size / PAGE_SIZE;
int i;
+ if (!obj->pages)
+ return;
+
BUG_ON(obj->madv == __I915_MADV_PURGED);
if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -1473,7 +1431,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
if (obj->fenced_gpu_access) {
obj->last_fenced_seqno = seqno;
- obj->last_fenced_ring = ring;
/* Bump MRU to take account of the delayed flush */
if (obj->fence_reg != I915_FENCE_REG_NONE) {
@@ -1512,15 +1469,11 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (obj->pin_count != 0)
- list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
- else
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
BUG_ON(!list_empty(&obj->gpu_write_list));
BUG_ON(!obj->active);
obj->ring = NULL;
- obj->last_fenced_ring = NULL;
i915_gem_object_move_off_active(obj);
obj->fenced_gpu_access = false;
@@ -1546,6 +1499,9 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
inode = obj->base.filp->f_path.dentry->d_inode;
shmem_truncate_range(inode, 0, (loff_t)-1);
+ if (obj->base.map_list.map)
+ drm_gem_free_mmap_offset(&obj->base);
+
obj->madv = __I915_MADV_PURGED;
}
@@ -1711,30 +1667,29 @@ static void i915_gem_reset_fences(struct drm_device *dev)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
- struct drm_i915_gem_object *obj = reg->obj;
- if (!obj)
- continue;
+ i915_gem_write_fence(dev, i, NULL);
- if (obj->tiling_mode)
- i915_gem_release_mmap(obj);
+ if (reg->obj)
+ i915_gem_object_fence_lost(reg->obj);
- reg->obj->fence_reg = I915_FENCE_REG_NONE;
- reg->obj->fenced_gpu_access = false;
- reg->obj->last_fenced_seqno = 0;
- reg->obj->last_fenced_ring = NULL;
- i915_gem_clear_fence_reg(dev, reg);
+ reg->pin_count = 0;
+ reg->obj = NULL;
+ INIT_LIST_HEAD(&reg->lru_list);
}
+
+ INIT_LIST_HEAD(&dev_priv->mm.fence_list);
}
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
+ struct intel_ring_buffer *ring;
int i;
- for (i = 0; i < I915_NUM_RINGS; i++)
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_reset_ring_lists(dev_priv, ring);
/* Remove anything from the flushing lists. The GPU cache is likely
* to be lost on reset along with the data, so simply move the
@@ -1839,24 +1794,11 @@ void
i915_gem_retire_requests(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int i;
- if (!list_empty(&dev_priv->mm.deferred_free_list)) {
- struct drm_i915_gem_object *obj, *next;
-
- /* We must be careful that during unbind() we do not
- * accidentally infinitely recurse into retire requests.
- * Currently:
- * retire -> free -> unbind -> wait -> retire_ring
- */
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.deferred_free_list,
- mm_list)
- i915_gem_free_object_tail(obj);
- }
-
- for (i = 0; i < I915_NUM_RINGS; i++)
- i915_gem_retire_requests_ring(&dev_priv->ring[i]);
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_retire_requests_ring(ring);
}
static void
@@ -1864,6 +1806,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
struct drm_device *dev;
+ struct intel_ring_buffer *ring;
bool idle;
int i;
@@ -1883,9 +1826,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
* objects indefinitely.
*/
idle = true;
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
-
+ for_each_ring(ring, dev_priv, i) {
if (!list_empty(&ring->gpu_write_list)) {
struct drm_i915_gem_request *request;
int ret;
@@ -1907,20 +1848,10 @@ i915_gem_retire_work_handler(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int
-i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno,
- bool do_retire)
+static int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
- u32 ier;
- int ret = 0;
-
- BUG_ON(seqno == 0);
+ BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (atomic_read(&dev_priv->mm.wedged)) {
struct completion *x = &dev_priv->error_completion;
@@ -1935,6 +1866,20 @@ i915_wait_request(struct intel_ring_buffer *ring,
return recovery_complete ? -EIO : -EAGAIN;
}
+ return 0;
+}
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
+ */
+static int
+i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+{
+ int ret = 0;
+
+ BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
if (seqno == ring->outstanding_lazy_request) {
struct drm_i915_gem_request *request;
@@ -1948,54 +1893,67 @@ i915_wait_request(struct intel_ring_buffer *ring,
return ret;
}
- seqno = request->seqno;
+ BUG_ON(seqno != request->seqno);
}
- if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
- if (HAS_PCH_SPLIT(ring->dev))
- ier = I915_READ(DEIER) | I915_READ(GTIER);
- else
- ier = I915_READ(IER);
- if (!ier) {
- DRM_ERROR("something (likely vbetool) disabled "
- "interrupts, re-enabling\n");
- ring->dev->driver->irq_preinstall(ring->dev);
- ring->dev->driver->irq_postinstall(ring->dev);
- }
+ return ret;
+}
- trace_i915_gem_request_wait_begin(ring, seqno);
-
- ring->waiting_seqno = seqno;
- if (ring->irq_get(ring)) {
- if (dev_priv->mm.interruptible)
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- else
- wait_event(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
-
- ring->irq_put(ring);
- } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged), 3000))
- ret = -EBUSY;
- ring->waiting_seqno = 0;
-
- trace_i915_gem_request_wait_end(ring, seqno);
- }
+static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+ bool interruptible)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ int ret = 0;
+
+ if (i915_seqno_passed(ring->get_seqno(ring), seqno))
+ return 0;
+
+ trace_i915_gem_request_wait_begin(ring, seqno);
+ if (WARN_ON(!ring->irq_get(ring)))
+ return -ENODEV;
+
+#define EXIT_COND \
+ (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
+ atomic_read(&dev_priv->mm.wedged))
+
+ if (interruptible)
+ ret = wait_event_interruptible(ring->irq_queue,
+ EXIT_COND);
+ else
+ wait_event(ring->irq_queue, EXIT_COND);
+
+ ring->irq_put(ring);
+ trace_i915_gem_request_wait_end(ring, seqno);
+#undef EXIT_COND
+
+ return ret;
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_request(struct intel_ring_buffer *ring,
+ uint32_t seqno)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ int ret = 0;
+
+ BUG_ON(seqno == 0);
+
+ ret = i915_gem_check_wedge(dev_priv);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
+
+ ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
if (atomic_read(&dev_priv->mm.wedged))
ret = -EAGAIN;
- /* Directly dispatch request retiring. While we have the work queue
- * to handle this, the waiter on a request often wants an associated
- * buffer to have made it to the inactive list, and we would need
- * a separate wait queue to handle that.
- */
- if (ret == 0 && do_retire)
- i915_gem_retire_requests_ring(ring);
-
return ret;
}
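
The wait path above hinges on a wrap-safe "has this seqno been reached" comparison. A standalone sketch of that style of check (the helper name and values here are assumptions for illustration, not the driver's i915_seqno_passed()):

/* Illustrative sketch only: signed subtraction keeps the comparison correct
 * across 32-bit seqno wraparound. */
#include <stdio.h>
#include <stdint.h>

static int seqno_passed(uint32_t current, uint32_t target)
{
	return (int32_t)(current - target) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(100, 50));        /* 1 */
	printf("%d\n", seqno_passed(50, 100));        /* 0 */
	printf("%d\n", seqno_passed(5, 0xfffffff0u)); /* 1: passed across wrap */
	return 0;
}
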
@@ -2017,15 +1975,58 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
* it.
*/
if (obj->active) {
- ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
- true);
+ ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
if (ret)
return ret;
+ i915_gem_retire_requests_ring(obj->ring);
}
return 0;
}
+/**
+ * i915_gem_object_sync - sync an object to a ring.
+ *
+ * @obj: object which may be in use on another ring.
+ * @to: ring we wish to use the object on. May be NULL.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Calling with NULL implies synchronizing the object with the CPU
+ * rather than a particular GPU ring.
+ *
+ * Returns 0 if successful, else propagates up the lower layer error.
+ */
+int
+i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to)
+{
+ struct intel_ring_buffer *from = obj->ring;
+ u32 seqno;
+ int ret, idx;
+
+ if (from == NULL || to == from)
+ return 0;
+
+ if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
+ return i915_gem_object_wait_rendering(obj);
+
+ idx = intel_ring_sync_index(from, to);
+
+ seqno = obj->last_rendering_seqno;
+ if (seqno <= from->sync_seqno[idx])
+ return 0;
+
+ ret = i915_gem_check_olr(obj->ring, seqno);
+ if (ret)
+ return ret;
+
+ ret = to->sync_to(to, from, seqno);
+ if (!ret)
+ from->sync_seqno[idx] = seqno;
+
+ return ret;
+}
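
i915_gem_object_sync() above avoids redundant waits by remembering, per source ring, the newest seqno already synchronized against. A simplified standalone sketch of that bookkeeping (the struct layout, indexing and sample values are illustrative assumptions, not the driver's data structures):

/* Illustrative sketch only. */
#include <stdio.h>
#include <stdint.h>

#define NUM_RINGS 3

struct ring {
	int id;
	uint32_t sync_seqno[NUM_RINGS]; /* newest seqno already waited on, per source ring */
};

/* Returns 1 if a wait would be emitted, 0 if it can be skipped. */
static int sync_to(struct ring *to, const struct ring *from, uint32_t seqno)
{
	if ((int32_t)(seqno - to->sync_seqno[from->id]) <= 0)
		return 0;               /* already ordered after this work */
	to->sync_seqno[from->id] = seqno;
	return 1;                       /* emit the semaphore or request wait */
}

int main(void)
{
	struct ring render = { .id = 0 }, blt = { .id = 1 };

	printf("%d\n", sync_to(&blt, &render, 10)); /* 1: first wait */
	printf("%d\n", sync_to(&blt, &render, 8));  /* 0: older work, skip */
	printf("%d\n", sync_to(&blt, &render, 12)); /* 1: newer work, wait again */
	return 0;
}
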
+
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
u32 old_write_domain, old_read_domains;
@@ -2062,13 +2063,11 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (obj->gtt_space == NULL)
return 0;
- if (obj->pin_count != 0) {
- DRM_ERROR("Attempting to unbind pinned buffer\n");
- return -EINVAL;
- }
+ if (obj->pin_count)
+ return -EBUSY;
ret = i915_gem_object_finish_gpu(obj);
- if (ret == -ERESTARTSYS)
+ if (ret)
return ret;
/* Continue on if we fail due to EIO, the GPU is hung so we
* should be safe and we need to cleanup or else we might
@@ -2095,16 +2094,18 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
/* release the fence reg _after_ flushing */
ret = i915_gem_object_put_fence(obj);
- if (ret == -ERESTARTSYS)
+ if (ret)
return ret;
trace_i915_gem_object_unbind(obj);
- i915_gem_gtt_unbind_object(obj);
+ if (obj->has_global_gtt_mapping)
+ i915_gem_gtt_unbind_object(obj);
if (obj->has_aliasing_ppgtt_mapping) {
i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
obj->has_aliasing_ppgtt_mapping = 0;
}
+ i915_gem_gtt_finish_object(obj);
i915_gem_object_put_pages_gtt(obj);
@@ -2145,7 +2146,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
return 0;
}
-static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
{
int ret;
@@ -2159,208 +2160,201 @@ static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
return ret;
}
- return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
- do_retire);
+ return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}
-int i915_gpu_idle(struct drm_device *dev, bool do_retire)
+int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int ret, i;
/* Flush everything onto the inactive list. */
- for (i = 0; i < I915_NUM_RINGS; i++) {
- ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
+ for_each_ring(ring, dev_priv, i) {
+ ret = i915_ring_idle(ring);
if (ret)
return ret;
+
+ /* Is the device fubar? */
+ if (WARN_ON(!list_empty(&ring->gpu_write_list)))
+ return -EBUSY;
}
return 0;
}
-static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
- 0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
- val |= (uint64_t)((obj->stride / 128) - 1) <<
- SANDYBRIDGE_FENCE_PITCH_SHIFT;
-
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 6);
- if (ret)
- return ret;
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= (uint64_t)((obj->stride / 128) - 1) <<
+ SANDYBRIDGE_FENCE_PITCH_SHIFT;
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
- intel_ring_emit(pipelined, (u32)val);
- intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
- intel_ring_emit(pipelined, (u32)(val >> 32));
- intel_ring_advance(pipelined);
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
} else
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
+ val = 0;
- return 0;
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
+ POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
}
-static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
- 0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
- val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
-
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 6);
- if (ret)
- return ret;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
- intel_ring_emit(pipelined, (u32)val);
- intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
- intel_ring_emit(pipelined, (u32)(val >> 32));
- intel_ring_advance(pipelined);
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
} else
- I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
+ val = 0;
- return 0;
+ I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
+ POSTING_READ(FENCE_REG_965_0 + reg * 8);
}
-static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- u32 fence_reg, val, pitch_val;
- int tile_width;
-
- if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
- (size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
- obj->gtt_offset, obj->map_and_fenceable, size))
- return -EINVAL;
+ u32 val;
- if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
- tile_width = 128;
- else
- tile_width = 512;
-
- /* Note: pitch better be a power of two tile widths */
- pitch_val = obj->stride / tile_width;
- pitch_val = ffs(pitch_val) - 1;
-
- val = obj->gtt_offset;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I915_FENCE_SIZE_BITS(size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
-
- fence_reg = obj->fence_reg;
- if (fence_reg < 8)
- fence_reg = FENCE_REG_830_0 + fence_reg * 4;
- else
- fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
+ int pitch_val;
+ int tile_width;
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 4);
- if (ret)
- return ret;
+ WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ obj->gtt_offset, obj->map_and_fenceable, size);
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(pipelined, fence_reg);
- intel_ring_emit(pipelined, val);
- intel_ring_advance(pipelined);
+ if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ /* Note: pitch better be a power of two tile widths */
+ pitch_val = obj->stride / tile_width;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I915_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
} else
- I915_WRITE(fence_reg, val);
+ val = 0;
- return 0;
+ if (reg < 8)
+ reg = FENCE_REG_830_0 + reg * 4;
+ else
+ reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
}
-static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- int regnum = obj->fence_reg;
uint32_t val;
- uint32_t pitch_val;
- if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
- (size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
- obj->gtt_offset, size))
- return -EINVAL;
-
- pitch_val = obj->stride / 128;
- pitch_val = ffs(pitch_val) - 1;
-
- val = obj->gtt_offset;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I830_FENCE_SIZE_BITS(size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
+ uint32_t pitch_val;
+
+ WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+ obj->gtt_offset, size);
+
+ pitch_val = obj->stride / 128;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I830_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+ } else
+ val = 0;
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 4);
- if (ret)
- return ret;
+ I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+ POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
- intel_ring_emit(pipelined, val);
- intel_ring_advance(pipelined);
- } else
- I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
+ case 5:
+ case 4: i965_write_fence_reg(dev, reg, obj); break;
+ case 3: i915_write_fence_reg(dev, reg, obj); break;
+ case 2: i830_write_fence_reg(dev, reg, obj); break;
+ default: break;
+ }
+}
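
For reference, the gen4+ fence writers above pack the fenced range, pitch and tiling mode into a single 64-bit register value. A standalone sketch of that packing, using placeholder shift constants rather than the real i915_reg.h definitions:

/* Illustrative sketch only; the shift constants are assumptions. */
#include <stdio.h>
#include <stdint.h>

#define FENCE_PITCH_SHIFT  2        /* assumed for illustration */
#define FENCE_TILING_Y_BIT (1u << 1)
#define FENCE_REG_VALID    (1u << 0)

static uint64_t fence_val(uint32_t gtt_offset, uint32_t size,
			  uint32_t stride, int tiling_y)
{
	uint64_t val;

	/* Upper dword: end of the fenced range; lower dword: its start. */
	val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
	val |= gtt_offset & 0xfffff000;
	val |= (uint64_t)((stride / 128) - 1) << FENCE_PITCH_SHIFT;
	if (tiling_y)
		val |= FENCE_TILING_Y_BIT;
	val |= FENCE_REG_VALID;

	return val;
}

int main(void)
{
	/* 1 MiB object at GTT offset 16 MiB, 512-byte stride, X-tiled. */
	printf("0x%016llx\n",
	       (unsigned long long)fence_val(16u << 20, 1u << 20, 512, 0));
	return 0;
}
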
- return 0;
+static inline int fence_number(struct drm_i915_private *dev_priv,
+ struct drm_i915_fence_reg *fence)
+{
+ return fence - dev_priv->fence_regs;
}
-static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable)
{
- return i915_seqno_passed(ring->get_seqno(ring), seqno);
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int reg = fence_number(dev_priv, fence);
+
+ i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+ if (enable) {
+ obj->fence_reg = reg;
+ fence->obj = obj;
+ list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+ } else {
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ fence->obj = NULL;
+ list_del_init(&fence->lru_list);
+ }
}
static int
-i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
{
int ret;
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->last_fenced_ring,
+ ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
if (ret)
return ret;
@@ -2369,18 +2363,12 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
obj->fenced_gpu_access = false;
}
- if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
- if (!ring_passed_seqno(obj->last_fenced_ring,
- obj->last_fenced_seqno)) {
- ret = i915_wait_request(obj->last_fenced_ring,
- obj->last_fenced_seqno,
- true);
- if (ret)
- return ret;
- }
+ if (obj->last_fenced_seqno) {
+ ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
+ if (ret)
+ return ret;
obj->last_fenced_seqno = 0;
- obj->last_fenced_ring = NULL;
}
/* Ensure that all CPU reads are completed before installing a fence
@@ -2395,34 +2383,29 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- if (obj->tiling_mode)
- i915_gem_release_mmap(obj);
-
- ret = i915_gem_object_flush_fence(obj, NULL);
+ ret = i915_gem_object_flush_fence(obj);
if (ret)
return ret;
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
- WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
- i915_gem_clear_fence_reg(obj->base.dev,
- &dev_priv->fence_regs[obj->fence_reg]);
+ if (obj->fence_reg == I915_FENCE_REG_NONE)
+ return 0;
- obj->fence_reg = I915_FENCE_REG_NONE;
- }
+ i915_gem_object_update_fence(obj,
+ &dev_priv->fence_regs[obj->fence_reg],
+ false);
+ i915_gem_object_fence_lost(obj);
return 0;
}
static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev,
- struct intel_ring_buffer *pipelined)
+i915_find_fence_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_fence_reg *reg, *first, *avail;
+ struct drm_i915_fence_reg *reg, *avail;
int i;
/* First try to find a free reg */
@@ -2440,204 +2423,77 @@ i915_find_fence_reg(struct drm_device *dev,
return NULL;
/* None available, try to steal one or wait for a user to finish */
- avail = first = NULL;
list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
if (reg->pin_count)
continue;
- if (first == NULL)
- first = reg;
-
- if (!pipelined ||
- !reg->obj->last_fenced_ring ||
- reg->obj->last_fenced_ring == pipelined) {
- avail = reg;
- break;
- }
+ return reg;
}
- if (avail == NULL)
- avail = first;
-
- return avail;
+ return NULL;
}
/**
- * i915_gem_object_get_fence - set up a fence reg for an object
+ * i915_gem_object_get_fence - set up fencing for an object
* @obj: object to map through a fence reg
- * @pipelined: ring on which to queue the change, or NULL for CPU access
- * @interruptible: must we wait uninterruptibly for the register to retire?
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
- *
* This function walks the fence regs looking for a free one for @obj,
* stealing one if it can't find any.
*
* It then sets up the reg based on the object's properties: address, pitch
* and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
*/
int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ bool enable = obj->tiling_mode != I915_TILING_NONE;
struct drm_i915_fence_reg *reg;
int ret;
- /* XXX disable pipelining. There are bugs. Shocking. */
- pipelined = NULL;
+	/* Have we updated the tiling parameters on the object such that we
+	 * need to serialise the write to the associated fence register?
+	 */
+ if (obj->fence_dirty) {
+ ret = i915_gem_object_flush_fence(obj);
+ if (ret)
+ return ret;
+ }
/* Just update our place in the LRU if our fence is getting reused. */
if (obj->fence_reg != I915_FENCE_REG_NONE) {
reg = &dev_priv->fence_regs[obj->fence_reg];
- list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
-
- if (obj->tiling_changed) {
- ret = i915_gem_object_flush_fence(obj, pipelined);
- if (ret)
- return ret;
-
- if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
- pipelined = NULL;
-
- if (pipelined) {
- reg->setup_seqno =
- i915_gem_next_request_seqno(pipelined);
- obj->last_fenced_seqno = reg->setup_seqno;
- obj->last_fenced_ring = pipelined;
- }
-
- goto update;
+ if (!obj->fence_dirty) {
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list);
+ return 0;
}
+ } else if (enable) {
+ reg = i915_find_fence_reg(dev);
+ if (reg == NULL)
+ return -EDEADLK;
- if (!pipelined) {
- if (reg->setup_seqno) {
- if (!ring_passed_seqno(obj->last_fenced_ring,
- reg->setup_seqno)) {
- ret = i915_wait_request(obj->last_fenced_ring,
- reg->setup_seqno,
- true);
- if (ret)
- return ret;
- }
+ if (reg->obj) {
+ struct drm_i915_gem_object *old = reg->obj;
- reg->setup_seqno = 0;
- }
- } else if (obj->last_fenced_ring &&
- obj->last_fenced_ring != pipelined) {
- ret = i915_gem_object_flush_fence(obj, pipelined);
+ ret = i915_gem_object_flush_fence(old);
if (ret)
return ret;
- }
-
- return 0;
- }
-
- reg = i915_find_fence_reg(dev, pipelined);
- if (reg == NULL)
- return -EDEADLK;
-
- ret = i915_gem_object_flush_fence(obj, pipelined);
- if (ret)
- return ret;
-
- if (reg->obj) {
- struct drm_i915_gem_object *old = reg->obj;
-
- drm_gem_object_reference(&old->base);
-
- if (old->tiling_mode)
- i915_gem_release_mmap(old);
- ret = i915_gem_object_flush_fence(old, pipelined);
- if (ret) {
- drm_gem_object_unreference(&old->base);
- return ret;
+ i915_gem_object_fence_lost(old);
}
+ } else
+ return 0;
- if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
- pipelined = NULL;
-
- old->fence_reg = I915_FENCE_REG_NONE;
- old->last_fenced_ring = pipelined;
- old->last_fenced_seqno =
- pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
-
- drm_gem_object_unreference(&old->base);
- } else if (obj->last_fenced_seqno == 0)
- pipelined = NULL;
-
- reg->obj = obj;
- list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
- obj->fence_reg = reg - dev_priv->fence_regs;
- obj->last_fenced_ring = pipelined;
-
- reg->setup_seqno =
- pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
- obj->last_fenced_seqno = reg->setup_seqno;
-
-update:
- obj->tiling_changed = false;
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- ret = sandybridge_write_fence_reg(obj, pipelined);
- break;
- case 5:
- case 4:
- ret = i965_write_fence_reg(obj, pipelined);
- break;
- case 3:
- ret = i915_write_fence_reg(obj, pipelined);
- break;
- case 2:
- ret = i830_write_fence_reg(obj, pipelined);
- break;
- }
-
- return ret;
-}
-
-/**
- * i915_gem_clear_fence_reg - clear out fence register info
- * @obj: object to clear
- *
- * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj.
- */
-static void
-i915_gem_clear_fence_reg(struct drm_device *dev,
- struct drm_i915_fence_reg *reg)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t fence_reg = reg - dev_priv->fence_regs;
-
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
- break;
- case 5:
- case 4:
- I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
- break;
- case 3:
- if (fence_reg >= 8)
- fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
- else
- case 2:
- fence_reg = FENCE_REG_830_0 + fence_reg * 4;
-
- I915_WRITE(fence_reg, 0);
- break;
- }
+ i915_gem_object_update_fence(obj, reg, enable);
+ obj->fence_dirty = false;
- list_del_init(&reg->lru_list);
- reg->obj = NULL;
- reg->setup_seqno = 0;
- reg->pin_count = 0;
+ return 0;
}
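
i915_find_fence_reg() above prefers a free register and otherwise steals the least-recently-used fence that is not pinned, which is why get_fence only returns -EDEADLK when every register is pinned. A simplified standalone sketch of that policy (plain arrays and fields stand in for the driver's lists; all names here are illustrative):

/* Illustrative sketch only. */
#include <stdio.h>

#define NUM_FENCES 16

struct fence {
	int in_use;
	int pin_count;
	unsigned long last_used;   /* lower == older */
};

static int find_fence(const struct fence *f, int n)
{
	int i, victim = -1;

	for (i = 0; i < n; i++)
		if (!f[i].in_use)
			return i;          /* free register, use it directly */

	for (i = 0; i < n; i++) {
		if (f[i].pin_count)
			continue;          /* cannot steal a pinned fence */
		if (victim < 0 || f[i].last_used < f[victim].last_used)
			victim = i;        /* oldest unpinned candidate so far */
	}
	return victim;                     /* -1 mirrors the -EDEADLK case */
}

int main(void)
{
	struct fence fences[NUM_FENCES] = {
		[0] = { 1, 0, 10 }, [1] = { 1, 1, 5 }, [2] = { 1, 0, 3 },
	};

	printf("chosen fence: %d\n", find_fence(fences, 3)); /* steals index 2 */
	return 0;
}
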
/**
@@ -2749,7 +2605,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
return ret;
}
- ret = i915_gem_gtt_bind_object(obj);
+ ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
i915_gem_object_put_pages_gtt(obj);
drm_mm_put_block(obj->gtt_space);
@@ -2761,6 +2617,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
goto search_free;
}
+ if (!dev_priv->mm.aliasing_ppgtt)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
@@ -2878,6 +2737,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
uint32_t old_write_domain, old_read_domains;
int ret;
@@ -2918,6 +2778,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
old_read_domains,
old_write_domain);
+ /* And bump the LRU for this access */
+ if (i915_gem_object_is_inactive(obj))
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
return 0;
}
@@ -2953,7 +2817,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return ret;
}
- i915_gem_gtt_rebind_object(obj, cache_level);
+ if (obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(obj, cache_level);
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
@@ -2990,11 +2855,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
* any flushes to be pipelined (for pageflips).
- *
- * For the display plane, we want to be in the GTT but out of any write
- * domains. So in many ways this looks like set_to_gtt_domain() apart from the
- * ability to pipeline the waits, pinning and any additional subtleties
- * that may differentiate the display plane from ordinary buffers.
*/
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
@@ -3009,8 +2869,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
return ret;
if (pipelined != obj->ring) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret == -ERESTARTSYS)
+ ret = i915_gem_object_sync(obj, pipelined);
+ if (ret)
return ret;
}
@@ -3082,7 +2942,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
-static int
+int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
uint32_t old_write_domain, old_read_domains;
@@ -3095,17 +2955,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
+ if (write || obj->pending_gpu_write) {
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret)
+ return ret;
+ }
i915_gem_object_flush_gtt_write_domain(obj);
- /* If we have a partially-valid cache of the object in the CPU,
- * finish invalidating it and free the per-page flags.
- */
- i915_gem_object_set_to_full_cpu_read_domain(obj);
-
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -3136,113 +2993,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
return 0;
}
-/**
- * Moves the object from a partially CPU read to a full one.
- *
- * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
- * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
- */
-static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
-{
- if (!obj->page_cpu_valid)
- return;
-
- /* If we're partially in the CPU read domain, finish moving it in.
- */
- if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
- int i;
-
- for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
- if (obj->page_cpu_valid[i])
- continue;
- drm_clflush_pages(obj->pages + i, 1);
- }
- }
-
- /* Free the page_cpu_valid mappings which are now stale, whether
- * or not we've got I915_GEM_DOMAIN_CPU.
- */
- kfree(obj->page_cpu_valid);
- obj->page_cpu_valid = NULL;
-}
-
-/**
- * Set the CPU read domain on a range of the object.
- *
- * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
- * not entirely valid. The page_cpu_valid member of the object flags which
- * pages have been flushed, and will be respected by
- * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
- * of the whole object.
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
- */
-static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
- uint64_t offset, uint64_t size)
-{
- uint32_t old_read_domains;
- int i, ret;
-
- if (offset == 0 && size == obj->base.size)
- return i915_gem_object_set_to_cpu_domain(obj, 0);
-
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
-
- i915_gem_object_flush_gtt_write_domain(obj);
-
- /* If we're already fully in the CPU read domain, we're done. */
- if (obj->page_cpu_valid == NULL &&
- (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
- return 0;
-
- /* Otherwise, create/clear the per-page CPU read domain flag if we're
- * newly adding I915_GEM_DOMAIN_CPU
- */
- if (obj->page_cpu_valid == NULL) {
- obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
- GFP_KERNEL);
- if (obj->page_cpu_valid == NULL)
- return -ENOMEM;
- } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
- memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
-
- /* Flush the cache on any pages that are still invalid from the CPU's
- * perspective.
- */
- for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
- i++) {
- if (obj->page_cpu_valid[i])
- continue;
-
- drm_clflush_pages(obj->pages + i, 1);
-
- obj->page_cpu_valid[i] = 1;
- }
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
-
- old_read_domains = obj->base.read_domains;
- obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- obj->base.write_domain);
-
- return 0;
-}
-
/* Throttle our rendering by waiting until the ring has completed our requests
* emitted over 20 msec ago.
*
@@ -3280,28 +3030,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (seqno == 0)
return 0;
- ret = 0;
- if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
- /* And wait for the seqno passing without holding any locks and
- * causing extra latency for others. This is safe as the irq
- * generation is designed to be run atomically and so is
- * lockless.
- */
- if (ring->irq_get(ring)) {
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- ring->irq_put(ring);
-
- if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
- ret = -EIO;
- } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged), 3000)) {
- ret = -EBUSY;
- }
- }
-
+ ret = __wait_seqno(ring, seqno, true);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -3313,12 +3042,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
- WARN_ON(i915_verify_lists(dev));
if (obj->gtt_space != NULL) {
if ((alignment && obj->gtt_offset & (alignment - 1)) ||
@@ -3343,34 +3069,23 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
return ret;
}
- if (obj->pin_count++ == 0) {
- if (!obj->active)
- list_move_tail(&obj->mm_list,
- &dev_priv->mm.pinned_list);
- }
+ if (!obj->has_global_gtt_mapping && map_and_fenceable)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
+ obj->pin_count++;
obj->pin_mappable |= map_and_fenceable;
- WARN_ON(i915_verify_lists(dev));
return 0;
}
void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- WARN_ON(i915_verify_lists(dev));
BUG_ON(obj->pin_count == 0);
BUG_ON(obj->gtt_space == NULL);
- if (--obj->pin_count == 0) {
- if (!obj->active)
- list_move_tail(&obj->mm_list,
- &dev_priv->mm.inactive_list);
+ if (--obj->pin_count == 0)
obj->pin_mappable = false;
- }
- WARN_ON(i915_verify_lists(dev));
}
int
@@ -3494,20 +3209,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
- } else if (obj->ring->outstanding_lazy_request ==
- obj->last_rendering_seqno) {
- struct drm_i915_gem_request *request;
-
- /* This ring is not being cleared by active usage,
- * so emit a request to do so.
- */
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request) {
- ret = i915_add_request(obj->ring, NULL, request);
- if (ret)
- kfree(request);
- } else
- ret = -ENOMEM;
+ } else {
+ ret = i915_gem_check_olr(obj->ring,
+ obj->last_rendering_seqno);
}
/* Update the active list for the hardware's current position.
@@ -3587,6 +3291,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct address_space *mapping;
+ u32 mask;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
@@ -3597,8 +3302,15 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
return NULL;
}
+ mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+ if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+ /* 965gm cannot relocate objects above 4GiB. */
+ mask &= ~__GFP_HIGHMEM;
+ mask |= __GFP_DMA32;
+ }
+
mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ mapping_set_gfp_mask(mapping, mask);
i915_gem_info_add_obj(dev_priv, size);
@@ -3643,46 +3355,42 @@ int i915_gem_init_object(struct drm_gem_object *obj)
return 0;
}
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
-
- ret = i915_gem_object_unbind(obj);
- if (ret == -ERESTARTSYS) {
- list_move(&obj->mm_list,
- &dev_priv->mm.deferred_free_list);
- return;
- }
trace_i915_gem_object_destroy(obj);
+ if (gem_obj->import_attach)
+ drm_prime_gem_destroy(gem_obj, obj->sg_table);
+
+ if (obj->phys_obj)
+ i915_gem_detach_phys_object(dev, obj);
+
+ obj->pin_count = 0;
+ if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
+ bool was_interruptible;
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ WARN_ON(i915_gem_object_unbind(obj));
+
+ dev_priv->mm.interruptible = was_interruptible;
+ }
+
if (obj->base.map_list.map)
drm_gem_free_mmap_offset(&obj->base);
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
- kfree(obj->page_cpu_valid);
kfree(obj->bit_17);
kfree(obj);
}
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- struct drm_device *dev = obj->base.dev;
-
- while (obj->pin_count > 0)
- i915_gem_object_unpin(obj);
-
- if (obj->phys_obj)
- i915_gem_detach_phys_object(dev, obj);
-
- i915_gem_free_object_tail(obj);
-}
-
int
i915_gem_idle(struct drm_device *dev)
{
@@ -3696,20 +3404,16 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
+ i915_gem_retire_requests(dev);
/* Under UMS, be paranoid and evict. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_gem_evict_inactive(dev, false);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_gem_evict_everything(dev, false);
i915_gem_reset_fences(dev);
@@ -3747,9 +3451,9 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
if (IS_GEN6(dev))
- I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
else
- I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
}
void i915_gem_init_ppgtt(struct drm_device *dev)
@@ -3787,21 +3491,27 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
pd_offset <<= 16;
if (INTEL_INFO(dev)->gen == 6) {
- uint32_t ecochk = I915_READ(GAM_ECOCHK);
+ uint32_t ecochk, gab_ctl, ecobits;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+ gab_ctl = I915_READ(GAB_CTL);
+ I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
/* GFX_MODE is per-ring on gen7+ */
}
- for (i = 0; i < I915_NUM_RINGS; i++) {
- ring = &dev_priv->ring[i];
-
+ for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
- GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
@@ -3845,14 +3555,80 @@ cleanup_render_ring:
return ret;
}
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+ if (i915_enable_ppgtt >= 0)
+ return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable ppgtt on SNB if VT-d is on. */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return true;
+}
+
+int i915_gem_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long gtt_size, mappable_size;
+ int ret;
+
+ gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+ mutex_lock(&dev->struct_mutex);
+ if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+ /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+ * aperture accordingly when using aliasing ppgtt. */
+ gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+
+ i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
+
+ ret = i915_gem_init_aliasing_ppgtt(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ } else {
+		/* Let GEM manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch
+ * page. There are a number of places where the hardware
+ * apparently prefetches past the end of the object, and we've
+ * seen multiple hangs with the GPU head pointer stuck in a
+ * batchbuffer bound at the last page of the aperture. One page
+ * should be enough to keep any prefetching inside of the
+ * aperture.
+ */
+ i915_gem_init_global_gtt(dev, 0, mappable_size,
+ gtt_size);
+ }
+
+ ret = i915_gem_init_hw(dev);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+ return ret;
+ }
+
+ /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev_priv->dri1.allow_batchbuffer = 1;
+ return 0;
+}
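
The sizing in i915_gem_init() is shift arithmetic on PTE counts, with the aliasing-PPGTT page directory carved out of the top of the global GTT. A standalone sketch using assumed entry counts (the 512-entry page directory and the sample GTT sizes are illustrative, not hardware values):

/* Illustrative sketch only. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ul << PAGE_SHIFT)
#define PPGTT_PD_ENTRIES 512ul   /* assumed, see I915_PPGTT_PD_ENTRIES */

int main(void)
{
	unsigned long gtt_total_entries = 512ul * 1024;   /* 2 GiB of PTEs */
	unsigned long gtt_mappable_entries = 64ul * 1024; /* 256 MiB aperture */

	unsigned long gtt_size = gtt_total_entries << PAGE_SHIFT;
	unsigned long mappable_size = gtt_mappable_entries << PAGE_SHIFT;

	/* Aliasing PPGTT steals its page-directory pages from the GTT. */
	gtt_size -= PPGTT_PD_ENTRIES * PAGE_SIZE;

	printf("gtt %lu MiB, mappable %lu MiB\n",
	       gtt_size >> 20, mappable_size >> 20);
	return 0;
}
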
+
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int i;
- for (i = 0; i < I915_NUM_RINGS; i++)
- intel_cleanup_ring_buffer(&dev_priv->ring[i]);
+ for_each_ring(ring, dev_priv, i)
+ intel_cleanup_ring_buffer(ring);
}
int
@@ -3860,7 +3636,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret, i;
+ int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
@@ -3882,10 +3658,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
BUG_ON(!list_empty(&dev_priv->mm.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- for (i = 0; i < I915_NUM_RINGS; i++) {
- BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
- BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
- }
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -3944,9 +3716,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
- INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
@@ -3958,12 +3728,8 @@ i915_gem_load(struct drm_device *dev)
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
- u32 tmp = I915_READ(MI_ARB_STATE);
- if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
- /* arb state is a masked write, so set bit + bit in mask */
- tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
- I915_WRITE(MI_ARB_STATE, tmp);
- }
+ I915_WRITE(MI_ARB_STATE,
+ _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
}
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
@@ -3978,9 +3744,7 @@ i915_gem_load(struct drm_device *dev)
dev_priv->num_fence_regs = 8;
/* Initialize fence registers to zero */
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
- }
+ i915_gem_reset_fences(dev);
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -4268,7 +4032,7 @@ rescan:
* This has a dramatic impact to reduce the number of
* OOM-killer events whilst running the GPU aggressively.
*/
- if (i915_gpu_idle(dev, true) == 0)
+ if (i915_gpu_idle(dev) == 0)
goto rescan;
}
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index cc93cac242d6..a4f6aaabca99 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -114,22 +114,6 @@ i915_verify_lists(struct drm_device *dev)
}
}
- list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
- if (obj->base.dev != dev ||
- !atomic_read(&obj->base.refcount.refcount)) {
- DRM_ERROR("freed pinned %p\n", obj);
- err++;
- break;
- } else if (!obj->pin_count || obj->active ||
- (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
- DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
- obj,
- obj->pin_count, obj->active,
- obj->base.write_domain);
- err++;
- }
- }
-
return warned = err;
}
#endif /* WATCH_INACTIVE */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
new file mode 100644
index 000000000000..aa308e1337db
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2012 Red Hat Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "i915_drv.h"
+#include <linux/dma-buf.h>
+
+static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int npages = obj->base.size / PAGE_SIZE;
+ struct sg_table *sg = NULL;
+ int ret;
+ int nents;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!obj->pages) {
+ ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+ if (ret)
+ goto out;
+ }
+
+ /* link the pages into an SG then map the sg */
+ sg = drm_prime_pages_to_sg(obj->pages, npages);
+ nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+out:
+ mutex_unlock(&dev->struct_mutex);
+ return sg;
+}
+
+static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sg, enum dma_data_direction dir)
+{
+ dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+
+ if (obj->base.export_dma_buf == dma_buf) {
+ /* drop the reference that the export fd holds */
+ obj->base.export_dma_buf = NULL;
+ drm_gem_object_unreference_unlocked(&obj->base);
+ }
+}
+
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (obj->dma_buf_vmapping) {
+ obj->vmapping_count++;
+ goto out_unlock;
+ }
+
+ if (!obj->pages) {
+ ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
+ }
+ }
+
+ obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+ if (!obj->dma_buf_vmapping) {
+ DRM_ERROR("failed to vmap object\n");
+ goto out_unlock;
+ }
+
+ obj->vmapping_count = 1;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return obj->dma_buf_vmapping;
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return;
+
+ --obj->vmapping_count;
+ if (obj->vmapping_count == 0) {
+ vunmap(obj->dma_buf_vmapping);
+ obj->dma_buf_vmapping = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static const struct dma_buf_ops i915_dmabuf_ops = {
+ .map_dma_buf = i915_gem_map_dma_buf,
+ .unmap_dma_buf = i915_gem_unmap_dma_buf,
+ .release = i915_gem_dmabuf_release,
+ .kmap = i915_gem_dmabuf_kmap,
+ .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
+ .kunmap = i915_gem_dmabuf_kunmap,
+ .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+ .mmap = i915_gem_dmabuf_mmap,
+ .vmap = i915_gem_dmabuf_vmap,
+ .vunmap = i915_gem_dmabuf_vunmap,
+};
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gem_obj, int flags)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+ return dma_buf_export(obj, &i915_dmabuf_ops,
+ obj->base.size, 0600);
+}
+
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct drm_i915_gem_object *obj;
+ int npages;
+ int size;
+ int ret;
+
+ /* is this one of our own objects? */
+ if (dma_buf->ops == &i915_dmabuf_ops) {
+ obj = dma_buf->priv;
+ /* is it from our device? */
+ if (obj->base.dev == dev) {
+ drm_gem_object_reference(&obj->base);
+ return &obj->base;
+ }
+ }
+
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ size = dma_buf->size;
+ npages = size / PAGE_SIZE;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (obj == NULL) {
+ ret = -ENOMEM;
+ goto fail_unmap;
+ }
+
+ ret = drm_gem_private_object_init(dev, &obj->base, size);
+ if (ret) {
+ kfree(obj);
+ goto fail_unmap;
+ }
+
+ obj->sg_table = sg;
+ obj->base.import_attach = attach;
+
+ return &obj->base;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 21a82710f4b2..ae7c24e12e52 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -35,6 +35,9 @@
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
+ if (obj->pin_count)
+ return false;
+
list_add(&obj->exec_list, unwind);
return drm_mm_scan_add_block(obj->gtt_space);
}
@@ -90,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
- if (obj->base.write_domain || obj->pin_count)
+ if (obj->base.write_domain)
continue;
if (mark_free(obj, &unwind_list))
@@ -99,14 +102,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
/* Finally add anything with a pending flush (in order of retirement) */
list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
- if (obj->pin_count)
- continue;
-
if (mark_free(obj, &unwind_list))
goto found;
}
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (!obj->base.write_domain || obj->pin_count)
+ if (!obj->base.write_domain)
continue;
if (mark_free(obj, &unwind_list))
@@ -166,8 +166,9 @@ int
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ struct drm_i915_gem_object *obj, *next;
bool lists_empty;
+ int ret;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
@@ -177,29 +178,24 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
trace_i915_gem_evict_everything(dev, purgeable_only);
- /* Flush everything (on to the inactive lists) and evict */
- ret = i915_gpu_idle(dev, true);
+ /* The gpu_idle will flush everything in the write domain to the
+ * active list. Then we must move everything off the active list
+ * with retire requests.
+ */
+ ret = i915_gpu_idle(dev);
if (ret)
return ret;
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ i915_gem_retire_requests(dev);
- return i915_gem_evict_inactive(dev, purgeable_only);
-}
-
-/** Unbinds all inactive objects. */
-int
-i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj, *next;
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ /* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list, mm_list) {
if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
- int ret = i915_gem_object_unbind(obj);
- if (ret)
- return ret;
+ if (obj->pin_count == 0)
+ WARN_ON(i915_gem_object_unbind(obj));
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index de431942ded4..974a9f1068a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -266,6 +266,12 @@ eb_destroy(struct eb_objects *eb)
kfree(eb);
}
+static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
+{
+ return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+ obj->cache_level != I915_CACHE_NONE);
+}
+
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
@@ -273,6 +279,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
+ struct drm_i915_gem_object *target_i915_obj;
uint32_t target_offset;
int ret = -EINVAL;
@@ -281,7 +288,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (unlikely(target_obj == NULL))
return -ENOENT;
- target_offset = to_intel_bo(target_obj)->gtt_offset;
+ target_i915_obj = to_intel_bo(target_obj);
+ target_offset = target_i915_obj->gtt_offset;
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
@@ -352,11 +360,19 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
}
+ /* We can't wait for rendering with pagefaults disabled */
+ if (obj->active && in_atomic())
+ return -EFAULT;
+
reloc->delta += target_offset;
- if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+ if (use_cpu_reloc(obj)) {
uint32_t page_offset = reloc->offset & ~PAGE_MASK;
char *vaddr;
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret)
+ return ret;
+
vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
kunmap_atomic(vaddr);
@@ -365,11 +381,11 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
- /* We can't wait for rendering with pagefaults disabled */
- if (obj->active && in_atomic())
- return -EFAULT;
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
@@ -383,6 +399,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
io_mapping_unmap_atomic(reloc_page);
}
+ /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
+ * pipe_control writes because the gpu doesn't properly redirect them
+ * through the ppgtt for non_secure batchbuffers. */
+ if (unlikely(IS_GEN6(dev) &&
+ reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
+ !target_i915_obj->has_global_gtt_mapping)) {
+ i915_gem_gtt_bind_object(target_i915_obj,
+ target_i915_obj->cache_level);
+ }
+
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
@@ -393,30 +419,46 @@ static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb)
{
+#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
+ struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
- int i, ret;
+ int remain, ret;
user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_i915_gem_relocation_entry reloc;
- if (__copy_from_user_inatomic(&reloc,
- user_relocs+i,
- sizeof(reloc)))
+ remain = entry->relocation_count;
+ while (remain) {
+ struct drm_i915_gem_relocation_entry *r = stack_reloc;
+ int count = remain;
+ if (count > ARRAY_SIZE(stack_reloc))
+ count = ARRAY_SIZE(stack_reloc);
+ remain -= count;
+
+ if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
return -EFAULT;
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
- if (ret)
- return ret;
+ do {
+ u64 offset = r->presumed_offset;
- if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
- &reloc.presumed_offset,
- sizeof(reloc.presumed_offset)))
- return -EFAULT;
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
+ if (ret)
+ return ret;
+
+ if (r->presumed_offset != offset &&
+ __copy_to_user_inatomic(&user_relocs->presumed_offset,
+ &r->presumed_offset,
+ sizeof(r->presumed_offset))) {
+ return -EFAULT;
+ }
+
+ user_relocs++;
+ r++;
+ } while (--count);
}
return 0;
+#undef N_RELOC
}
static int
@@ -465,6 +507,13 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
#define __EXEC_OBJECT_HAS_FENCE (1<<31)
static int
+need_reloc_mappable(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ return entry->relocation_count && !use_cpu_reloc(obj);
+}
+
+static int
pin_and_fence_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
{
@@ -477,8 +526,7 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable =
- entry->relocation_count ? true : need_fence;
+ need_mappable = need_fence || need_reloc_mappable(obj);
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
if (ret)
@@ -486,18 +534,13 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
if (has_fenced_gpu_access) {
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
- if (obj->tiling_mode) {
- ret = i915_gem_object_get_fence(obj, ring);
- if (ret)
- goto err_unpin;
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ goto err_unpin;
+ if (i915_gem_object_pin_fence(obj))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
- i915_gem_object_pin_fence(obj);
- } else {
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto err_unpin;
- }
+
obj->pending_fenced_gpu_access = true;
}
}
@@ -535,8 +578,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable =
- entry->relocation_count ? true : need_fence;
+ need_mappable = need_fence || need_reloc_mappable(obj);
if (need_mappable)
list_move(&obj->exec_list, &ordered_objects);
@@ -576,8 +618,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable =
- entry->relocation_count ? true : need_fence;
+ need_mappable = need_fence || need_reloc_mappable(obj);
if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
@@ -798,64 +839,6 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
return 0;
}
-static bool
-intel_enable_semaphores(struct drm_device *dev)
-{
- if (INTEL_INFO(dev)->gen < 6)
- return 0;
-
- if (i915_semaphores >= 0)
- return i915_semaphores;
-
- /* Disable semaphores on SNB */
- if (INTEL_INFO(dev)->gen == 6)
- return 0;
-
- return 1;
-}
-
-static int
-i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *to)
-{
- struct intel_ring_buffer *from = obj->ring;
- u32 seqno;
- int ret, idx;
-
- if (from == NULL || to == from)
- return 0;
-
- /* XXX gpu semaphores are implicated in various hard hangs on SNB */
- if (!intel_enable_semaphores(obj->base.dev))
- return i915_gem_object_wait_rendering(obj);
-
- idx = intel_ring_sync_index(from, to);
-
- seqno = obj->last_rendering_seqno;
- if (seqno <= from->sync_seqno[idx])
- return 0;
-
- if (seqno == from->outstanding_lazy_request) {
- struct drm_i915_gem_request *request;
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
-
- ret = i915_add_request(from, NULL, request);
- if (ret) {
- kfree(request);
- return ret;
- }
-
- seqno = request->seqno;
- }
-
- from->sync_seqno[idx] = seqno;
-
- return to->sync_to(to, from, seqno - 1);
-}
-
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
@@ -917,7 +900,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
}
list_for_each_entry(obj, objects, exec_list) {
- ret = i915_gem_execbuffer_sync_rings(obj, ring);
+ ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
}
@@ -955,7 +938,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
- if (fault_in_pages_readable(ptr, length))
+ if (fault_in_multipages_readable(ptr, length))
return -EFAULT;
}
@@ -984,11 +967,14 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->pending_gpu_write = true;
list_move_tail(&obj->gpu_write_list,
&ring->gpu_write_list);
- intel_mark_busy(ring->dev, obj);
+ if (obj->pin_count) /* check for potential scanout */
+ intel_mark_busy(ring->dev, obj);
}
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
+
+ intel_mark_busy(ring->dev, NULL);
}
static void
@@ -1078,17 +1064,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ring = &dev_priv->ring[RCS];
break;
case I915_EXEC_BSD:
- if (!HAS_BSD(dev)) {
- DRM_DEBUG("execbuf with invalid ring (BSD)\n");
- return -EINVAL;
- }
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BLT:
- if (!HAS_BLT(dev)) {
- DRM_DEBUG("execbuf with invalid ring (BLT)\n");
- return -EINVAL;
- }
ring = &dev_priv->ring[BCS];
break;
default:
@@ -1096,6 +1074,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
+ if (!intel_ring_initialized(ring)) {
+ DRM_DEBUG("execbuf with invalid ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return -EINVAL;
+ }
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
mask = I915_EXEC_CONSTANTS_MASK;
@@ -1133,11 +1116,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (INTEL_INFO(dev)->gen >= 5) {
+ DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+ return -EINVAL;
+ }
+
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
return -EINVAL;
}
+
cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
@@ -1242,9 +1231,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret)
goto err;
+ i915_gem_retire_requests(dev);
BUG_ON(ring->sync_seqno[i]);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a135c61f4119..9fd25a435536 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -96,11 +96,10 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
- }
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
- if (dev_priv->mm.gtt->needs_dmar) {
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
0, 4096,
PCI_DMA_BIDIRECTIONAL);
@@ -112,8 +111,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
}
ppgtt->pt_dma_addr[i] = pt_addr;
- } else
- pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+ }
}
ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
@@ -269,7 +267,13 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
BUG();
}
- if (dev_priv->mm.gtt->needs_dmar) {
+ if (obj->sg_table) {
+ i915_ppgtt_insert_sg_entries(ppgtt,
+ obj->sg_table->sgl,
+ obj->sg_table->nents,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ pte_flags);
+ } else if (dev_priv->mm.gtt->needs_dmar) {
BUG_ON(!obj->sg_list);
i915_ppgtt_insert_sg_entries(ppgtt,
@@ -319,7 +323,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
- if (i915_gpu_idle(dev_priv->dev, false)) {
+ if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
@@ -346,48 +350,39 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
i915_gem_clflush_object(obj);
- i915_gem_gtt_rebind_object(obj, obj->cache_level);
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
}
intel_gtt_chipset_flush();
}
-int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
- int ret;
-
- if (dev_priv->mm.gtt->needs_dmar) {
- ret = intel_gtt_map_memory(obj->pages,
- obj->base.size >> PAGE_SHIFT,
- &obj->sg_list,
- &obj->num_sg);
- if (ret != 0)
- return ret;
-
- intel_gtt_insert_sg_entries(obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else
- intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- agp_type);
- return 0;
+ if (dev_priv->mm.gtt->needs_dmar)
+ return intel_gtt_map_memory(obj->pages,
+ obj->base.size >> PAGE_SHIFT,
+ &obj->sg_list,
+ &obj->num_sg);
+ else
+ return 0;
}
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
- if (dev_priv->mm.gtt->needs_dmar) {
+ if (obj->sg_table) {
+ intel_gtt_insert_sg_entries(obj->sg_table->sgl,
+ obj->sg_table->nents,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ agp_type);
+ } else if (dev_priv->mm.gtt->needs_dmar) {
BUG_ON(!obj->sg_list);
intel_gtt_insert_sg_entries(obj->sg_list,
@@ -399,19 +394,26 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
obj->base.size >> PAGE_SHIFT,
obj->pages,
agp_type);
+
+ obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
+ intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
+
+ obj->has_global_gtt_mapping = 0;
+}
+
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool interruptible;
interruptible = do_idling(dev_priv);
- intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
-
if (obj->sg_list) {
intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj->sg_list = NULL;
@@ -419,3 +421,23 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
undo_idling(dev_priv, interruptible);
}
+
+void i915_gem_init_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* Subtract the guard page ... */
+ drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+
+ dev_priv->mm.gtt_start = start;
+ dev_priv->mm.gtt_mappable_end = mappable_end;
+ dev_priv->mm.gtt_end = end;
+ dev_priv->mm.gtt_total = end - start;
+ dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+
+ /* ... but ensure that we clear the entire range. */
+ intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
new file mode 100644
index 000000000000..ada2e90a2a60
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright © 2008-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/*
+ * The BIOS typically reserves some of the system's memory for the exclusive
+ * use of the integrated graphics. This memory is no longer available for
+ * use by the OS and so the user finds that his system has less memory
+ * available than he put in. We refer to this memory as stolen.
+ *
+ * The BIOS will allocate its framebuffer from the stolen memory. Our
+ * goal is to try to reuse that object for our own fbcon, which must always
+ * be available for panics. Anything else we can reuse the stolen memory
+ * for is a boon.
+ */
+
+#define PTE_ADDRESS_MASK 0xfffff000
+#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
+#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
+#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
+#define PTE_MAPPING_TYPE_CACHED (3 << 1)
+#define PTE_MAPPING_TYPE_MASK (3 << 1)
+#define PTE_VALID (1 << 0)
+
+/**
+ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+ * a physical one
+ * @dev: drm device
+ * @offset: address to translate
+ *
+ * Some chip functions require allocations from stolen space and need the
+ * physical address of the memory in question.
+ */
+static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->bridge_dev;
+ u32 base;
+
+#if 0
+ /* On the machines I have tested the Graphics Base of Stolen Memory
+ * is unreliable, so compute the base by subtracting the stolen memory
+ * from the Top of Low Usable DRAM which is where the BIOS places
+ * the graphics stolen memory.
+ */
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ /* top 32bits are reserved = 0 */
+ pci_read_config_dword(pdev, 0xA4, &base);
+ } else {
+ /* XXX presume 8xx is the same as i915 */
+ pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
+ }
+#else
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ u16 val;
+ pci_read_config_word(pdev, 0xb0, &val);
+ base = val >> 4 << 20;
+ } else {
+ u8 val;
+ pci_read_config_byte(pdev, 0x9c, &val);
+ base = val >> 3 << 27;
+ }
+ base -= dev_priv->mm.gtt->stolen_size;
+#endif
+
+ return base + offset;
+}
+
+static void i915_warn_stolen(struct drm_device *dev)
+{
+ DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
+ DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+}
+
+static void i915_setup_compression(struct drm_device *dev, int size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+ unsigned long cfb_base;
+ unsigned long ll_base = 0;
+
+ /* Just in case the BIOS is doing something questionable. */
+ intel_disable_fbc(dev);
+
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+ if (compressed_fb)
+ compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+ if (!compressed_fb)
+ goto err;
+
+ cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+ if (!cfb_base)
+ goto err_fb;
+
+ if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+ compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
+ 4096, 4096, 0);
+ if (compressed_llb)
+ compressed_llb = drm_mm_get_block(compressed_llb,
+ 4096, 4096);
+ if (!compressed_llb)
+ goto err_fb;
+
+ ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+ if (!ll_base)
+ goto err_llb;
+ }
+
+ dev_priv->cfb_size = size;
+
+ dev_priv->compressed_fb = compressed_fb;
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+ else if (IS_GM45(dev)) {
+ I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ } else {
+ I915_WRITE(FBC_CFB_BASE, cfb_base);
+ I915_WRITE(FBC_LL_BASE, ll_base);
+ dev_priv->compressed_llb = compressed_llb;
+ }
+
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+ cfb_base, ll_base, size >> 20);
+ return;
+
+err_llb:
+ drm_mm_put_block(compressed_llb);
+err_fb:
+ drm_mm_put_block(compressed_fb);
+err:
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ i915_warn_stolen(dev);
+}
+
+static void i915_cleanup_compression(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ drm_mm_put_block(dev_priv->compressed_fb);
+ if (dev_priv->compressed_llb)
+ drm_mm_put_block(dev_priv->compressed_llb);
+}
+
+void i915_gem_cleanup_stolen(struct drm_device *dev)
+{
+ if (I915_HAS_FBC(dev) && i915_powersave)
+ i915_cleanup_compression(dev);
+}
+
+int i915_gem_init_stolen(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
+
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+ /* Try to set up FBC with a reasonable compressed buffer size */
+ if (I915_HAS_FBC(dev) && i915_powersave) {
+ int cfb_size;
+
+ /* Leave 1M for line length buffer & misc. */
+
+ /* Try to get a 32M buffer... */
+ if (prealloc_size > (36*1024*1024))
+ cfb_size = 32*1024*1024;
+ else /* fall back to 7/8 of the stolen space */
+ cfb_size = prealloc_size * 7 / 8;
+ i915_setup_compression(dev, cfb_size);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 1a9306665987..b964df51cec7 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -354,9 +354,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
- * need to ensure that any fence register is cleared.
+ * need to ensure that any fence register is updated before
+ * the next fenced (either through the GTT or by the BLT unit
+ * on older GPUs) access.
+ *
+ * After updating the tiling parameters, we then flag whether
+ * we need to update an associated fence register. Note this
+ * has to also include the unfenced register the GPU uses
+ * whilst executing a fenced command for an untiled object.
*/
- i915_gem_release_mmap(obj);
obj->map_and_fenceable =
obj->gtt_space == NULL ||
@@ -374,9 +380,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
if (ret == 0) {
- obj->tiling_changed = true;
+ obj->fence_dirty =
+ obj->fenced_gpu_access ||
+ obj->fence_reg != I915_FENCE_REG_NONE;
+
obj->tiling_mode = args->tiling_mode;
obj->stride = args->stride;
+
+ /* Force the fence to be reacquired for GTT access */
+ i915_gem_release_mmap(obj);
}
}
/* we have to maintain this existing ABI... */
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 13b028994b2b..0e72abb9f701 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -34,6 +34,7 @@
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
+#include "i915_drv.h"
typedef struct _drm_i915_batchbuffer32 {
int start; /* agp offset */
@@ -181,7 +182,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
(unsigned long)request);
}
-drm_ioctl_compat_t *i915_compat_ioctls[] = {
+static drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
[DRM_I915_GETPARAM] = compat_i915_getparam,
@@ -189,6 +190,7 @@ drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_ALLOC] = compat_i915_alloc
};
+#ifdef CONFIG_COMPAT
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
@@ -217,3 +219,4 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return ret;
}
+#endif
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index afd4e03e337e..1417660a93ec 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -26,6 +26,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
@@ -35,35 +37,6 @@
#include "i915_trace.h"
#include "intel_drv.h"
-#define MAX_NOPID ((u32)~0)
-
-/**
- * Interrupts that are always left unmasked.
- *
- * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
- * we leave them always unmasked in IMR and then control enabling them through
- * PIPESTAT alone.
- */
-#define I915_INTERRUPT_ENABLE_FIX \
- (I915_ASLE_INTERRUPT | \
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-
-/** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
-
-#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
- PIPE_VBLANK_INTERRUPT_STATUS)
-
-#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
- PIPE_VBLANK_INTERRUPT_ENABLE)
-
-#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
- DRM_I915_VBLANK_PIPE_B)
-
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -118,6 +91,10 @@ void intel_enable_asle(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;
+ /* FIXME: opregion/asle for VLV */
+ if (IS_VALLEYVIEW(dev))
+ return;
+
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
@@ -354,15 +331,12 @@ static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 seqno;
if (ring->obj == NULL)
return;
- seqno = ring->get_seqno(ring);
- trace_i915_gem_request_complete(ring, seqno);
+ trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
- ring->irq_seqno = seqno;
wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) {
dev_priv->hangcheck_count = 0;
@@ -376,8 +350,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps_work);
- u8 new_delay = dev_priv->cur_delay;
u32 pm_iir, pm_imr;
+ u8 new_delay;
spin_lock_irq(&dev_priv->rps_lock);
pm_iir = dev_priv->pm_iir;
@@ -386,51 +360,160 @@ static void gen6_pm_rps_work(struct work_struct *work)
I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps_lock);
- if (!pm_iir)
+ if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
mutex_lock(&dev_priv->dev->struct_mutex);
- if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
- if (dev_priv->cur_delay != dev_priv->max_delay)
- new_delay = dev_priv->cur_delay + 1;
- if (new_delay > dev_priv->max_delay)
- new_delay = dev_priv->max_delay;
- } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
- gen6_gt_force_wake_get(dev_priv);
- if (dev_priv->cur_delay != dev_priv->min_delay)
- new_delay = dev_priv->cur_delay - 1;
- if (new_delay < dev_priv->min_delay) {
- new_delay = dev_priv->min_delay;
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
- ((new_delay << 16) & 0x3f0000));
- } else {
- /* Make sure we continue to get down interrupts
- * until we hit the minimum frequency */
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
- }
- gen6_gt_force_wake_put(dev_priv);
- }
+
+ if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+ new_delay = dev_priv->cur_delay + 1;
+ else
+ new_delay = dev_priv->cur_delay - 1;
gen6_set_rps(dev_priv->dev, new_delay);
- dev_priv->cur_delay = new_delay;
+
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
+static void snb_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 gt_iir)
+{
+
+ if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
+ GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (gt_iir & GEN6_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+ if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[BCS]);
+
+ if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
+ GT_GEN6_BSD_CS_ERROR_INTERRUPT |
+ GT_RENDER_CS_ERROR_INTERRUPT)) {
+ DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
+ i915_handle_error(dev, false);
+ }
+}
+
+static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
+ u32 pm_iir)
+{
+ unsigned long flags;
/*
- * rps_lock not held here because clearing is non-destructive. There is
- * an *extremely* unlikely race with gen6_rps_enable() that is prevented
- * by holding struct_mutex for the duration of the write.
+ * IIR bits should never already be set because IMR should
+ * prevent an interrupt from being shown in IIR. The warning
+ * displays a case where we've unsafely cleared
+ * dev_priv->pm_iir. Although missing an interrupt of the same
+ * type is not a problem, it displays a problem in the logic.
+ *
+ * The mask bit in IMR is cleared by rps_work.
*/
- mutex_unlock(&dev_priv->dev->struct_mutex);
+
+ spin_lock_irqsave(&dev_priv->rps_lock, flags);
+ WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
+ dev_priv->pm_iir |= pm_iir;
+ I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+ POSTING_READ(GEN6_PMIMR);
+ spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+
+ queue_work(dev_priv->wq, &dev_priv->rps_work);
}
-static void pch_irq_handler(struct drm_device *dev)
+static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
{
+ struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 pch_iir;
+ u32 iir, gt_iir, pm_iir;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long irqflags;
int pipe;
+ u32 pipe_stats[I915_MAX_PIPES];
+ u32 vblank_status;
+ int vblank = 0;
+ bool blc_event;
- pch_iir = I915_READ(SDEIIR);
+ atomic_inc(&dev_priv->irq_received);
+
+ vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
+ PIPE_VBLANK_INTERRUPT_STATUS;
+
+ while (true) {
+ iir = I915_READ(VLV_IIR);
+ gt_iir = I915_READ(GTIIR);
+ pm_iir = I915_READ(GEN6_PMIIR);
+
+ if (gt_iir == 0 && pm_iir == 0 && iir == 0)
+ goto out;
+
+ ret = IRQ_HANDLED;
+
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ I915_READ(PORT_HOTPLUG_STAT);
+ }
+
+
+ if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
+ drm_handle_vblank(dev, 0);
+ vblank++;
+ intel_finish_page_flip(dev, 0);
+ }
+
+ if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
+ drm_handle_vblank(dev, 1);
+ vblank++;
+ intel_finish_page_flip(dev, 0);
+ }
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+
+ if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
+
+ I915_WRITE(GTIIR, gt_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ I915_WRITE(VLV_IIR, iir);
+ }
+
+out:
+ return ret;
+}
+
+static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
@@ -471,91 +554,77 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
- struct drm_i915_master_private *master_priv;
+ u32 de_iir, gt_iir, de_ier, pm_iir;
+ irqreturn_t ret = IRQ_NONE;
+ int i;
atomic_inc(&dev_priv->irq_received);
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- POSTING_READ(DEIER);
- de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
- pch_iir = I915_READ(SDEIIR);
- pm_iir = I915_READ(GEN6_PMIIR);
-
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
- goto done;
-
- ret = IRQ_HANDLED;
-
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
+ if (gt_iir) {
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ I915_WRITE(GTIIR, gt_iir);
+ ret = IRQ_HANDLED;
}
- if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
- notify_ring(dev, &dev_priv->ring[RCS]);
- if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
- if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[BCS]);
-
- if (de_iir & DE_GSE_IVB)
- intel_opregion_gse_intr(dev);
-
- if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
- intel_prepare_page_flip(dev, 0);
- intel_finish_page_flip_plane(dev, 0);
- }
+ de_iir = I915_READ(DEIIR);
+ if (de_iir) {
+ if (de_iir & DE_GSE_IVB)
+ intel_opregion_gse_intr(dev);
+
+ for (i = 0; i < 3; i++) {
+ if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+ intel_prepare_page_flip(dev, i);
+ intel_finish_page_flip_plane(dev, i);
+ }
+ if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+ drm_handle_vblank(dev, i);
+ }
- if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
- intel_prepare_page_flip(dev, 1);
- intel_finish_page_flip_plane(dev, 1);
- }
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT_IVB) {
+ u32 pch_iir = I915_READ(SDEIIR);
- if (de_iir & DE_PIPEA_VBLANK_IVB)
- drm_handle_vblank(dev, 0);
+ if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+ pch_irq_handler(dev, pch_iir);
- if (de_iir & DE_PIPEB_VBLANK_IVB)
- drm_handle_vblank(dev, 1);
+ /* clear PCH hotplug event before clearing CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ }
- /* check event from PCH */
- if (de_iir & DE_PCH_EVENT_IVB) {
- if (pch_iir & SDE_HOTPLUG_MASK_CPT)
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- pch_irq_handler(dev);
+ I915_WRITE(DEIIR, de_iir);
+ ret = IRQ_HANDLED;
}
- if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->rps_lock, flags);
- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
- dev_priv->pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
- POSTING_READ(GEN6_PMIMR);
- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
- queue_work(dev_priv->wq, &dev_priv->rps_work);
+ pm_iir = I915_READ(GEN6_PMIIR);
+ if (pm_iir) {
+ if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ ret = IRQ_HANDLED;
}
- /* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
- I915_WRITE(GTIIR, gt_iir);
- I915_WRITE(DEIIR, de_iir);
- I915_WRITE(GEN6_PMIIR, pm_iir);
-
-done:
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
return ret;
}
+static void ilk_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 gt_iir)
+{
+ if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (gt_iir & GT_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+}
+
static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -563,14 +632,9 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
u32 hotplug_mask;
- struct drm_i915_master_private *master_priv;
- u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
atomic_inc(&dev_priv->irq_received);
- if (IS_GEN6(dev))
- bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
-
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -592,19 +656,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
ret = IRQ_HANDLED;
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
-
- if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
- notify_ring(dev, &dev_priv->ring[RCS]);
- if (gt_iir & bsd_usr_interrupt)
- notify_ring(dev, &dev_priv->ring[VCS]);
- if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[BCS]);
+ if (IS_GEN5(dev))
+ ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+ else
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
@@ -629,7 +684,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
if (de_iir & DE_PCH_EVENT) {
if (pch_iir & hotplug_mask)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- pch_irq_handler(dev);
+ pch_irq_handler(dev, pch_iir);
}
if (de_iir & DE_PCU_EVENT) {
@@ -637,25 +692,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
i915_handle_rps_change(dev);
}
- if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
- /*
- * IIR bits should never already be set because IMR should
- * prevent an interrupt from being shown in IIR. The warning
- * displays a case where we've unsafely cleared
- * dev_priv->pm_iir. Although missing an interrupt of the same
- * type is not a problem, it displays a problem in the logic.
- *
- * The mask bit in IMR is cleared by rps_work.
- */
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->rps_lock, flags);
- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
- dev_priv->pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
- POSTING_READ(GEN6_PMIMR);
- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
- queue_work(dev_priv->wq, &dev_priv->rps_work);
- }
+ if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
@@ -691,7 +729,7 @@ static void i915_error_work_func(struct work_struct *work)
if (atomic_read(&dev_priv->mm.wedged)) {
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
- if (!i915_reset(dev, GRDOM_RENDER)) {
+ if (!i915_reset(dev)) {
atomic_set(&dev_priv->mm.wedged, 0);
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
}
@@ -727,7 +765,8 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;
local_irq_save(flags);
- if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
+ if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
+ src->has_global_gtt_mapping) {
void __iomem *s;
/* Simply ignore tiling or any overlapping fence.
@@ -782,10 +821,11 @@ i915_error_object_free(struct drm_i915_error_object *obj)
kfree(obj);
}
-static void
-i915_error_state_free(struct drm_device *dev,
- struct drm_i915_error_state *error)
+void
+i915_error_state_free(struct kref *error_ref)
{
+ struct drm_i915_error_state *error = container_of(error_ref,
+ typeof(*error), ref);
int i;
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
@@ -798,37 +838,56 @@ i915_error_state_free(struct drm_device *dev,
kfree(error->overlay);
kfree(error);
}
+static void capture_bo(struct drm_i915_error_buffer *err,
+ struct drm_i915_gem_object *obj)
+{
+ err->size = obj->base.size;
+ err->name = obj->base.name;
+ err->seqno = obj->last_rendering_seqno;
+ err->gtt_offset = obj->gtt_offset;
+ err->read_domains = obj->base.read_domains;
+ err->write_domain = obj->base.write_domain;
+ err->fence_reg = obj->fence_reg;
+ err->pinned = 0;
+ if (obj->pin_count > 0)
+ err->pinned = 1;
+ if (obj->user_pin_count > 0)
+ err->pinned = -1;
+ err->tiling = obj->tiling_mode;
+ err->dirty = obj->dirty;
+ err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->ring = obj->ring ? obj->ring->id : -1;
+ err->cache_level = obj->cache_level;
+}
-static u32 capture_bo_list(struct drm_i915_error_buffer *err,
- int count,
- struct list_head *head)
+static u32 capture_active_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
{
struct drm_i915_gem_object *obj;
int i = 0;
list_for_each_entry(obj, head, mm_list) {
- err->size = obj->base.size;
- err->name = obj->base.name;
- err->seqno = obj->last_rendering_seqno;
- err->gtt_offset = obj->gtt_offset;
- err->read_domains = obj->base.read_domains;
- err->write_domain = obj->base.write_domain;
- err->fence_reg = obj->fence_reg;
- err->pinned = 0;
- if (obj->pin_count > 0)
- err->pinned = 1;
- if (obj->user_pin_count > 0)
- err->pinned = -1;
- err->tiling = obj->tiling_mode;
- err->dirty = obj->dirty;
- err->purgeable = obj->madv != I915_MADV_WILLNEED;
- err->ring = obj->ring ? obj->ring->id : -1;
- err->cache_level = obj->cache_level;
-
+ capture_bo(err++, obj);
if (++i == count)
break;
+ }
+
+ return i;
+}
+
+static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, gtt_list) {
+ if (obj->pin_count == 0)
+ continue;
- err++;
+ capture_bo(err++, obj);
+ if (++i == count)
+ break;
}
return i;
@@ -901,7 +960,6 @@ static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 6) {
- error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
error->semaphore_mboxes[ring->id][0]
= I915_READ(RING_SYNC_0(ring->mmio_base));
@@ -910,6 +968,7 @@ static void i915_record_ring_state(struct drm_device *dev,
}
if (INTEL_INFO(dev)->gen >= 4) {
+ error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
@@ -919,11 +978,13 @@ static void i915_record_ring_state(struct drm_device *dev,
error->bbaddr = I915_READ64(BB_ADDR);
}
} else {
+ error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
error->ipeir[ring->id] = I915_READ(IPEIR);
error->ipehr[ring->id] = I915_READ(IPEHR);
error->instdone[ring->id] = I915_READ(INSTDONE);
}
+ error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
error->seqno[ring->id] = ring->get_seqno(ring);
error->acthd[ring->id] = intel_ring_get_active_head(ring);
@@ -938,15 +999,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
struct drm_i915_gem_request *request;
int i, count;
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
-
- if (ring->obj == NULL)
- continue;
-
+ for_each_ring(ring, dev_priv, i) {
i915_record_ring_state(dev, error, ring);
error->ring[i].batchbuffer =
@@ -1013,8 +1070,19 @@ static void i915_capture_error_state(struct drm_device *dev)
DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
dev->primary->index);
+ kref_init(&error->ref);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
+
+ if (HAS_PCH_SPLIT(dev))
+ error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else if (IS_VALLEYVIEW(dev))
+ error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+ else if (IS_GEN2(dev))
+ error->ier = I915_READ16(IER);
+ else
+ error->ier = I915_READ(IER);
+
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
@@ -1034,8 +1102,9 @@ static void i915_capture_error_state(struct drm_device *dev)
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
i++;
error->active_bo_count = i;
- list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
- i++;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+ if (obj->pin_count)
+ i++;
error->pinned_bo_count = i - error->active_bo_count;
error->active_bo = NULL;
@@ -1050,15 +1119,15 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error->active_bo)
error->active_bo_count =
- capture_bo_list(error->active_bo,
- error->active_bo_count,
- &dev_priv->mm.active_list);
+ capture_active_bo(error->active_bo,
+ error->active_bo_count,
+ &dev_priv->mm.active_list);
if (error->pinned_bo)
error->pinned_bo_count =
- capture_bo_list(error->pinned_bo,
- error->pinned_bo_count,
- &dev_priv->mm.pinned_list);
+ capture_pinned_bo(error->pinned_bo,
+ error->pinned_bo_count,
+ &dev_priv->mm.gtt_list);
do_gettimeofday(&error->time);
@@ -1073,7 +1142,7 @@ static void i915_capture_error_state(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
if (error)
- i915_error_state_free(dev, error);
+ i915_error_state_free(&error->ref);
}
void i915_destroy_error_state(struct drm_device *dev)
@@ -1088,7 +1157,7 @@ void i915_destroy_error_state(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
if (error)
- i915_error_state_free(dev, error);
+ kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
@@ -1103,33 +1172,26 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
if (!eir)
return;
- printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
- eir);
+ pr_err("render error detected, EIR: 0x%08x\n", eir);
if (IS_G4X(dev)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
u32 ipeir = I915_READ(IPEIR_I965);
- printk(KERN_ERR " IPEIR: 0x%08x\n",
- I915_READ(IPEIR_I965));
- printk(KERN_ERR " IPEHR: 0x%08x\n",
- I915_READ(IPEHR_I965));
- printk(KERN_ERR " INSTDONE: 0x%08x\n",
+ pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+ pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+ pr_err(" INSTDONE: 0x%08x\n",
I915_READ(INSTDONE_I965));
- printk(KERN_ERR " INSTPS: 0x%08x\n",
- I915_READ(INSTPS));
- printk(KERN_ERR " INSTDONE1: 0x%08x\n",
- I915_READ(INSTDONE1));
- printk(KERN_ERR " ACTHD: 0x%08x\n",
- I915_READ(ACTHD_I965));
+ pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
+ pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
+ pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
}
if (eir & GM45_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
- printk(KERN_ERR "page table error\n");
- printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
- pgtbl_err);
+ pr_err("page table error\n");
+ pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
POSTING_READ(PGTBL_ER);
}
@@ -1138,53 +1200,42 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
if (!IS_GEN2(dev)) {
if (eir & I915_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
- printk(KERN_ERR "page table error\n");
- printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
- pgtbl_err);
+ pr_err("page table error\n");
+ pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
POSTING_READ(PGTBL_ER);
}
}
if (eir & I915_ERROR_MEMORY_REFRESH) {
- printk(KERN_ERR "memory refresh error:\n");
+ pr_err("memory refresh error:\n");
for_each_pipe(pipe)
- printk(KERN_ERR "pipe %c stat: 0x%08x\n",
+ pr_err("pipe %c stat: 0x%08x\n",
pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
/* pipestat has already been acked */
}
if (eir & I915_ERROR_INSTRUCTION) {
- printk(KERN_ERR "instruction error\n");
- printk(KERN_ERR " INSTPM: 0x%08x\n",
- I915_READ(INSTPM));
+ pr_err("instruction error\n");
+ pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
if (INTEL_INFO(dev)->gen < 4) {
u32 ipeir = I915_READ(IPEIR);
- printk(KERN_ERR " IPEIR: 0x%08x\n",
- I915_READ(IPEIR));
- printk(KERN_ERR " IPEHR: 0x%08x\n",
- I915_READ(IPEHR));
- printk(KERN_ERR " INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE));
- printk(KERN_ERR " ACTHD: 0x%08x\n",
- I915_READ(ACTHD));
+ pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
+ pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
+ pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
+ pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
I915_WRITE(IPEIR, ipeir);
POSTING_READ(IPEIR);
} else {
u32 ipeir = I915_READ(IPEIR_I965);
- printk(KERN_ERR " IPEIR: 0x%08x\n",
- I915_READ(IPEIR_I965));
- printk(KERN_ERR " IPEHR: 0x%08x\n",
- I915_READ(IPEHR_I965));
- printk(KERN_ERR " INSTDONE: 0x%08x\n",
+ pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+ pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+ pr_err(" INSTDONE: 0x%08x\n",
I915_READ(INSTDONE_I965));
- printk(KERN_ERR " INSTPS: 0x%08x\n",
- I915_READ(INSTPS));
- printk(KERN_ERR " INSTDONE1: 0x%08x\n",
- I915_READ(INSTDONE1));
- printk(KERN_ERR " ACTHD: 0x%08x\n",
- I915_READ(ACTHD_I965));
+ pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
+ pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
+ pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
}
@@ -1217,6 +1268,8 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
void i915_handle_error(struct drm_device *dev, bool wedged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
i915_capture_error_state(dev);
i915_report_and_clear_eir(dev);
@@ -1228,11 +1281,8 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
/*
* Wakeup waiting processes so they don't hang
*/
- wake_up_all(&dev_priv->ring[RCS].irq_queue);
- if (HAS_BSD(dev))
- wake_up_all(&dev_priv->ring[VCS].irq_queue);
- if (HAS_BLT(dev))
- wake_up_all(&dev_priv->ring[BCS].irq_queue);
+ for_each_ring(ring, dev_priv, i)
+ wake_up_all(&ring->irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -1265,7 +1315,8 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = DSPSURF(intel_crtc->plane);
- stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
+ stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
+ obj->gtt_offset;
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
@@ -1281,248 +1332,6 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
}
}
-static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct drm_i915_master_private *master_priv;
- u32 iir, new_iir;
- u32 pipe_stats[I915_MAX_PIPES];
- u32 vblank_status;
- int vblank = 0;
- unsigned long irqflags;
- int irq_received;
- int ret = IRQ_NONE, pipe;
- bool blc_event = false;
-
- atomic_inc(&dev_priv->irq_received);
-
- iir = I915_READ(IIR);
-
- if (INTEL_INFO(dev)->gen >= 4)
- vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
- else
- vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
-
- for (;;) {
- irq_received = iir != 0;
-
- /* Can't rely on pipestat interrupt bit in iir as it might
- * have been cleared after the pipestat interrupt was received.
- * It doesn't set the bit in iir again, but it still produces
- * interrupts (for non-MSI).
- */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false);
-
- for_each_pipe(pipe) {
- int reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg);
-
- /*
- * Clear the PIPE*STAT regs before the IIR
- */
- if (pipe_stats[pipe] & 0x8000ffff) {
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe %c underrun\n",
- pipe_name(pipe));
- I915_WRITE(reg, pipe_stats[pipe]);
- irq_received = 1;
- }
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
- if (!irq_received)
- break;
-
- ret = IRQ_HANDLED;
-
- /* Consume port. Then clear IIR or we'll miss events */
- if ((I915_HAS_HOTPLUG(dev)) &&
- (iir & I915_DISPLAY_PORT_INTERRUPT)) {
- u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
-
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hotplug_status);
- if (hotplug_status & dev_priv->hotplug_supported_mask)
- queue_work(dev_priv->wq,
- &dev_priv->hotplug_work);
-
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- I915_READ(PORT_HOTPLUG_STAT);
- }
-
- I915_WRITE(IIR, iir);
- new_iir = I915_READ(IIR); /* Flush posted writes */
-
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
-
- if (iir & I915_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[RCS]);
- if (iir & I915_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
-
- if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
- intel_prepare_page_flip(dev, 0);
- if (dev_priv->flip_pending_is_done)
- intel_finish_page_flip_plane(dev, 0);
- }
-
- if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
- intel_prepare_page_flip(dev, 1);
- if (dev_priv->flip_pending_is_done)
- intel_finish_page_flip_plane(dev, 1);
- }
-
- for_each_pipe(pipe) {
- if (pipe_stats[pipe] & vblank_status &&
- drm_handle_vblank(dev, pipe)) {
- vblank++;
- if (!dev_priv->flip_pending_is_done) {
- i915_pageflip_stall_check(dev, pipe);
- intel_finish_page_flip(dev, pipe);
- }
- }
-
- if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
- blc_event = true;
- }
-
-
- if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev);
-
- /* With MSI, interrupts are only generated when iir
- * transitions from zero to nonzero. If another bit got
- * set while we were handling the existing iir bits, then
- * we would never get another interrupt.
- *
- * This is fine on non-MSI as well, as if we hit this path
- * we avoid exiting the interrupt handler only to generate
- * another one.
- *
- * Note that for MSI this could cause a stray interrupt report
- * if an interrupt landed in the time between writing IIR and
- * the posting read. This should be rare enough to never
- * trigger the 99% of 100,000 interrupts test for disabling
- * stray interrupts.
- */
- iir = new_iir;
- }
-
- return ret;
-}
-
-static int i915_emit_irq(struct drm_device * dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
- i915_kernel_lost_context(dev);
-
- DRM_DEBUG_DRIVER("\n");
-
- dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 1;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
-
- if (BEGIN_LP_RING(4) == 0) {
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
- }
-
- return dev_priv->counter;
-}
-
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- int ret = 0;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
- DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
- READ_BREADCRUMB(dev_priv));
-
- if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- return 0;
- }
-
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
- if (ring->irq_get(ring)) {
- DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
- ring->irq_put(ring);
- } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
- ret = -EBUSY;
-
- if (ret == -EBUSY) {
- DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
- READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
- }
-
- return ret;
-}
-
-/* Needs the lock as it touches the ring.
- */
-int i915_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_irq_emit_t *emit = data;
- int result;
-
- if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- mutex_lock(&dev->struct_mutex);
- result = i915_emit_irq(dev);
- mutex_unlock(&dev->struct_mutex);
-
- if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-int i915_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_irq_wait_t *irqwait = data;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- return i915_wait_irq(dev, irqwait->irq_seq);
-}
-
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@@ -1544,7 +1353,7 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
/* maintain vblank delivery even in deep C-states */
if (dev_priv->info->gen == 3)
- I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
+ I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -1575,8 +1384,34 @@ static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
return -EINVAL;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
+ ironlake_enable_display_irq(dev_priv,
+ DE_PIPEA_VBLANK_IVB << (5 * pipe));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ return 0;
+}
+
+static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+ u32 dpfl, imr;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ dpfl = I915_READ(VLV_DPFLIPSTAT);
+ imr = I915_READ(VLV_IMR);
+ if (pipe == 0) {
+ dpfl |= PIPEA_VBLANK_INT_EN;
+ imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+ } else {
+ dpfl |= PIPEB_VBLANK_INT_EN;

+ imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+ }
+ I915_WRITE(VLV_DPFLIPSTAT, dpfl);
+ I915_WRITE(VLV_IMR, imr);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -1592,8 +1427,7 @@ static void i915_disable_vblank(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (dev_priv->info->gen == 3)
- I915_WRITE(INSTPM,
- INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
@@ -1618,63 +1452,30 @@ static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
+ ironlake_disable_display_irq(dev_priv,
+ DE_PIPEA_VBLANK_IVB << (pipe * 5));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-/* Set the vblank monitor pipe
- */
-int i915_vblank_pipe_set(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_vblank_pipe_t *pipe = data;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+ u32 dpfl, imr;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ dpfl = I915_READ(VLV_DPFLIPSTAT);
+ imr = I915_READ(VLV_IMR);
+ if (pipe == 0) {
+ dpfl &= ~PIPEA_VBLANK_INT_EN;
+ imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+ } else {
+ dpfl &= ~PIPEB_VBLANK_INT_EN;
+ imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
}
-
- pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
- return 0;
-}
-
-/**
- * Schedule buffer swap at given vertical blank.
- */
-int i915_vblank_swap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* The delayed swap mechanism was fundamentally racy, and has been
- * removed. The model was that the client requested a delayed flip/swap
- * from the kernel, then waited for vblank before continuing to perform
- * rendering. The problem was that the kernel might wake the client
- * up before it dispatched the vblank swap (since the lock has to be
- * held while touching the ringbuffer), in which case the client would
- * clear and start the next frame before the swap occurred, and
- * flicker would occur in addition to likely missing the vblank.
- *
- * In the absence of this ioctl, userland falls back to a correct path
- * of waiting for a vblank, then dispatching the swap on its own.
- * Context switching to userland and back is plenty fast enough for
- * meeting the requirements of vblank swapping.
- */
- return -EINVAL;
+ I915_WRITE(VLV_IMR, imr);
+ I915_WRITE(VLV_DPFLIPSTAT, dpfl);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
@@ -1689,11 +1490,9 @@ static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
if (list_empty(&ring->request_list) ||
i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
/* Issue a wake-up to catch stuck h/w. */
- if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
- DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
- ring->name,
- ring->waiting_seqno,
- ring->get_seqno(ring));
+ if (waitqueue_active(&ring->irq_queue)) {
+ DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+ ring->name);
wake_up_all(&ring->irq_queue);
*err = true;
}
@@ -1716,6 +1515,35 @@ static bool kick_ring(struct intel_ring_buffer *ring)
return false;
}
+static bool i915_hangcheck_hung(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->hangcheck_count++ > 1) {
+ bool hung = true;
+
+ DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+ i915_handle_error(dev, true);
+
+ if (!IS_GEN2(dev)) {
+ struct intel_ring_buffer *ring;
+ int i;
+
+ /* Is the chip hanging on a WAIT_FOR_EVENT?
+ * If so we can simply poke the RB_WAIT bit
+ * and break the hang. This should work on
+ * all but the second generation chipsets.
+ */
+ for_each_ring(ring, dev_priv, i)
+ hung &= !kick_ring(ring);
+ }
+
+ return hung;
+ }
+
+ return false;
+}
+
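Editor's note: the new i915_hangcheck_hung() helper centralizes the "more than one strike and you are hung" policy plus the RB_WAIT kick that, before this patch, was open-coded per ring further down in i915_hangcheck_elapsed(). The surrounding timer is essentially a progress watchdog: snapshot each ring's active head every tick and count ticks with no movement. A simplified stand-alone sketch of that idea follows; watchdog_tick and the other names are generic, not the driver's code, and it ignores the INSTDONE registers the real check also compares:

#include <stdbool.h>
#include <string.h>

#define NUM_RINGS  3
#define HANG_TICKS 2    /* ticks without progress before declaring a hang */

struct watchdog {
        unsigned int last_head[NUM_RINGS];
        int stuck_ticks;
};

/* Called periodically; heads[] is the latest ACTHD-style sample per ring. */
static bool watchdog_tick(struct watchdog *w, const unsigned int heads[NUM_RINGS])
{
        if (memcmp(w->last_head, heads, sizeof(w->last_head)) == 0) {
                /* No ring advanced since the previous tick. */
                if (++w->stuck_ticks > HANG_TICKS)
                        return true;    /* treat as hung */
        } else {
                /* Progress was made: remember the new positions, reset count. */
                memcpy(w->last_head, heads, sizeof(w->last_head));
                w->stuck_ticks = 0;
        }
        return false;
}

int main(void)
{
        struct watchdog w = { {0}, 0 };
        unsigned int sample[NUM_RINGS] = { 0, 0, 0 };
        int tick;

        /* Nothing ever advances, so the third tick reports a hang. */
        for (tick = 1; tick <= 3; tick++)
                if (watchdog_tick(&w, sample))
                        return 0;
        return 1;
}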
/**
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. The first time this is called we simply record
@@ -1726,19 +1554,31 @@ void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
- bool err = false;
+ uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
+ struct intel_ring_buffer *ring;
+ bool err = false, idle;
+ int i;
if (!i915_enable_hangcheck)
return;
+ memset(acthd, 0, sizeof(acthd));
+ idle = true;
+ for_each_ring(ring, dev_priv, i) {
+ idle &= i915_hangcheck_ring_idle(ring, &err);
+ acthd[i] = intel_ring_get_active_head(ring);
+ }
+
/* If all work is done then ACTHD clearly hasn't advanced. */
- if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
- i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
- i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
- dev_priv->hangcheck_count = 0;
- if (err)
+ if (idle) {
+ if (err) {
+ if (i915_hangcheck_hung(dev))
+ return;
+
goto repeat;
+ }
+
+ dev_priv->hangcheck_count = 0;
return;
}
@@ -1749,47 +1589,16 @@ void i915_hangcheck_elapsed(unsigned long data)
instdone = I915_READ(INSTDONE_I965);
instdone1 = I915_READ(INSTDONE1);
}
- acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
- acthd_bsd = HAS_BSD(dev) ?
- intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
- acthd_blt = HAS_BLT(dev) ?
- intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
- if (dev_priv->last_acthd == acthd &&
- dev_priv->last_acthd_bsd == acthd_bsd &&
- dev_priv->last_acthd_blt == acthd_blt &&
+ if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
dev_priv->last_instdone == instdone &&
dev_priv->last_instdone1 == instdone1) {
- if (dev_priv->hangcheck_count++ > 1) {
- DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
- i915_handle_error(dev, true);
-
- if (!IS_GEN2(dev)) {
- /* Is the chip hanging on a WAIT_FOR_EVENT?
- * If so we can simply poke the RB_WAIT bit
- * and break the hang. This should work on
- * all but the second generation chipsets.
- */
- if (kick_ring(&dev_priv->ring[RCS]))
- goto repeat;
-
- if (HAS_BSD(dev) &&
- kick_ring(&dev_priv->ring[VCS]))
- goto repeat;
-
- if (HAS_BLT(dev) &&
- kick_ring(&dev_priv->ring[BCS]))
- goto repeat;
- }
-
+ if (i915_hangcheck_hung(dev))
return;
- }
} else {
dev_priv->hangcheck_count = 0;
- dev_priv->last_acthd = acthd;
- dev_priv->last_acthd_bsd = acthd_bsd;
- dev_priv->last_acthd_blt = acthd_blt;
+ memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
dev_priv->last_instdone = instdone;
dev_priv->last_instdone1 = instdone1;
}
@@ -1808,10 +1617,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
atomic_set(&dev_priv->irq_received, 0);
- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
- if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
- INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
I915_WRITE(HWSTAM, 0xeffe);
@@ -1832,6 +1637,38 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
+static void valleyview_irq_preinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ /* VLV magic */
+ I915_WRITE(VLV_IMR, 0);
+ I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
+ I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
+ I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
+
+ /* and GT */
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ POSTING_READ(GTIER);
+
+ I915_WRITE(DPINVGTT, 0xff);
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ POSTING_READ(VLV_IER);
+}
+
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec)
@@ -1861,13 +1698,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
u32 render_irqs;
u32 hotplug_mask;
- DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
- if (HAS_BSD(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
- if (HAS_BLT(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
-
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
dev_priv->irq_mask = ~display_mask;
/* should always be able to generate an irq */
@@ -1884,8 +1714,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
if (IS_GEN6(dev))
render_irqs =
GT_USER_INTERRUPT |
- GT_GEN6_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT;
+ GEN6_BSD_USER_INTERRUPT |
+ GEN6_BLITTER_USER_INTERRUPT;
else
render_irqs =
GT_USER_INTERRUPT |
@@ -1930,26 +1760,24 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
- DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
- DE_PLANEB_FLIP_DONE_IVB;
+ u32 display_mask =
+ DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
+ DE_PLANEC_FLIP_DONE_IVB |
+ DE_PLANEB_FLIP_DONE_IVB |
+ DE_PLANEA_FLIP_DONE_IVB;
u32 render_irqs;
u32 hotplug_mask;
- DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
- if (HAS_BSD(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
- if (HAS_BLT(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
-
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
dev_priv->irq_mask = ~display_mask;
/* should always be able to generate an irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
I915_WRITE(DEIMR, dev_priv->irq_mask);
- I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
- DE_PIPEB_VBLANK_IVB);
+ I915_WRITE(DEIER,
+ display_mask |
+ DE_PIPEC_VBLANK_IVB |
+ DE_PIPEB_VBLANK_IVB |
+ DE_PIPEA_VBLANK_IVB);
POSTING_READ(DEIER);
dev_priv->gt_irq_mask = ~0;
@@ -1957,8 +1785,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT;
+ render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+ GEN6_BLITTER_USER_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
@@ -1978,15 +1806,496 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void i915_driver_irq_preinstall(struct drm_device * dev)
+static int valleyview_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 render_irqs;
+ u32 enable_mask;
+ u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ u16 msid;
+
+ enable_mask = I915_DISPLAY_PORT_INTERRUPT;
+ enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+ dev_priv->irq_mask = ~enable_mask;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ /* Hack for broken MSIs on VLV */
+ pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
+ pci_read_config_word(dev->pdev, 0x98, &msid);
+ msid &= 0xff; /* mask out delivery bits */
+ msid |= (1<<14);
+ pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
+
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, enable_mask);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(PIPESTAT(0), 0xffff);
+ I915_WRITE(PIPESTAT(1), 0xffff);
+ POSTING_READ(VLV_IER);
+
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+
+ render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
+ GT_GEN6_BLT_CS_ERROR_INTERRUPT |
+ GT_GEN6_BLT_USER_INTERRUPT |
+ GT_GEN6_BSD_USER_INTERRUPT |
+ GT_GEN6_BSD_CS_ERROR_INTERRUPT |
+ GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
+ GT_PIPE_NOTIFY |
+ GT_RENDER_CS_ERROR_INTERRUPT |
+ GT_SYNC_STATUS |
+ GT_USER_INTERRUPT;
+
+ dev_priv->gt_irq_mask = ~render_irqs;
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, 0);
+ I915_WRITE(GTIER, render_irqs);
+ POSTING_READ(GTIER);
+
+ /* ack & enable invalid PTE error interrupts */
+#if 0 /* FIXME: add support to irq handler for checking these bits */
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+ I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
+#endif
+
+ I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+#if 0 /* FIXME: check register definitions; some have moved */
+ /* Note HDMI and DP share bits */
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
+#endif
+
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+
+ return 0;
+}
+
+static void valleyview_irq_uninstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (!dev_priv)
+ return;
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ POSTING_READ(VLV_IER);
+}
+
+static void ironlake_irq_uninstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!dev_priv)
+ return;
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+
+ I915_WRITE(DEIMR, 0xffffffff);
+ I915_WRITE(DEIER, 0x0);
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+
+ I915_WRITE(SDEIMR, 0xffffffff);
+ I915_WRITE(SDEIER, 0x0);
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+}
+
+static void i8xx_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
atomic_set(&dev_priv->irq_received, 0);
- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE16(IMR, 0xffff);
+ I915_WRITE16(IER, 0x0);
+ POSTING_READ16(IER);
+}
+
+static int i8xx_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ I915_WRITE16(EMR,
+ ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask =
+ ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+
+ I915_WRITE16(IER,
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT);
+ POSTING_READ16(IER);
+
+ return 0;
+}
+
+static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u16 iir, new_iir;
+ u32 pipe_stats[2];
+ unsigned long irqflags;
+ int irq_received;
+ int pipe;
+ u16 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ iir = I915_READ16(IIR);
+ if (iir == 0)
+ return IRQ_NONE;
+
+ while (iir & ~flip_mask) {
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = 1;
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ I915_WRITE16(IIR, iir & ~flip_mask);
+ new_iir = I915_READ16(IIR); /* Flush posted writes */
+
+ i915_update_dri1_breadcrumb(dev);
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+
+ if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, 0)) {
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip(dev, 0);
+ flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
+ }
+ }
+
+ if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, 1)) {
+ if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip(dev, 1);
+ flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ }
+ }
+
+ iir = new_iir;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void i8xx_irq_uninstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ for_each_pipe(pipe) {
+ /* Clear enable bits; then clear status bits */
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+ }
+ I915_WRITE16(IMR, 0xffff);
+ I915_WRITE16(IER, 0x0);
+ I915_WRITE16(IIR, I915_READ16(IIR));
+}
+
+static void i915_irq_preinstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+
+ I915_WRITE16(HWSTAM, 0xeffe);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+ POSTING_READ(IER);
+}
+
+static int i915_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 enable_mask;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask =
+ ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+ enable_mask =
+ I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ /* Enable in IER... */
+ enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+ /* and unmask in IMR */
+ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
+ }
+
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ I915_WRITE(IER, enable_mask);
+ POSTING_READ(IER);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
+
+ /* Ignore TV since it's buggy */
+
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ }
+
+ intel_opregion_enable_asle(dev);
+
+ return 0;
+}
+
+static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
+ unsigned long irqflags;
+ u32 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ u32 flip[2] = {
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
+ };
+ int pipe, ret = IRQ_NONE;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ iir = I915_READ(IIR);
+ do {
+ bool irq_received = (iir & ~flip_mask) != 0;
+ bool blc_event = false;
+
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /* Clear the PIPE*STAT regs before the IIR */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = true;
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ if (!irq_received)
+ break;
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if ((I915_HAS_HOTPLUG(dev)) &&
+ (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ POSTING_READ(PORT_HOTPLUG_STAT);
+ }
+
+ I915_WRITE(IIR, iir & ~flip_mask);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+
+ for_each_pipe(pipe) {
+ int plane = pipe;
+ if (IS_MOBILE(dev))
+ plane = !plane;
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, pipe)) {
+ if (iir & flip[plane]) {
+ intel_prepare_page_flip(dev, plane);
+ intel_finish_page_flip(dev, pipe);
+ flip_mask &= ~flip[plane];
+ }
+ }
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+ }
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev);
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ ret = IRQ_HANDLED;
+ iir = new_iir;
+ } while (iir & ~flip_mask);
+
+ i915_update_dri1_breadcrumb(dev);
+
+ return ret;
+}
+
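Editor's note: the block comment at the end of the loop above explains the MSI constraint this handler (and the i965 one later in the patch) is built around: MSI only fires on a zero to non-zero transition of IIR, so after acking the handler must re-read IIR and keep looping until nothing new is pending. Reduced to its control flow, with a toy write-1-to-clear latch instead of real MMIO (read_iir/write_iir/handle/drain_iir are stand-in names, not driver functions), the pattern is:

#include <stdint.h>
#include <stdio.h>

/* Toy latch standing in for IIR: bits are set by "hardware" and cleared by
 * writing 1s, like the real register. */
static uint32_t fake_iir;

static uint32_t read_iir(void) { return fake_iir; }
static void write_iir(uint32_t val) { fake_iir &= ~val; }

static void handle(uint32_t bits)
{
        printf("handling IIR bits 0x%08x\n", bits);
}

/* Ack-and-reread loop: without the re-read, a bit that became pending
 * between the ack and the return would never raise a new MSI message. */
static int drain_iir(void)
{
        uint32_t iir = read_iir();
        int handled = 0;

        while (iir) {
                uint32_t new_iir;

                write_iir(iir);         /* ack what we saw */
                new_iir = read_iir();   /* pick up anything that raced in */

                handle(iir);
                handled = 1;
                iir = new_iir;
        }
        return handled;
}

int main(void)
{
        fake_iir = 0x3;                 /* pretend two events are pending */
        return drain_iir() ? 0 : 1;
}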
+static void i915_irq_uninstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+
+ I915_WRITE16(HWSTAM, 0xffff);
+ for_each_pipe(pipe) {
+ /* Clear enable bits; then clear status bits */
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+ }
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+
+ I915_WRITE(IIR, I915_READ(IIR));
+}
+
+static void i965_irq_preinstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
@@ -2001,20 +2310,25 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
POSTING_READ(IER);
}
-/*
- * Must be called after intel_modeset_init or hotplug interrupts won't be
- * enabled correctly.
- */
-static int i915_driver_irq_postinstall(struct drm_device *dev)
+static int i965_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
+ u32 enable_mask;
u32 error_mask;
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
/* Unmask the interrupts that we always want on. */
- dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
+ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+ enable_mask = ~dev_priv->irq_mask;
+ enable_mask |= I915_USER_INTERRUPT;
+
+ if (IS_G4X(dev))
+ enable_mask |= I915_BSD_USER_INTERRUPT;
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
@@ -2081,31 +2395,124 @@ static int i915_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void ironlake_irq_uninstall(struct drm_device *dev)
+static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
{
+ struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 iir, new_iir;
+ u32 pipe_stats[I915_MAX_PIPES];
+ unsigned long irqflags;
+ int irq_received;
+ int ret = IRQ_NONE, pipe;
- if (!dev_priv)
- return;
+ atomic_inc(&dev_priv->irq_received);
- dev_priv->vblank_pipe = 0;
+ iir = I915_READ(IIR);
- I915_WRITE(HWSTAM, 0xffffffff);
+ for (;;) {
+ bool blc_event = false;
- I915_WRITE(DEIMR, 0xffffffff);
- I915_WRITE(DEIER, 0x0);
- I915_WRITE(DEIIR, I915_READ(DEIIR));
+ irq_received = iir != 0;
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- I915_WRITE(GTIIR, I915_READ(GTIIR));
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
- I915_WRITE(SDEIMR, 0xffffffff);
- I915_WRITE(SDEIER, 0x0);
- I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = 1;
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ if (!irq_received)
+ break;
+
+ ret = IRQ_HANDLED;
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if ((I915_HAS_HOTPLUG(dev)) &&
+ (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ I915_READ(PORT_HOTPLUG_STAT);
+ }
+
+ I915_WRITE(IIR, iir);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (iir & I915_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 0);
+
+ if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 1);
+
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, pipe)) {
+ i915_pageflip_stall_check(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+ }
+
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev);
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ iir = new_iir;
+ }
+
+ i915_update_dri1_breadcrumb(dev);
+
+ return ret;
}
-static void i915_driver_irq_uninstall(struct drm_device * dev)
+static void i965_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
@@ -2113,8 +2520,6 @@ static void i915_driver_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
- dev_priv->vblank_pipe = 0;
-
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -2134,9 +2539,15 @@ static void i915_driver_irq_uninstall(struct drm_device * dev)
void intel_irq_init(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+ INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
@@ -2147,7 +2558,14 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->get_vblank_timestamp = NULL;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_VALLEYVIEW(dev)) {
+ dev->driver->irq_handler = valleyview_irq_handler;
+ dev->driver->irq_preinstall = valleyview_irq_preinstall;
+ dev->driver->irq_postinstall = valleyview_irq_postinstall;
+ dev->driver->irq_uninstall = valleyview_irq_uninstall;
+ dev->driver->enable_vblank = valleyview_enable_vblank;
+ dev->driver->disable_vblank = valleyview_disable_vblank;
+ } else if (IS_IVYBRIDGE(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2155,6 +2573,14 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank;
+ } else if (IS_HASWELL(dev)) {
+ /* Share interrupts handling with IVB */
+ dev->driver->irq_handler = ivybridge_irq_handler;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ivybridge_enable_vblank;
+ dev->driver->disable_vblank = ivybridge_disable_vblank;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2163,10 +2589,25 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->enable_vblank = ironlake_enable_vblank;
dev->driver->disable_vblank = ironlake_disable_vblank;
} else {
- dev->driver->irq_preinstall = i915_driver_irq_preinstall;
- dev->driver->irq_postinstall = i915_driver_irq_postinstall;
- dev->driver->irq_uninstall = i915_driver_irq_uninstall;
- dev->driver->irq_handler = i915_driver_irq_handler;
+ if (INTEL_INFO(dev)->gen == 2) {
+ dev->driver->irq_preinstall = i8xx_irq_preinstall;
+ dev->driver->irq_postinstall = i8xx_irq_postinstall;
+ dev->driver->irq_handler = i8xx_irq_handler;
+ dev->driver->irq_uninstall = i8xx_irq_uninstall;
+ } else if (INTEL_INFO(dev)->gen == 3) {
+ /* IIR "flip pending" means done if this bit is set */
+ I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+
+ dev->driver->irq_preinstall = i915_irq_preinstall;
+ dev->driver->irq_postinstall = i915_irq_postinstall;
+ dev->driver->irq_uninstall = i915_irq_uninstall;
+ dev->driver->irq_handler = i915_irq_handler;
+ } else {
+ dev->driver->irq_preinstall = i965_irq_preinstall;
+ dev->driver->irq_postinstall = i965_irq_postinstall;
+ dev->driver->irq_uninstall = i965_irq_uninstall;
+ dev->driver->irq_handler = i965_irq_handler;
+ }
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9d24d65f0c3e..2d49b9507ed0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -27,6 +27,11 @@
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+
+#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
+#define _MASKED_BIT_DISABLE(a) ((a) << 16)
+
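Editor's note: two small macro families introduced here carry much of the patch. _PIPE()/_PORT() derive a per-pipe or per-port register address from the index and the A/B instances, and _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() build write values for "masked" registers such as INSTPM or MI_MODE, where the upper 16 bits select which of the lower 16 bits actually change (see the INSTPM writes in i915_enable_vblank()/i915_disable_vblank() earlier in the diff). A quick self-contained illustration of both; masked_write() is only a toy model of how such a register behaves:

#include <assert.h>
#include <stdint.h>

#define _PIPE(pipe, a, b)       ((a) + (pipe) * ((b) - (a)))
#define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)  ((a) << 16)

/* Model of a masked register: the top 16 bits of a write select which of
 * the low 16 bits actually change; unselected bits keep their value. */
static uint16_t masked_write(uint16_t cur, uint32_t val)
{
        uint16_t mask = val >> 16;
        uint16_t bits = val & 0xffff;

        return (cur & ~mask) | (bits & mask);
}

int main(void)
{
        uint16_t reg = 0x00f0;

        /* Example pipe A/B instances spaced 0x1000 apart. */
        assert(_PIPE(0, 0x70024, 0x71024) == 0x70024);
        assert(_PIPE(1, 0x70024, 0x71024) == 0x71024);

        /* Set bit 2 without disturbing anything else, then clear it again. */
        reg = masked_write(reg, _MASKED_BIT_ENABLE(1 << 2));
        assert(reg == 0x00f4);
        reg = masked_write(reg, _MASKED_BIT_DISABLE(1 << 2));
        assert(reg == 0x00f0);

        return 0;
}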
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
@@ -77,6 +82,7 @@
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
+#define GRDOM_RESET_ENABLE (1<<0)
#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
#define GEN6_MBC_SNPCR_SHIFT 21
@@ -125,6 +131,13 @@
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
+#define GAC_ECO_BITS 0x14090
+#define ECOBITS_PPGTT_CACHE64B (3<<8)
+#define ECOBITS_PPGTT_CACHE4B (0<<8)
+
+#define GAB_CTL 0x24000
+#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
+
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
@@ -222,6 +235,7 @@
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
@@ -301,6 +315,61 @@
#define DEBUG_RESET_RENDER (1<<8)
#define DEBUG_RESET_DISPLAY (1<<9)
+/*
+ * DPIO - a special bus for various display related registers to hide behind:
+ * 0x800c: m1, m2, n, p1, p2, k dividers
+ * 0x8014: REF and SFR select
+ * 0x8014: N divider, VCO select
+ * 0x801c/3c: core clock bits
+ * 0x8048/68: low pass filter coefficients
+ * 0x8100: fast clock controls
+ */
+#define DPIO_PKT 0x2100
+#define DPIO_RID (0<<24)
+#define DPIO_OP_WRITE (1<<16)
+#define DPIO_OP_READ (0<<16)
+#define DPIO_PORTID (0x12<<8)
+#define DPIO_BYTE (0xf<<4)
+#define DPIO_BUSY (1<<0) /* status only */
+#define DPIO_DATA 0x2104
+#define DPIO_REG 0x2108
+#define DPIO_CTL 0x2110
+#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
+#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
+#define DPIO_SFR_BYPASS (1<<1)
+#define DPIO_RESET (1<<0)
+
+#define _DPIO_DIV_A 0x800c
+#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
+#define DPIO_K_SHIFT (24) /* 4 bits */
+#define DPIO_P1_SHIFT (21) /* 3 bits */
+#define DPIO_P2_SHIFT (16) /* 5 bits */
+#define DPIO_N_SHIFT (12) /* 4 bits */
+#define DPIO_ENABLE_CALIBRATION (1<<11)
+#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
+#define DPIO_M2DIV_MASK 0xff
+#define _DPIO_DIV_B 0x802c
+#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
+
+#define _DPIO_REFSFR_A 0x8014
+#define DPIO_REFSEL_OVERRIDE 27
+#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
+#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
+#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
+#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
+#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
+#define _DPIO_REFSFR_B 0x8034
+#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
+
+#define _DPIO_CORE_CLK_A 0x801c
+#define _DPIO_CORE_CLK_B 0x803c
+#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
+
+#define _DPIO_LFP_COEFF_A 0x8048
+#define _DPIO_LFP_COEFF_B 0x8068
+#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B)
+
+#define DPIO_FASTCLK_DISABLE 0x8100
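Editor's note: the DPIO block added here is not a flat MMIO range; as the comment says, it is a bus that display PHY registers hide behind, reached indirectly through DPIO_PKT/DPIO_DATA/DPIO_REG. The defines suggest the usual indirect-access handshake (load the target offset, kick off a packet, poll DPIO_BUSY, fetch the data), but the sequence below is an assumption for illustration only, not code from the driver; dpio_read and the mmio_* helpers are hypothetical, and the fake register array exists only so the sketch runs stand-alone:

#include <stdint.h>
#include <stdio.h>

#define DPIO_PKT        0x2100
#define DPIO_RID        (0 << 24)
#define DPIO_OP_WRITE   (1 << 16)
#define DPIO_OP_READ    (0 << 16)
#define DPIO_PORTID     (0x12 << 8)
#define DPIO_BYTE       (0xf << 4)
#define DPIO_BUSY       (1 << 0)
#define DPIO_DATA       0x2104
#define DPIO_REG        0x2108

/* Tiny fake MMIO window; a real helper would use I915_READ()/I915_WRITE()
 * and bound the DPIO_BUSY poll with a timeout. */
static uint32_t fake_mmio[0x3000 / 4];

static uint32_t mmio_read(uint32_t reg)             { return fake_mmio[reg / 4]; }
static void mmio_write(uint32_t reg, uint32_t val)  { fake_mmio[reg / 4] = val; }

/* Illustrative indirect read; the exact ordering is inferred from the
 * register layout above, not lifted from the driver. */
static uint32_t dpio_read(uint32_t reg)
{
        mmio_write(DPIO_REG, reg);
        mmio_write(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID | DPIO_BYTE);
        while (mmio_read(DPIO_PKT) & DPIO_BUSY)
                ;       /* real code would time out here */
        return mmio_read(DPIO_DATA);
}

int main(void)
{
        mmio_write(DPIO_DATA, 0x12345678);      /* pretend the PHY answered */
        printf("DPIO[0x800c] = 0x%08x\n", dpio_read(0x800c));
        return 0;
}

A write would presumably mirror this with DPIO_OP_WRITE and the payload loaded into DPIO_DATA before the packet is issued.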
/*
* Fence registers
@@ -360,8 +429,6 @@
#define ARB_MODE 0x04030
#define ARB_MODE_SWIZZLE_SNB (1<<4)
#define ARB_MODE_SWIZZLE_IVB (1<<5)
-#define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x)
-#define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x)
#define RENDER_HWS_PGA_GEN7 (0x04080)
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
#define DONE_REG 0x40b0
@@ -417,6 +484,7 @@
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
+#define DMA_FADD_I8XX 0x020d0
#define ERROR_GEN6 0x040a0
@@ -432,6 +500,7 @@
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
+#define _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
@@ -447,14 +516,16 @@
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
-#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
-#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
-
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
+#define VLV_IIR_RW 0x182084
+#define VLV_IER 0x1820a0
+#define VLV_IIR 0x1820a4
+#define VLV_IMR 0x1820a8
+#define VLV_ISR 0x1820ac
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
@@ -500,7 +571,6 @@
#define LM_BURST_LENGTH 0x00000700
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE 0x020e4 /* 915+ only */
-#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
/* Make render/texture TLB fetches lower priority than associated data
* fetches. This is not turned on by default
@@ -565,7 +635,6 @@
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
#define CACHE_MODE_0 0x02120 /* 915+ only */
-#define CM0_MASK_SHIFT 16
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@@ -579,7 +648,12 @@
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
-/* GEN6 interrupt control */
+#define CACHE_MODE_1 0x7004 /* IVB+ */
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
+
+/* GEN6 interrupt control
+ * Note that the per-ring interrupt bits do alias with the global interrupt bits
+ * in GTIMR. */
#define GEN6_RENDER_HWSTAM 0x2098
#define GEN6_RENDER_IMR 0x20a8
#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
@@ -615,6 +689,21 @@
#define GEN6_BSD_RNCID 0x12198
+#define GEN7_FF_THREAD_MODE 0x20a0
+#define GEN7_FF_SCHED_MASK 0x0077070
+#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
+#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
+#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
+#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
+#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
+#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
+#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
+#define GEN7_FF_VS_SCHED_HW (0x0<<12)
+#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
+#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
+#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
+#define GEN7_FF_DS_SCHED_HW (0x0<<4)
+
/*
* Framebuffer compression (915+ only)
*/
@@ -743,9 +832,9 @@
#define GMBUS_PORT_PANEL 3
#define GMBUS_PORT_DPC 4 /* HDMIC */
#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
- /* 6 reserved */
-#define GMBUS_PORT_DPD 7 /* HDMID */
-#define GMBUS_NUM_PORTS 8
+#define GMBUS_PORT_DPD 6 /* HDMID */
+#define GMBUS_PORT_RESERVED 7 /* 7 reserved */
+#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
#define GMBUS1 0x5104 /* command/status */
#define GMBUS_SW_CLR_INT (1<<31)
#define GMBUS_SW_RDY (1<<30)
@@ -797,7 +886,9 @@
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
#define DPLL_VGA_MODE_DIS (1 << 28)
#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
@@ -809,6 +900,7 @@
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
+#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
@@ -904,6 +996,7 @@
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define _DPLL_B_MD 0x06020 /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
+
#define _FPA0 0x06040
#define _FPA1 0x06044
#define _FPB0 0x06048
@@ -1044,6 +1137,9 @@
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
+#define FW_BLC_SELF_VLV 0x6500
+#define FW_CSPWRDWNEN (1<<15)
+
/*
* Palette regs
*/
@@ -1601,9 +1697,12 @@
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
#define VIDEO_DIP_CTL 0x61170
+/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
#define VIDEO_DIP_PORT_B (1 << 29)
#define VIDEO_DIP_PORT_C (2 << 29)
+#define VIDEO_DIP_PORT_D (3 << 29)
+#define VIDEO_DIP_PORT_MASK (3 << 29)
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
#define VIDEO_DIP_ENABLE_SPD (8 << 21)
@@ -1614,6 +1713,10 @@
#define VIDEO_DIP_FREQ_ONCE (0 << 16)
#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
+#define VIDEO_DIP_FREQ_MASK (3 << 16)
+/* HSW and later: */
+#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
+#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
/* Panel power sequencing */
#define PP_STATUS 0x61200
@@ -2380,7 +2483,8 @@
/* Pipe A */
#define _PIPEADSL 0x70000
-#define DSL_LINEMASK 0x00000fff
+#define DSL_LINEMASK_GEN2 0x00000fff
+#define DSL_LINEMASK_GEN3 0x00001fff
#define _PIPEACONF 0x70008
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
@@ -2422,23 +2526,30 @@
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
+#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
+#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
+#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
+#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
+#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
+#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10)
#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
@@ -2463,6 +2574,40 @@
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
+#define VLV_DPFLIPSTAT 0x70028
+#define PIPEB_LINE_COMPARE_STATUS (1<<29)
+#define PIPEB_HLINE_INT_EN (1<<28)
+#define PIPEB_VBLANK_INT_EN (1<<27)
+#define SPRITED_FLIPDONE_INT_EN (1<<26)
+#define SPRITEC_FLIPDONE_INT_EN (1<<25)
+#define PLANEB_FLIPDONE_INT_EN (1<<24)
+#define PIPEA_LINE_COMPARE_STATUS (1<<21)
+#define PIPEA_HLINE_INT_EN (1<<20)
+#define PIPEA_VBLANK_INT_EN (1<<19)
+#define SPRITEB_FLIPDONE_INT_EN (1<<18)
+#define SPRITEA_FLIPDONE_INT_EN (1<<17)
+#define PLANEA_FLIPDONE_INT_EN (1<<16)
+
+#define DPINVGTT 0x7002c /* VLV only */
+#define CURSORB_INVALID_GTT_INT_EN (1<<23)
+#define CURSORA_INVALID_GTT_INT_EN (1<<22)
+#define SPRITED_INVALID_GTT_INT_EN (1<<21)
+#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
+#define PLANEB_INVALID_GTT_INT_EN (1<<19)
+#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
+#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
+#define PLANEA_INVALID_GTT_INT_EN (1<<16)
+#define DPINVGTT_EN_MASK 0xff0000
+#define CURSORB_INVALID_GTT_STATUS (1<<7)
+#define CURSORA_INVALID_GTT_STATUS (1<<6)
+#define SPRITED_INVALID_GTT_STATUS (1<<5)
+#define SPRITEC_INVALID_GTT_STATUS (1<<4)
+#define PLANEB_INVALID_GTT_STATUS (1<<3)
+#define SPRITEB_INVALID_GTT_STATUS (1<<2)
+#define SPRITEA_INVALID_GTT_STATUS (1<<1)
+#define PLANEA_INVALID_GTT_STATUS (1<<0)
+#define DPINVGTT_STATUS_MASK 0xff
+
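DPINVGTT pairs per-plane interrupt-enable bits in its high byte with matching status bits in its low byte. A minimal sketch of decoding the status half with the masks above, in the same driver context (i915_drv.h/i915_reg.h) as the rest of the patch; the helper name is illustrative and not something this series adds:

static void vlv_decode_invalid_gtt(struct drm_i915_private *dev_priv)
{
	u32 status = I915_READ(DPINVGTT) & DPINVGTT_STATUS_MASK;

	if (status & PLANEA_INVALID_GTT_STATUS)
		DRM_DEBUG_DRIVER("plane A hit an invalid GTT entry\n");
	if (status & SPRITEA_INVALID_GTT_STATUS)
		DRM_DEBUG_DRIVER("sprite A hit an invalid GTT entry\n");
	if (status & CURSORA_INVALID_GTT_STATUS)
		DRM_DEBUG_DRIVER("cursor A hit an invalid GTT entry\n");
	/* the pipe B bits follow the same pattern */
}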
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
@@ -2492,11 +2637,28 @@
#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
#define DSPFW_HPLL_SR_MASK (0x1ff)
+/* drain latency register values */
+#define DRAIN_LATENCY_PRECISION_32 32
+#define DRAIN_LATENCY_PRECISION_16 16
+#define VLV_DDL1 0x70050
+#define DDL_CURSORA_PRECISION_32 (1<<31)
+#define DDL_CURSORA_PRECISION_16 (0<<31)
+#define DDL_CURSORA_SHIFT 24
+#define DDL_PLANEA_PRECISION_32 (1<<7)
+#define DDL_PLANEA_PRECISION_16 (0<<7)
+#define VLV_DDL2 0x70054
+#define DDL_CURSORB_PRECISION_32 (1<<31)
+#define DDL_CURSORB_PRECISION_16 (0<<31)
+#define DDL_CURSORB_SHIFT 24
+#define DDL_PLANEB_PRECISION_32 (1<<7)
+#define DDL_PLANEB_PRECISION_16 (0<<7)
+
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
#define I830_FIFO_LINE_SIZE 32
+#define VALLEYVIEW_FIFO_SIZE 255
#define G4X_FIFO_SIZE 127
#define I965_FIFO_SIZE 512
#define I945_FIFO_SIZE 127
@@ -2504,6 +2666,7 @@
#define I855GM_FIFO_SIZE 127 /* In cachelines */
#define I830_FIFO_SIZE 95
+#define VALLEYVIEW_MAX_WM 0xff
#define G4X_MAX_WM 0x3f
#define I915_MAX_WM 0x3f
@@ -2518,6 +2681,7 @@
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
+#define VALLEYVIEW_CURSOR_MAX_WM 64
#define I965_CURSOR_FIFO 64
#define I965_CURSOR_MAX_WM 32
#define I965_CURSOR_DFT_WM 8
@@ -2726,6 +2890,13 @@
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
+/* Display/Sprite base address macros */
+#define DISP_BASEADDR_MASK (0xfffff000)
+#define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val) ((val) & DISP_BASEADDR_MASK)
+#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
+ (I915_WRITE(reg, gfx_addr | I915_LO_DISPBASE(I915_READ(reg))))
+
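I915_MODIFY_DISPBASE keeps the low offset bits of the current surface register while taking the page-aligned base from the new address. The same masking, shown as a small stand-alone sketch with made-up values:

#include <stdio.h>
#include <stdint.h>

#define DISP_BASEADDR_MASK 0xfffff000u

int main(void)
{
	uint32_t old_reg = 0x12345678;  /* hypothetical current register value */
	uint32_t gfx_addr = 0x0abcd000; /* hypothetical page-aligned base */
	uint32_t new_reg = gfx_addr | (old_reg & ~DISP_BASEADDR_MASK);

	/* prints 0x0abcd678: base from gfx_addr, low 12 bits from old_reg */
	printf("new register value: 0x%08x\n", new_reg);
	return 0;
}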
/* VBIOS flags */
#define SWF00 0x71410
#define SWF01 0x71414
@@ -3058,25 +3229,38 @@
#define DE_PCH_EVENT_IVB (1<<28)
#define DE_DP_A_HOTPLUG_IVB (1<<27)
#define DE_AUX_CHANNEL_A_IVB (1<<26)
+#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
+#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
+#define DE_PIPEC_VBLANK_IVB (1<<10)
#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
-#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
-#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEB_VBLANK_IVB (1<<5)
+#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
+#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEA_VBLANK_IVB (1<<0)
+#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
+#define MASTER_INTERRUPT_ENABLE (1<<31)
+
#define DEISR 0x44000
#define DEIMR 0x44004
#define DEIIR 0x44008
#define DEIER 0x4400c
-/* GT interrupt */
-#define GT_PIPE_NOTIFY (1 << 4)
-#define GT_SYNC_STATUS (1 << 2)
-#define GT_USER_INTERRUPT (1 << 0)
-#define GT_BSD_USER_INTERRUPT (1 << 5)
-#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
-#define GT_BLT_USER_INTERRUPT (1 << 22)
+/* GT interrupt.
+ * Note that for gen6+ the ring-specific interrupt bits do alias with the
+ * corresponding bits in the per-ring interrupt control registers. */
+#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
+#define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25)
+#define GT_GEN6_BLT_USER_INTERRUPT (1 << 22)
+#define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15)
+#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
+#define GT_BSD_USER_INTERRUPT (1 << 5) /* ilk only */
+#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5)
+#define GT_PIPE_NOTIFY (1 << 4)
+#define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3)
+#define GT_SYNC_STATUS (1 << 2)
+#define GT_USER_INTERRUPT (1 << 0)
#define GTISR 0x44010
#define GTIMR 0x44014
@@ -3226,15 +3410,15 @@
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
-#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1)
+#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@@ -3329,6 +3513,57 @@
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+#define VLV_VIDEO_DIP_CTL_A 0x60220
+#define VLV_VIDEO_DIP_DATA_A 0x60208
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
+
+#define VLV_VIDEO_DIP_CTL_B 0x61170
+#define VLV_VIDEO_DIP_DATA_B 0x61174
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
+
+#define VLV_TVIDEO_DIP_CTL(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+#define VLV_TVIDEO_DIP_DATA(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+#define VLV_TVIDEO_DIP_GCP(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+
+/* Haswell DIP controls */
+#define HSW_VIDEO_DIP_CTL_A 0x60200
+#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220
+#define HSW_VIDEO_DIP_VS_DATA_A 0x60260
+#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
+#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
+#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240
+#define HSW_VIDEO_DIP_VS_ECC_A 0x60280
+#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
+#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300
+#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344
+#define HSW_VIDEO_DIP_GCP_A 0x60210
+
+#define HSW_VIDEO_DIP_CTL_B 0x61200
+#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220
+#define HSW_VIDEO_DIP_VS_DATA_B 0x61260
+#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
+#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
+#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240
+#define HSW_VIDEO_DIP_VS_ECC_B 0x61280
+#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
+#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300
+#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
+#define HSW_VIDEO_DIP_GCP_B 0x61210
+
+#define HSW_TVIDEO_DIP_CTL(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
+#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
+#define HSW_TVIDEO_DIP_GCP(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
#define _TRANS_HSYNC_B 0xe1008
@@ -3489,6 +3724,9 @@
#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
+/* LPT */
+#define FDI_PORT_WIDTH_2X_LPT (1<<19)
+#define FDI_PORT_WIDTH_1X_LPT (0<<19)
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
@@ -3549,6 +3787,7 @@
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
/* or SDVOB */
+#define VLV_HDMIB 0x61140
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER(pipe) ((pipe) << 30)
@@ -3714,6 +3953,8 @@
#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
#define FORCEWAKE 0xA18C
+#define FORCEWAKE_VLV 0x1300b0
+#define FORCEWAKE_ACK_VLV 0x1300b4
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_MT_ACK 0x130040
@@ -3731,6 +3972,7 @@
#define GEN6_UCGCTL1 0x9400
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
+# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
#define GEN6_UCGCTL2 0x9404
# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
@@ -3811,6 +4053,11 @@
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
+#define GEN6_GT_GFX_RC6_LOCKED 0x138104
+#define GEN6_GT_GFX_RC6 0x138108
+#define GEN6_GT_GFX_RC6p 0x13810C
+#define GEN6_GT_GFX_RC6pp 0x138110
+
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
#define GEN6_READ_OC_PARAMS 0xc
@@ -3870,4 +4117,197 @@
#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+/* HSW Power Wells */
+#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
+#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
+#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
+#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
+#define HSW_PWR_WELL_ENABLE (1<<31)
+#define HSW_PWR_WELL_STATE (1<<30)
+#define HSW_PWR_WELL_CTL5 0x45410
+#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
+#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
+#define HSW_PWR_WELL_FORCE_ON (1<<19)
+#define HSW_PWR_WELL_CTL6 0x45414
+
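A minimal sketch of how the power-well bits above fit together: request the well through the driver control register and poll the state bit. The helper name and the 20 ms poll budget are illustrative only; I915_READ/I915_WRITE and wait_for() are the same driver helpers used elsewhere in this series:

static int hsw_power_well_on_sketch(struct drm_i915_private *dev_priv)
{
	I915_WRITE(HSW_PWR_WELL_CTL2, HSW_PWR_WELL_ENABLE);

	if (wait_for(I915_READ(HSW_PWR_WELL_CTL2) & HSW_PWR_WELL_STATE, 20)) {
		DRM_ERROR("timeout waiting for the power well to come up\n");
		return -ETIMEDOUT;
	}

	return 0;
}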
+/* Per-pipe DDI Function Control */
+#define PIPE_DDI_FUNC_CTL_A 0x60400
+#define PIPE_DDI_FUNC_CTL_B 0x61400
+#define PIPE_DDI_FUNC_CTL_C 0x62400
+#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
+#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
+ PIPE_DDI_FUNC_CTL_A, \
+ PIPE_DDI_FUNC_CTL_B)
+#define PIPE_DDI_FUNC_ENABLE (1<<31)
+/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
+#define PIPE_DDI_PORT_MASK (0xf<<28)
+#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
+#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
+#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
+#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
+#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
+#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
+#define PIPE_DDI_BPC_8 (0<<20)
+#define PIPE_DDI_BPC_10 (1<<20)
+#define PIPE_DDI_BPC_6 (2<<20)
+#define PIPE_DDI_BPC_12 (3<<20)
+#define PIPE_DDI_BFI_ENABLE (1<<4)
+#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
+#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
+#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
+
+/* DisplayPort Transport Control */
+#define DP_TP_CTL_A 0x64040
+#define DP_TP_CTL_B 0x64140
+#define DP_TP_CTL(port) _PORT(port, \
+ DP_TP_CTL_A, \
+ DP_TP_CTL_B)
+#define DP_TP_CTL_ENABLE (1<<31)
+#define DP_TP_CTL_MODE_SST (0<<27)
+#define DP_TP_CTL_MODE_MST (1<<27)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
+#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
+#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
+
+/* DisplayPort Transport Status */
+#define DP_TP_STATUS_A 0x64044
+#define DP_TP_STATUS_B 0x64144
+#define DP_TP_STATUS(port) _PORT(port, \
+ DP_TP_STATUS_A, \
+ DP_TP_STATUS_B)
+#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+
+/* DDI Buffer Control */
+#define DDI_BUF_CTL_A 0x64000
+#define DDI_BUF_CTL_B 0x64100
+#define DDI_BUF_CTL(port) _PORT(port, \
+ DDI_BUF_CTL_A, \
+ DDI_BUF_CTL_B)
+#define DDI_BUF_CTL_ENABLE (1<<31)
+#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
+#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
+#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
+#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
+#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
+#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
+#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
+#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
+#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
+#define DDI_BUF_EMP_MASK (0xf<<24)
+#define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_PORT_WIDTH_X1 (0<<1)
+#define DDI_PORT_WIDTH_X2 (1<<1)
+#define DDI_PORT_WIDTH_X4 (3<<1)
+#define DDI_INIT_DISPLAY_DETECTED (1<<0)
+
+/* DDI Buffer Translations */
+#define DDI_BUF_TRANS_A 0x64E00
+#define DDI_BUF_TRANS_B 0x64E60
+#define DDI_BUF_TRANS(port) _PORT(port, \
+ DDI_BUF_TRANS_A, \
+ DDI_BUF_TRANS_B)
+
+/* Sideband Interface (SBI) is programmed indirectly, via
+ * SBI_ADDR, which contains the register offset; and SBI_DATA,
+ * which contains the payload */
+#define SBI_ADDR 0xC6000
+#define SBI_DATA 0xC6004
+#define SBI_CTL_STAT 0xC6008
+#define SBI_CTL_OP_CRRD (0x6<<8)
+#define SBI_CTL_OP_CRWR (0x7<<8)
+#define SBI_RESPONSE_FAIL (0x1<<1)
+#define SBI_RESPONSE_SUCCESS (0x0<<1)
+#define SBI_BUSY (0x1<<0)
+#define SBI_READY (0x0<<0)
+
+/* SBI offsets */
+#define SBI_SSCDIVINTPHASE6 0x0600
+#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
+#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
+#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
+#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
+#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
+#define SBI_SSCCTL 0x020c
+#define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_DISABLE (1<<0)
+#define SBI_SSCAUXDIV6 0x0610
+#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
+#define SBI_DBUFF0 0x2a00
+
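A sketch of the indirect access sequence described in the comment above, built only on the SBI_* control bits defined here; the placement of the target offset in the upper half of SBI_ADDR and the poll budgets are assumptions, and the helper itself is illustrative rather than part of this patch:

static u32 sbi_read_sketch(struct drm_i915_private *dev_priv, u16 offset)
{
	/* wait for the interface to go idle */
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		return 0;
	}

	I915_WRITE(SBI_ADDR, (u32)offset << 16); /* assumed offset placement */
	I915_WRITE(SBI_CTL_STAT, SBI_CTL_OP_CRRD | SBI_BUSY);

	/* wait for completion and check that the cross-request did not fail */
	if (wait_for((I915_READ(SBI_CTL_STAT) &
		      (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 100)) {
		DRM_ERROR("SBI read did not complete\n");
		return 0;
	}

	return I915_READ(SBI_DATA);
}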
+/* LPT PIXCLK_GATE */
+#define PIXCLK_GATE 0xC6020
+#define PIXCLK_GATE_UNGATE (1<<0)
+#define PIXCLK_GATE_GATE (0<<0)
+
+/* SPLL */
+#define SPLL_CTL 0x46020
+#define SPLL_PLL_ENABLE (1<<31)
+#define SPLL_PLL_SCC (1<<28)
+#define SPLL_PLL_NON_SCC (2<<28)
+#define SPLL_PLL_FREQ_810MHz (0<<26)
+#define SPLL_PLL_FREQ_1350MHz (1<<26)
+
+/* WRPLL */
+#define WRPLL_CTL1 0x46040
+#define WRPLL_CTL2 0x46060
+#define WRPLL_PLL_ENABLE (1<<31)
+#define WRPLL_PLL_SELECT_SSC (0x01<<28)
+#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
+#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
+/* WRPLL divider programming */
+#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
+#define WRPLL_DIVIDER_POST(x) ((x)<<8)
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
+
+/* Port clock selection */
+#define PORT_CLK_SEL_A 0x46100
+#define PORT_CLK_SEL_B 0x46104
+#define PORT_CLK_SEL(port) _PORT(port, \
+ PORT_CLK_SEL_A, \
+ PORT_CLK_SEL_B)
+#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
+#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
+#define PORT_CLK_SEL_LCPLL_810 (2<<29)
+#define PORT_CLK_SEL_SPLL (3<<29)
+#define PORT_CLK_SEL_WRPLL1 (4<<29)
+#define PORT_CLK_SEL_WRPLL2 (5<<29)
+
+/* Pipe clock selection */
+#define PIPE_CLK_SEL_A 0x46140
+#define PIPE_CLK_SEL_B 0x46144
+#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
+ PIPE_CLK_SEL_A, \
+ PIPE_CLK_SEL_B)
+/* For each pipe, we need to select the corresponding port clock */
+#define PIPE_CLK_SEL_DISABLED (0x0<<29)
+#define PIPE_CLK_SEL_PORT(x) (((x)+1)<<29)
+
+/* LCPLL Control */
+#define LCPLL_CTL 0x130040
+#define LCPLL_PLL_DISABLE (1<<31)
+#define LCPLL_PLL_LOCK (1<<30)
+#define LCPLL_CD_CLOCK_DISABLE (1<<25)
+#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+
+/* Pipe WM_LINETIME - watermark line time */
+#define PIPE_WM_LINETIME_A 0x45270
+#define PIPE_WM_LINETIME_B 0x45274
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
+ PIPE_WM_LINETIME_A, \
+ PIPE_WM_LINETIME_B)
+#define PIPE_WM_LINETIME_MASK (0x1ff)
+#define PIPE_WM_LINETIME_TIME(x) ((x))
+#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
+#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
+
+/* SFUSE_STRAP */
+#define SFUSE_STRAP 0xc2014
+#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
+#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
+#define SFUSE_STRAP_DDID_DETECTED (1<<0)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2b5eb229ff2c..0ede02a99d91 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -40,7 +40,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
return false;
if (HAS_PCH_SPLIT(dev))
- dpll_reg = PCH_DPLL(pipe);
+ dpll_reg = _PCH_DPLL(pipe);
else
dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
@@ -876,22 +876,6 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(IER, dev_priv->saveIER);
I915_WRITE(IMR, dev_priv->saveIMR);
}
- mutex_unlock(&dev->struct_mutex);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_init_clock_gating(dev);
-
- if (IS_IRONLAKE_M(dev)) {
- ironlake_enable_drps(dev);
- intel_init_emon(dev);
- }
-
- if (INTEL_INFO(dev)->gen >= 6) {
- gen6_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
- }
-
- mutex_lock(&dev->struct_mutex);
/* Cache mode state */
I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
new file mode 100644
index 000000000000..79f83445afa0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include "i915_drv.h"
+
+static u32 calc_residency(struct drm_device *dev, const u32 reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u64 raw_time; /* 32b value may overflow during fixed point math */
+
+ if (!intel_enable_rc6(dev))
+ return 0;
+
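+ /* The residency counter appears to tick once every 1.28us, so
+ * multiplying by 128 and dividing by 100000 converts raw ticks to ms. */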
+ raw_time = I915_READ(reg) * 128ULL;
+ return DIV_ROUND_UP_ULL(raw_time, 100000);
+}
+
+static ssize_t
+show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
+}
+
+static ssize_t
+show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
+ return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
+}
+
+static ssize_t
+show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+ return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
+}
+
+static ssize_t
+show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+ return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
+}
+
+static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
+static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
+static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
+static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
+
+static struct attribute *rc6_attrs[] = {
+ &dev_attr_rc6_enable.attr,
+ &dev_attr_rc6_residency_ms.attr,
+ &dev_attr_rc6p_residency_ms.attr,
+ &dev_attr_rc6pp_residency_ms.attr,
+ NULL
+};
+
+static struct attribute_group rc6_attr_group = {
+ .name = power_group_name,
+ .attrs = rc6_attrs
+};
+
+void i915_setup_sysfs(struct drm_device *dev)
+{
+ int ret;
+
+ /* ILK doesn't have any residency information */
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
+
+ ret = sysfs_merge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+ if (ret)
+ DRM_ERROR("sysfs setup failed\n");
+}
+
+void i915_teardown_sysfs(struct drm_device *dev)
+{
+ sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+}
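Since rc6_attr_group is merged into the device's "power" group, the new files appear next to the standard runtime-PM attributes of the DRM card device. A small userspace sketch of reading one of them; the /sys/class/drm/card0 path assumes the first card:

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/class/drm/card0/power/rc6_residency_ms", "r");

	if (!f) {
		perror("rc6_residency_ms");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("RC6 residency: %s ms\n", buf);
	fclose(f);
	return 0;
}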
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
index ead876eb6ea0..f1df2bd4ecf4 100644
--- a/drivers/gpu/drm/i915/i915_trace_points.c
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -7,5 +7,7 @@
#include "i915_drv.h"
+#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "i915_trace.h"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index bae3edf956a4..f413899475e9 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -9,6 +9,7 @@
#include <acpi/acpi_drivers.h>
#include "drmP.h"
+#include "i915_drv.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
@@ -182,8 +183,6 @@ static void intel_dsm_platform_mux_info(void)
DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
intel_dsm_mux_type(info->buffer.pointer[3]));
}
- } else {
- DRM_ERROR("MUX INFO call failed\n");
}
out:
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b48fc2a8410c..353459362f6f 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -174,6 +174,28 @@ get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
}
+/* get lvds_fp_timing entry
+ * this function may return NULL if the corresponding entry is invalid
+ */
+static const struct lvds_fp_timing *
+get_lvds_fp_timing(const struct bdb_header *bdb,
+ const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs,
+ int index)
+{
+ size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
+ u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
+ size_t ofs;
+
+ if (index >= ARRAY_SIZE(ptrs->ptr))
+ return NULL;
+ ofs = ptrs->ptr[index].fp_timing_offset;
+ if (ofs < data_ofs ||
+ ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
+ return NULL;
+ return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
+}
+
/* Try to find integrated panel data */
static void
parse_lfp_panel_data(struct drm_i915_private *dev_priv,
@@ -183,6 +205,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
const struct bdb_lvds_lfp_data *lvds_lfp_data;
const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
const struct lvds_dvo_timing *panel_dvo_timing;
+ const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
int i, downclock;
@@ -244,6 +267,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
"Normal Clock %dKHz, downclock %dKHz\n",
panel_fixed_mode->clock, 10*downclock);
}
+
+ fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ lvds_options->panel_type);
+ if (fp_timing) {
+ /* check the resolution, just to be sure */
+ if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
+ fp_timing->y_res == panel_fixed_mode->vdisplay) {
+ dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
+ DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
+ dev_priv->bios_lvds_val);
+ }
+ }
}
/* Try to find sdvo panel data */
@@ -256,6 +292,11 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
int index;
index = i915_vbt_sdvo_panel_type;
+ if (index == -2) {
+ DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
+ return;
+ }
+
if (index == -1) {
struct bdb_sdvo_lvds_options *sdvo_lvds_options;
@@ -332,11 +373,11 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
- if (bus_pin >= 1 && bus_pin <= 6)
+ if (intel_gmbus_is_port_valid(bus_pin))
dev_priv->crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
- block_size);
+ block_size);
}
}
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 90b9793fd5da..75a70c46ef1b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -55,18 +55,36 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
struct intel_crt, base);
}
-static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
+static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 temp, reg;
+ u32 temp;
- if (HAS_PCH_SPLIT(dev))
- reg = PCH_ADPA;
- else
- reg = ADPA;
+ temp = I915_READ(PCH_ADPA);
+ temp &= ~ADPA_DAC_ENABLE;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ temp |= ADPA_DAC_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ /* Just leave port enable cleared */
+ break;
+ }
+
+ I915_WRITE(PCH_ADPA, temp);
+}
- temp = I915_READ(reg);
+static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 temp;
+
+ temp = I915_READ(ADPA);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
@@ -85,7 +103,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
break;
}
- I915_WRITE(reg, temp);
+ I915_WRITE(ADPA, temp);
}
static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -278,9 +296,10 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
struct edid *edid;
bool is_digital = false;
+ struct i2c_adapter *i2c;
- edid = drm_get_edid(connector,
- &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+ edid = drm_get_edid(connector, i2c);
/*
* This may be a DVI-I connector with a shared DDC
* link between analog and digital outputs, so we
@@ -476,15 +495,16 @@ static int intel_crt_get_modes(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ struct i2c_adapter *i2c;
- ret = intel_ddc_get_modes(connector,
- &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+ ret = intel_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
return ret;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- return intel_ddc_get_modes(connector,
- &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
+ i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
+ return intel_ddc_get_modes(connector, i2c);
}
static int intel_crt_set_property(struct drm_connector *connector,
@@ -507,12 +527,20 @@ static void intel_crt_reset(struct drm_connector *connector)
* Routines for controlling stuff on the analog port
*/
-static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
- .dpms = intel_crt_dpms,
+static const struct drm_encoder_helper_funcs pch_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
.prepare = intel_encoder_prepare,
.commit = intel_encoder_commit,
.mode_set = intel_crt_mode_set,
+ .dpms = pch_crt_dpms,
+};
+
+static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
+ .mode_fixup = intel_crt_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .commit = intel_encoder_commit,
+ .mode_set = intel_crt_mode_set,
+ .dpms = gmch_crt_dpms,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
@@ -536,7 +564,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
{
- DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident);
+ DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
return 1;
}
@@ -558,6 +586,7 @@ void intel_crt_init(struct drm_device *dev)
struct intel_crt *crt;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct drm_encoder_helper_funcs *encoder_helper_funcs;
/* Skip machines without VGA that falsely report hotplug events */
if (dmi_check_system(intel_no_crt))
@@ -586,14 +615,23 @@ void intel_crt_init(struct drm_device *dev)
crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
1 << INTEL_ANALOG_CLONE_BIT |
1 << INTEL_SDVO_LVDS_CLONE_BIT);
- crt->base.crtc_mask = (1 << 0) | (1 << 1);
+ if (IS_HASWELL(dev))
+ crt->base.crtc_mask = (1 << 0);
+ else
+ crt->base.crtc_mask = (1 << 0) | (1 << 1);
+
if (IS_GEN2(dev))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
+ if (HAS_PCH_SPLIT(dev))
+ encoder_helper_funcs = &pch_encoder_funcs;
+ else
+ encoder_helper_funcs = &gmch_encoder_funcs;
+
+ drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
new file mode 100644
index 000000000000..46d1e886c692
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -0,0 +1,755 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/* HDMI/DVI modes ignore everything but the last 2 items. So we share
+ * them for both DP and FDI transports, allowing those ports to
+ * automatically adapt to HDMI connections as well
+ */
+static const u32 hsw_ddi_translations_dp[] = {
+ 0x00FFFFFF, 0x0006000E, /* DP parameters */
+ 0x00D75FFF, 0x0005000A,
+ 0x00C30FFF, 0x00040006,
+ 0x80AAAFFF, 0x000B0000,
+ 0x00FFFFFF, 0x0005000A,
+ 0x00D75FFF, 0x000C0004,
+ 0x80C30FFF, 0x000B0000,
+ 0x00FFFFFF, 0x00040006,
+ 0x80D75FFF, 0x000B0000,
+ 0x00FFFFFF, 0x00040006 /* HDMI parameters */
+};
+
+static const u32 hsw_ddi_translations_fdi[] = {
+ 0x00FFFFFF, 0x0007000E, /* FDI parameters */
+ 0x00D75FFF, 0x000F000A,
+ 0x00C30FFF, 0x00060006,
+ 0x00AAAFFF, 0x001E0000,
+ 0x00FFFFFF, 0x000F000A,
+ 0x00D75FFF, 0x00160004,
+ 0x00C30FFF, 0x001E0000,
+ 0x00FFFFFF, 0x00060006,
+ 0x00D75FFF, 0x001E0000,
+ 0x00FFFFFF, 0x00040006 /* HDMI parameters */
+};
+
+/* On Haswell, DDI port buffers must be programmed with correct values
+ * in advance. The buffer values are different for FDI and DP modes,
+ * but the HDMI/DVI fields are shared among those. So we program the DDI
+ * in either FDI or DP modes only, as HDMI connections will work with both
+ * of those
+ */
+void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+ int i;
+ const u32 *ddi_translations = ((use_fdi_mode) ?
+ hsw_ddi_translations_fdi :
+ hsw_ddi_translations_dp);
+
+ DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
+ port_name(port),
+ use_fdi_mode ? "FDI" : "DP");
+
+ WARN((use_fdi_mode && (port != PORT_E)),
+ "Programming port %c in FDI mode, this probably will not work.\n",
+ port_name(port));
+
+ for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+ I915_WRITE(reg, ddi_translations[i]);
+ reg += 4;
+ }
+}
+
+/* Program DDI buffers translations for DP. By default, program ports A-D in DP
+ * mode and port E for FDI.
+ */
+void intel_prepare_ddi(struct drm_device *dev)
+{
+ int port;
+
+ if (IS_HASWELL(dev)) {
+ for (port = PORT_A; port < PORT_E; port++)
+ intel_prepare_ddi_buffers(dev, port, false);
+
+ /* DDI E is the port suggested for use in FDI mode, so program it as
+ * such by default. It will have to be re-programmed if a DP output is
+ * detected on it.
+ */
+ intel_prepare_ddi_buffers(dev, PORT_E, true);
+ }
+}
+
+static const long hsw_ddi_buf_ctl_values[] = {
+ DDI_BUF_EMP_400MV_0DB_HSW,
+ DDI_BUF_EMP_400MV_3_5DB_HSW,
+ DDI_BUF_EMP_400MV_6DB_HSW,
+ DDI_BUF_EMP_400MV_9_5DB_HSW,
+ DDI_BUF_EMP_600MV_0DB_HSW,
+ DDI_BUF_EMP_600MV_3_5DB_HSW,
+ DDI_BUF_EMP_600MV_6DB_HSW,
+ DDI_BUF_EMP_800MV_0DB_HSW,
+ DDI_BUF_EMP_800MV_3_5DB_HSW
+};
+
+
+/* Starting with Haswell, different DDI ports can work in FDI mode for
+ * connection to the PCH-located connectors. For this, it is necessary to train
+ * both the DDI port and PCH receiver for the desired DDI buffer settings.
+ *
+ * The recommended port to work in FDI mode is DDI E, which we use here. Also,
+ * please note that when FDI mode is active on DDI E, it shares 2 lines with
+ * DDI A (which is used for eDP)
+ */
+
+void hsw_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp, i;
+
+ /* Configure CPU PLL, wait for warmup */
+ I915_WRITE(SPLL_CTL,
+ SPLL_PLL_ENABLE |
+ SPLL_PLL_FREQ_1350MHz |
+ SPLL_PLL_SCC);
+
+ /* Use SPLL to drive the output when in FDI mode */
+ I915_WRITE(PORT_CLK_SEL(PORT_E),
+ PORT_CLK_SEL_SPLL);
+ I915_WRITE(PIPE_CLK_SEL(pipe),
+ PIPE_CLK_SEL_PORT(PORT_E));
+
+ udelay(20);
+
+ /* Start the training iterating through available voltages and emphasis */
+ for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
+ /* Configure DP_TP_CTL with auto-training */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 |
+ DP_TP_CTL_ENABLE);
+
+ /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
+ temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp = (temp & ~DDI_BUF_EMP_MASK);
+ I915_WRITE(DDI_BUF_CTL(PORT_E),
+ temp |
+ DDI_BUF_CTL_ENABLE |
+ DDI_PORT_WIDTH_X2 |
+ hsw_ddi_buf_ctl_values[i]);
+
+ udelay(600);
+
+ /* Enable CPU FDI Receiver with auto-training */
+ reg = FDI_RX_CTL(pipe);
+ I915_WRITE(reg,
+ I915_READ(reg) |
+ FDI_LINK_TRAIN_AUTO |
+ FDI_RX_ENABLE |
+ FDI_LINK_TRAIN_PATTERN_1_CPT |
+ FDI_RX_ENHANCE_FRAME_ENABLE |
+ FDI_PORT_WIDTH_2X_LPT |
+ FDI_RX_PLL_ENABLE);
+ POSTING_READ(reg);
+ udelay(100);
+
+ temp = I915_READ(DP_TP_STATUS(PORT_E));
+ if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
+ DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
+
+ /* Enable normal pixel sending for FDI */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
+
+ /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
+ temp = I915_READ(DDI_FUNC_CTL(pipe));
+ temp &= ~PIPE_DDI_PORT_MASK;
+ temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
+ PIPE_DDI_MODE_SELECT_FDI |
+ PIPE_DDI_FUNC_ENABLE |
+ PIPE_DDI_PORT_WIDTH_X2;
+ I915_WRITE(DDI_FUNC_CTL(pipe),
+ temp);
+ break;
+ } else {
+ DRM_ERROR("Error training BUF_CTL %d\n", i);
+
+ /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ I915_READ(DP_TP_CTL(PORT_E)) &
+ ~DP_TP_CTL_ENABLE);
+ I915_WRITE(FDI_RX_CTL(pipe),
+ I915_READ(FDI_RX_CTL(pipe)) &
+ ~FDI_RX_PLL_ENABLE);
+ continue;
+ }
+ }
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+/* For DDI connections, it is possible to support different outputs over the
+ * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
+ * the time the output is detected what exactly is on the other end of it. This
+ * function aims at providing support for this detection and proper output
+ * configuration.
+ */
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+ /* For now, we don't do any proper output detection and assume that we
+ * handle HDMI only */
+
+ switch (port) {
+ case PORT_A:
+ /* We don't handle eDP and DP yet */
+ DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
+ break;
+ /* Assume that the ports B, C and D are working in HDMI mode for now */
+ case PORT_B:
+ case PORT_C:
+ case PORT_D:
+ intel_hdmi_init(dev, DDI_BUF_CTL(port));
+ break;
+ default:
+ DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
+ port);
+ break;
+ }
+}
+
+/* WRPLL clock dividers */
+struct wrpll_tmds_clock {
+ u32 clock;
+ u16 p; /* Post divider */
+ u16 n2; /* Feedback divider */
+ u16 r2; /* Reference divider */
+};
+
+/* Table of matching values for WRPLL clocks programming for each frequency */
+static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
+ {19750, 38, 25, 18},
+ {20000, 48, 32, 18},
+ {21000, 36, 21, 15},
+ {21912, 42, 29, 17},
+ {22000, 36, 22, 15},
+ {23000, 36, 23, 15},
+ {23500, 40, 40, 23},
+ {23750, 26, 16, 14},
+ {23750, 26, 16, 14},
+ {24000, 36, 24, 15},
+ {25000, 36, 25, 15},
+ {25175, 26, 40, 33},
+ {25200, 30, 21, 15},
+ {26000, 36, 26, 15},
+ {27000, 30, 21, 14},
+ {27027, 18, 100, 111},
+ {27500, 30, 29, 19},
+ {28000, 34, 30, 17},
+ {28320, 26, 30, 22},
+ {28322, 32, 42, 25},
+ {28750, 24, 23, 18},
+ {29000, 30, 29, 18},
+ {29750, 32, 30, 17},
+ {30000, 30, 25, 15},
+ {30750, 30, 41, 24},
+ {31000, 30, 31, 18},
+ {31500, 30, 28, 16},
+ {32000, 30, 32, 18},
+ {32500, 28, 32, 19},
+ {33000, 24, 22, 15},
+ {34000, 28, 30, 17},
+ {35000, 26, 32, 19},
+ {35500, 24, 30, 19},
+ {36000, 26, 26, 15},
+ {36750, 26, 46, 26},
+ {37000, 24, 23, 14},
+ {37762, 22, 40, 26},
+ {37800, 20, 21, 15},
+ {38000, 24, 27, 16},
+ {38250, 24, 34, 20},
+ {39000, 24, 26, 15},
+ {40000, 24, 32, 18},
+ {40500, 20, 21, 14},
+ {40541, 22, 147, 89},
+ {40750, 18, 19, 14},
+ {41000, 16, 17, 14},
+ {41500, 22, 44, 26},
+ {41540, 22, 44, 26},
+ {42000, 18, 21, 15},
+ {42500, 22, 45, 26},
+ {43000, 20, 43, 27},
+ {43163, 20, 24, 15},
+ {44000, 18, 22, 15},
+ {44900, 20, 108, 65},
+ {45000, 20, 25, 15},
+ {45250, 20, 52, 31},
+ {46000, 18, 23, 15},
+ {46750, 20, 45, 26},
+ {47000, 20, 40, 23},
+ {48000, 18, 24, 15},
+ {49000, 18, 49, 30},
+ {49500, 16, 22, 15},
+ {50000, 18, 25, 15},
+ {50500, 18, 32, 19},
+ {51000, 18, 34, 20},
+ {52000, 18, 26, 15},
+ {52406, 14, 34, 25},
+ {53000, 16, 22, 14},
+ {54000, 16, 24, 15},
+ {54054, 16, 173, 108},
+ {54500, 14, 24, 17},
+ {55000, 12, 22, 18},
+ {56000, 14, 45, 31},
+ {56250, 16, 25, 15},
+ {56750, 14, 25, 17},
+ {57000, 16, 27, 16},
+ {58000, 16, 43, 25},
+ {58250, 16, 38, 22},
+ {58750, 16, 40, 23},
+ {59000, 14, 26, 17},
+ {59341, 14, 40, 26},
+ {59400, 16, 44, 25},
+ {60000, 16, 32, 18},
+ {60500, 12, 39, 29},
+ {61000, 14, 49, 31},
+ {62000, 14, 37, 23},
+ {62250, 14, 42, 26},
+ {63000, 12, 21, 15},
+ {63500, 14, 28, 17},
+ {64000, 12, 27, 19},
+ {65000, 14, 32, 19},
+ {65250, 12, 29, 20},
+ {65500, 12, 32, 22},
+ {66000, 12, 22, 15},
+ {66667, 14, 38, 22},
+ {66750, 10, 21, 17},
+ {67000, 14, 33, 19},
+ {67750, 14, 58, 33},
+ {68000, 14, 30, 17},
+ {68179, 14, 46, 26},
+ {68250, 14, 46, 26},
+ {69000, 12, 23, 15},
+ {70000, 12, 28, 18},
+ {71000, 12, 30, 19},
+ {72000, 12, 24, 15},
+ {73000, 10, 23, 17},
+ {74000, 12, 23, 14},
+ {74176, 8, 100, 91},
+ {74250, 10, 22, 16},
+ {74481, 12, 43, 26},
+ {74500, 10, 29, 21},
+ {75000, 12, 25, 15},
+ {75250, 10, 39, 28},
+ {76000, 12, 27, 16},
+ {77000, 12, 53, 31},
+ {78000, 12, 26, 15},
+ {78750, 12, 28, 16},
+ {79000, 10, 38, 26},
+ {79500, 10, 28, 19},
+ {80000, 12, 32, 18},
+ {81000, 10, 21, 14},
+ {81081, 6, 100, 111},
+ {81624, 8, 29, 24},
+ {82000, 8, 17, 14},
+ {83000, 10, 40, 26},
+ {83950, 10, 28, 18},
+ {84000, 10, 28, 18},
+ {84750, 6, 16, 17},
+ {85000, 6, 17, 18},
+ {85250, 10, 30, 19},
+ {85750, 10, 27, 17},
+ {86000, 10, 43, 27},
+ {87000, 10, 29, 18},
+ {88000, 10, 44, 27},
+ {88500, 10, 41, 25},
+ {89000, 10, 28, 17},
+ {89012, 6, 90, 91},
+ {89100, 10, 33, 20},
+ {90000, 10, 25, 15},
+ {91000, 10, 32, 19},
+ {92000, 10, 46, 27},
+ {93000, 10, 31, 18},
+ {94000, 10, 40, 23},
+ {94500, 10, 28, 16},
+ {95000, 10, 44, 25},
+ {95654, 10, 39, 22},
+ {95750, 10, 39, 22},
+ {96000, 10, 32, 18},
+ {97000, 8, 23, 16},
+ {97750, 8, 42, 29},
+ {98000, 8, 45, 31},
+ {99000, 8, 22, 15},
+ {99750, 8, 34, 23},
+ {100000, 6, 20, 18},
+ {100500, 6, 19, 17},
+ {101000, 6, 37, 33},
+ {101250, 8, 21, 14},
+ {102000, 6, 17, 15},
+ {102250, 6, 25, 22},
+ {103000, 8, 29, 19},
+ {104000, 8, 37, 24},
+ {105000, 8, 28, 18},
+ {106000, 8, 22, 14},
+ {107000, 8, 46, 29},
+ {107214, 8, 27, 17},
+ {108000, 8, 24, 15},
+ {108108, 8, 173, 108},
+ {109000, 6, 23, 19},
+ {109000, 6, 23, 19},
+ {110000, 6, 22, 18},
+ {110013, 6, 22, 18},
+ {110250, 8, 49, 30},
+ {110500, 8, 36, 22},
+ {111000, 8, 23, 14},
+ {111264, 8, 150, 91},
+ {111375, 8, 33, 20},
+ {112000, 8, 63, 38},
+ {112500, 8, 25, 15},
+ {113100, 8, 57, 34},
+ {113309, 8, 42, 25},
+ {114000, 8, 27, 16},
+ {115000, 6, 23, 18},
+ {116000, 8, 43, 25},
+ {117000, 8, 26, 15},
+ {117500, 8, 40, 23},
+ {118000, 6, 38, 29},
+ {119000, 8, 30, 17},
+ {119500, 8, 46, 26},
+ {119651, 8, 39, 22},
+ {120000, 8, 32, 18},
+ {121000, 6, 39, 29},
+ {121250, 6, 31, 23},
+ {121750, 6, 23, 17},
+ {122000, 6, 42, 31},
+ {122614, 6, 30, 22},
+ {123000, 6, 41, 30},
+ {123379, 6, 37, 27},
+ {124000, 6, 51, 37},
+ {125000, 6, 25, 18},
+ {125250, 4, 13, 14},
+ {125750, 4, 27, 29},
+ {126000, 6, 21, 15},
+ {127000, 6, 24, 17},
+ {127250, 6, 41, 29},
+ {128000, 6, 27, 19},
+ {129000, 6, 43, 30},
+ {129859, 4, 25, 26},
+ {130000, 6, 26, 18},
+ {130250, 6, 42, 29},
+ {131000, 6, 32, 22},
+ {131500, 6, 38, 26},
+ {131850, 6, 41, 28},
+ {132000, 6, 22, 15},
+ {132750, 6, 28, 19},
+ {133000, 6, 34, 23},
+ {133330, 6, 37, 25},
+ {134000, 6, 61, 41},
+ {135000, 6, 21, 14},
+ {135250, 6, 167, 111},
+ {136000, 6, 62, 41},
+ {137000, 6, 35, 23},
+ {138000, 6, 23, 15},
+ {138500, 6, 40, 26},
+ {138750, 6, 37, 24},
+ {139000, 6, 34, 22},
+ {139050, 6, 34, 22},
+ {139054, 6, 34, 22},
+ {140000, 6, 28, 18},
+ {141000, 6, 36, 23},
+ {141500, 6, 22, 14},
+ {142000, 6, 30, 19},
+ {143000, 6, 27, 17},
+ {143472, 4, 17, 16},
+ {144000, 6, 24, 15},
+ {145000, 6, 29, 18},
+ {146000, 6, 47, 29},
+ {146250, 6, 26, 16},
+ {147000, 6, 49, 30},
+ {147891, 6, 23, 14},
+ {148000, 6, 23, 14},
+ {148250, 6, 28, 17},
+ {148352, 4, 100, 91},
+ {148500, 6, 33, 20},
+ {149000, 6, 48, 29},
+ {150000, 6, 25, 15},
+ {151000, 4, 19, 17},
+ {152000, 6, 27, 16},
+ {152280, 6, 44, 26},
+ {153000, 6, 34, 20},
+ {154000, 6, 53, 31},
+ {155000, 6, 31, 18},
+ {155250, 6, 50, 29},
+ {155750, 6, 45, 26},
+ {156000, 6, 26, 15},
+ {157000, 6, 61, 35},
+ {157500, 6, 28, 16},
+ {158000, 6, 65, 37},
+ {158250, 6, 44, 25},
+ {159000, 6, 53, 30},
+ {159500, 6, 39, 22},
+ {160000, 6, 32, 18},
+ {161000, 4, 31, 26},
+ {162000, 4, 18, 15},
+ {162162, 4, 131, 109},
+ {162500, 4, 53, 44},
+ {163000, 4, 29, 24},
+ {164000, 4, 17, 14},
+ {165000, 4, 22, 18},
+ {166000, 4, 32, 26},
+ {167000, 4, 26, 21},
+ {168000, 4, 46, 37},
+ {169000, 4, 104, 83},
+ {169128, 4, 64, 51},
+ {169500, 4, 39, 31},
+ {170000, 4, 34, 27},
+ {171000, 4, 19, 15},
+ {172000, 4, 51, 40},
+ {172750, 4, 32, 25},
+ {172800, 4, 32, 25},
+ {173000, 4, 41, 32},
+ {174000, 4, 49, 38},
+ {174787, 4, 22, 17},
+ {175000, 4, 35, 27},
+ {176000, 4, 30, 23},
+ {177000, 4, 38, 29},
+ {178000, 4, 29, 22},
+ {178500, 4, 37, 28},
+ {179000, 4, 53, 40},
+ {179500, 4, 73, 55},
+ {180000, 4, 20, 15},
+ {181000, 4, 55, 41},
+ {182000, 4, 31, 23},
+ {183000, 4, 42, 31},
+ {184000, 4, 30, 22},
+ {184750, 4, 26, 19},
+ {185000, 4, 37, 27},
+ {186000, 4, 51, 37},
+ {187000, 4, 36, 26},
+ {188000, 4, 32, 23},
+ {189000, 4, 21, 15},
+ {190000, 4, 38, 27},
+ {190960, 4, 41, 29},
+ {191000, 4, 41, 29},
+ {192000, 4, 27, 19},
+ {192250, 4, 37, 26},
+ {193000, 4, 20, 14},
+ {193250, 4, 53, 37},
+ {194000, 4, 23, 16},
+ {194208, 4, 23, 16},
+ {195000, 4, 26, 18},
+ {196000, 4, 45, 31},
+ {197000, 4, 35, 24},
+ {197750, 4, 41, 28},
+ {198000, 4, 22, 15},
+ {198500, 4, 25, 17},
+ {199000, 4, 28, 19},
+ {200000, 4, 37, 25},
+ {201000, 4, 61, 41},
+ {202000, 4, 112, 75},
+ {202500, 4, 21, 14},
+ {203000, 4, 146, 97},
+ {204000, 4, 62, 41},
+ {204750, 4, 44, 29},
+ {205000, 4, 38, 25},
+ {206000, 4, 29, 19},
+ {207000, 4, 23, 15},
+ {207500, 4, 40, 26},
+ {208000, 4, 37, 24},
+ {208900, 4, 48, 31},
+ {209000, 4, 48, 31},
+ {209250, 4, 31, 20},
+ {210000, 4, 28, 18},
+ {211000, 4, 25, 16},
+ {212000, 4, 22, 14},
+ {213000, 4, 30, 19},
+ {213750, 4, 38, 24},
+ {214000, 4, 46, 29},
+ {214750, 4, 35, 22},
+ {215000, 4, 43, 27},
+ {216000, 4, 24, 15},
+ {217000, 4, 37, 23},
+ {218000, 4, 42, 26},
+ {218250, 4, 42, 26},
+ {218750, 4, 34, 21},
+ {219000, 4, 47, 29},
+ {219000, 4, 47, 29},
+ {220000, 4, 44, 27},
+ {220640, 4, 49, 30},
+ {220750, 4, 36, 22},
+ {221000, 4, 36, 22},
+ {222000, 4, 23, 14},
+ {222525, 4, 28, 17},
+ {222750, 4, 33, 20},
+ {227000, 4, 37, 22},
+ {230250, 4, 29, 17},
+ {233500, 4, 38, 22},
+ {235000, 4, 40, 23},
+ {238000, 4, 30, 17},
+ {241500, 2, 17, 19},
+ {245250, 2, 20, 22},
+ {247750, 2, 22, 24},
+ {253250, 2, 15, 16},
+ {256250, 2, 18, 19},
+ {262500, 2, 31, 32},
+ {267250, 2, 66, 67},
+ {268500, 2, 94, 95},
+ {270000, 2, 14, 14},
+ {272500, 2, 77, 76},
+ {273750, 2, 57, 56},
+ {280750, 2, 24, 23},
+ {281250, 2, 23, 22},
+ {286000, 2, 17, 16},
+ {291750, 2, 26, 24},
+ {296703, 2, 56, 51},
+ {297000, 2, 22, 20},
+ {298000, 2, 21, 19},
+};
+
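The rows above appear to satisfy clock ≈ 540000 * n2 / (p * r2) kHz, i.e. a doubled 2700 MHz reference divided by r2, p and the 10x TMDS bit-to-pixel ratio; that relationship is inferred from the numbers, not stated by the patch. A quick stand-alone check of a few rows:

#include <stdio.h>

int main(void)
{
	static const struct { int clock, p, n2, r2; } rows[] = {
		{  25175, 26, 40, 33 },
		{ 148500,  6, 33, 20 },
		{ 297000,  2, 22, 20 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(rows) / sizeof(rows[0]); i++) {
		double f = 540000.0 * rows[i].n2 / (rows[i].p * rows[i].r2);
		printf("table %6d kHz -> computed %8.1f kHz\n", rows[i].clock, f);
	}
	return 0;
}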
+void intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ int port = intel_hdmi->ddi_port;
+ int pipe = intel_crtc->pipe;
+ int p, n2, r2, valid=0;
+ u32 temp, i;
+
+ /* On Haswell, we need to enable the clocks and prepare the DDI function
+ * to work in HDMI mode for this pipe.
+ */
+ DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
+
+ for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
+ if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
+ p = wrpll_tmds_clock_table[i].p;
+ n2 = wrpll_tmds_clock_table[i].n2;
+ r2 = wrpll_tmds_clock_table[i].r2;
+
+ DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
+ crtc->mode.clock,
+ p, n2, r2);
+
+ valid = 1;
+ break;
+ }
+ }
+
+ if (!valid) {
+ DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
+ crtc->mode.clock);
+ return;
+ }
+
+ /* Enable LCPLL if disabled */
+ temp = I915_READ(LCPLL_CTL);
+ if (temp & LCPLL_PLL_DISABLE)
+ I915_WRITE(LCPLL_CTL,
+ temp & ~LCPLL_PLL_DISABLE);
+
+ /* Configure WR PLL 1, program the correct divider values for
+ * the desired frequency and wait for warmup */
+ I915_WRITE(WRPLL_CTL1,
+ WRPLL_PLL_ENABLE |
+ WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) |
+ WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p));
+
+ udelay(20);
+
+ /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
+ * this port for connection.
+ */
+ I915_WRITE(PORT_CLK_SEL(port),
+ PORT_CLK_SEL_WRPLL1);
+ I915_WRITE(PIPE_CLK_SEL(pipe),
+ PIPE_CLK_SEL_PORT(port));
+
+ udelay(20);
+
+ if (intel_hdmi->has_audio) {
+ /* Proper support for digital audio needs new logic and a new set
+ * of registers, so we leave it for future patch bombing.
+ */
+ DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n",
+ pipe_name(intel_crtc->pipe));
+ }
+
+ /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+ temp = I915_READ(DDI_FUNC_CTL(pipe));
+ temp &= ~PIPE_DDI_PORT_MASK;
+ temp &= ~PIPE_DDI_BPC_12;
+ temp |= PIPE_DDI_SELECT_PORT(port) |
+ PIPE_DDI_MODE_SELECT_HDMI |
+ ((intel_crtc->bpp > 24) ?
+ PIPE_DDI_BPC_12 :
+ PIPE_DDI_BPC_8) |
+ PIPE_DDI_FUNC_ENABLE;
+
+ I915_WRITE(DDI_FUNC_CTL(pipe), temp);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ int port = intel_hdmi->ddi_port;
+ u32 temp;
+
+ temp = I915_READ(DDI_BUF_CTL(port));
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ temp &= ~DDI_BUF_CTL_ENABLE;
+ } else {
+ temp |= DDI_BUF_CTL_ENABLE;
+ }
+
+ /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
+ * and swing/emphasis values are ignored so nothing special needs
+ * to be done besides enabling the port.
+ */
+ I915_WRITE(DDI_BUF_CTL(port),
+ temp);
+}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1b1cf3b3ff51..914789420906 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,7 +24,7 @@
* Eric Anholt <eric@anholt.net>
*/
-#include <linux/cpufreq.h>
+#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
@@ -44,7 +44,6 @@
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
-static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -360,6 +359,88 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
.find_pll = intel_find_pll_ironlake_dp,
};
+u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
+{
+ unsigned long flags;
+ u32 val = 0;
+
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+ if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+ DRM_ERROR("DPIO idle wait timed out\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(DPIO_REG, reg);
+ I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
+ DPIO_BYTE);
+ if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+ DRM_ERROR("DPIO read wait timed out\n");
+ goto out_unlock;
+ }
+ val = I915_READ(DPIO_DATA);
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+ return val;
+}
+
+static void vlv_init_dpio(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Reset the DPIO config */
+ I915_WRITE(DPIO_CTL, 0);
+ POSTING_READ(DPIO_CTL);
+ I915_WRITE(DPIO_CTL, 1);
+ POSTING_READ(DPIO_CTL);
+}
+
+static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+{
+ DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_dual_link_lvds[] = {
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro (Core i5/i7 Series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+ },
+ },
+ { } /* terminating entry */
+};
+
+static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
+ unsigned int reg)
+{
+ unsigned int val;
+
+ /* use the module option value if specified */
+ if (i915_lvds_channel_mode > 0)
+ return i915_lvds_channel_mode == 2;
+
+ if (dmi_check_system(intel_dual_link_lvds))
+ return true;
+
+ if (dev_priv->lvds_val)
+ val = dev_priv->lvds_val;
+ else {
+ /* BIOS should set the proper LVDS register value at boot, but
+ * in reality, it doesn't set the value when the lid is closed;
+ * we need to check "the value to be set" in VBT when LVDS
+ * register is uninitialized.
+ */
+ val = I915_READ(reg);
+ if (!(val & ~LVDS_DETECTED))
+ val = dev_priv->bios_lvds_val;
+ dev_priv->lvds_val = val;
+ }
+ return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+}
+
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
{
@@ -368,8 +449,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP) {
+ if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
/* LVDS dual channel */
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -397,8 +477,7 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
+ if (is_dual_link_lvds(dev_priv, LVDS))
/* LVDS with dual channel */
limit = &intel_limits_g4x_dual_channel_lvds;
else
@@ -536,8 +615,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
* reliably set up different single/dual channel state, if we
* even can.
*/
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
+ if (is_dual_link_lvds(dev_priv, LVDS))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
@@ -706,6 +784,17 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
+static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 frame, frame_reg = PIPEFRAME(pipe);
+
+ frame = I915_READ(frame_reg);
+
+ if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
+ DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
/**
* intel_wait_for_vblank - wait for vblank on a given pipe
* @dev: drm device
@@ -719,6 +808,11 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipestat_reg = PIPESTAT(pipe);
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ironlake_wait_for_vblank(dev, pipe);
+ return;
+ }
+
/* Clear existing vblank status. Note this will clear any other
* sticky status fields as well.
*
@@ -771,15 +865,20 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
100))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
} else {
- u32 last_line;
+ u32 last_line, line_mask;
int reg = PIPEDSL(pipe);
unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ if (IS_GEN2(dev))
+ line_mask = DSL_LINEMASK_GEN2;
+ else
+ line_mask = DSL_LINEMASK_GEN3;
+
/* Wait for the display line to settle */
do {
- last_line = I915_READ(reg) & DSL_LINEMASK;
+ last_line = I915_READ(reg) & line_mask;
mdelay(5);
- } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
+ } while (((I915_READ(reg) & line_mask) != last_line) &&
time_after(timeout, jiffies));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
@@ -811,34 +910,49 @@ static void assert_pll(struct drm_i915_private *dev_priv,
/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
+ struct intel_pch_pll *pll,
+ struct intel_crtc *crtc,
+ bool state)
{
- int reg;
u32 val;
bool cur_state;
- if (HAS_PCH_CPT(dev_priv->dev)) {
- u32 pch_dpll;
-
- pch_dpll = I915_READ(PCH_DPLL_SEL);
-
- /* Make sure the selected PLL is enabled to the transcoder */
- WARN(!((pch_dpll >> (4 * pipe)) & 8),
- "transcoder %d PLL not enabled\n", pipe);
-
- /* Convert the transcoder pipe number to a pll pipe number */
- pipe = (pch_dpll >> (4 * pipe)) & 1;
+ if (HAS_PCH_LPT(dev_priv->dev)) {
+ DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
+ return;
}
- reg = PCH_DPLL(pipe);
- val = I915_READ(reg);
+ if (WARN (!pll,
+ "asserting PCH PLL %s with no PLL\n", state_string(state)))
+ return;
+
+ val = I915_READ(pll->pll_reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
WARN(cur_state != state,
- "PCH PLL state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
+ pll->pll_reg, state_string(state), state_string(cur_state), val);
+
+ /* Make sure the selected PLL is correctly attached to the transcoder */
+ if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
+ u32 pch_dpll;
+
+ pch_dpll = I915_READ(PCH_DPLL_SEL);
+ cur_state = pll->pll_reg == _PCH_DPLL_B;
+ if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
+ "PLL[%d] not attached to this transcoder %d: %08x\n",
+ cur_state, crtc->pipe, pch_dpll)) {
+ cur_state = !!(val >> (4*crtc->pipe + 3));
+ WARN(cur_state != state,
+ "PLL[%d] not %s on this transcoder %d: %08x\n",
+ pll->pll_reg == _PCH_DPLL_B,
+ state_string(state),
+ crtc->pipe,
+ val);
+ }
+ }
}
-#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
-#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
+#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
@@ -847,9 +961,16 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- reg = FDI_TX_CTL(pipe);
- val = I915_READ(reg);
- cur_state = !!(val & FDI_TX_ENABLE);
+ if (IS_HASWELL(dev_priv->dev)) {
+ /* On Haswell, DDI is used instead of FDI_TX_CTL */
+ reg = DDI_FUNC_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
+ } else {
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_TX_ENABLE);
+ }
WARN(cur_state != state,
"FDI TX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
@@ -864,9 +985,14 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- reg = FDI_RX_CTL(pipe);
- val = I915_READ(reg);
- cur_state = !!(val & FDI_RX_ENABLE);
+ if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+ DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
+ return;
+ } else {
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
+ }
WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
@@ -884,6 +1010,10 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
if (dev_priv->info->gen == 5)
return;
+ /* On Haswell, DDI ports are responsible for the FDI PLL setup */
+ if (IS_HASWELL(dev_priv->dev))
+ return;
+
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
@@ -895,6 +1025,10 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
int reg;
u32 val;
+ if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+ DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
+ return;
+ }
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1000,6 +1134,11 @@ static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
u32 val;
bool enabled;
+ if (HAS_PCH_LPT(dev_priv->dev)) {
+ DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
+ return;
+ }
+
val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
DREF_SUPERSPREAD_SOURCE_MASK));
@@ -1198,6 +1337,69 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
POSTING_READ(reg);
}
+/* SBI access */
+static void
+intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to become ready\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(SBI_ADDR, (reg << 16));
+ I915_WRITE(SBI_DATA, value);
+ I915_WRITE(SBI_CTL_STAT, SBI_BUSY | SBI_CTL_OP_CRWR);
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
+ goto out_unlock;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+}
+
+static u32
+intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to become ready\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(SBI_ADDR, (reg << 16));
+ I915_WRITE(SBI_CTL_STAT, SBI_BUSY | SBI_CTL_OP_CRRD);
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
+ goto out_unlock;
+ }
+
+ value = I915_READ(SBI_DATA);
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+ return value;
+}
+
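The two helpers above are intended to be combined into read-modify-write sequences on sideband registers; a minimal sketch of such a wrapper (hypothetical, not part of this patch; lpt_program_iclkip() below open-codes the same pattern):

static void
intel_sbi_rmw(struct drm_i915_private *dev_priv, u16 reg, u32 clear, u32 set)
{
	/* Read the current sideband value, clear and set the requested
	 * bits, and write it back through the SBI helpers above. */
	u32 tmp = intel_sbi_read(dev_priv, reg);

	tmp &= ~clear;
	tmp |= set;
	intel_sbi_write(dev_priv, reg, tmp);
}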
/**
* intel_enable_pch_pll - enable PCH PLL
* @dev_priv: i915 private structure
@@ -1206,60 +1408,88 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
-static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ struct intel_pch_pll *pll;
int reg;
u32 val;
- if (pipe > 1)
+ /* PCH PLLs only available on ILK, SNB and IVB */
+ BUG_ON(dev_priv->info->gen < 5);
+ pll = intel_crtc->pch_pll;
+ if (pll == NULL)
return;
- /* PCH only available on ILK+ */
- BUG_ON(dev_priv->info->gen < 5);
+ if (WARN_ON(pll->refcount == 0))
+ return;
+
+ DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
+ pll->pll_reg, pll->active, pll->on,
+ intel_crtc->base.base.id);
/* PCH refclock must be enabled first */
assert_pch_refclk_enabled(dev_priv);
- reg = PCH_DPLL(pipe);
+ if (pll->active++ && pll->on) {
+ assert_pch_pll_enabled(dev_priv, pll, NULL);
+ return;
+ }
+
+ DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
+
+ reg = pll->pll_reg;
val = I915_READ(reg);
val |= DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
+
+ pll->on = true;
}
-static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ struct intel_pch_pll *pll = intel_crtc->pch_pll;
int reg;
- u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
- pll_sel = TRANSC_DPLL_ENABLE;
-
- if (pipe > 1)
- return;
+ u32 val;
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
+ if (pll == NULL)
+ return;
- /* Make sure transcoder isn't still depending on us */
- assert_transcoder_disabled(dev_priv, pipe);
+ if (WARN_ON(pll->refcount == 0))
+ return;
- if (pipe == 0)
- pll_sel |= TRANSC_DPLLA_SEL;
- else if (pipe == 1)
- pll_sel |= TRANSC_DPLLB_SEL;
+ DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
+ pll->pll_reg, pll->active, pll->on,
+ intel_crtc->base.base.id);
+ if (WARN_ON(pll->active == 0)) {
+ assert_pch_pll_disabled(dev_priv, pll, NULL);
+ return;
+ }
- if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
+ if (--pll->active) {
+ assert_pch_pll_enabled(dev_priv, pll, NULL);
return;
+ }
+
+ DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
+
+ /* Make sure transcoder isn't still depending on us */
+ assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
- reg = PCH_DPLL(pipe);
+ reg = pll->pll_reg;
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
+
+ pll->on = false;
}
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
@@ -1273,12 +1503,18 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
BUG_ON(dev_priv->info->gen < 5);
/* Make sure PCH DPLL is enabled */
- assert_pch_pll_enabled(dev_priv, pipe);
+ assert_pch_pll_enabled(dev_priv,
+ to_intel_crtc(crtc)->pch_pll,
+ to_intel_crtc(crtc));
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
+ if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+ DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
+ return;
+ }
reg = TRANSCONF(pipe);
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
@@ -1415,7 +1651,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
* Plane regs are double buffered, going from enabled->disabled needs a
* trigger in order to latch. The display address reg provides this.
*/
-static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
@@ -1526,490 +1762,6 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
disable_pch_hdmi(dev_priv, pipe, HDMID);
}
-static void i8xx_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 fbc_ctl;
-
- /* Disable compression */
- fbc_ctl = I915_READ(FBC_CONTROL);
- if ((fbc_ctl & FBC_CTL_EN) == 0)
- return;
-
- fbc_ctl &= ~FBC_CTL_EN;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- /* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
- DRM_DEBUG_KMS("FBC idle timed out\n");
- return;
- }
-
- DRM_DEBUG_KMS("disabled FBC\n");
-}
-
-static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int cfb_pitch;
- int plane, i;
- u32 fbc_ctl, fbc_ctl2;
-
- cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
- if (fb->pitches[0] < cfb_pitch)
- cfb_pitch = fb->pitches[0];
-
- /* FBC_CTL wants 64B units */
- cfb_pitch = (cfb_pitch / 64) - 1;
- plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
-
- /* Clear old tags */
- for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
- I915_WRITE(FBC_TAG + (i * 4), 0);
-
- /* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= plane;
- I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, crtc->y);
-
- /* enable it... */
- fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
- if (IS_I945GM(dev))
- fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
- fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
- fbc_ctl |= obj->fence_reg;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
- cfb_pitch, crtc->y, intel_crtc->plane);
-}
-
-static bool i8xx_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
-}
-
-static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
- unsigned long stall_watermark = 200;
- u32 dpfc_ctl;
-
- dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
- dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
- I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
-
- I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
- (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
- (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
- I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
-
- /* enable it... */
- I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
-
- DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
-}
-
-static void g4x_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpfc_ctl;
-
- /* Disable compression */
- dpfc_ctl = I915_READ(DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
- }
-}
-
-static bool g4x_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-static void sandybridge_blit_fbc_update(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blt_ecoskpd;
-
- /* Make sure blitter notifies FBC of writes */
- gen6_gt_force_wake_get(dev_priv);
- blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT);
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- POSTING_READ(GEN6_BLITTER_ECOSKPD);
- gen6_gt_force_wake_put(dev_priv);
-}
-
-static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
- unsigned long stall_watermark = 200;
- u32 dpfc_ctl;
-
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- dpfc_ctl &= DPFC_RESERVED;
- dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
- /* Set persistent mode for front-buffer rendering, ala X. */
- dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
- dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
- I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
-
- I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
- (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
- (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
- I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
- /* enable it... */
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- if (IS_GEN6(dev)) {
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- sandybridge_blit_fbc_update(dev);
- }
-
- DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
-}
-
-static void ironlake_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpfc_ctl;
-
- /* Disable compression */
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
- }
-}
-
-static bool ironlake_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-bool intel_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->display.fbc_enabled)
- return false;
-
- return dev_priv->display.fbc_enabled(dev);
-}
-
-static void intel_fbc_work_fn(struct work_struct *__work)
-{
- struct intel_fbc_work *work =
- container_of(to_delayed_work(__work),
- struct intel_fbc_work, work);
- struct drm_device *dev = work->crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- if (work == dev_priv->fbc_work) {
- /* Double check that we haven't switched fb without cancelling
- * the prior work.
- */
- if (work->crtc->fb == work->fb) {
- dev_priv->display.enable_fbc(work->crtc,
- work->interval);
-
- dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
- dev_priv->cfb_fb = work->crtc->fb->base.id;
- dev_priv->cfb_y = work->crtc->y;
- }
-
- dev_priv->fbc_work = NULL;
- }
- mutex_unlock(&dev->struct_mutex);
-
- kfree(work);
-}
-
-static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->fbc_work == NULL)
- return;
-
- DRM_DEBUG_KMS("cancelling pending FBC enable\n");
-
- /* Synchronisation is provided by struct_mutex and checking of
- * dev_priv->fbc_work, so we can perform the cancellation
- * entirely asynchronously.
- */
- if (cancel_delayed_work(&dev_priv->fbc_work->work))
- /* tasklet was killed before being run, clean up */
- kfree(dev_priv->fbc_work);
-
- /* Mark the work as no longer wanted so that if it does
- * wake-up (because the work was already running and waiting
- * for our mutex), it will discover that is no longer
- * necessary to run.
- */
- dev_priv->fbc_work = NULL;
-}
-
-static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct intel_fbc_work *work;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->display.enable_fbc)
- return;
-
- intel_cancel_fbc_work(dev_priv);
-
- work = kzalloc(sizeof *work, GFP_KERNEL);
- if (work == NULL) {
- dev_priv->display.enable_fbc(crtc, interval);
- return;
- }
-
- work->crtc = crtc;
- work->fb = crtc->fb;
- work->interval = interval;
- INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
-
- dev_priv->fbc_work = work;
-
- DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
-
- /* Delay the actual enabling to let pageflipping cease and the
- * display to settle before starting the compression. Note that
- * this delay also serves a second purpose: it allows for a
- * vblank to pass after disabling the FBC before we attempt
- * to modify the control registers.
- *
- * A more complicated solution would involve tracking vblanks
- * following the termination of the page-flipping sequence
- * and indeed performing the enable as a co-routine and not
- * waiting synchronously upon the vblank.
- */
- schedule_delayed_work(&work->work, msecs_to_jiffies(50));
-}
-
-void intel_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- intel_cancel_fbc_work(dev_priv);
-
- if (!dev_priv->display.disable_fbc)
- return;
-
- dev_priv->display.disable_fbc(dev);
- dev_priv->cfb_plane = -1;
-}
-
-/**
- * intel_update_fbc - enable/disable FBC as needed
- * @dev: the drm_device
- *
- * Set up the framebuffer compression hardware at mode set time. We
- * enable it if possible:
- * - plane A only (on pre-965)
- * - no pixel mulitply/line duplication
- * - no alpha buffer discard
- * - no dual wide
- * - framebuffer <= 2048 in width, 1536 in height
- *
- * We can't assume that any compression will take place (worst case),
- * so the compressed buffer has to be the same size as the uncompressed
- * one. It also must reside (along with the line length buffer) in
- * stolen memory.
- *
- * We need to enable/disable FBC on a global basis.
- */
-static void intel_update_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = NULL, *tmp_crtc;
- struct intel_crtc *intel_crtc;
- struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
- int enable_fbc;
-
- DRM_DEBUG_KMS("\n");
-
- if (!i915_powersave)
- return;
-
- if (!I915_HAS_FBC(dev))
- return;
-
- /*
- * If FBC is already on, we just have to verify that we can
- * keep it that way...
- * Need to disable if:
- * - more than one pipe is active
- * - changing FBC params (stride, fence, mode)
- * - new fb is too large to fit in compressed buffer
- * - going to an unsupported config (interlace, pixel multiply, etc.)
- */
- list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled && tmp_crtc->fb) {
- if (crtc) {
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
- goto out_disable;
- }
- crtc = tmp_crtc;
- }
- }
-
- if (!crtc || crtc->fb == NULL) {
- DRM_DEBUG_KMS("no output, disabling\n");
- dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
- goto out_disable;
- }
-
- intel_crtc = to_intel_crtc(crtc);
- fb = crtc->fb;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
- enable_fbc = i915_enable_fbc;
- if (enable_fbc < 0) {
- DRM_DEBUG_KMS("fbc set to per-chip default\n");
- enable_fbc = 1;
- if (INTEL_INFO(dev)->gen <= 6)
- enable_fbc = 0;
- }
- if (!enable_fbc) {
- DRM_DEBUG_KMS("fbc disabled per module param\n");
- dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
- goto out_disable;
- }
- if (intel_fb->obj->base.size > dev_priv->cfb_size) {
- DRM_DEBUG_KMS("framebuffer too large, disabling "
- "compression\n");
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- goto out_disable;
- }
- if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
- (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
- DRM_DEBUG_KMS("mode incompatible with compression, "
- "disabling\n");
- dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
- goto out_disable;
- }
- if ((crtc->mode.hdisplay > 2048) ||
- (crtc->mode.vdisplay > 1536)) {
- DRM_DEBUG_KMS("mode too large for compression, disabling\n");
- dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
- goto out_disable;
- }
- if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
- DRM_DEBUG_KMS("plane not 0, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_BAD_PLANE;
- goto out_disable;
- }
-
- /* The use of a CPU fence is mandatory in order to detect writes
- * by the CPU to the scanout and trigger updates to the FBC.
- */
- if (obj->tiling_mode != I915_TILING_X ||
- obj->fence_reg == I915_FENCE_REG_NONE) {
- DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_NOT_TILED;
- goto out_disable;
- }
-
- /* If the kernel debugger is active, always disable compression */
- if (in_dbg_master())
- goto out_disable;
-
- /* If the scanout has not changed, don't modify the FBC settings.
- * Note that we make the fundamental assumption that the fb->obj
- * cannot be unpinned (and have its GTT offset and fence revoked)
- * without first being decoupled from the scanout and FBC disabled.
- */
- if (dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_fb == fb->base.id &&
- dev_priv->cfb_y == crtc->y)
- return;
-
- if (intel_fbc_enabled(dev)) {
- /* We update FBC along two paths, after changing fb/crtc
- * configuration (modeswitching) and after page-flipping
- * finishes. For the latter, we know that not only did
- * we disable the FBC at the start of the page-flip
- * sequence, but also more than one vblank has passed.
- *
- * For the former case of modeswitching, it is possible
- * to switch between two FBC valid configurations
- * instantaneously so we do need to disable the FBC
- * before we can modify its control registers. We also
- * have to wait for the next vblank for that to take
- * effect. However, since we delay enabling FBC we can
- * assume that a vblank has passed since disabling and
- * that we can safely alter the registers in the deferred
- * callback.
- *
- * In the scenario that we go from a valid to invalid
- * and then back to valid FBC configuration we have
- * no strict enforcement that a vblank occurred since
- * disabling the FBC. However, along all current pipe
- * disabling paths we do need to wait for a vblank at
- * some point. And we wait before enabling FBC anyway.
- */
- DRM_DEBUG_KMS("disabling active FBC for update\n");
- intel_disable_fbc(dev);
- }
-
- intel_enable_fbc(crtc, 500);
- return;
-
-out_disable:
- /* Multiple disables should be harmless */
- if (intel_fbc_enabled(dev)) {
- DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
- intel_disable_fbc(dev);
- }
-}
-
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -2050,13 +1802,11 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
* framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous.
*/
- if (obj->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence(obj, pipelined);
- if (ret)
- goto err_unpin;
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ goto err_unpin;
- i915_gem_object_pin_fence(obj);
- }
+ i915_gem_object_pin_fence(obj);
dev_priv->mm.interruptible = true;
return 0;
@@ -2137,7 +1887,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
Start, Offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(DSPSURF(plane), Start);
+ I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPADDR(plane), Offset);
} else
@@ -2217,7 +1967,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
Start, Offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- I915_WRITE(DSPSURF(plane), Start);
+ I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPADDR(plane), Offset);
POSTING_READ(reg);
@@ -2232,16 +1982,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- ret = dev_priv->display.update_plane(crtc, fb, x, y);
- if (ret)
- return ret;
- intel_update_fbc(dev);
+ if (dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
intel_increase_pllclock(crtc);
- return 0;
+ return dev_priv->display.update_plane(crtc, fb, x, y);
}
static int
@@ -2276,6 +2022,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int ret;
@@ -2286,16 +2033,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
- switch (intel_crtc->plane) {
- case 0:
- case 1:
- break;
- case 2:
- if (IS_IVYBRIDGE(dev))
- break;
- /* fall through otherwise */
- default:
- DRM_ERROR("no plane for crtc\n");
+ if (intel_crtc->plane > dev_priv->num_pipe) {
+ DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
+ intel_crtc->plane,
+ dev_priv->num_pipe);
return -EINVAL;
}
@@ -2312,8 +2053,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb)
intel_finish_fb(old_fb);
- ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
- LEAVE_ATOMIC_MODE_SET);
+ ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
if (ret) {
intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
@@ -2326,6 +2066,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
}
+ intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
if (!dev->primary->master)
@@ -2547,7 +2288,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- u32 reg, temp, i;
+ u32 reg, temp, i, retry;
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
@@ -2599,15 +2340,19 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(500);
- reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_BIT_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done.\n");
- break;
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_BIT_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ break;
+ }
+ udelay(50);
}
+ if (retry < 5)
+ break;
}
if (i == 4)
DRM_ERROR("FDI train 1 fail!\n");
@@ -2648,15 +2393,19 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(500);
- reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done.\n");
- break;
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ udelay(50);
}
+ if (retry < 5)
+ break;
}
if (i == 4)
DRM_ERROR("FDI train 2 fail!\n");
@@ -2808,14 +2557,18 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(200);
- /* Enable CPU FDI TX PLL, always on for Ironlake */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+ /* On Haswell, the PLL configuration for ports and pipes is handled
+ * separately, as part of DDI setup */
+ if (!IS_HASWELL(dev)) {
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
- POSTING_READ(reg);
- udelay(100);
+ POSTING_READ(reg);
+ udelay(100);
+ }
}
}
@@ -2888,38 +2641,16 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
}
-/*
- * When we disable a pipe, we need to clear any pending scanline wait events
- * to avoid hanging the ring, which we assume we are waiting on.
- */
-static void intel_clear_scanline_wait(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- u32 tmp;
-
- if (IS_GEN2(dev))
- /* Can't break the hang on i8xx */
- return;
-
- ring = LP_RING(dev_priv);
- tmp = I915_READ_CTL(ring);
- if (tmp & RING_WAIT)
- I915_WRITE_CTL(ring, tmp);
-}
-
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
- struct drm_i915_gem_object *obj;
- struct drm_i915_private *dev_priv;
+ struct drm_device *dev = crtc->dev;
if (crtc->fb == NULL)
return;
- obj = to_intel_framebuffer(crtc->fb)->obj;
- dev_priv = crtc->dev->dev_private;
- wait_event(dev_priv->pending_flip_queue,
- atomic_read(&obj->pending_flip) == 0);
+ mutex_lock(&dev->struct_mutex);
+ intel_finish_fb(crtc->fb);
+ mutex_unlock(&dev->struct_mutex);
}
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
@@ -2936,6 +2667,22 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
if (encoder->base.crtc != crtc)
continue;
+ /* On Haswell, the LPT PCH handles the VGA connection via FDI, and the
+ * CPU handles all other outputs */
+ if (IS_HASWELL(dev)) {
+ /* It is still unclear how this will work on PPT, so throw up a warning */
+ WARN_ON(!HAS_PCH_LPT(dev));
+
+ if (encoder->type == DRM_MODE_ENCODER_DAC) {
+ DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
+ return true;
+ } else {
+ DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
+ encoder->type);
+ return false;
+ }
+ }
+
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
if (!intel_encoder_is_pch_edp(&encoder->base))
@@ -2947,6 +2694,97 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
return true;
}
+/* Program iCLKIP clock to the desired frequency */
+static void lpt_program_iclkip(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 divsel, phaseinc, auxdiv, phasedir = 0;
+ u32 temp;
+
+ /* It is necessary to ungate the pixclk gate prior to programming
+ * the divisors, and gate it back when it is done.
+ */
+ I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
+
+ /* Disable SSCCTL */
+ intel_sbi_write(dev_priv, SBI_SSCCTL6,
+ intel_sbi_read(dev_priv, SBI_SSCCTL6) |
+ SBI_SSCCTL_DISABLE);
+
+ /* 20MHz is a corner case which is out of range for the 7-bit divisor */
+ if (crtc->mode.clock == 20000) {
+ auxdiv = 1;
+ divsel = 0x41;
+ phaseinc = 0x20;
+ } else {
+ /* The iCLK virtual clock root frequency is in MHz,
+ * but the crtc->mode.clock is in KHz. To get the divisors,
+ * it is necessary to divide one by another, so we
+ * convert the virtual clock precision to KHz here for higher
+ * precision.
+ */
+ u32 iclk_virtual_root_freq = 172800 * 1000;
+ u32 iclk_pi_range = 64;
+ u32 desired_divisor, msb_divisor_value, pi_value;
+
+ desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
+ msb_divisor_value = desired_divisor / iclk_pi_range;
+ pi_value = desired_divisor % iclk_pi_range;
+
+ auxdiv = 0;
+ divsel = msb_divisor_value - 2;
+ phaseinc = pi_value;
+ }
+
+ /* This should not happen with any sane values */
+ WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+ ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+ WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
+ ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+
+ DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+ crtc->mode.clock,
+ auxdiv,
+ divsel,
+ phasedir,
+ phaseinc);
+
+ /* Program SSCDIVINTPHASE6 */
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+ temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
+ temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
+ temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
+ temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
+
+ intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp);
+
+ /* Program SSCAUXDIV */
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
+ temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
+ temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
+ intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp);
+
+ /* Enable modulator and associated divider */
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
+ temp &= ~SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL6, temp);
+
+ /* Wait for initialization time */
+ udelay(24);
+
+ I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+}
+
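As a worked example of the divisor arithmetic above (illustrative numbers, not taken from the patch), a 148500 kHz pixel clock gives:

/*
 * desired_divisor   = 172800000 / 148500 = 1163
 * msb_divisor_value = 1163 / 64 = 18  ->  divsel   = 18 - 2 = 16 (0x10)
 * pi_value          = 1163 % 64 = 11  ->  phaseinc = 11 (0x0b)
 * auxdiv            = 0, phasedir = 0
 */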
/*
* Enable PCH resources required for PCH ports:
* - PCH PLLs
@@ -2961,29 +2799,41 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- u32 reg, temp, transc_sel;
+ u32 reg, temp;
+
+ assert_transcoder_disabled(dev_priv, pipe);
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
- intel_enable_pch_pll(dev_priv, pipe);
+ intel_enable_pch_pll(intel_crtc);
- if (HAS_PCH_CPT(dev)) {
- transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
- TRANSC_DPLLB_SEL;
+ if (HAS_PCH_LPT(dev)) {
+ DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
+ lpt_program_iclkip(crtc);
+ } else if (HAS_PCH_CPT(dev)) {
+ u32 sel;
- /* Be sure PCH DPLL SEL is set */
temp = I915_READ(PCH_DPLL_SEL);
- if (pipe == 0) {
- temp &= ~(TRANSA_DPLLB_SEL);
- temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
- } else if (pipe == 1) {
- temp &= ~(TRANSB_DPLLB_SEL);
- temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
- } else if (pipe == 2) {
- temp &= ~(TRANSC_DPLLB_SEL);
- temp |= (TRANSC_DPLL_ENABLE | transc_sel);
+ switch (pipe) {
+ default:
+ case 0:
+ temp |= TRANSA_DPLL_ENABLE;
+ sel = TRANSA_DPLLB_SEL;
+ break;
+ case 1:
+ temp |= TRANSB_DPLL_ENABLE;
+ sel = TRANSB_DPLLB_SEL;
+ break;
+ case 2:
+ temp |= TRANSC_DPLL_ENABLE;
+ sel = TRANSC_DPLLB_SEL;
+ break;
}
+ if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
+ temp |= sel;
+ else
+ temp &= ~sel;
I915_WRITE(PCH_DPLL_SEL, temp);
}
@@ -2998,7 +2848,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
- intel_fdi_normal_train(crtc);
+ if (!IS_HASWELL(dev))
+ intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
@@ -3041,6 +2892,93 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_enable_transcoder(dev_priv, pipe);
}
+static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
+{
+ struct intel_pch_pll *pll = intel_crtc->pch_pll;
+
+ if (pll == NULL)
+ return;
+
+ if (pll->refcount == 0) {
+ WARN(1, "bad PCH PLL refcount\n");
+ return;
+ }
+
+ --pll->refcount;
+ intel_crtc->pch_pll = NULL;
+}
+
+static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
+{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ struct intel_pch_pll *pll;
+ int i;
+
+ pll = intel_crtc->pch_pll;
+ if (pll) {
+ DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
+ intel_crtc->base.base.id, pll->pll_reg);
+ goto prepare;
+ }
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
+ i = intel_crtc->pipe;
+ pll = &dev_priv->pch_plls[i];
+
+ DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
+ intel_crtc->base.base.id, pll->pll_reg);
+
+ goto found;
+ }
+
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
+ pll = &dev_priv->pch_plls[i];
+
+ /* First pass: only consider PLLs that are already in use, so we can share matching timings */
+ if (pll->refcount == 0)
+ continue;
+
+ if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
+ fp == I915_READ(pll->fp0_reg)) {
+ DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
+ intel_crtc->base.base.id,
+ pll->pll_reg, pll->refcount, pll->active);
+
+ goto found;
+ }
+ }
+
+ /* Ok no matching timings, maybe there's a free one? */
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
+ pll = &dev_priv->pch_plls[i];
+ if (pll->refcount == 0) {
+ DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
+ intel_crtc->base.base.id, pll->pll_reg);
+ goto found;
+ }
+ }
+
+ return NULL;
+
+found:
+ intel_crtc->pch_pll = pll;
+ pll->refcount++;
+ DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
+prepare: /* separate function? */
+ DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
+
+ /* Wait for the clocks to stabilize before rewriting the regs */
+ I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(pll->pll_reg);
+ udelay(150);
+
+ I915_WRITE(pll->fp0_reg, fp);
+ I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
+ pll->on = false;
+ return pll;
+}
+
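A minimal sketch of the intended lifecycle of the shared PLL objects handled above (illustrative only; intel_get_pch_pll() is presumably called from the Ironlake mode-set path, which is outside this hunk):

/* mode set: look up or allocate a PLL and take a reference;
 * the helper stores the result in intel_crtc->pch_pll and
 * returns NULL if no PLL is free. */
intel_get_pch_pll(intel_crtc, dpll, fp);

/* crtc enable: bump the active count, power the PLL up for its
 * first user (intel_enable_pch_pll() above). */
intel_enable_pch_pll(intel_crtc);

/* crtc disable: drop the active count, power down after the
 * last user (intel_disable_pch_pll() above). */
intel_disable_pch_pll(intel_crtc);

/* crtc off: drop the reference taken at mode-set time. */
intel_put_pch_pll(intel_crtc);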
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3185,8 +3123,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
}
/* disable PCH DPLL */
- if (!intel_crtc->no_pll)
- intel_disable_pch_pll(dev_priv, pipe);
+ intel_disable_pch_pll(intel_crtc);
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
@@ -3214,7 +3151,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_clear_scanline_wait(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -3242,6 +3178,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+static void ironlake_crtc_off(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ intel_put_pch_pll(intel_crtc);
+}
+
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
@@ -3313,7 +3255,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_crtc->active = false;
intel_update_fbc(dev);
intel_update_watermarks(dev);
- intel_clear_scanline_wait(dev);
}
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -3333,6 +3274,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+static void i9xx_crtc_off(struct drm_crtc *crtc)
+{
+}
+
/**
* Sets the power management mode of the pipe and plane.
*/
@@ -3380,25 +3325,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
{
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_device *dev = crtc->dev;
-
- /* Flush any pending WAITs before we disable the pipe. Note that
- * we need to drop the struct_mutex in order to acquire it again
- * during the lowlevel dpms routines around a couple of the
- * operations. It does not look trivial nor desirable to move
- * that locking higher. So instead we leave a window for the
- * submission of further commands on the fb before we can actually
- * disable it. This race with userspace exists anyway, and we can
- * only rely on the pipe being disabled by userspace after it
- * receives the hotplug notification and has flushed any pending
- * batches.
- */
- if (crtc->fb) {
- mutex_lock(&dev->struct_mutex);
- intel_finish_fb(crtc->fb);
- mutex_unlock(&dev->struct_mutex);
- }
+ struct drm_i915_private *dev_priv = dev->dev_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ dev_priv->display.off(crtc);
+
assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
@@ -3448,8 +3379,7 @@ void intel_encoder_commit(struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
struct drm_device *dev = encoder->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
/* lvds has its own version of commit see intel_lvds_commit */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3487,6 +3417,11 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
+static int valleyview_get_display_clock_speed(struct drm_device *dev)
+{
+ return 400000; /* FIXME */
+}
+
static int i945_get_display_clock_speed(struct drm_device *dev)
{
return 400000;
@@ -3584,1342 +3519,6 @@ ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
-
-struct intel_watermark_params {
- unsigned long fifo_size;
- unsigned long max_wm;
- unsigned long default_wm;
- unsigned long guard_size;
- unsigned long cacheline_size;
-};
-
-/* Pineview has different values for various configs */
-static const struct intel_watermark_params pineview_display_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params pineview_display_hplloff_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_HPLLOFF_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params pineview_cursor_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params g4x_wm_info = {
- G4X_FIFO_SIZE,
- G4X_MAX_WM,
- G4X_MAX_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params g4x_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params i965_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- I915_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params i945_wm_info = {
- I945_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params i915_wm_info = {
- I915_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params i855_wm_info = {
- I855GM_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params i830_wm_info = {
- I830_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
-};
-
-static const struct intel_watermark_params ironlake_display_wm_info = {
- ILK_DISPLAY_FIFO,
- ILK_DISPLAY_MAXWM,
- ILK_DISPLAY_DFTWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_wm_info = {
- ILK_CURSOR_FIFO,
- ILK_CURSOR_MAXWM,
- ILK_CURSOR_DFTWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_display_srwm_info = {
- ILK_DISPLAY_SR_FIFO,
- ILK_DISPLAY_MAX_SRWM,
- ILK_DISPLAY_DFT_SRWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_srwm_info = {
- ILK_CURSOR_SR_FIFO,
- ILK_CURSOR_MAX_SRWM,
- ILK_CURSOR_DFT_SRWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-
-static const struct intel_watermark_params sandybridge_display_wm_info = {
- SNB_DISPLAY_FIFO,
- SNB_DISPLAY_MAXWM,
- SNB_DISPLAY_DFTWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_wm_info = {
- SNB_CURSOR_FIFO,
- SNB_CURSOR_MAXWM,
- SNB_CURSOR_DFTWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_display_srwm_info = {
- SNB_DISPLAY_SR_FIFO,
- SNB_DISPLAY_MAX_SRWM,
- SNB_DISPLAY_DFT_SRWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
- SNB_CURSOR_SR_FIFO,
- SNB_CURSOR_MAX_SRWM,
- SNB_CURSOR_DFT_SRWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-
-
-/**
- * intel_calculate_wm - calculate watermark level
- * @clock_in_khz: pixel clock
- * @wm: chip FIFO params
- * @pixel_size: display pixel size
- * @latency_ns: memory latency for the platform
- *
- * Calculate the watermark level (the level at which the display plane will
- * start fetching from memory again). Each chip has a different display
- * FIFO size and allocation, so the caller needs to figure that out and pass
- * in the correct intel_watermark_params structure.
- *
- * As the pixel clock runs, the FIFO will be drained at a rate that depends
- * on the pixel size. When it reaches the watermark level, it'll start
- * fetching FIFO line sized based chunks from memory until the FIFO fills
- * past the watermark point. If the FIFO drains completely, a FIFO underrun
- * will occur, and a display engine hang could result.
- */
-static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
- const struct intel_watermark_params *wm,
- int fifo_size,
- int pixel_size,
- unsigned long latency_ns)
-{
- long entries_required, wm_size;
-
- /*
- * Note: we need to make sure we don't overflow for various clock &
- * latency values.
- * clocks go from a few thousand to several hundred thousand.
- * latency is usually a few thousand
- */
- entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
- 1000;
- entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
-
- DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
-
- wm_size = fifo_size - (entries_required + wm->guard_size);
-
- DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
-
- /* Don't promote wm_size to unsigned... */
- if (wm_size > (long)wm->max_wm)
- wm_size = wm->max_wm;
- if (wm_size <= 0)
- wm_size = wm->default_wm;
- return wm_size;
-}
-
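The formula in the doc comment and function removed above (the block appears to be relocated rather than simply dropped) works out as follows for an illustrative 108000 kHz mode, 4 bytes per pixel, the default 5000 ns latency and a 64-byte cacheline_size:

/*
 * entries_required = ((108000 / 1000) * 4 * 5000) / 1000 = 2160 bytes
 *                  = DIV_ROUND_UP(2160, 64)              = 34 cachelines
 * wm_size          = fifo_size - (34 + guard_size),
 * clamped to max_wm, or set to default_wm if the result is not positive.
 */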
-struct cxsr_latency {
- int is_desktop;
- int is_ddr3;
- unsigned long fsb_freq;
- unsigned long mem_freq;
- unsigned long display_sr;
- unsigned long display_hpll_disable;
- unsigned long cursor_sr;
- unsigned long cursor_hpll_disable;
-};
-
-static const struct cxsr_latency cxsr_latency_table[] = {
- {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
- {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
- {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
- {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
- {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
-
- {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
- {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
- {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
- {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
- {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
-
- {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
- {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
- {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
- {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
- {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
-
- {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
- {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
- {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
- {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
- {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
-
- {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
- {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
- {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
- {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
- {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
-
- {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
- {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
- {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
- {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
- {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
-};
-
-static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
- int is_ddr3,
- int fsb,
- int mem)
-{
- const struct cxsr_latency *latency;
- int i;
-
- if (fsb == 0 || mem == 0)
- return NULL;
-
- for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
- latency = &cxsr_latency_table[i];
- if (is_desktop == latency->is_desktop &&
- is_ddr3 == latency->is_ddr3 &&
- fsb == latency->fsb_freq && mem == latency->mem_freq)
- return latency;
- }
-
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-
- return NULL;
-}
-
-static void pineview_disable_cxsr(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* deactivate cxsr */
- I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
-}
-
-/*
- * Latency for FIFO fetches is dependent on several factors:
- * - memory configuration (speed, channels)
- * - chipset
- * - current MCH state
- * It can be fairly high in some situations, so here we assume a fairly
- * pessimal value. It's a tradeoff between extra memory fetches (if we
- * set this value too high, the FIFO will fetch frequently to stay full)
- * and power consumption (set it too low to save power and we might see
- * FIFO underruns and display "flicker").
- *
- * A value of 5us seems to be a good balance; safe for very low end
- * platforms but not overly aggressive on lower latency configs.
- */
-static const int latency_ns = 5000;
-
-static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- if (plane)
- size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
-
- return size;
-}
-
-static int i85x_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x1ff;
- if (plane)
- size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
- size >>= 1; /* Convert to cachelines */
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
-
- return size;
-}
-
-static int i845_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- size >>= 2; /* Convert to cachelines */
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A",
- size);
-
- return size;
-}
-
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- size >>= 1; /* Convert to cachelines */
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
-
- return size;
-}
-
-static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
-{
- struct drm_crtc *crtc, *enabled = NULL;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->enabled && crtc->fb) {
- if (enabled)
- return NULL;
- enabled = crtc;
- }
- }
-
- return enabled;
-}
-
-static void pineview_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- const struct cxsr_latency *latency;
- u32 reg;
- unsigned long wm;
-
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
- dev_priv->fsb_freq, dev_priv->mem_freq);
- if (!latency) {
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
- pineview_disable_cxsr(dev);
- return;
- }
-
- crtc = single_enabled_crtc(dev);
- if (crtc) {
- int clock = crtc->mode.clock;
- int pixel_size = crtc->fb->bits_per_pixel / 8;
-
- /* Display SR */
- wm = intel_calculate_wm(clock, &pineview_display_wm,
- pineview_display_wm.fifo_size,
- pixel_size, latency->display_sr);
- reg = I915_READ(DSPFW1);
- reg &= ~DSPFW_SR_MASK;
- reg |= wm << DSPFW_SR_SHIFT;
- I915_WRITE(DSPFW1, reg);
- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
-
- /* cursor SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_wm,
- pineview_display_wm.fifo_size,
- pixel_size, latency->cursor_sr);
- reg = I915_READ(DSPFW3);
- reg &= ~DSPFW_CURSOR_SR_MASK;
- reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
- I915_WRITE(DSPFW3, reg);
-
- /* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
- pineview_display_hplloff_wm.fifo_size,
- pixel_size, latency->display_hpll_disable);
- reg = I915_READ(DSPFW3);
- reg &= ~DSPFW_HPLL_SR_MASK;
- reg |= wm & DSPFW_HPLL_SR_MASK;
- I915_WRITE(DSPFW3, reg);
-
- /* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
- pineview_display_hplloff_wm.fifo_size,
- pixel_size, latency->cursor_hpll_disable);
- reg = I915_READ(DSPFW3);
- reg &= ~DSPFW_HPLL_CURSOR_MASK;
- reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
- I915_WRITE(DSPFW3, reg);
- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
-
- /* activate cxsr */
- I915_WRITE(DSPFW3,
- I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
- DRM_DEBUG_KMS("Self-refresh is enabled\n");
- } else {
- pineview_disable_cxsr(dev);
- DRM_DEBUG_KMS("Self-refresh is disabled\n");
- }
-}
-
-static bool g4x_compute_wm0(struct drm_device *dev,
- int plane,
- const struct intel_watermark_params *display,
- int display_latency_ns,
- const struct intel_watermark_params *cursor,
- int cursor_latency_ns,
- int *plane_wm,
- int *cursor_wm)
-{
- struct drm_crtc *crtc;
- int htotal, hdisplay, clock, pixel_size;
- int line_time_us, line_count;
- int entries, tlb_miss;
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
- *cursor_wm = cursor->guard_size;
- *plane_wm = display->guard_size;
- return false;
- }
-
- htotal = crtc->mode.htotal;
- hdisplay = crtc->mode.hdisplay;
- clock = crtc->mode.clock;
- pixel_size = crtc->fb->bits_per_pixel / 8;
-
- /* Use the small buffer method to calculate plane watermark */
- entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
- tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, display->cacheline_size);
- *plane_wm = entries + display->guard_size;
- if (*plane_wm > (int)display->max_wm)
- *plane_wm = display->max_wm;
-
- /* Use the large buffer method to calculate cursor watermark */
- line_time_us = ((htotal * 1000) / clock);
- line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * 64 * pixel_size;
- tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
- if (*cursor_wm > (int)cursor->max_wm)
- *cursor_wm = (int)cursor->max_wm;
-
- return true;
-}
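
A rough standalone sketch of the small-buffer method used in g4x_compute_wm0()
above, with purely hypothetical numbers (100 MHz dot clock, 4 bytes per pixel,
400 ns latency, invented FIFO parameters); the helper only mirrors the
arithmetic and is not part of the driver:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Mirror of the small-buffer plane watermark: bytes fetched during the
 * latency window, padded by the TLB-miss term, expressed in cachelines
 * plus the guard entries (the driver additionally clamps to max_wm). */
static int small_buffer_wm(int clock_khz, int pixel_size, int latency_ns,
			   int fifo_size, int cacheline_size,
			   int guard_size, int hdisplay)
{
	int entries = ((clock_khz * pixel_size / 1000) * latency_ns) / 1000;
	int tlb_miss = fifo_size * cacheline_size - hdisplay * 8;

	if (tlb_miss > 0)
		entries += tlb_miss;
	return DIV_ROUND_UP(entries, cacheline_size) + guard_size;
}

int main(void)
{
	/* Illustrative values only: 100 MHz dot clock (in kHz), 4 bytes per
	 * pixel, 400 ns latency, 127-entry FIFO of 64-byte cachelines,
	 * 2 guard entries, 1280 active pixels. */
	printf("plane wm = %d\n",
	       small_buffer_wm(100000, 4, 400, 127, 64, 2, 1280));
	return 0;
}
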
-
-/*
- * Check the wm result.
- *
- * If any calculated watermark value is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- */
-static bool g4x_check_srwm(struct drm_device *dev,
- int display_wm, int cursor_wm,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor)
-{
- DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
- display_wm, cursor_wm);
-
- if (display_wm > display->max_wm) {
- DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
- display_wm, display->max_wm);
- return false;
- }
-
- if (cursor_wm > cursor->max_wm) {
- DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
- cursor_wm, cursor->max_wm);
- return false;
- }
-
- if (!(display_wm || cursor_wm)) {
- DRM_DEBUG_KMS("SR latency is 0, disabling\n");
- return false;
- }
-
- return true;
-}
-
-static bool g4x_compute_srwm(struct drm_device *dev,
- int plane,
- int latency_ns,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor,
- int *display_wm, int *cursor_wm)
-{
- struct drm_crtc *crtc;
- int hdisplay, htotal, pixel_size, clock;
- unsigned long line_time_us;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *display_wm = *cursor_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- hdisplay = crtc->mode.hdisplay;
- htotal = crtc->mode.htotal;
- clock = crtc->mode.clock;
- pixel_size = crtc->fb->bits_per_pixel / 8;
-
- line_time_us = (htotal * 1000) / clock;
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = hdisplay * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *display_wm = entries + display->guard_size;
-
- /* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * 64;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
-
- return g4x_check_srwm(dev,
- *display_wm, *cursor_wm,
- display, cursor);
-}
-
-#define single_plane_enabled(mask) is_power_of_2(mask)
-
-static void g4x_update_wm(struct drm_device *dev)
-{
- static const int sr_latency_ns = 12000;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
- int plane_sr, cursor_sr;
- unsigned int enabled = 0;
-
- if (g4x_compute_wm0(dev, 0,
- &g4x_wm_info, latency_ns,
- &g4x_cursor_wm_info, latency_ns,
- &planea_wm, &cursora_wm))
- enabled |= 1;
-
- if (g4x_compute_wm0(dev, 1,
- &g4x_wm_info, latency_ns,
- &g4x_cursor_wm_info, latency_ns,
- &planeb_wm, &cursorb_wm))
- enabled |= 2;
-
- plane_sr = cursor_sr = 0;
- if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- sr_latency_ns,
- &g4x_wm_info,
- &g4x_cursor_wm_info,
- &plane_sr, &cursor_sr))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- else
- I915_WRITE(FW_BLC_SELF,
- I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
- planea_wm, cursora_wm,
- planeb_wm, cursorb_wm,
- plane_sr, cursor_sr);
-
- I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- planea_wm);
- I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
- /* HPLL off in SR has some issues on G4x... disable it */
- I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
-}
-
-static void i965_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- int srwm = 1;
- int cursor_sr = 16;
-
- /* Calc sr entries for one plane configs */
- crtc = single_enabled_crtc(dev);
- if (crtc) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 12000;
- int clock = crtc->mode.clock;
- int htotal = crtc->mode.htotal;
- int hdisplay = crtc->mode.hdisplay;
- int pixel_size = crtc->fb->bits_per_pixel / 8;
- unsigned long line_time_us;
- int entries;
-
- line_time_us = ((htotal * 1000) / clock);
-
- /* Use ns/us then divide to preserve precision */
- entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * hdisplay;
- entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
- srwm = I965_FIFO_SIZE - entries;
- if (srwm < 0)
- srwm = 1;
- srwm &= 0x1ff;
- DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
- entries, srwm);
-
- entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * 64;
- entries = DIV_ROUND_UP(entries,
- i965_cursor_wm_info.cacheline_size);
- cursor_sr = i965_cursor_wm_info.fifo_size -
- (entries + i965_cursor_wm_info.guard_size);
-
- if (cursor_sr > i965_cursor_wm_info.max_wm)
- cursor_sr = i965_cursor_wm_info.max_wm;
-
- DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
- "cursor %d\n", srwm, cursor_sr);
-
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- } else {
- /* Turn off self refresh if both pipes are enabled */
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
- }
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
- srwm);
-
- /* 965 has limitations... */
- I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
- (8 << 16) | (8 << 8) | (8 << 0));
- I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
- /* update cursor SR watermark */
- I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
-}
-
-static void i9xx_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- const struct intel_watermark_params *wm_info;
- uint32_t fwater_lo;
- uint32_t fwater_hi;
- int cwm, srwm = 1;
- int fifo_size;
- int planea_wm, planeb_wm;
- struct drm_crtc *crtc, *enabled = NULL;
-
- if (IS_I945GM(dev))
- wm_info = &i945_wm_info;
- else if (!IS_GEN2(dev))
- wm_info = &i915_wm_info;
- else
- wm_info = &i855_wm_info;
-
- fifo_size = dev_priv->display.get_fifo_size(dev, 0);
- crtc = intel_get_crtc_for_plane(dev, 0);
- if (crtc->enabled && crtc->fb) {
- planea_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
- enabled = crtc;
- } else
- planea_wm = fifo_size - wm_info->guard_size;
-
- fifo_size = dev_priv->display.get_fifo_size(dev, 1);
- crtc = intel_get_crtc_for_plane(dev, 1);
- if (crtc->enabled && crtc->fb) {
- planeb_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
- if (enabled == NULL)
- enabled = crtc;
- else
- enabled = NULL;
- } else
- planeb_wm = fifo_size - wm_info->guard_size;
-
- DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
-
- /*
- * Overlay gets an aggressive default since video jitter is bad.
- */
- cwm = 2;
-
- /* Play safe and disable self-refresh before adjusting watermarks. */
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
-
- /* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev) && enabled) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 6000;
- int clock = enabled->mode.clock;
- int htotal = enabled->mode.htotal;
- int hdisplay = enabled->mode.hdisplay;
- int pixel_size = enabled->fb->bits_per_pixel / 8;
- unsigned long line_time_us;
- int entries;
-
- line_time_us = (htotal * 1000) / clock;
-
- /* Use ns/us then divide to preserve precision */
- entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * hdisplay;
- entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
- srwm = wm_info->fifo_size - entries;
- if (srwm < 0)
- srwm = 1;
-
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF,
- FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
- else if (IS_I915GM(dev))
- I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
- }
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
- planea_wm, planeb_wm, cwm, srwm);
-
- fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
- fwater_hi = (cwm & 0x1f);
-
- /* Set request length to 8 cachelines per fetch */
- fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
- fwater_hi = fwater_hi | (1 << 8);
-
- I915_WRITE(FW_BLC, fwater_lo);
- I915_WRITE(FW_BLC2, fwater_hi);
-
- if (HAS_FW_BLC(dev)) {
- if (enabled) {
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF,
- FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
- DRM_DEBUG_KMS("memory self refresh enabled\n");
- } else
- DRM_DEBUG_KMS("memory self refresh disabled\n");
- }
-}
-
-static void i830_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- uint32_t fwater_lo;
- int planea_wm;
-
- crtc = single_enabled_crtc(dev);
- if (crtc == NULL)
- return;
-
- planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
- dev_priv->display.get_fifo_size(dev, 0),
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
- fwater_lo = I915_READ(FW_BLC) & ~0xfff;
- fwater_lo |= (3<<8) | planea_wm;
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
-
- I915_WRITE(FW_BLC, fwater_lo);
-}
-
-#define ILK_LP0_PLANE_LATENCY 700
-#define ILK_LP0_CURSOR_LATENCY 1300
-
-/*
- * Check the wm result.
- *
- * If any calculated watermark value is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- */
-static bool ironlake_check_srwm(struct drm_device *dev, int level,
- int fbc_wm, int display_wm, int cursor_wm,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
- " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
-
- if (fbc_wm > SNB_FBC_MAX_SRWM) {
- DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
- fbc_wm, SNB_FBC_MAX_SRWM, level);
-
- /* fbc has its own way to disable FBC WM */
- I915_WRITE(DISP_ARB_CTL,
- I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
- return false;
- }
-
- if (display_wm > display->max_wm) {
- DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
- display_wm, SNB_DISPLAY_MAX_SRWM, level);
- return false;
- }
-
- if (cursor_wm > cursor->max_wm) {
- DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
- cursor_wm, SNB_CURSOR_MAX_SRWM, level);
- return false;
- }
-
- if (!(fbc_wm || display_wm || cursor_wm)) {
- DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
- return false;
- }
-
- return true;
-}
-
-/*
- * Compute the watermark values for WM[1-3].
- */
-static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
- int latency_ns,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor,
- int *fbc_wm, int *display_wm, int *cursor_wm)
-{
- struct drm_crtc *crtc;
- unsigned long line_time_us;
- int hdisplay, htotal, pixel_size, clock;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *fbc_wm = *display_wm = *cursor_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- hdisplay = crtc->mode.hdisplay;
- htotal = crtc->mode.htotal;
- clock = crtc->mode.clock;
- pixel_size = crtc->fb->bits_per_pixel / 8;
-
- line_time_us = (htotal * 1000) / clock;
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = hdisplay * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *display_wm = entries + display->guard_size;
-
- /*
- * Spec says:
- * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
- */
- *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
-
- /* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * 64;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
-
- return ironlake_check_srwm(dev, level,
- *fbc_wm, *display_wm, *cursor_wm,
- display, cursor);
-}
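
For the self-refresh path, a similarly hedged sketch of the line-based
calculation in ironlake_compute_srwm() above, again with invented numbers;
the FBC term simply restates the "Spec says" formula quoted in the code:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MIN(a, b)          ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Illustrative values only: 1920 active / 2200 total pixels per line,
	 * 148500 kHz dot clock, 4 bytes per pixel, 2000 ns latency,
	 * 64-byte cachelines, 2 guard entries. */
	int hdisplay = 1920, htotal = 2200, clock = 148500;
	int pixel_size = 4, latency_ns = 2000;
	int cacheline_size = 64, guard_size = 2;

	long line_time_us = (htotal * 1000) / clock;	/* 14 us (integer division) */
	int line_count = (latency_ns / line_time_us + 1000) / 1000;
	int line_size = hdisplay * pixel_size;

	/* Primary plane: minimum of the small- and large-buffer methods. */
	int small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	int large = line_count * line_size;
	int display_wm = DIV_ROUND_UP(MIN(small, large), cacheline_size)
			 + guard_size;

	/* FBC WM = ((Final Primary WM * 64) / bytes per line) + 2 */
	int fbc_wm = DIV_ROUND_UP(display_wm * 64, line_size) + 2;

	printf("display_wm=%d fbc_wm=%d\n", display_wm, fbc_wm);
	return 0;
}
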
-
-static void ironlake_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int fbc_wm, plane_wm, cursor_wm;
- unsigned int enabled;
-
- enabled = 0;
- if (g4x_compute_wm0(dev, 0,
- &ironlake_display_wm_info,
- ILK_LP0_PLANE_LATENCY,
- &ironlake_cursor_wm_info,
- ILK_LP0_CURSOR_LATENCY,
- &plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEA_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
- DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
- " plane %d, " "cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1;
- }
-
- if (g4x_compute_wm0(dev, 1,
- &ironlake_display_wm_info,
- ILK_LP0_PLANE_LATENCY,
- &ironlake_cursor_wm_info,
- ILK_LP0_CURSOR_LATENCY,
- &plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEB_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
- DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 2;
- }
-
- /*
- * Calculate and update the self-refresh watermark only when one
- * display plane is used.
- */
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- if (!single_plane_enabled(enabled))
- return;
- enabled = ffs(enabled) - 1;
-
- /* WM1 */
- if (!ironlake_compute_srwm(dev, 1, enabled,
- ILK_READ_WM1_LATENCY() * 500,
- &ironlake_display_srwm_info,
- &ironlake_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM1_LP_ILK,
- WM1_LP_SR_EN |
- (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM2 */
- if (!ironlake_compute_srwm(dev, 2, enabled,
- ILK_READ_WM2_LATENCY() * 500,
- &ironlake_display_srwm_info,
- &ironlake_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM2_LP_ILK,
- WM2_LP_EN |
- (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /*
- * WM3 is unsupported on ILK, probably because we don't have latency
- * data for that power state
- */
-}
-
-void sandybridge_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
- u32 val;
- int fbc_wm, plane_wm, cursor_wm;
- unsigned int enabled;
-
- enabled = 0;
- if (g4x_compute_wm0(dev, 0,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEA_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEA_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
- " plane %d, " "cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1;
- }
-
- if (g4x_compute_wm0(dev, 1,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEB_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEB_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 2;
- }
-
- /* IVB has 3 pipes */
- if (IS_IVYBRIDGE(dev) &&
- g4x_compute_wm0(dev, 2,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEC_IVB);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEC_IVB, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 3;
- }
-
- /*
- * Calculate and update the self-refresh watermark only when one
- * display plane is used.
- *
- * SNB supports 3 levels of watermarks.
- *
- * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
- * and disabled in descending order
- *
- */
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- if (!single_plane_enabled(enabled) ||
- dev_priv->sprite_scaling_enabled)
- return;
- enabled = ffs(enabled) - 1;
-
- /* WM1 */
- if (!ironlake_compute_srwm(dev, 1, enabled,
- SNB_READ_WM1_LATENCY() * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM1_LP_ILK,
- WM1_LP_SR_EN |
- (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM2 */
- if (!ironlake_compute_srwm(dev, 2, enabled,
- SNB_READ_WM2_LATENCY() * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM2_LP_ILK,
- WM2_LP_EN |
- (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM3 */
- if (!ironlake_compute_srwm(dev, 3, enabled,
- SNB_READ_WM3_LATENCY() * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM3_LP_ILK,
- WM3_LP_EN |
- (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-}
-
-static bool
-sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
- uint32_t sprite_width, int pixel_size,
- const struct intel_watermark_params *display,
- int display_latency_ns, int *sprite_wm)
-{
- struct drm_crtc *crtc;
- int clock;
- int entries, tlb_miss;
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
- *sprite_wm = display->guard_size;
- return false;
- }
-
- clock = crtc->mode.clock;
-
- /* Use the small buffer method to calculate the sprite watermark */
- entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
- tlb_miss = display->fifo_size*display->cacheline_size -
- sprite_width * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, display->cacheline_size);
- *sprite_wm = entries + display->guard_size;
- if (*sprite_wm > (int)display->max_wm)
- *sprite_wm = display->max_wm;
-
- return true;
-}
-
-static bool
-sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
- uint32_t sprite_width, int pixel_size,
- const struct intel_watermark_params *display,
- int latency_ns, int *sprite_wm)
-{
- struct drm_crtc *crtc;
- unsigned long line_time_us;
- int clock;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *sprite_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- clock = crtc->mode.clock;
- if (!clock) {
- *sprite_wm = 0;
- return false;
- }
-
- line_time_us = (sprite_width * 1000) / clock;
- if (!line_time_us) {
- *sprite_wm = 0;
- return false;
- }
-
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = sprite_width * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *sprite_wm = entries + display->guard_size;
-
- return *sprite_wm > 0x3ff ? false : true;
-}
-
-static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
- uint32_t sprite_width, int pixel_size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
- u32 val;
- int sprite_wm, reg;
- int ret;
-
- switch (pipe) {
- case 0:
- reg = WM0_PIPEA_ILK;
- break;
- case 1:
- reg = WM0_PIPEB_ILK;
- break;
- case 2:
- reg = WM0_PIPEC_IVB;
- break;
- default:
- return; /* bad pipe */
- }
-
- ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
- &sandybridge_display_wm_info,
- latency, &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
- pipe);
- return;
- }
-
- val = I915_READ(reg);
- val &= ~WM0_PIPE_SPRITE_MASK;
- I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
- DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
-
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- SNB_READ_WM1_LATENCY() * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
- pipe);
- return;
- }
- I915_WRITE(WM1S_LP_ILK, sprite_wm);
-
- /* Only IVB has two more LP watermarks for sprite */
- if (!IS_IVYBRIDGE(dev))
- return;
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- SNB_READ_WM2_LATENCY() * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
- pipe);
- return;
- }
- I915_WRITE(WM2S_LP_IVB, sprite_wm);
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- SNB_READ_WM3_LATENCY() * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
- pipe);
- return;
- }
- I915_WRITE(WM3S_LP_IVB, sprite_wm);
-}
-
-/**
- * intel_update_watermarks - update FIFO watermark values based on current modes
- *
- * Calculate watermark values for the various WM regs based on current mode
- * and plane configuration.
- *
- * There are several cases to deal with here:
- * - normal (i.e. non-self-refresh)
- * - self-refresh (SR) mode
- * - lines are large relative to FIFO size (buffer can hold up to 2)
- * - lines are small relative to FIFO size (buffer can hold more than 2
- * lines), so need to account for TLB latency
- *
- * The normal calculation is:
- * watermark = dotclock * bytes per pixel * latency
- * where latency is platform & configuration dependent (we assume pessimal
- * values here).
- *
- * The SR calculation is:
- * watermark = (trunc(latency/line time)+1) * surface width *
- * bytes per pixel
- * where
- * line time = htotal / dotclock
- * surface width = hdisplay for normal plane and 64 for cursor
- * and latency is assumed to be high, as above.
- *
- * The final value programmed to the register should always be rounded up,
- * and include an extra 2 entries to account for clock crossings.
- *
- * We don't use the sprite, so we can ignore that. And on Crestline we have
- * to set the non-SR watermarks to 8.
- */
-static void intel_update_watermarks(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->display.update_wm)
- dev_priv->display.update_wm(dev);
-}
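
To make the two formulas in the comment block above concrete, a worked
example with purely hypothetical numbers (a 1920x1080 mode with 2200 total
pixels per line, a 148.5 MHz dot clock, 4 bytes per pixel, 30 us assumed
latency):

  normal: watermark = dotclock * bytes per pixel * latency
                    = 148.5 Mpixel/s * 4 B/pixel * 30 us ~= 17.8 kB (17820 bytes)

  SR:     line time = htotal / dotclock = 2200 / 148.5 MHz ~= 14.8 us
          watermark = (trunc(latency / line time) + 1) * surface width * bytes per pixel
                    = (trunc(30 / 14.8) + 1) * 1920 * 4 = 3 * 7680 = 23040 bytes

As the comment notes, the value actually programmed would then be rounded up
and padded by two extra entries to account for clock crossings.
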
-
-void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
- uint32_t sprite_width, int pixel_size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->display.update_sprite_wm)
- dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
- pixel_size);
-}
-
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_panel_use_ssc >= 0)
@@ -5143,6 +3742,222 @@ static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
}
}
+static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 temp;
+
+ temp = I915_READ(LVDS);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (pipe == 1) {
+ temp |= LVDS_PIPEB_SELECT;
+ } else {
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+ /* set the corresponding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (clock->p2 == 7)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes.
+ */
+ /* set the dithering flag on LVDS as needed */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (dev_priv->lvds_dither)
+ temp |= LVDS_ENABLE_DITHER;
+ else
+ temp &= ~LVDS_ENABLE_DITHER;
+ }
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+ I915_WRITE(LVDS, temp);
+}
+
+static void i9xx_update_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock, intel_clock_t *reduced_clock,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll;
+ bool is_sdvo;
+
+ is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ if (is_sdvo) {
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (pixel_multiplier > 1) {
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ }
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ dpll |= DPLL_DVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ if (IS_PINEVIEW(dev))
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+ else {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (IS_G4X(dev) && reduced_clock)
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ }
+ switch (clock->p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+ if (INTEL_INFO(dev)->gen >= 4)
+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+ if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ /* XXX: just matching BIOS for now */
+ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ intel_update_lvds(crtc, clock, adjusted_mode);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ u32 temp = 0;
+ if (is_sdvo) {
+ temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (temp > 1)
+ temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ else
+ temp = 0;
+ }
+ I915_WRITE(DPLL_MD(pipe), temp);
+ } else {
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(DPLL(pipe), dpll);
+ }
+}
+
+static void i8xx_update_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll;
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ } else {
+ if (clock->p1 == 2)
+ dpll |= PLL_P1_DIVIDE_BY_TWO;
+ else
+ dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (clock->p2 == 4)
+ dpll |= PLL_P2_DIVIDE_BY_4;
+ }
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ /* XXX: just matching BIOS for now */
+ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ intel_update_lvds(crtc, clock, adjusted_mode);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(DPLL(pipe), dpll);
+}
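
The P1 post divider above is programmed as a one-hot bitmask rather than a
binary value, which is what the "(1 << (clock->p1 - 1)) << SHIFT" expressions
in i9xx_update_pll()/i8xx_update_pll() encode. A tiny standalone illustration;
the shift value and 8-bit field width are placeholders, not the real register
layout:

#include <stdio.h>

#define EXAMPLE_P1_SHIFT 16	/* placeholder, not the hardware constant */

static unsigned int p1_to_field(int p1)
{
	/* One-hot encoding: divider value N sets bit N-1 of the field. */
	return (1u << (p1 - 1)) << EXAMPLE_P1_SHIFT;
}

int main(void)
{
	int p1;

	for (p1 = 1; p1 <= 8; p1++)
		printf("p1=%d -> 0x%08x\n", p1, p1_to_field(p1));
	return 0;
}
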
+
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -5156,15 +3971,13 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dpll, dspcntr, pipeconf, vsyncshift;
- bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
+ u32 dspcntr, pipeconf, vsyncshift;
+ bool ok, has_reduced_clock = false, is_sdvo = false;
+ bool is_lvds = false, is_tv = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
- u32 temp;
- u32 lvds_sync = 0;
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
@@ -5180,15 +3993,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (encoder->needs_tv_clock)
is_tv = true;
break;
- case INTEL_OUTPUT_DVO:
- is_dvo = true;
- break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
- case INTEL_OUTPUT_ANALOG:
- is_crt = true;
- break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
@@ -5235,71 +4042,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
&reduced_clock : NULL);
- dpll = DPLL_VGA_MODE_DIS;
-
- if (!IS_GEN2(dev)) {
- if (is_lvds)
- dpll |= DPLLB_MODE_LVDS;
- else
- dpll |= DPLLB_MODE_DAC_SERIAL;
- if (is_sdvo) {
- int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
- if (pixel_multiplier > 1) {
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- }
- dpll |= DPLL_DVO_HIGH_SPEED;
- }
- if (is_dp)
- dpll |= DPLL_DVO_HIGH_SPEED;
-
- /* compute bitmask from p1 value */
- if (IS_PINEVIEW(dev))
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
- else {
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (IS_G4X(dev) && has_reduced_clock)
- dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- }
- switch (clock.p2) {
- case 5:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
- break;
- case 7:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
- break;
- case 10:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
- break;
- case 14:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
- break;
- }
- if (INTEL_INFO(dev)->gen >= 4)
- dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
- } else {
- if (is_lvds) {
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- } else {
- if (clock.p1 == 2)
- dpll |= PLL_P1_DIVIDE_BY_TWO;
- else
- dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (clock.p2 == 4)
- dpll |= PLL_P2_DIVIDE_BY_4;
- }
- }
-
- if (is_sdvo && is_tv)
- dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (is_tv)
- /* XXX: just matching BIOS for now */
- /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
- dpll |= 3;
- else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ if (IS_GEN2(dev))
+ i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
else
- dpll |= PLL_REF_INPUT_DREFCLK;
+ i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
/* setup pipeconf */
pipeconf = I915_READ(PIPECONF(pipe));
@@ -5336,97 +4084,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
}
}
- dpll |= DPLL_VCO_ENABLE;
-
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
- I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
-
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- /* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
- if (is_lvds) {
- temp = I915_READ(LVDS);
- temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (pipe == 1) {
- temp |= LVDS_PIPEB_SELECT;
- } else {
- temp &= ~LVDS_PIPEB_SELECT;
- }
- /* set the corresponding LVDS_BORDER bit */
- temp |= dev_priv->lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether we're going to
- * set the DPLLs for dual-channel mode or not.
- */
- if (clock.p2 == 7)
- temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-
- /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
- * appropriately here, but we need to look more thoroughly into how
- * panels behave in the two modes.
- */
- /* set the dithering flag on LVDS as needed */
- if (INTEL_INFO(dev)->gen >= 4) {
- if (dev_priv->lvds_dither)
- temp |= LVDS_ENABLE_DITHER;
- else
- temp &= ~LVDS_ENABLE_DITHER;
- }
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- lvds_sync |= LVDS_HSYNC_POLARITY;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- lvds_sync |= LVDS_VSYNC_POLARITY;
- if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
- != lvds_sync) {
- char flags[2] = "-+";
- DRM_INFO("Changing LVDS panel from "
- "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
- flags[!(temp & LVDS_HSYNC_POLARITY)],
- flags[!(temp & LVDS_VSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- temp |= lvds_sync;
- }
- I915_WRITE(LVDS, temp);
- }
-
- if (is_dp) {
- intel_dp_set_m_n(crtc, mode, adjusted_mode);
- }
-
- I915_WRITE(DPLL(pipe), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- if (INTEL_INFO(dev)->gen >= 4) {
- temp = 0;
- if (is_sdvo) {
- temp = intel_mode_get_pixel_multiplier(adjusted_mode);
- if (temp > 1)
- temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- else
- temp = 0;
- }
- I915_WRITE(DPLL_MD(pipe), temp);
- } else {
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(DPLL(pipe), dpll);
- }
-
if (HAS_PIPE_CXSR(dev)) {
if (intel_crtc->lowfreq_avail) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
@@ -5492,7 +4152,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
- intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -5668,17 +4327,16 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- struct intel_encoder *has_edp_encoder = NULL;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_encoder *encoder;
+ struct intel_encoder *encoder, *edp_encoder = NULL;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
u32 temp;
- u32 lvds_sync = 0;
int target_clock, pixel_multiplier, lane, link_bw, factor;
unsigned int pipe_bpp;
bool dither;
+ bool is_cpu_edp = false, is_pch_edp = false;
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
@@ -5704,7 +4362,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
- has_edp_encoder = encoder;
+ is_dp = true;
+ if (intel_encoder_is_pch_edp(&encoder->base))
+ is_pch_edp = true;
+ else
+ is_cpu_edp = true;
+ edp_encoder = encoder;
break;
}
@@ -5767,15 +4430,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
lane = 0;
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
- if (has_edp_encoder &&
- !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (is_cpu_edp) {
target_clock = mode->clock;
- intel_edp_link_config(has_edp_encoder,
- &lane, &link_bw);
+ intel_edp_link_config(edp_encoder, &lane, &link_bw);
} else {
/* [e]DP over FDI requires target mode clock
instead of link clock */
- if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+ if (is_dp)
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
@@ -5866,7 +4527,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
dpll |= DPLL_DVO_HIGH_SPEED;
}
- if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+ if (is_dp && !is_cpu_edp)
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -5909,30 +4570,22 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
- /* PCH eDP needs FDI, but CPU eDP does not */
- if (!intel_crtc->no_pll) {
- if (!has_edp_encoder ||
- intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- I915_WRITE(PCH_FP0(pipe), fp);
- I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
-
- POSTING_READ(PCH_DPLL(pipe));
- udelay(150);
- }
- } else {
- if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
- fp == I915_READ(PCH_FP0(0))) {
- intel_crtc->use_pll_a = true;
- DRM_DEBUG_KMS("using pipe a dpll\n");
- } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
- fp == I915_READ(PCH_FP0(1))) {
- intel_crtc->use_pll_a = false;
- DRM_DEBUG_KMS("using pipe b dpll\n");
- } else {
- DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
+ * pre-Haswell/LPT generation */
+ if (HAS_PCH_LPT(dev)) {
+ DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
+ pipe);
+ } else if (!is_cpu_edp) {
+ struct intel_pch_pll *pll;
+
+ pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
+ pipe);
return -EINVAL;
}
- }
+ } else
+ intel_put_pch_pll(intel_crtc);
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
@@ -5965,22 +4618,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- lvds_sync |= LVDS_HSYNC_POLARITY;
+ temp |= LVDS_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- lvds_sync |= LVDS_VSYNC_POLARITY;
- if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
- != lvds_sync) {
- char flags[2] = "-+";
- DRM_INFO("Changing LVDS panel from "
- "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
- flags[!(temp & LVDS_HSYNC_POLARITY)],
- flags[!(temp & LVDS_VSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- temp |= lvds_sync;
- }
+ temp |= LVDS_VSYNC_POLARITY;
I915_WRITE(PCH_LVDS, temp);
}
@@ -5990,7 +4632,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
pipeconf |= PIPECONF_DITHER_EN;
pipeconf |= PIPECONF_DITHER_TYPE_SP;
}
- if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (is_dp && !is_cpu_edp) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
/* For non-DP output, clear any trans DP clock recovery setting.*/
@@ -6000,13 +4642,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(TRANSDPLINK_N1(pipe), 0);
}
- if (!intel_crtc->no_pll &&
- (!has_edp_encoder ||
- intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
- I915_WRITE(PCH_DPLL(pipe), dpll);
+ if (intel_crtc->pch_pll) {
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
/* Wait for the clocks to stabilize. */
- POSTING_READ(PCH_DPLL(pipe));
+ POSTING_READ(intel_crtc->pch_pll->pll_reg);
udelay(150);
/* The pixel multiplier can only be updated once the
@@ -6014,20 +4654,20 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
*
* So write it again.
*/
- I915_WRITE(PCH_DPLL(pipe), dpll);
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
}
intel_crtc->lowfreq_avail = false;
- if (!intel_crtc->no_pll) {
+ if (intel_crtc->pch_pll) {
if (is_lvds && has_reduced_clock && i915_powersave) {
- I915_WRITE(PCH_FP1(pipe), fp2);
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
intel_crtc->lowfreq_avail = true;
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
}
} else {
- I915_WRITE(PCH_FP1(pipe), fp);
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
@@ -6080,10 +4720,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
- if (has_edp_encoder &&
- !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
- }
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
@@ -6097,6 +4735,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_update_watermarks(dev);
+ intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
return ret;
}
@@ -6451,7 +5091,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
if (!visible && !intel_crtc->cursor_visible)
return;
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
I915_WRITE(CURPOS_IVB(pipe), pos);
ivb_update_cursor(crtc, base);
} else {
@@ -6461,9 +5101,6 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
else
i9xx_update_cursor(crtc, base);
}
-
- if (visible)
- intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
}
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
@@ -6987,7 +5624,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
return mode;
}
@@ -7086,7 +5722,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
int pipe = intel_crtc->pipe;
int dpll_reg = DPLL(pipe);
- u32 dpll;
+ int dpll;
DRM_DEBUG_DRIVER("downclocking LVDS\n");
@@ -7100,6 +5736,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
}
+
}
/**
@@ -7158,12 +5795,16 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (!dev_priv->busy)
+ if (!dev_priv->busy) {
+ intel_sanitize_pm(dev);
dev_priv->busy = true;
- else
+ } else
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ if (obj == NULL)
+ return;
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
continue;
@@ -7336,18 +5977,19 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long offset;
u32 flip_mask;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
- ret = BEGIN_LP_RING(6);
+ ret = intel_ring_begin(ring, 6);
if (ret)
- goto out;
+ goto err_unpin;
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
@@ -7356,15 +5998,19 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0]);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(0); /* aux display base address, unused */
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset + offset);
+ intel_ring_emit(ring, 0); /* aux display base address, unused */
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
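
The flip paths here are converted from the legacy BEGIN_LP_RING()/OUT_RING()
macros to explicit intel_ring_begin()/intel_ring_emit()/intel_ring_advance()
calls, where the count passed to begin must match the number of dwords
emitted before advance. A toy user-space model of that contract (nothing
below is driver code; the struct and functions are invented for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct toy_ring {
	uint32_t dwords[64];
	int tail;
	int reserved;	/* dwords promised to toy_ring_begin() */
};

static int toy_ring_begin(struct toy_ring *ring, int n)
{
	if (ring->tail + n > 64)
		return -1;	/* real code would wait for ring space */
	ring->reserved = n;
	return 0;
}

static void toy_ring_emit(struct toy_ring *ring, uint32_t dword)
{
	assert(ring->reserved > 0);	/* emit count must not exceed begin */
	ring->dwords[ring->tail++] = dword;
	ring->reserved--;
}

static void toy_ring_advance(struct toy_ring *ring)
{
	assert(ring->reserved == 0);	/* all promised dwords were written */
	printf("submitted up to tail=%d\n", ring->tail);
}

int main(void)
{
	struct toy_ring ring = { .tail = 0 };

	if (toy_ring_begin(&ring, 3) == 0) {
		toy_ring_emit(&ring, 0x1);	/* e.g. a command dword */
		toy_ring_emit(&ring, 0x2);	/* e.g. an address */
		toy_ring_emit(&ring, 0x0);	/* e.g. a padding NOOP */
		toy_ring_advance(&ring);
	}
	return 0;
}
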
@@ -7377,33 +6023,38 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long offset;
u32 flip_mask;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
- ret = BEGIN_LP_RING(6);
+ ret = intel_ring_begin(ring, 6);
if (ret)
- goto out;
+ goto err_unpin;
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_DISPLAY_FLIP_I915 |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0]);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(MI_NOOP);
-
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset + offset);
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7415,24 +6066,25 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
- ret = BEGIN_LP_RING(4);
+ ret = intel_ring_begin(ring, 4);
if (ret)
- goto out;
+ goto err_unpin;
/* i965+ uses the linear or tiled offsets from the
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0]);
- OUT_RING(obj->gtt_offset | obj->tiling_mode);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -7440,9 +6092,13 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, pf | pipesrc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7453,21 +6109,22 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
uint32_t pf, pipesrc;
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
- ret = BEGIN_LP_RING(4);
+ ret = intel_ring_begin(ring, 4);
if (ret)
- goto out;
+ goto err_unpin;
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0] | obj->tiling_mode);
- OUT_RING(obj->gtt_offset);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+ intel_ring_emit(ring, obj->gtt_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -7477,9 +6134,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, pf | pipesrc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7501,18 +6162,22 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
ret = intel_ring_begin(ring, 4);
if (ret)
- goto out;
+ goto err_unpin;
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, (obj->gtt_offset));
intel_ring_emit(ring, (MI_NOOP));
intel_ring_advance(ring);
-out:
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7589,6 +6254,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup_pending;
intel_disable_fbc(dev);
+ intel_mark_busy(dev, obj);
mutex_unlock(&dev->struct_mutex);
trace_i915_flip_request(intel_crtc->plane, obj);
@@ -7617,10 +6283,11 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg, val;
+ int i;
/* Clear any frame start delays used for debugging left by the BIOS */
- for_each_pipe(pipe) {
- reg = PIPECONF(pipe);
+ for_each_pipe(i) {
+ reg = PIPECONF(i);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
}
@@ -7690,6 +6357,23 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
};
+static void intel_pch_pll_init(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ if (dev_priv->num_pch_pll == 0) {
+ DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
+ return;
+ }
+
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
+ dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
+ dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
+ dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
+ }
+}
+
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -7727,8 +6411,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->bpp = 24; /* default for pre-Ironlake */
if (HAS_PCH_SPLIT(dev)) {
- if (pipe == 2 && IS_IVYBRIDGE(dev))
- intel_crtc->no_pll = true;
intel_helper_funcs.prepare = ironlake_crtc_prepare;
intel_helper_funcs.commit = ironlake_crtc_commit;
} else {
@@ -7747,15 +6429,12 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
DRM_MODE_OBJECT_CRTC);
@@ -7828,12 +6507,31 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_crt_init(dev);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_HASWELL(dev)) {
+ int found;
+
+ /* Haswell uses DDI functions to detect digital outputs */
+ found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
+ /* DDI A only supports eDP */
+ if (found)
+ intel_ddi_init(dev, PORT_A);
+
+ /* DDI B, C and D detection is indicated by the SFUSE_STRAP
+ * register */
+ found = I915_READ(SFUSE_STRAP);
+
+ if (found & SFUSE_STRAP_DDIB_DETECTED)
+ intel_ddi_init(dev, PORT_B);
+ if (found & SFUSE_STRAP_DDIC_DETECTED)
+ intel_ddi_init(dev, PORT_C);
+ if (found & SFUSE_STRAP_DDID_DETECTED)
+ intel_ddi_init(dev, PORT_D);
+ } else if (HAS_PCH_SPLIT(dev)) {
int found;
if (I915_READ(HDMIB) & PORT_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
- found = intel_sdvo_init(dev, PCH_SDVOB);
+ found = intel_sdvo_init(dev, PCH_SDVOB, true);
if (!found)
intel_hdmi_init(dev, HDMIB);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -7857,7 +6555,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
- found = intel_sdvo_init(dev, SDVOB);
+ found = intel_sdvo_init(dev, SDVOB, true);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, SDVOB);
@@ -7873,7 +6571,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOC\n");
- found = intel_sdvo_init(dev, SDVOC);
+ found = intel_sdvo_init(dev, SDVOC, false);
}
if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
@@ -8002,882 +6700,6 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.output_poll_changed = intel_fb_output_poll_changed,
};
-static struct drm_i915_gem_object *
-intel_alloc_context_page(struct drm_device *dev)
-{
- struct drm_i915_gem_object *ctx;
- int ret;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ctx = i915_gem_alloc_object(dev, 4096);
- if (!ctx) {
- DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
- return NULL;
- }
-
- ret = i915_gem_object_pin(ctx, 4096, true);
- if (ret) {
- DRM_ERROR("failed to pin power context: %d\n", ret);
- goto err_unref;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
- if (ret) {
- DRM_ERROR("failed to set-domain on power context: %d\n", ret);
- goto err_unpin;
- }
-
- return ctx;
-
-err_unpin:
- i915_gem_object_unpin(ctx);
-err_unref:
- drm_gem_object_unreference(&ctx->base);
- mutex_unlock(&dev->struct_mutex);
- return NULL;
-}
-
-bool ironlake_set_drps(struct drm_device *dev, u8 val)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u16 rgvswctl;
-
- rgvswctl = I915_READ16(MEMSWCTL);
- if (rgvswctl & MEMCTL_CMD_STS) {
- DRM_DEBUG("gpu busy, RCS change rejected\n");
- return false; /* still busy with another command */
- }
-
- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
- (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- I915_WRITE16(MEMSWCTL, rgvswctl);
- POSTING_READ16(MEMSWCTL);
-
- rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE16(MEMSWCTL, rgvswctl);
-
- return true;
-}
-
-void ironlake_enable_drps(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rgvmodectl = I915_READ(MEMMODECTL);
- u8 fmax, fmin, fstart, vstart;
-
- /* Enable temp reporting */
- I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
- I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
-
- /* 100ms RC evaluation intervals */
- I915_WRITE(RCUPEI, 100000);
- I915_WRITE(RCDNEI, 100000);
-
- /* Set max/min thresholds to 90ms and 80ms respectively */
- I915_WRITE(RCBMAXAVG, 90000);
- I915_WRITE(RCBMINAVG, 80000);
-
- I915_WRITE(MEMIHYST, 1);
-
- /* Set up min, max, and cur for interrupt handling */
- fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
- fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
- fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
- MEMMODE_FSTART_SHIFT;
-
- vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
- PXVFREQ_PX_SHIFT;
-
- dev_priv->fmax = fmax; /* IPS callback will increase this */
- dev_priv->fstart = fstart;
-
- dev_priv->max_delay = fstart;
- dev_priv->min_delay = fmin;
- dev_priv->cur_delay = fstart;
-
- DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
- fmax, fmin, fstart);
-
- I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
-
- /*
- * Interrupts will be enabled in ironlake_irq_postinstall
- */
-
- I915_WRITE(VIDSTART, vstart);
- POSTING_READ(VIDSTART);
-
- rgvmodectl |= MEMMODE_SWMODE_EN;
- I915_WRITE(MEMMODECTL, rgvmodectl);
-
- if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
- DRM_ERROR("stuck trying to change perf mode\n");
- msleep(1);
-
- ironlake_set_drps(dev, fstart);
-
- dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
- I915_READ(0x112e0);
- dev_priv->last_time1 = jiffies_to_msecs(jiffies);
- dev_priv->last_count2 = I915_READ(0x112f4);
- getrawmonotonic(&dev_priv->last_time2);
-}
-
-void ironlake_disable_drps(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u16 rgvswctl = I915_READ16(MEMSWCTL);
-
- /* Ack interrupts, disable EFC interrupt */
- I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
- I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
- I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
- I915_WRITE(DEIIR, DE_PCU_EVENT);
- I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
-
- /* Go back to the starting frequency */
- ironlake_set_drps(dev, dev_priv->fstart);
- msleep(1);
- rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE(MEMSWCTL, rgvswctl);
- msleep(1);
-
-}
-
-void gen6_set_rps(struct drm_device *dev, u8 val)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 swreq;
-
- swreq = (val & 0x3ff) << 25;
- I915_WRITE(GEN6_RPNSWREQ, swreq);
-}
-
-void gen6_disable_rps(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
- I915_WRITE(GEN6_PMIER, 0);
- /* Complete PM interrupt masking here doesn't race with the rps work
- * item again unmasking PM interrupts because that is using a different
- * register (PMIMR) to mask PM interrupts. The only risk is in leaving
- * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
-
- spin_lock_irq(&dev_priv->rps_lock);
- dev_priv->pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps_lock);
-
- I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
-}
-
-static unsigned long intel_pxfreq(u32 vidfreq)
-{
- unsigned long freq;
- int div = (vidfreq & 0x3f0000) >> 16;
- int post = (vidfreq & 0x3000) >> 12;
- int pre = (vidfreq & 0x7);
-
- if (!pre)
- return 0;
-
- freq = ((div * 133333) / ((1<<post) * pre));
-
- return freq;
-}
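The PXVFREQ decode in intel_pxfreq() above packs a divider, a post-divider and a pre-divider into one register value. A standalone sketch of the same arithmetic, assuming (this is an assumption, not stated in the patch) that the 133333 constant is the 133.33 MHz reference clock expressed in kHz:

    #include <stdio.h>

    /* Mirrors the bitfield decode of the removed intel_pxfreq(). */
    static unsigned long pxfreq(unsigned int vidfreq)
    {
        int div  = (vidfreq & 0x3f0000) >> 16; /* bits 21:16 */
        int post = (vidfreq & 0x3000) >> 12;   /* bits 13:12 */
        int pre  = vidfreq & 0x7;              /* bits 2:0   */

        if (!pre)
            return 0;
        return (div * 133333UL) / ((1 << post) * pre);
    }

    int main(void)
    {
        /* div = 20, post = 1, pre = 1 -> 20 * 133333 / 2 = 1333330 (~1.33 GHz) */
        printf("%lu\n", pxfreq(0x141001));
        return 0;
    }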
-
-void intel_init_emon(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 lcfuse;
- u8 pxw[16];
- int i;
-
- /* Disable to program */
- I915_WRITE(ECR, 0);
- POSTING_READ(ECR);
-
- /* Program energy weights for various events */
- I915_WRITE(SDEW, 0x15040d00);
- I915_WRITE(CSIEW0, 0x007f0000);
- I915_WRITE(CSIEW1, 0x1e220004);
- I915_WRITE(CSIEW2, 0x04000004);
-
- for (i = 0; i < 5; i++)
- I915_WRITE(PEW + (i * 4), 0);
- for (i = 0; i < 3; i++)
- I915_WRITE(DEW + (i * 4), 0);
-
- /* Program P-state weights to account for frequency power adjustment */
- for (i = 0; i < 16; i++) {
- u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
- unsigned long freq = intel_pxfreq(pxvidfreq);
- unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
- PXVFREQ_PX_SHIFT;
- unsigned long val;
-
- val = vid * vid;
- val *= (freq / 1000);
- val *= 255;
- val /= (127*127*900);
- if (val > 0xff)
- DRM_ERROR("bad pxval: %ld\n", val);
- pxw[i] = val;
- }
- /* Render standby states get 0 weight */
- pxw[14] = 0;
- pxw[15] = 0;
-
- for (i = 0; i < 4; i++) {
- u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
- (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
- I915_WRITE(PXW + (i * 4), val);
- }
-
- /* Adjust magic regs to magic values (more experimental results) */
- I915_WRITE(OGW0, 0);
- I915_WRITE(OGW1, 0);
- I915_WRITE(EG0, 0x00007f00);
- I915_WRITE(EG1, 0x0000000e);
- I915_WRITE(EG2, 0x000e0000);
- I915_WRITE(EG3, 0x68000300);
- I915_WRITE(EG4, 0x42000000);
- I915_WRITE(EG5, 0x00140031);
- I915_WRITE(EG6, 0);
- I915_WRITE(EG7, 0);
-
- for (i = 0; i < 8; i++)
- I915_WRITE(PXWL + (i * 4), 0);
-
- /* Enable PMON + select events */
- I915_WRITE(ECR, 0x80000019);
-
- lcfuse = I915_READ(LCFUSE02);
-
- dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
-}
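The weight loop in intel_init_emon() above boils each (voltage id, frequency) pair down to one byte for the PXW registers. A minimal sketch of the same scaling with invented inputs, to make the arithmetic concrete:

    #include <stdio.h>

    /* Same formula as the removed loop: vid^2 * (freq / 1000) * 255 / (127*127*900).
     * freq is divided by 1000 first, presumably converting kHz to MHz. */
    static unsigned long pstate_weight(unsigned long vid, unsigned long freq)
    {
        unsigned long val = vid * vid;

        val *= freq / 1000;
        val *= 255;
        val /= 127 * 127 * 900;
        return val; /* the driver warns if this does not fit in a byte */
    }

    int main(void)
    {
        /* Invented example: vid = 100, freq = 1333330 -> weight 234 */
        printf("%lu\n", pstate_weight(100, 1333330));
        return 0;
    }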
-
-static int intel_enable_rc6(struct drm_device *dev)
-{
- /*
- * Respect the kernel parameter if it is set
- */
- if (i915_enable_rc6 >= 0)
- return i915_enable_rc6;
-
- /*
- * Disable RC6 on Ironlake
- */
- if (INTEL_INFO(dev)->gen == 5)
- return 0;
-
- /*
- * Disable rc6 on Sandybridge
- */
- if (INTEL_INFO(dev)->gen == 6) {
- DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
- return INTEL_RC6_ENABLE;
- }
- DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
- return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
-}
-
-void gen6_enable_rps(struct drm_i915_private *dev_priv)
-{
- u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
- u32 pcu_mbox, rc6_mask = 0;
- u32 gtfifodbg;
- int cur_freq, min_freq, max_freq;
- int rc6_mode;
- int i;
-
- /* Here begins a magic sequence of register writes to enable
- * auto-downclocking.
- *
- * Perhaps there might be some value in exposing these to
- * userspace...
- */
- I915_WRITE(GEN6_RC_STATE, 0);
- mutex_lock(&dev_priv->dev->struct_mutex);
-
- /* Clear the DBG now so we don't confuse earlier errors */
- if ((gtfifodbg = I915_READ(GTFIFODBG))) {
- DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- gen6_gt_force_wake_get(dev_priv);
-
- /* disable the counters and set deterministic thresholds */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
- I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-
- for (i = 0; i < I915_NUM_RINGS; i++)
- I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
- I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
- I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
-
- rc6_mode = intel_enable_rc6(dev_priv->dev);
- if (rc6_mode & INTEL_RC6_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
-
- if (rc6_mode & INTEL_RC6p_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
-
- if (rc6_mode & INTEL_RC6pp_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
-
- DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
- (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
- (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
- (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
-
- I915_WRITE(GEN6_RC_CONTROL,
- rc6_mask |
- GEN6_RC_CTL_EI_MODE(1) |
- GEN6_RC_CTL_HW_ENABLE);
-
- I915_WRITE(GEN6_RPNSWREQ,
- GEN6_FREQUENCY(10) |
- GEN6_OFFSET(0) |
- GEN6_AGGRESSIVE_TURBO);
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- GEN6_FREQUENCY(12));
-
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- 18 << 24 |
- 6 << 16);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
- I915_WRITE(GEN6_RP_UP_EI, 100000);
- I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_CONT);
-
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
-
- I915_WRITE(GEN6_PCODE_DATA, 0);
- I915_WRITE(GEN6_PCODE_MAILBOX,
- GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
-
- min_freq = (rp_state_cap & 0xff0000) >> 16;
- max_freq = rp_state_cap & 0xff;
- cur_freq = (gt_perf_status & 0xff00) >> 8;
-
- /* Check for overclock support */
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
- pcu_mbox = I915_READ(GEN6_PCODE_DATA);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
- if (pcu_mbox & (1<<31)) { /* OC supported */
- max_freq = pcu_mbox & 0xff;
- DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
- }
-
- /* In units of 100MHz */
- dev_priv->max_delay = max_freq;
- dev_priv->min_delay = min_freq;
- dev_priv->cur_delay = cur_freq;
-
- /* requires MSI enabled */
- I915_WRITE(GEN6_PMIER,
- GEN6_PM_MBOX_EVENT |
- GEN6_PM_THERMAL_EVENT |
- GEN6_PM_RP_DOWN_TIMEOUT |
- GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_UP_EI_EXPIRED |
- GEN6_PM_RP_DOWN_EI_EXPIRED);
- spin_lock_irq(&dev_priv->rps_lock);
- WARN_ON(dev_priv->pm_iir != 0);
- I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps_lock);
- /* enable all PM interrupts */
- I915_WRITE(GEN6_PMINTRMSK, 0);
-
- gen6_gt_force_wake_put(dev_priv);
- mutex_unlock(&dev_priv->dev->struct_mutex);
-}
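The RPS limits above are unpacked from two read-only registers and later clamped by the PCU overclock mailbox. A small illustration of the same bitfield extraction (the register contents are invented; the driver's own overclock message scales these ratio values by 50 MHz per step):

    #include <stdio.h>

    struct rps_limits {
        unsigned int min, max, cur; /* hardware ratio units */
    };

    /* Same field extraction as the removed gen6_enable_rps(). */
    static struct rps_limits decode_rps(unsigned int rp_state_cap,
                                        unsigned int gt_perf_status)
    {
        struct rps_limits l;

        l.min = (rp_state_cap & 0xff0000) >> 16;
        l.max = rp_state_cap & 0xff;
        l.cur = (gt_perf_status & 0xff00) >> 8;
        return l;
    }

    int main(void)
    {
        /* Invented register values purely for illustration. */
        struct rps_limits l = decode_rps(0x0b0016, 0x1000);

        printf("min %u, max %u, cur %u (ratio units)\n", l.min, l.max, l.cur);
        return 0;
    }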
-
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
-{
- int min_freq = 15;
- int gpu_freq, ia_freq, max_ia_freq;
- int scaling_factor = 180;
-
- max_ia_freq = cpufreq_quick_get_max(0);
- /*
- * Default to measured freq if none found, PCU will ensure we don't go
- * over
- */
- if (!max_ia_freq)
- max_ia_freq = tsc_khz;
-
- /* Convert from kHz to MHz */
- max_ia_freq /= 1000;
-
- mutex_lock(&dev_priv->dev->struct_mutex);
-
- /*
- * For each potential GPU frequency, load a ring frequency we'd like
- * to use for memory access. We do this by specifying the IA frequency
- * the PCU should use as a reference to determine the ring frequency.
- */
- for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
- gpu_freq--) {
- int diff = dev_priv->max_delay - gpu_freq;
-
- /*
- * For GPU frequencies less than 750MHz, just use the lowest
- * ring freq.
- */
- if (gpu_freq < min_freq)
- ia_freq = 800;
- else
- ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
- ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
-
- I915_WRITE(GEN6_PCODE_DATA,
- (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
- gpu_freq);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
- GEN6_PCODE_READY) == 0, 10)) {
- DRM_ERROR("pcode write of freq table timed out\n");
- continue;
- }
- }
-
- mutex_unlock(&dev_priv->dev->struct_mutex);
-}
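gen6_update_ring_freq() above programs one IA frequency target per GPU ratio step: 90 MHz (180 / 2) lower for each step below the top ratio, a fixed 800 MHz floor below ratio 15, and the result rounded to the nearest multiple of 100 MHz. A standalone walk through that mapping with invented limits:

    #include <stdio.h>

    /* Positive-only simplification of the kernel's DIV_ROUND_CLOSEST(). */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        const int min_freq = 15, scaling_factor = 180;
        int max_ia_freq = 3400;             /* MHz, invented for the example */
        int max_delay = 22, min_delay = 11; /* RPS ratio range, invented     */
        int gpu_freq;

        for (gpu_freq = max_delay; gpu_freq >= min_delay; gpu_freq--) {
            int diff = max_delay - gpu_freq;
            int ia_freq;

            if (gpu_freq < min_freq)
                ia_freq = 800; /* fixed floor for very low GPU ratios */
            else
                ia_freq = max_ia_freq - (diff * scaling_factor) / 2;
            ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

            printf("gpu ratio %2d -> IA ratio %d\n", gpu_freq, ia_freq);
        }
        return 0;
    }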
-
-static void ironlake_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- /* Required for FBC */
- dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
- DPFCRUNIT_CLOCK_GATE_DISABLE |
- DPFDUNIT_CLOCK_GATE_DISABLE;
- /* Required for CxSR */
- dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_3DCGDIS0,
- MARIUNIT_CLOCK_GATE_DISABLE |
- SVSMUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(PCH_3DCGDIS1,
- VFMUNIT_CLOCK_GATE_DISABLE);
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
- /*
- * According to the spec the following bits should be set in
- * order to enable memory self-refresh
- * The bit 22/21 of 0x42004
- * The bit 5 of 0x42020
- * The bit 15 of 0x45000
- */
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- (I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_DPARB_GATE | ILK_VSDPFD_FULL));
- I915_WRITE(ILK_DSPCLK_GATE,
- (I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE));
- I915_WRITE(DISP_ARB_CTL,
- (I915_READ(DISP_ARB_CTL) |
- DISP_FBC_WM_DIS));
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- /*
- * Based on the document from hardware guys the following bits
- * should be set unconditionally in order to enable FBC.
- * The bit 22 of 0x42000
- * The bit 22 of 0x42004
- * The bit 7,8,9 of 0x42020.
- */
- if (IS_IRONLAKE_M(dev)) {
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
- ILK_FBCQ_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_DPARB_GATE);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPFC_DIS1 |
- ILK_DPFC_DIS2 |
- ILK_CLK_FBC);
- }
-
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_ELPIN_409_SELECT);
- I915_WRITE(_3D_CHICKEN2,
- _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
- _3D_CHICKEN2_WM_READ_PIPELINED);
-}
-
-static void gen6_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_ELPIN_409_SELECT);
-
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- I915_WRITE(GEN6_UCGCTL1,
- I915_READ(GEN6_UCGCTL1) |
- GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
-
- /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
- * gating disable must be set. Failure to set it results in
- * flickering pixels due to Z write ordering failures after
- * some amount of runtime in the Mesa "fire" demo, and Unigine
- * Sanctuary and Tropics, and apparently anything else with
- * alpha test or pixel discard.
- *
- * According to the spec, bit 11 (RCCUNIT) must also be set,
- * but we didn't debug actual testcases to find it out.
- */
- I915_WRITE(GEN6_UCGCTL2,
- GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
- GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
-
- /*
- * According to the spec the following bits should be
- * set in order to enable memory self-refresh and fbc:
- * The bit21 and bit22 of 0x42000
- * The bit21 and bit22 of 0x42004
- * The bit5 and bit7 of 0x42020
- * The bit14 of 0x70180
- * The bit14 of 0x71180
- */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
- ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_DPARB_GATE | ILK_VSDPFD_FULL);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE |
- ILK_DPFD_CLK_GATE);
-
- for_each_pipe(pipe) {
- I915_WRITE(DSPCNTR(pipe),
- I915_READ(DSPCNTR(pipe)) |
- DISPPLANE_TRICKLE_FEED_DISABLE);
- intel_flush_display_plane(dev_priv, pipe);
- }
-}
-
-static void ivybridge_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
- * This implements the WaDisableRCZUnitClockGating workaround.
- */
- I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
-
- I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
-
- I915_WRITE(IVB_CHICKEN3,
- CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
- CHICKEN3_DGMG_DONE_FIX_DISABLE);
-
- /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
- I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
- GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
-
- /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
- I915_WRITE(GEN7_L3CNTLREG1,
- GEN7_WA_FOR_GEN7_L3_CONTROL);
- I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
- GEN7_WA_L3_CHICKEN_MODE);
-
- /* This is required by WaCatErrorRejectionIssue */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
- GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
-
- for_each_pipe(pipe) {
- I915_WRITE(DSPCNTR(pipe),
- I915_READ(DSPCNTR(pipe)) |
- DISPPLANE_TRICKLE_FEED_DISABLE);
- intel_flush_display_plane(dev_priv, pipe);
- }
-}
-
-static void g4x_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dspclk_gate;
-
- I915_WRITE(RENCLK_GATE_D1, 0);
- I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
- GS_UNIT_CLOCK_GATE_DISABLE |
- CL_UNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(RAMCLK_GATE_D, 0);
- dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
- OVRUNIT_CLOCK_GATE_DISABLE |
- OVCUNIT_CLOCK_GATE_DISABLE;
- if (IS_GM45(dev))
- dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
-}
-
-static void crestline_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
- I915_WRITE(DSPCLK_GATE_D, 0);
- I915_WRITE(RAMCLK_GATE_D, 0);
- I915_WRITE16(DEUC, 0);
-}
-
-static void broadwater_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
- I965_RCC_CLOCK_GATE_DISABLE |
- I965_RCPB_CLOCK_GATE_DISABLE |
- I965_ISC_CLOCK_GATE_DISABLE |
- I965_FBC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
-}
-
-static void gen3_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dstate = I915_READ(D_STATE);
-
- dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
- DSTATE_DOT_CLOCK_GATING;
- I915_WRITE(D_STATE, dstate);
-}
-
-static void i85x_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
-}
-
-static void i830_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void ibx_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void cpt_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
- DPLS_EDP_PPS_FIX_DIS);
- /* Without this, mode sets may fail silently on FDI */
- for_each_pipe(pipe)
- I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
-}
-
-static void ironlake_teardown_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->renderctx) {
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(&dev_priv->renderctx->base);
- dev_priv->renderctx = NULL;
- }
-
- if (dev_priv->pwrctx) {
- i915_gem_object_unpin(dev_priv->pwrctx);
- drm_gem_object_unreference(&dev_priv->pwrctx->base);
- dev_priv->pwrctx = NULL;
- }
-}
-
-static void ironlake_disable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (I915_READ(PWRCTXA)) {
- /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
- wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
- 50);
-
- I915_WRITE(PWRCTXA, 0);
- POSTING_READ(PWRCTXA);
-
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- POSTING_READ(RSTDBYCTL);
- }
-
- ironlake_teardown_rc6(dev);
-}
-
-static int ironlake_setup_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->renderctx == NULL)
- dev_priv->renderctx = intel_alloc_context_page(dev);
- if (!dev_priv->renderctx)
- return -ENOMEM;
-
- if (dev_priv->pwrctx == NULL)
- dev_priv->pwrctx = intel_alloc_context_page(dev);
- if (!dev_priv->pwrctx) {
- ironlake_teardown_rc6(dev);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void ironlake_enable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- /* rc6 disabled by default due to repeated reports of hanging during
- * boot and resume.
- */
- if (!intel_enable_rc6(dev))
- return;
-
- mutex_lock(&dev->struct_mutex);
- ret = ironlake_setup_rc6(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return;
- }
-
- /*
- * GPU can automatically power down the render unit if given a page
- * to save state.
- */
- ret = BEGIN_LP_RING(6);
- if (ret) {
- ironlake_teardown_rc6(dev);
- mutex_unlock(&dev->struct_mutex);
- return;
- }
-
- OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
- OUT_RING(MI_SET_CONTEXT);
- OUT_RING(dev_priv->renderctx->gtt_offset |
- MI_MM_SPACE_GTT |
- MI_SAVE_EXT_STATE_EN |
- MI_RESTORE_EXT_STATE_EN |
- MI_RESTORE_INHIBIT);
- OUT_RING(MI_SUSPEND_FLUSH);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_FLUSH);
- ADVANCE_LP_RING();
-
- /*
- * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
- * does an implicit flush, combined with MI_FLUSH above, it should be
- * safe to assume that renderctx is valid
- */
- ret = intel_wait_ring_idle(LP_RING(dev_priv));
- if (ret) {
- DRM_ERROR("failed to enable ironlake power power savings\n");
- ironlake_teardown_rc6(dev);
- mutex_unlock(&dev->struct_mutex);
- return;
- }
-
- I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- mutex_unlock(&dev->struct_mutex);
-}
-
-void intel_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- dev_priv->display.init_clock_gating(dev);
-
- if (dev_priv->display.init_pch_clock_gating)
- dev_priv->display.init_pch_clock_gating(dev);
-}
-
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
@@ -8887,32 +6709,20 @@ static void intel_init_display(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.dpms = ironlake_crtc_dpms;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
} else {
dev_priv->display.dpms = i9xx_crtc_dpms;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_plane = i9xx_update_plane;
}
- if (I915_HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = ironlake_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (IS_GM45(dev)) {
- dev_priv->display.fbc_enabled = g4x_fbc_enabled;
- dev_priv->display.enable_fbc = g4x_enable_fbc;
- dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else if (IS_CRESTLINE(dev)) {
- dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
- dev_priv->display.enable_fbc = i8xx_enable_fbc;
- dev_priv->display.disable_fbc = i8xx_disable_fbc;
- }
- /* 855GM needs testing */
- }
-
/* Returns the core display clock speed */
- if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+ if (IS_VALLEYVIEW(dev))
+ dev_priv->display.get_display_clock_speed =
+ valleyview_get_display_clock_speed;
+ else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_I915G(dev))
@@ -8934,124 +6744,27 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
- /* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
- dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
-
- /* IVB configs may use multi-threaded forcewake */
- if (IS_IVYBRIDGE(dev)) {
- u32 ecobus;
-
- /* A small trick here - if the bios hasn't configured MT forcewake,
- * and if the device is in RC6, then force_wake_mt_get will not wake
- * the device and the ECOBUS read will return zero. Which will be
- * (correctly) interpreted by the test below as MT forcewake being
- * disabled.
- */
- mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = I915_READ_NOTRACE(ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- DRM_DEBUG_KMS("Using MT version of forcewake\n");
- dev_priv->display.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->display.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- }
- }
-
- if (HAS_PCH_IBX(dev))
- dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
- else if (HAS_PCH_CPT(dev))
- dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
-
if (IS_GEN5(dev)) {
- if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
- dev_priv->display.update_wm = ironlake_update_wm;
- else {
- DRM_DEBUG_KMS("Failed to get proper latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
- dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_GEN6(dev)) {
- if (SNB_READ_WM0_LATENCY()) {
- dev_priv->display.update_wm = sandybridge_update_wm;
- dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
- } else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
- dev_priv->display.init_clock_gating = gen6_init_clock_gating;
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- if (SNB_READ_WM0_LATENCY()) {
- dev_priv->display.update_wm = sandybridge_update_wm;
- dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
- } else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
- dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ } else if (IS_HASWELL(dev)) {
+ dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = ironlake_write_eld;
} else
dev_priv->display.update_wm = NULL;
- } else if (IS_PINEVIEW(dev)) {
- if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
- dev_priv->is_ddr3,
- dev_priv->fsb_freq,
- dev_priv->mem_freq)) {
- DRM_INFO("failed to find known CxSR latency "
- "(found ddr%s fsb freq %d, mem freq %d), "
- "disabling CxSR\n",
- (dev_priv->is_ddr3 == 1) ? "3" : "2",
- dev_priv->fsb_freq, dev_priv->mem_freq);
- /* Disable CxSR and never update its watermark again */
- pineview_disable_cxsr(dev);
- dev_priv->display.update_wm = NULL;
- } else
- dev_priv->display.update_wm = pineview_update_wm;
- dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.force_wake_get = vlv_force_wake_get;
+ dev_priv->display.force_wake_put = vlv_force_wake_put;
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
- dev_priv->display.update_wm = g4x_update_wm;
- dev_priv->display.init_clock_gating = g4x_init_clock_gating;
- } else if (IS_GEN4(dev)) {
- dev_priv->display.update_wm = i965_update_wm;
- if (IS_CRESTLINE(dev))
- dev_priv->display.init_clock_gating = crestline_init_clock_gating;
- else if (IS_BROADWATER(dev))
- dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
- } else if (IS_GEN3(dev)) {
- dev_priv->display.update_wm = i9xx_update_wm;
- dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- dev_priv->display.init_clock_gating = gen3_init_clock_gating;
- } else if (IS_I865G(dev)) {
- dev_priv->display.update_wm = i830_update_wm;
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- dev_priv->display.get_fifo_size = i830_get_fifo_size;
- } else if (IS_I85X(dev)) {
- dev_priv->display.update_wm = i9xx_update_wm;
- dev_priv->display.get_fifo_size = i85x_get_fifo_size;
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- } else {
- dev_priv->display.update_wm = i830_update_wm;
- dev_priv->display.init_clock_gating = i830_init_clock_gating;
- if (IS_845G(dev))
- dev_priv->display.get_fifo_size = i845_get_fifo_size;
- else
- dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
/* Default just returns -ENODEV to indicate unsupported */
@@ -9090,7 +6803,7 @@ static void quirk_pipea_force(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_PIPEA_FORCE;
- DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
+ DRM_INFO("applying pipe a force quirk\n");
}
/*
@@ -9100,6 +6813,18 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+ DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
+ DRM_INFO("applying inverted panel brightness quirk\n");
}
struct intel_quirk {
@@ -9109,7 +6834,7 @@ struct intel_quirk {
void (*hook)(struct drm_device *dev);
};
-struct intel_quirk intel_quirks[] = {
+static struct intel_quirk intel_quirks[] = {
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
@@ -9134,6 +6859,9 @@ struct intel_quirk intel_quirks[] = {
/* Sony Vaio Y cannot use SSC on LVDS */
{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+ /* Acer Aspire 5734Z must invert backlight brightness */
+ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
};
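Each entry in intel_quirks[] above names a PCI device ID plus subsystem vendor/device IDs and a hook; intel_init_quirks() (not shown in this hunk) walks the table and runs the hook on a match. A minimal, purely illustrative matcher under that assumption — the field names and the exact matching rule here are guesses, not the driver's code:

    #include <stdio.h>

    /* Illustrative only: fields mirror how the table entries are used,
     * i.e. { device, subsystem_vendor, subsystem_device, hook }. */
    struct quirk {
        unsigned short device;
        unsigned short subsystem_vendor;
        unsigned short subsystem_device;
        void (*hook)(void);
    };

    static void invert_brightness(void) { printf("invert brightness quirk\n"); }

    static const struct quirk quirks[] = {
        /* Acer Aspire 5734Z entry from the table above */
        { 0x2a42, 0x1025, 0x0459, invert_brightness },
    };

    static void apply_quirks(unsigned short dev, unsigned short sv, unsigned short sd)
    {
        unsigned int i;

        for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
            if (quirks[i].device == dev &&
                quirks[i].subsystem_vendor == sv &&
                quirks[i].subsystem_device == sd)
                quirks[i].hook();
    }

    int main(void)
    {
        apply_quirks(0x2a42, 0x1025, 0x0459); /* matches -> hook runs */
        apply_quirks(0x2a42, 0x8086, 0x0000); /* no match             */
        return 0;
    }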
static void intel_init_quirks(struct drm_device *dev)
@@ -9166,7 +6894,7 @@ static void i915_disable_vga(struct drm_device *dev)
vga_reg = VGACNTRL;
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
- outb(1, VGA_SR_INDEX);
+ outb(SR01, VGA_SR_INDEX);
sr1 = inb(VGA_SR_DATA);
outb(sr1 | 1<<5, VGA_SR_DATA);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
@@ -9176,6 +6904,40 @@ static void i915_disable_vga(struct drm_device *dev)
POSTING_READ(vga_reg);
}
+static void ivb_pch_pwm_override(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * IVB has CPU eDP backlight regs too, set things up to let the
+ * PCH regs control the backlight
+ */
+ I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
+ I915_WRITE(BLC_PWM_CPU_CTL, 0);
+ I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30));
+}
+
+void intel_modeset_init_hw(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_init_clock_gating(dev);
+
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_enable_drps(dev);
+ ironlake_enable_rc6(dev);
+ intel_init_emon(dev);
+ }
+
+ if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+ gen6_enable_rps(dev_priv);
+ gen6_update_ring_freq(dev_priv);
+ }
+
+ if (IS_IVYBRIDGE(dev))
+ ivb_pch_pwm_override(dev);
+}
+
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -9189,10 +6951,14 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
- dev->mode_config.funcs = (void *)&intel_mode_funcs;
+ dev->mode_config.funcs = &intel_mode_funcs;
intel_init_quirks(dev);
+ intel_init_pm(dev);
+
+ intel_prepare_ddi(dev);
+
intel_init_display(dev);
if (IS_GEN2(dev)) {
@@ -9217,22 +6983,12 @@ void intel_modeset_init(struct drm_device *dev)
DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
}
+ intel_pch_pll_init(dev);
+
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
- intel_init_clock_gating(dev);
-
- if (IS_IRONLAKE_M(dev)) {
- ironlake_enable_drps(dev);
- intel_init_emon(dev);
- }
-
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
- gen6_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
- }
-
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
@@ -9240,8 +6996,7 @@ void intel_modeset_init(struct drm_device *dev)
void intel_modeset_gem_init(struct drm_device *dev)
{
- if (IS_IRONLAKE_M(dev))
- ironlake_enable_rc6(dev);
+ intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
}
@@ -9271,12 +7026,15 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
gen6_disable_rps(dev);
if (IS_IRONLAKE_M(dev))
ironlake_disable_rc6(dev);
+ if (IS_VALLEYVIEW(dev))
+ vlv_init_dpio(dev);
+
mutex_unlock(&dev->struct_mutex);
/* Disable the irq before mode object teardown, for the irq might
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4b637919f74f..296cfc201a81 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -266,6 +266,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
@@ -688,7 +691,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
- int bpp;
+ int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -702,24 +705,33 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
mode->clock = intel_dp->panel_fixed_mode->clock;
}
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return false;
+
+ DRM_DEBUG_KMS("DP link computation with max lane count %i "
+ "max bw %02x pixel clock %iKHz\n",
+ max_lane_count, bws[max_clock], mode->clock);
+
if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
return false;
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+ mode_rate = intel_dp_link_required(mode->clock, bpp);
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(mode->clock, bpp)
- <= link_avail) {
+ if (mode_rate <= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
- DRM_DEBUG_KMS("Display port link bw %02x lane "
- "count %d clock %d\n",
+ DRM_DEBUG_KMS("DP link bw %02x lane "
+ "count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
- adjusted_mode->clock);
+ adjusted_mode->clock, bpp);
+ DRM_DEBUG_KMS("DP link bw required %i available %i\n",
+ mode_rate, link_avail);
return true;
}
}
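The loop above settles on the cheapest lane count and link clock whose payload rate covers mode_rate. As a sanity check independent of the driver's internal units: DisplayPort's 8b/10b coding leaves 80% of the raw link rate for payload, so a 2.7 GHz link offers about 2.16 Gbit/s per lane, while 1080p60 (148.5 MHz pixel clock) at 24 bpp needs roughly 3.56 Gbit/s. A short sketch of that arithmetic:

    #include <stdio.h>

    /* Payload bandwidth of a DP link in Mbit/s: raw rate * lanes * 8/10
     * (8b/10b channel coding overhead). */
    static double dp_link_mbps(double link_ghz, int lanes)
    {
        return link_ghz * 1000.0 * lanes * 8.0 / 10.0;
    }

    /* Bandwidth a mode needs in Mbit/s: pixel clock (MHz) * bits per pixel. */
    static double mode_mbps(double pixel_clock_mhz, int bpp)
    {
        return pixel_clock_mhz * bpp;
    }

    int main(void)
    {
        double need = mode_mbps(148.5, 24);          /* 1080p60, 24 bpp */
        int lanes;

        for (lanes = 1; lanes <= 4; lanes <<= 1) {
            double avail = dp_link_mbps(2.7, lanes); /* 2.7 GHz link    */
            printf("%d lane(s): need %.0f, avail %.0f -> %s\n",
                   lanes, need, avail, need <= avail ? "fits" : "too slow");
        }
        return 0;
    }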
@@ -1148,10 +1160,10 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n");
- WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
+ WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
pp = ironlake_get_pp_control(dev_priv);
- pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@@ -1259,18 +1271,16 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ /* Make sure the panel is off before trying to change the mode. But also
+ * ensure that we have vdd while we switch off the panel. */
+ ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
ironlake_edp_panel_off(intel_dp);
- /* Wake up the sink first */
- ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_link_down(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
-
- /* Make sure the panel is off before trying to
- * change the mode
- */
}
static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1302,10 +1312,11 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
+ /* Switching the panel off requires vdd. */
+ ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
ironlake_edp_panel_off(intel_dp);
- ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
intel_dp_link_down(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
@@ -1954,6 +1965,23 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return false;
}
+static void
+intel_dp_probe_oui(struct intel_dp *intel_dp)
+{
+ u8 buf[3];
+
+ if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+ return;
+
+ if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
+ DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
+ DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+}
+
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
@@ -2137,6 +2165,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
if (status != connector_status_connected)
return status;
+ intel_dp_probe_oui(intel_dp);
+
if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
@@ -2438,6 +2468,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
}
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
@@ -2483,6 +2514,13 @@ intel_dp_init(struct drm_device *dev, int output_reg)
pp_off = I915_READ(PCH_PP_OFF_DELAYS);
pp_div = I915_READ(PCH_PP_DIVISOR);
+ if (!pp_on || !pp_off || !pp_div) {
+ DRM_INFO("bad panel power sequencing delays, disabling panel\n");
+ intel_dp_encoder_destroy(&intel_dp->base.base);
+ intel_dp_destroy(&intel_connector->base);
+ return;
+ }
+
/* Pull timing values out of registers */
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
PANEL_POWER_UP_DELAY_SHIFT;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 715afa153025..3e0918834e7e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -45,6 +45,18 @@
ret__; \
})
+#define wait_for_atomic_us(COND, US) ({ \
+ int i, ret__ = -ETIMEDOUT; \
+ for (i = 0; i < (US); i++) { \
+ if ((COND)) { \
+ ret__ = 0; \
+ break; \
+ } \
+ udelay(1); \
+ } \
+ ret__; \
+})
+
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
@@ -171,8 +183,8 @@ struct intel_crtc {
bool cursor_visible;
unsigned int bpp;
- bool no_pll; /* tertiary pipe for IVB */
- bool use_pll_a;
+ /* We can share PLLs across outputs if the timings match */
+ struct intel_pch_pll *pch_pll;
};
struct intel_plane {
@@ -196,6 +208,25 @@ struct intel_plane {
struct drm_intel_sprite_colorkey *key);
};
+struct intel_watermark_params {
+ unsigned long fifo_size;
+ unsigned long max_wm;
+ unsigned long default_wm;
+ unsigned long guard_size;
+ unsigned long cacheline_size;
+};
+
+struct cxsr_latency {
+ int is_desktop;
+ int is_ddr3;
+ unsigned long fsb_freq;
+ unsigned long mem_freq;
+ unsigned long display_sr;
+ unsigned long display_hpll_disable;
+ unsigned long cursor_sr;
+ unsigned long cursor_hpll_disable;
+};
+
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
@@ -207,6 +238,8 @@ struct intel_plane {
#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
+#define DIP_AVI_PR_1 0
+#define DIP_AVI_PR_2 1
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
@@ -240,23 +273,36 @@ struct dip_infoframe {
uint8_t ITC_EC_Q_SC;
/* PB4 - VIC 6:0 */
uint8_t VIC;
- /* PB5 - PR 3:0 */
- uint8_t PR;
+ /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
+ uint8_t YQ_CN_PR;
/* PB6 to PB13 */
uint16_t top_bar_end;
uint16_t bottom_bar_start;
uint16_t left_bar_end;
uint16_t right_bar_start;
- } avi;
+ } __attribute__ ((packed)) avi;
struct {
uint8_t vn[8];
uint8_t pd[16];
uint8_t sdi;
- } spd;
+ } __attribute__ ((packed)) spd;
uint8_t payload[27];
} __attribute__ ((packed)) body;
} __attribute__((packed));
+struct intel_hdmi {
+ struct intel_encoder base;
+ u32 sdvox_reg;
+ int ddc_bus;
+ int ddi_port;
+ uint32_t color_range;
+ bool has_hdmi_sink;
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ void (*write_infoframe)(struct drm_encoder *encoder,
+ struct dip_infoframe *frame);
+};
+
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
@@ -296,8 +342,13 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
-void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
-extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
+extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode);
+extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder);
+extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
+extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
+ bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev,
@@ -311,6 +362,10 @@ extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
+extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+ enum plane plane);
+
+void intel_sanitize_pm(struct drm_device *dev);
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@@ -368,12 +423,9 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_enable_clock_gating(struct drm_device *dev);
+extern void ironlake_disable_rc6(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
-extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
-extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
-extern void gen6_disable_rps(struct drm_device *dev);
-extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -411,16 +463,43 @@ extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+extern void intel_prepare_ddi(struct drm_device *dev);
+extern void hsw_fdi_link_train(struct drm_crtc *crtc);
+extern void intel_ddi_init(struct drm_device *dev, enum port port);
/* For use by IVB LP watermark workaround in intel_sprite.c */
-extern void sandybridge_update_wm(struct drm_device *dev);
+extern void intel_update_watermarks(struct drm_device *dev);
extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width,
int pixel_size);
+extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
+ struct drm_display_mode *mode);
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
+
+/* Power-related functions, located in intel_pm.c */
+extern void intel_init_pm(struct drm_device *dev);
+/* FBC */
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern void intel_update_fbc(struct drm_device *dev);
+/* IPS */
+extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+extern void intel_gpu_ips_teardown(void);
+
+extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
+extern void gen6_disable_rps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
+
+extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
+extern void intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 020a7d7f744d..60ba50b956f2 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -243,7 +243,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
* that's not the case.
*/
intel_ddc_get_modes(connector,
- &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
+ intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
if (!list_empty(&connector->probed_modes))
return 1;
@@ -375,7 +375,7 @@ void intel_dvo_init(struct drm_device *dev)
* special cases, but otherwise default to what's defined
* in the spec.
*/
- if (dvo->gpio != 0)
+ if (intel_gmbus_is_port_valid(dvo->gpio))
gpio = dvo->gpio;
else if (dvo->type == INTEL_DVO_CHIP_LVDS)
gpio = GMBUS_PORT_SSC;
@@ -386,7 +386,7 @@ void intel_dvo_init(struct drm_device *dev)
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
- i2c = &dev_priv->gmbus[gpio].adapter;
+ i2c = intel_gmbus_get_adapter(dev_priv, gpio);
intel_dvo->dev = *dvo;
if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 6e9ee33fd412..bf8690720a0c 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -94,7 +94,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
mutex_lock(&dev->struct_mutex);
/* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(dev, obj, false);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2d7f47b56b6a..2ead3bf7c21d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -37,19 +37,7 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intel_hdmi {
- struct intel_encoder base;
- u32 sdvox_reg;
- int ddc_bus;
- uint32_t color_range;
- bool has_hdmi_sink;
- bool has_audio;
- enum hdmi_force_audio force_audio;
- void (*write_infoframe)(struct drm_encoder *encoder,
- struct dip_infoframe *frame);
-};
-
-static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_hdmi, base.base);
}
@@ -75,108 +63,246 @@ void intel_dip_infoframe_csum(struct dip_infoframe *frame)
frame->checksum = 0x100 - sum;
}
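The assignment above implements the usual HDMI infoframe rule: choose the checksum so that every byte of the packet, checksum included, sums to zero modulo 256. A small standalone illustration with made-up header/payload bytes:

    #include <stdio.h>

    /* Checksum over an infoframe-style byte buffer: the value that makes the
     * total, including the checksum byte itself, equal 0 mod 256. */
    static unsigned char infoframe_csum(const unsigned char *buf, int len)
    {
        unsigned int sum = 0;
        int i;

        for (i = 0; i < len; i++)
            sum += buf[i];
        return (unsigned char)(0x100 - (sum & 0xff));
    }

    int main(void)
    {
        /* Made-up header/payload bytes, checksum field excluded. */
        unsigned char frame[] = { 0x82, 0x02, 0x0d, 0x10, 0x28, 0x00 };
        unsigned char csum = infoframe_csum(frame, sizeof(frame));
        unsigned int total = csum, i;

        for (i = 0; i < sizeof(frame); i++)
            total += frame[i];
        printf("checksum 0x%02x, total mod 256 = %u\n", csum, total & 0xff);
        return 0;
    }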
-static u32 intel_infoframe_index(struct dip_infoframe *frame)
+static u32 g4x_infoframe_index(struct dip_infoframe *frame)
{
- u32 flags = 0;
-
switch (frame->type) {
case DIP_TYPE_AVI:
- flags |= VIDEO_DIP_SELECT_AVI;
- break;
+ return VIDEO_DIP_SELECT_AVI;
case DIP_TYPE_SPD:
- flags |= VIDEO_DIP_SELECT_SPD;
- break;
+ return VIDEO_DIP_SELECT_SPD;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
- break;
+ return 0;
}
-
- return flags;
}
-static u32 intel_infoframe_flags(struct dip_infoframe *frame)
+static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
{
- u32 flags = 0;
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return VIDEO_DIP_ENABLE_AVI;
+ case DIP_TYPE_SPD:
+ return VIDEO_DIP_ENABLE_SPD;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
+}
+static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
+{
switch (frame->type) {
case DIP_TYPE_AVI:
- flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
- break;
+ return VIDEO_DIP_ENABLE_AVI_HSW;
case DIP_TYPE_SPD:
- flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC;
- break;
+ return VIDEO_DIP_ENABLE_SPD_HSW;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
- break;
+ return 0;
}
+}
- return flags;
+static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe)
+{
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return HSW_TVIDEO_DIP_AVI_DATA(pipe);
+ case DIP_TYPE_SPD:
+ return HSW_TVIDEO_DIP_SPD_DATA(pipe);
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
}
-static void i9xx_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+static void g4x_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
+ u32 val = I915_READ(VIDEO_DIP_CTL);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
-
- /* XXX first guess at handling video port, is this corrent? */
+ val &= ~VIDEO_DIP_PORT_MASK;
if (intel_hdmi->sdvox_reg == SDVOB)
- port = VIDEO_DIP_PORT_B;
+ val |= VIDEO_DIP_PORT_B;
else if (intel_hdmi->sdvox_reg == SDVOC)
- port = VIDEO_DIP_PORT_C;
+ val |= VIDEO_DIP_PORT_C;
else
return;
- flags = intel_infoframe_index(frame);
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
- val &= ~VIDEO_DIP_SELECT_MASK;
+ val &= ~g4x_infoframe_enable(frame);
+ val |= VIDEO_DIP_ENABLE;
- I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+ I915_WRITE(VIDEO_DIP_CTL, val);
for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
- flags |= intel_infoframe_flags(frame);
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+ I915_WRITE(VIDEO_DIP_CTL, val);
}
-static void ironlake_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+static void ibx_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
- u32 flags, val = I915_READ(reg);
+ u32 val = I915_READ(reg);
+
+ val &= ~VIDEO_DIP_PORT_MASK;
+ switch (intel_hdmi->sdvox_reg) {
+ case HDMIB:
+ val |= VIDEO_DIP_PORT_B;
+ break;
+ case HDMIC:
+ val |= VIDEO_DIP_PORT_C;
+ break;
+ case HDMID:
+ val |= VIDEO_DIP_PORT_D;
+ break;
+ default:
+ return;
+ }
intel_wait_for_vblank(dev, intel_crtc->pipe);
- flags = intel_infoframe_index(frame);
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ val &= ~g4x_infoframe_enable(frame);
+ val |= VIDEO_DIP_ENABLE;
+
+ I915_WRITE(reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+}
+
+static void cpt_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(reg);
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ /* The DIP control register spec says that we need to update the AVI
+ * infoframe without clearing its enable bit */
+ if (frame->type == DIP_TYPE_AVI)
+ val |= VIDEO_DIP_ENABLE_AVI;
+ else
+ val &= ~g4x_infoframe_enable(frame);
+
+ val |= VIDEO_DIP_ENABLE;
- I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+ I915_WRITE(reg, val);
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
- flags |= intel_infoframe_flags(frame);
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+ I915_WRITE(reg, val);
}
+
+static void vlv_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(reg);
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ val &= ~g4x_infoframe_enable(frame);
+ val |= VIDEO_DIP_ENABLE;
+
+ I915_WRITE(reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+}
+
+static void hsw_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe);
+ unsigned int i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(ctl_reg);
+
+ if (data_reg == 0)
+ return;
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ val &= ~hsw_infoframe_enable(frame);
+ I915_WRITE(ctl_reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(data_reg + i, *data);
+ data++;
+ }
+
+ val |= hsw_infoframe_enable(frame);
+ I915_WRITE(ctl_reg, val);
+}
+
static void intel_set_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
{
@@ -189,7 +315,8 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
intel_hdmi->write_infoframe(encoder, frame);
}
-static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
@@ -197,10 +324,13 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
.len = DIP_LEN_AVI,
};
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+
intel_set_infoframe(encoder, &avi_if);
}
-static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
{
struct dip_infoframe spd_if;
@@ -221,8 +351,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 sdvox;
@@ -259,7 +388,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
- intel_hdmi_set_avi_infoframe(encoder);
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
@@ -334,7 +463,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
edid = drm_get_edid(connector,
- &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -367,7 +497,8 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
*/
return intel_ddc_get_modes(connector,
- &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
}
static bool
@@ -379,7 +510,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
bool has_audio = false;
edid = drm_get_edid(connector,
- &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
@@ -393,8 +525,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
static int
intel_hdmi_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
+ struct drm_property *property,
+ uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
@@ -453,6 +585,14 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
kfree(connector);
}
+static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
+ .dpms = intel_ddi_dpms,
+ .mode_fixup = intel_hdmi_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .mode_set = intel_ddi_mode_set,
+ .commit = intel_encoder_commit,
+};
+
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.dpms = intel_hdmi_dpms,
.mode_fixup = intel_hdmi_mode_fixup,
@@ -542,20 +682,60 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
+ DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
+ intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+ intel_hdmi->ddi_port = PORT_B;
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
+ DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
+ intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+ intel_hdmi->ddi_port = PORT_C;
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
+ DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
+ intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+ intel_hdmi->ddi_port = PORT_D;
+ dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+ } else {
+ /* If we got an unknown sdvox_reg, things are pretty much broken
+ * in a way that we should let the kernel know about it */
+ BUG();
}
intel_hdmi->sdvox_reg = sdvox_reg;
if (!HAS_PCH_SPLIT(dev)) {
- intel_hdmi->write_infoframe = i9xx_write_infoframe;
+ intel_hdmi->write_infoframe = g4x_write_infoframe;
I915_WRITE(VIDEO_DIP_CTL, 0);
+ } else if (IS_VALLEYVIEW(dev)) {
+ intel_hdmi->write_infoframe = vlv_write_infoframe;
+ for_each_pipe(i)
+ I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0);
+ } else if (IS_HASWELL(dev)) {
+ /* FIXME: Haswell has a new set of DIP frame registers, but we are
+ * just doing the minimum required for HDMI to work at this stage.
+ */
+ intel_hdmi->write_infoframe = hsw_write_infoframe;
+ for_each_pipe(i)
+ I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0);
+ } else if (HAS_PCH_IBX(dev)) {
+ intel_hdmi->write_infoframe = ibx_write_infoframe;
+ for_each_pipe(i)
+ I915_WRITE(TVIDEO_DIP_CTL(i), 0);
} else {
- intel_hdmi->write_infoframe = ironlake_write_infoframe;
+ intel_hdmi->write_infoframe = cpt_write_infoframe;
for_each_pipe(i)
I915_WRITE(TVIDEO_DIP_CTL(i), 0);
}
- drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+ if (IS_HASWELL(dev))
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw);
+ else
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
intel_hdmi_add_properties(intel_hdmi, connector);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 8fdc95700218..1991a4408cf9 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -35,6 +35,20 @@
#include "i915_drm.h"
#include "i915_drv.h"
+struct gmbus_port {
+ const char *name;
+ int reg;
+};
+
+static const struct gmbus_port gmbus_ports[] = {
+ { "ssc", GPIOB },
+ { "vga", GPIOA },
+ { "panel", GPIOC },
+ { "dpc", GPIOD },
+ { "dpb", GPIOE },
+ { "dpd", GPIOF },
+};
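A note on this table: it is ordered by GMBUS pin pair (ssc, vga, panel, dpc, dpb, dpd), so entry i corresponds to pin pair i + 1. That is why later hunks add the "+1"/"-1" conversions between the gmbus[] array index and the GMBUS_PORT_* pin numbers. A minimal sketch of the lookup, assuming the usual numbering where GMBUS_PORT_SSC is 1 and GMBUS_PORT_DPD is 6:

    /* Illustrative only: return the name/register pair for a pin number. */
    static const struct gmbus_port *lookup_gmbus_port(unsigned int pin)
    {
            return &gmbus_ports[pin - 1]; /* -1 to map pin pair to index */
    }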
+
/* Intel GPIO access functions */
#define I2C_RISEFALL_TIME 10
@@ -49,10 +63,7 @@ void
intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_GMBUS0, 0);
- else
- I915_WRITE(GMBUS0, 0);
+ I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
}
static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@@ -140,63 +151,173 @@ static void set_data(void *data, int state_high)
POSTING_READ(bus->gpio_reg);
}
-static bool
+static int
+intel_gpio_pre_xfer(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ intel_i2c_reset(dev_priv->dev);
+ intel_i2c_quirk_set(dev_priv, true);
+ set_data(bus, 1);
+ set_clock(bus, 1);
+ udelay(I2C_RISEFALL_TIME);
+ return 0;
+}
+
+static void
+intel_gpio_post_xfer(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ set_data(bus, 1);
+ set_clock(bus, 1);
+ intel_i2c_quirk_set(dev_priv, false);
+}
+
+static void
intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
- static const int map_pin_to_reg[] = {
- 0,
- GPIOB,
- GPIOA,
- GPIOC,
- GPIOD,
- GPIOE,
- 0,
- GPIOF,
- };
struct i2c_algo_bit_data *algo;
- if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
- return false;
-
algo = &bus->bit_algo;
- bus->gpio_reg = map_pin_to_reg[pin];
- if (HAS_PCH_SPLIT(dev_priv->dev))
- bus->gpio_reg += PCH_GPIOA - GPIOA;
+ /* -1 to map pin pair to gmbus index */
+ bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg;
bus->adapter.algo_data = algo;
algo->setsda = set_data;
algo->setscl = set_clock;
algo->getsda = get_data;
algo->getscl = get_clock;
+ algo->pre_xfer = intel_gpio_pre_xfer;
+ algo->post_xfer = intel_gpio_post_xfer;
algo->udelay = I2C_RISEFALL_TIME;
algo->timeout = usecs_to_jiffies(2200);
algo->data = bus;
+}
- return true;
+static int
+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ u32 gmbus1_index)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u16 len = msg->len;
+ u8 *buf = msg->buf;
+
+ I915_WRITE(GMBUS1 + reg_offset,
+ gmbus1_index |
+ GMBUS_CYCLE_WAIT |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+ u32 val, loop = 0;
+ u32 gmbus2;
+
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+ (GMBUS_SATOER | GMBUS_HW_RDY),
+ 50);
+ if (ret)
+ return -ETIMEDOUT;
+ if (gmbus2 & GMBUS_SATOER)
+ return -ENXIO;
+
+ val = I915_READ(GMBUS3 + reg_offset);
+ do {
+ *buf++ = val & 0xff;
+ val >>= 8;
+ } while (--len && ++loop < 4);
+ }
+
+ return 0;
}
static int
-intel_i2c_quirk_xfer(struct intel_gmbus *bus,
- struct i2c_msg *msgs,
- int num)
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
{
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u16 len = msg->len;
+ u8 *buf = msg->buf;
+ u32 val, loop;
+
+ val = loop = 0;
+ while (len && loop < 4) {
+ val |= *buf++ << (8 * loop++);
+ len -= 1;
+ }
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ I915_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT |
+ (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+ u32 gmbus2;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+ (GMBUS_SATOER | GMBUS_HW_RDY),
+ 50);
+ if (ret)
+ return -ETIMEDOUT;
+ if (gmbus2 & GMBUS_SATOER)
+ return -ENXIO;
+ }
+ return 0;
+}
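The write path above packs up to four message bytes, least significant byte first, into each GMBUS3 dword. A standalone worked example of that packing (an illustrative userspace sketch, not driver code):

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors the LSB-first packing loop in gmbus_xfer_write(). */
    static uint32_t pack_gmbus3(const uint8_t *buf, unsigned int len)
    {
            uint32_t val = 0;
            unsigned int loop = 0;

            while (len && loop < 4) {
                    val |= (uint32_t)*buf++ << (8 * loop++);
                    len--;
            }
            return val;
    }

    int main(void)
    {
            const uint8_t msg[3] = { 0x12, 0x34, 0x56 };

            /* Three bytes occupy the low 24 bits: 0x00563412. */
            assert(pack_gmbus3(msg, 3) == 0x00563412);
            return 0;
    }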
+
+/*
+ * The gmbus controller can combine a 1 or 2 byte write with a read that
+ * immediately follows it by using an "INDEX" cycle.
+ */
+static bool
+gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
+{
+ return (i + 1 < num &&
+ !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+ (msgs[i + 1].flags & I2C_M_RD));
+}
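For example, the classic DDC/EDID access pattern (a one-byte offset write immediately followed by a read from the same slave) is exactly what this predicate matches. A minimal sketch using the standard struct i2c_msg fields; the 0x50 slave address is the usual DDC address and the buffer names are made up:

    u8 offset = 0;
    u8 edid[128];
    struct i2c_msg msgs[] = {
            {
                    .addr  = 0x50,          /* DDC slave */
                    .flags = 0,             /* 1-byte write: the "index" */
                    .len   = 1,
                    .buf   = &offset,
            },
            {
                    .addr  = 0x50,
                    .flags = I2C_M_RD,      /* read that immediately follows */
                    .len   = sizeof(edid),
                    .buf   = edid,
            },
    };
    /* gmbus_is_index_read(msgs, 0, ARRAY_SIZE(msgs)) is true, so both
     * messages are handled by gmbus_xfer_index_read() as one INDEX cycle. */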
+
+static int
+gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u32 gmbus1_index = 0;
+ u32 gmbus5 = 0;
int ret;
- intel_i2c_reset(dev_priv->dev);
+ if (msgs[0].len == 2)
+ gmbus5 = GMBUS_2BYTE_INDEX_EN |
+ msgs[0].buf[1] | (msgs[0].buf[0] << 8);
+ if (msgs[0].len == 1)
+ gmbus1_index = GMBUS_CYCLE_INDEX |
+ (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
- intel_i2c_quirk_set(dev_priv, true);
- set_data(bus, 1);
- set_clock(bus, 1);
- udelay(I2C_RISEFALL_TIME);
+ /* GMBUS5 holds 16-bit index */
+ if (gmbus5)
+ I915_WRITE(GMBUS5 + reg_offset, gmbus5);
- ret = i2c_bit_algo.master_xfer(&bus->adapter, msgs, num);
+ ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
- set_data(bus, 1);
- set_clock(bus, 1);
- intel_i2c_quirk_set(dev_priv, false);
+ /* Clear GMBUS5 after each index transfer */
+ if (gmbus5)
+ I915_WRITE(GMBUS5 + reg_offset, 0);
return ret;
}
@@ -210,117 +331,111 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- int i, reg_offset, ret;
+ int i, reg_offset;
+ int ret = 0;
mutex_lock(&dev_priv->gmbus_mutex);
if (bus->force_bit) {
- ret = intel_i2c_quirk_xfer(bus, msgs, num);
+ ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
goto out;
}
- reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
+ reg_offset = dev_priv->gpio_mmio_base;
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
for (i = 0; i < num; i++) {
- u16 len = msgs[i].len;
- u8 *buf = msgs[i].buf;
-
- if (msgs[i].flags & I2C_M_RD) {
- I915_WRITE(GMBUS1 + reg_offset,
- GMBUS_CYCLE_WAIT |
- (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
- (len << GMBUS_BYTE_COUNT_SHIFT) |
- (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_READ | GMBUS_SW_RDY);
- POSTING_READ(GMBUS2+reg_offset);
- do {
- u32 val, loop = 0;
-
- if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
- goto timeout;
- if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
- goto clear_err;
-
- val = I915_READ(GMBUS3 + reg_offset);
- do {
- *buf++ = val & 0xff;
- val >>= 8;
- } while (--len && ++loop < 4);
- } while (len);
+ u32 gmbus2;
+
+ if (gmbus_is_index_read(msgs, i, num)) {
+ ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
+ i += 1; /* set i to the index of the read xfer */
+ } else if (msgs[i].flags & I2C_M_RD) {
+ ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
- u32 val, loop;
-
- val = loop = 0;
- do {
- val |= *buf++ << (8 * loop);
- } while (--len && ++loop < 4);
-
- I915_WRITE(GMBUS3 + reg_offset, val);
- I915_WRITE(GMBUS1 + reg_offset,
- GMBUS_CYCLE_WAIT |
- (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
- (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
- (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
- POSTING_READ(GMBUS2+reg_offset);
-
- while (len) {
- if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
- goto timeout;
- if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
- goto clear_err;
-
- val = loop = 0;
- do {
- val |= *buf++ << (8 * loop);
- } while (--len && ++loop < 4);
-
- I915_WRITE(GMBUS3 + reg_offset, val);
- POSTING_READ(GMBUS2+reg_offset);
- }
+ ret = gmbus_xfer_write(dev_priv, &msgs[i]);
}
- if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+ if (ret == -ETIMEDOUT)
+ goto timeout;
+ if (ret == -ENXIO)
+ goto clear_err;
+
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+ (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
+ 50);
+ if (ret)
goto timeout;
- if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ if (gmbus2 & GMBUS_SATOER)
goto clear_err;
}
- goto done;
+ /* Generate a STOP condition on the bus. Note that gmbus can't generate
+ * a STOP on the very first cycle. To simplify the code we
+ * unconditionally generate the STOP condition with an additional gmbus
+ * cycle. */
+ I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+
+ /* Mark the GMBUS interface as disabled after waiting for idle.
+ * We will re-enable it at the start of the next xfer,
+ * till then let it sleep.
+ */
+ if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
+ 10)) {
+ DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
+ adapter->name);
+ ret = -ETIMEDOUT;
+ }
+ I915_WRITE(GMBUS0 + reg_offset, 0);
+ ret = ret ?: i;
+ goto out;
clear_err:
+ /*
+ * Wait for bus to IDLE before clearing NAK.
+ * If we clear the NAK while bus is still active, then it will stay
+ * active and the next transaction may fail.
+ *
+ * If no ACK is received during the address phase of a transaction, the
+ * adapter must report -ENXIO. It is not clear what to return if no ACK
+ * is received at other times. But we have to be careful not to return
+ * spurious -ENXIO because that will prevent i2c and drm edid functions
+ * from retrying. So return -ENXIO only when gmbus properly quiesces -
+ * timing out seems to happen when there _is_ a ddc chip present, but
+ * it's slow responding and only answers on the 2nd retry.
+ */
+ ret = -ENXIO;
+ if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
+ 10)) {
+ DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
+ adapter->name);
+ ret = -ETIMEDOUT;
+ }
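To see why the -ENXIO/-ETIMEDOUT distinction matters to callers, here is a hypothetical, self-contained sketch (probe_ddc_once() is made up; the real retry behaviour lives in the i2c core and the drm EDID helpers). A timeout leaves room for another attempt, while -ENXIO ends the probe immediately:

    #include <errno.h>
    #include <stdio.h>

    /* Made-up stand-in for one GMBUS probe attempt: models a slow DDC
     * chip that only answers on the second try. */
    static int probe_ddc_once(int attempt)
    {
            return attempt < 1 ? -ETIMEDOUT : 0;
    }

    int main(void)
    {
            int ret = -ETIMEDOUT, attempt;

            for (attempt = 0; attempt < 3 && ret == -ETIMEDOUT; attempt++) {
                    ret = probe_ddc_once(attempt);
                    if (ret == -ENXIO)
                            break;  /* nothing ACKed: no device present */
            }
            printf("probe result after %d attempt(s): %d\n", attempt, ret);
            return 0;
    }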
+
/* Toggle the Software Clear Interrupt bit. This has the effect
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
I915_WRITE(GMBUS1 + reg_offset, 0);
-
-done:
- /* Mark the GMBUS interface as disabled after waiting for idle.
- * We will re-enable it at the start of the next xfer,
- * till then let it sleep.
- */
- if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10))
- DRM_INFO("GMBUS timed out waiting for idle\n");
I915_WRITE(GMBUS0 + reg_offset, 0);
- ret = i;
+
+ DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ adapter->name, msgs[i].addr,
+ (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+
goto out;
timeout:
- DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
- bus->reg0 & 0xff, bus->adapter.name);
+ DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+ bus->adapter.name, bus->reg0 & 0xff);
I915_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
- if (!bus->has_gpio) {
- ret = -EIO;
- } else {
- bus->force_bit = true;
- ret = intel_i2c_quirk_xfer(bus, msgs, num);
- }
+ bus->force_bit = true;
+ ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+
out:
mutex_unlock(&dev_priv->gmbus_mutex);
return ret;
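Note the structural change in the fallback path above: the old intel_i2c_quirk_xfer() wrapper is gone, and both the force_bit case and the timeout fallback now call i2c_bit_algo.master_xfer() directly. The quirk setup and teardown it used to do by hand are instead wired in as the pre_xfer/post_xfer hooks of the bit-banging algorithm (see intel_gpio_pre_xfer()/intel_gpio_post_xfer() earlier in this file), so the i2c core applies them around every bit-banged transfer automatically.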
@@ -346,35 +461,26 @@ static const struct i2c_algorithm gmbus_algorithm = {
*/
int intel_setup_gmbus(struct drm_device *dev)
{
- static const char *names[GMBUS_NUM_PORTS] = {
- "disabled",
- "ssc",
- "vga",
- "panel",
- "dpc",
- "dpb",
- "reserved",
- "dpd",
- };
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
- dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
- GFP_KERNEL);
- if (dev_priv->gmbus == NULL)
- return -ENOMEM;
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
+ else
+ dev_priv->gpio_mmio_base = 0;
mutex_init(&dev_priv->gmbus_mutex);
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ u32 port = i + 1; /* +1 to map gmbus index to pin pair */
bus->adapter.owner = THIS_MODULE;
bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
sizeof(bus->adapter.name),
"i915 gmbus %s",
- names[i]);
+ gmbus_ports[i].name);
bus->adapter.dev.parent = &dev->pdev->dev;
bus->dev_priv = dev_priv;
@@ -385,13 +491,13 @@ int intel_setup_gmbus(struct drm_device *dev)
goto err;
/* By default use a conservative clock rate */
- bus->reg0 = i | GMBUS_RATE_100KHZ;
+ bus->reg0 = port | GMBUS_RATE_100KHZ;
- bus->has_gpio = intel_gpio_setup(bus, i);
-
- /* XXX force bit banging until GMBUS is fully debugged */
- if (bus->has_gpio)
+ /* gmbus seems to be broken on i830 */
+ if (IS_I830(dev))
bus->force_bit = true;
+
+ intel_gpio_setup(bus, port);
}
intel_i2c_reset(dev_priv->dev);
@@ -403,11 +509,18 @@ err:
struct intel_gmbus *bus = &dev_priv->gmbus[i];
i2c_del_adapter(&bus->adapter);
}
- kfree(dev_priv->gmbus);
- dev_priv->gmbus = NULL;
return ret;
}
+struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
+ unsigned port)
+{
+ WARN_ON(!intel_gmbus_is_port_valid(port));
+ /* -1 to map pin pair to gmbus index */
+ return (intel_gmbus_is_port_valid(port)) ?
+ &dev_priv->gmbus[port - 1].adapter : NULL;
+}
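Call sites elsewhere in this series switch from indexing dev_priv->gmbus[] directly to this accessor; the intel_hdmi.c hunks above, for instance, now read:

    edid = drm_get_edid(connector,
                        intel_gmbus_get_adapter(dev_priv,
                                                intel_hdmi->ddc_bus));

The helper returns NULL for an invalid port (after the WARN_ON), so callers that cannot guarantee a validated port would want to check the result before handing it to the i2c layer.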
+
void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
@@ -419,8 +532,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- if (bus->has_gpio)
- bus->force_bit = force_bit;
+ bus->force_bit = force_bit;
}
void intel_teardown_gmbus(struct drm_device *dev)
@@ -435,7 +547,4 @@ void intel_teardown_gmbus(struct drm_device *dev)
struct intel_gmbus *bus = &dev_priv->gmbus[i];
i2c_del_adapter(&bus->adapter);
}
-
- kfree(dev_priv->gmbus);
- dev_priv->gmbus = NULL;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 9c71183629c2..08eb04c787e8 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -480,7 +480,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
{
- DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
+ DRM_INFO("Skipping forced modeset for %s\n", id->ident);
return 1;
}
@@ -628,7 +628,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
{
- DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
+ DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
return 1;
}
@@ -747,6 +747,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard HP t5740e Thin Client",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
.ident = "Hewlett-Packard t5745",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
@@ -851,8 +859,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
child->device_type != DEVICE_TYPE_LFP)
continue;
- if (child->i2c_pin)
- *i2c_pin = child->i2c_pin;
+ if (intel_gmbus_is_port_valid(child->i2c_pin))
+ *i2c_pin = child->i2c_pin;
/* However, we cannot trust the BIOS writers to populate
* the VBT correctly. Since LVDS requires additional
@@ -993,7 +1001,8 @@ bool intel_lvds_init(struct drm_device *dev)
* preferred mode is the right one.
*/
intel_lvds->edid = drm_get_edid(connector,
- &dev_priv->gmbus[pin].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ pin));
if (intel_lvds->edid) {
if (drm_add_edid_modes(connector,
intel_lvds->edid)) {
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index d1928e79d9b6..d67ec3a51e42 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -56,7 +56,8 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
}
};
- return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
+ return i2c_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus),
+ msgs, 2) == 2;
}
/**
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 289140bc83cb..18bd0af855dc 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -25,6 +25,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/acpi.h>
#include <linux/acpi_io.h>
#include <acpi/video.h>
@@ -149,7 +151,7 @@ struct opregion_asle {
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 max;
if (!(bclp & ASLE_BCLP_VALID))
@@ -161,7 +163,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
max = intel_panel_get_max_backlight(dev);
intel_panel_set_backlight(dev, bclp * max / 255);
- asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+ iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
return 0;
}
@@ -198,14 +200,14 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
void intel_opregion_asle_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 asle_stat = 0;
u32 asle_req;
if (!asle)
return;
- asle_req = asle->aslc & ASLE_REQ_MSK;
+ asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
if (!asle_req) {
DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -213,31 +215,31 @@ void intel_opregion_asle_intr(struct drm_device *dev)
}
if (asle_req & ASLE_SET_ALS_ILLUM)
- asle_stat |= asle_set_als_illum(dev, asle->alsi);
+ asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev, asle->bclp);
+ asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
if (asle_req & ASLE_SET_PFIT)
- asle_stat |= asle_set_pfit(dev, asle->pfit);
+ asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
if (asle_req & ASLE_SET_PWM_FREQ)
- asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+ asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
- asle->aslc = asle_stat;
+ iowrite32(asle_stat, &asle->aslc);
}
void intel_opregion_gse_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 asle_stat = 0;
u32 asle_req;
if (!asle)
return;
- asle_req = asle->aslc & ASLE_REQ_MSK;
+ asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
if (!asle_req) {
DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -250,7 +252,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
}
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev, asle->bclp);
+ asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
if (asle_req & ASLE_SET_PFIT) {
DRM_DEBUG_DRIVER("Pfit is not supported\n");
@@ -262,7 +264,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
asle_stat |= ASLE_PWM_FREQ_FAILED;
}
- asle->aslc = asle_stat;
+ iowrite32(asle_stat, &asle->aslc);
}
#define ASLE_ALS_EN (1<<0)
#define ASLE_BLC_EN (1<<1)
@@ -272,15 +274,16 @@ void intel_opregion_gse_intr(struct drm_device *dev)
void intel_opregion_enable_asle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
if (asle) {
if (IS_MOBILE(dev))
intel_enable_asle(dev);
- asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
- ASLE_PFMB_EN;
- asle->ardy = 1;
+ iowrite32(ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
+ ASLE_PFMB_EN,
+ &asle->tche);
+ iowrite32(1, &asle->ardy);
}
}
@@ -298,7 +301,7 @@ static int intel_opregion_video_event(struct notifier_block *nb,
Linux, these are handled by the dock, button and video drivers.
*/
- struct opregion_acpi *acpi;
+ struct opregion_acpi __iomem *acpi;
struct acpi_bus_event *event = data;
int ret = NOTIFY_OK;
@@ -310,10 +313,11 @@ static int intel_opregion_video_event(struct notifier_block *nb,
acpi = system_opregion->acpi;
- if (event->type == 0x80 && !(acpi->cevt & 0x1))
+ if (event->type == 0x80 &&
+ (ioread32(&acpi->cevt) & 1) == 0)
ret = NOTIFY_BAD;
- acpi->csts = 0;
+ iowrite32(0, &acpi->csts);
return ret;
}
@@ -337,6 +341,7 @@ static void intel_didl_outputs(struct drm_device *dev)
struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
unsigned long long device_id;
acpi_status status;
+ u32 temp;
int i = 0;
handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
@@ -355,7 +360,7 @@ static void intel_didl_outputs(struct drm_device *dev)
}
if (!acpi_video_bus) {
- printk(KERN_WARNING "No ACPI video bus found\n");
+ pr_warn("No ACPI video bus found\n");
return;
}
@@ -371,7 +376,8 @@ static void intel_didl_outputs(struct drm_device *dev)
if (ACPI_SUCCESS(status)) {
if (!device_id)
goto blind_set;
- opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+ iowrite32((u32)(device_id & 0x0f0f),
+ &opregion->acpi->didl[i]);
i++;
}
}
@@ -379,7 +385,7 @@ static void intel_didl_outputs(struct drm_device *dev)
end:
/* If fewer than 8 outputs, the list must be null terminated */
if (i < 8)
- opregion->acpi->didl[i] = 0;
+ iowrite32(0, &opregion->acpi->didl[i]);
return;
blind_set:
@@ -413,7 +419,9 @@ blind_set:
output_type = ACPI_LVDS_OUTPUT;
break;
}
- opregion->acpi->didl[i] |= (1<<31) | output_type | i;
+ temp = ioread32(&opregion->acpi->didl[i]);
+ iowrite32(temp | (1<<31) | output_type | i,
+ &opregion->acpi->didl[i]);
i++;
}
goto end;
@@ -434,8 +442,8 @@ void intel_opregion_init(struct drm_device *dev)
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
* We don't actually need to do anything with them. */
- opregion->acpi->csts = 0;
- opregion->acpi->drdy = 1;
+ iowrite32(0, &opregion->acpi->csts);
+ iowrite32(1, &opregion->acpi->drdy);
system_opregion = opregion;
register_acpi_notifier(&intel_opregion_notifier);
@@ -454,7 +462,7 @@ void intel_opregion_fini(struct drm_device *dev)
return;
if (opregion->acpi) {
- opregion->acpi->drdy = 0;
+ iowrite32(0, &opregion->acpi->drdy);
system_opregion = NULL;
unregister_acpi_notifier(&intel_opregion_notifier);
@@ -474,8 +482,9 @@ int intel_opregion_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
- void *base;
+ void __iomem *base;
u32 asls, mboxes;
+ char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
@@ -489,7 +498,9 @@ int intel_opregion_setup(struct drm_device *dev)
if (!base)
return -ENOMEM;
- if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+ memcpy_fromio(buf, base, sizeof(buf));
+
+ if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
DRM_DEBUG_DRIVER("opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
@@ -499,7 +510,7 @@ int intel_opregion_setup(struct drm_device *dev)
opregion->lid_state = base + ACPI_CLID;
- mboxes = opregion->header->mboxes;
+ mboxes = ioread32(&opregion->header->mboxes);
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 80b331c322fb..458743da3774 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -187,14 +187,14 @@ struct intel_overlay {
void (*flip_tail)(struct intel_overlay *);
};
-static struct overlay_registers *
+static struct overlay_registers __iomem *
intel_overlay_map_regs(struct intel_overlay *overlay)
{
drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
overlay->reg_bo->gtt_offset);
@@ -203,7 +203,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
}
static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
io_mapping_unmap(regs);
@@ -215,20 +215,21 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
BUG_ON(overlay->last_flip_req);
- ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+ ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
return ret;
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
- true);
+ ret = i915_wait_request(ring, overlay->last_flip_req);
if (ret)
return ret;
+ i915_gem_retire_requests(dev);
overlay->last_flip_req = 0;
return 0;
@@ -262,7 +263,7 @@ i830_activate_pipe_a(struct drm_device *dev)
DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
mode = drm_mode_duplicate(dev, &vesa_640x480);
- drm_mode_set_crtcinfo(mode, 0);
+
if (!drm_crtc_helper_set_mode(&crtc->base, mode,
crtc->base.x, crtc->base.y,
crtc->base.fb))
@@ -287,6 +288,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct drm_i915_gem_request *request;
int pipe_a_quirk = 0;
int ret;
@@ -306,17 +308,17 @@ static int intel_overlay_on(struct intel_overlay *overlay)
goto out;
}
- ret = BEGIN_LP_RING(4);
+ ret = intel_ring_begin(ring, 4);
if (ret) {
kfree(request);
goto out;
}
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
- OUT_RING(overlay->flip_addr | OFC_UPDATE);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+ intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
ret = intel_overlay_do_wait_request(overlay, request, NULL);
out:
@@ -332,6 +334,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
@@ -351,16 +354,16 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- ret = BEGIN_LP_RING(2);
+ ret = intel_ring_begin(ring, 2);
if (ret) {
kfree(request);
return ret;
}
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- OUT_RING(flip_addr);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_advance(ring);
- ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+ ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
return ret;
@@ -401,6 +404,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
struct drm_i915_gem_request *request;
int ret;
@@ -417,20 +421,20 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- ret = BEGIN_LP_RING(6);
+ ret = intel_ring_begin(ring, 6);
if (ret) {
kfree(request);
return ret;
}
/* wait for overlay to go idle */
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_advance(ring);
return intel_overlay_do_wait_request(overlay, request,
intel_overlay_off_tail);
@@ -442,15 +446,16 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
if (overlay->last_flip_req == 0)
return 0;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
- true);
+ ret = i915_wait_request(ring, overlay->last_flip_req);
if (ret)
return ret;
+ i915_gem_retire_requests(dev);
if (overlay->flip_tail)
overlay->flip_tail(overlay);
@@ -467,6 +472,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
/* Only wait if there is actually an old frame to release to
@@ -483,15 +489,15 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (request == NULL)
return -ENOMEM;
- ret = BEGIN_LP_RING(2);
+ ret = intel_ring_begin(ring, 2);
if (ret) {
kfree(request);
return ret;
}
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
ret = intel_overlay_do_wait_request(overlay, request,
intel_overlay_release_old_vid_tail);
@@ -619,14 +625,15 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
0x3000, 0x0800, 0x3000
};
-static void update_polyphase_filter(struct overlay_registers *regs)
+static void update_polyphase_filter(struct overlay_registers __iomem *regs)
{
- memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
- memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
+ memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+ memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
+ sizeof(uv_static_hcoeffs));
}
static bool update_scaling_factors(struct intel_overlay *overlay,
- struct overlay_registers *regs,
+ struct overlay_registers __iomem *regs,
struct put_image_params *params)
{
/* fixed point with a 12 bit shift */
@@ -665,16 +672,19 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
overlay->old_xscale = xscale;
overlay->old_yscale = yscale;
- regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
- ((xscale >> FP_SHIFT) << 16) |
- ((xscale & FRACT_MASK) << 3));
+ iowrite32(((yscale & FRACT_MASK) << 20) |
+ ((xscale >> FP_SHIFT) << 16) |
+ ((xscale & FRACT_MASK) << 3),
+ &regs->YRGBSCALE);
- regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
- ((xscale_UV >> FP_SHIFT) << 16) |
- ((xscale_UV & FRACT_MASK) << 3));
+ iowrite32(((yscale_UV & FRACT_MASK) << 20) |
+ ((xscale_UV >> FP_SHIFT) << 16) |
+ ((xscale_UV & FRACT_MASK) << 3),
+ &regs->UVSCALE);
- regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
- ((yscale_UV >> FP_SHIFT) << 0)));
+ iowrite32((((yscale >> FP_SHIFT) << 16) |
+ ((yscale_UV >> FP_SHIFT) << 0)),
+ &regs->UVSCALEV);
if (scale_changed)
update_polyphase_filter(regs);
@@ -683,30 +693,32 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
}
static void update_colorkey(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
u32 key = overlay->color_key;
switch (overlay->crtc->base.fb->bits_per_pixel) {
case 8:
- regs->DCLRKV = 0;
- regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+ iowrite32(0, &regs->DCLRKV);
+ iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
break;
case 16:
if (overlay->crtc->base.fb->depth == 15) {
- regs->DCLRKV = RGB15_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+ iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV);
+ iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE,
+ &regs->DCLRKM);
} else {
- regs->DCLRKV = RGB16_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+ iowrite32(RGB16_TO_COLORKEY(key), &regs->DCLRKV);
+ iowrite32(CLK_RGB16_MASK | DST_KEY_ENABLE,
+ &regs->DCLRKM);
}
break;
case 24:
case 32:
- regs->DCLRKV = key;
- regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ iowrite32(key, &regs->DCLRKV);
+ iowrite32(CLK_RGB24_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
break;
}
}
@@ -761,9 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct put_image_params *params)
{
int ret, tmp_width;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
bool scale_changed = false;
struct drm_device *dev = overlay->dev;
+ u32 swidth, swidthsw, sheight, ostride;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
@@ -782,16 +795,18 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
if (!overlay->active) {
+ u32 oconfig;
regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unpin;
}
- regs->OCONFIG = OCONF_CC_OUT_8BIT;
+ oconfig = OCONF_CC_OUT_8BIT;
if (IS_GEN4(overlay->dev))
- regs->OCONFIG |= OCONF_CSC_MODE_BT709;
- regs->OCONFIG |= overlay->crtc->pipe == 0 ?
+ oconfig |= OCONF_CSC_MODE_BT709;
+ oconfig |= overlay->crtc->pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
+ iowrite32(oconfig, &regs->OCONFIG);
intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_on(overlay);
@@ -805,42 +820,46 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
}
- regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
- regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
+ iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
+ iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
if (params->format & I915_OVERLAY_YUV_PACKED)
tmp_width = packed_width_bytes(params->format, params->src_w);
else
tmp_width = params->src_w;
- regs->SWIDTH = params->src_w;
- regs->SWIDTHSW = calc_swidthsw(overlay->dev,
- params->offset_Y, tmp_width);
- regs->SHEIGHT = params->src_h;
- regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
- regs->OSTRIDE = params->stride_Y;
+ swidth = params->src_w;
+ swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+ sheight = params->src_h;
+ iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
+ ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
int uv_hscale = uv_hsubsampling(params->format);
int uv_vscale = uv_vsubsampling(params->format);
u32 tmp_U, tmp_V;
- regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
+ swidth |= (params->src_w/uv_hscale) << 16;
tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
params->src_w/uv_hscale);
tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
params->src_w/uv_hscale);
- regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
- regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
- regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
- regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
- regs->OSTRIDE |= params->stride_UV << 16;
+ swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
+ sheight |= (params->src_h/uv_vscale) << 16;
+ iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
+ iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
+ ostride |= params->stride_UV << 16;
}
+ iowrite32(swidth, &regs->SWIDTH);
+ iowrite32(swidthsw, &regs->SWIDTHSW);
+ iowrite32(sheight, &regs->SHEIGHT);
+ iowrite32(ostride, &regs->OSTRIDE);
+
scale_changed = update_scaling_factors(overlay, regs, params);
update_colorkey(overlay, regs);
- regs->OCMD = overlay_cmd_reg(params);
+ iowrite32(overlay_cmd_reg(params), &regs->OCMD);
intel_overlay_unmap_regs(overlay, regs);
@@ -860,7 +879,7 @@ out_unpin:
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
struct drm_device *dev = overlay->dev;
int ret;
@@ -879,7 +898,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
return ret;
regs = intel_overlay_map_regs(overlay);
- regs->OCMD = 0;
+ iowrite32(0, &regs->OCMD);
intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_off(overlay);
@@ -1109,11 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct put_image_params *params;
int ret;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
+ /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
@@ -1250,10 +1265,11 @@ out_free:
}
static void update_reg_attrs(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
- regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
- regs->OCLRC1 = overlay->saturation;
+ iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
+ &regs->OCLRC0);
+ iowrite32(overlay->saturation, &regs->OCLRC1);
}
static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
@@ -1306,14 +1322,10 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_intel_overlay_attrs *attrs = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
int ret;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
+ /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
@@ -1396,7 +1408,7 @@ void intel_setup_overlay(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct drm_i915_gem_object *reg_bo;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
int ret;
if (!HAS_OVERLAY(dev))
@@ -1451,7 +1463,7 @@ void intel_setup_overlay(struct drm_device *dev)
if (!regs)
goto out_unpin_bo;
- memset(regs, 0, sizeof(struct overlay_registers));
+ memset_io(regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(regs);
update_reg_attrs(overlay, regs);
@@ -1499,14 +1511,17 @@ struct intel_overlay_error_state {
u32 isr;
};
-static struct overlay_registers *
+static struct overlay_registers __iomem *
intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
{
drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ /* Cast to make sparse happy, but it's wc memory anyway, so
+ * equivalent to the wc io mapping on X86. */
+ regs = (struct overlay_registers __iomem *)
+ overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
overlay->reg_bo->gtt_offset);
@@ -1515,7 +1530,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
}
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
io_mapping_unmap_atomic(regs);
@@ -1540,9 +1555,9 @@ intel_overlay_capture_error_state(struct drm_device *dev)
error->dovsta = I915_READ(DOVSTA);
error->isr = I915_READ(ISR);
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+ error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
else
- error->base = (long) overlay->reg_bo->gtt_offset;
+ error->base = overlay->reg_bo->gtt_offset;
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 48177ec4720e..2a1625d84a69 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -28,6 +28,9 @@
* Chris Wilson <chris@chris-wilson.co.uk>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/moduleparam.h>
#include "intel_drv.h"
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
@@ -169,7 +172,7 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
/* XXX add code here to query mode clock or hardware clock
* and program max PWM appropriately.
*/
- printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
+ pr_warn_once("fixme: max PWM is zero\n");
return 1;
}
@@ -189,6 +192,27 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
return max;
}
+static int i915_panel_invert_brightness;
+MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
+ "(-1 force normal, 0 machine defaults, 1 force inversion), please "
+ "report PCI device ID, subsystem vendor and subsystem device ID "
+ "to dri-devel@lists.freedesktop.org, if your machine needs it. "
+ "It will then be included in an upcoming module version.");
+module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
+static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (i915_panel_invert_brightness < 0)
+ return val;
+
+ if (i915_panel_invert_brightness > 0 ||
+ dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
+ return intel_panel_get_max_backlight(dev) - val;
+
+ return val;
+}
+
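With the semantics spelled out in the parameter description (-1 force normal, 0 machine defaults, 1 force inversion), a machine whose backlight PWM is wired inverted can be handled either through the QUIRK_INVERT_BRIGHTNESS quirk or by booting with i915.invert_brightness=1. Numerically: with a maximum backlight of 255, a requested level of 200 is programmed as 255 - 200 = 55 while inversion is active, and the readback path applies the same conversion.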
u32 intel_panel_get_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -209,6 +233,7 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
}
}
+ val = intel_panel_compute_brightness(dev, val);
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
}
@@ -226,6 +251,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
u32 tmp;
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+ level = intel_panel_compute_brightness(dev, level);
if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
@@ -342,6 +368,7 @@ int intel_panel_setup_backlight(struct drm_device *dev)
else
return -ENODEV;
+ memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = intel_panel_get_max_backlight(dev);
dev_priv->backlight =
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
new file mode 100644
index 000000000000..d0ce2a5b1d3f
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -0,0 +1,3820 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "../../../platform/x86/intel_ips.h"
+#include <linux/module.h>
+
+/* FBC, or Frame Buffer Compression, is a technique employed to compress the
+ * framebuffer contents in-memory, aiming at reducing the required bandwidth
+ * during in-memory transfers and, therefore, reducing power consumption.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns.
+ *
+ * FBC-related functionality can be enabled by means of the
+ * i915.i915_enable_fbc parameter
+ */
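As the comment says, the feature is gated by a module parameter: booting with i915.i915_enable_fbc=1, for instance, requests compression where the hardware supports it, while =0 disables it; the default policy itself is decided elsewhere in the driver and is not part of this file's hunks.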
+
+static void i8xx_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int cfb_pitch;
+ int plane, i;
+ u32 fbc_ctl, fbc_ctl2;
+
+ cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+ if (fb->pitches[0] < cfb_pitch)
+ cfb_pitch = fb->pitches[0];
+
+ /* FBC_CTL wants 64B units */
+ cfb_pitch = (cfb_pitch / 64) - 1;
+ plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ I915_WRITE(FBC_TAG + (i * 4), 0);
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 |= plane;
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+
+ /* enable it... */
+ fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ if (IS_I945GM(dev))
+ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
+ fbc_ctl |= obj->fence_reg;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
+ cfb_pitch, crtc->y, intel_crtc->plane);
+}
+
+static bool i8xx_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+ I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+ I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+
+ /* enable it... */
+ I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+static void g4x_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool g4x_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 blt_ecoskpd;
+
+ /* Make sure blitter notifies FBC of writes */
+ gen6_gt_force_wake_get(dev_priv);
+ blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT);
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ POSTING_READ(GEN6_BLITTER_ECOSKPD);
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ dpfc_ctl &= DPFC_RESERVED;
+ dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
+ /* Set persistent mode for front-buffer rendering, ala X. */
+ dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
+ dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+ I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+ I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+ /* enable it... */
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ if (IS_GEN6(dev)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ sandybridge_blit_fbc_update(dev);
+ }
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+static void ironlake_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool ironlake_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.fbc_enabled)
+ return false;
+
+ return dev_priv->display.fbc_enabled(dev);
+}
+
+static void intel_fbc_work_fn(struct work_struct *__work)
+{
+ struct intel_fbc_work *work =
+ container_of(to_delayed_work(__work),
+ struct intel_fbc_work, work);
+ struct drm_device *dev = work->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ if (work == dev_priv->fbc_work) {
+ /* Double check that we haven't switched fb without cancelling
+ * the prior work.
+ */
+ if (work->crtc->fb == work->fb) {
+ dev_priv->display.enable_fbc(work->crtc,
+ work->interval);
+
+ dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->cfb_fb = work->crtc->fb->base.id;
+ dev_priv->cfb_y = work->crtc->y;
+ }
+
+ dev_priv->fbc_work = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(work);
+}
+
+static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->fbc_work == NULL)
+ return;
+
+ DRM_DEBUG_KMS("cancelling pending FBC enable\n");
+
+ /* Synchronisation is provided by struct_mutex and checking of
+ * dev_priv->fbc_work, so we can perform the cancellation
+ * entirely asynchronously.
+ */
+ if (cancel_delayed_work(&dev_priv->fbc_work->work))
+ /* work item was cancelled before being run, clean up */
+ kfree(dev_priv->fbc_work);
+
+ /* Mark the work as no longer wanted so that if it does
+ * wake up (because the work was already running and waiting
+ * for our mutex), it will discover that it is no longer
+ * necessary to run.
+ */
+ dev_priv->fbc_work = NULL;
+}
+
+void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct intel_fbc_work *work;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.enable_fbc)
+ return;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL) {
+ dev_priv->display.enable_fbc(crtc, interval);
+ return;
+ }
+
+ work->crtc = crtc;
+ work->fb = crtc->fb;
+ work->interval = interval;
+ INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
+
+ dev_priv->fbc_work = work;
+
+ DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+
+ /* Delay the actual enabling to let pageflipping cease and the
+ * display to settle before starting the compression. Note that
+ * this delay also serves a second purpose: it allows for a
+ * vblank to pass after disabling the FBC before we attempt
+ * to modify the control registers.
+ *
+ * A more complicated solution would involve tracking vblanks
+ * following the termination of the page-flipping sequence
+ * and indeed performing the enable as a co-routine and not
+ * waiting synchronously upon the vblank.
+ */
+ schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+}
+
+void intel_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ if (!dev_priv->display.disable_fbc)
+ return;
+
+ dev_priv->display.disable_fbc(dev);
+ dev_priv->cfb_plane = -1;
+}
+
+/**
+ * intel_update_fbc - enable/disable FBC as needed
+ * @dev: the drm_device
+ *
+ * Set up the framebuffer compression hardware at mode set time. We
+ * enable it if possible:
+ * - plane A only (on pre-965)
+ * - no pixel multiply/line duplication
+ * - no alpha buffer discard
+ * - no dual wide
+ * - framebuffer <= 2048 in width, 1536 in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one. It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+void intel_update_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int enable_fbc;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!i915_powersave)
+ return;
+
+ if (!I915_HAS_FBC(dev))
+ return;
+
+ /*
+ * If FBC is already on, we just have to verify that we can
+ * keep it that way...
+ * Need to disable if:
+ * - more than one pipe is active
+ * - changing FBC params (stride, fence, mode)
+ * - new fb is too large to fit in compressed buffer
+ * - going to an unsupported config (interlace, pixel multiply, etc.)
+ */
+ list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+ if (tmp_crtc->enabled && tmp_crtc->fb) {
+ if (crtc) {
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ goto out_disable;
+ }
+ crtc = tmp_crtc;
+ }
+ }
+
+ if (!crtc || crtc->fb == NULL) {
+ DRM_DEBUG_KMS("no output, disabling\n");
+ dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+ goto out_disable;
+ }
+
+ intel_crtc = to_intel_crtc(crtc);
+ fb = crtc->fb;
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ enable_fbc = i915_enable_fbc;
+ if (enable_fbc < 0) {
+ DRM_DEBUG_KMS("fbc set to per-chip default\n");
+ enable_fbc = 1;
+ if (INTEL_INFO(dev)->gen <= 6)
+ enable_fbc = 0;
+ }
+ if (!enable_fbc) {
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
+ dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+ goto out_disable;
+ }
+ if (intel_fb->obj->base.size > dev_priv->cfb_size) {
+ DRM_DEBUG_KMS("framebuffer too large, disabling "
+ "compression\n");
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ goto out_disable;
+ }
+ if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
+ (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
+ dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+ goto out_disable;
+ }
+ if ((crtc->mode.hdisplay > 2048) ||
+ (crtc->mode.vdisplay > 1536)) {
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+ goto out_disable;
+ }
+ if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
+ DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+ goto out_disable;
+ }
+
+ /* The use of a CPU fence is mandatory in order to detect writes
+ * by the CPU to the scanout and trigger updates to the FBC.
+ */
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_NOT_TILED;
+ goto out_disable;
+ }
+
+ /* If the kernel debugger is active, always disable compression */
+ if (in_dbg_master())
+ goto out_disable;
+
+ /* If the scanout has not changed, don't modify the FBC settings.
+ * Note that we make the fundamental assumption that the fb->obj
+ * cannot be unpinned (and have its GTT offset and fence revoked)
+ * without first being decoupled from the scanout and FBC disabled.
+ */
+ if (dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_fb == fb->base.id &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ if (intel_fbc_enabled(dev)) {
+ /* We update FBC along two paths, after changing fb/crtc
+ * configuration (modeswitching) and after page-flipping
+ * finishes. For the latter, we know that not only did
+ * we disable the FBC at the start of the page-flip
+ * sequence, but also more than one vblank has passed.
+ *
+ * For the former case of modeswitching, it is possible
+ * to switch between two FBC valid configurations
+ * instantaneously so we do need to disable the FBC
+ * before we can modify its control registers. We also
+ * have to wait for the next vblank for that to take
+ * effect. However, since we delay enabling FBC we can
+ * assume that a vblank has passed since disabling and
+ * that we can safely alter the registers in the deferred
+ * callback.
+ *
+ * In the scenario that we go from a valid to invalid
+ * and then back to valid FBC configuration we have
+ * no strict enforcement that a vblank occurred since
+ * disabling the FBC. However, along all current pipe
+ * disabling paths we do need to wait for a vblank at
+ * some point. And we wait before enabling FBC anyway.
+ */
+ DRM_DEBUG_KMS("disabling active FBC for update\n");
+ intel_disable_fbc(dev);
+ }
+
+ intel_enable_fbc(crtc, 500);
+ return;
+
+out_disable:
+ /* Multiple disables should be harmless */
+ if (intel_fbc_enabled(dev)) {
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
+ intel_disable_fbc(dev);
+ }
+}
+
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ tmp = I915_READ(CLKCFG);
+
+ switch (tmp & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_533:
+ dev_priv->fsb_freq = 533; /* 133*4 */
+ break;
+ case CLKCFG_FSB_800:
+ dev_priv->fsb_freq = 800; /* 200*4 */
+ break;
+ case CLKCFG_FSB_667:
+ dev_priv->fsb_freq = 667; /* 167*4 */
+ break;
+ case CLKCFG_FSB_400:
+ dev_priv->fsb_freq = 400; /* 100*4 */
+ break;
+ }
+
+ switch (tmp & CLKCFG_MEM_MASK) {
+ case CLKCFG_MEM_533:
+ dev_priv->mem_freq = 533;
+ break;
+ case CLKCFG_MEM_667:
+ dev_priv->mem_freq = 667;
+ break;
+ case CLKCFG_MEM_800:
+ dev_priv->mem_freq = 800;
+ break;
+ }
+
+ /* detect pineview DDR3 setting */
+ tmp = I915_READ(CSHRDDR3CTL);
+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 ddrpll, csipll;
+
+ ddrpll = I915_READ16(DDRMPLL1);
+ csipll = I915_READ16(CSIPLL0);
+
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ dev_priv->mem_freq = 800;
+ break;
+ case 0x10:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 0x14:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 0x18:
+ dev_priv->mem_freq = 1600;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ dev_priv->mem_freq = 0;
+ break;
+ }
+
+ dev_priv->r_t = dev_priv->mem_freq;
+
+ switch (csipll & 0x3ff) {
+ case 0x00c:
+ dev_priv->fsb_freq = 3200;
+ break;
+ case 0x00e:
+ dev_priv->fsb_freq = 3733;
+ break;
+ case 0x010:
+ dev_priv->fsb_freq = 4266;
+ break;
+ case 0x012:
+ dev_priv->fsb_freq = 4800;
+ break;
+ case 0x014:
+ dev_priv->fsb_freq = 5333;
+ break;
+ case 0x016:
+ dev_priv->fsb_freq = 5866;
+ break;
+ case 0x018:
+ dev_priv->fsb_freq = 6400;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
+ dev_priv->fsb_freq = 0;
+ break;
+ }
+
+ if (dev_priv->fsb_freq == 3200) {
+ dev_priv->c_m = 0;
+ } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+ dev_priv->c_m = 1;
+ } else {
+ dev_priv->c_m = 2;
+ }
+}
+
+static const struct cxsr_latency cxsr_latency_table[] = {
+ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
+ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
+ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
+ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
+
+ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
+ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
+ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
+ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
+
+ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
+ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
+ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
+
+ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
+ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
+ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
+ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
+
+ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
+ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
+ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
+ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
+
+ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
+ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
+ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
+ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
+};
+
+static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
+ int is_ddr3,
+ int fsb,
+ int mem)
+{
+ const struct cxsr_latency *latency;
+ int i;
+
+ if (fsb == 0 || mem == 0)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
+ latency = &cxsr_latency_table[i];
+ if (is_desktop == latency->is_desktop &&
+ is_ddr3 == latency->is_ddr3 &&
+ fsb == latency->fsb_freq && mem == latency->mem_freq)
+ return latency;
+ }
+
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+
+ return NULL;
+}
+
+static void pineview_disable_cxsr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* deactivate cxsr */
+ I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+}
+
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ * - memory configuration (speed, channels)
+ * - chipset
+ * - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value. It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+static const int latency_ns = 5000;
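+
+ /*
+ * Editor's note - rough numbers (hypothetical, not from this patch):
+ * at a 148.5 MHz dot clock and 4 bytes per pixel the display drains
+ * roughly 594 bytes per microsecond, so 5 us of latency corresponds
+ * to about 3 KB of FIFO data - which is what the watermark
+ * calculations below have to budget for.
+ */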
+
+static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ if (plane)
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x1ff;
+ if (plane)
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+static int i845_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 2; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A",
+ size);
+
+ return size;
+}
+
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+/* Pineview has different values for various configs */
+static const struct intel_watermark_params pineview_display_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_display_hplloff_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_HPLLOFF_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_cursor_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params g4x_wm_info = {
+ G4X_FIFO_SIZE,
+ G4X_MAX_WM,
+ G4X_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params g4x_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_wm_info = {
+ VALLEYVIEW_FIFO_SIZE,
+ VALLEYVIEW_MAX_WM,
+ VALLEYVIEW_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ VALLEYVIEW_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i965_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ I915_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i945_wm_info = {
+ I945_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i915_wm_info = {
+ I915_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i855_wm_info = {
+ I855GM_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i830_wm_info = {
+ I830_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params ironlake_display_wm_info = {
+ ILK_DISPLAY_FIFO,
+ ILK_DISPLAY_MAXWM,
+ ILK_DISPLAY_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_wm_info = {
+ ILK_CURSOR_FIFO,
+ ILK_CURSOR_MAXWM,
+ ILK_CURSOR_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_display_srwm_info = {
+ ILK_DISPLAY_SR_FIFO,
+ ILK_DISPLAY_MAX_SRWM,
+ ILK_DISPLAY_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_srwm_info = {
+ ILK_CURSOR_SR_FIFO,
+ ILK_CURSOR_MAX_SRWM,
+ ILK_CURSOR_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params sandybridge_display_wm_info = {
+ SNB_DISPLAY_FIFO,
+ SNB_DISPLAY_MAXWM,
+ SNB_DISPLAY_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_wm_info = {
+ SNB_CURSOR_FIFO,
+ SNB_CURSOR_MAXWM,
+ SNB_CURSOR_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_display_srwm_info = {
+ SNB_DISPLAY_SR_FIFO,
+ SNB_DISPLAY_MAX_SRWM,
+ SNB_DISPLAY_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
+ SNB_CURSOR_SR_FIFO,
+ SNB_CURSOR_MAX_SRWM,
+ SNB_CURSOR_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+/**
+ * intel_calculate_wm - calculate watermark level
+ * @clock_in_khz: pixel clock
+ * @wm: chip FIFO params
+ * @fifo_size: size of the display FIFO
+ * @pixel_size: display pixel size
+ * @latency_ns: memory latency for the platform
+ *
+ * Calculate the watermark level (the level at which the display plane will
+ * start fetching from memory again). Each chip has a different display
+ * FIFO size and allocation, so the caller needs to figure that out and pass
+ * in the correct intel_watermark_params structure.
+ *
+ * As the pixel clock runs, the FIFO will be drained at a rate that depends
+ * on the pixel size. When it reaches the watermark level, it'll start
+ * fetching FIFO line sized based chunks from memory until the FIFO fills
+ * past the watermark point. If the FIFO drains completely, a FIFO underrun
+ * will occur, and a display engine hang could result.
+ */
+static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
+ const struct intel_watermark_params *wm,
+ int fifo_size,
+ int pixel_size,
+ unsigned long latency_ns)
+{
+ long entries_required, wm_size;
+
+ /*
+ * Note: we need to make sure we don't overflow for various clock &
+ * latency values.
+ * clocks go from a few thousand to several hundred thousand.
+ * latency is usually a few thousand
+ */
+ entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+ 1000;
+ entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
+
+ DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
+
+ wm_size = fifo_size - (entries_required + wm->guard_size);
+
+ DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
+
+ /* Don't promote wm_size to unsigned... */
+ if (wm_size > (long)wm->max_wm)
+ wm_size = wm->max_wm;
+ if (wm_size <= 0)
+ wm_size = wm->default_wm;
+ return wm_size;
+}
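+
+ /*
+ * Editor's note - worked example of the calculation above, using
+ * hypothetical values (148.5 MHz dot clock, 32 bpp, the default
+ * 5000 ns latency, a 64-byte FIFO line, a guard of 2 and a 96-line
+ * FIFO; none of these come from this patch):
+ *
+ * entries_required = ((148500 / 1000) * 4 * 5000) / 1000 = 2960 bytes
+ * -> DIV_ROUND_UP(2960, 64) = 47 FIFO lines
+ * wm_size = 96 - (47 + 2) = 47
+ *
+ * i.e. the plane may drain down to 47 lines before refetching.
+ */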
+
+static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+{
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled && crtc->fb) {
+ if (enabled)
+ return NULL;
+ enabled = crtc;
+ }
+ }
+
+ return enabled;
+}
+
+static void pineview_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ const struct cxsr_latency *latency;
+ u32 reg;
+ unsigned long wm;
+
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ if (!latency) {
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
+ return;
+ }
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ int clock = crtc->mode.clock;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Display SR */
+ wm = intel_calculate_wm(clock, &pineview_display_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->display_sr);
+ reg = I915_READ(DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= wm << DSPFW_SR_SHIFT;
+ I915_WRITE(DSPFW1, reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->cursor_sr);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_CURSOR_SR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->display_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_SR_MASK;
+ reg |= wm & DSPFW_HPLL_SR_MASK;
+ I915_WRITE(DSPFW3, reg);
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->cursor_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+ /* activate cxsr */
+ I915_WRITE(DSPFW3,
+ I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
+ DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ } else {
+ pineview_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ }
+}
+
+static bool g4x_compute_wm0(struct drm_device *dev,
+ int plane,
+ const struct intel_watermark_params *display,
+ int display_latency_ns,
+ const struct intel_watermark_params *cursor,
+ int cursor_latency_ns,
+ int *plane_wm,
+ int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int htotal, hdisplay, clock, pixel_size;
+ int line_time_us, line_count;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *cursor_wm = cursor->guard_size;
+ *plane_wm = display->guard_size;
+ return false;
+ }
+
+ htotal = crtc->mode.htotal;
+ hdisplay = crtc->mode.hdisplay;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Use the small buffer method to calculate plane watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *plane_wm = entries + display->guard_size;
+ if (*plane_wm > (int)display->max_wm)
+ *plane_wm = display->max_wm;
+
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = ((htotal * 1000) / clock);
+ line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
+ entries = line_count * 64 * pixel_size;
+ tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+ if (*cursor_wm > (int)cursor->max_wm)
+ *cursor_wm = (int)cursor->max_wm;
+
+ return true;
+}
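+
+ /*
+ * Editor's note - worked example of the cursor (large buffer) path
+ * above, with hypothetical numbers (148.5 MHz dot clock, htotal 2200,
+ * 32 bpp, the default 5000 ns latency, 64-byte cachelines, a guard of
+ * 2, and a tlb_miss term that is not positive):
+ *
+ * line_time_us = (2200 * 1000) / 148500 = 14
+ * line_count = (5000 / 14 + 1000) / 1000 = 1
+ * entries = 1 * 64 * 4 = 256 -> DIV_ROUND_UP(256, 64) = 4
+ * cursor_wm = 4 + 2 = 6
+ */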
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool g4x_check_srwm(struct drm_device *dev,
+ int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
+ display_wm, cursor_wm);
+
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
+ display_wm, display->max_wm);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
+ cursor_wm, cursor->max_wm);
+ return false;
+ }
+
+ if (!(display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("SR latency is 0, disabling\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool g4x_compute_srwm(struct drm_device *dev,
+ int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int hdisplay, htotal, pixel_size, clock;
+ unsigned long line_time_us;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return g4x_check_srwm(dev,
+ *display_wm, *cursor_wm,
+ display, cursor);
+}
+
+static bool vlv_compute_drain_latency(struct drm_device *dev,
+ int plane,
+ int *plane_prec_mult,
+ int *plane_dl,
+ int *cursor_prec_mult,
+ int *cursor_dl)
+{
+ struct drm_crtc *crtc;
+ int clock, pixel_size;
+ int entries;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled)
+ return false;
+
+ clock = crtc->mode.clock; /* VESA DOT Clock */
+ pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
+
+ entries = (clock / 1000) * pixel_size;
+ *plane_prec_mult = (entries > 256) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+ *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
+ pixel_size);
+
+ entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
+ *cursor_prec_mult = (entries > 256) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+ *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
+
+ return true;
+}
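+
+ /*
+ * Editor's note - worked example of the drain latency formula above,
+ * with a hypothetical 148.5 MHz dot clock and 4 bytes per pixel
+ * (values are not from this patch):
+ *
+ * entries = 148 * 4 = 592 > 256 -> precision multiplier 32
+ * plane_dl = (64 * 32 * 4) / (148 * 4) = 8192 / 592 = 13
+ *
+ * The cursor value is computed the same way with a fixed 4 bytes
+ * per pixel.
+ */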
+
+/*
+ * Update drain latency registers of memory arbiter
+ *
+ * Valleyview SoC has a new memory arbiter and needs drain latency registers
+ * to be programmed. Each plane has a drain latency multiplier and a drain
+ * latency value.
+ */
+
+static void vlv_update_drain_latency(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_prec, planea_dl, planeb_prec, planeb_dl;
+ int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
+ int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
+ either 16 or 32 */
+
+ /* For plane A, Cursor A */
+ if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
+ &cursor_prec_mult, &cursora_dl)) {
+ cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
+ planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
+
+ I915_WRITE(VLV_DDL1, cursora_prec |
+ (cursora_dl << DDL_CURSORA_SHIFT) |
+ planea_prec | planea_dl);
+ }
+
+ /* For plane B, Cursor B */
+ if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
+ &cursor_prec_mult, &cursorb_dl)) {
+ cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
+ planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
+
+ I915_WRITE(VLV_DDL2, cursorb_prec |
+ (cursorb_dl << DDL_CURSORB_SHIFT) |
+ planeb_prec | planeb_dl);
+ }
+}
+
+#define single_plane_enabled(mask) is_power_of_2(mask)
+
+static void valleyview_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ unsigned int enabled = 0;
+
+ vlv_update_drain_latency(dev);
+
+ if (g4x_compute_wm0(dev, 0,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1;
+
+ if (g4x_compute_wm0(dev, 1,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 2;
+
+ plane_sr = cursor_sr = 0;
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &plane_sr, &cursor_sr))
+ I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+ else
+ I915_WRITE(FW_BLC_SELF_VLV,
+ I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
+}
+
+static void g4x_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ unsigned int enabled = 0;
+
+ if (g4x_compute_wm0(dev, 0,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1;
+
+ if (g4x_compute_wm0(dev, 1,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 2;
+
+ plane_sr = cursor_sr = 0;
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &g4x_wm_info,
+ &g4x_cursor_wm_info,
+ &plane_sr, &cursor_sr))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ else
+ I915_WRITE(FW_BLC_SELF,
+ I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ /* HPLL off in SR has some issues on G4x... disable it */
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void i965_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ int srwm = 1;
+ int cursor_sr = 16;
+
+ /* Calc sr entries for one plane configs */
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 12000;
+ int clock = crtc->mode.clock;
+ int htotal = crtc->mode.htotal;
+ int hdisplay = crtc->mode.hdisplay;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
+ line_time_us = ((htotal * 1000) / clock);
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+ srwm = I965_FIFO_SIZE - entries;
+ if (srwm < 0)
+ srwm = 1;
+ srwm &= 0x1ff;
+ DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
+
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * 64;
+ entries = DIV_ROUND_UP(entries,
+ i965_cursor_wm_info.cacheline_size);
+ cursor_sr = i965_cursor_wm_info.fifo_size -
+ (entries + i965_cursor_wm_info.guard_size);
+
+ if (cursor_sr > i965_cursor_wm_info.max_wm)
+ cursor_sr = i965_cursor_wm_info.max_wm;
+
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
+
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
+
+ /* 965 has limitations... */
+ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
+ (8 << 16) | (8 << 8) | (8 << 0));
+ I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+ /* update cursor SR watermark */
+ I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void i9xx_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_watermark_params *wm_info;
+ uint32_t fwater_lo;
+ uint32_t fwater_hi;
+ int cwm, srwm = 1;
+ int fifo_size;
+ int planea_wm, planeb_wm;
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ if (IS_I945GM(dev))
+ wm_info = &i945_wm_info;
+ else if (!IS_GEN2(dev))
+ wm_info = &i915_wm_info;
+ else
+ wm_info = &i855_wm_info;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ crtc = intel_get_crtc_for_plane(dev, 0);
+ if (crtc->enabled && crtc->fb) {
+ planea_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ enabled = crtc;
+ } else
+ planea_wm = fifo_size - wm_info->guard_size;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+ crtc = intel_get_crtc_for_plane(dev, 1);
+ if (crtc->enabled && crtc->fb) {
+ planeb_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ if (enabled == NULL)
+ enabled = crtc;
+ else
+ enabled = NULL;
+ } else
+ planeb_wm = fifo_size - wm_info->guard_size;
+
+ DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+
+ /*
+ * Overlay gets an aggressive default since video jitter is bad.
+ */
+ cwm = 2;
+
+ /* Play safe and disable self-refresh before adjusting watermarks. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+
+ /* Calc sr entries for one plane configs */
+ if (HAS_FW_BLC(dev) && enabled) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 6000;
+ int clock = enabled->mode.clock;
+ int htotal = enabled->mode.htotal;
+ int hdisplay = enabled->mode.hdisplay;
+ int pixel_size = enabled->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
+ line_time_us = (htotal * 1000) / clock;
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+ srwm = wm_info->fifo_size - entries;
+ if (srwm < 0)
+ srwm = 1;
+
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else if (IS_I915GM(dev))
+ I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ planea_wm, planeb_wm, cwm, srwm);
+
+ fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+ fwater_hi = (cwm & 0x1f);
+
+ /* Set request length to 8 cachelines per fetch */
+ fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
+ fwater_hi = fwater_hi | (1 << 8);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+ I915_WRITE(FW_BLC2, fwater_hi);
+
+ if (HAS_FW_BLC(dev)) {
+ if (enabled) {
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+ DRM_DEBUG_KMS("memory self refresh enabled\n");
+ } else
+ DRM_DEBUG_KMS("memory self refresh disabled\n");
+ }
+}
+
+static void i830_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ uint32_t fwater_lo;
+ int planea_wm;
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc == NULL)
+ return;
+
+ planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+ dev_priv->display.get_fifo_size(dev, 0),
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+ fwater_lo |= (3<<8) | planea_wm;
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+}
+
+#define ILK_LP0_PLANE_LATENCY 700
+#define ILK_LP0_CURSOR_LATENCY 1300
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool ironlake_check_srwm(struct drm_device *dev, int level,
+ int fbc_wm, int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+ " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+ if (fbc_wm > SNB_FBC_MAX_SRWM) {
+ DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+ fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+ /* fbc has its own way to disable the FBC WM */
+ I915_WRITE(DISP_ARB_CTL,
+ I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+ return false;
+ }
+
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
+ display_wm, SNB_DISPLAY_MAX_SRWM, level);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
+ cursor_wm, SNB_CURSOR_MAX_SRWM, level);
+ return false;
+ }
+
+ if (!(fbc_wm || display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Compute watermark values of WM[1-3],
+ */
+static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *fbc_wm, int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int hdisplay, htotal, pixel_size, clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *fbc_wm = *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /*
+ * Spec says:
+ * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+ */
+ *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return ironlake_check_srwm(dev, level,
+ *fbc_wm, *display_wm, *cursor_wm,
+ display, cursor);
+}
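+
+ /*
+ * Editor's note - worked example of the spec formula above, using
+ * hypothetical inputs (a primary watermark of 100 and a 1920-pixel
+ * wide, 32 bpp scanout, so line_size = 7680; none of these values
+ * come from this patch):
+ *
+ * fbc_wm = DIV_ROUND_UP(100 * 64, 7680) + 2 = 1 + 2 = 3
+ *
+ * The cursor term uses the same cacheline rounding as the primary:
+ * line_count * 64 * pixel_size bytes, divided into cachelines, plus
+ * the guard size.
+ */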
+
+static void ironlake_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEA_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEB_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled))
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ ILK_READ_WM1_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ ILK_READ_WM2_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /*
+ * WM3 is unsupported on ILK, probably because we don't have latency
+ * data for that power state
+ */
+}
+
+static void sandybridge_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ if ((dev_priv->num_pipe == 3) &&
+ g4x_compute_wm0(dev, 2,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEC_IVB);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEC_IVB, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1 << 2;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+ * SNB supports 3 levels of watermarks.
+ *
+ * WM1/WM2/WM3 watermarks have to be enabled in ascending order
+ * and disabled in descending order
+ *
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
+static void
+haswell_update_linetime_wm(struct drm_device *dev, int pipe,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 temp;
+
+ temp = I915_READ(PIPE_WM_LINETIME(pipe));
+ temp &= ~PIPE_WM_LINETIME_MASK;
+
+ /* The WM is computed based on how long it takes to fill a single
+ * row at the given clock rate, multiplied by 8.
+ */
+ temp |= PIPE_WM_LINETIME_TIME(
+ ((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
+
+ /* IPS watermarks are only used by pipe A, and are ignored by
+ * pipes B and C. They are calculated similarly to the common
+ * linetime values, except that we are using CD clock frequency
+ * in MHz instead of pixel rate for the division.
+ *
+ * This is a placeholder for the IPS watermark calculation code.
+ */
+
+ I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
+}
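+
+ /*
+ * Editor's note - worked example of the line time value above, with
+ * a hypothetical 1920-wide mode at a 148.5 MHz dot clock (not from
+ * this patch):
+ *
+ * (1920 * 1000) / 148500 = 12 (us, truncated from ~12.9)
+ * 12 * 8 = 96
+ *
+ * so the register field appears to hold the line time in units of
+ * 0.125 us.
+ */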
+
+static bool
+sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int display_latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ int clock;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *sprite_wm = display->guard_size;
+ return false;
+ }
+
+ clock = crtc->mode.clock;
+
+ /* Use the small buffer method to calculate the sprite watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size -
+ sprite_width * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+ if (*sprite_wm > (int)display->max_wm)
+ *sprite_wm = display->max_wm;
+
+ return true;
+}
+
+static bool
+sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ clock = crtc->mode.clock;
+ if (!clock) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_time_us = (sprite_width * 1000) / clock;
+ if (!line_time_us) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = sprite_width * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+
+ return *sprite_wm > 0x3ff ? false : true;
+}
+
+static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int sprite_wm, reg;
+ int ret;
+
+ switch (pipe) {
+ case 0:
+ reg = WM0_PIPEA_ILK;
+ break;
+ case 1:
+ reg = WM0_PIPEB_ILK;
+ break;
+ case 2:
+ reg = WM0_PIPEC_IVB;
+ break;
+ default:
+ return; /* bad pipe */
+ }
+
+ ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
+ &sandybridge_display_wm_info,
+ latency, &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
+ pipe);
+ return;
+ }
+
+ val = I915_READ(reg);
+ val &= ~WM0_PIPE_SPRITE_MASK;
+ I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+ DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM1S_LP_ILK, sprite_wm);
+
+ /* Only IVB has two more LP watermarks for sprite */
+ if (!IS_IVYBRIDGE(dev))
+ return;
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM2S_LP_IVB, sprite_wm);
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM3S_LP_IVB, sprite_wm);
+}
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ * @dev: the drm_device
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+void intel_update_watermarks(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_wm)
+ dev_priv->display.update_wm(dev);
+}
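+
+ /*
+ * Editor's note - worked example of the SR formula documented above,
+ * using the 12000 ns self-refresh latency from the G4x/965 paths and
+ * an otherwise hypothetical 1920x1080 mode (htotal 2200, 148.5 MHz,
+ * 32 bpp):
+ *
+ * line time = 2200 / 148500 kHz ~= 14.8 us
+ * watermark = (trunc(12 / 14.8) + 1) * 1920 * 4 = 7680 bytes
+ * = 120 64-byte FIFO lines, rounded up, plus the 2 extra
+ * entries for clock crossings mentioned above.
+ */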
+
+void intel_update_linetime_watermarks(struct drm_device *dev,
+ int pipe, struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_linetime_wm)
+ dev_priv->display.update_linetime_wm(dev, pipe, mode);
+}
+
+void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_sprite_wm)
+ dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
+ pixel_size);
+}
+
+static struct drm_i915_gem_object *
+intel_alloc_context_page(struct drm_device *dev)
+{
+ struct drm_i915_gem_object *ctx;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ ctx = i915_gem_alloc_object(dev, 4096);
+ if (!ctx) {
+ DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+ return NULL;
+ }
+
+ ret = i915_gem_object_pin(ctx, 4096, true);
+ if (ret) {
+ DRM_ERROR("failed to pin power context: %d\n", ret);
+ goto err_unref;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
+ if (ret) {
+ DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+ goto err_unpin;
+ }
+
+ return ctx;
+
+err_unpin:
+ i915_gem_object_unpin(ctx);
+err_unref:
+ drm_gem_object_unreference(&ctx->base);
+ return NULL;
+}
+
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl;
+
+ rgvswctl = I915_READ16(MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return false; /* still busy with another command */
+ }
+
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+ POSTING_READ16(MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+
+ return true;
+}
+
+void ironlake_enable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u8 fmax, fmin, fstart, vstart;
+
+ /* Enable temp reporting */
+ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+ I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
+ /* 100ms RC evaluation intervals */
+ I915_WRITE(RCUPEI, 100000);
+ I915_WRITE(RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ I915_WRITE(RCBMAXAVG, 90000);
+ I915_WRITE(RCBMINAVG, 80000);
+
+ I915_WRITE(MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+
+ vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+
+ dev_priv->fmax = fmax; /* IPS callback will increase this */
+ dev_priv->fstart = fstart;
+
+ dev_priv->max_delay = fstart;
+ dev_priv->min_delay = fmin;
+ dev_priv->cur_delay = fstart;
+
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
+
+ I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ /*
+ * Interrupts will be enabled in ironlake_irq_postinstall
+ */
+
+ I915_WRITE(VIDSTART, vstart);
+ POSTING_READ(VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ I915_WRITE(MEMMODECTL, rgvmodectl);
+
+ if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+ DRM_ERROR("stuck trying to change perf mode\n");
+ msleep(1);
+
+ ironlake_set_drps(dev, fstart);
+
+ dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+ I915_READ(0x112e0);
+ dev_priv->last_time1 = jiffies_to_msecs(jiffies);
+ dev_priv->last_count2 = I915_READ(0x112f4);
+ getrawmonotonic(&dev_priv->last_time2);
+}
+
+void ironlake_disable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
+
+ /* Ack interrupts, disable EFC interrupt */
+ I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+ I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+ I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+ /* Go back to the starting frequency */
+ ironlake_set_drps(dev, dev_priv->fstart);
+ msleep(1);
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ msleep(1);
+
+}
+
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 limits;
+
+ limits = 0;
+ if (val >= dev_priv->max_delay)
+ val = dev_priv->max_delay;
+ else
+ limits |= dev_priv->max_delay << 24;
+
+ if (val <= dev_priv->min_delay)
+ val = dev_priv->min_delay;
+ else
+ limits |= dev_priv->min_delay << 16;
+
+ if (val == dev_priv->cur_delay)
+ return;
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+
+ /* Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+ dev_priv->cur_delay = val;
+}
+
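The limits word written to GEN6_RP_INTERRUPT_LIMITS above shifts the software maximum delay up by 24 bits and the minimum by 16, but a field is only filled in while the requested value still has room to move in that direction; once val is clamped to an endpoint the corresponding field stays zero. A minimal user-space sketch of that clamp-and-pack step, with assumed example delays:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the clamp/pack logic of gen6_set_rps(); 7 and 22 are assumed
 * example min/max delays, not values read from hardware. */
static uint32_t pack_rp_limits(uint8_t *val, uint8_t min_delay, uint8_t max_delay)
{
	uint32_t limits = 0;

	if (*val >= max_delay)
		*val = max_delay;
	else
		limits |= (uint32_t)max_delay << 24;

	if (*val <= min_delay)
		*val = min_delay;
	else
		limits |= (uint32_t)min_delay << 16;

	return limits;
}

int main(void)
{
	uint8_t val = 30;
	uint32_t limits = pack_rp_limits(&val, 7, 22);

	printf("clamped val=%u limits=0x%08x\n", val, limits);
	return 0;
}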
+void gen6_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, 0);
+ /* Completely masking PM interrupts here doesn't race with the rps work
+ * item unmasking them again, because that path uses a different
+ * register (PMIMR) to mask PM interrupts. The only risk is in leaving
+ * stale bits in PMIIR and PMIMR, which gen6_enable_rps will clean up. */
+
+ spin_lock_irq(&dev_priv->rps_lock);
+ dev_priv->pm_iir = 0;
+ spin_unlock_irq(&dev_priv->rps_lock);
+
+ I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+}
+
+int intel_enable_rc6(const struct drm_device *dev)
+{
+ /*
+ * Respect the kernel parameter if it is set
+ */
+ if (i915_enable_rc6 >= 0)
+ return i915_enable_rc6;
+
+ /*
+ * Disable RC6 on Ironlake
+ */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
+
+ /* Sorry Haswell, no RC6 for you for now. */
+ if (IS_HASWELL(dev))
+ return 0;
+
+ /*
+ * On Sandybridge, enable plain RC6 only (deep RC6 stays disabled)
+ */
+ if (INTEL_INFO(dev)->gen == 6) {
+ DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+ return INTEL_RC6_ENABLE;
+ }
+ DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
+ return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+}
+
+void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+ struct intel_ring_buffer *ring;
+ u32 rp_state_cap;
+ u32 gt_perf_status;
+ u32 pcu_mbox, rc6_mask = 0;
+ u32 gtfifodbg;
+ int rc6_mode;
+ int i;
+
+ /* Here begins a magic sequence of register writes to enable
+ * auto-downclocking.
+ *
+ * Perhaps there might be some value in exposing these to
+ * userspace...
+ */
+ I915_WRITE(GEN6_RC_STATE, 0);
+ mutex_lock(&dev_priv->dev->struct_mutex);
+
+ /* Clear the debug register now so stale errors don't confuse us later */
+ if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
+ gen6_gt_force_wake_get(dev_priv);
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+ /* In units of 100MHz */
+ dev_priv->max_delay = rp_state_cap & 0xff;
+ dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
+ dev_priv->cur_delay = 0;
+
+ /* disable the counters and set deterministic thresholds */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_ring(ring, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+ I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ rc6_mode = intel_enable_rc6(dev_priv->dev);
+ if (rc6_mode & INTEL_RC6_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+ if (rc6_mode & INTEL_RC6p_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+ if (rc6_mode & INTEL_RC6pp_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+
+ DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+ (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
+ (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
+ (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
+
+ I915_WRITE(GEN6_RC_CONTROL,
+ rc6_mask |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE);
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(10) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN6_FREQUENCY(12));
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ dev_priv->max_delay << 24 |
+ dev_priv->min_delay << 16);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
+ I915_WRITE(GEN6_RP_UP_EI, 100000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+ I915_WRITE(GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+
+ /* Check for overclock support */
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
+ pcu_mbox = I915_READ(GEN6_PCODE_DATA);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+ if (pcu_mbox & (1<<31)) { /* OC supported */
+ dev_priv->max_delay = pcu_mbox & 0xff;
+ DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+ }
+
+ gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
+
+ /* requires MSI enabled */
+ I915_WRITE(GEN6_PMIER,
+ GEN6_PM_MBOX_EVENT |
+ GEN6_PM_THERMAL_EVENT |
+ GEN6_PM_RP_DOWN_TIMEOUT |
+ GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_EI_EXPIRED);
+ spin_lock_irq(&dev_priv->rps_lock);
+ WARN_ON(dev_priv->pm_iir != 0);
+ I915_WRITE(GEN6_PMIMR, 0);
+ spin_unlock_irq(&dev_priv->rps_lock);
+ /* enable all PM interrupts */
+ I915_WRITE(GEN6_PMINTRMSK, 0);
+
+ gen6_gt_force_wake_put(dev_priv);
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+{
+ int min_freq = 15;
+ int gpu_freq, ia_freq, max_ia_freq;
+ int scaling_factor = 180;
+
+ max_ia_freq = cpufreq_quick_get_max(0);
+ /*
+ * Default to measured freq if none found; the PCU will ensure we
+ * don't go over.
+ */
+ if (!max_ia_freq)
+ max_ia_freq = tsc_khz;
+
+ /* Convert from kHz to MHz */
+ max_ia_freq /= 1000;
+
+ mutex_lock(&dev_priv->dev->struct_mutex);
+
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+ gpu_freq--) {
+ int diff = dev_priv->max_delay - gpu_freq;
+
+ /*
+ * For GPU frequencies less than 750MHz, just use the lowest
+ * ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
+ ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+
+ I915_WRITE(GEN6_PCODE_DATA,
+ (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
+ gpu_freq);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+ GEN6_PCODE_READY) == 0, 10)) {
+ DRM_ERROR("pcode write of freq table timed out\n");
+ continue;
+ }
+ }
+
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
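gen6_update_ring_freq() walks the GPU frequency range from max_delay down to min_delay and, for every step below the top, backs the requested IA reference frequency off by scaling_factor/2 MHz before rounding it to the nearest 100 MHz ratio; anything below min_freq simply gets the fixed 800 MHz floor. A minimal user-space sketch of that table computation, with assumed values for max_ia_freq and the delay range:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
	const int min_freq = 15;        /* steps below this use the 800 MHz floor */
	const int scaling_factor = 180;
	const int max_ia_freq = 2600;   /* assumed CPU max frequency in MHz */
	const int max_delay = 22;       /* assumed RP caps, hardware delay units */
	const int min_delay = 7;

	for (int gpu_freq = max_delay; gpu_freq >= min_delay; gpu_freq--) {
		int diff = max_delay - gpu_freq;
		int ia_freq;

		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		/* the driver packs ia_freq and gpu_freq into GEN6_PCODE_DATA */
		printf("gpu_freq %2d -> ia ratio %d\n", gpu_freq, ia_freq);
	}
	return 0;
}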
+static void ironlake_teardown_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->renderctx) {
+ i915_gem_object_unpin(dev_priv->renderctx);
+ drm_gem_object_unreference(&dev_priv->renderctx->base);
+ dev_priv->renderctx = NULL;
+ }
+
+ if (dev_priv->pwrctx) {
+ i915_gem_object_unpin(dev_priv->pwrctx);
+ drm_gem_object_unreference(&dev_priv->pwrctx->base);
+ dev_priv->pwrctx = NULL;
+ }
+}
+
+void ironlake_disable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (I915_READ(PWRCTXA)) {
+ /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+ wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+ 50);
+
+ I915_WRITE(PWRCTXA, 0);
+ POSTING_READ(PWRCTXA);
+
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ POSTING_READ(RSTDBYCTL);
+ }
+
+ ironlake_teardown_rc6(dev);
+}
+
+static int ironlake_setup_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->renderctx == NULL)
+ dev_priv->renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->renderctx)
+ return -ENOMEM;
+
+ if (dev_priv->pwrctx == NULL)
+ dev_priv->pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->pwrctx) {
+ ironlake_teardown_rc6(dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ironlake_enable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
+
+ /* rc6 disabled by default due to repeated reports of hanging during
+ * boot and resume.
+ */
+ if (!intel_enable_rc6(dev))
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = ironlake_setup_rc6(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+
+ /*
+ * GPU can automatically power down the render unit if given a page
+ * to save state.
+ */
+ ret = intel_ring_begin(ring, 6);
+ if (ret) {
+ ironlake_teardown_rc6(dev);
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+ intel_ring_emit(ring, MI_SET_CONTEXT);
+ intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ MI_RESTORE_INHIBIT);
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_advance(ring);
+
+ /*
+ * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
+ * does an implicit flush; combined with the MI_FLUSH above, it should
+ * be safe to assume that renderctx is valid.
+ */
+ ret = intel_wait_ring_idle(ring);
+ if (ret) {
+ DRM_ERROR("failed to enable ironlake power power savings\n");
+ ironlake_teardown_rc6(dev);
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+
+ I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+ unsigned long freq;
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
+ freq = ((div * 133333) / ((1<<post) * pre));
+
+ return freq;
+}
+
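intel_pxfreq() above decodes a divider from bits 21:16, a power-of-two post divider from bits 13:12 and a pre divider from bits 2:0, and applies them to a 133333 kHz reference, so the result appears to be in kHz (intel_init_emon() later divides it by 1000). A minimal sketch of the same decode, fed a made-up register value:

#include <stdint.h>
#include <stdio.h>

/* Same field decode as intel_pxfreq(); 0x00231002 is just a made-up
 * example PXVFREQ value. */
static unsigned long pxfreq_khz(uint32_t vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = vidfreq & 0x7;

	if (!pre)
		return 0;

	return (div * 133333UL) / ((1UL << post) * pre);
}

int main(void)
{
	uint32_t vidfreq = 0x00231002;

	printf("decoded frequency: %lu kHz\n", pxfreq_khz(vidfreq));
	return 0;
}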
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ u64 total_count, diff, ret;
+ u32 count1, count2, count3, m = 0, c = 0;
+ unsigned long now = jiffies_to_msecs(jiffies), diff1;
+ int i;
+
+ diff1 = now - dev_priv->last_time1;
+
+ /* Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ if (diff1 <= 10)
+ return dev_priv->chipset_power;
+
+ count1 = I915_READ(DMIEC);
+ count2 = I915_READ(DDREC);
+ count3 = I915_READ(CSIEC);
+
+ total_count = count1 + count2 + count3;
+
+ /* FIXME: handle per-counter overflow */
+ if (total_count < dev_priv->last_count1) {
+ diff = ~0UL - dev_priv->last_count1;
+ diff += total_count;
+ } else {
+ diff = total_count - dev_priv->last_count1;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == dev_priv->c_m &&
+ cparams[i].t == dev_priv->r_t) {
+ m = cparams[i].m;
+ c = cparams[i].c;
+ break;
+ }
+ }
+
+ diff = div_u64(diff, diff1);
+ ret = ((m * diff) + c);
+ ret = div_u64(ret, 10);
+
+ dev_priv->last_count1 = total_count;
+ dev_priv->last_time1 = now;
+
+ dev_priv->chipset_power = ret;
+
+ return ret;
+}
+
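i915_chipset_val() sums three energy counters, takes a wrap-aware delta since the previous sample, normalizes it per millisecond of elapsed time, and then applies the slope m and intercept c selected from cparams[] before dividing by ten. A minimal sketch of that arithmetic, using the { 1, 1333, 301, 28664 } row and assumed counter and timing values:

#include <stdint.h>
#include <stdio.h>

/* Wrap-aware counter delta, as in i915_chipset_val(); the 32-bit-style
 * wrap point mirrors the driver's ~0UL handling on a 32-bit build. */
static uint64_t counter_delta(uint64_t now, uint64_t last)
{
	if (now < last)
		return (0xffffffffUL - last) + now;
	return now - last;
}

int main(void)
{
	const uint32_t m = 301, c = 28664;  /* cparams row for i=1, t=1333 */
	uint64_t last = 4294900000ULL;      /* assumed previous counter sum */
	uint64_t now = 120000;              /* assumed current sum (wrapped) */
	unsigned long diff1_ms = 250;       /* assumed time since last sample */

	uint64_t diff = counter_delta(now, last) / diff1_ms;
	uint64_t power = (m * diff + c) / 10;

	printf("chipset power estimate: %llu\n", (unsigned long long)power);
	return 0;
}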
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long m, x, b;
+ u32 tsfs;
+
+ tsfs = I915_READ(TSFS);
+
+ m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+ x = I915_READ8(TR1);
+
+ b = tsfs & TSFS_INTR_MASK;
+
+ return ((m * x) / 127) - b;
+}
+
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+ static const struct v_table {
+ u16 vd; /* in .1 mil */
+ u16 vm; /* in .1 mil */
+ } v_table[] = {
+ { 0, 0, },
+ { 375, 0, },
+ { 500, 0, },
+ { 625, 0, },
+ { 750, 0, },
+ { 875, 0, },
+ { 1000, 0, },
+ { 1125, 0, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4250, 3125, },
+ { 4375, 3250, },
+ { 4500, 3375, },
+ { 4625, 3500, },
+ { 4750, 3625, },
+ { 4875, 3750, },
+ { 5000, 3875, },
+ { 5125, 4000, },
+ { 5250, 4125, },
+ { 5375, 4250, },
+ { 5500, 4375, },
+ { 5625, 4500, },
+ { 5750, 4625, },
+ { 5875, 4750, },
+ { 6000, 4875, },
+ { 6125, 5000, },
+ { 6250, 5125, },
+ { 6375, 5250, },
+ { 6500, 5375, },
+ { 6625, 5500, },
+ { 6750, 5625, },
+ { 6875, 5750, },
+ { 7000, 5875, },
+ { 7125, 6000, },
+ { 7250, 6125, },
+ { 7375, 6250, },
+ { 7500, 6375, },
+ { 7625, 6500, },
+ { 7750, 6625, },
+ { 7875, 6750, },
+ { 8000, 6875, },
+ { 8125, 7000, },
+ { 8250, 7125, },
+ { 8375, 7250, },
+ { 8500, 7375, },
+ { 8625, 7500, },
+ { 8750, 7625, },
+ { 8875, 7750, },
+ { 9000, 7875, },
+ { 9125, 8000, },
+ { 9250, 8125, },
+ { 9375, 8250, },
+ { 9500, 8375, },
+ { 9625, 8500, },
+ { 9750, 8625, },
+ { 9875, 8750, },
+ { 10000, 8875, },
+ { 10125, 9000, },
+ { 10250, 9125, },
+ { 10375, 9250, },
+ { 10500, 9375, },
+ { 10625, 9500, },
+ { 10750, 9625, },
+ { 10875, 9750, },
+ { 11000, 9875, },
+ { 11125, 10000, },
+ { 11250, 10125, },
+ { 11375, 10250, },
+ { 11500, 10375, },
+ { 11625, 10500, },
+ { 11750, 10625, },
+ { 11875, 10750, },
+ { 12000, 10875, },
+ { 12125, 11000, },
+ { 12250, 11125, },
+ { 12375, 11250, },
+ { 12500, 11375, },
+ { 12625, 11500, },
+ { 12750, 11625, },
+ { 12875, 11750, },
+ { 13000, 11875, },
+ { 13125, 12000, },
+ { 13250, 12125, },
+ { 13375, 12250, },
+ { 13500, 12375, },
+ { 13625, 12500, },
+ { 13750, 12625, },
+ { 13875, 12750, },
+ { 14000, 12875, },
+ { 14125, 13000, },
+ { 14250, 13125, },
+ { 14375, 13250, },
+ { 14500, 13375, },
+ { 14625, 13500, },
+ { 14750, 13625, },
+ { 14875, 13750, },
+ { 15000, 13875, },
+ { 15125, 14000, },
+ { 15250, 14125, },
+ { 15375, 14250, },
+ { 15500, 14375, },
+ { 15625, 14500, },
+ { 15750, 14625, },
+ { 15875, 14750, },
+ { 16000, 14875, },
+ { 16125, 15000, },
+ };
+ if (dev_priv->info->is_mobile)
+ return v_table[pxvid].vm;
+ else
+ return v_table[pxvid].vd;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+ struct timespec now, diff1;
+ u64 diff;
+ unsigned long diffms;
+ u32 count;
+
+ if (dev_priv->info->gen != 5)
+ return;
+
+ getrawmonotonic(&now);
+ diff1 = timespec_sub(now, dev_priv->last_time2);
+
+ /* Don't divide by 0 */
+ diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+ if (!diffms)
+ return;
+
+ count = I915_READ(GFXEC);
+
+ if (count < dev_priv->last_count2) {
+ diff = ~0UL - dev_priv->last_count2;
+ diff += count;
+ } else {
+ diff = count - dev_priv->last_count2;
+ }
+
+ dev_priv->last_count2 = count;
+ dev_priv->last_time2 = now;
+
+ /* More magic constants... */
+ diff = diff * 1181;
+ diff = div_u64(diff, diffms * 10);
+ dev_priv->gfx_power = diff;
+}
+
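i915_update_gfx_val() applies the same wrap-safe delta idea to the GFXEC counter and converts it to a rate by scaling with the magic 1181 factor and dividing by ten times the elapsed milliseconds. A minimal sketch with assumed readings:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t last_count = 500000;   /* assumed previous GFXEC reading */
	uint64_t count = 620000;        /* assumed current GFXEC reading */
	unsigned long diffms = 400;     /* assumed elapsed time in ms */
	uint64_t diff;

	if (count < last_count)
		diff = (0xffffffffUL - last_count) + count;  /* wrapped */
	else
		diff = count - last_count;

	/* same scaling as the driver: diff * 1181 / (diffms * 10) */
	diff = (diff * 1181) / (diffms * 10);

	printf("gfx_power estimate: %llu\n", (unsigned long long)diff);
	return 0;
}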
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long t, corr, state1, corr2, state2;
+ u32 pxvid, ext_v;
+
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+ pxvid = (pxvid >> 24) & 0x7f;
+ ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+ state1 = ext_v;
+
+ t = i915_mch_val(dev_priv);
+
+ /* Revel in the empirically derived constants */
+
+ /* Correction factor in 1/100000 units */
+ if (t > 80)
+ corr = ((t * 2349) + 135940);
+ else if (t >= 50)
+ corr = ((t * 964) + 29317);
+ else /* < 50 */
+ corr = ((t * 301) + 1004);
+
+ corr = corr * ((150142 * state1) / 10000 - 78642);
+ corr /= 100000;
+ corr2 = (corr * dev_priv->corr);
+
+ state2 = (corr2 * state1) / 10000;
+ state2 /= 100; /* convert to mW */
+
+ i915_update_gfx_val(dev_priv);
+
+ return dev_priv->gfx_power + state2;
+}
+
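i915_gfx_val() picks a correction factor from one of three temperature bands, scales it by a voltage-dependent term and the fused corr value, and finally reduces the product to milliwatts. A minimal sketch of that piecewise computation, with assumed temperature, voltage and fuse values:

#include <stdio.h>

int main(void)
{
	unsigned long long t = 65;         /* assumed i915_mch_val() result */
	unsigned long long state1 = 9000;  /* assumed extended VID from the table */
	unsigned long long fuse_corr = 15; /* assumed LCFUSE-derived dev_priv->corr */
	unsigned long long corr, corr2, state2;

	/* correction factor in 1/100000 units, per temperature band */
	if (t > 80)
		corr = (t * 2349) + 135940;
	else if (t >= 50)
		corr = (t * 964) + 29317;
	else
		corr = (t * 301) + 1004;

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = corr * fuse_corr;

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	printf("graphics power term: %llu mW\n", state2);
	return 0;
}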
+/* Global for IPS driver to get at the current i915 device */
+static struct drm_i915_private *i915_mch_dev;
+/*
+ * Lock protecting IPS related data structures
+ * - i915_mch_dev
+ * - dev_priv->max_delay
+ * - dev_priv->min_delay
+ * - dev_priv->fmax
+ * - dev_priv->gpu_busy
+ */
+static DEFINE_SPINLOCK(mchdev_lock);
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+ struct drm_i915_private *dev_priv;
+ unsigned long chipset_val, graphics_val, ret = 0;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ chipset_val = i915_chipset_val(dev_priv);
+ graphics_val = i915_gfx_val(dev_priv);
+
+ ret = chipset_val + graphics_val;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->max_delay > dev_priv->fmax)
+ dev_priv->max_delay--;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->max_delay < dev_priv->min_delay)
+ dev_priv->max_delay++;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU busyness to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = false;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ ret = dev_priv->busy;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ dev_priv->max_delay = dev_priv->fstart;
+
+ if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+ ret = false;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+/**
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+ void (*link)(void);
+
+ link = symbol_get(ips_link_to_i915_driver);
+ if (link) {
+ link();
+ symbol_put(ips_link_to_i915_driver);
+ }
+}
+
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
+{
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = dev_priv;
+ dev_priv->mchdev_lock = &mchdev_lock;
+ spin_unlock(&mchdev_lock);
+
+ ips_ping_for_i915_load();
+}
+
+void intel_gpu_ips_teardown(void)
+{
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = NULL;
+ spin_unlock(&mchdev_lock);
+}
+
+void intel_init_emon(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 lcfuse;
+ u8 pxw[16];
+ int i;
+
+ /* Disable PMON while we program the event weights */
+ I915_WRITE(ECR, 0);
+ POSTING_READ(ECR);
+
+ /* Program energy weights for various events */
+ I915_WRITE(SDEW, 0x15040d00);
+ I915_WRITE(CSIEW0, 0x007f0000);
+ I915_WRITE(CSIEW1, 0x1e220004);
+ I915_WRITE(CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ I915_WRITE(PEW + (i * 4), 0);
+ for (i = 0; i < 3; i++)
+ I915_WRITE(DEW + (i * 4), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+ unsigned long freq = intel_pxfreq(pxvidfreq);
+ unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+ unsigned long val;
+
+ val = vid * vid;
+ val *= (freq / 1000);
+ val *= 255;
+ val /= (127*127*900);
+ if (val > 0xff)
+ DRM_ERROR("bad pxval: %ld\n", val);
+ pxw[i] = val;
+ }
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+ (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+ I915_WRITE(PXW + (i * 4), val);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ I915_WRITE(OGW0, 0);
+ I915_WRITE(OGW1, 0);
+ I915_WRITE(EG0, 0x00007f00);
+ I915_WRITE(EG1, 0x0000000e);
+ I915_WRITE(EG2, 0x000e0000);
+ I915_WRITE(EG3, 0x68000300);
+ I915_WRITE(EG4, 0x42000000);
+ I915_WRITE(EG5, 0x00140031);
+ I915_WRITE(EG6, 0);
+ I915_WRITE(EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ I915_WRITE(PXWL + (i * 4), 0);
+
+ /* Enable PMON + select events */
+ I915_WRITE(ECR, 0x80000019);
+
+ lcfuse = I915_READ(LCFUSE02);
+
+ dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+}
+
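Each of the sixteen P-state weights programmed above is proportional to voltage squared times frequency in MHz, normalized by 127 * 127 * 900 and scaled by 255 to fit a byte, and four such weights are then packed per PXW register. A minimal sketch of the weight calculation for one slot, with an assumed voltage ID and frequency:

#include <stdint.h>
#include <stdio.h>

/* One P-state weight, as computed in intel_init_emon(); the vid and
 * freq_khz inputs are made-up example numbers. */
static uint8_t pxw_weight(unsigned long vid, unsigned long freq_khz)
{
	unsigned long val;

	val = vid * vid;
	val *= freq_khz / 1000;     /* kHz -> MHz */
	val *= 255;
	val /= 127 * 127 * 900;
	if (val > 0xff)
		val = 0xff;         /* the driver only warns here */

	return (uint8_t)val;
}

int main(void)
{
	uint8_t w = pxw_weight(110, 1166000);
	uint32_t packed = ((uint32_t)w << 24) | (w << 16) | (w << 8) | w;

	printf("weight=%u packed=0x%08x\n", w, packed);
	return 0;
}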
+static void ironlake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ /* Required for FBC */
+ dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+ DPFCRUNIT_CLOCK_GATE_DISABLE |
+ DPFDUNIT_CLOCK_GATE_DISABLE;
+ /* Required for CxSR */
+ dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_3DCGDIS0,
+ MARIUNIT_CLOCK_GATE_DISABLE |
+ SVSMUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(PCH_3DCGDIS1,
+ VFMUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ /*
+ * According to the spec, the following bits should be set in
+ * order to enable memory self-refresh:
+ * bits 22/21 of 0x42004
+ * bit 5 of 0x42020
+ * bit 15 of 0x45000
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+ I915_WRITE(ILK_DSPCLK_GATE,
+ (I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE));
+ I915_WRITE(DISP_ARB_CTL,
+ (I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS));
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /*
+ * Based on documentation from the hardware team, the following
+ * bits should be set unconditionally in order to enable FBC:
+ * bit 22 of 0x42000
+ * bit 22 of 0x42004
+ * bits 7, 8 and 9 of 0x42020
+ */
+ if (IS_IRONLAKE_M(dev)) {
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPFC_DIS1 |
+ ILK_DPFC_DIS2 |
+ ILK_CLK_FBC);
+ }
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+ I915_WRITE(_3D_CHICKEN2,
+ _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+ _3D_CHICKEN2_WM_READ_PIPELINED);
+}
+
+static void gen6_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+ I915_WRITE(GEN6_UCGCTL1,
+ I915_READ(GEN6_UCGCTL1) |
+ GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ /* Bspec says we need to always set all mask bits. */
+ I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
+ _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);
+
+ /*
+ * According to the spec, the following bits should be
+ * set in order to enable memory self-refresh and fbc:
+ * bits 21 and 22 of 0x42000
+ * bits 21 and 22 of 0x42004
+ * bits 5 and 7 of 0x42020
+ * bit 14 of 0x70180
+ * bit 14 of 0x71180
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE |
+ ILK_DPFD_CLK_GATE);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+}
+
+static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
+{
+ uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+
+ reg &= ~GEN7_FF_SCHED_MASK;
+ reg |= GEN7_FF_TS_SCHED_HW;
+ reg |= GEN7_FF_VS_SCHED_HW;
+ reg |= GEN7_FF_DS_SCHED_HW;
+
+ I915_WRITE(GEN7_FF_THREAD_MODE, reg);
+}
+
+static void ivybridge_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ */
+ I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1,
+ GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+ GEN7_WA_L3_CHICKEN_MODE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+
+ gen7_setup_fixed_func_scheduler(dev_priv);
+
+ /* WaDisable4x2SubspanOptimization */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+}
+
+static void valleyview_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ */
+ I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+}
+
+static void g4x_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate;
+
+ I915_WRITE(RENCLK_GATE_D1, 0);
+ I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+ GS_UNIT_CLOCK_GATE_DISABLE |
+ CL_UNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
+ OVRUNIT_CLOCK_GATE_DISABLE |
+ OVCUNIT_CLOCK_GATE_DISABLE;
+ if (IS_GM45(dev))
+ dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+}
+
+static void crestline_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+ I915_WRITE(DSPCLK_GATE_D, 0);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ I915_WRITE16(DEUC, 0);
+}
+
+static void broadwater_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+ I965_RCC_CLOCK_GATE_DISABLE |
+ I965_RCPB_CLOCK_GATE_DISABLE |
+ I965_ISC_CLOCK_GATE_DISABLE |
+ I965_FBC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+}
+
+static void gen3_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dstate = I915_READ(D_STATE);
+
+ dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+ DSTATE_DOT_CLOCK_GATING;
+ I915_WRITE(D_STATE, dstate);
+
+ if (IS_PINEVIEW(dev))
+ I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+}
+
+static void i85x_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+}
+
+static void i830_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ DPLS_EDP_PPS_FIX_DIS);
+ /* Without this, mode sets may fail silently on FDI */
+ for_each_pipe(pipe)
+ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
+}
+
+void intel_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->display.init_clock_gating(dev);
+
+ if (dev_priv->display.init_pch_clock_gating)
+ dev_priv->display.init_pch_clock_gating(dev);
+}
+
+static void gen6_sanitize_pm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 limits, delay, old;
+
+ gen6_gt_force_wake_get(dev_priv);
+
+ old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
+ /* Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ limits &= ~(0x3f << 16 | 0x3f << 24);
+ delay = dev_priv->cur_delay;
+ if (delay < dev_priv->max_delay)
+ limits |= (dev_priv->max_delay & 0x3f) << 24;
+ if (delay > dev_priv->min_delay)
+ limits |= (dev_priv->min_delay & 0x3f) << 16;
+
+ if (old != limits) {
+ /* Note that the known failure case is to read back 0. */
+ DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
+ "expected %08x, was %08x\n", limits, old);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+ }
+
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+void intel_sanitize_pm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.sanitize_pm)
+ dev_priv->display.sanitize_pm(dev);
+}
+
+/* Starting with Haswell, we have different power wells for
+ * different parts of the GPU. This attempts to enable them all.
+ */
+void intel_init_power_wells(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long power_wells[] = {
+ HSW_PWR_WELL_CTL1,
+ HSW_PWR_WELL_CTL2,
+ HSW_PWR_WELL_CTL4
+ };
+ int i;
+
+ if (!IS_HASWELL(dev))
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
+ int well = I915_READ(power_wells[i]);
+
+ if ((well & HSW_PWR_WELL_STATE) == 0) {
+ I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
+ if (wait_for(I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE, 20))
+ DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
+ }
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/* Set up chip specific power management-related functions */
+void intel_init_pm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (I915_HAS_FBC(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = ironlake_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (IS_GM45(dev)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_enable_fbc;
+ dev_priv->display.disable_fbc = g4x_disable_fbc;
+ } else if (IS_CRESTLINE(dev)) {
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ dev_priv->display.disable_fbc = i8xx_disable_fbc;
+ }
+ /* 855GM needs testing */
+ }
+
+ /* For cxsr */
+ if (IS_PINEVIEW(dev))
+ i915_pineview_get_mem_freq(dev);
+ else if (IS_GEN5(dev))
+ i915_ironlake_get_mem_freq(dev);
+
+ /* For FIFO watermark updates */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+ /* IVB configs may use multi-threaded forcewake */
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ u32 ecobus;
+
+ /* A small trick here - if the bios hasn't configured MT forcewake,
+ * and if the device is in RC6, then force_wake_mt_get will not wake
+ * the device and the ECOBUS read will return zero, which will be
+ * (correctly) interpreted by the test below as MT forcewake being
+ * disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ DRM_DEBUG_KMS("Using MT version of forcewake\n");
+ dev_priv->display.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->display.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ }
+ }
+
+ if (HAS_PCH_IBX(dev))
+ dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
+ else if (HAS_PCH_CPT(dev))
+ dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
+
+ if (IS_GEN5(dev)) {
+ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ dev_priv->display.update_wm = ironlake_update_wm;
+ else {
+ DRM_DEBUG_KMS("Failed to get proper latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+ } else if (IS_GEN6(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+ dev_priv->display.sanitize_pm = gen6_sanitize_pm;
+ } else if (IS_IVYBRIDGE(dev)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.sanitize_pm = gen6_sanitize_pm;
+ } else if (IS_HASWELL(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.sanitize_pm = gen6_sanitize_pm;
+ } else
+ dev_priv->display.update_wm = NULL;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.update_wm = valleyview_update_wm;
+ dev_priv->display.init_clock_gating =
+ valleyview_init_clock_gating;
+ dev_priv->display.force_wake_get = vlv_force_wake_get;
+ dev_priv->display.force_wake_put = vlv_force_wake_put;
+ } else if (IS_PINEVIEW(dev)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ DRM_INFO("failed to find known CxSR latency "
+ "(found ddr%s fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ (dev_priv->is_ddr3 == 1) ? "3" : "2",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ pineview_disable_cxsr(dev);
+ dev_priv->display.update_wm = NULL;
+ } else
+ dev_priv->display.update_wm = pineview_update_wm;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_G4X(dev)) {
+ dev_priv->display.update_wm = g4x_update_wm;
+ dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+ } else if (IS_GEN4(dev)) {
+ dev_priv->display.update_wm = i965_update_wm;
+ if (IS_CRESTLINE(dev))
+ dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+ else if (IS_BROADWATER(dev))
+ dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+ } else if (IS_GEN3(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_I865G(dev)) {
+ dev_priv->display.update_wm = i830_update_wm;
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ } else if (IS_I85X(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i85x_get_fifo_size;
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ } else {
+ dev_priv->display.update_wm = i830_update_wm;
+ dev_priv->display.init_clock_gating = i830_init_clock_gating;
+ if (IS_845G(dev))
+ dev_priv->display.get_fifo_size = i845_get_fifo_size;
+ else
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ }
+
+ /* We attempt to init the necessary power wells early during
+ * initialization, so the subsystems that expect power to be enabled
+ * can work.
+ */
+ intel_init_power_wells(dev);
+}
+
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 62892a826ede..b59b6d5b7583 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -53,9 +53,35 @@ static inline int ring_space(struct intel_ring_buffer *ring)
}
static int
-render_ring_flush(struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+gen2_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ u32 cmd;
+ int ret;
+
+ cmd = MI_FLUSH;
+ if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
+ cmd |= MI_NO_WRITE_FLUSH;
+
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen4_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
{
struct drm_device *dev = ring->dev;
u32 cmd;
@@ -90,17 +116,8 @@ render_ring_flush(struct intel_ring_buffer *ring,
*/
cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- if ((invalidate_domains|flush_domains) &
- I915_GEM_DOMAIN_RENDER)
+ if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
cmd &= ~MI_NO_WRITE_FLUSH;
- if (INTEL_INFO(dev)->gen < 4) {
- /*
- * On the 965, the sampler cache always gets flushed
- * and this bit is reserved.
- */
- if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
- cmd |= MI_READ_FLUSH;
- }
if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
cmd |= MI_EXE_FLUSH;
@@ -290,9 +307,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
- if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
- I915_READ_START(ring) != obj->gtt_offset ||
- (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
+ if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+ I915_READ_START(ring) == obj->gtt_offset &&
+ (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
@@ -384,12 +401,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
int ret = init_ring_common(ring);
if (INTEL_INFO(dev)->gen > 3) {
- int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
- I915_WRITE(MI_MODE, mode);
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
if (IS_GEN7(dev))
I915_WRITE(GFX_MODE_GEN7,
- GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
- GFX_MODE_ENABLE(GFX_REPLAY_MODE));
+ _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+ _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
}
if (INTEL_INFO(dev)->gen >= 5) {
@@ -398,7 +414,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
return ret;
}
-
if (IS_GEN6(dev)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
@@ -406,13 +421,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
* policy is not supported."
*/
I915_WRITE(CACHE_MODE_0,
- CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
+ _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
- if (INTEL_INFO(dev)->gen >= 6) {
- I915_WRITE(INSTPM,
- INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
- }
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
return ret;
}
@@ -483,21 +496,30 @@ gen6_add_request(struct intel_ring_buffer *ring,
* @seqno - seqno which the waiter will block on
*/
static int
-intel_ring_sync(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- int ring,
- u32 seqno)
+gen6_ring_sync(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller,
+ u32 seqno)
{
int ret;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
+ /* Throughout all of the GEM code, seqno passed implies our current
+ * seqno is >= the last seqno executed. However for hardware the
+ * comparison is strictly greater than.
+ */
+ seqno -= 1;
+
+ WARN_ON(signaller->semaphore_register[waiter->id] ==
+ MI_SEMAPHORE_SYNC_INVALID);
+
ret = intel_ring_begin(waiter, 4);
if (ret)
return ret;
- intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
+ intel_ring_emit(waiter,
+ dw1 | signaller->semaphore_register[waiter->id]);
intel_ring_emit(waiter, seqno);
intel_ring_emit(waiter, 0);
intel_ring_emit(waiter, MI_NOOP);
@@ -506,47 +528,6 @@ intel_ring_sync(struct intel_ring_buffer *waiter,
return 0;
}
-/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
-int
-render_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- u32 seqno)
-{
- WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
- return intel_ring_sync(waiter,
- signaller,
- RCS,
- seqno);
-}
-
-/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
-int
-gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- u32 seqno)
-{
- WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
- return intel_ring_sync(waiter,
- signaller,
- VCS,
- seqno);
-}
-
-/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
-int
-gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- u32 seqno)
-{
- WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
- return intel_ring_sync(waiter,
- signaller,
- BCS,
- seqno);
-}
-
-
-
#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
@@ -608,27 +589,6 @@ pc_render_add_request(struct intel_ring_buffer *ring,
return 0;
}
-static int
-render_ring_add_request(struct intel_ring_buffer *ring,
- u32 *result)
-{
- u32 seqno = i915_gem_next_request_seqno(ring);
- int ret;
-
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, seqno);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_advance(ring);
-
- *result = seqno;
- return 0;
-}
-
static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
{
@@ -655,76 +615,115 @@ pc_render_get_seqno(struct intel_ring_buffer *ring)
return pc->cpu_page[0];
}
-static void
-ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
- dev_priv->gt_irq_mask &= ~mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
}
static void
-ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
- dev_priv->gt_irq_mask |= mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
-static void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static bool
+i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
- dev_priv->irq_mask &= ~mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ(IMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
}
static void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
- dev_priv->irq_mask |= mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ(IMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static bool
-render_ring_get_irq(struct intel_ring_buffer *ring)
+i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
if (!dev->irq_enabled)
return false;
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- if (HAS_PCH_SPLIT(dev))
- ironlake_enable_irq(dev_priv,
- GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
- else
- i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
static void
-render_ring_put_irq(struct intel_ring_buffer *ring)
+i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- if (HAS_PCH_SPLIT(dev))
- ironlake_disable_irq(dev_priv,
- GT_USER_INTERRUPT |
- GT_PIPE_NOTIFY);
- else
- i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -776,7 +775,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
}
static int
-ring_add_request(struct intel_ring_buffer *ring,
+i9xx_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
u32 seqno;
@@ -799,10 +798,11 @@ ring_add_request(struct intel_ring_buffer *ring,
}
static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
if (!dev->irq_enabled)
return false;
@@ -812,120 +812,87 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
* blt/bsd rings on ivb. */
gen6_gt_force_wake_get(dev_priv);
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- ring->irq_mask &= ~rflag;
- I915_WRITE_IMR(ring, ring->irq_mask);
- ironlake_enable_irq(dev_priv, gflag);
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- ring->irq_mask |= rflag;
- I915_WRITE_IMR(ring, ring->irq_mask);
- ironlake_disable_irq(dev_priv, gflag);
+ I915_WRITE_IMR(ring, ~0);
+ dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
gen6_gt_force_wake_put(dev_priv);
}
-static bool
-bsd_ring_get_irq(struct intel_ring_buffer *ring)
+static int
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
- struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (!dev->irq_enabled)
- return false;
+ int ret;
- spin_lock(&ring->irq_lock);
- if (ring->irq_refcount++ == 0) {
- if (IS_G4X(dev))
- i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
- else
- ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
- }
- spin_unlock(&ring->irq_lock);
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- return true;
-}
-static void
-bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
- struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ MI_BATCH_GTT |
+ MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
- spin_lock(&ring->irq_lock);
- if (--ring->irq_refcount == 0) {
- if (IS_G4X(dev))
- i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
- else
- ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
- }
- spin_unlock(&ring->irq_lock);
+ return 0;
}
static int
-ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len)
{
int ret;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START | (2 << 6) |
- MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(ring, offset);
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, 0);
intel_ring_advance(ring);
return 0;
}
static int
-render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
{
- struct drm_device *dev = ring->dev;
int ret;
- if (IS_I830(dev) || IS_845G(dev)) {
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
- intel_ring_emit(ring, offset + len - 8);
- intel_ring_emit(ring, 0);
- } else {
- ret = intel_ring_begin(ring, 2);
- if (ret)
- return ret;
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- if (INTEL_INFO(dev)->gen >= 4) {
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START | (2 << 6) |
- MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(ring, offset);
- } else {
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START | (2 << 6));
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
- }
- }
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
intel_ring_advance(ring);
return 0;
@@ -933,7 +900,6 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
static void cleanup_status_page(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
obj = ring->status_page.obj;
@@ -944,14 +910,11 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
-
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}
static int init_status_page(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int ret;
@@ -972,7 +935,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
ring->status_page.gfx_addr = obj->gtt_offset;
ring->status_page.page_addr = kmap(obj->pages[0]);
if (ring->status_page.page_addr == NULL) {
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
goto err_unpin;
}
ring->status_page.obj = obj;
@@ -992,8 +954,8 @@ err:
return ret;
}
-int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
int ret;
@@ -1002,10 +964,9 @@ int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->gpu_write_list);
+ ring->size = 32 * PAGE_SIZE;
init_waitqueue_head(&ring->irq_queue);
- spin_lock_init(&ring->irq_lock);
- ring->irq_mask = ~0;
if (I915_NEED_GFX_HWS(dev)) {
ret = init_status_page(ring);
@@ -1026,20 +987,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto err_unref;
- ring->map.size = ring->size;
- ring->map.offset = dev->agp->base + obj->gtt_offset;
- ring->map.type = 0;
- ring->map.flags = 0;
- ring->map.mtrr = 0;
-
- drm_core_ioremap_wc(&ring->map, dev);
- if (ring->map.handle == NULL) {
+ ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
+ ring->size);
+ if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
ret = -EINVAL;
goto err_unpin;
}
- ring->virtual_start = ring->map.handle;
ret = ring->init(ring);
if (ret)
goto err_unmap;
@@ -1055,7 +1010,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
return 0;
err_unmap:
- drm_core_ioremapfree(&ring->map, dev);
+ iounmap(ring->virtual_start);
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
@@ -1083,7 +1038,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
I915_WRITE_CTL(ring, 0);
- drm_core_ioremapfree(&ring->map, ring->dev);
+ iounmap(ring->virtual_start);
i915_gem_object_unpin(ring->obj);
drm_gem_object_unreference(&ring->obj->base);
@@ -1097,7 +1052,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
- unsigned int *virt;
+ uint32_t __iomem *virt;
int rem = ring->size - ring->tail;
if (ring->space < rem) {
@@ -1106,12 +1061,10 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
return ret;
}
- virt = (unsigned int *)(ring->virtual_start + ring->tail);
- rem /= 8;
- while (rem--) {
- *virt++ = MI_NOOP;
- *virt++ = MI_NOOP;
- }
+ virt = ring->virtual_start + ring->tail;
+ rem /= 4;
+ while (rem--)
+ iowrite32(MI_NOOP, virt++);
ring->tail = 0;
ring->space = ring_space(ring);
@@ -1132,9 +1085,11 @@ static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
- ret = i915_wait_request(ring, seqno, true);
+ ret = i915_wait_request(ring, seqno);
dev_priv->mm.interruptible = was_interruptible;
+ if (!ret)
+ i915_gem_retire_requests_ring(ring);
return ret;
}
@@ -1208,15 +1163,12 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
return ret;
trace_i915_ring_wait_begin(ring);
- if (drm_core_check_feature(dev, DRIVER_GEM))
- /* With GEM the hangcheck timer should kick us out of the loop,
- * leaving it early runs the risk of corrupting GEM state (due
- * to running on almost untested codepaths). But on resume
- * timers don't work yet, so prevent a complete hang in that
- * case by choosing an insanely large timeout. */
- end = jiffies + 60 * HZ;
- else
- end = jiffies + 3 * HZ;
+ /* With GEM the hangcheck timer should kick us out of the loop,
+ * leaving it early runs the risk of corrupting GEM state (due
+ * to running on almost untested codepaths). But on resume
+ * timers don't work yet, so prevent a complete hang in that
+ * case by choosing an insanely large timeout. */
+ end = jiffies + 60 * HZ;
do {
ring->head = I915_READ_HEAD(ring);
@@ -1268,48 +1220,14 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
void intel_ring_advance(struct intel_ring_buffer *ring)
{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
ring->tail &= ring->size - 1;
+ if (dev_priv->stop_rings & intel_ring_flag(ring))
+ return;
ring->write_tail(ring, ring->tail);
}
-static const struct intel_ring_buffer render_ring = {
- .name = "render ring",
- .id = RCS,
- .mmio_base = RENDER_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_render_ring,
- .write_tail = ring_write_tail,
- .flush = render_ring_flush,
- .add_request = render_ring_add_request,
- .get_seqno = ring_get_seqno,
- .irq_get = render_ring_get_irq,
- .irq_put = render_ring_put_irq,
- .dispatch_execbuffer = render_ring_dispatch_execbuffer,
- .cleanup = render_ring_cleanup,
- .sync_to = render_ring_sync_to,
- .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
- MI_SEMAPHORE_SYNC_RV,
- MI_SEMAPHORE_SYNC_RB},
- .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
-};
-
-/* ring buffer for bit-stream decoder */
-
-static const struct intel_ring_buffer bsd_ring = {
- .name = "bsd ring",
- .id = VCS,
- .mmio_base = BSD_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_ring_common,
- .write_tail = ring_write_tail,
- .flush = bsd_ring_flush,
- .add_request = ring_add_request,
- .get_seqno = ring_get_seqno,
- .irq_get = bsd_ring_get_irq,
- .irq_put = bsd_ring_put_irq,
- .dispatch_execbuffer = ring_dispatch_execbuffer,
-};
-
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
@@ -1372,77 +1290,8 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
return 0;
}
-static bool
-gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_get_irq(ring,
- GT_USER_INTERRUPT,
- GEN6_RENDER_USER_INTERRUPT);
-}
-
-static void
-gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_put_irq(ring,
- GT_USER_INTERRUPT,
- GEN6_RENDER_USER_INTERRUPT);
-}
-
-static bool
-gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_get_irq(ring,
- GT_GEN6_BSD_USER_INTERRUPT,
- GEN6_BSD_USER_INTERRUPT);
-}
-
-static void
-gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_put_irq(ring,
- GT_GEN6_BSD_USER_INTERRUPT,
- GEN6_BSD_USER_INTERRUPT);
-}
-
-/* ring buffer for Video Codec for Gen6+ */
-static const struct intel_ring_buffer gen6_bsd_ring = {
- .name = "gen6 bsd ring",
- .id = VCS,
- .mmio_base = GEN6_BSD_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_ring_common,
- .write_tail = gen6_bsd_ring_write_tail,
- .flush = gen6_ring_flush,
- .add_request = gen6_add_request,
- .get_seqno = gen6_ring_get_seqno,
- .irq_get = gen6_bsd_ring_get_irq,
- .irq_put = gen6_bsd_ring_put_irq,
- .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
- .sync_to = gen6_bsd_ring_sync_to,
- .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
- MI_SEMAPHORE_SYNC_INVALID,
- MI_SEMAPHORE_SYNC_VB},
- .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
-};
-
/* Blitter support (SandyBridge+) */
-static bool
-blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_get_irq(ring,
- GT_BLT_USER_INTERRUPT,
- GEN6_BLITTER_USER_INTERRUPT);
-}
-
-static void
-blt_ring_put_irq(struct intel_ring_buffer *ring)
-{
- gen6_ring_put_irq(ring,
- GT_BLT_USER_INTERRUPT,
- GEN6_BLITTER_USER_INTERRUPT);
-}
-
static int blt_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate, u32 flush)
{
@@ -1464,42 +1313,63 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
return 0;
}
-static const struct intel_ring_buffer gen6_blt_ring = {
- .name = "blt ring",
- .id = BCS,
- .mmio_base = BLT_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_ring_common,
- .write_tail = ring_write_tail,
- .flush = blt_ring_flush,
- .add_request = gen6_add_request,
- .get_seqno = gen6_ring_get_seqno,
- .irq_get = blt_ring_get_irq,
- .irq_put = blt_ring_put_irq,
- .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
- .sync_to = gen6_blt_ring_sync_to,
- .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
- MI_SEMAPHORE_SYNC_BV,
- MI_SEMAPHORE_SYNC_INVALID},
- .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
-};
-
int intel_init_render_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
- *ring = render_ring;
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
ring->flush = gen6_render_ring_flush;
- ring->irq_get = gen6_render_ring_get_irq;
- ring->irq_put = gen6_render_ring_put_irq;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->irq_enable_mask = GT_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
+ ring->signal_mbox[0] = GEN6_VRSYNC;
+ ring->signal_mbox[1] = GEN6_BRSYNC;
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
+ ring->flush = gen4_render_ring_flush;
ring->get_seqno = pc_render_get_seqno;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
+ } else {
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
}
+ ring->write_tail = ring_write_tail;
+ if (INTEL_INFO(dev)->gen >= 6)
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
+
if (!I915_NEED_GFX_HWS(dev)) {
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1514,15 +1384,41 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
- *ring = render_ring;
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
if (INTEL_INFO(dev)->gen >= 6) {
- ring->add_request = gen6_add_request;
- ring->irq_get = gen6_render_ring_get_irq;
- ring->irq_put = gen6_render_ring_put_irq;
- } else if (IS_GEN5(dev)) {
- ring->add_request = pc_render_add_request;
- ring->get_seqno = pc_render_get_seqno;
+ /* non-kms not supported on gen6+ */
+ return -ENODEV;
+ }
+
+ /* Note: gem is not supported on gen5/ilk without kms (the corresponding
+ * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
+ * the special gen5 functions. */
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
}
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
+ ring->write_tail = ring_write_tail;
+ if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
if (!I915_NEED_GFX_HWS(dev))
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1537,20 +1433,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
if (IS_I830(ring->dev))
ring->effective_size -= 128;
- ring->map.offset = start;
- ring->map.size = size;
- ring->map.type = 0;
- ring->map.flags = 0;
- ring->map.mtrr = 0;
-
- drm_core_ioremap_wc(&ring->map, dev);
- if (ring->map.handle == NULL) {
+ ring->virtual_start = ioremap_wc(start, size);
+ if (ring->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
- ring->virtual_start = (void __force __iomem *)ring->map.handle;
return 0;
}
@@ -1559,10 +1448,46 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
- if (IS_GEN6(dev) || IS_GEN7(dev))
- *ring = gen6_bsd_ring;
- else
- *ring = bsd_ring;
+ ring->name = "bsd ring";
+ ring->id = VCS;
+
+ ring->write_tail = ring_write_tail;
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ ring->mmio_base = GEN6_BSD_RING_BASE;
+ /* gen6 bsd needs a special workaround for tail updates */
+ if (IS_GEN6(dev))
+ ring->write_tail = gen6_bsd_ring_write_tail;
+ ring->flush = gen6_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
+ ring->signal_mbox[0] = GEN6_RVSYNC;
+ ring->signal_mbox[1] = GEN6_BVSYNC;
+ } else {
+ ring->mmio_base = BSD_RING_BASE;
+ ring->flush = bsd_ring_flush;
+ ring->add_request = i9xx_add_request;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN5(dev)) {
+ ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ } else {
+ ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ }
+ ring->init = init_ring_common;
+
return intel_init_ring_buffer(dev, ring);
}
@@ -1572,7 +1497,25 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
- *ring = gen6_blt_ring;
+ ring->name = "blitter ring";
+ ring->id = BCS;
+
+ ring->mmio_base = BLT_RING_BASE;
+ ring->write_tail = ring_write_tail;
+ ring->flush = blt_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->signal_mbox[0] = GEN6_RBSYNC;
+ ring->signal_mbox[1] = GEN6_VBSYNC;
+ ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
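/*
 * Editorial sketch (illustration only, not part of this commit or of the
 * i915 driver): the intel_ringbuffer.c hunks above drop the static const
 * ring template structs and instead assign every function pointer and
 * interrupt mask explicitly per generation at init time. A minimal
 * standalone version of that pattern, with placeholder names and values:
 */
#include <stdio.h>

struct ring {
	const char *name;
	unsigned int irq_enable_mask;
	void (*write_tail)(struct ring *ring);
};

/* placeholder vfuncs standing in for ring_write_tail()/gen6_bsd_ring_write_tail() */
static void common_write_tail(struct ring *ring) { printf("%s: write tail\n", ring->name); }
static void gen6_write_tail(struct ring *ring)   { printf("%s: gen6 tail workaround\n", ring->name); }

static void init_bsd_ring(struct ring *ring, int gen)
{
	ring->name = "bsd ring";
	ring->write_tail = common_write_tail;
	if (gen >= 6) {
		if (gen == 6)			/* only gen6 needs the tail workaround */
			ring->write_tail = gen6_write_tail;
		ring->irq_enable_mask = 0x1000;	/* placeholder for GEN6_BSD_USER_INTERRUPT */
	} else {
		ring->irq_enable_mask = 0x0020;	/* placeholder for I915_BSD_USER_INTERRUPT */
	}
}

int main(void)
{
	struct ring r;
	init_bsd_ring(&r, 6);
	r.write_tail(&r);
	return 0;
}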
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index bc0365b8fa4d..55d3da26bae7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -2,7 +2,7 @@
#define _INTEL_RINGBUFFER_H_
struct intel_hw_status_page {
- u32 __iomem *page_addr;
+ u32 *page_addr;
unsigned int gfx_addr;
struct drm_i915_gem_object *obj;
};
@@ -56,12 +56,9 @@ struct intel_ring_buffer {
*/
u32 last_retired_head;
- spinlock_t irq_lock;
- u32 irq_refcount;
- u32 irq_mask;
- u32 irq_seqno; /* last seq seem at irq time */
+ u32 irq_refcount; /* protected by dev_priv->irq_lock */
+ u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
- u32 waiting_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
void (*irq_put)(struct intel_ring_buffer *ring);
@@ -118,11 +115,16 @@ struct intel_ring_buffer {
u32 outstanding_lazy_request;
wait_queue_head_t irq_queue;
- drm_local_map_t map;
void *private;
};
+static inline bool
+intel_ring_initialized(struct intel_ring_buffer *ring)
+{
+ return ring->obj != NULL;
+}
+
static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
@@ -152,7 +154,9 @@ static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
int reg)
{
- return ioread32(ring->status_page.page_addr + reg);
+ /* Ensure that the compiler doesn't optimize away the load. */
+ barrier();
+ return ring->status_page.page_addr[reg];
}
/**
@@ -170,10 +174,7 @@ intel_read_status_page(struct intel_ring_buffer *ring,
*
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
-#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
-#define I915_BREADCRUMB_INDEX 0x21
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
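/*
 * Editorial sketch (illustration only, not driver code): the header hunk
 * above turns the status page into a plain CPU mapping, so ioread32() is
 * replaced by an ordinary load preceded by barrier(). The compiler barrier
 * keeps the compiler from hoisting or caching the load of memory that the
 * GPU updates behind its back. A standalone userspace approximation:
 */
#include <stdio.h>
#include <stdint.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static uint32_t status_page[64];	/* stands in for the hardware status page */

static uint32_t read_status(int reg)
{
	barrier();			/* force a fresh load on every call */
	return status_page[reg];
}

int main(void)
{
	status_page[0x20] = 42;		/* pretend the GPU wrote a seqno at HWS_INDEX */
	printf("seqno = %u\n", read_status(0x20));
	return 0;
}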
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index ae5e748f39bb..b6a9d45fc3c6 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -41,7 +41,7 @@
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
-#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
SDVO_TV_MASK)
@@ -74,7 +74,7 @@ struct intel_sdvo {
struct i2c_adapter ddc;
/* Register for the SDVO device: SDVOB or SDVOC */
- int sdvo_reg;
+ uint32_t sdvo_reg;
/* Active outputs controlled by this SDVO output */
uint16_t controlled_output;
@@ -114,6 +114,9 @@ struct intel_sdvo {
*/
bool is_tv;
+ /* On different gens SDVOB is at different places. */
+ bool is_sdvob;
+
/* This is for current tv format name */
int tv_format_index;
@@ -403,8 +406,7 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
-#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
-#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC")
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
@@ -441,9 +443,17 @@ static const char *cmd_status_names[] = {
static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
- u8 buf[args_len*2 + 2], status;
- struct i2c_msg msgs[args_len + 3];
- int i, ret;
+ u8 *buf, status;
+ struct i2c_msg *msgs;
+ int i, ret = true;
+
+ buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL);
+ if (!buf)
+ return false;
+
+ msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
+ if (!msgs)
+ return false;
intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
@@ -477,15 +487,19 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
if (ret < 0) {
DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
- return false;
+ ret = false;
+ goto out;
}
if (ret != i+3) {
/* failure in I2C transfer */
DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
- return false;
+ ret = false;
}
- return true;
+out:
+ kfree(msgs);
+ kfree(buf);
+ return ret;
}
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
@@ -733,18 +747,18 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
uint16_t h_sync_offset, v_sync_offset;
int mode_clock;
- width = mode->crtc_hdisplay;
- height = mode->crtc_vdisplay;
+ width = mode->hdisplay;
+ height = mode->vdisplay;
/* do some mode translations */
- h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
- h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ h_blank_len = mode->htotal - mode->hdisplay;
+ h_sync_len = mode->hsync_end - mode->hsync_start;
- v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
- v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ v_blank_len = mode->vtotal - mode->vdisplay;
+ v_sync_len = mode->vsync_end - mode->vsync_start;
- h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
- v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+ h_sync_offset = mode->hsync_start - mode->hdisplay;
+ v_sync_offset = mode->vsync_start - mode->vdisplay;
mode_clock = mode->clock;
mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
@@ -769,10 +783,12 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
((v_sync_len & 0x30) >> 4);
dtd->part2.dtd_flags = 0x18;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- dtd->part2.dtd_flags |= 0x2;
+ dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- dtd->part2.dtd_flags |= 0x4;
+ dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
dtd->part2.sdvo_flags = 0;
dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
@@ -806,9 +822,11 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
mode->clock = dtd->part1.clock * 10;
mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
- if (dtd->part2.dtd_flags & 0x2)
+ if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PHSYNC;
- if (dtd->part2.dtd_flags & 0x4)
+ if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
@@ -873,17 +891,24 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
};
uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
uint8_t set_buf_index[2] = { 1, 0 };
- uint64_t *data = (uint64_t *)&avi_if;
+ uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
+ uint64_t *data = (uint64_t *)sdvo_data;
unsigned i;
intel_dip_infoframe_csum(&avi_if);
+ /* sdvo spec says that the ecc is handled by the hw, and it looks like
+ * we must not send the ecc field, either. */
+ memcpy(sdvo_data, &avi_if, 3);
+ sdvo_data[3] = avi_if.checksum;
+ memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
+
if (!intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_HBUF_INDEX,
set_buf_index, 2))
return false;
- for (i = 0; i < sizeof(avi_if); i += 8) {
+ for (i = 0; i < sizeof(sdvo_data); i += 8) {
if (!intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_HBUF_DATA,
data, 8))
@@ -1260,10 +1285,11 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
struct drm_i915_private *dev_priv = connector->dev->dev_private;
return drm_get_edid(connector,
- &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ dev_priv->crt_ddc_pin));
}
-enum drm_connector_status
+static enum drm_connector_status
intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
@@ -1349,8 +1375,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
return connector_status_unknown;
/* add 30ms delay when the output type might be TV */
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
+ if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
mdelay(30);
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
@@ -1570,9 +1595,6 @@ end:
intel_sdvo->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, newmode);
- drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
- 0);
-
intel_sdvo->is_lvds = true;
break;
}
@@ -1901,7 +1923,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
{
struct sdvo_device_mapping *mapping;
- if (IS_SDVOB(reg))
+ if (sdvo->is_sdvob)
mapping = &(dev_priv->sdvo_mappings[0]);
else
mapping = &(dev_priv->sdvo_mappings[1]);
@@ -1919,7 +1941,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
struct sdvo_device_mapping *mapping;
u8 pin;
- if (IS_SDVOB(reg))
+ if (sdvo->is_sdvob)
mapping = &dev_priv->sdvo_mappings[0];
else
mapping = &dev_priv->sdvo_mappings[1];
@@ -1928,12 +1950,12 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
if (mapping->initialized)
pin = mapping->i2c_pin;
- if (pin < GMBUS_NUM_PORTS) {
- sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+ if (intel_gmbus_is_port_valid(pin)) {
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
intel_gmbus_force_bit(sdvo->i2c, true);
} else {
- sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
}
}
@@ -1944,12 +1966,12 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
}
static u8
-intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct sdvo_device_mapping *my_mapping, *other_mapping;
- if (IS_SDVOB(sdvo_reg)) {
+ if (sdvo->is_sdvob) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
} else {
@@ -1974,7 +1996,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
/* No SDVO device info is found for another DVO port,
* so use mapping assumption we had before BIOS parsing.
*/
- if (IS_SDVOB(sdvo_reg))
+ if (sdvo->is_sdvob)
return 0x70;
else
return 0x72;
@@ -2199,6 +2221,10 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
return false;
+ if (flags & SDVO_OUTPUT_YPRPB0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
+ return false;
+
if (flags & SDVO_OUTPUT_RGB0)
if (!intel_sdvo_analog_init(intel_sdvo, 0))
return false;
@@ -2490,7 +2516,7 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
return i2c_add_adapter(&sdvo->ddc) == 0;
}
-bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
@@ -2502,7 +2528,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
return false;
intel_sdvo->sdvo_reg = sdvo_reg;
- intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+ intel_sdvo->is_sdvob = is_sdvob;
+ intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
kfree(intel_sdvo);
@@ -2519,13 +2546,13 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
u8 byte;
if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
- DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
- IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ DRM_DEBUG_KMS("No SDVO device found on %s\n",
+ SDVO_NAME(intel_sdvo));
goto err;
}
}
- if (IS_SDVOB(sdvo_reg))
+ if (intel_sdvo->is_sdvob)
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
else
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
@@ -2546,8 +2573,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
- DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
- IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
+ SDVO_NAME(intel_sdvo));
goto err;
}
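/*
 * Editorial sketch (userspace malloc/calloc instead of kzalloc/kcalloc, not
 * the driver code): the intel_sdvo_write_cmd() hunk above replaces on-stack
 * variable-length arrays with heap allocations and funnels exits through a
 * cleanup label. For illustration, this sketch frees both buffers on every
 * exit path; sizes and names are placeholders.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

static bool send_cmd(const void *args, size_t args_len)
{
	bool ok = false;
	unsigned char *buf;
	unsigned char *msgs;

	buf = calloc(1, args_len * 2 + 2);
	if (!buf)
		return false;

	msgs = calloc(args_len + 3, 16);	/* 16: placeholder per-message size */
	if (!msgs)
		goto out;			/* free(NULL) is harmless, buf is freed */

	memcpy(buf, args, args_len);		/* stand-in for building the i2c transfer */
	ok = true;
out:
	free(msgs);
	free(buf);
	return ok;
}

int main(void)
{
	const char payload[] = { 0x01, 0x02 };
	printf("send_cmd: %s\n", send_cmd(payload, sizeof(payload)) ? "ok" : "failed");
	return 0;
}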
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 6b7b22f4d63e..9d030142ee43 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -61,6 +61,11 @@ struct intel_sdvo_caps {
u16 output_flags;
} __attribute__((packed));
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE (1 << 7)
+
/** This matches the EDID DTD structure, more or less */
struct intel_sdvo_dtd {
struct {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index e90dfb625c42..2a20fb0781d7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -110,14 +110,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
* when scaling is disabled.
*/
if (crtc_w != src_w || crtc_h != src_h) {
- dev_priv->sprite_scaling_enabled = true;
- sandybridge_update_wm(dev);
- intel_wait_for_vblank(dev, pipe);
+ if (!dev_priv->sprite_scaling_enabled) {
+ dev_priv->sprite_scaling_enabled = true;
+ intel_update_watermarks(dev);
+ intel_wait_for_vblank(dev, pipe);
+ }
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
} else {
- dev_priv->sprite_scaling_enabled = false;
- /* potentially re-enable LP watermarks */
- sandybridge_update_wm(dev);
+ if (dev_priv->sprite_scaling_enabled) {
+ dev_priv->sprite_scaling_enabled = false;
+ /* potentially re-enable LP watermarks */
+ intel_update_watermarks(dev);
+ }
}
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
@@ -133,7 +137,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
- I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
POSTING_READ(SPRSURF(pipe));
}
@@ -149,8 +153,11 @@ ivb_disable_plane(struct drm_plane *plane)
/* Can't leave the scaler enabled... */
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
- I915_WRITE(SPRSURF(pipe), 0);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
+
+ dev_priv->sprite_scaling_enabled = false;
+ intel_update_watermarks(dev);
}
static int
@@ -208,7 +215,7 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
}
static void
-snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@@ -218,7 +225,7 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe, pixel_size;
- u32 dvscntr, dvsscale = 0;
+ u32 dvscntr, dvsscale;
dvscntr = I915_READ(DVSCNTR(pipe));
@@ -262,8 +269,8 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
if (obj->tiling_mode != I915_TILING_NONE)
dvscntr |= DVS_TILED;
- /* must disable */
- dvscntr |= DVS_TRICKLE_FEED_DISABLE;
+ if (IS_GEN6(dev))
+ dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
/* Sizes are 0 based */
@@ -274,7 +281,8 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
- if (crtc_w != src_w || crtc_h != src_h)
+ dvsscale = 0;
+ if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@@ -290,12 +298,12 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
POSTING_READ(DVSSURF(pipe));
}
static void
-snb_disable_plane(struct drm_plane *plane)
+ilk_disable_plane(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -306,7 +314,7 @@ snb_disable_plane(struct drm_plane *plane)
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
/* Flush double buffered register updates */
- I915_WRITE(DVSSURF(pipe), 0);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
}
@@ -333,7 +341,7 @@ intel_disable_primary(struct drm_crtc *crtc)
}
static int
-snb_update_colorkey(struct drm_plane *plane,
+ilk_update_colorkey(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
@@ -362,7 +370,7 @@ snb_update_colorkey(struct drm_plane *plane,
}
static void
-snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -550,14 +558,13 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *set = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
- if (!dev_priv)
- return -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
@@ -584,14 +591,13 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *get = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
- if (!dev_priv)
- return -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
mutex_lock(&dev->mode_config.mutex);
@@ -616,6 +622,14 @@ static const struct drm_plane_funcs intel_plane_funcs = {
.destroy = intel_destroy_plane,
};
+static uint32_t ilk_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
static uint32_t snb_plane_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
@@ -630,34 +644,56 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
{
struct intel_plane *intel_plane;
unsigned long possible_crtcs;
+ const uint32_t *plane_formats;
+ int num_plane_formats;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;
intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
if (!intel_plane)
return -ENOMEM;
- if (IS_GEN6(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 5:
+ case 6:
intel_plane->max_downscale = 16;
- intel_plane->update_plane = snb_update_plane;
- intel_plane->disable_plane = snb_disable_plane;
- intel_plane->update_colorkey = snb_update_colorkey;
- intel_plane->get_colorkey = snb_get_colorkey;
- } else if (IS_GEN7(dev)) {
+ intel_plane->update_plane = ilk_update_plane;
+ intel_plane->disable_plane = ilk_disable_plane;
+ intel_plane->update_colorkey = ilk_update_colorkey;
+ intel_plane->get_colorkey = ilk_get_colorkey;
+
+ if (IS_GEN6(dev)) {
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ } else {
+ plane_formats = ilk_plane_formats;
+ num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
+ }
+ break;
+
+ case 7:
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
intel_plane->update_colorkey = ivb_update_colorkey;
intel_plane->get_colorkey = ivb_get_colorkey;
+
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ break;
+
+ default:
+ return -ENODEV;
}
intel_plane->pipe = pipe;
possible_crtcs = (1 << pipe);
ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs, snb_plane_formats,
- ARRAY_SIZE(snb_plane_formats), false);
+ &intel_plane_funcs,
+ plane_formats, num_plane_formats,
+ false);
if (ret)
kfree(intel_plane);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 05f765ef5464..a233a51fd7e6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -674,6 +674,54 @@ static const struct tv_mode tv_modes[] = {
.filter_table = filter_table,
},
{
+ .name = "480p",
+ .clock = 107520,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 122,
+ .hblank_start = 842, .htotal = 857,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 12, .vsync_start_f2 = 12,
+ .vsync_len = 12,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 479,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "576p",
+ .clock = 107520,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 139,
+ .hblank_start = 859, .htotal = 863,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 48, .vi_end_f2 = 48,
+ .nbr_end = 575,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
.name = "720p@60Hz",
.clock = 148800,
.refresh = 60000,
@@ -811,7 +859,7 @@ intel_tv_mode_lookup(const char *tv_format)
{
int i;
- for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
const struct tv_mode *tv_mode = &tv_modes[i];
if (!strcmp(tv_format, tv_mode->name))
@@ -1153,6 +1201,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
DAC_B_0_7_V |
DAC_C_0_7_V);
+
+ /*
+ * The TV sense state should be cleared to zero on the Cantiga platform.
+ * Otherwise the TV is misdetected. This is a hardware requirement.
+ */
+ if (IS_GM45(dev))
+ tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
+
I915_WRITE(TV_CTL, tv_ctl);
I915_WRITE(TV_DAC, tv_dac);
POSTING_READ(TV_DAC);
@@ -1185,6 +1242,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
I915_WRITE(TV_CTL, save_tv_ctl);
+ POSTING_READ(TV_CTL);
+
+ /* For unknown reasons the hw barfs if we don't do this vblank wait. */
+ intel_wait_for_vblank(intel_tv->base.base.dev,
+ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
@@ -1240,11 +1302,8 @@ intel_tv_detect(struct drm_connector *connector, bool force)
int type;
mode = reported_modes[0];
- drm_mode_set_crtcinfo(&mode, 0);
- if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
- type = intel_tv_detect_type(intel_tv, connector);
- } else if (force) {
+ if (force) {
struct intel_load_detect_pipe tmp;
if (intel_get_load_detect_pipe(&intel_tv->base, connector,
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
new file mode 100644
index 000000000000..d63013497f66
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -0,0 +1,15 @@
+config DRM_MGAG200
+ tristate "Kernel modesetting driver for MGA G200 server engines"
+ depends on DRM && PCI && EXPERIMENTAL
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ help
+ This is a KMS driver for the MGA G200 server chips; it
+ does not support the original MGA G200 or any of the desktop
+ chips. It requires version 0.3.0 of the modesetting userspace
+ driver, and a version of the mga driver that will fail on
+ KMS-enabled devices.
+
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
new file mode 100644
index 000000000000..7db592eedbf1
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -Iinclude/drm
+mgag200-y := mgag200_main.o mgag200_mode.o \
+ mgag200_drv.o mgag200_fb.o mgag200_i2c.o mgag200_ttm.o
+
+obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
new file mode 100644
index 000000000000..3c8e04f54713
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include "drmP.h"
+#include "drm.h"
+
+#include "mgag200_drv.h"
+
+#include "drm_pciids.h"
+
+/*
+ * This is the generic driver code. It binds the driver to the drm core,
+ * which then performs further device association and calls our graphics
+ * init functions.
+ */
+int mgag200_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, mgag200_modeset, int, 0400);
+
+static struct drm_driver driver;
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
+ { PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
+ { PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
+ { PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
+ { PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
+ { PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int __devinit
+mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void mga_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static const struct file_operations mgag200_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = mgag200_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_USE_MTRR,
+ .load = mgag200_driver_load,
+ .unload = mgag200_driver_unload,
+ .fops = &mgag200_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .gem_init_object = mgag200_gem_init_object,
+ .gem_free_object = mgag200_gem_free_object,
+ .dumb_create = mgag200_dumb_create,
+ .dumb_map_offset = mgag200_dumb_mmap_offset,
+ .dumb_destroy = mgag200_dumb_destroy,
+};
+
+static struct pci_driver mgag200_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = mga_pci_probe,
+ .remove = mga_pci_remove,
+};
+
+static int __init mgag200_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && mgag200_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (mgag200_modeset == 0)
+ return -EINVAL;
+ return drm_pci_init(&driver, &mgag200_pci_driver);
+}
+
+static void __exit mgag200_exit(void)
+{
+ drm_pci_exit(&driver, &mgag200_pci_driver);
+}
+
+module_init(mgag200_init);
+module_exit(mgag200_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
new file mode 100644
index 000000000000..6f13b3563234
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+#ifndef __MGAG200_DRV_H__
+#define __MGAG200_DRV_H__
+
+#include <video/vga.h>
+
+#include "drm/drm_fb_helper.h"
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include "mgag200_reg.h"
+
+#define DRIVER_AUTHOR "Matthew Garrett"
+
+#define DRIVER_NAME "mgag200"
+#define DRIVER_DESC "MGA G200 SE"
+#define DRIVER_DATE "20110418"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#define MGAG200FB_CONN_LIMIT 1
+
+#define RREG8(reg) ioread8(((void __iomem *)mdev->rmmio) + (reg))
+#define WREG8(reg, v) iowrite8(v, ((void __iomem *)mdev->rmmio) + (reg))
+#define RREG32(reg) ioread32(((void __iomem *)mdev->rmmio) + (reg))
+#define WREG32(reg, v) iowrite32(v, ((void __iomem *)mdev->rmmio) + (reg))
+
+#define ATTR_INDEX 0x1fc0
+#define ATTR_DATA 0x1fc1
+
+#define WREG_ATTR(reg, v) \
+ do { \
+ RREG8(0x1fda); \
+ WREG8(ATTR_INDEX, reg); \
+ WREG8(ATTR_DATA, v); \
+ } while (0) \
+
+#define WREG_SEQ(reg, v) \
+ do { \
+ WREG8(MGAREG_SEQ_INDEX, reg); \
+ WREG8(MGAREG_SEQ_DATA, v); \
+ } while (0) \
+
+#define WREG_CRT(reg, v) \
+ do { \
+ WREG8(MGAREG_CRTC_INDEX, reg); \
+ WREG8(MGAREG_CRTC_DATA, v); \
+ } while (0) \
+
+
+#define WREG_ECRT(reg, v) \
+ do { \
+ WREG8(MGAREG_CRTCEXT_INDEX, reg); \
+ WREG8(MGAREG_CRTCEXT_DATA, v); \
+ } while (0) \
+
+#define GFX_INDEX 0x1fce
+#define GFX_DATA 0x1fcf
+
+#define WREG_GFX(reg, v) \
+ do { \
+ WREG8(GFX_INDEX, reg); \
+ WREG8(GFX_DATA, v); \
+ } while (0) \
+
+#define DAC_INDEX 0x3c00
+#define DAC_DATA 0x3c0a
+
+#define WREG_DAC(reg, v) \
+ do { \
+ WREG8(DAC_INDEX, reg); \
+ WREG8(DAC_DATA, v); \
+ } while (0) \
+
+#define MGA_MISC_OUT 0x1fc2
+#define MGA_MISC_IN 0x1fcc
+
+#define MGAG200_MAX_FB_HEIGHT 4096
+#define MGAG200_MAX_FB_WIDTH 4096
+
+#define MATROX_DPMS_CLEARED (-1)
+
+#define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
+#define to_mga_encoder(x) container_of(x, struct mga_encoder, base)
+#define to_mga_connector(x) container_of(x, struct mga_connector, base)
+#define to_mga_framebuffer(x) container_of(x, struct mga_framebuffer, base)
+
+struct mga_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct mga_fbdev {
+ struct drm_fb_helper helper;
+ struct mga_framebuffer mfb;
+ struct list_head fbdev_list;
+ void *sysram;
+ int size;
+ struct ttm_bo_kmap_obj mapping;
+};
+
+struct mga_crtc {
+ struct drm_crtc base;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ int last_dpms;
+ bool enabled;
+};
+
+struct mga_mode_info {
+ bool mode_config_initialized;
+ struct mga_crtc *crtc;
+};
+
+struct mga_encoder {
+ struct drm_encoder base;
+ int last_dpms;
+};
+
+
+struct mga_i2c_chan {
+ struct i2c_adapter adapter;
+ struct drm_device *dev;
+ struct i2c_algo_bit_data bit;
+ int data, clock;
+};
+
+struct mga_connector {
+ struct drm_connector base;
+ struct mga_i2c_chan *i2c;
+};
+
+
+struct mga_mc {
+ resource_size_t vram_size;
+ resource_size_t vram_base;
+ resource_size_t vram_window;
+};
+
+enum mga_type {
+ G200_SE_A,
+ G200_SE_B,
+ G200_WB,
+ G200_EV,
+ G200_EH,
+ G200_ER,
+};
+
+#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
+
+struct mga_device {
+ struct drm_device *dev;
+ unsigned long flags;
+
+ resource_size_t rmmio_base;
+ resource_size_t rmmio_size;
+ void __iomem *rmmio;
+
+ drm_local_map_t *framebuffer;
+
+ struct mga_mc mc;
+ struct mga_mode_info mode_info;
+
+ struct mga_fbdev *mfbdev;
+
+ bool suspended;
+ int num_crtc;
+ enum mga_type type;
+ int has_sdram;
+ struct drm_display_mode mode;
+
+ int bpp_shifts[4];
+
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ atomic_t validate_sequence;
+ } ttm;
+
+ u32 reg_1e24; /* SE model number */
+};
+
+
+struct mgag200_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+#define gem_to_mga_bo(gobj) container_of((gobj), struct mgag200_bo, gem)
+
+static inline struct mgag200_bo *
+mgag200_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct mgag200_bo, bo);
+}
+ /* mga_crtc.c */
+void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+
+ /* mgag200_mode.c */
+int mgag200_modeset_init(struct mga_device *mdev);
+void mgag200_modeset_fini(struct mga_device *mdev);
+
+ /* mga_fbdev.c */
+int mgag200_fbdev_init(struct mga_device *mdev);
+void mgag200_fbdev_fini(struct mga_device *mdev);
+
+ /* mgag200_main.c */
+int mgag200_framebuffer_init(struct drm_device *dev,
+ struct mga_framebuffer *mfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
+int mgag200_driver_unload(struct drm_device *dev);
+int mgag200_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+int mgag200_gem_init_object(struct drm_gem_object *obj);
+int mgag200_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int mgag200_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
+void mgag200_gem_free_object(struct drm_gem_object *obj);
+int
+mgag200_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset);
+ /* mga_i2c.c */
+struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
+void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+void mgag200_ttm_placement(struct mgag200_bo *bo, int domain);
+
+int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait);
+void mgag200_bo_unreserve(struct mgag200_bo *bo);
+int mgag200_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct mgag200_bo **pastbo);
+int mgag200_mm_init(struct mga_device *mdev);
+void mgag200_mm_fini(struct mga_device *mdev);
+int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
+int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int mgag200_bo_unpin(struct mgag200_bo *bo);
+int mgag200_bo_push_sysram(struct mgag200_bo *bo);
+#endif /* __MGAG200_DRV_H__ */
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
new file mode 100644
index 000000000000..880d3369760e
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_fb_helper.h"
+
+#include <linux/fb.h>
+
+#include "mgag200_drv.h"
+
+static void mga_dirty_update(struct mga_fbdev *mfbdev,
+ int x, int y, int width, int height)
+{
+ int i;
+ struct drm_gem_object *obj;
+ struct mgag200_bo *bo;
+ int src_offset, dst_offset;
+ int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
+ int ret;
+ bool unmap = false;
+
+ obj = mfbdev->mfb.obj;
+ bo = gem_to_mga_bo(obj);
+
+ ret = mgag200_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("failed to reserve fb bo\n");
+ return;
+ }
+
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ mgag200_bo_unreserve(bo);
+ return;
+ }
+ unmap = true;
+ }
+ for (i = y; i < y + height; i++) {
+ /* assume equal stride for now */
+ src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
+
+ }
+ if (unmap)
+ ttm_bo_kunmap(&bo->kmap);
+
+ mgag200_bo_unreserve(bo);
+}
+
+static void mga_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct mga_fbdev *mfbdev = info->par;
+ sys_fillrect(info, rect);
+ mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void mga_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct mga_fbdev *mfbdev = info->par;
+ sys_copyarea(info, area);
+ mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
+ area->height);
+}
+
+static void mga_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct mga_fbdev *mfbdev = info->par;
+ sys_imageblit(info, image);
+ mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+
+static struct fb_ops mgag200fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = mga_fillrect,
+ .fb_copyarea = mga_copyarea,
+ .fb_imageblit = mga_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int mgag200fb_create_object(struct mga_fbdev *afbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ u32 bpp, depth;
+ u32 size;
+ struct drm_gem_object *gobj;
+
+ int ret = 0;
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = mgag200_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int mgag200fb_create(struct mga_fbdev *mfbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = mfbdev->helper.dev;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct mga_device *mdev = dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *gobj = NULL;
+ struct device *device = &dev->pdev->dev;
+ struct mgag200_bo *bo;
+ int ret;
+ void *sysram;
+ int size;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ ret = mgag200fb_create_object(mfbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+ bo = gem_to_mga_bo(gobj);
+
+ sysram = vmalloc(size);
+ if (!sysram)
+ return -ENOMEM;
+
+ info = framebuffer_alloc(0, device);
+ if (info == NULL)
+ return -ENOMEM;
+
+ info->par = mfbdev;
+
+ ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ mfbdev->sysram = sysram;
+ mfbdev->size = size;
+
+ fb = &mfbdev->mfb.base;
+
+ /* setup helper */
+ mfbdev->helper.fb = fb;
+ mfbdev->helper.fbdev = info;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ strcpy(info->fix.id, "mgadrmfb");
+
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &mgag200fb_ops;
+
+ /* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = mdev->mc.vram_size;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ info->screen_base = sysram;
+ info->screen_size = size;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ DRM_DEBUG_KMS("allocated %dx%d\n",
+ fb->width, fb->height);
+ return 0;
+out:
+ return ret;
+}
+
+static int mga_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size
+ *sizes)
+{
+ struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = mgag200fb_create(mfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static int mga_fbdev_destroy(struct drm_device *dev,
+ struct mga_fbdev *mfbdev)
+{
+ struct fb_info *info;
+ struct mga_framebuffer *mfb = &mfbdev->mfb;
+
+ if (mfbdev->helper.fbdev) {
+ info = mfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (mfb->obj) {
+ drm_gem_object_unreference_unlocked(mfb->obj);
+ mfb->obj = NULL;
+ }
+ drm_fb_helper_fini(&mfbdev->helper);
+ vfree(mfbdev->sysram);
+ drm_framebuffer_cleanup(&mfb->base);
+
+ return 0;
+}
+
+static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
+ .gamma_set = mga_crtc_fb_gamma_set,
+ .gamma_get = mga_crtc_fb_gamma_get,
+ .fb_probe = mga_fb_find_or_create_single,
+};
+
+int mgag200_fbdev_init(struct mga_device *mdev)
+{
+ struct mga_fbdev *mfbdev;
+ int ret;
+
+ mfbdev = kzalloc(sizeof(struct mga_fbdev), GFP_KERNEL);
+ if (!mfbdev)
+ return -ENOMEM;
+
+ mdev->mfbdev = mfbdev;
+ mfbdev->helper.funcs = &mga_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
+ mdev->num_crtc, MGAG200FB_CONN_LIMIT);
+ if (ret) {
+ kfree(mfbdev);
+ return ret;
+ }
+ drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
+ drm_fb_helper_initial_config(&mfbdev->helper, 32);
+
+ return 0;
+}
+
+void mgag200_fbdev_fini(struct mga_device *mdev)
+{
+ if (!mdev->mfbdev)
+ return;
+
+ mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
+ kfree(mdev->mfbdev);
+ mdev->mfbdev = NULL;
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
new file mode 100644
index 000000000000..dd3568a1b6b0
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm.h"
+
+#include "mgag200_drv.h"
+
+static int mga_i2c_read_gpio(struct mga_device *mdev)
+{
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ return RREG8(DAC_DATA);
+}
+
+static void mga_i2c_set_gpio(struct mga_device *mdev, int mask, int val)
+{
+ int tmp;
+
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+ tmp = (RREG8(DAC_DATA) & mask) | val;
+ WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+ WREG_DAC(MGA1064_GEN_IO_DATA, 0);
+}
+
+static inline void mga_i2c_set(struct mga_device *mdev, int mask, int state)
+{
+ if (state)
+ state = 0;
+ else
+ state = mask;
+ mga_i2c_set_gpio(mdev, ~mask, state);
+}
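+
+/*
+ * The DDC lines are bit-banged as open-drain GPIOs: a logical 1 clears the
+ * output-enable bit in GEN_IO_CTL so the external pull-up raises the line,
+ * while a logical 0 enables the output and drives it low (GEN_IO_DATA is
+ * kept at 0). This is the behaviour i2c-algo-bit expects from setsda/setscl.
+ */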
+
+static void mga_gpio_setsda(void *data, int state)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ mga_i2c_set(mdev, i2c->data, state);
+}
+
+static void mga_gpio_setscl(void *data, int state)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ mga_i2c_set(mdev, i2c->clock, state);
+}
+
+static int mga_gpio_getsda(void *data)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ return (mga_i2c_read_gpio(mdev) & i2c->data) ? 1 : 0;
+}
+
+static int mga_gpio_getscl(void *data)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
+}
+
+struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
+{
+ struct mga_device *mdev = dev->dev_private;
+ struct mga_i2c_chan *i2c;
+ int ret;
+ int data, clock;
+
+ WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
+ WREG_DAC(MGA1064_GEN_IO_CTL, 0);
+
+ switch (mdev->type) {
+ case G200_SE_A:
+ case G200_SE_B:
+ case G200_EV:
+ case G200_WB:
+ data = 1;
+ clock = 2;
+ break;
+ case G200_EH:
+ case G200_ER:
+ data = 2;
+ clock = 1;
+ break;
+ default:
+ data = 2;
+ clock = 8;
+ break;
+ }
+
+ i2c = kzalloc(sizeof(struct mga_i2c_chan), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+
+ i2c->data = data;
+ i2c->clock = clock;
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "mga i2c");
+
+ i2c->adapter.algo_data = &i2c->bit;
+
+ i2c->bit.udelay = 10;
+ i2c->bit.timeout = 2;
+ i2c->bit.data = i2c;
+ i2c->bit.setsda = mga_gpio_setsda;
+ i2c->bit.setscl = mga_gpio_setscl;
+ i2c->bit.getsda = mga_gpio_getsda;
+ i2c->bit.getscl = mga_gpio_getscl;
+
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ kfree(i2c);
+ i2c = NULL;
+ }
+ return i2c;
+}
+
+void mgag200_i2c_destroy(struct mga_i2c_chan *i2c)
+{
+ if (!i2c)
+ return;
+ i2c_del_adapter(&i2c->adapter);
+ kfree(i2c);
+}
+
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
new file mode 100644
index 000000000000..636a81cd2f37
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+#include "mgag200_drv.h"
+
+static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
+ if (mga_fb->obj)
+ drm_gem_object_unreference_unlocked(mga_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static int mga_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs mga_fb_funcs = {
+ .destroy = mga_user_framebuffer_destroy,
+ .create_handle = mga_user_framebuffer_create_handle,
+};
+
+int mgag200_framebuffer_init(struct drm_device *dev,
+ struct mga_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ return 0;
+}
+
+static struct drm_framebuffer *
+mgag200_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct mga_framebuffer *mga_fb;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL);
+ if (!mga_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(mga_fb);
+ return ERR_PTR(ret);
+ }
+ return &mga_fb->base;
+}
+
+static const struct drm_mode_config_funcs mga_mode_funcs = {
+ .fb_create = mgag200_user_framebuffer_create,
+};
+
+/* Unmap the register aperture and release the VRAM region */
+static void mga_vram_fini(struct mga_device *mdev)
+{
+ pci_iounmap(mdev->dev->pdev, mdev->rmmio);
+ mdev->rmmio = NULL;
+ if (mdev->mc.vram_base)
+ release_mem_region(mdev->mc.vram_base, mdev->mc.vram_window);
+}
+
+static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
+{
+ int offset;
+ int orig;
+ int test1, test2;
+ int orig1, orig2;
+
+ /* Probe */
+ orig = ioread16(mem);
+ iowrite16(0, mem);
+
+ for (offset = 0x100000; offset < mdev->mc.vram_window; offset += 0x4000) {
+ orig1 = ioread8(mem + offset);
+ orig2 = ioread8(mem + offset + 0x100);
+
+ iowrite16(0xaa55, mem + offset);
+ iowrite16(0xaa55, mem + offset + 0x100);
+
+ test1 = ioread16(mem + offset);
+ test2 = ioread16(mem);
+
+ iowrite16(orig1, mem + offset);
+ iowrite16(orig2, mem + offset + 0x100);
+
+ if (test1 != 0xaa55) {
+ break;
+ }
+
+ if (test2) {
+ break;
+ }
+ }
+
+ iowrite16(orig, mem);
+ return offset - 65536;
+}
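+
+/*
+ * The probe above writes an 0xaa55 pattern every 16 KiB starting 1 MiB into
+ * the aperture, reading it back and also re-checking offset 0 to catch
+ * address wrap-around; the first offset that fails, minus a 64 KiB safety
+ * margin, is reported as the usable VRAM size.
+ */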
+
+/* Map the framebuffer from the card and configure the core */
+static int mga_vram_init(struct mga_device *mdev)
+{
+ void __iomem *mem;
+ struct apertures_struct *aper = alloc_apertures(1);
+
+ /* BAR 0 is VRAM */
+ mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
+ mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
+
+ aper->ranges[0].base = mdev->mc.vram_base;
+ aper->ranges[0].size = mdev->mc.vram_window;
+ aper->count = 1;
+
+ remove_conflicting_framebuffers(aper, "mgafb", true);
+
+ if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
+ "mgadrmfb_vram")) {
+ DRM_ERROR("can't reserve VRAM\n");
+ return -ENXIO;
+ }
+
+ mem = pci_iomap(mdev->dev->pdev, 0, 0);
+
+ mdev->mc.vram_size = mga_probe_vram(mdev, mem);
+
+ pci_iounmap(mdev->dev->pdev, mem);
+
+ return 0;
+}
+
+static int mgag200_device_init(struct drm_device *dev,
+ uint32_t flags)
+{
+ struct mga_device *mdev = dev->dev_private;
+ int ret, option;
+
+ mdev->type = flags;
+
+ /* Hardcode the number of CRTCs to 1 */
+ mdev->num_crtc = 1;
+
+ pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
+ mdev->has_sdram = !(option & (1 << 14));
+
+ /* BAR 0 is the framebuffer, BAR 1 contains registers */
+ mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
+ mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
+
+ if (!request_mem_region(mdev->rmmio_base, mdev->rmmio_size,
+ "mgadrmfb_mmio")) {
+ DRM_ERROR("can't reserve mmio registers\n");
+ return -ENOMEM;
+ }
+
+ mdev->rmmio = pci_iomap(dev->pdev, 1, 0);
+ if (mdev->rmmio == NULL)
+ return -ENOMEM;
+
+ /* stash G200 SE model number for later use */
+ if (IS_G200_SE(mdev))
+ mdev->reg_1e24 = RREG32(0x1e24);
+
+ ret = mga_vram_init(mdev);
+ if (ret) {
+ release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
+ return ret;
+ }
+
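+ /* Indexed by (bytes per pixel - 1): the entry is log2 of the bytes
+ * per pixel for the 1, 2 and 4 byte formats; the 24 bpp entry is 0
+ * and that depth is special-cased in the mode-set code.
+ */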
+ mdev->bpp_shifts[0] = 0;
+ mdev->bpp_shifts[1] = 1;
+ mdev->bpp_shifts[2] = 0;
+ mdev->bpp_shifts[3] = 2;
+ return 0;
+}
+
+void mgag200_device_fini(struct mga_device *mdev)
+{
+ release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
+ mga_vram_fini(mdev);
+}
+
+/*
+ * Functions here will be called by the core once it has bound the driver to
+ * a PCI device
+ */
+
+
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct mga_device *mdev;
+ int r;
+
+ mdev = kzalloc(sizeof(struct mga_device), GFP_KERNEL);
+ if (mdev == NULL)
+ return -ENOMEM;
+ dev->dev_private = (void *)mdev;
+ mdev->dev = dev;
+
+ r = mgag200_device_init(dev, flags);
+ if (r) {
+ dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
+ goto out;
+ }
+ r = mgag200_mm_init(mdev);
+ if (r)
+ goto out;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.funcs = (void *)&mga_mode_funcs;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
+ r = mgag200_modeset_init(mdev);
+ if (r)
+ dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+out:
+ if (r)
+ mgag200_driver_unload(dev);
+ return r;
+}
+
+int mgag200_driver_unload(struct drm_device *dev)
+{
+ struct mga_device *mdev = dev->dev_private;
+
+ if (mdev == NULL)
+ return 0;
+ mgag200_modeset_fini(mdev);
+ mgag200_fbdev_fini(mdev);
+ drm_mode_config_cleanup(dev);
+ mgag200_mm_fini(mdev);
+ mgag200_device_fini(mdev);
+ kfree(mdev);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int mgag200_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct mgag200_bo *astbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = mgag200_bo_create(dev, size, 0, 0, &astbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &astbo->gem;
+ return 0;
+}
+
+int mgag200_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = mgag200_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
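+
+/*
+ * For example, a 1024x768 dumb buffer at 32 bpp gets a pitch of
+ * 1024 * 4 = 4096 bytes and a size of 4096 * 768 = 3 MiB, which
+ * mgag200_gem_create() then rounds up to a whole number of pages.
+ */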
+
+int mgag200_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+int mgag200_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
+ return 0;
+}
+
+void mgag200_bo_unref(struct mgag200_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+
+}
+
+void mgag200_gem_free_object(struct drm_gem_object *obj)
+{
+ struct mgag200_bo *mgag200_bo = gem_to_mga_bo(obj);
+
+ if (!mgag200_bo)
+ return;
+ mgag200_bo_unref(&mgag200_bo);
+}
+
+
+static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
+{
+ return bo->bo.addr_space_offset;
+}
+
+int
+mgag200_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct mgag200_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_mga_bo(obj);
+ *offset = mgag200_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
new file mode 100644
index 000000000000..d303061b251e
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -0,0 +1,1533 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+
+#include <linux/delay.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "mgag200_drv.h"
+
+#define MGAG200_LUT_SIZE 256
+
+/*
+ * This file contains setup code for the CRTC.
+ */
+
+static void mga_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ int i;
+
+ if (!crtc->enabled)
+ return;
+
+ WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+
+ for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+ /* VGA registers */
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_b[i]);
+ }
+}
+
+static inline void mga_wait_vsync(struct mga_device *mdev)
+{
+ unsigned int count = 0;
+ unsigned int status = 0;
+
+ do {
+ status = RREG32(MGAREG_Status);
+ count++;
+ } while ((status & 0x08) && (count < 250000));
+ count = 0;
+ status = 0;
+ do {
+ status = RREG32(MGAREG_Status);
+ count++;
+ } while (!(status & 0x08) && (count < 250000));
+}
+
+static inline void mga_wait_busy(struct mga_device *mdev)
+{
+ unsigned int count = 0;
+ unsigned int status = 0;
+ do {
+ status = RREG8(MGAREG_Status + 2);
+ count++;
+ } while ((status & 0x01) && (count < 500000));
+}
+
+/*
+ * The core passes the desired mode to the CRTC code to see whether any
+ * CRTC-specific modifications need to be made to it. We're in a position
+ * to just pass that straight through, so this does nothing
+ */
+static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+
+ m = n = p = 0;
+ vcomax = 320000;
+ vcomin = 160000;
+ pllreffreq = 25000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 8; testp > 0; testp /= 2) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testn = 17; testn < 256; testn++) {
+ for (testm = 1; testm < 32; testm++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm - 1;
+ n = testn - 1;
+ p = testp - 1;
+ }
+ }
+ }
+ }
+
+ if (delta > permitteddelta) {
+ printk(KERN_WARNING "PLL delta too large\n");
+ return 1;
+ }
+
+ WREG_DAC(MGA1064_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_PIX_PLLC_P, p);
+ return 0;
+}
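+
+/*
+ * The PLL search above is a brute-force walk of the divider space: for each
+ * post-divider testp (8, 4, 2, 1) whose VCO frequency (clock * testp) falls
+ * inside the 160-320 MHz window, every n/m pair in range is tried and the
+ * one with the smallest error against the requested clock wins; the result
+ * is rejected if that error exceeds 0.5% (clock * 5 / 1000). The other
+ * *_set_plls() variants below follow the same pattern with per-chip VCO
+ * ranges, reference clocks and register sequences.
+ */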
+
+static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+ int i, j, tmpcount, vcount;
+ bool pll_locked = false;
+ u8 tmp;
+
+ m = n = p = 0;
+ vcomax = 550000;
+ vcomin = 150000;
+ pllreffreq = 48000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 1; testp < 9; testp++) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 17; testm++) {
+ for (testn = 1; testn < 151; testn++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = (testm - 1) | ((n >> 1) & 0x80);
+ p = testp - 1;
+ }
+ }
+ }
+ }
+
+ for (i = 0; i <= 32 && pll_locked == false; i++) {
+ if (i > 0) {
+ WREG8(MGAREG_CRTC_INDEX, 0x1e);
+ tmp = RREG8(MGAREG_CRTC_DATA);
+ if (tmp < 0xff)
+ WREG8(MGAREG_CRTC_DATA, tmp+1);
+ }
+
+ /* set pixclkdis to 1 */
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+
+ /* select PLL Set C */
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ /* reset the PLL */
+ WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x04;
+ WREG_DAC(MGA1064_VREF_CTL, tmp);
+
+ udelay(50);
+
+ /* program pixel pll register */
+ WREG_DAC(MGA1064_WB_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_P, p);
+
+ udelay(50);
+
+ /* turn pll on */
+ WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x04;
+ WREG_DAC(MGA1064_VREF_CTL, tmp);
+
+ udelay(500);
+
+ /* select the pixel pll */
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
+ tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+
+ /* reset dotclock rate bit */
+ WREG8(MGAREG_SEQ_INDEX, 1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+ tmp &= ~0x8;
+ WREG8(MGAREG_SEQ_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ vcount = RREG8(MGAREG_VCOUNT);
+
+ for (j = 0; j < 30 && pll_locked == false; j++) {
+ tmpcount = RREG8(MGAREG_VCOUNT);
+ if (tmpcount < vcount)
+ vcount = 0;
+ if ((tmpcount - vcount) > 2)
+ pll_locked = true;
+ else
+ udelay(5);
+ }
+ }
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_REMHEADCTL_CLKDIS;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+ return 0;
+}
+
+static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+ u8 tmp;
+
+ m = n = p = 0;
+ vcomax = 550000;
+ vcomin = 150000;
+ pllreffreq = 50000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 16; testp > 0; testp--) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testn = 1; testn < 257; testn++) {
+ for (testm = 1; testm < 17; testm++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = testm - 1;
+ p = testp - 1;
+ }
+ }
+ }
+ }
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+ WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_EV_PIX_PLLC_P, p);
+
+ udelay(50);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+ WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3 << 2);
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ return 0;
+}
+
+static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+ int i, j, tmpcount, vcount;
+ u8 tmp;
+ bool pll_locked = false;
+
+ m = n = p = 0;
+ vcomax = 800000;
+ vcomin = 400000;
+ pllreffreq = 3333;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 16; testp > 0; testp--) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 33; testm++) {
+ for (testn = 1; testn < 257; testn++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = (testm - 1) | ((n >> 1) & 0x80);
+ p = testp - 1;
+ }
+ if ((clock * testp) >= 600000)
+ p |= 0x80;
+ }
+ }
+ }
+ for (i = 0; i <= 32 && pll_locked == false; i++) {
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ WREG_DAC(MGA1064_EH_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_EH_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_EH_PIX_PLLC_P, p);
+
+ udelay(500);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ vcount = RREG8(MGAREG_VCOUNT);
+
+ for (j = 0; j < 30 && pll_locked == false; j++) {
+ tmpcount = RREG8(MGAREG_VCOUNT);
+ if (tmpcount < vcount)
+ vcount = 0;
+ if ((tmpcount - vcount) > 2)
+ pll_locked = true;
+ else
+ udelay(5);
+ }
+ }
+
+ return 0;
+}
+
+static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta;
+ unsigned int testr, testn, testo;
+ int testm; /* counts down to 0, so it must be signed */
+ unsigned int p, m, n;
+ unsigned int computed;
+ int tmp;
+
+ m = n = p = 0;
+ vcomax = 1488000;
+ vcomin = 1056000;
+ pllreffreq = 48000;
+
+ delta = 0xffffffff;
+
+ for (testr = 0; testr < 4; testr++) {
+ if (delta == 0)
+ break;
+ for (testn = 5; testn < 129; testn++) {
+ if (delta == 0)
+ break;
+ for (testm = 3; testm >= 0; testm--) {
+ if (delta == 0)
+ break;
+ for (testo = 5; testo < 33; testo++) {
+ computed = pllreffreq * (testn + 1) /
+ (testr + 1);
+ if (computed < vcomin)
+ continue;
+ if (computed > vcomax)
+ continue;
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm | (testo << 3);
+ n = testn;
+ p = testr | (testr << 3);
+ }
+ }
+ }
+ }
+ }
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3<<2) | 0xc0;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ WREG_DAC(MGA1064_ER_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_ER_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_ER_PIX_PLLC_P, p);
+
+ udelay(50);
+
+ return 0;
+}
+
+static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
+{
+ switch (mdev->type) {
+ case G200_SE_A:
+ case G200_SE_B:
+ return mga_g200se_set_plls(mdev, clock);
+ case G200_WB:
+ return mga_g200wb_set_plls(mdev, clock);
+ case G200_EV:
+ return mga_g200ev_set_plls(mdev, clock);
+ case G200_EH:
+ return mga_g200eh_set_plls(mdev, clock);
+ case G200_ER:
+ return mga_g200er_set_plls(mdev, clock);
+ }
+ return 0;
+}
+
+static void mga_g200wb_prepare(struct drm_crtc *crtc)
+{
+ struct mga_device *mdev = crtc->dev->dev_private;
+ u8 tmp;
+ int iter_max;
+
+ /* 1- The first step is to warn the BMC of an upcoming mode change.
+ * We do this by switching the misc<0> line to output. */
+
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x10;
+ WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+
+ /* we are putting a 1 on the misc<0> line */
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x10;
+ WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+
+ /* 2- The second step is to mask any further scan requests.
+ * This is done by asserting the remfreqmsk bit (XSPAREREG<7>).
+ */
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x80;
+ WREG_DAC(MGA1064_SPAREREG, tmp);
+
+ /* 3a- The third step is to verify whether there is an active scan.
+ * We are searching for a 0 on remhsyncsts (XSPAREREG<0>).
+ */
+ iter_max = 300;
+ while (!(tmp & 0x1) && iter_max) {
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ udelay(1000);
+ iter_max--;
+ }
+
+ /* 3b- This step occurs only if the remote is actually scanning.
+ * We wait for the end of the frame, which is signalled by a 1 on
+ * remvsyncsts (XSPAREREG<1>).
+ */
+ if (iter_max) {
+ iter_max = 300;
+ while ((tmp & 0x2) && iter_max) {
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ udelay(1000);
+ iter_max--;
+ }
+ }
+}
+
+static void mga_g200wb_commit(struct drm_crtc *crtc)
+{
+ u8 tmp;
+ struct mga_device *mdev = crtc->dev->dev_private;
+
+ /* 1- The first step is to ensure that the vrsten and hrsten are set */
+ WREG8(MGAREG_CRTCEXT_INDEX, 1);
+ tmp = RREG8(MGAREG_CRTCEXT_DATA);
+ WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);
+
+ /* 2- second step is to assert the rstlvl2 */
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x8;
+ WREG8(DAC_DATA, tmp);
+
+ /* wait 10 us */
+ udelay(10);
+
+ /* 3- deassert rstlvl2 */
+ tmp &= ~0x08;
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+ WREG8(DAC_DATA, tmp);
+
+ /* 4- remove mask of scan request */
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x80;
+ WREG8(DAC_DATA, tmp);
+
+ /* 5- put back a 0 on the misc<0> line */
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x10;
+ WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+}
+
+
+void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
+{
+ struct mga_device *mdev = crtc->dev->dev_private;
+ u32 addr;
+ int count;
+
+ while (RREG8(0x1fda) & 0x08);
+ while (!(RREG8(0x1fda) & 0x08));
+
+ count = RREG8(MGAREG_VCOUNT) + 2;
+ while (RREG8(MGAREG_VCOUNT) < count);
+
+ addr = offset >> 2;
+ WREG_CRT(0x0d, (u8)(addr & 0xff));
+ WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
+ WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf);
+}
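+
+/*
+ * The byte offset is converted to a 32-bit word address (offset >> 2) and
+ * split across CRTC index 0x0d (bits 0-7), 0x0c (bits 8-15) and 0xaf
+ * (bits 16-19). The busy-waits above poll the VGA status register mirrored
+ * at 0x1fda and VCOUNT so that the new origin takes effect around vertical
+ * blanking rather than mid-scanout.
+ */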
+
+
+/* the framebuffer being scanned out is pinned in VRAM; the old one is pushed back to system memory */
+static int mga_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct mga_device *mdev = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct mga_framebuffer *mga_fb;
+ struct mgag200_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* push the previous fb to system ram */
+ if (!atomic && fb) {
+ mga_fb = to_mga_framebuffer(fb);
+ obj = mga_fb->obj;
+ bo = gem_to_mga_bo(obj);
+ ret = mgag200_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+ mgag200_bo_push_sysram(bo);
+ mgag200_bo_unreserve(bo);
+ }
+
+ mga_fb = to_mga_framebuffer(crtc->fb);
+ obj = mga_fb->obj;
+ bo = gem_to_mga_bo(obj);
+
+ ret = mgag200_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ mgag200_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&mdev->mfbdev->mfb == mga_fb) {
+ /* if this is the console framebuffer being pushed in, kmap it */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret)
+ DRM_ERROR("failed to kmap fbcon\n");
+
+ }
+ mgag200_bo_unreserve(bo);
+
+ DRM_INFO("mga base %llx\n", gpu_addr);
+
+ mga_set_start_address(crtc, (u32)gpu_addr);
+
+ return 0;
+}
+
+static int mga_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int mga_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ int hdisplay, hsyncstart, hsyncend, htotal;
+ int vdisplay, vsyncstart, vsyncend, vtotal;
+ int pitch;
+ int option = 0, option2 = 0;
+ int i;
+ unsigned char misc = 0;
+ unsigned char ext_vga[6];
+ unsigned char ext_vga_index24;
+ unsigned char dac_index90 = 0;
+ u8 bppshift;
+
+ static unsigned char dacvalue[] = {
+ /* 0x00: */ 0, 0, 0, 0, 0, 0, 0x00, 0,
+ /* 0x08: */ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x10: */ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x18: */ 0x00, 0, 0xC9, 0xFF, 0xBF, 0x20, 0x1F, 0x20,
+ /* 0x20: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x28: */ 0x00, 0x00, 0x00, 0x00, 0, 0, 0, 0x40,
+ /* 0x30: */ 0x00, 0xB0, 0x00, 0xC2, 0x34, 0x14, 0x02, 0x83,
+ /* 0x38: */ 0x00, 0x93, 0x00, 0x77, 0x00, 0x00, 0x00, 0x3A,
+ /* 0x40: */ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0
+ };
+
+ bppshift = mdev->bpp_shifts[(crtc->fb->bits_per_pixel >> 3) - 1];
+
+ switch (mdev->type) {
+ case G200_SE_A:
+ case G200_SE_B:
+ dacvalue[MGA1064_VREF_CTL] = 0x03;
+ dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
+ dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_DAC_EN |
+ MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS;
+ if (mdev->has_sdram)
+ option = 0x40049120;
+ else
+ option = 0x4004d120;
+ option2 = 0x00008000;
+ break;
+ case G200_WB:
+ dacvalue[MGA1064_VREF_CTL] = 0x07;
+ option = 0x41049120;
+ option2 = 0x0000b000;
+ break;
+ case G200_EV:
+ dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
+ dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS;
+ option = 0x00000120;
+ option2 = 0x0000b000;
+ break;
+ case G200_EH:
+ dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS;
+ option = 0x00000120;
+ option2 = 0x0000b000;
+ break;
+ case G200_ER:
+ dac_index90 = 0;
+ break;
+ }
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_8bits;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_15bits;
+ else
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_16bits;
+ break;
+ case 24:
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_24bits;
+ break;
+ case 32:
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_32_24bits;
+ break;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ misc |= 0x40;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ misc |= 0x80;
+
+
+ for (i = 0; i < sizeof(dacvalue); i++) {
+ if ((i <= 0x03) ||
+ (i == 0x07) ||
+ (i == 0x0b) ||
+ (i == 0x0f) ||
+ ((i >= 0x13) && (i <= 0x17)) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i >= 0x30) && (i <= 0x37)))
+ continue;
+ if (IS_G200_SE(mdev) &&
+ ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)))
+ continue;
+ if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH) &&
+ (i >= 0x44) && (i <= 0x4e))
+ continue;
+
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ if (mdev->type == G200_ER) {
+ WREG_DAC(0x90, dac_index90);
+ }
+
+
+ if (option)
+ pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
+ if (option2)
+ pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
+
+ WREG_SEQ(2, 0xf);
+ WREG_SEQ(3, 0);
+ WREG_SEQ(4, 0xe);
+
+ pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
+ pitch = pitch >> (4 - bppshift);
+
+ hdisplay = mode->hdisplay / 8 - 1;
+ hsyncstart = mode->hsync_start / 8 - 1;
+ hsyncend = mode->hsync_end / 8 - 1;
+ htotal = mode->htotal / 8 - 1;
+
+ /* Work around hardware quirk */
+ if ((htotal & 0x07) == 0x06 || (htotal & 0x07) == 0x04)
+ htotal++;
+
+ vdisplay = mode->vdisplay - 1;
+ vsyncstart = mode->vsync_start - 1;
+ vsyncend = mode->vsync_end - 1;
+ vtotal = mode->vtotal - 2;
+
+ WREG_GFX(0, 0);
+ WREG_GFX(1, 0);
+ WREG_GFX(2, 0);
+ WREG_GFX(3, 0);
+ WREG_GFX(4, 0);
+ WREG_GFX(5, 0x40);
+ WREG_GFX(6, 0x5);
+ WREG_GFX(7, 0xf);
+ WREG_GFX(8, 0xf);
+
+ WREG_CRT(0, htotal - 4);
+ WREG_CRT(1, hdisplay);
+ WREG_CRT(2, hdisplay);
+ WREG_CRT(3, (htotal & 0x1F) | 0x80);
+ WREG_CRT(4, hsyncstart);
+ WREG_CRT(5, ((htotal & 0x20) << 2) | (hsyncend & 0x1F));
+ WREG_CRT(6, vtotal & 0xFF);
+ WREG_CRT(7, ((vtotal & 0x100) >> 8) |
+ ((vdisplay & 0x100) >> 7) |
+ ((vsyncstart & 0x100) >> 6) |
+ ((vdisplay & 0x100) >> 5) |
+ ((vdisplay & 0x100) >> 4) | /* linecomp */
+ ((vtotal & 0x200) >> 4)|
+ ((vdisplay & 0x200) >> 3) |
+ ((vsyncstart & 0x200) >> 2));
+ WREG_CRT(9, ((vdisplay & 0x200) >> 4) |
+ ((vdisplay & 0x200) >> 3));
+ WREG_CRT(10, 0);
+ WREG_CRT(11, 0);
+ WREG_CRT(12, 0);
+ WREG_CRT(13, 0);
+ WREG_CRT(14, 0);
+ WREG_CRT(15, 0);
+ WREG_CRT(16, vsyncstart & 0xFF);
+ WREG_CRT(17, (vsyncend & 0x0F) | 0x20);
+ WREG_CRT(18, vdisplay & 0xFF);
+ WREG_CRT(19, pitch & 0xFF);
+ WREG_CRT(20, 0);
+ WREG_CRT(21, vdisplay & 0xFF);
+ WREG_CRT(22, (vtotal + 1) & 0xFF);
+ WREG_CRT(23, 0xc3);
+ WREG_CRT(24, vdisplay & 0xFF);
+
+ ext_vga[0] = 0;
+ ext_vga[5] = 0;
+
+ /* TODO interlace */
+
+ ext_vga[0] |= (pitch & 0x300) >> 4;
+ ext_vga[1] = (((htotal - 4) & 0x100) >> 8) |
+ ((hdisplay & 0x100) >> 7) |
+ ((hsyncstart & 0x100) >> 6) |
+ (htotal & 0x40);
+ ext_vga[2] = ((vtotal & 0xc00) >> 10) |
+ ((vdisplay & 0x400) >> 8) |
+ ((vdisplay & 0xc00) >> 7) |
+ ((vsyncstart & 0xc00) >> 5) |
+ ((vdisplay & 0x400) >> 3);
+ if (crtc->fb->bits_per_pixel == 24)
+ ext_vga[3] = (((1 << bppshift) * 3) - 1) | 0x80;
+ else
+ ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
+ ext_vga[4] = 0;
+ if (mdev->type == G200_WB)
+ ext_vga[1] |= 0x88;
+
+ ext_vga_index24 = 0x05;
+
+ /* Set pixel clocks */
+ misc = 0x2d;
+ WREG8(MGA_MISC_OUT, misc);
+
+ mga_crtc_set_plls(mdev, mode->clock);
+
+ for (i = 0; i < 6; i++) {
+ WREG_ECRT(i, ext_vga[i]);
+ }
+
+ if (mdev->type == G200_ER)
+ WREG_ECRT(24, ext_vga_index24);
+
+ if (mdev->type == G200_EV) {
+ WREG_ECRT(6, 0);
+ }
+
+ WREG_ECRT(0, ext_vga[0]);
+ /* Enable mga pixel clock */
+ misc = 0x2d;
+
+ WREG8(MGA_MISC_OUT, misc);
+
+ if (adjusted_mode)
+ memcpy(&mdev->mode, mode, sizeof(struct drm_display_mode));
+
+ mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+
+ /* reset tagfifo */
+ if (mdev->type == G200_ER) {
+ u32 mem_ctl = RREG32(MGAREG_MEMCTL);
+ u8 seq1;
+
+ /* screen off */
+ WREG8(MGAREG_SEQ_INDEX, 0x01);
+ seq1 = RREG8(MGAREG_SEQ_DATA) | 0x20;
+ WREG8(MGAREG_SEQ_DATA, seq1);
+
+ WREG32(MGAREG_MEMCTL, mem_ctl | 0x00200000);
+ udelay(1000);
+ WREG32(MGAREG_MEMCTL, mem_ctl & ~0x00200000);
+
+ WREG8(MGAREG_SEQ_DATA, seq1 & ~0x20);
+ }
+
+
+ if (IS_G200_SE(mdev)) {
+ if (mdev->reg_1e24 >= 0x02) {
+ u8 hi_pri_lvl;
+ u32 bpp;
+ u32 mb;
+
+ if (crtc->fb->bits_per_pixel > 16)
+ bpp = 32;
+ else if (crtc->fb->bits_per_pixel > 8)
+ bpp = 16;
+ else
+ bpp = 8;
+
+ mb = (mode->clock * bpp) / 1000;
+ if (mb > 3100)
+ hi_pri_lvl = 0;
+ else if (mb > 2600)
+ hi_pri_lvl = 1;
+ else if (mb > 1900)
+ hi_pri_lvl = 2;
+ else if (mb > 1160)
+ hi_pri_lvl = 3;
+ else if (mb > 440)
+ hi_pri_lvl = 4;
+ else
+ hi_pri_lvl = 5;
+
+ WREG8(0x1fde, 0x06);
+ WREG8(0x1fdf, hi_pri_lvl);
+ } else {
+ if (mdev->reg_1e24 >= 0x01)
+ WREG8(0x1fdf, 0x03);
+ else
+ WREG8(0x1fdf, 0x04);
+ }
+ }
+ return 0;
+}
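+
+/*
+ * The hi_pri_lvl selection above is driven by the rough scanout bandwidth
+ * of the mode (mode->clock in kHz * bpp / 1000, i.e. megabits per second).
+ * For example, 1920x1080@60 (148.5 MHz pixel clock) at 32 bpp works out to
+ * 4752 Mbit/s, which is above the 3100 threshold and therefore selects
+ * level 0.
+ */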
+
+#if 0 /* code from mjg to attempt D3 on crtc dpms off - revisit later */
+static int mga_suspend(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ int option;
+
+ if (mdev->suspended)
+ return 0;
+
+ WREG_SEQ(1, 0x20);
+ WREG_ECRT(1, 0x30);
+ /* Disable the pixel clock */
+ WREG_DAC(0x1a, 0x05);
+ /* Power down the DAC */
+ WREG_DAC(0x1e, 0x18);
+ /* Power down the pixel PLL */
+ WREG_DAC(0x1a, 0x0d);
+
+ /* Disable PLLs and clocks */
+ pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+ option &= ~(0x1F8024);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+ pci_set_power_state(pdev, PCI_D3hot);
+ pci_disable_device(pdev);
+
+ mdev->suspended = true;
+
+ return 0;
+}
+
+static int mga_resume(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ int option;
+
+ if (!mdev->suspended)
+ return 0;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_device(pdev);
+
+ /* Disable sysclk */
+ pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+ option &= ~(0x4);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+
+ mdev->suspended = false;
+
+ return 0;
+}
+
+#endif
+
+static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ u8 seq1 = 0, crtcext1 = 0;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ seq1 = 0;
+ crtcext1 = 0;
+ mga_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ seq1 = 0x20;
+ crtcext1 = 0x10;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ seq1 = 0x20;
+ crtcext1 = 0x20;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ seq1 = 0x20;
+ crtcext1 = 0x30;
+ break;
+ }
+
+#if 0
+ if (mode == DRM_MODE_DPMS_OFF) {
+ mga_suspend(crtc);
+ }
+#endif
+ WREG8(MGAREG_SEQ_INDEX, 0x01);
+ seq1 |= RREG8(MGAREG_SEQ_DATA) & ~0x20;
+ mga_wait_vsync(mdev);
+ mga_wait_busy(mdev);
+ WREG8(MGAREG_SEQ_DATA, seq1);
+ msleep(20);
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x01);
+ crtcext1 |= RREG8(MGAREG_CRTCEXT_DATA) & ~0x30;
+ WREG8(MGAREG_CRTCEXT_DATA, crtcext1);
+
+#if 0
+ if (mode == DRM_MODE_DPMS_ON && mdev->suspended == true) {
+ mga_resume(crtc);
+ drm_helper_resume_force_mode(dev);
+ }
+#endif
+}
+
+/*
+ * This is called before a mode is programmed. A typical use might be to
+ * blank the screen via DPMS while programming to avoid showing intermediate
+ * stages, but that's not relevant to us.
+ */
+static void mga_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ u8 tmp;
+
+ /* mga_resume(crtc);*/
+
+ WREG8(MGAREG_CRTC_INDEX, 0x11);
+ tmp = RREG8(MGAREG_CRTC_DATA);
+ WREG_CRT(0x11, tmp | 0x80);
+
+ if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
+ WREG_SEQ(0, 1);
+ msleep(50);
+ WREG_SEQ(1, 0x20);
+ msleep(20);
+ } else {
+ WREG8(MGAREG_SEQ_INDEX, 0x1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+
+ /* start sync reset */
+ WREG_SEQ(0, 1);
+ WREG_SEQ(1, tmp | 0x20);
+ }
+
+ if (mdev->type == G200_WB)
+ mga_g200wb_prepare(crtc);
+
+ WREG_CRT(17, 0);
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void mga_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ u8 tmp;
+
+ if (mdev->type == G200_WB)
+ mga_g200wb_commit(crtc);
+
+ if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
+ msleep(50);
+ WREG_SEQ(1, 0x0);
+ msleep(20);
+ WREG_SEQ(0, 0x3);
+ } else {
+ WREG8(MGAREG_SEQ_INDEX, 0x1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+
+ tmp &= ~0x20;
+ WREG_SEQ(0x1, tmp);
+ WREG_SEQ(0, 3);
+ }
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode so can't perform smooth fades on deeper modes,
+ * but it's a requirement that we provide the function
+ */
+static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ int end = (start + size > MGAG200_LUT_SIZE) ? MGAG200_LUT_SIZE : start + size;
+ int i;
+
+ for (i = start; i < end; i++) {
+ mga_crtc->lut_r[i] = red[i] >> 8;
+ mga_crtc->lut_g[i] = green[i] >> 8;
+ mga_crtc->lut_b[i] = blue[i] >> 8;
+ }
+ mga_crtc_load_lut(crtc);
+}
+
+/* Simple cleanup function */
+static void mga_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(mga_crtc);
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs mga_crtc_funcs = {
+ .gamma_set = mga_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = mga_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs mga_helper_funcs = {
+ .dpms = mga_crtc_dpms,
+ .mode_fixup = mga_crtc_mode_fixup,
+ .mode_set = mga_crtc_mode_set,
+ .mode_set_base = mga_crtc_mode_set_base,
+ .prepare = mga_crtc_prepare,
+ .commit = mga_crtc_commit,
+ .load_lut = mga_crtc_load_lut,
+};
+
+/* CRTC setup */
+static void mga_crtc_init(struct drm_device *dev)
+{
+ struct mga_device *mdev = dev->dev_private;
+ struct mga_crtc *mga_crtc;
+ int i;
+
+ mga_crtc = kzalloc(sizeof(struct mga_crtc) +
+ (MGAG200FB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ GFP_KERNEL);
+
+ if (mga_crtc == NULL)
+ return;
+
+ drm_crtc_init(dev, &mga_crtc->base, &mga_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
+ mdev->mode_info.crtc = mga_crtc;
+
+ for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+ mga_crtc->lut_r[i] = i;
+ mga_crtc->lut_g[i] = i;
+ mga_crtc->lut_b[i] = i;
+ }
+
+ drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+ mga_crtc->lut_r[regno] = red >> 8;
+ mga_crtc->lut_g[regno] = green >> 8;
+ mga_crtc->lut_b[regno] = blue >> 8;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+ *red = (u16)mga_crtc->lut_r[regno] << 8;
+ *green = (u16)mga_crtc->lut_g[regno] << 8;
+ *blue = (u16)mga_crtc->lut_b[regno] << 8;
+}
+
+/*
+ * The encoder comes after the CRTC in the output pipeline, but before
+ * the connector. It's responsible for ensuring that the digital
+ * stream is appropriately converted into the output format. Setup is
+ * very simple in this case - the on-chip DAC needs no programming beyond
+ * what the CRTC code already does, so these hooks are empty.
+ */
+
+/*
+ * These functions are analogous to those in the CRTC code, but are intended
+ * to handle any encoder-specific limitations
+ */
+static bool mga_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mga_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void mga_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+ return;
+}
+
+static void mga_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void mga_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+void mga_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mga_encoder);
+}
+
+static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = {
+ .dpms = mga_encoder_dpms,
+ .mode_fixup = mga_encoder_mode_fixup,
+ .mode_set = mga_encoder_mode_set,
+ .prepare = mga_encoder_prepare,
+ .commit = mga_encoder_commit,
+};
+
+static const struct drm_encoder_funcs mga_encoder_encoder_funcs = {
+ .destroy = mga_encoder_destroy,
+};
+
+static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct mga_encoder *mga_encoder;
+
+ mga_encoder = kzalloc(sizeof(struct mga_encoder), GFP_KERNEL);
+ if (!mga_encoder)
+ return NULL;
+
+ encoder = &mga_encoder->base;
+ encoder->possible_crtcs = 0x1;
+
+ drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
+
+ return encoder;
+}
+
+
+static int mga_vga_get_modes(struct drm_connector *connector)
+{
+ struct mga_connector *mga_connector = to_mga_connector(connector);
+ struct edid *edid;
+ int ret = 0;
+
+ edid = drm_get_edid(connector, &mga_connector->i2c->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+ return ret;
+}
+
+static int mga_vga_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* FIXME: Add bandwidth and g200se limitations */
+
+ if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
+ mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
+ mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
+ mode->crtc_vsync_end > 4096 || mode->crtc_vtotal > 4096) {
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
+struct drm_encoder *mga_connector_best_encoder(struct drm_connector
+ *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj =
+ drm_mode_object_find(connector->dev, enc_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static enum drm_connector_status mga_vga_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void mga_connector_destroy(struct drm_connector *connector)
+{
+ struct mga_connector *mga_connector = to_mga_connector(connector);
+ mgag200_i2c_destroy(mga_connector->i2c);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
+ .get_modes = mga_vga_get_modes,
+ .mode_valid = mga_vga_mode_valid,
+ .best_encoder = mga_connector_best_encoder,
+};
+
+struct drm_connector_funcs mga_vga_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = mga_vga_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = mga_connector_destroy,
+};
+
+static struct drm_connector *mga_vga_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct mga_connector *mga_connector;
+
+ mga_connector = kzalloc(sizeof(struct mga_connector), GFP_KERNEL);
+ if (!mga_connector)
+ return NULL;
+
+ connector = &mga_connector->base;
+
+ drm_connector_init(dev, connector,
+ &mga_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
+
+ mga_connector->i2c = mgag200_i2c_create(dev);
+ if (!mga_connector->i2c)
+ DRM_ERROR("failed to add ddc bus\n");
+
+ return connector;
+}
+
+
+int mgag200_modeset_init(struct mga_device *mdev)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ mdev->mode_info.mode_config_initialized = true;
+
+ mdev->dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
+ mdev->dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
+
+ mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
+
+ mga_crtc_init(mdev->dev);
+
+ encoder = mga_encoder_init(mdev->dev);
+ if (!encoder) {
+ DRM_ERROR("mga_encoder_init failed\n");
+ return -1;
+ }
+
+ connector = mga_vga_init(mdev->dev);
+ if (!connector) {
+ DRM_ERROR("mga_vga_init failed\n");
+ return -1;
+ }
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ ret = mgag200_fbdev_init(mdev);
+ if (ret) {
+ DRM_ERROR("mga_fbdev_init failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void mgag200_modeset_fini(struct mga_device *mdev)
+{
+
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
new file mode 100644
index 000000000000..fb24d8655feb
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -0,0 +1,661 @@
+/*
+ * MGA Millennium (MGA2064W) functions
+ * MGA Mystique (MGA1064SG) functions
+ *
+ * Copyright 1996 The XFree86 Project, Inc.
+ *
+ * Authors
+ * Dirk Hohndel
+ * hohndel@XFree86.Org
+ * David Dawes
+ * dawes@XFree86.Org
+ * Contributors:
+ * Guy DESBIEF, Aix-en-provence, France
+ * g.desbief@aix.pacwan.net
+ * MGA1064SG Mystique register file
+ */
+
+
+#ifndef _MGA_REG_H_
+#define _MGA_REG_H_
+
+#define MGAREG_DWGCTL 0x1c00
+#define MGAREG_MACCESS 0x1c04
+/* the following is a mystique only register */
+#define MGAREG_MCTLWTST 0x1c08
+#define MGAREG_ZORG 0x1c0c
+
+#define MGAREG_PAT0 0x1c10
+#define MGAREG_PAT1 0x1c14
+#define MGAREG_PLNWT 0x1c1c
+
+#define MGAREG_BCOL 0x1c20
+#define MGAREG_FCOL 0x1c24
+
+#define MGAREG_SRC0 0x1c30
+#define MGAREG_SRC1 0x1c34
+#define MGAREG_SRC2 0x1c38
+#define MGAREG_SRC3 0x1c3c
+
+#define MGAREG_XYSTRT 0x1c40
+#define MGAREG_XYEND 0x1c44
+
+#define MGAREG_SHIFT 0x1c50
+/* the following is a mystique only register */
+#define MGAREG_DMAPAD 0x1c54
+#define MGAREG_SGN 0x1c58
+#define MGAREG_LEN 0x1c5c
+
+#define MGAREG_AR0 0x1c60
+#define MGAREG_AR1 0x1c64
+#define MGAREG_AR2 0x1c68
+#define MGAREG_AR3 0x1c6c
+#define MGAREG_AR4 0x1c70
+#define MGAREG_AR5 0x1c74
+#define MGAREG_AR6 0x1c78
+
+#define MGAREG_CXBNDRY 0x1c80
+#define MGAREG_FXBNDRY 0x1c84
+#define MGAREG_YDSTLEN 0x1c88
+#define MGAREG_PITCH 0x1c8c
+
+#define MGAREG_YDST 0x1c90
+#define MGAREG_YDSTORG 0x1c94
+#define MGAREG_YTOP 0x1c98
+#define MGAREG_YBOT 0x1c9c
+
+#define MGAREG_CXLEFT 0x1ca0
+#define MGAREG_CXRIGHT 0x1ca4
+#define MGAREG_FXLEFT 0x1ca8
+#define MGAREG_FXRIGHT 0x1cac
+
+#define MGAREG_XDST 0x1cb0
+
+#define MGAREG_DR0 0x1cc0
+#define MGAREG_DR1 0x1cc4
+#define MGAREG_DR2 0x1cc8
+#define MGAREG_DR3 0x1ccc
+
+#define MGAREG_DR4 0x1cd0
+#define MGAREG_DR5 0x1cd4
+#define MGAREG_DR6 0x1cd8
+#define MGAREG_DR7 0x1cdc
+
+#define MGAREG_DR8 0x1ce0
+#define MGAREG_DR9 0x1ce4
+#define MGAREG_DR10 0x1ce8
+#define MGAREG_DR11 0x1cec
+
+#define MGAREG_DR12 0x1cf0
+#define MGAREG_DR13 0x1cf4
+#define MGAREG_DR14 0x1cf8
+#define MGAREG_DR15 0x1cfc
+
+#define MGAREG_SRCORG 0x2cb4
+#define MGAREG_DSTORG 0x2cb8
+
+/* add or OR this to one of the previous "power registers" to start
+ the drawing engine */
+
+#define MGAREG_EXEC 0x0100
+
+#define MGAREG_FIFOSTATUS 0x1e10
+#define MGAREG_Status 0x1e14
+#define MGAREG_CACHEFLUSH 0x1fff
+#define MGAREG_ICLEAR 0x1e18
+#define MGAREG_IEN 0x1e1c
+
+#define MGAREG_VCOUNT 0x1e20
+
+#define MGAREG_Reset 0x1e40
+
+#define MGAREG_OPMODE 0x1e54
+
+/* Warp Registers */
+#define MGAREG_WIADDR 0x1dc0
+#define MGAREG_WIADDR2 0x1dd8
+#define MGAREG_WGETMSB 0x1dc8
+#define MGAREG_WVRTXSZ 0x1dcc
+#define MGAREG_WACCEPTSEQ 0x1dd4
+#define MGAREG_WMISC 0x1e70
+
+#define MGAREG_MEMCTL 0x2e08
+
+/* OPMODE register additives */
+
+#define MGAOPM_DMA_GENERAL (0x00 << 2)
+#define MGAOPM_DMA_BLIT (0x01 << 2)
+#define MGAOPM_DMA_VECTOR (0x10 << 2)
+
+/* MACCESS register additives */
+#define MGAMAC_PW8 0x00
+#define MGAMAC_PW16 0x01
+#define MGAMAC_PW24 0x03 /* not a typo */
+#define MGAMAC_PW32 0x02 /* not a typo */
+#define MGAMAC_BYPASS332 0x10000000
+#define MGAMAC_NODITHER 0x40000000
+#define MGAMAC_DIT555 0x80000000
+
+/* DWGCTL register additives */
+
+/* Lines */
+
+#define MGADWG_LINE_OPEN 0x00
+#define MGADWG_AUTOLINE_OPEN 0x01
+#define MGADWG_LINE_CLOSE 0x02
+#define MGADWG_AUTOLINE_CLOSE 0x03
+
+/* Trapezoids */
+#define MGADWG_TRAP 0x04
+#define MGADWG_TEXTURE_TRAP 0x06
+
+/* BitBlts */
+
+#define MGADWG_BITBLT 0x08
+#define MGADWG_FBITBLT 0x0c
+#define MGADWG_ILOAD 0x09
+#define MGADWG_ILOAD_SCALE 0x0d
+#define MGADWG_ILOAD_FILTER 0x0f
+#define MGADWG_ILOAD_HIQH 0x07
+#define MGADWG_ILOAD_HIQHV 0x0e
+#define MGADWG_IDUMP 0x0a
+
+/* atype access to WRAM */
+
+#define MGADWG_RPL ( 0x00 << 4 )
+#define MGADWG_RSTR ( 0x01 << 4 )
+#define MGADWG_ZI ( 0x03 << 4 )
+#define MGADWG_BLK ( 0x04 << 4 )
+#define MGADWG_I ( 0x07 << 4 )
+
+/* specifies whether bit blits are linear or xy */
+#define MGADWG_LINEAR ( 0x01 << 7 )
+
+/* Z drawing mode; use MGADWG_NOZCMP to always draw (no Z compare) */
+
+#define MGADWG_NOZCMP ( 0x00 << 8 )
+#define MGADWG_ZE ( 0x02 << 8 )
+#define MGADWG_ZNE ( 0x03 << 8 )
+#define MGADWG_ZLT ( 0x04 << 8 )
+#define MGADWG_ZLTE ( 0x05 << 8 )
+#define MGADWG_GT ( 0x06 << 8 )
+#define MGADWG_GTE ( 0x07 << 8 )
+
+/* use this to force the colour expansion circuitry to do its work */
+
+#define MGADWG_SOLID ( 0x01 << 11 )
+
+/* ar register at zero */
+
+#define MGADWG_ARZERO ( 0x01 << 12 )
+
+#define MGADWG_SGNZERO ( 0x01 << 13 )
+
+#define MGADWG_SHIFTZERO ( 0x01 << 14 )
+
+/* See the table on page 4-43 for bop ALU operations */
+
+/* See the table on page 4-44 for translucidity masks */
+
+#define MGADWG_BMONOLEF ( 0x00 << 25 )
+#define MGADWG_BMONOWF ( 0x04 << 25 )
+#define MGADWG_BPLAN ( 0x01 << 25 )
+
+/* note that if BFCOL is specified for a bitblt, an fbitblt is performed
+ instead, so make sure you obey the fbitblt rules */
+
+#define MGADWG_BFCOL ( 0x02 << 25 )
+#define MGADWG_BUYUV ( 0x0e << 25 )
+#define MGADWG_BU32BGR ( 0x03 << 25 )
+#define MGADWG_BU32RGB ( 0x07 << 25 )
+#define MGADWG_BU24BGR ( 0x0b << 25 )
+#define MGADWG_BU24RGB ( 0x0f << 25 )
+
+#define MGADWG_PATTERN ( 0x01 << 29 )
+#define MGADWG_TRANSC ( 0x01 << 30 )
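+/*
+ * Illustrative sketch, not part of the original header: a plain
+ * screen-to-screen copy would typically OR a subset of these additives
+ * into a single DWGCTL value, along the lines of
+ *
+ *   dwgctl = MGADWG_BITBLT | MGADWG_RPL | MGADWG_SHIFTZERO |
+ *            MGADWG_SGNZERO | MGADWG_BFCOL;
+ *
+ * the exact bit combination depends on the operation and on the bop/ALU
+ * field, which is not defined in this header.
+ */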
+#define MGAREG_MISC_WRITE 0x3c2
+#define MGAREG_MISC_READ 0x3cc
+#define MGAREG_MEM_MISC_WRITE 0x1fc2
+#define MGAREG_MEM_MISC_READ 0x1fcc
+
+#define MGAREG_MISC_IOADSEL (0x1 << 0)
+#define MGAREG_MISC_RAMMAPEN (0x1 << 1)
+#define MGAREG_MISC_CLK_SEL_VGA25 (0x0 << 2)
+#define MGAREG_MISC_CLK_SEL_VGA28 (0x1 << 2)
+#define MGAREG_MISC_CLK_SEL_MGA_PIX (0x2 << 2)
+#define MGAREG_MISC_CLK_SEL_MGA_MSK (0x3 << 2)
+#define MGAREG_MISC_VIDEO_DIS (0x1 << 4)
+#define MGAREG_MISC_HIGH_PG_SEL (0x1 << 5)
+
+/* MMIO VGA registers */
+#define MGAREG_SEQ_INDEX 0x1fc4
+#define MGAREG_SEQ_DATA 0x1fc5
+#define MGAREG_CRTC_INDEX 0x1fd4
+#define MGAREG_CRTC_DATA 0x1fd5
+#define MGAREG_CRTCEXT_INDEX 0x1fde
+#define MGAREG_CRTCEXT_DATA 0x1fdf
+
+
+
+/* MGA bits for registers PCI_OPTION_REG */
+#define MGA1064_OPT_SYS_CLK_PCI ( 0x00 << 0 )
+#define MGA1064_OPT_SYS_CLK_PLL ( 0x01 << 0 )
+#define MGA1064_OPT_SYS_CLK_EXT ( 0x02 << 0 )
+#define MGA1064_OPT_SYS_CLK_MSK ( 0x03 << 0 )
+
+#define MGA1064_OPT_SYS_CLK_DIS ( 0x01 << 2 )
+#define MGA1064_OPT_G_CLK_DIV_1 ( 0x01 << 3 )
+#define MGA1064_OPT_M_CLK_DIV_1 ( 0x01 << 4 )
+
+#define MGA1064_OPT_SYS_PLL_PDN ( 0x01 << 5 )
+#define MGA1064_OPT_VGA_ION ( 0x01 << 8 )
+
+/* MGA registers in PCI config space */
+#define PCI_MGA_INDEX 0x44
+#define PCI_MGA_DATA 0x48
+#define PCI_MGA_OPTION 0x40
+#define PCI_MGA_OPTION2 0x50
+#define PCI_MGA_OPTION3 0x54
+
+#define RAMDAC_OFFSET 0x3c00
+
+/* TVP3026 direct registers */
+
+#define TVP3026_INDEX 0x00
+#define TVP3026_WADR_PAL 0x00
+#define TVP3026_COL_PAL 0x01
+#define TVP3026_PIX_RD_MSK 0x02
+#define TVP3026_RADR_PAL 0x03
+#define TVP3026_CUR_COL_ADDR 0x04
+#define TVP3026_CUR_COL_DATA 0x05
+#define TVP3026_DATA 0x0a
+#define TVP3026_CUR_RAM 0x0b
+#define TVP3026_CUR_XLOW 0x0c
+#define TVP3026_CUR_XHI 0x0d
+#define TVP3026_CUR_YLOW 0x0e
+#define TVP3026_CUR_YHI 0x0f
+
+/* TVP3026 indirect registers */
+
+#define TVP3026_SILICON_REV 0x01
+#define TVP3026_CURSOR_CTL 0x06
+#define TVP3026_LATCH_CTL 0x0f
+#define TVP3026_TRUE_COLOR_CTL 0x18
+#define TVP3026_MUX_CTL 0x19
+#define TVP3026_CLK_SEL 0x1a
+#define TVP3026_PAL_PAGE 0x1c
+#define TVP3026_GEN_CTL 0x1d
+#define TVP3026_MISC_CTL 0x1e
+#define TVP3026_GEN_IO_CTL 0x2a
+#define TVP3026_GEN_IO_DATA 0x2b
+#define TVP3026_PLL_ADDR 0x2c
+#define TVP3026_PIX_CLK_DATA 0x2d
+#define TVP3026_MEM_CLK_DATA 0x2e
+#define TVP3026_LOAD_CLK_DATA 0x2f
+#define TVP3026_KEY_RED_LOW 0x32
+#define TVP3026_KEY_RED_HI 0x33
+#define TVP3026_KEY_GREEN_LOW 0x34
+#define TVP3026_KEY_GREEN_HI 0x35
+#define TVP3026_KEY_BLUE_LOW 0x36
+#define TVP3026_KEY_BLUE_HI 0x37
+#define TVP3026_KEY_CTL 0x38
+#define TVP3026_MCLK_CTL 0x39
+#define TVP3026_SENSE_TEST 0x3a
+#define TVP3026_TEST_DATA 0x3b
+#define TVP3026_CRC_LSB 0x3c
+#define TVP3026_CRC_MSB 0x3d
+#define TVP3026_CRC_CTL 0x3e
+#define TVP3026_ID 0x3f
+#define TVP3026_RESET 0xff
+
+
+/* MGA1064 DAC Register file */
+/* MGA1064 direct registers */
+
+#define MGA1064_INDEX 0x00
+#define MGA1064_WADR_PAL 0x00
+#define MGA1064_SPAREREG 0x00
+#define MGA1064_COL_PAL 0x01
+#define MGA1064_PIX_RD_MSK 0x02
+#define MGA1064_RADR_PAL 0x03
+#define MGA1064_DATA 0x0a
+
+#define MGA1064_CUR_XLOW 0x0c
+#define MGA1064_CUR_XHI 0x0d
+#define MGA1064_CUR_YLOW 0x0e
+#define MGA1064_CUR_YHI 0x0f
+
+/* MGA1064 indirect registers */
+#define MGA1064_DVI_PIPE_CTL 0x03
+#define MGA1064_CURSOR_BASE_ADR_LOW 0x04
+#define MGA1064_CURSOR_BASE_ADR_HI 0x05
+#define MGA1064_CURSOR_CTL 0x06
+#define MGA1064_CURSOR_COL0_RED 0x08
+#define MGA1064_CURSOR_COL0_GREEN 0x09
+#define MGA1064_CURSOR_COL0_BLUE 0x0a
+
+#define MGA1064_CURSOR_COL1_RED 0x0c
+#define MGA1064_CURSOR_COL1_GREEN 0x0d
+#define MGA1064_CURSOR_COL1_BLUE 0x0e
+
+#define MGA1064_CURSOR_COL2_RED 0x010
+#define MGA1064_CURSOR_COL2_GREEN 0x011
+#define MGA1064_CURSOR_COL2_BLUE 0x012
+
+#define MGA1064_VREF_CTL 0x018
+
+#define MGA1064_MUL_CTL 0x19
+#define MGA1064_MUL_CTL_8bits 0x0
+#define MGA1064_MUL_CTL_15bits 0x01
+#define MGA1064_MUL_CTL_16bits 0x02
+#define MGA1064_MUL_CTL_24bits 0x03
+#define MGA1064_MUL_CTL_32bits 0x04
+#define MGA1064_MUL_CTL_2G8V16bits 0x05
+#define MGA1064_MUL_CTL_G16V16bits 0x06
+#define MGA1064_MUL_CTL_32_24bits 0x07
+
+#define MGA1064_PIX_CLK_CTL 0x1a
+#define MGA1064_PIX_CLK_CTL_CLK_DIS ( 0x01 << 2 )
+#define MGA1064_PIX_CLK_CTL_CLK_POW_DOWN ( 0x01 << 3 )
+#define MGA1064_PIX_CLK_CTL_SEL_PCI ( 0x00 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_PLL ( 0x01 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_EXT ( 0x02 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_MSK ( 0x03 << 0 )
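+/*
+ * Illustrative sketch, not part of the original header: the SEL field is
+ * normally updated read-modify-write against its mask, e.g.
+ *
+ *   tmp = RREG_DAC(MGA1064_PIX_CLK_CTL);
+ *   tmp = (tmp & ~MGA1064_PIX_CLK_CTL_SEL_MSK) | MGA1064_PIX_CLK_CTL_SEL_PLL;
+ *   WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+ *
+ * RREG_DAC()/WREG_DAC() stand in for whatever indexed DAC access helpers
+ * the driver provides.
+ */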
+
+#define MGA1064_GEN_CTL 0x1d
+#define MGA1064_GEN_CTL_SYNC_ON_GREEN_DIS (0x01 << 5)
+#define MGA1064_MISC_CTL 0x1e
+#define MGA1064_MISC_CTL_DAC_EN ( 0x01 << 0 )
+#define MGA1064_MISC_CTL_VGA ( 0x01 << 1 )
+#define MGA1064_MISC_CTL_DIS_CON ( 0x03 << 1 )
+#define MGA1064_MISC_CTL_MAFC ( 0x02 << 1 )
+#define MGA1064_MISC_CTL_VGA8 ( 0x01 << 3 )
+#define MGA1064_MISC_CTL_DAC_RAM_CS ( 0x01 << 4 )
+
+#define MGA1064_GEN_IO_CTL2 0x29
+#define MGA1064_GEN_IO_CTL 0x2a
+#define MGA1064_GEN_IO_DATA 0x2b
+#define MGA1064_SYS_PLL_M 0x2c
+#define MGA1064_SYS_PLL_N 0x2d
+#define MGA1064_SYS_PLL_P 0x2e
+#define MGA1064_SYS_PLL_STAT 0x2f
+
+#define MGA1064_REMHEADCTL 0x30
+#define MGA1064_REMHEADCTL_CLKDIS ( 0x01 << 0 )
+#define MGA1064_REMHEADCTL_CLKSL_OFF ( 0x00 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_PLL ( 0x01 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_PCI ( 0x02 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_MSK ( 0x03 << 1 )
+
+#define MGA1064_REMHEADCTL2 0x31
+
+#define MGA1064_ZOOM_CTL 0x38
+#define MGA1064_SENSE_TST 0x3a
+
+#define MGA1064_CRC_LSB 0x3c
+#define MGA1064_CRC_MSB 0x3d
+#define MGA1064_CRC_CTL 0x3e
+#define MGA1064_COL_KEY_MSK_LSB 0x40
+#define MGA1064_COL_KEY_MSK_MSB 0x41
+#define MGA1064_COL_KEY_LSB 0x42
+#define MGA1064_COL_KEY_MSB 0x43
+#define MGA1064_PIX_PLLA_M 0x44
+#define MGA1064_PIX_PLLA_N 0x45
+#define MGA1064_PIX_PLLA_P 0x46
+#define MGA1064_PIX_PLLB_M 0x48
+#define MGA1064_PIX_PLLB_N 0x49
+#define MGA1064_PIX_PLLB_P 0x4a
+#define MGA1064_PIX_PLLC_M 0x4c
+#define MGA1064_PIX_PLLC_N 0x4d
+#define MGA1064_PIX_PLLC_P 0x4e
+
+#define MGA1064_PIX_PLL_STAT 0x4f
+
+/*Added for G450 dual head*/
+
+#define MGA1064_VID_PLL_STAT 0x8c
+#define MGA1064_VID_PLL_P 0x8D
+#define MGA1064_VID_PLL_M 0x8E
+#define MGA1064_VID_PLL_N 0x8F
+
+/* Modified PLL for G200 Winbond (G200WB) */
+#define MGA1064_WB_PIX_PLLC_M 0xb7
+#define MGA1064_WB_PIX_PLLC_N 0xb6
+#define MGA1064_WB_PIX_PLLC_P 0xb8
+
+/* Modified PLL for G200 Maxim (G200EV) */
+#define MGA1064_EV_PIX_PLLC_M 0xb6
+#define MGA1064_EV_PIX_PLLC_N 0xb7
+#define MGA1064_EV_PIX_PLLC_P 0xb8
+
+/* Modified PLL for G200 EH */
+#define MGA1064_EH_PIX_PLLC_M 0xb6
+#define MGA1064_EH_PIX_PLLC_N 0xb7
+#define MGA1064_EH_PIX_PLLC_P 0xb8
+
+/* Modified PLL for G200 Maxim (G200ER) */
+#define MGA1064_ER_PIX_PLLC_M 0xb7
+#define MGA1064_ER_PIX_PLLC_N 0xb6
+#define MGA1064_ER_PIX_PLLC_P 0xb8
+
+#define MGA1064_DISP_CTL 0x8a
+#define MGA1064_DISP_CTL_DAC1OUTSEL_MASK 0x01
+#define MGA1064_DISP_CTL_DAC1OUTSEL_DIS 0x00
+#define MGA1064_DISP_CTL_DAC1OUTSEL_EN 0x01
+#define MGA1064_DISP_CTL_DAC2OUTSEL_MASK (0x03 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_DIS 0x00
+#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC1 (0x01 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC2 (0x02 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_TVE (0x03 << 2)
+#define MGA1064_DISP_CTL_PANOUTSEL_MASK (0x03 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_DIS 0x00
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC1 (0x01 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2RGB (0x02 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2656 (0x03 << 5)
+
+#define MGA1064_SYNC_CTL 0x8b
+
+#define MGA1064_PWR_CTL 0xa0
+#define MGA1064_PWR_CTL_DAC2_EN (0x01 << 0)
+#define MGA1064_PWR_CTL_VID_PLL_EN (0x01 << 1)
+#define MGA1064_PWR_CTL_PANEL_EN (0x01 << 2)
+#define MGA1064_PWR_CTL_RFIFO_EN (0x01 << 3)
+#define MGA1064_PWR_CTL_CFIFO_EN (0x01 << 4)
+
+#define MGA1064_PAN_CTL 0xa2
+
+/* Using crtc2 */
+#define MGAREG2_C2CTL 0x10
+#define MGAREG2_C2HPARAM 0x14
+#define MGAREG2_C2HSYNC 0x18
+#define MGAREG2_C2VPARAM 0x1c
+#define MGAREG2_C2VSYNC 0x20
+#define MGAREG2_C2STARTADD0 0x28
+
+#define MGAREG2_C2OFFSET 0x40
+#define MGAREG2_C2DATACTL 0x4c
+
+#define MGAREG_C2CTL 0x3c10
+#define MGAREG_C2CTL_C2_EN 0x01
+
+#define MGAREG_C2_HIPRILVL_M (0x07 << 4)
+#define MGAREG_C2_MAXHIPRI_M (0x07 << 8)
+
+#define MGAREG_C2CTL_PIXCLKSEL_MASK (0x03 << 1)
+#define MGAREG_C2CTL_PIXCLKSELH_MASK (0x01 << 14)
+#define MGAREG_C2CTL_PIXCLKSEL_PCICLK 0x00
+#define MGAREG_C2CTL_PIXCLKSEL_VDOCLK (0x01 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_PIXELPLL (0x02 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_VIDEOPLL (0x03 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_VDCLK (0x01 << 14)
+
+#define MGAREG_C2CTL_PIXCLKSEL_CRISTAL ((0x01 << 1) | (0x01 << 14))
+#define MGAREG_C2CTL_PIXCLKSEL_SYSTEMPLL ((0x02 << 1) | (0x01 << 14))
+
+#define MGAREG_C2CTL_PIXCLKDIS_MASK (0x01 << 3)
+#define MGAREG_C2CTL_PIXCLKDIS_DISABLE (0x01 << 3)
+
+#define MGAREG_C2CTL_CRTCDACSEL_MASK (0x01 << 20)
+#define MGAREG_C2CTL_CRTCDACSEL_CRTC1 0x00
+#define MGAREG_C2CTL_CRTCDACSEL_CRTC2 (0x01 << 20)
+
+#define MGAREG_C2HPARAM 0x3c14
+#define MGAREG_C2HSYNC 0x3c18
+#define MGAREG_C2VPARAM 0x3c1c
+#define MGAREG_C2VSYNC 0x3c20
+#define MGAREG_C2STARTADD0 0x3c28
+
+#define MGAREG_C2OFFSET 0x3c40
+#define MGAREG_C2DATACTL 0x3c4c
+
+/* video register */
+
+#define MGAREG_BESA1C3ORG 0x3d60
+#define MGAREG_BESA1CORG 0x3d10
+#define MGAREG_BESA1ORG 0x3d00
+#define MGAREG_BESCTL 0x3d20
+#define MGAREG_BESGLOBCTL 0x3dc0
+#define MGAREG_BESHCOORD 0x3d28
+#define MGAREG_BESHISCAL 0x3d30
+#define MGAREG_BESHSRCEND 0x3d3c
+#define MGAREG_BESHSRCLST 0x3d50
+#define MGAREG_BESHSRCST 0x3d38
+#define MGAREG_BESLUMACTL 0x3d40
+#define MGAREG_BESPITCH 0x3d24
+#define MGAREG_BESV1SRCLST 0x3d54
+#define MGAREG_BESV1WGHT 0x3d48
+#define MGAREG_BESVCOORD 0x3d2c
+#define MGAREG_BESVISCAL 0x3d34
+
+/* texture engine registers */
+
+#define MGAREG_TMR0 0x2c00
+#define MGAREG_TMR1 0x2c04
+#define MGAREG_TMR2 0x2c08
+#define MGAREG_TMR3 0x2c0c
+#define MGAREG_TMR4 0x2c10
+#define MGAREG_TMR5 0x2c14
+#define MGAREG_TMR6 0x2c18
+#define MGAREG_TMR7 0x2c1c
+#define MGAREG_TMR8 0x2c20
+#define MGAREG_TEXORG 0x2c24
+#define MGAREG_TEXWIDTH 0x2c28
+#define MGAREG_TEXHEIGHT 0x2c2c
+#define MGAREG_TEXCTL 0x2c30
+# define MGA_TW4 (0x00000000)
+# define MGA_TW8 (0x00000001)
+# define MGA_TW15 (0x00000002)
+# define MGA_TW16 (0x00000003)
+# define MGA_TW12 (0x00000004)
+# define MGA_TW32 (0x00000006)
+# define MGA_TW8A (0x00000007)
+# define MGA_TW8AL (0x00000008)
+# define MGA_TW422 (0x0000000A)
+# define MGA_TW422UYVY (0x0000000B)
+# define MGA_PITCHLIN (0x00000100)
+# define MGA_NOPERSPECTIVE (0x00200000)
+# define MGA_TAKEY (0x02000000)
+# define MGA_TAMASK (0x04000000)
+# define MGA_CLAMPUV (0x18000000)
+# define MGA_TEXMODULATE (0x20000000)
+#define MGAREG_TEXCTL2 0x2c3c
+# define MGA_G400_TC2_MAGIC (0x00008000)
+# define MGA_TC2_DECALBLEND (0x00000001)
+# define MGA_TC2_IDECAL (0x00000002)
+# define MGA_TC2_DECALDIS (0x00000004)
+# define MGA_TC2_CKSTRANSDIS (0x00000010)
+# define MGA_TC2_BORDEREN (0x00000020)
+# define MGA_TC2_SPECEN (0x00000040)
+# define MGA_TC2_DUALTEX (0x00000080)
+# define MGA_TC2_TABLEFOG (0x00000100)
+# define MGA_TC2_BUMPMAP (0x00000200)
+# define MGA_TC2_SELECT_TMU1 (0x80000000)
+#define MGAREG_TEXTRANS 0x2c34
+#define MGAREG_TEXTRANSHIGH 0x2c38
+#define MGAREG_TEXFILTER 0x2c58
+# define MGA_MIN_NRST (0x00000000)
+# define MGA_MIN_BILIN (0x00000002)
+# define MGA_MIN_ANISO (0x0000000D)
+# define MGA_MAG_NRST (0x00000000)
+# define MGA_MAG_BILIN (0x00000020)
+# define MGA_FILTERALPHA (0x00100000)
+#define MGAREG_ALPHASTART 0x2c70
+#define MGAREG_ALPHAXINC 0x2c74
+#define MGAREG_ALPHAYINC 0x2c78
+#define MGAREG_ALPHACTRL 0x2c7c
+# define MGA_SRC_ZERO (0x00000000)
+# define MGA_SRC_ONE (0x00000001)
+# define MGA_SRC_DST_COLOR (0x00000002)
+# define MGA_SRC_ONE_MINUS_DST_COLOR (0x00000003)
+# define MGA_SRC_ALPHA (0x00000004)
+# define MGA_SRC_ONE_MINUS_SRC_ALPHA (0x00000005)
+# define MGA_SRC_DST_ALPHA (0x00000006)
+# define MGA_SRC_ONE_MINUS_DST_ALPHA (0x00000007)
+# define MGA_SRC_SRC_ALPHA_SATURATE (0x00000008)
+# define MGA_SRC_BLEND_MASK (0x0000000f)
+# define MGA_DST_ZERO (0x00000000)
+# define MGA_DST_ONE (0x00000010)
+# define MGA_DST_SRC_COLOR (0x00000020)
+# define MGA_DST_ONE_MINUS_SRC_COLOR (0x00000030)
+# define MGA_DST_SRC_ALPHA (0x00000040)
+# define MGA_DST_ONE_MINUS_SRC_ALPHA (0x00000050)
+# define MGA_DST_DST_ALPHA (0x00000060)
+# define MGA_DST_ONE_MINUS_DST_ALPHA (0x00000070)
+# define MGA_DST_BLEND_MASK (0x00000070)
+# define MGA_ALPHACHANNEL (0x00000100)
+# define MGA_VIDEOALPHA (0x00000200)
+# define MGA_DIFFUSEDALPHA (0x01000000)
+# define MGA_MODULATEDALPHA (0x02000000)
+#define MGAREG_TDUALSTAGE0 (0x2CF8)
+#define MGAREG_TDUALSTAGE1 (0x2CFC)
+# define MGA_TDS_COLOR_ARG2_DIFFUSE (0x00000000)
+# define MGA_TDS_COLOR_ARG2_SPECULAR (0x00000001)
+# define MGA_TDS_COLOR_ARG2_FCOL (0x00000002)
+# define MGA_TDS_COLOR_ARG2_PREVSTAGE (0x00000003)
+# define MGA_TDS_COLOR_ALPHA_DIFFUSE (0x00000000)
+# define MGA_TDS_COLOR_ALPHA_FCOL (0x00000004)
+# define MGA_TDS_COLOR_ALPHA_CURRTEX (0x00000008)
+# define MGA_TDS_COLOR_ALPHA_PREVTEX (0x0000000c)
+# define MGA_TDS_COLOR_ALPHA_PREVSTAGE (0x00000010)
+# define MGA_TDS_COLOR_ARG1_REPLICATEALPHA (0x00000020)
+# define MGA_TDS_COLOR_ARG1_INV (0x00000040)
+# define MGA_TDS_COLOR_ARG2_REPLICATEALPHA (0x00000080)
+# define MGA_TDS_COLOR_ARG2_INV (0x00000100)
+# define MGA_TDS_COLOR_ALPHA1INV (0x00000200)
+# define MGA_TDS_COLOR_ALPHA2INV (0x00000400)
+# define MGA_TDS_COLOR_ARG1MUL_ALPHA1 (0x00000800)
+# define MGA_TDS_COLOR_ARG2MUL_ALPHA2 (0x00001000)
+# define MGA_TDS_COLOR_ARG1ADD_MULOUT (0x00002000)
+# define MGA_TDS_COLOR_ARG2ADD_MULOUT (0x00004000)
+# define MGA_TDS_COLOR_MODBRIGHT_2X (0x00008000)
+# define MGA_TDS_COLOR_MODBRIGHT_4X (0x00010000)
+# define MGA_TDS_COLOR_ADD_SUB (0x00000000)
+# define MGA_TDS_COLOR_ADD_ADD (0x00020000)
+# define MGA_TDS_COLOR_ADD2X (0x00040000)
+# define MGA_TDS_COLOR_ADDBIAS (0x00080000)
+# define MGA_TDS_COLOR_BLEND (0x00100000)
+# define MGA_TDS_COLOR_SEL_ARG1 (0x00000000)
+# define MGA_TDS_COLOR_SEL_ARG2 (0x00200000)
+# define MGA_TDS_COLOR_SEL_ADD (0x00400000)
+# define MGA_TDS_COLOR_SEL_MUL (0x00600000)
+# define MGA_TDS_ALPHA_ARG1_INV (0x00800000)
+# define MGA_TDS_ALPHA_ARG2_DIFFUSE (0x00000000)
+# define MGA_TDS_ALPHA_ARG2_FCOL (0x01000000)
+# define MGA_TDS_ALPHA_ARG2_PREVTEX (0x02000000)
+# define MGA_TDS_ALPHA_ARG2_PREVSTAGE (0x03000000)
+# define MGA_TDS_ALPHA_ARG2_INV (0x04000000)
+# define MGA_TDS_ALPHA_ADD (0x08000000)
+# define MGA_TDS_ALPHA_ADDBIAS (0x10000000)
+# define MGA_TDS_ALPHA_ADD2X (0x20000000)
+# define MGA_TDS_ALPHA_SEL_ARG1 (0x00000000)
+# define MGA_TDS_ALPHA_SEL_ARG2 (0x40000000)
+# define MGA_TDS_ALPHA_SEL_ADD (0x80000000)
+# define MGA_TDS_ALPHA_SEL_MUL (0xc0000000)
+
+#define MGAREG_DWGSYNC 0x2c4c
+
+#define MGAREG_AGP_PLL 0x1e4c
+#define MGA_AGP2XPLL_ENABLE 0x1
+#define MGA_AGP2XPLL_DISABLE 0x0
+
+#endif
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
new file mode 100644
index 000000000000..b223dcb7a710
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "mgag200_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct mga_device *
+mgag200_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct mga_device, ttm.bdev);
+}
+
+static int
+mgag200_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+mgag200_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int mgag200_ttm_global_init(struct mga_device *mdev)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &mdev->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &mgag200_ttm_mem_global_init;
+ global_ref->release = &mgag200_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ mdev->ttm.bo_global_ref.mem_glob =
+ mdev->ttm.mem_global_ref.object;
+ global_ref = &mdev->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&mdev->ttm.mem_global_ref);
+ return r;
+ }
+ return 0;
+}
+
+void
+mgag200_ttm_global_release(struct mga_device *mdev)
+{
+ if (mdev->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&mdev->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&mdev->ttm.mem_global_ref);
+ mdev->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct mgag200_bo *bo;
+
+ bo = container_of(tbo, struct mgag200_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &mgag200_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int
+mgag200_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct mgag200_bo *mgabo = mgag200_bo(bo);
+
+ if (!mgag200_ttm_bo_is_mgag200_bo(bo))
+ return;
+
+ mgag200_ttm_placement(mgabo, TTM_PL_FLAG_SYSTEM);
+ *pl = mgabo->placement;
+}
+
+static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct mga_device *mdev = mgag200_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(mdev->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ return 0;
+}
+
+static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int mgag200_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ int r;
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ return r;
+}
+
+
+static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func mgag200_tt_backend_func = {
+ .destroy = &mgag200_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &mgag200_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int mgag200_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver mgag200_bo_driver = {
+ .ttm_tt_create = mgag200_ttm_tt_create,
+ .ttm_tt_populate = mgag200_ttm_tt_populate,
+ .ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
+ .init_mem_type = mgag200_bo_init_mem_type,
+ .evict_flags = mgag200_bo_evict_flags,
+ .move = mgag200_bo_move,
+ .verify_access = mgag200_bo_verify_access,
+ .io_mem_reserve = &mgag200_ttm_io_mem_reserve,
+ .io_mem_free = &mgag200_ttm_io_mem_free,
+};
+
+int mgag200_mm_init(struct mga_device *mdev)
+{
+ int ret;
+ struct drm_device *dev = mdev->dev;
+ struct ttm_bo_device *bdev = &mdev->ttm.bdev;
+
+ ret = mgag200_ttm_global_init(mdev);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&mdev->ttm.bdev,
+ mdev->ttm.bo_global_ref.ref.object,
+ &mgag200_bo_driver, DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, mdev->mc.vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ mdev->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+
+ return 0;
+}
+
+void mgag200_mm_fini(struct mga_device *mdev)
+{
+ struct drm_device *dev = mdev->dev;
+ ttm_bo_device_release(&mdev->ttm.bdev);
+
+ mgag200_ttm_global_release(mdev);
+
+ if (mdev->fb_mtrr >= 0) {
+ drm_mtrr_del(mdev->fb_mtrr,
+ pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+ mdev->fb_mtrr = -1;
+ }
+}
+
+void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+void mgag200_bo_unreserve(struct mgag200_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+int mgag200_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct mgag200_bo **pmgabo)
+{
+ struct mga_device *mdev = dev->dev_private;
+ struct mgag200_bo *mgabo;
+ size_t acc_size;
+ int ret;
+
+ mgabo = kzalloc(sizeof(struct mgag200_bo), GFP_KERNEL);
+ if (!mgabo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &mgabo->gem, size);
+ if (ret) {
+ kfree(mgabo);
+ return ret;
+ }
+
+ mgabo->gem.driver_private = NULL;
+ mgabo->bo.bdev = &mdev->ttm.bdev;
+
+ mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&mdev->ttm.bdev, size,
+ sizeof(struct mgag200_bo));
+
+ ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
+ ttm_bo_type_device, &mgabo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ NULL, mgag200_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pmgabo = mgabo;
+ return 0;
+}
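+/*
+ * Usage sketch, illustrative only (no caller in this file): a framebuffer
+ * or cursor setup path would typically create, reserve and pin a BO along
+ * these lines, with error handling around each step:
+ *
+ *   struct mgag200_bo *bo;
+ *   u64 gpu_addr;
+ *
+ *   ret = mgag200_bo_create(dev, size, 0, 0, &bo);
+ *   if (ret == 0) {
+ *           ret = mgag200_bo_reserve(bo, false);
+ *           if (ret == 0) {
+ *                   ret = mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ *                   mgag200_bo_unreserve(bo);
+ *           }
+ *   }
+ */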
+
+static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = mgag200_bo_gpu_offset(bo);
+ return 0;
+ }
+
+ mgag200_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = mgag200_bo_gpu_offset(bo);
+ return 0;
+}
+
+int mgag200_bo_unpin(struct mgag200_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int mgag200_bo_push_sysram(struct mgag200_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret) {
+ DRM_ERROR("pushing to VRAM failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct mga_device *mdev;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ mdev = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
+}
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 1a2ad7eb1734..fe5267d06ab5 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -16,10 +16,13 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \
nv50_fb.o nvc0_fb.o \
- nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
+ nv04_fifo.o nv10_fifo.o nv17_fifo.o nv40_fifo.o nv50_fifo.o \
+ nv84_fifo.o nvc0_fifo.o nve0_fifo.o \
+ nv04_fence.o nv10_fence.o nv84_fence.o nvc0_fence.o \
+ nv04_software.o nv50_software.o nvc0_software.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
- nv40_graph.o nv50_graph.o nvc0_graph.o \
- nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
+ nv40_graph.o nv50_graph.o nvc0_graph.o nve0_graph.o \
+ nv40_grctx.o nv50_grctx.o nvc0_grctx.o nve0_grctx.o \
nv84_crypt.o nv98_crypt.o \
nva3_copy.o nvc0_copy.o \
nv31_mpeg.o nv50_mpeg.o \
@@ -37,7 +40,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv50_calc.o \
nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \
nv50_vram.o nvc0_vram.o \
- nv50_vm.o nvc0_vm.o
+ nv50_vm.o nvc0_vm.o nouveau_prime.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 284bd25d5d21..fc841e87b343 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -338,7 +338,8 @@ void nouveau_switcheroo_optimus_dsm(void)
void nouveau_unregister_dsm_handler(void)
{
- vga_switcheroo_unregister_handler();
+ if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected)
+ vga_switcheroo_unregister_handler();
}
/* retrieve the ROM in 4k blocks */
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0be4a815e706..2f11e16a81a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -30,6 +30,7 @@
#include "nouveau_gpio.h"
#include <linux/io-mapping.h>
+#include <linux/firmware.h>
/* these defines are made up */
#define NV_CIO_CRE_44_HEADA 0x0
@@ -195,35 +196,24 @@ static void
bios_shadow_acpi(struct nvbios *bios)
{
struct pci_dev *pdev = bios->dev->pdev;
- int ptr, len, ret;
- u8 data[3];
+ int cnt = 65536 / ROM_BIOS_PAGE;
+ int ret;
if (!nouveau_acpi_rom_supported(pdev))
return;
- ret = nouveau_acpi_get_bios_chunk(data, 0, sizeof(data));
- if (ret != sizeof(data))
- return;
-
- bios->length = min(data[2] * 512, 65536);
- bios->data = kmalloc(bios->length, GFP_KERNEL);
+ bios->data = kmalloc(cnt * ROM_BIOS_PAGE, GFP_KERNEL);
if (!bios->data)
return;
- len = bios->length;
- ptr = 0;
- while (len) {
- int size = (len > ROM_BIOS_PAGE) ? ROM_BIOS_PAGE : len;
-
- ret = nouveau_acpi_get_bios_chunk(bios->data, ptr, size);
- if (ret != size) {
- kfree(bios->data);
- bios->data = NULL;
+ bios->length = 0;
+ while (cnt--) {
+ ret = nouveau_acpi_get_bios_chunk(bios->data, bios->length,
+ ROM_BIOS_PAGE);
+ if (ret != ROM_BIOS_PAGE)
return;
- }
- len -= size;
- ptr += size;
+ bios->length += ROM_BIOS_PAGE;
}
}
@@ -249,8 +239,12 @@ bios_shadow(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvbios *bios = &dev_priv->vbios;
struct methods *mthd, *best;
+ const struct firmware *fw;
+ char fname[32];
+ int ret;
if (nouveau_vbios) {
+ /* try to match one of the built-in methods */
mthd = shadow_methods;
do {
if (strcasecmp(nouveau_vbios, mthd->desc))
@@ -263,6 +257,22 @@ bios_shadow(struct drm_device *dev)
return true;
} while ((++mthd)->shadow);
+ /* attempt to load firmware image */
+ snprintf(fname, sizeof(fname), "nouveau/%s", nouveau_vbios);
+ ret = request_firmware(&fw, fname, &dev->pdev->dev);
+ if (ret == 0) {
+ bios->length = fw->size;
+ bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ release_firmware(fw);
+
+ NV_INFO(dev, "VBIOS image: %s\n", nouveau_vbios);
+ if (score_vbios(bios, 1))
+ return true;
+
+ kfree(bios->data);
+ bios->data = NULL;
+ }
+
NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
}
@@ -273,6 +283,7 @@ bios_shadow(struct drm_device *dev)
mthd->score = score_vbios(bios, mthd->rw);
mthd->size = bios->length;
mthd->data = bios->data;
+ bios->data = NULL;
} while (mthd->score != 3 && (++mthd)->shadow);
mthd = shadow_methods;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7d15a774f9c9..7f80ed523562 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -35,6 +35,8 @@
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
+#include "nouveau_fence.h"
+#include "nouveau_ramht.h"
#include <linux/log2.h>
#include <linux/slab.h>
@@ -89,12 +91,17 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+ struct sg_table *sg,
struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
size_t acc_size;
int ret;
+ int type = ttm_bo_type_device;
+
+ if (sg)
+ type = ttm_bo_type_sg;
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
@@ -120,8 +127,8 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
sizeof(struct nouveau_bo));
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
- ttm_bo_type_device, &nvbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ type, &nvbo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -473,7 +480,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
struct nouveau_fence *fence = NULL;
int ret;
- ret = nouveau_fence_new(chan, &fence, true);
+ ret = nouveau_fence_new(chan, &fence);
if (ret)
return ret;
@@ -484,6 +491,76 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
}
static int
+nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_mem *node = old_mem->mm_node;
+ int ret = RING_SPACE(chan, 10);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
+ OUT_RING (chan, upper_32_bits(node->vma[0].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[0].offset));
+ OUT_RING (chan, upper_32_bits(node->vma[1].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[1].offset));
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, new_mem->num_pages);
+ BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
+ }
+ return ret;
+}
+
+static int
+nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+ int ret = RING_SPACE(chan, 2);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle);
+ }
+ return ret;
+}
+
+static int
+nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_mem *node = old_mem->mm_node;
+ u64 src_offset = node->vma[0].offset;
+ u64 dst_offset = node->vma[1].offset;
+ u32 page_count = new_mem->num_pages;
+ int ret;
+
+ page_count = new_mem->num_pages;
+ while (page_count) {
+ int line_count = (page_count > 8191) ? 8191 : page_count;
+
+ ret = RING_SPACE(chan, 11);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
+ OUT_RING (chan, upper_32_bits(src_offset));
+ OUT_RING (chan, lower_32_bits(src_offset));
+ OUT_RING (chan, upper_32_bits(dst_offset));
+ OUT_RING (chan, lower_32_bits(dst_offset));
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, line_count);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
+ OUT_RING (chan, 0x00000110);
+
+ page_count -= line_count;
+ src_offset += (PAGE_SIZE * line_count);
+ dst_offset += (PAGE_SIZE * line_count);
+ }
+
+ return 0;
+}
+
+static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
@@ -501,17 +578,17 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
OUT_RING (chan, upper_32_bits(dst_offset));
OUT_RING (chan, lower_32_bits(dst_offset));
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
+ BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
OUT_RING (chan, upper_32_bits(src_offset));
OUT_RING (chan, lower_32_bits(src_offset));
OUT_RING (chan, PAGE_SIZE); /* src_pitch */
OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
OUT_RING (chan, PAGE_SIZE); /* line_length */
OUT_RING (chan, line_count);
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
OUT_RING (chan, 0x00100110);
page_count -= line_count;
@@ -523,6 +600,102 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
}
static int
+nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_mem *node = old_mem->mm_node;
+ u64 src_offset = node->vma[0].offset;
+ u64 dst_offset = node->vma[1].offset;
+ u32 page_count = new_mem->num_pages;
+ int ret;
+
+ page_count = new_mem->num_pages;
+ while (page_count) {
+ int line_count = (page_count > 8191) ? 8191 : page_count;
+
+ ret = RING_SPACE(chan, 11);
+ if (ret)
+ return ret;
+
+ BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
+ OUT_RING (chan, upper_32_bits(src_offset));
+ OUT_RING (chan, lower_32_bits(src_offset));
+ OUT_RING (chan, upper_32_bits(dst_offset));
+ OUT_RING (chan, lower_32_bits(dst_offset));
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, line_count);
+ BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
+ OUT_RING (chan, 0x00000110);
+
+ page_count -= line_count;
+ src_offset += (PAGE_SIZE * line_count);
+ dst_offset += (PAGE_SIZE * line_count);
+ }
+
+ return 0;
+}
+
+static int
+nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_mem *node = old_mem->mm_node;
+ int ret = RING_SPACE(chan, 7);
+ if (ret == 0) {
+ BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
+ OUT_RING (chan, upper_32_bits(node->vma[0].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[0].offset));
+ OUT_RING (chan, upper_32_bits(node->vma[1].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[1].offset));
+ OUT_RING (chan, 0x00000000 /* COPY */);
+ OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
+ }
+ return ret;
+}
+
+static int
+nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_mem *node = old_mem->mm_node;
+ int ret = RING_SPACE(chan, 7);
+ if (ret == 0) {
+ BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
+ OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
+ OUT_RING (chan, upper_32_bits(node->vma[0].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[0].offset));
+ OUT_RING (chan, upper_32_bits(node->vma[1].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[1].offset));
+ OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
+ }
+ return ret;
+}
+
+static int
+nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+ int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+ &chan->m2mf_ntfy);
+ if (ret == 0) {
+ ret = RING_SPACE(chan, 6);
+ if (ret == 0) {
+ BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle);
+ BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
+ OUT_RING (chan, NvNotify0);
+ OUT_RING (chan, NvDmaFB);
+ OUT_RING (chan, NvDmaFB);
+ } else {
+ nouveau_ramht_remove(chan, NvNotify0);
+ }
+ }
+
+ return ret;
+}
+
+static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
@@ -546,7 +719,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
+ BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
OUT_RING (chan, stride);
@@ -559,7 +732,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
+ BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
OUT_RING (chan, 1);
}
if (old_mem->mem_type == TTM_PL_VRAM &&
@@ -568,7 +741,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
+ BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
OUT_RING (chan, stride);
@@ -581,7 +754,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
+ BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
OUT_RING (chan, 1);
}
@@ -589,10 +762,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
+ BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
OUT_RING (chan, upper_32_bits(src_offset));
OUT_RING (chan, upper_32_bits(dst_offset));
- BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
+ BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
OUT_RING (chan, lower_32_bits(src_offset));
OUT_RING (chan, lower_32_bits(dst_offset));
OUT_RING (chan, stride);
@@ -601,7 +774,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
OUT_RING (chan, height);
OUT_RING (chan, 0x00000101);
OUT_RING (chan, 0x00000000);
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+ BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
OUT_RING (chan, 0);
length -= amount;
@@ -612,6 +785,24 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
return 0;
}
+static int
+nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+ int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+ &chan->m2mf_ntfy);
+ if (ret == 0) {
+ ret = RING_SPACE(chan, 4);
+ if (ret == 0) {
+ BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle);
+ BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
+ OUT_RING (chan, NvNotify0);
+ }
+ }
+
+ return ret;
+}
+
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
struct nouveau_channel *chan, struct ttm_mem_reg *mem)
@@ -634,7 +825,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+ BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
@@ -646,7 +837,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF,
+ BEGIN_NV04(chan, NvSubCopy,
NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
OUT_RING (chan, src_offset);
OUT_RING (chan, dst_offset);
@@ -656,7 +847,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
OUT_RING (chan, line_count);
OUT_RING (chan, 0x00000101);
OUT_RING (chan, 0x00000000);
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+ BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
OUT_RING (chan, 0);
page_count -= line_count;
@@ -716,13 +907,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
goto out;
}
- if (dev_priv->card_type < NV_50)
- ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
- else
- if (dev_priv->card_type < NV_C0)
- ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
- else
- ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+ ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
if (ret == 0) {
ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
no_wait_reserve,
@@ -734,6 +919,49 @@ out:
return ret;
}
+void
+nouveau_bo_move_init(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ static const struct {
+ const char *name;
+ int engine;
+ u32 oclass;
+ int (*exec)(struct nouveau_channel *,
+ struct ttm_buffer_object *,
+ struct ttm_mem_reg *, struct ttm_mem_reg *);
+ int (*init)(struct nouveau_channel *, u32 handle);
+ } _methods[] = {
+ { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
+ { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
+ { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
+ { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
+ { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
+ {},
+ { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
+ }, *mthd = _methods;
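+ /*
+ * Note: the empty {} entry above ends the probe loop below early, so
+ * the nv98 CRYPT (0x88b4) method listed after it is never reached.
+ */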
+ const char *name = "CPU";
+ int ret;
+
+ do {
+ u32 handle = (mthd->engine << 16) | mthd->oclass;
+ ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
+ if (ret == 0) {
+ ret = mthd->init(chan, handle);
+ if (ret == 0) {
+ dev_priv->ttm.move = mthd->exec;
+ name = mthd->name;
+ break;
+ }
+ }
+ } while ((++mthd)->exec);
+
+ NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
+}
+
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
@@ -817,9 +1045,14 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
} else
if (new_mem && new_mem->mem_type == TTM_PL_TT &&
nvbo->page_shift == vma->vm->spg_shift) {
- nouveau_vm_map_sg(vma, 0, new_mem->
- num_pages << PAGE_SHIFT,
- new_mem->mm_node);
+ if (((struct nouveau_mem *)new_mem->mm_node)->sg)
+ nouveau_vm_map_sg_table(vma, 0, new_mem->
+ num_pages << PAGE_SHIFT,
+ new_mem->mm_node);
+ else
+ nouveau_vm_map_sg(vma, 0, new_mem->
+ num_pages << PAGE_SHIFT,
+ new_mem->mm_node);
} else {
nouveau_vm_unmap(vma);
}
@@ -885,8 +1118,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
goto out;
}
- /* Software copy if the card isn't up and running yet. */
- if (!dev_priv->channel) {
+ /* CPU copy if we have no accelerated method available */
+ if (!dev_priv->ttm.move) {
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
goto out;
}
@@ -1030,26 +1263,10 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
- nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
+ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
return nouveau_bo_validate(nvbo, false, true, false);
}
-void
-nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
-{
- struct nouveau_fence *old_fence;
-
- if (likely(fence))
- nouveau_fence_ref(fence);
-
- spin_lock(&nvbo->bo.bdev->fence_lock);
- old_fence = nvbo->bo.sync_obj;
- nvbo->bo.sync_obj = fence;
- spin_unlock(&nvbo->bo.bdev->fence_lock);
-
- nouveau_fence_unref(&old_fence);
-}
-
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
@@ -1058,10 +1275,19 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
struct drm_device *dev;
unsigned i;
int r;
+ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
return 0;
+ if (slave && ttm->sg) {
+ /* make userspace faulting work */
+ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+ ttm_dma->dma_address, ttm->num_pages);
+ ttm->state = tt_unbound;
+ return 0;
+ }
+
dev_priv = nouveau_bdev(ttm->bdev);
dev = dev_priv->dev;
@@ -1106,6 +1332,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
struct drm_nouveau_private *dev_priv;
struct drm_device *dev;
unsigned i;
+ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+ if (slave)
+ return;
dev_priv = nouveau_bdev(ttm->bdev);
dev = dev_priv->dev;
@@ -1134,6 +1364,52 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
ttm_pool_unpopulate(ttm);
}
+void
+nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
+{
+ struct nouveau_fence *old_fence = NULL;
+
+ if (likely(fence))
+ nouveau_fence_ref(fence);
+
+ spin_lock(&nvbo->bo.bdev->fence_lock);
+ old_fence = nvbo->bo.sync_obj;
+ nvbo->bo.sync_obj = fence;
+ spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+ nouveau_fence_unref(&old_fence);
+}
+
+static void
+nouveau_bo_fence_unref(void **sync_obj)
+{
+ nouveau_fence_unref((struct nouveau_fence **)sync_obj);
+}
+
+static void *
+nouveau_bo_fence_ref(void *sync_obj)
+{
+ return nouveau_fence_ref(sync_obj);
+}
+
+static bool
+nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
+{
+ return nouveau_fence_done(sync_obj);
+}
+
+static int
+nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+{
+ return nouveau_fence_wait(sync_obj, lazy, intr);
+}
+
+static int
+nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
+{
+ return 0;
+}
+
struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
.ttm_tt_populate = &nouveau_ttm_tt_populate,
@@ -1144,11 +1420,11 @@ struct ttm_bo_driver nouveau_bo_driver = {
.move_notify = nouveau_bo_move_ntfy,
.move = nouveau_bo_move,
.verify_access = nouveau_bo_verify_access,
- .sync_obj_signaled = __nouveau_fence_signalled,
- .sync_obj_wait = __nouveau_fence_wait,
- .sync_obj_flush = __nouveau_fence_flush,
- .sync_obj_unref = __nouveau_fence_unref,
- .sync_obj_ref = __nouveau_fence_ref,
+ .sync_obj_signaled = nouveau_bo_fence_signalled,
+ .sync_obj_wait = nouveau_bo_fence_wait,
+ .sync_obj_flush = nouveau_bo_fence_flush,
+ .sync_obj_unref = nouveau_bo_fence_unref,
+ .sync_obj_ref = nouveau_bo_fence_ref,
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
@@ -1181,9 +1457,12 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
- else
- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- nouveau_vm_map_sg(vma, 0, size, node);
+ else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
+ if (node->sg)
+ nouveau_vm_map_sg_table(vma, 0, size, node);
+ else
+ nouveau_vm_map_sg(vma, 0, size, node);
+ }
list_add_tail(&vma->head, &nvbo->vma_list);
vma->refcount = 1;
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 846afb0bfef4..629d8a2df5bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -27,7 +27,10 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+#include "nouveau_software.h"
static int
nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
@@ -38,7 +41,7 @@ nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
int ret;
/* allocate buffer object */
- ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo);
+ ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, NULL, &chan->pushbuf_bo);
if (ret)
goto out;
@@ -117,8 +120,9 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
struct drm_file *file_priv,
uint32_t vram_handle, uint32_t gart_handle)
{
+ struct nouveau_exec_engine *fence = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
struct nouveau_channel *chan;
unsigned long flags;
@@ -155,10 +159,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
}
NV_DEBUG(dev, "initialising channel %d\n", chan->id);
- INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
- INIT_LIST_HEAD(&chan->nvsw.flip);
- INIT_LIST_HEAD(&chan->fence.pending);
- spin_lock_init(&chan->fence.lock);
/* setup channel's memory and vm */
ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
@@ -188,20 +188,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
chan->user_put = 0x40;
chan->user_get = 0x44;
if (dev_priv->card_type >= NV_50)
- chan->user_get_hi = 0x60;
+ chan->user_get_hi = 0x60;
- /* disable the fifo caches */
- pfifo->reassign(dev, false);
-
- /* Construct initial RAMFC for new channel */
- ret = pfifo->create_context(chan);
+ /* create fifo context */
+ ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO);
if (ret) {
nouveau_channel_put(&chan);
return ret;
}
- pfifo->reassign(dev, true);
-
/* Insert NOPs for NOUVEAU_DMA_SKIPS */
ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
if (ret) {
@@ -211,9 +206,28 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
OUT_RING (chan, 0x00000000);
+
+ ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
+ if (ret) {
+ nouveau_channel_put(&chan);
+ return ret;
+ }
+
+ if (dev_priv->card_type < NV_C0) {
+ ret = RING_SPACE(chan, 2);
+ if (ret) {
+ nouveau_channel_put(&chan);
+ return ret;
+ }
+
+ BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
+ OUT_RING (chan, NvSw);
+ FIRE_RING (chan);
+ }
+
FIRE_RING(chan);
- ret = nouveau_fence_channel_init(chan);
+ ret = fence->context_new(chan, NVOBJ_ENGINE_FENCE);
if (ret) {
nouveau_channel_put(&chan);
return ret;
@@ -268,7 +282,6 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
struct nouveau_channel *chan = *pchan;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
unsigned long flags;
int i;
@@ -285,24 +298,12 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
/* give it chance to idle */
nouveau_channel_idle(chan);
- /* ensure all outstanding fences are signaled. they should be if the
- * above attempts at idling were OK, but if we failed this'll tell TTM
- * we're done with the buffers.
- */
- nouveau_fence_channel_fini(chan);
-
- /* boot it off the hardware */
- pfifo->reassign(dev, false);
-
/* destroy the engine specific contexts */
- pfifo->destroy_context(chan);
- for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
+ for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) {
if (chan->engctx[i])
dev_priv->eng[i]->context_del(chan, i);
}
- pfifo->reassign(dev, true);
-
/* aside from its resources, the channel should now be dead,
* remove it from the channel list
*/
@@ -354,38 +355,37 @@ nouveau_channel_ref(struct nouveau_channel *chan,
*pchan = chan;
}
-void
+int
nouveau_channel_idle(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct nouveau_fence *fence = NULL;
int ret;
- nouveau_fence_update(chan);
-
- if (chan->fence.sequence != chan->fence.sequence_ack) {
- ret = nouveau_fence_new(chan, &fence, true);
- if (!ret) {
- ret = nouveau_fence_wait(fence, false, false);
- nouveau_fence_unref(&fence);
- }
-
- if (ret)
- NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+ ret = nouveau_fence_new(chan, &fence);
+ if (!ret) {
+ ret = nouveau_fence_wait(fence, false, false);
+ nouveau_fence_unref(&fence);
}
+
+ if (ret)
+ NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+ return ret;
}
/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_channel *chan;
int i;
+ if (!pfifo)
+ return;
+
NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
- for (i = 0; i < engine->fifo.channels; i++) {
+ for (i = 0; i < pfifo->channels; i++) {
chan = nouveau_channel_get(file_priv, i);
if (IS_ERR(chan))
continue;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index fa860358add1..7b11edb077d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -654,7 +654,13 @@ nouveau_connector_detect_depth(struct drm_connector *connector)
if (nv_connector->edid && connector->display_info.bpc)
return;
- /* if not, we're out of options unless we're LVDS, default to 8bpc */
+ /* EDID 1.4 is *supposed* to be supported on eDP, but, Apple... */
+ if (nv_connector->type == DCB_CONNECTOR_eDP) {
+ connector->display_info.bpc = 6;
+ return;
+ }
+
+ /* we're out of options unless we're LVDS, default to 8bpc */
if (nv_encoder->dcb->type != OUTPUT_LVDS) {
connector->display_info.bpc = 8;
return;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index fa2ec491f6a7..188c92b327e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -67,8 +67,6 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
nvchan_rd32(chan, 0x8c));
}
- seq_printf(m, "last fence : %d\n", chan->fence.sequence);
- seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index a85e112863d1..69688ef5cf46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -33,7 +33,9 @@
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
#include "nouveau_connector.h"
+#include "nouveau_software.h"
#include "nouveau_gpio.h"
+#include "nouveau_fence.h"
#include "nv50_display.h"
static void
@@ -300,7 +302,7 @@ nouveau_display_create(struct drm_device *dev)
disp->color_vibrance_property->values[1] = 200; /* -100..+100 */
}
- dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+ dev->mode_config.funcs = &nouveau_mode_config_funcs;
dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
dev->mode_config.min_width = 0;
@@ -325,14 +327,21 @@ nouveau_display_create(struct drm_device *dev)
ret = disp->create(dev);
if (ret)
- return ret;
+ goto disp_create_err;
if (dev->mode_config.num_crtc) {
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret)
- return ret;
+ goto vblank_err;
}
+ return 0;
+
+vblank_err:
+ disp->destroy(dev);
+disp_create_err:
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
return ret;
}
@@ -425,6 +434,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_page_flip_state *s,
struct nouveau_fence **pfence)
{
+ struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW];
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
unsigned long flags;
@@ -432,7 +442,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
/* Queue it to the pending list */
spin_lock_irqsave(&dev->event_lock, flags);
- list_add_tail(&s->head, &chan->nvsw.flip);
+ list_add_tail(&s->head, &swch->flip);
spin_unlock_irqrestore(&dev->event_lock, flags);
/* Synchronize with the old framebuffer */
@@ -446,17 +456,17 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
goto fail;
if (dev_priv->card_type < NV_C0) {
- BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+ BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
OUT_RING (chan, 0x00000000);
OUT_RING (chan, 0x00000000);
} else {
- BEGIN_NVC0(chan, 2, 0, NV10_SUBCHAN_REF_CNT, 1);
- OUT_RING (chan, ++chan->fence.sequence);
- BEGIN_NVC0(chan, 8, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
+ BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
+ OUT_RING (chan, 0);
+ BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
}
FIRE_RING (chan);
- ret = nouveau_fence_new(chan, pfence, true);
+ ret = nouveau_fence_new(chan, pfence);
if (ret)
goto fail;
@@ -477,7 +487,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
struct nouveau_page_flip_state *s;
- struct nouveau_channel *chan;
+ struct nouveau_channel *chan = NULL;
struct nouveau_fence *fence;
int ret;
@@ -500,7 +510,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
new_bo->bo.offset };
/* Choose the channel the flip will be handled in */
- chan = nouveau_fence_channel(new_bo->bo.sync_obj);
+ fence = new_bo->bo.sync_obj;
+ if (fence)
+ chan = nouveau_channel_get_unlocked(fence->channel);
if (!chan)
chan = nouveau_channel_get_unlocked(dev_priv->channel);
mutex_lock(&chan->mutex);
@@ -540,20 +552,20 @@ int
nouveau_finish_page_flip(struct nouveau_channel *chan,
struct nouveau_page_flip_state *ps)
{
+ struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW];
struct drm_device *dev = chan->dev;
struct nouveau_page_flip_state *s;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
- if (list_empty(&chan->nvsw.flip)) {
+ if (list_empty(&swch->flip)) {
NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
spin_unlock_irqrestore(&dev->event_lock, flags);
return -EINVAL;
}
- s = list_first_entry(&chan->nvsw.flip,
- struct nouveau_page_flip_state, head);
+ s = list_first_entry(&swch->flip, struct nouveau_page_flip_state, head);
if (s->event) {
struct drm_pending_vblank_event *e = s->event;
struct timeval now;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 23d4edf992b7..8db68be9544f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -48,12 +48,12 @@ void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
/* Hardcoded object assignments to subchannels (subchannel id). */
enum {
- NvSubM2MF = 0,
+ NvSubCtxSurf2D = 0,
NvSubSw = 1,
- NvSub2D = 2,
- NvSubCtxSurf2D = 2,
+ NvSubImageBlit = 2,
+ NvSub2D = 3,
NvSubGdiRect = 3,
- NvSubImageBlit = 4
+ NvSubCopy = 4,
};
/* Object handles. */
@@ -73,6 +73,7 @@ enum {
NvSema = 0x8000000f,
NvEvoSema0 = 0x80000010,
NvEvoSema1 = 0x80000011,
+ NvNotify1 = 0x80000012,
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
@@ -127,15 +128,33 @@ extern void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
static inline void
-BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size)
+BEGIN_NV04(struct nouveau_channel *chan, int subc, int mthd, int size)
{
- OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2));
+ OUT_RING(chan, 0x00000000 | (subc << 13) | (size << 18) | mthd);
}
static inline void
-BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
+BEGIN_NI04(struct nouveau_channel *chan, int subc, int mthd, int size)
{
- OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
+ OUT_RING(chan, 0x40000000 | (subc << 13) | (size << 18) | mthd);
+}
+
+static inline void
+BEGIN_NVC0(struct nouveau_channel *chan, int subc, int mthd, int size)
+{
+ OUT_RING(chan, 0x20000000 | (size << 16) | (subc << 13) | (mthd >> 2));
+}
+
+static inline void
+BEGIN_NIC0(struct nouveau_channel *chan, int subc, int mthd, int size)
+{
+ OUT_RING(chan, 0x60000000 | (size << 16) | (subc << 13) | (mthd >> 2));
+}
+
+static inline void
+BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
+{
+ OUT_RING(chan, 0x80000000 | (data << 16) | (subc << 13) | (mthd >> 2));
}
#define WRITE_PUT(val) do { \
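For reference, the helpers above all pack the same (subchannel, method, size) triplet, just into two header layouts: the NV04/NI04 variants keep the method address in the low bits with the size at bit 18, while the NVC0/NIC0/IMC0 variants put the size (or immediate data) at bit 16 and store the method address divided by four. A small worked example recomputing the words for two calls used elsewhere in this series; NvSubSw == 1 and NV10_SUBCHAN_REF_CNT == 0x0050 come from headers in this patch, NV01_SUBCHAN_OBJECT == 0x0000 is assumed:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1) */
	uint32_t nv04 = 0x00000000 | (1u << 13) | (1u << 18) | 0x0000;
	/* BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1) */
	uint32_t nvc0 = 0x20000000 | (1u << 16) | (0u << 13) | (0x0050 >> 2);

	printf("NV04 header: 0x%08x\n", nv04);	/* 0x00042000 */
	printf("NVC0 header: 0x%08x\n", nvc0);	/* 0x20010014 */
	return 0;
}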
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index d996134b1b28..7e289d2ad8e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -510,6 +510,25 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
nouveau_dp_link_train(encoder, datarate, func);
}
+static void
+nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_chan *auxch,
+ u8 *dpcd)
+{
+ u8 buf[3];
+
+ if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+ return;
+
+ if (!auxch_tx(dev, auxch->drive, 9, DP_SINK_OUI, buf, 3))
+ NV_DEBUG_KMS(dev, "Sink OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ if (!auxch_tx(dev, auxch->drive, 9, DP_BRANCH_OUI, buf, 3))
+ NV_DEBUG_KMS(dev, "Branch OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+}
+
bool
nouveau_dp_detect(struct drm_encoder *encoder)
{
@@ -544,6 +563,8 @@ nouveau_dp_detect(struct drm_encoder *encoder)
NV_DEBUG_KMS(dev, "maximum: %dx%d\n",
nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
+ nouveau_dp_probe_oui(dev, auxch, dpcd);
+
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 4f2030bd5676..cad254c8e387 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -33,6 +33,7 @@
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
#include "nouveau_pm.h"
+#include "nouveau_fifo.h"
#include "nv50_display.h"
#include "drm_pciids.h"
@@ -175,7 +176,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_channel *chan;
struct drm_crtc *crtc;
int ret, i, e;
@@ -214,17 +215,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
NV_INFO(dev, "Idling channels...\n");
- for (i = 0; i < pfifo->channels; i++) {
+ for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
chan = dev_priv->channels.ptr[i];
if (chan && chan->pushbuf_bo)
nouveau_channel_idle(chan);
}
- pfifo->reassign(dev, false);
- pfifo->disable(dev);
- pfifo->unload_context(dev);
-
for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
if (!dev_priv->eng[e])
continue;
@@ -265,8 +262,6 @@ out_abort:
if (dev_priv->eng[e])
dev_priv->eng[e]->init(dev, e);
}
- pfifo->enable(dev);
- pfifo->reassign(dev, true);
return ret;
}
@@ -274,6 +269,7 @@ int
nouveau_pci_resume(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
struct drm_crtc *crtc;
@@ -321,7 +317,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
if (dev_priv->eng[i])
dev_priv->eng[i]->init(dev, i);
}
- engine->fifo.init(dev);
nouveau_irq_postinstall(dev);
@@ -330,7 +325,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
struct nouveau_channel *chan;
int j;
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
chan = dev_priv->channels.ptr[i];
if (!chan || !chan->pushbuf_bo)
continue;
@@ -408,7 +403,7 @@ static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
- DRIVER_MODESET,
+ DRIVER_MODESET | DRIVER_PRIME,
.load = nouveau_load,
.firstopen = nouveau_firstopen,
.lastclose = nouveau_lastclose,
@@ -430,6 +425,12 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = nouveau_ioctls,
.fops = &nouveau_driver_fops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = nouveau_gem_prime_export,
+ .gem_prime_import = nouveau_gem_prime_import,
+
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
.gem_open_object = nouveau_gem_object_open,
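With DRIVER_PRIME set and the prime_handle_to_fd/fd_to_handle hooks wired up, a GEM buffer can now be exported as a dma-buf file descriptor. A rough userspace-side sketch, assuming the generic DRM PRIME uapi these hooks service (struct drm_prime_handle, DRM_IOCTL_PRIME_HANDLE_TO_FD and DRM_CLOEXEC from the drm headers, drmIoctl() from libdrm):

#include <stdint.h>
#include <xf86drm.h>

/* Sketch: export a GEM handle as a dma-buf fd; error handling trimmed. */
static int example_export_bo(int drm_fd, uint32_t gem_handle)
{
	struct drm_prime_handle args = {
		.handle = gem_handle,
		.flags  = DRM_CLOEXEC,
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
		return -1;

	return args.fd;	/* shareable with another device or process */
}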
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 3aef353a926c..8613cb23808c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -70,7 +70,7 @@ struct nouveau_mem;
#define MAX_NUM_DCB_ENTRIES 16
-#define NOUVEAU_MAX_CHANNEL_NR 128
+#define NOUVEAU_MAX_CHANNEL_NR 4096
#define NOUVEAU_MAX_TILE_NR 15
struct nouveau_mem {
@@ -86,6 +86,7 @@ struct nouveau_mem {
u32 memtype;
u64 offset;
u64 size;
+ struct sg_table *sg;
};
struct nouveau_tile_reg {
@@ -122,6 +123,9 @@ struct nouveau_bo {
struct drm_gem_object *gem;
int pin_refcnt;
+
+ struct ttm_bo_kmap_obj dma_buf_vmap;
+ int vmapping_count;
};
#define nouveau_bo_tile_layout(nvbo) \
@@ -164,8 +168,10 @@ enum nouveau_flags {
#define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG
#define NVOBJ_ENGINE_BSP 6
#define NVOBJ_ENGINE_VP 7
-#define NVOBJ_ENGINE_DISPLAY 15
+#define NVOBJ_ENGINE_FIFO 14
+#define NVOBJ_ENGINE_FENCE 15
#define NVOBJ_ENGINE_NR 16
+#define NVOBJ_ENGINE_DISPLAY (NVOBJ_ENGINE_NR + 0) /*XXX*/
#define NVOBJ_FLAG_DONT_MAP (1 << 0)
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
@@ -233,17 +239,6 @@ struct nouveau_channel {
uint32_t user_get_hi;
uint32_t user_put;
- /* Fencing */
- struct {
- /* lock protects the pending list only */
- spinlock_t lock;
- struct list_head pending;
- uint32_t sequence;
- uint32_t sequence_ack;
- atomic_t last_sequence_irq;
- struct nouveau_vma vma;
- } fence;
-
/* DMA push buffer */
struct nouveau_gpuobj *pushbuf;
struct nouveau_bo *pushbuf_bo;
@@ -257,8 +252,6 @@ struct nouveau_channel {
/* PFIFO context */
struct nouveau_gpuobj *ramfc;
- struct nouveau_gpuobj *cache;
- void *fifo_priv;
/* Execution engine contexts */
void *engctx[NVOBJ_ENGINE_NR];
@@ -292,18 +285,6 @@ struct nouveau_channel {
int ib_put;
} dma;
- uint32_t sw_subchannel[8];
-
- struct nouveau_vma dispc_vma[4];
- struct {
- struct nouveau_gpuobj *vblsem;
- uint32_t vblsem_head;
- uint32_t vblsem_offset;
- uint32_t vblsem_rval;
- struct list_head vbl_wait;
- struct list_head flip;
- } nvsw;
-
struct {
bool active;
char name[32];
@@ -366,30 +347,6 @@ struct nouveau_fb_engine {
void (*free_tile_region)(struct drm_device *dev, int i);
};
-struct nouveau_fifo_engine {
- void *priv;
- int channels;
-
- struct nouveau_gpuobj *playlist[2];
- int cur_playlist;
-
- int (*init)(struct drm_device *);
- void (*takedown)(struct drm_device *);
-
- void (*disable)(struct drm_device *);
- void (*enable)(struct drm_device *);
- bool (*reassign)(struct drm_device *, bool enable);
- bool (*cache_pull)(struct drm_device *dev, bool enable);
-
- int (*channel_id)(struct drm_device *);
-
- int (*create_context)(struct nouveau_channel *);
- void (*destroy_context)(struct nouveau_channel *);
- int (*load_context)(struct nouveau_channel *);
- int (*unload_context)(struct drm_device *);
- void (*tlb_flush)(struct drm_device *dev);
-};
-
struct nouveau_display_engine {
void *priv;
int (*early_init)(struct drm_device *);
@@ -597,7 +554,6 @@ struct nouveau_engine {
struct nouveau_mc_engine mc;
struct nouveau_timer_engine timer;
struct nouveau_fb_engine fb;
- struct nouveau_fifo_engine fifo;
struct nouveau_display_engine display;
struct nouveau_gpio_engine gpio;
struct nouveau_pm_engine pm;
@@ -740,6 +696,9 @@ struct drm_nouveau_private {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
atomic_t validate_sequence;
+ int (*move)(struct nouveau_channel *,
+ struct ttm_buffer_object *,
+ struct ttm_mem_reg *, struct ttm_mem_reg *);
} ttm;
struct {
@@ -977,7 +936,7 @@ extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
extern void nouveau_channel_put(struct nouveau_channel **);
extern void nouveau_channel_ref(struct nouveau_channel *chan,
struct nouveau_channel **pchan);
-extern void nouveau_channel_idle(struct nouveau_channel *chan);
+extern int nouveau_channel_idle(struct nouveau_channel *chan);
/* nouveau_object.c */
#define NVOBJ_ENGINE_ADD(d, e, p) do { \
@@ -1209,56 +1168,6 @@ extern void nv50_fb_vm_trap(struct drm_device *, int display);
extern int nvc0_fb_init(struct drm_device *);
extern void nvc0_fb_takedown(struct drm_device *);
-/* nv04_fifo.c */
-extern int nv04_fifo_init(struct drm_device *);
-extern void nv04_fifo_fini(struct drm_device *);
-extern void nv04_fifo_disable(struct drm_device *);
-extern void nv04_fifo_enable(struct drm_device *);
-extern bool nv04_fifo_reassign(struct drm_device *, bool);
-extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
-extern int nv04_fifo_channel_id(struct drm_device *);
-extern int nv04_fifo_create_context(struct nouveau_channel *);
-extern void nv04_fifo_destroy_context(struct nouveau_channel *);
-extern int nv04_fifo_load_context(struct nouveau_channel *);
-extern int nv04_fifo_unload_context(struct drm_device *);
-extern void nv04_fifo_isr(struct drm_device *);
-
-/* nv10_fifo.c */
-extern int nv10_fifo_init(struct drm_device *);
-extern int nv10_fifo_channel_id(struct drm_device *);
-extern int nv10_fifo_create_context(struct nouveau_channel *);
-extern int nv10_fifo_load_context(struct nouveau_channel *);
-extern int nv10_fifo_unload_context(struct drm_device *);
-
-/* nv40_fifo.c */
-extern int nv40_fifo_init(struct drm_device *);
-extern int nv40_fifo_create_context(struct nouveau_channel *);
-extern int nv40_fifo_load_context(struct nouveau_channel *);
-extern int nv40_fifo_unload_context(struct drm_device *);
-
-/* nv50_fifo.c */
-extern int nv50_fifo_init(struct drm_device *);
-extern void nv50_fifo_takedown(struct drm_device *);
-extern int nv50_fifo_channel_id(struct drm_device *);
-extern int nv50_fifo_create_context(struct nouveau_channel *);
-extern void nv50_fifo_destroy_context(struct nouveau_channel *);
-extern int nv50_fifo_load_context(struct nouveau_channel *);
-extern int nv50_fifo_unload_context(struct drm_device *);
-extern void nv50_fifo_tlb_flush(struct drm_device *dev);
-
-/* nvc0_fifo.c */
-extern int nvc0_fifo_init(struct drm_device *);
-extern void nvc0_fifo_takedown(struct drm_device *);
-extern void nvc0_fifo_disable(struct drm_device *);
-extern void nvc0_fifo_enable(struct drm_device *);
-extern bool nvc0_fifo_reassign(struct drm_device *, bool);
-extern bool nvc0_fifo_cache_pull(struct drm_device *, bool);
-extern int nvc0_fifo_channel_id(struct drm_device *);
-extern int nvc0_fifo_create_context(struct nouveau_channel *);
-extern void nvc0_fifo_destroy_context(struct nouveau_channel *);
-extern int nvc0_fifo_load_context(struct nouveau_channel *);
-extern int nvc0_fifo_unload_context(struct drm_device *);
-
/* nv04_graph.c */
extern int nv04_graph_create(struct drm_device *);
extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
@@ -1277,18 +1186,23 @@ extern int nv20_graph_create(struct drm_device *);
/* nv40_graph.c */
extern int nv40_graph_create(struct drm_device *);
-extern void nv40_grctx_init(struct nouveau_grctx *);
+extern void nv40_grctx_init(struct drm_device *, u32 *size);
+extern void nv40_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
/* nv50_graph.c */
extern int nv50_graph_create(struct drm_device *);
-extern int nv50_grctx_init(struct nouveau_grctx *);
extern struct nouveau_enum nv50_data_error_names[];
extern int nv50_graph_isr_chid(struct drm_device *dev, u64 inst);
+extern int nv50_grctx_init(struct drm_device *, u32 *, u32, u32 *, u32 *);
+extern void nv50_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
/* nvc0_graph.c */
extern int nvc0_graph_create(struct drm_device *);
extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
+/* nve0_graph.c */
+extern int nve0_graph_create(struct drm_device *);
+
/* nv84_crypt.c */
extern int nv84_crypt_create(struct drm_device *);
@@ -1414,9 +1328,12 @@ extern int nv04_crtc_create(struct drm_device *, int index);
/* nouveau_bo.c */
extern struct ttm_bo_driver nouveau_bo_driver;
+extern void nouveau_bo_move_init(struct nouveau_channel *);
extern int nouveau_bo_new(struct drm_device *, int size, int align,
uint32_t flags, uint32_t tile_mode,
- uint32_t tile_flags, struct nouveau_bo **);
+ uint32_t tile_flags,
+ struct sg_table *sg,
+ struct nouveau_bo **);
extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
extern int nouveau_bo_unpin(struct nouveau_bo *);
extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1437,50 +1354,6 @@ extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
struct nouveau_vma *);
extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
-/* nouveau_fence.c */
-struct nouveau_fence;
-extern int nouveau_fence_init(struct drm_device *);
-extern void nouveau_fence_fini(struct drm_device *);
-extern int nouveau_fence_channel_init(struct nouveau_channel *);
-extern void nouveau_fence_channel_fini(struct nouveau_channel *);
-extern void nouveau_fence_update(struct nouveau_channel *);
-extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
- bool emit);
-extern int nouveau_fence_emit(struct nouveau_fence *);
-extern void nouveau_fence_work(struct nouveau_fence *fence,
- void (*work)(void *priv, bool signalled),
- void *priv);
-struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
-
-extern bool __nouveau_fence_signalled(void *obj, void *arg);
-extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
-extern int __nouveau_fence_flush(void *obj, void *arg);
-extern void __nouveau_fence_unref(void **obj);
-extern void *__nouveau_fence_ref(void *obj);
-
-static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
-{
- return __nouveau_fence_signalled(obj, NULL);
-}
-static inline int
-nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
-{
- return __nouveau_fence_wait(obj, NULL, lazy, intr);
-}
-extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
-static inline int nouveau_fence_flush(struct nouveau_fence *obj)
-{
- return __nouveau_fence_flush(obj, NULL);
-}
-static inline void nouveau_fence_unref(struct nouveau_fence **obj)
-{
- __nouveau_fence_unref((void **)obj);
-}
-static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
-{
- return __nouveau_fence_ref(obj);
-}
-
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, int size, int align,
uint32_t domain, uint32_t tile_mode,
@@ -1501,6 +1374,11 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
+extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
/* nouveau_display.c */
int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
@@ -1772,6 +1650,7 @@ nv44_graph_class(struct drm_device *dev)
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
+#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
#define NV10_SUBCHAN_REF_CNT 0x00000050
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8113e9201ed9..153b9a15469b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -153,7 +153,7 @@ nouveau_fbcon_sync(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
- int ret, i;
+ int ret;
if (!chan || !chan->accel_done || in_interrupt() ||
info->state != FBINFO_STATE_RUNNING ||
@@ -163,38 +163,8 @@ nouveau_fbcon_sync(struct fb_info *info)
if (!mutex_trylock(&chan->mutex))
return 0;
- ret = RING_SPACE(chan, 4);
- if (ret) {
- mutex_unlock(&chan->mutex);
- nouveau_fbcon_gpu_lockup(info);
- return 0;
- }
-
- if (dev_priv->card_type >= NV_C0) {
- BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
- OUT_RING (chan, 0);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
- OUT_RING (chan, 0);
- } else {
- BEGIN_RING(chan, 0, 0x0104, 1);
- OUT_RING (chan, 0);
- BEGIN_RING(chan, 0, 0x0100, 1);
- OUT_RING (chan, 0);
- }
-
- nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
- FIRE_RING(chan);
+ ret = nouveau_channel_idle(chan);
mutex_unlock(&chan->mutex);
-
- ret = -EBUSY;
- for (i = 0; i < 100000; i++) {
- if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
- ret = 0;
- break;
- }
- DRM_UDELAY(1);
- }
-
if (ret) {
nouveau_fbcon_gpu_lockup(info);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index c1dc20f6cb85..3c180493dab8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -32,220 +32,100 @@
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+#include "nouveau_software.h"
#include "nouveau_dma.h"
-#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
-
-struct nouveau_fence {
- struct nouveau_channel *channel;
- struct kref refcount;
- struct list_head entry;
-
- uint32_t sequence;
- bool signalled;
-
- void (*work)(void *priv, bool signalled);
- void *priv;
-};
-
-struct nouveau_semaphore {
- struct kref ref;
- struct drm_device *dev;
- struct drm_mm_node *mem;
-};
-
-static inline struct nouveau_fence *
-nouveau_fence(void *sync_obj)
+void
+nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
- return (struct nouveau_fence *)sync_obj;
+ struct nouveau_fence *fence, *fnext;
+ spin_lock(&fctx->lock);
+ list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+ if (fence->work)
+ fence->work(fence->priv, false);
+ fence->channel = NULL;
+ list_del(&fence->head);
+ nouveau_fence_unref(&fence);
+ }
+ spin_unlock(&fctx->lock);
}
-static void
-nouveau_fence_del(struct kref *ref)
+void
+nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
{
- struct nouveau_fence *fence =
- container_of(ref, struct nouveau_fence, refcount);
-
- nouveau_channel_ref(NULL, &fence->channel);
- kfree(fence);
+ INIT_LIST_HEAD(&fctx->pending);
+ spin_lock_init(&fctx->lock);
}
void
nouveau_fence_update(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
- struct nouveau_fence *tmp, *fence;
- uint32_t sequence;
+ struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+ struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+ struct nouveau_fence *fence, *fnext;
- spin_lock(&chan->fence.lock);
-
- /* Fetch the last sequence if the channel is still up and running */
- if (likely(!list_empty(&chan->fence.pending))) {
- if (USE_REFCNT(dev))
- sequence = nvchan_rd32(chan, 0x48);
- else
- sequence = atomic_read(&chan->fence.last_sequence_irq);
-
- if (chan->fence.sequence_ack == sequence)
- goto out;
- chan->fence.sequence_ack = sequence;
- }
-
- list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
- if (fence->sequence > chan->fence.sequence_ack)
+ spin_lock(&fctx->lock);
+ list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+ if (priv->read(chan) < fence->sequence)
break;
- fence->signalled = true;
- list_del(&fence->entry);
if (fence->work)
fence->work(fence->priv, true);
-
- kref_put(&fence->refcount, nouveau_fence_del);
- }
-
-out:
- spin_unlock(&chan->fence.lock);
-}
-
-int
-nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
- bool emit)
-{
- struct nouveau_fence *fence;
- int ret = 0;
-
- fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (!fence)
- return -ENOMEM;
- kref_init(&fence->refcount);
- nouveau_channel_ref(chan, &fence->channel);
-
- if (emit)
- ret = nouveau_fence_emit(fence);
-
- if (ret)
+ fence->channel = NULL;
+ list_del(&fence->head);
nouveau_fence_unref(&fence);
- *pfence = fence;
- return ret;
-}
-
-struct nouveau_channel *
-nouveau_fence_channel(struct nouveau_fence *fence)
-{
- return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
+ }
+ spin_unlock(&fctx->lock);
}
int
-nouveau_fence_emit(struct nouveau_fence *fence)
+nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- struct nouveau_channel *chan = fence->channel;
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+ struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
int ret;
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
-
- if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
- nouveau_fence_update(chan);
+ fence->channel = chan;
+ fence->timeout = jiffies + (3 * DRM_HZ);
+ fence->sequence = ++fctx->sequence;
- BUG_ON(chan->fence.sequence ==
- chan->fence.sequence_ack - 1);
+ ret = priv->emit(fence);
+ if (!ret) {
+ kref_get(&fence->kref);
+ spin_lock(&fctx->lock);
+ list_add_tail(&fence->head, &fctx->pending);
+ spin_unlock(&fctx->lock);
}
- fence->sequence = ++chan->fence.sequence;
-
- kref_get(&fence->refcount);
- spin_lock(&chan->fence.lock);
- list_add_tail(&fence->entry, &chan->fence.pending);
- spin_unlock(&chan->fence.lock);
-
- if (USE_REFCNT(dev)) {
- if (dev_priv->card_type < NV_C0)
- BEGIN_RING(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
- else
- BEGIN_NVC0(chan, 2, 0, NV10_SUBCHAN_REF_CNT, 1);
- } else {
- BEGIN_RING(chan, NvSubSw, 0x0150, 1);
- }
- OUT_RING (chan, fence->sequence);
- FIRE_RING(chan);
-
- return 0;
-}
-
-void
-nouveau_fence_work(struct nouveau_fence *fence,
- void (*work)(void *priv, bool signalled),
- void *priv)
-{
- BUG_ON(fence->work);
-
- spin_lock(&fence->channel->fence.lock);
-
- if (fence->signalled) {
- work(priv, true);
- } else {
- fence->work = work;
- fence->priv = priv;
- }
-
- spin_unlock(&fence->channel->fence.lock);
-}
-
-void
-__nouveau_fence_unref(void **sync_obj)
-{
- struct nouveau_fence *fence = nouveau_fence(*sync_obj);
-
- if (fence)
- kref_put(&fence->refcount, nouveau_fence_del);
- *sync_obj = NULL;
-}
-
-void *
-__nouveau_fence_ref(void *sync_obj)
-{
- struct nouveau_fence *fence = nouveau_fence(sync_obj);
-
- kref_get(&fence->refcount);
- return sync_obj;
+ return ret;
}
bool
-__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
+nouveau_fence_done(struct nouveau_fence *fence)
{
- struct nouveau_fence *fence = nouveau_fence(sync_obj);
- struct nouveau_channel *chan = fence->channel;
-
- if (fence->signalled)
- return true;
-
- nouveau_fence_update(chan);
- return fence->signalled;
+ if (fence->channel)
+ nouveau_fence_update(fence->channel);
+ return !fence->channel;
}
int
-__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
- unsigned long timeout = jiffies + (3 * DRM_HZ);
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
ktime_t t;
int ret = 0;
- while (1) {
- if (__nouveau_fence_signalled(sync_obj, sync_arg))
- break;
-
- if (time_after_eq(jiffies, timeout)) {
+ while (!nouveau_fence_done(fence)) {
+ if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
ret = -EBUSY;
break;
}
- __set_current_state(intr ? TASK_INTERRUPTIBLE
- : TASK_UNINTERRUPTIBLE);
+ __set_current_state(intr ? TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
if (lazy) {
t = ktime_set(0, sleep_time);
schedule_hrtimeout(&t, HRTIMER_MODE_REL);
@@ -261,354 +141,72 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
}
__set_current_state(TASK_RUNNING);
-
return ret;
}
-static struct nouveau_semaphore *
-semaphore_alloc(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_semaphore *sema;
- int size = (dev_priv->chipset < 0x84) ? 4 : 16;
- int ret, i;
-
- if (!USE_SEMA(dev))
- return NULL;
-
- sema = kmalloc(sizeof(*sema), GFP_KERNEL);
- if (!sema)
- goto fail;
-
- ret = drm_mm_pre_get(&dev_priv->fence.heap);
- if (ret)
- goto fail;
-
- spin_lock(&dev_priv->fence.lock);
- sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
- if (sema->mem)
- sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
- spin_unlock(&dev_priv->fence.lock);
-
- if (!sema->mem)
- goto fail;
-
- kref_init(&sema->ref);
- sema->dev = dev;
- for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
- nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
-
- return sema;
-fail:
- kfree(sema);
- return NULL;
-}
-
-static void
-semaphore_free(struct kref *ref)
-{
- struct nouveau_semaphore *sema =
- container_of(ref, struct nouveau_semaphore, ref);
- struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
-
- spin_lock(&dev_priv->fence.lock);
- drm_mm_put_block(sema->mem);
- spin_unlock(&dev_priv->fence.lock);
-
- kfree(sema);
-}
-
-static void
-semaphore_work(void *priv, bool signalled)
-{
- struct nouveau_semaphore *sema = priv;
- struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
-
- if (unlikely(!signalled))
- nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
-
- kref_put(&sema->ref, semaphore_free);
-}
-
-static int
-semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_fence *fence = NULL;
- u64 offset = chan->fence.vma.offset + sema->mem->start;
- int ret;
-
- if (dev_priv->chipset < 0x84) {
- ret = RING_SPACE(chan, 4);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
- OUT_RING (chan, NvSema);
- OUT_RING (chan, offset);
- OUT_RING (chan, 1);
- } else
- if (dev_priv->chipset < 0xc0) {
- ret = RING_SPACE(chan, 7);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram_handle);
- BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 1);
- OUT_RING (chan, 1); /* ACQUIRE_EQ */
- } else {
- ret = RING_SPACE(chan, 5);
- if (ret)
- return ret;
-
- BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 1);
- OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
- }
-
- /* Delay semaphore destruction until its work is done */
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret)
- return ret;
-
- kref_get(&sema->ref);
- nouveau_fence_work(fence, semaphore_work, sema);
- nouveau_fence_unref(&fence);
- return 0;
-}
-
-static int
-semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_fence *fence = NULL;
- u64 offset = chan->fence.vma.offset + sema->mem->start;
- int ret;
-
- if (dev_priv->chipset < 0x84) {
- ret = RING_SPACE(chan, 5);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
- OUT_RING (chan, NvSema);
- OUT_RING (chan, offset);
- BEGIN_RING(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
- OUT_RING (chan, 1);
- } else
- if (dev_priv->chipset < 0xc0) {
- ret = RING_SPACE(chan, 7);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram_handle);
- BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 1);
- OUT_RING (chan, 2); /* RELEASE */
- } else {
- ret = RING_SPACE(chan, 5);
- if (ret)
- return ret;
-
- BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 1);
- OUT_RING (chan, 0x1002); /* RELEASE */
- }
-
- /* Delay semaphore destruction until its work is done */
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret)
- return ret;
-
- kref_get(&sema->ref);
- nouveau_fence_work(fence, semaphore_work, sema);
- nouveau_fence_unref(&fence);
- return 0;
-}
-
int
-nouveau_fence_sync(struct nouveau_fence *fence,
- struct nouveau_channel *wchan)
+nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- struct nouveau_channel *chan = nouveau_fence_channel(fence);
- struct drm_device *dev = wchan->dev;
- struct nouveau_semaphore *sema;
+ struct drm_device *dev = chan->dev;
+ struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+ struct nouveau_channel *prev;
int ret = 0;
- if (likely(!chan || chan == wchan ||
- nouveau_fence_signalled(fence)))
- goto out;
-
- sema = semaphore_alloc(dev);
- if (!sema) {
- /* Early card or broken userspace, fall back to
- * software sync. */
- ret = nouveau_fence_wait(fence, true, false);
- goto out;
- }
-
- /* try to take chan's mutex, if we can't take it right away
- * we have to fallback to software sync to prevent locking
- * order issues
- */
- if (!mutex_trylock(&chan->mutex)) {
- ret = nouveau_fence_wait(fence, true, false);
- goto out_unref;
+ prev = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
+ if (prev) {
+ if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
+ ret = priv->sync(fence, prev, chan);
+ if (unlikely(ret))
+ ret = nouveau_fence_wait(fence, true, false);
+ }
+ nouveau_channel_put_unlocked(&prev);
}
- /* Make wchan wait until it gets signalled */
- ret = semaphore_acquire(wchan, sema);
- if (ret)
- goto out_unlock;
-
- /* Signal the semaphore from chan */
- ret = semaphore_release(chan, sema);
-
-out_unlock:
- mutex_unlock(&chan->mutex);
-out_unref:
- kref_put(&sema->ref, semaphore_free);
-out:
- if (chan)
- nouveau_channel_put_unlocked(&chan);
return ret;
}
-int
-__nouveau_fence_flush(void *sync_obj, void *sync_arg)
+static void
+nouveau_fence_del(struct kref *kref)
{
- return 0;
+ struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
+ kfree(fence);
}
-int
-nouveau_fence_channel_init(struct nouveau_channel *chan)
+void
+nouveau_fence_unref(struct nouveau_fence **pfence)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- if (dev_priv->card_type < NV_C0) {
- /* Create an NV_SW object for various sync purposes */
- ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
- if (ret)
- return ret;
-
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
- OUT_RING (chan, NvSw);
- FIRE_RING (chan);
- }
-
- /* Setup area of memory shared between all channels for x-chan sync */
- if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
- struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
-
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
- mem->start << PAGE_SHIFT,
- mem->size, NV_MEM_ACCESS_RW,
- NV_MEM_TARGET_VRAM, &obj);
- if (ret)
- return ret;
-
- ret = nouveau_ramht_insert(chan, NvSema, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- if (ret)
- return ret;
- } else
- if (USE_SEMA(dev)) {
- /* map fence bo into channel's vm */
- ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
- &chan->fence.vma);
- if (ret)
- return ret;
- }
-
- atomic_set(&chan->fence.last_sequence_irq, 0);
- return 0;
+ if (*pfence)
+ kref_put(&(*pfence)->kref, nouveau_fence_del);
+ *pfence = NULL;
}
-void
-nouveau_fence_channel_fini(struct nouveau_channel *chan)
+struct nouveau_fence *
+nouveau_fence_ref(struct nouveau_fence *fence)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_fence *tmp, *fence;
-
- spin_lock(&chan->fence.lock);
- list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
- fence->signalled = true;
- list_del(&fence->entry);
-
- if (unlikely(fence->work))
- fence->work(fence->priv, false);
-
- kref_put(&fence->refcount, nouveau_fence_del);
- }
- spin_unlock(&chan->fence.lock);
-
- nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
+ kref_get(&fence->kref);
+ return fence;
}
int
-nouveau_fence_init(struct drm_device *dev)
+nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
- int ret;
-
- /* Create a shared VRAM heap for cross-channel sync. */
- if (USE_SEMA(dev)) {
- ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
- 0, 0, &dev_priv->fence.bo);
- if (ret)
- return ret;
+ struct nouveau_fence *fence;
+ int ret = 0;
- ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
- if (ret)
- goto fail;
+ if (unlikely(!chan->engctx[NVOBJ_ENGINE_FENCE]))
+ return -ENODEV;
- ret = nouveau_bo_map(dev_priv->fence.bo);
- if (ret)
- goto fail;
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return -ENOMEM;
+ kref_init(&fence->kref);
- ret = drm_mm_init(&dev_priv->fence.heap, 0,
- dev_priv->fence.bo->bo.mem.size);
+ if (chan) {
+ ret = nouveau_fence_emit(fence, chan);
if (ret)
- goto fail;
-
- spin_lock_init(&dev_priv->fence.lock);
+ nouveau_fence_unref(&fence);
}
- return 0;
-fail:
- nouveau_bo_unmap(dev_priv->fence.bo);
- nouveau_bo_ref(NULL, &dev_priv->fence.bo);
+ *pfence = fence;
return ret;
}
-
-void
-nouveau_fence_fini(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (USE_SEMA(dev)) {
- drm_mm_takedown(&dev_priv->fence.heap);
- nouveau_bo_unmap(dev_priv->fence.bo);
- nouveau_bo_unpin(dev_priv->fence.bo);
- nouveau_bo_ref(NULL, &dev_priv->fence.bo);
- }
-}
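The rework above reduces the fence API to an allocate/emit, wait, unref lifecycle, with completion decided by the per-chipset read() hook rather than a per-channel sequence pair. A minimal caller-side sketch using only the functions declared in the new nouveau_fence.h below (this is essentially what nouveau_channel_idle() now does):

/* Sketch: wait for all previously submitted work on a channel. */
static int example_flush_channel(struct nouveau_channel *chan)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence);	/* allocates and emits */
	if (ret)
		return ret;

	ret = nouveau_fence_wait(fence, false, false);	/* not lazy, not interruptible */
	nouveau_fence_unref(&fence);
	return ret;
}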
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
new file mode 100644
index 000000000000..82ba733393ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -0,0 +1,52 @@
+#ifndef __NOUVEAU_FENCE_H__
+#define __NOUVEAU_FENCE_H__
+
+struct nouveau_fence {
+ struct list_head head;
+ struct kref kref;
+
+ struct nouveau_channel *channel;
+ unsigned long timeout;
+ u32 sequence;
+
+ void (*work)(void *priv, bool signalled);
+ void *priv;
+};
+
+int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **);
+struct nouveau_fence *
+nouveau_fence_ref(struct nouveau_fence *);
+void nouveau_fence_unref(struct nouveau_fence **);
+
+int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
+bool nouveau_fence_done(struct nouveau_fence *);
+int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
+int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
+void nouveau_fence_idle(struct nouveau_channel *);
+void nouveau_fence_update(struct nouveau_channel *);
+
+struct nouveau_fence_chan {
+ struct list_head pending;
+ spinlock_t lock;
+ u32 sequence;
+};
+
+struct nouveau_fence_priv {
+ struct nouveau_exec_engine engine;
+ int (*emit)(struct nouveau_fence *);
+ int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
+ struct nouveau_channel *);
+ u32 (*read)(struct nouveau_channel *);
+};
+
+void nouveau_fence_context_new(struct nouveau_fence_chan *);
+void nouveau_fence_context_del(struct nouveau_fence_chan *);
+
+int nv04_fence_create(struct drm_device *dev);
+int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
+
+int nv10_fence_create(struct drm_device *dev);
+int nv84_fence_create(struct drm_device *dev);
+int nvc0_fence_create(struct drm_device *dev);
+
+#endif
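The header above splits fencing into a chipset-independent core (nouveau_fence.c) and per-chipset backends that only supply emit/sync/read plus a fence context per channel. A hypothetical backend outline; apart from nouveau_fence_chan, nouveau_fence_context_new(), chan->engctx[] and the callback signatures, every name here is invented for illustration:

/* Hypothetical per-chipset backend skeleton (names invented). */
static int
example_fence_emit(struct nouveau_fence *fence)
{
	/* write fence->sequence where the matching read() can observe it */
	return 0;
}

static u32
example_fence_read(struct nouveau_channel *chan)
{
	/* report the highest sequence the channel has retired */
	return 0;
}

static int
example_fence_context_new(struct nouveau_channel *chan, int engine)
{
	struct nouveau_fence_chan *fctx;

	fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return -ENOMEM;
	nouveau_fence_context_new(fctx);	/* init pending list + lock */
	chan->engctx[engine] = fctx;
	return 0;
}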
diff --git a/drivers/gpu/drm/nouveau/nouveau_fifo.h b/drivers/gpu/drm/nouveau/nouveau_fifo.h
new file mode 100644
index 000000000000..ce99cab2f257
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fifo.h
@@ -0,0 +1,32 @@
+#ifndef __NOUVEAU_FIFO_H__
+#define __NOUVEAU_FIFO_H__
+
+struct nouveau_fifo_priv {
+ struct nouveau_exec_engine base;
+ u32 channels;
+};
+
+struct nouveau_fifo_chan {
+};
+
+bool nv04_fifo_cache_pull(struct drm_device *, bool);
+void nv04_fifo_context_del(struct nouveau_channel *, int);
+int nv04_fifo_fini(struct drm_device *, int, bool);
+int nv04_fifo_init(struct drm_device *, int);
+void nv04_fifo_isr(struct drm_device *);
+void nv04_fifo_destroy(struct drm_device *, int);
+
+void nv50_fifo_playlist_update(struct drm_device *);
+void nv50_fifo_destroy(struct drm_device *, int);
+void nv50_fifo_tlb_flush(struct drm_device *, int);
+
+int nv04_fifo_create(struct drm_device *);
+int nv10_fifo_create(struct drm_device *);
+int nv17_fifo_create(struct drm_device *);
+int nv40_fifo_create(struct drm_device *);
+int nv50_fifo_create(struct drm_device *);
+int nv84_fifo_create(struct drm_device *);
+int nvc0_fifo_create(struct drm_device *);
+int nve0_fifo_create(struct drm_device *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index ed52a6f41613..30f542316944 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -23,12 +23,14 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <linux/dma-buf.h>
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
+#include "nouveau_fence.h"
#define nouveau_gem_pushbuf_sync(chan) 0
@@ -53,6 +55,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
nouveau_bo_unpin(nvbo);
}
+ if (gem->import_attach)
+ drm_prime_gem_destroy(gem, nvbo->bo.sg);
+
ttm_bo_unref(&bo);
drm_gem_object_release(gem);
@@ -139,7 +144,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
flags |= TTM_PL_FLAG_SYSTEM;
ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
- tile_flags, pnvbo);
+ tile_flags, NULL, pnvbo);
if (ret)
return ret;
nvbo = *pnvbo;
@@ -704,7 +709,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
if (chan->dma.ib_max) {
- ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
+ ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
if (ret) {
NV_INFO(dev, "nv50cal_space: %d\n", ret);
goto out;
@@ -774,7 +779,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
}
- ret = nouveau_fence_new(chan, &fence, true);
+ ret = nouveau_fence_new(chan, &fence);
if (ret) {
NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
index a580cc62337a..82c19e82ff02 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gpio.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gpio.c
@@ -387,7 +387,7 @@ nouveau_gpio_reset(struct drm_device *dev)
if (dev_priv->card_type >= NV_D0) {
nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
if (unk1--)
- nv_mask(dev, 0x00d640 + (unk1 * 4), 0xff, line);
+ nv_mask(dev, 0x00d740 + (unk1 * 4), 0xff, line);
} else
if (dev_priv->card_type >= NV_50) {
static const u32 regs[] = { 0xe100, 0xe28c };
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
index 86c2e374e938..b0795ececbda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.h
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
@@ -18,7 +18,6 @@ struct nouveau_grctx {
uint32_t ctxvals_base;
};
-#ifdef CP_CTX
static inline void
cp_out(struct nouveau_grctx *ctx, uint32_t inst)
{
@@ -88,10 +87,8 @@ _cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
(state ? 0 : CP_BRA_IF_CLEAR));
}
#define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
-#ifdef CP_BRA_MOD
#define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
#define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
-#endif
static inline void
_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
@@ -128,6 +125,5 @@ gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
nv_wo32(ctx->data, reg * 4, val);
}
-#endif
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index ba896e54b799..b87ad3bd7739 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -1018,11 +1018,6 @@ nv_load_state_ext(struct drm_device *dev, int head,
}
NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
-
- /* Enable vblank interrupts. */
- NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
- (dev->vblank_enabled[head] ? 1 : 0));
- NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index b08065f981df..5b498ea32e14 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -39,6 +39,8 @@
#include "nouveau_pm.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
+#include "nouveau_fifo.h"
+#include "nouveau_fence.h"
/*
* NV10-NV40 tiling helpers
@@ -50,7 +52,6 @@ nv10_mem_update_tile_region(struct drm_device *dev,
uint32_t size, uint32_t pitch, uint32_t flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
int i = tile - dev_priv->tile.reg, j;
unsigned long save;
@@ -64,8 +65,8 @@ nv10_mem_update_tile_region(struct drm_device *dev,
pfb->init_tile_region(dev, i, addr, size, pitch, flags);
spin_lock_irqsave(&dev_priv->context_switch_lock, save);
- pfifo->reassign(dev, false);
- pfifo->cache_pull(dev, false);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+ nv04_fifo_cache_pull(dev, false);
nouveau_wait_for_idle(dev);
@@ -75,8 +76,8 @@ nv10_mem_update_tile_region(struct drm_device *dev,
dev_priv->eng[j]->set_tile_region(dev, i);
}
- pfifo->cache_pull(dev, true);
- pfifo->reassign(dev, true);
+ nv04_fifo_cache_pull(dev, true);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}
@@ -89,7 +90,7 @@ nv10_mem_get_tile_region(struct drm_device *dev, int i)
spin_lock(&dev_priv->tile.lock);
if (!tile->used &&
- (!tile->fence || nouveau_fence_signalled(tile->fence)))
+ (!tile->fence || nouveau_fence_done(tile->fence)))
tile->used = true;
else
tile = NULL;
@@ -416,7 +417,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
if (dev_priv->card_type < NV_50) {
ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
- 0, 0, &dev_priv->vga_ram);
+ 0, 0, NULL, &dev_priv->vga_ram);
if (ret == 0)
ret = nouveau_bo_pin(dev_priv->vga_ram,
TTM_PL_FLAG_VRAM);
@@ -843,6 +844,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
break;
case NV_C0:
+ case NV_D0:
ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
break;
default:
@@ -977,6 +979,8 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
break;
case NV_MEM_TYPE_DDR3:
tDLLK = 12000;
+ tCKSRE = 2000;
+ tXS = 1000;
mr1_dlloff = 0x00000001;
break;
case NV_MEM_TYPE_GDDR3:
@@ -1023,6 +1027,7 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
exec->refresh_self(exec, false);
exec->refresh_auto(exec, true);
exec->wait(exec, tXS);
+ exec->wait(exec, tXS);
/* update MRs */
if (mr[2] != info->mr[2]) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index cc419fae794b..b190cc01c820 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -34,9 +34,10 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
+#include "nouveau_software.h"
#include "nouveau_vm.h"
-#include "nv50_display.h"
struct nouveau_gpuobj_method {
struct list_head head;
@@ -120,12 +121,13 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
u32 class, u32 mthd, u32 data)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_channel *chan = NULL;
unsigned long flags;
int ret = -EINVAL;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (chid >= 0 && chid < dev_priv->engine.fifo.channels)
+ if (chid >= 0 && chid < pfifo->channels)
chan = dev_priv->channels.ptr[chid];
if (chan)
ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
@@ -133,37 +135,6 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
return ret;
}
-/* NVidia uses context objects to drive drawing operations.
-
- Context objects can be selected into 8 subchannels in the FIFO,
- and then used via DMA command buffers.
-
- A context object is referenced by a user defined handle (CARD32). The HW
- looks up graphics objects in a hash table in the instance RAM.
-
- An entry in the hash table consists of 2 CARD32. The first CARD32 contains
- the handle, the second one a bitfield, that contains the address of the
- object in instance RAM.
-
- The format of the second CARD32 seems to be:
-
- NV4 to NV30:
-
- 15: 0 instance_addr >> 4
- 17:16 engine (here uses 1 = graphics)
- 28:24 channel id (here uses 0)
- 31 valid (use 1)
-
- NV40:
-
- 15: 0 instance_addr >> 4 (maybe 19-0)
- 21:20 engine (here uses 1 = graphics)
- I'm unsure about the other bits, but using 0 seems to work.
-
- The key into the hash table depends on the object handle and channel id and
- is given as:
-*/
-
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
uint32_t size, int align, uint32_t flags,
@@ -267,7 +238,7 @@ nouveau_gpuobj_takedown(struct drm_device *dev)
kfree(oc);
}
- BUG_ON(!list_empty(&dev_priv->gpuobj_list));
+ WARN_ON(!list_empty(&dev_priv->gpuobj_list));
}
@@ -361,34 +332,6 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
return 0;
}
-/*
- DMA objects are used to reference a piece of memory in the
- framebuffer, PCI or AGP address space. Each object is 16 bytes big
- and looks as follows:
-
- entry[0]
- 11:0 class (seems like I can always use 0 here)
- 12 page table present?
- 13 page entry linear?
- 15:14 access: 0 rw, 1 ro, 2 wo
- 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
- 31:20 dma adjust (bits 0-11 of the address)
- entry[1]
- dma limit (size of transfer)
- entry[X]
- 1 0 readonly, 1 readwrite
- 31:12 dma frame address of the page (bits 12-31 of the address)
- entry[N]
- page table terminator, same value as the first pte, as does nvidia
- rivatv uses 0xffffffff
-
- Non linear page tables need a list of frame addresses afterwards,
- the rivatv project has some info on this.
-
- The method below creates a DMA object in instance RAM and returns a handle
- to it that can be used to set up context objects.
-*/
-
void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
u64 base, u64 size, int target, int access,
@@ -540,82 +483,6 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
return 0;
}
-/* Context objects in the instance RAM have the following structure.
- * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.
-
- NV4 - NV30:
-
- entry[0]
- 11:0 class
- 12 chroma key enable
- 13 user clip enable
- 14 swizzle enable
- 17:15 patch config:
- scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
- 18 synchronize enable
- 19 endian: 1 big, 0 little
- 21:20 dither mode
- 23 single step enable
- 24 patch status: 0 invalid, 1 valid
- 25 context_surface 0: 1 valid
- 26 context surface 1: 1 valid
- 27 context pattern: 1 valid
- 28 context rop: 1 valid
- 29,30 context beta, beta4
- entry[1]
- 7:0 mono format
- 15:8 color format
- 31:16 notify instance address
- entry[2]
- 15:0 dma 0 instance address
- 31:16 dma 1 instance address
- entry[3]
- dma method traps
-
- NV40:
- No idea what the exact format is. Here's what can be deducted:
-
- entry[0]:
- 11:0 class (maybe uses more bits here?)
- 17 user clip enable
- 21:19 patch config
- 25 patch status valid ?
- entry[1]:
- 15:0 DMA notifier (maybe 20:0)
- entry[2]:
- 15:0 DMA 0 instance (maybe 20:0)
- 24 big endian
- entry[3]:
- 15:0 DMA 1 instance (maybe 20:0)
- entry[4]:
- entry[5]:
- set to 0?
-*/
-static int
-nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_gpuobj *gpuobj;
- int ret;
-
- gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
- if (!gpuobj)
- return -ENOMEM;
- gpuobj->dev = chan->dev;
- gpuobj->engine = NVOBJ_ENGINE_SW;
- gpuobj->class = class;
- kref_init(&gpuobj->refcount);
- gpuobj->cinst = 0x40;
-
- spin_lock(&dev_priv->ramin_lock);
- list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
- spin_unlock(&dev_priv->ramin_lock);
-
- ret = nouveau_ramht_insert(chan, handle, gpuobj);
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return ret;
-}
-
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
@@ -632,9 +499,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
if (oc->id != class)
continue;
- if (oc->engine == NVOBJ_ENGINE_SW)
- return nouveau_gpuobj_sw_new(chan, handle, class);
-
if (!chan->engctx[oc->engine]) {
ret = eng->context_new(chan, oc->engine);
if (ret)
@@ -644,7 +508,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
return eng->object_new(chan, oc->engine, handle, class);
}
- NV_ERROR(dev, "illegal object class: 0x%x\n", class);
return -EINVAL;
}
@@ -693,11 +556,10 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
static int
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj *pgd = NULL;
struct nouveau_vm_pgd *vpgd;
- int ret, i;
+ int ret;
ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
if (ret)
@@ -722,19 +584,6 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
nv_wo32(chan->ramin, 0x0208, 0xffffffff);
nv_wo32(chan->ramin, 0x020c, 0x000000ff);
- /* map display semaphore buffers into channel's vm */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo;
- if (dev_priv->card_type >= NV_D0)
- bo = nvd0_display_crtc_sema(dev, i);
- else
- bo = nv50_display(dev)->crtc[i].sem.bo;
-
- ret = nouveau_bo_vma_add(bo, chan->vm, &chan->dispc_vma[i]);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -747,7 +596,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
- int ret, i;
+ int ret;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
if (dev_priv->card_type >= NV_C0)
@@ -795,25 +644,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
nouveau_gpuobj_ref(NULL, &ramht);
if (ret)
return ret;
-
- /* dma objects for display sync channel semaphore blocks */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_gpuobj *sem = NULL;
- struct nv50_display_crtc *dispc =
- &nv50_display(dev)->crtc[i];
- u64 offset = dispc->sem.bo->bo.offset;
-
- ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
- NV_MEM_ACCESS_RW,
- NV_MEM_TARGET_VRAM, &sem);
- if (ret)
- return ret;
-
- ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
- nouveau_gpuobj_ref(NULL, &sem);
- if (ret)
- return ret;
- }
}
/* VRAM ctxdma */
@@ -873,25 +703,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- if (dev_priv->card_type >= NV_D0) {
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &chan->dispc_vma[i]);
- }
- } else
- if (dev_priv->card_type >= NV_50) {
- struct nv50_display *disp = nv50_display(dev);
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nv50_display_crtc *dispc = &disp->crtc[i];
- nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
- }
- }
+ NV_DEBUG(chan->dev, "ch%d\n", chan->id);
nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
@@ -956,6 +768,17 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
if (init->handle == ~0)
return -EINVAL;
+ /* compatibility with userspace that assumes 506e for all chipsets */
+ if (init->class == 0x506e) {
+ init->class = nouveau_software_class(dev);
+ if (init->class == 0x906e)
+ return 0;
+ } else
+ if (init->class == 0x906e) {
+ NV_ERROR(dev, "906e not supported yet\n");
+ return -EINVAL;
+ }
+
chan = nouveau_channel_get(file_priv, init->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 69a528d106e6..ea6acf1c4a78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -83,7 +83,7 @@ nouveau_perf_entry(struct drm_device *dev, int idx,
return NULL;
}
-static u8 *
+u8 *
nouveau_perf_rammap(struct drm_device *dev, u32 freq,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 3f82dfea61dd..07cac72c72b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -61,8 +61,10 @@ int nouveau_voltage_gpio_set(struct drm_device *, int voltage);
/* nouveau_perf.c */
void nouveau_perf_init(struct drm_device *);
void nouveau_perf_fini(struct drm_device *);
-u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
+u8 *nouveau_perf_rammap(struct drm_device *, u32 freq, u8 *ver,
+ u8 *hdr, u8 *cnt, u8 *len);
u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len);
+u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
/* nouveau_mem.c */
void nouveau_mem_timing_init(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
new file mode 100644
index 000000000000..a89240e5fb29
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -0,0 +1,208 @@
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+
+#include <linux/dma-buf.h>
+
+static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct nouveau_bo *nvbo = attachment->dmabuf->priv;
+ struct drm_device *dev = nvbo->gem->dev;
+ int npages = nvbo->bo.num_pages;
+ struct sg_table *sg;
+ int nents;
+
+ mutex_lock(&dev->struct_mutex);
+ sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
+ nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ mutex_unlock(&dev->struct_mutex);
+ return sg;
+}
+
+static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sg, enum dma_data_direction dir)
+{
+ dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct nouveau_bo *nvbo = dma_buf->priv;
+
+ if (nvbo->gem->export_dma_buf == dma_buf) {
+ nvbo->gem->export_dma_buf = NULL;
+ drm_gem_object_unreference_unlocked(nvbo->gem);
+ }
+}
+
+static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+ struct nouveau_bo *nvbo = dma_buf->priv;
+ struct drm_device *dev = nvbo->gem->dev;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ if (nvbo->vmapping_count) {
+ nvbo->vmapping_count++;
+ goto out_unlock;
+ }
+
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
+ &nvbo->dma_buf_vmap);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
+ }
+ nvbo->vmapping_count = 1;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return nvbo->dma_buf_vmap.virtual;
+}
+
+static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct nouveau_bo *nvbo = dma_buf->priv;
+ struct drm_device *dev = nvbo->gem->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ nvbo->vmapping_count--;
+ if (nvbo->vmapping_count == 0) {
+ ttm_bo_kunmap(&nvbo->dma_buf_vmap);
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static const struct dma_buf_ops nouveau_dmabuf_ops = {
+ .map_dma_buf = nouveau_gem_map_dma_buf,
+ .unmap_dma_buf = nouveau_gem_unmap_dma_buf,
+ .release = nouveau_gem_dmabuf_release,
+ .kmap = nouveau_gem_kmap,
+ .kmap_atomic = nouveau_gem_kmap_atomic,
+ .kunmap = nouveau_gem_kunmap,
+ .kunmap_atomic = nouveau_gem_kunmap_atomic,
+ .mmap = nouveau_gem_prime_mmap,
+ .vmap = nouveau_gem_prime_vmap,
+ .vunmap = nouveau_gem_prime_vunmap,
+};
+
+static int
+nouveau_prime_new(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg,
+ struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ u32 flags = 0;
+ int ret;
+
+ flags = TTM_PL_FLAG_TT;
+
+ ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
+ sg, pnvbo);
+ if (ret)
+ return ret;
+ nvbo = *pnvbo;
+
+ /* we restrict allowed domains on nv50+ to only the types
+ * that were requested at creation time. not possible on
+ * earlier chips without busting the ABI.
+ */
+ nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
+ nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
+ if (!nvbo->gem) {
+ nouveau_bo_ref(NULL, pnvbo);
+ return -ENOMEM;
+ }
+
+ nvbo->gem->driver_private = nvbo;
+ return 0;
+}
+
+struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
+ int ret = 0;
+
+ /* pin buffer into GTT */
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
+}
+
+struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ if (dma_buf->ops == &nouveau_dmabuf_ops) {
+ nvbo = dma_buf->priv;
+ if (nvbo->gem) {
+ if (nvbo->gem->dev == dev) {
+ drm_gem_object_reference(nvbo->gem);
+ return nvbo->gem;
+ }
+ }
+ }
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_PTR(PTR_ERR(attach));
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
+ if (ret)
+ goto fail_unmap;
+
+ nvbo->gem->import_attach = attach;
+
+ return nvbo->gem;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 47f245edf538..38483a042bc2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -290,7 +290,10 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
struct nouveau_mem *node = mem->mm_node;
/* noop: bound in move_notify() */
- node->pages = nvbe->ttm.dma_address;
+ if (ttm->sg) {
+ node->sg = ttm->sg;
+ } else
+ node->pages = nvbe->ttm.dma_address;
return 0;
}
@@ -338,10 +341,10 @@ nouveau_sgdma_init(struct drm_device *dev)
u32 aper_size, align;
int ret;
- if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
+ if (dev_priv->card_type >= NV_40)
aper_size = 512 * 1024 * 1024;
else
- aper_size = 64 * 1024 * 1024;
+ aper_size = 128 * 1024 * 1024;
/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
* christmas. The cards before it have them, the cards after
diff --git a/drivers/gpu/drm/nouveau/nouveau_software.h b/drivers/gpu/drm/nouveau/nouveau_software.h
new file mode 100644
index 000000000000..e60bc6ce9003
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_software.h
@@ -0,0 +1,69 @@
+#ifndef __NOUVEAU_SOFTWARE_H__
+#define __NOUVEAU_SOFTWARE_H__
+
+struct nouveau_software_priv {
+ struct nouveau_exec_engine base;
+ struct list_head vblank;
+};
+
+struct nouveau_software_chan {
+ struct list_head flip;
+ struct {
+ struct list_head list;
+ struct nouveau_bo *bo;
+ u32 offset;
+ u32 value;
+ u32 head;
+ } vblank;
+};
+
+static inline void
+nouveau_software_vblank(struct drm_device *dev, int crtc)
+{
+ struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
+ struct nouveau_software_chan *pch, *tmp;
+
+ list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) {
+ if (pch->vblank.head != crtc)
+ continue;
+
+ nouveau_bo_wr32(pch->vblank.bo, pch->vblank.offset,
+ pch->vblank.value);
+ list_del(&pch->vblank.list);
+ drm_vblank_put(dev, crtc);
+ }
+}
+
+static inline void
+nouveau_software_context_new(struct nouveau_software_chan *pch)
+{
+ INIT_LIST_HEAD(&pch->flip);
+}
+
+static inline void
+nouveau_software_create(struct nouveau_software_priv *psw)
+{
+ INIT_LIST_HEAD(&psw->vblank);
+}
+
+static inline u16
+nouveau_software_class(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ if (dev_priv->card_type <= NV_04)
+ return 0x006e;
+ if (dev_priv->card_type <= NV_40)
+ return 0x016e;
+ if (dev_priv->card_type <= NV_50)
+ return 0x506e;
+ if (dev_priv->card_type <= NV_E0)
+ return 0x906e;
+ return 0x0000;
+}
+
+int nv04_software_create(struct drm_device *);
+int nv50_software_create(struct drm_device *);
+int nvc0_software_create(struct drm_device *);
+u64 nvc0_software_crtc(struct nouveau_channel *, int crtc);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index c2a8511e855a..19706f0532ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -39,6 +39,9 @@
#include "nouveau_gpio.h"
#include "nouveau_pm.h"
#include "nv50_display.h"
+#include "nouveau_fifo.h"
+#include "nouveau_fence.h"
+#include "nouveau_software.h"
static void nouveau_stub_takedown(struct drm_device *dev) {}
static int nouveau_stub_init(struct drm_device *dev) { return 0; }
@@ -66,18 +69,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv04_fb_init;
engine->fb.takedown = nv04_fb_takedown;
- engine->fifo.channels = 16;
- engine->fifo.init = nv04_fifo_init;
- engine->fifo.takedown = nv04_fifo_fini;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_pull = nv04_fifo_cache_pull;
- engine->fifo.channel_id = nv04_fifo_channel_id;
- engine->fifo.create_context = nv04_fifo_create_context;
- engine->fifo.destroy_context = nv04_fifo_destroy_context;
- engine->fifo.load_context = nv04_fifo_load_context;
- engine->fifo.unload_context = nv04_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
@@ -111,18 +102,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fb.init_tile_region = nv10_fb_init_tile_region;
engine->fb.set_tile_region = nv10_fb_set_tile_region;
engine->fb.free_tile_region = nv10_fb_free_tile_region;
- engine->fifo.channels = 32;
- engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nv04_fifo_fini;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_pull = nv04_fifo_cache_pull;
- engine->fifo.channel_id = nv10_fifo_channel_id;
- engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv04_fifo_destroy_context;
- engine->fifo.load_context = nv10_fifo_load_context;
- engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
@@ -162,18 +141,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fb.init_tile_region = nv20_fb_init_tile_region;
engine->fb.set_tile_region = nv20_fb_set_tile_region;
engine->fb.free_tile_region = nv20_fb_free_tile_region;
- engine->fifo.channels = 32;
- engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nv04_fifo_fini;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_pull = nv04_fifo_cache_pull;
- engine->fifo.channel_id = nv10_fifo_channel_id;
- engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv04_fifo_destroy_context;
- engine->fifo.load_context = nv10_fifo_load_context;
- engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
@@ -209,18 +176,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fb.init_tile_region = nv30_fb_init_tile_region;
engine->fb.set_tile_region = nv10_fb_set_tile_region;
engine->fb.free_tile_region = nv30_fb_free_tile_region;
- engine->fifo.channels = 32;
- engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nv04_fifo_fini;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_pull = nv04_fifo_cache_pull;
- engine->fifo.channel_id = nv10_fifo_channel_id;
- engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv04_fifo_destroy_context;
- engine->fifo.load_context = nv10_fifo_load_context;
- engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
@@ -259,18 +214,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fb.init_tile_region = nv30_fb_init_tile_region;
engine->fb.set_tile_region = nv40_fb_set_tile_region;
engine->fb.free_tile_region = nv30_fb_free_tile_region;
- engine->fifo.channels = 32;
- engine->fifo.init = nv40_fifo_init;
- engine->fifo.takedown = nv04_fifo_fini;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_pull = nv04_fifo_cache_pull;
- engine->fifo.channel_id = nv10_fifo_channel_id;
- engine->fifo.create_context = nv40_fifo_create_context;
- engine->fifo.destroy_context = nv04_fifo_destroy_context;
- engine->fifo.load_context = nv40_fifo_load_context;
- engine->fifo.unload_context = nv40_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
@@ -317,18 +260,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv50_fb_init;
engine->fb.takedown = nv50_fb_takedown;
- engine->fifo.channels = 128;
- engine->fifo.init = nv50_fifo_init;
- engine->fifo.takedown = nv50_fifo_takedown;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.channel_id = nv50_fifo_channel_id;
- engine->fifo.create_context = nv50_fifo_create_context;
- engine->fifo.destroy_context = nv50_fifo_destroy_context;
- engine->fifo.load_context = nv50_fifo_load_context;
- engine->fifo.unload_context = nv50_fifo_unload_context;
- engine->fifo.tlb_flush = nv50_fifo_tlb_flush;
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
@@ -392,17 +323,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nvc0_fb_init;
engine->fb.takedown = nvc0_fb_takedown;
- engine->fifo.channels = 128;
- engine->fifo.init = nvc0_fifo_init;
- engine->fifo.takedown = nvc0_fifo_takedown;
- engine->fifo.disable = nvc0_fifo_disable;
- engine->fifo.enable = nvc0_fifo_enable;
- engine->fifo.reassign = nvc0_fifo_reassign;
- engine->fifo.channel_id = nvc0_fifo_channel_id;
- engine->fifo.create_context = nvc0_fifo_create_context;
- engine->fifo.destroy_context = nvc0_fifo_destroy_context;
- engine->fifo.load_context = nvc0_fifo_load_context;
- engine->fifo.unload_context = nvc0_fifo_unload_context;
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
@@ -445,17 +365,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nvc0_fb_init;
engine->fb.takedown = nvc0_fb_takedown;
- engine->fifo.channels = 128;
- engine->fifo.init = nvc0_fifo_init;
- engine->fifo.takedown = nvc0_fifo_takedown;
- engine->fifo.disable = nvc0_fifo_disable;
- engine->fifo.enable = nvc0_fifo_enable;
- engine->fifo.reassign = nvc0_fifo_reassign;
- engine->fifo.channel_id = nvc0_fifo_channel_id;
- engine->fifo.create_context = nvc0_fifo_create_context;
- engine->fifo.destroy_context = nvc0_fifo_destroy_context;
- engine->fifo.load_context = nvc0_fifo_load_context;
- engine->fifo.unload_context = nvc0_fifo_unload_context;
engine->display.early_init = nouveau_stub_init;
engine->display.late_takedown = nouveau_stub_takedown;
engine->display.create = nvd0_display_create;
@@ -496,13 +405,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nvc0_fb_init;
engine->fb.takedown = nvc0_fb_takedown;
- engine->fifo.channels = 0;
- engine->fifo.init = nouveau_stub_init;
- engine->fifo.takedown = nouveau_stub_takedown;
- engine->fifo.disable = nvc0_fifo_disable;
- engine->fifo.enable = nvc0_fifo_enable;
- engine->fifo.reassign = nvc0_fifo_reassign;
- engine->fifo.unload_context = nouveau_stub_init;
engine->display.early_init = nouveau_stub_init;
engine->display.late_takedown = nouveau_stub_takedown;
engine->display.create = nvd0_display_create;
@@ -607,61 +509,24 @@ nouveau_card_channel_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan;
- int ret, oclass;
+ int ret;
ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
dev_priv->channel = chan;
if (ret)
return ret;
-
mutex_unlock(&dev_priv->channel->mutex);
- if (dev_priv->card_type <= NV_50) {
- if (dev_priv->card_type < NV_50)
- oclass = 0x0039;
- else
- oclass = 0x5039;
-
- ret = nouveau_gpuobj_gr_new(chan, NvM2MF, oclass);
- if (ret)
- goto error;
-
- ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
- &chan->m2mf_ntfy);
- if (ret)
- goto error;
-
- ret = RING_SPACE(chan, 6);
- if (ret)
- goto error;
-
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
- OUT_RING (chan, NvM2MF);
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
- OUT_RING (chan, NvNotify0);
- OUT_RING (chan, chan->vram_handle);
- OUT_RING (chan, chan->gart_handle);
- } else
- if (dev_priv->card_type <= NV_D0) {
- ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
- if (ret)
- goto error;
-
- ret = RING_SPACE(chan, 2);
- if (ret)
- goto error;
-
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
- OUT_RING (chan, 0x00009039);
- }
-
- FIRE_RING (chan);
-error:
- if (ret)
- nouveau_card_channel_fini(dev);
- return ret;
+ nouveau_bo_move_init(chan);
+ return 0;
}
+static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = {
+ .set_gpu_state = nouveau_switcheroo_set_state,
+ .reprobe = nouveau_switcheroo_reprobe,
+ .can_switch = nouveau_switcheroo_can_switch,
+};
+
int
nouveau_card_init(struct drm_device *dev)
{
@@ -670,9 +535,7 @@ nouveau_card_init(struct drm_device *dev)
int ret, e = 0;
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
- vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
- nouveau_switcheroo_reprobe,
- nouveau_switcheroo_can_switch);
+ vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
/* Initialise internal driver API hooks */
ret = nouveau_init_engine_ptrs(dev);
@@ -745,6 +608,81 @@ nouveau_card_init(struct drm_device *dev)
if (!dev_priv->noaccel) {
switch (dev_priv->card_type) {
case NV_04:
+ nv04_fifo_create(dev);
+ break;
+ case NV_10:
+ case NV_20:
+ case NV_30:
+ if (dev_priv->chipset < 0x17)
+ nv10_fifo_create(dev);
+ else
+ nv17_fifo_create(dev);
+ break;
+ case NV_40:
+ nv40_fifo_create(dev);
+ break;
+ case NV_50:
+ if (dev_priv->chipset == 0x50)
+ nv50_fifo_create(dev);
+ else
+ nv84_fifo_create(dev);
+ break;
+ case NV_C0:
+ case NV_D0:
+ nvc0_fifo_create(dev);
+ break;
+ case NV_E0:
+ nve0_fifo_create(dev);
+ break;
+ default:
+ break;
+ }
+
+ switch (dev_priv->card_type) {
+ case NV_04:
+ nv04_fence_create(dev);
+ break;
+ case NV_10:
+ case NV_20:
+ case NV_30:
+ case NV_40:
+ case NV_50:
+ if (dev_priv->chipset < 0x84)
+ nv10_fence_create(dev);
+ else
+ nv84_fence_create(dev);
+ break;
+ case NV_C0:
+ case NV_D0:
+ case NV_E0:
+ nvc0_fence_create(dev);
+ break;
+ default:
+ break;
+ }
+
+ switch (dev_priv->card_type) {
+ case NV_04:
+ case NV_10:
+ case NV_20:
+ case NV_30:
+ case NV_40:
+ nv04_software_create(dev);
+ break;
+ case NV_50:
+ nv50_software_create(dev);
+ break;
+ case NV_C0:
+ case NV_D0:
+ case NV_E0:
+ nvc0_software_create(dev);
+ break;
+ default:
+ break;
+ }
+
+ switch (dev_priv->card_type) {
+ case NV_04:
nv04_graph_create(dev);
break;
case NV_10:
@@ -764,6 +702,9 @@ nouveau_card_init(struct drm_device *dev)
case NV_D0:
nvc0_graph_create(dev);
break;
+ case NV_E0:
+ nve0_graph_create(dev);
+ break;
default:
break;
}
@@ -796,8 +737,9 @@ nouveau_card_init(struct drm_device *dev)
}
break;
case NV_C0:
- nvc0_copy_create(dev, 0);
nvc0_copy_create(dev, 1);
+ case NV_D0:
+ nvc0_copy_create(dev, 0);
break;
default:
break;
@@ -830,16 +772,11 @@ nouveau_card_init(struct drm_device *dev)
goto out_engine;
}
}
-
- /* PFIFO */
- ret = engine->fifo.init(dev);
- if (ret)
- goto out_engine;
}
ret = nouveau_irq_init(dev);
if (ret)
- goto out_fifo;
+ goto out_engine;
ret = nouveau_display_create(dev);
if (ret)
@@ -848,14 +785,10 @@ nouveau_card_init(struct drm_device *dev)
nouveau_backlight_init(dev);
nouveau_pm_init(dev);
- ret = nouveau_fence_init(dev);
- if (ret)
- goto out_pm;
-
if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
ret = nouveau_card_channel_init(dev);
if (ret)
- goto out_fence;
+ goto out_pm;
}
if (dev->mode_config.num_crtc) {
@@ -870,17 +803,12 @@ nouveau_card_init(struct drm_device *dev)
out_chan:
nouveau_card_channel_fini(dev);
-out_fence:
- nouveau_fence_fini(dev);
out_pm:
nouveau_pm_fini(dev);
nouveau_backlight_exit(dev);
nouveau_display_destroy(dev);
out_irq:
nouveau_irq_fini(dev);
-out_fifo:
- if (!dev_priv->noaccel)
- engine->fifo.takedown(dev);
out_engine:
if (!dev_priv->noaccel) {
for (e = e - 1; e >= 0; e--) {
@@ -912,6 +840,7 @@ out_bios:
out_display_early:
engine->display.late_takedown(dev);
out:
+ vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
return ret;
}
@@ -928,13 +857,11 @@ static void nouveau_card_takedown(struct drm_device *dev)
}
nouveau_card_channel_fini(dev);
- nouveau_fence_fini(dev);
nouveau_pm_fini(dev);
nouveau_backlight_exit(dev);
nouveau_display_destroy(dev);
if (!dev_priv->noaccel) {
- engine->fifo.takedown(dev);
for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
if (dev_priv->eng[e]) {
dev_priv->eng[e]->fini(dev, e, false);
@@ -969,6 +896,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
nouveau_irq_fini(dev);
+ vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
@@ -1176,7 +1104,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
goto err_priv;
}
- NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
+ NV_INFO(dev, "Detected an NV%02x generation card (0x%08x)\n",
dev_priv->card_type, reg0);
/* map the mmio regs, limiting the amount to preserve vmap space */
@@ -1219,6 +1147,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
if (nouveau_noaccel == -1) {
switch (dev_priv->chipset) {
case 0xd9: /* known broken */
+ case 0xe4: /* needs binary driver firmware */
+ case 0xe7: /* needs binary driver firmware */
NV_INFO(dev, "acceleration disabled by default, pass "
"noaccel=0 to force enable\n");
dev_priv->noaccel = true;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 2bf6c0350b4b..11edd5e91a0a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -77,6 +77,63 @@ nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
}
void
+nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
+ struct nouveau_mem *mem)
+{
+ struct nouveau_vm *vm = vma->vm;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 num = length >> vma->node->type;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ unsigned m, sglen;
+ u32 end, len;
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+ sglen = sg_dma_len(sg) >> PAGE_SHIFT;
+
+ end = pte + sglen;
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ for (m = 0; m < len; m++) {
+ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+
+ vm->map_sg(vma, pgt, mem, pte, 1, &addr);
+ num--;
+ pte++;
+
+ if (num == 0)
+ goto finish;
+ }
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ if (m < sglen) {
+ for (; m < sglen; m++) {
+ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+
+ vm->map_sg(vma, pgt, mem, pte, 1, &addr);
+ num--;
+ pte++;
+ if (num == 0)
+ goto finish;
+ }
+ }
+
+ }
+finish:
+ vm->flush(vm);
+}
+
+void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
struct nouveau_mem *mem)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 4fb6e728734d..a8246e7e4a89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -72,6 +72,9 @@ struct nouveau_vm {
u64 phys, u64 delta);
void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
+
+ void (*map_sg_table)(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
void (*flush)(struct nouveau_vm *);
};
@@ -90,7 +93,8 @@ void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
struct nouveau_mem *);
-
+void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
+ struct nouveau_mem *mem);
/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 728d07584d39..4c31c63e5528 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -1047,7 +1047,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 7047d37e8dab..44488e3a257d 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -98,6 +98,13 @@ nv04_display_early_init(struct drm_device *dev)
NVSetOwner(dev, 0);
}
+ /* ensure vblank interrupts are off, they can't be enabled until
+ * drm_vblank has been initialised
+ */
+ NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
+ if (nv_two_heads(dev))
+ NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
+
return 0;
}
@@ -246,6 +253,10 @@ nv04_display_init(struct drm_device *dev)
void
nv04_display_fini(struct drm_device *dev)
{
+ /* disable vblank interrupts */
+ NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
+ if (nv_two_heads(dev))
+ NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 7a1189371096..7cd7857347ef 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -41,7 +41,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
if (ret)
return ret;
- BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0300, 3);
OUT_RING(chan, (region->sy << 16) | region->sx);
OUT_RING(chan, (region->dy << 16) | region->dx);
OUT_RING(chan, (region->height << 16) | region->width);
@@ -62,15 +62,15 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
if (ret)
return ret;
- BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
- BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x03fc, 1);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR)
OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
else
OUT_RING(chan, rect->color);
- BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0400, 2);
OUT_RING(chan, (rect->dx << 16) | rect->dy);
OUT_RING(chan, (rect->width << 16) | rect->height);
FIRE_RING(chan);
@@ -110,7 +110,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
bg = image->bg_color;
}
- BEGIN_RING(chan, NvSubGdiRect, 0x0be4, 7);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0be4, 7);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
OUT_RING(chan, ((image->dy + image->height) << 16) |
((image->dx + image->width) & 0xffff));
@@ -127,7 +127,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
- BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0c00, iter_len);
OUT_RINGp(chan, data, iter_len);
data += iter_len;
dsize -= iter_len;
@@ -209,25 +209,25 @@ nv04_fbcon_accel_init(struct fb_info *info)
return 0;
}
- BEGIN_RING(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, sub, 0x0000, 1);
OUT_RING(chan, NvCtxSurf2D);
- BEGIN_RING(chan, sub, 0x0184, 2);
+ BEGIN_NV04(chan, sub, 0x0184, 2);
OUT_RING(chan, NvDmaFB);
OUT_RING(chan, NvDmaFB);
- BEGIN_RING(chan, sub, 0x0300, 4);
+ BEGIN_NV04(chan, sub, 0x0300, 4);
OUT_RING(chan, surface_fmt);
OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
- BEGIN_RING(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, sub, 0x0000, 1);
OUT_RING(chan, NvRop);
- BEGIN_RING(chan, sub, 0x0300, 1);
+ BEGIN_NV04(chan, sub, 0x0300, 1);
OUT_RING(chan, 0x55);
- BEGIN_RING(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, sub, 0x0000, 1);
OUT_RING(chan, NvImagePatt);
- BEGIN_RING(chan, sub, 0x0300, 8);
+ BEGIN_NV04(chan, sub, 0x0300, 8);
OUT_RING(chan, pattern_fmt);
#ifdef __BIG_ENDIAN
OUT_RING(chan, 2);
@@ -241,31 +241,31 @@ nv04_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, ~0);
OUT_RING(chan, ~0);
- BEGIN_RING(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, sub, 0x0000, 1);
OUT_RING(chan, NvClipRect);
- BEGIN_RING(chan, sub, 0x0300, 2);
+ BEGIN_NV04(chan, sub, 0x0300, 2);
OUT_RING(chan, 0);
OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
- BEGIN_RING(chan, NvSubImageBlit, 0x0000, 1);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1);
OUT_RING(chan, NvImageBlit);
- BEGIN_RING(chan, NvSubImageBlit, 0x019c, 1);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1);
OUT_RING(chan, NvCtxSurf2D);
- BEGIN_RING(chan, NvSubImageBlit, 0x02fc, 1);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
OUT_RING(chan, 3);
- BEGIN_RING(chan, NvSubGdiRect, 0x0000, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
OUT_RING(chan, NvGdiRect);
- BEGIN_RING(chan, NvSubGdiRect, 0x0198, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1);
OUT_RING(chan, NvCtxSurf2D);
- BEGIN_RING(chan, NvSubGdiRect, 0x0188, 2);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2);
OUT_RING(chan, NvImagePatt);
OUT_RING(chan, NvRop);
- BEGIN_RING(chan, NvSubGdiRect, 0x0304, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSubGdiRect, 0x0300, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1);
OUT_RING(chan, rect_fmt);
- BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
OUT_RING(chan, 3);
FIRE_RING(chan);
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
new file mode 100644
index 000000000000..abe89db6de24
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nv04_fence_chan {
+ struct nouveau_fence_chan base;
+ atomic_t sequence;
+};
+
+struct nv04_fence_priv {
+ struct nouveau_fence_priv base;
+};
+
+static int
+nv04_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan = fence->channel;
+ int ret = RING_SPACE(chan, 2);
+ if (ret == 0) {
+ BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
+ OUT_RING (chan, fence->sequence);
+ FIRE_RING (chan);
+ }
+ return ret;
+}
+
+static int
+nv04_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ return -ENODEV;
+}
+
+int
+nv04_fence_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+ atomic_set(&fctx->sequence, data);
+ return 0;
+}
+
+static u32
+nv04_fence_read(struct nouveau_channel *chan)
+{
+ struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+ return atomic_read(&fctx->sequence);
+}
+
+static void
+nv04_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv04_fence_chan *fctx = chan->engctx[engine];
+ nouveau_fence_context_del(&fctx->base);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nv04_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (fctx) {
+ nouveau_fence_context_new(&fctx->base);
+ atomic_set(&fctx->sequence, 0);
+ chan->engctx[engine] = fctx;
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+static int
+nv04_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static int
+nv04_fence_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static void
+nv04_fence_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fence_priv *priv = nv_engine(dev, engine);
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nv04_fence_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fence_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.engine.destroy = nv04_fence_destroy;
+ priv->base.engine.init = nv04_fence_init;
+ priv->base.engine.fini = nv04_fence_fini;
+ priv->base.engine.context_new = nv04_fence_context_new;
+ priv->base.engine.context_del = nv04_fence_context_del;
+ priv->base.emit = nv04_fence_emit;
+ priv->base.sync = nv04_fence_sync;
+ priv->base.read = nv04_fence_read;
+ dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index db465a3ee1b2..a6295cd00ec7 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Ben Skeggs.
+ * Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
@@ -27,49 +27,38 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
+#include "nouveau_fifo.h"
#include "nouveau_util.h"
-
-#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
-#define NV04_RAMFC__SIZE 32
-#define NV04_RAMFC_DMA_PUT 0x00
-#define NV04_RAMFC_DMA_GET 0x04
-#define NV04_RAMFC_DMA_INSTANCE 0x08
-#define NV04_RAMFC_DMA_STATE 0x0C
-#define NV04_RAMFC_DMA_FETCH 0x10
-#define NV04_RAMFC_ENGINE 0x14
-#define NV04_RAMFC_PULL1_ENGINE 0x18
-
-#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val))
-#define RAMFC_RD(offset) nv_ro32(chan->ramfc, NV04_RAMFC_##offset)
-
-void
-nv04_fifo_disable(struct drm_device *dev)
-{
- uint32_t tmp;
-
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
- tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1);
-}
-
-void
-nv04_fifo_enable(struct drm_device *dev)
-{
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-}
-
-bool
-nv04_fifo_reassign(struct drm_device *dev, bool enable)
-{
- uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES);
-
- nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0);
- return (reassign == 1);
-}
+#include "nouveau_ramht.h"
+#include "nouveau_software.h"
+
+static struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+} nv04_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
+ {}
+};
+
+struct nv04_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv04_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
bool
nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
@@ -86,13 +75,13 @@ nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
* invalidate the most recently calculated instance.
*/
if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
- NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
+ NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
- NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
+ NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_CACHE_ERROR);
+ NV_PFIFO_INTR_CACHE_ERROR);
nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
}
@@ -100,242 +89,182 @@ nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
return pull & 1;
}
-int
-nv04_fifo_channel_id(struct drm_device *dev)
-{
- return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
- NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
-}
-
-#ifdef __BIG_ENDIAN
-#define DMA_FETCH_ENDIANNESS NV_PFIFO_CACHE1_BIG_ENDIAN
-#else
-#define DMA_FETCH_ENDIANNESS 0
-#endif
-
-int
-nv04_fifo_create_context(struct nouveau_channel *chan)
+static int
+nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv04_fifo_chan *fctx;
unsigned long flags;
int ret;
- ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
- NV04_RAMFC__SIZE,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE,
- &chan->ramfc);
- if (ret)
- return ret;
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+ /* map channel control registers */
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user)
- return -ENOMEM;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
- /* Setup initial state */
- RAMFC_WR(DMA_PUT, chan->pushbuf_base);
- RAMFC_WR(DMA_GET, chan->pushbuf_base);
- RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4);
- RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
- DMA_FETCH_ENDIANNESS));
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
- /* enable the fifo dma operation */
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 32, ~0, 32,
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x08, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x0c, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nv_wo32(fctx->ramfc, 0x14, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x18, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- return 0;
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
}
void
-nv04_fifo_destroy_context(struct nouveau_channel *chan)
+nv04_fifo_context_del(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine);
+ struct nv04_fifo_chan *fctx = chan->engctx[engine];
+ struct ramfc_desc *c = priv->ramfc_desc;
unsigned long flags;
+ int chid;
+ /* prevent fifo context switches */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- pfifo->reassign(dev, false);
-
- /* Unload the context if it's the currently active one */
- if (pfifo->channel_id(dev) == chan->id) {
- pfifo->disable(dev);
- pfifo->unload_context(dev);
- pfifo->enable(dev);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+ /* if this channel is active, replace it with a null context */
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
+ if (chid == chan->id) {
+ nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
+ nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+
+ do {
+ u32 mask = ((1ULL << c->bits) - 1) << c->regs;
+ nv_mask(dev, c->regp, mask, 0x00000000);
+ } while ((++c)->bits);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
}
- /* Keep it from being rescheduled */
+ /* restore normal operation, after disabling dma mode */
nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
-
- pfifo->reassign(dev, true);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- /* Free the channel resources */
+ /* clean up */
+ nouveau_gpuobj_ref(NULL, &fctx->ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */
if (chan->user) {
iounmap(chan->user);
chan->user = NULL;
}
- nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
-static void
-nv04_fifo_do_load_context(struct drm_device *dev, int chid)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t fc = NV04_RAMFC(chid), tmp;
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
- tmp = nv_ri32(dev, fc + 8);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
- nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
-}
-
-int
-nv04_fifo_load_context(struct nouveau_channel *chan)
-{
- uint32_t tmp;
-
- nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
- NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
- nv04_fifo_do_load_context(chan->dev, chan->id);
- nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
-
- /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
- tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
- nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
-
- return 0;
}
int
-nv04_fifo_unload_context(struct drm_device *dev)
+nv04_fifo_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nouveau_channel *chan = NULL;
- uint32_t tmp;
- int chid;
-
- chid = pfifo->channel_id(dev);
- if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
- return 0;
-
- chan = dev_priv->channels.ptr[chid];
- if (!chan) {
- NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
- return -EINVAL;
- }
-
- RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
- RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
- tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
- RAMFC_WR(DMA_INSTANCE, tmp);
- RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
- RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
- RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
- RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
-
- nv04_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
- return 0;
-}
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
-static void
-nv04_fifo_init_reset(struct drm_device *dev)
-{
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
-
- nv_wr32(dev, 0x003224, 0x000f0078);
- nv_wr32(dev, 0x002044, 0x0101ffff);
- nv_wr32(dev, 0x002040, 0x000000ff);
- nv_wr32(dev, 0x002500, 0x00000000);
- nv_wr32(dev, 0x003000, 0x00000000);
- nv_wr32(dev, 0x003050, 0x00000000);
- nv_wr32(dev, 0x003200, 0x00000000);
- nv_wr32(dev, 0x003250, 0x00000000);
- nv_wr32(dev, 0x003220, 0x00000000);
-
- nv_wr32(dev, 0x003250, 0x00000000);
- nv_wr32(dev, 0x003270, 0x00000000);
- nv_wr32(dev, 0x003210, 0x00000000);
-}
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
-static void
-nv04_fifo_init_ramxx(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((dev_priv->ramht->bits - 9) << 16) |
(dev_priv->ramht->gpuobj->pinst >> 8));
nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
-}
-static void
-nv04_fifo_init_intr(struct drm_device *dev)
-{
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+
+ nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+
+ for (i = 0; i < priv->base.channels; i++) {
+ if (dev_priv->channels.ptr[i])
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
+ }
+
+ return 0;
}
int
-nv04_fifo_init(struct drm_device *dev)
+nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- int i;
-
- nv04_fifo_init_reset(dev);
- nv04_fifo_init_ramxx(dev);
-
- nv04_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+ struct nouveau_channel *chan;
+ int chid;
- nv04_fifo_init_intr(dev);
- pfifo->enable(dev);
- pfifo->reassign(dev, true);
+ /* prevent context switches and halt fifo operation */
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->channels.ptr[i]) {
- uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
- nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
- }
+ /* store current fifo context in ramfc */
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
+ chan = dev_priv->channels.ptr[chid];
+ if (suspend && chid != priv->base.channels && chan) {
+ struct nv04_fifo_chan *fctx = chan->engctx[engine];
+ struct nouveau_gpuobj *ctx = fctx->ramfc;
+ struct ramfc_desc *c = priv->ramfc_desc;
+ do {
+ u32 rm = ((1ULL << c->bits) - 1) << c->regs;
+ u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
+ u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
+ u32 cv = (nv_ro32(ctx, c->ctxp) & ~cm);
+ nv_wo32(ctx, c->ctxp, cv | (rv << c->ctxs));
+ } while ((++c)->bits);
}
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000);
return 0;
}
-void
-nv04_fifo_fini(struct drm_device *dev)
-{
- nv_wr32(dev, 0x2140, 0x00000000);
- nouveau_irq_unregister(dev, 8);
-}
-
static bool
nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = NULL;
struct nouveau_gpuobj *obj;
@@ -346,7 +275,7 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
u32 engine;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
+ if (likely(chid >= 0 && chid < pfifo->channels))
chan = dev_priv->channels.ptr[chid];
if (unlikely(!chan))
goto out;
@@ -357,7 +286,6 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
break;
- chan->sw_subchannel[subc] = obj->class;
engine = 0x0000000f << (subc * 4);
nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
@@ -368,7 +296,7 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
break;
- if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
+ if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev),
mthd, data))
handled = true;
break;
@@ -391,8 +319,8 @@ static const char *nv_dma_state_err(u32 state)
void
nv04_fifo_isr(struct drm_device *dev)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
uint32_t status, reassign;
int cnt = 0;
@@ -402,7 +330,7 @@ nv04_fifo_isr(struct drm_device *dev)
nv_wr32(dev, NV03_PFIFO_CACHES, 0);
- chid = engine->fifo.channel_id(dev);
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels;
get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
if (status & NV_PFIFO_INTR_CACHE_ERROR) {
@@ -541,3 +469,38 @@ nv04_fifo_isr(struct drm_device *dev)
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}
+
+void
+nv04_fifo_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+
+ nouveau_irq_unregister(dev, 8);
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nv04_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fifo_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv04_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv04_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
+ priv->base.channels = 15;
+ priv->ramfc_desc = nv04_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index dbdea8ed3925..72f1a62903b3 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -356,12 +356,12 @@ static struct nouveau_channel *
nv04_graph_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chid = dev_priv->engine.fifo.channels;
+ int chid = 15;
if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
- if (chid >= dev_priv->engine.fifo.channels)
+ if (chid > 15)
return NULL;
return dev_priv->channels.ptr[chid];
@@ -404,7 +404,6 @@ nv04_graph_load_context(struct nouveau_channel *chan)
static int
nv04_graph_unload_context(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = NULL;
struct graph_state *ctx;
uint32_t tmp;
@@ -420,7 +419,7 @@ nv04_graph_unload_context(struct drm_device *dev)
nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+ tmp |= 15 << 24;
nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
return 0;
}
@@ -495,7 +494,6 @@ nv04_graph_object_new(struct nouveau_channel *chan, int engine,
static int
nv04_graph_init(struct drm_device *dev, int engine)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -527,7 +525,7 @@ nv04_graph_init(struct drm_device *dev, int engine)
nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+ tmp |= 15 << 24;
nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
/* These don't belong here, they're part of a per-channel context */
@@ -550,28 +548,6 @@ nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
return 0;
}
-static int
-nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- atomic_set(&chan->fence.last_sequence_irq, data);
- return 0;
-}
-
-int
-nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- struct drm_device *dev = chan->dev;
- struct nouveau_page_flip_state s;
-
- if (!nouveau_finish_page_flip(chan, &s))
- nv_set_crtc_base(dev, s.crtc,
- s.offset + s.y * s.pitch + s.x * s.bpp / 8);
-
- return 0;
-}
-
/*
* Software methods, why they are needed, and how they all work:
*
@@ -1020,7 +996,8 @@ nv04_graph_context_switch(struct drm_device *dev)
nv04_graph_unload_context(dev);
/* Load context for next channel */
- chid = dev_priv->engine.fifo.channel_id(dev);
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
+ NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
chan = dev_priv->channels.ptr[chid];
if (chan)
nv04_graph_load_context(chan);
@@ -1345,9 +1322,5 @@ nv04_graph_create(struct drm_device *dev)
NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
- /* nvsw */
- NVOBJ_CLASS(dev, 0x506e, SW);
- NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index c1248e0740a3..ef7a934a499a 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -1,6 +1,8 @@
#include "drmP.h"
#include "drm.h"
+
#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
/* returns the size of fifo context */
@@ -10,12 +12,15 @@ nouveau_fifo_ctx_size(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (dev_priv->chipset >= 0x40)
- return 128;
+ return 128 * 32;
else
if (dev_priv->chipset >= 0x17)
- return 64;
+ return 64 * 32;
+ else
+ if (dev_priv->chipset >= 0x10)
+ return 32 * 32;
- return 32;
+ return 32 * 16;
}
int nv04_instmem_init(struct drm_device *dev)
@@ -39,14 +44,10 @@ int nv04_instmem_init(struct drm_device *dev)
else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
else rsvd = 0x4a40 * vs;
rsvd += 16 * 1024;
- rsvd *= dev_priv->engine.fifo.channels;
-
- /* pciegart table */
- if (pci_is_pcie(dev->pdev))
- rsvd += 512 * 1024;
+ rsvd *= 32; /* per-channel */
- /* object storage */
- rsvd += 512 * 1024;
+ rsvd += 512 * 1024; /* pci(e)gart table */
+ rsvd += 512 * 1024; /* object storage */
dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
} else {
@@ -71,7 +72,7 @@ int nv04_instmem_init(struct drm_device *dev)
return ret;
/* And RAMFC */
- length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev);
+ length = nouveau_fifo_ctx_size(dev);
switch (dev_priv->card_type) {
case NV_40:
offset = 0x20000;
diff --git a/drivers/gpu/drm/nouveau/nv04_software.c b/drivers/gpu/drm/nouveau/nv04_software.c
new file mode 100644
index 000000000000..0c41abf48774
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_software.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+#include "nouveau_software.h"
+#include "nouveau_hw.h"
+
+struct nv04_software_priv {
+ struct nouveau_software_priv base;
+};
+
+struct nv04_software_chan {
+ struct nouveau_software_chan base;
+};
+
+static int
+mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+
+ struct nouveau_page_flip_state state;
+
+ if (!nouveau_finish_page_flip(chan, &state)) {
+ nv_set_crtc_base(chan->dev, state.crtc, state.offset +
+ state.y * state.pitch +
+ state.x * state.bpp / 8);
+ }
+
+ return 0;
+}
+
+static int
+nv04_software_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv04_software_chan *pch;
+
+ pch = kzalloc(sizeof(*pch), GFP_KERNEL);
+ if (!pch)
+ return -ENOMEM;
+
+ nouveau_software_context_new(&pch->base);
+ chan->engctx[engine] = pch;
+ return 0;
+}
+
+static void
+nv04_software_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv04_software_chan *pch = chan->engctx[engine];
+ chan->engctx[engine] = NULL;
+ kfree(pch);
+}
+
+static int
+nv04_software_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 0;
+ obj->class = class;
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
+static int
+nv04_software_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static int
+nv04_software_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static void
+nv04_software_destroy(struct drm_device *dev, int engine)
+{
+ struct nv04_software_priv *psw = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, SW);
+ kfree(psw);
+}
+
+int
+nv04_software_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_software_priv *psw;
+
+ psw = kzalloc(sizeof(*psw), GFP_KERNEL);
+ if (!psw)
+ return -ENOMEM;
+
+ psw->base.base.destroy = nv04_software_destroy;
+ psw->base.base.init = nv04_software_init;
+ psw->base.base.fini = nv04_software_fini;
+ psw->base.base.context_new = nv04_software_context_new;
+ psw->base.base.context_del = nv04_software_context_del;
+ psw->base.base.object_new = nv04_software_object_new;
+ nouveau_software_create(&psw->base);
+
+ NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
+ if (dev_priv->card_type <= NV_04) {
+ NVOBJ_CLASS(dev, 0x006e, SW);
+ NVOBJ_MTHD (dev, 0x006e, 0x0150, nv04_fence_mthd);
+ NVOBJ_MTHD (dev, 0x006e, 0x0500, mthd_flip);
+ } else {
+ NVOBJ_CLASS(dev, 0x016e, SW);
+ NVOBJ_MTHD (dev, 0x016e, 0x0500, mthd_flip);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
new file mode 100644
index 000000000000..8a1b75009185
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nv10_fence_chan {
+ struct nouveau_fence_chan base;
+};
+
+struct nv10_fence_priv {
+ struct nouveau_fence_priv base;
+ struct nouveau_bo *bo;
+ spinlock_t lock;
+ u32 sequence;
+};
+
+static int
+nv10_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan = fence->channel;
+ int ret = RING_SPACE(chan, 2);
+ if (ret == 0) {
+ BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
+ OUT_RING (chan, fence->sequence);
+ FIRE_RING (chan);
+ }
+ return ret;
+}
+
+
+static int
+nv10_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ return -ENODEV;
+}
+
+static int
+nv17_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ struct nv10_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
+ u32 value;
+ int ret;
+
+ if (!mutex_trylock(&prev->mutex))
+ return -EBUSY;
+
+ spin_lock(&priv->lock);
+ value = priv->sequence;
+ priv->sequence += 2;
+ spin_unlock(&priv->lock);
+
+ ret = RING_SPACE(prev, 5);
+ if (!ret) {
+ BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+ OUT_RING (prev, NvSema);
+ OUT_RING (prev, 0);
+ OUT_RING (prev, value + 0);
+ OUT_RING (prev, value + 1);
+ FIRE_RING (prev);
+ }
+
+ if (!ret && !(ret = RING_SPACE(chan, 5))) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+ OUT_RING (chan, NvSema);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, value + 1);
+ OUT_RING (chan, value + 2);
+ FIRE_RING (chan);
+ }
+
+ mutex_unlock(&prev->mutex);
+ return 0;
+}
+
+static u32
+nv10_fence_read(struct nouveau_channel *chan)
+{
+ return nvchan_rd32(chan, 0x0048);
+}
+
+static void
+nv10_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv10_fence_chan *fctx = chan->engctx[engine];
+ nouveau_fence_context_del(&fctx->base);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nv10_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv10_fence_priv *priv = nv_engine(chan->dev, engine);
+ struct nv10_fence_chan *fctx;
+ struct nouveau_gpuobj *obj;
+ int ret = 0;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ nouveau_fence_context_new(&fctx->base);
+
+ if (priv->bo) {
+ struct ttm_mem_reg *mem = &priv->bo->bo.mem;
+
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
+ mem->start * PAGE_SIZE, mem->size,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &obj);
+ if (!ret) {
+ ret = nouveau_ramht_insert(chan, NvSema, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ }
+ }
+
+ if (ret)
+ nv10_fence_context_del(chan, engine);
+ return ret;
+}
+
+static int
+nv10_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static int
+nv10_fence_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static void
+nv10_fence_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv10_fence_priv *priv = nv_engine(dev, engine);
+
+ nouveau_bo_ref(NULL, &priv->bo);
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nv10_fence_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv10_fence_priv *priv;
+ int ret = 0;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.engine.destroy = nv10_fence_destroy;
+ priv->base.engine.init = nv10_fence_init;
+ priv->base.engine.fini = nv10_fence_fini;
+ priv->base.engine.context_new = nv10_fence_context_new;
+ priv->base.engine.context_del = nv10_fence_context_del;
+ priv->base.emit = nv10_fence_emit;
+ priv->base.read = nv10_fence_read;
+ priv->base.sync = nv10_fence_sync;
+ dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+ spin_lock_init(&priv->lock);
+
+ if (dev_priv->chipset >= 0x17) {
+ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &priv->bo);
+ if (!ret) {
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo);
+ }
+
+ if (ret == 0) {
+ nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
+ priv->base.sync = nv17_fence_sync;
+ }
+ }
+
+ if (ret)
+ nv10_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index d2ecbff4bee1..f1fe7d758241 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Ben Skeggs.
+ * Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
@@ -27,220 +27,112 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
+#include "nouveau_util.h"
#include "nouveau_ramht.h"
-#define NV10_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV10_RAMFC__SIZE))
-#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
-
-int
-nv10_fifo_channel_id(struct drm_device *dev)
-{
- return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
- NV10_PFIFO_CACHE1_PUSH1_CHID_MASK;
-}
-
-int
-nv10_fifo_create_context(struct nouveau_channel *chan)
+static struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+} nv10_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
+ {}
+};
+
+struct nv10_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv10_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
+
+static int
+nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
- uint32_t fc = NV10_RAMFC(chan->id);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv10_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv10_fifo_chan *fctx;
+ unsigned long flags;
int ret;
- ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
- NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
- if (ret)
- return ret;
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+ /* map channel control registers */
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user)
- return -ENOMEM;
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
- /* Fill entries that are seen filled in dumps of nvidia driver just
- * after channel's is put into DMA mode
- */
- nv_wi32(dev, fc + 0, chan->pushbuf_base);
- nv_wi32(dev, fc + 4, chan->pushbuf_base);
- nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
- nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 32, ~0, 32,
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x08, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x10, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
- 0);
-
- /* enable the fifo dma operation */
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
- return 0;
-}
-
-static void
-nv10_fifo_do_load_context(struct drm_device *dev, int chid)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t fc = NV10_RAMFC(chid), tmp;
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
- nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
-
- tmp = nv_ri32(dev, fc + 12);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20));
- nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24));
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28));
-
- if (dev_priv->chipset < 0x17)
- goto out;
-
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32));
- tmp = nv_ri32(dev, fc + 36);
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40));
- nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44));
- nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));
-
-out:
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
-}
-
-int
-nv10_fifo_load_context(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- uint32_t tmp;
-
- nv10_fifo_do_load_context(dev, chan->id);
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nv_wo32(fctx->ramfc, 0x18, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
- NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
-
- return 0;
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
}
int
-nv10_fifo_unload_context(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- uint32_t fc, tmp;
- int chid;
-
- chid = pfifo->channel_id(dev);
- if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
- return 0;
- fc = NV10_RAMFC(chid);
-
- nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
- nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
- nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
- tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
- nv_wi32(dev, fc + 12, tmp);
- nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
- nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
- nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
- nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
-
- if (dev_priv->chipset < 0x17)
- goto out;
-
- nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
- tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
- nv_wi32(dev, fc + 36, tmp);
- nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
- nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
- nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
-
-out:
- nv10_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
- return 0;
-}
-
-static void
-nv10_fifo_init_reset(struct drm_device *dev)
-{
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
-
- nv_wr32(dev, 0x003224, 0x000f0078);
- nv_wr32(dev, 0x002044, 0x0101ffff);
- nv_wr32(dev, 0x002040, 0x000000ff);
- nv_wr32(dev, 0x002500, 0x00000000);
- nv_wr32(dev, 0x003000, 0x00000000);
- nv_wr32(dev, 0x003050, 0x00000000);
-
- nv_wr32(dev, 0x003258, 0x00000000);
- nv_wr32(dev, 0x003210, 0x00000000);
- nv_wr32(dev, 0x003270, 0x00000000);
-}
-
-static void
-nv10_fifo_init_ramxx(struct drm_device *dev)
+nv10_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv10_fifo_priv *priv;
- nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht->bits - 9) << 16) |
- (dev_priv->ramht->gpuobj->pinst >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- if (dev_priv->chipset < 0x17) {
- nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
- } else {
- nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc->pinst >> 8) |
- (1 << 16) /* 64 Bytes entry*/);
- /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
- }
-}
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv04_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv10_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
+ priv->base.channels = 31;
+ priv->ramfc_desc = nv10_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-static void
-nv10_fifo_init_intr(struct drm_device *dev)
-{
nouveau_irq_register(dev, 8, nv04_fifo_isr);
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xffffffff);
-}
-
-int
-nv10_fifo_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- int i;
-
- nv10_fifo_init_reset(dev);
- nv10_fifo_init_ramxx(dev);
-
- nv10_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
-
- nv10_fifo_init_intr(dev);
- pfifo->enable(dev);
- pfifo->reassign(dev, true);
-
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->channels.ptr[i]) {
- uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
- nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
- }
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 7255e4a4d3f3..fb1d88a951de 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -759,7 +759,6 @@ static int
nv10_graph_unload_context(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
struct graph_state *ctx;
uint32_t tmp;
@@ -782,7 +781,7 @@ nv10_graph_unload_context(struct drm_device *dev)
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= (pfifo->channels - 1) << 24;
+ tmp |= 31 << 24;
nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
return 0;
}
@@ -822,12 +821,12 @@ struct nouveau_channel *
nv10_graph_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chid = dev_priv->engine.fifo.channels;
+ int chid = 31;
if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
- if (chid >= dev_priv->engine.fifo.channels)
+ if (chid >= 31)
return NULL;
return dev_priv->channels.ptr[chid];
@@ -948,7 +947,7 @@ nv10_graph_init(struct drm_device *dev, int engine)
nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+ tmp |= 31 << 24;
nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
@@ -1153,10 +1152,6 @@ nv10_graph_create(struct drm_device *dev)
NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
nouveau_irq_register(dev, 12, nv10_graph_isr);
- /* nvsw */
- NVOBJ_CLASS(dev, 0x506e, SW);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
NVOBJ_CLASS(dev, 0x0030, GR); /* null */
NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
diff --git a/drivers/gpu/drm/nouveau/nv17_fifo.c b/drivers/gpu/drm/nouveau/nv17_fifo.c
new file mode 100644
index 000000000000..d9e482e4abee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_fifo.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2012 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
+#include "nouveau_util.h"
+#include "nouveau_ramht.h"
+
+static struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+} nv17_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
+ { 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+ { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+ { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+ { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
+ { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+ {}
+};
+
+struct nv17_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv17_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
+
+static int
+nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv17_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv17_fifo_chan *fctx;
+ unsigned long flags;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ /* map channel control registers */
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 64, ~0, 64,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static int
+nv17_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv17_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
+
+ nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+ nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((dev_priv->ramht->bits - 9) << 16) |
+ (dev_priv->ramht->gpuobj->pinst >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
+ nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
+ dev_priv->ramfc->pinst >> 8);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+
+ nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+
+ for (i = 0; i < priv->base.channels; i++) {
+ if (dev_priv->channels.ptr[i])
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
+ }
+
+ return 0;
+}
+
+int
+nv17_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv17_fifo_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv17_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv17_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
+ priv->base.channels = 31;
+ priv->ramfc_desc = nv17_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 183e37512ef9..e34ea30758f6 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -43,8 +43,6 @@ struct nv20_graph_engine {
int
nv20_graph_unload_context(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
struct nouveau_gpuobj *grctx;
u32 tmp;
@@ -62,7 +60,7 @@ nv20_graph_unload_context(struct drm_device *dev)
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= (pfifo->channels - 1) << 24;
+ tmp |= 31 << 24;
nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
return 0;
}
@@ -796,10 +794,6 @@ nv20_graph_create(struct drm_device *dev)
NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
nouveau_irq_register(dev, 12, nv20_graph_isr);
- /* nvsw */
- NVOBJ_CLASS(dev, 0x506e, SW);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
NVOBJ_CLASS(dev, 0x0030, GR); /* null */
NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
diff --git a/drivers/gpu/drm/nouveau/nv31_mpeg.c b/drivers/gpu/drm/nouveau/nv31_mpeg.c
index 6f06a0713f00..5f239bf658c4 100644
--- a/drivers/gpu/drm/nouveau/nv31_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv31_mpeg.c
@@ -24,6 +24,7 @@
#include "drmP.h"
#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
struct nv31_mpeg_engine {
@@ -208,6 +209,7 @@ nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
static int
nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ctx;
unsigned long flags;
@@ -218,7 +220,7 @@ nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
return 0;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ for (i = 0; i < pfifo->channels; i++) {
if (!dev_priv->channels.ptr[i])
continue;
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index 68cb2d991c88..cdc818479b0a 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Ben Skeggs.
+ * Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
@@ -25,215 +25,123 @@
*/
#include "drmP.h"
+#include "drm.h"
#include "nouveau_drv.h"
-#include "nouveau_drm.h"
+#include "nouveau_fifo.h"
+#include "nouveau_util.h"
#include "nouveau_ramht.h"
-#define NV40_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV40_RAMFC__SIZE))
-#define NV40_RAMFC__SIZE 128
-
-int
-nv40_fifo_create_context(struct nouveau_channel *chan)
+static struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+} nv40_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 2, 28, 0x18, 28, 0x002058 },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
+ { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+ { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+ { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+ { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
+ { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+ { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
+ { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
+ { 32, 0, 0x40, 0, 0x0032e4 },
+ { 32, 0, 0x44, 0, 0x0032e8 },
+ { 32, 0, 0x4c, 0, 0x002088 },
+ { 32, 0, 0x50, 0, 0x003300 },
+ { 32, 0, 0x54, 0, 0x00330c },
+ {}
+};
+
+struct nv40_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv40_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
+
+static int
+nv40_fifo_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t fc = NV40_RAMFC(chan->id);
+ struct nv40_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv40_fifo_chan *fctx;
unsigned long flags;
int ret;
- ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
- NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
- if (ret)
- return ret;
-
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV40_USER(chan->id), PAGE_SIZE);
- if (!chan->user)
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
return -ENOMEM;
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ /* map channel control registers */
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
- nv_wi32(dev, fc + 0, chan->pushbuf_base);
- nv_wi32(dev, fc + 4, chan->pushbuf_base);
- nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
- nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 128, ~0, 128,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x18, 0x30000000 |
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
- 0x30000000 /* no idea.. */);
- nv_wi32(dev, fc + 60, 0x0001FFFF);
-
- /* enable the fifo dma operation */
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nv_wo32(fctx->ramfc, 0x3c, 0x0001ffff);
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- return 0;
-}
-
-static void
-nv40_fifo_do_load_context(struct drm_device *dev, int chid)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
- nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20));
-
- /* No idea what 0x2058 is.. */
- tmp = nv_ri32(dev, fc + 24);
- tmp2 = nv_rd32(dev, 0x2058) & 0xFFF;
- tmp2 |= (tmp & 0x30000000);
- nv_wr32(dev, 0x2058, tmp2);
- tmp &= ~0x30000000;
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp);
- nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28));
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32));
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36));
- tmp = nv_ri32(dev, fc + 40);
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44));
- nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48));
- nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52));
- nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56));
+ /*XXX: remove this later, need fifo engine context commit hook */
+ nouveau_gpuobj_ref(fctx->ramfc, &chan->ramfc);
- /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
- tmp = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
- tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF;
- nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp);
-
- nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64));
- /* NVIDIA does this next line twice... */
- nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
- nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
- nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
- nv_wr32(dev, 0x330c, nv_ri32(dev, fc + 84));
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
-}
-
-int
-nv40_fifo_load_context(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- uint32_t tmp;
-
- nv40_fifo_do_load_context(dev, chan->id);
-
- /* Set channel active, and in DMA mode */
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
- NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
-
- /* Reset DMA_CTL_AT_INFO to INVALID */
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
-
- return 0;
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
}
-int
-nv40_fifo_unload_context(struct drm_device *dev)
+static int
+nv40_fifo_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- uint32_t fc, tmp;
- int chid;
-
- chid = pfifo->channel_id(dev);
- if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
- return 0;
- fc = NV40_RAMFC(chid);
-
- nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
- nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
- nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
- nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE));
- nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT));
- nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH);
- tmp |= nv_rd32(dev, 0x2058) & 0x30000000;
- nv_wi32(dev, fc + 24, tmp);
- nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
- nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
- nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
- tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
- nv_wi32(dev, fc + 40, tmp);
- nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
- nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
- /* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something
- * more involved depending on the value of 0x3228?
- */
- nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
- nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE));
- nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff);
- /* No idea what the below is for exactly, ripped from a mmio-trace */
- nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4));
- /* NVIDIA do this next line twice.. bug? */
- nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8));
- nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088));
- nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300));
-#if 0 /* no real idea which is PUT/GET in UNK_48.. */
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_GET);
- tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
- nv_wi32(dev, fc + 72, tmp);
-#endif
- nv_wi32(dev, fc + 84, nv_rd32(dev, 0x330c));
-
- nv40_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
- NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1));
- return 0;
-}
-
-static void
-nv40_fifo_init_reset(struct drm_device *dev)
-{
+ struct nv40_fifo_priv *priv = nv_engine(dev, engine);
int i;
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
- nv_wr32(dev, 0x003224, 0x000f0078);
- nv_wr32(dev, 0x003210, 0x00000000);
- nv_wr32(dev, 0x003270, 0x00000000);
- nv_wr32(dev, 0x003240, 0x00000000);
- nv_wr32(dev, 0x003244, 0x00000000);
- nv_wr32(dev, 0x003258, 0x00000000);
- nv_wr32(dev, 0x002504, 0x00000000);
- for (i = 0; i < 16; i++)
- nv_wr32(dev, 0x002510 + (i * 4), 0x00000000);
- nv_wr32(dev, 0x00250c, 0x0000ffff);
- nv_wr32(dev, 0x002048, 0x00000000);
- nv_wr32(dev, 0x003228, 0x00000000);
- nv_wr32(dev, 0x0032e8, 0x00000000);
- nv_wr32(dev, 0x002410, 0x00000000);
- nv_wr32(dev, 0x002420, 0x00000000);
- nv_wr32(dev, 0x002058, 0x00000001);
- nv_wr32(dev, 0x00221c, 0x00000000);
- /* something with 0x2084, read/modify/write, no change */
nv_wr32(dev, 0x002040, 0x000000ff);
- nv_wr32(dev, 0x002500, 0x00000000);
- nv_wr32(dev, 0x003200, 0x00000000);
-
- nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
-}
-
-static void
-nv40_fifo_init_ramxx(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nv_wr32(dev, 0x002044, 0x2101ffff);
+ nv_wr32(dev, 0x002058, 0x00000001);
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((dev_priv->ramht->bits - 9) << 16) |
@@ -244,64 +152,59 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
case 0x47:
case 0x49:
case 0x4b:
- nv_wr32(dev, 0x2230, 1);
- break;
- default:
- break;
- }
-
- switch (dev_priv->chipset) {
+ nv_wr32(dev, 0x002230, 0x00000001);
case 0x40:
case 0x41:
case 0x42:
case 0x43:
case 0x45:
- case 0x47:
case 0x48:
- case 0x49:
- case 0x4b:
- nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002);
+ nv_wr32(dev, 0x002220, 0x00030002);
break;
default:
- nv_wr32(dev, 0x2230, 0);
- nv_wr32(dev, NV40_PFIFO_RAMFC,
- ((dev_priv->vram_size - 512 * 1024 +
- dev_priv->ramfc->pinst) >> 16) | (3 << 16));
+ nv_wr32(dev, 0x002230, 0x00000000);
+ nv_wr32(dev, 0x002220, ((dev_priv->vram_size - 512 * 1024 +
+ dev_priv->ramfc->pinst) >> 16) |
+ 0x00030000);
break;
}
-}
-static void
-nv40_fifo_init_intr(struct drm_device *dev)
-{
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+
+ nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+
+ for (i = 0; i < priv->base.channels; i++) {
+ if (dev_priv->channels.ptr[i])
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
+ }
+
+ return 0;
}
int
-nv40_fifo_init(struct drm_device *dev)
+nv40_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- int i;
-
- nv40_fifo_init_reset(dev);
- nv40_fifo_init_ramxx(dev);
+ struct nv40_fifo_priv *priv;
- nv40_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
-
- nv40_fifo_init_intr(dev);
- pfifo->enable(dev);
- pfifo->reassign(dev, true);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->channels.ptr[i]) {
- uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
- nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
- }
- }
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv40_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv40_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
+ priv->base.channels = 31;
+ priv->ramfc_desc = nv40_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index ba14a93d8afa..aa9e2df64a26 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -27,7 +27,7 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
-#include "nouveau_grctx.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
struct nv40_graph_engine {
@@ -42,7 +42,6 @@ nv40_graph_context_new(struct nouveau_channel *chan, int engine)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *grctx = NULL;
- struct nouveau_grctx ctx = {};
unsigned long flags;
int ret;
@@ -52,11 +51,7 @@ nv40_graph_context_new(struct nouveau_channel *chan, int engine)
return ret;
/* Initialise default context values */
- ctx.dev = chan->dev;
- ctx.mode = NOUVEAU_GRCTX_VALS;
- ctx.data = grctx;
- nv40_grctx_init(&ctx);
-
+ nv40_grctx_fill(dev, grctx);
nv_wo32(grctx, 0, grctx->vinst);
/* init grctx pointer in ramfc, and on PFIFO if channel is
@@ -184,8 +179,7 @@ nv40_graph_init(struct drm_device *dev, int engine)
struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nouveau_grctx ctx = {};
- uint32_t vramsz, *cp;
+ uint32_t vramsz;
int i, j;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -193,22 +187,8 @@ nv40_graph_init(struct drm_device *dev, int engine)
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
- cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
- if (!cp)
- return -ENOMEM;
-
- ctx.dev = dev;
- ctx.mode = NOUVEAU_GRCTX_PROG;
- ctx.data = cp;
- ctx.ctxprog_max = 256;
- nv40_grctx_init(&ctx);
- pgraph->grctx_size = ctx.ctxvals_pos * 4;
-
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
- for (i = 0; i < ctx.ctxprog_len; i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
-
- kfree(cp);
+ /* generate and upload context program */
+ nv40_grctx_init(dev, &pgraph->grctx_size);
/* No context present currently */
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
@@ -366,13 +346,14 @@ nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
static int
nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *grctx;
unsigned long flags;
int i;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ for (i = 0; i < pfifo->channels; i++) {
if (!dev_priv->channels.ptr[i])
continue;
grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
@@ -460,7 +441,6 @@ nv40_graph_create(struct drm_device *dev)
NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
nouveau_irq_register(dev, 12, nv40_graph_isr);
- NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
NVOBJ_CLASS(dev, 0x0030, GR); /* null */
NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
@@ -483,8 +463,5 @@ nv40_graph_create(struct drm_device *dev)
else
NVOBJ_CLASS(dev, 0x4097, GR);
- /* nvsw */
- NVOBJ_CLASS(dev, 0x506e, SW);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index f70447d131d7..be0a74750fb1 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -595,8 +595,8 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
}
}
-void
-nv40_grctx_init(struct nouveau_grctx *ctx)
+static void
+nv40_grctx_generate(struct nouveau_grctx *ctx)
{
/* decide whether we're loading/unloading the context */
cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
@@ -660,3 +660,31 @@ nv40_grctx_init(struct nouveau_grctx *ctx)
cp_out (ctx, CP_END);
}
+void
+nv40_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+{
+ nv40_grctx_generate(&(struct nouveau_grctx) {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_VALS,
+ .data = mem,
+ });
+}
+
+void
+nv40_grctx_init(struct drm_device *dev, u32 *size)
+{
+ u32 ctxprog[256], i;
+ struct nouveau_grctx ctx = {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_PROG,
+ .data = ctxprog,
+ .ctxprog_max = ARRAY_SIZE(ctxprog)
+ };
+
+ nv40_grctx_generate(&ctx);
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, ctxprog[i]);
+ *size = ctx.ctxvals_pos * 4;
+}
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index c7615381c5d9..e66273aff493 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -27,6 +27,7 @@
#include "nouveau_bios.h"
#include "nouveau_pm.h"
#include "nouveau_hw.h"
+#include "nouveau_fifo.h"
#define min2(a,b) ((a) < (b) ? (a) : (b))
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 701b927998bf..97a477b3d52d 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -79,15 +79,15 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
NV_ERROR(dev, "no space while blanking crtc\n");
return ret;
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
OUT_RING(evo, 0);
if (dev_priv->chipset != 0x50) {
- BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
+ BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
} else {
if (nv_crtc->cursor.visible)
@@ -100,20 +100,20 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
NV_ERROR(dev, "no space while unblanking crtc\n");
return ret;
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
OUT_RING(evo, nv_crtc->lut.depth == 8 ?
NV50_EVO_CRTC_CLUT_MODE_OFF :
NV50_EVO_CRTC_CLUT_MODE_ON);
OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
if (dev_priv->chipset != 0x50) {
- BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
+ BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
OUT_RING(evo, NvEvoVRAM);
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
OUT_RING(evo, nv_crtc->fb.offset >> 8);
OUT_RING(evo, 0);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
if (dev_priv->chipset != 0x50)
if (nv_crtc->fb.tile_flags == 0x7a00 ||
nv_crtc->fb.tile_flags == 0xfe00)
@@ -158,10 +158,10 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
if (ret == 0) {
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
OUT_RING (evo, mode);
if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
FIRE_RING (evo);
}
@@ -193,11 +193,11 @@ nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
OUT_RING (evo, (hue << 20) | (vib << 8));
if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
FIRE_RING (evo);
}
@@ -311,9 +311,9 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
if (ret)
return ret;
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
OUT_RING (evo, ctrl);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
OUT_RING (evo, oY << 16 | oX);
OUT_RING (evo, oY << 16 | oX);
@@ -383,23 +383,15 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
static void
nv50_crtc_destroy(struct drm_crtc *crtc)
{
- struct drm_device *dev;
- struct nouveau_crtc *nv_crtc;
-
- if (!crtc)
- return;
-
- dev = crtc->dev;
- nv_crtc = nouveau_crtc(crtc);
-
- NV_DEBUG_KMS(dev, "\n");
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- drm_crtc_cleanup(&nv_crtc->base);
+ NV_DEBUG_KMS(crtc->dev, "\n");
nouveau_bo_unmap(nv_crtc->lut.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ drm_crtc_cleanup(&nv_crtc->base);
kfree(nv_crtc);
}
@@ -593,7 +585,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
if (ret)
return ret;
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
OUT_RING (evo, fb->r_dma);
}
@@ -601,18 +593,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
if (ret)
return ret;
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
OUT_RING (evo, nv_crtc->fb.offset >> 8);
OUT_RING (evo, 0);
OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width);
OUT_RING (evo, fb->r_pitch);
OUT_RING (evo, fb->r_format);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
OUT_RING (evo, fb->base.depth == 8 ?
NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
OUT_RING (evo, (y << 16) | x);
if (nv_crtc->lut.depth != fb->base.depth) {
@@ -672,23 +664,23 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
ret = RING_SPACE(evo, 18);
if (ret == 0) {
- BEGIN_RING(evo, 0, 0x0804 + head, 2);
+ BEGIN_NV04(evo, 0, 0x0804 + head, 2);
OUT_RING (evo, 0x00800000 | mode->clock);
OUT_RING (evo, (ilace == 2) ? 2 : 0);
- BEGIN_RING(evo, 0, 0x0810 + head, 6);
+ BEGIN_NV04(evo, 0, 0x0810 + head, 6);
OUT_RING (evo, 0x00000000); /* border colour */
OUT_RING (evo, (vactive << 16) | hactive);
OUT_RING (evo, ( vsynce << 16) | hsynce);
OUT_RING (evo, (vblanke << 16) | hblanke);
OUT_RING (evo, (vblanks << 16) | hblanks);
OUT_RING (evo, (vblan2e << 16) | vblan2s);
- BEGIN_RING(evo, 0, 0x082c + head, 1);
+ BEGIN_NV04(evo, 0, 0x082c + head, 1);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x0900 + head, 1);
+ BEGIN_NV04(evo, 0, 0x0900 + head, 1);
OUT_RING (evo, 0x00000311); /* makes sync channel work */
- BEGIN_RING(evo, 0, 0x08c8 + head, 1);
+ BEGIN_NV04(evo, 0, 0x08c8 + head, 1);
OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay);
- BEGIN_RING(evo, 0, 0x08d4 + head, 1);
+ BEGIN_NV04(evo, 0, 0x08d4 + head, 1);
OUT_RING (evo, 0x00000000); /* screen position */
}
@@ -755,21 +747,25 @@ nv50_crtc_create(struct drm_device *dev, int index)
if (!nv_crtc)
return -ENOMEM;
+ nv_crtc->index = index;
+ nv_crtc->set_dither = nv50_crtc_set_dither;
+ nv_crtc->set_scale = nv50_crtc_set_scale;
+ nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
nv_crtc->color_vibrance = 50;
nv_crtc->vibrant_hue = 0;
-
- /* Default CLUT parameters, will be activated on the hw upon
- * first mode set.
- */
+ nv_crtc->lut.depth = 0;
for (i = 0; i < 256; i++) {
nv_crtc->lut.r[i] = i << 8;
nv_crtc->lut.g[i] = i << 8;
nv_crtc->lut.b[i] = i << 8;
}
- nv_crtc->lut.depth = 0;
+
+ drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
+ drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
+ drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->lut.nvbo);
+ 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -778,24 +774,12 @@ nv50_crtc_create(struct drm_device *dev, int index)
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
}
- if (ret) {
- kfree(nv_crtc);
- return ret;
- }
-
- nv_crtc->index = index;
+ if (ret)
+ goto out;
- /* set function pointers */
- nv_crtc->set_dither = nv50_crtc_set_dither;
- nv_crtc->set_scale = nv50_crtc_set_scale;
- nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
-
- drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
- drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
- drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -804,6 +788,12 @@ nv50_crtc_create(struct drm_device *dev, int index)
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
}
+ if (ret)
+ goto out;
+
nv50_cursor_init(nv_crtc);
- return 0;
+out:
+ if (ret)
+ nv50_crtc_destroy(&nv_crtc->base);
+ return ret;
}
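/*
 * Minimal userspace sketch of the single-exit error path used by the
 * reworked nv50_crtc_create() above: every failure after the first
 * allocation jumps to one label, and the same destroy routine that
 * handles normal teardown also unwinds a partially constructed object.
 * The demo_crtc type and helpers are illustrative only.
 */
#include <stdlib.h>

struct demo_crtc {
	void *lut_bo;
	void *cursor_bo;
};

static void demo_crtc_destroy(struct demo_crtc *crtc)
{
	free(crtc->cursor_bo);	/* free(NULL) is a no-op */
	free(crtc->lut_bo);
	free(crtc);
}

static int demo_crtc_create(struct demo_crtc **pcrtc)
{
	struct demo_crtc *crtc;
	int ret = 0;

	crtc = calloc(1, sizeof(*crtc));
	if (!crtc)
		return -1;

	crtc->lut_bo = malloc(4096);
	if (!crtc->lut_bo) {
		ret = -1;
		goto out;
	}

	crtc->cursor_bo = malloc(64 * 64 * 4);
	if (!crtc->cursor_bo) {
		ret = -1;
		goto out;
	}

	*pcrtc = crtc;
out:
	if (ret)
		demo_crtc_destroy(crtc);	/* single unwind path */
	return ret;
}

int main(void)
{
	struct demo_crtc *crtc = NULL;

	if (demo_crtc_create(&crtc) == 0)
		demo_crtc_destroy(crtc);
	return 0;
}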
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index adfc9b607a50..af4ec7bf3670 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -53,15 +53,15 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
}
if (dev_priv->chipset != 0x50) {
- BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+ BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
OUT_RING(evo, NvEvoVRAM);
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
OUT_RING(evo, nv_crtc->cursor.offset >> 8);
if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING(evo, 0);
FIRE_RING(evo);
nv_crtc->cursor.visible = true;
@@ -86,16 +86,16 @@ nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
NV_ERROR(dev, "no space while hiding cursor\n");
return;
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
OUT_RING(evo, 0);
if (dev_priv->chipset != 0x50) {
- BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+ BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
}
if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING(evo, 0);
FIRE_RING(evo);
nv_crtc->cursor.visible = false;
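/*
 * Userspace sketch of the command stream pattern seen in the hunks
 * above: BEGIN_NV04() emits a header naming a subchannel, a method
 * offset and a word count, OUT_RING() appends that many data words,
 * and FIRE_RING() hands the accumulated words to the hardware.  The
 * header layout below follows the classic NV04 encoding (count<<18 |
 * subchan<<13 | method); treat it as illustrative, not as the
 * authoritative EVO format.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ring[64];
static unsigned ring_put;

static void begin_nv04(int subc, uint32_t mthd, unsigned count)
{
	ring[ring_put++] = (count << 18) | (subc << 13) | mthd;
}

static void out_ring(uint32_t data)
{
	ring[ring_put++] = data;
}

static void fire_ring(void)
{
	for (unsigned i = 0; i < ring_put; i++)
		printf("%08x\n", (unsigned)ring[i]);	/* stand-in for the doorbell write */
	ring_put = 0;
}

int main(void)
{
	begin_nv04(0, 0x0080, 1);	/* e.g. an UPDATE method with one data word */
	out_ring(0x00000000);
	fire_ring();
	return 0;
}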
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 55c56330be6d..eb216a446b89 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -55,9 +55,9 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
NV_ERROR(dev, "no space while disconnecting DAC\n");
return;
}
- BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
OUT_RING (evo, 0);
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
nv_encoder->crtc = NULL;
@@ -240,7 +240,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
NV_ERROR(dev, "no space while connecting DAC\n");
return;
}
- BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
OUT_RING(evo, mode_ctl);
OUT_RING(evo, mode_ctl2);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 8b78b9cfa383..5c41612723b4 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -32,6 +32,7 @@
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
#include "nouveau_ramht.h"
+#include "nouveau_software.h"
#include "drm_crtc_helper.h"
static void nv50_display_isr(struct drm_device *);
@@ -140,11 +141,11 @@ nv50_display_sync(struct drm_device *dev)
ret = RING_SPACE(evo, 6);
if (ret == 0) {
- BEGIN_RING(evo, 0, 0x0084, 1);
+ BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x80000000);
- BEGIN_RING(evo, 0, 0x0080, 1);
+ BEGIN_NV04(evo, 0, 0x0080, 1);
OUT_RING (evo, 0);
- BEGIN_RING(evo, 0, 0x0084, 1);
+ BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000000);
nv_wo32(disp->ntfy, 0x000, 0x00000000);
@@ -267,7 +268,7 @@ nv50_display_init(struct drm_device *dev)
ret = RING_SPACE(evo, 3);
if (ret)
return ret;
- BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2);
OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
OUT_RING (evo, NvEvoSync);
@@ -292,7 +293,7 @@ nv50_display_fini(struct drm_device *dev)
ret = RING_SPACE(evo, 2);
if (ret == 0) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING(evo, 0);
}
FIRE_RING(evo);
@@ -358,8 +359,11 @@ nv50_display_create(struct drm_device *dev)
dev_priv->engine.display.priv = priv;
/* Create CRTC objects */
- for (i = 0; i < 2; i++)
- nv50_crtc_create(dev, i);
+ for (i = 0; i < 2; i++) {
+ ret = nv50_crtc_create(dev, i);
+ if (ret)
+ return ret;
+ }
/* We setup the encoders from the BIOS table */
for (i = 0 ; i < dcb->entries; i++) {
@@ -438,13 +442,13 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
return;
}
- BEGIN_RING(evo, 0, 0x0084, 1);
+ BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x0094, 1);
+ BEGIN_NV04(evo, 0, 0x0094, 1);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x00c0, 1);
+ BEGIN_NV04(evo, 0, 0x00c0, 1);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x0080, 1);
+ BEGIN_NV04(evo, 0, 0x0080, 1);
OUT_RING (evo, 0x00000000);
FIRE_RING (evo);
}
@@ -474,28 +478,28 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
}
if (dev_priv->chipset < 0xc0) {
- BEGIN_RING(chan, 0, 0x0060, 2);
+ BEGIN_NV04(chan, 0, 0x0060, 2);
OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
OUT_RING (chan, dispc->sem.offset);
- BEGIN_RING(chan, 0, 0x006c, 1);
+ BEGIN_NV04(chan, 0, 0x006c, 1);
OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
- BEGIN_RING(chan, 0, 0x0064, 2);
+ BEGIN_NV04(chan, 0, 0x0064, 2);
OUT_RING (chan, dispc->sem.offset ^ 0x10);
OUT_RING (chan, 0x74b1e000);
- BEGIN_RING(chan, 0, 0x0060, 1);
+ BEGIN_NV04(chan, 0, 0x0060, 1);
if (dev_priv->chipset < 0x84)
OUT_RING (chan, NvSema);
else
OUT_RING (chan, chan->vram_handle);
} else {
- u64 offset = chan->dispc_vma[nv_crtc->index].offset;
+ u64 offset = nvc0_software_crtc(chan, nv_crtc->index);
offset += dispc->sem.offset;
- BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
+ BEGIN_NVC0(chan, 0, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
OUT_RING (chan, 0x1002);
- BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
+ BEGIN_NVC0(chan, 0, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
@@ -508,40 +512,40 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
}
/* queue the flip on the crtc's "display sync" channel */
- BEGIN_RING(evo, 0, 0x0100, 1);
+ BEGIN_NV04(evo, 0, 0x0100, 1);
OUT_RING (evo, 0xfffe0000);
if (chan) {
- BEGIN_RING(evo, 0, 0x0084, 1);
+ BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000100);
} else {
- BEGIN_RING(evo, 0, 0x0084, 1);
+ BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000010);
/* allows gamma somehow, PDISP will bitch at you if
* you don't wait for vblank before changing this..
*/
- BEGIN_RING(evo, 0, 0x00e0, 1);
+ BEGIN_NV04(evo, 0, 0x00e0, 1);
OUT_RING (evo, 0x40000000);
}
- BEGIN_RING(evo, 0, 0x0088, 4);
+ BEGIN_NV04(evo, 0, 0x0088, 4);
OUT_RING (evo, dispc->sem.offset);
OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
OUT_RING (evo, 0x74b1e000);
OUT_RING (evo, NvEvoSync);
- BEGIN_RING(evo, 0, 0x00a0, 2);
+ BEGIN_NV04(evo, 0, 0x00a0, 2);
OUT_RING (evo, 0x00000000);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x00c0, 1);
+ BEGIN_NV04(evo, 0, 0x00c0, 1);
OUT_RING (evo, nv_fb->r_dma);
- BEGIN_RING(evo, 0, 0x0110, 2);
+ BEGIN_NV04(evo, 0, 0x0110, 2);
OUT_RING (evo, 0x00000000);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x0800, 5);
+ BEGIN_NV04(evo, 0, 0x0800, 5);
OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
OUT_RING (evo, 0);
OUT_RING (evo, (fb->height << 16) | fb->width);
OUT_RING (evo, nv_fb->r_pitch);
OUT_RING (evo, nv_fb->r_format);
- BEGIN_RING(evo, 0, 0x0080, 1);
+ BEGIN_NV04(evo, 0, 0x0080, 1);
OUT_RING (evo, 0x00000000);
FIRE_RING (evo);
@@ -642,20 +646,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
static void
nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan, *tmp;
-
- list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
- nvsw.vbl_wait) {
- if (chan->nvsw.vblsem_head != crtc)
- continue;
-
- nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
- chan->nvsw.vblsem_rval);
- list_del(&chan->nvsw.vbl_wait);
- drm_vblank_put(dev, crtc);
- }
-
+ nouveau_software_vblank(dev, crtc);
drm_handle_vblank(dev, crtc);
}
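/*
 * Simplified model of the per-CRTC vblank semaphore release the old
 * open-coded handler above performed, and which the shared
 * nouveau_software_vblank() helper now takes over: walk the list of
 * waiting channels, release the ones waiting on this CRTC by writing
 * their value at their offset, and unlink them.  Plain C with
 * illustrative types only.
 */
#include <stdint.h>
#include <stdio.h>

struct vbl_waiter {
	int head;                /* CRTC index the channel waits on */
	uint32_t *sem;           /* where to write the release value */
	uint32_t value;          /* value the waiter expects */
	struct vbl_waiter *next;
};

static struct vbl_waiter *waiting;

static void vblank_handler(int crtc)
{
	struct vbl_waiter **pw = &waiting;

	while (*pw) {
		struct vbl_waiter *w = *pw;

		if (w->head != crtc) {
			pw = &w->next;
			continue;
		}
		*w->sem = w->value;   /* release the semaphore */
		*pw = w->next;        /* unlink, like list_del() */
	}
}

int main(void)
{
	uint32_t sem = 0;
	struct vbl_waiter w = { .head = 0, .sem = &sem, .value = 0xf00d0000 };

	waiting = &w;
	vblank_handler(0);
	printf("sem=%08x remaining=%p\n", (unsigned)sem, (void *)waiting);
	return 0;
}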
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 5d3dd14d2837..e9db9b97f041 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -33,6 +33,7 @@
#include "nouveau_dma.h"
#include "nouveau_reg.h"
#include "nouveau_crtc.h"
+#include "nouveau_software.h"
#include "nv50_evo.h"
struct nv50_display_crtc {
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 9b962e989d7c..ddcd55595824 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -117,7 +117,7 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
evo->user_get = 4;
evo->user_put = 0;
- ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+ ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
&evo->pushbuf_bo);
if (ret == 0)
ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
@@ -333,7 +333,7 @@ nv50_evo_create(struct drm_device *dev)
goto err;
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &dispc->sem.bo);
+ 0, 0x0000, NULL, &dispc->sem.bo);
if (!ret) {
ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index bdd2afe29205..f1e4b9e07d14 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -2,6 +2,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_fifo.h"
struct nv50_fb_priv {
struct page *r100c08_page;
@@ -212,6 +213,7 @@ static struct nouveau_enum vm_fault[] = {
void
nv50_fb_vm_trap(struct drm_device *dev, int display)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
const struct nouveau_enum *en, *cl;
unsigned long flags;
@@ -236,7 +238,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display)
/* lookup channel id */
chinst = (trap[2] << 16) | trap[1];
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
+ for (ch = 0; ch < pfifo->channels; ch++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
if (!chan || !chan->ramin)
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index dc75a7206524..e3c8b05dcae4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -43,22 +43,22 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
return ret;
if (rect->rop != ROP_COPY) {
- BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
OUT_RING(chan, 1);
}
- BEGIN_RING(chan, NvSub2D, 0x0588, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0588, 1);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR)
OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
else
OUT_RING(chan, rect->color);
- BEGIN_RING(chan, NvSub2D, 0x0600, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x0600, 4);
OUT_RING(chan, rect->dx);
OUT_RING(chan, rect->dy);
OUT_RING(chan, rect->dx + rect->width);
OUT_RING(chan, rect->dy + rect->height);
if (rect->rop != ROP_COPY) {
- BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
OUT_RING(chan, 3);
}
FIRE_RING(chan);
@@ -78,14 +78,14 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
if (ret)
return ret;
- BEGIN_RING(chan, NvSub2D, 0x0110, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0110, 1);
OUT_RING(chan, 0);
- BEGIN_RING(chan, NvSub2D, 0x08b0, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x08b0, 4);
OUT_RING(chan, region->dx);
OUT_RING(chan, region->dy);
OUT_RING(chan, region->width);
OUT_RING(chan, region->height);
- BEGIN_RING(chan, NvSub2D, 0x08d0, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x08d0, 4);
OUT_RING(chan, 0);
OUT_RING(chan, region->sx);
OUT_RING(chan, 0);
@@ -116,7 +116,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
width = ALIGN(image->width, 32);
dwords = (width * image->height) >> 5;
- BEGIN_RING(chan, NvSub2D, 0x0814, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
OUT_RING(chan, palette[image->bg_color] | mask);
@@ -125,10 +125,10 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING(chan, image->bg_color);
OUT_RING(chan, image->fg_color);
}
- BEGIN_RING(chan, NvSub2D, 0x0838, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x0838, 2);
OUT_RING(chan, image->width);
OUT_RING(chan, image->height);
- BEGIN_RING(chan, NvSub2D, 0x0850, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x0850, 4);
OUT_RING(chan, 0);
OUT_RING(chan, image->dx);
OUT_RING(chan, 0);
@@ -143,7 +143,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
dwords -= push;
- BEGIN_RING(chan, NvSub2D, 0x40000860, push);
+ BEGIN_NI04(chan, NvSub2D, 0x0860, push);
OUT_RINGp(chan, data, push);
data += push;
}
@@ -199,60 +199,59 @@ nv50_fbcon_accel_init(struct fb_info *info)
return ret;
}
- BEGIN_RING(chan, NvSub2D, 0x0000, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
OUT_RING(chan, Nv2D);
- BEGIN_RING(chan, NvSub2D, 0x0180, 4);
- OUT_RING(chan, NvNotify0);
+ BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
OUT_RING(chan, chan->vram_handle);
OUT_RING(chan, chan->vram_handle);
OUT_RING(chan, chan->vram_handle);
- BEGIN_RING(chan, NvSub2D, 0x0290, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
OUT_RING(chan, 0);
- BEGIN_RING(chan, NvSub2D, 0x0888, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
OUT_RING(chan, 3);
- BEGIN_RING(chan, NvSub2D, 0x02a0, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x02a0, 1);
OUT_RING(chan, 0x55);
- BEGIN_RING(chan, NvSub2D, 0x08c0, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x08c0, 4);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0580, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x0580, 2);
OUT_RING(chan, 4);
OUT_RING(chan, format);
- BEGIN_RING(chan, NvSub2D, 0x02e8, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x02e8, 2);
OUT_RING(chan, 2);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0804, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0804, 1);
OUT_RING(chan, format);
- BEGIN_RING(chan, NvSub2D, 0x0800, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0800, 1);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0808, 3);
+ BEGIN_NV04(chan, NvSub2D, 0x0808, 3);
OUT_RING(chan, 0);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x081c, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x081c, 1);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0840, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x0840, 4);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0200, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x0200, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0214, 5);
+ BEGIN_NV04(chan, NvSub2D, 0x0214, 5);
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
OUT_RING(chan, upper_32_bits(fb->vma.offset));
OUT_RING(chan, lower_32_bits(fb->vma.offset));
- BEGIN_RING(chan, NvSub2D, 0x0230, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0244, 5);
+ BEGIN_NV04(chan, NvSub2D, 0x0244, 5);
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
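/*
 * The imageblit hunk above replaces BEGIN_RING(..., 0x40000860, ...)
 * with BEGIN_NI04(..., 0x0860, ...): instead of hiding the
 * "non-incrementing method" flag inside the method offset, the macro
 * name now carries that meaning.  A sketch of the two emitters, using
 * the 0x40000000 flag bit visible in the old code; the rest of the
 * header layout is illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define NI_FLAG 0x40000000u	/* all data words target the same method */

static uint32_t header(int subc, uint32_t mthd, unsigned count)
{
	return (count << 18) | (subc << 13) | mthd;
}

static uint32_t begin_nv04(int subc, uint32_t mthd, unsigned count)
{
	return header(subc, mthd, count);		/* incrementing methods */
}

static uint32_t begin_ni04(int subc, uint32_t mthd, unsigned count)
{
	return header(subc, mthd, count) | NI_FLAG;	/* non-incrementing */
}

int main(void)
{
	printf("%08x %08x\n",
	       (unsigned)begin_nv04(3, 0x0860, 4),
	       (unsigned)begin_ni04(3, 0x0860, 4));
	return 0;
}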
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 3bc2a565c20b..55383b85db0b 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Ben Skeggs.
+ * Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
@@ -27,480 +27,268 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"
-static void
+struct nv50_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nv50_fifo_chan {
+ struct nouveau_fifo_chan base;
+};
+
+void
nv50_fifo_playlist_update(struct drm_device *dev)
{
+ struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_gpuobj *cur;
- int i, nr;
-
- NV_DEBUG(dev, "\n");
+ int i, p;
- cur = pfifo->playlist[pfifo->cur_playlist];
- pfifo->cur_playlist = !pfifo->cur_playlist;
+ cur = priv->playlist[priv->cur_playlist];
+ priv->cur_playlist = !priv->cur_playlist;
- /* We never schedule channel 0 or 127 */
- for (i = 1, nr = 0; i < 127; i++) {
- if (dev_priv->channels.ptr[i] &&
- dev_priv->channels.ptr[i]->ramfc) {
- nv_wo32(cur, (nr * 4), i);
- nr++;
- }
+ for (i = 0, p = 0; i < priv->base.channels; i++) {
+ if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
+ nv_wo32(cur, p++ * 4, i);
}
- dev_priv->engine.instmem.flush(dev);
-
- nv_wr32(dev, 0x32f4, cur->vinst >> 12);
- nv_wr32(dev, 0x32ec, nr);
- nv_wr32(dev, 0x2500, 0x101);
-}
-static void
-nv50_fifo_channel_enable(struct drm_device *dev, int channel)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
- uint32_t inst;
-
- NV_DEBUG(dev, "ch%d\n", channel);
-
- if (dev_priv->chipset == 0x50)
- inst = chan->ramfc->vinst >> 12;
- else
- inst = chan->ramfc->vinst >> 8;
+ dev_priv->engine.instmem.flush(dev);
- nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst |
- NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
+ nv_wr32(dev, 0x0032f4, cur->vinst >> 12);
+ nv_wr32(dev, 0x0032ec, p);
+ nv_wr32(dev, 0x002500, 0x00000101);
}
-static void
-nv50_fifo_channel_disable(struct drm_device *dev, int channel)
+static int
+nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
{
+ struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine);
+ struct nv50_fifo_chan *fctx;
+ struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t inst;
-
- NV_DEBUG(dev, "ch%d\n", channel);
+ u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
+ u64 instance = chan->ramin->vinst >> 12;
+ unsigned long flags;
+ int ret = 0, i;
- if (dev_priv->chipset == 0x50)
- inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
- else
- inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
- nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
-}
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+ atomic_inc(&chan->vm->engref[engine]);
-static void
-nv50_fifo_init_reset(struct drm_device *dev)
-{
- uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
- NV_DEBUG(dev, "\n");
+ for (i = 0; i < 0x100; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+ nv_wo32(chan->ramin, 0x3c, 0x403f6078);
+ nv_wo32(chan->ramin, 0x40, 0x00000000);
+ nv_wo32(chan->ramin, 0x44, 0x01003fff);
+ nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4);
+ nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
+ nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
+ drm_order(chan->dma.ib_max + 1) << 16);
+ nv_wo32(chan->ramin, 0x60, 0x7fffffff);
+ nv_wo32(chan->ramin, 0x78, 0x00000000);
+ nv_wo32(chan->ramin, 0x7c, 0x30000001);
+ nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->cinst >> 4));
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
-}
+ dev_priv->engine.instmem.flush(dev);
-static void
-nv50_fifo_init_intr(struct drm_device *dev)
-{
- NV_DEBUG(dev, "\n");
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
+ nv50_fifo_playlist_update(dev);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
- nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
}
-static void
-nv50_fifo_init_context_table(struct drm_device *dev)
+static bool
+nv50_fifo_kickoff(struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i;
-
- NV_DEBUG(dev, "\n");
-
- for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
- if (dev_priv->channels.ptr[i])
- nv50_fifo_channel_enable(dev, i);
- else
- nv50_fifo_channel_disable(dev, i);
+ struct drm_device *dev = chan->dev;
+ bool done = true;
+ u32 me;
+
+ /* HW bug workaround:
+ *
+ * PFIFO will hang forever if the connected engines don't report
+ * that they've processed the context switch request.
+ *
+ * In order for the kickoff to work, we need to ensure all the
+ * connected engines are in a state where they can answer.
+ *
+ * Newer chipsets don't seem to suffer from this issue, and well,
+ * there's also an "ignore these engines" bitmask reg we can use
+ * if we hit the issue there.
+ */
+
+ /* PME: make sure engine is enabled */
+ me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
+
+ /* do the kickoff... */
+ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+ if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
+ NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
+ done = false;
}
- nv50_fifo_playlist_update(dev);
+ /* restore any engine states we changed, and exit */
+ nv_wr32(dev, 0x00b860, me);
+ return done;
}
static void
-nv50_fifo_init_regs__nv(struct drm_device *dev)
-{
- NV_DEBUG(dev, "\n");
-
- nv_wr32(dev, 0x250c, 0x6f3cfc34);
-}
-
-static void
-nv50_fifo_init_regs(struct drm_device *dev)
-{
- NV_DEBUG(dev, "\n");
-
- nv_wr32(dev, 0x2500, 0);
- nv_wr32(dev, 0x3250, 0);
- nv_wr32(dev, 0x3220, 0);
- nv_wr32(dev, 0x3204, 0);
- nv_wr32(dev, 0x3210, 0);
- nv_wr32(dev, 0x3270, 0);
- nv_wr32(dev, 0x2044, 0x01003fff);
-
- /* Enable dummy channels setup by nv50_instmem.c */
- nv50_fifo_channel_enable(dev, 0);
- nv50_fifo_channel_enable(dev, 127);
-}
-
-int
-nv50_fifo_init(struct drm_device *dev)
+nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
{
+ struct nv50_fifo_chan *fctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- int ret;
+ unsigned long flags;
- NV_DEBUG(dev, "\n");
+ /* remove channel from playlist, will context switch if active */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
+ nv50_fifo_playlist_update(dev);
- if (pfifo->playlist[0]) {
- pfifo->cur_playlist = !pfifo->cur_playlist;
- goto just_reset;
- }
+ /* tell any engines on this channel to unload their contexts */
+ nv50_fifo_kickoff(chan);
- ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC,
- &pfifo->playlist[0]);
- if (ret) {
- NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
- return ret;
- }
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC,
- &pfifo->playlist[1]);
- if (ret) {
- nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
- NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
- return ret;
+ /* clean up */
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
}
-just_reset:
- nv50_fifo_init_reset(dev);
- nv50_fifo_init_intr(dev);
- nv50_fifo_init_context_table(dev);
- nv50_fifo_init_regs__nv(dev);
- nv50_fifo_init_regs(dev);
- dev_priv->engine.fifo.enable(dev);
- dev_priv->engine.fifo.reassign(dev, true);
-
- return 0;
+ atomic_dec(&chan->vm->engref[engine]);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
}
-void
-nv50_fifo_takedown(struct drm_device *dev)
+static int
+nv50_fifo_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ u32 instance;
+ int i;
- NV_DEBUG(dev, "\n");
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x00250c, 0x6f3cfc34);
+ nv_wr32(dev, 0x002044, 0x01003fff);
- if (!pfifo->playlist[0])
- return;
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xffffffff);
- nv_wr32(dev, 0x2140, 0x00000000);
- nouveau_irq_unregister(dev, 8);
+ for (i = 0; i < 128; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan && chan->engctx[engine])
+ instance = 0x80000000 | chan->ramin->vinst >> 12;
+ else
+ instance = 0x00000000;
+ nv_wr32(dev, 0x002600 + (i * 4), instance);
+ }
- nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
- nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
-}
+ nv50_fifo_playlist_update(dev);
-int
-nv50_fifo_channel_id(struct drm_device *dev)
-{
- return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
- NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
+ nv_wr32(dev, 0x003200, 1);
+ nv_wr32(dev, 0x003250, 1);
+ nv_wr32(dev, 0x002500, 1);
+ return 0;
}
-int
-nv50_fifo_create_context(struct nouveau_channel *chan)
+static int
+nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
- struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramfc = NULL;
- uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
- unsigned long flags;
- int ret;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- if (dev_priv->chipset == 0x50) {
- ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
- chan->ramin->vinst, 0x100,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE,
- &chan->ramfc);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400,
- chan->ramin->vinst + 0x0400,
- 4096, 0, &chan->cache);
- if (ret)
- return ret;
- } else {
- ret = nouveau_gpuobj_new(dev, chan, 0x100, 256,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 4096, 1024,
- 0, &chan->cache);
- if (ret)
- return ret;
- }
- ramfc = chan->ramfc;
+ struct nv50_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV50_USER(chan->id), PAGE_SIZE);
- if (!chan->user)
- return -ENOMEM;
+ /* set playlist length to zero, fifo will unload context */
+ nv_wr32(dev, 0x0032ec, 0);
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
- nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
- nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj->cinst >> 4));
- nv_wo32(ramfc, 0x44, 0x01003fff);
- nv_wo32(ramfc, 0x60, 0x7fffffff);
- nv_wo32(ramfc, 0x40, 0x00000000);
- nv_wo32(ramfc, 0x7c, 0x30000001);
- nv_wo32(ramfc, 0x78, 0x00000000);
- nv_wo32(ramfc, 0x3c, 0x403f6078);
- nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
- nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
- drm_order(chan->dma.ib_max + 1) << 16);
-
- if (dev_priv->chipset != 0x50) {
- nv_wo32(chan->ramin, 0, chan->id);
- nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8);
-
- nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10);
- nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12);
+ /* tell all connected engines to unload their contexts */
+ for (i = 0; i < priv->base.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan && !nv50_fifo_kickoff(chan))
+ return -EBUSY;
}
- dev_priv->engine.instmem.flush(dev);
-
- nv50_fifo_channel_enable(dev, chan->id);
- nv50_fifo_playlist_update(dev);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ nv_wr32(dev, 0x002140, 0);
return 0;
}
void
-nv50_fifo_destroy_context(struct nouveau_channel *chan)
+nv50_fifo_tlb_flush(struct drm_device *dev, int engine)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nouveau_gpuobj *ramfc = NULL;
- unsigned long flags;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- pfifo->reassign(dev, false);
-
- /* Unload the context if it's the currently active one */
- if (pfifo->channel_id(dev) == chan->id) {
- pfifo->disable(dev);
- pfifo->unload_context(dev);
- pfifo->enable(dev);
- }
-
- /* This will ensure the channel is seen as disabled. */
- nouveau_gpuobj_ref(chan->ramfc, &ramfc);
- nouveau_gpuobj_ref(NULL, &chan->ramfc);
- nv50_fifo_channel_disable(dev, chan->id);
-
- /* Dummy channel, also used on ch 127 */
- if (chan->id == 0)
- nv50_fifo_channel_disable(dev, 127);
- nv50_fifo_playlist_update(dev);
-
- pfifo->reassign(dev, true);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* Free the channel resources */
- if (chan->user) {
- iounmap(chan->user);
- chan->user = NULL;
- }
- nouveau_gpuobj_ref(NULL, &ramfc);
- nouveau_gpuobj_ref(NULL, &chan->cache);
+ nv50_vm_flush_engine(dev, 5);
}
-int
-nv50_fifo_load_context(struct nouveau_channel *chan)
+void
+nv50_fifo_destroy(struct drm_device *dev, int engine)
{
- struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramfc = chan->ramfc;
- struct nouveau_gpuobj *cache = chan->cache;
- int ptr, cnt;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00));
- nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04));
- nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08));
- nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c));
- nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10));
- nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14));
- nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18));
- nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c));
- nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20));
- nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24));
- nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28));
- nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c));
- nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30));
- nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34));
- nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38));
- nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c));
- nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40));
- nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44));
- nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48));
- nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c));
- nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50));
- nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54));
- nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58));
- nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c));
- nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60));
- nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64));
- nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68));
- nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c));
- nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70));
- nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74));
- nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78));
- nv_wr32(dev, 0x2058, nv_ro32(ramfc, 0x7c));
- nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80));
-
- cnt = nv_ro32(ramfc, 0x84);
- for (ptr = 0; ptr < cnt; ptr++) {
- nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
- nv_ro32(cache, (ptr * 8) + 0));
- nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
- nv_ro32(cache, (ptr * 8) + 4));
- }
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
-
- /* guessing that all the 0x34xx regs aren't on NV50 */
- if (dev_priv->chipset != 0x50) {
- nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88));
- nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c));
- nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90));
- nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94));
- nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98));
- }
+ struct nv50_fifo_priv *priv = nv_engine(dev, engine);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
- return 0;
+ nouveau_irq_unregister(dev, 8);
+
+ nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
}
int
-nv50_fifo_unload_context(struct drm_device *dev)
+nv50_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nouveau_gpuobj *ramfc, *cache;
- struct nouveau_channel *chan = NULL;
- int chid, get, put, ptr;
-
- NV_DEBUG(dev, "\n");
-
- chid = pfifo->channel_id(dev);
- if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
- return 0;
-
- chan = dev_priv->channels.ptr[chid];
- if (!chan) {
- NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
- return -EINVAL;
- }
- NV_DEBUG(dev, "ch%d\n", chan->id);
- ramfc = chan->ramfc;
- cache = chan->cache;
-
- nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330));
- nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334));
- nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240));
- nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320));
- nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244));
- nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328));
- nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368));
- nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c));
- nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370));
- nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374));
- nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378));
- nv_wo32(ramfc, 0x2c, nv_rd32(dev, 0x337c));
- nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228));
- nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364));
- nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0));
- nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224));
- nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c));
- nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044));
- nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c));
- nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234));
- nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340));
- nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344));
- nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280));
- nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254));
- nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260));
- nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264));
- nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268));
- nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c));
- nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4));
- nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248));
- nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088));
- nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058));
- nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210));
-
- put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
- get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
- ptr = 0;
- while (put != get) {
- nv_wo32(cache, ptr + 0,
- nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
- nv_wo32(cache, ptr + 4,
- nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
- get = (get + 1) & 0x1ff;
- ptr += 8;
- }
-
- /* guessing that all the 0x34xx regs aren't on NV50 */
- if (dev_priv->chipset != 0x50) {
- nv_wo32(ramfc, 0x84, ptr >> 3);
- nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c));
- nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400));
- nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404));
- nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408));
- nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410));
- }
+ struct nv50_fifo_priv *priv;
+ int ret;
- dev_priv->engine.instmem.flush(dev);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- /*XXX: probably reload ch127 (NULL) state back too */
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
- return 0;
-}
+ priv->base.base.destroy = nv50_fifo_destroy;
+ priv->base.base.init = nv50_fifo_init;
+ priv->base.base.fini = nv50_fifo_fini;
+ priv->base.base.context_new = nv50_fifo_context_new;
+ priv->base.base.context_del = nv50_fifo_context_del;
+ priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
+ priv->base.channels = 127;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
+ if (ret)
+ goto error;
-void
-nv50_fifo_tlb_flush(struct drm_device *dev)
-{
- nv50_vm_flush_engine(dev, 5);
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
}
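/*
 * The rewritten nv50_fifo.c hangs PFIFO off the generic engine table
 * through a private struct that embeds a base object full of function
 * pointers.  A compact userspace sketch of that embedding-plus-ops
 * pattern follows; the struct names and the single-slot engine table
 * are illustrative, and the real nv_engine() lookup is approximated by
 * keeping the embedded base as the first member.
 */
#include <stdlib.h>
#include <stdio.h>

struct engine_ops {
	int  (*init)(void *priv);
	void (*destroy)(void *priv);
};

struct demo_fifo_priv {
	struct engine_ops base;		/* embedded base, first member */
	int channels;
	void *playlist[2];
};

static int demo_fifo_init(void *priv)
{
	struct demo_fifo_priv *p = priv;

	printf("init fifo, %d channels\n", p->channels);
	return 0;
}

static void demo_fifo_destroy(void *priv)
{
	struct demo_fifo_priv *p = priv;

	free(p->playlist[0]);
	free(p->playlist[1]);
	free(p);
}

static int demo_fifo_create(struct engine_ops **slot)
{
	struct demo_fifo_priv *p = calloc(1, sizeof(*p));

	if (!p)
		return -1;

	p->base.init = demo_fifo_init;
	p->base.destroy = demo_fifo_destroy;
	p->channels = 127;

	p->playlist[0] = calloc(p->channels, 4);
	p->playlist[1] = calloc(p->channels, 4);
	if (!p->playlist[0] || !p->playlist[1]) {
		p->base.destroy(p);	/* unwind through the same ops */
		return -1;
	}

	*slot = &p->base;		/* engine table sees only the base */
	return 0;
}

int main(void)
{
	struct engine_ops *eng = NULL;

	if (demo_fifo_create(&eng) == 0) {
		eng->init(eng);
		eng->destroy(eng);
	}
	return 0;
}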
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 33d5711a918d..d9cc2f2638d6 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -27,8 +27,8 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
-#include "nouveau_grctx.h"
#include "nouveau_dma.h"
#include "nouveau_vm.h"
#include "nv50_evo.h"
@@ -40,86 +40,6 @@ struct nv50_graph_engine {
u32 grctx_size;
};
-static void
-nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
-{
- const uint32_t mask = 0x00010001;
-
- if (enabled)
- nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
- else
- nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
-}
-
-static struct nouveau_channel *
-nv50_graph_channel(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t inst;
- int i;
-
- /* Be sure we're not in the middle of a context switch or bad things
- * will happen, such as unloading the wrong pgraph context.
- */
- if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
- NV_ERROR(dev, "Ctxprog is still running\n");
-
- inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
- if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
- return NULL;
- inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
-
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->channels.ptr[i];
-
- if (chan && chan->ramin && chan->ramin->vinst == inst)
- return chan;
- }
-
- return NULL;
-}
-
-static int
-nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
-{
- uint32_t fifo = nv_rd32(dev, 0x400500);
-
- nv_wr32(dev, 0x400500, fifo & ~1);
- nv_wr32(dev, 0x400784, inst);
- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
- nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
- nv_wr32(dev, 0x400040, 0xffffffff);
- (void)nv_rd32(dev, 0x400040);
- nv_wr32(dev, 0x400040, 0x00000000);
- nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
-
- if (nouveau_wait_for_idle(dev))
- nv_wr32(dev, 0x40032c, inst | (1<<31));
- nv_wr32(dev, 0x400500, fifo);
-
- return 0;
-}
-
-static int
-nv50_graph_unload_context(struct drm_device *dev)
-{
- uint32_t inst;
-
- inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
- if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
- return 0;
- inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
-
- nouveau_wait_for_idle(dev);
- nv_wr32(dev, 0x400784, inst);
- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
- nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
- nouveau_wait_for_idle(dev);
-
- nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
- return 0;
-}
-
static int
nv50_graph_init(struct drm_device *dev, int engine)
{
@@ -211,12 +131,6 @@ nv50_graph_init(struct drm_device *dev, int engine)
static int
nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
- nv_mask(dev, 0x400500, 0x00010001, 0x00000000);
- if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) {
- nv_mask(dev, 0x400500, 0x00010001, 0x00010001);
- return -EBUSY;
- }
- nv50_graph_unload_context(dev);
nv_wr32(dev, 0x40013c, 0x00000000);
return 0;
}
@@ -229,7 +143,6 @@ nv50_graph_context_new(struct nouveau_channel *chan, int engine)
struct nouveau_gpuobj *ramin = chan->ramin;
struct nouveau_gpuobj *grctx = NULL;
struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
- struct nouveau_grctx ctx = {};
int hdr, ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -248,11 +161,7 @@ nv50_graph_context_new(struct nouveau_channel *chan, int engine)
nv_wo32(ramin, hdr + 0x10, 0);
nv_wo32(ramin, hdr + 0x14, 0x00010000);
- ctx.dev = chan->dev;
- ctx.mode = NOUVEAU_GRCTX_VALS;
- ctx.data = grctx;
- nv50_grctx_init(&ctx);
-
+ nv50_grctx_fill(dev, grctx);
nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);
dev_priv->engine.instmem.flush(dev);
@@ -268,33 +177,14 @@ nv50_graph_context_del(struct nouveau_channel *chan, int engine)
struct nouveau_gpuobj *grctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
- unsigned long flags;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- if (!chan->ramin)
- return;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- pfifo->reassign(dev, false);
- nv50_graph_fifo_access(dev, false);
-
- if (nv50_graph_channel(dev) == chan)
- nv50_graph_unload_context(dev);
for (i = hdr; i < hdr + 24; i += 4)
nv_wo32(chan->ramin, i, 0);
dev_priv->engine.instmem.flush(dev);
- nv50_graph_fifo_access(dev, true);
- pfifo->reassign(dev, true);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- nouveau_gpuobj_ref(NULL, &grctx);
-
atomic_dec(&chan->vm->engref[engine]);
+ nouveau_gpuobj_ref(NULL, &grctx);
chan->engctx[engine] = NULL;
}
@@ -325,85 +215,6 @@ nv50_graph_object_new(struct nouveau_channel *chan, int engine,
}
static void
-nv50_graph_context_switch(struct drm_device *dev)
-{
- uint32_t inst;
-
- nv50_graph_unload_context(dev);
-
- inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
- inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
- nv50_graph_do_load_context(dev, inst);
-
- nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
- NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
-}
-
-static int
-nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- struct nouveau_gpuobj *gpuobj;
-
- gpuobj = nouveau_ramht_find(chan, data);
- if (!gpuobj)
- return -ENOENT;
-
- if (nouveau_notifier_offset(gpuobj, NULL))
- return -EINVAL;
-
- chan->nvsw.vblsem = gpuobj;
- chan->nvsw.vblsem_offset = ~0;
- return 0;
-}
-
-static int
-nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
- return -ERANGE;
-
- chan->nvsw.vblsem_offset = data >> 2;
- return 0;
-}
-
-static int
-nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- chan->nvsw.vblsem_rval = data;
- return 0;
-}
-
-static int
-nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
- return -EINVAL;
-
- drm_vblank_get(dev, data);
-
- chan->nvsw.vblsem_head = data;
- list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
-
- return 0;
-}
-
-static int
-nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- nouveau_finish_page_flip(chan, NULL);
- return 0;
-}
-
-
-static void
nv50_graph_tlb_flush(struct drm_device *dev, int engine)
{
nv50_vm_flush_engine(dev, 0);
@@ -514,6 +325,7 @@ struct nouveau_enum nv50_data_error_names[] = {
{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
+ { 0x00000024, "VP_ZERO_INPUTS", NULL },
{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
@@ -900,13 +712,14 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
int
nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan;
unsigned long flags;
int i;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ for (i = 0; i < pfifo->channels; i++) {
chan = dev_priv->channels.ptr[i];
if (!chan || !chan->ramin)
continue;
@@ -939,15 +752,6 @@ nv50_graph_isr(struct drm_device *dev)
show &= ~0x00000010;
}
- if (stat & 0x00001000) {
- nv_wr32(dev, 0x400500, 0x00000000);
- nv_wr32(dev, 0x400100, 0x00001000);
- nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
- nv50_graph_context_switch(dev);
- stat &= ~0x00001000;
- show &= ~0x00001000;
- }
-
show = (show && nouveau_ratelimit()) ? show : 0;
if (show & 0x00100000) {
@@ -996,28 +800,21 @@ nv50_graph_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_graph_engine *pgraph;
- struct nouveau_grctx ctx = {};
int ret;
pgraph = kzalloc(sizeof(*pgraph),GFP_KERNEL);
if (!pgraph)
return -ENOMEM;
- ctx.dev = dev;
- ctx.mode = NOUVEAU_GRCTX_PROG;
- ctx.data = pgraph->ctxprog;
- ctx.ctxprog_max = ARRAY_SIZE(pgraph->ctxprog);
-
- ret = nv50_grctx_init(&ctx);
+ ret = nv50_grctx_init(dev, pgraph->ctxprog, ARRAY_SIZE(pgraph->ctxprog),
+ &pgraph->ctxprog_size,
+ &pgraph->grctx_size);
if (ret) {
NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
kfree(pgraph);
return 0;
}
- pgraph->grctx_size = ctx.ctxvals_pos * 4;
- pgraph->ctxprog_size = ctx.ctxprog_len;
-
pgraph->base.destroy = nv50_graph_destroy;
pgraph->base.init = nv50_graph_init;
pgraph->base.fini = nv50_graph_fini;
@@ -1031,14 +828,6 @@ nv50_graph_create(struct drm_device *dev)
nouveau_irq_register(dev, 12, nv50_graph_isr);
- /* NVSW really doesn't live here... */
- NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
- NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
- NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
- NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
- NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
-
NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
NVOBJ_CLASS(dev, 0x0030, GR); /* null */
NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index 4b46d6968566..881e22b249fc 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -172,8 +172,8 @@ static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
/* Main function: construct the ctxprog skeleton, call the other functions. */
-int
-nv50_grctx_init(struct nouveau_grctx *ctx)
+static int
+nv50_grctx_generate(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
@@ -210,7 +210,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
cp_name(ctx, cp_check_load);
cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
- cp_bra (ctx, ALWAYS, TRUE, cp_exit);
+ cp_bra (ctx, ALWAYS, TRUE, cp_prepare_exit);
/* setup for context load */
cp_name(ctx, cp_setup_auto_load);
@@ -277,6 +277,33 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
return 0;
}
+void
+nv50_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+{
+ nv50_grctx_generate(&(struct nouveau_grctx) {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_VALS,
+ .data = mem,
+ });
+}
+
+int
+nv50_grctx_init(struct drm_device *dev, u32 *data, u32 max, u32 *len, u32 *cnt)
+{
+ struct nouveau_grctx ctx = {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_PROG,
+ .data = data,
+ .ctxprog_max = max
+ };
+ int ret;
+
+ ret = nv50_grctx_generate(&ctx);
+ *cnt = ctx.ctxvals_pos * 4;
+ *len = ctx.ctxprog_len;
+ return ret;
+}
+
/*
* Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which
* registers to save/restore and the default values for them.
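/*
 * nv50_grctx_fill() above builds its nouveau_grctx argument as an
 * anonymous compound literal with designated initializers, while
 * nv50_grctx_init() hands two derived sizes back through out
 * pointers.  A standalone C99 sketch of both idioms; the generate()
 * body and field names are stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

struct ctx {
	int mode;
	uint32_t *data;
	uint32_t pos;
	uint32_t len;
};

static int generate(struct ctx *c)
{
	c->pos = 4;		/* pretend we emitted 4 context values */
	c->len = 8;		/* ...and an 8-word ctxprog */
	return 0;
}

static void fill(uint32_t *mem)
{
	/* one-shot argument: compound literal, no named temporary */
	generate(&(struct ctx){ .mode = 1, .data = mem });
}

static int build(uint32_t *data, uint32_t *len, uint32_t *cnt)
{
	struct ctx c = { .mode = 0, .data = data };
	int ret = generate(&c);

	*cnt = c.pos * 4;	/* two results via out parameters */
	*len = c.len;
	return ret;
}

int main(void)
{
	uint32_t buf[16], len, cnt;

	fill(buf);
	build(buf, &len, &cnt);
	printf("len=%u cnt=%u\n", (unsigned)len, (unsigned)cnt);
	return 0;
}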
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index a7c12c94a5a6..0bba54f11800 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -83,7 +83,7 @@ nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
return ret;
}
- ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size);
+ ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size - 0x6000);
if (ret) {
nv50_channel_del(&chan);
return ret;
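/*
 * The one-line nv50_instmem.c fix above corrects a start/size mix-up:
 * drm_mm_init() takes the offset where the managed range begins and
 * the size of that range, so a heap carved out of ramin starting at
 * 0x6000 must be sized "object size - 0x6000".  Sketch of the
 * arithmetic with illustrative numbers:
 */
#include <stdio.h>

int main(void)
{
	unsigned long ramin_size = 0x20000;	/* whole backing object */
	unsigned long heap_start = 0x6000;	/* reserved header before the heap */
	unsigned long heap_size  = ramin_size - heap_start;

	/* the old call managed [0x6000, 0x6000 + ramin_size), running past
	 * the object by heap_start bytes; the fixed call stays inside it. */
	printf("heap: start=0x%lx size=0x%lx end=0x%lx\n",
	       heap_start, heap_size, heap_start + heap_size);
	return 0;
}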
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
index b57a2d180ad2..90e8ed22cfcb 100644
--- a/drivers/gpu/drm/nouveau/nv50_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv50_mpeg.c
@@ -77,27 +77,13 @@ nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
static void
nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_gpuobj *ctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
- unsigned long flags;
- u32 inst, i;
-
- if (!chan->ramin)
- return;
-
- inst = chan->ramin->vinst >> 12;
- inst |= 0x80000000;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
- if (nv_rd32(dev, 0x00b318) == inst)
- nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ int i;
for (i = 0x00; i <= 0x14; i += 4)
nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
+
nouveau_gpuobj_ref(NULL, &ctx);
chan->engctx[engine] = NULL;
}
@@ -162,7 +148,6 @@ nv50_mpeg_init(struct drm_device *dev, int engine)
static int
nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
{
- /*XXX: context save for s/r */
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
nv_wr32(dev, 0x00b140, 0x00000000);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_software.c b/drivers/gpu/drm/nouveau/nv50_software.c
new file mode 100644
index 000000000000..114d2517d4a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_software.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
+#include "nouveau_software.h"
+
+#include "nv50_display.h"
+
+struct nv50_software_priv {
+ struct nouveau_software_priv base;
+};
+
+struct nv50_software_chan {
+ struct nouveau_software_chan base;
+ struct {
+ struct nouveau_gpuobj *object;
+ } vblank;
+};
+
+static int
+mthd_dma_vblsem(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+ struct nouveau_gpuobj *gpuobj;
+
+ gpuobj = nouveau_ramht_find(chan, data);
+ if (!gpuobj)
+ return -ENOENT;
+
+ if (nouveau_notifier_offset(gpuobj, NULL))
+ return -EINVAL;
+
+ pch->vblank.object = gpuobj;
+ pch->base.vblank.offset = ~0;
+ return 0;
+}
+
+static int
+mthd_vblsem_offset(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+
+ if (nouveau_notifier_offset(pch->vblank.object, &data))
+ return -ERANGE;
+
+ pch->base.vblank.offset = data >> 2;
+ return 0;
+}
+
+static int
+mthd_vblsem_value(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+ pch->base.vblank.value = data;
+ return 0;
+}
+
+static int
+mthd_vblsem_release(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
+ struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+ struct drm_device *dev = chan->dev;
+
+ if (!pch->vblank.object || pch->base.vblank.offset == ~0 || data > 1)
+ return -EINVAL;
+
+ drm_vblank_get(dev, data);
+
+ pch->base.vblank.head = data;
+ list_add(&pch->base.vblank.list, &psw->base.vblank);
+ return 0;
+}
+
+static int
+mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ nouveau_finish_page_flip(chan, NULL);
+ return 0;
+}
+
+static int
+nv50_software_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
+ struct nv50_display *pdisp = nv50_display(chan->dev);
+ struct nv50_software_chan *pch;
+ int ret = 0, i;
+
+ pch = kzalloc(sizeof(*pch), GFP_KERNEL);
+ if (!pch)
+ return -ENOMEM;
+
+ nouveau_software_context_new(&pch->base);
+ pch->base.vblank.bo = chan->notifier_bo;
+ chan->engctx[engine] = pch;
+
+ /* dma objects for display sync channel semaphore blocks */
+ for (i = 0; i < chan->dev->mode_config.num_crtc; i++) {
+ struct nv50_display_crtc *dispc = &pdisp->crtc[i];
+ struct nouveau_gpuobj *obj = NULL;
+
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ dispc->sem.bo->bo.offset, 0x1000,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &obj);
+ if (ret)
+ break;
+
+ ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ }
+
+ if (ret)
+ psw->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nv50_software_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv50_software_chan *pch = chan->engctx[engine];
+ chan->engctx[engine] = NULL;
+ kfree(pch);
+}
+
+static int
+nv50_software_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 0;
+ obj->class = class;
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
+static int
+nv50_software_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static int
+nv50_software_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static void
+nv50_software_destroy(struct drm_device *dev, int engine)
+{
+ struct nv50_software_priv *psw = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, SW);
+ kfree(psw);
+}
+
+int
+nv50_software_create(struct drm_device *dev)
+{
+ struct nv50_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
+ if (!psw)
+ return -ENOMEM;
+
+ psw->base.base.destroy = nv50_software_destroy;
+ psw->base.base.init = nv50_software_init;
+ psw->base.base.fini = nv50_software_fini;
+ psw->base.base.context_new = nv50_software_context_new;
+ psw->base.base.context_del = nv50_software_context_del;
+ psw->base.base.object_new = nv50_software_object_new;
+ nouveau_software_create(&psw->base);
+
+ NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x018c, mthd_dma_vblsem);
+ NVOBJ_MTHD (dev, 0x506e, 0x0400, mthd_vblsem_offset);
+ NVOBJ_MTHD (dev, 0x506e, 0x0404, mthd_vblsem_value);
+ NVOBJ_MTHD (dev, 0x506e, 0x0408, mthd_vblsem_release);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, mthd_flip);
+ return 0;
+}
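
(Editorial aside, not part of the patch: the 0x506e methods registered above are normally driven from a client's pushbuffer. The hedged sketch below only strings together macros that already appear elsewhere in this series — RING_SPACE, BEGIN_NV04, OUT_RING, FIRE_RING — and assumes the software object is bound to subchannel 0; the helper name and the notifier_handle parameter are invented for illustration.)

/* Illustrative sketch: ask the SW engine to release a vblank semaphore on
 * crtc "head".  Method numbers match the NVOBJ_MTHD registrations above;
 * "notifier_handle" must name the channel's notifier DMA object, since
 * mthd_dma_vblsem() rejects anything else. */
static int
example_vblsem_release(struct nouveau_channel *chan, u32 notifier_handle,
		       u32 offset, u32 value, int head)
{
	int ret = RING_SPACE(chan, 6);
	if (ret)
		return ret;
	BEGIN_NV04(chan, 0, 0x018c, 1);	/* mthd_dma_vblsem */
	OUT_RING  (chan, notifier_handle);
	BEGIN_NV04(chan, 0, 0x0400, 3);	/* offset, value, release(head) */
	OUT_RING  (chan, offset);
	OUT_RING  (chan, value);
	OUT_RING  (chan, head);
	FIRE_RING (chan);
	return 0;
}
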
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 274640212475..a9514eaa74c1 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -242,9 +242,9 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
NV_ERROR(dev, "no space while disconnecting SOR\n");
return;
}
- BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
OUT_RING (evo, 0);
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
nouveau_hdmi_mode_set(encoder, NULL);
@@ -430,7 +430,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
nv_encoder->crtc = NULL;
return;
}
- BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
OUT_RING(evo, mode_ctl);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 44fbac9c7d93..179bb42a635c 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -147,7 +147,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
{
struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
int i;
pinstmem->flush(vm->dev);
@@ -158,7 +157,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
return;
}
- pfifo->tlb_flush(vm->dev);
for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
if (atomic_read(&vm->engref[i]))
dev_priv->eng[i]->tlb_flush(vm->dev, i);
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
new file mode 100644
index 000000000000..c2f889b0d340
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fifo.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nv84_fence_chan {
+ struct nouveau_fence_chan base;
+};
+
+struct nv84_fence_priv {
+ struct nouveau_fence_priv base;
+ struct nouveau_gpuobj *mem;
+};
+
+static int
+nv84_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan = fence->channel;
+ int ret = RING_SPACE(chan, 7);
+ if (ret == 0) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, NvSema);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(chan->id * 16));
+ OUT_RING (chan, lower_32_bits(chan->id * 16));
+ OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+ FIRE_RING (chan);
+ }
+ return ret;
+}
+
+
+static int
+nv84_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ int ret = RING_SPACE(chan, 7);
+ if (ret == 0) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, NvSema);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(prev->id * 16));
+ OUT_RING (chan, lower_32_bits(prev->id * 16));
+ OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
+ FIRE_RING (chan);
+ }
+ return ret;
+}
+
+static u32
+nv84_fence_read(struct nouveau_channel *chan)
+{
+ struct nv84_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
+ return nv_ro32(priv->mem, chan->id * 16);
+}
+
+static void
+nv84_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv84_fence_chan *fctx = chan->engctx[engine];
+ nouveau_fence_context_del(&fctx->base);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nv84_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv84_fence_priv *priv = nv_engine(chan->dev, engine);
+ struct nv84_fence_chan *fctx;
+ struct nouveau_gpuobj *obj;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ nouveau_fence_context_new(&fctx->base);
+
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
+ priv->mem->vinst, priv->mem->size,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &obj);
+ if (ret == 0) {
+ ret = nouveau_ramht_insert(chan, NvSema, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ nv_wo32(priv->mem, chan->id * 16, 0x00000000);
+ }
+
+ if (ret)
+ nv84_fence_context_del(chan, engine);
+ return ret;
+}
+
+static int
+nv84_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static int
+nv84_fence_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static void
+nv84_fence_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fence_priv *priv = nv_engine(dev, engine);
+
+ nouveau_gpuobj_ref(NULL, &priv->mem);
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nv84_fence_create(struct drm_device *dev)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fence_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.engine.destroy = nv84_fence_destroy;
+ priv->base.engine.init = nv84_fence_init;
+ priv->base.engine.fini = nv84_fence_fini;
+ priv->base.engine.context_new = nv84_fence_context_new;
+ priv->base.engine.context_del = nv84_fence_context_del;
+ priv->base.emit = nv84_fence_emit;
+ priv->base.sync = nv84_fence_sync;
+ priv->base.read = nv84_fence_read;
+ dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 16 * pfifo->channels,
+ 0x1000, 0, &priv->mem);
+ if (ret)
+ goto out;
+
+out:
+ if (ret)
+ nv84_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+ return ret;
+}
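
(Editorial aside, not part of the patch: emit, sync and read above all address the same shared buffer — each channel owns a 16-byte slot at chan->id * 16, reachable from the GPU through the per-channel NvSema DMA object and from the CPU through priv->mem. A trivial hedged helper, with an invented name, makes the shared arithmetic explicit.)

/* Hypothetical helper: byte offset of a channel's fence slot, as used by
 * nv84_fence_emit()/nv84_fence_sync() (semaphore address) and
 * nv84_fence_read() (nv_ro32 offset) above. */
static inline u32
nv84_fence_slot(struct nouveau_channel *chan)
{
	return chan->id * 16;
}
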
diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c
new file mode 100644
index 000000000000..cc82d799fc3b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_fifo.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2012 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
+#include "nouveau_ramht.h"
+#include "nouveau_vm.h"
+
+struct nv84_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nv84_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+ struct nouveau_gpuobj *cache;
+};
+
+static int
+nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv84_fifo_priv *priv = nv_engine(chan->dev, engine);
+ struct nv84_fifo_chan *fctx;
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
+ u64 instance;
+ unsigned long flags;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+ atomic_inc(&chan->vm->engref[engine]);
+
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = nouveau_gpuobj_new(dev, chan, 256, 256, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ instance = fctx->ramfc->vinst >> 8;
+
+ ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x3c, 0x403f6078);
+ nv_wo32(fctx->ramfc, 0x40, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x44, 0x01003fff);
+ nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->cinst >> 4);
+ nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset));
+ nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
+ drm_order(chan->dma.ib_max + 1) << 16);
+ nv_wo32(fctx->ramfc, 0x60, 0x7fffffff);
+ nv_wo32(fctx->ramfc, 0x78, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
+ nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->cinst >> 4));
+ nv_wo32(fctx->ramfc, 0x88, fctx->cache->vinst >> 10);
+ nv_wo32(fctx->ramfc, 0x98, chan->ramin->vinst >> 12);
+
+ nv_wo32(chan->ramin, 0x00, chan->id);
+ nv_wo32(chan->ramin, 0x04, fctx->ramfc->vinst >> 8);
+
+ dev_priv->engine.instmem.flush(dev);
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
+ nv50_fifo_playlist_update(dev);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv84_fifo_chan *fctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ /* remove channel from playlist, will context switch if active */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
+ nv50_fifo_playlist_update(dev);
+
+ /* tell any engines on this channel to unload their contexts */
+ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+ if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
+ NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
+
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* clean up */
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
+
+ nouveau_gpuobj_ref(NULL, &fctx->ramfc);
+ nouveau_gpuobj_ref(NULL, &fctx->cache);
+
+ atomic_dec(&chan->vm->engref[engine]);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nv84_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fifo_chan *fctx;
+ u32 instance;
+ int i;
+
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x00250c, 0x6f3cfc34);
+ nv_wr32(dev, 0x002044, 0x01003fff);
+
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xffffffff);
+
+ for (i = 0; i < 128; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan && (fctx = chan->engctx[engine]))
+ instance = 0x80000000 | fctx->ramfc->vinst >> 8;
+ else
+ instance = 0x00000000;
+ nv_wr32(dev, 0x002600 + (i * 4), instance);
+ }
+
+ nv50_fifo_playlist_update(dev);
+
+ nv_wr32(dev, 0x003200, 1);
+ nv_wr32(dev, 0x003250, 1);
+ nv_wr32(dev, 0x002500, 1);
+ return 0;
+}
+
+static int
+nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ /* set playlist length to zero, fifo will unload context */
+ nv_wr32(dev, 0x0032ec, 0);
+
+ /* tell all connected engines to unload their contexts */
+ for (i = 0; i < priv->base.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan)
+ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+ if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
+ NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, 0x002140, 0);
+ return 0;
+}
+
+int
+nv84_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nv50_fifo_destroy;
+ priv->base.base.init = nv84_fifo_init;
+ priv->base.base.fini = nv84_fifo_fini;
+ priv->base.base.context_new = nv84_fifo_context_new;
+ priv->base.base.context_del = nv84_fifo_context_del;
+ priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
+ priv->base.channels = 127;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
+ if (ret)
+ goto error;
+
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
index db94ff0a9fab..e25e13fb894e 100644
--- a/drivers/gpu/drm/nouveau/nv98_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.c
@@ -23,21 +23,93 @@
*/
#include "drmP.h"
+
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include "nouveau_vm.h"
#include "nouveau_ramht.h"
-struct nv98_crypt_engine {
+#include "nv98_crypt.fuc.h"
+
+struct nv98_crypt_priv {
struct nouveau_exec_engine base;
};
+struct nv98_crypt_chan {
+ struct nouveau_gpuobj *mem;
+};
+
static int
-nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv98_crypt_priv *priv = nv_engine(dev, engine);
+ struct nv98_crypt_chan *cctx;
+ int ret;
+
+ cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
+ if (!cctx)
+ return -ENOMEM;
+
+ atomic_inc(&chan->vm->engref[engine]);
+
+ ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
+ if (ret)
+ goto error;
+
+ nv_wo32(chan->ramin, 0xa0, 0x00190000);
+ nv_wo32(chan->ramin, 0xa4, cctx->mem->vinst + cctx->mem->size - 1);
+ nv_wo32(chan->ramin, 0xa8, cctx->mem->vinst);
+ nv_wo32(chan->ramin, 0xac, 0x00000000);
+ nv_wo32(chan->ramin, 0xb0, 0x00000000);
+ nv_wo32(chan->ramin, 0xb4, 0x00000000);
+ dev_priv->engine.instmem.flush(dev);
+
+error:
+ if (ret)
+ priv->base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv98_crypt_chan *cctx = chan->engctx[engine];
+ int i;
+
+ for (i = 0xa0; i < 0xb4; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+
+ nouveau_gpuobj_ref(NULL, &cctx->mem);
+
+ atomic_dec(&chan->vm->engref[engine]);
+ chan->engctx[engine] = NULL;
+ kfree(cctx);
+}
+
+static int
+nv98_crypt_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
{
- if (!(nv_rd32(dev, 0x000200) & 0x00004000))
- return 0;
+ struct nv98_crypt_chan *cctx = chan->engctx[engine];
+
+ /* fuc engine doesn't need an object, our ramht code does.. */
+ cctx->mem->engine = 5;
+ cctx->mem->class = class;
+ return nouveau_ramht_insert(chan, handle, cctx->mem);
+}
+static void
+nv98_crypt_tlb_flush(struct drm_device *dev, int engine)
+{
+ nv50_vm_flush_engine(dev, 0x0a);
+}
+
+static int
+nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+{
nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
return 0;
}
@@ -45,34 +117,100 @@ nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
static int
nv98_crypt_init(struct drm_device *dev, int engine)
{
+ int i;
+
+ /* reset! */
nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+
+ /* wait for exit interrupt to signal */
+ nv_wait(dev, 0x087008, 0x00000010, 0x00000010);
+ nv_wr32(dev, 0x087004, 0x00000010);
+
+ /* upload microcode code and data segments */
+ nv_wr32(dev, 0x087ff8, 0x00100000);
+ for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
+ nv_wr32(dev, 0x087ff4, nv98_pcrypt_code[i]);
+
+ nv_wr32(dev, 0x087ff8, 0x00000000);
+ for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
+ nv_wr32(dev, 0x087ff4, nv98_pcrypt_data[i]);
+
+ /* start it running */
+ nv_wr32(dev, 0x08710c, 0x00000000);
+ nv_wr32(dev, 0x087104, 0x00000000); /* ENTRY */
+ nv_wr32(dev, 0x087100, 0x00000002); /* TRIGGER */
return 0;
}
+static struct nouveau_enum nv98_crypt_isr_error_name[] = {
+ { 0x0000, "ILLEGAL_MTHD" },
+ { 0x0001, "INVALID_BITFIELD" },
+ { 0x0002, "INVALID_ENUM" },
+ { 0x0003, "QUERY" },
+ {}
+};
+
+static void
+nv98_crypt_isr(struct drm_device *dev)
+{
+ u32 disp = nv_rd32(dev, 0x08701c);
+ u32 stat = nv_rd32(dev, 0x087008) & disp & ~(disp >> 16);
+ u32 inst = nv_rd32(dev, 0x087050) & 0x3fffffff;
+ u32 ssta = nv_rd32(dev, 0x087040) & 0x0000ffff;
+ u32 addr = nv_rd32(dev, 0x087040) >> 16;
+ u32 mthd = (addr & 0x07ff) << 2;
+ u32 subc = (addr & 0x3800) >> 11;
+ u32 data = nv_rd32(dev, 0x087044);
+ int chid = nv50_graph_isr_chid(dev, inst);
+
+ if (stat & 0x00000040) {
+ NV_INFO(dev, "PCRYPT: DISPATCH_ERROR [");
+ nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
+ printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, mthd, data);
+ nv_wr32(dev, 0x087004, 0x00000040);
+ stat &= ~0x00000040;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PCRYPT: unhandled intr 0x%08x\n", stat);
+ nv_wr32(dev, 0x087004, stat);
+ }
+
+ nv50_fb_vm_trap(dev, 1);
+}
+
static void
nv98_crypt_destroy(struct drm_device *dev, int engine)
{
- struct nv98_crypt_engine *pcrypt = nv_engine(dev, engine);
+ struct nv98_crypt_priv *priv = nv_engine(dev, engine);
+ nouveau_irq_unregister(dev, 14);
NVOBJ_ENGINE_DEL(dev, CRYPT);
-
- kfree(pcrypt);
+ kfree(priv);
}
int
nv98_crypt_create(struct drm_device *dev)
{
- struct nv98_crypt_engine *pcrypt;
+ struct nv98_crypt_priv *priv;
- pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
- if (!pcrypt)
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- pcrypt->base.destroy = nv98_crypt_destroy;
- pcrypt->base.init = nv98_crypt_init;
- pcrypt->base.fini = nv98_crypt_fini;
+ priv->base.destroy = nv98_crypt_destroy;
+ priv->base.init = nv98_crypt_init;
+ priv->base.fini = nv98_crypt_fini;
+ priv->base.context_new = nv98_crypt_context_new;
+ priv->base.context_del = nv98_crypt_context_del;
+ priv->base.object_new = nv98_crypt_object_new;
+ priv->base.tlb_flush = nv98_crypt_tlb_flush;
+
+ nouveau_irq_register(dev, 14, nv98_crypt_isr);
- NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
+ NVOBJ_ENGINE_ADD(dev, CRYPT, &priv->base);
+ NVOBJ_CLASS(dev, 0x88b4, CRYPT);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc b/drivers/gpu/drm/nouveau/nv98_crypt.fuc
new file mode 100644
index 000000000000..7393813044de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.fuc
@@ -0,0 +1,698 @@
+/*
+ * fuc microcode for nv98 pcrypt engine
+ * Copyright (C) 2010 Marcin Kościelnicki
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+.section #nv98_pcrypt_data
+
+ctx_dma:
+ctx_dma_query: .b32 0
+ctx_dma_src: .b32 0
+ctx_dma_dst: .b32 0
+.equ #dma_count 3
+ctx_query_address_high: .b32 0
+ctx_query_address_low: .b32 0
+ctx_query_counter: .b32 0
+ctx_cond_address_high: .b32 0
+ctx_cond_address_low: .b32 0
+ctx_cond_off: .b32 0
+ctx_src_address_high: .b32 0
+ctx_src_address_low: .b32 0
+ctx_dst_address_high: .b32 0
+ctx_dst_address_low: .b32 0
+ctx_mode: .b32 0
+.align 16
+ctx_key: .skip 16
+ctx_iv: .skip 16
+
+.align 0x80
+swap:
+.skip 32
+
+.align 8
+common_cmd_dtable:
+.b32 #ctx_query_address_high + 0x20000 ~0xff
+.b32 #ctx_query_address_low + 0x20000 ~0xfffffff0
+.b32 #ctx_query_counter + 0x20000 ~0xffffffff
+.b32 #cmd_query_get + 0x00000 ~1
+.b32 #ctx_cond_address_high + 0x20000 ~0xff
+.b32 #ctx_cond_address_low + 0x20000 ~0xfffffff0
+.b32 #cmd_cond_mode + 0x00000 ~7
+.b32 #cmd_wrcache_flush + 0x00000 ~0
+.equ #common_cmd_max 0x88
+
+
+.align 8
+engine_cmd_dtable:
+.b32 #ctx_key + 0x0 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0x4 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0x8 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0xc + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x0 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x4 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x8 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0xc + 0x20000 ~0xffffffff
+.b32 #ctx_src_address_high + 0x20000 ~0xff
+.b32 #ctx_src_address_low + 0x20000 ~0xfffffff0
+.b32 #ctx_dst_address_high + 0x20000 ~0xff
+.b32 #ctx_dst_address_low + 0x20000 ~0xfffffff0
+.b32 #crypt_cmd_mode + 0x00000 ~0xf
+.b32 #crypt_cmd_length + 0x10000 ~0x0ffffff0
+.equ #engine_cmd_max 0xce
+
+.align 4
+crypt_dtable:
+.b16 #crypt_copy_prep #crypt_do_inout
+.b16 #crypt_store_prep #crypt_do_out
+.b16 #crypt_ecb_e_prep #crypt_do_inout
+.b16 #crypt_ecb_d_prep #crypt_do_inout
+.b16 #crypt_cbc_e_prep #crypt_do_inout
+.b16 #crypt_cbc_d_prep #crypt_do_inout
+.b16 #crypt_pcbc_e_prep #crypt_do_inout
+.b16 #crypt_pcbc_d_prep #crypt_do_inout
+.b16 #crypt_cfb_e_prep #crypt_do_inout
+.b16 #crypt_cfb_d_prep #crypt_do_inout
+.b16 #crypt_ofb_prep #crypt_do_inout
+.b16 #crypt_ctr_prep #crypt_do_inout
+.b16 #crypt_cbc_mac_prep #crypt_do_in
+.b16 #crypt_cmac_finish_complete_prep #crypt_do_in
+.b16 #crypt_cmac_finish_partial_prep #crypt_do_in
+
+.align 0x100
+
+.section #nv98_pcrypt_code
+
+ // $r0 is always set to 0 in our code - this allows some space savings.
+ clear b32 $r0
+
+ // set up the interrupt handler
+ mov $r1 #ih
+ mov $iv0 $r1
+
+ // init stack pointer
+ mov $sp $r0
+
+ // set interrupt dispatch - route timer, fifo, ctxswitch to i0, others to host
+ movw $r1 0xfff0
+ sethi $r1 0
+ mov $r2 0x400
+ iowr I[$r2 + 0x300] $r1
+
+ // enable the interrupts
+ or $r1 0xc
+ iowr I[$r2] $r1
+
+ // enable fifo access and context switching
+ mov $r1 3
+ mov $r2 0x1200
+ iowr I[$r2] $r1
+
+ // enable i0 delivery
+ bset $flags ie0
+
+ // sleep forever, waking only for interrupts.
+ bset $flags $p0
+ spin:
+ sleep $p0
+ bra #spin
+
+// i0 handler
+ih:
+ // see which interrupts we got
+ iord $r1 I[$r0 + 0x200]
+
+ and $r2 $r1 0x8
+ cmpu b32 $r2 0
+ bra e #noctx
+
+ // context switch... prepare the regs for xfer
+ mov $r2 0x7700
+ mov $xtargets $r2
+ mov $xdbase $r0
+ // 128-byte context.
+ mov $r2 0
+ sethi $r2 0x50000
+
+ // read current channel
+ mov $r3 0x1400
+ iord $r4 I[$r3]
+ // if bit 30 set, it's active, so we have to unload it first.
+ shl b32 $r5 $r4 1
+ cmps b32 $r5 0
+ bra nc #ctxload
+
+ // unload the current channel - save the context
+ xdst $r0 $r2
+ xdwait
+ // and clear bit 30, then write back
+ bclr $r4 0x1e
+ iowr I[$r3] $r4
+ // tell PFIFO we unloaded
+ mov $r4 1
+ iowr I[$r3 + 0x200] $r4
+
+ bra #noctx
+
+ ctxload:
+ // no channel loaded - perhaps we're requested to load one
+ iord $r4 I[$r3 + 0x100]
+ shl b32 $r15 $r4 1
+ cmps b32 $r15 0
+ // if bit 30 of next channel not set, probably PFIFO is just
+ // killing a context. do a faux load, without the active bit.
+ bra nc #dummyload
+
+ // ok, do a real context load.
+ xdld $r0 $r2
+ xdwait
+ mov $r5 #ctx_dma
+ mov $r6 #dma_count - 1
+ ctxload_dma_loop:
+ ld b32 $r7 D[$r5 + $r6 * 4]
+ add b32 $r8 $r6 0x180
+ shl b32 $r8 8
+ iowr I[$r8] $r7
+ sub b32 $r6 1
+ bra nc #ctxload_dma_loop
+
+ dummyload:
+ // tell PFIFO we're done
+ mov $r5 2
+ iowr I[$r3 + 0x200] $r5
+
+ noctx:
+ and $r2 $r1 0x4
+ cmpu b32 $r2 0
+ bra e #nocmd
+
+ // incoming fifo command.
+ mov $r3 0x1900
+ iord $r2 I[$r3 + 0x100]
+ iord $r3 I[$r3]
+ // extract the method
+ and $r4 $r2 0x7ff
+ // shift the addr to proper position if we need to interrupt later
+ shl b32 $r2 0x10
+
+ // mthd 0 and 0x100 [NAME, NOP]: ignore
+ and $r5 $r4 0x7bf
+ cmpu b32 $r5 0
+ bra e #cmddone
+
+ mov $r5 #engine_cmd_dtable - 0xc0 * 8
+ mov $r6 #engine_cmd_max
+ cmpu b32 $r4 0xc0
+ bra nc #dtable_cmd
+ mov $r5 #common_cmd_dtable - 0x80 * 8
+ mov $r6 #common_cmd_max
+ cmpu b32 $r4 0x80
+ bra nc #dtable_cmd
+ cmpu b32 $r4 0x60
+ bra nc #dma_cmd
+ cmpu b32 $r4 0x50
+ bra ne #illegal_mthd
+
+ // mthd 0x140: PM_TRIGGER
+ mov $r2 0x2200
+ clear b32 $r3
+ sethi $r3 0x20000
+ iowr I[$r2] $r3
+ bra #cmddone
+
+ dma_cmd:
+ // mthd 0x180...: DMA_*
+ cmpu b32 $r4 0x60+#dma_count
+ bra nc #illegal_mthd
+ shl b32 $r5 $r4 2
+ add b32 $r5 (#ctx_dma - 0x60 * 4) & 0xffff
+ bset $r3 0x1e
+ st b32 D[$r5] $r3
+ add b32 $r4 0x180 - 0x60
+ shl b32 $r4 8
+ iowr I[$r4] $r3
+ bra #cmddone
+
+ dtable_cmd:
+ cmpu b32 $r4 $r6
+ bra nc #illegal_mthd
+ shl b32 $r4 3
+ add b32 $r4 $r5
+ ld b32 $r5 D[$r4 + 4]
+ and $r5 $r3
+ cmpu b32 $r5 0
+ bra ne #invalid_bitfield
+ ld b16 $r5 D[$r4]
+ ld b16 $r6 D[$r4 + 2]
+ cmpu b32 $r6 2
+ bra e #cmd_setctx
+ ld b32 $r7 D[$r0 + #ctx_cond_off]
+ and $r6 $r7
+ cmpu b32 $r6 1
+ bra e #cmddone
+ call $r5
+ bra $p1 #dispatch_error
+ bra #cmddone
+
+ cmd_setctx:
+ st b32 D[$r5] $r3
+ bra #cmddone
+
+
+ invalid_bitfield:
+ or $r2 1
+ dispatch_error:
+ illegal_mthd:
+ mov $r4 0x1000
+ iowr I[$r4] $r2
+ iowr I[$r4 + 0x100] $r3
+ mov $r4 0x40
+ iowr I[$r0] $r4
+
+ im_loop:
+ iord $r4 I[$r0 + 0x200]
+ and $r4 0x40
+ cmpu b32 $r4 0
+ bra ne #im_loop
+
+ cmddone:
+ // remove the command from FIFO
+ mov $r3 0x1d00
+ mov $r4 1
+ iowr I[$r3] $r4
+
+ nocmd:
+ // ack the processed interrupts
+ and $r1 $r1 0xc
+ iowr I[$r0 + 0x100] $r1
+iret
+
+cmd_query_get:
+ // if bit 0 of param set, trigger interrupt afterwards.
+ setp $p1 $r3
+ or $r2 3
+
+ // read PTIMER, beware of races...
+ mov $r4 0xb00
+ ptimer_retry:
+ iord $r6 I[$r4 + 0x100]
+ iord $r5 I[$r4]
+ iord $r7 I[$r4 + 0x100]
+ cmpu b32 $r6 $r7
+ bra ne #ptimer_retry
+
+ // prepare the query structure
+ ld b32 $r4 D[$r0 + #ctx_query_counter]
+ st b32 D[$r0 + #swap + 0x0] $r4
+ st b32 D[$r0 + #swap + 0x4] $r0
+ st b32 D[$r0 + #swap + 0x8] $r5
+ st b32 D[$r0 + #swap + 0xc] $r6
+
+ // will use target 0, DMA_QUERY.
+ mov $xtargets $r0
+
+ ld b32 $r4 D[$r0 + #ctx_query_address_high]
+ shl b32 $r4 0x18
+ mov $xdbase $r4
+
+ ld b32 $r4 D[$r0 + #ctx_query_address_low]
+ mov $r5 #swap
+ sethi $r5 0x20000
+ xdst $r4 $r5
+ xdwait
+
+ ret
+
+cmd_cond_mode:
+ // if >= 5, INVALID_ENUM
+ bset $flags $p1
+ or $r2 2
+ cmpu b32 $r3 5
+ bra nc #return
+
+ // otherwise, no error.
+ bclr $flags $p1
+
+ // if < 2, no QUERY object is involved
+ cmpu b32 $r3 2
+ bra nc #cmd_cond_mode_queryful
+
+ xor $r3 1
+ st b32 D[$r0 + #ctx_cond_off] $r3
+ return:
+ ret
+
+ cmd_cond_mode_queryful:
+ // ok, will need to pull a QUERY object, prepare offsets
+ ld b32 $r4 D[$r0 + #ctx_cond_address_high]
+ ld b32 $r5 D[$r0 + #ctx_cond_address_low]
+ and $r6 $r5 0xff
+ shr b32 $r5 8
+ shl b32 $r4 0x18
+ or $r4 $r5
+ mov $xdbase $r4
+ mov $xtargets $r0
+
+ // pull the first one
+ mov $r5 #swap
+ sethi $r5 0x20000
+ xdld $r6 $r5
+
+ // if == 2, only a single QUERY is involved...
+ cmpu b32 $r3 2
+ bra ne #cmd_cond_mode_double
+
+ xdwait
+ ld b32 $r4 D[$r0 + #swap + 4]
+ cmpu b32 $r4 0
+ xbit $r4 $flags z
+ st b32 D[$r0 + #ctx_cond_off] $r4
+ ret
+
+ // ok, we'll need to pull second one too
+ cmd_cond_mode_double:
+ add b32 $r6 0x10
+ add b32 $r5 0x10
+ xdld $r6 $r5
+ xdwait
+
+ // compare COUNTERs
+ ld b32 $r5 D[$r0 + #swap + 0x00]
+ ld b32 $r6 D[$r0 + #swap + 0x10]
+ cmpu b32 $r5 $r6
+ xbit $r4 $flags z
+
+ // compare RESen
+ ld b32 $r5 D[$r0 + #swap + 0x04]
+ ld b32 $r6 D[$r0 + #swap + 0x14]
+ cmpu b32 $r5 $r6
+ xbit $r5 $flags z
+ and $r4 $r5
+
+ // and negate or not, depending on mode
+ cmpu b32 $r3 3
+ xbit $r5 $flags z
+ xor $r4 $r5
+ st b32 D[$r0 + #ctx_cond_off] $r4
+ ret
+
+cmd_wrcache_flush:
+ bclr $flags $p1
+ mov $r2 0x2200
+ clear b32 $r3
+ sethi $r3 0x10000
+ iowr I[$r2] $r3
+ ret
+
+crypt_cmd_mode:
+ // if >= 0xf, INVALID_ENUM
+ bset $flags $p1
+ or $r2 2
+ cmpu b32 $r3 0xf
+ bra nc #crypt_cmd_mode_return
+
+ bclr $flags $p1
+ st b32 D[$r0 + #ctx_mode] $r3
+
+ crypt_cmd_mode_return:
+ ret
+
+crypt_cmd_length:
+ // nop if length == 0
+ cmpu b32 $r3 0
+ bra e #crypt_cmd_mode_return
+
+ // init key, IV
+ cxset 3
+ mov $r4 #ctx_key
+ sethi $r4 0x70000
+ xdst $r0 $r4
+ mov $r4 #ctx_iv
+ sethi $r4 0x60000
+ xdst $r0 $r4
+ xdwait
+ ckeyreg $c7
+
+ // prepare the targets
+ mov $r4 0x2100
+ mov $xtargets $r4
+
+ // prepare src address
+ ld b32 $r4 D[$r0 + #ctx_src_address_high]
+ ld b32 $r5 D[$r0 + #ctx_src_address_low]
+ shr b32 $r8 $r5 8
+ shl b32 $r4 0x18
+ or $r4 $r8
+ and $r5 $r5 0xff
+
+ // prepare dst address
+ ld b32 $r6 D[$r0 + #ctx_dst_address_high]
+ ld b32 $r7 D[$r0 + #ctx_dst_address_low]
+ shr b32 $r8 $r7 8
+ shl b32 $r6 0x18
+ or $r6 $r8
+ and $r7 $r7 0xff
+
+ // find the proper prep & do functions
+ ld b32 $r8 D[$r0 + #ctx_mode]
+ shl b32 $r8 2
+
+ // run prep
+ ld b16 $r9 D[$r8 + #crypt_dtable]
+ call $r9
+
+ // do it
+ ld b16 $r9 D[$r8 + #crypt_dtable + 2]
+ call $r9
+ cxset 1
+ xdwait
+ cxset 0x61
+ xdwait
+ xdwait
+
+ // update src address
+ shr b32 $r8 $r4 0x18
+ shl b32 $r9 $r4 8
+ add b32 $r9 $r5
+ adc b32 $r8 0
+ st b32 D[$r0 + #ctx_src_address_high] $r8
+ st b32 D[$r0 + #ctx_src_address_low] $r9
+
+ // update dst address
+ shr b32 $r8 $r6 0x18
+ shl b32 $r9 $r6 8
+ add b32 $r9 $r7
+ adc b32 $r8 0
+ st b32 D[$r0 + #ctx_dst_address_high] $r8
+ st b32 D[$r0 + #ctx_dst_address_low] $r9
+
+ // pull updated IV
+ cxset 2
+ mov $r4 #ctx_iv
+ sethi $r4 0x60000
+ xdld $r0 $r4
+ xdwait
+
+ ret
+
+
+crypt_copy_prep:
+ cs0begin 2
+ cxsin $c0
+ cxsout $c0
+ ret
+
+crypt_store_prep:
+ cs0begin 1
+ cxsout $c6
+ ret
+
+crypt_ecb_e_prep:
+ cs0begin 3
+ cxsin $c0
+ cenc $c0 $c0
+ cxsout $c0
+ ret
+
+crypt_ecb_d_prep:
+ ckexp $c7 $c7
+ cs0begin 3
+ cxsin $c0
+ cdec $c0 $c0
+ cxsout $c0
+ ret
+
+crypt_cbc_e_prep:
+ cs0begin 4
+ cxsin $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ cxsout $c6
+ ret
+
+crypt_cbc_d_prep:
+ ckexp $c7 $c7
+ cs0begin 5
+ cmov $c2 $c6
+ cxsin $c6
+ cdec $c0 $c6
+ cxor $c0 $c2
+ cxsout $c0
+ ret
+
+crypt_pcbc_e_prep:
+ cs0begin 5
+ cxsin $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ cxsout $c6
+ cxor $c6 $c0
+ ret
+
+crypt_pcbc_d_prep:
+ ckexp $c7 $c7
+ cs0begin 5
+ cxsin $c0
+ cdec $c1 $c0
+ cxor $c6 $c1
+ cxsout $c6
+ cxor $c6 $c0
+ ret
+
+crypt_cfb_e_prep:
+ cs0begin 4
+ cenc $c6 $c6
+ cxsin $c0
+ cxor $c6 $c0
+ cxsout $c6
+ ret
+
+crypt_cfb_d_prep:
+ cs0begin 4
+ cenc $c0 $c6
+ cxsin $c6
+ cxor $c0 $c6
+ cxsout $c0
+ ret
+
+crypt_ofb_prep:
+ cs0begin 4
+ cenc $c6 $c6
+ cxsin $c0
+ cxor $c0 $c6
+ cxsout $c0
+ ret
+
+crypt_ctr_prep:
+ cs0begin 5
+ cenc $c1 $c6
+ cadd $c6 1
+ cxsin $c0
+ cxor $c0 $c1
+ cxsout $c0
+ ret
+
+crypt_cbc_mac_prep:
+ cs0begin 3
+ cxsin $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ ret
+
+crypt_cmac_finish_complete_prep:
+ cs0begin 7
+ cxsin $c0
+ cxor $c6 $c0
+ cxor $c0 $c0
+ cenc $c0 $c0
+ cprecmac $c0 $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ ret
+
+crypt_cmac_finish_partial_prep:
+ cs0begin 8
+ cxsin $c0
+ cxor $c6 $c0
+ cxor $c0 $c0
+ cenc $c0 $c0
+ cprecmac $c0 $c0
+ cprecmac $c0 $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ ret
+
+// TODO
+crypt_do_in:
+ add b32 $r3 $r5
+ mov $xdbase $r4
+ mov $r9 #swap
+ sethi $r9 0x20000
+ crypt_do_in_loop:
+ xdld $r5 $r9
+ xdwait
+ cxset 0x22
+ xdst $r0 $r9
+ cs0exec 1
+ xdwait
+ add b32 $r5 0x10
+ cmpu b32 $r5 $r3
+ bra ne #crypt_do_in_loop
+ cxset 1
+ xdwait
+ ret
+
+crypt_do_out:
+ add b32 $r3 $r7
+ mov $xdbase $r6
+ mov $r9 #swap
+ sethi $r9 0x20000
+ crypt_do_out_loop:
+ cs0exec 1
+ cxset 0x61
+ xdld $r7 $r9
+ xdst $r7 $r9
+ cxset 1
+ xdwait
+ add b32 $r7 0x10
+ cmpu b32 $r7 $r3
+ bra ne #crypt_do_out_loop
+ ret
+
+crypt_do_inout:
+ add b32 $r3 $r5
+ mov $r9 #swap
+ sethi $r9 0x20000
+ crypt_do_inout_loop:
+ mov $xdbase $r4
+ xdld $r5 $r9
+ xdwait
+ cxset 0x21
+ xdst $r0 $r9
+ cs0exec 1
+ cxset 0x61
+ mov $xdbase $r6
+ xdld $r7 $r9
+ xdst $r7 $r9
+ cxset 1
+ xdwait
+ add b32 $r5 0x10
+ add b32 $r7 0x10
+ cmpu b32 $r5 $r3
+ bra ne #crypt_do_inout_loop
+ ret
+
+.align 0x100
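
(Editorial aside, not part of the patch: each entry in common_cmd_dtable/engine_cmd_dtable above is a pair of 32-bit words. The low halfword of the first word is a context offset or handler address, the high halfword is a flag (2 = store the method data into the context, 1 = handler call gated by the QUERY condition, 0 = always call the handler), and the second word is the set of data bits that must be zero, otherwise the ih code raises INVALID_BITFIELD. A hedged C-struct view with invented field names, matching e.g. ".b32 #ctx_query_address_high + 0x20000 ~0xff", which expands to { 0x0002000c, 0xffffff00 } in nv98_pcrypt_data[]:)

struct pcrypt_dtable_entry {
	uint32_t target;	/* bits 15:0: ctx offset or handler PC;
				 * bits 31:16: 2 = setctx, 1 = cond-gated call,
				 * 0 = unconditional call */
	uint32_t invalid_bits;	/* (method data & invalid_bits) != 0
				 * => DISPATCH_ERROR / INVALID_BITFIELD */
};
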
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h b/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h
new file mode 100644
index 000000000000..38676c74e6e0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h
@@ -0,0 +1,584 @@
+uint32_t nv98_pcrypt_data[] = {
+/* 0x0000: ctx_dma */
+/* 0x0000: ctx_dma_query */
+ 0x00000000,
+/* 0x0004: ctx_dma_src */
+ 0x00000000,
+/* 0x0008: ctx_dma_dst */
+ 0x00000000,
+/* 0x000c: ctx_query_address_high */
+ 0x00000000,
+/* 0x0010: ctx_query_address_low */
+ 0x00000000,
+/* 0x0014: ctx_query_counter */
+ 0x00000000,
+/* 0x0018: ctx_cond_address_high */
+ 0x00000000,
+/* 0x001c: ctx_cond_address_low */
+ 0x00000000,
+/* 0x0020: ctx_cond_off */
+ 0x00000000,
+/* 0x0024: ctx_src_address_high */
+ 0x00000000,
+/* 0x0028: ctx_src_address_low */
+ 0x00000000,
+/* 0x002c: ctx_dst_address_high */
+ 0x00000000,
+/* 0x0030: ctx_dst_address_low */
+ 0x00000000,
+/* 0x0034: ctx_mode */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0040: ctx_key */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0050: ctx_iv */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0080: swap */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x00a0: common_cmd_dtable */
+ 0x0002000c,
+ 0xffffff00,
+ 0x00020010,
+ 0x0000000f,
+ 0x00020014,
+ 0x00000000,
+ 0x00000192,
+ 0xfffffffe,
+ 0x00020018,
+ 0xffffff00,
+ 0x0002001c,
+ 0x0000000f,
+ 0x000001d7,
+ 0xfffffff8,
+ 0x00000260,
+ 0xffffffff,
+/* 0x00e0: engine_cmd_dtable */
+ 0x00020040,
+ 0x00000000,
+ 0x00020044,
+ 0x00000000,
+ 0x00020048,
+ 0x00000000,
+ 0x0002004c,
+ 0x00000000,
+ 0x00020050,
+ 0x00000000,
+ 0x00020054,
+ 0x00000000,
+ 0x00020058,
+ 0x00000000,
+ 0x0002005c,
+ 0x00000000,
+ 0x00020024,
+ 0xffffff00,
+ 0x00020028,
+ 0x0000000f,
+ 0x0002002c,
+ 0xffffff00,
+ 0x00020030,
+ 0x0000000f,
+ 0x00000271,
+ 0xfffffff0,
+ 0x00010285,
+ 0xf000000f,
+/* 0x0150: crypt_dtable */
+ 0x04db0321,
+ 0x04b1032f,
+ 0x04db0339,
+ 0x04db034b,
+ 0x04db0361,
+ 0x04db0377,
+ 0x04db0395,
+ 0x04db03af,
+ 0x04db03cd,
+ 0x04db03e3,
+ 0x04db03f9,
+ 0x04db040f,
+ 0x04830429,
+ 0x0483043b,
+ 0x0483045d,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+uint32_t nv98_pcrypt_code[] = {
+ 0x17f004bd,
+ 0x0010fe35,
+ 0xf10004fe,
+ 0xf0fff017,
+ 0x27f10013,
+ 0x21d00400,
+ 0x0c15f0c0,
+ 0xf00021d0,
+ 0x27f10317,
+ 0x21d01200,
+ 0x1031f400,
+/* 0x002f: spin */
+ 0xf40031f4,
+ 0x0ef40028,
+/* 0x0035: ih */
+ 0x8001cffd,
+ 0xb00812c4,
+ 0x0bf40024,
+ 0x0027f167,
+ 0x002bfe77,
+ 0xf00007fe,
+ 0x23f00027,
+ 0x0037f105,
+ 0x0034cf14,
+ 0xb0014594,
+ 0x18f40055,
+ 0x0602fa17,
+ 0x4af003f8,
+ 0x0034d01e,
+ 0xd00147f0,
+ 0x0ef48034,
+/* 0x0075: ctxload */
+ 0x4034cf33,
+ 0xb0014f94,
+ 0x18f400f5,
+ 0x0502fa21,
+ 0x57f003f8,
+ 0x0267f000,
+/* 0x008c: ctxload_dma_loop */
+ 0xa07856bc,
+ 0xb6018068,
+ 0x87d00884,
+ 0x0162b600,
+/* 0x009f: dummyload */
+ 0xf0f018f4,
+ 0x35d00257,
+/* 0x00a5: noctx */
+ 0x0412c480,
+ 0xf50024b0,
+ 0xf100df0b,
+ 0xcf190037,
+ 0x33cf4032,
+ 0xff24e400,
+ 0x1024b607,
+ 0x07bf45e4,
+ 0xf50054b0,
+ 0xf100b90b,
+ 0xf1fae057,
+ 0xb000ce67,
+ 0x18f4c044,
+ 0xa057f14d,
+ 0x8867f1fc,
+ 0x8044b000,
+ 0xb03f18f4,
+ 0x18f46044,
+ 0x5044b019,
+ 0xf1741bf4,
+ 0xbd220027,
+ 0x0233f034,
+ 0xf50023d0,
+/* 0x0103: dma_cmd */
+ 0xb000810e,
+ 0x18f46344,
+ 0x0245945e,
+ 0xfe8050b7,
+ 0x801e39f0,
+ 0x40b70053,
+ 0x44b60120,
+ 0x0043d008,
+/* 0x0123: dtable_cmd */
+ 0xb8600ef4,
+ 0x18f40446,
+ 0x0344b63e,
+ 0x980045bb,
+ 0x53fd0145,
+ 0x0054b004,
+ 0x58291bf4,
+ 0x46580045,
+ 0x0264b001,
+ 0x98170bf4,
+ 0x67fd0807,
+ 0x0164b004,
+ 0xf9300bf4,
+ 0x0f01f455,
+/* 0x015b: cmd_setctx */
+ 0x80280ef4,
+ 0x0ef40053,
+/* 0x0161: invalid_bitfield */
+ 0x0125f022,
+/* 0x0164: dispatch_error */
+/* 0x0164: illegal_mthd */
+ 0x100047f1,
+ 0xd00042d0,
+ 0x47f04043,
+ 0x0004d040,
+/* 0x0174: im_loop */
+ 0xf08004cf,
+ 0x44b04044,
+ 0xf71bf400,
+/* 0x0180: cmddone */
+ 0x1d0037f1,
+ 0xd00147f0,
+/* 0x018a: nocmd */
+ 0x11c40034,
+ 0x4001d00c,
+/* 0x0192: cmd_query_get */
+ 0x38f201f8,
+ 0x0325f001,
+ 0x0b0047f1,
+/* 0x019c: ptimer_retry */
+ 0xcf4046cf,
+ 0x47cf0045,
+ 0x0467b840,
+ 0x98f41bf4,
+ 0x04800504,
+ 0x21008020,
+ 0x80220580,
+ 0x0bfe2306,
+ 0x03049800,
+ 0xfe1844b6,
+ 0x04980047,
+ 0x8057f104,
+ 0x0253f000,
+ 0xf80645fa,
+/* 0x01d7: cmd_cond_mode */
+ 0xf400f803,
+ 0x25f00131,
+ 0x0534b002,
+ 0xf41218f4,
+ 0x34b00132,
+ 0x0b18f402,
+ 0x800136f0,
+/* 0x01f2: return */
+ 0x00f80803,
+/* 0x01f4: cmd_cond_mode_queryful */
+ 0x98060498,
+ 0x56c40705,
+ 0x0855b6ff,
+ 0xfd1844b6,
+ 0x47fe0545,
+ 0x000bfe00,
+ 0x008057f1,
+ 0xfa0253f0,
+ 0x34b00565,
+ 0x131bf402,
+ 0x049803f8,
+ 0x0044b021,
+ 0x800b4cf0,
+ 0x00f80804,
+/* 0x022c: cmd_cond_mode_double */
+ 0xb61060b6,
+ 0x65fa1050,
+ 0x9803f805,
+ 0x06982005,
+ 0x0456b824,
+ 0x980b4cf0,
+ 0x06982105,
+ 0x0456b825,
+ 0xfd0b5cf0,
+ 0x34b00445,
+ 0x0b5cf003,
+ 0x800645fd,
+ 0x00f80804,
+/* 0x0260: cmd_wrcache_flush */
+ 0xf10132f4,
+ 0xbd220027,
+ 0x0133f034,
+ 0xf80023d0,
+/* 0x0271: crypt_cmd_mode */
+ 0x0131f400,
+ 0xb00225f0,
+ 0x18f40f34,
+ 0x0132f409,
+/* 0x0283: crypt_cmd_mode_return */
+ 0xf80d0380,
+/* 0x0285: crypt_cmd_length */
+ 0x0034b000,
+ 0xf4fb0bf4,
+ 0x47f0033c,
+ 0x0743f040,
+ 0xf00604fa,
+ 0x43f05047,
+ 0x0604fa06,
+ 0x3cf503f8,
+ 0x47f1c407,
+ 0x4bfe2100,
+ 0x09049800,
+ 0x950a0598,
+ 0x44b60858,
+ 0x0548fd18,
+ 0x98ff55c4,
+ 0x07980b06,
+ 0x0878950c,
+ 0xfd1864b6,
+ 0x77c40568,
+ 0x0d0898ff,
+ 0x580284b6,
+ 0x95f9a889,
+ 0xf9a98958,
+ 0x013cf495,
+ 0x3cf403f8,
+ 0xf803f861,
+ 0x18489503,
+ 0xbb084994,
+ 0x81b60095,
+ 0x09088000,
+ 0x950a0980,
+ 0x69941868,
+ 0x0097bb08,
+ 0x800081b6,
+ 0x09800b08,
+ 0x023cf40c,
+ 0xf05047f0,
+ 0x04fa0643,
+ 0xf803f805,
+/* 0x0321: crypt_copy_prep */
+ 0x203cf500,
+ 0x003cf594,
+ 0x003cf588,
+/* 0x032f: crypt_store_prep */
+ 0xf500f88c,
+ 0xf594103c,
+ 0xf88c063c,
+/* 0x0339: crypt_ecb_e_prep */
+ 0x303cf500,
+ 0x003cf594,
+ 0x003cf588,
+ 0x003cf5d0,
+/* 0x034b: crypt_ecb_d_prep */
+ 0xf500f88c,
+ 0xf5c8773c,
+ 0xf594303c,
+ 0xf588003c,
+ 0xf5d4003c,
+ 0xf88c003c,
+/* 0x0361: crypt_cbc_e_prep */
+ 0x403cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x663cf5ac,
+ 0x063cf5d0,
+/* 0x0377: crypt_cbc_d_prep */
+ 0xf500f88c,
+ 0xf5c8773c,
+ 0xf594503c,
+ 0xf584623c,
+ 0xf588063c,
+ 0xf5d4603c,
+ 0xf5ac203c,
+ 0xf88c003c,
+/* 0x0395: crypt_pcbc_e_prep */
+ 0x503cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x663cf5ac,
+ 0x063cf5d0,
+ 0x063cf58c,
+/* 0x03af: crypt_pcbc_d_prep */
+ 0xf500f8ac,
+ 0xf5c8773c,
+ 0xf594503c,
+ 0xf588003c,
+ 0xf5d4013c,
+ 0xf5ac163c,
+ 0xf58c063c,
+ 0xf8ac063c,
+/* 0x03cd: crypt_cfb_e_prep */
+ 0x403cf500,
+ 0x663cf594,
+ 0x003cf5d0,
+ 0x063cf588,
+ 0x063cf5ac,
+/* 0x03e3: crypt_cfb_d_prep */
+ 0xf500f88c,
+ 0xf594403c,
+ 0xf5d0603c,
+ 0xf588063c,
+ 0xf5ac603c,
+ 0xf88c003c,
+/* 0x03f9: crypt_ofb_prep */
+ 0x403cf500,
+ 0x663cf594,
+ 0x003cf5d0,
+ 0x603cf588,
+ 0x003cf5ac,
+/* 0x040f: crypt_ctr_prep */
+ 0xf500f88c,
+ 0xf594503c,
+ 0xf5d0613c,
+ 0xf5b0163c,
+ 0xf588003c,
+ 0xf5ac103c,
+ 0xf88c003c,
+/* 0x0429: crypt_cbc_mac_prep */
+ 0x303cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x663cf5ac,
+/* 0x043b: crypt_cmac_finish_complete_prep */
+ 0xf500f8d0,
+ 0xf594703c,
+ 0xf588003c,
+ 0xf5ac063c,
+ 0xf5ac003c,
+ 0xf5d0003c,
+ 0xf5bc003c,
+ 0xf5ac063c,
+ 0xf8d0663c,
+/* 0x045d: crypt_cmac_finish_partial_prep */
+ 0x803cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x003cf5ac,
+ 0x003cf5ac,
+ 0x003cf5d0,
+ 0x003cf5bc,
+ 0x063cf5bc,
+ 0x663cf5ac,
+/* 0x0483: crypt_do_in */
+ 0xbb00f8d0,
+ 0x47fe0035,
+ 0x8097f100,
+ 0x0293f000,
+/* 0x0490: crypt_do_in_loop */
+ 0xf80559fa,
+ 0x223cf403,
+ 0xf50609fa,
+ 0xf898103c,
+ 0x1050b603,
+ 0xf40453b8,
+ 0x3cf4e91b,
+ 0xf803f801,
+/* 0x04b1: crypt_do_out */
+ 0x0037bb00,
+ 0xf10067fe,
+ 0xf0008097,
+/* 0x04be: crypt_do_out_loop */
+ 0x3cf50293,
+ 0x3cf49810,
+ 0x0579fa61,
+ 0xf40679fa,
+ 0x03f8013c,
+ 0xb81070b6,
+ 0x1bf40473,
+/* 0x04db: crypt_do_inout */
+ 0xbb00f8e8,
+ 0x97f10035,
+ 0x93f00080,
+/* 0x04e5: crypt_do_inout_loop */
+ 0x0047fe02,
+ 0xf80559fa,
+ 0x213cf403,
+ 0xf50609fa,
+ 0xf498103c,
+ 0x67fe613c,
+ 0x0579fa00,
+ 0xf40679fa,
+ 0x03f8013c,
+ 0xb61050b6,
+ 0x53b81070,
+ 0xd41bf404,
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
index 8f356d58e409..0387dc7f4f42 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.c
+++ b/drivers/gpu/drm/nouveau/nva3_copy.c
@@ -79,29 +79,13 @@ static void
nva3_copy_context_del(struct nouveau_channel *chan, int engine)
{
struct nouveau_gpuobj *ctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- u32 inst;
-
- inst = (chan->ramin->vinst >> 12);
- inst |= 0x40000000;
-
- /* disable fifo access */
- nv_wr32(dev, 0x104048, 0x00000000);
- /* mark channel as unloaded if it's currently active */
- if (nv_rd32(dev, 0x104050) == inst)
- nv_mask(dev, 0x104050, 0x40000000, 0x00000000);
- /* mark next channel as invalid if it's about to be loaded */
- if (nv_rd32(dev, 0x104054) == inst)
- nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
- /* restore fifo access */
- nv_wr32(dev, 0x104048, 0x00000003);
+ int i;
- for (inst = 0xc0; inst <= 0xd4; inst += 4)
- nv_wo32(chan->ramin, inst, 0x00000000);
-
- nouveau_gpuobj_ref(NULL, &ctx);
+ for (i = 0xc0; i <= 0xd4; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
atomic_dec(&chan->vm->engref[engine]);
+ nouveau_gpuobj_ref(NULL, &ctx);
chan->engctx[engine] = ctx;
}
@@ -143,13 +127,6 @@ static int
nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
{
nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
-
- /* trigger fuc context unload */
- nv_wait(dev, 0x104008, 0x0000000c, 0x00000000);
- nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
- nv_wr32(dev, 0x104000, 0x00000008);
- nv_wait(dev, 0x104008, 0x00000008, 0x00000000);
-
nv_wr32(dev, 0x104014, 0xffffffff);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index 9e636e6ef6d7..798829353fb6 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -98,7 +98,9 @@ read_pll(struct drm_device *dev, int clk, u32 pll)
sclk = read_clk(dev, 0x10 + clk, false);
}
- return sclk * N / (M * P);
+ if (M * P)
+ return sclk * N / (M * P);
+ return 0;
}
struct creg {
@@ -182,23 +184,26 @@ prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
const u32 src1 = 0x004160 + (clk * 4);
const u32 ctrl = pll + 0;
const u32 coef = pll + 4;
- u32 cntl;
if (!reg->clk && !reg->pll) {
NV_DEBUG(dev, "no clock for %02x\n", clk);
return;
}
- cntl = nv_rd32(dev, ctrl) & 0xfffffff2;
if (reg->pll) {
nv_mask(dev, src0, 0x00000101, 0x00000101);
nv_wr32(dev, coef, reg->pll);
- nv_wr32(dev, ctrl, cntl | 0x00000015);
+ nv_mask(dev, ctrl, 0x00000015, 0x00000015);
+ nv_mask(dev, ctrl, 0x00000010, 0x00000000);
+ nv_wait(dev, ctrl, 0x00020000, 0x00020000);
+ nv_mask(dev, ctrl, 0x00000010, 0x00000010);
+ nv_mask(dev, ctrl, 0x00000008, 0x00000000);
nv_mask(dev, src1, 0x00000100, 0x00000000);
nv_mask(dev, src1, 0x00000001, 0x00000000);
} else {
nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk);
- nv_wr32(dev, ctrl, cntl | 0x0000001d);
+ nv_mask(dev, ctrl, 0x00000018, 0x00000018);
+ udelay(20);
nv_mask(dev, ctrl, 0x00000001, 0x00000000);
nv_mask(dev, src0, 0x00000100, 0x00000000);
nv_mask(dev, src0, 0x00000001, 0x00000000);
@@ -230,17 +235,28 @@ nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
}
struct nva3_pm_state {
+ struct nouveau_pm_level *perflvl;
+
struct creg nclk;
struct creg sclk;
- struct creg mclk;
struct creg vdec;
struct creg unka0;
+
+ struct creg mclk;
+ u8 *rammap;
+ u8 rammap_ver;
+ u8 rammap_len;
+ u8 *ramcfg;
+ u8 ramcfg_len;
+ u32 r004018;
+ u32 r100760;
};
void *
nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
struct nva3_pm_state *info;
+ u8 ramcfg_cnt;
int ret;
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -267,6 +283,20 @@ nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
if (ret < 0)
goto out;
+ info->rammap = nouveau_perf_rammap(dev, perflvl->memory,
+ &info->rammap_ver,
+ &info->rammap_len,
+ &ramcfg_cnt, &info->ramcfg_len);
+ if (info->rammap_ver != 0x10 || info->rammap_len < 5)
+ info->rammap = NULL;
+
+ info->ramcfg = nouveau_perf_ramcfg(dev, perflvl->memory,
+ &info->rammap_ver,
+ &info->ramcfg_len);
+ if (info->rammap_ver != 0x10)
+ info->ramcfg = NULL;
+
+ info->perflvl = perflvl;
out:
if (ret < 0) {
kfree(info);
@@ -287,6 +317,240 @@ nva3_pm_grcp_idle(void *data)
return false;
}
+static void
+mclk_precharge(struct nouveau_mem_exec_func *exec)
+{
+ nv_wr32(exec->dev, 0x1002d4, 0x00000001);
+}
+
+static void
+mclk_refresh(struct nouveau_mem_exec_func *exec)
+{
+ nv_wr32(exec->dev, 0x1002d0, 0x00000001);
+}
+
+static void
+mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
+{
+ nv_wr32(exec->dev, 0x100210, enable ? 0x80000000 : 0x00000000);
+}
+
+static void
+mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
+{
+ nv_wr32(exec->dev, 0x1002dc, enable ? 0x00000001 : 0x00000000);
+}
+
+static void
+mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
+{
+ volatile u32 post = nv_rd32(exec->dev, 0); (void)post;
+ udelay((nsec + 500) / 1000);
+}
+
+static u32
+mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
+{
+ if (mr <= 1)
+ return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4));
+ if (mr <= 3)
+ return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4));
+ return 0;
+}
+
+static void
+mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
+{
+ struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
+
+ if (mr <= 1) {
+ if (dev_priv->vram_rank_B)
+ nv_wr32(exec->dev, 0x1002c8 + ((mr - 0) * 4), data);
+ nv_wr32(exec->dev, 0x1002c0 + ((mr - 0) * 4), data);
+ } else
+ if (mr <= 3) {
+ if (dev_priv->vram_rank_B)
+ nv_wr32(exec->dev, 0x1002e8 + ((mr - 2) * 4), data);
+ nv_wr32(exec->dev, 0x1002e0 + ((mr - 2) * 4), data);
+ }
+}
+
+static void
+mclk_clock_set(struct nouveau_mem_exec_func *exec)
+{
+ struct drm_device *dev = exec->dev;
+ struct nva3_pm_state *info = exec->priv;
+ u32 ctrl;
+
+ ctrl = nv_rd32(dev, 0x004000);
+ if (!(ctrl & 0x00000008) && info->mclk.pll) {
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
+ nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
+ nv_wr32(dev, 0x004018, 0x00001000);
+ nv_wr32(dev, 0x004000, (ctrl &= ~0x00000001));
+ nv_wr32(dev, 0x004004, info->mclk.pll);
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000001));
+ udelay(64);
+ nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
+ udelay(20);
+ } else
+ if (!info->mclk.pll) {
+ nv_mask(dev, 0x004168, 0x003f3040, info->mclk.clk);
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
+ nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
+ nv_wr32(dev, 0x004018, 0x0000d000 | info->r004018);
+ }
+
+ if (info->rammap) {
+ if (info->ramcfg && (info->rammap[4] & 0x08)) {
+ u32 unk5a0 = (ROM16(info->ramcfg[5]) << 8) |
+ info->ramcfg[5];
+ u32 unk5a4 = ROM16(info->ramcfg[7]);
+ u32 unk804 = (info->ramcfg[9] & 0xf0) << 16 |
+ (info->ramcfg[3] & 0x0f) << 16 |
+ (info->ramcfg[9] & 0x0f) |
+ 0x80000000;
+ nv_wr32(dev, 0x1005a0, unk5a0);
+ nv_wr32(dev, 0x1005a4, unk5a4);
+ nv_wr32(dev, 0x10f804, unk804);
+ nv_mask(dev, 0x10053c, 0x00001000, 0x00000000);
+ } else {
+ nv_mask(dev, 0x10053c, 0x00001000, 0x00001000);
+ nv_mask(dev, 0x10f804, 0x80000000, 0x00000000);
+ nv_mask(dev, 0x100760, 0x22222222, info->r100760);
+ nv_mask(dev, 0x1007a0, 0x22222222, info->r100760);
+ nv_mask(dev, 0x1007e0, 0x22222222, info->r100760);
+ }
+ }
+
+ if (info->mclk.pll) {
+ nv_mask(dev, 0x1110e0, 0x00088000, 0x00011000);
+ nv_wr32(dev, 0x004000, (ctrl &= ~0x00000008));
+ }
+}
+
+static void
+mclk_timing_set(struct nouveau_mem_exec_func *exec)
+{
+ struct drm_device *dev = exec->dev;
+ struct nva3_pm_state *info = exec->priv;
+ struct nouveau_pm_level *perflvl = info->perflvl;
+ int i;
+
+ for (i = 0; i < 9; i++)
+ nv_wr32(dev, 0x100220 + (i * 4), perflvl->timing.reg[i]);
+
+ if (info->ramcfg) {
+ u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
+ nv_mask(dev, 0x100200, 0x00001000, data);
+ }
+
+ if (info->ramcfg) {
+ u32 unk714 = nv_rd32(dev, 0x100714) & ~0xf0000010;
+ u32 unk718 = nv_rd32(dev, 0x100718) & ~0x00000100;
+ u32 unk71c = nv_rd32(dev, 0x10071c) & ~0x00000100;
+ if ( (info->ramcfg[2] & 0x20))
+ unk714 |= 0xf0000000;
+ if (!(info->ramcfg[2] & 0x04))
+ unk714 |= 0x00000010;
+ nv_wr32(dev, 0x100714, unk714);
+
+ if (info->ramcfg[2] & 0x01)
+ unk71c |= 0x00000100;
+ nv_wr32(dev, 0x10071c, unk71c);
+
+ if (info->ramcfg[2] & 0x02)
+ unk718 |= 0x00000100;
+ nv_wr32(dev, 0x100718, unk718);
+
+ if (info->ramcfg[2] & 0x10)
+ nv_wr32(dev, 0x111100, 0x48000000); /*XXX*/
+ }
+}
+
+static void
+prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
+{
+ struct nouveau_mem_exec_func exec = {
+ .dev = dev,
+ .precharge = mclk_precharge,
+ .refresh = mclk_refresh,
+ .refresh_auto = mclk_refresh_auto,
+ .refresh_self = mclk_refresh_self,
+ .wait = mclk_wait,
+ .mrg = mclk_mrg,
+ .mrs = mclk_mrs,
+ .clock_set = mclk_clock_set,
+ .timing_set = mclk_timing_set,
+ .priv = info
+ };
+ u32 ctrl;
+
+ /* XXX: where the fuck does 750MHz come from? */
+ if (info->perflvl->memory <= 750000) {
+ info->r004018 = 0x10000000;
+ info->r100760 = 0x22222222;
+ }
+
+ ctrl = nv_rd32(dev, 0x004000);
+ if (ctrl & 0x00000008) {
+ if (info->mclk.pll) {
+ nv_mask(dev, 0x004128, 0x00000101, 0x00000101);
+ nv_wr32(dev, 0x004004, info->mclk.pll);
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000001));
+ nv_wr32(dev, 0x004000, (ctrl &= 0xffffffef));
+ nv_wait(dev, 0x004000, 0x00020000, 0x00020000);
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000010));
+ nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000004));
+ }
+ } else {
+ u32 ssel = 0x00000101;
+ if (info->mclk.clk)
+ ssel |= info->mclk.clk;
+ else
+ ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
+ nv_mask(dev, 0x004168, 0x003f3141, ctrl);
+ }
+
+ if (info->ramcfg) {
+ if (info->ramcfg[2] & 0x10) {
+ nv_mask(dev, 0x111104, 0x00000600, 0x00000000);
+ } else {
+ nv_mask(dev, 0x111100, 0x40000000, 0x40000000);
+ nv_mask(dev, 0x111104, 0x00000180, 0x00000000);
+ }
+ }
+ if (info->rammap && !(info->rammap[4] & 0x02))
+ nv_mask(dev, 0x100200, 0x00000800, 0x00000000);
+ nv_wr32(dev, 0x611200, 0x00003300);
+ if (info->ramcfg && !(info->ramcfg[2] & 0x10))
+ nv_wr32(dev, 0x111100, 0x4c020000); /*XXX*/
+
+ nouveau_mem_exec(&exec, info->perflvl);
+
+ nv_wr32(dev, 0x611200, 0x00003330);
+ if (info->rammap && (info->rammap[4] & 0x02))
+ nv_mask(dev, 0x100200, 0x00000800, 0x00000800);
+ if (info->ramcfg) {
+ if (info->ramcfg[2] & 0x10) {
+ nv_mask(dev, 0x111104, 0x00000180, 0x00000180);
+ nv_mask(dev, 0x111100, 0x40000000, 0x00000000);
+ } else {
+ nv_mask(dev, 0x111104, 0x00000600, 0x00000600);
+ }
+ }
+
+ if (info->mclk.pll) {
+ nv_mask(dev, 0x004168, 0x00000001, 0x00000000);
+ nv_mask(dev, 0x004168, 0x00000100, 0x00000000);
+ } else {
+ nv_mask(dev, 0x004000, 0x00000001, 0x00000000);
+ nv_mask(dev, 0x004128, 0x00000001, 0x00000000);
+ nv_mask(dev, 0x004128, 0x00000100, 0x00000000);
+ }
+}
+
int
nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
@@ -316,18 +580,8 @@ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
prog_clk(dev, 0x20, &info->unka0);
prog_clk(dev, 0x21, &info->vdec);
- if (info->mclk.clk || info->mclk.pll) {
- nv_wr32(dev, 0x100210, 0);
- nv_wr32(dev, 0x1002dc, 1);
- nv_wr32(dev, 0x004018, 0x00001000);
- prog_pll(dev, 0x02, 0x004000, &info->mclk);
- if (nv_rd32(dev, 0x4000) & 0x00000008)
- nv_wr32(dev, 0x004018, 0x1000d000);
- else
- nv_wr32(dev, 0x004018, 0x10005000);
- nv_wr32(dev, 0x1002dc, 0);
- nv_wr32(dev, 0x100210, 0x80000000);
- }
+ if (info->mclk.clk || info->mclk.pll)
+ prog_mem(dev, info);
ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index a495e48197ca..797159e7b7a6 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -43,22 +43,22 @@ nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
return ret;
if (rect->rop != ROP_COPY) {
- BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
OUT_RING (chan, 1);
}
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0588, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0588, 1);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR)
OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
else
OUT_RING (chan, rect->color);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0600, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x0600, 4);
OUT_RING (chan, rect->dx);
OUT_RING (chan, rect->dy);
OUT_RING (chan, rect->dx + rect->width);
OUT_RING (chan, rect->dy + rect->height);
if (rect->rop != ROP_COPY) {
- BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
OUT_RING (chan, 3);
}
FIRE_RING(chan);
@@ -78,14 +78,14 @@ nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
if (ret)
return ret;
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0110, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0110, 1);
OUT_RING (chan, 0);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x08b0, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x08b0, 4);
OUT_RING (chan, region->dx);
OUT_RING (chan, region->dy);
OUT_RING (chan, region->width);
OUT_RING (chan, region->height);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x08d0, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x08d0, 4);
OUT_RING (chan, 0);
OUT_RING (chan, region->sx);
OUT_RING (chan, 0);
@@ -116,7 +116,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
width = ALIGN(image->width, 32);
dwords = (width * image->height) >> 5;
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0814, 2);
+ BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
OUT_RING (chan, palette[image->bg_color] | mask);
@@ -125,10 +125,10 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING (chan, image->bg_color);
OUT_RING (chan, image->fg_color);
}
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0838, 2);
+ BEGIN_NVC0(chan, NvSub2D, 0x0838, 2);
OUT_RING (chan, image->width);
OUT_RING (chan, image->height);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0850, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x0850, 4);
OUT_RING (chan, 0);
OUT_RING (chan, image->dx);
OUT_RING (chan, 0);
@@ -143,7 +143,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
dwords -= push;
- BEGIN_NVC0(chan, 6, NvSub2D, 0x0860, push);
+ BEGIN_NIC0(chan, NvSub2D, 0x0860, push);
OUT_RINGp(chan, data, push);
data += push;
}
@@ -200,47 +200,47 @@ nvc0_fbcon_accel_init(struct fb_info *info)
return ret;
}
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
OUT_RING (chan, 0x0000902d);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
+ BEGIN_NVC0(chan, NvSub2D, 0x0104, 2);
OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset));
OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset));
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
OUT_RING (chan, 0);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
OUT_RING (chan, 3);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x02a0, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x02a0, 1);
OUT_RING (chan, 0x55);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x08c0, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x08c0, 4);
OUT_RING (chan, 0);
OUT_RING (chan, 1);
OUT_RING (chan, 0);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0580, 2);
+ BEGIN_NVC0(chan, NvSub2D, 0x0580, 2);
OUT_RING (chan, 4);
OUT_RING (chan, format);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x02e8, 2);
+ BEGIN_NVC0(chan, NvSub2D, 0x02e8, 2);
OUT_RING (chan, 2);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0804, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0804, 1);
OUT_RING (chan, format);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0800, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0800, 1);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0808, 3);
+ BEGIN_NVC0(chan, NvSub2D, 0x0808, 3);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x081c, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x081c, 1);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0840, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x0840, 4);
OUT_RING (chan, 0);
OUT_RING (chan, 1);
OUT_RING (chan, 0);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0200, 10);
+ BEGIN_NVC0(chan, NvSub2D, 0x0200, 10);
OUT_RING (chan, format);
OUT_RING (chan, 1);
OUT_RING (chan, 0);
@@ -251,7 +251,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->var.yres_virtual);
OUT_RING (chan, upper_32_bits(fb->vma.offset));
OUT_RING (chan, lower_32_bits(fb->vma.offset));
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
+ BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
OUT_RING (chan, format);
OUT_RING (chan, 1);
OUT_RING (chan, 0);
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
new file mode 100644
index 000000000000..47ab388a606e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fifo.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nvc0_fence_priv {
+ struct nouveau_fence_priv base;
+ struct nouveau_bo *bo;
+};
+
+struct nvc0_fence_chan {
+ struct nouveau_fence_chan base;
+ struct nouveau_vma vma;
+};
+
+static int
+nvc0_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan = fence->channel;
+ struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+ u64 addr = fctx->vma.offset + chan->id * 16;
+ int ret;
+
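+ /* release: write the fence sequence into this channel's slot of the
+ * shared buffer with a semaphore WRITE_LONG
+ */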
+ ret = RING_SPACE(chan, 5);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(addr));
+ OUT_RING (chan, lower_32_bits(addr));
+ OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+ FIRE_RING (chan);
+ }
+
+ return ret;
+}
+
+static int
+nvc0_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+ u64 addr = fctx->vma.offset + prev->id * 16;
+ int ret;
+
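+ /* acquire: stall until the other channel's slot reaches the required
+ * sequence (ACQUIRE_GEQUAL), yielding PFIFO while waiting
+ */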
+ ret = RING_SPACE(chan, 5);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(addr));
+ OUT_RING (chan, lower_32_bits(addr));
+ OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL |
+ NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+ FIRE_RING (chan);
+ }
+
+ return ret;
+}
+
+static u32
+nvc0_fence_read(struct nouveau_channel *chan)
+{
+ struct nvc0_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
+ return nouveau_bo_rd32(priv->bo, chan->id * 16/4);
+}
+
+static void
+nvc0_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
+ struct nvc0_fence_chan *fctx = chan->engctx[engine];
+
+ nouveau_bo_vma_del(priv->bo, &fctx->vma);
+ nouveau_fence_context_del(&fctx->base);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nvc0_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
+ struct nvc0_fence_chan *fctx;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ nouveau_fence_context_new(&fctx->base);
+
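+ /* map the shared fence buffer into this channel's vm so it can write
+ * its own slot directly
+ */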
+ ret = nouveau_bo_vma_add(priv->bo, chan->vm, &fctx->vma);
+ if (ret)
+ nvc0_fence_context_del(chan, engine);
+
+ nouveau_bo_wr32(priv->bo, chan->id * 16/4, 0x00000000);
+ return ret;
+}
+
+static int
+nvc0_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static int
+nvc0_fence_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static void
+nvc0_fence_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_fence_priv *priv = nv_engine(dev, engine);
+
+ nouveau_bo_unmap(priv->bo);
+ nouveau_bo_ref(NULL, &priv->bo);
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nvc0_fence_create(struct drm_device *dev)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_fence_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.engine.destroy = nvc0_fence_destroy;
+ priv->base.engine.init = nvc0_fence_init;
+ priv->base.engine.fini = nvc0_fence_fini;
+ priv->base.engine.context_new = nvc0_fence_context_new;
+ priv->base.engine.context_del = nvc0_fence_context_del;
+ priv->base.emit = nvc0_fence_emit;
+ priv->base.sync = nvc0_fence_sync;
+ priv->base.read = nvc0_fence_read;
+ dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+
+ ret = nouveau_bo_new(dev, 16 * pfifo->channels, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, NULL, &priv->bo);
+ if (ret == 0) {
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ if (ret == 0)
+ ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo);
+ }
+
+ if (ret)
+ nvc0_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 50d68a7a1379..7d85553d518c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -26,10 +26,12 @@
#include "nouveau_drv.h"
#include "nouveau_mm.h"
+#include "nouveau_fifo.h"
static void nvc0_fifo_isr(struct drm_device *);
struct nvc0_fifo_priv {
+ struct nouveau_fifo_priv base;
struct nouveau_gpuobj *playlist[2];
int cur_playlist;
struct nouveau_vma user_vma;
@@ -37,8 +39,8 @@ struct nvc0_fifo_priv {
};
struct nvc0_fifo_chan {
+ struct nouveau_fifo_chan base;
struct nouveau_gpuobj *user;
- struct nouveau_gpuobj *ramfc;
};
static void
@@ -46,8 +48,7 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nvc0_fifo_priv *priv = pfifo->priv;
+ struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_gpuobj *cur;
int i, p;
@@ -69,59 +70,20 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
NV_ERROR(dev, "PFIFO - playlist update failed\n");
}
-void
-nvc0_fifo_disable(struct drm_device *dev)
-{
-}
-
-void
-nvc0_fifo_enable(struct drm_device *dev)
-{
-}
-
-bool
-nvc0_fifo_reassign(struct drm_device *dev, bool enable)
-{
- return false;
-}
-
-bool
-nvc0_fifo_cache_pull(struct drm_device *dev, bool enable)
-{
- return false;
-}
-
-int
-nvc0_fifo_channel_id(struct drm_device *dev)
-{
- return 127;
-}
-
-int
-nvc0_fifo_create_context(struct nouveau_channel *chan)
+static int
+nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nvc0_fifo_priv *priv = pfifo->priv;
- struct nvc0_fifo_chan *fifoch;
+ struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
+ struct nvc0_fifo_chan *fctx;
u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
- int ret;
+ int ret, i;
- chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
- if (!chan->fifo_priv)
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
return -ENOMEM;
- fifoch = chan->fifo_priv;
-
- /* allocate vram for control regs, map into polling area */
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &fifoch->user);
- if (ret)
- goto error;
-
- nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
- *(struct nouveau_mem **)fifoch->user->node);
chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
priv->user_vma.offset + (chan->id * 0x1000),
@@ -131,176 +93,77 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
goto error;
}
- /* ramfc */
- ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
- chan->ramin->vinst, 0x100,
- NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
+ /* allocate vram for control regs, map into polling area */
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &fctx->user);
if (ret)
goto error;
- nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(fifoch->user->vinst));
- nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(fifoch->user->vinst));
- nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
- nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
- nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
- nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+ nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
+ *(struct nouveau_mem **)fctx->user->node);
+
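+ /* the fifo context (ramfc) is now written directly into the channel's
+ * ramin rather than into a separate gpuobj
+ */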
+ for (i = 0; i < 0x100; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+ nv_wo32(chan->ramin, 0x08, lower_32_bits(fctx->user->vinst));
+ nv_wo32(chan->ramin, 0x0c, upper_32_bits(fctx->user->vinst));
+ nv_wo32(chan->ramin, 0x10, 0x0000face);
+ nv_wo32(chan->ramin, 0x30, 0xfffff902);
+ nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
+ nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
upper_32_bits(ib_virt));
- nv_wo32(fifoch->ramfc, 0x54, 0x00000002);
- nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
- nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
- nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
- nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f);
- nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f);
- nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
- nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000);
- nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
- nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
+ nv_wo32(chan->ramin, 0x54, 0x00000002);
+ nv_wo32(chan->ramin, 0x84, 0x20400000);
+ nv_wo32(chan->ramin, 0x94, 0x30000001);
+ nv_wo32(chan->ramin, 0x9c, 0x00000100);
+ nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f);
+ nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f);
+ nv_wo32(chan->ramin, 0xac, 0x0000001f);
+ nv_wo32(chan->ramin, 0xb8, 0xf8000000);
+ nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
+ nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
pinstmem->flush(dev);
nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
(chan->ramin->vinst >> 12));
nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
nvc0_fifo_playlist_update(dev);
- return 0;
error:
- pfifo->destroy_context(chan);
+ if (ret)
+ priv->base.base.context_del(chan, engine);
return ret;
}
-void
-nvc0_fifo_destroy_context(struct nouveau_channel *chan)
+static void
+nvc0_fifo_context_del(struct nouveau_channel *chan, int engine)
{
+ struct nvc0_fifo_chan *fctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
- struct nvc0_fifo_chan *fifoch;
nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
nv_wr32(dev, 0x002634, chan->id);
if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
-
nvc0_fifo_playlist_update(dev);
-
nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
+ nouveau_gpuobj_ref(NULL, &fctx->user);
if (chan->user) {
iounmap(chan->user);
chan->user = NULL;
}
- fifoch = chan->fifo_priv;
- chan->fifo_priv = NULL;
- if (!fifoch)
- return;
-
- nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
- nouveau_gpuobj_ref(NULL, &fifoch->user);
- kfree(fifoch);
-}
-
-int
-nvc0_fifo_load_context(struct nouveau_channel *chan)
-{
- return 0;
-}
-
-int
-nvc0_fifo_unload_context(struct drm_device *dev)
-{
- int i;
-
- for (i = 0; i < 128; i++) {
- if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
- continue;
-
- nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
- nv_wr32(dev, 0x002634, i);
- if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
- NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
- i, nv_rd32(dev, 0x002634));
- return -EBUSY;
- }
- }
-
- return 0;
-}
-
-static void
-nvc0_fifo_destroy(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nvc0_fifo_priv *priv;
-
- priv = pfifo->priv;
- if (!priv)
- return;
-
- nouveau_vm_put(&priv->user_vma);
- nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
- nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
- kfree(priv);
-}
-
-void
-nvc0_fifo_takedown(struct drm_device *dev)
-{
- nv_wr32(dev, 0x002140, 0x00000000);
- nvc0_fifo_destroy(dev);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
}
static int
-nvc0_fifo_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nvc0_fifo_priv *priv;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- pfifo->priv = priv;
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
- &priv->playlist[0]);
- if (ret)
- goto error;
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
- &priv->playlist[1]);
- if (ret)
- goto error;
-
- ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
- 12, NV_MEM_ACCESS_RW, &priv->user_vma);
- if (ret)
- goto error;
-
- nouveau_irq_register(dev, 8, nvc0_fifo_isr);
- NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
- return 0;
-
-error:
- nvc0_fifo_destroy(dev);
- return ret;
-}
-
-int
-nvc0_fifo_init(struct drm_device *dev)
+nvc0_fifo_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
struct nouveau_channel *chan;
- struct nvc0_fifo_priv *priv;
- int ret, i;
-
- if (!pfifo->priv) {
- ret = nvc0_fifo_create(dev);
- if (ret)
- return ret;
- }
- priv = pfifo->priv;
+ int i;
/* reset PFIFO, enable all available PSUBFIFO areas */
nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
@@ -338,7 +201,7 @@ nvc0_fifo_init(struct drm_device *dev)
/* restore PFIFO context table */
for (i = 0; i < 128; i++) {
chan = dev_priv->channels.ptr[i];
- if (!chan || !chan->fifo_priv)
+ if (!chan || !chan->engctx[engine])
continue;
nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
@@ -350,6 +213,29 @@ nvc0_fifo_init(struct drm_device *dev)
return 0;
}
+static int
+nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ int i;
+
+ for (i = 0; i < 128; i++) {
+ if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
+ continue;
+
+ nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x002634, i);
+ if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
+ NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
+ i, nv_rd32(dev, 0x002634));
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, 0x002140, 0x00000000);
+ return 0;
+}
+
struct nouveau_enum nvc0_fifo_fault_unit[] = {
{ 0x00, "PGRAPH" },
{ 0x03, "PEEPHOLE" },
@@ -439,13 +325,14 @@ nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
static int
nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
{
+ struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = NULL;
unsigned long flags;
int ret = -EINVAL;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) {
+ if (likely(chid >= 0 && chid < priv->base.channels)) {
chan = dev_priv->channels.ptr[chid];
if (likely(chan))
ret = nouveau_finish_page_flip(chan, NULL);
@@ -534,3 +421,56 @@ nvc0_fifo_isr(struct drm_device *dev)
nv_wr32(dev, 0x002140, 0);
}
}
+
+static void
+nvc0_fifo_destroy(struct drm_device *dev, int engine)
+{
+ struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nouveau_vm_put(&priv->user_vma);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nvc0_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nvc0_fifo_destroy;
+ priv->base.base.init = nvc0_fifo_init;
+ priv->base.base.fini = nvc0_fifo_fini;
+ priv->base.base.context_new = nvc0_fifo_context_new;
+ priv->base.base.context_del = nvc0_fifo_context_del;
+ priv->base.channels = 128;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_get(dev_priv->bar1_vm, priv->base.channels * 0x1000,
+ 12, NV_MEM_ACCESS_RW, &priv->user_vma);
+ if (ret)
+ goto error;
+
+ nouveau_irq_register(dev, 8, nvc0_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 9066102d1159..2a01e6e47724 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -29,6 +29,7 @@
#include "nouveau_drv.h"
#include "nouveau_mm.h"
+#include "nouveau_fifo.h"
#include "nvc0_graph.h"
#include "nvc0_grhub.fuc.h"
@@ -620,13 +621,14 @@ nvc0_graph_init(struct drm_device *dev, int engine)
int
nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan;
unsigned long flags;
int i;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ for (i = 0; i < pfifo->channels; i++) {
chan = dev_priv->channels.ptr[i];
if (!chan || !chan->ramin)
continue;
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index ce65f81bb871..7c95c44e2887 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -164,7 +164,9 @@ struct nvc0_pm_clock {
};
struct nvc0_pm_state {
+ struct nouveau_pm_level *perflvl;
struct nvc0_pm_clock eng[16];
+ struct nvc0_pm_clock mem;
};
static u32
@@ -303,6 +305,48 @@ calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
return 0;
}
+static int
+calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
+{
+ struct pll_lims pll;
+ int N, M, P, ret;
+ u32 ctrl;
+
+ /* mclk pll input freq comes from another pll, make sure it's on */
+ ctrl = nv_rd32(dev, 0x132020);
+ if (!(ctrl & 0x00000001)) {
+ /* if not, program it to 567MHz.  it's not clear where this value
+ * comes from - it looks like it's in the pll limits table for
+ * 132000, but the binary driver ignores all attempts to change it.
+ */
+ nv_wr32(dev, 0x137320, 0x00000103);
+ nv_wr32(dev, 0x137330, 0x81200606);
+ nv_wait(dev, 0x132020, 0x00010000, 0x00010000);
+ nv_wr32(dev, 0x132024, 0x0001150f);
+ nv_mask(dev, 0x132020, 0x00000001, 0x00000001);
+ nv_wait(dev, 0x137390, 0x00020000, 0x00020000);
+ nv_mask(dev, 0x132020, 0x00000004, 0x00000004);
+ }
+
+ /* for the moment, until the clock tree is better understood, use
+ * pll mode for all clock frequencies
+ */
+ ret = get_pll_limits(dev, 0x132000, &pll);
+ if (ret == 0) {
+ pll.refclk = read_pll(dev, 0x132020);
+ if (pll.refclk) {
+ ret = nva3_calc_pll(dev, &pll, freq, &N, NULL, &M, &P);
+ if (ret > 0) {
+ info->coef = (P << 16) | (N << 8) | M;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
void *
nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
@@ -335,6 +379,15 @@ nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
return ERR_PTR(ret);
}
+ if (perflvl->memory) {
+ ret = calc_mem(dev, &info->mem, perflvl->memory);
+ if (ret) {
+ kfree(info);
+ return ERR_PTR(ret);
+ }
+ }
+
+ info->perflvl = perflvl;
return info;
}
@@ -375,12 +428,148 @@ prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
}
+static void
+mclk_precharge(struct nouveau_mem_exec_func *exec)
+{
+}
+
+static void
+mclk_refresh(struct nouveau_mem_exec_func *exec)
+{
+}
+
+static void
+mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
+{
+ nv_wr32(exec->dev, 0x10f210, enable ? 0x80000000 : 0x00000000);
+}
+
+static void
+mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
+{
+}
+
+static void
+mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
+{
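+ /* the exec helpers pass delays in nanoseconds; round to microseconds */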
+ udelay((nsec + 500) / 1000);
+}
+
+static u32
+mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
+{
+ struct drm_device *dev = exec->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
+ if (mr <= 1)
+ return nv_rd32(dev, 0x10f300 + ((mr - 0) * 4));
+ return nv_rd32(dev, 0x10f320 + ((mr - 2) * 4));
+ } else {
+ if (mr == 0)
+ return nv_rd32(dev, 0x10f300 + (mr * 4));
+ else
+ if (mr <= 7)
+ return nv_rd32(dev, 0x10f32c + (mr * 4));
+ return nv_rd32(dev, 0x10f34c);
+ }
+}
+
+static void
+mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
+{
+ struct drm_device *dev = exec->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
+ if (mr <= 1) {
+ nv_wr32(dev, 0x10f300 + ((mr - 0) * 4), data);
+ if (dev_priv->vram_rank_B)
+ nv_wr32(dev, 0x10f308 + ((mr - 0) * 4), data);
+ } else
+ if (mr <= 3) {
+ nv_wr32(dev, 0x10f320 + ((mr - 2) * 4), data);
+ if (dev_priv->vram_rank_B)
+ nv_wr32(dev, 0x10f328 + ((mr - 2) * 4), data);
+ }
+ } else {
+ if (mr == 0)
+ nv_wr32(dev, 0x10f300 + (mr * 4), data);
+ else if (mr <= 7)
+ nv_wr32(dev, 0x10f32c + (mr * 4), data);
+ else if (mr == 15)
+ nv_wr32(dev, 0x10f34c, data);
+ }
+}
+
+static void
+mclk_clock_set(struct nouveau_mem_exec_func *exec)
+{
+ struct nvc0_pm_state *info = exec->priv;
+ struct drm_device *dev = exec->dev;
+ u32 ctrl = nv_rd32(dev, 0x132000);
+
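+ /* presumably: park the source selects, drop the pll enable, program
+ * the new coefficients, then re-enable and wait for lock
+ */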
+ nv_wr32(dev, 0x137360, 0x00000001);
+ nv_wr32(dev, 0x137370, 0x00000000);
+ nv_wr32(dev, 0x137380, 0x00000000);
+ if (ctrl & 0x00000001)
+ nv_wr32(dev, 0x132000, (ctrl &= ~0x00000001));
+
+ nv_wr32(dev, 0x132004, info->mem.coef);
+ nv_wr32(dev, 0x132000, (ctrl |= 0x00000001));
+ nv_wait(dev, 0x137390, 0x00000002, 0x00000002);
+ nv_wr32(dev, 0x132018, 0x00005000);
+
+ nv_wr32(dev, 0x137370, 0x00000001);
+ nv_wr32(dev, 0x137380, 0x00000001);
+ nv_wr32(dev, 0x137360, 0x00000000);
+}
+
+static void
+mclk_timing_set(struct nouveau_mem_exec_func *exec)
+{
+ struct nvc0_pm_state *info = exec->priv;
+ struct nouveau_pm_level *perflvl = info->perflvl;
+ int i;
+
+ for (i = 0; i < 5; i++)
+ nv_wr32(exec->dev, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
+}
+
+static void
+prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_mem_exec_func exec = {
+ .dev = dev,
+ .precharge = mclk_precharge,
+ .refresh = mclk_refresh,
+ .refresh_auto = mclk_refresh_auto,
+ .refresh_self = mclk_refresh_self,
+ .wait = mclk_wait,
+ .mrg = mclk_mrg,
+ .mrs = mclk_mrs,
+ .clock_set = mclk_clock_set,
+ .timing_set = mclk_timing_set,
+ .priv = info
+ };
+
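+ /* these writes presumably stall display memory fetches while the
+ * memory interface is reprogrammed
+ */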
+ if (dev_priv->chipset < 0xd0)
+ nv_wr32(dev, 0x611200, 0x00003300);
+ else
+ nv_wr32(dev, 0x62c000, 0x03030000);
+
+ nouveau_mem_exec(&exec, info->perflvl);
+
+ if (dev_priv->chipset < 0xd0)
+ nv_wr32(dev, 0x611200, 0x00003330);
+ else
+ nv_wr32(dev, 0x62c000, 0x03030300);
+}
int
nvc0_pm_clocks_set(struct drm_device *dev, void *data)
{
struct nvc0_pm_state *info = data;
int i;
+ if (info->mem.coef)
+ prog_mem(dev, info);
+
for (i = 0; i < 16; i++) {
if (!info->eng[i].freq)
continue;
diff --git a/drivers/gpu/drm/nouveau/nvc0_software.c b/drivers/gpu/drm/nouveau/nvc0_software.c
new file mode 100644
index 000000000000..93e8c164fec6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_software.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
+#include "nouveau_software.h"
+
+#include "nv50_display.h"
+
+struct nvc0_software_priv {
+ struct nouveau_software_priv base;
+};
+
+struct nvc0_software_chan {
+ struct nouveau_software_chan base;
+ struct nouveau_vma dispc_vma[4];
+};
+
+u64
+nvc0_software_crtc(struct nouveau_channel *chan, int crtc)
+{
+ struct nvc0_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+ return pch->dispc_vma[crtc].offset;
+}
+
+static int
+nvc0_software_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
+ struct nvc0_software_chan *pch;
+ int ret = 0, i;
+
+ pch = kzalloc(sizeof(*pch), GFP_KERNEL);
+ if (!pch)
+ return -ENOMEM;
+
+ nouveau_software_context_new(&pch->base);
+ chan->engctx[engine] = pch;
+
+ /* map display semaphore buffers into channel's vm */
+ for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo;
+ if (dev_priv->card_type >= NV_D0)
+ bo = nvd0_display_crtc_sema(dev, i);
+ else
+ bo = nv50_display(dev)->crtc[i].sem.bo;
+
+ ret = nouveau_bo_vma_add(bo, chan->vm, &pch->dispc_vma[i]);
+ }
+
+ if (ret)
+ psw->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nvc0_software_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_software_chan *pch = chan->engctx[engine];
+ int i;
+
+ if (dev_priv->card_type >= NV_D0) {
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
+ nouveau_bo_vma_del(bo, &pch->dispc_vma[i]);
+ }
+ } else
+ if (dev_priv->card_type >= NV_50) {
+ struct nv50_display *disp = nv50_display(dev);
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nv50_display_crtc *dispc = &disp->crtc[i];
+ nouveau_bo_vma_del(dispc->sem.bo, &pch->dispc_vma[i]);
+ }
+ }
+
+ chan->engctx[engine] = NULL;
+ kfree(pch);
+}
+
+static int
+nvc0_software_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ return 0;
+}
+
+static int
+nvc0_software_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static int
+nvc0_software_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static void
+nvc0_software_destroy(struct drm_device *dev, int engine)
+{
+ struct nvc0_software_priv *psw = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, SW);
+ kfree(psw);
+}
+
+int
+nvc0_software_create(struct drm_device *dev)
+{
+ struct nvc0_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
+ if (!psw)
+ return -ENOMEM;
+
+ psw->base.base.destroy = nvc0_software_destroy;
+ psw->base.base.init = nvc0_software_init;
+ psw->base.base.fini = nvc0_software_fini;
+ psw->base.base.context_new = nvc0_software_context_new;
+ psw->base.base.context_del = nvc0_software_context_del;
+ psw->base.base.object_new = nvc0_software_object_new;
+ nouveau_software_create(&psw->base);
+
+ NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
+ NVOBJ_CLASS(dev, 0x906e, SW);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 0247250939e8..c486d3ce3c2c 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -33,6 +33,7 @@
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
#include "nouveau_fb.h"
+#include "nouveau_software.h"
#include "nv50_display.h"
#define EVO_DMA_NR 9
@@ -284,8 +285,6 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
u32 *push;
int ret;
- evo_sync(crtc->dev, EVO_MASTER);
-
swap_interval <<= 4;
if (swap_interval == 0)
swap_interval |= 0x100;
@@ -300,15 +299,16 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (ret)
return ret;
- offset = chan->dispc_vma[nv_crtc->index].offset;
+
+ offset = nvc0_software_crtc(chan, nv_crtc->index);
offset += evo->sem.offset;
- BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 0xf00d0000 | evo->sem.value);
OUT_RING (chan, 0x1002);
- BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
@@ -882,7 +882,7 @@ nvd0_crtc_create(struct drm_device *dev, int index)
drm_mode_crtc_set_gamma_size(crtc, 256);
ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -895,7 +895,7 @@ nvd0_crtc_create(struct drm_device *dev, int index)
goto out;
ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->lut.nvbo);
+ 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -2030,7 +2030,7 @@ nvd0_display_create(struct drm_device *dev)
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &disp->sync);
+ 0, 0x0000, NULL, &disp->sync);
if (!ret) {
ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
new file mode 100644
index 000000000000..1855ecbd843b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_fifo.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nouveau_fifo.h"
+
+#define NVE0_FIFO_ENGINE_NUM 32
+
+static void nve0_fifo_isr(struct drm_device *);
+
+struct nve0_fifo_engine {
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nve0_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
+ struct {
+ struct nouveau_gpuobj *mem;
+ struct nouveau_vma bar;
+ } user;
+ int spoon_nr;
+};
+
+struct nve0_fifo_chan {
+ struct nouveau_fifo_chan base;
+ u32 engine;
+};
+
+static void
+nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct nve0_fifo_engine *peng = &priv->engine[engine];
+ struct nouveau_gpuobj *cur;
+ u32 match = (engine << 16) | 0x00000001;
+ int ret, i, p;
+
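+ /* one playlist per engine on these chips; rebuild this engine's list
+ * from the channels currently bound to it
+ */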
+ cur = peng->playlist[peng->cur_playlist];
+ if (unlikely(cur == NULL)) {
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
+ if (ret) {
+ NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
+ return;
+ }
+
+ peng->playlist[peng->cur_playlist] = cur;
+ }
+
+ peng->cur_playlist = !peng->cur_playlist;
+
+ for (i = 0, p = 0; i < priv->base.channels; i++) {
+ u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
+ if (ctrl != match)
+ continue;
+ nv_wo32(cur, p + 0, i);
+ nv_wo32(cur, p + 4, 0x00000000);
+ p += 8;
+ }
+ pinstmem->flush(dev);
+
+ nv_wr32(dev, 0x002270, cur->vinst >> 12);
+ nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
+ if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
+ NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
+}
+
+static int
+nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ struct nve0_fifo_chan *fctx;
+ u64 usermem = priv->user.mem->vinst + chan->id * 512;
+ u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
+ int ret = 0, i;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ fctx->engine = 0; /* PGRAPH */
+
+ /* map this channel's slice of the shared USER area into the polling aperture */
+ chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
+ priv->user.bar.offset + (chan->id * 512), 512);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
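+ /* build the channel's fifo context (ramfc) directly in ramin */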
+ for (i = 0; i < 0x100; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+ nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
+ nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
+ nv_wo32(chan->ramin, 0x10, 0x0000face);
+ nv_wo32(chan->ramin, 0x30, 0xfffff902);
+ nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
+ nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+ upper_32_bits(ib_virt));
+ nv_wo32(chan->ramin, 0x84, 0x20400000);
+ nv_wo32(chan->ramin, 0x94, 0x30000001);
+ nv_wo32(chan->ramin, 0x9c, 0x00000100);
+ nv_wo32(chan->ramin, 0xac, 0x0000001f);
+ nv_wo32(chan->ramin, 0xe4, 0x00000000);
+ nv_wo32(chan->ramin, 0xe8, chan->id);
+ nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
+ nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
+ pinstmem->flush(dev);
+
+ nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
+ (chan->ramin->vinst >> 12));
+ nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
+ nve0_fifo_playlist_update(dev, fctx->engine);
+ nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nve0_fifo_chan *fctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+
+ nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
+ nv_wr32(dev, 0x002634, chan->id);
+ if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
+ NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
+ nve0_fifo_playlist_update(dev, fctx->engine);
+ nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);
+
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
+
+ chan->engctx[NVOBJ_ENGINE_FIFO] = NULL;
+ kfree(fctx);
+}
+
+static int
+nve0_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ struct nve0_fifo_chan *fctx;
+ int i;
+
+ /* reset PFIFO, enable all available PSUBFIFO areas */
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x000204, 0xffffffff);
+
+ priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
+ NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
+
+ /* PSUBFIFO[n] */
+ for (i = 0; i < priv->spoon_nr; i++) {
+ nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
+ }
+
+ nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+ nv_wr32(dev, 0x002a00, 0xffffffff);
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xbfffffff);
+
+ /* restore PFIFO context table */
+ for (i = 0; i < priv->base.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (!chan || !(fctx = chan->engctx[engine]))
+ continue;
+
+ nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
+ (chan->ramin->vinst >> 12));
+ nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
+ nve0_fifo_playlist_update(dev, fctx->engine);
+ nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
+ }
+
+ return 0;
+}
+
+static int
+nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ for (i = 0; i < priv->base.channels; i++) {
+ if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
+ continue;
+
+ nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
+ nv_wr32(dev, 0x002634, i);
+ if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
+ NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
+ i, nv_rd32(dev, 0x002634));
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, 0x002140, 0x00000000);
+ return 0;
+}
+
+struct nouveau_enum nve0_fifo_fault_unit[] = {
+ {}
+};
+
+struct nouveau_enum nve0_fifo_fault_reason[] = {
+ { 0x00, "PT_NOT_PRESENT" },
+ { 0x01, "PT_TOO_SHORT" },
+ { 0x02, "PAGE_NOT_PRESENT" },
+ { 0x03, "VM_LIMIT_EXCEEDED" },
+ { 0x04, "NO_CHANNEL" },
+ { 0x05, "PAGE_SYSTEM_ONLY" },
+ { 0x06, "PAGE_READ_ONLY" },
+ { 0x0a, "COMPRESSED_SYSRAM" },
+ { 0x0c, "INVALID_STORAGE_TYPE" },
+ {}
+};
+
+struct nouveau_enum nve0_fifo_fault_hubclient[] = {
+ {}
+};
+
+struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
+ {}
+};
+
+struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
+ { 0x00200000, "ILLEGAL_MTHD" },
+ { 0x00800000, "EMPTY_SUBC" },
+ {}
+};
+
+static void
+nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+{
+ u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
+ u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
+ u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
+ u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
+ u32 client = (stat & 0x00001f00) >> 8;
+
+ NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
+ (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
+ nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
+ printk("] from ");
+ nouveau_enum_print(nve0_fifo_fault_unit, unit);
+ if (stat & 0x00000040) {
+ printk("/");
+ nouveau_enum_print(nve0_fifo_fault_hubclient, client);
+ } else {
+ printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+ nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
+ }
+ printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
+static void
+nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+{
+ u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
+ u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
+ u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00003ffc);
+
+ NV_INFO(dev, "PSUBFIFO %d:", unit);
+ nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat);
+ NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid, subc, mthd, data);
+
+ nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nve0_fifo_isr(struct drm_device *dev)
+{
+ u32 stat = nv_rd32(dev, 0x002100);
+
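+ /* bit 28 flags vm faults, bit 29 per-subfifo errors; any leftover
+ * unknown bits are logged and the interrupt is masked off
+ */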
+ if (stat & 0x00000100) {
+ NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
+ nv_wr32(dev, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x10000000) {
+ u32 units = nv_rd32(dev, 0x00259c);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nve0_fifo_isr_vm_fault(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x00259c, units);
+ stat &= ~0x10000000;
+ }
+
+ if (stat & 0x20000000) {
+ u32 units = nv_rd32(dev, 0x0025a0);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nve0_fifo_isr_subfifo_intr(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x0025a0, units);
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
+ nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
+ stat &= ~0x40000000;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
+ nv_wr32(dev, 0x002100, stat);
+ nv_wr32(dev, 0x002140, 0);
+ }
+}
+
+static void
+nve0_fifo_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ nouveau_vm_put(&priv->user.bar);
+ nouveau_gpuobj_ref(NULL, &priv->user.mem);
+
+ for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
+ nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
+ nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
+ }
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nve0_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nve0_fifo_destroy;
+ priv->base.base.init = nve0_fifo_init;
+ priv->base.base.fini = nve0_fifo_fini;
+ priv->base.base.context_new = nve0_fifo_context_new;
+ priv->base.base.context_del = nve0_fifo_context_del;
+ priv->base.channels = 4096;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size,
+ 12, NV_MEM_ACCESS_RW, &priv->user.bar);
+ if (ret)
+ goto error;
+
+ nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node);
+
+ nouveau_irq_register(dev, 8, nve0_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.c b/drivers/gpu/drm/nouveau/nve0_graph.c
new file mode 100644
index 000000000000..8a8051b68f10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_graph.c
@@ -0,0 +1,831 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nouveau_fifo.h"
+
+#include "nve0_graph.h"
+
+static void
+nve0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
+{
+ NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
+ nv_rd32(dev, base + 0x400));
+ NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
+ nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
+ NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
+ nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
+}
+
+static void
+nve0_graph_ctxctl_debug(struct drm_device *dev)
+{
+ u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
+ u32 gpc;
+
+ nve0_graph_ctxctl_debug_unit(dev, 0x409000);
+ for (gpc = 0; gpc < gpcnr; gpc++)
+ nve0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
+}
+
+static int
+nve0_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ nv_wr32(dev, 0x409840, 0x00000030);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+ nv_wr32(dev, 0x409504, 0x00000003);
+ if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
+ NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
+
+ return 0;
+}
+
+static int
+nve0_graph_unload_context_to(struct drm_device *dev, u64 chan)
+{
+ nv_wr32(dev, 0x409840, 0x00000003);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
+ nv_wr32(dev, 0x409504, 0x00000009);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nve0_graph_construct_context(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ int ret, i;
+ u32 *ctx;
+
+ ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ nve0_graph_load_context(chan);
+
+ nv_wo32(grch->grctx, 0x1c, 1);
+ nv_wo32(grch->grctx, 0x20, 0);
+ nv_wo32(grch->grctx, 0x28, 0);
+ nv_wo32(grch->grctx, 0x2c, 0);
+ dev_priv->engine.instmem.flush(dev);
+
+ ret = nve0_grctx_generate(chan);
+ if (ret)
+ goto err;
+
+ ret = nve0_graph_unload_context_to(dev, chan->ramin->vinst);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ ctx[i / 4] = nv_ro32(grch->grctx, i);
+
+ priv->grctx_vals = ctx;
+ return 0;
+
+err:
+ kfree(ctx);
+ return ret;
+}
+
+static int
+nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
+{
+ struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ u32 magic[GPC_MAX][2];
+ u16 offset = 0x0000;
+ int gpc;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x3000, 256, NVOBJ_FLAG_VM,
+ &grch->unk408004);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
+ &grch->unk40800c);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
+ &grch->unk418810);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
+ &grch->mmio);
+ if (ret)
+ return ret;
+
+#define mmio(r,v) do { \
+ nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 0, (r)); \
+ nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 4, (v)); \
+ grch->mmio_nr++; \
+} while (0)
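+ /* build the list of (register, value) pairs that the ctxctl ucode
+ * appears to replay when loading this channel's graphics context
+ */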
+ mmio(0x40800c, grch->unk40800c->linst >> 8);
+ mmio(0x408010, 0x80000000);
+ mmio(0x419004, grch->unk40800c->linst >> 8);
+ mmio(0x419008, 0x00000000);
+ mmio(0x4064cc, 0x80000000);
+ mmio(0x408004, grch->unk408004->linst >> 8);
+ mmio(0x408008, 0x80000030);
+ mmio(0x418808, grch->unk408004->linst >> 8);
+ mmio(0x41880c, 0x80000030);
+ mmio(0x4064c8, 0x01800600);
+ mmio(0x418810, 0x80000000 | grch->unk418810->linst >> 12);
+ mmio(0x419848, 0x10000000 | grch->unk418810->linst >> 12);
+ mmio(0x405830, 0x02180648);
+ mmio(0x4064c4, 0x0192ffff);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
+ u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
+ magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
+ magic[gpc][1] = 0x00000000 | (magic1 << 16);
+ offset += 0x0324 * priv->tpc_nr[gpc];
+ }
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ mmio(GPC_UNIT(gpc, 0x30c0), magic[gpc][0]);
+ mmio(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset);
+ offset += 0x07ff * priv->tpc_nr[gpc];
+ }
+
+ mmio(0x17e91c, 0x06060609);
+ mmio(0x17e920, 0x00090a05);
+#undef mmio
+ return 0;
+}
+
+static int
+nve0_graph_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nve0_graph_priv *priv = nv_engine(dev, engine);
+ struct nve0_graph_chan *grch;
+ struct nouveau_gpuobj *grctx;
+ int ret, i;
+
+ grch = kzalloc(sizeof(*grch), GFP_KERNEL);
+ if (!grch)
+ return -ENOMEM;
+ chan->engctx[NVOBJ_ENGINE_GR] = grch;
+
+ ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
+ &grch->grctx);
+ if (ret)
+ goto error;
+ grctx = grch->grctx;
+
+ ret = nve0_graph_create_context_mmio_list(chan);
+ if (ret)
+ goto error;
+
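+ /* plug the grctx address into the channel's instance block (the OR'ed
+ * 0x4 is presumably a flag marking the pointer valid)
+ */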
+ nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
+ nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
+ pinstmem->flush(dev);
+
+ if (!priv->grctx_vals) {
+ ret = nve0_graph_construct_context(chan);
+ if (ret)
+ goto error;
+ }
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
+ nv_wo32(grctx, 0xf4, 0);
+ nv_wo32(grctx, 0xf8, 0);
+ nv_wo32(grctx, 0x10, grch->mmio_nr);
+ nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
+ nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
+ nv_wo32(grctx, 0x1c, 1);
+ nv_wo32(grctx, 0x20, 0);
+ nv_wo32(grctx, 0x28, 0);
+ nv_wo32(grctx, 0x2c, 0);
+
+ pinstmem->flush(dev);
+ return 0;
+
+error:
+ priv->base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nve0_graph_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nve0_graph_chan *grch = chan->engctx[engine];
+
+ nouveau_gpuobj_ref(NULL, &grch->mmio);
+ nouveau_gpuobj_ref(NULL, &grch->unk418810);
+ nouveau_gpuobj_ref(NULL, &grch->unk40800c);
+ nouveau_gpuobj_ref(NULL, &grch->unk408004);
+ nouveau_gpuobj_ref(NULL, &grch->grctx);
+ chan->engctx[engine] = NULL;
+}
+
+static int
+nve0_graph_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ return 0;
+}
+
+static int
+nve0_graph_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static void
+nve0_graph_init_obj418880(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int i;
+
+ nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
+ nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
+}
+
+static void
+nve0_graph_init_regs(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x400080, 0x003083c2);
+ nv_wr32(dev, 0x400088, 0x0001ffe7);
+ nv_wr32(dev, 0x40008c, 0x00000000);
+ nv_wr32(dev, 0x400090, 0x00000030);
+ nv_wr32(dev, 0x40013c, 0x003901f7);
+ nv_wr32(dev, 0x400140, 0x00000100);
+ nv_wr32(dev, 0x400144, 0x00000000);
+ nv_wr32(dev, 0x400148, 0x00000110);
+ nv_wr32(dev, 0x400138, 0x00000000);
+ nv_wr32(dev, 0x400130, 0x00000000);
+ nv_wr32(dev, 0x400134, 0x00000000);
+ nv_wr32(dev, 0x400124, 0x00000002);
+}
+
+static void
+nve0_graph_init_units(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x409ffc, 0x00000000);
+ nv_wr32(dev, 0x409c14, 0x00003e3e);
+ nv_wr32(dev, 0x409c24, 0x000f0000);
+
+ nv_wr32(dev, 0x404000, 0xc0000000);
+ nv_wr32(dev, 0x404600, 0xc0000000);
+ nv_wr32(dev, 0x408030, 0xc0000000);
+ nv_wr32(dev, 0x404490, 0xc0000000);
+ nv_wr32(dev, 0x406018, 0xc0000000);
+ nv_wr32(dev, 0x407020, 0xc0000000);
+ nv_wr32(dev, 0x405840, 0xc0000000);
+ nv_wr32(dev, 0x405844, 0x00ffffff);
+
+ nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
+ nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
+}
+
+static void
+nve0_graph_init_gpc_0(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+ u32 data[TPC_MAX / 8];
+ u8 tpcnr[GPC_MAX];
+ int i, gpc, tpc;
+
+ nv_wr32(dev, GPC_UNIT(0, 0x3018), 0x00000001);
+
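+ /* build the TPC distribution table: walk the TPCs round-robin across
+ * GPCs and pack each one's index within its GPC into 4-bit fields of
+ * data[]
+ */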
+ memset(data, 0x00, sizeof(data));
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ data[i / 8] |= tpc << ((i % 8) * 4);
+ }
+
+ nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
+ nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
+ nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
+ nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+ priv->tpc_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
+ nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
+}
+
+static void
+nve0_graph_init_gpc_1(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int gpc, tpc;
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x3038), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+ }
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ }
+}
+
+static void
+nve0_graph_init_rop(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int rop;
+
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
+ }
+}
+
+static void
+nve0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
+ struct nve0_graph_fuc *code, struct nve0_graph_fuc *data)
+{
+ int i;
+
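+ /* upload the falcon's data segment through the auto-incrementing port
+ * at +0x1c4, then the code segment through +0x184, selecting each
+ * 256-byte page via +0x188 (an interpretation of the writes below)
+ */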
+ nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
+ for (i = 0; i < data->size / 4; i++)
+ nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
+
+ nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
+ for (i = 0; i < code->size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, fuc_base + 0x0188, i >> 6);
+ nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
+ }
+}
+
+static int
+nve0_graph_init_ctxctl(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ u32 r000260;
+
+ /* load fuc microcode */
+ r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+ nve0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
+ nve0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
+ nv_wr32(dev, 0x000260, r000260);
+
+ /* start both of them running */
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x41a10c, 0x00000000);
+ nv_wr32(dev, 0x40910c, 0x00000000);
+ nv_wr32(dev, 0x41a100, 0x00000002);
+ nv_wr32(dev, 0x409100, 0x00000002);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
+ NV_INFO(dev, "0x409800 wait failed\n");
+
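+ /* issue a sequence of setup requests (0x10, 0x16, 0x25, 0x30, 0x31,
+ * 0x32) to the ctxctl ucode; request 0x10 returns the context size,
+ * the purpose of the others is not documented
+ */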
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x7fffffff);
+ nv_wr32(dev, 0x409504, 0x00000021);
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000010);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
+ return -EBUSY;
+ }
+ priv->grctx_size = nv_rd32(dev, 0x409800);
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000016);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000025);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409800, 0x00000000);
+ nv_wr32(dev, 0x409500, 0x00000001);
+ nv_wr32(dev, 0x409504, 0x00000030);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x30 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409810, 0xb00095c8);
+ nv_wr32(dev, 0x409800, 0x00000000);
+ nv_wr32(dev, 0x409500, 0x00000001);
+ nv_wr32(dev, 0x409504, 0x00000031);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x31 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409810, 0x00080420);
+ nv_wr32(dev, 0x409800, 0x00000000);
+ nv_wr32(dev, 0x409500, 0x00000001);
+ nv_wr32(dev, 0x409504, 0x00000032);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x32 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409614, 0x00000070);
+ nv_wr32(dev, 0x409614, 0x00000770);
+ nv_wr32(dev, 0x40802c, 0x00000001);
+ return 0;
+}
+
+static int
+nve0_graph_init(struct drm_device *dev, int engine)
+{
+ int ret;
+
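+ /* reset PGRAPH (and related units) by toggling their bits in
+ * PMC_ENABLE (0x000200) before reprogramming the engine
+ */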
+ nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
+
+ nve0_graph_init_obj418880(dev);
+ nve0_graph_init_regs(dev);
+ nve0_graph_init_gpc_0(dev);
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+ nv_wr32(dev, 0x400100, 0xffffffff);
+ nv_wr32(dev, 0x40013c, 0xffffffff);
+
+ nve0_graph_init_units(dev);
+ nve0_graph_init_gpc_1(dev);
+ nve0_graph_init_rop(dev);
+
+ nv_wr32(dev, 0x400108, 0xffffffff);
+ nv_wr32(dev, 0x400138, 0xffffffff);
+ nv_wr32(dev, 0x400118, 0xffffffff);
+ nv_wr32(dev, 0x400130, 0xffffffff);
+ nv_wr32(dev, 0x40011c, 0xffffffff);
+ nv_wr32(dev, 0x400134, 0xffffffff);
+ nv_wr32(dev, 0x400054, 0x34ce3464);
+
+ ret = nve0_graph_init_ctxctl(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < pfifo->channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (inst == chan->ramin->vinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nve0_graph_ctxctl_isr(struct drm_device *dev)
+{
+ u32 ustat = nv_rd32(dev, 0x409c18);
+
+ if (ustat & 0x00000001)
+ NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
+ if (ustat & 0x00080000)
+ NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
+ if (ustat & ~0x00080001)
+ NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
+
+ nve0_graph_ctxctl_debug(dev);
+ nv_wr32(dev, 0x409c20, ustat);
+}
+
+static void
+nve0_graph_trap_isr(struct drm_device *dev, int chid)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ u32 trap = nv_rd32(dev, 0x400108);
+ int rop;
+
+ if (trap & 0x00000001) {
+ u32 stat = nv_rd32(dev, 0x404000);
+ NV_INFO(dev, "PGRAPH: DISPATCH ch %d 0x%08x\n", chid, stat);
+ nv_wr32(dev, 0x404000, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x00000001);
+ trap &= ~0x00000001;
+ }
+
+ if (trap & 0x00000010) {
+ u32 stat = nv_rd32(dev, 0x405840);
+ NV_INFO(dev, "PGRAPH: SHADER ch %d 0x%08x\n", chid, stat);
+ nv_wr32(dev, 0x405840, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x00000010);
+ trap &= ~0x00000010;
+ }
+
+ if (trap & 0x02000000) {
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ u32 statz = nv_rd32(dev, ROP_UNIT(rop, 0x070));
+ u32 statc = nv_rd32(dev, ROP_UNIT(rop, 0x144));
+ NV_INFO(dev, "PGRAPH: ROP%d ch %d 0x%08x 0x%08x\n",
+ rop, chid, statz, statc);
+ nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
+ }
+ nv_wr32(dev, 0x400108, 0x02000000);
+ trap &= ~0x02000000;
+ }
+
+ if (trap) {
+ NV_INFO(dev, "PGRAPH: TRAP ch %d 0x%08x\n", chid, trap);
+ nv_wr32(dev, 0x400108, trap);
+ }
+}
+
+static void
+nve0_graph_isr(struct drm_device *dev)
+{
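+ /* decode the trapped channel (via its instance address), subchannel,
+ * class, method and data before dispatching to the handlers below
+ */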
+ u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+ u32 chid = nve0_graph_isr_chid(dev, inst);
+ u32 stat = nv_rd32(dev, 0x400100);
+ u32 addr = nv_rd32(dev, 0x400704);
+ u32 mthd = (addr & 0x00003ffc);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 data = nv_rd32(dev, 0x400708);
+ u32 code = nv_rd32(dev, 0x400110);
+ u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
+
+ if (stat & 0x00000010) {
+ if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
+ nv_wr32(dev, 0x400100, 0x00000010);
+ stat &= ~0x00000010;
+ }
+
+ if (stat & 0x00000020) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00000020);
+ stat &= ~0x00000020;
+ }
+
+ if (stat & 0x00100000) {
+ NV_INFO(dev, "PGRAPH: DATA_ERROR [");
+ nouveau_enum_print(nv50_data_error_names, code);
+ printk("] ch %d [0x%010llx] subc %d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00100000);
+ stat &= ~0x00100000;
+ }
+
+ if (stat & 0x00200000) {
+ nve0_graph_trap_isr(dev, chid);
+ nv_wr32(dev, 0x400100, 0x00200000);
+ stat &= ~0x00200000;
+ }
+
+ if (stat & 0x00080000) {
+ nve0_graph_ctxctl_isr(dev);
+ nv_wr32(dev, 0x400100, 0x00080000);
+ stat &= ~0x00080000;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
+ nv_wr32(dev, 0x400100, stat);
+ }
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+}
+
+static int
+nve0_graph_create_fw(struct drm_device *dev, const char *fwname,
+ struct nve0_graph_fuc *fuc)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const struct firmware *fw;
+ char f[32];
+ int ret;
+
+ snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
+ ret = request_firmware(&fw, f, &dev->pdev->dev);
+ if (ret)
+ return ret;
+
+ fuc->size = fw->size;
+ fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
+ release_firmware(fw);
+ return (fuc->data != NULL) ? 0 : -ENOMEM;
+}
+
+static void
+nve0_graph_destroy_fw(struct nve0_graph_fuc *fuc)
+{
+ kfree(fuc->data);
+ fuc->data = NULL;
+}
+
+static void
+nve0_graph_destroy(struct drm_device *dev, int engine)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, engine);
+
+ nve0_graph_destroy_fw(&priv->fuc409c);
+ nve0_graph_destroy_fw(&priv->fuc409d);
+ nve0_graph_destroy_fw(&priv->fuc41ac);
+ nve0_graph_destroy_fw(&priv->fuc41ad);
+
+ nouveau_irq_unregister(dev, 12);
+
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
+
+ kfree(priv->grctx_vals);
+
+ NVOBJ_ENGINE_DEL(dev, GR);
+ kfree(priv);
+}
+
+int
+nve0_graph_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_graph_priv *priv;
+ int ret, gpc, i;
+ u32 kepler;
+
+ kepler = nve0_graph_class(dev);
+ if (!kepler) {
+ NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
+ return 0;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.destroy = nve0_graph_destroy;
+ priv->base.init = nve0_graph_init;
+ priv->base.fini = nve0_graph_fini;
+ priv->base.context_new = nve0_graph_context_new;
+ priv->base.context_del = nve0_graph_context_del;
+ priv->base.object_new = nve0_graph_object_new;
+
+ NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
+ nouveau_irq_register(dev, 12, nve0_graph_isr);
+
+ NV_INFO(dev, "PGRAPH: using external firmware\n");
+ if (nve0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
+ nve0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
+ nve0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
+ nve0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
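+ /* missing external firmware is not fatal to driver load: tear the
+ * engine down and return 0 (presumably so modesetting still works
+ * without acceleration)
+ */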
+ ret = 0;
+ goto error;
+ }
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+ if (ret)
+ goto error;
+
+ for (i = 0; i < 0x1000; i += 4) {
+ nv_wo32(priv->unk4188b4, i, 0x00000010);
+ nv_wo32(priv->unk4188b8, i, 0x00000010);
+ }
+
+ priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
+ priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
+ priv->tpc_total += priv->tpc_nr[gpc];
+ }
+
+ switch (dev_priv->chipset) {
+ case 0xe4:
+ if (priv->tpc_total == 8)
+ priv->magic_not_rop_nr = 3;
+ else if (priv->tpc_total == 7)
+ priv->magic_not_rop_nr = 1;
+ break;
+ case 0xe7:
+ priv->magic_not_rop_nr = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (!priv->magic_not_rop_nr) {
+ NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
+ priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2],
+ priv->tpc_nr[3], priv->rop_nr);
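+ /* no verified value for this configuration; fall back to zero */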
+ priv->magic_not_rop_nr = 0x00;
+ }
+
+ NVOBJ_CLASS(dev, 0xa097, GR); /* subc 0: 3D */
+ NVOBJ_CLASS(dev, 0xa0c0, GR); /* subc 1: COMPUTE */
+ NVOBJ_CLASS(dev, 0xa040, GR); /* subc 2: P2MF */
+ NVOBJ_CLASS(dev, 0x902d, GR); /* subc 3: 2D */
+ NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
+ return 0;
+
+error:
+ nve0_graph_destroy(dev, NVOBJ_ENGINE_GR);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.h b/drivers/gpu/drm/nouveau/nve0_graph.h
new file mode 100644
index 000000000000..2ba70449ba01
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_graph.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NVE0_GRAPH_H__
+#define __NVE0_GRAPH_H__
+
+#define GPC_MAX 4
+#define TPC_MAX 32
+
+#define ROP_BCAST(r) (0x408800 + (r))
+#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
+#define GPC_BCAST(r) (0x418000 + (r))
+#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
+#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
+
+struct nve0_graph_fuc {
+ u32 *data;
+ u32 size;
+};
+
+struct nve0_graph_priv {
+ struct nouveau_exec_engine base;
+
+ struct nve0_graph_fuc fuc409c;
+ struct nve0_graph_fuc fuc409d;
+ struct nve0_graph_fuc fuc41ac;
+ struct nve0_graph_fuc fuc41ad;
+
+ u8 gpc_nr;
+ u8 rop_nr;
+ u8 tpc_nr[GPC_MAX];
+ u8 tpc_total;
+
+ u32 grctx_size;
+ u32 *grctx_vals;
+ struct nouveau_gpuobj *unk4188b4;
+ struct nouveau_gpuobj *unk4188b8;
+
+ u8 magic_not_rop_nr;
+};
+
+struct nve0_graph_chan {
+ struct nouveau_gpuobj *grctx;
+ struct nouveau_gpuobj *unk408004; /* 0x418810 too */
+ struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
+ struct nouveau_gpuobj *unk418810; /* 0x419848 too */
+ struct nouveau_gpuobj *mmio;
+ int mmio_nr;
+};
+
+int nve0_grctx_generate(struct nouveau_channel *);
+
+/* nve0_graph.c uses this also to determine supported chipsets */
+static inline u32
+nve0_graph_class(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0xe4:
+ case 0xe7:
+ return 0xa097;
+ default:
+ return 0;
+ }
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nve0_grctx.c b/drivers/gpu/drm/nouveau/nve0_grctx.c
new file mode 100644
index 000000000000..d8cb360e92c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_grctx.c
@@ -0,0 +1,2777 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nve0_graph.h"
+
+static void
+nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
+{
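+ /* write one "icmd" and busy-wait until bit 1 of 0x400700 clears;
+ * note there is no timeout on this poll
+ */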
+ nv_wr32(dev, 0x400204, data);
+ nv_wr32(dev, 0x400200, icmd);
+ while (nv_rd32(dev, 0x400700) & 0x00000002) {}
+}
+
+static void
+nve0_grctx_generate_icmd(struct drm_device *dev)
+{
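+ /* 0x400208 bit 31 appears to gate icmd submission: enable it, write
+ * the default state table, and clear it again at the end
+ */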
+ nv_wr32(dev, 0x400208, 0x80000000);
+ nv_icmd(dev, 0x001000, 0x00000004);
+ nv_icmd(dev, 0x000039, 0x00000000);
+ nv_icmd(dev, 0x00003a, 0x00000000);
+ nv_icmd(dev, 0x00003b, 0x00000000);
+ nv_icmd(dev, 0x0000a9, 0x0000ffff);
+ nv_icmd(dev, 0x000038, 0x0fac6881);
+ nv_icmd(dev, 0x00003d, 0x00000001);
+ nv_icmd(dev, 0x0000e8, 0x00000400);
+ nv_icmd(dev, 0x0000e9, 0x00000400);
+ nv_icmd(dev, 0x0000ea, 0x00000400);
+ nv_icmd(dev, 0x0000eb, 0x00000400);
+ nv_icmd(dev, 0x0000ec, 0x00000400);
+ nv_icmd(dev, 0x0000ed, 0x00000400);
+ nv_icmd(dev, 0x0000ee, 0x00000400);
+ nv_icmd(dev, 0x0000ef, 0x00000400);
+ nv_icmd(dev, 0x000078, 0x00000300);
+ nv_icmd(dev, 0x000079, 0x00000300);
+ nv_icmd(dev, 0x00007a, 0x00000300);
+ nv_icmd(dev, 0x00007b, 0x00000300);
+ nv_icmd(dev, 0x00007c, 0x00000300);
+ nv_icmd(dev, 0x00007d, 0x00000300);
+ nv_icmd(dev, 0x00007e, 0x00000300);
+ nv_icmd(dev, 0x00007f, 0x00000300);
+ nv_icmd(dev, 0x000050, 0x00000011);
+ nv_icmd(dev, 0x000058, 0x00000008);
+ nv_icmd(dev, 0x000059, 0x00000008);
+ nv_icmd(dev, 0x00005a, 0x00000008);
+ nv_icmd(dev, 0x00005b, 0x00000008);
+ nv_icmd(dev, 0x00005c, 0x00000008);
+ nv_icmd(dev, 0x00005d, 0x00000008);
+ nv_icmd(dev, 0x00005e, 0x00000008);
+ nv_icmd(dev, 0x00005f, 0x00000008);
+ nv_icmd(dev, 0x000208, 0x00000001);
+ nv_icmd(dev, 0x000209, 0x00000001);
+ nv_icmd(dev, 0x00020a, 0x00000001);
+ nv_icmd(dev, 0x00020b, 0x00000001);
+ nv_icmd(dev, 0x00020c, 0x00000001);
+ nv_icmd(dev, 0x00020d, 0x00000001);
+ nv_icmd(dev, 0x00020e, 0x00000001);
+ nv_icmd(dev, 0x00020f, 0x00000001);
+ nv_icmd(dev, 0x000081, 0x00000001);
+ nv_icmd(dev, 0x000085, 0x00000004);
+ nv_icmd(dev, 0x000088, 0x00000400);
+ nv_icmd(dev, 0x000090, 0x00000300);
+ nv_icmd(dev, 0x000098, 0x00001001);
+ nv_icmd(dev, 0x0000e3, 0x00000001);
+ nv_icmd(dev, 0x0000da, 0x00000001);
+ nv_icmd(dev, 0x0000f8, 0x00000003);
+ nv_icmd(dev, 0x0000fa, 0x00000001);
+ nv_icmd(dev, 0x00009f, 0x0000ffff);
+ nv_icmd(dev, 0x0000a0, 0x0000ffff);
+ nv_icmd(dev, 0x0000a1, 0x0000ffff);
+ nv_icmd(dev, 0x0000a2, 0x0000ffff);
+ nv_icmd(dev, 0x0000b1, 0x00000001);
+ nv_icmd(dev, 0x0000ad, 0x0000013e);
+ nv_icmd(dev, 0x0000e1, 0x00000010);
+ nv_icmd(dev, 0x000290, 0x00000000);
+ nv_icmd(dev, 0x000291, 0x00000000);
+ nv_icmd(dev, 0x000292, 0x00000000);
+ nv_icmd(dev, 0x000293, 0x00000000);
+ nv_icmd(dev, 0x000294, 0x00000000);
+ nv_icmd(dev, 0x000295, 0x00000000);
+ nv_icmd(dev, 0x000296, 0x00000000);
+ nv_icmd(dev, 0x000297, 0x00000000);
+ nv_icmd(dev, 0x000298, 0x00000000);
+ nv_icmd(dev, 0x000299, 0x00000000);
+ nv_icmd(dev, 0x00029a, 0x00000000);
+ nv_icmd(dev, 0x00029b, 0x00000000);
+ nv_icmd(dev, 0x00029c, 0x00000000);
+ nv_icmd(dev, 0x00029d, 0x00000000);
+ nv_icmd(dev, 0x00029e, 0x00000000);
+ nv_icmd(dev, 0x00029f, 0x00000000);
+ nv_icmd(dev, 0x0003b0, 0x00000000);
+ nv_icmd(dev, 0x0003b1, 0x00000000);
+ nv_icmd(dev, 0x0003b2, 0x00000000);
+ nv_icmd(dev, 0x0003b3, 0x00000000);
+ nv_icmd(dev, 0x0003b4, 0x00000000);
+ nv_icmd(dev, 0x0003b5, 0x00000000);
+ nv_icmd(dev, 0x0003b6, 0x00000000);
+ nv_icmd(dev, 0x0003b7, 0x00000000);
+ nv_icmd(dev, 0x0003b8, 0x00000000);
+ nv_icmd(dev, 0x0003b9, 0x00000000);
+ nv_icmd(dev, 0x0003ba, 0x00000000);
+ nv_icmd(dev, 0x0003bb, 0x00000000);
+ nv_icmd(dev, 0x0003bc, 0x00000000);
+ nv_icmd(dev, 0x0003bd, 0x00000000);
+ nv_icmd(dev, 0x0003be, 0x00000000);
+ nv_icmd(dev, 0x0003bf, 0x00000000);
+ nv_icmd(dev, 0x0002a0, 0x00000000);
+ nv_icmd(dev, 0x0002a1, 0x00000000);
+ nv_icmd(dev, 0x0002a2, 0x00000000);
+ nv_icmd(dev, 0x0002a3, 0x00000000);
+ nv_icmd(dev, 0x0002a4, 0x00000000);
+ nv_icmd(dev, 0x0002a5, 0x00000000);
+ nv_icmd(dev, 0x0002a6, 0x00000000);
+ nv_icmd(dev, 0x0002a7, 0x00000000);
+ nv_icmd(dev, 0x0002a8, 0x00000000);
+ nv_icmd(dev, 0x0002a9, 0x00000000);
+ nv_icmd(dev, 0x0002aa, 0x00000000);
+ nv_icmd(dev, 0x0002ab, 0x00000000);
+ nv_icmd(dev, 0x0002ac, 0x00000000);
+ nv_icmd(dev, 0x0002ad, 0x00000000);
+ nv_icmd(dev, 0x0002ae, 0x00000000);
+ nv_icmd(dev, 0x0002af, 0x00000000);
+ nv_icmd(dev, 0x000420, 0x00000000);
+ nv_icmd(dev, 0x000421, 0x00000000);
+ nv_icmd(dev, 0x000422, 0x00000000);
+ nv_icmd(dev, 0x000423, 0x00000000);
+ nv_icmd(dev, 0x000424, 0x00000000);
+ nv_icmd(dev, 0x000425, 0x00000000);
+ nv_icmd(dev, 0x000426, 0x00000000);
+ nv_icmd(dev, 0x000427, 0x00000000);
+ nv_icmd(dev, 0x000428, 0x00000000);
+ nv_icmd(dev, 0x000429, 0x00000000);
+ nv_icmd(dev, 0x00042a, 0x00000000);
+ nv_icmd(dev, 0x00042b, 0x00000000);
+ nv_icmd(dev, 0x00042c, 0x00000000);
+ nv_icmd(dev, 0x00042d, 0x00000000);
+ nv_icmd(dev, 0x00042e, 0x00000000);
+ nv_icmd(dev, 0x00042f, 0x00000000);
+ nv_icmd(dev, 0x0002b0, 0x00000000);
+ nv_icmd(dev, 0x0002b1, 0x00000000);
+ nv_icmd(dev, 0x0002b2, 0x00000000);
+ nv_icmd(dev, 0x0002b3, 0x00000000);
+ nv_icmd(dev, 0x0002b4, 0x00000000);
+ nv_icmd(dev, 0x0002b5, 0x00000000);
+ nv_icmd(dev, 0x0002b6, 0x00000000);
+ nv_icmd(dev, 0x0002b7, 0x00000000);
+ nv_icmd(dev, 0x0002b8, 0x00000000);
+ nv_icmd(dev, 0x0002b9, 0x00000000);
+ nv_icmd(dev, 0x0002ba, 0x00000000);
+ nv_icmd(dev, 0x0002bb, 0x00000000);
+ nv_icmd(dev, 0x0002bc, 0x00000000);
+ nv_icmd(dev, 0x0002bd, 0x00000000);
+ nv_icmd(dev, 0x0002be, 0x00000000);
+ nv_icmd(dev, 0x0002bf, 0x00000000);
+ nv_icmd(dev, 0x000430, 0x00000000);
+ nv_icmd(dev, 0x000431, 0x00000000);
+ nv_icmd(dev, 0x000432, 0x00000000);
+ nv_icmd(dev, 0x000433, 0x00000000);
+ nv_icmd(dev, 0x000434, 0x00000000);
+ nv_icmd(dev, 0x000435, 0x00000000);
+ nv_icmd(dev, 0x000436, 0x00000000);
+ nv_icmd(dev, 0x000437, 0x00000000);
+ nv_icmd(dev, 0x000438, 0x00000000);
+ nv_icmd(dev, 0x000439, 0x00000000);
+ nv_icmd(dev, 0x00043a, 0x00000000);
+ nv_icmd(dev, 0x00043b, 0x00000000);
+ nv_icmd(dev, 0x00043c, 0x00000000);
+ nv_icmd(dev, 0x00043d, 0x00000000);
+ nv_icmd(dev, 0x00043e, 0x00000000);
+ nv_icmd(dev, 0x00043f, 0x00000000);
+ nv_icmd(dev, 0x0002c0, 0x00000000);
+ nv_icmd(dev, 0x0002c1, 0x00000000);
+ nv_icmd(dev, 0x0002c2, 0x00000000);
+ nv_icmd(dev, 0x0002c3, 0x00000000);
+ nv_icmd(dev, 0x0002c4, 0x00000000);
+ nv_icmd(dev, 0x0002c5, 0x00000000);
+ nv_icmd(dev, 0x0002c6, 0x00000000);
+ nv_icmd(dev, 0x0002c7, 0x00000000);
+ nv_icmd(dev, 0x0002c8, 0x00000000);
+ nv_icmd(dev, 0x0002c9, 0x00000000);
+ nv_icmd(dev, 0x0002ca, 0x00000000);
+ nv_icmd(dev, 0x0002cb, 0x00000000);
+ nv_icmd(dev, 0x0002cc, 0x00000000);
+ nv_icmd(dev, 0x0002cd, 0x00000000);
+ nv_icmd(dev, 0x0002ce, 0x00000000);
+ nv_icmd(dev, 0x0002cf, 0x00000000);
+ nv_icmd(dev, 0x0004d0, 0x00000000);
+ nv_icmd(dev, 0x0004d1, 0x00000000);
+ nv_icmd(dev, 0x0004d2, 0x00000000);
+ nv_icmd(dev, 0x0004d3, 0x00000000);
+ nv_icmd(dev, 0x0004d4, 0x00000000);
+ nv_icmd(dev, 0x0004d5, 0x00000000);
+ nv_icmd(dev, 0x0004d6, 0x00000000);
+ nv_icmd(dev, 0x0004d7, 0x00000000);
+ nv_icmd(dev, 0x0004d8, 0x00000000);
+ nv_icmd(dev, 0x0004d9, 0x00000000);
+ nv_icmd(dev, 0x0004da, 0x00000000);
+ nv_icmd(dev, 0x0004db, 0x00000000);
+ nv_icmd(dev, 0x0004dc, 0x00000000);
+ nv_icmd(dev, 0x0004dd, 0x00000000);
+ nv_icmd(dev, 0x0004de, 0x00000000);
+ nv_icmd(dev, 0x0004df, 0x00000000);
+ nv_icmd(dev, 0x000720, 0x00000000);
+ nv_icmd(dev, 0x000721, 0x00000000);
+ nv_icmd(dev, 0x000722, 0x00000000);
+ nv_icmd(dev, 0x000723, 0x00000000);
+ nv_icmd(dev, 0x000724, 0x00000000);
+ nv_icmd(dev, 0x000725, 0x00000000);
+ nv_icmd(dev, 0x000726, 0x00000000);
+ nv_icmd(dev, 0x000727, 0x00000000);
+ nv_icmd(dev, 0x000728, 0x00000000);
+ nv_icmd(dev, 0x000729, 0x00000000);
+ nv_icmd(dev, 0x00072a, 0x00000000);
+ nv_icmd(dev, 0x00072b, 0x00000000);
+ nv_icmd(dev, 0x00072c, 0x00000000);
+ nv_icmd(dev, 0x00072d, 0x00000000);
+ nv_icmd(dev, 0x00072e, 0x00000000);
+ nv_icmd(dev, 0x00072f, 0x00000000);
+ nv_icmd(dev, 0x0008c0, 0x00000000);
+ nv_icmd(dev, 0x0008c1, 0x00000000);
+ nv_icmd(dev, 0x0008c2, 0x00000000);
+ nv_icmd(dev, 0x0008c3, 0x00000000);
+ nv_icmd(dev, 0x0008c4, 0x00000000);
+ nv_icmd(dev, 0x0008c5, 0x00000000);
+ nv_icmd(dev, 0x0008c6, 0x00000000);
+ nv_icmd(dev, 0x0008c7, 0x00000000);
+ nv_icmd(dev, 0x0008c8, 0x00000000);
+ nv_icmd(dev, 0x0008c9, 0x00000000);
+ nv_icmd(dev, 0x0008ca, 0x00000000);
+ nv_icmd(dev, 0x0008cb, 0x00000000);
+ nv_icmd(dev, 0x0008cc, 0x00000000);
+ nv_icmd(dev, 0x0008cd, 0x00000000);
+ nv_icmd(dev, 0x0008ce, 0x00000000);
+ nv_icmd(dev, 0x0008cf, 0x00000000);
+ nv_icmd(dev, 0x000890, 0x00000000);
+ nv_icmd(dev, 0x000891, 0x00000000);
+ nv_icmd(dev, 0x000892, 0x00000000);
+ nv_icmd(dev, 0x000893, 0x00000000);
+ nv_icmd(dev, 0x000894, 0x00000000);
+ nv_icmd(dev, 0x000895, 0x00000000);
+ nv_icmd(dev, 0x000896, 0x00000000);
+ nv_icmd(dev, 0x000897, 0x00000000);
+ nv_icmd(dev, 0x000898, 0x00000000);
+ nv_icmd(dev, 0x000899, 0x00000000);
+ nv_icmd(dev, 0x00089a, 0x00000000);
+ nv_icmd(dev, 0x00089b, 0x00000000);
+ nv_icmd(dev, 0x00089c, 0x00000000);
+ nv_icmd(dev, 0x00089d, 0x00000000);
+ nv_icmd(dev, 0x00089e, 0x00000000);
+ nv_icmd(dev, 0x00089f, 0x00000000);
+ nv_icmd(dev, 0x0008e0, 0x00000000);
+ nv_icmd(dev, 0x0008e1, 0x00000000);
+ nv_icmd(dev, 0x0008e2, 0x00000000);
+ nv_icmd(dev, 0x0008e3, 0x00000000);
+ nv_icmd(dev, 0x0008e4, 0x00000000);
+ nv_icmd(dev, 0x0008e5, 0x00000000);
+ nv_icmd(dev, 0x0008e6, 0x00000000);
+ nv_icmd(dev, 0x0008e7, 0x00000000);
+ nv_icmd(dev, 0x0008e8, 0x00000000);
+ nv_icmd(dev, 0x0008e9, 0x00000000);
+ nv_icmd(dev, 0x0008ea, 0x00000000);
+ nv_icmd(dev, 0x0008eb, 0x00000000);
+ nv_icmd(dev, 0x0008ec, 0x00000000);
+ nv_icmd(dev, 0x0008ed, 0x00000000);
+ nv_icmd(dev, 0x0008ee, 0x00000000);
+ nv_icmd(dev, 0x0008ef, 0x00000000);
+ nv_icmd(dev, 0x0008a0, 0x00000000);
+ nv_icmd(dev, 0x0008a1, 0x00000000);
+ nv_icmd(dev, 0x0008a2, 0x00000000);
+ nv_icmd(dev, 0x0008a3, 0x00000000);
+ nv_icmd(dev, 0x0008a4, 0x00000000);
+ nv_icmd(dev, 0x0008a5, 0x00000000);
+ nv_icmd(dev, 0x0008a6, 0x00000000);
+ nv_icmd(dev, 0x0008a7, 0x00000000);
+ nv_icmd(dev, 0x0008a8, 0x00000000);
+ nv_icmd(dev, 0x0008a9, 0x00000000);
+ nv_icmd(dev, 0x0008aa, 0x00000000);
+ nv_icmd(dev, 0x0008ab, 0x00000000);
+ nv_icmd(dev, 0x0008ac, 0x00000000);
+ nv_icmd(dev, 0x0008ad, 0x00000000);
+ nv_icmd(dev, 0x0008ae, 0x00000000);
+ nv_icmd(dev, 0x0008af, 0x00000000);
+ nv_icmd(dev, 0x0008f0, 0x00000000);
+ nv_icmd(dev, 0x0008f1, 0x00000000);
+ nv_icmd(dev, 0x0008f2, 0x00000000);
+ nv_icmd(dev, 0x0008f3, 0x00000000);
+ nv_icmd(dev, 0x0008f4, 0x00000000);
+ nv_icmd(dev, 0x0008f5, 0x00000000);
+ nv_icmd(dev, 0x0008f6, 0x00000000);
+ nv_icmd(dev, 0x0008f7, 0x00000000);
+ nv_icmd(dev, 0x0008f8, 0x00000000);
+ nv_icmd(dev, 0x0008f9, 0x00000000);
+ nv_icmd(dev, 0x0008fa, 0x00000000);
+ nv_icmd(dev, 0x0008fb, 0x00000000);
+ nv_icmd(dev, 0x0008fc, 0x00000000);
+ nv_icmd(dev, 0x0008fd, 0x00000000);
+ nv_icmd(dev, 0x0008fe, 0x00000000);
+ nv_icmd(dev, 0x0008ff, 0x00000000);
+ nv_icmd(dev, 0x00094c, 0x000000ff);
+ nv_icmd(dev, 0x00094d, 0xffffffff);
+ nv_icmd(dev, 0x00094e, 0x00000002);
+ nv_icmd(dev, 0x0002ec, 0x00000001);
+ nv_icmd(dev, 0x000303, 0x00000001);
+ nv_icmd(dev, 0x0002e6, 0x00000001);
+ nv_icmd(dev, 0x000466, 0x00000052);
+ nv_icmd(dev, 0x000301, 0x3f800000);
+ nv_icmd(dev, 0x000304, 0x30201000);
+ nv_icmd(dev, 0x000305, 0x70605040);
+ nv_icmd(dev, 0x000306, 0xb8a89888);
+ nv_icmd(dev, 0x000307, 0xf8e8d8c8);
+ nv_icmd(dev, 0x00030a, 0x00ffff00);
+ nv_icmd(dev, 0x00030b, 0x0000001a);
+ nv_icmd(dev, 0x00030c, 0x00000001);
+ nv_icmd(dev, 0x000318, 0x00000001);
+ nv_icmd(dev, 0x000340, 0x00000000);
+ nv_icmd(dev, 0x000375, 0x00000001);
+ nv_icmd(dev, 0x00037d, 0x00000006);
+ nv_icmd(dev, 0x0003a0, 0x00000002);
+ nv_icmd(dev, 0x0003aa, 0x00000001);
+ nv_icmd(dev, 0x0003a9, 0x00000001);
+ nv_icmd(dev, 0x000380, 0x00000001);
+ nv_icmd(dev, 0x000383, 0x00000011);
+ nv_icmd(dev, 0x000360, 0x00000040);
+ nv_icmd(dev, 0x000366, 0x00000000);
+ nv_icmd(dev, 0x000367, 0x00000000);
+ nv_icmd(dev, 0x000368, 0x00000fff);
+ nv_icmd(dev, 0x000370, 0x00000000);
+ nv_icmd(dev, 0x000371, 0x00000000);
+ nv_icmd(dev, 0x000372, 0x000fffff);
+ nv_icmd(dev, 0x00037a, 0x00000012);
+ nv_icmd(dev, 0x000619, 0x00000003);
+ nv_icmd(dev, 0x000811, 0x00000003);
+ nv_icmd(dev, 0x000812, 0x00000004);
+ nv_icmd(dev, 0x000813, 0x00000006);
+ nv_icmd(dev, 0x000814, 0x00000008);
+ nv_icmd(dev, 0x000815, 0x0000000b);
+ nv_icmd(dev, 0x000800, 0x00000001);
+ nv_icmd(dev, 0x000801, 0x00000001);
+ nv_icmd(dev, 0x000802, 0x00000001);
+ nv_icmd(dev, 0x000803, 0x00000001);
+ nv_icmd(dev, 0x000804, 0x00000001);
+ nv_icmd(dev, 0x000805, 0x00000001);
+ nv_icmd(dev, 0x000632, 0x00000001);
+ nv_icmd(dev, 0x000633, 0x00000002);
+ nv_icmd(dev, 0x000634, 0x00000003);
+ nv_icmd(dev, 0x000635, 0x00000004);
+ nv_icmd(dev, 0x000654, 0x3f800000);
+ nv_icmd(dev, 0x000657, 0x3f800000);
+ nv_icmd(dev, 0x000655, 0x3f800000);
+ nv_icmd(dev, 0x000656, 0x3f800000);
+ nv_icmd(dev, 0x0006cd, 0x3f800000);
+ nv_icmd(dev, 0x0007f5, 0x3f800000);
+ nv_icmd(dev, 0x0007dc, 0x39291909);
+ nv_icmd(dev, 0x0007dd, 0x79695949);
+ nv_icmd(dev, 0x0007de, 0xb9a99989);
+ nv_icmd(dev, 0x0007df, 0xf9e9d9c9);
+ nv_icmd(dev, 0x0007e8, 0x00003210);
+ nv_icmd(dev, 0x0007e9, 0x00007654);
+ nv_icmd(dev, 0x0007ea, 0x00000098);
+ nv_icmd(dev, 0x0007ec, 0x39291909);
+ nv_icmd(dev, 0x0007ed, 0x79695949);
+ nv_icmd(dev, 0x0007ee, 0xb9a99989);
+ nv_icmd(dev, 0x0007ef, 0xf9e9d9c9);
+ nv_icmd(dev, 0x0007f0, 0x00003210);
+ nv_icmd(dev, 0x0007f1, 0x00007654);
+ nv_icmd(dev, 0x0007f2, 0x00000098);
+ nv_icmd(dev, 0x0005a5, 0x00000001);
+ nv_icmd(dev, 0x000980, 0x00000000);
+ nv_icmd(dev, 0x000981, 0x00000000);
+ nv_icmd(dev, 0x000982, 0x00000000);
+ nv_icmd(dev, 0x000983, 0x00000000);
+ nv_icmd(dev, 0x000984, 0x00000000);
+ nv_icmd(dev, 0x000985, 0x00000000);
+ nv_icmd(dev, 0x000986, 0x00000000);
+ nv_icmd(dev, 0x000987, 0x00000000);
+ nv_icmd(dev, 0x000988, 0x00000000);
+ nv_icmd(dev, 0x000989, 0x00000000);
+ nv_icmd(dev, 0x00098a, 0x00000000);
+ nv_icmd(dev, 0x00098b, 0x00000000);
+ nv_icmd(dev, 0x00098c, 0x00000000);
+ nv_icmd(dev, 0x00098d, 0x00000000);
+ nv_icmd(dev, 0x00098e, 0x00000000);
+ nv_icmd(dev, 0x00098f, 0x00000000);
+ nv_icmd(dev, 0x000990, 0x00000000);
+ nv_icmd(dev, 0x000991, 0x00000000);
+ nv_icmd(dev, 0x000992, 0x00000000);
+ nv_icmd(dev, 0x000993, 0x00000000);
+ nv_icmd(dev, 0x000994, 0x00000000);
+ nv_icmd(dev, 0x000995, 0x00000000);
+ nv_icmd(dev, 0x000996, 0x00000000);
+ nv_icmd(dev, 0x000997, 0x00000000);
+ nv_icmd(dev, 0x000998, 0x00000000);
+ nv_icmd(dev, 0x000999, 0x00000000);
+ nv_icmd(dev, 0x00099a, 0x00000000);
+ nv_icmd(dev, 0x00099b, 0x00000000);
+ nv_icmd(dev, 0x00099c, 0x00000000);
+ nv_icmd(dev, 0x00099d, 0x00000000);
+ nv_icmd(dev, 0x00099e, 0x00000000);
+ nv_icmd(dev, 0x00099f, 0x00000000);
+ nv_icmd(dev, 0x0009a0, 0x00000000);
+ nv_icmd(dev, 0x0009a1, 0x00000000);
+ nv_icmd(dev, 0x0009a2, 0x00000000);
+ nv_icmd(dev, 0x0009a3, 0x00000000);
+ nv_icmd(dev, 0x0009a4, 0x00000000);
+ nv_icmd(dev, 0x0009a5, 0x00000000);
+ nv_icmd(dev, 0x0009a6, 0x00000000);
+ nv_icmd(dev, 0x0009a7, 0x00000000);
+ nv_icmd(dev, 0x0009a8, 0x00000000);
+ nv_icmd(dev, 0x0009a9, 0x00000000);
+ nv_icmd(dev, 0x0009aa, 0x00000000);
+ nv_icmd(dev, 0x0009ab, 0x00000000);
+ nv_icmd(dev, 0x0009ac, 0x00000000);
+ nv_icmd(dev, 0x0009ad, 0x00000000);
+ nv_icmd(dev, 0x0009ae, 0x00000000);
+ nv_icmd(dev, 0x0009af, 0x00000000);
+ nv_icmd(dev, 0x0009b0, 0x00000000);
+ nv_icmd(dev, 0x0009b1, 0x00000000);
+ nv_icmd(dev, 0x0009b2, 0x00000000);
+ nv_icmd(dev, 0x0009b3, 0x00000000);
+ nv_icmd(dev, 0x0009b4, 0x00000000);
+ nv_icmd(dev, 0x0009b5, 0x00000000);
+ nv_icmd(dev, 0x0009b6, 0x00000000);
+ nv_icmd(dev, 0x0009b7, 0x00000000);
+ nv_icmd(dev, 0x0009b8, 0x00000000);
+ nv_icmd(dev, 0x0009b9, 0x00000000);
+ nv_icmd(dev, 0x0009ba, 0x00000000);
+ nv_icmd(dev, 0x0009bb, 0x00000000);
+ nv_icmd(dev, 0x0009bc, 0x00000000);
+ nv_icmd(dev, 0x0009bd, 0x00000000);
+ nv_icmd(dev, 0x0009be, 0x00000000);
+ nv_icmd(dev, 0x0009bf, 0x00000000);
+ nv_icmd(dev, 0x0009c0, 0x00000000);
+ nv_icmd(dev, 0x0009c1, 0x00000000);
+ nv_icmd(dev, 0x0009c2, 0x00000000);
+ nv_icmd(dev, 0x0009c3, 0x00000000);
+ nv_icmd(dev, 0x0009c4, 0x00000000);
+ nv_icmd(dev, 0x0009c5, 0x00000000);
+ nv_icmd(dev, 0x0009c6, 0x00000000);
+ nv_icmd(dev, 0x0009c7, 0x00000000);
+ nv_icmd(dev, 0x0009c8, 0x00000000);
+ nv_icmd(dev, 0x0009c9, 0x00000000);
+ nv_icmd(dev, 0x0009ca, 0x00000000);
+ nv_icmd(dev, 0x0009cb, 0x00000000);
+ nv_icmd(dev, 0x0009cc, 0x00000000);
+ nv_icmd(dev, 0x0009cd, 0x00000000);
+ nv_icmd(dev, 0x0009ce, 0x00000000);
+ nv_icmd(dev, 0x0009cf, 0x00000000);
+ nv_icmd(dev, 0x0009d0, 0x00000000);
+ nv_icmd(dev, 0x0009d1, 0x00000000);
+ nv_icmd(dev, 0x0009d2, 0x00000000);
+ nv_icmd(dev, 0x0009d3, 0x00000000);
+ nv_icmd(dev, 0x0009d4, 0x00000000);
+ nv_icmd(dev, 0x0009d5, 0x00000000);
+ nv_icmd(dev, 0x0009d6, 0x00000000);
+ nv_icmd(dev, 0x0009d7, 0x00000000);
+ nv_icmd(dev, 0x0009d8, 0x00000000);
+ nv_icmd(dev, 0x0009d9, 0x00000000);
+ nv_icmd(dev, 0x0009da, 0x00000000);
+ nv_icmd(dev, 0x0009db, 0x00000000);
+ nv_icmd(dev, 0x0009dc, 0x00000000);
+ nv_icmd(dev, 0x0009dd, 0x00000000);
+ nv_icmd(dev, 0x0009de, 0x00000000);
+ nv_icmd(dev, 0x0009df, 0x00000000);
+ nv_icmd(dev, 0x0009e0, 0x00000000);
+ nv_icmd(dev, 0x0009e1, 0x00000000);
+ nv_icmd(dev, 0x0009e2, 0x00000000);
+ nv_icmd(dev, 0x0009e3, 0x00000000);
+ nv_icmd(dev, 0x0009e4, 0x00000000);
+ nv_icmd(dev, 0x0009e5, 0x00000000);
+ nv_icmd(dev, 0x0009e6, 0x00000000);
+ nv_icmd(dev, 0x0009e7, 0x00000000);
+ nv_icmd(dev, 0x0009e8, 0x00000000);
+ nv_icmd(dev, 0x0009e9, 0x00000000);
+ nv_icmd(dev, 0x0009ea, 0x00000000);
+ nv_icmd(dev, 0x0009eb, 0x00000000);
+ nv_icmd(dev, 0x0009ec, 0x00000000);
+ nv_icmd(dev, 0x0009ed, 0x00000000);
+ nv_icmd(dev, 0x0009ee, 0x00000000);
+ nv_icmd(dev, 0x0009ef, 0x00000000);
+ nv_icmd(dev, 0x0009f0, 0x00000000);
+ nv_icmd(dev, 0x0009f1, 0x00000000);
+ nv_icmd(dev, 0x0009f2, 0x00000000);
+ nv_icmd(dev, 0x0009f3, 0x00000000);
+ nv_icmd(dev, 0x0009f4, 0x00000000);
+ nv_icmd(dev, 0x0009f5, 0x00000000);
+ nv_icmd(dev, 0x0009f6, 0x00000000);
+ nv_icmd(dev, 0x0009f7, 0x00000000);
+ nv_icmd(dev, 0x0009f8, 0x00000000);
+ nv_icmd(dev, 0x0009f9, 0x00000000);
+ nv_icmd(dev, 0x0009fa, 0x00000000);
+ nv_icmd(dev, 0x0009fb, 0x00000000);
+ nv_icmd(dev, 0x0009fc, 0x00000000);
+ nv_icmd(dev, 0x0009fd, 0x00000000);
+ nv_icmd(dev, 0x0009fe, 0x00000000);
+ nv_icmd(dev, 0x0009ff, 0x00000000);
+ nv_icmd(dev, 0x000468, 0x00000004);
+ nv_icmd(dev, 0x00046c, 0x00000001);
+ nv_icmd(dev, 0x000470, 0x00000000);
+ nv_icmd(dev, 0x000471, 0x00000000);
+ nv_icmd(dev, 0x000472, 0x00000000);
+ nv_icmd(dev, 0x000473, 0x00000000);
+ nv_icmd(dev, 0x000474, 0x00000000);
+ nv_icmd(dev, 0x000475, 0x00000000);
+ nv_icmd(dev, 0x000476, 0x00000000);
+ nv_icmd(dev, 0x000477, 0x00000000);
+ nv_icmd(dev, 0x000478, 0x00000000);
+ nv_icmd(dev, 0x000479, 0x00000000);
+ nv_icmd(dev, 0x00047a, 0x00000000);
+ nv_icmd(dev, 0x00047b, 0x00000000);
+ nv_icmd(dev, 0x00047c, 0x00000000);
+ nv_icmd(dev, 0x00047d, 0x00000000);
+ nv_icmd(dev, 0x00047e, 0x00000000);
+ nv_icmd(dev, 0x00047f, 0x00000000);
+ nv_icmd(dev, 0x000480, 0x00000000);
+ nv_icmd(dev, 0x000481, 0x00000000);
+ nv_icmd(dev, 0x000482, 0x00000000);
+ nv_icmd(dev, 0x000483, 0x00000000);
+ nv_icmd(dev, 0x000484, 0x00000000);
+ nv_icmd(dev, 0x000485, 0x00000000);
+ nv_icmd(dev, 0x000486, 0x00000000);
+ nv_icmd(dev, 0x000487, 0x00000000);
+ nv_icmd(dev, 0x000488, 0x00000000);
+ nv_icmd(dev, 0x000489, 0x00000000);
+ nv_icmd(dev, 0x00048a, 0x00000000);
+ nv_icmd(dev, 0x00048b, 0x00000000);
+ nv_icmd(dev, 0x00048c, 0x00000000);
+ nv_icmd(dev, 0x00048d, 0x00000000);
+ nv_icmd(dev, 0x00048e, 0x00000000);
+ nv_icmd(dev, 0x00048f, 0x00000000);
+ nv_icmd(dev, 0x000490, 0x00000000);
+ nv_icmd(dev, 0x000491, 0x00000000);
+ nv_icmd(dev, 0x000492, 0x00000000);
+ nv_icmd(dev, 0x000493, 0x00000000);
+ nv_icmd(dev, 0x000494, 0x00000000);
+ nv_icmd(dev, 0x000495, 0x00000000);
+ nv_icmd(dev, 0x000496, 0x00000000);
+ nv_icmd(dev, 0x000497, 0x00000000);
+ nv_icmd(dev, 0x000498, 0x00000000);
+ nv_icmd(dev, 0x000499, 0x00000000);
+ nv_icmd(dev, 0x00049a, 0x00000000);
+ nv_icmd(dev, 0x00049b, 0x00000000);
+ nv_icmd(dev, 0x00049c, 0x00000000);
+ nv_icmd(dev, 0x00049d, 0x00000000);
+ nv_icmd(dev, 0x00049e, 0x00000000);
+ nv_icmd(dev, 0x00049f, 0x00000000);
+ nv_icmd(dev, 0x0004a0, 0x00000000);
+ nv_icmd(dev, 0x0004a1, 0x00000000);
+ nv_icmd(dev, 0x0004a2, 0x00000000);
+ nv_icmd(dev, 0x0004a3, 0x00000000);
+ nv_icmd(dev, 0x0004a4, 0x00000000);
+ nv_icmd(dev, 0x0004a5, 0x00000000);
+ nv_icmd(dev, 0x0004a6, 0x00000000);
+ nv_icmd(dev, 0x0004a7, 0x00000000);
+ nv_icmd(dev, 0x0004a8, 0x00000000);
+ nv_icmd(dev, 0x0004a9, 0x00000000);
+ nv_icmd(dev, 0x0004aa, 0x00000000);
+ nv_icmd(dev, 0x0004ab, 0x00000000);
+ nv_icmd(dev, 0x0004ac, 0x00000000);
+ nv_icmd(dev, 0x0004ad, 0x00000000);
+ nv_icmd(dev, 0x0004ae, 0x00000000);
+ nv_icmd(dev, 0x0004af, 0x00000000);
+ nv_icmd(dev, 0x0004b0, 0x00000000);
+ nv_icmd(dev, 0x0004b1, 0x00000000);
+ nv_icmd(dev, 0x0004b2, 0x00000000);
+ nv_icmd(dev, 0x0004b3, 0x00000000);
+ nv_icmd(dev, 0x0004b4, 0x00000000);
+ nv_icmd(dev, 0x0004b5, 0x00000000);
+ nv_icmd(dev, 0x0004b6, 0x00000000);
+ nv_icmd(dev, 0x0004b7, 0x00000000);
+ nv_icmd(dev, 0x0004b8, 0x00000000);
+ nv_icmd(dev, 0x0004b9, 0x00000000);
+ nv_icmd(dev, 0x0004ba, 0x00000000);
+ nv_icmd(dev, 0x0004bb, 0x00000000);
+ nv_icmd(dev, 0x0004bc, 0x00000000);
+ nv_icmd(dev, 0x0004bd, 0x00000000);
+ nv_icmd(dev, 0x0004be, 0x00000000);
+ nv_icmd(dev, 0x0004bf, 0x00000000);
+ nv_icmd(dev, 0x0004c0, 0x00000000);
+ nv_icmd(dev, 0x0004c1, 0x00000000);
+ nv_icmd(dev, 0x0004c2, 0x00000000);
+ nv_icmd(dev, 0x0004c3, 0x00000000);
+ nv_icmd(dev, 0x0004c4, 0x00000000);
+ nv_icmd(dev, 0x0004c5, 0x00000000);
+ nv_icmd(dev, 0x0004c6, 0x00000000);
+ nv_icmd(dev, 0x0004c7, 0x00000000);
+ nv_icmd(dev, 0x0004c8, 0x00000000);
+ nv_icmd(dev, 0x0004c9, 0x00000000);
+ nv_icmd(dev, 0x0004ca, 0x00000000);
+ nv_icmd(dev, 0x0004cb, 0x00000000);
+ nv_icmd(dev, 0x0004cc, 0x00000000);
+ nv_icmd(dev, 0x0004cd, 0x00000000);
+ nv_icmd(dev, 0x0004ce, 0x00000000);
+ nv_icmd(dev, 0x0004cf, 0x00000000);
+ nv_icmd(dev, 0x000510, 0x3f800000);
+ nv_icmd(dev, 0x000511, 0x3f800000);
+ nv_icmd(dev, 0x000512, 0x3f800000);
+ nv_icmd(dev, 0x000513, 0x3f800000);
+ nv_icmd(dev, 0x000514, 0x3f800000);
+ nv_icmd(dev, 0x000515, 0x3f800000);
+ nv_icmd(dev, 0x000516, 0x3f800000);
+ nv_icmd(dev, 0x000517, 0x3f800000);
+ nv_icmd(dev, 0x000518, 0x3f800000);
+ nv_icmd(dev, 0x000519, 0x3f800000);
+ nv_icmd(dev, 0x00051a, 0x3f800000);
+ nv_icmd(dev, 0x00051b, 0x3f800000);
+ nv_icmd(dev, 0x00051c, 0x3f800000);
+ nv_icmd(dev, 0x00051d, 0x3f800000);
+ nv_icmd(dev, 0x00051e, 0x3f800000);
+ nv_icmd(dev, 0x00051f, 0x3f800000);
+ nv_icmd(dev, 0x000520, 0x000002b6);
+ nv_icmd(dev, 0x000529, 0x00000001);
+ nv_icmd(dev, 0x000530, 0xffff0000);
+ nv_icmd(dev, 0x000531, 0xffff0000);
+ nv_icmd(dev, 0x000532, 0xffff0000);
+ nv_icmd(dev, 0x000533, 0xffff0000);
+ nv_icmd(dev, 0x000534, 0xffff0000);
+ nv_icmd(dev, 0x000535, 0xffff0000);
+ nv_icmd(dev, 0x000536, 0xffff0000);
+ nv_icmd(dev, 0x000537, 0xffff0000);
+ nv_icmd(dev, 0x000538, 0xffff0000);
+ nv_icmd(dev, 0x000539, 0xffff0000);
+ nv_icmd(dev, 0x00053a, 0xffff0000);
+ nv_icmd(dev, 0x00053b, 0xffff0000);
+ nv_icmd(dev, 0x00053c, 0xffff0000);
+ nv_icmd(dev, 0x00053d, 0xffff0000);
+ nv_icmd(dev, 0x00053e, 0xffff0000);
+ nv_icmd(dev, 0x00053f, 0xffff0000);
+ nv_icmd(dev, 0x000585, 0x0000003f);
+ nv_icmd(dev, 0x000576, 0x00000003);
+ nv_icmd(dev, 0x00057b, 0x00000059);
+ nv_icmd(dev, 0x000586, 0x00000040);
+ nv_icmd(dev, 0x000582, 0x00000080);
+ nv_icmd(dev, 0x000583, 0x00000080);
+ nv_icmd(dev, 0x0005c2, 0x00000001);
+ nv_icmd(dev, 0x000638, 0x00000001);
+ nv_icmd(dev, 0x000639, 0x00000001);
+ nv_icmd(dev, 0x00063a, 0x00000002);
+ nv_icmd(dev, 0x00063b, 0x00000001);
+ nv_icmd(dev, 0x00063c, 0x00000001);
+ nv_icmd(dev, 0x00063d, 0x00000002);
+ nv_icmd(dev, 0x00063e, 0x00000001);
+ nv_icmd(dev, 0x0008b8, 0x00000001);
+ nv_icmd(dev, 0x0008b9, 0x00000001);
+ nv_icmd(dev, 0x0008ba, 0x00000001);
+ nv_icmd(dev, 0x0008bb, 0x00000001);
+ nv_icmd(dev, 0x0008bc, 0x00000001);
+ nv_icmd(dev, 0x0008bd, 0x00000001);
+ nv_icmd(dev, 0x0008be, 0x00000001);
+ nv_icmd(dev, 0x0008bf, 0x00000001);
+ nv_icmd(dev, 0x000900, 0x00000001);
+ nv_icmd(dev, 0x000901, 0x00000001);
+ nv_icmd(dev, 0x000902, 0x00000001);
+ nv_icmd(dev, 0x000903, 0x00000001);
+ nv_icmd(dev, 0x000904, 0x00000001);
+ nv_icmd(dev, 0x000905, 0x00000001);
+ nv_icmd(dev, 0x000906, 0x00000001);
+ nv_icmd(dev, 0x000907, 0x00000001);
+ nv_icmd(dev, 0x000908, 0x00000002);
+ nv_icmd(dev, 0x000909, 0x00000002);
+ nv_icmd(dev, 0x00090a, 0x00000002);
+ nv_icmd(dev, 0x00090b, 0x00000002);
+ nv_icmd(dev, 0x00090c, 0x00000002);
+ nv_icmd(dev, 0x00090d, 0x00000002);
+ nv_icmd(dev, 0x00090e, 0x00000002);
+ nv_icmd(dev, 0x00090f, 0x00000002);
+ nv_icmd(dev, 0x000910, 0x00000001);
+ nv_icmd(dev, 0x000911, 0x00000001);
+ nv_icmd(dev, 0x000912, 0x00000001);
+ nv_icmd(dev, 0x000913, 0x00000001);
+ nv_icmd(dev, 0x000914, 0x00000001);
+ nv_icmd(dev, 0x000915, 0x00000001);
+ nv_icmd(dev, 0x000916, 0x00000001);
+ nv_icmd(dev, 0x000917, 0x00000001);
+ nv_icmd(dev, 0x000918, 0x00000001);
+ nv_icmd(dev, 0x000919, 0x00000001);
+ nv_icmd(dev, 0x00091a, 0x00000001);
+ nv_icmd(dev, 0x00091b, 0x00000001);
+ nv_icmd(dev, 0x00091c, 0x00000001);
+ nv_icmd(dev, 0x00091d, 0x00000001);
+ nv_icmd(dev, 0x00091e, 0x00000001);
+ nv_icmd(dev, 0x00091f, 0x00000001);
+ nv_icmd(dev, 0x000920, 0x00000002);
+ nv_icmd(dev, 0x000921, 0x00000002);
+ nv_icmd(dev, 0x000922, 0x00000002);
+ nv_icmd(dev, 0x000923, 0x00000002);
+ nv_icmd(dev, 0x000924, 0x00000002);
+ nv_icmd(dev, 0x000925, 0x00000002);
+ nv_icmd(dev, 0x000926, 0x00000002);
+ nv_icmd(dev, 0x000927, 0x00000002);
+ nv_icmd(dev, 0x000928, 0x00000001);
+ nv_icmd(dev, 0x000929, 0x00000001);
+ nv_icmd(dev, 0x00092a, 0x00000001);
+ nv_icmd(dev, 0x00092b, 0x00000001);
+ nv_icmd(dev, 0x00092c, 0x00000001);
+ nv_icmd(dev, 0x00092d, 0x00000001);
+ nv_icmd(dev, 0x00092e, 0x00000001);
+ nv_icmd(dev, 0x00092f, 0x00000001);
+ nv_icmd(dev, 0x000648, 0x00000001);
+ nv_icmd(dev, 0x000649, 0x00000001);
+ nv_icmd(dev, 0x00064a, 0x00000001);
+ nv_icmd(dev, 0x00064b, 0x00000001);
+ nv_icmd(dev, 0x00064c, 0x00000001);
+ nv_icmd(dev, 0x00064d, 0x00000001);
+ nv_icmd(dev, 0x00064e, 0x00000001);
+ nv_icmd(dev, 0x00064f, 0x00000001);
+ nv_icmd(dev, 0x000650, 0x00000001);
+ nv_icmd(dev, 0x000658, 0x0000000f);
+ nv_icmd(dev, 0x0007ff, 0x0000000a);
+ nv_icmd(dev, 0x00066a, 0x40000000);
+ nv_icmd(dev, 0x00066b, 0x10000000);
+ nv_icmd(dev, 0x00066c, 0xffff0000);
+ nv_icmd(dev, 0x00066d, 0xffff0000);
+ nv_icmd(dev, 0x0007af, 0x00000008);
+ nv_icmd(dev, 0x0007b0, 0x00000008);
+ nv_icmd(dev, 0x0007f6, 0x00000001);
+ nv_icmd(dev, 0x0006b2, 0x00000055);
+ nv_icmd(dev, 0x0007ad, 0x00000003);
+ nv_icmd(dev, 0x000937, 0x00000001);
+ nv_icmd(dev, 0x000971, 0x00000008);
+ nv_icmd(dev, 0x000972, 0x00000040);
+ nv_icmd(dev, 0x000973, 0x0000012c);
+ nv_icmd(dev, 0x00097c, 0x00000040);
+ nv_icmd(dev, 0x000979, 0x00000003);
+ nv_icmd(dev, 0x000975, 0x00000020);
+ nv_icmd(dev, 0x000976, 0x00000001);
+ nv_icmd(dev, 0x000977, 0x00000020);
+ nv_icmd(dev, 0x000978, 0x00000001);
+ nv_icmd(dev, 0x000957, 0x00000003);
+ nv_icmd(dev, 0x00095e, 0x20164010);
+ nv_icmd(dev, 0x00095f, 0x00000020);
+ nv_icmd(dev, 0x00097d, 0x00000020);
+ nv_icmd(dev, 0x000683, 0x00000006);
+ nv_icmd(dev, 0x000685, 0x003fffff);
+ nv_icmd(dev, 0x000687, 0x003fffff);
+ nv_icmd(dev, 0x0006a0, 0x00000005);
+ nv_icmd(dev, 0x000840, 0x00400008);
+ nv_icmd(dev, 0x000841, 0x08000080);
+ nv_icmd(dev, 0x000842, 0x00400008);
+ nv_icmd(dev, 0x000843, 0x08000080);
+ nv_icmd(dev, 0x000818, 0x00000000);
+ nv_icmd(dev, 0x000819, 0x00000000);
+ nv_icmd(dev, 0x00081a, 0x00000000);
+ nv_icmd(dev, 0x00081b, 0x00000000);
+ nv_icmd(dev, 0x00081c, 0x00000000);
+ nv_icmd(dev, 0x00081d, 0x00000000);
+ nv_icmd(dev, 0x00081e, 0x00000000);
+ nv_icmd(dev, 0x00081f, 0x00000000);
+ nv_icmd(dev, 0x000848, 0x00000000);
+ nv_icmd(dev, 0x000849, 0x00000000);
+ nv_icmd(dev, 0x00084a, 0x00000000);
+ nv_icmd(dev, 0x00084b, 0x00000000);
+ nv_icmd(dev, 0x00084c, 0x00000000);
+ nv_icmd(dev, 0x00084d, 0x00000000);
+ nv_icmd(dev, 0x00084e, 0x00000000);
+ nv_icmd(dev, 0x00084f, 0x00000000);
+ nv_icmd(dev, 0x000850, 0x00000000);
+ nv_icmd(dev, 0x000851, 0x00000000);
+ nv_icmd(dev, 0x000852, 0x00000000);
+ nv_icmd(dev, 0x000853, 0x00000000);
+ nv_icmd(dev, 0x000854, 0x00000000);
+ nv_icmd(dev, 0x000855, 0x00000000);
+ nv_icmd(dev, 0x000856, 0x00000000);
+ nv_icmd(dev, 0x000857, 0x00000000);
+ nv_icmd(dev, 0x000738, 0x00000000);
+ nv_icmd(dev, 0x0006aa, 0x00000001);
+ nv_icmd(dev, 0x0006ab, 0x00000002);
+ nv_icmd(dev, 0x0006ac, 0x00000080);
+ nv_icmd(dev, 0x0006ad, 0x00000100);
+ nv_icmd(dev, 0x0006ae, 0x00000100);
+ nv_icmd(dev, 0x0006b1, 0x00000011);
+ nv_icmd(dev, 0x0006bb, 0x000000cf);
+ nv_icmd(dev, 0x0006ce, 0x2a712488);
+ nv_icmd(dev, 0x000739, 0x4085c000);
+ nv_icmd(dev, 0x00073a, 0x00000080);
+ nv_icmd(dev, 0x000786, 0x80000100);
+ nv_icmd(dev, 0x00073c, 0x00010100);
+ nv_icmd(dev, 0x00073d, 0x02800000);
+ nv_icmd(dev, 0x000787, 0x000000cf);
+ nv_icmd(dev, 0x00078c, 0x00000008);
+ nv_icmd(dev, 0x000792, 0x00000001);
+ nv_icmd(dev, 0x000794, 0x00000001);
+ nv_icmd(dev, 0x000795, 0x00000001);
+ nv_icmd(dev, 0x000796, 0x00000001);
+ nv_icmd(dev, 0x000797, 0x000000cf);
+ nv_icmd(dev, 0x000836, 0x00000001);
+ nv_icmd(dev, 0x00079a, 0x00000002);
+ nv_icmd(dev, 0x000833, 0x04444480);
+ nv_icmd(dev, 0x0007a1, 0x00000001);
+ nv_icmd(dev, 0x0007a3, 0x00000001);
+ nv_icmd(dev, 0x0007a4, 0x00000001);
+ nv_icmd(dev, 0x0007a5, 0x00000001);
+ nv_icmd(dev, 0x000831, 0x00000004);
+ nv_icmd(dev, 0x000b07, 0x00000002);
+ nv_icmd(dev, 0x000b08, 0x00000100);
+ nv_icmd(dev, 0x000b09, 0x00000100);
+ nv_icmd(dev, 0x000b0a, 0x00000001);
+ nv_icmd(dev, 0x000a04, 0x000000ff);
+ nv_icmd(dev, 0x000a0b, 0x00000040);
+ nv_icmd(dev, 0x00097f, 0x00000100);
+ nv_icmd(dev, 0x000a02, 0x00000001);
+ nv_icmd(dev, 0x000809, 0x00000007);
+ nv_icmd(dev, 0x00c221, 0x00000040);
+ nv_icmd(dev, 0x00c1b0, 0x0000000f);
+ nv_icmd(dev, 0x00c1b1, 0x0000000f);
+ nv_icmd(dev, 0x00c1b2, 0x0000000f);
+ nv_icmd(dev, 0x00c1b3, 0x0000000f);
+ nv_icmd(dev, 0x00c1b4, 0x0000000f);
+ nv_icmd(dev, 0x00c1b5, 0x0000000f);
+ nv_icmd(dev, 0x00c1b6, 0x0000000f);
+ nv_icmd(dev, 0x00c1b7, 0x0000000f);
+ nv_icmd(dev, 0x00c1b8, 0x0fac6881);
+ nv_icmd(dev, 0x00c1b9, 0x00fac688);
+ nv_icmd(dev, 0x00c401, 0x00000001);
+ nv_icmd(dev, 0x00c402, 0x00010001);
+ nv_icmd(dev, 0x00c403, 0x00000001);
+ nv_icmd(dev, 0x00c404, 0x00000001);
+ nv_icmd(dev, 0x00c40e, 0x00000020);
+ nv_icmd(dev, 0x00c500, 0x00000003);
+ nv_icmd(dev, 0x01e100, 0x00000001);
+ nv_icmd(dev, 0x001000, 0x00000002);
+ nv_icmd(dev, 0x0006aa, 0x00000001);
+ nv_icmd(dev, 0x0006ad, 0x00000100);
+ nv_icmd(dev, 0x0006ae, 0x00000100);
+ nv_icmd(dev, 0x0006b1, 0x00000011);
+ nv_icmd(dev, 0x00078c, 0x00000008);
+ nv_icmd(dev, 0x000792, 0x00000001);
+ nv_icmd(dev, 0x000794, 0x00000001);
+ nv_icmd(dev, 0x000795, 0x00000001);
+ nv_icmd(dev, 0x000796, 0x00000001);
+ nv_icmd(dev, 0x000797, 0x000000cf);
+ nv_icmd(dev, 0x00079a, 0x00000002);
+ nv_icmd(dev, 0x000833, 0x04444480);
+ nv_icmd(dev, 0x0007a1, 0x00000001);
+ nv_icmd(dev, 0x0007a3, 0x00000001);
+ nv_icmd(dev, 0x0007a4, 0x00000001);
+ nv_icmd(dev, 0x0007a5, 0x00000001);
+ nv_icmd(dev, 0x000831, 0x00000004);
+ nv_icmd(dev, 0x01e100, 0x00000001);
+ nv_icmd(dev, 0x001000, 0x00000008);
+ nv_icmd(dev, 0x000039, 0x00000000);
+ nv_icmd(dev, 0x00003a, 0x00000000);
+ nv_icmd(dev, 0x00003b, 0x00000000);
+ nv_icmd(dev, 0x000380, 0x00000001);
+ nv_icmd(dev, 0x000366, 0x00000000);
+ nv_icmd(dev, 0x000367, 0x00000000);
+ nv_icmd(dev, 0x000368, 0x00000fff);
+ nv_icmd(dev, 0x000370, 0x00000000);
+ nv_icmd(dev, 0x000371, 0x00000000);
+ nv_icmd(dev, 0x000372, 0x000fffff);
+ nv_icmd(dev, 0x000813, 0x00000006);
+ nv_icmd(dev, 0x000814, 0x00000008);
+ nv_icmd(dev, 0x000957, 0x00000003);
+ nv_icmd(dev, 0x000818, 0x00000000);
+ nv_icmd(dev, 0x000819, 0x00000000);
+ nv_icmd(dev, 0x00081a, 0x00000000);
+ nv_icmd(dev, 0x00081b, 0x00000000);
+ nv_icmd(dev, 0x00081c, 0x00000000);
+ nv_icmd(dev, 0x00081d, 0x00000000);
+ nv_icmd(dev, 0x00081e, 0x00000000);
+ nv_icmd(dev, 0x00081f, 0x00000000);
+ nv_icmd(dev, 0x000848, 0x00000000);
+ nv_icmd(dev, 0x000849, 0x00000000);
+ nv_icmd(dev, 0x00084a, 0x00000000);
+ nv_icmd(dev, 0x00084b, 0x00000000);
+ nv_icmd(dev, 0x00084c, 0x00000000);
+ nv_icmd(dev, 0x00084d, 0x00000000);
+ nv_icmd(dev, 0x00084e, 0x00000000);
+ nv_icmd(dev, 0x00084f, 0x00000000);
+ nv_icmd(dev, 0x000850, 0x00000000);
+ nv_icmd(dev, 0x000851, 0x00000000);
+ nv_icmd(dev, 0x000852, 0x00000000);
+ nv_icmd(dev, 0x000853, 0x00000000);
+ nv_icmd(dev, 0x000854, 0x00000000);
+ nv_icmd(dev, 0x000855, 0x00000000);
+ nv_icmd(dev, 0x000856, 0x00000000);
+ nv_icmd(dev, 0x000857, 0x00000000);
+ nv_icmd(dev, 0x000738, 0x00000000);
+ nv_icmd(dev, 0x000b07, 0x00000002);
+ nv_icmd(dev, 0x000b08, 0x00000100);
+ nv_icmd(dev, 0x000b09, 0x00000100);
+ nv_icmd(dev, 0x000b0a, 0x00000001);
+ nv_icmd(dev, 0x000a04, 0x000000ff);
+ nv_icmd(dev, 0x00097f, 0x00000100);
+ nv_icmd(dev, 0x000a02, 0x00000001);
+ nv_icmd(dev, 0x000809, 0x00000007);
+ nv_icmd(dev, 0x00c221, 0x00000040);
+ nv_icmd(dev, 0x00c401, 0x00000001);
+ nv_icmd(dev, 0x00c402, 0x00010001);
+ nv_icmd(dev, 0x00c403, 0x00000001);
+ nv_icmd(dev, 0x00c404, 0x00000001);
+ nv_icmd(dev, 0x00c40e, 0x00000020);
+ nv_icmd(dev, 0x00c500, 0x00000003);
+ nv_icmd(dev, 0x01e100, 0x00000001);
+ nv_icmd(dev, 0x001000, 0x00000001);
+ nv_icmd(dev, 0x000b07, 0x00000002);
+ nv_icmd(dev, 0x000b08, 0x00000100);
+ nv_icmd(dev, 0x000b09, 0x00000100);
+ nv_icmd(dev, 0x000b0a, 0x00000001);
+ nv_icmd(dev, 0x01e100, 0x00000001);
+ nv_wr32(dev, 0x400208, 0x00000000);
+}
+
+static void
+nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
+{
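+ /* inject a single method call for the given class through the PGRAPH
+ * registers at 0x404488/0x40448c
+ */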
+ nv_wr32(dev, 0x40448c, data);
+ nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
+}
+
+static void
+nve0_grctx_generate_a097(struct drm_device *dev)
+{
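+ /* seed default method state for the 0xa097 (Kepler 3D) class */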
+ nv_mthd(dev, 0xa097, 0x0800, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0840, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0880, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0900, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0940, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0980, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0804, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0844, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0884, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0904, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0944, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0984, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0808, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0848, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0888, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x08c8, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0908, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0948, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0988, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x09c8, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x080c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x084c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x088c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x08cc, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x090c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x094c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x098c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x09cc, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x0810, 0x000000cf);
+ nv_mthd(dev, 0xa097, 0x0850, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0890, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0910, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0950, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0990, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0814, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0854, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0894, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x08d4, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0914, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0954, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0994, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x09d4, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0818, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0858, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0898, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x08d8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0918, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0958, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0998, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x09d8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x081c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x085c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x089c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x091c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x095c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x099c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0820, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0860, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0920, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0960, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ca0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ce0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cf0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ca4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ce4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cf4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ca8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cb8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ce8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cf8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c3c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cbc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ccc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cdc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cfc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1da0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1db0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1de0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1df0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1da4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1db4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1de4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1df4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1da8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1db8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1de8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1df8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d3c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dbc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ddc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dfc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f3c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fa0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fa8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fb8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fe0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fe8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ff0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ff8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fa4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fbc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fdc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fe4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ff4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ffc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2000, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2040, 0x00000011);
+ nv_mthd(dev, 0xa097, 0x2080, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x20c0, 0x00000030);
+ nv_mthd(dev, 0xa097, 0x2100, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x2140, 0x00000051);
+ nv_mthd(dev, 0xa097, 0x200c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x204c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x208c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x20cc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x210c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x214c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x2010, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2050, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2090, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x20d0, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x2110, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x2150, 0x00000004);
+ nv_mthd(dev, 0xa097, 0x0380, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0384, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0388, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x038c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0700, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0710, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0720, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0730, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0704, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0714, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0724, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0734, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0708, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0718, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0728, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0738, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2800, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2804, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2808, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x280c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2810, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2814, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2818, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x281c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2820, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2824, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2828, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x282c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2830, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2834, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2838, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x283c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2840, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2844, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2848, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x284c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2850, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2854, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2858, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x285c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2860, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2864, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2868, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x286c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2870, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2874, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2878, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x287c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2880, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2884, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2888, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x288c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2890, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2894, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2898, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x289c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2900, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2904, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2908, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x290c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2910, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2914, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2918, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x291c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2920, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2924, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2928, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x292c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2930, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2934, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2938, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x293c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2940, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2944, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2948, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x294c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2950, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2954, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2958, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x295c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2960, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2964, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2968, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x296c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2970, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2974, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2978, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x297c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2980, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2984, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2988, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x298c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2990, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2994, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2998, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x299c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aa0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ac0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ae0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ba0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0be0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aa4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ac4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ae4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ba4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0be4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aa8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ac8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ae8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ba8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0be8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0acc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ab0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ad0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0af0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bf0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ab4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ad4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0af4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bf4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ca0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ce0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cf0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ca4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ce4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cf4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ca8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cb8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ce8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cf8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c0c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c1c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c2c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c3c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c4c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c5c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c6c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c7c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c8c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c9c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cac, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cbc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0ccc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cdc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cec, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cfc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0d00, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d08, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d10, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d18, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d20, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d28, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d30, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d38, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d04, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d0c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d14, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d1c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d24, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d2c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d34, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d3c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ea0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0eb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ec0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ed0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ee0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ef0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e04, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e14, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e24, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e34, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e44, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e54, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e64, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e74, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e84, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e94, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ea4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0eb4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ec4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ed4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ee4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ef4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e08, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e18, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e28, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e38, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e48, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e58, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e68, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e78, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e88, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e98, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ea8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0eb8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ec8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ed8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ee8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ef8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1e00, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e20, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e40, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e60, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e80, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ea0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ec0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ee0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e04, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e24, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e44, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e64, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e84, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ea4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ec4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ee4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e08, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e28, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e48, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e68, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e88, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ea8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ec8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ee8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e0c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e2c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e4c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e6c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e8c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eac, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ecc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eec, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e10, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e30, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e50, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e70, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e90, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eb0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ed0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ef0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e14, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e34, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e54, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e74, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e94, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1eb4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ed4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ef4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e18, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e38, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e58, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e78, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e98, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eb8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ed8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ef8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x3400, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3404, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3408, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x340c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3410, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3414, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3418, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x341c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3420, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3424, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3428, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x342c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3430, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3434, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3438, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x343c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3440, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3444, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3448, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x344c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3450, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3454, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3458, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x345c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3460, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3464, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3468, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x346c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3470, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3474, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3478, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x347c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3480, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3484, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3488, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x348c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3490, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3494, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3498, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x349c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3500, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3504, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3508, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x350c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3510, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3514, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3518, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x351c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3520, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3524, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3528, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x352c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3530, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3534, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3538, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x353c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3540, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3544, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3548, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x354c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3550, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3554, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3558, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x355c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3560, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3564, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3568, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x356c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3570, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3574, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3578, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x357c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3580, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3584, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3588, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x358c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3590, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3594, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3598, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x359c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x030c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1944, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1514, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d68, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x121c, 0x0fac6881);
+ nv_mthd(dev, 0xa097, 0x0fac, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1538, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0fe0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fe4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fe8, 0x00000014);
+ nv_mthd(dev, 0xa097, 0x0fec, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0ff0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x179c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1228, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x122c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x1230, 0x00010001);
+ nv_mthd(dev, 0xa097, 0x07f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15b4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1534, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x153c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x16b4, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x0fbc, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0fc0, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0fc4, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0fc8, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0df8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dfc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1948, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1970, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x161c, 0x000009f0);
+ nv_mthd(dev, 0xa097, 0x0dcc, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x163c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1160, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1164, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1168, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x116c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1170, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1174, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1178, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x117c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1180, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1184, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1188, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x118c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1190, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1194, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1198, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x119c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11a0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11a4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11a8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11ac, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11b0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11b4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11b8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11bc, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11c0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11c4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11c8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11cc, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11d0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11d4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11d8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11dc, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1880, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1884, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1888, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x188c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1890, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1894, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1898, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x189c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17d0, 0x000000ff);
+ nv_mthd(dev, 0xa097, 0x17d4, 0xffffffff);
+ nv_mthd(dev, 0xa097, 0x17d8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x17dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1434, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1438, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dec, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x13a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1318, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1644, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0748, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0de8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1648, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1120, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1124, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1128, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x112c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1118, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x164c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1658, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1910, 0x00000290);
+ nv_mthd(dev, 0xa097, 0x1518, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x165c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1520, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1604, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1570, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x13b0, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x13b4, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x020c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1670, 0x30201000);
+ nv_mthd(dev, 0xa097, 0x1674, 0x70605040);
+ nv_mthd(dev, 0xa097, 0x1678, 0xb8a89888);
+ nv_mthd(dev, 0xa097, 0x167c, 0xf8e8d8c8);
+ nv_mthd(dev, 0xa097, 0x166c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1680, 0x00ffff00);
+ nv_mthd(dev, 0xa097, 0x12d0, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x12d4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1684, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1688, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dac, 0x00001b02);
+ nv_mthd(dev, 0xa097, 0x0db0, 0x00001b02);
+ nv_mthd(dev, 0xa097, 0x0db4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x168c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x156c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x187c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1110, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0dc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1234, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1690, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12ac, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0790, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0794, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0798, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x079c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x077c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1000, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x10fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1290, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0218, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x12d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12dc, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x0d94, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x155c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1560, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1564, 0x00000fff);
+ nv_mthd(dev, 0xa097, 0x1574, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1578, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x157c, 0x000fffff);
+ nv_mthd(dev, 0xa097, 0x1354, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1610, 0x00000012);
+ nv_mthd(dev, 0xa097, 0x1608, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x160c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x260c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x162c, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x0210, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0320, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0324, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0328, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x032c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0330, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0334, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0338, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0750, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0760, 0x39291909);
+ nv_mthd(dev, 0xa097, 0x0764, 0x79695949);
+ nv_mthd(dev, 0xa097, 0x0768, 0xb9a99989);
+ nv_mthd(dev, 0xa097, 0x076c, 0xf9e9d9c9);
+ nv_mthd(dev, 0xa097, 0x0770, 0x30201000);
+ nv_mthd(dev, 0xa097, 0x0774, 0x70605040);
+ nv_mthd(dev, 0xa097, 0x0778, 0x00009080);
+ nv_mthd(dev, 0xa097, 0x0780, 0x39291909);
+ nv_mthd(dev, 0xa097, 0x0784, 0x79695949);
+ nv_mthd(dev, 0xa097, 0x0788, 0xb9a99989);
+ nv_mthd(dev, 0xa097, 0x078c, 0xf9e9d9c9);
+ nv_mthd(dev, 0xa097, 0x07d0, 0x30201000);
+ nv_mthd(dev, 0xa097, 0x07d4, 0x70605040);
+ nv_mthd(dev, 0xa097, 0x07d8, 0x00009080);
+ nv_mthd(dev, 0xa097, 0x037c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0740, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0744, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2600, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1918, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x191c, 0x00000900);
+ nv_mthd(dev, 0xa097, 0x1920, 0x00000405);
+ nv_mthd(dev, 0xa097, 0x1308, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1924, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x13ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x192c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x193c, 0x00002c1c);
+ nv_mthd(dev, 0xa097, 0x0d7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x02c0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1510, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1940, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ff4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ff8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x194c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1950, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1968, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1590, 0x0000003f);
+ nv_mthd(dev, 0xa097, 0x07e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x196c, 0x00000011);
+ nv_mthd(dev, 0xa097, 0x02e4, 0x0000b001);
+ nv_mthd(dev, 0xa097, 0x036c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0370, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x197c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x02d8, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x1980, 0x00000080);
+ nv_mthd(dev, 0xa097, 0x1504, 0x00000080);
+ nv_mthd(dev, 0xa097, 0x1984, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0300, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x13a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1310, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1314, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1380, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1384, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1388, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x138c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1390, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1394, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x139c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1398, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1594, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1598, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x159c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15a0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15a4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0f54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fa0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x130c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1360, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1364, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1368, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x136c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1370, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1374, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1378, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x137c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x133c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1340, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1344, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1348, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x134c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1350, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1358, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x12e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x131c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1320, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1324, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1328, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1140, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19c8, 0x00001500);
+ nv_mthd(dev, 0xa097, 0x135c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19e0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19e4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19e8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19ec, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19f0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19f4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19f8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19fc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19cc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a00, 0x00001111);
+ nv_mthd(dev, 0xa097, 0x1a04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d6c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d70, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x10f8, 0x00001010);
+ nv_mthd(dev, 0xa097, 0x0d80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0da0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1508, 0x80000000);
+ nv_mthd(dev, 0xa097, 0x150c, 0x40000000);
+ nv_mthd(dev, 0xa097, 0x1668, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0318, 0x00000008);
+ nv_mthd(dev, 0xa097, 0x031c, 0x00000008);
+ nv_mthd(dev, 0xa097, 0x0d9c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0374, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0378, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x07dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x074c, 0x00000055);
+ nv_mthd(dev, 0xa097, 0x1420, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x17bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17c4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1008, 0x00000008);
+ nv_mthd(dev, 0xa097, 0x100c, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x1010, 0x0000012c);
+ nv_mthd(dev, 0xa097, 0x0d60, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x075c, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x1018, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x101c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1020, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x1024, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1444, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1448, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x144c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0360, 0x20164010);
+ nv_mthd(dev, 0xa097, 0x0364, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x0368, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0de4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0204, 0x00000006);
+ nv_mthd(dev, 0xa097, 0x0208, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x02cc, 0x003fffff);
+ nv_mthd(dev, 0xa097, 0x02d0, 0x003fffff);
+ nv_mthd(dev, 0xa097, 0x1220, 0x00000005);
+ nv_mthd(dev, 0xa097, 0x0fdc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f98, 0x00400008);
+ nv_mthd(dev, 0xa097, 0x1284, 0x08000080);
+ nv_mthd(dev, 0xa097, 0x1450, 0x00400008);
+ nv_mthd(dev, 0xa097, 0x1454, 0x08000080);
+ nv_mthd(dev, 0xa097, 0x0214, 0x00000000);
+}
+
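+/*
+ * Default method state for object class 0x902d, the Fermi-family 2D
+ * class, replayed in the same fashion as the 3D class above.
+ */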
+static void
+nve0_grctx_generate_902d(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x3410, 0x00000000);
+}
+
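+/*
+ * Default register values for the PGRAPH block at 0x4040xx-0x4042xx.
+ * The unk* helpers that follow do the same for their respective ranges
+ * (0x4044xx, 0x4046xx, ... 0x408xxx); what most of these registers
+ * control is unknown, hence the names.
+ */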
+static void
+nve0_graph_generate_unk40xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404010, 0x0);
+ nv_wr32(dev, 0x404014, 0x0);
+ nv_wr32(dev, 0x404018, 0x0);
+ nv_wr32(dev, 0x40401c, 0x0);
+ nv_wr32(dev, 0x404020, 0x0);
+ nv_wr32(dev, 0x404024, 0xe000);
+ nv_wr32(dev, 0x404028, 0x0);
+ nv_wr32(dev, 0x4040a8, 0x0);
+ nv_wr32(dev, 0x4040ac, 0x0);
+ nv_wr32(dev, 0x4040b0, 0x0);
+ nv_wr32(dev, 0x4040b4, 0x0);
+ nv_wr32(dev, 0x4040b8, 0x0);
+ nv_wr32(dev, 0x4040bc, 0x0);
+ nv_wr32(dev, 0x4040c0, 0x0);
+ nv_wr32(dev, 0x4040c4, 0x0);
+ nv_wr32(dev, 0x4040c8, 0xf800008f);
+ nv_wr32(dev, 0x4040d0, 0x0);
+ nv_wr32(dev, 0x4040d4, 0x0);
+ nv_wr32(dev, 0x4040d8, 0x0);
+ nv_wr32(dev, 0x4040dc, 0x0);
+ nv_wr32(dev, 0x4040e0, 0x0);
+ nv_wr32(dev, 0x4040e4, 0x0);
+ nv_wr32(dev, 0x4040e8, 0x1000);
+ nv_wr32(dev, 0x4040f8, 0x0);
+ nv_wr32(dev, 0x404130, 0x0);
+ nv_wr32(dev, 0x404134, 0x0);
+ nv_wr32(dev, 0x404138, 0x20000040);
+ nv_wr32(dev, 0x404150, 0x2e);
+ nv_wr32(dev, 0x404154, 0x400);
+ nv_wr32(dev, 0x404158, 0x200);
+ nv_wr32(dev, 0x404164, 0x55);
+ nv_wr32(dev, 0x4041a0, 0x0);
+ nv_wr32(dev, 0x4041a4, 0x0);
+ nv_wr32(dev, 0x4041a8, 0x0);
+ nv_wr32(dev, 0x4041ac, 0x0);
+ nv_wr32(dev, 0x404200, 0x0);
+ nv_wr32(dev, 0x404204, 0x0);
+ nv_wr32(dev, 0x404208, 0x0);
+ nv_wr32(dev, 0x40420c, 0x0);
+}
+
+static void
+nve0_graph_generate_unk44xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404404, 0x0);
+ nv_wr32(dev, 0x404408, 0x0);
+ nv_wr32(dev, 0x40440c, 0x0);
+ nv_wr32(dev, 0x404410, 0x0);
+ nv_wr32(dev, 0x404414, 0x0);
+ nv_wr32(dev, 0x404418, 0x0);
+ nv_wr32(dev, 0x40441c, 0x0);
+ nv_wr32(dev, 0x404420, 0x0);
+ nv_wr32(dev, 0x404424, 0x0);
+ nv_wr32(dev, 0x404428, 0x0);
+ nv_wr32(dev, 0x40442c, 0x0);
+ nv_wr32(dev, 0x404430, 0x0);
+ nv_wr32(dev, 0x404434, 0x0);
+ nv_wr32(dev, 0x404438, 0x0);
+ nv_wr32(dev, 0x404460, 0x0);
+ nv_wr32(dev, 0x404464, 0x0);
+ nv_wr32(dev, 0x404468, 0xffffff);
+ nv_wr32(dev, 0x40446c, 0x0);
+ nv_wr32(dev, 0x404480, 0x1);
+ nv_wr32(dev, 0x404498, 0x1);
+}
+
+static void
+nve0_graph_generate_unk46xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404604, 0x14);
+ nv_wr32(dev, 0x404608, 0x0);
+ nv_wr32(dev, 0x40460c, 0x3fff);
+ nv_wr32(dev, 0x404610, 0x100);
+ nv_wr32(dev, 0x404618, 0x0);
+ nv_wr32(dev, 0x40461c, 0x0);
+ nv_wr32(dev, 0x404620, 0x0);
+ nv_wr32(dev, 0x404624, 0x0);
+ nv_wr32(dev, 0x40462c, 0x0);
+ nv_wr32(dev, 0x404630, 0x0);
+ nv_wr32(dev, 0x404640, 0x0);
+ nv_wr32(dev, 0x404654, 0x0);
+ nv_wr32(dev, 0x404660, 0x0);
+ nv_wr32(dev, 0x404678, 0x0);
+ nv_wr32(dev, 0x40467c, 0x2);
+ nv_wr32(dev, 0x404680, 0x0);
+ nv_wr32(dev, 0x404684, 0x0);
+ nv_wr32(dev, 0x404688, 0x0);
+ nv_wr32(dev, 0x40468c, 0x0);
+ nv_wr32(dev, 0x404690, 0x0);
+ nv_wr32(dev, 0x404694, 0x0);
+ nv_wr32(dev, 0x404698, 0x0);
+ nv_wr32(dev, 0x40469c, 0x0);
+ nv_wr32(dev, 0x4046a0, 0x7f0080);
+ nv_wr32(dev, 0x4046a4, 0x0);
+ nv_wr32(dev, 0x4046a8, 0x0);
+ nv_wr32(dev, 0x4046ac, 0x0);
+ nv_wr32(dev, 0x4046b0, 0x0);
+ nv_wr32(dev, 0x4046b4, 0x0);
+ nv_wr32(dev, 0x4046b8, 0x0);
+ nv_wr32(dev, 0x4046bc, 0x0);
+ nv_wr32(dev, 0x4046c0, 0x0);
+ nv_wr32(dev, 0x4046c8, 0x0);
+ nv_wr32(dev, 0x4046cc, 0x0);
+ nv_wr32(dev, 0x4046d0, 0x0);
+}
+
+static void
+nve0_graph_generate_unk47xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404700, 0x0);
+ nv_wr32(dev, 0x404704, 0x0);
+ nv_wr32(dev, 0x404708, 0x0);
+ nv_wr32(dev, 0x404718, 0x0);
+ nv_wr32(dev, 0x40471c, 0x0);
+ nv_wr32(dev, 0x404720, 0x0);
+ nv_wr32(dev, 0x404724, 0x0);
+ nv_wr32(dev, 0x404728, 0x0);
+ nv_wr32(dev, 0x40472c, 0x0);
+ nv_wr32(dev, 0x404730, 0x0);
+ nv_wr32(dev, 0x404734, 0x100);
+ nv_wr32(dev, 0x404738, 0x0);
+ nv_wr32(dev, 0x40473c, 0x0);
+ nv_wr32(dev, 0x404744, 0x0);
+ nv_wr32(dev, 0x404748, 0x0);
+ nv_wr32(dev, 0x404754, 0x0);
+}
+
+static void
+nve0_graph_generate_unk58xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x405800, 0xf8000bf);
+ nv_wr32(dev, 0x405830, 0x2180648);
+ nv_wr32(dev, 0x405834, 0x8000000);
+ nv_wr32(dev, 0x405838, 0x0);
+ nv_wr32(dev, 0x405854, 0x0);
+ nv_wr32(dev, 0x405870, 0x1);
+ nv_wr32(dev, 0x405874, 0x1);
+ nv_wr32(dev, 0x405878, 0x1);
+ nv_wr32(dev, 0x40587c, 0x1);
+ nv_wr32(dev, 0x405a00, 0x0);
+ nv_wr32(dev, 0x405a04, 0x0);
+ nv_wr32(dev, 0x405a18, 0x0);
+ nv_wr32(dev, 0x405b00, 0x0);
+ nv_wr32(dev, 0x405b10, 0x1000);
+}
+
+static void
+nve0_graph_generate_unk60xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x406020, 0x4103c1);
+ nv_wr32(dev, 0x406028, 0x1);
+ nv_wr32(dev, 0x40602c, 0x1);
+ nv_wr32(dev, 0x406030, 0x1);
+ nv_wr32(dev, 0x406034, 0x1);
+}
+
+static void
+nve0_graph_generate_unk64xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x4064a8, 0x0);
+ nv_wr32(dev, 0x4064ac, 0x3fff);
+ nv_wr32(dev, 0x4064b4, 0x0);
+ nv_wr32(dev, 0x4064b8, 0x0);
+ nv_wr32(dev, 0x4064c0, 0x801a00f0);
+ nv_wr32(dev, 0x4064c4, 0x192ffff);
+ nv_wr32(dev, 0x4064c8, 0x1800600);
+ nv_wr32(dev, 0x4064cc, 0x0);
+ nv_wr32(dev, 0x4064d0, 0x0);
+ nv_wr32(dev, 0x4064d4, 0x0);
+ nv_wr32(dev, 0x4064d8, 0x0);
+ nv_wr32(dev, 0x4064dc, 0x0);
+ nv_wr32(dev, 0x4064e0, 0x0);
+ nv_wr32(dev, 0x4064e4, 0x0);
+ nv_wr32(dev, 0x4064e8, 0x0);
+ nv_wr32(dev, 0x4064ec, 0x0);
+ nv_wr32(dev, 0x4064fc, 0x22a);
+}
+
+static void
+nve0_graph_generate_unk70xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x407040, 0x0);
+}
+
+static void
+nve0_graph_generate_unk78xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x407804, 0x23);
+ nv_wr32(dev, 0x40780c, 0xa418820);
+ nv_wr32(dev, 0x407810, 0x62080e6);
+ nv_wr32(dev, 0x407814, 0x20398a4);
+ nv_wr32(dev, 0x407818, 0xe629062);
+ nv_wr32(dev, 0x40781c, 0xa418820);
+ nv_wr32(dev, 0x407820, 0xe6);
+ nv_wr32(dev, 0x4078bc, 0x103);
+}
+
+static void
+nve0_graph_generate_unk80xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x408000, 0x0);
+ nv_wr32(dev, 0x408004, 0x0);
+ nv_wr32(dev, 0x408008, 0x30);
+ nv_wr32(dev, 0x40800c, 0x0);
+ nv_wr32(dev, 0x408010, 0x0);
+ nv_wr32(dev, 0x408014, 0x69);
+ nv_wr32(dev, 0x408018, 0xe100e100);
+ nv_wr32(dev, 0x408064, 0x0);
+}
+
+static void
+nve0_graph_generate_unk88xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x408800, 0x2802a3c);
+ nv_wr32(dev, 0x408804, 0x40);
+ nv_wr32(dev, 0x408808, 0x1043e005);
+ nv_wr32(dev, 0x408840, 0xb);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x62000001);
+ nv_wr32(dev, 0x408908, 0xc8102f);
+ nv_wr32(dev, 0x408980, 0x11d);
+}
+
+static void
+nve0_graph_generate_gpc(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x418380, 0x16);
+ nv_wr32(dev, 0x418400, 0x38004e00);
+ nv_wr32(dev, 0x418404, 0x71e0ffff);
+ nv_wr32(dev, 0x41840c, 0x1008);
+ nv_wr32(dev, 0x418410, 0xfff0fff);
+ nv_wr32(dev, 0x418414, 0x2200fff);
+ nv_wr32(dev, 0x418450, 0x0);
+ nv_wr32(dev, 0x418454, 0x0);
+ nv_wr32(dev, 0x418458, 0x0);
+ nv_wr32(dev, 0x41845c, 0x0);
+ nv_wr32(dev, 0x418460, 0x0);
+ nv_wr32(dev, 0x418464, 0x0);
+ nv_wr32(dev, 0x418468, 0x1);
+ nv_wr32(dev, 0x41846c, 0x0);
+ nv_wr32(dev, 0x418470, 0x0);
+ nv_wr32(dev, 0x418600, 0x1f);
+ nv_wr32(dev, 0x418684, 0xf);
+ nv_wr32(dev, 0x418700, 0x2);
+ nv_wr32(dev, 0x418704, 0x80);
+ nv_wr32(dev, 0x418708, 0x0);
+ nv_wr32(dev, 0x41870c, 0x0);
+ nv_wr32(dev, 0x418710, 0x0);
+ nv_wr32(dev, 0x418800, 0x7006860a);
+ nv_wr32(dev, 0x418808, 0x0);
+ nv_wr32(dev, 0x41880c, 0x0);
+ nv_wr32(dev, 0x418810, 0x0);
+ nv_wr32(dev, 0x418828, 0x44);
+ nv_wr32(dev, 0x418830, 0x10000001);
+ nv_wr32(dev, 0x4188d8, 0x8);
+ nv_wr32(dev, 0x4188e0, 0x1000000);
+ nv_wr32(dev, 0x4188e8, 0x0);
+ nv_wr32(dev, 0x4188ec, 0x0);
+ nv_wr32(dev, 0x4188f0, 0x0);
+ nv_wr32(dev, 0x4188f4, 0x0);
+ nv_wr32(dev, 0x4188f8, 0x0);
+ nv_wr32(dev, 0x4188fc, 0x20100018);
+ nv_wr32(dev, 0x41891c, 0xff00ff);
+ nv_wr32(dev, 0x418924, 0x0);
+ nv_wr32(dev, 0x418928, 0xffff00);
+ nv_wr32(dev, 0x41892c, 0xff00);
+ nv_wr32(dev, 0x418a00, 0x0);
+ nv_wr32(dev, 0x418a04, 0x0);
+ nv_wr32(dev, 0x418a08, 0x0);
+ nv_wr32(dev, 0x418a0c, 0x10000);
+ nv_wr32(dev, 0x418a10, 0x0);
+ nv_wr32(dev, 0x418a14, 0x0);
+ nv_wr32(dev, 0x418a18, 0x0);
+ nv_wr32(dev, 0x418a20, 0x0);
+ nv_wr32(dev, 0x418a24, 0x0);
+ nv_wr32(dev, 0x418a28, 0x0);
+ nv_wr32(dev, 0x418a2c, 0x10000);
+ nv_wr32(dev, 0x418a30, 0x0);
+ nv_wr32(dev, 0x418a34, 0x0);
+ nv_wr32(dev, 0x418a38, 0x0);
+ nv_wr32(dev, 0x418a40, 0x0);
+ nv_wr32(dev, 0x418a44, 0x0);
+ nv_wr32(dev, 0x418a48, 0x0);
+ nv_wr32(dev, 0x418a4c, 0x10000);
+ nv_wr32(dev, 0x418a50, 0x0);
+ nv_wr32(dev, 0x418a54, 0x0);
+ nv_wr32(dev, 0x418a58, 0x0);
+ nv_wr32(dev, 0x418a60, 0x0);
+ nv_wr32(dev, 0x418a64, 0x0);
+ nv_wr32(dev, 0x418a68, 0x0);
+ nv_wr32(dev, 0x418a6c, 0x10000);
+ nv_wr32(dev, 0x418a70, 0x0);
+ nv_wr32(dev, 0x418a74, 0x0);
+ nv_wr32(dev, 0x418a78, 0x0);
+ nv_wr32(dev, 0x418a80, 0x0);
+ nv_wr32(dev, 0x418a84, 0x0);
+ nv_wr32(dev, 0x418a88, 0x0);
+ nv_wr32(dev, 0x418a8c, 0x10000);
+ nv_wr32(dev, 0x418a90, 0x0);
+ nv_wr32(dev, 0x418a94, 0x0);
+ nv_wr32(dev, 0x418a98, 0x0);
+ nv_wr32(dev, 0x418aa0, 0x0);
+ nv_wr32(dev, 0x418aa4, 0x0);
+ nv_wr32(dev, 0x418aa8, 0x0);
+ nv_wr32(dev, 0x418aac, 0x10000);
+ nv_wr32(dev, 0x418ab0, 0x0);
+ nv_wr32(dev, 0x418ab4, 0x0);
+ nv_wr32(dev, 0x418ab8, 0x0);
+ nv_wr32(dev, 0x418ac0, 0x0);
+ nv_wr32(dev, 0x418ac4, 0x0);
+ nv_wr32(dev, 0x418ac8, 0x0);
+ nv_wr32(dev, 0x418acc, 0x10000);
+ nv_wr32(dev, 0x418ad0, 0x0);
+ nv_wr32(dev, 0x418ad4, 0x0);
+ nv_wr32(dev, 0x418ad8, 0x0);
+ nv_wr32(dev, 0x418ae0, 0x0);
+ nv_wr32(dev, 0x418ae4, 0x0);
+ nv_wr32(dev, 0x418ae8, 0x0);
+ nv_wr32(dev, 0x418aec, 0x10000);
+ nv_wr32(dev, 0x418af0, 0x0);
+ nv_wr32(dev, 0x418af4, 0x0);
+ nv_wr32(dev, 0x418af8, 0x0);
+ nv_wr32(dev, 0x418b00, 0x6);
+ nv_wr32(dev, 0x418b08, 0xa418820);
+ nv_wr32(dev, 0x418b0c, 0x62080e6);
+ nv_wr32(dev, 0x418b10, 0x20398a4);
+ nv_wr32(dev, 0x418b14, 0xe629062);
+ nv_wr32(dev, 0x418b18, 0xa418820);
+ nv_wr32(dev, 0x418b1c, 0xe6);
+ nv_wr32(dev, 0x418bb8, 0x103);
+ nv_wr32(dev, 0x418c08, 0x1);
+ nv_wr32(dev, 0x418c10, 0x0);
+ nv_wr32(dev, 0x418c14, 0x0);
+ nv_wr32(dev, 0x418c18, 0x0);
+ nv_wr32(dev, 0x418c1c, 0x0);
+ nv_wr32(dev, 0x418c20, 0x0);
+ nv_wr32(dev, 0x418c24, 0x0);
+ nv_wr32(dev, 0x418c28, 0x0);
+ nv_wr32(dev, 0x418c2c, 0x0);
+ nv_wr32(dev, 0x418c40, 0xffffffff);
+ nv_wr32(dev, 0x418c6c, 0x1);
+ nv_wr32(dev, 0x418c80, 0x20200004);
+ nv_wr32(dev, 0x418c8c, 0x1);
+ nv_wr32(dev, 0x419000, 0x780);
+ nv_wr32(dev, 0x419004, 0x0);
+ nv_wr32(dev, 0x419008, 0x0);
+ nv_wr32(dev, 0x419014, 0x4);
+}
+
+static void
+nve0_graph_generate_tpc(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x419848, 0x0);
+ nv_wr32(dev, 0x419864, 0x129);
+ nv_wr32(dev, 0x419888, 0x0);
+ nv_wr32(dev, 0x419a00, 0xf0);
+ nv_wr32(dev, 0x419a04, 0x1);
+ nv_wr32(dev, 0x419a08, 0x21);
+ nv_wr32(dev, 0x419a0c, 0x20000);
+ nv_wr32(dev, 0x419a10, 0x0);
+ nv_wr32(dev, 0x419a14, 0x200);
+ nv_wr32(dev, 0x419a1c, 0xc000);
+ nv_wr32(dev, 0x419a20, 0x800);
+ nv_wr32(dev, 0x419a30, 0x1);
+ nv_wr32(dev, 0x419ac4, 0x37f440);
+ nv_wr32(dev, 0x419c00, 0xa);
+ nv_wr32(dev, 0x419c04, 0x80000006);
+ nv_wr32(dev, 0x419c08, 0x2);
+ nv_wr32(dev, 0x419c20, 0x0);
+ nv_wr32(dev, 0x419c24, 0x84210);
+ nv_wr32(dev, 0x419c28, 0x3efbefbe);
+ nv_wr32(dev, 0x419ce8, 0x0);
+ nv_wr32(dev, 0x419cf4, 0x3203);
+ nv_wr32(dev, 0x419e04, 0x0);
+ nv_wr32(dev, 0x419e08, 0x0);
+ nv_wr32(dev, 0x419e0c, 0x0);
+ nv_wr32(dev, 0x419e10, 0x402);
+ nv_wr32(dev, 0x419e44, 0x13eff2);
+ nv_wr32(dev, 0x419e48, 0x0);
+ nv_wr32(dev, 0x419e4c, 0x7f);
+ nv_wr32(dev, 0x419e50, 0x0);
+ nv_wr32(dev, 0x419e54, 0x0);
+ nv_wr32(dev, 0x419e58, 0x0);
+ nv_wr32(dev, 0x419e5c, 0x0);
+ nv_wr32(dev, 0x419e60, 0x0);
+ nv_wr32(dev, 0x419e64, 0x0);
+ nv_wr32(dev, 0x419e68, 0x0);
+ nv_wr32(dev, 0x419e6c, 0x0);
+ nv_wr32(dev, 0x419e70, 0x0);
+ nv_wr32(dev, 0x419e74, 0x0);
+ nv_wr32(dev, 0x419e78, 0x0);
+ nv_wr32(dev, 0x419e7c, 0x0);
+ nv_wr32(dev, 0x419e80, 0x0);
+ nv_wr32(dev, 0x419e84, 0x0);
+ nv_wr32(dev, 0x419e88, 0x0);
+ nv_wr32(dev, 0x419e8c, 0x0);
+ nv_wr32(dev, 0x419e90, 0x0);
+ nv_wr32(dev, 0x419e94, 0x0);
+ nv_wr32(dev, 0x419e98, 0x0);
+ nv_wr32(dev, 0x419eac, 0x1fcf);
+ nv_wr32(dev, 0x419eb0, 0xd3f);
+ nv_wr32(dev, 0x419ec8, 0x1304f);
+ nv_wr32(dev, 0x419f30, 0x0);
+ nv_wr32(dev, 0x419f34, 0x0);
+ nv_wr32(dev, 0x419f38, 0x0);
+ nv_wr32(dev, 0x419f3c, 0x0);
+ nv_wr32(dev, 0x419f40, 0x0);
+ nv_wr32(dev, 0x419f44, 0x0);
+ nv_wr32(dev, 0x419f48, 0x0);
+ nv_wr32(dev, 0x419f4c, 0x0);
+ nv_wr32(dev, 0x419f58, 0x0);
+ nv_wr32(dev, 0x419f78, 0xb);
+}
+
+static void
+nve0_graph_generate_tpcunk(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x41be24, 0x6);
+ nv_wr32(dev, 0x41bec0, 0x12180000);
+ nv_wr32(dev, 0x41bec4, 0x37f7f);
+ nv_wr32(dev, 0x41bee4, 0x6480430);
+ nv_wr32(dev, 0x41bf00, 0xa418820);
+ nv_wr32(dev, 0x41bf04, 0x62080e6);
+ nv_wr32(dev, 0x41bf08, 0x20398a4);
+ nv_wr32(dev, 0x41bf0c, 0xe629062);
+ nv_wr32(dev, 0x41bf10, 0xa418820);
+ nv_wr32(dev, 0x41bf14, 0xe6);
+ nv_wr32(dev, 0x41bfd0, 0x900103);
+ nv_wr32(dev, 0x41bfe0, 0x400001);
+ nv_wr32(dev, 0x41bfe4, 0x0);
+}
+
+int
+nve0_grctx_generate(struct nouveau_channel *chan)
+{
+ struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ u32 data[6] = {}, data2[2] = {}, tmp;
+ u32 tpc_set = 0, tpc_mask = 0;
+ u8 tpcnr[GPC_MAX], a, b;
+ u8 shift, ntpcv;
+ int i, gpc, tpc, id;
+
+ nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x400204, 0x00000000);
+ nv_wr32(dev, 0x400208, 0x00000000);
+
+ nve0_graph_generate_unk40xx(dev);
+ nve0_graph_generate_unk44xx(dev);
+ nve0_graph_generate_unk46xx(dev);
+ nve0_graph_generate_unk47xx(dev);
+ nve0_graph_generate_unk58xx(dev);
+ nve0_graph_generate_unk60xx(dev);
+ nve0_graph_generate_unk64xx(dev);
+ nve0_graph_generate_unk70xx(dev);
+ nve0_graph_generate_unk78xx(dev);
+ nve0_graph_generate_unk80xx(dev);
+ nve0_graph_generate_unk88xx(dev);
+ nve0_graph_generate_gpc(dev);
+ nve0_graph_generate_tpc(dev);
+ nve0_graph_generate_tpcunk(dev);
+
+ nv_wr32(dev, 0x404154, 0x0);
+
+ for (i = 0; i < grch->mmio_nr * 8; i += 8) {
+ u32 reg = nv_ro32(grch->mmio, i + 0);
+ u32 val = nv_ro32(grch->mmio, i + 4);
+ nv_wr32(dev, reg, val);
+ }
+
+ nv_wr32(dev, 0x418c6c, 0x1);
+ nv_wr32(dev, 0x41980c, 0x10);
+ nv_wr32(dev, 0x41be08, 0x4);
+ nv_wr32(dev, 0x4064c0, 0x801a00f0);
+ nv_wr32(dev, 0x405800, 0xf8000bf);
+ nv_wr32(dev, 0x419c00, 0xa);
+
+ for (tpc = 0, id = 0; tpc < 4; tpc++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ if (tpc < priv->tpc_nr[gpc]) {
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0698), id);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x04e8), id);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0088), id++);
+ }
+
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+ }
+ }
+
+ tmp = 0;
+ for (i = 0; i < priv->gpc_nr; i++)
+ tmp |= priv->tpc_nr[i] << (i * 4);
+ nv_wr32(dev, 0x406028, tmp);
+ nv_wr32(dev, 0x405870, tmp);
+
+ nv_wr32(dev, 0x40602c, 0x0);
+ nv_wr32(dev, 0x405874, 0x0);
+ nv_wr32(dev, 0x406030, 0x0);
+ nv_wr32(dev, 0x405878, 0x0);
+ nv_wr32(dev, 0x406034, 0x0);
+ nv_wr32(dev, 0x40587c, 0x0);
+
+ /* calculate first set of magics */
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+
+ gpc = -1;
+ for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpcnr[gpc]--;
+
+ data[tpc / 6] |= gpc << ((tpc % 6) * 5);
+ }
+
+ for (; tpc < 32; tpc++)
+ data[tpc / 6] |= 7 << ((tpc % 6) * 5);
+
+ /* and the second... */
+ shift = 0;
+ ntpcv = priv->tpc_total;
+ while (!(ntpcv & (1 << 4))) {
+ ntpcv <<= 1;
+ shift++;
+ }
+
+ data2[0] = ntpcv << 16;
+ data2[0] |= shift << 21;
+ data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+ data2[0] |= priv->tpc_total << 8;
+ data2[0] |= priv->magic_not_rop_nr;
+ for (i = 1; i < 7; i++)
+ data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+ /* and write it all to the various parts of PGRAPH */
+ nv_wr32(dev, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
+
+ nv_wr32(dev, 0x41bfd0, data2[0]);
+ nv_wr32(dev, 0x41bfe4, data2[1]);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x41bf00 + (i * 4), data[i]);
+
+ nv_wr32(dev, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x40780c + (i * 4), data[i]);
+
+
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+ tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
+
+ for (i = 0, gpc = -1, b = -1; i < 32; i++) {
+ a = (i * (priv->tpc_total - 1)) / 32;
+ if (a != b) {
+ b = a;
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ tpc_set |= 1 << ((gpc * 8) + tpc);
+ }
+
+ nv_wr32(dev, 0x406800 + (i * 0x20), tpc_set);
+ nv_wr32(dev, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
+ }
+
+ for (i = 0; i < 8; i++)
+ nv_wr32(dev, 0x4064d0 + (i * 0x04), 0x00000000);
+
+ nv_wr32(dev, 0x405b00, 0x201);
+ nv_wr32(dev, 0x408850, 0x2);
+ nv_wr32(dev, 0x408958, 0x2);
+ nv_wr32(dev, 0x419f78, 0xa);
+
+ nve0_grctx_generate_icmd(dev);
+ nve0_grctx_generate_a097(dev);
+ nve0_grctx_generate_902d(dev);
+
+ nv_mask(dev, 0x000260, 0x00000001, 0x00000001);
+ nv_wr32(dev, 0x418800, 0x7026860a); //XXX
+ nv_wr32(dev, 0x41be10, 0x00bb8bc7); //XXX
+ return 0;
+}
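
The "first set of magics" computed above maps each global TPC index to a GPC in round-robin order and packs the result five bits per TPC, six per 32-bit word, padding the unused slots with 7 before the words are written out at 0x418b08, 0x41bf00 and 0x40780c. A minimal standalone sketch of that packing, using made-up GPC/TPC counts rather than values read from hardware:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* hypothetical configuration: 4 GPCs carrying 2/2/1/1 TPCs */
	const uint8_t tpc_nr[4] = { 2, 2, 1, 1 };
	const int gpc_nr = 4, tpc_total = 6;
	uint8_t tpcnr[4];
	uint32_t data[6] = { 0 };
	int gpc = -1, tpc, i;

	memcpy(tpcnr, tpc_nr, sizeof(tpcnr));
	for (tpc = 0; tpc < tpc_total; tpc++) {
		do {
			gpc = (gpc + 1) % gpc_nr;	/* round-robin over GPCs... */
		} while (!tpcnr[gpc]);			/* ...skipping exhausted ones */
		tpcnr[gpc]--;
		data[tpc / 6] |= gpc << ((tpc % 6) * 5);	/* 5 bits per TPC */
	}
	for (; tpc < 32; tpc++)
		data[tpc / 6] |= 7 << ((tpc % 6) * 5);		/* pad unused slots */

	for (i = 0; i < 6; i++)
		printf("data[%d] = 0x%08x\n", i, data[i]);
	return 0;
}

The "second set" (ntpcv/shift and the modular terms in data2[]) stays as in the function above.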
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 9d83729956ff..a6598fd66423 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -70,8 +70,9 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
- radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \
- radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o si_blit_shaders.o
+ evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
+ atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
+ si_blit_shaders.o radeon_prime.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index af1054f8202a..01d77d1554f4 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -591,8 +591,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
- /* if (connector && connector->display_info.bpc)
- bpc = connector->display_info.bpc; */
+ bpc = radeon_get_monitor_bpc(connector);
encoder_mode = atombios_get_encoder_mode(encoder);
is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
@@ -968,9 +967,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
int dp_clock;
-
- /* if (connector->display_info.bpc)
- bpc = connector->display_info.bpc; */
+ bpc = radeon_get_monitor_bpc(connector);
switch (encoder_mode) {
case ATOM_ENCODER_MODE_DP_MST:
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index c57d85664e77..5131b3b0f7d2 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -405,13 +405,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
/* get bpc from the EDID */
static int convert_bpc_to_bpp(int bpc)
{
-#if 0
if (bpc == 0)
return 24;
else
return bpc * 3;
-#endif
- return 24;
}
/* get the max pix clock supported by the link rate and lane num */
@@ -463,7 +460,7 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
- int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
+ int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int max_link_rate = dp_get_max_link_rate(dpcd);
int max_lane_num = dp_get_max_lane_number(dpcd);
int lane_num;
@@ -482,7 +479,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
- int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
+ int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int lane_num, max_pix_clock;
if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
@@ -533,6 +530,23 @@ u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
dig_connector->dp_i2c_bus->rec.i2c_id, 0);
}
+static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 buf[3];
+
+ if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+ return;
+
+ if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0))
+ DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0))
+ DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+}
+
bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
@@ -546,6 +560,9 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
for (i = 0; i < 8; i++)
DRM_DEBUG_KMS("%02x ", msg[i]);
DRM_DEBUG_KMS("\n");
+
+ radeon_dp_probe_oui(radeon_connector);
+
return true;
}
dig_connector->dpcd[0] = 0;
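
With the #if 0 removed above, convert_bpc_to_bpp() now honours the bpc reported by the monitor (falling back to 24 bpp when it is unknown), and the lane-count/link-clock helpers feed that bpp into their bandwidth checks. A small sketch of the effect on the achievable pixel clock; the link_rate * lanes * 8 / bpp formula and the example link parameters are assumptions for illustration, not taken from the patch:

#include <stdio.h>

/* mirrors the now-enabled conversion: 0 means "unknown", assume 24 bpp */
static int convert_bpc_to_bpp(int bpc)
{
	return bpc ? bpc * 3 : 24;
}

/* assumed helper: max pixel clock (kHz) a link can carry for a given bpp */
static int dp_max_pix_clock(int link_rate_khz, int lane_num, int bpp)
{
	return (link_rate_khz * lane_num * 8) / bpp;
}

int main(void)
{
	/* hypothetical sink: 2.7 GHz link rate, 4 lanes */
	const int link_rate = 270000, lanes = 4;
	const int bpcs[] = { 0, 6, 8, 10, 12 };
	unsigned int i;

	for (i = 0; i < sizeof(bpcs) / sizeof(bpcs[0]); i++) {
		int bpp = convert_bpc_to_bpp(bpcs[i]);
		printf("bpc=%2d -> bpp=%2d, max pixel clock %d kHz\n",
		       bpcs[i], bpp, dp_max_pix_clock(link_rate, lanes, bpp));
	}
	return 0;
}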
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2d39f9977e00..e7b1ec5ae8c6 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -545,7 +545,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
hpd_id = radeon_connector->hpd.hpd;
- /* bpc = connector->display_info.bpc; */
+ bpc = radeon_get_monitor_bpc(connector);
}
/* no dig encoder assigned */
@@ -1163,7 +1163,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- /* bpc = connector->display_info.bpc; */
+ bpc = radeon_get_monitor_bpc(connector);
}
memset(&args, 0, sizeof(args));
@@ -1926,7 +1926,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
r600_hdmi_enable(encoder);
- r600_hdmi_setmode(encoder, adjusted_mode);
+ if (ASIC_IS_DCE4(rdev))
+ evergreen_hdmi_setmode(encoder, adjusted_mode);
+ else
+ r600_hdmi_setmode(encoder, adjusted_mode);
}
}
@@ -2081,6 +2084,7 @@ radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
+ struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
@@ -2089,8 +2093,16 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
(radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (dig)
+ if (dig) {
dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+ if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) {
+ if (rdev->family >= CHIP_R600)
+ dig->afmt = rdev->mode_info.afmt[dig->dig_encoder];
+ else
+ /* RS600/690/740 have only 1 afmt block */
+ dig->afmt = rdev->mode_info.afmt[0];
+ }
+ }
}
radeon_atom_output_lock(encoder, true);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index cfa372cb1cb3..58991af90502 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2424,27 +2424,18 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
u32 srbm_status;
u32 grbm_status;
u32 grbm_status_se0, grbm_status_se1;
- struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
- int r;
srbm_status = RREG32(SRBM_STATUS);
grbm_status = RREG32(GRBM_STATUS);
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, ring);
+ radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- ring->rptr = RREG32(CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2594,6 +2585,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+ u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2614,6 +2606,13 @@ int evergreen_irq_set(struct radeon_device *rdev)
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+
if (rdev->family >= CHIP_CAYMAN) {
/* enable CP interrupts on all rings */
if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
@@ -2690,6 +2689,30 @@ int evergreen_irq_set(struct radeon_device *rdev)
DRM_DEBUG("evergreen_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
+ if (rdev->irq.afmt[0]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
+ afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[1]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
+ afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[2]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
+ afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[3]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
+ afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[4]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
+ afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[5]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
+ afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
if (rdev->irq.gui_idle) {
DRM_DEBUG("gui idle\n");
grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
@@ -2732,6 +2755,13 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
+
return 0;
}
@@ -2756,6 +2786,13 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
+ rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+
if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
@@ -2829,6 +2866,36 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
+ if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
+ }
}
void evergreen_irq_disable(struct radeon_device *rdev)
@@ -2878,6 +2945,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
+ bool queue_hdmi = false;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
@@ -3111,6 +3179,55 @@ restart_ih:
break;
}
break;
+ case 44: /* hdmi */
+ switch (src_data) {
+ case 0:
+ if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
+ }
+ break;
+ case 1:
+ if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
+ }
+ break;
+ case 2:
+ if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI2\n");
+ }
+ break;
+ case 3:
+ if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI3\n");
+ }
+ break;
+ case 4:
+ if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI4\n");
+ }
+ break;
+ case 5:
+ if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI5\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
@@ -3154,6 +3271,8 @@ restart_ih:
goto restart_ih;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
+ if (queue_hdmi)
+ schedule_work(&rdev->audio_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
@@ -3248,12 +3367,9 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
r = r600_audio_init(rdev);
if (r) {
@@ -3319,10 +3435,6 @@ int evergreen_init(struct radeon_device *rdev)
{
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -3434,7 +3546,6 @@ void evergreen_fini(struct radeon_device *rdev)
evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 222acd2d33df..1e96bd458cfd 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -637,7 +637,6 @@ int evergreen_blit_init(struct radeon_device *rdev)
if (rdev->r600_blit.shader_obj)
goto done;
- mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
if (rdev->family < CHIP_CAYMAN)
@@ -669,7 +668,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
obj_size = ALIGN(obj_size, 256);
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_obj);
+ NULL, &rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("evergreen failed to allocate shader\n");
return r;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 70089d32b80f..4e7dd2b4843d 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -1057,7 +1057,7 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
uint32_t header, h_idx, reg, wait_reg_mem_info;
volatile uint32_t *ib;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
/* parse the WAIT_REG_MEM */
r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
@@ -1215,7 +1215,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
if (!(evergreen_reg_safe_bm[i] & m))
return 0;
}
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
switch (reg) {
/* force following reg to 0 in an attempt to disable out buffer
* which will need us to better understand how it works to perform
@@ -1896,7 +1896,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u32 idx_value;
track = (struct evergreen_cs_track *)p->track;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
idx_value = radeon_get_ib_value(p, idx);
@@ -2610,8 +2610,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
- for (r = 0; r < p->ib->length_dw; r++) {
- printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
mdelay(1);
}
#endif
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
new file mode 100644
index 000000000000..a51f880985f8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ * Rafał Miłecki
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+#include "atom.h"
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr.cts_32khz));
+ WREG32(HDMI_ACR_32_1 + offset, acr.n_32khz);
+
+ WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr.cts_44_1khz));
+ WREG32(HDMI_ACR_44_1 + offset, acr.n_44_1khz);
+
+ WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr.cts_48khz));
+ WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
+}
+
+/*
+ * calculate the crc for a given info frame
+ */
+static void evergreen_hdmi_infoframe_checksum(uint8_t packetType,
+ uint8_t versionNumber,
+ uint8_t length,
+ uint8_t *frame)
+{
+ int i;
+ frame[0] = packetType + versionNumber + length;
+ for (i = 1; i <= length; i++)
+ frame[0] += frame[i];
+ frame[0] = 0x100 - frame[0];
+}
+
+/*
+ * build a HDMI Video Info Frame
+ */
+static void evergreen_hdmi_videoinfoframe(
+ struct drm_encoder *encoder,
+ uint8_t color_format,
+ int active_information_present,
+ uint8_t active_format_aspect_ratio,
+ uint8_t scan_information,
+ uint8_t colorimetry,
+ uint8_t ex_colorimetry,
+ uint8_t quantization,
+ int ITC,
+ uint8_t picture_aspect_ratio,
+ uint8_t video_format_identification,
+ uint8_t pixel_repetition,
+ uint8_t non_uniform_picture_scaling,
+ uint8_t bar_info_data_valid,
+ uint16_t top_bar,
+ uint16_t bottom_bar,
+ uint16_t left_bar,
+ uint16_t right_bar
+)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ uint8_t frame[14];
+
+ frame[0x0] = 0;
+ frame[0x1] =
+ (scan_information & 0x3) |
+ ((bar_info_data_valid & 0x3) << 2) |
+ ((active_information_present & 0x1) << 4) |
+ ((color_format & 0x3) << 5);
+ frame[0x2] =
+ (active_format_aspect_ratio & 0xF) |
+ ((picture_aspect_ratio & 0x3) << 4) |
+ ((colorimetry & 0x3) << 6);
+ frame[0x3] =
+ (non_uniform_picture_scaling & 0x3) |
+ ((quantization & 0x3) << 2) |
+ ((ex_colorimetry & 0x7) << 4) |
+ ((ITC & 0x1) << 7);
+ frame[0x4] = (video_format_identification & 0x7F);
+ frame[0x5] = (pixel_repetition & 0xF);
+ frame[0x6] = (top_bar & 0xFF);
+ frame[0x7] = (top_bar >> 8);
+ frame[0x8] = (bottom_bar & 0xFF);
+ frame[0x9] = (bottom_bar >> 8);
+ frame[0xA] = (left_bar & 0xFF);
+ frame[0xB] = (left_bar >> 8);
+ frame[0xC] = (right_bar & 0xFF);
+ frame[0xD] = (right_bar >> 8);
+
+ evergreen_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+ /* Our header values (type, version, length) should be alright; Intel
+ * uses the same ones. The checksum function also seems to be OK, as it
+ * works fine for the audio infoframe. However, the calculated value is
+ * always lower by 2 than what fglrx produces, which breaks the display
+ * on TVs that strictly check the checksum. Hack it manually here to
+ * work around the issue. */
+ frame[0x0] += 2;
+
+ WREG32(AFMT_AVI_INFO0 + offset,
+ frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+ WREG32(AFMT_AVI_INFO1 + offset,
+ frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+ WREG32(AFMT_AVI_INFO2 + offset,
+ frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+ WREG32(AFMT_AVI_INFO3 + offset,
+ frame[0xC] | (frame[0xD] << 8));
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset;
+
+ if (ASIC_IS_DCE5(rdev))
+ return;
+
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (!dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
+
+ r600_audio_set_clock(encoder, mode->clock);
+
+ WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+ HDMI_NULL_SEND); /* send null packets when required */
+
+ WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+ WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+ HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+ HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+
+ WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+ HDMI_ACR_SOURCE); /* select SW CTS value */
+
+ WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+ HDMI_NULL_SEND | /* send null packets when required */
+ HDMI_GC_SEND | /* send general control packets */
+ HDMI_GC_CONT); /* send general control packets every frame */
+
+ WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+ HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+
+ WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
+ AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
+
+ WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
+ HDMI_AVI_INFO_LINE(2) | /* anything other than 0 */
+ HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+ WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
+
+ evergreen_hdmi_videoinfoframe(encoder, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0);
+
+ evergreen_hdmi_update_ACR(encoder, mode->clock);
+
+ /* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
+ WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+ WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+}
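
evergreen_hdmi_infoframe_checksum() above implements the usual infoframe rule: the checksum byte is chosen so that type + version + length + payload + checksum sums to zero modulo 256 (the driver then adds 2 on top of that, as explained in the comment in evergreen_hdmi_videoinfoframe()). A standalone sketch of the nominal rule with arbitrary example payload bytes:

#include <stdio.h>
#include <stdint.h>

static uint8_t infoframe_checksum(uint8_t type, uint8_t version,
				  uint8_t length, const uint8_t *payload)
{
	unsigned int sum = type + version + length;
	int i;

	for (i = 0; i < length; i++)
		sum += payload[i];
	return (uint8_t)(0x100 - (sum & 0xff));	/* makes the total wrap to 0 */
}

int main(void)
{
	uint8_t payload[13] = { 0x12, 0x28, 0x04 };	/* example AVI body, rest zero */
	uint8_t csum = infoframe_checksum(0x82, 0x02, 0x0d, payload);
	unsigned int total = 0x82 + 0x02 + 0x0d + csum;
	int i;

	for (i = 0; i < 13; i++)
		total += payload[i];
	printf("checksum = 0x%02x, total mod 256 = %u\n", csum, total & 0xff);
	return 0;
}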
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 96c10b3991aa..8beac1065025 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -232,6 +232,4 @@
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
#define EVERGREEN_HDMI_BASE 0x7030
-#define EVERGREEN_HDMI_CONFIG_OFFSET 0xf0
-
#endif
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b4eefc355f16..79130bfd1d6f 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -112,6 +112,226 @@
#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
#define CP_DEBUG 0xC1FC
+/* Audio clocks */
+#define DCCG_AUDIO_DTO_SOURCE 0x05ac
+# define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
+# define DCCG_AUDIO_DTO_SEL (1 << 4) /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE 0x05b0
+#define DCCG_AUDIO_DTO0_MODULE 0x05b4
+#define DCCG_AUDIO_DTO0_LOAD 0x05b8
+#define DCCG_AUDIO_DTO0_CNTL 0x05bc
+
+#define DCCG_AUDIO_DTO1_PHASE 0x05c0
+#define DCCG_AUDIO_DTO1_MODULE 0x05c4
+#define DCCG_AUDIO_DTO1_LOAD 0x05c8
+#define DCCG_AUDIO_DTO1_CNTL 0x05cc
+
+/* DCE 4.0 AFMT */
+#define HDMI_CONTROL 0x7030
+# define HDMI_KEEPOUT_MODE (1 << 0)
+# define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */
+# define HDMI_ERROR_ACK (1 << 8)
+# define HDMI_ERROR_MASK (1 << 9)
+# define HDMI_DEEP_COLOR_ENABLE (1 << 24)
+# define HDMI_DEEP_COLOR_DEPTH (((x) & 3) << 28)
+# define HDMI_24BIT_DEEP_COLOR 0
+# define HDMI_30BIT_DEEP_COLOR 1
+# define HDMI_36BIT_DEEP_COLOR 2
+#define HDMI_STATUS 0x7034
+# define HDMI_ACTIVE_AVMUTE (1 << 0)
+# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
+# define HDMI_VBI_PACKET_ERROR (1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL 0x7038
+# define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
+# define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL 0x703c
+# define HDMI_ACR_SEND (1 << 0)
+# define HDMI_ACR_CONT (1 << 1)
+# define HDMI_ACR_SELECT(x) (((x) & 3) << 4)
+# define HDMI_ACR_HW 0
+# define HDMI_ACR_32 1
+# define HDMI_ACR_44 2
+# define HDMI_ACR_48 3
+# define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
+# define HDMI_ACR_AUTO_SEND (1 << 12)
+# define HDMI_ACR_N_MULTIPLE(x) (((x) & 7) << 16)
+# define HDMI_ACR_X1 1
+# define HDMI_ACR_X2 2
+# define HDMI_ACR_X4 4
+# define HDMI_ACR_AUDIO_PRIORITY (1 << 31)
+#define HDMI_VBI_PACKET_CONTROL 0x7040
+# define HDMI_NULL_SEND (1 << 0)
+# define HDMI_GC_SEND (1 << 4)
+# define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0 0x7044
+# define HDMI_AVI_INFO_SEND (1 << 0)
+# define HDMI_AVI_INFO_CONT (1 << 1)
+# define HDMI_AUDIO_INFO_SEND (1 << 4)
+# define HDMI_AUDIO_INFO_CONT (1 << 5)
+# define HDMI_MPEG_INFO_SEND (1 << 8)
+# define HDMI_MPEG_INFO_CONT (1 << 9)
+#define HDMI_INFOFRAME_CONTROL1 0x7048
+# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
+# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL 0x704c
+# define HDMI_GENERIC0_SEND (1 << 0)
+# define HDMI_GENERIC0_CONT (1 << 1)
+# define HDMI_GENERIC1_SEND (1 << 4)
+# define HDMI_GENERIC1_CONT (1 << 5)
+# define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
+# define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
+#define HDMI_GC 0x7058
+# define HDMI_GC_AVMUTE (1 << 0)
+# define HDMI_GC_AVMUTE_CONT (1 << 2)
+#define AFMT_AUDIO_PACKET_CONTROL2 0x705c
+# define AFMT_AUDIO_LAYOUT_OVRD (1 << 0)
+# define AFMT_AUDIO_LAYOUT_SELECT (1 << 1)
+# define AFMT_60958_CS_SOURCE (1 << 4)
+# define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8)
+# define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0 0x7084
+# define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AVI_INFO_S(x) (((x) & 3) << 8)
+# define AFMT_AVI_INFO_B(x) (((x) & 3) << 10)
+# define AFMT_AVI_INFO_A(x) (((x) & 1) << 12)
+# define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13)
+# define AFMT_AVI_INFO_Y_RGB 0
+# define AFMT_AVI_INFO_Y_YCBCR422 1
+# define AFMT_AVI_INFO_Y_YCBCR444 2
+# define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
+# define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16)
+# define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20)
+# define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22)
+# define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
+# define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24)
+# define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26)
+# define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28)
+# define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31)
+# define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1 0x7088
+# define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_CN(x) (((x) & 0x3) << 12)
+# define AFMT_AVI_INFO_YQ(x) (((x) & 0x3) << 14)
+# define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2 0x708c
+# define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3 0x7090
+# define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24)
+#define AFMT_MPEG_INFO0 0x7094
+# define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
+# define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
+# define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1 0x7098
+# define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8)
+# define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR 0x709c
+#define AFMT_GENERIC0_0 0x70a0
+#define AFMT_GENERIC0_1 0x70a4
+#define AFMT_GENERIC0_2 0x70a8
+#define AFMT_GENERIC0_3 0x70ac
+#define AFMT_GENERIC0_4 0x70b0
+#define AFMT_GENERIC0_5 0x70b4
+#define AFMT_GENERIC0_6 0x70b8
+#define AFMT_GENERIC1_HDR 0x70bc
+#define AFMT_GENERIC1_0 0x70c0
+#define AFMT_GENERIC1_1 0x70c4
+#define AFMT_GENERIC1_2 0x70c8
+#define AFMT_GENERIC1_3 0x70cc
+#define AFMT_GENERIC1_4 0x70d0
+#define AFMT_GENERIC1_5 0x70d4
+#define AFMT_GENERIC1_6 0x70d8
+#define HDMI_ACR_32_0 0x70dc
+# define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1 0x70e0
+# define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0 0x70e4
+# define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1 0x70e8
+# define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0 0x70ec
+# define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1 0x70f0
+# define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_STATUS_0 0x70f4
+#define HDMI_ACR_STATUS_1 0x70f8
+#define AFMT_AUDIO_INFO0 0x70fc
+# define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8)
+# define AFMT_AUDIO_INFO_CT(x) (((x) & 0xf) << 11)
+# define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16)
+# define AFMT_AUDIO_INFO_CXT(x) (((x) & 0x1f) << 24)
+#define AFMT_AUDIO_INFO1 0x7100
+# define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
+# define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
+# define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+# define AFMT_AUDIO_INFO_LFEBPL(x) (((x) & 3) << 16)
+#define AFMT_60958_0 0x7104
+# define AFMT_60958_CS_A(x) (((x) & 1) << 0)
+# define AFMT_60958_CS_B(x) (((x) & 1) << 1)
+# define AFMT_60958_CS_C(x) (((x) & 1) << 2)
+# define AFMT_60958_CS_D(x) (((x) & 3) << 3)
+# define AFMT_60958_CS_MODE(x) (((x) & 3) << 6)
+# define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
+# define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
+# define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+# define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
+#define AFMT_60958_1 0x7108
+# define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16)
+# define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18)
+# define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL 0x710c
+# define AFMT_AUDIO_CRC_EN (1 << 0)
+#define AFMT_RAMP_CONTROL0 0x7110
+# define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_RAMP_DATA_SIGN (1 << 31)
+#define AFMT_RAMP_CONTROL1 0x7114
+# define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2 0x7118
+# define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3 0x711c
+# define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_60958_2 0x7120
+# define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
+# define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
+# define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
+#define AFMT_STATUS 0x7128
+# define AFMT_AUDIO_ENABLE (1 << 4)
+# define AFMT_AUDIO_HBR_ENABLE (1 << 8)
+# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL 0x712c
+# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
+# define AFMT_RESET_FIFO_WHEN_AUDIO_DIS (1 << 11) /* set to 1 */
+# define AFMT_AUDIO_TEST_EN (1 << 12)
+# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
+# define AFMT_60958_CS_UPDATE (1 << 26)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
+#define AFMT_VBI_PACKET_CONTROL 0x7130
+# define AFMT_GENERIC0_UPDATE (1 << 2)
+#define AFMT_INFOFRAME_CONTROL0 0x7134
+# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - afmt regs */
+# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
+# define AFMT_MPEG_INFO_UPDATE (1 << 10)
+#define AFMT_GENERIC0_7 0x7138
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
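
The new register definitions above follow the usual radeon convention: plain #defines are register offsets, single-bit flags are (1 << n), and parameterised fields mask and shift their argument into place, so a register value is built by OR-ing the helpers together. A small sketch of that composition, using two of the field macros copied from the hunk above (the printed value is only for illustration):

#include <stdio.h>
#include <stdint.h>

/* copied from the HDMI_AUDIO_PACKET_CONTROL definitions above */
#define HDMI_AUDIO_DELAY_EN(x)		(((x) & 3) << 4)
#define HDMI_AUDIO_PACKETS_PER_LINE(x)	(((x) & 0x1f) << 16)

int main(void)
{
	/* the combination evergreen_hdmi_setmode() writes to HDMI_AUDIO_PACKET_CONTROL */
	uint32_t val = HDMI_AUDIO_DELAY_EN(1) | HDMI_AUDIO_PACKETS_PER_LINE(3);

	printf("HDMI_AUDIO_PACKET_CONTROL value: 0x%08x\n", val);	/* 0x00030010 */
	return 0;
}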
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index a48ca53fcd6a..ce4e7cc6c905 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -865,7 +865,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
- rdev->config.evergreen.tile_config |= 1 << 4;
+ rdev->config.cayman.tile_config |= 1 << 4;
else
rdev->config.cayman.tile_config |=
((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
@@ -1392,35 +1392,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
return 0;
}
-bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 srbm_status;
- u32 grbm_status;
- u32 grbm_status_se0, grbm_status_se1;
- struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
- int r;
-
- srbm_status = RREG32(SRBM_STATUS);
- grbm_status = RREG32(GRBM_STATUS);
- grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
- grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
- if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, ring);
- return false;
- }
- /* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- /* XXX deal with CP0,1,2 */
- ring->rptr = RREG32(ring->rptr_reg);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
-}
-
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
@@ -1601,12 +1572,9 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
r = radeon_vm_manager_start(rdev);
if (r)
@@ -1661,10 +1629,6 @@ int cayman_init(struct radeon_device *rdev)
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -1776,7 +1740,6 @@ void cayman_fini(struct radeon_device *rdev)
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index fe33d35dae8c..fb44e7e49083 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -139,9 +139,9 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
}
tmp |= tile_flags;
- p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
+ p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
} else
- p->ib->ptr[idx] = (value & 0xffc00000) | tmp;
+ p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
return 0;
}
@@ -156,7 +156,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
volatile uint32_t *ib;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
c = radeon_get_ib_value(p, idx++) & 0x1F;
if (c > 16) {
@@ -660,7 +660,7 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
WREG32(RADEON_AIC_CNTL, tmp);
r100_pci_gart_tlb_flush(rdev);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
@@ -1180,6 +1180,10 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
+
+ /* at this point everything should be setup correctly to enable master */
+ pci_set_master(rdev->pdev);
+
radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
if (r) {
@@ -1271,7 +1275,7 @@ void r100_cs_dump_packet(struct radeon_cs_parser *p,
unsigned i;
unsigned idx;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx;
for (i = 0; i <= (pkt->count + 1); i++, idx++) {
DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@@ -1350,7 +1354,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
uint32_t header, h_idx, reg;
volatile uint32_t *ib;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
/* parse the wait until */
r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@@ -1529,7 +1533,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
u32 tile_flags = 0;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
@@ -1885,7 +1889,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
volatile uint32_t *ib;
int r;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track;
switch (pkt->opcode) {
@@ -2004,6 +2008,8 @@ int r100_cs_parse(struct radeon_cs_parser *p)
int r;
track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (!track)
+ return -ENOMEM;
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
@@ -2155,79 +2161,18 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
-{
- lockup->last_cp_rptr = ring->rptr;
- lockup->last_jiffies = jiffies;
-}
-
-/**
- * r100_gpu_cp_is_lockup() - check if CP is lockup by recording information
- * @rdev: radeon device structure
- * @lockup: r100_gpu_lockup structure holding CP lockup tracking informations
- * @cp: radeon_cp structure holding CP information
- *
- * We don't need to initialize the lockup tracking information as we will either
- * have CP rptr to a different value of jiffies wrap around which will force
- * initialization of the lockup tracking informations.
- *
- * A possible false positivie is if we get call after while and last_cp_rptr ==
- * the current CP rptr, even if it's unlikely it might happen. To avoid this
- * if the elapsed time since last call is bigger than 2 second than we return
- * false and update the tracking information. Due to this the caller must call
- * r100_gpu_cp_is_lockup several time in less than 2sec for lockup to be reported
- * the fencing code should be cautious about that.
- *
- * Caller should write to the ring to force CP to do something so we don't get
- * false positive when CP is just gived nothing to do.
- *
- **/
-bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
-{
- unsigned long cjiffies, elapsed;
-
- cjiffies = jiffies;
- if (!time_after(cjiffies, lockup->last_jiffies)) {
- /* likely a wrap around */
- lockup->last_cp_rptr = ring->rptr;
- lockup->last_jiffies = jiffies;
- return false;
- }
- if (ring->rptr != lockup->last_cp_rptr) {
- /* CP is still working no lockup */
- lockup->last_cp_rptr = ring->rptr;
- lockup->last_jiffies = jiffies;
- return false;
- }
- elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
- if (elapsed >= 10000) {
- dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
- return true;
- }
- /* give a chance to the GPU ... */
- return false;
-}
-
bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rbbm_status;
- int r;
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- r100_gpu_lockup_update(&rdev->config.r100.lockup, ring);
+ radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- ring->rptr = RREG32(ring->rptr_reg);
- return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring);
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
void r100_bm_disable(struct radeon_device *rdev)
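The hunk above drops the per-chip r100_gpu_lockup_update()/r100_gpu_cp_is_lockup() pair in favour of the shared radeon_ring_lockup_update() and radeon_ring_test_lockup() helpers; the heuristic itself stays the same: remember the last ring read pointer plus a timestamp, and only call it a lockup when the pointer has not moved for a long grace period. A minimal sketch of that idea, using a hypothetical tracking struct rather than the real radeon_ring fields:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Illustrative only: struct and field names are assumptions, not radeon code. */
struct ring_lockup_track {
	u32 last_rptr;
	unsigned long last_jiffies;
};

static bool ring_seems_locked_up(struct ring_lockup_track *t, u32 rptr)
{
	if (rptr != t->last_rptr || time_before(jiffies, t->last_jiffies)) {
		/* progress was made (or jiffies wrapped): restart tracking */
		t->last_rptr = rptr;
		t->last_jiffies = jiffies;
		return false;
	}
	/* no progress: only report a lockup after roughly ten seconds */
	return time_after(jiffies, t->last_jiffies + msecs_to_jiffies(10000));
}

The caller is still expected to nudge the CP first (the new radeon_ring_force_activity() call) so an idle-but-healthy ring is not mistaken for a hung one.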
@@ -2296,7 +2241,6 @@ int r100_asic_reset(struct radeon_device *rdev)
if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
- rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
@@ -3742,7 +3686,7 @@ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
- struct radeon_ib *ib;
+ struct radeon_ib ib;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
@@ -3758,22 +3702,22 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
if (r) {
return r;
}
- ib->ptr[0] = PACKET0(scratch, 0);
- ib->ptr[1] = 0xDEADBEEF;
- ib->ptr[2] = PACKET2(0);
- ib->ptr[3] = PACKET2(0);
- ib->ptr[4] = PACKET2(0);
- ib->ptr[5] = PACKET2(0);
- ib->ptr[6] = PACKET2(0);
- ib->ptr[7] = PACKET2(0);
- ib->length_dw = 8;
- r = radeon_ib_schedule(rdev, ib);
+ ib.ptr[0] = PACKET0(scratch, 0);
+ ib.ptr[1] = 0xDEADBEEF;
+ ib.ptr[2] = PACKET2(0);
+ ib.ptr[3] = PACKET2(0);
+ ib.ptr[4] = PACKET2(0);
+ ib.ptr[5] = PACKET2(0);
+ ib.ptr[6] = PACKET2(0);
+ ib.ptr[7] = PACKET2(0);
+ ib.length_dw = 8;
+ r = radeon_ib_schedule(rdev, &ib);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
return r;
}
- r = radeon_fence_wait(ib->fence, false);
+ r = radeon_fence_wait(ib.fence, false);
if (r) {
return r;
}
@@ -3965,12 +3909,9 @@ static int r100_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index a59cc474d537..a26144d01207 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -154,7 +154,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
u32 tile_flags = 0;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
switch (reg) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index fa14383f9ca0..97722a33e513 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -377,28 +377,6 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
-bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 rbbm_status;
- int r;
-
- rbbm_status = RREG32(R_000E40_RBBM_STATUS);
- if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- r100_gpu_lockup_update(&rdev->config.r300.lockup, ring);
- return false;
- }
- /* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- ring->rptr = RREG32(RADEON_CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring);
-}
-
int r300_asic_reset(struct radeon_device *rdev)
{
struct r100_mc_save save;
@@ -449,7 +427,6 @@ int r300_asic_reset(struct radeon_device *rdev)
/* Check if GPU is idle */
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
- rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
@@ -627,7 +604,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
int r;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
@@ -1169,7 +1146,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
unsigned idx;
int r;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track;
switch(pkt->opcode) {
@@ -1418,12 +1395,9 @@ static int r300_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index f3fcaacfea01..99137be7a300 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -279,12 +279,9 @@ static int r420_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index ebcc15b03c9f..b5cf8375cd25 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -207,12 +207,10 @@ static int r520_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c8187c4b6ae8..f388a1d73b63 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -713,6 +713,14 @@ void r600_hpd_init(struct radeon_device *rdev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS, to avoid breaking the
+ * aux dp channel on iMacs; this helps (but does not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ */
+ continue;
+ }
if (ASIC_IS_DCE3(rdev)) {
u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
if (ASIC_IS_DCE32(rdev))
@@ -1223,7 +1231,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->vram_scratch.robj);
+ NULL, &rdev->vram_scratch.robj);
if (r) {
return r;
}
@@ -1350,31 +1358,17 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
u32 srbm_status;
u32 grbm_status;
u32 grbm_status2;
- struct r100_gpu_lockup *lockup;
- int r;
-
- if (rdev->family >= CHIP_RV770)
- lockup = &rdev->config.rv770.lockup;
- else
- lockup = &rdev->config.r600.lockup;
srbm_status = RREG32(R_000E50_SRBM_STATUS);
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
- r100_gpu_lockup_update(lockup, ring);
+ radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- ring->rptr = RREG32(ring->rptr_reg);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
int r600_asic_reset(struct radeon_device *rdev)
@@ -2377,20 +2371,15 @@ int r600_copy_blit(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
+ struct radeon_sa_bo *vb = NULL;
int r;
- mutex_lock(&rdev->r600_blit.mutex);
- rdev->r600_blit.vb_ib = NULL;
- r = r600_blit_prepare_copy(rdev, num_gpu_pages);
+ r = r600_blit_prepare_copy(rdev, num_gpu_pages, &vb);
if (r) {
- if (rdev->r600_blit.vb_ib)
- radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
- mutex_unlock(&rdev->r600_blit.mutex);
return r;
}
- r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages);
- r600_blit_done_copy(rdev, fence);
- mutex_unlock(&rdev->r600_blit.mutex);
+ r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
+ r600_blit_done_copy(rdev, fence, vb);
return 0;
}
@@ -2494,12 +2483,9 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
@@ -2574,10 +2560,6 @@ int r600_init(struct radeon_device *rdev)
if (r600_debugfs_mc_info_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for mc !\n");
}
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -2675,7 +2657,6 @@ void r600_fini(struct radeon_device *rdev)
r600_vram_scratch_fini(rdev);
radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
@@ -2704,7 +2685,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
- struct radeon_ib *ib;
+ struct radeon_ib ib;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
@@ -2722,18 +2703,18 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
return r;
}
- ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
- ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- ib->ptr[2] = 0xDEADBEEF;
- ib->length_dw = 3;
- r = radeon_ib_schedule(rdev, ib);
+ ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+ ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ ib.ptr[2] = 0xDEADBEEF;
+ ib.length_dw = 3;
+ r = radeon_ib_schedule(rdev, &ib);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
return r;
}
- r = radeon_fence_wait(ib->fence, false);
+ r = radeon_fence_wait(ib.fence, false);
if (r) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
return r;
@@ -2745,7 +2726,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
} else {
DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
@@ -2788,7 +2769,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
- &rdev->ih.ring_obj);
+ NULL, &rdev->ih.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
return r;
@@ -2968,6 +2949,15 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD6_INT_CONTROL, tmp);
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
+ } else {
+ tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
}
} else {
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
@@ -2978,6 +2968,10 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+ tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
}
}
@@ -3047,6 +3041,9 @@ int r600_irq_init(struct radeon_device *rdev)
else
r600_disable_interrupt_state(rdev);
+ /* at this point everything should be set up correctly to enable master */
+ pci_set_master(rdev->pdev);
+
/* enable irqs */
r600_enable_interrupts(rdev);
@@ -3071,7 +3068,7 @@ int r600_irq_set(struct radeon_device *rdev)
u32 mode_int = 0;
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
- u32 hdmi1, hdmi2;
+ u32 hdmi0, hdmi1;
u32 d1grph = 0, d2grph = 0;
if (!rdev->irq.installed) {
@@ -3086,9 +3083,7 @@ int r600_irq_set(struct radeon_device *rdev)
return 0;
}
- hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
if (ASIC_IS_DCE3(rdev)) {
- hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -3096,12 +3091,18 @@ int r600_irq_set(struct radeon_device *rdev)
if (ASIC_IS_DCE32(rdev)) {
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ } else {
+ hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
} else {
- hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
@@ -3143,13 +3144,13 @@ int r600_irq_set(struct radeon_device *rdev)
DRM_DEBUG("r600_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
- if (rdev->irq.hdmi[0]) {
- DRM_DEBUG("r600_irq_set: hdmi 1\n");
- hdmi1 |= R600_HDMI_INT_EN;
+ if (rdev->irq.afmt[0]) {
+ DRM_DEBUG("r600_irq_set: hdmi 0\n");
+ hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
}
- if (rdev->irq.hdmi[1]) {
- DRM_DEBUG("r600_irq_set: hdmi 2\n");
- hdmi2 |= R600_HDMI_INT_EN;
+ if (rdev->irq.afmt[1]) {
+ DRM_DEBUG("r600_irq_set: hdmi 0\n");
+ hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.gui_idle) {
DRM_DEBUG("gui idle\n");
@@ -3161,9 +3162,7 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
- WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
if (ASIC_IS_DCE3(rdev)) {
- WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -3171,12 +3170,18 @@ int r600_irq_set(struct radeon_device *rdev)
if (ASIC_IS_DCE32(rdev)) {
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
+ } else {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+ WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
}
} else {
- WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+ WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
}
return 0;
@@ -3190,10 +3195,19 @@ static void r600_irq_ack(struct radeon_device *rdev)
rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+ if (ASIC_IS_DCE32(rdev)) {
+ rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
+ rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
+ } else {
+ rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
+ rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
+ }
} else {
rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
+ rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
+ rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
}
rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
@@ -3259,17 +3273,32 @@ static void r600_irq_ack(struct radeon_device *rdev)
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
- }
- if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
- WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
- }
- if (ASIC_IS_DCE3(rdev)) {
- if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
- WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
+ }
+ if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
}
} else {
- if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
- WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
+ tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
+ tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+ WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
+ } else {
+ tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
+ tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+ WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
+ }
}
}
}
@@ -3345,6 +3374,7 @@ int r600_irq_process(struct radeon_device *rdev)
u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
+ bool queue_hdmi = false;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
@@ -3480,9 +3510,26 @@ restart_ih:
break;
}
break;
- case 21: /* HDMI */
- DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
- r600_audio_schedule_polling(rdev);
+ case 21: /* hdmi */
+ switch (src_data) {
+ case 4:
+ if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
+ }
+ break;
+ case 5:
+ if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
@@ -3514,6 +3561,8 @@ restart_ih:
goto restart_ih;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
+ if (queue_hdmi)
+ schedule_work(&rdev->audio_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
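The interrupt rework above follows the usual top-half/bottom-half split: r600_irq_process() only latches the AFMT write-trigger status bits, sets queue_hdmi and acks the source, while the heavier reprogramming runs later from rdev->audio_work. A rough sketch of that pattern with made-up names (my_dev, my_irq_handler, my_audio_work), not the radeon structures:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct audio_work;
};

/* stub: read the status register and write the ACK bit back */
static bool my_read_and_ack_audio(struct my_dev *dev)
{
	(void)dev;
	return true;
}

static irqreturn_t my_irq_handler(int irq, void *arg)
{
	struct my_dev *dev = arg;

	/* top half: keep it short, just note and ack the event */
	if (my_read_and_ack_audio(dev))
		schedule_work(&dev->audio_work);	/* bottom half runs later */

	return IRQ_HANDLED;
}

/* INIT_WORK(&dev->audio_work, my_audio_work) is assumed to run at init time */
static void my_audio_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, audio_work);

	/* safe to sleep, take mutexes and touch many registers here */
	(void)dev;
}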
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index ba66f3093d46..7c4fa77f018f 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -29,7 +29,28 @@
#include "radeon_asic.h"
#include "atom.h"
-#define AUDIO_TIMER_INTERVALL 100 /* 1/10 sekund should be enough */
+/*
+ * check if enc_priv stores radeon_encoder_atom_dig
+ */
+static bool radeon_dig_encoder(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ return true;
+ }
+ return false;
+}
/*
* check if the chipset is supported
@@ -42,118 +63,85 @@ static int r600_audio_chipset_supported(struct radeon_device *rdev)
|| rdev->family == CHIP_RS740;
}
-/*
- * current number of channels
- */
-int r600_audio_channels(struct radeon_device *rdev)
+struct r600_audio r600_audio_status(struct radeon_device *rdev)
{
- return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
-}
+ struct r600_audio status;
+ uint32_t value;
-/*
- * current bits per sample
- */
-int r600_audio_bits_per_sample(struct radeon_device *rdev)
-{
- uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
- switch (value) {
- case 0x0: return 8;
- case 0x1: return 16;
- case 0x2: return 20;
- case 0x3: return 24;
- case 0x4: return 32;
- }
+ value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
- dev_err(rdev->dev, "Unknown bits per sample 0x%x using 16 instead\n",
- (int)value);
+ /* number of channels */
+ status.channels = (value & 0x7) + 1;
- return 16;
-}
-
-/*
- * current sampling rate in HZ
- */
-int r600_audio_rate(struct radeon_device *rdev)
-{
- uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
- uint32_t result;
+ /* bits per sample */
+ switch ((value & 0xF0) >> 4) {
+ case 0x0:
+ status.bits_per_sample = 8;
+ break;
+ case 0x1:
+ status.bits_per_sample = 16;
+ break;
+ case 0x2:
+ status.bits_per_sample = 20;
+ break;
+ case 0x3:
+ status.bits_per_sample = 24;
+ break;
+ case 0x4:
+ status.bits_per_sample = 32;
+ break;
+ default:
+ dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
+ (int)value);
+ status.bits_per_sample = 16;
+ }
+ /* current sampling rate in Hz */
if (value & 0x4000)
- result = 44100;
+ status.rate = 44100;
else
- result = 48000;
+ status.rate = 48000;
+ status.rate *= ((value >> 11) & 0x7) + 1;
+ status.rate /= ((value >> 8) & 0x7) + 1;
- result *= ((value >> 11) & 0x7) + 1;
- result /= ((value >> 8) & 0x7) + 1;
+ value = RREG32(R600_AUDIO_STATUS_BITS);
- return result;
-}
+ /* iec 60958 status bits */
+ status.status_bits = value & 0xff;
-/*
- * iec 60958 status bits
- */
-uint8_t r600_audio_status_bits(struct radeon_device *rdev)
-{
- return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
-}
+ /* iec 60958 category code */
+ status.category_code = (value >> 8) & 0xff;
-/*
- * iec 60958 category code
- */
-uint8_t r600_audio_category_code(struct radeon_device *rdev)
-{
- return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
-}
-
-/*
- * schedule next audio update event
- */
-void r600_audio_schedule_polling(struct radeon_device *rdev)
-{
- mod_timer(&rdev->audio_timer,
- jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+ return status;
}
/*
* update all hdmi interfaces with current audio parameters
*/
-static void r600_audio_update_hdmi(unsigned long param)
+void r600_audio_update_hdmi(struct work_struct *work)
{
- struct radeon_device *rdev = (struct radeon_device *)param;
+ struct radeon_device *rdev = container_of(work, struct radeon_device,
+ audio_work);
struct drm_device *dev = rdev->ddev;
-
- int channels = r600_audio_channels(rdev);
- int rate = r600_audio_rate(rdev);
- int bps = r600_audio_bits_per_sample(rdev);
- uint8_t status_bits = r600_audio_status_bits(rdev);
- uint8_t category_code = r600_audio_category_code(rdev);
-
+ struct r600_audio audio_status = r600_audio_status(rdev);
struct drm_encoder *encoder;
- int changes = 0, still_going = 0;
-
- changes |= channels != rdev->audio_channels;
- changes |= rate != rdev->audio_rate;
- changes |= bps != rdev->audio_bits_per_sample;
- changes |= status_bits != rdev->audio_status_bits;
- changes |= category_code != rdev->audio_category_code;
-
- if (changes) {
- rdev->audio_channels = channels;
- rdev->audio_rate = rate;
- rdev->audio_bits_per_sample = bps;
- rdev->audio_status_bits = status_bits;
- rdev->audio_category_code = category_code;
+ bool changed = false;
+
+ if (rdev->audio_status.channels != audio_status.channels ||
+ rdev->audio_status.rate != audio_status.rate ||
+ rdev->audio_status.bits_per_sample != audio_status.bits_per_sample ||
+ rdev->audio_status.status_bits != audio_status.status_bits ||
+ rdev->audio_status.category_code != audio_status.category_code) {
+ rdev->audio_status = audio_status;
+ changed = true;
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- still_going |= radeon_encoder->audio_polling_active;
- if (changes || r600_hdmi_buffer_status_changed(encoder))
+ if (!radeon_dig_encoder(encoder))
+ continue;
+ if (changed || r600_hdmi_buffer_status_changed(encoder))
r600_hdmi_update_audio_settings(encoder);
}
-
- if (still_going)
- r600_audio_schedule_polling(rdev);
}
/*
@@ -177,7 +165,7 @@ static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
}
/*
- * initialize the audio vars and register the update timer
+ * initialize the audio vars
*/
int r600_audio_init(struct radeon_device *rdev)
{
@@ -186,51 +174,16 @@ int r600_audio_init(struct radeon_device *rdev)
r600_audio_engine_enable(rdev, true);
- rdev->audio_channels = -1;
- rdev->audio_rate = -1;
- rdev->audio_bits_per_sample = -1;
- rdev->audio_status_bits = 0;
- rdev->audio_category_code = 0;
-
- setup_timer(
- &rdev->audio_timer,
- r600_audio_update_hdmi,
- (unsigned long)rdev);
+ rdev->audio_status.channels = -1;
+ rdev->audio_status.rate = -1;
+ rdev->audio_status.bits_per_sample = -1;
+ rdev->audio_status.status_bits = 0;
+ rdev->audio_status.category_code = 0;
return 0;
}
/*
- * enable the polling timer, to check for status changes
- */
-void r600_audio_enable_polling(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-
- DRM_DEBUG("r600_audio_enable_polling: %d\n",
- radeon_encoder->audio_polling_active);
- if (radeon_encoder->audio_polling_active)
- return;
-
- radeon_encoder->audio_polling_active = 1;
- if (rdev->audio_enabled)
- mod_timer(&rdev->audio_timer, jiffies + 1);
-}
-
-/*
- * disable the polling timer, so we get no more status updates
- */
-void r600_audio_disable_polling(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- DRM_DEBUG("r600_audio_disable_polling: %d\n",
- radeon_encoder->audio_polling_active);
- radeon_encoder->audio_polling_active = 0;
-}
-
-/*
* attach the audio codec to the clock source of the encoder
*/
void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
@@ -297,7 +250,5 @@ void r600_audio_fini(struct radeon_device *rdev)
if (!rdev->audio_enabled)
return;
- del_timer(&rdev->audio_timer);
-
r600_audio_engine_enable(rdev, false);
}
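The field decoding that the new r600_audio_status() performs above is plain bit arithmetic, so it is easy to verify by hand. A standalone decode of a made-up R600_AUDIO_RATE_BPS_CHANNEL value (0x11 is an assumed example, not something read from hardware):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t value = 0x00000011;	/* hypothetical register contents */

	int channels = (value & 0x7) + 1;		/* bits [2:0] + 1     -> 2   */
	int bps_code = (value & 0xF0) >> 4;		/* bits [7:4]         -> 0x1 */
	int bps = (bps_code == 0x1) ? 16 : -1;		/* driver maps the code via a switch */
	int rate = (value & 0x4000) ? 44100 : 48000;	/* bit 14 picks the base rate */

	rate *= ((value >> 11) & 0x7) + 1;		/* multiplier, bits [13:11] */
	rate /= ((value >> 8) & 0x7) + 1;		/* divider,    bits [10:8]  */

	printf("%d channels, %d bits per sample, %d Hz\n", channels, bps, rate);
	return 0;	/* prints: 2 channels, 16 bits per sample, 48000 Hz */
}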
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index db38f587f27a..03b6e0d3d503 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -513,7 +513,6 @@ int r600_blit_init(struct radeon_device *rdev)
rdev->r600_blit.primitives.set_default_state = set_default_state;
rdev->r600_blit.ring_size_common = 40; /* shaders + def state */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
rdev->r600_blit.ring_size_common += 5; /* done copy */
rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
@@ -528,7 +527,6 @@ int r600_blit_init(struct radeon_device *rdev)
if (rdev->r600_blit.shader_obj)
goto done;
- mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
if (rdev->family >= CHIP_RV770)
@@ -554,7 +552,7 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size = ALIGN(obj_size, 256);
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_obj);
+ NULL, &rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
return r;
@@ -621,27 +619,6 @@ void r600_blit_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
-static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
-{
- int r;
- r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
- &rdev->r600_blit.vb_ib, size);
- if (r) {
- DRM_ERROR("failed to get IB for vertex buffer\n");
- return r;
- }
-
- rdev->r600_blit.vb_total = size;
- rdev->r600_blit.vb_used = 0;
- return 0;
-}
-
-static void r600_vb_ib_put(struct radeon_device *rdev)
-{
- radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
- radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
-}
-
static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
int *width, int *height, int max_dim)
{
@@ -688,7 +665,8 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
}
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+ struct radeon_sa_bo **vb)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
@@ -705,46 +683,54 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
}
/* 48 bytes for vertex per loop */
- r = r600_vb_ib_get(rdev, (num_loops*48)+256);
- if (r)
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
+ (num_loops*48)+256, 256, true);
+ if (r) {
return r;
+ }
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
ring_size += rdev->r600_blit.ring_size_common;
r = radeon_ring_lock(rdev, ring, ring_size);
- if (r)
+ if (r) {
+ radeon_sa_bo_free(rdev, vb, NULL);
return r;
+ }
rdev->r600_blit.primitives.set_default_state(rdev);
rdev->r600_blit.primitives.set_shaders(rdev);
return 0;
}
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
+ struct radeon_sa_bo *vb)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- if (rdev->r600_blit.vb_ib)
- r600_vb_ib_put(rdev);
-
- if (fence)
- r = radeon_fence_emit(rdev, fence);
+ r = radeon_fence_emit(rdev, fence);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return;
+ }
- radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_sa_bo_free(rdev, &vb, fence);
}
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages)
+ unsigned num_gpu_pages,
+ struct radeon_sa_bo *vb)
{
u64 vb_gpu_addr;
- u32 *vb;
+ u32 *vb_cpu_addr;
- DRM_DEBUG("emitting copy %16llx %16llx %d %d\n",
- src_gpu_addr, dst_gpu_addr,
- num_gpu_pages, rdev->r600_blit.vb_used);
- vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
+ DRM_DEBUG("emitting copy %16llx %16llx %d\n",
+ src_gpu_addr, dst_gpu_addr, num_gpu_pages);
+ vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
+ vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
while (num_gpu_pages) {
int w, h;
@@ -756,39 +742,34 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
- if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
- WARN_ON(1);
- }
-
- vb[0] = 0;
- vb[1] = 0;
- vb[2] = 0;
- vb[3] = 0;
+ vb_cpu_addr[0] = 0;
+ vb_cpu_addr[1] = 0;
+ vb_cpu_addr[2] = 0;
+ vb_cpu_addr[3] = 0;
- vb[4] = 0;
- vb[5] = i2f(h);
- vb[6] = 0;
- vb[7] = i2f(h);
+ vb_cpu_addr[4] = 0;
+ vb_cpu_addr[5] = i2f(h);
+ vb_cpu_addr[6] = 0;
+ vb_cpu_addr[7] = i2f(h);
- vb[8] = i2f(w);
- vb[9] = i2f(h);
- vb[10] = i2f(w);
- vb[11] = i2f(h);
+ vb_cpu_addr[8] = i2f(w);
+ vb_cpu_addr[9] = i2f(h);
+ vb_cpu_addr[10] = i2f(w);
+ vb_cpu_addr[11] = i2f(h);
rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
w, h, w, src_gpu_addr, size_in_bytes);
rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
w, h, dst_gpu_addr);
rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
- vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
rdev->r600_blit.primitives.draw_auto(rdev);
rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
size_in_bytes, dst_gpu_addr);
- vb += 12;
- rdev->r600_blit.vb_used += 4*12;
+ vb_cpu_addr += 12;
+ vb_gpu_addr += 4*12;
src_gpu_addr += size_in_bytes;
dst_gpu_addr += size_in_bytes;
num_gpu_pages -= pages_per_loop;
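The blit rework above retires the private vb_ib in favour of the rdev->ring_tmp_bo sub-allocator: a slice is taken with radeon_sa_bo_new(), filled through radeon_sa_bo_cpu_addr(), handed to the CP via radeon_sa_bo_gpu_addr(), and returned with radeon_sa_bo_free() together with the fence that retires it. A hedged usage sketch mirroring those calls (fill_temp_vertices is a made-up helper, and error handling is trimmed):

/* Sketch only; assumes rdev, ring_tmp_bo and fence are already set up. */
static int fill_temp_vertices(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_sa_bo *vb = NULL;
	u32 *cpu;
	u64 gpu;
	int r;

	/* 48 bytes from the ring temporary BO, 256-byte aligned, blocking */
	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &vb, 48, 256, true);
	if (r)
		return r;

	cpu = (u32 *)radeon_sa_bo_cpu_addr(vb);	/* CPU fills the vertex data */
	gpu = radeon_sa_bo_gpu_addr(vb);	/* address the CP will fetch from */
	cpu[0] = 0;				/* ... emit commands using 'gpu' ... */
	(void)gpu;

	/* the allocation is recycled once 'fence' signals */
	radeon_sa_bo_free(rdev, &vb, fence);
	return 0;
}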
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index b8e12af304a9..0133f5f09bd6 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -345,7 +345,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
u32 height, height_align, pitch, pitch_align, depth_align;
u64 base_offset, base_align;
struct array_mode_checker array_check;
- volatile u32 *ib = p->ib->ptr;
+ volatile u32 *ib = p->ib.ptr;
unsigned array_mode;
u32 format;
@@ -471,7 +471,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
u64 base_offset, base_align;
struct array_mode_checker array_check;
int array_mode;
- volatile u32 *ib = p->ib->ptr;
+ volatile u32 *ib = p->ib.ptr;
if (track->db_bo == NULL) {
@@ -961,7 +961,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
uint32_t header, h_idx, reg, wait_reg_mem_info;
volatile uint32_t *ib;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
/* parse the WAIT_REG_MEM */
r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
@@ -1110,7 +1110,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
m = 1 << ((reg >> 2) & 31);
if (!(r600_reg_safe_bm[i] & m))
return 0;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
switch (reg) {
/* force following reg to 0 in an attempt to disable out buffer
* which will need us to better understand how it works to perform
@@ -1714,7 +1714,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u32 idx_value;
track = (struct r600_cs_track *)p->track;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
idx_value = radeon_get_ib_value(p, idx);
@@ -2249,8 +2249,8 @@ int r600_cs_parse(struct radeon_cs_parser *p)
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
- for (r = 0; r < p->ib->length_dw; r++) {
- printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
mdelay(1);
}
#endif
@@ -2298,7 +2298,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
{
struct radeon_cs_parser parser;
struct radeon_cs_chunk *ib_chunk;
- struct radeon_ib fake_ib;
struct r600_cs_track *track;
int r;
@@ -2314,9 +2313,8 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
parser.dev = &dev->pdev->dev;
parser.rdev = NULL;
parser.family = family;
- parser.ib = &fake_ib;
parser.track = track;
- fake_ib.ptr = ib;
+ parser.ib.ptr = ib;
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
@@ -2333,8 +2331,8 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
* input memory (cached) and write to the IB (which can be
* uncached). */
ib_chunk = &parser.chunks[parser.chunk_ib_idx];
- parser.ib->length_dw = ib_chunk->length_dw;
- *l = parser.ib->length_dw;
+ parser.ib.length_dw = ib_chunk->length_dw;
+ *l = parser.ib.length_dw;
r = r600_cs_parse(&parser);
if (r) {
DRM_ERROR("Invalid command stream !\n");
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 0b5920671450..226379e00ac1 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -27,6 +27,7 @@
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
+#include "r600d.h"
#include "atom.h"
/*
@@ -52,19 +53,7 @@ enum r600_hdmi_iec_status_bits {
AUDIO_STATUS_LEVEL = 0x80
};
-struct {
- uint32_t Clock;
-
- int N_32kHz;
- int CTS_32kHz;
-
- int N_44_1kHz;
- int CTS_44_1kHz;
-
- int N_48kHz;
- int CTS_48kHz;
-
-} r600_hdmi_ACR[] = {
+struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
/* 32kHz 44.1kHz 48kHz */
/* Clock N CTS N CTS N CTS */
{ 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
@@ -83,7 +72,7 @@ struct {
/*
* calculate CTS value if it's not found in the table
*/
-static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
+static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
{
if (*CTS == 0)
*CTS = clock * N / (128 * freq) * 1000;
@@ -91,6 +80,24 @@ static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
N, *CTS, freq);
}
+struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
+{
+ struct radeon_hdmi_acr res;
+ u8 i;
+
+ for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
+ r600_hdmi_predefined_acr[i].clock != 0; i++)
+ ;
+ res = r600_hdmi_predefined_acr[i];
+
+ /* In case some CTS are missing */
+ r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
+ r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
+ r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
+
+ return res;
+}
+
/*
* update the N and CTS parameters for a given pixel clock rate
*/
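The audio clock regeneration values above follow CTS = f_pixel * N / (128 * f_audio); r600_hdmi_calc_cts() computes exactly that when a clock is missing from the table (its clock argument is in kHz, hence the trailing * 1000), while the table keeps hand-picked pairs for the common and the /1.001 pixel clocks. A quick check with an assumed 74.25 MHz pixel clock and the HDMI-recommended N = 6144 for 48 kHz audio:

#include <stdio.h>

int main(void)
{
	unsigned long long fpix = 74250000ULL;	/* assumed 74.25 MHz pixel clock */
	unsigned long long n = 6144;		/* recommended N for 48 kHz */
	unsigned long long fs = 48000;

	unsigned long long cts = fpix * n / (128ULL * fs);

	printf("CTS = %llu\n", cts);	/* prints: CTS = 74250 */
	return 0;
}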
@@ -98,30 +105,19 @@ static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
- int CTS;
- int N;
- int i;
+ struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz));
+ WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz);
- for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++);
-
- CTS = r600_hdmi_ACR[i].CTS_32kHz;
- N = r600_hdmi_ACR[i].N_32kHz;
- r600_hdmi_calc_CTS(clock, &CTS, N, 32000);
- WREG32(offset+R600_HDMI_32kHz_CTS, CTS << 12);
- WREG32(offset+R600_HDMI_32kHz_N, N);
-
- CTS = r600_hdmi_ACR[i].CTS_44_1kHz;
- N = r600_hdmi_ACR[i].N_44_1kHz;
- r600_hdmi_calc_CTS(clock, &CTS, N, 44100);
- WREG32(offset+R600_HDMI_44_1kHz_CTS, CTS << 12);
- WREG32(offset+R600_HDMI_44_1kHz_N, N);
-
- CTS = r600_hdmi_ACR[i].CTS_48kHz;
- N = r600_hdmi_ACR[i].N_48kHz;
- r600_hdmi_calc_CTS(clock, &CTS, N, 48000);
- WREG32(offset+R600_HDMI_48kHz_CTS, CTS << 12);
- WREG32(offset+R600_HDMI_48kHz_N, N);
+ WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz));
+ WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz);
+
+ WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz));
+ WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
}
/*
@@ -165,7 +161,9 @@ static void r600_hdmi_videoinfoframe(
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
uint8_t frame[14];
@@ -204,13 +202,13 @@ static void r600_hdmi_videoinfoframe(
* workaround this issue. */
frame[0x0] += 2;
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
+ WREG32(HDMI0_AVI_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_1,
+ WREG32(HDMI0_AVI_INFO1 + offset,
frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_2,
+ WREG32(HDMI0_AVI_INFO2 + offset,
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_3,
+ WREG32(HDMI0_AVI_INFO3 + offset,
frame[0xC] | (frame[0xD] << 8));
}
@@ -231,7 +229,9 @@ static void r600_hdmi_audioinfoframe(
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
uint8_t frame[11];
@@ -249,22 +249,24 @@ static void r600_hdmi_audioinfoframe(
r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
- WREG32(offset+R600_HDMI_AUDIOINFOFRAME_0,
+ WREG32(HDMI0_AUDIO_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
- WREG32(offset+R600_HDMI_AUDIOINFOFRAME_1,
+ WREG32(HDMI0_AUDIO_INFO1 + offset,
frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x8] << 24));
}
/*
* test if audio buffer is filled enough to start playing
*/
-static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
+static bool r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
- return (RREG32(offset+R600_HDMI_STATUS) & 0x10) != 0;
+ return (RREG32(HDMI0_STATUS + offset) & 0x10) != 0;
}
/*
@@ -273,14 +275,15 @@ static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
int status, result;
- if (!radeon_encoder->hdmi_offset)
+ if (!dig->afmt || !dig->afmt->enabled)
return 0;
status = r600_hdmi_is_audio_buffer_filled(encoder);
- result = radeon_encoder->hdmi_buffer_status != status;
- radeon_encoder->hdmi_buffer_status = status;
+ result = dig->afmt->last_buffer_filled_status != status;
+ dig->afmt->last_buffer_filled_status = status;
return result;
}
@@ -288,26 +291,23 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
/*
* write the audio workaround status to the hardware
*/
-void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
+static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- uint32_t offset = radeon_encoder->hdmi_offset;
-
- if (!offset)
- return;
-
- if (!radeon_encoder->hdmi_audio_workaround ||
- r600_hdmi_is_audio_buffer_filled(encoder)) {
-
- /* disable audio workaround */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
-
- } else {
- /* enable audio workaround */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
- }
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+ bool hdmi_audio_workaround = false; /* FIXME */
+ u32 value;
+
+ if (!hdmi_audio_workaround ||
+ r600_hdmi_is_audio_buffer_filled(encoder))
+ value = 0; /* disable workaround */
+ else
+ value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
+ WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ value, ~HDMI0_AUDIO_TEST_EN);
}
@@ -318,39 +318,75 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset;
if (ASIC_IS_DCE5(rdev))
return;
- if (!offset)
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (!dig->afmt->enabled)
return;
+ offset = dig->afmt->offset;
r600_audio_set_clock(encoder, mode->clock);
- WREG32(offset+R600_HDMI_UNKNOWN_0, 0x1000);
- WREG32(offset+R600_HDMI_UNKNOWN_1, 0x0);
- WREG32(offset+R600_HDMI_UNKNOWN_2, 0x1000);
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND); /* send null packets when required */
- r600_hdmi_update_ACR(encoder, mode->clock);
+ WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+ if (ASIC_IS_DCE32(rdev)) {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ } else {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
+ HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ }
+
+ WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
+ HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+ HDMI0_ACR_SOURCE); /* select SW CTS value */
+
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND | /* send null packets when required */
+ HDMI0_GC_SEND | /* send general control packets */
+ HDMI0_GC_CONT); /* send general control packets every frame */
- WREG32(offset+R600_HDMI_VIDEOCNTL, 0x13);
+ /* TODO: HDMI0_AUDIO_INFO_UPDATE */
+ WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
+ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+ HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
- WREG32(offset+R600_HDMI_VERSION, 0x202);
+ WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
+ HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
+ HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+ WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ r600_hdmi_update_ACR(encoder, mode->clock);
+
/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_3, 0x00000001);
+ WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+ WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
r600_hdmi_audio_workaround(encoder);
-
- /* audio packets per line, does anyone know how to calc this ? */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
}
/*
@@ -360,145 +396,82 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
-
- int channels = r600_audio_channels(rdev);
- int rate = r600_audio_rate(rdev);
- int bps = r600_audio_bits_per_sample(rdev);
- uint8_t status_bits = r600_audio_status_bits(rdev);
- uint8_t category_code = r600_audio_category_code(rdev);
-
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct r600_audio audio = r600_audio_status(rdev);
+ uint32_t offset;
uint32_t iec;
- if (!offset)
+ if (!dig->afmt || !dig->afmt->enabled)
return;
+ offset = dig->afmt->offset;
DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
- channels, rate, bps);
+ audio.channels, audio.rate, audio.bits_per_sample);
DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
- (int)status_bits, (int)category_code);
+ (int)audio.status_bits, (int)audio.category_code);
iec = 0;
- if (status_bits & AUDIO_STATUS_PROFESSIONAL)
+ if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL)
iec |= 1 << 0;
- if (status_bits & AUDIO_STATUS_NONAUDIO)
+ if (audio.status_bits & AUDIO_STATUS_NONAUDIO)
iec |= 1 << 1;
- if (status_bits & AUDIO_STATUS_COPYRIGHT)
+ if (audio.status_bits & AUDIO_STATUS_COPYRIGHT)
iec |= 1 << 2;
- if (status_bits & AUDIO_STATUS_EMPHASIS)
+ if (audio.status_bits & AUDIO_STATUS_EMPHASIS)
iec |= 1 << 3;
- iec |= category_code << 8;
-
- switch (rate) {
- case 32000: iec |= 0x3 << 24; break;
- case 44100: iec |= 0x0 << 24; break;
- case 88200: iec |= 0x8 << 24; break;
- case 176400: iec |= 0xc << 24; break;
- case 48000: iec |= 0x2 << 24; break;
- case 96000: iec |= 0xa << 24; break;
- case 192000: iec |= 0xe << 24; break;
+ iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code);
+
+ switch (audio.rate) {
+ case 32000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3);
+ break;
+ case 44100:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0);
+ break;
+ case 48000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2);
+ break;
+ case 88200:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8);
+ break;
+ case 96000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa);
+ break;
+ case 176400:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc);
+ break;
+ case 192000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe);
+ break;
}
- WREG32(offset+R600_HDMI_IEC60958_1, iec);
+ WREG32(HDMI0_60958_0 + offset, iec);
iec = 0;
- switch (bps) {
- case 16: iec |= 0x2; break;
- case 20: iec |= 0x3; break;
- case 24: iec |= 0xb; break;
+ switch (audio.bits_per_sample) {
+ case 16:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0x2);
+ break;
+ case 20:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0x3);
+ break;
+ case 24:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0xb);
+ break;
}
- if (status_bits & AUDIO_STATUS_V)
+ if (audio.status_bits & AUDIO_STATUS_V)
iec |= 0x5 << 16;
+ WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
- WREG32_P(offset+R600_HDMI_IEC60958_2, iec, ~0x5000f);
-
- /* 0x021 or 0x031 sets the audio frame length */
- WREG32(offset+R600_HDMI_AUDIOCNTL, 0x31);
- r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
+ r600_hdmi_audioinfoframe(encoder, audio.channels - 1, 0, 0, 0, 0, 0, 0,
+ 0);
r600_hdmi_audio_workaround(encoder);
}
-static int r600_hdmi_find_free_block(struct drm_device *dev)
-{
- struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder;
- struct radeon_encoder *radeon_encoder;
- bool free_blocks[3] = { true, true, true };
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- radeon_encoder = to_radeon_encoder(encoder);
- switch (radeon_encoder->hdmi_offset) {
- case R600_HDMI_BLOCK1:
- free_blocks[0] = false;
- break;
- case R600_HDMI_BLOCK2:
- free_blocks[1] = false;
- break;
- case R600_HDMI_BLOCK3:
- free_blocks[2] = false;
- break;
- }
- }
-
- if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 ||
- rdev->family == CHIP_RS740) {
- return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
- } else if (rdev->family >= CHIP_R600) {
- if (free_blocks[0])
- return R600_HDMI_BLOCK1;
- else if (free_blocks[1])
- return R600_HDMI_BLOCK2;
- }
- return 0;
-}
-
-static void r600_hdmi_assign_block(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-
- u16 eg_offsets[] = {
- EVERGREEN_CRTC0_REGISTER_OFFSET,
- EVERGREEN_CRTC1_REGISTER_OFFSET,
- EVERGREEN_CRTC2_REGISTER_OFFSET,
- EVERGREEN_CRTC3_REGISTER_OFFSET,
- EVERGREEN_CRTC4_REGISTER_OFFSET,
- EVERGREEN_CRTC5_REGISTER_OFFSET,
- };
-
- if (!dig) {
- dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
- return;
- }
-
- if (ASIC_IS_DCE5(rdev)) {
- /* TODO */
- } else if (ASIC_IS_DCE4(rdev)) {
- if (dig->dig_encoder >= ARRAY_SIZE(eg_offsets)) {
- dev_err(rdev->dev, "Enabling HDMI on unknown dig\n");
- return;
- }
- radeon_encoder->hdmi_offset = EVERGREEN_HDMI_BASE +
- eg_offsets[dig->dig_encoder];
- radeon_encoder->hdmi_config_offset = radeon_encoder->hdmi_offset
- + EVERGREEN_HDMI_CONFIG_OFFSET;
- } else if (ASIC_IS_DCE3(rdev)) {
- radeon_encoder->hdmi_offset = dig->dig_encoder ?
- R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
- if (ASIC_IS_DCE32(rdev))
- radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
- R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
- } else if (rdev->family >= CHIP_R600 || rdev->family == CHIP_RS600 ||
- rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
- radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
- }
-}
-
/*
* enable the HDMI engine
*/
@@ -507,64 +480,57 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
+ u32 hdmi;
if (ASIC_IS_DCE5(rdev))
return;
- if (!radeon_encoder->hdmi_offset) {
- r600_hdmi_assign_block(encoder);
- if (!radeon_encoder->hdmi_offset) {
- dev_warn(rdev->dev, "Could not find HDMI block for "
- "0x%x encoder\n", radeon_encoder->encoder_id);
- return;
- }
- }
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
- offset = radeon_encoder->hdmi_offset;
- if (ASIC_IS_DCE5(rdev)) {
- /* TODO */
- } else if (ASIC_IS_DCE4(rdev)) {
- WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0x1, ~0x1);
- } else if (ASIC_IS_DCE32(rdev)) {
- WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
- } else if (ASIC_IS_DCE3(rdev)) {
- /* TODO */
- } else if (rdev->family >= CHIP_R600) {
+ /* Older chipsets require setting HDMI and routing manually */
+ if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
~AVIVO_TMDSA_CNTL_HDMI_EN);
- WREG32(offset + R600_HDMI_ENABLE, 0x101);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
~AVIVO_LVTMA_CNTL_HDMI_EN);
- WREG32(offset + R600_HDMI_ENABLE, 0x105);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ WREG32_P(DDIA_CNTL, DDIA_HDMI_EN, ~DDIA_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
break;
default:
- dev_err(rdev->dev, "Unknown HDMI output type\n");
+ dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
+ radeon_encoder->encoder_id);
break;
}
+ WREG32(HDMI0_CONTROL + offset, hdmi);
}
- if (rdev->irq.installed
- && rdev->family != CHIP_RS600
- && rdev->family != CHIP_RS690
- && rdev->family != CHIP_RS740
- && !ASIC_IS_DCE4(rdev)) {
+ if (rdev->irq.installed) {
/* if irq is available use it */
- rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
+ rdev->irq.afmt[dig->afmt->id] = true;
radeon_irq_set(rdev);
-
- r600_audio_disable_polling(encoder);
- } else {
- /* if not fallback to polling */
- r600_audio_enable_polling(encoder);
}
+ dig->afmt->enabled = true;
+
DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+ offset, radeon_encoder->encoder_id);
}
/*
@@ -575,51 +541,51 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
if (ASIC_IS_DCE5(rdev))
return;
- offset = radeon_encoder->hdmi_offset;
- if (!offset) {
- dev_err(rdev->dev, "Disabling not enabled HDMI\n");
+ /* Called for ATOM_ENCODER_MODE_HDMI only */
+ if (!dig || !dig->afmt) {
+ WARN_ON(1);
return;
}
+ if (!dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- offset, radeon_encoder->encoder_id);
+ offset, radeon_encoder->encoder_id);
/* disable irq */
- rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = false;
+ rdev->irq.afmt[dig->afmt->id] = false;
radeon_irq_set(rdev);
- /* disable polling */
- r600_audio_disable_polling(encoder);
-
- if (ASIC_IS_DCE5(rdev)) {
- /* TODO */
- } else if (ASIC_IS_DCE4(rdev)) {
- WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0, ~0x1);
- } else if (ASIC_IS_DCE32(rdev)) {
- WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
- } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ /* Older chipsets not handled by AtomBIOS */
+ if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0,
~AVIVO_TMDSA_CNTL_HDMI_EN);
- WREG32(offset + R600_HDMI_ENABLE, 0);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(AVIVO_LVTMA_CNTL, 0,
~AVIVO_LVTMA_CNTL_HDMI_EN);
- WREG32(offset + R600_HDMI_ENABLE, 0);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ WREG32_P(DDIA_CNTL, 0, ~DDIA_HDMI_EN);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
break;
default:
- dev_err(rdev->dev, "Unknown HDMI output type\n");
+ dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
+ radeon_encoder->encoder_id);
break;
}
+ WREG32(HDMI0_CONTROL + offset, HDMI0_ERROR_ACK);
}
- radeon_encoder->hdmi_offset = 0;
- radeon_encoder->hdmi_config_offset = 0;
+ dig->afmt->enabled = false;
}
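
The r600_hdmi.c changes above drop the per-block R600_HDMI_* bookkeeping in favour of the dig->afmt state, and build HDMI0_CONTROL from named stream-routing fields instead of the old 0x101/0x105 magic values. A minimal standalone sketch of that bit packing, with the field encodings copied from the HDMI0_CONTROL block added in r600d.h further down (the helper and main() are illustrative, not driver code):

    #include <stdio.h>

    /* Field encodings copied from the HDMI0_CONTROL block in r600d.h */
    #define HDMI0_ENABLE        (1 << 0)
    #define HDMI0_STREAM(x)     (((x) & 3) << 2)
    #define HDMI0_STREAM_TMDSA  0
    #define HDMI0_STREAM_LVTMA  1
    #define HDMI0_ERROR_ACK     (1 << 8)

    /* Illustrative helper: compose the control word the way the new
     * r600_hdmi_enable() path does for a given stream source. */
    static unsigned int hdmi0_control_word(int stream)
    {
        return HDMI0_ERROR_ACK | HDMI0_ENABLE | HDMI0_STREAM(stream);
    }

    int main(void)
    {
        /* 0x101 and 0x105: the values the removed code wrote to
         * R600_HDMI_ENABLE for TMDSA and LVTMA respectively. */
        printf("TMDSA: 0x%x\n", hdmi0_control_word(HDMI0_STREAM_TMDSA));
        printf("LVTMA: 0x%x\n", hdmi0_control_word(HDMI0_STREAM_LVTMA));
        return 0;
    }
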
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index f869897c7456..2b960cb5c18a 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -156,45 +156,10 @@
#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4
#define R600_AUDIO_STATUS_BITS 0x73d8
-/* HDMI base register addresses */
-#define R600_HDMI_BLOCK1 0x7400
-#define R600_HDMI_BLOCK2 0x7700
-#define R600_HDMI_BLOCK3 0x7800
-
-/* HDMI registers */
-#define R600_HDMI_ENABLE 0x00
-#define R600_HDMI_STATUS 0x04
-# define R600_HDMI_INT_PENDING (1 << 29)
-#define R600_HDMI_CNTL 0x08
-# define R600_HDMI_INT_EN (1 << 28)
-# define R600_HDMI_INT_ACK (1 << 29)
-#define R600_HDMI_UNKNOWN_0 0x0C
-#define R600_HDMI_AUDIOCNTL 0x10
-#define R600_HDMI_VIDEOCNTL 0x14
-#define R600_HDMI_VERSION 0x18
-#define R600_HDMI_UNKNOWN_1 0x28
-#define R600_HDMI_VIDEOINFOFRAME_0 0x54
-#define R600_HDMI_VIDEOINFOFRAME_1 0x58
-#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
-#define R600_HDMI_VIDEOINFOFRAME_3 0x60
-#define R600_HDMI_32kHz_CTS 0xac
-#define R600_HDMI_32kHz_N 0xb0
-#define R600_HDMI_44_1kHz_CTS 0xb4
-#define R600_HDMI_44_1kHz_N 0xb8
-#define R600_HDMI_48kHz_CTS 0xbc
-#define R600_HDMI_48kHz_N 0xc0
-#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
-#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
-#define R600_HDMI_IEC60958_1 0xd4
-#define R600_HDMI_IEC60958_2 0xd8
-#define R600_HDMI_UNKNOWN_2 0xdc
-#define R600_HDMI_AUDIO_DEBUG_0 0xe0
-#define R600_HDMI_AUDIO_DEBUG_1 0xe4
-#define R600_HDMI_AUDIO_DEBUG_2 0xe8
-#define R600_HDMI_AUDIO_DEBUG_3 0xec
-
-/* HDMI additional config base register addresses */
-#define R600_HDMI_CONFIG1 0x7600
-#define R600_HDMI_CONFIG2 0x7a00
+#define DCE2_HDMI_OFFSET0 (0x7400 - 0x7400)
+#define DCE2_HDMI_OFFSET1 (0x7700 - 0x7400)
+/* DCE3.2 second instance starts at 0x7800 */
+#define DCE3_HDMI_OFFSET0 (0x7400 - 0x7400)
+#define DCE3_HDMI_OFFSET1 (0x7800 - 0x7400)
#endif
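
r600_reg.h now expresses the second HDMI instance as an offset from the first block at 0x7400 rather than as a separate base address, so a single HDMI0_* register map serves both instances. A standalone sketch of how the absolute addresses fall out of that scheme (values copied from the patch; in the driver the offset comes from dig->afmt->offset):

    #include <stdio.h>

    /* Values copied from the patch */
    #define HDMI0_CONTROL      0x7400
    #define DCE2_HDMI_OFFSET1  (0x7700 - 0x7400)
    #define DCE3_HDMI_OFFSET1  (0x7800 - 0x7400)

    int main(void)
    {
        /* r6xx puts the second instance at the old BLOCK2 base (0x7700);
         * DCE3 parts put it at 0x7800 instead. */
        printf("r6xx HDMI1 control: 0x%x\n",
               (unsigned)(HDMI0_CONTROL + DCE2_HDMI_OFFSET1));
        printf("DCE3 HDMI1 control: 0x%x\n",
               (unsigned)(HDMI0_CONTROL + DCE3_HDMI_OFFSET1));
        return 0;
    }
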
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 59f9c993cc31..15bd3b216243 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -824,6 +824,239 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
+/* Audio clocks */
+#define DCCG_AUDIO_DTO0_PHASE 0x0514
+#define DCCG_AUDIO_DTO0_MODULE 0x0518
+#define DCCG_AUDIO_DTO0_LOAD 0x051c
+# define DTO_LOAD (1 << 31)
+#define DCCG_AUDIO_DTO0_CNTL 0x0520
+
+#define DCCG_AUDIO_DTO1_PHASE 0x0524
+#define DCCG_AUDIO_DTO1_MODULE 0x0528
+#define DCCG_AUDIO_DTO1_LOAD 0x052c
+#define DCCG_AUDIO_DTO1_CNTL 0x0530
+
+#define DCCG_AUDIO_DTO_SELECT 0x0534
+
+/* digital blocks */
+#define TMDSA_CNTL 0x7880
+# define TMDSA_HDMI_EN (1 << 2)
+#define LVTMA_CNTL 0x7a80
+# define LVTMA_HDMI_EN (1 << 2)
+#define DDIA_CNTL 0x7200
+# define DDIA_HDMI_EN (1 << 2)
+#define DIG0_CNTL 0x75a0
+# define DIG_MODE(x) (((x) & 7) << 8)
+# define DIG_MODE_DP 0
+# define DIG_MODE_LVDS 1
+# define DIG_MODE_TMDS_DVI 2
+# define DIG_MODE_TMDS_HDMI 3
+# define DIG_MODE_SDVO 4
+#define DIG1_CNTL 0x79a0
+
+/* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one
+ * instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly
+ * different due to the new DIG blocks, but also have 2 instances.
+ * DCE 3.0 HDMI blocks are part of each DIG encoder.
+ */
+
+/* rs6xx/rs740/r6xx/dce3 */
+#define HDMI0_CONTROL 0x7400
+/* rs6xx/rs740/r6xx */
+# define HDMI0_ENABLE (1 << 0)
+# define HDMI0_STREAM(x) (((x) & 3) << 2)
+# define HDMI0_STREAM_TMDSA 0
+# define HDMI0_STREAM_LVTMA 1
+# define HDMI0_STREAM_DVOA 2
+# define HDMI0_STREAM_DDIA 3
+/* rs6xx/r6xx/dce3 */
+# define HDMI0_ERROR_ACK (1 << 8)
+# define HDMI0_ERROR_MASK (1 << 9)
+#define HDMI0_STATUS 0x7404
+# define HDMI0_ACTIVE_AVMUTE (1 << 0)
+# define HDMI0_AUDIO_ENABLE (1 << 4)
+# define HDMI0_AZ_FORMAT_WTRIG (1 << 28)
+# define HDMI0_AZ_FORMAT_WTRIG_INT (1 << 29)
+#define HDMI0_AUDIO_PACKET_CONTROL 0x7408
+# define HDMI0_AUDIO_SAMPLE_SEND (1 << 0)
+# define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
+# define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8)
+# define HDMI0_AUDIO_TEST_EN (1 << 12)
+# define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
+# define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24)
+# define HDMI0_60958_CS_UPDATE (1 << 26)
+# define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29)
+#define HDMI0_AUDIO_CRC_CONTROL 0x740c
+# define HDMI0_AUDIO_CRC_EN (1 << 0)
+#define HDMI0_VBI_PACKET_CONTROL 0x7410
+# define HDMI0_NULL_SEND (1 << 0)
+# define HDMI0_GC_SEND (1 << 4)
+# define HDMI0_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI0_INFOFRAME_CONTROL0 0x7414
+# define HDMI0_AVI_INFO_SEND (1 << 0)
+# define HDMI0_AVI_INFO_CONT (1 << 1)
+# define HDMI0_AUDIO_INFO_SEND (1 << 4)
+# define HDMI0_AUDIO_INFO_CONT (1 << 5)
+# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+# define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
+# define HDMI0_MPEG_INFO_SEND (1 << 8)
+# define HDMI0_MPEG_INFO_CONT (1 << 9)
+# define HDMI0_MPEG_INFO_UPDATE (1 << 10)
+#define HDMI0_INFOFRAME_CONTROL1 0x7418
+# define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
+# define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
+#define HDMI0_GENERIC_PACKET_CONTROL 0x741c
+# define HDMI0_GENERIC0_SEND (1 << 0)
+# define HDMI0_GENERIC0_CONT (1 << 1)
+# define HDMI0_GENERIC0_UPDATE (1 << 2)
+# define HDMI0_GENERIC1_SEND (1 << 4)
+# define HDMI0_GENERIC1_CONT (1 << 5)
+# define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
+# define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
+#define HDMI0_GC 0x7428
+# define HDMI0_GC_AVMUTE (1 << 0)
+#define HDMI0_AVI_INFO0 0x7454
+# define HDMI0_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define HDMI0_AVI_INFO_S(x) (((x) & 3) << 8)
+# define HDMI0_AVI_INFO_B(x) (((x) & 3) << 10)
+# define HDMI0_AVI_INFO_A(x) (((x) & 1) << 12)
+# define HDMI0_AVI_INFO_Y(x) (((x) & 3) << 13)
+# define HDMI0_AVI_INFO_Y_RGB 0
+# define HDMI0_AVI_INFO_Y_YCBCR422 1
+# define HDMI0_AVI_INFO_Y_YCBCR444 2
+# define HDMI0_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
+# define HDMI0_AVI_INFO_R(x) (((x) & 0xf) << 16)
+# define HDMI0_AVI_INFO_M(x) (((x) & 0x3) << 20)
+# define HDMI0_AVI_INFO_C(x) (((x) & 0x3) << 22)
+# define HDMI0_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
+# define HDMI0_AVI_INFO_SC(x) (((x) & 0x3) << 24)
+# define HDMI0_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
+#define HDMI0_AVI_INFO1 0x7458
+# define HDMI0_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+# define HDMI0_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+# define HDMI0_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define HDMI0_AVI_INFO2 0x745c
+# define HDMI0_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
+# define HDMI0_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
+#define HDMI0_AVI_INFO3 0x7460
+# define HDMI0_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
+# define HDMI0_AVI_INFO_VERSION(x) (((x) & 3) << 24)
+#define HDMI0_MPEG_INFO0 0x7464
+# define HDMI0_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define HDMI0_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
+# define HDMI0_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
+# define HDMI0_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
+#define HDMI0_MPEG_INFO1 0x7468
+# define HDMI0_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
+# define HDMI0_MPEG_INFO_MF(x) (((x) & 3) << 8)
+# define HDMI0_MPEG_INFO_FR(x) (((x) & 1) << 12)
+#define HDMI0_GENERIC0_HDR 0x746c
+#define HDMI0_GENERIC0_0 0x7470
+#define HDMI0_GENERIC0_1 0x7474
+#define HDMI0_GENERIC0_2 0x7478
+#define HDMI0_GENERIC0_3 0x747c
+#define HDMI0_GENERIC0_4 0x7480
+#define HDMI0_GENERIC0_5 0x7484
+#define HDMI0_GENERIC0_6 0x7488
+#define HDMI0_GENERIC1_HDR 0x748c
+#define HDMI0_GENERIC1_0 0x7490
+#define HDMI0_GENERIC1_1 0x7494
+#define HDMI0_GENERIC1_2 0x7498
+#define HDMI0_GENERIC1_3 0x749c
+#define HDMI0_GENERIC1_4 0x74a0
+#define HDMI0_GENERIC1_5 0x74a4
+#define HDMI0_GENERIC1_6 0x74a8
+#define HDMI0_ACR_32_0 0x74ac
+# define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_32_1 0x74b0
+# define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_44_0 0x74b4
+# define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_44_1 0x74b8
+# define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_48_0 0x74bc
+# define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_48_1 0x74c0
+# define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_STATUS_0 0x74c4
+#define HDMI0_ACR_STATUS_1 0x74c8
+#define HDMI0_AUDIO_INFO0 0x74cc
+# define HDMI0_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define HDMI0_AUDIO_INFO_CC(x) (((x) & 7) << 8)
+#define HDMI0_AUDIO_INFO1 0x74d0
+# define HDMI0_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
+# define HDMI0_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
+# define HDMI0_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
+# define HDMI0_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+#define HDMI0_60958_0 0x74d4
+# define HDMI0_60958_CS_A(x) (((x) & 1) << 0)
+# define HDMI0_60958_CS_B(x) (((x) & 1) << 1)
+# define HDMI0_60958_CS_C(x) (((x) & 1) << 2)
+# define HDMI0_60958_CS_D(x) (((x) & 3) << 3)
+# define HDMI0_60958_CS_MODE(x) (((x) & 3) << 6)
+# define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
+# define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
+# define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+# define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
+#define HDMI0_60958_1 0x74d8
+# define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
+# define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
+# define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16)
+# define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
+#define HDMI0_ACR_PACKET_CONTROL 0x74dc
+# define HDMI0_ACR_SEND (1 << 0)
+# define HDMI0_ACR_CONT (1 << 1)
+# define HDMI0_ACR_SELECT(x) (((x) & 3) << 4)
+# define HDMI0_ACR_HW 0
+# define HDMI0_ACR_32 1
+# define HDMI0_ACR_44 2
+# define HDMI0_ACR_48 3
+# define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
+# define HDMI0_ACR_AUTO_SEND (1 << 12)
+#define HDMI0_RAMP_CONTROL0 0x74e0
+# define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL1 0x74e4
+# define HDMI0_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL2 0x74e8
+# define HDMI0_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL3 0x74ec
+# define HDMI0_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
+/* HDMI0_60958_2 is r7xx only */
+#define HDMI0_60958_2 0x74f0
+# define HDMI0_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
+/* r6xx only; second instance starts at 0x7700 */
+#define HDMI1_CONTROL 0x7700
+#define HDMI1_STATUS 0x7704
+#define HDMI1_AUDIO_PACKET_CONTROL 0x7708
+/* DCE3; second instance starts at 0x7800 NOT 0x7700 */
+#define DCE3_HDMI1_CONTROL 0x7800
+#define DCE3_HDMI1_STATUS 0x7804
+#define DCE3_HDMI1_AUDIO_PACKET_CONTROL 0x7808
+/* DCE3.2 (for interrupts) */
+#define AFMT_STATUS 0x7600
+# define AFMT_AUDIO_ENABLE (1 << 4)
+# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL 0x7604
+# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
+# define AFMT_AUDIO_TEST_EN (1 << 12)
+# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
+# define AFMT_60958_CS_UPDATE (1 << 26)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
+
/*
* PM4
*/
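
The new HDMI0_60958_* field macros also explain the literal used near the top of this section: the 0x5 << 16 written when the validity status bit is set is exactly CS_VALID_L(1) | CS_VALID_R(1), and 24-bit samples carry channel-status word-length code 0xb. A standalone check of both, with the encodings copied from the definitions above:

    #include <assert.h>
    #include <stdio.h>

    /* Field encodings copied from the HDMI0_60958_1 definitions above */
    #define HDMI0_60958_CS_WORD_LENGTH(x)  (((x) & 0xf) << 0)
    #define HDMI0_60958_CS_VALID_L(x)      (((x) & 1) << 16)
    #define HDMI0_60958_CS_VALID_R(x)      (((x) & 1) << 18)

    int main(void)
    {
        /* The 0x5 << 16 set when AUDIO_STATUS_V is reported is the L/R
         * validity pair expressed through the new macros. */
        assert((0x5 << 16) ==
               (HDMI0_60958_CS_VALID_L(1) | HDMI0_60958_CS_VALID_R(1)));

        /* 24-bit samples use channel-status word-length code 0xb. */
        printf("24-bit word length field: 0x%x\n",
               (unsigned)HDMI0_60958_CS_WORD_LENGTH(0xb));
        return 0;
    }
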
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 138b95216d8d..2e24022b389a 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -94,33 +94,38 @@ extern int radeon_disp_priority;
extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
extern int radeon_msi;
+extern int radeon_lockup_timeout;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
* symbol;
*/
-#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
-#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
+#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
-#define RADEON_IB_POOL_SIZE 16
-#define RADEON_DEBUGFS_MAX_COMPONENTS 32
-#define RADEONFB_CONN_LIMIT 4
-#define RADEON_BIOS_NUM_SCRATCH 8
+#define RADEON_IB_POOL_SIZE 16
+#define RADEON_DEBUGFS_MAX_COMPONENTS 32
+#define RADEONFB_CONN_LIMIT 4
+#define RADEON_BIOS_NUM_SCRATCH 8
/* max number of rings */
-#define RADEON_NUM_RINGS 3
+#define RADEON_NUM_RINGS 3
+
+/* fence seq are set to this number when signaled */
+#define RADEON_FENCE_SIGNALED_SEQ 0LL
+#define RADEON_FENCE_NOTEMITED_SEQ (~0LL)
/* internal ring indices */
/* r1xx+ has gfx CP ring */
-#define RADEON_RING_TYPE_GFX_INDEX 0
+#define RADEON_RING_TYPE_GFX_INDEX 0
/* cayman has 2 compute CP rings */
-#define CAYMAN_RING_TYPE_CP1_INDEX 1
-#define CAYMAN_RING_TYPE_CP2_INDEX 2
+#define CAYMAN_RING_TYPE_CP1_INDEX 1
+#define CAYMAN_RING_TYPE_CP2_INDEX 2
/* hardcode those limit for now */
-#define RADEON_VA_RESERVED_SIZE (8 << 20)
-#define RADEON_IB_VM_MAX_SIZE (64 << 10)
+#define RADEON_VA_RESERVED_SIZE (8 << 20)
+#define RADEON_IB_VM_MAX_SIZE (64 << 10)
/*
* Errata workarounds.
@@ -253,28 +258,20 @@ struct radeon_fence_driver {
uint32_t scratch_reg;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
- atomic_t seq;
- uint32_t last_seq;
- unsigned long last_jiffies;
- unsigned long last_timeout;
- wait_queue_head_t queue;
- struct list_head created;
- struct list_head emitted;
- struct list_head signaled;
+ /* seq is protected by ring emission lock */
+ uint64_t seq;
+ atomic64_t last_seq;
+ unsigned long last_activity;
bool initialized;
};
struct radeon_fence {
struct radeon_device *rdev;
struct kref kref;
- struct list_head list;
/* protected by radeon_fence.lock */
- uint32_t seq;
- bool emitted;
- bool signaled;
+ uint64_t seq;
/* RB, DMA, etc. */
- int ring;
- struct radeon_semaphore *semaphore;
+ unsigned ring;
};
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
@@ -285,11 +282,14 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_any(struct radeon_device *rdev,
+ struct radeon_fence **fences,
+ bool intr);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
-int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
/*
* Tiling registers
@@ -346,6 +346,9 @@ struct radeon_bo {
/* Constant after initialization */
struct radeon_device *rdev;
struct drm_gem_object gem_base;
+
+ struct ttm_bo_kmap_obj dma_buf_vmap;
+ int vmapping_count;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@@ -382,8 +385,11 @@ struct radeon_bo_list {
* alignment).
*/
struct radeon_sa_manager {
+ spinlock_t lock;
struct radeon_bo *bo;
- struct list_head sa_bo;
+ struct list_head *hole;
+ struct list_head flist[RADEON_NUM_RINGS];
+ struct list_head olist;
unsigned size;
uint64_t gpu_addr;
void *cpu_ptr;
@@ -394,10 +400,12 @@ struct radeon_sa_bo;
/* sub-allocation buffer */
struct radeon_sa_bo {
- struct list_head list;
+ struct list_head olist;
+ struct list_head flist;
struct radeon_sa_manager *manager;
- unsigned offset;
- unsigned size;
+ unsigned soffset;
+ unsigned eoffset;
+ struct radeon_fence *fence;
};
/*
@@ -428,42 +436,26 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
/*
* Semaphores.
*/
-struct radeon_ring;
-
-#define RADEON_SEMAPHORE_BO_SIZE 256
-
-struct radeon_semaphore_driver {
- rwlock_t lock;
- struct list_head bo;
-};
-
-struct radeon_semaphore_bo;
-
/* everything here is constant */
struct radeon_semaphore {
- struct list_head list;
+ struct radeon_sa_bo *sa_bo;
+ signed waiters;
uint64_t gpu_addr;
- uint32_t *cpu_ptr;
- struct radeon_semaphore_bo *bo;
};
-struct radeon_semaphore_bo {
- struct list_head list;
- struct radeon_ib *ib;
- struct list_head free;
- struct radeon_semaphore semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
- unsigned nused;
-};
-
-void radeon_semaphore_driver_fini(struct radeon_device *rdev);
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore);
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ bool sync_to[RADEON_NUM_RINGS],
+ int dst_ring);
void radeon_semaphore_free(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore);
+ struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence);
/*
* GART structures, functions & helpers
@@ -560,6 +552,7 @@ struct radeon_unpin_work {
struct r500_irq_stat_regs {
u32 disp_int;
+ u32 hdmi0_status;
};
struct r600_irq_stat_regs {
@@ -568,6 +561,8 @@ struct r600_irq_stat_regs {
u32 disp_int_cont2;
u32 d1grph_int;
u32 d2grph_int;
+ u32 hdmi0_status;
+ u32 hdmi1_status;
};
struct evergreen_irq_stat_regs {
@@ -583,6 +578,12 @@ struct evergreen_irq_stat_regs {
u32 d4grph_int;
u32 d5grph_int;
u32 d6grph_int;
+ u32 afmt_status1;
+ u32 afmt_status2;
+ u32 afmt_status3;
+ u32 afmt_status4;
+ u32 afmt_status5;
+ u32 afmt_status6;
};
union radeon_irq_stat_regs {
@@ -593,7 +594,7 @@ union radeon_irq_stat_regs {
#define RADEON_MAX_HPD_PINS 6
#define RADEON_MAX_CRTCS 6
-#define RADEON_MAX_HDMI_BLOCKS 2
+#define RADEON_MAX_AFMT_BLOCKS 6
struct radeon_irq {
bool installed;
@@ -605,7 +606,7 @@ struct radeon_irq {
bool gui_idle;
bool gui_idle_acked;
wait_queue_head_t idle_queue;
- bool hdmi[RADEON_MAX_HDMI_BLOCKS];
+ bool afmt[RADEON_MAX_AFMT_BLOCKS];
spinlock_t sw_lock;
int sw_refcount[RADEON_NUM_RINGS];
union radeon_irq_stat_regs stat_regs;
@@ -625,26 +626,14 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
*/
struct radeon_ib {
- struct radeon_sa_bo sa_bo;
- unsigned idx;
- uint32_t length_dw;
- uint64_t gpu_addr;
- uint32_t *ptr;
- struct radeon_fence *fence;
- unsigned vm_id;
- bool is_const_ib;
-};
-
-/*
- * locking -
- * mutex protects scheduled_ibs, ready, alloc_bm
- */
-struct radeon_ib_pool {
- struct radeon_mutex mutex;
- struct radeon_sa_manager sa_manager;
- struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
- bool ready;
- unsigned head_id;
+ struct radeon_sa_bo *sa_bo;
+ uint32_t length_dw;
+ uint64_t gpu_addr;
+ uint32_t *ptr;
+ struct radeon_fence *fence;
+ unsigned vm_id;
+ bool is_const_ib;
+ struct radeon_semaphore *semaphore;
};
struct radeon_ring {
@@ -659,10 +648,11 @@ struct radeon_ring {
unsigned ring_size;
unsigned ring_free_dw;
int count_dw;
+ unsigned long last_activity;
+ unsigned last_rptr;
uint64_t gpu_addr;
uint32_t align_mask;
uint32_t ptr_mask;
- struct mutex mutex;
bool ready;
u32 ptr_reg_shift;
u32 ptr_reg_mask;
@@ -679,7 +669,7 @@ struct radeon_vm {
unsigned last_pfn;
u64 pt_gpu_addr;
u64 *pt;
- struct radeon_sa_bo sa_bo;
+ struct radeon_sa_bo *sa_bo;
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
@@ -756,7 +746,6 @@ struct r600_blit_cp_primitives {
};
struct r600_blit {
- struct mutex mutex;
struct radeon_bo *shader_obj;
struct r600_blit_cp_primitives primitives;
int max_dim;
@@ -766,8 +755,6 @@ struct r600_blit {
u32 vs_offset, ps_offset;
u32 state_offset;
u32 state_len;
- u32 vb_used, vb_total;
- struct radeon_ib *vb_ib;
};
void r600_blit_suspend(struct radeon_device *rdev);
@@ -785,14 +772,14 @@ struct si_rlc {
};
int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib **ib, unsigned size);
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
+ struct radeon_ib *ib, unsigned size);
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_pool_start(struct radeon_device *rdev);
int radeon_ib_pool_suspend(struct radeon_device *rdev);
+int radeon_ib_ring_tests(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -800,8 +787,12 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsign
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_undo(struct radeon_ring *ring);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
+void radeon_ring_lockup_update(struct radeon_ring *ring);
+bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
@@ -850,8 +841,8 @@ struct radeon_cs_parser {
int chunk_relocs_idx;
int chunk_flags_idx;
int chunk_const_ib_idx;
- struct radeon_ib *ib;
- struct radeon_ib *const_ib;
+ struct radeon_ib ib;
+ struct radeon_ib const_ib;
void *track;
unsigned family;
int parser_error;
@@ -860,7 +851,6 @@ struct radeon_cs_parser {
s32 priority;
};
-extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
@@ -1105,6 +1095,14 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
int instance);
+struct r600_audio {
+ int channels;
+ int rate;
+ int bits_per_sample;
+ u8 status_bits;
+ u8 category_code;
+};
+
/*
* Benchmarking
*/
@@ -1144,7 +1142,6 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
- bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*asic_reset)(struct radeon_device *rdev);
/* ioctl hw specific callback. Some hw might want to perform special
* operation on specific ioctl. For instance on wait idle some hw
@@ -1173,6 +1170,7 @@ struct radeon_asic {
void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+ bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
} ring[RADEON_NUM_RINGS];
/* irqs */
struct {
@@ -1251,16 +1249,10 @@ struct radeon_asic {
/*
* Asic structures
*/
-struct r100_gpu_lockup {
- unsigned long last_jiffies;
- u32 last_cp_rptr;
-};
-
struct r100_asic {
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
u32 hdp_cntl;
- struct r100_gpu_lockup lockup;
};
struct r300_asic {
@@ -1268,7 +1260,6 @@ struct r300_asic {
unsigned reg_safe_bm_size;
u32 resync_scratch;
u32 hdp_cntl;
- struct r100_gpu_lockup lockup;
};
struct r600_asic {
@@ -1290,7 +1281,6 @@ struct r600_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
- struct r100_gpu_lockup lockup;
};
struct rv770_asic {
@@ -1316,7 +1306,6 @@ struct rv770_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
- struct r100_gpu_lockup lockup;
};
struct evergreen_asic {
@@ -1343,7 +1332,6 @@ struct evergreen_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
- struct r100_gpu_lockup lockup;
};
struct cayman_asic {
@@ -1382,7 +1370,6 @@ struct cayman_asic {
unsigned multi_gpu_tile_size;
unsigned tile_config;
- struct r100_gpu_lockup lockup;
};
struct si_asic {
@@ -1413,7 +1400,6 @@ struct si_asic {
unsigned multi_gpu_tile_size;
unsigned tile_config;
- struct r100_gpu_lockup lockup;
};
union radeon_asic_config {
@@ -1516,11 +1502,12 @@ struct radeon_device {
struct radeon_mode_info mode_info;
struct radeon_scratch scratch;
struct radeon_mman mman;
- rwlock_t fence_lock;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
- struct radeon_semaphore_driver semaphore_drv;
+ wait_queue_head_t fence_queue;
+ struct mutex ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
- struct radeon_ib_pool ib_pool;
+ bool ib_pool_ready;
+ struct radeon_sa_manager ring_tmp_bo;
struct radeon_irq irq;
struct radeon_asic *asic;
struct radeon_gem gem;
@@ -1529,7 +1516,6 @@ struct radeon_device {
struct radeon_mutex cs_mutex;
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
- bool gpu_lockup;
bool shutdown;
bool suspend;
bool need_dma32;
@@ -1546,19 +1532,12 @@ struct radeon_device {
struct r600_ih ih; /* r6/700 interrupt ring */
struct si_rlc rlc;
struct work_struct hotplug_work;
+ struct work_struct audio_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
struct mutex vram_mutex;
-
- /* audio stuff */
- bool audio_enabled;
- struct timer_list audio_timer;
- int audio_channels;
- int audio_rate;
- int audio_bits_per_sample;
- uint8_t audio_status_bits;
- uint8_t audio_category_code;
-
+ bool audio_enabled;
+ struct r600_audio audio_status; /* audio stuff */
struct notifier_block acpi_nb;
/* only one userspace can use Hyperz features or CMASK at a time */
struct drm_file *hyperz_filp;
@@ -1730,7 +1709,6 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
@@ -1739,6 +1717,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
+#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
@@ -1828,6 +1807,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo);
+/* audio */
+void r600_audio_update_hdmi(struct work_struct *work);
/*
* R600 vram scratch functions
@@ -1848,10 +1829,32 @@ int r600_fmt_get_nblocksy(u32 format, u32 h);
/*
* r600 functions used by radeon_encoder.c
*/
+struct radeon_hdmi_acr {
+ u32 clock;
+
+ int n_32khz;
+ int cts_32khz;
+
+ int n_44_1khz;
+ int cts_44_1khz;
+
+ int n_48khz;
+ int cts_48khz;
+
+};
+
+extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
+
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+/*
+ * evergreen functions used by radeon_encoder.c
+ */
+
+extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+
extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);
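
radeon.h drops the asic-level gpu_is_lockup callback and the r100_gpu_lockup state; lockup detection moves to a per-ring .is_lockup hook reached through the new radeon_ring_is_lockup() macro and fed by the last_activity/last_rptr fields in struct radeon_ring. A hedged sketch of how a caller might poll it, assuming the driver's internal headers (the wrapper function is illustrative, only the calls inside it come from the patch):

    /* Illustrative only: how the new per-ring hooks fit together. */
    static bool ring_seems_locked_up(struct radeon_device *rdev, int ridx)
    {
        struct radeon_ring *ring = &rdev->ring[ridx];

        /* nudge the ring so a stuck rptr is distinguishable from idle */
        radeon_ring_force_activity(rdev, ring);

        /* dispatches to asic->ring[ridx].is_lockup(), e.g.
         * r600_gpu_is_lockup() on r6xx/r7xx */
        return radeon_ring_is_lockup(rdev, ridx, ring);
    }
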
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index be4dc2ff0e40..f533df5f7d50 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -134,7 +134,6 @@ static struct radeon_asic r100_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -152,6 +151,7 @@ static struct radeon_asic r100_asic = {
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -208,7 +208,6 @@ static struct radeon_asic r200_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -226,6 +225,7 @@ static struct radeon_asic r200_asic = {
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -282,7 +282,6 @@ static struct radeon_asic r300_asic = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -300,6 +299,7 @@ static struct radeon_asic r300_asic = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -356,7 +356,6 @@ static struct radeon_asic r300_asic_pcie = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -374,6 +373,7 @@ static struct radeon_asic r300_asic_pcie = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -430,7 +430,6 @@ static struct radeon_asic r420_asic = {
.suspend = &r420_suspend,
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -448,6 +447,7 @@ static struct radeon_asic r420_asic = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -504,7 +504,6 @@ static struct radeon_asic rs400_asic = {
.suspend = &rs400_suspend,
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -522,6 +521,7 @@ static struct radeon_asic rs400_asic = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -578,7 +578,6 @@ static struct radeon_asic rs600_asic = {
.suspend = &rs600_suspend,
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -596,6 +595,7 @@ static struct radeon_asic rs600_asic = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -652,7 +652,6 @@ static struct radeon_asic rs690_asic = {
.suspend = &rs690_suspend,
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -670,6 +669,7 @@ static struct radeon_asic rs690_asic = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -726,7 +726,6 @@ static struct radeon_asic rv515_asic = {
.suspend = &rv515_suspend,
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -744,6 +743,7 @@ static struct radeon_asic rv515_asic = {
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -800,7 +800,6 @@ static struct radeon_asic r520_asic = {
.suspend = &rv515_suspend,
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -818,6 +817,7 @@ static struct radeon_asic r520_asic = {
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -874,7 +874,6 @@ static struct radeon_asic r600_asic = {
.suspend = &r600_suspend,
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
- .gpu_is_lockup = &r600_gpu_is_lockup,
.asic_reset = &r600_asic_reset,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
@@ -891,6 +890,7 @@ static struct radeon_asic r600_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &r600_gpu_is_lockup,
}
},
.irq = {
@@ -946,7 +946,6 @@ static struct radeon_asic rs780_asic = {
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
- .gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -964,6 +963,7 @@ static struct radeon_asic rs780_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &r600_gpu_is_lockup,
}
},
.irq = {
@@ -1020,7 +1020,6 @@ static struct radeon_asic rv770_asic = {
.suspend = &rv770_suspend,
.resume = &rv770_resume,
.asic_reset = &r600_asic_reset,
- .gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
@@ -1037,6 +1036,7 @@ static struct radeon_asic rv770_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &r600_gpu_is_lockup,
}
},
.irq = {
@@ -1092,7 +1092,6 @@ static struct radeon_asic evergreen_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1110,6 +1109,7 @@ static struct radeon_asic evergreen_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
@@ -1165,7 +1165,6 @@ static struct radeon_asic sumo_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1183,6 +1182,7 @@ static struct radeon_asic sumo_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
},
},
.irq = {
@@ -1238,7 +1238,6 @@ static struct radeon_asic btc_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1256,6 +1255,7 @@ static struct radeon_asic btc_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
@@ -1321,7 +1321,6 @@ static struct radeon_asic cayman_asic = {
.fini = &cayman_fini,
.suspend = &cayman_suspend,
.resume = &cayman_resume,
- .gpu_is_lockup = &cayman_gpu_is_lockup,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1340,6 +1339,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1349,6 +1349,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1358,6 +1359,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
@@ -1413,7 +1415,6 @@ static struct radeon_asic trinity_asic = {
.fini = &cayman_fini,
.suspend = &cayman_suspend,
.resume = &cayman_resume,
- .gpu_is_lockup = &cayman_gpu_is_lockup,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1432,6 +1433,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1441,6 +1443,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1450,6 +1453,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
@@ -1515,7 +1519,6 @@ static struct radeon_asic si_asic = {
.fini = &si_fini,
.suspend = &si_suspend,
.resume = &si_resume,
- .gpu_is_lockup = &si_gpu_is_lockup,
.asic_reset = &si_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1534,6 +1537,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &si_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &si_ring_ib_execute,
@@ -1543,6 +1547,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &si_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &si_ring_ib_execute,
@@ -1552,6 +1557,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &si_gpu_is_lockup,
}
},
.irq = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3d9f9f1d8f90..e76a941ef14e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -103,11 +103,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
-void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
- struct radeon_ring *cp);
-bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
- struct r100_gpu_lockup *lockup,
- struct radeon_ring *cp);
void r100_ib_fini(struct radeon_device *rdev);
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r100_irq_disable(struct radeon_device *rdev);
@@ -159,7 +154,6 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
-extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -362,26 +356,20 @@ void r600_disable_interrupts(struct radeon_device *rdev);
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
int r600_audio_init(struct radeon_device *rdev);
-int r600_audio_tmds_index(struct drm_encoder *encoder);
void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
-int r600_audio_channels(struct radeon_device *rdev);
-int r600_audio_bits_per_sample(struct radeon_device *rdev);
-int r600_audio_rate(struct radeon_device *rdev);
-uint8_t r600_audio_status_bits(struct radeon_device *rdev);
-uint8_t r600_audio_category_code(struct radeon_device *rdev);
-void r600_audio_schedule_polling(struct radeon_device *rdev);
-void r600_audio_enable_polling(struct drm_encoder *encoder);
-void r600_audio_disable_polling(struct drm_encoder *encoder);
+struct r600_audio r600_audio_status(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
-void r600_hdmi_init(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+ struct radeon_sa_bo **vb);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
+ struct radeon_sa_bo *vb);
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages);
+ unsigned num_gpu_pages,
+ struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
/*
@@ -446,7 +434,6 @@ int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
int cayman_suspend(struct radeon_device *rdev);
int cayman_resume(struct radeon_device *rdev);
-bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int cayman_asic_reset(struct radeon_device *rdev);
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int cayman_vm_init(struct radeon_device *rdev);
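
radeon_asic.h also carries the reworked r600 blit API: the vertex-buffer sub-allocation is now an explicit struct radeon_sa_bo * threaded through prepare/copy/done instead of living in struct r600_blit. A hedged sketch of the resulting call sequence, assuming the driver's internals and omitting error handling (only the three prototypes come from the patch):

    /* Sketch of a copy with the reworked blit API; illustrative, not
     * driver code. */
    static void blit_copy_example(struct radeon_device *rdev,
                                  u64 src_gpu_addr, u64 dst_gpu_addr,
                                  unsigned num_gpu_pages,
                                  struct radeon_fence *fence)
    {
        struct radeon_sa_bo *vb;

        if (r600_blit_prepare_copy(rdev, num_gpu_pages, &vb))
            return;
        r600_kms_blit_copy(rdev, src_gpu_addr, dst_gpu_addr,
                           num_gpu_pages, vb);
        r600_blit_done_copy(rdev, fence, vb);
    }
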
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f6e69b8c06c6..b1e3820df363 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -444,7 +444,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*/
if ((dev->pdev->device == 0x9498) &&
(dev->pdev->subsystem_vendor == 0x1682) &&
- (dev->pdev->subsystem_device == 0x2452)) {
+ (dev->pdev->subsystem_device == 0x2452) &&
+ (i2c_bus->valid == false) &&
+ !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
struct radeon_device *rdev = dev->dev_private;
*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
}
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index fef7b722b05d..364f5b1a04b9 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -103,7 +103,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
int time;
n = RADEON_BENCHMARK_ITERATIONS;
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
if (r) {
goto out_cleanup;
}
@@ -115,7 +115,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (r) {
goto out_cleanup;
}
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 2cad9fde92fc..576f4f6919f2 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1561,6 +1561,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
(rdev->pdev->subsystem_device == 0x4150)) {
/* Mac G5 tower 9600 */
rdev->mode_info.connector_table = CT_MAC_G5_9600;
+ } else if ((rdev->pdev->device == 0x4c66) &&
+ (rdev->pdev->subsystem_vendor == 0x1002) &&
+ (rdev->pdev->subsystem_device == 0x4c66)) {
+ /* SAM440ep RV250 embedded board */
+ rdev->mode_info.connector_table = CT_SAM440EP;
} else
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC64
@@ -2134,6 +2139,67 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
+ case CT_SAM440EP:
+ DRM_INFO("Connector Table: %d (SAM440ep embedded board)\n",
+ rdev->mode_info.connector_table);
+ /* LVDS */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
+ /* DVI-I - secondary dac, int tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 3, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 3c2e7a000a2a..2914c5761cfc 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -84,6 +84,62 @@ static void radeon_property_change_mode(struct drm_encoder *encoder)
crtc->x, crtc->y, crtc->fb);
}
}
+
+int radeon_get_monitor_bpc(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector;
+ int bpc = 8;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ if (radeon_connector->use_digital) {
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ }
+ }
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ }
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ dig_connector = radeon_connector->con_priv;
+ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
+ drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ }
+ break;
+ case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_LVDS:
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR)
+ bpc = 6;
+ else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR)
+ bpc = 8;
+ }
+ break;
+ }
+ return bpc;
+}
+
static void
radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
{
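
The new radeon_get_monitor_bpc() helper centralizes the sink-depth decision: 8 bpc by default, the EDID-reported bpc for HDMI and DisplayPort sinks, and 6 or 8 bpc from the panel misc flags for LVDS/eDP on DCE4.1/DCE5. A hedged usage sketch (the caller is illustrative, not from the patch):

    /* Illustrative caller, not from the patch: use the reported depth to
     * decide whether the encoder should dither 8-bit content down. */
    static bool want_dither(struct drm_connector *connector)
    {
        /* 6 bpc comes from ATOM_PANEL_MISC_V13_6BIT_PER_COLOR panels;
         * everything else reports 8 bpc or better. */
        return radeon_get_monitor_bpc(connector) < 8;
    }
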
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5cac83278338..0137689ed461 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -118,44 +118,33 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
bool sync_to_ring[RADEON_NUM_RINGS] = { };
+ bool need_sync = false;
int i, r;
for (i = 0; i < p->nrelocs; i++) {
+ struct radeon_fence *fence;
+
if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
continue;
- if (!(p->relocs[i].flags & RADEON_RELOC_DONT_SYNC)) {
- struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
- if (!radeon_fence_signaled(fence)) {
- sync_to_ring[fence->ring] = true;
- }
+ fence = p->relocs[i].robj->tbo.sync_obj;
+ if (fence->ring != p->ring && !radeon_fence_signaled(fence)) {
+ sync_to_ring[fence->ring] = true;
+ need_sync = true;
}
}
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- /* no need to sync to our own or unused rings */
- if (i == p->ring || !sync_to_ring[i] || !p->rdev->ring[i].ready)
- continue;
-
- if (!p->ib->fence->semaphore) {
- r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
- if (r)
- return r;
- }
-
- r = radeon_ring_lock(p->rdev, &p->rdev->ring[i], 3);
- if (r)
- return r;
- radeon_semaphore_emit_signal(p->rdev, i, p->ib->fence->semaphore);
- radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[i]);
+ if (!need_sync) {
+ return 0;
+ }
- r = radeon_ring_lock(p->rdev, &p->rdev->ring[p->ring], 3);
- if (r)
- return r;
- radeon_semaphore_emit_wait(p->rdev, p->ring, p->ib->fence->semaphore);
- radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[p->ring]);
+ r = radeon_semaphore_create(p->rdev, &p->ib.semaphore);
+ if (r) {
+ return r;
}
- return 0;
+
+ return radeon_semaphore_sync_rings(p->rdev, p->ib.semaphore,
+ sync_to_ring, p->ring);
}
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -172,6 +161,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
/* get chunks */
INIT_LIST_HEAD(&p->validated);
p->idx = 0;
+ p->ib.sa_bo = NULL;
+ p->ib.semaphore = NULL;
+ p->const_ib.sa_bo = NULL;
+ p->const_ib.semaphore = NULL;
p->chunk_ib_idx = -1;
p->chunk_relocs_idx = -1;
p->chunk_flags_idx = -1;
@@ -278,11 +271,16 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[p->chunk_ib_idx].length_dw);
return -EINVAL;
}
- p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
- p->chunks[p->chunk_ib_idx].kpage[1] == NULL)
- return -ENOMEM;
+ if ((p->rdev->flags & RADEON_IS_AGP)) {
+ p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
+ p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
+ kfree(p->chunks[i].kpage[0]);
+ kfree(p->chunks[i].kpage[1]);
+ return -ENOMEM;
+ }
+ }
p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
p->chunks[p->chunk_ib_idx].last_copied_page = -1;
@@ -305,10 +303,9 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
-
- if (!error && parser->ib)
+ if (!error)
ttm_eu_fence_buffer_objects(&parser->validated,
- parser->ib->fence);
+ parser->ib.fence);
else
ttm_eu_backoff_reservation(&parser->validated);
@@ -323,12 +320,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
kfree(parser->relocs_ptr);
for (i = 0; i < parser->nchunks; i++) {
kfree(parser->chunks[i].kdata);
- kfree(parser->chunks[i].kpage[0]);
- kfree(parser->chunks[i].kpage[1]);
+ if ((parser->rdev->flags & RADEON_IS_AGP)) {
+ kfree(parser->chunks[i].kpage[0]);
+ kfree(parser->chunks[i].kpage[1]);
+ }
}
kfree(parser->chunks);
kfree(parser->chunks_array);
radeon_ib_free(parser->rdev, &parser->ib);
+ radeon_ib_free(parser->rdev, &parser->const_ib);
}
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
@@ -354,7 +354,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
DRM_ERROR("Failed to get ib !\n");
return r;
}
- parser->ib->length_dw = ib_chunk->length_dw;
+ parser->ib.length_dw = ib_chunk->length_dw;
r = radeon_cs_parse(rdev, parser->ring, parser);
if (r || parser->parser_error) {
DRM_ERROR("Invalid command stream !\n");
@@ -369,8 +369,8 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
if (r) {
DRM_ERROR("Failed to synchronize rings !\n");
}
- parser->ib->vm_id = 0;
- r = radeon_ib_schedule(rdev, parser->ib);
+ parser->ib.vm_id = 0;
+ r = radeon_ib_schedule(rdev, &parser->ib);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
}
@@ -421,14 +421,14 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
DRM_ERROR("Failed to get const ib !\n");
return r;
}
- parser->const_ib->is_const_ib = true;
- parser->const_ib->length_dw = ib_chunk->length_dw;
+ parser->const_ib.is_const_ib = true;
+ parser->const_ib.length_dw = ib_chunk->length_dw;
/* Copy the packet into the IB */
- if (DRM_COPY_FROM_USER(parser->const_ib->ptr, ib_chunk->user_ptr,
+ if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
ib_chunk->length_dw * 4)) {
return -EFAULT;
}
- r = radeon_ring_ib_parse(rdev, parser->ring, parser->const_ib);
+ r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
if (r) {
return r;
}
@@ -445,13 +445,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
DRM_ERROR("Failed to get ib !\n");
return r;
}
- parser->ib->length_dw = ib_chunk->length_dw;
+ parser->ib.length_dw = ib_chunk->length_dw;
/* Copy the packet into the IB */
- if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
+ if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
ib_chunk->length_dw * 4)) {
return -EFAULT;
}
- r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
+ r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
if (r) {
return r;
}
@@ -472,34 +472,44 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
- parser->const_ib->vm_id = vm->id;
+ parser->const_ib.vm_id = vm->id;
/* ib pool is bind at 0 in virtual address space to gpu_addr is the
* offset inside the pool bo
*/
- parser->const_ib->gpu_addr = parser->const_ib->sa_bo.offset;
- r = radeon_ib_schedule(rdev, parser->const_ib);
+ parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
+ r = radeon_ib_schedule(rdev, &parser->const_ib);
if (r)
goto out;
}
- parser->ib->vm_id = vm->id;
+ parser->ib.vm_id = vm->id;
/* ib pool is bind at 0 in virtual address space to gpu_addr is the
* offset inside the pool bo
*/
- parser->ib->gpu_addr = parser->ib->sa_bo.offset;
- parser->ib->is_const_ib = false;
- r = radeon_ib_schedule(rdev, parser->ib);
+ parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
+ parser->ib.is_const_ib = false;
+ r = radeon_ib_schedule(rdev, &parser->ib);
out:
if (!r) {
if (vm->fence) {
radeon_fence_unref(&vm->fence);
}
- vm->fence = radeon_fence_ref(parser->ib->fence);
+ vm->fence = radeon_fence_ref(parser->ib.fence);
}
mutex_unlock(&fpriv->vm.mutex);
return r;
}
+static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
+{
+ if (r == -EDEADLK) {
+ r = radeon_gpu_reset(rdev);
+ if (!r)
+ r = -EAGAIN;
+ }
+ return r;
+}
+
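radeon_cs_handle_lockup() turns the -EDEADLK returned when a fence wait detects a lockup into a GPU reset followed by -EAGAIN, so the submitter is expected to simply resubmit the command stream. A minimal sketch of that resubmission loop as a userspace caller might write it (libdrm's drmIoctl() retries in much the same way); the header path and the trimmed error handling are assumptions of the sketch, while DRM_IOCTL_RADEON_CS and struct drm_radeon_cs are the standard radeon UAPI.

#include <errno.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>   /* header location may vary with the libdrm install */

/* Sketch: resubmit the CS while the kernel signals a restart, either because
 * the wait was interrupted (EINTR) or because a GPU reset was performed and
 * the work must be resent (EAGAIN). */
static int submit_cs_sketch(int fd, struct drm_radeon_cs *cs)
{
    int ret;

    do {
        ret = ioctl(fd, DRM_IOCTL_RADEON_CS, cs);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    return ret;
}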
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
@@ -521,6 +531,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
radeon_cs_parser_fini(&parser, r);
+ r = radeon_cs_handle_lockup(rdev, r);
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
@@ -529,6 +540,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
+ r = radeon_cs_handle_lockup(rdev, r);
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
@@ -542,6 +554,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
out:
radeon_cs_parser_fini(&parser, r);
+ r = radeon_cs_handle_lockup(rdev, r);
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
@@ -559,7 +572,7 @@ int radeon_cs_finish_pages(struct radeon_cs_parser *p)
size = PAGE_SIZE;
}
- if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+ if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
ibc->user_ptr + (i * PAGE_SIZE),
size))
return -EFAULT;
@@ -567,15 +580,16 @@ int radeon_cs_finish_pages(struct radeon_cs_parser *p)
return 0;
}
-int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
+static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
int new_page;
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
int i;
int size = PAGE_SIZE;
+ bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
- if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+ if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
ibc->user_ptr + (i * PAGE_SIZE),
PAGE_SIZE)) {
p->parser_error = -EFAULT;
@@ -583,14 +597,16 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
}
}
- new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
-
if (pg_idx == ibc->last_page_index) {
size = (ibc->length_dw * 4) % PAGE_SIZE;
- if (size == 0)
- size = PAGE_SIZE;
+ if (size == 0)
+ size = PAGE_SIZE;
}
+ new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
+ if (copy1)
+ ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
+
if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
ibc->user_ptr + (pg_idx * PAGE_SIZE),
size)) {
@@ -598,11 +614,37 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
return 0;
}
- /* copy to IB here */
- memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
+ /* copy to IB for the non single-copy (AGP) case */
+ if (!copy1)
+ memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
ibc->last_copied_page = pg_idx;
ibc->kpage_idx[new_page] = pg_idx;
return new_page;
}
+
+u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+ struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ u32 pg_idx, pg_offset;
+ u32 idx_value = 0;
+ int new_page;
+
+ pg_idx = (idx * 4) / PAGE_SIZE;
+ pg_offset = (idx * 4) % PAGE_SIZE;
+
+ if (ibc->kpage_idx[0] == pg_idx)
+ return ibc->kpage[0][pg_offset/4];
+ if (ibc->kpage_idx[1] == pg_idx)
+ return ibc->kpage[1][pg_offset/4];
+
+ new_page = radeon_cs_update_pages(p, pg_idx);
+ if (new_page < 0) {
+ p->parser_error = new_page;
+ return 0;
+ }
+
+ idx_value = ibc->kpage[new_page][pg_offset/4];
+ return idx_value;
+}
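radeon_get_ib_value() resolves an IB dword through a small two-slot page cache: kpage_idx[] records which user page each of kpage[0]/kpage[1] currently holds, and on a miss radeon_cs_update_pages() refills whichever slot holds the older page (on non-AGP chips the slot is simply pointed into the IB itself, so the data is copied only once). Below is a standalone sketch of the same two-slot scheme, with a hypothetical fetch_page() standing in for the copy-from-user path; none of the names in the sketch come from the driver.

#include <stdint.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096

struct two_slot_cache {
    uint32_t *slot[2];   /* cached page contents */
    int slot_idx[2];     /* page index held by each slot, -1 when empty */
};

/* hypothetical stand-in for copying one page of the user IB into a slot */
static void fetch_page(uint32_t *dst, const uint32_t *user_ib, int pg_idx)
{
    memcpy(dst, user_ib + pg_idx * (SKETCH_PAGE_SIZE / 4), SKETCH_PAGE_SIZE);
}

static uint32_t ib_read_dword(struct two_slot_cache *c,
                              const uint32_t *user_ib, int idx)
{
    int pg_idx = (idx * 4) / SKETCH_PAGE_SIZE;
    int pg_off = (idx * 4) % SKETCH_PAGE_SIZE;
    int victim;

    if (c->slot_idx[0] == pg_idx)
        return c->slot[0][pg_off / 4];
    if (c->slot_idx[1] == pg_idx)
        return c->slot[1][pg_off / 4];

    /* miss: refill the slot holding the older page, as the driver does */
    victim = c->slot_idx[0] < c->slot_idx[1] ? 0 : 1;
    fetch_page(c->slot[victim], user_ib, pg_idx);
    c->slot_idx[victim] = pg_idx;

    return c->slot[victim][pg_off / 4];
}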
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5992502a3448..066c98b888a5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -193,7 +193,7 @@ int radeon_wb_init(struct radeon_device *rdev)
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
+ RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
@@ -225,9 +225,9 @@ int radeon_wb_init(struct radeon_device *rdev)
/* disable event_write fences */
rdev->wb.use_event = false;
/* disabled via module param */
- if (radeon_no_wb == 1)
+ if (radeon_no_wb == 1) {
rdev->wb.enabled = false;
- else {
+ } else {
if (rdev->flags & RADEON_IS_AGP) {
/* often unreliable on AGP */
rdev->wb.enabled = false;
@@ -237,8 +237,9 @@ int radeon_wb_init(struct radeon_device *rdev)
} else {
rdev->wb.enabled = true;
/* event_write fences are only available on r600+ */
- if (rdev->family >= CHIP_R600)
+ if (rdev->family >= CHIP_R600) {
rdev->wb.use_event = true;
+ }
}
}
/* always use writeback/events on NI, APUs */
@@ -696,6 +697,11 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
+static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
+ .set_gpu_state = radeon_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = radeon_switcheroo_can_switch,
+};
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
@@ -714,7 +720,6 @@ int radeon_device_init(struct radeon_device *rdev,
rdev->is_atom_bios = false;
rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- rdev->gpu_lockup = false;
rdev->accel_working = false;
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
@@ -724,21 +729,18 @@ int radeon_device_init(struct radeon_device *rdev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
radeon_mutex_init(&rdev->cs_mutex);
- radeon_mutex_init(&rdev->ib_pool.mutex);
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- mutex_init(&rdev->ring[i].mutex);
+ mutex_init(&rdev->ring_lock);
mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600)
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->vram_mutex);
- rwlock_init(&rdev->fence_lock);
- rwlock_init(&rdev->semaphore_drv.lock);
- INIT_LIST_HEAD(&rdev->gem.objects);
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
- INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
+ r = radeon_gem_init(rdev);
+ if (r)
+ return r;
/* initialize vm here */
rdev->vm_manager.use_bitmap = 1;
rdev->vm_manager.max_pfn = 1 << 20;
@@ -814,10 +816,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- vga_switcheroo_register_client(rdev->pdev,
- radeon_switcheroo_set_state,
- NULL,
- radeon_switcheroo_can_switch);
+ vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
r = radeon_init(rdev);
if (r)
@@ -914,9 +913,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
}
/* evict vram memory */
radeon_bo_evict_vram(rdev);
+
+ mutex_lock(&rdev->ring_lock);
/* wait for gpu to finish processing current batch */
for (i = 0; i < RADEON_NUM_RINGS; i++)
- radeon_fence_wait_last(rdev, i);
+ radeon_fence_wait_empty_locked(rdev, i);
+ mutex_unlock(&rdev->ring_lock);
radeon_save_bios_scratch_regs(rdev);
@@ -955,7 +957,6 @@ int radeon_resume_kms(struct drm_device *dev)
console_unlock();
return -1;
}
- pci_set_master(dev->pdev);
/* resume AGP if in use */
radeon_agp_resume(rdev);
radeon_resume(rdev);
@@ -988,9 +989,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
int r;
int resched;
- /* Prevent CS ioctl from interfering */
- radeon_mutex_lock(&rdev->cs_mutex);
-
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -1005,8 +1003,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
- radeon_mutex_unlock(&rdev->cs_mutex);
-
if (r) {
/* bad news, how to tell it to userspace ? */
dev_info(rdev->dev, "GPU reset failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0a1d4bd65edc..64a008d14493 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -573,24 +573,6 @@ static const char *encoder_names[37] = {
"INTERNAL_VCE"
};
-static const char *connector_names[15] = {
- "Unknown",
- "VGA",
- "DVI-I",
- "DVI-D",
- "DVI-A",
- "Composite",
- "S-video",
- "LVDS",
- "Component",
- "DIN",
- "DisplayPort",
- "HDMI-A",
- "HDMI-B",
- "TV",
- "eDP",
-};
-
static const char *hpd_names[6] = {
"HPD1",
"HPD2",
@@ -613,7 +595,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
DRM_INFO("Connector %d:\n", i);
- DRM_INFO(" %s\n", connector_names[connector->connector_type]);
+ DRM_INFO(" %s\n", drm_get_connector_name(connector));
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
if (radeon_connector->ddc_bus) {
@@ -1243,6 +1225,93 @@ void radeon_update_display_priority(struct radeon_device *rdev)
}
+/*
+ * Allocate hdmi structs and determine register offsets
+ */
+static void radeon_afmt_init(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
+ rdev->mode_info.afmt[i] = NULL;
+
+ if (ASIC_IS_DCE6(rdev)) {
+ /* todo */
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* DCE4/5 has 6 audio blocks tied to DIG encoders */
+ /* DCE4.1 has 2 audio blocks tied to DIG encoders */
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ if (!ASIC_IS_DCE41(rdev)) {
+ rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[2]) {
+ rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+ rdev->mode_info.afmt[2]->id = 2;
+ }
+ rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[3]) {
+ rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+ rdev->mode_info.afmt[3]->id = 3;
+ }
+ rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[4]) {
+ rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+ rdev->mode_info.afmt[4]->id = 4;
+ }
+ rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[5]) {
+ rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+ rdev->mode_info.afmt[5]->id = 5;
+ }
+ }
+ } else if (ASIC_IS_DCE3(rdev)) {
+ /* DCE3.x has 2 audio blocks tied to DIG encoders */
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ } else if (ASIC_IS_DCE2(rdev)) {
+ /* DCE2 has at least 1 routable audio block */
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ /* r6xx has 2 routable audio blocks */
+ if (rdev->family >= CHIP_R600) {
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ }
+ }
+}
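The DCE4/5 branch above repeats the same kzalloc-and-fill step once per AFMT block; only the block count and register offset differ. Purely as an illustration of that structure (not how the patch is written), the same DCE4/5 case could be expressed table-driven; the macros and fields are the ones already used above, and the sketch assumes the headers included by radeon_display.c.

/* Illustrative only: the DCE4/5 allocation above written as a loop over an
 * offset table. DCE4.1 exposes just the first two blocks. */
static void radeon_afmt_init_dce4_sketch(struct radeon_device *rdev)
{
    static const u32 offsets[] = {
        EVERGREEN_CRTC0_REGISTER_OFFSET,
        EVERGREEN_CRTC1_REGISTER_OFFSET,
        EVERGREEN_CRTC2_REGISTER_OFFSET,
        EVERGREEN_CRTC3_REGISTER_OFFSET,
        EVERGREEN_CRTC4_REGISTER_OFFSET,
        EVERGREEN_CRTC5_REGISTER_OFFSET,
    };
    int i, num = ASIC_IS_DCE41(rdev) ? 2 : 6;

    for (i = 0; i < num; i++) {
        rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
        if (!rdev->mode_info.afmt[i])
            continue;   /* best effort, matching the code above */
        rdev->mode_info.afmt[i]->offset = offsets[i];
        rdev->mode_info.afmt[i]->id = i;
    }
}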
+
+static void radeon_afmt_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
+ kfree(rdev->mode_info.afmt[i]);
+ rdev->mode_info.afmt[i] = NULL;
+ }
+}
+
int radeon_modeset_init(struct radeon_device *rdev)
{
int i;
@@ -1251,7 +1320,7 @@ int radeon_modeset_init(struct radeon_device *rdev)
drm_mode_config_init(rdev->ddev);
rdev->mode_info.mode_config_initialized = true;
- rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
+ rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
if (ASIC_IS_DCE5(rdev)) {
rdev->ddev->mode_config.max_width = 16384;
@@ -1303,6 +1372,9 @@ int radeon_modeset_init(struct radeon_device *rdev)
/* initialize hpd */
radeon_hpd_init(rdev);
+ /* setup afmt */
+ radeon_afmt_init(rdev);
+
/* Initialize power management */
radeon_pm_init(rdev);
@@ -1319,6 +1391,7 @@ void radeon_modeset_fini(struct radeon_device *rdev)
radeon_pm_fini(rdev);
if (rdev->mode_info.mode_config_initialized) {
+ radeon_afmt_fini(rdev);
drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ef7bb3f6ecae..f0bb2b543b13 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -105,6 +105,11 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle);
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags);
+struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor);
@@ -128,6 +133,7 @@ int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = 0;
int radeon_msi = -1;
+int radeon_lockup_timeout = 10000;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -177,6 +183,9 @@ module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, radeon_msi, int, 0444);
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
+module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -329,7 +338,8 @@ static const struct file_operations radeon_driver_kms_fops = {
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM,
+ DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM |
+ DRIVER_PRIME,
.dev_priv_size = 0,
.load = radeon_driver_load_kms,
.firstopen = radeon_driver_firstopen_kms,
@@ -364,6 +374,12 @@ static struct drm_driver kms_driver = {
.dumb_map_offset = radeon_mode_dumb_mmap,
.dumb_destroy = radeon_mode_dumb_destroy,
.fops = &radeon_driver_kms_fops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = radeon_gem_prime_export,
+ .gem_prime_import = radeon_gem_prime_import,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 4bd36a354fbe..11f5f402d22c 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -63,98 +63,82 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
- unsigned long irq_flags;
-
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (fence->emitted) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ /* we are protected by the ring emission mutex */
+ if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
return 0;
}
- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
- if (!rdev->ring[fence->ring].ready)
- /* FIXME: cp is not running assume everythings is done right
- * away
- */
- radeon_fence_write(rdev, fence->seq, fence->ring);
- else
- radeon_fence_ring_emit(rdev, fence->ring, fence);
-
+ fence->seq = ++rdev->fence_drv[fence->ring].seq;
+ radeon_fence_ring_emit(rdev, fence->ring, fence);
trace_radeon_fence_emit(rdev->ddev, fence->seq);
- fence->emitted = true;
- list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
-static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
+void radeon_fence_process(struct radeon_device *rdev, int ring)
{
- struct radeon_fence *fence;
- struct list_head *i, *n;
- uint32_t seq;
+ uint64_t seq, last_seq;
+ unsigned count_loop = 0;
bool wake = false;
- unsigned long cjiffies;
- seq = radeon_fence_read(rdev, ring);
- if (seq != rdev->fence_drv[ring].last_seq) {
- rdev->fence_drv[ring].last_seq = seq;
- rdev->fence_drv[ring].last_jiffies = jiffies;
- rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- } else {
- cjiffies = jiffies;
- if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
- cjiffies -= rdev->fence_drv[ring].last_jiffies;
- if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
- /* update the timeout */
- rdev->fence_drv[ring].last_timeout -= cjiffies;
- } else {
- /* the 500ms timeout is elapsed we should test
- * for GPU lockup
- */
- rdev->fence_drv[ring].last_timeout = 1;
- }
- } else {
- /* wrap around update last jiffies, we will just wait
- * a little longer
- */
- rdev->fence_drv[ring].last_jiffies = cjiffies;
+ /* Note there is a scenario here for an infinite loop but it's
+ * very unlikely to happen. For it to happen, the current polling
+ * process needs to be interrupted by another process, and that other
+ * process needs to update last_seq between the atomic read and the
+ * xchg of the current process.
+ *
+ * Moreover, for this to turn into an infinite loop, new fences need
+ * to be signaled continuously, i.e. radeon_fence_read needs to return
+ * a different value each time for both the currently polling process
+ * and the other process that xchgs last_seq between the atomic read
+ * and xchg of the current process. And the value the other process
+ * sets as last_seq must be higher than the seq value we just read,
+ * which means the current process needs to be interrupted after
+ * radeon_fence_read and before the atomic xchg.
+ *
+ * To be even safer we count the number of times we loop and bail out
+ * after 10 loops, accepting the fact that we might have temporarily
+ * set last_seq not to the true last seq but to an older one.
+ */
+ last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+ do {
+ seq = radeon_fence_read(rdev, ring);
+ seq |= last_seq & 0xffffffff00000000LL;
+ if (seq < last_seq) {
+ seq += 0x100000000LL;
}
- return false;
- }
- n = NULL;
- list_for_each(i, &rdev->fence_drv[ring].emitted) {
- fence = list_entry(i, struct radeon_fence, list);
- if (fence->seq == seq) {
- n = i;
+
+ if (seq == last_seq) {
break;
}
- }
- /* all fence previous to this one are considered as signaled */
- if (n) {
- i = n;
- do {
- n = i->prev;
- list_move_tail(i, &rdev->fence_drv[ring].signaled);
- fence = list_entry(i, struct radeon_fence, list);
- fence->signaled = true;
- i = n;
- } while (i != &rdev->fence_drv[ring].emitted);
+ /* If we looped, we don't want to return without
+ * checking if a fence is signaled, as it means that the
+ * seq we just read is different from the previous one.
+ */
wake = true;
+ last_seq = seq;
+ if ((count_loop++) > 10) {
+ /* We looped too many times; leave with the
+ * fact that we might have set an older fence
+ * seq than the current real last seq as signaled
+ * by the hw.
+ */
+ break;
+ }
+ } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
+
+ if (wake) {
+ rdev->fence_drv[ring].last_activity = jiffies;
+ wake_up_all(&rdev->fence_queue);
}
- return wake;
}
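The hardware fence location only stores the low 32 bits of the sequence number, while the driver now tracks a 64-bit last_seq; the loop above splices the known upper 32 bits onto each value it reads and bumps the upper half when the low half appears to have wrapped. Below is a standalone sketch of just that extension step, separated from the locking and retry concerns handled above.

#include <stdint.h>

/* Extend a 32-bit hardware sequence number to the 64-bit software sequence,
 * mirroring the splice-and-wrap logic in radeon_fence_process(). */
static uint64_t extend_seq(uint64_t last_seq64, uint32_t hw_seq32)
{
    /* keep the upper 32 bits we already know about */
    uint64_t seq = (uint64_t)hw_seq32 | (last_seq64 & 0xffffffff00000000ULL);

    /* if the result moved backwards, the low 32 bits wrapped around */
    if (seq < last_seq64)
        seq += 0x100000000ULL;

    return seq;
}

/* e.g. extend_seq(0x1fffffffeULL, 0x00000001) == 0x200000001ULL */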
static void radeon_fence_destroy(struct kref *kref)
{
- unsigned long irq_flags;
- struct radeon_fence *fence;
+ struct radeon_fence *fence;
fence = container_of(kref, struct radeon_fence, kref);
- write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
- list_del(&fence->list);
- fence->emitted = false;
- write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
- if (fence->semaphore)
- radeon_semaphore_free(fence->rdev, fence->semaphore);
+ fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
kfree(fence);
}
@@ -162,171 +146,342 @@ int radeon_fence_create(struct radeon_device *rdev,
struct radeon_fence **fence,
int ring)
{
- unsigned long irq_flags;
-
*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
if ((*fence) == NULL) {
return -ENOMEM;
}
kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
- (*fence)->emitted = false;
- (*fence)->signaled = false;
- (*fence)->seq = 0;
+ (*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ;
(*fence)->ring = ring;
- (*fence)->semaphore = NULL;
- INIT_LIST_HEAD(&(*fence)->list);
-
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
-bool radeon_fence_signaled(struct radeon_fence *fence)
+static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
+ u64 seq, unsigned ring)
{
- unsigned long irq_flags;
- bool signaled = false;
-
- if (!fence)
+ if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
return true;
-
- if (fence->rdev->gpu_lockup)
+ }
+ /* poll new last sequence at least once */
+ radeon_fence_process(rdev, ring);
+ if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
return true;
+ }
+ return false;
+}
- write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
- signaled = fence->signaled;
- /* if we are shuting down report all fence as signaled */
- if (fence->rdev->shutdown) {
- signaled = true;
+bool radeon_fence_signaled(struct radeon_fence *fence)
+{
+ if (!fence) {
+ return true;
}
- if (!fence->emitted) {
+ if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
WARN(1, "Querying an unemitted fence : %p !\n", fence);
- signaled = true;
+ return true;
}
- if (!signaled) {
- radeon_fence_poll_locked(fence->rdev, fence->ring);
- signaled = fence->signaled;
+ if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
+ return true;
+ }
+ if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return true;
}
- write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
- return signaled;
+ return false;
+}
+
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
+ unsigned ring, bool intr, bool lock_ring)
+{
+ unsigned long timeout, last_activity;
+ uint64_t seq;
+ unsigned i;
+ bool signaled;
+ int r;
+
+ while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+ if (!rdev->ring[ring].ready) {
+ return -EBUSY;
+ }
+
+ timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+ if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
+ /* the normal case, timeout is somewhere before last_activity */
+ timeout = rdev->fence_drv[ring].last_activity - timeout;
+ } else {
+ /* either jiffies wrapped around, or no fence was signaled in the last 500ms;
+ * either way we just wait the minimum amount and then check for a lockup
+ */
+ timeout = 1;
+ }
+ seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+ /* Save the current last_activity value, used to check for GPU lockups */
+ last_activity = rdev->fence_drv[ring].last_activity;
+
+ trace_radeon_fence_wait_begin(rdev->ddev, seq);
+ radeon_irq_kms_sw_irq_get(rdev, ring);
+ if (intr) {
+ r = wait_event_interruptible_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+ timeout);
+ } else {
+ r = wait_event_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+ timeout);
+ }
+ radeon_irq_kms_sw_irq_put(rdev, ring);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+ trace_radeon_fence_wait_end(rdev->ddev, seq);
+
+ if (unlikely(!signaled)) {
+ /* we were interrupted for some reason and fence
+ * isn't signaled yet, resume waiting */
+ if (r) {
+ continue;
+ }
+
+ /* check if sequence value has changed since last_activity */
+ if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+ continue;
+ }
+
+ if (lock_ring) {
+ mutex_lock(&rdev->ring_lock);
+ }
+
+ /* test if somebody else has already decided that this is a lockup */
+ if (last_activity != rdev->fence_drv[ring].last_activity) {
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ continue;
+ }
+
+ if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+ /* good news we believe it's a lockup */
+ dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
+ target_seq, seq);
+
+ /* change last activity so nobody else thinks there is a lockup */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ rdev->fence_drv[i].last_activity = jiffies;
+ }
+
+ /* mark the ring as not ready any more */
+ rdev->ring[ring].ready = false;
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ return -EDEADLK;
+ }
+
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ }
+ }
+ return 0;
}
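Both wait paths size their sleep from last_activity: they form jiffies - RADEON_FENCE_JIFFIES_TIMEOUT and, using the wrap-safe time_after() comparison, either sleep for whatever is left of that window or fall back to a one-jiffy wait before probing for a lockup. The following is a standalone sketch of that computation with a local wrap-safe compare so it builds outside the kernel; the timeout constant is a stand-in.

/* Standalone sketch of the "how long until we should test for a lockup"
 * computation used by radeon_fence_wait_seq()/radeon_fence_wait_any_seq(). */
#define SKETCH_FENCE_TIMEOUT 50UL   /* stand-in for RADEON_FENCE_JIFFIES_TIMEOUT */

/* wrap-safe "a is after b", same idea as the kernel's time_after() */
static int sketch_time_after(unsigned long a, unsigned long b)
{
    return (long)(b - a) < 0;
}

static unsigned long lockup_check_delay(unsigned long now, unsigned long last_activity)
{
    unsigned long timeout = now - SKETCH_FENCE_TIMEOUT;

    if (sketch_time_after(last_activity, timeout))
        /* normal case: part of the window after last_activity is still left */
        return last_activity - timeout;

    /* window already elapsed (or the counter wrapped): wait the minimum
     * amount and then check for a lockup */
    return 1;
}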
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
- struct radeon_device *rdev;
- unsigned long irq_flags, timeout;
- u32 seq;
int r;
if (fence == NULL) {
WARN(1, "Querying an invalid fence : %p !\n", fence);
- return 0;
+ return -EINVAL;
}
- rdev = fence->rdev;
- if (radeon_fence_signaled(fence)) {
- return 0;
+
+ r = radeon_fence_wait_seq(fence->rdev, fence->seq,
+ fence->ring, intr, true);
+ if (r) {
+ return r;
}
- timeout = rdev->fence_drv[fence->ring].last_timeout;
-retry:
- /* save current sequence used to check for GPU lockup */
- seq = rdev->fence_drv[fence->ring].last_seq;
- trace_radeon_fence_wait_begin(rdev->ddev, seq);
- if (intr) {
- radeon_irq_kms_sw_irq_get(rdev, fence->ring);
- r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
- radeon_fence_signaled(fence), timeout);
- radeon_irq_kms_sw_irq_put(rdev, fence->ring);
- if (unlikely(r < 0)) {
- return r;
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return 0;
+}
+
+bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+{
+ unsigned i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
+ return true;
}
- } else {
- radeon_irq_kms_sw_irq_get(rdev, fence->ring);
- r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
- radeon_fence_signaled(fence), timeout);
- radeon_irq_kms_sw_irq_put(rdev, fence->ring);
}
- trace_radeon_fence_wait_end(rdev->ddev, seq);
- if (unlikely(!radeon_fence_signaled(fence))) {
- /* we were interrupted for some reason and fence isn't
- * isn't signaled yet, resume wait
- */
- if (r) {
- timeout = r;
- goto retry;
+ return false;
+}
+
+static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
+ u64 *target_seq, bool intr)
+{
+ unsigned long timeout, last_activity, tmp;
+ unsigned i, ring = RADEON_NUM_RINGS;
+ bool signaled;
+ int r;
+
+ for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i]) {
+ continue;
+ }
+
+ /* use the most recent one as indicator */
+ if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
+ last_activity = rdev->fence_drv[i].last_activity;
}
- /* don't protect read access to rdev->fence_drv[t].last_seq
- * if we experiencing a lockup the value doesn't change
+
+ /* For lockup detection just pick the lowest ring we are
+ * actively waiting for
*/
- if (seq == rdev->fence_drv[fence->ring].last_seq &&
- radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
- /* good news we believe it's a lockup */
- printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
- fence->seq, seq);
- /* FIXME: what should we do ? marking everyone
- * as signaled for now
+ if (i < ring) {
+ ring = i;
+ }
+ }
+
+ /* nothing to wait for ? */
+ if (ring == RADEON_NUM_RINGS) {
+ return 0;
+ }
+
+ while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+ timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+ if (time_after(last_activity, timeout)) {
+ /* the normal case, timeout is somewhere before last_activity */
+ timeout = last_activity - timeout;
+ } else {
+ /* either jiffies wrapped around, or no fence was signaled in the last 500ms;
+ * either way we just wait the minimum amount and then check for a lockup
*/
- rdev->gpu_lockup = true;
- r = radeon_gpu_reset(rdev);
- if (r)
- return r;
- radeon_fence_write(rdev, fence->seq, fence->ring);
- rdev->gpu_lockup = false;
+ timeout = 1;
+ }
+
+ trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (target_seq[i]) {
+ radeon_irq_kms_sw_irq_get(rdev, i);
+ }
+ }
+ if (intr) {
+ r = wait_event_interruptible_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+ timeout);
+ } else {
+ r = wait_event_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+ timeout);
+ }
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (target_seq[i]) {
+ radeon_irq_kms_sw_irq_put(rdev, i);
+ }
+ }
+ if (unlikely(r < 0)) {
+ return r;
+ }
+ trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
+
+ if (unlikely(!signaled)) {
+ /* we were interrupted for some reason and fence
+ * isn't signaled yet, resume waiting */
+ if (r) {
+ continue;
+ }
+
+ mutex_lock(&rdev->ring_lock);
+ for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
+ tmp = rdev->fence_drv[i].last_activity;
+ }
+ }
+ /* test if somebody else has already decided that this is a lockup */
+ if (last_activity != tmp) {
+ last_activity = tmp;
+ mutex_unlock(&rdev->ring_lock);
+ continue;
+ }
+
+ if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+ /* good news we believe it's a lockup */
+ dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
+ target_seq[ring]);
+
+ /* change last activity so nobody else thinks there is a lockup */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ rdev->fence_drv[i].last_activity = jiffies;
+ }
+
+ /* mark the ring as not ready any more */
+ rdev->ring[ring].ready = false;
+ mutex_unlock(&rdev->ring_lock);
+ return -EDEADLK;
+ }
+ mutex_unlock(&rdev->ring_lock);
}
- timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- rdev->fence_drv[fence->ring].last_jiffies = jiffies;
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- goto retry;
}
return 0;
}
-int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_any(struct radeon_device *rdev,
+ struct radeon_fence **fences,
+ bool intr)
{
- unsigned long irq_flags;
- struct radeon_fence *fence;
+ uint64_t seq[RADEON_NUM_RINGS];
+ unsigned i;
int r;
- if (rdev->gpu_lockup) {
- return 0;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ seq[i] = 0;
+
+ if (!fences[i]) {
+ continue;
+ }
+
+ if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
+ /* something was already signaled */
+ return 0;
+ }
+
+ if (fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+ seq[i] = fences[i]->seq;
+ }
}
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (list_empty(&rdev->fence_drv[ring].emitted)) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return 0;
+
+ r = radeon_fence_wait_any_seq(rdev, seq, intr);
+ if (r) {
+ return r;
}
- fence = list_entry(rdev->fence_drv[ring].emitted.next,
- struct radeon_fence, list);
- radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- r = radeon_fence_wait(fence, false);
- radeon_fence_unref(&fence);
- return r;
+ return 0;
}
-int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
- unsigned long irq_flags;
- struct radeon_fence *fence;
- int r;
-
- if (rdev->gpu_lockup) {
- return 0;
+ uint64_t seq;
+
+ /* We are not protected by the ring lock when reading the current seq, but
+ * it's ok as the worst case is that we return too early while we could
+ * have waited.
+ */
+ seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+ if (seq >= rdev->fence_drv[ring].seq) {
+ /* nothing to wait for, last_seq is
+ * already the last emitted fence */
+ return -ENOENT;
}
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (list_empty(&rdev->fence_drv[ring].emitted)) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return 0;
- }
- fence = list_entry(rdev->fence_drv[ring].emitted.prev,
- struct radeon_fence, list);
- radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- r = radeon_fence_wait(fence, false);
- radeon_fence_unref(&fence);
- return r;
+ return radeon_fence_wait_seq(rdev, seq, ring, false, false);
+}
+
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+{
+ /* We are not protected by the ring lock when reading the current seq,
+ * but it's ok as wait empty is called from a place where no more
+ * activity can be scheduled so there won't be concurrent access
+ * to seq value.
+ */
+ return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq,
+ ring, false, false);
}
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
@@ -345,49 +500,27 @@ void radeon_fence_unref(struct radeon_fence **fence)
}
}
-void radeon_fence_process(struct radeon_device *rdev, int ring)
-{
- unsigned long irq_flags;
- bool wake;
-
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- wake = radeon_fence_poll_locked(rdev, ring);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- if (wake) {
- wake_up_all(&rdev->fence_drv[ring].queue);
- }
-}
-
-int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
- unsigned long irq_flags;
- int not_processed = 0;
-
- read_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (!rdev->fence_drv[ring].initialized) {
- read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return 0;
+ uint64_t emitted;
+
+ /* We are not protected by ring lock when reading the last sequence
+ * but it's ok to report slightly wrong fence count here.
+ */
+ radeon_fence_process(rdev, ring);
+ emitted = rdev->fence_drv[ring].seq - atomic64_read(&rdev->fence_drv[ring].last_seq);
+ /* to avoid 32-bit wrap around */
+ if (emitted > 0x10000000) {
+ emitted = 0x10000000;
}
-
- if (!list_empty(&rdev->fence_drv[ring].emitted)) {
- struct list_head *ptr;
- list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
- /* count up to 3, that's enought info */
- if (++not_processed >= 3)
- break;
- }
- }
- read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return not_processed;
+ return (unsigned)emitted;
}
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
- unsigned long irq_flags;
uint64_t index;
int r;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
if (rdev->wb.use_event) {
rdev->fence_drv[ring].scratch_reg = 0;
@@ -396,7 +529,6 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
if (r) {
dev_err(rdev->dev, "fence failed to get scratch register\n");
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return r;
}
index = RADEON_WB_SCRATCH_OFFSET +
@@ -405,11 +537,10 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
}
rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
+ radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring);
rdev->fence_drv[ring].initialized = true;
- DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
+ dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
@@ -418,24 +549,20 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
rdev->fence_drv[ring].scratch_reg = -1;
rdev->fence_drv[ring].cpu_addr = NULL;
rdev->fence_drv[ring].gpu_addr = 0;
- atomic_set(&rdev->fence_drv[ring].seq, 0);
- INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
- INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
- INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
- init_waitqueue_head(&rdev->fence_drv[ring].queue);
+ rdev->fence_drv[ring].seq = 0;
+ atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
+ rdev->fence_drv[ring].last_activity = jiffies;
rdev->fence_drv[ring].initialized = false;
}
int radeon_fence_driver_init(struct radeon_device *rdev)
{
- unsigned long irq_flags;
int ring;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ init_waitqueue_head(&rdev->fence_queue);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
radeon_fence_driver_init_ring(rdev, ring);
}
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
if (radeon_debugfs_fence_init(rdev)) {
dev_err(rdev->dev, "fence debugfs file creation failed\n");
}
@@ -444,19 +571,18 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
- unsigned long irq_flags;
int ring;
+ mutex_lock(&rdev->ring_lock);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
if (!rdev->fence_drv[ring].initialized)
continue;
- radeon_fence_wait_last(rdev, ring);
- wake_up_all(&rdev->fence_drv[ring].queue);
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ radeon_fence_wait_empty_locked(rdev, ring);
+ wake_up_all(&rdev->fence_queue);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
rdev->fence_drv[ring].initialized = false;
}
+ mutex_unlock(&rdev->ring_lock);
}
@@ -469,7 +595,6 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_fence *fence;
int i;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -477,14 +602,10 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
continue;
seq_printf(m, "--- ring %d ---\n", i);
- seq_printf(m, "Last signaled fence 0x%08X\n",
- radeon_fence_read(rdev, i));
- if (!list_empty(&rdev->fence_drv[i].emitted)) {
- fence = list_entry(rdev->fence_drv[i].emitted.prev,
- struct radeon_fence, list);
- seq_printf(m, "Last emitted fence %p with 0x%08X\n",
- fence, fence->seq);
- }
+ seq_printf(m, "Last signaled fence 0x%016llx\n",
+ (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
+ seq_printf(m, "Last emitted 0x%016llx\n",
+ rdev->fence_drv[i].seq);
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 456a77cf4b7f..79db56e6c2ac 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -80,7 +80,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
if (rdev->gart.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->gart.robj);
+ NULL, &rdev->gart.robj);
if (r) {
return r;
}
@@ -326,7 +326,7 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
list_del_init(&vm->list);
vm->id = -1;
- radeon_sa_bo_free(rdev, &vm->sa_bo);
+ radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
vm->pt = NULL;
list_for_each_entry(bo_va, &vm->va, vm_list) {
@@ -395,7 +395,7 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
- RADEON_GPU_PAGE_SIZE);
+ RADEON_GPU_PAGE_SIZE, false);
if (r) {
if (list_empty(&rdev->vm_manager.lru_vm)) {
return r;
@@ -404,10 +404,8 @@ retry:
radeon_vm_unbind(rdev, vm_evict);
goto retry;
}
- vm->pt = rdev->vm_manager.sa_manager.cpu_ptr;
- vm->pt += (vm->sa_bo.offset >> 3);
- vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
- vm->pt_gpu_addr += vm->sa_bo.offset;
+ vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
+ vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
retry_id:
@@ -428,14 +426,14 @@ retry_id:
/* do hw bind */
r = rdev->vm_manager.funcs->bind(rdev, vm, id);
if (r) {
- radeon_sa_bo_free(rdev, &vm->sa_bo);
+ radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
return r;
}
rdev->vm_manager.use_bitmap |= 1 << id;
vm->id = id;
list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
- return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
- &rdev->ib_pool.sa_manager.bo->tbo.mem);
+ return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
+ &rdev->ring_tmp_bo.bo->tbo.mem);
}
/* object have to be reserved */
@@ -633,7 +631,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
/* map the ib pool buffer at 0 in virtual address space, set
* read only
*/
- r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
+ r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
return r;
}
@@ -650,12 +648,12 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
radeon_mutex_unlock(&rdev->cs_mutex);
/* remove all bo */
- r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
- bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
+ bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
list_del_init(&bo_va->bo_list);
list_del_init(&bo_va->vm_list);
- radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
kfree(bo_va);
}
if (!list_empty(&vm->va)) {
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0519b05968b5..f28bd4b7ef98 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -42,6 +42,8 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
struct radeon_bo *robj = gem_to_radeon_bo(gobj);
if (robj) {
+ if (robj->gem_base.import_attach)
+ drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
radeon_bo_unref(&robj);
}
}
@@ -59,7 +61,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
- r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
+ r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
@@ -154,6 +156,17 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
radeon_bo_unreserve(rbo);
}
+static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
+{
+ if (r == -EDEADLK) {
+ radeon_mutex_lock(&rdev->cs_mutex);
+ r = radeon_gpu_reset(rdev);
+ if (!r)
+ r = -EAGAIN;
+ radeon_mutex_unlock(&rdev->cs_mutex);
+ }
+ return r;
+}
/*
* GEM ioctls.
@@ -210,12 +223,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
args->initial_domain, false,
false, &gobj);
if (r) {
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
args->handle = handle;
@@ -245,6 +260,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
drm_gem_object_unreference_unlocked(gobj);
+ r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
}
@@ -301,6 +317,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
break;
}
drm_gem_object_unreference_unlocked(gobj);
+ r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
}
@@ -322,6 +339,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
if (robj->rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
drm_gem_object_unreference_unlocked(gobj);
+ r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 65060b77c805..5df58d1aba06 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -73,6 +73,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
rdev->irq.pflip[i] = false;
+ rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
/* Clear bits */
@@ -108,6 +109,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
rdev->irq.pflip[i] = false;
+ rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
}
@@ -170,6 +172,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
int r = 0;
INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+ INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
spin_lock_init(&rdev->irq.sw_lock);
for (i = 0; i < rdev->num_crtc; i++)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 3c2628b14d56..f1016a5820d1 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -57,8 +57,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
dev->dev_private = (void *)rdev;
- pci_set_master(dev->pdev);
-
/* update BUS flag */
if (drm_pci_device_is_agp(dev)) {
flags |= RADEON_IS_AGP;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 42db254f6bb0..a0c82229e8f0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -369,6 +369,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
goto error;
}
+ memset(&props, 0, sizeof(props));
props.max_brightness = MAX_RADEON_LEVEL;
props.type = BACKLIGHT_RAW;
bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index f7eb5d8b9fd3..5b10ffd7bb2f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -210,6 +210,7 @@ enum radeon_connector_table {
CT_RN50_POWER,
CT_MAC_X800,
CT_MAC_G5_9600,
+ CT_SAM440EP
};
enum radeon_dvo_chip {
@@ -219,12 +220,20 @@ enum radeon_dvo_chip {
struct radeon_fbdev;
+struct radeon_afmt {
+ bool enabled;
+ int offset;
+ bool last_buffer_filled_status;
+ int id;
+};
+
struct radeon_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
enum radeon_connector_table connector_table;
bool mode_config_initialized;
struct radeon_crtc *crtcs[6];
+ struct radeon_afmt *afmt[6];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
@@ -363,6 +372,7 @@ struct radeon_encoder_atom_dig {
int dpms_mode;
uint8_t backlight_level;
int panel_mode;
+ struct radeon_afmt *afmt;
};
struct radeon_encoder_atom_dac {
@@ -384,10 +394,6 @@ struct radeon_encoder {
struct drm_display_mode native_mode;
void *enc_priv;
int audio_polling_active;
- int hdmi_offset;
- int hdmi_config_offset;
- int hdmi_audio_workaround;
- int hdmi_buffer_status;
bool is_ext_encoder;
u16 caps;
};
@@ -476,6 +482,7 @@ extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
+extern int radeon_get_monitor_bpc(struct drm_connector *connector);
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index df6a4dbd93f8..830f1a7b486f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -104,7 +104,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
- struct radeon_bo **bo_ptr)
+ struct sg_table *sg, struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
@@ -120,6 +120,8 @@ int radeon_bo_create(struct radeon_device *rdev,
}
if (kernel) {
type = ttm_bo_type_kernel;
+ } else if (sg) {
+ type = ttm_bo_type_sg;
} else {
type = ttm_bo_type_device;
}
@@ -155,7 +157,7 @@ retry:
mutex_lock(&rdev->vram_mutex);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, 0, !kernel, NULL,
- acc_size, &radeon_ttm_bo_destroy);
+ acc_size, sg, &radeon_ttm_bo_destroy);
mutex_unlock(&rdev->vram_mutex);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index f9104be88d7c..17fb99f177cf 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -111,9 +111,10 @@ extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
bool no_wait);
extern int radeon_bo_create(struct radeon_device *rdev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain,
- struct radeon_bo **bo_ptr);
+ unsigned long size, int byte_align,
+ bool kernel, u32 domain,
+ struct sg_table *sg,
+ struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
@@ -146,6 +147,17 @@ extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
/*
* sub allocation
*/
+
+static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
+{
+ return sa_bo->manager->gpu_addr + sa_bo->soffset;
+}
+
+static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
+{
+ return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+}
+
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 domain);
@@ -157,9 +169,15 @@ extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo *sa_bo,
- unsigned size, unsigned align);
+ struct radeon_sa_bo **sa_bo,
+ unsigned size, unsigned align, bool block);
extern void radeon_sa_bo_free(struct radeon_device *rdev,
- struct radeon_sa_bo *sa_bo);
+ struct radeon_sa_bo **sa_bo,
+ struct radeon_fence *fence);
+#if defined(CONFIG_DEBUG_FS)
+extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+ struct seq_file *m);
+#endif
+
#endif
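With this change a sub-allocation is handed back as a struct radeon_sa_bo pointer, radeon_sa_bo_new() can optionally block until space frees up, and radeon_sa_bo_free() takes the fence covering the last GPU use of the range so the allocator knows when it may be recycled; the new inline helpers translate a sa_bo into its GPU and CPU addresses. Below is a hedged sketch of the calling convention built only from the declarations above; the alignment value and the trimmed error handling are assumptions.

/* Sketch of the new sub-allocation calling convention. */
static int radeon_sa_usage_sketch(struct radeon_device *rdev,
                                  struct radeon_sa_manager *sa_manager,
                                  unsigned size, struct radeon_fence *fence)
{
    struct radeon_sa_bo *sa_bo;
    void *cpu;
    uint64_t gpu;
    int r;

    r = radeon_sa_bo_new(rdev, sa_manager, &sa_bo, size,
                         256 /* arbitrary alignment for the sketch */, true);
    if (r)
        return r;

    cpu = radeon_sa_bo_cpu_addr(sa_bo);  /* CPU writes the contents here */
    gpu = radeon_sa_bo_gpu_addr(sa_bo);  /* the GPU is pointed at this */
    (void)cpu;
    (void)gpu;

    /* hand the covering fence back so the range is only recycled once the
     * GPU work that references it has completed */
    radeon_sa_bo_free(rdev, &sa_bo, fence);
    return 0;
}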
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index caa55d68f319..08825548ee69 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -252,10 +252,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (rdev->ring[i].ring_obj)
- mutex_lock(&rdev->ring[i].mutex);
- }
+ mutex_lock(&rdev->ring_lock);
/* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) {
@@ -273,13 +270,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
} else {
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
if (ring->ready) {
- struct radeon_fence *fence;
- radeon_ring_alloc(rdev, ring, 64);
- radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
- radeon_fence_emit(rdev, fence);
- radeon_ring_commit(rdev, ring);
- radeon_fence_wait(fence, false);
- radeon_fence_unref(&fence);
+ radeon_fence_wait_empty_locked(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
}
radeon_unmap_vram_bos(rdev);
@@ -311,10 +302,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (rdev->ring[i].ring_obj)
- mutex_unlock(&rdev->ring[i].mutex);
- }
+ mutex_unlock(&rdev->ring_lock);
mutex_unlock(&rdev->vram_mutex);
mutex_unlock(&rdev->ddev->struct_mutex);
}
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
new file mode 100644
index 000000000000..8ddab4c76710
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * based on nouveau_prime.c
+ *
+ * Authors: Alex Deucher
+ */
+#include "drmP.h"
+#include "drm.h"
+
+#include "radeon.h"
+#include "radeon_drm.h"
+
+#include <linux/dma-buf.h>
+
+static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct radeon_bo *bo = attachment->dmabuf->priv;
+ struct drm_device *dev = bo->rdev->ddev;
+ int npages = bo->tbo.num_pages;
+ struct sg_table *sg;
+ int nents;
+
+ mutex_lock(&dev->struct_mutex);
+ sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+ nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ mutex_unlock(&dev->struct_mutex);
+ return sg;
+}
+
+static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sg, enum dma_data_direction dir)
+{
+ dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct radeon_bo *bo = dma_buf->priv;
+
+ if (bo->gem_base.export_dma_buf == dma_buf) {
+ DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
+ bo->gem_base.export_dma_buf = NULL;
+ drm_gem_object_unreference_unlocked(&bo->gem_base);
+ }
+}
+
+static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+ struct radeon_bo *bo = dma_buf->priv;
+ struct drm_device *dev = bo->rdev->ddev;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ if (bo->vmapping_count) {
+ bo->vmapping_count++;
+ goto out_unlock;
+ }
+
+ ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+ &bo->dma_buf_vmap);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
+ }
+ bo->vmapping_count = 1;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return bo->dma_buf_vmap.virtual;
+}
+
+static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct radeon_bo *bo = dma_buf->priv;
+ struct drm_device *dev = bo->rdev->ddev;
+
+ mutex_lock(&dev->struct_mutex);
+ bo->vmapping_count--;
+ if (bo->vmapping_count == 0) {
+ ttm_bo_kunmap(&bo->dma_buf_vmap);
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+static const struct dma_buf_ops radeon_dmabuf_ops = {
+ .map_dma_buf = radeon_gem_map_dma_buf,
+ .unmap_dma_buf = radeon_gem_unmap_dma_buf,
+ .release = radeon_gem_dmabuf_release,
+ .kmap = radeon_gem_kmap,
+ .kmap_atomic = radeon_gem_kmap_atomic,
+ .kunmap = radeon_gem_kunmap,
+ .kunmap_atomic = radeon_gem_kunmap_atomic,
+ .mmap = radeon_gem_prime_mmap,
+ .vmap = radeon_gem_prime_vmap,
+ .vunmap = radeon_gem_prime_vunmap,
+};
+
+static int radeon_prime_create(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg,
+ struct radeon_bo **pbo)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_bo *bo;
+ int ret;
+
+ ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
+ RADEON_GEM_DOMAIN_GTT, sg, pbo);
+ if (ret)
+ return ret;
+ bo = *pbo;
+ bo->gem_base.driver_private = bo;
+
+ mutex_lock(&rdev->gem.mutex);
+ list_add_tail(&bo->list, &rdev->gem.objects);
+ mutex_unlock(&rdev->gem.mutex);
+
+ return 0;
+}
+
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags)
+{
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
+ int ret = 0;
+
+ /* pin buffer into GTT */
+ ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
+}
+
+struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct radeon_bo *bo;
+ int ret;
+
+ if (dma_buf->ops == &radeon_dmabuf_ops) {
+ bo = dma_buf->priv;
+ if (bo->gem_base.dev == dev) {
+ drm_gem_object_reference(&bo->gem_base);
+ return &bo->gem_base;
+ }
+ }
+
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
+ if (ret)
+ goto fail_unmap;
+
+ bo->gem_base.import_attach = attach;
+
+ return &bo->gem_base;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
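
For context, the export/import hooks above are reached through the generic DRM PRIME ioctls. A userspace-side sketch of the round trip, assuming the libdrm helpers of the same era (drmPrimeHandleToFD()/drmPrimeFDToHandle()); the function name and file descriptors are illustrative, not part of this patch:

#include <stdint.h>
#include <xf86drm.h>

/* Share a GEM handle from one DRM device with another via a dma-buf fd.
 * On the kernel side the exporter ends up in radeon_gem_prime_export()
 * and the importer in radeon_gem_prime_import() (or their equivalents). */
static int share_bo(int exporter_fd, uint32_t handle,
		    int importer_fd, uint32_t *imported_handle)
{
	int dmabuf_fd;
	int r;

	r = drmPrimeHandleToFD(exporter_fd, handle, DRM_CLOEXEC, &dmabuf_fd);
	if (r)
		return r;

	return drmPrimeFDToHandle(importer_fd, dmabuf_fd, imported_handle);
}
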
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index cc33b3d7c33b..983658c91358 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -24,6 +24,7 @@
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
+ * Christian König
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -33,151 +34,42 @@
#include "radeon.h"
#include "atom.h"
-int radeon_debugfs_ib_init(struct radeon_device *rdev);
-int radeon_debugfs_ring_init(struct radeon_device *rdev);
-
-u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
-{
- struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
- u32 pg_idx, pg_offset;
- u32 idx_value = 0;
- int new_page;
-
- pg_idx = (idx * 4) / PAGE_SIZE;
- pg_offset = (idx * 4) % PAGE_SIZE;
-
- if (ibc->kpage_idx[0] == pg_idx)
- return ibc->kpage[0][pg_offset/4];
- if (ibc->kpage_idx[1] == pg_idx)
- return ibc->kpage[1][pg_offset/4];
-
- new_page = radeon_cs_update_pages(p, pg_idx);
- if (new_page < 0) {
- p->parser_error = new_page;
- return 0;
- }
-
- idx_value = ibc->kpage[new_page][pg_offset/4];
- return idx_value;
-}
-
-void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
-{
-#if DRM_DEBUG_CODE
- if (ring->count_dw <= 0) {
- DRM_ERROR("radeon: writting more dword to ring than expected !\n");
- }
-#endif
- ring->ring[ring->wptr++] = v;
- ring->wptr &= ring->ptr_mask;
- ring->count_dw--;
- ring->ring_free_dw--;
-}
-
/*
* IB.
*/
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- bool done = false;
-
- /* only free ib which have been emited */
- if (ib->fence && ib->fence->emitted) {
- if (radeon_fence_signaled(ib->fence)) {
- radeon_fence_unref(&ib->fence);
- radeon_sa_bo_free(rdev, &ib->sa_bo);
- done = true;
- }
- }
- return done;
-}
+int radeon_debugfs_sa_init(struct radeon_device *rdev);
int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib **ib, unsigned size)
+ struct radeon_ib *ib, unsigned size)
{
- struct radeon_fence *fence;
- unsigned cretry = 0;
- int r = 0, i, idx;
-
- *ib = NULL;
- /* align size on 256 bytes */
- size = ALIGN(size, 256);
+ int r;
- r = radeon_fence_create(rdev, &fence, ring);
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
if (r) {
- dev_err(rdev->dev, "failed to create fence for new IB\n");
+ dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
return r;
}
-
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- idx = rdev->ib_pool.head_id;
-retry:
- if (cretry > 5) {
- dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_fence_unref(&fence);
- return -ENOMEM;
- }
- cretry++;
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
- if (rdev->ib_pool.ibs[idx].fence == NULL) {
- r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
- &rdev->ib_pool.ibs[idx].sa_bo,
- size, 256);
- if (!r) {
- *ib = &rdev->ib_pool.ibs[idx];
- (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
- (*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
- (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
- (*ib)->gpu_addr += (*ib)->sa_bo.offset;
- (*ib)->fence = fence;
- (*ib)->vm_id = 0;
- (*ib)->is_const_ib = false;
- /* ib are most likely to be allocated in a ring fashion
- * thus rdev->ib_pool.head_id should be the id of the
- * oldest ib
- */
- rdev->ib_pool.head_id = (1 + idx);
- rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- return 0;
- }
- }
- idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
- }
- /* this should be rare event, ie all ib scheduled none signaled yet.
- */
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
- r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
- if (!r) {
- goto retry;
- }
- /* an error happened */
- break;
- }
- idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+ r = radeon_fence_create(rdev, &ib->fence, ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
+ return r;
}
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_fence_unref(&fence);
- return r;
+
+ ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+ ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+ ib->vm_id = 0;
+ ib->is_const_ib = false;
+ ib->semaphore = NULL;
+
+ return 0;
}
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ib *tmp = *ib;
-
- *ib = NULL;
- if (tmp == NULL) {
- return;
- }
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (tmp->fence && !tmp->fence->emitted) {
- radeon_sa_bo_free(rdev, &tmp->sa_bo);
- radeon_fence_unref(&tmp->fence);
- }
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
+ radeon_semaphore_free(rdev, ib->semaphore, ib->fence);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+ radeon_fence_unref(&ib->fence);
}
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
@@ -187,14 +79,14 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
- DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
+ dev_err(rdev->dev, "couldn't schedule ib\n");
return -EINVAL;
}
/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, ring, 64);
if (r) {
- DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
+ dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
@@ -205,74 +97,90 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
int radeon_ib_pool_init(struct radeon_device *rdev)
{
- struct radeon_sa_manager tmp;
- int i, r;
+ int r;
- r = radeon_sa_bo_manager_init(rdev, &tmp,
+ if (rdev->ib_pool_ready) {
+ return 0;
+ }
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
RADEON_GEM_DOMAIN_GTT);
if (r) {
return r;
}
-
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.ready) {
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_sa_bo_manager_fini(rdev, &tmp);
- return 0;
- }
-
- rdev->ib_pool.sa_manager = tmp;
- INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- rdev->ib_pool.ibs[i].fence = NULL;
- rdev->ib_pool.ibs[i].idx = i;
- rdev->ib_pool.ibs[i].length_dw = 0;
- INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
- }
- rdev->ib_pool.head_id = 0;
- rdev->ib_pool.ready = true;
- DRM_INFO("radeon: ib pool ready.\n");
-
- if (radeon_debugfs_ib_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for IB !\n");
- }
- if (radeon_debugfs_ring_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for rings !\n");
+ rdev->ib_pool_ready = true;
+ if (radeon_debugfs_sa_init(rdev)) {
+ dev_err(rdev->dev, "failed to register debugfs file for SA\n");
}
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
return 0;
}
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
- unsigned i;
-
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.ready) {
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
- radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
- }
- radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
- rdev->ib_pool.ready = false;
+ if (rdev->ib_pool_ready) {
+ radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+ rdev->ib_pool_ready = false;
}
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
}
int radeon_ib_pool_start(struct radeon_device *rdev)
{
- return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
+ return radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
}
int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
- return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
+ return radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
+}
+
+int radeon_ib_ring_tests(struct radeon_device *rdev)
+{
+ unsigned i;
+ int r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_ring *ring = &rdev->ring[i];
+
+ if (!ring->ready)
+ continue;
+
+ r = radeon_ib_test(rdev, i, ring);
+ if (r) {
+ ring->ready = false;
+
+ if (i == RADEON_RING_TYPE_GFX_INDEX) {
+ /* oh, oh, that's really bad */
+ DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+
+ } else {
+ /* still not good, but we can live with it */
+ DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
+ }
+ }
+ }
+ return 0;
}
/*
* Ring.
*/
+int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+{
+#if DRM_DEBUG_CODE
+ if (ring->count_dw <= 0) {
+ DRM_ERROR("radeon: writting more dword to ring than expected !\n");
+ }
+#endif
+ ring->ring[ring->wptr++] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+ ring->ring_free_dw--;
+}
+
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
/* r1xx-r5xx only has CP ring */
@@ -319,7 +227,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
if (ndw < ring->ring_free_dw) {
break;
}
- r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
+ r = radeon_fence_wait_next_locked(rdev, radeon_ring_index(rdev, ring));
if (r)
return r;
}
@@ -332,10 +240,10 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
{
int r;
- mutex_lock(&ring->mutex);
+ mutex_lock(&rdev->ring_lock);
r = radeon_ring_alloc(rdev, ring, ndw);
if (r) {
- mutex_unlock(&ring->mutex);
+ mutex_unlock(&rdev->ring_lock);
return r;
}
return 0;
@@ -360,13 +268,85 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_commit(rdev, ring);
- mutex_unlock(&ring->mutex);
+ mutex_unlock(&rdev->ring_lock);
}
-void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_undo(struct radeon_ring *ring)
{
ring->wptr = ring->wptr_old;
- mutex_unlock(&ring->mutex);
+}
+
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ radeon_ring_undo(ring);
+ mutex_unlock(&rdev->ring_lock);
+}
+
+void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ int r;
+
+ radeon_ring_free_size(rdev, ring);
+ if (ring->rptr == ring->wptr) {
+ r = radeon_ring_alloc(rdev, ring, 1);
+ if (!r) {
+ radeon_ring_write(ring, ring->nop);
+ radeon_ring_commit(rdev, ring);
+ }
+ }
+}
+
+void radeon_ring_lockup_update(struct radeon_ring *ring)
+{
+ ring->last_rptr = ring->rptr;
+ ring->last_activity = jiffies;
+}
+
+/**
+ * radeon_ring_test_lockup() - check whether a ring is locked up
+ * @rdev: radeon device structure
+ * @ring: radeon_ring structure holding ring information
+ *
+ * There is no need to initialize the lockup tracking information beforehand:
+ * either the CP rptr differs from the recorded value or jiffies has wrapped
+ * around, and both cases force the tracking information to be reinitialized.
+ *
+ * A possible false positive is a call made after a long idle period where
+ * last_rptr still equals the current CP rptr; unlikely, but it can happen.
+ * To avoid it, if the time elapsed since the last call exceeds the lockup
+ * timeout we return false and only update the tracking information. The
+ * caller therefore has to call radeon_ring_test_lockup several times within
+ * the timeout window for a lockup to be reported; the fencing code should
+ * be cautious about that.
+ *
+ * The caller should also write to the ring to force the CP to do something,
+ * so we do not get a false positive when the CP simply has nothing to do.
+ **/
+bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ unsigned long cjiffies, elapsed;
+ uint32_t rptr;
+
+ cjiffies = jiffies;
+ if (!time_after(cjiffies, ring->last_activity)) {
+ /* likely a wrap around */
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ rptr = RREG32(ring->rptr_reg);
+ ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
+ if (ring->rptr != ring->last_rptr) {
+ /* CP is still working no lockup */
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
+ if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
+ dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+ return true;
+ }
+ /* give a chance to the GPU ... */
+ return false;
}
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
@@ -385,8 +365,8 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT,
- &ring->ring_obj);
+ RADEON_GEM_DOMAIN_GTT,
+ NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
@@ -411,6 +391,9 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->ring_free_dw = ring->ring_size / 4;
+ if (radeon_debugfs_ring_init(rdev, ring)) {
+ DRM_ERROR("Failed to register debugfs file for rings !\n");
+ }
return 0;
}
@@ -419,11 +402,12 @@ void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
int r;
struct radeon_bo *ring_obj;
- mutex_lock(&ring->mutex);
+ mutex_lock(&rdev->ring_lock);
ring_obj = ring->ring_obj;
+ ring->ready = false;
ring->ring = NULL;
ring->ring_obj = NULL;
- mutex_unlock(&ring->mutex);
+ mutex_unlock(&rdev->ring_lock);
if (ring_obj) {
r = radeon_bo_reserve(ring_obj, false);
@@ -476,59 +460,48 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};
-static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
+static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
- unsigned i;
- if (ib == NULL) {
- return 0;
- }
- seq_printf(m, "IB %04u\n", ib->idx);
- seq_printf(m, "IB fence %p\n", ib->fence);
- seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
- for (i = 0; i < ib->length_dw; i++) {
- seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
- }
+ radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
+
return 0;
+
}
-static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
-static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
-static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
+static struct drm_info_list radeon_debugfs_sa_list[] = {
+ {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+};
+
#endif
-int radeon_debugfs_ring_init(struct radeon_device *rdev)
+int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
- if (rdev->family >= CHIP_CAYMAN)
- return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
- ARRAY_SIZE(radeon_debugfs_ring_info_list));
- else
- return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list, 1);
-#else
- return 0;
+ unsigned i;
+ for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
+ struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
+ int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
+ unsigned r;
+
+ if (&rdev->ring[ridx] != ring)
+ continue;
+
+ r = radeon_debugfs_add_files(rdev, info, 1);
+ if (r)
+ return r;
+ }
#endif
+ return 0;
}
-int radeon_debugfs_ib_init(struct radeon_device *rdev)
+int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- unsigned i;
-
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
- radeon_debugfs_ib_idx[i] = i;
- radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
- radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
- radeon_debugfs_ib_list[i].driver_features = 0;
- radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
- }
- return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
- RADEON_IB_POOL_SIZE);
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
return 0;
#endif
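
radeon_ring_force_activity() and radeon_ring_test_lockup() added above are meant to be combined by the per-ASIC gpu_is_lockup callbacks. A minimal sketch of that pattern, assuming a schematic chip_busy() check in place of the real RBBM/GRBM status reads:

/* Sketch of a gpu_is_lockup() callback; chip_busy() stands in for the
 * ASIC-specific status-register check and is not a real function. */
static bool example_gpu_is_lockup(struct radeon_device *rdev,
				  struct radeon_ring *ring)
{
	if (!chip_busy(rdev)) {
		/* the engine is idle, so this is not a lockup */
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* queue a NOP if the ring is empty so the CP has something to chew on */
	radeon_ring_force_activity(rdev, ring);
	/* compares rptr against last_rptr and the elapsed time */
	return radeon_ring_test_lockup(rdev, ring);
}
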
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 4cce47e7dc0d..32059b745728 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -27,23 +27,45 @@
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
+/* Algorithm:
+ *
+ * We store the last allocated bo in "hole" and always try to allocate
+ * right after it. The principle is that in a linear GPU ring progression
+ * whatever comes after the last allocation is the oldest bo we allocated,
+ * and thus the first one that should no longer be in use by the GPU.
+ *
+ * If that is not the case we skip over the bo after last to the closest
+ * completed bo, if such a bo exists. If none exists and we are not asked
+ * to block, we report failure to allocate.
+ *
+ * If we are asked to block, we wait on the oldest fence of each ring and
+ * return as soon as any of those fences completes.
+ */
#include "drmP.h"
#include "drm.h"
#include "radeon.h"
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
+
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 domain)
{
- int r;
+ int i, r;
+ spin_lock_init(&sa_manager->lock);
sa_manager->bo = NULL;
sa_manager->size = size;
sa_manager->domain = domain;
- INIT_LIST_HEAD(&sa_manager->sa_bo);
+ sa_manager->hole = &sa_manager->olist;
+ INIT_LIST_HEAD(&sa_manager->olist);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ INIT_LIST_HEAD(&sa_manager->flist[i]);
+ }
r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
+ RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
@@ -57,11 +79,15 @@ void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
{
struct radeon_sa_bo *sa_bo, *tmp;
- if (!list_empty(&sa_manager->sa_bo)) {
- dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+ if (!list_empty(&sa_manager->olist)) {
+ sa_manager->hole = &sa_manager->olist,
+ radeon_sa_bo_try_free(sa_manager);
+ if (!list_empty(&sa_manager->olist)) {
+ dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+ }
}
- list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
- list_del_init(&sa_bo->list);
+ list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
+ radeon_sa_bo_remove_locked(sa_bo);
}
radeon_bo_unref(&sa_manager->bo);
sa_manager->size = 0;
@@ -113,77 +139,248 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
return r;
}
-/*
- * Principe is simple, we keep a list of sub allocation in offset
- * order (first entry has offset == 0, last entry has the highest
- * offset).
- *
- * When allocating new object we first check if there is room at
- * the end total_size - (last_object_offset + last_object_size) >=
- * alloc_size. If so we allocate new object there.
- *
- * When there is not enough room at the end, we start waiting for
- * each sub object until we reach object_offset+object_size >=
- * alloc_size, this object then become the sub object we return.
- *
- * Alignment can't be bigger than page size
- */
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
+{
+ struct radeon_sa_manager *sa_manager = sa_bo->manager;
+ if (sa_manager->hole == &sa_bo->olist) {
+ sa_manager->hole = sa_bo->olist.prev;
+ }
+ list_del_init(&sa_bo->olist);
+ list_del_init(&sa_bo->flist);
+ radeon_fence_unref(&sa_bo->fence);
+ kfree(sa_bo);
+}
+
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
+{
+ struct radeon_sa_bo *sa_bo, *tmp;
+
+ if (sa_manager->hole->next == &sa_manager->olist)
+ return;
+
+ sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
+ list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
+ if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
+ return;
+ }
+ radeon_sa_bo_remove_locked(sa_bo);
+ }
+}
+
+static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole != &sa_manager->olist) {
+ return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
+ }
+ return 0;
+}
+
+static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole->next != &sa_manager->olist) {
+ return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
+ }
+ return sa_manager->size;
+}
+
+static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
+ struct radeon_sa_bo *sa_bo,
+ unsigned size, unsigned align)
+{
+ unsigned soffset, eoffset, wasted;
+
+ soffset = radeon_sa_bo_hole_soffset(sa_manager);
+ eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
+ wasted = (align - (soffset % align)) % align;
+
+ if ((eoffset - soffset) >= (size + wasted)) {
+ soffset += wasted;
+
+ sa_bo->manager = sa_manager;
+ sa_bo->soffset = soffset;
+ sa_bo->eoffset = soffset + size;
+ list_add(&sa_bo->olist, sa_manager->hole);
+ INIT_LIST_HEAD(&sa_bo->flist);
+ sa_manager->hole = &sa_bo->olist;
+ return true;
+ }
+ return false;
+}
+
+static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
+ struct radeon_fence **fences,
+ unsigned *tries)
+{
+ struct radeon_sa_bo *best_bo = NULL;
+ unsigned i, soffset, best, tmp;
+
+ /* if hole points to the end of the buffer */
+ if (sa_manager->hole->next == &sa_manager->olist) {
+ /* try again with its beginning */
+ sa_manager->hole = &sa_manager->olist;
+ return true;
+ }
+
+ soffset = radeon_sa_bo_hole_soffset(sa_manager);
+ /* to handle wrap around we add sa_manager->size */
+ best = sa_manager->size * 2;
+ /* go over all the fence lists and try to find the sa_bo
+ * closest to the current hole
+ */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_sa_bo *sa_bo;
+
+ if (list_empty(&sa_manager->flist[i])) {
+ continue;
+ }
+
+ sa_bo = list_first_entry(&sa_manager->flist[i],
+ struct radeon_sa_bo, flist);
+
+ if (!radeon_fence_signaled(sa_bo->fence)) {
+ fences[i] = sa_bo->fence;
+ continue;
+ }
+
+ /* limit the number of tries each ring gets */
+ if (tries[i] > 2) {
+ continue;
+ }
+
+ tmp = sa_bo->soffset;
+ if (tmp < soffset) {
+ /* wrap around, pretend it's after */
+ tmp += sa_manager->size;
+ }
+ tmp -= soffset;
+ if (tmp < best) {
+ /* this sa bo is the closest one */
+ best = tmp;
+ best_bo = sa_bo;
+ }
+ }
+
+ if (best_bo) {
+ ++tries[best_bo->fence->ring];
+ sa_manager->hole = best_bo->olist.prev;
+
+ /* we know that this one is signaled,
+ so it's safe to remove it */
+ radeon_sa_bo_remove_locked(best_bo);
+ return true;
+ }
+ return false;
+}
+
int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo *sa_bo,
- unsigned size, unsigned align)
+ struct radeon_sa_bo **sa_bo,
+ unsigned size, unsigned align, bool block)
{
- struct radeon_sa_bo *tmp;
- struct list_head *head;
- unsigned offset = 0, wasted = 0;
+ struct radeon_fence *fences[RADEON_NUM_RINGS];
+ unsigned tries[RADEON_NUM_RINGS];
+ int i, r = -ENOMEM;
BUG_ON(align > RADEON_GPU_PAGE_SIZE);
BUG_ON(size > sa_manager->size);
- /* no one ? */
- head = sa_manager->sa_bo.prev;
- if (list_empty(&sa_manager->sa_bo)) {
- goto out;
+ *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
+ if ((*sa_bo) == NULL) {
+ return -ENOMEM;
}
+ (*sa_bo)->manager = sa_manager;
+ (*sa_bo)->fence = NULL;
+ INIT_LIST_HEAD(&(*sa_bo)->olist);
+ INIT_LIST_HEAD(&(*sa_bo)->flist);
- /* look for a hole big enough */
- offset = 0;
- list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
- /* room before this object ? */
- if ((tmp->offset - offset) >= size) {
- head = tmp->list.prev;
- goto out;
+ spin_lock(&sa_manager->lock);
+ do {
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ fences[i] = NULL;
+ tries[i] = 0;
}
- offset = tmp->offset + tmp->size;
- wasted = offset % align;
- if (wasted) {
- wasted = align - wasted;
+
+ do {
+ radeon_sa_bo_try_free(sa_manager);
+
+ if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
+ size, align)) {
+ spin_unlock(&sa_manager->lock);
+ return 0;
+ }
+
+ /* see if we can skip over some allocations */
+ } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+
+ if (block) {
+ spin_unlock(&sa_manager->lock);
+ r = radeon_fence_wait_any(rdev, fences, false);
+ spin_lock(&sa_manager->lock);
+ if (r) {
+ /* if we have nothing to wait for we
+ are practically out of memory */
+ if (r == -ENOENT) {
+ r = -ENOMEM;
+ }
+ goto out_err;
+ }
}
- offset += wasted;
- }
- /* room at the end ? */
- head = sa_manager->sa_bo.prev;
- tmp = list_entry(head, struct radeon_sa_bo, list);
- offset = tmp->offset + tmp->size;
- wasted = offset % align;
- if (wasted) {
- wasted = align - wasted;
- }
- offset += wasted;
- if ((sa_manager->size - offset) < size) {
- /* failed to find somethings big enough */
- return -ENOMEM;
+ } while (block);
+
+out_err:
+ spin_unlock(&sa_manager->lock);
+ kfree(*sa_bo);
+ *sa_bo = NULL;
+ return r;
+}
+
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
+ struct radeon_fence *fence)
+{
+ struct radeon_sa_manager *sa_manager;
+
+ if (sa_bo == NULL || *sa_bo == NULL) {
+ return;
}
-out:
- sa_bo->manager = sa_manager;
- sa_bo->offset = offset;
- sa_bo->size = size;
- list_add(&sa_bo->list, head);
- return 0;
+ sa_manager = (*sa_bo)->manager;
+ spin_lock(&sa_manager->lock);
+ if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+ (*sa_bo)->fence = radeon_fence_ref(fence);
+ list_add_tail(&(*sa_bo)->flist,
+ &sa_manager->flist[fence->ring]);
+ } else {
+ radeon_sa_bo_remove_locked(*sa_bo);
+ }
+ spin_unlock(&sa_manager->lock);
+ *sa_bo = NULL;
}
-void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
+#if defined(CONFIG_DEBUG_FS)
+void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+ struct seq_file *m)
{
- list_del_init(&sa_bo->list);
+ struct radeon_sa_bo *i;
+
+ spin_lock(&sa_manager->lock);
+ list_for_each_entry(i, &sa_manager->olist, olist) {
+ if (&i->olist == sa_manager->hole) {
+ seq_printf(m, ">");
+ } else {
+ seq_printf(m, " ");
+ }
+ seq_printf(m, "[0x%08x 0x%08x] size %8d",
+ i->soffset, i->eoffset, i->eoffset - i->soffset);
+ if (i->fence) {
+ seq_printf(m, " protected by 0x%016llx on ring %d",
+ i->fence->seq, i->fence->ring);
+ }
+ seq_printf(m, "\n");
+ }
+ spin_unlock(&sa_manager->lock);
}
+#endif
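
The offset arithmetic in radeon_sa_bo_try_alloc() is easier to see in isolation. Below is a standalone sketch of the same hole bookkeeping with fences and wrap-around stripped out; sa_try_alloc() and struct sa_hole are illustrative names, not part of the driver:

#include <stdbool.h>

/* Illustrative stand-ins: hole_end mirrors radeon_sa_bo_hole_soffset(),
 * next_start mirrors radeon_sa_bo_hole_eoffset(). */
struct sa_hole {
	unsigned hole_end;   /* end offset of the bo the hole points at */
	unsigned next_start; /* start offset of the next bo, or the buffer size */
};

static bool sa_try_alloc(struct sa_hole *h, unsigned size, unsigned align,
			 unsigned *soffset, unsigned *eoffset)
{
	unsigned start = h->hole_end;
	unsigned wasted = (align - (start % align)) % align;

	if (h->next_start - start >= size + wasted) {
		*soffset = start + wasted;       /* aligned start of the new bo */
		*eoffset = *soffset + size;
		h->hole_end = *eoffset;          /* the hole now trails the new bo */
		return true;
	}
	return false; /* caller frees signaled bos or moves on to the next hole */
}
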
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 61dd4e3c9209..e2ace5dce117 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -31,148 +31,107 @@
#include "drm.h"
#include "radeon.h"
-static int radeon_semaphore_add_bo(struct radeon_device *rdev)
-{
- struct radeon_semaphore_bo *bo;
- unsigned long irq_flags;
- uint64_t gpu_addr;
- uint32_t *cpu_ptr;
- int r, i;
-
-
- bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
- if (bo == NULL) {
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&bo->free);
- INIT_LIST_HEAD(&bo->list);
- bo->nused = 0;
-
- r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
- if (r) {
- dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
- kfree(bo);
- return r;
- }
- gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
- gpu_addr += bo->ib->sa_bo.offset;
- cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
- cpu_ptr += (bo->ib->sa_bo.offset >> 2);
- for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
- bo->semaphores[i].gpu_addr = gpu_addr;
- bo->semaphores[i].cpu_ptr = cpu_ptr;
- bo->semaphores[i].bo = bo;
- list_add_tail(&bo->semaphores[i].list, &bo->free);
- gpu_addr += 8;
- cpu_ptr += 2;
- }
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
- return 0;
-}
-
-static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
- struct radeon_semaphore_bo *bo)
-{
- radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
- radeon_fence_unref(&bo->ib->fence);
- list_del(&bo->list);
- kfree(bo);
-}
-
-void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
-{
- struct radeon_semaphore_bo *bo, *n;
-
- if (list_empty(&rdev->semaphore_drv.bo)) {
- return;
- }
- /* only shrink if first bo has free semaphore */
- bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
- if (list_empty(&bo->free)) {
- return;
- }
- list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
- if (bo->nused)
- continue;
- radeon_semaphore_del_bo_locked(rdev, bo);
- }
-}
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
- struct radeon_semaphore_bo *bo;
- unsigned long irq_flags;
- bool do_retry = true;
int r;
-retry:
- *semaphore = NULL;
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
- if (list_empty(&bo->free))
- continue;
- *semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
- (*semaphore)->cpu_ptr[0] = 0;
- (*semaphore)->cpu_ptr[1] = 0;
- list_del(&(*semaphore)->list);
- bo->nused++;
- break;
- }
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
-
+ *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
- if (do_retry) {
- do_retry = false;
- r = radeon_semaphore_add_bo(rdev);
- if (r)
- return r;
- goto retry;
- }
return -ENOMEM;
}
-
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
+ &(*semaphore)->sa_bo, 8, 8, true);
+ if (r) {
+ kfree(*semaphore);
+ *semaphore = NULL;
+ return r;
+ }
+ (*semaphore)->waiters = 0;
+ (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
+ *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
return 0;
}
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
+ --semaphore->waiters;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
}
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
+ ++semaphore->waiters;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
}
-void radeon_semaphore_free(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore)
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ bool sync_to[RADEON_NUM_RINGS],
+ int dst_ring)
{
- unsigned long irq_flags;
+ int i = 0, r;
+
+ mutex_lock(&rdev->ring_lock);
+ r = radeon_ring_alloc(rdev, &rdev->ring[dst_ring], RADEON_NUM_RINGS * 8);
+ if (r) {
+ goto error;
+ }
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ /* no need to sync to our own or unused rings */
+ if (!sync_to[i] || i == dst_ring)
+ continue;
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- semaphore->bo->nused--;
- list_add_tail(&semaphore->list, &semaphore->bo->free);
- radeon_semaphore_shrink_locked(rdev);
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+ /* prevent GPU deadlocks */
+ if (!rdev->ring[i].ready) {
+ dev_err(rdev->dev, "Trying to sync to a disabled ring!");
+ r = -EINVAL;
+ goto error;
+ }
+
+ r = radeon_ring_alloc(rdev, &rdev->ring[i], 8);
+ if (r) {
+ goto error;
+ }
+
+ radeon_semaphore_emit_signal(rdev, i, semaphore);
+ radeon_semaphore_emit_wait(rdev, dst_ring, semaphore);
+
+ radeon_ring_commit(rdev, &rdev->ring[i]);
+ }
+
+ radeon_ring_commit(rdev, &rdev->ring[dst_ring]);
+ mutex_unlock(&rdev->ring_lock);
+
+ return 0;
+
+error:
+ /* unlock all locks taken so far */
+ for (--i; i >= 0; --i) {
+ if (sync_to[i] || i == dst_ring) {
+ radeon_ring_undo(&rdev->ring[i]);
+ }
+ }
+ radeon_ring_undo(&rdev->ring[dst_ring]);
+ mutex_unlock(&rdev->ring_lock);
+ return r;
}
-void radeon_semaphore_driver_fini(struct radeon_device *rdev)
+void radeon_semaphore_free(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence)
{
- struct radeon_semaphore_bo *bo, *n;
- unsigned long irq_flags;
-
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- /* we force to free everything */
- list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) {
- if (!list_empty(&bo->free)) {
- dev_err(rdev->dev, "still in use semaphore\n");
- }
- radeon_semaphore_del_bo_locked(rdev, bo);
+ if (semaphore == NULL) {
+ return;
+ }
+ if (semaphore->waiters > 0) {
+ dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
+ " hardware lockup imminent!\n", semaphore);
}
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+ radeon_sa_bo_free(rdev, &semaphore->sa_bo, fence);
+ kfree(semaphore);
}
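
The intended calling sequence for the reworked semaphore API is: create, sync the source rings to the destination ring, emit the actual work, then hand the semaphore back together with the fence that protects its sub-allocation. The radeon_ttm.c hunk below follows exactly this pattern; a condensed sketch with error paths trimmed and the ring index taken as a parameter for illustration:

/* Condensed from the radeon_move_blit() change below; not a new API. */
static int example_sync_and_copy(struct radeon_device *rdev,
				 struct radeon_fence *fence, int src_ring)
{
	bool sync_to_ring[RADEON_NUM_RINGS] = { };
	struct radeon_semaphore *sem;
	int r;

	sync_to_ring[src_ring] = true;

	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;

	r = radeon_semaphore_sync_rings(rdev, sem, sync_to_ring, fence->ring);
	if (r) {
		radeon_semaphore_free(rdev, sem, NULL);
		return r;
	}

	/* ... emit the copy and the fence on fence->ring here ... */

	/* the fence keeps the semaphore's sub-allocation alive until it signals */
	radeon_semaphore_free(rdev, sem, fence);
	return 0;
}
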
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index dc5dcf483aa3..efff929ea49d 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -59,7 +59,7 @@ void radeon_test_moves(struct radeon_device *rdev)
}
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &vram_obj);
+ NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@@ -78,7 +78,7 @@ void radeon_test_moves(struct radeon_device *rdev)
void **vram_start, **vram_end;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
+ RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_cleanup;
@@ -317,7 +317,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
out_cleanup:
if (semaphore)
- radeon_semaphore_free(rdev, semaphore);
+ radeon_semaphore_free(rdev, semaphore, NULL);
if (fence1)
radeon_fence_unref(&fence1);
@@ -437,7 +437,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
out_cleanup:
if (semaphore)
- radeon_semaphore_free(rdev, semaphore);
+ radeon_semaphore_free(rdev, semaphore, NULL);
if (fenceA)
radeon_fence_unref(&fenceA);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f493c6403af5..c94a2257761f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -222,8 +222,9 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
{
struct radeon_device *rdev;
uint64_t old_start, new_start;
- struct radeon_fence *fence;
- int r, i;
+ struct radeon_fence *fence, *old_fence;
+ struct radeon_semaphore *sem = NULL;
+ int r;
rdev = radeon_get_rdev(bo->bdev);
r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev));
@@ -242,6 +243,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ radeon_fence_unref(&fence);
return -EINVAL;
}
switch (new_mem->mem_type) {
@@ -253,42 +255,36 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ radeon_fence_unref(&fence);
return -EINVAL;
}
if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
+ radeon_fence_unref(&fence);
return -EINVAL;
}
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
/* sync other rings */
- if (rdev->family >= CHIP_R600) {
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- /* no need to sync to our own or unused rings */
- if (i == radeon_copy_ring_index(rdev) || !rdev->ring[i].ready)
- continue;
-
- if (!fence->semaphore) {
- r = radeon_semaphore_create(rdev, &fence->semaphore);
- /* FIXME: handle semaphore error */
- if (r)
- continue;
- }
+ old_fence = bo->sync_obj;
+ if (old_fence && old_fence->ring != fence->ring
+ && !radeon_fence_signaled(old_fence)) {
+ bool sync_to_ring[RADEON_NUM_RINGS] = { };
+ sync_to_ring[old_fence->ring] = true;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ radeon_fence_unref(&fence);
+ return r;
+ }
- r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
- /* FIXME: handle ring lock error */
- if (r)
- continue;
- radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
- radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
-
- r = radeon_ring_lock(rdev, &rdev->ring[radeon_copy_ring_index(rdev)], 3);
- /* FIXME: handle ring lock error */
- if (r)
- continue;
- radeon_semaphore_emit_wait(rdev, radeon_copy_ring_index(rdev), fence->semaphore);
- radeon_ring_unlock_commit(rdev, &rdev->ring[radeon_copy_ring_index(rdev)]);
+ r = radeon_semaphore_sync_rings(rdev, sem,
+ sync_to_ring, fence->ring);
+ if (r) {
+ radeon_semaphore_free(rdev, sem, NULL);
+ radeon_fence_unref(&fence);
+ return r;
}
}
@@ -298,6 +294,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
evict, no_wait_reserve, no_wait_gpu, new_mem);
+ radeon_semaphore_free(rdev, sem, fence);
radeon_fence_unref(&fence);
return r;
}
@@ -614,10 +611,18 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
int r;
+ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
return 0;
+ if (slave && ttm->sg) {
+ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+ gtt->ttm.dma_address, ttm->num_pages);
+ ttm->state = tt_unbound;
+ return 0;
+ }
+
rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
@@ -658,6 +663,10 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
+ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+ if (slave)
+ return;
rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
@@ -729,8 +738,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
return r;
}
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM,
- &rdev->stollen_vga_memory);
+ RADEON_GEM_DOMAIN_VRAM,
+ NULL, &rdev->stollen_vga_memory);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 4cf381b3a6d8..a464eb5e2df2 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -430,12 +430,9 @@ static int rs400_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d25cf869d08d..25f9eef12c42 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -396,7 +396,6 @@ int rs600_asic_reset(struct radeon_device *rdev)
/* Check if GPU is idle */
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
- rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
@@ -553,6 +552,12 @@ int rs600_irq_set(struct radeon_device *rdev)
~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+ u32 hdmi0;
+ if (ASIC_IS_DCE2(rdev))
+ hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+ ~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+ else
+ hdmi0 = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -579,10 +584,15 @@ int rs600_irq_set(struct radeon_device *rdev)
if (rdev->irq.hpd[1]) {
hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
}
+ if (rdev->irq.afmt[0]) {
+ hdmi0 |= S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+ }
WREG32(R_000040_GEN_INT_CNTL, tmp);
WREG32(R_006540_DxMODE_INT_MASK, mode_int);
WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+ if (ASIC_IS_DCE2(rdev))
+ WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
return 0;
}
@@ -622,6 +632,17 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev)
rdev->irq.stat_regs.r500.disp_int = 0;
}
+ if (ASIC_IS_DCE2(rdev)) {
+ rdev->irq.stat_regs.r500.hdmi0_status = RREG32(R_007404_HDMI0_STATUS) &
+ S_007404_HDMI0_AZ_FORMAT_WTRIG(1);
+ if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+ tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
+ tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);
+ WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ }
+ } else
+ rdev->irq.stat_regs.r500.hdmi0_status = 0;
+
if (irqs) {
WREG32(R_000044_GEN_INT_STATUS, irqs);
}
@@ -630,6 +651,9 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev)
void rs600_irq_disable(struct radeon_device *rdev)
{
+ u32 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+ ~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+ WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
WREG32(R_000040_GEN_INT_CNTL, 0);
WREG32(R_006540_DxMODE_INT_MASK, 0);
/* Wait and acknowledge irq */
@@ -641,15 +665,20 @@ int rs600_irq_process(struct radeon_device *rdev)
{
u32 status, msi_rearm;
bool queue_hotplug = false;
+ bool queue_hdmi = false;
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
status = rs600_irq_ack(rdev);
- if (!status && !rdev->irq.stat_regs.r500.disp_int) {
+ if (!status &&
+ !rdev->irq.stat_regs.r500.disp_int &&
+ !rdev->irq.stat_regs.r500.hdmi0_status) {
return IRQ_NONE;
}
- while (status || rdev->irq.stat_regs.r500.disp_int) {
+ while (status ||
+ rdev->irq.stat_regs.r500.disp_int ||
+ rdev->irq.stat_regs.r500.hdmi0_status) {
/* SW interrupt */
if (G_000044_SW_INT(status)) {
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
@@ -687,12 +716,18 @@ int rs600_irq_process(struct radeon_device *rdev)
queue_hotplug = true;
DRM_DEBUG("HPD2\n");
}
+ if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+ queue_hdmi = true;
+ DRM_DEBUG("HDMI0\n");
+ }
status = rs600_irq_ack(rdev);
}
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
+ if (queue_hdmi)
+ schedule_work(&rdev->audio_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS600:
@@ -883,12 +918,9 @@ static int rs600_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index a27c13ac47c3..f1f89414dc63 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -485,6 +485,20 @@
#define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16)
#define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1)
#define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF
+#define R_007404_HDMI0_STATUS 0x007404
+#define S_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) & 0x1) << 28)
+#define G_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) >> 28) & 0x1)
+#define C_007404_HDMI0_AZ_FORMAT_WTRIG 0xEFFFFFFF
+#define S_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) & 0x1) << 29)
+#define G_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) >> 29) & 0x1)
+#define C_007404_HDMI0_AZ_FORMAT_WTRIG_INT 0xDFFFFFFF
+#define R_007408_HDMI0_AUDIO_PACKET_CONTROL 0x007408
+#define S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) & 0x1) << 28)
+#define G_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) >> 28) & 0x1)
+#define C_007408_HDMI0_AZ_FORMAT_WTRIG_MASK 0xEFFFFFFF
+#define S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) & 0x1) << 29)
+#define G_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) >> 29) & 0x1)
+#define C_007408_HDMI0_AZ_FORMAT_WTRIG_ACK 0xDFFFFFFF
/* MC registers */
#define R_000000_MC_STATUS 0x000000
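
The new HDMI0 register definitions follow the driver's S_/G_/C_ macro convention: S_*() shifts a field value into place, G_*() extracts it, and C_* is the clear mask. A small sketch of the test-and-ack idiom these macros support, mirroring the rs600_irq_ack() change above (the wrapper function is illustrative only):

/* Sketch only: check the HDMI0 audio-format trigger and acknowledge it. */
static void example_hdmi0_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (G_007404_HDMI0_AZ_FORMAT_WTRIG(RREG32(R_007404_HDMI0_STATUS))) {
		tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
		tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1); /* write 1 to ack */
		WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);
	}
}
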
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index f2c3b9d75f18..3277ddecfe9f 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -647,12 +647,9 @@ static int rs690_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index d8d78fe17946..7f08cedb5333 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -412,12 +412,10 @@ static int rv515_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index cdab1aeaed6e..c2f473bc13b8 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1114,12 +1114,9 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "IB test failed (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
@@ -1178,10 +1175,6 @@ int rv770_init(struct radeon_device *rdev)
{
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -1281,7 +1274,6 @@ void rv770_fini(struct radeon_device *rdev)
rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 79fa588e9ed5..9c549f702f2f 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -353,6 +353,197 @@
#define SRBM_STATUS 0x0E50
+/* DCE 3.2 HDMI */
+#define HDMI_CONTROL 0x7400
+# define HDMI_KEEPOUT_MODE (1 << 0)
+# define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */
+# define HDMI_ERROR_ACK (1 << 8)
+# define HDMI_ERROR_MASK (1 << 9)
+#define HDMI_STATUS 0x7404
+# define HDMI_ACTIVE_AVMUTE (1 << 0)
+# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
+# define HDMI_VBI_PACKET_ERROR (1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL 0x7408
+# define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
+# define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL 0x740c
+# define HDMI_ACR_SEND (1 << 0)
+# define HDMI_ACR_CONT (1 << 1)
+# define HDMI_ACR_SELECT(x) (((x) & 3) << 4)
+# define HDMI_ACR_HW 0
+# define HDMI_ACR_32 1
+# define HDMI_ACR_44 2
+# define HDMI_ACR_48 3
+# define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
+# define HDMI_ACR_AUTO_SEND (1 << 12)
+#define HDMI_VBI_PACKET_CONTROL 0x7410
+# define HDMI_NULL_SEND (1 << 0)
+# define HDMI_GC_SEND (1 << 4)
+# define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0 0x7414
+# define HDMI_AVI_INFO_SEND (1 << 0)
+# define HDMI_AVI_INFO_CONT (1 << 1)
+# define HDMI_AUDIO_INFO_SEND (1 << 4)
+# define HDMI_AUDIO_INFO_CONT (1 << 5)
+# define HDMI_MPEG_INFO_SEND (1 << 8)
+# define HDMI_MPEG_INFO_CONT (1 << 9)
+#define HDMI_INFOFRAME_CONTROL1 0x7418
+# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
+# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL 0x741c
+# define HDMI_GENERIC0_SEND (1 << 0)
+# define HDMI_GENERIC0_CONT (1 << 1)
+# define HDMI_GENERIC1_SEND (1 << 4)
+# define HDMI_GENERIC1_CONT (1 << 5)
+# define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
+# define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
+#define HDMI_GC 0x7428
+# define HDMI_GC_AVMUTE (1 << 0)
+#define AFMT_AUDIO_PACKET_CONTROL2 0x742c
+# define AFMT_AUDIO_LAYOUT_OVRD (1 << 0)
+# define AFMT_AUDIO_LAYOUT_SELECT (1 << 1)
+# define AFMT_60958_CS_SOURCE (1 << 4)
+# define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8)
+# define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0 0x7454
+# define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AVI_INFO_S(x) (((x) & 3) << 8)
+# define AFMT_AVI_INFO_B(x) (((x) & 3) << 10)
+# define AFMT_AVI_INFO_A(x) (((x) & 1) << 12)
+# define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13)
+# define AFMT_AVI_INFO_Y_RGB 0
+# define AFMT_AVI_INFO_Y_YCBCR422 1
+# define AFMT_AVI_INFO_Y_YCBCR444 2
+# define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
+# define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16)
+# define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20)
+# define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22)
+# define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
+# define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24)
+# define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26)
+# define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28)
+# define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31)
+# define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1 0x7458
+# define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2 0x745c
+# define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3 0x7460
+# define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24)
+#define AFMT_MPEG_INFO0 0x7464
+# define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
+# define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
+# define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1 0x7468
+# define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8)
+# define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR 0x746c
+#define AFMT_GENERIC0_0 0x7470
+#define AFMT_GENERIC0_1 0x7474
+#define AFMT_GENERIC0_2 0x7478
+#define AFMT_GENERIC0_3 0x747c
+#define AFMT_GENERIC0_4 0x7480
+#define AFMT_GENERIC0_5 0x7484
+#define AFMT_GENERIC0_6 0x7488
+#define AFMT_GENERIC1_HDR 0x748c
+#define AFMT_GENERIC1_0 0x7490
+#define AFMT_GENERIC1_1 0x7494
+#define AFMT_GENERIC1_2 0x7498
+#define AFMT_GENERIC1_3 0x749c
+#define AFMT_GENERIC1_4 0x74a0
+#define AFMT_GENERIC1_5 0x74a4
+#define AFMT_GENERIC1_6 0x74a8
+#define HDMI_ACR_32_0 0x74ac
+# define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1 0x74b0
+# define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0 0x74b4
+# define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1 0x74b8
+# define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0 0x74bc
+# define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1 0x74c0
+# define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_STATUS_0 0x74c4
+#define HDMI_ACR_STATUS_1 0x74c8
+#define AFMT_AUDIO_INFO0 0x74cc
+# define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8)
+# define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16)
+#define AFMT_AUDIO_INFO1 0x74d0
+# define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
+# define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
+# define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+#define AFMT_60958_0 0x74d4
+# define AFMT_60958_CS_A(x) (((x) & 1) << 0)
+# define AFMT_60958_CS_B(x) (((x) & 1) << 1)
+# define AFMT_60958_CS_C(x) (((x) & 1) << 2)
+# define AFMT_60958_CS_D(x) (((x) & 3) << 3)
+# define AFMT_60958_CS_MODE(x) (((x) & 3) << 6)
+# define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
+# define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
+# define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+# define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
+#define AFMT_60958_1 0x74d8
+# define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16)
+# define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18)
+# define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL 0x74dc
+# define AFMT_AUDIO_CRC_EN (1 << 0)
+#define AFMT_RAMP_CONTROL0 0x74e0
+# define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_RAMP_DATA_SIGN (1 << 31)
+#define AFMT_RAMP_CONTROL1 0x74e4
+# define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2 0x74e8
+# define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3 0x74ec
+# define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_60958_2 0x74f0
+# define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
+# define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
+# define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
+#define AFMT_STATUS 0x7600
+# define AFMT_AUDIO_ENABLE (1 << 4)
+# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL 0x7604
+# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
+# define AFMT_AUDIO_TEST_EN (1 << 12)
+# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
+# define AFMT_60958_CS_UPDATE (1 << 26)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
+#define AFMT_VBI_PACKET_CONTROL 0x7608
+# define AFMT_GENERIC0_UPDATE (1 << 2)
+#define AFMT_INFOFRAME_CONTROL0 0x760c
+# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
+# define AFMT_MPEG_INFO_UPDATE (1 << 10)
+#define AFMT_GENERIC0_7 0x7610
+/* second instance starts at 0x7800 */
+#define HDMI_OFFSET0 (0x7400 - 0x7400)
+#define HDMI_OFFSET1 (0x7800 - 0x7400)
+
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
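The block of DCE 3.2 HDMI/AFMT defines above is only a register map. A minimal sketch of how such field macros are typically composed into a register write follows; WREG32() is assumed to be the usual radeon MMIO accessor, and the helper name and the infoframe line numbers are illustrative, not taken from this series:

/* Hypothetical helper, for illustration only: place the AVI and audio
 * infoframes and enable both packets for one HDMI block instance. */
static void dce32_hdmi_setup_infoframes(struct radeon_device *rdev, u32 offset)
{
	/* offset is HDMI_OFFSET0 or HDMI_OFFSET1 (second instance at 0x7800) */
	WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
	       HDMI_AVI_INFO_LINE(2) |		/* AVI infoframe on VBI line 2 */
	       HDMI_AUDIO_INFO_LINE(2));	/* audio infoframe on VBI line 2 */
	WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
	       HDMI_AVI_INFO_SEND | HDMI_AVI_INFO_CONT |
	       HDMI_AUDIO_INFO_SEND | HDMI_AUDIO_INFO_CONT);
}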
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 27bda986fc2b..549732e56ca9 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2217,8 +2217,6 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
u32 srbm_status;
u32 grbm_status, grbm_status2;
u32 grbm_status_se0, grbm_status_se1;
- struct r100_gpu_lockup *lockup = &rdev->config.si.lockup;
- int r;
srbm_status = RREG32(SRBM_STATUS);
grbm_status = RREG32(GRBM_STATUS);
@@ -2226,20 +2224,12 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, ring);
+ radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- /* XXX deal with CP0,1,2 */
- ring->rptr = RREG32(ring->rptr_reg);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
static int si_gpu_soft_reset(struct radeon_device *rdev)
@@ -2275,6 +2265,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
SOFT_RESET_GDS |
SOFT_RESET_PA |
SOFT_RESET_SC |
+ SOFT_RESET_BCI |
SOFT_RESET_SPI |
SOFT_RESET_SX |
SOFT_RESET_TC |
@@ -2985,7 +2976,8 @@ int si_rlc_init(struct radeon_device *rdev)
/* save restore block */
if (rdev->rlc.save_restore_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, &rdev->rlc.save_restore_obj);
+ RADEON_GEM_DOMAIN_VRAM, NULL,
+ &rdev->rlc.save_restore_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
return r;
@@ -3009,7 +3001,8 @@ int si_rlc_init(struct radeon_device *rdev)
/* clear state block */
if (rdev->rlc.clear_state_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, &rdev->rlc.clear_state_obj);
+ RADEON_GEM_DOMAIN_VRAM, NULL,
+ &rdev->rlc.clear_state_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
si_rlc_fini(rdev);
@@ -3216,6 +3209,8 @@ static int si_irq_init(struct radeon_device *rdev)
/* force the active interrupt state to all disabled */
si_disable_interrupt_state(rdev);
+ pci_set_master(rdev->pdev);
+
/* enable irqs */
si_enable_interrupts(rdev);
@@ -3994,10 +3989,6 @@ int si_init(struct radeon_device *rdev)
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -4117,7 +4108,6 @@ void si_fini(struct radeon_device *rdev)
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
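The per-ASIC lockup bookkeeping (r100_gpu_lockup_update(), r100_gpu_cp_is_lockup() and the open-coded NOP submission) is replaced here by two shared ring helpers. A rough sketch of what the force-activity helper amounts to, reconstructed from the code being removed above rather than copied from radeon_ring.c, would look roughly like this:

/* Sketch only: push two PACKET2 NOPs through the ring so the read pointer
 * advances if the CP is still alive; radeon_ring_test_lockup() can then
 * compare rptr over time to decide whether the GPU is hung. */
static void ring_force_activity_sketch(struct radeon_device *rdev,
				       struct radeon_ring *ring)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 2);
	if (!r) {
		radeon_ring_write(ring, 0x80000000);	/* PACKET2 NOP */
		radeon_ring_write(ring, 0x80000000);
		radeon_ring_unlock_commit(rdev, ring);
	}
}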
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index cb1ee4e0050a..6eb507a5d130 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -735,7 +735,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
return -EINVAL;
}
drm_core_ioremap(dev->agp_buffer_map, dev);
- if (!dev->agp_buffer_map) {
+ if (!dev->agp_buffer_map->handle) {
DRM_ERROR("failed to ioremap DMA buffer region!\n");
savage_do_cleanup_bci(dev);
return -ENOMEM;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1f5c67c579cf..36792bd4da77 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -343,6 +343,16 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
if (unlikely(bo->ttm == NULL))
ret = -ENOMEM;
break;
+ case ttm_bo_type_sg:
+ bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags | TTM_PAGE_FLAG_SG,
+ glob->dummy_read_page);
+ if (unlikely(bo->ttm == NULL)) {
+ ret = -ENOMEM;
+ break;
+ }
+ bo->ttm->sg = bo->sg;
+ break;
default:
pr_err("Illegal buffer object type\n");
ret = -EINVAL;
@@ -1169,6 +1179,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bool interruptible,
struct file *persistent_swap_storage,
size_t acc_size,
+ struct sg_table *sg,
void (*destroy) (struct ttm_buffer_object *))
{
int ret = 0;
@@ -1223,6 +1234,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->seq_valid = false;
bo->persistent_swap_storage = persistent_swap_storage;
bo->acc_size = acc_size;
+ bo->sg = sg;
atomic_inc(&bo->glob->bo_count);
ret = ttm_bo_check_placement(bo, placement);
@@ -1233,7 +1245,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
- if (bo->type == ttm_bo_type_device) {
+ if (bo->type == ttm_bo_type_device ||
+ bo->type == ttm_bo_type_sg) {
ret = ttm_bo_setup_vm(bo);
if (ret)
goto out_err;
@@ -1312,7 +1325,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
buffer_start, interruptible,
- persistent_swap_storage, acc_size, NULL);
+ persistent_swap_storage, acc_size, NULL, NULL);
if (likely(ret == 0))
*p_bo = bo;
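Because ttm_bo_init() gains an sg_table argument, every caller has to be adjusted; drivers that never import dma-bufs simply pass NULL, as the vmwgfx hunk later in this series does. A hedged sketch of an adjusted call, with placeholder argument values:

/* Illustrative wrapper only; a real driver supplies its own placement,
 * accounting size and destroy callback. */
static int foo_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
		       unsigned long size, struct ttm_placement *placement,
		       size_t acc_size)
{
	return ttm_bo_init(bdev, bo, size, ttm_bo_type_device, placement,
			   0 /* page_alignment */, 0 /* buffer_start */,
			   true /* interruptible */,
			   NULL /* persistent_swap_storage */, acc_size,
			   NULL /* sg: only used for ttm_bo_type_sg imports */,
			   NULL /* destroy */);
}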
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 53673907a6a0..4d02c46a9420 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -38,7 +38,7 @@ static void udl_usb_disconnect(struct usb_interface *interface)
drm_unplug_dev(dev);
}
-static struct vm_operations_struct udl_gem_vm_ops = {
+static const struct vm_operations_struct udl_gem_vm_ops = {
.fault = udl_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -57,7 +57,7 @@ static const struct file_operations udl_driver_fops = {
};
static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = udl_driver_load,
.unload = udl_driver_unload,
@@ -70,6 +70,10 @@ static struct drm_driver driver = {
.dumb_map_offset = udl_gem_mmap,
.dumb_destroy = udl_dumb_destroy,
.fops = &udl_driver_fops,
+
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = udl_gem_prime_import,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 96820d03a303..fccd361f7b50 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -66,6 +66,7 @@ struct udl_gem_object {
struct drm_gem_object base;
struct page **pages;
void *vmapping;
+ struct sg_table *sg;
};
#define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
@@ -118,6 +119,8 @@ int udl_gem_init_object(struct drm_gem_object *obj);
void udl_gem_free_object(struct drm_gem_object *gem_obj);
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
size_t size);
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
int udl_gem_vmap(struct udl_gem_object *obj);
void udl_gem_vunmap(struct udl_gem_object *obj);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 4d9c3a5d8a45..ce9a61179925 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -156,8 +156,17 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
if (!fb->active_16)
return 0;
- if (!fb->obj->vmapping)
- udl_gem_vmap(fb->obj);
+ if (!fb->obj->vmapping) {
+ ret = udl_gem_vmap(fb->obj);
+ if (ret == -ENOMEM) {
+ DRM_ERROR("failed to vmap fb\n");
+ return 0;
+ }
+ if (!fb->obj->vmapping) {
+ DRM_ERROR("failed to vmapping\n");
+ return 0;
+ }
+ }
start_cycles = get_cycles();
@@ -593,11 +602,20 @@ udl_fb_user_fb_create(struct drm_device *dev,
struct drm_gem_object *obj;
struct udl_framebuffer *ufb;
int ret;
+ uint32_t size;
obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
if (obj == NULL)
return ERR_PTR(-ENOENT);
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ if (size > obj->size) {
+ DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n", size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
+ return ERR_PTR(-ENOMEM);
+ }
+
ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
if (ufb == NULL)
return ERR_PTR(-ENOMEM);
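The new size check rejects framebuffers that would overrun the backing GEM object. As a worked example (assuming 4 KiB pages): a 1920x1080 RGB565 surface with pitches[0] = 3840 needs 3840 * 1080 = 4147200 bytes, which ALIGN()s up to 4149248 bytes (1013 pages); any object smaller than that is now refused with -ENOMEM before the struct udl_framebuffer is even allocated.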
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 92f19ef329b0..7bd65bdd15a8 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -9,6 +9,7 @@
#include "drmP.h"
#include "udl_drv.h"
#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
size_t size)
@@ -161,6 +162,12 @@ static void udl_gem_put_pages(struct udl_gem_object *obj)
int page_count = obj->base.size / PAGE_SIZE;
int i;
+ if (obj->base.import_attach) {
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
+ return;
+ }
+
for (i = 0; i < page_count; i++)
page_cache_release(obj->pages[i]);
@@ -173,6 +180,18 @@ int udl_gem_vmap(struct udl_gem_object *obj)
int page_count = obj->base.size / PAGE_SIZE;
int ret;
+ if (obj->base.import_attach) {
+ ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
+ 0, obj->base.size, DMA_BIDIRECTIONAL);
+ if (ret)
+ return -EINVAL;
+
+ obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
+ if (!obj->vmapping)
+ return -ENOMEM;
+ return 0;
+ }
+
ret = udl_gem_get_pages(obj, GFP_KERNEL);
if (ret)
return ret;
@@ -185,6 +204,13 @@ int udl_gem_vmap(struct udl_gem_object *obj)
void udl_gem_vunmap(struct udl_gem_object *obj)
{
+ if (obj->base.import_attach) {
+ dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
+ dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
+ obj->base.size, DMA_BIDIRECTIONAL);
+ return;
+ }
+
if (obj->vmapping)
vunmap(obj->vmapping);
@@ -198,6 +224,9 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->vmapping)
udl_gem_vunmap(obj);
+ if (gem_obj->import_attach)
+ drm_prime_gem_destroy(gem_obj, obj->sg);
+
if (obj->pages)
udl_gem_put_pages(obj);
@@ -224,7 +253,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
ret = udl_gem_get_pages(gobj, GFP_KERNEL);
if (ret)
- return ret;
+ goto out;
if (!gobj->base.map_list.map) {
ret = drm_gem_create_mmap_offset(obj);
if (ret)
@@ -239,3 +268,66 @@ unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
+
+static int udl_prime_create(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg,
+ struct udl_gem_object **obj_p)
+{
+ struct udl_gem_object *obj;
+ int npages;
+
+ npages = size / PAGE_SIZE;
+
+ *obj_p = NULL;
+ obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
+ if (!obj)
+ return -ENOMEM;
+
+ obj->sg = sg;
+ obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (obj->pages == NULL) {
+ DRM_ERROR("obj pages is NULL %d\n", npages);
+ return -ENOMEM;
+ }
+
+ drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+
+ *obj_p = obj;
+ return 0;
+}
+
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct udl_gem_object *uobj;
+ int ret;
+
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_PTR(PTR_ERR(attach));
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
+ if (ret) {
+ goto fail_unmap;
+ }
+
+ uobj->base.import_attach = attach;
+
+ return &uobj->base;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
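With DRIVER_PRIME set and .prime_fd_to_handle wired to the generic helper, a dma-buf exported by another DRM device can now be imported into udl from userspace. A minimal sketch of that path using only the generic PRIME ioctl; nothing here is udl-specific, and the exact drm.h include path depends on the libdrm installation:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* struct drm_prime_handle, DRM_IOCTL_PRIME_FD_TO_HANDLE */

/* Turn a dma-buf fd into a GEM handle on an already-open DRM fd; on udl the
 * kernel side of this ends up in udl_gem_prime_import() above. */
static int import_dmabuf(int drm_fd, int dmabuf_fd, uint32_t *handle)
{
	struct drm_prime_handle args = { .fd = dmabuf_fd, .flags = 0 };
	int ret = ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);

	if (ret == 0)
		*handle = args.handle;
	return ret;
}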
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index b3ecb3d12a1d..0d7816789da1 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -395,7 +395,7 @@ int udl_modeset_init(struct drm_device *dev)
dev->mode_config.prefer_shadow = 0;
dev->mode_config.preferred_depth = 24;
- dev->mode_config.funcs = (void *)&udl_mode_funcs;
+ dev->mode_config.funcs = &udl_mode_funcs;
drm_mode_create_dirty_info_property(dev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 2286d47e5022..6b0078ffa763 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1178,7 +1178,7 @@ err_out:
return &vfb->base;
}
-static struct drm_mode_config_funcs vmw_kms_funcs = {
+static const struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a37abb581cbb..22bf9a21ec71 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1567,7 +1567,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
0, 0, interruptible,
- NULL, acc_size, bo_free);
+ NULL, acc_size, NULL, bo_free);
return ret;
}
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 96c83a9a76bb..f34838839b08 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -21,6 +21,7 @@ config VGA_SWITCHEROO
bool "Laptop Hybrid Graphics - GPU switching support"
depends on X86
depends on ACPI
+ select VGA_ARB
help
Many laptops released in 2008/9/10 have two GPUs with a multiplexer
to switch between them. This adds support for dynamic switching when
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 58434e804d91..38f9534ac513 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -28,15 +28,16 @@
#include <linux/pci.h>
#include <linux/vga_switcheroo.h>
+#include <linux/vgaarb.h>
+
struct vga_switcheroo_client {
struct pci_dev *pdev;
struct fb_info *fb_info;
int pwr_state;
- void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state);
- void (*reprobe)(struct pci_dev *pdev);
- bool (*can_switch)(struct pci_dev *pdev);
+ const struct vga_switcheroo_client_ops *ops;
int id;
bool active;
+ struct list_head list;
};
static DEFINE_MUTEX(vgasr_mutex);
@@ -51,16 +52,23 @@ struct vgasr_priv {
struct dentry *switch_file;
int registered_clients;
- struct vga_switcheroo_client clients[VGA_SWITCHEROO_MAX_CLIENTS];
+ struct list_head clients;
struct vga_switcheroo_handler *handler;
};
+#define ID_BIT_AUDIO 0x100
+#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
+#define client_is_vga(c) ((c)->id == -1 || !client_is_audio(c))
+#define client_id(c) ((c)->id & ~ID_BIT_AUDIO)
+
static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv);
/* only one switcheroo per system */
-static struct vgasr_priv vgasr_priv;
+static struct vgasr_priv vgasr_priv = {
+ .clients = LIST_HEAD_INIT(vgasr_priv.clients),
+};
int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
{
@@ -86,72 +94,119 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
static void vga_switcheroo_enable(void)
{
- int i;
int ret;
+ struct vga_switcheroo_client *client;
+
/* call the handler to init */
vgasr_priv.handler->init();
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- ret = vgasr_priv.handler->get_client_id(vgasr_priv.clients[i].pdev);
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (client->id != -1)
+ continue;
+ ret = vgasr_priv.handler->get_client_id(client->pdev);
if (ret < 0)
return;
- vgasr_priv.clients[i].id = ret;
+ client->id = ret;
}
vga_switcheroo_debugfs_init(&vgasr_priv);
vgasr_priv.active = true;
}
-int vga_switcheroo_register_client(struct pci_dev *pdev,
- void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state),
- void (*reprobe)(struct pci_dev *pdev),
- bool (*can_switch)(struct pci_dev *pdev))
+static int register_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops,
+ int id, bool active)
{
- int index;
+ struct vga_switcheroo_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->pwr_state = VGA_SWITCHEROO_ON;
+ client->pdev = pdev;
+ client->ops = ops;
+ client->id = id;
+ client->active = active;
mutex_lock(&vgasr_mutex);
- /* don't do IGD vs DIS here */
- if (vgasr_priv.registered_clients & 1)
- index = 1;
- else
- index = 0;
-
- vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON;
- vgasr_priv.clients[index].pdev = pdev;
- vgasr_priv.clients[index].set_gpu_state = set_gpu_state;
- vgasr_priv.clients[index].reprobe = reprobe;
- vgasr_priv.clients[index].can_switch = can_switch;
- vgasr_priv.clients[index].id = -1;
- if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
- vgasr_priv.clients[index].active = true;
-
- vgasr_priv.registered_clients |= (1 << index);
+ list_add_tail(&client->list, &vgasr_priv.clients);
+ if (client_is_vga(client))
+ vgasr_priv.registered_clients++;
/* if we get two clients + handler */
- if (vgasr_priv.registered_clients == 0x3 && vgasr_priv.handler) {
+ if (!vgasr_priv.active &&
+ vgasr_priv.registered_clients == 2 && vgasr_priv.handler) {
printk(KERN_INFO "vga_switcheroo: enabled\n");
vga_switcheroo_enable();
}
mutex_unlock(&vgasr_mutex);
return 0;
}
+
+int vga_switcheroo_register_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops)
+{
+ return register_client(pdev, ops, -1,
+ pdev == vga_default_device());
+}
EXPORT_SYMBOL(vga_switcheroo_register_client);
+int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops,
+ int id, bool active)
+{
+ return register_client(pdev, ops, id | ID_BIT_AUDIO, active);
+}
+EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
+
+static struct vga_switcheroo_client *
+find_client_from_pci(struct list_head *head, struct pci_dev *pdev)
+{
+ struct vga_switcheroo_client *client;
+ list_for_each_entry(client, head, list)
+ if (client->pdev == pdev)
+ return client;
+ return NULL;
+}
+
+static struct vga_switcheroo_client *
+find_client_from_id(struct list_head *head, int client_id)
+{
+ struct vga_switcheroo_client *client;
+ list_for_each_entry(client, head, list)
+ if (client->id == client_id)
+ return client;
+ return NULL;
+}
+
+static struct vga_switcheroo_client *
+find_active_client(struct list_head *head)
+{
+ struct vga_switcheroo_client *client;
+ list_for_each_entry(client, head, list)
+ if (client->active && client_is_vga(client))
+ return client;
+ return NULL;
+}
+
void vga_switcheroo_unregister_client(struct pci_dev *pdev)
{
- int i;
+ struct vga_switcheroo_client *client;
mutex_lock(&vgasr_mutex);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].pdev == pdev) {
- vgasr_priv.registered_clients &= ~(1 << i);
- break;
- }
+ client = find_client_from_pci(&vgasr_priv.clients, pdev);
+ if (client) {
+ if (client_is_vga(client))
+ vgasr_priv.registered_clients--;
+ list_del(&client->list);
+ kfree(client);
+ }
+ if (vgasr_priv.active && vgasr_priv.registered_clients < 2) {
+ printk(KERN_INFO "vga_switcheroo: disabled\n");
+ vga_switcheroo_debugfs_fini(&vgasr_priv);
+ vgasr_priv.active = false;
}
-
- printk(KERN_INFO "vga_switcheroo: disabled\n");
- vga_switcheroo_debugfs_fini(&vgasr_priv);
- vgasr_priv.active = false;
mutex_unlock(&vgasr_mutex);
}
EXPORT_SYMBOL(vga_switcheroo_unregister_client);
@@ -159,29 +214,29 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_client);
void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
struct fb_info *info)
{
- int i;
+ struct vga_switcheroo_client *client;
mutex_lock(&vgasr_mutex);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].pdev == pdev) {
- vgasr_priv.clients[i].fb_info = info;
- break;
- }
- }
+ client = find_client_from_pci(&vgasr_priv.clients, pdev);
+ if (client)
+ client->fb_info = info;
mutex_unlock(&vgasr_mutex);
}
EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
static int vga_switcheroo_show(struct seq_file *m, void *v)
{
- int i;
+ struct vga_switcheroo_client *client;
+ int i = 0;
mutex_lock(&vgasr_mutex);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- seq_printf(m, "%d:%s:%c:%s:%s\n", i,
- vgasr_priv.clients[i].id == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
- vgasr_priv.clients[i].active ? '+' : ' ',
- vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off",
- pci_name(vgasr_priv.clients[i].pdev));
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ seq_printf(m, "%d:%s%s:%c:%s:%s\n", i,
+ client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
+ client_is_vga(client) ? "" : "-Audio",
+ client->active ? '+' : ' ',
+ client->pwr_state ? "Pwr" : "Off",
+ pci_name(client->pdev));
+ i++;
}
mutex_unlock(&vgasr_mutex);
return 0;
@@ -197,7 +252,7 @@ static int vga_switchon(struct vga_switcheroo_client *client)
if (vgasr_priv.handler->power_state)
vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
/* call the driver callback to turn on device */
- client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
+ client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
client->pwr_state = VGA_SWITCHEROO_ON;
return 0;
}
@@ -205,34 +260,39 @@ static int vga_switchon(struct vga_switcheroo_client *client)
static int vga_switchoff(struct vga_switcheroo_client *client)
{
/* call the driver callback to turn off device */
- client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
+ client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
if (vgasr_priv.handler->power_state)
vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
client->pwr_state = VGA_SWITCHEROO_OFF;
return 0;
}
+static void set_audio_state(int id, int state)
+{
+ struct vga_switcheroo_client *client;
+
+ client = find_client_from_id(&vgasr_priv.clients, id | ID_BIT_AUDIO);
+ if (client && client->pwr_state != state) {
+ client->ops->set_gpu_state(client->pdev, state);
+ client->pwr_state = state;
+ }
+}
+
/* stage one happens before delay */
static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
{
- int i;
- struct vga_switcheroo_client *active = NULL;
+ struct vga_switcheroo_client *active;
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active == true) {
- active = &vgasr_priv.clients[i];
- break;
- }
- }
+ active = find_active_client(&vgasr_priv.clients);
if (!active)
return 0;
if (new_client->pwr_state == VGA_SWITCHEROO_OFF)
vga_switchon(new_client);
- /* swap shadow resource to denote boot VGA device has changed so X starts on new device */
- active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW;
- new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
+ vga_set_default_device(new_client->pdev);
+ set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
+
return 0;
}
@@ -240,15 +300,9 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
{
int ret;
- int i;
- struct vga_switcheroo_client *active = NULL;
+ struct vga_switcheroo_client *active;
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active == true) {
- active = &vgasr_priv.clients[i];
- break;
- }
- }
+ active = find_active_client(&vgasr_priv.clients);
if (!active)
return 0;
@@ -264,8 +318,10 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
if (ret)
return ret;
- if (new_client->reprobe)
- new_client->reprobe(new_client->pdev);
+ if (new_client->ops->reprobe)
+ new_client->ops->reprobe(new_client->pdev);
+
+ set_audio_state(active->id, VGA_SWITCHEROO_OFF);
if (active->pwr_state == VGA_SWITCHEROO_ON)
vga_switchoff(active);
@@ -274,13 +330,26 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
return 0;
}
+static bool check_can_switch(void)
+{
+ struct vga_switcheroo_client *client;
+
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (!client->ops->can_switch(client->pdev)) {
+ printk(KERN_ERR "vga_switcheroo: client %x refused switch\n", client->id);
+ return false;
+ }
+ }
+ return true;
+}
+
static ssize_t
vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char usercmd[64];
const char *pdev_name;
- int i, ret;
+ int ret;
bool delay = false, can_switch;
bool just_mux = false;
int client_id = -1;
@@ -301,21 +370,21 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
/* pwr off the device not in use */
if (strncmp(usercmd, "OFF", 3) == 0) {
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active)
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (client->active)
continue;
- if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_ON)
- vga_switchoff(&vgasr_priv.clients[i]);
+ if (client->pwr_state == VGA_SWITCHEROO_ON)
+ vga_switchoff(client);
}
goto out;
}
/* pwr on the device not in use */
if (strncmp(usercmd, "ON", 2) == 0) {
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active)
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (client->active)
continue;
- if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_OFF)
- vga_switchon(&vgasr_priv.clients[i]);
+ if (client->pwr_state == VGA_SWITCHEROO_OFF)
+ vga_switchon(client);
}
goto out;
}
@@ -348,13 +417,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
if (client_id == -1)
goto out;
-
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].id == client_id) {
- client = &vgasr_priv.clients[i];
- break;
- }
- }
+ client = find_client_from_id(&vgasr_priv.clients, client_id);
+ if (!client)
+ goto out;
vgasr_priv.delayed_switch_active = false;
@@ -363,23 +428,16 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
goto out;
}
- if (client->active == true)
+ if (client->active)
goto out;
/* okay we want a switch - test if devices are willing to switch */
- can_switch = true;
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
- if (can_switch == false) {
- printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
- break;
- }
- }
+ can_switch = check_can_switch();
if (can_switch == false && delay == false)
goto out;
- if (can_switch == true) {
+ if (can_switch) {
pdev_name = pci_name(client->pdev);
ret = vga_switchto_stage1(client);
if (ret)
@@ -451,10 +509,8 @@ fail:
int vga_switcheroo_process_delayed_switch(void)
{
- struct vga_switcheroo_client *client = NULL;
+ struct vga_switcheroo_client *client;
const char *pdev_name;
- bool can_switch = true;
- int i;
int ret;
int err = -EINVAL;
@@ -464,17 +520,9 @@ int vga_switcheroo_process_delayed_switch(void)
printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].id == vgasr_priv.delayed_client_id)
- client = &vgasr_priv.clients[i];
- can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
- if (can_switch == false) {
- printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
- break;
- }
- }
-
- if (can_switch == false || client == NULL)
+ client = find_client_from_id(&vgasr_priv.clients,
+ vgasr_priv.delayed_client_id);
+ if (!client || !check_can_switch())
goto err;
pdev_name = pci_name(client->pdev);
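For GPU drivers the visible change is the registration API: the three separate function pointers are bundled into a const vga_switcheroo_client_ops table. A hedged sketch of a converted client, with foo_* names as placeholders:

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

static void foo_switcheroo_set_state(struct pci_dev *pdev,
				     enum vga_switcheroo_state state)
{
	/* a real driver powers its GPU up or down here */
}

static bool foo_switcheroo_can_switch(struct pci_dev *pdev)
{
	/* a real driver refuses while it still has work in flight */
	return true;
}

static const struct vga_switcheroo_client_ops foo_switcheroo_ops = {
	.set_gpu_state	= foo_switcheroo_set_state,
	.can_switch	= foo_switcheroo_can_switch,
	/* .reprobe is optional; stage2 above checks it for NULL */
};

static int foo_enable_switcheroo(struct pci_dev *pdev)
{
	/* was: vga_switcheroo_register_client(pdev, set_state, reprobe,
	 *                                     can_switch); */
	return vga_switcheroo_register_client(pdev, &foo_switcheroo_ops);
}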
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 111d956d8e7d..3df8fc0ec01a 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -136,6 +136,13 @@ struct pci_dev *vga_default_device(void)
{
return vga_default;
}
+
+EXPORT_SYMBOL_GPL(vga_default_device);
+
+void vga_set_default_device(struct pci_dev *pdev)
+{
+ vga_default = pdev;
+}
#endif
static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
@@ -605,10 +612,12 @@ static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
goto bail;
}
+#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
if (vga_default == pdev) {
pci_dev_put(vga_default);
vga_default = NULL;
}
+#endif
if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
vga_decode_count--;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 7cd9bf42108b..6f1d167cb1ea 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1036,8 +1036,9 @@ config SENSORS_SCH56XX_COMMON
config SENSORS_SCH5627
tristate "SMSC SCH5627"
- depends on !PPC
+ depends on !PPC && WATCHDOG
select SENSORS_SCH56XX_COMMON
+ select WATCHDOG_CORE
help
If you say yes here you get support for the hardware monitoring
features of the SMSC SCH5627 Super-I/O chip including support for
@@ -1048,8 +1049,9 @@ config SENSORS_SCH5627
config SENSORS_SCH5636
tristate "SMSC SCH5636"
- depends on !PPC
+ depends on !PPC && WATCHDOG
select SENSORS_SCH56XX_COMMON
+ select WATCHDOG_CORE
help
SMSC SCH5636 Super I/O chips include an embedded microcontroller for
hardware monitoring solutions, allowing motherboard manufacturers to
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 8ec6dfbccb64..8342275378b8 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -579,7 +579,7 @@ static int __devinit sch5627_probe(struct platform_device *pdev)
}
/* Note failing to register the watchdog is not a fatal error */
- data->watchdog = sch56xx_watchdog_register(data->addr,
+ data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
(build_code << 24) | (build_id << 8) | hwmon_rev,
&data->update_lock, 1);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 906d4ed32d81..96a7e68718ca 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -510,7 +510,7 @@ static int __devinit sch5636_probe(struct platform_device *pdev)
}
/* Note failing to register the watchdog is not a fatal error */
- data->watchdog = sch56xx_watchdog_register(data->addr,
+ data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
(revision[0] << 8) | revision[1],
&data->update_lock, 0);
diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
index ce52fc57d41d..4380f5d07be2 100644
--- a/drivers/hwmon/sch56xx-common.c
+++ b/drivers/hwmon/sch56xx-common.c
@@ -66,15 +66,10 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
struct sch56xx_watchdog_data {
u16 addr;
- u32 revision;
struct mutex *io_lock;
- struct mutex watchdog_lock;
- struct list_head list; /* member of the watchdog_data_list */
struct kref kref;
- struct miscdevice watchdog_miscdev;
- unsigned long watchdog_is_open;
- char watchdog_name[10]; /* must be unique to avoid sysfs conflict */
- char watchdog_expect_close;
+ struct watchdog_info wdinfo;
+ struct watchdog_device wddev;
u8 watchdog_preset;
u8 watchdog_control;
u8 watchdog_output_enable;
@@ -82,15 +77,6 @@ struct sch56xx_watchdog_data {
static struct platform_device *sch56xx_pdev;
-/*
- * Somewhat ugly :( global data pointer list with all sch56xx devices, so that
- * we can find our device data as when using misc_register there is no other
- * method to get to ones device data from the open fop.
- */
-static LIST_HEAD(watchdog_data_list);
-/* Note this lock not only protect list access, but also data.kref access */
-static DEFINE_MUTEX(watchdog_data_mutex);
-
/* Super I/O functions */
static inline int superio_inb(int base, int reg)
{
@@ -272,22 +258,22 @@ EXPORT_SYMBOL(sch56xx_read_virtual_reg12);
* Watchdog routines
*/
-/*
- * Release our data struct when the platform device has been released *and*
- * all references to our watchdog device are released.
- */
-static void sch56xx_watchdog_release_resources(struct kref *r)
+/* Release our data struct when we're unregistered *and*
+ all references to our watchdog device are released */
+static void watchdog_release_resources(struct kref *r)
{
struct sch56xx_watchdog_data *data =
container_of(r, struct sch56xx_watchdog_data, kref);
kfree(data);
}
-static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
- int timeout)
+static int watchdog_set_timeout(struct watchdog_device *wddev,
+ unsigned int timeout)
{
- int ret, resolution;
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
+ unsigned int resolution;
u8 control;
+ int ret;
/* 1 second or 60 second resolution? */
if (timeout <= 255)
@@ -298,12 +284,6 @@ static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
if (timeout < resolution || timeout > (resolution * 255))
return -EINVAL;
- mutex_lock(&data->watchdog_lock);
- if (!data->addr) {
- ret = -ENODEV;
- goto leave;
- }
-
if (resolution == 1)
control = data->watchdog_control | SCH56XX_WDOG_TIME_BASE_SEC;
else
@@ -316,7 +296,7 @@ static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
control);
mutex_unlock(data->io_lock);
if (ret)
- goto leave;
+ return ret;
data->watchdog_control = control;
}
@@ -326,38 +306,17 @@ static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
* the watchdog countdown.
*/
data->watchdog_preset = DIV_ROUND_UP(timeout, resolution);
+ wddev->timeout = data->watchdog_preset * resolution;
- ret = data->watchdog_preset * resolution;
-leave:
- mutex_unlock(&data->watchdog_lock);
- return ret;
-}
-
-static int watchdog_get_timeout(struct sch56xx_watchdog_data *data)
-{
- int timeout;
-
- mutex_lock(&data->watchdog_lock);
- if (data->watchdog_control & SCH56XX_WDOG_TIME_BASE_SEC)
- timeout = data->watchdog_preset;
- else
- timeout = data->watchdog_preset * 60;
- mutex_unlock(&data->watchdog_lock);
-
- return timeout;
+ return 0;
}
-static int watchdog_start(struct sch56xx_watchdog_data *data)
+static int watchdog_start(struct watchdog_device *wddev)
{
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
int ret;
u8 val;
- mutex_lock(&data->watchdog_lock);
- if (!data->addr) {
- ret = -ENODEV;
- goto leave_unlock_watchdog;
- }
-
/*
* The sch56xx's watchdog cannot really be started / stopped
* it is always running, but we can avoid the timer expiring
@@ -385,18 +344,14 @@ static int watchdog_start(struct sch56xx_watchdog_data *data)
if (ret)
goto leave;
- /* 2. Enable output (if not already enabled) */
- if (!(data->watchdog_output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)) {
- val = data->watchdog_output_enable |
- SCH56XX_WDOG_OUTPUT_ENABLE;
- ret = sch56xx_write_virtual_reg(data->addr,
- SCH56XX_REG_WDOG_OUTPUT_ENABLE,
- val);
- if (ret)
- goto leave;
+ /* 2. Enable output */
+ val = data->watchdog_output_enable | SCH56XX_WDOG_OUTPUT_ENABLE;
+ ret = sch56xx_write_virtual_reg(data->addr,
+ SCH56XX_REG_WDOG_OUTPUT_ENABLE, val);
+ if (ret)
+ goto leave;
- data->watchdog_output_enable = val;
- }
+ data->watchdog_output_enable = val;
/* 3. Clear the watchdog event bit if set */
val = inb(data->addr + 9);
@@ -405,234 +360,70 @@ static int watchdog_start(struct sch56xx_watchdog_data *data)
leave:
mutex_unlock(data->io_lock);
-leave_unlock_watchdog:
- mutex_unlock(&data->watchdog_lock);
return ret;
}
-static int watchdog_trigger(struct sch56xx_watchdog_data *data)
+static int watchdog_trigger(struct watchdog_device *wddev)
{
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
int ret;
- mutex_lock(&data->watchdog_lock);
- if (!data->addr) {
- ret = -ENODEV;
- goto leave;
- }
-
/* Reset the watchdog countdown counter */
mutex_lock(data->io_lock);
ret = sch56xx_write_virtual_reg(data->addr, SCH56XX_REG_WDOG_PRESET,
data->watchdog_preset);
mutex_unlock(data->io_lock);
-leave:
- mutex_unlock(&data->watchdog_lock);
+
return ret;
}
-static int watchdog_stop_unlocked(struct sch56xx_watchdog_data *data)
+static int watchdog_stop(struct watchdog_device *wddev)
{
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
int ret = 0;
u8 val;
- if (!data->addr)
- return -ENODEV;
-
- if (data->watchdog_output_enable & SCH56XX_WDOG_OUTPUT_ENABLE) {
- val = data->watchdog_output_enable &
- ~SCH56XX_WDOG_OUTPUT_ENABLE;
- mutex_lock(data->io_lock);
- ret = sch56xx_write_virtual_reg(data->addr,
- SCH56XX_REG_WDOG_OUTPUT_ENABLE,
- val);
- mutex_unlock(data->io_lock);
- if (ret)
- return ret;
-
- data->watchdog_output_enable = val;
- }
-
- return ret;
-}
-
-static int watchdog_stop(struct sch56xx_watchdog_data *data)
-{
- int ret;
-
- mutex_lock(&data->watchdog_lock);
- ret = watchdog_stop_unlocked(data);
- mutex_unlock(&data->watchdog_lock);
-
- return ret;
-}
-
-static int watchdog_release(struct inode *inode, struct file *filp)
-{
- struct sch56xx_watchdog_data *data = filp->private_data;
-
- if (data->watchdog_expect_close) {
- watchdog_stop(data);
- data->watchdog_expect_close = 0;
- } else {
- watchdog_trigger(data);
- pr_crit("unexpected close, not stopping watchdog!\n");
- }
-
- clear_bit(0, &data->watchdog_is_open);
-
- mutex_lock(&watchdog_data_mutex);
- kref_put(&data->kref, sch56xx_watchdog_release_resources);
- mutex_unlock(&watchdog_data_mutex);
+ val = data->watchdog_output_enable & ~SCH56XX_WDOG_OUTPUT_ENABLE;
+ mutex_lock(data->io_lock);
+ ret = sch56xx_write_virtual_reg(data->addr,
+ SCH56XX_REG_WDOG_OUTPUT_ENABLE, val);
+ mutex_unlock(data->io_lock);
+ if (ret)
+ return ret;
+ data->watchdog_output_enable = val;
return 0;
}
-static int watchdog_open(struct inode *inode, struct file *filp)
+static void watchdog_ref(struct watchdog_device *wddev)
{
- struct sch56xx_watchdog_data *pos, *data = NULL;
- int ret, watchdog_is_open;
-
- /*
- * We get called from drivers/char/misc.c with misc_mtx hold, and we
- * call misc_register() from sch56xx_watchdog_probe() with
- * watchdog_data_mutex hold, as misc_register() takes the misc_mtx
- * lock, this is a possible deadlock, so we use mutex_trylock here.
- */
- if (!mutex_trylock(&watchdog_data_mutex))
- return -ERESTARTSYS;
- list_for_each_entry(pos, &watchdog_data_list, list) {
- if (pos->watchdog_miscdev.minor == iminor(inode)) {
- data = pos;
- break;
- }
- }
- /* Note we can never not have found data, so we don't check for this */
- watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
- if (!watchdog_is_open)
- kref_get(&data->kref);
- mutex_unlock(&watchdog_data_mutex);
-
- if (watchdog_is_open)
- return -EBUSY;
-
- filp->private_data = data;
-
- /* Start the watchdog */
- ret = watchdog_start(data);
- if (ret) {
- watchdog_release(inode, filp);
- return ret;
- }
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
- return nonseekable_open(inode, filp);
+ kref_get(&data->kref);
}
-static ssize_t watchdog_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *offset)
+static void watchdog_unref(struct watchdog_device *wddev)
{
- int ret;
- struct sch56xx_watchdog_data *data = filp->private_data;
-
- if (count) {
- if (!nowayout) {
- size_t i;
-
- /* Clear it in case it was set with a previous write */
- data->watchdog_expect_close = 0;
-
- for (i = 0; i != count; i++) {
- char c;
- if (get_user(c, buf + i))
- return -EFAULT;
- if (c == 'V')
- data->watchdog_expect_close = 1;
- }
- }
- ret = watchdog_trigger(data);
- if (ret)
- return ret;
- }
- return count;
-}
-
-static long watchdog_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
- .identity = "sch56xx watchdog"
- };
- int i, ret = 0;
- struct sch56xx_watchdog_data *data = filp->private_data;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ident.firmware_version = data->revision;
- if (!nowayout)
- ident.options |= WDIOF_MAGICCLOSE;
- if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
- ret = -EFAULT;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, (int __user *)arg);
- break;
-
- case WDIOC_KEEPALIVE:
- ret = watchdog_trigger(data);
- break;
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
- case WDIOC_GETTIMEOUT:
- i = watchdog_get_timeout(data);
- ret = put_user(i, (int __user *)arg);
- break;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(i, (int __user *)arg)) {
- ret = -EFAULT;
- break;
- }
- ret = watchdog_set_timeout(data, i);
- if (ret >= 0)
- ret = put_user(ret, (int __user *)arg);
- break;
-
- case WDIOC_SETOPTIONS:
- if (get_user(i, (int __user *)arg)) {
- ret = -EFAULT;
- break;
- }
-
- if (i & WDIOS_DISABLECARD)
- ret = watchdog_stop(data);
- else if (i & WDIOS_ENABLECARD)
- ret = watchdog_trigger(data);
- else
- ret = -EINVAL;
- break;
-
- default:
- ret = -ENOTTY;
- }
- return ret;
+ kref_put(&data->kref, watchdog_release_resources);
}
-static const struct file_operations watchdog_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = watchdog_open,
- .release = watchdog_release,
- .write = watchdog_write,
- .unlocked_ioctl = watchdog_ioctl,
+static const struct watchdog_ops watchdog_ops = {
+ .owner = THIS_MODULE,
+ .start = watchdog_start,
+ .stop = watchdog_stop,
+ .ping = watchdog_trigger,
+ .set_timeout = watchdog_set_timeout,
+ .ref = watchdog_ref,
+ .unref = watchdog_unref,
};
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(
+struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
u16 addr, u32 revision, struct mutex *io_lock, int check_enabled)
{
struct sch56xx_watchdog_data *data;
- int i, err, control, output_enable;
- const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
+ int err, control, output_enable;
/* Cache the watchdog registers */
mutex_lock(io_lock);
@@ -656,82 +447,55 @@ struct sch56xx_watchdog_data *sch56xx_watchdog_register(
return NULL;
data->addr = addr;
- data->revision = revision;
data->io_lock = io_lock;
- data->watchdog_control = control;
- data->watchdog_output_enable = output_enable;
- mutex_init(&data->watchdog_lock);
- INIT_LIST_HEAD(&data->list);
kref_init(&data->kref);
- err = watchdog_set_timeout(data, 60);
- if (err < 0)
- goto error;
-
- /*
- * We take the data_mutex lock early so that watchdog_open() cannot
- * run when misc_register() has completed, but we've not yet added
- * our data to the watchdog_data_list.
- */
- mutex_lock(&watchdog_data_mutex);
- for (i = 0; i < ARRAY_SIZE(watchdog_minors); i++) {
- /* Register our watchdog part */
- snprintf(data->watchdog_name, sizeof(data->watchdog_name),
- "watchdog%c", (i == 0) ? '\0' : ('0' + i));
- data->watchdog_miscdev.name = data->watchdog_name;
- data->watchdog_miscdev.fops = &watchdog_fops;
- data->watchdog_miscdev.minor = watchdog_minors[i];
- err = misc_register(&data->watchdog_miscdev);
- if (err == -EBUSY)
- continue;
- if (err)
- break;
+ strlcpy(data->wdinfo.identity, "sch56xx watchdog",
+ sizeof(data->wdinfo.identity));
+ data->wdinfo.firmware_version = revision;
+ data->wdinfo.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT;
+ if (!nowayout)
+ data->wdinfo.options |= WDIOF_MAGICCLOSE;
+
+ data->wddev.info = &data->wdinfo;
+ data->wddev.ops = &watchdog_ops;
+ data->wddev.parent = parent;
+ data->wddev.timeout = 60;
+ data->wddev.min_timeout = 1;
+ data->wddev.max_timeout = 255 * 60;
+ if (nowayout)
+ set_bit(WDOG_NO_WAY_OUT, &data->wddev.status);
+ if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)
+ set_bit(WDOG_ACTIVE, &data->wddev.status);
+
+ /* Since the watchdog uses a downcounter there is no register to read
+ the BIOS set timeout from (if any was set at all) ->
+ Choose a preset which will give us a 1 minute timeout */
+ if (control & SCH56XX_WDOG_TIME_BASE_SEC)
+ data->watchdog_preset = 60; /* seconds */
+ else
+ data->watchdog_preset = 1; /* minute */
- list_add(&data->list, &watchdog_data_list);
- pr_info("Registered /dev/%s chardev major 10, minor: %d\n",
- data->watchdog_name, watchdog_minors[i]);
- break;
- }
- mutex_unlock(&watchdog_data_mutex);
+ data->watchdog_control = control;
+ data->watchdog_output_enable = output_enable;
+ watchdog_set_drvdata(&data->wddev, data);
+ err = watchdog_register_device(&data->wddev);
if (err) {
pr_err("Registering watchdog chardev: %d\n", err);
- goto error;
- }
- if (i == ARRAY_SIZE(watchdog_minors)) {
- pr_warn("Couldn't register watchdog (no free minor)\n");
- goto error;
+ kfree(data);
+ return NULL;
}
return data;
-
-error:
- kfree(data);
- return NULL;
}
EXPORT_SYMBOL(sch56xx_watchdog_register);
void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data)
{
- mutex_lock(&watchdog_data_mutex);
- misc_deregister(&data->watchdog_miscdev);
- list_del(&data->list);
- mutex_unlock(&watchdog_data_mutex);
-
- mutex_lock(&data->watchdog_lock);
- if (data->watchdog_is_open) {
- pr_warn("platform device unregistered with watchdog "
- "open! Stopping watchdog.\n");
- watchdog_stop_unlocked(data);
- }
- /* Tell the wdog start/stop/trigger functions our dev is gone */
- data->addr = 0;
- data->io_lock = NULL;
- mutex_unlock(&data->watchdog_lock);
-
- mutex_lock(&watchdog_data_mutex);
- kref_put(&data->kref, sch56xx_watchdog_release_resources);
- mutex_unlock(&watchdog_data_mutex);
+ watchdog_unregister_device(&data->wddev);
+ kref_put(&data->kref, watchdog_release_resources);
+ /* Don't touch data after this; it may have been freed! */
}
EXPORT_SYMBOL(sch56xx_watchdog_unregister);
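After this conversion the sch56xx watchdog is exposed through the common watchdog framework rather than a private misc device, so the standard /dev/watchdog interface applies. A generic userspace sketch, not specific to this driver:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int timeout = 120;	/* seconds; rounded by watchdog_set_timeout() */

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* -> watchdog_set_timeout() */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* -> watchdog_trigger() */
	write(fd, "V", 1);			/* magic close, honoured unless nowayout */
	close(fd);
	return 0;
}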
diff --git a/drivers/hwmon/sch56xx-common.h b/drivers/hwmon/sch56xx-common.h
index 7475086eb978..704ea2c6d28a 100644
--- a/drivers/hwmon/sch56xx-common.h
+++ b/drivers/hwmon/sch56xx-common.h
@@ -27,6 +27,6 @@ int sch56xx_read_virtual_reg16(u16 addr, u16 reg);
int sch56xx_read_virtual_reg12(u16 addr, u16 msb_reg, u16 lsn_reg,
int high_nibble);
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(
+struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
u16 addr, u32 revision, struct mutex *io_lock, int check_enabled);
void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data);
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 5f13c62e64b4..5a3bb3d738d8 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -49,7 +49,6 @@ config I2C_CHARDEV
config I2C_MUX
tristate "I2C bus multiplexing support"
- depends on EXPERIMENTAL
help
Say Y here if you want the I2C core to support the ability to
handle multiplexed I2C bus topologies, by presenting each
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 7f0b83219744..fad22b0bb5b0 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -608,7 +608,7 @@ bailout:
static u32 bit_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL |
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 94468a64ce3a..7244c8be6063 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -445,20 +445,6 @@ config I2C_IOP3XX
This driver can also be built as a module. If so, the module
will be called i2c-iop3xx.
-config I2C_IXP2000
- tristate "IXP2000 GPIO-Based I2C Interface (DEPRECATED)"
- depends on ARCH_IXP2000
- select I2C_ALGOBIT
- help
- Say Y here if you have an Intel IXP2000 (2400, 2800, 2850) based
- system and are using GPIO lines for an I2C bus.
-
- This support is also available as a module. If so, the module
- will be called i2c-ixp2000.
-
- This driver is deprecated and will be dropped soon. Use i2c-gpio
- instead.
-
config I2C_MPC
tristate "MPC107/824x/85xx/512x/52xx/83xx/86xx"
depends on PPC
@@ -483,6 +469,7 @@ config I2C_MV64XXX
config I2C_MXS
tristate "Freescale i.MX28 I2C interface"
depends on SOC_IMX28
+ select STMP_DEVICE
help
Say Y here if you want to use the I2C bus controller on
the Freescale i.MX28 processors.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 569567b0d027..ce3c2be7fb40 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
obj-$(CONFIG_I2C_IMX) += i2c-imx.o
obj-$(CONFIG_I2C_INTEL_MID) += i2c-intel-mid.o
obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
-obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
obj-$(CONFIG_I2C_MXS) += i2c-mxs.o
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index a76d85fa3ad7..79b4bcb3b85c 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -755,7 +755,7 @@ static int davinci_i2c_remove(struct platform_device *pdev)
dev->clk = NULL;
davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
- free_irq(IRQ_I2C, dev);
+ free_irq(dev->irq, dev);
iounmap(dev->base);
kfree(dev);
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index df8799241009..1e48bec80edf 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -164,9 +164,15 @@ static char *abort_sources[] = {
u32 dw_readl(struct dw_i2c_dev *dev, int offset)
{
- u32 value = readl(dev->base + offset);
+ u32 value;
- if (dev->swab)
+ if (dev->accessor_flags & ACCESS_16BIT)
+ value = readw(dev->base + offset) |
+ (readw(dev->base + offset + 2) << 16);
+ else
+ value = readl(dev->base + offset);
+
+ if (dev->accessor_flags & ACCESS_SWAP)
return swab32(value);
else
return value;
@@ -174,10 +180,15 @@ u32 dw_readl(struct dw_i2c_dev *dev, int offset)
void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
{
- if (dev->swab)
+ if (dev->accessor_flags & ACCESS_SWAP)
b = swab32(b);
- writel(b, dev->base + offset);
+ if (dev->accessor_flags & ACCESS_16BIT) {
+ writew((u16)b, dev->base + offset);
+ writew((u16)(b >> 16), dev->base + offset + 2);
+ } else {
+ writel(b, dev->base + offset);
+ }
}
static u32
@@ -251,14 +262,14 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
input_clock_khz = dev->get_clk_rate_khz(dev);
- /* Configure register endianess access */
reg = dw_readl(dev, DW_IC_COMP_TYPE);
if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
- dev->swab = 1;
- reg = DW_IC_COMP_TYPE_VALUE;
- }
-
- if (reg != DW_IC_COMP_TYPE_VALUE) {
+ /* Configure register endianess access */
+ dev->accessor_flags |= ACCESS_SWAP;
+ } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
+ /* Configure register access mode 16bit */
+ dev->accessor_flags |= ACCESS_16BIT;
+ } else if (reg != DW_IC_COMP_TYPE_VALUE) {
dev_err(dev->dev, "Unknown Synopsys component type: "
"0x%08x\n", reg);
return -ENODEV;
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 02d1a2ddd853..9c1840ee09c7 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -82,7 +82,7 @@ struct dw_i2c_dev {
unsigned int status;
u32 abort_source;
int irq;
- int swab;
+ u32 accessor_flags;
struct i2c_adapter adapter;
u32 functionality;
u32 master_cfg;
@@ -90,6 +90,9 @@ struct dw_i2c_dev {
unsigned int rx_fifo_depth;
};
+#define ACCESS_SWAP 0x00000001
+#define ACCESS_16BIT 0x00000002
+
extern u32 dw_readl(struct dw_i2c_dev *dev, int offset);
extern void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset);
extern int i2c_dw_init(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 4ba589ab8614..0506fef8dc00 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -36,6 +36,7 @@
#include <linux/interrupt.h>
#include <linux/of_i2c.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include "i2c-designware-core.h"
@@ -95,7 +96,7 @@ static int __devinit dw_i2c_probe(struct platform_device *pdev)
r = -ENODEV;
goto err_free_mem;
}
- clk_enable(dev->clk);
+ clk_prepare_enable(dev->clk);
dev->functionality =
I2C_FUNC_I2C |
@@ -155,7 +156,7 @@ err_free_irq:
err_iounmap:
iounmap(dev->base);
err_unuse_clocks:
- clk_disable(dev->clk);
+ clk_disable_unprepare(dev->clk);
clk_put(dev->clk);
dev->clk = NULL;
err_free_mem:
@@ -177,7 +178,7 @@ static int __devexit dw_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&dev->adapter);
put_device(&pdev->dev);
- clk_disable(dev->clk);
+ clk_disable_unprepare(dev->clk);
clk_put(dev->clk);
dev->clk = NULL;
@@ -198,6 +199,31 @@ static const struct of_device_id dw_i2c_of_match[] = {
MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
#endif
+#ifdef CONFIG_PM
+static int dw_i2c_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(i_dev->clk);
+
+ return 0;
+}
+
+static int dw_i2c_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
+
+ clk_prepare_enable(i_dev->clk);
+ i2c_dw_init(i_dev);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
+
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:i2c_designware");
@@ -207,6 +233,7 @@ static struct platform_driver dw_i2c_driver = {
.name = "i2c_designware",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(dw_i2c_of_match),
+ .pm = &dw_i2c_dev_pm_ops,
},
};
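
The probe, error and remove paths above move from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(), as required once the platform uses the common clock framework. A sketch of what the combined helpers amount to (not part of this patch):

/* clk_prepare_enable() bundles the sleepable prepare step with the atomic
 * enable step; clk_disable_unprepare() undoes them in reverse order. */
#include <linux/clk.h>

static int my_clk_on(struct clk *clk)
{
	int ret = clk_prepare(clk);	/* may sleep */

	if (ret)
		return ret;

	ret = clk_enable(clk);		/* safe in atomic context */
	if (ret)
		clk_unprepare(clk);

	return ret;
}

static void my_clk_off(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}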
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index c811289b61e2..2f74ae872e1e 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -263,11 +263,6 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
init_waitqueue_head(&pch_event);
}
-static inline bool ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
-{
- return cmp1.tv64 < cmp2.tv64;
-}
-
/**
* pch_i2c_wait_for_bus_idle() - check the status of bus.
* @adap: Pointer to struct i2c_algo_pch_data.
@@ -317,33 +312,6 @@ static void pch_i2c_start(struct i2c_algo_pch_data *adap)
}
/**
- * pch_i2c_wait_for_xfer_complete() - initiates a wait for the tx complete event
- * @adap: Pointer to struct i2c_algo_pch_data.
- */
-static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap)
-{
- long ret;
- ret = wait_event_timeout(pch_event,
- (adap->pch_event_flag != 0), msecs_to_jiffies(1000));
-
- if (ret == 0) {
- pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
- adap->pch_event_flag = 0;
- return -ETIMEDOUT;
- }
-
- if (adap->pch_event_flag & I2C_ERROR_MASK) {
- pch_err(adap, "error bits set: %x\n", adap->pch_event_flag);
- adap->pch_event_flag = 0;
- return -EIO;
- }
-
- adap->pch_event_flag = 0;
-
- return 0;
-}
-
-/**
* pch_i2c_getack() - to confirm ACK/NACK
* @adap: Pointer to struct i2c_algo_pch_data.
*/
@@ -373,6 +341,40 @@ static void pch_i2c_stop(struct i2c_algo_pch_data *adap)
pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_START);
}
+static int pch_i2c_wait_for_check_xfer(struct i2c_algo_pch_data *adap)
+{
+ long ret;
+
+ ret = wait_event_timeout(pch_event,
+ (adap->pch_event_flag != 0), msecs_to_jiffies(1000));
+ if (!ret) {
+ pch_err(adap, "%s:wait-event timeout\n", __func__);
+ adap->pch_event_flag = 0;
+ pch_i2c_stop(adap);
+ pch_i2c_init(adap);
+ return -ETIMEDOUT;
+ }
+
+ if (adap->pch_event_flag & I2C_ERROR_MASK) {
+ pch_err(adap, "Lost Arbitration\n");
+ adap->pch_event_flag = 0;
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
+ pch_i2c_init(adap);
+ return -EAGAIN;
+ }
+
+ adap->pch_event_flag = 0;
+
+ if (pch_i2c_getack(adap)) {
+ pch_dbg(adap, "Receive NACK for slave address"
+ "setting\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
/**
* pch_i2c_repstart() - generate repeated start condition in normal mode
* @adap: Pointer to struct i2c_algo_pch_data.
@@ -427,27 +429,12 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
if (first)
pch_i2c_start(adap);
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- addr_8_lsb = (addr & I2C_ADDR_MSK);
- iowrite32(addr_8_lsb, p + PCH_I2CDR);
- } else if (rtn == -EIO) { /* Arbitration Lost */
- pch_err(adap, "Lost Arbitration\n");
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMAL_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMIF_BIT);
- pch_i2c_init(adap);
- return -EAGAIN;
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
+
+ addr_8_lsb = (addr & I2C_ADDR_MSK);
+ iowrite32(addr_8_lsb, p + PCH_I2CDR);
} else {
/* set 7 bit slave address and R/W bit as 0 */
iowrite32(addr << 1, p + PCH_I2CDR);
@@ -455,44 +442,21 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
pch_i2c_start(adap);
}
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- } else if (rtn == -EIO) { /* Arbitration Lost */
- pch_err(adap, "Lost Arbitration\n");
- pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
- pch_i2c_init(adap);
- return -EAGAIN;
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
for (wrcount = 0; wrcount < length; ++wrcount) {
/* write buffer value to I2C data register */
iowrite32(buf[wrcount], p + PCH_I2CDR);
pch_dbg(adap, "writing %x to Data register\n", buf[wrcount]);
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMCF_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMIF_BIT);
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
+
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMCF_BIT);
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
}
/* check if this is the last message */
@@ -580,50 +544,21 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
if (first)
pch_i2c_start(adap);
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- addr_8_lsb = (addr & I2C_ADDR_MSK);
- iowrite32(addr_8_lsb, p + PCH_I2CDR);
- } else if (rtn == -EIO) { /* Arbitration Lost */
- pch_err(adap, "Lost Arbitration\n");
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMAL_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMIF_BIT);
- pch_i2c_init(adap);
- return -EAGAIN;
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
+
+ addr_8_lsb = (addr & I2C_ADDR_MSK);
+ iowrite32(addr_8_lsb, p + PCH_I2CDR);
+
pch_i2c_restart(adap);
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- addr_2_msb |= I2C_RD;
- iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK,
- p + PCH_I2CDR);
- } else if (rtn == -EIO) { /* Arbitration Lost */
- pch_err(adap, "Lost Arbitration\n");
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMAL_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMIF_BIT);
- pch_i2c_init(adap);
- return -EAGAIN;
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
+
+ addr_2_msb |= I2C_RD;
+ iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
} else {
/* 7 address bits + R/W bit */
addr = (((addr) << 1) | (I2C_RD));
@@ -634,23 +569,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
if (first)
pch_i2c_start(adap);
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- } else if (rtn == -EIO) { /* Arbitration Lost */
- pch_err(adap, "Lost Arbitration\n");
- pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
- pch_i2c_init(adap);
- return -EAGAIN;
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
if (length == 0) {
pch_i2c_stop(adap);
@@ -669,18 +590,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
if (loop != 1)
read_index++;
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave"
- "address setting\n");
- return -EIO;
- }
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
-
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
} /* end for */
pch_i2c_sendnack(adap);
@@ -690,17 +602,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
if (length != 1)
read_index++;
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave"
- "address setting\n");
- return -EIO;
- }
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
if (last)
pch_i2c_stop(adap);
@@ -790,7 +694,7 @@ static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap,
ret = mutex_lock_interruptible(&pch_mutex);
if (ret)
- return -ERESTARTSYS;
+ return ret;
if (adap->p_adapter_info->pch_i2c_suspended) {
mutex_unlock(&pch_mutex);
@@ -909,7 +813,7 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
pch_adap->owner = THIS_MODULE;
pch_adap->class = I2C_CLASS_HWMON;
- strcpy(pch_adap->name, KBUILD_MODNAME);
+ strlcpy(pch_adap->name, KBUILD_MODNAME, sizeof(pch_adap->name));
pch_adap->algo = &pch_algorithm;
pch_adap->algo_data = &adap_info->pch_data[i];
@@ -963,7 +867,7 @@ static void __devexit pch_i2c_remove(struct pci_dev *pdev)
pci_iounmap(pdev, adap_info->pch_data[0].pch_base_address);
for (i = 0; i < adap_info->ch_num; i++)
- adap_info->pch_data[i].pch_base_address = 0;
+ adap_info->pch_data[i].pch_base_address = NULL;
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index c0330a41db03..e62d2d938628 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -190,12 +190,7 @@ static int __devinit i2c_gpio_probe(struct platform_device *pdev)
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
- /*
- * If "dev->id" is negative we consider it as zero.
- * The reason to do so is to avoid sysfs names that only make
- * sense when there are multiple adapters.
- */
- adap->nr = (pdev->id != -1) ? pdev->id : 0;
+ adap->nr = pdev->id;
ret = i2c_bit_add_numbered_bus(adap);
if (ret)
goto err_add_bus;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 56bce9a8bcbb..8d6b504d65c4 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -512,7 +512,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
}
/* Setup i2c_imx driver structure */
- strcpy(i2c_imx->adapter.name, pdev->name);
+ strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
i2c_imx->adapter.owner = THIS_MODULE;
i2c_imx->adapter.algo = &i2c_imx_algo;
i2c_imx->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
deleted file mode 100644
index 5d263f9014d6..000000000000
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * drivers/i2c/busses/i2c-ixp2000.c
- *
- * I2C adapter for IXP2000 systems using GPIOs for I2C bus
- *
- * Author: Deepak Saxena <dsaxena@plexity.net>
- * Based on IXDP2400 code by: Naeem M. Afzal <naeem.m.afzal@intel.com>
- * Made generic by: Jeff Daly <jeffrey.daly@intel.com>
- *
- * Copyright (c) 2003-2004 MontaVista Software Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
- * From Jeff Daly:
- *
- * I2C adapter driver for Intel IXDP2xxx platforms. This should work for any
- * IXP2000 platform if it uses the HW GPIO in the same manner. Basically,
- * SDA and SCL GPIOs have external pullups. Setting the respective GPIO to
- * an input will make the signal a '1' via the pullup. Setting them to
- * outputs will pull them down.
- *
- * The GPIOs are open drain signals and are used as configuration strap inputs
- * during power-up so there's generally a buffer on the board that needs to be
- * 'enabled' to drive the GPIOs.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <linux/slab.h>
-
-#include <mach/hardware.h> /* Pick up IXP2000-specific bits */
-#include <mach/gpio-ixp2000.h>
-
-static inline int ixp2000_scl_pin(void *data)
-{
- return ((struct ixp2000_i2c_pins*)data)->scl_pin;
-}
-
-static inline int ixp2000_sda_pin(void *data)
-{
- return ((struct ixp2000_i2c_pins*)data)->sda_pin;
-}
-
-
-static void ixp2000_bit_setscl(void *data, int val)
-{
- int i = 5000;
-
- if (val) {
- gpio_line_config(ixp2000_scl_pin(data), GPIO_IN);
- while(!gpio_line_get(ixp2000_scl_pin(data)) && i--);
- } else {
- gpio_line_config(ixp2000_scl_pin(data), GPIO_OUT);
- }
-}
-
-static void ixp2000_bit_setsda(void *data, int val)
-{
- if (val) {
- gpio_line_config(ixp2000_sda_pin(data), GPIO_IN);
- } else {
- gpio_line_config(ixp2000_sda_pin(data), GPIO_OUT);
- }
-}
-
-static int ixp2000_bit_getscl(void *data)
-{
- return gpio_line_get(ixp2000_scl_pin(data));
-}
-
-static int ixp2000_bit_getsda(void *data)
-{
- return gpio_line_get(ixp2000_sda_pin(data));
-}
-
-struct ixp2000_i2c_data {
- struct ixp2000_i2c_pins *gpio_pins;
- struct i2c_adapter adapter;
- struct i2c_algo_bit_data algo_data;
-};
-
-static int ixp2000_i2c_remove(struct platform_device *plat_dev)
-{
- struct ixp2000_i2c_data *drv_data = platform_get_drvdata(plat_dev);
-
- platform_set_drvdata(plat_dev, NULL);
-
- i2c_del_adapter(&drv_data->adapter);
-
- kfree(drv_data);
-
- return 0;
-}
-
-static int ixp2000_i2c_probe(struct platform_device *plat_dev)
-{
- int err;
- struct ixp2000_i2c_pins *gpio = plat_dev->dev.platform_data;
- struct ixp2000_i2c_data *drv_data =
- kzalloc(sizeof(struct ixp2000_i2c_data), GFP_KERNEL);
-
- if (!drv_data)
- return -ENOMEM;
- drv_data->gpio_pins = gpio;
-
- drv_data->algo_data.data = gpio;
- drv_data->algo_data.setsda = ixp2000_bit_setsda;
- drv_data->algo_data.setscl = ixp2000_bit_setscl;
- drv_data->algo_data.getsda = ixp2000_bit_getsda;
- drv_data->algo_data.getscl = ixp2000_bit_getscl;
- drv_data->algo_data.udelay = 6;
- drv_data->algo_data.timeout = HZ;
-
- strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name,
- sizeof(drv_data->adapter.name));
- drv_data->adapter.algo_data = &drv_data->algo_data,
-
- drv_data->adapter.dev.parent = &plat_dev->dev;
-
- gpio_line_config(gpio->sda_pin, GPIO_IN);
- gpio_line_config(gpio->scl_pin, GPIO_IN);
- gpio_line_set(gpio->scl_pin, 0);
- gpio_line_set(gpio->sda_pin, 0);
-
- if ((err = i2c_bit_add_bus(&drv_data->adapter)) != 0) {
- dev_err(&plat_dev->dev, "Could not install, error %d\n", err);
- kfree(drv_data);
- return err;
- }
-
- platform_set_drvdata(plat_dev, drv_data);
-
- return 0;
-}
-
-static struct platform_driver ixp2000_i2c_driver = {
- .probe = ixp2000_i2c_probe,
- .remove = ixp2000_i2c_remove,
- .driver = {
- .name = "IXP2000-I2C",
- .owner = THIS_MODULE,
- },
-};
-
-module_platform_driver(ixp2000_i2c_driver);
-
-MODULE_AUTHOR ("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_DESCRIPTION("IXP2000 GPIO-based I2C bus driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:IXP2000-I2C");
-
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 206caacd30d7..b76731edbf10 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -64,6 +64,9 @@ struct mpc_i2c {
struct i2c_adapter adap;
int irq;
u32 real_clk;
+#ifdef CONFIG_PM
+ u8 fdr, dfsrr;
+#endif
};
struct mpc_i2c_divider {
@@ -703,6 +706,30 @@ static int __devexit fsl_i2c_remove(struct platform_device *op)
return 0;
};
+#ifdef CONFIG_PM
+static int mpc_i2c_suspend(struct device *dev)
+{
+ struct mpc_i2c *i2c = dev_get_drvdata(dev);
+
+ i2c->fdr = readb(i2c->base + MPC_I2C_FDR);
+ i2c->dfsrr = readb(i2c->base + MPC_I2C_DFSRR);
+
+ return 0;
+}
+
+static int mpc_i2c_resume(struct device *dev)
+{
+ struct mpc_i2c *i2c = dev_get_drvdata(dev);
+
+ writeb(i2c->fdr, i2c->base + MPC_I2C_FDR);
+ writeb(i2c->dfsrr, i2c->base + MPC_I2C_DFSRR);
+
+ return 0;
+}
+
+SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
+#endif
+
static struct mpc_i2c_data mpc_i2c_data_512x __devinitdata = {
.setup = mpc_i2c_setup_512x,
};
@@ -747,6 +774,9 @@ static struct platform_driver mpc_i2c_driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = mpc_i2c_of_match,
+#ifdef CONFIG_PM
+ .pm = &mpc_i2c_pm_ops,
+#endif
},
};
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 7fa73eed84a7..04eb441b6ce1 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -27,8 +27,10 @@
#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/pinctrl/consumer.h>
-
-#include <mach/common.h>
+#include <linux/stmp_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_i2c.h>
#define DRIVER_NAME "mxs-i2c"
@@ -112,13 +114,9 @@ struct mxs_i2c_dev {
struct i2c_adapter adapter;
};
-/*
- * TODO: check if calls to here are really needed. If not, we could get rid of
- * mxs_reset_block and the mach-dependency. Needs an I2C analyzer, probably.
- */
static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
{
- mxs_reset_block(i2c->regs);
+ stmp_reset_block(i2c->regs);
writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
i2c->regs + MXS_I2C_QUEUECTRL_SET);
@@ -371,6 +369,7 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
adap->algo = &mxs_i2c_algo;
adap->dev.parent = dev;
adap->nr = pdev->id;
+ adap->dev.of_node = pdev->dev.of_node;
i2c_set_adapdata(adap, i2c);
err = i2c_add_numbered_adapter(adap);
if (err) {
@@ -380,6 +379,8 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
return err;
}
+ of_i2c_register_devices(adap);
+
return 0;
}
@@ -399,10 +400,17 @@ static int __devexit mxs_i2c_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id mxs_i2c_dt_ids[] = {
+ { .compatible = "fsl,imx28-i2c", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids);
+
static struct platform_driver mxs_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = mxs_i2c_dt_ids,
},
.remove = __devexit_p(mxs_i2c_remove),
};
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 03b615778887..a26dfb8cd586 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -502,7 +502,8 @@ static int nuc900_i2c_xfer(struct i2c_adapter *adap,
/* declare our i2c functionality */
static u32 nuc900_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+ I2C_FUNC_PROTOCOL_MANGLING;
}
/* i2c bus registration info */
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 18068dee48f1..75194c579b6d 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -55,6 +55,7 @@
#include <linux/i2c-ocores.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/of_i2c.h>
struct ocores_i2c {
void __iomem *base;
@@ -343,6 +344,8 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
if (pdata) {
for (i = 0; i < pdata->num_devices; i++)
i2c_new_device(&i2c->adap, pdata->devices + i);
+ } else {
+ of_i2c_register_devices(&i2c->adap);
}
return 0;
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 2adbf1a8fdea..675878f49f76 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -171,7 +171,7 @@ static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
i2c->io_size = resource_size(res);
i2c->irq = irq;
- i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0;
+ i2c->adap.nr = pdev->id;
i2c->adap.owner = THIS_MODULE;
snprintf(i2c->adap.name, sizeof(i2c->adap.name),
"PCA9564/PCA9665 at 0x%08lx",
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index f6733267fa9c..a997c7d3f95d 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1131,11 +1131,6 @@ static int i2c_pxa_probe(struct platform_device *dev)
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
- /*
- * If "dev->id" is negative we consider it as zero.
- * The reason to do so is to avoid sysfs names that only make
- * sense when there are multiple adapters.
- */
i2c->adap.nr = dev->id;
snprintf(i2c->adap.name, sizeof(i2c->adap.name), "pxa_i2c-i2c.%u",
i2c->adap.nr);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 737f7218a32c..01959154572d 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -44,8 +44,12 @@
#include <plat/regs-iic.h>
#include <plat/iic.h>
-/* i2c controller state */
+/* Treat S3C2410 as baseline hardware, anything else is supported via quirks */
+#define QUIRK_S3C2440 (1 << 0)
+#define QUIRK_HDMIPHY (1 << 1)
+#define QUIRK_NO_GPIO (1 << 2)
+/* i2c controller state */
enum s3c24xx_i2c_state {
STATE_IDLE,
STATE_START,
@@ -54,14 +58,10 @@ enum s3c24xx_i2c_state {
STATE_STOP
};
-enum s3c24xx_i2c_type {
- TYPE_S3C2410,
- TYPE_S3C2440,
-};
-
struct s3c24xx_i2c {
spinlock_t lock;
wait_queue_head_t wait;
+ unsigned int quirks;
unsigned int suspended:1;
struct i2c_msg *msg;
@@ -88,26 +88,45 @@ struct s3c24xx_i2c {
#endif
};
-/* default platform data removed, dev should always carry data. */
+static struct platform_device_id s3c24xx_driver_ids[] = {
+ {
+ .name = "s3c2410-i2c",
+ .driver_data = 0,
+ }, {
+ .name = "s3c2440-i2c",
+ .driver_data = QUIRK_S3C2440,
+ }, {
+ .name = "s3c2440-hdmiphy-i2c",
+ .driver_data = QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO,
+ }, { },
+};
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id s3c24xx_i2c_match[] = {
+ { .compatible = "samsung,s3c2410-i2c", .data = (void *)0 },
+ { .compatible = "samsung,s3c2440-i2c", .data = (void *)QUIRK_S3C2440 },
+ { .compatible = "samsung,s3c2440-hdmiphy-i2c",
+ .data = (void *)(QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO) },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
+#endif
-/* s3c24xx_i2c_is2440()
+/* s3c24xx_get_device_quirks
*
- * return true is this is an s3c2440
+ * Get controller type either from device tree or platform device variant.
*/
-static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c)
+static inline unsigned int s3c24xx_get_device_quirks(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(i2c->dev);
- enum s3c24xx_i2c_type type;
-
-#ifdef CONFIG_OF
- if (i2c->dev->of_node)
- return of_device_is_compatible(i2c->dev->of_node,
- "samsung,s3c2440-i2c");
-#endif
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(&s3c24xx_i2c_match, pdev->dev.of_node);
+ return (unsigned int)match->data;
+ }
- type = platform_get_device_id(pdev)->driver_data;
- return type == TYPE_S3C2440;
+ return platform_get_device_id(pdev)->driver_data;
}
/* s3c24xx_i2c_master_complete
@@ -471,6 +490,13 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
unsigned long iicstat;
int timeout = 400;
+ /* the timeout for the HDMIPHY bus is reduced to 10 ms because
+ * a hang-up there is expected, so waiting the full 400 ms
+ * would only stall the system unnecessarily
+ */
+ if (i2c->quirks & QUIRK_HDMIPHY)
+ timeout = 10;
+
while (timeout-- > 0) {
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
@@ -480,6 +506,15 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
msleep(1);
}
+ /* the bus dedicated to the HDMIPHY hung up, reset the controller */
+ if (i2c->quirks & QUIRK_HDMIPHY) {
+ writel(0, i2c->regs + S3C2410_IICCON);
+ writel(0, i2c->regs + S3C2410_IICSTAT);
+ writel(0, i2c->regs + S3C2410_IICDS);
+
+ return 0;
+ }
+
return -ETIMEDOUT;
}
@@ -591,7 +626,8 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
/* declare our i2c functionality */
static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+ I2C_FUNC_PROTOCOL_MANGLING;
}
/* i2c bus registration info */
@@ -676,7 +712,7 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
writel(iiccon, i2c->regs + S3C2410_IICCON);
- if (s3c24xx_i2c_is2440(i2c)) {
+ if (i2c->quirks & QUIRK_S3C2440) {
unsigned long sda_delay;
if (pdata->sda_delay) {
@@ -761,6 +797,9 @@ static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
{
int idx, gpio, ret;
+ if (i2c->quirks & QUIRK_NO_GPIO)
+ return 0;
+
for (idx = 0; idx < 2; idx++) {
gpio = of_get_gpio(i2c->dev->of_node, idx);
if (!gpio_is_valid(gpio)) {
@@ -785,6 +824,10 @@ free_gpio:
static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
{
unsigned int idx;
+
+ if (i2c->quirks & QUIRK_NO_GPIO)
+ return;
+
for (idx = 0; idx < 2; idx++)
gpio_free(i2c->gpios[idx]);
}
@@ -906,6 +949,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
goto err_noclk;
}
+ i2c->quirks = s3c24xx_get_device_quirks(pdev);
if (pdata)
memcpy(i2c->pdata, pdata, sizeof(*pdata));
else
@@ -1110,28 +1154,6 @@ static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
/* device driver for platform bus bits */
-static struct platform_device_id s3c24xx_driver_ids[] = {
- {
- .name = "s3c2410-i2c",
- .driver_data = TYPE_S3C2410,
- }, {
- .name = "s3c2440-i2c",
- .driver_data = TYPE_S3C2440,
- }, { },
-};
-MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
-
-#ifdef CONFIG_OF
-static const struct of_device_id s3c24xx_i2c_match[] = {
- { .compatible = "samsung,s3c2410-i2c" },
- { .compatible = "samsung,s3c2440-i2c" },
- {},
-};
-MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
-#else
-#define s3c24xx_i2c_match NULL
-#endif
-
static struct platform_driver s3c24xx_i2c_driver = {
.probe = s3c24xx_i2c_probe,
.remove = s3c24xx_i2c_remove,
@@ -1140,7 +1162,7 @@ static struct platform_driver s3c24xx_i2c_driver = {
.owner = THIS_MODULE,
.name = "s3c-i2c",
.pm = S3C24XX_DEV_PM_OPS,
- .of_match_table = s3c24xx_i2c_match,
+ .of_match_table = of_match_ptr(s3c24xx_i2c_match),
},
};
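
The quirk handling above carries one flag word per controller variant and fetches it either from the matched of_device_id (device tree boots) or from the platform_device_id table (legacy board files). A generic sketch of that lookup pattern, with made-up names (QUIRK_FOO, my_of_match, my_get_quirks):

#include <linux/of.h>
#include <linux/platform_device.h>

#define QUIRK_FOO	(1 << 0)

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,controller-foo", .data = (void *)QUIRK_FOO },
	{ /* sentinel */ },
};

static unsigned int my_get_quirks(struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match =
			of_match_node(my_of_match, pdev->dev.of_node);

		return match ? (unsigned int)(unsigned long)match->data : 0;
	}

	return platform_get_device_id(pdev)->driver_data;
}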
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 675c9692d148..8110ca45f342 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -27,6 +27,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
+#include <linux/of_i2c.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
@@ -653,6 +654,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
adap->dev.parent = &dev->dev;
adap->retries = 5;
adap->nr = dev->id;
+ adap->dev.of_node = dev->dev.of_node;
strlcpy(adap->name, dev->name, sizeof(adap->name));
@@ -667,6 +669,8 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
dev_info(&dev->dev, "I2C adapter %d with bus speed %lu Hz\n",
adap->nr, pd->bus_speed);
+
+ of_i2c_register_devices(adap);
return 0;
err_all:
@@ -710,11 +714,18 @@ static const struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = {
.runtime_resume = sh_mobile_i2c_runtime_nop,
};
+static const struct of_device_id sh_mobile_i2c_dt_ids[] __devinitconst = {
+ { .compatible = "renesas,rmobile-iic", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids);
+
static struct platform_driver sh_mobile_i2c_driver = {
.driver = {
.name = "i2c-sh_mobile",
.owner = THIS_MODULE,
.pm = &sh_mobile_i2c_dev_pm_ops,
+ .of_match_table = sh_mobile_i2c_dt_ids,
},
.probe = sh_mobile_i2c_probe,
.remove = sh_mobile_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 55e5ea62ccee..8b2e555a9563 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -401,8 +401,6 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
disable_irq_nosync(i2c_dev->irq);
i2c_dev->irq_disabled = 1;
}
-
- complete(&i2c_dev->msg_complete);
goto err;
}
@@ -411,7 +409,6 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
i2c_dev->msg_err |= I2C_ERR_NO_ACK;
if (status & I2C_INT_ARBITRATION_LOST)
i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST;
- complete(&i2c_dev->msg_complete);
goto err;
}
@@ -429,14 +426,14 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
}
+ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+ if (i2c_dev->is_dvc)
+ dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+
if (status & I2C_INT_PACKET_XFER_COMPLETE) {
BUG_ON(i2c_dev->msg_buf_remaining);
complete(&i2c_dev->msg_complete);
}
-
- i2c_writel(i2c_dev, status, I2C_INT_STATUS);
- if (i2c_dev->is_dvc)
- dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
return IRQ_HANDLED;
err:
/* An error occurred, mask all interrupts */
@@ -446,6 +443,8 @@ err:
i2c_writel(i2c_dev, status, I2C_INT_STATUS);
if (i2c_dev->is_dvc)
dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+
+ complete(&i2c_dev->msg_complete);
return IRQ_HANDLED;
}
@@ -476,12 +475,15 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
packet_header = msg->len - 1;
i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
- packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
- packet_header |= I2C_HEADER_IE_ENABLE;
+ packet_header = I2C_HEADER_IE_ENABLE;
if (!stop)
packet_header |= I2C_HEADER_REPEAT_START;
- if (msg->flags & I2C_M_TEN)
+ if (msg->flags & I2C_M_TEN) {
+ packet_header |= msg->addr;
packet_header |= I2C_HEADER_10BIT_ADDR;
+ } else {
+ packet_header |= msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
+ }
if (msg->flags & I2C_M_IGNORE_NAK)
packet_header |= I2C_HEADER_CONT_ON_NAK;
if (msg->flags & I2C_M_RD)
@@ -557,7 +559,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
static u32 tegra_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR;
}
static const struct i2c_algorithm tegra_i2c_algo = {
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index f585aead50cc..eec20db6246f 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -104,13 +104,8 @@ static int i2c_versatile_probe(struct platform_device *dev)
i2c->algo = i2c_versatile_algo;
i2c->algo.data = i2c;
- if (dev->id >= 0) {
- /* static bus numbering */
- i2c->adap.nr = dev->id;
- ret = i2c_bit_add_numbered_bus(&i2c->adap);
- } else
- /* dynamic bus numbering */
- ret = i2c_bit_add_bus(&i2c->adap);
+ i2c->adap.nr = dev->id;
+ ret = i2c_bit_add_numbered_bus(&i2c->adap);
if (ret >= 0) {
platform_set_drvdata(dev, i2c);
of_i2c_register_devices(&i2c->adap);
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 2bded7647ef2..641d0e5e3303 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -40,6 +40,7 @@
#include <linux/i2c-xiic.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of_i2c.h>
#define DRIVER_NAME "xiic-i2c"
@@ -705,8 +706,6 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
goto resource_missing;
pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
- if (!pdata)
- return -EINVAL;
i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
if (!i2c)
@@ -730,6 +729,7 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
i2c->adap = xiic_adapter;
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.dev.parent = &pdev->dev;
+ i2c->adap.dev.of_node = pdev->dev.of_node;
xiic_reinit(i2c);
@@ -748,9 +748,13 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
goto add_adapter_failed;
}
- /* add in known devices to the bus */
- for (i = 0; i < pdata->num_devices; i++)
- i2c_new_device(&i2c->adap, pdata->devices + i);
+ if (pdata) {
+ /* add in known devices to the bus */
+ for (i = 0; i < pdata->num_devices; i++)
+ i2c_new_device(&i2c->adap, pdata->devices + i);
+ }
+
+ of_i2c_register_devices(&i2c->adap);
return 0;
@@ -795,12 +799,21 @@ static int __devexit xiic_i2c_remove(struct platform_device* pdev)
return 0;
}
+#if defined(CONFIG_OF)
+static const struct of_device_id xiic_of_match[] __devinitconst = {
+ { .compatible = "xlnx,xps-iic-2.00.a", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xiic_of_match);
+#endif
+
static struct platform_driver xiic_i2c_driver = {
.probe = xiic_i2c_probe,
.remove = __devexit_p(xiic_i2c_remove),
.driver = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(xiic_of_match),
},
};
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index feb7dc359186..a6ad32bc0a96 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -772,6 +772,23 @@ struct device_type i2c_adapter_type = {
};
EXPORT_SYMBOL_GPL(i2c_adapter_type);
+/**
+ * i2c_verify_adapter - return parameter as i2c_adapter or NULL
+ * @dev: device, probably from some driver model iterator
+ *
+ * When traversing the driver model tree, perhaps using driver model
+ * iterators like @device_for_each_child(), you can't assume very much
+ * about the nodes you find. Use this function to avoid oopses caused
+ * by wrongly treating some non-I2C device as an i2c_adapter.
+ */
+struct i2c_adapter *i2c_verify_adapter(struct device *dev)
+{
+ return (dev->type == &i2c_adapter_type)
+ ? to_i2c_adapter(dev)
+ : NULL;
+}
+EXPORT_SYMBOL(i2c_verify_adapter);
+
#ifdef CONFIG_I2C_COMPAT
static struct class_compat *i2c_adapter_compat_class;
#endif
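
As the kernel-doc above says, i2c_verify_adapter() exists so driver-model iterators do not mistake arbitrary devices for adapters. A hypothetical use with device_for_each_child() (the callback and helper names are made up):

#include <linux/device.h>
#include <linux/i2c.h>

static int __match_adapter(struct device *dev, void *data)
{
	struct i2c_adapter *adap = i2c_verify_adapter(dev);

	if (!adap)
		return 0;			/* not an adapter, keep walking */

	*(struct i2c_adapter **)data = adap;
	return 1;				/* stop the iteration */
}

static struct i2c_adapter *find_child_adapter(struct device *parent)
{
	struct i2c_adapter *adap = NULL;

	device_for_each_child(parent, &adap, __match_adapter);
	return adap;
}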
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 45048323b75e..5ec2261574ec 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -265,19 +265,41 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
res = 0;
for (i = 0; i < rdwr_arg.nmsgs; i++) {
- /* Limit the size of the message to a sane amount;
- * and don't let length change either. */
- if ((rdwr_pa[i].len > 8192) ||
- (rdwr_pa[i].flags & I2C_M_RECV_LEN)) {
+ /* Limit the size of the message to a sane amount */
+ if (rdwr_pa[i].len > 8192) {
res = -EINVAL;
break;
}
+
data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
if (IS_ERR(rdwr_pa[i].buf)) {
res = PTR_ERR(rdwr_pa[i].buf);
break;
}
+
+ /*
+ * If the message length is received from the slave (similar
+ * to SMBus block read), we must ensure that the buffer will
+ * be large enough to cope with a message length of
+ * I2C_SMBUS_BLOCK_MAX as this is the maximum underlying bus
+ * drivers allow. The first byte in the buffer must be
+ * pre-filled with the number of extra bytes, which must be
+ * at least one to hold the message length, but can be
+ * greater (for example to account for a checksum byte at
+ * the end of the message.)
+ */
+ if (rdwr_pa[i].flags & I2C_M_RECV_LEN) {
+ if (!(rdwr_pa[i].flags & I2C_M_RD) ||
+ rdwr_pa[i].buf[0] < 1 ||
+ rdwr_pa[i].len < rdwr_pa[i].buf[0] +
+ I2C_SMBUS_BLOCK_MAX) {
+ res = -EINVAL;
+ break;
+ }
+
+ rdwr_pa[i].len = rdwr_pa[i].buf[0];
+ }
}
if (res < 0) {
int j;
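
The I2C_M_RECV_LEN path above is what user space relies on for block reads through /dev/i2c-*: the message must be a read, buf[0] must be pre-filled with the number of extra bytes, and len must be at least buf[0] + I2C_SMBUS_BLOCK_MAX. A user-space sketch under those rules; the device path, slave address and command byte are invented, and the header split between <linux/i2c.h> and <linux/i2c-dev.h> may differ if the i2c-tools headers are installed:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char cmd = 0x01;			/* hypothetical command byte */
	unsigned char buf[1 + I2C_SMBUS_BLOCK_MAX];
	struct i2c_msg msgs[2];
	struct i2c_rdwr_ioctl_data xfer = { msgs, 2 };
	int fd = open("/dev/i2c-0", O_RDWR);

	if (fd < 0)
		return 1;

	buf[0] = 1;				/* one extra byte: the length itself */

	msgs[0].addr  = 0x50;			/* hypothetical slave address */
	msgs[0].flags = 0;
	msgs[0].len   = 1;
	msgs[0].buf   = &cmd;

	msgs[1].addr  = 0x50;
	msgs[1].flags = I2C_M_RD | I2C_M_RECV_LEN;
	msgs[1].len   = sizeof(buf);		/* >= buf[0] + I2C_SMBUS_BLOCK_MAX */
	msgs[1].buf   = buf;

	if (ioctl(fd, I2C_RDWR, &xfer) < 0) {
		close(fd);
		return 1;
	}

	printf("slave reported %u data bytes\n", buf[0]);
	close(fd);
	return 0;
}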
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index d7a4833be416..1038c381aea5 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -24,6 +24,8 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
+#include <linux/of.h>
+#include <linux/of_i2c.h>
/* multiplexer per channel data */
struct i2c_mux_priv {
@@ -31,11 +33,11 @@ struct i2c_mux_priv {
struct i2c_algorithm algo;
struct i2c_adapter *parent;
- void *mux_dev; /* the mux chip/device */
+ void *mux_priv; /* the mux chip/device */
u32 chan_id; /* the channel id */
- int (*select)(struct i2c_adapter *, void *mux_dev, u32 chan_id);
- int (*deselect)(struct i2c_adapter *, void *mux_dev, u32 chan_id);
+ int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
+ int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
};
static int i2c_mux_master_xfer(struct i2c_adapter *adap,
@@ -47,11 +49,11 @@ static int i2c_mux_master_xfer(struct i2c_adapter *adap,
/* Switch to the right mux port and perform the transfer. */
- ret = priv->select(parent, priv->mux_dev, priv->chan_id);
+ ret = priv->select(parent, priv->mux_priv, priv->chan_id);
if (ret >= 0)
ret = parent->algo->master_xfer(parent, msgs, num);
if (priv->deselect)
- priv->deselect(parent, priv->mux_dev, priv->chan_id);
+ priv->deselect(parent, priv->mux_priv, priv->chan_id);
return ret;
}
@@ -67,12 +69,12 @@ static int i2c_mux_smbus_xfer(struct i2c_adapter *adap,
/* Select the right mux port and perform the transfer. */
- ret = priv->select(parent, priv->mux_dev, priv->chan_id);
+ ret = priv->select(parent, priv->mux_priv, priv->chan_id);
if (ret >= 0)
ret = parent->algo->smbus_xfer(parent, addr, flags,
read_write, command, size, data);
if (priv->deselect)
- priv->deselect(parent, priv->mux_dev, priv->chan_id);
+ priv->deselect(parent, priv->mux_priv, priv->chan_id);
return ret;
}
@@ -87,7 +89,8 @@ static u32 i2c_mux_functionality(struct i2c_adapter *adap)
}
struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
- void *mux_dev, u32 force_nr, u32 chan_id,
+ struct device *mux_dev,
+ void *mux_priv, u32 force_nr, u32 chan_id,
int (*select) (struct i2c_adapter *,
void *, u32),
int (*deselect) (struct i2c_adapter *,
@@ -102,7 +105,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
/* Set up private adapter data */
priv->parent = parent;
- priv->mux_dev = mux_dev;
+ priv->mux_priv = mux_priv;
priv->chan_id = chan_id;
priv->select = select;
priv->deselect = deselect;
@@ -124,6 +127,25 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
priv->adap.algo_data = priv;
priv->adap.dev.parent = &parent->dev;
+ /*
+ * Try to populate the mux adapter's of_node, expands to
+ * nothing if !CONFIG_OF.
+ */
+ if (mux_dev->of_node) {
+ struct device_node *child;
+ u32 reg;
+
+ for_each_child_of_node(mux_dev->of_node, child) {
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret)
+ continue;
+ if (chan_id == reg) {
+ priv->adap.dev.of_node = child;
+ break;
+ }
+ }
+ }
+
if (force_nr) {
priv->adap.nr = force_nr;
ret = i2c_add_numbered_adapter(&priv->adap);
@@ -141,6 +163,8 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
dev_info(&parent->dev, "Added multiplexed i2c bus %d\n",
i2c_adapter_id(&priv->adap));
+ of_i2c_register_devices(&priv->adap);
+
return &priv->adap;
}
EXPORT_SYMBOL_GPL(i2c_add_mux_adapter);
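
With the extra struct device argument, i2c_add_mux_adapter() can walk the mux device's OF children and hang the matching sub-node off each child adapter, so of_i2c_register_devices() then finds the devices behind the mux. A hypothetical platform-driver caller using the new signature (the mymux_* names and the hard-coded parent bus number are illustrative only):

#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/platform_device.h>

static int mymux_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
{
	/* switch the hardware to channel 'chan' here */
	return 0;
}

static int mymux_probe(struct platform_device *pdev)
{
	struct i2c_adapter *parent = i2c_get_adapter(0);	/* parent bus, assumed */
	struct i2c_adapter *child;

	if (!parent)
		return -ENODEV;

	child = i2c_add_mux_adapter(parent, &pdev->dev, NULL /* mux_priv */,
				    0 /* dynamic bus number */, 0 /* chan_id */,
				    mymux_select, NULL /* no deselect */);
	if (!child) {
		i2c_put_adapter(parent);
		return -ENODEV;
	}

	platform_set_drvdata(pdev, child);
	return 0;
}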
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 90b7a0163899..beb2491db274 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -15,7 +15,7 @@ config I2C_MUX_GPIO
through GPIO pins.
This driver can also be built as a module. If so, the module
- will be called gpio-i2cmux.
+ will be called i2c-mux-gpio.
config I2C_MUX_PCA9541
tristate "NXP PCA9541 I2C Master Selector"
@@ -25,7 +25,7 @@ config I2C_MUX_PCA9541
I2C Master Selector.
This driver can also be built as a module. If so, the module
- will be called pca9541.
+ will be called i2c-mux-pca9541.
config I2C_MUX_PCA954x
tristate "Philips PCA954x I2C Mux/switches"
@@ -35,6 +35,6 @@ config I2C_MUX_PCA954x
I2C mux/switch devices.
This driver can also be built as a module. If so, the module
- will be called pca954x.
+ will be called i2c-mux-pca954x.
endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 4640436ea61f..5826249b29ca 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -1,8 +1,8 @@
#
# Makefile for multiplexer I2C chip drivers.
-obj-$(CONFIG_I2C_MUX_GPIO) += gpio-i2cmux.o
-obj-$(CONFIG_I2C_MUX_PCA9541) += pca9541.o
-obj-$(CONFIG_I2C_MUX_PCA954x) += pca954x.o
+obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
+obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
+obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/muxes/gpio-i2cmux.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index e5fa695eb0fa..68b1f8ec3436 100644
--- a/drivers/i2c/muxes/gpio-i2cmux.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -10,7 +10,7 @@
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
-#include <linux/gpio-i2cmux.h>
+#include <linux/i2c-mux-gpio.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -20,10 +20,10 @@
struct gpiomux {
struct i2c_adapter *parent;
struct i2c_adapter **adap; /* child busses */
- struct gpio_i2cmux_platform_data data;
+ struct i2c_mux_gpio_platform_data data;
};
-static void gpiomux_set(const struct gpiomux *mux, unsigned val)
+static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
{
int i;
@@ -31,28 +31,28 @@ static void gpiomux_set(const struct gpiomux *mux, unsigned val)
gpio_set_value(mux->data.gpios[i], val & (1 << i));
}
-static int gpiomux_select(struct i2c_adapter *adap, void *data, u32 chan)
+static int i2c_mux_gpio_select(struct i2c_adapter *adap, void *data, u32 chan)
{
struct gpiomux *mux = data;
- gpiomux_set(mux, mux->data.values[chan]);
+ i2c_mux_gpio_set(mux, mux->data.values[chan]);
return 0;
}
-static int gpiomux_deselect(struct i2c_adapter *adap, void *data, u32 chan)
+static int i2c_mux_gpio_deselect(struct i2c_adapter *adap, void *data, u32 chan)
{
struct gpiomux *mux = data;
- gpiomux_set(mux, mux->data.idle);
+ i2c_mux_gpio_set(mux, mux->data.idle);
return 0;
}
-static int __devinit gpiomux_probe(struct platform_device *pdev)
+static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
{
struct gpiomux *mux;
- struct gpio_i2cmux_platform_data *pdata;
+ struct i2c_mux_gpio_platform_data *pdata;
struct i2c_adapter *parent;
int (*deselect) (struct i2c_adapter *, void *, u32);
unsigned initial_state;
@@ -86,16 +86,16 @@ static int __devinit gpiomux_probe(struct platform_device *pdev)
goto alloc_failed2;
}
- if (pdata->idle != GPIO_I2CMUX_NO_IDLE) {
+ if (pdata->idle != I2C_MUX_GPIO_NO_IDLE) {
initial_state = pdata->idle;
- deselect = gpiomux_deselect;
+ deselect = i2c_mux_gpio_deselect;
} else {
initial_state = pdata->values[0];
deselect = NULL;
}
for (i = 0; i < pdata->n_gpios; i++) {
- ret = gpio_request(pdata->gpios[i], "gpio-i2cmux");
+ ret = gpio_request(pdata->gpios[i], "i2c-mux-gpio");
if (ret)
goto err_request_gpio;
gpio_direction_output(pdata->gpios[i],
@@ -105,8 +105,8 @@ static int __devinit gpiomux_probe(struct platform_device *pdev)
for (i = 0; i < pdata->n_values; i++) {
u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
- mux->adap[i] = i2c_add_mux_adapter(parent, mux, nr, i,
- gpiomux_select, deselect);
+ mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr, i,
+ i2c_mux_gpio_select, deselect);
if (!mux->adap[i]) {
ret = -ENODEV;
dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
@@ -137,7 +137,7 @@ alloc_failed:
return ret;
}
-static int __devexit gpiomux_remove(struct platform_device *pdev)
+static int __devexit i2c_mux_gpio_remove(struct platform_device *pdev)
{
struct gpiomux *mux = platform_get_drvdata(pdev);
int i;
@@ -156,18 +156,18 @@ static int __devexit gpiomux_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver gpiomux_driver = {
- .probe = gpiomux_probe,
- .remove = __devexit_p(gpiomux_remove),
+static struct platform_driver i2c_mux_gpio_driver = {
+ .probe = i2c_mux_gpio_probe,
+ .remove = __devexit_p(i2c_mux_gpio_remove),
.driver = {
.owner = THIS_MODULE,
- .name = "gpio-i2cmux",
+ .name = "i2c-mux-gpio",
},
};
-module_platform_driver(gpiomux_driver);
+module_platform_driver(i2c_mux_gpio_driver);
MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver");
MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:gpio-i2cmux");
+MODULE_ALIAS("platform:i2c-mux-gpio");
diff --git a/drivers/i2c/muxes/pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index e0df9b6c66b3..8aacde1516ac 100644
--- a/drivers/i2c/muxes/pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -353,7 +353,8 @@ static int pca9541_probe(struct i2c_client *client,
force = 0;
if (pdata)
force = pdata->modes[0].adap_id;
- data->mux_adap = i2c_add_mux_adapter(adap, client, force, 0,
+ data->mux_adap = i2c_add_mux_adapter(adap, &client->dev, client,
+ force, 0,
pca9541_select_chan,
pca9541_release_chan);
diff --git a/drivers/i2c/muxes/pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 0e37ef27aa12..f2dfe0d8fcce 100644
--- a/drivers/i2c/muxes/pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -226,7 +226,7 @@ static int pca954x_probe(struct i2c_client *client,
}
data->virt_adaps[num] =
- i2c_add_mux_adapter(adap, client,
+ i2c_add_mux_adapter(adap, &client->dev, client,
force, num, pca954x_select_chan,
(pdata && pdata->modes[num].deselect_on_exit)
? pca954x_deselect_mux : NULL);
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 332597980817..55f7e57d4e42 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -25,10 +25,6 @@ config INPUT
if INPUT
-config INPUT_OF_MATRIX_KEYMAP
- depends on USE_OF
- bool
-
config INPUT_FF_MEMLESS
tristate "Support for memoryless force-feedback devices"
help
@@ -68,6 +64,19 @@ config INPUT_SPARSEKMAP
To compile this driver as a module, choose M here: the
module will be called sparse-keymap.
+config INPUT_MATRIXKMAP
+ tristate "Matrix keymap support library"
+ help
+ Say Y here if you are using a driver for an input
+ device that uses a matrix keymap. This option is only
+ useful for out-of-tree drivers since in-tree drivers
+ select it automatically.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called matrix-keymap.
+
comment "Userland interfaces"
config INPUT_MOUSEDEV
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index b173a13a73ca..5ca3f631497f 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -10,6 +10,7 @@ input-core-y := input.o input-compat.o input-mt.o ff-core.o
obj-$(CONFIG_INPUT_FF_MEMLESS) += ff-memless.o
obj-$(CONFIG_INPUT_POLLDEV) += input-polldev.o
obj-$(CONFIG_INPUT_SPARSEKMAP) += sparse-keymap.o
+obj-$(CONFIG_INPUT_MATRIXKMAP) += matrix-keymap.o
obj-$(CONFIG_INPUT_MOUSEDEV) += mousedev.o
obj-$(CONFIG_INPUT_JOYDEV) += joydev.o
@@ -24,4 +25,3 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/
obj-$(CONFIG_INPUT_MISC) += misc/
obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o
-obj-$(CONFIG_INPUT_OF_MATRIX_KEYMAP) += of_keymap.o
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 4b2e10d5d641..6c58bfff01a3 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -180,7 +180,10 @@ static int evdev_grab(struct evdev *evdev, struct evdev_client *client)
static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
{
- if (evdev->grab != client)
+ struct evdev_client *grab = rcu_dereference_protected(evdev->grab,
+ lockdep_is_held(&evdev->mutex));
+
+ if (grab != client)
return -EINVAL;
rcu_assign_pointer(evdev->grab, NULL);
@@ -259,8 +262,7 @@ static int evdev_release(struct inode *inode, struct file *file)
struct evdev *evdev = client->evdev;
mutex_lock(&evdev->mutex);
- if (evdev->grab == client)
- evdev_ungrab(evdev, client);
+ evdev_ungrab(evdev, client);
mutex_unlock(&evdev->mutex);
evdev_detach_client(evdev, client);
@@ -343,7 +345,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
struct input_event event;
int retval = 0;
- if (count < input_event_size())
+ if (count != 0 && count < input_event_size())
return -EINVAL;
retval = mutex_lock_interruptible(&evdev->mutex);
@@ -355,7 +357,8 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
goto out;
}
- do {
+ while (retval + input_event_size() <= count) {
+
if (input_event_from_user(buffer + retval, &event)) {
retval = -EFAULT;
goto out;
@@ -364,7 +367,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
input_inject_event(&evdev->handle,
event.type, event.code, event.value);
- } while (retval + input_event_size() <= count);
+ }
out:
mutex_unlock(&evdev->mutex);
@@ -395,35 +398,49 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
struct evdev_client *client = file->private_data;
struct evdev *evdev = client->evdev;
struct input_event event;
- int retval = 0;
+ size_t read = 0;
+ int error;
- if (count < input_event_size())
+ if (count != 0 && count < input_event_size())
return -EINVAL;
- if (!(file->f_flags & O_NONBLOCK)) {
- retval = wait_event_interruptible(evdev->wait,
- client->packet_head != client->tail ||
- !evdev->exist);
- if (retval)
- return retval;
- }
+ for (;;) {
+ if (!evdev->exist)
+ return -ENODEV;
- if (!evdev->exist)
- return -ENODEV;
+ if (client->packet_head == client->tail &&
+ (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
- while (retval + input_event_size() <= count &&
- evdev_fetch_next_event(client, &event)) {
+ /*
+ * count == 0 is special - no IO is done but we check
+ * for error conditions (see above).
+ */
+ if (count == 0)
+ break;
- if (input_event_to_user(buffer + retval, &event))
- return -EFAULT;
+ while (read + input_event_size() <= count &&
+ evdev_fetch_next_event(client, &event)) {
- retval += input_event_size();
- }
+ if (input_event_to_user(buffer + read, &event))
+ return -EFAULT;
- if (retval == 0 && (file->f_flags & O_NONBLOCK))
- return -EAGAIN;
+ read += input_event_size();
+ }
- return retval;
+ if (read)
+ break;
+
+ if (!(file->f_flags & O_NONBLOCK)) {
+ error = wait_event_interruptible(evdev->wait,
+ client->packet_head != client->tail ||
+ !evdev->exist);
+ if (error)
+ return error;
+ }
+ }
+
+ return read;
}
/* No kernel lock - fine */
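
The rewritten evdev_read() above returns -EAGAIN to an empty non-blocking reader, treats count == 0 as a pure error check, and otherwise hands back whole input_event records before blocking again. A user-space sketch matching those semantics (the device path is illustrative):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev[16];
	int fd = open("/dev/input/event0", O_RDONLY | O_NONBLOCK);
	ssize_t n;

	if (fd < 0)
		return 1;

	n = read(fd, ev, sizeof(ev));
	if (n < 0 && errno == EAGAIN)
		printf("no events queued yet\n");
	else if (n >= (ssize_t)sizeof(struct input_event))
		printf("read %zd events\n", n / (ssize_t)sizeof(struct input_event));

	close(fd);
	return 0;
}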
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 117a59aaa70e..5f558851d646 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -31,8 +31,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
-
-#include "fixp-arith.h"
+#include <linux/fixp-arith.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anssi Hannula <anssi.hannula@gmail.com>");
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index 422aa0a6b77f..daceafe7ee7d 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -125,15 +125,4 @@ static struct pci_driver emu_driver = {
.remove = __devexit_p(emu_remove),
};
-static int __init emu_init(void)
-{
- return pci_register_driver(&emu_driver);
-}
-
-static void __exit emu_exit(void)
-{
- pci_unregister_driver(&emu_driver);
-}
-
-module_init(emu_init);
-module_exit(emu_exit);
+module_pci_driver(emu_driver);
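
module_pci_driver() here, and the module_gameport_driver()/module_serio_driver() conversions in the following hunks, generate exactly the init/exit boilerplate that the removed lines spelled out by hand. Roughly, the macro's effect is the sketch below (not its literal kernel definition):

/* Approximate expansion of module_pci_driver(emu_driver). */
static int __init emu_driver_init(void)
{
	return pci_register_driver(&emu_driver);
}
module_init(emu_driver_init);

static void __exit emu_driver_exit(void)
{
	pci_unregister_driver(&emu_driver);
}
module_exit(emu_driver_exit);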
diff --git a/drivers/input/gameport/fm801-gp.c b/drivers/input/gameport/fm801-gp.c
index a3b70ff21018..48ad3829ff20 100644
--- a/drivers/input/gameport/fm801-gp.c
+++ b/drivers/input/gameport/fm801-gp.c
@@ -144,6 +144,7 @@ static const struct pci_device_id fm801_gp_id_table[] = {
{ PCI_VENDOR_ID_FORTEMEDIA, PCI_DEVICE_ID_FM801_GP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0 }
};
+MODULE_DEVICE_TABLE(pci, fm801_gp_id_table);
static struct pci_driver fm801_gp_driver = {
.name = "FM801_gameport",
@@ -152,20 +153,7 @@ static struct pci_driver fm801_gp_driver = {
.remove = __devexit_p(fm801_gp_remove),
};
-static int __init fm801_gp_init(void)
-{
- return pci_register_driver(&fm801_gp_driver);
-}
-
-static void __exit fm801_gp_exit(void)
-{
- pci_unregister_driver(&fm801_gp_driver);
-}
-
-module_init(fm801_gp_init);
-module_exit(fm801_gp_exit);
-
-MODULE_DEVICE_TABLE(pci, fm801_gp_id_table);
+module_pci_driver(fm801_gp_driver);
MODULE_DESCRIPTION("FM801 gameport driver");
MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index 1639ab2b94b7..85bc8dc07cfc 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -413,15 +413,4 @@ static struct gameport_driver a3d_drv = {
.disconnect = a3d_disconnect,
};
-static int __init a3d_init(void)
-{
- return gameport_register_driver(&a3d_drv);
-}
-
-static void __exit a3d_exit(void)
-{
- gameport_unregister_driver(&a3d_drv);
-}
-
-module_init(a3d_init);
-module_exit(a3d_exit);
+module_gameport_driver(a3d_drv);
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index b992fbf91f2f..0cbfd2dfabf4 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -557,10 +557,6 @@ static void adi_disconnect(struct gameport *gameport)
kfree(port);
}
-/*
- * The gameport device structure.
- */
-
static struct gameport_driver adi_drv = {
.driver = {
.name = "adi",
@@ -570,15 +566,4 @@ static struct gameport_driver adi_drv = {
.disconnect = adi_disconnect,
};
-static int __init adi_init(void)
-{
- return gameport_register_driver(&adi_drv);
-}
-
-static void __exit adi_exit(void)
-{
- gameport_unregister_driver(&adi_drv);
-}
-
-module_init(adi_init);
-module_exit(adi_exit);
+module_gameport_driver(adi_drv);
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
index 3063464474bf..57d19d4e0a2d 100644
--- a/drivers/input/joystick/as5011.c
+++ b/drivers/input/joystick/as5011.c
@@ -231,6 +231,7 @@ static int __devinit as5011_probe(struct i2c_client *client,
}
if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_NOSTART |
I2C_FUNC_PROTOCOL_MANGLING)) {
dev_err(&client->dev,
"need i2c bus that supports protocol mangling\n");
diff --git a/drivers/input/joystick/cobra.c b/drivers/input/joystick/cobra.c
index 3497b87c3d05..65367e44d715 100644
--- a/drivers/input/joystick/cobra.c
+++ b/drivers/input/joystick/cobra.c
@@ -261,15 +261,4 @@ static struct gameport_driver cobra_drv = {
.disconnect = cobra_disconnect,
};
-static int __init cobra_init(void)
-{
- return gameport_register_driver(&cobra_drv);
-}
-
-static void __exit cobra_exit(void)
-{
- gameport_unregister_driver(&cobra_drv);
-}
-
-module_init(cobra_init);
-module_exit(cobra_exit);
+module_gameport_driver(cobra_drv);
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index 0536b1b2f018..ab1cf2882004 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -373,15 +373,4 @@ static struct gameport_driver gf2k_drv = {
.disconnect = gf2k_disconnect,
};
-static int __init gf2k_init(void)
-{
- return gameport_register_driver(&gf2k_drv);
-}
-
-static void __exit gf2k_exit(void)
-{
- gameport_unregister_driver(&gf2k_drv);
-}
-
-module_init(gf2k_init);
-module_exit(gf2k_exit);
+module_gameport_driver(gf2k_drv);
diff --git a/drivers/input/joystick/grip.c b/drivers/input/joystick/grip.c
index fc55899ba6c5..9e1beff57c33 100644
--- a/drivers/input/joystick/grip.c
+++ b/drivers/input/joystick/grip.c
@@ -424,15 +424,4 @@ static struct gameport_driver grip_drv = {
.disconnect = grip_disconnect,
};
-static int __init grip_init(void)
-{
- return gameport_register_driver(&grip_drv);
-}
-
-static void __exit grip_exit(void)
-{
- gameport_unregister_driver(&grip_drv);
-}
-
-module_init(grip_init);
-module_exit(grip_exit);
+module_gameport_driver(grip_drv);
diff --git a/drivers/input/joystick/grip_mp.c b/drivers/input/joystick/grip_mp.c
index 2d47baf47769..c0f9c7b7eb4e 100644
--- a/drivers/input/joystick/grip_mp.c
+++ b/drivers/input/joystick/grip_mp.c
@@ -687,15 +687,4 @@ static struct gameport_driver grip_drv = {
.disconnect = grip_disconnect,
};
-static int __init grip_init(void)
-{
- return gameport_register_driver(&grip_drv);
-}
-
-static void __exit grip_exit(void)
-{
- gameport_unregister_driver(&grip_drv);
-}
-
-module_init(grip_init);
-module_exit(grip_exit);
+module_gameport_driver(grip_drv);
diff --git a/drivers/input/joystick/guillemot.c b/drivers/input/joystick/guillemot.c
index 4058d4b272fe..55196f730af6 100644
--- a/drivers/input/joystick/guillemot.c
+++ b/drivers/input/joystick/guillemot.c
@@ -281,15 +281,4 @@ static struct gameport_driver guillemot_drv = {
.disconnect = guillemot_disconnect,
};
-static int __init guillemot_init(void)
-{
- return gameport_register_driver(&guillemot_drv);
-}
-
-static void __exit guillemot_exit(void)
-{
- gameport_unregister_driver(&guillemot_drv);
-}
-
-module_init(guillemot_init);
-module_exit(guillemot_exit);
+module_gameport_driver(guillemot_drv);
diff --git a/drivers/input/joystick/interact.c b/drivers/input/joystick/interact.c
index 16fb19d1ca25..88c22623a2e8 100644
--- a/drivers/input/joystick/interact.c
+++ b/drivers/input/joystick/interact.c
@@ -311,15 +311,4 @@ static struct gameport_driver interact_drv = {
.disconnect = interact_disconnect,
};
-static int __init interact_init(void)
-{
- return gameport_register_driver(&interact_drv);
-}
-
-static void __exit interact_exit(void)
-{
- gameport_unregister_driver(&interact_drv);
-}
-
-module_init(interact_init);
-module_exit(interact_exit);
+module_gameport_driver(interact_drv);
diff --git a/drivers/input/joystick/joydump.c b/drivers/input/joystick/joydump.c
index cd894a0564a2..7eb878bab968 100644
--- a/drivers/input/joystick/joydump.c
+++ b/drivers/input/joystick/joydump.c
@@ -159,15 +159,4 @@ static struct gameport_driver joydump_drv = {
.disconnect = joydump_disconnect,
};
-static int __init joydump_init(void)
-{
- return gameport_register_driver(&joydump_drv);
-}
-
-static void __exit joydump_exit(void)
-{
- gameport_unregister_driver(&joydump_drv);
-}
-
-module_init(joydump_init);
-module_exit(joydump_exit);
+module_gameport_driver(joydump_drv);
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c
index 40e40780747d..9fb153eef2fc 100644
--- a/drivers/input/joystick/magellan.c
+++ b/drivers/input/joystick/magellan.c
@@ -222,19 +222,4 @@ static struct serio_driver magellan_drv = {
.disconnect = magellan_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init magellan_init(void)
-{
- return serio_register_driver(&magellan_drv);
-}
-
-static void __exit magellan_exit(void)
-{
- serio_unregister_driver(&magellan_drv);
-}
-
-module_init(magellan_init);
-module_exit(magellan_exit);
+module_serio_driver(magellan_drv);
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index b8d86115644b..04c69af37148 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -820,15 +820,4 @@ static struct gameport_driver sw_drv = {
.disconnect = sw_disconnect,
};
-static int __init sw_init(void)
-{
- return gameport_register_driver(&sw_drv);
-}
-
-static void __exit sw_exit(void)
-{
- gameport_unregister_driver(&sw_drv);
-}
-
-module_init(sw_init);
-module_exit(sw_exit);
+module_gameport_driver(sw_drv);
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index 0cd9b29356a8..80a7b27a457a 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -296,19 +296,4 @@ static struct serio_driver spaceball_drv = {
.disconnect = spaceball_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init spaceball_init(void)
-{
- return serio_register_driver(&spaceball_drv);
-}
-
-static void __exit spaceball_exit(void)
-{
- serio_unregister_driver(&spaceball_drv);
-}
-
-module_init(spaceball_init);
-module_exit(spaceball_exit);
+module_serio_driver(spaceball_drv);
diff --git a/drivers/input/joystick/spaceorb.c b/drivers/input/joystick/spaceorb.c
index a694bf8e557b..a41f291652e6 100644
--- a/drivers/input/joystick/spaceorb.c
+++ b/drivers/input/joystick/spaceorb.c
@@ -237,19 +237,4 @@ static struct serio_driver spaceorb_drv = {
.disconnect = spaceorb_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init spaceorb_init(void)
-{
- return serio_register_driver(&spaceorb_drv);
-}
-
-static void __exit spaceorb_exit(void)
-{
- serio_unregister_driver(&spaceorb_drv);
-}
-
-module_init(spaceorb_init);
-module_exit(spaceorb_exit);
+module_serio_driver(spaceorb_drv);
diff --git a/drivers/input/joystick/stinger.c b/drivers/input/joystick/stinger.c
index e0db9f5e4b41..0f51a60e14a7 100644
--- a/drivers/input/joystick/stinger.c
+++ b/drivers/input/joystick/stinger.c
@@ -208,19 +208,4 @@ static struct serio_driver stinger_drv = {
.disconnect = stinger_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init stinger_init(void)
-{
- return serio_register_driver(&stinger_drv);
-}
-
-static void __exit stinger_exit(void)
-{
- serio_unregister_driver(&stinger_drv);
-}
-
-module_init(stinger_init);
-module_exit(stinger_exit);
+module_serio_driver(stinger_drv);
diff --git a/drivers/input/joystick/tmdc.c b/drivers/input/joystick/tmdc.c
index d6c609807115..5ef9bcdb0345 100644
--- a/drivers/input/joystick/tmdc.c
+++ b/drivers/input/joystick/tmdc.c
@@ -436,15 +436,4 @@ static struct gameport_driver tmdc_drv = {
.disconnect = tmdc_disconnect,
};
-static int __init tmdc_init(void)
-{
- return gameport_register_driver(&tmdc_drv);
-}
-
-static void __exit tmdc_exit(void)
-{
- gameport_unregister_driver(&tmdc_drv);
-}
-
-module_init(tmdc_init);
-module_exit(tmdc_exit);
+module_gameport_driver(tmdc_drv);
diff --git a/drivers/input/joystick/twidjoy.c b/drivers/input/joystick/twidjoy.c
index 3f4ec73c9553..2556a8193579 100644
--- a/drivers/input/joystick/twidjoy.c
+++ b/drivers/input/joystick/twidjoy.c
@@ -257,19 +257,4 @@ static struct serio_driver twidjoy_drv = {
.disconnect = twidjoy_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init twidjoy_init(void)
-{
- return serio_register_driver(&twidjoy_drv);
-}
-
-static void __exit twidjoy_exit(void)
-{
- serio_unregister_driver(&twidjoy_drv);
-}
-
-module_init(twidjoy_init);
-module_exit(twidjoy_exit);
+module_serio_driver(twidjoy_drv);
diff --git a/drivers/input/joystick/warrior.c b/drivers/input/joystick/warrior.c
index f72c83e15e60..23b3071abb6e 100644
--- a/drivers/input/joystick/warrior.c
+++ b/drivers/input/joystick/warrior.c
@@ -217,19 +217,4 @@ static struct serio_driver warrior_drv = {
.disconnect = warrior_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init warrior_init(void)
-{
- return serio_register_driver(&warrior_drv);
-}
-
-static void __exit warrior_exit(void)
-{
- serio_unregister_driver(&warrior_drv);
-}
-
-module_init(warrior_init);
-module_exit(warrior_exit);
+module_serio_driver(warrior_drv);
diff --git a/drivers/input/joystick/zhenhua.c b/drivers/input/joystick/zhenhua.c
index b5853125c898..c4de4388fd7f 100644
--- a/drivers/input/joystick/zhenhua.c
+++ b/drivers/input/joystick/zhenhua.c
@@ -225,19 +225,4 @@ static struct serio_driver zhenhua_drv = {
.disconnect = zhenhua_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init zhenhua_init(void)
-{
- return serio_register_driver(&zhenhua_drv);
-}
-
-static void __exit zhenhua_exit(void)
-{
- serio_unregister_driver(&zhenhua_drv);
-}
-
-module_init(zhenhua_init);
-module_exit(zhenhua_exit);
+module_serio_driver(zhenhua_drv);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index f354813a13e8..c0e11ecc646f 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -166,6 +166,7 @@ config KEYBOARD_LKKBD
config KEYBOARD_EP93XX
tristate "EP93xx Matrix Keypad support"
depends on ARCH_EP93XX
+ select INPUT_MATRIXKMAP
help
Say Y here to enable the matrix keypad on the Cirrus EP93XX.
@@ -224,6 +225,7 @@ config KEYBOARD_TCA6416
config KEYBOARD_TCA8418
tristate "TCA8418 Keypad Support"
depends on I2C
+ select INPUT_MATRIXKMAP
help
This driver implements basic keypad functionality
for keys connected through TCA8418 keypad decoder.
@@ -240,6 +242,7 @@ config KEYBOARD_TCA8418
config KEYBOARD_MATRIX
tristate "GPIO driven matrix keypad support"
depends on GENERIC_GPIO
+ select INPUT_MATRIXKMAP
help
Enable support for GPIO driven matrix keypad.
@@ -309,6 +312,17 @@ config KEYBOARD_LM8323
To compile this driver as a module, choose M here: the
module will be called lm8323.
+config KEYBOARD_LM8333
+ tristate "LM8333 keypad chip"
+ depends on I2C
+ select INPUT_MATRIXKMAP
+ help
+ If you say yes here you get support for the National Semiconductor
+ LM8333 keypad controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called lm8333.
+
config KEYBOARD_LOCOMO
tristate "LoCoMo Keyboard Support"
depends on SHARP_LOCOMO
@@ -366,6 +380,7 @@ config KEYBOARD_MPR121
config KEYBOARD_IMX
tristate "IMX keypad support"
depends on ARCH_MXC
+ select INPUT_MATRIXKMAP
help
Enable support for IMX keypad port.
@@ -384,6 +399,7 @@ config KEYBOARD_NEWTON
config KEYBOARD_NOMADIK
tristate "ST-Ericsson Nomadik SKE keyboard"
depends on PLAT_NOMADIK
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use a keypad provided on the SKE controller
used on the Ux500 and Nomadik platforms
@@ -394,7 +410,7 @@ config KEYBOARD_NOMADIK
config KEYBOARD_TEGRA
tristate "NVIDIA Tegra internal matrix keyboard controller support"
depends on ARCH_TEGRA
- select INPUT_OF_MATRIX_KEYMAP if USE_OF
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use a matrix keyboard connected directly
to the internal keyboard controller on Tegra SoCs.
@@ -432,6 +448,7 @@ config KEYBOARD_PXA930_ROTARY
config KEYBOARD_PMIC8XXX
tristate "Qualcomm PMIC8XXX keypad support"
depends on MFD_PM8XXX
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to enable the driver for the PMIC8XXX
keypad provided as a reference design from Qualcomm. This is intended
@@ -443,6 +460,7 @@ config KEYBOARD_PMIC8XXX
config KEYBOARD_SAMSUNG
tristate "Samsung keypad support"
depends on HAVE_CLK
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use the keypad on your Samsung mobile
device.
@@ -485,6 +503,7 @@ config KEYBOARD_SH_KEYSC
config KEYBOARD_STMPE
tristate "STMPE keypad support"
depends on MFD_STMPE
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use the keypad controller on STMPE I/O
expanders.
@@ -505,6 +524,7 @@ config KEYBOARD_DAVINCI
config KEYBOARD_OMAP
tristate "TI OMAP keypad support"
depends on (ARCH_OMAP1 || ARCH_OMAP2)
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use the OMAP keypad.
@@ -512,9 +532,10 @@ config KEYBOARD_OMAP
module will be called omap-keypad.
config KEYBOARD_OMAP4
- tristate "TI OMAP4 keypad support"
+ tristate "TI OMAP4+ keypad support"
+ select INPUT_MATRIXKMAP
help
- Say Y here if you want to use the OMAP4 keypad.
+ Say Y here if you want to use the OMAP4+ keypad.
To compile this driver as a module, choose M here: the
module will be called omap4-keypad.
@@ -522,6 +543,7 @@ config KEYBOARD_OMAP4
config KEYBOARD_SPEAR
tristate "ST SPEAR keyboard support"
depends on PLAT_SPEAR
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use the SPEAR keyboard.
@@ -531,6 +553,7 @@ config KEYBOARD_SPEAR
config KEYBOARD_TC3589X
tristate "TC3589X Keypad support"
depends on MFD_TC3589X
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use the keypad controller on
TC35892/3 I/O expander.
@@ -541,6 +564,7 @@ config KEYBOARD_TC3589X
config KEYBOARD_TNETV107X
tristate "TI TNETV107X keypad support"
depends on ARCH_DAVINCI_TNETV107X
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use the TNETV107X keypad.
@@ -550,6 +574,7 @@ config KEYBOARD_TNETV107X
config KEYBOARD_TWL4030
tristate "TI TWL4030/TWL5030/TPS659x0 keypad support"
depends on TWL4030_CORE
+ select INPUT_MATRIXKMAP
help
Say Y here if your board use the keypad controller on
TWL4030 family chips. It's safe to say enable this
@@ -573,6 +598,7 @@ config KEYBOARD_XTKBD
config KEYBOARD_W90P910
tristate "W90P910 Matrix Keypad support"
depends on ARCH_W90X900
+ select INPUT_MATRIXKMAP
help
Say Y here to enable the matrix keypad on evaluation board
based on W90P910.
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index df7061f12918..b03b02456a82 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
obj-$(CONFIG_KEYBOARD_HP7XX) += jornada720_kbd.o
obj-$(CONFIG_KEYBOARD_LKKBD) += lkkbd.o
obj-$(CONFIG_KEYBOARD_LM8323) += lm8323.o
+obj-$(CONFIG_KEYBOARD_LM8333) += lm8333.o
obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o
obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o
obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 39ebffac207e..b083bf10f139 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -197,6 +197,7 @@ static int __devinit adp5588_gpio_add(struct adp5588_kpad *kpad)
kpad->gc.base = gpio_data->gpio_start;
kpad->gc.label = kpad->client->name;
kpad->gc.owner = THIS_MODULE;
+ kpad->gc.names = gpio_data->names;
mutex_init(&kpad->gpio_lock);
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index e05a2e7073c6..add5ffd9fe26 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -433,7 +433,7 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
if (printk_ratelimit())
dev_warn(&serio->dev,
"Spurious %s on %s. "
- "Some program might be trying access hardware directly.\n",
+ "Some program might be trying to access hardware directly.\n",
data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys);
goto out;
case ATKBD_RET_ERR:
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 0ba69f3fcb52..c46fc8185469 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -182,16 +182,10 @@ static void ep93xx_keypad_close(struct input_dev *pdev)
}
-#ifdef CONFIG_PM
-/*
- * NOTE: I don't know if this is correct, or will work on the ep93xx.
- *
- * None of the existing ep93xx drivers have power management support.
- * But, this is basically what the pxa27x_keypad driver does.
- */
-static int ep93xx_keypad_suspend(struct platform_device *pdev,
- pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int ep93xx_keypad_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct ep93xx_keypad *keypad = platform_get_drvdata(pdev);
struct input_dev *input_dev = keypad->input_dev;
@@ -210,8 +204,9 @@ static int ep93xx_keypad_suspend(struct platform_device *pdev,
return 0;
}
-static int ep93xx_keypad_resume(struct platform_device *pdev)
+static int ep93xx_keypad_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct ep93xx_keypad *keypad = platform_get_drvdata(pdev);
struct input_dev *input_dev = keypad->input_dev;
@@ -232,10 +227,10 @@ static int ep93xx_keypad_resume(struct platform_device *pdev)
return 0;
}
-#else /* !CONFIG_PM */
-#define ep93xx_keypad_suspend NULL
-#define ep93xx_keypad_resume NULL
-#endif /* !CONFIG_PM */
+#endif
+
+static SIMPLE_DEV_PM_OPS(ep93xx_keypad_pm_ops,
+ ep93xx_keypad_suspend, ep93xx_keypad_resume);
static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
{
@@ -308,19 +303,16 @@ static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
input_dev->open = ep93xx_keypad_open;
input_dev->close = ep93xx_keypad_close;
input_dev->dev.parent = &pdev->dev;
- input_dev->keycode = keypad->keycodes;
- input_dev->keycodesize = sizeof(keypad->keycodes[0]);
- input_dev->keycodemax = ARRAY_SIZE(keypad->keycodes);
- input_set_drvdata(input_dev, keypad);
+ err = matrix_keypad_build_keymap(keymap_data, NULL,
+ EP93XX_MATRIX_ROWS, EP93XX_MATRIX_COLS,
+ keypad->keycodes, input_dev);
+ if (err)
+ goto failed_free_dev;
- input_dev->evbit[0] = BIT_MASK(EV_KEY);
if (keypad->pdata->flags & EP93XX_KEYPAD_AUTOREPEAT)
- input_dev->evbit[0] |= BIT_MASK(EV_REP);
-
- matrix_keypad_build_keymap(keymap_data, 3,
- input_dev->keycode, input_dev->keybit);
- platform_set_drvdata(pdev, keypad);
+ __set_bit(EV_REP, input_dev->evbit);
+ input_set_drvdata(input_dev, keypad);
err = request_irq(keypad->irq, ep93xx_keypad_irq_handler,
0, pdev->name, keypad);
@@ -331,6 +323,7 @@ static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
if (err)
goto failed_free_irq;
+ platform_set_drvdata(pdev, keypad);
device_init_wakeup(&pdev->dev, 1);
return 0;
@@ -384,11 +377,10 @@ static struct platform_driver ep93xx_keypad_driver = {
.driver = {
.name = "ep93xx-keypad",
.owner = THIS_MODULE,
+ .pm = &ep93xx_keypad_pm_ops,
},
.probe = ep93xx_keypad_probe,
.remove = __devexit_p(ep93xx_keypad_remove),
- .suspend = ep93xx_keypad_suspend,
- .resume = ep93xx_keypad_resume,
};
module_platform_driver(ep93xx_keypad_driver);
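The ep93xx change above is the standard dev_pm_ops conversion: the suspend/resume callbacks take a struct device, are built only under CONFIG_PM_SLEEP, and SIMPLE_DEV_PM_OPS() emits a dev_pm_ops that the platform driver can reference unconditionally (it is empty when sleep support is disabled). A minimal sketch of the same pattern for a hypothetical "foo" platform driver:

	#include <linux/platform_device.h>
	#include <linux/pm.h>

	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend(struct device *dev)
	{
		/* quiesce the hardware; driver state via dev_get_drvdata(dev) */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* reprogram the hardware after sleep */
		return 0;
	}
	#endif

	/* empty dev_pm_ops when CONFIG_PM_SLEEP is not set */
	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.owner	= THIS_MODULE,
			.pm	= &foo_pm_ops,
		},
	};
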
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index fed31e0947a1..589e3c258f3f 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -583,15 +583,4 @@ static struct serio_driver hil_serio_drv = {
.interrupt = hil_dev_interrupt
};
-static int __init hil_dev_init(void)
-{
- return serio_register_driver(&hil_serio_drv);
-}
-
-static void __exit hil_dev_exit(void)
-{
- serio_unregister_driver(&hil_serio_drv);
-}
-
-module_init(hil_dev_init);
-module_exit(hil_dev_exit);
+module_serio_driver(hil_serio_drv);
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index fb87b3bcadb9..6ee7421e2321 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -481,7 +481,7 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev)
}
if (keypad->rows_en_mask > ((1 << MAX_MATRIX_KEY_ROWS) - 1) ||
- keypad->cols_en_mask > ((1 << MAX_MATRIX_KEY_COLS) - 1)) {
+ keypad->cols_en_mask > ((1 << MAX_MATRIX_KEY_COLS) - 1)) {
dev_err(&pdev->dev,
"invalid key data (too many rows or colums)\n");
error = -EINVAL;
@@ -496,14 +496,17 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev)
input_dev->dev.parent = &pdev->dev;
input_dev->open = imx_keypad_open;
input_dev->close = imx_keypad_close;
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
- input_dev->keycode = keypad->keycodes;
- input_dev->keycodesize = sizeof(keypad->keycodes[0]);
- input_dev->keycodemax = ARRAY_SIZE(keypad->keycodes);
- matrix_keypad_build_keymap(keymap_data, MATRIX_ROW_SHIFT,
- keypad->keycodes, input_dev->keybit);
+ error = matrix_keypad_build_keymap(keymap_data, NULL,
+ MAX_MATRIX_KEY_ROWS,
+ MAX_MATRIX_KEY_COLS,
+ keypad->keycodes, input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to build keymap\n");
+ goto failed_clock_put;
+ }
+ __set_bit(EV_REP, input_dev->evbit);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_set_drvdata(input_dev, keypad);
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index fa9bb6d235e2..fc0a63c2f278 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -731,19 +731,4 @@ static struct serio_driver lkkbd_drv = {
.interrupt = lkkbd_interrupt,
};
-/*
- * The functions for insering/removing us as a module.
- */
-static int __init lkkbd_init(void)
-{
- return serio_register_driver(&lkkbd_drv);
-}
-
-static void __exit lkkbd_exit(void)
-{
- serio_unregister_driver(&lkkbd_drv);
-}
-
-module_init(lkkbd_init);
-module_exit(lkkbd_exit);
-
+module_serio_driver(lkkbd_drv);
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
new file mode 100644
index 000000000000..ca168a6679de
--- /dev/null
+++ b/drivers/input/keyboard/lm8333.c
@@ -0,0 +1,235 @@
+/*
+ * LM8333 keypad driver
+ * Copyright (C) 2012 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/input/lm8333.h>
+
+#define LM8333_FIFO_READ 0x20
+#define LM8333_DEBOUNCE 0x22
+#define LM8333_READ_INT 0xD0
+#define LM8333_ACTIVE 0xE4
+#define LM8333_READ_ERROR 0xF0
+
+#define LM8333_KEYPAD_IRQ (1 << 0)
+#define LM8333_ERROR_IRQ (1 << 3)
+
+#define LM8333_ERROR_KEYOVR 0x04
+#define LM8333_ERROR_FIFOOVR 0x40
+
+#define LM8333_FIFO_TRANSFER_SIZE 16
+
+#define LM8333_NUM_ROWS 8
+#define LM8333_NUM_COLS 16
+#define LM8333_ROW_SHIFT 4
+
+struct lm8333 {
+ struct i2c_client *client;
+ struct input_dev *input;
+ unsigned short keycodes[LM8333_NUM_ROWS << LM8333_ROW_SHIFT];
+};
+
+/* The accessors try twice because the first access may be needed for wakeup */
+#define LM8333_READ_RETRIES 2
+
+int lm8333_read8(struct lm8333 *lm8333, u8 cmd)
+{
+ int retries = 0, ret;
+
+ do {
+ ret = i2c_smbus_read_byte_data(lm8333->client, cmd);
+ } while (ret < 0 && retries++ < LM8333_READ_RETRIES);
+
+ return ret;
+}
+
+int lm8333_write8(struct lm8333 *lm8333, u8 cmd, u8 val)
+{
+ int retries = 0, ret;
+
+ do {
+ ret = i2c_smbus_write_byte_data(lm8333->client, cmd, val);
+ } while (ret < 0 && retries++ < LM8333_READ_RETRIES);
+
+ return ret;
+}
+
+int lm8333_read_block(struct lm8333 *lm8333, u8 cmd, u8 len, u8 *buf)
+{
+ int retries = 0, ret;
+
+ do {
+ ret = i2c_smbus_read_i2c_block_data(lm8333->client,
+ cmd, len, buf);
+ } while (ret < 0 && retries++ < LM8333_READ_RETRIES);
+
+ return ret;
+}
+
+static void lm8333_key_handler(struct lm8333 *lm8333)
+{
+ struct input_dev *input = lm8333->input;
+ u8 keys[LM8333_FIFO_TRANSFER_SIZE];
+ u8 code, pressed;
+ int i, ret;
+
+ ret = lm8333_read_block(lm8333, LM8333_FIFO_READ,
+ LM8333_FIFO_TRANSFER_SIZE, keys);
+ if (ret != LM8333_FIFO_TRANSFER_SIZE) {
+ dev_err(&lm8333->client->dev,
+ "Error %d while reading FIFO\n", ret);
+ return;
+ }
+
+ for (i = 0; keys[i] && i < LM8333_FIFO_TRANSFER_SIZE; i++) {
+ pressed = keys[i] & 0x80;
+ code = keys[i] & 0x7f;
+
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, lm8333->keycodes[code], pressed);
+ }
+
+ input_sync(input);
+}
+
+static irqreturn_t lm8333_irq_thread(int irq, void *data)
+{
+ struct lm8333 *lm8333 = data;
+ u8 status = lm8333_read8(lm8333, LM8333_READ_INT);
+
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & LM8333_ERROR_IRQ) {
+ u8 err = lm8333_read8(lm8333, LM8333_READ_ERROR);
+
+ if (err & (LM8333_ERROR_KEYOVR | LM8333_ERROR_FIFOOVR)) {
+ u8 dummy[LM8333_FIFO_TRANSFER_SIZE];
+
+ lm8333_read_block(lm8333, LM8333_FIFO_READ,
+ LM8333_FIFO_TRANSFER_SIZE, dummy);
+ }
+ dev_err(&lm8333->client->dev, "Got error %02x\n", err);
+ }
+
+ if (status & LM8333_KEYPAD_IRQ)
+ lm8333_key_handler(lm8333);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit lm8333_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct lm8333_platform_data *pdata = client->dev.platform_data;
+ struct lm8333 *lm8333;
+ struct input_dev *input;
+ int err, active_time;
+
+ if (!pdata)
+ return -EINVAL;
+
+ active_time = pdata->active_time ?: 500;
+ if (active_time / 3 <= pdata->debounce_time / 3) {
+ dev_err(&client->dev, "Active time not big enough!\n");
+ return -EINVAL;
+ }
+
+ lm8333 = kzalloc(sizeof(*lm8333), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!lm8333 || !input) {
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+ lm8333->client = client;
+ lm8333->input = input;
+
+ input->name = client->name;
+ input->dev.parent = &client->dev;
+ input->id.bustype = BUS_I2C;
+
+ input_set_capability(input, EV_MSC, MSC_SCAN);
+
+ err = matrix_keypad_build_keymap(pdata->matrix_data, NULL,
+ LM8333_NUM_ROWS, LM8333_NUM_COLS,
+ lm8333->keycodes, input);
+ if (err)
+ goto free_mem;
+
+ if (pdata->debounce_time) {
+ err = lm8333_write8(lm8333, LM8333_DEBOUNCE,
+ pdata->debounce_time / 3);
+ if (err)
+ dev_warn(&client->dev, "Unable to set debounce time\n");
+ }
+
+ if (pdata->active_time) {
+ err = lm8333_write8(lm8333, LM8333_ACTIVE,
+ pdata->active_time / 3);
+ if (err)
+ dev_warn(&client->dev, "Unable to set active time\n");
+ }
+
+ err = request_threaded_irq(client->irq, NULL, lm8333_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "lm8333", lm8333);
+ if (err)
+ goto free_mem;
+
+ err = input_register_device(input);
+ if (err)
+ goto free_irq;
+
+ i2c_set_clientdata(client, lm8333);
+ return 0;
+
+ free_irq:
+ free_irq(client->irq, lm8333);
+ free_mem:
+ input_free_device(input);
+ kfree(lm8333);
+ return err;
+}
+
+static int __devexit lm8333_remove(struct i2c_client *client)
+{
+ struct lm8333 *lm8333 = i2c_get_clientdata(client);
+
+ free_irq(client->irq, lm8333);
+ input_unregister_device(lm8333->input);
+ kfree(lm8333);
+
+ return 0;
+}
+
+static const struct i2c_device_id lm8333_id[] = {
+ { "lm8333", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lm8333_id);
+
+static struct i2c_driver lm8333_driver = {
+ .driver = {
+ .name = "lm8333",
+ .owner = THIS_MODULE,
+ },
+ .probe = lm8333_probe,
+ .remove = __devexit_p(lm8333_remove),
+ .id_table = lm8333_id,
+};
+module_i2c_driver(lm8333_driver);
+
+MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_DESCRIPTION("LM8333 keyboard driver");
+MODULE_LICENSE("GPL v2");
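The new LM8333 driver is configured purely through platform data: a matrix keymap plus optional debounce and active times (the driver writes value/3 to the chip registers). The fragment below is a hypothetical board-file sketch -- keymap entries, I2C bus, address and IRQ are illustrative assumptions, not values taken from this series:

	#include <linux/i2c.h>
	#include <linux/input.h>
	#include <linux/input/matrix_keypad.h>
	#include <linux/input/lm8333.h>

	static const uint32_t board_lm8333_keys[] = {
		KEY(0, 0, KEY_A),
		KEY(0, 1, KEY_B),
		KEY(1, 0, KEY_ENTER),
	};

	static const struct matrix_keymap_data board_lm8333_keymap = {
		.keymap		= board_lm8333_keys,
		.keymap_size	= ARRAY_SIZE(board_lm8333_keys),
	};

	static struct lm8333_platform_data board_lm8333_pdata = {
		.matrix_data	= &board_lm8333_keymap,
		.debounce_time	= 3,
		.active_time	= 500,
	};

	static struct i2c_board_info board_i2c_devs[] __initdata = {
		{
			I2C_BOARD_INFO("lm8333", 0x50),	/* address is an assumption */
			.platform_data	= &board_lm8333_pdata,
			.irq		= 42,		/* board-specific IRQ */
		},
	};

	/* from board init code:
	 * i2c_register_board_info(0, board_i2c_devs, ARRAY_SIZE(board_i2c_devs));
	 */
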
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 9b223d73de32..18b72372028a 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -27,7 +27,6 @@
struct matrix_keypad {
const struct matrix_keypad_platform_data *pdata;
struct input_dev *input_dev;
- unsigned short *keycodes;
unsigned int row_shift;
DECLARE_BITMAP(disabled_gpios, MATRIX_MAX_ROWS);
@@ -38,6 +37,8 @@ struct matrix_keypad {
bool scan_pending;
bool stopped;
bool gpio_all_disabled;
+
+ unsigned short keycodes[];
};
/*
@@ -224,7 +225,7 @@ static void matrix_keypad_stop(struct input_dev *dev)
disable_row_irqs(keypad);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static void matrix_keypad_enable_wakeup(struct matrix_keypad *keypad)
{
const struct matrix_keypad_platform_data *pdata = keypad->pdata;
@@ -293,16 +294,16 @@ static int matrix_keypad_resume(struct device *dev)
return 0;
}
-
-static const SIMPLE_DEV_PM_OPS(matrix_keypad_pm_ops,
- matrix_keypad_suspend, matrix_keypad_resume);
#endif
-static int __devinit init_matrix_gpio(struct platform_device *pdev,
- struct matrix_keypad *keypad)
+static SIMPLE_DEV_PM_OPS(matrix_keypad_pm_ops,
+ matrix_keypad_suspend, matrix_keypad_resume);
+
+static int __devinit matrix_keypad_init_gpio(struct platform_device *pdev,
+ struct matrix_keypad *keypad)
{
const struct matrix_keypad_platform_data *pdata = keypad->pdata;
- int i, err = -EINVAL;
+ int i, err;
/* initialized strobe lines as outputs, activated */
for (i = 0; i < pdata->num_col_gpios; i++) {
@@ -348,8 +349,7 @@ static int __devinit init_matrix_gpio(struct platform_device *pdev,
"matrix-keypad", keypad);
if (err) {
dev_err(&pdev->dev,
- "Unable to acquire interrupt "
- "for GPIO line %i\n",
+ "Unable to acquire interrupt for GPIO line %i\n",
pdata->row_gpios[i]);
goto err_free_irqs;
}
@@ -375,14 +375,33 @@ err_free_cols:
return err;
}
+static void matrix_keypad_free_gpio(struct matrix_keypad *keypad)
+{
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ int i;
+
+ if (pdata->clustered_irq > 0) {
+ free_irq(pdata->clustered_irq, keypad);
+ } else {
+ for (i = 0; i < pdata->num_row_gpios; i++)
+ free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad);
+ }
+
+ for (i = 0; i < pdata->num_row_gpios; i++)
+ gpio_free(pdata->row_gpios[i]);
+
+ for (i = 0; i < pdata->num_col_gpios; i++)
+ gpio_free(pdata->col_gpios[i]);
+}
+
static int __devinit matrix_keypad_probe(struct platform_device *pdev)
{
const struct matrix_keypad_platform_data *pdata;
const struct matrix_keymap_data *keymap_data;
struct matrix_keypad *keypad;
struct input_dev *input_dev;
- unsigned short *keycodes;
unsigned int row_shift;
+ size_t keymap_size;
int err;
pdata = pdev->dev.platform_data;
@@ -398,20 +417,18 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev)
}
row_shift = get_count_order(pdata->num_col_gpios);
-
- keypad = kzalloc(sizeof(struct matrix_keypad), GFP_KERNEL);
- keycodes = kzalloc((pdata->num_row_gpios << row_shift) *
- sizeof(*keycodes),
- GFP_KERNEL);
+ keymap_size = (pdata->num_row_gpios << row_shift) *
+ sizeof(keypad->keycodes[0]);
+ keypad = kzalloc(sizeof(struct matrix_keypad) + keymap_size,
+ GFP_KERNEL);
input_dev = input_allocate_device();
- if (!keypad || !keycodes || !input_dev) {
+ if (!keypad || !input_dev) {
err = -ENOMEM;
goto err_free_mem;
}
keypad->input_dev = input_dev;
keypad->pdata = pdata;
- keypad->keycodes = keycodes;
keypad->row_shift = row_shift;
keypad->stopped = true;
INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan);
@@ -420,38 +437,38 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev)
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
input_dev->dev.parent = &pdev->dev;
- input_dev->evbit[0] = BIT_MASK(EV_KEY);
- if (!pdata->no_autorepeat)
- input_dev->evbit[0] |= BIT_MASK(EV_REP);
input_dev->open = matrix_keypad_start;
input_dev->close = matrix_keypad_stop;
- input_dev->keycode = keycodes;
- input_dev->keycodesize = sizeof(*keycodes);
- input_dev->keycodemax = pdata->num_row_gpios << row_shift;
-
- matrix_keypad_build_keymap(keymap_data, row_shift,
- input_dev->keycode, input_dev->keybit);
+ err = matrix_keypad_build_keymap(keymap_data, NULL,
+ pdata->num_row_gpios,
+ pdata->num_col_gpios,
+ keypad->keycodes, input_dev);
+ if (err)
+ goto err_free_mem;
+ if (!pdata->no_autorepeat)
+ __set_bit(EV_REP, input_dev->evbit);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_set_drvdata(input_dev, keypad);
- err = init_matrix_gpio(pdev, keypad);
+ err = matrix_keypad_init_gpio(pdev, keypad);
if (err)
goto err_free_mem;
err = input_register_device(keypad->input_dev);
if (err)
- goto err_free_mem;
+ goto err_free_gpio;
device_init_wakeup(&pdev->dev, pdata->wakeup);
platform_set_drvdata(pdev, keypad);
return 0;
+err_free_gpio:
+ matrix_keypad_free_gpio(keypad);
err_free_mem:
input_free_device(input_dev);
- kfree(keycodes);
kfree(keypad);
return err;
}
@@ -459,29 +476,15 @@ err_free_mem:
static int __devexit matrix_keypad_remove(struct platform_device *pdev)
{
struct matrix_keypad *keypad = platform_get_drvdata(pdev);
- const struct matrix_keypad_platform_data *pdata = keypad->pdata;
- int i;
device_init_wakeup(&pdev->dev, 0);
- if (pdata->clustered_irq > 0) {
- free_irq(pdata->clustered_irq, keypad);
- } else {
- for (i = 0; i < pdata->num_row_gpios; i++)
- free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad);
- }
-
- for (i = 0; i < pdata->num_row_gpios; i++)
- gpio_free(pdata->row_gpios[i]);
-
- for (i = 0; i < pdata->num_col_gpios; i++)
- gpio_free(pdata->col_gpios[i]);
-
+ matrix_keypad_free_gpio(keypad);
input_unregister_device(keypad->input_dev);
- platform_set_drvdata(pdev, NULL);
- kfree(keypad->keycodes);
kfree(keypad);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
@@ -491,9 +494,7 @@ static struct platform_driver matrix_keypad_driver = {
.driver = {
.name = "matrix-keypad",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &matrix_keypad_pm_ops,
-#endif
},
};
module_platform_driver(matrix_keypad_driver);
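The thread running through these keyboard conversions is the reworked matrix_keypad_build_keymap() helper: it takes the matrix dimensions, decodes the platform keymap (and, when keymap_data is NULL, can pick up a device-tree keymap; the second argument names the DT property, with NULL selecting the default), fills the driver-supplied keycodes array, sets EV_KEY plus the key bits and the keycode/keycodesize/keycodemax fields on the input device, and returns an error that callers must now check. A condensed sketch of the call pattern, assuming a driver with ROWS/COLS constants and a suitably sized keycodes[] array:

	error = matrix_keypad_build_keymap(keymap_data, NULL,
					   ROWS, COLS,
					   keypad->keycodes, input_dev);
	if (error)
		goto err_free_mem;

	/* autorepeat and scancode reporting are still opted into explicitly */
	if (!pdata->no_autorepeat)
		__set_bit(EV_REP, input_dev->evbit);
	input_set_capability(input_dev, EV_MSC, MSC_SCAN);
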
diff --git a/drivers/input/keyboard/newtonkbd.c b/drivers/input/keyboard/newtonkbd.c
index 48d1cab0aa1c..f971898ad591 100644
--- a/drivers/input/keyboard/newtonkbd.c
+++ b/drivers/input/keyboard/newtonkbd.c
@@ -166,15 +166,4 @@ static struct serio_driver nkbd_drv = {
.disconnect = nkbd_disconnect,
};
-static int __init nkbd_init(void)
-{
- return serio_register_driver(&nkbd_drv);
-}
-
-static void __exit nkbd_exit(void)
-{
- serio_unregister_driver(&nkbd_drv);
-}
-
-module_init(nkbd_init);
-module_exit(nkbd_exit);
+module_serio_driver(nkbd_drv);
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index 101e245944e7..4ea4341a68c5 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -39,7 +39,8 @@
#define SKE_KPRISA (0x1 << 2)
#define SKE_KEYPAD_ROW_SHIFT 3
-#define SKE_KPD_KEYMAP_SIZE (8 * 8)
+#define SKE_KPD_NUM_ROWS 8
+#define SKE_KPD_NUM_COLS 8
/* keypad auto scan registers */
#define SKE_ASR0 0x20
@@ -63,7 +64,7 @@ struct ske_keypad {
void __iomem *reg_base;
struct input_dev *input;
const struct ske_keypad_platform_data *board;
- unsigned short keymap[SKE_KPD_KEYMAP_SIZE];
+ unsigned short keymap[SKE_KPD_NUM_ROWS * SKE_KPD_NUM_COLS];
struct clk *clk;
spinlock_t ske_keypad_lock;
};
@@ -261,19 +262,18 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
input->name = "ux500-ske-keypad";
input->dev.parent = &pdev->dev;
- input->keycode = keypad->keymap;
- input->keycodesize = sizeof(keypad->keymap[0]);
- input->keycodemax = ARRAY_SIZE(keypad->keymap);
+ error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
+ SKE_KPD_NUM_ROWS, SKE_KPD_NUM_COLS,
+ keypad->keymap, input);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to build keymap\n");
+ goto err_iounmap;
+ }
input_set_capability(input, EV_MSC, MSC_SCAN);
-
- __set_bit(EV_KEY, input->evbit);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- matrix_keypad_build_keymap(plat->keymap_data, SKE_KEYPAD_ROW_SHIFT,
- input->keycode, input->keybit);
-
clk_enable(keypad->clk);
/* go through board initialization helpers */
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 6b630d9d3dff..a0222db4dc86 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -61,6 +61,7 @@ struct omap_kp {
unsigned int cols;
unsigned long delay;
unsigned int debounce;
+ unsigned short keymap[];
};
static DECLARE_TASKLET_DISABLED(kp_tasklet, omap_kp_tasklet, 0);
@@ -316,13 +317,6 @@ static int __devinit omap_kp_probe(struct platform_device *pdev)
if (!cpu_is_omap24xx())
omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
- input_dev->keycode = &omap_kp[1];
- input_dev->keycodesize = sizeof(unsigned short);
- input_dev->keycodemax = keycodemax;
-
- if (pdata->rep)
- __set_bit(EV_REP, input_dev->evbit);
-
if (pdata->delay)
omap_kp->delay = pdata->delay;
@@ -371,9 +365,6 @@ static int __devinit omap_kp_probe(struct platform_device *pdev)
goto err2;
/* setup input device */
- __set_bit(EV_KEY, input_dev->evbit);
- matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
- input_dev->keycode, input_dev->keybit);
input_dev->name = "omap-keypad";
input_dev->phys = "omap-keypad/input0";
input_dev->dev.parent = &pdev->dev;
@@ -383,6 +374,15 @@ static int __devinit omap_kp_probe(struct platform_device *pdev)
input_dev->id.product = 0x0001;
input_dev->id.version = 0x0100;
+ if (pdata->rep)
+ __set_bit(EV_REP, input_dev->evbit);
+
+ ret = matrix_keypad_build_keymap(pdata->keymap_data, NULL,
+ pdata->rows, pdata->cols,
+ omap_kp->keymap, input_dev);
+ if (ret < 0)
+ goto err3;
+
ret = input_register_device(omap_kp->input);
if (ret < 0) {
printk(KERN_ERR "Unable to register omap-keypad input device\n");
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index e809ac095a38..aed5f6999ce2 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -68,19 +68,52 @@
#define OMAP4_MASK_IRQSTATUSDISABLE 0xFFFF
+enum {
+ KBD_REVISION_OMAP4 = 0,
+ KBD_REVISION_OMAP5,
+};
+
struct omap4_keypad {
struct input_dev *input;
void __iomem *base;
- int irq;
+ unsigned int irq;
unsigned int rows;
unsigned int cols;
+ u32 reg_offset;
+ u32 irqreg_offset;
unsigned int row_shift;
unsigned char key_state[8];
unsigned short keymap[];
};
+static int kbd_readl(struct omap4_keypad *keypad_data, u32 offset)
+{
+ return __raw_readl(keypad_data->base +
+ keypad_data->reg_offset + offset);
+}
+
+static void kbd_writel(struct omap4_keypad *keypad_data, u32 offset, u32 value)
+{
+ __raw_writel(value,
+ keypad_data->base + keypad_data->reg_offset + offset);
+}
+
+static int kbd_read_irqreg(struct omap4_keypad *keypad_data, u32 offset)
+{
+ return __raw_readl(keypad_data->base +
+ keypad_data->irqreg_offset + offset);
+}
+
+static void kbd_write_irqreg(struct omap4_keypad *keypad_data,
+ u32 offset, u32 value)
+{
+ __raw_writel(value,
+ keypad_data->base + keypad_data->irqreg_offset + offset);
+}
+
+
/* Interrupt handler */
static irqreturn_t omap4_keypad_interrupt(int irq, void *dev_id)
{
@@ -91,12 +124,11 @@ static irqreturn_t omap4_keypad_interrupt(int irq, void *dev_id)
u32 *new_state = (u32 *) key_state;
/* Disable interrupts */
- __raw_writel(OMAP4_VAL_IRQDISABLE,
- keypad_data->base + OMAP4_KBD_IRQENABLE);
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
+ OMAP4_VAL_IRQDISABLE);
- *new_state = __raw_readl(keypad_data->base + OMAP4_KBD_FULLCODE31_0);
- *(new_state + 1) = __raw_readl(keypad_data->base
- + OMAP4_KBD_FULLCODE63_32);
+ *new_state = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE31_0);
+ *(new_state + 1) = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE63_32);
for (row = 0; row < keypad_data->rows; row++) {
changed = key_state[row] ^ keypad_data->key_state[row];
@@ -121,12 +153,13 @@ static irqreturn_t omap4_keypad_interrupt(int irq, void *dev_id)
sizeof(keypad_data->key_state));
/* clear pending interrupts */
- __raw_writel(__raw_readl(keypad_data->base + OMAP4_KBD_IRQSTATUS),
- keypad_data->base + OMAP4_KBD_IRQSTATUS);
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
+ kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
/* enable interrupts */
- __raw_writel(OMAP4_DEF_IRQENABLE_EVENTEN | OMAP4_DEF_IRQENABLE_LONGKEY,
- keypad_data->base + OMAP4_KBD_IRQENABLE);
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
+ OMAP4_DEF_IRQENABLE_EVENTEN |
+ OMAP4_DEF_IRQENABLE_LONGKEY);
return IRQ_HANDLED;
}
@@ -139,16 +172,17 @@ static int omap4_keypad_open(struct input_dev *input)
disable_irq(keypad_data->irq);
- __raw_writel(OMAP4_VAL_FUNCTIONALCFG,
- keypad_data->base + OMAP4_KBD_CTRL);
- __raw_writel(OMAP4_VAL_DEBOUNCINGTIME,
- keypad_data->base + OMAP4_KBD_DEBOUNCINGTIME);
- __raw_writel(OMAP4_VAL_IRQDISABLE,
- keypad_data->base + OMAP4_KBD_IRQSTATUS);
- __raw_writel(OMAP4_DEF_IRQENABLE_EVENTEN | OMAP4_DEF_IRQENABLE_LONGKEY,
- keypad_data->base + OMAP4_KBD_IRQENABLE);
- __raw_writel(OMAP4_DEF_WUP_EVENT_ENA | OMAP4_DEF_WUP_LONG_KEY_ENA,
- keypad_data->base + OMAP4_KBD_WAKEUPENABLE);
+ kbd_writel(keypad_data, OMAP4_KBD_CTRL,
+ OMAP4_VAL_FUNCTIONALCFG);
+ kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME,
+ OMAP4_VAL_DEBOUNCINGTIME);
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
+ OMAP4_VAL_IRQDISABLE);
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
+ OMAP4_DEF_IRQENABLE_EVENTEN |
+ OMAP4_DEF_IRQENABLE_LONGKEY);
+ kbd_writel(keypad_data, OMAP4_KBD_WAKEUPENABLE,
+ OMAP4_DEF_WUP_EVENT_ENA | OMAP4_DEF_WUP_LONG_KEY_ENA);
enable_irq(keypad_data->irq);
@@ -162,12 +196,12 @@ static void omap4_keypad_close(struct input_dev *input)
disable_irq(keypad_data->irq);
/* Disable interrupts */
- __raw_writel(OMAP4_VAL_IRQDISABLE,
- keypad_data->base + OMAP4_KBD_IRQENABLE);
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
+ OMAP4_VAL_IRQDISABLE);
/* clear pending interrupts */
- __raw_writel(__raw_readl(keypad_data->base + OMAP4_KBD_IRQSTATUS),
- keypad_data->base + OMAP4_KBD_IRQSTATUS);
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
+ kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
enable_irq(keypad_data->irq);
@@ -182,6 +216,7 @@ static int __devinit omap4_keypad_probe(struct platform_device *pdev)
struct resource *res;
resource_size_t size;
unsigned int row_shift, max_keys;
+ int rev;
int irq;
int error;
@@ -241,11 +276,40 @@ static int __devinit omap4_keypad_probe(struct platform_device *pdev)
keypad_data->rows = pdata->rows;
keypad_data->cols = pdata->cols;
+ /*
+ * Enable clocks for the keypad module so that we can read
+ * the revision register.
+ */
+ pm_runtime_enable(&pdev->dev);
+ error = pm_runtime_get_sync(&pdev->dev);
+ if (error) {
+ dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
+ goto err_unmap;
+ }
+ rev = __raw_readl(keypad_data->base + OMAP4_KBD_REVISION);
+ rev &= 0x03 << 30;
+ rev >>= 30;
+ switch (rev) {
+ case KBD_REVISION_OMAP4:
+ keypad_data->reg_offset = 0x00;
+ keypad_data->irqreg_offset = 0x00;
+ break;
+ case KBD_REVISION_OMAP5:
+ keypad_data->reg_offset = 0x10;
+ keypad_data->irqreg_offset = 0x0c;
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "Keypad reports unsupported revision %d", rev);
+ error = -EINVAL;
+ goto err_pm_put_sync;
+ }
+
/* input device allocation */
keypad_data->input = input_dev = input_allocate_device();
if (!input_dev) {
error = -ENOMEM;
- goto err_unmap;
+ goto err_pm_put_sync;
}
input_dev->name = pdev->name;
@@ -258,20 +322,19 @@ static int __devinit omap4_keypad_probe(struct platform_device *pdev)
input_dev->open = omap4_keypad_open;
input_dev->close = omap4_keypad_close;
- input_dev->keycode = keypad_data->keymap;
- input_dev->keycodesize = sizeof(keypad_data->keymap[0]);
- input_dev->keycodemax = max_keys;
+ error = matrix_keypad_build_keymap(pdata->keymap_data, NULL,
+ pdata->rows, pdata->cols,
+ keypad_data->keymap, input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to build keymap\n");
+ goto err_free_input;
+ }
- __set_bit(EV_KEY, input_dev->evbit);
__set_bit(EV_REP, input_dev->evbit);
-
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_set_drvdata(input_dev, keypad_data);
- matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
- input_dev->keycode, input_dev->keybit);
-
error = request_irq(keypad_data->irq, omap4_keypad_interrupt,
IRQF_TRIGGER_RISING,
"omap4-keypad", keypad_data);
@@ -280,7 +343,7 @@ static int __devinit omap4_keypad_probe(struct platform_device *pdev)
goto err_free_input;
}
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
error = input_register_device(keypad_data->input);
if (error < 0) {
@@ -296,6 +359,8 @@ err_pm_disable:
free_irq(keypad_data->irq, keypad_data);
err_free_input:
input_free_device(input_dev);
+err_pm_put_sync:
+ pm_runtime_put_sync(&pdev->dev);
err_unmap:
iounmap(keypad_data->base);
err_release_mem:
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 01a1c9f8a383..52c34657d301 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -626,21 +626,21 @@ static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
kp->input->id.product = 0x0001;
kp->input->id.vendor = 0x0001;
- kp->input->evbit[0] = BIT_MASK(EV_KEY);
-
- if (pdata->rep)
- __set_bit(EV_REP, kp->input->evbit);
-
- kp->input->keycode = kp->keycodes;
- kp->input->keycodemax = PM8XXX_MATRIX_MAX_SIZE;
- kp->input->keycodesize = sizeof(kp->keycodes);
kp->input->open = pmic8xxx_kp_open;
kp->input->close = pmic8xxx_kp_close;
- matrix_keypad_build_keymap(keymap_data, PM8XXX_ROW_SHIFT,
- kp->input->keycode, kp->input->keybit);
+ rc = matrix_keypad_build_keymap(keymap_data, NULL,
+ PM8XXX_MAX_ROWS, PM8XXX_MAX_COLS,
+ kp->keycodes, kp->input);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to build keymap\n");
+ goto err_get_irq;
+ }
+ if (pdata->rep)
+ __set_bit(EV_REP, kp->input->evbit);
input_set_capability(kp->input, EV_MSC, MSC_SCAN);
+
input_set_drvdata(kp->input, kp);
/* initialize keypad state */
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 29fe1b2be1c1..7f7b72464a37 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -311,7 +311,15 @@ static void pxa27x_keypad_scan_direct(struct pxa27x_keypad *keypad)
if (pdata->enable_rotary0 || pdata->enable_rotary1)
pxa27x_keypad_scan_rotary(keypad);
- new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
+ /*
+ * KPDK_DK only reports the raw key pin level, which is board-specific;
+ * the active level may be low.
+ */
+ if (pdata->direct_key_low_active)
+ new_state = ~KPDK_DK(kpdk) & keypad->direct_key_mask;
+ else
+ new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
+
bits_changed = keypad->direct_key_state ^ new_state;
if (bits_changed == 0)
@@ -383,7 +391,14 @@ static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
if (pdata->direct_key_num > direct_key_num)
direct_key_num = pdata->direct_key_num;
- keypad->direct_key_mask = ((2 << direct_key_num) - 1) & ~mask;
+ /*
+ * Direct key usage may not start from KP_DKIN0; check the platform
+ * mask data to configure the specific pins.
+ */
+ if (pdata->direct_key_mask)
+ keypad->direct_key_mask = pdata->direct_key_mask;
+ else
+ keypad->direct_key_mask = ((1 << direct_key_num) - 1) & ~mask;
/* enable direct key */
if (direct_key_num)
@@ -399,7 +414,7 @@ static int pxa27x_keypad_open(struct input_dev *dev)
struct pxa27x_keypad *keypad = input_get_drvdata(dev);
/* Enable unit clock */
- clk_enable(keypad->clk);
+ clk_prepare_enable(keypad->clk);
pxa27x_keypad_config(keypad);
return 0;
@@ -410,7 +425,7 @@ static void pxa27x_keypad_close(struct input_dev *dev)
struct pxa27x_keypad *keypad = input_get_drvdata(dev);
/* Disable clock unit */
- clk_disable(keypad->clk);
+ clk_disable_unprepare(keypad->clk);
}
#ifdef CONFIG_PM
@@ -419,10 +434,14 @@ static int pxa27x_keypad_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
- clk_disable(keypad->clk);
-
+ /*
+ * If the keypad is used as a wake-up source, the clock cannot be
+ * disabled; otherwise key presses would not be detected.
+ */
if (device_may_wakeup(&pdev->dev))
enable_irq_wake(keypad->irq);
+ else
+ clk_disable_unprepare(keypad->clk);
return 0;
}
@@ -433,19 +452,24 @@ static int pxa27x_keypad_resume(struct device *dev)
struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
struct input_dev *input_dev = keypad->input_dev;
- if (device_may_wakeup(&pdev->dev))
+ /*
+ * If the keypad is used as a wake-up source, the clock was not turned
+ * off, so there is no need to configure it again.
+ */
+ if (device_may_wakeup(&pdev->dev)) {
disable_irq_wake(keypad->irq);
+ } else {
+ mutex_lock(&input_dev->mutex);
- mutex_lock(&input_dev->mutex);
+ if (input_dev->users) {
+ /* Enable unit clock */
+ clk_prepare_enable(keypad->clk);
+ pxa27x_keypad_config(keypad);
+ }
- if (input_dev->users) {
- /* Enable unit clock */
- clk_enable(keypad->clk);
- pxa27x_keypad_config(keypad);
+ mutex_unlock(&input_dev->mutex);
}
- mutex_unlock(&input_dev->mutex);
-
return 0;
}
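Both pxa27x additions are driven from fields in struct pxa27x_keypad_platform_data: direct_key_low_active inverts the KPDK_DK sample for boards whose direct-key lines are active-low, and direct_key_mask lets direct keys sit on pins other than KP_DKIN0 upward. A hypothetical board fragment (field values, key codes and the pin choice are illustrative only; the platform-data header itself comes from the PXA platform code):

	#include <linux/input.h>

	static struct pxa27x_keypad_platform_data board_keypad_pdata = {
		.direct_key_num		= 6,
		/* direct keys wired to KP_DKIN4 and KP_DKIN5 only */
		.direct_key_mask	= (1 << 4) | (1 << 5),
		/* lines are pulled up, so a pressed key reads low */
		.direct_key_low_active	= 1,
		.direct_key_map		= { [4] = KEY_VOLUMEUP, [5] = KEY_VOLUMEDOWN },
	};
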
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index 2391ae884fee..a061ba603a29 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -454,23 +454,23 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
input_dev->dev.parent = &pdev->dev;
- input_set_drvdata(input_dev, keypad);
input_dev->open = samsung_keypad_open;
input_dev->close = samsung_keypad_close;
- input_dev->evbit[0] = BIT_MASK(EV_KEY);
- if (!pdata->no_autorepeat)
- input_dev->evbit[0] |= BIT_MASK(EV_REP);
+ error = matrix_keypad_build_keymap(keymap_data, NULL,
+ pdata->rows, pdata->cols,
+ keypad->keycodes, input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to build keymap\n");
+ goto err_put_clk;
+ }
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+ if (!pdata->no_autorepeat)
+ __set_bit(EV_REP, input_dev->evbit);
- input_dev->keycode = keypad->keycodes;
- input_dev->keycodesize = sizeof(keypad->keycodes[0]);
- input_dev->keycodemax = pdata->rows << row_shift;
-
- matrix_keypad_build_keymap(keymap_data, row_shift,
- input_dev->keycode, input_dev->keybit);
+ input_set_drvdata(input_dev, keypad);
keypad->irq = platform_get_irq(pdev, 0);
if (keypad->irq < 0) {
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index 3b6b528f02fd..6f287f7e1538 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -19,6 +19,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>
#include <linux/slab.h>
@@ -49,7 +50,9 @@
#define KEY_VALUE 0x00FFFFFF
#define ROW_MASK 0xF0
#define COLUMN_MASK 0x0F
-#define ROW_SHIFT 4
+#define NUM_ROWS 16
+#define NUM_COLS 16
+
#define KEY_MATRIX_SHIFT 6
struct spear_kbd {
@@ -60,7 +63,8 @@ struct spear_kbd {
unsigned int irq;
unsigned int mode;
unsigned short last_key;
- unsigned short keycodes[256];
+ unsigned short keycodes[NUM_ROWS * NUM_COLS];
+ bool rep;
};
static irqreturn_t spear_kbd_interrupt(int irq, void *dev_id)
@@ -136,27 +140,49 @@ static void spear_kbd_close(struct input_dev *dev)
kbd->last_key = KEY_RESERVED;
}
-static int __devinit spear_kbd_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static int __devinit spear_kbd_parse_dt(struct platform_device *pdev,
+ struct spear_kbd *kbd)
{
- const struct kbd_platform_data *pdata = pdev->dev.platform_data;
- const struct matrix_keymap_data *keymap;
- struct spear_kbd *kbd;
- struct input_dev *input_dev;
- struct resource *res;
- int irq;
+ struct device_node *np = pdev->dev.of_node;
int error;
+ u32 val;
- if (!pdata) {
- dev_err(&pdev->dev, "Invalid platform data\n");
+ if (!np) {
+ dev_err(&pdev->dev, "Missing DT data\n");
return -EINVAL;
}
- keymap = pdata->keymap;
- if (!keymap) {
- dev_err(&pdev->dev, "no keymap defined\n");
- return -EINVAL;
+ if (of_property_read_bool(np, "autorepeat"))
+ kbd->rep = true;
+
+ error = of_property_read_u32(np, "st,mode", &val);
+ if (error) {
+ dev_err(&pdev->dev, "DT: Invalid or missing mode\n");
+ return error;
}
+ kbd->mode = val;
+ return 0;
+}
+#else
+static inline int spear_kbd_parse_dt(struct platform_device *pdev,
+ struct spear_kbd *kbd)
+{
+ return -ENOSYS;
+}
+#endif
+
+static int __devinit spear_kbd_probe(struct platform_device *pdev)
+{
+ struct kbd_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ const struct matrix_keymap_data *keymap = pdata ? pdata->keymap : NULL;
+ struct spear_kbd *kbd;
+ struct input_dev *input_dev;
+ struct resource *res;
+ int irq;
+ int error;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "no keyboard resource defined\n");
@@ -179,7 +205,15 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
kbd->input = input_dev;
kbd->irq = irq;
- kbd->mode = pdata->mode;
+
+ if (!pdata) {
+ error = spear_kbd_parse_dt(pdev, kbd);
+ if (error)
+ goto err_free_mem;
+ } else {
+ kbd->mode = pdata->mode;
+ kbd->rep = pdata->rep;
+ }
kbd->res = request_mem_region(res->start, resource_size(res),
pdev->name);
@@ -212,18 +246,17 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
input_dev->open = spear_kbd_open;
input_dev->close = spear_kbd_close;
- __set_bit(EV_KEY, input_dev->evbit);
- if (pdata->rep)
+ error = matrix_keypad_build_keymap(keymap, NULL, NUM_ROWS, NUM_COLS,
+ kbd->keycodes, input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to build keymap\n");
+ goto err_put_clk;
+ }
+
+ if (kbd->rep)
__set_bit(EV_REP, input_dev->evbit);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
- input_dev->keycode = kbd->keycodes;
- input_dev->keycodesize = sizeof(kbd->keycodes[0]);
- input_dev->keycodemax = ARRAY_SIZE(kbd->keycodes);
-
- matrix_keypad_build_keymap(keymap, ROW_SHIFT,
- input_dev->keycode, input_dev->keybit);
-
input_set_drvdata(input_dev, kbd);
error = request_irq(irq, spear_kbd_interrupt, 0, "keyboard", kbd);
@@ -317,6 +350,14 @@ static int spear_kbd_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(spear_kbd_pm_ops, spear_kbd_suspend, spear_kbd_resume);
+#ifdef CONFIG_OF
+static const struct of_device_id spear_kbd_id_table[] = {
+ { .compatible = "st,spear300-kbd" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spear_kbd_id_table);
+#endif
+
static struct platform_driver spear_kbd_driver = {
.probe = spear_kbd_probe,
.remove = __devexit_p(spear_kbd_remove),
@@ -324,6 +365,7 @@ static struct platform_driver spear_kbd_driver = {
.name = "keyboard",
.owner = THIS_MODULE,
.pm = &spear_kbd_pm_ops,
+ .of_match_table = of_match_ptr(spear_kbd_id_table),
},
};
module_platform_driver(spear_kbd_driver);
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index 9397cf9c625c..470a8778dec1 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -289,19 +289,17 @@ static int __devinit stmpe_keypad_probe(struct platform_device *pdev)
input->id.bustype = BUS_I2C;
input->dev.parent = &pdev->dev;
- input_set_capability(input, EV_MSC, MSC_SCAN);
+ ret = matrix_keypad_build_keymap(plat->keymap_data, NULL,
+ STMPE_KEYPAD_MAX_ROWS,
+ STMPE_KEYPAD_MAX_COLS,
+ keypad->keymap, input);
+ if (ret)
+ goto out_freeinput;
- __set_bit(EV_KEY, input->evbit);
+ input_set_capability(input, EV_MSC, MSC_SCAN);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- input->keycode = keypad->keymap;
- input->keycodesize = sizeof(keypad->keymap[0]);
- input->keycodemax = ARRAY_SIZE(keypad->keymap);
-
- matrix_keypad_build_keymap(plat->keymap_data, STMPE_KEYPAD_ROW_SHIFT,
- input->keycode, input->keybit);
-
for (i = 0; i < plat->keymap_data->keymap_size; i++) {
unsigned int key = plat->keymap_data->keymap[i];
diff --git a/drivers/input/keyboard/stowaway.c b/drivers/input/keyboard/stowaway.c
index 7437219370b1..cc612c5d5427 100644
--- a/drivers/input/keyboard/stowaway.c
+++ b/drivers/input/keyboard/stowaway.c
@@ -170,15 +170,4 @@ static struct serio_driver skbd_drv = {
.disconnect = skbd_disconnect,
};
-static int __init skbd_init(void)
-{
- return serio_register_driver(&skbd_drv);
-}
-
-static void __exit skbd_exit(void)
-{
- serio_unregister_driver(&skbd_drv);
-}
-
-module_init(skbd_init);
-module_exit(skbd_exit);
+module_serio_driver(skbd_drv);
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index a99a04b03ee4..5f836b1638c1 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -369,19 +369,4 @@ static struct serio_driver sunkbd_drv = {
.disconnect = sunkbd_disconnect,
};
-/*
- * The functions for insering/removing us as a module.
- */
-
-static int __init sunkbd_init(void)
-{
- return serio_register_driver(&sunkbd_drv);
-}
-
-static void __exit sunkbd_exit(void)
-{
- serio_unregister_driver(&sunkbd_drv);
-}
-
-module_init(sunkbd_init);
-module_exit(sunkbd_exit);
+module_serio_driver(sunkbd_drv);
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 2dee3e4e7c6f..7d498e698508 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -78,7 +78,7 @@
* @input: pointer to input device object
* @board: keypad platform device
* @krow: number of rows
- * @kcol: number of coloumns
+ * @kcol: number of columns
* @keymap: matrix scan code table for keycodes
* @keypad_stopped: holds keypad status
*/
@@ -96,21 +96,15 @@ static int tc3589x_keypad_init_key_hardware(struct tc_keypad *keypad)
{
int ret;
struct tc3589x *tc3589x = keypad->tc3589x;
- u8 settle_time = keypad->board->settle_time;
- u8 dbounce_period = keypad->board->debounce_period;
- u8 rows = keypad->board->krow & 0xf; /* mask out the nibble */
- u8 column = keypad->board->kcol & 0xf; /* mask out the nibble */
-
- /* validate platform configurations */
- if (keypad->board->kcol > TC3589x_MAX_KPCOL ||
- keypad->board->krow > TC3589x_MAX_KPROW ||
- keypad->board->debounce_period > TC3589x_MAX_DEBOUNCE_SETTLE ||
- keypad->board->settle_time > TC3589x_MAX_DEBOUNCE_SETTLE)
+ const struct tc3589x_keypad_platform_data *board = keypad->board;
+
+ /* validate platform configuration */
+ if (board->kcol > TC3589x_MAX_KPCOL || board->krow > TC3589x_MAX_KPROW)
return -EINVAL;
/* configure KBDSIZE 4 LSbits for cols and 4 MSbits for rows */
ret = tc3589x_reg_write(tc3589x, TC3589x_KBDSIZE,
- (rows << KP_ROW_SHIFT) | column);
+ (board->krow << KP_ROW_SHIFT) | board->kcol);
if (ret < 0)
return ret;
@@ -124,12 +118,14 @@ static int tc3589x_keypad_init_key_hardware(struct tc_keypad *keypad)
return ret;
/* Configure settle time */
- ret = tc3589x_reg_write(tc3589x, TC3589x_KBDSETTLE_REG, settle_time);
+ ret = tc3589x_reg_write(tc3589x, TC3589x_KBDSETTLE_REG,
+ board->settle_time);
if (ret < 0)
return ret;
/* Configure debounce time */
- ret = tc3589x_reg_write(tc3589x, TC3589x_KBDBOUNCE, dbounce_period);
+ ret = tc3589x_reg_write(tc3589x, TC3589x_KBDBOUNCE,
+ board->debounce_period);
if (ret < 0)
return ret;
@@ -337,23 +333,22 @@ static int __devinit tc3589x_keypad_probe(struct platform_device *pdev)
input->name = pdev->name;
input->dev.parent = &pdev->dev;
- input->keycode = keypad->keymap;
- input->keycodesize = sizeof(keypad->keymap[0]);
- input->keycodemax = ARRAY_SIZE(keypad->keymap);
-
input->open = tc3589x_keypad_open;
input->close = tc3589x_keypad_close;
- input_set_drvdata(input, keypad);
+ error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
+ TC3589x_MAX_KPROW, TC3589x_MAX_KPCOL,
+ keypad->keymap, input);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to build keymap\n");
+ goto err_free_mem;
+ }
input_set_capability(input, EV_MSC, MSC_SCAN);
-
- __set_bit(EV_KEY, input->evbit);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- matrix_keypad_build_keymap(plat->keymap_data, 0x3,
- input->keycode, input->keybit);
+ input_set_drvdata(input, keypad);
error = request_threaded_irq(irq, NULL,
tc3589x_keypad_irq, plat->irqtype,
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 958ec107bfbc..5f87b28b3192 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -342,21 +342,20 @@ static int __devinit tca8418_keypad_probe(struct i2c_client *client,
input->id.product = 0x001;
input->id.version = 0x0001;
- input->keycode = keypad_data->keymap;
- input->keycodesize = sizeof(keypad_data->keymap[0]);
- input->keycodemax = max_keys;
+ error = matrix_keypad_build_keymap(pdata->keymap_data, NULL,
+ pdata->rows, pdata->cols,
+ keypad_data->keymap, input);
+ if (error) {
+ dev_dbg(&client->dev, "Failed to build keymap\n");
+ goto fail2;
+ }
- __set_bit(EV_KEY, input->evbit);
if (pdata->rep)
__set_bit(EV_REP, input->evbit);
-
input_set_capability(input, EV_MSC, MSC_SCAN);
input_set_drvdata(input, keypad_data);
- matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
- input->keycode, input->keybit);
-
if (pdata->irq_is_gpio)
client->irq = gpio_to_irq(client->irq);
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index fe4ac95ca6c8..4ffe64d53107 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -619,8 +619,8 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
}
#ifdef CONFIG_OF
-static struct tegra_kbc_platform_data * __devinit
-tegra_kbc_dt_parse_pdata(struct platform_device *pdev)
+static struct tegra_kbc_platform_data * __devinit tegra_kbc_dt_parse_pdata(
+ struct platform_device *pdev)
{
struct tegra_kbc_platform_data *pdata;
struct device_node *np = pdev->dev.of_node;
@@ -660,10 +660,6 @@ tegra_kbc_dt_parse_pdata(struct platform_device *pdev)
pdata->pin_cfg[KBC_MAX_ROW + i].type = PIN_CFG_COL;
}
- pdata->keymap_data = matrix_keyboard_of_fill_keymap(np, "linux,keymap");
-
- /* FIXME: Add handling of linux,fn-keymap here */
-
return pdata;
}
#else
@@ -674,10 +670,36 @@ static inline struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
}
#endif
+static int __devinit tegra_kbd_setup_keymap(struct tegra_kbc *kbc)
+{
+ const struct tegra_kbc_platform_data *pdata = kbc->pdata;
+ const struct matrix_keymap_data *keymap_data = pdata->keymap_data;
+ unsigned int keymap_rows = KBC_MAX_KEY;
+ int retval;
+
+ if (keymap_data && pdata->use_fn_map)
+ keymap_rows *= 2;
+
+ retval = matrix_keypad_build_keymap(keymap_data, NULL,
+ keymap_rows, KBC_MAX_COL,
+ kbc->keycode, kbc->idev);
+ if (retval == -ENOSYS || retval == -ENOENT) {
+ /*
+		 * If there is no OF support in the kernel or the keymap
+		 * property is missing, use the default keymap.
+ */
+ retval = matrix_keypad_build_keymap(
+ &tegra_kbc_default_keymap_data, NULL,
+ keymap_rows, KBC_MAX_COL,
+ kbc->keycode, kbc->idev);
+ }
+
+ return retval;
+}
+
static int __devinit tegra_kbc_probe(struct platform_device *pdev)
{
const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
- const struct matrix_keymap_data *keymap_data;
struct tegra_kbc *kbc;
struct input_dev *input_dev;
struct resource *res;
@@ -757,29 +779,26 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt;
kbc->repoll_dly = DIV_ROUND_UP(kbc->repoll_dly, KBC_CYCLE_MS);
+ kbc->wakeup_key = pdata->wakeup_key;
+ kbc->use_fn_map = pdata->use_fn_map;
+ kbc->use_ghost_filter = pdata->use_ghost_filter;
+
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
input_dev->dev.parent = &pdev->dev;
input_dev->open = tegra_kbc_open;
input_dev->close = tegra_kbc_close;
- input_set_drvdata(input_dev, kbc);
+ err = tegra_kbd_setup_keymap(kbc);
+ if (err) {
+ dev_err(&pdev->dev, "failed to setup keymap\n");
+ goto err_put_clk;
+ }
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ __set_bit(EV_REP, input_dev->evbit);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
- input_dev->keycode = kbc->keycode;
- input_dev->keycodesize = sizeof(kbc->keycode[0]);
- input_dev->keycodemax = KBC_MAX_KEY;
- if (pdata->use_fn_map)
- input_dev->keycodemax *= 2;
-
- kbc->use_fn_map = pdata->use_fn_map;
- kbc->use_ghost_filter = pdata->use_ghost_filter;
- keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
- matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
- input_dev->keycode, input_dev->keybit);
- kbc->wakeup_key = pdata->wakeup_key;
+ input_set_drvdata(input_dev, kbc);
err = request_irq(kbc->irq, tegra_kbc_isr,
IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH, pdev->name, kbc);
@@ -799,9 +818,6 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, kbc);
device_init_wakeup(&pdev->dev, pdata->wakeup);
- if (!pdev->dev.platform_data)
- matrix_keyboard_of_free_keymap(pdata->keymap_data);
-
return 0;
err_free_irq:
@@ -816,10 +832,8 @@ err_free_mem:
input_free_device(input_dev);
kfree(kbc);
err_free_pdata:
- if (!pdev->dev.platform_data) {
- matrix_keyboard_of_free_keymap(pdata->keymap_data);
+ if (!pdev->dev.platform_data)
kfree(pdata);
- }
return err;
}
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index fb39c94b6fdd..a4a445fb7020 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -247,15 +247,11 @@ static int __devinit keypad_probe(struct platform_device *pdev)
error = -ENOMEM;
goto error_input;
}
- input_set_drvdata(kp->input_dev, kp);
kp->input_dev->name = pdev->name;
kp->input_dev->dev.parent = &pdev->dev;
kp->input_dev->open = keypad_start;
kp->input_dev->close = keypad_stop;
- kp->input_dev->evbit[0] = BIT_MASK(EV_KEY);
- if (!pdata->no_autorepeat)
- kp->input_dev->evbit[0] |= BIT_MASK(EV_REP);
clk_enable(kp->clk);
rev = keypad_read(kp, rev);
@@ -264,15 +260,20 @@ static int __devinit keypad_probe(struct platform_device *pdev)
kp->input_dev->id.version = ((rev >> 16) & 0xfff);
clk_disable(kp->clk);
- kp->input_dev->keycode = kp->keycodes;
- kp->input_dev->keycodesize = sizeof(kp->keycodes[0]);
- kp->input_dev->keycodemax = kp->rows << kp->row_shift;
-
- matrix_keypad_build_keymap(keymap_data, kp->row_shift, kp->keycodes,
- kp->input_dev->keybit);
+ error = matrix_keypad_build_keymap(keymap_data, NULL,
+ kp->rows, kp->cols,
+ kp->keycodes, kp->input_dev);
+ if (error) {
+ dev_err(dev, "Failed to build keymap\n");
+ goto error_reg;
+ }
+ if (!pdata->no_autorepeat)
+ kp->input_dev->evbit[0] |= BIT_MASK(EV_REP);
input_set_capability(kp->input_dev, EV_MSC, MSC_SCAN);
+ input_set_drvdata(kp->input_dev, kp);
+
error = input_register_device(kp->input_dev);
if (error < 0) {
dev_err(dev, "Could not register input device\n");
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index 67bec14e8b96..a2c6f79aa101 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -361,14 +361,6 @@ static int __devinit twl4030_kp_probe(struct platform_device *pdev)
kp->irq = platform_get_irq(pdev, 0);
/* setup input device */
- __set_bit(EV_KEY, input->evbit);
-
- /* Enable auto repeat feature of Linux input subsystem */
- if (pdata->rep)
- __set_bit(EV_REP, input->evbit);
-
- input_set_capability(input, EV_MSC, MSC_SCAN);
-
input->name = "TWL4030 Keypad";
input->phys = "twl4030_keypad/input0";
input->dev.parent = &pdev->dev;
@@ -378,12 +370,19 @@ static int __devinit twl4030_kp_probe(struct platform_device *pdev)
input->id.product = 0x0001;
input->id.version = 0x0003;
- input->keycode = kp->keymap;
- input->keycodesize = sizeof(kp->keymap[0]);
- input->keycodemax = ARRAY_SIZE(kp->keymap);
+ error = matrix_keypad_build_keymap(keymap_data, NULL,
+ TWL4030_MAX_ROWS,
+ 1 << TWL4030_ROW_SHIFT,
+ kp->keymap, input);
+ if (error) {
+ dev_err(kp->dbg_dev, "Failed to build keymap\n");
+ goto err1;
+ }
- matrix_keypad_build_keymap(keymap_data, TWL4030_ROW_SHIFT,
- input->keycode, input->keybit);
+ input_set_capability(input, EV_MSC, MSC_SCAN);
+ /* Enable auto repeat feature of Linux input subsystem */
+ if (pdata->rep)
+ __set_bit(EV_REP, input->evbit);
error = input_register_device(input);
if (error) {
diff --git a/drivers/input/keyboard/w90p910_keypad.c b/drivers/input/keyboard/w90p910_keypad.c
index 99bbb7e775ae..085ede4d972d 100644
--- a/drivers/input/keyboard/w90p910_keypad.c
+++ b/drivers/input/keyboard/w90p910_keypad.c
@@ -42,7 +42,8 @@
#define KGET_RAW(n) (((n) & KEY0R) >> 3)
#define KGET_COLUMN(n) ((n) & KEY0C)
-#define W90P910_MAX_KEY_NUM (8 * 8)
+#define W90P910_NUM_ROWS 8
+#define W90P910_NUM_COLS 8
#define W90P910_ROW_SHIFT 3
struct w90p910_keypad {
@@ -51,7 +52,7 @@ struct w90p910_keypad {
struct input_dev *input_dev;
void __iomem *mmio_base;
int irq;
- unsigned short keymap[W90P910_MAX_KEY_NUM];
+ unsigned short keymap[W90P910_NUM_ROWS * W90P910_NUM_COLS];
};
static void w90p910_keypad_scan_matrix(struct w90p910_keypad *keypad,
@@ -190,17 +191,13 @@ static int __devinit w90p910_keypad_probe(struct platform_device *pdev)
input_dev->close = w90p910_keypad_close;
input_dev->dev.parent = &pdev->dev;
- input_dev->keycode = keypad->keymap;
- input_dev->keycodesize = sizeof(keypad->keymap[0]);
- input_dev->keycodemax = ARRAY_SIZE(keypad->keymap);
-
- input_set_drvdata(input_dev, keypad);
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
- input_set_capability(input_dev, EV_MSC, MSC_SCAN);
-
- matrix_keypad_build_keymap(keymap_data, W90P910_ROW_SHIFT,
- input_dev->keycode, input_dev->keybit);
+ error = matrix_keypad_build_keymap(keymap_data, NULL,
+ W90P910_NUM_ROWS, W90P910_NUM_COLS,
+ keypad->keymap, input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to build keymap\n");
+ goto failed_put_clk;
+ }
error = request_irq(keypad->irq, w90p910_keypad_irq_handler,
0, pdev->name, keypad);
@@ -209,6 +206,10 @@ static int __devinit w90p910_keypad_probe(struct platform_device *pdev)
goto failed_put_clk;
}
+ __set_bit(EV_REP, input_dev->evbit);
+ input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+ input_set_drvdata(input_dev, keypad);
+
/* Register the input device */
error = input_register_device(input_dev);
if (error) {
diff --git a/drivers/input/keyboard/xtkbd.c b/drivers/input/keyboard/xtkbd.c
index 37b01d777a4a..d050d9d0011b 100644
--- a/drivers/input/keyboard/xtkbd.c
+++ b/drivers/input/keyboard/xtkbd.c
@@ -169,15 +169,4 @@ static struct serio_driver xtkbd_drv = {
.disconnect = xtkbd_disconnect,
};
-static int __init xtkbd_init(void)
-{
- return serio_register_driver(&xtkbd_drv);
-}
-
-static void __exit xtkbd_exit(void)
-{
- serio_unregister_driver(&xtkbd_drv);
-}
-
-module_init(xtkbd_init);
-module_exit(xtkbd_exit);
+module_serio_driver(xtkbd_drv);
diff --git a/drivers/input/matrix-keymap.c b/drivers/input/matrix-keymap.c
new file mode 100644
index 000000000000..443ad64b7f2a
--- /dev/null
+++ b/drivers/input/matrix-keymap.c
@@ -0,0 +1,163 @@
+/*
+ * Helpers for matrix keyboard bindings
+ *
+ * Copyright (C) 2012 Google, Inc
+ *
+ * Author:
+ * Olof Johansson <olof@lixom.net>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/of.h>
+#include <linux/export.h>
+#include <linux/input/matrix_keypad.h>
+
+static bool matrix_keypad_map_key(struct input_dev *input_dev,
+ unsigned int rows, unsigned int cols,
+ unsigned int row_shift, unsigned int key)
+{
+ unsigned short *keymap = input_dev->keycode;
+ unsigned int row = KEY_ROW(key);
+ unsigned int col = KEY_COL(key);
+ unsigned short code = KEY_VAL(key);
+
+ if (row >= rows || col >= cols) {
+ dev_err(input_dev->dev.parent,
+ "%s: invalid keymap entry 0x%x (row: %d, col: %d, rows: %d, cols: %d)\n",
+ __func__, key, row, col, rows, cols);
+ return false;
+ }
+
+ keymap[MATRIX_SCAN_CODE(row, col, row_shift)] = code;
+ __set_bit(code, input_dev->keybit);
+
+ return true;
+}
+
+#ifdef CONFIG_OF
+static int matrix_keypad_parse_of_keymap(const char *propname,
+ unsigned int rows, unsigned int cols,
+ struct input_dev *input_dev)
+{
+ struct device *dev = input_dev->dev.parent;
+ struct device_node *np = dev->of_node;
+ unsigned int row_shift = get_count_order(cols);
+ unsigned int max_keys = rows << row_shift;
+ unsigned int proplen, i, size;
+ const __be32 *prop;
+
+ if (!np)
+ return -ENOENT;
+
+ if (!propname)
+ propname = "linux,keymap";
+
+ prop = of_get_property(np, propname, &proplen);
+ if (!prop) {
+ dev_err(dev, "OF: %s property not defined in %s\n",
+ propname, np->full_name);
+ return -ENOENT;
+ }
+
+ if (proplen % sizeof(u32)) {
+ dev_err(dev, "OF: Malformed keycode property %s in %s\n",
+ propname, np->full_name);
+ return -EINVAL;
+ }
+
+ size = proplen / sizeof(u32);
+ if (size > max_keys) {
+ dev_err(dev, "OF: %s size overflow\n", propname);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < size; i++) {
+ unsigned int key = be32_to_cpup(prop + i);
+
+ if (!matrix_keypad_map_key(input_dev, rows, cols,
+ row_shift, key))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#else
+static int matrix_keypad_parse_of_keymap(const char *propname,
+ unsigned int rows, unsigned int cols,
+ struct input_dev *input_dev)
+{
+ return -ENOSYS;
+}
+#endif
+
+/**
+ * matrix_keypad_build_keymap - convert platform keymap into matrix keymap
+ * @keymap_data: keymap supplied by the platform code
+ * @keymap_name: name of device tree property containing keymap (if device
+ * tree support is enabled).
+ * @rows: number of rows in target keymap array
+ * @cols: number of cols in target keymap array
+ * @keymap: expanded version of the keymap that is suitable for use by a
+ *	matrix keyboard driver
+ * @input_dev: input device for which we are setting up the keymap
+ *
+ * This function converts the platform keymap (encoded with the KEY() macro)
+ * into an array of keycodes suitable for use in a standard matrix keyboard
+ * driver that uses row and col as indices.
+ *
+ * If @keymap_data is not supplied and device tree support is enabled, the
+ * function will attempt to load the keymap from the device tree property
+ * specified by @keymap_name (or "linux,keymap" if @keymap_name is %NULL).
+ *
+ * Callers are expected to set up input_dev->dev.parent before calling this
+ * function.
+ */
+int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
+ const char *keymap_name,
+ unsigned int rows, unsigned int cols,
+ unsigned short *keymap,
+ struct input_dev *input_dev)
+{
+ unsigned int row_shift = get_count_order(cols);
+ int i;
+ int error;
+
+ input_dev->keycode = keymap;
+ input_dev->keycodesize = sizeof(*keymap);
+ input_dev->keycodemax = rows << row_shift;
+
+ __set_bit(EV_KEY, input_dev->evbit);
+
+ if (keymap_data) {
+ for (i = 0; i < keymap_data->keymap_size; i++) {
+ unsigned int key = keymap_data->keymap[i];
+
+ if (!matrix_keypad_map_key(input_dev, rows, cols,
+ row_shift, key))
+ return -EINVAL;
+ }
+ } else {
+ error = matrix_keypad_parse_of_keymap(keymap_name, rows, cols,
+ input_dev);
+ if (error)
+ return error;
+ }
+
+ __clear_bit(KEY_RESERVED, input_dev->keybit);
+
+ return 0;
+}
+EXPORT_SYMBOL(matrix_keypad_build_keymap);
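
A minimal sketch of how a driver is expected to use the new helper, mirroring the conversions made elsewhere in this series; the exkp_* names, the 8x8 geometry and the surrounding probe code are illustrative only:

#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/platform_device.h>

#define EXKP_ROWS	8
#define EXKP_COLS	8

struct exkp {
	struct input_dev *input;	/* allocated with input_allocate_device() */
	unsigned short keymap[EXKP_ROWS * EXKP_COLS];
};

static int exkp_setup_input(struct platform_device *pdev, struct exkp *kp,
			    const struct matrix_keymap_data *keymap_data)
{
	int error;

	/* The helper expects dev.parent to be set before it is called */
	kp->input->name = pdev->name;
	kp->input->dev.parent = &pdev->dev;

	/*
	 * A NULL keymap_data makes the helper fall back to the
	 * "linux,keymap" device tree property of the parent device.
	 */
	error = matrix_keypad_build_keymap(keymap_data, NULL,
					   EXKP_ROWS, EXKP_COLS,
					   kp->keymap, kp->input);
	if (error)
		return error;

	/* EV_KEY is set by the helper; add autorepeat and scancodes here */
	__set_bit(EV_REP, kp->input->evbit);
	input_set_capability(kp->input, EV_MSC, MSC_SCAN);
	input_set_drvdata(kp->input, kp);

	return input_register_device(kp->input);
}
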
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
index 06517e60e50c..a3735a01e9fd 100644
--- a/drivers/input/misc/cma3000_d0x.c
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -318,7 +318,7 @@ struct cma3000_accl_data *cma3000_init(struct device *dev, int irq,
mutex_init(&data->mutex);
data->mode = pdata->mode;
- if (data->mode < CMAMODE_DEFAULT || data->mode > CMAMODE_POFF) {
+ if (data->mode > CMAMODE_POFF) {
data->mode = CMAMODE_MOTDET;
dev_warn(dev,
"Invalid mode specified, assuming Motion Detect\n");
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index 5403c571b6a5..306f84c2d8fb 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -367,7 +367,7 @@ static int __devinit mpu3050_probe(struct i2c_client *client,
error = request_threaded_irq(client->irq,
NULL, mpu3050_interrupt_thread,
- IRQF_TRIGGER_RISING,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"mpu3050", sensor);
if (error) {
dev_err(&client->dev,
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 14e94f56cb7d..c34f6c0371c4 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -27,6 +27,7 @@
*/
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/workqueue.h>
#include <linux/input.h>
#include <linux/mfd/twl6040.h>
@@ -258,10 +259,13 @@ static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
{
struct twl6040_vibra_data *pdata = pdev->dev.platform_data;
+ struct device_node *node = pdev->dev.of_node;
struct vibra_info *info;
+ int vddvibl_uV = 0;
+ int vddvibr_uV = 0;
int ret;
- if (!pdata) {
+ if (!pdata && !node) {
dev_err(&pdev->dev, "platform_data not available\n");
return -EINVAL;
}
@@ -273,11 +277,26 @@ static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
}
info->dev = &pdev->dev;
+
info->twl6040 = dev_get_drvdata(pdev->dev.parent);
- info->vibldrv_res = pdata->vibldrv_res;
- info->vibrdrv_res = pdata->vibrdrv_res;
- info->viblmotor_res = pdata->viblmotor_res;
- info->vibrmotor_res = pdata->vibrmotor_res;
+ if (pdata) {
+ info->vibldrv_res = pdata->vibldrv_res;
+ info->vibrdrv_res = pdata->vibrdrv_res;
+ info->viblmotor_res = pdata->viblmotor_res;
+ info->vibrmotor_res = pdata->vibrmotor_res;
+ vddvibl_uV = pdata->vddvibl_uV;
+ vddvibr_uV = pdata->vddvibr_uV;
+ } else {
+ of_property_read_u32(node, "vibldrv_res", &info->vibldrv_res);
+ of_property_read_u32(node, "vibrdrv_res", &info->vibrdrv_res);
+ of_property_read_u32(node, "viblmotor_res",
+ &info->viblmotor_res);
+ of_property_read_u32(node, "vibrmotor_res",
+ &info->vibrmotor_res);
+ of_property_read_u32(node, "vddvibl_uV", &vddvibl_uV);
+ of_property_read_u32(node, "vddvibr_uV", &vddvibr_uV);
+ }
+
if ((!info->vibldrv_res && !info->viblmotor_res) ||
(!info->vibrdrv_res && !info->vibrmotor_res)) {
dev_err(info->dev, "invalid vibra driver/motor resistance\n");
@@ -339,10 +358,9 @@ static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
goto err_regulator;
}
- if (pdata->vddvibl_uV) {
+ if (vddvibl_uV) {
ret = regulator_set_voltage(info->supplies[0].consumer,
- pdata->vddvibl_uV,
- pdata->vddvibl_uV);
+ vddvibl_uV, vddvibl_uV);
if (ret) {
dev_err(info->dev, "failed to set VDDVIBL volt %d\n",
ret);
@@ -350,10 +368,9 @@ static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
}
}
- if (pdata->vddvibr_uV) {
+ if (vddvibr_uV) {
ret = regulator_set_voltage(info->supplies[1].consumer,
- pdata->vddvibr_uV,
- pdata->vddvibr_uV);
+ vddvibr_uV, vddvibr_uV);
if (ret) {
dev_err(info->dev, "failed to set VDDVIBR volt %d\n",
ret);
@@ -401,6 +418,12 @@ static int __devexit twl6040_vibra_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id twl6040_vibra_of_match[] = {
+ {.compatible = "ti,twl6040-vibra", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, twl6040_vibra_of_match);
+
static struct platform_driver twl6040_vibra_driver = {
.probe = twl6040_vibra_probe,
.remove = __devexit_p(twl6040_vibra_remove),
@@ -408,6 +431,7 @@ static struct platform_driver twl6040_vibra_driver = {
.name = "twl6040-vibra",
.owner = THIS_MODULE,
.pm = &twl6040_vibra_pm_ops,
+ .of_match_table = twl6040_vibra_of_match,
},
};
module_platform_driver(twl6040_vibra_driver);
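
For boards that still use the legacy platform-data path, the values the probe reads above come from twl6040_vibra_data; below is a hedged sketch with placeholder numbers (real boards use their measured driver and motor resistances), assuming the structure exported from <linux/mfd/twl6040.h>. On device tree systems the same values come from the vibldrv_res, vibrdrv_res, viblmotor_res, vibrmotor_res, vddvibl_uV and vddvibr_uV properties of a "ti,twl6040-vibra" node.

#include <linux/mfd/twl6040.h>

/* Placeholder values; use the board's measured resistances */
static struct twl6040_vibra_data example_vibra_pdata = {
	.vibldrv_res	= 8,
	.vibrdrv_res	= 3,
	.viblmotor_res	= 10,
	.vibrmotor_res	= 10,
	.vddvibl_uV	= 0,	/* 0 keeps the regulator's default voltage */
	.vddvibr_uV	= 0,
};
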
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index 47f18d6bce46..6790a812a1db 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -73,7 +73,7 @@ static int __devinit wm831x_on_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_on *wm831x_on;
- int irq = platform_get_irq(pdev, 0);
+ int irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
int ret;
wm831x_on = kzalloc(sizeof(struct wm831x_on), GFP_KERNEL);
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 9b8db821d5f0..cd6268cf7cd5 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -339,4 +339,16 @@ config MOUSE_SYNAPTICS_USB
To compile this driver as a module, choose M here: the
module will be called synaptics_usb.
+config MOUSE_NAVPOINT_PXA27x
+ tristate "Synaptics NavPoint (PXA27x SSP/SPI)"
+ depends on PXA27x && PXA_SSP
+ help
+ This driver adds support for the Synaptics NavPoint touchpad connected
+ to a PXA27x SSP port in SPI slave mode. The device emulates a mouse;
+ a tap or tap-and-a-half drag gesture emulates the left mouse button.
+ For example, use the xf86-input-evdev driver for an X pointing device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called navpoint.
+
endif
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 4718effeb8d9..46ba7556fd4f 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o
obj-$(CONFIG_MOUSE_INPORT) += inport.o
obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o
obj-$(CONFIG_MOUSE_MAPLE) += maplemouse.o
+obj-$(CONFIG_MOUSE_NAVPOINT_PXA27x) += navpoint.o
obj-$(CONFIG_MOUSE_PC110PAD) += pc110pad.o
obj-$(CONFIG_MOUSE_PS2) += psmouse.o
obj-$(CONFIG_MOUSE_PXA930_TRKBALL) += pxa930_trkball.o
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 4c6a72d3d48c..4a1347e91bdc 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -553,10 +553,7 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
alps_report_semi_mt_data(dev, fingers, x1, y1, x2, y2);
- input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
- input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
- input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
- input_report_key(dev, BTN_TOOL_QUADTAP, fingers == 4);
+ input_mt_report_finger_count(dev, fingers);
input_report_key(dev, BTN_LEFT, left);
input_report_key(dev, BTN_RIGHT, right);
@@ -604,10 +601,54 @@ static void alps_process_packet_v3(struct psmouse *psmouse)
static void alps_process_packet_v4(struct psmouse *psmouse)
{
+ struct alps_data *priv = psmouse->private;
unsigned char *packet = psmouse->packet;
struct input_dev *dev = psmouse->dev;
+ int offset;
int x, y, z;
int left, right;
+ int x1, y1, x2, y2;
+ int fingers = 0;
+ unsigned int x_bitmap, y_bitmap;
+
+ /*
+ * v4 has a 6-byte encoding for bitmap data, but this data is
+ * broken up between 3 normal packets. Use priv->multi_packet to
+ * track our position in the bitmap packet.
+ */
+ if (packet[6] & 0x40) {
+ /* sync, reset position */
+ priv->multi_packet = 0;
+ }
+
+ if (WARN_ON_ONCE(priv->multi_packet > 2))
+ return;
+
+ offset = 2 * priv->multi_packet;
+ priv->multi_data[offset] = packet[6];
+ priv->multi_data[offset + 1] = packet[7];
+
+ if (++priv->multi_packet > 2) {
+ priv->multi_packet = 0;
+
+ x_bitmap = ((priv->multi_data[2] & 0x1f) << 10) |
+ ((priv->multi_data[3] & 0x60) << 3) |
+ ((priv->multi_data[0] & 0x3f) << 2) |
+ ((priv->multi_data[1] & 0x60) >> 5);
+ y_bitmap = ((priv->multi_data[5] & 0x01) << 10) |
+ ((priv->multi_data[3] & 0x1f) << 5) |
+ (priv->multi_data[1] & 0x1f);
+
+ fingers = alps_process_bitmap(x_bitmap, y_bitmap,
+ &x1, &y1, &x2, &y2);
+
+		/* Store MT data. */
+ priv->fingers = fingers;
+ priv->x1 = x1;
+ priv->x2 = x2;
+ priv->y1 = y1;
+ priv->y2 = y2;
+ }
left = packet[4] & 0x01;
right = packet[4] & 0x02;
@@ -617,21 +658,41 @@ static void alps_process_packet_v4(struct psmouse *psmouse)
y = ((packet[2] & 0x7f) << 4) | (packet[3] & 0x0f);
z = packet[5] & 0x7f;
+ /*
+ * If there were no contacts in the bitmap, use ST
+ * points in MT reports.
+ * If there were two contacts or more, report MT data.
+ */
+ if (priv->fingers < 2) {
+ x1 = x;
+ y1 = y;
+ fingers = z > 0 ? 1 : 0;
+ } else {
+ fingers = priv->fingers;
+ x1 = priv->x1;
+ x2 = priv->x2;
+ y1 = priv->y1;
+ y2 = priv->y2;
+ }
+
if (z >= 64)
input_report_key(dev, BTN_TOUCH, 1);
else
input_report_key(dev, BTN_TOUCH, 0);
+ alps_report_semi_mt_data(dev, fingers, x1, y1, x2, y2);
+
+ input_mt_report_finger_count(dev, fingers);
+
+ input_report_key(dev, BTN_LEFT, left);
+ input_report_key(dev, BTN_RIGHT, right);
+
if (z > 0) {
input_report_abs(dev, ABS_X, x);
input_report_abs(dev, ABS_Y, y);
}
input_report_abs(dev, ABS_PRESSURE, z);
- input_report_key(dev, BTN_TOOL_FINGER, z > 0);
- input_report_key(dev, BTN_LEFT, left);
- input_report_key(dev, BTN_RIGHT, right);
-
input_sync(dev);
}
@@ -1557,6 +1618,7 @@ int alps_init(struct psmouse *psmouse)
input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
break;
case ALPS_PROTO_V3:
+ case ALPS_PROTO_V4:
set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
input_mt_init_slots(dev1, 2);
input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, ALPS_V3_X_MAX, 0, 0);
@@ -1565,8 +1627,7 @@ int alps_init(struct psmouse *psmouse)
set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
- /* fall through */
- case ALPS_PROTO_V4:
+
input_set_abs_params(dev1, ABS_X, 0, ALPS_V3_X_MAX, 0, 0);
input_set_abs_params(dev1, ABS_Y, 0, ALPS_V3_Y_MAX, 0, 0);
break;
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index a00a4ab92a0f..ae1ac354c778 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -39,6 +39,8 @@ struct alps_data {
int prev_fin; /* Finger bit from previous packet */
int multi_packet; /* Multi-packet data in progress */
unsigned char multi_data[6]; /* Saved multi-packet data */
+ int x1, x2, y1, y2; /* Coordinates from last MT report */
+ int fingers; /* Number of fingers from MT report */
u8 quirks;
struct timer_list timer;
};
diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c
new file mode 100644
index 000000000000..c29ae7654d5e
--- /dev/null
+++ b/drivers/input/mouse/navpoint.c
@@ -0,0 +1,369 @@
+/*
+ * Synaptics NavPoint (PXA27x SSP/SPI) driver.
+ *
+ * Copyright (C) 2012 Paul Parsons <lost.distance@yahoo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/input/navpoint.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/pxa2xx_ssp.h>
+#include <linux/slab.h>
+
+/*
+ * Synaptics Modular Embedded Protocol: Module Packet Format.
+ * Module header byte, bits 2:0 = Length (# bytes that follow)
+ * Module header byte, bits 4:3 = Control
+ * Module header byte, bits 7:5 = Module Address
+ */
+#define HEADER_LENGTH(byte) ((byte) & 0x07)
+#define HEADER_CONTROL(byte) (((byte) >> 3) & 0x03)
+#define HEADER_ADDRESS(byte) ((byte) >> 5)
+
+struct navpoint {
+ struct ssp_device *ssp;
+ struct input_dev *input;
+ struct device *dev;
+ int gpio;
+ int index;
+ u8 data[1 + HEADER_LENGTH(0xff)];
+};
+
+/*
+ * Initialization values for SSCR0_x, SSCR1_x, SSSR_x.
+ */
+static const u32 sscr0 = 0
+ | SSCR0_TUM /* TIM = 1; No TUR interrupts */
+ | SSCR0_RIM /* RIM = 1; No ROR interrupts */
+ | SSCR0_SSE /* SSE = 1; SSP enabled */
+ | SSCR0_Motorola /* FRF = 0; Motorola SPI */
+ | SSCR0_DataSize(16) /* DSS = 15; Data size = 16-bit */
+ ;
+static const u32 sscr1 = 0
+ | SSCR1_SCFR /* SCFR = 1; SSPSCLK only during transfers */
+ | SSCR1_SCLKDIR /* SCLKDIR = 1; Slave mode */
+ | SSCR1_SFRMDIR /* SFRMDIR = 1; Slave mode */
+ | SSCR1_RWOT /* RWOT = 1; Receive without transmit mode */
+ | SSCR1_RxTresh(1) /* RFT = 0; Receive FIFO threshold = 1 */
+ | SSCR1_SPH /* SPH = 1; SSPSCLK inactive 0.5 + 1 cycles */
+ | SSCR1_RIE /* RIE = 1; Receive FIFO interrupt enabled */
+ ;
+static const u32 sssr = 0
+ | SSSR_BCE /* BCE = 1; Clear BCE */
+ | SSSR_TUR /* TUR = 1; Clear TUR */
+ | SSSR_EOC /* EOC = 1; Clear EOC */
+ | SSSR_TINT /* TINT = 1; Clear TINT */
+ | SSSR_PINT /* PINT = 1; Clear PINT */
+ | SSSR_ROR /* ROR = 1; Clear ROR */
+ ;
+
+/*
+ * MEP Query $22: Touchpad Coordinate Range Query is not supported by
+ * the NavPoint module, so sampled values provide the default limits.
+ */
+#define NAVPOINT_X_MIN 1278
+#define NAVPOINT_X_MAX 5340
+#define NAVPOINT_Y_MIN 1572
+#define NAVPOINT_Y_MAX 4396
+#define NAVPOINT_PRESSURE_MIN 0
+#define NAVPOINT_PRESSURE_MAX 255
+
+static void navpoint_packet(struct navpoint *navpoint)
+{
+ int finger;
+ int gesture;
+ int x, y, z;
+
+ switch (navpoint->data[0]) {
+ case 0xff: /* Garbage (packet?) between reset and Hello packet */
+ case 0x00: /* Module 0, NULL packet */
+ break;
+
+ case 0x0e: /* Module 0, Absolute packet */
+ finger = (navpoint->data[1] & 0x01);
+ gesture = (navpoint->data[1] & 0x02);
+ x = ((navpoint->data[2] & 0x1f) << 8) | navpoint->data[3];
+ y = ((navpoint->data[4] & 0x1f) << 8) | navpoint->data[5];
+ z = navpoint->data[6];
+ input_report_key(navpoint->input, BTN_TOUCH, finger);
+ input_report_abs(navpoint->input, ABS_X, x);
+ input_report_abs(navpoint->input, ABS_Y, y);
+ input_report_abs(navpoint->input, ABS_PRESSURE, z);
+ input_report_key(navpoint->input, BTN_TOOL_FINGER, finger);
+ input_report_key(navpoint->input, BTN_LEFT, gesture);
+ input_sync(navpoint->input);
+ break;
+
+ case 0x19: /* Module 0, Hello packet */
+ if ((navpoint->data[1] & 0xf0) == 0x10)
+ break;
+ /* FALLTHROUGH */
+ default:
+ dev_warn(navpoint->dev,
+ "spurious packet: data=0x%02x,0x%02x,...\n",
+ navpoint->data[0], navpoint->data[1]);
+ break;
+ }
+}
+
+static irqreturn_t navpoint_irq(int irq, void *dev_id)
+{
+ struct navpoint *navpoint = dev_id;
+ struct ssp_device *ssp = navpoint->ssp;
+ irqreturn_t ret = IRQ_NONE;
+ u32 status;
+
+ status = pxa_ssp_read_reg(ssp, SSSR);
+ if (status & sssr) {
+ dev_warn(navpoint->dev,
+ "unexpected interrupt: status=0x%08x\n", status);
+ pxa_ssp_write_reg(ssp, SSSR, (status & sssr));
+ ret = IRQ_HANDLED;
+ }
+
+ while (status & SSSR_RNE) {
+ u32 data;
+
+ data = pxa_ssp_read_reg(ssp, SSDR);
+ navpoint->data[navpoint->index + 0] = (data >> 8);
+ navpoint->data[navpoint->index + 1] = data;
+ navpoint->index += 2;
+ if (HEADER_LENGTH(navpoint->data[0]) < navpoint->index) {
+ navpoint_packet(navpoint);
+ navpoint->index = 0;
+ }
+ status = pxa_ssp_read_reg(ssp, SSSR);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static void navpoint_up(struct navpoint *navpoint)
+{
+ struct ssp_device *ssp = navpoint->ssp;
+ int timeout;
+
+ clk_prepare_enable(ssp->clk);
+
+ pxa_ssp_write_reg(ssp, SSCR1, sscr1);
+ pxa_ssp_write_reg(ssp, SSSR, sssr);
+ pxa_ssp_write_reg(ssp, SSTO, 0);
+ pxa_ssp_write_reg(ssp, SSCR0, sscr0); /* SSCR0_SSE written last */
+
+ /* Wait until SSP port is ready for slave clock operations */
+ for (timeout = 100; timeout != 0; --timeout) {
+ if (!(pxa_ssp_read_reg(ssp, SSSR) & SSSR_CSS))
+ break;
+ msleep(1);
+ }
+
+ if (timeout == 0)
+ dev_err(navpoint->dev,
+ "timeout waiting for SSSR[CSS] to clear\n");
+
+ if (gpio_is_valid(navpoint->gpio))
+ gpio_set_value(navpoint->gpio, 1);
+}
+
+static void navpoint_down(struct navpoint *navpoint)
+{
+ struct ssp_device *ssp = navpoint->ssp;
+
+ if (gpio_is_valid(navpoint->gpio))
+ gpio_set_value(navpoint->gpio, 0);
+
+ pxa_ssp_write_reg(ssp, SSCR0, 0);
+
+ clk_disable_unprepare(ssp->clk);
+}
+
+static int navpoint_open(struct input_dev *input)
+{
+ struct navpoint *navpoint = input_get_drvdata(input);
+
+ navpoint_up(navpoint);
+
+ return 0;
+}
+
+static void navpoint_close(struct input_dev *input)
+{
+ struct navpoint *navpoint = input_get_drvdata(input);
+
+ navpoint_down(navpoint);
+}
+
+static int __devinit navpoint_probe(struct platform_device *pdev)
+{
+ const struct navpoint_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ struct ssp_device *ssp;
+ struct input_dev *input;
+ struct navpoint *navpoint;
+ int error;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ if (gpio_is_valid(pdata->gpio)) {
+ error = gpio_request_one(pdata->gpio, GPIOF_OUT_INIT_LOW,
+ "SYNAPTICS_ON");
+ if (error)
+ return error;
+ }
+
+ ssp = pxa_ssp_request(pdata->port, pdev->name);
+ if (!ssp) {
+ error = -ENODEV;
+ goto err_free_gpio;
+ }
+
+ /* HaRET does not disable devices before jumping into Linux */
+ if (pxa_ssp_read_reg(ssp, SSCR0) & SSCR0_SSE) {
+ pxa_ssp_write_reg(ssp, SSCR0, 0);
+ dev_warn(&pdev->dev, "ssp%d already enabled\n", pdata->port);
+ }
+
+ navpoint = kzalloc(sizeof(*navpoint), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!navpoint || !input) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ navpoint->ssp = ssp;
+ navpoint->input = input;
+ navpoint->dev = &pdev->dev;
+ navpoint->gpio = pdata->gpio;
+
+ input->name = pdev->name;
+ input->dev.parent = &pdev->dev;
+
+ __set_bit(EV_KEY, input->evbit);
+ __set_bit(EV_ABS, input->evbit);
+ __set_bit(BTN_LEFT, input->keybit);
+ __set_bit(BTN_TOUCH, input->keybit);
+ __set_bit(BTN_TOOL_FINGER, input->keybit);
+
+ input_set_abs_params(input, ABS_X,
+ NAVPOINT_X_MIN, NAVPOINT_X_MAX, 0, 0);
+ input_set_abs_params(input, ABS_Y,
+ NAVPOINT_Y_MIN, NAVPOINT_Y_MAX, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE,
+ NAVPOINT_PRESSURE_MIN, NAVPOINT_PRESSURE_MAX,
+ 0, 0);
+
+ input->open = navpoint_open;
+ input->close = navpoint_close;
+
+ input_set_drvdata(input, navpoint);
+
+ error = request_irq(ssp->irq, navpoint_irq, 0, pdev->name, navpoint);
+ if (error)
+ goto err_free_mem;
+
+ error = input_register_device(input);
+ if (error)
+ goto err_free_irq;
+
+ platform_set_drvdata(pdev, navpoint);
+ dev_dbg(&pdev->dev, "ssp%d, irq %d\n", pdata->port, ssp->irq);
+
+ return 0;
+
+err_free_irq:
+	free_irq(ssp->irq, navpoint);
+err_free_mem:
+ input_free_device(input);
+ kfree(navpoint);
+ pxa_ssp_free(ssp);
+err_free_gpio:
+ if (gpio_is_valid(pdata->gpio))
+ gpio_free(pdata->gpio);
+
+ return error;
+}
+
+static int __devexit navpoint_remove(struct platform_device *pdev)
+{
+ const struct navpoint_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ struct navpoint *navpoint = platform_get_drvdata(pdev);
+ struct ssp_device *ssp = navpoint->ssp;
+
+ free_irq(ssp->irq, navpoint);
+
+ input_unregister_device(navpoint->input);
+ kfree(navpoint);
+
+ pxa_ssp_free(ssp);
+
+ if (gpio_is_valid(pdata->gpio))
+ gpio_free(pdata->gpio);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int navpoint_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct navpoint *navpoint = platform_get_drvdata(pdev);
+ struct input_dev *input = navpoint->input;
+
+ mutex_lock(&input->mutex);
+ if (input->users)
+ navpoint_down(navpoint);
+ mutex_unlock(&input->mutex);
+
+ return 0;
+}
+
+static int navpoint_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct navpoint *navpoint = platform_get_drvdata(pdev);
+ struct input_dev *input = navpoint->input;
+
+ mutex_lock(&input->mutex);
+ if (input->users)
+ navpoint_up(navpoint);
+ mutex_unlock(&input->mutex);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(navpoint_pm_ops, navpoint_suspend, navpoint_resume);
+
+static struct platform_driver navpoint_driver = {
+ .probe = navpoint_probe,
+ .remove = __devexit_p(navpoint_remove),
+ .driver = {
+ .name = "navpoint",
+ .owner = THIS_MODULE,
+ .pm = &navpoint_pm_ops,
+ },
+};
+
+module_platform_driver(navpoint_driver);
+
+MODULE_AUTHOR("Paul Parsons <lost.distance@yahoo.com>");
+MODULE_DESCRIPTION("Synaptics NavPoint (PXA27x SSP/SPI) driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:navpoint");
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index 661a0ca3b3d6..3f5649f19082 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -41,7 +41,7 @@
#define GET_ABS_Y(packet) ((packet[2] << 2) | (packet[3] & 0x03))
/** Driver version. */
-static const char fsp_drv_ver[] = "1.0.0-K";
+static const char fsp_drv_ver[] = "1.1.0-K";
/*
* Make sure that the value being sent to FSP will not conflict with
@@ -303,6 +303,27 @@ static int fsp_get_revision(struct psmouse *psmouse, int *rev)
return 0;
}
+static int fsp_get_sn(struct psmouse *psmouse, int *sn)
+{
+ int v0, v1, v2;
+ int rc = -EIO;
+
+	/* since Cx, the production number is available at 0x0b40 ~ 0x0b42 */
+ if (fsp_page_reg_write(psmouse, FSP_PAGE_0B))
+ goto out;
+ if (fsp_reg_read(psmouse, FSP_REG_SN0, &v0))
+ goto out;
+ if (fsp_reg_read(psmouse, FSP_REG_SN1, &v1))
+ goto out;
+ if (fsp_reg_read(psmouse, FSP_REG_SN2, &v2))
+ goto out;
+ *sn = (v0 << 16) | (v1 << 8) | v2;
+ rc = 0;
+out:
+ fsp_page_reg_write(psmouse, FSP_PAGE_DEFAULT);
+ return rc;
+}
+
static int fsp_get_buttons(struct psmouse *psmouse, int *btn)
{
static const int buttons[] = {
@@ -1000,16 +1021,21 @@ static int fsp_reconnect(struct psmouse *psmouse)
int fsp_init(struct psmouse *psmouse)
{
struct fsp_data *priv;
- int ver, rev;
+ int ver, rev, sn = 0;
int error;
if (fsp_get_version(psmouse, &ver) ||
fsp_get_revision(psmouse, &rev)) {
return -ENODEV;
}
+ if (ver >= FSP_VER_STL3888_C0) {
+ /* firmware information is only available since C0 */
+ fsp_get_sn(psmouse, &sn);
+ }
- psmouse_info(psmouse, "Finger Sensing Pad, hw: %d.%d.%d, sw: %s\n",
- ver >> 4, ver & 0x0F, rev, fsp_drv_ver);
+ psmouse_info(psmouse,
+ "Finger Sensing Pad, hw: %d.%d.%d, sn: %x, sw: %s\n",
+ ver >> 4, ver & 0x0F, rev, sn, fsp_drv_ver);
psmouse->private = priv = kzalloc(sizeof(struct fsp_data), GFP_KERNEL);
if (!priv)
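
fsp_get_sn() above packs the three production-number registers into one value, which fsp_init() then prints next to the hardware version. A host-side sketch of the packing and the resulting log line; the register and version values here are made up:

#include <stdio.h>

int main(void)
{
	int v0 = 0x01, v1 = 0x23, v2 = 0x45;	/* made-up SN register contents */
	int ver = 0xd2, rev = 1;		/* made-up version/revision */
	int sn = (v0 << 16) | (v1 << 8) | v2;

	/* prints: Finger Sensing Pad, hw: 13.2.1, sn: 12345, sw: 1.1.0-K */
	printf("Finger Sensing Pad, hw: %d.%d.%d, sn: %x, sw: %s\n",
	       ver >> 4, ver & 0x0F, rev, sn, "1.1.0-K");

	return 0;
}
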
diff --git a/drivers/input/mouse/sentelic.h b/drivers/input/mouse/sentelic.h
index 334de19e5ddb..aa697ece405b 100644
--- a/drivers/input/mouse/sentelic.h
+++ b/drivers/input/mouse/sentelic.h
@@ -65,6 +65,14 @@
#define FSP_BIT_SWC1_GST_GRP1 BIT(6)
#define FSP_BIT_SWC1_BX_COMPAT BIT(7)
+#define FSP_PAGE_0B (0x0b)
+#define FSP_PAGE_82 (0x82)
+#define FSP_PAGE_DEFAULT FSP_PAGE_82
+
+#define FSP_REG_SN0 (0x40)
+#define FSP_REG_SN1 (0x41)
+#define FSP_REG_SN2 (0x42)
+
/* Finger-sensing Pad packet formating related definitions */
/* absolute packet type */
diff --git a/drivers/input/mouse/sermouse.c b/drivers/input/mouse/sermouse.c
index 17ff137b9bd5..d5928fd0c914 100644
--- a/drivers/input/mouse/sermouse.c
+++ b/drivers/input/mouse/sermouse.c
@@ -355,15 +355,4 @@ static struct serio_driver sermouse_drv = {
.disconnect = sermouse_disconnect,
};
-static int __init sermouse_init(void)
-{
- return serio_register_driver(&sermouse_drv);
-}
-
-static void __exit sermouse_exit(void)
-{
- serio_unregister_driver(&sermouse_drv);
-}
-
-module_init(sermouse_init);
-module_exit(sermouse_exit);
+module_serio_driver(sermouse_drv);
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index a4b14a41cbf4..c703d53be3a0 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -45,16 +45,6 @@
#define YMIN_NOMINAL 1408
#define YMAX_NOMINAL 4448
-/*
- * Synaptics touchpads report the y coordinate from bottom to top, which is
- * opposite from what userspace expects.
- * This function is used to invert y before reporting.
- */
-static int synaptics_invert_y(int y)
-{
- return YMAX_NOMINAL + YMIN_NOMINAL - y;
-}
-
/*****************************************************************************
* Stuff we need even when we do not want native Synaptics support
@@ -112,6 +102,16 @@ void synaptics_reset(struct psmouse *psmouse)
****************************************************************************/
/*
+ * Synaptics touchpads report the y coordinate from bottom to top, which is
+ * opposite from what userspace expects.
+ * This function is used to invert y before reporting.
+ */
+static int synaptics_invert_y(int y)
+{
+ return YMAX_NOMINAL + YMIN_NOMINAL - y;
+}
+
+/*
* Send a command to the synpatics touchpad by special commands
*/
static int synaptics_send_cmd(struct psmouse *psmouse, unsigned char c, unsigned char *param)
diff --git a/drivers/input/mouse/vsxxxaa.c b/drivers/input/mouse/vsxxxaa.c
index eb9a3cfbeefa..e900d465aaf6 100644
--- a/drivers/input/mouse/vsxxxaa.c
+++ b/drivers/input/mouse/vsxxxaa.c
@@ -548,16 +548,4 @@ static struct serio_driver vsxxxaa_drv = {
.disconnect = vsxxxaa_disconnect,
};
-static int __init vsxxxaa_init(void)
-{
- return serio_register_driver(&vsxxxaa_drv);
-}
-
-static void __exit vsxxxaa_exit(void)
-{
- serio_unregister_driver(&vsxxxaa_drv);
-}
-
-module_init(vsxxxaa_init);
-module_exit(vsxxxaa_exit);
-
+module_serio_driver(vsxxxaa_drv);
diff --git a/drivers/input/of_keymap.c b/drivers/input/of_keymap.c
deleted file mode 100644
index 061493d57682..000000000000
--- a/drivers/input/of_keymap.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Helpers for open firmware matrix keyboard bindings
- *
- * Copyright (C) 2012 Google, Inc
- *
- * Author:
- * Olof Johansson <olof@lixom.net>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/input.h>
-#include <linux/of.h>
-#include <linux/input/matrix_keypad.h>
-#include <linux/export.h>
-#include <linux/gfp.h>
-#include <linux/slab.h>
-
-struct matrix_keymap_data *
-matrix_keyboard_of_fill_keymap(struct device_node *np,
- const char *propname)
-{
- struct matrix_keymap_data *kd;
- u32 *keymap;
- int proplen, i;
- const __be32 *prop;
-
- if (!np)
- return NULL;
-
- if (!propname)
- propname = "linux,keymap";
-
- prop = of_get_property(np, propname, &proplen);
- if (!prop)
- return NULL;
-
- if (proplen % sizeof(u32)) {
- pr_warn("Malformed keymap property %s in %s\n",
- propname, np->full_name);
- return NULL;
- }
-
- kd = kzalloc(sizeof(*kd), GFP_KERNEL);
- if (!kd)
- return NULL;
-
- kd->keymap = keymap = kzalloc(proplen, GFP_KERNEL);
- if (!kd->keymap) {
- kfree(kd);
- return NULL;
- }
-
- kd->keymap_size = proplen / sizeof(u32);
-
- for (i = 0; i < kd->keymap_size; i++) {
- u32 tmp = be32_to_cpup(prop + i);
- int key_code, row, col;
-
- row = (tmp >> 24) & 0xff;
- col = (tmp >> 16) & 0xff;
- key_code = tmp & 0xffff;
- keymap[i] = KEY(row, col, key_code);
- }
-
- return kd;
-}
-EXPORT_SYMBOL_GPL(matrix_keyboard_of_fill_keymap);
-
-void matrix_keyboard_of_free_keymap(const struct matrix_keymap_data *kd)
-{
- if (kd) {
- kfree(kd->keymap);
- kfree(kd);
- }
-}
-EXPORT_SYMBOL_GPL(matrix_keyboard_of_free_keymap);
diff --git a/drivers/input/serio/pcips2.c b/drivers/input/serio/pcips2.c
index 43494742541c..0c42497aaaf4 100644
--- a/drivers/input/serio/pcips2.c
+++ b/drivers/input/serio/pcips2.c
@@ -206,6 +206,7 @@ static const struct pci_device_id pcips2_ids[] = {
},
{ 0, }
};
+MODULE_DEVICE_TABLE(pci, pcips2_ids);
static struct pci_driver pcips2_driver = {
.name = "pcips2",
@@ -214,20 +215,8 @@ static struct pci_driver pcips2_driver = {
.remove = __devexit_p(pcips2_remove),
};
-static int __init pcips2_init(void)
-{
- return pci_register_driver(&pcips2_driver);
-}
-
-static void __exit pcips2_exit(void)
-{
- pci_unregister_driver(&pcips2_driver);
-}
-
-module_init(pcips2_init);
-module_exit(pcips2_exit);
+module_pci_driver(pcips2_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("PCI PS/2 keyboard/mouse driver");
-MODULE_DEVICE_TABLE(pci, pcips2_ids);
diff --git a/drivers/input/serio/ps2mult.c b/drivers/input/serio/ps2mult.c
index 15aa81c9f1fb..a76fb64f03db 100644
--- a/drivers/input/serio/ps2mult.c
+++ b/drivers/input/serio/ps2mult.c
@@ -304,15 +304,4 @@ static struct serio_driver ps2mult_drv = {
.reconnect = ps2mult_reconnect,
};
-static int __init ps2mult_init(void)
-{
- return serio_register_driver(&ps2mult_drv);
-}
-
-static void __exit ps2mult_exit(void)
-{
- serio_unregister_driver(&ps2mult_drv);
-}
-
-module_init(ps2mult_init);
-module_exit(ps2mult_exit);
+module_serio_driver(ps2mult_drv);
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 4494233d331a..59df2e7317a3 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -165,31 +165,38 @@ static ssize_t serio_raw_read(struct file *file, char __user *buffer,
struct serio_raw *serio_raw = client->serio_raw;
char uninitialized_var(c);
ssize_t read = 0;
- int retval;
+ int error;
- if (serio_raw->dead)
- return -ENODEV;
+ for (;;) {
+ if (serio_raw->dead)
+ return -ENODEV;
- if (serio_raw->head == serio_raw->tail && (file->f_flags & O_NONBLOCK))
- return -EAGAIN;
+ if (serio_raw->head == serio_raw->tail &&
+ (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
- retval = wait_event_interruptible(serio_raw->wait,
- serio_raw->head != serio_raw->tail || serio_raw->dead);
- if (retval)
- return retval;
+ if (count == 0)
+ break;
- if (serio_raw->dead)
- return -ENODEV;
+ while (read < count && serio_raw_fetch_byte(serio_raw, &c)) {
+ if (put_user(c, buffer++))
+ return -EFAULT;
+ read++;
+ }
- while (read < count && serio_raw_fetch_byte(serio_raw, &c)) {
- if (put_user(c, buffer++)) {
- retval = -EFAULT;
+ if (read)
break;
+
+ if (!(file->f_flags & O_NONBLOCK)) {
+ error = wait_event_interruptible(serio_raw->wait,
+ serio_raw->head != serio_raw->tail ||
+ serio_raw->dead);
+ if (error)
+ return error;
}
- read++;
}
- return read ?: retval;
+ return read;
}
static ssize_t serio_raw_write(struct file *file, const char __user *buffer,
@@ -197,8 +204,7 @@ static ssize_t serio_raw_write(struct file *file, const char __user *buffer,
{
struct serio_raw_client *client = file->private_data;
struct serio_raw *serio_raw = client->serio_raw;
- ssize_t written = 0;
- int retval;
+ int retval = 0;
unsigned char c;
retval = mutex_lock_interruptible(&serio_raw_mutex);
@@ -218,16 +224,20 @@ static ssize_t serio_raw_write(struct file *file, const char __user *buffer,
retval = -EFAULT;
goto out;
}
+
if (serio_write(serio_raw->serio, c)) {
- retval = -EIO;
+			/* Report -EIO only if nothing was written yet */
+ if (retval == 0)
+ retval = -EIO;
goto out;
}
- written++;
+
+ retval++;
}
out:
mutex_unlock(&serio_raw_mutex);
- return written ?: retval;
+ return retval;
}
static unsigned int serio_raw_poll(struct file *file, poll_table *wait)
@@ -432,15 +442,4 @@ static struct serio_driver serio_raw_drv = {
.manual_bind = true,
};
-static int __init serio_raw_init(void)
-{
- return serio_register_driver(&serio_raw_drv);
-}
-
-static void __exit serio_raw_exit(void)
-{
- serio_unregister_driver(&serio_raw_drv);
-}
-
-module_init(serio_raw_init);
-module_exit(serio_raw_exit);
+module_serio_driver(serio_raw_drv);
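
The reworked serio_raw_read() above now loops until it has at least one byte to hand back (or the caller asked for zero bytes), keeps returning -EAGAIN for a non-blocking read on an empty queue, and reports the number of bytes actually copied. A user-space sketch of the non-blocking case; the /dev/serio_raw0 node name is an assumption, the actual name depends on how many serio_raw ports are bound:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n;
	int fd = open("/dev/serio_raw0", O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	n = read(fd, buf, sizeof(buf));
	if (n < 0 && errno == EAGAIN)
		printf("queue empty, would block\n");
	else if (n >= 0)
		printf("read %zd byte(s)\n", n);
	else
		perror("read");

	close(fd);
	return 0;
}
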
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index d96d4c2a76a9..1e983bec7d86 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -73,7 +73,8 @@ struct xps2data {
spinlock_t lock;
void __iomem *base_address; /* virt. address of control registers */
unsigned int flags;
- struct serio serio; /* serio */
+ struct serio *serio; /* serio */
+ struct device *dev;
};
/************************************/
@@ -119,7 +120,7 @@ static irqreturn_t xps2_interrupt(int irq, void *dev_id)
/* Check which interrupt is active */
if (intr_sr & XPS2_IPIXR_RX_OVF)
- dev_warn(drvdata->serio.dev.parent, "receive overrun error\n");
+ dev_warn(drvdata->dev, "receive overrun error\n");
if (intr_sr & XPS2_IPIXR_RX_ERR)
drvdata->flags |= SERIO_PARITY;
@@ -132,10 +133,10 @@ static irqreturn_t xps2_interrupt(int irq, void *dev_id)
/* Error, if a byte is not received */
if (status) {
- dev_err(drvdata->serio.dev.parent,
+ dev_err(drvdata->dev,
"wrong rcvd byte count (%d)\n", status);
} else {
- serio_interrupt(&drvdata->serio, c, drvdata->flags);
+ serio_interrupt(drvdata->serio, c, drvdata->flags);
drvdata->flags = 0;
}
}
@@ -193,7 +194,7 @@ static int sxps2_open(struct serio *pserio)
error = request_irq(drvdata->irq, &xps2_interrupt, 0,
DRIVER_NAME, drvdata);
if (error) {
- dev_err(drvdata->serio.dev.parent,
+ dev_err(drvdata->dev,
"Couldn't allocate interrupt %d\n", drvdata->irq);
return error;
}
@@ -259,15 +260,16 @@ static int __devinit xps2_of_probe(struct platform_device *ofdev)
}
drvdata = kzalloc(sizeof(struct xps2data), GFP_KERNEL);
- if (!drvdata) {
- dev_err(dev, "Couldn't allocate device private record\n");
- return -ENOMEM;
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!drvdata || !serio) {
+ error = -ENOMEM;
+ goto failed1;
}
- dev_set_drvdata(dev, drvdata);
-
spin_lock_init(&drvdata->lock);
drvdata->irq = r_irq.start;
+ drvdata->serio = serio;
+ drvdata->dev = dev;
phys_addr = r_mem.start;
remap_size = resource_size(&r_mem);
@@ -298,7 +300,6 @@ static int __devinit xps2_of_probe(struct platform_device *ofdev)
(unsigned long long)phys_addr, drvdata->base_address,
drvdata->irq);
- serio = &drvdata->serio;
serio->id.type = SERIO_8042;
serio->write = sxps2_write;
serio->open = sxps2_open;
@@ -312,13 +313,14 @@ static int __devinit xps2_of_probe(struct platform_device *ofdev)
serio_register_port(serio);
+ platform_set_drvdata(ofdev, drvdata);
return 0; /* success */
failed2:
release_mem_region(phys_addr, remap_size);
failed1:
+ kfree(serio);
kfree(drvdata);
- dev_set_drvdata(dev, NULL);
return error;
}
@@ -333,22 +335,21 @@ failed1:
*/
static int __devexit xps2_of_remove(struct platform_device *of_dev)
{
- struct device *dev = &of_dev->dev;
- struct xps2data *drvdata = dev_get_drvdata(dev);
+ struct xps2data *drvdata = platform_get_drvdata(of_dev);
struct resource r_mem; /* IO mem resources */
- serio_unregister_port(&drvdata->serio);
+ serio_unregister_port(drvdata->serio);
iounmap(drvdata->base_address);
/* Get iospace of the device */
if (of_address_to_resource(of_dev->dev.of_node, 0, &r_mem))
- dev_err(dev, "invalid address\n");
+ dev_err(drvdata->dev, "invalid address\n");
else
release_mem_region(r_mem.start, resource_size(&r_mem));
kfree(drvdata);
- dev_set_drvdata(dev, NULL);
+ platform_set_drvdata(of_dev, NULL);
return 0;
}
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 755a39e4c9e9..ee83c3904ee8 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1862,7 +1862,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (i == ARRAY_SIZE(speeds)) {
dev_info(&intf->dev,
"Aiptek tried all speeds, no sane response\n");
- goto fail2;
+ goto fail3;
}
/* Associate this driver's struct with the usb interface.
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index b4842d0e61dd..b79d45198d82 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -135,6 +135,6 @@ extern const struct usb_device_id wacom_ids[];
void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
void wacom_setup_device_quirks(struct wacom_features *features);
-void wacom_setup_input_capabilities(struct input_dev *input_dev,
- struct wacom_wac *wacom_wac);
+int wacom_setup_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac);
#endif
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 79a0509882d4..cad5602d3ce4 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -28,6 +28,7 @@
#define HID_USAGE_Y_TILT 0x3e
#define HID_USAGE_FINGER 0x22
#define HID_USAGE_STYLUS 0x20
+#define HID_USAGE_CONTACTMAX 0x55
#define HID_COLLECTION 0xa1
#define HID_COLLECTION_LOGICAL 0x02
#define HID_COLLECTION_END 0xc0
@@ -204,6 +205,27 @@ static int wacom_parse_logical_collection(unsigned char *report,
return length;
}
+static void wacom_retrieve_report_data(struct usb_interface *intf,
+ struct wacom_features *features)
+{
+ int result = 0;
+ unsigned char *rep_data;
+
+ rep_data = kmalloc(2, GFP_KERNEL);
+ if (rep_data) {
+
+ rep_data[0] = 12;
+ result = wacom_get_report(intf, WAC_HID_FEATURE_REPORT,
+					  rep_data[0], rep_data, 2,
+ WAC_MSG_RETRIES);
+
+ if (result >= 0 && rep_data[1] > 2)
+ features->touch_max = rep_data[1];
+
+ kfree(rep_data);
+ }
+}
+
/*
* Interface Descriptor of wacom devices can be incomplete and
* inconsistent so wacom_features table is used to store stylus
@@ -236,6 +258,9 @@ static int wacom_parse_logical_collection(unsigned char *report,
* 3rd gen Bamboo Touch no longer define a Digitizer-Finger Pysical
* Collection. Instead they define a Logical Collection with a single
* Logical Maximum for both X and Y.
+ *
+ * The Intuos5 touch interface does not contain useful data. We deal with
+ * this after returning from this function.
*/
static int wacom_parse_hid(struct usb_interface *intf,
struct hid_descriptor *hid_desc,
@@ -295,6 +320,10 @@ static int wacom_parse_hid(struct usb_interface *intf,
/* need to reset back */
features->pktlen = WACOM_PKGLEN_TPC2FG;
}
+
+ if (features->type == MTSCREEN)
+ features->pktlen = WACOM_PKGLEN_MTOUCH;
+
if (features->type == BAMBOO_PT) {
/* need to reset back */
features->pktlen = WACOM_PKGLEN_BBTOUCH;
@@ -327,18 +356,15 @@ static int wacom_parse_hid(struct usb_interface *intf,
case HID_USAGE_Y:
if (usage == WCM_DESKTOP) {
if (finger) {
- features->device_type = BTN_TOOL_FINGER;
- if (features->type == TABLETPC2FG) {
- /* need to reset back */
- features->pktlen = WACOM_PKGLEN_TPC2FG;
+ int type = features->type;
+
+ if (type == TABLETPC2FG || type == MTSCREEN) {
features->y_max =
get_unaligned_le16(&report[i + 3]);
features->y_phy =
get_unaligned_le16(&report[i + 6]);
i += 7;
- } else if (features->type == BAMBOO_PT) {
- /* need to reset back */
- features->pktlen = WACOM_PKGLEN_BBTOUCH;
+ } else if (type == BAMBOO_PT) {
features->y_phy =
get_unaligned_le16(&report[i + 3]);
features->y_max =
@@ -352,10 +378,6 @@ static int wacom_parse_hid(struct usb_interface *intf,
i += 4;
}
} else if (pen) {
- /* penabled only accepts exact bytes of data */
- if (features->type == TABLETPC2FG)
- features->pktlen = WACOM_PKGLEN_GRAPHIRE;
- features->device_type = BTN_TOOL_PEN;
features->y_max =
get_unaligned_le16(&report[i + 3]);
i += 4;
@@ -377,6 +399,11 @@ static int wacom_parse_hid(struct usb_interface *intf,
pen = 1;
i++;
break;
+
+ case HID_USAGE_CONTACTMAX:
+ wacom_retrieve_report_data(intf, features);
+ i++;
+ break;
}
break;
@@ -413,22 +440,29 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
if (!rep_data)
return error;
- /* ask to report tablet data if it is MT Tablet PC or
- * not a Tablet PC */
- if (features->type == TABLETPC2FG) {
- do {
- rep_data[0] = 3;
- rep_data[1] = 4;
- rep_data[2] = 0;
- rep_data[3] = 0;
- report_id = 3;
- error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT,
- report_id, rep_data, 4, 1);
- if (error >= 0)
- error = wacom_get_report(intf,
- WAC_HID_FEATURE_REPORT,
- report_id, rep_data, 4, 1);
- } while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES);
+ /* ask to report Wacom data */
+ if (features->device_type == BTN_TOOL_FINGER) {
+ /* if it is an MT Tablet PC touch */
+ if (features->type == TABLETPC2FG ||
+ features->type == MTSCREEN) {
+ do {
+ rep_data[0] = 3;
+ rep_data[1] = 4;
+ rep_data[2] = 0;
+ rep_data[3] = 0;
+ report_id = 3;
+ error = wacom_set_report(intf,
+ WAC_HID_FEATURE_REPORT,
+ report_id,
+ rep_data, 4, 1);
+ if (error >= 0)
+ error = wacom_get_report(intf,
+ WAC_HID_FEATURE_REPORT,
+ report_id,
+ rep_data, 4, 1);
+ } while ((error < 0 || rep_data[1] != 4) &&
+ limit++ < WAC_MSG_RETRIES);
+ }
} else if (features->type != TABLETPC &&
features->type != WIRELESS &&
features->device_type == BTN_TOOL_PEN) {
@@ -450,7 +484,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
}
static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
- struct wacom_features *features)
+ struct wacom_features *features)
{
int error = 0;
struct usb_host_interface *interface = intf->cur_altsetting;
@@ -478,16 +512,21 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
}
}
- /* only Tablet PCs and Bamboo P&T need to retrieve the info */
- if ((features->type != TABLETPC) && (features->type != TABLETPC2FG) &&
- (features->type != BAMBOO_PT))
+ /* only devices that support touch need to retrieve the info */
+ if (features->type != TABLETPC &&
+ features->type != TABLETPC2FG &&
+ features->type != BAMBOO_PT &&
+ features->type != MTSCREEN) {
goto out;
+ }
- if (usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc)) {
- if (usb_get_extra_descriptor(&interface->endpoint[0],
- HID_DEVICET_REPORT, &hid_desc)) {
- printk("wacom: can not retrieve extra class descriptor\n");
- error = 1;
+ error = usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc);
+ if (error) {
+ error = usb_get_extra_descriptor(&interface->endpoint[0],
+ HID_DEVICET_REPORT, &hid_desc);
+ if (error) {
+ dev_err(&intf->dev,
+ "can not retrieve extra class descriptor\n");
goto out;
}
}
@@ -577,23 +616,39 @@ static void wacom_remove_shared_data(struct wacom_wac *wacom)
static int wacom_led_control(struct wacom *wacom)
{
unsigned char *buf;
- int retval, led = 0;
+ int retval;
buf = kzalloc(9, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- if (wacom->wacom_wac.features.type == WACOM_21UX2 ||
- wacom->wacom_wac.features.type == WACOM_24HD)
- led = (wacom->led.select[1] << 4) | 0x40;
-
- led |= wacom->led.select[0] | 0x4;
-
- buf[0] = WAC_CMD_LED_CONTROL;
- buf[1] = led;
- buf[2] = wacom->led.llv;
- buf[3] = wacom->led.hlv;
- buf[4] = wacom->led.img_lum;
+ if (wacom->wacom_wac.features.type >= INTUOS5S &&
+ wacom->wacom_wac.features.type <= INTUOS5L) {
+ /*
+ * Touch Ring and crop mark LED luminance may take on
+ * one of four values:
+ * 0 = Low; 1 = Medium; 2 = High; 3 = Off
+ */
+ int ring_led = wacom->led.select[0] & 0x03;
+ int ring_lum = (((wacom->led.llv & 0x60) >> 5) - 1) & 0x03;
+ int crop_lum = 0;
+
+ buf[0] = WAC_CMD_LED_CONTROL;
+ buf[1] = (crop_lum << 4) | (ring_lum << 2) | (ring_led);
+ } else {
+ int led = wacom->led.select[0] | 0x4;
+
+ if (wacom->wacom_wac.features.type == WACOM_21UX2 ||
+ wacom->wacom_wac.features.type == WACOM_24HD)
+ led |= (wacom->led.select[1] << 4) | 0x40;
+
+ buf[0] = WAC_CMD_LED_CONTROL;
+ buf[1] = led;
+ buf[2] = wacom->led.llv;
+ buf[3] = wacom->led.hlv;
+ buf[4] = wacom->led.img_lum;
+ }
retval = wacom_set_report(wacom->intf, 0x03, WAC_CMD_LED_CONTROL,
buf, 9, WAC_CMD_RETRIES);
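A worked example of the Intuos5 branch above, using the defaults that wacom_initialize_leds() assigns further down (select[0] = 0, llv = 32); the numbers simply evaluate the expressions in the patch:

    ring_led = 0 & 0x03                          = 0
    ring_lum = (((32 & 0x60) >> 5) - 1) & 0x03
             = ((0x20 >> 5) - 1) & 0x03          = 0      /* "Low" */
    crop_lum = 0
    buf[1]   = (0 << 4) | (0 << 2) | 0           = 0x00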
@@ -786,6 +841,17 @@ static struct attribute_group intuos4_led_attr_group = {
.attrs = intuos4_led_attrs,
};
+static struct attribute *intuos5_led_attrs[] = {
+ &dev_attr_status0_luminance.attr,
+ &dev_attr_status_led0_select.attr,
+ NULL
+};
+
+static struct attribute_group intuos5_led_attr_group = {
+ .name = "wacom_led",
+ .attrs = intuos5_led_attrs,
+};
+
static int wacom_initialize_leds(struct wacom *wacom)
{
int error;
@@ -815,6 +881,19 @@ static int wacom_initialize_leds(struct wacom *wacom)
&cintiq_led_attr_group);
break;
+ case INTUOS5S:
+ case INTUOS5:
+ case INTUOS5L:
+ wacom->led.select[0] = 0;
+ wacom->led.select[1] = 0;
+ wacom->led.llv = 32;
+ wacom->led.hlv = 0;
+ wacom->led.img_lum = 0;
+
+ error = sysfs_create_group(&wacom->intf->dev.kobj,
+ &intuos5_led_attr_group);
+ break;
+
default:
return 0;
}
@@ -843,6 +922,13 @@ static void wacom_destroy_leds(struct wacom *wacom)
sysfs_remove_group(&wacom->intf->dev.kobj,
&cintiq_led_attr_group);
break;
+
+ case INTUOS5S:
+ case INTUOS5:
+ case INTUOS5L:
+ sysfs_remove_group(&wacom->intf->dev.kobj,
+ &intuos5_led_attr_group);
+ break;
}
}
@@ -904,8 +990,10 @@ static int wacom_register_input(struct wacom *wacom)
int error;
input_dev = input_allocate_device();
- if (!input_dev)
- return -ENOMEM;
+ if (!input_dev) {
+ error = -ENOMEM;
+ goto fail1;
+ }
input_dev->name = wacom_wac->name;
input_dev->dev.parent = &intf->dev;
@@ -915,14 +1003,20 @@ static int wacom_register_input(struct wacom *wacom)
input_set_drvdata(input_dev, wacom);
wacom_wac->input = input_dev;
- wacom_setup_input_capabilities(input_dev, wacom_wac);
+ error = wacom_setup_input_capabilities(input_dev, wacom_wac);
+ if (error)
+ goto fail1;
error = input_register_device(input_dev);
- if (error) {
- input_free_device(input_dev);
- wacom_wac->input = NULL;
- }
+ if (error)
+ goto fail2;
+ return 0;
+
+fail2:
+ input_free_device(input_dev);
+ wacom_wac->input = NULL;
+fail1:
return error;
}
@@ -941,22 +1035,22 @@ static void wacom_wireless_work(struct work_struct *work)
wacom = usb_get_intfdata(usbdev->config->interface[1]);
if (wacom->wacom_wac.input)
input_unregister_device(wacom->wacom_wac.input);
- wacom->wacom_wac.input = 0;
+ wacom->wacom_wac.input = NULL;
/* Touch interface */
wacom = usb_get_intfdata(usbdev->config->interface[2]);
if (wacom->wacom_wac.input)
input_unregister_device(wacom->wacom_wac.input);
- wacom->wacom_wac.input = 0;
+ wacom->wacom_wac.input = NULL;
if (wacom_wac->pid == 0) {
- printk(KERN_INFO "wacom: wireless tablet disconnected\n");
+ dev_info(&wacom->intf->dev, "wireless tablet disconnected\n");
} else {
const struct usb_device_id *id = wacom_ids;
- printk(KERN_INFO
- "wacom: wireless tablet connected with PID %x\n",
- wacom_wac->pid);
+ dev_info(&wacom->intf->dev,
+ "wireless tablet connected with PID %x\n",
+ wacom_wac->pid);
while (id->match_flags) {
if (id->idVendor == USB_VENDOR_ID_WACOM &&
@@ -966,8 +1060,8 @@ static void wacom_wireless_work(struct work_struct *work)
}
if (!id->match_flags) {
- printk(KERN_INFO
- "wacom: ignorning unknown PID.\n");
+ dev_info(&wacom->intf->dev,
+ "ignoring unknown PID.\n");
return;
}
@@ -1038,11 +1132,33 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
endpoint = &intf->cur_altsetting->endpoint[0].desc;
- /* Retrieve the physical and logical size for OEM devices */
+ /* Retrieve the physical and logical size for touch devices */
error = wacom_retrieve_hid_descriptor(intf, features);
if (error)
goto fail3;
+ /*
+ * Intuos5 has no useful data about its touch interface in its
+ * HID descriptor. If this is the touch interface (wMaxPacketSize
+ * of WACOM_PKGLEN_BBTOUCH3), override the table values.
+ */
+ if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
+ if (endpoint->wMaxPacketSize == WACOM_PKGLEN_BBTOUCH3) {
+ features->device_type = BTN_TOOL_FINGER;
+ features->pktlen = WACOM_PKGLEN_BBTOUCH3;
+
+ features->x_phy =
+ (features->x_max * 100) / features->x_resolution;
+ features->y_phy =
+ (features->y_max * 100) / features->y_resolution;
+
+ features->x_max = 4096;
+ features->y_max = 4096;
+ } else {
+ features->device_type = BTN_TOOL_PEN;
+ }
+ }
+
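As a worked example of the override above, take the "Wacom Intuos5 touch M" entry (0x27) added to wacom_wac.c below, and assume WACOM_INTUOS3_RES expands to 200 as it does elsewhere in wacom_wac.h:

    x_phy = 44704 * 100 / 200 = 22352
    y_phy = 27940 * 100 / 200 = 13970
    x_max = y_max = 4096          /* raw range reported by the touch sensor */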
wacom_setup_device_quirks(features);
strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name));
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index b327790e9a0c..004bc1bb1544 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -61,7 +61,8 @@ static int wacom_penpartner_irq(struct wacom_wac *wacom)
break;
default:
- printk(KERN_INFO "wacom_penpartner_irq: received unknown report #%d\n", data[0]);
+ dev_dbg(input->dev.parent,
+ "%s: received unknown report #%d\n", __func__, data[0]);
return 0;
}
@@ -76,8 +77,8 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
int prox, pressure;
if (data[0] != WACOM_REPORT_PENABLED) {
- dev_dbg(&input->dev,
- "wacom_pl_irq: received unknown report #%d\n", data[0]);
+ dev_dbg(input->dev.parent,
+ "%s: received unknown report #%d\n", __func__, data[0]);
return 0;
}
@@ -147,7 +148,8 @@ static int wacom_ptu_irq(struct wacom_wac *wacom)
struct input_dev *input = wacom->input;
if (data[0] != WACOM_REPORT_PENABLED) {
- printk(KERN_INFO "wacom_ptu_irq: received unknown report #%d\n", data[0]);
+ dev_dbg(input->dev.parent,
+ "%s: received unknown report #%d\n", __func__, data[0]);
return 0;
}
@@ -176,7 +178,8 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
struct input_dev *input = wacom->input;
int prox = data[1] & 0x20, pressure;
- dev_dbg(&input->dev, "wacom_dtu_irq: received report #%d\n", data[0]);
+ dev_dbg(input->dev.parent,
+ "%s: received report #%d", __func__, data[0]);
if (prox) {
/* Going into proximity select tool */
@@ -212,9 +215,8 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
int retval = 0;
if (data[0] != WACOM_REPORT_PENABLED) {
- dev_dbg(&input->dev,
- "wacom_graphire_irq: received unknown report #%d\n",
- data[0]);
+ dev_dbg(input->dev.parent,
+ "%s: received unknown report #%d\n", __func__, data[0]);
goto exit;
}
@@ -324,6 +326,9 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
/* Enter report */
if ((data[1] & 0xfc) == 0xc0) {
+ if (features->type >= INTUOS5S && features->type <= INTUOS5L)
+ wacom->shared->stylus_in_proximity = true;
+
/* serial number of the tool */
wacom->serial[idx] = ((data[3] & 0x0f) << 28) +
(data[4] << 20) + (data[5] << 12) +
@@ -409,6 +414,9 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
/* Exit report */
if ((data[1] & 0xfe) == 0x80) {
+ if (features->type >= INTUOS5S && features->type <= INTUOS5L)
+ wacom->shared->stylus_in_proximity = false;
+
/*
* Reset all states otherwise we lose the initial states
* when in-prox next time
@@ -455,6 +463,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
if ((data[1] & 0xb8) == 0xa0) {
t = (data[6] << 2) | ((data[7] >> 6) & 3);
if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
+ (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
features->type == WACOM_21UX2 || features->type == WACOM_24HD) {
t = (t << 1) | (data[1] & 1);
}
@@ -485,11 +494,13 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
unsigned int t;
int idx = 0, result;
- if (data[0] != WACOM_REPORT_PENABLED && data[0] != WACOM_REPORT_INTUOSREAD
- && data[0] != WACOM_REPORT_INTUOSWRITE && data[0] != WACOM_REPORT_INTUOSPAD) {
- dev_dbg(&input->dev,
- "wacom_intuos_irq: received unknown report #%d\n",
- data[0]);
+ if (data[0] != WACOM_REPORT_PENABLED &&
+ data[0] != WACOM_REPORT_INTUOSREAD &&
+ data[0] != WACOM_REPORT_INTUOSWRITE &&
+ data[0] != WACOM_REPORT_INTUOSPAD &&
+ data[0] != WACOM_REPORT_INTUOS5PAD) {
+ dev_dbg(input->dev.parent,
+ "%s: received unknown report #%d\n", __func__, data[0]);
return 0;
}
@@ -498,7 +509,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
idx = data[1] & 0x01;
/* pad packets. Works as a second tool and is always in prox */
- if (data[0] == WACOM_REPORT_INTUOSPAD) {
+ if (data[0] == WACOM_REPORT_INTUOSPAD || data[0] == WACOM_REPORT_INTUOS5PAD) {
if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
input_report_key(input, BTN_0, (data[2] & 0x01));
input_report_key(input, BTN_1, (data[3] & 0x01));
@@ -574,6 +585,34 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
input_report_key(input, wacom->tool[1], 0);
input_report_abs(input, ABS_MISC, 0);
}
+ } else if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
+ int i;
+
+ /* Touch ring mode switch has no capacitive sensor */
+ input_report_key(input, BTN_0, (data[3] & 0x01));
+
+ /*
+ * ExpressKeys on Intuos5 have a capacitive sensor in
+ * addition to the mechanical switch. Switch data is
+ * stored in data[4], capacitive data in data[5].
+ */
+ for (i = 0; i < 8; i++)
+ input_report_key(input, BTN_1 + i, data[4] & (1 << i));
+
+ if (data[2] & 0x80) {
+ input_report_abs(input, ABS_WHEEL, (data[2] & 0x7f));
+ } else {
+ /* Out of proximity, clear wheel value. */
+ input_report_abs(input, ABS_WHEEL, 0);
+ }
+
+ if (data[2] | (data[3] & 0x01) | data[4]) {
+ input_report_key(input, wacom->tool[1], 1);
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+ } else {
+ input_report_key(input, wacom->tool[1], 0);
+ input_report_abs(input, ABS_MISC, 0);
+ }
} else {
if (features->type == WACOM_21UX2) {
input_report_key(input, BTN_0, (data[5] & 0x01));
@@ -637,7 +676,9 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
(features->type == INTUOS3 ||
features->type == INTUOS3S ||
features->type == INTUOS4 ||
- features->type == INTUOS4S)) {
+ features->type == INTUOS4S ||
+ features->type == INTUOS5 ||
+ features->type == INTUOS5S)) {
return 0;
}
@@ -690,7 +731,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
} else if (wacom->tool[idx] == BTN_TOOL_MOUSE) {
/* I4 mouse */
- if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
+ if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
+ (features->type >= INTUOS5S && features->type <= INTUOS5L)) {
input_report_key(input, BTN_LEFT, data[6] & 0x01);
input_report_key(input, BTN_MIDDLE, data[6] & 0x02);
input_report_key(input, BTN_RIGHT, data[6] & 0x04);
@@ -717,7 +759,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
}
}
} else if ((features->type < INTUOS3S || features->type == INTUOS3L ||
- features->type == INTUOS4L) &&
+ features->type == INTUOS4L || features->type == INTUOS5L) &&
wacom->tool[idx] == BTN_TOOL_LENS) {
/* Lens cursor packets */
input_report_key(input, BTN_LEFT, data[8] & 0x01);
@@ -734,6 +776,72 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
return 1;
}
+static int find_slot_from_contactid(struct wacom_wac *wacom, int contactid)
+{
+ int touch_max = wacom->features.touch_max;
+ int i;
+
+ if (!wacom->slots)
+ return -1;
+
+ for (i = 0; i < touch_max; ++i) {
+ if (wacom->slots[i] == contactid)
+ return i;
+ }
+ for (i = 0; i < touch_max; ++i) {
+ if (wacom->slots[i] == -1)
+ return i;
+ }
+ return -1;
+}
+
+static int wacom_mt_touch(struct wacom_wac *wacom)
+{
+ struct input_dev *input = wacom->input;
+ char *data = wacom->data;
+ int i;
+ int current_num_contacts = data[2];
+ int contacts_to_send = 0;
+
+ /*
+ * The first packet resets the counter, since only the first
+ * packet in a series has a non-zero current_num_contacts.
+ */
+ if (current_num_contacts)
+ wacom->num_contacts_left = current_num_contacts;
+
+ /* There are at most 5 contacts per packet */
+ contacts_to_send = min(5, wacom->num_contacts_left);
+
+ for (i = 0; i < contacts_to_send; i++) {
+ int offset = (WACOM_BYTES_PER_MT_PACKET * i) + 3;
+ bool touch = data[offset] & 0x1;
+ int id = le16_to_cpup((__le16 *)&data[offset + 1]);
+ int slot = find_slot_from_contactid(wacom, id);
+
+ if (slot < 0)
+ continue;
+
+ input_mt_slot(input, slot);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
+ if (touch) {
+ int x = le16_to_cpup((__le16 *)&data[offset + 7]);
+ int y = le16_to_cpup((__le16 *)&data[offset + 9]);
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+ }
+ wacom->slots[slot] = touch ? id : -1;
+ }
+
+ input_mt_report_pointer_emulation(input, true);
+
+ wacom->num_contacts_left -= contacts_to_send;
+ if (wacom->num_contacts_left < 0)
+ wacom->num_contacts_left = 0;
+
+ return 1;
+}
+
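A sketch of the WACOM_REPORT_TPCMT packet layout as wacom_mt_touch() above decodes it; the offsets come straight from the code, the field names are descriptive only:

    /*
     * data[0]          report ID (WACOM_REPORT_TPCMT)
     * data[2]          contacts in this series (non-zero only in the
     *                  first packet of the series)
     * data[3 + 11*i]   one 11-byte record per contact, up to 5 per packet:
     *    +0   bit 0: touch down
     *    +1   contact ID     (little-endian 16 bit)
     *    +7   X coordinate   (little-endian 16 bit)
     *    +9   Y coordinate   (little-endian 16 bit)
     */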
static int wacom_tpc_mt_touch(struct wacom_wac *wacom)
{
struct input_dev *input = wacom->input;
@@ -772,6 +880,9 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
bool prox;
int x = 0, y = 0;
+ if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG)
+ return 0;
+
if (!wacom->shared->stylus_in_proximity) {
if (len == WACOM_PKGLEN_TPC1FG) {
prox = data[0] & 0x01;
@@ -835,15 +946,15 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
{
char *data = wacom->data;
- dev_dbg(&wacom->input->dev, "wacom_tpc_irq: received report #%d\n",
- data[0]);
+ dev_dbg(wacom->input->dev.parent,
+ "%s: received report #%d\n", __func__, data[0]);
switch (len) {
case WACOM_PKGLEN_TPC1FG:
- return wacom_tpc_single_touch(wacom, len);
+ return wacom_tpc_single_touch(wacom, len);
case WACOM_PKGLEN_TPC2FG:
- return wacom_tpc_mt_touch(wacom);
+ return wacom_tpc_mt_touch(wacom);
default:
switch (data[0]) {
@@ -852,6 +963,9 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
case WACOM_REPORT_TPCST:
return wacom_tpc_single_touch(wacom, len);
+ case WACOM_REPORT_TPCMT:
+ return wacom_mt_touch(wacom);
+
case WACOM_REPORT_PENABLED:
return wacom_tpc_pen(wacom);
}
@@ -1120,8 +1234,18 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
sync = wacom_intuos_irq(wacom_wac);
break;
+ case INTUOS5S:
+ case INTUOS5:
+ case INTUOS5L:
+ if (len == WACOM_PKGLEN_BBTOUCH3)
+ sync = wacom_bpt3_touch(wacom_wac);
+ else
+ sync = wacom_intuos_irq(wacom_wac);
+ break;
+
case TABLETPC:
case TABLETPC2FG:
+ case MTSCREEN:
sync = wacom_tpc_irq(wacom_wac, len);
break;
@@ -1194,7 +1318,9 @@ void wacom_setup_device_quirks(struct wacom_features *features)
/* these device have multiple inputs */
if (features->type == TABLETPC || features->type == TABLETPC2FG ||
- features->type == BAMBOO_PT || features->type == WIRELESS)
+ features->type == BAMBOO_PT || features->type == WIRELESS ||
+ (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
+ features->type == MTSCREEN)
features->quirks |= WACOM_QUIRK_MULTI_INPUT;
/* quirk for bamboo touch with 2 low res touches */
@@ -1225,8 +1351,8 @@ static unsigned int wacom_calculate_touch_res(unsigned int logical_max,
return (logical_max * 100) / physical_max;
}
-void wacom_setup_input_capabilities(struct input_dev *input_dev,
- struct wacom_wac *wacom_wac)
+int wacom_setup_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac)
{
struct wacom_features *features = &wacom_wac->features;
int i;
@@ -1361,6 +1487,50 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
wacom_setup_intuos(wacom_wac);
break;
+ case INTUOS5:
+ case INTUOS5L:
+ if (features->device_type == BTN_TOOL_PEN) {
+ __set_bit(BTN_7, input_dev->keybit);
+ __set_bit(BTN_8, input_dev->keybit);
+ }
+ /* fall through */
+
+ case INTUOS5S:
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ if (features->device_type == BTN_TOOL_PEN) {
+ for (i = 0; i < 7; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_DISTANCE, 0,
+ features->distance_max,
+ 0, 0);
+
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+
+ wacom_setup_intuos(wacom_wac);
+ } else if (features->device_type == BTN_TOOL_FINGER) {
+ __clear_bit(ABS_MISC, input_dev->absbit);
+
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+ __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
+ __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit);
+
+ input_mt_init_slots(input_dev, features->touch_max);
+
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
+ 0, 255, 0, 0);
+
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ 0, features->x_max,
+ features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ 0, features->y_max,
+ features->y_fuzz, 0);
+ }
+ break;
+
case INTUOS4:
case INTUOS4L:
__set_bit(BTN_7, input_dev->keybit);
@@ -1378,9 +1548,19 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
break;
case TABLETPC2FG:
+ case MTSCREEN:
if (features->device_type == BTN_TOOL_FINGER) {
- input_mt_init_slots(input_dev, 2);
+ wacom_wac->slots = kmalloc(features->touch_max *
+ sizeof(int),
+ GFP_KERNEL);
+ if (!wacom_wac->slots)
+ return -ENOMEM;
+
+ for (i = 0; i < features->touch_max; i++)
+ wacom_wac->slots[i] = -1;
+
+ input_mt_init_slots(input_dev, features->touch_max);
input_set_abs_params(input_dev, ABS_MT_TOOL_TYPE,
0, MT_TOOL_MAX, 0, 0);
input_set_abs_params(input_dev, ABS_MT_POSITION_X,
@@ -1435,6 +1615,7 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOOL_FINGER, input_dev->keybit);
__set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+ input_mt_init_slots(input_dev, features->touch_max);
if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
__set_bit(BTN_TOOL_TRIPLETAP,
@@ -1442,13 +1623,9 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOOL_QUADTAP,
input_dev->keybit);
- input_mt_init_slots(input_dev, 16);
-
input_set_abs_params(input_dev,
ABS_MT_TOUCH_MAJOR,
0, 255, 0, 0);
- } else {
- input_mt_init_slots(input_dev, 2);
}
input_set_abs_params(input_dev, ABS_MT_POSITION_X,
@@ -1468,6 +1645,7 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
}
break;
}
+ return 0;
}
static const struct wacom_features wacom_features_0x00 =
@@ -1635,6 +1813,24 @@ static const struct wacom_features wacom_features_0xBB =
static const struct wacom_features wacom_features_0xBC =
{ "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047,
63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0x26 =
+ { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
+ 63, INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x27 =
+ { "Wacom Intuos5 touch M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
+ 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x28 =
+ { "Wacom Intuos5 touch L", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047,
+ 63, INTUOS5L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x29 =
+ { "Wacom Intuos5 S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
+ 63, INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0x2A =
+ { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
+ 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xF4 =
{ "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -1676,13 +1872,19 @@ static const struct wacom_features wacom_features_0x9F =
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xE2 =
{ "Wacom ISDv4 E2", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255,
- 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xE3 =
{ "Wacom ISDv4 E3", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255,
- 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
+static const struct wacom_features wacom_features_0xE5 =
+ { "Wacom ISDv4 E5", WACOM_PKGLEN_MTOUCH, 26202, 16325, 255,
+ 0, MTSCREEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xE6 =
{ "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255,
- 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xEC =
{ "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1691,19 +1893,22 @@ static const struct wacom_features wacom_features_0x47 =
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x84 =
{ "Wacom Wireless Receiver", WACOM_PKGLEN_WIRELESS, 0, 0, 0,
- 0, WIRELESS, 0, 0 };
+ 0, WIRELESS, 0, 0, .touch_max = 16 };
static const struct wacom_features wacom_features_0xD0 =
{ "Wacom Bamboo 2FG", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xD1 =
{ "Wacom Bamboo 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xD2 =
{ "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD3 =
{ "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xD4 =
{ "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1712,28 +1917,35 @@ static const struct wacom_features wacom_features_0xD5 =
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD6 =
{ "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xD7 =
{ "Wacom BambooPT 2FG Small", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xD8 =
{ "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xDA =
{ "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static struct wacom_features wacom_features_0xDB =
{ "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xDD =
{ "Wacom Bamboo Connect", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xDE =
{ "Wacom Bamboo 16FG 4x5", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 16 };
static const struct wacom_features wacom_features_0xDF =
{ "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 16 };
static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1807,6 +2019,11 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xBA) },
{ USB_DEVICE_WACOM(0xBB) },
{ USB_DEVICE_WACOM(0xBC) },
+ { USB_DEVICE_WACOM(0x26) },
+ { USB_DEVICE_WACOM(0x27) },
+ { USB_DEVICE_WACOM(0x28) },
+ { USB_DEVICE_WACOM(0x29) },
+ { USB_DEVICE_WACOM(0x2A) },
{ USB_DEVICE_WACOM(0x3F) },
{ USB_DEVICE_WACOM(0xC5) },
{ USB_DEVICE_WACOM(0xC6) },
@@ -1842,6 +2059,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x9F) },
{ USB_DEVICE_WACOM(0xE2) },
{ USB_DEVICE_WACOM(0xE3) },
+ { USB_DEVICE_WACOM(0xE5) },
{ USB_DEVICE_WACOM(0xE6) },
{ USB_DEVICE_WACOM(0xEC) },
{ USB_DEVICE_WACOM(0x47) },
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index ba5a334e54d6..78fbd3f42009 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -25,6 +25,10 @@
#define WACOM_PKGLEN_BBTOUCH3 64
#define WACOM_PKGLEN_BBPEN 10
#define WACOM_PKGLEN_WIRELESS 32
+#define WACOM_PKGLEN_MTOUCH 62
+
+/* wacom data size per MT contact */
+#define WACOM_BYTES_PER_MT_PACKET 11
/* device IDs */
#define STYLUS_DEVICE_ID 0x02
@@ -38,8 +42,10 @@
#define WACOM_REPORT_INTUOSREAD 5
#define WACOM_REPORT_INTUOSWRITE 6
#define WACOM_REPORT_INTUOSPAD 12
+#define WACOM_REPORT_INTUOS5PAD 3
#define WACOM_REPORT_TPC1FG 6
#define WACOM_REPORT_TPC2FG 13
+#define WACOM_REPORT_TPCMT 13
#define WACOM_REPORT_TPCHID 15
#define WACOM_REPORT_TPCST 16
@@ -65,6 +71,9 @@ enum {
INTUOS4S,
INTUOS4,
INTUOS4L,
+ INTUOS5S,
+ INTUOS5,
+ INTUOS5L,
WACOM_24HD,
WACOM_21UX2,
CINTIQ,
@@ -72,6 +81,7 @@ enum {
WACOM_MO,
TABLETPC,
TABLETPC2FG,
+ MTSCREEN,
MAX_TYPE
};
@@ -95,6 +105,7 @@ struct wacom_features {
int pressure_fuzz;
int distance_fuzz;
unsigned quirks;
+ unsigned touch_max;
};
struct wacom_shared {
@@ -113,6 +124,8 @@ struct wacom_wac {
struct input_dev *input;
int pid;
int battery_capacity;
+ int num_contacts_left;
+ int *slots;
};
#endif
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 75838d7710ce..98d263504eea 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -187,6 +187,23 @@ config TOUCHSCREEN_DA9034
Say Y here to enable the support for the touchscreen found
on Dialog Semiconductor DA9034 PMIC.
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called da9034-ts.
+
+config TOUCHSCREEN_DA9052
+ tristate "Dialog DA9052/DA9053 TSI"
+ depends on PMIC_DA9052
+ help
+ Say Y here to support the touchscreen found on Dialog Semiconductor
+ DA9052-BC and DA9053-AA/Bx PMICs.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called da9052_tsi.
+
config TOUCHSCREEN_DYNAPRO
tristate "Dynapro serial touchscreen"
select SERIO
@@ -306,6 +323,18 @@ config TOUCHSCREEN_WACOM_W8001
To compile this driver as a module, choose M here: the
module will be called wacom_w8001.
+config TOUCHSCREEN_WACOM_I2C
+ tristate "Wacom Tablet support (I2C)"
+ depends on I2C
+ help
+ Say Y here if you want to use the I2C version of the Wacom
+ Pen Tablet.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called wacom_i2c.
+
config TOUCHSCREEN_LPC32XX
tristate "LPC32XX touchscreen controller"
depends on ARCH_LPC32XX
@@ -635,6 +664,7 @@ config TOUCHSCREEN_USB_COMPOSITE
- Zytronic controllers
- Elo TouchSystems 2700 IntelliTouch
- EasyTouch USB Touch Controller from Data Modul
+ - e2i (Mimo monitors)
Have a look at <http://linux.chapter7.ch/touchkit/> for
a usage description and the required user-space stuff.
@@ -721,7 +751,7 @@ config TOUCHSCREEN_USB_ELO
config TOUCHSCREEN_USB_E2I
default y
- bool "e2i Touchscreen controller (e.g. from Mimo 740)"
+ bool "e2i Touchscreen controller (e.g. from Mimo 740)" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_ZYTRONIC
@@ -744,7 +774,7 @@ config TOUCHSCREEN_USB_EASYTOUCH
bool "EasyTouch USB Touch controller device support" if EMBEDDED
depends on TOUCHSCREEN_USB_COMPOSITE
help
- Say Y here if you have a EasyTouch USB Touch controller device support.
+ Say Y here if you have an EasyTouch USB Touch controller.
If unsure, say N.
config TOUCHSCREEN_TOUCHIT213
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 3d5cf8cbf89c..eb8bfe1c1a46 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE) += cyttsp_core.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C) += cyttsp_i2c.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_SPI) += cyttsp_spi.o
obj-$(CONFIG_TOUCHSCREEN_DA9034) += da9034-ts.o
+obj-$(CONFIG_TOUCHSCREEN_DA9052) += da9052_tsi.o
obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o
obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
@@ -59,6 +60,7 @@ obj-$(CONFIG_TOUCHSCREEN_TSC2005) += tsc2005.o
obj-$(CONFIG_TOUCHSCREEN_TSC2007) += tsc2007.o
obj-$(CONFIG_TOUCHSCREEN_UCB1400) += ucb1400_ts.o
obj-$(CONFIG_TOUCHSCREEN_WACOM_W8001) += wacom_w8001.o
+obj-$(CONFIG_TOUCHSCREEN_WACOM_I2C) += wacom_i2c.o
obj-$(CONFIG_TOUCHSCREEN_WM831X) += wm831x-ts.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX) += wm97xx-ts.o
wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9705) += wm9705.o
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 19d4ea65ea01..42e645062c20 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -236,7 +236,6 @@ struct mxt_object {
struct mxt_message {
u8 reportid;
u8 message[7];
- u8 checksum;
};
struct mxt_finger {
@@ -326,17 +325,12 @@ static bool mxt_object_writable(unsigned int type)
}
static void mxt_dump_message(struct device *dev,
- struct mxt_message *message)
+ struct mxt_message *message)
{
- dev_dbg(dev, "reportid:\t0x%x\n", message->reportid);
- dev_dbg(dev, "message1:\t0x%x\n", message->message[0]);
- dev_dbg(dev, "message2:\t0x%x\n", message->message[1]);
- dev_dbg(dev, "message3:\t0x%x\n", message->message[2]);
- dev_dbg(dev, "message4:\t0x%x\n", message->message[3]);
- dev_dbg(dev, "message5:\t0x%x\n", message->message[4]);
- dev_dbg(dev, "message6:\t0x%x\n", message->message[5]);
- dev_dbg(dev, "message7:\t0x%x\n", message->message[6]);
- dev_dbg(dev, "checksum:\t0x%x\n", message->checksum);
+ dev_dbg(dev, "reportid: %u\tmessage: %02x %02x %02x %02x %02x %02x %02x\n",
+ message->reportid, message->message[0], message->message[1],
+ message->message[2], message->message[3], message->message[4],
+ message->message[5], message->message[6]);
}
static int mxt_check_bootloader(struct i2c_client *client,
@@ -506,7 +500,7 @@ static int mxt_write_object(struct mxt_data *data,
u16 reg;
object = mxt_get_object(data, type);
- if (!object)
+ if (!object || offset >= object->size + 1)
return -EINVAL;
reg = object->start_address;
@@ -1049,8 +1043,8 @@ static ssize_t mxt_update_fw_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(object, 0444, mxt_object_show, NULL);
-static DEVICE_ATTR(update_fw, 0664, NULL, mxt_update_fw_store);
+static DEVICE_ATTR(object, S_IRUGO, mxt_object_show, NULL);
+static DEVICE_ATTR(update_fw, S_IWUSR, NULL, mxt_update_fw_store);
static struct attribute *mxt_attrs[] = {
&dev_attr_object.attr,
@@ -1201,7 +1195,7 @@ static int __devexit mxt_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int mxt_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -1239,13 +1233,10 @@ static int mxt_resume(struct device *dev)
return 0;
}
-
-static const struct dev_pm_ops mxt_pm_ops = {
- .suspend = mxt_suspend,
- .resume = mxt_resume,
-};
#endif
+static SIMPLE_DEV_PM_OPS(mxt_pm_ops, mxt_suspend, mxt_resume);
+
static const struct i2c_device_id mxt_id[] = {
{ "qt602240_ts", 0 },
{ "atmel_mxt_ts", 0 },
@@ -1258,9 +1249,7 @@ static struct i2c_driver mxt_driver = {
.driver = {
.name = "atmel_mxt_ts",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &mxt_pm_ops,
-#endif
},
.probe = mxt_probe,
.remove = __devexit_p(mxt_remove),
diff --git a/drivers/input/touchscreen/da9052_tsi.c b/drivers/input/touchscreen/da9052_tsi.c
new file mode 100644
index 000000000000..e8df341090c0
--- /dev/null
+++ b/drivers/input/touchscreen/da9052_tsi.c
@@ -0,0 +1,370 @@
+/*
+ * TSI driver for Dialog DA9052
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/da9052.h>
+
+#define TSI_PEN_DOWN_STATUS 0x40
+
+struct da9052_tsi {
+ struct da9052 *da9052;
+ struct input_dev *dev;
+ struct delayed_work ts_pen_work;
+ struct mutex mutex;
+ unsigned int irq_pendwn;
+ unsigned int irq_datardy;
+ bool stopped;
+ bool adc_on;
+};
+
+static void da9052_ts_adc_toggle(struct da9052_tsi *tsi, bool on)
+{
+ da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG, 1 << 0, on);
+ tsi->adc_on = on;
+}
+
+static irqreturn_t da9052_ts_pendwn_irq(int irq, void *data)
+{
+ struct da9052_tsi *tsi = data;
+
+ if (!tsi->stopped) {
+ /* Mask PEN_DOWN event and unmask TSI_READY event */
+ disable_irq_nosync(tsi->irq_pendwn);
+ enable_irq(tsi->irq_datardy);
+
+ da9052_ts_adc_toggle(tsi, true);
+
+ schedule_delayed_work(&tsi->ts_pen_work, HZ / 50);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void da9052_ts_read(struct da9052_tsi *tsi)
+{
+ struct input_dev *input = tsi->dev;
+ int ret;
+ u16 x, y, z;
+ u8 v;
+
+ ret = da9052_reg_read(tsi->da9052, DA9052_TSI_X_MSB_REG);
+ if (ret < 0)
+ return;
+
+ x = (u16) ret;
+
+ ret = da9052_reg_read(tsi->da9052, DA9052_TSI_Y_MSB_REG);
+ if (ret < 0)
+ return;
+
+ y = (u16) ret;
+
+ ret = da9052_reg_read(tsi->da9052, DA9052_TSI_Z_MSB_REG);
+ if (ret < 0)
+ return;
+
+ z = (u16) ret;
+
+ ret = da9052_reg_read(tsi->da9052, DA9052_TSI_LSB_REG);
+ if (ret < 0)
+ return;
+
+ v = (u8) ret;
+
+ x = ((x << 2) & 0x3fc) | (v & 0x3);
+ y = ((y << 2) & 0x3fc) | ((v & 0xc) >> 2);
+ z = ((z << 2) & 0x3fc) | ((v & 0x30) >> 4);
+
+ input_report_key(input, BTN_TOUCH, 1);
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_PRESSURE, z);
+ input_sync(input);
+}
+
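The bit assembly in da9052_ts_read() above combines an 8-bit MSB register with two LSB bits packed into DA9052_TSI_LSB_REG, giving 10-bit samples; a worked example with hypothetical register values:

    /* LSB register: bits 1:0 = X, 3:2 = Y, 5:4 = Z, bit 6 = pen-down */
    X_MSB = 0xA5, LSB = 0x02
    x = ((0xA5 << 2) & 0x3fc) | (0x02 & 0x3) = 0x294 | 0x2 = 0x296 = 662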
+static irqreturn_t da9052_ts_datardy_irq(int irq, void *data)
+{
+ struct da9052_tsi *tsi = data;
+
+ da9052_ts_read(tsi);
+
+ return IRQ_HANDLED;
+}
+
+static void da9052_ts_pen_work(struct work_struct *work)
+{
+ struct da9052_tsi *tsi = container_of(work, struct da9052_tsi,
+ ts_pen_work.work);
+ if (!tsi->stopped) {
+ int ret = da9052_reg_read(tsi->da9052, DA9052_TSI_LSB_REG);
+ if (ret < 0 || (ret & TSI_PEN_DOWN_STATUS)) {
+ /* Pen is still DOWN (or read error) */
+ schedule_delayed_work(&tsi->ts_pen_work, HZ / 50);
+ } else {
+ struct input_dev *input = tsi->dev;
+
+ /* Pen UP */
+ da9052_ts_adc_toggle(tsi, false);
+
+ /* Report Pen UP */
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_sync(input);
+
+ /*
+ * FIXME: fixes an unhandled IRQ issue seen when quick
+ * pen down and pen up events occur
+ */
+ ret = da9052_reg_update(tsi->da9052,
+ DA9052_EVENT_B_REG, 0xC0, 0xC0);
+ if (ret < 0)
+ return;
+
+ /* Mask TSI_READY event and unmask PEN_DOWN event */
+ disable_irq(tsi->irq_datardy);
+ enable_irq(tsi->irq_pendwn);
+ }
+ }
+}
+
+static int __devinit da9052_ts_configure_gpio(struct da9052 *da9052)
+{
+ int error;
+
+ error = da9052_reg_update(da9052, DA9052_GPIO_2_3_REG, 0x30, 0);
+ if (error < 0)
+ return error;
+
+ error = da9052_reg_update(da9052, DA9052_GPIO_4_5_REG, 0x33, 0);
+ if (error < 0)
+ return error;
+
+ error = da9052_reg_update(da9052, DA9052_GPIO_6_7_REG, 0x33, 0);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+
+static int __devinit da9052_configure_tsi(struct da9052_tsi *tsi)
+{
+ int error;
+
+ error = da9052_ts_configure_gpio(tsi->da9052);
+ if (error)
+ return error;
+
+ /* Measure TSI sample every 1ms */
+ error = da9052_reg_update(tsi->da9052, DA9052_ADC_CONT_REG,
+ 1 << 6, 1 << 6);
+ if (error < 0)
+ return error;
+
+ /* TSI_DELAY: 3 slots, TSI_SKIP: 0 slots, TSI_MODE: XYZP */
+ error = da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG, 0xFC, 0xC0);
+ if (error < 0)
+ return error;
+
+ /* Supply TSIRef through LDO9 */
+ error = da9052_reg_write(tsi->da9052, DA9052_LDO9_REG, 0x59);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+
+static int da9052_ts_input_open(struct input_dev *input_dev)
+{
+ struct da9052_tsi *tsi = input_get_drvdata(input_dev);
+
+ tsi->stopped = false;
+ mb();
+
+ /* Unmask PEN_DOWN event */
+ enable_irq(tsi->irq_pendwn);
+
+ /* Enable Pen Detect Circuit */
+ return da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG,
+ 1 << 1, 1 << 1);
+}
+
+static void da9052_ts_input_close(struct input_dev *input_dev)
+{
+ struct da9052_tsi *tsi = input_get_drvdata(input_dev);
+
+ tsi->stopped = true;
+ mb();
+ disable_irq(tsi->irq_pendwn);
+ cancel_delayed_work_sync(&tsi->ts_pen_work);
+
+ if (tsi->adc_on) {
+ disable_irq(tsi->irq_datardy);
+ da9052_ts_adc_toggle(tsi, false);
+
+ /*
+ * If the ADC was on, the pendwn IRQ has been disabled
+ * twice, so enable it once here to keep the enable/disable
+ * counter balanced. The IRQ itself is still masked.
+ */
+ enable_irq(tsi->irq_pendwn);
+ }
+
+ /* Disable Pen Detect Circuit */
+ da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG, 1 << 1, 0);
+}
+
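A sketch of the IRQ disable-depth bookkeeping that the comment in da9052_ts_input_close() relies on, traced through the handlers in this file (depths are the nested-disable counts kept by the IRQ core):

    /* after probe:   pendwn depth 1, datardy depth 1 (both masked)     */
    /* open():        enable_irq(pendwn)         -> pendwn 0            */
    /* pendwn IRQ:    disable_irq_nosync(pendwn) -> pendwn 1            */
    /*                enable_irq(datardy)        -> datardy 0, ADC on   */
    /* close() with ADC on:                                             */
    /*   disable_irq(pendwn)    -> pendwn 2                             */
    /*   disable_irq(datardy)   -> datardy 1                            */
    /*   enable_irq(pendwn)     -> pendwn 1 (balanced, still masked)    */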
+static int __devinit da9052_ts_probe(struct platform_device *pdev)
+{
+ struct da9052 *da9052;
+ struct da9052_tsi *tsi;
+ struct input_dev *input_dev;
+ int irq_pendwn;
+ int irq_datardy;
+ int error;
+
+ da9052 = dev_get_drvdata(pdev->dev.parent);
+ if (!da9052)
+ return -EINVAL;
+
+ irq_pendwn = platform_get_irq_byname(pdev, "PENDWN");
+ irq_datardy = platform_get_irq_byname(pdev, "TSIRDY");
+ if (irq_pendwn < 0 || irq_datardy < 0) {
+ dev_err(da9052->dev, "Unable to determine device interrupts\n");
+ return -ENXIO;
+ }
+
+ tsi = kzalloc(sizeof(struct da9052_tsi), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!tsi || !input_dev) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ tsi->da9052 = da9052;
+ tsi->dev = input_dev;
+ tsi->irq_pendwn = da9052->irq_base + irq_pendwn;
+ tsi->irq_datardy = da9052->irq_base + irq_datardy;
+ tsi->stopped = true;
+ INIT_DELAYED_WORK(&tsi->ts_pen_work, da9052_ts_pen_work);
+
+ input_dev->id.version = 0x0101;
+ input_dev->id.vendor = 0x15B6;
+ input_dev->id.product = 0x9052;
+ input_dev->name = "Dialog DA9052 TouchScreen Driver";
+ input_dev->dev.parent = &pdev->dev;
+ input_dev->open = da9052_ts_input_open;
+ input_dev->close = da9052_ts_input_close;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X, 0, 1023, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, 1023, 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 1023, 0, 0);
+
+ input_set_drvdata(input_dev, tsi);
+
+ /* Disable Pen Detect Circuit */
+ da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG, 1 << 1, 0);
+
+ /* Disable ADC */
+ da9052_ts_adc_toggle(tsi, false);
+
+ error = request_threaded_irq(tsi->irq_pendwn,
+ NULL, da9052_ts_pendwn_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "PENDWN", tsi);
+ if (error) {
+ dev_err(tsi->da9052->dev,
+ "Failed to register PENDWN IRQ %d, error = %d\n",
+ tsi->irq_pendwn, error);
+ goto err_free_mem;
+ }
+
+ error = request_threaded_irq(tsi->irq_datardy,
+ NULL, da9052_ts_datardy_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "TSIRDY", tsi);
+ if (error) {
+ dev_err(tsi->da9052->dev,
+ "Failed to register TSIRDY IRQ %d, error = %d\n",
+ tsi->irq_datardy, error);
+ goto err_free_pendwn_irq;
+ }
+
+ /* Mask PEN_DOWN and TSI_READY events */
+ disable_irq(tsi->irq_pendwn);
+ disable_irq(tsi->irq_datardy);
+
+ error = da9052_configure_tsi(tsi);
+ if (error)
+ goto err_free_datardy_irq;
+
+ error = input_register_device(tsi->dev);
+ if (error)
+ goto err_free_datardy_irq;
+
+ platform_set_drvdata(pdev, tsi);
+
+ return 0;
+
+err_free_datardy_irq:
+ free_irq(tsi->irq_datardy, tsi);
+err_free_pendwn_irq:
+ free_irq(tsi->irq_pendwn, tsi);
+err_free_mem:
+ kfree(tsi);
+ input_free_device(input_dev);
+
+ return error;
+}
+
+static int __devexit da9052_ts_remove(struct platform_device *pdev)
+{
+ struct da9052_tsi *tsi = platform_get_drvdata(pdev);
+
+ da9052_reg_write(tsi->da9052, DA9052_LDO9_REG, 0x19);
+
+ free_irq(tsi->irq_pendwn, tsi);
+ free_irq(tsi->irq_datardy, tsi);
+
+ input_unregister_device(tsi->dev);
+ kfree(tsi);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver da9052_tsi_driver = {
+ .probe = da9052_ts_probe,
+ .remove = __devexit_p(da9052_ts_remove),
+ .driver = {
+ .name = "da9052-tsi",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(da9052_tsi_driver);
+
+MODULE_DESCRIPTION("Touchscreen driver for Dialog Semiconductor DA9052");
+MODULE_AUTHOR("Anthony Olech <Anthony.Olech@diasemi.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-tsi");
diff --git a/drivers/input/touchscreen/dynapro.c b/drivers/input/touchscreen/dynapro.c
index 455353908bdf..1809677a6513 100644
--- a/drivers/input/touchscreen/dynapro.c
+++ b/drivers/input/touchscreen/dynapro.c
@@ -188,19 +188,4 @@ static struct serio_driver dynapro_drv = {
.disconnect = dynapro_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init dynapro_init(void)
-{
- return serio_register_driver(&dynapro_drv);
-}
-
-static void __exit dynapro_exit(void)
-{
- serio_unregister_driver(&dynapro_drv);
-}
-
-module_init(dynapro_init);
-module_exit(dynapro_exit);
+module_serio_driver(dynapro_drv);
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
index 486d31ba9c09..957423d1471d 100644
--- a/drivers/input/touchscreen/elo.c
+++ b/drivers/input/touchscreen/elo.c
@@ -405,19 +405,4 @@ static struct serio_driver elo_drv = {
.disconnect = elo_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init elo_init(void)
-{
- return serio_register_driver(&elo_drv);
-}
-
-static void __exit elo_exit(void)
-{
- serio_unregister_driver(&elo_drv);
-}
-
-module_init(elo_init);
-module_exit(elo_exit);
+module_serio_driver(elo_drv);
diff --git a/drivers/input/touchscreen/fujitsu_ts.c b/drivers/input/touchscreen/fujitsu_ts.c
index 80b21800355f..10794ddbdf58 100644
--- a/drivers/input/touchscreen/fujitsu_ts.c
+++ b/drivers/input/touchscreen/fujitsu_ts.c
@@ -175,15 +175,4 @@ static struct serio_driver fujitsu_drv = {
.disconnect = fujitsu_disconnect,
};
-static int __init fujitsu_init(void)
-{
- return serio_register_driver(&fujitsu_drv);
-}
-
-static void __exit fujitsu_exit(void)
-{
- serio_unregister_driver(&fujitsu_drv);
-}
-
-module_init(fujitsu_init);
-module_exit(fujitsu_exit);
+module_serio_driver(fujitsu_drv);
diff --git a/drivers/input/touchscreen/gunze.c b/drivers/input/touchscreen/gunze.c
index a54f90e02ab6..41c71766bf18 100644
--- a/drivers/input/touchscreen/gunze.c
+++ b/drivers/input/touchscreen/gunze.c
@@ -186,19 +186,4 @@ static struct serio_driver gunze_drv = {
.disconnect = gunze_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init gunze_init(void)
-{
- return serio_register_driver(&gunze_drv);
-}
-
-static void __exit gunze_exit(void)
-{
- serio_unregister_driver(&gunze_drv);
-}
-
-module_init(gunze_init);
-module_exit(gunze_exit);
+module_serio_driver(gunze_drv);
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
index 6107e563e681..b9e8686a6f1c 100644
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ b/drivers/input/touchscreen/h3600_ts_input.c
@@ -476,19 +476,4 @@ static struct serio_driver h3600ts_drv = {
.disconnect = h3600ts_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init h3600ts_init(void)
-{
- return serio_register_driver(&h3600ts_drv);
-}
-
-static void __exit h3600ts_exit(void)
-{
- serio_unregister_driver(&h3600ts_drv);
-}
-
-module_init(h3600ts_init);
-module_exit(h3600ts_exit);
+module_serio_driver(h3600ts_drv);
diff --git a/drivers/input/touchscreen/hampshire.c b/drivers/input/touchscreen/hampshire.c
index 2da6cc31bb21..0cc47ea98acf 100644
--- a/drivers/input/touchscreen/hampshire.c
+++ b/drivers/input/touchscreen/hampshire.c
@@ -187,19 +187,4 @@ static struct serio_driver hampshire_drv = {
.disconnect = hampshire_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init hampshire_init(void)
-{
- return serio_register_driver(&hampshire_drv);
-}
-
-static void __exit hampshire_exit(void)
-{
- serio_unregister_driver(&hampshire_drv);
-}
-
-module_init(hampshire_init);
-module_exit(hampshire_exit);
+module_serio_driver(hampshire_drv);
diff --git a/drivers/input/touchscreen/inexio.c b/drivers/input/touchscreen/inexio.c
index 192ade0a0fb9..a29c99c32245 100644
--- a/drivers/input/touchscreen/inexio.c
+++ b/drivers/input/touchscreen/inexio.c
@@ -189,19 +189,4 @@ static struct serio_driver inexio_drv = {
.disconnect = inexio_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init inexio_init(void)
-{
- return serio_register_driver(&inexio_drv);
-}
-
-static void __exit inexio_exit(void)
-{
- serio_unregister_driver(&inexio_drv);
-}
-
-module_init(inexio_init);
-module_exit(inexio_exit);
+module_serio_driver(inexio_drv);
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index afcd0691ec67..4c2b8ed3bf16 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -22,6 +22,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of.h>
/*
* Touchscreen controller register offsets
@@ -383,6 +384,14 @@ static const struct dev_pm_ops lpc32xx_ts_pm_ops = {
#define LPC32XX_TS_PM_OPS NULL
#endif
+#ifdef CONFIG_OF
+static struct of_device_id lpc32xx_tsc_of_match[] = {
+ { .compatible = "nxp,lpc3220-tsc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_tsc_of_match);
+#endif
+
static struct platform_driver lpc32xx_ts_driver = {
.probe = lpc32xx_ts_probe,
.remove = __devexit_p(lpc32xx_ts_remove),
@@ -390,6 +399,7 @@ static struct platform_driver lpc32xx_ts_driver = {
.name = MOD_NAME,
.owner = THIS_MODULE,
.pm = LPC32XX_TS_PM_OPS,
+ .of_match_table = of_match_ptr(lpc32xx_tsc_of_match),
},
};
module_platform_driver(lpc32xx_ts_driver);
diff --git a/drivers/input/touchscreen/mtouch.c b/drivers/input/touchscreen/mtouch.c
index 9077228418b7..eb66b7c37c2f 100644
--- a/drivers/input/touchscreen/mtouch.c
+++ b/drivers/input/touchscreen/mtouch.c
@@ -202,19 +202,4 @@ static struct serio_driver mtouch_drv = {
.disconnect = mtouch_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init mtouch_init(void)
-{
- return serio_register_driver(&mtouch_drv);
-}
-
-static void __exit mtouch_exit(void)
-{
- serio_unregister_driver(&mtouch_drv);
-}
-
-module_init(mtouch_init);
-module_exit(mtouch_exit);
+module_serio_driver(mtouch_drv);
diff --git a/drivers/input/touchscreen/penmount.c b/drivers/input/touchscreen/penmount.c
index 4c012fb2b01e..4ccde45b9da2 100644
--- a/drivers/input/touchscreen/penmount.c
+++ b/drivers/input/touchscreen/penmount.c
@@ -317,19 +317,4 @@ static struct serio_driver pm_drv = {
.disconnect = pm_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init pm_init(void)
-{
- return serio_register_driver(&pm_drv);
-}
-
-static void __exit pm_exit(void)
-{
- serio_unregister_driver(&pm_drv);
-}
-
-module_init(pm_init);
-module_exit(pm_exit);
+module_serio_driver(pm_drv);
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index cbbf71b22696..6cb68a1981bf 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -218,7 +218,7 @@ static int __devexit st1232_ts_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int st1232_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -243,18 +243,25 @@ static int st1232_ts_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops st1232_ts_pm_ops = {
- .suspend = st1232_ts_suspend,
- .resume = st1232_ts_resume,
-};
#endif
+static SIMPLE_DEV_PM_OPS(st1232_ts_pm_ops,
+ st1232_ts_suspend, st1232_ts_resume);
+
static const struct i2c_device_id st1232_ts_id[] = {
{ ST1232_TS_NAME, 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, st1232_ts_id);
+#ifdef CONFIG_OF
+static const struct of_device_id st1232_ts_dt_ids[] __devinitconst = {
+ { .compatible = "sitronix,st1232", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, st1232_ts_dt_ids);
+#endif
+
static struct i2c_driver st1232_ts_driver = {
.probe = st1232_ts_probe,
.remove = __devexit_p(st1232_ts_remove),
@@ -262,9 +269,8 @@ static struct i2c_driver st1232_ts_driver = {
.driver = {
.name = ST1232_TS_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
+ .of_match_table = of_match_ptr(st1232_ts_dt_ids),
.pm = &st1232_ts_pm_ops,
-#endif
},
};
diff --git a/drivers/input/touchscreen/touchit213.c b/drivers/input/touchscreen/touchit213.c
index d1297ba19daf..5f29e5b8e1c1 100644
--- a/drivers/input/touchscreen/touchit213.c
+++ b/drivers/input/touchscreen/touchit213.c
@@ -216,19 +216,4 @@ static struct serio_driver touchit213_drv = {
.disconnect = touchit213_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init touchit213_init(void)
-{
- return serio_register_driver(&touchit213_drv);
-}
-
-static void __exit touchit213_exit(void)
-{
- serio_unregister_driver(&touchit213_drv);
-}
-
-module_init(touchit213_init);
-module_exit(touchit213_exit);
+module_serio_driver(touchit213_drv);
diff --git a/drivers/input/touchscreen/touchright.c b/drivers/input/touchscreen/touchright.c
index 3a5c142c2a78..8a2887daf194 100644
--- a/drivers/input/touchscreen/touchright.c
+++ b/drivers/input/touchscreen/touchright.c
@@ -176,19 +176,4 @@ static struct serio_driver tr_drv = {
.disconnect = tr_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init tr_init(void)
-{
- return serio_register_driver(&tr_drv);
-}
-
-static void __exit tr_exit(void)
-{
- serio_unregister_driver(&tr_drv);
-}
-
-module_init(tr_init);
-module_exit(tr_exit);
+module_serio_driver(tr_drv);
diff --git a/drivers/input/touchscreen/touchwin.c b/drivers/input/touchscreen/touchwin.c
index 763a656a59f8..588cdcb839dd 100644
--- a/drivers/input/touchscreen/touchwin.c
+++ b/drivers/input/touchscreen/touchwin.c
@@ -183,19 +183,4 @@ static struct serio_driver tw_drv = {
.disconnect = tw_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init tw_init(void)
-{
- return serio_register_driver(&tw_drv);
-}
-
-static void __exit tw_exit(void)
-{
- serio_unregister_driver(&tw_drv);
-}
-
-module_init(tw_init);
-module_exit(tw_exit);
+module_serio_driver(tw_drv);
diff --git a/drivers/input/touchscreen/tsc40.c b/drivers/input/touchscreen/tsc40.c
index 29d5ed4dd31c..63209aaa55f0 100644
--- a/drivers/input/touchscreen/tsc40.c
+++ b/drivers/input/touchscreen/tsc40.c
@@ -167,17 +167,7 @@ static struct serio_driver tsc_drv = {
.disconnect = tsc_disconnect,
};
-static int __init tsc_ser_init(void)
-{
- return serio_register_driver(&tsc_drv);
-}
-module_init(tsc_ser_init);
-
-static void __exit tsc_exit(void)
-{
- serio_unregister_driver(&tsc_drv);
-}
-module_exit(tsc_exit);
+module_serio_driver(tsc_drv);
MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
new file mode 100644
index 000000000000..35572575d34a
--- /dev/null
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -0,0 +1,282 @@
+/*
+ * Wacom Penabled Driver for I2C
+ *
+ * Copyright (c) 2011 Tatsunosuke Tobita, Wacom.
+ * <tobita.tatsunosuke@wacom.co.jp>
+ *
+ * This program is free software; you can redistribute it
+ * and/or modify it under the terms of the GNU General
+ * Public License as published by the Free Software
+ * Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <asm/unaligned.h>
+
+#define WACOM_CMD_QUERY0 0x04
+#define WACOM_CMD_QUERY1 0x00
+#define WACOM_CMD_QUERY2 0x33
+#define WACOM_CMD_QUERY3 0x02
+#define WACOM_CMD_THROW0 0x05
+#define WACOM_CMD_THROW1 0x00
+#define WACOM_QUERY_SIZE 19
+#define WACOM_RETRY_CNT 100
+
+struct wacom_features {
+ int x_max;
+ int y_max;
+ int pressure_max;
+ char fw_version;
+};
+
+struct wacom_i2c {
+ struct i2c_client *client;
+ struct input_dev *input;
+ u8 data[WACOM_QUERY_SIZE];
+};
+
+static int wacom_query_device(struct i2c_client *client,
+ struct wacom_features *features)
+{
+ int ret;
+ u8 cmd1[] = { WACOM_CMD_QUERY0, WACOM_CMD_QUERY1,
+ WACOM_CMD_QUERY2, WACOM_CMD_QUERY3 };
+ u8 cmd2[] = { WACOM_CMD_THROW0, WACOM_CMD_THROW1 };
+ u8 data[WACOM_QUERY_SIZE];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(cmd1),
+ .buf = cmd1,
+ },
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(cmd2),
+ .buf = cmd2,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(data),
+ .buf = data,
+ },
+ };
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ features->x_max = get_unaligned_le16(&data[3]);
+ features->y_max = get_unaligned_le16(&data[5]);
+ features->pressure_max = get_unaligned_le16(&data[11]);
+ features->fw_version = get_unaligned_le16(&data[13]);
+
+ dev_dbg(&client->dev,
+ "x_max:%d, y_max:%d, pressure:%d, fw:%d\n",
+ features->x_max, features->y_max,
+ features->pressure_max, features->fw_version);
+
+ return 0;
+}
+
+static irqreturn_t wacom_i2c_irq(int irq, void *dev_id)
+{
+ struct wacom_i2c *wac_i2c = dev_id;
+ struct input_dev *input = wac_i2c->input;
+ u8 *data = wac_i2c->data;
+ unsigned int x, y, pressure;
+ unsigned char tsw, f1, f2, ers;
+ int error;
+
+ error = i2c_master_recv(wac_i2c->client,
+ wac_i2c->data, sizeof(wac_i2c->data));
+ if (error < 0)
+ goto out;
+
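+	/*
+	 * Input report layout as parsed below: byte 3 carries the tip
+	 * switch, eraser and barrel-button bits; bytes 4-9 hold X, Y and
+	 * pressure as little-endian 16-bit values.
+	 */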
+ tsw = data[3] & 0x01;
+ ers = data[3] & 0x04;
+ f1 = data[3] & 0x02;
+ f2 = data[3] & 0x10;
+ x = le16_to_cpup((__le16 *)&data[4]);
+ y = le16_to_cpup((__le16 *)&data[6]);
+ pressure = le16_to_cpup((__le16 *)&data[8]);
+
+ input_report_key(input, BTN_TOUCH, tsw || ers);
+ input_report_key(input, BTN_TOOL_PEN, tsw);
+ input_report_key(input, BTN_TOOL_RUBBER, ers);
+ input_report_key(input, BTN_STYLUS, f1);
+ input_report_key(input, BTN_STYLUS2, f2);
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_PRESSURE, pressure);
+ input_sync(input);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int wacom_i2c_open(struct input_dev *dev)
+{
+ struct wacom_i2c *wac_i2c = input_get_drvdata(dev);
+ struct i2c_client *client = wac_i2c->client;
+
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static void wacom_i2c_close(struct input_dev *dev)
+{
+ struct wacom_i2c *wac_i2c = input_get_drvdata(dev);
+ struct i2c_client *client = wac_i2c->client;
+
+ disable_irq(client->irq);
+}
+
+static int __devinit wacom_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct wacom_i2c *wac_i2c;
+ struct input_dev *input;
+ struct wacom_features features;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "i2c_check_functionality error\n");
+ return -EIO;
+ }
+
+ error = wacom_query_device(client, &features);
+ if (error)
+ return error;
+
+ wac_i2c = kzalloc(sizeof(*wac_i2c), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!wac_i2c || !input) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ wac_i2c->client = client;
+ wac_i2c->input = input;
+
+ input->name = "Wacom I2C Digitizer";
+ input->id.bustype = BUS_I2C;
+ input->id.vendor = 0x56a;
+ input->id.version = features.fw_version;
+ input->dev.parent = &client->dev;
+ input->open = wacom_i2c_open;
+ input->close = wacom_i2c_close;
+
+ input->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+
+ __set_bit(BTN_TOOL_PEN, input->keybit);
+ __set_bit(BTN_TOOL_RUBBER, input->keybit);
+ __set_bit(BTN_STYLUS, input->keybit);
+ __set_bit(BTN_STYLUS2, input->keybit);
+ __set_bit(BTN_TOUCH, input->keybit);
+
+ input_set_abs_params(input, ABS_X, 0, features.x_max, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, features.y_max, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE,
+ 0, features.pressure_max, 0, 0);
+
+ input_set_drvdata(input, wac_i2c);
+
+ error = request_threaded_irq(client->irq, NULL, wacom_i2c_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "wacom_i2c", wac_i2c);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to enable IRQ, error: %d\n", error);
+ goto err_free_mem;
+ }
+
+	/* Disable the IRQ, we'll enable it in wacom_i2c_open() */
+ disable_irq(client->irq);
+
+ error = input_register_device(wac_i2c->input);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register input device, error: %d\n", error);
+ goto err_free_irq;
+ }
+
+ i2c_set_clientdata(client, wac_i2c);
+ return 0;
+
+err_free_irq:
+ free_irq(client->irq, wac_i2c);
+err_free_mem:
+ input_free_device(input);
+ kfree(wac_i2c);
+
+ return error;
+}
+
+static int __devexit wacom_i2c_remove(struct i2c_client *client)
+{
+ struct wacom_i2c *wac_i2c = i2c_get_clientdata(client);
+
+ free_irq(client->irq, wac_i2c);
+ input_unregister_device(wac_i2c->input);
+ kfree(wac_i2c);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int wacom_i2c_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ disable_irq(client->irq);
+
+ return 0;
+}
+
+static int wacom_i2c_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ enable_irq(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(wacom_i2c_pm, wacom_i2c_suspend, wacom_i2c_resume);
+
+static const struct i2c_device_id wacom_i2c_id[] = {
+ { "WAC_I2C_EMR", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, wacom_i2c_id);
+
+static struct i2c_driver wacom_i2c_driver = {
+ .driver = {
+ .name = "wacom_i2c",
+ .owner = THIS_MODULE,
+ .pm = &wacom_i2c_pm,
+ },
+
+ .probe = wacom_i2c_probe,
+ .remove = __devexit_p(wacom_i2c_remove),
+ .id_table = wacom_i2c_id,
+};
+module_i2c_driver(wacom_i2c_driver);
+
+MODULE_AUTHOR("Tatsunosuke Tobita <tobita.tatsunosuke@wacom.co.jp>");
+MODULE_DESCRIPTION("WACOM EMR I2C Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 1569a3934ab2..8f9ad2f893b8 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -594,15 +594,4 @@ static struct serio_driver w8001_drv = {
.disconnect = w8001_disconnect,
};
-static int __init w8001_init(void)
-{
- return serio_register_driver(&w8001_drv);
-}
-
-static void __exit w8001_exit(void)
-{
- serio_unregister_driver(&w8001_drv);
-}
-
-module_init(w8001_init);
-module_exit(w8001_exit);
+module_serio_driver(w8001_drv);
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 4bc851a9dc3d..e83410721e38 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -260,15 +260,16 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
* If we have a direct IRQ use it, otherwise use the interrupt
* from the WM831x IRQ controller.
*/
+ wm831x_ts->data_irq = wm831x_irq(wm831x,
+ platform_get_irq_byname(pdev,
+ "TCHDATA"));
if (pdata && pdata->data_irq)
wm831x_ts->data_irq = pdata->data_irq;
- else
- wm831x_ts->data_irq = platform_get_irq_byname(pdev, "TCHDATA");
+ wm831x_ts->pd_irq = wm831x_irq(wm831x,
+ platform_get_irq_byname(pdev, "TCHPD"));
if (pdata && pdata->pd_irq)
wm831x_ts->pd_irq = pdata->pd_irq;
- else
- wm831x_ts->pd_irq = platform_get_irq_byname(pdev, "TCHPD");
if (pdata)
wm831x_ts->pressure = pdata->pressure;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c69843742bb0..340893727538 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -162,4 +162,25 @@ config TEGRA_IOMMU_SMMU
space through the SMMU (System Memory Management Unit)
hardware included on Tegra SoCs.
+config EXYNOS_IOMMU
+ bool "Exynos IOMMU Support"
+ depends on ARCH_EXYNOS && EXYNOS_DEV_SYSMMU
+ select IOMMU_API
+ help
+	  Support for the IOMMU (System MMU) of the Samsung Exynos application
+	  processor family. This enables H/W multimedia accelerators to see
+	  non-linear physical memory chunks as linear memory in their
+	  address spaces.
+
+ If unsure, say N here.
+
+config EXYNOS_IOMMU_DEBUG
+ bool "Debugging log for Exynos IOMMU"
+ depends on EXYNOS_IOMMU
+ help
+	  Select this to see the detailed log messages that show what
+	  happens in the IOMMU driver.
+
+	  Say N unless you need kernel log messages for IOMMU debugging.
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 3e5e82ae9f0d..76e54ef796de 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
+obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a5bee8e2dfce..d90a421e9cac 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -450,12 +450,27 @@ static void dump_command(unsigned long phys_addr)
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
- u32 *event = __evt;
- int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
- int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
- int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
- int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
- u64 address = (u64)(((u64)event[3]) << 32) | event[2];
+ int type, devid, domid, flags;
+ volatile u32 *event = __evt;
+ int count = 0;
+ u64 address;
+
+retry:
+ type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
+ devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+ domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+ flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+ address = (u64)(((u64)event[3]) << 32) | event[2];
+
+ if (type == 0) {
+ /* Did we hit the erratum? */
+ if (++count == LOOP_TIMEOUT) {
+ pr_err("AMD-Vi: No event written to event log\n");
+ return;
+ }
+ udelay(1);
+ goto retry;
+ }
printk(KERN_ERR "AMD-Vi: Event logged [");
@@ -508,6 +523,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
default:
printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
}
+
+ memset(__evt, 0, 4 * sizeof(u32));
}
static void iommu_poll_events(struct amd_iommu *iommu)
@@ -2035,20 +2052,20 @@ out_err:
}
/* FIXME: Move this to PCI code */
-#define PCI_PRI_TLP_OFF (1 << 2)
+#define PCI_PRI_TLP_OFF (1 << 15)
bool pci_pri_tlp_required(struct pci_dev *pdev)
{
- u16 control;
+ u16 status;
int pos;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return false;
- pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
- return (control & PCI_PRI_TLP_OFF) ? true : false;
+ return (status & PCI_PRI_TLP_OFF) ? true : false;
}
/*
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
new file mode 100644
index 000000000000..9a114b9ff170
--- /dev/null
+++ b/drivers/iommu/exynos-iommu.c
@@ -0,0 +1,1076 @@
+/* linux/drivers/iommu/exynos_iommu.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/iommu.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/export.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+
+#include <mach/sysmmu.h>
+
+/* We do not consider super section mapping (16MB) */
+#define SECT_ORDER 20
+#define LPAGE_ORDER 16
+#define SPAGE_ORDER 12
+
+#define SECT_SIZE (1 << SECT_ORDER)
+#define LPAGE_SIZE (1 << LPAGE_ORDER)
+#define SPAGE_SIZE (1 << SPAGE_ORDER)
+
+#define SECT_MASK (~(SECT_SIZE - 1))
+#define LPAGE_MASK (~(LPAGE_SIZE - 1))
+#define SPAGE_MASK (~(SPAGE_SIZE - 1))
+
+#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
+#define lv1ent_page(sent) ((*(sent) & 3) == 1)
+#define lv1ent_section(sent) ((*(sent) & 3) == 2)
+
+#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
+#define lv2ent_small(pent) ((*(pent) & 2) == 2)
+#define lv2ent_large(pent) ((*(pent) & 3) == 1)
+
+#define section_phys(sent) (*(sent) & SECT_MASK)
+#define section_offs(iova) ((iova) & 0xFFFFF)
+#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
+#define lpage_offs(iova) ((iova) & 0xFFFF)
+#define spage_phys(pent) (*(pent) & SPAGE_MASK)
+#define spage_offs(iova) ((iova) & 0xFFF)
+
+#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
+#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
+
+#define NUM_LV1ENTRIES 4096
+#define NUM_LV2ENTRIES 256
+
+#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
+
+#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
+
+#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)
+
+#define mk_lv1ent_sect(pa) ((pa) | 2)
+#define mk_lv1ent_page(pa) ((pa) | 1)
+#define mk_lv2ent_lpage(pa) ((pa) | 1)
+#define mk_lv2ent_spage(pa) ((pa) | 2)
+
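+/*
+ * Page table geometry implied by the macros above: a 16KB first-level table
+ * of 4096 entries, each covering a 1MB section; a first-level page entry
+ * points to a 1KB second-level table of 256 entries mapping 4KB small pages,
+ * and a 64KB large page occupies 16 consecutive second-level entries.
+ */
+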
+#define CTRL_ENABLE 0x5
+#define CTRL_BLOCK 0x7
+#define CTRL_DISABLE 0x0
+
+#define REG_MMU_CTRL 0x000
+#define REG_MMU_CFG 0x004
+#define REG_MMU_STATUS 0x008
+#define REG_MMU_FLUSH 0x00C
+#define REG_MMU_FLUSH_ENTRY 0x010
+#define REG_PT_BASE_ADDR 0x014
+#define REG_INT_STATUS 0x018
+#define REG_INT_CLEAR 0x01C
+
+#define REG_PAGE_FAULT_ADDR 0x024
+#define REG_AW_FAULT_ADDR 0x028
+#define REG_AR_FAULT_ADDR 0x02C
+#define REG_DEFAULT_SLAVE_ADDR 0x030
+
+#define REG_MMU_VERSION 0x034
+
+#define REG_PB0_SADDR 0x04C
+#define REG_PB0_EADDR 0x050
+#define REG_PB1_SADDR 0x054
+#define REG_PB1_EADDR 0x058
+
+static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
+{
+ return pgtable + lv1ent_offset(iova);
+}
+
+static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
+{
+ return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
+}
+
+enum exynos_sysmmu_inttype {
+ SYSMMU_PAGEFAULT,
+ SYSMMU_AR_MULTIHIT,
+ SYSMMU_AW_MULTIHIT,
+ SYSMMU_BUSERROR,
+ SYSMMU_AR_SECURITY,
+ SYSMMU_AR_ACCESS,
+ SYSMMU_AW_SECURITY,
+ SYSMMU_AW_PROTECTION, /* 7 */
+ SYSMMU_FAULT_UNKNOWN,
+ SYSMMU_FAULTS_NUM
+};
+
+/*
+ * @itype: type of fault.
+ * @pgtable_base: the physical address of the page table base. This is 0 if
+ *		  @itype is SYSMMU_BUSERROR.
+ * @fault_addr: the device (virtual) address that the System MMU tried to
+ *		 translate. This is 0 if @itype is SYSMMU_BUSERROR.
+ */
+typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
+ unsigned long pgtable_base, unsigned long fault_addr);
+
+static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
+ REG_PAGE_FAULT_ADDR,
+ REG_AR_FAULT_ADDR,
+ REG_AW_FAULT_ADDR,
+ REG_DEFAULT_SLAVE_ADDR,
+ REG_AR_FAULT_ADDR,
+ REG_AR_FAULT_ADDR,
+ REG_AW_FAULT_ADDR,
+ REG_AW_FAULT_ADDR
+};
+
+static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
+ "PAGE FAULT",
+ "AR MULTI-HIT FAULT",
+ "AW MULTI-HIT FAULT",
+ "BUS ERROR",
+ "AR SECURITY PROTECTION FAULT",
+ "AR ACCESS PROTECTION FAULT",
+ "AW SECURITY PROTECTION FAULT",
+ "AW ACCESS PROTECTION FAULT",
+ "UNKNOWN FAULT"
+};
+
+struct exynos_iommu_domain {
+ struct list_head clients; /* list of sysmmu_drvdata.node */
+ unsigned long *pgtable; /* lv1 page table, 16KB */
+ short *lv2entcnt; /* free lv2 entry counter for each section */
+ spinlock_t lock; /* lock for this structure */
+ spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
+};
+
+struct sysmmu_drvdata {
+ struct list_head node; /* entry of exynos_iommu_domain.clients */
+ struct device *sysmmu; /* System MMU's device descriptor */
+ struct device *dev; /* Owner of system MMU */
+ char *dbgname;
+ int nsfrs;
+ void __iomem **sfrbases;
+ struct clk *clk[2];
+ int activations;
+ rwlock_t lock;
+ struct iommu_domain *domain;
+ sysmmu_fault_handler_t fault_handler;
+ unsigned long pgtable;
+};
+
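+/*
+ * Activation is reference counted: enable/disable calls may nest, and the
+ * hardware is only programmed on the first enable and shut down again on
+ * the last disable.
+ */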
+static bool set_sysmmu_active(struct sysmmu_drvdata *data)
+{
+	/* returns true if the System MMU was not active previously
+	   and needs to be initialized */
+ return ++data->activations == 1;
+}
+
+static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
+{
+	/* returns true if the System MMU needs to be disabled */
+ BUG_ON(data->activations < 1);
+ return --data->activations == 0;
+}
+
+static bool is_sysmmu_active(struct sysmmu_drvdata *data)
+{
+ return data->activations > 0;
+}
+
+static void sysmmu_unblock(void __iomem *sfrbase)
+{
+ __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
+}
+
+static bool sysmmu_block(void __iomem *sfrbase)
+{
+ int i = 120;
+
+ __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
+ while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
+ --i;
+
+ if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
+ sysmmu_unblock(sfrbase);
+ return false;
+ }
+
+ return true;
+}
+
+static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
+{
+ __raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
+}
+
+static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
+ unsigned long iova)
+{
+ __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
+}
+
+static void __sysmmu_set_ptbase(void __iomem *sfrbase,
+ unsigned long pgd)
+{
+ __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
+ __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
+
+ __sysmmu_tlb_invalidate(sfrbase);
+}
+
+static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
+ unsigned long size, int idx)
+{
+ __raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
+ __raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
+}
+
+void exynos_sysmmu_set_prefbuf(struct device *dev,
+ unsigned long base0, unsigned long size0,
+ unsigned long base1, unsigned long size1)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ unsigned long flags;
+ int i;
+
+ BUG_ON((base0 + size0) <= base0);
+ BUG_ON((size1 > 0) && ((base1 + size1) <= base1));
+
+ read_lock_irqsave(&data->lock, flags);
+ if (!is_sysmmu_active(data))
+ goto finish;
+
+ for (i = 0; i < data->nsfrs; i++) {
+ if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
+ if (!sysmmu_block(data->sfrbases[i]))
+ continue;
+
+ if (size1 == 0) {
+ if (size0 <= SZ_128K) {
+ base1 = base0;
+ size1 = size0;
+ } else {
+ size1 = size0 -
+ ALIGN(size0 / 2, SZ_64K);
+ size0 = size0 - size1;
+ base1 = base0 + size0;
+ }
+ }
+
+ __sysmmu_set_prefbuf(
+ data->sfrbases[i], base0, size0, 0);
+ __sysmmu_set_prefbuf(
+ data->sfrbases[i], base1, size1, 1);
+
+ sysmmu_unblock(data->sfrbases[i]);
+ }
+ }
+finish:
+ read_unlock_irqrestore(&data->lock, flags);
+}
+
+static void __set_fault_handler(struct sysmmu_drvdata *data,
+ sysmmu_fault_handler_t handler)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&data->lock, flags);
+ data->fault_handler = handler;
+ write_unlock_irqrestore(&data->lock, flags);
+}
+
+void exynos_sysmmu_set_fault_handler(struct device *dev,
+ sysmmu_fault_handler_t handler)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+ __set_fault_handler(data, handler);
+}
+
+static int default_fault_handler(enum exynos_sysmmu_inttype itype,
+ unsigned long pgtable_base, unsigned long fault_addr)
+{
+ unsigned long *ent;
+
+ if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
+ itype = SYSMMU_FAULT_UNKNOWN;
+
+	pr_err("%s occurred at 0x%lx (Page table base: 0x%lx)\n",
+ sysmmu_fault_name[itype], fault_addr, pgtable_base);
+
+ ent = section_entry(__va(pgtable_base), fault_addr);
+ pr_err("\tLv1 entry: 0x%lx\n", *ent);
+
+ if (lv1ent_page(ent)) {
+ ent = page_entry(ent, fault_addr);
+ pr_err("\t Lv2 entry: 0x%lx\n", *ent);
+ }
+
+ pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
+
+ BUG();
+
+ return 0;
+}
+
+static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
+{
+	/* The System MMU is in a blocked state when an interrupt occurs. */
+ struct sysmmu_drvdata *data = dev_id;
+ struct resource *irqres;
+ struct platform_device *pdev;
+ enum exynos_sysmmu_inttype itype;
+ unsigned long addr = -1;
+
+ int i, ret = -ENOSYS;
+
+ read_lock(&data->lock);
+
+ WARN_ON(!is_sysmmu_active(data));
+
+ pdev = to_platform_device(data->sysmmu);
+ for (i = 0; i < (pdev->num_resources / 2); i++) {
+ irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (irqres && ((int)irqres->start == irq))
+ break;
+ }
+
+ if (i == pdev->num_resources) {
+ itype = SYSMMU_FAULT_UNKNOWN;
+ } else {
+ itype = (enum exynos_sysmmu_inttype)
+ __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
+ if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
+ itype = SYSMMU_FAULT_UNKNOWN;
+ else
+ addr = __raw_readl(
+ data->sfrbases[i] + fault_reg_offset[itype]);
+ }
+
+ if (data->domain)
+ ret = report_iommu_fault(data->domain, data->dev,
+ addr, itype);
+
+ if ((ret == -ENOSYS) && data->fault_handler) {
+ unsigned long base = data->pgtable;
+ if (itype != SYSMMU_FAULT_UNKNOWN)
+ base = __raw_readl(
+ data->sfrbases[i] + REG_PT_BASE_ADDR);
+ ret = data->fault_handler(itype, base, addr);
+ }
+
+ if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
+ __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
+ else
+ dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
+ data->dbgname, sysmmu_fault_name[itype]);
+
+ if (itype != SYSMMU_FAULT_UNKNOWN)
+ sysmmu_unblock(data->sfrbases[i]);
+
+ read_unlock(&data->lock);
+
+ return IRQ_HANDLED;
+}
+
+static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
+{
+ unsigned long flags;
+ bool disabled = false;
+ int i;
+
+ write_lock_irqsave(&data->lock, flags);
+
+ if (!set_sysmmu_inactive(data))
+ goto finish;
+
+ for (i = 0; i < data->nsfrs; i++)
+ __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);
+
+ if (data->clk[1])
+ clk_disable(data->clk[1]);
+ if (data->clk[0])
+ clk_disable(data->clk[0]);
+
+ disabled = true;
+ data->pgtable = 0;
+ data->domain = NULL;
+finish:
+ write_unlock_irqrestore(&data->lock, flags);
+
+ if (disabled)
+ dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
+ else
+ dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
+ data->dbgname, data->activations);
+
+ return disabled;
+}
+
+/* __exynos_sysmmu_enable: Enables System MMU
+ *
+ * returns -error if an error occurred and the System MMU is not enabled,
+ * 0 if the System MMU has just been enabled, and 1 if the System MMU was
+ * already enabled before.
+ */
+static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
+ unsigned long pgtable, struct iommu_domain *domain)
+{
+ int i, ret = 0;
+ unsigned long flags;
+
+ write_lock_irqsave(&data->lock, flags);
+
+ if (!set_sysmmu_active(data)) {
+ if (WARN_ON(pgtable != data->pgtable)) {
+ ret = -EBUSY;
+ set_sysmmu_inactive(data);
+ } else {
+ ret = 1;
+ }
+
+ dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
+ goto finish;
+ }
+
+ if (data->clk[0])
+ clk_enable(data->clk[0]);
+ if (data->clk[1])
+ clk_enable(data->clk[1]);
+
+ data->pgtable = pgtable;
+
+ for (i = 0; i < data->nsfrs; i++) {
+ __sysmmu_set_ptbase(data->sfrbases[i], pgtable);
+
+ if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
+ /* System MMU version is 3.x */
+ __raw_writel((1 << 12) | (2 << 28),
+ data->sfrbases[i] + REG_MMU_CFG);
+ __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
+ __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
+ }
+
+ __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
+ }
+
+ data->domain = domain;
+
+ dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
+finish:
+ write_unlock_irqrestore(&data->lock, flags);
+
+ return ret;
+}
+
+int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ int ret;
+
+ BUG_ON(!memblock_is_memory(pgtable));
+
+ ret = pm_runtime_get_sync(data->sysmmu);
+ if (ret < 0) {
+ dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
+ return ret;
+ }
+
+ ret = __exynos_sysmmu_enable(data, pgtable, NULL);
+ if (WARN_ON(ret < 0)) {
+ pm_runtime_put(data->sysmmu);
+ dev_err(data->sysmmu,
+ "(%s) Already enabled with page table %#lx\n",
+ data->dbgname, data->pgtable);
+ } else {
+ data->dev = dev;
+ }
+
+ return ret;
+}
+
+bool exynos_sysmmu_disable(struct device *dev)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ bool disabled;
+
+ disabled = __exynos_sysmmu_disable(data);
+ pm_runtime_put(data->sysmmu);
+
+ return disabled;
+}
+
+static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
+{
+ unsigned long flags;
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+ read_lock_irqsave(&data->lock, flags);
+
+ if (is_sysmmu_active(data)) {
+ int i;
+ for (i = 0; i < data->nsfrs; i++) {
+ if (sysmmu_block(data->sfrbases[i])) {
+ __sysmmu_tlb_invalidate_entry(
+ data->sfrbases[i], iova);
+ sysmmu_unblock(data->sfrbases[i]);
+ }
+ }
+ } else {
+ dev_dbg(data->sysmmu,
+			"(%s) Disabled. Skipping TLB invalidation.\n",
+ data->dbgname);
+ }
+
+ read_unlock_irqrestore(&data->lock, flags);
+}
+
+void exynos_sysmmu_tlb_invalidate(struct device *dev)
+{
+ unsigned long flags;
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+ read_lock_irqsave(&data->lock, flags);
+
+ if (is_sysmmu_active(data)) {
+ int i;
+ for (i = 0; i < data->nsfrs; i++) {
+ if (sysmmu_block(data->sfrbases[i])) {
+ __sysmmu_tlb_invalidate(data->sfrbases[i]);
+ sysmmu_unblock(data->sfrbases[i]);
+ }
+ }
+ } else {
+ dev_dbg(data->sysmmu,
+			"(%s) Disabled. Skipping TLB invalidation.\n",
+ data->dbgname);
+ }
+
+ read_unlock_irqrestore(&data->lock, flags);
+}
+
+static int exynos_sysmmu_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ struct device *dev;
+ struct sysmmu_drvdata *data;
+
+ dev = &pdev->dev;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_dbg(dev, "Not enough memory\n");
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ ret = dev_set_drvdata(dev, data);
+ if (ret) {
+		dev_dbg(dev, "Unable to initialize driver data\n");
+ goto err_init;
+ }
+
+ data->nsfrs = pdev->num_resources / 2;
+ data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
+ GFP_KERNEL);
+ if (data->sfrbases == NULL) {
+ dev_dbg(dev, "Not enough memory\n");
+ ret = -ENOMEM;
+ goto err_init;
+ }
+
+ for (i = 0; i < data->nsfrs; i++) {
+ struct resource *res;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res) {
+ dev_dbg(dev, "Unable to find IOMEM region\n");
+ ret = -ENOENT;
+ goto err_res;
+ }
+
+ data->sfrbases[i] = ioremap(res->start, resource_size(res));
+ if (!data->sfrbases[i]) {
+ dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
+ res->start);
+ ret = -ENOENT;
+ goto err_res;
+ }
+ }
+
+ for (i = 0; i < data->nsfrs; i++) {
+ ret = platform_get_irq(pdev, i);
+ if (ret <= 0) {
+ dev_dbg(dev, "Unable to find IRQ resource\n");
+ goto err_irq;
+ }
+
+ ret = request_irq(ret, exynos_sysmmu_irq, 0,
+ dev_name(dev), data);
+ if (ret) {
+			dev_dbg(dev, "Unable to register interrupt handler\n");
+ goto err_irq;
+ }
+ }
+
+ if (dev_get_platdata(dev)) {
+ char *deli, *beg;
+ struct sysmmu_platform_data *platdata = dev_get_platdata(dev);
+
+ beg = platdata->clockname;
+
+ for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
+ /* NOTHING */;
+
+ if (*deli == '\0')
+ deli = NULL;
+ else
+ *deli = '\0';
+
+ data->clk[0] = clk_get(dev, beg);
+ if (IS_ERR(data->clk[0])) {
+ data->clk[0] = NULL;
+ dev_dbg(dev, "No clock descriptor registered\n");
+ }
+
+ if (data->clk[0] && deli) {
+ *deli = ',';
+ data->clk[1] = clk_get(dev, deli + 1);
+ if (IS_ERR(data->clk[1]))
+ data->clk[1] = NULL;
+ }
+
+ data->dbgname = platdata->dbgname;
+ }
+
+ data->sysmmu = dev;
+ rwlock_init(&data->lock);
+ INIT_LIST_HEAD(&data->node);
+
+ __set_fault_handler(data, &default_fault_handler);
+
+ if (dev->parent)
+ pm_runtime_enable(dev);
+
+ dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
+ return 0;
+err_irq:
+ while (i-- > 0) {
+ int irq;
+
+ irq = platform_get_irq(pdev, i);
+ free_irq(irq, data);
+ }
+err_res:
+ while (data->nsfrs-- > 0)
+ iounmap(data->sfrbases[data->nsfrs]);
+ kfree(data->sfrbases);
+err_init:
+ kfree(data);
+err_alloc:
+ dev_err(dev, "Failed to initialize\n");
+ return ret;
+}
+
+static struct platform_driver exynos_sysmmu_driver = {
+ .probe = exynos_sysmmu_probe,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "exynos-sysmmu",
+ }
+};
+
+static inline void pgtable_flush(void *vastart, void *vaend)
+{
+ dmac_flush_range(vastart, vaend);
+ outer_flush_range(virt_to_phys(vastart),
+ virt_to_phys(vaend));
+}
+
+static int exynos_iommu_domain_init(struct iommu_domain *domain)
+{
+ struct exynos_iommu_domain *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pgtable = (unsigned long *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO, 2);
+ if (!priv->pgtable)
+ goto err_pgtable;
+
+ priv->lv2entcnt = (short *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO, 1);
+ if (!priv->lv2entcnt)
+ goto err_counter;
+
+ pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
+
+ spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->pgtablelock);
+ INIT_LIST_HEAD(&priv->clients);
+
+ domain->priv = priv;
+ return 0;
+
+err_counter:
+ free_pages((unsigned long)priv->pgtable, 2);
+err_pgtable:
+ kfree(priv);
+ return -ENOMEM;
+}
+
+static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
+{
+ struct exynos_iommu_domain *priv = domain->priv;
+ struct sysmmu_drvdata *data;
+ unsigned long flags;
+ int i;
+
+ WARN_ON(!list_empty(&priv->clients));
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ list_for_each_entry(data, &priv->clients, node) {
+ while (!exynos_sysmmu_disable(data->dev))
+ ; /* until System MMU is actually disabled */
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ for (i = 0; i < NUM_LV1ENTRIES; i++)
+ if (lv1ent_page(priv->pgtable + i))
+ kfree(__va(lv2table_base(priv->pgtable + i)));
+
+ free_pages((unsigned long)priv->pgtable, 2);
+ free_pages((unsigned long)priv->lv2entcnt, 1);
+ kfree(domain->priv);
+ domain->priv = NULL;
+}
+
+static int exynos_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ struct exynos_iommu_domain *priv = domain->priv;
+ unsigned long flags;
+ int ret;
+
+ ret = pm_runtime_get_sync(data->sysmmu);
+ if (ret < 0)
+ return ret;
+
+ ret = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);
+
+ if (ret == 0) {
+		/* 'data->node' must not already be in priv->clients */
+ BUG_ON(!list_empty(&data->node));
+ data->dev = dev;
+ list_add_tail(&data->node, &priv->clients);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (ret < 0) {
+ dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
+ __func__, __pa(priv->pgtable));
+ pm_runtime_put(data->sysmmu);
+ } else if (ret > 0) {
+ dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
+ __func__, __pa(priv->pgtable));
+ } else {
+ dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
+ __func__, __pa(priv->pgtable));
+ }
+
+ return ret;
+}
+
+static void exynos_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ struct exynos_iommu_domain *priv = domain->priv;
+ struct list_head *pos;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ list_for_each(pos, &priv->clients) {
+ if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ goto finish;
+
+ if (__exynos_sysmmu_disable(data)) {
+ dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
+ __func__, __pa(priv->pgtable));
+ list_del(&data->node);
+ INIT_LIST_HEAD(&data->node);
+
+ } else {
+ dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
+ __func__, __pa(priv->pgtable));
+ }
+
+finish:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (found)
+ pm_runtime_put(data->sysmmu);
+}
+
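+/*
+ * If the first-level entry is a fault entry, allocate a second-level table
+ * on demand, hook it into the section entry, reset the free-entry counter
+ * and flush both tables before returning the second-level entry for @iova.
+ */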
+static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
+ short *pgcounter)
+{
+ if (lv1ent_fault(sent)) {
+ unsigned long *pent;
+
+ pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
+ BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
+ if (!pent)
+ return NULL;
+
+ *sent = mk_lv1ent_page(__pa(pent));
+ *pgcounter = NUM_LV2ENTRIES;
+ pgtable_flush(pent, pent + NUM_LV2ENTRIES);
+ pgtable_flush(sent, sent + 1);
+ }
+
+ return page_entry(sent, iova);
+}
+
+static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
+{
+ if (lv1ent_section(sent))
+ return -EADDRINUSE;
+
+ if (lv1ent_page(sent)) {
+ if (*pgcnt != NUM_LV2ENTRIES)
+ return -EADDRINUSE;
+
+ kfree(page_entry(sent, 0));
+
+ *pgcnt = 0;
+ }
+
+ *sent = mk_lv1ent_sect(paddr);
+
+ pgtable_flush(sent, sent + 1);
+
+ return 0;
+}
+
+static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
+ short *pgcnt)
+{
+ if (size == SPAGE_SIZE) {
+ if (!lv2ent_fault(pent))
+ return -EADDRINUSE;
+
+ *pent = mk_lv2ent_spage(paddr);
+ pgtable_flush(pent, pent + 1);
+ *pgcnt -= 1;
+ } else { /* size == LPAGE_SIZE */
+ int i;
+ for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
+ if (!lv2ent_fault(pent)) {
+ memset(pent, 0, sizeof(*pent) * i);
+ return -EADDRINUSE;
+ }
+
+ *pent = mk_lv2ent_lpage(paddr);
+ }
+ pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
+ *pgcnt -= SPAGES_PER_LPAGE;
+ }
+
+ return 0;
+}
+
+static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct exynos_iommu_domain *priv = domain->priv;
+ unsigned long *entry;
+ unsigned long flags;
+ int ret = -ENOMEM;
+
+ BUG_ON(priv->pgtable == NULL);
+
+ spin_lock_irqsave(&priv->pgtablelock, flags);
+
+ entry = section_entry(priv->pgtable, iova);
+
+ if (size == SECT_SIZE) {
+ ret = lv1set_section(entry, paddr,
+ &priv->lv2entcnt[lv1ent_offset(iova)]);
+ } else {
+ unsigned long *pent;
+
+ pent = alloc_lv2entry(entry, iova,
+ &priv->lv2entcnt[lv1ent_offset(iova)]);
+
+ if (!pent)
+ ret = -ENOMEM;
+ else
+ ret = lv2set_page(pent, paddr, size,
+ &priv->lv2entcnt[lv1ent_offset(iova)]);
+ }
+
+ if (ret) {
+ pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
+ __func__, iova, size);
+ }
+
+ spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+ return ret;
+}
+
+static size_t exynos_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct exynos_iommu_domain *priv = domain->priv;
+ struct sysmmu_drvdata *data;
+ unsigned long flags;
+ unsigned long *ent;
+
+ BUG_ON(priv->pgtable == NULL);
+
+ spin_lock_irqsave(&priv->pgtablelock, flags);
+
+ ent = section_entry(priv->pgtable, iova);
+
+ if (lv1ent_section(ent)) {
+ BUG_ON(size < SECT_SIZE);
+
+ *ent = 0;
+ pgtable_flush(ent, ent + 1);
+ size = SECT_SIZE;
+ goto done;
+ }
+
+ if (unlikely(lv1ent_fault(ent))) {
+ if (size > SECT_SIZE)
+ size = SECT_SIZE;
+ goto done;
+ }
+
+ /* lv1ent_page(sent) == true here */
+
+ ent = page_entry(ent, iova);
+
+ if (unlikely(lv2ent_fault(ent))) {
+ size = SPAGE_SIZE;
+ goto done;
+ }
+
+ if (lv2ent_small(ent)) {
+ *ent = 0;
+ size = SPAGE_SIZE;
+ priv->lv2entcnt[lv1ent_offset(iova)] += 1;
+ goto done;
+ }
+
+ /* lv1ent_large(ent) == true here */
+ BUG_ON(size < LPAGE_SIZE);
+
+ memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
+
+ size = LPAGE_SIZE;
+ priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
+done:
+ spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_for_each_entry(data, &priv->clients, node)
+ sysmmu_tlb_invalidate_entry(data->dev, iova);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+
+ return size;
+}
+
+static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
+ unsigned long iova)
+{
+ struct exynos_iommu_domain *priv = domain->priv;
+ unsigned long *entry;
+ unsigned long flags;
+ phys_addr_t phys = 0;
+
+ spin_lock_irqsave(&priv->pgtablelock, flags);
+
+ entry = section_entry(priv->pgtable, iova);
+
+ if (lv1ent_section(entry)) {
+ phys = section_phys(entry) + section_offs(iova);
+ } else if (lv1ent_page(entry)) {
+ entry = page_entry(entry, iova);
+
+ if (lv2ent_large(entry))
+ phys = lpage_phys(entry) + lpage_offs(iova);
+ else if (lv2ent_small(entry))
+ phys = spage_phys(entry) + spage_offs(iova);
+ }
+
+ spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+ return phys;
+}
+
+static struct iommu_ops exynos_iommu_ops = {
+ .domain_init = &exynos_iommu_domain_init,
+ .domain_destroy = &exynos_iommu_domain_destroy,
+ .attach_dev = &exynos_iommu_attach_device,
+ .detach_dev = &exynos_iommu_detach_device,
+ .map = &exynos_iommu_map,
+ .unmap = &exynos_iommu_unmap,
+ .iova_to_phys = &exynos_iommu_iova_to_phys,
+ .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
+};
+
+static int __init exynos_iommu_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&exynos_sysmmu_driver);
+
+ if (ret == 0)
+ bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
+
+ return ret;
+}
+subsys_initcall(exynos_iommu_init);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index bf2fbaad5e22..b12af2ff8c54 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1907,6 +1907,15 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
+static inline void unlink_domain_info(struct device_domain_info *info)
+{
+ assert_spin_locked(&device_domain_lock);
+ list_del(&info->link);
+ list_del(&info->global);
+ if (info->dev)
+ info->dev->dev.archdata.iommu = NULL;
+}
+
static void domain_remove_dev_info(struct dmar_domain *domain)
{
struct device_domain_info *info;
@@ -1917,10 +1926,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
while (!list_empty(&domain->devices)) {
info = list_entry(domain->devices.next,
struct device_domain_info, link);
- list_del(&info->link);
- list_del(&info->global);
- if (info->dev)
- info->dev->dev.archdata.iommu = NULL;
+ unlink_domain_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
iommu_disable_dev_iotlb(info);
@@ -2287,12 +2293,6 @@ static int domain_add_dev_info(struct dmar_domain *domain,
if (!info)
return -ENOMEM;
- ret = domain_context_mapping(domain, pdev, translation);
- if (ret) {
- free_devinfo_mem(info);
- return ret;
- }
-
info->segment = pci_domain_nr(pdev->bus);
info->bus = pdev->bus->number;
info->devfn = pdev->devfn;
@@ -2305,6 +2305,15 @@ static int domain_add_dev_info(struct dmar_domain *domain,
pdev->dev.archdata.iommu = info;
spin_unlock_irqrestore(&device_domain_lock, flags);
+ ret = domain_context_mapping(domain, pdev, translation);
+ if (ret) {
+ spin_lock_irqsave(&device_domain_lock, flags);
+ unlink_domain_info(info);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ free_devinfo_mem(info);
+ return ret;
+ }
+
return 0;
}
@@ -3728,10 +3737,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
if (info->segment == pci_domain_nr(pdev->bus) &&
info->bus == pdev->bus->number &&
info->devfn == pdev->devfn) {
- list_del(&info->link);
- list_del(&info->global);
- if (info->dev)
- info->dev->dev.archdata.iommu = NULL;
+ unlink_domain_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
iommu_disable_dev_iotlb(info);
@@ -3786,11 +3792,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
while (!list_empty(&domain->devices)) {
info = list_entry(domain->devices.next,
struct device_domain_info, link);
- list_del(&info->link);
- list_del(&info->global);
- if (info->dev)
- info->dev->dev.archdata.iommu = NULL;
-
+ unlink_domain_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags1);
iommu_disable_dev_iotlb(info);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2198b2dbbcd3..8b9ded88e6f5 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -119,6 +119,7 @@ EXPORT_SYMBOL_GPL(iommu_present);
* iommu_set_fault_handler() - set a fault handler for an iommu domain
* @domain: iommu domain
* @handler: fault handler
+ * @token: user data, will be passed back to the fault handler
*
* This function should be used by IOMMU users which want to be notified
* whenever an IOMMU fault happens.
@@ -127,11 +128,13 @@ EXPORT_SYMBOL_GPL(iommu_present);
* error code otherwise.
*/
void iommu_set_fault_handler(struct iommu_domain *domain,
- iommu_fault_handler_t handler)
+ iommu_fault_handler_t handler,
+ void *token)
{
BUG_ON(!domain);
domain->handler = handler;
+ domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 6899dcd02dfa..e70ee2b59df9 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -41,11 +41,13 @@
* @pgtable: the page table
* @iommu_dev: an omap iommu device attached to this domain. only a single
* iommu device can be attached for now.
+ * @dev: Device using this domain.
* @lock: domain lock, should be taken when attaching/detaching
*/
struct omap_iommu_domain {
u32 *pgtable;
struct omap_iommu *iommu_dev;
+ struct device *dev;
spinlock_t lock;
};
@@ -1081,6 +1083,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
+ omap_domain->dev = dev;
oiommu->domain = domain;
out:
@@ -1088,19 +1091,16 @@ out:
return ret;
}
-static void omap_iommu_detach_dev(struct iommu_domain *domain,
- struct device *dev)
+static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
+ struct device *dev)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
-
- spin_lock(&omap_domain->lock);
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
/* only a single device is supported per domain for now */
if (omap_domain->iommu_dev != oiommu) {
dev_err(dev, "invalid iommu device\n");
- goto out;
+ return;
}
iopgtable_clear_entry_all(oiommu);
@@ -1108,8 +1108,16 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
omap_iommu_detach(oiommu);
omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
+ omap_domain->dev = NULL;
+}
-out:
+static void omap_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct omap_iommu_domain *omap_domain = domain->priv;
+
+ spin_lock(&omap_domain->lock);
+ _omap_iommu_detach_dev(omap_domain, dev);
spin_unlock(&omap_domain->lock);
}
@@ -1148,13 +1156,19 @@ out:
return -ENOMEM;
}
-/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
struct omap_iommu_domain *omap_domain = domain->priv;
domain->priv = NULL;
+ /*
+	 * Detach the iommu device if one is still attached
+	 * (currently, only one device can be attached).
+ */
+ if (omap_domain->iommu_dev)
+ _omap_iommu_detach_dev(omap_domain, omap_domain->dev);
+
kfree(omap_domain->pgtable);
kfree(omap_domain);
}
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 779306ee7b16..0c0a37792218 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -29,15 +29,17 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
+#include <linux/of.h>
#include <asm/cacheflush.h>
/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES (SZ_4K)
-#define GART_CONFIG 0x24
-#define GART_ENTRY_ADDR 0x28
-#define GART_ENTRY_DATA 0x2c
+#define GART_REG_BASE 0x24
+#define GART_CONFIG (0x24 - GART_REG_BASE)
+#define GART_ENTRY_ADDR (0x28 - GART_REG_BASE)
+#define GART_ENTRY_DATA (0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID (1 << 31)
#define GART_PAGE_SHIFT 12
@@ -158,7 +160,7 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
struct gart_client *client, *c;
int err = 0;
- gart = dev_get_drvdata(dev->parent);
+ gart = gart_handle;
if (!gart)
return -EINVAL;
domain->priv = gart;
@@ -422,6 +424,14 @@ const struct dev_pm_ops tegra_gart_pm_ops = {
.resume = tegra_gart_resume,
};
+#ifdef CONFIG_OF
+static struct of_device_id tegra_gart_of_match[] __devinitdata = {
+ { .compatible = "nvidia,tegra20-gart", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_gart_of_match);
+#endif
+
static struct platform_driver tegra_gart_driver = {
.probe = tegra_gart_probe,
.remove = tegra_gart_remove,
@@ -429,6 +439,7 @@ static struct platform_driver tegra_gart_driver = {
.owner = THIS_MODULE,
.name = "tegra-gart",
.pm = &tegra_gart_pm_ops,
+ .of_match_table = of_match_ptr(tegra_gart_of_match),
},
};
@@ -448,4 +459,5 @@ module_exit(tegra_gart_exit);
MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
+MODULE_ALIAS("platform:tegra-gart");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index eb93c821f592..ecd679043d77 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -733,7 +733,7 @@ static int smmu_iommu_attach_dev(struct iommu_domain *domain,
pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
}
- dev_dbg(smmu->dev, "%s is attached\n", dev_name(c->dev));
+ dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
return 0;
err_client:
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.h b/drivers/isdn/hardware/mISDN/hfcsusb.h
index cb1231b08f78..4157311d569d 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.h
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.h
@@ -410,6 +410,12 @@ static struct usb_device_id hfcsusb_idtab[] = {
{LED_SCHEME1, {0x88, -64, -32, -16},
"ZyXEL OMNI.NET USB II"}),
},
+ {
+ USB_DEVICE(0x1ae7, 0x0525),
+ .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+ {LED_SCHEME1, {0x88, -64, -32, -16},
+ "X-Tensions USB ISDN TA XC-525"}),
+ },
{ }
};
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ff4b8cfda585..04cb8c88d74b 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -50,6 +50,19 @@ config LEDS_LM3530
controlled manually or using PWM input or using ambient
light automatically.
+config LEDS_LM3533
+ tristate "LED support for LM3533"
+ depends on LEDS_CLASS
+ depends on MFD_LM3533
+ help
+ This option enables support for the LEDs on National Semiconductor /
+ TI LM3533 Lighting Power chips.
+
+ The LEDs can be controlled directly, through PWM input, or by the
+ ambient-light-sensor interface. The chip supports
+ hardware-accelerated blinking with maximum on and off periods of 9.8
+ and 77 seconds respectively.
+
config LEDS_LOCOMO
tristate "LED Support for Locomo device"
depends on LEDS_CLASS
@@ -259,6 +272,14 @@ config LEDS_DA903X
This option enables support for on-chip LED drivers found
on Dialog Semiconductor DA9030/DA9034 PMICs.
+config LEDS_DA9052
+ tristate "Dialog DA9052/DA9053 LEDS"
+ depends on LEDS_CLASS
+ depends on PMIC_DA9052
+ help
+ This option enables support for on-chip LED drivers found
+ on Dialog Semiconductor DA9052-BC and DA9053-AA/Bx PMICs.
+
config LEDS_DAC124S085
tristate "LED Support for DAC124S085 SPI DAC"
depends on LEDS_CLASS
@@ -471,4 +492,12 @@ config LEDS_TRIGGER_DEFAULT_ON
comment "iptables trigger is under Netfilter config (LED target)"
depends on LEDS_TRIGGERS
+config LEDS_TRIGGER_TRANSIENT
+ tristate "LED Transient Trigger"
+ depends on LEDS_TRIGGERS
+ help
+	  This allows one-time activation of a transient state on
+	  GPIO/PWM-based hardware.
+	  If unsure, say Y.
+
endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 890481cb09f6..f8958cd6cf6e 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
+obj-$(CONFIG_LEDS_LM3533) += leds-lm3533.o
obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
obj-$(CONFIG_LEDS_PCA9633) += leds-pca9633.o
obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
+obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
@@ -56,3 +58,4 @@ obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
+obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 5bff8439dc68..8ee92c81aec2 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -44,23 +44,18 @@ static ssize_t led_brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ unsigned long state;
ssize_t ret = -EINVAL;
- char *after;
- unsigned long state = simple_strtoul(buf, &after, 10);
- size_t count = after - buf;
- if (isspace(*after))
- count++;
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
- if (count == size) {
- ret = count;
+ if (state == LED_OFF)
+ led_trigger_remove(led_cdev);
+ led_set_brightness(led_cdev, state);
- if (state == LED_OFF)
- led_trigger_remove(led_cdev);
- led_set_brightness(led_cdev, state);
- }
-
- return ret;
+ return size;
}
static ssize_t led_max_brightness_show(struct device *dev,
diff --git a/drivers/leds/leds-da9052.c b/drivers/leds/leds-da9052.c
new file mode 100644
index 000000000000..58a5244c437e
--- /dev/null
+++ b/drivers/leds/leds-da9052.c
@@ -0,0 +1,214 @@
+/*
+ * LED Driver for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/pdata.h>
+
+#define DA9052_OPENDRAIN_OUTPUT 2
+#define DA9052_SET_HIGH_LVL_OUTPUT (1 << 3)
+#define DA9052_MASK_UPPER_NIBBLE 0xF0
+#define DA9052_MASK_LOWER_NIBBLE 0x0F
+#define DA9052_NIBBLE_SHIFT 4
+#define DA9052_MAX_BRIGHTNESS 0x5f
+
+struct da9052_led {
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct da9052 *da9052;
+ unsigned char led_index;
+ unsigned char id;
+ int brightness;
+};
+
+static unsigned char led_reg[] = {
+ DA9052_LED_CONT_4_REG,
+ DA9052_LED_CONT_5_REG,
+};
+
+static int da9052_set_led_brightness(struct da9052_led *led)
+{
+ u8 val;
+ int error;
+
+ val = (led->brightness & 0x7f) | DA9052_LED_CONT_DIM;
+
+ error = da9052_reg_write(led->da9052, led_reg[led->led_index], val);
+ if (error < 0)
+ dev_err(led->da9052->dev, "Failed to set led brightness, %d\n",
+ error);
+ return error;
+}
+
+static void da9052_led_work(struct work_struct *work)
+{
+ struct da9052_led *led = container_of(work, struct da9052_led, work);
+
+ da9052_set_led_brightness(led);
+}
+
+static void da9052_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct da9052_led *led;
+
+ led = container_of(led_cdev, struct da9052_led, cdev);
+ led->brightness = value;
+ schedule_work(&led->work);
+}
+
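+/*
+ * Program both nibbles of the GPIO 14-15 register so that GPIO 14 and 15
+ * become open-drain outputs at the high output level (assumed here to be
+ * the pins wired to the LEDs driven by this driver).
+ */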
+static int da9052_configure_leds(struct da9052 *da9052)
+{
+ int error;
+ unsigned char register_value = DA9052_OPENDRAIN_OUTPUT
+ | DA9052_SET_HIGH_LVL_OUTPUT;
+
+ error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
+ DA9052_MASK_LOWER_NIBBLE,
+ register_value);
+
+ if (error < 0) {
+ dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
+ error);
+ return error;
+ }
+
+ error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
+ DA9052_MASK_UPPER_NIBBLE,
+ register_value << DA9052_NIBBLE_SHIFT);
+ if (error < 0)
+ dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
+ error);
+
+ return error;
+}
+
+static int __devinit da9052_led_probe(struct platform_device *pdev)
+{
+ struct da9052_pdata *pdata;
+ struct da9052 *da9052;
+ struct led_platform_data *pled;
+ struct da9052_led *led = NULL;
+ int error = -ENODEV;
+ int i;
+
+ da9052 = dev_get_drvdata(pdev->dev.parent);
+ pdata = da9052->dev->platform_data;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "No platform data\n");
+ goto err;
+ }
+
+ pled = pdata->pled;
+ if (pled == NULL) {
+ dev_err(&pdev->dev, "No platform data for LED\n");
+ goto err;
+ }
+
+ led = devm_kzalloc(&pdev->dev,
+ sizeof(struct da9052_led) * pled->num_leds,
+ GFP_KERNEL);
+ if (led == NULL) {
+ dev_err(&pdev->dev, "Failed to alloc memory\n");
+ error = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < pled->num_leds; i++) {
+ led[i].cdev.name = pled->leds[i].name;
+ led[i].cdev.brightness_set = da9052_led_set;
+ led[i].cdev.brightness = LED_OFF;
+ led[i].cdev.max_brightness = DA9052_MAX_BRIGHTNESS;
+ led[i].brightness = LED_OFF;
+ led[i].led_index = pled->leds[i].flags;
+ led[i].da9052 = dev_get_drvdata(pdev->dev.parent);
+ INIT_WORK(&led[i].work, da9052_led_work);
+
+ error = led_classdev_register(pdev->dev.parent, &led[i].cdev);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to register led %d\n",
+ led[i].led_index);
+ goto err_register;
+ }
+
+ error = da9052_set_led_brightness(&led[i]);
+ if (error) {
+ dev_err(&pdev->dev, "Unable to init led %d\n",
+ led[i].led_index);
+ continue;
+ }
+ }
+ error = da9052_configure_leds(led->da9052);
+ if (error) {
+		dev_err(&pdev->dev, "Failed to configure GPIO LEDs: %d\n", error);
+ goto err_register;
+ }
+
+ platform_set_drvdata(pdev, led);
+
+ return 0;
+
+err_register:
+ for (i = i - 1; i >= 0; i--) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+err:
+ return error;
+}
+
+static int __devexit da9052_led_remove(struct platform_device *pdev)
+{
+ struct da9052_led *led = platform_get_drvdata(pdev);
+ struct da9052_pdata *pdata;
+ struct da9052 *da9052;
+ struct led_platform_data *pled;
+ int i;
+
+ da9052 = dev_get_drvdata(pdev->dev.parent);
+ pdata = da9052->dev->platform_data;
+ pled = pdata->pled;
+
+ for (i = 0; i < pled->num_leds; i++) {
+ led[i].brightness = 0;
+ da9052_set_led_brightness(&led[i]);
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+
+ return 0;
+}
+
+static struct platform_driver da9052_led_driver = {
+ .driver = {
+ .name = "da9052-leds",
+ .owner = THIS_MODULE,
+ },
+ .probe = da9052_led_probe,
+ .remove = __devexit_p(da9052_led_remove),
+};
+
+module_platform_driver(da9052_led_driver);
+
+MODULE_AUTHOR("Dialog Semiconductor Ltd <dchen@diasemi.com>");
+MODULE_DESCRIPTION("LED driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 968fd5fef4fc..84ba6de8039c 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -113,6 +113,18 @@ struct lm3530_data {
bool enable;
};
+/*
+ * struct lm3530_als_data
+ * @config : value of ALS configuration register
+ * @imp_sel : value of ALS resistor select register
+ * @zones : values of ALS ZB (Zone Boundary) registers
+ */
+struct lm3530_als_data {
+ u8 config;
+ u8 imp_sel;
+ u8 zones[LM3530_ALS_ZB_MAX];
+};
+
static const u8 lm3530_reg[LM3530_REG_MAX] = {
LM3530_GEN_CONFIG,
LM3530_ALS_CONFIG,
@@ -141,29 +153,65 @@ static int lm3530_get_mode_from_str(const char *str)
return -1;
}
+static void lm3530_als_configure(struct lm3530_platform_data *pdata,
+ struct lm3530_als_data *als)
+{
+ int i;
+ u32 als_vmin, als_vmax, als_vstep;
+
+ if (pdata->als_vmax == 0) {
+ pdata->als_vmin = 0;
+ pdata->als_vmax = LM3530_ALS_WINDOW_mV;
+ }
+
+ als_vmin = pdata->als_vmin;
+ als_vmax = pdata->als_vmax;
+
+ if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
+ pdata->als_vmax = als_vmax = als_vmin + LM3530_ALS_WINDOW_mV;
+
+ /* n zone boundary makes n+1 zones */
+ als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
+
+ for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
+ als->zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
+ als_vstep + (i * als_vstep)) * LED_FULL) / 1000;
+
+ als->config =
+ (pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
+ (LM3530_ENABLE_ALS) |
+ (pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
+
+ als->imp_sel =
+ (pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
+ (pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
+}
+
static int lm3530_init_registers(struct lm3530_data *drvdata)
{
int ret = 0;
int i;
u8 gen_config;
- u8 als_config = 0;
u8 brt_ramp;
- u8 als_imp_sel = 0;
u8 brightness;
u8 reg_val[LM3530_REG_MAX];
- u8 zones[LM3530_ALS_ZB_MAX];
- u32 als_vmin, als_vmax, als_vstep;
struct lm3530_platform_data *pdata = drvdata->pdata;
struct i2c_client *client = drvdata->client;
struct lm3530_pwm_data *pwm = &pdata->pwm_data;
+ struct lm3530_als_data als;
+
+ memset(&als, 0, sizeof(struct lm3530_als_data));
gen_config = (pdata->brt_ramp_law << LM3530_RAMP_LAW_SHIFT) |
((pdata->max_current & 7) << LM3530_MAX_CURR_SHIFT);
switch (drvdata->mode) {
case LM3530_BL_MODE_MANUAL:
+ gen_config |= LM3530_ENABLE_I2C;
+ break;
case LM3530_BL_MODE_ALS:
gen_config |= LM3530_ENABLE_I2C;
+ lm3530_als_configure(pdata, &als);
break;
case LM3530_BL_MODE_PWM:
gen_config |= LM3530_ENABLE_PWM | LM3530_ENABLE_PWM_SIMPLE |
@@ -171,38 +219,6 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
break;
}
- if (drvdata->mode == LM3530_BL_MODE_ALS) {
- if (pdata->als_vmax == 0) {
- pdata->als_vmin = 0;
- pdata->als_vmax = LM3530_ALS_WINDOW_mV;
- }
-
- als_vmin = pdata->als_vmin;
- als_vmax = pdata->als_vmax;
-
- if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
- pdata->als_vmax = als_vmax =
- als_vmin + LM3530_ALS_WINDOW_mV;
-
- /* n zone boundary makes n+1 zones */
- als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
-
- for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
- zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
- als_vstep + (i * als_vstep)) * LED_FULL)
- / 1000;
-
- als_config =
- (pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
- (LM3530_ENABLE_ALS) |
- (pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
-
- als_imp_sel =
- (pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
- (pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
-
- }
-
brt_ramp = (pdata->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) |
(pdata->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT);
@@ -215,14 +231,14 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
brightness = drvdata->led_dev.max_brightness;
reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */
- reg_val[1] = als_config; /* LM3530_ALS_CONFIG */
+ reg_val[1] = als.config; /* LM3530_ALS_CONFIG */
reg_val[2] = brt_ramp; /* LM3530_BRT_RAMP_RATE */
- reg_val[3] = als_imp_sel; /* LM3530_ALS_IMP_SELECT */
+ reg_val[3] = als.imp_sel; /* LM3530_ALS_IMP_SELECT */
reg_val[4] = brightness; /* LM3530_BRT_CTRL_REG */
- reg_val[5] = zones[0]; /* LM3530_ALS_ZB0_REG */
- reg_val[6] = zones[1]; /* LM3530_ALS_ZB1_REG */
- reg_val[7] = zones[2]; /* LM3530_ALS_ZB2_REG */
- reg_val[8] = zones[3]; /* LM3530_ALS_ZB3_REG */
+ reg_val[5] = als.zones[0]; /* LM3530_ALS_ZB0_REG */
+ reg_val[6] = als.zones[1]; /* LM3530_ALS_ZB1_REG */
+ reg_val[7] = als.zones[2]; /* LM3530_ALS_ZB2_REG */
+ reg_val[8] = als.zones[3]; /* LM3530_ALS_ZB3_REG */
reg_val[9] = LM3530_DEF_ZT_0; /* LM3530_ALS_Z0T_REG */
reg_val[10] = LM3530_DEF_ZT_1; /* LM3530_ALS_Z1T_REG */
reg_val[11] = LM3530_DEF_ZT_2; /* LM3530_ALS_Z2T_REG */
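The refactoring moves the zone-boundary arithmetic into lm3530_als_configure() unchanged, and concrete numbers make the "n zone boundaries make n+1 zones" comment easy to check. A stand-alone sketch of that computation; LM3530_ALS_WINDOW_mV and LM3530_ALS_OFFSET_mV below are assumed placeholder values, only LED_FULL (255) is the known kernel constant:

#include <stdio.h>

#define LM3530_ALS_ZB_MAX       4       /* four ZB0..ZB3 registers */
#define LM3530_ALS_WINDOW_mV    1000    /* assumed window width */
#define LM3530_ALS_OFFSET_mV    4       /* assumed offset */
#define LED_FULL                255

int main(void)
{
        unsigned als_vmin = 0, als_vmax = 1000; /* example platform data */
        unsigned als_vstep, zones[LM3530_ALS_ZB_MAX];
        int i;

        if (als_vmax - als_vmin > LM3530_ALS_WINDOW_mV)
                als_vmax = als_vmin + LM3530_ALS_WINDOW_mV;

        /* four boundaries split the window into five zones */
        als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);

        for (i = 0; i < LM3530_ALS_ZB_MAX; i++) {
                zones[i] = ((als_vmin + LM3530_ALS_OFFSET_mV + als_vstep +
                             i * als_vstep) * LED_FULL) / 1000;
                printf("ZB%d = 0x%02x\n", i, zones[i]);
        }
        return 0;
}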
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
new file mode 100644
index 000000000000..f56b6e7ffdac
--- /dev/null
+++ b/drivers/leds/leds-lm3533.c
@@ -0,0 +1,785 @@
+/*
+ * leds-lm3533.c -- LM3533 LED driver
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/mfd/core.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_LVCTRLBANK_MIN 2
+#define LM3533_LVCTRLBANK_MAX 5
+#define LM3533_LVCTRLBANK_COUNT 4
+#define LM3533_RISEFALLTIME_MAX 7
+#define LM3533_ALS_CHANNEL_LV_MIN 1
+#define LM3533_ALS_CHANNEL_LV_MAX 2
+
+#define LM3533_REG_CTRLBANK_BCONF_BASE 0x1b
+#define LM3533_REG_PATTERN_ENABLE 0x28
+#define LM3533_REG_PATTERN_LOW_TIME_BASE 0x71
+#define LM3533_REG_PATTERN_HIGH_TIME_BASE 0x72
+#define LM3533_REG_PATTERN_RISETIME_BASE 0x74
+#define LM3533_REG_PATTERN_FALLTIME_BASE 0x75
+
+#define LM3533_REG_PATTERN_STEP 0x10
+
+#define LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK 0x04
+#define LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK 0x02
+#define LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK 0x01
+
+#define LM3533_LED_FLAG_PATTERN_ENABLE 1
+
+
+struct lm3533_led {
+ struct lm3533 *lm3533;
+ struct lm3533_ctrlbank cb;
+ struct led_classdev cdev;
+ int id;
+
+ struct mutex mutex;
+ unsigned long flags;
+
+ struct work_struct work;
+ u8 new_brightness;
+};
+
+
+static inline struct lm3533_led *to_lm3533_led(struct led_classdev *cdev)
+{
+ return container_of(cdev, struct lm3533_led, cdev);
+}
+
+static inline int lm3533_led_get_ctrlbank_id(struct lm3533_led *led)
+{
+ return led->id + 2;
+}
+
+static inline u8 lm3533_led_get_lv_reg(struct lm3533_led *led, u8 base)
+{
+ return base + led->id;
+}
+
+static inline u8 lm3533_led_get_pattern(struct lm3533_led *led)
+{
+ return led->id;
+}
+
+static inline u8 lm3533_led_get_pattern_reg(struct lm3533_led *led,
+ u8 base)
+{
+ return base + lm3533_led_get_pattern(led) * LM3533_REG_PATTERN_STEP;
+}
+
+static int lm3533_led_pattern_enable(struct lm3533_led *led, int enable)
+{
+ u8 mask;
+ u8 val;
+ int pattern;
+ int state;
+ int ret = 0;
+
+ dev_dbg(led->cdev.dev, "%s - %d\n", __func__, enable);
+
+ mutex_lock(&led->mutex);
+
+ state = test_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags);
+ if ((enable && state) || (!enable && !state))
+ goto out;
+
+ pattern = lm3533_led_get_pattern(led);
+ mask = 1 << (2 * pattern);
+
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(led->lm3533, LM3533_REG_PATTERN_ENABLE, val, mask);
+ if (ret) {
+ dev_err(led->cdev.dev, "failed to enable pattern %d (%d)\n",
+ pattern, enable);
+ goto out;
+ }
+
+ __change_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags);
+out:
+ mutex_unlock(&led->mutex);
+
+ return ret;
+}
+
+static void lm3533_led_work(struct work_struct *work)
+{
+ struct lm3533_led *led = container_of(work, struct lm3533_led, work);
+
+ dev_dbg(led->cdev.dev, "%s - %u\n", __func__, led->new_brightness);
+
+ if (led->new_brightness == 0)
+ lm3533_led_pattern_enable(led, 0); /* disable blink */
+
+ lm3533_ctrlbank_set_brightness(&led->cb, led->new_brightness);
+}
+
+static void lm3533_led_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct lm3533_led *led = to_lm3533_led(cdev);
+
+ dev_dbg(led->cdev.dev, "%s - %d\n", __func__, value);
+
+ led->new_brightness = value;
+ schedule_work(&led->work);
+}
+
+static enum led_brightness lm3533_led_get(struct led_classdev *cdev)
+{
+ struct lm3533_led *led = to_lm3533_led(cdev);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_brightness(&led->cb, &val);
+ if (ret)
+ return ret;
+
+ dev_dbg(led->cdev.dev, "%s - %u\n", __func__, val);
+
+ return val;
+}
+
+/* Pattern generator defines (delays in us). */
+#define LM3533_LED_DELAY1_VMIN 0x00
+#define LM3533_LED_DELAY2_VMIN 0x3d
+#define LM3533_LED_DELAY3_VMIN 0x80
+
+#define LM3533_LED_DELAY1_VMAX (LM3533_LED_DELAY2_VMIN - 1)
+#define LM3533_LED_DELAY2_VMAX (LM3533_LED_DELAY3_VMIN - 1)
+#define LM3533_LED_DELAY3_VMAX 0xff
+
+#define LM3533_LED_DELAY1_TMIN 16384U
+#define LM3533_LED_DELAY2_TMIN 1130496U
+#define LM3533_LED_DELAY3_TMIN 10305536U
+
+#define LM3533_LED_DELAY1_TMAX 999424U
+#define LM3533_LED_DELAY2_TMAX 9781248U
+#define LM3533_LED_DELAY3_TMAX 76890112U
+
+/* t_step = (t_max - t_min) / (v_max - v_min) */
+#define LM3533_LED_DELAY1_TSTEP 16384
+#define LM3533_LED_DELAY2_TSTEP 131072
+#define LM3533_LED_DELAY3_TSTEP 524288
+
+/* Delay limits for hardware accelerated blinking (in ms). */
+#define LM3533_LED_DELAY_ON_MAX \
+ ((LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY2_TSTEP / 2) / 1000)
+#define LM3533_LED_DELAY_OFF_MAX \
+ ((LM3533_LED_DELAY3_TMAX + LM3533_LED_DELAY3_TSTEP / 2) / 1000)
+
+/*
+ * Returns linear map of *t from [t_min,t_max] to [v_min,v_max] with a step
+ * size of t_step, where
+ *
+ * t_step = (t_max - t_min) / (v_max - v_min)
+ *
+ * and updates *t to reflect the mapped value.
+ */
+static u8 time_to_val(unsigned *t, unsigned t_min, unsigned t_step,
+ u8 v_min, u8 v_max)
+{
+ unsigned val;
+
+ val = (*t + t_step / 2 - t_min) / t_step + v_min;
+
+ *t = t_step * (val - v_min) + t_min;
+
+ return (u8)val;
+}
+
+/*
+ * Returns time code corresponding to *delay (in ms) and updates *delay to
+ * reflect actual hardware delay.
+ *
+ * Hardware supports 256 discrete delay times, divided into three groups with
+ * the following ranges and step-sizes:
+ *
+ * [ 16, 999] [0x00, 0x3c] step 16 ms
+ * [ 1130, 9781] [0x3d, 0x7f] step 131 ms
+ * [10306, 76890] [0x80, 0xff] step 524 ms
+ *
+ * Note that delay group 3 is only available for delay_off.
+ */
+static u8 lm3533_led_get_hw_delay(unsigned *delay)
+{
+ unsigned t;
+ u8 val;
+
+ t = *delay * 1000;
+
+ if (t >= (LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY3_TMIN) / 2) {
+ t = clamp(t, LM3533_LED_DELAY3_TMIN, LM3533_LED_DELAY3_TMAX);
+ val = time_to_val(&t, LM3533_LED_DELAY3_TMIN,
+ LM3533_LED_DELAY3_TSTEP,
+ LM3533_LED_DELAY3_VMIN,
+ LM3533_LED_DELAY3_VMAX);
+ } else if (t >= (LM3533_LED_DELAY1_TMAX + LM3533_LED_DELAY2_TMIN) / 2) {
+ t = clamp(t, LM3533_LED_DELAY2_TMIN, LM3533_LED_DELAY2_TMAX);
+ val = time_to_val(&t, LM3533_LED_DELAY2_TMIN,
+ LM3533_LED_DELAY2_TSTEP,
+ LM3533_LED_DELAY2_VMIN,
+ LM3533_LED_DELAY2_VMAX);
+ } else {
+ t = clamp(t, LM3533_LED_DELAY1_TMIN, LM3533_LED_DELAY1_TMAX);
+ val = time_to_val(&t, LM3533_LED_DELAY1_TMIN,
+ LM3533_LED_DELAY1_TSTEP,
+ LM3533_LED_DELAY1_VMIN,
+ LM3533_LED_DELAY1_VMAX);
+ }
+
+ *delay = (t + 500) / 1000;
+
+ return val;
+}
+
+/*
+ * Set delay register base to *delay (in ms) and update *delay to reflect
+ * actual hardware delay used.
+ */
+static int lm3533_led_delay_set(struct lm3533_led *led, u8 base,
+ unsigned long *delay)
+{
+ unsigned t;
+ u8 val;
+ u8 reg;
+ int ret;
+
+ t = (unsigned)*delay;
+
+ /* Delay group 3 is only available for low time (delay off). */
+ if (base != LM3533_REG_PATTERN_LOW_TIME_BASE)
+ t = min(t, LM3533_LED_DELAY2_TMAX / 1000);
+
+ val = lm3533_led_get_hw_delay(&t);
+
+ dev_dbg(led->cdev.dev, "%s - %lu: %u (0x%02x)\n", __func__,
+ *delay, t, val);
+ reg = lm3533_led_get_pattern_reg(led, base);
+ ret = lm3533_write(led->lm3533, reg, val);
+ if (ret)
+ dev_err(led->cdev.dev, "failed to set delay (%02x)\n", reg);
+
+ *delay = t;
+
+ return ret;
+}
+
+static int lm3533_led_delay_on_set(struct lm3533_led *led, unsigned long *t)
+{
+ return lm3533_led_delay_set(led, LM3533_REG_PATTERN_HIGH_TIME_BASE, t);
+}
+
+static int lm3533_led_delay_off_set(struct lm3533_led *led, unsigned long *t)
+{
+ return lm3533_led_delay_set(led, LM3533_REG_PATTERN_LOW_TIME_BASE, t);
+}
+
+static int lm3533_led_blink_set(struct led_classdev *cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct lm3533_led *led = to_lm3533_led(cdev);
+ int ret;
+
+ dev_dbg(led->cdev.dev, "%s - on = %lu, off = %lu\n", __func__,
+ *delay_on, *delay_off);
+
+ if (*delay_on > LM3533_LED_DELAY_ON_MAX ||
+ *delay_off > LM3533_LED_DELAY_OFF_MAX)
+ return -EINVAL;
+
+ if (*delay_on == 0 && *delay_off == 0) {
+ *delay_on = 500;
+ *delay_off = 500;
+ }
+
+ ret = lm3533_led_delay_on_set(led, delay_on);
+ if (ret)
+ return ret;
+
+ ret = lm3533_led_delay_off_set(led, delay_off);
+ if (ret)
+ return ret;
+
+ return lm3533_led_pattern_enable(led, 1);
+}
+
+static ssize_t show_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", led->id);
+}
+
+/*
+ * Pattern generator rise/fall times:
+ *
+ * 0 - 2048 us (default)
+ * 1 - 262 ms
+ * 2 - 524 ms
+ * 3 - 1.049 s
+ * 4 - 2.097 s
+ * 5 - 4.194 s
+ * 6 - 8.389 s
+ * 7 - 16.78 s
+ */
+static ssize_t show_risefalltime(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, u8 base)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ ssize_t ret;
+ u8 reg;
+ u8 val;
+
+ reg = lm3533_led_get_pattern_reg(led, base);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", val);
+}
+
+static ssize_t show_risetime(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return show_risefalltime(dev, attr, buf,
+ LM3533_REG_PATTERN_RISETIME_BASE);
+}
+
+static ssize_t show_falltime(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return show_risefalltime(dev, attr, buf,
+ LM3533_REG_PATTERN_FALLTIME_BASE);
+}
+
+static ssize_t store_risefalltime(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, u8 base)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 val;
+ u8 reg;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val) || val > LM3533_RISEFALLTIME_MAX)
+ return -EINVAL;
+
+ reg = lm3533_led_get_pattern_reg(led, base);
+ ret = lm3533_write(led->lm3533, reg, val);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t store_risetime(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_risefalltime(dev, attr, buf, len,
+ LM3533_REG_PATTERN_RISETIME_BASE);
+}
+
+static ssize_t store_falltime(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_risefalltime(dev, attr, buf, len,
+ LM3533_REG_PATTERN_FALLTIME_BASE);
+}
+
+static ssize_t show_als_channel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned channel;
+ u8 reg;
+ u8 val;
+ int ret;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ channel = (val & LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK) + 1;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", channel);
+}
+
+static ssize_t store_als_channel(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned channel;
+ u8 reg;
+ u8 val;
+ u8 mask;
+ int ret;
+
+ if (kstrtouint(buf, 0, &channel))
+ return -EINVAL;
+
+ if (channel < LM3533_ALS_CHANNEL_LV_MIN ||
+ channel > LM3533_ALS_CHANNEL_LV_MAX)
+ return -EINVAL;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ mask = LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK;
+ val = channel - 1;
+
+ ret = lm3533_update(led->lm3533, reg, val, mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_als_en(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ bool enable;
+ u8 reg;
+ u8 val;
+ int ret;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ enable = val & LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", enable);
+}
+
+static ssize_t store_als_en(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned enable;
+ u8 reg;
+ u8 mask;
+ u8 val;
+ int ret;
+
+ if (kstrtouint(buf, 0, &enable))
+ return -EINVAL;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ mask = LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK;
+
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(led->lm3533, reg, val, mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_linear(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 reg;
+ u8 val;
+ int linear;
+ int ret;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ if (val & LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK)
+ linear = 1;
+ else
+ linear = 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", linear);
+}
+
+static ssize_t store_linear(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned long linear;
+ u8 reg;
+ u8 mask;
+ u8 val;
+ int ret;
+
+ if (kstrtoul(buf, 0, &linear))
+ return -EINVAL;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ mask = LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK;
+
+ if (linear)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(led->lm3533, reg, val, mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_pwm(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_pwm(&led->cb, &val);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 val;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val))
+ return -EINVAL;
+
+ ret = lm3533_ctrlbank_set_pwm(&led->cb, val);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static LM3533_ATTR_RW(als_channel);
+static LM3533_ATTR_RW(als_en);
+static LM3533_ATTR_RW(falltime);
+static LM3533_ATTR_RO(id);
+static LM3533_ATTR_RW(linear);
+static LM3533_ATTR_RW(pwm);
+static LM3533_ATTR_RW(risetime);
+
+static struct attribute *lm3533_led_attributes[] = {
+ &dev_attr_als_channel.attr,
+ &dev_attr_als_en.attr,
+ &dev_attr_falltime.attr,
+ &dev_attr_id.attr,
+ &dev_attr_linear.attr,
+ &dev_attr_pwm.attr,
+ &dev_attr_risetime.attr,
+ NULL,
+};
+
+static umode_t lm3533_led_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ umode_t mode = attr->mode;
+
+ if (attr == &dev_attr_als_channel.attr ||
+ attr == &dev_attr_als_en.attr) {
+ if (!led->lm3533->have_als)
+ mode = 0;
+ }
+
+ return mode;
+}
+
+static struct attribute_group lm3533_led_attribute_group = {
+ .is_visible = lm3533_led_attr_is_visible,
+ .attrs = lm3533_led_attributes
+};
+
+static int __devinit lm3533_led_setup(struct lm3533_led *led,
+ struct lm3533_led_platform_data *pdata)
+{
+ int ret;
+
+ ret = lm3533_ctrlbank_set_max_current(&led->cb, pdata->max_current);
+ if (ret)
+ return ret;
+
+ return lm3533_ctrlbank_set_pwm(&led->cb, pdata->pwm);
+}
+
+static int __devinit lm3533_led_probe(struct platform_device *pdev)
+{
+ struct lm3533 *lm3533;
+ struct lm3533_led_platform_data *pdata;
+ struct lm3533_led *led;
+ int ret;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533 = dev_get_drvdata(pdev->dev.parent);
+ if (!lm3533)
+ return -EINVAL;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ if (pdev->id < 0 || pdev->id >= LM3533_LVCTRLBANK_COUNT) {
+ dev_err(&pdev->dev, "illegal LED id %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->lm3533 = lm3533;
+ led->cdev.name = pdata->name;
+ led->cdev.default_trigger = pdata->default_trigger;
+ led->cdev.brightness_set = lm3533_led_set;
+ led->cdev.brightness_get = lm3533_led_get;
+ led->cdev.blink_set = lm3533_led_blink_set;
+ led->cdev.brightness = LED_OFF;
+ led->id = pdev->id;
+
+ mutex_init(&led->mutex);
+ INIT_WORK(&led->work, lm3533_led_work);
+
+ /* The class framework makes a callback to get brightness during
+ * registration so use parent device (for error reporting) until
+ * registered.
+ */
+ led->cb.lm3533 = lm3533;
+ led->cb.id = lm3533_led_get_ctrlbank_id(led);
+ led->cb.dev = lm3533->dev;
+
+ platform_set_drvdata(pdev, led);
+
+ ret = led_classdev_register(pdev->dev.parent, &led->cdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id);
+ return ret;
+ }
+
+ led->cb.dev = led->cdev.dev;
+
+ ret = sysfs_create_group(&led->cdev.dev->kobj,
+ &lm3533_led_attribute_group);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to create sysfs attributes\n");
+ goto err_unregister;
+ }
+
+ ret = lm3533_led_setup(led, pdata);
+ if (ret)
+ goto err_sysfs_remove;
+
+ ret = lm3533_ctrlbank_enable(&led->cb);
+ if (ret)
+ goto err_sysfs_remove;
+
+ return 0;
+
+err_sysfs_remove:
+ sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
+err_unregister:
+ led_classdev_unregister(&led->cdev);
+ flush_work_sync(&led->work);
+
+ return ret;
+}
+
+static int __devexit lm3533_led_remove(struct platform_device *pdev)
+{
+ struct lm3533_led *led = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533_ctrlbank_disable(&led->cb);
+ sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
+ led_classdev_unregister(&led->cdev);
+ flush_work_sync(&led->work);
+
+ return 0;
+}
+
+static void lm3533_led_shutdown(struct platform_device *pdev)
+{
+ struct lm3533_led *led = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533_ctrlbank_disable(&led->cb);
+ lm3533_led_set(&led->cdev, LED_OFF); /* disable blink */
+ flush_work_sync(&led->work);
+}
+
+static struct platform_driver lm3533_led_driver = {
+ .driver = {
+ .name = "lm3533-leds",
+ .owner = THIS_MODULE,
+ },
+ .probe = lm3533_led_probe,
+ .remove = __devexit_p(lm3533_led_remove),
+ .shutdown = lm3533_led_shutdown,
+};
+module_platform_driver(lm3533_led_driver);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lm3533-leds");
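The comment blocks above describe how a requested delay is quantised onto one of three delay groups via time_to_val(). A stand-alone sketch of the group-1 case, with the constants copied from the driver, shows what a 500 ms delay_on request becomes:

#include <stdio.h>

#define DELAY1_VMIN     0x00
#define DELAY1_TMIN     16384U          /* us */
#define DELAY1_TSTEP    16384U          /* us */

/* Linear map of *t (us) to a register code, rounded to the nearest step;
 * *t is updated to the delay the hardware will actually produce.
 */
static unsigned char time_to_val(unsigned *t, unsigned t_min,
                                 unsigned t_step, unsigned char v_min)
{
        unsigned val = (*t + t_step / 2 - t_min) / t_step + v_min;

        *t = t_step * (val - v_min) + t_min;
        return (unsigned char)val;
}

int main(void)
{
        unsigned t = 500 * 1000;        /* request: 500 ms delay_on */
        unsigned char val = time_to_val(&t, DELAY1_TMIN, DELAY1_TSTEP,
                                        DELAY1_VMIN);

        /* prints "0x1e -> 508 ms": the closest delay group 1 can produce */
        printf("0x%02x -> %u ms\n", val, (t + 500) / 1000);
        return 0;
}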
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 410a723b8691..23815624f35e 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -193,9 +193,14 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
/* move current engine to direct mode and remember the state */
ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
+ if (ret)
+ return ret;
+
/* Mode change requires min 500 us delay. 1 - 2 ms with margin */
usleep_range(1000, 2000);
- ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+ ret = lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+ if (ret)
+ return ret;
/* For loading, all the engines to load mode */
lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
@@ -211,8 +216,7 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
LP5521_PROG_MEM_SIZE,
pattern);
- ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
- return ret;
+ return lp5521_write(client, LP5521_REG_OP_MODE, mode);
}
static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
@@ -785,7 +789,7 @@ static int __devinit lp5521_probe(struct i2c_client *client,
* LP5521_REG_ENABLE register will not have any effect - strange!
*/
ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
- if (buf != LP5521_REG_R_CURR_DEFAULT) {
+ if (ret || buf != LP5521_REG_R_CURR_DEFAULT) {
dev_err(&client->dev, "error in resetting chip\n");
goto fail2;
}
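The lp5521 hunks replace the accumulating "ret |= ..." pattern with early returns because OR-ing two different errno values can yield a third, unrelated one. A small sketch of that failure mode, assuming only standard Linux errno numbering:

#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(void)
{
        int first = -ENODEV;            /* e.g. a failed register read */
        int second = -EINVAL;           /* e.g. a later bad-argument error */
        int combined = first | second;  /* the old "ret |=" style */

        /* With Linux errno numbering this prints -17 (EEXIST: "File
         * exists"), which was never returned by either call.
         */
        printf("%d | %d = %d (%s)\n",
               first, second, combined, strerror(-combined));
        return 0;
}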
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index 8bc491541550..4cc6a2e3df34 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
return -EINVAL;
}
- led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+ led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
if (led == NULL) {
dev_err(&pdev->dev, "failed to alloc memory\n");
return -ENOMEM;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index dcc3bc3d38db..5f462dbf0dbb 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -101,11 +101,16 @@ static const struct i2c_device_id pca955x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca955x_id);
-struct pca955x_led {
+struct pca955x {
+ struct mutex lock;
+ struct pca955x_led *leds;
struct pca955x_chipdef *chipdef;
struct i2c_client *client;
+};
+
+struct pca955x_led {
+ struct pca955x *pca955x;
struct work_struct work;
- spinlock_t lock;
enum led_brightness brightness;
struct led_classdev led_cdev;
int led_num; /* 0 .. 15 potentially */
@@ -140,7 +145,7 @@ static inline u8 pca955x_ledsel(u8 oldval, int led_num, int state)
*/
static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
i2c_smbus_write_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 2*n,
@@ -156,7 +161,7 @@ static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
*/
static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
i2c_smbus_write_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + 2*n,
@@ -169,7 +174,7 @@ static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
*/
static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
i2c_smbus_write_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n,
@@ -182,7 +187,7 @@ static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
*/
static u8 pca955x_read_ls(struct i2c_client *client, int n)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
return (u8) i2c_smbus_read_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n);
@@ -190,18 +195,23 @@ static u8 pca955x_read_ls(struct i2c_client *client, int n)
static void pca955x_led_work(struct work_struct *work)
{
- struct pca955x_led *pca955x;
+ struct pca955x_led *pca955x_led;
+ struct pca955x *pca955x;
u8 ls;
int chip_ls; /* which LSx to use (0-3 potentially) */
int ls_led; /* which set of bits within LSx to use (0-3) */
- pca955x = container_of(work, struct pca955x_led, work);
- chip_ls = pca955x->led_num / 4;
- ls_led = pca955x->led_num % 4;
+ pca955x_led = container_of(work, struct pca955x_led, work);
+ pca955x = pca955x_led->pca955x;
+
+ chip_ls = pca955x_led->led_num / 4;
+ ls_led = pca955x_led->led_num % 4;
+
+ mutex_lock(&pca955x->lock);
ls = pca955x_read_ls(pca955x->client, chip_ls);
- switch (pca955x->brightness) {
+ switch (pca955x_led->brightness) {
case LED_FULL:
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON);
break;
@@ -219,12 +229,15 @@ static void pca955x_led_work(struct work_struct *work)
* OFF, HALF, or FULL. But, this is probably better than
* just turning off for all other values.
*/
- pca955x_write_pwm(pca955x->client, 1, 255-pca955x->brightness);
+ pca955x_write_pwm(pca955x->client, 1,
+ 255 - pca955x_led->brightness);
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1);
break;
}
pca955x_write_ls(pca955x->client, chip_ls, ls);
+
+ mutex_unlock(&pca955x->lock);
}
static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value)
@@ -233,7 +246,6 @@ static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness v
pca955x = container_of(led_cdev, struct pca955x_led, led_cdev);
- spin_lock(&pca955x->lock);
pca955x->brightness = value;
/*
@@ -241,14 +253,13 @@ static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness v
* can sleep.
*/
schedule_work(&pca955x->work);
-
- spin_unlock(&pca955x->lock);
}
static int __devinit pca955x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct pca955x_led *pca955x;
+ struct pca955x *pca955x;
+ struct pca955x_led *pca955x_led;
struct pca955x_chipdef *chip;
struct i2c_adapter *adapter;
struct led_platform_data *pdata;
@@ -282,39 +293,48 @@ static int __devinit pca955x_probe(struct i2c_client *client,
}
}
- pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL);
+ pca955x = kzalloc(sizeof(*pca955x), GFP_KERNEL);
if (!pca955x)
return -ENOMEM;
+ pca955x->leds = kzalloc(sizeof(*pca955x_led) * chip->bits, GFP_KERNEL);
+ if (!pca955x->leds) {
+ err = -ENOMEM;
+ goto exit_nomem;
+ }
+
i2c_set_clientdata(client, pca955x);
+ mutex_init(&pca955x->lock);
+ pca955x->client = client;
+ pca955x->chipdef = chip;
+
for (i = 0; i < chip->bits; i++) {
- pca955x[i].chipdef = chip;
- pca955x[i].client = client;
- pca955x[i].led_num = i;
+ pca955x_led = &pca955x->leds[i];
+ pca955x_led->led_num = i;
+ pca955x_led->pca955x = pca955x;
/* Platform data can specify LED names and default triggers */
if (pdata) {
if (pdata->leds[i].name)
- snprintf(pca955x[i].name,
- sizeof(pca955x[i].name), "pca955x:%s",
- pdata->leds[i].name);
+ snprintf(pca955x_led->name,
+ sizeof(pca955x_led->name), "pca955x:%s",
+ pdata->leds[i].name);
if (pdata->leds[i].default_trigger)
- pca955x[i].led_cdev.default_trigger =
+ pca955x_led->led_cdev.default_trigger =
pdata->leds[i].default_trigger;
} else {
- snprintf(pca955x[i].name, sizeof(pca955x[i].name),
+ snprintf(pca955x_led->name, sizeof(pca955x_led->name),
"pca955x:%d", i);
}
- spin_lock_init(&pca955x[i].lock);
-
- pca955x[i].led_cdev.name = pca955x[i].name;
- pca955x[i].led_cdev.brightness_set = pca955x_led_set;
+ pca955x_led->led_cdev.name = pca955x_led->name;
+ pca955x_led->led_cdev.brightness_set = pca955x_led_set;
- INIT_WORK(&pca955x[i].work, pca955x_led_work);
+ INIT_WORK(&pca955x_led->work, pca955x_led_work);
- err = led_classdev_register(&client->dev, &pca955x[i].led_cdev);
+ err = led_classdev_register(&client->dev,
+ &pca955x_led->led_cdev);
if (err < 0)
goto exit;
}
@@ -337,10 +357,12 @@ static int __devinit pca955x_probe(struct i2c_client *client,
exit:
while (i--) {
- led_classdev_unregister(&pca955x[i].led_cdev);
- cancel_work_sync(&pca955x[i].work);
+ led_classdev_unregister(&pca955x->leds[i].led_cdev);
+ cancel_work_sync(&pca955x->leds[i].work);
}
+ kfree(pca955x->leds);
+exit_nomem:
kfree(pca955x);
return err;
@@ -348,14 +370,15 @@ exit:
static int __devexit pca955x_remove(struct i2c_client *client)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
int i;
for (i = 0; i < pca955x->chipdef->bits; i++) {
- led_classdev_unregister(&pca955x[i].led_cdev);
- cancel_work_sync(&pca955x[i].work);
+ led_classdev_unregister(&pca955x->leds[i].led_cdev);
+ cancel_work_sync(&pca955x->leds[i].work);
}
+ kfree(pca955x->leds);
kfree(pca955x);
return 0;
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
index 2b513a2ad7de..e2726867c5d4 100644
--- a/drivers/leds/ledtrig-backlight.c
+++ b/drivers/leds/ledtrig-backlight.c
@@ -120,6 +120,7 @@ static void bl_trig_activate(struct led_classdev *led)
ret = fb_register_client(&n->notifier);
if (ret)
dev_err(led->dev, "unable to register backlight trigger\n");
+ led->activated = true;
return;
@@ -133,10 +134,11 @@ static void bl_trig_deactivate(struct led_classdev *led)
struct bl_trig_notifier *n =
(struct bl_trig_notifier *) led->trigger_data;
- if (n) {
+ if (led->activated) {
device_remove_file(led->dev, &dev_attr_inverted);
fb_unregister_client(&n->notifier);
kfree(n);
+ led->activated = false;
}
}
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index ecc4bf3f37a9..f057c101b896 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -200,6 +200,7 @@ static void gpio_trig_activate(struct led_classdev *led)
gpio_data->led = led;
led->trigger_data = gpio_data;
INIT_WORK(&gpio_data->work, gpio_trig_work);
+ led->activated = true;
return;
@@ -217,7 +218,7 @@ static void gpio_trig_deactivate(struct led_classdev *led)
{
struct gpio_trig_data *gpio_data = led->trigger_data;
- if (gpio_data) {
+ if (led->activated) {
device_remove_file(led->dev, &dev_attr_gpio);
device_remove_file(led->dev, &dev_attr_inverted);
device_remove_file(led->dev, &dev_attr_desired_brightness);
@@ -225,6 +226,7 @@ static void gpio_trig_deactivate(struct led_classdev *led)
if (gpio_data->gpio != 0)
free_irq(gpio_to_irq(gpio_data->gpio), led);
kfree(gpio_data);
+ led->activated = false;
}
}
diff --git a/drivers/leds/ledtrig-heartbeat.c b/drivers/leds/ledtrig-heartbeat.c
index 759c0bba4a8f..41dc76db4311 100644
--- a/drivers/leds/ledtrig-heartbeat.c
+++ b/drivers/leds/ledtrig-heartbeat.c
@@ -18,6 +18,7 @@
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/leds.h>
+#include <linux/reboot.h>
#include "leds.h"
struct heartbeat_trig_data {
@@ -83,15 +84,17 @@ static void heartbeat_trig_activate(struct led_classdev *led_cdev)
led_heartbeat_function, (unsigned long) led_cdev);
heartbeat_data->phase = 0;
led_heartbeat_function(heartbeat_data->timer.data);
+ led_cdev->activated = true;
}
static void heartbeat_trig_deactivate(struct led_classdev *led_cdev)
{
struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
- if (heartbeat_data) {
+ if (led_cdev->activated) {
del_timer_sync(&heartbeat_data->timer);
kfree(heartbeat_data);
+ led_cdev->activated = false;
}
}
@@ -101,13 +104,38 @@ static struct led_trigger heartbeat_led_trigger = {
.deactivate = heartbeat_trig_deactivate,
};
+static int heartbeat_reboot_notifier(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ led_trigger_unregister(&heartbeat_led_trigger);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block heartbeat_reboot_nb = {
+ .notifier_call = heartbeat_reboot_notifier,
+};
+
+static struct notifier_block heartbeat_panic_nb = {
+ .notifier_call = heartbeat_reboot_notifier,
+};
+
static int __init heartbeat_trig_init(void)
{
- return led_trigger_register(&heartbeat_led_trigger);
+ int rc = led_trigger_register(&heartbeat_led_trigger);
+
+ if (!rc) {
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &heartbeat_panic_nb);
+ register_reboot_notifier(&heartbeat_reboot_nb);
+ }
+ return rc;
}
static void __exit heartbeat_trig_exit(void)
{
+ unregister_reboot_notifier(&heartbeat_reboot_nb);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &heartbeat_panic_nb);
led_trigger_unregister(&heartbeat_led_trigger);
}
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 328c64c0841c..9010f7abaf2c 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -31,21 +31,17 @@ static ssize_t led_delay_on_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- int ret = -EINVAL;
- char *after;
- unsigned long state = simple_strtoul(buf, &after, 10);
- size_t count = after - buf;
-
- if (isspace(*after))
- count++;
-
- if (count == size) {
- led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
- led_cdev->blink_delay_on = state;
- ret = count;
- }
+ unsigned long state;
+ ssize_t ret = -EINVAL;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
- return ret;
+ led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
+ led_cdev->blink_delay_on = state;
+
+ return size;
}
static ssize_t led_delay_off_show(struct device *dev,
@@ -60,21 +56,17 @@ static ssize_t led_delay_off_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- int ret = -EINVAL;
- char *after;
- unsigned long state = simple_strtoul(buf, &after, 10);
- size_t count = after - buf;
-
- if (isspace(*after))
- count++;
-
- if (count == size) {
- led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
- led_cdev->blink_delay_off = state;
- ret = count;
- }
+ unsigned long state;
+ ssize_t ret = -EINVAL;
- return ret;
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
+ led_cdev->blink_delay_off = state;
+
+ return size;
}
static DEVICE_ATTR(delay_on, 0644, led_delay_on_show, led_delay_on_store);
@@ -95,8 +87,7 @@ static void timer_trig_activate(struct led_classdev *led_cdev)
led_blink_set(led_cdev, &led_cdev->blink_delay_on,
&led_cdev->blink_delay_off);
-
- led_cdev->trigger_data = (void *)1;
+ led_cdev->activated = true;
return;
@@ -106,9 +97,10 @@ err_out_delayon:
static void timer_trig_deactivate(struct led_classdev *led_cdev)
{
- if (led_cdev->trigger_data) {
+ if (led_cdev->activated) {
device_remove_file(led_cdev->dev, &dev_attr_delay_on);
device_remove_file(led_cdev->dev, &dev_attr_delay_off);
+ led_cdev->activated = false;
}
/* Stop blinking */
diff --git a/drivers/leds/ledtrig-transient.c b/drivers/leds/ledtrig-transient.c
new file mode 100644
index 000000000000..83179f435e1e
--- /dev/null
+++ b/drivers/leds/ledtrig-transient.c
@@ -0,0 +1,237 @@
+/*
+ * LED Kernel Transient Trigger
+ *
+ * Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
+ *
+ * Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
+ * ledtrig-heartbeat.c
+ * Design and use-case input from Jonas Bonn <jonas@southpole.se> and
+ * Neil Brown <neilb@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+/*
+ * Transient trigger allows one-shot timer activation. Please refer to
+ * Documentation/leds/ledtrig-transient.txt for details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/leds.h>
+#include "leds.h"
+
+struct transient_trig_data {
+ int activate;
+ int state;
+ int restore_state;
+ unsigned long duration;
+ struct timer_list timer;
+};
+
+static void transient_timer_function(unsigned long data)
+{
+ struct led_classdev *led_cdev = (struct led_classdev *) data;
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ transient_data->activate = 0;
+ led_set_brightness(led_cdev, transient_data->restore_state);
+}
+
+static ssize_t transient_activate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ return sprintf(buf, "%d\n", transient_data->activate);
+}
+
+static ssize_t transient_activate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ unsigned long state;
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ if (state != 1 && state != 0)
+ return -EINVAL;
+
+ /* cancel the running timer */
+ if (state == 0 && transient_data->activate == 1) {
+ del_timer(&transient_data->timer);
+ transient_data->activate = state;
+ led_set_brightness(led_cdev, transient_data->restore_state);
+ return size;
+ }
+
+ /* start timer if there is no active timer */
+ if (state == 1 && transient_data->activate == 0 &&
+ transient_data->duration != 0) {
+ transient_data->activate = state;
+ led_set_brightness(led_cdev, transient_data->state);
+ transient_data->restore_state =
+ (transient_data->state == LED_FULL) ? LED_OFF : LED_FULL;
+ mod_timer(&transient_data->timer,
+ jiffies + transient_data->duration);
+ }
+
+ /* state == 0 && transient_data->activate == 0
+ timer is not active - just return */
+ /* state == 1 && transient_data->activate == 1
+ timer is already active - just return */
+
+ return size;
+}
+
+static ssize_t transient_duration_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ return sprintf(buf, "%lu\n", transient_data->duration);
+}
+
+static ssize_t transient_duration_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ unsigned long state;
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ transient_data->duration = state;
+ return size;
+}
+
+static ssize_t transient_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ int state;
+
+ state = (transient_data->state == LED_FULL) ? 1 : 0;
+ return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t transient_state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ unsigned long state;
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ if (state != 1 && state != 0)
+ return -EINVAL;
+
+ transient_data->state = (state == 1) ? LED_FULL : LED_OFF;
+ return size;
+}
+
+static DEVICE_ATTR(activate, 0644, transient_activate_show,
+ transient_activate_store);
+static DEVICE_ATTR(duration, 0644, transient_duration_show,
+ transient_duration_store);
+static DEVICE_ATTR(state, 0644, transient_state_show, transient_state_store);
+
+static void transient_trig_activate(struct led_classdev *led_cdev)
+{
+ int rc;
+ struct transient_trig_data *tdata;
+
+ tdata = kzalloc(sizeof(struct transient_trig_data), GFP_KERNEL);
+ if (!tdata) {
+ dev_err(led_cdev->dev,
+ "unable to allocate transient trigger\n");
+ return;
+ }
+ led_cdev->trigger_data = tdata;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_activate);
+ if (rc)
+ goto err_out;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_duration);
+ if (rc)
+ goto err_out_duration;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_state);
+ if (rc)
+ goto err_out_state;
+
+ setup_timer(&tdata->timer, transient_timer_function,
+ (unsigned long) led_cdev);
+ led_cdev->activated = true;
+
+ return;
+
+err_out_state:
+ device_remove_file(led_cdev->dev, &dev_attr_duration);
+err_out_duration:
+ device_remove_file(led_cdev->dev, &dev_attr_activate);
+err_out:
+ dev_err(led_cdev->dev, "unable to register transient trigger\n");
+ led_cdev->trigger_data = NULL;
+ kfree(tdata);
+}
+
+static void transient_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ if (led_cdev->activated) {
+ del_timer_sync(&transient_data->timer);
+ led_set_brightness(led_cdev, transient_data->restore_state);
+ device_remove_file(led_cdev->dev, &dev_attr_activate);
+ device_remove_file(led_cdev->dev, &dev_attr_duration);
+ device_remove_file(led_cdev->dev, &dev_attr_state);
+ led_cdev->trigger_data = NULL;
+ led_cdev->activated = false;
+ kfree(transient_data);
+ }
+}
+
+static struct led_trigger transient_trigger = {
+ .name = "transient",
+ .activate = transient_trig_activate,
+ .deactivate = transient_trig_deactivate,
+};
+
+static int __init transient_trig_init(void)
+{
+ return led_trigger_register(&transient_trigger);
+}
+
+static void __exit transient_trig_exit(void)
+{
+ led_trigger_unregister(&transient_trigger);
+}
+
+module_init(transient_trig_init);
+module_exit(transient_trig_exit);
+
+MODULE_AUTHOR("Shuah Khan <shuahkhan@gmail.com>");
+MODULE_DESCRIPTION("Transient LED trigger");
+MODULE_LICENSE("GPL");
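The trigger is driven entirely from user space through the three attributes created above plus the standard trigger file. A user-space sketch of the one-shot sequence, using a hypothetical LED name; note that in this version of the code the duration value is added directly to jiffies:

#include <stdio.h>

/* minimal helper: write a string to a sysfs attribute */
static int sysfs_write(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        /* hypothetical LED name under the usual /sys/class/leds layout */
        const char *led = "/sys/class/leds/example::status";
        char path[128];

        snprintf(path, sizeof(path), "%s/trigger", led);
        sysfs_write(path, "transient");         /* attach the trigger */

        snprintf(path, sizeof(path), "%s/duration", led);
        sysfs_write(path, "1000");              /* timer length (jiffies here) */

        snprintf(path, sizeof(path), "%s/state", led);
        sysfs_write(path, "1");                 /* pulse to LED_FULL */

        snprintf(path, sizeof(path), "%s/activate", led);
        sysfs_write(path, "1");                 /* arm the one-shot timer */

        return 0;
}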
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 71f8e018e564..7d42c11c8684 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -198,7 +198,6 @@ static int fops_open(struct file *file)
struct saa7146_dev *dev = video_drvdata(file);
struct saa7146_fh *fh = NULL;
int result = 0;
-
enum v4l2_buf_type type;
DEB_EE("file:%p, dev:%s\n", file, video_device_node_name(vdev));
@@ -227,11 +226,12 @@ static int fops_open(struct file *file)
goto out;
}
- file->private_data = fh;
+ v4l2_fh_init(&fh->fh, vdev);
+
+ file->private_data = &fh->fh;
fh->dev = dev;
- fh->type = type;
- if( fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ if (vdev->vfl_type == VFL_TYPE_VBI) {
DEB_S("initializing vbi...\n");
if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
result = saa7146_vbi_uops.open(dev,file);
@@ -252,6 +252,7 @@ static int fops_open(struct file *file)
}
result = 0;
+ v4l2_fh_add(&fh->fh);
out:
if (fh && result != 0) {
kfree(fh);
@@ -263,6 +264,7 @@ out:
static int fops_release(struct file *file)
{
+ struct video_device *vdev = video_devdata(file);
struct saa7146_fh *fh = file->private_data;
struct saa7146_dev *dev = fh->dev;
@@ -271,7 +273,7 @@ static int fops_release(struct file *file)
if (mutex_lock_interruptible(&saa7146_devices_lock))
return -ERESTARTSYS;
- if( fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ if (vdev->vfl_type == VFL_TYPE_VBI) {
if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
saa7146_vbi_uops.release(dev,file);
if (dev->ext_vv_data->vbi_fops.release)
@@ -280,6 +282,8 @@ static int fops_release(struct file *file)
saa7146_video_uops.release(dev,file);
}
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
module_put(dev->ext->module);
file->private_data = NULL;
kfree(fh);
@@ -291,19 +295,22 @@ static int fops_release(struct file *file)
static int fops_mmap(struct file *file, struct vm_area_struct * vma)
{
+ struct video_device *vdev = video_devdata(file);
struct saa7146_fh *fh = file->private_data;
struct videobuf_queue *q;
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER: {
DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, vma:%p\n",
file, vma);
q = &fh->video_q;
break;
}
- case V4L2_BUF_TYPE_VBI_CAPTURE: {
+ case VFL_TYPE_VBI: {
DEB_EE("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, vma:%p\n",
file, vma);
+ if (fh->dev->ext_vv_data->capabilities & V4L2_CAP_SLICED_VBI_OUTPUT)
+ return -ENODEV;
q = &fh->vbi_q;
break;
}
@@ -317,15 +324,19 @@ static int fops_mmap(struct file *file, struct vm_area_struct * vma)
static unsigned int fops_poll(struct file *file, struct poll_table_struct *wait)
{
+ struct video_device *vdev = video_devdata(file);
struct saa7146_fh *fh = file->private_data;
struct videobuf_buffer *buf = NULL;
struct videobuf_queue *q;
+ unsigned int res = v4l2_ctrl_poll(file, wait);
DEB_EE("file:%p, poll:%p\n", file, wait);
- if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
+ if (vdev->vfl_type == VFL_TYPE_VBI) {
+ if (fh->dev->ext_vv_data->capabilities & V4L2_CAP_SLICED_VBI_OUTPUT)
+ return res | POLLOUT | POLLWRNORM;
if( 0 == fh->vbi_q.streaming )
- return videobuf_poll_stream(file, &fh->vbi_q, wait);
+ return res | videobuf_poll_stream(file, &fh->vbi_q, wait);
q = &fh->vbi_q;
} else {
DEB_D("using video queue\n");
@@ -337,31 +348,32 @@ static unsigned int fops_poll(struct file *file, struct poll_table_struct *wait)
if (!buf) {
DEB_D("buf == NULL!\n");
- return POLLERR;
+ return res | POLLERR;
}
poll_wait(file, &buf->done, wait);
if (buf->state == VIDEOBUF_DONE || buf->state == VIDEOBUF_ERROR) {
DEB_D("poll succeeded!\n");
- return POLLIN|POLLRDNORM;
+ return res | POLLIN | POLLRDNORM;
}
DEB_D("nothing to poll for, buf->state:%d\n", buf->state);
- return 0;
+ return res;
}
static ssize_t fops_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
+ struct video_device *vdev = video_devdata(file);
struct saa7146_fh *fh = file->private_data;
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER:
/*
DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, data:%p, count:%lun",
file, data, (unsigned long)count);
*/
return saa7146_video_uops.read(file,data,count,ppos);
- case V4L2_BUF_TYPE_VBI_CAPTURE:
+ case VFL_TYPE_VBI:
/*
DEB_EE("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, data:%p, count:%lu\n",
file, data, (unsigned long)count);
@@ -377,12 +389,13 @@ static ssize_t fops_read(struct file *file, char __user *data, size_t count, lof
static ssize_t fops_write(struct file *file, const char __user *data, size_t count, loff_t *ppos)
{
+ struct video_device *vdev = video_devdata(file);
struct saa7146_fh *fh = file->private_data;
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER:
return -EINVAL;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
+ case VFL_TYPE_VBI:
if (fh->dev->ext_vv_data->vbi_fops.write)
return fh->dev->ext_vv_data->vbi_fops.write(file, data, count, ppos);
else
@@ -429,8 +442,15 @@ static void vv_callback(struct saa7146_dev *dev, unsigned long status)
}
}
+static const struct v4l2_ctrl_ops saa7146_ctrl_ops = {
+ .s_ctrl = saa7146_s_ctrl,
+};
+
int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
{
+ struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
+ struct v4l2_pix_format *fmt;
+ struct v4l2_vbi_format *vbi;
struct saa7146_vv *vv;
int err;
@@ -438,12 +458,32 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
if (err)
return err;
+ v4l2_ctrl_handler_init(hdl, 6);
+ v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (hdl->error) {
+ err = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ return err;
+ }
+ dev->v4l2_dev.ctrl_handler = hdl;
+
vv = kzalloc(sizeof(struct saa7146_vv), GFP_KERNEL);
if (vv == NULL) {
ERR("out of memory. aborting.\n");
+ v4l2_ctrl_handler_free(hdl);
return -ENOMEM;
}
- ext_vv->ops = saa7146_video_ioctl_ops;
+ ext_vv->vid_ops = saa7146_video_ioctl_ops;
+ ext_vv->vbi_ops = saa7146_vbi_ioctl_ops;
ext_vv->core_ops = &saa7146_video_ioctl_ops;
DEB_EE("dev:%p\n", dev);
@@ -463,6 +503,7 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
if( NULL == vv->d_clipping.cpu_addr ) {
ERR("out of memory. aborting.\n");
kfree(vv);
+ v4l2_ctrl_handler_free(hdl);
return -1;
}
memset(vv->d_clipping.cpu_addr, 0x0, SAA7146_CLIPPING_MEM);
@@ -471,6 +512,39 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
saa7146_vbi_uops.init(dev,vv);
+ fmt = &vv->ov_fb.fmt;
+ fmt->width = vv->standard->h_max_out;
+ fmt->height = vv->standard->v_max_out;
+ fmt->pixelformat = V4L2_PIX_FMT_RGB565;
+ fmt->bytesperline = 2 * fmt->width;
+ fmt->sizeimage = fmt->bytesperline * fmt->height;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt = &vv->video_fmt;
+ fmt->width = 384;
+ fmt->height = 288;
+ fmt->pixelformat = V4L2_PIX_FMT_BGR24;
+ fmt->field = V4L2_FIELD_ANY;
+ fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ fmt->bytesperline = 3 * fmt->width;
+ fmt->sizeimage = fmt->bytesperline * fmt->height;
+
+ vbi = &vv->vbi_fmt;
+ vbi->sampling_rate = 27000000;
+ vbi->offset = 248; /* todo */
+ vbi->samples_per_line = 720 * 2;
+ vbi->sample_format = V4L2_PIX_FMT_GREY;
+
+ /* fixme: this only works for PAL */
+ vbi->start[0] = 5;
+ vbi->count[0] = 16;
+ vbi->start[1] = 312;
+ vbi->count[1] = 16;
+
+ init_timer(&vv->vbi_read_timeout);
+
+ vv->ov_fb.capability = V4L2_FBUF_CAP_LIST_CLIPPING;
+ vv->ov_fb.flags = V4L2_FBUF_FLAG_PRIMARY;
dev->vv_data = vv;
dev->vv_callback = &vv_callback;
@@ -486,6 +560,7 @@ int saa7146_vv_release(struct saa7146_dev* dev)
v4l2_device_unregister(&dev->v4l2_dev);
pci_free_consistent(dev->pci, SAA7146_CLIPPING_MEM, vv->d_clipping.cpu_addr, vv->d_clipping.dma_handle);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
kfree(vv);
dev->vv_data = NULL;
dev->vv_callback = NULL;
@@ -509,10 +584,19 @@ int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
return -ENOMEM;
vfd->fops = &video_fops;
- vfd->ioctl_ops = &dev->ext_vv_data->ops;
+ if (type == VFL_TYPE_GRABBER)
+ vfd->ioctl_ops = &dev->ext_vv_data->vid_ops;
+ else
+ vfd->ioctl_ops = &dev->ext_vv_data->vbi_ops;
vfd->release = video_device_release;
+ /* Locking in file operations other than ioctl should be done by
+ the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
vfd->lock = &dev->v4l2_lock;
+ vfd->v4l2_dev = &dev->v4l2_dev;
vfd->tvnorms = 0;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
for (i = 0; i < dev->ext_vv_data->num_stds; i++)
vfd->tvnorms |= dev->ext_vv_data->stds[i].id;
strlcpy(vfd->name, name, sizeof(vfd->name));
diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
index bc1f545c95cb..be746d1aee9a 100644
--- a/drivers/media/common/saa7146_hlp.c
+++ b/drivers/media/common/saa7146_hlp.c
@@ -343,9 +343,9 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
struct saa7146_vv *vv = dev->vv_data;
__le32 *clipping = vv->d_clipping.cpu_addr;
- int width = fh->ov.win.w.width;
- int height = fh->ov.win.w.height;
- int clipcount = fh->ov.nclips;
+ int width = vv->ov.win.w.width;
+ int height = vv->ov.win.w.height;
+ int clipcount = vv->ov.nclips;
u32 line_list[32];
u32 pixel_list[32];
@@ -365,10 +365,10 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
for(i = 0; i < clipcount; i++) {
int l = 0, r = 0, t = 0, b = 0;
- x[i] = fh->ov.clips[i].c.left;
- y[i] = fh->ov.clips[i].c.top;
- w[i] = fh->ov.clips[i].c.width;
- h[i] = fh->ov.clips[i].c.height;
+ x[i] = vv->ov.clips[i].c.left;
+ y[i] = vv->ov.clips[i].c.top;
+ w[i] = vv->ov.clips[i].c.width;
+ h[i] = vv->ov.clips[i].c.height;
if( w[i] < 0) {
x[i] += w[i]; w[i] = -w[i];
@@ -485,13 +485,14 @@ static void saa7146_disable_clipping(struct saa7146_dev *dev)
static void saa7146_set_clipping_rect(struct saa7146_fh *fh)
{
struct saa7146_dev *dev = fh->dev;
- enum v4l2_field field = fh->ov.win.field;
+ struct saa7146_vv *vv = dev->vv_data;
+ enum v4l2_field field = vv->ov.win.field;
struct saa7146_video_dma vdma2;
u32 clip_format;
u32 arbtr_ctrl;
/* check clipcount, disable clipping if clipcount == 0*/
- if( fh->ov.nclips == 0 ) {
+ if (vv->ov.nclips == 0) {
saa7146_disable_clipping(dev);
return;
}
@@ -651,8 +652,8 @@ int saa7146_enable_overlay(struct saa7146_fh *fh)
struct saa7146_dev *dev = fh->dev;
struct saa7146_vv *vv = dev->vv_data;
- saa7146_set_window(dev, fh->ov.win.w.width, fh->ov.win.w.height, fh->ov.win.field);
- saa7146_set_position(dev, fh->ov.win.w.left, fh->ov.win.w.top, fh->ov.win.w.height, fh->ov.win.field, vv->ov_fmt->pixelformat);
+ saa7146_set_window(dev, vv->ov.win.w.width, vv->ov.win.w.height, vv->ov.win.field);
+ saa7146_set_position(dev, vv->ov.win.w.left, vv->ov.win.w.top, vv->ov.win.w.height, vv->ov.win.field, vv->ov_fmt->pixelformat);
saa7146_set_output_format(dev, vv->ov_fmt->trans);
saa7146_set_clipping_rect(fh);
diff --git a/drivers/media/common/saa7146_vbi.c b/drivers/media/common/saa7146_vbi.c
index b2e718343739..1e71e374bbfe 100644
--- a/drivers/media/common/saa7146_vbi.c
+++ b/drivers/media/common/saa7146_vbi.c
@@ -211,7 +211,7 @@ static int buffer_activate(struct saa7146_dev *dev,
DEB_VBI("dev:%p, buf:%p, next:%p\n", dev, buf, next);
saa7146_set_vbi_capture(dev,buf,next);
- mod_timer(&vv->vbi_q.timeout, jiffies+BUFFER_TIMEOUT);
+ mod_timer(&vv->vbi_dmaq.timeout, jiffies+BUFFER_TIMEOUT);
return 0;
}
@@ -294,7 +294,7 @@ static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
struct saa7146_buf *buf = (struct saa7146_buf *)vb;
DEB_VBI("vb:%p\n", vb);
- saa7146_buffer_queue(dev,&vv->vbi_q,buf);
+ saa7146_buffer_queue(dev, &vv->vbi_dmaq, buf);
}
static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
@@ -335,16 +335,15 @@ static void vbi_stop(struct saa7146_fh *fh, struct file *file)
/* shut down dma 3 transfers */
saa7146_write(dev, MC1, MASK_20);
- if (vv->vbi_q.curr) {
- saa7146_buffer_finish(dev,&vv->vbi_q,VIDEOBUF_DONE);
- }
+ if (vv->vbi_dmaq.curr)
+ saa7146_buffer_finish(dev, &vv->vbi_dmaq, VIDEOBUF_DONE);
videobuf_queue_cancel(&fh->vbi_q);
vv->vbi_streaming = NULL;
- del_timer(&vv->vbi_q.timeout);
- del_timer(&fh->vbi_read_timeout);
+ del_timer(&vv->vbi_dmaq.timeout);
+ del_timer(&vv->vbi_read_timeout);
spin_unlock_irqrestore(&dev->slock, flags);
}
@@ -364,12 +363,12 @@ static void vbi_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
{
DEB_VBI("dev:%p\n", dev);
- INIT_LIST_HEAD(&vv->vbi_q.queue);
+ INIT_LIST_HEAD(&vv->vbi_dmaq.queue);
- init_timer(&vv->vbi_q.timeout);
- vv->vbi_q.timeout.function = saa7146_buffer_timeout;
- vv->vbi_q.timeout.data = (unsigned long)(&vv->vbi_q);
- vv->vbi_q.dev = dev;
+ init_timer(&vv->vbi_dmaq.timeout);
+ vv->vbi_dmaq.timeout.function = saa7146_buffer_timeout;
+ vv->vbi_dmaq.timeout.data = (unsigned long)(&vv->vbi_dmaq);
+ vv->vbi_dmaq.dev = dev;
init_waitqueue_head(&vv->vbi_wq);
}
@@ -377,6 +376,7 @@ static void vbi_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
static int vbi_open(struct saa7146_dev *dev, struct file *file)
{
struct saa7146_fh *fh = file->private_data;
+ struct saa7146_vv *vv = fh->dev->vv_data;
u32 arbtr_ctrl = saa7146_read(dev, PCI_BT_V1);
int ret = 0;
@@ -395,19 +395,6 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
saa7146_write(dev, PCI_BT_V1, arbtr_ctrl);
saa7146_write(dev, MC2, (MASK_04|MASK_20));
- memset(&fh->vbi_fmt,0,sizeof(fh->vbi_fmt));
-
- fh->vbi_fmt.sampling_rate = 27000000;
- fh->vbi_fmt.offset = 248; /* todo */
- fh->vbi_fmt.samples_per_line = vbi_pixel_to_capture;
- fh->vbi_fmt.sample_format = V4L2_PIX_FMT_GREY;
-
- /* fixme: this only works for PAL */
- fh->vbi_fmt.start[0] = 5;
- fh->vbi_fmt.count[0] = 16;
- fh->vbi_fmt.start[1] = 312;
- fh->vbi_fmt.count[1] = 16;
-
videobuf_queue_sg_init(&fh->vbi_q, &vbi_qops,
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VBI_CAPTURE,
@@ -415,9 +402,8 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
sizeof(struct saa7146_buf),
file, &dev->v4l2_lock);
- init_timer(&fh->vbi_read_timeout);
- fh->vbi_read_timeout.function = vbi_read_timeout;
- fh->vbi_read_timeout.data = (unsigned long)file;
+ vv->vbi_read_timeout.function = vbi_read_timeout;
+ vv->vbi_read_timeout.data = (unsigned long)file;
/* initialize the brs */
if ( 0 != (SAA7146_USE_PORT_B_FOR_VBI & dev->ext_vv_data->flags)) {
@@ -453,16 +439,16 @@ static void vbi_irq_done(struct saa7146_dev *dev, unsigned long status)
struct saa7146_vv *vv = dev->vv_data;
spin_lock(&dev->slock);
- if (vv->vbi_q.curr) {
- DEB_VBI("dev:%p, curr:%p\n", dev, vv->vbi_q.curr);
+ if (vv->vbi_dmaq.curr) {
+ DEB_VBI("dev:%p, curr:%p\n", dev, vv->vbi_dmaq.curr);
/* this must be += 2, one count for each field */
vv->vbi_fieldcount+=2;
- vv->vbi_q.curr->vb.field_count = vv->vbi_fieldcount;
- saa7146_buffer_finish(dev,&vv->vbi_q,VIDEOBUF_DONE);
+ vv->vbi_dmaq.curr->vb.field_count = vv->vbi_fieldcount;
+ saa7146_buffer_finish(dev, &vv->vbi_dmaq, VIDEOBUF_DONE);
} else {
DEB_VBI("dev:%p\n", dev);
}
- saa7146_buffer_next(dev,&vv->vbi_q,1);
+ saa7146_buffer_next(dev, &vv->vbi_dmaq, 1);
spin_unlock(&dev->slock);
}
@@ -488,7 +474,7 @@ static ssize_t vbi_read(struct file *file, char __user *data, size_t count, loff
return -EBUSY;
}
- mod_timer(&fh->vbi_read_timeout, jiffies+BUFFER_TIMEOUT);
+ mod_timer(&vv->vbi_read_timeout, jiffies+BUFFER_TIMEOUT);
ret = videobuf_read_stream(&fh->vbi_q, data, count, ppos, 1,
file->f_flags & O_NONBLOCK);
/*
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index ce30533fd972..6d14785d4747 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -2,6 +2,8 @@
#include <media/saa7146_vv.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ctrls.h>
#include <linux/module.h>
static int max_memory = 32;
@@ -112,8 +114,8 @@ int saa7146_start_preview(struct saa7146_fh *fh)
DEB_EE("dev:%p, fh:%p\n", dev, fh);
- /* check if we have overlay informations */
- if( NULL == fh->ov.fh ) {
+ /* check if we have overlay information */
+ if (vv->ov.fh == NULL) {
DEB_D("no overlay data available. try S_FMT first.\n");
return -EAGAIN;
}
@@ -139,19 +141,18 @@ int saa7146_start_preview(struct saa7146_fh *fh)
return -EBUSY;
}
- fmt.fmt.win = fh->ov.win;
+ fmt.fmt.win = vv->ov.win;
err = vidioc_try_fmt_vid_overlay(NULL, fh, &fmt);
if (0 != err) {
saa7146_res_free(vv->video_fh, RESOURCE_DMA1_HPS|RESOURCE_DMA2_CLP);
return -EBUSY;
}
- fh->ov.win = fmt.fmt.win;
- vv->ov_data = &fh->ov;
+ vv->ov.win = fmt.fmt.win;
DEB_D("%dx%d+%d+%d %s field=%s\n",
- fh->ov.win.w.width, fh->ov.win.w.height,
- fh->ov.win.w.left, fh->ov.win.w.top,
- vv->ov_fmt->name, v4l2_field_names[fh->ov.win.field]);
+ vv->ov.win.w.width, vv->ov.win.w.height,
+ vv->ov.win.w.left, vv->ov.win.w.top,
+ vv->ov_fmt->name, v4l2_field_names[vv->ov.win.field]);
if (0 != (ret = saa7146_enable_overlay(fh))) {
DEB_D("enabling overlay failed: %d\n", ret);
@@ -202,65 +203,6 @@ int saa7146_stop_preview(struct saa7146_fh *fh)
EXPORT_SYMBOL_GPL(saa7146_stop_preview);
/********************************************************************************/
-/* device controls */
-
-static struct v4l2_queryctrl controls[] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 128,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- },{
- .id = V4L2_CID_CONTRAST,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
- .default_value = 64,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- },{
- .id = V4L2_CID_SATURATION,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
- .default_value = 64,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- },{
- .id = V4L2_CID_VFLIP,
- .name = "Vertical Flip",
- .minimum = 0,
- .maximum = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- },{
- .id = V4L2_CID_HFLIP,
- .name = "Horizontal Flip",
- .minimum = 0,
- .maximum = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- },
-};
-static int NUM_CONTROLS = sizeof(controls)/sizeof(struct v4l2_queryctrl);
-
-#define V4L2_CID_PRIVATE_LASTP1 (V4L2_CID_PRIVATE_BASE + 0)
-
-static struct v4l2_queryctrl* ctrl_by_id(int id)
-{
- int i;
-
- for (i = 0; i < NUM_CONTROLS; i++)
- if (controls[i].id == id)
- return controls+i;
- return NULL;
-}
-
-/********************************************************************************/
/* common pagetable functions */
static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *buf)
@@ -413,7 +355,7 @@ static int video_begin(struct saa7146_fh *fh)
}
}
- fmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat);
+ fmt = saa7146_format_by_fourcc(dev, vv->video_fmt.pixelformat);
/* we need to have a valid format set here */
BUG_ON(NULL == fmt);
@@ -465,7 +407,7 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
return -EBUSY;
}
- fmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat);
+ fmt = saa7146_format_by_fourcc(dev, vv->video_fmt.pixelformat);
/* we need to have a valid format set here */
BUG_ON(NULL == fmt);
@@ -504,18 +446,25 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
static int vidioc_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
+ struct video_device *vdev = video_devdata(file);
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
strcpy((char *)cap->driver, "saa7146 v4l2");
strlcpy((char *)cap->card, dev->ext->name, sizeof(cap->card));
sprintf((char *)cap->bus_info, "PCI:%s", pci_name(dev->pci));
- cap->version = SAA7146_VERSION_CODE;
- cap->capabilities =
+ cap->device_caps =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_VIDEO_OVERLAY |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
- cap->capabilities |= dev->ext_vv_data->capabilities;
+ cap->device_caps |= dev->ext_vv_data->capabilities;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ cap->device_caps &=
+ ~(V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_OUTPUT);
+ else
+ cap->device_caps &=
+ ~(V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY | V4L2_CAP_AUDIO);
return 0;
}
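
Editor's note on the hunk above: vidioc_querycap() now fills device_caps per device node and advertises V4L2_CAP_DEVICE_CAPS in capabilities, masking the VBI caps off the video node and the video/overlay/audio caps off the VBI node. A minimal userspace sketch (not part of the patch; the /dev/video0 path is an assumption) of how an application reads the split:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

	if (fd < 0 || ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0)
		return 1;
	if (cap.capabilities & V4L2_CAP_DEVICE_CAPS)
		/* capabilities of this node only, as set by the hunk above */
		printf("device_caps: 0x%08x\n", (unsigned)cap.device_caps);
	else
		/* older drivers report only the driver-wide mask */
		printf("capabilities: 0x%08x\n", (unsigned)cap.capabilities);
	return 0;
}
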
@@ -526,6 +475,7 @@ static int vidioc_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *f
*fb = vv->ov_fb;
fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
+ fb->flags = V4L2_FBUF_FLAG_PRIMARY;
return 0;
}
@@ -579,135 +529,58 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtd
return 0;
}
-static int vidioc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *c)
+int saa7146_s_ctrl(struct v4l2_ctrl *ctrl)
{
- const struct v4l2_queryctrl *ctrl;
-
- if ((c->id < V4L2_CID_BASE ||
- c->id >= V4L2_CID_LASTP1) &&
- (c->id < V4L2_CID_PRIVATE_BASE ||
- c->id >= V4L2_CID_PRIVATE_LASTP1))
- return -EINVAL;
-
- ctrl = ctrl_by_id(c->id);
- if (ctrl == NULL)
- return -EINVAL;
-
- DEB_EE("VIDIOC_QUERYCTRL: id:%d\n", c->id);
- *c = *ctrl;
- return 0;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *c)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+ struct saa7146_dev *dev = container_of(ctrl->handler,
+ struct saa7146_dev, ctrl_handler);
struct saa7146_vv *vv = dev->vv_data;
- const struct v4l2_queryctrl *ctrl;
- u32 value = 0;
+ u32 val;
- ctrl = ctrl_by_id(c->id);
- if (NULL == ctrl)
- return -EINVAL;
- switch (c->id) {
+ switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- value = saa7146_read(dev, BCS_CTRL);
- c->value = 0xff & (value >> 24);
- DEB_D("V4L2_CID_BRIGHTNESS: %d\n", c->value);
- break;
- case V4L2_CID_CONTRAST:
- value = saa7146_read(dev, BCS_CTRL);
- c->value = 0x7f & (value >> 16);
- DEB_D("V4L2_CID_CONTRAST: %d\n", c->value);
- break;
- case V4L2_CID_SATURATION:
- value = saa7146_read(dev, BCS_CTRL);
- c->value = 0x7f & (value >> 0);
- DEB_D("V4L2_CID_SATURATION: %d\n", c->value);
- break;
- case V4L2_CID_VFLIP:
- c->value = vv->vflip;
- DEB_D("V4L2_CID_VFLIP: %d\n", c->value);
- break;
- case V4L2_CID_HFLIP:
- c->value = vv->hflip;
- DEB_D("V4L2_CID_HFLIP: %d\n", c->value);
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- struct saa7146_vv *vv = dev->vv_data;
- const struct v4l2_queryctrl *ctrl;
-
- ctrl = ctrl_by_id(c->id);
- if (NULL == ctrl) {
- DEB_D("unknown control %d\n", c->id);
- return -EINVAL;
- }
-
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_BOOLEAN:
- case V4L2_CTRL_TYPE_MENU:
- case V4L2_CTRL_TYPE_INTEGER:
- if (c->value < ctrl->minimum)
- c->value = ctrl->minimum;
- if (c->value > ctrl->maximum)
- c->value = ctrl->maximum;
- break;
- default:
- /* nothing */;
- }
-
- switch (c->id) {
- case V4L2_CID_BRIGHTNESS: {
- u32 value = saa7146_read(dev, BCS_CTRL);
- value &= 0x00ffffff;
- value |= (c->value << 24);
- saa7146_write(dev, BCS_CTRL, value);
+ val = saa7146_read(dev, BCS_CTRL);
+ val &= 0x00ffffff;
+ val |= (ctrl->val << 24);
+ saa7146_write(dev, BCS_CTRL, val);
saa7146_write(dev, MC2, MASK_22 | MASK_06);
break;
- }
- case V4L2_CID_CONTRAST: {
- u32 value = saa7146_read(dev, BCS_CTRL);
- value &= 0xff00ffff;
- value |= (c->value << 16);
- saa7146_write(dev, BCS_CTRL, value);
+
+ case V4L2_CID_CONTRAST:
+ val = saa7146_read(dev, BCS_CTRL);
+ val &= 0xff00ffff;
+ val |= (ctrl->val << 16);
+ saa7146_write(dev, BCS_CTRL, val);
saa7146_write(dev, MC2, MASK_22 | MASK_06);
break;
- }
- case V4L2_CID_SATURATION: {
- u32 value = saa7146_read(dev, BCS_CTRL);
- value &= 0xffffff00;
- value |= (c->value << 0);
- saa7146_write(dev, BCS_CTRL, value);
+
+ case V4L2_CID_SATURATION:
+ val = saa7146_read(dev, BCS_CTRL);
+ val &= 0xffffff00;
+ val |= (ctrl->val << 0);
+ saa7146_write(dev, BCS_CTRL, val);
saa7146_write(dev, MC2, MASK_22 | MASK_06);
break;
- }
+
case V4L2_CID_HFLIP:
/* fixme: we can support changing VFLIP and HFLIP here... */
- if (IS_CAPTURE_ACTIVE(fh) != 0) {
- DEB_D("V4L2_CID_HFLIP while active capture\n");
+ if ((vv->video_status & STATUS_CAPTURE))
return -EBUSY;
- }
- vv->hflip = c->value;
+ vv->hflip = ctrl->val;
break;
+
case V4L2_CID_VFLIP:
- if (IS_CAPTURE_ACTIVE(fh) != 0) {
- DEB_D("V4L2_CID_VFLIP while active capture\n");
+ if ((vv->video_status & STATUS_CAPTURE))
return -EBUSY;
- }
- vv->vflip = c->value;
+ vv->vflip = ctrl->val;
break;
+
default:
return -EINVAL;
}
- if (IS_OVERLAY_ACTIVE(fh) != 0) {
+ if ((vv->video_status & STATUS_OVERLAY) != 0) { /* CHECK: && (vv->video_fh == fh)) */
+ struct saa7146_fh *fh = vv->video_fh;
+
saa7146_stop_preview(fh);
saa7146_start_preview(fh);
}
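
Editor's note: the deleted queryctrl/g_ctrl/s_ctrl handlers above give way to the control-framework callback saa7146_s_ctrl(). A hedged sketch (not from this hunk) of how the same five controls would typically be registered against dev->ctrl_handler; the ops structure and helper function names are assumptions based on the standard v4l2-ctrls API, while the ranges and defaults are taken from the removed v4l2_queryctrl table:

#include <media/v4l2-ctrls.h>

static const struct v4l2_ctrl_ops saa7146_ctrl_ops = {
	.s_ctrl = saa7146_s_ctrl,
};

static int saa7146_register_controls(struct saa7146_dev *dev)
{
	struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;

	v4l2_ctrl_handler_init(hdl, 5);
	/* same ranges/defaults as the deleted table */
	v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
	v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops, V4L2_CID_CONTRAST, 0, 127, 1, 64);
	v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops, V4L2_CID_SATURATION, 0, 127, 1, 64);
	v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
	v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
	return hdl->error;
}
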
@@ -720,6 +593,8 @@ static int vidioc_g_parm(struct file *file, void *fh,
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct saa7146_vv *vv = dev->vv_data;
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
parm->parm.capture.readbuffers = 1;
v4l2_video_std_frame_period(vv->standard->id,
&parm->parm.capture.timeperframe);
@@ -728,19 +603,28 @@ static int vidioc_g_parm(struct file *file, void *fh,
static int vidioc_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- f->fmt.pix = ((struct saa7146_fh *)fh)->video_fmt;
+ struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+ struct saa7146_vv *vv = dev->vv_data;
+
+ f->fmt.pix = vv->video_fmt;
return 0;
}
static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh, struct v4l2_format *f)
{
- f->fmt.win = ((struct saa7146_fh *)fh)->ov.win;
+ struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+ struct saa7146_vv *vv = dev->vv_data;
+
+ f->fmt.win = vv->ov.win;
return 0;
}
static int vidioc_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- f->fmt.vbi = ((struct saa7146_fh *)fh)->vbi_fmt;
+ struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+ struct saa7146_vv *vv = dev->vv_data;
+
+ f->fmt.vbi = vv->vbi_fmt;
return 0;
}
@@ -787,6 +671,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_forma
}
f->fmt.pix.field = field;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
if (f->fmt.pix.width > maxw)
f->fmt.pix.width = maxw;
if (f->fmt.pix.height > maxh)
@@ -883,9 +768,9 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *__fh, struct v4l2_forma
err = vidioc_try_fmt_vid_cap(file, fh, f);
if (0 != err)
return err;
- fh->video_fmt = f->fmt.pix;
+ vv->video_fmt = f->fmt.pix;
DEB_EE("set to pixelformat '%4.4s'\n",
- (char *)&fh->video_fmt.pixelformat);
+ (char *)&vv->video_fmt.pixelformat);
return 0;
}
@@ -900,17 +785,17 @@ static int vidioc_s_fmt_vid_overlay(struct file *file, void *__fh, struct v4l2_f
err = vidioc_try_fmt_vid_overlay(file, fh, f);
if (0 != err)
return err;
- fh->ov.win = f->fmt.win;
- fh->ov.nclips = f->fmt.win.clipcount;
- if (fh->ov.nclips > 16)
- fh->ov.nclips = 16;
- if (copy_from_user(fh->ov.clips, f->fmt.win.clips,
- sizeof(struct v4l2_clip) * fh->ov.nclips)) {
+ vv->ov.win = f->fmt.win;
+ vv->ov.nclips = f->fmt.win.clipcount;
+ if (vv->ov.nclips > 16)
+ vv->ov.nclips = 16;
+ if (copy_from_user(vv->ov.clips, f->fmt.win.clips,
+ sizeof(struct v4l2_clip) * vv->ov.nclips)) {
return -EFAULT;
}
- /* fh->ov.fh is used to indicate that we have valid overlay informations, too */
- fh->ov.fh = fh;
+ /* vv->ov.fh is used to indicate that we have valid overlay information, too */
+ vv->ov.fh = fh;
/* check if our current overlay is active */
if (IS_OVERLAY_ACTIVE(fh) != 0) {
@@ -1111,10 +996,14 @@ static int vidioc_g_chip_ident(struct file *file, void *__fh,
chip->ident = V4L2_IDENT_NONE;
chip->revision = 0;
- if (chip->match.type == V4L2_CHIP_MATCH_HOST && !chip->match.addr) {
- chip->ident = V4L2_IDENT_SAA7146;
+ if (chip->match.type == V4L2_CHIP_MATCH_HOST) {
+ if (v4l2_chip_match_host(&chip->match))
+ chip->ident = V4L2_IDENT_SAA7146;
return 0;
}
+ if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
+ chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+ return -EINVAL;
return v4l2_device_call_until_err(&dev->v4l2_dev, 0,
core, g_chip_ident, chip);
}
@@ -1129,7 +1018,6 @@ const struct v4l2_ioctl_ops saa7146_video_ioctl_ops = {
.vidioc_g_fmt_vid_overlay = vidioc_g_fmt_vid_overlay,
.vidioc_try_fmt_vid_overlay = vidioc_try_fmt_vid_overlay,
.vidioc_s_fmt_vid_overlay = vidioc_s_fmt_vid_overlay,
- .vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
.vidioc_g_chip_ident = vidioc_g_chip_ident,
.vidioc_overlay = vidioc_overlay,
@@ -1141,12 +1029,29 @@ const struct v4l2_ioctl_ops saa7146_video_ioctl_ops = {
.vidioc_dqbuf = vidioc_dqbuf,
.vidioc_g_std = vidioc_g_std,
.vidioc_s_std = vidioc_s_std,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_g_parm = vidioc_g_parm,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+const struct v4l2_ioctl_ops saa7146_vbi_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
+ .vidioc_g_chip_ident = vidioc_g_chip_ident,
+
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_g_parm = vidioc_g_parm,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/*********************************************************************************/
@@ -1161,7 +1066,7 @@ static int buffer_activate (struct saa7146_dev *dev,
buf->vb.state = VIDEOBUF_ACTIVE;
saa7146_set_capture(dev,buf,next);
- mod_timer(&vv->video_q.timeout, jiffies+BUFFER_TIMEOUT);
+ mod_timer(&vv->video_dmaq.timeout, jiffies+BUFFER_TIMEOUT);
return 0;
}
@@ -1185,44 +1090,44 @@ static int buffer_prepare(struct videobuf_queue *q,
DEB_CAP("vbuf:%p\n", vb);
/* sanity checks */
- if (fh->video_fmt.width < 48 ||
- fh->video_fmt.height < 32 ||
- fh->video_fmt.width > vv->standard->h_max_out ||
- fh->video_fmt.height > vv->standard->v_max_out) {
+ if (vv->video_fmt.width < 48 ||
+ vv->video_fmt.height < 32 ||
+ vv->video_fmt.width > vv->standard->h_max_out ||
+ vv->video_fmt.height > vv->standard->v_max_out) {
DEB_D("w (%d) / h (%d) out of bounds\n",
- fh->video_fmt.width, fh->video_fmt.height);
+ vv->video_fmt.width, vv->video_fmt.height);
return -EINVAL;
}
- size = fh->video_fmt.sizeimage;
+ size = vv->video_fmt.sizeimage;
if (0 != buf->vb.baddr && buf->vb.bsize < size) {
DEB_D("size mismatch\n");
return -EINVAL;
}
DEB_CAP("buffer_prepare [size=%dx%d,bytes=%d,fields=%s]\n",
- fh->video_fmt.width, fh->video_fmt.height,
- size, v4l2_field_names[fh->video_fmt.field]);
- if (buf->vb.width != fh->video_fmt.width ||
- buf->vb.bytesperline != fh->video_fmt.bytesperline ||
- buf->vb.height != fh->video_fmt.height ||
+ vv->video_fmt.width, vv->video_fmt.height,
+ size, v4l2_field_names[vv->video_fmt.field]);
+ if (buf->vb.width != vv->video_fmt.width ||
+ buf->vb.bytesperline != vv->video_fmt.bytesperline ||
+ buf->vb.height != vv->video_fmt.height ||
buf->vb.size != size ||
buf->vb.field != field ||
- buf->vb.field != fh->video_fmt.field ||
- buf->fmt != &fh->video_fmt) {
+ buf->vb.field != vv->video_fmt.field ||
+ buf->fmt != &vv->video_fmt) {
saa7146_dma_free(dev,q,buf);
}
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
struct saa7146_format *sfmt;
- buf->vb.bytesperline = fh->video_fmt.bytesperline;
- buf->vb.width = fh->video_fmt.width;
- buf->vb.height = fh->video_fmt.height;
+ buf->vb.bytesperline = vv->video_fmt.bytesperline;
+ buf->vb.width = vv->video_fmt.width;
+ buf->vb.height = vv->video_fmt.height;
buf->vb.size = size;
buf->vb.field = field;
- buf->fmt = &fh->video_fmt;
- buf->vb.field = fh->video_fmt.field;
+ buf->fmt = &vv->video_fmt;
+ buf->vb.field = vv->video_fmt.field;
sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
@@ -1258,11 +1163,12 @@ static int buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned
{
struct file *file = q->priv_data;
struct saa7146_fh *fh = file->private_data;
+ struct saa7146_vv *vv = fh->dev->vv_data;
if (0 == *count || *count > MAX_SAA7146_CAPTURE_BUFFERS)
*count = MAX_SAA7146_CAPTURE_BUFFERS;
- *size = fh->video_fmt.sizeimage;
+ *size = vv->video_fmt.sizeimage;
/* check if we exceed the "max_memory" parameter */
if( (*count * *size) > (max_memory*1048576) ) {
@@ -1283,7 +1189,7 @@ static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
struct saa7146_buf *buf = (struct saa7146_buf *)vb;
DEB_CAP("vbuf:%p\n", vb);
- saa7146_buffer_queue(fh->dev,&vv->video_q,buf);
+ saa7146_buffer_queue(fh->dev, &vv->video_dmaq, buf);
}
static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
@@ -1312,12 +1218,12 @@ static struct videobuf_queue_ops video_qops = {
static void video_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
{
- INIT_LIST_HEAD(&vv->video_q.queue);
+ INIT_LIST_HEAD(&vv->video_dmaq.queue);
- init_timer(&vv->video_q.timeout);
- vv->video_q.timeout.function = saa7146_buffer_timeout;
- vv->video_q.timeout.data = (unsigned long)(&vv->video_q);
- vv->video_q.dev = dev;
+ init_timer(&vv->video_dmaq.timeout);
+ vv->video_dmaq.timeout.function = saa7146_buffer_timeout;
+ vv->video_dmaq.timeout.data = (unsigned long)(&vv->video_dmaq);
+ vv->video_dmaq.dev = dev;
/* set some default values */
vv->standard = &dev->ext_vv_data->stds[0];
@@ -1331,15 +1237,6 @@ static void video_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
static int video_open(struct saa7146_dev *dev, struct file *file)
{
struct saa7146_fh *fh = file->private_data;
- struct saa7146_format *sfmt;
-
- fh->video_fmt.width = 384;
- fh->video_fmt.height = 288;
- fh->video_fmt.pixelformat = V4L2_PIX_FMT_BGR24;
- fh->video_fmt.bytesperline = 0;
- fh->video_fmt.field = V4L2_FIELD_ANY;
- sfmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat);
- fh->video_fmt.sizeimage = (fh->video_fmt.width * fh->video_fmt.height * sfmt->depth)/8;
videobuf_queue_sg_init(&fh->video_q, &video_qops,
&dev->pci->dev, &dev->slock,
@@ -1371,7 +1268,7 @@ static void video_close(struct saa7146_dev *dev, struct file *file)
static void video_irq_done(struct saa7146_dev *dev, unsigned long st)
{
struct saa7146_vv *vv = dev->vv_data;
- struct saa7146_dmaqueue *q = &vv->video_q;
+ struct saa7146_dmaqueue *q = &vv->video_dmaq;
spin_lock(&dev->slock);
DEB_CAP("called\n");
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index 4a6d5cef3964..bbf4945149a9 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -204,6 +204,27 @@ config MEDIA_TUNER_TDA18218
help
NXP TDA18218 silicon tuner driver.
+config MEDIA_TUNER_FC0011
+ tristate "Fitipower FC0011 silicon tuner"
+ depends on VIDEO_MEDIA && I2C
+ default m if MEDIA_TUNER_CUSTOMISE
+ help
+ Fitipower FC0011 silicon tuner driver.
+
+config MEDIA_TUNER_FC0012
+ tristate "Fitipower FC0012 silicon tuner"
+ depends on VIDEO_MEDIA && I2C
+ default m if MEDIA_TUNER_CUSTOMISE
+ help
+ Fitipower FC0012 silicon tuner driver.
+
+config MEDIA_TUNER_FC0013
+ tristate "Fitipower FC0013 silicon tuner"
+ depends on VIDEO_MEDIA && I2C
+ default m if MEDIA_TUNER_CUSTOMISE
+ help
+ Fitipower FC0013 silicon tuner driver.
+
config MEDIA_TUNER_TDA18212
tristate "NXP TDA18212 silicon tuner"
depends on VIDEO_MEDIA && I2C
@@ -211,4 +232,10 @@ config MEDIA_TUNER_TDA18212
help
NXP TDA18212 silicon tuner driver.
+config MEDIA_TUNER_TUA9001
+ tristate "Infineon TUA 9001 silicon tuner"
+ depends on VIDEO_MEDIA && I2C
+ default m if MEDIA_TUNER_CUSTOMISE
+ help
+ Infineon TUA 9001 silicon tuner driver.
endmenu
diff --git a/drivers/media/common/tuners/Makefile b/drivers/media/common/tuners/Makefile
index f80407eb8998..891b80e60808 100644
--- a/drivers/media/common/tuners/Makefile
+++ b/drivers/media/common/tuners/Makefile
@@ -28,6 +28,10 @@ obj-$(CONFIG_MEDIA_TUNER_MC44S803) += mc44s803.o
obj-$(CONFIG_MEDIA_TUNER_MAX2165) += max2165.o
obj-$(CONFIG_MEDIA_TUNER_TDA18218) += tda18218.o
obj-$(CONFIG_MEDIA_TUNER_TDA18212) += tda18212.o
+obj-$(CONFIG_MEDIA_TUNER_TUA9001) += tua9001.o
+obj-$(CONFIG_MEDIA_TUNER_FC0011) += fc0011.o
+obj-$(CONFIG_MEDIA_TUNER_FC0012) += fc0012.o
+obj-$(CONFIG_MEDIA_TUNER_FC0013) += fc0013.o
ccflags-y += -I$(srctree)/drivers/media/dvb/dvb-core
ccflags-y += -I$(srctree)/drivers/media/dvb/frontends
diff --git a/drivers/media/common/tuners/fc0011.c b/drivers/media/common/tuners/fc0011.c
new file mode 100644
index 000000000000..e4882546c283
--- /dev/null
+++ b/drivers/media/common/tuners/fc0011.c
@@ -0,0 +1,524 @@
+/*
+ * Fitipower FC0011 tuner driver
+ *
+ * Copyright (C) 2012 Michael Buesch <m@bues.ch>
+ *
+ * Derived from FC0012 tuner driver:
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "fc0011.h"
+
+
+/* Tuner registers */
+enum {
+ FC11_REG_0,
+ FC11_REG_FA, /* FA */
+ FC11_REG_FP, /* FP */
+ FC11_REG_XINHI, /* XIN high 8 bit */
+ FC11_REG_XINLO, /* XIN low 8 bit */
+ FC11_REG_VCO, /* VCO */
+ FC11_REG_VCOSEL, /* VCO select */
+ FC11_REG_7, /* Unknown tuner reg 7 */
+ FC11_REG_8, /* Unknown tuner reg 8 */
+ FC11_REG_9,
+ FC11_REG_10, /* Unknown tuner reg 10 */
+ FC11_REG_11, /* Unknown tuner reg 11 */
+ FC11_REG_12,
+ FC11_REG_RCCAL, /* RC calibrate */
+ FC11_REG_VCOCAL, /* VCO calibrate */
+ FC11_REG_15,
+ FC11_REG_16, /* Unknown tuner reg 16 */
+ FC11_REG_17,
+
+ FC11_NR_REGS, /* Number of registers */
+};
+
+enum FC11_REG_VCOSEL_bits {
+ FC11_VCOSEL_2 = 0x08, /* VCO select 2 */
+ FC11_VCOSEL_1 = 0x10, /* VCO select 1 */
+ FC11_VCOSEL_CLKOUT = 0x20, /* Fix clock out */
+ FC11_VCOSEL_BW7M = 0x40, /* 7MHz bw */
+ FC11_VCOSEL_BW6M = 0x80, /* 6MHz bw */
+};
+
+enum FC11_REG_RCCAL_bits {
+ FC11_RCCAL_FORCE = 0x10, /* force */
+};
+
+enum FC11_REG_VCOCAL_bits {
+ FC11_VCOCAL_RUN = 0, /* VCO calibration run */
+ FC11_VCOCAL_VALUEMASK = 0x3F, /* VCO calibration value mask */
+ FC11_VCOCAL_OK = 0x40, /* VCO calibration Ok */
+ FC11_VCOCAL_RESET = 0x80, /* VCO calibration reset */
+};
+
+
+struct fc0011_priv {
+ struct i2c_adapter *i2c;
+ u8 addr;
+
+ u32 frequency;
+ u32 bandwidth;
+};
+
+
+static int fc0011_writereg(struct fc0011_priv *priv, u8 reg, u8 val)
+{
+ u8 buf[2] = { reg, val };
+ struct i2c_msg msg = { .addr = priv->addr,
+ .flags = 0, .buf = buf, .len = 2 };
+
+ if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
+ dev_err(&priv->i2c->dev,
+ "I2C write reg failed, reg: %02x, val: %02x\n",
+ reg, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int fc0011_readreg(struct fc0011_priv *priv, u8 reg, u8 *val)
+{
+ u8 dummy;
+ struct i2c_msg msg[2] = {
+ { .addr = priv->addr,
+ .flags = 0, .buf = &reg, .len = 1 },
+ { .addr = priv->addr,
+ .flags = I2C_M_RD, .buf = val ? : &dummy, .len = 1 },
+ };
+
+ if (i2c_transfer(priv->i2c, msg, 2) != 2) {
+ dev_err(&priv->i2c->dev,
+ "I2C read failed, reg: %02x\n", reg);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int fc0011_release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+
+ return 0;
+}
+
+static int fc0011_init(struct dvb_frontend *fe)
+{
+ struct fc0011_priv *priv = fe->tuner_priv;
+ int err;
+
+ if (WARN_ON(!fe->callback))
+ return -EINVAL;
+
+ err = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER,
+ FC0011_FE_CALLBACK_POWER, priv->addr);
+ if (err) {
+ dev_err(&priv->i2c->dev, "Power-on callback failed\n");
+ return err;
+ }
+ err = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER,
+ FC0011_FE_CALLBACK_RESET, priv->addr);
+ if (err) {
+ dev_err(&priv->i2c->dev, "Reset callback failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/* Initiate VCO calibration */
+static int fc0011_vcocal_trigger(struct fc0011_priv *priv)
+{
+ int err;
+
+ err = fc0011_writereg(priv, FC11_REG_VCOCAL, FC11_VCOCAL_RESET);
+ if (err)
+ return err;
+ err = fc0011_writereg(priv, FC11_REG_VCOCAL, FC11_VCOCAL_RUN);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Read VCO calibration value */
+static int fc0011_vcocal_read(struct fc0011_priv *priv, u8 *value)
+{
+ int err;
+
+ err = fc0011_writereg(priv, FC11_REG_VCOCAL, FC11_VCOCAL_RUN);
+ if (err)
+ return err;
+ usleep_range(10000, 20000);
+ err = fc0011_readreg(priv, FC11_REG_VCOCAL, value);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int fc0011_set_params(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct fc0011_priv *priv = fe->tuner_priv;
+ int err;
+ unsigned int i, vco_retries;
+ u32 freq = p->frequency / 1000;
+ u32 bandwidth = p->bandwidth_hz / 1000;
+ u32 fvco, xin, xdiv, xdivr;
+ u16 frac;
+ u8 fa, fp, vco_sel, vco_cal;
+ u8 regs[FC11_NR_REGS] = { };
+
+ regs[FC11_REG_7] = 0x0F;
+ regs[FC11_REG_8] = 0x3E;
+ regs[FC11_REG_10] = 0xB8;
+ regs[FC11_REG_11] = 0x80;
+ regs[FC11_REG_RCCAL] = 0x04;
+ err = fc0011_writereg(priv, FC11_REG_7, regs[FC11_REG_7]);
+ err |= fc0011_writereg(priv, FC11_REG_8, regs[FC11_REG_8]);
+ err |= fc0011_writereg(priv, FC11_REG_10, regs[FC11_REG_10]);
+ err |= fc0011_writereg(priv, FC11_REG_11, regs[FC11_REG_11]);
+ err |= fc0011_writereg(priv, FC11_REG_RCCAL, regs[FC11_REG_RCCAL]);
+ if (err)
+ return -EIO;
+
+ /* Set VCO freq and VCO div */
+ if (freq < 54000) {
+ fvco = freq * 64;
+ regs[FC11_REG_VCO] = 0x82;
+ } else if (freq < 108000) {
+ fvco = freq * 32;
+ regs[FC11_REG_VCO] = 0x42;
+ } else if (freq < 216000) {
+ fvco = freq * 16;
+ regs[FC11_REG_VCO] = 0x22;
+ } else if (freq < 432000) {
+ fvco = freq * 8;
+ regs[FC11_REG_VCO] = 0x12;
+ } else {
+ fvco = freq * 4;
+ regs[FC11_REG_VCO] = 0x0A;
+ }
+
+ /* Calc XIN. The PLL reference frequency is 18 MHz. */
+ xdiv = fvco / 18000;
+ frac = fvco - xdiv * 18000;
+ frac = (frac << 15) / 18000;
+ if (frac >= 16384)
+ frac += 32786;
+ if (!frac)
+ xin = 0;
+ else if (frac < 511)
+ xin = 512;
+ else if (frac < 65026)
+ xin = frac;
+ else
+ xin = 65024;
+ regs[FC11_REG_XINHI] = xin >> 8;
+ regs[FC11_REG_XINLO] = xin;
+
+ /* Calc FP and FA */
+ xdivr = xdiv;
+ if (fvco - xdiv * 18000 >= 9000)
+ xdivr += 1; /* round */
+ fp = xdivr / 8;
+ fa = xdivr - fp * 8;
+ if (fa < 2) {
+ fp -= 1;
+ fa += 8;
+ }
+ if (fp > 0x1F) {
+ fp &= 0x1F;
+ fa &= 0xF;
+ }
+ if (fa >= fp) {
+ dev_warn(&priv->i2c->dev,
+ "fa %02X >= fp %02X, but trying to continue\n",
+ (unsigned int)(u8)fa, (unsigned int)(u8)fp);
+ }
+ regs[FC11_REG_FA] = fa;
+ regs[FC11_REG_FP] = fp;
+
+ /* Select bandwidth */
+ switch (bandwidth) {
+ case 8000:
+ break;
+ case 7000:
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_BW7M;
+ break;
+ default:
+ dev_warn(&priv->i2c->dev, "Unsupported bandwidth %u kHz. "
+ "Using 6000 kHz.\n",
+ bandwidth);
+ bandwidth = 6000;
+ /* fallthrough */
+ case 6000:
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_BW6M;
+ break;
+ }
+
+ /* Pre VCO select */
+ if (fvco < 2320000) {
+ vco_sel = 0;
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ } else if (fvco < 3080000) {
+ vco_sel = 1;
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_1;
+ } else {
+ vco_sel = 2;
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_2;
+ }
+
+ /* Fix for low freqs */
+ if (freq < 45000) {
+ regs[FC11_REG_FA] = 0x6;
+ regs[FC11_REG_FP] = 0x11;
+ }
+
+ /* Clock out fix */
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_CLKOUT;
+
+ /* Write the cached registers */
+ for (i = FC11_REG_FA; i <= FC11_REG_VCOSEL; i++) {
+ err = fc0011_writereg(priv, i, regs[i]);
+ if (err)
+ return err;
+ }
+
+ /* VCO calibration */
+ err = fc0011_vcocal_trigger(priv);
+ if (err)
+ return err;
+ err = fc0011_vcocal_read(priv, &vco_cal);
+ if (err)
+ return err;
+ vco_retries = 0;
+ while (!(vco_cal & FC11_VCOCAL_OK) && vco_retries < 3) {
+ /* Reset the tuner and try again */
+ err = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER,
+ FC0011_FE_CALLBACK_RESET, priv->addr);
+ if (err) {
+ dev_err(&priv->i2c->dev, "Failed to reset tuner\n");
+ return err;
+ }
+ /* Reinit tuner config */
+ err = 0;
+ for (i = FC11_REG_FA; i <= FC11_REG_VCOSEL; i++)
+ err |= fc0011_writereg(priv, i, regs[i]);
+ err |= fc0011_writereg(priv, FC11_REG_7, regs[FC11_REG_7]);
+ err |= fc0011_writereg(priv, FC11_REG_8, regs[FC11_REG_8]);
+ err |= fc0011_writereg(priv, FC11_REG_10, regs[FC11_REG_10]);
+ err |= fc0011_writereg(priv, FC11_REG_11, regs[FC11_REG_11]);
+ err |= fc0011_writereg(priv, FC11_REG_RCCAL, regs[FC11_REG_RCCAL]);
+ if (err)
+ return -EIO;
+ /* VCO calibration */
+ err = fc0011_vcocal_trigger(priv);
+ if (err)
+ return err;
+ err = fc0011_vcocal_read(priv, &vco_cal);
+ if (err)
+ return err;
+ vco_retries++;
+ }
+ if (!(vco_cal & FC11_VCOCAL_OK)) {
+ dev_err(&priv->i2c->dev,
+ "Failed to read VCO calibration value (got %02X)\n",
+ (unsigned int)vco_cal);
+ return -EIO;
+ }
+ vco_cal &= FC11_VCOCAL_VALUEMASK;
+
+ switch (vco_sel) {
+ case 0:
+ if (vco_cal < 8) {
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_1;
+ err = fc0011_writereg(priv, FC11_REG_VCOSEL,
+ regs[FC11_REG_VCOSEL]);
+ if (err)
+ return err;
+ err = fc0011_vcocal_trigger(priv);
+ if (err)
+ return err;
+ } else {
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ err = fc0011_writereg(priv, FC11_REG_VCOSEL,
+ regs[FC11_REG_VCOSEL]);
+ if (err)
+ return err;
+ }
+ break;
+ case 1:
+ if (vco_cal < 5) {
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_2;
+ err = fc0011_writereg(priv, FC11_REG_VCOSEL,
+ regs[FC11_REG_VCOSEL]);
+ if (err)
+ return err;
+ err = fc0011_vcocal_trigger(priv);
+ if (err)
+ return err;
+ } else if (vco_cal <= 48) {
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_1;
+ err = fc0011_writereg(priv, FC11_REG_VCOSEL,
+ regs[FC11_REG_VCOSEL]);
+ if (err)
+ return err;
+ } else {
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ err = fc0011_writereg(priv, FC11_REG_VCOSEL,
+ regs[FC11_REG_VCOSEL]);
+ if (err)
+ return err;
+ err = fc0011_vcocal_trigger(priv);
+ if (err)
+ return err;
+ }
+ break;
+ case 2:
+ if (vco_cal > 53) {
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_1;
+ err = fc0011_writereg(priv, FC11_REG_VCOSEL,
+ regs[FC11_REG_VCOSEL]);
+ if (err)
+ return err;
+ err = fc0011_vcocal_trigger(priv);
+ if (err)
+ return err;
+ } else {
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_2;
+ err = fc0011_writereg(priv, FC11_REG_VCOSEL,
+ regs[FC11_REG_VCOSEL]);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ err = fc0011_vcocal_read(priv, NULL);
+ if (err)
+ return err;
+ usleep_range(10000, 50000);
+
+ err = fc0011_readreg(priv, FC11_REG_RCCAL, &regs[FC11_REG_RCCAL]);
+ if (err)
+ return err;
+ regs[FC11_REG_RCCAL] |= FC11_RCCAL_FORCE;
+ err = fc0011_writereg(priv, FC11_REG_RCCAL, regs[FC11_REG_RCCAL]);
+ if (err)
+ return err;
+ err = fc0011_writereg(priv, FC11_REG_16, 0xB);
+ if (err)
+ return err;
+
+ dev_dbg(&priv->i2c->dev, "Tuned to "
+ "fa=%02X fp=%02X xin=%02X%02X vco=%02X vcosel=%02X "
+ "vcocal=%02X(%u) bw=%u\n",
+ (unsigned int)regs[FC11_REG_FA],
+ (unsigned int)regs[FC11_REG_FP],
+ (unsigned int)regs[FC11_REG_XINHI],
+ (unsigned int)regs[FC11_REG_XINLO],
+ (unsigned int)regs[FC11_REG_VCO],
+ (unsigned int)regs[FC11_REG_VCOSEL],
+ (unsigned int)vco_cal, vco_retries,
+ (unsigned int)bandwidth);
+
+ priv->frequency = p->frequency;
+ priv->bandwidth = p->bandwidth_hz;
+
+ return 0;
+}
+
+static int fc0011_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct fc0011_priv *priv = fe->tuner_priv;
+
+ *frequency = priv->frequency;
+
+ return 0;
+}
+
+static int fc0011_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ *frequency = 0;
+
+ return 0;
+}
+
+static int fc0011_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
+{
+ struct fc0011_priv *priv = fe->tuner_priv;
+
+ *bandwidth = priv->bandwidth;
+
+ return 0;
+}
+
+static const struct dvb_tuner_ops fc0011_tuner_ops = {
+ .info = {
+ .name = "Fitipower FC0011",
+
+ .frequency_min = 45000000,
+ .frequency_max = 1000000000,
+ },
+
+ .release = fc0011_release,
+ .init = fc0011_init,
+
+ .set_params = fc0011_set_params,
+
+ .get_frequency = fc0011_get_frequency,
+ .get_if_frequency = fc0011_get_if_frequency,
+ .get_bandwidth = fc0011_get_bandwidth,
+};
+
+struct dvb_frontend *fc0011_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ const struct fc0011_config *config)
+{
+ struct fc0011_priv *priv;
+
+ priv = kzalloc(sizeof(struct fc0011_priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ priv->i2c = i2c;
+ priv->addr = config->i2c_address;
+
+ fe->tuner_priv = priv;
+ fe->ops.tuner_ops = fc0011_tuner_ops;
+
+ dev_info(&priv->i2c->dev, "Fitipower FC0011 tuner attached\n");
+
+ return fe;
+}
+EXPORT_SYMBOL(fc0011_attach);
+
+MODULE_DESCRIPTION("Fitipower FC0011 silicon tuner driver");
+MODULE_AUTHOR("Michael Buesch <m@bues.ch>");
+MODULE_LICENSE("GPL");
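
Editor's note: fc0011_init() and the VCO-calibration retry loop above drive board-level power and reset through fe->callback with the commands declared in fc0011.h. A hedged sketch of the bridge side (not part of the patch; the helper functions, the 0x60 I2C address and the exact attach path are assumptions):

#include "fc0011.h"

/* assumed board helpers, not part of this patch */
static int my_bridge_tuner_power_on(void *adapter_priv);
static int my_bridge_tuner_reset(void *adapter_priv);

/* The tuner passes its I2C adapter as adapter_priv (see fc0011_init above). */
static int my_bridge_frontend_callback(void *adapter_priv, int component,
				       int cmd, int arg)
{
	if (component != DVB_FRONTEND_COMPONENT_TUNER)
		return -EINVAL;

	switch (cmd) {
	case FC0011_FE_CALLBACK_POWER:
		return my_bridge_tuner_power_on(adapter_priv);
	case FC0011_FE_CALLBACK_RESET:
		return my_bridge_tuner_reset(adapter_priv);
	}
	return -EINVAL;
}

static const struct fc0011_config my_fc0011_config = {
	.i2c_address = 0x60,	/* assumption, board specific */
};

/* How the bridge installs the callback depends on its framework; this only
 * illustrates the ordering: callback first, then fc0011_attach(). */
static int my_bridge_attach_tuner(struct dvb_frontend *fe,
				  struct i2c_adapter *i2c)
{
	fe->callback = my_bridge_frontend_callback;
	if (!fc0011_attach(fe, i2c, &my_fc0011_config))
		return -ENODEV;
	return 0;
}
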
diff --git a/drivers/media/common/tuners/fc0011.h b/drivers/media/common/tuners/fc0011.h
new file mode 100644
index 000000000000..0ee581f122d2
--- /dev/null
+++ b/drivers/media/common/tuners/fc0011.h
@@ -0,0 +1,41 @@
+#ifndef LINUX_FC0011_H_
+#define LINUX_FC0011_H_
+
+#include "dvb_frontend.h"
+
+
+/** struct fc0011_config - fc0011 hardware config
+ *
+ * @i2c_address: I2C bus address.
+ */
+struct fc0011_config {
+ u8 i2c_address;
+};
+
+/** enum fc0011_fe_callback_commands - Frontend callbacks
+ *
+ * @FC0011_FE_CALLBACK_POWER: Power on tuner hardware.
+ * @FC0011_FE_CALLBACK_RESET: Request a tuner reset.
+ */
+enum fc0011_fe_callback_commands {
+ FC0011_FE_CALLBACK_POWER,
+ FC0011_FE_CALLBACK_RESET,
+};
+
+#if defined(CONFIG_MEDIA_TUNER_FC0011) ||\
+ defined(CONFIG_MEDIA_TUNER_FC0011_MODULE)
+struct dvb_frontend *fc0011_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ const struct fc0011_config *config);
+#else
+static inline
+struct dvb_frontend *fc0011_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ const struct fc0011_config *config)
+{
+ dev_err(&i2c->dev, "fc0011 driver disabled in Kconfig\n");
+ return NULL;
+}
+#endif
+
+#endif /* LINUX_FC0011_H_ */
diff --git a/drivers/media/common/tuners/fc0012-priv.h b/drivers/media/common/tuners/fc0012-priv.h
new file mode 100644
index 000000000000..4577c917e616
--- /dev/null
+++ b/drivers/media/common/tuners/fc0012-priv.h
@@ -0,0 +1,43 @@
+/*
+ * Fitipower FC0012 tuner driver - private includes
+ *
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _FC0012_PRIV_H_
+#define _FC0012_PRIV_H_
+
+#define LOG_PREFIX "fc0012"
+
+#undef err
+#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
+#undef info
+#define info(f, arg...) printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
+#undef warn
+#define warn(f, arg...) printk(KERN_WARNING LOG_PREFIX": " f "\n" , ## arg)
+
+struct fc0012_priv {
+ struct i2c_adapter *i2c;
+ u8 addr;
+ u8 dual_master;
+ u8 xtal_freq;
+
+ u32 frequency;
+ u32 bandwidth;
+};
+
+#endif
diff --git a/drivers/media/common/tuners/fc0012.c b/drivers/media/common/tuners/fc0012.c
new file mode 100644
index 000000000000..308135abd54c
--- /dev/null
+++ b/drivers/media/common/tuners/fc0012.c
@@ -0,0 +1,467 @@
+/*
+ * Fitipower FC0012 tuner driver
+ *
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "fc0012.h"
+#include "fc0012-priv.h"
+
+static int fc0012_writereg(struct fc0012_priv *priv, u8 reg, u8 val)
+{
+ u8 buf[2] = {reg, val};
+ struct i2c_msg msg = {
+ .addr = priv->addr, .flags = 0, .buf = buf, .len = 2
+ };
+
+ if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
+ err("I2C write reg failed, reg: %02x, val: %02x", reg, val);
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int fc0012_readreg(struct fc0012_priv *priv, u8 reg, u8 *val)
+{
+ struct i2c_msg msg[2] = {
+ { .addr = priv->addr, .flags = 0, .buf = &reg, .len = 1 },
+ { .addr = priv->addr, .flags = I2C_M_RD, .buf = val, .len = 1 },
+ };
+
+ if (i2c_transfer(priv->i2c, msg, 2) != 2) {
+ err("I2C read reg failed, reg: %02x", reg);
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int fc0012_release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+static int fc0012_init(struct dvb_frontend *fe)
+{
+ struct fc0012_priv *priv = fe->tuner_priv;
+ int i, ret = 0;
+ unsigned char reg[] = {
+ 0x00, /* dummy reg. 0 */
+ 0x05, /* reg. 0x01 */
+ 0x10, /* reg. 0x02 */
+ 0x00, /* reg. 0x03 */
+ 0x00, /* reg. 0x04 */
+ 0x0f, /* reg. 0x05: may also be 0x0a */
+ 0x00, /* reg. 0x06: divider 2, VCO slow */
+ 0x00, /* reg. 0x07: may also be 0x0f */
+ 0xff, /* reg. 0x08: AGC Clock divide by 256, AGC gain 1/256,
+ Loop Bw 1/8 */
+ 0x6e, /* reg. 0x09: Disable LoopThrough, Enable LoopThrough: 0x6f */
+ 0xb8, /* reg. 0x0a: Disable LO Test Buffer */
+ 0x82, /* reg. 0x0b: Output Clock is same as clock frequency,
+ may also be 0x83 */
+ 0xfc, /* reg. 0x0c: depending on AGC Up-Down mode, may need 0xf8 */
+ 0x02, /* reg. 0x0d: AGC Not Forcing & LNA Forcing, 0x02 for DVB-T */
+ 0x00, /* reg. 0x0e */
+ 0x00, /* reg. 0x0f */
+ 0x00, /* reg. 0x10: may also be 0x0d */
+ 0x00, /* reg. 0x11 */
+ 0x1f, /* reg. 0x12: Set to maximum gain */
+ 0x08, /* reg. 0x13: Set to Middle Gain: 0x08,
+ Low Gain: 0x00, High Gain: 0x10, enable IX2: 0x80 */
+ 0x00, /* reg. 0x14 */
+ 0x04, /* reg. 0x15: Enable LNA COMPS */
+ };
+
+ switch (priv->xtal_freq) {
+ case FC_XTAL_27_MHZ:
+ case FC_XTAL_28_8_MHZ:
+ reg[0x07] |= 0x20;
+ break;
+ case FC_XTAL_36_MHZ:
+ default:
+ break;
+ }
+
+ if (priv->dual_master)
+ reg[0x0c] |= 0x02;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ for (i = 1; i < sizeof(reg); i++) {
+ ret = fc0012_writereg(priv, i, reg[i]);
+ if (ret)
+ break;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ if (ret)
+ err("fc0012_writereg failed: %d", ret);
+
+ return ret;
+}
+
+static int fc0012_sleep(struct dvb_frontend *fe)
+{
+ /* nothing to do here */
+ return 0;
+}
+
+static int fc0012_set_params(struct dvb_frontend *fe)
+{
+ struct fc0012_priv *priv = fe->tuner_priv;
+ int i, ret = 0;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 freq = p->frequency / 1000;
+ u32 delsys = p->delivery_system;
+ unsigned char reg[7], am, pm, multi, tmp;
+ unsigned long f_vco;
+ unsigned short xtal_freq_khz_2, xin, xdiv;
+ int vco_select = false;
+
+ if (fe->callback) {
+ ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER,
+ FC_FE_CALLBACK_VHF_ENABLE, (freq > 300000 ? 0 : 1));
+ if (ret)
+ goto exit;
+ }
+
+ switch (priv->xtal_freq) {
+ case FC_XTAL_27_MHZ:
+ xtal_freq_khz_2 = 27000 / 2;
+ break;
+ case FC_XTAL_36_MHZ:
+ xtal_freq_khz_2 = 36000 / 2;
+ break;
+ case FC_XTAL_28_8_MHZ:
+ default:
+ xtal_freq_khz_2 = 28800 / 2;
+ break;
+ }
+
+ /* select frequency divider and the frequency of VCO */
+ if (freq < 37084) { /* freq * 96 < 3560000 */
+ multi = 96;
+ reg[5] = 0x82;
+ reg[6] = 0x00;
+ } else if (freq < 55625) { /* freq * 64 < 3560000 */
+ multi = 64;
+ reg[5] = 0x82;
+ reg[6] = 0x02;
+ } else if (freq < 74167) { /* freq * 48 < 3560000 */
+ multi = 48;
+ reg[5] = 0x42;
+ reg[6] = 0x00;
+ } else if (freq < 111250) { /* freq * 32 < 3560000 */
+ multi = 32;
+ reg[5] = 0x42;
+ reg[6] = 0x02;
+ } else if (freq < 148334) { /* freq * 24 < 3560000 */
+ multi = 24;
+ reg[5] = 0x22;
+ reg[6] = 0x00;
+ } else if (freq < 222500) { /* freq * 16 < 3560000 */
+ multi = 16;
+ reg[5] = 0x22;
+ reg[6] = 0x02;
+ } else if (freq < 296667) { /* freq * 12 < 3560000 */
+ multi = 12;
+ reg[5] = 0x12;
+ reg[6] = 0x00;
+ } else if (freq < 445000) { /* freq * 8 < 3560000 */
+ multi = 8;
+ reg[5] = 0x12;
+ reg[6] = 0x02;
+ } else if (freq < 593334) { /* freq * 6 < 3560000 */
+ multi = 6;
+ reg[5] = 0x0a;
+ reg[6] = 0x00;
+ } else {
+ multi = 4;
+ reg[5] = 0x0a;
+ reg[6] = 0x02;
+ }
+
+ f_vco = freq * multi;
+
+ if (f_vco >= 3060000) {
+ reg[6] |= 0x08;
+ vco_select = true;
+ }
+
+ if (freq >= 45000) {
+ /* From the divided value (XDIV), determine the FA and FP values */
+ xdiv = (unsigned short)(f_vco / xtal_freq_khz_2);
+ if ((f_vco - xdiv * xtal_freq_khz_2) >= (xtal_freq_khz_2 / 2))
+ xdiv++;
+
+ pm = (unsigned char)(xdiv / 8);
+ am = (unsigned char)(xdiv - (8 * pm));
+
+ if (am < 2) {
+ reg[1] = am + 8;
+ reg[2] = pm - 1;
+ } else {
+ reg[1] = am;
+ reg[2] = pm;
+ }
+ } else {
+ /* fix for frequency less than 45 MHz */
+ reg[1] = 0x06;
+ reg[2] = 0x11;
+ }
+
+ /* fix clock out */
+ reg[6] |= 0x20;
+
+ /* From the VCO frequency, determine the XIN (the fractional part of the
+ Delta Sigma PLL) and the divided value (XDIV) */
+ xin = (unsigned short)(f_vco - (f_vco / xtal_freq_khz_2) * xtal_freq_khz_2);
+ xin = (xin << 15) / xtal_freq_khz_2;
+ if (xin >= 16384)
+ xin += 32768;
+
+ reg[3] = xin >> 8; /* xin with 9 bit resolution */
+ reg[4] = xin & 0xff;
+
+ if (delsys == SYS_DVBT) {
+ reg[6] &= 0x3f; /* bits 6 and 7 describe the bandwidth */
+ switch (p->bandwidth_hz) {
+ case 6000000:
+ reg[6] |= 0x80;
+ break;
+ case 7000000:
+ reg[6] |= 0x40;
+ break;
+ case 8000000:
+ default:
+ break;
+ }
+ } else {
+ err("%s: modulation type not supported!", __func__);
+ return -EINVAL;
+ }
+
+ /* modified for Realtek demod */
+ reg[5] |= 0x07;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ for (i = 1; i <= 6; i++) {
+ ret = fc0012_writereg(priv, i, reg[i]);
+ if (ret)
+ goto exit;
+ }
+
+ /* VCO Calibration */
+ ret = fc0012_writereg(priv, 0x0e, 0x80);
+ if (!ret)
+ ret = fc0012_writereg(priv, 0x0e, 0x00);
+
+ /* VCO Re-Calibration if needed */
+ if (!ret)
+ ret = fc0012_writereg(priv, 0x0e, 0x00);
+
+ if (!ret) {
+ msleep(10);
+ ret = fc0012_readreg(priv, 0x0e, &tmp);
+ }
+ if (ret)
+ goto exit;
+
+ /* vco selection */
+ tmp &= 0x3f;
+
+ if (vco_select) {
+ if (tmp > 0x3c) {
+ reg[6] &= ~0x08;
+ ret = fc0012_writereg(priv, 0x06, reg[6]);
+ if (!ret)
+ ret = fc0012_writereg(priv, 0x0e, 0x80);
+ if (!ret)
+ ret = fc0012_writereg(priv, 0x0e, 0x00);
+ }
+ } else {
+ if (tmp < 0x02) {
+ reg[6] |= 0x08;
+ ret = fc0012_writereg(priv, 0x06, reg[6]);
+ if (!ret)
+ ret = fc0012_writereg(priv, 0x0e, 0x80);
+ if (!ret)
+ ret = fc0012_writereg(priv, 0x0e, 0x00);
+ }
+ }
+
+ priv->frequency = p->frequency;
+ priv->bandwidth = p->bandwidth_hz;
+
+exit:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+ if (ret)
+ warn("%s: failed: %d", __func__, ret);
+ return ret;
+}
+
+static int fc0012_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct fc0012_priv *priv = fe->tuner_priv;
+ *frequency = priv->frequency;
+ return 0;
+}
+
+static int fc0012_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ /* CHECK: always ? */
+ *frequency = 0;
+ return 0;
+}
+
+static int fc0012_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
+{
+ struct fc0012_priv *priv = fe->tuner_priv;
+ *bandwidth = priv->bandwidth;
+ return 0;
+}
+
+#define INPUT_ADC_LEVEL -8
+
+static int fc0012_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ struct fc0012_priv *priv = fe->tuner_priv;
+ int ret;
+ unsigned char tmp;
+ int int_temp, lna_gain, int_lna, tot_agc_gain, power;
+ const int fc0012_lna_gain_table[] = {
+ /* low gain */
+ -63, -58, -99, -73,
+ -63, -65, -54, -60,
+ /* middle gain */
+ 71, 70, 68, 67,
+ 65, 63, 61, 58,
+ /* high gain */
+ 197, 191, 188, 186,
+ 184, 182, 181, 179,
+ };
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ ret = fc0012_writereg(priv, 0x12, 0x00);
+ if (ret)
+ goto err;
+
+ ret = fc0012_readreg(priv, 0x12, &tmp);
+ if (ret)
+ goto err;
+ int_temp = tmp;
+
+ ret = fc0012_readreg(priv, 0x13, &tmp);
+ if (ret)
+ goto err;
+ lna_gain = tmp & 0x1f;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ if (lna_gain < ARRAY_SIZE(fc0012_lna_gain_table)) {
+ int_lna = fc0012_lna_gain_table[lna_gain];
+ tot_agc_gain = (abs((int_temp >> 5) - 7) - 2 +
+ (int_temp & 0x1f)) * 2;
+ power = INPUT_ADC_LEVEL - tot_agc_gain - int_lna / 10;
+
+ if (power >= 45)
+ *strength = 255; /* 100% */
+ else if (power < -95)
+ *strength = 0;
+ else
+ *strength = (power + 95) * 255 / 140;
+
+ *strength |= *strength << 8;
+ } else {
+ ret = -1;
+ }
+
+ goto exit;
+
+err:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+exit:
+ if (ret)
+ warn("%s: failed: %d", __func__, ret);
+ return ret;
+}
+
+static const struct dvb_tuner_ops fc0012_tuner_ops = {
+ .info = {
+ .name = "Fitipower FC0012",
+
+ .frequency_min = 37000000, /* estimate */
+ .frequency_max = 862000000, /* estimate */
+ .frequency_step = 0,
+ },
+
+ .release = fc0012_release,
+
+ .init = fc0012_init,
+ .sleep = fc0012_sleep,
+
+ .set_params = fc0012_set_params,
+
+ .get_frequency = fc0012_get_frequency,
+ .get_if_frequency = fc0012_get_if_frequency,
+ .get_bandwidth = fc0012_get_bandwidth,
+
+ .get_rf_strength = fc0012_get_rf_strength,
+};
+
+struct dvb_frontend *fc0012_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, u8 i2c_address, int dual_master,
+ enum fc001x_xtal_freq xtal_freq)
+{
+ struct fc0012_priv *priv = NULL;
+
+ priv = kzalloc(sizeof(struct fc0012_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return NULL;
+
+ priv->i2c = i2c;
+ priv->dual_master = dual_master;
+ priv->addr = i2c_address;
+ priv->xtal_freq = xtal_freq;
+
+ info("Fitipower FC0012 successfully attached.");
+
+ fe->tuner_priv = priv;
+
+ memcpy(&fe->ops.tuner_ops, &fc0012_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ return fe;
+}
+EXPORT_SYMBOL(fc0012_attach);
+
+MODULE_DESCRIPTION("Fitipower FC0012 silicon tuner driver");
+MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.6");
diff --git a/drivers/media/common/tuners/fc0012.h b/drivers/media/common/tuners/fc0012.h
new file mode 100644
index 000000000000..4dbd5efe8845
--- /dev/null
+++ b/drivers/media/common/tuners/fc0012.h
@@ -0,0 +1,44 @@
+/*
+ * Fitipower FC0012 tuner driver - include
+ *
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _FC0012_H_
+#define _FC0012_H_
+
+#include "dvb_frontend.h"
+#include "fc001x-common.h"
+
+#if defined(CONFIG_MEDIA_TUNER_FC0012) || \
+ (defined(CONFIG_MEDIA_TUNER_FC0012_MODULE) && defined(MODULE))
+extern struct dvb_frontend *fc0012_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ u8 i2c_address, int dual_master,
+ enum fc001x_xtal_freq xtal_freq);
+#else
+static inline struct dvb_frontend *fc0012_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ u8 i2c_address, int dual_master,
+ enum fc001x_xtal_freq xtal_freq)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/common/tuners/fc0013-priv.h b/drivers/media/common/tuners/fc0013-priv.h
new file mode 100644
index 000000000000..bfd49dedea22
--- /dev/null
+++ b/drivers/media/common/tuners/fc0013-priv.h
@@ -0,0 +1,44 @@
+/*
+ * Fitipower FC0013 tuner driver
+ *
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef _FC0013_PRIV_H_
+#define _FC0013_PRIV_H_
+
+#define LOG_PREFIX "fc0013"
+
+#undef err
+#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
+#undef info
+#define info(f, arg...) printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
+#undef warn
+#define warn(f, arg...) printk(KERN_WARNING LOG_PREFIX": " f "\n" , ## arg)
+
+struct fc0013_priv {
+ struct i2c_adapter *i2c;
+ u8 addr;
+ u8 dual_master;
+ u8 xtal_freq;
+
+ u32 frequency;
+ u32 bandwidth;
+};
+
+#endif
diff --git a/drivers/media/common/tuners/fc0013.c b/drivers/media/common/tuners/fc0013.c
new file mode 100644
index 000000000000..bd8f0f1e8f3b
--- /dev/null
+++ b/drivers/media/common/tuners/fc0013.c
@@ -0,0 +1,634 @@
+/*
+ * Fitipower FC0013 tuner driver
+ *
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ * partially based on driver code from Fitipower
+ * Copyright (C) 2010 Fitipower Integrated Technology Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include "fc0013.h"
+#include "fc0013-priv.h"
+
+static int fc0013_writereg(struct fc0013_priv *priv, u8 reg, u8 val)
+{
+ u8 buf[2] = {reg, val};
+ struct i2c_msg msg = {
+ .addr = priv->addr, .flags = 0, .buf = buf, .len = 2
+ };
+
+ if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
+ err("I2C write reg failed, reg: %02x, val: %02x", reg, val);
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int fc0013_readreg(struct fc0013_priv *priv, u8 reg, u8 *val)
+{
+ struct i2c_msg msg[2] = {
+ { .addr = priv->addr, .flags = 0, .buf = &reg, .len = 1 },
+ { .addr = priv->addr, .flags = I2C_M_RD, .buf = val, .len = 1 },
+ };
+
+ if (i2c_transfer(priv->i2c, msg, 2) != 2) {
+ err("I2C read reg failed, reg: %02x", reg);
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int fc0013_release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+static int fc0013_init(struct dvb_frontend *fe)
+{
+ struct fc0013_priv *priv = fe->tuner_priv;
+ int i, ret = 0;
+ unsigned char reg[] = {
+ 0x00, /* reg. 0x00: dummy */
+ 0x09, /* reg. 0x01 */
+ 0x16, /* reg. 0x02 */
+ 0x00, /* reg. 0x03 */
+ 0x00, /* reg. 0x04 */
+ 0x17, /* reg. 0x05 */
+ 0x02, /* reg. 0x06 */
+ 0x0a, /* reg. 0x07: CHECK */
+ 0xff, /* reg. 0x08: AGC Clock divide by 256, AGC gain 1/256,
+ Loop Bw 1/8 */
+ 0x6f, /* reg. 0x09: enable LoopThrough */
+ 0xb8, /* reg. 0x0a: Disable LO Test Buffer */
+ 0x82, /* reg. 0x0b: CHECK */
+ 0xfc, /* reg. 0x0c: depending on AGC Up-Down mode, may need 0xf8 */
+ 0x01, /* reg. 0x0d: AGC Not Forcing & LNA Forcing, may need 0x02 */
+ 0x00, /* reg. 0x0e */
+ 0x00, /* reg. 0x0f */
+ 0x00, /* reg. 0x10 */
+ 0x00, /* reg. 0x11 */
+ 0x00, /* reg. 0x12 */
+ 0x00, /* reg. 0x13 */
+ 0x50, /* reg. 0x14: DVB-T High Gain, UHF.
+ Middle Gain: 0x48, Low Gain: 0x40 */
+ 0x01, /* reg. 0x15 */
+ };
+
+ switch (priv->xtal_freq) {
+ case FC_XTAL_27_MHZ:
+ case FC_XTAL_28_8_MHZ:
+ reg[0x07] |= 0x20;
+ break;
+ case FC_XTAL_36_MHZ:
+ default:
+ break;
+ }
+
+ if (priv->dual_master)
+ reg[0x0c] |= 0x02;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ for (i = 1; i < sizeof(reg); i++) {
+ ret = fc0013_writereg(priv, i, reg[i]);
+ if (ret)
+ break;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ if (ret)
+ err("fc0013_writereg failed: %d", ret);
+
+ return ret;
+}
+
+static int fc0013_sleep(struct dvb_frontend *fe)
+{
+ /* nothing to do here */
+ return 0;
+}
+
+int fc0013_rc_cal_add(struct dvb_frontend *fe, int rc_val)
+{
+ struct fc0013_priv *priv = fe->tuner_priv;
+ int ret;
+ u8 rc_cal;
+ int val;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ /* push rc_cal value, get rc_cal value */
+ ret = fc0013_writereg(priv, 0x10, 0x00);
+ if (ret)
+ goto error_out;
+
+ /* get rc_cal value */
+ ret = fc0013_readreg(priv, 0x10, &rc_cal);
+ if (ret)
+ goto error_out;
+
+ rc_cal &= 0x0f;
+
+ val = (int)rc_cal + rc_val;
+
+ /* forcing rc_cal */
+ ret = fc0013_writereg(priv, 0x0d, 0x11);
+ if (ret)
+ goto error_out;
+
+ /* modify rc_cal value */
+ if (val > 15)
+ ret = fc0013_writereg(priv, 0x10, 0x0f);
+ else if (val < 0)
+ ret = fc0013_writereg(priv, 0x10, 0x00);
+ else
+ ret = fc0013_writereg(priv, 0x10, (u8)val);
+
+error_out:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ return ret;
+}
+EXPORT_SYMBOL(fc0013_rc_cal_add);
+
+int fc0013_rc_cal_reset(struct dvb_frontend *fe)
+{
+ struct fc0013_priv *priv = fe->tuner_priv;
+ int ret;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ ret = fc0013_writereg(priv, 0x0d, 0x01);
+ if (!ret)
+ ret = fc0013_writereg(priv, 0x10, 0x00);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ return ret;
+}
+EXPORT_SYMBOL(fc0013_rc_cal_reset);
+
+static int fc0013_set_vhf_track(struct fc0013_priv *priv, u32 freq)
+{
+ int ret;
+ u8 tmp;
+
+ ret = fc0013_readreg(priv, 0x1d, &tmp);
+ if (ret)
+ goto error_out;
+ tmp &= 0xe3;
+ if (freq <= 177500) { /* VHF Track: 7 */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x1c);
+ } else if (freq <= 184500) { /* VHF Track: 6 */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x18);
+ } else if (freq <= 191500) { /* VHF Track: 5 */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x14);
+ } else if (freq <= 198500) { /* VHF Track: 4 */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x10);
+ } else if (freq <= 205500) { /* VHF Track: 3 */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x0c);
+ } else if (freq <= 219500) { /* VHF Track: 2 */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x08);
+ } else if (freq < 300000) { /* VHF Track: 1 */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x04);
+ } else { /* UHF and GPS */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x1c);
+ }
+ if (ret)
+ goto error_out;
+error_out:
+ return ret;
+}
+
+static int fc0013_set_params(struct dvb_frontend *fe)
+{
+ struct fc0013_priv *priv = fe->tuner_priv;
+ int i, ret = 0;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 freq = p->frequency / 1000;
+ u32 delsys = p->delivery_system;
+ unsigned char reg[7], am, pm, multi, tmp;
+ unsigned long f_vco;
+ unsigned short xtal_freq_khz_2, xin, xdiv;
+ int vco_select = false;
+
+ if (fe->callback) {
+ ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER,
+ FC_FE_CALLBACK_VHF_ENABLE, (freq > 300000 ? 0 : 1));
+ if (ret)
+ goto exit;
+ }
+
+ switch (priv->xtal_freq) {
+ case FC_XTAL_27_MHZ:
+ xtal_freq_khz_2 = 27000 / 2;
+ break;
+ case FC_XTAL_36_MHZ:
+ xtal_freq_khz_2 = 36000 / 2;
+ break;
+ case FC_XTAL_28_8_MHZ:
+ default:
+ xtal_freq_khz_2 = 28800 / 2;
+ break;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ /* set VHF track */
+ ret = fc0013_set_vhf_track(priv, freq);
+ if (ret)
+ goto exit;
+
+ if (freq < 300000) {
+ /* enable VHF filter */
+ ret = fc0013_readreg(priv, 0x07, &tmp);
+ if (ret)
+ goto exit;
+ ret = fc0013_writereg(priv, 0x07, tmp | 0x10);
+ if (ret)
+ goto exit;
+
+ /* disable UHF & disable GPS */
+ ret = fc0013_readreg(priv, 0x14, &tmp);
+ if (ret)
+ goto exit;
+ ret = fc0013_writereg(priv, 0x14, tmp & 0x1f);
+ if (ret)
+ goto exit;
+ } else if (freq <= 862000) {
+ /* disable VHF filter */
+ ret = fc0013_readreg(priv, 0x07, &tmp);
+ if (ret)
+ goto exit;
+ ret = fc0013_writereg(priv, 0x07, tmp & 0xef);
+ if (ret)
+ goto exit;
+
+ /* enable UHF & disable GPS */
+ ret = fc0013_readreg(priv, 0x14, &tmp);
+ if (ret)
+ goto exit;
+ ret = fc0013_writereg(priv, 0x14, (tmp & 0x1f) | 0x40);
+ if (ret)
+ goto exit;
+ } else {
+ /* disable VHF filter */
+ ret = fc0013_readreg(priv, 0x07, &tmp);
+ if (ret)
+ goto exit;
+ ret = fc0013_writereg(priv, 0x07, tmp & 0xef);
+ if (ret)
+ goto exit;
+
+ /* disable UHF & enable GPS */
+ ret = fc0013_readreg(priv, 0x14, &tmp);
+ if (ret)
+ goto exit;
+ ret = fc0013_writereg(priv, 0x14, (tmp & 0x1f) | 0x20);
+ if (ret)
+ goto exit;
+ }
+
+ /* select frequency divider and the frequency of VCO */
+ if (freq < 37084) { /* freq * 96 < 3560000 */
+ multi = 96;
+ reg[5] = 0x82;
+ reg[6] = 0x00;
+ } else if (freq < 55625) { /* freq * 64 < 3560000 */
+ multi = 64;
+ reg[5] = 0x02;
+ reg[6] = 0x02;
+ } else if (freq < 74167) { /* freq * 48 < 3560000 */
+ multi = 48;
+ reg[5] = 0x42;
+ reg[6] = 0x00;
+ } else if (freq < 111250) { /* freq * 32 < 3560000 */
+ multi = 32;
+ reg[5] = 0x82;
+ reg[6] = 0x02;
+ } else if (freq < 148334) { /* freq * 24 < 3560000 */
+ multi = 24;
+ reg[5] = 0x22;
+ reg[6] = 0x00;
+ } else if (freq < 222500) { /* freq * 16 < 3560000 */
+ multi = 16;
+ reg[5] = 0x42;
+ reg[6] = 0x02;
+ } else if (freq < 296667) { /* freq * 12 < 3560000 */
+ multi = 12;
+ reg[5] = 0x12;
+ reg[6] = 0x00;
+ } else if (freq < 445000) { /* freq * 8 < 3560000 */
+ multi = 8;
+ reg[5] = 0x22;
+ reg[6] = 0x02;
+ } else if (freq < 593334) { /* freq * 6 < 3560000 */
+ multi = 6;
+ reg[5] = 0x0a;
+ reg[6] = 0x00;
+ } else if (freq < 950000) { /* freq * 4 < 3800000 */
+ multi = 4;
+ reg[5] = 0x12;
+ reg[6] = 0x02;
+ } else {
+ multi = 2;
+ reg[5] = 0x0a;
+ reg[6] = 0x02;
+ }
+
+ f_vco = freq * multi;
+
+ if (f_vco >= 3060000) {
+ reg[6] |= 0x08;
+ vco_select = true;
+ }
+
+ if (freq >= 45000) {
+ /* from the divided value (XDIV) determine the FA and FP values */
+ xdiv = (unsigned short)(f_vco / xtal_freq_khz_2);
+ if ((f_vco - xdiv * xtal_freq_khz_2) >= (xtal_freq_khz_2 / 2))
+ xdiv++;
+
+ pm = (unsigned char)(xdiv / 8);
+ am = (unsigned char)(xdiv - (8 * pm));
+
+ if (am < 2) {
+ reg[1] = am + 8;
+ reg[2] = pm - 1;
+ } else {
+ reg[1] = am;
+ reg[2] = pm;
+ }
+ } else {
+ /* fix for frequency less than 45 MHz */
+ reg[1] = 0x06;
+ reg[2] = 0x11;
+ }
+
+ /* fix clock out */
+ reg[6] |= 0x20;
+
+ /* from the VCO frequency determine XIN (the fractional part of the
+    delta-sigma PLL) and the divided value (XDIV) */
+ xin = (unsigned short)(f_vco - (f_vco / xtal_freq_khz_2) * xtal_freq_khz_2);
+ xin = (xin << 15) / xtal_freq_khz_2;
+ if (xin >= 16384)
+ xin += 32768;
+
+ reg[3] = xin >> 8;
+ reg[4] = xin & 0xff;
+
+ if (delsys == SYS_DVBT) {
+ reg[6] &= 0x3f; /* bits 6 and 7 describe the bandwidth */
+ switch (p->bandwidth_hz) {
+ case 6000000:
+ reg[6] |= 0x80;
+ break;
+ case 7000000:
+ reg[6] |= 0x40;
+ break;
+ case 8000000:
+ default:
+ break;
+ }
+ } else {
+ err("%s: modulation type not supported!", __func__);
+ return -EINVAL;
+ }
+
+ /* modified for Realtek demod */
+ reg[5] |= 0x07;
+
+ for (i = 1; i <= 6; i++) {
+ ret = fc0013_writereg(priv, i, reg[i]);
+ if (ret)
+ goto exit;
+ }
+
+ ret = fc0013_readreg(priv, 0x11, &tmp);
+ if (ret)
+ goto exit;
+ if (multi == 64)
+ ret = fc0013_writereg(priv, 0x11, tmp | 0x04);
+ else
+ ret = fc0013_writereg(priv, 0x11, tmp & 0xfb);
+ if (ret)
+ goto exit;
+
+ /* VCO Calibration */
+ ret = fc0013_writereg(priv, 0x0e, 0x80);
+ if (!ret)
+ ret = fc0013_writereg(priv, 0x0e, 0x00);
+
+ /* VCO Re-Calibration if needed */
+ if (!ret)
+ ret = fc0013_writereg(priv, 0x0e, 0x00);
+
+ if (!ret) {
+ msleep(10);
+ ret = fc0013_readreg(priv, 0x0e, &tmp);
+ }
+ if (ret)
+ goto exit;
+
+ /* vco selection */
+ tmp &= 0x3f;
+
+ if (vco_select) {
+ if (tmp > 0x3c) {
+ reg[6] &= ~0x08;
+ ret = fc0013_writereg(priv, 0x06, reg[6]);
+ if (!ret)
+ ret = fc0013_writereg(priv, 0x0e, 0x80);
+ if (!ret)
+ ret = fc0013_writereg(priv, 0x0e, 0x00);
+ }
+ } else {
+ if (tmp < 0x02) {
+ reg[6] |= 0x08;
+ ret = fc0013_writereg(priv, 0x06, reg[6]);
+ if (!ret)
+ ret = fc0013_writereg(priv, 0x0e, 0x80);
+ if (!ret)
+ ret = fc0013_writereg(priv, 0x0e, 0x00);
+ }
+ }
+
+ priv->frequency = p->frequency;
+ priv->bandwidth = p->bandwidth_hz;
+
+exit:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+ if (ret)
+ warn("%s: failed: %d", __func__, ret);
+ return ret;
+}
+
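The divider ladder and the XDIV/XIN arithmetic in fc0013_set_params() above are easier to follow with concrete numbers. The standalone sketch below repeats the same integer math for one assumed tuning request, 482 MHz with a 28.8 MHz crystal (both values chosen purely for illustration):

/* Repeats the PLL arithmetic of fc0013_set_params() for 482000 kHz. */
#include <stdio.h>

int main(void)
{
    unsigned long freq = 482000;                /* kHz, sample channel */
    unsigned short xtal_freq_khz_2 = 28800 / 2; /* 28.8 MHz crystal */
    unsigned char multi, pm, am, reg1, reg2;
    unsigned long f_vco;
    unsigned short xdiv, xin;

    multi = 6;                          /* 445000 <= freq < 593334 */
    f_vco = freq * multi;               /* 2892000 kHz */

    xdiv = (unsigned short)(f_vco / xtal_freq_khz_2);
    if ((f_vco - xdiv * xtal_freq_khz_2) >= (xtal_freq_khz_2 / 2))
        xdiv++;                         /* round to nearest: 201 */

    pm = (unsigned char)(xdiv / 8);     /* 25 */
    am = (unsigned char)(xdiv - 8 * pm);/* 1 */
    if (am < 2) {
        reg1 = am + 8;                  /* 9 */
        reg2 = pm - 1;                  /* 24 */
    } else {
        reg1 = am;
        reg2 = pm;
    }

    /* fractional part of the delta-sigma PLL, Q15 */
    xin = (unsigned short)(f_vco - (f_vco / xtal_freq_khz_2) * xtal_freq_khz_2);
    xin = (xin << 15) / xtal_freq_khz_2;
    if (xin >= 16384)
        xin += 32768;                   /* 60074 = 0xeaaa */

    printf("reg1=%u reg2=%u reg3=0x%02x reg4=0x%02x\n",
           reg1, reg2, xin >> 8, xin & 0xff);
    return 0;
}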
+static int fc0013_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct fc0013_priv *priv = fe->tuner_priv;
+ *frequency = priv->frequency;
+ return 0;
+}
+
+static int fc0013_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ /* the driver always reports an IF of 0 (zero-IF assumed) */
+ *frequency = 0;
+ return 0;
+}
+
+static int fc0013_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
+{
+ struct fc0013_priv *priv = fe->tuner_priv;
+ *bandwidth = priv->bandwidth;
+ return 0;
+}
+
+#define INPUT_ADC_LEVEL -8
+
+static int fc0013_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ struct fc0013_priv *priv = fe->tuner_priv;
+ int ret;
+ unsigned char tmp;
+ int int_temp, lna_gain, int_lna, tot_agc_gain, power;
+ const int fc0013_lna_gain_table[] = {
+ /* low gain */
+ -63, -58, -99, -73,
+ -63, -65, -54, -60,
+ /* middle gain */
+ 71, 70, 68, 67,
+ 65, 63, 61, 58,
+ /* high gain */
+ 197, 191, 188, 186,
+ 184, 182, 181, 179,
+ };
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ ret = fc0013_writereg(priv, 0x13, 0x00);
+ if (ret)
+ goto err;
+
+ ret = fc0013_readreg(priv, 0x13, &tmp);
+ if (ret)
+ goto err;
+ int_temp = tmp;
+
+ ret = fc0013_readreg(priv, 0x14, &tmp);
+ if (ret)
+ goto err;
+ lna_gain = tmp & 0x1f;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ if (lna_gain < ARRAY_SIZE(fc0013_lna_gain_table)) {
+ int_lna = fc0013_lna_gain_table[lna_gain];
+ tot_agc_gain = (abs((int_temp >> 5) - 7) - 2 +
+ (int_temp & 0x1f)) * 2;
+ power = INPUT_ADC_LEVEL - tot_agc_gain - int_lna / 10;
+
+ if (power >= 45)
+ *strength = 255; /* 100% */
+ else if (power < -95)
+ *strength = 0;
+ else
+ *strength = (power + 95) * 255 / 140;
+
+ *strength |= *strength << 8;
+ } else {
+ ret = -1;
+ }
+
+ goto exit;
+
+err:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+exit:
+ if (ret)
+ warn("%s: failed: %d", __func__, ret);
+ return ret;
+}
+
+static const struct dvb_tuner_ops fc0013_tuner_ops = {
+ .info = {
+ .name = "Fitipower FC0013",
+
+ .frequency_min = 37000000, /* estimate */
+ .frequency_max = 1680000000, /* CHECK */
+ .frequency_step = 0,
+ },
+
+ .release = fc0013_release,
+
+ .init = fc0013_init,
+ .sleep = fc0013_sleep,
+
+ .set_params = fc0013_set_params,
+
+ .get_frequency = fc0013_get_frequency,
+ .get_if_frequency = fc0013_get_if_frequency,
+ .get_bandwidth = fc0013_get_bandwidth,
+
+ .get_rf_strength = fc0013_get_rf_strength,
+};
+
+struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, u8 i2c_address, int dual_master,
+ enum fc001x_xtal_freq xtal_freq)
+{
+ struct fc0013_priv *priv = NULL;
+
+ priv = kzalloc(sizeof(struct fc0013_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return NULL;
+
+ priv->i2c = i2c;
+ priv->dual_master = dual_master;
+ priv->addr = i2c_address;
+ priv->xtal_freq = xtal_freq;
+
+ info("Fitipower FC0013 successfully attached.");
+
+ fe->tuner_priv = priv;
+
+ memcpy(&fe->ops.tuner_ops, &fc0013_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ return fe;
+}
+EXPORT_SYMBOL(fc0013_attach);
+
+MODULE_DESCRIPTION("Fitipower FC0013 silicon tuner driver");
+MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.2");
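fc0013_get_rf_strength() above turns the estimated input power into the 16-bit value the DVB API expects by clamping to the -95..45 dBm range, scaling linearly to 0..255 and mirroring the result into the high byte. A standalone sketch of just that mapping (the -40 dBm input is an arbitrary assumption):

#include <stdio.h>

/* Mirror of the power -> strength mapping in fc0013_get_rf_strength(). */
static unsigned short fc0013_power_to_strength(int power)
{
    unsigned short strength;

    if (power >= 45)
        strength = 255;                 /* 100% */
    else if (power < -95)
        strength = 0;
    else
        strength = (power + 95) * 255 / 140;

    return strength | (strength << 8);  /* spread 0..255 over 0..65535 */
}

int main(void)
{
    printf("-40 dBm -> 0x%04x\n", (unsigned)fc0013_power_to_strength(-40));
    return 0;
}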
diff --git a/drivers/media/common/tuners/fc0013.h b/drivers/media/common/tuners/fc0013.h
new file mode 100644
index 000000000000..594efd64aeec
--- /dev/null
+++ b/drivers/media/common/tuners/fc0013.h
@@ -0,0 +1,57 @@
+/*
+ * Fitipower FC0013 tuner driver
+ *
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef _FC0013_H_
+#define _FC0013_H_
+
+#include "dvb_frontend.h"
+#include "fc001x-common.h"
+
+#if defined(CONFIG_MEDIA_TUNER_FC0013) || \
+ (defined(CONFIG_MEDIA_TUNER_FC0013_MODULE) && defined(MODULE))
+extern struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ u8 i2c_address, int dual_master,
+ enum fc001x_xtal_freq xtal_freq);
+extern int fc0013_rc_cal_add(struct dvb_frontend *fe, int rc_val);
+extern int fc0013_rc_cal_reset(struct dvb_frontend *fe);
+#else
+static inline struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ u8 i2c_address, int dual_master,
+ enum fc001x_xtal_freq xtal_freq)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+static inline int fc0013_rc_cal_add(struct dvb_frontend *fe, int rc_val)
+{
+ return 0;
+}
+
+static inline int fc0013_rc_cal_reset(struct dvb_frontend *fe)
+{
+ return 0;
+}
+#endif
+
+#endif
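fc0013.h follows the usual media-tuner convention: with the tuner enabled in Kconfig the real fc0013_attach() is declared, otherwise an inline stub warns and returns NULL, so bridge drivers may call it unconditionally. A hedged sketch of how a hypothetical bridge driver's tuner_attach callback could use it; the I2C address, dual_master flag and crystal choice are assumptions, not values required by this patch:

/* Illustrative only; assumes the dvb-usb framework headers are in scope. */
#include "fc0013.h"

static int example_tuner_attach(struct dvb_usb_adapter *adap)
{
    /* 0xc6 >> 1, single master and a 28.8 MHz crystal are assumed values */
    if (fc0013_attach(adap->fe_adap[0].fe, &adap->dev->i2c_adap,
                      0xc6 >> 1, 0, FC_XTAL_28_8_MHZ) == NULL)
        return -ENODEV;

    return 0;
}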
diff --git a/drivers/media/common/tuners/fc001x-common.h b/drivers/media/common/tuners/fc001x-common.h
new file mode 100644
index 000000000000..718818156934
--- /dev/null
+++ b/drivers/media/common/tuners/fc001x-common.h
@@ -0,0 +1,39 @@
+/*
+ * Fitipower FC0012 & FC0013 tuner driver - common defines
+ *
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _FC001X_COMMON_H_
+#define _FC001X_COMMON_H_
+
+enum fc001x_xtal_freq {
+ FC_XTAL_27_MHZ, /* 27000000 */
+ FC_XTAL_28_8_MHZ, /* 28800000 */
+ FC_XTAL_36_MHZ, /* 36000000 */
+};
+
+/*
+ * enum fc001x_fe_callback_commands - Frontend callbacks
+ *
+ * @FC_FE_CALLBACK_VHF_ENABLE: enable VHF or UHF
+ */
+enum fc001x_fe_callback_commands {
+ FC_FE_CALLBACK_VHF_ENABLE,
+};
+
+#endif
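FC_FE_CALLBACK_VHF_ENABLE is the command fc0013_set_params() passes to fe->callback() so the bridge can switch an external VHF/UHF antenna path (argument 1 below 300 MHz, 0 otherwise). A hedged sketch of a bridge-side handler; the GPIO helper is invented, only the command and argument convention come from the tuner code above:

/* Hypothetical bridge-side frontend callback. */
static int example_frontend_callback(void *adapter_priv, int component,
                                     int cmd, int arg)
{
    struct i2c_adapter *adap = adapter_priv; /* fc0013 passes priv->i2c */

    if (component != DVB_FRONTEND_COMPONENT_TUNER)
        return -EINVAL;

    switch (cmd) {
    case FC_FE_CALLBACK_VHF_ENABLE:
        /* arg == 1 selects the VHF path, 0 the UHF path */
        return example_set_vhf_path_gpio(adap, arg);
    default:
        return -EINVAL;
    }
}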
diff --git a/drivers/media/common/tuners/tua9001.c b/drivers/media/common/tuners/tua9001.c
new file mode 100644
index 000000000000..de2607084672
--- /dev/null
+++ b/drivers/media/common/tuners/tua9001.c
@@ -0,0 +1,215 @@
+/*
+ * Infineon TUA 9001 silicon tuner driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "tua9001.h"
+#include "tua9001_priv.h"
+
+/* write register */
+static int tua9001_wr_reg(struct tua9001_priv *priv, u8 reg, u16 val)
+{
+ int ret;
+ u8 buf[3] = { reg, (val >> 8) & 0xff, (val >> 0) & 0xff };
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg->i2c_addr,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
+ ret = i2c_transfer(priv->i2c, msg, 1);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ printk(KERN_WARNING "%s: I2C wr failed=%d reg=%02x\n",
+ __func__, ret, reg);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+static int tua9001_release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+
+ return 0;
+}
+
+static int tua9001_init(struct dvb_frontend *fe)
+{
+ struct tua9001_priv *priv = fe->tuner_priv;
+ int ret = 0;
+ u8 i;
+ struct reg_val data[] = {
+ { 0x1e, 0x6512 },
+ { 0x25, 0xb888 },
+ { 0x39, 0x5460 },
+ { 0x3b, 0x00c0 },
+ { 0x3a, 0xf000 },
+ { 0x08, 0x0000 },
+ { 0x32, 0x0030 },
+ { 0x41, 0x703a },
+ { 0x40, 0x1c78 },
+ { 0x2c, 0x1c00 },
+ { 0x36, 0xc013 },
+ { 0x37, 0x6f18 },
+ { 0x27, 0x0008 },
+ { 0x2a, 0x0001 },
+ { 0x34, 0x0a40 },
+ };
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c-gate */
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ ret = tua9001_wr_reg(priv, data[i].reg, data[i].val);
+ if (ret)
+ break;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c-gate */
+
+ if (ret < 0)
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int tua9001_set_params(struct dvb_frontend *fe)
+{
+ struct tua9001_priv *priv = fe->tuner_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, i;
+ u16 val;
+ u32 frequency;
+ struct reg_val data[2];
+
+ pr_debug("%s: delivery_system=%d frequency=%d bandwidth_hz=%d\n",
+ __func__, c->delivery_system, c->frequency,
+ c->bandwidth_hz);
+
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ switch (c->bandwidth_hz) {
+ case 8000000:
+ val = 0x0000;
+ break;
+ case 7000000:
+ val = 0x1000;
+ break;
+ case 6000000:
+ val = 0x2000;
+ break;
+ case 5000000:
+ val = 0x3000;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data[0].reg = 0x04;
+ data[0].val = val;
+
+ frequency = (c->frequency - 150000000);
+ frequency /= 100;
+ frequency *= 48;
+ frequency /= 10000;
+
+ data[1].reg = 0x1f;
+ data[1].val = frequency;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c-gate */
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ ret = tua9001_wr_reg(priv, data[i].reg, data[i].val);
+ if (ret < 0)
+ break;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c-gate */
+
+err:
+ if (ret < 0)
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int tua9001_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ *frequency = 0; /* Zero-IF */
+
+ return 0;
+}
+
+static const struct dvb_tuner_ops tua9001_tuner_ops = {
+ .info = {
+ .name = "Infineon TUA 9001",
+
+ .frequency_min = 170000000,
+ .frequency_max = 862000000,
+ .frequency_step = 0,
+ },
+
+ .release = tua9001_release,
+
+ .init = tua9001_init,
+ .set_params = tua9001_set_params,
+
+ .get_if_frequency = tua9001_get_if_frequency,
+};
+
+struct dvb_frontend *tua9001_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, struct tua9001_config *cfg)
+{
+ struct tua9001_priv *priv = NULL;
+
+ priv = kzalloc(sizeof(struct tua9001_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return NULL;
+
+ priv->cfg = cfg;
+ priv->i2c = i2c;
+
+ printk(KERN_INFO "Infineon TUA 9001 successfully attached\n");
+
+ memcpy(&fe->ops.tuner_ops, &tua9001_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ fe->tuner_priv = priv;
+ return fe;
+}
+EXPORT_SYMBOL(tua9001_attach);
+
+MODULE_DESCRIPTION("Infineon TUA 9001 silicon tuner driver");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_LICENSE("GPL");
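The register value tua9001_set_params() writes to reg 0x1f is derived from the RF frequency in stages so the intermediate product stays within 32 bits; the net scale works out to 48 counts per MHz above 150 MHz. A standalone sketch with one assumed channel (666 MHz):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t frequency = 666000000;     /* Hz; arbitrary sample channel */

    /* Same staged integer math as tua9001_set_params(): dividing by 100
     * before multiplying by 48 keeps the intermediate inside 32 bits. */
    frequency = frequency - 150000000;
    frequency /= 100;
    frequency *= 48;
    frequency /= 10000;

    /* 48 units per MHz above 150 MHz -> 516 MHz * 48 = 24768 (0x60c0) */
    printf("reg 0x1f = %u (0x%04x)\n", (unsigned)frequency, (unsigned)frequency);
    return 0;
}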
diff --git a/drivers/media/common/tuners/tua9001.h b/drivers/media/common/tuners/tua9001.h
new file mode 100644
index 000000000000..38d6ae76b1d6
--- /dev/null
+++ b/drivers/media/common/tuners/tua9001.h
@@ -0,0 +1,46 @@
+/*
+ * Infineon TUA 9001 silicon tuner driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef TUA9001_H
+#define TUA9001_H
+
+#include "dvb_frontend.h"
+
+struct tua9001_config {
+ /*
+ * I2C address
+ */
+ u8 i2c_addr;
+};
+
+#if defined(CONFIG_MEDIA_TUNER_TUA9001) || \
+ (defined(CONFIG_MEDIA_TUNER_TUA9001_MODULE) && defined(MODULE))
+extern struct dvb_frontend *tua9001_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, struct tua9001_config *cfg);
+#else
+static inline struct dvb_frontend *tua9001_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, struct tua9001_config *cfg)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/common/tuners/tua9001_priv.h b/drivers/media/common/tuners/tua9001_priv.h
new file mode 100644
index 000000000000..73cc1ce0575c
--- /dev/null
+++ b/drivers/media/common/tuners/tua9001_priv.h
@@ -0,0 +1,34 @@
+/*
+ * Infineon TUA 9001 silicon tuner driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef TUA9001_PRIV_H
+#define TUA9001_PRIV_H
+
+struct reg_val {
+ u8 reg;
+ u16 val;
+};
+
+struct tua9001_priv {
+ struct tua9001_config *cfg;
+ struct i2c_adapter *i2c;
+};
+
+#endif
diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c
index eab2ea424200..dcca42ca57be 100644
--- a/drivers/media/common/tuners/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
@@ -54,7 +54,7 @@ struct xc5000_priv {
struct list_head hybrid_tuner_instance_list;
u32 if_khz;
- u32 xtal_khz;
+ u16 xtal_khz;
u32 freq_hz;
u32 bandwidth;
u8 video_standard;
@@ -631,7 +631,10 @@ static int xc5000_fwupload(struct dvb_frontend *fe)
ret = xc_load_i2c_sequence(fe, fw->data);
if (XC_RESULT_SUCCESS == ret)
ret = xc_set_xtal(fe);
- printk(KERN_INFO "xc5000: firmware upload complete...\n");
+ if (XC_RESULT_SUCCESS == ret)
+ printk(KERN_INFO "xc5000: firmware upload complete...\n");
+ else
+ printk(KERN_ERR "xc5000: firmware upload failed...\n");
}
out:
diff --git a/drivers/media/common/tuners/xc5000.h b/drivers/media/common/tuners/xc5000.h
index 39a73bf01406..b1a547494625 100644
--- a/drivers/media/common/tuners/xc5000.h
+++ b/drivers/media/common/tuners/xc5000.h
@@ -34,7 +34,7 @@ struct xc5000_config {
u8 i2c_address;
u32 if_khz;
u8 radio_input;
- u32 xtal_khz;
+ u16 xtal_khz;
int chip_id;
};
diff --git a/drivers/media/dvb/bt8xx/dst_ca.c b/drivers/media/dvb/bt8xx/dst_ca.c
index 48e48e8af55a..66f52f116b60 100644
--- a/drivers/media/dvb/bt8xx/dst_ca.c
+++ b/drivers/media/dvb/bt8xx/dst_ca.c
@@ -477,7 +477,6 @@ static int dst_check_ca_pmt(struct dst_state *state, struct ca_msg *p_ca_message
static int ca_send_message(struct dst_state *state, struct ca_msg *p_ca_message, void __user *arg)
{
int i = 0;
- unsigned int ca_message_header_len;
u32 command = 0;
struct ca_msg *hw_buffer;
@@ -496,7 +495,6 @@ static int ca_send_message(struct dst_state *state, struct ca_msg *p_ca_message,
if (p_ca_message->msg) {
- ca_message_header_len = p_ca_message->length; /* Restore it back when you are done */
/* EN50221 tag */
command = 0;
diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
index d88c4aa7d24d..131b938e9e81 100644
--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
@@ -31,7 +31,6 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/timer.h>
-#include <linux/version.h>
#include <linux/i2c.h>
#include <linux/swab.h>
#include <linux/vmalloc.h>
@@ -1696,7 +1695,7 @@ static struct pci_driver ddb_pci_driver = {
.name = "DDBridge",
.id_table = ddb_id_tbl,
.probe = ddb_probe,
- .remove = ddb_remove,
+ .remove = __devexit_p(ddb_remove),
};
static __init int module_init_ddbridge(void)
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index faa3671b649e..d82469f842e2 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -568,6 +568,16 @@ void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count)
}
EXPORT_SYMBOL(dvb_dmx_swfilter_204);
+void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count)
+{
+ spin_lock(&demux->lock);
+
+ demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts, DMX_OK);
+
+ spin_unlock(&demux->lock);
+}
+EXPORT_SYMBOL(dvb_dmx_swfilter_raw);
+
static struct dvb_demux_filter *dvb_dmx_filter_alloc(struct dvb_demux *demux)
{
int i;
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index a7d876fd02dd..fa7188a253aa 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -145,5 +145,7 @@ void dvb_dmx_swfilter_packets(struct dvb_demux *dvbdmx, const u8 *buf,
void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count);
void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf,
size_t count);
+void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf,
+ size_t count);
#endif /* _DVB_DEMUX_H_ */
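The new dvb_dmx_swfilter_raw() export hands a buffer straight to the feed's TS callback under the demux spinlock, skipping the sync-byte scanning done by dvb_dmx_swfilter(); it is intended for hardware that already delivers aligned TS for a raw feed. A hedged sketch of how a hypothetical bridge driver's completion path might call it (the device structure and field names are invented):

/* Hypothetical USB-bridge completion handler; only dvb_dmx_swfilter_raw()
 * comes from the patch above. */
static void example_urb_complete(struct example_dev *dev, u8 *buf, int len)
{
    /* hand the already TS-aligned buffer to the software demux */
    dvb_dmx_swfilter_raw(&dev->demux, buf, len);
}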
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index cb888d835a89..aebcdf221dda 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -182,13 +182,13 @@ static enum dvbv3_emulation_type dvbv3_type(u32 delivery_system)
case SYS_DMBTH:
return DVBV3_OFDM;
case SYS_ATSC:
+ case SYS_ATSCMH:
case SYS_DVBC_ANNEX_B:
return DVBV3_ATSC;
case SYS_UNDEFINED:
case SYS_ISDBC:
case SYS_DVBH:
case SYS_DAB:
- case SYS_ATSCMH:
default:
/*
* Doesn't know how to emulate those types and/or
@@ -1030,6 +1030,25 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_HIERARCHY, 0, 0),
_DTV_CMD(DTV_ENUM_DELSYS, 0, 0),
+
+ _DTV_CMD(DTV_ATSCMH_PARADE_ID, 1, 0),
+ _DTV_CMD(DTV_ATSCMH_RS_FRAME_ENSEMBLE, 1, 0),
+
+ _DTV_CMD(DTV_ATSCMH_FIC_VER, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_PARADE_ID, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_NOG, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_TNOG, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_SGN, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_PRC, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_RS_FRAME_MODE, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_RS_FRAME_ENSEMBLE, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_PRI, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_SEC, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_SCCC_BLOCK_MODE, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_A, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_B, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_C, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_D, 0, 0),
};
static void dtv_property_dump(struct dtv_property *tvp)
@@ -1121,6 +1140,8 @@ static int dtv_property_cache_sync(struct dvb_frontend *fe,
case DVBV3_ATSC:
dprintk("%s() Preparing ATSC req\n", __func__);
c->modulation = p->u.vsb.modulation;
+ if (c->delivery_system == SYS_ATSCMH)
+ break;
if ((c->modulation == VSB_8) || (c->modulation == VSB_16))
c->delivery_system = SYS_ATSC;
else
@@ -1367,6 +1388,54 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
case DTV_DVBT2_PLP_ID:
tvp->u.data = c->dvbt2_plp_id;
break;
+
+ /* ATSC-MH */
+ case DTV_ATSCMH_FIC_VER:
+ tvp->u.data = fe->dtv_property_cache.atscmh_fic_ver;
+ break;
+ case DTV_ATSCMH_PARADE_ID:
+ tvp->u.data = fe->dtv_property_cache.atscmh_parade_id;
+ break;
+ case DTV_ATSCMH_NOG:
+ tvp->u.data = fe->dtv_property_cache.atscmh_nog;
+ break;
+ case DTV_ATSCMH_TNOG:
+ tvp->u.data = fe->dtv_property_cache.atscmh_tnog;
+ break;
+ case DTV_ATSCMH_SGN:
+ tvp->u.data = fe->dtv_property_cache.atscmh_sgn;
+ break;
+ case DTV_ATSCMH_PRC:
+ tvp->u.data = fe->dtv_property_cache.atscmh_prc;
+ break;
+ case DTV_ATSCMH_RS_FRAME_MODE:
+ tvp->u.data = fe->dtv_property_cache.atscmh_rs_frame_mode;
+ break;
+ case DTV_ATSCMH_RS_FRAME_ENSEMBLE:
+ tvp->u.data = fe->dtv_property_cache.atscmh_rs_frame_ensemble;
+ break;
+ case DTV_ATSCMH_RS_CODE_MODE_PRI:
+ tvp->u.data = fe->dtv_property_cache.atscmh_rs_code_mode_pri;
+ break;
+ case DTV_ATSCMH_RS_CODE_MODE_SEC:
+ tvp->u.data = fe->dtv_property_cache.atscmh_rs_code_mode_sec;
+ break;
+ case DTV_ATSCMH_SCCC_BLOCK_MODE:
+ tvp->u.data = fe->dtv_property_cache.atscmh_sccc_block_mode;
+ break;
+ case DTV_ATSCMH_SCCC_CODE_MODE_A:
+ tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_a;
+ break;
+ case DTV_ATSCMH_SCCC_CODE_MODE_B:
+ tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_b;
+ break;
+ case DTV_ATSCMH_SCCC_CODE_MODE_C:
+ tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_c;
+ break;
+ case DTV_ATSCMH_SCCC_CODE_MODE_D:
+ tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_d;
+ break;
+
default:
return -EINVAL;
}
@@ -1708,6 +1777,15 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
case DTV_DVBT2_PLP_ID:
c->dvbt2_plp_id = tvp->u.data;
break;
+
+ /* ATSC-MH */
+ case DTV_ATSCMH_PARADE_ID:
+ fe->dtv_property_cache.atscmh_parade_id = tvp->u.data;
+ break;
+ case DTV_ATSCMH_RS_FRAME_ENSEMBLE:
+ fe->dtv_property_cache.atscmh_rs_frame_ensemble = tvp->u.data;
+ break;
+
default:
return -EINVAL;
}
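With the DTV_ATSCMH_* commands wired into dtv_property_process_get()/_set() above, the ATSC-MH status fields become readable through the ordinary S2API property interface. A hedged userspace sketch; the device path is an assumption, and it requires a kernel whose frontend.h carries the ATSC-MH property IDs added by this series:

/* Userspace sketch: read two ATSC-MH status properties via S2API. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

int main(void)
{
    struct dtv_property p[2] = {
        { .cmd = DTV_ATSCMH_FIC_VER },
        { .cmd = DTV_ATSCMH_PARADE_ID },
    };
    struct dtv_properties props = { .num = 2, .props = p };
    int fd = open("/dev/dvb/adapter0/frontend0", O_RDONLY); /* assumed path */

    if (fd < 0 || ioctl(fd, FE_GET_PROPERTY, &props) < 0) {
        perror("FE_GET_PROPERTY");
        return 1;
    }

    printf("FIC version %u, parade id %u\n", p[0].u.data, p[1].u.data);
    return 0;
}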
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h
index d63a8215fe03..e929d5697b87 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.h
@@ -372,6 +372,24 @@ struct dtv_frontend_properties {
/* DVB-T2 specifics */
u32 dvbt2_plp_id;
+
+ /* ATSC-MH specifics */
+ u8 atscmh_fic_ver;
+ u8 atscmh_parade_id;
+ u8 atscmh_nog;
+ u8 atscmh_tnog;
+ u8 atscmh_sgn;
+ u8 atscmh_prc;
+
+ u8 atscmh_rs_frame_mode;
+ u8 atscmh_rs_frame_ensemble;
+ u8 atscmh_rs_code_mode_pri;
+ u8 atscmh_rs_code_mode_sec;
+ u8 atscmh_sccc_block_mode;
+ u8 atscmh_sccc_code_mode_a;
+ u8 atscmh_sccc_code_mode_b;
+ u8 atscmh_sccc_code_mode_c;
+ u8 atscmh_sccc_code_mode_d;
};
struct dvb_frontend {
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 63bf45679f98..a26949336b3d 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -409,6 +409,7 @@ config DVB_USB_MXL111SF
tristate "MxL111SF DTV USB2.0 support"
depends on DVB_USB
select DVB_LGDT3305 if !DVB_FE_CUSTOMISE
+ select DVB_LG2160 if !DVB_FE_CUSTOMISE
select VIDEO_TVEEPROM
help
Say Y here to support the MxL111SF USB2.0 DTV receiver.
@@ -422,3 +423,15 @@ config DVB_USB_RTL28XXU
select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE
help
Say Y here to support the Realtek RTL28xxU DVB USB receiver.
+
+config DVB_USB_AF9035
+ tristate "Afatech AF9035 DVB-T USB2.0 support"
+ depends on DVB_USB
+ select DVB_AF9033
+ select MEDIA_TUNER_TUA9001 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_FC0011 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_MXL5007T if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_TDA18218 if !MEDIA_TUNER_CUSTOMISE
+ help
+ Say Y here to support the Afatech AF9035 based DVB USB receiver.
+
diff --git a/drivers/media/dvb/dvb-usb/Makefile b/drivers/media/dvb/dvb-usb/Makefile
index b76acb5387e6..b667ac39a4e3 100644
--- a/drivers/media/dvb/dvb-usb/Makefile
+++ b/drivers/media/dvb/dvb-usb/Makefile
@@ -110,6 +110,9 @@ obj-$(CONFIG_DVB_USB_MXL111SF) += mxl111sf-tuner.o
dvb-usb-rtl28xxu-objs = rtl28xxu.o
obj-$(CONFIG_DVB_USB_RTL28XXU) += dvb-usb-rtl28xxu.o
+dvb-usb-af9035-objs = af9035.o
+obj-$(CONFIG_DVB_USB_AF9035) += dvb-usb-af9035.o
+
ccflags-y += -I$(srctree)/drivers/media/dvb/dvb-core
ccflags-y += -I$(srctree)/drivers/media/dvb/frontends/
# due to tuner-xc3028
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index 7e70ea50ef26..677fed79b01e 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -244,8 +244,7 @@ static int af9015_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
u8 uninitialized_var(mbox), addr_len;
struct req_t req;
-/* TODO: implement bus lock
-
+/*
 The bus lock is needed because there are two tuners that use the same I2C address.
 Because of that, the only way to select the correct tuner is to use the demodulator I2C-gate.
@@ -789,7 +788,7 @@ static void af9015_set_remote_config(struct usb_device *udev,
/* try to load remote based USB ID */
if (!props->rc.core.rc_codes)
props->rc.core.rc_codes = af9015_rc_setup_match(
- (vid << 16) + pid, af9015_rc_setup_usbids);
+ (vid << 16) | pid, af9015_rc_setup_usbids);
/* try to load remote based USB iManufacturer string */
if (!props->rc.core.rc_codes && vid == USB_VID_AFATECH) {
@@ -1220,8 +1219,8 @@ static int af9015_af9013_frontend_attach(struct dvb_usb_adapter *adap)
}
/* attach demodulator */
- adap->fe_adap[0].fe = dvb_attach(af9013_attach, &af9015_af9013_config[adap->id],
- &adap->dev->i2c_adap);
+ adap->fe_adap[0].fe = dvb_attach(af9013_attach,
+ &af9015_af9013_config[adap->id], &adap->dev->i2c_adap);
/*
* AF9015 firmware does not like if it gets interrupted by I2C adapter
@@ -1324,14 +1323,15 @@ static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
switch (af9015_af9013_config[adap->id].tuner) {
case AF9013_TUNER_MT2060:
case AF9013_TUNER_MT2060_2:
- ret = dvb_attach(mt2060_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap,
- &af9015_mt2060_config,
+ ret = dvb_attach(mt2060_attach, adap->fe_adap[0].fe,
+ &adap->dev->i2c_adap, &af9015_mt2060_config,
af9015_config.mt2060_if1[adap->id])
== NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_QT1010:
case AF9013_TUNER_QT1010A:
- ret = dvb_attach(qt1010_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap,
+ ret = dvb_attach(qt1010_attach, adap->fe_adap[0].fe,
+ &adap->dev->i2c_adap,
&af9015_qt1010_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_TDA18271:
@@ -1434,69 +1434,85 @@ enum af9015_usb_table_entry {
};
static struct usb_device_id af9015_usb_table[] = {
- [AFATECH_9015] =
- {USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9015_9015)},
- [AFATECH_9016] =
- {USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9015_9016)},
- [WINFAST_DTV_GOLD] =
- {USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_GOLD)},
- [PINNACLE_PCTV_71E] =
- {USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV71E)},
- [KWORLD_PLUSTV_399U] =
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U)},
- [TINYTWIN] = {USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_TINYTWIN)},
- [AZUREWAVE_TU700] =
- {USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_AZUREWAVE_AD_TU700)},
- [TERRATEC_AF9015] = {USB_DEVICE(USB_VID_TERRATEC,
+ [AFATECH_9015] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9015_9015)},
+ [AFATECH_9016] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9015_9016)},
+ [WINFAST_DTV_GOLD] = {
+ USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_GOLD)},
+ [PINNACLE_PCTV_71E] = {
+ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV71E)},
+ [KWORLD_PLUSTV_399U] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U)},
+ [TINYTWIN] = {
+ USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_TINYTWIN)},
+ [AZUREWAVE_TU700] = {
+ USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_AZUREWAVE_AD_TU700)},
+ [TERRATEC_AF9015] = {
+ USB_DEVICE(USB_VID_TERRATEC,
USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2)},
- [KWORLD_PLUSTV_PC160] =
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_PC160_2T)},
- [AVERTV_VOLAR_X] =
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_X)},
- [XTENSIONS_380U] =
- {USB_DEVICE(USB_VID_XTENSIONS, USB_PID_XTENSIONS_XD_380)},
- [MSI_DIGIVOX_DUO] =
- {USB_DEVICE(USB_VID_MSI_2, USB_PID_MSI_DIGIVOX_DUO)},
- [AVERTV_VOLAR_X_REV2] =
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_X_2)},
- [TELESTAR_STARSTICK_2] =
- {USB_DEVICE(USB_VID_TELESTAR, USB_PID_TELESTAR_STARSTICK_2)},
- [AVERMEDIA_A309_USB] =
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A309)},
- [MSI_DIGIVOX_MINI_III] =
- {USB_DEVICE(USB_VID_MSI_2, USB_PID_MSI_DIGI_VOX_MINI_III)},
- [KWORLD_E396] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U)},
- [KWORLD_E39B] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_2)},
- [KWORLD_E395] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_3)},
- [TREKSTOR_DVBT] = {USB_DEVICE(USB_VID_AFATECH, USB_PID_TREKSTOR_DVBT)},
- [AVERTV_A850] = {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850)},
- [AVERTV_A805] = {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A805)},
- [CONCEPTRONIC_CTVDIGRCU] =
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CONCEPTRONIC_CTVDIGRCU)},
- [KWORLD_MC810] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_MC810)},
- [GENIUS_TVGO_DVB_T03] =
- {USB_DEVICE(USB_VID_KYE, USB_PID_GENIUS_TVGO_DVB_T03)},
- [KWORLD_399U_2] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U_2)},
- [KWORLD_PC160_T] =
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_PC160_T)},
- [SVEON_STV20] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV20)},
- [TINYTWIN_2] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_TINYTWIN_2)},
- [WINFAST_DTV2000DS] =
- {USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV2000DS)},
- [KWORLD_UB383_T] =
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB383_T)},
- [KWORLD_E39A] =
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_4)},
- [AVERMEDIA_A815M] =
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A815M)},
- [CINERGY_T_STICK_RC] = {USB_DEVICE(USB_VID_TERRATEC,
+ [KWORLD_PLUSTV_PC160] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_PC160_2T)},
+ [AVERTV_VOLAR_X] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_X)},
+ [XTENSIONS_380U] = {
+ USB_DEVICE(USB_VID_XTENSIONS, USB_PID_XTENSIONS_XD_380)},
+ [MSI_DIGIVOX_DUO] = {
+ USB_DEVICE(USB_VID_MSI_2, USB_PID_MSI_DIGIVOX_DUO)},
+ [AVERTV_VOLAR_X_REV2] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_X_2)},
+ [TELESTAR_STARSTICK_2] = {
+ USB_DEVICE(USB_VID_TELESTAR, USB_PID_TELESTAR_STARSTICK_2)},
+ [AVERMEDIA_A309_USB] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A309)},
+ [MSI_DIGIVOX_MINI_III] = {
+ USB_DEVICE(USB_VID_MSI_2, USB_PID_MSI_DIGI_VOX_MINI_III)},
+ [KWORLD_E396] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U)},
+ [KWORLD_E39B] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_2)},
+ [KWORLD_E395] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_3)},
+ [TREKSTOR_DVBT] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_TREKSTOR_DVBT)},
+ [AVERTV_A850] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850)},
+ [AVERTV_A805] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A805)},
+ [CONCEPTRONIC_CTVDIGRCU] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CONCEPTRONIC_CTVDIGRCU)},
+ [KWORLD_MC810] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_MC810)},
+ [GENIUS_TVGO_DVB_T03] = {
+ USB_DEVICE(USB_VID_KYE, USB_PID_GENIUS_TVGO_DVB_T03)},
+ [KWORLD_399U_2] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U_2)},
+ [KWORLD_PC160_T] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_PC160_T)},
+ [SVEON_STV20] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV20)},
+ [TINYTWIN_2] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_TINYTWIN_2)},
+ [WINFAST_DTV2000DS] = {
+ USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV2000DS)},
+ [KWORLD_UB383_T] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB383_T)},
+ [KWORLD_E39A] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_4)},
+ [AVERMEDIA_A815M] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A815M)},
+ [CINERGY_T_STICK_RC] = {
+ USB_DEVICE(USB_VID_TERRATEC,
USB_PID_TERRATEC_CINERGY_T_STICK_RC)},
- [CINERGY_T_DUAL_RC] = {USB_DEVICE(USB_VID_TERRATEC,
+ [CINERGY_T_DUAL_RC] = {
+ USB_DEVICE(USB_VID_TERRATEC,
USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC)},
- [AVERTV_A850T] =
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850T)},
- [TINYTWIN_3] = {USB_DEVICE(USB_VID_GTEK, USB_PID_TINYTWIN_3)},
- [SVEON_STV22] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV22)},
+ [AVERTV_A850T] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850T)},
+ [TINYTWIN_3] = {
+ USB_DEVICE(USB_VID_GTEK, USB_PID_TINYTWIN_3)},
+ [SVEON_STV22] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV22)},
{ }
};
MODULE_DEVICE_TABLE(usb, af9015_usb_table);
@@ -1516,43 +1532,44 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.num_adapters = 2,
.adapter = {
{
- .num_frontends = 1,
- .fe = {{
- .caps = DVB_USB_ADAP_HAS_PID_FILTER |
- DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
-
- .pid_filter_count = 32,
- .pid_filter = af9015_pid_filter,
- .pid_filter_ctrl = af9015_pid_filter_ctrl,
-
- .frontend_attach =
- af9015_af9013_frontend_attach,
- .tuner_attach = af9015_tuner_attach,
- .stream = {
- .type = USB_BULK,
- .count = 6,
- .endpoint = 0x84,
+ .num_frontends = 1,
+ .fe = {
+ {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+
+ .pid_filter_count = 32,
+ .pid_filter = af9015_pid_filter,
+ .pid_filter_ctrl = af9015_pid_filter_ctrl,
+
+ .frontend_attach = af9015_af9013_frontend_attach,
+ .tuner_attach = af9015_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x84,
+ },
+ }
},
- }},
},
{
- .num_frontends = 1,
- .fe = {{
- .frontend_attach =
- af9015_af9013_frontend_attach,
- .tuner_attach = af9015_tuner_attach,
- .stream = {
- .type = USB_BULK,
- .count = 6,
- .endpoint = 0x85,
- .u = {
- .bulk = {
- .buffersize =
- TS_USB20_FRAME_SIZE,
- }
+ .num_frontends = 1,
+ .fe = {
+ {
+ .frontend_attach = af9015_af9013_frontend_attach,
+ .tuner_attach = af9015_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x85,
+ .u = {
+ .bulk = {
+ .buffersize = TS_USB20_FRAME_SIZE,
+ }
+ }
+ },
}
},
- }},
}
},
@@ -1575,102 +1592,67 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.cold_ids = {
&af9015_usb_table[AFATECH_9015],
&af9015_usb_table[AFATECH_9016],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Leadtek WinFast DTV Dongle Gold",
.cold_ids = {
&af9015_usb_table[WINFAST_DTV_GOLD],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Pinnacle PCTV 71e",
.cold_ids = {
&af9015_usb_table[PINNACLE_PCTV_71E],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "KWorld PlusTV Dual DVB-T Stick " \
"(DVB-T 399U)",
.cold_ids = {
&af9015_usb_table[KWORLD_PLUSTV_399U],
&af9015_usb_table[KWORLD_399U_2],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "DigitalNow TinyTwin DVB-T Receiver",
.cold_ids = {
&af9015_usb_table[TINYTWIN],
&af9015_usb_table[TINYTWIN_2],
&af9015_usb_table[TINYTWIN_3],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "TwinHan AzureWave AD-TU700(704J)",
.cold_ids = {
&af9015_usb_table[AZUREWAVE_TU700],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "TerraTec Cinergy T USB XE",
.cold_ids = {
&af9015_usb_table[TERRATEC_AF9015],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "KWorld PlusTV Dual DVB-T PCI " \
"(DVB-T PC160-2T)",
.cold_ids = {
&af9015_usb_table[KWORLD_PLUSTV_PC160],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "AVerMedia AVerTV DVB-T Volar X",
.cold_ids = {
&af9015_usb_table[AVERTV_VOLAR_X],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "TerraTec Cinergy T Stick RC",
.cold_ids = {
&af9015_usb_table[CINERGY_T_STICK_RC],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "TerraTec Cinergy T Stick Dual RC",
.cold_ids = {
&af9015_usb_table[CINERGY_T_DUAL_RC],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "AverMedia AVerTV Red HD+ (A850T)",
.cold_ids = {
&af9015_usb_table[AVERTV_A850T],
- NULL
},
- .warm_ids = {NULL},
},
}
}, {
@@ -1686,43 +1668,44 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.num_adapters = 2,
.adapter = {
{
- .num_frontends = 1,
- .fe = {{
- .caps = DVB_USB_ADAP_HAS_PID_FILTER |
- DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
-
- .pid_filter_count = 32,
- .pid_filter = af9015_pid_filter,
- .pid_filter_ctrl = af9015_pid_filter_ctrl,
-
- .frontend_attach =
- af9015_af9013_frontend_attach,
- .tuner_attach = af9015_tuner_attach,
- .stream = {
- .type = USB_BULK,
- .count = 6,
- .endpoint = 0x84,
+ .num_frontends = 1,
+ .fe = {
+ {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+
+ .pid_filter_count = 32,
+ .pid_filter = af9015_pid_filter,
+ .pid_filter_ctrl = af9015_pid_filter_ctrl,
+
+ .frontend_attach = af9015_af9013_frontend_attach,
+ .tuner_attach = af9015_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x84,
+ },
+ }
},
- }},
},
{
- .num_frontends = 1,
- .fe = {{
- .frontend_attach =
- af9015_af9013_frontend_attach,
- .tuner_attach = af9015_tuner_attach,
- .stream = {
- .type = USB_BULK,
- .count = 6,
- .endpoint = 0x85,
- .u = {
- .bulk = {
- .buffersize =
- TS_USB20_FRAME_SIZE,
- }
+ .num_frontends = 1,
+ .fe = {
+ {
+ .frontend_attach = af9015_af9013_frontend_attach,
+ .tuner_attach = af9015_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x85,
+ .u = {
+ .bulk = {
+ .buffersize = TS_USB20_FRAME_SIZE,
+ }
+ }
+ },
}
},
- }},
}
},
@@ -1744,51 +1727,33 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.name = "Xtensions XD-380",
.cold_ids = {
&af9015_usb_table[XTENSIONS_380U],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "MSI DIGIVOX Duo",
.cold_ids = {
&af9015_usb_table[MSI_DIGIVOX_DUO],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Fujitsu-Siemens Slim Mobile USB DVB-T",
.cold_ids = {
&af9015_usb_table[AVERTV_VOLAR_X_REV2],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Telestar Starstick 2",
.cold_ids = {
&af9015_usb_table[TELESTAR_STARSTICK_2],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "AVerMedia A309",
.cold_ids = {
&af9015_usb_table[AVERMEDIA_A309_USB],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "MSI Digi VOX mini III",
.cold_ids = {
&af9015_usb_table[MSI_DIGIVOX_MINI_III],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "KWorld USB DVB-T TV Stick II " \
"(VS-DVB-T 395U)",
.cold_ids = {
@@ -1796,34 +1761,23 @@ static struct dvb_usb_device_properties af9015_properties[] = {
&af9015_usb_table[KWORLD_E39B],
&af9015_usb_table[KWORLD_E395],
&af9015_usb_table[KWORLD_E39A],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "TrekStor DVB-T USB Stick",
.cold_ids = {
&af9015_usb_table[TREKSTOR_DVBT],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "AverMedia AVerTV Volar Black HD " \
"(A850)",
.cold_ids = {
&af9015_usb_table[AVERTV_A850],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Sveon STV22 Dual USB DVB-T Tuner HDTV",
.cold_ids = {
&af9015_usb_table[SVEON_STV22],
- NULL
},
- .warm_ids = {NULL},
},
}
}, {
@@ -1839,43 +1793,44 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.num_adapters = 2,
.adapter = {
{
- .num_frontends = 1,
- .fe = {{
- .caps = DVB_USB_ADAP_HAS_PID_FILTER |
- DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
-
- .pid_filter_count = 32,
- .pid_filter = af9015_pid_filter,
- .pid_filter_ctrl = af9015_pid_filter_ctrl,
-
- .frontend_attach =
- af9015_af9013_frontend_attach,
- .tuner_attach = af9015_tuner_attach,
- .stream = {
- .type = USB_BULK,
- .count = 6,
- .endpoint = 0x84,
+ .num_frontends = 1,
+ .fe = {
+ {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+
+ .pid_filter_count = 32,
+ .pid_filter = af9015_pid_filter,
+ .pid_filter_ctrl = af9015_pid_filter_ctrl,
+
+ .frontend_attach = af9015_af9013_frontend_attach,
+ .tuner_attach = af9015_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x84,
+ },
+ }
},
- }},
},
{
- .num_frontends = 1,
- .fe = {{
- .frontend_attach =
- af9015_af9013_frontend_attach,
- .tuner_attach = af9015_tuner_attach,
- .stream = {
- .type = USB_BULK,
- .count = 6,
- .endpoint = 0x85,
- .u = {
- .bulk = {
- .buffersize =
- TS_USB20_FRAME_SIZE,
- }
+ .num_frontends = 1,
+ .fe = {
+ {
+ .frontend_attach = af9015_af9013_frontend_attach,
+ .tuner_attach = af9015_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x85,
+ .u = {
+ .bulk = {
+ .buffersize = TS_USB20_FRAME_SIZE,
+ }
+ }
+ },
}
},
- }},
}
},
@@ -1897,76 +1852,50 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.name = "AverMedia AVerTV Volar GPS 805 (A805)",
.cold_ids = {
&af9015_usb_table[AVERTV_A805],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Conceptronic USB2.0 DVB-T CTVDIGRCU " \
"V3.0",
.cold_ids = {
&af9015_usb_table[CONCEPTRONIC_CTVDIGRCU],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "KWorld Digial MC-810",
.cold_ids = {
&af9015_usb_table[KWORLD_MC810],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Genius TVGo DVB-T03",
.cold_ids = {
&af9015_usb_table[GENIUS_TVGO_DVB_T03],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "KWorld PlusTV DVB-T PCI Pro Card " \
"(DVB-T PC160-T)",
.cold_ids = {
&af9015_usb_table[KWORLD_PC160_T],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Sveon STV20 Tuner USB DVB-T HDTV",
.cold_ids = {
&af9015_usb_table[SVEON_STV20],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Leadtek WinFast DTV2000DS",
.cold_ids = {
&af9015_usb_table[WINFAST_DTV2000DS],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "KWorld USB DVB-T Stick Mobile " \
"(UB383-T)",
.cold_ids = {
&af9015_usb_table[KWORLD_UB383_T],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "AverMedia AVerTV Volar M (A815Mac)",
.cold_ids = {
&af9015_usb_table[AVERMEDIA_A815M],
- NULL
},
- .warm_ids = {NULL},
},
}
},
@@ -2019,5 +1948,5 @@ static struct usb_driver af9015_usb_driver = {
module_usb_driver(af9015_usb_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
-MODULE_DESCRIPTION("Driver for Afatech AF9015 DVB-T");
+MODULE_DESCRIPTION("Afatech AF9015 driver");
MODULE_LICENSE("GPL");
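The reworked af9015_usb_table above leans on C99 designated initializers indexed by the af9015_usb_table_entry enum, which keeps each table slot tied to the symbolic name later used in the per-device lists. A tiny standalone illustration of the same idiom (all names and IDs here are invented):

#include <stdio.h>

enum example_entry { ENTRY_A, ENTRY_B };

struct example_id { unsigned short vid, pid; };

static const struct example_id table[] = {
    [ENTRY_A] = { 0x1234, 0x0001 },
    [ENTRY_B] = { 0x1234, 0x0002 },
    { }     /* zero terminator, as in af9015_usb_table */
};

int main(void)
{
    /* entries can be referenced by name, e.g. &table[ENTRY_B] */
    printf("ENTRY_B = %04x:%04x\n", table[ENTRY_B].vid, table[ENTRY_B].pid);
    return 0;
}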
diff --git a/drivers/media/dvb/dvb-usb/af9035.c b/drivers/media/dvb/dvb-usb/af9035.c
new file mode 100644
index 000000000000..e83b39d3993c
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/af9035.c
@@ -0,0 +1,1242 @@
+/*
+ * Afatech AF9035 DVB USB driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "af9035.h"
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+static DEFINE_MUTEX(af9035_usb_mutex);
+static struct dvb_usb_device_properties af9035_properties[2];
+static int af9035_properties_count = ARRAY_SIZE(af9035_properties);
+
+static u16 af9035_checksum(const u8 *buf, size_t len)
+{
+ size_t i;
+ u16 checksum = 0;
+
+ for (i = 1; i < len; i++) {
+ if (i % 2)
+ checksum += buf[i] << 8;
+ else
+ checksum += buf[i];
+ }
+ checksum = ~checksum;
+
+ return checksum;
+}
+
+static int af9035_ctrl_msg(struct usb_device *udev, struct usb_req *req)
+{
+#define BUF_LEN 64
+#define REQ_HDR_LEN 4 /* send header size */
+#define ACK_HDR_LEN 3 /* receive header size */
+#define CHECKSUM_LEN 2
+#define USB_TIMEOUT 2000
+
+ int ret, msg_len, act_len;
+ u8 buf[BUF_LEN];
+ static u8 seq; /* packet sequence number */
+ u16 checksum, tmp_checksum;
+
+ /* buffer overflow check */
+ if (req->wlen > (BUF_LEN - REQ_HDR_LEN - CHECKSUM_LEN) ||
+ req->rlen > (BUF_LEN - ACK_HDR_LEN - CHECKSUM_LEN)) {
+ pr_debug("%s: too much data wlen=%d rlen=%d\n", __func__,
+ req->wlen, req->rlen);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&af9035_usb_mutex) < 0)
+ return -EAGAIN;
+
+ buf[0] = REQ_HDR_LEN + req->wlen + CHECKSUM_LEN - 1;
+ buf[1] = req->mbox;
+ buf[2] = req->cmd;
+ buf[3] = seq++;
+ if (req->wlen)
+ memcpy(&buf[4], req->wbuf, req->wlen);
+
+ /* calc and add checksum */
+ checksum = af9035_checksum(buf, buf[0] - 1);
+ buf[buf[0] - 1] = (checksum >> 8);
+ buf[buf[0] - 0] = (checksum & 0xff);
+
+ msg_len = REQ_HDR_LEN + req->wlen + CHECKSUM_LEN;
+
+ /* send req */
+ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x02), buf, msg_len,
+ &act_len, USB_TIMEOUT);
+ if (ret < 0)
+ err("bulk message failed=%d (%d/%d)", ret, msg_len, act_len);
+ else if (act_len != msg_len)
+  ret = -EIO; /* not all data was sent */
+ if (ret < 0)
+ goto err_mutex_unlock;
+
+ /* no ack for those packets */
+ if (req->cmd == CMD_FW_DL)
+ goto exit_mutex_unlock;
+
+ /* receive ack and data if read req */
+ msg_len = ACK_HDR_LEN + req->rlen + CHECKSUM_LEN;
+ ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, 0x81), buf, msg_len,
+ &act_len, USB_TIMEOUT);
+ if (ret < 0) {
+ err("recv bulk message failed=%d", ret);
+ ret = -EIO;
+ goto err_mutex_unlock;
+ }
+
+ if (act_len != msg_len) {
+ err("recv bulk message truncated (%d != %d)", act_len, msg_len);
+ ret = -EIO;
+ goto err_mutex_unlock;
+ }
+
+ /* verify checksum */
+ checksum = af9035_checksum(buf, act_len - 2);
+ tmp_checksum = (buf[act_len - 2] << 8) | buf[act_len - 1];
+ if (tmp_checksum != checksum) {
+ err("%s: command=%02x checksum mismatch (%04x != %04x)",
+ __func__, req->cmd, tmp_checksum, checksum);
+ ret = -EIO;
+ goto err_mutex_unlock;
+ }
+
+ /* check status */
+ if (buf[2]) {
+ pr_debug("%s: command=%02x failed fw error=%d\n", __func__,
+ req->cmd, buf[2]);
+ ret = -EIO;
+ goto err_mutex_unlock;
+ }
+
+ /* read request, copy returned data to return buf */
+ if (req->rlen)
+ memcpy(req->rbuf, &buf[ACK_HDR_LEN], req->rlen);
+
+err_mutex_unlock:
+exit_mutex_unlock:
+ mutex_unlock(&af9035_usb_mutex);
+
+ return ret;
+}
+
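af9035_ctrl_msg() frames every request as [length][mbox][cmd][seq][payload...][checksum_hi][checksum_lo], where buf[0] counts the bytes that follow it and the checksum covers everything between the length byte and the checksum itself. The standalone sketch below builds such a frame for an assumed 1-byte payload and reuses the same checksum folding as af9035_checksum(); the cmd, mbox and payload values are placeholders:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Same folding as af9035_checksum(): buf[0] is skipped, odd offsets are
 * added as the high byte, even offsets as the low byte, result inverted. */
static uint16_t af9035_style_checksum(const uint8_t *buf, size_t len)
{
    uint16_t sum = 0;
    size_t i;

    for (i = 1; i < len; i++) {
        if (i % 2)
            sum += buf[i] << 8;
        else
            sum += buf[i];
    }
    return ~sum;
}

int main(void)
{
    uint8_t buf[8] = { 0 };
    uint8_t wlen = 1;                   /* one payload byte */
    uint16_t csum;
    int i;

    buf[0] = 4 + wlen + 2 - 1;          /* REQ_HDR_LEN + wlen + CHECKSUM_LEN - 1 */
    buf[1] = 0x00;                      /* mbox (placeholder) */
    buf[2] = 0x03;                      /* cmd (placeholder) */
    buf[3] = 0x00;                      /* sequence number */
    buf[4] = 0xaa;                      /* payload (placeholder) */

    csum = af9035_style_checksum(buf, buf[0] - 1);  /* covers buf[1..4] */
    buf[buf[0] - 1] = csum >> 8;
    buf[buf[0]] = csum & 0xff;

    for (i = 0; i < buf[0] + 1; i++)    /* msg_len = buf[0] + 1 = 7 */
        printf("%02x ", buf[i]);
    printf("\n");                       /* prints: 06 00 03 00 aa ff 52 */
    return 0;
}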
+/* write multiple registers */
+static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
+{
+ u8 wbuf[6 + len];
+ u8 mbox = (reg >> 16) & 0xff;
+ struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL };
+
+ wbuf[0] = len;
+ wbuf[1] = 2;
+ wbuf[2] = 0;
+ wbuf[3] = 0;
+ wbuf[4] = (reg >> 8) & 0xff;
+ wbuf[5] = (reg >> 0) & 0xff;
+ memcpy(&wbuf[6], val, len);
+
+ return af9035_ctrl_msg(d->udev, &req);
+}
+
+/* read multiple registers */
+static int af9035_rd_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
+{
+ u8 wbuf[] = { len, 2, 0, 0, (reg >> 8) & 0xff, reg & 0xff };
+ u8 mbox = (reg >> 16) & 0xff;
+ struct usb_req req = { CMD_MEM_RD, mbox, sizeof(wbuf), wbuf, len, val };
+
+ return af9035_ctrl_msg(d->udev, &req);
+}
+
+/* write single register */
+static int af9035_wr_reg(struct dvb_usb_device *d, u32 reg, u8 val)
+{
+ return af9035_wr_regs(d, reg, &val, 1);
+}
+
+/* read single register */
+static int af9035_rd_reg(struct dvb_usb_device *d, u32 reg, u8 *val)
+{
+ return af9035_rd_regs(d, reg, val, 1);
+}
+
+/* write single register with mask */
+static int af9035_wr_reg_mask(struct dvb_usb_device *d, u32 reg, u8 val,
+ u8 mask)
+{
+ int ret;
+ u8 tmp;
+
+ /* no need for read if whole reg is written */
+ if (mask != 0xff) {
+ ret = af9035_rd_regs(d, reg, &tmp, 1);
+ if (ret)
+ return ret;
+
+ val &= mask;
+ tmp &= ~mask;
+ val |= tmp;
+ }
+
+ return af9035_wr_regs(d, reg, &val, 1);
+}
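The masking above is a plain read-modify-write; stated as a one-liner (hypothetical helper name, not part of the patch):

uint8_t apply_mask(uint8_t old, uint8_t val, uint8_t mask)
{
	/* keep the bits outside the mask, take the new value inside it */
	return (old & ~mask) | (val & mask);
}

/* e.g. apply_mask(0xf0, 0x01, 0x0f) == 0xf1 */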
+
+static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg msg[], int num)
+{
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ struct state *state = d->priv;
+ int ret;
+
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ /*
+ * The I2C sub header is 5 bytes long. The meaning of those bytes is:
+ * 0: data len
+ * 1: I2C addr << 1
+ * 2: reg addr len
+ * bytes 3 and 4 can be used as the reg addr
+ * 3: reg addr MSB
+ * used when reg addr len is set to 2
+ * 4: reg addr LSB
+ * used when reg addr len is set to 1 or 2
+ *
+ * For simplicity we do not use the register address at all.
+ * NOTE: As the firmware knows the tuner type, there is a small possibility
+ * it performs some tuner I2C hacks of its own, which may lead to problems
+ * if it expects those bytes to be used.
+ */
+ if (num == 2 && !(msg[0].flags & I2C_M_RD) &&
+ (msg[1].flags & I2C_M_RD)) {
+ if (msg[0].len > 40 || msg[1].len > 40) {
+ /* TODO: correct limits > 40 */
+ ret = -EOPNOTSUPP;
+ } else if (msg[0].addr == state->af9033_config[0].i2c_addr) {
+ /* integrated demod */
+ u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ msg[0].buf[2];
+ ret = af9035_rd_regs(d, reg, &msg[1].buf[0],
+ msg[1].len);
+ } else {
+ /* I2C */
+ u8 buf[5 + msg[0].len];
+ struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf),
+ buf, msg[1].len, msg[1].buf };
+ buf[0] = msg[1].len;
+ buf[1] = msg[0].addr << 1;
+ buf[2] = 0x00; /* reg addr len */
+ buf[3] = 0x00; /* reg addr MSB */
+ buf[4] = 0x00; /* reg addr LSB */
+ memcpy(&buf[5], msg[0].buf, msg[0].len);
+ ret = af9035_ctrl_msg(d->udev, &req);
+ }
+ } else if (num == 1 && !(msg[0].flags & I2C_M_RD)) {
+ if (msg[0].len > 40) {
+ /* TODO: correct limits > 40 */
+ ret = -EOPNOTSUPP;
+ } else if (msg[0].addr == state->af9033_config[0].i2c_addr) {
+ /* integrated demod */
+ u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ msg[0].buf[2];
+ ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
+ msg[0].len - 3);
+ } else {
+ /* I2C */
+ u8 buf[5 + msg[0].len];
+ struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf,
+ 0, NULL };
+ buf[0] = msg[0].len;
+ buf[1] = msg[0].addr << 1;
+ buf[2] = 0x00; /* reg addr len */
+ buf[3] = 0x00; /* reg addr MSB */
+ buf[4] = 0x00; /* reg addr LSB */
+ memcpy(&buf[5], msg[0].buf, msg[0].len);
+ ret = af9035_ctrl_msg(d->udev, &req);
+ }
+ } else {
+ /*
+ * We support only two kinds of I2C transaction:
+ * 1) 1 x write + 1 x read (typical register read)
+ * 2) 1 x write
+ */
+ ret = -EOPNOTSUPP;
+ }
+
+ mutex_unlock(&d->i2c_mutex);
+
+ if (ret < 0)
+ return ret;
+ else
+ return num;
+}
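For illustration only, the payload the read branch above would build for a hypothetical transaction that writes one register index and reads 4 bytes back from a tuner at I2C address 0x60 (the 0xd0 register index is made up):

static const unsigned char i2c_rd_payload[] = {
	0x04,		/* buf[0]: read length (msg[1].len) */
	0x60 << 1,	/* buf[1]: I2C address shifted left by one */
	0x00,		/* buf[2]: reg addr len, unused here */
	0x00,		/* buf[3]: reg addr MSB, unused */
	0x00,		/* buf[4]: reg addr LSB, unused */
	0xd0,		/* buf[5..]: the msg[0].len written bytes */
};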
+
+static u32 af9035_i2c_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
+
+static struct i2c_algorithm af9035_i2c_algo = {
+ .master_xfer = af9035_i2c_master_xfer,
+ .functionality = af9035_i2c_functionality,
+};
+
+#define AF9035_POLL 250
+static int af9035_rc_query(struct dvb_usb_device *d)
+{
+ unsigned int key;
+ unsigned char b[4];
+ int ret;
+ struct usb_req req = { CMD_IR_GET, 0, 0, NULL, 4, b };
+
+ ret = af9035_ctrl_msg(d->udev, &req);
+ if (ret < 0)
+ goto err;
+
+ if ((b[2] + b[3]) == 0xff) {
+ if ((b[0] + b[1]) == 0xff) {
+ /* NEC */
+ key = b[0] << 8 | b[2];
+ } else {
+ /* ext. NEC */
+ key = b[0] << 16 | b[1] << 8 | b[2];
+ }
+ } else {
+ key = b[0] << 24 | b[1] << 16 | b[2] << 8 | b[3];
+ }
+
+ rc_keydown(d->rc_dev, key, 0);
+
+err:
+ /* ignore errors */
+ return 0;
+}
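The scancode assembly above, restated as a standalone function: standard NEC frames carry address, inverted address, command and inverted command, so when both complements check out only the address and command survive (the sample bytes in the usage comment are hypothetical):

#include <stdint.h>

uint32_t nec_scancode(const uint8_t b[4])
{
	if (b[2] + b[3] == 0xff) {
		if (b[0] + b[1] == 0xff)
			return b[0] << 8 | b[2];	/* plain NEC */
		return b[0] << 16 | b[1] << 8 | b[2];	/* extended NEC */
	}
	return (uint32_t)b[0] << 24 | b[1] << 16 | b[2] << 8 | b[3];
}

/* e.g. { 0x40, 0xbf, 0x12, 0xed } -> 0x4012 */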
+
+static int af9035_init(struct dvb_usb_device *d)
+{
+ struct state *state = d->priv;
+ int ret, i;
+ u16 frame_size = 87 * 188 / 4;
+ u8 packet_size = 512 / 4;
+ struct reg_val_mask tab[] = {
+ { 0x80f99d, 0x01, 0x01 },
+ { 0x80f9a4, 0x01, 0x01 },
+ { 0x00dd11, 0x00, 0x20 },
+ { 0x00dd11, 0x00, 0x40 },
+ { 0x00dd13, 0x00, 0x20 },
+ { 0x00dd13, 0x00, 0x40 },
+ { 0x00dd11, 0x20, 0x20 },
+ { 0x00dd88, (frame_size >> 0) & 0xff, 0xff},
+ { 0x00dd89, (frame_size >> 8) & 0xff, 0xff},
+ { 0x00dd0c, packet_size, 0xff},
+ { 0x00dd11, state->dual_mode << 6, 0x40 },
+ { 0x00dd8a, (frame_size >> 0) & 0xff, 0xff},
+ { 0x00dd8b, (frame_size >> 8) & 0xff, 0xff},
+ { 0x00dd0d, packet_size, 0xff },
+ { 0x80f9a3, 0x00, 0x01 },
+ { 0x80f9cd, 0x00, 0x01 },
+ { 0x80f99d, 0x00, 0x01 },
+ { 0x80f9a4, 0x00, 0x01 },
+ };
+
+ pr_debug("%s: USB speed=%d frame_size=%04x packet_size=%02x\n",
+ __func__, d->udev->speed, frame_size, packet_size);
+
+ /* init endpoints */
+ for (i = 0; i < ARRAY_SIZE(tab); i++) {
+ ret = af9035_wr_reg_mask(d, tab[i].reg, tab[i].val,
+ tab[i].mask);
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
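The two sizes written by the table above work out as follows; the unit looks like 32-bit words, given the 512-byte high-speed bulk packet and the 87 * 188 byte URB buffer declared later in the properties, but that interpretation is an assumption:

/* frame_size  = 87 * 188 / 4 = 4089 (0x0ff9): 0xf9 -> 0x00dd88, 0x0f -> 0x00dd89 */
/* packet_size = 512 / 4      =  128 (0x80):   written to 0x00dd0c and 0x00dd0d    */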
+
+static int af9035_identify_state(struct usb_device *udev,
+ struct dvb_usb_device_properties *props,
+ struct dvb_usb_device_description **desc,
+ int *cold)
+{
+ int ret;
+ u8 wbuf[1] = { 1 };
+ u8 rbuf[4];
+ struct usb_req req = { CMD_FW_QUERYINFO, 0, sizeof(wbuf), wbuf,
+ sizeof(rbuf), rbuf };
+
+ ret = af9035_ctrl_msg(udev, &req);
+ if (ret < 0)
+ goto err;
+
+ pr_debug("%s: reply=%02x %02x %02x %02x\n", __func__,
+ rbuf[0], rbuf[1], rbuf[2], rbuf[3]);
+ if (rbuf[0] || rbuf[1] || rbuf[2] || rbuf[3])
+ *cold = 0;
+ else
+ *cold = 1;
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9035_download_firmware(struct usb_device *udev,
+ const struct firmware *fw)
+{
+ int ret, i, j, len;
+ u8 wbuf[1];
+ u8 rbuf[4];
+ struct usb_req req = { 0, 0, 0, NULL, 0, NULL };
+ struct usb_req req_fw_dl = { CMD_FW_DL, 0, 0, wbuf, 0, NULL };
+ struct usb_req req_fw_ver = { CMD_FW_QUERYINFO, 0, 1, wbuf, 4, rbuf };
+ u8 hdr_core;
+ u16 hdr_addr, hdr_data_len, hdr_checksum;
+ #define MAX_DATA 58
+ #define HDR_SIZE 7
+
+ /*
+ * Thanks to Daniel Glöckner <daniel-gl@gmx.net> for that info!
+ *
+ * byte 0: MCS 51 core
+ * There are two inside the AF9035 (1=Link and 2=OFDM) with separate
+ * address spaces
+ * bytes 1-2: Big endian destination address
+ * bytes 3-4: Big endian number of data bytes following the header
+ * bytes 5-6: Big endian header checksum, apparently ignored by the chip
+ * Calculated as ~(h[0]*256+h[1]+h[2]*256+h[3]+h[4]*256)
+ */
+
+ for (i = fw->size; i > HDR_SIZE;) {
+ hdr_core = fw->data[fw->size - i + 0];
+ hdr_addr = fw->data[fw->size - i + 1] << 8;
+ hdr_addr |= fw->data[fw->size - i + 2] << 0;
+ hdr_data_len = fw->data[fw->size - i + 3] << 8;
+ hdr_data_len |= fw->data[fw->size - i + 4] << 0;
+ hdr_checksum = fw->data[fw->size - i + 5] << 8;
+ hdr_checksum |= fw->data[fw->size - i + 6] << 0;
+
+ pr_debug("%s: core=%d addr=%04x data_len=%d checksum=%04x\n",
+ __func__, hdr_core, hdr_addr, hdr_data_len,
+ hdr_checksum);
+
+ if (((hdr_core != 1) && (hdr_core != 2)) ||
+ (hdr_data_len > i)) {
+ pr_debug("%s: bad firmware\n", __func__);
+ break;
+ }
+
+ /* download begin packet */
+ req.cmd = CMD_FW_DL_BEGIN;
+ ret = af9035_ctrl_msg(udev, &req);
+ if (ret < 0)
+ goto err;
+
+ /* download firmware packet(s) */
+ for (j = HDR_SIZE + hdr_data_len; j > 0; j -= MAX_DATA) {
+ len = j;
+ if (len > MAX_DATA)
+ len = MAX_DATA;
+ req_fw_dl.wlen = len;
+ req_fw_dl.wbuf = (u8 *) &fw->data[fw->size - i +
+ HDR_SIZE + hdr_data_len - j];
+ ret = af9035_ctrl_msg(udev, &req_fw_dl);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* download end packet */
+ req.cmd = CMD_FW_DL_END;
+ ret = af9035_ctrl_msg(udev, &req);
+ if (ret < 0)
+ goto err;
+
+ i -= hdr_data_len + HDR_SIZE;
+
+ pr_debug("%s: data uploaded=%zu\n", __func__, fw->size - i);
+ }
+
+ /* firmware loaded, request boot */
+ req.cmd = CMD_FW_BOOT;
+ ret = af9035_ctrl_msg(udev, &req);
+ if (ret < 0)
+ goto err;
+
+ /* ensure firmware starts */
+ wbuf[0] = 1;
+ ret = af9035_ctrl_msg(udev, &req_fw_ver);
+ if (ret < 0)
+ goto err;
+
+ if (!(rbuf[0] || rbuf[1] || rbuf[2] || rbuf[3])) {
+ info("firmware did not run");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ info("firmware version=%d.%d.%d.%d", rbuf[0], rbuf[1], rbuf[2],
+ rbuf[3]);
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
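A standalone sketch of the 7-byte segment header parse and the header checksum formula quoted in the comment above (the sample header bytes are invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical segment header: core 1, addr 0x0a00, 58 data bytes */
	const uint8_t h[7] = { 0x01, 0x0a, 0x00, 0x00, 0x3a, 0x00, 0x00 };
	uint16_t addr = h[1] << 8 | h[2];
	uint16_t data_len = h[3] << 8 | h[4];
	uint16_t csum = ~(h[0] * 256 + h[1] + h[2] * 256 + h[3] + h[4] * 256);

	printf("core=%d addr=%04x data_len=%d header_csum=%04x\n",
	       h[0], addr, data_len, csum);
	return 0;
}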
+
+static int af9035_download_firmware_it9135(struct usb_device *udev,
+ const struct firmware *fw)
+{
+ int ret, i, i_prev;
+ u8 wbuf[1];
+ u8 rbuf[4];
+ struct usb_req req = { 0, 0, 0, NULL, 0, NULL };
+ struct usb_req req_fw_dl = { CMD_FW_SCATTER_WR, 0, 0, NULL, 0, NULL };
+ struct usb_req req_fw_ver = { CMD_FW_QUERYINFO, 0, 1, wbuf, 4, rbuf };
+ #define HDR_SIZE 7
+
+ /*
+ * There seems to be the following firmware header. The meaning of
+ * bytes 0-3 is unknown.
+ *
+ * 0: 3
+ * 1: 0, 1
+ * 2: 0
+ * 3: 1, 2, 3
+ * 4: addr MSB
+ * 5: addr LSB
+ * 6: count of data bytes ?
+ */
+
+ for (i = HDR_SIZE, i_prev = 0; i <= fw->size; i++) {
+ if (i == fw->size ||
+ (fw->data[i + 0] == 0x03 &&
+ (fw->data[i + 1] == 0x00 ||
+ fw->data[i + 1] == 0x01) &&
+ fw->data[i + 2] == 0x00)) {
+ req_fw_dl.wlen = i - i_prev;
+ req_fw_dl.wbuf = (u8 *) &fw->data[i_prev];
+ i_prev = i;
+ ret = af9035_ctrl_msg(udev, &req_fw_dl);
+ if (ret < 0)
+ goto err;
+
+ pr_debug("%s: data uploaded=%d\n", __func__, i);
+ }
+ }
+
+ /* firmware loaded, request boot */
+ req.cmd = CMD_FW_BOOT;
+ ret = af9035_ctrl_msg(udev, &req);
+ if (ret < 0)
+ goto err;
+
+ /* ensure firmware starts */
+ wbuf[0] = 1;
+ ret = af9035_ctrl_msg(udev, &req_fw_ver);
+ if (ret < 0)
+ goto err;
+
+ if (!(rbuf[0] || rbuf[1] || rbuf[2] || rbuf[3])) {
+ info("firmware did not run");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ info("firmware version=%d.%d.%d.%d", rbuf[0], rbuf[1], rbuf[2],
+ rbuf[3]);
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
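Editorial sketch of the slicing rule used above: the blob is cut wherever a new 0x03 0x00/0x01 ... 0x00 header begins, and each slice goes out as one CMD_FW_SCATTER_WR transfer. The helper below only measures the slices and is hypothetical:

#include <stddef.h>
#include <stdint.h>

size_t largest_slice(const uint8_t *data, size_t size)
{
	size_t i, prev = 0, max = 0;

	for (i = 7 /* HDR_SIZE */; i <= size; i++) {
		if (i == size || (i + 2 < size && data[i] == 0x03 &&
		    (data[i + 1] == 0x00 || data[i + 1] == 0x01) &&
		    data[i + 2] == 0x00)) {
			if (i - prev > max)
				max = i - prev;	/* slice is data[prev .. i-1] */
			prev = i;
		}
	}

	return max;
}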
+
+/* abuse this callback as there is no better one for reading the eeprom */
+static int af9035_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
+{
+ struct state *state = d->priv;
+ int ret, i, eeprom_shift = 0;
+ u8 tmp;
+ u16 tmp16;
+
+ /* check if there are dual tuners */
+ ret = af9035_rd_reg(d, EEPROM_DUAL_MODE, &tmp);
+ if (ret < 0)
+ goto err;
+
+ state->dual_mode = tmp;
+ pr_debug("%s: dual mode=%d\n", __func__, state->dual_mode);
+
+ for (i = 0; i < af9035_properties[0].num_adapters; i++) {
+ /* tuner */
+ ret = af9035_rd_reg(d, EEPROM_1_TUNER_ID + eeprom_shift, &tmp);
+ if (ret < 0)
+ goto err;
+
+ state->af9033_config[i].tuner = tmp;
+ pr_debug("%s: [%d]tuner=%02x\n", __func__, i, tmp);
+
+ switch (tmp) {
+ case AF9033_TUNER_TUA9001:
+ case AF9033_TUNER_FC0011:
+ case AF9033_TUNER_MXL5007T:
+ case AF9033_TUNER_TDA18218:
+ state->af9033_config[i].spec_inv = 1;
+ break;
+ default:
+ warn("tuner ID=%02x not supported, please report!",
+ tmp);
+ }
+
+ /* tuner IF frequency */
+ ret = af9035_rd_reg(d, EEPROM_1_IFFREQ_L + eeprom_shift, &tmp);
+ if (ret < 0)
+ goto err;
+
+ tmp16 = tmp;
+
+ ret = af9035_rd_reg(d, EEPROM_1_IFFREQ_H + eeprom_shift, &tmp);
+ if (ret < 0)
+ goto err;
+
+ tmp16 |= tmp << 8;
+
+ pr_debug("%s: [%d]IF=%d\n", __func__, i, tmp16);
+
+ eeprom_shift = 0x10; /* shift for the 2nd tuner params */
+ }
+
+ /* get demod clock */
+ ret = af9035_rd_reg(d, 0x00d800, &tmp);
+ if (ret < 0)
+ goto err;
+
+ tmp = (tmp >> 0) & 0x0f;
+
+ for (i = 0; i < af9035_properties[0].num_adapters; i++)
+ state->af9033_config[i].clock = clock_lut[tmp];
+
+ ret = af9035_rd_reg(d, EEPROM_IR_MODE, &tmp);
+ if (ret < 0)
+ goto err;
+ pr_debug("%s: ir_mode=%02x\n", __func__, tmp);
+
+ /* don't activate rc if in HID mode or if not available */
+ if (tmp == 5) {
+ ret = af9035_rd_reg(d, EEPROM_IR_TYPE, &tmp);
+ if (ret < 0)
+ goto err;
+ pr_debug("%s: ir_type=%02x\n", __func__, tmp);
+
+ switch (tmp) {
+ case 0: /* NEC */
+ default:
+ d->props.rc.core.protocol = RC_TYPE_NEC;
+ d->props.rc.core.allowed_protos = RC_TYPE_NEC;
+ break;
+ case 1: /* RC6 */
+ d->props.rc.core.protocol = RC_TYPE_RC6;
+ d->props.rc.core.allowed_protos = RC_TYPE_RC6;
+ break;
+ }
+ d->props.rc.core.rc_query = af9035_rc_query;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+/* abuse this callback as there is no better one for reading the eeprom */
+static int af9035_read_mac_address_it9135(struct dvb_usb_device *d, u8 mac[6])
+{
+ struct state *state = d->priv;
+ int ret, i;
+ u8 tmp;
+
+ state->dual_mode = false;
+
+ /* get demod clock */
+ ret = af9035_rd_reg(d, 0x00d800, &tmp);
+ if (ret < 0)
+ goto err;
+
+ tmp = (tmp >> 0) & 0x0f;
+
+ for (i = 0; i < af9035_properties[0].num_adapters; i++)
+ state->af9033_config[i].clock = clock_lut_it9135[tmp];
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9035_fc0011_tuner_callback(struct dvb_usb_device *d,
+ int cmd, int arg)
+{
+ int ret;
+
+ switch (cmd) {
+ case FC0011_FE_CALLBACK_POWER:
+ /* Tuner enable */
+ ret = af9035_wr_reg_mask(d, 0xd8eb, 1, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8ec, 1, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8ed, 1, 1);
+ if (ret < 0)
+ goto err;
+
+ /* LED */
+ ret = af9035_wr_reg_mask(d, 0xd8d0, 1, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8d1, 1, 1);
+ if (ret < 0)
+ goto err;
+
+ usleep_range(10000, 50000);
+ break;
+ case FC0011_FE_CALLBACK_RESET:
+ ret = af9035_wr_reg(d, 0xd8e9, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0xd8e8, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0xd8e7, 1);
+ if (ret < 0)
+ goto err;
+
+ usleep_range(10000, 20000);
+
+ ret = af9035_wr_reg(d, 0xd8e7, 0);
+ if (ret < 0)
+ goto err;
+
+ usleep_range(10000, 20000);
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9035_tuner_callback(struct dvb_usb_device *d, int cmd, int arg)
+{
+ struct state *state = d->priv;
+
+ switch (state->af9033_config[0].tuner) {
+ case AF9033_TUNER_FC0011:
+ return af9035_fc0011_tuner_callback(d, cmd, arg);
+ default:
+ break;
+ }
+
+ return -ENODEV;
+}
+
+static int af9035_frontend_callback(void *adapter_priv, int component,
+ int cmd, int arg)
+{
+ struct i2c_adapter *adap = adapter_priv;
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+
+ switch (component) {
+ case DVB_FRONTEND_COMPONENT_TUNER:
+ return af9035_tuner_callback(d, cmd, arg);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int af9035_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct state *state = adap->dev->priv;
+ int ret;
+
+ if (!state->af9033_config[adap->id].tuner) {
+ /* unsupported tuner */
+ ret = -ENODEV;
+ goto err;
+ }
+
+ if (adap->id == 0) {
+ state->af9033_config[0].ts_mode = AF9033_TS_MODE_USB;
+ state->af9033_config[1].ts_mode = AF9033_TS_MODE_SERIAL;
+
+ ret = af9035_wr_reg(adap->dev, 0x00417f,
+ state->af9033_config[1].i2c_addr);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(adap->dev, 0x00d81a,
+ state->dual_mode);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* attach demodulator */
+ adap->fe_adap[0].fe = dvb_attach(af9033_attach,
+ &state->af9033_config[adap->id], &adap->dev->i2c_adap);
+ if (adap->fe_adap[0].fe == NULL) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* disable I2C-gate */
+ adap->fe_adap[0].fe->ops.i2c_gate_ctrl = NULL;
+ adap->fe_adap[0].fe->callback = af9035_frontend_callback;
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static struct tua9001_config af9035_tua9001_config = {
+ .i2c_addr = 0x60,
+};
+
+static const struct fc0011_config af9035_fc0011_config = {
+ .i2c_address = 0x60,
+};
+
+static struct mxl5007t_config af9035_mxl5007t_config = {
+ .xtal_freq_hz = MxL_XTAL_24_MHZ,
+ .if_freq_hz = MxL_IF_4_57_MHZ,
+ .invert_if = 0,
+ .loop_thru_enable = 0,
+ .clk_out_enable = 0,
+ .clk_out_amp = MxL_CLKOUT_AMP_0_94V,
+};
+
+static struct tda18218_config af9035_tda18218_config = {
+ .i2c_address = 0x60,
+ .i2c_wr_max = 21,
+};
+
+static int af9035_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct state *state = adap->dev->priv;
+ int ret;
+ struct dvb_frontend *fe;
+
+ switch (state->af9033_config[adap->id].tuner) {
+ case AF9033_TUNER_TUA9001:
+ /*
+ * AF9035 gpiot3 = TUA9001 RESETN
+ * AF9035 gpiot2 = TUA9001 RXEN
+ */
+
+ /* configure gpiot2 and gpiot3 as outputs */
+ ret = af9035_wr_reg_mask(adap->dev, 0x00d8ec, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(adap->dev, 0x00d8ed, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(adap->dev, 0x00d8e8, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(adap->dev, 0x00d8e9, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ /* reset tuner */
+ ret = af9035_wr_reg_mask(adap->dev, 0x00d8e7, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ usleep_range(2000, 20000);
+
+ ret = af9035_wr_reg_mask(adap->dev, 0x00d8e7, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ /* activate tuner RX */
+ /* TODO: use callback for TUA9001 RXEN */
+ ret = af9035_wr_reg_mask(adap->dev, 0x00d8eb, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ /* attach tuner */
+ fe = dvb_attach(tua9001_attach, adap->fe_adap[0].fe,
+ &adap->dev->i2c_adap, &af9035_tua9001_config);
+ break;
+ case AF9033_TUNER_FC0011:
+ fe = dvb_attach(fc0011_attach, adap->fe_adap[0].fe,
+ &adap->dev->i2c_adap, &af9035_fc0011_config);
+ break;
+ case AF9033_TUNER_MXL5007T:
+ ret = af9035_wr_reg(adap->dev, 0x00d8e0, 1);
+ if (ret < 0)
+ goto err;
+ ret = af9035_wr_reg(adap->dev, 0x00d8e1, 1);
+ if (ret < 0)
+ goto err;
+ ret = af9035_wr_reg(adap->dev, 0x00d8df, 0);
+ if (ret < 0)
+ goto err;
+
+ msleep(30);
+
+ ret = af9035_wr_reg(adap->dev, 0x00d8df, 1);
+ if (ret < 0)
+ goto err;
+
+ msleep(300);
+
+ ret = af9035_wr_reg(adap->dev, 0x00d8c0, 1);
+ if (ret < 0)
+ goto err;
+ ret = af9035_wr_reg(adap->dev, 0x00d8c1, 1);
+ if (ret < 0)
+ goto err;
+ ret = af9035_wr_reg(adap->dev, 0x00d8bf, 0);
+ if (ret < 0)
+ goto err;
+ ret = af9035_wr_reg(adap->dev, 0x00d8b4, 1);
+ if (ret < 0)
+ goto err;
+ ret = af9035_wr_reg(adap->dev, 0x00d8b5, 1);
+ if (ret < 0)
+ goto err;
+ ret = af9035_wr_reg(adap->dev, 0x00d8b3, 1);
+ if (ret < 0)
+ goto err;
+
+ /* attach tuner */
+ fe = dvb_attach(mxl5007t_attach, adap->fe_adap[0].fe,
+ &adap->dev->i2c_adap, 0x60, &af9035_mxl5007t_config);
+ break;
+ case AF9033_TUNER_TDA18218:
+ /* attach tuner */
+ fe = dvb_attach(tda18218_attach, adap->fe_adap[0].fe,
+ &adap->dev->i2c_adap, &af9035_tda18218_config);
+ break;
+ default:
+ fe = NULL;
+ }
+
+ if (fe == NULL) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+enum af9035_id_entry {
+ AF9035_15A4_9035,
+ AF9035_15A4_1000,
+ AF9035_15A4_1001,
+ AF9035_15A4_1002,
+ AF9035_15A4_1003,
+ AF9035_0CCD_0093,
+ AF9035_07CA_A835,
+ AF9035_07CA_B835,
+ AF9035_07CA_1867,
+ AF9035_07CA_A867,
+ AF9035_07CA_0825,
+};
+
+static struct usb_device_id af9035_id[] = {
+ [AF9035_15A4_9035] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_9035)},
+ [AF9035_15A4_1000] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1000)},
+ [AF9035_15A4_1001] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1001)},
+ [AF9035_15A4_1002] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1002)},
+ [AF9035_15A4_1003] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1003)},
+ [AF9035_0CCD_0093] = {
+ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_STICK)},
+ [AF9035_07CA_A835] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835)},
+ [AF9035_07CA_B835] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_B835)},
+ [AF9035_07CA_1867] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_1867)},
+ [AF9035_07CA_A867] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A867)},
+ [AF9035_07CA_0825] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_TWINSTAR)},
+ {},
+};
+
+MODULE_DEVICE_TABLE(usb, af9035_id);
+
+static struct dvb_usb_device_properties af9035_properties[] = {
+ {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .download_firmware = af9035_download_firmware,
+ .firmware = "dvb-usb-af9035-02.fw",
+ .no_reconnect = 1,
+
+ .size_of_priv = sizeof(struct state),
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {
+ {
+ .frontend_attach = af9035_frontend_attach,
+ .tuner_attach = af9035_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x84,
+ .u = {
+ .bulk = {
+ .buffersize = (87 * 188),
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+
+ .identify_state = af9035_identify_state,
+ .read_mac_address = af9035_read_mac_address,
+
+ .i2c_algo = &af9035_i2c_algo,
+
+ .rc.core = {
+ .protocol = RC_TYPE_UNKNOWN,
+ .module_name = "af9035",
+ .rc_query = NULL,
+ .rc_interval = AF9035_POLL,
+ .allowed_protos = RC_TYPE_UNKNOWN,
+ .rc_codes = RC_MAP_EMPTY,
+ },
+ .num_device_descs = 5,
+ .devices = {
+ {
+ .name = "Afatech AF9035 reference design",
+ .cold_ids = {
+ &af9035_id[AF9035_15A4_9035],
+ &af9035_id[AF9035_15A4_1000],
+ &af9035_id[AF9035_15A4_1001],
+ &af9035_id[AF9035_15A4_1002],
+ &af9035_id[AF9035_15A4_1003],
+ },
+ }, {
+ .name = "TerraTec Cinergy T Stick",
+ .cold_ids = {
+ &af9035_id[AF9035_0CCD_0093],
+ },
+ }, {
+ .name = "AVerMedia AVerTV Volar HD/PRO (A835)",
+ .cold_ids = {
+ &af9035_id[AF9035_07CA_A835],
+ &af9035_id[AF9035_07CA_B835],
+ },
+ }, {
+ .name = "AVerMedia HD Volar (A867)",
+ .cold_ids = {
+ &af9035_id[AF9035_07CA_1867],
+ &af9035_id[AF9035_07CA_A867],
+ },
+ }, {
+ .name = "AVerMedia Twinstar (A825)",
+ .cold_ids = {
+ &af9035_id[AF9035_07CA_0825],
+ },
+ },
+ }
+ },
+ {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .download_firmware = af9035_download_firmware_it9135,
+ .firmware = "dvb-usb-it9135-01.fw",
+ .no_reconnect = 1,
+
+ .size_of_priv = sizeof(struct state),
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {
+ {
+ .frontend_attach = af9035_frontend_attach,
+ .tuner_attach = af9035_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x84,
+ .u = {
+ .bulk = {
+ .buffersize = (87 * 188),
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+
+ .identify_state = af9035_identify_state,
+ .read_mac_address = af9035_read_mac_address_it9135,
+
+ .i2c_algo = &af9035_i2c_algo,
+
+ .num_device_descs = 0, /* disabled as no support for IT9135 */
+ .devices = {
+ {
+ .name = "ITE Tech. IT9135 reference design",
+ },
+ }
+ },
+};
+
+static int af9035_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ int ret, i;
+ struct dvb_usb_device *d = NULL;
+ struct usb_device *udev;
+ bool found;
+
+ pr_debug("%s: interface=%d\n", __func__,
+ intf->cur_altsetting->desc.bInterfaceNumber);
+
+ /*
+ * Interface 0 is used by the DVB-T receiver and
+ * interface 1 is for the remote controller (HID).
+ */
+ if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
+ return 0;
+
+ /* Dynamic USB ID support. Replaces first device ID with current one. */
+ udev = interface_to_usbdev(intf);
+
+ for (i = 0, found = false; i < ARRAY_SIZE(af9035_id) - 1; i++) {
+ if (af9035_id[i].idVendor ==
+ le16_to_cpu(udev->descriptor.idVendor) &&
+ af9035_id[i].idProduct ==
+ le16_to_cpu(udev->descriptor.idProduct)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_debug("%s: using dynamic ID %04x:%04x\n", __func__,
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct));
+ af9035_properties[0].devices[0].cold_ids[0]->idVendor =
+ le16_to_cpu(udev->descriptor.idVendor);
+ af9035_properties[0].devices[0].cold_ids[0]->idProduct =
+ le16_to_cpu(udev->descriptor.idProduct);
+ }
+
+
+ for (i = 0; i < af9035_properties_count; i++) {
+ ret = dvb_usb_device_init(intf, &af9035_properties[i],
+ THIS_MODULE, &d, adapter_nr);
+
+ if (ret == -ENODEV)
+ continue;
+ else
+ break;
+ }
+
+ if (ret < 0)
+ goto err;
+
+ if (d) {
+ ret = af9035_init(d);
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver af9035_usb_driver = {
+ .name = "dvb_usb_af9035",
+ .probe = af9035_usb_probe,
+ .disconnect = dvb_usb_device_exit,
+ .id_table = af9035_id,
+};
+
+module_usb_driver(af9035_usb_driver);
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Afatech AF9035 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/af9035.h b/drivers/media/dvb/dvb-usb/af9035.h
new file mode 100644
index 000000000000..481a1a43dd2a
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/af9035.h
@@ -0,0 +1,113 @@
+/*
+ * Afatech AF9035 DVB USB driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef AF9035_H
+#define AF9035_H
+
+/* prefix for dvb-usb log messages */
+#define DVB_USB_LOG_PREFIX "af9035"
+
+#include "dvb-usb.h"
+#include "af9033.h"
+#include "tua9001.h"
+#include "fc0011.h"
+#include "mxl5007t.h"
+#include "tda18218.h"
+
+struct reg_val {
+ u32 reg;
+ u8 val;
+};
+
+struct reg_val_mask {
+ u32 reg;
+ u8 val;
+ u8 mask;
+};
+
+struct usb_req {
+ u8 cmd;
+ u8 mbox;
+ u8 wlen;
+ u8 *wbuf;
+ u8 rlen;
+ u8 *rbuf;
+};
+
+struct state {
+ bool dual_mode;
+
+ struct af9033_config af9033_config[2];
+};
+
+static const u32 clock_lut[] = {
+ 20480000, /* FPGA */
+ 16384000, /* 16.38 MHz */
+ 20480000, /* 20.48 MHz */
+ 36000000, /* 36.00 MHz */
+ 30000000, /* 30.00 MHz */
+ 26000000, /* 26.00 MHz */
+ 28000000, /* 28.00 MHz */
+ 32000000, /* 32.00 MHz */
+ 34000000, /* 34.00 MHz */
+ 24000000, /* 24.00 MHz */
+ 22000000, /* 22.00 MHz */
+ 12000000, /* 12.00 MHz */
+};
+
+static const u32 clock_lut_it9135[] = {
+ 12000000, /* 12.00 MHz */
+ 20480000, /* 20.48 MHz */
+ 36000000, /* 36.00 MHz */
+ 30000000, /* 30.00 MHz */
+ 26000000, /* 26.00 MHz */
+ 28000000, /* 28.00 MHz */
+ 32000000, /* 32.00 MHz */
+ 34000000, /* 34.00 MHz */
+ 24000000, /* 24.00 MHz */
+ 22000000, /* 22.00 MHz */
+};
+
+/* EEPROM locations */
+#define EEPROM_IR_MODE 0x430d
+#define EEPROM_DUAL_MODE 0x4326
+#define EEPROM_IR_TYPE 0x4329
+#define EEPROM_1_IFFREQ_L 0x432d
+#define EEPROM_1_IFFREQ_H 0x432e
+#define EEPROM_1_TUNER_ID 0x4331
+#define EEPROM_2_IFFREQ_L 0x433d
+#define EEPROM_2_IFFREQ_H 0x433e
+#define EEPROM_2_TUNER_ID 0x4341
+
+/* USB commands */
+#define CMD_MEM_RD 0x00
+#define CMD_MEM_WR 0x01
+#define CMD_I2C_RD 0x02
+#define CMD_I2C_WR 0x03
+#define CMD_IR_GET 0x18
+#define CMD_FW_DL 0x21
+#define CMD_FW_QUERYINFO 0x22
+#define CMD_FW_BOOT 0x23
+#define CMD_FW_DL_BEGIN 0x24
+#define CMD_FW_DL_END 0x25
+#define CMD_FW_SCATTER_WR 0x29
+
+#endif
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 02290c60f72f..7e9e00fae04e 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -32,7 +32,7 @@ int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ return -EINTR;
}
ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
@@ -118,7 +118,7 @@ int dib0700_set_gpio(struct dvb_usb_device *d, enum dib07x0_gpios gpio, u8 gpio_
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ return -EINTR;
}
st->buf[0] = REQUEST_SET_GPIO;
@@ -139,7 +139,7 @@ static int dib0700_set_usb_xfer_len(struct dvb_usb_device *d, u16 nb_ts_packets)
if (st->fw_version >= 0x10201) {
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ return -EINTR;
}
st->buf[0] = REQUEST_SET_USB_XFER_LEN;
@@ -178,7 +178,7 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
/* Ensure nobody else hits the i2c bus while we're sending our
sequence of messages, (such as the remote control thread) */
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- return -EAGAIN;
+ return -EINTR;
for (i = 0; i < num; i++) {
if (i == 0) {
@@ -228,7 +228,8 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
/* Write request */
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ mutex_unlock(&d->i2c_mutex);
+ return -EINTR;
}
st->buf[0] = REQUEST_NEW_I2C_WRITE;
st->buf[1] = msg[i].addr << 1;
@@ -271,10 +272,11 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
int i,len;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- return -EAGAIN;
+ return -EINTR;
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ mutex_unlock(&d->i2c_mutex);
+ return -EINTR;
}
for (i = 0; i < num; i++) {
@@ -369,7 +371,7 @@ static int dib0700_set_clock(struct dvb_usb_device *d, u8 en_pll,
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ return -EINTR;
}
st->buf[0] = REQUEST_SET_CLOCK;
@@ -401,7 +403,7 @@ int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz)
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ return -EINTR;
}
st->buf[0] = REQUEST_SET_I2C_PARAM;
@@ -561,7 +563,7 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
if (mutex_lock_interruptible(&adap->dev->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ return -EINTR;
}
st->buf[0] = REQUEST_ENABLE_VIDEO;
@@ -611,7 +613,7 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ return -EINTR;
}
st->buf[0] = REQUEST_SET_RC;
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index f9e966aa26e7..510001da6e83 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -3569,6 +3569,7 @@ struct usb_device_id dib0700_usb_id_table[] = {
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE7090E) },
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE7790E) },
/* 80 */{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE8096P) },
+ { USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_DTT_2) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -3832,7 +3833,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
},
- .num_device_descs = 11,
+ .num_device_descs = 12,
.devices = {
{ "DiBcom STK7070P reference design",
{ &dib0700_usb_id_table[15], NULL },
@@ -3878,6 +3879,10 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{ &dib0700_usb_id_table[50], NULL },
{ NULL },
},
+ { "Elgato EyeTV DTT rev. 2",
+ { &dib0700_usb_id_table[81], NULL },
+ { NULL },
+ },
},
.rc.core = {
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 397d8f232731..7a6160bf54ba 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -76,6 +76,11 @@
#define USB_PID_AFATECH_AF9005 0x9020
#define USB_PID_AFATECH_AF9015_9015 0x9015
#define USB_PID_AFATECH_AF9015_9016 0x9016
+#define USB_PID_AFATECH_AF9035_1000 0x1000
+#define USB_PID_AFATECH_AF9035_1001 0x1001
+#define USB_PID_AFATECH_AF9035_1002 0x1002
+#define USB_PID_AFATECH_AF9035_1003 0x1003
+#define USB_PID_AFATECH_AF9035_9035 0x9035
#define USB_PID_TREKSTOR_DVBT 0x901b
#define USB_VID_ALINK_DTU 0xf170
#define USB_PID_ANSONIC_DVBT_USB 0x6000
@@ -152,6 +157,7 @@
#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
#define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2 0x0069
+#define USB_PID_TERRATEC_CINERGY_T_STICK 0x0093
#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097
#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099
#define USB_PID_TWINHAN_VP7041_COLD 0x3201
@@ -221,6 +227,11 @@
#define USB_PID_AVERMEDIA_A850T 0x850b
#define USB_PID_AVERMEDIA_A805 0xa805
#define USB_PID_AVERMEDIA_A815M 0x815a
+#define USB_PID_AVERMEDIA_A835 0xa835
+#define USB_PID_AVERMEDIA_B835 0xb835
+#define USB_PID_AVERMEDIA_1867 0x1867
+#define USB_PID_AVERMEDIA_A867 0xa867
+#define USB_PID_AVERMEDIA_TWINSTAR 0x0825
#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
@@ -327,6 +338,7 @@
#define USB_PID_MYGICA_D689 0xd811
#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
#define USB_PID_ELGATO_EYETV_DTT 0x0021
+#define USB_PID_ELGATO_EYETV_DTT_2 0x003f
#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020
#define USB_PID_ELGATO_EYETV_SAT 0x002a
#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_COLD 0x5000
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
index 53a5c30b51b2..5c8f651344fc 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
@@ -80,6 +80,14 @@ static void dvb_usb_data_complete_204(struct usb_data_stream *stream, u8 *buffer
dvb_dmx_swfilter_204(&adap->demux, buffer, length);
}
+static void dvb_usb_data_complete_raw(struct usb_data_stream *stream,
+ u8 *buffer, size_t length)
+{
+ struct dvb_usb_adapter *adap = stream->user_priv;
+ if (adap->feedcount > 0 && adap->state & DVB_USB_ADAP_STATE_DVB)
+ dvb_dmx_swfilter_raw(&adap->demux, buffer, length);
+}
+
int dvb_usb_adapter_stream_init(struct dvb_usb_adapter *adap)
{
int i, ret = 0;
@@ -90,6 +98,10 @@ int dvb_usb_adapter_stream_init(struct dvb_usb_adapter *adap)
adap->fe_adap[i].stream.complete =
dvb_usb_data_complete_204;
else
+ if (adap->props.fe[i].caps & DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD)
+ adap->fe_adap[i].stream.complete =
+ dvb_usb_data_complete_raw;
+ else
adap->fe_adap[i].stream.complete = dvb_usb_data_complete;
adap->fe_adap[i].stream.user_priv = adap;
ret = usb_urb_init(&adap->fe_adap[i].stream,
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 6d7d13f9ce68..99f94409efa1 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -141,6 +141,7 @@ struct dvb_usb_adapter_fe_properties {
#define DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF 0x02
#define DVB_USB_ADAP_NEED_PID_FILTERING 0x04
#define DVB_USB_ADAP_RECEIVES_204_BYTE_TS 0x08
+#define DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD 0x10
int caps;
int pid_filter_count;
@@ -156,7 +157,7 @@ struct dvb_usb_adapter_fe_properties {
int size_of_priv;
};
-#define MAX_NO_OF_FE_PER_ADAP 2
+#define MAX_NO_OF_FE_PER_ADAP 3
struct dvb_usb_adapter_properties {
int size_of_priv;
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index 451c5a7adfb2..9382895b1b88 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -148,7 +148,7 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int i = 0, ret = 0;
+ int i = 0;
u8 buf6[] = {0x2c, 0x05, 0xc0, 0, 0, 0, 0};
u16 value;
@@ -162,7 +162,7 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
/* read stv0299 register */
value = msg[0].buf[0];/* register */
for (i = 0; i < msg[1].len; i++) {
- ret = dw210x_op_rw(d->udev, 0xb5, value + i, 0,
+ dw210x_op_rw(d->udev, 0xb5, value + i, 0,
buf6, 2, DW210X_READ_MSG);
msg[1].buf[i] = buf6[0];
}
@@ -174,7 +174,7 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
buf6[0] = 0x2a;
buf6[1] = msg[0].buf[0];
buf6[2] = msg[0].buf[1];
- ret = dw210x_op_rw(d->udev, 0xb2, 0, 0,
+ dw210x_op_rw(d->udev, 0xb2, 0, 0,
buf6, 3, DW210X_WRITE_MSG);
break;
case 0x60:
@@ -187,17 +187,17 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
buf6[4] = msg[0].buf[1];
buf6[5] = msg[0].buf[2];
buf6[6] = msg[0].buf[3];
- ret = dw210x_op_rw(d->udev, 0xb2, 0, 0,
+ dw210x_op_rw(d->udev, 0xb2, 0, 0,
buf6, 7, DW210X_WRITE_MSG);
} else {
/* read from tuner */
- ret = dw210x_op_rw(d->udev, 0xb5, 0, 0,
+ dw210x_op_rw(d->udev, 0xb5, 0, 0,
buf6, 1, DW210X_READ_MSG);
msg[0].buf[0] = buf6[0];
}
break;
case (DW2102_RC_QUERY):
- ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
+ dw210x_op_rw(d->udev, 0xb8, 0, 0,
buf6, 2, DW210X_READ_MSG);
msg[0].buf[0] = buf6[0];
msg[0].buf[1] = buf6[1];
@@ -205,7 +205,7 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
case (DW2102_VOLTAGE_CTRL):
buf6[0] = 0x30;
buf6[1] = msg[0].buf[0];
- ret = dw210x_op_rw(d->udev, 0xb2, 0, 0,
+ dw210x_op_rw(d->udev, 0xb2, 0, 0,
buf6, 2, DW210X_WRITE_MSG);
break;
}
@@ -221,7 +221,6 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
struct i2c_msg msg[], int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int ret = 0;
u8 buf6[] = {0, 0, 0, 0, 0, 0, 0};
if (!d)
@@ -235,10 +234,10 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
buf6[0] = msg[0].addr << 1;
buf6[1] = msg[0].len;
buf6[2] = msg[0].buf[0];
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
buf6, msg[0].len + 2, DW210X_WRITE_MSG);
/* read si2109 register */
- ret = dw210x_op_rw(d->udev, 0xc3, 0xd0, 0,
+ dw210x_op_rw(d->udev, 0xc3, 0xd0, 0,
buf6, msg[1].len + 2, DW210X_READ_MSG);
memcpy(msg[1].buf, buf6 + 2, msg[1].len);
@@ -250,11 +249,11 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
buf6[0] = msg[0].addr << 1;
buf6[1] = msg[0].len;
memcpy(buf6 + 2, msg[0].buf, msg[0].len);
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6,
msg[0].len + 2, DW210X_WRITE_MSG);
break;
case(DW2102_RC_QUERY):
- ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
+ dw210x_op_rw(d->udev, 0xb8, 0, 0,
buf6, 2, DW210X_READ_MSG);
msg[0].buf[0] = buf6[0];
msg[0].buf[1] = buf6[1];
@@ -262,7 +261,7 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
case(DW2102_VOLTAGE_CTRL):
buf6[0] = 0x30;
buf6[1] = msg[0].buf[0];
- ret = dw210x_op_rw(d->udev, 0xb2, 0, 0,
+ dw210x_op_rw(d->udev, 0xb2, 0, 0,
buf6, 2, DW210X_WRITE_MSG);
break;
}
@@ -276,7 +275,6 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int ret = 0;
if (!d)
return -ENODEV;
@@ -291,10 +289,10 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
obuf[2] = msg[0].buf[0];
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
obuf, msg[0].len + 2, DW210X_WRITE_MSG);
/* second read registers */
- ret = dw210x_op_rw(d->udev, 0xc3, 0xd1 , 0,
+ dw210x_op_rw(d->udev, 0xc3, 0xd1 , 0,
ibuf, msg[1].len + 2, DW210X_READ_MSG);
memcpy(msg[1].buf, ibuf + 2, msg[1].len);
@@ -308,7 +306,7 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
memcpy(obuf + 2, msg[0].buf, msg[0].len);
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
obuf, msg[0].len + 2, DW210X_WRITE_MSG);
break;
}
@@ -318,13 +316,13 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
memcpy(obuf + 2, msg[0].buf, msg[0].len);
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
obuf, msg[0].len + 2, DW210X_WRITE_MSG);
break;
}
case(DW2102_RC_QUERY): {
u8 ibuf[2];
- ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
+ dw210x_op_rw(d->udev, 0xb8, 0, 0,
ibuf, 2, DW210X_READ_MSG);
memcpy(msg[0].buf, ibuf , 2);
break;
@@ -333,7 +331,7 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
u8 obuf[2];
obuf[0] = 0x30;
obuf[1] = msg[0].buf[0];
- ret = dw210x_op_rw(d->udev, 0xb2, 0, 0,
+ dw210x_op_rw(d->udev, 0xb2, 0, 0,
obuf, 2, DW210X_WRITE_MSG);
break;
}
@@ -349,7 +347,6 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int ret = 0;
int len, i, j;
if (!d)
@@ -361,7 +358,7 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
switch (msg[j].addr) {
case(DW2102_RC_QUERY): {
u8 ibuf[2];
- ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
+ dw210x_op_rw(d->udev, 0xb8, 0, 0,
ibuf, 2, DW210X_READ_MSG);
memcpy(msg[j].buf, ibuf , 2);
break;
@@ -370,7 +367,7 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
u8 obuf[2];
obuf[0] = 0x30;
obuf[1] = msg[j].buf[0];
- ret = dw210x_op_rw(d->udev, 0xb2, 0, 0,
+ dw210x_op_rw(d->udev, 0xb2, 0, 0,
obuf, 2, DW210X_WRITE_MSG);
break;
}
@@ -382,7 +379,7 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
if (msg[j].flags == I2C_M_RD) {
/* read registers */
u8 ibuf[msg[j].len + 2];
- ret = dw210x_op_rw(d->udev, 0xc3,
+ dw210x_op_rw(d->udev, 0xc3,
(msg[j].addr << 1) + 1, 0,
ibuf, msg[j].len + 2,
DW210X_READ_MSG);
@@ -402,7 +399,7 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
do {
memcpy(obuf + 3, msg[j].buf + i,
(len > 16 ? 16 : len));
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
obuf, (len > 16 ? 16 : len) + 3,
DW210X_WRITE_MSG);
i += 16;
@@ -414,7 +411,7 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
obuf[0] = msg[j].addr << 1;
obuf[1] = msg[j].len;
memcpy(obuf + 2, msg[j].buf, msg[j].len);
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
obuf, msg[j].len + 2,
DW210X_WRITE_MSG);
}
@@ -432,7 +429,7 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int ret = 0, i;
+ int i;
if (!d)
return -ENODEV;
@@ -447,10 +444,10 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
obuf[2] = msg[0].buf[0];
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
obuf, msg[0].len + 2, DW210X_WRITE_MSG);
/* second read registers */
- ret = dw210x_op_rw(d->udev, 0xc3, 0x19 , 0,
+ dw210x_op_rw(d->udev, 0xc3, 0x19 , 0,
ibuf, msg[1].len + 2, DW210X_READ_MSG);
memcpy(msg[1].buf, ibuf + 2, msg[1].len);
@@ -465,13 +462,13 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
memcpy(obuf + 2, msg[0].buf, msg[0].len);
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
obuf, msg[0].len + 2, DW210X_WRITE_MSG);
break;
}
case(DW2102_RC_QUERY): {
u8 ibuf[2];
- ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
+ dw210x_op_rw(d->udev, 0xb8, 0, 0,
ibuf, 2, DW210X_READ_MSG);
memcpy(msg[0].buf, ibuf , 2);
break;
@@ -496,7 +493,6 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
struct usb_device *udev;
- int ret = 0;
int len, i, j;
if (!d)
@@ -509,7 +505,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
switch (msg[j].addr) {
case (DW2102_RC_QUERY): {
u8 ibuf[5];
- ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
+ dw210x_op_rw(d->udev, 0xb8, 0, 0,
ibuf, 5, DW210X_READ_MSG);
memcpy(msg[j].buf, ibuf + 3, 2);
break;
@@ -519,11 +515,11 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[0] = 1;
obuf[1] = msg[j].buf[1];/* off-on */
- ret = dw210x_op_rw(d->udev, 0x8a, 0, 0,
+ dw210x_op_rw(d->udev, 0x8a, 0, 0,
obuf, 2, DW210X_WRITE_MSG);
obuf[0] = 3;
obuf[1] = msg[j].buf[0];/* 13v-18v */
- ret = dw210x_op_rw(d->udev, 0x8a, 0, 0,
+ dw210x_op_rw(d->udev, 0x8a, 0, 0,
obuf, 2, DW210X_WRITE_MSG);
break;
}
@@ -532,7 +528,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[0] = 5;
obuf[1] = msg[j].buf[0];
- ret = dw210x_op_rw(d->udev, 0x8a, 0, 0,
+ dw210x_op_rw(d->udev, 0x8a, 0, 0,
obuf, 2, DW210X_WRITE_MSG);
break;
}
@@ -545,7 +541,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (msg[j].flags == I2C_M_RD) {
/* read registers */
u8 ibuf[msg[j].len];
- ret = dw210x_op_rw(d->udev, 0x91, 0, 0,
+ dw210x_op_rw(d->udev, 0x91, 0, 0,
ibuf, msg[j].len,
DW210X_READ_MSG);
memcpy(msg[j].buf, ibuf, msg[j].len);
@@ -563,7 +559,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
do {
memcpy(obuf + 3, msg[j].buf + i,
(len > 16 ? 16 : len));
- ret = dw210x_op_rw(d->udev, 0x80, 0, 0,
+ dw210x_op_rw(d->udev, 0x80, 0, 0,
obuf, (len > 16 ? 16 : len) + 3,
DW210X_WRITE_MSG);
i += 16;
@@ -575,7 +571,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[0] = msg[j + 1].len;
obuf[1] = (msg[j].addr << 1);
memcpy(obuf + 2, msg[j].buf, msg[j].len);
- ret = dw210x_op_rw(d->udev,
+ dw210x_op_rw(d->udev,
udev->descriptor.idProduct ==
0x7500 ? 0x92 : 0x90, 0, 0,
obuf, msg[j].len + 2,
@@ -587,7 +583,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[0] = msg[j].len + 1;
obuf[1] = (msg[j].addr << 1);
memcpy(obuf + 2, msg[j].buf, msg[j].len);
- ret = dw210x_op_rw(d->udev, 0x80, 0, 0,
+ dw210x_op_rw(d->udev, 0x80, 0, 0,
obuf, msg[j].len + 2,
DW210X_WRITE_MSG);
break;
diff --git a/drivers/media/dvb/dvb-usb/it913x.c b/drivers/media/dvb/dvb-usb/it913x.c
index 482d249ca7f3..6244fe9d1a3a 100644
--- a/drivers/media/dvb/dvb-usb/it913x.c
+++ b/drivers/media/dvb/dvb-usb/it913x.c
@@ -81,7 +81,7 @@ static int it913x_bulk_write(struct usb_device *dev,
for (i = 0; i < IT913X_RETRY; i++) {
ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe),
snd, len , &actual_l, IT913X_SND_TIMEOUT);
- if (ret == 0 || ret != -EBUSY || ret != -ETIMEDOUT)
+ if (ret != -EBUSY && ret != -ETIMEDOUT)
break;
}
@@ -99,7 +99,7 @@ static int it913x_bulk_read(struct usb_device *dev,
for (i = 0; i < IT913X_RETRY; i++) {
ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe),
rev, len , &actual_l, IT913X_RCV_TIMEOUT);
- if (ret == 0 || ret != -EBUSY || ret != -ETIMEDOUT)
+ if (ret != -EBUSY && ret != -ETIMEDOUT)
break;
}
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
index 5dde06d066ff..25d1031460f8 100644
--- a/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -373,7 +373,7 @@ static int lme2510_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff)
struct lme2510_state *st = adap->dev->priv;
static u8 clear_pid_reg[] = LME_ALL_PIDS;
static u8 rbuf[1];
- int ret;
+ int ret = 0;
deb_info(1, "PID Clearing Filter");
@@ -1205,14 +1205,13 @@ static int lme2510_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
- int ret = 0;
usb_reset_configuration(udev);
usb_set_interface(udev, intf->cur_altsetting->desc.bInterfaceNumber, 1);
if (udev->speed != USB_SPEED_HIGH) {
- ret = usb_reset_device(udev);
+ usb_reset_device(udev);
info("DEV Failed to connect in HIGH SPEED mode");
return -ENODEV;
}
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-tuner.c b/drivers/media/dvb/dvb-usb/mxl111sf-tuner.c
index 72db6eef4b9c..74da5bb1ce99 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf-tuner.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-tuner.c
@@ -284,6 +284,7 @@ static int mxl111sf_tuner_set_params(struct dvb_frontend *fe)
switch (delsys) {
case SYS_ATSC:
+ case SYS_ATSCMH:
bw = 0; /* ATSC */
break;
case SYS_DVBC_ANNEX_B:
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf.c b/drivers/media/dvb/dvb-usb/mxl111sf.c
index 81305de2fea5..cd842798f5af 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf.c
@@ -21,6 +21,7 @@
#include "mxl111sf-tuner.h"
#include "lgdt3305.h"
+#include "lg2160.h"
int dvb_usb_mxl111sf_debug;
module_param_named(debug, dvb_usb_mxl111sf_debug, int, 0644);
@@ -31,6 +32,10 @@ int dvb_usb_mxl111sf_isoc;
module_param_named(isoc, dvb_usb_mxl111sf_isoc, int, 0644);
MODULE_PARM_DESC(isoc, "enable usb isoc xfer (0=bulk, 1=isoc).");
+int dvb_usb_mxl111sf_spi;
+module_param_named(spi, dvb_usb_mxl111sf_spi, int, 0644);
+MODULE_PARM_DESC(spi, "use spi rather than tp for data xfer (0=tp, 1=spi).");
+
#define ANT_PATH_AUTO 0
#define ANT_PATH_EXTERNAL 1
#define ANT_PATH_INTERNAL 2
@@ -340,7 +345,6 @@ static int mxl111sf_ep6_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
struct mxl111sf_state *state = d->priv;
struct mxl111sf_adap_state *adap_state = adap->fe_adap[adap->active_fe].priv;
int ret = 0;
- u8 tmp;
deb_info("%s(%d)\n", __func__, onoff);
@@ -361,6 +365,33 @@ static int mxl111sf_ep6_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
return ret;
}
+static int mxl111sf_ep5_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+ int ret = 0;
+
+ deb_info("%s(%d)\n", __func__, onoff);
+
+ if (onoff) {
+ ret = mxl111sf_enable_usb_output(state);
+ mxl_fail(ret);
+
+ ret = mxl111sf_init_i2s_port(state, 200);
+ mxl_fail(ret);
+ ret = mxl111sf_config_i2s(state, 0, 15);
+ mxl_fail(ret);
+ } else {
+ ret = mxl111sf_disable_i2s_port(state);
+ mxl_fail(ret);
+ }
+ if (state->chip_rev > MXL111SF_V6)
+ ret = mxl111sf_config_spi(state, onoff);
+ mxl_fail(ret);
+
+ return ret;
+}
+
static int mxl111sf_ep4_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
struct dvb_usb_device *d = adap->dev;
@@ -453,6 +484,255 @@ fail:
return ret;
}
+static struct lg2160_config hauppauge_lg2160_config = {
+ .lg_chip = LG2160,
+ .i2c_addr = 0x1c >> 1,
+ .deny_i2c_rptr = 1,
+ .spectral_inversion = 0,
+ .if_khz = 6000,
+};
+
+static int mxl111sf_lg2160_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+ int fe_id = adap->num_frontends_initialized;
+ struct mxl111sf_adap_state *adap_state = adap->fe_adap[fe_id].priv;
+ int ret;
+
+ deb_adv("%s()\n", __func__);
+
+ /* save a pointer to the dvb_usb_device in device state */
+ state->d = d;
+ adap_state->alt_mode = (dvb_usb_mxl111sf_isoc) ? 2 : 1;
+ state->alt_mode = adap_state->alt_mode;
+
+ if (usb_set_interface(adap->dev->udev, 0, state->alt_mode) < 0)
+ err("set interface failed");
+
+ state->gpio_mode = MXL111SF_GPIO_MOD_MH;
+ adap_state->gpio_mode = state->gpio_mode;
+ adap_state->device_mode = MXL_TUNER_MODE;
+ adap_state->ep6_clockphase = 1;
+
+ ret = mxl1x1sf_soft_reset(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_init_tuner_demod(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl1x1sf_set_device_mode(state, adap_state->device_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_enable_usb_output(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl1x1sf_top_master_ctrl(state, 1);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_init_port_expander(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_gpio_mode_switch(state, state->gpio_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = get_chip_info(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ adap->fe_adap[fe_id].fe = dvb_attach(lg2160_attach,
+ &hauppauge_lg2160_config,
+ &adap->dev->i2c_adap);
+ if (adap->fe_adap[fe_id].fe) {
+ adap_state->fe_init = adap->fe_adap[fe_id].fe->ops.init;
+ adap->fe_adap[fe_id].fe->ops.init = mxl111sf_adap_fe_init;
+ adap_state->fe_sleep = adap->fe_adap[fe_id].fe->ops.sleep;
+ adap->fe_adap[fe_id].fe->ops.sleep = mxl111sf_adap_fe_sleep;
+ return 0;
+ }
+ ret = -EIO;
+fail:
+ return ret;
+}
+
+static struct lg2160_config hauppauge_lg2161_1019_config = {
+ .lg_chip = LG2161_1019,
+ .i2c_addr = 0x1c >> 1,
+ .deny_i2c_rptr = 1,
+ .spectral_inversion = 0,
+ .if_khz = 6000,
+ .output_if = 2, /* LG2161_OIF_SPI_MAS */
+};
+
+static struct lg2160_config hauppauge_lg2161_1040_config = {
+ .lg_chip = LG2161_1040,
+ .i2c_addr = 0x1c >> 1,
+ .deny_i2c_rptr = 1,
+ .spectral_inversion = 0,
+ .if_khz = 6000,
+ .output_if = 4, /* LG2161_OIF_SPI_MAS */
+};
+
+static int mxl111sf_lg2161_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+ int fe_id = adap->num_frontends_initialized;
+ struct mxl111sf_adap_state *adap_state = adap->fe_adap[fe_id].priv;
+ int ret;
+
+ deb_adv("%s()\n", __func__);
+
+ /* save a pointer to the dvb_usb_device in device state */
+ state->d = d;
+ adap_state->alt_mode = (dvb_usb_mxl111sf_isoc) ? 2 : 1;
+ state->alt_mode = adap_state->alt_mode;
+
+ if (usb_set_interface(adap->dev->udev, 0, state->alt_mode) < 0)
+ err("set interface failed");
+
+ state->gpio_mode = MXL111SF_GPIO_MOD_MH;
+ adap_state->gpio_mode = state->gpio_mode;
+ adap_state->device_mode = MXL_TUNER_MODE;
+ adap_state->ep6_clockphase = 1;
+
+ ret = mxl1x1sf_soft_reset(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_init_tuner_demod(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl1x1sf_set_device_mode(state, adap_state->device_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_enable_usb_output(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl1x1sf_top_master_ctrl(state, 1);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_init_port_expander(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_gpio_mode_switch(state, state->gpio_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = get_chip_info(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ adap->fe_adap[fe_id].fe = dvb_attach(lg2160_attach,
+ (MXL111SF_V8_200 == state->chip_rev) ?
+ &hauppauge_lg2161_1040_config :
+ &hauppauge_lg2161_1019_config,
+ &adap->dev->i2c_adap);
+ if (adap->fe_adap[fe_id].fe) {
+ adap_state->fe_init = adap->fe_adap[fe_id].fe->ops.init;
+ adap->fe_adap[fe_id].fe->ops.init = mxl111sf_adap_fe_init;
+ adap_state->fe_sleep = adap->fe_adap[fe_id].fe->ops.sleep;
+ adap->fe_adap[fe_id].fe->ops.sleep = mxl111sf_adap_fe_sleep;
+ return 0;
+ }
+ ret = -EIO;
+fail:
+ return ret;
+}
+
+static struct lg2160_config hauppauge_lg2161_1019_ep6_config = {
+ .lg_chip = LG2161_1019,
+ .i2c_addr = 0x1c >> 1,
+ .deny_i2c_rptr = 1,
+ .spectral_inversion = 0,
+ .if_khz = 6000,
+ .output_if = 1, /* LG2161_OIF_SERIAL_TS */
+};
+
+static struct lg2160_config hauppauge_lg2161_1040_ep6_config = {
+ .lg_chip = LG2161_1040,
+ .i2c_addr = 0x1c >> 1,
+ .deny_i2c_rptr = 1,
+ .spectral_inversion = 0,
+ .if_khz = 6000,
+ .output_if = 7, /* LG2161_OIF_SERIAL_TS */
+};
+
+static int mxl111sf_lg2161_ep6_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+ int fe_id = adap->num_frontends_initialized;
+ struct mxl111sf_adap_state *adap_state = adap->fe_adap[fe_id].priv;
+ int ret;
+
+ deb_adv("%s()\n", __func__);
+
+ /* save a pointer to the dvb_usb_device in device state */
+ state->d = d;
+ adap_state->alt_mode = (dvb_usb_mxl111sf_isoc) ? 2 : 1;
+ state->alt_mode = adap_state->alt_mode;
+
+ if (usb_set_interface(adap->dev->udev, 0, state->alt_mode) < 0)
+ err("set interface failed");
+
+ state->gpio_mode = MXL111SF_GPIO_MOD_MH;
+ adap_state->gpio_mode = state->gpio_mode;
+ adap_state->device_mode = MXL_TUNER_MODE;
+ adap_state->ep6_clockphase = 0;
+
+ ret = mxl1x1sf_soft_reset(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_init_tuner_demod(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl1x1sf_set_device_mode(state, adap_state->device_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_enable_usb_output(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl1x1sf_top_master_ctrl(state, 1);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_init_port_expander(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_gpio_mode_switch(state, state->gpio_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = get_chip_info(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ adap->fe_adap[fe_id].fe = dvb_attach(lg2160_attach,
+ (MXL111SF_V8_200 == state->chip_rev) ?
+ &hauppauge_lg2161_1040_ep6_config :
+ &hauppauge_lg2161_1019_ep6_config,
+ &adap->dev->i2c_adap);
+ if (adap->fe_adap[fe_id].fe) {
+ adap_state->fe_init = adap->fe_adap[fe_id].fe->ops.init;
+ adap->fe_adap[fe_id].fe->ops.init = mxl111sf_adap_fe_init;
+ adap_state->fe_sleep = adap->fe_adap[fe_id].fe->ops.sleep;
+ adap->fe_adap[fe_id].fe->ops.sleep = mxl111sf_adap_fe_sleep;
+ return 0;
+ }
+ ret = -EIO;
+fail:
+ return ret;
+}
+
static struct mxl111sf_demod_config mxl_demod_config = {
.read_reg = mxl111sf_read_reg,
.write_reg = mxl111sf_write_reg,
@@ -650,6 +930,18 @@ static struct dvb_usb_device_properties mxl111sf_dvbt_bulk_properties;
static struct dvb_usb_device_properties mxl111sf_dvbt_isoc_properties;
static struct dvb_usb_device_properties mxl111sf_atsc_bulk_properties;
static struct dvb_usb_device_properties mxl111sf_atsc_isoc_properties;
+static struct dvb_usb_device_properties mxl111sf_atsc_mh_bulk_properties;
+static struct dvb_usb_device_properties mxl111sf_atsc_mh_isoc_properties;
+static struct dvb_usb_device_properties mxl111sf_mh_bulk_properties;
+static struct dvb_usb_device_properties mxl111sf_mh_isoc_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_spi_bulk_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_spi_isoc_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_tp_bulk_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_tp_isoc_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_mh_spi_bulk_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_mh_spi_isoc_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_mh_tp_bulk_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_mh_tp_isoc_properties;
static int mxl111sf_probe(struct usb_interface *intf,
const struct usb_device_id *id)
@@ -664,12 +956,50 @@ static int mxl111sf_probe(struct usb_interface *intf,
THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf,
&mxl111sf_atsc_isoc_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_atsc_mh_isoc_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mh_isoc_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ ((dvb_usb_mxl111sf_spi) &&
+ (0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_spi_isoc_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_mh_spi_isoc_properties,
+ THIS_MODULE, &d, adapter_nr))) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_tp_isoc_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_mh_tp_isoc_properties,
THIS_MODULE, &d, adapter_nr))) ||
0 == dvb_usb_device_init(intf,
&mxl111sf_dvbt_bulk_properties,
THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf,
&mxl111sf_atsc_bulk_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_atsc_mh_bulk_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mh_bulk_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ ((dvb_usb_mxl111sf_spi) &&
+ (0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_spi_bulk_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_mh_spi_bulk_properties,
+ THIS_MODULE, &d, adapter_nr))) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_tp_bulk_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_mh_tp_bulk_properties,
THIS_MODULE, &d, adapter_nr) || 0) {
struct mxl111sf_state *state = d->priv;
@@ -787,6 +1117,36 @@ MODULE_DEVICE_TABLE(usb, mxl111sf_table);
} \
}
+#define MXL111SF_EP5_BULK_STREAMING_CONFIG \
+ .size_of_priv = sizeof(struct mxl111sf_adap_state), \
+ .streaming_ctrl = mxl111sf_ep5_streaming_ctrl, \
+ .stream = { \
+ .type = USB_BULK, \
+ .count = 5, \
+ .endpoint = 0x05, \
+ .u = { \
+ .bulk = { \
+ .buffersize = 8192, \
+ } \
+ } \
+ }
+
+#define MXL111SF_EP5_ISOC_STREAMING_CONFIG \
+ .size_of_priv = sizeof(struct mxl111sf_adap_state), \
+ .streaming_ctrl = mxl111sf_ep5_streaming_ctrl, \
+ .stream = { \
+ .type = USB_ISOC, \
+ .count = 5, \
+ .endpoint = 0x05, \
+ .u = { \
+ .isoc = { \
+ .framesperurb = 96, \
+ .framesize = 200, \
+ .interval = 1, \
+ } \
+ } \
+ }
+
#define MXL111SF_EP6_BULK_STREAMING_CONFIG \
.size_of_priv = sizeof(struct mxl111sf_adap_state), \
.streaming_ctrl = mxl111sf_ep6_streaming_ctrl, \
@@ -848,7 +1208,7 @@ static struct dvb_usb_device_properties mxl111sf_dvbt_bulk_properties = {
} },
},
},
- .num_device_descs = 4,
+ .num_device_descs = 3,
.devices = {
{ "Hauppauge 126xxx DVBT (bulk)",
{ NULL },
@@ -866,11 +1226,6 @@ static struct dvb_usb_device_properties mxl111sf_dvbt_bulk_properties = {
&mxl111sf_table[24], &mxl111sf_table[26],
NULL },
},
- { "Hauppauge 126xxx (tp-bulk)",
- { NULL },
- { &mxl111sf_table[28], &mxl111sf_table[30],
- NULL },
- },
}
};
@@ -890,7 +1245,7 @@ static struct dvb_usb_device_properties mxl111sf_dvbt_isoc_properties = {
} },
},
},
- .num_device_descs = 4,
+ .num_device_descs = 3,
.devices = {
{ "Hauppauge 126xxx DVBT (isoc)",
{ NULL },
@@ -908,11 +1263,6 @@ static struct dvb_usb_device_properties mxl111sf_dvbt_isoc_properties = {
&mxl111sf_table[24], &mxl111sf_table[26],
NULL },
},
- { "Hauppauge 126xxx (tp-isoc)",
- { NULL },
- { &mxl111sf_table[28], &mxl111sf_table[30],
- NULL },
- },
}
};
@@ -923,33 +1273,159 @@ static struct dvb_usb_device_properties mxl111sf_atsc_bulk_properties = {
.adapter = {
{
.fe_ioctl_override = mxl111sf_fe_ioctl_override,
- .num_frontends = 2,
+ .num_frontends = 1,
.fe = {{
.frontend_attach = mxl111sf_lgdt3305_frontend_attach,
.tuner_attach = mxl111sf_attach_tuner,
MXL111SF_EP6_BULK_STREAMING_CONFIG,
+ }},
},
+ },
+ .num_device_descs = 2,
+ .devices = {
+ { "Hauppauge 126xxx ATSC (bulk)",
+ { NULL },
+ { &mxl111sf_table[1], &mxl111sf_table[5],
+ NULL },
+ },
+ { "Hauppauge 117xxx ATSC (bulk)",
+ { NULL },
+ { &mxl111sf_table[12],
+ NULL },
+ },
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_atsc_isoc_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
{
- .frontend_attach = mxl111sf_attach_demod,
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 1,
+ .fe = {{
+ .frontend_attach = mxl111sf_lgdt3305_frontend_attach,
.tuner_attach = mxl111sf_attach_tuner,
- MXL111SF_EP4_BULK_STREAMING_CONFIG,
+ MXL111SF_EP6_ISOC_STREAMING_CONFIG,
}},
},
},
- .num_device_descs = 6,
+ .num_device_descs = 2,
.devices = {
- { "Hauppauge 126xxx ATSC (bulk)",
+ { "Hauppauge 126xxx ATSC (isoc)",
{ NULL },
{ &mxl111sf_table[1], &mxl111sf_table[5],
NULL },
},
- { "Hauppauge 117xxx ATSC (bulk)",
+ { "Hauppauge 117xxx ATSC (isoc)",
{ NULL },
{ &mxl111sf_table[12],
NULL },
},
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_mh_bulk_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2160_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_BULK_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 2,
+ .devices = {
+ { "HCW 126xxx (bulk)",
+ { NULL },
+ { &mxl111sf_table[2], &mxl111sf_table[6],
+ NULL },
+ },
+ { "HCW 117xxx (bulk)",
+ { NULL },
+ { &mxl111sf_table[13],
+ NULL },
+ },
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_mh_isoc_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2160_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_ISOC_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 2,
+ .devices = {
+ { "HCW 126xxx (isoc)",
+ { NULL },
+ { &mxl111sf_table[2], &mxl111sf_table[6],
+ NULL },
+ },
+ { "HCW 117xxx (isoc)",
+ { NULL },
+ { &mxl111sf_table[13],
+ NULL },
+ },
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_atsc_mh_bulk_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 3,
+ .fe = {{
+ .frontend_attach = mxl111sf_lgdt3305_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_BULK_STREAMING_CONFIG,
+ },
+ {
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_BULK_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2160_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_BULK_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 2,
+ .devices = {
{ "Hauppauge 126xxx ATSC+ (bulk)",
{ NULL },
{ &mxl111sf_table[0], &mxl111sf_table[3],
@@ -963,13 +1439,96 @@ static struct dvb_usb_device_properties mxl111sf_atsc_bulk_properties = {
&mxl111sf_table[32], &mxl111sf_table[33],
NULL },
},
- { "Hauppauge Mercury (tp-bulk)",
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_atsc_mh_isoc_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 3,
+ .fe = {{
+ .frontend_attach = mxl111sf_lgdt3305_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_ISOC_STREAMING_CONFIG,
+ },
+ {
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_ISOC_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2160_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_ISOC_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 2,
+ .devices = {
+ { "Hauppauge 126xxx ATSC+ (isoc)",
+ { NULL },
+ { &mxl111sf_table[0], &mxl111sf_table[3],
+ &mxl111sf_table[7], &mxl111sf_table[9],
+ &mxl111sf_table[10], NULL },
+ },
+ { "Hauppauge 117xxx ATSC+ (isoc)",
+ { NULL },
+ { &mxl111sf_table[11], &mxl111sf_table[14],
+ &mxl111sf_table[16], &mxl111sf_table[17],
+ &mxl111sf_table[32], &mxl111sf_table[33],
+ NULL },
+ },
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_mercury_spi_bulk_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 3,
+ .fe = {{
+ .frontend_attach = mxl111sf_lgdt3305_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_BULK_STREAMING_CONFIG,
+ },
+ {
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_BULK_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_BULK_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 2,
+ .devices = {
+ { "Hauppauge Mercury (spi-bulk)",
{ NULL },
{ &mxl111sf_table[19], &mxl111sf_table[21],
&mxl111sf_table[23], &mxl111sf_table[25],
- &mxl111sf_table[27], NULL },
+ NULL },
},
- { "Hauppauge WinTV-Aero-M",
+ { "Hauppauge WinTV-Aero-M (spi-bulk)",
{ NULL },
{ &mxl111sf_table[29], &mxl111sf_table[31],
NULL },
@@ -977,14 +1536,14 @@ static struct dvb_usb_device_properties mxl111sf_atsc_bulk_properties = {
}
};
-static struct dvb_usb_device_properties mxl111sf_atsc_isoc_properties = {
+static struct dvb_usb_device_properties mxl111sf_mercury_spi_isoc_properties = {
MXL111SF_DEFAULT_DEVICE_PROPERTIES,
.num_adapters = 1,
.adapter = {
{
.fe_ioctl_override = mxl111sf_fe_ioctl_override,
- .num_frontends = 2,
+ .num_frontends = 3,
.fe = {{
.frontend_attach = mxl111sf_lgdt3305_frontend_attach,
.tuner_attach = mxl111sf_attach_tuner,
@@ -996,34 +1555,111 @@ static struct dvb_usb_device_properties mxl111sf_atsc_isoc_properties = {
.tuner_attach = mxl111sf_attach_tuner,
MXL111SF_EP4_ISOC_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_ISOC_STREAMING_CONFIG,
}},
},
},
- .num_device_descs = 6,
+ .num_device_descs = 2,
.devices = {
- { "Hauppauge 126xxx ATSC (isoc)",
+ { "Hauppauge Mercury (spi-isoc)",
{ NULL },
- { &mxl111sf_table[1], &mxl111sf_table[5],
+ { &mxl111sf_table[19], &mxl111sf_table[21],
+ &mxl111sf_table[23], &mxl111sf_table[25],
NULL },
},
- { "Hauppauge 117xxx ATSC (isoc)",
+ { "Hauppauge WinTV-Aero-M (spi-isoc)",
{ NULL },
- { &mxl111sf_table[12],
+ { &mxl111sf_table[29], &mxl111sf_table[31],
NULL },
},
- { "Hauppauge 126xxx ATSC+ (isoc)",
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_mercury_tp_bulk_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 3,
+ .fe = {{
+ .frontend_attach = mxl111sf_lgdt3305_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_BULK_STREAMING_CONFIG,
+ },
+ {
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_BULK_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_ep6_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_BULK_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 2,
+ .devices = {
+ { "Hauppauge Mercury (tp-bulk)",
{ NULL },
- { &mxl111sf_table[0], &mxl111sf_table[3],
- &mxl111sf_table[7], &mxl111sf_table[9],
- &mxl111sf_table[10], NULL },
+ { &mxl111sf_table[19], &mxl111sf_table[21],
+ &mxl111sf_table[23], &mxl111sf_table[25],
+ &mxl111sf_table[27], NULL },
},
- { "Hauppauge 117xxx ATSC+ (isoc)",
+ { "Hauppauge WinTV-Aero-M",
{ NULL },
- { &mxl111sf_table[11], &mxl111sf_table[14],
- &mxl111sf_table[16], &mxl111sf_table[17],
- &mxl111sf_table[32], &mxl111sf_table[33],
+ { &mxl111sf_table[29], &mxl111sf_table[31],
NULL },
},
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_mercury_tp_isoc_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 3,
+ .fe = {{
+ .frontend_attach = mxl111sf_lgdt3305_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_ISOC_STREAMING_CONFIG,
+ },
+ {
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_ISOC_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_ep6_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_ISOC_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 2,
+ .devices = {
{ "Hauppauge Mercury (tp-isoc)",
{ NULL },
{ &mxl111sf_table[19], &mxl111sf_table[21],
@@ -1038,6 +1674,146 @@ static struct dvb_usb_device_properties mxl111sf_atsc_isoc_properties = {
}
};
+static
+struct dvb_usb_device_properties mxl111sf_mercury_mh_tp_bulk_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 2,
+ .fe = {{
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_BULK_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_ep6_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_BULK_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 1,
+ .devices = {
+ { "Hauppauge 126xxx (tp-bulk)",
+ { NULL },
+ { &mxl111sf_table[28], &mxl111sf_table[30],
+ NULL },
+ },
+ }
+};
+
+static
+struct dvb_usb_device_properties mxl111sf_mercury_mh_tp_isoc_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 2,
+ .fe = {{
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_ISOC_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_ep6_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_ISOC_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 1,
+ .devices = {
+ { "Hauppauge 126xxx (tp-isoc)",
+ { NULL },
+ { &mxl111sf_table[28], &mxl111sf_table[30],
+ NULL },
+ },
+ }
+};
+
+static
+struct dvb_usb_device_properties mxl111sf_mercury_mh_spi_bulk_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 2,
+ .fe = {{
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_BULK_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_BULK_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 1,
+ .devices = {
+ { "Hauppauge 126xxx (spi-bulk)",
+ { NULL },
+ { &mxl111sf_table[28], &mxl111sf_table[30],
+ NULL },
+ },
+ }
+};
+
+static
+struct dvb_usb_device_properties mxl111sf_mercury_mh_spi_isoc_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 2,
+ .fe = {{
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_ISOC_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_ISOC_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 1,
+ .devices = {
+ { "Hauppauge 126xxx (spi-isoc)",
+ { NULL },
+ { &mxl111sf_table[28], &mxl111sf_table[30],
+ NULL },
+ },
+ }
+};
+
static struct usb_driver mxl111sf_driver = {
.name = "dvb_usb_mxl111sf",
.probe = mxl111sf_probe,
diff --git a/drivers/media/dvb/dvb-usb/rtl28xxu.c b/drivers/media/dvb/dvb-usb/rtl28xxu.c
index 8f4736a10fc8..41e1f5537f44 100644
--- a/drivers/media/dvb/dvb-usb/rtl28xxu.c
+++ b/drivers/media/dvb/dvb-usb/rtl28xxu.c
@@ -322,6 +322,9 @@ static int rtl2831u_frontend_attach(struct dvb_usb_adapter *adap)
* since some demod parameters need to be set according to the tuner.
*/
+ /* demod needs some time to wake up */
+ msleep(20);
+
/* open demod I2C gate */
ret = rtl28xxu_ctrl_msg(adap->dev, &req_gate);
if (ret)
@@ -909,6 +912,8 @@ static int rtl28xxu_probe(struct usb_interface *intf,
int ret, i;
int properties_count = ARRAY_SIZE(rtl28xxu_properties);
struct dvb_usb_device *d;
+ struct usb_device *udev;
+ bool found;
deb_info("%s: interface=%d\n", __func__,
intf->cur_altsetting->desc.bInterfaceNumber);
@@ -916,6 +921,29 @@ static int rtl28xxu_probe(struct usb_interface *intf,
if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
return 0;
+ /* Dynamic USB ID support. Replaces the first device ID with the current one. */
+ udev = interface_to_usbdev(intf);
+
+ for (i = 0, found = false; i < ARRAY_SIZE(rtl28xxu_table) - 1; i++) {
+ if (rtl28xxu_table[i].idVendor ==
+ le16_to_cpu(udev->descriptor.idVendor) &&
+ rtl28xxu_table[i].idProduct ==
+ le16_to_cpu(udev->descriptor.idProduct)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ deb_info("%s: using dynamic ID %04x:%04x\n", __func__,
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct));
+ rtl28xxu_properties[0].devices[0].warm_ids[0]->idVendor =
+ le16_to_cpu(udev->descriptor.idVendor);
+ rtl28xxu_properties[0].devices[0].warm_ids[0]->idProduct =
+ le16_to_cpu(udev->descriptor.idProduct);
+ }
+
for (i = 0; i < properties_count; i++) {
ret = dvb_usb_device_init(intf, &rtl28xxu_properties[i],
THIS_MODULE, &d, adapter_nr);
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 21246707fbfb..b98ebb264e29 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -531,6 +531,14 @@ config DVB_LGDT3305
An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
to support this frontend.
+config DVB_LG2160
+ tristate "LG Electronics LG216x based"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ An ATSC/MH demodulator module. Say Y when you want
+ to support this frontend.
+
config DVB_S5H1409
tristate "Samsung S5H1409 based"
depends on DVB_CORE && I2C
@@ -540,12 +548,26 @@ config DVB_S5H1409
to support this frontend.
config DVB_AU8522
- tristate "Auvitek AU8522 based"
- depends on DVB_CORE && I2C && VIDEO_V4L2
+ depends on I2C
+ tristate
+
+config DVB_AU8522_DTV
+ tristate "Auvitek AU8522 based DTV demod"
+ depends on DVB_CORE && I2C
+ select DVB_AU8522
default m if DVB_FE_CUSTOMISE
help
- An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
- to support this frontend.
+ An ATSC 8VSB, QAM64/256 & NTSC demodulator module. Say Y when
+ you want to enable DTV demodulation support for this frontend.
+
+config DVB_AU8522_V4L
+ tristate "Auvitek AU8522 based ATV demod"
+ depends on VIDEO_V4L2 && I2C
+ select DVB_AU8522
+ default m if DVB_FE_CUSTOMISE
+ help
+ An ATSC 8VSB, QAM64/256 & NTSC demodulator module. Say Y when
+ you want to enable ATV demodulation support for this frontend.
config DVB_S5H1411
tristate "Samsung S5H1411 based"
@@ -713,6 +735,11 @@ config DVB_M88RS2000
A DVB-S tuner module.
Say Y when you want to support this frontend.
+config DVB_AF9033
+ tristate "Afatech AF9033 DVB-T demodulator"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+
comment "Tools to develop new frontends"
config DVB_DUMMY_FE
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 86fa808bf589..cd1ac2fd5774 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -7,7 +7,6 @@ ccflags-y += -I$(srctree)/drivers/media/common/tuners/
stb0899-objs = stb0899_drv.o stb0899_algo.o
stv0900-objs = stv0900_core.o stv0900_sw.o
-au8522-objs = au8522_dig.o au8522_decoder.o
drxd-objs = drxd_firm.o drxd_hard.o
cxd2820r-objs = cxd2820r_core.o cxd2820r_c.o cxd2820r_t.o cxd2820r_t2.o
drxk-objs := drxk_hard.o
@@ -50,6 +49,7 @@ obj-$(CONFIG_DVB_BCM3510) += bcm3510.o
obj-$(CONFIG_DVB_S5H1420) += s5h1420.o
obj-$(CONFIG_DVB_LGDT330X) += lgdt330x.o
obj-$(CONFIG_DVB_LGDT3305) += lgdt3305.o
+obj-$(CONFIG_DVB_LG2160) += lg2160.o
obj-$(CONFIG_DVB_CX24123) += cx24123.o
obj-$(CONFIG_DVB_LNBP21) += lnbp21.o
obj-$(CONFIG_DVB_LNBP22) += lnbp22.o
@@ -63,7 +63,9 @@ obj-$(CONFIG_DVB_TUNER_DIB0090) += dib0090.o
obj-$(CONFIG_DVB_TUA6100) += tua6100.o
obj-$(CONFIG_DVB_S5H1409) += s5h1409.o
obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o
-obj-$(CONFIG_DVB_AU8522) += au8522.o
+obj-$(CONFIG_DVB_AU8522) += au8522_common.o
+obj-$(CONFIG_DVB_AU8522_DTV) += au8522_dig.o
+obj-$(CONFIG_DVB_AU8522_V4L) += au8522_decoder.o
obj-$(CONFIG_DVB_TDA10048) += tda10048.o
obj-$(CONFIG_DVB_TUNER_CX24113) += cx24113.o
obj-$(CONFIG_DVB_S5H1411) += s5h1411.o
@@ -98,4 +100,5 @@ obj-$(CONFIG_DVB_A8293) += a8293.o
obj-$(CONFIG_DVB_TDA10071) += tda10071.o
obj-$(CONFIG_DVB_RTL2830) += rtl2830.o
obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o
+obj-$(CONFIG_DVB_AF9033) += af9033.o
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c
index 6bcbcf543b38..5bc570d77846 100644
--- a/drivers/media/dvb/frontends/af9013.c
+++ b/drivers/media/dvb/frontends/af9013.c
@@ -514,7 +514,6 @@ err:
static void af9013_statistics_work(struct work_struct *work)
{
- int ret;
struct af9013_state *state = container_of(work,
struct af9013_state, statistics_work.work);
unsigned int next_msec;
@@ -530,27 +529,27 @@ static void af9013_statistics_work(struct work_struct *work)
default:
state->statistics_step = 0;
case 0:
- ret = af9013_statistics_signal_strength(&state->fe);
+ af9013_statistics_signal_strength(&state->fe);
state->statistics_step++;
next_msec = 300;
break;
case 1:
- ret = af9013_statistics_snr_start(&state->fe);
+ af9013_statistics_snr_start(&state->fe);
state->statistics_step++;
next_msec = 200;
break;
case 2:
- ret = af9013_statistics_ber_unc_start(&state->fe);
+ af9013_statistics_ber_unc_start(&state->fe);
state->statistics_step++;
next_msec = 1000;
break;
case 3:
- ret = af9013_statistics_snr_result(&state->fe);
+ af9013_statistics_snr_result(&state->fe);
state->statistics_step++;
next_msec = 400;
break;
case 4:
- ret = af9013_statistics_ber_unc_result(&state->fe);
+ af9013_statistics_ber_unc_result(&state->fe);
state->statistics_step++;
next_msec = 100;
break;
@@ -558,8 +557,6 @@ static void af9013_statistics_work(struct work_struct *work)
schedule_delayed_work(&state->statistics_work,
msecs_to_jiffies(next_msec));
-
- return;
}
static int af9013_get_tune_settings(struct dvb_frontend *fe,
diff --git a/drivers/media/dvb/frontends/af9033.c b/drivers/media/dvb/frontends/af9033.c
new file mode 100644
index 000000000000..a38998286260
--- /dev/null
+++ b/drivers/media/dvb/frontends/af9033.c
@@ -0,0 +1,980 @@
+/*
+ * Afatech AF9033 demodulator driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "af9033_priv.h"
+
+struct af9033_state {
+ struct i2c_adapter *i2c;
+ struct dvb_frontend fe;
+ struct af9033_config cfg;
+
+ u32 bandwidth_hz;
+ bool ts_mode_parallel;
+ bool ts_mode_serial;
+
+ u32 ber;
+ u32 ucb;
+ unsigned long last_stat_check;
+};
+
+/* write multiple registers */
+static int af9033_wr_regs(struct af9033_state *state, u32 reg, const u8 *val,
+ int len)
+{
+ int ret;
+ u8 buf[3 + len];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = state->cfg.i2c_addr,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
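+ /* 24-bit register address, sent big-endian, followed by the payload */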
+ buf[0] = (reg >> 16) & 0xff;
+ buf[1] = (reg >> 8) & 0xff;
+ buf[2] = (reg >> 0) & 0xff;
+ memcpy(&buf[3], val, len);
+
+ ret = i2c_transfer(state->i2c, msg, 1);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ printk(KERN_WARNING "%s: i2c wr failed=%d reg=%06x len=%d\n",
+ __func__, ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+/* read multiple registers */
+static int af9033_rd_regs(struct af9033_state *state, u32 reg, u8 *val, int len)
+{
+ int ret;
+ u8 buf[3] = { (reg >> 16) & 0xff, (reg >> 8) & 0xff,
+ (reg >> 0) & 0xff };
+ struct i2c_msg msg[2] = {
+ {
+ .addr = state->cfg.i2c_addr,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf
+ }, {
+ .addr = state->cfg.i2c_addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = val
+ }
+ };
+
+ ret = i2c_transfer(state->i2c, msg, 2);
+ if (ret == 2) {
+ ret = 0;
+ } else {
+ printk(KERN_WARNING "%s: i2c rd failed=%d reg=%06x len=%d\n",
+ __func__, ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+
+/* write single register */
+static int af9033_wr_reg(struct af9033_state *state, u32 reg, u8 val)
+{
+ return af9033_wr_regs(state, reg, &val, 1);
+}
+
+/* read single register */
+static int af9033_rd_reg(struct af9033_state *state, u32 reg, u8 *val)
+{
+ return af9033_rd_regs(state, reg, val, 1);
+}
+
+/* write single register with mask */
+static int af9033_wr_reg_mask(struct af9033_state *state, u32 reg, u8 val,
+ u8 mask)
+{
+ int ret;
+ u8 tmp;
+
+ /* no need for read if whole reg is written */
+ if (mask != 0xff) {
+ ret = af9033_rd_regs(state, reg, &tmp, 1);
+ if (ret)
+ return ret;
+
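+ /* read-modify-write, e.g. reg=0xa5, mask=0x0f, val=0x03 gives 0xa3 */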
+ val &= mask;
+ tmp &= ~mask;
+ val |= tmp;
+ }
+
+ return af9033_wr_regs(state, reg, &val, 1);
+}
+
+/* read single register with mask */
+static int af9033_rd_reg_mask(struct af9033_state *state, u32 reg, u8 *val,
+ u8 mask)
+{
+ int ret, i;
+ u8 tmp;
+
+ ret = af9033_rd_regs(state, reg, &tmp, 1);
+ if (ret)
+ return ret;
+
+ tmp &= mask;
+
+ /* find position of the first bit */
+ for (i = 0; i < 8; i++) {
+ if ((mask >> i) & 0x01)
+ break;
+ }
+ *val = tmp >> i;
+
+ return 0;
+}
+
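+/*
+ * Fixed-point divide: returns roughly (a / b) << x, e.g.
+ * af9033_div(12000000, 1000000, 19) = 12 << 19 = 0x600000.
+ * Used below to build the clock, ADC and frequency control words.
+ */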
+static u32 af9033_div(u32 a, u32 b, u32 x)
+{
+ u32 r = 0, c = 0, i;
+
+ pr_debug("%s: a=%d b=%d x=%d\n", __func__, a, b, x);
+
+ if (a > b) {
+ c = a / b;
+ a = a - c * b;
+ }
+
+ for (i = 0; i < x; i++) {
+ if (a >= b) {
+ r += 1;
+ a -= b;
+ }
+ a <<= 1;
+ r <<= 1;
+ }
+ r = (c << (u32)x) + r;
+
+ pr_debug("%s: a=%d b=%d x=%d r=%d r=%x\n", __func__, a, b, x, r, r);
+
+ return r;
+}
+
+static void af9033_release(struct dvb_frontend *fe)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+
+ kfree(state);
+}
+
+static int af9033_init(struct dvb_frontend *fe)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret, i, len;
+ const struct reg_val *init;
+ u8 buf[4];
+ u32 adc_cw, clock_cw;
+ struct reg_val_mask tab[] = {
+ { 0x80fb24, 0x00, 0x08 },
+ { 0x80004c, 0x00, 0xff },
+ { 0x00f641, state->cfg.tuner, 0xff },
+ { 0x80f5ca, 0x01, 0x01 },
+ { 0x80f715, 0x01, 0x01 },
+ { 0x00f41f, 0x04, 0x04 },
+ { 0x00f41a, 0x01, 0x01 },
+ { 0x80f731, 0x00, 0x01 },
+ { 0x00d91e, 0x00, 0x01 },
+ { 0x00d919, 0x00, 0x01 },
+ { 0x80f732, 0x00, 0x01 },
+ { 0x00d91f, 0x00, 0x01 },
+ { 0x00d91a, 0x00, 0x01 },
+ { 0x80f730, 0x00, 0x01 },
+ { 0x80f778, 0x00, 0xff },
+ { 0x80f73c, 0x01, 0x01 },
+ { 0x80f776, 0x00, 0x01 },
+ { 0x00d8fd, 0x01, 0xff },
+ { 0x00d830, 0x01, 0xff },
+ { 0x00d831, 0x00, 0xff },
+ { 0x00d832, 0x00, 0xff },
+ { 0x80f985, state->ts_mode_serial, 0x01 },
+ { 0x80f986, state->ts_mode_parallel, 0x01 },
+ { 0x00d827, 0x00, 0xff },
+ { 0x00d829, 0x00, 0xff },
+ };
+
+ /* program clock control */
+ clock_cw = af9033_div(state->cfg.clock, 1000000ul, 19ul);
+ buf[0] = (clock_cw >> 0) & 0xff;
+ buf[1] = (clock_cw >> 8) & 0xff;
+ buf[2] = (clock_cw >> 16) & 0xff;
+ buf[3] = (clock_cw >> 24) & 0xff;
+
+ pr_debug("%s: clock=%d clock_cw=%08x\n", __func__, state->cfg.clock,
+ clock_cw);
+
+ ret = af9033_wr_regs(state, 0x800025, buf, 4);
+ if (ret < 0)
+ goto err;
+
+ /* program ADC control */
+ for (i = 0; i < ARRAY_SIZE(clock_adc_lut); i++) {
+ if (clock_adc_lut[i].clock == state->cfg.clock)
+ break;
+ }
+
+ adc_cw = af9033_div(clock_adc_lut[i].adc, 1000000ul, 19ul);
+ buf[0] = (adc_cw >> 0) & 0xff;
+ buf[1] = (adc_cw >> 8) & 0xff;
+ buf[2] = (adc_cw >> 16) & 0xff;
+
+ pr_debug("%s: adc=%d adc_cw=%06x\n", __func__, clock_adc_lut[i].adc,
+ adc_cw);
+
+ ret = af9033_wr_regs(state, 0x80f1cd, buf, 3);
+ if (ret < 0)
+ goto err;
+
+ /* program register table */
+ for (i = 0; i < ARRAY_SIZE(tab); i++) {
+ ret = af9033_wr_reg_mask(state, tab[i].reg, tab[i].val,
+ tab[i].mask);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* settings for TS interface */
+ if (state->cfg.ts_mode == AF9033_TS_MODE_USB) {
+ ret = af9033_wr_reg_mask(state, 0x80f9a5, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg_mask(state, 0x80f9b5, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+ } else {
+ ret = af9033_wr_reg_mask(state, 0x80f990, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg_mask(state, 0x80f9b5, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* load OFSM settings */
+ pr_debug("%s: load ofsm settings\n", __func__);
+ len = ARRAY_SIZE(ofsm_init);
+ init = ofsm_init;
+ for (i = 0; i < len; i++) {
+ ret = af9033_wr_reg(state, init[i].reg, init[i].val);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* load tuner specific settings */
+ pr_debug("%s: load tuner specific settings\n",
+ __func__);
+ switch (state->cfg.tuner) {
+ case AF9033_TUNER_TUA9001:
+ len = ARRAY_SIZE(tuner_init_tua9001);
+ init = tuner_init_tua9001;
+ break;
+ case AF9033_TUNER_FC0011:
+ len = ARRAY_SIZE(tuner_init_fc0011);
+ init = tuner_init_fc0011;
+ break;
+ case AF9033_TUNER_MXL5007T:
+ len = ARRAY_SIZE(tuner_init_mxl5007t);
+ init = tuner_init_mxl5007t;
+ break;
+ case AF9033_TUNER_TDA18218:
+ len = ARRAY_SIZE(tuner_init_tda18218);
+ init = tuner_init_tda18218;
+ break;
+ default:
+ pr_debug("%s: unsupported tuner ID=%d\n", __func__,
+ state->cfg.tuner);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ for (i = 0; i < len; i++) {
+ ret = af9033_wr_reg(state, init[i].reg, init[i].val);
+ if (ret < 0)
+ goto err;
+ }
+
+ state->bandwidth_hz = 0; /* force to program all parameters */
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9033_sleep(struct dvb_frontend *fe)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret, i;
+ u8 tmp;
+
+ ret = af9033_wr_reg(state, 0x80004c, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg(state, 0x800000, 0);
+ if (ret < 0)
+ goto err;
+
+ for (i = 100, tmp = 1; i && tmp; i--) {
+ ret = af9033_rd_reg(state, 0x80004c, &tmp);
+ if (ret < 0)
+ goto err;
+
+ usleep_range(200, 10000);
+ }
+
+ pr_debug("%s: loop=%d\n", __func__, i);
+
+ if (i == 0) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ ret = af9033_wr_reg_mask(state, 0x80fb24, 0x08, 0x08);
+ if (ret < 0)
+ goto err;
+
+ /* prevent current leak (?) */
+ if (state->cfg.ts_mode == AF9033_TS_MODE_SERIAL) {
+ /* enable parallel TS */
+ ret = af9033_wr_reg_mask(state, 0x00d917, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg_mask(state, 0x00d916, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9033_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *fesettings)
+{
+ fesettings->min_delay_ms = 800;
+ fesettings->step_size = 0;
+ fesettings->max_drift = 0;
+
+ return 0;
+}
+
+static int af9033_set_frontend(struct dvb_frontend *fe)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, i, spec_inv;
+ u8 tmp, buf[3], bandwidth_reg_val;
+ u32 if_frequency, freq_cw, adc_freq;
+
+ pr_debug("%s: frequency=%d bandwidth_hz=%d\n", __func__, c->frequency,
+ c->bandwidth_hz);
+
+ /* check bandwidth */
+ switch (c->bandwidth_hz) {
+ case 6000000:
+ bandwidth_reg_val = 0x00;
+ break;
+ case 7000000:
+ bandwidth_reg_val = 0x01;
+ break;
+ case 8000000:
+ bandwidth_reg_val = 0x02;
+ break;
+ default:
+ pr_debug("%s: invalid bandwidth_hz\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* program tuner */
+ if (fe->ops.tuner_ops.set_params)
+ fe->ops.tuner_ops.set_params(fe);
+
+ /* program CFOE coefficients */
+ if (c->bandwidth_hz != state->bandwidth_hz) {
+ for (i = 0; i < ARRAY_SIZE(coeff_lut); i++) {
+ if (coeff_lut[i].clock == state->cfg.clock &&
+ coeff_lut[i].bandwidth_hz == c->bandwidth_hz) {
+ break;
+ }
+ }
+ ret = af9033_wr_regs(state, 0x800001,
+ coeff_lut[i].val, sizeof(coeff_lut[i].val));
+ }
+
+ /* program frequency control */
+ if (c->bandwidth_hz != state->bandwidth_hz) {
+ spec_inv = state->cfg.spec_inv ? -1 : 1;
+
+ for (i = 0; i < ARRAY_SIZE(clock_adc_lut); i++) {
+ if (clock_adc_lut[i].clock == state->cfg.clock)
+ break;
+ }
+ adc_freq = clock_adc_lut[i].adc;
+
+ /* get used IF frequency */
+ if (fe->ops.tuner_ops.get_if_frequency)
+ fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
+ else
+ if_frequency = 0;
+
+ while (if_frequency > (adc_freq / 2))
+ if_frequency -= adc_freq;
+
+ if (if_frequency >= 0)
+ spec_inv *= -1;
+ else
+ if_frequency *= -1;
+
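+ /* frequency control word: IF/ADC ratio in 23-bit fixed point */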
+ freq_cw = af9033_div(if_frequency, adc_freq, 23ul);
+
+ if (spec_inv == -1)
+ freq_cw *= -1;
+
+ /* read the ADC multiplier setting */
+ ret = af9033_rd_reg(state, 0x800045, &tmp);
+ if (ret < 0)
+ goto err;
+
+ if (tmp == 1)
+ freq_cw /= 2;
+
+ buf[0] = (freq_cw >> 0) & 0xff;
+ buf[1] = (freq_cw >> 8) & 0xff;
+ buf[2] = (freq_cw >> 16) & 0x7f;
+ ret = af9033_wr_regs(state, 0x800029, buf, 3);
+ if (ret < 0)
+ goto err;
+
+ state->bandwidth_hz = c->bandwidth_hz;
+ }
+
+ ret = af9033_wr_reg_mask(state, 0x80f904, bandwidth_reg_val, 0x03);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg(state, 0x800040, 0x00);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg(state, 0x800047, 0x00);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg_mask(state, 0x80f999, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ if (c->frequency <= 230000000)
+ tmp = 0x00; /* VHF */
+ else
+ tmp = 0x01; /* UHF */
+
+ ret = af9033_wr_reg(state, 0x80004b, tmp);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg(state, 0x800000, 0x00);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9033_get_frontend(struct dvb_frontend *fe)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ u8 buf[8];
+
+ pr_debug("%s\n", __func__);
+
+ /* read all needed registers */
+ ret = af9033_rd_regs(state, 0x80f900, buf, sizeof(buf));
+ if (ret < 0)
+ goto err;
+
+ switch ((buf[0] >> 0) & 3) {
+ case 0:
+ c->transmission_mode = TRANSMISSION_MODE_2K;
+ break;
+ case 1:
+ c->transmission_mode = TRANSMISSION_MODE_8K;
+ break;
+ }
+
+ switch ((buf[1] >> 0) & 3) {
+ case 0:
+ c->guard_interval = GUARD_INTERVAL_1_32;
+ break;
+ case 1:
+ c->guard_interval = GUARD_INTERVAL_1_16;
+ break;
+ case 2:
+ c->guard_interval = GUARD_INTERVAL_1_8;
+ break;
+ case 3:
+ c->guard_interval = GUARD_INTERVAL_1_4;
+ break;
+ }
+
+ switch ((buf[2] >> 0) & 7) {
+ case 0:
+ c->hierarchy = HIERARCHY_NONE;
+ break;
+ case 1:
+ c->hierarchy = HIERARCHY_1;
+ break;
+ case 2:
+ c->hierarchy = HIERARCHY_2;
+ break;
+ case 3:
+ c->hierarchy = HIERARCHY_4;
+ break;
+ }
+
+ switch ((buf[3] >> 0) & 3) {
+ case 0:
+ c->modulation = QPSK;
+ break;
+ case 1:
+ c->modulation = QAM_16;
+ break;
+ case 2:
+ c->modulation = QAM_64;
+ break;
+ }
+
+ switch ((buf[4] >> 0) & 3) {
+ case 0:
+ c->bandwidth_hz = 6000000;
+ break;
+ case 1:
+ c->bandwidth_hz = 7000000;
+ break;
+ case 2:
+ c->bandwidth_hz = 8000000;
+ break;
+ }
+
+ switch ((buf[6] >> 0) & 7) {
+ case 0:
+ c->code_rate_HP = FEC_1_2;
+ break;
+ case 1:
+ c->code_rate_HP = FEC_2_3;
+ break;
+ case 2:
+ c->code_rate_HP = FEC_3_4;
+ break;
+ case 3:
+ c->code_rate_HP = FEC_5_6;
+ break;
+ case 4:
+ c->code_rate_HP = FEC_7_8;
+ break;
+ case 5:
+ c->code_rate_HP = FEC_NONE;
+ break;
+ }
+
+ switch ((buf[7] >> 0) & 7) {
+ case 0:
+ c->code_rate_LP = FEC_1_2;
+ break;
+ case 1:
+ c->code_rate_LP = FEC_2_3;
+ break;
+ case 2:
+ c->code_rate_LP = FEC_3_4;
+ break;
+ case 3:
+ c->code_rate_LP = FEC_5_6;
+ break;
+ case 4:
+ c->code_rate_LP = FEC_7_8;
+ break;
+ case 5:
+ c->code_rate_LP = FEC_NONE;
+ break;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9033_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret;
+ u8 tmp;
+
+ *status = 0;
+
+ /* radio channel status, 0=no result, 1=has signal, 2=no signal */
+ ret = af9033_rd_reg(state, 0x800047, &tmp);
+ if (ret < 0)
+ goto err;
+
+ /* has signal */
+ if (tmp == 0x01)
+ *status |= FE_HAS_SIGNAL;
+
+ if (tmp != 0x02) {
+ /* TPS lock */
+ ret = af9033_rd_reg_mask(state, 0x80f5a9, &tmp, 0x01);
+ if (ret < 0)
+ goto err;
+
+ if (tmp)
+ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI;
+
+ /* full lock */
+ ret = af9033_rd_reg_mask(state, 0x80f999, &tmp, 0x01);
+ if (ret < 0)
+ goto err;
+
+ if (tmp)
+ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC |
+ FE_HAS_LOCK;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9033_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret, i, len;
+ u8 buf[3], tmp;
+ u32 snr_val;
+ const struct val_snr *uninitialized_var(snr_lut);
+
+ /* read value */
+ ret = af9033_rd_regs(state, 0x80002c, buf, 3);
+ if (ret < 0)
+ goto err;
+
+ snr_val = (buf[2] << 16) | (buf[1] << 8) | buf[0];
+
+ /* read current modulation */
+ ret = af9033_rd_reg(state, 0x80f903, &tmp);
+ if (ret < 0)
+ goto err;
+
+ switch ((tmp >> 0) & 3) {
+ case 0:
+ len = ARRAY_SIZE(qpsk_snr_lut);
+ snr_lut = qpsk_snr_lut;
+ break;
+ case 1:
+ len = ARRAY_SIZE(qam16_snr_lut);
+ snr_lut = qam16_snr_lut;
+ break;
+ case 2:
+ len = ARRAY_SIZE(qam64_snr_lut);
+ snr_lut = qam64_snr_lut;
+ break;
+ default:
+ goto err;
+ }
+
+ for (i = 0; i < len; i++) {
+ tmp = snr_lut[i].snr;
+
+ if (snr_val < snr_lut[i].val)
+ break;
+ }
+
+ *snr = tmp * 10; /* dB/10 */
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9033_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret;
+ u8 strength2;
+
+ /* read signal strength on a 0-100 scale */
+ ret = af9033_rd_reg(state, 0x800048, &strength2);
+ if (ret < 0)
+ goto err;
+
+ /* scale value to 0x0000-0xffff */
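+ /* e.g. a strength of 50 becomes 50 * 0xffff / 100 = 0x7fff */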
+ *strength = strength2 * 0xffff / 100;
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9033_update_ch_stat(struct af9033_state *state)
+{
+ int ret = 0;
+ u32 err_cnt, bit_cnt;
+ u16 abort_cnt;
+ u8 buf[7];
+
+ /* only update data every half second */
+ if (time_after(jiffies, state->last_stat_check + msecs_to_jiffies(500))) {
+ ret = af9033_rd_regs(state, 0x800032, buf, sizeof(buf));
+ if (ret < 0)
+ goto err;
+ /* in 8 byte packets? */
+ abort_cnt = (buf[1] << 8) + buf[0];
+ /* in bits */
+ err_cnt = (buf[4] << 16) + (buf[3] << 8) + buf[2];
+ /* in 8 byte packets? always(?) 0x2710 = 10000 */
+ bit_cnt = (buf[6] << 8) + buf[5];
+
+ if (bit_cnt < abort_cnt) {
+ abort_cnt = 1000;
+ state->ber = 0xffffffff;
+ } else {
+ /* 8-byte packets that have not already been rejected */
+ bit_cnt -= (u32)abort_cnt;
+ if (bit_cnt == 0) {
+ state->ber = 0xffffffff;
+ } else {
+ err_cnt -= (u32)abort_cnt * 8 * 8;
+ bit_cnt *= 8 * 8;
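+ /* normalise: err_cnt == bit_cnt maps to roughly 0xffffffff */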
+ state->ber = err_cnt * (0xffffffff / bit_cnt);
+ }
+ }
+ state->ucb += abort_cnt;
+ state->last_stat_check = jiffies;
+ }
+
+ return 0;
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int af9033_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret;
+
+ ret = af9033_update_ch_stat(state);
+ if (ret < 0)
+ return ret;
+
+ *ber = state->ber;
+
+ return 0;
+}
+
+static int af9033_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret;
+
+ ret = af9033_update_ch_stat(state);
+ if (ret < 0)
+ return ret;
+
+ *ucblocks = state->ucb;
+
+ return 0;
+}
+
+static int af9033_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret;
+
+ pr_debug("%s: enable=%d\n", __func__, enable);
+
+ ret = af9033_wr_reg_mask(state, 0x00fa04, enable, 0x01);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static struct dvb_frontend_ops af9033_ops;
+
+struct dvb_frontend *af9033_attach(const struct af9033_config *config,
+ struct i2c_adapter *i2c)
+{
+ int ret;
+ struct af9033_state *state;
+ u8 buf[8];
+
+ pr_debug("%s:\n", __func__);
+
+ /* allocate memory for the internal state */
+ state = kzalloc(sizeof(struct af9033_state), GFP_KERNEL);
+ if (state == NULL)
+ goto err;
+
+ /* setup the state */
+ state->i2c = i2c;
+ memcpy(&state->cfg, config, sizeof(struct af9033_config));
+
+ if (state->cfg.clock != 12000000) {
+ printk(KERN_INFO "af9033: unsupported clock=%d, only " \
+ "12000000 Hz is supported currently\n",
+ state->cfg.clock);
+ goto err;
+ }
+
+ /* firmware version */
+ ret = af9033_rd_regs(state, 0x0083e9, &buf[0], 4);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_rd_regs(state, 0x804191, &buf[4], 4);
+ if (ret < 0)
+ goto err;
+
+ printk(KERN_INFO "af9033: firmware version: LINK=%d.%d.%d.%d " \
+ "OFDM=%d.%d.%d.%d\n", buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7]);
+
+ /* configure internal TS mode */
+ switch (state->cfg.ts_mode) {
+ case AF9033_TS_MODE_PARALLEL:
+ state->ts_mode_parallel = true;
+ break;
+ case AF9033_TS_MODE_SERIAL:
+ state->ts_mode_serial = true;
+ break;
+ case AF9033_TS_MODE_USB:
+ /* usb mode for AF9035 */
+ default:
+ break;
+ }
+
+ /* create dvb_frontend */
+ memcpy(&state->fe.ops, &af9033_ops, sizeof(struct dvb_frontend_ops));
+ state->fe.demodulator_priv = state;
+
+ return &state->fe;
+
+err:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(af9033_attach);
+
+static struct dvb_frontend_ops af9033_ops = {
+ .delsys = { SYS_DVBT },
+ .info = {
+ .name = "Afatech AF9033 (DVB-T)",
+ .frequency_min = 174000000,
+ .frequency_max = 862000000,
+ .frequency_stepsize = 250000,
+ .frequency_tolerance = 0,
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO |
+ FE_CAN_RECOVER |
+ FE_CAN_MUTE_TS
+ },
+
+ .release = af9033_release,
+
+ .init = af9033_init,
+ .sleep = af9033_sleep,
+
+ .get_tune_settings = af9033_get_tune_settings,
+ .set_frontend = af9033_set_frontend,
+ .get_frontend = af9033_get_frontend,
+
+ .read_status = af9033_read_status,
+ .read_snr = af9033_read_snr,
+ .read_signal_strength = af9033_read_signal_strength,
+ .read_ber = af9033_read_ber,
+ .read_ucblocks = af9033_read_ucblocks,
+
+ .i2c_gate_ctrl = af9033_i2c_gate_ctrl,
+};
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Afatech AF9033 DVB-T demodulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/af9033.h b/drivers/media/dvb/frontends/af9033.h
new file mode 100644
index 000000000000..9e302c3f0f7d
--- /dev/null
+++ b/drivers/media/dvb/frontends/af9033.h
@@ -0,0 +1,75 @@
+/*
+ * Afatech AF9033 demodulator driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef AF9033_H
+#define AF9033_H
+
+struct af9033_config {
+ /*
+ * I2C address
+ */
+ u8 i2c_addr;
+
+ /*
+ * clock Hz
+ * 12000000, 22000000, 24000000, 34000000, 32000000, 28000000, 26000000,
+ * 30000000, 36000000, 20480000, 16384000
+ */
+ u32 clock;
+
+ /*
+ * tuner
+ */
+#define AF9033_TUNER_TUA9001 0x27 /* Infineon TUA 9001 */
+#define AF9033_TUNER_FC0011 0x28 /* Fitipower FC0011 */
+#define AF9033_TUNER_MXL5007T 0xa0 /* MaxLinear MxL5007T */
+#define AF9033_TUNER_TDA18218 0xa1 /* NXP TDA 18218HN */
+ u8 tuner;
+
+ /*
+ * TS settings
+ */
+#define AF9033_TS_MODE_USB 0
+#define AF9033_TS_MODE_PARALLEL 1
+#define AF9033_TS_MODE_SERIAL 2
+ u8 ts_mode:2;
+
+ /*
+ * input spectrum inversion
+ */
+ bool spec_inv;
+};
+
+
+#if defined(CONFIG_DVB_AF9033) || \
+ (defined(CONFIG_DVB_AF9033_MODULE) && defined(MODULE))
+extern struct dvb_frontend *af9033_attach(const struct af9033_config *config,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *af9033_attach(
+ const struct af9033_config *config, struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif /* AF9033_H */
diff --git a/drivers/media/dvb/frontends/af9033_priv.h b/drivers/media/dvb/frontends/af9033_priv.h
new file mode 100644
index 000000000000..0b783b9ed75e
--- /dev/null
+++ b/drivers/media/dvb/frontends/af9033_priv.h
@@ -0,0 +1,470 @@
+/*
+ * Afatech AF9033 demodulator driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef AF9033_PRIV_H
+#define AF9033_PRIV_H
+
+#include "dvb_frontend.h"
+#include "af9033.h"
+
+struct reg_val {
+ u32 reg;
+ u8 val;
+};
+
+struct reg_val_mask {
+ u32 reg;
+ u8 val;
+ u8 mask;
+};
+
+struct coeff {
+ u32 clock;
+ u32 bandwidth_hz;
+ u8 val[36];
+};
+
+struct clock_adc {
+ u32 clock;
+ u32 adc;
+};
+
+struct val_snr {
+ u32 val;
+ u8 snr;
+};
+
+/* Xtal clock vs. ADC clock lookup table */
+static const struct clock_adc clock_adc_lut[] = {
+ { 16384000, 20480000 },
+ { 20480000, 20480000 },
+ { 36000000, 20250000 },
+ { 30000000, 20156250 },
+ { 26000000, 20583333 },
+ { 28000000, 20416667 },
+ { 32000000, 20500000 },
+ { 34000000, 20187500 },
+ { 24000000, 20500000 },
+ { 22000000, 20625000 },
+ { 12000000, 20250000 },
+};
+
+/* pre-calculated coeff lookup table */
+static const struct coeff coeff_lut[] = {
+ /* 12.000 MHz */
+ { 12000000, 8000000, {
+ 0x01, 0xce, 0x55, 0xc9, 0x00, 0xe7, 0x2a, 0xe4, 0x00, 0x73,
+ 0x99, 0x0f, 0x00, 0x73, 0x95, 0x72, 0x00, 0x73, 0x91, 0xd5,
+ 0x00, 0x39, 0xca, 0xb9, 0x00, 0xe7, 0x2a, 0xe4, 0x00, 0x73,
+ 0x95, 0x72, 0x37, 0x02, 0xce, 0x01 }
+ },
+ { 12000000, 7000000, {
+ 0x01, 0x94, 0x8b, 0x10, 0x00, 0xca, 0x45, 0x88, 0x00, 0x65,
+ 0x25, 0xed, 0x00, 0x65, 0x22, 0xc4, 0x00, 0x65, 0x1f, 0x9b,
+ 0x00, 0x32, 0x91, 0x62, 0x00, 0xca, 0x45, 0x88, 0x00, 0x65,
+ 0x22, 0xc4, 0x88, 0x02, 0x95, 0x01 }
+ },
+ { 12000000, 6000000, {
+ 0x01, 0x5a, 0xc0, 0x56, 0x00, 0xad, 0x60, 0x2b, 0x00, 0x56,
+ 0xb2, 0xcb, 0x00, 0x56, 0xb0, 0x15, 0x00, 0x56, 0xad, 0x60,
+ 0x00, 0x2b, 0x58, 0x0b, 0x00, 0xad, 0x60, 0x2b, 0x00, 0x56,
+ 0xb0, 0x15, 0xf4, 0x02, 0x5b, 0x01 }
+ },
+};
+
+/* QPSK SNR lookup table */
+static const struct val_snr qpsk_snr_lut[] = {
+ { 0x0b4771, 0 },
+ { 0x0c1aed, 1 },
+ { 0x0d0d27, 2 },
+ { 0x0e4d19, 3 },
+ { 0x0e5da8, 4 },
+ { 0x107097, 5 },
+ { 0x116975, 6 },
+ { 0x1252d9, 7 },
+ { 0x131fa4, 8 },
+ { 0x13d5e1, 9 },
+ { 0x148e53, 10 },
+ { 0x15358b, 11 },
+ { 0x15dd29, 12 },
+ { 0x168112, 13 },
+ { 0x170b61, 14 },
+ { 0x17a532, 15 },
+ { 0x180f94, 16 },
+ { 0x186ed2, 17 },
+ { 0x18b271, 18 },
+ { 0x18e118, 19 },
+ { 0x18ff4b, 20 },
+ { 0x190af1, 21 },
+ { 0x191451, 22 },
+ { 0xffffff, 23 },
+};
+
+/* QAM16 SNR lookup table */
+static const struct val_snr qam16_snr_lut[] = {
+ { 0x04f0d5, 0 },
+ { 0x05387a, 1 },
+ { 0x0573a4, 2 },
+ { 0x05a99e, 3 },
+ { 0x05cc80, 4 },
+ { 0x05eb62, 5 },
+ { 0x05fecf, 6 },
+ { 0x060b80, 7 },
+ { 0x062501, 8 },
+ { 0x064865, 9 },
+ { 0x069604, 10 },
+ { 0x06f356, 11 },
+ { 0x07706a, 12 },
+ { 0x0804d3, 13 },
+ { 0x089d1a, 14 },
+ { 0x093e3d, 15 },
+ { 0x09e35d, 16 },
+ { 0x0a7c3c, 17 },
+ { 0x0afaf8, 18 },
+ { 0x0b719d, 19 },
+ { 0x0bda6a, 20 },
+ { 0x0c0c75, 21 },
+ { 0x0c3f7d, 22 },
+ { 0x0c5e62, 23 },
+ { 0x0c6c31, 24 },
+ { 0x0c7925, 25 },
+ { 0xffffff, 26 },
+};
+
+/* QAM64 SNR lookup table */
+static const struct val_snr qam64_snr_lut[] = {
+ { 0x0256d0, 0 },
+ { 0x027a65, 1 },
+ { 0x029873, 2 },
+ { 0x02b7fe, 3 },
+ { 0x02cf1e, 4 },
+ { 0x02e234, 5 },
+ { 0x02f409, 6 },
+ { 0x030046, 7 },
+ { 0x030844, 8 },
+ { 0x030a02, 9 },
+ { 0x030cde, 10 },
+ { 0x031031, 11 },
+ { 0x03144c, 12 },
+ { 0x0315dd, 13 },
+ { 0x031920, 14 },
+ { 0x0322d0, 15 },
+ { 0x0339fc, 16 },
+ { 0x0364a1, 17 },
+ { 0x038bcc, 18 },
+ { 0x03c7d3, 19 },
+ { 0x0408cc, 20 },
+ { 0x043bed, 21 },
+ { 0x048061, 22 },
+ { 0x04be95, 23 },
+ { 0x04fa7d, 24 },
+ { 0x052405, 25 },
+ { 0x05570d, 26 },
+ { 0x059feb, 27 },
+ { 0x05bf38, 28 },
+ { 0xffffff, 29 },
+};
+
+static const struct reg_val ofsm_init[] = {
+ { 0x800051, 0x01 },
+ { 0x800070, 0x0a },
+ { 0x80007e, 0x04 },
+ { 0x800081, 0x0a },
+ { 0x80008a, 0x01 },
+ { 0x80008e, 0x01 },
+ { 0x800092, 0x06 },
+ { 0x800099, 0x01 },
+ { 0x80009f, 0xe1 },
+ { 0x8000a0, 0xcf },
+ { 0x8000a3, 0x01 },
+ { 0x8000a5, 0x01 },
+ { 0x8000a6, 0x01 },
+ { 0x8000a9, 0x00 },
+ { 0x8000aa, 0x01 },
+ { 0x8000ab, 0x01 },
+ { 0x8000b0, 0x01 },
+ { 0x8000c0, 0x05 },
+ { 0x8000c4, 0x19 },
+ { 0x80f000, 0x0f },
+ { 0x80f016, 0x10 },
+ { 0x80f017, 0x04 },
+ { 0x80f018, 0x05 },
+ { 0x80f019, 0x04 },
+ { 0x80f01a, 0x05 },
+ { 0x80f021, 0x03 },
+ { 0x80f022, 0x0a },
+ { 0x80f023, 0x0a },
+ { 0x80f02b, 0x00 },
+ { 0x80f02c, 0x01 },
+ { 0x80f064, 0x03 },
+ { 0x80f065, 0xf9 },
+ { 0x80f066, 0x03 },
+ { 0x80f067, 0x01 },
+ { 0x80f06f, 0xe0 },
+ { 0x80f070, 0x03 },
+ { 0x80f072, 0x0f },
+ { 0x80f073, 0x03 },
+ { 0x80f078, 0x00 },
+ { 0x80f087, 0x00 },
+ { 0x80f09b, 0x3f },
+ { 0x80f09c, 0x00 },
+ { 0x80f09d, 0x20 },
+ { 0x80f09e, 0x00 },
+ { 0x80f09f, 0x0c },
+ { 0x80f0a0, 0x00 },
+ { 0x80f130, 0x04 },
+ { 0x80f132, 0x04 },
+ { 0x80f144, 0x1a },
+ { 0x80f146, 0x00 },
+ { 0x80f14a, 0x01 },
+ { 0x80f14c, 0x00 },
+ { 0x80f14d, 0x00 },
+ { 0x80f14f, 0x04 },
+ { 0x80f158, 0x7f },
+ { 0x80f15a, 0x00 },
+ { 0x80f15b, 0x08 },
+ { 0x80f15d, 0x03 },
+ { 0x80f15e, 0x05 },
+ { 0x80f163, 0x05 },
+ { 0x80f166, 0x01 },
+ { 0x80f167, 0x40 },
+ { 0x80f168, 0x0f },
+ { 0x80f17a, 0x00 },
+ { 0x80f17b, 0x00 },
+ { 0x80f183, 0x01 },
+ { 0x80f19d, 0x40 },
+ { 0x80f1bc, 0x36 },
+ { 0x80f1bd, 0x00 },
+ { 0x80f1cb, 0xa0 },
+ { 0x80f1cc, 0x01 },
+ { 0x80f204, 0x10 },
+ { 0x80f214, 0x00 },
+ { 0x80f40e, 0x0a },
+ { 0x80f40f, 0x40 },
+ { 0x80f410, 0x08 },
+ { 0x80f55f, 0x0a },
+ { 0x80f561, 0x15 },
+ { 0x80f562, 0x20 },
+ { 0x80f5df, 0xfb },
+ { 0x80f5e0, 0x00 },
+ { 0x80f5e3, 0x09 },
+ { 0x80f5e4, 0x01 },
+ { 0x80f5e5, 0x01 },
+ { 0x80f5f8, 0x01 },
+ { 0x80f5fd, 0x01 },
+ { 0x80f600, 0x05 },
+ { 0x80f601, 0x08 },
+ { 0x80f602, 0x0b },
+ { 0x80f603, 0x0e },
+ { 0x80f604, 0x11 },
+ { 0x80f605, 0x14 },
+ { 0x80f606, 0x17 },
+ { 0x80f607, 0x1f },
+ { 0x80f60e, 0x00 },
+ { 0x80f60f, 0x04 },
+ { 0x80f610, 0x32 },
+ { 0x80f611, 0x10 },
+ { 0x80f707, 0xfc },
+ { 0x80f708, 0x00 },
+ { 0x80f709, 0x37 },
+ { 0x80f70a, 0x00 },
+ { 0x80f78b, 0x01 },
+ { 0x80f80f, 0x40 },
+ { 0x80f810, 0x54 },
+ { 0x80f811, 0x5a },
+ { 0x80f905, 0x01 },
+ { 0x80fb06, 0x03 },
+ { 0x80fd8b, 0x00 },
+};
+
+/* Infineon TUA 9001 tuner init
+ AF9033_TUNER_TUA9001 = 0x27 */
+static const struct reg_val tuner_init_tua9001[] = {
+ { 0x800046, 0x27 },
+ { 0x800057, 0x00 },
+ { 0x800058, 0x01 },
+ { 0x80005f, 0x00 },
+ { 0x800060, 0x00 },
+ { 0x80006d, 0x00 },
+ { 0x800071, 0x05 },
+ { 0x800072, 0x02 },
+ { 0x800074, 0x01 },
+ { 0x800075, 0x03 },
+ { 0x800076, 0x02 },
+ { 0x800077, 0x00 },
+ { 0x800078, 0x01 },
+ { 0x800079, 0x00 },
+ { 0x80007a, 0x7e },
+ { 0x80007b, 0x3e },
+ { 0x800093, 0x00 },
+ { 0x800094, 0x01 },
+ { 0x800095, 0x02 },
+ { 0x800096, 0x01 },
+ { 0x800098, 0x0a },
+ { 0x80009b, 0x05 },
+ { 0x80009c, 0x80 },
+ { 0x8000b3, 0x00 },
+ { 0x8000c1, 0x01 },
+ { 0x8000c2, 0x00 },
+ { 0x80f007, 0x00 },
+ { 0x80f01f, 0x82 },
+ { 0x80f020, 0x00 },
+ { 0x80f029, 0x82 },
+ { 0x80f02a, 0x00 },
+ { 0x80f047, 0x00 },
+ { 0x80f054, 0x00 },
+ { 0x80f055, 0x00 },
+ { 0x80f077, 0x01 },
+ { 0x80f1e6, 0x00 },
+};
+
+/* Fitipower fc0011 tuner init
+ AF9033_TUNER_FC0011 = 0x28 */
+static const struct reg_val tuner_init_fc0011[] = {
+ { 0x800046, AF9033_TUNER_FC0011 },
+ { 0x800057, 0x00 },
+ { 0x800058, 0x01 },
+ { 0x80005f, 0x00 },
+ { 0x800060, 0x00 },
+ { 0x800068, 0xa5 },
+ { 0x80006e, 0x01 },
+ { 0x800071, 0x0A },
+ { 0x800072, 0x02 },
+ { 0x800074, 0x01 },
+ { 0x800079, 0x01 },
+ { 0x800093, 0x00 },
+ { 0x800094, 0x00 },
+ { 0x800095, 0x00 },
+ { 0x800096, 0x00 },
+ { 0x80009b, 0x2D },
+ { 0x80009c, 0x60 },
+ { 0x80009d, 0x23 },
+ { 0x8000a4, 0x50 },
+ { 0x8000ad, 0x50 },
+ { 0x8000b3, 0x01 },
+ { 0x8000b7, 0x88 },
+ { 0x8000b8, 0xa6 },
+ { 0x8000c3, 0x01 },
+ { 0x8000c4, 0x01 },
+ { 0x8000c7, 0x69 },
+ { 0x80F007, 0x00 },
+ { 0x80F00A, 0x1B },
+ { 0x80F00B, 0x1B },
+ { 0x80F00C, 0x1B },
+ { 0x80F00D, 0x1B },
+ { 0x80F00E, 0xFF },
+ { 0x80F00F, 0x01 },
+ { 0x80F010, 0x00 },
+ { 0x80F011, 0x02 },
+ { 0x80F012, 0xFF },
+ { 0x80F013, 0x01 },
+ { 0x80F014, 0x00 },
+ { 0x80F015, 0x02 },
+ { 0x80F01B, 0xEF },
+ { 0x80F01C, 0x01 },
+ { 0x80F01D, 0x0f },
+ { 0x80F01E, 0x02 },
+ { 0x80F01F, 0x6E },
+ { 0x80F020, 0x00 },
+ { 0x80F025, 0xDE },
+ { 0x80F026, 0x00 },
+ { 0x80F027, 0x0A },
+ { 0x80F028, 0x03 },
+ { 0x80F029, 0x6E },
+ { 0x80F02A, 0x00 },
+ { 0x80F047, 0x00 },
+ { 0x80F054, 0x00 },
+ { 0x80F055, 0x00 },
+ { 0x80F077, 0x01 },
+ { 0x80F1E6, 0x00 },
+};
+
+/* MaxLinear MxL5007T tuner init
+ AF9033_TUNER_MXL5007T = 0xa0 */
+static const struct reg_val tuner_init_mxl5007t[] = {
+ { 0x800046, 0x1b },
+ { 0x800057, 0x01 },
+ { 0x800058, 0x01 },
+ { 0x80005f, 0x00 },
+ { 0x800060, 0x00 },
+ { 0x800068, 0x96 },
+ { 0x800071, 0x05 },
+ { 0x800072, 0x02 },
+ { 0x800074, 0x01 },
+ { 0x800079, 0x01 },
+ { 0x800093, 0x00 },
+ { 0x800094, 0x00 },
+ { 0x800095, 0x00 },
+ { 0x800096, 0x00 },
+ { 0x8000b3, 0x01 },
+ { 0x8000c1, 0x01 },
+ { 0x8000c2, 0x00 },
+ { 0x80f007, 0x00 },
+ { 0x80f00c, 0x19 },
+ { 0x80f00d, 0x1a },
+ { 0x80f012, 0xda },
+ { 0x80f013, 0x00 },
+ { 0x80f014, 0x00 },
+ { 0x80f015, 0x02 },
+ { 0x80f01f, 0x82 },
+ { 0x80f020, 0x00 },
+ { 0x80f029, 0x82 },
+ { 0x80f02a, 0x00 },
+ { 0x80f077, 0x02 },
+ { 0x80f1e6, 0x00 },
+};
+
+/* NXP TDA 18218HN tuner init
+ AF9033_TUNER_TDA18218 = 0xa1 */
+static const struct reg_val tuner_init_tda18218[] = {
+ {0x800046, 0xa1},
+ {0x800057, 0x01},
+ {0x800058, 0x01},
+ {0x80005f, 0x00},
+ {0x800060, 0x00},
+ {0x800071, 0x05},
+ {0x800072, 0x02},
+ {0x800074, 0x01},
+ {0x800079, 0x01},
+ {0x800093, 0x00},
+ {0x800094, 0x00},
+ {0x800095, 0x00},
+ {0x800096, 0x00},
+ {0x8000b3, 0x01},
+ {0x8000c3, 0x01},
+ {0x8000c4, 0x00},
+ {0x80f007, 0x00},
+ {0x80f00c, 0x19},
+ {0x80f00d, 0x1a},
+ {0x80f012, 0xda},
+ {0x80f013, 0x00},
+ {0x80f014, 0x00},
+ {0x80f015, 0x02},
+ {0x80f01f, 0x82},
+ {0x80f020, 0x00},
+ {0x80f029, 0x82},
+ {0x80f02a, 0x00},
+ {0x80f077, 0x02},
+ {0x80f1e6, 0x00},
+};
+
+#endif /* AF9033_PRIV_H */
+
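
For reference, a minimal user-space sketch (not part of this patch) of how a reg_val init table such as tuner_init_tua9001[] above is typically consumed: the driver walks the array and writes each value to its register address. The demo_wr_reg() helper and demo_init[] name below are illustrative stand-ins, not real driver symbols.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct reg_val {
	uint32_t reg;	/* demod register address */
	uint8_t val;	/* 8-bit value */
};

/* first three entries of the TUA9001 table above, for demonstration */
static const struct reg_val demo_init[] = {
	{ 0x800046, 0x27 },
	{ 0x800057, 0x00 },
	{ 0x800058, 0x01 },
};

/* hypothetical stand-in for the driver's real register write helper */
static int demo_wr_reg(uint32_t reg, uint8_t val)
{
	printf("wr 0x%06x <= 0x%02x\n", reg, val);
	return 0;
}

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(demo_init) / sizeof(demo_init[0]); i++)
		if (demo_wr_reg(demo_init[i].reg, demo_init[i].val) < 0)
			return 1;
	return 0;
}
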
diff --git a/drivers/media/dvb/frontends/au8522_common.c b/drivers/media/dvb/frontends/au8522_common.c
new file mode 100644
index 000000000000..5cfe151ee394
--- /dev/null
+++ b/drivers/media/dvb/frontends/au8522_common.c
@@ -0,0 +1,259 @@
+/*
+ Auvitek AU8522 QAM/8VSB demodulator driver
+
+ Copyright (C) 2008 Steven Toth <stoth@linuxtv.org>
+ Copyright (C) 2008 Devin Heitmueller <dheitmueller@linuxtv.org>
+ Copyright (C) 2005-2008 Auvitek International, Ltd.
+ Copyright (C) 2012 Michael Krufky <mkrufky@linuxtv.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include <linux/i2c.h>
+#include "dvb_frontend.h"
+#include "au8522_priv.h"
+
+MODULE_LICENSE("GPL");
+
+static int debug;
+
+#define dprintk(arg...)\
+ do { if (debug)\
+ printk(arg);\
+ } while (0)
+
+/* Despite the name "hybrid_tuner", the framework works just as well for
+   hybrid demodulators... */
+static LIST_HEAD(hybrid_tuner_instance_list);
+static DEFINE_MUTEX(au8522_list_mutex);
+
+/* 16 bit registers, 8 bit values */
+int au8522_writereg(struct au8522_state *state, u16 reg, u8 data)
+{
+ int ret;
+ u8 buf[] = { (reg >> 8) | 0x80, reg & 0xff, data };
+
+ struct i2c_msg msg = { .addr = state->config->demod_address,
+ .flags = 0, .buf = buf, .len = 3 };
+
+ ret = i2c_transfer(state->i2c, &msg, 1);
+
+ if (ret != 1)
+ printk("%s: writereg error (reg == 0x%02x, val == 0x%04x, "
+ "ret == %i)\n", __func__, reg, data, ret);
+
+ return (ret != 1) ? -1 : 0;
+}
+EXPORT_SYMBOL(au8522_writereg);
+
+u8 au8522_readreg(struct au8522_state *state, u16 reg)
+{
+ int ret;
+ u8 b0[] = { (reg >> 8) | 0x40, reg & 0xff };
+ u8 b1[] = { 0 };
+
+ struct i2c_msg msg[] = {
+ { .addr = state->config->demod_address, .flags = 0,
+ .buf = b0, .len = 2 },
+ { .addr = state->config->demod_address, .flags = I2C_M_RD,
+ .buf = b1, .len = 1 } };
+
+ ret = i2c_transfer(state->i2c, msg, 2);
+
+ if (ret != 2)
+ printk(KERN_ERR "%s: readreg error (ret == %i)\n",
+ __func__, ret);
+ return b1[0];
+}
+EXPORT_SYMBOL(au8522_readreg);
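
A stand-alone sketch (hypothetical, user-space only) of the address packing used by the two helpers above: the high byte of the 16-bit register address is OR'd with 0x80 for a write and 0x40 for a read, followed by the low address byte and, for writes, the 8-bit value.

/* sketch only, not driver code */
#include <stdio.h>
#include <stdint.h>

static void pack_write(uint16_t reg, uint8_t data, uint8_t buf[3])
{
	buf[0] = (reg >> 8) | 0x80;	/* write flag + address high byte */
	buf[1] = reg & 0xff;		/* address low byte */
	buf[2] = data;			/* 8-bit register value */
}

static void pack_read(uint16_t reg, uint8_t buf[2])
{
	buf[0] = (reg >> 8) | 0x40;	/* read flag + address high byte */
	buf[1] = reg & 0xff;
}

int main(void)
{
	uint8_t w[3], r[2];

	pack_write(0x0106, 0x01, w);	/* the i2c gate register used below */
	pack_read(0x0106, r);
	printf("write: %02x %02x %02x\n", w[0], w[1], w[2]);
	printf("read:  %02x %02x\n", r[0], r[1]);
	return 0;
}
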
+
+int au8522_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct au8522_state *state = fe->demodulator_priv;
+
+ dprintk("%s(%d)\n", __func__, enable);
+
+ if (state->operational_mode == AU8522_ANALOG_MODE) {
+ /* We're being asked to manage the gate even though we're
+ not in digital mode. This can occur if we get switched
+ over to analog mode before the dvb_frontend kernel thread
+ has completely shut down */
+ return 0;
+ }
+
+ if (enable)
+ return au8522_writereg(state, 0x106, 1);
+ else
+ return au8522_writereg(state, 0x106, 0);
+}
+EXPORT_SYMBOL(au8522_i2c_gate_ctrl);
+
+/* Find an existing demod state instance for this i2c bus/address or
+   allocate a new one, via the hybrid_tuner framework. */
+int au8522_get_state(struct au8522_state **state, struct i2c_adapter *i2c,
+ u8 client_address)
+{
+ int ret;
+
+ mutex_lock(&au8522_list_mutex);
+ ret = hybrid_tuner_request_state(struct au8522_state, (*state),
+ hybrid_tuner_instance_list,
+ i2c, client_address, "au8522");
+ mutex_unlock(&au8522_list_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(au8522_get_state);
+
+void au8522_release_state(struct au8522_state *state)
+{
+ mutex_lock(&au8522_list_mutex);
+ if (state != NULL)
+ hybrid_tuner_release_state(state);
+ mutex_unlock(&au8522_list_mutex);
+}
+EXPORT_SYMBOL(au8522_release_state);
+
+static int au8522_led_gpio_enable(struct au8522_state *state, int onoff)
+{
+ struct au8522_led_config *led_config = state->config->led_cfg;
+ u8 val;
+
+ /* bail out if we can't control an LED */
+ if (!led_config || !led_config->gpio_output ||
+ !led_config->gpio_output_enable || !led_config->gpio_output_disable)
+ return 0;
+
+ val = au8522_readreg(state, 0x4000 |
+ (led_config->gpio_output & ~0xc000));
+ if (onoff) {
+ /* enable GPIO output */
+ val &= ~((led_config->gpio_output_enable >> 8) & 0xff);
+ val |= (led_config->gpio_output_enable & 0xff);
+ } else {
+ /* disable GPIO output */
+ val &= ~((led_config->gpio_output_disable >> 8) & 0xff);
+ val |= (led_config->gpio_output_disable & 0xff);
+ }
+ return au8522_writereg(state, 0x8000 |
+ (led_config->gpio_output & ~0xc000), val);
+}
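
The masking in au8522_led_gpio_enable() implies a convention for the board-config values: the high byte of gpio_output_enable/disable names the bits to clear and the low byte the bits to set. A hypothetical stand-alone sketch of just that step:

#include <stdio.h>
#include <stdint.h>

/* sketch (not driver code): apply a 16-bit config value where the
 * high byte = bits to clear and the low byte = bits to set */
static uint8_t apply_gpio_cfg(uint8_t val, uint16_t cfg)
{
	val &= ~((cfg >> 8) & 0xff);
	val |= (cfg & 0xff);
	return val;
}

int main(void)
{
	/* clear bit 4 and set bit 0 of a register that currently reads 0x13 */
	printf("0x%02x\n", apply_gpio_cfg(0x13, 0x1001));	/* prints 0x03 */
	return 0;
}
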
+
+/* led = 0 | off
+ * led = 1 | signal ok
+ * led = 2 | signal strong
+ * led < 0 | only light led if leds are currently off
+ */
+int au8522_led_ctrl(struct au8522_state *state, int led)
+{
+ struct au8522_led_config *led_config = state->config->led_cfg;
+ int i, ret = 0;
+
+ /* bail out if we can't control an LED */
+ if (!led_config || !led_config->gpio_leds ||
+ !led_config->num_led_states || !led_config->led_states)
+ return 0;
+
+ if (led < 0) {
+ /* if LED is already lit, then leave it as-is */
+ if (state->led_state)
+ return 0;
+ else
+ led *= -1;
+ }
+
+ /* toggle LED if changing state */
+ if (state->led_state != led) {
+ u8 val;
+
+ dprintk("%s: %d\n", __func__, led);
+
+ au8522_led_gpio_enable(state, 1);
+
+ val = au8522_readreg(state, 0x4000 |
+ (led_config->gpio_leds & ~0xc000));
+
+ /* start with all leds off */
+ for (i = 0; i < led_config->num_led_states; i++)
+ val &= ~led_config->led_states[i];
+
+ /* set selected LED state */
+ if (led < led_config->num_led_states)
+ val |= led_config->led_states[led];
+ else if (led_config->num_led_states)
+ val |=
+ led_config->led_states[led_config->num_led_states - 1];
+
+ ret = au8522_writereg(state, 0x8000 |
+ (led_config->gpio_leds & ~0xc000), val);
+ if (ret < 0)
+ return ret;
+
+ state->led_state = led;
+
+ if (led == 0)
+ au8522_led_gpio_enable(state, 0);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(au8522_led_ctrl);
+
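+/* Reset the demod hardware and reset all of the configuration registers
+   to a default state. */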
+int au8522_init(struct dvb_frontend *fe)
+{
+ struct au8522_state *state = fe->demodulator_priv;
+ dprintk("%s()\n", __func__);
+
+ state->operational_mode = AU8522_DIGITAL_MODE;
+
+ /* Clear out any state associated with the digital side of the
+ chip, so that when it gets powered back up it won't think
+ that it is already tuned */
+ state->current_frequency = 0;
+
+ au8522_writereg(state, 0xa4, 1 << 5);
+
+ au8522_i2c_gate_ctrl(fe, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL(au8522_init);
+
+int au8522_sleep(struct dvb_frontend *fe)
+{
+ struct au8522_state *state = fe->demodulator_priv;
+ dprintk("%s()\n", __func__);
+
+ /* Only power down if the digital side is currently using the chip */
+ if (state->operational_mode == AU8522_ANALOG_MODE) {
+ /* We're not in one of the expected power modes, which means
+ that the DVB thread is probably telling us to go to sleep
+ even though the analog frontend has already started using
+ the chip. So ignore the request */
+ return 0;
+ }
+
+ /* turn off led */
+ au8522_led_ctrl(state, 0);
+
+ /* Power down the chip */
+ au8522_writereg(state, 0xa4, 1 << 5);
+
+ state->current_frequency = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(au8522_sleep);
diff --git a/drivers/media/dvb/frontends/au8522_dig.c b/drivers/media/dvb/frontends/au8522_dig.c
index 25f650934c73..5fc70d6cd04f 100644
--- a/drivers/media/dvb/frontends/au8522_dig.c
+++ b/drivers/media/dvb/frontends/au8522_dig.c
@@ -30,74 +30,11 @@
static int debug;
-/* Despite the name "hybrid_tuner", the framework works just as well for
- hybrid demodulators as well... */
-static LIST_HEAD(hybrid_tuner_instance_list);
-static DEFINE_MUTEX(au8522_list_mutex);
-
#define dprintk(arg...)\
do { if (debug)\
printk(arg);\
} while (0)
-/* 16 bit registers, 8 bit values */
-int au8522_writereg(struct au8522_state *state, u16 reg, u8 data)
-{
- int ret;
- u8 buf[] = { (reg >> 8) | 0x80, reg & 0xff, data };
-
- struct i2c_msg msg = { .addr = state->config->demod_address,
- .flags = 0, .buf = buf, .len = 3 };
-
- ret = i2c_transfer(state->i2c, &msg, 1);
-
- if (ret != 1)
- printk("%s: writereg error (reg == 0x%02x, val == 0x%04x, "
- "ret == %i)\n", __func__, reg, data, ret);
-
- return (ret != 1) ? -1 : 0;
-}
-
-u8 au8522_readreg(struct au8522_state *state, u16 reg)
-{
- int ret;
- u8 b0[] = { (reg >> 8) | 0x40, reg & 0xff };
- u8 b1[] = { 0 };
-
- struct i2c_msg msg[] = {
- { .addr = state->config->demod_address, .flags = 0,
- .buf = b0, .len = 2 },
- { .addr = state->config->demod_address, .flags = I2C_M_RD,
- .buf = b1, .len = 1 } };
-
- ret = i2c_transfer(state->i2c, msg, 2);
-
- if (ret != 2)
- printk(KERN_ERR "%s: readreg error (ret == %i)\n",
- __func__, ret);
- return b1[0];
-}
-
-static int au8522_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
-{
- struct au8522_state *state = fe->demodulator_priv;
-
- dprintk("%s(%d)\n", __func__, enable);
-
- if (state->operational_mode == AU8522_ANALOG_MODE) {
- /* We're being asked to manage the gate even though we're
- not in digital mode. This can occur if we get switched
- over to analog mode before the dvb_frontend kernel thread
- has completely shutdown */
- return 0;
- }
-
- if (enable)
- return au8522_writereg(state, 0x106, 1);
- else
- return au8522_writereg(state, 0x106, 0);
-}
-
struct mse2snr_tab {
u16 val;
u16 data;
@@ -609,136 +546,6 @@ static int au8522_set_frontend(struct dvb_frontend *fe)
return 0;
}
-/* Reset the demod hardware and reset all of the configuration registers
- to a default state. */
-int au8522_init(struct dvb_frontend *fe)
-{
- struct au8522_state *state = fe->demodulator_priv;
- dprintk("%s()\n", __func__);
-
- state->operational_mode = AU8522_DIGITAL_MODE;
-
- /* Clear out any state associated with the digital side of the
- chip, so that when it gets powered back up it won't think
- that it is already tuned */
- state->current_frequency = 0;
-
- au8522_writereg(state, 0xa4, 1 << 5);
-
- au8522_i2c_gate_ctrl(fe, 1);
-
- return 0;
-}
-
-static int au8522_led_gpio_enable(struct au8522_state *state, int onoff)
-{
- struct au8522_led_config *led_config = state->config->led_cfg;
- u8 val;
-
- /* bail out if we can't control an LED */
- if (!led_config || !led_config->gpio_output ||
- !led_config->gpio_output_enable || !led_config->gpio_output_disable)
- return 0;
-
- val = au8522_readreg(state, 0x4000 |
- (led_config->gpio_output & ~0xc000));
- if (onoff) {
- /* enable GPIO output */
- val &= ~((led_config->gpio_output_enable >> 8) & 0xff);
- val |= (led_config->gpio_output_enable & 0xff);
- } else {
- /* disable GPIO output */
- val &= ~((led_config->gpio_output_disable >> 8) & 0xff);
- val |= (led_config->gpio_output_disable & 0xff);
- }
- return au8522_writereg(state, 0x8000 |
- (led_config->gpio_output & ~0xc000), val);
-}
-
-/* led = 0 | off
- * led = 1 | signal ok
- * led = 2 | signal strong
- * led < 0 | only light led if leds are currently off
- */
-static int au8522_led_ctrl(struct au8522_state *state, int led)
-{
- struct au8522_led_config *led_config = state->config->led_cfg;
- int i, ret = 0;
-
- /* bail out if we can't control an LED */
- if (!led_config || !led_config->gpio_leds ||
- !led_config->num_led_states || !led_config->led_states)
- return 0;
-
- if (led < 0) {
- /* if LED is already lit, then leave it as-is */
- if (state->led_state)
- return 0;
- else
- led *= -1;
- }
-
- /* toggle LED if changing state */
- if (state->led_state != led) {
- u8 val;
-
- dprintk("%s: %d\n", __func__, led);
-
- au8522_led_gpio_enable(state, 1);
-
- val = au8522_readreg(state, 0x4000 |
- (led_config->gpio_leds & ~0xc000));
-
- /* start with all leds off */
- for (i = 0; i < led_config->num_led_states; i++)
- val &= ~led_config->led_states[i];
-
- /* set selected LED state */
- if (led < led_config->num_led_states)
- val |= led_config->led_states[led];
- else if (led_config->num_led_states)
- val |=
- led_config->led_states[led_config->num_led_states - 1];
-
- ret = au8522_writereg(state, 0x8000 |
- (led_config->gpio_leds & ~0xc000), val);
- if (ret < 0)
- return ret;
-
- state->led_state = led;
-
- if (led == 0)
- au8522_led_gpio_enable(state, 0);
- }
-
- return 0;
-}
-
-int au8522_sleep(struct dvb_frontend *fe)
-{
- struct au8522_state *state = fe->demodulator_priv;
- dprintk("%s()\n", __func__);
-
- /* Only power down if the digital side is currently using the chip */
- if (state->operational_mode == AU8522_ANALOG_MODE) {
- /* We're not in one of the expected power modes, which means
- that the DVB thread is probably telling us to go to sleep
- even though the analog frontend has already started using
- the chip. So ignore the request */
- return 0;
- }
-
- /* turn off led */
- au8522_led_ctrl(state, 0);
-
- /* Power down the chip */
- au8522_writereg(state, 0xa4, 1 << 5);
-
- state->current_frequency = 0;
-
- return 0;
-}
-
static int au8522_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct au8522_state *state = fe->demodulator_priv;
@@ -931,28 +738,6 @@ static int au8522_get_tune_settings(struct dvb_frontend *fe,
static struct dvb_frontend_ops au8522_ops;
-int au8522_get_state(struct au8522_state **state, struct i2c_adapter *i2c,
- u8 client_address)
-{
- int ret;
-
- mutex_lock(&au8522_list_mutex);
- ret = hybrid_tuner_request_state(struct au8522_state, (*state),
- hybrid_tuner_instance_list,
- i2c, client_address, "au8522");
- mutex_unlock(&au8522_list_mutex);
-
- return ret;
-}
-
-void au8522_release_state(struct au8522_state *state)
-{
- mutex_lock(&au8522_list_mutex);
- if (state != NULL)
- hybrid_tuner_release_state(state);
- mutex_unlock(&au8522_list_mutex);
-}
-
static void au8522_release(struct dvb_frontend *fe)
{
diff --git a/drivers/media/dvb/frontends/au8522_priv.h b/drivers/media/dvb/frontends/au8522_priv.h
index 751e17d692a9..6e4a438732b5 100644
--- a/drivers/media/dvb/frontends/au8522_priv.h
+++ b/drivers/media/dvb/frontends/au8522_priv.h
@@ -81,6 +81,8 @@ int au8522_sleep(struct dvb_frontend *fe);
int au8522_get_state(struct au8522_state **state, struct i2c_adapter *i2c,
u8 client_address);
void au8522_release_state(struct au8522_state *state);
+int au8522_i2c_gate_ctrl(struct dvb_frontend *fe, int enable);
+int au8522_led_ctrl(struct au8522_state *state, int led);
/* REGISTERS */
#define AU8522_INPUT_CONTROL_REG081H 0x081
diff --git a/drivers/media/dvb/frontends/cx24110.c b/drivers/media/dvb/frontends/cx24110.c
index 5101f10f2d7a..98ecaf0900d6 100644
--- a/drivers/media/dvb/frontends/cx24110.c
+++ b/drivers/media/dvb/frontends/cx24110.c
@@ -512,14 +512,13 @@ static int cx24110_read_snr(struct dvb_frontend* fe, u16* snr)
static int cx24110_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
{
struct cx24110_state *state = fe->demodulator_priv;
- u32 lastbyer;
if(cx24110_readreg(state,0x10)&0x40) {
/* the RS error counter has finished one counting window */
cx24110_writereg(state,0x10,0x60); /* select the byer reg */
- lastbyer=cx24110_readreg(state,0x12)|
- (cx24110_readreg(state,0x13)<<8)|
- (cx24110_readreg(state,0x14)<<16);
+ cx24110_readreg(state, 0x12) |
+ (cx24110_readreg(state, 0x13) << 8) |
+ (cx24110_readreg(state, 0x14) << 16);
cx24110_writereg(state,0x10,0x70); /* select the bler reg */
state->lastbler=cx24110_readreg(state,0x12)|
(cx24110_readreg(state,0x13)<<8)|
diff --git a/drivers/media/dvb/frontends/cxd2820r_core.c b/drivers/media/dvb/frontends/cxd2820r_core.c
index 5c7c2aaf9bf5..3bba37d74f57 100644
--- a/drivers/media/dvb/frontends/cxd2820r_core.c
+++ b/drivers/media/dvb/frontends/cxd2820r_core.c
@@ -526,12 +526,12 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
if (ret)
goto error;
- if (status & FE_HAS_SIGNAL)
+ if (status & FE_HAS_LOCK)
break;
}
/* check if we have a valid signal */
- if (status) {
+ if (status & FE_HAS_LOCK) {
priv->last_tune_failed = 0;
return DVBFE_ALGO_SEARCH_SUCCESS;
} else {
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 5ceadc285b3a..3e1eefada0e8 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -2396,11 +2396,6 @@ struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
more common) */
st->i2c_master.gated_tuner_i2c_adap.dev.parent = i2c_adap->dev.parent;
- /* FIXME: make sure the dev.parent field is initialized, or else
- request_firmware() will hit an OOPS (this should be moved somewhere
- more common) */
- st->i2c_master.gated_tuner_i2c_adap.dev.parent = i2c_adap->dev.parent;
-
dibx000_init_i2c_master(&st->i2c_master, DIB7000P, st->i2c_adap, st->i2c_addr);
/* init 7090 tuner adapter */
diff --git a/drivers/media/dvb/frontends/dib9000.c b/drivers/media/dvb/frontends/dib9000.c
index 80848b4c15d4..6201c59a78dd 100644
--- a/drivers/media/dvb/frontends/dib9000.c
+++ b/drivers/media/dvb/frontends/dib9000.c
@@ -31,13 +31,6 @@ struct i2c_device {
u8 *i2c_write_buffer;
};
-/* lock */
-#define DIB_LOCK struct mutex
-#define DibAcquireLock(lock) mutex_lock_interruptible(lock)
-#define DibReleaseLock(lock) mutex_unlock(lock)
-#define DibInitLock(lock) mutex_init(lock)
-#define DibFreeLock(lock)
-
struct dib9000_pid_ctrl {
#define DIB9000_PID_FILTER_CTRL 0
#define DIB9000_PID_FILTER 1
@@ -82,11 +75,11 @@ struct dib9000_state {
} fe_mm[18];
u8 memcmd;
- DIB_LOCK mbx_if_lock; /* to protect read/write operations */
- DIB_LOCK mbx_lock; /* to protect the whole mailbox handling */
+ struct mutex mbx_if_lock; /* to protect read/write operations */
+ struct mutex mbx_lock; /* to protect the whole mailbox handling */
- DIB_LOCK mem_lock; /* to protect the memory accesses */
- DIB_LOCK mem_mbx_lock; /* to protect the memory-based mailbox */
+ struct mutex mem_lock; /* to protect the memory accesses */
+ struct mutex mem_mbx_lock; /* to protect the memory-based mailbox */
#define MBX_MAX_WORDS (256 - 200 - 2)
#define DIB9000_MSG_CACHE_SIZE 2
@@ -108,7 +101,7 @@ struct dib9000_state {
struct i2c_msg msg[2];
u8 i2c_write_buffer[255];
u8 i2c_read_buffer[255];
- DIB_LOCK demod_lock;
+ struct mutex demod_lock;
u8 get_frontend_internal;
struct dib9000_pid_ctrl pid_ctrl[10];
s8 pid_ctrl_index; /* -1: empty list; -2: do not use the list */
@@ -446,13 +439,13 @@ static int dib9000_risc_mem_read(struct dib9000_state *state, u8 cmd, u8 * b, u1
if (!state->platform.risc.fw_is_running)
return -EIO;
- if (DibAcquireLock(&state->platform.risc.mem_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
dib9000_risc_mem_setup(state, cmd | 0x80);
dib9000_risc_mem_read_chunks(state, b, len);
- DibReleaseLock(&state->platform.risc.mem_lock);
+ mutex_unlock(&state->platform.risc.mem_lock);
return 0;
}
@@ -462,13 +455,13 @@ static int dib9000_risc_mem_write(struct dib9000_state *state, u8 cmd, const u8
if (!state->platform.risc.fw_is_running)
return -EIO;
- if (DibAcquireLock(&state->platform.risc.mem_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
dib9000_risc_mem_setup(state, cmd);
dib9000_risc_mem_write_chunks(state, b, m->size);
- DibReleaseLock(&state->platform.risc.mem_lock);
+ mutex_unlock(&state->platform.risc.mem_lock);
return 0;
}
@@ -537,7 +530,7 @@ static int dib9000_mbx_send_attr(struct dib9000_state *state, u8 id, u16 * data,
if (!state->platform.risc.fw_is_running)
return -EINVAL;
- if (DibAcquireLock(&state->platform.risc.mbx_if_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mbx_if_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -584,7 +577,7 @@ static int dib9000_mbx_send_attr(struct dib9000_state *state, u8 id, u16 * data,
ret = (u8) dib9000_write_word_attr(state, 1043, 1 << 14, attr);
out:
- DibReleaseLock(&state->platform.risc.mbx_if_lock);
+ mutex_unlock(&state->platform.risc.mbx_if_lock);
return ret;
}
@@ -602,7 +595,7 @@ static u8 dib9000_mbx_read(struct dib9000_state *state, u16 * data, u8 risc_id,
if (!state->platform.risc.fw_is_running)
return 0;
- if (DibAcquireLock(&state->platform.risc.mbx_if_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mbx_if_lock) < 0) {
dprintk("could not get the lock");
return 0;
}
@@ -643,7 +636,7 @@ static u8 dib9000_mbx_read(struct dib9000_state *state, u16 * data, u8 risc_id,
/* Update register nb_mes_in_TX */
dib9000_write_word_attr(state, 1028 + mc_base, 1 << 14, attr);
- DibReleaseLock(&state->platform.risc.mbx_if_lock);
+ mutex_unlock(&state->platform.risc.mbx_if_lock);
return size + 1;
}
@@ -708,12 +701,11 @@ static u8 dib9000_mbx_count(struct dib9000_state *state, u8 risc_id, u16 attr)
static int dib9000_mbx_process(struct dib9000_state *state, u16 attr)
{
int ret = 0;
- u16 tmp;
if (!state->platform.risc.fw_is_running)
return -1;
- if (DibAcquireLock(&state->platform.risc.mbx_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mbx_lock) < 0) {
dprintk("could not get the lock");
return -1;
}
@@ -721,10 +713,10 @@ static int dib9000_mbx_process(struct dib9000_state *state, u16 attr)
if (dib9000_mbx_count(state, 1, attr)) /* 1=RiscB */
ret = dib9000_mbx_fetch_to_cache(state, attr);
- tmp = dib9000_read_word_attr(state, 1229, attr); /* Clear the IRQ */
+ dib9000_read_word_attr(state, 1229, attr); /* Clear the IRQ */
/* if (tmp) */
/* dprintk( "cleared IRQ: %x", tmp); */
- DibReleaseLock(&state->platform.risc.mbx_lock);
+ mutex_unlock(&state->platform.risc.mbx_lock);
return ret;
}
@@ -1193,7 +1185,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe)
struct dibDVBTChannel *ch;
int ret = 0;
- if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -1323,7 +1315,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe)
}
error:
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
return ret;
}
@@ -1678,7 +1670,7 @@ static int dib9000_fw_component_bus_xfer(struct i2c_adapter *i2c_adap, struct i2
p[12] = 0;
}
- if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
dprintk("could not get the lock");
return 0;
}
@@ -1692,7 +1684,7 @@ static int dib9000_fw_component_bus_xfer(struct i2c_adapter *i2c_adap, struct i2
/* do the transaction */
if (dib9000_fw_memmbx_sync(state, FE_SYNC_COMPONENT_ACCESS) < 0) {
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
return 0;
}
@@ -1700,7 +1692,7 @@ static int dib9000_fw_component_bus_xfer(struct i2c_adapter *i2c_adap, struct i2
if ((num > 1) && (msg[1].flags & I2C_M_RD))
dib9000_risc_mem_read(state, FE_MM_RW_COMPONENT_ACCESS_BUFFER, msg[1].buf, msg[1].len);
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
return num;
}
@@ -1789,7 +1781,7 @@ int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
return 0;
}
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -1799,7 +1791,7 @@ int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
dprintk("PID filter enabled %d", onoff);
ret = dib9000_write_word(state, 294 + 1, val);
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return ret;
}
@@ -1824,14 +1816,14 @@ int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
return 0;
}
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
ret = dib9000_write_word(state, 300 + 1 + id,
onoff ? (1 << 13) | pid : 0);
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return ret;
}
EXPORT_SYMBOL(dib9000_fw_pid_filter);
@@ -1851,11 +1843,6 @@ static void dib9000_release(struct dvb_frontend *demod)
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (st->fe[index_frontend] != NULL); index_frontend++)
dvb_frontend_detach(st->fe[index_frontend]);
- DibFreeLock(&state->platform.risc.mbx_if_lock);
- DibFreeLock(&state->platform.risc.mbx_lock);
- DibFreeLock(&state->platform.risc.mem_lock);
- DibFreeLock(&state->platform.risc.mem_mbx_lock);
- DibFreeLock(&state->demod_lock);
dibx000_exit_i2c_master(&st->i2c_master);
i2c_del_adapter(&st->tuner_adap);
@@ -1875,7 +1862,7 @@ static int dib9000_sleep(struct dvb_frontend *fe)
u8 index_frontend;
int ret = 0;
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -1887,7 +1874,7 @@ static int dib9000_sleep(struct dvb_frontend *fe)
ret = dib9000_mbx_send(state, OUT_MSG_FE_SLEEP, NULL, 0);
error:
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return ret;
}
@@ -1905,7 +1892,7 @@ static int dib9000_get_frontend(struct dvb_frontend *fe)
int ret = 0;
if (state->get_frontend_internal == 0) {
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -1964,7 +1951,7 @@ static int dib9000_get_frontend(struct dvb_frontend *fe)
return_value:
if (state->get_frontend_internal == 0)
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return ret;
}
@@ -2012,7 +1999,7 @@ static int dib9000_set_frontend(struct dvb_frontend *fe)
}
state->pid_ctrl_index = -1; /* postpone the pid filtering cmd */
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return 0;
}
@@ -2081,7 +2068,7 @@ static int dib9000_set_frontend(struct dvb_frontend *fe)
/* check the tune result */
if (exit_condition == 1) { /* tune failed */
dprintk("tune failed");
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
/* tune failed; put all the pid filtering cmd to junk */
state->pid_ctrl_index = -1;
return 0;
@@ -2137,7 +2124,7 @@ static int dib9000_set_frontend(struct dvb_frontend *fe)
/* turn off the diversity for the last frontend */
dib9000_fw_set_diversity_in(state->fe[index_frontend - 1], 0);
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
if (state->pid_ctrl_index >= 0) {
u8 index_pid_filter_cmd;
u8 pid_ctrl_index = state->pid_ctrl_index;
@@ -2175,7 +2162,7 @@ static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
u8 index_frontend;
u16 lock = 0, lock_slave = 0;
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -2197,7 +2184,7 @@ static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
if ((lock & 0x0008) || (lock_slave & 0x0008))
*stat |= FE_HAS_LOCK;
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return 0;
}
@@ -2208,30 +2195,30 @@ static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
u16 *c;
int ret = 0;
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
- if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
dprintk("could not get the lock");
ret = -EINTR;
goto error;
}
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
ret = -EIO;
goto error;
}
dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR,
state->i2c_read_buffer, 16 * 2);
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
c = (u16 *)state->i2c_read_buffer;
*ber = c[10] << 16 | c[11];
error:
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return ret;
}
@@ -2243,7 +2230,7 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
u16 val;
int ret = 0;
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -2256,18 +2243,18 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
*strength += val;
}
- if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
dprintk("could not get the lock");
ret = -EINTR;
goto error;
}
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
ret = -EIO;
goto error;
}
dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
val = 65535 - c[4];
if (val > 65535 - *strength)
@@ -2276,7 +2263,7 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
*strength += val;
error:
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return ret;
}
@@ -2287,16 +2274,16 @@ static u32 dib9000_get_snr(struct dvb_frontend *fe)
u32 n, s, exp;
u16 val;
- if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
dprintk("could not get the lock");
return 0;
}
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
return 0;
}
dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
val = c[7];
n = (val >> 4) & 0xff;
@@ -2326,7 +2313,7 @@ static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
u8 index_frontend;
u32 snr_master;
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -2340,7 +2327,7 @@ static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
} else
*snr = 0;
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return 0;
}
@@ -2351,27 +2338,27 @@ static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
u16 *c = (u16 *)state->i2c_read_buffer;
int ret = 0;
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
- if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
dprintk("could not get the lock");
ret = -EINTR;
goto error;
}
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
ret = -EIO;
goto error;
}
dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
*unc = c[12];
error:
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return ret;
}
@@ -2514,11 +2501,11 @@ struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, c
st->gpio_val = DIB9000_GPIO_DEFAULT_VALUES;
st->gpio_pwm_pos = DIB9000_GPIO_DEFAULT_PWM_POS;
- DibInitLock(&st->platform.risc.mbx_if_lock);
- DibInitLock(&st->platform.risc.mbx_lock);
- DibInitLock(&st->platform.risc.mem_lock);
- DibInitLock(&st->platform.risc.mem_mbx_lock);
- DibInitLock(&st->demod_lock);
+ mutex_init(&st->platform.risc.mbx_if_lock);
+ mutex_init(&st->platform.risc.mbx_lock);
+ mutex_init(&st->platform.risc.mem_lock);
+ mutex_init(&st->platform.risc.mem_mbx_lock);
+ mutex_init(&st->demod_lock);
st->get_frontend_internal = 0;
st->pid_ctrl_index = -2;
diff --git a/drivers/media/dvb/frontends/drxd.h b/drivers/media/dvb/frontends/drxd.h
index 34398738f9bc..216c8c3702f8 100644
--- a/drivers/media/dvb/frontends/drxd.h
+++ b/drivers/media/dvb/frontends/drxd.h
@@ -51,9 +51,23 @@ struct drxd_config {
s16(*osc_deviation) (void *priv, s16 dev, int flag);
};
+#if defined(CONFIG_DVB_DRXD) || \
+ (defined(CONFIG_DVB_DRXD_MODULE) && defined(MODULE))
extern
struct dvb_frontend *drxd_attach(const struct drxd_config *config,
void *priv, struct i2c_adapter *i2c,
struct device *dev);
+#else
+static inline
+struct dvb_frontend *drxd_attach(const struct drxd_config *config,
+ void *priv, struct i2c_adapter *i2c,
+ struct device *dev)
+{
+ printk(KERN_INFO "%s: not probed - driver disabled by Kconfig\n",
+ __func__);
+ return NULL;
+}
+#endif
+
extern int drxd_config_i2c(struct dvb_frontend *, int);
#endif
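
The drxd.h hunk above uses the usual dvb-frontend attach-stub pattern: a real prototype when the driver (or its module form) is enabled in Kconfig, and an inline stub returning NULL otherwise. A generic user-space sketch of the same idea; FOO_ENABLED and foo_attach() are made-up names, and the build flag -DFOO_ENABLED selects the "real" path:

#include <stdio.h>

struct foo_frontend { int id; };

#if defined(FOO_ENABLED)
static struct foo_frontend real_fe = { .id = 1 };

static struct foo_frontend *foo_attach(void)
{
	return &real_fe;	/* a real driver would probe hardware here */
}
#else
static inline struct foo_frontend *foo_attach(void)
{
	printf("%s: not probed - driver disabled by build config\n", __func__);
	return NULL;	/* caller treats NULL as "frontend not available" */
}
#endif

int main(void)
{
	return foo_attach() ? 0 : 1;
}
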
diff --git a/drivers/media/dvb/frontends/drxk_hard.c b/drivers/media/dvb/frontends/drxk_hard.c
index a414b1f2b6a5..60b868faeacf 100644
--- a/drivers/media/dvb/frontends/drxk_hard.c
+++ b/drivers/media/dvb/frontends/drxk_hard.c
@@ -1380,20 +1380,20 @@ static int DownloadMicrocode(struct drxk_state *state,
const u8 pMCImage[], u32 Length)
{
const u8 *pSrc = pMCImage;
- u16 Flags;
- u16 Drain;
u32 Address;
u16 nBlocks;
u16 BlockSize;
- u16 BlockCRC;
u32 offset = 0;
u32 i;
int status = 0;
dprintk(1, "\n");
- /* down the drain (we don care about MAGIC_WORD) */
+ /* down the drain (we don't care about MAGIC_WORD) */
+#if 0
+ /* For future reference */
Drain = (pSrc[0] << 8) | pSrc[1];
+#endif
pSrc += sizeof(u16);
offset += sizeof(u16);
nBlocks = (pSrc[0] << 8) | pSrc[1];
@@ -1410,11 +1410,17 @@ static int DownloadMicrocode(struct drxk_state *state,
pSrc += sizeof(u16);
offset += sizeof(u16);
+#if 0
+ /* For future reference */
Flags = (pSrc[0] << 8) | pSrc[1];
+#endif
pSrc += sizeof(u16);
offset += sizeof(u16);
+#if 0
+ /* For future reference */
BlockCRC = (pSrc[0] << 8) | pSrc[1];
+#endif
pSrc += sizeof(u16);
offset += sizeof(u16);
@@ -5829,7 +5835,7 @@ static int WriteGPIO(struct drxk_state *state)
}
if (state->UIO_mask & 0x0002) { /* UIO-2 */
/* write to io pad configuration register - output mode */
- status = write16(state, SIO_PDR_SMA_TX_CFG__A, state->m_GPIOCfg);
+ status = write16(state, SIO_PDR_SMA_RX_CFG__A, state->m_GPIOCfg);
if (status < 0)
goto error;
@@ -5848,7 +5854,7 @@ static int WriteGPIO(struct drxk_state *state)
}
if (state->UIO_mask & 0x0004) { /* UIO-3 */
/* write to io pad configuration register - output mode */
- status = write16(state, SIO_PDR_SMA_TX_CFG__A, state->m_GPIOCfg);
+ status = write16(state, SIO_PDR_GPIO_CFG__A, state->m_GPIOCfg);
if (status < 0)
goto error;
diff --git a/drivers/media/dvb/frontends/drxk_map.h b/drivers/media/dvb/frontends/drxk_map.h
index 9b11a8328869..23e16c12f234 100644
--- a/drivers/media/dvb/frontends/drxk_map.h
+++ b/drivers/media/dvb/frontends/drxk_map.h
@@ -432,6 +432,7 @@
#define SIO_PDR_UIO_OUT_LO__A 0x7F0016
#define SIO_PDR_OHW_CFG__A 0x7F001F
#define SIO_PDR_OHW_CFG_FREF_SEL__M 0x3
+#define SIO_PDR_GPIO_CFG__A 0x7F0021
#define SIO_PDR_MSTRT_CFG__A 0x7F0025
#define SIO_PDR_MERR_CFG__A 0x7F0026
#define SIO_PDR_MCLK_CFG__A 0x7F0028
@@ -446,4 +447,5 @@
#define SIO_PDR_MD5_CFG__A 0x7F0030
#define SIO_PDR_MD6_CFG__A 0x7F0031
#define SIO_PDR_MD7_CFG__A 0x7F0032
+#define SIO_PDR_SMA_RX_CFG__A 0x7F0037
#define SIO_PDR_SMA_TX_CFG__A 0x7F0038
diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
index af65d013db11..4c8ac2657c4a 100644
--- a/drivers/media/dvb/frontends/ds3000.c
+++ b/drivers/media/dvb/frontends/ds3000.c
@@ -1114,7 +1114,10 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
ds3000_writereg(state,
ds3000_dvbs2_init_tab[i],
ds3000_dvbs2_init_tab[i + 1]);
- ds3000_writereg(state, 0xfe, 0x98);
+ if (c->symbol_rate >= 30000000)
+ ds3000_writereg(state, 0xfe, 0x54);
+ else
+ ds3000_writereg(state, 0xfe, 0x98);
break;
default:
return 1;
diff --git a/drivers/media/dvb/frontends/it913x-fe.c b/drivers/media/dvb/frontends/it913x-fe.c
index 84df03c29179..708cbf197913 100644
--- a/drivers/media/dvb/frontends/it913x-fe.c
+++ b/drivers/media/dvb/frontends/it913x-fe.c
@@ -633,10 +633,9 @@ static int it913x_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
static int it913x_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct it913x_fe_state *state = fe->demodulator_priv;
- int ret;
u8 reg[5];
/* Read Aborted Packets and Pre-Viterbi error rate 5 bytes */
- ret = it913x_read_reg(state, RSD_ABORT_PKT_LSB, reg, sizeof(reg));
+ it913x_read_reg(state, RSD_ABORT_PKT_LSB, reg, sizeof(reg));
state->ucblocks += (u32)(reg[1] << 8) | reg[0];
*ber = (u32)(reg[4] << 16) | (reg[3] << 8) | reg[2];
return 0;
@@ -658,10 +657,9 @@ static int it913x_fe_get_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct it913x_fe_state *state = fe->demodulator_priv;
- int ret;
u8 reg[8];
- ret = it913x_read_reg(state, REG_TPSD_TX_MODE, reg, sizeof(reg));
+ it913x_read_reg(state, REG_TPSD_TX_MODE, reg, sizeof(reg));
if (reg[3] < 3)
p->modulation = fe_con[reg[3]];
@@ -691,25 +689,25 @@ static int it913x_fe_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct it913x_fe_state *state = fe->demodulator_priv;
- int ret, i;
+ int i;
u8 empty_ch, last_ch;
state->it913x_status = 0;
/* Set bw*/
- ret = it913x_fe_select_bw(state, p->bandwidth_hz,
+ it913x_fe_select_bw(state, p->bandwidth_hz,
state->adcFrequency);
/* Training Mode Off */
- ret = it913x_write_reg(state, PRO_LINK, TRAINING_MODE, 0x0);
+ it913x_write_reg(state, PRO_LINK, TRAINING_MODE, 0x0);
/* Clear Empty Channel */
- ret = it913x_write_reg(state, PRO_DMOD, EMPTY_CHANNEL_STATUS, 0x0);
+ it913x_write_reg(state, PRO_DMOD, EMPTY_CHANNEL_STATUS, 0x0);
/* Clear bits */
- ret = it913x_write_reg(state, PRO_DMOD, MP2IF_SYNC_LK, 0x0);
+ it913x_write_reg(state, PRO_DMOD, MP2IF_SYNC_LK, 0x0);
/* LED on */
- ret = it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x1);
+ it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x1);
/* Select Band*/
if ((p->frequency >= 51000000) && (p->frequency <= 230000000))
i = 0;
@@ -720,7 +718,7 @@ static int it913x_fe_set_frontend(struct dvb_frontend *fe)
else
return -EOPNOTSUPP;
- ret = it913x_write_reg(state, PRO_DMOD, FREE_BAND, i);
+ it913x_write_reg(state, PRO_DMOD, FREE_BAND, i);
deb_info("Frontend Set Tuner Type %02x", state->tuner_type);
switch (state->tuner_type) {
@@ -730,7 +728,7 @@ static int it913x_fe_set_frontend(struct dvb_frontend *fe)
case IT9135_60:
case IT9135_61:
case IT9135_62:
- ret = it9137_set_tuner(state,
+ it9137_set_tuner(state,
p->bandwidth_hz, p->frequency);
break;
default:
@@ -742,9 +740,9 @@ static int it913x_fe_set_frontend(struct dvb_frontend *fe)
break;
}
/* LED off */
- ret = it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x0);
+ it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x0);
/* Trigger ofsm */
- ret = it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x0);
+ it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x0);
last_ch = 2;
for (i = 0; i < 40; ++i) {
empty_ch = it913x_read_reg_u8(state, EMPTY_CHANNEL_STATUS);
diff --git a/drivers/media/dvb/frontends/lg2160.c b/drivers/media/dvb/frontends/lg2160.c
new file mode 100644
index 000000000000..a3ab1a5b6597
--- /dev/null
+++ b/drivers/media/dvb/frontends/lg2160.c
@@ -0,0 +1,1468 @@
+/*
+ * Support for LG2160 - ATSC/MH
+ *
+ * Copyright (C) 2010 Michael Krufky <mkrufky@linuxtv.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/jiffies.h>
+#include <linux/dvb/frontend.h>
+#include "lg2160.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "set debug level (info=1, reg=2 (or-able))");
+
+#define DBG_INFO 1
+#define DBG_REG 2
+
+#define lg_printk(kern, fmt, arg...) \
+ printk(kern "%s: " fmt, __func__, ##arg)
+
+#define lg_info(fmt, arg...) printk(KERN_INFO "lg2160: " fmt, ##arg)
+#define lg_warn(fmt, arg...) lg_printk(KERN_WARNING, fmt, ##arg)
+#define lg_err(fmt, arg...) lg_printk(KERN_ERR, fmt, ##arg)
+#define lg_dbg(fmt, arg...) if (debug & DBG_INFO) \
+ lg_printk(KERN_DEBUG, fmt, ##arg)
+#define lg_reg(fmt, arg...) if (debug & DBG_REG) \
+ lg_printk(KERN_DEBUG, fmt, ##arg)
+
+#define lg_fail(ret) \
+({ \
+ int __ret; \
+ __ret = (ret < 0); \
+ if (__ret) \
+ lg_err("error %d on line %d\n", ret, __LINE__); \
+ __ret; \
+})
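
lg_fail() above is a GNU statement expression, so it both logs the failure and evaluates to a truth value that callers can branch on ("if (lg_fail(ret)) goto fail;"). A hypothetical user-space demo of the same construct (gcc/clang; demo_fail() and demo_err() are made-up names):

#include <stdio.h>

#define demo_err(fmt, arg...) fprintf(stderr, "lg2160 demo: " fmt, ##arg)

#define demo_fail(ret) \
({ \
	int __ret = ((ret) < 0); \
	if (__ret) \
		demo_err("error %d on line %d\n", (ret), __LINE__); \
	__ret; \
})

int main(void)
{
	int ok = 0, bad = -5;

	if (demo_fail(ok))	/* evaluates to 0, nothing logged */
		return 1;
	if (demo_fail(bad))	/* logs and evaluates to 1 */
		return 2;
	return 0;
}
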
+
+struct lg216x_state {
+ struct i2c_adapter *i2c_adap;
+ const struct lg2160_config *cfg;
+
+ struct dvb_frontend frontend;
+
+ u32 current_frequency;
+ u8 parade_id;
+ u8 fic_ver;
+ unsigned int last_reset;
+};
+
+/* ------------------------------------------------------------------------ */
+
+static int lg216x_write_reg(struct lg216x_state *state, u16 reg, u8 val)
+{
+ int ret;
+ u8 buf[] = { reg >> 8, reg & 0xff, val };
+ struct i2c_msg msg = {
+ .addr = state->cfg->i2c_addr, .flags = 0,
+ .buf = buf, .len = 3,
+ };
+
+ lg_reg("reg: 0x%04x, val: 0x%02x\n", reg, val);
+
+ ret = i2c_transfer(state->i2c_adap, &msg, 1);
+
+ if (ret != 1) {
+ lg_err("error (addr %02x %02x <- %02x, err = %i)\n",
+ msg.buf[0], msg.buf[1], msg.buf[2], ret);
+ if (ret < 0)
+ return ret;
+ else
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int lg216x_read_reg(struct lg216x_state *state, u16 reg, u8 *val)
+{
+ int ret;
+ u8 reg_buf[] = { reg >> 8, reg & 0xff };
+ struct i2c_msg msg[] = {
+ { .addr = state->cfg->i2c_addr,
+ .flags = 0, .buf = reg_buf, .len = 2 },
+ { .addr = state->cfg->i2c_addr,
+ .flags = I2C_M_RD, .buf = val, .len = 1 },
+ };
+
+ lg_reg("reg: 0x%04x\n", reg);
+
+ ret = i2c_transfer(state->i2c_adap, msg, 2);
+
+ if (ret != 2) {
+ lg_err("error (addr %02x reg %04x error (ret == %i)\n",
+ state->cfg->i2c_addr, reg, ret);
+ if (ret < 0)
+ return ret;
+ else
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+struct lg216x_reg {
+ u16 reg;
+ u8 val;
+};
+
+static int lg216x_write_regs(struct lg216x_state *state,
+ struct lg216x_reg *regs, int len)
+{
+ int i, ret;
+
+ lg_reg("writing %d registers...\n", len);
+
+ for (i = 0; i < len - 1; i++) {
+ ret = lg216x_write_reg(state, regs[i].reg, regs[i].val);
+ if (lg_fail(ret))
+ return ret;
+ }
+ return 0;
+}
+
+static int lg216x_set_reg_bit(struct lg216x_state *state,
+ u16 reg, int bit, int onoff)
+{
+ u8 val;
+ int ret;
+
+ lg_reg("reg: 0x%04x, bit: %d, level: %d\n", reg, bit, onoff);
+
+ ret = lg216x_read_reg(state, reg, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= ~(1 << bit);
+ val |= (onoff & 1) << bit;
+
+ ret = lg216x_write_reg(state, reg, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg216x_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct lg216x_state *state = fe->demodulator_priv;
+ int ret;
+
+ if (state->cfg->deny_i2c_rptr)
+ return 0;
+
+ lg_dbg("(%d)\n", enable);
+
+ ret = lg216x_set_reg_bit(state, 0x0000, 0, enable ? 0 : 1);
+
+ msleep(1);
+
+ return ret;
+}
+
+static int lg216x_soft_reset(struct lg216x_state *state)
+{
+ int ret;
+
+ lg_dbg("\n");
+
+ ret = lg216x_write_reg(state, 0x0002, 0x00);
+ if (lg_fail(ret))
+ goto fail;
+
+ msleep(20);
+ ret = lg216x_write_reg(state, 0x0002, 0x01);
+ if (lg_fail(ret))
+ goto fail;
+
+ state->last_reset = jiffies_to_msecs(jiffies);
+fail:
+ return ret;
+}
+
+static int lg216x_initialize(struct lg216x_state *state)
+{
+ int ret;
+
+ static struct lg216x_reg lg2160_init[] = {
+#if 0
+ { .reg = 0x0015, .val = 0xe6 },
+#else
+ { .reg = 0x0015, .val = 0xf7 },
+ { .reg = 0x001b, .val = 0x52 },
+ { .reg = 0x0208, .val = 0x00 },
+ { .reg = 0x0209, .val = 0x82 },
+ { .reg = 0x0210, .val = 0xf9 },
+ { .reg = 0x020a, .val = 0x00 },
+ { .reg = 0x020b, .val = 0x82 },
+ { .reg = 0x020d, .val = 0x28 },
+ { .reg = 0x020f, .val = 0x14 },
+#endif
+ };
+
+ static struct lg216x_reg lg2161_init[] = {
+ { .reg = 0x0000, .val = 0x41 },
+ { .reg = 0x0001, .val = 0xfb },
+ { .reg = 0x0216, .val = 0x00 },
+ { .reg = 0x0219, .val = 0x00 },
+ { .reg = 0x021b, .val = 0x55 },
+ { .reg = 0x0606, .val = 0x0a },
+ };
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_write_regs(state,
+ lg2160_init, ARRAY_SIZE(lg2160_init));
+ break;
+ case LG2161:
+ ret = lg216x_write_regs(state,
+ lg2161_init, ARRAY_SIZE(lg2161_init));
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_soft_reset(state);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg216x_set_if(struct lg216x_state *state)
+{
+ u8 val;
+ int ret;
+
+ lg_dbg("%d KHz\n", state->cfg->if_khz);
+
+ ret = lg216x_read_reg(state, 0x0132, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xfb;
+ val |= (0 == state->cfg->if_khz) ? 0x04 : 0x00;
+
+ ret = lg216x_write_reg(state, 0x0132, val);
+ lg_fail(ret);
+
+ /* if NOT zero IF, 6 MHz is the default */
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg2160_agc_fix(struct lg216x_state *state,
+ int if_agc_fix, int rf_agc_fix)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0100, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xf3;
+ val |= (if_agc_fix) ? 0x08 : 0x00;
+ val |= (rf_agc_fix) ? 0x04 : 0x00;
+
+ ret = lg216x_write_reg(state, 0x0100, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+#if 0
+static int lg2160_agc_freeze(struct lg216x_state *state,
+ int if_agc_freeze, int rf_agc_freeze)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0100, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xcf;
+ val |= (if_agc_freeze) ? 0x20 : 0x00;
+ val |= (rf_agc_freeze) ? 0x10 : 0x00;
+
+ ret = lg216x_write_reg(state, 0x0100, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+#endif
+
+static int lg2160_agc_polarity(struct lg216x_state *state,
+ int if_agc_polarity, int rf_agc_polarity)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0100, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xfc;
+ val |= (if_agc_polarity) ? 0x02 : 0x00;
+ val |= (rf_agc_polarity) ? 0x01 : 0x00;
+
+ ret = lg216x_write_reg(state, 0x0100, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+static int lg2160_tuner_pwr_save_polarity(struct lg216x_state *state,
+ int polarity)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0008, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xfe;
+ val |= (polarity) ? 0x01 : 0x00;
+
+ ret = lg216x_write_reg(state, 0x0008, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+static int lg2160_spectrum_polarity(struct lg216x_state *state,
+ int inverted)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0132, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xfd;
+ val |= (inverted) ? 0x02 : 0x00;
+
+ ret = lg216x_write_reg(state, 0x0132, val);
+ lg_fail(ret);
+fail:
+ return lg216x_soft_reset(state);
+}
+
+static int lg2160_tuner_pwr_save(struct lg216x_state *state, int onoff)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0007, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xbf;
+ val |= (onoff) ? 0x40 : 0x00;
+
+ ret = lg216x_write_reg(state, 0x0007, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+static int lg216x_set_parade(struct lg216x_state *state, int id)
+{
+ int ret;
+
+ ret = lg216x_write_reg(state, 0x013e, id & 0x7f);
+ if (lg_fail(ret))
+ goto fail;
+
+ state->parade_id = id & 0x7f;
+fail:
+ return ret;
+}
+
+static int lg216x_set_ensemble(struct lg216x_state *state, int id)
+{
+ int ret;
+ u16 reg;
+ u8 val;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ reg = 0x0400;
+ break;
+ case LG2161:
+ default:
+ reg = 0x0500;
+ break;
+ }
+
+ ret = lg216x_read_reg(state, reg, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xfe;
+ val |= (id) ? 0x01 : 0x00;
+
+ ret = lg216x_write_reg(state, reg, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+static int lg2160_set_spi_clock(struct lg216x_state *state)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0014, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xf3;
+ val |= (state->cfg->spi_clock << 2);
+
+ ret = lg216x_write_reg(state, 0x0014, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+static int lg2161_set_output_interface(struct lg216x_state *state)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0014, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= ~0x07;
+ val |= state->cfg->output_if; /* FIXME: needs sanity check */
+
+ ret = lg216x_write_reg(state, 0x0014, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+static int lg216x_enable_fic(struct lg216x_state *state, int onoff)
+{
+ int ret;
+
+ ret = lg216x_write_reg(state, 0x0017, 0x23);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_write_reg(state, 0x0016, 0xfc);
+ if (lg_fail(ret))
+ goto fail;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_write_reg(state, 0x0016,
+ 0xfc | ((onoff) ? 0x02 : 0x00));
+ break;
+ case LG2161:
+ ret = lg216x_write_reg(state, 0x0016, (onoff) ? 0x10 : 0x00);
+ break;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_initialize(state);
+ if (lg_fail(ret))
+ goto fail;
+
+ if (onoff) {
+ ret = lg216x_write_reg(state, 0x0017, 0x03);
+ lg_fail(ret);
+ }
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg216x_get_fic_version(struct lg216x_state *state, u8 *ficver)
+{
+ u8 val;
+ int ret;
+
+ *ficver = 0xff; /* invalid value */
+
+ ret = lg216x_read_reg(state, 0x0128, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *ficver = (val >> 3) & 0x1f;
+fail:
+ return ret;
+}
+
+#if 0
+static int lg2160_get_parade_id(struct lg216x_state *state, u8 *id)
+{
+ u8 val;
+ int ret;
+
+ *id = 0xff; /* invalid value */
+
+ ret = lg216x_read_reg(state, 0x0123, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *id = val & 0x7f;
+fail:
+ return ret;
+}
+#endif
+
+static int lg216x_get_nog(struct lg216x_state *state, u8 *nog)
+{
+ u8 val;
+ int ret;
+
+ *nog = 0xff; /* invalid value */
+
+ ret = lg216x_read_reg(state, 0x0124, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *nog = ((val >> 4) & 0x07) + 1;
+fail:
+ return ret;
+}
+
+static int lg216x_get_tnog(struct lg216x_state *state, u8 *tnog)
+{
+ u8 val;
+ int ret;
+
+ *tnog = 0xff; /* invalid value */
+
+ ret = lg216x_read_reg(state, 0x0125, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *tnog = val & 0x1f;
+fail:
+ return ret;
+}
+
+static int lg216x_get_sgn(struct lg216x_state *state, u8 *sgn)
+{
+ u8 val;
+ int ret;
+
+ *sgn = 0xff; /* invalid value */
+
+ ret = lg216x_read_reg(state, 0x0124, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *sgn = val & 0x0f;
+fail:
+ return ret;
+}
+
+static int lg216x_get_prc(struct lg216x_state *state, u8 *prc)
+{
+ u8 val;
+ int ret;
+
+ *prc = 0xff; /* invalid value */
+
+ ret = lg216x_read_reg(state, 0x0125, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *prc = ((val >> 5) & 0x07) + 1;
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg216x_get_rs_frame_mode(struct lg216x_state *state,
+ enum atscmh_rs_frame_mode *rs_framemode)
+{
+ u8 val;
+ int ret;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_read_reg(state, 0x0410, &val);
+ break;
+ case LG2161:
+ ret = lg216x_read_reg(state, 0x0513, &val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ switch ((val >> 4) & 0x03) {
+#if 1
+ default:
+#endif
+ case 0x00:
+ *rs_framemode = ATSCMH_RSFRAME_PRI_ONLY;
+ break;
+ case 0x01:
+ *rs_framemode = ATSCMH_RSFRAME_PRI_SEC;
+ break;
+#if 0
+ default:
+ *rs_framemode = ATSCMH_RSFRAME_RES;
+ break;
+#endif
+ }
+fail:
+ return ret;
+}
+
+static
+int lg216x_get_rs_frame_ensemble(struct lg216x_state *state,
+ enum atscmh_rs_frame_ensemble *rs_frame_ens)
+{
+ u8 val;
+ int ret;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_read_reg(state, 0x0400, &val);
+ break;
+ case LG2161:
+ ret = lg216x_read_reg(state, 0x0500, &val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0x01;
+ *rs_frame_ens = (enum atscmh_rs_frame_ensemble) val;
+fail:
+ return ret;
+}
+
+static int lg216x_get_rs_code_mode(struct lg216x_state *state,
+ enum atscmh_rs_code_mode *rs_code_pri,
+ enum atscmh_rs_code_mode *rs_code_sec)
+{
+ u8 val;
+ int ret;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_read_reg(state, 0x0410, &val);
+ break;
+ case LG2161:
+ ret = lg216x_read_reg(state, 0x0513, &val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ *rs_code_pri = (enum atscmh_rs_code_mode) ((val >> 2) & 0x03);
+ *rs_code_sec = (enum atscmh_rs_code_mode) (val & 0x03);
+fail:
+ return ret;
+}
+
+static int lg216x_get_sccc_block_mode(struct lg216x_state *state,
+ enum atscmh_sccc_block_mode *sccc_block)
+{
+ u8 val;
+ int ret;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_read_reg(state, 0x0315, &val);
+ break;
+ case LG2161:
+ ret = lg216x_read_reg(state, 0x0511, &val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ switch (val & 0x03) {
+ case 0x00:
+ *sccc_block = ATSCMH_SCCC_BLK_SEP;
+ break;
+ case 0x01:
+ *sccc_block = ATSCMH_SCCC_BLK_COMB;
+ break;
+ default:
+ *sccc_block = ATSCMH_SCCC_BLK_RES;
+ break;
+ }
+fail:
+ return ret;
+}
+
+static int lg216x_get_sccc_code_mode(struct lg216x_state *state,
+ enum atscmh_sccc_code_mode *mode_a,
+ enum atscmh_sccc_code_mode *mode_b,
+ enum atscmh_sccc_code_mode *mode_c,
+ enum atscmh_sccc_code_mode *mode_d)
+{
+ u8 val;
+ int ret;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_read_reg(state, 0x0316, &val);
+ break;
+ case LG2161:
+ ret = lg216x_read_reg(state, 0x0512, &val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ switch ((val >> 6) & 0x03) {
+ case 0x00:
+ *mode_a = ATSCMH_SCCC_CODE_HLF;
+ break;
+ case 0x01:
+ *mode_a = ATSCMH_SCCC_CODE_QTR;
+ break;
+ default:
+ *mode_a = ATSCMH_SCCC_CODE_RES;
+ break;
+ }
+
+ switch ((val >> 4) & 0x03) {
+ case 0x00:
+ *mode_b = ATSCMH_SCCC_CODE_HLF;
+ break;
+ case 0x01:
+ *mode_b = ATSCMH_SCCC_CODE_QTR;
+ break;
+ default:
+ *mode_b = ATSCMH_SCCC_CODE_RES;
+ break;
+ }
+
+ switch ((val >> 2) & 0x03) {
+ case 0x00:
+ *mode_c = ATSCMH_SCCC_CODE_HLF;
+ break;
+ case 0x01:
+ *mode_c = ATSCMH_SCCC_CODE_QTR;
+ break;
+ default:
+ *mode_c = ATSCMH_SCCC_CODE_RES;
+ break;
+ }
+
+ switch (val & 0x03) {
+ case 0x00:
+ *mode_d = ATSCMH_SCCC_CODE_HLF;
+ break;
+ case 0x01:
+ *mode_d = ATSCMH_SCCC_CODE_QTR;
+ break;
+ default:
+ *mode_d = ATSCMH_SCCC_CODE_RES;
+ break;
+ }
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#if 0
+static int lg216x_read_fic_err_count(struct lg216x_state *state, u8 *err)
+{
+ u8 fic_err;
+ int ret;
+
+ *err = 0;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_read_reg(state, 0x0012, &fic_err);
+ break;
+ case LG2161:
+ ret = lg216x_read_reg(state, 0x001e, &fic_err);
+ break;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ *err = fic_err;
+fail:
+ return ret;
+}
+
+static int lg2160_read_crc_err_count(struct lg216x_state *state, u16 *err)
+{
+ u8 crc_err1, crc_err2;
+ int ret;
+
+ *err = 0;
+
+ ret = lg216x_read_reg(state, 0x0411, &crc_err1);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_read_reg(state, 0x0412, &crc_err2);
+ if (lg_fail(ret))
+ goto fail;
+
+ *err = (u16)(((crc_err2 & 0x0f) << 8) | crc_err1);
+fail:
+ return ret;
+}
+
+static int lg2161_read_crc_err_count(struct lg216x_state *state, u16 *err)
+{
+ u8 crc_err;
+ int ret;
+
+ *err = 0;
+
+ ret = lg216x_read_reg(state, 0x0612, &crc_err);
+ if (lg_fail(ret))
+ goto fail;
+
+ *err = (u16)crc_err;
+fail:
+ return ret;
+}
+
+static int lg216x_read_crc_err_count(struct lg216x_state *state, u16 *err)
+{
+ int ret;
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg2160_read_crc_err_count(state, err);
+ break;
+ case LG2161:
+ ret = lg2161_read_crc_err_count(state, err);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int lg2160_read_rs_err_count(struct lg216x_state *state, u16 *err)
+{
+ u8 rs_err1, rs_err2;
+ int ret;
+
+ *err = 0;
+
+ ret = lg216x_read_reg(state, 0x0413, &rs_err1);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_read_reg(state, 0x0414, &rs_err2);
+ if (lg_fail(ret))
+ goto fail;
+
+ *err = (u16)(((rs_err2 & 0x0f) << 8) | rs_err1);
+fail:
+ return ret;
+}
+
+static int lg2161_read_rs_err_count(struct lg216x_state *state, u16 *err)
+{
+ u8 rs_err1, rs_err2;
+ int ret;
+
+ *err = 0;
+
+ ret = lg216x_read_reg(state, 0x0613, &rs_err1);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_read_reg(state, 0x0614, &rs_err2);
+ if (lg_fail(ret))
+ goto fail;
+
+ *err = (u16)((rs_err1 << 8) | rs_err2);
+fail:
+ return ret;
+}
+
+static int lg216x_read_rs_err_count(struct lg216x_state *state, u16 *err)
+{
+ int ret;
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg2160_read_rs_err_count(state, err);
+ break;
+ case LG2161:
+ ret = lg2161_read_rs_err_count(state, err);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+static int lg216x_get_frontend(struct dvb_frontend *fe)
+{
+ struct lg216x_state *state = fe->demodulator_priv;
+ int ret;
+
+ lg_dbg("\n");
+
+ fe->dtv_property_cache.modulation = VSB_8;
+ fe->dtv_property_cache.frequency = state->current_frequency;
+ fe->dtv_property_cache.delivery_system = SYS_ATSCMH;
+
+ ret = lg216x_get_fic_version(state,
+ &fe->dtv_property_cache.atscmh_fic_ver);
+ if (lg_fail(ret))
+ goto fail;
+ if (state->fic_ver != fe->dtv_property_cache.atscmh_fic_ver) {
+ state->fic_ver = fe->dtv_property_cache.atscmh_fic_ver;
+
+#if 0
+ ret = lg2160_get_parade_id(state,
+ &fe->dtv_property_cache.atscmh_parade_id);
+ if (lg_fail(ret))
+ goto fail;
+/* #else */
+ fe->dtv_property_cache.atscmh_parade_id = state->parade_id;
+#endif
+ ret = lg216x_get_nog(state,
+ &fe->dtv_property_cache.atscmh_nog);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_get_tnog(state,
+ &fe->dtv_property_cache.atscmh_tnog);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_get_sgn(state,
+ &fe->dtv_property_cache.atscmh_sgn);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_get_prc(state,
+ &fe->dtv_property_cache.atscmh_prc);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_get_rs_frame_mode(state,
+ (enum atscmh_rs_frame_mode *)
+ &fe->dtv_property_cache.atscmh_rs_frame_mode);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_get_rs_frame_ensemble(state,
+ (enum atscmh_rs_frame_ensemble *)
+ &fe->dtv_property_cache.atscmh_rs_frame_ensemble);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_get_rs_code_mode(state,
+ (enum atscmh_rs_code_mode *)
+ &fe->dtv_property_cache.atscmh_rs_code_mode_pri,
+ (enum atscmh_rs_code_mode *)
+ &fe->dtv_property_cache.atscmh_rs_code_mode_sec);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_get_sccc_block_mode(state,
+ (enum atscmh_sccc_block_mode *)
+ &fe->dtv_property_cache.atscmh_sccc_block_mode);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_get_sccc_code_mode(state,
+ (enum atscmh_sccc_code_mode *)
+ &fe->dtv_property_cache.atscmh_sccc_code_mode_a,
+ (enum atscmh_sccc_code_mode *)
+ &fe->dtv_property_cache.atscmh_sccc_code_mode_b,
+ (enum atscmh_sccc_code_mode *)
+ &fe->dtv_property_cache.atscmh_sccc_code_mode_c,
+ (enum atscmh_sccc_code_mode *)
+ &fe->dtv_property_cache.atscmh_sccc_code_mode_d);
+ if (lg_fail(ret))
+ goto fail;
+ }
+#if 0
+ ret = lg216x_read_fic_err_count(state,
+ (u8 *)&fe->dtv_property_cache.atscmh_fic_err);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_read_crc_err_count(state,
+ &fe->dtv_property_cache.atscmh_crc_err);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_read_rs_err_count(state,
+ &fe->dtv_property_cache.atscmh_rs_err);
+ if (lg_fail(ret))
+ goto fail;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ if (((fe->dtv_property_cache.atscmh_rs_err >= 240) &&
+ (fe->dtv_property_cache.atscmh_crc_err >= 240)) &&
+ ((jiffies_to_msecs(jiffies) - state->last_reset) > 6000))
+ ret = lg216x_soft_reset(state);
+ break;
+ case LG2161:
+ /* no fix needed here (as far as we know) */
+ ret = 0;
+ break;
+ }
+ lg_fail(ret);
+#endif
+fail:
+ return ret;
+}
+
+static int lg216x_get_property(struct dvb_frontend *fe,
+ struct dtv_property *tvp)
+{
+ return (DTV_ATSCMH_FIC_VER == tvp->cmd) ?
+ lg216x_get_frontend(fe) : 0;
+}
+
+static int lg2160_set_frontend(struct dvb_frontend *fe)
+{
+ struct lg216x_state *state = fe->demodulator_priv;
+ int ret;
+
+ lg_dbg("(%d)\n", fe->dtv_property_cache.frequency);
+
+ if (fe->ops.tuner_ops.set_params) {
+ ret = fe->ops.tuner_ops.set_params(fe);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ if (lg_fail(ret))
+ goto fail;
+ state->current_frequency = fe->dtv_property_cache.frequency;
+ }
+
+ ret = lg2160_agc_fix(state, 0, 0);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg2160_agc_polarity(state, 0, 0);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg2160_tuner_pwr_save_polarity(state, 1);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_set_if(state);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg2160_spectrum_polarity(state, state->cfg->spectral_inversion);
+ if (lg_fail(ret))
+ goto fail;
+
+ /* be tuned before this point */
+ ret = lg216x_soft_reset(state);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg2160_tuner_pwr_save(state, 0);
+ if (lg_fail(ret))
+ goto fail;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg2160_set_spi_clock(state);
+ if (lg_fail(ret))
+ goto fail;
+ break;
+ case LG2161:
+ ret = lg2161_set_output_interface(state);
+ if (lg_fail(ret))
+ goto fail;
+ break;
+ }
+
+ ret = lg216x_set_parade(state, fe->dtv_property_cache.atscmh_parade_id);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_set_ensemble(state,
+ fe->dtv_property_cache.atscmh_rs_frame_ensemble);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_initialize(state);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_enable_fic(state, 1);
+ lg_fail(ret);
+
+ lg216x_get_frontend(fe);
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg2160_read_lock_status(struct lg216x_state *state,
+ int *acq_lock, int *sync_lock)
+{
+ u8 val;
+ int ret;
+
+ *acq_lock = 0;
+ *sync_lock = 0;
+
+ ret = lg216x_read_reg(state, 0x011b, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *sync_lock = (val & 0x20) ? 0 : 1;
+ *acq_lock = (val & 0x40) ? 0 : 1;
+fail:
+ return ret;
+}
+
+#ifdef USE_LG2161_LOCK_BITS
+static int lg2161_read_lock_status(struct lg216x_state *state,
+ int *acq_lock, int *sync_lock)
+{
+ u8 val;
+ int ret;
+
+ *acq_lock = 0;
+ *sync_lock = 0;
+
+ ret = lg216x_read_reg(state, 0x0304, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *sync_lock = (val & 0x80) ? 0 : 1;
+
+ ret = lg216x_read_reg(state, 0x011b, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *acq_lock = (val & 0x40) ? 0 : 1;
+fail:
+ return ret;
+}
+#endif
+
+static int lg216x_read_lock_status(struct lg216x_state *state,
+ int *acq_lock, int *sync_lock)
+{
+#ifdef USE_LG2161_LOCK_BITS
+ int ret;
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg2160_read_lock_status(state, acq_lock, sync_lock);
+ break;
+ case LG2161:
+ ret = lg2161_read_lock_status(state, acq_lock, sync_lock);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+#else
+ return lg2160_read_lock_status(state, acq_lock, sync_lock);
+#endif
+}
+
+static int lg216x_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct lg216x_state *state = fe->demodulator_priv;
+ int ret, acq_lock, sync_lock;
+
+ *status = 0;
+
+ ret = lg216x_read_lock_status(state, &acq_lock, &sync_lock);
+ if (lg_fail(ret))
+ goto fail;
+
+ lg_dbg("%s%s\n",
+ acq_lock ? "SIGNALEXIST " : "",
+ sync_lock ? "SYNCLOCK" : "");
+
+ if (acq_lock)
+ *status |= FE_HAS_SIGNAL;
+ if (sync_lock)
+ *status |= FE_HAS_SYNC;
+
+ if (*status)
+ *status |= FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_LOCK;
+
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg2160_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct lg216x_state *state = fe->demodulator_priv;
+ u8 snr1, snr2;
+ int ret;
+
+ *snr = 0;
+
+ ret = lg216x_read_reg(state, 0x0202, &snr1);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_read_reg(state, 0x0203, &snr2);
+ if (lg_fail(ret))
+ goto fail;
+
+ if ((snr1 == 0xba) || (snr2 == 0xdf))
+ *snr = 0;
+ else
+#if 1
+ *snr = ((snr1 >> 4) * 100) + ((snr1 & 0x0f) * 10) + (snr2 >> 4);
+#else /* BCD */
+ *snr = (snr2 | (snr1 << 8));
+#endif
+fail:
+ return ret;
+}
+
+static int lg2161_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct lg216x_state *state = fe->demodulator_priv;
+ u8 snr1, snr2;
+ int ret;
+
+ *snr = 0;
+
+ ret = lg216x_read_reg(state, 0x0302, &snr1);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_read_reg(state, 0x0303, &snr2);
+ if (lg_fail(ret))
+ goto fail;
+
+ if ((snr1 == 0xba) || (snr2 == 0xfd))
+ *snr = 0;
+ else
+ *snr = ((snr1 >> 4) * 100) + ((snr1 & 0x0f) * 10) + (snr2 & 0x0f);
+fail:
+ return ret;
+}
+
+static int lg216x_read_signal_strength(struct dvb_frontend *fe,
+ u16 *strength)
+{
+#if 0
+ /* borrowed from lgdt330x.c
+ *
+ * Calculate strength from SNR up to 35dB
+ * Even though the SNR can go higher than 35dB,
+ * there is some comfort factor in having a range of
+ * strong signals that can show at 100%
+ */
+ struct lg216x_state *state = fe->demodulator_priv;
+ u16 snr;
+ int ret;
+#endif
+ *strength = 0;
+#if 0
+ ret = fe->ops.read_snr(fe, &snr);
+ if (lg_fail(ret))
+ goto fail;
+ /* Rather than use the 8.8 value snr, use state->snr which is 8.24 */
+ /* scale the range 0 - 35*2^24 into 0 - 65535 */
+ if (state->snr >= 8960 * 0x10000)
+ *strength = 0xffff;
+ else
+ *strength = state->snr / 8960;
+fail:
+ return ret;
+#else
+ return 0;
+#endif
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg216x_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+#if 0
+ struct lg216x_state *state = fe->demodulator_priv;
+ int ret;
+
+ ret = lg216x_read_rs_err_count(state,
+ &fe->dtv_property_cache.atscmh_rs_err);
+ if (lg_fail(ret))
+ goto fail;
+
+ *ucblocks = fe->dtv_property_cache.atscmh_rs_err;
+fail:
+#else
+ *ucblocks = 0;
+#endif
+ return 0;
+}
+
+static int lg216x_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings
+ *fe_tune_settings)
+{
+ fe_tune_settings->min_delay_ms = 500;
+ lg_dbg("\n");
+ return 0;
+}
+
+static void lg216x_release(struct dvb_frontend *fe)
+{
+ struct lg216x_state *state = fe->demodulator_priv;
+ lg_dbg("\n");
+ kfree(state);
+}
+
+static struct dvb_frontend_ops lg2160_ops = {
+ .delsys = { SYS_ATSCMH },
+ .info = {
+ .name = "LG Electronics LG2160 ATSC/MH Frontend",
+ .frequency_min = 54000000,
+ .frequency_max = 858000000,
+ .frequency_stepsize = 62500,
+ },
+ .i2c_gate_ctrl = lg216x_i2c_gate_ctrl,
+#if 0
+ .init = lg216x_init,
+ .sleep = lg216x_sleep,
+#endif
+ .get_property = lg216x_get_property,
+
+ .set_frontend = lg2160_set_frontend,
+ .get_frontend = lg216x_get_frontend,
+ .get_tune_settings = lg216x_get_tune_settings,
+ .read_status = lg216x_read_status,
+#if 0
+ .read_ber = lg216x_read_ber,
+#endif
+ .read_signal_strength = lg216x_read_signal_strength,
+ .read_snr = lg2160_read_snr,
+ .read_ucblocks = lg216x_read_ucblocks,
+ .release = lg216x_release,
+};
+
+static struct dvb_frontend_ops lg2161_ops = {
+ .delsys = { SYS_ATSCMH },
+ .info = {
+ .name = "LG Electronics LG2161 ATSC/MH Frontend",
+ .frequency_min = 54000000,
+ .frequency_max = 858000000,
+ .frequency_stepsize = 62500,
+ },
+ .i2c_gate_ctrl = lg216x_i2c_gate_ctrl,
+#if 0
+ .init = lg216x_init,
+ .sleep = lg216x_sleep,
+#endif
+ .get_property = lg216x_get_property,
+
+ .set_frontend = lg2160_set_frontend,
+ .get_frontend = lg216x_get_frontend,
+ .get_tune_settings = lg216x_get_tune_settings,
+ .read_status = lg216x_read_status,
+#if 0
+ .read_ber = lg216x_read_ber,
+#endif
+ .read_signal_strength = lg216x_read_signal_strength,
+ .read_snr = lg2161_read_snr,
+ .read_ucblocks = lg216x_read_ucblocks,
+ .release = lg216x_release,
+};
+
+struct dvb_frontend *lg2160_attach(const struct lg2160_config *config,
+ struct i2c_adapter *i2c_adap)
+{
+ struct lg216x_state *state = NULL;
+
+ lg_dbg("(%d-%04x)\n",
+ i2c_adap ? i2c_adapter_id(i2c_adap) : 0,
+ config ? config->i2c_addr : 0);
+
+ state = kzalloc(sizeof(struct lg216x_state), GFP_KERNEL);
+ if (state == NULL)
+ goto fail;
+
+ state->cfg = config;
+ state->i2c_adap = i2c_adap;
+ state->fic_ver = 0xff;
+ state->parade_id = 0xff;
+
+ switch (config->lg_chip) {
+ default:
+ lg_warn("invalid chip requested, defaulting to LG2160\n");
+ /* fall-thru */
+ case LG2160:
+ memcpy(&state->frontend.ops, &lg2160_ops,
+ sizeof(struct dvb_frontend_ops));
+ break;
+ case LG2161:
+ memcpy(&state->frontend.ops, &lg2161_ops,
+ sizeof(struct dvb_frontend_ops));
+ break;
+ }
+
+ state->frontend.demodulator_priv = state;
+ state->current_frequency = -1;
+ /* parade 1 by default */
+ state->frontend.dtv_property_cache.atscmh_parade_id = 1;
+
+ return &state->frontend;
+fail:
+ lg_warn("unable to detect LG216x hardware\n");
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(lg2160_attach);
+
+MODULE_DESCRIPTION("LG Electronics LG216x ATSC/MH Demodulator Driver");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.3");
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/dvb/frontends/lg2160.h b/drivers/media/dvb/frontends/lg2160.h
new file mode 100644
index 000000000000..9e2c0f41199a
--- /dev/null
+++ b/drivers/media/dvb/frontends/lg2160.h
@@ -0,0 +1,84 @@
+/*
+ * Support for LG2160 - ATSC/MH
+ *
+ * Copyright (C) 2010 Michael Krufky <mkrufky@linuxtv.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef _LG2160_H_
+#define _LG2160_H_
+
+#include <linux/i2c.h>
+#include "dvb_frontend.h"
+
+enum lg_chip_type {
+ LG2160 = 0,
+ LG2161 = 1,
+};
+
+#define LG2161_1019 LG2161
+#define LG2161_1040 LG2161
+
+enum lg2160_spi_clock {
+ LG2160_SPI_3_125_MHZ = 0,
+ LG2160_SPI_6_25_MHZ = 1,
+ LG2160_SPI_12_5_MHZ = 2,
+};
+
+#if 0
+enum lg2161_oif {
+ LG2161_OIF_EBI2_SLA = 1,
+ LG2161_OIF_SDIO_SLA = 2,
+ LG2161_OIF_SPI_SLA = 3,
+ LG2161_OIF_SPI_MAS = 4,
+ LG2161_OIF_SERIAL_TS = 7,
+};
+#endif
+
+struct lg2160_config {
+ u8 i2c_addr;
+
+ /* user-defined IF frequency in kHz */
+ u16 if_khz;
+
+ /* disable i2c repeater - 0:repeater enabled 1:repeater disabled */
+ int deny_i2c_rptr:1;
+
+ /* spectral inversion - 0:disabled 1:enabled */
+ int spectral_inversion:1;
+
+ unsigned int output_if;
+ enum lg2160_spi_clock spi_clock;
+ enum lg_chip_type lg_chip;
+};
+
+#if defined(CONFIG_DVB_LG2160) || (defined(CONFIG_DVB_LG2160_MODULE) && \
+ defined(MODULE))
+extern
+struct dvb_frontend *lg2160_attach(const struct lg2160_config *config,
+ struct i2c_adapter *i2c_adap);
+#else
+static inline
+struct dvb_frontend *lg2160_attach(const struct lg2160_config *config,
+ struct i2c_adapter *i2c_adap)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif /* CONFIG_DVB_LG2160 */
+
+#endif /* _LG2160_H_ */
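A minimal attach sketch (an illustration under stated assumptions, not part of this patch): the header above only declares lg2160_attach() and struct lg2160_config, so this shows how a bridge driver might wire up an LG2161. The I2C address, IF value and SPI clock are made up, and "adap"/"fe" stand in for locals the bridge driver would already have.

static struct lg2160_config example_lg2161_cfg = {
	.i2c_addr           = 0x1c,			/* assumed demod I2C address */
	.if_khz             = 0,			/* board dependent; 0 is only a placeholder */
	.deny_i2c_rptr      = 0,
	.spectral_inversion = 0,
	.spi_clock          = LG2160_SPI_3_125_MHZ,
	.lg_chip            = LG2161,
};

/* typically called from the bridge driver's frontend_attach callback */
fe = dvb_attach(lg2160_attach, &example_lg2161_cfg, &adap->i2c_adap);
if (fe == NULL)
	return -ENODEV;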
diff --git a/drivers/media/dvb/frontends/lgs8gxx.c b/drivers/media/dvb/frontends/lgs8gxx.c
index 4de1d3520cd2..568363a10a31 100644
--- a/drivers/media/dvb/frontends/lgs8gxx.c
+++ b/drivers/media/dvb/frontends/lgs8gxx.c
@@ -262,7 +262,6 @@ static int lgs8gxx_set_mode_auto(struct lgs8gxx_state *priv)
static int lgs8gxx_set_mode_manual(struct lgs8gxx_state *priv)
{
- int ret = 0;
u8 t;
if (priv->config->prod == LGS8GXX_PROD_LGS8G75) {
@@ -296,7 +295,7 @@ static int lgs8gxx_set_mode_manual(struct lgs8gxx_state *priv)
if (priv->config->prod == LGS8GXX_PROD_LGS8913)
lgs8gxx_write_reg(priv, 0xC1, 0);
- ret = lgs8gxx_read_reg(priv, 0xC5, &t);
+ lgs8gxx_read_reg(priv, 0xC5, &t);
t = (t & 0xE0) | 0x06;
lgs8gxx_write_reg(priv, 0xC5, t);
diff --git a/drivers/media/dvb/frontends/m88rs2000.c b/drivers/media/dvb/frontends/m88rs2000.c
index 045ee5a6f7ae..312588e84dae 100644
--- a/drivers/media/dvb/frontends/m88rs2000.c
+++ b/drivers/media/dvb/frontends/m88rs2000.c
@@ -416,9 +416,25 @@ static int m88rs2000_tab_set(struct m88rs2000_state *state,
static int m88rs2000_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t volt)
{
- deb_info("%s: %s\n", __func__,
- volt == SEC_VOLTAGE_13 ? "SEC_VOLTAGE_13" :
- volt == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" : "??");
+ struct m88rs2000_state *state = fe->demodulator_priv;
+ u8 data;
+
+ data = m88rs2000_demod_read(state, 0xb2);
+ data |= 0x03; /* bit0 V/H, bit1 off/on */
+
+ switch (volt) {
+ case SEC_VOLTAGE_18:
+ data &= ~0x03;
+ break;
+ case SEC_VOLTAGE_13:
+ data &= ~0x03;
+ data |= 0x01;
+ break;
+ case SEC_VOLTAGE_OFF:
+ break;
+ }
+
+ m88rs2000_demod_write(state, 0xb2, data);
return 0;
}
@@ -654,7 +670,6 @@ static int m88rs2000_set_tuner(struct dvb_frontend *fe, u16 *offset)
static int m88rs2000_set_fec(struct m88rs2000_state *state,
fe_code_rate_t fec)
{
- int ret;
u16 fec_set;
switch (fec) {
/* This is not confirmed kept for reference */
@@ -677,7 +692,7 @@ static int m88rs2000_set_fec(struct m88rs2000_state *state,
default:
fec_set = 0x08;
}
- ret = m88rs2000_demod_write(state, 0x76, fec_set);
+ m88rs2000_demod_write(state, 0x76, fec_set);
return 0;
}
@@ -772,13 +787,13 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
return -ENODEV;
for (i = 0; i < 25; i++) {
- u8 reg = m88rs2000_demod_read(state, 0x8c);
+ reg = m88rs2000_demod_read(state, 0x8c);
if ((reg & 0x7) == 0x7) {
status = FE_HAS_LOCK;
break;
}
state->no_lock_count++;
- if (state->no_lock_count > 15) {
+ if (state->no_lock_count == 15) {
reg = m88rs2000_demod_read(state, 0x70);
reg ^= 0x4;
m88rs2000_demod_write(state, 0x70, reg);
diff --git a/drivers/media/dvb/frontends/rtl2830.c b/drivers/media/dvb/frontends/rtl2830.c
index 45196c5b0736..93612ebac519 100644
--- a/drivers/media/dvb/frontends/rtl2830.c
+++ b/drivers/media/dvb/frontends/rtl2830.c
@@ -374,6 +374,118 @@ err:
return ret;
}
+static int rtl2830_get_frontend(struct dvb_frontend *fe)
+{
+ struct rtl2830_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ u8 buf[3];
+
+ if (priv->sleeping)
+ return 0;
+
+ ret = rtl2830_rd_regs(priv, 0x33c, buf, 2);
+ if (ret)
+ goto err;
+
+ ret = rtl2830_rd_reg(priv, 0x351, &buf[2]);
+ if (ret)
+ goto err;
+
+ dbg("%s: TPS=%02x %02x %02x", __func__, buf[0], buf[1], buf[2]);
+
+ switch ((buf[0] >> 2) & 3) {
+ case 0:
+ c->modulation = QPSK;
+ break;
+ case 1:
+ c->modulation = QAM_16;
+ break;
+ case 2:
+ c->modulation = QAM_64;
+ break;
+ }
+
+ switch ((buf[2] >> 2) & 1) {
+ case 0:
+ c->transmission_mode = TRANSMISSION_MODE_2K;
+ break;
+ case 1:
+ c->transmission_mode = TRANSMISSION_MODE_8K;
+ }
+
+ switch ((buf[2] >> 0) & 3) {
+ case 0:
+ c->guard_interval = GUARD_INTERVAL_1_32;
+ break;
+ case 1:
+ c->guard_interval = GUARD_INTERVAL_1_16;
+ break;
+ case 2:
+ c->guard_interval = GUARD_INTERVAL_1_8;
+ break;
+ case 3:
+ c->guard_interval = GUARD_INTERVAL_1_4;
+ break;
+ }
+
+ switch ((buf[0] >> 4) & 7) {
+ case 0:
+ c->hierarchy = HIERARCHY_NONE;
+ break;
+ case 1:
+ c->hierarchy = HIERARCHY_1;
+ break;
+ case 2:
+ c->hierarchy = HIERARCHY_2;
+ break;
+ case 3:
+ c->hierarchy = HIERARCHY_4;
+ break;
+ }
+
+ switch ((buf[1] >> 3) & 7) {
+ case 0:
+ c->code_rate_HP = FEC_1_2;
+ break;
+ case 1:
+ c->code_rate_HP = FEC_2_3;
+ break;
+ case 2:
+ c->code_rate_HP = FEC_3_4;
+ break;
+ case 3:
+ c->code_rate_HP = FEC_5_6;
+ break;
+ case 4:
+ c->code_rate_HP = FEC_7_8;
+ break;
+ }
+
+ switch ((buf[1] >> 0) & 7) {
+ case 0:
+ c->code_rate_LP = FEC_1_2;
+ break;
+ case 1:
+ c->code_rate_LP = FEC_2_3;
+ break;
+ case 2:
+ c->code_rate_LP = FEC_3_4;
+ break;
+ case 3:
+ c->code_rate_LP = FEC_5_6;
+ break;
+ case 4:
+ c->code_rate_LP = FEC_7_8;
+ break;
+ }
+
+ return 0;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
static int rtl2830_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct rtl2830_priv *priv = fe->demodulator_priv;
@@ -404,14 +516,72 @@ err:
static int rtl2830_read_snr(struct dvb_frontend *fe, u16 *snr)
{
- *snr = 0;
+ struct rtl2830_priv *priv = fe->demodulator_priv;
+ int ret, hierarchy, constellation;
+ u8 buf[2], tmp;
+ u16 tmp16;
+#define CONSTELLATION_NUM 3
+#define HIERARCHY_NUM 4
+ static const u32 snr_constant[CONSTELLATION_NUM][HIERARCHY_NUM] = {
+ { 70705899, 70705899, 70705899, 70705899 },
+ { 82433173, 82433173, 87483115, 94445660 },
+ { 92888734, 92888734, 95487525, 99770748 },
+ };
+
+ if (priv->sleeping)
+ return 0;
+
+ /* reports SNR in resolution of 0.1 dB */
+
+ ret = rtl2830_rd_reg(priv, 0x33c, &tmp);
+ if (ret)
+ goto err;
+
+ constellation = (tmp >> 2) & 0x03; /* [3:2] */
+ if (constellation > CONSTELLATION_NUM - 1)
+ goto err;
+
+ hierarchy = (tmp >> 4) & 0x07; /* [6:4] */
+ if (hierarchy > HIERARCHY_NUM - 1)
+ goto err;
+
+ ret = rtl2830_rd_regs(priv, 0x40c, buf, 2);
+ if (ret)
+ goto err;
+
+ tmp16 = buf[0] << 8 | buf[1];
+
+ if (tmp16)
+ *snr = (snr_constant[constellation][hierarchy] -
+ intlog10(tmp16)) / ((1 << 24) / 100);
+ else
+ *snr = 0;
+
return 0;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
}
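If I read the 8.24 fixed-point arithmetic above correctly, snr_constant[] holds log10(K * 2^14) scaled by 2^24, so the reported value works out to 100 * log10(K * 2^14 / tmp16), i.e. tenths of a dB. A tiny standalone check with a made-up register reading (not part of the patch):

#include <math.h>
#include <stdio.h>

int main(void)
{
	const unsigned snr_const_qpsk = 70705899;	/* log10(2^14) in 8.24, per the table above */
	unsigned tmp16 = 164;				/* hypothetical noise reading from regs 0x40c/0x40d */
	unsigned snr = (snr_const_qpsk - (unsigned)(log10(tmp16) * (1 << 24)))
			/ ((1 << 24) / 100);

	printf("snr = %u.%u dB\n", snr / 10, snr % 10);	/* ~19.9, i.e. 10*log10(16384/164) */
	return 0;
}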
static int rtl2830_read_ber(struct dvb_frontend *fe, u32 *ber)
{
- *ber = 0;
+ struct rtl2830_priv *priv = fe->demodulator_priv;
+ int ret;
+ u8 buf[2];
+
+ if (priv->sleeping)
+ return 0;
+
+ ret = rtl2830_rd_regs(priv, 0x34e, buf, 2);
+ if (ret)
+ goto err;
+
+ *ber = buf[0] << 8 | buf[1];
+
return 0;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
}
static int rtl2830_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
@@ -422,8 +592,32 @@ static int rtl2830_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
static int rtl2830_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
- *strength = 0;
+ struct rtl2830_priv *priv = fe->demodulator_priv;
+ int ret;
+ u8 buf[2];
+ u16 if_agc_raw, if_agc;
+
+ if (priv->sleeping)
+ return 0;
+
+ ret = rtl2830_rd_regs(priv, 0x359, buf, 2);
+ if (ret)
+ goto err;
+
+ if_agc_raw = (buf[0] << 8 | buf[1]) & 0x3fff;
+
+ if (if_agc_raw & (1 << 9))
+ if_agc = -(~(if_agc_raw - 1) & 0x1ff);
+ else
+ if_agc = if_agc_raw;
+
+ *strength = (u8) (55 - if_agc / 182);
+ *strength |= *strength << 8;
+
return 0;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
}
static struct dvb_frontend_ops rtl2830_ops;
@@ -549,6 +743,7 @@ static struct dvb_frontend_ops rtl2830_ops = {
.get_tune_settings = rtl2830_get_tune_settings,
.set_frontend = rtl2830_set_frontend,
+ .get_frontend = rtl2830_get_frontend,
.read_status = rtl2830_read_status,
.read_snr = rtl2830_read_snr,
diff --git a/drivers/media/dvb/frontends/rtl2830_priv.h b/drivers/media/dvb/frontends/rtl2830_priv.h
index 4a464761b5b8..9b20557ccf6c 100644
--- a/drivers/media/dvb/frontends/rtl2830_priv.h
+++ b/drivers/media/dvb/frontends/rtl2830_priv.h
@@ -22,6 +22,7 @@
#define RTL2830_PRIV_H
#include "dvb_frontend.h"
+#include "dvb_math.h"
#include "rtl2830.h"
#define LOG_PREFIX "rtl2830"
diff --git a/drivers/media/dvb/frontends/stb0899_drv.c b/drivers/media/dvb/frontends/stb0899_drv.c
index dd08f4ac64a8..8b0dc74a3298 100644
--- a/drivers/media/dvb/frontends/stb0899_drv.c
+++ b/drivers/media/dvb/frontends/stb0899_drv.c
@@ -637,11 +637,9 @@ static void stb0899_init_calc(struct stb0899_state *state)
struct stb0899_internal *internal = &state->internal;
int master_clk;
u8 agc[2];
- u8 agc1cn;
u32 reg;
/* Read registers (in burst mode) */
- agc1cn = stb0899_read_reg(state, STB0899_AGC1CN);
stb0899_read_regs(state, STB0899_AGC1REF, agc, 2); /* AGC1R and AGC2O */
/* Initial calculations */
@@ -823,15 +821,12 @@ static int stb0899_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t
static int stb0899_diseqc_init(struct stb0899_state *state)
{
- struct dvb_diseqc_master_cmd tx_data;
/*
struct dvb_diseqc_slave_reply rx_data;
*/
- u8 f22_tx, f22_rx, reg;
+ u8 f22_tx, reg;
u32 mclk, tx_freq = 22000;/* count = 0, i; */
- tx_data.msg[0] = 0xe2;
- tx_data.msg_len = 3;
reg = stb0899_read_reg(state, STB0899_DISCNTRL2);
STB0899_SETFIELD_VAL(ONECHIP_TRX, reg, 0);
stb0899_write_reg(state, STB0899_DISCNTRL2, reg);
@@ -849,7 +844,6 @@ static int stb0899_diseqc_init(struct stb0899_state *state)
f22_tx = mclk / (tx_freq * 32);
stb0899_write_reg(state, STB0899_DISF22, f22_tx); /* DiSEqC Tx freq */
state->rx_freq = 20000;
- f22_rx = mclk / (state->rx_freq * 32);
return 0;
}
diff --git a/drivers/media/dvb/frontends/stb6100.c b/drivers/media/dvb/frontends/stb6100.c
index def88abb30bf..2e93e65d2cdb 100644
--- a/drivers/media/dvb/frontends/stb6100.c
+++ b/drivers/media/dvb/frontends/stb6100.c
@@ -158,7 +158,6 @@ static int stb6100_read_regs(struct stb6100_state *state, u8 regs[])
static int stb6100_read_reg(struct stb6100_state *state, u8 reg)
{
u8 regs[STB6100_NUMREGS];
- int rc;
struct i2c_msg msg = {
.addr = state->config->tuner_address + reg,
@@ -167,7 +166,7 @@ static int stb6100_read_reg(struct stb6100_state *state, u8 reg)
.len = 1
};
- rc = i2c_transfer(state->i2c, &msg, 1);
+ i2c_transfer(state->i2c, &msg, 1);
if (unlikely(reg >= STB6100_NUMREGS)) {
dprintk(verbose, FE_ERROR, 1, "Invalid register offset 0x%x", reg);
diff --git a/drivers/media/dvb/frontends/stv0297.c b/drivers/media/dvb/frontends/stv0297.c
index 85c157a1fe5e..d40f226160ef 100644
--- a/drivers/media/dvb/frontends/stv0297.c
+++ b/drivers/media/dvb/frontends/stv0297.c
@@ -414,7 +414,6 @@ static int stv0297_set_frontend(struct dvb_frontend *fe)
int delay;
int sweeprate;
int carrieroffset;
- unsigned long starttime;
unsigned long timeout;
fe_spectral_inversion_t inversion;
@@ -543,7 +542,6 @@ static int stv0297_set_frontend(struct dvb_frontend *fe)
stv0297_writereg_mask(state, 0x43, 0x10, 0x10);
/* wait for WGAGC lock */
- starttime = jiffies;
timeout = jiffies + msecs_to_jiffies(2000);
while (time_before(jiffies, timeout)) {
msleep(10);
diff --git a/drivers/media/dvb/frontends/stv0900_sw.c b/drivers/media/dvb/frontends/stv0900_sw.c
index ba0709b2d433..4af20780fb9c 100644
--- a/drivers/media/dvb/frontends/stv0900_sw.c
+++ b/drivers/media/dvb/frontends/stv0900_sw.c
@@ -835,7 +835,6 @@ static void stv0900_track_optimization(struct dvb_frontend *fe)
blind_tun_sw = 0,
modulation;
- enum fe_stv0900_rolloff rolloff;
enum fe_stv0900_modcode foundModcod;
dprintk("%s\n", __func__);
@@ -940,7 +939,6 @@ static void stv0900_track_optimization(struct dvb_frontend *fe)
freq1 = stv0900_read_reg(intp, CFR2);
freq0 = stv0900_read_reg(intp, CFR1);
- rolloff = stv0900_get_bits(intp, ROLLOFF_STATUS);
if (intp->srch_algo[demod] == STV0900_BLIND_SEARCH) {
stv0900_write_reg(intp, SFRSTEP, 0x00);
stv0900_write_bits(intp, SCAN_ENABLE, 0);
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index 4aef1877ed42..d79e69f65cbb 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -2842,7 +2842,6 @@ static int stv090x_optimize_track(struct stv090x_state *state)
{
struct dvb_frontend *fe = &state->frontend;
- enum stv090x_rolloff rolloff;
enum stv090x_modcod modcod;
s32 srate, pilots, aclc, f_1, f_0, i = 0, blind_tune = 0;
@@ -2966,7 +2965,6 @@ static int stv090x_optimize_track(struct stv090x_state *state)
f_1 = STV090x_READ_DEMOD(state, CFR2);
f_0 = STV090x_READ_DEMOD(state, CFR1);
reg = STV090x_READ_DEMOD(state, TMGOBS);
- rolloff = STV090x_GETFIELD_Px(reg, ROLLOFF_STATUS_FIELD);
if (state->algo == STV090x_BLIND_SEARCH) {
STV090x_WRITE_DEMOD(state, SFRSTEP, 0x00);
diff --git a/drivers/media/dvb/frontends/zl10353.c b/drivers/media/dvb/frontends/zl10353.c
index ac7237891374..82946cd517f5 100644
--- a/drivers/media/dvb/frontends/zl10353.c
+++ b/drivers/media/dvb/frontends/zl10353.c
@@ -525,7 +525,7 @@ static int zl10353_read_snr(struct dvb_frontend *fe, u16 *snr)
zl10353_dump_regs(fe);
_snr = zl10353_read_register(state, SNR);
- *snr = (_snr << 8) | _snr;
+ *snr = 10 * _snr / 8;
return 0;
}
@@ -559,7 +559,6 @@ static int zl10353_init(struct dvb_frontend *fe)
{
struct zl10353_state *state = fe->demodulator_priv;
u8 zl10353_reset_attach[6] = { 0x50, 0x03, 0x64, 0x46, 0x15, 0x0F };
- int rc = 0;
if (debug_regs)
zl10353_dump_regs(fe);
@@ -573,7 +572,7 @@ static int zl10353_init(struct dvb_frontend *fe)
/* Do a "hard" reset if not already done */
if (zl10353_read_register(state, 0x50) != zl10353_reset_attach[1] ||
zl10353_read_register(state, 0x51) != zl10353_reset_attach[2]) {
- rc = zl10353_write(fe, zl10353_reset_attach,
+ zl10353_write(fe, zl10353_reset_attach,
sizeof(zl10353_reset_attach));
if (debug_regs)
zl10353_dump_regs(fe);
diff --git a/drivers/media/dvb/mantis/hopper_cards.c b/drivers/media/dvb/mantis/hopper_cards.c
index 71622f65c037..cc0251e01077 100644
--- a/drivers/media/dvb/mantis/hopper_cards.c
+++ b/drivers/media/dvb/mantis/hopper_cards.c
@@ -65,7 +65,7 @@ static int devs;
static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
{
- u32 stat = 0, mask = 0, lstat = 0;
+ u32 stat = 0, mask = 0;
u32 rst_stat = 0, rst_mask = 0;
struct mantis_pci *mantis;
@@ -80,7 +80,6 @@ static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
stat = mmread(MANTIS_INT_STAT);
mask = mmread(MANTIS_INT_MASK);
- lstat = stat & ~MANTIS_INT_RISCSTAT;
if (!(stat & mask))
return IRQ_NONE;
diff --git a/drivers/media/dvb/mantis/mantis_cards.c b/drivers/media/dvb/mantis/mantis_cards.c
index c2bb90b3e529..095cf3a994e2 100644
--- a/drivers/media/dvb/mantis/mantis_cards.c
+++ b/drivers/media/dvb/mantis/mantis_cards.c
@@ -73,7 +73,7 @@ static char *label[10] = {
static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
{
- u32 stat = 0, mask = 0, lstat = 0;
+ u32 stat = 0, mask = 0;
u32 rst_stat = 0, rst_mask = 0;
struct mantis_pci *mantis;
@@ -88,7 +88,6 @@ static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
stat = mmread(MANTIS_INT_STAT);
mask = mmread(MANTIS_INT_MASK);
- lstat = stat & ~MANTIS_INT_RISCSTAT;
if (!(stat & mask))
return IRQ_NONE;
diff --git a/drivers/media/dvb/mantis/mantis_dma.c b/drivers/media/dvb/mantis/mantis_dma.c
index c61ca7d3daea..566c407175a4 100644
--- a/drivers/media/dvb/mantis/mantis_dma.c
+++ b/drivers/media/dvb/mantis/mantis_dma.c
@@ -199,10 +199,6 @@ void mantis_dma_start(struct mantis_pci *mantis)
void mantis_dma_stop(struct mantis_pci *mantis)
{
- u32 stat = 0, mask = 0;
-
- stat = mmread(MANTIS_INT_STAT);
- mask = mmread(MANTIS_INT_MASK);
dprintk(MANTIS_DEBUG, 1, "Mantis Stop DMA engine");
mmwrite((mmread(MANTIS_GPIF_ADDR) & (~(MANTIS_GPIF_HIFRDWRN))), MANTIS_GPIF_ADDR);
diff --git a/drivers/media/dvb/mantis/mantis_evm.c b/drivers/media/dvb/mantis/mantis_evm.c
index 36f2256ebb0e..71ce52875c38 100644
--- a/drivers/media/dvb/mantis/mantis_evm.c
+++ b/drivers/media/dvb/mantis/mantis_evm.c
@@ -41,10 +41,9 @@ static void mantis_hifevm_work(struct work_struct *work)
struct mantis_ca *ca = container_of(work, struct mantis_ca, hif_evm_work);
struct mantis_pci *mantis = ca->ca_priv;
- u32 gpif_stat, gpif_mask;
+ u32 gpif_stat;
gpif_stat = mmread(MANTIS_GPIF_STATUS);
- gpif_mask = mmread(MANTIS_GPIF_IRQCFG);
if (gpif_stat & MANTIS_GPIF_DETSTAT) {
if (gpif_stat & MANTIS_CARD_PLUGIN) {
diff --git a/drivers/media/dvb/ngene/ngene-core.c b/drivers/media/dvb/ngene/ngene-core.c
index f129a9303f80..39857384af10 100644
--- a/drivers/media/dvb/ngene/ngene-core.c
+++ b/drivers/media/dvb/ngene/ngene-core.c
@@ -1409,10 +1409,8 @@ static int ngene_start(struct ngene *dev)
if (stat < 0)
goto fail;
- if (!stat)
- return stat;
+ return 0;
- /* otherwise error: fall through */
fail:
ngwritel(0, NGENE_INT_ENABLE);
free_irq(dev->pci_dev->irq, dev);
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index e1f20c236989..f148b19a206a 100644
--- a/drivers/media/dvb/pluto2/pluto2.c
+++ b/drivers/media/dvb/pluto2/pluto2.c
@@ -481,14 +481,6 @@ static int lg_tdtpe001p_tuner_set_params(struct dvb_frontend *fe)
if (p->bandwidth_hz == 8000000)
buf[3] |= 0x08;
- if (sizeof(buf) == 6) {
- buf[4] = buf[2];
- buf[4] &= ~0x1c;
- buf[4] |= 0x18;
-
- buf[5] = (0 << 7) | (2 << 4);
- }
-
msg.addr = I2C_ADDR_TUA6034 >> 1;
msg.flags = 0;
msg.buf = buf;
diff --git a/drivers/media/dvb/siano/smssdio.c b/drivers/media/dvb/siano/smssdio.c
index 91f8c8291e2b..d6f3f100699a 100644
--- a/drivers/media/dvb/siano/smssdio.c
+++ b/drivers/media/dvb/siano/smssdio.c
@@ -114,7 +114,7 @@ out:
static void smssdio_interrupt(struct sdio_func *func)
{
- int ret, isr;
+ int ret;
struct smssdio_device *smsdev;
struct smscore_buffer_t *cb;
@@ -127,7 +127,7 @@ static void smssdio_interrupt(struct sdio_func *func)
* The interrupt register has no defined meaning. It is just
 * a way of turning off the level-triggered interrupt.
*/
- isr = sdio_readb(func, SMSSDIO_INT, &ret);
+ (void)sdio_readb(func, SMSSDIO_INT, &ret);
if (ret) {
sms_err("Unable to read interrupt register!\n");
return;
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index b1fe5137df09..63c004a25e0b 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -542,6 +542,8 @@ static const struct usb_device_id smsusb_id_table[] __devinitconst = {
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ USB_DEVICE(0x2040, 0xc090),
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { USB_DEVICE(0x2040, 0xc0a0),
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ } /* Terminating entry */
};
diff --git a/drivers/media/dvb/ttpci/av7110_v4l.c b/drivers/media/dvb/ttpci/av7110_v4l.c
index ee8ee1d481fa..1b2d15140a1d 100644
--- a/drivers/media/dvb/ttpci/av7110_v4l.c
+++ b/drivers/media/dvb/ttpci/av7110_v4l.c
@@ -107,7 +107,7 @@ static struct v4l2_input inputs[4] = {
.index = 1,
.name = "Television",
.type = V4L2_INPUT_TYPE_TUNER,
- .audioset = 2,
+ .audioset = 1,
.tuner = 0,
.std = V4L2_STD_PAL_BG|V4L2_STD_NTSC_M,
.status = 0,
@@ -494,7 +494,7 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
dprintk(2, "VIDIOC_S_INPUT: %d\n", input);
if (!av7110->analog_tuner_flags)
- return 0;
+ return input ? -EINVAL : 0;
if (input >= 4)
return -EINVAL;
@@ -503,19 +503,38 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
return av7110_dvb_c_switch(fh);
}
+static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a)
+{
+ dprintk(2, "VIDIOC_G_AUDIO: %d\n", a->index);
+ if (a->index != 0)
+ return -EINVAL;
+ *a = msp3400_v4l2_audio;
+ return 0;
+}
+
static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
+ struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+
dprintk(2, "VIDIOC_G_AUDIO: %d\n", a->index);
if (a->index != 0)
return -EINVAL;
- memcpy(a, &msp3400_v4l2_audio, sizeof(struct v4l2_audio));
+ if (av7110->current_input >= 2)
+ return -EINVAL;
+ *a = msp3400_v4l2_audio;
return 0;
}
static int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
+ struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+
dprintk(2, "VIDIOC_S_AUDIO: %d\n", a->index);
- return 0;
+ if (av7110->current_input >= 2)
+ return -EINVAL;
+ return a->index ? -EINVAL : 0;
}
static int vidioc_g_sliced_vbi_cap(struct file *file, void *fh,
@@ -802,26 +821,39 @@ int av7110_init_v4l(struct av7110 *av7110)
ERR("cannot init capture device. skipping\n");
return -ENODEV;
}
- vv_data->ops.vidioc_enum_input = vidioc_enum_input;
- vv_data->ops.vidioc_g_input = vidioc_g_input;
- vv_data->ops.vidioc_s_input = vidioc_s_input;
- vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
- vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
- vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
- vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
- vv_data->ops.vidioc_g_audio = vidioc_g_audio;
- vv_data->ops.vidioc_s_audio = vidioc_s_audio;
- vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
- vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
- vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
+ vv_data->vid_ops.vidioc_enum_input = vidioc_enum_input;
+ vv_data->vid_ops.vidioc_g_input = vidioc_g_input;
+ vv_data->vid_ops.vidioc_s_input = vidioc_s_input;
+ vv_data->vid_ops.vidioc_g_tuner = vidioc_g_tuner;
+ vv_data->vid_ops.vidioc_s_tuner = vidioc_s_tuner;
+ vv_data->vid_ops.vidioc_g_frequency = vidioc_g_frequency;
+ vv_data->vid_ops.vidioc_s_frequency = vidioc_s_frequency;
+ vv_data->vid_ops.vidioc_enumaudio = vidioc_enumaudio;
+ vv_data->vid_ops.vidioc_g_audio = vidioc_g_audio;
+ vv_data->vid_ops.vidioc_s_audio = vidioc_s_audio;
+ vv_data->vid_ops.vidioc_g_fmt_vbi_cap = NULL;
+
+ vv_data->vbi_ops.vidioc_g_tuner = vidioc_g_tuner;
+ vv_data->vbi_ops.vidioc_s_tuner = vidioc_s_tuner;
+ vv_data->vbi_ops.vidioc_g_frequency = vidioc_g_frequency;
+ vv_data->vbi_ops.vidioc_s_frequency = vidioc_s_frequency;
+ vv_data->vbi_ops.vidioc_g_fmt_vbi_cap = NULL;
+ vv_data->vbi_ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
+ vv_data->vbi_ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
+ vv_data->vbi_ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
+
+ if (FW_VERSION(av7110->arm_app) < 0x2623)
+ vv_data->capabilities &= ~V4L2_CAP_SLICED_VBI_OUTPUT;
if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) {
ERR("cannot register capture device. skipping\n");
saa7146_vv_release(dev);
return -ENODEV;
}
- if (saa7146_register_device(&av7110->vbi_dev, dev, "av7110", VFL_TYPE_VBI))
- ERR("cannot register vbi v4l2 device. skipping\n");
+ if (FW_VERSION(av7110->arm_app) >= 0x2623) {
+ if (saa7146_register_device(&av7110->vbi_dev, dev, "av7110", VFL_TYPE_VBI))
+ ERR("cannot register vbi v4l2 device. skipping\n");
+ }
return 0;
}
@@ -905,7 +937,7 @@ static int std_callback(struct saa7146_dev* dev, struct saa7146_standard *std)
static struct saa7146_ext_vv av7110_vv_data_st = {
.inputs = 1,
.audios = 1,
- .capabilities = V4L2_CAP_SLICED_VBI_OUTPUT,
+ .capabilities = V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_AUDIO,
.flags = 0,
.stds = &standard[0],
@@ -920,7 +952,7 @@ static struct saa7146_ext_vv av7110_vv_data_st = {
static struct saa7146_ext_vv av7110_vv_data_c = {
.inputs = 1,
.audios = 1,
- .capabilities = V4L2_CAP_TUNER | V4L2_CAP_SLICED_VBI_OUTPUT,
+ .capabilities = V4L2_CAP_TUNER | V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_AUDIO,
.flags = SAA7146_USE_PORT_B_FOR_VBI,
.stds = &standard[0],
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 8b32e282bf5d..12ddb53c58dc 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -1483,9 +1483,9 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
ERR("cannot init vv subsystem\n");
return err;
}
- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
- vv_data.ops.vidioc_g_input = vidioc_g_input;
- vv_data.ops.vidioc_s_input = vidioc_s_input;
+ vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
+ vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
+ vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) {
/* fixme: proper cleanup here */
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 056138f63c7d..e1cd13283407 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -214,23 +214,76 @@ EXPORT_SYMBOL_GPL(media_entity_graph_walk_next);
* pipeline pointer must be identical for all nested calls to
* media_entity_pipeline_start().
*/
-void media_entity_pipeline_start(struct media_entity *entity,
- struct media_pipeline *pipe)
+__must_check int media_entity_pipeline_start(struct media_entity *entity,
+ struct media_pipeline *pipe)
{
struct media_device *mdev = entity->parent;
struct media_entity_graph graph;
+ struct media_entity *entity_err = entity;
+ int ret;
mutex_lock(&mdev->graph_mutex);
media_entity_graph_walk_start(&graph, entity);
while ((entity = media_entity_graph_walk_next(&graph))) {
+ unsigned int i;
+
entity->stream_count++;
WARN_ON(entity->pipe && entity->pipe != pipe);
entity->pipe = pipe;
+
+ /* Already streaming --- no need to check. */
+ if (entity->stream_count > 1)
+ continue;
+
+ if (!entity->ops || !entity->ops->link_validate)
+ continue;
+
+ for (i = 0; i < entity->num_links; i++) {
+ struct media_link *link = &entity->links[i];
+
+ /* Is this pad part of an enabled link? */
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED))
+ continue;
+
+ /* Are we the sink or not? */
+ if (link->sink->entity != entity)
+ continue;
+
+ ret = entity->ops->link_validate(link);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto error;
+ }
}
mutex_unlock(&mdev->graph_mutex);
+
+ return 0;
+
+error:
+ /*
+ * Link validation on graph failed. We revert what we did and
+ * return the error.
+ */
+ media_entity_graph_walk_start(&graph, entity_err);
+
+ while ((entity_err = media_entity_graph_walk_next(&graph))) {
+ entity_err->stream_count--;
+ if (entity_err->stream_count == 0)
+ entity_err->pipe = NULL;
+
+ /*
+ * We haven't increased stream_count further than this
+ * so we quit here.
+ */
+ if (entity_err == entity)
+ break;
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(media_entity_pipeline_start);
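Since media_entity_pipeline_start() now returns a __must_check int, callers have to propagate link-validation failures instead of ignoring them. A minimal caller-side sketch (illustrative only; the entity and pipeline objects are assumed to come from the driver):

static int example_start_streaming(struct media_entity *entity,
				   struct media_pipeline *pipe)
{
	int ret;

	ret = media_entity_pipeline_start(entity, pipe);
	if (ret < 0)
		return ret;	/* an enabled sink link failed ops->link_validate() */

	/* ... program and start the hardware here ... */
	return 0;
}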
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 8db2d7f4b52a..c257da13d766 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -320,7 +320,7 @@ config RADIO_MIROPCM20
module will be called radio-miropcm20.
config RADIO_SF16FMI
- tristate "SF16-FMI/SF16-FMP Radio"
+ tristate "SF16-FMI/SF16-FMP/SF16-FMD Radio"
depends on ISA && VIDEO_V4L2
---help---
Choose Y here if you have one of these FM radio cards.
@@ -329,7 +329,7 @@ config RADIO_SF16FMI
module will be called radio-sf16fmi.
config RADIO_SF16FMR2
- tristate "SF16FMR2 Radio"
+ tristate "SF16-FMR2/SF16-FMD2 Radio"
depends on ISA && VIDEO_V4L2 && SND
---help---
Choose Y here if you have one of these FM radio cards.
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index f36905b63645..63b112b555b2 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -1,92 +1,37 @@
/* A driver for the D-Link DSB-R100 USB radio and Gemtek USB Radio 21.
- The device plugs into both the USB and an analog audio input, so this thing
- only deals with initialisation and frequency setting, the
- audio data has to be handled by a sound driver.
-
- Major issue: I can't find out where the device reports the signal
- strength, and indeed the windows software appearantly just looks
- at the stereo indicator as well. So, scanning will only find
- stereo stations. Sad, but I can't help it.
-
- Also, the windows program sends oodles of messages over to the
- device, and I couldn't figure out their meaning. My suspicion
- is that they don't have any:-)
-
- You might find some interesting stuff about this module at
- http://unimut.fsk.uni-heidelberg.de/unimut/demi/dsbr
-
- Copyright (c) 2000 Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- History:
-
- Version 0.46:
- Removed usb_dsbr100_open/close calls and radio->users counter. Also,
- radio->muted changed to radio->status and suspend/resume calls updated.
-
- Version 0.45:
- Converted to v4l2_device.
-
- Version 0.44:
- Add suspend/resume functions, fix unplug of device,
- a lot of cleanups and fixes by Alexey Klimov <klimov.linux@gmail.com>
-
- Version 0.43:
- Oliver Neukum: avoided DMA coherency issue
-
- Version 0.42:
- Converted dsbr100 to use video_ioctl2
- by Douglas Landgraf <dougsland@gmail.com>
-
- Version 0.41-ac1:
- Alan Cox: Some cleanups and fixes
-
- Version 0.41:
- Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
-
- Version 0.40:
- Markus: Updates for 2.6.x kernels, code layout changes, name sanitizing
-
- Version 0.30:
- Markus: Updates for 2.5.x kernel and more ISO compliant source
-
- Version 0.25:
- PSL and Markus: Cleanup, radio now doesn't stop on device close
-
- Version 0.24:
- Markus: Hope I got these silly VIDEO_TUNER_LOW issues finally
- right. Some minor cleanup, improved standalone compilation
-
- Version 0.23:
- Markus: Sign extension bug fixed by declaring transfer_buffer unsigned
-
- Version 0.22:
- Markus: Some (brown bag) cleanup in what VIDIOCSTUNER returns,
- thanks to Mike Cox for pointing the problem out.
-
- Version 0.21:
- Markus: Minor cleanup, warnings if something goes wrong, lame attempt
- to adhere to Documentation/CodingStyle
-
- Version 0.2:
- Brad Hards <bradh@dynamite.com.au>: Fixes to make it work as non-module
- Markus: Copyright clarification
-
- Version 0.01: Markus: initial release
-
+ * The device plugs into both the USB and an analog audio input, so this thing
+ * only deals with initialisation and frequency setting, the
+ * audio data has to be handled by a sound driver.
+ *
+ * Major issue: I can't find out where the device reports the signal
+ * strength, and indeed the windows software apparently just looks
+ * at the stereo indicator as well. So, scanning will only find
+ * stereo stations. Sad, but I can't help it.
+ *
+ * Also, the windows program sends oodles of messages over to the
+ * device, and I couldn't figure out their meaning. My suspicion
+ * is that they don't have any:-)
+ *
+ * You might find some interesting stuff about this module at
+ * http://unimut.fsk.uni-heidelberg.de/unimut/demi/dsbr
+ *
+ * Fully tested with the Keene USB FM Transmitter and the v4l2-compliance tool.
+ *
+ * Copyright (c) 2000 Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
@@ -95,17 +40,19 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/videodev2.h>
+#include <linux/usb.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include <linux/usb.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
/*
* Version Information
*/
-#define DRIVER_VERSION "0.4.7"
-
-#define DRIVER_AUTHOR "Markus Demleitner <msdemlei@tucana.harvard.edu>"
-#define DRIVER_DESC "D-Link DSB-R100 USB FM radio driver"
+MODULE_AUTHOR("Markus Demleitner <msdemlei@tucana.harvard.edu>");
+MODULE_DESCRIPTION("D-Link DSB-R100 USB FM radio driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.1.0");
#define DSB100_VENDOR 0x04b4
#define DSB100_PRODUCT 0x1002
@@ -122,19 +69,8 @@ devices, that would be 76 and 91. */
#define FREQ_MAX 108.0
#define FREQ_MUL 16000
-/* defines for radio->status */
-#define STARTED 0
-#define STOPPED 1
-
#define v4l2_dev_to_radio(d) container_of(d, struct dsbr100_device, v4l2_dev)
-static int usb_dsbr100_probe(struct usb_interface *intf,
- const struct usb_device_id *id);
-static void usb_dsbr100_disconnect(struct usb_interface *intf);
-static int usb_dsbr100_suspend(struct usb_interface *intf,
- pm_message_t message);
-static int usb_dsbr100_resume(struct usb_interface *intf);
-
static int radio_nr = -1;
module_param(radio_nr, int, 0);
@@ -143,179 +79,92 @@ struct dsbr100_device {
struct usb_device *usbdev;
struct video_device videodev;
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler hdl;
u8 *transfer_buffer;
struct mutex v4l2_lock;
int curfreq;
- int stereo;
- int status;
-};
-
-static struct usb_device_id usb_dsbr100_device_table [] = {
- { USB_DEVICE(DSB100_VENDOR, DSB100_PRODUCT) },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE (usb, usb_dsbr100_device_table);
-
-/* USB subsystem interface */
-static struct usb_driver usb_dsbr100_driver = {
- .name = "dsbr100",
- .probe = usb_dsbr100_probe,
- .disconnect = usb_dsbr100_disconnect,
- .id_table = usb_dsbr100_device_table,
- .suspend = usb_dsbr100_suspend,
- .resume = usb_dsbr100_resume,
- .reset_resume = usb_dsbr100_resume,
- .supports_autosuspend = 0,
+ bool stereo;
+ bool muted;
};
/* Low-level device interface begins here */
-/* switch on radio */
-static int dsbr100_start(struct dsbr100_device *radio)
+/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
+static int dsbr100_setfreq(struct dsbr100_device *radio, unsigned freq)
{
- int retval;
- int request;
-
- retval = usb_control_msg(radio->usbdev,
- usb_rcvctrlpipe(radio->usbdev, 0),
- USB_REQ_GET_STATUS,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x00, 0xC7, radio->transfer_buffer, 8, 300);
-
- if (retval < 0) {
- request = USB_REQ_GET_STATUS;
- goto usb_control_msg_failed;
+ unsigned f = (freq / 16 * 80) / 1000 + 856;
+ int retval = 0;
+
+ if (!radio->muted) {
+ retval = usb_control_msg(radio->usbdev,
+ usb_rcvctrlpipe(radio->usbdev, 0),
+ DSB100_TUNE,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ (f >> 8) & 0x00ff, f & 0xff,
+ radio->transfer_buffer, 8, 300);
+ if (retval >= 0)
+ mdelay(1);
}
- retval = usb_control_msg(radio->usbdev,
- usb_rcvctrlpipe(radio->usbdev, 0),
- DSB100_ONOFF,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x01, 0x00, radio->transfer_buffer, 8, 300);
-
- if (retval < 0) {
- request = DSB100_ONOFF;
- goto usb_control_msg_failed;
+ if (retval >= 0) {
+ radio->curfreq = freq;
+ return 0;
}
-
- radio->status = STARTED;
- return (radio->transfer_buffer)[0];
-
-usb_control_msg_failed:
dev_err(&radio->usbdev->dev,
"%s - usb_control_msg returned %i, request %i\n",
- __func__, retval, request);
+ __func__, retval, DSB100_TUNE);
return retval;
-
}
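As a quick sanity check of the tuner word computed above (my own worked example, not part of the patch): freq arrives in V4L2 TUNER_LOW units of 1/16 kHz, so 100.0 MHz is 100000 * 16 = 1600000 and f = (1600000/16 * 80)/1000 + 856 = 8856.

#include <stdio.h>

int main(void)
{
	unsigned freq = 100000 * 16;			/* 100.0 MHz in 1/16 kHz units */
	unsigned f = (freq / 16 * 80) / 1000 + 856;	/* same formula as dsbr100_setfreq() */

	printf("tuner word for 100.0 MHz: %u\n", f);	/* prints 8856 */
	return 0;
}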
-/* switch off radio */
-static int dsbr100_stop(struct dsbr100_device *radio)
+/* switch on radio */
+static int dsbr100_start(struct dsbr100_device *radio)
{
- int retval;
- int request;
-
- retval = usb_control_msg(radio->usbdev,
- usb_rcvctrlpipe(radio->usbdev, 0),
- USB_REQ_GET_STATUS,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x16, 0x1C, radio->transfer_buffer, 8, 300);
-
- if (retval < 0) {
- request = USB_REQ_GET_STATUS;
- goto usb_control_msg_failed;
- }
-
- retval = usb_control_msg(radio->usbdev,
+ int retval = usb_control_msg(radio->usbdev,
usb_rcvctrlpipe(radio->usbdev, 0),
DSB100_ONOFF,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x00, 0x00, radio->transfer_buffer, 8, 300);
-
- if (retval < 0) {
- request = DSB100_ONOFF;
- goto usb_control_msg_failed;
- }
-
- radio->status = STOPPED;
- return (radio->transfer_buffer)[0];
+ 0x01, 0x00, radio->transfer_buffer, 8, 300);
-usb_control_msg_failed:
+ if (retval >= 0)
+ return dsbr100_setfreq(radio, radio->curfreq);
dev_err(&radio->usbdev->dev,
"%s - usb_control_msg returned %i, request %i\n",
- __func__, retval, request);
+ __func__, retval, DSB100_ONOFF);
return retval;
}
-/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
-static int dsbr100_setfreq(struct dsbr100_device *radio)
+/* switch off radio */
+static int dsbr100_stop(struct dsbr100_device *radio)
{
- int retval;
- int request;
- int freq = (radio->curfreq / 16 * 80) / 1000 + 856;
-
- retval = usb_control_msg(radio->usbdev,
- usb_rcvctrlpipe(radio->usbdev, 0),
- DSB100_TUNE,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- (freq >> 8) & 0x00ff, freq & 0xff,
- radio->transfer_buffer, 8, 300);
-
- if (retval < 0) {
- request = DSB100_TUNE;
- goto usb_control_msg_failed;
- }
-
- retval = usb_control_msg(radio->usbdev,
+ int retval = usb_control_msg(radio->usbdev,
usb_rcvctrlpipe(radio->usbdev, 0),
- USB_REQ_GET_STATUS,
+ DSB100_ONOFF,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x96, 0xB7, radio->transfer_buffer, 8, 300);
-
- if (retval < 0) {
- request = USB_REQ_GET_STATUS;
- goto usb_control_msg_failed;
- }
-
- retval = usb_control_msg(radio->usbdev,
- usb_rcvctrlpipe(radio->usbdev, 0),
- USB_REQ_GET_STATUS,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x00, 0x24, radio->transfer_buffer, 8, 300);
-
- if (retval < 0) {
- request = USB_REQ_GET_STATUS;
- goto usb_control_msg_failed;
- }
-
- radio->stereo = !((radio->transfer_buffer)[0] & 0x01);
- return (radio->transfer_buffer)[0];
+ 0x00, 0x00, radio->transfer_buffer, 8, 300);
-usb_control_msg_failed:
- radio->stereo = -1;
+ if (retval >= 0)
+ return 0;
dev_err(&radio->usbdev->dev,
"%s - usb_control_msg returned %i, request %i\n",
- __func__, retval, request);
+ __func__, retval, DSB100_ONOFF);
return retval;
+
}
/* return the device status. This is, in effect, just whether it
sees a stereo signal or not. Pity. */
static void dsbr100_getstat(struct dsbr100_device *radio)
{
- int retval;
-
- retval = usb_control_msg(radio->usbdev,
+ int retval = usb_control_msg(radio->usbdev,
usb_rcvctrlpipe(radio->usbdev, 0),
USB_REQ_GET_STATUS,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x00 , 0x24, radio->transfer_buffer, 8, 300);
+ 0x00, 0x24, radio->transfer_buffer, 8, 300);
if (retval < 0) {
- radio->stereo = -1;
+ radio->stereo = false;
dev_err(&radio->usbdev->dev,
"%s - usb_control_msg returned %i, request %i\n",
__func__, retval, USB_REQ_GET_STATUS);
@@ -332,7 +181,8 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "dsbr100", sizeof(v->driver));
strlcpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card));
usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
- v->capabilities = V4L2_CAP_TUNER;
+ v->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
+ v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -349,13 +199,11 @@ static int vidioc_g_tuner(struct file *file, void *priv,
v->type = V4L2_TUNER_RADIO;
v->rangelow = FREQ_MIN * FREQ_MUL;
v->rangehigh = FREQ_MAX * FREQ_MUL;
- v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
- v->capability = V4L2_TUNER_CAP_LOW;
- if(radio->stereo)
- v->audmode = V4L2_TUNER_MODE_STEREO;
- else
- v->audmode = V4L2_TUNER_MODE_MONO;
- v->signal = 0xffff; /* We can't get the signal strength */
+ v->rxsubchans = radio->stereo ? V4L2_TUNER_SUB_STEREO :
+ V4L2_TUNER_SUB_MONO;
+ v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
+ v->audmode = V4L2_TUNER_MODE_STEREO;
+ v->signal = radio->stereo ? 0xffff : 0; /* We can't get the signal strength */
return 0;
}
@@ -369,14 +217,12 @@ static int vidioc_s_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
struct dsbr100_device *radio = video_drvdata(file);
- int retval;
- radio->curfreq = f->frequency;
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
- retval = dsbr100_setfreq(radio);
- if (retval < 0)
- dev_warn(&radio->usbdev->dev, "Set frequency failed\n");
- return 0;
+ return dsbr100_setfreq(radio, clamp_t(unsigned, f->frequency,
+ FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL));
}
static int vidioc_g_frequency(struct file *file, void *priv,
@@ -384,90 +230,26 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct dsbr100_device *radio = video_drvdata(file);
+ if (f->tuner)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = radio->curfreq;
return 0;
}
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- }
-
- return -EINVAL;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int usb_dsbr100_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct dsbr100_device *radio = video_drvdata(file);
+ struct dsbr100_device *radio =
+ container_of(ctrl->handler, struct dsbr100_device, hdl);
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- ctrl->value = radio->status;
- return 0;
+ radio->muted = ctrl->val;
+ return radio->muted ? dsbr100_stop(radio) : dsbr100_start(radio);
}
return -EINVAL;
}
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct dsbr100_device *radio = video_drvdata(file);
- int retval;
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- if (ctrl->value) {
- retval = dsbr100_stop(radio);
- if (retval < 0) {
- dev_warn(&radio->usbdev->dev,
- "Radio did not respond properly\n");
- return -EBUSY;
- }
- } else {
- retval = dsbr100_start(radio);
- if (retval < 0) {
- dev_warn(&radio->usbdev->dev,
- "Radio did not respond properly\n");
- return -EBUSY;
- }
- }
- return 0;
- }
- return -EINVAL;
-}
-
-static int vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- if (a->index > 1)
- return -EINVAL;
-
- strcpy(a->name, "Radio");
- a->capability = V4L2_AUDCAP_STEREO;
- return 0;
-}
-
-static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
-{
- return i ? -EINVAL : 0;
-}
-
-static int vidioc_s_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- return a->index ? -EINVAL : 0;
-}
/* USB subsystem interface begins here */
@@ -481,8 +263,17 @@ static void usb_dsbr100_disconnect(struct usb_interface *intf)
{
struct dsbr100_device *radio = usb_get_intfdata(intf);
- v4l2_device_get(&radio->v4l2_dev);
mutex_lock(&radio->v4l2_lock);
+ /*
+ * Disconnect is also called on unload, and in that case we need to
+ * mute the device. This call will silently fail if it is called
+ * after a physical disconnect.
+ */
+ usb_control_msg(radio->usbdev,
+ usb_rcvctrlpipe(radio->usbdev, 0),
+ DSB100_ONOFF,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ 0x00, 0x00, radio->transfer_buffer, 8, 300);
usb_set_intfdata(intf, NULL);
video_unregister_device(&radio->videodev);
v4l2_device_disconnect(&radio->v4l2_dev);
@@ -495,25 +286,13 @@ static void usb_dsbr100_disconnect(struct usb_interface *intf)
static int usb_dsbr100_suspend(struct usb_interface *intf, pm_message_t message)
{
struct dsbr100_device *radio = usb_get_intfdata(intf);
- int retval;
mutex_lock(&radio->v4l2_lock);
- if (radio->status == STARTED) {
- retval = dsbr100_stop(radio);
- if (retval < 0)
- dev_warn(&intf->dev, "dsbr100_stop failed\n");
-
- /* After dsbr100_stop() status set to STOPPED.
- * If we want driver to start radio on resume
- * we set status equal to STARTED.
- * On resume we will check status and run radio if needed.
- */
- radio->status = STARTED;
- }
+ if (!radio->muted && dsbr100_stop(radio) < 0)
+ dev_warn(&intf->dev, "dsbr100_stop failed\n");
mutex_unlock(&radio->v4l2_lock);
dev_info(&intf->dev, "going into suspend..\n");
-
return 0;
}
@@ -521,18 +300,13 @@ static int usb_dsbr100_suspend(struct usb_interface *intf, pm_message_t message)
static int usb_dsbr100_resume(struct usb_interface *intf)
{
struct dsbr100_device *radio = usb_get_intfdata(intf);
- int retval;
mutex_lock(&radio->v4l2_lock);
- if (radio->status == STARTED) {
- retval = dsbr100_start(radio);
- if (retval < 0)
- dev_warn(&intf->dev, "dsbr100_start failed\n");
- }
+ if (!radio->muted && dsbr100_start(radio) < 0)
+ dev_warn(&intf->dev, "dsbr100_start failed\n");
mutex_unlock(&radio->v4l2_lock);
dev_info(&intf->dev, "coming out of suspend..\n");
-
return 0;
}
@@ -541,15 +315,23 @@ static void usb_dsbr100_release(struct v4l2_device *v4l2_dev)
{
struct dsbr100_device *radio = v4l2_dev_to_radio(v4l2_dev);
+ v4l2_ctrl_handler_free(&radio->hdl);
v4l2_device_unregister(&radio->v4l2_dev);
kfree(radio->transfer_buffer);
kfree(radio);
}
+static const struct v4l2_ctrl_ops usb_dsbr100_ctrl_ops = {
+ .s_ctrl = usb_dsbr100_s_ctrl,
+};
+
/* File system interface */
static const struct v4l2_file_operations usb_dsbr100_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
};
static const struct v4l2_ioctl_ops usb_dsbr100_ioctl_ops = {
@@ -558,13 +340,9 @@ static const struct v4l2_ioctl_ops usb_dsbr100_ioctl_ops = {
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_g_audio = vidioc_g_audio,
- .vidioc_s_audio = vidioc_s_audio,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/* check if the device is present and register with v4l and usb if it is */
@@ -593,11 +371,17 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
retval = v4l2_device_register(&intf->dev, v4l2_dev);
if (retval < 0) {
v4l2_err(v4l2_dev, "couldn't register v4l2_device\n");
- kfree(radio->transfer_buffer);
- kfree(radio);
- return retval;
+ goto err_reg_dev;
}
+ v4l2_ctrl_handler_init(&radio->hdl, 1);
+ v4l2_ctrl_new_std(&radio->hdl, &usb_dsbr100_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ if (radio->hdl.error) {
+ retval = radio->hdl.error;
+ v4l2_err(v4l2_dev, "couldn't register control\n");
+ goto err_reg_ctrl;
+ }
mutex_init(&radio->v4l2_lock);
strlcpy(radio->videodev.name, v4l2_dev->name, sizeof(radio->videodev.name));
radio->videodev.v4l2_dev = v4l2_dev;
@@ -605,28 +389,46 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
radio->videodev.ioctl_ops = &usb_dsbr100_ioctl_ops;
radio->videodev.release = video_device_release_empty;
radio->videodev.lock = &radio->v4l2_lock;
+ radio->videodev.ctrl_handler = &radio->hdl;
+ set_bit(V4L2_FL_USE_FH_PRIO, &radio->videodev.flags);
radio->usbdev = interface_to_usbdev(intf);
radio->curfreq = FREQ_MIN * FREQ_MUL;
- radio->status = STOPPED;
+ radio->muted = true;
video_set_drvdata(&radio->videodev, radio);
+ usb_set_intfdata(intf, radio);
retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO, radio_nr);
- if (retval < 0) {
- v4l2_err(v4l2_dev, "couldn't register video device\n");
- v4l2_device_unregister(v4l2_dev);
- kfree(radio->transfer_buffer);
- kfree(radio);
- return -EIO;
- }
- usb_set_intfdata(intf, radio);
- return 0;
+ if (retval == 0)
+ return 0;
+ v4l2_err(v4l2_dev, "couldn't register video device\n");
+
+err_reg_ctrl:
+ v4l2_ctrl_handler_free(&radio->hdl);
+ v4l2_device_unregister(v4l2_dev);
+err_reg_dev:
+ kfree(radio->transfer_buffer);
+ kfree(radio);
+ return retval;
}
-module_usb_driver(usb_dsbr100_driver);
+static struct usb_device_id usb_dsbr100_device_table[] = {
+ { USB_DEVICE(DSB100_VENDOR, DSB100_PRODUCT) },
+ { } /* Terminating entry */
+};
-MODULE_AUTHOR( DRIVER_AUTHOR );
-MODULE_DESCRIPTION( DRIVER_DESC );
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRIVER_VERSION);
+MODULE_DEVICE_TABLE(usb, usb_dsbr100_device_table);
+
+/* USB subsystem interface */
+static struct usb_driver usb_dsbr100_driver = {
+ .name = "dsbr100",
+ .probe = usb_dsbr100_probe,
+ .disconnect = usb_dsbr100_disconnect,
+ .id_table = usb_dsbr100_device_table,
+ .suspend = usb_dsbr100_suspend,
+ .resume = usb_dsbr100_resume,
+ .reset_resume = usb_dsbr100_resume,
+};
+
+module_usb_driver(usb_dsbr100_driver);
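Editor's note on the dsbr100 hunks above: the bulk of the change is the standard v4l2 control-framework conversion — the old vidioc_{query,g,s}_ctrl handlers are dropped in favour of a single s_ctrl callback owned by a v4l2_ctrl_handler. The following is a minimal sketch of that pattern only, not code from this patch; the mydrv_ names are hypothetical.

#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>

struct mydrv_device {
	struct v4l2_device v4l2_dev;
	struct v4l2_ctrl_handler hdl;
	bool muted;
};

static int mydrv_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct mydrv_device *dev =
		container_of(ctrl->handler, struct mydrv_device, hdl);

	switch (ctrl->id) {
	case V4L2_CID_AUDIO_MUTE:
		dev->muted = ctrl->val;
		return 0;	/* a real driver programs the hardware here */
	}
	return -EINVAL;
}

static const struct v4l2_ctrl_ops mydrv_ctrl_ops = {
	.s_ctrl = mydrv_s_ctrl,
};

static int mydrv_setup_ctrls(struct mydrv_device *dev)
{
	/* one control: mute, boolean, default 1 (muted) */
	v4l2_ctrl_handler_init(&dev->hdl, 1);
	v4l2_ctrl_new_std(&dev->hdl, &mydrv_ctrl_ops,
			  V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
	if (dev->hdl.error)
		return dev->hdl.error;
	/* hook the handler up, as the patch does via videodev.ctrl_handler */
	dev->v4l2_dev.ctrl_handler = &dev->hdl;
	return 0;
}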
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 2e639ce6f256..235c0e349820 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -29,6 +29,7 @@
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/mutex.h>
#include <linux/io.h> /* outb, outb_p */
+#include <linux/pnp.h>
#include <linux/slab.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
@@ -283,6 +284,16 @@ static const struct radio_isa_ops gemtek_ops = {
static const int gemtek_ioports[] = { 0x20c, 0x30c, 0x24c, 0x34c, 0x248, 0x28c };
+#ifdef CONFIG_PNP
+static struct pnp_device_id gemtek_pnp_devices[] = {
+ /* AOpen FX-3D/Pro Radio */
+ {.id = "ADS7183", .driver_data = 0},
+ {.id = ""}
+};
+
+MODULE_DEVICE_TABLE(pnp, gemtek_pnp_devices);
+#endif
+
static struct radio_isa_driver gemtek_driver = {
.driver = {
.match = radio_isa_match,
@@ -292,6 +303,14 @@ static struct radio_isa_driver gemtek_driver = {
.name = "radio-gemtek",
},
},
+#ifdef CONFIG_PNP
+ .pnp_driver = {
+ .name = "radio-gemtek",
+ .id_table = gemtek_pnp_devices,
+ .probe = radio_isa_pnp_probe,
+ .remove = radio_isa_pnp_remove,
+ },
+#endif
.io_params = io,
.radio_nr_params = radio_nr,
.io_ports = gemtek_ioports,
@@ -305,12 +324,18 @@ static struct radio_isa_driver gemtek_driver = {
static int __init gemtek_init(void)
{
gemtek_driver.probe = probe;
+#ifdef CONFIG_PNP
+ pnp_register_driver(&gemtek_driver.pnp_driver);
+#endif
return isa_register_driver(&gemtek_driver.driver, GEMTEK_MAX);
}
static void __exit gemtek_exit(void)
{
hardmute = 1; /* Turn off PLL */
+#ifdef CONFIG_PNP
+ pnp_unregister_driver(&gemtek_driver.pnp_driver);
+#endif
isa_unregister_driver(&gemtek_driver.driver);
}
diff --git a/drivers/media/radio/radio-isa.c b/drivers/media/radio/radio-isa.c
index 06f906351fad..3c0067de4324 100644
--- a/drivers/media/radio/radio-isa.c
+++ b/drivers/media/radio/radio-isa.c
@@ -150,14 +150,6 @@ static int radio_isa_log_status(struct file *file, void *priv)
return 0;
}
-static int radio_isa_subscribe_event(struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
-{
- if (sub->type == V4L2_EVENT_CTRL)
- return v4l2_event_subscribe(fh, sub, 0);
- return -EINVAL;
-}
-
static const struct v4l2_ctrl_ops radio_isa_ctrl_ops = {
.s_ctrl = radio_isa_s_ctrl,
};
@@ -177,7 +169,7 @@ static const struct v4l2_ioctl_ops radio_isa_ioctl_ops = {
.vidioc_g_frequency = radio_isa_g_frequency,
.vidioc_s_frequency = radio_isa_s_frequency,
.vidioc_log_status = radio_isa_log_status,
- .vidioc_subscribe_event = radio_isa_subscribe_event,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -199,56 +191,31 @@ static bool radio_isa_valid_io(const struct radio_isa_driver *drv, int io)
return false;
}
-int radio_isa_probe(struct device *pdev, unsigned int dev)
+struct radio_isa_card *radio_isa_alloc(struct radio_isa_driver *drv,
+ struct device *pdev)
{
- struct radio_isa_driver *drv = pdev->platform_data;
- const struct radio_isa_ops *ops = drv->ops;
struct v4l2_device *v4l2_dev;
- struct radio_isa_card *isa;
- int res;
+ struct radio_isa_card *isa = drv->ops->alloc();
+ if (!isa)
+ return NULL;
- isa = drv->ops->alloc();
- if (isa == NULL)
- return -ENOMEM;
dev_set_drvdata(pdev, isa);
isa->drv = drv;
- isa->io = drv->io_params[dev];
v4l2_dev = &isa->v4l2_dev;
strlcpy(v4l2_dev->name, dev_name(pdev), sizeof(v4l2_dev->name));
- if (drv->probe && ops->probe) {
- int i;
-
- for (i = 0; i < drv->num_of_io_ports; ++i) {
- int io = drv->io_ports[i];
-
- if (request_region(io, drv->region_size, v4l2_dev->name)) {
- bool found = ops->probe(isa, io);
-
- release_region(io, drv->region_size);
- if (found) {
- isa->io = io;
- break;
- }
- }
- }
- }
-
- if (!radio_isa_valid_io(drv, isa->io)) {
- int i;
+ return isa;
+}
- if (isa->io < 0)
- return -ENODEV;
- v4l2_err(v4l2_dev, "you must set an I/O address with io=0x%03x",
- drv->io_ports[0]);
- for (i = 1; i < drv->num_of_io_ports; i++)
- printk(KERN_CONT "/0x%03x", drv->io_ports[i]);
- printk(KERN_CONT ".\n");
- kfree(isa);
- return -EINVAL;
- }
+int radio_isa_common_probe(struct radio_isa_card *isa, struct device *pdev,
+ int radio_nr, unsigned region_size)
+{
+ const struct radio_isa_driver *drv = isa->drv;
+ const struct radio_isa_ops *ops = drv->ops;
+ struct v4l2_device *v4l2_dev = &isa->v4l2_dev;
+ int res;
- if (!request_region(isa->io, drv->region_size, v4l2_dev->name)) {
+ if (!request_region(isa->io, region_size, v4l2_dev->name)) {
v4l2_err(v4l2_dev, "port 0x%x already in use\n", isa->io);
kfree(isa);
return -EBUSY;
@@ -299,42 +266,126 @@ int radio_isa_probe(struct device *pdev, unsigned int dev)
res = ops->s_stereo(isa, isa->stereo);
if (res < 0) {
v4l2_err(v4l2_dev, "Could not setup card\n");
- goto err_node_reg;
+ goto err_hdl;
}
- res = video_register_device(&isa->vdev, VFL_TYPE_RADIO,
- drv->radio_nr_params[dev]);
+ res = video_register_device(&isa->vdev, VFL_TYPE_RADIO, radio_nr);
+
if (res < 0) {
v4l2_err(v4l2_dev, "Could not register device node\n");
- goto err_node_reg;
+ goto err_hdl;
}
v4l2_info(v4l2_dev, "Initialized radio card %s on port 0x%03x\n",
drv->card, isa->io);
return 0;
-err_node_reg:
- v4l2_ctrl_handler_free(&isa->hdl);
err_hdl:
- v4l2_device_unregister(&isa->v4l2_dev);
+ v4l2_ctrl_handler_free(&isa->hdl);
err_dev_reg:
- release_region(isa->io, drv->region_size);
+ release_region(isa->io, region_size);
kfree(isa);
return res;
}
-EXPORT_SYMBOL_GPL(radio_isa_probe);
-int radio_isa_remove(struct device *pdev, unsigned int dev)
+int radio_isa_common_remove(struct radio_isa_card *isa, unsigned region_size)
{
- struct radio_isa_card *isa = dev_get_drvdata(pdev);
const struct radio_isa_ops *ops = isa->drv->ops;
ops->s_mute_volume(isa, true, isa->volume ? isa->volume->cur.val : 0);
video_unregister_device(&isa->vdev);
v4l2_ctrl_handler_free(&isa->hdl);
v4l2_device_unregister(&isa->v4l2_dev);
- release_region(isa->io, isa->drv->region_size);
+ release_region(isa->io, region_size);
v4l2_info(&isa->v4l2_dev, "Removed radio card %s\n", isa->drv->card);
kfree(isa);
return 0;
}
+
+int radio_isa_probe(struct device *pdev, unsigned int dev)
+{
+ struct radio_isa_driver *drv = pdev->platform_data;
+ const struct radio_isa_ops *ops = drv->ops;
+ struct v4l2_device *v4l2_dev;
+ struct radio_isa_card *isa;
+
+ isa = radio_isa_alloc(drv, pdev);
+ if (!isa)
+ return -ENOMEM;
+ isa->io = drv->io_params[dev];
+ v4l2_dev = &isa->v4l2_dev;
+
+ if (drv->probe && ops->probe) {
+ int i;
+
+ for (i = 0; i < drv->num_of_io_ports; ++i) {
+ int io = drv->io_ports[i];
+
+ if (request_region(io, drv->region_size, v4l2_dev->name)) {
+ bool found = ops->probe(isa, io);
+
+ release_region(io, drv->region_size);
+ if (found) {
+ isa->io = io;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!radio_isa_valid_io(drv, isa->io)) {
+ int i;
+
+ if (isa->io < 0)
+ return -ENODEV;
+ v4l2_err(v4l2_dev, "you must set an I/O address with io=0x%03x",
+ drv->io_ports[0]);
+ for (i = 1; i < drv->num_of_io_ports; i++)
+ printk(KERN_CONT "/0x%03x", drv->io_ports[i]);
+ printk(KERN_CONT ".\n");
+ kfree(isa);
+ return -EINVAL;
+ }
+
+ return radio_isa_common_probe(isa, pdev, drv->radio_nr_params[dev],
+ drv->region_size);
+}
+EXPORT_SYMBOL_GPL(radio_isa_probe);
+
+int radio_isa_remove(struct device *pdev, unsigned int dev)
+{
+ struct radio_isa_card *isa = dev_get_drvdata(pdev);
+
+ return radio_isa_common_remove(isa, isa->drv->region_size);
+}
EXPORT_SYMBOL_GPL(radio_isa_remove);
+
+#ifdef CONFIG_PNP
+int radio_isa_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
+{
+ struct pnp_driver *pnp_drv = to_pnp_driver(dev->dev.driver);
+ struct radio_isa_driver *drv = container_of(pnp_drv,
+ struct radio_isa_driver, pnp_driver);
+ struct radio_isa_card *isa;
+
+ if (!pnp_port_valid(dev, 0))
+ return -ENODEV;
+
+ isa = radio_isa_alloc(drv, &dev->dev);
+ if (!isa)
+ return -ENOMEM;
+
+ isa->io = pnp_port_start(dev, 0);
+
+ return radio_isa_common_probe(isa, &dev->dev, drv->radio_nr_params[0],
+ pnp_port_len(dev, 0));
+}
+EXPORT_SYMBOL_GPL(radio_isa_pnp_probe);
+
+void radio_isa_pnp_remove(struct pnp_dev *dev)
+{
+ struct radio_isa_card *isa = dev_get_drvdata(&dev->dev);
+
+ radio_isa_common_remove(isa, pnp_port_len(dev, 0));
+}
+EXPORT_SYMBOL_GPL(radio_isa_pnp_remove);
+#endif
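Editor's note: with radio_isa_probe() split into radio_isa_alloc() plus radio_isa_common_probe(), a card driver opts into the new PnP path by filling in the .pnp_driver member and registering it alongside the ISA driver, exactly as radio-gemtek does earlier in this diff. The sketch below is a condensed, hypothetical example of that wiring (foo_* names and the "ABC1234" PnP id are made up); it assumes "radio-isa.h" from this tree.

#include <linux/module.h>
#include <linux/isa.h>
#include <linux/pnp.h>
#include "radio-isa.h"

#define FOO_MAX 2	/* hypothetical maximum number of cards */

#ifdef CONFIG_PNP
static struct pnp_device_id foo_pnp_devices[] = {
	{ .id = "ABC1234", .driver_data = 0 },	/* hypothetical PnP id */
	{ .id = "" }
};
MODULE_DEVICE_TABLE(pnp, foo_pnp_devices);
#endif

static struct radio_isa_driver foo_driver = {
	.driver = {
		.match	= radio_isa_match,
		.probe	= radio_isa_probe,
		.remove	= radio_isa_remove,
		.driver	= { .name = "radio-foo" },
	},
#ifdef CONFIG_PNP
	.pnp_driver = {
		.name		= "radio-foo",
		.id_table	= foo_pnp_devices,
		.probe		= radio_isa_pnp_probe,
		.remove		= radio_isa_pnp_remove,
	},
#endif
	/* .ops, .io_params, .io_ports, ... filled in as before */
};

static int __init foo_init(void)
{
#ifdef CONFIG_PNP
	pnp_register_driver(&foo_driver.pnp_driver);
#endif
	return isa_register_driver(&foo_driver.driver, FOO_MAX);
}

static void __exit foo_exit(void)
{
#ifdef CONFIG_PNP
	pnp_unregister_driver(&foo_driver.pnp_driver);
#endif
	isa_unregister_driver(&foo_driver.driver);
}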
diff --git a/drivers/media/radio/radio-isa.h b/drivers/media/radio/radio-isa.h
index 8a0ea84d86de..ba4c01f1bd0c 100644
--- a/drivers/media/radio/radio-isa.h
+++ b/drivers/media/radio/radio-isa.h
@@ -24,6 +24,7 @@
#define _RADIO_ISA_H_
#include <linux/isa.h>
+#include <linux/pnp.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
@@ -76,6 +77,9 @@ struct radio_isa_ops {
/* Top level structure needed to instantiate the cards */
struct radio_isa_driver {
struct isa_driver driver;
+#ifdef CONFIG_PNP
+ struct pnp_driver pnp_driver;
+#endif
const struct radio_isa_ops *ops;
/* The module_param_array with the specified I/O ports */
int *io_params;
@@ -101,5 +105,10 @@ struct radio_isa_driver {
int radio_isa_match(struct device *pdev, unsigned int dev);
int radio_isa_probe(struct device *pdev, unsigned int dev);
int radio_isa_remove(struct device *pdev, unsigned int dev);
+#ifdef CONFIG_PNP
+int radio_isa_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *dev_id);
+void radio_isa_pnp_remove(struct pnp_dev *dev);
+#endif
#endif
diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c
index 55bd1d2937c8..79adf3e654e5 100644
--- a/drivers/media/radio/radio-keene.c
+++ b/drivers/media/radio/radio-keene.c
@@ -28,7 +28,6 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <linux/usb.h>
-#include <linux/version.h>
#include <linux/mutex.h>
/* driver and module definitions */
@@ -149,7 +148,6 @@ static void usb_keene_disconnect(struct usb_interface *intf)
{
struct keene_device *radio = to_keene_dev(usb_get_intfdata(intf));
- v4l2_device_get(&radio->v4l2_dev);
mutex_lock(&radio->lock);
usb_set_intfdata(intf, NULL);
video_unregister_device(&radio->vdev);
@@ -158,6 +156,23 @@ static void usb_keene_disconnect(struct usb_interface *intf)
v4l2_device_put(&radio->v4l2_dev);
}
+static int usb_keene_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct keene_device *radio = to_keene_dev(usb_get_intfdata(intf));
+
+ return keene_cmd_main(radio, 0, false);
+}
+
+static int usb_keene_resume(struct usb_interface *intf)
+{
+ struct keene_device *radio = to_keene_dev(usb_get_intfdata(intf));
+
+ mdelay(50);
+ keene_cmd_set(radio);
+ keene_cmd_main(radio, radio->curfreq, true);
+ return 0;
+}
+
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *v)
{
@@ -256,18 +271,6 @@ static int keene_s_ctrl(struct v4l2_ctrl *ctrl)
return -EINVAL;
}
-static int vidioc_subscribe_event(struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
-{
- switch (sub->type) {
- case V4L2_EVENT_CTRL:
- return v4l2_event_subscribe(fh, sub, 0);
- default:
- return -EINVAL;
- }
-}
-
-
/* File system interface */
static const struct v4l2_file_operations usb_keene_fops = {
.owner = THIS_MODULE,
@@ -288,7 +291,7 @@ static const struct v4l2_ioctl_ops usb_keene_ioctl_ops = {
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
.vidioc_log_status = v4l2_ctrl_log_status,
- .vidioc_subscribe_event = vidioc_subscribe_event,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -404,6 +407,9 @@ static struct usb_driver usb_keene_driver = {
.probe = usb_keene_probe,
.disconnect = usb_keene_disconnect,
.id_table = usb_keene_device_table,
+ .suspend = usb_keene_suspend,
+ .resume = usb_keene_resume,
+ .reset_resume = usb_keene_resume,
};
static int __init keene_init(void)
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index a860a72a58ec..94cb6bc690f5 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -62,6 +62,8 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
#include <linux/usb.h>
#include <linux/mutex.h>
@@ -101,12 +103,17 @@ devices, that would be 76 and 91. */
* List isn't full and will be updated with implementation of new functions
*/
#define AMRADIO_SET_FREQ 0xa4
+#define AMRADIO_GET_READY_FLAG 0xa5
+#define AMRADIO_GET_SIGNAL 0xa7
+#define AMRADIO_GET_FREQ 0xa8
+#define AMRADIO_SET_SEARCH_UP 0xa9
+#define AMRADIO_SET_SEARCH_DOWN 0xaa
#define AMRADIO_SET_MUTE 0xab
+#define AMRADIO_SET_RIGHT_MUTE 0xac
+#define AMRADIO_SET_LEFT_MUTE 0xad
#define AMRADIO_SET_MONO 0xae
-
-/* Comfortable defines for amradio_set_mute */
-#define AMRADIO_START 0x00
-#define AMRADIO_STOP 0x01
+#define AMRADIO_SET_SEARCH_LVL 0xb0
+#define AMRADIO_STOP_SEARCH 0xb1
/* Comfortable defines for amradio_set_stereo */
#define WANT_STEREO 0x00
@@ -117,29 +124,20 @@ static int radio_nr = -1;
module_param(radio_nr, int, 0);
MODULE_PARM_DESC(radio_nr, "Radio Nr");
-static int usb_amradio_probe(struct usb_interface *intf,
- const struct usb_device_id *id);
-static void usb_amradio_disconnect(struct usb_interface *intf);
-static int usb_amradio_open(struct file *file);
-static int usb_amradio_close(struct file *file);
-static int usb_amradio_suspend(struct usb_interface *intf,
- pm_message_t message);
-static int usb_amradio_resume(struct usb_interface *intf);
-
/* Data for one (physical) device */
struct amradio_device {
/* reference to USB and video device */
struct usb_device *usbdev;
struct usb_interface *intf;
- struct video_device videodev;
+ struct video_device vdev;
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler hdl;
- unsigned char *buffer;
+ u8 *buffer;
struct mutex lock; /* buffer locking */
int curfreq;
int stereo;
int muted;
- int initialized;
};
static inline struct amradio_device *to_amradio_dev(struct v4l2_device *v4l2_dev)
@@ -147,29 +145,8 @@ static inline struct amradio_device *to_amradio_dev(struct v4l2_device *v4l2_dev
return container_of(v4l2_dev, struct amradio_device, v4l2_dev);
}
-/* USB Device ID List */
-static struct usb_device_id usb_amradio_device_table[] = {
- {USB_DEVICE_AND_INTERFACE_INFO(USB_AMRADIO_VENDOR, USB_AMRADIO_PRODUCT,
- USB_CLASS_HID, 0, 0) },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(usb, usb_amradio_device_table);
-
-/* USB subsystem interface */
-static struct usb_driver usb_amradio_driver = {
- .name = MR800_DRIVER_NAME,
- .probe = usb_amradio_probe,
- .disconnect = usb_amradio_disconnect,
- .suspend = usb_amradio_suspend,
- .resume = usb_amradio_resume,
- .reset_resume = usb_amradio_resume,
- .id_table = usb_amradio_device_table,
- .supports_autosuspend = 1,
-};
-
-/* switch on/off the radio. Send 8 bytes to device */
-static int amradio_set_mute(struct amradio_device *radio, char argument)
+static int amradio_send_cmd(struct amradio_device *radio, u8 cmd, u8 arg,
+ u8 *extra, u8 extralen, bool reply)
{
int retval;
int size;
@@ -177,99 +154,92 @@ static int amradio_set_mute(struct amradio_device *radio, char argument)
radio->buffer[0] = 0x00;
radio->buffer[1] = 0x55;
radio->buffer[2] = 0xaa;
- radio->buffer[3] = 0x00;
- radio->buffer[4] = AMRADIO_SET_MUTE;
- radio->buffer[5] = argument;
+ radio->buffer[3] = extralen;
+ radio->buffer[4] = cmd;
+ radio->buffer[5] = arg;
radio->buffer[6] = 0x00;
- radio->buffer[7] = 0x00;
+ radio->buffer[7] = extra || reply ? 8 : 0;
retval = usb_bulk_msg(radio->usbdev, usb_sndintpipe(radio->usbdev, 2),
- (void *) (radio->buffer), BUFFER_LENGTH, &size, USB_TIMEOUT);
+ radio->buffer, BUFFER_LENGTH, &size, USB_TIMEOUT);
if (retval < 0 || size != BUFFER_LENGTH) {
- amradio_dev_warn(&radio->videodev.dev, "set mute failed\n");
- return retval;
+ if (video_is_registered(&radio->vdev))
+ amradio_dev_warn(&radio->vdev.dev,
+ "cmd %02x failed\n", cmd);
+ return retval ? retval : -EIO;
}
+ if (!extra && !reply)
+ return 0;
- radio->muted = argument;
+ if (extra) {
+ memcpy(radio->buffer, extra, extralen);
+ memset(radio->buffer + extralen, 0, 8 - extralen);
+ retval = usb_bulk_msg(radio->usbdev, usb_sndintpipe(radio->usbdev, 2),
+ radio->buffer, BUFFER_LENGTH, &size, USB_TIMEOUT);
+ } else {
+ memset(radio->buffer, 0, 8);
+ retval = usb_bulk_msg(radio->usbdev, usb_rcvbulkpipe(radio->usbdev, 0x81),
+ radio->buffer, BUFFER_LENGTH, &size, USB_TIMEOUT);
+ }
+ if (retval == 0 && size == BUFFER_LENGTH)
+ return 0;
+ if (video_is_registered(&radio->vdev) && cmd != AMRADIO_GET_READY_FLAG)
+ amradio_dev_warn(&radio->vdev.dev, "follow-up to cmd %02x failed\n", cmd);
+ return retval ? retval : -EIO;
+}
- return retval;
+/* switch on/off the radio. Send 8 bytes to device */
+static int amradio_set_mute(struct amradio_device *radio, bool mute)
+{
+ int ret = amradio_send_cmd(radio,
+ AMRADIO_SET_MUTE, mute, NULL, 0, false);
+
+ if (!ret)
+ radio->muted = mute;
+ return ret;
}
/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
-static int amradio_setfreq(struct amradio_device *radio, int freq)
+static int amradio_set_freq(struct amradio_device *radio, int freq)
{
- int retval;
- int size;
unsigned short freq_send = 0x10 + (freq >> 3) / 25;
-
- radio->buffer[0] = 0x00;
- radio->buffer[1] = 0x55;
- radio->buffer[2] = 0xaa;
- radio->buffer[3] = 0x03;
- radio->buffer[4] = AMRADIO_SET_FREQ;
- radio->buffer[5] = 0x00;
- radio->buffer[6] = 0x00;
- radio->buffer[7] = 0x08;
-
- retval = usb_bulk_msg(radio->usbdev, usb_sndintpipe(radio->usbdev, 2),
- (void *) (radio->buffer), BUFFER_LENGTH, &size, USB_TIMEOUT);
-
- if (retval < 0 || size != BUFFER_LENGTH)
- goto out_err;
+ u8 buf[3];
+ int retval;
/* frequency is calculated from freq_send and placed in first 2 bytes */
- radio->buffer[0] = (freq_send >> 8) & 0xff;
- radio->buffer[1] = freq_send & 0xff;
- radio->buffer[2] = 0x01;
- radio->buffer[3] = 0x00;
- radio->buffer[4] = 0x00;
- /* 5 and 6 bytes of buffer already = 0x00 */
- radio->buffer[7] = 0x00;
-
- retval = usb_bulk_msg(radio->usbdev, usb_sndintpipe(radio->usbdev, 2),
- (void *) (radio->buffer), BUFFER_LENGTH, &size, USB_TIMEOUT);
-
- if (retval < 0 || size != BUFFER_LENGTH)
- goto out_err;
+ buf[0] = (freq_send >> 8) & 0xff;
+ buf[1] = freq_send & 0xff;
+ buf[2] = 0x01;
+ retval = amradio_send_cmd(radio, AMRADIO_SET_FREQ, 0, buf, 3, false);
+ if (retval)
+ return retval;
radio->curfreq = freq;
- goto out;
-
-out_err:
- amradio_dev_warn(&radio->videodev.dev, "set frequency failed\n");
-out:
- return retval;
+ msleep(40);
+ return 0;
}
-static int amradio_set_stereo(struct amradio_device *radio, char argument)
+static int amradio_set_stereo(struct amradio_device *radio, bool stereo)
{
- int retval;
- int size;
+ int ret = amradio_send_cmd(radio,
+ AMRADIO_SET_MONO, !stereo, NULL, 0, false);
- radio->buffer[0] = 0x00;
- radio->buffer[1] = 0x55;
- radio->buffer[2] = 0xaa;
- radio->buffer[3] = 0x00;
- radio->buffer[4] = AMRADIO_SET_MONO;
- radio->buffer[5] = argument;
- radio->buffer[6] = 0x00;
- radio->buffer[7] = 0x00;
-
- retval = usb_bulk_msg(radio->usbdev, usb_sndintpipe(radio->usbdev, 2),
- (void *) (radio->buffer), BUFFER_LENGTH, &size, USB_TIMEOUT);
-
- if (retval < 0 || size != BUFFER_LENGTH) {
- amradio_dev_warn(&radio->videodev.dev, "set stereo failed\n");
- return retval;
- }
+ if (!ret)
+ radio->stereo = stereo;
+ return ret;
+}
- if (argument == WANT_STEREO)
- radio->stereo = 1;
- else
- radio->stereo = 0;
+static int amradio_get_stat(struct amradio_device *radio, bool *is_stereo, u32 *signal)
+{
+ int ret = amradio_send_cmd(radio,
+ AMRADIO_GET_SIGNAL, 0, NULL, 0, true);
- return retval;
+ if (ret)
+ return ret;
+ *is_stereo = radio->buffer[2] >> 7;
+ *signal = (radio->buffer[3] & 0xf0) << 8;
+ return 0;
}
/* Handle unplugging the device.
@@ -282,25 +252,26 @@ static void usb_amradio_disconnect(struct usb_interface *intf)
struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
- /* increase the device node's refcount */
- get_device(&radio->videodev.dev);
+ video_unregister_device(&radio->vdev);
+ amradio_set_mute(radio, true);
+ usb_set_intfdata(intf, NULL);
v4l2_device_disconnect(&radio->v4l2_dev);
- video_unregister_device(&radio->videodev);
mutex_unlock(&radio->lock);
- /* decrease the device node's refcount, allowing it to be released */
- put_device(&radio->videodev.dev);
+ v4l2_device_put(&radio->v4l2_dev);
}
/* vidioc_querycap - query device capabilities */
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *v)
{
- struct amradio_device *radio = file->private_data;
+ struct amradio_device *radio = video_drvdata(file);
strlcpy(v->driver, "radio-mr800", sizeof(v->driver));
strlcpy(v->card, "AverMedia MR 800 USB FM Radio", sizeof(v->card));
usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
- v->capabilities = V4L2_CAP_TUNER;
+ v->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER |
+ V4L2_CAP_HW_FREQ_SEEK;
+ v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -308,44 +279,34 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *v)
{
- struct amradio_device *radio = file->private_data;
+ struct amradio_device *radio = video_drvdata(file);
+ bool is_stereo = false;
int retval;
if (v->index > 0)
return -EINVAL;
-/* TODO: Add function which look is signal stereo or not
- * amradio_getstat(radio);
- */
-
-/* we call amradio_set_stereo to set radio->stereo
- * Honestly, amradio_getstat should cover this in future and
- * amradio_set_stereo shouldn't be here
- */
- retval = amradio_set_stereo(radio, WANT_STEREO);
+ v->signal = 0;
+ retval = amradio_get_stat(radio, &is_stereo, &v->signal);
+ if (retval)
+ return retval;
strcpy(v->name, "FM");
v->type = V4L2_TUNER_RADIO;
v->rangelow = FREQ_MIN * FREQ_MUL;
v->rangehigh = FREQ_MAX * FREQ_MUL;
- v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
- v->capability = V4L2_TUNER_CAP_LOW;
- if (radio->stereo)
- v->audmode = V4L2_TUNER_MODE_STEREO;
- else
- v->audmode = V4L2_TUNER_MODE_MONO;
- v->signal = 0xffff; /* Can't get the signal strength, sad.. */
- v->afc = 0; /* Don't know what is this */
-
- return retval;
+ v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
+ v->rxsubchans = is_stereo ? V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO;
+ v->audmode = radio->stereo ?
+ V4L2_TUNER_MODE_STEREO : V4L2_TUNER_MODE_MONO;
+ return 0;
}
/* vidioc_s_tuner - set tuner attributes */
static int vidioc_s_tuner(struct file *file, void *priv,
struct v4l2_tuner *v)
{
- struct amradio_device *radio = file->private_data;
- int retval = -EINVAL;
+ struct amradio_device *radio = video_drvdata(file);
if (v->index > 0)
return -EINVAL;
@@ -353,34 +314,31 @@ static int vidioc_s_tuner(struct file *file, void *priv,
/* mono/stereo selector */
switch (v->audmode) {
case V4L2_TUNER_MODE_MONO:
- retval = amradio_set_stereo(radio, WANT_MONO);
- break;
- case V4L2_TUNER_MODE_STEREO:
- retval = amradio_set_stereo(radio, WANT_STEREO);
- break;
+ return amradio_set_stereo(radio, WANT_MONO);
+ default:
+ return amradio_set_stereo(radio, WANT_STEREO);
}
-
- return retval;
}
/* vidioc_s_frequency - set tuner radio frequency */
static int vidioc_s_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct amradio_device *radio = file->private_data;
+ struct amradio_device *radio = video_drvdata(file);
- if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ if (f->tuner != 0)
return -EINVAL;
- return amradio_setfreq(radio, f->frequency);
+ return amradio_set_freq(radio, clamp_t(unsigned, f->frequency,
+ FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL));
}
/* vidioc_g_frequency - get tuner radio frequency */
static int vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct amradio_device *radio = file->private_data;
+ struct amradio_device *radio = video_drvdata(file);
- if (f->tuner != 0)
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = radio->curfreq;
@@ -388,148 +346,101 @@ static int vidioc_g_frequency(struct file *file, void *priv,
return 0;
}
-/* vidioc_queryctrl - enumerate control items */
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
+static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
+ struct v4l2_hw_freq_seek *seek)
{
- switch (qc->id) {
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- }
-
- return -EINVAL;
-}
+ static u8 buf[8] = {
+ 0x3d, 0x32, 0x0f, 0x08, 0x3d, 0x32, 0x0f, 0x08
+ };
+ struct amradio_device *radio = video_drvdata(file);
+ unsigned long timeout;
+ int retval;
-/* vidioc_g_ctrl - get the value of a control */
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct amradio_device *radio = file->private_data;
+ if (seek->tuner != 0 || !seek->wrap_around)
+ return -EINVAL;
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- ctrl->value = radio->muted;
- return 0;
+ retval = amradio_send_cmd(radio,
+ AMRADIO_SET_SEARCH_LVL, 0, buf, 8, false);
+ if (retval)
+ return retval;
+ amradio_set_freq(radio, radio->curfreq);
+ retval = amradio_send_cmd(radio,
+ seek->seek_upward ? AMRADIO_SET_SEARCH_UP : AMRADIO_SET_SEARCH_DOWN,
+ 0, NULL, 0, false);
+ if (retval)
+ return retval;
+ timeout = jiffies + msecs_to_jiffies(30000);
+ for (;;) {
+ if (time_after(jiffies, timeout)) {
+ retval = -EAGAIN;
+ break;
+ }
+ if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+ retval = amradio_send_cmd(radio, AMRADIO_GET_READY_FLAG,
+ 0, NULL, 0, true);
+ if (retval)
+ continue;
+ amradio_send_cmd(radio, AMRADIO_GET_FREQ, 0, NULL, 0, true);
+ if (radio->buffer[1] || radio->buffer[2]) {
+ radio->curfreq = (radio->buffer[1] << 8) | radio->buffer[2];
+ radio->curfreq = (radio->curfreq - 0x10) * 200;
+ amradio_send_cmd(radio, AMRADIO_STOP_SEARCH,
+ 0, NULL, 0, false);
+ amradio_set_freq(radio, radio->curfreq);
+ retval = 0;
+ break;
+ }
}
-
- return -EINVAL;
+ amradio_send_cmd(radio, AMRADIO_STOP_SEARCH, 0, NULL, 0, false);
+ amradio_set_freq(radio, radio->curfreq);
+ return retval;
}
-/* vidioc_s_ctrl - set the value of a control */
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int usb_amradio_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct amradio_device *radio = file->private_data;
- int retval = -EINVAL;
+ struct amradio_device *radio =
+ container_of(ctrl->handler, struct amradio_device, hdl);
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- if (ctrl->value)
- retval = amradio_set_mute(radio, AMRADIO_STOP);
- else
- retval = amradio_set_mute(radio, AMRADIO_START);
-
- break;
+ return amradio_set_mute(radio, ctrl->val);
}
- return retval;
-}
-
-/* vidioc_g_audio - get audio attributes */
-static int vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- if (a->index > 1)
- return -EINVAL;
-
- strcpy(a->name, "Radio");
- a->capability = V4L2_AUDCAP_STEREO;
- return 0;
-}
-
-/* vidioc_s_audio - set audio attributes */
-static int vidioc_s_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- if (a->index != 0)
- return -EINVAL;
- return 0;
-}
-
-/* vidioc_g_input - get input */
-static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-/* vidioc_s_input - set input */
-static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
-{
- if (i != 0)
- return -EINVAL;
- return 0;
+ return -EINVAL;
}
static int usb_amradio_init(struct amradio_device *radio)
{
int retval;
- retval = amradio_set_mute(radio, AMRADIO_STOP);
+ retval = amradio_set_mute(radio, true);
if (retval)
goto out_err;
-
- retval = amradio_set_stereo(radio, WANT_STEREO);
+ retval = amradio_set_stereo(radio, true);
if (retval)
goto out_err;
-
- radio->initialized = 1;
- goto out;
-
-out_err:
- amradio_dev_err(&radio->videodev.dev, "initialization failed\n");
-out:
- return retval;
-}
-
-/* open device - amradio_start() and amradio_setfreq() */
-static int usb_amradio_open(struct file *file)
-{
- struct amradio_device *radio = video_drvdata(file);
- int retval;
-
- file->private_data = radio;
- retval = usb_autopm_get_interface(radio->intf);
+ retval = amradio_set_freq(radio, radio->curfreq);
if (retval)
- return retval;
+ goto out_err;
+ return 0;
- if (unlikely(!radio->initialized)) {
- retval = usb_amradio_init(radio);
- if (retval)
- usb_autopm_put_interface(radio->intf);
- }
+out_err:
+ amradio_dev_err(&radio->vdev.dev, "initialization failed\n");
return retval;
}
-/*close device */
-static int usb_amradio_close(struct file *file)
-{
- struct amradio_device *radio = file->private_data;
-
- if (video_is_registered(&radio->videodev))
- usb_autopm_put_interface(radio->intf);
- return 0;
-}
-
/* Suspend device - stop device. Need to be checked and fixed */
static int usb_amradio_suspend(struct usb_interface *intf, pm_message_t message)
{
struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
- if (!radio->muted && radio->initialized) {
- amradio_set_mute(radio, AMRADIO_STOP);
- radio->muted = 0;
+ if (!radio->muted) {
+ amradio_set_mute(radio, true);
+ radio->muted = false;
}
mutex_unlock(&radio->lock);
@@ -543,31 +454,28 @@ static int usb_amradio_resume(struct usb_interface *intf)
struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
- if (unlikely(!radio->initialized))
- goto unlock;
-
- if (radio->stereo)
- amradio_set_stereo(radio, WANT_STEREO);
- else
- amradio_set_stereo(radio, WANT_MONO);
-
- amradio_setfreq(radio, radio->curfreq);
+ amradio_set_stereo(radio, radio->stereo);
+ amradio_set_freq(radio, radio->curfreq);
if (!radio->muted)
- amradio_set_mute(radio, AMRADIO_START);
+ amradio_set_mute(radio, false);
-unlock:
mutex_unlock(&radio->lock);
dev_info(&intf->dev, "coming out of suspend..\n");
return 0;
}
+static const struct v4l2_ctrl_ops usb_amradio_ctrl_ops = {
+ .s_ctrl = usb_amradio_s_ctrl,
+};
+
/* File system interface */
static const struct v4l2_file_operations usb_amradio_fops = {
.owner = THIS_MODULE,
- .open = usb_amradio_open,
- .release = usb_amradio_close,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
.unlocked_ioctl = video_ioctl2,
};
@@ -577,20 +485,19 @@ static const struct v4l2_ioctl_ops usb_amradio_ioctl_ops = {
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_g_audio = vidioc_g_audio,
- .vidioc_s_audio = vidioc_s_audio,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
+ .vidioc_s_hw_freq_seek = vidioc_s_hw_freq_seek,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
-static void usb_amradio_video_device_release(struct video_device *videodev)
+static void usb_amradio_release(struct v4l2_device *v4l2_dev)
{
- struct amradio_device *radio = video_get_drvdata(videodev);
+ struct amradio_device *radio = to_amradio_dev(v4l2_dev);
/* free rest memory */
+ v4l2_ctrl_handler_free(&radio->hdl);
+ v4l2_device_unregister(&radio->v4l2_dev);
kfree(radio->buffer);
kfree(radio);
}
@@ -624,23 +531,38 @@ static int usb_amradio_probe(struct usb_interface *intf,
goto err_v4l2;
}
+ v4l2_ctrl_handler_init(&radio->hdl, 1);
+ v4l2_ctrl_new_std(&radio->hdl, &usb_amradio_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ if (radio->hdl.error) {
+ retval = radio->hdl.error;
+ dev_err(&intf->dev, "couldn't register control\n");
+ goto err_ctrl;
+ }
mutex_init(&radio->lock);
- strlcpy(radio->videodev.name, radio->v4l2_dev.name,
- sizeof(radio->videodev.name));
- radio->videodev.v4l2_dev = &radio->v4l2_dev;
- radio->videodev.fops = &usb_amradio_fops;
- radio->videodev.ioctl_ops = &usb_amradio_ioctl_ops;
- radio->videodev.release = usb_amradio_video_device_release;
- radio->videodev.lock = &radio->lock;
+ radio->v4l2_dev.ctrl_handler = &radio->hdl;
+ radio->v4l2_dev.release = usb_amradio_release;
+ strlcpy(radio->vdev.name, radio->v4l2_dev.name,
+ sizeof(radio->vdev.name));
+ radio->vdev.v4l2_dev = &radio->v4l2_dev;
+ radio->vdev.fops = &usb_amradio_fops;
+ radio->vdev.ioctl_ops = &usb_amradio_ioctl_ops;
+ radio->vdev.release = video_device_release_empty;
+ radio->vdev.lock = &radio->lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &radio->vdev.flags);
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
+ usb_set_intfdata(intf, &radio->v4l2_dev);
radio->curfreq = 95.16 * FREQ_MUL;
- video_set_drvdata(&radio->videodev, radio);
+ video_set_drvdata(&radio->vdev, radio);
+ retval = usb_amradio_init(radio);
+ if (retval)
+ goto err_vdev;
- retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO,
+ retval = video_register_device(&radio->vdev, VFL_TYPE_RADIO,
radio_nr);
if (retval < 0) {
dev_err(&intf->dev, "could not register video device\n");
@@ -650,6 +572,8 @@ static int usb_amradio_probe(struct usb_interface *intf,
return 0;
err_vdev:
+ v4l2_ctrl_handler_free(&radio->hdl);
+err_ctrl:
v4l2_device_unregister(&radio->v4l2_dev);
err_v4l2:
kfree(radio->buffer);
@@ -659,4 +583,24 @@ err:
return retval;
}
+/* USB Device ID List */
+static struct usb_device_id usb_amradio_device_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(USB_AMRADIO_VENDOR, USB_AMRADIO_PRODUCT,
+ USB_CLASS_HID, 0, 0) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, usb_amradio_device_table);
+
+/* USB subsystem interface */
+static struct usb_driver usb_amradio_driver = {
+ .name = MR800_DRIVER_NAME,
+ .probe = usb_amradio_probe,
+ .disconnect = usb_amradio_disconnect,
+ .suspend = usb_amradio_suspend,
+ .resume = usb_amradio_resume,
+ .reset_resume = usb_amradio_resume,
+ .id_table = usb_amradio_device_table,
+};
+
module_usb_driver(usb_amradio_driver);
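Editor's note: the new amradio_send_cmd() above frames every command in the same 8-byte header before the optional follow-up transfer. The helper below just restates that layout for readability; it is not a separate API in the driver, and the mr800_fill_cmd name is made up.

#include <linux/types.h>

/* Byte layout used by amradio_send_cmd() in the hunk above:
 * a fixed 0x00 0x55 0xaa signature, the length of any extra payload,
 * the command byte and its argument, and a trailing byte that is 8
 * when an 8-byte follow-up transfer (extra data out, or a reply in)
 * will follow, 0 otherwise.
 */
static void mr800_fill_cmd(u8 buf[8], u8 cmd, u8 arg,
			   u8 extralen, bool follow_up)
{
	buf[0] = 0x00;
	buf[1] = 0x55;
	buf[2] = 0xaa;
	buf[3] = extralen;
	buf[4] = cmd;
	buf[5] = arg;
	buf[6] = 0x00;
	buf[7] = follow_up ? 8 : 0;
}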
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index b275c5d0fe9a..b1f844c64fde 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -17,6 +17,7 @@
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/mutex.h>
#include <linux/io.h> /* outb, outb_p */
+#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include "radio-isa.h"
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 22c5743bf9db..a81d723b8c77 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -1,4 +1,4 @@
-/* SF16-FMI and SF16-FMP radio driver for Linux radio support
+/* SF16-FMI, SF16-FMP and SF16-FMD radio driver for Linux radio support
* heavily based on rtrack driver...
* (c) 1997 M. Kirkwood
* (c) 1998 Petr Vandrovec, vandrove@vc.cvut.cz
@@ -11,7 +11,7 @@
*
* Frequency control is done digitally -- ie out(port,encodefreq(95.8));
* No volume control - only mute/unmute - you have to use line volume
- * control on SB-part of SF16-FMI/SF16-FMP
+ * control on SB-part of SF16-FMI/SF16-FMP/SF16-FMD
*
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
*/
@@ -29,7 +29,7 @@
#include <media/v4l2-ioctl.h>
MODULE_AUTHOR("Petr Vandrovec, vandrove@vc.cvut.cz and M. Kirkwood");
-MODULE_DESCRIPTION("A driver for the SF16-FMI and SF16-FMP radio.");
+MODULE_DESCRIPTION("A driver for the SF16-FMI, SF16-FMP and SF16-FMD radio.");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.0.3");
@@ -37,7 +37,7 @@ static int io = -1;
static int radio_nr = -1;
module_param(io, int, 0);
-MODULE_PARM_DESC(io, "I/O address of the SF16-FMI or SF16-FMP card (0x284 or 0x384)");
+MODULE_PARM_DESC(io, "I/O address of the SF16-FMI/SF16-FMP/SF16-FMD card (0x284 or 0x384)");
module_param(radio_nr, int, 0);
struct fmi
@@ -130,7 +130,7 @@ static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *v)
{
strlcpy(v->driver, "radio-sf16fmi", sizeof(v->driver));
- strlcpy(v->card, "SF16-FMx radio", sizeof(v->card));
+ strlcpy(v->card, "SF16-FMI/FMP/FMD radio", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
@@ -277,8 +277,12 @@ static const struct v4l2_ioctl_ops fmi_ioctl_ops = {
/* ladis: this is my card. does any other types exist? */
static struct isapnp_device_id id_table[] __devinitdata = {
+ /* SF16-FMI */
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
ISAPNP_VENDOR('M','F','R'), ISAPNP_FUNCTION(0xad10), 0},
+ /* SF16-FMD */
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('M','F','R'), ISAPNP_FUNCTION(0xad12), 0},
{ ISAPNP_CARD_END, },
};
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 7c69214334bf..52b8011f1b23 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -1,4 +1,4 @@
-/* SF16-FMR2 radio driver for Linux
+/* SF16-FMR2 and SF16-FMD2 radio driver for Linux
* Copyright (c) 2011 Ondrej Zary
*
* Original driver was (c) 2000-2002 Ziglio Frediano, freddy77@angelfire.com
@@ -13,15 +13,19 @@
#include <linux/ioport.h> /* request_region */
#include <linux/io.h> /* outb, outb_p */
#include <linux/isa.h>
+#include <linux/pnp.h>
#include <sound/tea575x-tuner.h>
MODULE_AUTHOR("Ondrej Zary");
-MODULE_DESCRIPTION("MediaForte SF16-FMR2 FM radio card driver");
+MODULE_DESCRIPTION("MediaForte SF16-FMR2 and SF16-FMD2 FM radio card driver");
MODULE_LICENSE("GPL");
-static int radio_nr = -1;
-module_param(radio_nr, int, 0444);
-MODULE_PARM_DESC(radio_nr, "Radio device number");
+/* these cards can only use two different ports (0x384 and 0x284) */
+#define FMR2_MAX 2
+
+static int radio_nr[FMR2_MAX] = { [0 ... (FMR2_MAX - 1)] = -1 };
+module_param_array(radio_nr, int, NULL, 0444);
+MODULE_PARM_DESC(radio_nr, "Radio device numbers");
struct fmr2 {
int io;
@@ -29,9 +33,15 @@ struct fmr2 {
struct snd_tea575x tea;
struct v4l2_ctrl *volume;
struct v4l2_ctrl *balance;
+ bool is_fmd2;
};
-/* the port is hardwired so no need to support multiple cards */
+static int num_fmr2_cards;
+static struct fmr2 *fmr2_cards[FMR2_MAX];
+static bool isa_registered;
+static bool pnp_registered;
+
+/* the port is hardwired on SF16-FMR2 */
#define FMR2_PORT 0x384
/* TEA575x tuner pins */
@@ -174,7 +184,8 @@ static int fmr2_tea_ext_init(struct snd_tea575x *tea)
{
struct fmr2 *fmr2 = tea->private_data;
- if (inb(fmr2->io) & FMR2_HASVOL) {
+ /* FMR2 can have volume control, FMD2 can't (uses SB16 mixer) */
+ if (!fmr2->is_fmd2 && inb(fmr2->io) & FMR2_HASVOL) {
fmr2->volume = v4l2_ctrl_new_std(&tea->ctrl_handler, &fmr2_ctrl_ops, V4L2_CID_AUDIO_VOLUME, 0, 68, 2, 56);
fmr2->balance = v4l2_ctrl_new_std(&tea->ctrl_handler, &fmr2_ctrl_ops, V4L2_CID_AUDIO_BALANCE, -68, 68, 2, 0);
if (tea->ctrl_handler.error) {
@@ -186,22 +197,28 @@ static int fmr2_tea_ext_init(struct snd_tea575x *tea)
return 0;
}
-static int __devinit fmr2_probe(struct device *pdev, unsigned int dev)
+static struct pnp_device_id fmr2_pnp_ids[] __devinitdata = {
+ { .id = "MFRad13" }, /* tuner subdevice of SF16-FMD2 */
+ { .id = "" }
+};
+MODULE_DEVICE_TABLE(pnp, fmr2_pnp_ids);
+
+static int __devinit fmr2_probe(struct fmr2 *fmr2, struct device *pdev, int io)
{
- struct fmr2 *fmr2;
- int err;
+ int err, i;
+ char *card_name = fmr2->is_fmd2 ? "SF16-FMD2" : "SF16-FMR2";
- fmr2 = kzalloc(sizeof(*fmr2), GFP_KERNEL);
- if (fmr2 == NULL)
- return -ENOMEM;
+ /* avoid errors if a card was already registered at given port */
+ for (i = 0; i < num_fmr2_cards; i++)
+ if (io == fmr2_cards[i]->io)
+ return -EBUSY;
- strlcpy(fmr2->v4l2_dev.name, dev_name(pdev),
- sizeof(fmr2->v4l2_dev.name));
- fmr2->io = FMR2_PORT;
+ strlcpy(fmr2->v4l2_dev.name, "radio-sf16fmr2",
+ sizeof(fmr2->v4l2_dev.name));
+ fmr2->io = io;
if (!request_region(fmr2->io, 2, fmr2->v4l2_dev.name)) {
printk(KERN_ERR "radio-sf16fmr2: I/O port 0x%x already in use\n", fmr2->io);
- kfree(fmr2);
return -EBUSY;
}
@@ -210,56 +227,121 @@ static int __devinit fmr2_probe(struct device *pdev, unsigned int dev)
if (err < 0) {
v4l2_err(&fmr2->v4l2_dev, "Could not register v4l2_device\n");
release_region(fmr2->io, 2);
- kfree(fmr2);
return err;
}
fmr2->tea.v4l2_dev = &fmr2->v4l2_dev;
fmr2->tea.private_data = fmr2;
- fmr2->tea.radio_nr = radio_nr;
+ fmr2->tea.radio_nr = radio_nr[num_fmr2_cards];
fmr2->tea.ops = &fmr2_tea_ops;
fmr2->tea.ext_init = fmr2_tea_ext_init;
- strlcpy(fmr2->tea.card, "SF16-FMR2", sizeof(fmr2->tea.card));
- snprintf(fmr2->tea.bus_info, sizeof(fmr2->tea.bus_info), "ISA:%s",
- fmr2->v4l2_dev.name);
+ strlcpy(fmr2->tea.card, card_name, sizeof(fmr2->tea.card));
+ snprintf(fmr2->tea.bus_info, sizeof(fmr2->tea.bus_info), "%s:%s",
+ fmr2->is_fmd2 ? "PnP" : "ISA", dev_name(pdev));
if (snd_tea575x_init(&fmr2->tea)) {
printk(KERN_ERR "radio-sf16fmr2: Unable to detect TEA575x tuner\n");
release_region(fmr2->io, 2);
- kfree(fmr2);
return -ENODEV;
}
- printk(KERN_INFO "radio-sf16fmr2: SF16-FMR2 radio card at 0x%x.\n", fmr2->io);
+ printk(KERN_INFO "radio-sf16fmr2: %s radio card at 0x%x.\n",
+ card_name, fmr2->io);
return 0;
}
-static int __exit fmr2_remove(struct device *pdev, unsigned int dev)
+static int __devinit fmr2_isa_match(struct device *pdev, unsigned int ndev)
+{
+ struct fmr2 *fmr2 = kzalloc(sizeof(*fmr2), GFP_KERNEL);
+ if (!fmr2)
+ return 0;
+
+ if (fmr2_probe(fmr2, pdev, FMR2_PORT)) {
+ kfree(fmr2);
+ return 0;
+ }
+ dev_set_drvdata(pdev, fmr2);
+ fmr2_cards[num_fmr2_cards++] = fmr2;
+
+ return 1;
+}
+
+static int __devinit fmr2_pnp_probe(struct pnp_dev *pdev,
+ const struct pnp_device_id *id)
{
- struct fmr2 *fmr2 = dev_get_drvdata(pdev);
+ int ret;
+ struct fmr2 *fmr2 = kzalloc(sizeof(*fmr2), GFP_KERNEL);
+ if (!fmr2)
+ return -ENOMEM;
+ fmr2->is_fmd2 = true;
+ ret = fmr2_probe(fmr2, &pdev->dev, pnp_port_start(pdev, 0));
+ if (ret) {
+ kfree(fmr2);
+ return ret;
+ }
+ pnp_set_drvdata(pdev, fmr2);
+ fmr2_cards[num_fmr2_cards++] = fmr2;
+
+ return 0;
+}
+
+static void __devexit fmr2_remove(struct fmr2 *fmr2)
+{
snd_tea575x_exit(&fmr2->tea);
release_region(fmr2->io, 2);
v4l2_device_unregister(&fmr2->v4l2_dev);
kfree(fmr2);
+}
+
+static int __devexit fmr2_isa_remove(struct device *pdev, unsigned int ndev)
+{
+ fmr2_remove(dev_get_drvdata(pdev));
+ dev_set_drvdata(pdev, NULL);
+
return 0;
}
-struct isa_driver fmr2_driver = {
- .probe = fmr2_probe,
- .remove = fmr2_remove,
+static void __devexit fmr2_pnp_remove(struct pnp_dev *pdev)
+{
+ fmr2_remove(pnp_get_drvdata(pdev));
+ pnp_set_drvdata(pdev, NULL);
+}
+
+struct isa_driver fmr2_isa_driver = {
+ .match = fmr2_isa_match,
+ .remove = __devexit_p(fmr2_isa_remove),
.driver = {
.name = "radio-sf16fmr2",
},
};
+struct pnp_driver fmr2_pnp_driver = {
+ .name = "radio-sf16fmr2",
+ .id_table = fmr2_pnp_ids,
+ .probe = fmr2_pnp_probe,
+ .remove = __devexit_p(fmr2_pnp_remove),
+};
+
static int __init fmr2_init(void)
{
- return isa_register_driver(&fmr2_driver, 1);
+ int ret;
+
+ ret = pnp_register_driver(&fmr2_pnp_driver);
+ if (!ret)
+ pnp_registered = true;
+ ret = isa_register_driver(&fmr2_isa_driver, 1);
+ if (!ret)
+ isa_registered = true;
+
+ return (pnp_registered || isa_registered) ? 0 : ret;
}
static void __exit fmr2_exit(void)
{
- isa_unregister_driver(&fmr2_driver);
+ if (pnp_registered)
+ pnp_unregister_driver(&fmr2_pnp_driver);
+ if (isa_registered)
+ isa_unregister_driver(&fmr2_isa_driver);
}
module_init(fmr2_init);
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index 5d9a90ac3a1c..7052adc0c0b0 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -223,7 +223,7 @@ static struct platform_driver timbradio_platform_driver = {
.owner = THIS_MODULE,
},
.probe = timbradio_probe,
- .remove = timbradio_remove,
+ .remove = __devexit_p(timbradio_remove),
};
module_platform_driver(timbradio_platform_driver);
diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c
index 9474706350f8..bb953ef75f61 100644
--- a/drivers/media/radio/saa7706h.c
+++ b/drivers/media/radio/saa7706h.c
@@ -430,7 +430,7 @@ static struct i2c_driver saa7706h_driver = {
.name = DRIVER_NAME,
},
.probe = saa7706h_probe,
- .remove = saa7706h_remove,
+ .remove = __devexit_p(saa7706h_remove),
.id_table = saa7706h_id,
};
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index 0e740c98786c..969cf494d85b 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -196,9 +196,9 @@ static int si470x_set_chan(struct si470x_device *radio, unsigned short chan)
}
if ((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0)
- dev_warn(&radio->videodev->dev, "tune does not complete\n");
+ dev_warn(&radio->videodev.dev, "tune does not complete\n");
if (timed_out)
- dev_warn(&radio->videodev->dev,
+ dev_warn(&radio->videodev.dev,
"tune timed out after %u ms\n", tune_timeout);
stop:
@@ -262,7 +262,7 @@ static int si470x_get_freq(struct si470x_device *radio, unsigned int *freq)
*/
int si470x_set_freq(struct si470x_device *radio, unsigned int freq)
{
- unsigned int spacing, band_bottom;
+ unsigned int spacing, band_bottom, band_top;
unsigned short chan;
/* Spacing (kHz) */
@@ -278,19 +278,26 @@ int si470x_set_freq(struct si470x_device *radio, unsigned int freq)
spacing = 0.050 * FREQ_MUL; break;
};
- /* Bottom of Band (MHz) */
+ /* Bottom/Top of Band (MHz) */
switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) {
/* 0: 87.5 - 108 MHz (USA, Europe) */
case 0:
- band_bottom = 87.5 * FREQ_MUL; break;
+ band_bottom = 87.5 * FREQ_MUL;
+ band_top = 108 * FREQ_MUL;
+ break;
/* 1: 76 - 108 MHz (Japan wide band) */
default:
- band_bottom = 76 * FREQ_MUL; break;
+ band_bottom = 76 * FREQ_MUL;
+ band_top = 108 * FREQ_MUL;
+ break;
/* 2: 76 - 90 MHz (Japan) */
case 2:
- band_bottom = 76 * FREQ_MUL; break;
+ band_bottom = 76 * FREQ_MUL;
+ band_top = 90 * FREQ_MUL;
+ break;
};
+ freq = clamp(freq, band_bottom, band_top);
/* Chan = [ Freq (Mhz) - Bottom of Band (MHz) ] / Spacing (kHz) */
chan = (freq - band_bottom) / spacing;
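Editor's note: a worked example of the Chan formula above, assuming the driver's usual FREQ_MUL of 16000 units per MHz (62.5 Hz per unit — see si470x.h for the authoritative value) and a 200 kHz channel spacing:

/* band 0 (USA/Europe), 200 kHz spacing, request 100.0 MHz:
 *   freq        = 100.0 * 16000 = 1600000
 *   band_bottom =  87.5 * 16000 = 1400000
 *   spacing     =   0.2 * 16000 =    3200
 *   chan        = (1600000 - 1400000) / 3200 = 62   (62.5 truncated)
 *
 * With the clamp() added above, a request below 87.5 MHz or above
 * 108 MHz is first pulled back into the band instead of producing an
 * out-of-range channel number.
 */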
@@ -320,7 +327,7 @@ static int si470x_set_seek(struct si470x_device *radio,
radio->registers[POWERCFG] &= ~POWERCFG_SEEKUP;
retval = si470x_set_register(radio, POWERCFG);
if (retval < 0)
- goto done;
+ return retval;
/* currently I2C driver only uses interrupt way to seek */
if (radio->stci_enabled) {
@@ -344,24 +351,19 @@ static int si470x_set_seek(struct si470x_device *radio,
}
if ((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0)
- dev_warn(&radio->videodev->dev, "seek does not complete\n");
+ dev_warn(&radio->videodev.dev, "seek does not complete\n");
if (radio->registers[STATUSRSSI] & STATUSRSSI_SF)
- dev_warn(&radio->videodev->dev,
+ dev_warn(&radio->videodev.dev,
"seek failed / band limit reached\n");
- if (timed_out)
- dev_warn(&radio->videodev->dev,
- "seek timed out after %u ms\n", seek_timeout);
stop:
/* stop seeking */
radio->registers[POWERCFG] &= ~POWERCFG_SEEK;
retval = si470x_set_register(radio, POWERCFG);
-done:
/* try again, if timed out */
- if ((retval == 0) && timed_out)
- retval = -EAGAIN;
-
+ if (retval == 0 && timed_out)
+ return -EAGAIN;
return retval;
}
@@ -463,7 +465,6 @@ static ssize_t si470x_fops_read(struct file *file, char __user *buf,
unsigned int block_count = 0;
/* switch on rds reception */
- mutex_lock(&radio->lock);
if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
si470x_rds_on(radio);
@@ -505,7 +506,6 @@ static ssize_t si470x_fops_read(struct file *file, char __user *buf,
}
done:
- mutex_unlock(&radio->lock);
return retval;
}
@@ -517,19 +517,19 @@ static unsigned int si470x_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
+ unsigned long req_events = poll_requested_events(pts);
+ int retval = v4l2_ctrl_poll(file, pts);
- /* switch on rds reception */
-
- mutex_lock(&radio->lock);
- if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
- si470x_rds_on(radio);
- mutex_unlock(&radio->lock);
+ if (req_events & (POLLIN | POLLRDNORM)) {
+ /* switch on rds reception */
+ if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
+ si470x_rds_on(radio);
- poll_wait(file, &radio->read_queue, pts);
+ poll_wait(file, &radio->read_queue, pts);
- if (radio->rd_index != radio->wr_index)
- retval = POLLIN | POLLRDNORM;
+ if (radio->rd_index != radio->wr_index)
+ retval |= POLLIN | POLLRDNORM;
+ }
return retval;
}
@@ -553,134 +553,26 @@ static const struct v4l2_file_operations si470x_fops = {
* Video4Linux Interface
**************************************************************************/
-/*
- * si470x_vidioc_queryctrl - enumerate control items
- */
-static int si470x_vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- struct si470x_device *radio = video_drvdata(file);
- int retval = -EINVAL;
-
- /* abort if qc->id is below V4L2_CID_BASE */
- if (qc->id < V4L2_CID_BASE)
- goto done;
-
- /* search video control */
- switch (qc->id) {
- case V4L2_CID_AUDIO_VOLUME:
- return v4l2_ctrl_query_fill(qc, 0, 15, 1, 15);
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- }
-
- /* disable unsupported base controls */
- /* to satisfy kradio and such apps */
- if ((retval == -EINVAL) && (qc->id < V4L2_CID_LASTP1)) {
- qc->flags = V4L2_CTRL_FLAG_DISABLED;
- retval = 0;
- }
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "query controls failed with %d\n", retval);
- return retval;
-}
-
-
-/*
- * si470x_vidioc_g_ctrl - get the value of a control
- */
-static int si470x_vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- mutex_lock(&radio->lock);
- /* safety checks */
- retval = si470x_disconnect_check(radio);
- if (retval)
- goto done;
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_VOLUME:
- ctrl->value = radio->registers[SYSCONFIG2] &
- SYSCONFIG2_VOLUME;
- break;
- case V4L2_CID_AUDIO_MUTE:
- ctrl->value = ((radio->registers[POWERCFG] &
- POWERCFG_DMUTE) == 0) ? 1 : 0;
- break;
- default:
- retval = -EINVAL;
- }
-
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "get control failed with %d\n", retval);
-
- mutex_unlock(&radio->lock);
- return retval;
-}
-
-
-/*
- * si470x_vidioc_s_ctrl - set the value of a control
- */
-static int si470x_vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int si470x_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- mutex_lock(&radio->lock);
- /* safety checks */
- retval = si470x_disconnect_check(radio);
- if (retval)
- goto done;
+ struct si470x_device *radio =
+ container_of(ctrl->handler, struct si470x_device, hdl);
switch (ctrl->id) {
case V4L2_CID_AUDIO_VOLUME:
radio->registers[SYSCONFIG2] &= ~SYSCONFIG2_VOLUME;
- radio->registers[SYSCONFIG2] |= ctrl->value;
- retval = si470x_set_register(radio, SYSCONFIG2);
- break;
+ radio->registers[SYSCONFIG2] |= ctrl->val;
+ return si470x_set_register(radio, SYSCONFIG2);
case V4L2_CID_AUDIO_MUTE:
- if (ctrl->value == 1)
+ if (ctrl->val)
radio->registers[POWERCFG] &= ~POWERCFG_DMUTE;
else
radio->registers[POWERCFG] |= POWERCFG_DMUTE;
- retval = si470x_set_register(radio, POWERCFG);
- break;
+ return si470x_set_register(radio, POWERCFG);
default:
- retval = -EINVAL;
+ return -EINVAL;
}
-
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "set control failed with %d\n", retval);
- mutex_unlock(&radio->lock);
- return retval;
-}
-
-
-/*
- * si470x_vidioc_g_audio - get audio attributes
- */
-static int si470x_vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *audio)
-{
- /* driver constants */
- audio->index = 0;
- strcpy(audio->name, "Radio");
- audio->capability = V4L2_AUDCAP_STEREO;
- audio->mode = 0;
-
- return 0;
}
@@ -691,22 +583,14 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *tuner)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- mutex_lock(&radio->lock);
- /* safety checks */
- retval = si470x_disconnect_check(radio);
- if (retval)
- goto done;
+ int retval;
- if (tuner->index != 0) {
- retval = -EINVAL;
- goto done;
- }
+ if (tuner->index != 0)
+ return -EINVAL;
retval = si470x_get_register(radio, STATUSRSSI);
if (retval < 0)
- goto done;
+ return retval;
/* driver constants */
strcpy(tuner->name, "FM");
@@ -737,7 +621,7 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
if ((radio->registers[STATUSRSSI] & STATUSRSSI_ST) == 0)
tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
else
- tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
+ tuner->rxsubchans = V4L2_TUNER_SUB_STEREO;
/* If there is a reliable method of detecting an RDS channel,
then this code should check for that before setting this
RDS subchannel. */
@@ -754,16 +638,13 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
tuner->signal = (radio->registers[STATUSRSSI] & STATUSRSSI_RSSI);
/* the ideal factor is 0xffff/75 = 873.8 */
tuner->signal = (tuner->signal * 873) + (8 * tuner->signal / 10);
+ if (tuner->signal > 0xffff)
+ tuner->signal = 0xffff;
/* automatic frequency control: -1: freq too low, 1: freq too high */
/* AFCRL only indicates that the freq. differs, not whether it is too low or too high */
tuner->afc = (radio->registers[STATUSRSSI] & STATUSRSSI_AFCRL) ? 1 : 0;
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "get tuner failed with %d\n", retval);
- mutex_unlock(&radio->lock);
return retval;
}
@@ -775,16 +656,9 @@ static int si470x_vidioc_s_tuner(struct file *file, void *priv,
struct v4l2_tuner *tuner)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- mutex_lock(&radio->lock);
- /* safety checks */
- retval = si470x_disconnect_check(radio);
- if (retval)
- goto done;
if (tuner->index != 0)
- goto done;
+ return -EINVAL;
/* mono/stereo selector */
switch (tuner->audmode) {
@@ -792,20 +666,12 @@ static int si470x_vidioc_s_tuner(struct file *file, void *priv,
radio->registers[POWERCFG] |= POWERCFG_MONO; /* force mono */
break;
case V4L2_TUNER_MODE_STEREO:
+ default:
radio->registers[POWERCFG] &= ~POWERCFG_MONO; /* try stereo */
break;
- default:
- goto done;
}
- retval = si470x_set_register(radio, POWERCFG);
-
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "set tuner failed with %d\n", retval);
- mutex_unlock(&radio->lock);
- return retval;
+ return si470x_set_register(radio, POWERCFG);
}
@@ -816,28 +682,12 @@ static int si470x_vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *freq)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- /* safety checks */
- mutex_lock(&radio->lock);
- retval = si470x_disconnect_check(radio);
- if (retval)
- goto done;
- if (freq->tuner != 0) {
- retval = -EINVAL;
- goto done;
- }
+ if (freq->tuner != 0)
+ return -EINVAL;
freq->type = V4L2_TUNER_RADIO;
- retval = si470x_get_freq(radio, &freq->frequency);
-
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "get frequency failed with %d\n", retval);
- mutex_unlock(&radio->lock);
- return retval;
+ return si470x_get_freq(radio, &freq->frequency);
}
@@ -848,27 +698,11 @@ static int si470x_vidioc_s_frequency(struct file *file, void *priv,
struct v4l2_frequency *freq)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- mutex_lock(&radio->lock);
- /* safety checks */
- retval = si470x_disconnect_check(radio);
- if (retval)
- goto done;
- if (freq->tuner != 0) {
- retval = -EINVAL;
- goto done;
- }
+ if (freq->tuner != 0)
+ return -EINVAL;
- retval = si470x_set_freq(radio, freq->frequency);
-
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "set frequency failed with %d\n", retval);
- mutex_unlock(&radio->lock);
- return retval;
+ return si470x_set_freq(radio, freq->frequency);
}
@@ -879,44 +713,29 @@ static int si470x_vidioc_s_hw_freq_seek(struct file *file, void *priv,
struct v4l2_hw_freq_seek *seek)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- mutex_lock(&radio->lock);
- /* safety checks */
- retval = si470x_disconnect_check(radio);
- if (retval)
- goto done;
-
- if (seek->tuner != 0) {
- retval = -EINVAL;
- goto done;
- }
- retval = si470x_set_seek(radio, seek->wrap_around, seek->seek_upward);
+ if (seek->tuner != 0)
+ return -EINVAL;
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "set hardware frequency seek failed with %d\n", retval);
- mutex_unlock(&radio->lock);
- return retval;
+ return si470x_set_seek(radio, seek->wrap_around, seek->seek_upward);
}
+const struct v4l2_ctrl_ops si470x_ctrl_ops = {
+ .s_ctrl = si470x_s_ctrl,
+};
/*
* si470x_ioctl_ops - video device ioctl operations
*/
static const struct v4l2_ioctl_ops si470x_ioctl_ops = {
.vidioc_querycap = si470x_vidioc_querycap,
- .vidioc_queryctrl = si470x_vidioc_queryctrl,
- .vidioc_g_ctrl = si470x_vidioc_g_ctrl,
- .vidioc_s_ctrl = si470x_vidioc_s_ctrl,
- .vidioc_g_audio = si470x_vidioc_g_audio,
.vidioc_g_tuner = si470x_vidioc_g_tuner,
.vidioc_s_tuner = si470x_vidioc_s_tuner,
.vidioc_g_frequency = si470x_vidioc_g_frequency,
.vidioc_s_frequency = si470x_vidioc_s_frequency,
.vidioc_s_hw_freq_seek = si470x_vidioc_s_hw_freq_seek,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -926,6 +745,6 @@ static const struct v4l2_ioctl_ops si470x_ioctl_ops = {
struct video_device si470x_viddev_template = {
.fops = &si470x_fops,
.name = DRIVER_NAME,
- .release = video_device_release,
+ .release = video_device_release_empty,
.ioctl_ops = &si470x_ioctl_ops,
};
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 9b546a5523f3..a80044c5874e 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -162,20 +162,6 @@ static int si470x_get_all_registers(struct si470x_device *radio)
/**************************************************************************
- * General Driver Functions - DISCONNECT_CHECK
- **************************************************************************/
-
-/*
- * si470x_disconnect_check - check whether radio disconnects
- */
-int si470x_disconnect_check(struct si470x_device *radio)
-{
- return 0;
-}
-
-
-
-/**************************************************************************
* File Operations Interface
**************************************************************************/
@@ -185,12 +171,12 @@ int si470x_disconnect_check(struct si470x_device *radio)
int si470x_fops_open(struct file *file)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
+ int retval = v4l2_fh_open(file);
- mutex_lock(&radio->lock);
- radio->users++;
+ if (retval)
+ return retval;
- if (radio->users == 1) {
+ if (v4l2_fh_is_singular_file(file)) {
/* start radio */
retval = si470x_start(radio);
if (retval < 0)
@@ -205,7 +191,8 @@ int si470x_fops_open(struct file *file)
}
done:
- mutex_unlock(&radio->lock);
+ if (retval)
+ v4l2_fh_release(file);
return retval;
}
@@ -216,21 +203,12 @@ done:
int si470x_fops_release(struct file *file)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- /* safety check */
- if (!radio)
- return -ENODEV;
- mutex_lock(&radio->lock);
- radio->users--;
- if (radio->users == 0)
+ if (v4l2_fh_is_singular_file(file))
/* stop radio */
- retval = si470x_stop(radio);
+ si470x_stop(radio);
- mutex_unlock(&radio->lock);
-
- return retval;
+ return v4l2_fh_release(file);
}
@@ -371,32 +349,25 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
goto err_initial;
}
- radio->users = 0;
radio->client = client;
mutex_init(&radio->lock);
- /* video device allocation and initialization */
- radio->videodev = video_device_alloc();
- if (!radio->videodev) {
- retval = -ENOMEM;
- goto err_radio;
- }
- memcpy(radio->videodev, &si470x_viddev_template,
- sizeof(si470x_viddev_template));
- video_set_drvdata(radio->videodev, radio);
+ /* video device initialization */
+ radio->videodev = si470x_viddev_template;
+ video_set_drvdata(&radio->videodev, radio);
/* power up : need 110ms */
radio->registers[POWERCFG] = POWERCFG_ENABLE;
if (si470x_set_register(radio, POWERCFG) < 0) {
retval = -EIO;
- goto err_video;
+ goto err_radio;
}
msleep(110);
/* get device and chip versions */
if (si470x_get_all_registers(radio) < 0) {
retval = -EIO;
- goto err_video;
+ goto err_radio;
}
dev_info(&client->dev, "DeviceID=0x%4.4hx ChipID=0x%4.4hx\n",
radio->registers[DEVICEID], radio->registers[CHIPID]);
@@ -427,7 +398,7 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
radio->buffer = kmalloc(radio->buf_size, GFP_KERNEL);
if (!radio->buffer) {
retval = -EIO;
- goto err_video;
+ goto err_radio;
}
/* rds buffer configuration */
@@ -447,7 +418,7 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
}
/* register video device */
- retval = video_register_device(radio->videodev, VFL_TYPE_RADIO,
+ retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO,
radio_nr);
if (retval) {
dev_warn(&client->dev, "Could not register video device\n");
@@ -460,8 +431,6 @@ err_all:
free_irq(client->irq, radio);
err_rds:
kfree(radio->buffer);
-err_video:
- video_device_release(radio->videodev);
err_radio:
kfree(radio);
err_initial:
@@ -477,7 +446,7 @@ static __devexit int si470x_i2c_remove(struct i2c_client *client)
struct si470x_device *radio = i2c_get_clientdata(client);
free_irq(client->irq, radio);
- video_unregister_device(radio->videodev);
+ video_unregister_device(&radio->videodev);
kfree(radio);
return 0;
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index b7debb67932a..e9f638761296 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -367,23 +367,6 @@ static int si470x_get_scratch_page_versions(struct si470x_device *radio)
/**************************************************************************
- * General Driver Functions - DISCONNECT_CHECK
- **************************************************************************/
-
-/*
- * si470x_disconnect_check - check whether radio disconnects
- */
-int si470x_disconnect_check(struct si470x_device *radio)
-{
- if (radio->disconnected)
- return -EIO;
- else
- return 0;
-}
-
-
-
-/**************************************************************************
* RDS Driver Functions
**************************************************************************/
@@ -414,9 +397,6 @@ static void si470x_int_in_callback(struct urb *urb)
}
}
- /* safety checks */
- if (radio->disconnected)
- return;
if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
goto resubmit;
@@ -501,112 +481,30 @@ resubmit:
}
-
-/**************************************************************************
- * File Operations Interface
- **************************************************************************/
-
-/*
- * si470x_fops_open - file open
- */
int si470x_fops_open(struct file *file)
{
- struct si470x_device *radio = video_drvdata(file);
- int retval;
-
- mutex_lock(&radio->lock);
- radio->users++;
-
- retval = usb_autopm_get_interface(radio->intf);
- if (retval < 0) {
- radio->users--;
- retval = -EIO;
- goto done;
- }
-
- if (radio->users == 1) {
- /* start radio */
- retval = si470x_start(radio);
- if (retval < 0) {
- usb_autopm_put_interface(radio->intf);
- goto done;
- }
-
- /* initialize interrupt urb */
- usb_fill_int_urb(radio->int_in_urb, radio->usbdev,
- usb_rcvintpipe(radio->usbdev,
- radio->int_in_endpoint->bEndpointAddress),
- radio->int_in_buffer,
- le16_to_cpu(radio->int_in_endpoint->wMaxPacketSize),
- si470x_int_in_callback,
- radio,
- radio->int_in_endpoint->bInterval);
-
- radio->int_in_running = 1;
- mb();
-
- retval = usb_submit_urb(radio->int_in_urb, GFP_KERNEL);
- if (retval) {
- dev_info(&radio->intf->dev,
- "submitting int urb failed (%d)\n", retval);
- radio->int_in_running = 0;
- usb_autopm_put_interface(radio->intf);
- }
- }
-
-done:
- mutex_unlock(&radio->lock);
- return retval;
+ return v4l2_fh_open(file);
}
-
-/*
- * si470x_fops_release - file release
- */
int si470x_fops_release(struct file *file)
{
- struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- /* safety check */
- if (!radio) {
- retval = -ENODEV;
- goto done;
- }
-
- mutex_lock(&radio->lock);
- radio->users--;
- if (radio->users == 0) {
- /* shutdown interrupt handler */
- if (radio->int_in_running) {
- radio->int_in_running = 0;
- if (radio->int_in_urb)
- usb_kill_urb(radio->int_in_urb);
- }
-
- if (radio->disconnected) {
- video_unregister_device(radio->videodev);
- kfree(radio->int_in_buffer);
- kfree(radio->buffer);
- mutex_unlock(&radio->lock);
- kfree(radio);
- goto done;
- }
+ return v4l2_fh_release(file);
+}
- /* cancel read processes */
- wake_up_interruptible(&radio->read_queue);
+static void si470x_usb_release(struct v4l2_device *v4l2_dev)
+{
+ struct si470x_device *radio =
+ container_of(v4l2_dev, struct si470x_device, v4l2_dev);
- /* stop radio */
- retval = si470x_stop(radio);
- usb_autopm_put_interface(radio->intf);
- }
- mutex_unlock(&radio->lock);
-done:
- return retval;
+ usb_free_urb(radio->int_in_urb);
+ v4l2_ctrl_handler_free(&radio->hdl);
+ v4l2_device_unregister(&radio->v4l2_dev);
+ kfree(radio->int_in_buffer);
+ kfree(radio->buffer);
+ kfree(radio);
}
-
/**************************************************************************
* Video4Linux Interface
**************************************************************************/
@@ -623,13 +521,45 @@ int si470x_vidioc_querycap(struct file *file, void *priv,
strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
usb_make_path(radio->usbdev, capability->bus_info,
sizeof(capability->bus_info));
- capability->capabilities = V4L2_CAP_HW_FREQ_SEEK |
+ capability->device_caps = V4L2_CAP_HW_FREQ_SEEK |
V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
-
+ capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
+static int si470x_start_usb(struct si470x_device *radio)
+{
+ int retval;
+
+ /* start radio */
+ retval = si470x_start(radio);
+ if (retval < 0)
+ return retval;
+
+ v4l2_ctrl_handler_setup(&radio->hdl);
+
+ /* initialize interrupt urb */
+ usb_fill_int_urb(radio->int_in_urb, radio->usbdev,
+ usb_rcvintpipe(radio->usbdev,
+ radio->int_in_endpoint->bEndpointAddress),
+ radio->int_in_buffer,
+ le16_to_cpu(radio->int_in_endpoint->wMaxPacketSize),
+ si470x_int_in_callback,
+ radio,
+ radio->int_in_endpoint->bInterval);
+
+ radio->int_in_running = 1;
+ mb();
+
+ retval = usb_submit_urb(radio->int_in_urb, GFP_KERNEL);
+ if (retval) {
+ dev_info(&radio->intf->dev,
+ "submitting int urb failed (%d)\n", retval);
+ radio->int_in_running = 0;
+ }
+ return retval;
+}
/**************************************************************************
* USB Interface
@@ -653,8 +583,6 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
retval = -ENOMEM;
goto err_initial;
}
- radio->users = 0;
- radio->disconnected = 0;
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
mutex_init(&radio->lock);
@@ -691,20 +619,35 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
goto err_intbuffer;
}
- /* video device allocation and initialization */
- radio->videodev = video_device_alloc();
- if (!radio->videodev) {
- retval = -ENOMEM;
+ radio->v4l2_dev.release = si470x_usb_release;
+ retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
+ if (retval < 0) {
+ dev_err(&intf->dev, "couldn't register v4l2_device\n");
goto err_urb;
}
- memcpy(radio->videodev, &si470x_viddev_template,
- sizeof(si470x_viddev_template));
- video_set_drvdata(radio->videodev, radio);
+
+ v4l2_ctrl_handler_init(&radio->hdl, 2);
+ v4l2_ctrl_new_std(&radio->hdl, &si470x_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ v4l2_ctrl_new_std(&radio->hdl, &si470x_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, 0, 15, 1, 15);
+ if (radio->hdl.error) {
+ retval = radio->hdl.error;
+ dev_err(&intf->dev, "couldn't register control\n");
+ goto err_dev;
+ }
+ radio->videodev = si470x_viddev_template;
+ radio->videodev.ctrl_handler = &radio->hdl;
+ radio->videodev.lock = &radio->lock;
+ radio->videodev.v4l2_dev = &radio->v4l2_dev;
+ radio->videodev.release = video_device_release_empty;
+ set_bit(V4L2_FL_USE_FH_PRIO, &radio->videodev.flags);
+ video_set_drvdata(&radio->videodev, radio);
/* get device and chip versions */
if (si470x_get_all_registers(radio) < 0) {
retval = -EIO;
- goto err_video;
+ goto err_ctrl;
}
dev_info(&intf->dev, "DeviceID=0x%4.4hx ChipID=0x%4.4hx\n",
radio->registers[DEVICEID], radio->registers[CHIPID]);
@@ -721,7 +664,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
/* get software and hardware versions */
if (si470x_get_scratch_page_versions(radio) < 0) {
retval = -EIO;
- goto err_video;
+ goto err_ctrl;
}
dev_info(&intf->dev, "software version %d, hardware version %d\n",
radio->software_version, radio->hardware_version);
@@ -764,28 +707,35 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
radio->buffer = kmalloc(radio->buf_size, GFP_KERNEL);
if (!radio->buffer) {
retval = -EIO;
- goto err_video;
+ goto err_ctrl;
}
/* rds buffer configuration */
radio->wr_index = 0;
radio->rd_index = 0;
init_waitqueue_head(&radio->read_queue);
+ usb_set_intfdata(intf, radio);
+
+ /* start radio */
+ retval = si470x_start_usb(radio);
+ if (retval < 0)
+ goto err_all;
/* register video device */
- retval = video_register_device(radio->videodev, VFL_TYPE_RADIO,
+ retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO,
radio_nr);
if (retval) {
- dev_warn(&intf->dev, "Could not register video device\n");
+ dev_err(&intf->dev, "Could not register video device\n");
goto err_all;
}
- usb_set_intfdata(intf, radio);
return 0;
err_all:
kfree(radio->buffer);
-err_video:
- video_device_release(radio->videodev);
+err_ctrl:
+ v4l2_ctrl_handler_free(&radio->hdl);
+err_dev:
+ v4l2_device_unregister(&radio->v4l2_dev);
err_urb:
usb_free_urb(radio->int_in_urb);
err_intbuffer:
@@ -803,8 +753,22 @@ err_initial:
static int si470x_usb_driver_suspend(struct usb_interface *intf,
pm_message_t message)
{
+ struct si470x_device *radio = usb_get_intfdata(intf);
+
dev_info(&intf->dev, "suspending now...\n");
+ /* shutdown interrupt handler */
+ if (radio->int_in_running) {
+ radio->int_in_running = 0;
+ if (radio->int_in_urb)
+ usb_kill_urb(radio->int_in_urb);
+ }
+
+ /* cancel read processes */
+ wake_up_interruptible(&radio->read_queue);
+
+ /* stop radio */
+ si470x_stop(radio);
return 0;
}
@@ -814,9 +778,12 @@ static int si470x_usb_driver_suspend(struct usb_interface *intf,
*/
static int si470x_usb_driver_resume(struct usb_interface *intf)
{
+ struct si470x_device *radio = usb_get_intfdata(intf);
+
dev_info(&intf->dev, "resuming now...\n");
- return 0;
+ /* start radio */
+ return si470x_start_usb(radio);
}
@@ -828,28 +795,22 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
struct si470x_device *radio = usb_get_intfdata(intf);
mutex_lock(&radio->lock);
- radio->disconnected = 1;
+ v4l2_device_disconnect(&radio->v4l2_dev);
+ video_unregister_device(&radio->videodev);
usb_set_intfdata(intf, NULL);
- if (radio->users == 0) {
- /* set led to disconnect state */
- si470x_set_led_state(radio, BLINK_ORANGE_LED);
-
- /* Free data structures. */
- usb_free_urb(radio->int_in_urb);
-
- kfree(radio->int_in_buffer);
- video_unregister_device(radio->videodev);
- kfree(radio->buffer);
- mutex_unlock(&radio->lock);
- kfree(radio);
- } else {
- mutex_unlock(&radio->lock);
- }
+ mutex_unlock(&radio->lock);
+ v4l2_device_put(&radio->v4l2_dev);
}
/*
* si470x_usb_driver - usb driver interface
+ *
+ * A note on suspend/resume: this driver had only empty suspend/resume
+ * functions, and when I tried to test suspend/resume it always disconnected
+ * instead of resuming (using my ADS InstantFM stick). So I've decided to
+ * remove these callbacks until someone else with better hardware can
+ * implement and test this.
*/
static struct usb_driver si470x_usb_driver = {
.name = DRIVER_NAME,
@@ -857,8 +818,8 @@ static struct usb_driver si470x_usb_driver = {
.disconnect = si470x_usb_driver_disconnect,
.suspend = si470x_usb_driver_suspend,
.resume = si470x_usb_driver_resume,
+ .reset_resume = si470x_usb_driver_resume,
.id_table = si470x_usb_driver_id_table,
- .supports_autosuspend = 1,
};
module_usb_driver(si470x_usb_driver);
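The si470x hunks above are a straight conversion to the v4l2 control framework; a minimal sketch of that generic pattern, using hypothetical foo_* names and only the v4l2_ctrl_* calls already present in the hunks above, looks roughly like this (illustrative only, not part of the patch):

	#include <linux/kernel.h>
	#include <media/v4l2-ctrls.h>

	struct foo_device {
		struct v4l2_ctrl_handler hdl;
		/* ... driver state ... */
	};

	/* called by the control framework whenever a control value changes */
	static int foo_s_ctrl(struct v4l2_ctrl *ctrl)
	{
		struct foo_device *foo =
			container_of(ctrl->handler, struct foo_device, hdl);

		switch (ctrl->id) {
		case V4L2_CID_AUDIO_MUTE:
			/* program ctrl->val into the hardware here */
			return 0;
		default:
			return -EINVAL;
		}
	}

	static const struct v4l2_ctrl_ops foo_ctrl_ops = {
		.s_ctrl = foo_s_ctrl,
	};

	static int foo_init_controls(struct foo_device *foo)
	{
		v4l2_ctrl_handler_init(&foo->hdl, 1);	/* hint: one control */
		v4l2_ctrl_new_std(&foo->hdl, &foo_ctrl_ops,
				  V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
		if (foo->hdl.error) {
			int err = foo->hdl.error;

			v4l2_ctrl_handler_free(&foo->hdl);
			return err;
		}
		/* the video_device then points its .ctrl_handler at &foo->hdl */
		return 0;
	}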
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index f300a55ed85c..4921cab8e0fa 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -36,6 +36,9 @@
#include <linux/mutex.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-device.h>
#include <asm/unaligned.h>
@@ -141,10 +144,9 @@
* si470x_device - private data
*/
struct si470x_device {
- struct video_device *videodev;
-
- /* driver management */
- unsigned int users;
+ struct v4l2_device v4l2_dev;
+ struct video_device videodev;
+ struct v4l2_ctrl_handler hdl;
/* Silabs internal registers (0..15) */
unsigned short registers[RADIO_REGISTER_NUM];
@@ -174,9 +176,6 @@ struct si470x_device {
/* scratch page */
unsigned char software_version;
unsigned char hardware_version;
-
- /* driver management */
- unsigned char disconnected;
#endif
#if defined(CONFIG_I2C_SI470X) || defined(CONFIG_I2C_SI470X_MODULE)
@@ -213,6 +212,7 @@ struct si470x_device {
* Common Functions
**************************************************************************/
extern struct video_device si470x_viddev_template;
+extern const struct v4l2_ctrl_ops si470x_ctrl_ops;
int si470x_get_register(struct si470x_device *radio, int regnr);
int si470x_set_register(struct si470x_device *radio, int regnr);
int si470x_disconnect_check(struct si470x_device *radio);
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 6418c4c9faf1..06d47e5cce9f 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -211,7 +211,7 @@ static struct i2c_driver tef6862_driver = {
.name = DRIVER_NAME,
},
.probe = tef6862_probe,
- .remove = tef6862_remove,
+ .remove = __devexit_p(tef6862_remove),
.id_table = tef6862_id,
};
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index 077d369a0173..080b96a61f1a 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -518,6 +518,10 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
video_set_drvdata(gradio_dev, fmdev);
gradio_dev->lock = &fmdev->mutex;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &gradio_dev->flags);
/* Register with V4L2 subsystem as RADIO device */
if (video_register_device(gradio_dev, VFL_TYPE_RADIO, radio_nr)) {
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index a3fbb21350e9..f97eeb870455 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -69,6 +69,7 @@ config IR_JVC_DECODER
config IR_SONY_DECODER
tristate "Enable IR raw decoder for the Sony protocol"
depends on RC_CORE
+ select BITREVERSE
default y
---help---
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index baf907b3ce76..7be377fc1be8 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -1,7 +1,7 @@
/*
* USB ATI Remote support
*
- * Copyright (c) 2011 Anssi Hannula <anssi.hannula@iki.fi>
+ * Copyright (c) 2011, 2012 Anssi Hannula <anssi.hannula@iki.fi>
* Version 2.2.0 Copyright (c) 2004 Torrey Hoffman <thoffman@arnor.net>
* Version 2.1.1 Copyright (c) 2002 Vladimir Dergachev
*
@@ -151,13 +151,57 @@ MODULE_PARM_DESC(mouse, "Enable mouse device, default = yes");
#undef err
#define err(format, arg...) printk(KERN_ERR format , ## arg)
+struct ati_receiver_type {
+ /* either default_keymap or get_default_keymap should be set */
+ const char *default_keymap;
+ const char *(*get_default_keymap)(struct usb_interface *interface);
+};
+
+static const char *get_medion_keymap(struct usb_interface *interface)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+
+ /*
+ * There are many different Medion remotes shipped with a receiver
+ * with the same usb id, but the receivers have subtle differences
+ * in the USB descriptors allowing us to detect them.
+ */
+
+ if (udev->manufacturer && udev->product) {
+ if (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP) {
+
+ if (!strcmp(udev->manufacturer, "X10 Wireless Technology Inc")
+ && !strcmp(udev->product, "USB Receiver"))
+ return RC_MAP_MEDION_X10_DIGITAINER;
+
+ if (!strcmp(udev->manufacturer, "X10 WTI")
+ && !strcmp(udev->product, "RF receiver"))
+ return RC_MAP_MEDION_X10_OR2X;
+ } else {
+
+ if (!strcmp(udev->manufacturer, "X10 Wireless Technology Inc")
+ && !strcmp(udev->product, "USB Receiver"))
+ return RC_MAP_MEDION_X10;
+ }
+ }
+
+ dev_info(&interface->dev,
+ "Unknown Medion X10 receiver, using default ati_remote Medion keymap\n");
+
+ return RC_MAP_MEDION_X10;
+}
+
+static const struct ati_receiver_type type_ati = { .default_keymap = RC_MAP_ATI_X10 };
+static const struct ati_receiver_type type_medion = { .get_default_keymap = get_medion_keymap };
+static const struct ati_receiver_type type_firefly = { .default_keymap = RC_MAP_SNAPSTREAM_FIREFLY };
+
static struct usb_device_id ati_remote_table[] = {
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_ATI_X10 },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA2_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_ATI_X10 },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, ATI_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_ATI_X10 },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, NVIDIA_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_ATI_X10 },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, MEDION_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_MEDION_X10 },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, FIREFLY_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_SNAPSTREAM_FIREFLY },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_ati },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA2_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_ati },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, ATI_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_ati },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, NVIDIA_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_ati },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, MEDION_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_medion },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, FIREFLY_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_firefly },
{} /* Terminating entry */
};
@@ -445,6 +489,7 @@ static void ati_remote_input_report(struct urb *urb)
int acc;
int remote_num;
unsigned char scancode;
+ u32 wheel_keycode = KEY_RESERVED;
int i;
/*
@@ -484,26 +529,33 @@ static void ati_remote_input_report(struct urb *urb)
*/
scancode = data[2] & 0x7f;
- /* Look up event code index in the mouse translation table. */
- for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
- if (scancode == ati_remote_tbl[i].data) {
- index = i;
- break;
+ dbginfo(&ati_remote->interface->dev,
+ "channel 0x%02x; key data %02x, scancode %02x\n",
+ remote_num, data[2], scancode);
+
+ if (scancode >= 0x70) {
+ /*
+ * This is either a mouse or scrollwheel event, depending on
+ * the remote/keymap.
+ * Get the keycode assigned to scancode 0x78/0x70. If it is
+ * set, assume this is a scrollwheel up/down event.
+ */
+ wheel_keycode = rc_g_keycode_from_table(ati_remote->rdev,
+ scancode & 0x78);
+
+ if (wheel_keycode == KEY_RESERVED) {
+ /* scrollwheel was not mapped, assume mouse */
+
+ /* Look up event code index in the mouse translation table. */
+ for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
+ if (scancode == ati_remote_tbl[i].data) {
+ index = i;
+ break;
+ }
+ }
}
}
- if (index >= 0) {
- dbginfo(&ati_remote->interface->dev,
- "channel 0x%02x; mouse data %02x; index %d; keycode %d\n",
- remote_num, data[2], index, ati_remote_tbl[index].code);
- if (!dev)
- return; /* no mouse device */
- } else
- dbginfo(&ati_remote->interface->dev,
- "channel 0x%02x; key data %02x, scancode %02x\n",
- remote_num, data[2], scancode);
-
-
if (index >= 0 && ati_remote_tbl[index].kind == KIND_LITERAL) {
input_event(dev, ati_remote_tbl[index].type,
ati_remote_tbl[index].code,
@@ -542,15 +594,29 @@ static void ati_remote_input_report(struct urb *urb)
if (index < 0) {
/* Not a mouse event, hand it to rc-core. */
-
- /*
- * We don't use the rc-core repeat handling yet as
- * it would cause ghost repeats which would be a
- * regression for this driver.
- */
- rc_keydown_notimeout(ati_remote->rdev, scancode,
- data[2]);
- rc_keyup(ati_remote->rdev);
+ int count = 1;
+
+ if (wheel_keycode != KEY_RESERVED) {
+ /*
+ * This is a scrollwheel event, send the
+ * scroll up (0x78) / down (0x70) scancode
+ * repeatedly as many times as indicated by
+ * rest of the scancode.
+ */
+ count = (scancode & 0x07) + 1;
+ scancode &= 0x78;
+ }
+
+ while (count--) {
+ /*
+ * We don't use the rc-core repeat handling yet as
+ * it would cause ghost repeats which would be a
+ * regression for this driver.
+ */
+ rc_keydown_notimeout(ati_remote->rdev, scancode,
+ data[2]);
+ rc_keyup(ati_remote->rdev);
+ }
return;
}
@@ -766,6 +832,7 @@ static int ati_remote_probe(struct usb_interface *interface, const struct usb_de
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_host_interface *iface_host = interface->cur_altsetting;
struct usb_endpoint_descriptor *endpoint_in, *endpoint_out;
+ struct ati_receiver_type *type = (struct ati_receiver_type *)id->driver_info;
struct ati_remote *ati_remote;
struct input_dev *input_dev;
struct rc_dev *rc_dev;
@@ -827,10 +894,15 @@ static int ati_remote_probe(struct usb_interface *interface, const struct usb_de
snprintf(ati_remote->mouse_name, sizeof(ati_remote->mouse_name),
"%s mouse", ati_remote->rc_name);
- if (id->driver_info)
- rc_dev->map_name = (const char *)id->driver_info;
- else
- rc_dev->map_name = RC_MAP_ATI_X10;
+ rc_dev->map_name = RC_MAP_ATI_X10; /* default map */
+
+ /* set default keymap according to receiver model */
+ if (type) {
+ if (type->default_keymap)
+ rc_dev->map_name = type->default_keymap;
+ else if (type->get_default_keymap)
+ rc_dev->map_name = type->get_default_keymap(interface);
+ }
ati_remote_rc_init(ati_remote);
mutex_init(&ati_remote->open_mutex);
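As a worked example of the scrollwheel decoding added above (illustrative only, not part of the patch): a report carrying scancode 0x73 yields a base scancode of 0x73 & 0x78 = 0x70 and a count of (0x73 & 0x07) + 1 = 4, so if 0x70 is mapped (KEY_DOWN in the Medion Digitainer keymap further below) the driver emits that keycode four times instead of treating the report as a mouse event.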
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index 4a3a238bcfbc..6aabf7ae3a31 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -556,11 +556,11 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
FINTEK_DRIVER_NAME, (void *)fintek))
- goto failure;
+ goto failure2;
ret = rc_register_device(rdev);
if (ret)
- goto failure;
+ goto failure3;
device_init_wakeup(&pdev->dev, true);
fintek->rdev = rdev;
@@ -570,12 +570,11 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
return 0;
+failure3:
+ free_irq(fintek->cir_irq, fintek);
+failure2:
+ release_region(fintek->cir_addr, fintek->cir_port_len);
failure:
- if (fintek->cir_irq)
- free_irq(fintek->cir_irq, fintek);
- if (fintek->cir_addr)
- release_region(fintek->cir_addr, fintek->cir_port_len);
-
rc_free_device(rdev);
kfree(fintek);
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 7f26fdf2e54e..5dd0386604f0 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -255,7 +255,7 @@ static struct usb_device_id imon_usb_id_table[] = {
static struct usb_driver imon_driver = {
.name = MOD_NAME,
.probe = imon_probe,
- .disconnect = imon_disconnect,
+ .disconnect = __devexit_p(imon_disconnect),
.suspend = imon_suspend,
.resume = imon_resume,
.id_table = imon_usb_id_table,
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 95e630998aaf..a82025121345 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -46,9 +46,9 @@ static int ir_raw_event_thread(void *data)
while (!kthread_should_stop()) {
spin_lock_irq(&raw->lock);
- retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
+ retval = kfifo_len(&raw->kfifo);
- if (!retval) {
+ if (retval < sizeof(ev)) {
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop())
@@ -59,11 +59,9 @@ static int ir_raw_event_thread(void *data)
continue;
}
+ retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
spin_unlock_irq(&raw->lock);
-
- BUG_ON(retval != sizeof(ev));
-
mutex_lock(&ir_raw_handler_lock);
list_for_each_entry(handler, &ir_raw_handler_list, list)
handler->decode(raw->dev, ev);
diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c
index d38fbdd0b25a..7e54ec57bcf9 100644
--- a/drivers/media/rc/ir-sanyo-decoder.c
+++ b/drivers/media/rc/ir-sanyo-decoder.c
@@ -56,7 +56,7 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct sanyo_dec *data = &dev->raw->sanyo;
u32 scancode;
- u8 address, not_address, command, not_command;
+ u8 address, command, not_command;
if (!(dev->raw->enabled_protocols & RC_TYPE_SANYO))
return 0;
@@ -154,7 +154,7 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
break;
address = bitrev16((data->bits >> 29) & 0x1fff) >> 3;
- not_address = bitrev16((data->bits >> 16) & 0x1fff) >> 3;
+ /* not_address = bitrev16((data->bits >> 16) & 0x1fff) >> 3; */
command = bitrev8((data->bits >> 8) & 0xff);
not_command = bitrev8((data->bits >> 0) & 0xff);
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 0e49c99abf68..36fe5a349b95 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1598,24 +1598,22 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED,
ITE_DRIVER_NAME, (void *)itdev))
- goto failure;
+ goto failure2;
ret = rc_register_device(rdev);
if (ret)
- goto failure;
+ goto failure3;
itdev->rdev = rdev;
ite_pr(KERN_NOTICE, "driver has been successfully loaded\n");
return 0;
+failure3:
+ free_irq(itdev->cir_irq, itdev);
+failure2:
+ release_region(itdev->cir_addr, itdev->params.io_region_size);
failure:
- if (itdev->cir_irq)
- free_irq(itdev->cir_irq, itdev);
-
- if (itdev->cir_addr)
- release_region(itdev->cir_addr, itdev->params.io_region_size);
-
rc_free_device(rdev);
kfree(itdev);
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index 49ce2662f56b..ab84d66c67c1 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-anysee.o \
rc-apac-viewcomp.o \
rc-asus-pc39.o \
+ rc-asus-ps3-100.o \
rc-ati-tv-wonder-hd-600.o \
rc-ati-x10.o \
rc-avermedia-a16d.o \
@@ -52,6 +53,8 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-lme2510.o \
rc-manli.o \
rc-medion-x10.o \
+ rc-medion-x10-digitainer.o \
+ rc-medion-x10-or2x.o \
rc-msi-digivox-ii.o \
rc-msi-digivox-iii.o \
rc-msi-tvanywhere.o \
diff --git a/drivers/media/rc/keymaps/rc-asus-ps3-100.c b/drivers/media/rc/keymaps/rc-asus-ps3-100.c
new file mode 100644
index 000000000000..ba76609c5936
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-asus-ps3-100.c
@@ -0,0 +1,91 @@
+/* asus-ps3-100.h - Keytable for asus_ps3_100 Remote Controller
+ *
+ * Copyright (c) 2012 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Based on a previous patch from Remi Schwartz <remi.schwartz@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table asus_ps3_100[] = {
+ { 0x081c, KEY_HOME }, /* home */
+ { 0x081e, KEY_TV }, /* tv */
+ { 0x0803, KEY_TEXT }, /* teletext */
+ { 0x0829, KEY_POWER }, /* close */
+
+ { 0x080b, KEY_RED }, /* red */
+ { 0x080d, KEY_YELLOW }, /* yellow */
+ { 0x0806, KEY_BLUE }, /* blue */
+ { 0x0807, KEY_GREEN }, /* green */
+
+ /* Keys 0 to 9 */
+ { 0x082a, KEY_0 },
+ { 0x0816, KEY_1 },
+ { 0x0812, KEY_2 },
+ { 0x0814, KEY_3 },
+ { 0x0836, KEY_4 },
+ { 0x0832, KEY_5 },
+ { 0x0834, KEY_6 },
+ { 0x080e, KEY_7 },
+ { 0x080a, KEY_8 },
+ { 0x080c, KEY_9 },
+
+ { 0x0815, KEY_VOLUMEUP },
+ { 0x0826, KEY_VOLUMEDOWN },
+ { 0x0835, KEY_CHANNELUP }, /* channel / program + */
+ { 0x0824, KEY_CHANNELDOWN }, /* channel / program - */
+
+ { 0x0808, KEY_UP },
+ { 0x0804, KEY_DOWN },
+ { 0x0818, KEY_LEFT },
+ { 0x0810, KEY_RIGHT },
+ { 0x0825, KEY_ENTER }, /* enter */
+
+ { 0x0822, KEY_EXIT }, /* back */
+ { 0x082c, KEY_AB }, /* recall */
+
+ { 0x0820, KEY_AUDIO }, /* TV audio */
+ { 0x0837, KEY_SCREEN }, /* snapshot */
+ { 0x082e, KEY_ZOOM }, /* full screen */
+ { 0x0802, KEY_MUTE }, /* mute */
+
+ { 0x0831, KEY_REWIND }, /* backward << */
+ { 0x0811, KEY_RECORD }, /* recording */
+ { 0x0809, KEY_STOP },
+ { 0x0805, KEY_FASTFORWARD }, /* forward >> */
+ { 0x0821, KEY_PREVIOUS }, /* rew */
+ { 0x081a, KEY_PAUSE }, /* pause */
+ { 0x0839, KEY_PLAY }, /* play */
+ { 0x0819, KEY_NEXT }, /* forward */
+};
+
+static struct rc_map_list asus_ps3_100_map = {
+.map = {
+ .scan = asus_ps3_100,
+ .size = ARRAY_SIZE(asus_ps3_100),
+ .rc_type = RC_TYPE_RC5,
+ .name = RC_MAP_ASUS_PS3_100,
+}
+};
+
+static int __init init_rc_map_asus_ps3_100(void)
+{
+return rc_map_register(&asus_ps3_100_map);
+}
+
+static void __exit exit_rc_map_asus_ps3_100(void)
+{
+rc_map_unregister(&asus_ps3_100_map);
+}
+
+module_init(init_rc_map_asus_ps3_100)
+module_exit(exit_rc_map_asus_ps3_100)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/rc/keymaps/rc-it913x-v2.c b/drivers/media/rc/keymaps/rc-it913x-v2.c
index 28e376e18b99..bd42a30ec06f 100644
--- a/drivers/media/rc/keymaps/rc-it913x-v2.c
+++ b/drivers/media/rc/keymaps/rc-it913x-v2.c
@@ -40,7 +40,7 @@ static struct rc_map_table it913x_v2_rc[] = {
/* Type 2 */
/* keys stereo, snapshot unassigned */
{ 0x866b00, KEY_0 },
- { 0x866b1b, KEY_1 },
+ { 0x866b01, KEY_1 },
{ 0x866b02, KEY_2 },
{ 0x866b03, KEY_3 },
{ 0x866b04, KEY_4 },
diff --git a/drivers/media/rc/keymaps/rc-medion-x10-digitainer.c b/drivers/media/rc/keymaps/rc-medion-x10-digitainer.c
new file mode 100644
index 000000000000..966f9b3c71da
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-medion-x10-digitainer.c
@@ -0,0 +1,123 @@
+/*
+ * Medion X10 RF remote keytable (Digitainer variant)
+ *
+ * Copyright (C) 2012 Anssi Hannula <anssi.hannula@iki.fi>
+ *
+ * This keymap is for a variant that has a distinctive scrollwheel instead of
+ * up/down buttons (tested with P/N 40009936 / 20018268), reportedly
+ * originally shipped with Medion Digitainer but now sold separately simply as
+ * an "X10" remote.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table medion_x10_digitainer[] = {
+ { 0x02, KEY_POWER },
+
+ { 0x2c, KEY_TV },
+ { 0x2d, KEY_VIDEO },
+ { 0x04, KEY_DVD }, /* CD/DVD */
+ { 0x16, KEY_TEXT }, /* "teletext" icon, i.e. a screen with lines */
+ { 0x06, KEY_AUDIO },
+ { 0x2e, KEY_RADIO },
+ { 0x31, KEY_EPG }, /* a screen with an open book */
+ { 0x05, KEY_IMAGES }, /* Photo */
+ { 0x2f, KEY_INFO },
+
+ { 0x78, KEY_UP }, /* scrollwheel up 1 notch */
+ /* 0x79..0x7f: 2-8 notches, driver repeats 0x78 entry */
+
+ { 0x70, KEY_DOWN }, /* scrollwheel down 1 notch */
+ /* 0x71..0x77: 2-8 notches, driver repeats 0x70 entry */
+
+ { 0x19, KEY_MENU },
+ { 0x1d, KEY_LEFT },
+ { 0x1e, KEY_OK }, /* scrollwheel press */
+ { 0x1f, KEY_RIGHT },
+ { 0x20, KEY_BACK },
+
+ { 0x09, KEY_VOLUMEUP },
+ { 0x08, KEY_VOLUMEDOWN },
+ { 0x00, KEY_MUTE },
+
+ { 0x1b, KEY_SELECT }, /* also has "U" rotated 90 degrees CCW */
+
+ { 0x0b, KEY_CHANNELUP },
+ { 0x0c, KEY_CHANNELDOWN },
+ { 0x1c, KEY_LAST },
+
+ { 0x32, KEY_RED }, /* also Audio */
+ { 0x33, KEY_GREEN }, /* also Subtitle */
+ { 0x34, KEY_YELLOW }, /* also Angle */
+ { 0x35, KEY_BLUE }, /* also Title */
+
+ { 0x28, KEY_STOP },
+ { 0x29, KEY_PAUSE },
+ { 0x25, KEY_PLAY },
+ { 0x21, KEY_PREVIOUS },
+ { 0x18, KEY_CAMERA },
+ { 0x23, KEY_NEXT },
+ { 0x24, KEY_REWIND },
+ { 0x27, KEY_RECORD },
+ { 0x26, KEY_FORWARD },
+
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+
+ /* these do not actually exist on this remote, but these scancodes
+ * exist on all other Medion X10 remotes and adding them here allows
+ * such remotes to be adequately usable with this keymap in case
+ * this keymap is wrongly used with them (which is quite possible as
+ * there are lots of different Medion X10 remotes): */
+ { 0x1a, KEY_UP },
+ { 0x22, KEY_DOWN },
+};
+
+static struct rc_map_list medion_x10_digitainer_map = {
+ .map = {
+ .scan = medion_x10_digitainer,
+ .size = ARRAY_SIZE(medion_x10_digitainer),
+ .rc_type = RC_TYPE_OTHER,
+ .name = RC_MAP_MEDION_X10_DIGITAINER,
+ }
+};
+
+static int __init init_rc_map_medion_x10_digitainer(void)
+{
+ return rc_map_register(&medion_x10_digitainer_map);
+}
+
+static void __exit exit_rc_map_medion_x10_digitainer(void)
+{
+ rc_map_unregister(&medion_x10_digitainer_map);
+}
+
+module_init(init_rc_map_medion_x10_digitainer)
+module_exit(exit_rc_map_medion_x10_digitainer)
+
+MODULE_DESCRIPTION("Medion X10 RF remote keytable (Digitainer variant)");
+MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/keymaps/rc-medion-x10-or2x.c b/drivers/media/rc/keymaps/rc-medion-x10-or2x.c
new file mode 100644
index 000000000000..b077300ecb5c
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-medion-x10-or2x.c
@@ -0,0 +1,108 @@
+/*
+ * Medion X10 OR22/OR24 RF remote keytable
+ *
+ * Copyright (C) 2012 Anssi Hannula <anssi.hannula@iki.fi>
+ *
+ * This keymap is for several Medion X10 remotes that have the Windows MCE
+ * button. This has been tested with a "RF VISTA Remote Control", OR24V,
+ * P/N 20035335, but should work with other variants that have the same
+ * buttons, such as OR22V and OR24E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table medion_x10_or2x[] = {
+ { 0x02, KEY_POWER },
+ { 0x16, KEY_TEXT }, /* "T" in a box, for teletext */
+
+ { 0x09, KEY_VOLUMEUP },
+ { 0x08, KEY_VOLUMEDOWN },
+ { 0x00, KEY_MUTE },
+ { 0x0b, KEY_CHANNELUP },
+ { 0x0c, KEY_CHANNELDOWN },
+
+ { 0x32, KEY_RED },
+ { 0x33, KEY_GREEN },
+ { 0x34, KEY_YELLOW },
+ { 0x35, KEY_BLUE },
+
+ { 0x18, KEY_PVR }, /* record symbol inside a tv symbol */
+ { 0x04, KEY_DVD }, /* disc symbol */
+ { 0x31, KEY_EPG }, /* a tv schedule symbol */
+ { 0x1c, KEY_TV }, /* play symbol inside a tv symbol */
+ { 0x20, KEY_BACK },
+ { 0x2f, KEY_INFO },
+
+ { 0x1a, KEY_UP },
+ { 0x22, KEY_DOWN },
+ { 0x1d, KEY_LEFT },
+ { 0x1f, KEY_RIGHT },
+ { 0x1e, KEY_OK },
+
+ { 0x1b, KEY_MEDIA }, /* Windows MCE button */
+
+ { 0x21, KEY_PREVIOUS },
+ { 0x23, KEY_NEXT },
+ { 0x24, KEY_REWIND },
+ { 0x26, KEY_FORWARD },
+ { 0x25, KEY_PLAY },
+ { 0x28, KEY_STOP },
+ { 0x29, KEY_PAUSE },
+ { 0x27, KEY_RECORD },
+
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x30, KEY_CLEAR },
+ { 0x36, KEY_ENTER },
+ { 0x37, KEY_NUMERIC_STAR },
+ { 0x38, KEY_NUMERIC_POUND },
+};
+
+static struct rc_map_list medion_x10_or2x_map = {
+ .map = {
+ .scan = medion_x10_or2x,
+ .size = ARRAY_SIZE(medion_x10_or2x),
+ .rc_type = RC_TYPE_OTHER,
+ .name = RC_MAP_MEDION_X10_OR2X,
+ }
+};
+
+static int __init init_rc_map_medion_x10_or2x(void)
+{
+ return rc_map_register(&medion_x10_or2x_map);
+}
+
+static void __exit exit_rc_map_medion_x10_or2x(void)
+{
+ rc_map_unregister(&medion_x10_or2x_map);
+}
+
+module_init(init_rc_map_medion_x10_or2x)
+module_exit(exit_rc_map_medion_x10_or2x)
+
+MODULE_DESCRIPTION("Medion X10 OR22/OR24 RF remote keytable");
+MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index e150a2e29a4b..84e06d3aa696 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -520,7 +520,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
{
char codes[USB_BUFLEN * 3 + 1];
char inout[9];
- u8 cmd, subcmd, data1, data2, data3, data4, data5;
+ u8 cmd, subcmd, data1, data2, data3, data4;
struct device *dev = ir->dev;
int i, start, skip = 0;
u32 carrier, period;
@@ -553,7 +553,6 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
data2 = buf[start + 3] & 0xff;
data3 = buf[start + 4] & 0xff;
data4 = buf[start + 5] & 0xff;
- data5 = buf[start + 6] & 0xff;
switch (cmd) {
case MCE_CMD_NULL:
@@ -1443,7 +1442,7 @@ static int mceusb_dev_resume(struct usb_interface *intf)
static struct usb_driver mceusb_dev_driver = {
.name = DRIVER_NAME,
.probe = mceusb_dev_probe,
- .disconnect = mceusb_dev_disconnect,
+ .disconnect = __devexit_p(mceusb_dev_disconnect),
.suspend = mceusb_dev_suspend,
.resume = mceusb_dev_resume,
.reset_resume = mceusb_dev_resume,
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 8b2c071ac0ab..dc8a7dddccd4 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -1075,19 +1075,19 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
NVT_DRIVER_NAME, (void *)nvt))
- goto failure;
+ goto failure2;
if (!request_region(nvt->cir_wake_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
- goto failure;
+ goto failure3;
if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
NVT_DRIVER_NAME, (void *)nvt))
- goto failure;
+ goto failure4;
ret = rc_register_device(rdev);
if (ret)
- goto failure;
+ goto failure5;
device_init_wakeup(&pdev->dev, true);
nvt->rdev = rdev;
@@ -1099,17 +1099,15 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
return 0;
+failure5:
+ free_irq(nvt->cir_wake_irq, nvt);
+failure4:
+ release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
+failure3:
+ free_irq(nvt->cir_irq, nvt);
+failure2:
+ release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
failure:
- if (nvt->cir_irq)
- free_irq(nvt->cir_irq, nvt);
- if (nvt->cir_addr)
- release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
-
- if (nvt->cir_wake_irq)
- free_irq(nvt->cir_wake_irq, nvt);
- if (nvt->cir_wake_addr)
- release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
-
rc_free_device(rdev);
kfree(nvt);
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index efc6a514348a..fae1615e0ff2 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -221,7 +221,6 @@ static int __init loop_init(void)
rc->s_idle = loop_set_idle;
rc->s_learning_mode = loop_set_learning_mode;
rc->s_carrier_report = loop_set_carrier_report;
- rc->priv = &loopdev;
loopdev.txmask = RXMASK_REGULAR;
loopdev.txcarrier = 36000;
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index ad95c67a4dba..2878b0ed9741 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -1277,7 +1277,7 @@ static int redrat3_dev_resume(struct usb_interface *intf)
static struct usb_driver redrat3_dev_driver = {
.name = DRIVER_NAME,
.probe = redrat3_dev_probe,
- .disconnect = redrat3_dev_disconnect,
+ .disconnect = __devexit_p(redrat3_dev_disconnect),
.suspend = redrat3_dev_suspend,
.resume = redrat3_dev_resume,
.reset_resume = redrat3_dev_resume,
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index ce1e7ba940f6..99937c94d7df 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -472,6 +472,9 @@ comment "Camera sensor devices"
config VIDEO_APTINA_PLL
tristate
+config VIDEO_SMIAPP_PLL
+ tristate
+
config VIDEO_OV7670
tristate "OmniVision OV7670 sensor support"
depends on I2C && VIDEO_V4L2
@@ -556,6 +559,8 @@ config VIDEO_S5K6AA
This is a V4L2 sensor-level driver for Samsung S5K6AA(FX) 1.3M
camera sensor with an embedded SoC image signal processor.
+source "drivers/media/video/smiapp/Kconfig"
+
comment "Flash devices"
config VIDEO_ADP1653
@@ -644,6 +649,8 @@ menuconfig V4L_USB_DRIVERS
if V4L_USB_DRIVERS
+source "drivers/media/video/au0828/Kconfig"
+
source "drivers/media/video/uvc/Kconfig"
source "drivers/media/video/gspca/Kconfig"
@@ -662,8 +669,6 @@ source "drivers/media/video/tm6000/Kconfig"
source "drivers/media/video/usbvision/Kconfig"
-source "drivers/media/video/et61x251/Kconfig"
-
source "drivers/media/video/sn9c102/Kconfig"
source "drivers/media/video/pwc/Kconfig"
@@ -721,8 +726,6 @@ menuconfig V4L_PCI_DRIVERS
if V4L_PCI_DRIVERS
-source "drivers/media/video/au0828/Kconfig"
-
source "drivers/media/video/bt8xx/Kconfig"
source "drivers/media/video/cx18/Kconfig"
@@ -794,6 +797,19 @@ source "drivers/media/video/saa7164/Kconfig"
source "drivers/media/video/zoran/Kconfig"
+config STA2X11_VIP
+ tristate "STA2X11 VIP Video For Linux"
+ depends on STA2X11
+ select VIDEO_ADV7180 if VIDEO_HELPER_CHIPS_AUTO
+ select VIDEOBUF_DMA_CONTIG
+ depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS
+ help
+ Say Y for support for STA2X11 VIP (Video Input Port) capture
+ device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sta2x11_vip.
+
endif # V4L_PCI_DRIVERS
#
@@ -1127,19 +1143,6 @@ config VIDEO_MX2
This is a v4l2 driver for the i.MX27 and the i.MX25 Camera Sensor
Interface
-config VIDEO_SAMSUNG_S5P_FIMC
- tristate "Samsung S5P and EXYNOS4 camera interface driver (EXPERIMENTAL)"
- depends on VIDEO_V4L2 && I2C && PLAT_S5P && PM_RUNTIME && \
- VIDEO_V4L2_SUBDEV_API && EXPERIMENTAL
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- ---help---
- This is a v4l2 driver for Samsung S5P and EXYNOS4 camera
- host interface and video postprocessor.
-
- To compile this driver as a module, choose M here: the
- module will be called s5p-fimc.
-
config VIDEO_ATMEL_ISI
tristate "ATMEL Image Sensor Interface (ISI) support"
depends on VIDEO_DEV && SOC_CAMERA && ARCH_AT91
@@ -1148,16 +1151,7 @@ config VIDEO_ATMEL_ISI
This module makes the ATMEL Image Sensor Interface available
as a v4l2 device.
-config VIDEO_S5P_MIPI_CSIS
- tristate "Samsung S5P and EXYNOS4 MIPI CSI receiver driver"
- depends on VIDEO_V4L2 && PM_RUNTIME && PLAT_S5P
- depends on VIDEO_V4L2_SUBDEV_API && REGULATOR
- ---help---
- This is a v4l2 driver for Samsung S5P/EXYNOS4 MIPI-CSI receiver.
-
- To compile this driver as a module, choose M here: the
- module will be called s5p-csis.
-
+source "drivers/media/video/s5p-fimc/Kconfig"
source "drivers/media/video/s5p-tv/Kconfig"
endif # V4L_PLATFORM_DRIVERS
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index a6282a3a6a82..d209de0e0ca8 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -79,9 +79,12 @@ obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/
obj-$(CONFIG_VIDEO_S5K6AA) += s5k6aa.o
+obj-$(CONFIG_VIDEO_SMIAPP) += smiapp/
obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
obj-$(CONFIG_VIDEO_AS3645A) += as3645a.o
+obj-$(CONFIG_VIDEO_SMIAPP_PLL) += smiapp-pll.o
+
obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o
@@ -120,6 +123,7 @@ obj-$(CONFIG_VIDEO_TM6000) += tm6000/
obj-$(CONFIG_VIDEO_MXB) += mxb.o
obj-$(CONFIG_VIDEO_HEXIUM_ORION) += hexium_orion.o
obj-$(CONFIG_VIDEO_HEXIUM_GEMINI) += hexium_gemini.o
+obj-$(CONFIG_STA2X11_VIP) += sta2x11_vip.o
obj-$(CONFIG_VIDEO_TIMBERDALE) += timblogiw.o
obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
@@ -152,7 +156,6 @@ obj-$(CONFIG_USB_ZR364XX) += zr364xx.o
obj-$(CONFIG_USB_STKWEBCAM) += stkwebcam.o
obj-$(CONFIG_USB_SN9C102) += sn9c102/
-obj-$(CONFIG_USB_ET61X251) += et61x251/
obj-$(CONFIG_USB_PWC) += pwc/
obj-$(CONFIG_USB_GSPCA) += gspca/
diff --git a/drivers/media/video/adp1653.c b/drivers/media/video/adp1653.c
index 5b045b4a66fe..57e87090388d 100644
--- a/drivers/media/video/adp1653.c
+++ b/drivers/media/video/adp1653.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <media/adp1653.h>
#include <media/v4l2-device.h>
@@ -282,19 +281,19 @@ adp1653_init_device(struct adp1653_flash *flash)
return -EIO;
}
- mutex_lock(&flash->ctrls.lock);
+ mutex_lock(flash->ctrls.lock);
/* Reset faults before reading new ones. */
flash->fault = 0;
rval = adp1653_get_fault(flash);
- mutex_unlock(&flash->ctrls.lock);
+ mutex_unlock(flash->ctrls.lock);
if (rval > 0) {
dev_err(&client->dev, "faults detected: 0x%1.1x\n", rval);
return -EIO;
}
- mutex_lock(&flash->ctrls.lock);
+ mutex_lock(flash->ctrls.lock);
rval = adp1653_update_hw(flash);
- mutex_unlock(&flash->ctrls.lock);
+ mutex_unlock(flash->ctrls.lock);
if (rval) {
dev_err(&client->dev,
"adp1653_update_hw failed at %s\n", __func__);
diff --git a/drivers/media/video/adv7180.c b/drivers/media/video/adv7180.c
index b8b6c4b0cad4..174bffacf117 100644
--- a/drivers/media/video/adv7180.c
+++ b/drivers/media/video/adv7180.c
@@ -48,6 +48,7 @@
#define ADV7180_INPUT_CONTROL_PAL_COMB_N_PED 0xd0
#define ADV7180_INPUT_CONTROL_PAL_SECAM 0xe0
#define ADV7180_INPUT_CONTROL_PAL_SECAM_PED 0xf0
+#define ADV7180_INPUT_CONTROL_INSEL_MASK 0x0f
#define ADV7180_EXTENDED_OUTPUT_CONTROL_REG 0x04
#define ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS 0xC5
@@ -55,9 +56,29 @@
#define ADV7180_AUTODETECT_ENABLE_REG 0x07
#define ADV7180_AUTODETECT_DEFAULT 0x7f
+#define ADV7180_CON_REG 0x08 /*Unsigned */
+#define CON_REG_MIN 0
+#define CON_REG_DEF 128
+#define CON_REG_MAX 255
+
+#define ADV7180_BRI_REG 0x0a /*Signed */
+#define BRI_REG_MIN -128
+#define BRI_REG_DEF 0
+#define BRI_REG_MAX 127
+
+#define ADV7180_HUE_REG 0x0b /*Signed, inverted */
+#define HUE_REG_MIN -127
+#define HUE_REG_DEF 0
+#define HUE_REG_MAX 128
+
#define ADV7180_ADI_CTRL_REG 0x0e
#define ADV7180_ADI_CTRL_IRQ_SPACE 0x20
+#define ADV7180_PWR_MAN_REG 0x0f
+#define ADV7180_PWR_MAN_ON 0x04
+#define ADV7180_PWR_MAN_OFF 0x24
+#define ADV7180_PWR_MAN_RES 0x80
+
#define ADV7180_STATUS1_REG 0x10
#define ADV7180_STATUS1_IN_LOCK 0x01
#define ADV7180_STATUS1_AUTOD_MASK 0x70
@@ -78,6 +99,12 @@
#define ADV7180_ICONF1_PSYNC_ONLY 0x10
#define ADV7180_ICONF1_ACTIVE_TO_CLR 0xC0
+#define ADV7180_SD_SAT_CB_REG 0xe3 /*Unsigned */
+#define ADV7180_SD_SAT_CR_REG 0xe4 /*Unsigned */
+#define SAT_REG_MIN 0
+#define SAT_REG_DEF 128
+#define SAT_REG_MAX 255
+
#define ADV7180_IRQ1_LOCK 0x01
#define ADV7180_IRQ1_UNLOCK 0x02
#define ADV7180_ISR1_ADI 0x42
@@ -90,6 +117,9 @@
#define ADV7180_IMR3_ADI 0x4C
#define ADV7180_IMR4_ADI 0x50
+#define ADV7180_NTSC_V_BIT_END_REG 0xE6
+#define ADV7180_NTSC_V_BIT_END_MANUAL_NVEND 0x4F
+
struct adv7180_state {
struct v4l2_subdev sd;
struct work_struct work;
@@ -97,6 +127,11 @@ struct adv7180_state {
int irq;
v4l2_std_id curr_norm;
bool autodetect;
+ s8 brightness;
+ s16 hue;
+ u8 contrast;
+ u8 saturation;
+ u8 input;
};
static v4l2_std_id adv7180_std_to_v4l2(u8 status1)
@@ -155,7 +190,7 @@ static u32 adv7180_status_to_v4l2(u8 status1)
}
static int __adv7180_status(struct i2c_client *client, u32 *status,
- v4l2_std_id *std)
+ v4l2_std_id *std)
{
int status1 = i2c_smbus_read_byte_data(client, ADV7180_STATUS1_REG);
@@ -192,6 +227,36 @@ static int adv7180_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
return err;
}
+static int adv7180_s_routing(struct v4l2_subdev *sd, u32 input,
+ u32 output, u32 config)
+{
+ struct adv7180_state *state = to_state(sd);
+ int ret = mutex_lock_interruptible(&state->mutex);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (ret)
+ return ret;
+
+ /* We cannot discriminate between LQFP and 40-pin LFCSP, so accept
+ * all inputs and let the card driver take care of validation.
+ */
+ if ((input & ADV7180_INPUT_CONTROL_INSEL_MASK) != input)
+ goto out;
+
+ ret = i2c_smbus_read_byte_data(client, ADV7180_INPUT_CONTROL_REG);
+
+ if (ret < 0)
+ goto out;
+
+ ret &= ~ADV7180_INPUT_CONTROL_INSEL_MASK;
+ ret = i2c_smbus_write_byte_data(client,
+ ADV7180_INPUT_CONTROL_REG, ret | input);
+ state->input = input;
+out:
+ mutex_unlock(&state->mutex);
+ return ret;
+}
+
static int adv7180_g_input_status(struct v4l2_subdev *sd, u32 *status)
{
struct adv7180_state *state = to_state(sd);
@@ -205,7 +270,7 @@ static int adv7180_g_input_status(struct v4l2_subdev *sd, u32 *status)
}
static int adv7180_g_chip_ident(struct v4l2_subdev *sd,
- struct v4l2_dbg_chip_ident *chip)
+ struct v4l2_dbg_chip_ident *chip)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -222,9 +287,10 @@ static int adv7180_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
/* all standards -> autodetect */
if (std == V4L2_STD_ALL) {
- ret = i2c_smbus_write_byte_data(client,
- ADV7180_INPUT_CONTROL_REG,
- ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM);
+ ret =
+ i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
+ ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM
+ | state->input);
if (ret < 0)
goto out;
@@ -236,7 +302,8 @@ static int adv7180_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
goto out;
ret = i2c_smbus_write_byte_data(client,
- ADV7180_INPUT_CONTROL_REG, ret);
+ ADV7180_INPUT_CONTROL_REG,
+ ret | state->input);
if (ret < 0)
goto out;
@@ -249,14 +316,138 @@ out:
return ret;
}
+static int adv7180_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
+{
+ switch (qc->id) {
+ case V4L2_CID_BRIGHTNESS:
+ return v4l2_ctrl_query_fill(qc, BRI_REG_MIN, BRI_REG_MAX,
+ 1, BRI_REG_DEF);
+ case V4L2_CID_HUE:
+ return v4l2_ctrl_query_fill(qc, HUE_REG_MIN, HUE_REG_MAX,
+ 1, HUE_REG_DEF);
+ case V4L2_CID_CONTRAST:
+ return v4l2_ctrl_query_fill(qc, CON_REG_MIN, CON_REG_MAX,
+ 1, CON_REG_DEF);
+ case V4L2_CID_SATURATION:
+ return v4l2_ctrl_query_fill(qc, SAT_REG_MIN, SAT_REG_MAX,
+ 1, SAT_REG_DEF);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int adv7180_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct adv7180_state *state = to_state(sd);
+ int ret = mutex_lock_interruptible(&state->mutex);
+ if (ret)
+ return ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = state->brightness;
+ break;
+ case V4L2_CID_HUE:
+ ctrl->value = state->hue;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctrl->value = state->contrast;
+ break;
+ case V4L2_CID_SATURATION:
+ ctrl->value = state->saturation;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&state->mutex);
+ return ret;
+}
+
+static int adv7180_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct adv7180_state *state = to_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = mutex_lock_interruptible(&state->mutex);
+ if (ret)
+ return ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ if ((ctrl->value > BRI_REG_MAX)
+ || (ctrl->value < BRI_REG_MIN)) {
+ ret = -ERANGE;
+ break;
+ }
+ state->brightness = ctrl->value;
+ ret = i2c_smbus_write_byte_data(client,
+ ADV7180_BRI_REG,
+ state->brightness);
+ break;
+ case V4L2_CID_HUE:
+ if ((ctrl->value > HUE_REG_MAX)
+ || (ctrl->value < HUE_REG_MIN)) {
+ ret = -ERANGE;
+ break;
+ }
+ state->hue = ctrl->value;
+ /* Hue is inverted according to the HSL chart */
+ ret = i2c_smbus_write_byte_data(client,
+ ADV7180_HUE_REG, -state->hue);
+ break;
+ case V4L2_CID_CONTRAST:
+ if ((ctrl->value > CON_REG_MAX)
+ || (ctrl->value < CON_REG_MIN)) {
+ ret = -ERANGE;
+ break;
+ }
+ state->contrast = ctrl->value;
+ ret = i2c_smbus_write_byte_data(client,
+ ADV7180_CON_REG,
+ state->contrast);
+ break;
+ case V4L2_CID_SATURATION:
+ if ((ctrl->value > SAT_REG_MAX)
+ || (ctrl->value < SAT_REG_MIN)) {
+ ret = -ERANGE;
+ break;
+ }
+ /*
+ * This could be exposed as V4L2_CID_BLUE_BALANCE/V4L2_CID_RED_BALANCE,
+ * but let's not confuse the user: everybody understands saturation.
+ */
+ state->saturation = ctrl->value;
+ ret = i2c_smbus_write_byte_data(client,
+ ADV7180_SD_SAT_CB_REG,
+ state->saturation);
+ if (ret < 0)
+ break;
+ ret = i2c_smbus_write_byte_data(client,
+ ADV7180_SD_SAT_CR_REG,
+ state->saturation);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&state->mutex);
+ return ret;
+}
+
static const struct v4l2_subdev_video_ops adv7180_video_ops = {
.querystd = adv7180_querystd,
.g_input_status = adv7180_g_input_status,
+ .s_routing = adv7180_s_routing,
};
static const struct v4l2_subdev_core_ops adv7180_core_ops = {
.g_chip_ident = adv7180_g_chip_ident,
.s_std = adv7180_s_std,
+ .queryctrl = adv7180_queryctrl,
+ .g_ctrl = adv7180_g_ctrl,
+ .s_ctrl = adv7180_s_ctrl,
};
static const struct v4l2_subdev_ops adv7180_ops = {
@@ -267,13 +458,13 @@ static const struct v4l2_subdev_ops adv7180_ops = {
static void adv7180_work(struct work_struct *work)
{
struct adv7180_state *state = container_of(work, struct adv7180_state,
- work);
+ work);
struct i2c_client *client = v4l2_get_subdevdata(&state->sd);
u8 isr3;
mutex_lock(&state->mutex);
i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
- ADV7180_ADI_CTRL_IRQ_SPACE);
+ ADV7180_ADI_CTRL_IRQ_SPACE);
isr3 = i2c_smbus_read_byte_data(client, ADV7180_ISR3_ADI);
/* clear */
i2c_smbus_write_byte_data(client, ADV7180_ICR3_ADI, isr3);
@@ -297,56 +488,51 @@ static irqreturn_t adv7180_irq(int irq, void *devid)
return IRQ_HANDLED;
}
-/*
- * Generic i2c probe
- * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
- */
-
-static __devinit int adv7180_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int init_device(struct i2c_client *client, struct adv7180_state *state)
{
- struct adv7180_state *state;
- struct v4l2_subdev *sd;
int ret;
- /* Check if the adapter supports the needed features */
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -EIO;
-
- v4l_info(client, "chip found @ 0x%02x (%s)\n",
- client->addr << 1, client->adapter->name);
-
- state = kzalloc(sizeof(struct adv7180_state), GFP_KERNEL);
- if (state == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- state->irq = client->irq;
- INIT_WORK(&state->work, adv7180_work);
- mutex_init(&state->mutex);
- state->autodetect = true;
- sd = &state->sd;
- v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
-
/* Initialize adv7180 */
/* Enable autodetection */
- ret = i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
- ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM);
- if (ret < 0)
- goto err_unreg_subdev;
+ if (state->autodetect) {
+ ret =
+ i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
+ ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM
+ | state->input);
+ if (ret < 0)
+ return ret;
- ret = i2c_smbus_write_byte_data(client, ADV7180_AUTODETECT_ENABLE_REG,
- ADV7180_AUTODETECT_DEFAULT);
- if (ret < 0)
- goto err_unreg_subdev;
+ ret =
+ i2c_smbus_write_byte_data(client,
+ ADV7180_AUTODETECT_ENABLE_REG,
+ ADV7180_AUTODETECT_DEFAULT);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = v4l2_std_to_adv7180(state->curr_norm);
+ if (ret < 0)
+ return ret;
+ ret =
+ i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
+ ret | state->input);
+ if (ret < 0)
+ return ret;
+
+ }
/* ITU-R BT.656-4 compatible */
ret = i2c_smbus_write_byte_data(client,
- ADV7180_EXTENDED_OUTPUT_CONTROL_REG,
- ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS);
+ ADV7180_EXTENDED_OUTPUT_CONTROL_REG,
+ ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
+
+ /* Manually set V bit end position in NTSC mode */
+ ret = i2c_smbus_write_byte_data(client,
+ ADV7180_NTSC_V_BIT_END_REG,
+ ADV7180_NTSC_V_BIT_END_MANUAL_NVEND);
+ if (ret < 0)
+ return ret;
/* read current norm */
__adv7180_status(client, NULL, &state->curr_norm);
@@ -354,45 +540,109 @@ static __devinit int adv7180_probe(struct i2c_client *client,
/* register for interrupts */
if (state->irq > 0) {
ret = request_irq(state->irq, adv7180_irq, 0, DRIVER_NAME,
- state);
+ state);
if (ret)
- goto err_unreg_subdev;
+ return ret;
ret = i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
- ADV7180_ADI_CTRL_IRQ_SPACE);
+ ADV7180_ADI_CTRL_IRQ_SPACE);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
/* config the Interrupt pin to be active low */
ret = i2c_smbus_write_byte_data(client, ADV7180_ICONF1_ADI,
- ADV7180_ICONF1_ACTIVE_LOW | ADV7180_ICONF1_PSYNC_ONLY);
+ ADV7180_ICONF1_ACTIVE_LOW |
+ ADV7180_ICONF1_PSYNC_ONLY);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
ret = i2c_smbus_write_byte_data(client, ADV7180_IMR1_ADI, 0);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
ret = i2c_smbus_write_byte_data(client, ADV7180_IMR2_ADI, 0);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
/* enable AD change interrupts */
ret = i2c_smbus_write_byte_data(client, ADV7180_IMR3_ADI,
- ADV7180_IRQ3_AD_CHANGE);
+ ADV7180_IRQ3_AD_CHANGE);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
ret = i2c_smbus_write_byte_data(client, ADV7180_IMR4_ADI, 0);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
ret = i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
- 0);
+ 0);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
}
+ /* Set default values for the controls */
+ ret = i2c_smbus_write_byte_data(client, ADV7180_BRI_REG,
+ state->brightness);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, ADV7180_HUE_REG, state->hue);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, ADV7180_CON_REG,
+ state->contrast);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, ADV7180_SD_SAT_CB_REG,
+ state->saturation);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, ADV7180_SD_SAT_CR_REG,
+ state->saturation);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static __devinit int adv7180_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adv7180_state *state;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ /* Check if the adapter supports the needed features */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+
+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
+ client->addr, client->adapter->name);
+
+ state = kzalloc(sizeof(struct adv7180_state), GFP_KERNEL);
+ if (state == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ state->irq = client->irq;
+ INIT_WORK(&state->work, adv7180_work);
+ mutex_init(&state->mutex);
+ state->autodetect = true;
+ state->brightness = BRI_REG_DEF;
+ state->hue = HUE_REG_DEF;
+ state->contrast = CON_REG_DEF;
+ state->saturation = SAT_REG_DEF;
+ state->input = 0;
+ sd = &state->sd;
+ v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
+
+ ret = init_device(client, state);
+ if (0 != ret)
+ goto err_unreg_subdev;
return 0;
err_unreg_subdev:
@@ -432,16 +682,49 @@ static const struct i2c_device_id adv7180_id[] = {
{},
};
+#ifdef CONFIG_PM
+static int adv7180_suspend(struct i2c_client *client, pm_message_t state)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, ADV7180_PWR_MAN_REG,
+ ADV7180_PWR_MAN_OFF);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int adv7180_resume(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct adv7180_state *state = to_state(sd);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, ADV7180_PWR_MAN_REG,
+ ADV7180_PWR_MAN_ON);
+ if (ret < 0)
+ return ret;
+ ret = init_device(client, state);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+#endif
+
MODULE_DEVICE_TABLE(i2c, adv7180_id);
static struct i2c_driver adv7180_driver = {
.driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- },
- .probe = adv7180_probe,
- .remove = __devexit_p(adv7180_remove),
- .id_table = adv7180_id,
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+ .probe = adv7180_probe,
+ .remove = __devexit_p(adv7180_remove),
+#ifdef CONFIG_PM
+ .suspend = adv7180_suspend,
+ .resume = adv7180_resume,
+#endif
+ .id_table = adv7180_id,
};
module_i2c_driver(adv7180_driver);
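The adv7180 hunks above add user controls (signed brightness, inverted hue, unsigned contrast and saturation), input routing and suspend/resume support. Below is a minimal user-space sketch of exercising the new controls through whatever bridge driver sits on top of the subdevice; it is illustrative only, and the /dev/video0 node and the chosen hue value are assumptions, not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        /* Assumed capture node; the bridge driver on top of the adv7180
         * subdevice forwards these control ioctls to it. */
        int fd = open("/dev/video0", O_RDWR);
        struct v4l2_queryctrl qc;
        struct v4l2_control ctrl;

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Report the range the driver advertises (e.g. -127..128 for hue). */
        memset(&qc, 0, sizeof(qc));
        qc.id = V4L2_CID_HUE;
        if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0)
                printf("hue: %d..%d, default %d\n",
                       qc.minimum, qc.maximum, qc.default_value);

        /* Set a new hue; the subdev writes the negated value to
         * ADV7180_HUE_REG because the register sense is inverted. */
        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.id = V4L2_CID_HUE;
        ctrl.value = 10;
        if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
                perror("VIDIOC_S_CTRL");

        close(fd);
        return 0;
}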
diff --git a/drivers/media/video/adv7343.c b/drivers/media/video/adv7343.c
index 119b60401bf3..2b5aa676a84e 100644
--- a/drivers/media/video/adv7343.c
+++ b/drivers/media/video/adv7343.c
@@ -130,14 +130,12 @@ static int adv7343_setstd(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct adv7343_state *state = to_state(sd);
struct adv7343_std_info *std_info;
- int output_idx, num_std;
+ int num_std;
char *fsc_ptr;
u8 reg, val;
int err = 0;
int i = 0;
- output_idx = state->output;
-
std_info = (struct adv7343_std_info *)stdinfo;
num_std = ARRAY_SIZE(stdinfo);
diff --git a/drivers/media/video/aptina-pll.c b/drivers/media/video/aptina-pll.c
index 0bd3813bb59d..8153a449846b 100644
--- a/drivers/media/video/aptina-pll.c
+++ b/drivers/media/video/aptina-pll.c
@@ -148,9 +148,8 @@ int aptina_pll_calculate(struct device *dev,
unsigned int mf_high;
unsigned int mf_low;
- mf_low = max(roundup(mf_min, mf_inc),
- DIV_ROUND_UP(pll->ext_clock * p1,
- limits->int_clock_max * div));
+ mf_low = roundup(max(mf_min, DIV_ROUND_UP(pll->ext_clock * p1,
+ limits->int_clock_max * div)), mf_inc);
mf_high = min(mf_max, pll->ext_clock * p1 /
(limits->int_clock_min * div));
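The aptina-pll change above moves the roundup() outside the max(): mf_low must be a multiple of mf_inc and no smaller than the internal-clock bound, and rounding only mf_min could leave a value that satisfies the bound but not the step. A stand-alone sketch with made-up numbers (the macros are local stand-ins for the kernel helpers):

#include <stdio.h>

/* Local stand-ins for the kernel's roundup() and max() helpers. */
#define roundup(x, y)   ((((x) + (y) - 1) / (y)) * (y))
#define max(a, b)       ((a) > (b) ? (a) : (b))

int main(void)
{
        /* Made-up values: the internal-clock bound (21) exceeds mf_min (16)
         * and is not a multiple of mf_inc (8). */
        unsigned int mf_min = 16, mf_inc = 8, bound = 21;

        printf("old: %u\n", max(roundup(mf_min, mf_inc), bound));      /* 21 */
        printf("new: %u\n", roundup(max(mf_min, bound), mf_inc));      /* 24 */
        return 0;
}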
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index b6ed44aebe30..e346d32d08ce 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -31,6 +31,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
@@ -403,7 +404,8 @@ static int ar_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, ar->vdev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "Colour AR VGA", sizeof(vcap->card));
strlcpy(vcap->bus_info, "Platform", sizeof(vcap->bus_info));
- vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -709,6 +711,8 @@ static int ar_initialize(struct ar *ar)
static const struct v4l2_file_operations ar_fops = {
.owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
.read = ar_read,
.unlocked_ioctl = video_ioctl2,
};
@@ -769,6 +773,7 @@ static int __init ar_init(void)
ar->vdev.fops = &ar_fops;
ar->vdev.ioctl_ops = &ar_ioctl_ops;
ar->vdev.release = video_device_release_empty;
+ set_bit(V4L2_FL_USE_FH_PRIO, &ar->vdev.flags);
video_set_drvdata(&ar->vdev, ar);
if (vga) {
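Several drivers in this series (arv above, bw-qcam and c-qcam below) start filling device_caps and setting V4L2_CAP_DEVICE_CAPS in querycap. An application that wants the capabilities of the node it actually opened should prefer device_caps when that flag is present; a minimal sketch, with the device path assumed:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        int fd = open("/dev/video0", O_RDWR);   /* assumed node */
        struct v4l2_capability cap;
        __u32 caps;

        if (fd < 0)
                return 1;

        memset(&cap, 0, sizeof(cap));
        if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0)
                return 1;

        /* device_caps describes this node only; capabilities describes the
         * whole physical device. Fall back for drivers not yet converted. */
        caps = (cap.capabilities & V4L2_CAP_DEVICE_CAPS) ?
                cap.device_caps : cap.capabilities;

        if (caps & V4L2_CAP_VIDEO_CAPTURE)
                printf("%s: video capture node\n", cap.card);

        close(fd);
        return 0;
}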
diff --git a/drivers/media/video/as3645a.c b/drivers/media/video/as3645a.c
index 7a3371f044fc..c4b03572dce8 100644
--- a/drivers/media/video/as3645a.c
+++ b/drivers/media/video/as3645a.c
@@ -713,7 +713,7 @@ static int as3645a_resume(struct device *dev)
* The number of LEDs reported in platform data is used to compute default
* limits. Parameters passed through platform data can override those limits.
*/
-static int as3645a_init_controls(struct as3645a *flash)
+static int __devinit as3645a_init_controls(struct as3645a *flash)
{
const struct as3645a_platform_data *pdata = flash->pdata;
struct v4l2_ctrl *ctrl;
@@ -804,8 +804,8 @@ static int as3645a_init_controls(struct as3645a *flash)
return flash->ctrls.error;
}
-static int as3645a_probe(struct i2c_client *client,
- const struct i2c_device_id *devid)
+static int __devinit as3645a_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
{
struct as3645a *flash;
int ret;
@@ -846,7 +846,7 @@ done:
return ret;
}
-static int __exit as3645a_remove(struct i2c_client *client)
+static int __devexit as3645a_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct as3645a *flash = to_as3645a(subdev);
@@ -877,7 +877,7 @@ static struct i2c_driver as3645a_i2c_driver = {
.pm = &as3645a_pm_ops,
},
.probe = as3645a_probe,
- .remove = __exit_p(as3645a_remove),
+ .remove = __devexit_p(as3645a_remove),
.id_table = as3645a_id_table,
};
diff --git a/drivers/media/video/atmel-isi.c b/drivers/media/video/atmel-isi.c
index ec3f6a06f9c3..6274a91c25c7 100644
--- a/drivers/media/video/atmel-isi.c
+++ b/drivers/media/video/atmel-isi.c
@@ -260,7 +260,7 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct atmel_isi *isi = ici->priv;
unsigned long size;
- int ret, bytes_per_line;
+ int ret;
/* Reset ISI */
ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
@@ -271,13 +271,7 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
/* Disable all interrupts */
isi_writel(isi, ISI_INTDIS, ~0UL);
- bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
-
- if (bytes_per_line < 0)
- return bytes_per_line;
-
- size = bytes_per_line * icd->user_height;
+ size = icd->sizeimage;
if (!*nbuffers || *nbuffers > MAX_BUFFER_NUM)
*nbuffers = MAX_BUFFER_NUM;
@@ -316,13 +310,8 @@ static int buffer_prepare(struct vb2_buffer *vb)
struct atmel_isi *isi = ici->priv;
unsigned long size;
struct isi_dma_desc *desc;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
-
- if (bytes_per_line < 0)
- return bytes_per_line;
- size = bytes_per_line * icd->user_height;
+ size = icd->sizeimage;
if (vb2_plane_size(vb, 0) < size) {
dev_err(icd->parent, "%s data will not fit into plane (%lu < %lu)\n",
@@ -638,6 +627,7 @@ static const struct soc_mbus_pixelfmt isi_camera_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
};
diff --git a/drivers/media/video/au0828/Kconfig b/drivers/media/video/au0828/Kconfig
index 81ba9d9d1b52..23f7fd22f0eb 100644
--- a/drivers/media/video/au0828/Kconfig
+++ b/drivers/media/video/au0828/Kconfig
@@ -6,7 +6,8 @@ config VIDEO_AU0828
select I2C_ALGOBIT
select VIDEO_TVEEPROM
select VIDEOBUF_VMALLOC
- select DVB_AU8522 if !DVB_FE_CUSTOMISE
+ select DVB_AU8522_DTV if !DVB_FE_CUSTOMISE
+ select DVB_AU8522_V4L if !DVB_FE_CUSTOMISE
select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MXL5007T if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_TDA18271 if !MEDIA_TUNER_CUSTOMISE
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 1c6015a04f96..e3fe9a6637f6 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -325,6 +325,8 @@ struct usb_device_id au0828_usb_id_table[] = {
.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
{ USB_DEVICE(0x2040, 0x7281),
.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
+ { USB_DEVICE(0x05e1, 0x0480),
+ .driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
{ USB_DEVICE(0x2040, 0x8200),
.driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
{ USB_DEVICE(0x2040, 0x7260),
diff --git a/drivers/media/video/au0828/au0828-dvb.c b/drivers/media/video/au0828/au0828-dvb.c
index 518216743c9c..39ece8e24985 100644
--- a/drivers/media/video/au0828/au0828-dvb.c
+++ b/drivers/media/video/au0828/au0828-dvb.c
@@ -25,6 +25,7 @@
#include <linux/device.h>
#include <linux/suspend.h>
#include <media/v4l2-common.h>
+#include <media/tuner.h>
#include "au0828.h"
#include "au8522.h"
@@ -79,9 +80,16 @@ static struct au8522_config hauppauge_woodbury_config = {
.vsb_if = AU8522_IF_3_25MHZ,
};
-static struct xc5000_config hauppauge_hvr950q_tunerconfig = {
+static struct xc5000_config hauppauge_xc5000a_config = {
.i2c_address = 0x61,
.if_khz = 6000,
+ .chip_id = XC5000A,
+};
+
+static struct xc5000_config hauppauge_xc5000c_config = {
+ .i2c_address = 0x61,
+ .if_khz = 6000,
+ .chip_id = XC5000C,
};
static struct mxl5007t_config mxl5007t_hvr950q_config = {
@@ -383,8 +391,19 @@ int au0828_dvb_register(struct au0828_dev *dev)
&hauppauge_hvr950q_config,
&dev->i2c_adap);
if (dvb->frontend != NULL)
- dvb_attach(xc5000_attach, dvb->frontend, &dev->i2c_adap,
- &hauppauge_hvr950q_tunerconfig);
+ switch (dev->board.tuner_type) {
+ default:
+ case TUNER_XC5000:
+ dvb_attach(xc5000_attach, dvb->frontend,
+ &dev->i2c_adap,
+ &hauppauge_xc5000a_config);
+ break;
+ case TUNER_XC5000C:
+ dvb_attach(xc5000_attach, dvb->frontend,
+ &dev->i2c_adap,
+ &hauppauge_xc5000c_config);
+ break;
+ }
break;
case AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL:
dvb->frontend = dvb_attach(au8522_attach,
@@ -411,7 +430,7 @@ int au0828_dvb_register(struct au0828_dev *dev)
if (dvb->frontend != NULL) {
dvb_attach(xc5000_attach, dvb->frontend,
&dev->i2c_adap,
- &hauppauge_hvr950q_tunerconfig);
+ &hauppauge_xc5000a_config);
}
break;
default:
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index 0b3e481ffe8c..ac3dd733ab81 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -120,7 +120,7 @@ static void au0828_irq_callback(struct urb *urb)
struct au0828_dmaqueue *dma_q = urb->context;
struct au0828_dev *dev = container_of(dma_q, struct au0828_dev, vidq);
unsigned long flags = 0;
- int rc, i;
+ int i;
switch (urb->status) {
case 0: /* success */
@@ -138,7 +138,7 @@ static void au0828_irq_callback(struct urb *urb)
/* Copy data from URB */
spin_lock_irqsave(&dev->slock, flags);
- rc = dev->isoc_ctl.isoc_copy(dev, urb);
+ dev->isoc_ctl.isoc_copy(dev, urb);
spin_unlock_irqrestore(&dev->slock, flags);
/* Reset urb buffers */
@@ -1881,7 +1881,7 @@ int au0828_analog_register(struct au0828_dev *dev,
int retval = -ENOMEM;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
- int i;
+ int i, ret;
dprintk(1, "au0828_analog_register called!\n");
@@ -1951,8 +1951,8 @@ int au0828_analog_register(struct au0828_dev *dev,
dev->vbi_dev = video_device_alloc();
if (NULL == dev->vbi_dev) {
dprintk(1, "Can't allocate vbi_device.\n");
- kfree(dev->vdev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_vdev;
}
/* Fill the video capture device struct */
@@ -1971,8 +1971,8 @@ int au0828_analog_register(struct au0828_dev *dev,
if (retval != 0) {
dprintk(1, "unable to register video device (error = %d).\n",
retval);
- video_device_release(dev->vdev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_vbi_dev;
}
/* Register the vbi device */
@@ -1981,13 +1981,18 @@ int au0828_analog_register(struct au0828_dev *dev,
if (retval != 0) {
dprintk(1, "unable to register vbi device (error = %d).\n",
retval);
- video_device_release(dev->vbi_dev);
- video_device_release(dev->vdev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_vbi_dev;
}
dprintk(1, "%s completed!\n", __func__);
return 0;
+
+err_vbi_dev:
+ video_device_release(dev->vbi_dev);
+err_vdev:
+ video_device_release(dev->vdev);
+ return ret;
}
diff --git a/drivers/media/video/blackfin/bfin_capture.c b/drivers/media/video/blackfin/bfin_capture.c
index 514fcf742f5a..0aba45e34f70 100644
--- a/drivers/media/video/blackfin/bfin_capture.c
+++ b/drivers/media/video/blackfin/bfin_capture.c
@@ -942,6 +942,10 @@ static int __devinit bcap_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&bcap_dev->dma_queue);
vfd->lock = &bcap_dev->mutex;
+ /* Locking in file operations other than ioctl should be done
+ * by the driver, not the V4L2 core.
+ * This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
/* register video device */
ret = video_register_device(bcap_dev->video_dev, VFL_TYPE_GRABBER, -1);
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index e581b37be789..a9cfb0f4be48 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -663,7 +663,7 @@ static const struct v4l2_queryctrl bttv_ctls[] = {
.minimum = 0,
.maximum = 65535,
.step = 128,
- .default_value = 32768,
+ .default_value = 27648,
.type = V4L2_CTRL_TYPE_INTEGER,
},{
.id = V4L2_CID_SATURATION,
@@ -4394,7 +4394,7 @@ static int __devinit bttv_probe(struct pci_dev *dev,
if (!bttv_tvcards[btv->c.type].no_video) {
bttv_register_video(btv);
bt848_bright(btv,32768);
- bt848_contrast(btv,32768);
+ bt848_contrast(btv, 27648);
bt848_hue(btv,32768);
bt848_sat(btv,32768);
audio_mute(btv, 1);
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index f09df9dffaae..2520219f01ba 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -77,6 +77,9 @@ OTHER DEALINGS IN THE SOFTWARE.
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
/* One from column A... */
#define QC_NOTSET 0
@@ -103,6 +106,7 @@ OTHER DEALINGS IN THE SOFTWARE.
struct qcam {
struct v4l2_device v4l2_dev;
struct video_device vdev;
+ struct v4l2_ctrl_handler hdl;
struct pardevice *pdev;
struct parport *pport;
struct mutex lock;
@@ -646,7 +650,8 @@ static int qcam_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "B&W Quickcam", sizeof(vcap->card));
strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
- vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -674,72 +679,6 @@ static int qcam_s_input(struct file *file, void *fh, unsigned int inp)
return (inp > 0) ? -EINVAL : 0;
}
-static int qcam_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 180);
- case V4L2_CID_CONTRAST:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 192);
- case V4L2_CID_GAMMA:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 105);
- }
- return -EINVAL;
-}
-
-static int qcam_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct qcam *qcam = video_drvdata(file);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = qcam->brightness;
- break;
- case V4L2_CID_CONTRAST:
- ctrl->value = qcam->contrast;
- break;
- case V4L2_CID_GAMMA:
- ctrl->value = qcam->whitebal;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int qcam_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct qcam *qcam = video_drvdata(file);
- int ret = 0;
-
- mutex_lock(&qcam->lock);
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- qcam->brightness = ctrl->value;
- break;
- case V4L2_CID_CONTRAST:
- qcam->contrast = ctrl->value;
- break;
- case V4L2_CID_GAMMA:
- qcam->whitebal = ctrl->value;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret == 0) {
- qc_setscanmode(qcam);
- qcam->status |= QC_PARAM_CHANGE;
- }
- mutex_unlock(&qcam->lock);
- return ret;
-}
-
static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct qcam *qcam = video_drvdata(file);
@@ -856,8 +795,40 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
return len;
}
+static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct qcam *qcam =
+ container_of(ctrl->handler, struct qcam, hdl);
+ int ret = 0;
+
+ mutex_lock(&qcam->lock);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ qcam->brightness = ctrl->val;
+ break;
+ case V4L2_CID_CONTRAST:
+ qcam->contrast = ctrl->val;
+ break;
+ case V4L2_CID_GAMMA:
+ qcam->whitebal = ctrl->val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret == 0) {
+ qc_setscanmode(qcam);
+ qcam->status |= QC_PARAM_CHANGE;
+ }
+ mutex_unlock(&qcam->lock);
+ return ret;
+}
+
static const struct v4l2_file_operations qcam_fops = {
.owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
.unlocked_ioctl = video_ioctl2,
.read = qcam_read,
};
@@ -867,13 +838,17 @@ static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
.vidioc_g_input = qcam_g_input,
.vidioc_s_input = qcam_s_input,
.vidioc_enum_input = qcam_enum_input,
- .vidioc_queryctrl = qcam_queryctrl,
- .vidioc_g_ctrl = qcam_g_ctrl,
- .vidioc_s_ctrl = qcam_s_ctrl,
.vidioc_enum_fmt_vid_cap = qcam_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = qcam_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = qcam_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = qcam_try_fmt_vid_cap,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_ctrl_ops qcam_ctrl_ops = {
+ .s_ctrl = qcam_s_ctrl,
};
/* Initialize the QuickCam driver control structure. This is where
@@ -897,19 +872,35 @@ static struct qcam *qcam_init(struct parport *port)
return NULL;
}
+ v4l2_ctrl_handler_init(&qcam->hdl, 3);
+ v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 180);
+ v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 192);
+ v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
+ V4L2_CID_GAMMA, 0, 255, 1, 105);
+ if (qcam->hdl.error) {
+ v4l2_err(v4l2_dev, "couldn't register controls\n");
+ v4l2_ctrl_handler_free(&qcam->hdl);
+ kfree(qcam);
+ return NULL;
+ }
qcam->pport = port;
qcam->pdev = parport_register_device(port, "bw-qcam", NULL, NULL,
NULL, 0, NULL);
if (qcam->pdev == NULL) {
v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
+ v4l2_ctrl_handler_free(&qcam->hdl);
kfree(qcam);
return NULL;
}
strlcpy(qcam->vdev.name, "Connectix QuickCam", sizeof(qcam->vdev.name));
qcam->vdev.v4l2_dev = v4l2_dev;
+ qcam->vdev.ctrl_handler = &qcam->hdl;
qcam->vdev.fops = &qcam_fops;
qcam->vdev.ioctl_ops = &qcam_ioctl_ops;
+ set_bit(V4L2_FL_USE_FH_PRIO, &qcam->vdev.flags);
qcam->vdev.release = video_device_release_empty;
video_set_drvdata(&qcam->vdev, qcam);
@@ -1003,6 +994,7 @@ static int init_bwqcam(struct parport *port)
static void close_bwqcam(struct qcam *qcam)
{
video_unregister_device(&qcam->vdev);
+ v4l2_ctrl_handler_free(&qcam->hdl);
parport_unregister_device(qcam->pdev);
kfree(qcam);
}
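Because the bw-qcam conversion above (and the c-qcam one below) attaches a v4l2_ctrl_handler to the video_device and wires vidioc_subscribe_event to v4l2_ctrl_subscribe_event, user space can now be notified when a control changes instead of re-reading it. A hedged sketch of that; the device path and the 5-second timeout are assumptions:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        int fd = open("/dev/video0", O_RDWR);   /* assumed node */
        struct v4l2_event_subscription sub;
        struct v4l2_event ev;
        struct pollfd pfd;

        if (fd < 0)
                return 1;

        memset(&sub, 0, sizeof(sub));
        sub.type = V4L2_EVENT_CTRL;
        sub.id = V4L2_CID_BRIGHTNESS;
        if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0) {
                perror("VIDIOC_SUBSCRIBE_EVENT");
                return 1;
        }

        pfd.fd = fd;
        pfd.events = POLLPRI;                   /* events arrive as POLLPRI */
        if (poll(&pfd, 1, 5000) > 0 &&
            ioctl(fd, VIDIOC_DQEVENT, &ev) == 0 && ev.type == V4L2_EVENT_CTRL)
                printf("brightness changed to %d\n", ev.u.ctrl.value);

        close(fd);
        return 0;
}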
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index fda32f52554a..ec51e1f12e82 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -40,10 +40,14 @@
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
struct qcam {
struct v4l2_device v4l2_dev;
struct video_device vdev;
+ struct v4l2_ctrl_handler hdl;
struct pardevice *pdev;
struct parport *pport;
int width, height;
@@ -378,7 +382,7 @@ get_fragment:
static long qc_capture(struct qcam *qcam, char __user *buf, unsigned long len)
{
struct v4l2_device *v4l2_dev = &qcam->v4l2_dev;
- unsigned lines, pixelsperline, bitsperxfer;
+ unsigned lines, pixelsperline;
unsigned int is_bi_dir = qcam->bidirectional;
size_t wantlen, outptr = 0;
char tmpbuf[BUFSZ];
@@ -404,7 +408,6 @@ static long qc_capture(struct qcam *qcam, char __user *buf, unsigned long len)
lines = qcam->height;
pixelsperline = qcam->width;
- bitsperxfer = (is_bi_dir) ? 24 : 8;
if (is_bi_dir) {
/* Turn the port around */
@@ -516,7 +519,8 @@ static int qcam_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "Color Quickcam", sizeof(vcap->card));
strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
- vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -544,73 +548,6 @@ static int qcam_s_input(struct file *file, void *fh, unsigned int inp)
return (inp > 0) ? -EINVAL : 0;
}
-static int qcam_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 240);
- case V4L2_CID_CONTRAST:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 192);
- case V4L2_CID_GAMMA:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
- }
- return -EINVAL;
-}
-
-static int qcam_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct qcam *qcam = video_drvdata(file);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = qcam->brightness;
- break;
- case V4L2_CID_CONTRAST:
- ctrl->value = qcam->contrast;
- break;
- case V4L2_CID_GAMMA:
- ctrl->value = qcam->whitebal;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int qcam_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct qcam *qcam = video_drvdata(file);
- int ret = 0;
-
- mutex_lock(&qcam->lock);
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- qcam->brightness = ctrl->value;
- break;
- case V4L2_CID_CONTRAST:
- qcam->contrast = ctrl->value;
- break;
- case V4L2_CID_GAMMA:
- qcam->whitebal = ctrl->value;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret == 0) {
- parport_claim_or_block(qcam->pdev);
- qc_setup(qcam);
- parport_release(qcam->pdev);
- }
- mutex_unlock(&qcam->lock);
- return ret;
-}
-
static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct qcam *qcam = video_drvdata(file);
@@ -714,8 +651,41 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
return len;
}
+static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct qcam *qcam =
+ container_of(ctrl->handler, struct qcam, hdl);
+ int ret = 0;
+
+ mutex_lock(&qcam->lock);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ qcam->brightness = ctrl->val;
+ break;
+ case V4L2_CID_CONTRAST:
+ qcam->contrast = ctrl->val;
+ break;
+ case V4L2_CID_GAMMA:
+ qcam->whitebal = ctrl->val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret == 0) {
+ parport_claim_or_block(qcam->pdev);
+ qc_setup(qcam);
+ parport_release(qcam->pdev);
+ }
+ mutex_unlock(&qcam->lock);
+ return ret;
+}
+
static const struct v4l2_file_operations qcam_fops = {
.owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
.unlocked_ioctl = video_ioctl2,
.read = qcam_read,
};
@@ -725,13 +695,17 @@ static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
.vidioc_g_input = qcam_g_input,
.vidioc_s_input = qcam_s_input,
.vidioc_enum_input = qcam_enum_input,
- .vidioc_queryctrl = qcam_queryctrl,
- .vidioc_g_ctrl = qcam_g_ctrl,
- .vidioc_s_ctrl = qcam_s_ctrl,
- .vidioc_enum_fmt_vid_cap = qcam_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = qcam_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = qcam_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = qcam_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = qcam_try_fmt_vid_cap,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_ctrl_ops qcam_ctrl_ops = {
+ .s_ctrl = qcam_s_ctrl,
};
/* Initialize the QuickCam driver control structure. */
@@ -754,6 +728,20 @@ static struct qcam *qcam_init(struct parport *port)
return NULL;
}
+ v4l2_ctrl_handler_init(&qcam->hdl, 3);
+ v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 240);
+ v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 192);
+ v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
+ V4L2_CID_GAMMA, 0, 255, 1, 128);
+ if (qcam->hdl.error) {
+ v4l2_err(v4l2_dev, "couldn't register controls\n");
+ v4l2_ctrl_handler_free(&qcam->hdl);
+ kfree(qcam);
+ return NULL;
+ }
+
qcam->pport = port;
qcam->pdev = parport_register_device(port, "c-qcam", NULL, NULL,
NULL, 0, NULL);
@@ -762,6 +750,7 @@ static struct qcam *qcam_init(struct parport *port)
if (qcam->pdev == NULL) {
v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
+ v4l2_ctrl_handler_free(&qcam->hdl);
kfree(qcam);
return NULL;
}
@@ -771,6 +760,8 @@ static struct qcam *qcam_init(struct parport *port)
qcam->vdev.fops = &qcam_fops;
qcam->vdev.ioctl_ops = &qcam_ioctl_ops;
qcam->vdev.release = video_device_release_empty;
+ qcam->vdev.ctrl_handler = &qcam->hdl;
+ set_bit(V4L2_FL_USE_FH_PRIO, &qcam->vdev.flags);
video_set_drvdata(&qcam->vdev, qcam);
mutex_init(&qcam->lock);
@@ -845,6 +836,7 @@ static int init_cqcam(struct parport *port)
static void close_cqcam(struct qcam *qcam)
{
video_unregister_device(&qcam->vdev);
+ v4l2_ctrl_handler_free(&qcam->hdl);
parport_unregister_device(qcam->pdev);
kfree(qcam);
}
diff --git a/drivers/media/video/cpia2/cpia2.h b/drivers/media/video/cpia2/cpia2.h
index ab252188981b..cdef677d57ec 100644
--- a/drivers/media/video/cpia2/cpia2.h
+++ b/drivers/media/video/cpia2/cpia2.h
@@ -32,11 +32,12 @@
#define __CPIA2_H__
#include <linux/videodev2.h>
-#include <media/v4l2-common.h>
#include <linux/usb.h>
#include <linux/poll.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
-#include "cpia2dev.h"
#include "cpia2_registers.h"
/* define for verbose debug output */
@@ -65,7 +66,6 @@
/* Flicker Modes */
#define NEVER_FLICKER 0
-#define ANTI_FLICKER_ON 1
#define FLICKER_60 60
#define FLICKER_50 50
@@ -148,7 +148,6 @@ enum {
#define DEFAULT_BRIGHTNESS 0x46
#define DEFAULT_CONTRAST 0x93
#define DEFAULT_SATURATION 0x7f
-#define DEFAULT_TARGET_KB 0x30
/* Power state */
#define HI_POWER_MODE CPIA2_SYSTEM_CONTROL_HIGH_POWER
@@ -287,7 +286,6 @@ struct camera_params {
struct {
u8 cam_register;
u8 flicker_mode_req; /* 1 if flicker on, else never flicker */
- int mains_frequency;
} flicker_control;
struct {
@@ -337,7 +335,7 @@ struct camera_params {
u8 vc_control;
u8 vc_mp_direction;
u8 vc_mp_data;
- u8 target_kb;
+ u8 quality;
} vc_params;
struct {
@@ -366,23 +364,23 @@ struct framebuf {
struct framebuf *next;
};
-struct cpia2_fh {
- enum v4l2_priority prio;
- u8 mmapped;
-};
-
struct camera_data {
/* locks */
+ struct v4l2_device v4l2_dev;
struct mutex v4l2_lock; /* serialize file operations */
- struct v4l2_prio_state prio;
+ struct v4l2_ctrl_handler hdl;
+ struct {
+ /* Lights control cluster */
+ struct v4l2_ctrl *top_light;
+ struct v4l2_ctrl *bottom_light;
+ };
+ struct v4l2_ctrl *usb_alt;
/* camera status */
- volatile int present; /* Is the camera still present? */
- int open_count; /* # of process that have camera open */
int first_image_seen;
- u8 mains_freq; /* for flicker control */
enum sensors sensor_type;
u8 flush;
+ struct v4l2_fh *stream_fh;
u8 mmapped;
int streaming; /* 0 = no, 1 = yes */
int xfer_mode; /* XFER_BULK or XFER_ISOC */
@@ -390,7 +388,7 @@ struct camera_data {
/* v4l */
int video_size; /* VIDEO_SIZE_ */
- struct video_device *vdev; /* v4l videodev */
+ struct video_device vdev; /* v4l videodev */
u32 width;
u32 height; /* Its size */
__u32 pixelformat; /* Format fourcc */
@@ -425,6 +423,7 @@ struct camera_data {
/* v4l */
int cpia2_register_camera(struct camera_data *cam);
void cpia2_unregister_camera(struct camera_data *cam);
+void cpia2_camera_release(struct v4l2_device *v4l2_dev);
/* core */
int cpia2_reset_camera(struct camera_data *cam);
@@ -443,7 +442,7 @@ int cpia2_send_command(struct camera_data *cam, struct cpia2_command *cmd);
int cpia2_do_command(struct camera_data *cam,
unsigned int command,
unsigned char direction, unsigned char param);
-struct camera_data *cpia2_init_camera_struct(void);
+struct camera_data *cpia2_init_camera_struct(struct usb_interface *intf);
int cpia2_init_camera(struct camera_data *cam);
int cpia2_allocate_buffers(struct camera_data *cam);
void cpia2_free_buffers(struct camera_data *cam);
@@ -454,7 +453,6 @@ unsigned int cpia2_poll(struct camera_data *cam,
int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma);
void cpia2_set_property_flip(struct camera_data *cam, int prop_val);
void cpia2_set_property_mirror(struct camera_data *cam, int prop_val);
-int cpia2_set_target_kb(struct camera_data *cam, unsigned char value);
int cpia2_set_gpio(struct camera_data *cam, unsigned char setting);
int cpia2_set_fps(struct camera_data *cam, int framerate);
diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
index ee91e295c90a..17188e234770 100644
--- a/drivers/media/video/cpia2/cpia2_core.c
+++ b/drivers/media/video/cpia2/cpia2_core.c
@@ -66,7 +66,6 @@ static int config_sensor_410(struct camera_data *cam,
static int config_sensor_500(struct camera_data *cam,
int reqwidth, int reqheight);
static int set_all_properties(struct camera_data *cam);
-static void get_color_params(struct camera_data *cam);
static void wake_system(struct camera_data *cam);
static void set_lowlight_boost(struct camera_data *cam);
static void reset_camera_struct(struct camera_data *cam);
@@ -453,15 +452,6 @@ int cpia2_do_command(struct camera_data *cam,
cam->params.version.vp_device_hi = cmd.buffer.block_data[0];
cam->params.version.vp_device_lo = cmd.buffer.block_data[1];
break;
- case CPIA2_CMD_GET_VP_BRIGHTNESS:
- cam->params.color_params.brightness = cmd.buffer.block_data[0];
- break;
- case CPIA2_CMD_GET_CONTRAST:
- cam->params.color_params.contrast = cmd.buffer.block_data[0];
- break;
- case CPIA2_CMD_GET_VP_SATURATION:
- cam->params.color_params.saturation = cmd.buffer.block_data[0];
- break;
case CPIA2_CMD_GET_VP_GPIO_DATA:
cam->params.vp_params.gpio_data = cmd.buffer.block_data[0];
break;
@@ -617,6 +607,7 @@ int cpia2_reset_camera(struct camera_data *cam)
{
u8 tmp_reg;
int retval = 0;
+ int target_kb;
int i;
struct cpia2_command cmd;
@@ -800,9 +791,16 @@ int cpia2_reset_camera(struct camera_data *cam)
}
cpia2_do_command(cam, CPIA2_CMD_SET_VC_CONTROL, TRANSFER_WRITE,tmp_reg);
- /* Set target size (kb) on vc */
+ /* Set target size (kb) on vc.
+ * This is a heuristic based on the quality parameter and the raw
+ * frame size in kB divided by 16 (the compression factor when the
+ * quality is 100%). */
+ target_kb = (cam->width * cam->height * 2 / 16384) *
+ cam->params.vc_params.quality / 100;
+ if (target_kb < 1)
+ target_kb = 1;
cpia2_do_command(cam, CPIA2_CMD_SET_TARGET_KB,
- TRANSFER_WRITE, cam->params.vc_params.target_kb);
+ TRANSFER_WRITE, target_kb);
/* Wiggle VC Reset */
/***
@@ -1538,23 +1536,17 @@ static int set_all_properties(struct camera_data *cam)
* framerate and user_mode were already set (set_default_user_mode).
**/
- cpia2_set_color_params(cam);
-
cpia2_usb_change_streaming_alternate(cam,
cam->params.camera_state.stream_mode);
- cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE,
- cam->params.vp_params.user_effects);
-
- cpia2_set_flicker_mode(cam,
- cam->params.flicker_control.flicker_mode_req);
-
cpia2_do_command(cam,
CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION,
TRANSFER_WRITE, cam->params.vp_params.gpio_direction);
cpia2_do_command(cam, CPIA2_CMD_SET_VC_MP_GPIO_DATA, TRANSFER_WRITE,
cam->params.vp_params.gpio_data);
+ v4l2_ctrl_handler_setup(&cam->hdl);
+
wake_system(cam);
set_lowlight_boost(cam);
@@ -1569,7 +1561,6 @@ static int set_all_properties(struct camera_data *cam)
*****************************************************************************/
void cpia2_save_camera_state(struct camera_data *cam)
{
- get_color_params(cam);
cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS, TRANSFER_READ, 0);
cpia2_do_command(cam, CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION, TRANSFER_READ,
0);
@@ -1577,30 +1568,6 @@ void cpia2_save_camera_state(struct camera_data *cam)
/* Don't get framerate or target_kb. Trust the values we already have */
}
-/******************************************************************************
- *
- * get_color_params
- *
- *****************************************************************************/
-static void get_color_params(struct camera_data *cam)
-{
- cpia2_do_command(cam, CPIA2_CMD_GET_VP_BRIGHTNESS, TRANSFER_READ, 0);
- cpia2_do_command(cam, CPIA2_CMD_GET_VP_SATURATION, TRANSFER_READ, 0);
- cpia2_do_command(cam, CPIA2_CMD_GET_CONTRAST, TRANSFER_READ, 0);
-}
-
-/******************************************************************************
- *
- * cpia2_set_color_params
- *
- *****************************************************************************/
-void cpia2_set_color_params(struct camera_data *cam)
-{
- DBG("Setting color params\n");
- cpia2_set_brightness(cam, cam->params.color_params.brightness);
- cpia2_set_contrast(cam, cam->params.color_params.contrast);
- cpia2_set_saturation(cam, cam->params.color_params.saturation);
-}
/******************************************************************************
*
@@ -1664,15 +1631,9 @@ int cpia2_set_flicker_mode(struct camera_data *cam, int mode)
switch(mode) {
case NEVER_FLICKER:
- cam->params.flicker_control.flicker_mode_req = mode;
- break;
case FLICKER_60:
- cam->params.flicker_control.flicker_mode_req = mode;
- cam->params.flicker_control.mains_frequency = 60;
- break;
case FLICKER_50:
cam->params.flicker_control.flicker_mode_req = mode;
- cam->params.flicker_control.mains_frequency = 50;
break;
default:
err = -EINVAL;
@@ -1701,6 +1662,7 @@ void cpia2_set_property_flip(struct camera_data *cam, int prop_val)
{
cam_reg &= ~CPIA2_VP_USER_EFFECTS_FLIP;
}
+ cam->params.vp_params.user_effects = cam_reg;
cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE,
cam_reg);
}
@@ -1725,37 +1687,13 @@ void cpia2_set_property_mirror(struct camera_data *cam, int prop_val)
{
cam_reg &= ~CPIA2_VP_USER_EFFECTS_MIRROR;
}
+ cam->params.vp_params.user_effects = cam_reg;
cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE,
cam_reg);
}
/******************************************************************************
*
- * set_target_kb
- *
- * The new Target KB is set in cam->params.vc_params.target_kb and
- * activates on reset.
- *****************************************************************************/
-
-int cpia2_set_target_kb(struct camera_data *cam, unsigned char value)
-{
- DBG("Requested target_kb = %d\n", value);
- if (value != cam->params.vc_params.target_kb) {
-
- cpia2_usb_stream_pause(cam);
-
- /* reset camera for new target_kb */
- cam->params.vc_params.target_kb = value;
- cpia2_reset_camera(cam);
-
- cpia2_usb_stream_resume(cam);
- }
-
- return 0;
-}
-
-/******************************************************************************
- *
* cpia2_set_gpio
*
*****************************************************************************/
@@ -1843,7 +1781,7 @@ void cpia2_set_brightness(struct camera_data *cam, unsigned char value)
if (cam->params.pnp_id.device_type == DEVICE_STV_672 && value == 0)
value++;
DBG("Setting brightness to %d (0x%0x)\n", value, value);
- cpia2_do_command(cam,CPIA2_CMD_SET_VP_BRIGHTNESS, TRANSFER_WRITE,value);
+ cpia2_do_command(cam, CPIA2_CMD_SET_VP_BRIGHTNESS, TRANSFER_WRITE, value);
}
/******************************************************************************
@@ -1854,7 +1792,6 @@ void cpia2_set_brightness(struct camera_data *cam, unsigned char value)
void cpia2_set_contrast(struct camera_data *cam, unsigned char value)
{
DBG("Setting contrast to %d (0x%0x)\n", value, value);
- cam->params.color_params.contrast = value;
cpia2_do_command(cam, CPIA2_CMD_SET_CONTRAST, TRANSFER_WRITE, value);
}
@@ -1866,7 +1803,6 @@ void cpia2_set_contrast(struct camera_data *cam, unsigned char value)
void cpia2_set_saturation(struct camera_data *cam, unsigned char value)
{
DBG("Setting saturation to %d (0x%0x)\n", value, value);
- cam->params.color_params.saturation = value;
cpia2_do_command(cam,CPIA2_CMD_SET_VP_SATURATION, TRANSFER_WRITE,value);
}
@@ -2168,14 +2104,10 @@ static void reset_camera_struct(struct camera_data *cam)
/***
* The following parameter values are the defaults from the register map.
***/
- cam->params.color_params.brightness = DEFAULT_BRIGHTNESS;
- cam->params.color_params.contrast = DEFAULT_CONTRAST;
- cam->params.color_params.saturation = DEFAULT_SATURATION;
cam->params.vp_params.lowlight_boost = 0;
/* FlickerModes */
cam->params.flicker_control.flicker_mode_req = NEVER_FLICKER;
- cam->params.flicker_control.mains_frequency = 60;
/* jpeg params */
cam->params.compression.jpeg_options = CPIA2_VC_VC_JPEG_OPT_DEFAULT;
@@ -2188,7 +2120,7 @@ static void reset_camera_struct(struct camera_data *cam)
cam->params.vp_params.gpio_data = 0;
/* Target kb params */
- cam->params.vc_params.target_kb = DEFAULT_TARGET_KB;
+ cam->params.vc_params.quality = 100;
/***
* Set Sensor FPS as fast as possible.
@@ -2228,7 +2160,7 @@ static void reset_camera_struct(struct camera_data *cam)
*
* Initializes camera struct, does not call reset to fill in defaults.
*****************************************************************************/
-struct camera_data *cpia2_init_camera_struct(void)
+struct camera_data *cpia2_init_camera_struct(struct usb_interface *intf)
{
struct camera_data *cam;
@@ -2239,8 +2171,13 @@ struct camera_data *cpia2_init_camera_struct(void)
return NULL;
}
+ cam->v4l2_dev.release = cpia2_camera_release;
+ if (v4l2_device_register(&intf->dev, &cam->v4l2_dev) < 0) {
+ v4l2_err(&cam->v4l2_dev, "couldn't register v4l2_device\n");
+ kfree(cam);
+ return NULL;
+ }
- cam->present = 1;
mutex_init(&cam->v4l2_lock);
init_waitqueue_head(&cam->wq_stream);
@@ -2373,11 +2310,6 @@ long cpia2_read(struct camera_data *cam,
return -EINVAL;
}
- if (!cam->present) {
- LOG("%s: camera removed\n",__func__);
- return 0; /* EOF */
- }
-
if (!cam->streaming) {
/* Start streaming */
cpia2_usb_stream_start(cam,
@@ -2393,12 +2325,12 @@ long cpia2_read(struct camera_data *cam,
if (frame->status != FRAME_READY) {
mutex_unlock(&cam->v4l2_lock);
wait_event_interruptible(cam->wq_stream,
- !cam->present ||
+ !video_is_registered(&cam->vdev) ||
(frame = cam->curbuff)->status == FRAME_READY);
mutex_lock(&cam->v4l2_lock);
if (signal_pending(current))
return -ERESTARTSYS;
- if (!cam->present)
+ if (!video_is_registered(&cam->vdev))
return 0;
}
@@ -2423,17 +2355,10 @@ long cpia2_read(struct camera_data *cam,
unsigned int cpia2_poll(struct camera_data *cam, struct file *filp,
poll_table *wait)
{
- unsigned int status=0;
+ unsigned int status = v4l2_ctrl_poll(filp, wait);
- if (!cam) {
- ERR("%s: Internal error, camera_data not found!\n",__func__);
- return POLLERR;
- }
-
- if (!cam->present)
- return POLLHUP;
-
- if(!cam->streaming) {
+ if ((poll_requested_events(wait) & (POLLIN | POLLRDNORM)) &&
+ !cam->streaming) {
/* Start streaming */
cpia2_usb_stream_start(cam,
cam->params.camera_state.stream_mode);
@@ -2441,10 +2366,8 @@ unsigned int cpia2_poll(struct camera_data *cam, struct file *filp,
poll_wait(filp, &cam->wq_stream, wait);
- if(!cam->present)
- status = POLLHUP;
- else if(cam->curbuff->status == FRAME_READY)
- status = POLLIN | POLLRDNORM;
+ if (cam->curbuff->status == FRAME_READY)
+ status |= POLLIN | POLLRDNORM;
return status;
}
@@ -2462,12 +2385,9 @@ int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma)
unsigned long start = (unsigned long) adr;
unsigned long page, pos;
- if (!cam)
- return -ENODEV;
-
DBG("mmap offset:%ld size:%ld\n", start_offset, size);
- if (!cam->present)
+ if (!video_is_registered(&cam->vdev))
return -ENODEV;
if (size > cam->frame_size*cam->num_frames ||
diff --git a/drivers/media/video/cpia2/cpia2_usb.c b/drivers/media/video/cpia2/cpia2_usb.c
index 59c797c15277..95b5d6e7cdc4 100644
--- a/drivers/media/video/cpia2/cpia2_usb.c
+++ b/drivers/media/video/cpia2/cpia2_usb.c
@@ -54,6 +54,8 @@ static void cpia2_usb_complete(struct urb *urb);
static int cpia2_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void cpia2_usb_disconnect(struct usb_interface *intf);
+static int cpia2_usb_suspend(struct usb_interface *intf, pm_message_t message);
+static int cpia2_usb_resume(struct usb_interface *intf);
static void free_sbufs(struct camera_data *cam);
static void add_APPn(struct camera_data *cam);
@@ -74,6 +76,9 @@ static struct usb_driver cpia2_driver = {
.name = "cpia2",
.probe = cpia2_usb_probe,
.disconnect = cpia2_usb_disconnect,
+ .suspend = cpia2_usb_suspend,
+ .resume = cpia2_usb_resume,
+ .reset_resume = cpia2_usb_resume,
.id_table = cpia2_id_table
};
@@ -218,10 +223,9 @@ static void cpia2_usb_complete(struct urb *urb)
return;
}
- if (!cam->streaming || !cam->present || cam->open_count == 0) {
- LOG("Will now stop the streaming: streaming = %d, "
- "present=%d, open_count=%d\n",
- cam->streaming, cam->present, cam->open_count);
+ if (!cam->streaming || !video_is_registered(&cam->vdev)) {
+ LOG("Will now stop the streaming: streaming = %d, present=%d\n",
+ cam->streaming, video_is_registered(&cam->vdev));
return;
}
@@ -392,7 +396,7 @@ static int configure_transfer_mode(struct camera_data *cam, unsigned int alt)
struct cpia2_command cmd;
unsigned char reg;
- if(!cam->present)
+ if (!video_is_registered(&cam->vdev))
return -ENODEV;
/***
@@ -752,8 +756,8 @@ int cpia2_usb_stream_pause(struct camera_data *cam)
{
int ret = 0;
if(cam->streaming) {
- ret = set_alternate(cam, USBIF_CMDONLY);
free_sbufs(cam);
+ ret = set_alternate(cam, USBIF_CMDONLY);
}
return ret;
}
@@ -770,6 +774,10 @@ int cpia2_usb_stream_resume(struct camera_data *cam)
cam->first_image_seen = 0;
ret = set_alternate(cam, cam->params.camera_state.stream_mode);
if(ret == 0) {
+ /* for some reason the user effects need to be set
+ again when starting streaming. */
+ cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE,
+ cam->params.vp_params.user_effects);
ret = submit_urbs(cam);
}
}
@@ -784,6 +792,7 @@ int cpia2_usb_stream_resume(struct camera_data *cam)
int cpia2_usb_stream_stop(struct camera_data *cam)
{
int ret;
+
ret = cpia2_usb_stream_pause(cam);
cam->streaming = 0;
configure_transfer_mode(cam, 0);
@@ -812,7 +821,8 @@ static int cpia2_usb_probe(struct usb_interface *intf,
/* If we get to this point, we found a CPiA2 camera */
LOG("CPiA2 USB camera found\n");
- if((cam = cpia2_init_camera_struct()) == NULL)
+ cam = cpia2_init_camera_struct(intf);
+ if (cam == NULL)
return -ENOMEM;
cam->dev = udev;
@@ -825,16 +835,9 @@ static int cpia2_usb_probe(struct usb_interface *intf,
return ret;
}
- if ((ret = cpia2_register_camera(cam)) < 0) {
- ERR("%s: Failed to register cpia2 camera (ret = %d)\n", __func__, ret);
- kfree(cam);
- return ret;
- }
-
if((ret = cpia2_init_camera(cam)) < 0) {
ERR("%s: failed to initialize cpia2 camera (ret = %d)\n", __func__, ret);
- cpia2_unregister_camera(cam);
kfree(cam);
return ret;
}
@@ -853,6 +856,13 @@ static int cpia2_usb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, cam);
+ ret = cpia2_register_camera(cam);
+ if (ret < 0) {
+ ERR("%s: Failed to register cpia2 camera (ret = %d)\n", __func__, ret);
+ kfree(cam);
+ return ret;
+ }
+
return 0;
}
@@ -865,13 +875,16 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
{
struct camera_data *cam = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
- cam->present = 0;
DBG("Stopping stream\n");
cpia2_usb_stream_stop(cam);
+ mutex_lock(&cam->v4l2_lock);
DBG("Unregistering camera\n");
cpia2_unregister_camera(cam);
+ v4l2_device_disconnect(&cam->v4l2_dev);
+ mutex_unlock(&cam->v4l2_lock);
+ v4l2_device_put(&cam->v4l2_dev);
if(cam->buffers) {
DBG("Wakeup waiting processes\n");
@@ -884,14 +897,41 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
DBG("Releasing interface\n");
usb_driver_release_interface(&cpia2_driver, intf);
- if (cam->open_count == 0) {
- DBG("Freeing camera structure\n");
- kfree(cam);
+ LOG("CPiA2 camera disconnected.\n");
+}
+
+static int cpia2_usb_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct camera_data *cam = usb_get_intfdata(intf);
+
+ mutex_lock(&cam->v4l2_lock);
+ if (cam->streaming) {
+ cpia2_usb_stream_stop(cam);
+ cam->streaming = 1;
}
+ mutex_unlock(&cam->v4l2_lock);
- LOG("CPiA2 camera disconnected.\n");
+ dev_info(&intf->dev, "going into suspend..\n");
+ return 0;
}
+/* Resume device - start device. */
+static int cpia2_usb_resume(struct usb_interface *intf)
+{
+ struct camera_data *cam = usb_get_intfdata(intf);
+
+ mutex_lock(&cam->v4l2_lock);
+ v4l2_ctrl_handler_setup(&cam->hdl);
+ if (cam->streaming) {
+ cam->streaming = 0;
+ cpia2_usb_stream_start(cam,
+ cam->params.camera_state.stream_mode);
+ }
+ mutex_unlock(&cam->v4l2_lock);
+
+ dev_info(&intf->dev, "coming out of suspend..\n");
+ return 0;
+}
/******************************************************************************
*
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 077eb1db80a1..55e92902a76c 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -39,15 +39,15 @@
#include <linux/videodev2.h>
#include <linux/stringify.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
#include "cpia2.h"
-#include "cpia2dev.h"
static int video_nr = -1;
module_param(video_nr, int, 0);
-MODULE_PARM_DESC(video_nr,"video device to register (0=/dev/video0, etc)");
+MODULE_PARM_DESC(video_nr, "video device to register (0=/dev/video0, etc)");
-static int buffer_size = 68*1024;
+static int buffer_size = 68 * 1024;
module_param(buffer_size, int, 0);
MODULE_PARM_DESC(buffer_size, "Size for each frame buffer in bytes (default 68k)");
@@ -62,18 +62,10 @@ MODULE_PARM_DESC(alternate, "USB Alternate (" __stringify(USBIF_ISO_1) "-"
__stringify(USBIF_ISO_6) ", default "
__stringify(DEFAULT_ALT) ")");
-static int flicker_freq = 60;
-module_param(flicker_freq, int, 0);
-MODULE_PARM_DESC(flicker_freq, "Flicker frequency (" __stringify(50) "or"
- __stringify(60) ", default "
- __stringify(60) ")");
-
-static int flicker_mode = NEVER_FLICKER;
+static int flicker_mode;
module_param(flicker_mode, int, 0);
-MODULE_PARM_DESC(flicker_mode,
- "Flicker supression (" __stringify(NEVER_FLICKER) "or"
- __stringify(ANTI_FLICKER_ON) ", default "
- __stringify(NEVER_FLICKER) ")");
+MODULE_PARM_DESC(flicker_mode, "Flicker frequency (0 (disabled), " __stringify(50) " or "
+ __stringify(60) ", default 0)");
MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>");
MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras");
@@ -82,153 +74,7 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(CPIA_VERSION);
#define ABOUT "V4L-Driver for Vision CPiA2 based cameras"
-
-struct control_menu_info {
- int value;
- char name[32];
-};
-
-static struct control_menu_info framerate_controls[] =
-{
- { CPIA2_VP_FRAMERATE_6_25, "6.25 fps" },
- { CPIA2_VP_FRAMERATE_7_5, "7.5 fps" },
- { CPIA2_VP_FRAMERATE_12_5, "12.5 fps" },
- { CPIA2_VP_FRAMERATE_15, "15 fps" },
- { CPIA2_VP_FRAMERATE_25, "25 fps" },
- { CPIA2_VP_FRAMERATE_30, "30 fps" },
-};
-#define NUM_FRAMERATE_CONTROLS (ARRAY_SIZE(framerate_controls))
-
-static struct control_menu_info flicker_controls[] =
-{
- { NEVER_FLICKER, "Off" },
- { FLICKER_50, "50 Hz" },
- { FLICKER_60, "60 Hz" },
-};
-#define NUM_FLICKER_CONTROLS (ARRAY_SIZE(flicker_controls))
-
-static struct control_menu_info lights_controls[] =
-{
- { 0, "Off" },
- { 64, "Top" },
- { 128, "Bottom" },
- { 192, "Both" },
-};
-#define NUM_LIGHTS_CONTROLS (ARRAY_SIZE(lights_controls))
-#define GPIO_LIGHTS_MASK 192
-
-static struct v4l2_queryctrl controls[] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = DEFAULT_BRIGHTNESS,
- },
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = DEFAULT_CONTRAST,
- },
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = DEFAULT_SATURATION,
- },
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mirror Horizontally",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Vertically",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = CPIA2_CID_TARGET_KB,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Target KB",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = DEFAULT_TARGET_KB,
- },
- {
- .id = CPIA2_CID_GPIO,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "GPIO",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = CPIA2_CID_FLICKER_MODE,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Flicker Reduction",
- .minimum = 0,
- .maximum = NUM_FLICKER_CONTROLS-1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = CPIA2_CID_FRAMERATE,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Framerate",
- .minimum = 0,
- .maximum = NUM_FRAMERATE_CONTROLS-1,
- .step = 1,
- .default_value = NUM_FRAMERATE_CONTROLS-1,
- },
- {
- .id = CPIA2_CID_USB_ALT,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "USB Alternate",
- .minimum = USBIF_ISO_1,
- .maximum = USBIF_ISO_6,
- .step = 1,
- .default_value = DEFAULT_ALT,
- },
- {
- .id = CPIA2_CID_LIGHTS,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Lights",
- .minimum = 0,
- .maximum = NUM_LIGHTS_CONTROLS-1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = CPIA2_CID_RESET_CAMERA,
- .type = V4L2_CTRL_TYPE_BUTTON,
- .name = "Reset Camera",
- .minimum = 0,
- .maximum = 0,
- .step = 0,
- .default_value = 0,
- },
-};
-#define NUM_CONTROLS (ARRAY_SIZE(controls))
-
+#define CPIA2_CID_USB_ALT (V4L2_CID_USER_BASE | 0xf000)
/******************************************************************************
*
@@ -238,38 +84,27 @@ static struct v4l2_queryctrl controls[] = {
static int cpia2_open(struct file *file)
{
struct camera_data *cam = video_drvdata(file);
- struct cpia2_fh *fh;
-
- if (!cam) {
- ERR("Internal error, camera_data not found!\n");
- return -ENODEV;
- }
+ int retval = v4l2_fh_open(file);
- if (!cam->present)
- return -ENODEV;
+ if (retval)
+ return retval;
- if (cam->open_count == 0) {
- if (cpia2_allocate_buffers(cam))
+ if (v4l2_fh_is_singular_file(file)) {
+ if (cpia2_allocate_buffers(cam)) {
+ v4l2_fh_release(file);
return -ENOMEM;
+ }
/* reset the camera */
- if (cpia2_reset_camera(cam) < 0)
+ if (cpia2_reset_camera(cam) < 0) {
+ v4l2_fh_release(file);
return -EIO;
+ }
cam->APP_len = 0;
cam->COM_len = 0;
}
- fh = kmalloc(sizeof(*fh), GFP_KERNEL);
- if (!fh)
- return -ENOMEM;
- file->private_data = fh;
- fh->prio = V4L2_PRIORITY_UNSET;
- v4l2_prio_open(&cam->prio, &fh->prio);
- fh->mmapped = 0;
-
- ++cam->open_count;
-
cpia2_dbg_dump_registers(cam);
return 0;
}
@@ -283,37 +118,22 @@ static int cpia2_close(struct file *file)
{
struct video_device *dev = video_devdata(file);
struct camera_data *cam = video_get_drvdata(dev);
- struct cpia2_fh *fh = file->private_data;
- if (cam->present &&
- (cam->open_count == 1 || fh->prio == V4L2_PRIORITY_RECORD)) {
+ if (video_is_registered(&cam->vdev) && v4l2_fh_is_singular_file(file)) {
cpia2_usb_stream_stop(cam);
- if (cam->open_count == 1) {
- /* save camera state for later open */
- cpia2_save_camera_state(cam);
+ /* save camera state for later open */
+ cpia2_save_camera_state(cam);
- cpia2_set_low_power(cam);
- cpia2_free_buffers(cam);
- }
+ cpia2_set_low_power(cam);
+ cpia2_free_buffers(cam);
}
- if (fh->mmapped)
+ if (cam->stream_fh == file->private_data) {
+ cam->stream_fh = NULL;
cam->mmapped = 0;
- v4l2_prio_close(&cam->prio, fh->prio);
- file->private_data = NULL;
- kfree(fh);
-
- if (--cam->open_count == 0) {
- cpia2_free_buffers(cam);
- if (!cam->present) {
- video_unregister_device(dev);
- kfree(cam);
- return 0;
- }
}
-
- return 0;
+ return v4l2_fh_release(file);
}
/******************************************************************************
@@ -327,16 +147,9 @@ static ssize_t cpia2_v4l_read(struct file *file, char __user *buf, size_t count,
struct camera_data *cam = video_drvdata(file);
int noblock = file->f_flags&O_NONBLOCK;
- struct cpia2_fh *fh = file->private_data;
-
if(!cam)
return -EINVAL;
- /* Priority check */
- if(fh->prio != V4L2_PRIORITY_RECORD) {
- return -EBUSY;
- }
-
return cpia2_read(cam, buf, count, noblock);
}
@@ -349,15 +162,6 @@ static ssize_t cpia2_v4l_read(struct file *file, char __user *buf, size_t count,
static unsigned int cpia2_v4l_poll(struct file *filp, struct poll_table_struct *wait)
{
struct camera_data *cam = video_drvdata(filp);
- struct cpia2_fh *fh = filp->private_data;
-
- if(!cam)
- return POLLERR;
-
- /* Priority check */
- if(fh->prio != V4L2_PRIORITY_RECORD) {
- return POLLERR;
- }
return cpia2_poll(cam, filp, wait);
}
@@ -384,36 +188,13 @@ static int sync(struct camera_data *cam, int frame_nr)
mutex_lock(&cam->v4l2_lock);
if (signal_pending(current))
return -ERESTARTSYS;
- if(!cam->present)
+ if (!video_is_registered(&cam->vdev))
return -ENOTTY;
}
}
/******************************************************************************
*
- * ioctl_set_gpio
- *
- *****************************************************************************/
-
-static long cpia2_default(struct file *file, void *fh, bool valid_prio,
- int cmd, void *arg)
-{
- struct camera_data *cam = video_drvdata(file);
- __u32 gpio_val;
-
- if (cmd != CPIA2_CID_GPIO)
- return -EINVAL;
-
- gpio_val = *(__u32*) arg;
-
- if (gpio_val &~ 0xFFU)
- return -EINVAL;
-
- return cpia2_set_gpio(cam, (unsigned char)gpio_val);
-}
-
-/******************************************************************************
- *
* ioctl_querycap
*
* V4L2 device capabilities
@@ -465,9 +246,11 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
if (usb_make_path(cam->dev, vc->bus_info, sizeof(vc->bus_info)) <0)
memset(vc->bus_info,0, sizeof(vc->bus_info));
- vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ vc->device_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
+ vc->capabilities = vc->device_caps |
+ V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -610,22 +393,12 @@ static int cpia2_s_fmt_vid_cap(struct file *file, void *_fh,
struct v4l2_format *f)
{
struct camera_data *cam = video_drvdata(file);
- struct cpia2_fh *fh = _fh;
int err, frame;
- err = v4l2_prio_check(&cam->prio, fh->prio);
- if (err)
- return err;
err = cpia2_try_fmt_vid_cap(file, _fh, f);
if(err != 0)
return err;
- /* Ensure that only this process can change the format. */
- err = v4l2_prio_change(&cam->prio, &fh->prio, V4L2_PRIORITY_RECORD);
- if(err != 0) {
- return err;
- }
-
cam->pixelformat = f->fmt.pix.pixelformat;
/* NOTE: This should be set to 1 for MJPEG, but some apps don't handle
@@ -713,240 +486,126 @@ static int cpia2_cropcap(struct file *file, void *fh, struct v4l2_cropcap *c)
return 0;
}
-/******************************************************************************
- *
- * ioctl_queryctrl
- *
- * V4L2 query possible control variables
- *
- *****************************************************************************/
+struct framerate_info {
+ int value;
+ struct v4l2_fract period;
+};
-static int cpia2_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *c)
+static const struct framerate_info framerate_controls[] = {
+ { CPIA2_VP_FRAMERATE_6_25, { 4, 25 } },
+ { CPIA2_VP_FRAMERATE_7_5, { 2, 15 } },
+ { CPIA2_VP_FRAMERATE_12_5, { 2, 25 } },
+ { CPIA2_VP_FRAMERATE_15, { 1, 15 } },
+ { CPIA2_VP_FRAMERATE_25, { 1, 25 } },
+ { CPIA2_VP_FRAMERATE_30, { 1, 30 } },
+};
+
+static int cpia2_g_parm(struct file *file, void *fh, struct v4l2_streamparm *p)
{
struct camera_data *cam = video_drvdata(file);
+ struct v4l2_captureparm *cap = &p->parm.capture;
int i;
- for(i=0; i<NUM_CONTROLS; ++i) {
- if(c->id == controls[i].id) {
- memcpy(c, controls+i, sizeof(*c));
- break;
- }
- }
-
- if(i == NUM_CONTROLS)
+ if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- /* Some devices have additional limitations */
- switch(c->id) {
- case V4L2_CID_BRIGHTNESS:
- /***
- * Don't let the register be set to zero - bug in VP4
- * flash of full brightness
- ***/
- if (cam->params.pnp_id.device_type == DEVICE_STV_672)
- c->minimum = 1;
- break;
- case V4L2_CID_VFLIP:
- // VP5 Only
- if(cam->params.pnp_id.device_type == DEVICE_STV_672)
- c->flags |= V4L2_CTRL_FLAG_DISABLED;
- break;
- case CPIA2_CID_FRAMERATE:
- if(cam->params.pnp_id.device_type == DEVICE_STV_672 &&
- cam->params.version.sensor_flags==CPIA2_VP_SENSOR_FLAGS_500){
- // Maximum 15fps
- for(i=0; i<c->maximum; ++i) {
- if(framerate_controls[i].value ==
- CPIA2_VP_FRAMERATE_15) {
- c->maximum = i;
- c->default_value = i;
- }
- }
+ cap->capability = V4L2_CAP_TIMEPERFRAME;
+ cap->readbuffers = cam->num_frames;
+ for (i = 0; i < ARRAY_SIZE(framerate_controls); i++)
+ if (cam->params.vp_params.frame_rate == framerate_controls[i].value) {
+ cap->timeperframe = framerate_controls[i].period;
+ break;
}
- break;
- case CPIA2_CID_FLICKER_MODE:
- // Flicker control only valid for 672.
- if(cam->params.pnp_id.device_type != DEVICE_STV_672)
- c->flags |= V4L2_CTRL_FLAG_DISABLED;
- break;
- case CPIA2_CID_LIGHTS:
- // Light control only valid for the QX5 Microscope.
- if(cam->params.pnp_id.product != 0x151)
- c->flags |= V4L2_CTRL_FLAG_DISABLED;
- break;
- default:
- break;
- }
-
return 0;
}
-/******************************************************************************
- *
- * ioctl_querymenu
- *
- * V4L2 query possible control variables
- *
- *****************************************************************************/
-
-static int cpia2_querymenu(struct file *file, void *fh, struct v4l2_querymenu *m)
+static int cpia2_s_parm(struct file *file, void *fh, struct v4l2_streamparm *p)
{
struct camera_data *cam = video_drvdata(file);
+ struct v4l2_captureparm *cap = &p->parm.capture;
+ struct v4l2_fract tpf = cap->timeperframe;
+ int max = ARRAY_SIZE(framerate_controls) - 1;
+ int ret;
+ int i;
- switch(m->id) {
- case CPIA2_CID_FLICKER_MODE:
- if (m->index >= NUM_FLICKER_CONTROLS)
- return -EINVAL;
+ ret = cpia2_g_parm(file, fh, p);
+ if (ret || !tpf.denominator || !tpf.numerator)
+ return ret;
+
+ /* Maximum 15 fps for this model */
+ if (cam->params.pnp_id.device_type == DEVICE_STV_672 &&
+ cam->params.version.sensor_flags == CPIA2_VP_SENSOR_FLAGS_500)
+ max -= 2;
+ for (i = 0; i <= max; i++) {
+ struct v4l2_fract f1 = tpf;
+ struct v4l2_fract f2 = framerate_controls[i].period;
+
+ f1.numerator *= f2.denominator;
+ f2.numerator *= f1.denominator;
+ if (f1.numerator >= f2.numerator)
+ break;
+ }
+ if (i > max)
+ i = max;
+ cap->timeperframe = framerate_controls[i].period;
+ return cpia2_set_fps(cam, framerate_controls[i].value);
+}
- strcpy(m->name, flicker_controls[m->index].name);
- break;
- case CPIA2_CID_FRAMERATE:
- {
- int maximum = NUM_FRAMERATE_CONTROLS - 1;
- if(cam->params.pnp_id.device_type == DEVICE_STV_672 &&
- cam->params.version.sensor_flags==CPIA2_VP_SENSOR_FLAGS_500){
- // Maximum 15fps
- int i;
- for(i=0; i<maximum; ++i) {
- if(framerate_controls[i].value ==
- CPIA2_VP_FRAMERATE_15)
- maximum = i;
- }
- }
- if (m->index > maximum)
- return -EINVAL;
+static const struct {
+ u32 width;
+ u32 height;
+} cpia2_framesizes[] = {
+ { 640, 480 },
+ { 352, 288 },
+ { 320, 240 },
+ { 288, 216 },
+ { 256, 192 },
+ { 224, 168 },
+ { 192, 144 },
+ { 176, 144 },
+};
- strcpy(m->name, framerate_controls[m->index].name);
- break;
- }
- case CPIA2_CID_LIGHTS:
- if (m->index >= NUM_LIGHTS_CONTROLS)
- return -EINVAL;
+static int cpia2_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
- strcpy(m->name, lights_controls[m->index].name);
- break;
- default:
+ if (fsize->pixel_format != V4L2_PIX_FMT_MJPEG &&
+ fsize->pixel_format != V4L2_PIX_FMT_JPEG)
return -EINVAL;
- }
+ if (fsize->index >= ARRAY_SIZE(cpia2_framesizes))
+ return -EINVAL;
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = cpia2_framesizes[fsize->index].width;
+ fsize->discrete.height = cpia2_framesizes[fsize->index].height;
return 0;
}
-/******************************************************************************
- *
- * ioctl_g_ctrl
- *
- * V4L2 get the value of a control variable
- *
- *****************************************************************************/
-
-static int cpia2_g_ctrl(struct file *file, void *fh, struct v4l2_control *c)
+static int cpia2_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
{
struct camera_data *cam = video_drvdata(file);
+ int max = ARRAY_SIZE(framerate_controls) - 1;
+ int i;
- switch(c->id) {
- case V4L2_CID_BRIGHTNESS:
- cpia2_do_command(cam, CPIA2_CMD_GET_VP_BRIGHTNESS,
- TRANSFER_READ, 0);
- c->value = cam->params.color_params.brightness;
- break;
- case V4L2_CID_CONTRAST:
- cpia2_do_command(cam, CPIA2_CMD_GET_CONTRAST,
- TRANSFER_READ, 0);
- c->value = cam->params.color_params.contrast;
- break;
- case V4L2_CID_SATURATION:
- cpia2_do_command(cam, CPIA2_CMD_GET_VP_SATURATION,
- TRANSFER_READ, 0);
- c->value = cam->params.color_params.saturation;
- break;
- case V4L2_CID_HFLIP:
- cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS,
- TRANSFER_READ, 0);
- c->value = (cam->params.vp_params.user_effects &
- CPIA2_VP_USER_EFFECTS_MIRROR) != 0;
- break;
- case V4L2_CID_VFLIP:
- cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS,
- TRANSFER_READ, 0);
- c->value = (cam->params.vp_params.user_effects &
- CPIA2_VP_USER_EFFECTS_FLIP) != 0;
- break;
- case CPIA2_CID_TARGET_KB:
- c->value = cam->params.vc_params.target_kb;
- break;
- case CPIA2_CID_GPIO:
- cpia2_do_command(cam, CPIA2_CMD_GET_VP_GPIO_DATA,
- TRANSFER_READ, 0);
- c->value = cam->params.vp_params.gpio_data;
- break;
- case CPIA2_CID_FLICKER_MODE:
- {
- int i, mode;
- cpia2_do_command(cam, CPIA2_CMD_GET_FLICKER_MODES,
- TRANSFER_READ, 0);
- if(cam->params.flicker_control.cam_register &
- CPIA2_VP_FLICKER_MODES_NEVER_FLICKER) {
- mode = NEVER_FLICKER;
- } else {
- if(cam->params.flicker_control.cam_register &
- CPIA2_VP_FLICKER_MODES_50HZ) {
- mode = FLICKER_50;
- } else {
- mode = FLICKER_60;
- }
- }
- for(i=0; i<NUM_FLICKER_CONTROLS; i++) {
- if(flicker_controls[i].value == mode) {
- c->value = i;
- break;
- }
- }
- if(i == NUM_FLICKER_CONTROLS)
- return -EINVAL;
- break;
- }
- case CPIA2_CID_FRAMERATE:
- {
- int maximum = NUM_FRAMERATE_CONTROLS - 1;
- int i;
- for(i=0; i<= maximum; i++) {
- if(cam->params.vp_params.frame_rate ==
- framerate_controls[i].value)
- break;
- }
- if(i > maximum)
- return -EINVAL;
- c->value = i;
- break;
- }
- case CPIA2_CID_USB_ALT:
- c->value = cam->params.camera_state.stream_mode;
- break;
- case CPIA2_CID_LIGHTS:
- {
- int i;
- cpia2_do_command(cam, CPIA2_CMD_GET_VP_GPIO_DATA,
- TRANSFER_READ, 0);
- for(i=0; i<NUM_LIGHTS_CONTROLS; i++) {
- if((cam->params.vp_params.gpio_data&GPIO_LIGHTS_MASK) ==
- lights_controls[i].value) {
- break;
- }
- }
- if(i == NUM_LIGHTS_CONTROLS)
- return -EINVAL;
- c->value = i;
- break;
- }
- case CPIA2_CID_RESET_CAMERA:
+ if (fival->pixel_format != V4L2_PIX_FMT_MJPEG &&
+ fival->pixel_format != V4L2_PIX_FMT_JPEG)
return -EINVAL;
- default:
- return -EINVAL;
- }
-
- DBG("Get control id:%d, value:%d\n", c->id, c->value);
+ /* Maximum 15 fps for this model */
+ if (cam->params.pnp_id.device_type == DEVICE_STV_672 &&
+ cam->params.version.sensor_flags == CPIA2_VP_SENSOR_FLAGS_500)
+ max -= 2;
+ if (fival->index > max)
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(cpia2_framesizes); i++)
+ if (fival->width == cpia2_framesizes[i].width &&
+ fival->height == cpia2_framesizes[i].height)
+ break;
+ if (i == ARRAY_SIZE(cpia2_framesizes))
+ return -EINVAL;
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = framerate_controls[fival->index].period;
return 0;
}
@@ -958,72 +617,54 @@ static int cpia2_g_ctrl(struct file *file, void *fh, struct v4l2_control *c)
*
*****************************************************************************/
-static int cpia2_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
+static int cpia2_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct camera_data *cam = video_drvdata(file);
- int i;
- int retval = 0;
+ struct camera_data *cam =
+ container_of(ctrl->handler, struct camera_data, hdl);
+ static const int flicker_table[] = {
+ NEVER_FLICKER,
+ FLICKER_50,
+ FLICKER_60,
+ };
- DBG("Set control id:%d, value:%d\n", c->id, c->value);
-
- /* Check that the value is in range */
- for(i=0; i<NUM_CONTROLS; i++) {
- if(c->id == controls[i].id) {
- if(c->value < controls[i].minimum ||
- c->value > controls[i].maximum) {
- return -EINVAL;
- }
- break;
- }
- }
- if(i == NUM_CONTROLS)
- return -EINVAL;
+ DBG("Set control id:%d, value:%d\n", ctrl->id, ctrl->val);
- switch(c->id) {
+ switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- cpia2_set_brightness(cam, c->value);
+ cpia2_set_brightness(cam, ctrl->val);
break;
case V4L2_CID_CONTRAST:
- cpia2_set_contrast(cam, c->value);
+ cpia2_set_contrast(cam, ctrl->val);
break;
case V4L2_CID_SATURATION:
- cpia2_set_saturation(cam, c->value);
+ cpia2_set_saturation(cam, ctrl->val);
break;
case V4L2_CID_HFLIP:
- cpia2_set_property_mirror(cam, c->value);
+ cpia2_set_property_mirror(cam, ctrl->val);
break;
case V4L2_CID_VFLIP:
- cpia2_set_property_flip(cam, c->value);
+ cpia2_set_property_flip(cam, ctrl->val);
break;
- case CPIA2_CID_TARGET_KB:
- retval = cpia2_set_target_kb(cam, c->value);
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ return cpia2_set_flicker_mode(cam, flicker_table[ctrl->val]);
+ case V4L2_CID_ILLUMINATORS_1:
+ return cpia2_set_gpio(cam, (cam->top_light->val << 6) |
+ (cam->bottom_light->val << 7));
+ case V4L2_CID_JPEG_ACTIVE_MARKER:
+ cam->params.compression.inhibit_htables =
+ !(ctrl->val & V4L2_JPEG_ACTIVE_MARKER_DHT);
break;
- case CPIA2_CID_GPIO:
- retval = cpia2_set_gpio(cam, c->value);
- break;
- case CPIA2_CID_FLICKER_MODE:
- retval = cpia2_set_flicker_mode(cam,
- flicker_controls[c->value].value);
- break;
- case CPIA2_CID_FRAMERATE:
- retval = cpia2_set_fps(cam, framerate_controls[c->value].value);
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ cam->params.vc_params.quality = ctrl->val;
break;
case CPIA2_CID_USB_ALT:
- retval = cpia2_usb_change_streaming_alternate(cam, c->value);
- break;
- case CPIA2_CID_LIGHTS:
- retval = cpia2_set_gpio(cam, lights_controls[c->value].value);
- break;
- case CPIA2_CID_RESET_CAMERA:
- cpia2_usb_stream_pause(cam);
- cpia2_reset_camera(cam);
- cpia2_usb_stream_resume(cam);
+ cam->params.camera_state.stream_mode = ctrl->val;
break;
default:
- retval = -EINVAL;
+ return -EINVAL;
}
- return retval;
+ return 0;
}
/******************************************************************************
@@ -1084,6 +725,8 @@ static int cpia2_s_jpegcomp(struct file *file, void *fh, struct v4l2_jpegcompres
cam->params.compression.inhibit_htables =
!(parms->jpeg_markers & V4L2_JPEG_MARKER_DHT);
+ parms->jpeg_markers &= V4L2_JPEG_MARKER_DQT | V4L2_JPEG_MARKER_DRI |
+ V4L2_JPEG_MARKER_DHT;
if(parms->APP_len != 0) {
if(parms->APP_len > 0 &&
@@ -1270,12 +913,12 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
struct framebuf *cb=cam->curbuff;
mutex_unlock(&cam->v4l2_lock);
wait_event_interruptible(cam->wq_stream,
- !cam->present ||
+ !video_is_registered(&cam->vdev) ||
(cb=cam->curbuff)->status == FRAME_READY);
mutex_lock(&cam->v4l2_lock);
if (signal_pending(current))
return -ERESTARTSYS;
- if(!cam->present)
+ if (!video_is_registered(&cam->vdev))
return -ENOTTY;
frame = cb->num;
}
@@ -1299,56 +942,39 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
return 0;
}
-static int cpia2_g_priority(struct file *file, void *_fh, enum v4l2_priority *p)
-{
- struct cpia2_fh *fh = _fh;
-
- *p = fh->prio;
- return 0;
-}
-
-static int cpia2_s_priority(struct file *file, void *_fh, enum v4l2_priority prio)
-{
- struct camera_data *cam = video_drvdata(file);
- struct cpia2_fh *fh = _fh;
-
- if (cam->streaming && prio != fh->prio &&
- fh->prio == V4L2_PRIORITY_RECORD)
- /* Can't drop record priority while streaming */
- return -EBUSY;
-
- if (prio == V4L2_PRIORITY_RECORD && prio != fh->prio &&
- v4l2_prio_max(&cam->prio) == V4L2_PRIORITY_RECORD)
- /* Only one program can record at a time */
- return -EBUSY;
- return v4l2_prio_change(&cam->prio, &fh->prio, prio);
-}
-
static int cpia2_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
struct camera_data *cam = video_drvdata(file);
+ int ret = -EINVAL;
DBG("VIDIOC_STREAMON, streaming=%d\n", cam->streaming);
if (!cam->mmapped || type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- if (!cam->streaming)
- return cpia2_usb_stream_start(cam,
+ if (!cam->streaming) {
+ ret = cpia2_usb_stream_start(cam,
cam->params.camera_state.stream_mode);
- return -EINVAL;
+ if (!ret)
+ v4l2_ctrl_grab(cam->usb_alt, true);
+ }
+ return ret;
}
static int cpia2_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
struct camera_data *cam = video_drvdata(file);
+ int ret = -EINVAL;
DBG("VIDIOC_STREAMOFF, streaming=%d\n", cam->streaming);
if (!cam->mmapped || type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- if (cam->streaming)
- return cpia2_usb_stream_stop(cam);
- return -EINVAL;
+ if (cam->streaming) {
+ ret = cpia2_usb_stream_stop(cam);
+ if (!ret)
+ v4l2_ctrl_grab(cam->usb_alt, false);
+ }
+ return ret;
}
/******************************************************************************
@@ -1361,16 +987,10 @@ static int cpia2_mmap(struct file *file, struct vm_area_struct *area)
struct camera_data *cam = video_drvdata(file);
int retval;
- /* Priority check */
- struct cpia2_fh *fh = file->private_data;
- if(fh->prio != V4L2_PRIORITY_RECORD) {
- return -EBUSY;
- }
-
retval = cpia2_remap_buffer(cam, area);
if(!retval)
- fh->mmapped = 1;
+ cam->stream_fh = file->private_data;
return retval;
}
@@ -1388,15 +1008,13 @@ static void reset_camera_struct_v4l(struct camera_data *cam)
cam->frame_size = buffer_size;
cam->num_frames = num_buffers;
- /* FlickerModes */
+ /* Flicker modes */
cam->params.flicker_control.flicker_mode_req = flicker_mode;
- cam->params.flicker_control.mains_frequency = flicker_freq;
- /* streamMode */
+ /* stream modes */
cam->params.camera_state.stream_mode = alternate;
cam->pixelformat = V4L2_PIX_FMT_JPEG;
- v4l2_prio_init(&cam->prio);
}
static const struct v4l2_ioctl_ops cpia2_ioctl_ops = {
@@ -1408,10 +1026,6 @@ static const struct v4l2_ioctl_ops cpia2_ioctl_ops = {
.vidioc_g_fmt_vid_cap = cpia2_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = cpia2_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = cpia2_try_fmt_vid_cap,
- .vidioc_queryctrl = cpia2_queryctrl,
- .vidioc_querymenu = cpia2_querymenu,
- .vidioc_g_ctrl = cpia2_g_ctrl,
- .vidioc_s_ctrl = cpia2_s_ctrl,
.vidioc_g_jpegcomp = cpia2_g_jpegcomp,
.vidioc_s_jpegcomp = cpia2_s_jpegcomp,
.vidioc_cropcap = cpia2_cropcap,
@@ -1421,9 +1035,12 @@ static const struct v4l2_ioctl_ops cpia2_ioctl_ops = {
.vidioc_dqbuf = cpia2_dqbuf,
.vidioc_streamon = cpia2_streamon,
.vidioc_streamoff = cpia2_streamoff,
- .vidioc_g_priority = cpia2_g_priority,
- .vidioc_s_priority = cpia2_s_priority,
- .vidioc_default = cpia2_default,
+ .vidioc_s_parm = cpia2_s_parm,
+ .vidioc_g_parm = cpia2_g_parm,
+ .vidioc_enum_framesizes = cpia2_enum_framesizes,
+ .vidioc_enum_frameintervals = cpia2_enum_frameintervals,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/***
@@ -1444,7 +1061,21 @@ static struct video_device cpia2_template = {
.name = "CPiA2 Camera",
.fops = &cpia2_fops,
.ioctl_ops = &cpia2_ioctl_ops,
- .release = video_device_release,
+ .release = video_device_release_empty,
+};
+
+void cpia2_camera_release(struct v4l2_device *v4l2_dev)
+{
+ struct camera_data *cam =
+ container_of(v4l2_dev, struct camera_data, v4l2_dev);
+
+ v4l2_ctrl_handler_free(&cam->hdl);
+ v4l2_device_unregister(&cam->v4l2_dev);
+ kfree(cam);
+}
+
+static const struct v4l2_ctrl_ops cpia2_ctrl_ops = {
+ .s_ctrl = cpia2_s_ctrl,
};
/******************************************************************************
@@ -1454,20 +1085,78 @@ static struct video_device cpia2_template = {
*****************************************************************************/
int cpia2_register_camera(struct camera_data *cam)
{
- cam->vdev = video_device_alloc();
- if(!cam->vdev)
- return -ENOMEM;
+ struct v4l2_ctrl_handler *hdl = &cam->hdl;
+ struct v4l2_ctrl_config cpia2_usb_alt = {
+ .ops = &cpia2_ctrl_ops,
+ .id = CPIA2_CID_USB_ALT,
+ .name = "USB Alternate",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = USBIF_ISO_1,
+ .max = USBIF_ISO_6,
+ .step = 1,
+ };
+ int ret;
+
+ v4l2_ctrl_handler_init(hdl, 12);
+ v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_BRIGHTNESS,
+ cam->params.pnp_id.device_type == DEVICE_STV_672 ? 1 : 0,
+ 255, 1, DEFAULT_BRIGHTNESS);
+ v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, DEFAULT_CONTRAST);
+ v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, DEFAULT_SATURATION);
+ v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_JPEG_ACTIVE_MARKER, 0,
+ V4L2_JPEG_ACTIVE_MARKER_DHT, 0,
+ V4L2_JPEG_ACTIVE_MARKER_DHT);
+ v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 1,
+ 100, 1, 100);
+ cpia2_usb_alt.def = alternate;
+ cam->usb_alt = v4l2_ctrl_new_custom(hdl, &cpia2_usb_alt, NULL);
+ /* VP5 Only */
+ if (cam->params.pnp_id.device_type != DEVICE_STV_672)
+ v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ /* Flicker control only valid for 672 */
+ if (cam->params.pnp_id.device_type == DEVICE_STV_672)
+ v4l2_ctrl_new_std_menu(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0, 0);
+ /* Light control only valid for the QX5 Microscope */
+ if (cam->params.pnp_id.product == 0x151) {
+ cam->top_light = v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_ILLUMINATORS_1, 0, 1, 1, 0);
+ cam->bottom_light = v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_ILLUMINATORS_2, 0, 1, 1, 0);
+ v4l2_ctrl_cluster(2, &cam->top_light);
+ }
- memcpy(cam->vdev, &cpia2_template, sizeof(cpia2_template));
- video_set_drvdata(cam->vdev, cam);
- cam->vdev->lock = &cam->v4l2_lock;
+ if (hdl->error) {
+ ret = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ return ret;
+ }
+
+ cam->vdev = cpia2_template;
+ video_set_drvdata(&cam->vdev, cam);
+ cam->vdev.lock = &cam->v4l2_lock;
+ cam->vdev.ctrl_handler = hdl;
+ cam->vdev.v4l2_dev = &cam->v4l2_dev;
+ set_bit(V4L2_FL_USE_FH_PRIO, &cam->vdev.flags);
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &cam->vdev.flags);
reset_camera_struct_v4l(cam);
/* register v4l device */
- if (video_register_device(cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
+ if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
ERR("video_register_device failed\n");
- video_device_release(cam->vdev);
return -ENODEV;
}
@@ -1481,13 +1170,7 @@ int cpia2_register_camera(struct camera_data *cam)
*****************************************************************************/
void cpia2_unregister_camera(struct camera_data *cam)
{
- if (!cam->open_count) {
- video_unregister_device(cam->vdev);
- } else {
- LOG("%s removed while open, deferring "
- "video_unregister_device\n",
- video_device_node_name(cam->vdev));
- }
+ video_unregister_device(&cam->vdev);
}
/******************************************************************************
@@ -1524,23 +1207,12 @@ static void __init check_parameters(void)
LOG("alternate specified is invalid, using %d\n", alternate);
}
- if (flicker_mode != NEVER_FLICKER && flicker_mode != ANTI_FLICKER_ON) {
- flicker_mode = NEVER_FLICKER;
+ if (flicker_mode != 0 && flicker_mode != FLICKER_50 && flicker_mode != FLICKER_60) {
+ flicker_mode = 0;
LOG("Flicker mode specified is invalid, using %d\n",
flicker_mode);
}
- if (flicker_freq != FLICKER_50 && flicker_freq != FLICKER_60) {
- flicker_freq = FLICKER_60;
- LOG("Flicker mode specified is invalid, using %d\n",
- flicker_freq);
- }
-
- if(video_nr < -1 || video_nr > 64) {
- video_nr = -1;
- LOG("invalid video_nr specified, must be -1 to 64\n");
- }
-
DBG("Using %d buffers, each %d bytes, alternate=%d\n",
num_buffers, buffer_size, alternate);
}
diff --git a/drivers/media/video/cpia2/cpia2dev.h b/drivers/media/video/cpia2/cpia2dev.h
deleted file mode 100644
index f66691fe5a35..000000000000
--- a/drivers/media/video/cpia2/cpia2dev.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/****************************************************************************
- *
- * Filename: cpia2dev.h
- *
- * Copyright 2001, STMicrolectronics, Inc.
- *
- * Contact: steve.miller@st.com
- *
- * Description:
- * This file provides definitions for applications wanting to use the
- * cpia2 driver beyond the generic v4l capabilities.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************************/
-
-#ifndef CPIA2_DEV_HEADER
-#define CPIA2_DEV_HEADER
-
-#include <linux/videodev2.h>
-
-/***
- * The following defines are ioctl numbers based on video4linux private ioctls,
- * which can range from 192 (BASE_VIDIOCPRIVATE) to 255. All of these take int
- * args
- */
-#define CPIA2_IOC_SET_GPIO _IOW('v', BASE_VIDIOC_PRIVATE + 17, __u32)
-
-/* V4L2 driver specific controls */
-#define CPIA2_CID_TARGET_KB (V4L2_CID_PRIVATE_BASE+0)
-#define CPIA2_CID_GPIO (V4L2_CID_PRIVATE_BASE+1)
-#define CPIA2_CID_FLICKER_MODE (V4L2_CID_PRIVATE_BASE+2)
-#define CPIA2_CID_FRAMERATE (V4L2_CID_PRIVATE_BASE+3)
-#define CPIA2_CID_USB_ALT (V4L2_CID_PRIVATE_BASE+4)
-#define CPIA2_CID_LIGHTS (V4L2_CID_PRIVATE_BASE+5)
-#define CPIA2_CID_RESET_CAMERA (V4L2_CID_PRIVATE_BASE+6)
-
-#endif
diff --git a/drivers/media/video/cx18/cx18-alsa-main.c b/drivers/media/video/cx18/cx18-alsa-main.c
index e118361c2e7b..6d2a98246b6d 100644
--- a/drivers/media/video/cx18/cx18-alsa-main.c
+++ b/drivers/media/video/cx18/cx18-alsa-main.c
@@ -285,6 +285,7 @@ static void __exit cx18_alsa_exit(void)
drv = driver_find("cx18", &pci_bus_type);
ret = driver_for_each_device(drv, NULL, NULL, cx18_alsa_exit_callback);
+ (void)ret; /* suppress compiler warning */
cx18_ext_init = NULL;
printk(KERN_INFO "cx18-alsa: module unload complete\n");
diff --git a/drivers/media/video/cx18/cx18-alsa-pcm.c b/drivers/media/video/cx18/cx18-alsa-pcm.c
index 82d195be9197..7a5b84a86bb3 100644
--- a/drivers/media/video/cx18/cx18-alsa-pcm.c
+++ b/drivers/media/video/cx18/cx18-alsa-pcm.c
@@ -190,7 +190,7 @@ static int snd_cx18_pcm_capture_open(struct snd_pcm_substream *substream)
ret = cx18_start_v4l2_encode_stream(s);
snd_cx18_unlock(cxsc);
- return 0;
+ return ret;
}
static int snd_cx18_pcm_capture_close(struct snd_pcm_substream *substream)
@@ -199,12 +199,11 @@ static int snd_cx18_pcm_capture_close(struct snd_pcm_substream *substream)
struct v4l2_device *v4l2_dev = cxsc->v4l2_dev;
struct cx18 *cx = to_cx18(v4l2_dev);
struct cx18_stream *s;
- int ret;
/* Instruct the cx18 to stop sending packets */
snd_cx18_lock(cxsc);
s = &cx->streams[CX18_ENC_STREAM_TYPE_PCM];
- ret = cx18_stop_v4l2_encode_stream(s, 0);
+ cx18_stop_v4l2_encode_stream(s, 0);
clear_bit(CX18_F_S_STREAMING, &s->s_flags);
cx18_release_stream(s);
@@ -252,13 +251,10 @@ static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
static int snd_cx18_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
- int ret;
-
dprintk("%s called\n", __func__);
- ret = snd_pcm_alloc_vmalloc_buffer(substream,
+ return snd_pcm_alloc_vmalloc_buffer(substream,
params_buffer_bytes(params));
- return 0;
}
static int snd_cx18_pcm_hw_free(struct snd_pcm_substream *substream)
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index be49f68ddf37..35fde4e931f5 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -1137,7 +1137,7 @@ static long cx18_default(struct file *file, void *fh, bool valid_prio,
}
default:
- return -EINVAL;
+ return -ENOTTY;
}
return 0;
}
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
index 0c7796e76ac0..ed8118390b02 100644
--- a/drivers/media/video/cx18/cx18-mailbox.c
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -595,9 +595,8 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
const struct cx18_api_info *info = find_api_info(cmd);
- u32 state, irq, req, ack, err;
+ u32 irq, req, ack, err;
struct cx18_mailbox __iomem *mb;
- u32 __iomem *xpu_state;
wait_queue_head_t *waitq;
struct mutex *mb_lock;
unsigned long int t0, timeout, ret;
@@ -628,14 +627,12 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
mb_lock = &cx->epu2apu_mb_lock;
irq = IRQ_EPU_TO_APU;
mb = &cx->scb->epu2apu_mb;
- xpu_state = &cx->scb->apu_state;
break;
case CPU:
waitq = &cx->mb_cpu_waitq;
mb_lock = &cx->epu2cpu_mb_lock;
irq = IRQ_EPU_TO_CPU;
mb = &cx->scb->epu2cpu_mb;
- xpu_state = &cx->scb->cpu_state;
break;
default:
CX18_WARN("Unknown RPU (%d) for API call\n", info->rpu);
@@ -653,7 +650,6 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
* by a signal, we may get here and find a busy mailbox. After waiting,
* mark it "not busy" from our end, if the XPU hasn't ack'ed it still.
*/
- state = cx18_readl(cx, xpu_state);
req = cx18_readl(cx, &mb->request);
timeout = msecs_to_jiffies(10);
ret = wait_event_timeout(*waitq,
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index 638cca156b58..4185bcb80ca3 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -980,7 +980,6 @@ void cx18_stop_all_captures(struct cx18 *cx)
int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
{
struct cx18 *cx = s->cx;
- unsigned long then;
if (!cx18_stream_enabled(s))
return -EINVAL;
@@ -999,8 +998,6 @@ int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
else
cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 1, s->handle);
- then = jiffies;
-
if (s->type == CX18_ENC_STREAM_TYPE_MPG && gop_end) {
CX18_INFO("ignoring gop_end: not (yet?) supported by the firmware\n");
}
diff --git a/drivers/media/video/cx231xx/cx231xx-417.c b/drivers/media/video/cx231xx/cx231xx-417.c
index d4327dab5a36..ce2f62238a19 100644
--- a/drivers/media/video/cx231xx/cx231xx-417.c
+++ b/drivers/media/video/cx231xx/cx231xx-417.c
@@ -1095,7 +1095,7 @@ static int cx231xx_initialize_codec(struct cx231xx *dev)
{
int version;
int retval;
- u32 i, data[7];
+ u32 i;
u32 val = 0;
dprintk(1, "%s()\n", __func__);
@@ -1154,6 +1154,11 @@ static int cx231xx_initialize_codec(struct cx231xx *dev)
CX231xx_CUSTOM_EXTENSION_USR_DATA, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0);
*/
+
+#if 0
+ /* TODO */
+ u32 data[7];
+
/* Setup to capture VBI */
data[0] = 0x0001BD00;
data[1] = 1; /* frames per interrupt */
@@ -1162,7 +1167,7 @@ static int cx231xx_initialize_codec(struct cx231xx *dev)
data[4] = 0x206080C0; /* stop codes */
data[5] = 6; /* lines */
data[6] = 64; /* BPL */
-/*
+
cx231xx_api_cmd(dev, CX2341X_ENC_SET_VBI_CONFIG, 7, 0, data[0], data[1],
data[2], data[3], data[4], data[5], data[6]);
@@ -1175,7 +1180,7 @@ static int cx231xx_initialize_codec(struct cx231xx *dev)
cx231xx_api_cmd(dev, CX2341X_ENC_SET_VBI_LINE, 5, 0,
i | 0x80000000, valid, 0, 0, 0);
}
-*/
+#endif
/* cx231xx_api_cmd(dev, CX2341X_ENC_MUTE_AUDIO, 1, 0, CX231xx_UNMUTE);
msleep(60);
*/
@@ -1792,17 +1797,16 @@ static int vidioc_streamon(struct file *file, void *priv,
struct cx231xx_fh *fh = file->private_data;
struct cx231xx *dev = fh->dev;
- int rc = 0;
dprintk(3, "enter vidioc_streamon()\n");
cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
- rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
if (dev->USE_ISO)
- rc = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
+ cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
CX231XX_NUM_BUFS,
dev->video_mode.max_pkt_size,
cx231xx_isoc_copy);
else {
- rc = cx231xx_init_bulk(dev, 320,
+ cx231xx_init_bulk(dev, 320,
5,
dev->ts1_mode.max_pkt_size,
cx231xx_bulk_copy);
diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
index a2c2b7d343ec..068f78dc5d13 100644
--- a/drivers/media/video/cx231xx/cx231xx-audio.c
+++ b/drivers/media/video/cx231xx/cx231xx-audio.c
@@ -523,21 +523,24 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
static int snd_cx231xx_hw_capture_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- unsigned int channels, rate, format;
int ret;
dprintk("Setting capture parameters\n");
ret = snd_pcm_alloc_vmalloc_buffer(substream,
params_buffer_bytes(hw_params));
+#if 0
+ /* TODO: set up cx231xx audio chip to deliver the correct audio format,
+ current default is 48000hz multiplexed => 96000hz mono
+ which shouldn't matter since analogue TV only supports mono */
+ unsigned int channels, rate, format;
+
format = params_format(hw_params);
rate = params_rate(hw_params);
channels = params_channels(hw_params);
+#endif
- /* TODO: set up cx231xx audio chip to deliver the correct audio format,
- current default is 48000hz multiplexed => 96000hz mono
- which shouldn't matter since analogue TV only supports mono */
- return 0;
+ return ret;
}
static int snd_cx231xx_hw_capture_free(struct snd_pcm_substream *substream)
@@ -586,7 +589,7 @@ static int snd_cx231xx_capture_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct cx231xx *dev = snd_pcm_substream_chip(substream);
- int retval;
+ int retval = 0;
if (dev->state & DEV_DISCONNECTED)
return -ENODEV;
@@ -601,12 +604,13 @@ static int snd_cx231xx_capture_trigger(struct snd_pcm_substream *substream,
break;
default:
retval = -EINVAL;
+ break;
}
spin_unlock(&dev->adev.slock);
schedule_work(&dev->wq_trigger);
- return 0;
+ return retval;
}
static snd_pcm_uframes_t snd_cx231xx_capture_pointer(struct snd_pcm_substream
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index 53ff26e7abf7..b085a3c6dc04 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -934,33 +934,29 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
void cx231xx_enable656(struct cx231xx *dev)
{
u8 temp = 0;
- int status;
/*enable TS1 data[0:7] as output to export 656*/
- status = vid_blk_write_byte(dev, TS1_PIN_CTL0, 0xFF);
+ vid_blk_write_byte(dev, TS1_PIN_CTL0, 0xFF);
/*enable TS1 clock as output to export 656*/
- status = vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp);
+ vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp);
temp = temp|0x04;
- status = vid_blk_write_byte(dev, TS1_PIN_CTL1, temp);
-
+ vid_blk_write_byte(dev, TS1_PIN_CTL1, temp);
}
EXPORT_SYMBOL_GPL(cx231xx_enable656);
void cx231xx_disable656(struct cx231xx *dev)
{
u8 temp = 0;
- int status;
-
- status = vid_blk_write_byte(dev, TS1_PIN_CTL0, 0x00);
+ vid_blk_write_byte(dev, TS1_PIN_CTL0, 0x00);
- status = vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp);
+ vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp);
temp = temp&0xFB;
- status = vid_blk_write_byte(dev, TS1_PIN_CTL1, temp);
+ vid_blk_write_byte(dev, TS1_PIN_CTL1, temp);
}
EXPORT_SYMBOL_GPL(cx231xx_disable656);
@@ -1320,117 +1316,115 @@ void update_HH_register_after_set_DIF(struct cx231xx *dev)
void cx231xx_dump_HH_reg(struct cx231xx *dev)
{
- u8 status = 0;
u32 value = 0;
u16 i = 0;
value = 0x45005390;
- status = vid_blk_write_word(dev, 0x104, value);
+ vid_blk_write_word(dev, 0x104, value);
for (i = 0x100; i < 0x140; i++) {
- status = vid_blk_read_word(dev, i, &value);
+ vid_blk_read_word(dev, i, &value);
cx231xx_info("reg0x%x=0x%x\n", i, value);
i = i+3;
}
for (i = 0x300; i < 0x400; i++) {
- status = vid_blk_read_word(dev, i, &value);
+ vid_blk_read_word(dev, i, &value);
cx231xx_info("reg0x%x=0x%x\n", i, value);
i = i+3;
}
for (i = 0x400; i < 0x440; i++) {
- status = vid_blk_read_word(dev, i, &value);
+ vid_blk_read_word(dev, i, &value);
cx231xx_info("reg0x%x=0x%x\n", i, value);
i = i+3;
}
- status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
+ vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
cx231xx_info("AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value);
vid_blk_write_word(dev, AFE_CTRL_C2HH_SRC_CTRL, 0x4485D390);
- status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
+ vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
cx231xx_info("AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value);
}
void cx231xx_dump_SC_reg(struct cx231xx *dev)
{
u8 value[4] = { 0, 0, 0, 0 };
- int status = 0;
cx231xx_info("cx231xx_dump_SC_reg!\n");
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", BOARD_CFG_STAT, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS_MODE_REG,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS_MODE_REG,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS_MODE_REG, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_CFG_REG,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_CFG_REG,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_CFG_REG, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_LENGTH_REG,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_LENGTH_REG,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_LENGTH_REG, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_CFG_REG,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_CFG_REG,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_CFG_REG, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_LENGTH_REG,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_LENGTH_REG,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_LENGTH_REG, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", EP_MODE_SET, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN1,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN1,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN1, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN2,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN2,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN2, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN3,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN3,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN3, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK0,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK0,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK0, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK1,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK1,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK1, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK2,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK2,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK2, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_GAIN,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_GAIN,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_GAIN, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_CAR_REG,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_CAR_REG,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_CAR_REG, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG1,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG1,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG1, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG2,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG2,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG2, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0],
value[1], value[2], value[3]);
@@ -1441,18 +1435,15 @@ void cx231xx_dump_SC_reg(struct cx231xx *dev)
void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev)
{
- u8 status = 0;
u8 value = 0;
-
-
- status = afe_read_byte(dev, ADC_STATUS2_CH3, &value);
+ afe_read_byte(dev, ADC_STATUS2_CH3, &value);
value = (value & 0xFE)|0x01;
- status = afe_write_byte(dev, ADC_STATUS2_CH3, value);
+ afe_write_byte(dev, ADC_STATUS2_CH3, value);
- status = afe_read_byte(dev, ADC_STATUS2_CH3, &value);
+ afe_read_byte(dev, ADC_STATUS2_CH3, &value);
value = (value & 0xFE)|0x00;
- status = afe_write_byte(dev, ADC_STATUS2_CH3, value);
+ afe_write_byte(dev, ADC_STATUS2_CH3, value);
/*
@@ -1464,44 +1455,43 @@ void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev)
for low-if agc defect
*/
- status = afe_read_byte(dev, ADC_NTF_PRECLMP_EN_CH3, &value);
+ afe_read_byte(dev, ADC_NTF_PRECLMP_EN_CH3, &value);
value = (value & 0xFC)|0x00;
- status = afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH3, value);
+ afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH3, value);
- status = afe_read_byte(dev, ADC_INPUT_CH3, &value);
+ afe_read_byte(dev, ADC_INPUT_CH3, &value);
value = (value & 0xF9)|0x02;
- status = afe_write_byte(dev, ADC_INPUT_CH3, value);
+ afe_write_byte(dev, ADC_INPUT_CH3, value);
- status = afe_read_byte(dev, ADC_FB_FRCRST_CH3, &value);
+ afe_read_byte(dev, ADC_FB_FRCRST_CH3, &value);
value = (value & 0xFB)|0x04;
- status = afe_write_byte(dev, ADC_FB_FRCRST_CH3, value);
+ afe_write_byte(dev, ADC_FB_FRCRST_CH3, value);
- status = afe_read_byte(dev, ADC_DCSERVO_DEM_CH3, &value);
+ afe_read_byte(dev, ADC_DCSERVO_DEM_CH3, &value);
value = (value & 0xFC)|0x03;
- status = afe_write_byte(dev, ADC_DCSERVO_DEM_CH3, value);
+ afe_write_byte(dev, ADC_DCSERVO_DEM_CH3, value);
- status = afe_read_byte(dev, ADC_CTRL_DAC1_CH3, &value);
+ afe_read_byte(dev, ADC_CTRL_DAC1_CH3, &value);
value = (value & 0xFB)|0x04;
- status = afe_write_byte(dev, ADC_CTRL_DAC1_CH3, value);
+ afe_write_byte(dev, ADC_CTRL_DAC1_CH3, value);
- status = afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value);
+ afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value);
value = (value & 0xF8)|0x06;
- status = afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value);
+ afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value);
- status = afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value);
+ afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value);
value = (value & 0x8F)|0x40;
- status = afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value);
+ afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value);
- status = afe_read_byte(dev, ADC_PWRDN_CLAMP_CH3, &value);
+ afe_read_byte(dev, ADC_PWRDN_CLAMP_CH3, &value);
value = (value & 0xDF)|0x20;
- status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, value);
+ afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, value);
}
void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq,
u8 spectral_invert, u32 mode)
{
u32 colibri_carrier_offset = 0;
- u8 status = 0;
u32 func_mode = 0x01; /* Device has a DIF if this function is called */
u32 standard = 0;
u8 value[4] = { 0, 0, 0, 0 };
@@ -1511,15 +1501,15 @@ void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq,
value[1] = (u8) 0x6F;
value[2] = (u8) 0x6F;
value[3] = (u8) 0x6F;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
PWR_CTL_EN, value, 4);
/*Set colibri for low IF*/
- status = cx231xx_afe_set_mode(dev, AFE_MODE_LOW_IF);
+ cx231xx_afe_set_mode(dev, AFE_MODE_LOW_IF);
/* Set C2HH for low IF operation.*/
standard = dev->norm;
- status = cx231xx_dif_configure_C2HH_for_low_IF(dev, dev->active_mode,
+ cx231xx_dif_configure_C2HH_for_low_IF(dev, dev->active_mode,
func_mode, standard);
/* Get colibri offsets.*/
@@ -1556,7 +1546,6 @@ void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
u8 spectral_invert, u32 mode)
{
unsigned long pll_freq_word;
- int status = 0;
u32 dif_misc_ctrl_value = 0;
u64 pll_freq_u64 = 0;
u32 i = 0;
@@ -1567,7 +1556,7 @@ void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
if (mode == TUNER_MODE_FM_RADIO) {
pll_freq_word = 0x905A1CAC;
- status = vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word);
+ vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word);
} else /*KSPROPERTY_TUNER_MODE_TV*/{
/* Calculate the PLL frequency word based on the adjusted if_freq*/
@@ -1576,23 +1565,23 @@ void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
do_div(pll_freq_u64, 50000000);
pll_freq_word = (u32)pll_freq_u64;
/*pll_freq_word = 0x3463497;*/
- status = vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word);
+ vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word);
if (spectral_invert) {
if_freq -= 400000;
/* Enable Spectral Invert*/
- status = vid_blk_read_word(dev, DIF_MISC_CTRL,
+ vid_blk_read_word(dev, DIF_MISC_CTRL,
&dif_misc_ctrl_value);
dif_misc_ctrl_value = dif_misc_ctrl_value | 0x00200000;
- status = vid_blk_write_word(dev, DIF_MISC_CTRL,
+ vid_blk_write_word(dev, DIF_MISC_CTRL,
dif_misc_ctrl_value);
} else {
if_freq += 400000;
/* Disable Spectral Invert*/
- status = vid_blk_read_word(dev, DIF_MISC_CTRL,
+ vid_blk_read_word(dev, DIF_MISC_CTRL,
&dif_misc_ctrl_value);
dif_misc_ctrl_value = dif_misc_ctrl_value & 0xFFDFFFFF;
- status = vid_blk_write_word(dev, DIF_MISC_CTRL,
+ vid_blk_write_word(dev, DIF_MISC_CTRL,
dif_misc_ctrl_value);
}
@@ -1606,10 +1595,10 @@ void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
}
cx231xx_info("Enter IF=%zd\n",
- sizeof(Dif_set_array)/sizeof(struct dif_settings));
- for (i = 0; i < sizeof(Dif_set_array)/sizeof(struct dif_settings); i++) {
+ ARRAY_SIZE(Dif_set_array));
+ for (i = 0; i < ARRAY_SIZE(Dif_set_array); i++) {
if (Dif_set_array[i].if_freq == if_freq) {
- status = vid_blk_write_word(dev,
+ vid_blk_write_word(dev,
Dif_set_array[i].register_address, Dif_set_array[i].value);
}
}
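A side note on the hunk above: ARRAY_SIZE() from <linux/kernel.h> computes the element count of a fixed-size array at compile time, which is exactly what the open-coded sizeof division was doing. A minimal sketch of the equivalence follows; the table and struct here are illustrative stand-ins, not the driver's Dif_set_array.

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>

/* Illustrative entries; the real struct dif_settings lives in the driver. */
struct dif_entry { u32 if_freq; u16 reg; u32 value; };

static const struct dif_entry table[] = {
	{ 3800000, 0x01, 0x10 },
	{ 4500000, 0x02, 0x20 },
};

static void walk_table(void)
{
	unsigned int i;

	/* ARRAY_SIZE(table) == sizeof(table) / sizeof(table[0]), evaluated
	 * at compile time and robust against the element type changing. */
	for (i = 0; i < ARRAY_SIZE(table); i++)
		pr_debug("if=%u reg=0x%x val=0x%x\n",
			 table[i].if_freq, table[i].reg, table[i].value);
}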
@@ -3090,31 +3079,30 @@ int cx231xx_gpio_i2c_read(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len)
*/
int cx231xx_gpio_i2c_write(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len)
{
- int status = 0;
int i = 0;
/* get the lock */
mutex_lock(&dev->gpio_i2c_lock);
/* start */
- status = cx231xx_gpio_i2c_start(dev);
+ cx231xx_gpio_i2c_start(dev);
/* write dev_addr */
- status = cx231xx_gpio_i2c_write_byte(dev, dev_addr << 1);
+ cx231xx_gpio_i2c_write_byte(dev, dev_addr << 1);
/* read Ack */
- status = cx231xx_gpio_i2c_read_ack(dev);
+ cx231xx_gpio_i2c_read_ack(dev);
for (i = 0; i < len; i++) {
/* Write data */
- status = cx231xx_gpio_i2c_write_byte(dev, buf[i]);
+ cx231xx_gpio_i2c_write_byte(dev, buf[i]);
/* read Ack */
- status = cx231xx_gpio_i2c_read_ack(dev);
+ cx231xx_gpio_i2c_read_ack(dev);
}
/* write End */
- status = cx231xx_gpio_i2c_end(dev);
+ cx231xx_gpio_i2c_end(dev);
/* release the lock */
mutex_unlock(&dev->gpio_i2c_lock);
diff --git a/drivers/media/video/cx231xx/cx231xx-core.c b/drivers/media/video/cx231xx/cx231xx-core.c
index 08dd930f882a..05358d486135 100644
--- a/drivers/media/video/cx231xx/cx231xx-core.c
+++ b/drivers/media/video/cx231xx/cx231xx-core.c
@@ -754,7 +754,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
}
}
- return 0;
+ return errCode ? -EINVAL : 0;
}
EXPORT_SYMBOL_GPL(cx231xx_set_mode);
@@ -764,7 +764,7 @@ int cx231xx_ep5_bulkout(struct cx231xx *dev, u8 *firmware, u16 size)
int actlen, ret = -ENOMEM;
u32 *buffer;
-buffer = kzalloc(4096, GFP_KERNEL);
+ buffer = kzalloc(4096, GFP_KERNEL);
if (buffer == NULL) {
cx231xx_info("out of mem\n");
return -ENOMEM;
@@ -772,16 +772,16 @@ buffer = kzalloc(4096, GFP_KERNEL);
memcpy(&buffer[0], firmware, 4096);
ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 5),
- buffer, 4096, &actlen, 2000);
+ buffer, 4096, &actlen, 2000);
if (ret)
cx231xx_info("bulk message failed: %d (%d/%d)", ret,
- size, actlen);
+ size, actlen);
else {
errCode = actlen != size ? -1 : 0;
}
-kfree(buffer);
- return 0;
+ kfree(buffer);
+ return errCode;
}
/*****************************************************************
@@ -797,7 +797,7 @@ static void cx231xx_isoc_irq_callback(struct urb *urb)
struct cx231xx_video_mode *vmode =
container_of(dma_q, struct cx231xx_video_mode, vidq);
struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode);
- int rc, i;
+ int i;
switch (urb->status) {
case 0: /* success */
@@ -814,7 +814,7 @@ static void cx231xx_isoc_irq_callback(struct urb *urb)
/* Copy data from URB */
spin_lock(&dev->video_mode.slock);
- rc = dev->video_mode.isoc_ctl.isoc_copy(dev, urb);
+ dev->video_mode.isoc_ctl.isoc_copy(dev, urb);
spin_unlock(&dev->video_mode.slock);
/* Reset urb buffers */
@@ -822,7 +822,6 @@ static void cx231xx_isoc_irq_callback(struct urb *urb)
urb->iso_frame_desc[i].status = 0;
urb->iso_frame_desc[i].actual_length = 0;
}
- urb->status = 0;
urb->status = usb_submit_urb(urb, GFP_ATOMIC);
if (urb->status) {
@@ -843,7 +842,6 @@ static void cx231xx_bulk_irq_callback(struct urb *urb)
struct cx231xx_video_mode *vmode =
container_of(dma_q, struct cx231xx_video_mode, vidq);
struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode);
- int rc;
switch (urb->status) {
case 0: /* success */
@@ -860,12 +858,10 @@ static void cx231xx_bulk_irq_callback(struct urb *urb)
/* Copy data from URB */
spin_lock(&dev->video_mode.slock);
- rc = dev->video_mode.bulk_ctl.bulk_copy(dev, urb);
+ dev->video_mode.bulk_ctl.bulk_copy(dev, urb);
spin_unlock(&dev->video_mode.slock);
/* Reset urb buffers */
- urb->status = 0;
-
urb->status = usb_submit_urb(urb, GFP_ATOMIC);
if (urb->status) {
cx231xx_isocdbg("urb resubmit failed (error=%i)\n",
@@ -1231,42 +1227,40 @@ int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
EXPORT_SYMBOL_GPL(cx231xx_init_bulk);
void cx231xx_stop_TS1(struct cx231xx *dev)
{
- int status = 0;
u8 val[4] = { 0, 0, 0, 0 };
- val[0] = 0x00;
- val[1] = 0x03;
- val[2] = 0x00;
- val[3] = 0x00;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
- TS_MODE_REG, val, 4);
-
- val[0] = 0x00;
- val[1] = 0x70;
- val[2] = 0x04;
- val[3] = 0x00;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
- TS1_CFG_REG, val, 4);
+ val[0] = 0x00;
+ val[1] = 0x03;
+ val[2] = 0x00;
+ val[3] = 0x00;
+ cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS_MODE_REG, val, 4);
+
+ val[0] = 0x00;
+ val[1] = 0x70;
+ val[2] = 0x04;
+ val[3] = 0x00;
+ cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS1_CFG_REG, val, 4);
}
/* EXPORT_SYMBOL_GPL(cx231xx_stop_TS1); */
void cx231xx_start_TS1(struct cx231xx *dev)
{
- int status = 0;
u8 val[4] = { 0, 0, 0, 0 };
- val[0] = 0x03;
- val[1] = 0x03;
- val[2] = 0x00;
- val[3] = 0x00;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
- TS_MODE_REG, val, 4);
-
- val[0] = 0x04;
- val[1] = 0xA3;
- val[2] = 0x3B;
- val[3] = 0x00;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
- TS1_CFG_REG, val, 4);
+ val[0] = 0x03;
+ val[1] = 0x03;
+ val[2] = 0x00;
+ val[3] = 0x00;
+ cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS_MODE_REG, val, 4);
+
+ val[0] = 0x04;
+ val[1] = 0xA3;
+ val[2] = 0x3B;
+ val[3] = 0x00;
+ cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS1_CFG_REG, val, 4);
}
/* EXPORT_SYMBOL_GPL(cx231xx_start_TS1); */
/*****************************************************************
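For context on the cx231xx_ep5_bulkout() hunk earlier in this file's diff: the old code always returned 0 regardless of the transfer result and had the kfree() mis-indented; the patched version frees the bounce buffer on every path and propagates errCode. The sketch below mirrors that shape under stated assumptions -- the 4096-byte chunk, endpoint 5 and 2000 ms timeout are taken from the hunk, the error mapping is simplified (the driver only logs when usb_bulk_msg() itself fails), and ep5_bulkout_sketch is not a driver symbol.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

static int ep5_bulkout_sketch(struct usb_device *udev, u8 *firmware, u16 size)
{
	int actlen, errCode = 0, ret;
	u32 *buffer = kzalloc(4096, GFP_KERNEL);

	if (!buffer)
		return -ENOMEM;

	/* The driver copies a full 4096-byte chunk; assume the firmware
	 * image is padded accordingly. */
	memcpy(buffer, firmware, 4096);

	ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 5),
			   buffer, 4096, &actlen, 2000);
	if (ret)
		errCode = ret;		/* transfer failed outright */
	else if (actlen != size)
		errCode = -1;		/* short transfer, as in the driver */

	kfree(buffer);			/* freed on every path now */
	return errCode;
}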
diff --git a/drivers/media/video/cx231xx/cx231xx-vbi.c b/drivers/media/video/cx231xx/cx231xx-vbi.c
index 8cdee5f78f13..3d15314e1f88 100644
--- a/drivers/media/video/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/video/cx231xx/cx231xx-vbi.c
@@ -83,7 +83,6 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
*/
static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
{
- struct cx231xx_buffer *buf;
struct cx231xx_dmaqueue *dma_q = urb->context;
int rc = 1;
unsigned char *p_buffer;
@@ -102,8 +101,6 @@ static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
return 0;
}
- buf = dev->vbi_mode.bulk_ctl.buf;
-
/* get buffer pointer and length */
p_buffer = urb->transfer_buffer;
buffer_size = urb->actual_length;
@@ -310,7 +307,6 @@ static void cx231xx_irq_vbi_callback(struct urb *urb)
struct cx231xx_video_mode *vmode =
container_of(dma_q, struct cx231xx_video_mode, vidq);
struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
- int rc;
switch (urb->status) {
case 0: /* success */
@@ -328,7 +324,7 @@ static void cx231xx_irq_vbi_callback(struct urb *urb)
/* Copy data from URB */
spin_lock(&dev->vbi_mode.slock);
- rc = dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
+ dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
spin_unlock(&dev->vbi_mode.slock);
/* Reset status */
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index 7f916f0685e9..523aa49d6b86 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -326,9 +326,7 @@ static inline void get_next_buf(struct cx231xx_dmaqueue *dma_q,
*/
static inline int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
{
- struct cx231xx_buffer *buf;
struct cx231xx_dmaqueue *dma_q = urb->context;
- unsigned char *outp = NULL;
int i, rc = 1;
unsigned char *p_buffer;
u32 bytes_parsed = 0, buffer_size = 0;
@@ -346,10 +344,6 @@ static inline int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
return 0;
}
- buf = dev->video_mode.isoc_ctl.buf;
- if (buf != NULL)
- outp = videobuf_to_vmalloc(&buf->vb);
-
for (i = 0; i < urb->number_of_packets; i++) {
int status = urb->iso_frame_desc[i].status;
@@ -429,9 +423,7 @@ static inline int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
static inline int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
{
- struct cx231xx_buffer *buf;
struct cx231xx_dmaqueue *dma_q = urb->context;
- unsigned char *outp = NULL;
int rc = 1;
unsigned char *p_buffer;
u32 bytes_parsed = 0, buffer_size = 0;
@@ -449,10 +441,6 @@ static inline int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
return 0;
}
- buf = dev->video_mode.bulk_ctl.buf;
- if (buf != NULL)
- outp = videobuf_to_vmalloc(&buf->vb);
-
if (1) {
/* get buffer pointer and length */
@@ -701,13 +689,9 @@ void cx231xx_reset_video_buffer(struct cx231xx *dev,
buf = dev->video_mode.bulk_ctl.buf;
if (buf == NULL) {
- u8 *outp = NULL;
/* first try to get the buffer */
get_next_buf(dma_q, &buf);
- if (buf)
- outp = videobuf_to_vmalloc(&buf->vb);
-
dma_q->pos = 0;
dma_q->field1_done = 0;
dma_q->current_field = -1;
@@ -2561,6 +2545,10 @@ static struct video_device *cx231xx_vdev_init(struct cx231xx *dev,
vfd->release = video_device_release;
vfd->debug = video_debug;
vfd->lock = &dev->lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
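The comment added with V4L2_FL_LOCK_ALL_FOPS above says the goal is for the driver, not the V4L2 core, to serialize file operations other than ioctl. A hedged sketch of what a driver-side fop could look like once the flag is dropped and only unlocked_ioctl is serialized by the core through vfd->lock; sketch_fop_open is a hypothetical name and the body is a placeholder, not cx231xx code.

#include <linux/fs.h>
#include <linux/mutex.h>
#include <media/v4l2-dev.h>

static int sketch_fop_open(struct file *filp)
{
	struct video_device *vdev = video_devdata(filp);
	int ret;

	/* Take the same lock the core uses for ioctls, but explicitly,
	 * so blocking driver work is under the driver's own control. */
	if (mutex_lock_interruptible(vdev->lock))
		return -ERESTARTSYS;

	ret = 0;	/* driver-specific open work would go here */

	mutex_unlock(vdev->lock);
	return ret;
}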
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 19b5499d2624..13739e002a63 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -497,6 +497,10 @@ struct cx23885_board cx23885_boards[] = {
.name = "TerraTec Cinergy T PCIe Dual",
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_TEVII_S471] = {
+ .name = "TeVii S471",
+ .portb = CX23885_MPEG_DVB,
}
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
@@ -705,6 +709,10 @@ struct cx23885_subid cx23885_subids[] = {
.subvendor = 0x153b,
.subdevice = 0x117e,
.card = CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL,
+ }, {
+ .subvendor = 0xd471,
+ .subdevice = 0x9022,
+ .card = CX23885_BOARD_TEVII_S471,
},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -1460,6 +1468,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_TEVII_S471:
case CX23885_BOARD_DVBWORLD_2005:
ts1->gen_ctrl_val = 0x5; /* Parallel */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index 6ad227029a0f..697728f09430 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -1046,6 +1046,13 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
if (cx23885_boards[dev->board].ci_type > 0)
cx_clear(RDR_RDRCTL1, 1 << 8);
+ switch (dev->board) {
+ case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_TEVII_S471:
+ cx_clear(RDR_RDRCTL1, 1 << 8);
+ break;
+ }
+
return 0;
}
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 6835eb1fc093..a80a92c47455 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -1173,6 +1173,13 @@ static int dvb_register(struct cx23885_tsport *port)
break;
}
break;
+ case CX23885_BOARD_TEVII_S471:
+ i2c_bus = &dev->i2c_bus[1];
+
+ fe0->dvb.frontend = dvb_attach(ds3000_attach,
+ &tevii_ds3000_config,
+ &i2c_bus->i2c_adap);
+ break;
default:
printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
" isn't supported yet\n",
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index f020f0568df4..d884784a1c85 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -89,6 +89,7 @@
#define CX23885_BOARD_MPX885 32
#define CX23885_BOARD_MYGICA_X8507 33
#define CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL 34
+#define CX23885_BOARD_TEVII_S471 35
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/video/cx23885/cx23888-ir.c b/drivers/media/video/cx23885/cx23888-ir.c
index bb1ce346425d..c2bc39c58f82 100644
--- a/drivers/media/video/cx23885/cx23888-ir.c
+++ b/drivers/media/video/cx23885/cx23888-ir.c
@@ -331,9 +331,7 @@ static u64 ns_to_pulse_clocks(u32 ns)
static u16 pulse_clocks_to_clock_divider(u64 count)
{
- u32 rem;
-
- rem = do_div(count, (FIFO_RXTX << 2) | 0x3);
+ do_div(count, (FIFO_RXTX << 2) | 0x3);
/* net result needs to be rounded down and decremented by 1 */
if (count > RXCLK_RCD + 1)
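On the pulse_clocks_to_clock_divider() change above (and the identical one in cx25840-ir.c further down): do_div() divides a 64-bit dividend by a 32-bit divisor in place, leaving the quotient in its first argument and returning the remainder. These callers only need the quotient, so the remainder variable was dead and could be dropped. A minimal sketch of the semantics; the nanosecond conversion is illustrative, not driver code, and ignores overflow of the intermediate product.

#include <linux/types.h>
#include <asm/div64.h>		/* do_div() */

static u64 ns_to_ticks_sketch(u64 ns, u32 clock_hz)
{
	u64 ticks = ns * clock_hz;
	u32 rem;

	/* do_div() rewrites 'ticks' with the quotient and returns the
	 * 32-bit remainder; callers that only need the quotient can
	 * simply ignore the return value, as these patches now do. */
	rem = do_div(ticks, 1000000000);
	(void)rem;

	return ticks;
}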
diff --git a/drivers/media/video/cx25821/cx25821-alsa.c b/drivers/media/video/cx25821/cx25821-alsa.c
index 03cfac476b03..1858a45dd081 100644
--- a/drivers/media/video/cx25821/cx25821-alsa.c
+++ b/drivers/media/video/cx25821/cx25821-alsa.c
@@ -290,11 +290,9 @@ static irqreturn_t cx25821_irq(int irq, void *dev_id)
u32 status, pci_status;
u32 audint_status, audint_mask;
int loop, handled = 0;
- int audint_count = 0;
audint_status = cx_read(AUD_A_INT_STAT);
audint_mask = cx_read(AUD_A_INT_MSK);
- audint_count = cx_read(AUD_A_GPCNT);
status = cx_read(PCI_INT_STAT);
for (loop = 0; loop < 1; loop++) {
diff --git a/drivers/media/video/cx25821/cx25821-audio-upstream.c b/drivers/media/video/cx25821/cx25821-audio-upstream.c
index 20c7ca3351a8..8b2a99975c23 100644
--- a/drivers/media/video/cx25821/cx25821-audio-upstream.c
+++ b/drivers/media/video/cx25821/cx25821-audio-upstream.c
@@ -585,7 +585,7 @@ int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
static irqreturn_t cx25821_upstream_irq_audio(int irq, void *dev_id)
{
struct cx25821_dev *dev = dev_id;
- u32 msk_stat, audio_status;
+ u32 audio_status;
int handled = 0;
struct sram_channel *sram_ch;
@@ -594,7 +594,6 @@ static irqreturn_t cx25821_upstream_irq_audio(int irq, void *dev_id)
sram_ch = dev->channels[dev->_audio_upstream_channel].sram_channels;
- msk_stat = cx_read(sram_ch->int_mstat);
audio_status = cx_read(sram_ch->int_stat);
/* Only deal with our interrupt */
diff --git a/drivers/media/video/cx25821/cx25821-core.c b/drivers/media/video/cx25821/cx25821-core.c
index 7930ca58349f..83c1aa6b2e6c 100644
--- a/drivers/media/video/cx25821/cx25821-core.c
+++ b/drivers/media/video/cx25821/cx25821-core.c
@@ -379,14 +379,6 @@ static inline int i2c_slave_did_ack(struct i2c_adapter *i2c_adap)
return cx_read(bus->reg_stat) & 0x01;
}
-void cx_i2c_read_print(struct cx25821_dev *dev, u32 reg, const char *reg_string)
-{
- int tmp = 0;
- u32 value = 0;
-
- value = cx25821_i2c_read(&dev->i2c_bus[0], reg, &tmp);
-}
-
static void cx25821_registers_init(struct cx25821_dev *dev)
{
u32 tmp;
@@ -895,7 +887,7 @@ static void cx25821_iounmap(struct cx25821_dev *dev)
static int cx25821_dev_setup(struct cx25821_dev *dev)
{
- int io_size = 0, i;
+ int i;
pr_info("\n***********************************\n");
pr_info("cx25821 set up\n");
@@ -960,7 +952,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
/* PCIe stuff */
dev->base_io_addr = pci_resource_start(dev->pci, 0);
- io_size = pci_resource_len(dev->pci, 0);
if (!dev->base_io_addr) {
CX25821_ERR("No PCI Memory resources, exiting!\n");
@@ -1317,13 +1308,12 @@ void cx25821_free_buffer(struct videobuf_queue *q, struct cx25821_buffer *buf)
static irqreturn_t cx25821_irq(int irq, void *dev_id)
{
struct cx25821_dev *dev = dev_id;
- u32 pci_status, pci_mask;
+ u32 pci_status;
u32 vid_status;
int i, handled = 0;
u32 mask[8] = { 1, 2, 4, 8, 16, 32, 64, 128 };
pci_status = cx_read(PCI_INT_STAT);
- pci_mask = cx_read(PCI_INT_MSK);
if (pci_status == 0)
goto out;
diff --git a/drivers/media/video/cx25821/cx25821-i2c.c b/drivers/media/video/cx25821/cx25821-i2c.c
index 12d7300fa1e9..6311180f430c 100644
--- a/drivers/media/video/cx25821/cx25821-i2c.c
+++ b/drivers/media/video/cx25821/cx25821-i2c.c
@@ -361,7 +361,6 @@ void cx25821_av_clk(struct cx25821_dev *dev, int enable)
int cx25821_i2c_read(struct cx25821_i2c *bus, u16 reg_addr, int *value)
{
struct i2c_client *client = &bus->i2c_client;
- int retval = 0;
int v = 0;
u8 addr[2] = { 0, 0 };
u8 buf[4] = { 0, 0, 0, 0 };
@@ -385,7 +384,7 @@ int cx25821_i2c_read(struct cx25821_i2c *bus, u16 reg_addr, int *value)
msgs[0].addr = 0x44;
msgs[1].addr = 0x44;
- retval = i2c_xfer(client->adapter, msgs, 2);
+ i2c_xfer(client->adapter, msgs, 2);
v = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
*value = v;
diff --git a/drivers/media/video/cx25821/cx25821-medusa-video.c b/drivers/media/video/cx25821/cx25821-medusa-video.c
index 298a68d98c2f..313fb20a0b47 100644
--- a/drivers/media/video/cx25821/cx25821-medusa-video.c
+++ b/drivers/media/video/cx25821/cx25821-medusa-video.c
@@ -35,7 +35,6 @@
static void medusa_enable_bluefield_output(struct cx25821_dev *dev, int channel,
int enable)
{
- int ret_val = 1;
u32 value = 0;
u32 tmp = 0;
int out_ctrl = OUT_CTRL1;
@@ -79,13 +78,13 @@ static void medusa_enable_bluefield_output(struct cx25821_dev *dev, int channel,
value &= 0xFFFFFF7F; /* clear BLUE_FIELD_EN */
if (enable)
value |= 0x00000080; /* set BLUE_FIELD_EN */
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl, value);
+ cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl, value);
value = cx25821_i2c_read(&dev->i2c_bus[0], out_ctrl_ns, &tmp);
value &= 0xFFFFFF7F;
if (enable)
value |= 0x00000080; /* set BLUE_FIELD_EN */
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl_ns, value);
+ cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl_ns, value);
}
static int medusa_initialize_ntsc(struct cx25821_dev *dev)
@@ -431,7 +430,6 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width,
{
int decoder = 0;
int decoder_count = 0;
- int ret_val = 0;
u32 hscale = 0x0;
u32 vscale = 0x0;
const int MAX_WIDTH = 720;
@@ -482,9 +480,9 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width,
for (; decoder < decoder_count; decoder++) {
/* write scaling values for each decoder */
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ cx25821_i2c_write(&dev->i2c_bus[0],
HSCALE_CTRL + (0x200 * decoder), hscale);
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ cx25821_i2c_write(&dev->i2c_bus[0],
VSCALE_CTRL + (0x200 * decoder), vscale);
}
@@ -494,7 +492,6 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width,
static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder,
int duration)
{
- int ret_val = 0;
u32 fld_cnt = 0;
u32 tmp = 0;
u32 disp_cnt_reg = DISP_AB_CNT;
@@ -537,7 +534,7 @@ static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder,
fld_cnt |= ((u32) duration) << 16;
}
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0], disp_cnt_reg, fld_cnt);
+ cx25821_i2c_write(&dev->i2c_bus[0], disp_cnt_reg, fld_cnt);
mutex_unlock(&dev->lock);
}
diff --git a/drivers/media/video/cx25821/cx25821-video-upstream-ch2.c b/drivers/media/video/cx25821/cx25821-video-upstream-ch2.c
index 5a157cf4a95e..c8c94fbf5d8d 100644
--- a/drivers/media/video/cx25821/cx25821-video-upstream-ch2.c
+++ b/drivers/media/video/cx25821/cx25821-video-upstream-ch2.c
@@ -587,7 +587,7 @@ int cx25821_video_upstream_irq_ch2(struct cx25821_dev *dev, int chan_num,
static irqreturn_t cx25821_upstream_irq_ch2(int irq, void *dev_id)
{
struct cx25821_dev *dev = dev_id;
- u32 msk_stat, vid_status;
+ u32 vid_status;
int handled = 0;
int channel_num = 0;
struct sram_channel *sram_ch;
@@ -598,7 +598,6 @@ static irqreturn_t cx25821_upstream_irq_ch2(int irq, void *dev_id)
channel_num = VID_UPSTREAM_SRAM_CHANNEL_J;
sram_ch = dev->channels[channel_num].sram_channels;
- msk_stat = cx_read(sram_ch->int_mstat);
vid_status = cx_read(sram_ch->int_stat);
/* Only deal with our interrupt */
diff --git a/drivers/media/video/cx25821/cx25821-video-upstream.c b/drivers/media/video/cx25821/cx25821-video-upstream.c
index 21e7d657f049..52c13e0b6492 100644
--- a/drivers/media/video/cx25821/cx25821-video-upstream.c
+++ b/drivers/media/video/cx25821/cx25821-video-upstream.c
@@ -637,7 +637,7 @@ int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
static irqreturn_t cx25821_upstream_irq(int irq, void *dev_id)
{
struct cx25821_dev *dev = dev_id;
- u32 msk_stat, vid_status;
+ u32 vid_status;
int handled = 0;
int channel_num = 0;
struct sram_channel *sram_ch;
@@ -649,7 +649,6 @@ static irqreturn_t cx25821_upstream_irq(int irq, void *dev_id)
sram_ch = dev->channels[channel_num].sram_channels;
- msk_stat = cx_read(sram_ch->int_mstat);
vid_status = cx_read(sram_ch->int_stat);
/* Only deal with our interrupt */
diff --git a/drivers/media/video/cx25821/cx25821-video.c b/drivers/media/video/cx25821/cx25821-video.c
index ffd8bc79c02e..b38d4379cc36 100644
--- a/drivers/media/video/cx25821/cx25821-video.c
+++ b/drivers/media/video/cx25821/cx25821-video.c
@@ -109,25 +109,6 @@ struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc)
return NULL;
}
-void cx25821_dump_video_queue(struct cx25821_dev *dev,
- struct cx25821_dmaqueue *q)
-{
- struct cx25821_buffer *buf;
- struct list_head *item;
- dprintk(1, "%s()\n", __func__);
-
- if (!list_empty(&q->active)) {
- list_for_each(item, &q->active)
- buf = list_entry(item, struct cx25821_buffer, vb.queue);
- }
-
- if (!list_empty(&q->queued)) {
- list_for_each(item, &q->queued)
- buf = list_entry(item, struct cx25821_buffer, vb.queue);
- }
-
-}
-
void cx25821_video_wakeup(struct cx25821_dev *dev, struct cx25821_dmaqueue *q,
u32 count)
{
@@ -557,7 +538,7 @@ int cx25821_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
struct cx25821_buffer *buf =
container_of(vb, struct cx25821_buffer, vb);
int rc, init_buffer = 0;
- u32 line0_offset, line1_offset;
+ u32 line0_offset;
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
int bpl_local = LINE_SIZE_D1;
int channel_opened = fh->channel_id;
@@ -639,7 +620,6 @@ int cx25821_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
case V4L2_FIELD_INTERLACED:
/* All other formats are top field first */
line0_offset = 0;
- line1_offset = buf->bpl;
dprintk(1, "top field first\n");
cx25821_risc_buffer(dev->pci, &buf->risc,
@@ -1830,7 +1810,6 @@ static long video_ioctl_set(struct file *file, unsigned int cmd,
int i = 0;
int cif_enable = 0;
int cif_width = 0;
- u32 value = 0;
data_from_user = (struct downstream_user_struct *)arg;
@@ -1914,7 +1893,7 @@ static long video_ioctl_set(struct file *file, unsigned int cmd,
cx_write(data_from_user->reg_address, data_from_user->reg_data);
break;
case MEDUSA_READ:
- value = cx25821_i2c_read(&dev->i2c_bus[0],
+ cx25821_i2c_read(&dev->i2c_bus[0],
(u16) data_from_user->reg_address,
&data_from_user->reg_data);
break;
diff --git a/drivers/media/video/cx25821/cx25821-video.h b/drivers/media/video/cx25821/cx25821-video.h
index d0d9538ca5b3..9652a5e35ba2 100644
--- a/drivers/media/video/cx25821/cx25821-video.h
+++ b/drivers/media/video/cx25821/cx25821-video.h
@@ -86,8 +86,6 @@ extern struct cx25821_fmt formats[];
extern struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc);
extern struct cx25821_data timeout_data[MAX_VID_CHANNEL_NUM];
-extern void cx25821_dump_video_queue(struct cx25821_dev *dev,
- struct cx25821_dmaqueue *q);
extern void cx25821_video_wakeup(struct cx25821_dev *dev,
struct cx25821_dmaqueue *q, u32 count);
diff --git a/drivers/media/video/cx25840/cx25840-ir.c b/drivers/media/video/cx25840/cx25840-ir.c
index 13c380ebb562..38ce76ed1924 100644
--- a/drivers/media/video/cx25840/cx25840-ir.c
+++ b/drivers/media/video/cx25840/cx25840-ir.c
@@ -316,9 +316,7 @@ static u64 ns_to_pulse_clocks(u32 ns)
static u16 pulse_clocks_to_clock_divider(u64 count)
{
- u32 rem;
-
- rem = do_div(count, (FIFO_RXTX << 2) | 0x3);
+ do_div(count, (FIFO_RXTX << 2) | 0x3);
/* net result needs to be rounded down and decremented by 1 */
if (count > RXCLK_RCD + 1)
@@ -860,12 +858,10 @@ static int cx25840_ir_tx_write(struct v4l2_subdev *sd, u8 *buf, size_t count,
ssize_t *num)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
- struct i2c_client *c;
if (ir_state == NULL)
return -ENODEV;
- c = ir_state->c;
#if 0
/*
* FIXME - the code below is an incomplete and untested sketch of what
diff --git a/drivers/media/video/davinci/Kconfig b/drivers/media/video/davinci/Kconfig
index 60a456ebdc7c..9337b5605c90 100644
--- a/drivers/media/video/davinci/Kconfig
+++ b/drivers/media/video/davinci/Kconfig
@@ -40,6 +40,7 @@ config VIDEO_VPSS_SYSTEM
config VIDEO_VPFE_CAPTURE
tristate "VPFE Video Capture Driver"
depends on VIDEO_V4L2 && (ARCH_DAVINCI || ARCH_OMAP3)
+ depends on I2C
select VIDEOBUF_DMA_CONTIG
help
Support for DMx/AMx VPFE based frame grabber. This is the
diff --git a/drivers/media/video/davinci/vpbe_display.c b/drivers/media/video/davinci/vpbe_display.c
index 1f3b1c729252..e106b72810a9 100644
--- a/drivers/media/video/davinci/vpbe_display.c
+++ b/drivers/media/video/davinci/vpbe_display.c
@@ -1618,6 +1618,10 @@ static __devinit int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
vbd->ioctl_ops = &vpbe_ioctl_ops;
vbd->minor = -1;
vbd->v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vbd->flags);
vbd->lock = &vpbe_display_layer->opslock;
if (disp_dev->vpbe_dev->current_timings.timings_type &
diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c
index 20cf271a774b..49a845fb804a 100644
--- a/drivers/media/video/davinci/vpfe_capture.c
+++ b/drivers/media/video/davinci/vpfe_capture.c
@@ -1761,7 +1761,7 @@ static long vpfe_param_handler(struct file *file, void *priv,
}
break;
default:
- ret = -EINVAL;
+ ret = -ENOTTY;
}
unlock_out:
mutex_unlock(&vpfe_dev->lock);
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index 6504e40a31dd..96046957bf21 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -2228,6 +2228,10 @@ static __init int vpif_probe(struct platform_device *pdev)
common = &(ch->common[VPIF_VIDEO_INDEX]);
spin_lock_init(&common->irqlock);
mutex_init(&common->lock);
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &ch->video_dev->flags);
ch->video_dev->lock = &common->lock;
/* Initialize prio member of channel object */
v4l2_prio_init(&ch->prio);
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index 7fa34b4fae26..e6488ee7db18 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -1778,6 +1778,10 @@ static __init int vpif_probe(struct platform_device *pdev)
v4l2_prio_init(&ch->prio);
ch->common[VPIF_VIDEO_INDEX].fmt.type =
V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &ch->video_dev->flags);
ch->video_dev->lock = &common->lock;
/* register video device */
diff --git a/drivers/media/video/em28xx/Kconfig b/drivers/media/video/em28xx/Kconfig
index f6f622e123bd..928ef0d0429f 100644
--- a/drivers/media/video/em28xx/Kconfig
+++ b/drivers/media/video/em28xx/Kconfig
@@ -49,10 +49,10 @@ config VIDEO_EM28XX_DVB
Empiatech em28xx chips.
config VIDEO_EM28XX_RC
- bool "EM28XX Remote Controller support"
+ tristate "EM28XX Remote Controller support"
depends on RC_CORE
depends on VIDEO_EM28XX
depends on !(RC_CORE=m && VIDEO_EM28XX=y)
- default y
+ default VIDEO_EM28XX
---help---
Enables Remote Controller support on em28xx driver.
diff --git a/drivers/media/video/em28xx/Makefile b/drivers/media/video/em28xx/Makefile
index 2abdf76c5203..c8b338d4be05 100644
--- a/drivers/media/video/em28xx/Makefile
+++ b/drivers/media/video/em28xx/Makefile
@@ -1,16 +1,15 @@
em28xx-y := em28xx-video.o em28xx-i2c.o em28xx-cards.o
em28xx-y += em28xx-core.o em28xx-vbi.o
-em28xx-$(CONFIG_VIDEO_EM28XX_RC) += em28xx-input.o
-
em28xx-alsa-objs := em28xx-audio.o
+em28xx-rc-objs := em28xx-input.o
obj-$(CONFIG_VIDEO_EM28XX) += em28xx.o
obj-$(CONFIG_VIDEO_EM28XX_ALSA) += em28xx-alsa.o
obj-$(CONFIG_VIDEO_EM28XX_DVB) += em28xx-dvb.o
+obj-$(CONFIG_VIDEO_EM28XX_RC) += em28xx-rc.o
ccflags-y += -Idrivers/media/video
ccflags-y += -Idrivers/media/common/tuners
ccflags-y += -Idrivers/media/dvb/dvb-core
ccflags-y += -Idrivers/media/dvb/frontends
-
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index e2a7b77c39c7..d7e2a3dc5525 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -343,7 +343,6 @@ static int snd_em28xx_pcm_close(struct snd_pcm_substream *substream)
static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- unsigned int channels, rate, format;
int ret;
dprintk("Setting capture parameters\n");
@@ -352,13 +351,17 @@ static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream,
params_buffer_bytes(hw_params));
if (ret < 0)
return ret;
+#if 0
+ /* TODO: set up em28xx audio chip to deliver the correct audio format,
+ current default is 48000hz multiplexed => 96000hz mono
+ which shouldn't matter since analogue TV only supports mono */
+ unsigned int channels, rate, format;
+
format = params_format(hw_params);
rate = params_rate(hw_params);
channels = params_channels(hw_params);
+#endif
- /* TODO: set up em28xx audio chip to deliver the correct audio format,
- current default is 48000hz multiplexed => 96000hz mono
- which shouldn't matter since analogue TV only supports mono */
return 0;
}
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 9fd8cc7dbb23..20a7e24de6fb 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -69,6 +69,8 @@ struct em28xx_hash_table {
unsigned int tuner;
};
+static void em28xx_pre_card_setup(struct em28xx *dev);
+
/*
* Reset sequences for analog/digital modes
*/
@@ -2361,7 +2363,7 @@ static int em28xx_hint_sensor(struct em28xx *dev)
/* Since em28xx_pre_card_setup() requires a proper dev->model,
* this won't work for boards with generic PCI IDs
*/
-void em28xx_pre_card_setup(struct em28xx *dev)
+static void em28xx_pre_card_setup(struct em28xx *dev)
{
/* Set the initial XCLK and I2C clock values based on the board
definition */
@@ -2661,55 +2663,7 @@ static int em28xx_hint_board(struct em28xx *dev)
return -1;
}
-/* ----------------------------------------------------------------------- */
-void em28xx_register_i2c_ir(struct em28xx *dev)
-{
- /* Leadtek winfast tv USBII deluxe can find a non working IR-device */
- /* at address 0x18, so if that address is needed for another board in */
- /* the future, please put it after 0x1f. */
- struct i2c_board_info info;
- const unsigned short addr_list[] = {
- 0x1f, 0x30, 0x47, I2C_CLIENT_END
- };
-
- if (disable_ir)
- return;
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- memset(&dev->init_data, 0, sizeof(dev->init_data));
- strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
-
- /* detect & configure */
- switch (dev->model) {
- case EM2800_BOARD_TERRATEC_CINERGY_200:
- case EM2820_BOARD_TERRATEC_CINERGY_250:
- dev->init_data.ir_codes = RC_MAP_EM_TERRATEC;
- dev->init_data.get_key = em28xx_get_key_terratec;
- dev->init_data.name = "i2c IR (EM28XX Terratec)";
- break;
- case EM2820_BOARD_PINNACLE_USB_2:
- dev->init_data.ir_codes = RC_MAP_PINNACLE_GREY;
- dev->init_data.get_key = em28xx_get_key_pinnacle_usb_grey;
- dev->init_data.name = "i2c IR (EM28XX Pinnacle PCTV)";
- break;
- case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
- dev->init_data.ir_codes = RC_MAP_HAUPPAUGE;
- dev->init_data.get_key = em28xx_get_key_em_haup;
- dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
- break;
- case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
- dev->init_data.ir_codes = RC_MAP_WINFAST_USBII_DELUXE;
- dev->init_data.get_key = em28xx_get_key_winfast_usbii_deluxe;
- dev->init_data.name = "i2c IR (EM2820 Winfast TV USBII Deluxe)";
- break;
- }
-
- if (dev->init_data.name)
- info.platform_data = &dev->init_data;
- i2c_new_probed_device(&dev->i2c_adap, &info, addr_list, NULL);
-}
-
-void em28xx_card_setup(struct em28xx *dev)
+static void em28xx_card_setup(struct em28xx *dev)
{
/*
* If the device can be a webcam, seek for a sensor.
@@ -2849,13 +2803,6 @@ void em28xx_card_setup(struct em28xx *dev)
break;
}
-#if defined(CONFIG_MODULES) && defined(MODULE)
- if (dev->board.has_ir_i2c && !disable_ir)
- request_module("ir-kbd-i2c");
-#endif
- if (dev->board.has_snapshot_button)
- em28xx_register_snapshot_button(dev);
-
if (dev->board.valid == EM28XX_BOARD_NOT_VALIDATED) {
em28xx_errdev("\n\n");
em28xx_errdev("The support for this board weren't "
@@ -2929,9 +2876,6 @@ void em28xx_card_setup(struct em28xx *dev)
}
em28xx_tuner_setup(dev);
-
- if(!disable_ir)
- em28xx_ir_init(dev);
}
@@ -2948,6 +2892,8 @@ static void request_module_async(struct work_struct *work)
if (dev->board.has_dvb)
request_module("em28xx-dvb");
+ if (dev->board.has_ir_i2c && !disable_ir)
+ request_module("em28xx-rc");
}
static void request_modules(struct em28xx *dev)
@@ -2972,12 +2918,6 @@ static void flush_request_modules(struct em28xx *dev)
*/
void em28xx_release_resources(struct em28xx *dev)
{
- if (dev->sbutton_input_dev)
- em28xx_deregister_snapshot_button(dev);
-
- if (dev->ir)
- em28xx_ir_fini(dev);
-
/*FIXME: I2C IR should be disconnected */
em28xx_release_analog_resources(dev);
@@ -3005,9 +2945,6 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
dev->udev = udev;
mutex_init(&dev->ctrl_urb_lock);
spin_lock_init(&dev->slock);
- init_waitqueue_head(&dev->open);
- init_waitqueue_head(&dev->wait_frame);
- init_waitqueue_head(&dev->wait_stream);
dev->em28xx_write_regs = em28xx_write_regs;
dev->em28xx_read_reg = em28xx_read_reg;
@@ -3140,9 +3077,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
- INIT_LIST_HEAD(&dev->vidq.queued);
INIT_LIST_HEAD(&dev->vbiq.active);
- INIT_LIST_HEAD(&dev->vbiq.queued);
if (dev->board.has_msp34xx) {
/* Send a reset to other chips via gpio */
@@ -3447,8 +3382,6 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
resources */
mutex_lock(&dev->lock);
- wake_up_interruptible_all(&dev->open);
-
v4l2_device_disconnect(&dev->v4l2_dev);
if (dev->users) {
@@ -3460,8 +3393,6 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
dev->state |= DEV_MISCONFIGURED;
em28xx_uninit_isoc(dev, dev->mode);
dev->state |= DEV_DISCONNECTED;
- wake_up_interruptible(&dev->wait_frame);
- wake_up_interruptible(&dev->wait_stream);
} else {
dev->state |= DEV_DISCONNECTED;
em28xx_release_resources(dev);
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index 53a9fb91e97e..5717bdee8f1b 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -139,6 +139,7 @@ int em28xx_read_reg(struct em28xx *dev, u16 reg)
{
return em28xx_read_reg_req(dev, USB_REQ_GET_STATUS, reg);
}
+EXPORT_SYMBOL_GPL(em28xx_read_reg);
/*
* em28xx_write_regs_req()
@@ -205,6 +206,7 @@ int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len)
return rc;
}
+EXPORT_SYMBOL_GPL(em28xx_write_regs);
/* Write a single register */
int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val)
@@ -239,6 +241,7 @@ int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
return em28xx_write_regs(dev, reg, &newval, 1);
}
+EXPORT_SYMBOL_GPL(em28xx_write_reg_bits);
/*
* em28xx_is_ac97_ready()
@@ -666,7 +669,6 @@ int em28xx_capture_start(struct em28xx *dev, int start)
return rc;
}
-EXPORT_SYMBOL_GPL(em28xx_capture_start);
int em28xx_vbi_supported(struct em28xx *dev)
{
@@ -975,7 +977,6 @@ void em28xx_uninit_isoc(struct em28xx *dev, enum em28xx_mode mode)
else
isoc_bufs = &dev->isoc_ctl.analog_bufs;
- dev->isoc_ctl.nfields = -1;
for (i = 0; i < isoc_bufs->num_bufs; i++) {
urb = isoc_bufs->urb[i];
if (urb) {
@@ -1008,6 +1009,31 @@ void em28xx_uninit_isoc(struct em28xx *dev, enum em28xx_mode mode)
EXPORT_SYMBOL_GPL(em28xx_uninit_isoc);
/*
+ * Stop URBs
+ */
+void em28xx_stop_urbs(struct em28xx *dev)
+{
+ int i;
+ struct urb *urb;
+ struct em28xx_usb_isoc_bufs *isoc_bufs = &dev->isoc_ctl.digital_bufs;
+
+ em28xx_isocdbg("em28xx: called em28xx_stop_urbs\n");
+
+ for (i = 0; i < isoc_bufs->num_bufs; i++) {
+ urb = isoc_bufs->urb[i];
+ if (urb) {
+ if (!irqs_disabled())
+ usb_kill_urb(urb);
+ else
+ usb_unlink_urb(urb);
+ }
+ }
+
+ em28xx_capture_start(dev, 0);
+}
+EXPORT_SYMBOL_GPL(em28xx_stop_urbs);
+
+/*
* Allocate URBs
*/
int em28xx_alloc_isoc(struct em28xx *dev, enum em28xx_mode mode,
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index 503a8d5b5382..16410ac20092 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -183,7 +183,7 @@ static int em28xx_stop_streaming(struct em28xx_dvb *dvb)
{
struct em28xx *dev = dvb->adapter.priv;
- em28xx_capture_start(dev, 0);
+ em28xx_stop_urbs(dev);
em28xx_set_mode(dev, EM28XX_SUSPEND);
@@ -336,6 +336,8 @@ struct drxk_config pctv_520e_drxk = {
.single_master = 1,
.microcode_name = "dvb-demod-drxk-pctv.fw",
.chunk_size = 58,
+ .antenna_dvbt = true, /* disable LNA */
+ .antenna_gpio = (1 << 2), /* disable LNA */
};
static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
@@ -474,8 +476,8 @@ static void terratec_h5_init(struct em28xx *dev)
static void pctv_520e_init(struct em28xx *dev)
{
/*
- * Init TDA8295(?) analog demodulator. Looks like I2C traffic to
- * digital demodulator and tuner are routed via TDA8295.
+ * Init AVF4910B analog decoder. Looks like I2C traffic to
+ * digital demodulator and tuner are routed via AVF4910B.
*/
int i;
struct {
@@ -542,7 +544,8 @@ static struct cxd2820r_config em28xx_cxd2820r_config = {
.i2c_address = (0xd8 >> 1),
.ts_mode = CXD2820R_TS_SERIAL,
- /* enable LNA for DVB-T2 and DVB-C */
+ /* enable LNA for DVB-T, DVB-T2 and DVB-C */
+ .gpio_dvbt[0] = CXD2820R_GPIO_E | CXD2820R_GPIO_O | CXD2820R_GPIO_L,
.gpio_dvbt2[0] = CXD2820R_GPIO_E | CXD2820R_GPIO_O | CXD2820R_GPIO_L,
.gpio_dvbc[0] = CXD2820R_GPIO_E | CXD2820R_GPIO_O | CXD2820R_GPIO_L,
};
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index a88e169dba23..185db65b766e 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -553,9 +553,6 @@ int em28xx_i2c_register(struct em28xx *dev)
if (i2c_scan)
em28xx_do_i2c_scan(dev);
- /* Instantiate the IR receiver device, if present */
- em28xx_register_i2c_ir(dev);
-
return 0;
}
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index 2630b265b0e8..fce5f7680c99 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -80,7 +80,7 @@ struct em28xx_IR {
I2C IR based get keycodes - should be used with ir-kbd-i2c
**********************************************************/
-int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+static int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
{
unsigned char b;
@@ -108,7 +108,7 @@ int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
return 1;
}
-int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+static int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
{
unsigned char buf[2];
u16 code;
@@ -157,7 +157,7 @@ int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
return 1;
}
-int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
+static int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
u32 *ir_raw)
{
unsigned char buf[3];
@@ -179,7 +179,8 @@ int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
return 1;
}
-int em28xx_get_key_winfast_usbii_deluxe(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+static int em28xx_get_key_winfast_usbii_deluxe(struct IR_i2c *ir, u32 *ir_key,
+ u32 *ir_raw)
{
unsigned char subaddr, keydetect, key;
@@ -387,7 +388,138 @@ int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 rc_type)
return rc;
}
-int em28xx_ir_init(struct em28xx *dev)
+static void em28xx_register_i2c_ir(struct em28xx *dev)
+{
+ /* Leadtek winfast tv USBII deluxe can find a non working IR-device */
+ /* at address 0x18, so if that address is needed for another board in */
+ /* the future, please put it after 0x1f. */
+ struct i2c_board_info info;
+ const unsigned short addr_list[] = {
+ 0x1f, 0x30, 0x47, I2C_CLIENT_END
+ };
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ memset(&dev->init_data, 0, sizeof(dev->init_data));
+ strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+
+ /* detect & configure */
+ switch (dev->model) {
+ case EM2800_BOARD_TERRATEC_CINERGY_200:
+ case EM2820_BOARD_TERRATEC_CINERGY_250:
+ dev->init_data.ir_codes = RC_MAP_EM_TERRATEC;
+ dev->init_data.get_key = em28xx_get_key_terratec;
+ dev->init_data.name = "i2c IR (EM28XX Terratec)";
+ break;
+ case EM2820_BOARD_PINNACLE_USB_2:
+ dev->init_data.ir_codes = RC_MAP_PINNACLE_GREY;
+ dev->init_data.get_key = em28xx_get_key_pinnacle_usb_grey;
+ dev->init_data.name = "i2c IR (EM28XX Pinnacle PCTV)";
+ break;
+ case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
+ dev->init_data.ir_codes = RC_MAP_HAUPPAUGE;
+ dev->init_data.get_key = em28xx_get_key_em_haup;
+ dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
+ break;
+ case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
+ dev->init_data.ir_codes = RC_MAP_WINFAST_USBII_DELUXE;
+ dev->init_data.get_key = em28xx_get_key_winfast_usbii_deluxe;
+ dev->init_data.name = "i2c IR (EM2820 Winfast TV USBII Deluxe)";
+ break;
+ }
+
+ if (dev->init_data.name)
+ info.platform_data = &dev->init_data;
+ i2c_new_probed_device(&dev->i2c_adap, &info, addr_list, NULL);
+}
+
+/**********************************************************
+ Handle Webcam snapshot button
+ **********************************************************/
+
+static void em28xx_query_sbutton(struct work_struct *work)
+{
+ /* Poll the register and see if the button is depressed */
+ struct em28xx *dev =
+ container_of(work, struct em28xx, sbutton_query_work.work);
+ int ret;
+
+ ret = em28xx_read_reg(dev, EM28XX_R0C_USBSUSP);
+
+ if (ret & EM28XX_R0C_USBSUSP_SNAPSHOT) {
+ u8 cleared;
+ /* Button is depressed, clear the register */
+ cleared = ((u8) ret) & ~EM28XX_R0C_USBSUSP_SNAPSHOT;
+ em28xx_write_regs(dev, EM28XX_R0C_USBSUSP, &cleared, 1);
+
+ /* Not emulate the keypress */
+ input_report_key(dev->sbutton_input_dev, EM28XX_SNAPSHOT_KEY,
+ 1);
+ /* Now unpress the key */
+ input_report_key(dev->sbutton_input_dev, EM28XX_SNAPSHOT_KEY,
+ 0);
+ }
+
+ /* Schedule next poll */
+ schedule_delayed_work(&dev->sbutton_query_work,
+ msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
+}
+
+static void em28xx_register_snapshot_button(struct em28xx *dev)
+{
+ struct input_dev *input_dev;
+ int err;
+
+ em28xx_info("Registering snapshot button...\n");
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ em28xx_errdev("input_allocate_device failed\n");
+ return;
+ }
+
+ usb_make_path(dev->udev, dev->snapshot_button_path,
+ sizeof(dev->snapshot_button_path));
+ strlcat(dev->snapshot_button_path, "/sbutton",
+ sizeof(dev->snapshot_button_path));
+ INIT_DELAYED_WORK(&dev->sbutton_query_work, em28xx_query_sbutton);
+
+ input_dev->name = "em28xx snapshot button";
+ input_dev->phys = dev->snapshot_button_path;
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ set_bit(EM28XX_SNAPSHOT_KEY, input_dev->keybit);
+ input_dev->keycodesize = 0;
+ input_dev->keycodemax = 0;
+ input_dev->id.bustype = BUS_USB;
+ input_dev->id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
+ input_dev->id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
+ input_dev->id.version = 1;
+ input_dev->dev.parent = &dev->udev->dev;
+
+ err = input_register_device(input_dev);
+ if (err) {
+ em28xx_errdev("input_register_device failed\n");
+ input_free_device(input_dev);
+ return;
+ }
+
+ dev->sbutton_input_dev = input_dev;
+ schedule_delayed_work(&dev->sbutton_query_work,
+ msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
+ return;
+
+}
+
+static void em28xx_deregister_snapshot_button(struct em28xx *dev)
+{
+ if (dev->sbutton_input_dev != NULL) {
+ em28xx_info("Deregistering snapshot button\n");
+ cancel_delayed_work_sync(&dev->sbutton_query_work);
+ input_unregister_device(dev->sbutton_input_dev);
+ dev->sbutton_input_dev = NULL;
+ }
+ return;
+}
+
+static int em28xx_ir_init(struct em28xx *dev)
{
struct em28xx_IR *ir;
struct rc_dev *rc;
@@ -448,6 +580,15 @@ int em28xx_ir_init(struct em28xx *dev)
if (err)
goto err_out_stop;
+ em28xx_register_i2c_ir(dev);
+
+#if defined(CONFIG_MODULES) && defined(MODULE)
+ if (dev->board.has_ir_i2c)
+ request_module("ir-kbd-i2c");
+#endif
+ if (dev->board.has_snapshot_button)
+ em28xx_register_snapshot_button(dev);
+
return 0;
err_out_stop:
@@ -458,10 +599,12 @@ int em28xx_ir_init(struct em28xx *dev)
return err;
}
-int em28xx_ir_fini(struct em28xx *dev)
+static int em28xx_ir_fini(struct em28xx *dev)
{
struct em28xx_IR *ir = dev->ir;
+ em28xx_deregister_snapshot_button(dev);
+
/* skip detach on non attached boards */
if (!ir)
return 0;
@@ -475,89 +618,26 @@ int em28xx_ir_fini(struct em28xx *dev)
return 0;
}
-/**********************************************************
- Handle Webcam snapshot button
- **********************************************************/
+static struct em28xx_ops rc_ops = {
+ .id = EM28XX_RC,
+ .name = "Em28xx Input Extension",
+ .init = em28xx_ir_init,
+ .fini = em28xx_ir_fini,
+};
-static void em28xx_query_sbutton(struct work_struct *work)
+static int __init em28xx_rc_register(void)
{
- /* Poll the register and see if the button is depressed */
- struct em28xx *dev =
- container_of(work, struct em28xx, sbutton_query_work.work);
- int ret;
-
- ret = em28xx_read_reg(dev, EM28XX_R0C_USBSUSP);
-
- if (ret & EM28XX_R0C_USBSUSP_SNAPSHOT) {
- u8 cleared;
- /* Button is depressed, clear the register */
- cleared = ((u8) ret) & ~EM28XX_R0C_USBSUSP_SNAPSHOT;
- em28xx_write_regs(dev, EM28XX_R0C_USBSUSP, &cleared, 1);
-
- /* Not emulate the keypress */
- input_report_key(dev->sbutton_input_dev, EM28XX_SNAPSHOT_KEY,
- 1);
- /* Now unpress the key */
- input_report_key(dev->sbutton_input_dev, EM28XX_SNAPSHOT_KEY,
- 0);
- }
-
- /* Schedule next poll */
- schedule_delayed_work(&dev->sbutton_query_work,
- msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
+ return em28xx_register_extension(&rc_ops);
}
-void em28xx_register_snapshot_button(struct em28xx *dev)
+static void __exit em28xx_rc_unregister(void)
{
- struct input_dev *input_dev;
- int err;
-
- em28xx_info("Registering snapshot button...\n");
- input_dev = input_allocate_device();
- if (!input_dev) {
- em28xx_errdev("input_allocate_device failed\n");
- return;
- }
-
- usb_make_path(dev->udev, dev->snapshot_button_path,
- sizeof(dev->snapshot_button_path));
- strlcat(dev->snapshot_button_path, "/sbutton",
- sizeof(dev->snapshot_button_path));
- INIT_DELAYED_WORK(&dev->sbutton_query_work, em28xx_query_sbutton);
-
- input_dev->name = "em28xx snapshot button";
- input_dev->phys = dev->snapshot_button_path;
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
- set_bit(EM28XX_SNAPSHOT_KEY, input_dev->keybit);
- input_dev->keycodesize = 0;
- input_dev->keycodemax = 0;
- input_dev->id.bustype = BUS_USB;
- input_dev->id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
- input_dev->id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
- input_dev->id.version = 1;
- input_dev->dev.parent = &dev->udev->dev;
-
- err = input_register_device(input_dev);
- if (err) {
- em28xx_errdev("input_register_device failed\n");
- input_free_device(input_dev);
- return;
- }
-
- dev->sbutton_input_dev = input_dev;
- schedule_delayed_work(&dev->sbutton_query_work,
- msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
- return;
-
+ em28xx_unregister_extension(&rc_ops);
}
-void em28xx_deregister_snapshot_button(struct em28xx *dev)
-{
- if (dev->sbutton_input_dev != NULL) {
- em28xx_info("Deregistering snapshot button\n");
- cancel_delayed_work_sync(&dev->sbutton_query_work);
- input_unregister_device(dev->sbutton_input_dev);
- dev->sbutton_input_dev = NULL;
- }
- return;
-}
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_DESCRIPTION("Em28xx Input driver");
+
+module_init(em28xx_rc_register);
+module_exit(em28xx_rc_unregister);
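The rework above turns the em28xx IR support into a separate module that hooks into the core through the em28xx extension interface (struct em28xx_ops plus em28xx_register_extension()/em28xx_unregister_extension()), the same mechanism the ALSA and DVB extensions already use. A hedged skeleton of such an extension module follows; EM28XX-style id 0x40, the example_* names and the callback bodies are hypothetical placeholders, not driver code.

#include <linux/module.h>
#include "em28xx.h"

static int example_init(struct em28xx *dev)
{
	/* per-device setup would go here */
	return 0;
}

static int example_fini(struct em28xx *dev)
{
	/* per-device teardown would go here */
	return 0;
}

static struct em28xx_ops example_ops = {
	.id   = 0x40,			/* hypothetical extension id */
	.name = "Em28xx Example Extension",
	.init = example_init,
	.fini = example_fini,
};

static int __init example_register(void)
{
	return em28xx_register_extension(&example_ops);
}

static void __exit example_unregister(void)
{
	em28xx_unregister_extension(&example_ops);
}

module_init(example_register);
module_exit(example_unregister);
MODULE_LICENSE("GPL");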
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 324b695c0724..50f5f4fc2148 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -1305,9 +1305,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
if (0 == INPUT(i)->type)
return -EINVAL;
- dev->ctl_input = i;
-
- video_mux(dev, dev->ctl_input);
+ video_mux(dev, i);
return 0;
}
@@ -2262,6 +2260,7 @@ static int em28xx_v4l2_close(struct file *filp)
em28xx_release_resources(dev);
kfree(dev->alt_max_pkt_size);
kfree(dev);
+ kfree(fh);
return 0;
}
@@ -2286,7 +2285,6 @@ static int em28xx_v4l2_close(struct file *filp)
videobuf_mmap_free(&fh->vb_vbiq);
kfree(fh);
dev->users--;
- wake_up_interruptible_nr(&dev->open, 1);
return 0;
}
@@ -2497,6 +2495,10 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
vfd->release = video_device_release;
vfd->debug = video_debug;
vfd->lock = &dev->lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
snprintf(vfd->name, sizeof(vfd->name), "%s %s",
dev->name, type_name);
@@ -2518,7 +2520,6 @@ int em28xx_register_analog_devices(struct em28xx *dev)
dev->norm = em28xx_video_template.current_norm;
v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm);
dev->interlaced = EM28XX_INTERLACED_DEFAULT;
- dev->ctl_input = 0;
/* Analog specific initialization */
dev->format = &format[0];
@@ -2532,7 +2533,7 @@ int em28xx_register_analog_devices(struct em28xx *dev)
em28xx_set_video_format(dev, format[0].fourcc,
maxw, norm_maxh(dev));
- video_mux(dev, dev->ctl_input);
+ video_mux(dev, 0);
/* Audio defaults */
dev->mute = 1;
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 2868b19f8b54..8757523e6863 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -226,24 +226,10 @@ struct em28xx_usb_isoc_ctl {
/* isoc transfer buffers for digital mode */
struct em28xx_usb_isoc_bufs digital_bufs;
- /* Last buffer command and region */
- u8 cmd;
- int pos, size, pktsize;
-
- /* Last field: ODD or EVEN? */
- int field;
-
- /* Stores incomplete commands */
- u32 tmp_buf;
- int tmp_buf_len;
-
/* Stores already requested buffers */
struct em28xx_buffer *vid_buf;
struct em28xx_buffer *vbi_buf;
- /* Stores the number of received fields */
- int nfields;
-
/* isoc urb callback */
int (*isoc_copy) (struct em28xx *dev, struct urb *urb);
@@ -264,12 +250,10 @@ struct em28xx_buffer {
struct list_head frame;
int top_field;
- int receiving;
};
struct em28xx_dmaqueue {
struct list_head active;
- struct list_head queued;
wait_queue_head_t wq;
@@ -277,13 +261,6 @@ struct em28xx_dmaqueue {
int pos;
};
-/* io methods */
-enum em28xx_io_method {
- IO_NONE,
- IO_READ,
- IO_MMAP,
-};
-
/* inputs */
#define MAX_EM28XX_INPUT 4
@@ -467,6 +444,7 @@ enum em28xx_dev_state {
/* em28xx extensions */
#define EM28XX_AUDIO 0x10
#define EM28XX_DVB 0x20
+#define EM28XX_RC 0x30
/* em28xx resource types (used for res_get/res_lock etc */
#define EM28XX_RESOURCE_VIDEO 0x01
@@ -577,7 +555,6 @@ struct em28xx {
/* states */
enum em28xx_dev_state state;
- enum em28xx_io_method io;
/* vbi related state tracking */
int capture_type;
@@ -593,7 +570,6 @@ struct em28xx {
struct mutex ctrl_urb_lock; /* protects urb_buf */
/* spinlock_t queue_lock; */
struct list_head inqueue, outqueue;
- wait_queue_head_t open, wait_frame, wait_stream;
struct video_device *vbi_dev;
struct video_device *radio_dev;
@@ -695,6 +671,7 @@ int em28xx_init_isoc(struct em28xx *dev, enum em28xx_mode mode,
int max_packets, int num_bufs, int max_pkt_size,
int (*isoc_copy) (struct em28xx *dev, struct urb *urb));
void em28xx_uninit_isoc(struct em28xx *dev, enum em28xx_mode mode);
+void em28xx_stop_urbs(struct em28xx *dev);
int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev);
int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode);
int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio);
@@ -710,45 +687,12 @@ void em28xx_release_analog_resources(struct em28xx *dev);
/* Provided by em28xx-cards.c */
extern int em2800_variant_detect(struct usb_device *udev, int model);
-extern void em28xx_pre_card_setup(struct em28xx *dev);
-extern void em28xx_card_setup(struct em28xx *dev);
extern struct em28xx_board em28xx_boards[];
extern struct usb_device_id em28xx_id_table[];
extern const unsigned int em28xx_bcount;
-void em28xx_register_i2c_ir(struct em28xx *dev);
int em28xx_tuner_callback(void *ptr, int component, int command, int arg);
void em28xx_release_resources(struct em28xx *dev);
-/* Provided by em28xx-input.c */
-
-#ifdef CONFIG_VIDEO_EM28XX_RC
-
-int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw);
-int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw);
-int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
- u32 *ir_raw);
-int em28xx_get_key_winfast_usbii_deluxe(struct IR_i2c *ir, u32 *ir_key,
- u32 *ir_raw);
-void em28xx_register_snapshot_button(struct em28xx *dev);
-void em28xx_deregister_snapshot_button(struct em28xx *dev);
-
-int em28xx_ir_init(struct em28xx *dev);
-int em28xx_ir_fini(struct em28xx *dev);
-
-#else
-
-#define em28xx_get_key_terratec NULL
-#define em28xx_get_key_em_haup NULL
-#define em28xx_get_key_pinnacle_usb_grey NULL
-#define em28xx_get_key_winfast_usbii_deluxe NULL
-
-static inline void em28xx_register_snapshot_button(struct em28xx *dev) {}
-static inline void em28xx_deregister_snapshot_button(struct em28xx *dev) {}
-static inline int em28xx_ir_init(struct em28xx *dev) { return 0; }
-static inline int em28xx_ir_fini(struct em28xx *dev) { return 0; }
-
-#endif
-
/* Provided by em28xx-vbi.c */
extern struct videobuf_queue_ops em28xx_vbi_qops;
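The hunk above drops the em28xx-input.c prototypes and their CONFIG_VIDEO_EM28XX_RC stubs from the header and adds an EM28XX_RC extension id next to EM28XX_AUDIO and EM28XX_DVB, which suggests the IR code now attaches through the generic em28xx extension mechanism rather than through direct calls from the core. A minimal sketch of such a registration, assuming the struct em28xx_ops / em28xx_register_extension() interface declared elsewhere in em28xx.h (the field names and the init/fini signatures are assumptions modeled on the existing audio and DVB extensions, which are not part of this hunk):

/* Illustrative sketch only; assumes "em28xx.h" and <linux/module.h>. */
static int em28xx_rc_init(struct em28xx *dev)
{
        /* allocate and register the RC/input device for this board here */
        return 0;
}

static int em28xx_rc_fini(struct em28xx *dev)
{
        /* undo whatever em28xx_rc_init() set up */
        return 0;
}

static struct em28xx_ops rc_ops = {
        .id   = EM28XX_RC,
        .name = "Em28xx Input Extension",
        .init = em28xx_rc_init,
        .fini = em28xx_rc_fini,
};

static int __init em28xx_rc_register(void)
{
        return em28xx_register_extension(&rc_ops);
}

static void __exit em28xx_rc_unregister(void)
{
        em28xx_unregister_extension(&rc_ops);
}

module_init(em28xx_rc_register);
module_exit(em28xx_rc_unregister);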
diff --git a/drivers/media/video/et61x251/Kconfig b/drivers/media/video/et61x251/Kconfig
deleted file mode 100644
index 87981b078fe6..000000000000
--- a/drivers/media/video/et61x251/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
-config USB_ET61X251
- tristate "USB ET61X[12]51 PC Camera Controller support (DEPRECATED)"
- depends on VIDEO_V4L2
- default n
- ---help---
- This driver is DEPRECATED; please use the gspca zc3xx module
- instead.
-
- Say Y here if you want support for cameras based on Etoms ET61X151
- or ET61X251 PC Camera Controllers.
-
- See <file:Documentation/video4linux/et61x251.txt> for more info.
-
- This driver uses the Video For Linux API. You must say Y or M to
- "Video For Linux" to use this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called et61x251.
diff --git a/drivers/media/video/et61x251/Makefile b/drivers/media/video/et61x251/Makefile
deleted file mode 100644
index 2ff4db9ec882..000000000000
--- a/drivers/media/video/et61x251/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-et61x251-objs := et61x251_core.o et61x251_tas5130d1b.o
-
-obj-$(CONFIG_USB_ET61X251) += et61x251.o
-
diff --git a/drivers/media/video/et61x251/et61x251.h b/drivers/media/video/et61x251/et61x251.h
deleted file mode 100644
index 337ded4a6388..000000000000
--- a/drivers/media/video/et61x251/et61x251.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/***************************************************************************
- * V4L2 driver for ET61X[12]51 PC Camera Controllers *
- * *
- * Copyright (C) 2006 by Luca Risolia <luca.risolia@studio.unibo.it> *
- * *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License as published by *
- * the Free Software Foundation; either version 2 of the License, or *
- * (at your option) any later version. *
- * *
- * This program is distributed in the hope that it will be useful, *
- * but WITHOUT ANY WARRANTY; without even the implied warranty of *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
- * GNU General Public License for more details. *
- * *
- * You should have received a copy of the GNU General Public License *
- * along with this program; if not, write to the Free Software *
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
- ***************************************************************************/
-
-#ifndef _ET61X251_H_
-#define _ET61X251_H_
-
-#include <linux/usb.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-common.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/time.h>
-#include <linux/wait.h>
-#include <linux/types.h>
-#include <linux/param.h>
-#include <linux/rwsem.h>
-#include <linux/mutex.h>
-#include <linux/stddef.h>
-#include <linux/string.h>
-#include <linux/kref.h>
-
-#include "et61x251_sensor.h"
-
-/*****************************************************************************/
-
-#define ET61X251_DEBUG
-#define ET61X251_DEBUG_LEVEL 2
-#define ET61X251_MAX_DEVICES 64
-#define ET61X251_PRESERVE_IMGSCALE 0
-#define ET61X251_FORCE_MUNMAP 0
-#define ET61X251_MAX_FRAMES 32
-#define ET61X251_COMPRESSION_QUALITY 0
-#define ET61X251_URBS 2
-#define ET61X251_ISO_PACKETS 7
-#define ET61X251_ALTERNATE_SETTING 13
-#define ET61X251_URB_TIMEOUT msecs_to_jiffies(2 * ET61X251_ISO_PACKETS)
-#define ET61X251_CTRL_TIMEOUT 100
-#define ET61X251_FRAME_TIMEOUT 2
-
-/*****************************************************************************/
-
-static const struct usb_device_id et61x251_id_table[] = {
- { USB_DEVICE(0x102c, 0x6251), },
- { }
-};
-
-ET61X251_SENSOR_TABLE
-
-/*****************************************************************************/
-
-enum et61x251_frame_state {
- F_UNUSED,
- F_QUEUED,
- F_GRABBING,
- F_DONE,
- F_ERROR,
-};
-
-struct et61x251_frame_t {
- void* bufmem;
- struct v4l2_buffer buf;
- enum et61x251_frame_state state;
- struct list_head frame;
- unsigned long vma_use_count;
-};
-
-enum et61x251_dev_state {
- DEV_INITIALIZED = 0x01,
- DEV_DISCONNECTED = 0x02,
- DEV_MISCONFIGURED = 0x04,
-};
-
-enum et61x251_io_method {
- IO_NONE,
- IO_READ,
- IO_MMAP,
-};
-
-enum et61x251_stream_state {
- STREAM_OFF,
- STREAM_INTERRUPT,
- STREAM_ON,
-};
-
-struct et61x251_sysfs_attr {
- u8 reg, i2c_reg;
-};
-
-struct et61x251_module_param {
- u8 force_munmap;
- u16 frame_timeout;
-};
-
-static DEFINE_MUTEX(et61x251_sysfs_lock);
-static DECLARE_RWSEM(et61x251_dev_lock);
-
-struct et61x251_device {
- struct video_device* v4ldev;
-
- struct et61x251_sensor sensor;
-
- struct usb_device* usbdev;
- struct urb* urb[ET61X251_URBS];
- void* transfer_buffer[ET61X251_URBS];
- u8* control_buffer;
-
- struct et61x251_frame_t *frame_current, frame[ET61X251_MAX_FRAMES];
- struct list_head inqueue, outqueue;
- u32 frame_count, nbuffers, nreadbuffers;
-
- enum et61x251_io_method io;
- enum et61x251_stream_state stream;
-
- struct v4l2_jpegcompression compression;
-
- struct et61x251_sysfs_attr sysfs;
- struct et61x251_module_param module_param;
-
- struct kref kref;
- enum et61x251_dev_state state;
- u8 users;
-
- struct completion probe;
- struct mutex open_mutex, fileop_mutex;
- spinlock_t queue_lock;
- wait_queue_head_t wait_open, wait_frame, wait_stream;
-};
-
-/*****************************************************************************/
-
-struct et61x251_device*
-et61x251_match_id(struct et61x251_device* cam, const struct usb_device_id *id)
-{
- return usb_match_id(usb_ifnum_to_if(cam->usbdev, 0), id) ? cam : NULL;
-}
-
-
-void
-et61x251_attach_sensor(struct et61x251_device* cam,
- const struct et61x251_sensor* sensor)
-{
- memcpy(&cam->sensor, sensor, sizeof(struct et61x251_sensor));
-}
-
-/*****************************************************************************/
-
-#undef DBG
-#undef KDBG
-#ifdef ET61X251_DEBUG
-#define DBG(level, fmt, ...) \
-do { \
- if (debug >= (level)) { \
- if ((level) == 1) \
- dev_err(&cam->usbdev->dev, fmt "\n", \
- ##__VA_ARGS__); \
- else if ((level) == 2) \
- dev_info(&cam->usbdev->dev, fmt "\n", \
- ##__VA_ARGS__); \
- else if ((level) >= 3) \
- dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", \
- __FILE__, __func__, __LINE__, \
- ##__VA_ARGS__); \
- } \
-} while (0)
-#define KDBG(level, fmt, ...) \
-do { \
- if (debug >= (level)) { \
- if ((level) == 1 || (level) == 2) \
- pr_info(fmt "\n", ##__VA_ARGS__); \
- else if ((level) == 3) \
- pr_debug("[%s:%s:%d] " fmt "\n", \
- __FILE__, __func__, __LINE__, \
- ##__VA_ARGS__); \
- } \
-} while (0)
-#define V4LDBG(level, name, cmd) \
-do { \
- if (debug >= (level)) \
- v4l_print_ioctl(name, cmd); \
-} while (0)
-#else
-#define DBG(level, fmt, ...) do {;} while(0)
-#define KDBG(level, fmt, ...) do {;} while(0)
-#define V4LDBG(level, name, cmd) do {;} while(0)
-#endif
-
-#undef PDBG
-#define PDBG(fmt, ...) \
- dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", \
- __FILE__, __func__, __LINE__, ##__VA_ARGS__)
-
-#undef PDBGG
-#define PDBGG(fmt, args...) do {;} while (0) /* verbose debug disabled: expands to nothing */
-
-#endif /* _ET61X251_H_ */
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
deleted file mode 100644
index 5539f09440ac..000000000000
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ /dev/null
@@ -1,2683 +0,0 @@
-/***************************************************************************
- * V4L2 driver for ET61X[12]51 PC Camera Controllers *
- * *
- * Copyright (C) 2006-2007 by Luca Risolia <luca.risolia@studio.unibo.it> *
- * *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License as published by *
- * the Free Software Foundation; either version 2 of the License, or *
- * (at your option) any later version. *
- * *
- * This program is distributed in the hope that it will be useful, *
- * but WITHOUT ANY WARRANTY; without even the implied warranty of *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
- * GNU General Public License for more details. *
- * *
- * You should have received a copy of the GNU General Public License *
- * along with this program; if not, write to the Free Software *
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
- ***************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/compiler.h>
-#include <linux/ioctl.h>
-#include <linux/poll.h>
-#include <linux/stat.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/page-flags.h>
-#include <media/v4l2-ioctl.h>
-#include <asm/byteorder.h>
-#include <asm/page.h>
-#include <asm/uaccess.h>
-
-#include "et61x251.h"
-
-/*****************************************************************************/
-
-#define ET61X251_MODULE_NAME "V4L2 driver for ET61X[12]51 " \
- "PC Camera Controllers"
-#define ET61X251_MODULE_AUTHOR "(C) 2006-2007 Luca Risolia"
-#define ET61X251_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>"
-#define ET61X251_MODULE_LICENSE "GPL"
-#define ET61X251_MODULE_VERSION "1.1.10"
-
-/*****************************************************************************/
-
-MODULE_DEVICE_TABLE(usb, et61x251_id_table);
-
-MODULE_AUTHOR(ET61X251_MODULE_AUTHOR " " ET61X251_AUTHOR_EMAIL);
-MODULE_DESCRIPTION(ET61X251_MODULE_NAME);
-MODULE_VERSION(ET61X251_MODULE_VERSION);
-MODULE_LICENSE(ET61X251_MODULE_LICENSE);
-
-static short video_nr[] = {[0 ... ET61X251_MAX_DEVICES-1] = -1};
-module_param_array(video_nr, short, NULL, 0444);
-MODULE_PARM_DESC(video_nr,
- "\n<-1|n[,...]> Specify V4L2 minor number."
- "\n -1 = use next available (default)"
- "\n n = use minor number n (integer >= 0)"
- "\nYou can specify up to "
- __MODULE_STRING(ET61X251_MAX_DEVICES) " cameras this way."
- "\nFor example:"
- "\nvideo_nr=-1,2,-1 would assign minor number 2 to"
- "\nthe second registered camera and use auto for the first"
- "\none and for every other camera."
- "\n");
-
-static bool force_munmap[] = {[0 ... ET61X251_MAX_DEVICES-1] =
- ET61X251_FORCE_MUNMAP};
-module_param_array(force_munmap, bool, NULL, 0444);
-MODULE_PARM_DESC(force_munmap,
- "\n<0|1[,...]> Force the application to unmap previously"
- "\nmapped buffer memory before calling any VIDIOC_S_CROP or"
- "\nVIDIOC_S_FMT ioctls. Not all applications support"
- "\nthis feature. This parameter is specific for each"
- "\ndetected camera."
- "\n 0 = do not force memory unmapping"
- "\n 1 = force memory unmapping (save memory)"
- "\nDefault value is "__MODULE_STRING(ET61X251_FORCE_MUNMAP)"."
- "\n");
-
-static unsigned int frame_timeout[] = {[0 ... ET61X251_MAX_DEVICES-1] =
- ET61X251_FRAME_TIMEOUT};
-module_param_array(frame_timeout, uint, NULL, 0644);
-MODULE_PARM_DESC(frame_timeout,
- "\n<n[,...]> Timeout for a video frame in seconds."
- "\nThis parameter is specific for each detected camera."
- "\nDefault value is "
- __MODULE_STRING(ET61X251_FRAME_TIMEOUT)"."
- "\n");
-
-#ifdef ET61X251_DEBUG
-static unsigned short debug = ET61X251_DEBUG_LEVEL;
-module_param(debug, ushort, 0644);
-MODULE_PARM_DESC(debug,
- "\n<n> Debugging information level, from 0 to 3:"
- "\n0 = none (use carefully)"
- "\n1 = critical errors"
- "\n2 = significant information"
- "\n3 = more verbose messages"
- "\nLevel 3 is useful for testing only, when only "
- "one device is used."
- "\nDefault value is "__MODULE_STRING(ET61X251_DEBUG_LEVEL)"."
- "\n");
-#endif
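video_nr, force_munmap, frame_timeout and debug above are declared as arrays so that each probed camera can get its own setting. A minimal sketch of how the probe path would consume them, assuming a hypothetical dev_nr counter that tracks probe order (the real et61x251_usb_probe() sits further down in this file and is not reproduced in this hunk):

/* Illustrative only; dev_nr is a hypothetical per-probe index. */
static unsigned int dev_nr;     /* incremented once per successfully probed camera */

static void et61x251_apply_module_params(struct et61x251_device *cam)
{
        cam->module_param.force_munmap  = force_munmap[dev_nr];
        cam->module_param.frame_timeout = frame_timeout[dev_nr];
        /* video_nr[dev_nr] would be handed to video_register_device() */
}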
-
-/*****************************************************************************/
-
-static u32
-et61x251_request_buffers(struct et61x251_device* cam, u32 count,
- enum et61x251_io_method io)
-{
- struct v4l2_pix_format* p = &(cam->sensor.pix_format);
- struct v4l2_rect* r = &(cam->sensor.cropcap.bounds);
- const size_t imagesize = cam->module_param.force_munmap ||
- io == IO_READ ?
- (p->width * p->height * p->priv) / 8 :
- (r->width * r->height * p->priv) / 8;
- void* buff = NULL;
- u32 i;
-
- if (count > ET61X251_MAX_FRAMES)
- count = ET61X251_MAX_FRAMES;
-
- cam->nbuffers = count;
- while (cam->nbuffers > 0) {
- if ((buff = vmalloc_32_user(cam->nbuffers *
- PAGE_ALIGN(imagesize))))
- break;
- cam->nbuffers--;
- }
-
- for (i = 0; i < cam->nbuffers; i++) {
- cam->frame[i].bufmem = buff + i*PAGE_ALIGN(imagesize);
- cam->frame[i].buf.index = i;
- cam->frame[i].buf.m.offset = i*PAGE_ALIGN(imagesize);
- cam->frame[i].buf.length = imagesize;
- cam->frame[i].buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- cam->frame[i].buf.sequence = 0;
- cam->frame[i].buf.field = V4L2_FIELD_NONE;
- cam->frame[i].buf.memory = V4L2_MEMORY_MMAP;
- cam->frame[i].buf.flags = 0;
- }
-
- return cam->nbuffers;
-}
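et61x251_request_buffers() makes a single vmalloc_32_user() allocation and carves it into page-aligned slots, so buf.m.offset doubles as the offset later matched against vma->vm_pgoff in et61x251_mmap(). A worked example of the arithmetic, assuming a 640x480 frame with priv = 8 bits per pixel and PAGE_SIZE = 4096 (illustrative values only):

/* Illustrative arithmetic only (kernel types and PAGE_ALIGN() assumed). */
static void et61x251_buffer_layout_example(void)
{
        size_t imagesize = (640 * 480 * 8) / 8;   /* 307200 bytes                   */
        size_t slot = PAGE_ALIGN(imagesize);      /* 307200: already 75 whole pages */

        /*
         * Frame i then lives at bufmem + i * slot and is advertised to
         * userspace as buf.m.offset = i * slot; frame 2, for instance,
         * sits at offset 614400.
         */
        (void)slot;
}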
-
-
-static void et61x251_release_buffers(struct et61x251_device* cam)
-{
- if (cam->nbuffers) {
- vfree(cam->frame[0].bufmem);
- cam->nbuffers = 0;
- }
- cam->frame_current = NULL;
-}
-
-
-static void et61x251_empty_framequeues(struct et61x251_device* cam)
-{
- u32 i;
-
- INIT_LIST_HEAD(&cam->inqueue);
- INIT_LIST_HEAD(&cam->outqueue);
-
- for (i = 0; i < ET61X251_MAX_FRAMES; i++) {
- cam->frame[i].state = F_UNUSED;
- cam->frame[i].buf.bytesused = 0;
- }
-}
-
-
-static void et61x251_requeue_outqueue(struct et61x251_device* cam)
-{
- struct et61x251_frame_t *i;
-
- list_for_each_entry(i, &cam->outqueue, frame) {
- i->state = F_QUEUED;
- list_add(&i->frame, &cam->inqueue);
- }
-
- INIT_LIST_HEAD(&cam->outqueue);
-}
-
-
-static void et61x251_queue_unusedframes(struct et61x251_device* cam)
-{
- unsigned long lock_flags;
- u32 i;
-
- for (i = 0; i < cam->nbuffers; i++)
- if (cam->frame[i].state == F_UNUSED) {
- cam->frame[i].state = F_QUEUED;
- spin_lock_irqsave(&cam->queue_lock, lock_flags);
- list_add_tail(&cam->frame[i].frame, &cam->inqueue);
- spin_unlock_irqrestore(&cam->queue_lock, lock_flags);
- }
-}
-
-/*****************************************************************************/
-
-int et61x251_write_reg(struct et61x251_device* cam, u8 value, u16 index)
-{
- struct usb_device* udev = cam->usbdev;
- u8* buff = cam->control_buffer;
- int res;
-
- *buff = value;
-
- res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41,
- 0, index, buff, 1, ET61X251_CTRL_TIMEOUT);
- if (res < 0) {
- DBG(3, "Failed to write a register (value 0x%02X, index "
- "0x%02X, error %d)", value, index, res);
- return -1;
- }
-
- return 0;
-}
-
-
-static int et61x251_read_reg(struct et61x251_device* cam, u16 index)
-{
- struct usb_device* udev = cam->usbdev;
- u8* buff = cam->control_buffer;
- int res;
-
- res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x00, 0xc1,
- 0, index, buff, 1, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- DBG(3, "Failed to read a register (index 0x%02X, error %d)",
- index, res);
-
- return (res >= 0) ? (int)(*buff) : -1;
-}
-
-
-static int
-et61x251_i2c_wait(struct et61x251_device* cam,
- const struct et61x251_sensor* sensor)
-{
- int i, r;
-
- for (i = 1; i <= 8; i++) {
- if (sensor->interface == ET61X251_I2C_3WIRES) {
- r = et61x251_read_reg(cam, 0x8e);
- if (!(r & 0x02) && (r >= 0))
- return 0;
- } else {
- r = et61x251_read_reg(cam, 0x8b);
- if (!(r & 0x01) && (r >= 0))
- return 0;
- }
- if (r < 0)
- return -EIO;
- udelay(8*8); /* minimum for sensors at 400kHz */
- }
-
- return -EBUSY;
-}
-
-
-int
-et61x251_i2c_raw_write(struct et61x251_device* cam, u8 n, u8 data1, u8 data2,
- u8 data3, u8 data4, u8 data5, u8 data6, u8 data7,
- u8 data8, u8 address)
-{
- struct usb_device* udev = cam->usbdev;
- u8* data = cam->control_buffer;
- int err = 0, res;
-
- data[0] = data2;
- data[1] = data3;
- data[2] = data4;
- data[3] = data5;
- data[4] = data6;
- data[5] = data7;
- data[6] = data8;
- res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41,
- 0, 0x81, data, n-1, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- err += res;
-
- data[0] = address;
- data[1] = cam->sensor.i2c_slave_id;
- data[2] = cam->sensor.rsta | 0x02 | (n << 4);
- res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41,
- 0, 0x88, data, 3, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- err += res;
-
- /* Start writing through the serial interface */
- data[0] = data1;
- res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41,
- 0, 0x80, data, 1, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- err += res;
-
- err += et61x251_i2c_wait(cam, &cam->sensor);
-
- if (err)
- DBG(3, "I2C raw write failed for %s image sensor",
- cam->sensor.name);
-
- PDBGG("I2C raw write: %u bytes, address = 0x%02X, data1 = 0x%02X, "
- "data2 = 0x%02X, data3 = 0x%02X, data4 = 0x%02X, data5 = 0x%02X,"
- " data6 = 0x%02X, data7 = 0x%02X, data8 = 0x%02X", n, address,
- data1, data2, data3, data4, data5, data6, data7, data8);
-
- return err ? -1 : 0;
-
-}
-
-
-/*****************************************************************************/
-
-static void et61x251_urb_complete(struct urb *urb)
-{
- struct et61x251_device* cam = urb->context;
- struct et61x251_frame_t** f;
- size_t imagesize;
- u8 i;
- int err = 0;
-
- if (urb->status == -ENOENT)
- return;
-
- f = &cam->frame_current;
-
- if (cam->stream == STREAM_INTERRUPT) {
- cam->stream = STREAM_OFF;
- if ((*f))
- (*f)->state = F_QUEUED;
- DBG(3, "Stream interrupted");
- wake_up(&cam->wait_stream);
- }
-
- if (cam->state & DEV_DISCONNECTED)
- return;
-
- if (cam->state & DEV_MISCONFIGURED) {
- wake_up_interruptible(&cam->wait_frame);
- return;
- }
-
- if (cam->stream == STREAM_OFF || list_empty(&cam->inqueue))
- goto resubmit_urb;
-
- if (!(*f))
- (*f) = list_entry(cam->inqueue.next, struct et61x251_frame_t,
- frame);
-
- imagesize = (cam->sensor.pix_format.width *
- cam->sensor.pix_format.height *
- cam->sensor.pix_format.priv) / 8;
-
- for (i = 0; i < urb->number_of_packets; i++) {
- unsigned int len, status;
- void *pos;
- u8* b1, * b2, sof;
- const u8 VOID_BYTES = 6;
- size_t imglen;
-
- len = urb->iso_frame_desc[i].actual_length;
- status = urb->iso_frame_desc[i].status;
- pos = urb->iso_frame_desc[i].offset + urb->transfer_buffer;
-
- if (status) {
- DBG(3, "Error in isochronous frame");
- (*f)->state = F_ERROR;
- continue;
- }
-
- b1 = pos++;
- b2 = pos++;
- sof = ((*b1 & 0x3f) == 63);
- imglen = ((*b1 & 0xc0) << 2) | *b2;
-
- PDBGG("Isochronous frame: length %u, packet #%u, image length %zu",
- len, i, imglen);
-
- if ((*f)->state == F_QUEUED || (*f)->state == F_ERROR)
-start_of_frame:
- if (sof) {
- (*f)->state = F_GRABBING;
- (*f)->buf.bytesused = 0;
- do_gettimeofday(&(*f)->buf.timestamp);
- pos += 22;
- DBG(3, "SOF detected: new video frame");
- }
-
- if ((*f)->state == F_GRABBING) {
- if (sof && (*f)->buf.bytesused) {
- if (cam->sensor.pix_format.pixelformat ==
- V4L2_PIX_FMT_ET61X251)
- goto end_of_frame;
- else {
- DBG(3, "Unexpected SOF detected "
- "after %lu bytes",
- (unsigned long)(*f)->buf.bytesused);
- (*f)->state = F_ERROR;
- continue;
- }
- }
-
- if ((*f)->buf.bytesused + imglen > imagesize) {
- DBG(3, "Video frame size exceeded");
- (*f)->state = F_ERROR;
- continue;
- }
-
- pos += VOID_BYTES;
-
- memcpy((*f)->bufmem+(*f)->buf.bytesused, pos, imglen);
- (*f)->buf.bytesused += imglen;
-
- if ((*f)->buf.bytesused == imagesize) {
- u32 b;
-end_of_frame:
- b = (*f)->buf.bytesused;
- (*f)->state = F_DONE;
- (*f)->buf.sequence= ++cam->frame_count;
- spin_lock(&cam->queue_lock);
- list_move_tail(&(*f)->frame, &cam->outqueue);
- if (!list_empty(&cam->inqueue))
- (*f) = list_entry(cam->inqueue.next,
- struct et61x251_frame_t,
- frame);
- else
- (*f) = NULL;
- spin_unlock(&cam->queue_lock);
- DBG(3, "Video frame captured: %lu bytes",
- (unsigned long)(b));
-
- if (!(*f))
- goto resubmit_urb;
-
- if (sof &&
- cam->sensor.pix_format.pixelformat ==
- V4L2_PIX_FMT_ET61X251)
- goto start_of_frame;
- }
- }
- }
-
-resubmit_urb:
- urb->dev = cam->usbdev;
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0 && err != -EPERM) {
- cam->state |= DEV_MISCONFIGURED;
- DBG(1, "usb_submit_urb() failed");
- }
-
- wake_up_interruptible(&cam->wait_frame);
-}
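Each isochronous packet handled above starts with a two-byte header: the low six bits of the first byte mark a start of frame when they are all set (equal to 63), and the image payload length is the top two bits of that byte shifted up by two and OR'd with the second byte. A small illustrative decoder for just those two bytes (the helper names are invented for the example; kernel types assumed):

static inline int et61x251_hdr_is_sof(u8 b1)
{
        return (b1 & 0x3f) == 63;
}

static inline size_t et61x251_hdr_imglen(u8 b1, u8 b2)
{
        return ((b1 & 0xc0) << 2) | b2;
}

/* Example: b1 = 0xbf, b2 = 0x34 -> SOF set, imglen = 0x200 | 0x34 = 564 bytes. */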
-
-
-static int et61x251_start_transfer(struct et61x251_device* cam)
-{
- struct usb_device *udev = cam->usbdev;
- struct urb* urb;
- struct usb_host_interface* altsetting = usb_altnum_to_altsetting(
- usb_ifnum_to_if(udev, 0),
- ET61X251_ALTERNATE_SETTING);
- const unsigned int psz = le16_to_cpu(altsetting->
- endpoint[0].desc.wMaxPacketSize);
- s8 i, j;
- int err = 0;
-
- for (i = 0; i < ET61X251_URBS; i++) {
- cam->transfer_buffer[i] = kzalloc(ET61X251_ISO_PACKETS * psz,
- GFP_KERNEL);
- if (!cam->transfer_buffer[i]) {
- err = -ENOMEM;
- DBG(1, "Not enough memory");
- goto free_buffers;
- }
- }
-
- for (i = 0; i < ET61X251_URBS; i++) {
- urb = usb_alloc_urb(ET61X251_ISO_PACKETS, GFP_KERNEL);
- cam->urb[i] = urb;
- if (!urb) {
- err = -ENOMEM;
- DBG(1, "usb_alloc_urb() failed");
- goto free_urbs;
- }
- urb->dev = udev;
- urb->context = cam;
- urb->pipe = usb_rcvisocpipe(udev, 1);
- urb->transfer_flags = URB_ISO_ASAP;
- urb->number_of_packets = ET61X251_ISO_PACKETS;
- urb->complete = et61x251_urb_complete;
- urb->transfer_buffer = cam->transfer_buffer[i];
- urb->transfer_buffer_length = psz * ET61X251_ISO_PACKETS;
- urb->interval = 1;
- for (j = 0; j < ET61X251_ISO_PACKETS; j++) {
- urb->iso_frame_desc[j].offset = psz * j;
- urb->iso_frame_desc[j].length = psz;
- }
- }
-
- err = et61x251_write_reg(cam, 0x01, 0x03);
- err = et61x251_write_reg(cam, 0x00, 0x03);
- err = et61x251_write_reg(cam, 0x08, 0x03);
- if (err) {
- err = -EIO;
- DBG(1, "I/O hardware error");
- goto free_urbs;
- }
-
- err = usb_set_interface(udev, 0, ET61X251_ALTERNATE_SETTING);
- if (err) {
- DBG(1, "usb_set_interface() failed");
- goto free_urbs;
- }
-
- cam->frame_current = NULL;
-
- for (i = 0; i < ET61X251_URBS; i++) {
- err = usb_submit_urb(cam->urb[i], GFP_KERNEL);
- if (err) {
- for (j = i-1; j >= 0; j--)
- usb_kill_urb(cam->urb[j]);
- DBG(1, "usb_submit_urb() failed, error %d", err);
- goto free_urbs;
- }
- }
-
- return 0;
-
-free_urbs:
- for (i = 0; (i < ET61X251_URBS) && cam->urb[i]; i++)
- usb_free_urb(cam->urb[i]);
-
-free_buffers:
- for (i = 0; (i < ET61X251_URBS) && cam->transfer_buffer[i]; i++)
- kfree(cam->transfer_buffer[i]);
-
- return err;
-}
-
-
-static int et61x251_stop_transfer(struct et61x251_device* cam)
-{
- struct usb_device *udev = cam->usbdev;
- s8 i;
- int err = 0;
-
- if (cam->state & DEV_DISCONNECTED)
- return 0;
-
- for (i = ET61X251_URBS-1; i >= 0; i--) {
- usb_kill_urb(cam->urb[i]);
- usb_free_urb(cam->urb[i]);
- kfree(cam->transfer_buffer[i]);
- }
-
- err = usb_set_interface(udev, 0, 0); /* 0 Mb/s */
- if (err)
- DBG(3, "usb_set_interface() failed");
-
- return err;
-}
-
-
-static int et61x251_stream_interrupt(struct et61x251_device* cam)
-{
- long timeout;
-
- cam->stream = STREAM_INTERRUPT;
- timeout = wait_event_timeout(cam->wait_stream,
- (cam->stream == STREAM_OFF) ||
- (cam->state & DEV_DISCONNECTED),
- ET61X251_URB_TIMEOUT);
- if (cam->state & DEV_DISCONNECTED)
- return -ENODEV;
- else if (cam->stream != STREAM_OFF) {
- cam->state |= DEV_MISCONFIGURED;
- DBG(1, "URB timeout reached. The camera is misconfigured. To "
- "use it, close and open %s again.",
- video_device_node_name(cam->v4ldev));
- return -EIO;
- }
-
- return 0;
-}
-
-/*****************************************************************************/
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-
-static int et61x251_i2c_try_read(struct et61x251_device* cam,
- const struct et61x251_sensor* sensor,
- u8 address)
-{
- struct usb_device* udev = cam->usbdev;
- u8* data = cam->control_buffer;
- int err = 0, res;
-
- data[0] = address;
- data[1] = cam->sensor.i2c_slave_id;
- data[2] = cam->sensor.rsta | 0x10;
- data[3] = !(et61x251_read_reg(cam, 0x8b) & 0x02);
- res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41,
- 0, 0x88, data, 4, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- err += res;
-
- err += et61x251_i2c_wait(cam, sensor);
-
- res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x00, 0xc1,
- 0, 0x80, data, 8, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- err += res;
-
- if (err)
- DBG(3, "I2C read failed for %s image sensor", sensor->name);
-
- PDBGG("I2C read: address 0x%02X, value: 0x%02X", address, data[0]);
-
- return err ? -1 : (int)data[0];
-}
-
-
-static int et61x251_i2c_try_write(struct et61x251_device* cam,
- const struct et61x251_sensor* sensor,
- u8 address, u8 value)
-{
- struct usb_device* udev = cam->usbdev;
- u8* data = cam->control_buffer;
- int err = 0, res;
-
- data[0] = address;
- data[1] = cam->sensor.i2c_slave_id;
- data[2] = cam->sensor.rsta | 0x12;
- res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41,
- 0, 0x88, data, 3, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- err += res;
-
- data[0] = value;
- res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41,
- 0, 0x80, data, 1, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- err += res;
-
- err += et61x251_i2c_wait(cam, sensor);
-
- if (err)
- DBG(3, "I2C write failed for %s image sensor", sensor->name);
-
- PDBGG("I2C write: address 0x%02X, value: 0x%02X", address, value);
-
- return err ? -1 : 0;
-}
-
-static int et61x251_i2c_read(struct et61x251_device* cam, u8 address)
-{
- return et61x251_i2c_try_read(cam, &cam->sensor, address);
-}
-
-static int et61x251_i2c_write(struct et61x251_device* cam,
- u8 address, u8 value)
-{
- return et61x251_i2c_try_write(cam, &cam->sensor, address, value);
-}
-
-static u8 et61x251_strtou8(const char* buff, size_t len, ssize_t* count)
-{
- char str[5];
- char* endp;
- unsigned long val;
-
- if (len < 4) {
- strncpy(str, buff, len);
- str[len] = '\0';
- } else {
- strncpy(str, buff, 4);
- str[4] = '\0';
- }
-
- val = simple_strtoul(str, &endp, 0);
-
- *count = 0;
- if (val <= 0xff)
- *count = (ssize_t)(endp - str);
- if ((*count) && (len == *count+1) && (buff[*count] == '\n'))
- *count += 1;
-
- return (u8)val;
-}
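et61x251_strtou8() hand-parses at most four characters with simple_strtoul(), rejects values above 0xff and reports how many characters it consumed so the store methods below can return the right count. On kernels that provide kstrtou8(), a leaner equivalent is possible at the cost of requiring the whole buffer to be a single number (kstrtou8() already tolerates the trailing newline that sysfs writes usually carry); a sketch, not the driver's code:

/* Sketch only; assumes <linux/kernel.h>.  Returns 0 or a negative errno. */
static int et61x251_parse_u8(const char *buf, u8 *val)
{
        return kstrtou8(buf, 0, val);   /* base 0 accepts decimal, hex and octal */
}

A store method built on this would simply return len on success instead of the byte count computed by et61x251_strtou8().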
-
-/*
- NOTE 1: being inside one of the following methods implies that the v4l
- device exists for sure (see kobjects and reference counters)
- NOTE 2: buffers are PAGE_SIZE long
-*/
-
-static ssize_t et61x251_show_reg(struct device* cd,
- struct device_attribute *attr, char* buf)
-{
- struct et61x251_device* cam;
- ssize_t count;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- count = sprintf(buf, "%u\n", cam->sysfs.reg);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static ssize_t
-et61x251_store_reg(struct device* cd,
- struct device_attribute *attr, const char* buf, size_t len)
-{
- struct et61x251_device* cam;
- u8 index;
- ssize_t count;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- index = et61x251_strtou8(buf, len, &count);
- if (index > 0x8e || !count) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EINVAL;
- }
-
- cam->sysfs.reg = index;
-
- DBG(2, "Moved ET61X[12]51 register index to 0x%02X", cam->sysfs.reg);
- DBG(3, "Written bytes: %zd", count);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static ssize_t et61x251_show_val(struct device* cd,
- struct device_attribute *attr, char* buf)
-{
- struct et61x251_device* cam;
- ssize_t count;
- int val;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- if ((val = et61x251_read_reg(cam, cam->sysfs.reg)) < 0) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EIO;
- }
-
- count = sprintf(buf, "%d\n", val);
-
- DBG(3, "Read bytes: %zd", count);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static ssize_t
-et61x251_store_val(struct device* cd, struct device_attribute *attr,
- const char* buf, size_t len)
-{
- struct et61x251_device* cam;
- u8 value;
- ssize_t count;
- int err;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- value = et61x251_strtou8(buf, len, &count);
- if (!count) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EINVAL;
- }
-
- err = et61x251_write_reg(cam, value, cam->sysfs.reg);
- if (err) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EIO;
- }
-
- DBG(2, "Written ET61X[12]51 reg. 0x%02X, val. 0x%02X",
- cam->sysfs.reg, value);
- DBG(3, "Written bytes: %zd", count);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static ssize_t et61x251_show_i2c_reg(struct device* cd,
- struct device_attribute *attr, char* buf)
-{
- struct et61x251_device* cam;
- ssize_t count;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- count = sprintf(buf, "%u\n", cam->sysfs.i2c_reg);
-
- DBG(3, "Read bytes: %zd", count);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static ssize_t
-et61x251_store_i2c_reg(struct device* cd, struct device_attribute *attr,
- const char* buf, size_t len)
-{
- struct et61x251_device* cam;
- u8 index;
- ssize_t count;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- index = et61x251_strtou8(buf, len, &count);
- if (!count) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EINVAL;
- }
-
- cam->sysfs.i2c_reg = index;
-
- DBG(2, "Moved sensor register index to 0x%02X", cam->sysfs.i2c_reg);
- DBG(3, "Written bytes: %zd", count);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static ssize_t et61x251_show_i2c_val(struct device* cd,
- struct device_attribute *attr, char* buf)
-{
- struct et61x251_device* cam;
- ssize_t count;
- int val;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- if (!(cam->sensor.sysfs_ops & ET61X251_I2C_READ)) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENOSYS;
- }
-
- if ((val = et61x251_i2c_read(cam, cam->sysfs.i2c_reg)) < 0) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EIO;
- }
-
- count = sprintf(buf, "%d\n", val);
-
- DBG(3, "Read bytes: %zd", count);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static ssize_t
-et61x251_store_i2c_val(struct device* cd, struct device_attribute *attr,
- const char* buf, size_t len)
-{
- struct et61x251_device* cam;
- u8 value;
- ssize_t count;
- int err;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- if (!(cam->sensor.sysfs_ops & ET61X251_I2C_READ)) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENOSYS;
- }
-
- value = et61x251_strtou8(buf, len, &count);
- if (!count) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EINVAL;
- }
-
- err = et61x251_i2c_write(cam, cam->sysfs.i2c_reg, value);
- if (err) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EIO;
- }
-
- DBG(2, "Written sensor reg. 0x%02X, val. 0x%02X",
- cam->sysfs.i2c_reg, value);
- DBG(3, "Written bytes: %zd", count);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static DEVICE_ATTR(reg, S_IRUGO | S_IWUSR,
- et61x251_show_reg, et61x251_store_reg);
-static DEVICE_ATTR(val, S_IRUGO | S_IWUSR,
- et61x251_show_val, et61x251_store_val);
-static DEVICE_ATTR(i2c_reg, S_IRUGO | S_IWUSR,
- et61x251_show_i2c_reg, et61x251_store_i2c_reg);
-static DEVICE_ATTR(i2c_val, S_IRUGO | S_IWUSR,
- et61x251_show_i2c_val, et61x251_store_i2c_val);
-
-
-static int et61x251_create_sysfs(struct et61x251_device* cam)
-{
- struct device *classdev = &(cam->v4ldev->dev);
- int err = 0;
-
- if ((err = device_create_file(classdev, &dev_attr_reg)))
- goto err_out;
- if ((err = device_create_file(classdev, &dev_attr_val)))
- goto err_reg;
-
- if (cam->sensor.sysfs_ops) {
- if ((err = device_create_file(classdev, &dev_attr_i2c_reg)))
- goto err_val;
- if ((err = device_create_file(classdev, &dev_attr_i2c_val)))
- goto err_i2c_reg;
- }
-
-err_i2c_reg:
- if (cam->sensor.sysfs_ops)
- device_remove_file(classdev, &dev_attr_i2c_reg);
-err_val:
- device_remove_file(classdev, &dev_attr_val);
-err_reg:
- device_remove_file(classdev, &dev_attr_reg);
-err_out:
- return err;
-}
-#endif /* CONFIG_VIDEO_ADV_DEBUG */
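The create/remove ladder in et61x251_create_sysfs() registers each attribute individually and unwinds them one by one on failure. An alternative sketch using a sysfs attribute group, which installs and removes all four files in a single call (illustrative only; unlike the code above it does not gate the i2c files on cam->sensor.sysfs_ops):

static struct attribute *et61x251_attrs[] = {
        &dev_attr_reg.attr,
        &dev_attr_val.attr,
        &dev_attr_i2c_reg.attr,
        &dev_attr_i2c_val.attr,
        NULL,
};

static const struct attribute_group et61x251_attr_group = {
        .attrs = et61x251_attrs,
};

/*
 * In place of et61x251_create_sysfs():
 *      err = sysfs_create_group(&cam->v4ldev->dev.kobj, &et61x251_attr_group);
 * with sysfs_remove_group() on teardown.
 */

From userspace the files appear under the video device's sysfs directory and are driven with plain reads and writes: write a register index to reg (or i2c_reg), then read or write val (or i2c_val).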
-
-/*****************************************************************************/
-
-static int
-et61x251_set_pix_format(struct et61x251_device* cam,
- struct v4l2_pix_format* pix)
-{
- int r, err = 0;
-
- if ((r = et61x251_read_reg(cam, 0x12)) < 0)
- err += r;
- if (pix->pixelformat == V4L2_PIX_FMT_ET61X251)
- err += et61x251_write_reg(cam, r & 0xfd, 0x12);
- else
- err += et61x251_write_reg(cam, r | 0x02, 0x12);
-
- return err ? -EIO : 0;
-}
-
-
-static int
-et61x251_set_compression(struct et61x251_device* cam,
- struct v4l2_jpegcompression* compression)
-{
- int r, err = 0;
-
- if ((r = et61x251_read_reg(cam, 0x12)) < 0)
- err += r;
- if (compression->quality == 0)
- err += et61x251_write_reg(cam, r & 0xfb, 0x12);
- else
- err += et61x251_write_reg(cam, r | 0x04, 0x12);
-
- return err ? -EIO : 0;
-}
-
-
-static int et61x251_set_scale(struct et61x251_device* cam, u8 scale)
-{
- int r = 0, err = 0;
-
- r = et61x251_read_reg(cam, 0x12);
- if (r < 0)
- err += r;
-
- if (scale == 1)
- err += et61x251_write_reg(cam, r & ~0x01, 0x12);
- else if (scale == 2)
- err += et61x251_write_reg(cam, r | 0x01, 0x12);
-
- if (err)
- return -EIO;
-
- PDBGG("Scaling factor: %u", scale);
-
- return 0;
-}
-
-
-static int
-et61x251_set_crop(struct et61x251_device* cam, struct v4l2_rect* rect)
-{
- struct et61x251_sensor* s = &cam->sensor;
- u16 fmw_sx = (u16)(rect->left - s->cropcap.bounds.left +
- s->active_pixel.left),
- fmw_sy = (u16)(rect->top - s->cropcap.bounds.top +
- s->active_pixel.top),
- fmw_length = (u16)(rect->width),
- fmw_height = (u16)(rect->height);
- int err = 0;
-
- err += et61x251_write_reg(cam, fmw_sx & 0xff, 0x69);
- err += et61x251_write_reg(cam, fmw_sy & 0xff, 0x6a);
- err += et61x251_write_reg(cam, fmw_length & 0xff, 0x6b);
- err += et61x251_write_reg(cam, fmw_height & 0xff, 0x6c);
- err += et61x251_write_reg(cam, (fmw_sx >> 8) | ((fmw_sy & 0x300) >> 6)
- | ((fmw_length & 0x300) >> 4)
- | ((fmw_height & 0x300) >> 2), 0x6d);
- if (err)
- return -EIO;
-
- PDBGG("fmw_sx, fmw_sy, fmw_length, fmw_height: %u %u %u %u",
- fmw_sx, fmw_sy, fmw_length, fmw_height);
-
- return 0;
-}
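et61x251_set_crop() splits each window parameter into a low byte (registers 0x69 to 0x6c) and gathers the two high bits of all four values in register 0x6d. A worked example for a 320x240 window at offset (0, 0), with values chosen purely for illustration:

/* Illustrative packing only (kernel types assumed). */
static void et61x251_pack_crop_example(void)
{
        u16 fmw_sx = 0, fmw_sy = 0, fmw_length = 320, fmw_height = 240;
        u8 reg69 = fmw_sx & 0xff;               /* 0x00 */
        u8 reg6a = fmw_sy & 0xff;               /* 0x00 */
        u8 reg6b = fmw_length & 0xff;           /* 0x40 */
        u8 reg6c = fmw_height & 0xff;           /* 0xf0 */
        u8 reg6d = (fmw_sx >> 8) | ((fmw_sy & 0x300) >> 6) |
                   ((fmw_length & 0x300) >> 4) |
                   ((fmw_height & 0x300) >> 2); /* 0x10 */

        (void)reg69; (void)reg6a; (void)reg6b; (void)reg6c; (void)reg6d;
}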
-
-
-static int et61x251_init(struct et61x251_device* cam)
-{
- struct et61x251_sensor* s = &cam->sensor;
- struct v4l2_control ctrl;
- struct v4l2_queryctrl *qctrl;
- struct v4l2_rect* rect;
- u8 i = 0;
- int err = 0;
-
- if (!(cam->state & DEV_INITIALIZED)) {
- mutex_init(&cam->open_mutex);
- init_waitqueue_head(&cam->wait_open);
- qctrl = s->qctrl;
- rect = &(s->cropcap.defrect);
- cam->compression.quality = ET61X251_COMPRESSION_QUALITY;
- } else { /* use current values */
- qctrl = s->_qctrl;
- rect = &(s->_rect);
- }
-
- err += et61x251_set_scale(cam, rect->width / s->pix_format.width);
- err += et61x251_set_crop(cam, rect);
- if (err)
- return err;
-
- if (s->init) {
- err = s->init(cam);
- if (err) {
- DBG(3, "Sensor initialization failed");
- return err;
- }
- }
-
- err += et61x251_set_compression(cam, &cam->compression);
- err += et61x251_set_pix_format(cam, &s->pix_format);
- if (s->set_pix_format)
- err += s->set_pix_format(cam, &s->pix_format);
- if (err)
- return err;
-
- if (s->pix_format.pixelformat == V4L2_PIX_FMT_ET61X251)
- DBG(3, "Compressed video format is active, quality %d",
- cam->compression.quality);
- else
- DBG(3, "Uncompressed video format is active");
-
- if (s->set_crop)
- if ((err = s->set_crop(cam, rect))) {
- DBG(3, "set_crop() failed");
- return err;
- }
-
- if (s->set_ctrl) {
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
- if (s->qctrl[i].id != 0 &&
- !(s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)) {
- ctrl.id = s->qctrl[i].id;
- ctrl.value = qctrl[i].default_value;
- err = s->set_ctrl(cam, &ctrl);
- if (err) {
- DBG(3, "Set %s control failed",
- s->qctrl[i].name);
- return err;
- }
- DBG(3, "Image sensor supports '%s' control",
- s->qctrl[i].name);
- }
- }
-
- if (!(cam->state & DEV_INITIALIZED)) {
- mutex_init(&cam->fileop_mutex);
- spin_lock_init(&cam->queue_lock);
- init_waitqueue_head(&cam->wait_frame);
- init_waitqueue_head(&cam->wait_stream);
- cam->nreadbuffers = 2;
- memcpy(s->_qctrl, s->qctrl, sizeof(s->qctrl));
- memcpy(&(s->_rect), &(s->cropcap.defrect),
- sizeof(struct v4l2_rect));
- cam->state |= DEV_INITIALIZED;
- }
-
- DBG(2, "Initialization succeeded");
- return 0;
-}
-
-/*****************************************************************************/
-
-static void et61x251_release_resources(struct kref *kref)
-{
- struct et61x251_device *cam;
-
- mutex_lock(&et61x251_sysfs_lock);
-
- cam = container_of(kref, struct et61x251_device, kref);
-
- DBG(2, "V4L2 device %s deregistered",
- video_device_node_name(cam->v4ldev));
- video_set_drvdata(cam->v4ldev, NULL);
- video_unregister_device(cam->v4ldev);
- usb_put_dev(cam->usbdev);
- kfree(cam->control_buffer);
- kfree(cam);
-
- mutex_unlock(&et61x251_sysfs_lock);
-}
-
-
-static int et61x251_open(struct file *filp)
-{
- struct et61x251_device* cam;
- int err = 0;
-
- if (!down_read_trylock(&et61x251_dev_lock))
- return -ERESTARTSYS;
-
- cam = video_drvdata(filp);
-
- if (wait_for_completion_interruptible(&cam->probe)) {
- up_read(&et61x251_dev_lock);
- return -ERESTARTSYS;
- }
-
- kref_get(&cam->kref);
-
- if (mutex_lock_interruptible(&cam->open_mutex)) {
- kref_put(&cam->kref, et61x251_release_resources);
- up_read(&et61x251_dev_lock);
- return -ERESTARTSYS;
- }
-
- if (cam->state & DEV_DISCONNECTED) {
- DBG(1, "Device not present");
- err = -ENODEV;
- goto out;
- }
-
- if (cam->users) {
- DBG(2, "Device %s is already in use",
- video_device_node_name(cam->v4ldev));
- DBG(3, "Simultaneous opens are not supported");
- if ((filp->f_flags & O_NONBLOCK) ||
- (filp->f_flags & O_NDELAY)) {
- err = -EWOULDBLOCK;
- goto out;
- }
- DBG(2, "A blocking open() has been requested. Wait for the "
- "device to be released...");
- up_read(&et61x251_dev_lock);
- err = wait_event_interruptible_exclusive(cam->wait_open,
- (cam->state & DEV_DISCONNECTED)
- || !cam->users);
- down_read(&et61x251_dev_lock);
- if (err)
- goto out;
- if (cam->state & DEV_DISCONNECTED) {
- err = -ENODEV;
- goto out;
- }
- }
-
- if (cam->state & DEV_MISCONFIGURED) {
- err = et61x251_init(cam);
- if (err) {
- DBG(1, "Initialization failed again. "
- "I will retry on next open().");
- goto out;
- }
- cam->state &= ~DEV_MISCONFIGURED;
- }
-
- if ((err = et61x251_start_transfer(cam)))
- goto out;
-
- filp->private_data = cam;
- cam->users++;
- cam->io = IO_NONE;
- cam->stream = STREAM_OFF;
- cam->nbuffers = 0;
- cam->frame_count = 0;
- et61x251_empty_framequeues(cam);
-
- DBG(3, "Video device %s is open",
- video_device_node_name(cam->v4ldev));
-
-out:
- mutex_unlock(&cam->open_mutex);
- if (err)
- kref_put(&cam->kref, et61x251_release_resources);
- up_read(&et61x251_dev_lock);
- return err;
-}
-
-
-static int et61x251_release(struct file *filp)
-{
- struct et61x251_device* cam;
-
- down_write(&et61x251_dev_lock);
-
- cam = video_drvdata(filp);
-
- et61x251_stop_transfer(cam);
- et61x251_release_buffers(cam);
- cam->users--;
- wake_up_interruptible_nr(&cam->wait_open, 1);
-
- DBG(3, "Video device %s closed",
- video_device_node_name(cam->v4ldev));
-
- kref_put(&cam->kref, et61x251_release_resources);
-
- up_write(&et61x251_dev_lock);
-
- return 0;
-}
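open() and release() above combine three mechanisms: the global rw-semaphore (taken for reading in open() and for writing in release()), the probe completion that gates the first open, and a kref that keeps struct et61x251_device alive while any opener or the USB core still references it. The bare reference-counting pattern, as a minimal sketch assuming a struct et61x251_device *cam in scope (the probe and disconnect paths holding the matching kref_init()/kref_put() calls are not part of this hunk):

/* Minimal kref pattern (sketch); assumes <linux/kref.h>. */
kref_init(&cam->kref);                              /* probe: refcount starts at 1    */
kref_get(&cam->kref);                               /* open: one reference per opener */
kref_put(&cam->kref, et61x251_release_resources);   /* release/disconnect: drop it    */
/* et61x251_release_resources() runs only when the last reference is dropped. */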
-
-
-static ssize_t
-et61x251_read(struct file* filp, char __user * buf,
- size_t count, loff_t* f_pos)
-{
- struct et61x251_device *cam = video_drvdata(filp);
- struct et61x251_frame_t* f, * i;
- unsigned long lock_flags;
- long timeout;
- int err = 0;
-
- if (mutex_lock_interruptible(&cam->fileop_mutex))
- return -ERESTARTSYS;
-
- if (cam->state & DEV_DISCONNECTED) {
- DBG(1, "Device not present");
- mutex_unlock(&cam->fileop_mutex);
- return -ENODEV;
- }
-
- if (cam->state & DEV_MISCONFIGURED) {
- DBG(1, "The camera is misconfigured. Close and open it "
- "again.");
- mutex_unlock(&cam->fileop_mutex);
- return -EIO;
- }
-
- if (cam->io == IO_MMAP) {
- DBG(3, "Close and open the device again to choose the read "
- "method");
- mutex_unlock(&cam->fileop_mutex);
- return -EBUSY;
- }
-
- if (cam->io == IO_NONE) {
- if (!et61x251_request_buffers(cam, cam->nreadbuffers,
- IO_READ)) {
- DBG(1, "read() failed, not enough memory");
- mutex_unlock(&cam->fileop_mutex);
- return -ENOMEM;
- }
- cam->io = IO_READ;
- cam->stream = STREAM_ON;
- }
-
- if (list_empty(&cam->inqueue)) {
- if (!list_empty(&cam->outqueue))
- et61x251_empty_framequeues(cam);
- et61x251_queue_unusedframes(cam);
- }
-
- if (!count) {
- mutex_unlock(&cam->fileop_mutex);
- return 0;
- }
-
- if (list_empty(&cam->outqueue)) {
- if (filp->f_flags & O_NONBLOCK) {
- mutex_unlock(&cam->fileop_mutex);
- return -EAGAIN;
- }
- timeout = wait_event_interruptible_timeout
- ( cam->wait_frame,
- (!list_empty(&cam->outqueue)) ||
- (cam->state & DEV_DISCONNECTED) ||
- (cam->state & DEV_MISCONFIGURED),
- msecs_to_jiffies(
- cam->module_param.frame_timeout * 1000
- )
- );
- if (timeout < 0) {
- mutex_unlock(&cam->fileop_mutex);
- return timeout;
- }
- if (cam->state & DEV_DISCONNECTED) {
- mutex_unlock(&cam->fileop_mutex);
- return -ENODEV;
- }
- if (!timeout || (cam->state & DEV_MISCONFIGURED)) {
- mutex_unlock(&cam->fileop_mutex);
- return -EIO;
- }
- }
-
- f = list_entry(cam->outqueue.prev, struct et61x251_frame_t, frame);
-
- if (count > f->buf.bytesused)
- count = f->buf.bytesused;
-
- if (copy_to_user(buf, f->bufmem, count)) {
- err = -EFAULT;
- goto exit;
- }
- *f_pos += count;
-
-exit:
- spin_lock_irqsave(&cam->queue_lock, lock_flags);
- list_for_each_entry(i, &cam->outqueue, frame)
- i->state = F_UNUSED;
- INIT_LIST_HEAD(&cam->outqueue);
- spin_unlock_irqrestore(&cam->queue_lock, lock_flags);
-
- et61x251_queue_unusedframes(cam);
-
- PDBGG("Frame #%lu, bytes read: %zu",
- (unsigned long)f->buf.index, count);
-
- mutex_unlock(&cam->fileop_mutex);
-
- return err ? err : count;
-}
-
-
-static unsigned int et61x251_poll(struct file *filp, poll_table *wait)
-{
- struct et61x251_device *cam = video_drvdata(filp);
- struct et61x251_frame_t* f;
- unsigned long lock_flags;
- unsigned int mask = 0;
-
- if (mutex_lock_interruptible(&cam->fileop_mutex))
- return POLLERR;
-
- if (cam->state & DEV_DISCONNECTED) {
- DBG(1, "Device not present");
- goto error;
- }
-
- if (cam->state & DEV_MISCONFIGURED) {
- DBG(1, "The camera is misconfigured. Close and open it "
- "again.");
- goto error;
- }
-
- if (cam->io == IO_NONE) {
- if (!et61x251_request_buffers(cam, cam->nreadbuffers,
- IO_READ)) {
- DBG(1, "poll() failed, not enough memory");
- goto error;
- }
- cam->io = IO_READ;
- cam->stream = STREAM_ON;
- }
-
- if (cam->io == IO_READ) {
- spin_lock_irqsave(&cam->queue_lock, lock_flags);
- list_for_each_entry(f, &cam->outqueue, frame)
- f->state = F_UNUSED;
- INIT_LIST_HEAD(&cam->outqueue);
- spin_unlock_irqrestore(&cam->queue_lock, lock_flags);
- et61x251_queue_unusedframes(cam);
- }
-
- poll_wait(filp, &cam->wait_frame, wait);
-
- if (!list_empty(&cam->outqueue))
- mask |= POLLIN | POLLRDNORM;
-
- mutex_unlock(&cam->fileop_mutex);
-
- return mask;
-
-error:
- mutex_unlock(&cam->fileop_mutex);
- return POLLERR;
-}
-
-
-static void et61x251_vm_open(struct vm_area_struct* vma)
-{
- struct et61x251_frame_t* f = vma->vm_private_data;
- f->vma_use_count++;
-}
-
-
-static void et61x251_vm_close(struct vm_area_struct* vma)
-{
- /* NOTE: buffers are not freed here */
- struct et61x251_frame_t* f = vma->vm_private_data;
- f->vma_use_count--;
-}
-
-
-static const struct vm_operations_struct et61x251_vm_ops = {
- .open = et61x251_vm_open,
- .close = et61x251_vm_close,
-};
-
-
-static int et61x251_mmap(struct file* filp, struct vm_area_struct *vma)
-{
- struct et61x251_device *cam = video_drvdata(filp);
- unsigned long size = vma->vm_end - vma->vm_start,
- start = vma->vm_start;
- void *pos;
- u32 i;
-
- if (mutex_lock_interruptible(&cam->fileop_mutex))
- return -ERESTARTSYS;
-
- if (cam->state & DEV_DISCONNECTED) {
- DBG(1, "Device not present");
- mutex_unlock(&cam->fileop_mutex);
- return -ENODEV;
- }
-
- if (cam->state & DEV_MISCONFIGURED) {
- DBG(1, "The camera is misconfigured. Close and open it "
- "again.");
- mutex_unlock(&cam->fileop_mutex);
- return -EIO;
- }
-
- if (!(vma->vm_flags & (VM_WRITE | VM_READ))) {
- mutex_unlock(&cam->fileop_mutex);
- return -EACCES;
- }
-
- if (cam->io != IO_MMAP ||
- size != PAGE_ALIGN(cam->frame[0].buf.length)) {
- mutex_unlock(&cam->fileop_mutex);
- return -EINVAL;
- }
-
- for (i = 0; i < cam->nbuffers; i++) {
- if ((cam->frame[i].buf.m.offset>>PAGE_SHIFT) == vma->vm_pgoff)
- break;
- }
- if (i == cam->nbuffers) {
- mutex_unlock(&cam->fileop_mutex);
- return -EINVAL;
- }
-
- vma->vm_flags |= VM_IO;
- vma->vm_flags |= VM_RESERVED;
-
- pos = cam->frame[i].bufmem;
- while (size > 0) { /* size is page-aligned */
- if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
- mutex_unlock(&cam->fileop_mutex);
- return -EAGAIN;
- }
- start += PAGE_SIZE;
- pos += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
-
- vma->vm_ops = &et61x251_vm_ops;
- vma->vm_private_data = &cam->frame[i];
- et61x251_vm_open(vma);
-
- mutex_unlock(&cam->fileop_mutex);
-
- return 0;
-}
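On the userspace side these buffers are reached through the standard V4L2 memory-mapping sequence: request MMAP buffers, query each one for its length and offset, then mmap() the device node at that offset, which is exactly the buf.m.offset value the handler above matches against vma->vm_pgoff. A minimal userspace sketch with error handling omitted (/dev/video0 and the buffer count are assumptions):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int main(void)
{
        int fd = open("/dev/video0", O_RDWR);
        struct v4l2_requestbuffers req;
        struct v4l2_buffer buf;
        void *mem;

        memset(&req, 0, sizeof(req));
        req.count = 2;
        req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        req.memory = V4L2_MEMORY_MMAP;
        ioctl(fd, VIDIOC_REQBUFS, &req);

        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = 0;
        ioctl(fd, VIDIOC_QUERYBUF, &buf);

        /* buf.m.offset is the i * PAGE_ALIGN(imagesize) value set by the driver */
        mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd, buf.m.offset);
        /* ... VIDIOC_QBUF / VIDIOC_STREAMON / VIDIOC_DQBUF would follow ... */
        munmap(mem, buf.length);
        return 0;
}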
-
-/*****************************************************************************/
-
-static int
-et61x251_vidioc_querycap(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_capability cap = {
- .driver = "et61x251",
- .version = LINUX_VERSION_CODE,
- .capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING,
- };
-
- strlcpy(cap.card, cam->v4ldev->name, sizeof(cap.card));
- if (usb_make_path(cam->usbdev, cap.bus_info, sizeof(cap.bus_info)) < 0)
- strlcpy(cap.bus_info, dev_name(&cam->usbdev->dev),
- sizeof(cap.bus_info));
-
- if (copy_to_user(arg, &cap, sizeof(cap)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_enuminput(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_input i;
-
- if (copy_from_user(&i, arg, sizeof(i)))
- return -EFAULT;
-
- if (i.index)
- return -EINVAL;
-
- memset(&i, 0, sizeof(i));
- strcpy(i.name, "Camera");
- i.type = V4L2_INPUT_TYPE_CAMERA;
- i.capabilities = V4L2_IN_CAP_STD;
-
- if (copy_to_user(arg, &i, sizeof(i)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_g_input(struct et61x251_device* cam, void __user * arg)
-{
- int index = 0;
-
- if (copy_to_user(arg, &index, sizeof(index)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_s_input(struct et61x251_device* cam, void __user * arg)
-{
- int index;
-
- if (copy_from_user(&index, arg, sizeof(index)))
- return -EFAULT;
-
- if (index != 0)
- return -EINVAL;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_query_ctrl(struct et61x251_device* cam, void __user * arg)
-{
- struct et61x251_sensor* s = &cam->sensor;
- struct v4l2_queryctrl qc;
- u8 i;
-
- if (copy_from_user(&qc, arg, sizeof(qc)))
- return -EFAULT;
-
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
- if (qc.id && qc.id == s->qctrl[i].id) {
- memcpy(&qc, &(s->qctrl[i]), sizeof(qc));
- if (copy_to_user(arg, &qc, sizeof(qc)))
- return -EFAULT;
- return 0;
- }
-
- return -EINVAL;
-}
-
-
-static int
-et61x251_vidioc_g_ctrl(struct et61x251_device* cam, void __user * arg)
-{
- struct et61x251_sensor* s = &cam->sensor;
- struct v4l2_control ctrl;
- int err = 0;
- u8 i;
-
- if (!s->get_ctrl && !s->set_ctrl)
- return -EINVAL;
-
- if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
- return -EFAULT;
-
- if (!s->get_ctrl) {
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
- if (ctrl.id == s->qctrl[i].id) {
- ctrl.value = s->_qctrl[i].default_value;
- goto exit;
- }
- return -EINVAL;
- } else
- err = s->get_ctrl(cam, &ctrl);
-
-exit:
- if (copy_to_user(arg, &ctrl, sizeof(ctrl)))
- return -EFAULT;
-
- return err;
-}
-
-
-static int
-et61x251_vidioc_s_ctrl(struct et61x251_device* cam, void __user * arg)
-{
- struct et61x251_sensor* s = &cam->sensor;
- struct v4l2_control ctrl;
- u8 i;
- int err = 0;
-
- if (!s->set_ctrl)
- return -EINVAL;
-
- if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
- return -EFAULT;
-
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) {
- if (ctrl.id == s->qctrl[i].id) {
- if (s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)
- return -EINVAL;
- if (ctrl.value < s->qctrl[i].minimum ||
- ctrl.value > s->qctrl[i].maximum)
- return -ERANGE;
- ctrl.value -= ctrl.value % s->qctrl[i].step;
- break;
- }
- }
- if (i == ARRAY_SIZE(s->qctrl))
- return -EINVAL;
- if ((err = s->set_ctrl(cam, &ctrl)))
- return err;
-
- s->_qctrl[i].default_value = ctrl.value;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_cropcap(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_cropcap* cc = &(cam->sensor.cropcap);
-
- cc->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- cc->pixelaspect.numerator = 1;
- cc->pixelaspect.denominator = 1;
-
- if (copy_to_user(arg, cc, sizeof(*cc)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_g_crop(struct et61x251_device* cam, void __user * arg)
-{
- struct et61x251_sensor* s = &cam->sensor;
- struct v4l2_crop crop = {
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- };
-
- memcpy(&(crop.c), &(s->_rect), sizeof(struct v4l2_rect));
-
- if (copy_to_user(arg, &crop, sizeof(crop)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_s_crop(struct et61x251_device* cam, void __user * arg)
-{
- struct et61x251_sensor* s = &cam->sensor;
- struct v4l2_crop crop;
- struct v4l2_rect* rect;
- struct v4l2_rect* bounds = &(s->cropcap.bounds);
- struct v4l2_pix_format* pix_format = &(s->pix_format);
- u8 scale;
- const enum et61x251_stream_state stream = cam->stream;
- const u32 nbuffers = cam->nbuffers;
- u32 i;
- int err = 0;
-
- if (copy_from_user(&crop, arg, sizeof(crop)))
- return -EFAULT;
-
- rect = &(crop.c);
-
- if (crop.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- if (cam->module_param.force_munmap)
- for (i = 0; i < cam->nbuffers; i++)
- if (cam->frame[i].vma_use_count) {
- DBG(3, "VIDIOC_S_CROP failed. "
- "Unmap the buffers first.");
- return -EBUSY;
- }
-
- /* Preserve R,G or B origin */
- rect->left = (s->_rect.left & 1L) ? rect->left | 1L : rect->left & ~1L;
- rect->top = (s->_rect.top & 1L) ? rect->top | 1L : rect->top & ~1L;
-
- if (rect->width < 16)
- rect->width = 16;
- if (rect->height < 16)
- rect->height = 16;
- if (rect->width > bounds->width)
- rect->width = bounds->width;
- if (rect->height > bounds->height)
- rect->height = bounds->height;
- if (rect->left < bounds->left)
- rect->left = bounds->left;
- if (rect->top < bounds->top)
- rect->top = bounds->top;
- if (rect->left + rect->width > bounds->left + bounds->width)
- rect->left = bounds->left+bounds->width - rect->width;
- if (rect->top + rect->height > bounds->top + bounds->height)
- rect->top = bounds->top+bounds->height - rect->height;
-
- rect->width &= ~15L;
- rect->height &= ~15L;
-
- if (ET61X251_PRESERVE_IMGSCALE) {
- /* Calculate the actual scaling factor */
- u32 a, b;
- a = rect->width * rect->height;
- b = pix_format->width * pix_format->height;
- scale = b ? (u8)((a / b) < 4 ? 1 : 2) : 1;
- } else
- scale = 1;
-
- if (cam->stream == STREAM_ON)
- if ((err = et61x251_stream_interrupt(cam)))
- return err;
-
- if (copy_to_user(arg, &crop, sizeof(crop))) {
- cam->stream = stream;
- return -EFAULT;
- }
-
- if (cam->module_param.force_munmap || cam->io == IO_READ)
- et61x251_release_buffers(cam);
-
- err = et61x251_set_crop(cam, rect);
- if (s->set_crop)
- err += s->set_crop(cam, rect);
- err += et61x251_set_scale(cam, scale);
-
- if (err) { /* atomic, no rollback in ioctl() */
- cam->state |= DEV_MISCONFIGURED;
- DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
- "use the camera, close and open %s again.",
- video_device_node_name(cam->v4ldev));
- return -EIO;
- }
-
- s->pix_format.width = rect->width/scale;
- s->pix_format.height = rect->height/scale;
- memcpy(&(s->_rect), rect, sizeof(*rect));
-
- if ((cam->module_param.force_munmap || cam->io == IO_READ) &&
- nbuffers != et61x251_request_buffers(cam, nbuffers, cam->io)) {
- cam->state |= DEV_MISCONFIGURED;
- DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
- "use the camera, close and open %s again.",
- video_device_node_name(cam->v4ldev));
- return -ENOMEM;
- }
-
- if (cam->io == IO_READ)
- et61x251_empty_framequeues(cam);
- else if (cam->module_param.force_munmap)
- et61x251_requeue_outqueue(cam);
-
- cam->stream = stream;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_enum_framesizes(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_frmsizeenum frmsize;
-
- if (copy_from_user(&frmsize, arg, sizeof(frmsize)))
- return -EFAULT;
-
- if (frmsize.index != 0)
- return -EINVAL;
-
- if (frmsize.pixel_format != V4L2_PIX_FMT_ET61X251 &&
- frmsize.pixel_format != V4L2_PIX_FMT_SBGGR8)
- return -EINVAL;
-
- frmsize.type = V4L2_FRMSIZE_TYPE_STEPWISE;
- frmsize.stepwise.min_width = frmsize.stepwise.step_width = 16;
- frmsize.stepwise.min_height = frmsize.stepwise.step_height = 16;
- frmsize.stepwise.max_width = cam->sensor.cropcap.bounds.width;
- frmsize.stepwise.max_height = cam->sensor.cropcap.bounds.height;
- memset(&frmsize.reserved, 0, sizeof(frmsize.reserved));
-
- if (copy_to_user(arg, &frmsize, sizeof(frmsize)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_enum_fmt(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_fmtdesc fmtd;
-
- if (copy_from_user(&fmtd, arg, sizeof(fmtd)))
- return -EFAULT;
-
- if (fmtd.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- if (fmtd.index == 0) {
- strcpy(fmtd.description, "bayer rgb");
- fmtd.pixelformat = V4L2_PIX_FMT_SBGGR8;
- } else if (fmtd.index == 1) {
- strcpy(fmtd.description, "compressed");
- fmtd.pixelformat = V4L2_PIX_FMT_ET61X251;
- fmtd.flags = V4L2_FMT_FLAG_COMPRESSED;
- } else
- return -EINVAL;
-
- fmtd.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- memset(&fmtd.reserved, 0, sizeof(fmtd.reserved));
-
- if (copy_to_user(arg, &fmtd, sizeof(fmtd)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_g_fmt(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_format format;
- struct v4l2_pix_format* pfmt = &(cam->sensor.pix_format);
-
- if (copy_from_user(&format, arg, sizeof(format)))
- return -EFAULT;
-
- if (format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- pfmt->colorspace = (pfmt->pixelformat == V4L2_PIX_FMT_ET61X251) ?
- 0 : V4L2_COLORSPACE_SRGB;
- pfmt->bytesperline = (pfmt->pixelformat==V4L2_PIX_FMT_ET61X251)
- ? 0 : (pfmt->width * pfmt->priv) / 8;
- pfmt->sizeimage = pfmt->height * ((pfmt->width*pfmt->priv)/8);
- pfmt->field = V4L2_FIELD_NONE;
- memcpy(&(format.fmt.pix), pfmt, sizeof(*pfmt));
-
- if (copy_to_user(arg, &format, sizeof(format)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_try_s_fmt(struct et61x251_device* cam, unsigned int cmd,
- void __user * arg)
-{
- struct et61x251_sensor* s = &cam->sensor;
- struct v4l2_format format;
- struct v4l2_pix_format* pix;
- struct v4l2_pix_format* pfmt = &(s->pix_format);
- struct v4l2_rect* bounds = &(s->cropcap.bounds);
- struct v4l2_rect rect;
- u8 scale;
- const enum et61x251_stream_state stream = cam->stream;
- const u32 nbuffers = cam->nbuffers;
- u32 i;
- int err = 0;
-
- if (copy_from_user(&format, arg, sizeof(format)))
- return -EFAULT;
-
- pix = &(format.fmt.pix);
-
- if (format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- memcpy(&rect, &(s->_rect), sizeof(rect));
-
- { /* calculate the actual scaling factor */
- u32 a, b;
- a = rect.width * rect.height;
- b = pix->width * pix->height;
- scale = b ? (u8)((a / b) < 4 ? 1 : 2) : 1;
- }
-
- rect.width = scale * pix->width;
- rect.height = scale * pix->height;
-
- if (rect.width < 16)
- rect.width = 16;
- if (rect.height < 16)
- rect.height = 16;
- if (rect.width > bounds->left + bounds->width - rect.left)
- rect.width = bounds->left + bounds->width - rect.left;
- if (rect.height > bounds->top + bounds->height - rect.top)
- rect.height = bounds->top + bounds->height - rect.top;
-
- rect.width &= ~15L;
- rect.height &= ~15L;
-
- { /* adjust the scaling factor */
- u32 a, b;
- a = rect.width * rect.height;
- b = pix->width * pix->height;
- scale = b ? (u8)((a / b) < 4 ? 1 : 2) : 1;
- }
-
- pix->width = rect.width / scale;
- pix->height = rect.height / scale;
-
- if (pix->pixelformat != V4L2_PIX_FMT_ET61X251 &&
- pix->pixelformat != V4L2_PIX_FMT_SBGGR8)
- pix->pixelformat = pfmt->pixelformat;
- pix->priv = pfmt->priv; /* bpp */
- pix->colorspace = (pix->pixelformat == V4L2_PIX_FMT_ET61X251) ?
- 0 : V4L2_COLORSPACE_SRGB;
- pix->colorspace = pfmt->colorspace;
- pix->bytesperline = (pix->pixelformat == V4L2_PIX_FMT_ET61X251)
- ? 0 : (pix->width * pix->priv) / 8;
- pix->sizeimage = pix->height * ((pix->width * pix->priv) / 8);
- pix->field = V4L2_FIELD_NONE;
-
- if (cmd == VIDIOC_TRY_FMT) {
- if (copy_to_user(arg, &format, sizeof(format)))
- return -EFAULT;
- return 0;
- }
-
- if (cam->module_param.force_munmap)
- for (i = 0; i < cam->nbuffers; i++)
- if (cam->frame[i].vma_use_count) {
- DBG(3, "VIDIOC_S_FMT failed. "
- "Unmap the buffers first.");
- return -EBUSY;
- }
-
- if (cam->stream == STREAM_ON)
- if ((err = et61x251_stream_interrupt(cam)))
- return err;
-
- if (copy_to_user(arg, &format, sizeof(format))) {
- cam->stream = stream;
- return -EFAULT;
- }
-
- if (cam->module_param.force_munmap || cam->io == IO_READ)
- et61x251_release_buffers(cam);
-
- err += et61x251_set_pix_format(cam, pix);
- err += et61x251_set_crop(cam, &rect);
- if (s->set_pix_format)
- err += s->set_pix_format(cam, pix);
- if (s->set_crop)
- err += s->set_crop(cam, &rect);
- err += et61x251_set_scale(cam, scale);
-
- if (err) { /* atomic, no rollback in ioctl() */
- cam->state |= DEV_MISCONFIGURED;
- DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
- "use the camera, close and open %s again.",
- video_device_node_name(cam->v4ldev));
- return -EIO;
- }
-
- memcpy(pfmt, pix, sizeof(*pix));
- memcpy(&(s->_rect), &rect, sizeof(rect));
-
- if ((cam->module_param.force_munmap || cam->io == IO_READ) &&
- nbuffers != et61x251_request_buffers(cam, nbuffers, cam->io)) {
- cam->state |= DEV_MISCONFIGURED;
- DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
- "use the camera, close and open %s again.",
- video_device_node_name(cam->v4ldev));
- return -ENOMEM;
- }
-
- if (cam->io == IO_READ)
- et61x251_empty_framequeues(cam);
- else if (cam->module_param.force_munmap)
- et61x251_requeue_outqueue(cam);
-
- cam->stream = stream;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_g_jpegcomp(struct et61x251_device* cam, void __user * arg)
-{
- if (copy_to_user(arg, &cam->compression,
- sizeof(cam->compression)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_s_jpegcomp(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_jpegcompression jc;
- const enum et61x251_stream_state stream = cam->stream;
- int err = 0;
-
- if (copy_from_user(&jc, arg, sizeof(jc)))
- return -EFAULT;
-
- if (jc.quality != 0 && jc.quality != 1)
- return -EINVAL;
-
- if (cam->stream == STREAM_ON)
- if ((err = et61x251_stream_interrupt(cam)))
- return err;
-
- err += et61x251_set_compression(cam, &jc);
- if (err) { /* atomic, no rollback in ioctl() */
- cam->state |= DEV_MISCONFIGURED;
- DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
- "problems. To use the camera, close and open "
- "%s again.", video_device_node_name(cam->v4ldev));
- return -EIO;
- }
-
- cam->compression.quality = jc.quality;
-
- cam->stream = stream;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_reqbufs(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_requestbuffers rb;
- u32 i;
- int err;
-
- if (copy_from_user(&rb, arg, sizeof(rb)))
- return -EFAULT;
-
- if (rb.type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- rb.memory != V4L2_MEMORY_MMAP)
- return -EINVAL;
-
- if (cam->io == IO_READ) {
- DBG(3, "Close and open the device again to choose the mmap "
- "I/O method");
- return -EBUSY;
- }
-
- for (i = 0; i < cam->nbuffers; i++)
- if (cam->frame[i].vma_use_count) {
- DBG(3, "VIDIOC_REQBUFS failed. "
- "Previous buffers are still mapped.");
- return -EBUSY;
- }
-
- if (cam->stream == STREAM_ON)
- if ((err = et61x251_stream_interrupt(cam)))
- return err;
-
- et61x251_empty_framequeues(cam);
-
- et61x251_release_buffers(cam);
- if (rb.count)
- rb.count = et61x251_request_buffers(cam, rb.count, IO_MMAP);
-
- if (copy_to_user(arg, &rb, sizeof(rb))) {
- et61x251_release_buffers(cam);
- cam->io = IO_NONE;
- return -EFAULT;
- }
-
- cam->io = rb.count ? IO_MMAP : IO_NONE;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_querybuf(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_buffer b;
-
- if (copy_from_user(&b, arg, sizeof(b)))
- return -EFAULT;
-
- if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- b.index >= cam->nbuffers || cam->io != IO_MMAP)
- return -EINVAL;
-
- memcpy(&b, &cam->frame[b.index].buf, sizeof(b));
-
- if (cam->frame[b.index].vma_use_count)
- b.flags |= V4L2_BUF_FLAG_MAPPED;
-
- if (cam->frame[b.index].state == F_DONE)
- b.flags |= V4L2_BUF_FLAG_DONE;
- else if (cam->frame[b.index].state != F_UNUSED)
- b.flags |= V4L2_BUF_FLAG_QUEUED;
-
- if (copy_to_user(arg, &b, sizeof(b)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_qbuf(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_buffer b;
- unsigned long lock_flags;
-
- if (copy_from_user(&b, arg, sizeof(b)))
- return -EFAULT;
-
- if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- b.index >= cam->nbuffers || cam->io != IO_MMAP)
- return -EINVAL;
-
- if (cam->frame[b.index].state != F_UNUSED)
- return -EINVAL;
-
- cam->frame[b.index].state = F_QUEUED;
-
- spin_lock_irqsave(&cam->queue_lock, lock_flags);
- list_add_tail(&cam->frame[b.index].frame, &cam->inqueue);
- spin_unlock_irqrestore(&cam->queue_lock, lock_flags);
-
- PDBGG("Frame #%lu queued", (unsigned long)b.index);
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_dqbuf(struct et61x251_device* cam, struct file* filp,
- void __user * arg)
-{
- struct v4l2_buffer b;
- struct et61x251_frame_t *f;
- unsigned long lock_flags;
- long timeout;
-
- if (copy_from_user(&b, arg, sizeof(b)))
- return -EFAULT;
-
- if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io!= IO_MMAP)
- return -EINVAL;
-
- if (list_empty(&cam->outqueue)) {
- if (cam->stream == STREAM_OFF)
- return -EINVAL;
- if (filp->f_flags & O_NONBLOCK)
- return -EAGAIN;
- timeout = wait_event_interruptible_timeout
- ( cam->wait_frame,
- (!list_empty(&cam->outqueue)) ||
- (cam->state & DEV_DISCONNECTED) ||
- (cam->state & DEV_MISCONFIGURED),
- cam->module_param.frame_timeout *
- 1000 * msecs_to_jiffies(1) );
- if (timeout < 0)
- return timeout;
- if (cam->state & DEV_DISCONNECTED)
- return -ENODEV;
- if (!timeout || (cam->state & DEV_MISCONFIGURED))
- return -EIO;
- }
-
- spin_lock_irqsave(&cam->queue_lock, lock_flags);
- f = list_entry(cam->outqueue.next, struct et61x251_frame_t, frame);
- list_del(cam->outqueue.next);
- spin_unlock_irqrestore(&cam->queue_lock, lock_flags);
-
- f->state = F_UNUSED;
-
- memcpy(&b, &f->buf, sizeof(b));
- if (f->vma_use_count)
- b.flags |= V4L2_BUF_FLAG_MAPPED;
-
- if (copy_to_user(arg, &b, sizeof(b)))
- return -EFAULT;
-
- PDBGG("Frame #%lu dequeued", (unsigned long)f->buf.index);
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_streamon(struct et61x251_device* cam, void __user * arg)
-{
- int type;
-
- if (copy_from_user(&type, arg, sizeof(type)))
- return -EFAULT;
-
- if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP)
- return -EINVAL;
-
- cam->stream = STREAM_ON;
-
- DBG(3, "Stream on");
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_streamoff(struct et61x251_device* cam, void __user * arg)
-{
- int type, err;
-
- if (copy_from_user(&type, arg, sizeof(type)))
- return -EFAULT;
-
- if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP)
- return -EINVAL;
-
- if (cam->stream == STREAM_ON)
- if ((err = et61x251_stream_interrupt(cam)))
- return err;
-
- et61x251_empty_framequeues(cam);
-
- DBG(3, "Stream off");
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_g_parm(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_streamparm sp;
-
- if (copy_from_user(&sp, arg, sizeof(sp)))
- return -EFAULT;
-
- if (sp.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- sp.parm.capture.extendedmode = 0;
- sp.parm.capture.readbuffers = cam->nreadbuffers;
-
- if (copy_to_user(arg, &sp, sizeof(sp)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_s_parm(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_streamparm sp;
-
- if (copy_from_user(&sp, arg, sizeof(sp)))
- return -EFAULT;
-
- if (sp.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- sp.parm.capture.extendedmode = 0;
-
- if (sp.parm.capture.readbuffers == 0)
- sp.parm.capture.readbuffers = cam->nreadbuffers;
-
- if (sp.parm.capture.readbuffers > ET61X251_MAX_FRAMES)
- sp.parm.capture.readbuffers = ET61X251_MAX_FRAMES;
-
- if (copy_to_user(arg, &sp, sizeof(sp)))
- return -EFAULT;
-
- cam->nreadbuffers = sp.parm.capture.readbuffers;
-
- return 0;
-}
-
-
-static long et61x251_ioctl_v4l2(struct file *filp,
- unsigned int cmd, void __user *arg)
-{
- struct et61x251_device *cam = video_drvdata(filp);
-
- switch (cmd) {
-
- case VIDIOC_QUERYCAP:
- return et61x251_vidioc_querycap(cam, arg);
-
- case VIDIOC_ENUMINPUT:
- return et61x251_vidioc_enuminput(cam, arg);
-
- case VIDIOC_G_INPUT:
- return et61x251_vidioc_g_input(cam, arg);
-
- case VIDIOC_S_INPUT:
- return et61x251_vidioc_s_input(cam, arg);
-
- case VIDIOC_QUERYCTRL:
- return et61x251_vidioc_query_ctrl(cam, arg);
-
- case VIDIOC_G_CTRL:
- return et61x251_vidioc_g_ctrl(cam, arg);
-
- case VIDIOC_S_CTRL:
- return et61x251_vidioc_s_ctrl(cam, arg);
-
- case VIDIOC_CROPCAP:
- return et61x251_vidioc_cropcap(cam, arg);
-
- case VIDIOC_G_CROP:
- return et61x251_vidioc_g_crop(cam, arg);
-
- case VIDIOC_S_CROP:
- return et61x251_vidioc_s_crop(cam, arg);
-
- case VIDIOC_ENUM_FMT:
- return et61x251_vidioc_enum_fmt(cam, arg);
-
- case VIDIOC_G_FMT:
- return et61x251_vidioc_g_fmt(cam, arg);
-
- case VIDIOC_TRY_FMT:
- case VIDIOC_S_FMT:
- return et61x251_vidioc_try_s_fmt(cam, cmd, arg);
-
- case VIDIOC_ENUM_FRAMESIZES:
- return et61x251_vidioc_enum_framesizes(cam, arg);
-
- case VIDIOC_G_JPEGCOMP:
- return et61x251_vidioc_g_jpegcomp(cam, arg);
-
- case VIDIOC_S_JPEGCOMP:
- return et61x251_vidioc_s_jpegcomp(cam, arg);
-
- case VIDIOC_REQBUFS:
- return et61x251_vidioc_reqbufs(cam, arg);
-
- case VIDIOC_QUERYBUF:
- return et61x251_vidioc_querybuf(cam, arg);
-
- case VIDIOC_QBUF:
- return et61x251_vidioc_qbuf(cam, arg);
-
- case VIDIOC_DQBUF:
- return et61x251_vidioc_dqbuf(cam, filp, arg);
-
- case VIDIOC_STREAMON:
- return et61x251_vidioc_streamon(cam, arg);
-
- case VIDIOC_STREAMOFF:
- return et61x251_vidioc_streamoff(cam, arg);
-
- case VIDIOC_G_PARM:
- return et61x251_vidioc_g_parm(cam, arg);
-
- case VIDIOC_S_PARM:
- return et61x251_vidioc_s_parm(cam, arg);
-
- default:
- return -ENOTTY;
-
- }
-}
-
-
-static long et61x251_ioctl(struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- struct et61x251_device *cam = video_drvdata(filp);
- long err = 0;
-
- if (mutex_lock_interruptible(&cam->fileop_mutex))
- return -ERESTARTSYS;
-
- if (cam->state & DEV_DISCONNECTED) {
- DBG(1, "Device not present");
- mutex_unlock(&cam->fileop_mutex);
- return -ENODEV;
- }
-
- if (cam->state & DEV_MISCONFIGURED) {
- DBG(1, "The camera is misconfigured. Close and open it "
- "again.");
- mutex_unlock(&cam->fileop_mutex);
- return -EIO;
- }
-
- V4LDBG(3, "et61x251", cmd);
-
- err = et61x251_ioctl_v4l2(filp, cmd, (void __user *)arg);
-
- mutex_unlock(&cam->fileop_mutex);
-
- return err;
-}
-
-
-static const struct v4l2_file_operations et61x251_fops = {
- .owner = THIS_MODULE,
- .open = et61x251_open,
- .release = et61x251_release,
- .unlocked_ioctl = et61x251_ioctl,
- .read = et61x251_read,
- .poll = et61x251_poll,
- .mmap = et61x251_mmap,
-};
-
-/*****************************************************************************/
-
-/* It exists a single interface only. We do not need to validate anything. */
-static int
-et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
-{
- struct usb_device *udev = interface_to_usbdev(intf);
- struct et61x251_device* cam;
- static unsigned int dev_nr;
- unsigned int i;
- int err = 0;
-
- if (!(cam = kzalloc(sizeof(struct et61x251_device), GFP_KERNEL)))
- return -ENOMEM;
-
- cam->usbdev = udev;
-
- if (!(cam->control_buffer = kzalloc(8, GFP_KERNEL))) {
- DBG(1, "kmalloc() failed");
- err = -ENOMEM;
- goto fail;
- }
-
- if (!(cam->v4ldev = video_device_alloc())) {
- DBG(1, "video_device_alloc() failed");
- err = -ENOMEM;
- goto fail;
- }
-
- DBG(2, "ET61X[12]51 PC Camera Controller detected "
- "(vid/pid 0x%04X:0x%04X)",id->idVendor, id->idProduct);
-
- for (i = 0; et61x251_sensor_table[i]; i++) {
- err = et61x251_sensor_table[i](cam);
- if (!err)
- break;
- }
-
- if (!err)
- DBG(2, "%s image sensor detected", cam->sensor.name);
- else {
- DBG(1, "No supported image sensor detected");
- err = -ENODEV;
- goto fail;
- }
-
- if (et61x251_init(cam)) {
- DBG(1, "Initialization failed. I will retry on open().");
- cam->state |= DEV_MISCONFIGURED;
- }
-
- strcpy(cam->v4ldev->name, "ET61X[12]51 PC Camera");
- cam->v4ldev->fops = &et61x251_fops;
- cam->v4ldev->release = video_device_release;
- cam->v4ldev->parent = &udev->dev;
- video_set_drvdata(cam->v4ldev, cam);
-
- init_completion(&cam->probe);
-
- err = video_register_device(cam->v4ldev, VFL_TYPE_GRABBER,
- video_nr[dev_nr]);
- if (err) {
- DBG(1, "V4L2 device registration failed");
- if (err == -ENFILE && video_nr[dev_nr] == -1)
- DBG(1, "Free /dev/videoX node not found");
- video_nr[dev_nr] = -1;
- dev_nr = (dev_nr < ET61X251_MAX_DEVICES-1) ? dev_nr+1 : 0;
- complete_all(&cam->probe);
- goto fail;
- }
-
- DBG(2, "V4L2 device registered as %s",
- video_device_node_name(cam->v4ldev));
-
- cam->module_param.force_munmap = force_munmap[dev_nr];
- cam->module_param.frame_timeout = frame_timeout[dev_nr];
-
- dev_nr = (dev_nr < ET61X251_MAX_DEVICES-1) ? dev_nr+1 : 0;
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- err = et61x251_create_sysfs(cam);
- if (!err)
- DBG(2, "Optional device control through 'sysfs' "
- "interface ready");
- else
- DBG(2, "Failed to create 'sysfs' interface for optional "
- "device controlling. Error #%d", err);
-#else
- DBG(2, "Optional device control through 'sysfs' interface disabled");
- DBG(3, "Compile the kernel with the 'CONFIG_VIDEO_ADV_DEBUG' "
- "configuration option to enable it.");
-#endif
-
- usb_set_intfdata(intf, cam);
- kref_init(&cam->kref);
- usb_get_dev(cam->usbdev);
-
- complete_all(&cam->probe);
-
- return 0;
-
-fail:
- if (cam) {
- kfree(cam->control_buffer);
- if (cam->v4ldev)
- video_device_release(cam->v4ldev);
- kfree(cam);
- }
- return err;
-}
-
-
-static void et61x251_usb_disconnect(struct usb_interface* intf)
-{
- struct et61x251_device* cam;
-
- down_write(&et61x251_dev_lock);
-
- cam = usb_get_intfdata(intf);
-
- DBG(2, "Disconnecting %s...", cam->v4ldev->name);
-
- if (cam->users) {
- DBG(2, "Device %s is open! Deregistration and memory "
- "deallocation are deferred.",
- video_device_node_name(cam->v4ldev));
- cam->state |= DEV_MISCONFIGURED;
- et61x251_stop_transfer(cam);
- cam->state |= DEV_DISCONNECTED;
- wake_up_interruptible(&cam->wait_frame);
- wake_up(&cam->wait_stream);
- } else
- cam->state |= DEV_DISCONNECTED;
-
- wake_up_interruptible_all(&cam->wait_open);
-
- kref_put(&cam->kref, et61x251_release_resources);
-
- up_write(&et61x251_dev_lock);
-}
-
-
-static struct usb_driver et61x251_usb_driver = {
- .name = "et61x251",
- .id_table = et61x251_id_table,
- .probe = et61x251_usb_probe,
- .disconnect = et61x251_usb_disconnect,
-};
-
-module_usb_driver(et61x251_usb_driver);
diff --git a/drivers/media/video/et61x251/et61x251_sensor.h b/drivers/media/video/et61x251/et61x251_sensor.h
deleted file mode 100644
index 71a03148cb09..000000000000
--- a/drivers/media/video/et61x251/et61x251_sensor.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/***************************************************************************
- * API for image sensors connected to ET61X[12]51 PC Camera Controllers *
- * *
- * Copyright (C) 2006-2007 by Luca Risolia <luca.risolia@studio.unibo.it> *
- * *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License as published by *
- * the Free Software Foundation; either version 2 of the License, or *
- * (at your option) any later version. *
- * *
- * This program is distributed in the hope that it will be useful, *
- * but WITHOUT ANY WARRANTY; without even the implied warranty of *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
- * GNU General Public License for more details. *
- * *
- * You should have received a copy of the GNU General Public License *
- * along with this program; if not, write to the Free Software *
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
- ***************************************************************************/
-
-#ifndef _ET61X251_SENSOR_H_
-#define _ET61X251_SENSOR_H_
-
-#include <linux/usb.h>
-#include <linux/videodev2.h>
-#include <linux/device.h>
-#include <linux/stddef.h>
-#include <linux/errno.h>
-#include <asm/types.h>
-
-struct et61x251_device;
-struct et61x251_sensor;
-
-/*****************************************************************************/
-
-extern int et61x251_probe_tas5130d1b(struct et61x251_device* cam);
-
-#define ET61X251_SENSOR_TABLE \
-/* Weak detections must go at the end of the list */ \
-static int (*et61x251_sensor_table[])(struct et61x251_device*) = { \
- &et61x251_probe_tas5130d1b, \
- NULL, \
-};
-
-extern struct et61x251_device*
-et61x251_match_id(struct et61x251_device* cam, const struct usb_device_id *id);
-
-extern void
-et61x251_attach_sensor(struct et61x251_device* cam,
- const struct et61x251_sensor* sensor);
-
-/*****************************************************************************/
-
-extern int et61x251_write_reg(struct et61x251_device*, u8 value, u16 index);
-extern int et61x251_i2c_raw_write(struct et61x251_device*, u8 n, u8 data1,
- u8 data2, u8 data3, u8 data4, u8 data5,
- u8 data6, u8 data7, u8 data8, u8 address);
-
-/*****************************************************************************/
-
-enum et61x251_i2c_sysfs_ops {
- ET61X251_I2C_READ = 0x01,
- ET61X251_I2C_WRITE = 0x02,
-};
-
-enum et61x251_i2c_interface {
- ET61X251_I2C_2WIRES,
- ET61X251_I2C_3WIRES,
-};
-
-/* Repeat start condition when RSTA is high */
-enum et61x251_i2c_rsta {
- ET61X251_I2C_RSTA_STOP = 0x00, /* stop then start */
- ET61X251_I2C_RSTA_REPEAT = 0x01, /* repeat start */
-};
-
-#define ET61X251_MAX_CTRLS (V4L2_CID_LASTP1-V4L2_CID_BASE+10)
-
-struct et61x251_sensor {
- char name[32];
-
- enum et61x251_i2c_sysfs_ops sysfs_ops;
-
- enum et61x251_i2c_interface interface;
- u8 i2c_slave_id;
- enum et61x251_i2c_rsta rsta;
- struct v4l2_rect active_pixel; /* left and top define FVSX and FVSY */
-
- struct v4l2_queryctrl qctrl[ET61X251_MAX_CTRLS];
- struct v4l2_cropcap cropcap;
- struct v4l2_pix_format pix_format;
-
- int (*init)(struct et61x251_device* cam);
- int (*get_ctrl)(struct et61x251_device* cam,
- struct v4l2_control* ctrl);
- int (*set_ctrl)(struct et61x251_device* cam,
- const struct v4l2_control* ctrl);
- int (*set_crop)(struct et61x251_device* cam,
- const struct v4l2_rect* rect);
- int (*set_pix_format)(struct et61x251_device* cam,
- const struct v4l2_pix_format* pix);
-
- /* Private */
- struct v4l2_queryctrl _qctrl[ET61X251_MAX_CTRLS];
- struct v4l2_rect _rect;
-};
-
-#endif /* _ET61X251_SENSOR_H_ */
diff --git a/drivers/media/video/et61x251/et61x251_tas5130d1b.c b/drivers/media/video/et61x251/et61x251_tas5130d1b.c
deleted file mode 100644
index ced2e167935d..000000000000
--- a/drivers/media/video/et61x251/et61x251_tas5130d1b.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/***************************************************************************
- * Plug-in for TAS5130D1B image sensor connected to the ET61X[12]51 *
- * PC Camera Controllers *
- * *
- * Copyright (C) 2006-2007 by Luca Risolia <luca.risolia@studio.unibo.it> *
- * *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License as published by *
- * the Free Software Foundation; either version 2 of the License, or *
- * (at your option) any later version. *
- * *
- * This program is distributed in the hope that it will be useful, *
- * but WITHOUT ANY WARRANTY; without even the implied warranty of *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
- * GNU General Public License for more details. *
- * *
- * You should have received a copy of the GNU General Public License *
- * along with this program; if not, write to the Free Software *
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
- ***************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include "et61x251_sensor.h"
-
-
-static int tas5130d1b_init(struct et61x251_device* cam)
-{
- int err = 0;
-
- err += et61x251_write_reg(cam, 0x14, 0x01);
- err += et61x251_write_reg(cam, 0x1b, 0x02);
- err += et61x251_write_reg(cam, 0x02, 0x12);
- err += et61x251_write_reg(cam, 0x0e, 0x60);
- err += et61x251_write_reg(cam, 0x80, 0x61);
- err += et61x251_write_reg(cam, 0xf0, 0x62);
- err += et61x251_write_reg(cam, 0x03, 0x63);
- err += et61x251_write_reg(cam, 0x14, 0x64);
- err += et61x251_write_reg(cam, 0xf4, 0x65);
- err += et61x251_write_reg(cam, 0x01, 0x66);
- err += et61x251_write_reg(cam, 0x05, 0x67);
- err += et61x251_write_reg(cam, 0x8f, 0x68);
- err += et61x251_write_reg(cam, 0x0f, 0x8d);
- err += et61x251_write_reg(cam, 0x08, 0x8e);
-
- return err;
-}
-
-
-static int tas5130d1b_set_ctrl(struct et61x251_device* cam,
- const struct v4l2_control* ctrl)
-{
- int err = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_GAIN:
- err += et61x251_i2c_raw_write(cam, 2, 0x20,
- 0xf6-ctrl->value, 0, 0, 0,
- 0, 0, 0, 0);
- break;
- case V4L2_CID_EXPOSURE:
- err += et61x251_i2c_raw_write(cam, 2, 0x40,
- 0x47-ctrl->value, 0, 0, 0,
- 0, 0, 0, 0);
- break;
- default:
- return -EINVAL;
- }
-
- return err ? -EIO : 0;
-}
-
-
-static const struct et61x251_sensor tas5130d1b = {
- .name = "TAS5130D1B",
- .interface = ET61X251_I2C_3WIRES,
- .rsta = ET61X251_I2C_RSTA_STOP,
- .active_pixel = {
- .left = 106,
- .top = 13,
- },
- .init = &tas5130d1b_init,
- .qctrl = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "global gain",
- .minimum = 0x00,
- .maximum = 0xf6,
- .step = 0x02,
- .default_value = 0x0d,
- .flags = 0,
- },
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
- .minimum = 0x00,
- .maximum = 0x47,
- .step = 0x01,
- .default_value = 0x23,
- .flags = 0,
- },
- },
- .set_ctrl = &tas5130d1b_set_ctrl,
- .cropcap = {
- .bounds = {
- .left = 0,
- .top = 0,
- .width = 640,
- .height = 480,
- },
- .defrect = {
- .left = 0,
- .top = 0,
- .width = 640,
- .height = 480,
- },
- },
- .pix_format = {
- .width = 640,
- .height = 480,
- .pixelformat = V4L2_PIX_FMT_SBGGR8,
- .priv = 8,
- },
-};
-
-
-int et61x251_probe_tas5130d1b(struct et61x251_device* cam)
-{
- const struct usb_device_id tas5130d1b_id_table[] = {
- { USB_DEVICE(0x102c, 0x6251), },
- { }
- };
-
- /* Sensor detection is based on USB pid/vid */
- if (!et61x251_match_id(cam, tas5130d1b_id_table))
- return -ENODEV;
-
- et61x251_attach_sensor(cam, &tas5130d1b);
-
- return 0;
-}
diff --git a/drivers/media/video/fsl-viu.c b/drivers/media/video/fsl-viu.c
index 27e3e0c0b219..777486f7cadb 100644
--- a/drivers/media/video/fsl-viu.c
+++ b/drivers/media/video/fsl-viu.c
@@ -1544,6 +1544,10 @@ static int __devinit viu_of_probe(struct platform_device *op)
/* initialize locks */
mutex_init(&viu_dev->lock);
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &viu_dev->vdev->flags);
viu_dev->vdev->lock = &viu_dev->lock;
spin_lock_init(&viu_dev->slock);
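
For context on the comment added above: with V4L2_FL_LOCK_ALL_FOPS set, the V4L2 core takes vdev->lock around every file operation, not just ioctl. A minimal sketch of the intended end state after auditing, using a made-up example_dev driver (not part of this patch): the driver keeps vdev->lock for ioctl serialization only and does its own locking in read/poll/mmap.

	static int example_register(struct example_dev *dev)
	{
		mutex_init(&dev->lock);
		/* The V4L2 core serializes ioctls with this mutex; without
		 * V4L2_FL_LOCK_ALL_FOPS the other fops must take it themselves. */
		dev->vdev->lock = &dev->lock;
		return video_register_device(dev->vdev, VFL_TYPE_GRABBER, -1);
	}
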
diff --git a/drivers/media/video/gspca/Makefile b/drivers/media/video/gspca/Makefile
index 79ebe46e1ad7..c901da0bd657 100644
--- a/drivers/media/video/gspca/Makefile
+++ b/drivers/media/video/gspca/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_USB_GSPCA_VICAM) += gspca_vicam.o
obj-$(CONFIG_USB_GSPCA_XIRLINK_CIT) += gspca_xirlink_cit.o
obj-$(CONFIG_USB_GSPCA_ZC3XX) += gspca_zc3xx.o
-gspca_main-objs := gspca.o
+gspca_main-objs := gspca.o autogain_functions.o
gspca_benq-objs := benq.o
gspca_conex-objs := conex.o
gspca_cpia1-objs := cpia1.o
diff --git a/drivers/media/video/gspca/autogain_functions.c b/drivers/media/video/gspca/autogain_functions.c
new file mode 100644
index 000000000000..67db674bb044
--- /dev/null
+++ b/drivers/media/video/gspca/autogain_functions.c
@@ -0,0 +1,178 @@
+/*
+ * Functions for auto gain.
+ *
+ * Copyright (C) 2010-2012 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "gspca.h"
+
+/* auto gain and exposure algorithm based on the knee algorithm described here:
+ http://ytse.tricolour.net/docs/LowLightOptimization.html
+
+ Returns 0 if no changes were made, 1 if the gain and/or exposure settings
+ were changed. */
+int gspca_expo_autogain(
+ struct gspca_dev *gspca_dev,
+ int avg_lum,
+ int desired_avg_lum,
+ int deadzone,
+ int gain_knee,
+ int exposure_knee)
+{
+ s32 gain, orig_gain, exposure, orig_exposure;
+ int i, steps, retval = 0;
+
+ if (v4l2_ctrl_g_ctrl(gspca_dev->autogain) == 0)
+ return 0;
+
+ orig_gain = gain = v4l2_ctrl_g_ctrl(gspca_dev->gain);
+ orig_exposure = exposure = v4l2_ctrl_g_ctrl(gspca_dev->exposure);
+
+ /* If we are off by a multiple of the deadzone, do multiple steps to reach
+ the desired luminance fast (with the risk of a slight overshoot) */
+ steps = abs(desired_avg_lum - avg_lum) / deadzone;
+
+ PDEBUG(D_FRAM, "autogain: lum: %d, desired: %d, steps: %d",
+ avg_lum, desired_avg_lum, steps);
+
+ for (i = 0; i < steps; i++) {
+ if (avg_lum > desired_avg_lum) {
+ if (gain > gain_knee)
+ gain--;
+ else if (exposure > exposure_knee)
+ exposure--;
+ else if (gain > gspca_dev->gain->default_value)
+ gain--;
+ else if (exposure > gspca_dev->exposure->minimum)
+ exposure--;
+ else if (gain > gspca_dev->gain->minimum)
+ gain--;
+ else
+ break;
+ } else {
+ if (gain < gspca_dev->gain->default_value)
+ gain++;
+ else if (exposure < exposure_knee)
+ exposure++;
+ else if (gain < gain_knee)
+ gain++;
+ else if (exposure < gspca_dev->exposure->maximum)
+ exposure++;
+ else if (gain < gspca_dev->gain->maximum)
+ gain++;
+ else
+ break;
+ }
+ }
+
+ if (gain != orig_gain) {
+ v4l2_ctrl_s_ctrl(gspca_dev->gain, gain);
+ retval = 1;
+ }
+ if (exposure != orig_exposure) {
+ v4l2_ctrl_s_ctrl(gspca_dev->exposure, exposure);
+ retval = 1;
+ }
+
+ if (retval)
+ PDEBUG(D_FRAM, "autogain: changed gain: %d, expo: %d",
+ gain, exposure);
+ return retval;
+}
+EXPORT_SYMBOL(gspca_expo_autogain);
+
+/* Autogain + exposure algorithm for cameras with a coarse exposure control
+ (usually this means we can only control the clockdiv to change exposure).
+ Since changing the clockdiv so that the fps drops from 30 to 15 fps, for
+ example, leads to a huge exposure change (it effectively doubles), this
+ algorithm normally tries to only adjust the gain (between 40 and 80 %),
+ and only if that does not help does it change the exposure. This leads
+ to a much more stable image than using the knee algorithm, which at
+ certain points of the knee graph will only try to adjust exposure,
+ which leads to oscillation as one exposure step is huge.
+
+ Returns 0 if no changes were made, 1 if the gain and/or exposure settings
+ were changed. */
+int gspca_coarse_grained_expo_autogain(
+ struct gspca_dev *gspca_dev,
+ int avg_lum,
+ int desired_avg_lum,
+ int deadzone)
+{
+ s32 gain_low, gain_high, gain, orig_gain, exposure, orig_exposure;
+ int steps, retval = 0;
+
+ if (v4l2_ctrl_g_ctrl(gspca_dev->autogain) == 0)
+ return 0;
+
+ orig_gain = gain = v4l2_ctrl_g_ctrl(gspca_dev->gain);
+ orig_exposure = exposure = v4l2_ctrl_g_ctrl(gspca_dev->exposure);
+
+ gain_low = (gspca_dev->gain->maximum - gspca_dev->gain->minimum) /
+ 5 * 2 + gspca_dev->gain->minimum;
+ gain_high = (gspca_dev->gain->maximum - gspca_dev->gain->minimum) /
+ 5 * 4 + gspca_dev->gain->minimum;
+
+ /* If we are off by a multiple of the deadzone, do multiple steps to reach
+ the desired luminance fast (with the risk of a slight overshoot) */
+ steps = (desired_avg_lum - avg_lum) / deadzone;
+
+ PDEBUG(D_FRAM, "autogain: lum: %d, desired: %d, steps: %d",
+ avg_lum, desired_avg_lum, steps);
+
+ if ((gain + steps) > gain_high &&
+ exposure < gspca_dev->exposure->maximum) {
+ gain = gain_high;
+ gspca_dev->exp_too_low_cnt++;
+ gspca_dev->exp_too_high_cnt = 0;
+ } else if ((gain + steps) < gain_low &&
+ exposure > gspca_dev->exposure->minimum) {
+ gain = gain_low;
+ gspca_dev->exp_too_high_cnt++;
+ gspca_dev->exp_too_low_cnt = 0;
+ } else {
+ gain += steps;
+ if (gain > gspca_dev->gain->maximum)
+ gain = gspca_dev->gain->maximum;
+ else if (gain < gspca_dev->gain->minimum)
+ gain = gspca_dev->gain->minimum;
+ gspca_dev->exp_too_high_cnt = 0;
+ gspca_dev->exp_too_low_cnt = 0;
+ }
+
+ if (gspca_dev->exp_too_high_cnt > 3) {
+ exposure--;
+ gspca_dev->exp_too_high_cnt = 0;
+ } else if (gspca_dev->exp_too_low_cnt > 3) {
+ exposure++;
+ gspca_dev->exp_too_low_cnt = 0;
+ }
+
+ if (gain != orig_gain) {
+ v4l2_ctrl_s_ctrl(gspca_dev->gain, gain);
+ retval = 1;
+ }
+ if (exposure != orig_exposure) {
+ v4l2_ctrl_s_ctrl(gspca_dev->exposure, exposure);
+ retval = 1;
+ }
+
+ if (retval)
+ PDEBUG(D_FRAM, "autogain: changed gain: %d, expo: %d",
+ gain, exposure);
+ return retval;
+}
+EXPORT_SYMBOL(gspca_coarse_grained_expo_autogain);
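
A usage sketch for the newly exported helper, assuming a hypothetical subdriver whose packet-scan routine accumulates the average frame luminance in sd->avg_lum; the numeric constants are illustrative and not taken from any real subdriver:

	static void sd_dq_callback(struct gspca_dev *gspca_dev)
	{
		struct sd *sd = (struct sd *) gspca_dev;

		/* called from the dqbuf path with usb_lock held */
		gspca_expo_autogain(gspca_dev, sd->avg_lum,
				    100,	/* desired average luminance */
				    6,		/* deadzone */
				    50,		/* gain knee */
				    200);	/* exposure knee */
	}
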
diff --git a/drivers/media/video/gspca/autogain_functions.h b/drivers/media/video/gspca/autogain_functions.h
index 46777eee678b..d625eafe63eb 100644
--- a/drivers/media/video/gspca/autogain_functions.h
+++ b/drivers/media/video/gspca/autogain_functions.h
@@ -18,6 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#ifdef WANT_REGULAR_AUTOGAIN
/* auto gain and exposure algorithm based on the knee algorithm described here:
http://ytse.tricolour.net/docs/LowLightOptimization.html
@@ -91,7 +92,9 @@ static inline int auto_gain_n_exposure(
gain, exposure);
return retval;
}
+#endif
+#ifdef WANT_COARSE_EXPO_AUTOGAIN
/* Autogain + exposure algorithm for cameras with a coarse exposure control
(usually this means we can only control the clockdiv to change exposure)
As changing the clockdiv so that the fps drops from 30 to 15 fps for
@@ -103,7 +106,7 @@ static inline int auto_gain_n_exposure(
which leads to oscilating as one exposure step is huge.
Note this assumes that the sd struct for the cam in question has
- exp_too_high_cnt and exp_too_high_cnt int members for use by this function.
+ exp_too_low_cnt and exp_too_high_cnt int members for use by this function.
Returns 0 if no changes were made, 1 if the gain and or exposure settings
where changed. */
@@ -177,3 +180,4 @@ static inline int coarse_grained_expo_autogain(
gain, exposure);
return retval;
}
+#endif
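
Since the header-inline helpers are now compiled only on request, a subdriver that still uses them must define the matching macro before the include, and the coarse variant expects the two counters in its sd struct (per the note in the header). A sketch with an illustrative struct sd; the placement of the include after the struct follows from the helper using these sd members:

	struct sd {
		struct gspca_dev gspca_dev;	/* !! must be the first item */
		int exp_too_low_cnt, exp_too_high_cnt;	/* used by the helper */
		/* ... */
	};

	#define WANT_COARSE_EXPO_AUTOGAIN
	#include "autogain_functions.h"
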
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index ea17b5d94ea4..f39fee0fd10f 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -306,7 +306,7 @@ static void cx_sensor(struct gspca_dev*gspca_dev)
reg_w(gspca_dev, 0x0020, reg20, 8);
reg_w(gspca_dev, 0x0028, reg28, 8);
- reg_w(gspca_dev, 0x0010, reg10, 8);
+ reg_w(gspca_dev, 0x0010, reg10, 2);
reg_w_val(gspca_dev, 0x0092, 0x03);
switch (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) {
@@ -326,7 +326,7 @@ static void cx_sensor(struct gspca_dev*gspca_dev)
}
reg_w(gspca_dev, 0x007b, reg7b, 6);
reg_w_val(gspca_dev, 0x00f8, 0x00);
- reg_w(gspca_dev, 0x0010, reg10, 8);
+ reg_w(gspca_dev, 0x0010, reg10, 2);
reg_w_val(gspca_dev, 0x0098, 0x41);
for (i = 0; i < 11; i++) {
if (i == 3 || i == 5 || i == 8)
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index 0107513cd728..6e26c93b4656 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -94,7 +94,11 @@ static void dostream(struct work_struct *work)
/* loop reading a frame */
again:
- while (gspca_dev->present && gspca_dev->streaming) {
+ while (gspca_dev->dev && gspca_dev->streaming) {
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ break;
+#endif
/* request a frame */
mutex_lock(&gspca_dev->usb_lock);
@@ -102,7 +106,11 @@ again:
mutex_unlock(&gspca_dev->usb_lock);
if (ret < 0)
break;
- if (!gspca_dev->present || !gspca_dev->streaming)
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ break;
+#endif
+ if (!gspca_dev->dev || !gspca_dev->streaming)
break;
/* the frame comes in parts */
@@ -117,7 +125,11 @@ again:
* error. Just restart. */
goto again;
}
- if (!gspca_dev->present || !gspca_dev->streaming)
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ goto out;
+#endif
+ if (!gspca_dev->dev || !gspca_dev->streaming)
goto out;
if (len < FPIX_MAX_TRANSFER ||
(data[len - 2] == 0xff &&
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index c84e26006fc3..c549574c1c7e 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -405,6 +405,9 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
+ if (!sd->gspca_dev.present)
+ return;
+
return sd->dev_post_unset_alt(gspca_dev);
}
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index ca5a2b139d0b..137166d73945 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -38,6 +38,9 @@
#include <linux/uaccess.h>
#include <linux/ktime.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include "gspca.h"
@@ -592,16 +595,13 @@ static int gspca_set_alt0(struct gspca_dev *gspca_dev)
static void gspca_stream_off(struct gspca_dev *gspca_dev)
{
gspca_dev->streaming = 0;
- if (gspca_dev->present) {
- if (gspca_dev->sd_desc->stopN)
- gspca_dev->sd_desc->stopN(gspca_dev);
- destroy_urbs(gspca_dev);
- gspca_input_destroy_urb(gspca_dev);
- gspca_set_alt0(gspca_dev);
- gspca_input_create_urb(gspca_dev);
- }
-
- /* always call stop0 to free the subdriver's resources */
+ gspca_dev->usb_err = 0;
+ if (gspca_dev->sd_desc->stopN)
+ gspca_dev->sd_desc->stopN(gspca_dev);
+ destroy_urbs(gspca_dev);
+ gspca_input_destroy_urb(gspca_dev);
+ gspca_set_alt0(gspca_dev);
+ gspca_input_create_urb(gspca_dev);
if (gspca_dev->sd_desc->stop0)
gspca_dev->sd_desc->stop0(gspca_dev);
PDEBUG(D_STREAM, "stream off OK");
@@ -847,14 +847,6 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
struct ep_tb_s ep_tb[MAX_ALT];
int n, ret, xfer, alt, alt_idx;
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
-
- if (!gspca_dev->present) {
- ret = -ENODEV;
- goto unlock;
- }
-
/* reset the streaming variables */
gspca_dev->image = NULL;
gspca_dev->image_len = 0;
@@ -869,7 +861,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
if (gspca_dev->sd_desc->isoc_init) {
ret = gspca_dev->sd_desc->isoc_init(gspca_dev);
if (ret < 0)
- goto unlock;
+ return ret;
}
xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK
: USB_ENDPOINT_XFER_ISOC;
@@ -880,8 +872,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
ep = alt_xfer(&intf->altsetting[gspca_dev->alt], xfer);
if (ep == NULL) {
pr_err("bad altsetting %d\n", gspca_dev->alt);
- ret = -EIO;
- goto out;
+ return -EIO;
}
ep_tb[0].alt = gspca_dev->alt;
alt_idx = 1;
@@ -892,8 +883,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
alt_idx = build_isoc_ep_tb(gspca_dev, intf, ep_tb);
if (alt_idx <= 0) {
pr_err("no transfer endpoint found\n");
- ret = -EIO;
- goto unlock;
+ return -EIO;
}
}
@@ -988,8 +978,6 @@ retry:
}
out:
gspca_input_create_urb(gspca_dev);
-unlock:
- mutex_unlock(&gspca_dev->usb_lock);
return ret;
}
@@ -1006,6 +994,8 @@ static void gspca_set_default_mode(struct gspca_dev *gspca_dev)
/* set the current control values to their default values
* which may have changed in sd_init() */
+ /* does nothing if ctrl_handler == NULL */
+ v4l2_ctrl_handler_setup(gspca_dev->vdev.ctrl_handler);
ctrl = gspca_dev->cam.ctrls;
if (ctrl != NULL) {
for (i = 0;
@@ -1057,77 +1047,50 @@ static int gspca_get_mode(struct gspca_dev *gspca_dev,
static int vidioc_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
- int ret;
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
if (!gspca_dev->sd_desc->get_chip_ident)
- return -EINVAL;
+ return -ENOTTY;
if (!gspca_dev->sd_desc->get_register)
- return -EINVAL;
+ return -ENOTTY;
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
gspca_dev->usb_err = 0;
- if (gspca_dev->present)
- ret = gspca_dev->sd_desc->get_register(gspca_dev, reg);
- else
- ret = -ENODEV;
- mutex_unlock(&gspca_dev->usb_lock);
-
- return ret;
+ return gspca_dev->sd_desc->get_register(gspca_dev, reg);
}
static int vidioc_s_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
- int ret;
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
if (!gspca_dev->sd_desc->get_chip_ident)
- return -EINVAL;
+ return -ENOTTY;
if (!gspca_dev->sd_desc->set_register)
- return -EINVAL;
+ return -ENOTTY;
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
gspca_dev->usb_err = 0;
- if (gspca_dev->present)
- ret = gspca_dev->sd_desc->set_register(gspca_dev, reg);
- else
- ret = -ENODEV;
- mutex_unlock(&gspca_dev->usb_lock);
-
- return ret;
+ return gspca_dev->sd_desc->set_register(gspca_dev, reg);
}
#endif
static int vidioc_g_chip_ident(struct file *file, void *priv,
struct v4l2_dbg_chip_ident *chip)
{
- int ret;
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
if (!gspca_dev->sd_desc->get_chip_ident)
- return -EINVAL;
+ return -ENOTTY;
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
gspca_dev->usb_err = 0;
- if (gspca_dev->present)
- ret = gspca_dev->sd_desc->get_chip_ident(gspca_dev, chip);
- else
- ret = -ENODEV;
- mutex_unlock(&gspca_dev->usb_lock);
-
- return ret;
+ return gspca_dev->sd_desc->get_chip_ident(gspca_dev, chip);
}
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *fmtdesc)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
int i, j, index;
__u32 fmt_tb[8];
@@ -1169,7 +1132,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
int mode;
mode = gspca_dev->curr_mode;
@@ -1214,7 +1177,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file,
void *priv,
struct v4l2_format *fmt)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
int ret;
ret = try_fmt_vid_cap(gspca_dev, fmt);
@@ -1226,7 +1189,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file,
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
int ret;
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
@@ -1265,7 +1228,7 @@ out:
static int vidioc_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
int i;
__u32 index = 0;
@@ -1291,7 +1254,7 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
static int vidioc_enum_frameintervals(struct file *filp, void *priv,
struct v4l2_frmivalenum *fival)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(filp);
int mode = wxh_to_mode(gspca_dev, fival->width, fival->height);
__u32 i;
@@ -1316,31 +1279,30 @@ static int vidioc_enum_frameintervals(struct file *filp, void *priv,
return -EINVAL;
}
-static void gspca_release(struct video_device *vfd)
+static void gspca_release(struct v4l2_device *v4l2_device)
{
- struct gspca_dev *gspca_dev = container_of(vfd, struct gspca_dev, vdev);
+ struct gspca_dev *gspca_dev =
+ container_of(v4l2_device, struct gspca_dev, v4l2_dev);
PDEBUG(D_PROBE, "%s released",
video_device_node_name(&gspca_dev->vdev));
+ v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
+ v4l2_device_unregister(&gspca_dev->v4l2_dev);
kfree(gspca_dev->usb_buf);
kfree(gspca_dev);
}
static int dev_open(struct file *file)
{
- struct gspca_dev *gspca_dev;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
PDEBUG(D_STREAM, "[%s] open", current->comm);
- gspca_dev = (struct gspca_dev *) video_devdata(file);
- if (!gspca_dev->present)
- return -ENODEV;
/* protect the subdriver against rmmod */
if (!try_module_get(gspca_dev->module))
return -ENODEV;
- file->private_data = gspca_dev;
#ifdef GSPCA_DEBUG
/* activate the v4l2 debug */
if (gspca_debug & D_V4L2)
@@ -1350,49 +1312,44 @@ static int dev_open(struct file *file)
gspca_dev->vdev.debug &= ~(V4L2_DEBUG_IOCTL
| V4L2_DEBUG_IOCTL_ARG);
#endif
- return 0;
+ return v4l2_fh_open(file);
}
static int dev_close(struct file *file)
{
- struct gspca_dev *gspca_dev = file->private_data;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
PDEBUG(D_STREAM, "[%s] close", current->comm);
- if (mutex_lock_interruptible(&gspca_dev->queue_lock))
+
+ /* Needed for gspca_stream_off, always lock before queue_lock! */
+ if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
+ if (mutex_lock_interruptible(&gspca_dev->queue_lock)) {
+ mutex_unlock(&gspca_dev->usb_lock);
+ return -ERESTARTSYS;
+ }
+
/* if the file did the capture, free the streaming resources */
if (gspca_dev->capt_file == file) {
- if (gspca_dev->streaming) {
- mutex_lock(&gspca_dev->usb_lock);
- gspca_dev->usb_err = 0;
+ if (gspca_dev->streaming)
gspca_stream_off(gspca_dev);
- mutex_unlock(&gspca_dev->usb_lock);
- }
frame_free(gspca_dev);
}
- file->private_data = NULL;
module_put(gspca_dev->module);
mutex_unlock(&gspca_dev->queue_lock);
+ mutex_unlock(&gspca_dev->usb_lock);
PDEBUG(D_STREAM, "close done");
- return 0;
+ return v4l2_fh_release(file);
}
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct gspca_dev *gspca_dev = priv;
- int ret;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
- /* protect the access to the usb device */
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
- if (!gspca_dev->present) {
- ret = -ENODEV;
- goto out;
- }
strlcpy((char *) cap->driver, gspca_dev->sd_desc->name,
sizeof cap->driver);
if (gspca_dev->dev->product != NULL) {
@@ -1406,13 +1363,11 @@ static int vidioc_querycap(struct file *file, void *priv,
}
usb_make_path(gspca_dev->dev, (char *) cap->bus_info,
sizeof(cap->bus_info));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE
| V4L2_CAP_STREAMING
| V4L2_CAP_READWRITE;
- ret = 0;
-out:
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
}
static int get_ctrl(struct gspca_dev *gspca_dev,
@@ -1435,7 +1390,7 @@ static int get_ctrl(struct gspca_dev *gspca_dev,
static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *q_ctrl)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
const struct ctrl *ctrls;
struct gspca_ctrl *gspca_ctrl;
int i, idx;
@@ -1478,10 +1433,10 @@ static int vidioc_queryctrl(struct file *file, void *priv,
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
const struct ctrl *ctrls;
struct gspca_ctrl *gspca_ctrl;
- int idx, ret;
+ int idx;
idx = get_ctrl(gspca_dev, ctrl->id);
if (idx < 0)
@@ -1501,74 +1456,52 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
return -ERANGE;
}
PDEBUG(D_CONF, "set ctrl [%08x] = %d", ctrl->id, ctrl->value);
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
- if (!gspca_dev->present) {
- ret = -ENODEV;
- goto out;
- }
gspca_dev->usb_err = 0;
- if (ctrls->set != NULL) {
- ret = ctrls->set(gspca_dev, ctrl->value);
- goto out;
- }
+ if (ctrls->set != NULL)
+ return ctrls->set(gspca_dev, ctrl->value);
if (gspca_ctrl != NULL) {
gspca_ctrl->val = ctrl->value;
if (ctrls->set_control != NULL
&& gspca_dev->streaming)
ctrls->set_control(gspca_dev);
}
- ret = gspca_dev->usb_err;
-out:
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
+ return gspca_dev->usb_err;
}
static int vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
const struct ctrl *ctrls;
- int idx, ret;
+ int idx;
idx = get_ctrl(gspca_dev, ctrl->id);
if (idx < 0)
return -EINVAL;
ctrls = &gspca_dev->sd_desc->ctrls[idx];
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
- if (!gspca_dev->present) {
- ret = -ENODEV;
- goto out;
- }
gspca_dev->usb_err = 0;
- if (ctrls->get != NULL) {
- ret = ctrls->get(gspca_dev, &ctrl->value);
- goto out;
- }
+ if (ctrls->get != NULL)
+ return ctrls->get(gspca_dev, &ctrl->value);
if (gspca_dev->cam.ctrls != NULL)
ctrl->value = gspca_dev->cam.ctrls[idx].val;
- ret = 0;
-out:
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
+ return 0;
}
static int vidioc_querymenu(struct file *file, void *priv,
struct v4l2_querymenu *qmenu)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
if (!gspca_dev->sd_desc->querymenu)
- return -EINVAL;
+ return -ENOTTY;
return gspca_dev->sd_desc->querymenu(gspca_dev, qmenu);
}
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *input)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
if (input->index != 0)
return -EINVAL;
@@ -1595,7 +1528,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
int i, ret = 0, streaming;
i = rb->memory; /* (avoid compilation warning) */
@@ -1635,10 +1568,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
/* stop streaming */
streaming = gspca_dev->streaming;
if (streaming) {
- mutex_lock(&gspca_dev->usb_lock);
- gspca_dev->usb_err = 0;
gspca_stream_off(gspca_dev);
- mutex_unlock(&gspca_dev->usb_lock);
/* Don't restart the stream when switching from read
* to mmap mode */
@@ -1666,7 +1596,7 @@ out:
static int vidioc_querybuf(struct file *file, void *priv,
struct v4l2_buffer *v4l2_buf)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
struct gspca_frame *frame;
if (v4l2_buf->index < 0
@@ -1681,7 +1611,7 @@ static int vidioc_querybuf(struct file *file, void *priv,
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type buf_type)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
int ret;
if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1722,7 +1652,7 @@ out:
static int vidioc_streamoff(struct file *file, void *priv,
enum v4l2_buf_type buf_type)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
int ret;
if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1743,13 +1673,7 @@ static int vidioc_streamoff(struct file *file, void *priv,
}
/* stop streaming */
- if (mutex_lock_interruptible(&gspca_dev->usb_lock)) {
- ret = -ERESTARTSYS;
- goto out;
- }
- gspca_dev->usb_err = 0;
gspca_stream_off(gspca_dev);
- mutex_unlock(&gspca_dev->usb_lock);
/* In case another thread is waiting in dqbuf */
wake_up_interruptible(&gspca_dev->wq);
@@ -1766,71 +1690,44 @@ out:
static int vidioc_g_jpegcomp(struct file *file, void *priv,
struct v4l2_jpegcompression *jpegcomp)
{
- struct gspca_dev *gspca_dev = priv;
- int ret;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
if (!gspca_dev->sd_desc->get_jcomp)
- return -EINVAL;
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
+ return -ENOTTY;
gspca_dev->usb_err = 0;
- if (gspca_dev->present)
- ret = gspca_dev->sd_desc->get_jcomp(gspca_dev, jpegcomp);
- else
- ret = -ENODEV;
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
+ return gspca_dev->sd_desc->get_jcomp(gspca_dev, jpegcomp);
}
static int vidioc_s_jpegcomp(struct file *file, void *priv,
struct v4l2_jpegcompression *jpegcomp)
{
- struct gspca_dev *gspca_dev = priv;
- int ret;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
if (!gspca_dev->sd_desc->set_jcomp)
- return -EINVAL;
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
+ return -ENOTTY;
gspca_dev->usb_err = 0;
- if (gspca_dev->present)
- ret = gspca_dev->sd_desc->set_jcomp(gspca_dev, jpegcomp);
- else
- ret = -ENODEV;
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
+ return gspca_dev->sd_desc->set_jcomp(gspca_dev, jpegcomp);
}
static int vidioc_g_parm(struct file *filp, void *priv,
struct v4l2_streamparm *parm)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(filp);
parm->parm.capture.readbuffers = gspca_dev->nbufread;
if (gspca_dev->sd_desc->get_streamparm) {
- int ret;
-
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
- if (gspca_dev->present) {
- gspca_dev->usb_err = 0;
- gspca_dev->sd_desc->get_streamparm(gspca_dev, parm);
- ret = gspca_dev->usb_err;
- } else {
- ret = -ENODEV;
- }
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
+ gspca_dev->usb_err = 0;
+ gspca_dev->sd_desc->get_streamparm(gspca_dev, parm);
+ return gspca_dev->usb_err;
}
-
return 0;
}
static int vidioc_s_parm(struct file *filp, void *priv,
struct v4l2_streamparm *parm)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(filp);
int n;
n = parm->parm.capture.readbuffers;
@@ -1840,19 +1737,9 @@ static int vidioc_s_parm(struct file *filp, void *priv,
gspca_dev->nbufread = n;
if (gspca_dev->sd_desc->set_streamparm) {
- int ret;
-
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
- if (gspca_dev->present) {
- gspca_dev->usb_err = 0;
- gspca_dev->sd_desc->set_streamparm(gspca_dev, parm);
- ret = gspca_dev->usb_err;
- } else {
- ret = -ENODEV;
- }
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
+ gspca_dev->usb_err = 0;
+ gspca_dev->sd_desc->set_streamparm(gspca_dev, parm);
+ return gspca_dev->usb_err;
}
return 0;
@@ -1860,7 +1747,7 @@ static int vidioc_s_parm(struct file *filp, void *priv,
static int dev_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct gspca_dev *gspca_dev = file->private_data;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
struct gspca_frame *frame;
struct page *page;
unsigned long addr, start, size;
@@ -1872,10 +1759,6 @@ static int dev_mmap(struct file *file, struct vm_area_struct *vma)
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
- if (!gspca_dev->present) {
- ret = -ENODEV;
- goto out;
- }
if (gspca_dev->capt_file != file) {
ret = -EINVAL;
goto out;
@@ -1963,7 +1846,7 @@ static int frame_ready(struct gspca_dev *gspca_dev, struct file *file,
static int vidioc_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *v4l2_buf)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
struct gspca_frame *frame;
int i, j, ret;
@@ -2003,14 +1886,6 @@ static int vidioc_dqbuf(struct file *file, void *priv,
gspca_dev->fr_o = (i + 1) % GSPCA_MAX_FRAMES;
- if (gspca_dev->sd_desc->dq_callback) {
- mutex_lock(&gspca_dev->usb_lock);
- gspca_dev->usb_err = 0;
- if (gspca_dev->present)
- gspca_dev->sd_desc->dq_callback(gspca_dev);
- mutex_unlock(&gspca_dev->usb_lock);
- }
-
frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_DONE;
memcpy(v4l2_buf, &frame->v4l2_buf, sizeof *v4l2_buf);
PDEBUG(D_FRAM, "dqbuf %d", j);
@@ -2027,6 +1902,15 @@ static int vidioc_dqbuf(struct file *file, void *priv,
}
out:
mutex_unlock(&gspca_dev->queue_lock);
+
+ if (ret == 0 && gspca_dev->sd_desc->dq_callback) {
+ mutex_lock(&gspca_dev->usb_lock);
+ gspca_dev->usb_err = 0;
+ if (gspca_dev->present)
+ gspca_dev->sd_desc->dq_callback(gspca_dev);
+ mutex_unlock(&gspca_dev->usb_lock);
+ }
+
return ret;
}
@@ -2039,7 +1923,7 @@ out:
static int vidioc_qbuf(struct file *file, void *priv,
struct v4l2_buffer *v4l2_buf)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
struct gspca_frame *frame;
int i, index, ret;
@@ -2098,6 +1982,10 @@ static int read_alloc(struct gspca_dev *gspca_dev,
int i, ret;
PDEBUG(D_STREAM, "read alloc");
+
+ if (mutex_lock_interruptible(&gspca_dev->usb_lock))
+ return -ERESTARTSYS;
+
if (gspca_dev->nframes == 0) {
struct v4l2_requestbuffers rb;
@@ -2108,7 +1996,7 @@ static int read_alloc(struct gspca_dev *gspca_dev,
ret = vidioc_reqbufs(file, gspca_dev, &rb);
if (ret != 0) {
PDEBUG(D_STREAM, "read reqbuf err %d", ret);
- return ret;
+ goto out;
}
memset(&v4l2_buf, 0, sizeof v4l2_buf);
v4l2_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -2118,61 +2006,69 @@ static int read_alloc(struct gspca_dev *gspca_dev,
ret = vidioc_qbuf(file, gspca_dev, &v4l2_buf);
if (ret != 0) {
PDEBUG(D_STREAM, "read qbuf err: %d", ret);
- return ret;
+ goto out;
}
}
- gspca_dev->memory = GSPCA_MEMORY_READ;
}
/* start streaming */
ret = vidioc_streamon(file, gspca_dev, V4L2_BUF_TYPE_VIDEO_CAPTURE);
if (ret != 0)
PDEBUG(D_STREAM, "read streamon err %d", ret);
+out:
+ mutex_unlock(&gspca_dev->usb_lock);
return ret;
}
static unsigned int dev_poll(struct file *file, poll_table *wait)
{
- struct gspca_dev *gspca_dev = file->private_data;
- int ret;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
+ unsigned long req_events = poll_requested_events(wait);
+ int ret = 0;
PDEBUG(D_FRAM, "poll");
- poll_wait(file, &gspca_dev->wq, wait);
+ if (req_events & POLLPRI)
+ ret |= v4l2_ctrl_poll(file, wait);
- /* if reqbufs is not done, the user would use read() */
- if (gspca_dev->memory == GSPCA_MEMORY_NO) {
- ret = read_alloc(gspca_dev, file);
- if (ret != 0)
- return POLLERR;
- }
+ if (req_events & (POLLIN | POLLRDNORM)) {
+ /* if reqbufs is not done, the user would use read() */
+ if (gspca_dev->memory == GSPCA_MEMORY_NO) {
+ if (read_alloc(gspca_dev, file) != 0) {
+ ret |= POLLERR;
+ goto out;
+ }
+ }
- if (mutex_lock_interruptible(&gspca_dev->queue_lock) != 0)
- return POLLERR;
+ poll_wait(file, &gspca_dev->wq, wait);
- /* check if an image has been received */
- if (gspca_dev->fr_o != atomic_read(&gspca_dev->fr_i))
- ret = POLLIN | POLLRDNORM; /* yes */
- else
- ret = 0;
- mutex_unlock(&gspca_dev->queue_lock);
+ /* check if an image has been received */
+ if (mutex_lock_interruptible(&gspca_dev->queue_lock) != 0) {
+ ret |= POLLERR;
+ goto out;
+ }
+ if (gspca_dev->fr_o != atomic_read(&gspca_dev->fr_i))
+ ret |= POLLIN | POLLRDNORM;
+ mutex_unlock(&gspca_dev->queue_lock);
+ }
+
+out:
if (!gspca_dev->present)
- return POLLHUP;
+ ret |= POLLHUP;
+
return ret;
}
static ssize_t dev_read(struct file *file, char __user *data,
size_t count, loff_t *ppos)
{
- struct gspca_dev *gspca_dev = file->private_data;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
struct gspca_frame *frame;
struct v4l2_buffer v4l2_buf;
struct timeval timestamp;
int n, ret, ret2;
PDEBUG(D_FRAM, "read (%zd)", count);
- if (!gspca_dev->present)
- return -ENODEV;
if (gspca_dev->memory == GSPCA_MEMORY_NO) { /* first time ? */
ret = read_alloc(gspca_dev, file);
if (ret != 0)
@@ -2266,13 +2162,15 @@ static const struct v4l2_ioctl_ops dev_ioctl_ops = {
.vidioc_s_register = vidioc_s_register,
#endif
.vidioc_g_chip_ident = vidioc_g_chip_ident,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static const struct video_device gspca_template = {
.name = "gspca main driver",
.fops = &dev_fops,
.ioctl_ops = &dev_ioctl_ops,
- .release = gspca_release,
+ .release = video_device_release_empty, /* We use v4l2_dev.release */
};
/* initialize the controls */
@@ -2344,9 +2242,24 @@ int gspca_dev_probe2(struct usb_interface *intf,
}
}
+ gspca_dev->v4l2_dev.release = gspca_release;
+ ret = v4l2_device_register(&intf->dev, &gspca_dev->v4l2_dev);
+ if (ret)
+ goto out;
gspca_dev->sd_desc = sd_desc;
gspca_dev->nbufread = 2;
gspca_dev->empty_packet = -1; /* don't check the empty packets */
+ gspca_dev->vdev = gspca_template;
+ gspca_dev->vdev.v4l2_dev = &gspca_dev->v4l2_dev;
+ video_set_drvdata(&gspca_dev->vdev, gspca_dev);
+ set_bit(V4L2_FL_USE_FH_PRIO, &gspca_dev->vdev.flags);
+ gspca_dev->module = module;
+ gspca_dev->present = 1;
+
+ mutex_init(&gspca_dev->usb_lock);
+ gspca_dev->vdev.lock = &gspca_dev->usb_lock;
+ mutex_init(&gspca_dev->queue_lock);
+ init_waitqueue_head(&gspca_dev->wq);
/* configure the subdriver and initialize the USB device */
ret = sd_desc->config(gspca_dev, id);
@@ -2357,21 +2270,26 @@ int gspca_dev_probe2(struct usb_interface *intf,
ret = sd_desc->init(gspca_dev);
if (ret < 0)
goto out;
+ if (sd_desc->init_controls)
+ ret = sd_desc->init_controls(gspca_dev);
+ if (ret < 0)
+ goto out;
gspca_set_default_mode(gspca_dev);
ret = gspca_input_connect(gspca_dev);
if (ret)
goto out;
- mutex_init(&gspca_dev->usb_lock);
- mutex_init(&gspca_dev->queue_lock);
- init_waitqueue_head(&gspca_dev->wq);
+ /*
+ * Don't take usb_lock for these ioctls. This improves latency if
+ * usb_lock is taken for a long time, e.g. when changing a control
+ * value, and a new frame is ready to be dequeued.
+ */
+ v4l2_disable_ioctl_locking(&gspca_dev->vdev, VIDIOC_DQBUF);
+ v4l2_disable_ioctl_locking(&gspca_dev->vdev, VIDIOC_QBUF);
+ v4l2_disable_ioctl_locking(&gspca_dev->vdev, VIDIOC_QUERYBUF);
/* init video stuff */
- memcpy(&gspca_dev->vdev, &gspca_template, sizeof gspca_template);
- gspca_dev->vdev.parent = &intf->dev;
- gspca_dev->module = module;
- gspca_dev->present = 1;
ret = video_register_device(&gspca_dev->vdev,
VFL_TYPE_GRABBER,
-1);
@@ -2391,6 +2309,7 @@ out:
if (gspca_dev->input_dev)
input_unregister_device(gspca_dev->input_dev);
#endif
+ v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
kfree(gspca_dev->usb_buf);
kfree(gspca_dev);
return ret;
@@ -2437,11 +2356,12 @@ void gspca_disconnect(struct usb_interface *intf)
PDEBUG(D_PROBE, "%s disconnect",
video_device_node_name(&gspca_dev->vdev));
+
mutex_lock(&gspca_dev->usb_lock);
+ usb_set_intfdata(intf, NULL);
+ gspca_dev->dev = NULL;
gspca_dev->present = 0;
- wake_up_interruptible(&gspca_dev->wq);
-
destroy_urbs(gspca_dev);
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
@@ -2452,18 +2372,19 @@ void gspca_disconnect(struct usb_interface *intf)
input_unregister_device(input_dev);
}
#endif
+ /* Free subdriver's streaming resources / stop sd workqueue(s) */
+ if (gspca_dev->sd_desc->stop0 && gspca_dev->streaming)
+ gspca_dev->sd_desc->stop0(gspca_dev);
+ gspca_dev->streaming = 0;
+ wake_up_interruptible(&gspca_dev->wq);
- /* the device is freed at exit of this function */
- gspca_dev->dev = NULL;
- mutex_unlock(&gspca_dev->usb_lock);
+ v4l2_device_disconnect(&gspca_dev->v4l2_dev);
+ video_unregister_device(&gspca_dev->vdev);
- usb_set_intfdata(intf, NULL);
+ mutex_unlock(&gspca_dev->usb_lock);
- /* release the device */
/* (this will call gspca_release() immediately or on last close) */
- video_unregister_device(&gspca_dev->vdev);
-
-/* PDEBUG(D_PROBE, "disconnect complete"); */
+ v4l2_device_put(&gspca_dev->v4l2_dev);
}
EXPORT_SYMBOL(gspca_disconnect);
@@ -2474,7 +2395,9 @@ int gspca_suspend(struct usb_interface *intf, pm_message_t message)
if (!gspca_dev->streaming)
return 0;
+ mutex_lock(&gspca_dev->usb_lock);
gspca_dev->frozen = 1; /* avoid urb error messages */
+ gspca_dev->usb_err = 0;
if (gspca_dev->sd_desc->stopN)
gspca_dev->sd_desc->stopN(gspca_dev);
destroy_urbs(gspca_dev);
@@ -2482,6 +2405,7 @@ int gspca_suspend(struct usb_interface *intf, pm_message_t message)
gspca_set_alt0(gspca_dev);
if (gspca_dev->sd_desc->stop0)
gspca_dev->sd_desc->stop0(gspca_dev);
+ mutex_unlock(&gspca_dev->usb_lock);
return 0;
}
EXPORT_SYMBOL(gspca_suspend);
@@ -2489,105 +2413,28 @@ EXPORT_SYMBOL(gspca_suspend);
int gspca_resume(struct usb_interface *intf)
{
struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
+ int streaming, ret = 0;
+ mutex_lock(&gspca_dev->usb_lock);
gspca_dev->frozen = 0;
+ gspca_dev->usb_err = 0;
gspca_dev->sd_desc->init(gspca_dev);
gspca_input_create_urb(gspca_dev);
- if (gspca_dev->streaming)
- return gspca_init_transfer(gspca_dev);
- return 0;
+ /*
+ * Most subdrivers send all ctrl values on sd_start and thus
+	 * only write to the device registers in s_ctrl when streaming, so
+	 * clear streaming here to avoid setting all ctrls twice.
+ */
+ streaming = gspca_dev->streaming;
+ gspca_dev->streaming = 0;
+ v4l2_ctrl_handler_setup(gspca_dev->vdev.ctrl_handler);
+ if (streaming)
+ ret = gspca_init_transfer(gspca_dev);
+ mutex_unlock(&gspca_dev->usb_lock);
+ return ret;
}
EXPORT_SYMBOL(gspca_resume);
#endif
-/* -- cam driver utility functions -- */
-
-/* auto gain and exposure algorithm based on the knee algorithm described here:
- http://ytse.tricolour.net/docs/LowLightOptimization.html
-
- Returns 0 if no changes were made, 1 if the gain and or exposure settings
- where changed. */
-int gspca_auto_gain_n_exposure(struct gspca_dev *gspca_dev, int avg_lum,
- int desired_avg_lum, int deadzone, int gain_knee, int exposure_knee)
-{
- int i, steps, gain, orig_gain, exposure, orig_exposure, autogain;
- const struct ctrl *gain_ctrl = NULL;
- const struct ctrl *exposure_ctrl = NULL;
- const struct ctrl *autogain_ctrl = NULL;
- int retval = 0;
-
- for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
- if (gspca_dev->ctrl_dis & (1 << i))
- continue;
- if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_GAIN)
- gain_ctrl = &gspca_dev->sd_desc->ctrls[i];
- if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_EXPOSURE)
- exposure_ctrl = &gspca_dev->sd_desc->ctrls[i];
- if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_AUTOGAIN)
- autogain_ctrl = &gspca_dev->sd_desc->ctrls[i];
- }
- if (!gain_ctrl || !exposure_ctrl || !autogain_ctrl) {
- PDEBUG(D_ERR, "Error: gspca_auto_gain_n_exposure called "
- "on cam without (auto)gain/exposure");
- return 0;
- }
-
- if (gain_ctrl->get(gspca_dev, &gain) ||
- exposure_ctrl->get(gspca_dev, &exposure) ||
- autogain_ctrl->get(gspca_dev, &autogain) || !autogain)
- return 0;
-
- orig_gain = gain;
- orig_exposure = exposure;
-
- /* If we are of a multiple of deadzone, do multiple steps to reach the
- desired lumination fast (with the risc of a slight overshoot) */
- steps = abs(desired_avg_lum - avg_lum) / deadzone;
-
- PDEBUG(D_FRAM, "autogain: lum: %d, desired: %d, steps: %d",
- avg_lum, desired_avg_lum, steps);
-
- for (i = 0; i < steps; i++) {
- if (avg_lum > desired_avg_lum) {
- if (gain > gain_knee)
- gain--;
- else if (exposure > exposure_knee)
- exposure--;
- else if (gain > gain_ctrl->qctrl.default_value)
- gain--;
- else if (exposure > exposure_ctrl->qctrl.minimum)
- exposure--;
- else if (gain > gain_ctrl->qctrl.minimum)
- gain--;
- else
- break;
- } else {
- if (gain < gain_ctrl->qctrl.default_value)
- gain++;
- else if (exposure < exposure_knee)
- exposure++;
- else if (gain < gain_knee)
- gain++;
- else if (exposure < exposure_ctrl->qctrl.maximum)
- exposure++;
- else if (gain < gain_ctrl->qctrl.maximum)
- gain++;
- else
- break;
- }
- }
-
- if (gain != orig_gain) {
- gain_ctrl->set(gspca_dev, gain);
- retval = 1;
- }
- if (exposure != orig_exposure) {
- exposure_ctrl->set(gspca_dev, exposure);
- retval = 1;
- }
-
- return retval;
-}
-EXPORT_SYMBOL(gspca_auto_gain_n_exposure);
/* -- module insert / remove -- */
static int __init gspca_init(void)
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 589009f4496f..dc688c7f5e48 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -6,6 +6,8 @@
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
#include <linux/mutex.h>
/* compilation option */
@@ -115,6 +117,7 @@ struct sd_desc {
/* mandatory operations */
cam_cf_op config; /* called on probe */
cam_op init; /* called on probe and resume */
+ cam_op init_controls; /* called on probe */
cam_op start; /* called on stream on after URBs creation */
cam_pkt_op pkt_scan;
/* optional operations */
@@ -158,8 +161,10 @@ struct gspca_frame {
struct gspca_dev {
struct video_device vdev; /* !! must be the first item */
struct module *module; /* subdriver handling the device */
+ struct v4l2_device v4l2_dev;
struct usb_device *dev;
struct file *capt_file; /* file doing video capture */
+ /* protected by queue_lock */
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
struct input_dev *input_dev;
char phys[64]; /* physical device path */
@@ -169,6 +174,16 @@ struct gspca_dev {
const struct sd_desc *sd_desc; /* subdriver description */
unsigned ctrl_dis; /* disabled controls (bit map) */
unsigned ctrl_inac; /* inactive controls (bit map) */
+ struct v4l2_ctrl_handler ctrl_handler;
+
+	/* autogain and exposure or gain control cluster; these are global
+	   because the autogain/exposure functions in autogain_functions.c use them */
+ struct {
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *gain;
+ int exp_too_low_cnt, exp_too_high_cnt;
+ };
#define USB_BUF_SZ 64
__u8 *usb_buf; /* buffer for USB exchanges */
@@ -189,7 +204,7 @@ struct gspca_dev {
u8 fr_o; /* next frame to dequeue */
__u8 last_packet_type;
__s8 empty_packet; /* if (-1) don't check empty packets */
- __u8 streaming;
+ __u8 streaming; /* protected by both mutexes (*) */
__u8 curr_mode; /* current camera mode */
__u32 pixfmt; /* current mode parameters */
@@ -211,6 +226,10 @@ struct gspca_dev {
__u8 iface; /* USB interface number */
__u8 alt; /* USB alternate setting */
u8 audio; /* presence of audio device */
+
+	/* (*) These variables are protected by both usb_lock and queue_lock,
+	   that is, any code setting them holds *both*, which means that
+	   any code reading them needs to hold at least one of them */
};
int gspca_dev_probe(struct usb_interface *intf,
@@ -232,6 +251,9 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
int gspca_suspend(struct usb_interface *intf, pm_message_t message);
int gspca_resume(struct usb_interface *intf);
#endif
-int gspca_auto_gain_n_exposure(struct gspca_dev *gspca_dev, int avg_lum,
+int gspca_expo_autogain(struct gspca_dev *gspca_dev, int avg_lum,
int desired_avg_lum, int deadzone, int gain_knee, int exposure_knee);
+int gspca_coarse_grained_expo_autogain(struct gspca_dev *gspca_dev,
+ int avg_lum, int desired_avg_lum, int deadzone);
+
#endif /* GSPCAV2_H */
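A minimal sketch of the locking rule documented above for the (*) fields; the helper names are hypothetical and only the rule stated in the comment is assumed (writers hold both mutexes, readers hold at least one), with the lock ordering shown purely for illustration:

    /* Illustrative only -- not part of the patch; follow the core's
     * actual lock ordering. */
    static void example_set_streaming(struct gspca_dev *gspca_dev, __u8 on)
    {
            mutex_lock(&gspca_dev->usb_lock);
            mutex_lock(&gspca_dev->queue_lock);
            gspca_dev->streaming = on;          /* a writer holds *both* locks */
            mutex_unlock(&gspca_dev->queue_lock);
            mutex_unlock(&gspca_dev->usb_lock);
    }

    static __u8 example_get_streaming(struct gspca_dev *gspca_dev)
    {
            __u8 on;

            mutex_lock(&gspca_dev->queue_lock); /* either lock suffices to read */
            on = gspca_dev->streaming;
            mutex_unlock(&gspca_dev->queue_lock);
            return on;
    }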
diff --git a/drivers/media/video/gspca/jl2005bcd.c b/drivers/media/video/gspca/jl2005bcd.c
index 53f58ef367cf..9c591c7c6f54 100644
--- a/drivers/media/video/gspca/jl2005bcd.c
+++ b/drivers/media/video/gspca/jl2005bcd.c
@@ -335,7 +335,11 @@ static void jl2005c_dostream(struct work_struct *work)
goto quit_stream;
}
- while (gspca_dev->present && gspca_dev->streaming) {
+ while (gspca_dev->dev && gspca_dev->streaming) {
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ break;
+#endif
/* Check if this is a new frame. If so, start the frame first */
if (!header_read) {
mutex_lock(&gspca_dev->usb_lock);
@@ -367,7 +371,7 @@ static void jl2005c_dostream(struct work_struct *work)
buffer, act_len);
header_read = 1;
}
- while (bytes_left > 0 && gspca_dev->present) {
+ while (bytes_left > 0 && gspca_dev->dev) {
data_len = bytes_left > JL2005C_MAX_TRANSFER ?
JL2005C_MAX_TRANSFER : bytes_left;
ret = usb_bulk_msg(gspca_dev->dev,
@@ -390,7 +394,7 @@ static void jl2005c_dostream(struct work_struct *work)
}
}
quit_stream:
- if (gspca_dev->present) {
+ if (gspca_dev->dev) {
mutex_lock(&gspca_dev->usb_lock);
jl2005c_stop(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index b0231465afae..ec7b21ee79fb 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -30,22 +30,19 @@ MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("GSPCA/Mars USB Camera Driver");
MODULE_LICENSE("GPL");
-/* controls */
-enum e_ctrl {
- BRIGHTNESS,
- COLORS,
- GAMMA,
- SHARPNESS,
- ILLUM_TOP,
- ILLUM_BOT,
- NCTRLS /* number of controls */
-};
-
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- struct gspca_ctrl ctrls[NCTRLS];
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *sharpness;
+ struct v4l2_ctrl *gamma;
+ struct { /* illuminator control cluster */
+ struct v4l2_ctrl *illum_top;
+ struct v4l2_ctrl *illum_bottom;
+ };
+ struct v4l2_ctrl *jpegqual;
u8 quality;
#define QUALITY_MIN 40
@@ -56,89 +53,10 @@ struct sd {
};
/* V4L2 controls supported by the driver */
-static void setbrightness(struct gspca_dev *gspca_dev);
-static void setcolors(struct gspca_dev *gspca_dev);
-static void setgamma(struct gspca_dev *gspca_dev);
-static void setsharpness(struct gspca_dev *gspca_dev);
-static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val);
-
-static const struct ctrl sd_ctrls[NCTRLS] = {
-[BRIGHTNESS] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 30,
- .step = 1,
- .default_value = 15,
- },
- .set_control = setbrightness
- },
-[COLORS] = {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Color",
- .minimum = 1,
- .maximum = 255,
- .step = 1,
- .default_value = 200,
- },
- .set_control = setcolors
- },
-[GAMMA] = {
- {
- .id = V4L2_CID_GAMMA,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gamma",
- .minimum = 0,
- .maximum = 3,
- .step = 1,
- .default_value = 1,
- },
- .set_control = setgamma
- },
-[SHARPNESS] = {
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Sharpness",
- .minimum = 0,
- .maximum = 2,
- .step = 1,
- .default_value = 1,
- },
- .set_control = setsharpness
- },
-[ILLUM_TOP] = {
- {
- .id = V4L2_CID_ILLUMINATORS_1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Top illuminator",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- .flags = V4L2_CTRL_FLAG_UPDATE,
- },
- .set = sd_setilluminator1
- },
-[ILLUM_BOT] = {
- {
- .id = V4L2_CID_ILLUMINATORS_2,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Bottom illuminator",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- .flags = V4L2_CTRL_FLAG_UPDATE,
- },
- .set = sd_setilluminator2
- },
-};
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val);
+static void setcolors(struct gspca_dev *gspca_dev, s32 val);
+static void setgamma(struct gspca_dev *gspca_dev, s32 val);
+static void setsharpness(struct gspca_dev *gspca_dev, s32 val);
static const struct v4l2_pix_format vga_mode[] = {
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
@@ -198,59 +116,130 @@ static void mi_w(struct gspca_dev *gspca_dev,
reg_w(gspca_dev, 4);
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
gspca_dev->usb_buf[0] = 0x61;
- gspca_dev->usb_buf[1] = sd->ctrls[BRIGHTNESS].val;
+ gspca_dev->usb_buf[1] = val;
reg_w(gspca_dev, 2);
}
-static void setcolors(struct gspca_dev *gspca_dev)
+static void setcolors(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- s16 val;
-
- val = sd->ctrls[COLORS].val;
gspca_dev->usb_buf[0] = 0x5f;
gspca_dev->usb_buf[1] = val << 3;
gspca_dev->usb_buf[2] = ((val >> 2) & 0xf8) | 0x04;
reg_w(gspca_dev, 3);
}
-static void setgamma(struct gspca_dev *gspca_dev)
+static void setgamma(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
gspca_dev->usb_buf[0] = 0x06;
- gspca_dev->usb_buf[1] = sd->ctrls[GAMMA].val * 0x40;
+ gspca_dev->usb_buf[1] = val * 0x40;
reg_w(gspca_dev, 2);
}
-static void setsharpness(struct gspca_dev *gspca_dev)
+static void setsharpness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
gspca_dev->usb_buf[0] = 0x67;
- gspca_dev->usb_buf[1] = sd->ctrls[SHARPNESS].val * 4 + 3;
+ gspca_dev->usb_buf[1] = val * 4 + 3;
reg_w(gspca_dev, 2);
}
-static void setilluminators(struct gspca_dev *gspca_dev)
+static void setilluminators(struct gspca_dev *gspca_dev, bool top, bool bottom)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
+ /* both are off if not streaming */
gspca_dev->usb_buf[0] = 0x22;
- if (sd->ctrls[ILLUM_TOP].val)
+ if (top)
gspca_dev->usb_buf[1] = 0x76;
- else if (sd->ctrls[ILLUM_BOT].val)
+ else if (bottom)
gspca_dev->usb_buf[1] = 0x7a;
else
gspca_dev->usb_buf[1] = 0x7e;
reg_w(gspca_dev, 2);
}
+static int mars_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (ctrl->id == V4L2_CID_ILLUMINATORS_1) {
+ /* only one can be on at a time */
+ if (ctrl->is_new && ctrl->val)
+ sd->illum_bottom->val = 0;
+ if (sd->illum_bottom->is_new && sd->illum_bottom->val)
+ sd->illum_top->val = 0;
+ }
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ setcolors(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_GAMMA:
+ setgamma(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_ILLUMINATORS_1:
+ setilluminators(gspca_dev, sd->illum_top->val,
+ sd->illum_bottom->val);
+ break;
+ case V4L2_CID_SHARPNESS:
+ setsharpness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ jpeg_set_qual(sd->jpeg_hdr, ctrl->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops mars_ctrl_ops = {
+ .s_ctrl = mars_s_ctrl,
+};
+
+/* this function is called at probe time */
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 7);
+ sd->brightness = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 30, 1, 15);
+ sd->saturation = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 200);
+ sd->gamma = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
+ V4L2_CID_GAMMA, 0, 3, 1, 1);
+ sd->sharpness = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0, 2, 1, 1);
+ sd->illum_top = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
+ V4L2_CID_ILLUMINATORS_1, 0, 1, 1, 0);
+ sd->illum_top->flags |= V4L2_CTRL_FLAG_UPDATE;
+ sd->illum_bottom = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
+ V4L2_CID_ILLUMINATORS_2, 0, 1, 1, 0);
+ sd->illum_bottom->flags |= V4L2_CTRL_FLAG_UPDATE;
+ sd->jpegqual = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ QUALITY_MIN, QUALITY_MAX, 1, QUALITY_DEF);
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ v4l2_ctrl_cluster(2, &sd->illum_top);
+ return 0;
+}
+
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
@@ -261,7 +250,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam = &gspca_dev->cam;
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
- cam->ctrls = sd->ctrls;
sd->quality = QUALITY_DEF;
return 0;
}
@@ -269,7 +257,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
- gspca_dev->ctrl_inac = (1 << ILLUM_TOP) | (1 << ILLUM_BOT);
return 0;
}
@@ -282,7 +269,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* create the JPEG header */
jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
0x21); /* JPEG 422 */
- jpeg_set_qual(sd->jpeg_hdr, sd->quality);
+ jpeg_set_qual(sd->jpeg_hdr, v4l2_ctrl_g_ctrl(sd->jpegqual));
data = gspca_dev->usb_buf;
@@ -301,7 +288,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
data[5] = 0x30; /* reg 4, MI, PAS5101 :
* 0x30 for 24mhz , 0x28 for 12mhz */
data[6] = 0x02; /* reg 5, H start - was 0x04 */
- data[7] = sd->ctrls[GAMMA].val * 0x40; /* reg 0x06: gamma */
+ data[7] = v4l2_ctrl_g_ctrl(sd->gamma) * 0x40; /* reg 0x06: gamma */
data[8] = 0x01; /* reg 7, V start - was 0x03 */
/* if (h_size == 320 ) */
/* data[9]= 0x56; * reg 8, 24MHz, 2:1 scale down */
@@ -333,16 +320,16 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* reg 0x5f/0x60 (LE) = saturation */
/* h (60): xxxx x100
* l (5f): xxxx x000 */
- data[2] = sd->ctrls[COLORS].val << 3;
- data[3] = ((sd->ctrls[COLORS].val >> 2) & 0xf8) | 0x04;
- data[4] = sd->ctrls[BRIGHTNESS].val; /* reg 0x61 = brightness */
+ data[2] = v4l2_ctrl_g_ctrl(sd->saturation) << 3;
+ data[3] = ((v4l2_ctrl_g_ctrl(sd->saturation) >> 2) & 0xf8) | 0x04;
+ data[4] = v4l2_ctrl_g_ctrl(sd->brightness); /* reg 0x61 = brightness */
data[5] = 0x00;
reg_w(gspca_dev, 6);
data[0] = 0x67;
/*jfm: from win trace*/
- data[1] = sd->ctrls[SHARPNESS].val * 4 + 3;
+ data[1] = v4l2_ctrl_g_ctrl(sd->sharpness) * 4 + 3;
data[2] = 0x14;
reg_w(gspca_dev, 3);
@@ -365,7 +352,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
data[1] = 0x4d; /* ISOC transferring enable... */
reg_w(gspca_dev, 2);
- gspca_dev->ctrl_inac = 0; /* activate the illuminator controls */
+ setilluminators(gspca_dev, v4l2_ctrl_g_ctrl(sd->illum_top),
+ v4l2_ctrl_g_ctrl(sd->illum_bottom));
+
return gspca_dev->usb_err;
}
@@ -373,11 +362,9 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- gspca_dev->ctrl_inac = (1 << ILLUM_TOP) | (1 << ILLUM_BOT);
- if (sd->ctrls[ILLUM_TOP].val || sd->ctrls[ILLUM_BOT].val) {
- sd->ctrls[ILLUM_TOP].val = 0;
- sd->ctrls[ILLUM_BOT].val = 0;
- setilluminators(gspca_dev);
+ if (v4l2_ctrl_g_ctrl(sd->illum_top) ||
+ v4l2_ctrl_g_ctrl(sd->illum_bottom)) {
+ setilluminators(gspca_dev, false, false);
msleep(20);
}
@@ -424,43 +411,16 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- /* only one illuminator may be on */
- sd->ctrls[ILLUM_TOP].val = val;
- if (val)
- sd->ctrls[ILLUM_BOT].val = 0;
- setilluminators(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- /* only one illuminator may be on */
- sd->ctrls[ILLUM_BOT].val = val;
- if (val)
- sd->ctrls[ILLUM_TOP].val = 0;
- setilluminators(gspca_dev);
- return gspca_dev->usb_err;
-}
-
static int sd_set_jcomp(struct gspca_dev *gspca_dev,
struct v4l2_jpegcompression *jcomp)
{
struct sd *sd = (struct sd *) gspca_dev;
+ int ret;
- if (jcomp->quality < QUALITY_MIN)
- sd->quality = QUALITY_MIN;
- else if (jcomp->quality > QUALITY_MAX)
- sd->quality = QUALITY_MAX;
- else
- sd->quality = jcomp->quality;
- if (gspca_dev->streaming)
- jpeg_set_qual(sd->jpeg_hdr, sd->quality);
+ ret = v4l2_ctrl_s_ctrl(sd->jpegqual, jcomp->quality);
+ if (ret)
+ return ret;
+ jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual);
return 0;
}
@@ -470,7 +430,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
memset(jcomp, 0, sizeof *jcomp);
- jcomp->quality = sd->quality;
+ jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual);
jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT
| V4L2_JPEG_MARKER_DQT;
return 0;
@@ -479,10 +439,9 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = NCTRLS,
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
@@ -513,6 +472,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
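Since the two illuminator controls above are clustered, writing one of them makes mars_s_ctrl() clear the other in the same call; a minimal, hypothetical userspace sketch (the device node path is an assumption):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            struct v4l2_control ctrl = {
                    .id = V4L2_CID_ILLUMINATORS_1,  /* top illuminator */
                    .value = 1,
            };
            int fd = open("/dev/video0", O_RDWR);   /* path is an assumption */

            if (fd < 0)
                    return 1;
            /* the driver zeroes V4L2_CID_ILLUMINATORS_2 as part of this call */
            ioctl(fd, VIDIOC_S_CTRL, &ctrl);
            close(fd);
            return 0;
    }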
diff --git a/drivers/media/video/gspca/nw80x.c b/drivers/media/video/gspca/nw80x.c
index 7167cac7359c..42e021931e60 100644
--- a/drivers/media/video/gspca/nw80x.c
+++ b/drivers/media/video/gspca/nw80x.c
@@ -2001,6 +2001,8 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
return gspca_dev->usb_err;
}
+#define WANT_REGULAR_AUTOGAIN
+#define WANT_COARSE_EXPO_AUTOGAIN
#include "autogain_functions.h"
static void do_autogain(struct gspca_dev *gspca_dev)
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 739e8a2a2d30..183457c5cfdb 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -2804,7 +2804,7 @@ static void ov7xx0_configure(struct sd *sd)
/* add OV7670 here
* it appears to be wrongly detected as a 7610 by default */
if (rc < 0) {
- PDEBUG(D_ERR, "Error detecting sensor type");
+ pr_err("Error detecting sensor type\n");
return;
}
if ((rc & 3) == 3) {
@@ -2832,12 +2832,12 @@ static void ov7xx0_configure(struct sd *sd)
/* try to read product id registers */
high = i2c_r(sd, 0x0a);
if (high < 0) {
- PDEBUG(D_ERR, "Error detecting camera chip PID");
+ pr_err("Error detecting camera chip PID\n");
return;
}
low = i2c_r(sd, 0x0b);
if (low < 0) {
- PDEBUG(D_ERR, "Error detecting camera chip VER");
+ pr_err("Error detecting camera chip VER\n");
return;
}
if (high == 0x76) {
@@ -2863,7 +2863,7 @@ static void ov7xx0_configure(struct sd *sd)
sd->sensor = SEN_OV7660;
break;
default:
- PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low);
+ pr_err("Unknown sensor: 0x76%02x\n", low);
return;
}
} else {
@@ -2884,7 +2884,7 @@ static void ov6xx0_configure(struct sd *sd)
/* Detect sensor (sub)type */
rc = i2c_r(sd, OV7610_REG_COM_I);
if (rc < 0) {
- PDEBUG(D_ERR, "Error detecting sensor type");
+ pr_err("Error detecting sensor type\n");
return;
}
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 04753391de3e..b5acb1e4b4e7 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -34,6 +34,8 @@
#include "gspca.h"
+#include <linux/fixp-arith.h>
+
#define OV534_REG_ADDRESS 0xf1 /* sensor address */
#define OV534_REG_SUBADDR 0xf2
#define OV534_REG_WRITE 0xf3
@@ -53,6 +55,8 @@ MODULE_LICENSE("GPL");
/* controls */
enum e_ctrl {
+ HUE,
+ SATURATION,
BRIGHTNESS,
CONTRAST,
GAIN,
@@ -63,7 +67,6 @@ enum e_ctrl {
SHARPNESS,
HFLIP,
VFLIP,
- COLORS,
LIGHTFREQ,
NCTRLS /* number of controls */
};
@@ -87,6 +90,8 @@ enum sensors {
};
/* V4L2 controls supported by the driver */
+static void sethue(struct gspca_dev *gspca_dev);
+static void setsaturation(struct gspca_dev *gspca_dev);
static void setbrightness(struct gspca_dev *gspca_dev);
static void setcontrast(struct gspca_dev *gspca_dev);
static void setgain(struct gspca_dev *gspca_dev);
@@ -96,13 +101,36 @@ static void setawb(struct gspca_dev *gspca_dev);
static void setaec(struct gspca_dev *gspca_dev);
static void setsharpness(struct gspca_dev *gspca_dev);
static void sethvflip(struct gspca_dev *gspca_dev);
-static void setcolors(struct gspca_dev *gspca_dev);
static void setlightfreq(struct gspca_dev *gspca_dev);
static int sd_start(struct gspca_dev *gspca_dev);
static void sd_stopN(struct gspca_dev *gspca_dev);
static const struct ctrl sd_ctrls[] = {
+[HUE] = {
+ {
+ .id = V4L2_CID_HUE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Hue",
+ .minimum = -90,
+ .maximum = 90,
+ .step = 1,
+ .default_value = 0,
+ },
+ .set_control = sethue
+ },
+[SATURATION] = {
+ {
+ .id = V4L2_CID_SATURATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Saturation",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 64,
+ },
+ .set_control = setsaturation
+ },
[BRIGHTNESS] = {
{
.id = V4L2_CID_BRIGHTNESS,
@@ -223,18 +251,6 @@ static const struct ctrl sd_ctrls[] = {
},
.set_control = sethvflip
},
-[COLORS] = {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 6,
- .step = 1,
- .default_value = 3,
- },
- .set_control = setcolors
- },
[LIGHTFREQ] = {
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
@@ -684,7 +700,7 @@ static const u8 sensor_init_772x[][2] = {
{ 0x9c, 0x20 },
{ 0x9e, 0x81 },
- { 0xa6, 0x04 },
+ { 0xa6, 0x07 },
{ 0x7e, 0x0c },
{ 0x7f, 0x16 },
{ 0x80, 0x2a },
@@ -955,6 +971,74 @@ static void set_frame_rate(struct gspca_dev *gspca_dev)
PDEBUG(D_PROBE, "frame_rate: %d", r->fps);
}
+static void sethue(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int val;
+
+ val = sd->ctrls[HUE].val;
+ if (sd->sensor == SENSOR_OV767x) {
+ /* TBD */
+ } else {
+ s16 huesin;
+ s16 huecos;
+
+ /* fixp_sin and fixp_cos accept only positive values, while
+ * our val is between -90 and 90
+ */
+ val += 360;
+
+ /* According to the datasheet the registers expect HUESIN and
+ * HUECOS to be the result of the trigonometric functions,
+ * scaled by 0x80.
+ *
+		 * The 0x100 here represents the maximum absolute value
+		 * returned by fixp_sin and fixp_cos, so the scaling treats
+		 * the result as lying in the interval [-1.0, 1.0].
+ */
+ huesin = fixp_sin(val) * 0x80 / 0x100;
+ huecos = fixp_cos(val) * 0x80 / 0x100;
+
+ if (huesin < 0) {
+ sccb_reg_write(gspca_dev, 0xab,
+ sccb_reg_read(gspca_dev, 0xab) | 0x2);
+ huesin = -huesin;
+ } else {
+ sccb_reg_write(gspca_dev, 0xab,
+ sccb_reg_read(gspca_dev, 0xab) & ~0x2);
+ }
+ sccb_reg_write(gspca_dev, 0xa9, (u8)huecos);
+ sccb_reg_write(gspca_dev, 0xaa, (u8)huesin);
+ }
+}
+
+static void setsaturation(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int val;
+
+ val = sd->ctrls[SATURATION].val;
+ if (sd->sensor == SENSOR_OV767x) {
+ int i;
+ static u8 color_tb[][6] = {
+ {0x42, 0x42, 0x00, 0x11, 0x30, 0x41},
+ {0x52, 0x52, 0x00, 0x16, 0x3c, 0x52},
+ {0x66, 0x66, 0x00, 0x1b, 0x4b, 0x66},
+ {0x80, 0x80, 0x00, 0x22, 0x5e, 0x80},
+ {0x9a, 0x9a, 0x00, 0x29, 0x71, 0x9a},
+ {0xb8, 0xb8, 0x00, 0x31, 0x87, 0xb8},
+ {0xdd, 0xdd, 0x00, 0x3b, 0xa2, 0xdd},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(color_tb[0]); i++)
+ sccb_reg_write(gspca_dev, 0x4f + i, color_tb[val][i]);
+ } else {
+ sccb_reg_write(gspca_dev, 0xa7, val); /* U saturation */
+ sccb_reg_write(gspca_dev, 0xa8, val); /* V saturation */
+ }
+}
+
static void setbrightness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1132,26 +1216,6 @@ static void sethvflip(struct gspca_dev *gspca_dev)
}
}
-static void setcolors(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- u8 val;
- int i;
- static u8 color_tb[][6] = {
- {0x42, 0x42, 0x00, 0x11, 0x30, 0x41},
- {0x52, 0x52, 0x00, 0x16, 0x3c, 0x52},
- {0x66, 0x66, 0x00, 0x1b, 0x4b, 0x66},
- {0x80, 0x80, 0x00, 0x22, 0x5e, 0x80},
- {0x9a, 0x9a, 0x00, 0x29, 0x71, 0x9a},
- {0xb8, 0xb8, 0x00, 0x31, 0x87, 0xb8},
- {0xdd, 0xdd, 0x00, 0x3b, 0xa2, 0xdd},
- };
-
- val = sd->ctrls[COLORS].val;
- for (i = 0; i < ARRAY_SIZE(color_tb[0]); i++)
- sccb_reg_write(gspca_dev, 0x4f + i, color_tb[val][i]);
-}
-
static void setlightfreq(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1225,9 +1289,13 @@ static int sd_init(struct gspca_dev *gspca_dev)
if ((sensor_id & 0xfff0) == 0x7670) {
sd->sensor = SENSOR_OV767x;
- gspca_dev->ctrl_dis = (1 << GAIN) |
+ gspca_dev->ctrl_dis = (1 << HUE) |
+ (1 << GAIN) |
(1 << AGC) |
(1 << SHARPNESS); /* auto */
+		sd->ctrls[SATURATION].min = 0;
+		sd->ctrls[SATURATION].max = 6;
+		sd->ctrls[SATURATION].def = 3;
sd->ctrls[BRIGHTNESS].min = -127;
sd->ctrls[BRIGHTNESS].max = 127;
sd->ctrls[BRIGHTNESS].def = 0;
@@ -1243,7 +1311,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
gspca_dev->cam.nmodes = ARRAY_SIZE(ov767x_mode);
} else {
sd->sensor = SENSOR_OV772x;
- gspca_dev->ctrl_dis = (1 << COLORS);
gspca_dev->cam.bulk = 1;
gspca_dev->cam.bulk_size = 16384;
gspca_dev->cam.bulk_nurbs = 2;
@@ -1302,6 +1369,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
set_frame_rate(gspca_dev);
+ if (!(gspca_dev->ctrl_dis & (1 << HUE)))
+ sethue(gspca_dev);
+ setsaturation(gspca_dev);
if (!(gspca_dev->ctrl_dis & (1 << AGC)))
setagc(gspca_dev);
setawb(gspca_dev);
@@ -1314,8 +1384,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
if (!(gspca_dev->ctrl_dis & (1 << SHARPNESS)))
setsharpness(gspca_dev);
sethvflip(gspca_dev);
- if (!(gspca_dev->ctrl_dis & (1 << COLORS)))
- setcolors(gspca_dev);
setlightfreq(gspca_dev);
ov534_set_led(gspca_dev, 1);
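To make the hue scaling in sethue() concrete, a short worked example; it assumes only what the in-code comment states about fixp_sin()/fixp_cos() (degrees in, values within [-0x100, 0x100] out) and is otherwise illustrative:

    /*
     * val = -45             ->  val += 360     -> 315
     * fixp_cos(315) ~=  181 -> * 0x80 / 0x100  ->  90 (0x5a) -> reg 0xa9
     * fixp_sin(315) ~= -181 -> * 0x80 / 0x100  -> -90
     *     negative, so bit 1 of reg 0xab is set and 90 (0x5a) goes to reg 0xaa
     */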
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 3844c49f269c..fa661c6d6d55 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -29,6 +29,8 @@
#include <linux/input.h>
#include "gspca.h"
+/* Include pac common sof detection functions */
+#include "pac_common.h"
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("Pixart PAC207");
@@ -39,16 +41,17 @@ MODULE_LICENSE("GPL");
#define PAC207_BRIGHTNESS_MIN 0
#define PAC207_BRIGHTNESS_MAX 255
#define PAC207_BRIGHTNESS_DEFAULT 46
+#define PAC207_BRIGHTNESS_REG 0x08
#define PAC207_EXPOSURE_MIN 3
#define PAC207_EXPOSURE_MAX 90 /* 1 sec expo time / 1 fps */
#define PAC207_EXPOSURE_DEFAULT 5 /* power on default: 3 */
-#define PAC207_EXPOSURE_KNEE 9 /* fps: 90 / exposure -> 9: 10 fps */
+#define PAC207_EXPOSURE_REG 0x02
#define PAC207_GAIN_MIN 0
#define PAC207_GAIN_MAX 31
#define PAC207_GAIN_DEFAULT 7 /* power on default: 9 */
-#define PAC207_GAIN_KNEE 15
+#define PAC207_GAIN_REG 0x0e
#define PAC207_AUTOGAIN_DEADZONE 30
@@ -56,13 +59,9 @@ MODULE_LICENSE("GPL");
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- u8 mode;
-
- u8 brightness;
- u8 exposure;
- u8 autogain;
- u8 gain;
+ struct v4l2_ctrl *brightness;
+ u8 mode;
u8 sof_read;
u8 header_read;
u8 autogain_ignore_frames;
@@ -70,80 +69,6 @@ struct sd {
atomic_t avg_lum;
};
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
-#define SD_BRIGHTNESS 0
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = PAC207_BRIGHTNESS_MIN,
- .maximum = PAC207_BRIGHTNESS_MAX,
- .step = 1,
- .default_value = PAC207_BRIGHTNESS_DEFAULT,
- .flags = 0,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
-#define SD_EXPOSURE 1
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = PAC207_EXPOSURE_MIN,
- .maximum = PAC207_EXPOSURE_MAX,
- .step = 1,
- .default_value = PAC207_EXPOSURE_DEFAULT,
- .flags = 0,
- },
- .set = sd_setexposure,
- .get = sd_getexposure,
- },
-#define SD_AUTOGAIN 2
- {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AUTOGAIN_DEF 1
- .default_value = AUTOGAIN_DEF,
- .flags = 0,
- },
- .set = sd_setautogain,
- .get = sd_getautogain,
- },
-#define SD_GAIN 3
- {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = PAC207_GAIN_MIN,
- .maximum = PAC207_GAIN_MAX,
- .step = 1,
- .default_value = PAC207_GAIN_DEFAULT,
- .flags = 0,
- },
- .set = sd_setgain,
- .get = sd_getgain,
- },
-};
-
static const struct v4l2_pix_format sif_mode[] = {
{176, 144, V4L2_PIX_FMT_PAC207, V4L2_FIELD_NONE,
.bytesperline = 176,
@@ -167,39 +92,44 @@ static const __u8 pac207_sensor_init[][8] = {
{0x32, 0x00, 0x96, 0x00, 0xa2, 0x02, 0xaf, 0x00},
};
-static int pac207_write_regs(struct gspca_dev *gspca_dev, u16 index,
+static void pac207_write_regs(struct gspca_dev *gspca_dev, u16 index,
const u8 *buffer, u16 length)
{
struct usb_device *udev = gspca_dev->dev;
int err;
+ if (gspca_dev->usb_err < 0)
+ return;
+
memcpy(gspca_dev->usb_buf, buffer, length);
err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
0x00, index,
gspca_dev->usb_buf, length, PAC207_CTRL_TIMEOUT);
- if (err < 0)
+ if (err < 0) {
pr_err("Failed to write registers to index 0x%04X, error %d\n",
index, err);
-
- return err;
+ gspca_dev->usb_err = err;
+ }
}
-
-static int pac207_write_reg(struct gspca_dev *gspca_dev, u16 index, u16 value)
+static void pac207_write_reg(struct gspca_dev *gspca_dev, u16 index, u16 value)
{
struct usb_device *udev = gspca_dev->dev;
int err;
+ if (gspca_dev->usb_err < 0)
+ return;
+
err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
value, index, NULL, 0, PAC207_CTRL_TIMEOUT);
- if (err)
+ if (err) {
pr_err("Failed to write a register (index 0x%04X, value 0x%02X, error %d)\n",
index, value, err);
-
- return err;
+ gspca_dev->usb_err = err;
+ }
}
static int pac207_read_reg(struct gspca_dev *gspca_dev, u16 index)
@@ -207,6 +137,9 @@ static int pac207_read_reg(struct gspca_dev *gspca_dev, u16 index)
struct usb_device *udev = gspca_dev->dev;
int res;
+ if (gspca_dev->usb_err < 0)
+ return 0;
+
res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x00,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
0x00, index,
@@ -214,7 +147,8 @@ static int pac207_read_reg(struct gspca_dev *gspca_dev, u16 index)
if (res < 0) {
pr_err("Failed to read a register (index 0x%04X, error %d)\n",
index, res);
- return res;
+ gspca_dev->usb_err = res;
+ return 0;
}
return gspca_dev->usb_buf[0];
@@ -224,7 +158,6 @@ static int pac207_read_reg(struct gspca_dev *gspca_dev, u16 index)
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
- struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
u8 idreg[2];
@@ -247,10 +180,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam = &gspca_dev->cam;
cam->cam_mode = sif_mode;
cam->nmodes = ARRAY_SIZE(sif_mode);
- sd->brightness = PAC207_BRIGHTNESS_DEFAULT;
- sd->exposure = PAC207_EXPOSURE_DEFAULT;
- sd->gain = PAC207_GAIN_DEFAULT;
- sd->autogain = AUTOGAIN_DEF;
return 0;
}
@@ -264,6 +193,87 @@ static int sd_init(struct gspca_dev *gspca_dev)
* Bit_2=Compression test mode enable */
pac207_write_reg(gspca_dev, 0x0f, 0x00); /* Power Control */
+ return gspca_dev->usb_err;
+}
+
+static void setcontrol(struct gspca_dev *gspca_dev, u16 reg, u16 val)
+{
+ pac207_write_reg(gspca_dev, reg, val);
+ pac207_write_reg(gspca_dev, 0x13, 0x01); /* Bit 0, auto clear */
+ pac207_write_reg(gspca_dev, 0x1c, 0x01); /* not documented */
+}
+
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (ctrl->id == V4L2_CID_AUTOGAIN && ctrl->is_new && ctrl->val) {
+ /* when switching to autogain set defaults to make sure
+ we are on a valid point of the autogain gain /
+ exposure knee graph, and give this change time to
+ take effect before doing autogain. */
+ gspca_dev->exposure->val = PAC207_EXPOSURE_DEFAULT;
+ gspca_dev->gain->val = PAC207_GAIN_DEFAULT;
+ sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES;
+ }
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setcontrol(gspca_dev, PAC207_BRIGHTNESS_REG, ctrl->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ if (gspca_dev->exposure->is_new || (ctrl->is_new && ctrl->val))
+ setcontrol(gspca_dev, PAC207_EXPOSURE_REG,
+ gspca_dev->exposure->val);
+ if (gspca_dev->gain->is_new || (ctrl->is_new && ctrl->val))
+ setcontrol(gspca_dev, PAC207_GAIN_REG,
+ gspca_dev->gain->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+/* this function is called at probe time */
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+
+ sd->brightness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS,
+ PAC207_BRIGHTNESS_MIN, PAC207_BRIGHTNESS_MAX,
+ 1, PAC207_BRIGHTNESS_DEFAULT);
+ gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ PAC207_EXPOSURE_MIN, PAC207_EXPOSURE_MAX,
+ 1, PAC207_EXPOSURE_DEFAULT);
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN,
+ PAC207_GAIN_MIN, PAC207_GAIN_MAX,
+ 1, PAC207_GAIN_DEFAULT);
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false);
return 0;
}
@@ -285,11 +295,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
else
pac207_write_reg(gspca_dev, 0x4a, 0x30);
pac207_write_reg(gspca_dev, 0x4b, 0x00); /* Sram test value */
- pac207_write_reg(gspca_dev, 0x08, sd->brightness);
+ pac207_write_reg(gspca_dev, 0x08, v4l2_ctrl_g_ctrl(sd->brightness));
/* PGA global gain (Bit 4-0) */
- pac207_write_reg(gspca_dev, 0x0e, sd->gain);
- pac207_write_reg(gspca_dev, 0x02, sd->exposure); /* PXCK = 12MHz /n */
+ pac207_write_reg(gspca_dev, 0x0e,
+ v4l2_ctrl_g_ctrl(gspca_dev->gain));
+ pac207_write_reg(gspca_dev, 0x02,
+ v4l2_ctrl_g_ctrl(gspca_dev->exposure)); /* PXCK = 12MHz /n */
mode = 0x02; /* Image Format (Bit 0), LED (1), Compr. test mode (2) */
if (gspca_dev->width == 176) { /* 176x144 */
@@ -308,7 +320,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
sd->sof_read = 0;
sd->autogain_ignore_frames = 0;
atomic_set(&sd->avg_lum, -1);
- return 0;
+ return gspca_dev->usb_err;
}
static void sd_stopN(struct gspca_dev *gspca_dev)
@@ -318,8 +330,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
pac207_write_reg(gspca_dev, 0x0f, 0x00); /* Power Control */
}
-/* Include pac common sof detection functions */
-#include "pac_common.h"
static void pac207_do_auto_gain(struct gspca_dev *gspca_dev)
{
@@ -331,9 +341,8 @@ static void pac207_do_auto_gain(struct gspca_dev *gspca_dev)
if (sd->autogain_ignore_frames > 0)
sd->autogain_ignore_frames--;
- else if (gspca_auto_gain_n_exposure(gspca_dev, avg_lum,
- 90, PAC207_AUTOGAIN_DEADZONE,
- PAC207_GAIN_KNEE, PAC207_EXPOSURE_KNEE))
+ else if (gspca_coarse_grained_expo_autogain(gspca_dev, avg_lum,
+ 90, PAC207_AUTOGAIN_DEADZONE))
sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES;
}
@@ -384,118 +393,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static void setbrightness(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- pac207_write_reg(gspca_dev, 0x08, sd->brightness);
- pac207_write_reg(gspca_dev, 0x13, 0x01); /* Bit 0, auto clear */
- pac207_write_reg(gspca_dev, 0x1c, 0x01); /* not documented */
-}
-
-static void setexposure(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- pac207_write_reg(gspca_dev, 0x02, sd->exposure);
- pac207_write_reg(gspca_dev, 0x13, 0x01); /* Bit 0, auto clear */
- pac207_write_reg(gspca_dev, 0x1c, 0x01); /* not documented */
-}
-
-static void setgain(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- pac207_write_reg(gspca_dev, 0x0e, sd->gain);
- pac207_write_reg(gspca_dev, 0x13, 0x01); /* Bit 0, auto clear */
- pac207_write_reg(gspca_dev, 0x1c, 0x01); /* not documented */
-}
-
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->exposure = val;
- if (gspca_dev->streaming)
- setexposure(gspca_dev);
- return 0;
-}
-
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->exposure;
- return 0;
-}
-
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->gain = val;
- if (gspca_dev->streaming)
- setgain(gspca_dev);
- return 0;
-}
-
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->gain;
- return 0;
-}
-
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->autogain = val;
- /* when switching to autogain set defaults to make sure
- we are on a valid point of the autogain gain /
- exposure knee graph, and give this change time to
- take effect before doing autogain. */
- if (sd->autogain) {
- sd->exposure = PAC207_EXPOSURE_DEFAULT;
- sd->gain = PAC207_GAIN_DEFAULT;
- if (gspca_dev->streaming) {
- sd->autogain_ignore_frames =
- PAC_AUTOGAIN_IGNORE_FRAMES;
- setexposure(gspca_dev);
- setgain(gspca_dev);
- }
- }
-
- return 0;
-}
-
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->autogain;
- return 0;
-}
-
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
@@ -518,10 +415,9 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.dq_callback = pac207_do_auto_gain,
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index 30662fccb0cf..a0369a58c4bb 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -23,43 +23,58 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-/* Some documentation about various registers as determined by trial and error.
-
- Register page 1:
-
- Address Description
- 0x78 Global control, bit 6 controls the LED (inverted)
-
- Register page 3:
-
- Address Description
- 0x02 Clock divider 3-63, fps = 90 / val. Must be a multiple of 3 on
- the 7302, so one of 3, 6, 9, ..., except when between 6 and 12?
- 0x03 Variable framerate ctrl reg2==3: 0 -> ~30 fps, 255 -> ~22fps
- 0x04 Another var framerate ctrl reg2==3, reg3==0: 0 -> ~30 fps,
- 63 -> ~27 fps, the 2 msb's must always be 1 !!
- 0x05 Another var framerate ctrl reg2==3, reg3==0, reg4==0xc0:
- 1 -> ~30 fps, 2 -> ~20 fps
- 0x0e Exposure bits 0-7, 0-448, 0 = use full frame time
- 0x0f Exposure bit 8, 0-448, 448 = no exposure at all
- 0x10 Master gain 0-31
- 0x21 Bitfield: 0-1 unused, 2-3 vflip/hflip, 4-5 unknown, 6-7 unused
-
- The registers are accessed in the following functions:
-
- Page | Register | Function
- -----+------------+---------------------------------------------------
- 0 | 0x0f..0x20 | setcolors()
- 0 | 0xa2..0xab | setbrightcont()
- 0 | 0xc5 | setredbalance()
- 0 | 0xc6 | setwhitebalance()
- 0 | 0xc7 | setbluebalance()
- 0 | 0xdc | setbrightcont(), setcolors()
- 3 | 0x02 | setexposure()
- 3 | 0x10 | setgain()
- 3 | 0x11 | setcolors(), setgain(), setexposure(), sethvflip()
- 3 | 0x21 | sethvflip()
-*/
+/*
+ * Some documentation about various registers as determined by trial and error.
+ *
+ * Register page 1:
+ *
+ * Address Description
+ * 0x78 Global control, bit 6 controls the LED (inverted)
+ * 0x80 Compression balance, 2 interesting settings:
+ * 0x0f Default
+ * 0x50 Values >= this switch the camera to a lower compression,
+ * using the same table for both luminance and chrominance.
+ * This gives a sharper picture. Only usable when running
+ * at < 15 fps! Note currently the driver does not use this
+ *			as the quality gain is small and the generated JPEGs are
+ * only understood by v4l-utils >= 0.8.9
+ *
+ * Register page 3:
+ *
+ * Address Description
+ * 0x02 Clock divider 3-63, fps = 90 / val. Must be a multiple of 3 on
+ * the 7302, so one of 3, 6, 9, ..., except when between 6 and 12?
+ * 0x03 Variable framerate ctrl reg2==3: 0 -> ~30 fps, 255 -> ~22fps
+ * 0x04 Another var framerate ctrl reg2==3, reg3==0: 0 -> ~30 fps,
+ * 63 -> ~27 fps, the 2 msb's must always be 1 !!
+ * 0x05 Another var framerate ctrl reg2==3, reg3==0, reg4==0xc0:
+ * 1 -> ~30 fps, 2 -> ~20 fps
+ * 0x0e Exposure bits 0-7, 0-448, 0 = use full frame time
+ * 0x0f Exposure bit 8, 0-448, 448 = no exposure at all
+ * 0x10 Gain 0-31
+ * 0x12 Another gain 0-31, unlike 0x10 this one seems to start with an
+ *	     amplification value of 1 rather than 0 at its lowest setting
+ * 0x21 Bitfield: 0-1 unused, 2-3 vflip/hflip, 4-5 unknown, 6-7 unused
+ * 0x80 Another framerate control, best left at 1, moving it from 1 to
+ * 2 causes the framerate to become 3/4th of what it was, and
+ * also seems to cause pixel averaging, resulting in an effective
+ * resolution of 320x240 and thus a much blockier image
+ *
+ * The registers are accessed in the following functions:
+ *
+ * Page | Register | Function
+ * -----+------------+---------------------------------------------------
+ * 0 | 0x0f..0x20 | setcolors()
+ * 0 | 0xa2..0xab | setbrightcont()
+ * 0 | 0xc5 | setredbalance()
+ * 0 | 0xc6 | setwhitebalance()
+ * 0 | 0xc7 | setbluebalance()
+ * 0 | 0xdc | setbrightcont(), setcolors()
+ * 3 | 0x02 | setexposure()
+ * 3 | 0x10, 0x12 | setgain()
+ * 3 | 0x11 | setcolors(), setgain(), setexposure(), sethvflip()
+ * 3 | 0x21 | sethvflip()
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -89,7 +104,6 @@ enum e_ctrl {
NCTRLS /* number of controls */
};
-/* specific webcam descriptor for pac7302 */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
@@ -198,10 +212,10 @@ static const struct ctrl sd_ctrls[] = {
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Gain",
.minimum = 0,
- .maximum = 255,
+ .maximum = 62,
.step = 1,
-#define GAIN_DEF 127
-#define GAIN_KNEE 255 /* Gain seems to cause little noise on the pac73xx */
+#define GAIN_DEF 15
+#define GAIN_KNEE 46
.default_value = GAIN_DEF,
},
.set_control = setgain
@@ -270,7 +284,6 @@ static const struct v4l2_pix_format vga_mode[] = {
#define LOAD_PAGE3 255
#define END_OF_SEQUENCE 0
-/* pac 7302 */
static const u8 init_7302[] = {
/* index,value */
0xff, 0x01, /* page 1 */
@@ -509,7 +522,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
return 0;
}
-/* This function is used by pac7302 only */
static void setbrightcont(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -536,7 +548,6 @@ static void setbrightcont(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0xdc, 0x01);
}
-/* This function is used by pac7302 only */
static void setcolors(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -590,9 +601,19 @@ static void setbluebalance(struct gspca_dev *gspca_dev)
static void setgain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
+ u8 reg10, reg12;
+
+ if (sd->ctrls[GAIN].val < 32) {
+ reg10 = sd->ctrls[GAIN].val;
+ reg12 = 0;
+ } else {
+ reg10 = 31;
+ reg12 = sd->ctrls[GAIN].val - 31;
+ }
reg_w(gspca_dev, 0xff, 0x03); /* page 3 */
- reg_w(gspca_dev, 0x10, sd->ctrls[GAIN].val >> 3);
+ reg_w(gspca_dev, 0x10, reg10);
+ reg_w(gspca_dev, 0x12, reg12);
/* load registers to sensor (Bit 0, auto clear) */
reg_w(gspca_dev, 0x11, 0x01);
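For reference, the new 0-62 gain range maps onto the two registers as follows; the values follow directly from the code in setgain() above:

    /*
     * GAIN = 15 (GAIN_DEF)  -> reg 0x10 = 15, reg 0x12 = 0
     * GAIN = 31             -> reg 0x10 = 31, reg 0x12 = 0
     * GAIN = 46 (GAIN_KNEE) -> reg 0x10 = 31, reg 0x12 = 15
     * GAIN = 62 (maximum)   -> reg 0x10 = 31, reg 0x12 = 31
     */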
@@ -604,28 +625,36 @@ static void setexposure(struct gspca_dev *gspca_dev)
u8 clockdiv;
u16 exposure;
- /* register 2 of frame 3 contains the clock divider configuring the
- no fps according to the formula: 90 / reg. sd->exposure is the
- desired exposure time in 0.5 ms. */
+ /*
+ * Register 2 of frame 3 contains the clock divider configuring the
+	 * number of fps according to the formula: 90 / reg. sd->exposure is the
+ * desired exposure time in 0.5 ms.
+ */
clockdiv = (90 * sd->ctrls[EXPOSURE].val + 1999) / 2000;
- /* Note clockdiv = 3 also works, but when running at 30 fps, depending
- on the scene being recorded, the camera switches to another
- quantization table for certain JPEG blocks, and we don't know how
- to decompress these blocks. So we cap the framerate at 15 fps */
+ /*
+ * Note clockdiv = 3 also works, but when running at 30 fps, depending
+ * on the scene being recorded, the camera switches to another
+ * quantization table for certain JPEG blocks, and we don't know how
+ * to decompress these blocks. So we cap the framerate at 15 fps.
+ */
if (clockdiv < 6)
clockdiv = 6;
else if (clockdiv > 63)
clockdiv = 63;
- /* reg2 MUST be a multiple of 3, except when between 6 and 12?
- Always round up, otherwise we cannot get the desired frametime
- using the partial frame time exposure control */
+ /*
+ * Register 2 MUST be a multiple of 3, except when between 6 and 12?
+ * Always round up, otherwise we cannot get the desired frametime
+ * using the partial frame time exposure control.
+ */
if (clockdiv < 6 || clockdiv > 12)
clockdiv = ((clockdiv + 2) / 3) * 3;
- /* frame exposure time in ms = 1000 * clockdiv / 90 ->
- exposure = (sd->exposure / 2) * 448 / (1000 * clockdiv / 90) */
+ /*
+ * frame exposure time in ms = 1000 * clockdiv / 90 ->
+ * exposure = (sd->exposure / 2) * 448 / (1000 * clockdiv / 90)
+ */
exposure = (sd->ctrls[EXPOSURE].val * 45 * 448) / (1000 * clockdiv);
/* 0 = use full frametime, 448 = no exposure, reverse it */
exposure = 448 - exposure;
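A quick numeric walk-through of the exposure computation; the input value is arbitrary and only the arithmetic from the code above is used:

    /*
     * EXPOSURE val = 66 (a 33 ms request; the control is in 0.5 ms units)
     * clockdiv = (90 * 66 + 1999) / 2000 = 3  -> clamped to 6 (15 fps)
     * frame time = 1000 * 6 / 90 ~= 66.7 ms
     * exposure = (66 * 45 * 448) / (1000 * 6) = 221
     * value written = 448 - 221 = 227, i.e. roughly half the frame time
     */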
@@ -643,10 +672,12 @@ static void setautogain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- /* when switching to autogain set defaults to make sure
- we are on a valid point of the autogain gain /
- exposure knee graph, and give this change time to
- take effect before doing autogain. */
+ /*
+ * When switching to autogain set defaults to make sure
+ * we are on a valid point of the autogain gain /
+ * exposure knee graph, and give this change time to
+ * take effect before doing autogain.
+ */
if (sd->ctrls[AUTOGAIN].val) {
sd->ctrls[EXPOSURE].val = EXPOSURE_DEF;
sd->ctrls[GAIN].val = GAIN_DEF;
@@ -700,8 +731,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
setautogain(gspca_dev);
sethvflip(gspca_dev);
- /* only resolution 640x480 is supported for pac7302 */
-
sd->sof_read = 0;
atomic_set(&sd->avg_lum, 270 + sd->ctrls[BRIGHTNESS].val);
@@ -729,9 +758,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x78, 0x40);
}
-/* !! coarse_grained_expo_autogain is not used !! */
-#define exp_too_low_cnt flags
-#define exp_too_high_cnt sof_read
+#define WANT_REGULAR_AUTOGAIN
#include "autogain_functions.h"
static void do_autogain(struct gspca_dev *gspca_dev)
@@ -792,10 +819,12 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
if (sof) {
int n, lum_offset, footer_length;
- /* 6 bytes after the FF D9 EOF marker a number of lumination
- bytes are send corresponding to different parts of the
- image, the 14th and 15th byte after the EOF seem to
- correspond to the center of the image */
+ /*
+		 * 6 bytes after the FF D9 EOF marker a number of luminance
+		 * bytes are sent corresponding to different parts of the
+		 * image; the 14th and 15th byte after the EOF seem to
+		 * correspond to the center of the image.
+ */
lum_offset = 61 + sizeof pac_sof_marker;
footer_length = 74;
@@ -839,9 +868,10 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev,
u8 index;
u8 value;
- /* reg->reg: bit0..15: reserved for register index (wIndex is 16bit
- long on the USB bus)
- */
+ /*
+ * reg->reg: bit0..15: reserved for register index (wIndex is 16bit
+ * long on the USB bus)
+ */
if (reg->match.type == V4L2_CHIP_MATCH_HOST &&
reg->match.addr == 0 &&
(reg->reg < 0x000000ff) &&
@@ -852,9 +882,11 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev,
index = reg->reg;
value = reg->val;
- /* Note that there shall be no access to other page
- by any other function between the page swith and
- the actual register write */
+ /*
+ * Note that there shall be no access to other page
+ * by any other function between the page switch and
+ * the actual register write.
+ */
reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
reg_w(gspca_dev, index, value);
@@ -940,6 +972,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
{USB_DEVICE(0x093a, 0x2625)},
{USB_DEVICE(0x093a, 0x2626)},
+ {USB_DEVICE(0x093a, 0x2627), .driver_info = FL_VFLIP},
{USB_DEVICE(0x093a, 0x2628)},
{USB_DEVICE(0x093a, 0x2629), .driver_info = FL_VFLIP},
{USB_DEVICE(0x093a, 0x262a)},
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 1ac111176ffa..2cb7d95f7be7 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -20,34 +20,42 @@
*/
/* Some documentation about various registers as determined by trial and error.
- When the register addresses differ between the 7202 and the 7311 the 2
- different addresses are written as 7302addr/7311addr, when one of the 2
- addresses is a - sign that register description is not valid for the
- matching IC.
-
- Register page 1:
-
- Address Description
- -/0x08 Unknown compressor related, must always be 8 except when not
- in 640x480 resolution and page 4 reg 2 <= 3 then set it to 9 !
- -/0x1b Auto white balance related, bit 0 is AWB enable (inverted)
- bits 345 seem to toggle per color gains on/off (inverted)
- 0x78 Global control, bit 6 controls the LED (inverted)
- -/0x80 JPEG compression ratio ? Best not touched
-
- Register page 3/4:
-
- Address Description
- 0x02 Clock divider 2-63, fps =~ 60 / val. Must be a multiple of 3 on
- the 7302, so one of 3, 6, 9, ..., except when between 6 and 12?
- -/0x0f Master gain 1-245, low value = high gain
- 0x10/- Master gain 0-31
- -/0x10 Another gain 0-15, limited influence (1-2x gain I guess)
- 0x21 Bitfield: 0-1 unused, 2-3 vflip/hflip, 4-5 unknown, 6-7 unused
- -/0x27 Seems to toggle various gains on / off, Setting bit 7 seems to
- completely disable the analog amplification block. Set to 0x68
- for max gain, 0x14 for minimal gain.
-*/
+ *
+ * Register page 1:
+ *
+ * Address Description
+ * 0x08 Unknown compressor related, must always be 8 except when not
+ * in 640x480 resolution and page 4 reg 2 <= 3 then set it to 9 !
+ * 0x1b Auto white balance related, bit 0 is AWB enable (inverted)
+ * bits 345 seem to toggle per color gains on/off (inverted)
+ * 0x78 Global control, bit 6 controls the LED (inverted)
+ * 0x80 Compression balance, interesting settings:
+ *		0x01	Use this to allow the camera to switch to higher compression
+ *			on the fly. Needed to stay within the bandwidth at 640x480@30 fps
+ * 0x1c From usb captures under Windows for 640x480
+ * 0x2a Values >= this switch the camera to a lower compression,
+ * using the same table for both luminance and chrominance.
+ *			This gives a sharper picture. Usable only at 640x480 below
+ *			15 fps or at 320x240 / 160x120. Note that the driver currently
+ *			does not use this, as the quality gain is small and the
+ *			generated JPEGs are only understood by v4l-utils >= 0.8.9
+ * 0x3f From usb captures under Windows for 320x240
+ * 0x69 From usb captures under Windows for 160x120
+ *
+ * Register page 4:
+ *
+ * Address Description
+ * 0x02 Clock divider 2-63, fps =~ 60 / val. Must be a multiple of 3 on
+ * the 7302, so one of 3, 6, 9, ..., except when between 6 and 12?
+ * 0x0f Master gain 1-245, low value = high gain
+ * 0x10 Another gain 0-15, limited influence (1-2x gain I guess)
+ * 0x21 Bitfield: 0-1 unused, 2-3 vflip/hflip, 4-5 unknown, 6-7 unused
+ *		Note that disabling vflip leads to much lower image quality,
+ *		so we always vflip and tell userspace to flip the image back
+ *	0x27	Seems to toggle various gains on/off. Setting bit 7 seems to
+ *		completely disable the analog amplification block. Set to 0x68
+ *		for maximum gain, 0x14 for minimal gain.
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
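The fps =~ 60 / val relation for page 4 register 2 can be illustrated with a throwaway sketch (plain C; the fps-to-divider direction and the function name are illustrative only, since the driver exposes the raw divider as its exposure control; the multiple-of-3 rule is the 7302 restriction noted above):

#include <stdio.h>

static int fps_to_clockdiv(int fps, int is_pac7302)
{
	int div = 60 / fps;		/* fps =~ 60 / val */

	if (div < 2)			/* documented range is 2-63 */
		div = 2;
	else if (div > 63)
		div = 63;
	if (is_pac7302 && (div < 6 || div > 12))
		div = ((div + 2) / 3) * 3;	/* 7302 wants a multiple of 3 */
	return div;
}

int main(void)
{
	printf("30 fps -> %d, 20 fps -> %d, 5 fps (7302) -> %d\n",
	       fps_to_clockdiv(30, 0), fps_to_clockdiv(20, 0),
	       fps_to_clockdiv(5, 1));	/* 2, 3, 12 */
	return 0;
}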
@@ -55,21 +63,21 @@
#include <linux/input.h>
#include "gspca.h"
+/* Include pac common sof detection functions */
+#include "pac_common.h"
+
+#define PAC7311_GAIN_DEFAULT 122
+#define PAC7311_EXPOSURE_DEFAULT 3 /* 20 fps, avoid using high compr. */
MODULE_AUTHOR("Thomas Kaiser thomas@kaiser-linux.li");
MODULE_DESCRIPTION("Pixart PAC7311");
MODULE_LICENSE("GPL");
-/* specific webcam descriptor for pac7311 */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- unsigned char contrast;
- unsigned char gain;
- unsigned char exposure;
- unsigned char autogain;
- __u8 hflip;
- __u8 vflip;
+ struct v4l2_ctrl *contrast;
+ struct v4l2_ctrl *hflip;
u8 sof_read;
u8 autogain_ignore_frames;
@@ -77,114 +85,6 @@ struct sd {
atomic_t avg_lum;
};
-/* V4L2 controls supported by the driver */
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
-/* This control is for both the 7302 and the 7311 */
- {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
-#define CONTRAST_MAX 255
- .maximum = CONTRAST_MAX,
- .step = 1,
-#define CONTRAST_DEF 127
- .default_value = CONTRAST_DEF,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
-/* All controls below are for both the 7302 and the 7311 */
- {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
-#define GAIN_MAX 255
- .maximum = GAIN_MAX,
- .step = 1,
-#define GAIN_DEF 127
-#define GAIN_KNEE 255 /* Gain seems to cause little noise on the pac73xx */
- .default_value = GAIN_DEF,
- },
- .set = sd_setgain,
- .get = sd_getgain,
- },
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
-#define EXPOSURE_MAX 255
- .maximum = EXPOSURE_MAX,
- .step = 1,
-#define EXPOSURE_DEF 16 /* 32 ms / 30 fps */
-#define EXPOSURE_KNEE 50 /* 100 ms / 10 fps */
- .default_value = EXPOSURE_DEF,
- },
- .set = sd_setexposure,
- .get = sd_getexposure,
- },
- {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AUTOGAIN_DEF 1
- .default_value = AUTOGAIN_DEF,
- },
- .set = sd_setautogain,
- .get = sd_getautogain,
- },
- {
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mirror",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define HFLIP_DEF 0
- .default_value = HFLIP_DEF,
- },
- .set = sd_sethflip,
- .get = sd_gethflip,
- },
- {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Vflip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define VFLIP_DEF 0
- .default_value = VFLIP_DEF,
- },
- .set = sd_setvflip,
- .get = sd_getvflip,
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{160, 120, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE,
.bytesperline = 160,
@@ -206,8 +106,8 @@ static const struct v4l2_pix_format vga_mode[] = {
#define LOAD_PAGE4 254
#define END_OF_SEQUENCE 0
-/* pac 7311 */
static const __u8 init_7311[] = {
+ 0xff, 0x01,
0x78, 0x40, /* Bit_0=start stream, Bit_6=LED */
0x78, 0x40, /* Bit_0=start stream, Bit_6=LED */
0x78, 0x44, /* Bit_0=start stream, Bit_6=LED */
@@ -387,90 +287,73 @@ static void reg_w_var(struct gspca_dev *gspca_dev,
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
- struct sd *sd = (struct sd *) gspca_dev;
- struct cam *cam;
+ struct cam *cam = &gspca_dev->cam;
- cam = &gspca_dev->cam;
-
- PDEBUG(D_CONF, "Find Sensor PAC7311");
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
+ cam->input_flags = V4L2_IN_ST_VFLIP;
- sd->contrast = CONTRAST_DEF;
- sd->gain = GAIN_DEF;
- sd->exposure = EXPOSURE_DEF;
- sd->autogain = AUTOGAIN_DEF;
- sd->hflip = HFLIP_DEF;
- sd->vflip = VFLIP_DEF;
return 0;
}
-/* This function is used by pac7311 only */
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
reg_w(gspca_dev, 0xff, 0x04);
- reg_w(gspca_dev, 0x10, sd->contrast >> 4);
+ reg_w(gspca_dev, 0x10, val);
/* load registers to sensor (Bit 0, auto clear) */
reg_w(gspca_dev, 0x11, 0x01);
}
-static void setgain(struct gspca_dev *gspca_dev)
+static void setgain(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- int gain = GAIN_MAX - sd->gain;
-
- if (gain < 1)
- gain = 1;
- else if (gain > 245)
- gain = 245;
reg_w(gspca_dev, 0xff, 0x04); /* page 4 */
reg_w(gspca_dev, 0x0e, 0x00);
- reg_w(gspca_dev, 0x0f, gain);
+ reg_w(gspca_dev, 0x0f, gspca_dev->gain->maximum - val + 1);
/* load registers to sensor (Bit 0, auto clear) */
reg_w(gspca_dev, 0x11, 0x01);
}
-static void setexposure(struct gspca_dev *gspca_dev)
+static void setexposure(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- __u8 reg;
-
- /* register 2 of frame 3/4 contains the clock divider configuring the
- no fps according to the formula: 60 / reg. sd->exposure is the
- desired exposure time in ms. */
- reg = 120 * sd->exposure / 1000;
- if (reg < 2)
- reg = 2;
- else if (reg > 63)
- reg = 63;
-
reg_w(gspca_dev, 0xff, 0x04); /* page 4 */
- reg_w(gspca_dev, 0x02, reg);
+ reg_w(gspca_dev, 0x02, val);
- /* Page 1 register 8 must always be 0x08 except when not in
- 640x480 mode and Page3/4 reg 2 <= 3 then it must be 9 */
+ /* load registers to sensor (Bit 0, auto clear) */
+ reg_w(gspca_dev, 0x11, 0x01);
+
+ /*
+ * Page 1 register 8 must always be 0x08 except when not in
+ * 640x480 mode and page 4 reg 2 <= 3 then it must be 9
+ */
reg_w(gspca_dev, 0xff, 0x01);
- if (gspca_dev->cam.cam_mode[(int)gspca_dev->curr_mode].priv &&
- reg <= 3) {
+ if (gspca_dev->width != 640 && val <= 3)
reg_w(gspca_dev, 0x08, 0x09);
- } else {
+ else
reg_w(gspca_dev, 0x08, 0x08);
- }
+
+ /*
+ * Page1 register 80 sets the compression balance, normally we
+ * want / use 0x1c, but for 640x480@30fps we must allow the
+ * camera to use higher compression or we may run out of
+ * bandwidth.
+ */
+ if (gspca_dev->width == 640 && val == 2)
+ reg_w(gspca_dev, 0x80, 0x01);
+ else
+ reg_w(gspca_dev, 0x80, 0x1c);
/* load registers to sensor (Bit 0, auto clear) */
reg_w(gspca_dev, 0x11, 0x01);
}
-static void sethvflip(struct gspca_dev *gspca_dev)
+static void sethvflip(struct gspca_dev *gspca_dev, s32 hflip, s32 vflip)
{
- struct sd *sd = (struct sd *) gspca_dev;
__u8 data;
reg_w(gspca_dev, 0xff, 0x04); /* page 4 */
- data = (sd->hflip ? 0x04 : 0x00) | (sd->vflip ? 0x08 : 0x00);
+ data = (hflip ? 0x04 : 0x00) |
+ (vflip ? 0x08 : 0x00);
reg_w(gspca_dev, 0x21, data);
/* load registers to sensor (Bit 0, auto clear) */
@@ -484,6 +367,82 @@ static int sd_init(struct gspca_dev *gspca_dev)
return gspca_dev->usb_err;
}
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (ctrl->id == V4L2_CID_AUTOGAIN && ctrl->is_new && ctrl->val) {
+		/*
+		 * When switching to autogain set defaults to make sure
+		 * we are on a valid point of the autogain gain /
+		 * exposure knee graph, and give this change time to
+		 * take effect before doing autogain.
+		 */
+ gspca_dev->exposure->val = PAC7311_EXPOSURE_DEFAULT;
+ gspca_dev->gain->val = PAC7311_GAIN_DEFAULT;
+ sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES;
+ }
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ if (gspca_dev->exposure->is_new || (ctrl->is_new && ctrl->val))
+ setexposure(gspca_dev, gspca_dev->exposure->val);
+ if (gspca_dev->gain->is_new || (ctrl->is_new && ctrl->val))
+ setgain(gspca_dev, gspca_dev->gain->val);
+ break;
+ case V4L2_CID_HFLIP:
+ sethvflip(gspca_dev, sd->hflip->val, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+/* this function is called at probe time */
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+
+ sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 15, 1, 7);
+ gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 2, 63, 1,
+ PAC7311_EXPOSURE_DEFAULT);
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 244, 1,
+ PAC7311_GAIN_DEFAULT);
+ sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+
+ v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false);
+ return 0;
+}
+
+/* -- start the camera -- */
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -492,19 +451,19 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w_var(gspca_dev, start_7311,
page4_7311, sizeof(page4_7311));
- setcontrast(gspca_dev);
- setgain(gspca_dev);
- setexposure(gspca_dev);
- sethvflip(gspca_dev);
+ setcontrast(gspca_dev, v4l2_ctrl_g_ctrl(sd->contrast));
+ setgain(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->gain));
+ setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
+ sethvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip), 1);
/* set correct resolution */
switch (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) {
- case 2: /* 160x120 pac7311 */
+ case 2: /* 160x120 */
reg_w(gspca_dev, 0xff, 0x01);
reg_w(gspca_dev, 0x17, 0x20);
reg_w(gspca_dev, 0x87, 0x10);
break;
- case 1: /* 320x240 pac7311 */
+ case 1: /* 320x240 */
reg_w(gspca_dev, 0xff, 0x01);
reg_w(gspca_dev, 0x17, 0x30);
reg_w(gspca_dev, 0x87, 0x11);
@@ -541,14 +500,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */
}
-/* called on streamoff with alt 0 and on disconnect for 7311 */
-static void sd_stop0(struct gspca_dev *gspca_dev)
-{
-}
-
-/* Include pac common sof detection functions */
-#include "pac_common.h"
-
static void do_autogain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -558,13 +509,13 @@ static void do_autogain(struct gspca_dev *gspca_dev)
if (avg_lum == -1)
return;
- desired_lum = 200;
+ desired_lum = 170;
deadzone = 20;
if (sd->autogain_ignore_frames > 0)
sd->autogain_ignore_frames--;
- else if (gspca_auto_gain_n_exposure(gspca_dev, avg_lum, desired_lum,
- deadzone, GAIN_KNEE, EXPOSURE_KNEE))
+ else if (gspca_coarse_grained_expo_autogain(gspca_dev, avg_lum,
+ desired_lum, deadzone))
sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES;
}
@@ -628,10 +579,12 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
if (sof) {
int n, lum_offset, footer_length;
- /* 6 bytes after the FF D9 EOF marker a number of lumination
- bytes are send corresponding to different parts of the
- image, the 14th and 15th byte after the EOF seem to
- correspond to the center of the image */
+ /*
+	 * Six bytes after the FF D9 EOF marker a number of luminance
+	 * bytes are sent, corresponding to different parts of the
+	 * image; the 14th and 15th bytes after the EOF seem to
+	 * correspond to the center of the image.
+ */
lum_offset = 24 + sizeof pac_sof_marker;
footer_length = 26;
@@ -668,127 +621,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->gain = val;
- if (gspca_dev->streaming)
- setgain(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->gain;
- return 0;
-}
-
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->exposure = val;
- if (gspca_dev->streaming)
- setexposure(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->exposure;
- return 0;
-}
-
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->autogain = val;
- /* when switching to autogain set defaults to make sure
- we are on a valid point of the autogain gain /
- exposure knee graph, and give this change time to
- take effect before doing autogain. */
- if (sd->autogain) {
- sd->exposure = EXPOSURE_DEF;
- sd->gain = GAIN_DEF;
- if (gspca_dev->streaming) {
- sd->autogain_ignore_frames =
- PAC_AUTOGAIN_IGNORE_FRAMES;
- setexposure(gspca_dev);
- setgain(gspca_dev);
- }
- }
-
- return gspca_dev->usb_err;
-}
-
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->autogain;
- return 0;
-}
-
-static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->hflip = val;
- if (gspca_dev->streaming)
- sethvflip(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->hflip;
- return 0;
-}
-
-static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->vflip = val;
- if (gspca_dev->streaming)
- sethvflip(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->vflip;
- return 0;
-}
-
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
@@ -820,16 +652,13 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
}
#endif
-/* sub-driver description for pac7311 */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
- .stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 7e71aa2d2522..ad098202d7f0 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -59,35 +59,38 @@ MODULE_LICENSE("GPL");
#define SENSOR_MT9M111 9
#define SENSOR_MT9M112 10
#define SENSOR_HV7131R 11
-#define SENSOR_MT9VPRB 20
+#define SENSOR_MT9VPRB 12
/* camera flags */
#define HAS_NO_BUTTON 0x1
#define LED_REVERSE 0x2 /* some cameras unset gpio to turn on leds */
#define FLIP_DETECT 0x4
-enum e_ctrl {
- BRIGHTNESS,
- CONTRAST,
- SATURATION,
- HUE,
- GAMMA,
- BLUE,
- RED,
- VFLIP,
- HFLIP,
- EXPOSURE,
- GAIN,
- AUTOGAIN,
- QUALITY,
- NCTRLS /* number of controls */
-};
-
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev;
- struct gspca_ctrl ctrls[NCTRLS];
+ struct { /* color control cluster */
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *contrast;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *hue;
+ };
+ struct { /* blue/red balance control cluster */
+ struct v4l2_ctrl *blue;
+ struct v4l2_ctrl *red;
+ };
+ struct { /* h/vflip control cluster */
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ };
+ struct v4l2_ctrl *gamma;
+ struct { /* autogain and exposure or gain control cluster */
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *gain;
+ };
+ struct v4l2_ctrl *jpegqual;
struct work_struct work;
struct workqueue_struct *work_thread;
@@ -105,6 +108,7 @@ struct sd {
u8 exposure_step;
u8 i2c_addr;
+ u8 i2c_intf;
u8 sensor;
u8 hstart;
u8 vstart;
@@ -166,175 +170,6 @@ static const struct dmi_system_id flip_dmi_table[] = {
{}
};
-static void set_cmatrix(struct gspca_dev *gspca_dev);
-static void set_gamma(struct gspca_dev *gspca_dev);
-static void set_redblue(struct gspca_dev *gspca_dev);
-static void set_hvflip(struct gspca_dev *gspca_dev);
-static void set_exposure(struct gspca_dev *gspca_dev);
-static void set_gain(struct gspca_dev *gspca_dev);
-static void set_quality(struct gspca_dev *gspca_dev);
-
-static const struct ctrl sd_ctrls[NCTRLS] = {
-[BRIGHTNESS] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x7f
- },
- .set_control = set_cmatrix
- },
-[CONTRAST] = {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x7f
- },
- .set_control = set_cmatrix
- },
-[SATURATION] = {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x7f
- },
- .set_control = set_cmatrix
- },
-[HUE] = {
- {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = -180,
- .maximum = 180,
- .step = 1,
- .default_value = 0
- },
- .set_control = set_cmatrix
- },
-[GAMMA] = {
- {
- .id = V4L2_CID_GAMMA,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gamma",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x10
- },
- .set_control = set_gamma
- },
-[BLUE] = {
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Balance",
- .minimum = 0,
- .maximum = 0x7f,
- .step = 1,
- .default_value = 0x28
- },
- .set_control = set_redblue
- },
-[RED] = {
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Balance",
- .minimum = 0,
- .maximum = 0x7f,
- .step = 1,
- .default_value = 0x28
- },
- .set_control = set_redblue
- },
-[HFLIP] = {
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Horizontal Flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- .set_control = set_hvflip
- },
-[VFLIP] = {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Vertical Flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- .set_control = set_hvflip
- },
-[EXPOSURE] = {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 0x1780,
- .step = 1,
- .default_value = 0x33,
- },
- .set_control = set_exposure
- },
-[GAIN] = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 28,
- .step = 1,
- .default_value = 0,
- },
- .set_control = set_gain
- },
-[AUTOGAIN] = {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- },
- },
-[QUALITY] = {
- {
- .id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Compression Quality",
-#define QUALITY_MIN 50
-#define QUALITY_MAX 90
-#define QUALITY_DEF 80
- .minimum = QUALITY_MIN,
- .maximum = QUALITY_MAX,
- .step = 1,
- .default_value = QUALITY_DEF,
- },
- .set_control = set_quality
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 160,
@@ -747,7 +582,7 @@ static const s16 hsv_blue_y[] = {
4, 2, 0, -1, -3, -5, -7, -9, -11
};
-static u16 i2c_ident[] = {
+static const u16 i2c_ident[] = {
V4L2_IDENT_OV9650,
V4L2_IDENT_OV9655,
V4L2_IDENT_SOI968,
@@ -760,9 +595,10 @@ static u16 i2c_ident[] = {
V4L2_IDENT_MT9M111,
V4L2_IDENT_MT9M112,
V4L2_IDENT_HV7131R,
+[SENSOR_MT9VPRB] = V4L2_IDENT_UNKNOWN,
};
-static u16 bridge_init[][2] = {
+static const u16 bridge_init[][2] = {
{0x1000, 0x78}, {0x1001, 0x40}, {0x1002, 0x1c},
{0x1020, 0x80}, {0x1061, 0x01}, {0x1067, 0x40},
{0x1068, 0x30}, {0x1069, 0x20}, {0x106a, 0x10},
@@ -786,7 +622,7 @@ static u16 bridge_init[][2] = {
};
/* Gain = (bit[3:0] / 16 + 1) * (bit[4] + 1) * (bit[5] + 1) * (bit[6] + 1) */
-static u8 ov_gain[] = {
+static const u8 ov_gain[] = {
0x00 /* 1x */, 0x04 /* 1.25x */, 0x08 /* 1.5x */, 0x0c /* 1.75x */,
0x10 /* 2x */, 0x12 /* 2.25x */, 0x14 /* 2.5x */, 0x16 /* 2.75x */,
0x18 /* 3x */, 0x1a /* 3.25x */, 0x1c /* 3.5x */, 0x1e /* 3.75x */,
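The encoding in the comment above the table can be decoded mechanically; a small standalone check (plain C, not driver code) reproducing a few of the table entries:

#include <stdio.h>

/* Gain = (bit[3:0] / 16 + 1) * (bit[4] + 1) * (bit[5] + 1) * (bit[6] + 1) */
static double ov_gain_decode(unsigned char v)
{
	return (1.0 + (v & 0x0f) / 16.0) *
	       (1 + !!(v & 0x10)) * (1 + !!(v & 0x20)) * (1 + !!(v & 0x40));
}

int main(void)
{
	/* 0x00 -> 1.00, 0x12 -> 2.25, 0x1e -> 3.75, matching the table */
	printf("%.2f %.2f %.2f\n", ov_gain_decode(0x00),
	       ov_gain_decode(0x12), ov_gain_decode(0x1e));
	return 0;
}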
@@ -798,7 +634,7 @@ static u8 ov_gain[] = {
};
/* Gain = (bit[8] + 1) * (bit[7] + 1) * (bit[6:0] * 0.03125) */
-static u16 micron1_gain[] = {
+static const u16 micron1_gain[] = {
/* 1x 1.25x 1.5x 1.75x */
0x0020, 0x0028, 0x0030, 0x0038,
/* 2x 2.25x 2.5x 2.75x */
@@ -819,7 +655,7 @@ static u16 micron1_gain[] = {
/* mt9m001 sensor uses a different gain formula than other micron sensors */
/* Gain = (bit[6] + 1) * (bit[5-0] * 0.125) */
-static u16 micron2_gain[] = {
+static const u16 micron2_gain[] = {
/* 1x 1.25x 1.5x 1.75x */
0x0008, 0x000a, 0x000c, 0x000e,
/* 2x 2.25x 2.5x 2.75x */
@@ -839,7 +675,7 @@ static u16 micron2_gain[] = {
};
/* Gain = .5 + bit[7:0] / 16 */
-static u8 hv7131r_gain[] = {
+static const u8 hv7131r_gain[] = {
0x08 /* 1x */, 0x0c /* 1.25x */, 0x10 /* 1.5x */, 0x14 /* 1.75x */,
0x18 /* 2x */, 0x1c /* 2.25x */, 0x20 /* 2.5x */, 0x24 /* 2.75x */,
0x28 /* 3x */, 0x2c /* 3.25x */, 0x30 /* 3.5x */, 0x34 /* 3.75x */,
@@ -850,7 +686,7 @@ static u8 hv7131r_gain[] = {
0x78 /* 8x */
};
-static struct i2c_reg_u8 soi968_init[] = {
+static const struct i2c_reg_u8 soi968_init[] = {
{0x0c, 0x00}, {0x0f, 0x1f},
{0x11, 0x80}, {0x38, 0x52}, {0x1e, 0x00},
{0x33, 0x08}, {0x35, 0x8c}, {0x36, 0x0c},
@@ -864,7 +700,7 @@ static struct i2c_reg_u8 soi968_init[] = {
{0x00, 0x00}, {0x01, 0x80}, {0x02, 0x80},
};
-static struct i2c_reg_u8 ov7660_init[] = {
+static const struct i2c_reg_u8 ov7660_init[] = {
{0x0e, 0x80}, {0x0d, 0x08}, {0x0f, 0xc3},
{0x04, 0xc3}, {0x10, 0x40}, {0x11, 0x40},
{0x12, 0x05}, {0x13, 0xba}, {0x14, 0x2a},
@@ -872,11 +708,11 @@ static struct i2c_reg_u8 ov7660_init[] = {
0x10, 0x61 and sd->hstart, vstart = 3, fixes ugly colored borders */
{0x17, 0x10}, {0x18, 0x61},
{0x37, 0x0f}, {0x38, 0x02}, {0x39, 0x43},
- {0x3a, 0x00}, {0x69, 0x90}, {0x2d, 0xf6},
- {0x2e, 0x0b}, {0x01, 0x78}, {0x02, 0x50},
+ {0x3a, 0x00}, {0x69, 0x90}, {0x2d, 0x00},
+ {0x2e, 0x00}, {0x01, 0x78}, {0x02, 0x50},
};
-static struct i2c_reg_u8 ov7670_init[] = {
+static const struct i2c_reg_u8 ov7670_init[] = {
{0x11, 0x80}, {0x3a, 0x04}, {0x12, 0x01},
{0x32, 0xb6}, {0x03, 0x0a}, {0x0c, 0x00}, {0x3e, 0x00},
{0x70, 0x3a}, {0x71, 0x35}, {0x72, 0x11}, {0x73, 0xf0},
@@ -933,7 +769,7 @@ static struct i2c_reg_u8 ov7670_init[] = {
{0x93, 0x00},
};
-static struct i2c_reg_u8 ov9650_init[] = {
+static const struct i2c_reg_u8 ov9650_init[] = {
{0x00, 0x00}, {0x01, 0x78},
{0x02, 0x78}, {0x03, 0x36}, {0x04, 0x03},
{0x05, 0x00}, {0x06, 0x00}, {0x08, 0x00},
@@ -963,7 +799,7 @@ static struct i2c_reg_u8 ov9650_init[] = {
{0xaa, 0x92}, {0xab, 0x0a},
};
-static struct i2c_reg_u8 ov9655_init[] = {
+static const struct i2c_reg_u8 ov9655_init[] = {
{0x0e, 0x61}, {0x11, 0x80}, {0x13, 0xba},
{0x14, 0x2e}, {0x16, 0x24}, {0x1e, 0x04}, {0x27, 0x08},
{0x28, 0x08}, {0x29, 0x15}, {0x2c, 0x08}, {0x34, 0x3d},
@@ -990,7 +826,7 @@ static struct i2c_reg_u8 ov9655_init[] = {
{0x04, 0x03}, {0x00, 0x13},
};
-static struct i2c_reg_u16 mt9v112_init[] = {
+static const struct i2c_reg_u16 mt9v112_init[] = {
{0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0020},
{0x34, 0xc019}, {0x0a, 0x0011}, {0x0b, 0x000b},
{0x20, 0x0703}, {0x35, 0x2022}, {0xf0, 0x0001},
@@ -1009,7 +845,7 @@ static struct i2c_reg_u16 mt9v112_init[] = {
{0x2c, 0x00ae}, {0x2d, 0x00ae}, {0x2e, 0x00ae},
};
-static struct i2c_reg_u16 mt9v111_init[] = {
+static const struct i2c_reg_u16 mt9v111_init[] = {
{0x01, 0x0004}, {0x0d, 0x0001}, {0x0d, 0x0000},
{0x01, 0x0001}, {0x05, 0x0004}, {0x2d, 0xe0a0},
{0x2e, 0x0c64}, {0x2f, 0x0064}, {0x06, 0x600e},
@@ -1019,7 +855,7 @@ static struct i2c_reg_u16 mt9v111_init[] = {
{0x0e, 0x0008}, {0x20, 0x0000}
};
-static struct i2c_reg_u16 mt9v011_init[] = {
+static const struct i2c_reg_u16 mt9v011_init[] = {
{0x07, 0x0002}, {0x0d, 0x0001}, {0x0d, 0x0000},
{0x01, 0x0008}, {0x02, 0x0016}, {0x03, 0x01e1},
{0x04, 0x0281}, {0x05, 0x0083}, {0x06, 0x0006},
@@ -1046,7 +882,7 @@ static struct i2c_reg_u16 mt9v011_init[] = {
{0x06, 0x0029}, {0x05, 0x0009},
};
-static struct i2c_reg_u16 mt9m001_init[] = {
+static const struct i2c_reg_u16 mt9m001_init[] = {
{0x0d, 0x0001},
{0x0d, 0x0000},
{0x04, 0x0500}, /* hres = 1280 */
@@ -1062,21 +898,21 @@ static struct i2c_reg_u16 mt9m001_init[] = {
{0x35, 0x0057},
};
-static struct i2c_reg_u16 mt9m111_init[] = {
+static const struct i2c_reg_u16 mt9m111_init[] = {
{0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0008},
{0xf0, 0x0001}, {0x3a, 0x4300}, {0x9b, 0x4300},
{0x06, 0x708e}, {0xf0, 0x0002}, {0x2e, 0x0a1e},
{0xf0, 0x0000},
};
-static struct i2c_reg_u16 mt9m112_init[] = {
+static const struct i2c_reg_u16 mt9m112_init[] = {
{0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0008},
{0xf0, 0x0001}, {0x3a, 0x4300}, {0x9b, 0x4300},
{0x06, 0x708e}, {0xf0, 0x0002}, {0x2e, 0x0a1e},
{0xf0, 0x0000},
};
-static struct i2c_reg_u8 hv7131r_init[] = {
+static const struct i2c_reg_u8 hv7131r_init[] = {
{0x02, 0x08}, {0x02, 0x00}, {0x01, 0x08},
{0x02, 0x00}, {0x20, 0x00}, {0x21, 0xd0},
{0x22, 0x00}, {0x23, 0x09}, {0x01, 0x08},
@@ -1167,7 +1003,7 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
* from the point of view of the bridge, the length
* includes the address
*/
- row[0] = 0x81 | (2 << 4);
+ row[0] = sd->i2c_intf | (2 << 4);
row[1] = sd->i2c_addr;
row[2] = reg;
row[3] = val;
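With the hard-coded 0x81 replaced by sd->i2c_intf, the 8-byte command i2c_w1() hands to the bridge looks roughly as sketched below; the meaning of the individual bytes is inferred only from the helpers in this file, so the comments are assumptions rather than a register-level description:

#include <stdio.h>

typedef unsigned char u8;

static void sketch_i2c_w1(u8 row[8], u8 i2c_intf, u8 i2c_addr, u8 reg, u8 val)
{
	row[0] = i2c_intf | (2 << 4);	/* bus-speed flag | transfer length */
	row[1] = i2c_addr;		/* sensor slave address */
	row[2] = reg;			/* sensor register */
	row[3] = val;			/* value to write */
	row[4] = 0;
	row[5] = 0;
	row[6] = 0;
	row[7] = 0x10;			/* "go" byte, as in i2c_r1()/i2c_r2() */
}

int main(void)
{
	u8 row[8];
	int i;

	sketch_i2c_w1(row, 0x80, 0x30, 0x12, 0x80);	/* arbitrary example */
	for (i = 0; i < 8; i++)
		printf("%02x ", row[i]);
	printf("\n");
	return 0;
}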
@@ -1180,7 +1016,7 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
}
static void i2c_w1_buf(struct gspca_dev *gspca_dev,
- struct i2c_reg_u8 *buf, int sz)
+ const struct i2c_reg_u8 *buf, int sz)
{
while (--sz >= 0) {
i2c_w1(gspca_dev, buf->reg, buf->val);
@@ -1197,7 +1033,7 @@ static void i2c_w2(struct gspca_dev *gspca_dev, u8 reg, u16 val)
* from the point of view of the bridge, the length
* includes the address
*/
- row[0] = 0x81 | (3 << 4);
+ row[0] = sd->i2c_intf | (3 << 4);
row[1] = sd->i2c_addr;
row[2] = reg;
row[3] = val >> 8;
@@ -1210,7 +1046,7 @@ static void i2c_w2(struct gspca_dev *gspca_dev, u8 reg, u16 val)
}
static void i2c_w2_buf(struct gspca_dev *gspca_dev,
- struct i2c_reg_u16 *buf, int sz)
+ const struct i2c_reg_u16 *buf, int sz)
{
while (--sz >= 0) {
i2c_w2(gspca_dev, buf->reg, buf->val);
@@ -1223,7 +1059,7 @@ static void i2c_r1(struct gspca_dev *gspca_dev, u8 reg, u8 *val)
struct sd *sd = (struct sd *) gspca_dev;
u8 row[8];
- row[0] = 0x81 | (1 << 4);
+ row[0] = sd->i2c_intf | (1 << 4);
row[1] = sd->i2c_addr;
row[2] = reg;
row[3] = 0;
@@ -1232,7 +1068,7 @@ static void i2c_r1(struct gspca_dev *gspca_dev, u8 reg, u8 *val)
row[6] = 0;
row[7] = 0x10;
i2c_w(gspca_dev, row);
- row[0] = 0x81 | (1 << 4) | 0x02;
+ row[0] = sd->i2c_intf | (1 << 4) | 0x02;
row[2] = 0;
i2c_w(gspca_dev, row);
reg_r(gspca_dev, 0x10c2, 5);
@@ -1244,7 +1080,7 @@ static void i2c_r2(struct gspca_dev *gspca_dev, u8 reg, u16 *val)
struct sd *sd = (struct sd *) gspca_dev;
u8 row[8];
- row[0] = 0x81 | (1 << 4);
+ row[0] = sd->i2c_intf | (1 << 4);
row[1] = sd->i2c_addr;
row[2] = reg;
row[3] = 0;
@@ -1253,7 +1089,7 @@ static void i2c_r2(struct gspca_dev *gspca_dev, u8 reg, u16 *val)
row[6] = 0;
row[7] = 0x10;
i2c_w(gspca_dev, row);
- row[0] = 0x81 | (2 << 4) | 0x02;
+ row[0] = sd->i2c_intf | (2 << 4) | 0x02;
row[2] = 0;
i2c_w(gspca_dev, row);
reg_r(gspca_dev, 0x10c2, 5);
@@ -1294,8 +1130,6 @@ static void ov9655_init_sensor(struct gspca_dev *gspca_dev)
if (gspca_dev->usb_err < 0)
pr_err("OV9655 sensor initialization failed\n");
- /* disable hflip and vflip */
- gspca_dev->ctrl_dis = (1 << HFLIP) | (1 << VFLIP);
sd->hstart = 1;
sd->vstart = 2;
}
@@ -1310,9 +1144,6 @@ static void soi968_init_sensor(struct gspca_dev *gspca_dev)
if (gspca_dev->usb_err < 0)
pr_err("SOI968 sensor initialization failed\n");
- /* disable hflip and vflip */
- gspca_dev->ctrl_dis = (1 << HFLIP) | (1 << VFLIP)
- | (1 << EXPOSURE);
sd->hstart = 60;
sd->vstart = 11;
}
@@ -1340,8 +1171,6 @@ static void ov7670_init_sensor(struct gspca_dev *gspca_dev)
if (gspca_dev->usb_err < 0)
pr_err("OV7670 sensor initialization failed\n");
- /* disable hflip and vflip */
- gspca_dev->ctrl_dis = (1 << HFLIP) | (1 << VFLIP);
sd->hstart = 0;
sd->vstart = 1;
}
@@ -1378,9 +1207,6 @@ static void mt9v_init_sensor(struct gspca_dev *gspca_dev)
pr_err("MT9V111 sensor initialization failed\n");
return;
}
- gspca_dev->ctrl_dis = (1 << EXPOSURE)
- | (1 << AUTOGAIN)
- | (1 << GAIN);
sd->hstart = 2;
sd->vstart = 2;
sd->sensor = SENSOR_MT9V111;
@@ -1422,8 +1248,6 @@ static void mt9m112_init_sensor(struct gspca_dev *gspca_dev)
if (gspca_dev->usb_err < 0)
pr_err("MT9M112 sensor initialization failed\n");
- gspca_dev->ctrl_dis = (1 << EXPOSURE) | (1 << AUTOGAIN)
- | (1 << GAIN);
sd->hstart = 0;
sd->vstart = 2;
}
@@ -1436,8 +1260,6 @@ static void mt9m111_init_sensor(struct gspca_dev *gspca_dev)
if (gspca_dev->usb_err < 0)
pr_err("MT9M111 sensor initialization failed\n");
- gspca_dev->ctrl_dis = (1 << EXPOSURE) | (1 << AUTOGAIN)
- | (1 << GAIN);
sd->hstart = 0;
sd->vstart = 2;
}
@@ -1470,8 +1292,6 @@ static void mt9m001_init_sensor(struct gspca_dev *gspca_dev)
if (gspca_dev->usb_err < 0)
pr_err("MT9M001 sensor initialization failed\n");
- /* disable hflip and vflip */
- gspca_dev->ctrl_dis = (1 << HFLIP) | (1 << VFLIP);
sd->hstart = 1;
sd->vstart = 1;
}
@@ -1488,20 +1308,18 @@ static void hv7131r_init_sensor(struct gspca_dev *gspca_dev)
sd->vstart = 1;
}
-static void set_cmatrix(struct gspca_dev *gspca_dev)
+static void set_cmatrix(struct gspca_dev *gspca_dev,
+ s32 brightness, s32 contrast, s32 satur, s32 hue)
{
- struct sd *sd = (struct sd *) gspca_dev;
- int satur;
- s32 hue_coord, hue_index = 180 + sd->ctrls[HUE].val;
+ s32 hue_coord, hue_index = 180 + hue;
u8 cmatrix[21];
memset(cmatrix, 0, sizeof cmatrix);
- cmatrix[2] = (sd->ctrls[CONTRAST].val * 0x25 / 0x100) + 0x26;
+ cmatrix[2] = (contrast * 0x25 / 0x100) + 0x26;
cmatrix[0] = 0x13 + (cmatrix[2] - 0x26) * 0x13 / 0x25;
cmatrix[4] = 0x07 + (cmatrix[2] - 0x26) * 0x07 / 0x25;
- cmatrix[18] = sd->ctrls[BRIGHTNESS].val - 0x80;
+ cmatrix[18] = brightness - 0x80;
- satur = sd->ctrls[SATURATION].val;
hue_coord = (hsv_red_x[hue_index] * satur) >> 8;
cmatrix[6] = hue_coord;
cmatrix[7] = (hue_coord >> 8) & 0x0f;
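The contrast/brightness part of the matrix can be checked in isolation; a standalone sketch with the default control values (the hue and saturation terms are omitted because they need the hsv_* tables):

#include <stdio.h>

int main(void)
{
	int contrast = 0x7f, brightness = 0x7f;
	unsigned char c0, c2, c4, c18;

	c2 = (contrast * 0x25 / 0x100) + 0x26;	/* 0x38 at the default */
	c0 = 0x13 + (c2 - 0x26) * 0x13 / 0x25;	/* 0x1c */
	c4 = 0x07 + (c2 - 0x26) * 0x07 / 0x25;	/* 0x0a */
	c18 = brightness - 0x80;		/* 0xff, i.e. -1 */

	printf("%02x %02x %02x %02x\n", c0, c2, c4, c18);
	return 0;
}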
@@ -1529,11 +1347,10 @@ static void set_cmatrix(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x10e1, cmatrix, 21);
}
-static void set_gamma(struct gspca_dev *gspca_dev)
+static void set_gamma(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 gamma[17];
- u8 gval = sd->ctrls[GAMMA].val * 0xb8 / 0x100;
+ u8 gval = val * 0xb8 / 0x100;
gamma[0] = 0x0a;
gamma[1] = 0x13 + (gval * (0xcb - 0x13) / 0xb8);
@@ -1556,26 +1373,21 @@ static void set_gamma(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x1190, gamma, 17);
}
-static void set_redblue(struct gspca_dev *gspca_dev)
+static void set_redblue(struct gspca_dev *gspca_dev, s32 blue, s32 red)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- reg_w1(gspca_dev, 0x118c, sd->ctrls[RED].val);
- reg_w1(gspca_dev, 0x118f, sd->ctrls[BLUE].val);
+ reg_w1(gspca_dev, 0x118c, red);
+ reg_w1(gspca_dev, 0x118f, blue);
}
-static void set_hvflip(struct gspca_dev *gspca_dev)
+static void set_hvflip(struct gspca_dev *gspca_dev, s32 hflip, s32 vflip)
{
- u8 value, tslb, hflip, vflip;
+ u8 value, tslb;
u16 value2;
struct sd *sd = (struct sd *) gspca_dev;
if ((sd->flags & FLIP_DETECT) && dmi_check_system(flip_dmi_table)) {
- hflip = !sd->ctrls[HFLIP].val;
- vflip = !sd->ctrls[VFLIP].val;
- } else {
- hflip = sd->ctrls[HFLIP].val;
- vflip = sd->ctrls[VFLIP].val;
+ hflip = !hflip;
+ vflip = !vflip;
}
switch (sd->sensor) {
@@ -1638,20 +1450,38 @@ static void set_hvflip(struct gspca_dev *gspca_dev)
}
}
-static void set_exposure(struct gspca_dev *gspca_dev)
+static void set_exposure(struct gspca_dev *gspca_dev, s32 expo)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 exp[8] = {0x81, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e};
- int expo;
+ u8 exp[8] = {sd->i2c_intf, sd->i2c_addr,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10};
+ int expo2;
+
+ if (gspca_dev->streaming)
+ exp[7] = 0x1e;
- expo = sd->ctrls[EXPOSURE].val;
switch (sd->sensor) {
case SENSOR_OV7660:
case SENSOR_OV7670:
case SENSOR_OV9655:
case SENSOR_OV9650:
+ if (expo > 547)
+ expo2 = 547;
+ else
+ expo2 = expo;
+ exp[0] |= (2 << 4);
+ exp[2] = 0x10; /* AECH */
+ exp[3] = expo2 >> 2;
+ exp[7] = 0x10;
+ i2c_w(gspca_dev, exp);
+ exp[2] = 0x04; /* COM1 */
+ exp[3] = expo2 & 0x0003;
+ exp[7] = 0x10;
+ i2c_w(gspca_dev, exp);
+ expo -= expo2;
+ exp[7] = 0x1e;
exp[0] |= (3 << 4);
- exp[2] = 0x2d;
+ exp[2] = 0x2d; /* ADVFL & ADVFH */
exp[3] = expo;
exp[4] = expo >> 8;
break;
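A worked example of how the OV76xx/OV96xx branch above splits the exposure value between the coarse AECH/COM1 fields and the ADVFL/ADVFH remainder (standalone, arbitrary value):

#include <stdio.h>

int main(void)
{
	int expo = 600, expo2;

	expo2 = expo > 547 ? 547 : expo;
	/* 600 -> AECH 0x88 (547 >> 2), COM1 low bits 3, remainder 53 */
	printf("AECH = 0x%02x, COM1 bits = %d, ADVFL/ADVFH = %d\n",
	       expo2 >> 2, expo2 & 0x0003, expo - expo2);
	return 0;
}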
@@ -1676,13 +1506,15 @@ static void set_exposure(struct gspca_dev *gspca_dev)
i2c_w(gspca_dev, exp);
}
-static void set_gain(struct gspca_dev *gspca_dev)
+static void set_gain(struct gspca_dev *gspca_dev, s32 g)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 gain[8] = {0x81, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d};
- int g;
+ u8 gain[8] = {sd->i2c_intf, sd->i2c_addr,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10};
+
+ if (gspca_dev->streaming)
+ gain[7] = 0x15; /* or 1d ? */
- g = sd->ctrls[GAIN].val;
switch (sd->sensor) {
case SENSOR_OV7660:
case SENSOR_OV7670:
@@ -1721,11 +1553,11 @@ static void set_gain(struct gspca_dev *gspca_dev)
i2c_w(gspca_dev, gain);
}
-static void set_quality(struct gspca_dev *gspca_dev)
+static void set_quality(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- jpeg_set_qual(sd->jpeg_hdr, sd->ctrls[QUALITY].val);
+ jpeg_set_qual(sd->jpeg_hdr, val);
reg_w1(gspca_dev, 0x1061, 0x01); /* stop transfer */
reg_w1(gspca_dev, 0x10e0, sd->fmt | 0x20); /* write QTAB */
reg_w(gspca_dev, 0x1100, &sd->jpeg_hdr[JPEG_QT0_OFFSET], 64);
@@ -1827,6 +1659,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->sensor = id->driver_info >> 8;
sd->i2c_addr = id->driver_info;
sd->flags = id->driver_info >> 16;
+ sd->i2c_intf = 0x80; /* i2c 100 Kb/s */
switch (sd->sensor) {
case SENSOR_MT9M112:
@@ -1840,6 +1673,9 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = mono_mode;
cam->nmodes = ARRAY_SIZE(mono_mode);
break;
+ case SENSOR_HV7131R:
+ sd->i2c_intf = 0x81; /* i2c 400 Kb/s */
+ /* fall thru */
default:
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
@@ -1850,13 +1686,133 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->older_step = 0;
sd->exposure_step = 16;
- gspca_dev->cam.ctrls = sd->ctrls;
-
INIT_WORK(&sd->work, qual_upd);
return 0;
}
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ /* color control cluster */
+ case V4L2_CID_BRIGHTNESS:
+ set_cmatrix(gspca_dev, sd->brightness->val,
+ sd->contrast->val, sd->saturation->val, sd->hue->val);
+ break;
+ case V4L2_CID_GAMMA:
+ set_gamma(gspca_dev, ctrl->val);
+ break;
+ /* blue/red balance cluster */
+ case V4L2_CID_BLUE_BALANCE:
+ set_redblue(gspca_dev, sd->blue->val, sd->red->val);
+ break;
+ /* h/vflip cluster */
+ case V4L2_CID_HFLIP:
+ set_hvflip(gspca_dev, sd->hflip->val, sd->vflip->val);
+ break;
+ /* standalone exposure control */
+ case V4L2_CID_EXPOSURE:
+ set_exposure(gspca_dev, ctrl->val);
+ break;
+ /* standalone gain control */
+ case V4L2_CID_GAIN:
+ set_gain(gspca_dev, ctrl->val);
+ break;
+ /* autogain + exposure or gain control cluster */
+ case V4L2_CID_AUTOGAIN:
+ if (sd->sensor == SENSOR_SOI968)
+ set_gain(gspca_dev, sd->gain->val);
+ else
+ set_exposure(gspca_dev, sd->exposure->val);
+ break;
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ set_quality(gspca_dev, ctrl->val);
+ break;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 13);
+
+ sd->brightness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
+ sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 127);
+ sd->saturation = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 127);
+ sd->hue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HUE, -180, 180, 1, 0);
+ v4l2_ctrl_cluster(4, &sd->brightness);
+
+ sd->gamma = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAMMA, 0, 255, 1, 0x10);
+
+ sd->blue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, 0, 127, 1, 0x28);
+ sd->red = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_RED_BALANCE, 0, 127, 1, 0x28);
+ v4l2_ctrl_cluster(2, &sd->blue);
+
+ if (sd->sensor != SENSOR_OV9655 && sd->sensor != SENSOR_SOI968 &&
+ sd->sensor != SENSOR_OV7670 && sd->sensor != SENSOR_MT9M001 &&
+ sd->sensor != SENSOR_MT9VPRB) {
+ sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_cluster(2, &sd->hflip);
+ }
+
+ if (sd->sensor != SENSOR_SOI968 && sd->sensor != SENSOR_MT9VPRB &&
+ sd->sensor != SENSOR_MT9M112 && sd->sensor != SENSOR_MT9M111 &&
+ sd->sensor != SENSOR_MT9V111)
+ sd->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 0x1780, 1, 0x33);
+
+ if (sd->sensor != SENSOR_MT9VPRB && sd->sensor != SENSOR_MT9M112 &&
+ sd->sensor != SENSOR_MT9M111 && sd->sensor != SENSOR_MT9V111) {
+ sd->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 28, 1, 0);
+ sd->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ if (sd->sensor == SENSOR_SOI968)
+			/*
+			 * This sensor doesn't have the exposure control and
+			 * autogain is clustered with gain instead. This works
+			 * because sd->exposure == NULL.
+			 */
+ v4l2_ctrl_auto_cluster(3, &sd->autogain, 0, false);
+ else
+ /* Otherwise autogain is clustered with exposure. */
+ v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, false);
+ }
+
+ sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ return 0;
+}
+
static int sd_init(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1949,7 +1905,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
pr_err("Unsupported sensor\n");
gspca_dev->usb_err = -ENODEV;
}
-
return gspca_dev->usb_err;
}
@@ -2025,8 +1980,8 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
if (intf->num_altsetting != 9) {
pr_warn("sn9c20x camera with unknown number of alt "
- "settings (%d), please report!\n",
- intf->num_altsetting);
+ "settings (%d), please report!\n",
+ intf->num_altsetting);
gspca_dev->alt = intf->num_altsetting;
return 0;
}
@@ -2067,7 +2022,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
jpeg_define(sd->jpeg_hdr, height, width,
0x21);
- jpeg_set_qual(sd->jpeg_hdr, sd->ctrls[QUALITY].val);
+ jpeg_set_qual(sd->jpeg_hdr, v4l2_ctrl_g_ctrl(sd->jpegqual));
if (mode & MODE_RAW)
fmt = 0x2d;
@@ -2104,12 +2059,17 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w1(gspca_dev, 0x1189, scale);
reg_w1(gspca_dev, 0x10e0, fmt);
- set_cmatrix(gspca_dev);
- set_gamma(gspca_dev);
- set_redblue(gspca_dev);
- set_gain(gspca_dev);
- set_exposure(gspca_dev);
- set_hvflip(gspca_dev);
+ set_cmatrix(gspca_dev, v4l2_ctrl_g_ctrl(sd->brightness),
+ v4l2_ctrl_g_ctrl(sd->contrast),
+ v4l2_ctrl_g_ctrl(sd->saturation),
+ v4l2_ctrl_g_ctrl(sd->hue));
+ set_gamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma));
+ set_redblue(gspca_dev, v4l2_ctrl_g_ctrl(sd->blue),
+ v4l2_ctrl_g_ctrl(sd->red));
+ set_gain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain));
+ set_exposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
+ set_hvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip),
+ v4l2_ctrl_g_ctrl(sd->vflip));
reg_w1(gspca_dev, 0x1007, 0x20);
reg_w1(gspca_dev, 0x1061, 0x03);
@@ -2148,6 +2108,9 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
static void do_autoexposure(struct gspca_dev *gspca_dev, u16 avg_lum)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 cur_exp = v4l2_ctrl_g_ctrl(sd->exposure);
+ s32 max = sd->exposure->maximum - sd->exposure_step;
+ s32 min = sd->exposure->minimum + sd->exposure_step;
s16 new_exp;
/*
@@ -2156,16 +2119,15 @@ static void do_autoexposure(struct gspca_dev *gspca_dev, u16 avg_lum)
* and exposure steps
*/
if (avg_lum < MIN_AVG_LUM) {
- if (sd->ctrls[EXPOSURE].val > 0x1770)
+ if (cur_exp > max)
return;
- new_exp = sd->ctrls[EXPOSURE].val + sd->exposure_step;
- if (new_exp > 0x1770)
- new_exp = 0x1770;
- if (new_exp < 0x10)
- new_exp = 0x10;
- sd->ctrls[EXPOSURE].val = new_exp;
- set_exposure(gspca_dev);
+ new_exp = cur_exp + sd->exposure_step;
+ if (new_exp > max)
+ new_exp = max;
+ if (new_exp < min)
+ new_exp = min;
+ v4l2_ctrl_s_ctrl(sd->exposure, new_exp);
sd->older_step = sd->old_step;
sd->old_step = 1;
@@ -2176,15 +2138,14 @@ static void do_autoexposure(struct gspca_dev *gspca_dev, u16 avg_lum)
sd->exposure_step += 2;
}
if (avg_lum > MAX_AVG_LUM) {
- if (sd->ctrls[EXPOSURE].val < 0x10)
+ if (cur_exp < min)
return;
- new_exp = sd->ctrls[EXPOSURE].val - sd->exposure_step;
- if (new_exp > 0x1700)
- new_exp = 0x1770;
- if (new_exp < 0x10)
- new_exp = 0x10;
- sd->ctrls[EXPOSURE].val = new_exp;
- set_exposure(gspca_dev);
+ new_exp = cur_exp - sd->exposure_step;
+ if (new_exp > max)
+ new_exp = max;
+ if (new_exp < min)
+ new_exp = min;
+ v4l2_ctrl_s_ctrl(sd->exposure, new_exp);
sd->older_step = sd->old_step;
sd->old_step = 0;
@@ -2198,19 +2159,12 @@ static void do_autoexposure(struct gspca_dev *gspca_dev, u16 avg_lum)
static void do_autogain(struct gspca_dev *gspca_dev, u16 avg_lum)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 cur_gain = v4l2_ctrl_g_ctrl(sd->gain);
- if (avg_lum < MIN_AVG_LUM) {
- if (sd->ctrls[GAIN].val + 1 <= 28) {
- sd->ctrls[GAIN].val++;
- set_gain(gspca_dev);
- }
- }
- if (avg_lum > MAX_AVG_LUM) {
- if (sd->ctrls[GAIN].val > 0) {
- sd->ctrls[GAIN].val--;
- set_gain(gspca_dev);
- }
- }
+ if (avg_lum < MIN_AVG_LUM && cur_gain < sd->gain->maximum)
+ v4l2_ctrl_s_ctrl(sd->gain, cur_gain + 1);
+ if (avg_lum > MAX_AVG_LUM && cur_gain > sd->gain->minimum)
+ v4l2_ctrl_s_ctrl(sd->gain, cur_gain - 1);
}
static void sd_dqcallback(struct gspca_dev *gspca_dev)
@@ -2218,7 +2172,7 @@ static void sd_dqcallback(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int avg_lum;
- if (!sd->ctrls[AUTOGAIN].val)
+ if (!v4l2_ctrl_g_ctrl(sd->autogain))
return;
avg_lum = atomic_read(&sd->avg_lum);
@@ -2234,10 +2188,11 @@ static void qual_upd(struct work_struct *work)
{
struct sd *sd = container_of(work, struct sd, work);
struct gspca_dev *gspca_dev = &sd->gspca_dev;
+ s32 qual = v4l2_ctrl_g_ctrl(sd->jpegqual);
mutex_lock(&gspca_dev->usb_lock);
- PDEBUG(D_STREAM, "qual_upd %d%%", sd->ctrls[QUALITY].val);
- set_quality(gspca_dev);
+ PDEBUG(D_STREAM, "qual_upd %d%%", qual);
+ set_quality(gspca_dev, qual);
mutex_unlock(&gspca_dev->usb_lock);
}
@@ -2286,14 +2241,18 @@ static void transfer_check(struct gspca_dev *gspca_dev,
if (new_qual != 0) {
sd->nchg += new_qual;
if (sd->nchg < -6 || sd->nchg >= 12) {
+			/*
+			 * Note: we are in interrupt context, so we can't
+			 * use v4l2_ctrl_g/s_ctrl here. Access the value
+			 * directly instead.
+			 */
+ s32 curqual = sd->jpegqual->cur.val;
sd->nchg = 0;
- new_qual += sd->ctrls[QUALITY].val;
- if (new_qual < QUALITY_MIN)
- new_qual = QUALITY_MIN;
- else if (new_qual > QUALITY_MAX)
- new_qual = QUALITY_MAX;
- if (new_qual != sd->ctrls[QUALITY].val) {
- sd->ctrls[QUALITY].val = new_qual;
+ new_qual += curqual;
+ if (new_qual < sd->jpegqual->minimum)
+ new_qual = sd->jpegqual->minimum;
+ else if (new_qual > sd->jpegqual->maximum)
+ new_qual = sd->jpegqual->maximum;
+ if (new_qual != curqual) {
+ sd->jpegqual->cur.val = new_qual;
queue_work(sd->work_thread, &sd->work);
}
}
@@ -2309,7 +2268,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
int avg_lum, is_jpeg;
- static u8 frame_header[] =
+ static const u8 frame_header[] =
{0xff, 0xff, 0x00, 0xc4, 0xc4, 0x96};
is_jpeg = (sd->fmt & 0x03) == 0;
@@ -2373,10 +2332,9 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = KBUILD_MODNAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.isoc_init = sd_isoc_init,
.start = sd_start,
.stopN = sd_stopN,
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 6a1148d7fe92..e2bdf8f632f4 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -1000,6 +1000,8 @@ static void setfreq(struct gspca_dev *gspca_dev)
}
}
+#define WANT_REGULAR_AUTOGAIN
+#define WANT_COARSE_EXPO_AUTOGAIN
#include "autogain_functions.h"
static void do_autogain(struct gspca_dev *gspca_dev)
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 863c755dd2b7..4d1696d1a7f4 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -2800,10 +2800,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
}
}
-/* !! coarse_grained_expo_autogain is not used !! */
-#define exp_too_low_cnt bridge
-#define exp_too_high_cnt sensor
-
+#define WANT_REGULAR_AUTOGAIN
#include "autogain_functions.h"
static void do_autogain(struct gspca_dev *gspca_dev)
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index 2fe3c29bd6b7..04f54654a026 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -232,7 +232,11 @@ static void sq905_dostream(struct work_struct *work)
frame_sz = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].sizeimage
+ FRAME_HEADER_LEN;
- while (gspca_dev->present && gspca_dev->streaming) {
+ while (gspca_dev->dev && gspca_dev->streaming) {
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ break;
+#endif
/* request some data and then read it until we have
* a complete frame. */
bytes_left = frame_sz;
@@ -242,7 +246,7 @@ static void sq905_dostream(struct work_struct *work)
we must finish reading an entire frame, otherwise the
next time we stream we start reading in the middle of a
frame. */
- while (bytes_left > 0 && gspca_dev->present) {
+ while (bytes_left > 0 && gspca_dev->dev) {
data_len = bytes_left > SQ905_MAX_TRANSFER ?
SQ905_MAX_TRANSFER : bytes_left;
ret = sq905_read_data(gspca_dev, buffer, data_len, 1);
@@ -274,7 +278,7 @@ static void sq905_dostream(struct work_struct *work)
gspca_frame_add(gspca_dev, LAST_PACKET,
NULL, 0);
}
- if (gspca_dev->present) {
+ if (gspca_dev->dev) {
/* acknowledge the frame */
mutex_lock(&gspca_dev->usb_lock);
ret = sq905_ack_frame(gspca_dev);
@@ -284,7 +288,7 @@ static void sq905_dostream(struct work_struct *work)
}
}
quit_stream:
- if (gspca_dev->present) {
+ if (gspca_dev->dev) {
mutex_lock(&gspca_dev->usb_lock);
sq905_command(gspca_dev, SQ905_CLEAR);
mutex_unlock(&gspca_dev->usb_lock);
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index ae783634712f..f34ddb0570c8 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -150,7 +150,11 @@ static void sq905c_dostream(struct work_struct *work)
goto quit_stream;
}
- while (gspca_dev->present && gspca_dev->streaming) {
+ while (gspca_dev->dev && gspca_dev->streaming) {
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ break;
+#endif
/* Request the header, which tells the size to download */
ret = usb_bulk_msg(gspca_dev->dev,
usb_rcvbulkpipe(gspca_dev->dev, 0x81),
@@ -169,7 +173,7 @@ static void sq905c_dostream(struct work_struct *work)
packet_type = FIRST_PACKET;
gspca_frame_add(gspca_dev, packet_type,
buffer, FRAME_HEADER_LEN);
- while (bytes_left > 0 && gspca_dev->present) {
+ while (bytes_left > 0 && gspca_dev->dev) {
data_len = bytes_left > SQ905C_MAX_TRANSFER ?
SQ905C_MAX_TRANSFER : bytes_left;
ret = usb_bulk_msg(gspca_dev->dev,
@@ -191,7 +195,7 @@ static void sq905c_dostream(struct work_struct *work)
}
}
quit_stream:
- if (gspca_dev->present) {
+ if (gspca_dev->dev) {
mutex_lock(&gspca_dev->usb_lock);
sq905c_command(gspca_dev, SQ905C_CLEAR, 0);
mutex_unlock(&gspca_dev->usb_lock);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index 91d99b4cc57b..999ec7764449 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -261,6 +261,17 @@ static int stv06xx_init(struct gspca_dev *gspca_dev)
return (err < 0) ? err : 0;
}
+/* this function is called at probe time */
+static int stv06xx_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ PDEBUG(D_PROBE, "Initializing controls");
+
+ gspca_dev->vdev.ctrl_handler = &gspca_dev->ctrl_handler;
+ return sd->sensor->init_controls(sd);
+}
+
/* Start the camera */
static int stv06xx_start(struct gspca_dev *gspca_dev)
{
@@ -512,6 +523,7 @@ static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.config = stv06xx_config,
.init = stv06xx_init,
+ .init_controls = stv06xx_init_controls,
.start = stv06xx_start,
.stopN = stv06xx_stopN,
.pkt_scan = stv06xx_pkt_scan,
@@ -530,9 +542,8 @@ static int stv06xx_config(struct gspca_dev *gspca_dev,
PDEBUG(D_PROBE, "Configuring camera");
- sd->desc = sd_desc;
sd->bridge = id->driver_info;
- gspca_dev->sd_desc = &sd->desc;
+ gspca_dev->sd_desc = &sd_desc;
if (dump_bridge)
stv06xx_dump_bridge(sd);
@@ -594,11 +605,12 @@ static void sd_disconnect(struct usb_interface *intf)
{
struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
struct sd *sd = (struct sd *) gspca_dev;
+ void *priv = sd->sensor_priv;
PDEBUG(D_PROBE, "Disconnecting the stv06xx device");
- if (sd->sensor->disconnect)
- sd->sensor->disconnect(sd);
+ sd->sensor = NULL;
gspca_disconnect(intf);
+ kfree(priv);
}
static struct usb_driver sd_driver = {
@@ -609,6 +621,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.h b/drivers/media/video/gspca/stv06xx/stv06xx.h
index d270a5981afe..34957a4ec150 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.h
@@ -89,9 +89,6 @@ struct sd {
/* A pointer to the currently connected sensor */
const struct stv06xx_sensor *sensor;
- /* A pointer to the sd_desc struct */
- struct sd_desc desc;
-
/* Sensor private data */
void *sensor_priv;
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
index a8698b7a7566..06fa54c5efb2 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
@@ -32,36 +32,6 @@
#include "stv06xx_hdcs.h"
-static const struct ctrl hdcs1x00_ctrl[] = {
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = HDCS_DEFAULT_EXPOSURE,
- .flags = V4L2_CTRL_FLAG_SLIDER
- },
- .set = hdcs_set_exposure,
- .get = hdcs_get_exposure
- }, {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "gain",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = HDCS_DEFAULT_GAIN,
- .flags = V4L2_CTRL_FLAG_SLIDER
- },
- .set = hdcs_set_gain,
- .get = hdcs_get_gain
- }
-};
-
static struct v4l2_pix_format hdcs1x00_mode[] = {
{
HDCS_1X00_DEF_WIDTH,
@@ -76,36 +46,6 @@ static struct v4l2_pix_format hdcs1x00_mode[] = {
}
};
-static const struct ctrl hdcs1020_ctrl[] = {
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
- .minimum = 0x00,
- .maximum = 0xffff,
- .step = 0x1,
- .default_value = HDCS_DEFAULT_EXPOSURE,
- .flags = V4L2_CTRL_FLAG_SLIDER
- },
- .set = hdcs_set_exposure,
- .get = hdcs_get_exposure
- }, {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "gain",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = HDCS_DEFAULT_GAIN,
- .flags = V4L2_CTRL_FLAG_SLIDER
- },
- .set = hdcs_set_gain,
- .get = hdcs_get_gain
- }
-};
-
static struct v4l2_pix_format hdcs1020_mode[] = {
{
HDCS_1020_DEF_WIDTH,
@@ -150,7 +90,6 @@ struct hdcs {
} exp;
int psmp;
- u8 exp_cache, gain_cache;
};
static int hdcs_reg_write_seq(struct sd *sd, u8 reg, u8 *vals, u8 len)
@@ -232,16 +171,6 @@ static int hdcs_reset(struct sd *sd)
return err;
}
-static int hdcs_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- struct hdcs *hdcs = sd->sensor_priv;
-
- *val = hdcs->exp_cache;
-
- return 0;
-}
-
static int hdcs_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -260,9 +189,6 @@ static int hdcs_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
int cycles, err;
u8 exp[14];
- val &= 0xff;
- hdcs->exp_cache = val;
-
cycles = val * HDCS_CLK_FREQ_MHZ * 257;
ct = hdcs->exp.cto + hdcs->psmp + (HDCS_ADC_START_SIG_DUR + 2);
@@ -336,12 +262,9 @@ static int hdcs_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
static int hdcs_set_gains(struct sd *sd, u8 g)
{
- struct hdcs *hdcs = sd->sensor_priv;
int err;
u8 gains[4];
- hdcs->gain_cache = g;
-
/* the voltage gain Av = (1 + 19 * val / 127) * (1 + bit7) */
if (g > 127)
g = 0x80 | (g / 2);
@@ -352,17 +275,7 @@ static int hdcs_set_gains(struct sd *sd, u8 g)
gains[3] = g;
err = hdcs_reg_write_seq(sd, HDCS_ERECPGA, gains, 4);
- return err;
-}
-
-static int hdcs_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- struct hdcs *hdcs = sd->sensor_priv;
-
- *val = hdcs->gain_cache;
-
- return 0;
+ return err;
}
static int hdcs_set_gain(struct gspca_dev *gspca_dev, __s32 val)
@@ -420,6 +333,39 @@ static int hdcs_set_size(struct sd *sd,
return err;
}
+static int hdcs_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ int err = -EINVAL;
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ err = hdcs_set_gain(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ err = hdcs_set_exposure(gspca_dev, ctrl->val);
+ break;
+ }
+ return err;
+}
+
+static const struct v4l2_ctrl_ops hdcs_ctrl_ops = {
+ .s_ctrl = hdcs_s_ctrl,
+};
+
+static int hdcs_init_controls(struct sd *sd)
+{
+ struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler;
+
+ v4l2_ctrl_handler_init(hdl, 2);
+ v4l2_ctrl_new_std(hdl, &hdcs_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 0xff, 1, HDCS_DEFAULT_EXPOSURE);
+ v4l2_ctrl_new_std(hdl, &hdcs_ctrl_ops,
+ V4L2_CID_GAIN, 0, 0xff, 1, HDCS_DEFAULT_GAIN);
+ return hdl->error;
+}
+
static int hdcs_probe_1x00(struct sd *sd)
{
struct hdcs *hdcs;
@@ -434,8 +380,6 @@ static int hdcs_probe_1x00(struct sd *sd)
sd->gspca_dev.cam.cam_mode = hdcs1x00_mode;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(hdcs1x00_mode);
- sd->desc.ctrls = hdcs1x00_ctrl;
- sd->desc.nctrls = ARRAY_SIZE(hdcs1x00_ctrl);
hdcs = kmalloc(sizeof(struct hdcs), GFP_KERNEL);
if (!hdcs)
@@ -493,8 +437,6 @@ static int hdcs_probe_1020(struct sd *sd)
sd->gspca_dev.cam.cam_mode = hdcs1020_mode;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(hdcs1020_mode);
- sd->desc.ctrls = hdcs1020_ctrl;
- sd->desc.nctrls = ARRAY_SIZE(hdcs1020_ctrl);
hdcs = kmalloc(sizeof(struct hdcs), GFP_KERNEL);
if (!hdcs)
@@ -537,12 +479,6 @@ static int hdcs_stop(struct sd *sd)
return hdcs_set_state(sd, HDCS_STATE_SLEEP);
}
-static void hdcs_disconnect(struct sd *sd)
-{
- PDEBUG(D_PROBE, "Disconnecting the sensor");
- kfree(sd->sensor_priv);
-}
-
static int hdcs_init(struct sd *sd)
{
struct hdcs *hdcs = sd->sensor_priv;
@@ -587,16 +523,7 @@ static int hdcs_init(struct sd *sd)
if (err < 0)
return err;
- err = hdcs_set_gains(sd, HDCS_DEFAULT_GAIN);
- if (err < 0)
- return err;
-
- err = hdcs_set_size(sd, hdcs->array.width, hdcs->array.height);
- if (err < 0)
- return err;
-
- err = hdcs_set_exposure(&sd->gspca_dev, HDCS_DEFAULT_EXPOSURE);
- return err;
+ return hdcs_set_size(sd, hdcs->array.width, hdcs->array.height);
}
static int hdcs_dump(struct sd *sd)
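
The gain path kept by this patch encodes the HDCS voltage gain as Av = (1 + 19 * val / 127) * (1 + bit7), folding requests above 127 into the doubled range by setting bit 7 and halving the value. A standalone worked example of that encoding; the hdcs_encode_gain helper is hypothetical and only mirrors the arithmetic in hdcs_set_gains above:

/* Worked example of the HDCS gain encoding:
 * Av = (1 + 19 * val / 127) * (1 + bit7); values above 127 are halved
 * and flagged with bit 7, which doubles the analog gain stage. */
#include <stdio.h>

static unsigned char hdcs_encode_gain(unsigned g)
{
        if (g > 127)
                g = 0x80 | (g / 2);
        return g;
}

int main(void)
{
        unsigned g = 200;                       /* requested control value */
        unsigned char reg = hdcs_encode_gain(g);
        double av = (1.0 + 19.0 * (reg & 0x7f) / 127.0) * ((reg & 0x80) ? 2 : 1);

        printf("val=%u -> reg=0x%02x, Av=%.2f\n", g, reg, av);
        return 0;
}
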
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
index a14a84a5079b..1ba9158d0102 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
@@ -131,14 +131,12 @@ static int hdcs_probe_1x00(struct sd *sd);
static int hdcs_probe_1020(struct sd *sd);
static int hdcs_start(struct sd *sd);
static int hdcs_init(struct sd *sd);
+static int hdcs_init_controls(struct sd *sd);
static int hdcs_stop(struct sd *sd);
static int hdcs_dump(struct sd *sd);
-static void hdcs_disconnect(struct sd *sd);
-static int hdcs_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
static int hdcs_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
static int hdcs_set_gain(struct gspca_dev *gspca_dev, __s32 val);
-static int hdcs_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
const struct stv06xx_sensor stv06xx_sensor_hdcs1x00 = {
.name = "HP HDCS-1000/1100",
@@ -152,10 +150,10 @@ const struct stv06xx_sensor stv06xx_sensor_hdcs1x00 = {
.max_packet_size = { 847 },
.init = hdcs_init,
+ .init_controls = hdcs_init_controls,
.probe = hdcs_probe_1x00,
.start = hdcs_start,
.stop = hdcs_stop,
- .disconnect = hdcs_disconnect,
.dump = hdcs_dump,
};
@@ -171,6 +169,7 @@ const struct stv06xx_sensor stv06xx_sensor_hdcs1020 = {
.max_packet_size = { 847 },
.init = hdcs_init,
+ .init_controls = hdcs_init_controls,
.probe = hdcs_probe_1020,
.start = hdcs_start,
.stop = hdcs_stop,
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c
index 26f14fc4a135..cdfc3d05ab6b 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c
@@ -48,105 +48,16 @@
#include "stv06xx_pb0100.h"
-static const struct ctrl pb0100_ctrl[] = {
-#define GAIN_IDX 0
- {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 128
- },
- .set = pb0100_set_gain,
- .get = pb0100_get_gain
- },
-#define RED_BALANCE_IDX 1
- {
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Balance",
- .minimum = -255,
- .maximum = 255,
- .step = 1,
- .default_value = 0
- },
- .set = pb0100_set_red_balance,
- .get = pb0100_get_red_balance
- },
-#define BLUE_BALANCE_IDX 2
- {
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Balance",
- .minimum = -255,
- .maximum = 255,
- .step = 1,
- .default_value = 0
- },
- .set = pb0100_set_blue_balance,
- .get = pb0100_get_blue_balance
- },
-#define EXPOSURE_IDX 3
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 511,
- .step = 1,
- .default_value = 12
- },
- .set = pb0100_set_exposure,
- .get = pb0100_get_exposure
- },
-#define AUTOGAIN_IDX 4
- {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Automatic Gain and Exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
- },
- .set = pb0100_set_autogain,
- .get = pb0100_get_autogain
- },
-#define AUTOGAIN_TARGET_IDX 5
- {
- {
- .id = V4L2_CTRL_CLASS_USER + 0x1000,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Automatic Gain Target",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 128
- },
- .set = pb0100_set_autogain_target,
- .get = pb0100_get_autogain_target
- },
-#define NATURAL_IDX 6
- {
- {
- .id = V4L2_CTRL_CLASS_USER + 0x1001,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Natural Light Source",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
- },
- .set = pb0100_set_natural,
- .get = pb0100_get_natural
- }
+struct pb0100_ctrls {
+ struct { /* one big happy control cluster... */
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *gain;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *red;
+ struct v4l2_ctrl *blue;
+ struct v4l2_ctrl *natural;
+ };
+ struct v4l2_ctrl *target;
};
static struct v4l2_pix_format pb0100_mode[] = {
@@ -174,38 +85,104 @@ static struct v4l2_pix_format pb0100_mode[] = {
}
};
+static int pb0100_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct pb0100_ctrls *ctrls = sd->sensor_priv;
+ int err = -EINVAL;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ err = pb0100_set_autogain(gspca_dev, ctrl->val);
+ if (err)
+ break;
+ if (ctrl->val)
+ break;
+ err = pb0100_set_gain(gspca_dev, ctrls->gain->val);
+ if (err)
+ break;
+ err = pb0100_set_exposure(gspca_dev, ctrls->exposure->val);
+ break;
+ case V4L2_CTRL_CLASS_USER + 0x1001:
+ err = pb0100_set_autogain_target(gspca_dev, ctrl->val);
+ break;
+ }
+ return err;
+}
+
+static const struct v4l2_ctrl_ops pb0100_ctrl_ops = {
+ .s_ctrl = pb0100_s_ctrl,
+};
+
+static int pb0100_init_controls(struct sd *sd)
+{
+ struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler;
+ struct pb0100_ctrls *ctrls;
+ static const struct v4l2_ctrl_config autogain_target = {
+ .ops = &pb0100_ctrl_ops,
+ .id = V4L2_CTRL_CLASS_USER + 0x1000,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Automatic Gain Target",
+ .max = 255,
+ .step = 1,
+ .def = 128,
+ };
+ static const struct v4l2_ctrl_config natural_light = {
+ .ops = &pb0100_ctrl_ops,
+ .id = V4L2_CTRL_CLASS_USER + 0x1001,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Natural Light Source",
+ .max = 1,
+ .step = 1,
+ .def = 1,
+ };
+
+ ctrls = kzalloc(sizeof(*ctrls), GFP_KERNEL);
+ if (!ctrls)
+ return -ENOMEM;
+
+ v4l2_ctrl_handler_init(hdl, 6);
+ ctrls->autogain = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ ctrls->exposure = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 511, 1, 12);
+ ctrls->gain = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
+ V4L2_CID_GAIN, 0, 255, 1, 128);
+ ctrls->red = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
+ V4L2_CID_RED_BALANCE, -255, 255, 1, 0);
+ ctrls->blue = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, -255, 255, 1, 0);
+ ctrls->natural = v4l2_ctrl_new_custom(hdl, &natural_light, NULL);
+ ctrls->target = v4l2_ctrl_new_custom(hdl, &autogain_target, NULL);
+ if (hdl->error) {
+ kfree(ctrls);
+ return hdl->error;
+ }
+ sd->sensor_priv = ctrls;
+ v4l2_ctrl_auto_cluster(5, &ctrls->autogain, 0, false);
+ return 0;
+}
+
static int pb0100_probe(struct sd *sd)
{
u16 sensor;
- int i, err;
- s32 *sensor_settings;
+ int err;
err = stv06xx_read_sensor(sd, PB_IDENT, &sensor);
if (err < 0)
return -ENODEV;
+ if ((sensor >> 8) != 0x64)
+ return -ENODEV;
- if ((sensor >> 8) == 0x64) {
- sensor_settings = kmalloc(
- ARRAY_SIZE(pb0100_ctrl) * sizeof(s32),
- GFP_KERNEL);
- if (!sensor_settings)
- return -ENOMEM;
-
- pr_info("Photobit pb0100 sensor detected\n");
-
- sd->gspca_dev.cam.cam_mode = pb0100_mode;
- sd->gspca_dev.cam.nmodes = ARRAY_SIZE(pb0100_mode);
- sd->desc.ctrls = pb0100_ctrl;
- sd->desc.nctrls = ARRAY_SIZE(pb0100_ctrl);
- for (i = 0; i < sd->desc.nctrls; i++)
- sensor_settings[i] = pb0100_ctrl[i].qctrl.default_value;
- sd->sensor_priv = sensor_settings;
+ pr_info("Photobit pb0100 sensor detected\n");
- return 0;
- }
+ sd->gspca_dev.cam.cam_mode = pb0100_mode;
+ sd->gspca_dev.cam.nmodes = ARRAY_SIZE(pb0100_mode);
- return -ENODEV;
+ return 0;
}
static int pb0100_start(struct sd *sd)
@@ -214,7 +191,6 @@ static int pb0100_start(struct sd *sd)
struct usb_host_interface *alt;
struct usb_interface *intf;
struct cam *cam = &sd->gspca_dev.cam;
- s32 *sensor_settings = sd->sensor_priv;
u32 mode = cam->cam_mode[sd->gspca_dev.curr_mode].priv;
intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
@@ -255,13 +231,6 @@ static int pb0100_start(struct sd *sd)
stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x20);
}
- /* set_gain also sets red and blue balance */
- pb0100_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]);
- pb0100_set_exposure(&sd->gspca_dev, sensor_settings[EXPOSURE_IDX]);
- pb0100_set_autogain_target(&sd->gspca_dev,
- sensor_settings[AUTOGAIN_TARGET_IDX]);
- pb0100_set_autogain(&sd->gspca_dev, sensor_settings[AUTOGAIN_IDX]);
-
err = stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3)|BIT(1));
PDEBUG(D_STREAM, "Started stream, status: %d", err);
@@ -285,12 +254,6 @@ out:
return (err < 0) ? err : 0;
}
-static void pb0100_disconnect(struct sd *sd)
-{
- sd->sensor = NULL;
- kfree(sd->sensor_priv);
-}
-
/* FIXME: Sort the init commands out and put them into tables,
this is only for getting the camera to work */
/* FIXME: No error handling for now,
@@ -362,62 +325,32 @@ static int pb0100_dump(struct sd *sd)
return 0;
}
-static int pb0100_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[GAIN_IDX];
-
- return 0;
-}
-
static int pb0100_set_gain(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
+ struct pb0100_ctrls *ctrls = sd->sensor_priv;
- if (sensor_settings[AUTOGAIN_IDX])
- return -EBUSY;
-
- sensor_settings[GAIN_IDX] = val;
err = stv06xx_write_sensor(sd, PB_G1GAIN, val);
if (!err)
err = stv06xx_write_sensor(sd, PB_G2GAIN, val);
PDEBUG(D_V4L2, "Set green gain to %d, status: %d", val, err);
if (!err)
- err = pb0100_set_red_balance(gspca_dev,
- sensor_settings[RED_BALANCE_IDX]);
+ err = pb0100_set_red_balance(gspca_dev, ctrls->red->val);
if (!err)
- err = pb0100_set_blue_balance(gspca_dev,
- sensor_settings[BLUE_BALANCE_IDX]);
+ err = pb0100_set_blue_balance(gspca_dev, ctrls->blue->val);
return err;
}
-static int pb0100_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[RED_BALANCE_IDX];
-
- return 0;
-}
-
static int pb0100_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
+ struct pb0100_ctrls *ctrls = sd->sensor_priv;
- if (sensor_settings[AUTOGAIN_IDX])
- return -EBUSY;
-
- sensor_settings[RED_BALANCE_IDX] = val;
- val += sensor_settings[GAIN_IDX];
+ val += ctrls->gain->val;
if (val < 0)
val = 0;
else if (val > 255)
@@ -429,27 +362,13 @@ static int pb0100_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
return err;
}
-static int pb0100_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[BLUE_BALANCE_IDX];
-
- return 0;
-}
-
static int pb0100_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
+ struct pb0100_ctrls *ctrls = sd->sensor_priv;
- if (sensor_settings[AUTOGAIN_IDX])
- return -EBUSY;
-
- sensor_settings[BLUE_BALANCE_IDX] = val;
- val += sensor_settings[GAIN_IDX];
+ val += ctrls->gain->val;
if (val < 0)
val = 0;
else if (val > 255)
@@ -461,51 +380,25 @@ static int pb0100_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
return err;
}
-static int pb0100_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[EXPOSURE_IDX];
-
- return 0;
-}
-
static int pb0100_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
{
- int err;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- if (sensor_settings[AUTOGAIN_IDX])
- return -EBUSY;
+ int err;
- sensor_settings[EXPOSURE_IDX] = val;
err = stv06xx_write_sensor(sd, PB_RINTTIME, val);
PDEBUG(D_V4L2, "Set exposure to %d, status: %d", val, err);
return err;
}
-static int pb0100_get_autogain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[AUTOGAIN_IDX];
-
- return 0;
-}
-
static int pb0100_set_autogain(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
+ struct pb0100_ctrls *ctrls = sd->sensor_priv;
- sensor_settings[AUTOGAIN_IDX] = val;
- if (sensor_settings[AUTOGAIN_IDX]) {
- if (sensor_settings[NATURAL_IDX])
+ if (val) {
+ if (ctrls->natural->val)
val = BIT(6)|BIT(4)|BIT(0);
else
val = BIT(4)|BIT(0);
@@ -514,29 +407,15 @@ static int pb0100_set_autogain(struct gspca_dev *gspca_dev, __s32 val)
err = stv06xx_write_sensor(sd, PB_EXPGAIN, val);
PDEBUG(D_V4L2, "Set autogain to %d (natural: %d), status: %d",
- sensor_settings[AUTOGAIN_IDX], sensor_settings[NATURAL_IDX],
- err);
+ val, ctrls->natural->val, err);
return err;
}
-static int pb0100_get_autogain_target(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[AUTOGAIN_TARGET_IDX];
-
- return 0;
-}
-
static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val)
{
int err, totalpixels, brightpixels, darkpixels;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- sensor_settings[AUTOGAIN_TARGET_IDX] = val;
/* Number of pixels counted by the sensor when subsampling the pixels.
* Slightly larger than the real value to avoid oscillation */
@@ -553,23 +432,3 @@ static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val)
return err;
}
-
-static int pb0100_get_natural(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[NATURAL_IDX];
-
- return 0;
-}
-
-static int pb0100_set_natural(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- sensor_settings[NATURAL_IDX] = val;
-
- return pb0100_set_autogain(gspca_dev, sensor_settings[AUTOGAIN_IDX]);
-}
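
The pb0100 conversion replaces the per-control get/set pairs and the -EBUSY guards with a single control cluster: v4l2_ctrl_auto_cluster() ties autogain to the manual controls so the framework marks them inactive while autogain is enabled and hands the whole cluster to one s_ctrl call. A stripped-down sketch of that pattern, assuming a hypothetical "mysensor" driver (not the pb0100 code itself):

/* Minimal sketch of an autogain cluster; the v4l2_ctrl_* calls are the
 * ones used in the patch above, everything else is made up. */
#include <media/v4l2-ctrls.h>

struct mysensor_ctrls {
        /* the first member is the cluster master (autogain) */
        struct v4l2_ctrl *autogain;
        struct v4l2_ctrl *gain;
        struct v4l2_ctrl *exposure;
};

static int mysensor_s_ctrl(struct v4l2_ctrl *ctrl)
{
        /* For a cluster only the master is passed in; the siblings' new
         * values are read through the stored pointers, as pb0100_s_ctrl
         * does above with ctrls->gain->val and ctrls->exposure->val. */
        return 0;
}

static const struct v4l2_ctrl_ops mysensor_ctrl_ops = {
        .s_ctrl = mysensor_s_ctrl,
};

static int mysensor_init_controls(struct v4l2_ctrl_handler *hdl,
                                  struct mysensor_ctrls *c)
{
        v4l2_ctrl_handler_init(hdl, 3);
        c->autogain = v4l2_ctrl_new_std(hdl, &mysensor_ctrl_ops,
                                        V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
        c->gain = v4l2_ctrl_new_std(hdl, &mysensor_ctrl_ops,
                                    V4L2_CID_GAIN, 0, 255, 1, 128);
        c->exposure = v4l2_ctrl_new_std(hdl, &mysensor_ctrl_ops,
                                        V4L2_CID_EXPOSURE, 0, 511, 1, 12);
        if (hdl->error)
                return hdl->error;
        /* While autogain == 1 the manual gain/exposure controls are
         * reported as inactive, replacing the old -EBUSY checks. */
        v4l2_ctrl_auto_cluster(3, &c->autogain, 0, false);
        return 0;
}
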
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.h b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.h
index 757de246dc75..5071e5353fd3 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.h
@@ -112,25 +112,17 @@
static int pb0100_probe(struct sd *sd);
static int pb0100_start(struct sd *sd);
static int pb0100_init(struct sd *sd);
+static int pb0100_init_controls(struct sd *sd);
static int pb0100_stop(struct sd *sd);
static int pb0100_dump(struct sd *sd);
-static void pb0100_disconnect(struct sd *sd);
/* V4L2 controls supported by the driver */
-static int pb0100_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
static int pb0100_set_gain(struct gspca_dev *gspca_dev, __s32 val);
-static int pb0100_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val);
static int pb0100_set_red_balance(struct gspca_dev *gspca_dev, __s32 val);
-static int pb0100_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val);
static int pb0100_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val);
-static int pb0100_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
static int pb0100_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
-static int pb0100_get_autogain(struct gspca_dev *gspca_dev, __s32 *val);
static int pb0100_set_autogain(struct gspca_dev *gspca_dev, __s32 val);
-static int pb0100_get_autogain_target(struct gspca_dev *gspca_dev, __s32 *val);
static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val);
-static int pb0100_get_natural(struct gspca_dev *gspca_dev, __s32 *val);
-static int pb0100_set_natural(struct gspca_dev *gspca_dev, __s32 val);
const struct stv06xx_sensor stv06xx_sensor_pb0100 = {
.name = "PB-0100",
@@ -142,11 +134,11 @@ const struct stv06xx_sensor stv06xx_sensor_pb0100 = {
.max_packet_size = { 847, 923 },
.init = pb0100_init,
+ .init_controls = pb0100_init_controls,
.probe = pb0100_probe,
.start = pb0100_start,
.stop = pb0100_stop,
.dump = pb0100_dump,
- .disconnect = pb0100_disconnect,
};
#endif
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
index fb229d8ded58..3a498c2495c6 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
@@ -63,8 +63,8 @@ struct stv06xx_sensor {
/* Performs an initialization sequence */

int (*init)(struct sd *sd);
- /* Executed at device disconnect */
- void (*disconnect)(struct sd *sd);
+ /* Initializes the controls */
+ int (*init_controls)(struct sd *sd);
/* Reads a sensor register */
int (*read_sensor)(struct sd *sd, const u8 address,
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
index 9940e035b3ab..8a57990dfe0f 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
@@ -30,20 +30,6 @@
#include "stv06xx_st6422.h"
-/* controls */
-enum e_ctrl {
- BRIGHTNESS,
- CONTRAST,
- GAIN,
- EXPOSURE,
- NCTRLS /* number of controls */
-};
-
-/* sensor settings */
-struct st6422_settings {
- struct gspca_ctrl ctrls[NCTRLS];
-};
-
static struct v4l2_pix_format st6422_mode[] = {
/* Note we actually get 124 lines of data, of which we skip the first
4 as they are garbage */
@@ -74,83 +60,70 @@ static struct v4l2_pix_format st6422_mode[] = {
};
/* V4L2 controls supported by the driver */
-static void st6422_set_brightness(struct gspca_dev *gspca_dev);
-static void st6422_set_contrast(struct gspca_dev *gspca_dev);
-static void st6422_set_gain(struct gspca_dev *gspca_dev);
-static void st6422_set_exposure(struct gspca_dev *gspca_dev);
-
-static const struct ctrl st6422_ctrl[NCTRLS] = {
-[BRIGHTNESS] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 31,
- .step = 1,
- .default_value = 3
- },
- .set_control = st6422_set_brightness
- },
-[CONTRAST] = {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 15,
- .step = 1,
- .default_value = 11
- },
- .set_control = st6422_set_contrast
- },
-[GAIN] = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 64
- },
- .set_control = st6422_set_gain
- },
-[EXPOSURE] = {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
-#define EXPOSURE_MAX 1023
- .maximum = EXPOSURE_MAX,
- .step = 1,
- .default_value = 256
- },
- .set_control = st6422_set_exposure
- },
+static int setbrightness(struct sd *sd, s32 val);
+static int setcontrast(struct sd *sd, s32 val);
+static int setgain(struct sd *sd, u8 gain);
+static int setexposure(struct sd *sd, s16 expo);
+
+static int st6422_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+ int err = -EINVAL;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ err = setbrightness(sd, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ err = setcontrast(sd, ctrl->val);
+ break;
+ case V4L2_CID_GAIN:
+ err = setgain(sd, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ err = setexposure(sd, ctrl->val);
+ break;
+ }
+
+ /* commit settings */
+ if (err >= 0)
+ err = stv06xx_write_bridge(sd, 0x143f, 0x01);
+ sd->gspca_dev.usb_err = err;
+ return err;
+}
+
+static const struct v4l2_ctrl_ops st6422_ctrl_ops = {
+ .s_ctrl = st6422_s_ctrl,
};
-static int st6422_probe(struct sd *sd)
+static int st6422_init_controls(struct sd *sd)
{
- struct st6422_settings *sensor_settings;
+ struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler;
+
+ v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_new_std(hdl, &st6422_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 31, 1, 3);
+ v4l2_ctrl_new_std(hdl, &st6422_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 15, 1, 11);
+ v4l2_ctrl_new_std(hdl, &st6422_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 1023, 1, 256);
+ v4l2_ctrl_new_std(hdl, &st6422_ctrl_ops,
+ V4L2_CID_GAIN, 0, 255, 1, 64);
+
+ return hdl->error;
+}
+static int st6422_probe(struct sd *sd)
+{
if (sd->bridge != BRIDGE_ST6422)
return -ENODEV;
pr_info("st6422 sensor detected\n");
- sensor_settings = kmalloc(sizeof *sensor_settings, GFP_KERNEL);
- if (!sensor_settings)
- return -ENOMEM;
-
sd->gspca_dev.cam.cam_mode = st6422_mode;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(st6422_mode);
- sd->gspca_dev.cam.ctrls = sensor_settings->ctrls;
- sd->desc.ctrls = st6422_ctrl;
- sd->desc.nctrls = ARRAY_SIZE(st6422_ctrl);
- sd->sensor_priv = sensor_settings;
-
return 0;
}
@@ -239,38 +212,22 @@ static int st6422_init(struct sd *sd)
return err;
}
-static void st6422_disconnect(struct sd *sd)
-{
- sd->sensor = NULL;
- kfree(sd->sensor_priv);
-}
-
-static int setbrightness(struct sd *sd)
+static int setbrightness(struct sd *sd, s32 val)
{
- struct st6422_settings *sensor_settings = sd->sensor_priv;
-
/* val goes from 0 -> 31 */
- return stv06xx_write_bridge(sd, 0x1432,
- sensor_settings->ctrls[BRIGHTNESS].val);
+ return stv06xx_write_bridge(sd, 0x1432, val);
}
-static int setcontrast(struct sd *sd)
+static int setcontrast(struct sd *sd, s32 val)
{
- struct st6422_settings *sensor_settings = sd->sensor_priv;
-
/* Val goes from 0 -> 15 */
- return stv06xx_write_bridge(sd, 0x143a,
- sensor_settings->ctrls[CONTRAST].val | 0xf0);
+ return stv06xx_write_bridge(sd, 0x143a, val | 0xf0);
}
-static int setgain(struct sd *sd)
+static int setgain(struct sd *sd, u8 gain)
{
- struct st6422_settings *sensor_settings = sd->sensor_priv;
- u8 gain;
int err;
- gain = sensor_settings->ctrls[GAIN].val;
-
/* Set red, green, blue, gain */
err = stv06xx_write_bridge(sd, 0x0509, gain);
if (err < 0)
@@ -292,13 +249,10 @@ static int setgain(struct sd *sd)
return stv06xx_write_bridge(sd, 0x050d, 0x01);
}
-static int setexposure(struct sd *sd)
+static int setexposure(struct sd *sd, s16 expo)
{
- struct st6422_settings *sensor_settings = sd->sensor_priv;
- u16 expo;
int err;
- expo = sensor_settings->ctrls[EXPOSURE].val;
err = stv06xx_write_bridge(sd, 0x143d, expo & 0xff);
if (err < 0)
return err;
@@ -318,22 +272,6 @@ static int st6422_start(struct sd *sd)
if (err < 0)
return err;
- err = setbrightness(sd);
- if (err < 0)
- return err;
-
- err = setcontrast(sd);
- if (err < 0)
- return err;
-
- err = setexposure(sd);
- if (err < 0)
- return err;
-
- err = setgain(sd);
- if (err < 0)
- return err;
-
/* commit settings */
err = stv06xx_write_bridge(sd, 0x143f, 0x01);
return (err < 0) ? err : 0;
@@ -345,59 +283,3 @@ static int st6422_stop(struct sd *sd)
return 0;
}
-
-static void st6422_set_brightness(struct gspca_dev *gspca_dev)
-{
- int err;
- struct sd *sd = (struct sd *) gspca_dev;
-
- err = setbrightness(sd);
-
- /* commit settings */
- if (err >= 0)
- err = stv06xx_write_bridge(sd, 0x143f, 0x01);
-
- gspca_dev->usb_err = err;
-}
-
-static void st6422_set_contrast(struct gspca_dev *gspca_dev)
-{
- int err;
- struct sd *sd = (struct sd *) gspca_dev;
-
- err = setcontrast(sd);
-
- /* commit settings */
- if (err >= 0)
- err = stv06xx_write_bridge(sd, 0x143f, 0x01);
-
- gspca_dev->usb_err = err;
-}
-
-static void st6422_set_gain(struct gspca_dev *gspca_dev)
-{
- int err;
- struct sd *sd = (struct sd *) gspca_dev;
-
- err = setgain(sd);
-
- /* commit settings */
- if (err >= 0)
- err = stv06xx_write_bridge(sd, 0x143f, 0x01);
-
- gspca_dev->usb_err = err;
-}
-
-static void st6422_set_exposure(struct gspca_dev *gspca_dev)
-{
- int err;
- struct sd *sd = (struct sd *) gspca_dev;
-
- err = setexposure(sd);
-
- /* commit settings */
- if (err >= 0)
- err = stv06xx_write_bridge(sd, 0x143f, 0x01);
-
- gspca_dev->usb_err = err;
-}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h
index d7498e06432b..8f20fbf30f33 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h
@@ -34,8 +34,8 @@
static int st6422_probe(struct sd *sd);
static int st6422_start(struct sd *sd);
static int st6422_init(struct sd *sd);
+static int st6422_init_controls(struct sd *sd);
static int st6422_stop(struct sd *sd);
-static void st6422_disconnect(struct sd *sd);
const struct stv06xx_sensor stv06xx_sensor_st6422 = {
.name = "ST6422",
@@ -43,10 +43,10 @@ const struct stv06xx_sensor stv06xx_sensor_st6422 = {
.min_packet_size = { 300, 847 },
.max_packet_size = { 300, 847 },
.init = st6422_init,
+ .init_controls = st6422_init_controls,
.probe = st6422_probe,
.start = st6422_start,
.stop = st6422_stop,
- .disconnect = st6422_disconnect,
};
#endif
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
index a5c69d9ebdd4..748e1421d6d8 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
@@ -44,130 +44,83 @@ static struct v4l2_pix_format vv6410_mode[] = {
}
};
-static const struct ctrl vv6410_ctrl[] = {
-#define HFLIP_IDX 0
- {
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "horizontal flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
- },
- .set = vv6410_set_hflip,
- .get = vv6410_get_hflip
- },
-#define VFLIP_IDX 1
- {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "vertical flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
- },
- .set = vv6410_set_vflip,
- .get = vv6410_get_vflip
- },
-#define GAIN_IDX 2
- {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "analog gain",
- .minimum = 0,
- .maximum = 15,
- .step = 1,
- .default_value = 10
- },
- .set = vv6410_set_analog_gain,
- .get = vv6410_get_analog_gain
- },
-#define EXPOSURE_IDX 3
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
- .minimum = 0,
- .maximum = 32768,
- .step = 1,
- .default_value = 20000
- },
- .set = vv6410_set_exposure,
- .get = vv6410_get_exposure
+static int vv6410_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ int err = -EINVAL;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ err = vv6410_set_hflip(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_VFLIP:
+ err = vv6410_set_vflip(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_GAIN:
+ err = vv6410_set_analog_gain(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ err = vv6410_set_exposure(gspca_dev, ctrl->val);
+ break;
}
- };
+ return err;
+}
+
+static const struct v4l2_ctrl_ops vv6410_ctrl_ops = {
+ .s_ctrl = vv6410_s_ctrl,
+};
static int vv6410_probe(struct sd *sd)
{
u16 data;
- int err, i;
- s32 *sensor_settings;
+ int err;
err = stv06xx_read_sensor(sd, VV6410_DEVICEH, &data);
if (err < 0)
return -ENODEV;
- if (data == 0x19) {
- pr_info("vv6410 sensor detected\n");
+ if (data != 0x19)
+ return -ENODEV;
- sensor_settings = kmalloc(ARRAY_SIZE(vv6410_ctrl) * sizeof(s32),
- GFP_KERNEL);
- if (!sensor_settings)
- return -ENOMEM;
+ pr_info("vv6410 sensor detected\n");
- sd->gspca_dev.cam.cam_mode = vv6410_mode;
- sd->gspca_dev.cam.nmodes = ARRAY_SIZE(vv6410_mode);
- sd->desc.ctrls = vv6410_ctrl;
- sd->desc.nctrls = ARRAY_SIZE(vv6410_ctrl);
+ sd->gspca_dev.cam.cam_mode = vv6410_mode;
+ sd->gspca_dev.cam.nmodes = ARRAY_SIZE(vv6410_mode);
+ return 0;
+}
- for (i = 0; i < sd->desc.nctrls; i++)
- sensor_settings[i] = vv6410_ctrl[i].qctrl.default_value;
- sd->sensor_priv = sensor_settings;
- return 0;
- }
- return -ENODEV;
+static int vv6410_init_controls(struct sd *sd)
+{
+ struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler;
+
+ v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 32768, 1, 20000);
+ v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
+ V4L2_CID_GAIN, 0, 15, 1, 10);
+ return hdl->error;
}
static int vv6410_init(struct sd *sd)
{
int err = 0, i;
- s32 *sensor_settings = sd->sensor_priv;
- for (i = 0; i < ARRAY_SIZE(stv_bridge_init); i++) {
+ for (i = 0; i < ARRAY_SIZE(stv_bridge_init); i++)
stv06xx_write_bridge(sd, stv_bridge_init[i].addr, stv_bridge_init[i].data);
- }
if (err < 0)
return err;
err = stv06xx_write_sensor_bytes(sd, (u8 *) vv6410_sensor_init,
ARRAY_SIZE(vv6410_sensor_init));
- if (err < 0)
- return err;
-
- err = vv6410_set_exposure(&sd->gspca_dev,
- sensor_settings[EXPOSURE_IDX]);
- if (err < 0)
- return err;
-
- err = vv6410_set_analog_gain(&sd->gspca_dev,
- sensor_settings[GAIN_IDX]);
-
return (err < 0) ? err : 0;
}
-static void vv6410_disconnect(struct sd *sd)
-{
- sd->sensor = NULL;
- kfree(sd->sensor_priv);
-}
-
static int vv6410_start(struct sd *sd)
{
int err;
@@ -233,25 +186,12 @@ static int vv6410_dump(struct sd *sd)
return (err < 0) ? err : 0;
}
-static int vv6410_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[HFLIP_IDX];
- PDEBUG(D_V4L2, "Read horizontal flip %d", *val);
-
- return 0;
-}
-
static int vv6410_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
u16 i2c_data;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
- sensor_settings[HFLIP_IDX] = val;
err = stv06xx_read_sensor(sd, VV6410_DATAFORMAT, &i2c_data);
if (err < 0)
return err;
@@ -267,25 +207,12 @@ static int vv6410_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
return (err < 0) ? err : 0;
}
-static int vv6410_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[VFLIP_IDX];
- PDEBUG(D_V4L2, "Read vertical flip %d", *val);
-
- return 0;
-}
-
static int vv6410_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
u16 i2c_data;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
- sensor_settings[VFLIP_IDX] = val;
err = stv06xx_read_sensor(sd, VV6410_DATAFORMAT, &i2c_data);
if (err < 0)
return err;
@@ -301,52 +228,23 @@ static int vv6410_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
return (err < 0) ? err : 0;
}
-static int vv6410_get_analog_gain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[GAIN_IDX];
-
- PDEBUG(D_V4L2, "Read analog gain %d", *val);
-
- return 0;
-}
-
static int vv6410_set_analog_gain(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
- sensor_settings[GAIN_IDX] = val;
PDEBUG(D_V4L2, "Set analog gain to %d", val);
err = stv06xx_write_sensor(sd, VV6410_ANALOGGAIN, 0xf0 | (val & 0xf));
return (err < 0) ? err : 0;
}
-static int vv6410_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[EXPOSURE_IDX];
-
- PDEBUG(D_V4L2, "Read exposure %d", *val);
-
- return 0;
-}
-
static int vv6410_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
unsigned int fine, coarse;
- sensor_settings[EXPOSURE_IDX] = val;
-
val = (val * val >> 14) + val / 4;
fine = val % VV6410_CIF_LINELENGTH;
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
index a25b8873f2e6..53e67b40ca05 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
@@ -178,18 +178,14 @@
static int vv6410_probe(struct sd *sd);
static int vv6410_start(struct sd *sd);
static int vv6410_init(struct sd *sd);
+static int vv6410_init_controls(struct sd *sd);
static int vv6410_stop(struct sd *sd);
static int vv6410_dump(struct sd *sd);
-static void vv6410_disconnect(struct sd *sd);
/* V4L2 controls supported by the driver */
-static int vv6410_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
static int vv6410_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
-static int vv6410_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
static int vv6410_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
-static int vv6410_get_analog_gain(struct gspca_dev *gspca_dev, __s32 *val);
static int vv6410_set_analog_gain(struct gspca_dev *gspca_dev, __s32 val);
-static int vv6410_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
static int vv6410_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
const struct stv06xx_sensor stv06xx_sensor_vv6410 = {
@@ -202,11 +198,11 @@ const struct stv06xx_sensor stv06xx_sensor_vv6410 = {
.min_packet_size = { 1023 },
.max_packet_size = { 1023 },
.init = vv6410_init,
+ .init_controls = vv6410_init_controls,
.probe = vv6410_probe,
.start = vv6410_start,
.stop = vv6410_stop,
.dump = vv6410_dump,
- .disconnect = vv6410_disconnect,
};
/* If NULL, only single value to write, stored in len */
diff --git a/drivers/media/video/gspca/topro.c b/drivers/media/video/gspca/topro.c
index 444d3c5b9079..c6326d177a3d 100644
--- a/drivers/media/video/gspca/topro.c
+++ b/drivers/media/video/gspca/topro.c
@@ -4675,11 +4675,9 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
/* -- do autogain -- */
/* gain setting is done in setexposure() for tp6810 */
static void setgain(struct gspca_dev *gspca_dev) {}
-/* !! coarse_grained_expo_autogain is not used !! */
-#define exp_too_low_cnt bridge
-#define exp_too_high_cnt sensor
-
+#define WANT_REGULAR_AUTOGAIN
#include "autogain_functions.h"
+
static void sd_dq_callback(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
diff --git a/drivers/media/video/gspca/vicam.c b/drivers/media/video/gspca/vicam.c
index 911152e169d6..15a30f7a4b2a 100644
--- a/drivers/media/video/gspca/vicam.c
+++ b/drivers/media/video/gspca/vicam.c
@@ -37,9 +37,12 @@
#include <linux/ihex.h>
#include "gspca.h"
+#define VICAM_FIRMWARE "vicam/firmware.fw"
+
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("GSPCA ViCam USB Camera Driver");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(VICAM_FIRMWARE);
enum e_ctrl {
GAIN,
@@ -222,7 +225,11 @@ static void vicam_dostream(struct work_struct *work)
goto exit;
}
- while (gspca_dev->present && gspca_dev->streaming) {
+ while (gspca_dev->dev && gspca_dev->streaming) {
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ break;
+#endif
ret = vicam_read_frame(gspca_dev, buffer, frame_sz);
if (ret < 0)
break;
@@ -268,7 +275,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
const struct firmware *uninitialized_var(fw);
u8 *firmware_buf;
- ret = request_ihex_firmware(&fw, "vicam/firmware.fw",
+ ret = request_ihex_firmware(&fw, VICAM_FIRMWARE,
&gspca_dev->dev->dev);
if (ret) {
pr_err("Failed to load \"vicam/firmware.fw\": %d\n", ret);
@@ -324,7 +331,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
dev->work_thread = NULL;
mutex_lock(&gspca_dev->usb_lock);
- if (gspca_dev->present)
+ if (gspca_dev->dev)
vicam_set_camera_power(gspca_dev, 0);
}
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 7d9a4f1be9dc..f0bacee33ef9 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -32,29 +32,25 @@ MODULE_LICENSE("GPL");
static int force_sensor = -1;
-#define REG08_DEF 3 /* default JPEG compression (70%) */
+#define REG08_DEF 3 /* default JPEG compression (75%) */
#include "zc3xx-reg.h"
-/* controls */
-enum e_ctrl {
- BRIGHTNESS,
- CONTRAST,
- EXPOSURE,
- GAMMA,
- AUTOGAIN,
- LIGHTFREQ,
- SHARPNESS,
- QUALITY,
- NCTRLS /* number of controls */
-};
-
-#define AUTOGAIN_DEF 1
-
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- struct gspca_ctrl ctrls[NCTRLS];
+ struct { /* gamma/brightness/contrast control cluster */
+ struct v4l2_ctrl *gamma;
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *contrast;
+ };
+ struct { /* autogain/exposure control cluster */
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *exposure;
+ };
+ struct v4l2_ctrl *plfreq;
+ struct v4l2_ctrl *sharpness;
+ struct v4l2_ctrl *jpegqual;
struct work_struct work;
struct workqueue_struct *work_thread;
@@ -94,114 +90,6 @@ enum sensors {
SENSOR_MAX
};
-/* V4L2 controls supported by the driver */
-static void setcontrast(struct gspca_dev *gspca_dev);
-static void setexposure(struct gspca_dev *gspca_dev);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static void setlightfreq(struct gspca_dev *gspca_dev);
-static void setsharpness(struct gspca_dev *gspca_dev);
-static int sd_setquality(struct gspca_dev *gspca_dev, __s32 val);
-
-static const struct ctrl sd_ctrls[NCTRLS] = {
-[BRIGHTNESS] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 128,
- },
- .set_control = setcontrast
- },
-[CONTRAST] = {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 128,
- },
- .set_control = setcontrast
- },
-[EXPOSURE] = {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0x30d,
- .maximum = 0x493e,
- .step = 1,
- .default_value = 0x927
- },
- .set_control = setexposure
- },
-[GAMMA] = {
- {
- .id = V4L2_CID_GAMMA,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gamma",
- .minimum = 1,
- .maximum = 6,
- .step = 1,
- .default_value = 4,
- },
- .set_control = setcontrast
- },
-[AUTOGAIN] = {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = AUTOGAIN_DEF,
- .flags = V4L2_CTRL_FLAG_UPDATE
- },
- .set = sd_setautogain
- },
-[LIGHTFREQ] = {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Light frequency filter",
- .minimum = 0,
- .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
- .step = 1,
- .default_value = 0,
- },
- .set_control = setlightfreq
- },
-[SHARPNESS] = {
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Sharpness",
- .minimum = 0,
- .maximum = 3,
- .step = 1,
- .default_value = 2,
- },
- .set_control = setsharpness
- },
-[QUALITY] = {
- {
- .id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Compression Quality",
- .minimum = 40,
- .maximum = 70,
- .step = 1,
- .default_value = 70 /* updated in sd_init() */
- },
- .set = sd_setquality
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 320,
@@ -241,8 +129,11 @@ static const struct v4l2_pix_format sif_mode[] = {
.priv = 0},
};
-/* bridge reg08 -> JPEG quality conversion table */
-static u8 jpeg_qual[] = {40, 50, 60, 70, /*80*/};
+/*
+ * Bridge reg08 bits 1-2 -> JPEG quality conversion table. Note the highest
+ * quality setting is not usable as USB 1 does not have enough bandwidth.
+ */
+static u8 jpeg_qual[] = {50, 75, 87, /* 94 */};
/* usb exchanges */
struct usb_action {
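
For reference, the new table is indexed with reg08 >> 1 and the JPEG quality control later stores reg08 = (index << 1) | 1, so REG08_DEF = 3 now selects jpeg_qual[1] = 75%, matching the updated comment. A small standalone check of that mapping, illustrative only:

/* Check of the reg08 <-> JPEG quality mapping introduced above:
 * bits 1-2 of reg08 index jpeg_qual[], i.e. quality = jpeg_qual[reg08 >> 1]. */
#include <stdio.h>

int main(void)
{
        static const unsigned char jpeg_qual[] = {50, 75, 87};
        unsigned idx;

        for (idx = 0; idx < sizeof(jpeg_qual); idx++) {
                unsigned reg08 = (idx << 1) | 1;

                printf("index %u: reg08=0x%02x -> %u%%\n",
                       idx, reg08, jpeg_qual[reg08 >> 1]);
        }
        return 0;
}
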
@@ -5818,10 +5709,8 @@ static void setmatrix(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, matrix[i], 0x010a + i);
}
-static void setsharpness(struct gspca_dev *gspca_dev)
+static void setsharpness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- int sharpness;
static const u8 sharpness_tb[][2] = {
{0x02, 0x03},
{0x04, 0x07},
@@ -5829,19 +5718,18 @@ static void setsharpness(struct gspca_dev *gspca_dev)
{0x10, 0x1e}
};
- sharpness = sd->ctrls[SHARPNESS].val;
- reg_w(gspca_dev, sharpness_tb[sharpness][0], 0x01c6);
+ reg_w(gspca_dev, sharpness_tb[val][0], 0x01c6);
reg_r(gspca_dev, 0x01c8);
reg_r(gspca_dev, 0x01c9);
reg_r(gspca_dev, 0x01ca);
- reg_w(gspca_dev, sharpness_tb[sharpness][1], 0x01cb);
+ reg_w(gspca_dev, sharpness_tb[val][1], 0x01cb);
}
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev,
+ s32 gamma, s32 brightness, s32 contrast)
{
- struct sd *sd = (struct sd *) gspca_dev;
const u8 *Tgamma;
- int g, i, brightness, contrast, adj, gp1, gp2;
+ int g, i, adj, gp1, gp2;
u8 gr[16];
static const u8 delta_b[16] = /* delta for brightness */
{0x50, 0x38, 0x2d, 0x28, 0x24, 0x21, 0x1e, 0x1d,
@@ -5864,10 +5752,10 @@ static void setcontrast(struct gspca_dev *gspca_dev)
0xe0, 0xeb, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff},
};
- Tgamma = gamma_tb[sd->ctrls[GAMMA].val - 1];
+ Tgamma = gamma_tb[gamma - 1];
- contrast = ((int) sd->ctrls[CONTRAST].val - 128); /* -128 / 127 */
- brightness = ((int) sd->ctrls[BRIGHTNESS].val - 128); /* -128 / 92 */
+ contrast -= 128; /* -128 / 127 */
+ brightness -= 128; /* -128 / 92 */
adj = 0;
gp1 = gp2 = 0;
for (i = 0; i < 16; i++) {
@@ -5894,25 +5782,15 @@ static void setcontrast(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, gr[i], 0x0130 + i); /* gradient */
}
-static void getexposure(struct gspca_dev *gspca_dev)
+static s32 getexposure(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- if (sd->sensor != SENSOR_HV7131R)
- return;
- sd->ctrls[EXPOSURE].val = (i2c_read(gspca_dev, 0x25) << 9)
+ return (i2c_read(gspca_dev, 0x25) << 9)
| (i2c_read(gspca_dev, 0x26) << 1)
| (i2c_read(gspca_dev, 0x27) >> 7);
}
-static void setexposure(struct gspca_dev *gspca_dev)
+static void setexposure(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- int val;
-
- if (sd->sensor != SENSOR_HV7131R)
- return;
- val = sd->ctrls[EXPOSURE].val;
i2c_write(gspca_dev, 0x25, val >> 9, 0x00);
i2c_write(gspca_dev, 0x26, val >> 1, 0x00);
i2c_write(gspca_dev, 0x27, val << 7, 0x00);
@@ -5921,20 +5799,8 @@ static void setexposure(struct gspca_dev *gspca_dev)
static void setquality(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- s8 reg07;
-
- reg07 = 0;
- switch (sd->sensor) {
- case SENSOR_OV7620:
- reg07 = 0x30;
- break;
- case SENSOR_HV7131R:
- case SENSOR_PAS202B:
- return; /* done by work queue */
- }
+ jpeg_set_qual(sd->jpeg_hdr, jpeg_qual[sd->reg08 >> 1]);
reg_w(gspca_dev, sd->reg08, ZC3XX_R008_CLOCKSETTING);
- if (reg07 != 0)
- reg_w(gspca_dev, reg07, 0x0007);
}
/* Matches the sensor's internal frame rate to the lighting frequency.
@@ -5943,7 +5809,7 @@ static void setquality(struct gspca_dev *gspca_dev)
* 60Hz, for American lighting
* 0 = No flicker (for outdoor usage)
*/
-static void setlightfreq(struct gspca_dev *gspca_dev)
+static void setlightfreq(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
int i, mode;
@@ -6027,7 +5893,7 @@ static void setlightfreq(struct gspca_dev *gspca_dev)
tas5130c_60HZ, tas5130c_60HZScale},
};
- i = sd->ctrls[LIGHTFREQ].val * 2;
+ i = val * 2;
mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
if (mode)
i++; /* 320x240 */
@@ -6037,14 +5903,14 @@ static void setlightfreq(struct gspca_dev *gspca_dev)
usb_exchange(gspca_dev, zc3_freq);
switch (sd->sensor) {
case SENSOR_GC0305:
- if (mode /* if 320x240 */
- && sd->ctrls[LIGHTFREQ].val == 1) /* and 50Hz */
+ if (mode /* if 320x240 */
+ && val == 1) /* and 50Hz */
reg_w(gspca_dev, 0x85, 0x018d);
/* win: 0x80, 0x018d */
break;
case SENSOR_OV7620:
- if (!mode) { /* if 640x480 */
- if (sd->ctrls[LIGHTFREQ].val != 0) /* and filter */
+ if (!mode) { /* if 640x480 */
+ if (val != 0) /* and filter */
reg_w(gspca_dev, 0x40, 0x0002);
else
reg_w(gspca_dev, 0x44, 0x0002);
@@ -6056,22 +5922,15 @@ static void setlightfreq(struct gspca_dev *gspca_dev)
}
}
-static void setautogain(struct gspca_dev *gspca_dev)
+static void setautogain(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- u8 autoval;
-
- if (sd->ctrls[AUTOGAIN].val)
- autoval = 0x42;
- else
- autoval = 0x02;
- reg_w(gspca_dev, autoval, 0x0180);
+ reg_w(gspca_dev, val ? 0x42 : 0x02, 0x0180);
}
-/* update the transfer parameters */
-/* This function is executed from a work queue. */
-/* The exact use of the bridge registers 07 and 08 is not known.
- * The following algorithm has been adapted from ms-win traces */
+/*
+ * Update the transfer parameters.
+ * This function is executed from a work queue.
+ */
static void transfer_update(struct work_struct *work)
{
struct sd *sd = container_of(work, struct sd, work);
@@ -6079,96 +5938,55 @@ static void transfer_update(struct work_struct *work)
int change, good;
u8 reg07, reg11;
- /* synchronize with the main driver and initialize the registers */
- mutex_lock(&gspca_dev->usb_lock);
- reg07 = 0; /* max */
- reg_w(gspca_dev, reg07, 0x0007);
- reg_w(gspca_dev, sd->reg08, ZC3XX_R008_CLOCKSETTING);
- mutex_unlock(&gspca_dev->usb_lock);
+ /* reg07 gets set to 0 by sd_start before starting us */
+ reg07 = 0;
good = 0;
for (;;) {
msleep(100);
- /* get the transfer status */
- /* the bit 0 of the bridge register 11 indicates overflow */
mutex_lock(&gspca_dev->usb_lock);
- if (!gspca_dev->present || !gspca_dev->streaming)
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
goto err;
+#endif
+ if (!gspca_dev->dev || !gspca_dev->streaming)
+ goto err;
+
+ /* Bit 0 of register 11 indicates FIFO overflow */
+ gspca_dev->usb_err = 0;
reg11 = reg_r(gspca_dev, 0x0011);
- if (gspca_dev->usb_err < 0
- || !gspca_dev->present || !gspca_dev->streaming)
+ if (gspca_dev->usb_err)
goto err;
change = reg11 & 0x01;
if (change) { /* overflow */
- switch (reg07) {
- case 0: /* max */
- reg07 = sd->sensor == SENSOR_HV7131R
- ? 0x30 : 0x32;
- if (sd->reg08 != 0) {
- change = 3;
- sd->reg08--;
- }
- break;
- case 0x32:
- reg07 -= 4;
- break;
- default:
- reg07 -= 2;
- break;
- case 2:
- change = 0; /* already min */
- break;
- }
good = 0;
+
+ if (reg07 == 0) /* Bit Rate Control not enabled? */
+ reg07 = 0x32; /* Allow 98 bytes / unit */
+ else if (reg07 > 2)
+ reg07 -= 2; /* Decrease allowed bytes / unit */
+ else
+ change = 0;
} else { /* no overflow */
- if (reg07 != 0) { /* if not max */
- good++;
- if (good >= 10) {
- good = 0;
+ good++;
+ if (good >= 10) {
+ good = 0;
+ if (reg07) { /* BRC enabled? */
change = 1;
- reg07 += 2;
- switch (reg07) {
- case 0x30:
- if (sd->sensor == SENSOR_PAS202B)
- reg07 += 2;
- break;
- case 0x32:
- case 0x34:
+ if (reg07 < 0x32)
+ reg07 += 2;
+ else
reg07 = 0;
- break;
- }
- }
- } else { /* reg07 max */
- if (sd->reg08 < sizeof jpeg_qual - 1) {
- good++;
- if (good > 10) {
- sd->reg08++;
- change = 2;
- }
}
}
}
if (change) {
- if (change & 1) {
- reg_w(gspca_dev, reg07, 0x0007);
- if (gspca_dev->usb_err < 0
- || !gspca_dev->present
- || !gspca_dev->streaming)
- goto err;
- }
- if (change & 2) {
- reg_w(gspca_dev, sd->reg08,
- ZC3XX_R008_CLOCKSETTING);
- if (gspca_dev->usb_err < 0
- || !gspca_dev->present
- || !gspca_dev->streaming)
- goto err;
- sd->ctrls[QUALITY].val = jpeg_qual[sd->reg08];
- jpeg_set_qual(sd->jpeg_hdr,
- jpeg_qual[sd->reg08]);
- }
+ gspca_dev->usb_err = 0;
+ reg_w(gspca_dev, reg07, 0x0007);
+ if (gspca_dev->usb_err)
+ goto err;
}
mutex_unlock(&gspca_dev->usb_lock);
}
@@ -6503,7 +6321,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* define some sensors from the vendor/product */
sd->sensor = id->driver_info;
- gspca_dev->cam.ctrls = sd->ctrls;
sd->reg08 = REG08_DEF;
INIT_WORK(&sd->work, transfer_update);
@@ -6511,12 +6328,87 @@ static int sd_config(struct gspca_dev *gspca_dev,
return 0;
}
-/* this function is called at probe and resume time */
-static int sd_init(struct gspca_dev *gspca_dev)
+static int zcxx_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
- struct cam *cam;
- int sensor;
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ gspca_dev->usb_err = 0;
+ if (ctrl->val && sd->exposure && gspca_dev->streaming)
+ sd->exposure->val = getexposure(gspca_dev);
+ return gspca_dev->usb_err;
+ }
+ return -EINVAL;
+}
+
+static int zcxx_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+ int i, qual;
+
+ gspca_dev->usb_err = 0;
+
+ if (ctrl->id == V4L2_CID_JPEG_COMPRESSION_QUALITY) {
+ qual = sd->reg08 >> 1;
+
+ for (i = 0; i < ARRAY_SIZE(jpeg_qual); i++) {
+ if (ctrl->val <= jpeg_qual[i])
+ break;
+ }
+ if (i > 0 && i == qual && ctrl->val < jpeg_qual[i])
+ i--;
+
+ /* With high quality settings we need max bandwidth */
+ if (i >= 2 && gspca_dev->streaming &&
+ !gspca_dev->cam.needs_full_bandwidth)
+ return -EBUSY;
+
+ sd->reg08 = (i << 1) | 1;
+ ctrl->val = jpeg_qual[i];
+ }
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ /* gamma/brightness/contrast cluster */
+ case V4L2_CID_GAMMA:
+ setcontrast(gspca_dev, sd->gamma->val,
+ sd->brightness->val, sd->contrast->val);
+ break;
+ /* autogain/exposure cluster */
+ case V4L2_CID_AUTOGAIN:
+ setautogain(gspca_dev, ctrl->val);
+ if (!gspca_dev->usb_err && !ctrl->val && sd->exposure)
+ setexposure(gspca_dev, sd->exposure->val);
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ setlightfreq(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SHARPNESS:
+ setsharpness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ setquality(gspca_dev);
+ break;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops zcxx_ctrl_ops = {
+ .g_volatile_ctrl = zcxx_g_volatile_ctrl,
+ .s_ctrl = zcxx_s_ctrl,
+};
+
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
static const u8 gamma[SENSOR_MAX] = {
[SENSOR_ADCM2700] = 4,
[SENSOR_CS2102] = 4,
@@ -6538,6 +6430,48 @@ static int sd_init(struct gspca_dev *gspca_dev)
[SENSOR_PO2030] = 4,
[SENSOR_TAS5130C] = 3,
};
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 8);
+ sd->brightness = v4l2_ctrl_new_std(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ sd->contrast = v4l2_ctrl_new_std(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 128);
+ sd->gamma = v4l2_ctrl_new_std(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_GAMMA, 1, 6, 1, gamma[sd->sensor]);
+ if (sd->sensor == SENSOR_HV7131R)
+ sd->exposure = v4l2_ctrl_new_std(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0x30d, 0x493e, 1, 0x927);
+ sd->autogain = v4l2_ctrl_new_std(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ if (sd->sensor != SENSOR_OV7630C)
+ sd->plfreq = v4l2_ctrl_new_std_menu(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_DISABLED);
+ sd->sharpness = v4l2_ctrl_new_std(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0, 3, 1,
+ sd->sensor == SENSOR_PO2030 ? 0 : 2);
+ sd->jpegqual = v4l2_ctrl_new_std(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ jpeg_qual[0], jpeg_qual[ARRAY_SIZE(jpeg_qual) - 1], 1,
+ jpeg_qual[REG08_DEF >> 1]);
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ v4l2_ctrl_cluster(3, &sd->gamma);
+ if (sd->sensor == SENSOR_HV7131R)
+ v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, true);
+ return 0;
+}
+
+/* this function is called at probe and resume time */
+static int sd_init(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct cam *cam;
+ int sensor;
static const u8 mode_tb[SENSOR_MAX] = {
[SENSOR_ADCM2700] = 2,
[SENSOR_CS2102] = 1,
@@ -6559,27 +6493,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
[SENSOR_PO2030] = 1,
[SENSOR_TAS5130C] = 1,
};
- static const u8 reg08_tb[SENSOR_MAX] = {
- [SENSOR_ADCM2700] = 1,
- [SENSOR_CS2102] = 3,
- [SENSOR_CS2102K] = 3,
- [SENSOR_GC0303] = 2,
- [SENSOR_GC0305] = 3,
- [SENSOR_HDCS2020] = 1,
- [SENSOR_HV7131B] = 3,
- [SENSOR_HV7131R] = 3,
- [SENSOR_ICM105A] = 3,
- [SENSOR_MC501CB] = 3,
- [SENSOR_MT9V111_1] = 3,
- [SENSOR_MT9V111_3] = 3,
- [SENSOR_OV7620] = 1,
- [SENSOR_OV7630C] = 3,
- [SENSOR_PAS106] = 3,
- [SENSOR_PAS202B] = 3,
- [SENSOR_PB0330] = 3,
- [SENSOR_PO2030] = 2,
- [SENSOR_TAS5130C] = 3,
- };
sensor = zcxx_probeSensor(gspca_dev);
if (sensor >= 0)
@@ -6688,7 +6601,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
case 0x2030:
PDEBUG(D_PROBE, "Find Sensor PO2030");
sd->sensor = SENSOR_PO2030;
- sd->ctrls[SHARPNESS].def = 0; /* from win traces */
break;
case 0x7620:
PDEBUG(D_PROBE, "Find Sensor OV7620");
@@ -6730,36 +6642,18 @@ static int sd_init(struct gspca_dev *gspca_dev)
break;
}
- sd->ctrls[GAMMA].def = gamma[sd->sensor];
- sd->reg08 = reg08_tb[sd->sensor];
- sd->ctrls[QUALITY].def = jpeg_qual[sd->reg08];
- sd->ctrls[QUALITY].min = jpeg_qual[0];
- sd->ctrls[QUALITY].max = jpeg_qual[ARRAY_SIZE(jpeg_qual) - 1];
-
- switch (sd->sensor) {
- case SENSOR_HV7131R:
- gspca_dev->ctrl_dis = (1 << QUALITY);
- break;
- case SENSOR_OV7630C:
- gspca_dev->ctrl_dis = (1 << LIGHTFREQ) | (1 << EXPOSURE);
- break;
- case SENSOR_PAS202B:
- gspca_dev->ctrl_dis = (1 << QUALITY) | (1 << EXPOSURE);
- break;
- default:
- gspca_dev->ctrl_dis = (1 << EXPOSURE);
- break;
- }
-#if AUTOGAIN_DEF
- if (sd->ctrls[AUTOGAIN].val)
- gspca_dev->ctrl_inac = (1 << EXPOSURE);
-#endif
-
/* switch off the led */
reg_w(gspca_dev, 0x01, 0x0000);
return gspca_dev->usb_err;
}
+static int sd_pre_start(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ gspca_dev->cam.needs_full_bandwidth = (sd->reg08 >= 4) ? 1 : 0;
+ return 0;
+}
+
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -6864,7 +6758,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x03, 0x0008);
break;
}
- setsharpness(gspca_dev);
+ setsharpness(gspca_dev, v4l2_ctrl_g_ctrl(sd->sharpness));
/* set the gamma tables when not set */
switch (sd->sensor) {
@@ -6873,7 +6767,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
case SENSOR_OV7630C:
break;
default:
- setcontrast(gspca_dev);
+ setcontrast(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma),
+ v4l2_ctrl_g_ctrl(sd->brightness),
+ v4l2_ctrl_g_ctrl(sd->contrast));
break;
}
setmatrix(gspca_dev); /* one more time? */
@@ -6885,8 +6781,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
}
setquality(gspca_dev);
- jpeg_set_qual(sd->jpeg_hdr, jpeg_qual[sd->reg08]);
- setlightfreq(gspca_dev);
+ /* Start with BRC disabled, transfer_update will enable it if needed */
+ reg_w(gspca_dev, 0x00, 0x0007);
+ if (sd->plfreq)
+ setlightfreq(gspca_dev, v4l2_ctrl_g_ctrl(sd->plfreq));
switch (sd->sensor) {
case SENSOR_ADCM2700:
@@ -6897,7 +6795,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x40, 0x0117);
break;
case SENSOR_HV7131R:
- setexposure(gspca_dev);
+ setexposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
reg_w(gspca_dev, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN);
break;
case SENSOR_GC0305:
@@ -6921,21 +6819,16 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
}
- setautogain(gspca_dev);
+ setautogain(gspca_dev, v4l2_ctrl_g_ctrl(sd->autogain));
- /* start the transfer update thread if needed */
- if (gspca_dev->usb_err >= 0) {
- switch (sd->sensor) {
- case SENSOR_HV7131R:
- case SENSOR_PAS202B:
- sd->work_thread =
- create_singlethread_workqueue(KBUILD_MODNAME);
- queue_work(sd->work_thread, &sd->work);
- break;
- }
- }
+ if (gspca_dev->usb_err < 0)
+ return gspca_dev->usb_err;
- return gspca_dev->usb_err;
+ /* Start the transfer parameters update thread */
+ sd->work_thread = create_singlethread_workqueue(KBUILD_MODNAME);
+ queue_work(sd->work_thread, &sd->work);
+
+ return 0;
}
/* called on streamoff with alt 0 and on disconnect */
@@ -6949,7 +6842,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
mutex_lock(&gspca_dev->usb_lock);
sd->work_thread = NULL;
}
- if (!gspca_dev->present)
+ if (!gspca_dev->dev)
return;
send_unknown(gspca_dev, sd->sensor);
}
@@ -6987,72 +6880,17 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->ctrls[AUTOGAIN].val = val;
- if (val) {
- gspca_dev->ctrl_inac |= (1 << EXPOSURE);
- } else {
- gspca_dev->ctrl_inac &= ~(1 << EXPOSURE);
- if (gspca_dev->streaming)
- getexposure(gspca_dev);
- }
- if (gspca_dev->streaming)
- setautogain(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_querymenu(struct gspca_dev *gspca_dev,
- struct v4l2_querymenu *menu)
-{
- switch (menu->id) {
- case V4L2_CID_POWER_LINE_FREQUENCY:
- switch (menu->index) {
- case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
- strcpy((char *) menu->name, "NoFliker");
- return 0;
- case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
- strcpy((char *) menu->name, "50 Hz");
- return 0;
- case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */
- strcpy((char *) menu->name, "60 Hz");
- return 0;
- }
- break;
- }
- return -EINVAL;
-}
-
-static int sd_setquality(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(jpeg_qual) - 1; i++) {
- if (val <= jpeg_qual[i])
- break;
- }
- if (i > 0
- && i == sd->reg08
- && val < jpeg_qual[sd->reg08])
- i--;
- sd->reg08 = i;
- sd->ctrls[QUALITY].val = jpeg_qual[i];
- if (gspca_dev->streaming)
- jpeg_set_qual(sd->jpeg_hdr, sd->ctrls[QUALITY].val);
- return gspca_dev->usb_err;
-}
-
static int sd_set_jcomp(struct gspca_dev *gspca_dev,
struct v4l2_jpegcompression *jcomp)
{
struct sd *sd = (struct sd *) gspca_dev;
+ int ret;
- sd_setquality(gspca_dev, jcomp->quality);
- jcomp->quality = sd->ctrls[QUALITY].val;
- return gspca_dev->usb_err;
+ ret = v4l2_ctrl_s_ctrl(sd->jpegqual, jcomp->quality);
+ if (ret)
+ return ret;
+ jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual);
+ return 0;
}
static int sd_get_jcomp(struct gspca_dev *gspca_dev,
@@ -7061,7 +6899,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
memset(jcomp, 0, sizeof *jcomp);
- jcomp->quality = sd->ctrls[QUALITY].val;
+ jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual);
jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT
| V4L2_JPEG_MARKER_DQT;
return 0;
@@ -7085,14 +6923,13 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
static const struct sd_desc sd_desc = {
.name = KBUILD_MODNAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
+ .isoc_init = sd_pre_start,
.start = sd_start,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
- .querymenu = sd_querymenu,
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
@@ -7176,6 +7013,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
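The zc3xx hunks above are a textbook gspca conversion to the v4l2_ctrl framework: controls are created once in an .init_controls hook, related controls are grouped into clusters, and all hardware programming happens in a single .s_ctrl callback. A minimal sketch of the same pattern follows; the foo_dev driver and its placeholder register write are illustrative, not taken from this patch, and only the v4l2_ctrl calls already used above are assumed.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <media/v4l2-ctrls.h>

/* Hypothetical driver state; the two control pointers must stay adjacent
 * because v4l2_ctrl_auto_cluster() operates on a consecutive array. */
struct foo_dev {
	struct v4l2_ctrl_handler hdl;
	struct v4l2_ctrl *autogain;
	struct v4l2_ctrl *gain;
};

/* Placeholder for the device-specific register writes. */
static int foo_write_autogain(struct foo_dev *foo, int on, int gain)
{
	return 0;
}

static int foo_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct foo_dev *foo = container_of(ctrl->handler, struct foo_dev, hdl);

	switch (ctrl->id) {
	case V4L2_CID_AUTOGAIN:
		/* The core calls s_ctrl once per cluster, with the master's
		 * id; the manual control's new value is already available
		 * in foo->gain->val. */
		return foo_write_autogain(foo, ctrl->val, foo->gain->val);
	}
	return -EINVAL;
}

static const struct v4l2_ctrl_ops foo_ctrl_ops = {
	.s_ctrl = foo_s_ctrl,
};

static int foo_init_controls(struct foo_dev *foo)
{
	v4l2_ctrl_handler_init(&foo->hdl, 2);
	foo->autogain = v4l2_ctrl_new_std(&foo->hdl, &foo_ctrl_ops,
					  V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
	foo->gain = v4l2_ctrl_new_std(&foo->hdl, &foo_ctrl_ops,
				      V4L2_CID_GAIN, 0, 255, 1, 128);
	if (foo->hdl.error)
		return foo->hdl.error;

	/* Value 0 of the auto control means "manual"; zc3xx additionally
	 * passes true here and implements g_volatile_ctrl so the manual
	 * controls can be read back while autogain runs. */
	v4l2_ctrl_auto_cluster(2, &foo->autogain, 0, false);
	return 0;
}

Once the handler is hooked up (as the hunk above does via gspca_dev->vdev.ctrl_handler), the V4L2 core implements the control ioctls, including the menu queries that sd_querymenu used to provide, directly from it.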
diff --git a/drivers/media/video/hdpvr/hdpvr-control.c b/drivers/media/video/hdpvr/hdpvr-control.c
index 068df4ba3f51..ae8f229d1141 100644
--- a/drivers/media/video/hdpvr/hdpvr-control.c
+++ b/drivers/media/video/hdpvr/hdpvr-control.c
@@ -113,6 +113,8 @@ int get_input_lines_info(struct hdpvr_device *dev)
"get input lines info returned: %d, %s\n", ret,
print_buf);
}
+#else
+ (void)ret; /* suppress compiler warning */
#endif
lines = dev->usbc_buf[1] << 8 | dev->usbc_buf[0];
mutex_unlock(&dev->usbc_mutex);
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index 11ffe9cc1780..0e9e156bb2aa 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -994,7 +994,7 @@ static int hdpvr_try_ctrl(struct v4l2_ext_control *ctrl, int ac3)
default:
return -EINVAL;
}
- return 0;
+ return ret;
}
static int vidioc_try_ext_ctrls(struct file *file, void *priv,
diff --git a/drivers/media/video/hexium_gemini.c b/drivers/media/video/hexium_gemini.c
index a62322d5c0d8..366434f5647e 100644
--- a/drivers/media/video/hexium_gemini.c
+++ b/drivers/media/video/hexium_gemini.c
@@ -40,15 +40,15 @@ static int hexium_num;
#define HEXIUM_INPUTS 9
static struct v4l2_input hexium_inputs[HEXIUM_INPUTS] = {
- { 0, "CVBS 1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 1, "CVBS 2", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 2, "CVBS 3", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 3, "CVBS 4", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 4, "CVBS 5", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 5, "CVBS 6", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 6, "Y/C 1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 7, "Y/C 2", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 8, "Y/C 3", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
+ { 0, "CVBS 1", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 1, "CVBS 2", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 2, "CVBS 3", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 3, "CVBS 4", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 4, "CVBS 5", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 5, "CVBS 6", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 6, "Y/C 1", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 7, "Y/C 2", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 8, "Y/C 3", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
};
#define HEXIUM_AUDIOS 0
@@ -59,11 +59,6 @@ struct hexium_data
u8 byte;
};
-#define HEXIUM_CONTROLS 1
-static struct v4l2_queryctrl hexium_controls[] = {
- { V4L2_CID_PRIVATE_BASE, V4L2_CTRL_TYPE_BOOLEAN, "B/W", 0, 1, 1, 0, 0 },
-};
-
#define HEXIUM_GEMINI_V_1_0 1
#define HEXIUM_GEMINI_DUAL_V_1_0 2
@@ -76,7 +71,6 @@ struct hexium
int cur_input; /* current input */
v4l2_std_id cur_std; /* current standard */
- int cur_bw; /* current black/white status */
};
/* Samsung KS0127B decoder default registers */
@@ -119,18 +113,10 @@ static struct hexium_data hexium_pal[] = {
{ 0x01, 0x52 }, { 0x12, 0x64 }, { 0x2D, 0x2C }, { 0x2E, 0x9B }, { -1 , 0xFF }
};
-static struct hexium_data hexium_pal_bw[] = {
- { 0x01, 0x52 }, { 0x12, 0x64 }, { 0x2D, 0x2C }, { 0x2E, 0x9B }, { -1 , 0xFF }
-};
-
static struct hexium_data hexium_ntsc[] = {
{ 0x01, 0x53 }, { 0x12, 0x04 }, { 0x2D, 0x23 }, { 0x2E, 0x81 }, { -1 , 0xFF }
};
-static struct hexium_data hexium_ntsc_bw[] = {
- { 0x01, 0x53 }, { 0x12, 0x04 }, { 0x2D, 0x23 }, { 0x2E, 0x81 }, { -1 , 0xFF }
-};
-
static struct hexium_data hexium_secam[] = {
{ 0x01, 0x52 }, { 0x12, 0x64 }, { 0x2D, 0x2C }, { 0x2E, 0x9B }, { -1 , 0xFF }
};
@@ -264,93 +250,6 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
return 0;
}
-/* the saa7146 provides some controls (brightness, contrast, saturation)
- which gets registered *after* this function. because of this we have
- to return with a value != 0 even if the function succeeded.. */
-static int vidioc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qc)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- int i;
-
- for (i = HEXIUM_CONTROLS - 1; i >= 0; i--) {
- if (hexium_controls[i].id == qc->id) {
- *qc = hexium_controls[i];
- DEB_D("VIDIOC_QUERYCTRL %d\n", qc->id);
- return 0;
- }
- }
- return dev->ext_vv_data->core_ops->vidioc_queryctrl(file, fh, qc);
-}
-
-static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- struct hexium *hexium = (struct hexium *) dev->ext_priv;
- int i;
-
- for (i = HEXIUM_CONTROLS - 1; i >= 0; i--) {
- if (hexium_controls[i].id == vc->id)
- break;
- }
-
- if (i < 0)
- return dev->ext_vv_data->core_ops->vidioc_g_ctrl(file, fh, vc);
-
- if (vc->id == V4L2_CID_PRIVATE_BASE) {
- vc->value = hexium->cur_bw;
- DEB_D("VIDIOC_G_CTRL BW:%d\n", vc->value);
- return 0;
- }
- return -EINVAL;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- struct hexium *hexium = (struct hexium *) dev->ext_priv;
- int i = 0;
-
- for (i = HEXIUM_CONTROLS - 1; i >= 0; i--) {
- if (hexium_controls[i].id == vc->id)
- break;
- }
-
- if (i < 0)
- return dev->ext_vv_data->core_ops->vidioc_s_ctrl(file, fh, vc);
-
- if (vc->id == V4L2_CID_PRIVATE_BASE)
- hexium->cur_bw = vc->value;
-
- DEB_D("VIDIOC_S_CTRL BW:%d\n", hexium->cur_bw);
-
- if (0 == hexium->cur_bw && V4L2_STD_PAL == hexium->cur_std) {
- hexium_set_standard(hexium, hexium_pal);
- return 0;
- }
- if (0 == hexium->cur_bw && V4L2_STD_NTSC == hexium->cur_std) {
- hexium_set_standard(hexium, hexium_ntsc);
- return 0;
- }
- if (0 == hexium->cur_bw && V4L2_STD_SECAM == hexium->cur_std) {
- hexium_set_standard(hexium, hexium_secam);
- return 0;
- }
- if (1 == hexium->cur_bw && V4L2_STD_PAL == hexium->cur_std) {
- hexium_set_standard(hexium, hexium_pal_bw);
- return 0;
- }
- if (1 == hexium->cur_bw && V4L2_STD_NTSC == hexium->cur_std) {
- hexium_set_standard(hexium, hexium_ntsc_bw);
- return 0;
- }
- if (1 == hexium->cur_bw && V4L2_STD_SECAM == hexium->cur_std)
- /* fixme: is there no bw secam mode? */
- return -EINVAL;
-
- return -EINVAL;
-}
-
-
static struct saa7146_ext_vv vv_data;
/* this function only gets called when the probing was successful */
@@ -399,12 +298,10 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
hexium->cur_input = 0;
saa7146_vv_init(dev, &vv_data);
- vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
- vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
- vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
- vv_data.ops.vidioc_g_input = vidioc_g_input;
- vv_data.ops.vidioc_s_input = vidioc_s_input;
+
+ vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
+ vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
+ vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER);
if (ret < 0) {
pr_err("cannot register capture v4l2 device. skipping.\n");
diff --git a/drivers/media/video/hexium_orion.c b/drivers/media/video/hexium_orion.c
index 23debc967d94..a1eb26d11070 100644
--- a/drivers/media/video/hexium_orion.c
+++ b/drivers/media/video/hexium_orion.c
@@ -41,15 +41,15 @@ static int hexium_num;
#define HEXIUM_INPUTS 9
static struct v4l2_input hexium_inputs[HEXIUM_INPUTS] = {
- { 0, "CVBS 1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 1, "CVBS 2", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 2, "CVBS 3", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 3, "CVBS 4", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 4, "CVBS 5", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 5, "CVBS 6", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 6, "Y/C 1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 7, "Y/C 2", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 8, "Y/C 3", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
+ { 0, "CVBS 1", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 1, "CVBS 2", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 2, "CVBS 3", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 3, "CVBS 4", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 4, "CVBS 5", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 5, "CVBS 6", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 6, "Y/C 1", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 7, "Y/C 2", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 8, "Y/C 3", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
};
#define HEXIUM_AUDIOS 0
@@ -371,9 +371,9 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
DEB_EE("\n");
saa7146_vv_init(dev, &vv_data);
- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
- vv_data.ops.vidioc_g_input = vidioc_g_input;
- vv_data.ops.vidioc_s_input = vidioc_s_input;
+ vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
+ vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
+ vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) {
pr_err("cannot register capture v4l2 device. skipping.\n");
return -1;
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 679262ed13bc..057929e165ab 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -1201,9 +1201,9 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
struct v4l2_ctrl_handler *hdl = itv->v4l2_dev.ctrl_handler;
itv->ctrl_pts = v4l2_ctrl_new_std(hdl, &ivtv_hdl_out_ops,
- V4L2_CID_MPEG_VIDEO_DEC_PTS, 0, 0, 1, 0);
+ V4L2_CID_MPEG_VIDEO_DEC_PTS, 0, 0, 0, 0);
itv->ctrl_frame = v4l2_ctrl_new_std(hdl, &ivtv_hdl_out_ops,
- V4L2_CID_MPEG_VIDEO_DEC_FRAME, 0, 0x7fffffff, 1, 0);
+ V4L2_CID_MPEG_VIDEO_DEC_FRAME, 0, 0, 0, 0);
/* Note: V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO is not supported,
mask that menu item. */
itv->ctrl_audio_playback =
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index c9663e885b9f..9ff69b5a87e2 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -746,8 +746,9 @@ unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
return res;
}
-unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
+unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait)
{
+ unsigned long req_events = poll_requested_events(wait);
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
@@ -755,7 +756,8 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
unsigned res = 0;
/* Start a capture if there is none */
- if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
+ if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags) &&
+ (req_events & (POLLIN | POLLRDNORM))) {
int rc;
rc = ivtv_start_capture(id);
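The ivtv_v4l2_enc_poll() change above uses poll_requested_events() so that a poll() waiting only for exceptions (V4L2 events on POLLPRI) no longer starts the encoder as a side effect. A hedged sketch of the pattern, with foo_dev and its helper functions standing in for driver-specific code:

#include <linux/poll.h>

static unsigned int foo_enc_poll(struct file *filp, poll_table *wait)
{
	/* Which events is the caller actually interested in? */
	unsigned long req_events = poll_requested_events(wait);
	struct foo_dev *dev = filp->private_data;	/* hypothetical */
	unsigned int res = 0;

	/* Start a capture only for read-style polls; a POLLPRI-only poll
	 * must not have the side effect of starting the stream. */
	if (!foo_is_streaming(dev) &&
	    (req_events & (POLLIN | POLLRDNORM)) &&
	    foo_start_capture(dev) < 0)
		return POLLERR;

	poll_wait(filp, &dev->read_waitq, wait);
	if (foo_data_ready(dev))
		res |= POLLIN | POLLRDNORM;
	return res;
}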
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 989e556913ed..f7d57b3f2842 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -135,7 +135,6 @@ void ivtv_set_osd_alpha(struct ivtv *itv)
int ivtv_set_speed(struct ivtv *itv, int speed)
{
u32 data[CX2341X_MBOX_MAX_DATA];
- struct ivtv_stream *s;
int single_step = (speed == 1 || speed == -1);
DEFINE_WAIT(wait);
@@ -145,8 +144,6 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
if (speed == itv->speed && !single_step)
return 0;
- s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
-
if (single_step && (speed < 0) == (itv->speed < 0)) {
/* Single step video and no need to change direction */
ivtv_vapi(itv, CX2341X_DEC_STEP_VIDEO, 1, 0);
@@ -1468,8 +1465,9 @@ static int ivtv_subscribe_event(struct v4l2_fh *fh, struct v4l2_event_subscripti
switch (sub->type) {
case V4L2_EVENT_VSYNC:
case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
case V4L2_EVENT_CTRL:
- return v4l2_event_subscribe(fh, sub, 0);
+ return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops);
default:
return -EINVAL;
}
@@ -1827,7 +1825,7 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
return ivtv_decoder_ioctls(file, cmd, (void *)arg);
default:
- return -EINVAL;
+ return -ENOTTY;
}
return 0;
}
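The subscribe_event hunk above reflects the new v4l2_event_subscribe() signature, which gained a struct v4l2_subscribed_event_ops argument: control events go through the control framework's v4l2_ctrl_sub_ev_ops, other events pass NULL. A minimal sketch of a subscribe handler after this change (the handler name is illustrative):

#include <linux/errno.h>
#include <linux/videodev2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>

static int foo_subscribe_event(struct v4l2_fh *fh,
			       struct v4l2_event_subscription *sub)
{
	switch (sub->type) {
	case V4L2_EVENT_VSYNC:
	case V4L2_EVENT_EOS:
		/* Plain events need no add/del/replace/merge callbacks. */
		return v4l2_event_subscribe(fh, sub, 0, NULL);
	case V4L2_EVENT_CTRL:
		/* Control events must use the control framework's ops so
		 * that queued events are replaced and merged correctly. */
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops);
	default:
		return -EINVAL;
	}
}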
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 7ea5ca7f012b..6738592aa35d 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -228,6 +228,10 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
s->vdev->release = video_device_release;
s->vdev->tvnorms = V4L2_STD_ALL;
s->vdev->lock = &itv->serialize_lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &s->vdev->flags);
set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev->flags);
ivtv_set_funcs(s->vdev);
return 0;
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index e5e7fa9e737b..05b94aa8ba32 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -1293,6 +1293,7 @@ static int __init ivtvfb_init(void)
drv = driver_find("ivtv", &pci_bus_type);
err = driver_for_each_device(drv, NULL, &registered, ivtvfb_callback_init);
+ (void)err; /* suppress compiler warning */
if (!registered) {
printk(KERN_ERR "ivtvfb: no cards found\n");
return -ENODEV;
@@ -1309,6 +1310,7 @@ static void ivtvfb_cleanup(void)
drv = driver_find("ivtv", &pci_bus_type);
err = driver_for_each_device(drv, NULL, NULL, ivtvfb_callback_cleanup);
+ (void)err; /* suppress compiler warning */
}
module_init(ivtvfb_init);
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
index 4b021e1ee5f2..bb589917b65b 100644
--- a/drivers/media/video/m5mols/m5mols.h
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -21,11 +21,6 @@
extern int m5mols_debug;
-#define to_m5mols(__sd) container_of(__sd, struct m5mols_info, sd)
-
-#define to_sd(__ctrl) \
- (&container_of(__ctrl->handler, struct m5mols_info, handle)->sd)
-
enum m5mols_restype {
M5MOLS_RESTYPE_MONITOR,
M5MOLS_RESTYPE_CAPTURE,
@@ -163,21 +158,27 @@ struct m5mols_version {
* @ffmt: current fmt according to resolution type
* @res_type: current resolution type
* @irq_waitq: waitqueue for the capture
- * @flags: state variable for the interrupt handler
+ * @irq_done: set to 1 in the interrupt handler
* @handle: control handler
- * @autoexposure: Auto Exposure control
- * @exposure: Exposure control
- * @autowb: Auto White Balance control
- * @colorfx: Color effect control
- * @saturation: Saturation control
- * @zoom: Zoom control
+ * @auto_exposure: auto/manual exposure control
+ * @exposure_bias: exposure compensation control
+ * @exposure: manual exposure control
+ * @metering: exposure metering control
+ * @auto_iso: auto/manual ISO sensitivity control
+ * @iso: manual ISO sensitivity control
+ * @auto_wb: auto white balance control
+ * @lock_3a: 3A lock control
+ * @colorfx: color effect control
+ * @saturation: saturation control
+ * @zoom: zoom control
+ * @wdr: wide dynamic range control
+ * @stabilization: image stabilization control
+ * @jpeg_quality: JPEG compression quality control
* @ver: information of the version
* @cap: the capture mode attributes
- * @power: current sensor's power status
* @isp_ready: 1 when the ISP controller has completed booting
+ * @power: current sensor's power status
* @ctrl_sync: 1 when the control handler state is restored in H/W
- * @lock_ae: true means the Auto Exposure is locked
- * @lock_awb: true means the Aut WhiteBalance is locked
* @resolution: register value for current resolution
* @mode: register value for current operation mode
* @set_power: optional power callback to the board code
@@ -193,15 +194,27 @@ struct m5mols_info {
atomic_t irq_done;
struct v4l2_ctrl_handler handle;
+ struct {
+ /* exposure/exposure bias/auto exposure cluster */
+ struct v4l2_ctrl *auto_exposure;
+ struct v4l2_ctrl *exposure_bias;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *metering;
+ };
+ struct {
+ /* iso/auto iso cluster */
+ struct v4l2_ctrl *auto_iso;
+ struct v4l2_ctrl *iso;
+ };
+ struct v4l2_ctrl *auto_wb;
- /* Autoexposure/exposure control cluster */
- struct v4l2_ctrl *autoexposure;
- struct v4l2_ctrl *exposure;
-
- struct v4l2_ctrl *autowb;
+ struct v4l2_ctrl *lock_3a;
struct v4l2_ctrl *colorfx;
struct v4l2_ctrl *saturation;
struct v4l2_ctrl *zoom;
+ struct v4l2_ctrl *wdr;
+ struct v4l2_ctrl *stabilization;
+ struct v4l2_ctrl *jpeg_quality;
struct m5mols_version ver;
struct m5mols_capture cap;
@@ -210,8 +223,6 @@ struct m5mols_info {
unsigned int power:1;
unsigned int ctrl_sync:1;
- bool lock_ae;
- bool lock_awb;
u8 resolution;
u8 mode;
@@ -282,7 +293,7 @@ int m5mols_busy_wait(struct v4l2_subdev *sd, u32 reg, u32 value, u32 mask,
* The available executing order between each modes are as follows:
* PARAMETER <---> MONITOR <---> CAPTURE
*/
-int m5mols_mode(struct m5mols_info *info, u8 mode);
+int m5mols_set_mode(struct m5mols_info *info, u8 mode);
int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg);
int m5mols_wait_interrupt(struct v4l2_subdev *sd, u8 condition, u32 timeout);
@@ -291,9 +302,33 @@ int m5mols_start_capture(struct m5mols_info *info);
int m5mols_do_scenemode(struct m5mols_info *info, u8 mode);
int m5mols_lock_3a(struct m5mols_info *info, bool lock);
int m5mols_set_ctrl(struct v4l2_ctrl *ctrl);
+int m5mols_init_controls(struct v4l2_subdev *sd);
/* The firmware function */
int m5mols_update_fw(struct v4l2_subdev *sd,
int (*set_power)(struct m5mols_info *, bool));
+static inline struct m5mols_info *to_m5mols(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct m5mols_info, sd);
+}
+
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ struct m5mols_info *info = container_of(ctrl->handler,
+ struct m5mols_info, handle);
+ return &info->sd;
+}
+
+static inline void m5mols_set_ctrl_mode(struct v4l2_ctrl *ctrl,
+ unsigned int mode)
+{
+ ctrl->priv = (void *)mode;
+}
+
+static inline unsigned int m5mols_get_ctrl_mode(struct v4l2_ctrl *ctrl)
+{
+ return (unsigned int)ctrl->priv;
+}
+
#endif /* M5MOLS_H */
diff --git a/drivers/media/video/m5mols/m5mols_capture.c b/drivers/media/video/m5mols/m5mols_capture.c
index ba25e8e2ba4c..cb243bd278ce 100644
--- a/drivers/media/video/m5mols/m5mols_capture.c
+++ b/drivers/media/video/m5mols/m5mols_capture.c
@@ -106,7 +106,6 @@ static int m5mols_capture_info(struct m5mols_info *info)
int m5mols_start_capture(struct m5mols_info *info)
{
struct v4l2_subdev *sd = &info->sd;
- u8 resolution = info->resolution;
int ret;
/*
@@ -114,22 +113,18 @@ int m5mols_start_capture(struct m5mols_info *info)
* format. The frame capture is initiated during switching from Monitor
* to Capture mode.
*/
- ret = m5mols_mode(info, REG_MONITOR);
+ ret = m5mols_set_mode(info, REG_MONITOR);
if (!ret)
ret = m5mols_restore_controls(info);
if (!ret)
ret = m5mols_write(sd, CAPP_YUVOUT_MAIN, REG_JPEG);
if (!ret)
- ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, resolution);
+ ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, info->resolution);
if (!ret)
- ret = m5mols_lock_3a(info, true);
- if (!ret)
- ret = m5mols_mode(info, REG_CAPTURE);
+ ret = m5mols_set_mode(info, REG_CAPTURE);
if (!ret)
/* Wait until a frame is captured to ISP internal memory */
ret = m5mols_wait_interrupt(sd, REG_INT_CAPTURE, 2000);
- if (!ret)
- ret = m5mols_lock_3a(info, false);
if (ret)
return ret;
diff --git a/drivers/media/video/m5mols/m5mols_controls.c b/drivers/media/video/m5mols/m5mols_controls.c
index d135d20d09cf..392a028730e2 100644
--- a/drivers/media/video/m5mols/m5mols_controls.c
+++ b/drivers/media/video/m5mols/m5mols_controls.c
@@ -139,7 +139,7 @@ int m5mols_do_scenemode(struct m5mols_info *info, u8 mode)
if (mode > REG_SCENE_CANDLE)
return -EINVAL;
- ret = m5mols_lock_3a(info, false);
+ ret = v4l2_ctrl_s_ctrl(info->lock_3a, 0);
if (!ret)
ret = m5mols_write(sd, AE_EV_PRESET_MONITOR, mode);
if (!ret)
@@ -169,7 +169,7 @@ int m5mols_do_scenemode(struct m5mols_info *info, u8 mode)
if (!ret)
ret = m5mols_write(sd, AE_ISO, scenemode.iso);
if (!ret)
- ret = m5mols_mode(info, REG_CAPTURE);
+ ret = m5mols_set_mode(info, REG_CAPTURE);
if (!ret)
ret = m5mols_write(sd, CAPP_WDR_EN, scenemode.wdr);
if (!ret)
@@ -181,119 +181,448 @@ int m5mols_do_scenemode(struct m5mols_info *info, u8 mode)
if (!ret)
ret = m5mols_write(sd, CAPC_MODE, scenemode.capt_mode);
if (!ret)
- ret = m5mols_mode(info, REG_MONITOR);
+ ret = m5mols_set_mode(info, REG_MONITOR);
return ret;
}
-static int m5mols_lock_ae(struct m5mols_info *info, bool lock)
+static int m5mols_3a_lock(struct m5mols_info *info, struct v4l2_ctrl *ctrl)
{
+ bool af_lock = ctrl->val & V4L2_LOCK_FOCUS;
int ret = 0;
- if (info->lock_ae != lock)
- ret = m5mols_write(&info->sd, AE_LOCK,
- lock ? REG_AE_LOCK : REG_AE_UNLOCK);
- if (!ret)
- info->lock_ae = lock;
+ if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_EXPOSURE) {
+ bool ae_lock = ctrl->val & V4L2_LOCK_EXPOSURE;
+
+ ret = m5mols_write(&info->sd, AE_LOCK, ae_lock ?
+ REG_AE_LOCK : REG_AE_UNLOCK);
+ if (ret)
+ return ret;
+ }
+
+ if (((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_WHITE_BALANCE)
+ && info->auto_wb->val) {
+ bool awb_lock = ctrl->val & V4L2_LOCK_WHITE_BALANCE;
+
+ ret = m5mols_write(&info->sd, AWB_LOCK, awb_lock ?
+ REG_AWB_LOCK : REG_AWB_UNLOCK);
+ if (ret)
+ return ret;
+ }
+
+ if (!info->ver.af || !af_lock)
+ return ret;
+
+ if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_FOCUS)
+ ret = m5mols_write(&info->sd, AF_EXECUTE, REG_AF_STOP);
+
+ return ret;
+}
+
+static int m5mols_set_metering_mode(struct m5mols_info *info, int mode)
+{
+ unsigned int metering;
+
+ switch (mode) {
+ case V4L2_EXPOSURE_METERING_CENTER_WEIGHTED:
+ metering = REG_AE_CENTER;
+ break;
+ case V4L2_EXPOSURE_METERING_SPOT:
+ metering = REG_AE_SPOT;
+ break;
+ default:
+ metering = REG_AE_ALL;
+ break;
+ }
+
+ return m5mols_write(&info->sd, AE_MODE, metering);
+}
+
+static int m5mols_set_exposure(struct m5mols_info *info, int exposure)
+{
+ struct v4l2_subdev *sd = &info->sd;
+ int ret = 0;
+
+ if (exposure == V4L2_EXPOSURE_AUTO) {
+ /* Unlock auto exposure */
+ info->lock_3a->val &= ~V4L2_LOCK_EXPOSURE;
+ m5mols_3a_lock(info, info->lock_3a);
+
+ ret = m5mols_set_metering_mode(info, info->metering->val);
+ if (ret < 0)
+ return ret;
+
+ v4l2_dbg(1, m5mols_debug, sd,
+ "%s: exposure bias: %#x, metering: %#x\n",
+ __func__, info->exposure_bias->val,
+ info->metering->val);
+
+ return m5mols_write(sd, AE_INDEX, info->exposure_bias->val);
+ }
+
+ if (exposure == V4L2_EXPOSURE_MANUAL) {
+ ret = m5mols_write(sd, AE_MODE, REG_AE_OFF);
+ if (ret == 0)
+ ret = m5mols_write(sd, AE_MAN_GAIN_MON,
+ info->exposure->val);
+ if (ret == 0)
+ ret = m5mols_write(sd, AE_MAN_GAIN_CAP,
+ info->exposure->val);
+
+ v4l2_dbg(1, m5mols_debug, sd, "%s: exposure: %#x\n",
+ __func__, info->exposure->val);
+ }
+
+ return ret;
+}
+
+static int m5mols_set_white_balance(struct m5mols_info *info, int val)
+{
+ static const unsigned short wb[][2] = {
+ { V4L2_WHITE_BALANCE_INCANDESCENT, REG_AWB_INCANDESCENT },
+ { V4L2_WHITE_BALANCE_FLUORESCENT, REG_AWB_FLUORESCENT_1 },
+ { V4L2_WHITE_BALANCE_FLUORESCENT_H, REG_AWB_FLUORESCENT_2 },
+ { V4L2_WHITE_BALANCE_HORIZON, REG_AWB_HORIZON },
+ { V4L2_WHITE_BALANCE_DAYLIGHT, REG_AWB_DAYLIGHT },
+ { V4L2_WHITE_BALANCE_FLASH, REG_AWB_LEDLIGHT },
+ { V4L2_WHITE_BALANCE_CLOUDY, REG_AWB_CLOUDY },
+ { V4L2_WHITE_BALANCE_SHADE, REG_AWB_SHADE },
+ { V4L2_WHITE_BALANCE_AUTO, REG_AWB_AUTO },
+ };
+ int i;
+ struct v4l2_subdev *sd = &info->sd;
+ int ret = -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(wb); i++) {
+ int awb;
+ if (wb[i][0] != val)
+ continue;
+
+ v4l2_dbg(1, m5mols_debug, sd,
+ "Setting white balance to: %#x\n", wb[i][0]);
+
+ awb = wb[i][0] == V4L2_WHITE_BALANCE_AUTO;
+ ret = m5mols_write(sd, AWB_MODE, awb ? REG_AWB_AUTO :
+ REG_AWB_PRESET);
+ if (ret < 0)
+ return ret;
+
+ if (!awb)
+ ret = m5mols_write(sd, AWB_MANUAL, wb[i][1]);
+ }
return ret;
}
-static int m5mols_lock_awb(struct m5mols_info *info, bool lock)
+static int m5mols_set_saturation(struct m5mols_info *info, int val)
+{
+ int ret = m5mols_write(&info->sd, MON_CHROMA_LVL, val);
+ if (ret < 0)
+ return ret;
+
+ return m5mols_write(&info->sd, MON_CHROMA_EN, REG_CHROMA_ON);
+}
+
+static int m5mols_set_color_effect(struct m5mols_info *info, int val)
{
+ unsigned int m_effect = REG_COLOR_EFFECT_OFF;
+ unsigned int p_effect = REG_EFFECT_OFF;
+ unsigned int cfix_r = 0, cfix_b = 0;
+ struct v4l2_subdev *sd = &info->sd;
int ret = 0;
- if (info->lock_awb != lock)
- ret = m5mols_write(&info->sd, AWB_LOCK,
- lock ? REG_AWB_LOCK : REG_AWB_UNLOCK);
+ switch (val) {
+ case V4L2_COLORFX_BW:
+ m_effect = REG_COLOR_EFFECT_ON;
+ break;
+ case V4L2_COLORFX_NEGATIVE:
+ p_effect = REG_EFFECT_NEGA;
+ break;
+ case V4L2_COLORFX_EMBOSS:
+ p_effect = REG_EFFECT_EMBOSS;
+ break;
+ case V4L2_COLORFX_SEPIA:
+ m_effect = REG_COLOR_EFFECT_ON;
+ cfix_r = REG_CFIXR_SEPIA;
+ cfix_b = REG_CFIXB_SEPIA;
+ break;
+ }
+
+ ret = m5mols_write(sd, PARM_EFFECT, p_effect);
if (!ret)
- info->lock_awb = lock;
+ ret = m5mols_write(sd, MON_EFFECT, m_effect);
+
+ if (ret == 0 && m_effect == REG_COLOR_EFFECT_ON) {
+ ret = m5mols_write(sd, MON_CFIXR, cfix_r);
+ if (!ret)
+ ret = m5mols_write(sd, MON_CFIXB, cfix_b);
+ }
+
+ v4l2_dbg(1, m5mols_debug, sd,
+ "p_effect: %#x, m_effect: %#x, r: %#x, b: %#x (%d)\n",
+ p_effect, m_effect, cfix_r, cfix_b, ret);
return ret;
}
-/* m5mols_lock_3a() - Lock 3A(Auto Exposure, Auto Whitebalance, Auto Focus) */
-int m5mols_lock_3a(struct m5mols_info *info, bool lock)
+static int m5mols_set_iso(struct m5mols_info *info, int auto_iso)
+{
+ u32 iso = auto_iso ? 0 : info->iso->val + 1;
+
+ return m5mols_write(&info->sd, AE_ISO, iso);
+}
+
+static int m5mols_set_wdr(struct m5mols_info *info, int wdr)
{
int ret;
- ret = m5mols_lock_ae(info, lock);
- if (!ret)
- ret = m5mols_lock_awb(info, lock);
- /* Don't need to handle unlocking AF */
- if (!ret && is_available_af(info) && lock)
- ret = m5mols_write(&info->sd, AF_EXECUTE, REG_AF_STOP);
+ ret = m5mols_write(&info->sd, MON_TONE_CTL, wdr ? 9 : 5);
+ if (ret < 0)
+ return ret;
+
+ ret = m5mols_set_mode(info, REG_CAPTURE);
+ if (ret < 0)
+ return ret;
+
+ return m5mols_write(&info->sd, CAPP_WDR_EN, wdr);
+}
+
+static int m5mols_set_stabilization(struct m5mols_info *info, int val)
+{
+ struct v4l2_subdev *sd = &info->sd;
+ unsigned int evp = val ? 0xe : 0x0;
+ int ret;
+
+ ret = m5mols_write(sd, AE_EV_PRESET_MONITOR, evp);
+ if (ret < 0)
+ return ret;
+
+ return m5mols_write(sd, AE_EV_PRESET_CAPTURE, evp);
+}
+
+static int m5mols_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = to_sd(ctrl);
+ struct m5mols_info *info = to_m5mols(sd);
+ int ret = 0;
+ u8 status;
+
+ v4l2_dbg(1, m5mols_debug, sd, "%s: ctrl: %s (%d)\n",
+ __func__, ctrl->name, info->isp_ready);
+
+ if (!info->isp_ready)
+ return -EBUSY;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ ret = m5mols_read_u8(sd, AE_ISO, &status);
+ if (ret == 0)
+ ctrl->val = !status;
+ if (status != REG_ISO_AUTO)
+ info->iso->val = status - 1;
+ break;
+
+ case V4L2_CID_3A_LOCK:
+ ctrl->val &= ~0x7;
+
+ ret = m5mols_read_u8(sd, AE_LOCK, &status);
+ if (ret)
+ return ret;
+ if (status)
+ info->lock_3a->val |= V4L2_LOCK_EXPOSURE;
+
+ ret = m5mols_read_u8(sd, AWB_LOCK, &status);
+ if (ret)
+ return ret;
+ if (status)
+ info->lock_3a->val |= V4L2_LOCK_WHITE_BALANCE;
+
+ ret = m5mols_read_u8(sd, AF_EXECUTE, &status);
+ if (!status)
+ info->lock_3a->val |= V4L2_LOCK_FOCUS;
+ break;
+ }
return ret;
}
-/* m5mols_set_ctrl() - The main s_ctrl function called by m5mols_set_ctrl() */
-int m5mols_set_ctrl(struct v4l2_ctrl *ctrl)
+static int m5mols_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ unsigned int ctrl_mode = m5mols_get_ctrl_mode(ctrl);
struct v4l2_subdev *sd = to_sd(ctrl);
struct m5mols_info *info = to_m5mols(sd);
- int ret;
+ int last_mode = info->mode;
+ int ret = 0;
+
+ /*
+ * If needed, defer restoring the controls until
+ * the device is fully initialized.
+ */
+ if (!info->isp_ready) {
+ info->ctrl_sync = 0;
+ return 0;
+ }
+
+ v4l2_dbg(1, m5mols_debug, sd, "%s: %s, val: %d, priv: %#x\n",
+ __func__, ctrl->name, ctrl->val, (int)ctrl->priv);
+
+ if (ctrl_mode && ctrl_mode != info->mode) {
+ ret = m5mols_set_mode(info, ctrl_mode);
+ if (ret < 0)
+ return ret;
+ }
switch (ctrl->id) {
+ case V4L2_CID_3A_LOCK:
+ ret = m5mols_3a_lock(info, ctrl);
+ break;
+
case V4L2_CID_ZOOM_ABSOLUTE:
- return m5mols_write(sd, MON_ZOOM, ctrl->val);
+ ret = m5mols_write(sd, MON_ZOOM, ctrl->val);
+ break;
case V4L2_CID_EXPOSURE_AUTO:
- ret = m5mols_lock_ae(info,
- ctrl->val == V4L2_EXPOSURE_AUTO ? false : true);
- if (!ret && ctrl->val == V4L2_EXPOSURE_AUTO)
- ret = m5mols_write(sd, AE_MODE, REG_AE_ALL);
- if (!ret && ctrl->val == V4L2_EXPOSURE_MANUAL) {
- int val = info->exposure->val;
- ret = m5mols_write(sd, AE_MODE, REG_AE_OFF);
- if (!ret)
- ret = m5mols_write(sd, AE_MAN_GAIN_MON, val);
- if (!ret)
- ret = m5mols_write(sd, AE_MAN_GAIN_CAP, val);
- }
- return ret;
+ ret = m5mols_set_exposure(info, ctrl->val);
+ break;
- case V4L2_CID_AUTO_WHITE_BALANCE:
- ret = m5mols_lock_awb(info, ctrl->val ? false : true);
- if (!ret)
- ret = m5mols_write(sd, AWB_MODE, ctrl->val ?
- REG_AWB_AUTO : REG_AWB_PRESET);
- return ret;
+ case V4L2_CID_ISO_SENSITIVITY:
+ ret = m5mols_set_iso(info, ctrl->val);
+ break;
+
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ ret = m5mols_set_white_balance(info, ctrl->val);
+ break;
case V4L2_CID_SATURATION:
- ret = m5mols_write(sd, MON_CHROMA_LVL, ctrl->val);
- if (!ret)
- ret = m5mols_write(sd, MON_CHROMA_EN, REG_CHROMA_ON);
- return ret;
+ ret = m5mols_set_saturation(info, ctrl->val);
+ break;
case V4L2_CID_COLORFX:
- /*
- * This control uses two kinds of registers: normal & color.
- * The normal effect belongs to category 1, while the color
- * one belongs to category 2.
- *
- * The normal effect uses one register: CAT1_EFFECT.
- * The color effect uses three registers:
- * CAT2_COLOR_EFFECT, CAT2_CFIXR, CAT2_CFIXB.
- */
- ret = m5mols_write(sd, PARM_EFFECT,
- ctrl->val == V4L2_COLORFX_NEGATIVE ? REG_EFFECT_NEGA :
- ctrl->val == V4L2_COLORFX_EMBOSS ? REG_EFFECT_EMBOSS :
- REG_EFFECT_OFF);
- if (!ret)
- ret = m5mols_write(sd, MON_EFFECT,
- ctrl->val == V4L2_COLORFX_SEPIA ?
- REG_COLOR_EFFECT_ON : REG_COLOR_EFFECT_OFF);
- if (!ret)
- ret = m5mols_write(sd, MON_CFIXR,
- ctrl->val == V4L2_COLORFX_SEPIA ?
- REG_CFIXR_SEPIA : 0);
- if (!ret)
- ret = m5mols_write(sd, MON_CFIXB,
- ctrl->val == V4L2_COLORFX_SEPIA ?
- REG_CFIXB_SEPIA : 0);
+ ret = m5mols_set_color_effect(info, ctrl->val);
+ break;
+
+ case V4L2_CID_WIDE_DYNAMIC_RANGE:
+ ret = m5mols_set_wdr(info, ctrl->val);
+ break;
+
+ case V4L2_CID_IMAGE_STABILIZATION:
+ ret = m5mols_set_stabilization(info, ctrl->val);
+ break;
+
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ ret = m5mols_write(sd, CAPP_JPEG_RATIO, ctrl->val);
+ break;
+ }
+
+ if (ret == 0 && info->mode != last_mode)
+ ret = m5mols_set_mode(info, last_mode);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops m5mols_ctrl_ops = {
+ .g_volatile_ctrl = m5mols_g_volatile_ctrl,
+ .s_ctrl = m5mols_s_ctrl,
+};
+
+/* Supported manual ISO values */
+static const s64 iso_qmenu[] = {
+ /* AE_ISO: 0x01...0x07 */
+ 50, 100, 200, 400, 800, 1600, 3200
+};
+
+/* Supported Exposure Bias values, -2.0EV...+2.0EV */
+static const s64 ev_bias_qmenu[] = {
+ /* AE_INDEX: 0x00...0x08 */
+ -2000, -1500, -1000, -500, 0, 500, 1000, 1500, 2000
+};
+
+int m5mols_init_controls(struct v4l2_subdev *sd)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+ u16 exposure_max;
+ u16 zoom_step;
+ int ret;
+
+ /* Determine the firmware dependent control range and step values */
+ ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &exposure_max);
+ if (ret < 0)
+ return ret;
+
+ zoom_step = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1;
+ v4l2_ctrl_handler_init(&info->handle, 20);
+
+ info->auto_wb = v4l2_ctrl_new_std_menu(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
+ 9, ~0x3fe, V4L2_WHITE_BALANCE_AUTO);
+
+ /* Exposure control cluster */
+ info->auto_exposure = v4l2_ctrl_new_std_menu(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_EXPOSURE_AUTO,
+ 1, ~0x03, V4L2_EXPOSURE_AUTO);
+
+ info->exposure = v4l2_ctrl_new_std(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_EXPOSURE,
+ 0, exposure_max, 1, exposure_max / 2);
+
+ info->exposure_bias = v4l2_ctrl_new_int_menu(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_AUTO_EXPOSURE_BIAS,
+ ARRAY_SIZE(ev_bias_qmenu) - 1,
+ ARRAY_SIZE(ev_bias_qmenu)/2 - 1,
+ ev_bias_qmenu);
+
+ info->metering = v4l2_ctrl_new_std_menu(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_EXPOSURE_METERING,
+ 2, ~0x7, V4L2_EXPOSURE_METERING_AVERAGE);
+
+ /* ISO control cluster */
+ info->auto_iso = v4l2_ctrl_new_std_menu(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_ISO_SENSITIVITY_AUTO, 1, ~0x03, 1);
+
+ info->iso = v4l2_ctrl_new_int_menu(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_ISO_SENSITIVITY, ARRAY_SIZE(iso_qmenu) - 1,
+ ARRAY_SIZE(iso_qmenu)/2 - 1, iso_qmenu);
+
+ info->saturation = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_SATURATION, 1, 5, 1, 3);
+
+ info->zoom = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_ZOOM_ABSOLUTE, 1, 70, zoom_step, 1);
+
+ info->colorfx = v4l2_ctrl_new_std_menu(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_COLORFX, 4, 0, V4L2_COLORFX_NONE);
+
+ info->wdr = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_WIDE_DYNAMIC_RANGE, 0, 1, 1, 0);
+
+ info->stabilization = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_IMAGE_STABILIZATION, 0, 1, 1, 0);
+
+ info->jpeg_quality = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 1, 100, 1, 80);
+
+ info->lock_3a = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_3A_LOCK, 0, 0x7, 0, 0);
+
+ if (info->handle.error) {
+ int ret = info->handle.error;
+ v4l2_err(sd, "Failed to initialize controls: %d\n", ret);
+ v4l2_ctrl_handler_free(&info->handle);
return ret;
}
- return -EINVAL;
+ v4l2_ctrl_auto_cluster(4, &info->auto_exposure, 1, false);
+ info->auto_iso->flags |= V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_UPDATE;
+ v4l2_ctrl_auto_cluster(2, &info->auto_iso, 0, false);
+
+ info->lock_3a->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ m5mols_set_ctrl_mode(info->auto_exposure, REG_PARAMETER);
+ m5mols_set_ctrl_mode(info->auto_wb, REG_PARAMETER);
+ m5mols_set_ctrl_mode(info->colorfx, REG_MONITOR);
+
+ sd->ctrl_handler = &info->handle;
+
+ return 0;
}
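Much of the value of the m5mols conversion above comes from control clusters: auto_exposure/exposure_bias/exposure/metering and auto_iso/iso are each handled in one s_ctrl call, so an application can switch to manual mode and supply the manual value atomically. A hedged userspace sketch of what that buys, using the ISO cluster created above (the device path and index are illustrative; the caller checks the ioctl return value):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Switch the sensor to manual ISO and pick an entry from the driver's
 * iso_qmenu (50...3200) in a single VIDIOC_S_EXT_CTRLS call. */
static int set_manual_iso(int fd, int iso_index)
{
	struct v4l2_ext_control c[2] = {
		{ .id = V4L2_CID_ISO_SENSITIVITY_AUTO,
		  .value = V4L2_ISO_SENSITIVITY_MANUAL },
		{ .id = V4L2_CID_ISO_SENSITIVITY,
		  .value = iso_index },
	};
	struct v4l2_ext_controls ctrls = {
		.ctrl_class = V4L2_CTRL_CLASS_CAMERA,
		.count = 2,
		.controls = c,
	};

	/* Because the two controls are clustered in the driver, m5mols
	 * sees both new values in one s_ctrl invocation. */
	return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}

With the iso_qmenu above, set_manual_iso(fd, 2) on an opened video node would request ISO 200.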
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
index d718aee01c77..ac7d28b6ddf2 100644
--- a/drivers/media/video/m5mols/m5mols_core.c
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -362,14 +362,14 @@ static int m5mols_reg_mode(struct v4l2_subdev *sd, u8 mode)
}
/**
- * m5mols_mode - manage the M-5MOLS's mode
+ * m5mols_set_mode - set the M-5MOLS controller mode
* @mode: the required operation mode
*
* The commands of M-5MOLS are grouped into specific modes. Each functionality
- * can be guaranteed only when the sensor is operating in mode which which
- * a command belongs to.
+ * can be guaranteed only when the sensor is operating in the mode to which a
+ * command belongs.
*/
-int m5mols_mode(struct m5mols_info *info, u8 mode)
+int m5mols_set_mode(struct m5mols_info *info, u8 mode)
{
struct v4l2_subdev *sd = &info->sd;
int ret = -EINVAL;
@@ -645,13 +645,13 @@ static int m5mols_start_monitor(struct m5mols_info *info)
struct v4l2_subdev *sd = &info->sd;
int ret;
- ret = m5mols_mode(info, REG_PARAMETER);
+ ret = m5mols_set_mode(info, REG_PARAMETER);
if (!ret)
ret = m5mols_write(sd, PARM_MON_SIZE, info->resolution);
if (!ret)
ret = m5mols_write(sd, PARM_MON_FPS, REG_FPS_30);
if (!ret)
- ret = m5mols_mode(info, REG_MONITOR);
+ ret = m5mols_set_mode(info, REG_MONITOR);
if (!ret)
ret = m5mols_restore_controls(info);
@@ -674,42 +674,13 @@ static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
return ret;
}
- return m5mols_mode(info, REG_PARAMETER);
+ return m5mols_set_mode(info, REG_PARAMETER);
}
static const struct v4l2_subdev_video_ops m5mols_video_ops = {
.s_stream = m5mols_s_stream,
};
-static int m5mols_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct v4l2_subdev *sd = to_sd(ctrl);
- struct m5mols_info *info = to_m5mols(sd);
- int ispstate = info->mode;
- int ret;
-
- /*
- * If needed, defer restoring the controls until
- * the device is fully initialized.
- */
- if (!info->isp_ready) {
- info->ctrl_sync = 0;
- return 0;
- }
-
- ret = m5mols_mode(info, REG_PARAMETER);
- if (ret < 0)
- return ret;
- ret = m5mols_set_ctrl(ctrl);
- if (ret < 0)
- return ret;
- return m5mols_mode(info, ispstate);
-}
-
-static const struct v4l2_ctrl_ops m5mols_ctrl_ops = {
- .s_ctrl = m5mols_s_ctrl,
-};
-
static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
{
struct v4l2_subdev *sd = &info->sd;
@@ -802,52 +773,6 @@ static int m5mols_fw_start(struct v4l2_subdev *sd)
return ret;
}
-static int m5mols_init_controls(struct m5mols_info *info)
-{
- struct v4l2_subdev *sd = &info->sd;
- u16 max_exposure;
- u16 step_zoom;
- int ret;
-
- /* Determine value's range & step of controls for various FW version */
- ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &max_exposure);
- if (!ret)
- step_zoom = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1;
- if (ret)
- return ret;
-
- v4l2_ctrl_handler_init(&info->handle, 6);
- info->autowb = v4l2_ctrl_new_std(&info->handle,
- &m5mols_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE,
- 0, 1, 1, 0);
- info->saturation = v4l2_ctrl_new_std(&info->handle,
- &m5mols_ctrl_ops, V4L2_CID_SATURATION,
- 1, 5, 1, 3);
- info->zoom = v4l2_ctrl_new_std(&info->handle,
- &m5mols_ctrl_ops, V4L2_CID_ZOOM_ABSOLUTE,
- 1, 70, step_zoom, 1);
- info->exposure = v4l2_ctrl_new_std(&info->handle,
- &m5mols_ctrl_ops, V4L2_CID_EXPOSURE,
- 0, max_exposure, 1, (int)max_exposure/2);
- info->colorfx = v4l2_ctrl_new_std_menu(&info->handle,
- &m5mols_ctrl_ops, V4L2_CID_COLORFX,
- 4, (1 << V4L2_COLORFX_BW), V4L2_COLORFX_NONE);
- info->autoexposure = v4l2_ctrl_new_std_menu(&info->handle,
- &m5mols_ctrl_ops, V4L2_CID_EXPOSURE_AUTO,
- 1, 0, V4L2_EXPOSURE_AUTO);
-
- sd->ctrl_handler = &info->handle;
- if (info->handle.error) {
- v4l2_err(sd, "Failed to initialize controls: %d\n", ret);
- v4l2_ctrl_handler_free(&info->handle);
- return info->handle.error;
- }
-
- v4l2_ctrl_cluster(2, &info->autoexposure);
-
- return 0;
-}
-
/**
* m5mols_s_power - Main sensor power control function
*
@@ -868,7 +793,7 @@ static int m5mols_s_power(struct v4l2_subdev *sd, int on)
}
if (is_manufacturer(info, REG_SAMSUNG_TECHWIN)) {
- ret = m5mols_mode(info, REG_MONITOR);
+ ret = m5mols_set_mode(info, REG_MONITOR);
if (!ret)
ret = m5mols_write(sd, AF_EXECUTE, REG_AF_STOP);
if (!ret)
@@ -1010,7 +935,7 @@ static int __devinit m5mols_probe(struct i2c_client *client,
ret = m5mols_fw_start(sd);
if (!ret)
- ret = m5mols_init_controls(info);
+ ret = m5mols_init_controls(sd);
m5mols_sensor_power(info, false);
if (!ret)
diff --git a/drivers/media/video/m5mols/m5mols_reg.h b/drivers/media/video/m5mols/m5mols_reg.h
index ae4aced0f9b2..14d4be72aeff 100644
--- a/drivers/media/video/m5mols/m5mols_reg.h
+++ b/drivers/media/video/m5mols/m5mols_reg.h
@@ -310,6 +310,7 @@
#define REG_JPEG 0x10
#define CAPP_MAIN_IMAGE_SIZE I2C_REG(CAT_CAPT_PARM, 0x01, 1)
+#define CAPP_JPEG_RATIO I2C_REG(CAT_CAPT_PARM, 0x17, 1)
#define CAPP_MCC_MODE I2C_REG(CAT_CAPT_PARM, 0x1d, 1)
#define REG_MCC_OFF 0x00
diff --git a/drivers/media/video/marvell-ccic/mcam-core.c b/drivers/media/video/marvell-ccic/mcam-core.c
index 996ac34d9a89..ce2b7b4788d6 100644
--- a/drivers/media/video/marvell-ccic/mcam-core.c
+++ b/drivers/media/video/marvell-ccic/mcam-core.c
@@ -1356,7 +1356,6 @@ static int mcam_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
goto out;
}
mcam_set_config_needed(cam, 1);
- ret = 0;
out:
mutex_unlock(&cam->s_mutex);
return ret;
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index 12897e8a3314..d2dec585e61b 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -40,7 +40,7 @@ MODULE_VERSION("0.1.1");
#define MIN_H 32
#define MAX_W 640
#define MAX_H 480
-#define DIM_ALIGN_MASK 0x08 /* 8-alignment for dimensions */
+#define DIM_ALIGN_MASK 7 /* 8-byte alignment for line length */
/* Flags that indicate a format can be used for capture/output */
#define MEM2MEM_CAPTURE (1 << 0)
@@ -958,6 +958,10 @@ static int m2mtest_probe(struct platform_device *pdev)
}
*vfd = m2mtest_videodev;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
vfd->lock = &dev->dev_mutex;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index b09a3c80a15e..7bc775219f97 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -1570,7 +1570,7 @@ static long vidioc_default(struct file *file, void *fh, bool valid_prio,
return meyeioc_stilljcapt((int *) arg);
default:
- return -EINVAL;
+ return -ENOTTY;
}
}
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index 82ce50721de3..aeb22be7dcbd 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -597,19 +597,23 @@ static int msp_log_status(struct v4l2_subdev *sd)
return 0;
}
-static int msp_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int msp_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
v4l_dbg(1, msp_debug, client, "suspend\n");
msp_reset(client);
return 0;
}
-static int msp_resume(struct i2c_client *client)
+static int msp_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
v4l_dbg(1, msp_debug, client, "resume\n");
msp_wake_thread(client);
return 0;
}
+#endif
/* ----------------------------------------------------------------------- */
@@ -863,6 +867,10 @@ static int msp_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
+static const struct dev_pm_ops msp3400_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(msp_suspend, msp_resume)
+};
+
static const struct i2c_device_id msp_id[] = {
{ "msp3400", 0 },
{ }
@@ -873,11 +881,10 @@ static struct i2c_driver msp_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "msp3400",
+ .pm = &msp3400_pm_ops,
},
.probe = msp_probe,
.remove = msp_remove,
- .suspend = msp_suspend,
- .resume = msp_resume,
.id_table = msp_id,
};
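The msp3400 hunks above are a mechanical move from the legacy i2c suspend/resume callbacks to dev_pm_ops. The same boilerplate applies to any i2c client driver; a minimal sketch with illustrative names:

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "suspend\n");
	/* quiesce the hardware here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "resume\n");
	/* reprogram the hardware here */
	return 0;
}
#endif

/* SET_SYSTEM_SLEEP_PM_OPS() expands to nothing without CONFIG_PM_SLEEP,
 * which is why the callbacks above sit under the same #ifdef. */
static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};

static struct i2c_driver foo_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "foo",
		.pm = &foo_pm_ops,
	},
	/* .probe and .remove as before */
	.id_table = foo_id,
};
module_i2c_driver(foo_driver);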
diff --git a/drivers/media/video/mt9m032.c b/drivers/media/video/mt9m032.c
index 645973c5feb0..3c1e626139b7 100644
--- a/drivers/media/video/mt9m032.c
+++ b/drivers/media/video/mt9m032.c
@@ -838,9 +838,9 @@ static int mt9m032_remove(struct i2c_client *client)
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct mt9m032 *sensor = to_mt9m032(subdev);
- v4l2_device_unregister_subdev(&sensor->subdev);
+ v4l2_device_unregister_subdev(subdev);
v4l2_ctrl_handler_free(&sensor->ctrls);
- media_entity_cleanup(&sensor->subdev.entity);
+ media_entity_cleanup(&subdev->entity);
mutex_destroy(&sensor->lock);
kfree(sensor);
return 0;
diff --git a/drivers/media/video/mt9p031.c b/drivers/media/video/mt9p031.c
index c81eaf4fbe01..8f061d9ac443 100644
--- a/drivers/media/video/mt9p031.c
+++ b/drivers/media/video/mt9p031.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/log2.h>
@@ -90,7 +91,14 @@
#define MT9P031_GLOBAL_GAIN_MAX 1024
#define MT9P031_GLOBAL_GAIN_DEF 8
#define MT9P031_GLOBAL_GAIN_MULT (1 << 6)
+#define MT9P031_ROW_BLACK_TARGET 0x49
#define MT9P031_ROW_BLACK_DEF_OFFSET 0x4b
+#define MT9P031_GREEN1_OFFSET 0x60
+#define MT9P031_GREEN2_OFFSET 0x61
+#define MT9P031_BLACK_LEVEL_CALIBRATION 0x62
+#define MT9P031_BLC_MANUAL_BLC (1 << 0)
+#define MT9P031_RED_OFFSET 0x63
+#define MT9P031_BLUE_OFFSET 0x64
#define MT9P031_TEST_PATTERN 0xa0
#define MT9P031_TEST_PATTERN_SHIFT 3
#define MT9P031_TEST_PATTERN_ENABLE (1 << 0)
@@ -99,17 +107,27 @@
#define MT9P031_TEST_PATTERN_RED 0xa2
#define MT9P031_TEST_PATTERN_BLUE 0xa3
+enum mt9p031_model {
+ MT9P031_MODEL_COLOR,
+ MT9P031_MODEL_MONOCHROME,
+};
+
struct mt9p031 {
struct v4l2_subdev subdev;
struct media_pad pad;
struct v4l2_rect crop; /* Sensor window */
struct v4l2_mbus_framefmt format;
- struct v4l2_ctrl_handler ctrls;
struct mt9p031_platform_data *pdata;
struct mutex power_lock; /* lock to protect power_count */
int power_count;
+ enum mt9p031_model model;
struct aptina_pll pll;
+ int reset;
+
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *blc_auto;
+ struct v4l2_ctrl *blc_offset;
/* Registers cache */
u16 output_control;
@@ -241,8 +259,8 @@ static inline int mt9p031_pll_disable(struct mt9p031 *mt9p031)
static int mt9p031_power_on(struct mt9p031 *mt9p031)
{
/* Ensure RESET_BAR is low */
- if (mt9p031->pdata->reset) {
- mt9p031->pdata->reset(&mt9p031->subdev, 1);
+ if (mt9p031->reset != -1) {
+ gpio_set_value(mt9p031->reset, 0);
usleep_range(1000, 2000);
}
@@ -252,8 +270,8 @@ static int mt9p031_power_on(struct mt9p031 *mt9p031)
mt9p031->pdata->ext_freq);
/* Now RESET_BAR must be high */
- if (mt9p031->pdata->reset) {
- mt9p031->pdata->reset(&mt9p031->subdev, 0);
+ if (mt9p031->reset != -1) {
+ gpio_set_value(mt9p031->reset, 1);
usleep_range(1000, 2000);
}
@@ -262,8 +280,8 @@ static int mt9p031_power_on(struct mt9p031 *mt9p031)
static void mt9p031_power_off(struct mt9p031 *mt9p031)
{
- if (mt9p031->pdata->reset) {
- mt9p031->pdata->reset(&mt9p031->subdev, 1);
+ if (mt9p031->reset != -1) {
+ gpio_set_value(mt9p031->reset, 0);
usleep_range(1000, 2000);
}
@@ -557,6 +575,10 @@ static int mt9p031_set_crop(struct v4l2_subdev *subdev,
*/
#define V4L2_CID_TEST_PATTERN (V4L2_CID_USER_BASE | 0x1001)
+#define V4L2_CID_BLC_AUTO (V4L2_CID_USER_BASE | 0x1002)
+#define V4L2_CID_BLC_TARGET_LEVEL (V4L2_CID_USER_BASE | 0x1003)
+#define V4L2_CID_BLC_ANALOG_OFFSET (V4L2_CID_USER_BASE | 0x1004)
+#define V4L2_CID_BLC_DIGITAL_OFFSET (V4L2_CID_USER_BASE | 0x1005)
static int mt9p031_s_ctrl(struct v4l2_ctrl *ctrl)
{
@@ -621,11 +643,17 @@ static int mt9p031_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_TEST_PATTERN:
if (!ctrl->val) {
- ret = mt9p031_set_mode2(mt9p031,
- 0, MT9P031_READ_MODE_2_ROW_BLC);
- if (ret < 0)
- return ret;
-
+ /* Restore the black level compensation settings. */
+ if (mt9p031->blc_auto->cur.val != 0) {
+ ret = mt9p031_s_ctrl(mt9p031->blc_auto);
+ if (ret < 0)
+ return ret;
+ }
+ if (mt9p031->blc_offset->cur.val != 0) {
+ ret = mt9p031_s_ctrl(mt9p031->blc_offset);
+ if (ret < 0)
+ return ret;
+ }
return mt9p031_write(client, MT9P031_TEST_PATTERN,
MT9P031_TEST_PATTERN_DISABLE);
}
@@ -640,10 +668,14 @@ static int mt9p031_s_ctrl(struct v4l2_ctrl *ctrl)
if (ret < 0)
return ret;
+ /* Disable digital black level compensation when using a test
+ * pattern.
+ */
ret = mt9p031_set_mode2(mt9p031, MT9P031_READ_MODE_2_ROW_BLC,
0);
if (ret < 0)
return ret;
+
ret = mt9p031_write(client, MT9P031_ROW_BLACK_DEF_OFFSET, 0);
if (ret < 0)
return ret;
@@ -651,7 +683,40 @@ static int mt9p031_s_ctrl(struct v4l2_ctrl *ctrl)
return mt9p031_write(client, MT9P031_TEST_PATTERN,
((ctrl->val - 1) << MT9P031_TEST_PATTERN_SHIFT)
| MT9P031_TEST_PATTERN_ENABLE);
+
+ case V4L2_CID_BLC_AUTO:
+ ret = mt9p031_set_mode2(mt9p031,
+ ctrl->val ? 0 : MT9P031_READ_MODE_2_ROW_BLC,
+ ctrl->val ? MT9P031_READ_MODE_2_ROW_BLC : 0);
+ if (ret < 0)
+ return ret;
+
+ return mt9p031_write(client, MT9P031_BLACK_LEVEL_CALIBRATION,
+ ctrl->val ? 0 : MT9P031_BLC_MANUAL_BLC);
+
+ case V4L2_CID_BLC_TARGET_LEVEL:
+ return mt9p031_write(client, MT9P031_ROW_BLACK_TARGET,
+ ctrl->val);
+
+ case V4L2_CID_BLC_ANALOG_OFFSET:
+ data = ctrl->val & ((1 << 9) - 1);
+
+ ret = mt9p031_write(client, MT9P031_GREEN1_OFFSET, data);
+ if (ret < 0)
+ return ret;
+ ret = mt9p031_write(client, MT9P031_GREEN2_OFFSET, data);
+ if (ret < 0)
+ return ret;
+ ret = mt9p031_write(client, MT9P031_RED_OFFSET, data);
+ if (ret < 0)
+ return ret;
+ return mt9p031_write(client, MT9P031_BLUE_OFFSET, data);
+
+ case V4L2_CID_BLC_DIGITAL_OFFSET:
+ return mt9p031_write(client, MT9P031_ROW_BLACK_DEF_OFFSET,
+ ctrl->val & ((1 << 12) - 1));
}
+
return 0;
}
@@ -685,6 +750,46 @@ static const struct v4l2_ctrl_config mt9p031_ctrls[] = {
.flags = 0,
.menu_skip_mask = 0,
.qmenu = mt9p031_test_pattern_menu,
+ }, {
+ .ops = &mt9p031_ctrl_ops,
+ .id = V4L2_CID_BLC_AUTO,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "BLC, Auto",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+ .flags = 0,
+ }, {
+ .ops = &mt9p031_ctrl_ops,
+ .id = V4L2_CID_BLC_TARGET_LEVEL,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "BLC Target Level",
+ .min = 0,
+ .max = 4095,
+ .step = 1,
+ .def = 168,
+ .flags = 0,
+ }, {
+ .ops = &mt9p031_ctrl_ops,
+ .id = V4L2_CID_BLC_ANALOG_OFFSET,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "BLC Analog Offset",
+ .min = -255,
+ .max = 255,
+ .step = 1,
+ .def = 32,
+ .flags = 0,
+ }, {
+ .ops = &mt9p031_ctrl_ops,
+ .id = V4L2_CID_BLC_DIGITAL_OFFSET,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "BLC Digital Offset",
+ .min = -2048,
+ .max = 2047,
+ .step = 1,
+ .def = 40,
+ .flags = 0,
}
};
@@ -764,7 +869,7 @@ static int mt9p031_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
format = v4l2_subdev_get_try_format(fh, 0);
- if (mt9p031->pdata->version == MT9P031_MONOCHROME_VERSION)
+ if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
format->code = V4L2_MBUS_FMT_Y12_1X12;
else
format->code = V4L2_MBUS_FMT_SGRBG12_1X12;
@@ -842,6 +947,8 @@ static int mt9p031_probe(struct i2c_client *client,
mt9p031->pdata = pdata;
mt9p031->output_control = MT9P031_OUTPUT_CONTROL_DEF;
mt9p031->mode2 = MT9P031_READ_MODE_2_ROW_BLC;
+ mt9p031->model = did->driver_data;
+ mt9p031->reset = -1;
v4l2_ctrl_handler_init(&mt9p031->ctrls, ARRAY_SIZE(mt9p031_ctrls) + 4);
@@ -862,9 +969,16 @@ static int mt9p031_probe(struct i2c_client *client,
mt9p031->subdev.ctrl_handler = &mt9p031->ctrls;
- if (mt9p031->ctrls.error)
+ if (mt9p031->ctrls.error) {
printk(KERN_INFO "%s: control initialization error %d\n",
__func__, mt9p031->ctrls.error);
+ ret = mt9p031->ctrls.error;
+ goto done;
+ }
+
+ mt9p031->blc_auto = v4l2_ctrl_find(&mt9p031->ctrls, V4L2_CID_BLC_AUTO);
+ mt9p031->blc_offset = v4l2_ctrl_find(&mt9p031->ctrls,
+ V4L2_CID_BLC_DIGITAL_OFFSET);
mutex_init(&mt9p031->power_lock);
v4l2_i2c_subdev_init(&mt9p031->subdev, client, &mt9p031_subdev_ops);
@@ -882,7 +996,7 @@ static int mt9p031_probe(struct i2c_client *client,
mt9p031->crop.left = MT9P031_COLUMN_START_DEF;
mt9p031->crop.top = MT9P031_ROW_START_DEF;
- if (mt9p031->pdata->version == MT9P031_MONOCHROME_VERSION)
+ if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
mt9p031->format.code = V4L2_MBUS_FMT_Y12_1X12;
else
mt9p031->format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
@@ -892,10 +1006,22 @@ static int mt9p031_probe(struct i2c_client *client,
mt9p031->format.field = V4L2_FIELD_NONE;
mt9p031->format.colorspace = V4L2_COLORSPACE_SRGB;
+ if (pdata->reset != -1) {
+ ret = gpio_request_one(pdata->reset, GPIOF_OUT_INIT_LOW,
+ "mt9p031_rst");
+ if (ret < 0)
+ goto done;
+
+ mt9p031->reset = pdata->reset;
+ }
+
ret = mt9p031_pll_setup(mt9p031);
done:
if (ret < 0) {
+ if (mt9p031->reset != -1)
+ gpio_free(mt9p031->reset);
+
v4l2_ctrl_handler_free(&mt9p031->ctrls);
media_entity_cleanup(&mt9p031->subdev.entity);
kfree(mt9p031);
@@ -912,13 +1038,16 @@ static int mt9p031_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&mt9p031->ctrls);
v4l2_device_unregister_subdev(subdev);
media_entity_cleanup(&subdev->entity);
+ if (mt9p031->reset != -1)
+ gpio_free(mt9p031->reset);
kfree(mt9p031);
return 0;
}
static const struct i2c_device_id mt9p031_id[] = {
- { "mt9p031", 0 },
+ { "mt9p031", MT9P031_MODEL_COLOR },
+ { "mt9p031m", MT9P031_MODEL_MONOCHROME },
{ }
};
MODULE_DEVICE_TABLE(i2c, mt9p031_id);
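The mt9p031 hunks above replace the board-supplied reset() callback with a GPIO number kept in the driver state (-1 meaning no reset line). Purely as an illustrative sketch, not part of the patch, the same pattern in isolation with placeholder names:

#include <linux/delay.h>
#include <linux/gpio.h>

struct sensor {
        int reset;                      /* GPIO number, or -1 if unused */
};

static int sensor_claim_reset_gpio(struct sensor *s, int gpio)
{
        int ret;

        s->reset = -1;
        if (gpio == -1)                 /* board has no reset line */
                return 0;

        /* Request the line and hold the sensor in reset (RESET_BAR low). */
        ret = gpio_request_one(gpio, GPIOF_OUT_INIT_LOW, "sensor_rst");
        if (ret < 0)
                return ret;

        s->reset = gpio;
        return 0;
}

static void sensor_deassert_reset(struct sensor *s)
{
        if (s->reset != -1) {
                gpio_set_value(s->reset, 1);    /* release RESET_BAR */
                usleep_range(1000, 2000);
        }
}

static void sensor_release_reset_gpio(struct sensor *s)
{
        if (s->reset != -1)
                gpio_free(s->reset);
}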
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index 8d1445f12708..e1ae46a7ee96 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -453,6 +453,7 @@ static int mt9t112_init_pll(const struct i2c_client *client)
* I2C Master Clock Divider
*/
mt9t112_reg_write(ret, client, 0x0014, 0x3046);
+ mt9t112_reg_write(ret, client, 0x0016, 0x0400); /* JPEG initialization workaround */
mt9t112_reg_write(ret, client, 0x0022, 0x0190);
mt9t112_reg_write(ret, client, 0x3B84, 0x0212);
diff --git a/drivers/media/video/mt9v032.c b/drivers/media/video/mt9v032.c
index 75e253a343c5..4ba4884c016e 100644
--- a/drivers/media/video/mt9v032.c
+++ b/drivers/media/video/mt9v032.c
@@ -481,7 +481,7 @@ static int mt9v032_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_EXPOSURE_AUTO:
return mt9v032_update_aec_agc(mt9v032, MT9V032_AEC_ENABLE,
- ctrl->val);
+ !ctrl->val);
case V4L2_CID_EXPOSURE:
return mt9v032_write(client, MT9V032_TOTAL_SHUTTER_WIDTH,
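The mt9v032 one-liner above inverts the control value because V4L2_CID_EXPOSURE_AUTO is an enumerated control in which 0 (V4L2_EXPOSURE_AUTO) means automatic exposure, so the AEC bit has to be set exactly when the value is 0. A minimal sketch of that mapping (helper name is illustrative):

#include <linux/types.h>
#include <linux/videodev2.h>

/* Enable AEC only when the control selects automatic exposure. */
static inline bool exposure_ctrl_enables_aec(s32 val)
{
        return val == V4L2_EXPOSURE_AUTO;       /* V4L2_EXPOSURE_AUTO == 0 */
}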
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 055d11ddb038..4296a8350298 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -126,13 +126,8 @@ static int mx1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
unsigned int *size)
{
struct soc_camera_device *icd = vq->priv_data;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
- if (bytes_per_line < 0)
- return bytes_per_line;
-
- *size = bytes_per_line * icd->user_height;
+ *size = icd->sizeimage;
if (!*count)
*count = 32;
@@ -171,11 +166,6 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq,
struct soc_camera_device *icd = vq->priv_data;
struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
int ret;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
-
- if (bytes_per_line < 0)
- return bytes_per_line;
dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
@@ -202,7 +192,7 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq,
vb->state = VIDEOBUF_NEEDS_INIT;
}
- vb->size = bytes_per_line * vb->height;
+ vb->size = icd->sizeimage;
if (0 != vb->baddr && vb->bsize < vb->size) {
ret = -EINVAL;
goto out;
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index 18afaeeadb7b..ded26b7286fa 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -22,6 +22,7 @@
#include <linux/gcd.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/time.h>
@@ -344,6 +345,19 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
PRP_INTR_CH2OVF,
}
},
+ {
+ .in_fmt = V4L2_MBUS_FMT_UYVY8_2X8,
+ .out_fmt = V4L2_PIX_FMT_YUV420,
+ .cfg = {
+ .channel = 2,
+ .in_fmt = PRP_CNTL_DATA_IN_YUV422,
+ .out_fmt = PRP_CNTL_CH2_OUT_YUV420,
+ .src_pixel = 0x22000888, /* YUV422 (YUYV) */
+ .irq_flags = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
+ PRP_INTR_CH2FC | PRP_INTR_LBOVF |
+ PRP_INTR_CH2OVF,
+ }
+ },
};
static struct mx2_fmt_cfg *mx27_emma_prp_get_format(
@@ -525,8 +539,6 @@ static int mx2_videobuf_setup(struct vb2_queue *vq,
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
dev_dbg(icd->parent, "count=%d, size=%d\n", *count, sizes[0]);
@@ -534,12 +546,9 @@ static int mx2_videobuf_setup(struct vb2_queue *vq,
if (fmt != NULL)
return -ENOTTY;
- if (bytes_per_line < 0)
- return bytes_per_line;
-
alloc_ctxs[0] = pcdev->alloc_ctx;
- sizes[0] = bytes_per_line * icd->user_height;
+ sizes[0] = icd->sizeimage;
if (0 == *count)
*count = 32;
@@ -555,16 +564,11 @@ static int mx2_videobuf_setup(struct vb2_queue *vq,
static int mx2_videobuf_prepare(struct vb2_buffer *vb)
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
int ret = 0;
dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
- if (bytes_per_line < 0)
- return bytes_per_line;
-
#ifdef DEBUG
/*
* This can be useful if you want to see if we actually fill
@@ -574,7 +578,7 @@ static int mx2_videobuf_prepare(struct vb2_buffer *vb)
0xaa, vb2_get_plane_payload(vb, 0));
#endif
- vb2_set_plane_payload(vb, 0, bytes_per_line * icd->user_height);
+ vb2_set_plane_payload(vb, 0, icd->sizeimage);
if (vb2_plane_vaddr(vb, 0) &&
vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
ret = -EINVAL;
@@ -980,6 +984,7 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ const struct soc_camera_format_xlate *xlate;
unsigned long common_flags;
int ret;
int bytesperline;
@@ -1024,14 +1029,31 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
return ret;
}
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ if (!xlate) {
+ dev_warn(icd->parent, "Format %x not found\n", pixfmt);
+ return -EINVAL;
+ }
+
+ if (xlate->code == V4L2_MBUS_FMT_YUYV8_2X8) {
+ csicr1 |= CSICR1_PACK_DIR;
+ csicr1 &= ~CSICR1_SWAP16_EN;
+ dev_dbg(icd->parent, "already yuyv format, don't convert\n");
+ } else if (xlate->code == V4L2_MBUS_FMT_UYVY8_2X8) {
+ csicr1 &= ~CSICR1_PACK_DIR;
+ csicr1 |= CSICR1_SWAP16_EN;
+ dev_dbg(icd->parent, "convert uyvy mbus format into yuyv\n");
+ } else {
+ dev_warn(icd->parent, "mbus format not supported\n");
+ return -EINVAL;
+ }
+
if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
csicr1 |= CSICR1_REDGE;
if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
csicr1 |= CSICR1_SOF_POL;
if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
csicr1 |= CSICR1_HSYNC_POL;
- if (pcdev->platform_flags & MX2_CAMERA_SWAP16)
- csicr1 |= CSICR1_SWAP16_EN;
if (pcdev->platform_flags & MX2_CAMERA_EXT_VSYNC)
csicr1 |= CSICR1_EXT_VSYNC;
if (pcdev->platform_flags & MX2_CAMERA_CCIR)
@@ -1042,8 +1064,6 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
csicr1 |= CSICR1_GCLK_MODE;
if (pcdev->platform_flags & MX2_CAMERA_INV_DATA)
csicr1 |= CSICR1_INV_DATA;
- if (pcdev->platform_flags & MX2_CAMERA_PACK_DIR_MSB)
- csicr1 |= CSICR1_PACK_DIR;
pcdev->csicr1 = csicr1;
@@ -1118,7 +1138,8 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
return 0;
}
- if (code == V4L2_MBUS_FMT_YUYV8_2X8) {
+ if (code == V4L2_MBUS_FMT_YUYV8_2X8 ||
+ code == V4L2_MBUS_FMT_UYVY8_2X8) {
formats++;
if (xlate) {
/*
@@ -1134,6 +1155,18 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
}
}
+ if (code == V4L2_MBUS_FMT_UYVY8_2X8) {
+ formats++;
+ if (xlate) {
+ xlate->host_fmt =
+ soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_2X8);
+ xlate->code = code;
+ dev_dbg(dev, "Providing host format %s for sensor code %d\n",
+ xlate->host_fmt->name, code);
+ xlate++;
+ }
+ }
+
/* Generic pass-trough */
formats++;
if (xlate) {
@@ -1363,17 +1396,20 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
xlate->host_fmt);
if (pix->bytesperline < 0)
return pix->bytesperline;
- pix->sizeimage = pix->height * pix->bytesperline;
+ pix->sizeimage = soc_mbus_image_size(xlate->host_fmt,
+ pix->bytesperline, pix->height);
/* Check against the CSIRXCNT limit */
if (pix->sizeimage > 4 * 0x3ffff) {
/* Adjust geometry, preserve aspect ratio */
- unsigned int new_height = int_sqrt(4 * 0x3ffff *
- pix->height / pix->bytesperline);
+ unsigned int new_height = int_sqrt(div_u64(0x3ffffULL *
+ 4 * pix->height, pix->bytesperline));
pix->width = new_height * pix->width / pix->height;
pix->height = new_height;
pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
xlate->host_fmt);
BUG_ON(pix->bytesperline < 0);
+ pix->sizeimage = soc_mbus_image_size(xlate->host_fmt,
+ pix->bytesperline, pix->height);
}
}
@@ -1752,6 +1788,8 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev)
pcdev->soc_host.priv = pcdev;
pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
pcdev->soc_host.nr = pdev->id;
+ if (cpu_is_mx25())
+ pcdev->soc_host.capabilities = SOCAM_HOST_CAP_STRIDE;
pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
if (IS_ERR(pcdev->alloc_ctx)) {
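The mx2_camera_try_fmt hunk above recomputes the CSIRXCNT geometry clamp in 64-bit arithmetic: 4 * 0x3ffff * height can overflow 32 bits for large frames, so the product is built as a u64 and divided with div_u64() before int_sqrt(). A standalone sketch of that computation (function name is illustrative):

#include <linux/kernel.h>       /* int_sqrt() */
#include <linux/math64.h>       /* div_u64() */

static unsigned int csirxcnt_height_limit(unsigned int height,
                                          unsigned int bytesperline)
{
        /* The ULL constant forces the multiplication into 64 bits. */
        return int_sqrt(div_u64(0x3ffffULL * 4 * height, bytesperline));
}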
diff --git a/drivers/media/video/mx2_emmaprp.c b/drivers/media/video/mx2_emmaprp.c
index ba89a7401c8c..0bd5815de369 100644
--- a/drivers/media/video/mx2_emmaprp.c
+++ b/drivers/media/video/mx2_emmaprp.c
@@ -755,7 +755,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
memset(src_vq, 0, sizeof(*src_vq));
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- src_vq->io_modes = VB2_MMAP;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &emmaprp_qops;
@@ -767,7 +767,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
memset(dst_vq, 0, sizeof(*dst_vq));
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- dst_vq->io_modes = VB2_MMAP;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &emmaprp_qops;
@@ -904,6 +904,10 @@ static int emmaprp_probe(struct platform_device *pdev)
}
*vfd = emmaprp_videodev;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
vfd->lock = &pcdev->dev_mutex;
video_set_drvdata(vfd, pcdev);
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index 93c35ef5f0ad..f13643d31353 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -199,8 +199,6 @@ static int mx3_videobuf_setup(struct vb2_queue *vq,
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
- int bytes_per_line;
- unsigned int height;
if (!mx3_cam->idmac_channel[0])
return -EINVAL;
@@ -208,21 +206,29 @@ static int mx3_videobuf_setup(struct vb2_queue *vq,
if (fmt) {
const struct soc_camera_format_xlate *xlate = soc_camera_xlate_by_fourcc(icd,
fmt->fmt.pix.pixelformat);
+ unsigned int bytes_per_line;
+ int ret;
+
if (!xlate)
return -EINVAL;
- bytes_per_line = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
- xlate->host_fmt);
- height = fmt->fmt.pix.height;
+
+ ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
+ xlate->host_fmt);
+ if (ret < 0)
+ return ret;
+
+ bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret);
+
+ ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line,
+ fmt->fmt.pix.height);
+ if (ret < 0)
+ return ret;
+
+ sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret);
} else {
/* Called from VIDIOC_REQBUFS or in compatibility mode */
- bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
- height = icd->user_height;
+ sizes[0] = icd->sizeimage;
}
- if (bytes_per_line < 0)
- return bytes_per_line;
-
- sizes[0] = bytes_per_line * height;
alloc_ctxs[0] = mx3_cam->alloc_ctx;
@@ -267,14 +273,11 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
struct idmac_video_param *video = &ichan->params.video;
const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, host_fmt);
unsigned long flags;
dma_cookie_t cookie;
size_t new_size;
- BUG_ON(bytes_per_line <= 0);
-
- new_size = bytes_per_line * icd->user_height;
+ new_size = icd->sizeimage;
if (vb2_plane_size(vb, 0) < new_size) {
dev_err(icd->parent, "Buffer #%d too small (%lu < %zu)\n",
@@ -314,9 +317,9 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
* horizontal parameters in this case are expressed in bytes,
* not in pixels.
*/
- video->out_width = bytes_per_line;
+ video->out_width = icd->bytesperline;
video->out_height = icd->user_height;
- video->out_stride = bytes_per_line;
+ video->out_stride = icd->bytesperline;
} else {
/*
* For IPU known formats the pixel unit will be managed
@@ -508,7 +511,7 @@ static void mx3_camera_activate(struct mx3_camera_dev *mx3_cam,
/* ipu_csi_init_interface() */
csi_reg_write(mx3_cam, conf, CSI_SENS_CONF);
- clk_enable(mx3_cam->clk);
+ clk_prepare_enable(mx3_cam->clk);
rate = clk_round_rate(mx3_cam->clk, mx3_cam->mclk);
dev_dbg(icd->parent, "Set SENS_CONF to %x, rate %ld\n", conf, rate);
if (rate)
@@ -549,7 +552,7 @@ static void mx3_camera_remove_device(struct soc_camera_device *icd)
*ichan = NULL;
}
- clk_disable(mx3_cam->clk);
+ clk_disable_unprepare(mx3_cam->clk);
mx3_cam->icd = NULL;
@@ -642,12 +645,14 @@ static const struct soc_mbus_pixelfmt mx3_camera_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
}, {
.fourcc = V4L2_PIX_FMT_GREY,
.name = "Monochrome 8 bit",
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
};
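The mx3_camera hunks above move from clk_enable()/clk_disable() to the combined helpers expected by the common clock framework, where a clock must also be prepared before it is enabled. A minimal sketch of the pairing (names are placeholders):

#include <linux/clk.h>

static int camera_clock_on(struct clk *clk)
{
        return clk_prepare_enable(clk);         /* clk_prepare() + clk_enable() */
}

static void camera_clock_off(struct clk *clk)
{
        clk_disable_unprepare(clk);             /* clk_disable() + clk_unprepare() */
}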
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index 2e4131748438..b520a45cb3f3 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -31,10 +31,11 @@
#include <media/saa7115.h>
#include <linux/module.h>
-#include "mxb.h"
#include "tea6415c.h"
#include "tea6420.h"
+#define MXB_AUDIOS 6
+
#define I2C_SAA7111A 0x24
#define I2C_TDA9840 0x42
#define I2C_TEA6415C 0x43
@@ -62,10 +63,14 @@ MODULE_PARM_DESC(debug, "Turn on/off device debugging (default:off).");
enum { TUNER, AUX1, AUX3, AUX3_YC };
static struct v4l2_input mxb_inputs[MXB_INPUTS] = {
- { TUNER, "Tuner", V4L2_INPUT_TYPE_TUNER, 1, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { AUX1, "AUX1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { AUX3, "AUX3 Composite", V4L2_INPUT_TYPE_CAMERA, 4, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { AUX3_YC, "AUX3 S-Video", V4L2_INPUT_TYPE_CAMERA, 4, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
+ { TUNER, "Tuner", V4L2_INPUT_TYPE_TUNER, 0x3f, 0,
+ V4L2_STD_PAL_BG | V4L2_STD_PAL_I, 0, V4L2_IN_CAP_STD },
+ { AUX1, "AUX1", V4L2_INPUT_TYPE_CAMERA, 0x3f, 0,
+ V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { AUX3, "AUX3 Composite", V4L2_INPUT_TYPE_CAMERA, 0x3f, 0,
+ V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { AUX3_YC, "AUX3 S-Video", V4L2_INPUT_TYPE_CAMERA, 0x3f, 0,
+ V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
};
/* this array holds the information, which port of the saa7146 each
@@ -90,6 +95,36 @@ struct mxb_routing {
u32 output;
};
+/* these are the available audio sources, which can be switched
+ to the line- and cd-output individually */
+static struct v4l2_audio mxb_audios[MXB_AUDIOS] = {
+ {
+ .index = 0,
+ .name = "Tuner",
+ .capability = V4L2_AUDCAP_STEREO,
+ } , {
+ .index = 1,
+ .name = "AUX1",
+ .capability = V4L2_AUDCAP_STEREO,
+ } , {
+ .index = 2,
+ .name = "AUX2",
+ .capability = V4L2_AUDCAP_STEREO,
+ } , {
+ .index = 3,
+ .name = "AUX3",
+ .capability = V4L2_AUDCAP_STEREO,
+ } , {
+ .index = 4,
+ .name = "Radio (X9)",
+ .capability = V4L2_AUDCAP_STEREO,
+ } , {
+ .index = 5,
+ .name = "CD-ROM (X10)",
+ .capability = V4L2_AUDCAP_STEREO,
+ }
+};
+
/* These are the necessary input-output-pins for bringing one audio source
(see above) to the CD-output. Note that gain is set to 0 in this table. */
static struct mxb_routing TEA6420_cd[MXB_AUDIOS + 1][2] = {
@@ -114,11 +149,6 @@ static struct mxb_routing TEA6420_line[MXB_AUDIOS + 1][2] = {
{ { 6, 3 }, { 6, 2 } } /* Mute */
};
-#define MAXCONTROLS 1
-static struct v4l2_queryctrl mxb_controls[] = {
- { V4L2_CID_AUDIO_MUTE, V4L2_CTRL_TYPE_BOOLEAN, "Mute", 0, 1, 1, 0, 0 },
-};
-
struct mxb
{
struct video_device *video_dev;
@@ -135,6 +165,7 @@ struct mxb
int cur_mode; /* current audio mode (mono, stereo, ...) */
int cur_input; /* current input */
+ int cur_audinput; /* current audio input */
int cur_mute; /* current mute status */
struct v4l2_frequency cur_freq; /* current frequency the tuner is tuned to */
};
@@ -150,16 +181,21 @@ struct mxb
#define call_all(dev, o, f, args...) \
v4l2_device_call_until_err(&dev->v4l2_dev, 0, o, f, ##args)
-static inline void tea6420_route_cd(struct mxb *mxb, int idx)
+static void mxb_update_audmode(struct mxb *mxb)
+{
+ struct v4l2_tuner t = {
+ .audmode = mxb->cur_mode,
+ };
+
+ tda9840_call(mxb, tuner, s_tuner, &t);
+}
+
+static inline void tea6420_route(struct mxb *mxb, int idx)
{
v4l2_subdev_call(mxb->tea6420_1, audio, s_routing,
TEA6420_cd[idx][0].input, TEA6420_cd[idx][0].output, 0);
v4l2_subdev_call(mxb->tea6420_2, audio, s_routing,
TEA6420_cd[idx][1].input, TEA6420_cd[idx][1].output, 0);
-}
-
-static inline void tea6420_route_line(struct mxb *mxb, int idx)
-{
v4l2_subdev_call(mxb->tea6420_1, audio, s_routing,
TEA6420_line[idx][0].input, TEA6420_line[idx][0].output, 0);
v4l2_subdev_call(mxb->tea6420_2, audio, s_routing,
@@ -168,16 +204,45 @@ static inline void tea6420_route_line(struct mxb *mxb, int idx)
static struct saa7146_extension extension;
+static int mxb_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct saa7146_dev *dev = container_of(ctrl->handler,
+ struct saa7146_dev, ctrl_handler);
+ struct mxb *mxb = dev->ext_priv;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ mxb->cur_mute = ctrl->val;
+ /* switch the audio-source */
+ tea6420_route(mxb, ctrl->val ? 6 :
+ video_audio_connect[mxb->cur_input]);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops mxb_ctrl_ops = {
+ .s_ctrl = mxb_s_ctrl,
+};
+
static int mxb_probe(struct saa7146_dev *dev)
{
+ struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
struct mxb *mxb = NULL;
+ v4l2_ctrl_new_std(hdl, &mxb_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ if (hdl->error)
+ return hdl->error;
mxb = kzalloc(sizeof(struct mxb), GFP_KERNEL);
if (mxb == NULL) {
DEB_D("not enough kernel memory\n");
return -ENOMEM;
}
+
snprintf(mxb->i2c_adapter.name, sizeof(mxb->i2c_adapter.name), "mxb%d", mxb_num);
saa7146_i2c_adapter_prepare(dev, &mxb->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
@@ -214,6 +279,8 @@ static int mxb_probe(struct saa7146_dev *dev)
/* we store the pointer in our private data field */
dev->ext_priv = mxb;
+ v4l2_ctrl_handler_setup(hdl);
+
return 0;
}
@@ -286,6 +353,9 @@ static int mxb_init_done(struct saa7146_dev* dev)
int i = 0, err = 0;
+ /* mute audio on tea6420s */
+ tea6420_route(mxb, 6);
+
/* select video mode in saa7111a */
saa7111a_call(mxb, core, s_std, std);
@@ -306,12 +376,12 @@ static int mxb_init_done(struct saa7146_dev* dev)
tuner_call(mxb, tuner, s_frequency, &mxb->cur_freq);
/* set a default video standard */
+ /* These two gpio calls set the GPIO pins that control the tda9820 */
+ saa7146_write(dev, GPIO_CTRL, 0x00404050);
+ saa7111a_call(mxb, core, s_gpio, 1);
+ saa7111a_call(mxb, core, s_std, std);
tuner_call(mxb, core, s_std, std);
- /* mute audio on tea6420s */
- tea6420_route_line(mxb, 6);
- tea6420_route_cd(mxb, 6);
-
/* switch to tuner-channel on tea6415c */
tea6415c_call(mxb, video, s_routing, 3, 17, 0);
@@ -320,9 +390,11 @@ static int mxb_init_done(struct saa7146_dev* dev)
/* the rest for mxb */
mxb->cur_input = 0;
+ mxb->cur_audinput = video_audio_connect[mxb->cur_input];
mxb->cur_mute = 1;
mxb->cur_mode = V4L2_TUNER_MODE_STEREO;
+ mxb_update_audmode(mxb);
/* check if the saa7740 (aka 'sound arena module') is present
on the mxb. if so, we must initialize it. due to lack of
@@ -385,69 +457,6 @@ void mxb_irq_bh(struct saa7146_dev* dev, u32* irq_mask)
}
*/
-static int vidioc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qc)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- int i;
-
- for (i = MAXCONTROLS - 1; i >= 0; i--) {
- if (mxb_controls[i].id == qc->id) {
- *qc = mxb_controls[i];
- DEB_D("VIDIOC_QUERYCTRL %d\n", qc->id);
- return 0;
- }
- }
- return dev->ext_vv_data->core_ops->vidioc_queryctrl(file, fh, qc);
-}
-
-static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- struct mxb *mxb = (struct mxb *)dev->ext_priv;
- int i;
-
- for (i = MAXCONTROLS - 1; i >= 0; i--) {
- if (mxb_controls[i].id == vc->id)
- break;
- }
-
- if (i < 0)
- return dev->ext_vv_data->core_ops->vidioc_g_ctrl(file, fh, vc);
-
- if (vc->id == V4L2_CID_AUDIO_MUTE) {
- vc->value = mxb->cur_mute;
- DEB_D("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d\n", vc->value);
- return 0;
- }
-
- DEB_EE("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d\n", vc->value);
- return 0;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- struct mxb *mxb = (struct mxb *)dev->ext_priv;
- int i = 0;
-
- for (i = MAXCONTROLS - 1; i >= 0; i--) {
- if (mxb_controls[i].id == vc->id)
- break;
- }
-
- if (i < 0)
- return dev->ext_vv_data->core_ops->vidioc_s_ctrl(file, fh, vc);
-
- if (vc->id == V4L2_CID_AUDIO_MUTE) {
- mxb->cur_mute = vc->value;
- /* switch the audio-source */
- tea6420_route_line(mxb, vc->value ? 6 :
- video_audio_connect[mxb->cur_input]);
- DEB_EE("VIDIOC_S_CTRL, V4L2_CID_AUDIO_MUTE: %d\n", vc->value);
- }
- return 0;
-}
-
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
DEB_EE("VIDIOC_ENUMINPUT %d\n", i->index);
@@ -519,9 +528,12 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
if (saa7111a_call(mxb, video, s_routing, i, SAA7111_FMT_CCIR, 0))
pr_err("VIDIOC_S_INPUT: could not address saa7111a\n");
+ mxb->cur_audinput = video_audio_connect[input];
/* switch the audio-source only if necessary */
if (0 == mxb->cur_mute)
- tea6420_route_line(mxb, video_audio_connect[input]);
+ tea6420_route(mxb, mxb->cur_audinput);
+ if (mxb->cur_audinput == 0)
+ mxb_update_audmode(mxb);
return 0;
}
@@ -563,17 +575,20 @@ static int vidioc_s_tuner(struct file *file, void *fh, struct v4l2_tuner *t)
return call_all(dev, tuner, s_tuner, t);
}
+static int vidioc_querystd(struct file *file, void *fh, v4l2_std_id *norm)
+{
+ struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+
+ return call_all(dev, video, querystd, norm);
+}
+
static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency *f)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct mxb *mxb = (struct mxb *)dev->ext_priv;
- if (mxb->cur_input) {
- DEB_D("VIDIOC_G_FREQ: channel %d does not have a tuner!\n",
- mxb->cur_input);
+ if (f->tuner)
return -EINVAL;
- }
-
*f = mxb->cur_freq;
DEB_EE("VIDIOC_G_FREQ: freq:0x%08x\n", mxb->cur_freq.frequency);
@@ -592,17 +607,18 @@ static int vidioc_s_frequency(struct file *file, void *fh, struct v4l2_frequency
if (V4L2_TUNER_ANALOG_TV != f->type)
return -EINVAL;
- if (mxb->cur_input) {
- DEB_D("VIDIOC_S_FREQ: channel %d does not have a tuner!\n",
- mxb->cur_input);
- return -EINVAL;
- }
-
- mxb->cur_freq = *f;
DEB_EE("VIDIOC_S_FREQUENCY: freq:0x%08x\n", mxb->cur_freq.frequency);
/* tune in desired frequency */
- tuner_call(mxb, tuner, s_frequency, &mxb->cur_freq);
+ tuner_call(mxb, tuner, s_frequency, f);
+ /* let the tuner subdev clamp the frequency to the tuner range */
+ tuner_call(mxb, tuner, g_frequency, f);
+ mxb->cur_freq = *f;
+ if (mxb->cur_audinput == 0)
+ mxb_update_audmode(mxb);
+
+ if (mxb->cur_input)
+ return 0;
/* hack: changing the frequency should invalidate the vbi-counter (=> alevt) */
spin_lock(&dev->slock);
@@ -612,25 +628,40 @@ static int vidioc_s_frequency(struct file *file, void *fh, struct v4l2_frequency
return 0;
}
+static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a)
+{
+ if (a->index >= MXB_AUDIOS)
+ return -EINVAL;
+ *a = mxb_audios[a->index];
+ return 0;
+}
+
static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct mxb *mxb = (struct mxb *)dev->ext_priv;
- if (a->index > MXB_INPUTS) {
- DEB_D("VIDIOC_G_AUDIO %d out of range\n", a->index);
- return -EINVAL;
- }
-
- DEB_EE("VIDIOC_G_AUDIO %d\n", a->index);
- memcpy(a, &mxb_audios[video_audio_connect[mxb->cur_input]], sizeof(struct v4l2_audio));
+ DEB_EE("VIDIOC_G_AUDIO\n");
+ *a = mxb_audios[mxb->cur_audinput];
return 0;
}
static int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
+ struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+ struct mxb *mxb = (struct mxb *)dev->ext_priv;
+
DEB_D("VIDIOC_S_AUDIO %d\n", a->index);
- return 0;
+ if (mxb_inputs[mxb->cur_input].audioset & (1 << a->index)) {
+ if (mxb->cur_audinput != a->index) {
+ mxb->cur_audinput = a->index;
+ tea6420_route(mxb, a->index);
+ if (mxb->cur_audinput == 0)
+ mxb_update_audmode(mxb);
+ }
+ return 0;
+ }
+ return -EINVAL;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -638,60 +669,31 @@ static int vidioc_g_register(struct file *file, void *fh, struct v4l2_dbg_regist
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- return call_all(dev, core, g_register, reg);
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (v4l2_chip_match_host(&reg->match)) {
+ reg->val = saa7146_read(dev, reg->reg);
+ reg->size = 4;
+ return 0;
+ }
+ call_all(dev, core, g_register, reg);
+ return 0;
}
static int vidioc_s_register(struct file *file, void *fh, struct v4l2_dbg_register *reg)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- return call_all(dev, core, s_register, reg);
-}
-#endif
-
-static long vidioc_default(struct file *file, void *fh, bool valid_prio,
- int cmd, void *arg)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- struct mxb *mxb = (struct mxb *)dev->ext_priv;
-
- switch (cmd) {
- case MXB_S_AUDIO_CD:
- {
- int i = *(int *)arg;
-
- if (i < 0 || i >= MXB_AUDIOS) {
- DEB_D("invalid argument to MXB_S_AUDIO_CD: i:%d\n", i);
- return -EINVAL;
- }
-
- DEB_EE("MXB_S_AUDIO_CD: i:%d\n", i);
-
- tea6420_route_cd(mxb, i);
- return 0;
- }
- case MXB_S_AUDIO_LINE:
- {
- int i = *(int *)arg;
-
- if (i < 0 || i >= MXB_AUDIOS) {
- DEB_D("invalid argument to MXB_S_AUDIO_LINE: i:%d\n",
- i);
- return -EINVAL;
- }
-
- DEB_EE("MXB_S_AUDIO_LINE: i:%d\n", i);
- tea6420_route_line(mxb, i);
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (v4l2_chip_match_host(&reg->match)) {
+ saa7146_write(dev, reg->reg, reg->val);
+ reg->size = 4;
return 0;
}
- default:
-/*
- DEB2(pr_err("does not handle this ioctl\n"));
-*/
- return -ENOIOCTLCMD;
- }
- return 0;
+ return call_all(dev, core, s_register, reg);
}
+#endif
static struct saa7146_ext_vv vv_data;
@@ -709,23 +711,21 @@ static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data
}
mxb = (struct mxb *)dev->ext_priv;
- vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
- vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
- vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
- vv_data.ops.vidioc_g_input = vidioc_g_input;
- vv_data.ops.vidioc_s_input = vidioc_s_input;
- vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
- vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
- vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
- vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
- vv_data.ops.vidioc_g_audio = vidioc_g_audio;
- vv_data.ops.vidioc_s_audio = vidioc_s_audio;
+ vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
+ vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
+ vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
+ vv_data.vid_ops.vidioc_querystd = vidioc_querystd;
+ vv_data.vid_ops.vidioc_g_tuner = vidioc_g_tuner;
+ vv_data.vid_ops.vidioc_s_tuner = vidioc_s_tuner;
+ vv_data.vid_ops.vidioc_g_frequency = vidioc_g_frequency;
+ vv_data.vid_ops.vidioc_s_frequency = vidioc_s_frequency;
+ vv_data.vid_ops.vidioc_enumaudio = vidioc_enumaudio;
+ vv_data.vid_ops.vidioc_g_audio = vidioc_g_audio;
+ vv_data.vid_ops.vidioc_s_audio = vidioc_s_audio;
#ifdef CONFIG_VIDEO_ADV_DEBUG
- vv_data.ops.vidioc_g_register = vidioc_g_register;
- vv_data.ops.vidioc_s_register = vidioc_s_register;
+ vv_data.vid_ops.vidioc_g_register = vidioc_g_register;
+ vv_data.vid_ops.vidioc_s_register = vidioc_s_register;
#endif
- vv_data.ops.vidioc_default = vidioc_default;
if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
ERR("cannot register capture v4l2 device. skipping.\n");
saa7146_vv_release(dev);
@@ -752,6 +752,9 @@ static int mxb_detach(struct saa7146_dev *dev)
DEB_EE("dev:%p\n", dev);
+ /* mute audio on tea6420s */
+ tea6420_route(mxb, 6);
+
saa7146_unregister_device(&mxb->video_dev,dev);
if (MXB_BOARD_CAN_DO_VBI(dev))
saa7146_unregister_device(&mxb->vbi_dev, dev);
@@ -773,20 +776,24 @@ static int std_callback(struct saa7146_dev *dev, struct saa7146_standard *standa
v4l2_std_id std = V4L2_STD_PAL_I;
DEB_D("VIDIOC_S_STD: setting mxb for PAL_I\n");
- /* set the 7146 gpio register -- I don't know what this does exactly */
+ /* These two gpio calls set the GPIO pins that control the tda9820 */
saa7146_write(dev, GPIO_CTRL, 0x00404050);
- /* unset the 7111 gpio register -- I don't know what this does exactly */
saa7111a_call(mxb, core, s_gpio, 0);
- tuner_call(mxb, core, s_std, std);
+ saa7111a_call(mxb, core, s_std, std);
+ if (mxb->cur_input == 0)
+ tuner_call(mxb, core, s_std, std);
} else {
v4l2_std_id std = V4L2_STD_PAL_BG;
+ if (mxb->cur_input)
+ std = standard->id;
DEB_D("VIDIOC_S_STD: setting mxb for PAL/NTSC/SECAM\n");
- /* set the 7146 gpio register -- I don't know what this does exactly */
+ /* These two gpio calls set the GPIO pins that control the tda9820 */
saa7146_write(dev, GPIO_CTRL, 0x00404050);
- /* set the 7111 gpio register -- I don't know what this does exactly */
saa7111a_call(mxb, core, s_gpio, 1);
- tuner_call(mxb, core, s_std, std);
+ saa7111a_call(mxb, core, s_std, std);
+ if (mxb->cur_input == 0)
+ tuner_call(mxb, core, s_std, std);
}
return 0;
}
@@ -836,14 +843,14 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
static struct saa7146_ext_vv vv_data = {
.inputs = MXB_INPUTS,
- .capabilities = V4L2_CAP_TUNER | V4L2_CAP_VBI_CAPTURE,
+ .capabilities = V4L2_CAP_TUNER | V4L2_CAP_VBI_CAPTURE | V4L2_CAP_AUDIO,
.stds = &standard[0],
.num_stds = sizeof(standard)/sizeof(struct saa7146_standard),
.std_callback = &std_callback,
};
static struct saa7146_extension extension = {
- .name = MXB_IDENTIFIER,
+ .name = "Multimedia eXtension Board",
.flags = SAA7146_USE_I2C_IRQ,
.pci_tbl = &pci_tbl[0],
diff --git a/drivers/media/video/mxb.h b/drivers/media/video/mxb.h
deleted file mode 100644
index 400a57ba62ec..000000000000
--- a/drivers/media/video/mxb.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef __MXB__
-#define __MXB__
-
-#define BASE_VIDIOC_MXB 10
-
-#define MXB_S_AUDIO_CD _IOW ('V', BASE_VIDIOC_PRIVATE+BASE_VIDIOC_MXB+0, int)
-#define MXB_S_AUDIO_LINE _IOW ('V', BASE_VIDIOC_PRIVATE+BASE_VIDIOC_MXB+1, int)
-
-#define MXB_IDENTIFIER "Multimedia eXtension Board"
-
-#define MXB_AUDIOS 6
-
-/* these are the available audio sources, which can switched
- to the line- and cd-output individually */
-static struct v4l2_audio mxb_audios[MXB_AUDIOS] = {
- {
- .index = 0,
- .name = "Tuner",
- .capability = V4L2_AUDCAP_STEREO,
- } , {
- .index = 1,
- .name = "AUX1",
- .capability = V4L2_AUDCAP_STEREO,
- } , {
- .index = 2,
- .name = "AUX2",
- .capability = V4L2_AUDCAP_STEREO,
- } , {
- .index = 3,
- .name = "AUX3",
- .capability = V4L2_AUDCAP_STEREO,
- } , {
- .index = 4,
- .name = "Radio (X9)",
- .capability = V4L2_AUDCAP_STEREO,
- } , {
- .index = 5,
- .name = "CD-ROM (X10)",
- .capability = V4L2_AUDCAP_STEREO,
- }
-};
-#endif
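The mxb changes above drop the hand-rolled VIDIOC_QUERYCTRL/G_CTRL/S_CTRL handlers in favour of the control framework: a standard control is created on a handler, its .s_ctrl op does the routing work, and v4l2_ctrl_handler_setup() writes the defaults to the hardware. A reduced sketch of that pattern (demo_* names are placeholders):

#include <linux/videodev2.h>
#include <media/v4l2-ctrls.h>

static int demo_s_ctrl(struct v4l2_ctrl *ctrl)
{
        switch (ctrl->id) {
        case V4L2_CID_AUDIO_MUTE:
                /* route the audio matrix to the mute input here */
                return 0;
        default:
                return -EINVAL;
        }
}

static const struct v4l2_ctrl_ops demo_ctrl_ops = {
        .s_ctrl = demo_s_ctrl,
};

static int demo_init_controls(struct v4l2_ctrl_handler *hdl)
{
        int ret = v4l2_ctrl_handler_init(hdl, 1);

        if (ret)
                return ret;

        v4l2_ctrl_new_std(hdl, &demo_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
        if (hdl->error) {
                ret = hdl->error;
                v4l2_ctrl_handler_free(hdl);
                return ret;
        }

        /* Apply the default values to the hardware. */
        return v4l2_ctrl_handler_setup(hdl);
}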
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c
index c20f5ecd6790..c7e41145041f 100644
--- a/drivers/media/video/omap1_camera.c
+++ b/drivers/media/video/omap1_camera.c
@@ -206,15 +206,10 @@ static int omap1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
unsigned int *size)
{
struct soc_camera_device *icd = vq->priv_data;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
- if (bytes_per_line < 0)
- return bytes_per_line;
-
- *size = bytes_per_line * icd->user_height;
+ *size = icd->sizeimage;
if (!*count || *count < OMAP1_CAMERA_MIN_BUF_COUNT(pcdev->vb_mode))
*count = OMAP1_CAMERA_MIN_BUF_COUNT(pcdev->vb_mode);
@@ -256,15 +251,10 @@ static int omap1_videobuf_prepare(struct videobuf_queue *vq,
{
struct soc_camera_device *icd = vq->priv_data;
struct omap1_cam_buf *buf = container_of(vb, struct omap1_cam_buf, vb);
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
int ret;
- if (bytes_per_line < 0)
- return bytes_per_line;
-
WARN_ON(!list_empty(&vb->queue));
BUG_ON(NULL == icd->current_fmt);
@@ -281,7 +271,7 @@ static int omap1_videobuf_prepare(struct videobuf_queue *vq,
vb->state = VIDEOBUF_NEEDS_INIT;
}
- vb->size = bytes_per_line * vb->height;
+ vb->size = icd->sizeimage;
if (vb->baddr && vb->bsize < vb->size) {
ret = -EINVAL;
@@ -999,6 +989,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_VYUY8_2X8,
@@ -1008,6 +999,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_YUYV8_2X8,
@@ -1017,6 +1009,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_YVYU8_2X8,
@@ -1026,6 +1019,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
@@ -1035,6 +1029,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
@@ -1044,6 +1039,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB565_2X8_BE,
@@ -1053,6 +1049,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB565_2X8_LE,
@@ -1062,6 +1059,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
},
};
diff --git a/drivers/media/video/omap24xxcam-dma.c b/drivers/media/video/omap24xxcam-dma.c
index 3ea38a8def8e..b5ae170de4a5 100644
--- a/drivers/media/video/omap24xxcam-dma.c
+++ b/drivers/media/video/omap24xxcam-dma.c
@@ -38,7 +38,7 @@
*/
/* Ack all interrupt on CSR and IRQSTATUS_L0 */
-static void omap24xxcam_dmahw_ack_all(unsigned long base)
+static void omap24xxcam_dmahw_ack_all(void __iomem *base)
{
u32 csr;
int i;
@@ -52,7 +52,7 @@ static void omap24xxcam_dmahw_ack_all(unsigned long base)
}
/* Ack dmach on CSR and IRQSTATUS_L0 */
-static u32 omap24xxcam_dmahw_ack_ch(unsigned long base, int dmach)
+static u32 omap24xxcam_dmahw_ack_ch(void __iomem *base, int dmach)
{
u32 csr;
@@ -65,12 +65,12 @@ static u32 omap24xxcam_dmahw_ack_ch(unsigned long base, int dmach)
return csr;
}
-static int omap24xxcam_dmahw_running(unsigned long base, int dmach)
+static int omap24xxcam_dmahw_running(void __iomem *base, int dmach)
{
return omap24xxcam_reg_in(base, CAMDMA_CCR(dmach)) & CAMDMA_CCR_ENABLE;
}
-static void omap24xxcam_dmahw_transfer_setup(unsigned long base, int dmach,
+static void omap24xxcam_dmahw_transfer_setup(void __iomem *base, int dmach,
dma_addr_t start, u32 len)
{
omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
@@ -112,7 +112,7 @@ static void omap24xxcam_dmahw_transfer_setup(unsigned long base, int dmach,
| CAMDMA_CICR_DROP_IE);
}
-static void omap24xxcam_dmahw_transfer_start(unsigned long base, int dmach)
+static void omap24xxcam_dmahw_transfer_start(void __iomem *base, int dmach)
{
omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
CAMDMA_CCR_SEL_SRC_DST_SYNC
@@ -124,7 +124,7 @@ static void omap24xxcam_dmahw_transfer_start(unsigned long base, int dmach)
| CAMDMA_CCR_SYNCHRO_CAMERA);
}
-static void omap24xxcam_dmahw_transfer_chain(unsigned long base, int dmach,
+static void omap24xxcam_dmahw_transfer_chain(void __iomem *base, int dmach,
int free_dmach)
{
int prev_dmach, ch;
@@ -160,7 +160,7 @@ static void omap24xxcam_dmahw_transfer_chain(unsigned long base, int dmach,
* controller may not be idle after this routine completes, because
* the completion routines might start new transfers.
*/
-static void omap24xxcam_dmahw_abort_ch(unsigned long base, int dmach)
+static void omap24xxcam_dmahw_abort_ch(void __iomem *base, int dmach)
{
/* mask all interrupts from this channel */
omap24xxcam_reg_out(base, CAMDMA_CICR(dmach), 0);
@@ -171,7 +171,7 @@ static void omap24xxcam_dmahw_abort_ch(unsigned long base, int dmach)
omap24xxcam_reg_merge(base, CAMDMA_CCR(dmach), 0, CAMDMA_CCR_ENABLE);
}
-static void omap24xxcam_dmahw_init(unsigned long base)
+static void omap24xxcam_dmahw_init(void __iomem *base)
{
omap24xxcam_reg_out(base, CAMDMA_OCP_SYSCONFIG,
CAMDMA_OCP_SYSCONFIG_MIDLEMODE_FSTANDBY
@@ -362,7 +362,7 @@ void omap24xxcam_dma_hwinit(struct omap24xxcam_dma *dma)
}
static void omap24xxcam_dma_init(struct omap24xxcam_dma *dma,
- unsigned long base)
+ void __iomem *base)
{
int ch;
@@ -577,7 +577,7 @@ void omap24xxcam_sgdma_sync(struct omap24xxcam_sgdma *sgdma)
}
void omap24xxcam_sgdma_init(struct omap24xxcam_sgdma *sgdma,
- unsigned long base,
+ void __iomem *base,
void (*reset_callback)(unsigned long data),
unsigned long reset_callback_data)
{
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index 7d3864144368..e5015b0d5508 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -1776,8 +1776,7 @@ static int __devinit omap24xxcam_probe(struct platform_device *pdev)
cam->mmio_size = resource_size(mem);
/* map the region */
- cam->mmio_base = (unsigned long)
- ioremap_nocache(cam->mmio_base_phys, cam->mmio_size);
+ cam->mmio_base = ioremap_nocache(cam->mmio_base_phys, cam->mmio_size);
if (!cam->mmio_base) {
dev_err(cam->dev, "cannot map camera register I/O region\n");
goto err;
diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
index 2ce67f5a48d5..d59727afe894 100644
--- a/drivers/media/video/omap24xxcam.h
+++ b/drivers/media/video/omap24xxcam.h
@@ -429,7 +429,7 @@ struct sgdma_state {
struct omap24xxcam_dma {
spinlock_t lock; /* Lock for the whole structure. */
- unsigned long base; /* base address for dma controller */
+ void __iomem *base; /* base address for dma controller */
/* While dma_stop!=0, an attempt to start a new DMA transfer will
* fail.
@@ -491,7 +491,7 @@ struct omap24xxcam_device {
/*** hardware resources ***/
unsigned int irq;
- unsigned long mmio_base;
+ void __iomem *mmio_base;
unsigned long mmio_base_phys;
unsigned long mmio_size;
@@ -544,22 +544,22 @@ struct omap24xxcam_fh {
*
*/
-static inline u32 omap24xxcam_reg_in(unsigned long base, u32 offset)
+static inline u32 omap24xxcam_reg_in(u32 __iomem *base, u32 offset)
{
return readl(base + offset);
}
-static inline u32 omap24xxcam_reg_out(unsigned long base, u32 offset,
+static inline u32 omap24xxcam_reg_out(u32 __iomem *base, u32 offset,
u32 val)
{
writel(val, base + offset);
return val;
}
-static inline u32 omap24xxcam_reg_merge(unsigned long base, u32 offset,
+static inline u32 omap24xxcam_reg_merge(u32 __iomem *base, u32 offset,
u32 val, u32 mask)
{
- u32 addr = base + offset;
+ u32 __iomem *addr = base + offset;
u32 new_val = (readl(addr) & ~mask) | (val & mask);
writel(new_val, addr);
@@ -585,7 +585,7 @@ int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
int len, sgdma_callback_t callback, void *arg);
void omap24xxcam_sgdma_sync(struct omap24xxcam_sgdma *sgdma);
void omap24xxcam_sgdma_init(struct omap24xxcam_sgdma *sgdma,
- unsigned long base,
+ void __iomem *base,
void (*reset_callback)(unsigned long data),
unsigned long reset_callback_data);
void omap24xxcam_sgdma_exit(struct omap24xxcam_sgdma *sgdma);
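The omap24xxcam hunks above convert the register base from unsigned long to void __iomem *, which is what readl()/writel() expect and what sparse can type-check. A tiny sketch of the resulting accessor pattern (demo_* names are placeholders):

#include <linux/io.h>
#include <linux/types.h>

struct demo_dma {
        void __iomem *base;             /* from ioremap_nocache() */
};

static u32 demo_reg_read(struct demo_dma *dma, u32 offset)
{
        return readl(dma->base + offset);
}

static void demo_reg_write(struct demo_dma *dma, u32 offset, u32 val)
{
        writel(val, dma->base + offset);
}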
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index 12d5f923e1d0..1c347633e663 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -329,19 +329,6 @@ void omap3isp_configure_bridge(struct isp_device *isp,
isp_reg_writel(isp, ispctrl_val, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
}
-/**
- * isp_set_pixel_clock - Configures the ISP pixel clock
- * @isp: OMAP3 ISP device
- * @pixelclk: Average pixel clock in Hz
- *
- * Set the average pixel clock required by the sensor. The ISP will use the
- * lowest possible memory bandwidth settings compatible with the clock.
- **/
-static void isp_set_pixel_clock(struct isp_device *isp, unsigned int pixelclk)
-{
- isp->isp_ccdc.vpcfg.pixelclk = pixelclk;
-}
-
void omap3isp_hist_dma_done(struct isp_device *isp)
{
if (omap3isp_ccdc_busy(&isp->isp_ccdc) ||
@@ -739,6 +726,17 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
unsigned long flags;
int ret;
+ /* If the preview engine crashed it might not respond to read/write
+ * operations on the L4 bus. This would result in a bus fault and a
+ * kernel oops. Refuse to start streaming in that case. This check must
+ * be performed before the loop below to avoid starting entities if the
+ * pipeline won't start anyway (those entities would then likely fail to
+ * stop, making the problem worse).
+ */
+ if ((pipe->entities & isp->crashed) &
+ (1U << isp->isp_prev.subdev.entity.id))
+ return -EIO;
+
spin_lock_irqsave(&pipe->lock, flags);
pipe->state &= ~(ISP_PIPELINE_IDLE_INPUT | ISP_PIPELINE_IDLE_OUTPUT);
spin_unlock_irqrestore(&pipe->lock, flags);
@@ -774,14 +772,6 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
}
}
- /* Frame number propagation. In continuous streaming mode the number
- * is incremented in the frame start ISR. In mem-to-mem mode
- * singleshot is used and frame start IRQs are not available.
- * Thus we have to increment the number here.
- */
- if (pipe->do_propagation && mode == ISP_PIPELINE_STREAM_SINGLESHOT)
- atomic_inc(&pipe->frame_number);
-
return 0;
}
@@ -879,13 +869,15 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
if (ret) {
dev_info(isp->dev, "Unable to stop %s\n", subdev->name);
+ /* If the entity failed to stop, assume it has
+ * crashed. Mark it as such; the ISP will be reset when
+ * applications release it.
+ */
+ isp->crashed |= 1U << subdev->entity.id;
failure = -ETIMEDOUT;
}
}
- if (failure < 0)
- isp->needs_reset = true;
-
return failure;
}
@@ -1069,6 +1061,7 @@ static int isp_reset(struct isp_device *isp)
udelay(1);
}
+ isp->crashed = 0;
return 0;
}
@@ -1495,11 +1488,13 @@ void omap3isp_put(struct isp_device *isp)
BUG_ON(isp->ref_count == 0);
if (--isp->ref_count == 0) {
isp_disable_interrupts(isp);
- isp_save_ctx(isp);
- if (isp->needs_reset) {
+ if (isp->domain)
+ isp_save_ctx(isp);
+ /* Reset the ISP if an entity has failed to stop. This is the
+ * only way to recover from such conditions.
+ */
+ if (isp->crashed)
isp_reset(isp);
- isp->needs_reset = false;
- }
isp_disable_clocks(isp);
}
mutex_unlock(&isp->isp_mutex);
@@ -1970,7 +1965,7 @@ error_csiphy:
*
* Always returns 0.
*/
-static int isp_remove(struct platform_device *pdev)
+static int __devexit isp_remove(struct platform_device *pdev)
{
struct isp_device *isp = platform_get_drvdata(pdev);
int i;
@@ -1981,6 +1976,7 @@ static int isp_remove(struct platform_device *pdev)
omap3isp_get(isp);
iommu_detach_device(isp->domain, &pdev->dev);
iommu_domain_free(isp->domain);
+ isp->domain = NULL;
omap3isp_put(isp);
free_irq(isp->irq_num, isp);
@@ -2050,7 +2046,7 @@ static int isp_map_mem_resource(struct platform_device *pdev,
* -EINVAL if couldn't install ISR,
* or clk_get return error value.
*/
-static int isp_probe(struct platform_device *pdev)
+static int __devinit isp_probe(struct platform_device *pdev)
{
struct isp_platform_data *pdata = pdev->dev.platform_data;
struct isp_device *isp;
@@ -2068,7 +2064,6 @@ static int isp_probe(struct platform_device *pdev)
isp->autoidle = autoidle;
isp->platform_cb.set_xclk = isp_set_xclk;
- isp->platform_cb.set_pixel_clock = isp_set_pixel_clock;
mutex_init(&isp->isp_mutex);
spin_lock_init(&isp->stat_lock);
@@ -2218,7 +2213,7 @@ MODULE_DEVICE_TABLE(platform, omap3isp_id_table);
static struct platform_driver omap3isp_driver = {
.probe = isp_probe,
- .remove = isp_remove,
+ .remove = __devexit_p(isp_remove),
.id_table = omap3isp_id_table,
.driver = {
.owner = THIS_MODULE,
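The omap3isp changes above replace the needs_reset flag with a u32 bitmask of crashed entities indexed by media entity id, so a pipeline can be refused before streaming if it contains a crashed entity. A minimal sketch of that bookkeeping (helper names are placeholders):

#include <linux/types.h>

static inline void mark_entity_crashed(u32 *crashed, unsigned int entity_id)
{
        *crashed |= 1U << entity_id;
}

static inline bool pipe_contains_crashed_entity(u32 pipe_entities, u32 crashed)
{
        return (pipe_entities & crashed) != 0;
}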
diff --git a/drivers/media/video/omap3isp/isp.h b/drivers/media/video/omap3isp/isp.h
index d96603eb0d17..fc7af3e32efd 100644
--- a/drivers/media/video/omap3isp/isp.h
+++ b/drivers/media/video/omap3isp/isp.h
@@ -129,7 +129,6 @@ struct isp_platform_callback {
int (*csiphy_config)(struct isp_csiphy *phy,
struct isp_csiphy_dphy_cfg *dphy,
struct isp_csiphy_lanes_cfg *lanes);
- void (*set_pixel_clock)(struct isp_device *isp, unsigned int pixelclk);
};
/*
@@ -145,6 +144,7 @@ struct isp_platform_callback {
* @raw_dmamask: Raw DMA mask
* @stat_lock: Spinlock for handling statistics
* @isp_mutex: Mutex for serializing requests to ISP.
+ * @crashed: Bitmask of crashed entities (indexed by entity ID)
* @has_context: Context has been saved at least once and can be restored.
* @ref_count: Reference count for handling multiple ISP requests.
* @cam_ick: Pointer to camera interface clock structure.
@@ -184,7 +184,7 @@ struct isp_device {
/* ISP Obj */
spinlock_t stat_lock; /* common lock for statistic drivers */
struct mutex isp_mutex; /* For handling ref_count field */
- bool needs_reset;
+ u32 crashed;
int has_context;
int ref_count;
unsigned int autoidle;
@@ -237,10 +237,6 @@ void omap3isp_configure_bridge(struct isp_device *isp,
const struct isp_parallel_platform_data *pdata,
unsigned int shift);
-#define ISP_XCLK_NONE 0
-#define ISP_XCLK_A 1
-#define ISP_XCLK_B 2
-
struct isp_device *omap3isp_get(struct isp_device *isp);
void omap3isp_put(struct isp_device *isp);
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index eaabc27f0fa2..7e32331b60fb 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -38,6 +38,9 @@
#include "ispreg.h"
#include "ispccdc.h"
+#define CCDC_MIN_WIDTH 32
+#define CCDC_MIN_HEIGHT 32
+
static struct v4l2_mbus_framefmt *
__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
unsigned int pad, enum v4l2_subdev_format_whence which);
@@ -836,8 +839,8 @@ static void ccdc_config_vp(struct isp_ccdc_device *ccdc)
if (pipe->input)
div = DIV_ROUND_UP(l3_ick, pipe->max_rate);
- else if (ccdc->vpcfg.pixelclk)
- div = l3_ick / ccdc->vpcfg.pixelclk;
+ else if (pipe->external_rate)
+ div = l3_ick / pipe->external_rate;
div = clamp(div, 2U, max_div);
fmtcfg_vp |= (div - 2) << ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT;
@@ -1118,6 +1121,7 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
struct isp_parallel_platform_data *pdata = NULL;
struct v4l2_subdev *sensor;
struct v4l2_mbus_framefmt *format;
+ const struct v4l2_rect *crop;
const struct isp_format_info *fmt_info;
struct v4l2_subdev_format fmt_src;
unsigned int depth_out;
@@ -1211,14 +1215,14 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VDINT);
/* CCDC_PAD_SOURCE_OF */
- format = &ccdc->formats[CCDC_PAD_SOURCE_OF];
+ crop = &ccdc->crop;
- isp_reg_writel(isp, (0 << ISPCCDC_HORZ_INFO_SPH_SHIFT) |
- ((format->width - 1) << ISPCCDC_HORZ_INFO_NPH_SHIFT),
+ isp_reg_writel(isp, (crop->left << ISPCCDC_HORZ_INFO_SPH_SHIFT) |
+ ((crop->width - 1) << ISPCCDC_HORZ_INFO_NPH_SHIFT),
OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HORZ_INFO);
- isp_reg_writel(isp, 0 << ISPCCDC_VERT_START_SLV0_SHIFT,
+ isp_reg_writel(isp, crop->top << ISPCCDC_VERT_START_SLV0_SHIFT,
OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_START);
- isp_reg_writel(isp, (format->height - 1)
+ isp_reg_writel(isp, (crop->height - 1)
<< ISPCCDC_VERT_LINES_NLV_SHIFT,
OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_LINES);
@@ -1410,6 +1414,9 @@ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
struct video_device *vdev = ccdc->subdev.devnode;
struct v4l2_event event;
+ /* Frame number propagation */
+ atomic_inc(&pipe->frame_number);
+
memset(&event, 0, sizeof(event));
event.type = V4L2_EVENT_FRAME_SYNC;
event.u.frame_sync.frame_sequence = atomic_read(&pipe->frame_number);
@@ -1703,7 +1710,7 @@ static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
if (sub->id != 0)
return -EINVAL;
- return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS);
+ return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS, NULL);
}
static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
@@ -1790,6 +1797,16 @@ __ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
return &ccdc->formats[pad];
}
+static struct v4l2_rect *
+__ccdc_get_crop(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(fh, CCDC_PAD_SOURCE_OF);
+ else
+ return &ccdc->crop;
+}
+
/*
* ccdc_try_format - Try video format on a pad
* @ccdc: ISP CCDC device
@@ -1806,6 +1823,7 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
const struct isp_format_info *info;
unsigned int width = fmt->width;
unsigned int height = fmt->height;
+ struct v4l2_rect *crop;
unsigned int i;
switch (pad) {
@@ -1831,14 +1849,10 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, which);
memcpy(fmt, format, sizeof(*fmt));
- /* The data formatter truncates the number of horizontal output
- * pixels to a multiple of 16. To avoid clipping data, allow
- * callers to request an output size bigger than the input size
- * up to the nearest multiple of 16.
- */
- fmt->width = clamp_t(u32, width, 32, fmt->width + 15);
- fmt->width &= ~15;
- fmt->height = clamp_t(u32, height, 32, fmt->height);
+ /* Hardcode the output size to the crop rectangle size. */
+ crop = __ccdc_get_crop(ccdc, fh, which);
+ fmt->width = crop->width;
+ fmt->height = crop->height;
break;
case CCDC_PAD_SOURCE_VP:
@@ -1866,6 +1880,49 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
}
/*
+ * ccdc_try_crop - Validate a crop rectangle
+ * @ccdc: ISP CCDC device
+ * @sink: format on the sink pad
+ * @crop: crop rectangle to be validated
+ */
+static void ccdc_try_crop(struct isp_ccdc_device *ccdc,
+ const struct v4l2_mbus_framefmt *sink,
+ struct v4l2_rect *crop)
+{
+ const struct isp_format_info *info;
+ unsigned int max_width;
+
+ /* For Bayer formats, restrict left/top and width/height to even values
+ * to keep the Bayer pattern.
+ */
+ info = omap3isp_video_format_info(sink->code);
+ if (info->flavor != V4L2_MBUS_FMT_Y8_1X8) {
+ crop->left &= ~1;
+ crop->top &= ~1;
+ }
+
+ crop->left = clamp_t(u32, crop->left, 0, sink->width - CCDC_MIN_WIDTH);
+ crop->top = clamp_t(u32, crop->top, 0, sink->height - CCDC_MIN_HEIGHT);
+
+ /* The data formatter truncates the number of horizontal output pixels
+ * to a multiple of 16. To avoid clipping data, allow callers to request
+ * an output size bigger than the input size up to the nearest multiple
+ * of 16.
+ */
+ max_width = (sink->width - crop->left + 15) & ~15;
+ crop->width = clamp_t(u32, crop->width, CCDC_MIN_WIDTH, max_width)
+ & ~15;
+ crop->height = clamp_t(u32, crop->height, CCDC_MIN_HEIGHT,
+ sink->height - crop->top);
+
+ /* Odd width/height values don't make sense for Bayer formats. */
+ if (info->flavor != V4L2_MBUS_FMT_Y8_1X8) {
+ crop->width &= ~1;
+ crop->height &= ~1;
+ }
+}
+
+/*
* ccdc_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
* @fh : V4L2 subdev file handle
@@ -1937,6 +1994,93 @@ static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
}
/*
+ * ccdc_get_selection - Retrieve a selection rectangle on a pad
+ * @sd: ISP CCDC V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangles are the crop rectangles on the output formatter
+ * source pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (sel->pad != CCDC_PAD_SOURCE_OF)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = INT_MAX;
+ sel->r.height = INT_MAX;
+
+ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, sel->which);
+ ccdc_try_crop(ccdc, format, &sel->r);
+ break;
+
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ sel->r = *__ccdc_get_crop(ccdc, fh, sel->which);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * ccdc_set_selection - Set a selection rectangle on a pad
+ * @sd: ISP CCDC V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangle is the actual crop rectangle on the output
+ * formatter source pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL ||
+ sel->pad != CCDC_PAD_SOURCE_OF)
+ return -EINVAL;
+
+ /* The crop rectangle can't be changed while streaming. */
+ if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
+ return -EBUSY;
+
+ /* Modifying the crop rectangle always changes the format on the source
+ * pad. If the KEEP_CONFIG flag is set, just return the current crop
+ * rectangle.
+ */
+ if (sel->flags & V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG) {
+ sel->r = *__ccdc_get_crop(ccdc, fh, sel->which);
+ return 0;
+ }
+
+ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, sel->which);
+ ccdc_try_crop(ccdc, format, &sel->r);
+ *__ccdc_get_crop(ccdc, fh, sel->which) = sel->r;
+
+ /* Update the source format. */
+ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_OF, sel->which);
+ ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_OF, format, sel->which);
+
+ return 0;
+}
+
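To show how the two callbacks above are reached from userspace, here is a hedged sketch using the subdev selection ioctls documented elsewhere in this series. The device node path and the pad index are assumptions (CCDC_PAD_SOURCE_OF is taken to be pad 1) and error handling is minimal.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

int main(void)
{
	struct v4l2_subdev_selection sel;
	int fd = open("/dev/v4l-subdev2", O_RDWR);	/* hypothetical CCDC node */

	if (fd < 0)
		return 1;

	/* Query the crop bounds on the output formatter source pad. */
	memset(&sel, 0, sizeof(sel));
	sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	sel.pad = 1;				/* assumed CCDC_PAD_SOURCE_OF */
	sel.target = V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS;
	if (ioctl(fd, VIDIOC_SUBDEV_G_SELECTION, &sel) == 0)
		printf("bounds: %ux%u\n", sel.r.width, sel.r.height);

	/* Set an actual crop rectangle; the driver adjusts it through
	 * ccdc_try_crop() and propagates it to the source format. */
	sel.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL;
	sel.flags = 0;			/* KEEP_CONFIG would skip propagation */
	sel.r.left = 0;
	sel.r.top = 0;
	sel.r.width = 1280;
	sel.r.height = 720;
	if (ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &sel) == 0)
		printf("crop: %ux%u\n", sel.r.width, sel.r.height);

	close(fd);
	return 0;
}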
+/*
* ccdc_get_format - Retrieve the video format on a pad
* @sd : ISP CCDC V4L2 subdevice
* @fh : V4L2 subdev file handle
@@ -1973,6 +2117,7 @@ static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
format = __ccdc_get_format(ccdc, fh, fmt->pad, fmt->which);
if (format == NULL)
@@ -1983,6 +2128,16 @@ static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/* Propagate the format from sink to source */
if (fmt->pad == CCDC_PAD_SINK) {
+ /* Reset the crop rectangle. */
+ crop = __ccdc_get_crop(ccdc, fh, fmt->which);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+
+ ccdc_try_crop(ccdc, &fmt->format, crop);
+
+ /* Update the source formats. */
format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_OF,
fmt->which);
*format = fmt->format;
@@ -2000,6 +2155,69 @@ static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
}
/*
+ * Decide whether desired output pixel code can be obtained with
+ * the lane shifter by shifting the input pixel code.
+ * @in: input pixelcode to shifter
+ * @out: output pixelcode from shifter
+ * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
+ *
+ * return true if the combination is possible
+ * return false otherwise
+ */
+static bool ccdc_is_shiftable(enum v4l2_mbus_pixelcode in,
+ enum v4l2_mbus_pixelcode out,
+ unsigned int additional_shift)
+{
+ const struct isp_format_info *in_info, *out_info;
+
+ if (in == out)
+ return true;
+
+ in_info = omap3isp_video_format_info(in);
+ out_info = omap3isp_video_format_info(out);
+
+ if ((in_info->flavor == 0) || (out_info->flavor == 0))
+ return false;
+
+ if (in_info->flavor != out_info->flavor)
+ return false;
+
+ return in_info->bpp - out_info->bpp + additional_shift <= 6;
+}
+
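A short illustration of the inequality above: the lane shifter can absorb at most 6 bits, counting both the bit-depth difference between the two formats and the wiring offset of the sensor's LSB. The bit depths below are hypothetical stand-ins for the format-info lookup.

#include <stdbool.h>
#include <stdio.h>

/* Same check as ccdc_is_shiftable(), with raw bit depths in place of
 * media bus codes (the flavor comparison is elided for brevity). */
static bool shiftable(unsigned int in_bpp, unsigned int out_bpp,
		      unsigned int additional_shift)
{
	return in_bpp - out_bpp + additional_shift <= 6;
}

int main(void)
{
	/* 12-bit data reduced to 8 bits with the sensor LSB wired two bits
	 * above CAMEXT[0]: 12 - 8 + 2 = 6, still within range. */
	printf("%d\n", shiftable(12, 8, 2));	/* prints 1 */
	/* 14 - 8 + 2 = 8 exceeds what the lane shifter can drop. */
	printf("%d\n", shiftable(14, 8, 2));	/* prints 0 */
	return 0;
}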
+static int ccdc_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ unsigned long parallel_shift;
+
+ /* Check if the two ends match */
+ if (source_fmt->format.width != sink_fmt->format.width ||
+ source_fmt->format.height != sink_fmt->format.height)
+ return -EPIPE;
+
+ /* We've got a parallel sensor here. */
+ if (ccdc->input == CCDC_INPUT_PARALLEL) {
+ struct isp_parallel_platform_data *pdata =
+ &((struct isp_v4l2_subdevs_group *)
+ media_entity_to_v4l2_subdev(link->source->entity)
+ ->host_priv)->bus.parallel;
+ parallel_shift = pdata->data_lane_shift * 2;
+ } else {
+ parallel_shift = 0;
+ }
+
+ /* Lane shifter may be used to drop bits on CCDC sink pad */
+ if (!ccdc_is_shiftable(source_fmt->format.code,
+ sink_fmt->format.code, parallel_shift))
+ return -EPIPE;
+
+ return 0;
+}
+
+/*
* ccdc_init_formats - Initialize formats on all pads
* @sd: ISP CCDC V4L2 subdevice
* @fh: V4L2 subdev file handle
@@ -2041,6 +2259,9 @@ static const struct v4l2_subdev_pad_ops ccdc_v4l2_pad_ops = {
.enum_frame_size = ccdc_enum_frame_size,
.get_fmt = ccdc_get_format,
.set_fmt = ccdc_set_format,
+ .get_selection = ccdc_get_selection,
+ .set_selection = ccdc_set_selection,
+ .link_validate = ccdc_link_validate,
};
/* V4L2 subdev operations */
@@ -2150,6 +2371,7 @@ static int ccdc_link_setup(struct media_entity *entity,
/* media operations */
static const struct media_entity_operations ccdc_media_ops = {
.link_setup = ccdc_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
};
void omap3isp_ccdc_unregister_entities(struct isp_ccdc_device *ccdc)
@@ -2276,8 +2498,6 @@ int omap3isp_ccdc_init(struct isp_device *isp)
ccdc->clamp.oblen = 0;
ccdc->clamp.dcsubval = 0;
- ccdc->vpcfg.pixelclk = 0;
-
ccdc->update = OMAP3ISP_CCDC_BLCLAMP;
ccdc_apply_controls(ccdc);
diff --git a/drivers/media/video/omap3isp/ispccdc.h b/drivers/media/video/omap3isp/ispccdc.h
index 6d0264bab75b..890f6b3a68fd 100644
--- a/drivers/media/video/omap3isp/ispccdc.h
+++ b/drivers/media/video/omap3isp/ispccdc.h
@@ -80,14 +80,6 @@ struct ispccdc_syncif {
u8 bt_r656_en;
};
-/*
- * struct ispccdc_vp - Structure for Video Port parameters
- * @pixelclk: Input pixel clock in Hz
- */
-struct ispccdc_vp {
- unsigned int pixelclk;
-};
-
enum ispccdc_lsc_state {
LSC_STATE_STOPPED = 0,
LSC_STATE_STOPPING = 1,
@@ -147,6 +139,7 @@ struct ispccdc_lsc {
* @subdev: V4L2 subdevice
* @pads: Sink and source media entity pads
* @formats: Active video formats
+ * @crop: Active crop rectangle on the OF source pad
* @input: Active input
* @output: Active outputs
* @video_out: Output video node
@@ -161,7 +154,6 @@ struct ispccdc_lsc {
* @update: Bitmask of controls to update during the next interrupt
* @shadow_update: Controls update in progress by userspace
* @syncif: Interface synchronization configuration
- * @vpcfg: Video port configuration
* @underrun: A buffer underrun occurred and a new buffer has been queued
* @state: Streaming state
* @lock: Serializes shadow_update with interrupt handler
@@ -173,6 +165,7 @@ struct isp_ccdc_device {
struct v4l2_subdev subdev;
struct media_pad pads[CCDC_PADS_NUM];
struct v4l2_mbus_framefmt formats[CCDC_PADS_NUM];
+ struct v4l2_rect crop;
enum ccdc_input_entity input;
unsigned int output;
@@ -190,7 +183,6 @@ struct isp_ccdc_device {
unsigned int shadow_update;
struct ispccdc_syncif syncif;
- struct ispccdc_vp vpcfg;
unsigned int underrun:1;
enum isp_pipeline_stream_state state;
diff --git a/drivers/media/video/omap3isp/ispccp2.c b/drivers/media/video/omap3isp/ispccp2.c
index 70ddbf35b223..85f0de85f37c 100644
--- a/drivers/media/video/omap3isp/ispccp2.c
+++ b/drivers/media/video/omap3isp/ispccp2.c
@@ -161,7 +161,6 @@ static void ccp2_pwr_cfg(struct isp_ccp2_device *ccp2)
static void ccp2_if_enable(struct isp_ccp2_device *ccp2, u8 enable)
{
struct isp_device *isp = to_isp_device(ccp2);
- struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
int i;
if (enable && ccp2->vdds_csib)
@@ -178,19 +177,6 @@ static void ccp2_if_enable(struct isp_ccp2_device *ccp2, u8 enable)
ISPCCP2_CTRL_MODE | ISPCCP2_CTRL_IF_EN,
enable ? (ISPCCP2_CTRL_MODE | ISPCCP2_CTRL_IF_EN) : 0);
- /* For frame count propagation */
- if (pipe->do_propagation) {
- /* We may want the Frame Start IRQ from LC0 */
- if (enable)
- isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2,
- ISPCCP2_LC01_IRQENABLE,
- ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ);
- else
- isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCP2,
- ISPCCP2_LC01_IRQENABLE,
- ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ);
- }
-
if (!enable && ccp2->vdds_csib)
regulator_disable(ccp2->vdds_csib);
}
@@ -350,7 +336,6 @@ static void ccp2_lcx_config(struct isp_ccp2_device *ccp2,
ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ |
ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ |
ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ |
- ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ |
ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ |
ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ;
@@ -613,14 +598,6 @@ void omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2)
if (omap3isp_module_sync_is_stopping(&ccp2->wait, &ccp2->stopping))
return;
- /* Frame number propagation */
- if (lcx_irqstatus & ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ) {
- struct isp_pipeline *pipe =
- to_isp_pipeline(&ccp2->subdev.entity);
- if (pipe->do_propagation)
- atomic_inc(&pipe->frame_number);
- }
-
/* Handle queued buffers on frame end interrupts */
if (lcm_irqstatus & ISPCCP2_LCM_IRQSTATUS_EOF_IRQ)
ccp2_isr_buffer(ccp2);
@@ -1021,6 +998,7 @@ static int ccp2_link_setup(struct media_entity *entity,
/* media operations */
static const struct media_entity_operations ccp2_media_ops = {
.link_setup = ccp2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
};
/*
diff --git a/drivers/media/video/omap3isp/ispcsi2.c b/drivers/media/video/omap3isp/ispcsi2.c
index fcb5168996a7..a1724362b6d5 100644
--- a/drivers/media/video/omap3isp/ispcsi2.c
+++ b/drivers/media/video/omap3isp/ispcsi2.c
@@ -378,21 +378,17 @@ static void csi2_timing_config(struct isp_device *isp,
static void csi2_irq_ctx_set(struct isp_device *isp,
struct isp_csi2_device *csi2, int enable)
{
- u32 reg = ISPCSI2_CTX_IRQSTATUS_FE_IRQ;
int i;
- if (csi2->use_fs_irq)
- reg |= ISPCSI2_CTX_IRQSTATUS_FS_IRQ;
-
for (i = 0; i < 8; i++) {
- isp_reg_writel(isp, reg, csi2->regs1,
+ isp_reg_writel(isp, ISPCSI2_CTX_IRQSTATUS_FE_IRQ, csi2->regs1,
ISPCSI2_CTX_IRQSTATUS(i));
if (enable)
isp_reg_set(isp, csi2->regs1, ISPCSI2_CTX_IRQENABLE(i),
- reg);
+ ISPCSI2_CTX_IRQSTATUS_FE_IRQ);
else
isp_reg_clr(isp, csi2->regs1, ISPCSI2_CTX_IRQENABLE(i),
- reg);
+ ISPCSI2_CTX_IRQSTATUS_FE_IRQ);
}
}
@@ -690,14 +686,6 @@ static void csi2_isr_ctx(struct isp_csi2_device *csi2,
status = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n));
isp_reg_writel(isp, status, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n));
- /* Propagate frame number */
- if (status & ISPCSI2_CTX_IRQSTATUS_FS_IRQ) {
- struct isp_pipeline *pipe =
- to_isp_pipeline(&csi2->subdev.entity);
- if (pipe->do_propagation)
- atomic_inc(&pipe->frame_number);
- }
-
if (!(status & ISPCSI2_CTX_IRQSTATUS_FE_IRQ))
return;
@@ -1047,14 +1035,12 @@ static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
{
struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct isp_device *isp = csi2->isp;
- struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
struct isp_video *video_out = &csi2->video_out;
switch (enable) {
case ISP_PIPELINE_STREAM_CONTINUOUS:
if (omap3isp_csiphy_acquire(csi2->phy) < 0)
return -ENODEV;
- csi2->use_fs_irq = pipe->do_propagation;
if (csi2->output & CSI2_OUTPUT_MEMORY)
omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CSI2A_WRITE);
csi2_configure(csi2);
@@ -1181,6 +1167,7 @@ static int csi2_link_setup(struct media_entity *entity,
/* media operations */
static const struct media_entity_operations csi2_media_ops = {
.link_setup = csi2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
};
void omap3isp_csi2_unregister_entities(struct isp_csi2_device *csi2)
diff --git a/drivers/media/video/omap3isp/ispcsi2.h b/drivers/media/video/omap3isp/ispcsi2.h
index 885ad79a7678..c57729b7e86e 100644
--- a/drivers/media/video/omap3isp/ispcsi2.h
+++ b/drivers/media/video/omap3isp/ispcsi2.h
@@ -145,7 +145,6 @@ struct isp_csi2_device {
u32 output; /* output to CCDC, memory or both? */
bool dpcm_decompress;
unsigned int frame_skip;
- bool use_fs_irq;
struct isp_csiphy *phy;
struct isp_csi2_ctx_cfg contexts[ISP_CSI2_MAX_CTX_NUM + 1];
diff --git a/drivers/media/video/omap3isp/ispcsiphy.c b/drivers/media/video/omap3isp/ispcsiphy.c
index 5be37ce7d0c2..348f67ebbbc9 100644
--- a/drivers/media/video/omap3isp/ispcsiphy.c
+++ b/drivers/media/video/omap3isp/ispcsiphy.c
@@ -186,7 +186,9 @@ int omap3isp_csiphy_acquire(struct isp_csiphy *phy)
if (rval < 0)
goto done;
- omap3isp_csi2_reset(phy->csi2);
+ rval = omap3isp_csi2_reset(phy->csi2);
+ if (rval < 0)
+ goto done;
csiphy_dphy_config(phy);
csiphy_lanes_config(phy);
diff --git a/drivers/media/video/omap3isp/ispcsiphy.h b/drivers/media/video/omap3isp/ispcsiphy.h
index 9596dc6830a6..e93a661e65d9 100644
--- a/drivers/media/video/omap3isp/ispcsiphy.h
+++ b/drivers/media/video/omap3isp/ispcsiphy.h
@@ -27,22 +27,11 @@
#ifndef OMAP3_ISP_CSI_PHY_H
#define OMAP3_ISP_CSI_PHY_H
+#include <media/omap3isp.h>
+
struct isp_csi2_device;
struct regulator;
-struct csiphy_lane {
- u8 pos;
- u8 pol;
-};
-
-#define ISP_CSIPHY2_NUM_DATA_LANES 2
-#define ISP_CSIPHY1_NUM_DATA_LANES 1
-
-struct isp_csiphy_lanes_cfg {
- struct csiphy_lane data[ISP_CSIPHY2_NUM_DATA_LANES];
- struct csiphy_lane clk;
-};
-
struct isp_csiphy_dphy_cfg {
u8 ths_term;
u8 ths_settle;
diff --git a/drivers/media/video/omap3isp/isppreview.c b/drivers/media/video/omap3isp/isppreview.c
index 6d0fb2c8c26d..8a4935ecc655 100644
--- a/drivers/media/video/omap3isp/isppreview.c
+++ b/drivers/media/video/omap3isp/isppreview.c
@@ -441,23 +441,6 @@ preview_enable_dcor(struct isp_prev_device *prev, u8 enable)
}
/*
- * preview_enable_cfa - Enable/Disable the CFA Interpolation.
- * @enable: 1 - Enables the CFA.
- */
-static void
-preview_enable_cfa(struct isp_prev_device *prev, u8 enable)
-{
- struct isp_device *isp = to_isp_device(prev);
-
- if (enable)
- isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
- ISPPRV_PCR_CFAEN);
- else
- isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
- ISPPRV_PCR_CFAEN);
-}
-
-/*
* preview_enable_gammabypass - Enables/Disables the GammaByPass
* @enable: 1 - Bypasses Gamma - 10bit input is cropped to 8MSB.
* 0 - Goes through Gamma Correction. input and output is 10bit.
@@ -608,12 +591,12 @@ preview_config_rgb_blending(struct isp_prev_device *prev, const void *rgb2rgb)
}
/*
- * Configures the RGB-YCbYCr conversion matrix
+ * Configures the color space conversion (RGB to YCbYCr) matrix
* @prev_csc: Structure containing the RGB to YCbYCr matrix and the
* YCbCr offset.
*/
static void
-preview_config_rgb_to_ycbcr(struct isp_prev_device *prev, const void *prev_csc)
+preview_config_csc(struct isp_prev_device *prev, const void *prev_csc)
{
struct isp_device *isp = to_isp_device(prev);
const struct omap3isp_prev_csc *csc = prev_csc;
@@ -649,12 +632,18 @@ preview_config_rgb_to_ycbcr(struct isp_prev_device *prev, const void *prev_csc)
static void
preview_update_contrast(struct isp_prev_device *prev, u8 contrast)
{
- struct prev_params *params = &prev->params;
+ struct prev_params *params;
+ unsigned long flags;
+
+ spin_lock_irqsave(&prev->params.lock, flags);
+ params = (prev->params.active & OMAP3ISP_PREV_CONTRAST)
+ ? &prev->params.params[0] : &prev->params.params[1];
if (params->contrast != (contrast * ISPPRV_CONTRAST_UNITS)) {
params->contrast = contrast * ISPPRV_CONTRAST_UNITS;
- prev->update |= PREV_CONTRAST;
+ params->update |= OMAP3ISP_PREV_CONTRAST;
}
+ spin_unlock_irqrestore(&prev->params.lock, flags);
}
/*
@@ -681,12 +670,18 @@ preview_config_contrast(struct isp_prev_device *prev, const void *params)
static void
preview_update_brightness(struct isp_prev_device *prev, u8 brightness)
{
- struct prev_params *params = &prev->params;
+ struct prev_params *params;
+ unsigned long flags;
+
+ spin_lock_irqsave(&prev->params.lock, flags);
+ params = (prev->params.active & OMAP3ISP_PREV_BRIGHTNESS)
+ ? &prev->params.params[0] : &prev->params.params[1];
if (params->brightness != (brightness * ISPPRV_BRIGHT_UNITS)) {
params->brightness = brightness * ISPPRV_BRIGHT_UNITS;
- prev->update |= PREV_BRIGHTNESS;
+ params->update |= OMAP3ISP_PREV_BRIGHTNESS;
}
+ spin_unlock_irqrestore(&prev->params.lock, flags);
}
/*
@@ -721,159 +716,188 @@ preview_config_yc_range(struct isp_prev_device *prev, const void *yclimit)
OMAP3_ISP_IOMEM_PREV, ISPPRV_SETUP_YC);
}
+static u32
+preview_params_lock(struct isp_prev_device *prev, u32 update, bool shadow)
+{
+ u32 active = prev->params.active;
+
+ if (shadow) {
+ /* Mark all shadow parameters we are going to touch as busy. */
+ prev->params.params[0].busy |= ~active & update;
+ prev->params.params[1].busy |= active & update;
+ } else {
+ /* Mark all active parameters we are going to touch as busy. */
+ update = (prev->params.params[0].update & active)
+ | (prev->params.params[1].update & ~active);
+
+ prev->params.params[0].busy |= active & update;
+ prev->params.params[1].busy |= ~active & update;
+ }
+
+ return update;
+}
+
+static void
+preview_params_unlock(struct isp_prev_device *prev, u32 update, bool shadow)
+{
+ u32 active = prev->params.active;
+
+ if (shadow) {
+ /* Set the update flag for shadow parameters that have been
+ * updated and clear the busy flag for all shadow parameters.
+ */
+ prev->params.params[0].update |= (~active & update);
+ prev->params.params[1].update |= (active & update);
+ prev->params.params[0].busy &= active;
+ prev->params.params[1].busy &= ~active;
+ } else {
+ /* Clear the update flag for active parameters that have been
+ * applied and the busy flag for all active parameters.
+ */
+ prev->params.params[0].update &= ~(active & update);
+ prev->params.params[1].update &= ~(~active & update);
+ prev->params.params[0].busy &= ~active;
+ prev->params.params[1].busy &= active;
+ }
+}
+
+static void preview_params_switch(struct isp_prev_device *prev)
+{
+ u32 to_switch;
+
+ /* Switch active parameters with updated shadow parameters when the
+ * shadow parameter has been updated and neither the active nor the
+ * shadow parameter is busy.
+ */
+ to_switch = (prev->params.params[0].update & ~prev->params.active)
+ | (prev->params.params[1].update & prev->params.active);
+ to_switch &= ~(prev->params.params[0].busy |
+ prev->params.params[1].busy);
+ if (to_switch == 0)
+ return;
+
+ prev->params.active ^= to_switch;
+
+ /* Remove the update flag for the shadow copy of parameters we have
+ * switched.
+ */
+ prev->params.params[0].update &= ~(~prev->params.active & to_switch);
+ prev->params.params[1].update &= ~(prev->params.active & to_switch);
+}
+
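The active/shadow bookkeeping implemented by the three helpers above is easiest to follow with concrete bitmasks. The fragment below is a standalone illustration of a single switch decision, not driver code; the feature bits are stand-ins for the OMAP3ISP_PREV_* flags.

#include <stdio.h>

#define FEAT_WB		(1 << 0)	/* stand-in feature bits */
#define FEAT_GAMMA	(1 << 1)

struct param_set { unsigned int update, busy; };

int main(void)
{
	/* An active bit set means set 0 holds the active copy for that
	 * feature and set 1 is its shadow, and vice versa. */
	unsigned int active = FEAT_WB;
	struct param_set s[2] = {
		{ .update = FEAT_GAMMA, .busy = 0 },		/* set 0 */
		{ .update = FEAT_WB, .busy = FEAT_WB },		/* set 1 */
	};
	unsigned int to_switch;

	/* Same expression as preview_params_switch(): a feature switches
	 * when its shadow copy was updated and neither copy is busy. */
	to_switch = (s[0].update & ~active) | (s[1].update & active);
	to_switch &= ~(s[0].busy | s[1].busy);

	active ^= to_switch;

	/* Gamma switches (shadow updated, not busy); white balance does not
	 * because its shadow copy is still busy. Prints 0x2 0x3. */
	printf("0x%x 0x%x\n", to_switch, active);
	return 0;
}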
/* preview parameters update structure */
struct preview_update {
- int cfg_bit;
- int feature_bit;
void (*config)(struct isp_prev_device *, const void *);
void (*enable)(struct isp_prev_device *, u8);
+ unsigned int param_offset;
+ unsigned int param_size;
+ unsigned int config_offset;
+ bool skip;
};
-static struct preview_update update_attrs[] = {
- {OMAP3ISP_PREV_LUMAENH, PREV_LUMA_ENHANCE,
+/* Keep the array indexed by the OMAP3ISP_PREV_* bit number. */
+static const struct preview_update update_attrs[] = {
+ /* OMAP3ISP_PREV_LUMAENH */ {
preview_config_luma_enhancement,
- preview_enable_luma_enhancement},
- {OMAP3ISP_PREV_INVALAW, PREV_INVERSE_ALAW,
+ preview_enable_luma_enhancement,
+ offsetof(struct prev_params, luma),
+ FIELD_SIZEOF(struct prev_params, luma),
+ offsetof(struct omap3isp_prev_update_config, luma),
+ }, /* OMAP3ISP_PREV_INVALAW */ {
NULL,
- preview_enable_invalaw},
- {OMAP3ISP_PREV_HRZ_MED, PREV_HORZ_MEDIAN_FILTER,
+ preview_enable_invalaw,
+ }, /* OMAP3ISP_PREV_HRZ_MED */ {
preview_config_hmed,
- preview_enable_hmed},
- {OMAP3ISP_PREV_CFA, PREV_CFA,
+ preview_enable_hmed,
+ offsetof(struct prev_params, hmed),
+ FIELD_SIZEOF(struct prev_params, hmed),
+ offsetof(struct omap3isp_prev_update_config, hmed),
+ }, /* OMAP3ISP_PREV_CFA */ {
preview_config_cfa,
- preview_enable_cfa},
- {OMAP3ISP_PREV_CHROMA_SUPP, PREV_CHROMA_SUPPRESS,
+ NULL,
+ offsetof(struct prev_params, cfa),
+ FIELD_SIZEOF(struct prev_params, cfa),
+ offsetof(struct omap3isp_prev_update_config, cfa),
+ }, /* OMAP3ISP_PREV_CHROMA_SUPP */ {
preview_config_chroma_suppression,
- preview_enable_chroma_suppression},
- {OMAP3ISP_PREV_WB, PREV_WB,
+ preview_enable_chroma_suppression,
+ offsetof(struct prev_params, csup),
+ FIELD_SIZEOF(struct prev_params, csup),
+ offsetof(struct omap3isp_prev_update_config, csup),
+ }, /* OMAP3ISP_PREV_WB */ {
preview_config_whitebalance,
- NULL},
- {OMAP3ISP_PREV_BLKADJ, PREV_BLKADJ,
+ NULL,
+ offsetof(struct prev_params, wbal),
+ FIELD_SIZEOF(struct prev_params, wbal),
+ offsetof(struct omap3isp_prev_update_config, wbal),
+ }, /* OMAP3ISP_PREV_BLKADJ */ {
preview_config_blkadj,
- NULL},
- {OMAP3ISP_PREV_RGB2RGB, PREV_RGB2RGB,
+ NULL,
+ offsetof(struct prev_params, blkadj),
+ FIELD_SIZEOF(struct prev_params, blkadj),
+ offsetof(struct omap3isp_prev_update_config, blkadj),
+ }, /* OMAP3ISP_PREV_RGB2RGB */ {
preview_config_rgb_blending,
- NULL},
- {OMAP3ISP_PREV_COLOR_CONV, PREV_COLOR_CONV,
- preview_config_rgb_to_ycbcr,
- NULL},
- {OMAP3ISP_PREV_YC_LIMIT, PREV_YCLIMITS,
+ NULL,
+ offsetof(struct prev_params, rgb2rgb),
+ FIELD_SIZEOF(struct prev_params, rgb2rgb),
+ offsetof(struct omap3isp_prev_update_config, rgb2rgb),
+ }, /* OMAP3ISP_PREV_COLOR_CONV */ {
+ preview_config_csc,
+ NULL,
+ offsetof(struct prev_params, csc),
+ FIELD_SIZEOF(struct prev_params, csc),
+ offsetof(struct omap3isp_prev_update_config, csc),
+ }, /* OMAP3ISP_PREV_YC_LIMIT */ {
preview_config_yc_range,
- NULL},
- {OMAP3ISP_PREV_DEFECT_COR, PREV_DEFECT_COR,
+ NULL,
+ offsetof(struct prev_params, yclimit),
+ FIELD_SIZEOF(struct prev_params, yclimit),
+ offsetof(struct omap3isp_prev_update_config, yclimit),
+ }, /* OMAP3ISP_PREV_DEFECT_COR */ {
preview_config_dcor,
- preview_enable_dcor},
- {OMAP3ISP_PREV_GAMMABYPASS, PREV_GAMMA_BYPASS,
+ preview_enable_dcor,
+ offsetof(struct prev_params, dcor),
+ FIELD_SIZEOF(struct prev_params, dcor),
+ offsetof(struct omap3isp_prev_update_config, dcor),
+ }, /* OMAP3ISP_PREV_GAMMABYPASS */ {
NULL,
- preview_enable_gammabypass},
- {OMAP3ISP_PREV_DRK_FRM_CAPTURE, PREV_DARK_FRAME_CAPTURE,
+ preview_enable_gammabypass,
+ }, /* OMAP3ISP_PREV_DRK_FRM_CAPTURE */ {
NULL,
- preview_enable_drkframe_capture},
- {OMAP3ISP_PREV_DRK_FRM_SUBTRACT, PREV_DARK_FRAME_SUBTRACT,
+ preview_enable_drkframe_capture,
+ }, /* OMAP3ISP_PREV_DRK_FRM_SUBTRACT */ {
NULL,
- preview_enable_drkframe},
- {OMAP3ISP_PREV_LENS_SHADING, PREV_LENS_SHADING,
+ preview_enable_drkframe,
+ }, /* OMAP3ISP_PREV_LENS_SHADING */ {
preview_config_drkf_shadcomp,
- preview_enable_drkframe},
- {OMAP3ISP_PREV_NF, PREV_NOISE_FILTER,
+ preview_enable_drkframe,
+ }, /* OMAP3ISP_PREV_NF */ {
preview_config_noisefilter,
- preview_enable_noisefilter},
- {OMAP3ISP_PREV_GAMMA, PREV_GAMMA,
+ preview_enable_noisefilter,
+ offsetof(struct prev_params, nf),
+ FIELD_SIZEOF(struct prev_params, nf),
+ offsetof(struct omap3isp_prev_update_config, nf),
+ }, /* OMAP3ISP_PREV_GAMMA */ {
preview_config_gammacorrn,
- NULL},
- {-1, PREV_CONTRAST,
+ NULL,
+ offsetof(struct prev_params, gamma),
+ FIELD_SIZEOF(struct prev_params, gamma),
+ offsetof(struct omap3isp_prev_update_config, gamma),
+ }, /* OMAP3ISP_PREV_CONTRAST */ {
preview_config_contrast,
- NULL},
- {-1, PREV_BRIGHTNESS,
+ NULL,
+ offsetof(struct prev_params, contrast),
+ 0, true,
+ }, /* OMAP3ISP_PREV_BRIGHTNESS */ {
preview_config_brightness,
- NULL},
+ NULL,
+ offsetof(struct prev_params, brightness),
+ 0, true,
+ },
};
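For readers less familiar with the offsetof()/FIELD_SIZEOF() idiom used to build the table above, the userspace sketch below shows the same pattern on hypothetical structures: one generic copy loop driven by a descriptor table replaces per-feature switch cases. It is an illustration only and does not mirror the real parameter layout.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

struct params { int wbal[3]; int contrast; };		/* hypothetical */
struct user_cfg { const void *wbal; const void *contrast; };

struct attr {
	size_t param_offset;
	size_t param_size;
	size_t config_offset;
};

static const struct attr attrs[] = {
	{ offsetof(struct params, wbal),
	  FIELD_SIZEOF(struct params, wbal),
	  offsetof(struct user_cfg, wbal) },
	{ offsetof(struct params, contrast),
	  FIELD_SIZEOF(struct params, contrast),
	  offsetof(struct user_cfg, contrast) },
};

int main(void)
{
	int wbal[3] = { 1, 2, 3 };
	int contrast = 16;
	struct user_cfg cfg = { .wbal = wbal, .contrast = &contrast };
	struct params p;
	size_t i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		/* Fetch the user pointer and the destination through offsets,
		 * much like preview_config() does before copy_from_user(). */
		const void *from = *(const void **)
			((const char *)&cfg + attrs[i].config_offset);
		void *to = (char *)&p + attrs[i].param_offset;

		memcpy(to, from, attrs[i].param_size);
	}

	printf("%d %d\n", p.wbal[2], p.contrast);	/* prints 3 16 */
	return 0;
}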
/*
- * __preview_get_ptrs - helper function which return pointers to members
- * of params and config structures.
- * @params - pointer to preview_params structure.
- * @param - return pointer to appropriate structure field.
- * @configs - pointer to update config structure.
- * @config - return pointer to appropriate structure field.
- * @bit - for which feature to return pointers.
- * Return size of corresponding prev_params member
- */
-static u32
-__preview_get_ptrs(struct prev_params *params, void **param,
- struct omap3isp_prev_update_config *configs,
- void __user **config, u32 bit)
-{
-#define CHKARG(cfgs, cfg, field) \
- if (cfgs && cfg) { \
- *(cfg) = (cfgs)->field; \
- }
-
- switch (bit) {
- case PREV_HORZ_MEDIAN_FILTER:
- *param = &params->hmed;
- CHKARG(configs, config, hmed)
- return sizeof(params->hmed);
- case PREV_NOISE_FILTER:
- *param = &params->nf;
- CHKARG(configs, config, nf)
- return sizeof(params->nf);
- break;
- case PREV_CFA:
- *param = &params->cfa;
- CHKARG(configs, config, cfa)
- return sizeof(params->cfa);
- case PREV_LUMA_ENHANCE:
- *param = &params->luma;
- CHKARG(configs, config, luma)
- return sizeof(params->luma);
- case PREV_CHROMA_SUPPRESS:
- *param = &params->csup;
- CHKARG(configs, config, csup)
- return sizeof(params->csup);
- case PREV_DEFECT_COR:
- *param = &params->dcor;
- CHKARG(configs, config, dcor)
- return sizeof(params->dcor);
- case PREV_BLKADJ:
- *param = &params->blk_adj;
- CHKARG(configs, config, blkadj)
- return sizeof(params->blk_adj);
- case PREV_YCLIMITS:
- *param = &params->yclimit;
- CHKARG(configs, config, yclimit)
- return sizeof(params->yclimit);
- case PREV_RGB2RGB:
- *param = &params->rgb2rgb;
- CHKARG(configs, config, rgb2rgb)
- return sizeof(params->rgb2rgb);
- case PREV_COLOR_CONV:
- *param = &params->rgb2ycbcr;
- CHKARG(configs, config, csc)
- return sizeof(params->rgb2ycbcr);
- case PREV_WB:
- *param = &params->wbal;
- CHKARG(configs, config, wbal)
- return sizeof(params->wbal);
- case PREV_GAMMA:
- *param = &params->gamma;
- CHKARG(configs, config, gamma)
- return sizeof(params->gamma);
- case PREV_CONTRAST:
- *param = &params->contrast;
- return 0;
- case PREV_BRIGHTNESS:
- *param = &params->brightness;
- return 0;
- default:
- *param = NULL;
- *config = NULL;
- break;
- }
- return 0;
-}
-
-/*
* preview_config - Copy and update local structure with userspace preview
* configuration.
* @prev: ISP preview engine
@@ -885,84 +909,103 @@ __preview_get_ptrs(struct prev_params *params, void **param,
static int preview_config(struct isp_prev_device *prev,
struct omap3isp_prev_update_config *cfg)
{
- struct prev_params *params;
- struct preview_update *attr;
- int i, bit, rval = 0;
+ unsigned long flags;
+ unsigned int i;
+ int rval = 0;
+ u32 update;
+ u32 active;
- params = &prev->params;
+ if (cfg->update == 0)
+ return 0;
- if (prev->state != ISP_PIPELINE_STREAM_STOPPED) {
- unsigned long flags;
+ /* Mark the shadow parameters we're going to update as busy. */
+ spin_lock_irqsave(&prev->params.lock, flags);
+ preview_params_lock(prev, cfg->update, true);
+ active = prev->params.active;
+ spin_unlock_irqrestore(&prev->params.lock, flags);
- spin_lock_irqsave(&prev->lock, flags);
- prev->shadow_update = 1;
- spin_unlock_irqrestore(&prev->lock, flags);
- }
+ update = 0;
for (i = 0; i < ARRAY_SIZE(update_attrs); i++) {
- attr = &update_attrs[i];
- bit = 0;
+ const struct preview_update *attr = &update_attrs[i];
+ struct prev_params *params;
+ unsigned int bit = 1 << i;
- if (!(cfg->update & attr->cfg_bit))
+ if (attr->skip || !(cfg->update & bit))
continue;
- bit = cfg->flag & attr->cfg_bit;
- if (bit) {
- void *to = NULL, __user *from = NULL;
- unsigned long sz = 0;
+ params = &prev->params.params[!!(active & bit)];
+
+ if (cfg->flag & bit) {
+ void __user *from = *(void * __user *)
+ ((void *)cfg + attr->config_offset);
+ void *to = (void *)params + attr->param_offset;
+ size_t size = attr->param_size;
- sz = __preview_get_ptrs(params, &to, cfg, &from,
- bit);
- if (to && from && sz) {
- if (copy_from_user(to, from, sz)) {
+ if (to && from && size) {
+ if (copy_from_user(to, from, size)) {
rval = -EFAULT;
break;
}
}
- params->features |= attr->feature_bit;
+ params->features |= bit;
} else {
- params->features &= ~attr->feature_bit;
+ params->features &= ~bit;
}
- prev->update |= attr->feature_bit;
+ update |= bit;
}
- prev->shadow_update = 0;
+ spin_lock_irqsave(&prev->params.lock, flags);
+ preview_params_unlock(prev, update, true);
+ preview_params_switch(prev);
+ spin_unlock_irqrestore(&prev->params.lock, flags);
+
return rval;
}
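Seen from userspace, the config_offset pointers dereferenced above are the members of struct omap3isp_prev_update_config. A hedged sketch of a white-balance update follows; the device node path and the coefficient values are made up, and the sketch assumes the VIDIOC_OMAP3ISP_PRV_CFG private ioctl exported by linux/omap3isp.h.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/omap3isp.h>

int main(void)
{
	struct omap3isp_prev_wbal wbal;
	struct omap3isp_prev_update_config cfg;
	int fd = open("/dev/v4l-subdev3", O_RDWR);	/* hypothetical previewer node */

	if (fd < 0)
		return 1;

	memset(&wbal, 0, sizeof(wbal));		/* fill in real gains here */
	memset(&cfg, 0, sizeof(cfg));
	cfg.update = OMAP3ISP_PREV_WB;		/* copy new coefficients ... */
	cfg.flag = OMAP3ISP_PREV_WB;		/* ... and keep the block enabled */
	cfg.wbal = &wbal;

	ioctl(fd, VIDIOC_OMAP3ISP_PRV_CFG, &cfg);
	close(fd);
	return 0;
}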
/*
* preview_setup_hw - Setup preview registers and/or internal memory
* @prev: pointer to preview private structure
+ * @update: Bitmask of parameters to setup
+ * @active: Bitmask of parameters active in set 0
* Note: can be called from interrupt context
* Return none
*/
-static void preview_setup_hw(struct isp_prev_device *prev)
+static void preview_setup_hw(struct isp_prev_device *prev, u32 update,
+ u32 active)
{
- struct prev_params *params = &prev->params;
- struct preview_update *attr;
- int i, bit;
- void *param_ptr;
+ unsigned int i;
+ u32 features;
+
+ if (update == 0)
+ return;
+
+ features = (prev->params.params[0].features & active)
+ | (prev->params.params[1].features & ~active);
for (i = 0; i < ARRAY_SIZE(update_attrs); i++) {
- attr = &update_attrs[i];
+ const struct preview_update *attr = &update_attrs[i];
+ struct prev_params *params;
+ unsigned int bit = 1 << i;
+ void *param_ptr;
- if (!(prev->update & attr->feature_bit))
+ if (!(update & bit))
continue;
- bit = params->features & attr->feature_bit;
- if (bit) {
+
+ params = &prev->params.params[!(active & bit)];
+
+ if (params->features & bit) {
if (attr->config) {
- __preview_get_ptrs(params, &param_ptr, NULL,
- NULL, bit);
+ param_ptr = (void *)params + attr->param_offset;
attr->config(prev, param_ptr);
}
if (attr->enable)
attr->enable(prev, 1);
- } else
+ } else {
if (attr->enable)
attr->enable(prev, 0);
-
- prev->update &= ~attr->feature_bit;
+ }
}
}
@@ -1000,13 +1043,17 @@ preview_config_ycpos(struct isp_prev_device *prev,
static void preview_config_averager(struct isp_prev_device *prev, u8 average)
{
struct isp_device *isp = to_isp_device(prev);
+ struct prev_params *params;
int reg = 0;
- if (prev->params.cfa.format == OMAP3ISP_CFAFMT_BAYER)
+ params = (prev->params.active & OMAP3ISP_PREV_CFA)
+ ? &prev->params.params[0] : &prev->params.params[1];
+
+ if (params->cfa.format == OMAP3ISP_CFAFMT_BAYER)
reg = ISPPRV_AVE_EVENDIST_2 << ISPPRV_AVE_EVENDIST_SHIFT |
ISPPRV_AVE_ODDDIST_2 << ISPPRV_AVE_ODDDIST_SHIFT |
average;
- else if (prev->params.cfa.format == OMAP3ISP_CFAFMT_RGBFOVEON)
+ else if (params->cfa.format == OMAP3ISP_CFAFMT_RGBFOVEON)
reg = ISPPRV_AVE_EVENDIST_3 << ISPPRV_AVE_EVENDIST_SHIFT |
ISPPRV_AVE_ODDDIST_3 << ISPPRV_AVE_ODDDIST_SHIFT |
average;
@@ -1014,6 +1061,27 @@ static void preview_config_averager(struct isp_prev_device *prev, u8 average)
}
/*
+ * preview_config_input_format - Configure the input format
+ * @prev: The preview engine
+ * @format: Format on the preview engine sink pad
+ *
+ * Enable CFA interpolation for Bayer formats and disable it for greyscale
+ * formats.
+ */
+static void preview_config_input_format(struct isp_prev_device *prev,
+ const struct v4l2_mbus_framefmt *format)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ if (format->code != V4L2_MBUS_FMT_Y10_1X10)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_CFAEN);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_CFAEN);
+}
+
+/*
* preview_config_input_size - Configure the input frame size
*
* The preview engine crops several rows and columns internally depending on
@@ -1024,32 +1092,37 @@ static void preview_config_averager(struct isp_prev_device *prev, u8 average)
*
* See the explanation at the PREV_MARGIN_* definitions for more details.
*/
-static void preview_config_input_size(struct isp_prev_device *prev)
+static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
{
+ const struct v4l2_mbus_framefmt *format = &prev->formats[PREV_PAD_SINK];
struct isp_device *isp = to_isp_device(prev);
- struct prev_params *params = &prev->params;
unsigned int sph = prev->crop.left;
unsigned int eph = prev->crop.left + prev->crop.width - 1;
unsigned int slv = prev->crop.top;
unsigned int elv = prev->crop.top + prev->crop.height - 1;
+ u32 features;
- if (params->features & PREV_CFA) {
+ if (format->code == V4L2_MBUS_FMT_Y10_1X10) {
sph -= 2;
eph += 2;
slv -= 2;
elv += 2;
}
- if (params->features & (PREV_DEFECT_COR | PREV_NOISE_FILTER)) {
+
+ features = (prev->params.params[0].features & active)
+ | (prev->params.params[1].features & ~active);
+
+ if (features & (OMAP3ISP_PREV_DEFECT_COR | OMAP3ISP_PREV_NF)) {
sph -= 2;
eph += 2;
slv -= 2;
elv += 2;
}
- if (params->features & PREV_HORZ_MEDIAN_FILTER) {
+ if (features & OMAP3ISP_PREV_HRZ_MED) {
sph -= 2;
eph += 2;
}
- if (params->features & (PREV_CHROMA_SUPPRESS | PREV_LUMA_ENHANCE))
+ if (features & (OMAP3ISP_PREV_CHROMA_SUPP | OMAP3ISP_PREV_LUMAENH))
sph -= 2;
isp_reg_writel(isp, (sph << ISPPRV_HORZ_INFO_SPH_SHIFT) | eph,
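A worked example of the margin accumulation above, with made-up crop values and every relevant feature enabled; each stage widens the window actually read from the input.

#include <stdio.h>

int main(void)
{
	/* Hypothetical active crop on the previewer sink pad. */
	unsigned int left = 8, top = 4, width = 1008, height = 760;
	int sph = left, eph = left + width - 1;
	int slv = top, elv = top + height - 1;

	/* Bayer input: CFA interpolation needs 2 extra pixels/lines per side. */
	sph -= 2; eph += 2; slv -= 2; elv += 2;
	/* Defect correction or noise filtering enabled. */
	sph -= 2; eph += 2; slv -= 2; elv += 2;
	/* Horizontal median filter enabled. */
	sph -= 2; eph += 2;
	/* Chroma suppression or luma enhancement enabled. */
	sph -= 2;

	printf("HORZ %d..%d  VERT %d..%d\n", sph, eph, slv, elv);
	/* prints HORZ 0..1021  VERT 0..767 */
	return 0;
}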
@@ -1184,8 +1257,16 @@ int omap3isp_preview_busy(struct isp_prev_device *prev)
*/
void omap3isp_preview_restore_context(struct isp_device *isp)
{
- isp->isp_prev.update = PREV_FEATURES_END - 1;
- preview_setup_hw(&isp->isp_prev);
+ struct isp_prev_device *prev = &isp->isp_prev;
+ const u32 update = OMAP3ISP_PREV_FEATURES_END - 1;
+
+ prev->params.params[0].update = prev->params.active & update;
+ prev->params.params[1].update = ~prev->params.active & update;
+
+ preview_setup_hw(prev, update, prev->params.active);
+
+ prev->params.params[0].update = 0;
+ prev->params.params[1].update = 0;
}
/*
@@ -1244,12 +1325,21 @@ static void preview_print_status(struct isp_prev_device *prev)
/*
* preview_init_params - init image processing parameters.
* @prev: pointer to previewer private structure
- * return none
*/
static void preview_init_params(struct isp_prev_device *prev)
{
- struct prev_params *params = &prev->params;
- int i = 0;
+ struct prev_params *params;
+ unsigned int i;
+
+ spin_lock_init(&prev->params.lock);
+
+ prev->params.active = ~0;
+ prev->params.params[0].busy = 0;
+ prev->params.params[0].update = OMAP3ISP_PREV_FEATURES_END - 1;
+ prev->params.params[1].busy = 0;
+ prev->params.params[1].update = 0;
+
+ params = &prev->params.params[0];
/* Init values */
params->contrast = ISPPRV_CONTRAST_DEF * ISPPRV_CONTRAST_UNITS;
@@ -1277,22 +1367,22 @@ static void preview_init_params(struct isp_prev_device *prev)
params->wbal.coef1 = FLR_WBAL_COEF;
params->wbal.coef2 = FLR_WBAL_COEF;
params->wbal.coef3 = FLR_WBAL_COEF;
- params->blk_adj.red = FLR_BLKADJ_RED;
- params->blk_adj.green = FLR_BLKADJ_GREEN;
- params->blk_adj.blue = FLR_BLKADJ_BLUE;
+ params->blkadj.red = FLR_BLKADJ_RED;
+ params->blkadj.green = FLR_BLKADJ_GREEN;
+ params->blkadj.blue = FLR_BLKADJ_BLUE;
params->rgb2rgb = flr_rgb2rgb;
- params->rgb2ycbcr = flr_prev_csc;
+ params->csc = flr_prev_csc;
params->yclimit.minC = ISPPRV_YC_MIN;
params->yclimit.maxC = ISPPRV_YC_MAX;
params->yclimit.minY = ISPPRV_YC_MIN;
params->yclimit.maxY = ISPPRV_YC_MAX;
- params->features = PREV_CFA | PREV_DEFECT_COR | PREV_NOISE_FILTER
- | PREV_GAMMA | PREV_BLKADJ | PREV_YCLIMITS
- | PREV_RGB2RGB | PREV_COLOR_CONV | PREV_WB
- | PREV_BRIGHTNESS | PREV_CONTRAST;
-
- prev->update = PREV_FEATURES_END - 1;
+ params->features = OMAP3ISP_PREV_CFA | OMAP3ISP_PREV_DEFECT_COR
+ | OMAP3ISP_PREV_NF | OMAP3ISP_PREV_GAMMA
+ | OMAP3ISP_PREV_BLKADJ | OMAP3ISP_PREV_YC_LIMIT
+ | OMAP3ISP_PREV_RGB2RGB | OMAP3ISP_PREV_COLOR_CONV
+ | OMAP3ISP_PREV_WB | OMAP3ISP_PREV_BRIGHTNESS
+ | OMAP3ISP_PREV_CONTRAST;
}
/*
@@ -1321,8 +1411,17 @@ static void preview_configure(struct isp_prev_device *prev)
{
struct isp_device *isp = to_isp_device(prev);
struct v4l2_mbus_framefmt *format;
+ unsigned long flags;
+ u32 update;
+ u32 active;
- preview_setup_hw(prev);
+ spin_lock_irqsave(&prev->params.lock, flags);
+ /* Mark all active parameters we are going to touch as busy. */
+ update = preview_params_lock(prev, 0, false);
+ active = prev->params.active;
+ spin_unlock_irqrestore(&prev->params.lock, flags);
+
+ preview_setup_hw(prev, update, active);
if (prev->output & PREVIEW_OUTPUT_MEMORY)
isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
@@ -1343,7 +1442,8 @@ static void preview_configure(struct isp_prev_device *prev)
preview_adjust_bandwidth(prev);
- preview_config_input_size(prev);
+ preview_config_input_format(prev, format);
+ preview_config_input_size(prev, active);
if (prev->input == PREVIEW_INPUT_CCDC)
preview_config_inlineoffset(prev, 0);
@@ -1360,6 +1460,10 @@ static void preview_configure(struct isp_prev_device *prev)
preview_config_averager(prev, 0);
preview_config_ycpos(prev, format->code);
+
+ spin_lock_irqsave(&prev->params.lock, flags);
+ preview_params_unlock(prev, update, false);
+ spin_unlock_irqrestore(&prev->params.lock, flags);
}
/* -----------------------------------------------------------------------------
@@ -1448,25 +1552,30 @@ static void preview_isr_buffer(struct isp_prev_device *prev)
void omap3isp_preview_isr(struct isp_prev_device *prev)
{
unsigned long flags;
+ u32 update;
+ u32 active;
if (omap3isp_module_sync_is_stopping(&prev->wait, &prev->stopping))
return;
- spin_lock_irqsave(&prev->lock, flags);
- if (prev->shadow_update)
- goto done;
+ spin_lock_irqsave(&prev->params.lock, flags);
+ preview_params_switch(prev);
+ update = preview_params_lock(prev, 0, false);
+ active = prev->params.active;
+ spin_unlock_irqrestore(&prev->params.lock, flags);
- preview_setup_hw(prev);
- preview_config_input_size(prev);
-
-done:
- spin_unlock_irqrestore(&prev->lock, flags);
+ preview_setup_hw(prev, update, active);
+ preview_config_input_size(prev, active);
if (prev->input == PREVIEW_INPUT_MEMORY ||
prev->output & PREVIEW_OUTPUT_MEMORY)
preview_isr_buffer(prev);
else if (prev->state == ISP_PIPELINE_STREAM_CONTINUOUS)
preview_enable_oneshot(prev);
+
+ spin_lock_irqsave(&prev->params.lock, flags);
+ preview_params_unlock(prev, update, false);
+ spin_unlock_irqrestore(&prev->params.lock, flags);
}
/* -----------------------------------------------------------------------------
@@ -1552,7 +1661,6 @@ static int preview_set_stream(struct v4l2_subdev *sd, int enable)
struct isp_video *video_out = &prev->video_out;
struct isp_device *isp = to_isp_device(prev);
struct device *dev = to_device(prev);
- unsigned long flags;
if (prev->state == ISP_PIPELINE_STREAM_STOPPED) {
if (enable == ISP_PIPELINE_STREAM_STOPPED)
@@ -1589,11 +1697,9 @@ static int preview_set_stream(struct v4l2_subdev *sd, int enable)
if (omap3isp_module_sync_idle(&sd->entity, &prev->wait,
&prev->stopping))
dev_dbg(dev, "%s: stop timeout.\n", sd->name);
- spin_lock_irqsave(&prev->lock, flags);
omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_PREVIEW_READ);
omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_PREVIEW_WRITE);
omap3isp_subclk_disable(isp, OMAP3_ISP_SUBCLK_PREVIEW);
- spin_unlock_irqrestore(&prev->lock, flags);
isp_video_dmaqueue_flags_clr(video_out);
break;
}
@@ -1624,6 +1730,7 @@ __preview_get_crop(struct isp_prev_device *prev, struct v4l2_subdev_fh *fh,
/* previewer format descriptions */
static const unsigned int preview_input_fmts[] = {
+ V4L2_MBUS_FMT_Y10_1X10,
V4L2_MBUS_FMT_SGRBG10_1X10,
V4L2_MBUS_FMT_SRGGB10_1X10,
V4L2_MBUS_FMT_SBGGR10_1X10,
@@ -1822,55 +1929,89 @@ static int preview_enum_frame_size(struct v4l2_subdev *sd,
}
/*
- * preview_get_crop - Retrieve the crop rectangle on a pad
+ * preview_get_selection - Retrieve a selection rectangle on a pad
* @sd: ISP preview V4L2 subdevice
* @fh: V4L2 subdev file handle
- * @crop: crop rectangle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangles are the crop rectangles on the sink pad.
*
* Return 0 on success or a negative error code otherwise.
*/
-static int preview_get_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int preview_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (sel->pad != PREV_PAD_SINK)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = INT_MAX;
+ sel->r.height = INT_MAX;
+
+ format = __preview_get_format(prev, fh, PREV_PAD_SINK,
+ sel->which);
+ preview_try_crop(prev, format, &sel->r);
+ break;
+
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ sel->r = *__preview_get_crop(prev, fh, sel->which);
+ break;
- /* Cropping is only supported on the sink pad. */
- if (crop->pad != PREV_PAD_SINK)
+ default:
return -EINVAL;
+ }
- crop->rect = *__preview_get_crop(prev, fh, crop->which);
return 0;
}
/*
- * preview_set_crop - Retrieve the crop rectangle on a pad
+ * preview_set_selection - Set a selection rectangle on a pad
* @sd: ISP preview V4L2 subdevice
* @fh: V4L2 subdev file handle
- * @crop: crop rectangle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangle is the actual crop rectangle on the sink pad.
*
* Return 0 on success or a negative error code otherwise.
*/
-static int preview_set_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int preview_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- /* Cropping is only supported on the sink pad. */
- if (crop->pad != PREV_PAD_SINK)
+ if (sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL ||
+ sel->pad != PREV_PAD_SINK)
return -EINVAL;
/* The crop rectangle can't be changed while streaming. */
if (prev->state != ISP_PIPELINE_STREAM_STOPPED)
return -EBUSY;
- format = __preview_get_format(prev, fh, PREV_PAD_SINK, crop->which);
- preview_try_crop(prev, format, &crop->rect);
- *__preview_get_crop(prev, fh, crop->which) = crop->rect;
+ /* Modifying the crop rectangle always changes the format on the source
+ * pad. If the KEEP_CONFIG flag is set, just return the current crop
+ * rectangle.
+ */
+ if (sel->flags & V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG) {
+ sel->r = *__preview_get_crop(prev, fh, sel->which);
+ return 0;
+ }
+
+ format = __preview_get_format(prev, fh, PREV_PAD_SINK, sel->which);
+ preview_try_crop(prev, format, &sel->r);
+ *__preview_get_crop(prev, fh, sel->which) = sel->r;
/* Update the source format. */
- format = __preview_get_format(prev, fh, PREV_PAD_SOURCE, crop->which);
- preview_try_format(prev, fh, PREV_PAD_SOURCE, format, crop->which);
+ format = __preview_get_format(prev, fh, PREV_PAD_SOURCE, sel->which);
+ preview_try_format(prev, fh, PREV_PAD_SOURCE, format, sel->which);
return 0;
}
@@ -1979,8 +2120,8 @@ static const struct v4l2_subdev_pad_ops preview_v4l2_pad_ops = {
.enum_frame_size = preview_enum_frame_size,
.get_fmt = preview_get_format,
.set_fmt = preview_set_format,
- .get_crop = preview_get_crop,
- .set_crop = preview_set_crop,
+ .get_selection = preview_get_selection,
+ .set_selection = preview_set_selection,
};
/* subdev operations */
@@ -2076,6 +2217,7 @@ static int preview_link_setup(struct media_entity *entity,
/* media operations */
static const struct media_entity_operations preview_media_ops = {
.link_setup = preview_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
};
void omap3isp_preview_unregister_entities(struct isp_prev_device *prev)
@@ -2201,7 +2343,7 @@ error_video_in:
}
/*
- * isp_preview_init - Previewer initialization.
+ * omap3isp_preview_init - Previewer initialization.
* @dev : Pointer to ISP device
* return -ENOMEM or zero on success
*/
@@ -2209,8 +2351,8 @@ int omap3isp_preview_init(struct isp_device *isp)
{
struct isp_prev_device *prev = &isp->isp_prev;
- spin_lock_init(&prev->lock);
init_waitqueue_head(&prev->wait);
+
preview_init_params(prev);
return preview_init_entities(prev);
diff --git a/drivers/media/video/omap3isp/isppreview.h b/drivers/media/video/omap3isp/isppreview.h
index 09686607973c..6663ab64e4b1 100644
--- a/drivers/media/video/omap3isp/isppreview.h
+++ b/drivers/media/video/omap3isp/isppreview.h
@@ -45,29 +45,10 @@
#define ISPPRV_CONTRAST_HIGH 0xFF
#define ISPPRV_CONTRAST_UNITS 0x1
-/* Features list */
-#define PREV_LUMA_ENHANCE OMAP3ISP_PREV_LUMAENH
-#define PREV_INVERSE_ALAW OMAP3ISP_PREV_INVALAW
-#define PREV_HORZ_MEDIAN_FILTER OMAP3ISP_PREV_HRZ_MED
-#define PREV_CFA OMAP3ISP_PREV_CFA
-#define PREV_CHROMA_SUPPRESS OMAP3ISP_PREV_CHROMA_SUPP
-#define PREV_WB OMAP3ISP_PREV_WB
-#define PREV_BLKADJ OMAP3ISP_PREV_BLKADJ
-#define PREV_RGB2RGB OMAP3ISP_PREV_RGB2RGB
-#define PREV_COLOR_CONV OMAP3ISP_PREV_COLOR_CONV
-#define PREV_YCLIMITS OMAP3ISP_PREV_YC_LIMIT
-#define PREV_DEFECT_COR OMAP3ISP_PREV_DEFECT_COR
-#define PREV_GAMMA_BYPASS OMAP3ISP_PREV_GAMMABYPASS
-#define PREV_DARK_FRAME_CAPTURE OMAP3ISP_PREV_DRK_FRM_CAPTURE
-#define PREV_DARK_FRAME_SUBTRACT OMAP3ISP_PREV_DRK_FRM_SUBTRACT
-#define PREV_LENS_SHADING OMAP3ISP_PREV_LENS_SHADING
-#define PREV_NOISE_FILTER OMAP3ISP_PREV_NF
-#define PREV_GAMMA OMAP3ISP_PREV_GAMMA
-
-#define PREV_CONTRAST (1 << 17)
-#define PREV_BRIGHTNESS (1 << 18)
-#define PREV_AVERAGER (1 << 19)
-#define PREV_FEATURES_END (1 << 20)
+/* Additional features not listed in linux/omap3isp.h */
+#define OMAP3ISP_PREV_CONTRAST (1 << 17)
+#define OMAP3ISP_PREV_BRIGHTNESS (1 << 18)
+#define OMAP3ISP_PREV_FEATURES_END (1 << 19)
enum preview_input_entity {
PREVIEW_INPUT_NONE,
@@ -88,6 +69,8 @@ enum preview_ycpos_mode {
/*
* struct prev_params - Structure for all configuration
+ * @busy: Bitmask of busy parameters (being updated or used)
+ * @update: Bitmask of the parameters to be updated
* @features: Set of features enabled.
* @cfa: CFA coefficients.
* @csup: Chroma suppression coefficients.
@@ -96,15 +79,17 @@ enum preview_ycpos_mode {
* @dcor: Noise filter coefficients.
* @gamma: Gamma coefficients.
* @wbal: White Balance parameters.
- * @blk_adj: Black adjustment parameters.
+ * @blkadj: Black adjustment parameters.
* @rgb2rgb: RGB blending parameters.
- * @rgb2ycbcr: RGB to ycbcr parameters.
+ * @csc: Color space conversion (RGB to YCbCr) parameters.
* @hmed: Horizontal median filter.
* @yclimit: YC limits parameters.
* @contrast: Contrast.
* @brightness: Brightness.
*/
struct prev_params {
+ u32 busy;
+ u32 update;
u32 features;
struct omap3isp_prev_cfa cfa;
struct omap3isp_prev_csup csup;
@@ -113,35 +98,15 @@ struct prev_params {
struct omap3isp_prev_dcor dcor;
struct omap3isp_prev_gtables gamma;
struct omap3isp_prev_wbal wbal;
- struct omap3isp_prev_blkadj blk_adj;
+ struct omap3isp_prev_blkadj blkadj;
struct omap3isp_prev_rgbtorgb rgb2rgb;
- struct omap3isp_prev_csc rgb2ycbcr;
+ struct omap3isp_prev_csc csc;
struct omap3isp_prev_hmed hmed;
struct omap3isp_prev_yclimit yclimit;
u8 contrast;
u8 brightness;
};
-/*
- * struct isptables_update - Structure for Table Configuration.
- * @update: Specifies which tables should be updated.
- * @flag: Specifies which tables should be enabled.
- * @nf: Pointer to structure for Noise Filter
- * @lsc: Pointer to LSC gain table. (currently not used)
- * @gamma: Pointer to gamma correction tables.
- * @cfa: Pointer to color filter array configuration.
- * @wbal: Pointer to colour and digital gain configuration.
- */
-struct isptables_update {
- u32 update;
- u32 flag;
- struct omap3isp_prev_nf *nf;
- u32 *lsc;
- struct omap3isp_prev_gtables *gamma;
- struct omap3isp_prev_cfa *cfa;
- struct omap3isp_prev_wbal *wbal;
-};
-
/* Sink and source previewer pads */
#define PREV_PAD_SINK 0
#define PREV_PAD_SOURCE 1
@@ -157,12 +122,11 @@ struct isptables_update {
* @output: Bitmask of the active output
* @video_in: Input video entity
* @video_out: Output video entity
- * @params: Module configuration data
- * @shadow_update: If set, update the hardware configured in the next interrupt
+ * @params.params: Active and shadow parameter sets
+ * @params.active: Bitmask of parameters active in set 0
+ * @params.lock: Parameters lock, protects params.active and both parameter sets
* @underrun: Whether the preview entity has queued buffers on the output
* @state: Current preview pipeline state
- * @lock: Shadow update lock
- * @update: Bitmask of the parameters to be updated
*
* This structure is used to store the OMAP ISP Preview module Information.
*/
@@ -179,13 +143,15 @@ struct isp_prev_device {
struct isp_video video_in;
struct isp_video video_out;
- struct prev_params params;
- unsigned int shadow_update:1;
+ struct {
+ struct prev_params params[2];
+ u32 active;
+ spinlock_t lock;
+ } params;
+
enum isp_pipeline_stream_state state;
wait_queue_head_t wait;
atomic_t stopping;
- spinlock_t lock;
- u32 update;
};
struct isp_device;
diff --git a/drivers/media/video/omap3isp/ispqueue.h b/drivers/media/video/omap3isp/ispqueue.h
index 92c5a12157d5..908dfd712e8e 100644
--- a/drivers/media/video/omap3isp/ispqueue.h
+++ b/drivers/media/video/omap3isp/ispqueue.h
@@ -90,7 +90,7 @@ struct isp_video_buffer {
void *vaddr;
/* For userspace buffers. */
- unsigned long vm_flags;
+ vm_flags_t vm_flags;
unsigned long offset;
unsigned int npages;
struct page **pages;
diff --git a/drivers/media/video/omap3isp/ispresizer.c b/drivers/media/video/omap3isp/ispresizer.c
index 6958a9e3dc22..14041c9c8643 100644
--- a/drivers/media/video/omap3isp/ispresizer.c
+++ b/drivers/media/video/omap3isp/ispresizer.c
@@ -1188,32 +1188,6 @@ static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
}
/*
- * resizer_g_crop - handle get crop subdev operation
- * @sd : pointer to v4l2 subdev structure
- * @pad : subdev pad
- * @crop : pointer to crop structure
- * @which : active or try format
- * return zero
- */
-static int resizer_g_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
-{
- struct isp_res_device *res = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
- struct resizer_ratio ratio;
-
- /* Only sink pad has crop capability */
- if (crop->pad != RESZ_PAD_SINK)
- return -EINVAL;
-
- format = __resizer_get_format(res, fh, RESZ_PAD_SOURCE, crop->which);
- crop->rect = *__resizer_get_crop(res, fh, crop->which);
- resizer_calc_ratios(res, &crop->rect, format, &ratio);
-
- return 0;
-}
-
-/*
* resizer_try_crop - mangles crop parameters.
*/
static void resizer_try_crop(const struct v4l2_mbus_framefmt *sink,
@@ -1223,7 +1197,7 @@ static void resizer_try_crop(const struct v4l2_mbus_framefmt *sink,
const unsigned int spv = DEFAULT_PHASE;
const unsigned int sph = DEFAULT_PHASE;
- /* Crop rectangle is constrained to the output size so that zoom ratio
+ /* Crop rectangle is constrained by the output size so that zoom ratio
* cannot exceed +/-4.0.
*/
unsigned int min_width =
@@ -1248,51 +1222,115 @@ static void resizer_try_crop(const struct v4l2_mbus_framefmt *sink,
}
/*
- * resizer_s_crop - handle set crop subdev operation
- * @sd : pointer to v4l2 subdev structure
- * @pad : subdev pad
- * @crop : pointer to crop structure
- * @which : active or try format
- * return -EINVAL or zero when succeed
+ * resizer_get_selection - Retrieve a selection rectangle on a pad
+ * @sd: ISP resizer V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangles are the crop rectangles on the sink pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
*/
-static int resizer_s_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int resizer_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct isp_res_device *res = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format_source;
+ struct v4l2_mbus_framefmt *format_sink;
+ struct resizer_ratio ratio;
+
+ if (sel->pad != RESZ_PAD_SINK)
+ return -EINVAL;
+
+ format_sink = __resizer_get_format(res, fh, RESZ_PAD_SINK,
+ sel->which);
+ format_source = __resizer_get_format(res, fh, RESZ_PAD_SOURCE,
+ sel->which);
+
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = INT_MAX;
+ sel->r.height = INT_MAX;
+
+ resizer_try_crop(format_sink, format_source, &sel->r);
+ resizer_calc_ratios(res, &sel->r, format_source, &ratio);
+ break;
+
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ sel->r = *__resizer_get_crop(res, fh, sel->which);
+ resizer_calc_ratios(res, &sel->r, format_source, &ratio);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * resizer_set_selection - Set a selection rectangle on a pad
+ * @sd: ISP resizer V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangle is the actual crop rectangle on the sink pad.
+ *
+ * FIXME: This function currently behaves as if the KEEP_CONFIG selection flag
+ * was always set.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int resizer_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
struct isp_device *isp = to_isp_device(res);
struct v4l2_mbus_framefmt *format_sink, *format_source;
struct resizer_ratio ratio;
- /* Only sink pad has crop capability */
- if (crop->pad != RESZ_PAD_SINK)
+ if (sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL ||
+ sel->pad != RESZ_PAD_SINK)
return -EINVAL;
format_sink = __resizer_get_format(res, fh, RESZ_PAD_SINK,
- crop->which);
+ sel->which);
format_source = __resizer_get_format(res, fh, RESZ_PAD_SOURCE,
- crop->which);
+ sel->which);
dev_dbg(isp->dev, "%s: L=%d,T=%d,W=%d,H=%d,which=%d\n", __func__,
- crop->rect.left, crop->rect.top, crop->rect.width,
- crop->rect.height, crop->which);
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height,
+ sel->which);
dev_dbg(isp->dev, "%s: input=%dx%d, output=%dx%d\n", __func__,
format_sink->width, format_sink->height,
format_source->width, format_source->height);
- resizer_try_crop(format_sink, format_source, &crop->rect);
- *__resizer_get_crop(res, fh, crop->which) = crop->rect;
- resizer_calc_ratios(res, &crop->rect, format_source, &ratio);
+ /* Clamp the crop rectangle to the bounds, and then mangle it further to
+ * fulfill the TRM equations. Store the clamped but otherwise unmangled
+ * rectangle to avoid cropping the input multiple times: when an
+ * application sets the output format, the current crop rectangle is
+ * mangled during crop rectangle computation, which would lead to a new,
+ * smaller input crop rectangle every time the output size is set if we
+ * stored the mangled rectangle.
+ */
+ resizer_try_crop(format_sink, format_source, &sel->r);
+ *__resizer_get_crop(res, fh, sel->which) = sel->r;
+ resizer_calc_ratios(res, &sel->r, format_source, &ratio);
- if (crop->which == V4L2_SUBDEV_FORMAT_TRY)
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
return 0;
res->ratio = ratio;
- res->crop.active = crop->rect;
+ res->crop.active = sel->r;
/*
- * s_crop can be called while streaming is on. In this case
- * the crop values will be set in the next IRQ.
+ * set_selection can be called while streaming is on. In this case the
+ * crop values will be set in the next IRQ.
*/
if (res->state != ISP_PIPELINE_STREAM_STOPPED)
res->applycrop = 1;
@@ -1530,8 +1568,8 @@ static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
.enum_frame_size = resizer_enum_frame_size,
.get_fmt = resizer_get_format,
.set_fmt = resizer_set_format,
- .get_crop = resizer_g_crop,
- .set_crop = resizer_s_crop,
+ .get_selection = resizer_get_selection,
+ .set_selection = resizer_set_selection,
};
/* subdev operations */
@@ -1603,6 +1641,7 @@ static int resizer_link_setup(struct media_entity *entity,
/* media operations */
static const struct media_entity_operations resizer_media_ops = {
.link_setup = resizer_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
};
void omap3isp_resizer_unregister_entities(struct isp_res_device *res)
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index 11871ecc6d25..b8640be692f1 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -1032,7 +1032,7 @@ int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
if (sub->type != stat->event_type)
return -EINVAL;
- return v4l2_event_subscribe(fh, sub, STAT_NEVENTS);
+ return v4l2_event_subscribe(fh, sub, STAT_NEVENTS, NULL);
}
int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index b02070057724..b37379d39cdd 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -46,6 +46,10 @@
* Helper functions
*/
+/*
+ * NOTE: When adding new media bus codes, always remember to add
+ * corresponding in-memory formats to the table below!!!
+ */
static struct isp_format_info formats[] = {
{ V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
@@ -68,9 +72,18 @@ static struct isp_format_info formats[] = {
{ V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
V4L2_PIX_FMT_SRGGB8, 8, },
+ { V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SBGGR10_1X10, 0,
+ V4L2_PIX_FMT_SBGGR10DPCM8, 8, },
+ { V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SGBRG10_1X10, 0,
+ V4L2_PIX_FMT_SGBRG10DPCM8, 8, },
{ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
V4L2_MBUS_FMT_SGRBG10_1X10, 0,
V4L2_PIX_FMT_SGRBG10DPCM8, 8, },
+ { V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SRGGB10_1X10, 0,
+ V4L2_PIX_FMT_SRGGB10DPCM8, 8, },
{ V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
V4L2_PIX_FMT_SBGGR10, 10, },
@@ -117,37 +130,6 @@ omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
}
/*
- * Decide whether desired output pixel code can be obtained with
- * the lane shifter by shifting the input pixel code.
- * @in: input pixelcode to shifter
- * @out: output pixelcode from shifter
- * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
- *
- * return true if the combination is possible
- * return false otherwise
- */
-static bool isp_video_is_shiftable(enum v4l2_mbus_pixelcode in,
- enum v4l2_mbus_pixelcode out,
- unsigned int additional_shift)
-{
- const struct isp_format_info *in_info, *out_info;
-
- if (in == out)
- return true;
-
- in_info = omap3isp_video_format_info(in);
- out_info = omap3isp_video_format_info(out);
-
- if ((in_info->flavor == 0) || (out_info->flavor == 0))
- return false;
-
- if (in_info->flavor != out_info->flavor)
- return false;
-
- return in_info->bpp - out_info->bpp + additional_shift <= 6;
-}
-
-/*
* isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
* @video: ISP video instance
* @mbus: v4l2_mbus_framefmt format (input)
@@ -242,8 +224,8 @@ isp_video_remote_subdev(struct isp_video *video, u32 *pad)
}
/* Return a pointer to the ISP video instance at the far end of the pipeline. */
-static struct isp_video *
-isp_video_far_end(struct isp_video *video)
+static int isp_video_get_graph_data(struct isp_video *video,
+ struct isp_pipeline *pipe)
{
struct media_entity_graph graph;
struct media_entity *entity = &video->video.entity;
@@ -254,21 +236,38 @@ isp_video_far_end(struct isp_video *video)
media_entity_graph_walk_start(&graph, entity);
while ((entity = media_entity_graph_walk_next(&graph))) {
+ struct isp_video *__video;
+
+ pipe->entities |= 1 << entity->id;
+
+ if (far_end != NULL)
+ continue;
+
if (entity == &video->video.entity)
continue;
if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
continue;
- far_end = to_isp_video(media_entity_to_video_device(entity));
- if (far_end->type != video->type)
- break;
-
- far_end = NULL;
+ __video = to_isp_video(media_entity_to_video_device(entity));
+ if (__video->type != video->type)
+ far_end = __video;
}
mutex_unlock(&mdev->graph_mutex);
- return far_end;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ pipe->input = far_end;
+ pipe->output = video;
+ } else {
+ if (far_end == NULL)
+ return -EPIPE;
+
+ pipe->input = video;
+ pipe->output = far_end;
+ }
+
+ return 0;
}
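
isp_video_get_graph_data() records every entity it visits in pipe->entities, a bitmask indexed by entity ID, while also locating the far-end video node. The IDs are the same ones exposed through the media controller API; the sketch below simply enumerates them from user space, which is handy when interpreting that bitmask (the /dev/media0 path is an assumption).

    /* Sketch: enumerate media entity IDs and names; /dev/media0 assumed. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/media.h>

    int list_entities(void)
    {
            struct media_entity_desc desc;
            __u32 id = 0;
            int fd = open("/dev/media0", O_RDWR);

            if (fd < 0)
                    return -1;
            for (;;) {
                    memset(&desc, 0, sizeof(desc));
                    desc.id = id | MEDIA_ENT_ID_FLAG_NEXT;
                    if (ioctl(fd, MEDIA_IOC_ENUM_ENTITIES, &desc) < 0)
                            break;          /* list exhausted */
                    printf("entity %u: %s\n", desc.id, desc.name);
                    id = desc.id;
            }
            close(fd);
            return 0;
    }
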
/*
@@ -285,52 +284,24 @@ isp_video_far_end(struct isp_video *video)
static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
{
struct isp_device *isp = pipe->output->isp;
- struct v4l2_subdev_format fmt_source;
- struct v4l2_subdev_format fmt_sink;
struct media_pad *pad;
struct v4l2_subdev *subdev;
- int ret;
-
- pipe->max_rate = pipe->l3_ick;
subdev = isp_video_remote_subdev(pipe->output, NULL);
if (subdev == NULL)
return -EPIPE;
while (1) {
- unsigned int shifter_link;
/* Retrieve the sink format */
pad = &subdev->entity.pads[0];
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
- fmt_sink.pad = pad->index;
- fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_sink);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return -EPIPE;
-
/* Update the maximum frame rate */
if (subdev == &isp->isp_res.subdev)
omap3isp_resizer_max_rate(&isp->isp_res,
&pipe->max_rate);
- /* Check ccdc maximum data rate when data comes from sensor
- * TODO: Include ccdc rate in pipe->max_rate and compare the
- * total pipe rate with the input data rate from sensor.
- */
- if (subdev == &isp->isp_ccdc.subdev && pipe->input == NULL) {
- unsigned int rate = UINT_MAX;
-
- omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
- if (isp->isp_ccdc.vpcfg.pixelclk > rate)
- return -ENOSPC;
- }
-
- /* If sink pad is on CCDC, the link has the lane shifter
- * in the middle of it. */
- shifter_link = subdev == &isp->isp_ccdc.subdev;
-
/* Retrieve the source format. Return an error if no source
* entity can be found, and stop checking the pipeline if the
* source entity isn't a subdev.
@@ -343,32 +314,6 @@ static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
break;
subdev = media_entity_to_v4l2_subdev(pad->entity);
-
- fmt_source.pad = pad->index;
- fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return -EPIPE;
-
- /* Check if the two ends match */
- if (fmt_source.format.width != fmt_sink.format.width ||
- fmt_source.format.height != fmt_sink.format.height)
- return -EPIPE;
-
- if (shifter_link) {
- unsigned int parallel_shift = 0;
- if (isp->isp_ccdc.input == CCDC_INPUT_PARALLEL) {
- struct isp_parallel_platform_data *pdata =
- &((struct isp_v4l2_subdevs_group *)
- subdev->host_priv)->bus.parallel;
- parallel_shift = pdata->data_lane_shift * 2;
- }
- if (!isp_video_is_shiftable(fmt_source.format.code,
- fmt_sink.format.code,
- parallel_shift))
- return -EPIPE;
- } else if (fmt_source.format.code != fmt_sink.format.code)
- return -EPIPE;
}
return 0;
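
The per-link width/height/code comparison removed here is not lost: with .link_validate pointing at v4l2_subdev_link_validate (the resizer hunk above shows one such hookup), the media core performs that check when media_entity_pipeline_start() runs. Roughly, the comparison amounts to the following illustrative sketch, which is not the in-tree helper:

    /* Illustrative only: the shape of the format comparison done on each
     * link's source and sink pad formats during link validation. */
    #include <linux/v4l2-mediabus.h>

    static int link_formats_match(const struct v4l2_mbus_framefmt *source,
                                  const struct v4l2_mbus_framefmt *sink)
    {
            return source->width == sink->width &&
                   source->height == sink->height &&
                   source->code == sink->code;
    }
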
@@ -923,6 +868,92 @@ isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
file->f_flags & O_NONBLOCK);
}
+static int isp_video_check_external_subdevs(struct isp_video *video,
+ struct isp_pipeline *pipe)
+{
+ struct isp_device *isp = video->isp;
+ struct media_entity *ents[] = {
+ &isp->isp_csi2a.subdev.entity,
+ &isp->isp_csi2c.subdev.entity,
+ &isp->isp_ccp2.subdev.entity,
+ &isp->isp_ccdc.subdev.entity
+ };
+ struct media_pad *source_pad;
+ struct media_entity *source = NULL;
+ struct media_entity *sink;
+ struct v4l2_subdev_format fmt;
+ struct v4l2_ext_controls ctrls;
+ struct v4l2_ext_control ctrl;
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ents); i++) {
+ /* Is the entity part of the pipeline? */
+ if (!(pipe->entities & (1 << ents[i]->id)))
+ continue;
+
+ /* ISP entities always have sink pad 0. Find the source. */
+ source_pad = media_entity_remote_source(&ents[i]->pads[0]);
+ if (source_pad == NULL)
+ continue;
+
+ source = source_pad->entity;
+ sink = ents[i];
+ break;
+ }
+
+ if (!source) {
+ dev_warn(isp->dev, "can't find source, failing now\n");
+ return ret;
+ }
+
+ if (media_entity_type(source) != MEDIA_ENT_T_V4L2_SUBDEV)
+ return 0;
+
+ pipe->external = media_entity_to_v4l2_subdev(source);
+
+ fmt.pad = source_pad->index;
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
+ pad, get_fmt, NULL, &fmt);
+ if (unlikely(ret < 0)) {
+ dev_warn(isp->dev, "get_fmt returned null!\n");
+ return ret;
+ }
+
+ pipe->external_bpp = omap3isp_video_format_info(fmt.format.code)->bpp;
+
+ memset(&ctrls, 0, sizeof(ctrls));
+ memset(&ctrl, 0, sizeof(ctrl));
+
+ ctrl.id = V4L2_CID_PIXEL_RATE;
+
+ ctrls.count = 1;
+ ctrls.controls = &ctrl;
+
+ ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls);
+ if (ret < 0) {
+ dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
+ pipe->external->name);
+ return ret;
+ }
+
+ pipe->external_rate = ctrl.value64;
+
+ if (pipe->entities & (1 << isp->isp_ccdc.subdev.entity.id)) {
+ unsigned int rate = UINT_MAX;
+ /*
+ * Check that the pixel rate from the external subdev does not
+ * exceed the maximum allowed CCDC pixel rate.
+ */
+ omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
+ if (pipe->external_rate > rate)
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
/*
* Stream management
*
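
isp_video_check_external_subdevs() expects the external subdevice, typically the sensor, to expose a V4L2_CID_PIXEL_RATE control and uses its 64-bit value for the CCDC rate check, replacing the earlier comparison against isp_ccdc.vpcfg.pixelclk. Reading the same control from user space looks roughly like this (the subdev node path is an assumption):

    /* Sketch: read V4L2_CID_PIXEL_RATE (a 64-bit control) from a subdev. */
    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    long long read_pixel_rate(const char *subdev_path)
    {
            struct v4l2_ext_control ctrl;
            struct v4l2_ext_controls ctrls;
            long long rate = -1;
            int fd = open(subdev_path, O_RDWR);

            if (fd < 0)
                    return -1;
            memset(&ctrl, 0, sizeof(ctrl));
            memset(&ctrls, 0, sizeof(ctrls));
            ctrl.id = V4L2_CID_PIXEL_RATE;
            ctrls.ctrl_class = V4L2_CTRL_CLASS_IMAGE_PROC;
            ctrls.count = 1;
            ctrls.controls = &ctrl;
            if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) == 0)
                    rate = ctrl.value64;
            close(fd);
            return rate;
    }
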
@@ -961,7 +992,6 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
struct isp_video *video = video_drvdata(file);
enum isp_pipeline_state state;
struct isp_pipeline *pipe;
- struct isp_video *far_end;
unsigned long flags;
int ret;
@@ -980,46 +1010,45 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
*/
pipe = video->video.entity.pipe
? to_isp_pipeline(&video->video.entity) : &video->pipe;
- media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
+
+ pipe->entities = 0;
+
+ if (video->isp->pdata->set_constraints)
+ video->isp->pdata->set_constraints(video->isp, true);
+ pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
+ pipe->max_rate = pipe->l3_ick;
+
+ ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
+ if (ret < 0)
+ goto err_pipeline_start;
/* Verify that the currently configured format matches the output of
* the connected subdev.
*/
ret = isp_video_check_format(video, vfh);
if (ret < 0)
- goto error;
+ goto err_check_format;
video->bpl_padding = ret;
video->bpl_value = vfh->format.fmt.pix.bytesperline;
- /* Find the ISP video node connected at the far end of the pipeline and
- * update the pipeline.
- */
- far_end = isp_video_far_end(video);
+ ret = isp_video_get_graph_data(video, pipe);
+ if (ret < 0)
+ goto err_check_format;
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
- pipe->input = far_end;
- pipe->output = video;
- } else {
- if (far_end == NULL) {
- ret = -EPIPE;
- goto error;
- }
-
+ else
state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
- pipe->input = video;
- pipe->output = far_end;
- }
- if (video->isp->pdata->set_constraints)
- video->isp->pdata->set_constraints(video->isp, true);
- pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
+ ret = isp_video_check_external_subdevs(video, pipe);
+ if (ret < 0)
+ goto err_check_format;
/* Validate the pipeline and update its state. */
ret = isp_video_validate_pipeline(pipe);
if (ret < 0)
- goto error;
+ goto err_check_format;
pipe->error = false;
@@ -1041,7 +1070,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
ret = omap3isp_video_queue_streamon(&vfh->queue);
if (ret < 0)
- goto error;
+ goto err_check_format;
/* In sensor-to-memory mode, the stream can be started synchronously
* to the stream on command. In memory-to-memory mode, it will be
@@ -1051,32 +1080,34 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
ret = omap3isp_pipeline_set_stream(pipe,
ISP_PIPELINE_STREAM_CONTINUOUS);
if (ret < 0)
- goto error;
+ goto err_set_stream;
spin_lock_irqsave(&video->queue->irqlock, flags);
if (list_empty(&video->dmaqueue))
video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
spin_unlock_irqrestore(&video->queue->irqlock, flags);
}
-error:
- if (ret < 0) {
- omap3isp_video_queue_streamoff(&vfh->queue);
- if (video->isp->pdata->set_constraints)
- video->isp->pdata->set_constraints(video->isp, false);
- media_entity_pipeline_stop(&video->video.entity);
- /* The DMA queue must be emptied here, otherwise CCDC interrupts
- * that will get triggered the next time the CCDC is powered up
- * will try to access buffers that might have been freed but
- * still present in the DMA queue. This can easily get triggered
- * if the above omap3isp_pipeline_set_stream() call fails on a
- * system with a free-running sensor.
- */
- INIT_LIST_HEAD(&video->dmaqueue);
- video->queue = NULL;
- }
+ video->streaming = 1;
+
+ mutex_unlock(&video->stream_lock);
+ return 0;
- if (!ret)
- video->streaming = 1;
+err_set_stream:
+ omap3isp_video_queue_streamoff(&vfh->queue);
+err_check_format:
+ media_entity_pipeline_stop(&video->video.entity);
+err_pipeline_start:
+ if (video->isp->pdata->set_constraints)
+ video->isp->pdata->set_constraints(video->isp, false);
+ /* The DMA queue must be emptied here, otherwise CCDC interrupts that
+ * will get triggered the next time the CCDC is powered up will try to
+ * access buffers that might have been freed but still present in the
+ * DMA queue. This can easily get triggered if the above
+ * omap3isp_pipeline_set_stream() call fails on a system with a
+ * free-running sensor.
+ */
+ INIT_LIST_HEAD(&video->dmaqueue);
+ video->queue = NULL;
mutex_unlock(&video->stream_lock);
return ret;
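
The single error: label in streamon is replaced by cascading err_* labels so that each failure point unwinds only the steps that actually completed. Stripped of the driver specifics, the pattern has this generic shape (placeholder names, not driver code):

    /* Generic cascading-error-label shape; all names are placeholders. */
    static int do_step_a(void) { return 0; }
    static int do_step_b(void) { return 0; }
    static void undo_step_a(void) { }

    int start_pipeline(void)
    {
            int ret;

            ret = do_step_a();
            if (ret < 0)
                    goto err_step_a;

            ret = do_step_b();
            if (ret < 0)
                    goto err_step_b;

            return 0;

    err_step_b:
            undo_step_a();          /* only step A needs unwinding here */
    err_step_a:
            return ret;
    }
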
diff --git a/drivers/media/video/omap3isp/ispvideo.h b/drivers/media/video/omap3isp/ispvideo.h
index d91bdb919be0..5acc909500ec 100644
--- a/drivers/media/video/omap3isp/ispvideo.h
+++ b/drivers/media/video/omap3isp/ispvideo.h
@@ -88,6 +88,7 @@ enum isp_pipeline_state {
/*
* struct isp_pipeline - An ISP hardware pipeline
* @error: A hardware error occurred during capture
+ * @entities: Bitmask of entities in the pipeline (indexed by entity ID)
*/
struct isp_pipeline {
struct media_pipeline pipe;
@@ -96,12 +97,16 @@ struct isp_pipeline {
enum isp_pipeline_stream_state stream_state;
struct isp_video *input;
struct isp_video *output;
+ u32 entities;
unsigned long l3_ick;
unsigned int max_rate;
atomic_t frame_number;
bool do_propagation; /* of frame number */
bool error;
struct v4l2_fract max_timeperframe;
+ struct v4l2_subdev *external;
+ unsigned int external_rate;
+ unsigned int external_bpp;
};
#define to_isp_pipeline(__e) \
diff --git a/drivers/media/video/ov5642.c b/drivers/media/video/ov5642.c
index 80e07794ac8e..0bc93313d37a 100644
--- a/drivers/media/video/ov5642.c
+++ b/drivers/media/video/ov5642.c
@@ -1025,8 +1025,6 @@ static int ov5642_probe(struct i2c_client *client,
priv->crop_rect.height = OV5642_DEFAULT_HEIGHT;
priv->crop_rect.left = (OV5642_MAX_WIDTH - OV5642_DEFAULT_WIDTH) / 2;
priv->crop_rect.top = (OV5642_MAX_HEIGHT - OV5642_DEFAULT_HEIGHT) / 2;
- priv->crop_rect.width = OV5642_DEFAULT_WIDTH;
- priv->crop_rect.height = OV5642_DEFAULT_HEIGHT;
priv->total_width = OV5642_DEFAULT_WIDTH + BLANKING_EXTRA_WIDTH;
priv->total_height = BLANKING_MIN_HEIGHT;
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index e753b5e4d2ce..af2d9086d7e8 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -30,15 +30,19 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
+#include <linux/isa.h>
#include <asm/io.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-device.h>
MODULE_LICENSE("GPL");
-MODULE_VERSION("0.0.4");
+MODULE_VERSION("0.0.5");
#define MOTOROLA 1
#define PHILIPS2 2 /* SAA7191 */
@@ -55,11 +59,11 @@ struct i2c_info {
struct pms {
struct v4l2_device v4l2_dev;
struct video_device vdev;
+ struct v4l2_ctrl_handler hdl;
int height;
int width;
int depth;
int input;
- s32 brightness, saturation, hue, contrast;
struct mutex lock;
int i2c_count;
struct i2c_info i2cinfo[64];
@@ -72,8 +76,6 @@ struct pms {
void __iomem *mem;
};
-static struct pms pms_card;
-
/*
* I/O ports and Shared Memory
*/
@@ -676,8 +678,10 @@ static int pms_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, dev->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "Mediavision PMS", sizeof(vcap->card));
- strlcpy(vcap->bus_info, "ISA", sizeof(vcap->bus_info));
- vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ snprintf(vcap->bus_info, sizeof(vcap->bus_info),
+ "ISA:%s", dev->v4l2_dev.name);
+ vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -716,11 +720,9 @@ static int pms_s_input(struct file *file, void *fh, unsigned int inp)
if (inp > 3)
return -EINVAL;
- mutex_lock(&dev->lock);
dev->input = inp;
pms_videosource(dev, inp & 1);
pms_vcrinput(dev, inp >> 1);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -738,7 +740,6 @@ static int pms_s_std(struct file *file, void *fh, v4l2_std_id *std)
int ret = 0;
dev->std = *std;
- mutex_lock(&dev->lock);
if (dev->std & V4L2_STD_NTSC) {
pms_framerate(dev, 30);
pms_secamcross(dev, 0);
@@ -762,81 +763,31 @@ static int pms_s_std(struct file *file, void *fh, v4l2_std_id *std)
pms_format(dev, 0);
break;
}*/
- mutex_unlock(&dev->lock);
- return 0;
-}
-
-static int pms_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 139);
- case V4L2_CID_CONTRAST:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 70);
- case V4L2_CID_SATURATION:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 64);
- case V4L2_CID_HUE:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 0);
- }
- return -EINVAL;
-}
-
-static int pms_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct pms *dev = video_drvdata(file);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = dev->brightness;
- break;
- case V4L2_CID_CONTRAST:
- ctrl->value = dev->contrast;
- break;
- case V4L2_CID_SATURATION:
- ctrl->value = dev->saturation;
- break;
- case V4L2_CID_HUE:
- ctrl->value = dev->hue;
- break;
- default:
- ret = -EINVAL;
- break;
- }
return ret;
}
-static int pms_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int pms_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct pms *dev = video_drvdata(file);
+ struct pms *dev = container_of(ctrl->handler, struct pms, hdl);
int ret = 0;
- mutex_lock(&dev->lock);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- dev->brightness = ctrl->value;
- pms_brightness(dev, dev->brightness);
+ pms_brightness(dev, ctrl->val);
break;
case V4L2_CID_CONTRAST:
- dev->contrast = ctrl->value;
- pms_contrast(dev, dev->contrast);
+ pms_contrast(dev, ctrl->val);
break;
case V4L2_CID_SATURATION:
- dev->saturation = ctrl->value;
- pms_saturation(dev, dev->saturation);
+ pms_saturation(dev, ctrl->val);
break;
case V4L2_CID_HUE:
- dev->hue = ctrl->value;
- pms_hue(dev, dev->hue);
+ pms_hue(dev, ctrl->val);
break;
default:
ret = -EINVAL;
break;
}
- mutex_unlock(&dev->lock);
return ret;
}
@@ -884,13 +835,11 @@ static int pms_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fm
if (ret)
return ret;
- mutex_lock(&dev->lock);
dev->width = pix->width;
dev->height = pix->height;
dev->depth = (pix->pixelformat == V4L2_PIX_FMT_RGB555) ? 15 : 16;
pms_resolution(dev, dev->width, dev->height);
/* Ok we figured out what to use from our wide choice */
- mutex_unlock(&dev->lock);
return 0;
}
@@ -901,7 +850,7 @@ static int pms_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc
"RGB 5:5:5", V4L2_PIX_FMT_RGB555,
{ 0, 0, 0, 0 }
},
- { 0, 0, 0,
+ { 1, 0, 0,
"RGB 5:6:5", V4L2_PIX_FMT_RGB565,
{ 0, 0, 0, 0 }
},
@@ -922,32 +871,43 @@ static ssize_t pms_read(struct file *file, char __user *buf,
struct pms *dev = video_drvdata(file);
int len;
- mutex_lock(&dev->lock);
len = pms_capture(dev, buf, (dev->depth == 15), count);
- mutex_unlock(&dev->lock);
return len;
}
+static unsigned int pms_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct v4l2_fh *fh = file->private_data;
+ unsigned int res = POLLIN | POLLRDNORM;
+
+ if (v4l2_event_pending(fh))
+ res |= POLLPRI;
+ poll_wait(file, &fh->wait, wait);
+ return res;
+}
+
static const struct v4l2_file_operations pms_fops = {
.owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = pms_poll,
.unlocked_ioctl = video_ioctl2,
.read = pms_read,
};
static const struct v4l2_ioctl_ops pms_ioctl_ops = {
- .vidioc_querycap = pms_querycap,
- .vidioc_g_input = pms_g_input,
- .vidioc_s_input = pms_s_input,
- .vidioc_enum_input = pms_enum_input,
- .vidioc_g_std = pms_g_std,
- .vidioc_s_std = pms_s_std,
- .vidioc_queryctrl = pms_queryctrl,
- .vidioc_g_ctrl = pms_g_ctrl,
- .vidioc_s_ctrl = pms_s_ctrl,
- .vidioc_enum_fmt_vid_cap = pms_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = pms_g_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = pms_s_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = pms_try_fmt_vid_cap,
+ .vidioc_querycap = pms_querycap,
+ .vidioc_g_input = pms_g_input,
+ .vidioc_s_input = pms_s_input,
+ .vidioc_enum_input = pms_enum_input,
+ .vidioc_g_std = pms_g_std,
+ .vidioc_s_std = pms_s_std,
+ .vidioc_enum_fmt_vid_cap = pms_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = pms_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = pms_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = pms_try_fmt_vid_cap,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/*
@@ -956,7 +916,6 @@ static const struct v4l2_ioctl_ops pms_ioctl_ops = {
static int init_mediavision(struct pms *dev)
{
- int id;
int idec, decst;
int i;
static const unsigned char i2c_defs[] = {
@@ -988,7 +947,6 @@ static int init_mediavision(struct pms *dev)
outb(dev->io >> 4, 0x9a01); /* Set IO port */
- id = mvv_read(dev, 3);
decst = pms_i2c_stat(dev, 0x43);
if (decst != -1)
@@ -1068,76 +1026,125 @@ static int enable;
module_param(enable, int, 0);
#endif
-static int __init pms_init(void)
+static const struct v4l2_ctrl_ops pms_ctrl_ops = {
+ .s_ctrl = pms_s_ctrl,
+};
+
+static int pms_probe(struct device *pdev, unsigned int card)
{
- struct pms *dev = &pms_card;
- struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
+ struct pms *dev;
+ struct v4l2_device *v4l2_dev;
+ struct v4l2_ctrl_handler *hdl;
int res;
- strlcpy(v4l2_dev->name, "pms", sizeof(v4l2_dev->name));
-
- v4l2_info(v4l2_dev, "Mediavision Pro Movie Studio driver 0.03\n");
-
#ifndef MODULE
if (!enable) {
- v4l2_err(v4l2_dev,
- "PMS: not enabled, use pms.enable=1 to probe\n");
+ pr_err("PMS: not enabled, use pms.enable=1 to probe\n");
return -ENODEV;
}
#endif
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL)
+ return -ENOMEM;
+
dev->decoder = PHILIPS2;
dev->io = io_port;
dev->data = io_port + 1;
+ v4l2_dev = &dev->v4l2_dev;
+ hdl = &dev->hdl;
- if (init_mediavision(dev)) {
+ res = v4l2_device_register(pdev, v4l2_dev);
+ if (res < 0) {
+ v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ goto free_dev;
+ }
+ v4l2_info(v4l2_dev, "Mediavision Pro Movie Studio driver 0.05\n");
+
+ res = init_mediavision(dev);
+ if (res) {
v4l2_err(v4l2_dev, "Board not found.\n");
- return -ENODEV;
+ goto free_io;
}
- res = v4l2_device_register(NULL, v4l2_dev);
- if (res < 0) {
- v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
- return res;
+ v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_new_std(hdl, &pms_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 139);
+ v4l2_ctrl_new_std(hdl, &pms_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 70);
+ v4l2_ctrl_new_std(hdl, &pms_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 64);
+ v4l2_ctrl_new_std(hdl, &pms_ctrl_ops,
+ V4L2_CID_HUE, 0, 255, 1, 0);
+ if (hdl->error) {
+ res = hdl->error;
+ goto free_hdl;
}
+ mutex_init(&dev->lock);
strlcpy(dev->vdev.name, v4l2_dev->name, sizeof(dev->vdev.name));
dev->vdev.v4l2_dev = v4l2_dev;
+ dev->vdev.ctrl_handler = hdl;
dev->vdev.fops = &pms_fops;
dev->vdev.ioctl_ops = &pms_ioctl_ops;
dev->vdev.release = video_device_release_empty;
+ dev->vdev.lock = &dev->lock;
+ dev->vdev.tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM;
+ set_bit(V4L2_FL_USE_FH_PRIO, &dev->vdev.flags);
video_set_drvdata(&dev->vdev, dev);
- mutex_init(&dev->lock);
dev->std = V4L2_STD_NTSC_M;
dev->height = 240;
dev->width = 320;
- dev->depth = 15;
- dev->brightness = 139;
- dev->contrast = 70;
- dev->hue = 0;
- dev->saturation = 64;
+ dev->depth = 16;
pms_swsense(dev, 75);
pms_resolution(dev, 320, 240);
pms_videosource(dev, 0);
pms_vcrinput(dev, 0);
- if (video_register_device(&dev->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
- v4l2_device_unregister(&dev->v4l2_dev);
- release_region(dev->io, 3);
- release_region(0x9a01, 1);
- iounmap(dev->mem);
- return -EINVAL;
- }
- return 0;
+ v4l2_ctrl_handler_setup(hdl);
+ res = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, video_nr);
+ if (res >= 0)
+ return 0;
+
+free_hdl:
+ v4l2_ctrl_handler_free(hdl);
+ v4l2_device_unregister(&dev->v4l2_dev);
+free_io:
+ release_region(dev->io, 3);
+ release_region(0x9a01, 1);
+ iounmap(dev->mem);
+free_dev:
+ kfree(dev);
+ return res;
}
-static void __exit pms_exit(void)
+static int pms_remove(struct device *pdev, unsigned int card)
{
- struct pms *dev = &pms_card;
+ struct pms *dev = dev_get_drvdata(pdev);
video_unregister_device(&dev->vdev);
+ v4l2_ctrl_handler_free(&dev->hdl);
release_region(dev->io, 3);
release_region(0x9a01, 1);
iounmap(dev->mem);
+ return 0;
+}
+
+static struct isa_driver pms_driver = {
+ .probe = pms_probe,
+ .remove = pms_remove,
+ .driver = {
+ .name = "pms",
+ },
+};
+
+static int __init pms_init(void)
+{
+ return isa_register_driver(&pms_driver, 1);
+}
+
+static void __exit pms_exit(void)
+{
+ isa_unregister_driver(&pms_driver);
}
module_init(pms_init);
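
With the control handler wired into the video node, the driver also gains control events: v4l2_ctrl_subscribe_event handles VIDIOC_SUBSCRIBE_EVENT and pms_poll reports them via POLLPRI. A minimal user-space sketch of consuming such an event (the /dev/video0 path is an assumption):

    /* Sketch: subscribe to control change events and drain one of them. */
    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    int wait_for_ctrl_event(void)
    {
            struct v4l2_event_subscription sub;
            struct v4l2_event ev;
            struct pollfd pfd;
            int fd = open("/dev/video0", O_RDWR);

            if (fd < 0)
                    return -1;
            memset(&sub, 0, sizeof(sub));
            sub.type = V4L2_EVENT_CTRL;
            sub.id = V4L2_CID_BRIGHTNESS;
            if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0) {
                    close(fd);
                    return -1;
            }
            pfd.fd = fd;
            pfd.events = POLLPRI;           /* events arrive as POLLPRI */
            if (poll(&pfd, 1, -1) > 0 && ioctl(fd, VIDIOC_DQEVENT, &ev) == 0)
                    printf("control 0x%x changed\n", ev.id);
            close(fd);
            return 0;
    }
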
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
index 305e6aaa844a..036952f2a3cb 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
@@ -317,18 +317,16 @@ struct pvr2_hdw {
v4l2_std_id std_mask_eeprom; // Hardware supported selections
v4l2_std_id std_mask_avail; // Which standards we may select from
v4l2_std_id std_mask_cur; // Currently selected standard(s)
- unsigned int std_enum_cnt; // # of enumerated standards
int std_enum_cur; // selected standard enumeration value
int std_dirty; // True if std_mask_cur has changed
struct pvr2_ctl_info std_info_enum;
struct pvr2_ctl_info std_info_avail;
struct pvr2_ctl_info std_info_cur;
- struct v4l2_standard *std_defs;
- const char **std_enum_names;
+ struct pvr2_ctl_info std_info_detect;
// Generated string names, one per actual V4L2 standard
const char *std_mask_ptrs[32];
- char std_mask_names[32][10];
+ char std_mask_names[32][16];
int unit_number; /* ID for driver instance */
unsigned long serial_number; /* ID for hardware itself */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index ebc2c7e39233..fb828ba1dbbe 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -334,8 +334,6 @@ static void pvr2_hdw_state_log_state(struct pvr2_hdw *);
static int pvr2_hdw_cmd_usbstream(struct pvr2_hdw *hdw,int runFl);
static int pvr2_hdw_commit_setup(struct pvr2_hdw *hdw);
static int pvr2_hdw_get_eeprom_addr(struct pvr2_hdw *hdw);
-static void pvr2_hdw_internal_find_stdenum(struct pvr2_hdw *hdw);
-static void pvr2_hdw_internal_set_std_avail(struct pvr2_hdw *hdw);
static void pvr2_hdw_quiescent_timeout(unsigned long);
static void pvr2_hdw_decoder_stabilization_timeout(unsigned long);
static void pvr2_hdw_encoder_wait_timeout(unsigned long);
@@ -346,7 +344,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
void *write_data,unsigned int write_len,
void *read_data,unsigned int read_len);
static int pvr2_hdw_check_cropcap(struct pvr2_hdw *hdw);
-
+static v4l2_std_id pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw);
static void trace_stbit(const char *name,int val)
{
@@ -840,6 +838,12 @@ static int ctrl_hsm_get(struct pvr2_ctrl *cptr,int *vp)
return 0;
}
+static int ctrl_stddetect_get(struct pvr2_ctrl *cptr, int *vp)
+{
+ *vp = pvr2_hdw_get_detected_std(cptr->hdw);
+ return 0;
+}
+
static int ctrl_stdavail_get(struct pvr2_ctrl *cptr,int *vp)
{
*vp = cptr->hdw->std_mask_avail;
@@ -854,8 +858,7 @@ static int ctrl_stdavail_set(struct pvr2_ctrl *cptr,int m,int v)
ns = (ns & ~m) | (v & m);
if (ns == hdw->std_mask_avail) return 0;
hdw->std_mask_avail = ns;
- pvr2_hdw_internal_set_std_avail(hdw);
- pvr2_hdw_internal_find_stdenum(hdw);
+ hdw->std_info_cur.def.type_bitmask.valid_bits = hdw->std_mask_avail;
return 0;
}
@@ -895,7 +898,6 @@ static int ctrl_stdcur_set(struct pvr2_ctrl *cptr,int m,int v)
if (ns == hdw->std_mask_cur) return 0;
hdw->std_mask_cur = ns;
hdw->std_dirty = !0;
- pvr2_hdw_internal_find_stdenum(hdw);
return 0;
}
@@ -941,40 +943,6 @@ static int ctrl_audio_modes_present_get(struct pvr2_ctrl *cptr,int *vp)
}
-static int ctrl_stdenumcur_set(struct pvr2_ctrl *cptr,int m,int v)
-{
- struct pvr2_hdw *hdw = cptr->hdw;
- if (v < 0) return -EINVAL;
- if (v > hdw->std_enum_cnt) return -EINVAL;
- hdw->std_enum_cur = v;
- if (!v) return 0;
- v--;
- if (hdw->std_mask_cur == hdw->std_defs[v].id) return 0;
- hdw->std_mask_cur = hdw->std_defs[v].id;
- hdw->std_dirty = !0;
- return 0;
-}
-
-
-static int ctrl_stdenumcur_get(struct pvr2_ctrl *cptr,int *vp)
-{
- *vp = cptr->hdw->std_enum_cur;
- return 0;
-}
-
-
-static int ctrl_stdenumcur_is_dirty(struct pvr2_ctrl *cptr)
-{
- return cptr->hdw->std_dirty != 0;
-}
-
-
-static void ctrl_stdenumcur_clear_dirty(struct pvr2_ctrl *cptr)
-{
- cptr->hdw->std_dirty = 0;
-}
-
-
#define DEFINT(vmin,vmax) \
.type = pvr2_ctl_int, \
.def.type_int.min_value = vmin, \
@@ -1293,15 +1261,14 @@ static const struct pvr2_ctl_info control_defs[] = {
.sym_to_val = ctrl_std_sym_to_val,
.type = pvr2_ctl_bitmask,
},{
- .desc = "Video Standard Name",
- .name = "video_standard",
- .internal_id = PVR2_CID_STDENUM,
+ .desc = "Video Standards Detected Mask",
+ .name = "video_standard_mask_detected",
+ .internal_id = PVR2_CID_STDDETECT,
.skip_init = !0,
- .get_value = ctrl_stdenumcur_get,
- .set_value = ctrl_stdenumcur_set,
- .is_dirty = ctrl_stdenumcur_is_dirty,
- .clear_dirty = ctrl_stdenumcur_clear_dirty,
- .type = pvr2_ctl_enum,
+ .get_value = ctrl_stddetect_get,
+ .val_to_sym = ctrl_std_val_to_sym,
+ .sym_to_val = ctrl_std_sym_to_val,
+ .type = pvr2_ctl_bitmask,
}
};
@@ -1936,7 +1903,7 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
hdw->std_mask_avail |= std2;
}
- pvr2_hdw_internal_set_std_avail(hdw);
+ hdw->std_info_cur.def.type_bitmask.valid_bits = hdw->std_mask_avail;
if (std1) {
bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std1);
@@ -1945,7 +1912,6 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
bcnt,buf);
hdw->std_mask_cur = std1;
hdw->std_dirty = !0;
- pvr2_hdw_internal_find_stdenum(hdw);
return;
}
if (std3) {
@@ -1955,7 +1921,6 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
" (determined by device type): %.*s",bcnt,buf);
hdw->std_mask_cur = std3;
hdw->std_dirty = !0;
- pvr2_hdw_internal_find_stdenum(hdw);
return;
}
@@ -1975,24 +1940,10 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
bcnt,buf);
hdw->std_mask_cur = std_eeprom_maps[idx].std;
hdw->std_dirty = !0;
- pvr2_hdw_internal_find_stdenum(hdw);
return;
}
}
- if (hdw->std_enum_cnt > 1) {
- // Autoselect the first listed standard
- hdw->std_enum_cur = 1;
- hdw->std_mask_cur = hdw->std_defs[hdw->std_enum_cur-1].id;
- hdw->std_dirty = !0;
- pvr2_trace(PVR2_TRACE_STD,
- "Initial video standard auto-selected to %s",
- hdw->std_defs[hdw->std_enum_cur-1].name);
- return;
- }
-
- pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Unable to select a viable initial video standard");
}
@@ -2594,14 +2545,6 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
cptr->info = ciptr;
}
- // Initialize video standard enum dynamic control
- cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDENUM);
- if (cptr) {
- memcpy(&hdw->std_info_enum,cptr->info,
- sizeof(hdw->std_info_enum));
- cptr->info = &hdw->std_info_enum;
-
- }
// Initialize control data regarding video standard masks
valid_std_mask = pvr2_std_get_usable();
for (idx = 0; idx < 32; idx++) {
@@ -2629,7 +2572,17 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
cptr->info = &hdw->std_info_cur;
hdw->std_info_cur.def.type_bitmask.bit_names =
hdw->std_mask_ptrs;
- hdw->std_info_avail.def.type_bitmask.valid_bits =
+ hdw->std_info_cur.def.type_bitmask.valid_bits =
+ valid_std_mask;
+ }
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDDETECT);
+ if (cptr) {
+ memcpy(&hdw->std_info_detect,cptr->info,
+ sizeof(hdw->std_info_detect));
+ cptr->info = &hdw->std_info_detect;
+ hdw->std_info_detect.def.type_bitmask.bit_names =
+ hdw->std_mask_ptrs;
+ hdw->std_info_detect.def.type_bitmask.valid_bits =
valid_std_mask;
}
@@ -2711,8 +2664,6 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
kfree(hdw->ctl_write_buffer);
kfree(hdw->controls);
kfree(hdw->mpeg_ctrl_info);
- kfree(hdw->std_defs);
- kfree(hdw->std_enum_names);
kfree(hdw);
}
return NULL;
@@ -2788,8 +2739,6 @@ void pvr2_hdw_destroy(struct pvr2_hdw *hdw)
} while (0); mutex_unlock(&pvr2_unit_mtx);
kfree(hdw->controls);
kfree(hdw->mpeg_ctrl_info);
- kfree(hdw->std_defs);
- kfree(hdw->std_enum_names);
kfree(hdw);
}
@@ -2812,86 +2761,6 @@ void pvr2_hdw_disconnect(struct pvr2_hdw *hdw)
}
-// Attempt to autoselect an appropriate value for std_enum_cur given
-// whatever is currently in std_mask_cur
-static void pvr2_hdw_internal_find_stdenum(struct pvr2_hdw *hdw)
-{
- unsigned int idx;
- for (idx = 1; idx < hdw->std_enum_cnt; idx++) {
- if (hdw->std_defs[idx-1].id == hdw->std_mask_cur) {
- hdw->std_enum_cur = idx;
- return;
- }
- }
- hdw->std_enum_cur = 0;
-}
-
-
-// Calculate correct set of enumerated standards based on currently known
-// set of available standards bits.
-static void pvr2_hdw_internal_set_std_avail(struct pvr2_hdw *hdw)
-{
- struct v4l2_standard *newstd;
- unsigned int std_cnt;
- unsigned int idx;
-
- newstd = pvr2_std_create_enum(&std_cnt,hdw->std_mask_avail);
-
- if (hdw->std_defs) {
- kfree(hdw->std_defs);
- hdw->std_defs = NULL;
- }
- hdw->std_enum_cnt = 0;
- if (hdw->std_enum_names) {
- kfree(hdw->std_enum_names);
- hdw->std_enum_names = NULL;
- }
-
- if (!std_cnt) {
- pvr2_trace(
- PVR2_TRACE_ERROR_LEGS,
- "WARNING: Failed to identify any viable standards");
- }
-
- /* Set up the dynamic control for this standard */
- hdw->std_enum_names = kmalloc(sizeof(char *)*(std_cnt+1),GFP_KERNEL);
- if (hdw->std_enum_names) {
- hdw->std_enum_names[0] = "none";
- for (idx = 0; idx < std_cnt; idx++)
- hdw->std_enum_names[idx+1] = newstd[idx].name;
- hdw->std_info_enum.def.type_enum.value_names =
- hdw->std_enum_names;
- hdw->std_info_enum.def.type_enum.count = std_cnt+1;
- } else {
- pvr2_trace(
- PVR2_TRACE_ERROR_LEGS,
- "WARNING: Failed to alloc memory for names");
- hdw->std_info_enum.def.type_enum.value_names = NULL;
- hdw->std_info_enum.def.type_enum.count = 0;
- }
- hdw->std_defs = newstd;
- hdw->std_enum_cnt = std_cnt+1;
- hdw->std_enum_cur = 0;
- hdw->std_info_cur.def.type_bitmask.valid_bits = hdw->std_mask_avail;
-}
-
-
-int pvr2_hdw_get_stdenum_value(struct pvr2_hdw *hdw,
- struct v4l2_standard *std,
- unsigned int idx)
-{
- int ret = -EINVAL;
- if (!idx) return ret;
- LOCK_TAKE(hdw->big_lock); do {
- if (idx >= hdw->std_enum_cnt) break;
- idx--;
- memcpy(std,hdw->std_defs+idx,sizeof(*std));
- ret = 0;
- } while (0); LOCK_GIVE(hdw->big_lock);
- return ret;
-}
-
-
/* Get the number of defined controls */
unsigned int pvr2_hdw_get_ctrl_count(struct pvr2_hdw *hdw)
{
@@ -2995,11 +2864,13 @@ static void pvr2_subdev_set_control(struct pvr2_hdw *hdw, int id,
pvr2_subdev_set_control(hdw, id, #lab, (hdw)->lab##_val); \
}
-int pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw, v4l2_std_id *std)
+v4l2_std_id pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw)
{
+ v4l2_std_id std;
+ std = (v4l2_std_id)hdw->std_mask_avail;
v4l2_device_call_all(&hdw->v4l2_dev, 0,
- video, querystd, std);
- return 0;
+ video, querystd, &std);
+ return std;
}
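
pvr2_hdw_get_detected_std() now reports detection as a v4l2_std_id bitmask, seeded with the available mask and refined by the sub-devices' querystd, and the V4L2 layer exposes it through VIDIOC_QUERYSTD instead of the old per-name enum control. Decoding such a mask from user space looks roughly like this (the /dev/video0 path is an assumption):

    /* Sketch: query the detected standard mask and report a coarse family. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    int print_detected_std(void)
    {
            v4l2_std_id std = 0;
            int fd = open("/dev/video0", O_RDWR);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, VIDIOC_QUERYSTD, &std) < 0) {
                    close(fd);
                    return -1;
            }
            if (std & V4L2_STD_NTSC)
                    printf("NTSC detected\n");
            else if (std & V4L2_STD_PAL)
                    printf("PAL detected\n");
            else if (std & V4L2_STD_SECAM)
                    printf("SECAM detected\n");
            else
                    printf("no standard detected (mask 0x%llx)\n",
                           (unsigned long long)std);
            close(fd);
            return 0;
    }
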
/* Execute whatever commands are required to update the state of all the
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.h b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
index 66546580b17d..8060fc666eeb 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
@@ -28,7 +28,6 @@
/* Private internal control ids, look these up with
pvr2_hdw_get_ctrl_by_id() - these are NOT visible in V4L */
-#define PVR2_CID_STDENUM 1
#define PVR2_CID_STDCUR 2
#define PVR2_CID_STDAVAIL 3
#define PVR2_CID_INPUT 4
@@ -46,6 +45,7 @@
#define PVR2_CID_CROPCAPBT 16
#define PVR2_CID_CROPCAPBW 17
#define PVR2_CID_CROPCAPBH 18
+#define PVR2_CID_STDDETECT 19
/* Legal values for the INPUT state variable */
#define PVR2_CVAL_INPUT_TV 0
@@ -210,13 +210,6 @@ int pvr2_hdw_set_stream_type(struct pvr2_hdw *, enum pvr2_config);
/* Get handle to video output stream */
struct pvr2_stream *pvr2_hdw_get_video_stream(struct pvr2_hdw *);
-/* Emit a video standard struct */
-int pvr2_hdw_get_stdenum_value(struct pvr2_hdw *hdw,struct v4l2_standard *std,
- unsigned int idx);
-
-/* Get the detected video standard */
-int pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw, v4l2_std_id *std);
-
/* Enable / disable retrieval of CPU firmware or prom contents. This must
be enabled before pvr2_hdw_cpufw_get() will function. Note that doing
this may prevent the device from running (and leaving this mode may
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index e1111d968a3d..7bddfaeeafc3 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -107,7 +107,6 @@ static struct v4l2_fmtdesc pvr_fmtdesc [] = {
// This should really be V4L2_PIX_FMT_MPEG, but xawtv
// breaks when I do that.
.pixelformat = 0, // V4L2_PIX_FMT_MPEG,
- .reserved = { 0, 0, 0, 0 }
}
};
@@ -145,740 +144,739 @@ static struct v4l2_format pvr_format [] = {
.start = { 0, 0 },
.count = { 0, 0 },
.flags = 0,
- .reserved = { 0, 0 }
}
}
}
};
+
/*
- * pvr_ioctl()
- *
- * This is part of Video 4 Linux API. The procedure handles ioctl() calls.
- *
+ * This is part of Video 4 Linux API. These procedures handle ioctl() calls.
*/
-static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
+static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
{
struct pvr2_v4l2_fh *fh = file->private_data;
- struct pvr2_v4l2 *vp = fh->vhead;
- struct pvr2_v4l2_dev *pdi = fh->pdi;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- long ret = -EINVAL;
- if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
- v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),cmd);
- }
+ memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
+ strlcpy(cap->bus_info, pvr2_hdw_get_bus_info(hdw),
+ sizeof(cap->bus_info));
+ strlcpy(cap->card, pvr2_hdw_get_desc(hdw), sizeof(cap->card));
+ return 0;
+}
- if (!pvr2_hdw_dev_ok(hdw)) {
- pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "ioctl failed - bad or no context");
- return -EFAULT;
- }
+static int pvr2_g_priority(struct file *file, void *priv, enum v4l2_priority *p)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2 *vp = fh->vhead;
- /* check priority */
- switch (cmd) {
- case VIDIOC_S_CTRL:
- case VIDIOC_S_STD:
- case VIDIOC_S_INPUT:
- case VIDIOC_S_TUNER:
- case VIDIOC_S_FREQUENCY:
- ret = v4l2_prio_check(&vp->prio, fh->prio);
- if (ret)
- return ret;
- }
+ *p = v4l2_prio_max(&vp->prio);
+ return 0;
+}
- switch (cmd) {
- case VIDIOC_QUERYCAP:
- {
- struct v4l2_capability *cap = arg;
+static int pvr2_s_priority(struct file *file, void *priv, enum v4l2_priority prio)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2 *vp = fh->vhead;
- memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
- strlcpy(cap->bus_info,pvr2_hdw_get_bus_info(hdw),
- sizeof(cap->bus_info));
- strlcpy(cap->card,pvr2_hdw_get_desc(hdw),sizeof(cap->card));
+ return v4l2_prio_change(&vp->prio, &fh->prio, prio);
+}
- ret = 0;
- break;
- }
+static int pvr2_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int val = 0;
+ int ret;
- case VIDIOC_G_PRIORITY:
- {
- enum v4l2_priority *p = arg;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_STDCUR), &val);
+ *std = val;
+ return ret;
+}
- *p = v4l2_prio_max(&vp->prio);
- ret = 0;
- break;
- }
+int pvr2_s_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- case VIDIOC_S_PRIORITY:
- {
- enum v4l2_priority *prio = arg;
+ return pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_STDCUR), *std);
+}
- ret = v4l2_prio_change(&vp->prio, &fh->prio, *prio);
- break;
- }
+static int pvr2_querystd(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int val = 0;
+ int ret;
- case VIDIOC_ENUMSTD:
- {
- struct v4l2_standard *vs = (struct v4l2_standard *)arg;
- int idx = vs->index;
- ret = pvr2_hdw_get_stdenum_value(hdw,vs,idx+1);
- break;
- }
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_STDDETECT), &val);
+ *std = val;
+ return ret;
+}
- case VIDIOC_QUERYSTD:
- {
- v4l2_std_id *std = arg;
- *std = V4L2_STD_ALL;
- ret = pvr2_hdw_get_detected_std(hdw, std);
- break;
- }
+static int pvr2_enum_input(struct file *file, void *priv, struct v4l2_input *vi)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct pvr2_ctrl *cptr;
+ struct v4l2_input tmp;
+ unsigned int cnt;
+ int val;
+ int ret;
- case VIDIOC_G_STD:
- {
- int val = 0;
- ret = pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR),&val);
- *(v4l2_std_id *)arg = val;
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_INPUT);
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.index = vi->index;
+ ret = 0;
+ if (vi->index >= fh->input_cnt)
+ return -EINVAL;
+ val = fh->input_map[vi->index];
+ switch (val) {
+ case PVR2_CVAL_INPUT_TV:
+ case PVR2_CVAL_INPUT_DTV:
+ case PVR2_CVAL_INPUT_RADIO:
+ tmp.type = V4L2_INPUT_TYPE_TUNER;
break;
- }
-
- case VIDIOC_S_STD:
- {
- ret = pvr2_ctrl_set_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR),
- *(v4l2_std_id *)arg);
+ case PVR2_CVAL_INPUT_SVIDEO:
+ case PVR2_CVAL_INPUT_COMPOSITE:
+ tmp.type = V4L2_INPUT_TYPE_CAMERA;
break;
+ default:
+ return -EINVAL;
}
- case VIDIOC_ENUMINPUT:
- {
- struct pvr2_ctrl *cptr;
- struct v4l2_input *vi = (struct v4l2_input *)arg;
- struct v4l2_input tmp;
- unsigned int cnt;
- int val;
+ cnt = 0;
+ pvr2_ctrl_get_valname(cptr, val,
+ tmp.name, sizeof(tmp.name) - 1, &cnt);
+ tmp.name[cnt] = 0;
- cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT);
+ /* Don't bother with audioset, since this driver currently
+ always switches the audio whenever the video is
+ switched. */
- memset(&tmp,0,sizeof(tmp));
- tmp.index = vi->index;
- ret = 0;
- if (vi->index >= fh->input_cnt) {
- ret = -EINVAL;
- break;
- }
- val = fh->input_map[vi->index];
- switch (val) {
- case PVR2_CVAL_INPUT_TV:
- case PVR2_CVAL_INPUT_DTV:
- case PVR2_CVAL_INPUT_RADIO:
- tmp.type = V4L2_INPUT_TYPE_TUNER;
- break;
- case PVR2_CVAL_INPUT_SVIDEO:
- case PVR2_CVAL_INPUT_COMPOSITE:
- tmp.type = V4L2_INPUT_TYPE_CAMERA;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret < 0) break;
-
- cnt = 0;
- pvr2_ctrl_get_valname(cptr,val,
- tmp.name,sizeof(tmp.name)-1,&cnt);
- tmp.name[cnt] = 0;
-
- /* Don't bother with audioset, since this driver currently
- always switches the audio whenever the video is
- switched. */
-
- /* Handling std is a tougher problem. It doesn't make
- sense in cases where a device might be multi-standard.
- We could just copy out the current value for the
- standard, but it can change over time. For now just
- leave it zero. */
-
- memcpy(vi, &tmp, sizeof(tmp));
-
- ret = 0;
- break;
- }
+ /* Handling std is a tougher problem. It doesn't make
+ sense in cases where a device might be multi-standard.
+ We could just copy out the current value for the
+ standard, but it can change over time. For now just
+ leave it zero. */
+ *vi = tmp;
+ return 0;
+}
- case VIDIOC_G_INPUT:
- {
- unsigned int idx;
- struct pvr2_ctrl *cptr;
- struct v4l2_input *vi = (struct v4l2_input *)arg;
- int val;
- cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT);
- val = 0;
- ret = pvr2_ctrl_get_value(cptr,&val);
- vi->index = 0;
- for (idx = 0; idx < fh->input_cnt; idx++) {
- if (fh->input_map[idx] == val) {
- vi->index = idx;
- break;
- }
- }
- break;
- }
+static int pvr2_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ unsigned int idx;
+ struct pvr2_ctrl *cptr;
+ int val;
+ int ret;
- case VIDIOC_S_INPUT:
- {
- struct v4l2_input *vi = (struct v4l2_input *)arg;
- if (vi->index >= fh->input_cnt) {
- ret = -ERANGE;
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_INPUT);
+ val = 0;
+ ret = pvr2_ctrl_get_value(cptr, &val);
+ *i = 0;
+ for (idx = 0; idx < fh->input_cnt; idx++) {
+ if (fh->input_map[idx] == val) {
+ *i = idx;
break;
}
- ret = pvr2_ctrl_set_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT),
- fh->input_map[vi->index]);
- break;
}
+ return ret;
+}
- case VIDIOC_ENUMAUDIO:
- {
- /* pkt: FIXME: We are returning one "fake" input here
- which could very well be called "whatever_we_like".
- This is for apps that want to see an audio input
- just to feel comfortable, as well as to test if
- it can do stereo or sth. There is actually no guarantee
- that the actual audio input cannot change behind the app's
- back, but most applications should not mind that either.
-
- Hopefully, mplayer people will work with us on this (this
- whole mess is to support mplayer pvr://), or Hans will come
- up with a more standard way to say "we have inputs but we
- don 't want you to change them independent of video" which
- will sort this mess.
- */
- struct v4l2_audio *vin = arg;
- ret = -EINVAL;
- if (vin->index > 0) break;
- strncpy(vin->name, "PVRUSB2 Audio",14);
- vin->capability = V4L2_AUDCAP_STEREO;
- ret = 0;
- break;
- break;
- }
+static int pvr2_s_input(struct file *file, void *priv, unsigned int inp)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- case VIDIOC_G_AUDIO:
- {
- /* pkt: FIXME: see above comment (VIDIOC_ENUMAUDIO) */
- struct v4l2_audio *vin = arg;
- memset(vin,0,sizeof(*vin));
- vin->index = 0;
- strncpy(vin->name, "PVRUSB2 Audio",14);
- vin->capability = V4L2_AUDCAP_STEREO;
- ret = 0;
- break;
- }
+ if (inp >= fh->input_cnt)
+ return -EINVAL;
+ return pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_INPUT),
+ fh->input_map[inp]);
+}
- case VIDIOC_G_TUNER:
- {
- struct v4l2_tuner *vt = (struct v4l2_tuner *)arg;
+static int pvr2_enumaudio(struct file *file, void *priv, struct v4l2_audio *vin)
+{
+ /* pkt: FIXME: We are returning one "fake" input here
+ which could very well be called "whatever_we_like".
+ This is for apps that want to see an audio input
+ just to feel comfortable, as well as to test if
+ it can do stereo or something. There is actually no guarantee
+ that the actual audio input cannot change behind the app's
+ back, but most applications should not mind that either.
+
+ Hopefully, mplayer people will work with us on this (this
+ whole mess is to support mplayer pvr://), or Hans will come
+ up with a more standard way to say "we have inputs but we
+ don't want you to change them independently of video" which
+ will sort this mess.
+ */
+
+ if (vin->index > 0)
+ return -EINVAL;
+ strncpy(vin->name, "PVRUSB2 Audio", 14);
+ vin->capability = V4L2_AUDCAP_STEREO;
+ return 0;
+}
- if (vt->index != 0) break; /* Only answer for the 1st tuner */
+static int pvr2_g_audio(struct file *file, void *priv, struct v4l2_audio *vin)
+{
+ /* pkt: FIXME: see above comment (VIDIOC_ENUMAUDIO) */
+ vin->index = 0;
+ strncpy(vin->name, "PVRUSB2 Audio", 14);
+ vin->capability = V4L2_AUDCAP_STEREO;
+ return 0;
+}
- pvr2_hdw_execute_tuner_poll(hdw);
- ret = pvr2_hdw_get_tuner_status(hdw,vt);
- break;
- }
+static int pvr2_s_audio(struct file *file, void *priv, struct v4l2_audio *vout)
+{
+ if (vout->index)
+ return -EINVAL;
+ return 0;
+}
- case VIDIOC_S_TUNER:
- {
- struct v4l2_tuner *vt=(struct v4l2_tuner *)arg;
+static int pvr2_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- if (vt->index != 0)
- break;
+ if (vt->index != 0)
+ return -EINVAL; /* Only answer for the 1st tuner */
- ret = pvr2_ctrl_set_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_AUDIOMODE),
+ pvr2_hdw_execute_tuner_poll(hdw);
+ return pvr2_hdw_get_tuner_status(hdw, vt);
+}
+
+static int pvr2_s_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+
+ if (vt->index != 0)
+ return -EINVAL;
+
+ return pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_AUDIOMODE),
vt->audmode);
- break;
- }
+}
- case VIDIOC_S_FREQUENCY:
- {
- const struct v4l2_frequency *vf = (struct v4l2_frequency *)arg;
- unsigned long fv;
- struct v4l2_tuner vt;
- int cur_input;
- struct pvr2_ctrl *ctrlp;
- ret = pvr2_hdw_get_tuner_status(hdw,&vt);
- if (ret != 0) break;
- ctrlp = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT);
- ret = pvr2_ctrl_get_value(ctrlp,&cur_input);
- if (ret != 0) break;
- if (vf->type == V4L2_TUNER_RADIO) {
- if (cur_input != PVR2_CVAL_INPUT_RADIO) {
- pvr2_ctrl_set_value(ctrlp,
- PVR2_CVAL_INPUT_RADIO);
- }
- } else {
- if (cur_input == PVR2_CVAL_INPUT_RADIO) {
- pvr2_ctrl_set_value(ctrlp,
- PVR2_CVAL_INPUT_TV);
- }
- }
- fv = vf->frequency;
- if (vt.capability & V4L2_TUNER_CAP_LOW) {
- fv = (fv * 125) / 2;
- } else {
- fv = fv * 62500;
- }
- ret = pvr2_ctrl_set_value(
+int pvr2_s_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ unsigned long fv;
+ struct v4l2_tuner vt;
+ int cur_input;
+ struct pvr2_ctrl *ctrlp;
+ int ret;
+
+ ret = pvr2_hdw_get_tuner_status(hdw, &vt);
+ if (ret != 0)
+ return ret;
+ ctrlp = pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_INPUT);
+ ret = pvr2_ctrl_get_value(ctrlp, &cur_input);
+ if (ret != 0)
+ return ret;
+ if (vf->type == V4L2_TUNER_RADIO) {
+ if (cur_input != PVR2_CVAL_INPUT_RADIO)
+ pvr2_ctrl_set_value(ctrlp, PVR2_CVAL_INPUT_RADIO);
+ } else {
+ if (cur_input == PVR2_CVAL_INPUT_RADIO)
+ pvr2_ctrl_set_value(ctrlp, PVR2_CVAL_INPUT_TV);
+ }
+ fv = vf->frequency;
+ if (vt.capability & V4L2_TUNER_CAP_LOW)
+ fv = (fv * 125) / 2;
+ else
+ fv = fv * 62500;
+ return pvr2_ctrl_set_value(
pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY),fv);
- break;
- }
+}
- case VIDIOC_G_FREQUENCY:
- {
- struct v4l2_frequency *vf = (struct v4l2_frequency *)arg;
- int val = 0;
- int cur_input;
- struct v4l2_tuner vt;
- ret = pvr2_hdw_get_tuner_status(hdw,&vt);
- if (ret != 0) break;
- ret = pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY),
+static int pvr2_g_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int val = 0;
+ int cur_input;
+ struct v4l2_tuner vt;
+ int ret;
+
+ ret = pvr2_hdw_get_tuner_status(hdw, &vt);
+ if (ret != 0)
+ return ret;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_FREQUENCY),
&val);
- if (ret != 0) break;
- pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT),
+ if (ret != 0)
+ return ret;
+ pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_INPUT),
&cur_input);
- if (cur_input == PVR2_CVAL_INPUT_RADIO) {
- vf->type = V4L2_TUNER_RADIO;
- } else {
- vf->type = V4L2_TUNER_ANALOG_TV;
- }
- if (vt.capability & V4L2_TUNER_CAP_LOW) {
- val = (val * 2) / 125;
- } else {
- val /= 62500;
- }
- vf->frequency = val;
- break;
- }
+ if (cur_input == PVR2_CVAL_INPUT_RADIO)
+ vf->type = V4L2_TUNER_RADIO;
+ else
+ vf->type = V4L2_TUNER_ANALOG_TV;
+ if (vt.capability & V4L2_TUNER_CAP_LOW)
+ val = (val * 2) / 125;
+ else
+ val /= 62500;
+ vf->frequency = val;
+ return 0;
+}
- case VIDIOC_ENUM_FMT:
- {
- struct v4l2_fmtdesc *fd = (struct v4l2_fmtdesc *)arg;
+static int pvr2_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *fd)
+{
+ /* Only one format is supported: MPEG. */
+ if (fd->index != 0)
+ return -EINVAL;
- /* Only one format is supported : mpeg.*/
- if (fd->index != 0)
- break;
+ memcpy(fd, pvr_fmtdesc, sizeof(struct v4l2_fmtdesc));
+ return 0;
+}
- memcpy(fd, pvr_fmtdesc, sizeof(struct v4l2_fmtdesc));
- ret = 0;
- break;
- }
+static int pvr2_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int val;
- case VIDIOC_G_FMT:
- {
- struct v4l2_format *vf = (struct v4l2_format *)arg;
- int val;
- switch(vf->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- memcpy(vf, &pvr_format[PVR_FORMAT_PIX],
- sizeof(struct v4l2_format));
- val = 0;
- pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_HRES),
- &val);
- vf->fmt.pix.width = val;
- val = 0;
- pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_VRES),
- &val);
- vf->fmt.pix.height = val;
- ret = 0;
- break;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- // ????? Still need to figure out to do VBI correctly
- ret = -EINVAL;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- break;
- }
+ memcpy(vf, &pvr_format[PVR_FORMAT_PIX], sizeof(struct v4l2_format));
+ val = 0;
+ pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_HRES),
+ &val);
+ vf->fmt.pix.width = val;
+ val = 0;
+ pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_VRES),
+ &val);
+ vf->fmt.pix.height = val;
+ return 0;
+}
- case VIDIOC_TRY_FMT:
- case VIDIOC_S_FMT:
- {
- struct v4l2_format *vf = (struct v4l2_format *)arg;
-
- ret = 0;
- switch(vf->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
- int lmin,lmax,ldef;
- struct pvr2_ctrl *hcp,*vcp;
- int h = vf->fmt.pix.height;
- int w = vf->fmt.pix.width;
- hcp = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_HRES);
- vcp = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_VRES);
-
- lmin = pvr2_ctrl_get_min(hcp);
- lmax = pvr2_ctrl_get_max(hcp);
- pvr2_ctrl_get_def(hcp, &ldef);
- if (w == -1) {
- w = ldef;
- } else if (w < lmin) {
- w = lmin;
- } else if (w > lmax) {
- w = lmax;
- }
- lmin = pvr2_ctrl_get_min(vcp);
- lmax = pvr2_ctrl_get_max(vcp);
- pvr2_ctrl_get_def(vcp, &ldef);
- if (h == -1) {
- h = ldef;
- } else if (h < lmin) {
- h = lmin;
- } else if (h > lmax) {
- h = lmax;
- }
+static int pvr2_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int lmin, lmax, ldef;
+ struct pvr2_ctrl *hcp, *vcp;
+ int h = vf->fmt.pix.height;
+ int w = vf->fmt.pix.width;
+
+ hcp = pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_HRES);
+ vcp = pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_VRES);
+
+ lmin = pvr2_ctrl_get_min(hcp);
+ lmax = pvr2_ctrl_get_max(hcp);
+ pvr2_ctrl_get_def(hcp, &ldef);
+ if (w == -1)
+ w = ldef;
+ else if (w < lmin)
+ w = lmin;
+ else if (w > lmax)
+ w = lmax;
+ lmin = pvr2_ctrl_get_min(vcp);
+ lmax = pvr2_ctrl_get_max(vcp);
+ pvr2_ctrl_get_def(vcp, &ldef);
+ if (h == -1)
+ h = ldef;
+ else if (h < lmin)
+ h = lmin;
+ else if (h > lmax)
+ h = lmax;
+
+ memcpy(vf, &pvr_format[PVR_FORMAT_PIX],
+ sizeof(struct v4l2_format));
+ vf->fmt.pix.width = w;
+ vf->fmt.pix.height = h;
+ return 0;
+}
- memcpy(vf, &pvr_format[PVR_FORMAT_PIX],
- sizeof(struct v4l2_format));
- vf->fmt.pix.width = w;
- vf->fmt.pix.height = h;
+static int pvr2_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct pvr2_ctrl *hcp, *vcp;
+ int ret = pvr2_try_fmt_vid_cap(file, fh, vf);
- if (cmd == VIDIOC_S_FMT) {
- pvr2_ctrl_set_value(hcp,vf->fmt.pix.width);
- pvr2_ctrl_set_value(vcp,vf->fmt.pix.height);
- }
- } break;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- // ????? Still need to figure out to do VBI correctly
- ret = -EINVAL;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- break;
- }
+ if (ret)
+ return ret;
+ hcp = pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_HRES);
+ vcp = pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_VRES);
+ pvr2_ctrl_set_value(hcp, vf->fmt.pix.width);
+ pvr2_ctrl_set_value(vcp, vf->fmt.pix.height);
+ return 0;
+}
- case VIDIOC_STREAMON:
- {
- if (!fh->pdi->stream) {
- /* No stream defined for this node. This means
- that we're not currently allowed to stream from
- this node. */
- ret = -EPERM;
- break;
- }
- ret = pvr2_hdw_set_stream_type(hdw,pdi->config);
- if (ret < 0) return ret;
- ret = pvr2_hdw_set_streaming(hdw,!0);
- break;
+static int pvr2_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct pvr2_v4l2_dev *pdi = fh->pdi;
+ int ret;
+
+ if (!fh->pdi->stream) {
+ /* No stream defined for this node. This means
+ that we're not currently allowed to stream from
+ this node. */
+ return -EPERM;
}
+ ret = pvr2_hdw_set_stream_type(hdw, pdi->config);
+ if (ret < 0)
+ return ret;
+ return pvr2_hdw_set_streaming(hdw, !0);
+}
- case VIDIOC_STREAMOFF:
- {
- if (!fh->pdi->stream) {
- /* No stream defined for this node. This means
- that we're not currently allowed to stream from
- this node. */
- ret = -EPERM;
- break;
- }
- ret = pvr2_hdw_set_streaming(hdw,0);
- break;
+static int pvr2_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+
+ if (!fh->pdi->stream) {
+ /* No stream defined for this node. This means
+ that we're not currently allowed to stream from
+ this node. */
+ return -EPERM;
}
+ return pvr2_hdw_set_streaming(hdw, 0);
+}
- case VIDIOC_QUERYCTRL:
- {
- struct pvr2_ctrl *cptr;
- int val;
- struct v4l2_queryctrl *vc = (struct v4l2_queryctrl *)arg;
- ret = 0;
- if (vc->id & V4L2_CTRL_FLAG_NEXT_CTRL) {
- cptr = pvr2_hdw_get_ctrl_nextv4l(
- hdw,(vc->id & ~V4L2_CTRL_FLAG_NEXT_CTRL));
- if (cptr) vc->id = pvr2_ctrl_get_v4lid(cptr);
- } else {
- cptr = pvr2_hdw_get_ctrl_v4l(hdw,vc->id);
- }
- if (!cptr) {
- pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "QUERYCTRL id=0x%x not implemented here",
- vc->id);
- ret = -EINVAL;
- break;
- }
+static int pvr2_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *vc)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct pvr2_ctrl *cptr;
+ int val;
+ int ret;
+ ret = 0;
+ if (vc->id & V4L2_CTRL_FLAG_NEXT_CTRL) {
+ cptr = pvr2_hdw_get_ctrl_nextv4l(
+ hdw, (vc->id & ~V4L2_CTRL_FLAG_NEXT_CTRL));
+ if (cptr)
+ vc->id = pvr2_ctrl_get_v4lid(cptr);
+ } else {
+ cptr = pvr2_hdw_get_ctrl_v4l(hdw, vc->id);
+ }
+ if (!cptr) {
pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "QUERYCTRL id=0x%x mapping name=%s (%s)",
- vc->id,pvr2_ctrl_get_name(cptr),
- pvr2_ctrl_get_desc(cptr));
- strlcpy(vc->name,pvr2_ctrl_get_desc(cptr),sizeof(vc->name));
- vc->flags = pvr2_ctrl_get_v4lflags(cptr);
- pvr2_ctrl_get_def(cptr, &val);
- vc->default_value = val;
- switch (pvr2_ctrl_get_type(cptr)) {
- case pvr2_ctl_enum:
- vc->type = V4L2_CTRL_TYPE_MENU;
- vc->minimum = 0;
- vc->maximum = pvr2_ctrl_get_cnt(cptr) - 1;
- vc->step = 1;
- break;
- case pvr2_ctl_bool:
- vc->type = V4L2_CTRL_TYPE_BOOLEAN;
- vc->minimum = 0;
- vc->maximum = 1;
- vc->step = 1;
- break;
- case pvr2_ctl_int:
- vc->type = V4L2_CTRL_TYPE_INTEGER;
- vc->minimum = pvr2_ctrl_get_min(cptr);
- vc->maximum = pvr2_ctrl_get_max(cptr);
- vc->step = 1;
- break;
- default:
- pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "QUERYCTRL id=0x%x name=%s not mappable",
- vc->id,pvr2_ctrl_get_name(cptr));
- ret = -EINVAL;
- break;
- }
+ "QUERYCTRL id=0x%x not implemented here",
+ vc->id);
+ return -EINVAL;
+ }
+
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "QUERYCTRL id=0x%x mapping name=%s (%s)",
+ vc->id, pvr2_ctrl_get_name(cptr),
+ pvr2_ctrl_get_desc(cptr));
+ strlcpy(vc->name, pvr2_ctrl_get_desc(cptr), sizeof(vc->name));
+ vc->flags = pvr2_ctrl_get_v4lflags(cptr);
+ pvr2_ctrl_get_def(cptr, &val);
+ vc->default_value = val;
+ switch (pvr2_ctrl_get_type(cptr)) {
+ case pvr2_ctl_enum:
+ vc->type = V4L2_CTRL_TYPE_MENU;
+ vc->minimum = 0;
+ vc->maximum = pvr2_ctrl_get_cnt(cptr) - 1;
+ vc->step = 1;
break;
- }
-
- case VIDIOC_QUERYMENU:
- {
- struct v4l2_querymenu *vm = (struct v4l2_querymenu *)arg;
- unsigned int cnt = 0;
- ret = pvr2_ctrl_get_valname(pvr2_hdw_get_ctrl_v4l(hdw,vm->id),
- vm->index,
- vm->name,sizeof(vm->name)-1,
- &cnt);
- vm->name[cnt] = 0;
+ case pvr2_ctl_bool:
+ vc->type = V4L2_CTRL_TYPE_BOOLEAN;
+ vc->minimum = 0;
+ vc->maximum = 1;
+ vc->step = 1;
break;
- }
-
- case VIDIOC_G_CTRL:
- {
- struct v4l2_control *vc = (struct v4l2_control *)arg;
- int val = 0;
- ret = pvr2_ctrl_get_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id),
- &val);
- vc->value = val;
+ case pvr2_ctl_int:
+ vc->type = V4L2_CTRL_TYPE_INTEGER;
+ vc->minimum = pvr2_ctrl_get_min(cptr);
+ vc->maximum = pvr2_ctrl_get_max(cptr);
+ vc->step = 1;
break;
+ default:
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "QUERYCTRL id=0x%x name=%s not mappable",
+ vc->id, pvr2_ctrl_get_name(cptr));
+ return -EINVAL;
}
+ return 0;
+}
- case VIDIOC_S_CTRL:
- {
- struct v4l2_control *vc = (struct v4l2_control *)arg;
- ret = pvr2_ctrl_set_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id),
- vc->value);
- break;
- }
+static int pvr2_querymenu(struct file *file, void *priv, struct v4l2_querymenu *vm)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ unsigned int cnt = 0;
+ int ret;
- case VIDIOC_G_EXT_CTRLS:
- {
- struct v4l2_ext_controls *ctls =
- (struct v4l2_ext_controls *)arg;
- struct v4l2_ext_control *ctrl;
- unsigned int idx;
- int val;
- ret = 0;
- for (idx = 0; idx < ctls->count; idx++) {
- ctrl = ctls->controls + idx;
- ret = pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id),&val);
- if (ret) {
- ctls->error_idx = idx;
- break;
- }
- /* Ensure that if read as a 64 bit value, the user
- will still get a hopefully sane value */
- ctrl->value64 = 0;
- ctrl->value = val;
+ ret = pvr2_ctrl_get_valname(pvr2_hdw_get_ctrl_v4l(hdw, vm->id),
+ vm->index,
+ vm->name, sizeof(vm->name) - 1,
+ &cnt);
+ vm->name[cnt] = 0;
+ return ret;
+}
+
+static int pvr2_g_ctrl(struct file *file, void *priv, struct v4l2_control *vc)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int val = 0;
+ int ret;
+
+ ret = pvr2_ctrl_get_value(pvr2_hdw_get_ctrl_v4l(hdw, vc->id),
+ &val);
+ vc->value = val;
+ return ret;
+}
+
+static int pvr2_s_ctrl(struct file *file, void *priv, struct v4l2_control *vc)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+
+ return pvr2_ctrl_set_value(pvr2_hdw_get_ctrl_v4l(hdw, vc->id),
+ vc->value);
+}
+
+static int pvr2_g_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctls)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct v4l2_ext_control *ctrl;
+ unsigned int idx;
+ int val;
+ int ret;
+
+ ret = 0;
+ for (idx = 0; idx < ctls->count; idx++) {
+ ctrl = ctls->controls + idx;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_v4l(hdw, ctrl->id), &val);
+ if (ret) {
+ ctls->error_idx = idx;
+ return ret;
}
- break;
+ /* Ensure that if read as a 64 bit value, the user
+ will still get a hopefully sane value */
+ ctrl->value64 = 0;
+ ctrl->value = val;
}
+ return 0;
+}
- case VIDIOC_S_EXT_CTRLS:
- {
- struct v4l2_ext_controls *ctls =
- (struct v4l2_ext_controls *)arg;
- struct v4l2_ext_control *ctrl;
- unsigned int idx;
- ret = 0;
- for (idx = 0; idx < ctls->count; idx++) {
- ctrl = ctls->controls + idx;
- ret = pvr2_ctrl_set_value(
- pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id),
+static int pvr2_s_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctls)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct v4l2_ext_control *ctrl;
+ unsigned int idx;
+ int ret;
+
+ ret = 0;
+ for (idx = 0; idx < ctls->count; idx++) {
+ ctrl = ctls->controls + idx;
+ ret = pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_v4l(hdw, ctrl->id),
ctrl->value);
- if (ret) {
- ctls->error_idx = idx;
- break;
- }
+ if (ret) {
+ ctls->error_idx = idx;
+ return ret;
}
- break;
}
+ return 0;
+}
- case VIDIOC_TRY_EXT_CTRLS:
- {
- struct v4l2_ext_controls *ctls =
- (struct v4l2_ext_controls *)arg;
- struct v4l2_ext_control *ctrl;
- struct pvr2_ctrl *pctl;
- unsigned int idx;
- /* For the moment just validate that the requested control
- actually exists. */
- ret = 0;
- for (idx = 0; idx < ctls->count; idx++) {
- ctrl = ctls->controls + idx;
- pctl = pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id);
- if (!pctl) {
- ret = -EINVAL;
- ctls->error_idx = idx;
- break;
- }
- }
- break;
- }
+static int pvr2_try_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctls)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct v4l2_ext_control *ctrl;
+ struct pvr2_ctrl *pctl;
+ unsigned int idx;
+ int ret;
- case VIDIOC_CROPCAP:
- {
- struct v4l2_cropcap *cap = (struct v4l2_cropcap *)arg;
- if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- ret = -EINVAL;
- break;
+ /* For the moment just validate that the requested control
+ actually exists. */
+ ret = 0;
+ for (idx = 0; idx < ctls->count; idx++) {
+ ctrl = ctls->controls + idx;
+ pctl = pvr2_hdw_get_ctrl_v4l(hdw, ctrl->id);
+ if (!pctl) {
+ ctls->error_idx = idx;
+ return -EINVAL;
}
- ret = pvr2_hdw_get_cropcap(hdw, cap);
- cap->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* paranoia */
- break;
}
- case VIDIOC_G_CROP:
- {
- struct v4l2_crop *crop = (struct v4l2_crop *)arg;
- int val = 0;
- if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- ret = -EINVAL;
- break;
- }
- ret = pvr2_ctrl_get_value(
+ return 0;
+}
+
+static int pvr2_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cap)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int ret;
+
+ if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ ret = pvr2_hdw_get_cropcap(hdw, cap);
+ cap->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* paranoia */
+ return ret;
+}
+
+static int pvr2_g_crop(struct file *file, void *priv, struct v4l2_crop *crop)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int val = 0;
+ int ret;
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ ret = pvr2_ctrl_get_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL), &val);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- crop->c.left = val;
- ret = pvr2_ctrl_get_value(
+ if (ret != 0)
+ return -EINVAL;
+ crop->c.left = val;
+ ret = pvr2_ctrl_get_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT), &val);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- crop->c.top = val;
- ret = pvr2_ctrl_get_value(
+ if (ret != 0)
+ return -EINVAL;
+ crop->c.top = val;
+ ret = pvr2_ctrl_get_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW), &val);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- crop->c.width = val;
- ret = pvr2_ctrl_get_value(
+ if (ret != 0)
+ return -EINVAL;
+ crop->c.width = val;
+ ret = pvr2_ctrl_get_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH), &val);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- crop->c.height = val;
- }
- case VIDIOC_S_CROP:
- {
- struct v4l2_crop *crop = (struct v4l2_crop *)arg;
- if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- ret = -EINVAL;
- break;
- }
- ret = pvr2_ctrl_set_value(
+ if (ret != 0)
+ return -EINVAL;
+ crop->c.height = val;
+ return 0;
+}
+
+static int pvr2_s_crop(struct file *file, void *priv, struct v4l2_crop *crop)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int ret;
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ ret = pvr2_ctrl_set_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL),
crop->c.left);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- ret = pvr2_ctrl_set_value(
+ if (ret != 0)
+ return -EINVAL;
+ ret = pvr2_ctrl_set_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT),
crop->c.top);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- ret = pvr2_ctrl_set_value(
+ if (ret != 0)
+ return -EINVAL;
+ ret = pvr2_ctrl_set_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW),
crop->c.width);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- ret = pvr2_ctrl_set_value(
+ if (ret != 0)
+ return -EINVAL;
+ ret = pvr2_ctrl_set_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH),
crop->c.height);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- }
- case VIDIOC_LOG_STATUS:
- {
- pvr2_hdw_trigger_module_log(hdw);
- ret = 0;
- break;
- }
+ if (ret != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int pvr2_log_status(struct file *file, void *priv)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+
+ pvr2_hdw_trigger_module_log(hdw);
+ return 0;
+}
+
#ifdef CONFIG_VIDEO_ADV_DEBUG
- case VIDIOC_DBG_S_REGISTER:
- case VIDIOC_DBG_G_REGISTER:
- {
- u64 val;
- struct v4l2_dbg_register *req = (struct v4l2_dbg_register *)arg;
- if (cmd == VIDIOC_DBG_S_REGISTER) val = req->val;
- ret = pvr2_hdw_register_access(
- hdw, &req->match, req->reg,
- cmd == VIDIOC_DBG_S_REGISTER, &val);
- if (cmd == VIDIOC_DBG_G_REGISTER) req->val = val;
- break;
- }
-#endif
+static int pvr2_g_register(struct file *file, void *priv, struct v4l2_dbg_register *req)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ u64 val;
+ int ret;
- default :
- ret = -ENOTTY;
- break;
- }
+ ret = pvr2_hdw_register_access(
+ hdw, &req->match, req->reg,
+ 0, &val);
+ req->val = val;
+ return ret;
+}
- pvr2_hdw_commit_ctl(hdw);
+static int pvr2_s_register(struct file *file, void *priv, struct v4l2_dbg_register *req)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ u64 val;
+ int ret;
- if (ret < 0) {
- if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
- pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "pvr2_v4l2_do_ioctl failure, ret=%ld", ret);
- } else {
- if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
- pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "pvr2_v4l2_do_ioctl failure, ret=%ld"
- " command was:", ret);
- v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),
- cmd);
- }
- }
- } else {
- pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "pvr2_v4l2_do_ioctl complete, ret=%ld (0x%lx)",
- ret, ret);
- }
+ val = req->val;
+ ret = pvr2_hdw_register_access(
+ hdw, &req->match, req->reg,
+ 1, &val);
return ret;
}
+#endif
+
+static const struct v4l2_ioctl_ops pvr2_ioctl_ops = {
+ .vidioc_querycap = pvr2_querycap,
+ .vidioc_g_priority = pvr2_g_priority,
+ .vidioc_s_priority = pvr2_s_priority,
+ .vidioc_s_audio = pvr2_s_audio,
+ .vidioc_g_audio = pvr2_g_audio,
+ .vidioc_enumaudio = pvr2_enumaudio,
+ .vidioc_enum_input = pvr2_enum_input,
+ .vidioc_cropcap = pvr2_cropcap,
+ .vidioc_s_crop = pvr2_s_crop,
+ .vidioc_g_crop = pvr2_g_crop,
+ .vidioc_g_input = pvr2_g_input,
+ .vidioc_s_input = pvr2_s_input,
+ .vidioc_g_frequency = pvr2_g_frequency,
+ .vidioc_s_frequency = pvr2_s_frequency,
+ .vidioc_s_tuner = pvr2_s_tuner,
+ .vidioc_g_tuner = pvr2_g_tuner,
+ .vidioc_g_std = pvr2_g_std,
+ .vidioc_s_std = pvr2_s_std,
+ .vidioc_querystd = pvr2_querystd,
+ .vidioc_log_status = pvr2_log_status,
+ .vidioc_enum_fmt_vid_cap = pvr2_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = pvr2_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = pvr2_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = pvr2_try_fmt_vid_cap,
+ .vidioc_streamon = pvr2_streamon,
+ .vidioc_streamoff = pvr2_streamoff,
+ .vidioc_queryctrl = pvr2_queryctrl,
+ .vidioc_querymenu = pvr2_querymenu,
+ .vidioc_g_ctrl = pvr2_g_ctrl,
+ .vidioc_s_ctrl = pvr2_s_ctrl,
+ .vidioc_g_ext_ctrls = pvr2_g_ext_ctrls,
+ .vidioc_s_ext_ctrls = pvr2_s_ext_ctrls,
+ .vidioc_try_ext_ctrls = pvr2_try_ext_ctrls,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = pvr2_g_register,
+ .vidioc_s_register = pvr2_s_register,
+#endif
+};
static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
{
@@ -961,7 +959,56 @@ static long pvr2_v4l2_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- return video_usercopy(file, cmd, arg, pvr2_v4l2_do_ioctl);
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2 *vp = fh->vhead;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ long ret = -EINVAL;
+
+ if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL)
+ v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw), cmd);
+
+ if (!pvr2_hdw_dev_ok(hdw)) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "ioctl failed - bad or no context");
+ return -EFAULT;
+ }
+
+ /* check priority */
+ switch (cmd) {
+ case VIDIOC_S_CTRL:
+ case VIDIOC_S_STD:
+ case VIDIOC_S_INPUT:
+ case VIDIOC_S_TUNER:
+ case VIDIOC_S_FREQUENCY:
+ ret = v4l2_prio_check(&vp->prio, fh->prio);
+ if (ret)
+ return ret;
+ }
+
+ ret = video_ioctl2(file, cmd, arg);
+
+ pvr2_hdw_commit_ctl(hdw);
+
+ if (ret < 0) {
+ if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL)
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "pvr2_v4l2_do_ioctl failure, ret=%ld", ret);
+ } else {
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "pvr2_v4l2_do_ioctl complete, ret=%ld (0x%lx)",
+ ret, ret);
+ }
+ return ret;
+
}
@@ -1262,10 +1309,12 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
struct usb_device *usbdev;
int mindevnum;
int unit_number;
+ struct pvr2_hdw *hdw;
int *nr_ptr = NULL;
dip->v4lp = vp;
- usbdev = pvr2_hdw_get_dev(vp->channel.mc_head->hdw);
+ hdw = vp->channel.mc_head->hdw;
+ usbdev = pvr2_hdw_get_dev(hdw);
dip->v4l_type = v4l_type;
switch (v4l_type) {
case VFL_TYPE_GRABBER:
@@ -1300,9 +1349,17 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
memcpy(&dip->devbase,&vdev_template,sizeof(vdev_template));
dip->devbase.release = pvr2_video_device_release;
+ dip->devbase.ioctl_ops = &pvr2_ioctl_ops;
+ {
+ int val;
+ pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,
+ PVR2_CID_STDAVAIL), &val);
+ dip->devbase.tvnorms = (v4l2_std_id)val;
+ }
mindevnum = -1;
- unit_number = pvr2_hdw_get_unit_number(vp->channel.mc_head->hdw);
+ unit_number = pvr2_hdw_get_unit_number(hdw);
if (nr_ptr && (unit_number >= 0) && (unit_number < PVR_NUM)) {
mindevnum = nr_ptr[unit_number];
}
@@ -1319,7 +1376,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
video_device_node_name(&dip->devbase),
pvr2_config_get_name(dip->config));
- pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw,
+ pvr2_hdw_v4l_store_minor_number(hdw,
dip->minor_type,dip->devbase.minor);
}
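/*
 * Illustrative sketch, not part of the patch above: the overall shape of the
 * pvrusb2 conversion -- per-ioctl handlers collected in a v4l2_ioctl_ops
 * table, with the driver's unlocked_ioctl forwarding to video_ioctl2() so the
 * V4L2 core handles dispatch and argument copying. The "foo_*" names are
 * hypothetical placeholders, not pvrusb2 symbols.
 */
#include <linux/module.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>

static int foo_log_status(struct file *file, void *priv)
{
	/* per-ioctl work for VIDIOC_LOG_STATUS would go here */
	return 0;
}

static const struct v4l2_ioctl_ops foo_ioctl_ops = {
	.vidioc_log_status = foo_log_status,
};

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* driver-wide pre/post work (priority checks, commit) wraps the core */
	return video_ioctl2(file, cmd, arg);
}

static const struct v4l2_file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_ioctl,
};

/* A video_device then points at both tables before registration:
 *	vdev.fops = &foo_fops;
 *	vdev.ioctl_ops = &foo_ioctl_ops;
 */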
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 122fbd0081eb..ec4e2ef54e65 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -357,6 +357,7 @@ handler_end:
PWC_ERROR("Error (%d) re-submitting urb in pwc_isoc_handler.\n", i);
}
+/* Both v4l2_lock and vb_queue_lock should be locked when calling this */
static int pwc_isoc_init(struct pwc_device *pdev)
{
struct usb_device *udev;
@@ -366,9 +367,6 @@ static int pwc_isoc_init(struct pwc_device *pdev)
struct usb_host_interface *idesc = NULL;
int compression = 0; /* 0..3 = uncompressed..high */
- if (pdev->iso_init)
- return 0;
-
pdev->vsync = 0;
pdev->vlast_packet_size = 0;
pdev->fill_buf = NULL;
@@ -418,7 +416,6 @@ retry:
urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL);
if (urb == NULL) {
PWC_ERROR("Failed to allocate urb %d\n", i);
- pdev->iso_init = 1;
pwc_isoc_cleanup(pdev);
return -ENOMEM;
}
@@ -435,7 +432,6 @@ retry:
&urb->transfer_dma);
if (urb->transfer_buffer == NULL) {
PWC_ERROR("Failed to allocate urb buffer %d\n", i);
- pdev->iso_init = 1;
pwc_isoc_cleanup(pdev);
return -ENOMEM;
}
@@ -455,13 +451,11 @@ retry:
ret = usb_submit_urb(pdev->urbs[i], GFP_KERNEL);
if (ret == -ENOSPC && compression < 3) {
compression++;
- pdev->iso_init = 1;
pwc_isoc_cleanup(pdev);
goto retry;
}
if (ret) {
PWC_ERROR("isoc_init() submit_urb %d failed with error %d\n", i, ret);
- pdev->iso_init = 1;
pwc_isoc_cleanup(pdev);
return ret;
}
@@ -469,7 +463,6 @@ retry:
}
/* All is done... */
- pdev->iso_init = 1;
PWC_DEBUG_OPEN("<< pwc_isoc_init()\n");
return 0;
}
@@ -507,21 +500,19 @@ static void pwc_iso_free(struct pwc_device *pdev)
}
}
+/* Both v4l2_lock and vb_queue_lock should be locked when calling this */
static void pwc_isoc_cleanup(struct pwc_device *pdev)
{
PWC_DEBUG_OPEN(">> pwc_isoc_cleanup()\n");
- if (pdev->iso_init == 0)
- return;
-
pwc_iso_stop(pdev);
pwc_iso_free(pdev);
usb_set_interface(pdev->udev, 0, 0);
- pdev->iso_init = 0;
PWC_DEBUG_OPEN("<< pwc_isoc_cleanup()\n");
}
+/* Must be called with vb_queue_lock held */
static void pwc_cleanup_queued_bufs(struct pwc_device *pdev)
{
unsigned long flags = 0;
@@ -573,18 +564,13 @@ static const char *pwc_sensor_type_to_string(unsigned int sensor_type)
int pwc_test_n_set_capt_file(struct pwc_device *pdev, struct file *file)
{
- int r = 0;
-
- mutex_lock(&pdev->capt_file_lock);
if (pdev->capt_file != NULL &&
- pdev->capt_file != file) {
- r = -EBUSY;
- goto leave;
- }
+ pdev->capt_file != file)
+ return -EBUSY;
+
pdev->capt_file = file;
-leave:
- mutex_unlock(&pdev->capt_file_lock);
- return r;
+
+ return 0;
}
static void pwc_video_release(struct v4l2_device *v)
@@ -592,6 +578,7 @@ static void pwc_video_release(struct v4l2_device *v)
struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev);
v4l2_ctrl_handler_free(&pdev->ctrl_handler);
+ v4l2_device_unregister(&pdev->v4l2_dev);
kfree(pdev->ctrl_buf);
kfree(pdev);
}
@@ -600,10 +587,25 @@ static int pwc_video_close(struct file *file)
{
struct pwc_device *pdev = video_drvdata(file);
+ /*
+ * If we're still streaming, vb2_queue_release() will call stop_streaming(),
+ * so we must take both the v4l2_lock and the vb_queue_lock.
+ */
+ if (mutex_lock_interruptible(&pdev->v4l2_lock))
+ return -ERESTARTSYS;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock)) {
+ mutex_unlock(&pdev->v4l2_lock);
+ return -ERESTARTSYS;
+ }
+
if (pdev->capt_file == file) {
vb2_queue_release(&pdev->vb_queue);
pdev->capt_file = NULL;
}
+
+ mutex_unlock(&pdev->vb_queue_lock);
+ mutex_unlock(&pdev->v4l2_lock);
+
return v4l2_fh_release(file);
}
@@ -611,35 +613,81 @@ static ssize_t pwc_video_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct pwc_device *pdev = video_drvdata(file);
+ int lock_v4l2 = 0;
+ ssize_t ret;
- if (!pdev->udev)
- return -ENODEV;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- if (pwc_test_n_set_capt_file(pdev, file))
- return -EBUSY;
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret)
+ goto out;
- return vb2_read(&pdev->vb_queue, buf, count, ppos,
- file->f_flags & O_NONBLOCK);
+ /* start_streaming will get called, so we must take the v4l2_lock */
+ if (pdev->vb_queue.fileio == NULL)
+ lock_v4l2 = 1;
+
+ /* Use try_lock, since we're taking the locks in the *wrong* order! */
+ if (lock_v4l2 && !mutex_trylock(&pdev->v4l2_lock)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+ ret = vb2_read(&pdev->vb_queue, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
+ if (lock_v4l2)
+ mutex_unlock(&pdev->v4l2_lock);
+out:
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
static unsigned int pwc_video_poll(struct file *file, poll_table *wait)
{
struct pwc_device *pdev = video_drvdata(file);
+ struct vb2_queue *q = &pdev->vb_queue;
+ unsigned long req_events = poll_requested_events(wait);
+ unsigned int ret = POLL_ERR;
+ int lock_v4l2 = 0;
- if (!pdev->udev)
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
return POLL_ERR;
- return vb2_poll(&pdev->vb_queue, file, wait);
+ /* Will this start fileio and thus call start_streaming? */
+ if ((req_events & (POLLIN | POLLRDNORM)) &&
+ q->num_buffers == 0 && !q->streaming && q->fileio == NULL) {
+ if (pwc_test_n_set_capt_file(pdev, file))
+ goto out;
+ lock_v4l2 = 1;
+ }
+
+ /* Use try_lock, since we're taking the locks in the *wrong* order! */
+ if (lock_v4l2 && !mutex_trylock(&pdev->v4l2_lock))
+ goto out;
+ ret = vb2_poll(&pdev->vb_queue, file, wait);
+ if (lock_v4l2)
+ mutex_unlock(&pdev->v4l2_lock);
+
+out:
+ if (!pdev->udev)
+ ret |= POLLHUP;
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
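/*
 * Illustrative sketch, not part of the patch: the trylock fallback used in
 * pwc_video_read()/pwc_video_poll() above. Those paths already hold
 * vb_queue_lock but may also need v4l2_lock, which inverts the documented
 * lock order, so they only try_lock and back out rather than risk a deadlock.
 * The helper below is hypothetical and only shows the shape of that dance.
 */
#include <linux/errno.h>
#include <linux/mutex.h>

static int reverse_order_locking(struct mutex *v4l2_lock,
				 struct mutex *vb_queue_lock)
{
	mutex_lock(vb_queue_lock);
	/* Taking v4l2_lock here would invert the v4l2_lock-first rule. */
	if (!mutex_trylock(v4l2_lock)) {
		mutex_unlock(vb_queue_lock);
		return -ERESTARTSYS;
	}
	/* ... work that needs both locks ... */
	mutex_unlock(v4l2_lock);
	mutex_unlock(vb_queue_lock);
	return 0;
}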
static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma)
{
struct pwc_device *pdev = video_drvdata(file);
+ int ret;
- if (pdev->capt_file != file)
- return -EBUSY;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- return vb2_mmap(&pdev->vb_queue, vma);
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret == 0)
+ ret = vb2_mmap(&pdev->vb_queue, vma);
+
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
/***************************************************************************/
@@ -715,12 +763,14 @@ static void buffer_queue(struct vb2_buffer *vb)
struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
unsigned long flags = 0;
- spin_lock_irqsave(&pdev->queued_bufs_lock, flags);
/* Check the device has not disconnected between prep and queuing */
- if (pdev->udev)
- list_add_tail(&buf->list, &pdev->queued_bufs);
- else
+ if (!pdev->udev) {
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ spin_lock_irqsave(&pdev->queued_bufs_lock, flags);
+ list_add_tail(&buf->list, &pdev->queued_bufs);
spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags);
}
@@ -729,11 +779,8 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
struct pwc_device *pdev = vb2_get_drv_priv(vq);
int r;
- mutex_lock(&pdev->udevlock);
- if (!pdev->udev) {
- r = -ENODEV;
- goto leave;
- }
+ if (!pdev->udev)
+ return -ENODEV;
/* Turn on camera and set LEDS on */
pwc_camera_power(pdev, 1);
@@ -747,8 +794,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
/* And cleanup any queued bufs!! */
pwc_cleanup_queued_bufs(pdev);
}
-leave:
- mutex_unlock(&pdev->udevlock);
+
return r;
}
@@ -756,19 +802,29 @@ static int stop_streaming(struct vb2_queue *vq)
{
struct pwc_device *pdev = vb2_get_drv_priv(vq);
- mutex_lock(&pdev->udevlock);
if (pdev->udev) {
pwc_set_leds(pdev, 0, 0);
pwc_camera_power(pdev, 0);
pwc_isoc_cleanup(pdev);
}
- mutex_unlock(&pdev->udevlock);
pwc_cleanup_queued_bufs(pdev);
return 0;
}
+static void wait_prepare(struct vb2_queue *vq)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vq);
+ mutex_unlock(&pdev->vb_queue_lock);
+}
+
+static void wait_finish(struct vb2_queue *vq)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vq);
+ mutex_lock(&pdev->vb_queue_lock);
+}
+
static struct vb2_ops pwc_vb_queue_ops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
@@ -778,6 +834,8 @@ static struct vb2_ops pwc_vb_queue_ops = {
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
+ .wait_prepare = wait_prepare,
+ .wait_finish = wait_finish,
};
/***************************************************************************/
@@ -1057,8 +1115,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->features = features;
pwc_construct(pdev); /* set min/max sizes correct */
- mutex_init(&pdev->capt_file_lock);
- mutex_init(&pdev->udevlock);
+ mutex_init(&pdev->v4l2_lock);
+ mutex_init(&pdev->vb_queue_lock);
spin_lock_init(&pdev->queued_bufs_lock);
INIT_LIST_HEAD(&pdev->queued_bufs);
@@ -1130,6 +1188,16 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->v4l2_dev.ctrl_handler = &pdev->ctrl_handler;
pdev->vdev.v4l2_dev = &pdev->v4l2_dev;
+ pdev->vdev.lock = &pdev->v4l2_lock;
+
+ /*
+ * Don't take v4l2_lock for these ioctls. This improves latency if
+ * v4l2_lock is taken for a long time, e.g. when changing a control
+ * value, and a new frame is ready to be dequeued.
+ */
+ v4l2_disable_ioctl_locking(&pdev->vdev, VIDIOC_DQBUF);
+ v4l2_disable_ioctl_locking(&pdev->vdev, VIDIOC_QBUF);
+ v4l2_disable_ioctl_locking(&pdev->vdev, VIDIOC_QUERYBUF);
rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, -1);
if (rc < 0) {
@@ -1185,16 +1253,20 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
struct v4l2_device *v = usb_get_intfdata(intf);
struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev);
- mutex_lock(&pdev->udevlock);
+ mutex_lock(&pdev->v4l2_lock);
+
+ mutex_lock(&pdev->vb_queue_lock);
/* No need to keep the urbs around after disconnection */
- pwc_isoc_cleanup(pdev);
+ if (pdev->vb_queue.streaming)
+ pwc_isoc_cleanup(pdev);
pdev->udev = NULL;
- mutex_unlock(&pdev->udevlock);
-
pwc_cleanup_queued_bufs(pdev);
+ mutex_unlock(&pdev->vb_queue_lock);
+ v4l2_device_disconnect(&pdev->v4l2_dev);
video_unregister_device(&pdev->vdev);
- v4l2_device_unregister(&pdev->v4l2_dev);
+
+ mutex_unlock(&pdev->v4l2_lock);
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
if (pdev->button_dev)
@@ -1229,15 +1301,4 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("pwcx");
MODULE_VERSION( PWC_VERSION );
-static int __init usb_pwc_init(void)
-{
- return usb_register(&pwc_driver);
-}
-
-static void __exit usb_pwc_exit(void)
-{
- usb_deregister(&pwc_driver);
-}
-
-module_init(usb_pwc_init);
-module_exit(usb_pwc_exit);
+module_usb_driver(pwc_driver);
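/*
 * Illustrative sketch, not part of the patch: the two-mutex scheme the pwc
 * changes above introduce. v4l2_lock is always taken before vb_queue_lock;
 * vb_queue_lock guards the vb2 queue and capt_file. wait_prepare/wait_finish
 * let videobuf2 drop the queue lock while a blocking DQBUF sleeps, so other
 * queue ioctls are not starved. "sketch_dev" is a hypothetical structure.
 */
#include <linux/errno.h>
#include <linux/mutex.h>
#include <media/videobuf2-core.h>

struct sketch_dev {
	struct mutex v4l2_lock;		/* always lock this one first */
	struct mutex vb_queue_lock;	/* protects vb_queue and capt_file */
	struct vb2_queue vb_queue;
};

static int sketch_take_both(struct sketch_dev *dev)
{
	if (mutex_lock_interruptible(&dev->v4l2_lock))
		return -ERESTARTSYS;
	if (mutex_lock_interruptible(&dev->vb_queue_lock)) {
		mutex_unlock(&dev->v4l2_lock);
		return -ERESTARTSYS;
	}
	/* ... e.g. vb2_queue_release() on close ... */
	mutex_unlock(&dev->vb_queue_lock);
	mutex_unlock(&dev->v4l2_lock);
	return 0;
}

static void sketch_wait_prepare(struct vb2_queue *vq)
{
	struct sketch_dev *dev = vb2_get_drv_priv(vq);

	mutex_unlock(&dev->vb_queue_lock);	/* about to sleep inside vb2 */
}

static void sketch_wait_finish(struct vb2_queue *vq)
{
	struct sketch_dev *dev = vb2_get_drv_priv(vq);

	mutex_lock(&dev->vb_queue_lock);
}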
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index 2834e3e65b39..c691e29cc36e 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -464,26 +464,24 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
struct pwc_device *pdev = video_drvdata(file);
int ret, pixelformat, compression = 0;
- if (pwc_test_n_set_capt_file(pdev, file))
- return -EBUSY;
-
ret = pwc_vidioc_try_fmt(pdev, f);
if (ret < 0)
return ret;
- pixelformat = f->fmt.pix.pixelformat;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- mutex_lock(&pdev->udevlock);
- if (!pdev->udev) {
- ret = -ENODEV;
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret)
goto leave;
- }
- if (pdev->iso_init) {
+ if (pdev->vb_queue.streaming) {
ret = -EBUSY;
goto leave;
}
+ pixelformat = f->fmt.pix.pixelformat;
+
PWC_DEBUG_IOCTL("Trying to set format to: width=%d height=%d fps=%d "
"format=%c%c%c%c\n",
f->fmt.pix.width, f->fmt.pix.height, pdev->vframes,
@@ -499,7 +497,7 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt);
leave:
- mutex_unlock(&pdev->udevlock);
+ mutex_unlock(&pdev->vb_queue_lock);
return ret;
}
@@ -507,9 +505,6 @@ static int pwc_querycap(struct file *file, void *fh, struct v4l2_capability *cap
{
struct pwc_device *pdev = video_drvdata(file);
- if (!pdev->udev)
- return -ENODEV;
-
strcpy(cap->driver, PWC_NAME);
strlcpy(cap->card, pdev->vdev.name, sizeof(cap->card));
usb_make_path(pdev->udev, cap->bus_info, sizeof(cap->bus_info));
@@ -540,15 +535,12 @@ static int pwc_s_input(struct file *file, void *fh, unsigned int i)
return i ? -EINVAL : 0;
}
-static int pwc_g_volatile_ctrl_unlocked(struct v4l2_ctrl *ctrl)
+static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
struct pwc_device *pdev =
container_of(ctrl->handler, struct pwc_device, ctrl_handler);
int ret = 0;
- if (!pdev->udev)
- return -ENODEV;
-
switch (ctrl->id) {
case V4L2_CID_AUTO_WHITE_BALANCE:
if (pdev->color_bal_valid &&
@@ -615,18 +607,6 @@ static int pwc_g_volatile_ctrl_unlocked(struct v4l2_ctrl *ctrl)
return ret;
}
-static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct pwc_device *pdev =
- container_of(ctrl->handler, struct pwc_device, ctrl_handler);
- int ret;
-
- mutex_lock(&pdev->udevlock);
- ret = pwc_g_volatile_ctrl_unlocked(ctrl);
- mutex_unlock(&pdev->udevlock);
- return ret;
-}
-
static int pwc_set_awb(struct pwc_device *pdev)
{
int ret;
@@ -648,7 +628,7 @@ static int pwc_set_awb(struct pwc_device *pdev)
if (pdev->auto_white_balance->val == awb_indoor ||
pdev->auto_white_balance->val == awb_outdoor ||
pdev->auto_white_balance->val == awb_fl)
- pwc_g_volatile_ctrl_unlocked(pdev->auto_white_balance);
+ pwc_g_volatile_ctrl(pdev->auto_white_balance);
}
if (pdev->auto_white_balance->val != awb_manual)
return 0;
@@ -812,13 +792,6 @@ static int pwc_s_ctrl(struct v4l2_ctrl *ctrl)
container_of(ctrl->handler, struct pwc_device, ctrl_handler);
int ret = 0;
- mutex_lock(&pdev->udevlock);
-
- if (!pdev->udev) {
- ret = -ENODEV;
- goto leave;
- }
-
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
@@ -915,8 +888,6 @@ static int pwc_s_ctrl(struct v4l2_ctrl *ctrl)
if (ret)
PWC_ERROR("s_ctrl %s error %d\n", ctrl->name, ret);
-leave:
- mutex_unlock(&pdev->udevlock);
return ret;
}
@@ -949,11 +920,9 @@ static int pwc_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- mutex_lock(&pdev->udevlock); /* To avoid race with s_fmt */
PWC_DEBUG_IOCTL("ioctl(VIDIOC_G_FMT) return size %dx%d\n",
pdev->width, pdev->height);
pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt);
- mutex_unlock(&pdev->udevlock);
return 0;
}
@@ -968,70 +937,98 @@ static int pwc_reqbufs(struct file *file, void *fh,
struct v4l2_requestbuffers *rb)
{
struct pwc_device *pdev = video_drvdata(file);
+ int ret;
- if (pwc_test_n_set_capt_file(pdev, file))
- return -EBUSY;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- return vb2_reqbufs(&pdev->vb_queue, rb);
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret == 0)
+ ret = vb2_reqbufs(&pdev->vb_queue, rb);
+
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
static int pwc_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
struct pwc_device *pdev = video_drvdata(file);
+ int ret;
- return vb2_querybuf(&pdev->vb_queue, buf);
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
+
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret == 0)
+ ret = vb2_querybuf(&pdev->vb_queue, buf);
+
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
static int pwc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
struct pwc_device *pdev = video_drvdata(file);
+ int ret;
- if (!pdev->udev)
- return -ENODEV;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- if (pdev->capt_file != file)
- return -EBUSY;
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret == 0)
+ ret = vb2_qbuf(&pdev->vb_queue, buf);
- return vb2_qbuf(&pdev->vb_queue, buf);
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
static int pwc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
struct pwc_device *pdev = video_drvdata(file);
+ int ret;
- if (!pdev->udev)
- return -ENODEV;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- if (pdev->capt_file != file)
- return -EBUSY;
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret == 0)
+ ret = vb2_dqbuf(&pdev->vb_queue, buf,
+ file->f_flags & O_NONBLOCK);
- return vb2_dqbuf(&pdev->vb_queue, buf, file->f_flags & O_NONBLOCK);
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
static int pwc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
{
struct pwc_device *pdev = video_drvdata(file);
+ int ret;
- if (!pdev->udev)
- return -ENODEV;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- if (pdev->capt_file != file)
- return -EBUSY;
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret == 0)
+ ret = vb2_streamon(&pdev->vb_queue, i);
- return vb2_streamon(&pdev->vb_queue, i);
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
static int pwc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
{
struct pwc_device *pdev = video_drvdata(file);
+ int ret;
- if (!pdev->udev)
- return -ENODEV;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- if (pdev->capt_file != file)
- return -EBUSY;
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret == 0)
+ ret = vb2_streamoff(&pdev->vb_queue, i);
- return vb2_streamoff(&pdev->vb_queue, i);
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
static int pwc_enum_framesizes(struct file *file, void *fh,
@@ -1119,19 +1116,17 @@ static int pwc_s_parm(struct file *file, void *fh,
parm->parm.capture.timeperframe.numerator == 0)
return -EINVAL;
- if (pwc_test_n_set_capt_file(pdev, file))
- return -EBUSY;
-
fps = parm->parm.capture.timeperframe.denominator /
parm->parm.capture.timeperframe.numerator;
- mutex_lock(&pdev->udevlock);
- if (!pdev->udev) {
- ret = -ENODEV;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
+
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret)
goto leave;
- }
- if (pdev->iso_init) {
+ if (pdev->vb_queue.streaming) {
ret = -EBUSY;
goto leave;
}
@@ -1142,7 +1137,7 @@ static int pwc_s_parm(struct file *file, void *fh,
pwc_g_parm(file, fh, parm);
leave:
- mutex_unlock(&pdev->udevlock);
+ mutex_unlock(&pdev->vb_queue_lock);
return ret;
}
@@ -1166,4 +1161,6 @@ const struct v4l2_ioctl_ops pwc_ioctl_ops = {
.vidioc_enum_frameintervals = pwc_enum_frameintervals,
.vidioc_g_parm = pwc_g_parm,
.vidioc_s_parm = pwc_s_parm,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
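/*
 * Illustrative sketch, not part of the patch: the guard repeated by the pwc
 * buffer ioctls above (reqbufs/querybuf/qbuf/dqbuf/streamon/streamoff),
 * factored into one hypothetical helper -- take vb_queue_lock, claim capture
 * ownership for this file handle via pwc_test_n_set_capt_file(), and only
 * then touch the vb2 queue. Assumes "pwc.h" for struct pwc_device and the
 * capt_file helper.
 */
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <media/videobuf2-core.h>

static int sketch_guarded_reqbufs(struct pwc_device *pdev, struct file *file,
				  struct v4l2_requestbuffers *rb)
{
	int ret;

	if (mutex_lock_interruptible(&pdev->vb_queue_lock))
		return -ERESTARTSYS;

	ret = pwc_test_n_set_capt_file(pdev, file); /* -EBUSY if owned elsewhere */
	if (ret == 0)
		ret = vb2_reqbufs(&pdev->vb_queue, rb);

	mutex_unlock(&pdev->vb_queue_lock);
	return ret;
}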
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index e4d4d711dd1f..d6b5b216b9d6 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -221,9 +221,17 @@ struct pwc_device
struct video_device vdev;
struct v4l2_device v4l2_dev;
- /* Pointer to our usb_device, may be NULL after unplug */
- struct usb_device *udev;
- struct mutex udevlock;
+ /* videobuf2 queue and queued buffers list */
+ struct vb2_queue vb_queue;
+ struct list_head queued_bufs;
+ spinlock_t queued_bufs_lock; /* Protects queued_bufs */
+
+ /* Note: if taking both locks, v4l2_lock must always be acquired first! */
+ struct mutex v4l2_lock; /* Protects everything else */
+ struct mutex vb_queue_lock; /* Protects vb_queue and capt_file */
+
+ /* Pointer to our usb_device, will be NULL after unplug */
+ struct usb_device *udev; /* Both mutexes must be held when setting! */
/* type of cam (645, 646, 675, 680, 690, 720, 730, 740, 750) */
int type;
@@ -232,7 +240,6 @@ struct pwc_device
/*** Video data ***/
struct file *capt_file; /* file doing video capture */
- struct mutex capt_file_lock;
int vendpoint; /* video isoc endpoint */
int vcinterface; /* video control interface */
int valternate; /* alternate interface needed */
@@ -251,12 +258,6 @@ struct pwc_device
unsigned char *ctrl_buf;
struct urb *urbs[MAX_ISO_BUFS];
- char iso_init;
-
- /* videobuf2 queue and queued buffers list */
- struct vb2_queue vb_queue;
- struct list_head queued_bufs;
- spinlock_t queued_bufs_lock;
/*
* Frame currently being filled, this only gets touched by the
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index 5a413f4427e0..9c21e01f2c24 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -241,15 +241,10 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
unsigned int *size)
{
struct soc_camera_device *icd = vq->priv_data;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
-
- if (bytes_per_line < 0)
- return bytes_per_line;
dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
- *size = bytes_per_line * icd->user_height;
+ *size = icd->sizeimage;
if (0 == *count)
*count = 32;
@@ -435,11 +430,6 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
int ret;
int size_y, size_u = 0, size_v = 0;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
-
- if (bytes_per_line < 0)
- return bytes_per_line;
dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
@@ -474,7 +464,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
vb->state = VIDEOBUF_NEEDS_INIT;
}
- vb->size = bytes_per_line * vb->height;
+ vb->size = icd->sizeimage;
if (0 != vb->baddr && vb->bsize < vb->size) {
ret = -EINVAL;
goto out;
@@ -1244,6 +1234,7 @@ static const struct soc_mbus_pixelfmt pxa_camera_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_U_V,
},
};
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 4894cbb1c547..01c2179f0520 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -634,13 +634,11 @@ static void s2255_fillbuff(struct s2255_channel *channel,
const char *tmpbuf;
char *vbuf = videobuf_to_vmalloc(&buf->vb);
unsigned long last_frame;
- struct s2255_framei *frm;
if (!vbuf)
return;
last_frame = channel->last_frame;
if (last_frame != -1) {
- frm = &channel->buffer.frame[last_frame];
tmpbuf =
(const char *)channel->buffer.frame[last_frame].lpvbits;
switch (buf->fmt->fourcc) {
@@ -987,7 +985,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct videobuf_queue *q = &fh->vb_vidq;
struct s2255_mode mode;
int ret;
- int norm;
ret = vidioc_try_fmt_vid_cap(file, fh, f);
@@ -1018,7 +1015,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
channel->height = f->fmt.pix.height;
fh->vb_vidq.field = f->fmt.pix.field;
fh->type = f->type;
- norm = norm_minw(&channel->vdev);
if (channel->width > norm_minw(&channel->vdev)) {
if (channel->height > norm_minh(&channel->vdev)) {
if (channel->cap_parm.capturemode &
@@ -1826,8 +1822,7 @@ static void s2255_destroy(struct s2255_dev *dev)
usb_free_urb(dev->fw_data->fw_urb);
dev->fw_data->fw_urb = NULL;
}
- if (dev->fw_data->fw)
- release_firmware(dev->fw_data->fw);
+ release_firmware(dev->fw_data->fw);
kfree(dev->fw_data->pfw_data);
kfree(dev->fw_data);
/* reset the DSP so firmware can be reloaded next time */
@@ -1949,6 +1944,10 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
/* register 4 video devices */
channel->vdev = template;
channel->vdev.lock = &dev->lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &channel->vdev.flags);
channel->vdev.v4l2_dev = &dev->v4l2_dev;
video_set_drvdata(&channel->vdev, channel);
if (video_nr == -1)
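/*
 * Illustrative sketch, not part of the patch: the core-locking setup the
 * s2255 hunk above relies on. Assigning video_device::lock makes the V4L2
 * core serialize the ioctls with that mutex, and V4L2_FL_LOCK_ALL_FOPS (set
 * above) extends that serialization to the other file operations until the
 * driver grows its own locking. The "sketch_*" names are hypothetical, and
 * the fops/release/v4l2_dev setup is omitted for brevity.
 */
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <media/v4l2-dev.h>

static DEFINE_MUTEX(sketch_lock);
static struct video_device sketch_vdev;

static int sketch_register(void)
{
	sketch_vdev.lock = &sketch_lock;
	set_bit(V4L2_FL_LOCK_ALL_FOPS, &sketch_vdev.flags);
	return video_register_device(&sketch_vdev, VFL_TYPE_GRABBER, -1);
}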
diff --git a/drivers/media/video/s5p-fimc/Kconfig b/drivers/media/video/s5p-fimc/Kconfig
new file mode 100644
index 000000000000..a564f7eeb064
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/Kconfig
@@ -0,0 +1,48 @@
+
+config VIDEO_SAMSUNG_S5P_FIMC
+ bool "Samsung S5P/EXYNOS SoC camera interface driver (experimental)"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && PLAT_S5P && PM_RUNTIME
+ depends on EXPERIMENTAL
+ help
+ Say Y here to enable camera host interface devices for
+ Samsung S5P and EXYNOS SoC series.
+
+if VIDEO_SAMSUNG_S5P_FIMC
+
+config VIDEO_S5P_FIMC
+ tristate "S5P/EXYNOS4 FIMC/CAMIF camera interface driver"
+ depends on I2C
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is a V4L2 driver for Samsung S5P and EXYNOS4 SoC camera host
+ interface and video postprocessor (FIMC and FIMC-LITE) devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called s5p-fimc.
+
+config VIDEO_S5P_MIPI_CSIS
+ tristate "S5P/EXYNOS MIPI-CSI2 receiver (MIPI-CSIS) driver"
+ depends on REGULATOR
+ help
+ This is a V4L2 driver for Samsung S5P and EXYNOS4 SoC MIPI-CSI2
+ receiver (MIPI-CSIS) devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called s5p-csis.
+
+if ARCH_EXYNOS
+
+config VIDEO_EXYNOS_FIMC_LITE
+ tristate "EXYNOS FIMC-LITE camera interface driver"
+ depends on I2C
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ This is a V4L2 driver for Samsung EXYNOS4/5 SoC FIMC-LITE camera
+ host interface.
+
+ To compile this driver as a module, choose M here: the
+ module will be called exynos-fimc-lite.
+endif
+
+endif # VIDEO_SAMSUNG_S5P_FIMC
diff --git a/drivers/media/video/s5p-fimc/Makefile b/drivers/media/video/s5p-fimc/Makefile
index 33dec7f890e7..46485143e1ca 100644
--- a/drivers/media/video/s5p-fimc/Makefile
+++ b/drivers/media/video/s5p-fimc/Makefile
@@ -1,5 +1,7 @@
-s5p-fimc-objs := fimc-core.o fimc-reg.o fimc-capture.o fimc-mdevice.o
+s5p-fimc-objs := fimc-core.o fimc-reg.o fimc-m2m.o fimc-capture.o fimc-mdevice.o
+exynos-fimc-lite-objs += fimc-lite-reg.o fimc-lite.o
s5p-csis-objs := mipi-csis.o
obj-$(CONFIG_VIDEO_S5P_MIPI_CSIS) += s5p-csis.o
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) += s5p-fimc.o
+obj-$(CONFIG_VIDEO_EXYNOS_FIMC_LITE) += exynos-fimc-lite.o
+obj-$(CONFIG_VIDEO_S5P_FIMC) += s5p-fimc.o
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index 7e9b2c612b03..354574591908 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -1,8 +1,8 @@
/*
* Samsung S5P/EXYNOS4 SoC series camera interface (camera capture) driver
*
- * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
- * Author: Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -29,20 +29,22 @@
#include "fimc-mdevice.h"
#include "fimc-core.h"
+#include "fimc-reg.h"
-static int fimc_init_capture(struct fimc_dev *fimc)
+static int fimc_capture_hw_init(struct fimc_dev *fimc)
{
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct fimc_pipeline *p = &fimc->pipeline;
struct fimc_sensor_info *sensor;
unsigned long flags;
int ret = 0;
- if (fimc->pipeline.sensor == NULL || ctx == NULL)
+ if (p->subdevs[IDX_SENSOR] == NULL || ctx == NULL)
return -ENXIO;
if (ctx->s_frame.fmt == NULL)
return -EINVAL;
- sensor = v4l2_get_subdev_hostdata(fimc->pipeline.sensor);
+ sensor = v4l2_get_subdev_hostdata(p->subdevs[IDX_SENSOR]);
spin_lock_irqsave(&fimc->slock, flags);
fimc_prepare_dma_offset(ctx, &ctx->d_frame);
@@ -60,7 +62,7 @@ static int fimc_init_capture(struct fimc_dev *fimc)
fimc_hw_set_mainscaler(ctx);
fimc_hw_set_target_format(ctx);
fimc_hw_set_rotation(ctx);
- fimc_hw_set_effect(ctx, false);
+ fimc_hw_set_effect(ctx);
fimc_hw_set_output_path(ctx);
fimc_hw_set_out_dma(ctx);
if (fimc->variant->has_alpha)
@@ -71,6 +73,14 @@ static int fimc_init_capture(struct fimc_dev *fimc)
return ret;
}
+/*
+ * Reinitialize the driver so it is ready to start streaming again.
+ * Set fimc->state to indicate stream off and the hardware shut down state.
+ * If not suspending (@suspend is false), return any buffers to videobuf2.
+ * Otherwise put any owned buffers onto the pending buffers queue, so they
+ * can be re-spun when the device is being resumed. Also perform FIMC
+ * software reset and disable streaming on the whole pipeline if required.
+ */
static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
{
struct fimc_vid_cap *cap = &fimc->vid_cap;
@@ -83,7 +93,9 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
fimc->state &= ~(1 << ST_CAPT_RUN | 1 << ST_CAPT_SHUT |
1 << ST_CAPT_STREAM | 1 << ST_CAPT_ISP_STREAM);
- if (!suspend)
+ if (suspend)
+ fimc->state |= (1 << ST_CAPT_SUSPENDED);
+ else
fimc->state &= ~(1 << ST_CAPT_PEND | 1 << ST_CAPT_SUSPENDED);
/* Release unused buffers */
@@ -99,7 +111,6 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
else
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
- set_bit(ST_CAPT_SUSPENDED, &fimc->state);
fimc_hw_reset(fimc);
cap->buf_index = 0;
@@ -107,7 +118,7 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
spin_unlock_irqrestore(&fimc->slock, flags);
if (streaming)
- return fimc_pipeline_s_stream(fimc, 0);
+ return fimc_pipeline_s_stream(&fimc->pipeline, 0);
else
return 0;
}
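/*
 * Illustrative sketch, not part of the patch: the suspend-vs-stop buffer
 * handling described by the fimc_capture_state_cleanup() comment above. On a
 * plain stop the driver hands its buffers back to videobuf2 as errored; on
 * suspend it moves them to a pending list so they can be re-spun at resume.
 * The structure and list names here are hypothetical.
 */
#include <linux/list.h>
#include <linux/types.h>
#include <media/videobuf2-core.h>

struct sketch_cap_buf {
	struct vb2_buffer vb;
	struct list_head list;
};

static void sketch_release_active_bufs(struct list_head *active,
				       struct list_head *pending,
				       bool suspend)
{
	while (!list_empty(active)) {
		struct sketch_cap_buf *buf =
			list_first_entry(active, struct sketch_cap_buf, list);

		list_del(&buf->list);
		if (suspend)
			list_add_tail(&buf->list, pending); /* re-queue at resume */
		else
			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}
}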
@@ -138,32 +149,96 @@ static int fimc_stop_capture(struct fimc_dev *fimc, bool suspend)
* spinlock held. It updates the camera pixel crop, rotation and
* image flip in H/W.
*/
-int fimc_capture_config_update(struct fimc_ctx *ctx)
+static int fimc_capture_config_update(struct fimc_ctx *ctx)
{
struct fimc_dev *fimc = ctx->fimc_dev;
int ret;
- if (!test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
- return 0;
-
- spin_lock(&ctx->slock);
fimc_hw_set_camera_offset(fimc, &ctx->s_frame);
+
ret = fimc_set_scaler_info(ctx);
- if (ret == 0) {
- fimc_hw_set_prescaler(ctx);
- fimc_hw_set_mainscaler(ctx);
- fimc_hw_set_target_format(ctx);
- fimc_hw_set_rotation(ctx);
- fimc_prepare_dma_offset(ctx, &ctx->d_frame);
- fimc_hw_set_out_dma(ctx);
- if (fimc->variant->has_alpha)
- fimc_hw_set_rgb_alpha(ctx);
- clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
- }
- spin_unlock(&ctx->slock);
+ if (ret)
+ return ret;
+
+ fimc_hw_set_prescaler(ctx);
+ fimc_hw_set_mainscaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_hw_set_effect(ctx);
+ fimc_prepare_dma_offset(ctx, &ctx->d_frame);
+ fimc_hw_set_out_dma(ctx);
+ if (fimc->variant->has_alpha)
+ fimc_hw_set_rgb_alpha(ctx);
+
+ clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
return ret;
}
+void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf)
+{
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ struct fimc_vid_buffer *v_buf;
+ struct timeval *tv;
+ struct timespec ts;
+
+ if (test_and_clear_bit(ST_CAPT_SHUT, &fimc->state)) {
+ wake_up(&fimc->irq_queue);
+ goto done;
+ }
+
+ if (!list_empty(&cap->active_buf_q) &&
+ test_bit(ST_CAPT_RUN, &fimc->state) && deq_buf) {
+ ktime_get_real_ts(&ts);
+
+ v_buf = fimc_active_queue_pop(cap);
+
+ tv = &v_buf->vb.v4l2_buf.timestamp;
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ v_buf->vb.v4l2_buf.sequence = cap->frame_count++;
+
+ vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE);
+ }
+
+ if (!list_empty(&cap->pending_buf_q)) {
+
+ v_buf = fimc_pending_queue_pop(cap);
+ fimc_hw_set_output_addr(fimc, &v_buf->paddr, cap->buf_index);
+ v_buf->index = cap->buf_index;
+
+ /* Move the buffer to the capture active queue */
+ fimc_active_queue_add(cap, v_buf);
+
+ dbg("next frame: %d, done frame: %d",
+ fimc_hw_get_frame_index(fimc), v_buf->index);
+
+ if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
+ cap->buf_index = 0;
+ }
+
+ if (cap->active_buf_cnt == 0) {
+ if (deq_buf)
+ clear_bit(ST_CAPT_RUN, &fimc->state);
+
+ if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
+ cap->buf_index = 0;
+ } else {
+ set_bit(ST_CAPT_RUN, &fimc->state);
+ }
+
+ if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+ fimc_capture_config_update(cap->ctx);
+done:
+ if (cap->active_buf_cnt == 1) {
+ fimc_deactivate_capture(fimc);
+ clear_bit(ST_CAPT_STREAM, &fimc->state);
+ }
+
+ dbg("frame: %d, active_buf_cnt: %d",
+ fimc_hw_get_frame_index(fimc), cap->active_buf_cnt);
+}
+
+
static int start_streaming(struct vb2_queue *q, unsigned int count)
{
struct fimc_ctx *ctx = q->drv_priv;
@@ -174,9 +249,11 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
vid_cap->frame_count = 0;
- ret = fimc_init_capture(fimc);
- if (ret)
- goto error;
+ ret = fimc_capture_hw_init(fimc);
+ if (ret) {
+ fimc_capture_state_cleanup(fimc, false);
+ return ret;
+ }
set_bit(ST_CAPT_PEND, &fimc->state);
@@ -187,13 +264,10 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
fimc_activate_capture(ctx);
if (!test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state))
- fimc_pipeline_s_stream(fimc, 1);
+ fimc_pipeline_s_stream(&fimc->pipeline, 1);
}
return 0;
-error:
- fimc_capture_state_cleanup(fimc, false);
- return ret;
}
static int stop_streaming(struct vb2_queue *q)
@@ -214,7 +288,7 @@ int fimc_capture_suspend(struct fimc_dev *fimc)
int ret = fimc_stop_capture(fimc, suspend);
if (ret)
return ret;
- return fimc_pipeline_shutdown(fimc);
+ return fimc_pipeline_shutdown(&fimc->pipeline);
}
static void buffer_queue(struct vb2_buffer *vb);
@@ -230,9 +304,9 @@ int fimc_capture_resume(struct fimc_dev *fimc)
INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q);
vid_cap->buf_index = 0;
- fimc_pipeline_initialize(fimc, &fimc->vid_cap.vfd->entity,
+ fimc_pipeline_initialize(&fimc->pipeline, &vid_cap->vfd->entity,
false);
- fimc_init_capture(fimc);
+ fimc_capture_hw_init(fimc);
clear_bit(ST_CAPT_SUSPENDED, &fimc->state);
@@ -347,7 +421,7 @@ static void buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&fimc->slock, flags);
if (!test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state))
- fimc_pipeline_s_stream(fimc, 1);
+ fimc_pipeline_s_stream(&fimc->pipeline, 1);
return;
}
spin_unlock_irqrestore(&fimc->slock, flags);
@@ -389,15 +463,15 @@ int fimc_capture_ctrls_create(struct fimc_dev *fimc)
if (WARN_ON(vid_cap->ctx == NULL))
return -ENXIO;
- if (vid_cap->ctx->ctrls_rdy)
+ if (vid_cap->ctx->ctrls.ready)
return 0;
ret = fimc_ctrls_create(vid_cap->ctx);
- if (ret || vid_cap->user_subdev_api)
+ if (ret || vid_cap->user_subdev_api || !vid_cap->ctx->ctrls.ready)
return ret;
- return v4l2_ctrl_add_handler(&vid_cap->ctx->ctrl_handler,
- fimc->pipeline.sensor->ctrl_handler);
+ return v4l2_ctrl_add_handler(&vid_cap->ctx->ctrls.handler,
+ fimc->pipeline.subdevs[IDX_SENSOR]->ctrl_handler);
}
static int fimc_capture_set_default_format(struct fimc_dev *fimc);
@@ -420,7 +494,7 @@ static int fimc_capture_open(struct file *file)
pm_runtime_get_sync(&fimc->pdev->dev);
if (++fimc->vid_cap.refcnt == 1) {
- ret = fimc_pipeline_initialize(fimc,
+ ret = fimc_pipeline_initialize(&fimc->pipeline,
&fimc->vid_cap.vfd->entity, true);
if (ret < 0) {
dev_err(&fimc->pdev->dev,
@@ -448,7 +522,7 @@ static int fimc_capture_close(struct file *file)
if (--fimc->vid_cap.refcnt == 0) {
clear_bit(ST_CAPT_BUSY, &fimc->state);
fimc_stop_capture(fimc, false);
- fimc_pipeline_shutdown(fimc);
+ fimc_pipeline_shutdown(&fimc->pipeline);
clear_bit(ST_CAPT_SUSPENDED, &fimc->state);
}
@@ -495,7 +569,7 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
{
bool rotation = ctx->rotation == 90 || ctx->rotation == 270;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct samsung_fimc_variant *var = fimc->variant;
+ struct fimc_variant *var = fimc->variant;
struct fimc_pix_limit *pl = var->pix_limit;
struct fimc_frame *dst = &ctx->d_frame;
u32 depth, min_w, max_w, min_h, align_h = 3;
@@ -537,8 +611,13 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
}
/* Apply the scaler and the output DMA constraints */
max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
- min_w = ctx->state & FIMC_DST_CROP ? dst->width : var->min_out_pixsize;
- min_h = ctx->state & FIMC_DST_CROP ? dst->height : var->min_out_pixsize;
+ if (ctx->state & FIMC_COMPOSE) {
+ min_w = dst->offs_h + dst->width;
+ min_h = dst->offs_v + dst->height;
+ } else {
+ min_w = var->min_out_pixsize;
+ min_h = var->min_out_pixsize;
+ }
if (var->min_vsize_align == 1 && !rotation)
align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
@@ -556,12 +635,13 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
return ffmt;
}
-static void fimc_capture_try_crop(struct fimc_ctx *ctx, struct v4l2_rect *r,
- int pad)
+static void fimc_capture_try_selection(struct fimc_ctx *ctx,
+ struct v4l2_rect *r,
+ int target)
{
bool rotate = ctx->rotation == 90 || ctx->rotation == 270;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct samsung_fimc_variant *var = fimc->variant;
+ struct fimc_variant *var = fimc->variant;
struct fimc_pix_limit *pl = var->pix_limit;
struct fimc_frame *sink = &ctx->s_frame;
u32 max_w, max_h, min_w = 0, min_h = 0, min_sz;
@@ -575,7 +655,7 @@ static void fimc_capture_try_crop(struct fimc_ctx *ctx, struct v4l2_rect *r,
r->left = r->top = 0;
return;
}
- if (pad == FIMC_SD_PAD_SOURCE) {
+ if (target == V4L2_SEL_TGT_COMPOSE_ACTIVE) {
if (ctx->rotation != 90 && ctx->rotation != 270)
align_h = 1;
max_sc_h = min(SCALER_MAX_HRATIO, 1 << (ffs(sink->width) - 3));
@@ -589,8 +669,7 @@ static void fimc_capture_try_crop(struct fimc_ctx *ctx, struct v4l2_rect *r,
max_sc_h = max_sc_v = 1;
}
/*
- * For the crop rectangle at source pad the following constraints
- * must be met:
+ * For the compose rectangle the following constraints must be met:
* - it must fit in the sink pad format rectangle (f_width/f_height);
* - maximum downscaling ratio is 64;
* - maximum crop size depends if the rotator is used or not;
@@ -602,7 +681,8 @@ static void fimc_capture_try_crop(struct fimc_ctx *ctx, struct v4l2_rect *r,
rotate ? pl->out_rot_en_w : pl->out_rot_dis_w,
rotate ? sink->f_height : sink->f_width);
max_h = min_t(u32, FIMC_CAMIF_MAX_HEIGHT, sink->f_height);
- if (pad == FIMC_SD_PAD_SOURCE) {
+
+ if (target == V4L2_SEL_TGT_COMPOSE_ACTIVE) {
min_w = min_t(u32, max_w, sink->f_width / max_sc_h);
min_h = min_t(u32, max_h, sink->f_height / max_sc_v);
if (rotate) {
@@ -613,13 +693,13 @@ static void fimc_capture_try_crop(struct fimc_ctx *ctx, struct v4l2_rect *r,
v4l_bound_align_image(&r->width, min_w, max_w, ffs(min_sz) - 1,
&r->height, min_h, max_h, align_h,
align_sz);
- /* Adjust left/top if cropping rectangle is out of bounds */
+ /* Adjust left/top if crop/compose rectangle is out of bounds */
r->left = clamp_t(u32, r->left, 0, sink->f_width - r->width);
r->top = clamp_t(u32, r->top, 0, sink->f_height - r->height);
r->left = round_down(r->left, var->hor_offs_align);
- dbg("pad%d: (%d,%d)/%dx%d, sink fmt: %dx%d",
- pad, r->left, r->top, r->width, r->height,
+ dbg("target %#x: (%d,%d)/%dx%d, sink fmt: %dx%d",
+ target, r->left, r->top, r->width, r->height,
sink->f_width, sink->f_height);
}
@@ -669,8 +749,8 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
bool set)
{
struct fimc_dev *fimc = ctx->fimc_dev;
- struct v4l2_subdev *sd = fimc->pipeline.sensor;
- struct v4l2_subdev *csis = fimc->pipeline.csis;
+ struct v4l2_subdev *sd = fimc->pipeline.subdevs[IDX_SENSOR];
+ struct v4l2_subdev *csis = fimc->pipeline.subdevs[IDX_CSIS];
struct v4l2_subdev_format sfmt;
struct v4l2_mbus_framefmt *mf = &sfmt.format;
struct fimc_fmt *ffmt = NULL;
@@ -851,7 +931,7 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
set_frame_bounds(ff, pix->width, pix->height);
/* Reset the composition rectangle if not yet configured */
- if (!(ctx->state & FIMC_DST_CROP))
+ if (!(ctx->state & FIMC_COMPOSE))
set_frame_crop(ff, 0, 0, pix->width, pix->height);
fimc_capture_mark_jpeg_xfer(ctx, fimc_fmt_is_jpeg(ff->fmt->color));
@@ -878,7 +958,7 @@ static int fimc_cap_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
struct fimc_dev *fimc = video_drvdata(file);
- struct v4l2_subdev *sd = fimc->pipeline.sensor;
+ struct v4l2_subdev *sd = fimc->pipeline.subdevs[IDX_SENSOR];
if (i->index != 0)
return -EINVAL;
@@ -927,7 +1007,7 @@ static int fimc_pipeline_validate(struct fimc_dev *fimc)
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
/* Don't call FIMC subdev operation to avoid nested locking */
- if (sd == fimc->vid_cap.subdev) {
+ if (sd == &fimc->vid_cap.subdev) {
struct fimc_frame *ff = &vid_cap->ctx->s_frame;
sink_fmt.format.width = ff->f_width;
sink_fmt.format.height = ff->f_height;
@@ -970,7 +1050,8 @@ static int fimc_cap_streamon(struct file *file, void *priv,
if (fimc_capture_active(fimc))
return -EBUSY;
- media_entity_pipeline_start(&p->sensor->entity, p->pipe);
+ media_entity_pipeline_start(&p->subdevs[IDX_SENSOR]->entity,
+ p->m_pipeline);
if (fimc->vid_cap.user_subdev_api) {
ret = fimc_pipeline_validate(fimc);
@@ -984,7 +1065,7 @@ static int fimc_cap_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
struct fimc_dev *fimc = video_drvdata(file);
- struct v4l2_subdev *sd = fimc->pipeline.sensor;
+ struct v4l2_subdev *sd = fimc->pipeline.subdevs[IDX_SENSOR];
int ret;
ret = vb2_streamoff(&fimc->vid_cap.vbq, type);
@@ -1100,29 +1181,18 @@ static int fimc_cap_s_selection(struct file *file, void *fh,
struct v4l2_rect rect = s->r;
struct fimc_frame *f;
unsigned long flags;
- unsigned int pad;
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
- switch (s->target) {
- case V4L2_SEL_TGT_COMPOSE_DEFAULT:
- case V4L2_SEL_TGT_COMPOSE_BOUNDS:
- case V4L2_SEL_TGT_COMPOSE_ACTIVE:
+ if (s->target == V4L2_SEL_TGT_COMPOSE_ACTIVE)
f = &ctx->d_frame;
- pad = FIMC_SD_PAD_SOURCE;
- break;
- case V4L2_SEL_TGT_CROP_BOUNDS:
- case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_CROP_ACTIVE:
+ else if (s->target == V4L2_SEL_TGT_CROP_ACTIVE)
f = &ctx->s_frame;
- pad = FIMC_SD_PAD_SINK;
- break;
- default:
+ else
return -EINVAL;
- }
- fimc_capture_try_crop(ctx, &rect, pad);
+ fimc_capture_try_selection(ctx, &rect, s->target);
if (s->flags & V4L2_SEL_FLAG_LE &&
!enclosed_rectangle(&rect, &s->r))
@@ -1243,7 +1313,7 @@ void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
struct fimc_vid_buffer, list);
vb2_set_plane_payload(&buf->vb, 0, *((u32 *)arg));
}
- fimc_capture_irq_handler(fimc, true);
+ fimc_capture_irq_handler(fimc, 1);
fimc_deactivate_capture(fimc);
spin_unlock_irqrestore(&fimc->slock, irq_flags);
}
@@ -1334,77 +1404,122 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
ff->fmt = ffmt;
/* Reset the crop rectangle if required. */
- if (!(fmt->pad == FIMC_SD_PAD_SOURCE && (ctx->state & FIMC_DST_CROP)))
+ if (!(fmt->pad == FIMC_SD_PAD_SOURCE && (ctx->state & FIMC_COMPOSE)))
set_frame_crop(ff, 0, 0, mf->width, mf->height);
if (fmt->pad == FIMC_SD_PAD_SINK)
- ctx->state &= ~FIMC_DST_CROP;
+ ctx->state &= ~FIMC_COMPOSE;
mutex_unlock(&fimc->lock);
return 0;
}
-static int fimc_subdev_get_crop(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
- struct v4l2_rect *r = &crop->rect;
- struct fimc_frame *ff;
+ struct fimc_frame *f = &ctx->s_frame;
+ struct v4l2_rect *r = &sel->r;
+ struct v4l2_rect *try_sel;
- if (crop->which == V4L2_SUBDEV_FORMAT_TRY) {
- crop->rect = *v4l2_subdev_get_try_crop(fh, crop->pad);
+ if (sel->pad != FIMC_SD_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&fimc->lock);
+
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS:
+ f = &ctx->d_frame;
+ case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ r->width = f->o_width;
+ r->height = f->o_height;
+ r->left = 0;
+ r->top = 0;
+ mutex_unlock(&fimc->lock);
return 0;
+
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ try_sel = v4l2_subdev_get_try_crop(fh, sel->pad);
+ break;
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ try_sel = v4l2_subdev_get_try_compose(fh, sel->pad);
+ f = &ctx->d_frame;
+ break;
+ default:
+ mutex_unlock(&fimc->lock);
+ return -EINVAL;
}
- ff = crop->pad == FIMC_SD_PAD_SINK ?
- &ctx->s_frame : &ctx->d_frame;
- mutex_lock(&fimc->lock);
- r->left = ff->offs_h;
- r->top = ff->offs_v;
- r->width = ff->width;
- r->height = ff->height;
- mutex_unlock(&fimc->lock);
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ sel->r = *try_sel;
+ } else {
+ r->left = f->offs_h;
+ r->top = f->offs_v;
+ r->width = f->width;
+ r->height = f->height;
+ }
- dbg("ff:%p, pad%d: l:%d, t:%d, %dx%d, f_w: %d, f_h: %d",
- ff, crop->pad, r->left, r->top, r->width, r->height,
- ff->f_width, ff->f_height);
+ dbg("target %#x: l:%d, t:%d, %dx%d, f_w: %d, f_h: %d",
+ sel->pad, r->left, r->top, r->width, r->height,
+ f->f_width, f->f_height);
+ mutex_unlock(&fimc->lock);
return 0;
}
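
[Editor's note] On the subdev node the same rectangles are now exposed through the pad-level selection ioctls rather than the old crop ones. A rough user-space counterpart is sketched below, assuming the FIMC subdev exposes a /dev/v4l-subdevN node (it sets V4L2_SUBDEV_FL_HAS_DEVNODE) and that pad 0 corresponds to FIMC_SD_PAD_SINK; this is illustrative only.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>
	#include <linux/v4l2-subdev.h>

	/* Read the active crop rectangle on the FIMC sink pad. */
	static int get_sink_crop(int subdev_fd, struct v4l2_rect *r)
	{
		struct v4l2_subdev_selection sel;
		int ret;

		memset(&sel, 0, sizeof(sel));
		sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		sel.pad = 0;				/* FIMC_SD_PAD_SINK */
		sel.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL;

		ret = ioctl(subdev_fd, VIDIOC_SUBDEV_G_SELECTION, &sel);
		if (ret == 0)
			*r = sel.r;
		return ret;
	}
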
-static int fimc_subdev_set_crop(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
- struct v4l2_rect *r = &crop->rect;
- struct fimc_frame *ff;
+ struct fimc_frame *f = &ctx->s_frame;
+ struct v4l2_rect *r = &sel->r;
+ struct v4l2_rect *try_sel;
unsigned long flags;
- dbg("(%d,%d)/%dx%d", r->left, r->top, r->width, r->height);
-
- ff = crop->pad == FIMC_SD_PAD_SOURCE ?
- &ctx->d_frame : &ctx->s_frame;
+ if (sel->pad != FIMC_SD_PAD_SINK)
+ return -EINVAL;
mutex_lock(&fimc->lock);
- fimc_capture_try_crop(ctx, r, crop->pad);
+ fimc_capture_try_selection(ctx, r, V4L2_SEL_TGT_CROP_ACTIVE);
- if (crop->which == V4L2_SUBDEV_FORMAT_TRY) {
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS:
+ f = &ctx->d_frame;
+ case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ r->width = f->o_width;
+ r->height = f->o_height;
+ r->left = 0;
+ r->top = 0;
mutex_unlock(&fimc->lock);
- *v4l2_subdev_get_try_crop(fh, crop->pad) = *r;
return 0;
+
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ try_sel = v4l2_subdev_get_try_crop(fh, sel->pad);
+ break;
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ try_sel = v4l2_subdev_get_try_compose(fh, sel->pad);
+ f = &ctx->d_frame;
+ break;
+ default:
+ mutex_unlock(&fimc->lock);
+ return -EINVAL;
}
- spin_lock_irqsave(&fimc->slock, flags);
- set_frame_crop(ff, r->left, r->top, r->width, r->height);
- if (crop->pad == FIMC_SD_PAD_SOURCE)
- ctx->state |= FIMC_DST_CROP;
- set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
- spin_unlock_irqrestore(&fimc->slock, flags);
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *try_sel = sel->r;
+ } else {
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_frame_crop(f, r->left, r->top, r->width, r->height);
+ set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ if (sel->target == V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL)
+ ctx->state |= FIMC_COMPOSE;
+ }
- dbg("pad%d: (%d,%d)/%dx%d", crop->pad, r->left, r->top,
+ dbg("target %#x: (%d,%d)/%dx%d", sel->target, r->left, r->top,
r->width, r->height);
mutex_unlock(&fimc->lock);
@@ -1413,63 +1528,16 @@ static int fimc_subdev_set_crop(struct v4l2_subdev *sd,
static struct v4l2_subdev_pad_ops fimc_subdev_pad_ops = {
.enum_mbus_code = fimc_subdev_enum_mbus_code,
+ .get_selection = fimc_subdev_get_selection,
+ .set_selection = fimc_subdev_set_selection,
.get_fmt = fimc_subdev_get_fmt,
.set_fmt = fimc_subdev_set_fmt,
- .get_crop = fimc_subdev_get_crop,
- .set_crop = fimc_subdev_set_crop,
};
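
[Editor's note] With get_crop/set_crop dropped from the pad ops, a caller that previously used the crop operations can express the same request through the selection entry points; conceptually it is just a repacking of the rectangle, roughly as sketched below. The wrapper name is illustrative and not part of the driver.

	#include <media/v4l2-subdev.h>

	/* Illustrative only: how an old crop request maps onto the new
	 * selection op; 'sd' and 'fh' come from the caller's context. */
	static int legacy_get_crop(struct v4l2_subdev *sd,
				   struct v4l2_subdev_fh *fh,
				   struct v4l2_subdev_crop *crop)
	{
		struct v4l2_subdev_selection sel = {
			.which  = crop->which,
			.pad    = crop->pad,
			.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL,
		};
		int ret = v4l2_subdev_call(sd, pad, get_selection, fh, &sel);

		if (ret == 0)
			crop->rect = sel.r;
		return ret;
	}
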
static struct v4l2_subdev_ops fimc_subdev_ops = {
.pad = &fimc_subdev_pad_ops,
};
-static int fimc_create_capture_subdev(struct fimc_dev *fimc,
- struct v4l2_device *v4l2_dev)
-{
- struct v4l2_subdev *sd;
- int ret;
-
- sd = kzalloc(sizeof(*sd), GFP_KERNEL);
- if (!sd)
- return -ENOMEM;
-
- v4l2_subdev_init(sd, &fimc_subdev_ops);
- sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
- snprintf(sd->name, sizeof(sd->name), "FIMC.%d", fimc->pdev->id);
-
- fimc->vid_cap.sd_pads[FIMC_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- fimc->vid_cap.sd_pads[FIMC_SD_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
- ret = media_entity_init(&sd->entity, FIMC_SD_PADS_NUM,
- fimc->vid_cap.sd_pads, 0);
- if (ret)
- goto me_err;
- ret = v4l2_device_register_subdev(v4l2_dev, sd);
- if (ret)
- goto sd_err;
-
- fimc->vid_cap.subdev = sd;
- v4l2_set_subdevdata(sd, fimc);
- sd->entity.ops = &fimc_sd_media_ops;
- return 0;
-sd_err:
- media_entity_cleanup(&sd->entity);
-me_err:
- kfree(sd);
- return ret;
-}
-
-static void fimc_destroy_capture_subdev(struct fimc_dev *fimc)
-{
- struct v4l2_subdev *sd = fimc->vid_cap.subdev;
-
- if (!sd)
- return;
- media_entity_cleanup(&sd->entity);
- v4l2_device_unregister_subdev(sd);
- kfree(sd);
- fimc->vid_cap.subdev = NULL;
-}
-
/* Set default format at the sensor and host interface */
static int fimc_capture_set_default_format(struct fimc_dev *fimc)
{
@@ -1488,7 +1556,7 @@ static int fimc_capture_set_default_format(struct fimc_dev *fimc)
}
/* fimc->lock must be already initialized */
-int fimc_register_capture_device(struct fimc_dev *fimc,
+static int fimc_register_capture_device(struct fimc_dev *fimc,
struct v4l2_device *v4l2_dev)
{
struct video_device *vfd;
@@ -1502,11 +1570,11 @@ int fimc_register_capture_device(struct fimc_dev *fimc,
return -ENOMEM;
ctx->fimc_dev = fimc;
- ctx->in_path = FIMC_CAMERA;
- ctx->out_path = FIMC_DMA;
+ ctx->in_path = FIMC_IO_CAMERA;
+ ctx->out_path = FIMC_IO_DMA;
ctx->state = FIMC_CTX_CAP;
ctx->s_frame.fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0);
- ctx->d_frame.fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0);
+ ctx->d_frame.fmt = ctx->s_frame.fmt;
vfd = video_device_alloc();
if (!vfd) {
@@ -1514,8 +1582,7 @@ int fimc_register_capture_device(struct fimc_dev *fimc,
goto err_vd_alloc;
}
- snprintf(vfd->name, sizeof(vfd->name), "%s.capture",
- dev_name(&fimc->pdev->dev));
+ snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.capture", fimc->id);
vfd->fops = &fimc_capture_fops;
vfd->ioctl_ops = &fimc_capture_ioctl_ops;
@@ -1523,6 +1590,10 @@ int fimc_register_capture_device(struct fimc_dev *fimc,
vfd->minor = -1;
vfd->release = video_device_release;
vfd->lock = &fimc->lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
video_set_drvdata(vfd, fimc);
vid_cap = &fimc->vid_cap;
@@ -1533,7 +1604,6 @@ int fimc_register_capture_device(struct fimc_dev *fimc,
INIT_LIST_HEAD(&vid_cap->pending_buf_q);
INIT_LIST_HEAD(&vid_cap->active_buf_q);
- spin_lock_init(&ctx->slock);
vid_cap->ctx = ctx;
q = &fimc->vid_cap.vbq;
@@ -1547,18 +1617,22 @@ int fimc_register_capture_device(struct fimc_dev *fimc,
vb2_queue_init(q);
- fimc->vid_cap.vd_pad.flags = MEDIA_PAD_FL_SINK;
- ret = media_entity_init(&vfd->entity, 1, &fimc->vid_cap.vd_pad, 0);
+ vid_cap->vd_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_init(&vfd->entity, 1, &vid_cap->vd_pad, 0);
if (ret)
goto err_ent;
- ret = fimc_create_capture_subdev(fimc, v4l2_dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
if (ret)
- goto err_sd_reg;
+ goto err_vd;
- vfd->ctrl_handler = &ctx->ctrl_handler;
+ v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+
+ vfd->ctrl_handler = &ctx->ctrls.handler;
return 0;
-err_sd_reg:
+err_vd:
media_entity_cleanup(&vfd->entity);
err_ent:
video_device_release(vfd);
@@ -1567,17 +1641,73 @@ err_vd_alloc:
return ret;
}
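
[Editor's note] The capture video node is now registered from fimc_register_capture_device() itself and named "fimc.<id>.capture". If it needs to be located from user space, one way is to match that name through the sysfs attribute that exposes the video_device name; the sketch below is an assumption-laden illustration (the upper bound of 64 nodes is arbitrary), not code from the patch.

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	/* Find the capture node for a given FIMC instance by its name. */
	static int find_fimc_capture(char *path, size_t len, int fimc_id)
	{
		char name[64], sysfs[64], want[32];
		FILE *f;
		int i;

		snprintf(want, sizeof(want), "fimc.%d.capture", fimc_id);
		for (i = 0; i < 64; i++) {
			snprintf(sysfs, sizeof(sysfs),
				 "/sys/class/video4linux/video%d/name", i);
			f = fopen(sysfs, "r");
			if (!f)
				continue;
			if (fgets(name, sizeof(name), f) &&
			    !strncmp(name, want, strlen(want))) {
				fclose(f);
				snprintf(path, len, "/dev/video%d", i);
				return 0;
			}
			fclose(f);
		}
		return -1;
	}
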
-void fimc_unregister_capture_device(struct fimc_dev *fimc)
+static int fimc_capture_subdev_registered(struct v4l2_subdev *sd)
{
- struct video_device *vfd = fimc->vid_cap.vfd;
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ int ret;
+
+ ret = fimc_register_m2m_device(fimc, sd->v4l2_dev);
+ if (ret)
+ return ret;
- if (vfd) {
- media_entity_cleanup(&vfd->entity);
- /* Can also be called if video device was
- not registered */
- video_unregister_device(vfd);
+ ret = fimc_register_capture_device(fimc, sd->v4l2_dev);
+ if (ret)
+ fimc_unregister_m2m_device(fimc);
+
+ return ret;
+}
+
+static void fimc_capture_subdev_unregistered(struct v4l2_subdev *sd)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+
+ if (fimc == NULL)
+ return;
+
+ fimc_unregister_m2m_device(fimc);
+
+ if (fimc->vid_cap.vfd) {
+ media_entity_cleanup(&fimc->vid_cap.vfd->entity);
+ video_unregister_device(fimc->vid_cap.vfd);
+ fimc->vid_cap.vfd = NULL;
}
- fimc_destroy_capture_subdev(fimc);
+
kfree(fimc->vid_cap.ctx);
fimc->vid_cap.ctx = NULL;
}
+
+static const struct v4l2_subdev_internal_ops fimc_capture_sd_internal_ops = {
+ .registered = fimc_capture_subdev_registered,
+ .unregistered = fimc_capture_subdev_unregistered,
+};
+
+int fimc_initialize_capture_subdev(struct fimc_dev *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->vid_cap.subdev;
+ int ret;
+
+ v4l2_subdev_init(sd, &fimc_subdev_ops);
+ sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, sizeof(sd->name), "FIMC.%d", fimc->pdev->id);
+
+ fimc->vid_cap.sd_pads[FIMC_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ fimc->vid_cap.sd_pads[FIMC_SD_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&sd->entity, FIMC_SD_PADS_NUM,
+ fimc->vid_cap.sd_pads, 0);
+ if (ret)
+ return ret;
+
+ sd->entity.ops = &fimc_sd_media_ops;
+ sd->internal_ops = &fimc_capture_sd_internal_ops;
+ v4l2_set_subdevdata(sd, fimc);
+ return 0;
+}
+
+void fimc_unregister_capture_subdev(struct fimc_dev *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->vid_cap.subdev;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_set_subdevdata(sd, NULL);
+}
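
[Editor's note] The capture subdev is now embedded in fimc_vid_cap and wired up with internal_ops, so the m2m and capture video nodes are created only once the subdev is registered with a v4l2_device (typically by the media device driver). As far as it can be read from this patch, the intended flow is roughly the sketch below; the caller name is an assumption.

	#include <media/v4l2-device.h>

	/* Rough sketch of the registration sequence implied above. */
	static int attach_fimc_capture(struct v4l2_device *v4l2_dev,
				       struct fimc_dev *fimc)
	{
		/* fimc_initialize_capture_subdev() already ran in fimc_probe(). */
		int ret = v4l2_device_register_subdev(v4l2_dev,
						      &fimc->vid_cap.subdev);
		/*
		 * Registering the subdev invokes its internal_ops->registered
		 * callback, i.e. fimc_capture_subdev_registered(), which in
		 * turn creates the m2m and capture video devices.
		 */
		return ret;
	}
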
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index e09ba7b0076e..fedcd561ba27 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -1,8 +1,8 @@
/*
- * Samsung S5P/EXYNOS4 SoC series camera interface (video postprocessor) driver
+ * Samsung S5P/EXYNOS4 SoC series FIMC (CAMIF) driver
*
- * Copyright (C) 2010-2011 Samsung Electronics Co., Ltd.
- * Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2010-2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
@@ -28,6 +28,7 @@
#include <media/videobuf2-dma-contig.h>
#include "fimc-core.h"
+#include "fimc-reg.h"
#include "fimc-mdevice.h"
static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
@@ -39,7 +40,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "RGB565",
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = { 16 },
- .color = S5P_FIMC_RGB565,
+ .color = FIMC_FMT_RGB565,
.memplanes = 1,
.colplanes = 1,
.flags = FMT_FLAGS_M2M,
@@ -47,7 +48,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "BGR666",
.fourcc = V4L2_PIX_FMT_BGR666,
.depth = { 32 },
- .color = S5P_FIMC_RGB666,
+ .color = FIMC_FMT_RGB666,
.memplanes = 1,
.colplanes = 1,
.flags = FMT_FLAGS_M2M,
@@ -55,7 +56,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "ARGB8888, 32 bpp",
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = { 32 },
- .color = S5P_FIMC_RGB888,
+ .color = FIMC_FMT_RGB888,
.memplanes = 1,
.colplanes = 1,
.flags = FMT_FLAGS_M2M | FMT_HAS_ALPHA,
@@ -63,7 +64,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "ARGB1555",
.fourcc = V4L2_PIX_FMT_RGB555,
.depth = { 16 },
- .color = S5P_FIMC_RGB555,
+ .color = FIMC_FMT_RGB555,
.memplanes = 1,
.colplanes = 1,
.flags = FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
@@ -71,7 +72,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "ARGB4444",
.fourcc = V4L2_PIX_FMT_RGB444,
.depth = { 16 },
- .color = S5P_FIMC_RGB444,
+ .color = FIMC_FMT_RGB444,
.memplanes = 1,
.colplanes = 1,
.flags = FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
@@ -79,7 +80,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:2 packed, YCbYCr",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = { 16 },
- .color = S5P_FIMC_YCBYCR422,
+ .color = FIMC_FMT_YCBYCR422,
.memplanes = 1,
.colplanes = 1,
.mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
@@ -88,7 +89,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:2 packed, CbYCrY",
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = { 16 },
- .color = S5P_FIMC_CBYCRY422,
+ .color = FIMC_FMT_CBYCRY422,
.memplanes = 1,
.colplanes = 1,
.mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
@@ -97,7 +98,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:2 packed, CrYCbY",
.fourcc = V4L2_PIX_FMT_VYUY,
.depth = { 16 },
- .color = S5P_FIMC_CRYCBY422,
+ .color = FIMC_FMT_CRYCBY422,
.memplanes = 1,
.colplanes = 1,
.mbus_code = V4L2_MBUS_FMT_VYUY8_2X8,
@@ -106,7 +107,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:2 packed, YCrYCb",
.fourcc = V4L2_PIX_FMT_YVYU,
.depth = { 16 },
- .color = S5P_FIMC_YCRYCB422,
+ .color = FIMC_FMT_YCRYCB422,
.memplanes = 1,
.colplanes = 1,
.mbus_code = V4L2_MBUS_FMT_YVYU8_2X8,
@@ -115,7 +116,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:2 planar, Y/Cb/Cr",
.fourcc = V4L2_PIX_FMT_YUV422P,
.depth = { 12 },
- .color = S5P_FIMC_YCBYCR422,
+ .color = FIMC_FMT_YCBYCR422,
.memplanes = 1,
.colplanes = 3,
.flags = FMT_FLAGS_M2M,
@@ -123,7 +124,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:2 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV16,
.depth = { 16 },
- .color = S5P_FIMC_YCBYCR422,
+ .color = FIMC_FMT_YCBYCR422,
.memplanes = 1,
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
@@ -131,7 +132,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:2 planar, Y/CrCb",
.fourcc = V4L2_PIX_FMT_NV61,
.depth = { 16 },
- .color = S5P_FIMC_YCRYCB422,
+ .color = FIMC_FMT_YCRYCB422,
.memplanes = 1,
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
@@ -139,7 +140,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:0 planar, YCbCr",
.fourcc = V4L2_PIX_FMT_YUV420,
.depth = { 12 },
- .color = S5P_FIMC_YCBCR420,
+ .color = FIMC_FMT_YCBCR420,
.memplanes = 1,
.colplanes = 3,
.flags = FMT_FLAGS_M2M,
@@ -147,14 +148,14 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12,
.depth = { 12 },
- .color = S5P_FIMC_YCBCR420,
+ .color = FIMC_FMT_YCBCR420,
.memplanes = 1,
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
}, {
.name = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12M,
- .color = S5P_FIMC_YCBCR420,
+ .color = FIMC_FMT_YCBCR420,
.depth = { 8, 4 },
.memplanes = 2,
.colplanes = 2,
@@ -162,7 +163,7 @@ static struct fimc_fmt fimc_formats[] = {
}, {
.name = "YUV 4:2:0 non-contiguous 3-planar, Y/Cb/Cr",
.fourcc = V4L2_PIX_FMT_YUV420M,
- .color = S5P_FIMC_YCBCR420,
+ .color = FIMC_FMT_YCBCR420,
.depth = { 8, 2, 2 },
.memplanes = 3,
.colplanes = 3,
@@ -170,7 +171,7 @@ static struct fimc_fmt fimc_formats[] = {
}, {
.name = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr, tiled",
.fourcc = V4L2_PIX_FMT_NV12MT,
- .color = S5P_FIMC_YCBCR420,
+ .color = FIMC_FMT_YCBCR420,
.depth = { 8, 4 },
.memplanes = 2,
.colplanes = 2,
@@ -178,7 +179,7 @@ static struct fimc_fmt fimc_formats[] = {
}, {
.name = "JPEG encoded data",
.fourcc = V4L2_PIX_FMT_JPEG,
- .color = S5P_FIMC_JPEG,
+ .color = FIMC_FMT_JPEG,
.depth = { 8 },
.memplanes = 1,
.colplanes = 1,
@@ -187,12 +188,12 @@ static struct fimc_fmt fimc_formats[] = {
},
};
-static unsigned int get_m2m_fmt_flags(unsigned int stream_type)
+struct fimc_fmt *fimc_get_format(unsigned int index)
{
- if (stream_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- return FMT_FLAGS_M2M_IN;
- else
- return FMT_FLAGS_M2M_OUT;
+ if (index >= ARRAY_SIZE(fimc_formats))
+ return NULL;
+
+ return &fimc_formats[index];
}
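
[Editor's note] fimc_get_format() replaces the per-queue flag helper with a simple indexed accessor over fimc_formats[], returning NULL past the end of the table. A caller can therefore walk the whole table as in the illustrative loop below (not taken from the patch).

	/* Illustrative: enumerate every format the core knows about. */
	static void fimc_dump_formats(void)
	{
		struct fimc_fmt *fmt;
		unsigned int i = 0;

		while ((fmt = fimc_get_format(i)) != NULL) {
			pr_info("fimc fmt %u: %s (fourcc %#x)\n",
				i, fmt->name, fmt->fourcc);
			i++;
		}
	}
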
int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
@@ -230,7 +231,7 @@ static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
int fimc_set_scaler_info(struct fimc_ctx *ctx)
{
- struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+ struct fimc_variant *variant = ctx->fimc_dev->variant;
struct device *dev = &ctx->fimc_dev->pdev->dev;
struct fimc_scaler *sc = &ctx->scaler;
struct fimc_frame *s_frame = &ctx->s_frame;
@@ -293,126 +294,9 @@ int fimc_set_scaler_info(struct fimc_ctx *ctx)
return 0;
}
-static void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
-{
- struct vb2_buffer *src_vb, *dst_vb;
-
- if (!ctx || !ctx->m2m_ctx)
- return;
-
- src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
-
- if (src_vb && dst_vb) {
- v4l2_m2m_buf_done(src_vb, vb_state);
- v4l2_m2m_buf_done(dst_vb, vb_state);
- v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
- ctx->m2m_ctx);
- }
-}
-
-/* Complete the transaction which has been scheduled for execution. */
-static int fimc_m2m_shutdown(struct fimc_ctx *ctx)
-{
- struct fimc_dev *fimc = ctx->fimc_dev;
- int ret;
-
- if (!fimc_m2m_pending(fimc))
- return 0;
-
- fimc_ctx_state_lock_set(FIMC_CTX_SHUT, ctx);
-
- ret = wait_event_timeout(fimc->irq_queue,
- !fimc_ctx_state_is_set(FIMC_CTX_SHUT, ctx),
- FIMC_SHUTDOWN_TIMEOUT);
-
- return ret == 0 ? -ETIMEDOUT : ret;
-}
-
-static int start_streaming(struct vb2_queue *q, unsigned int count)
-{
- struct fimc_ctx *ctx = q->drv_priv;
- int ret;
-
- ret = pm_runtime_get_sync(&ctx->fimc_dev->pdev->dev);
- return ret > 0 ? 0 : ret;
-}
-
-static int stop_streaming(struct vb2_queue *q)
-{
- struct fimc_ctx *ctx = q->drv_priv;
- int ret;
-
- ret = fimc_m2m_shutdown(ctx);
- if (ret == -ETIMEDOUT)
- fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
-
- pm_runtime_put(&ctx->fimc_dev->pdev->dev);
- return 0;
-}
-
-void fimc_capture_irq_handler(struct fimc_dev *fimc, bool final)
-{
- struct fimc_vid_cap *cap = &fimc->vid_cap;
- struct fimc_vid_buffer *v_buf;
- struct timeval *tv;
- struct timespec ts;
-
- if (test_and_clear_bit(ST_CAPT_SHUT, &fimc->state)) {
- wake_up(&fimc->irq_queue);
- return;
- }
-
- if (!list_empty(&cap->active_buf_q) &&
- test_bit(ST_CAPT_RUN, &fimc->state) && final) {
- ktime_get_real_ts(&ts);
-
- v_buf = fimc_active_queue_pop(cap);
-
- tv = &v_buf->vb.v4l2_buf.timestamp;
- tv->tv_sec = ts.tv_sec;
- tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- v_buf->vb.v4l2_buf.sequence = cap->frame_count++;
-
- vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE);
- }
-
- if (!list_empty(&cap->pending_buf_q)) {
-
- v_buf = fimc_pending_queue_pop(cap);
- fimc_hw_set_output_addr(fimc, &v_buf->paddr, cap->buf_index);
- v_buf->index = cap->buf_index;
-
- /* Move the buffer to the capture active queue */
- fimc_active_queue_add(cap, v_buf);
-
- dbg("next frame: %d, done frame: %d",
- fimc_hw_get_frame_index(fimc), v_buf->index);
-
- if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
- cap->buf_index = 0;
- }
-
- if (cap->active_buf_cnt == 0) {
- if (final)
- clear_bit(ST_CAPT_RUN, &fimc->state);
-
- if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
- cap->buf_index = 0;
- } else {
- set_bit(ST_CAPT_RUN, &fimc->state);
- }
-
- fimc_capture_config_update(cap->ctx);
-
- dbg("frame: %d, active_buf_cnt: %d",
- fimc_hw_get_frame_index(fimc), cap->active_buf_cnt);
-}
-
static irqreturn_t fimc_irq_handler(int irq, void *priv)
{
struct fimc_dev *fimc = priv;
- struct fimc_vid_cap *cap = &fimc->vid_cap;
struct fimc_ctx *ctx;
fimc_hw_clear_irq(fimc);
@@ -430,21 +314,16 @@ static irqreturn_t fimc_irq_handler(int irq, void *priv)
spin_unlock(&fimc->slock);
fimc_m2m_job_finish(ctx, VB2_BUF_STATE_DONE);
- spin_lock(&ctx->slock);
if (ctx->state & FIMC_CTX_SHUT) {
ctx->state &= ~FIMC_CTX_SHUT;
wake_up(&fimc->irq_queue);
}
- spin_unlock(&ctx->slock);
+ return IRQ_HANDLED;
}
- return IRQ_HANDLED;
} else if (test_bit(ST_CAPT_PEND, &fimc->state)) {
- fimc_capture_irq_handler(fimc,
- !test_bit(ST_CAPT_JPEG, &fimc->state));
- if (cap->active_buf_cnt == 1) {
- fimc_deactivate_capture(fimc);
- clear_bit(ST_CAPT_STREAM, &fimc->state);
- }
+ int last_buf = test_bit(ST_CAPT_JPEG, &fimc->state) &&
+ fimc->vid_cap.reqbufs_count == 1;
+ fimc_capture_irq_handler(fimc, !last_buf);
}
out:
spin_unlock(&fimc->slock);
@@ -482,7 +361,7 @@ int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
case 3:
paddr->cb = (u32)(paddr->y + pix_size);
/* decompose Y into Y/Cb/Cr */
- if (S5P_FIMC_YCBCR420 == frame->fmt->color)
+ if (FIMC_FMT_YCBCR420 == frame->fmt->color)
paddr->cr = (u32)(paddr->cb
+ (pix_size >> 2));
else /* 422 */
@@ -510,40 +389,40 @@ int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
void fimc_set_yuv_order(struct fimc_ctx *ctx)
{
/* The one only mode supported in SoC. */
- ctx->in_order_2p = S5P_FIMC_LSB_CRCB;
- ctx->out_order_2p = S5P_FIMC_LSB_CRCB;
+ ctx->in_order_2p = FIMC_REG_CIOCTRL_ORDER422_2P_LSB_CRCB;
+ ctx->out_order_2p = FIMC_REG_CIOCTRL_ORDER422_2P_LSB_CRCB;
/* Set order for 1 plane input formats. */
switch (ctx->s_frame.fmt->color) {
- case S5P_FIMC_YCRYCB422:
- ctx->in_order_1p = S5P_MSCTRL_ORDER422_CBYCRY;
+ case FIMC_FMT_YCRYCB422:
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_CBYCRY;
break;
- case S5P_FIMC_CBYCRY422:
- ctx->in_order_1p = S5P_MSCTRL_ORDER422_YCRYCB;
+ case FIMC_FMT_CBYCRY422:
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_YCRYCB;
break;
- case S5P_FIMC_CRYCBY422:
- ctx->in_order_1p = S5P_MSCTRL_ORDER422_YCBYCR;
+ case FIMC_FMT_CRYCBY422:
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_YCBYCR;
break;
- case S5P_FIMC_YCBYCR422:
+ case FIMC_FMT_YCBYCR422:
default:
- ctx->in_order_1p = S5P_MSCTRL_ORDER422_CRYCBY;
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_CRYCBY;
break;
}
dbg("ctx->in_order_1p= %d", ctx->in_order_1p);
switch (ctx->d_frame.fmt->color) {
- case S5P_FIMC_YCRYCB422:
- ctx->out_order_1p = S5P_CIOCTRL_ORDER422_CBYCRY;
+ case FIMC_FMT_YCRYCB422:
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_CBYCRY;
break;
- case S5P_FIMC_CBYCRY422:
- ctx->out_order_1p = S5P_CIOCTRL_ORDER422_YCRYCB;
+ case FIMC_FMT_CBYCRY422:
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_YCRYCB;
break;
- case S5P_FIMC_CRYCBY422:
- ctx->out_order_1p = S5P_CIOCTRL_ORDER422_YCBYCR;
+ case FIMC_FMT_CRYCBY422:
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_YCBYCR;
break;
- case S5P_FIMC_YCBYCR422:
+ case FIMC_FMT_YCBYCR422:
default:
- ctx->out_order_1p = S5P_CIOCTRL_ORDER422_CRYCBY;
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_CRYCBY;
break;
}
dbg("ctx->out_order_1p= %d", ctx->out_order_1p);
@@ -551,7 +430,7 @@ void fimc_set_yuv_order(struct fimc_ctx *ctx)
void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
{
- struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+ struct fimc_variant *variant = ctx->fimc_dev->variant;
u32 i, depth = 0;
for (i = 0; i < f->fmt->colplanes; i++)
@@ -574,7 +453,7 @@ void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
f->dma_offset.cb_h >>= 1;
f->dma_offset.cr_h >>= 1;
}
- if (f->fmt->color == S5P_FIMC_YCBCR420) {
+ if (f->fmt->color == FIMC_FMT_YCBCR420) {
f->dma_offset.cb_v >>= 1;
f->dma_offset.cr_v >>= 1;
}
@@ -584,203 +463,58 @@ void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
f->fmt->color, f->dma_offset.y_h, f->dma_offset.y_v);
}
-/**
- * fimc_prepare_config - check dimensions, operation and color mode
- * and pre-calculate offset and the scaling coefficients.
- *
- * @ctx: hardware context information
- * @flags: flags indicating which parameters to check/update
- *
- * Return: 0 if dimensions are valid or non zero otherwise.
- */
-int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
-{
- struct fimc_frame *s_frame, *d_frame;
- struct vb2_buffer *vb = NULL;
- int ret = 0;
-
- s_frame = &ctx->s_frame;
- d_frame = &ctx->d_frame;
-
- if (flags & FIMC_PARAMS) {
- /* Prepare the DMA offset ratios for scaler. */
- fimc_prepare_dma_offset(ctx, &ctx->s_frame);
- fimc_prepare_dma_offset(ctx, &ctx->d_frame);
-
- if (s_frame->height > (SCALER_MAX_VRATIO * d_frame->height) ||
- s_frame->width > (SCALER_MAX_HRATIO * d_frame->width)) {
- err("out of scaler range");
- return -EINVAL;
- }
- fimc_set_yuv_order(ctx);
- }
-
- if (flags & FIMC_SRC_ADDR) {
- vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- ret = fimc_prepare_addr(ctx, vb, s_frame, &s_frame->paddr);
- if (ret)
- return ret;
- }
-
- if (flags & FIMC_DST_ADDR) {
- vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
- ret = fimc_prepare_addr(ctx, vb, d_frame, &d_frame->paddr);
- }
-
- return ret;
-}
-
-static void fimc_dma_run(void *priv)
-{
- struct fimc_ctx *ctx = priv;
- struct fimc_dev *fimc;
- unsigned long flags;
- u32 ret;
-
- if (WARN(!ctx, "null hardware context\n"))
- return;
-
- fimc = ctx->fimc_dev;
- spin_lock_irqsave(&fimc->slock, flags);
- set_bit(ST_M2M_PEND, &fimc->state);
-
- spin_lock(&ctx->slock);
- ctx->state |= (FIMC_SRC_ADDR | FIMC_DST_ADDR);
- ret = fimc_prepare_config(ctx, ctx->state);
- if (ret)
- goto dma_unlock;
-
- /* Reconfigure hardware if the context has changed. */
- if (fimc->m2m.ctx != ctx) {
- ctx->state |= FIMC_PARAMS;
- fimc->m2m.ctx = ctx;
- }
- fimc_hw_set_input_addr(fimc, &ctx->s_frame.paddr);
-
- if (ctx->state & FIMC_PARAMS) {
- fimc_hw_set_input_path(ctx);
- fimc_hw_set_in_dma(ctx);
- ret = fimc_set_scaler_info(ctx);
- if (ret) {
- spin_unlock(&fimc->slock);
- goto dma_unlock;
- }
- fimc_hw_set_prescaler(ctx);
- fimc_hw_set_mainscaler(ctx);
- fimc_hw_set_target_format(ctx);
- fimc_hw_set_rotation(ctx);
- fimc_hw_set_effect(ctx, false);
- }
-
- fimc_hw_set_output_path(ctx);
- if (ctx->state & (FIMC_DST_ADDR | FIMC_PARAMS))
- fimc_hw_set_output_addr(fimc, &ctx->d_frame.paddr, -1);
-
- if (ctx->state & FIMC_PARAMS) {
- fimc_hw_set_out_dma(ctx);
- if (fimc->variant->has_alpha)
- fimc_hw_set_rgb_alpha(ctx);
- }
-
- fimc_activate_capture(ctx);
-
- ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP |
- FIMC_SRC_FMT | FIMC_DST_FMT);
- fimc_hw_activate_input_dma(fimc, true);
-dma_unlock:
- spin_unlock(&ctx->slock);
- spin_unlock_irqrestore(&fimc->slock, flags);
-}
-
-static void fimc_job_abort(void *priv)
+int fimc_set_color_effect(struct fimc_ctx *ctx, enum v4l2_colorfx colorfx)
{
- fimc_m2m_shutdown(priv);
-}
-
-static int fimc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
- unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *allocators[])
-{
- struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
- struct fimc_frame *f;
- int i;
+ struct fimc_effect *effect = &ctx->effect;
- f = ctx_get_frame(ctx, vq->type);
- if (IS_ERR(f))
- return PTR_ERR(f);
- /*
- * Return number of non-contigous planes (plane buffers)
- * depending on the configured color format.
- */
- if (!f->fmt)
+ switch (colorfx) {
+ case V4L2_COLORFX_NONE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
+ break;
+ case V4L2_COLORFX_BW:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARBITRARY;
+ effect->pat_cb = 128;
+ effect->pat_cr = 128;
+ break;
+ case V4L2_COLORFX_SEPIA:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARBITRARY;
+ effect->pat_cb = 115;
+ effect->pat_cr = 145;
+ break;
+ case V4L2_COLORFX_NEGATIVE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_NEGATIVE;
+ break;
+ case V4L2_COLORFX_EMBOSS:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_EMBOSSING;
+ break;
+ case V4L2_COLORFX_ART_FREEZE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARTFREEZE;
+ break;
+ case V4L2_COLORFX_SILHOUETTE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_SILHOUETTE;
+ break;
+ case V4L2_COLORFX_SET_CBCR:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARBITRARY;
+ effect->pat_cb = ctx->ctrls.colorfx_cbcr->val >> 8;
+ effect->pat_cr = ctx->ctrls.colorfx_cbcr->val & 0xff;
+ break;
+ default:
return -EINVAL;
-
- *num_planes = f->fmt->memplanes;
- for (i = 0; i < f->fmt->memplanes; i++) {
- sizes[i] = (f->f_width * f->f_height * f->fmt->depth[i]) / 8;
- allocators[i] = ctx->fimc_dev->alloc_ctx;
}
- return 0;
-}
-
-static int fimc_buf_prepare(struct vb2_buffer *vb)
-{
- struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct fimc_frame *frame;
- int i;
-
- frame = ctx_get_frame(ctx, vb->vb2_queue->type);
- if (IS_ERR(frame))
- return PTR_ERR(frame);
-
- for (i = 0; i < frame->fmt->memplanes; i++)
- vb2_set_plane_payload(vb, i, frame->payload[i]);
return 0;
}
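
[Editor's note] fimc_set_color_effect() maps the V4L2 colour-effect controls onto the CIIMGEFF settings, with BW and SEPIA expressed as fixed Cb/Cr patterns of the "arbitrary" effect and SET_CBCR taking the pattern from the V4L2_CID_COLORFX_CBCR control. From user space the effect is selected through the standard controls; a hedged example follows (error handling trimmed, file descriptor assumed to be the capture or m2m node).

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Select the sepia effect. */
	static int set_sepia(int fd)
	{
		struct v4l2_control ctrl;

		memset(&ctrl, 0, sizeof(ctrl));
		ctrl.id = V4L2_CID_COLORFX;
		ctrl.value = V4L2_COLORFX_SEPIA;
		return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
	}

	/* Apply an arbitrary Cb/Cr pair via SET_CBCR. */
	static int set_custom_cbcr(int fd, unsigned char cb, unsigned char cr)
	{
		struct v4l2_control ctrl;

		memset(&ctrl, 0, sizeof(ctrl));
		ctrl.id = V4L2_CID_COLORFX_CBCR;
		ctrl.value = (cb << 8) | cr;	/* matches the unpacking above */
		if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
			return -1;

		ctrl.id = V4L2_CID_COLORFX;
		ctrl.value = V4L2_COLORFX_SET_CBCR;
		return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
	}
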
-static void fimc_buf_queue(struct vb2_buffer *vb)
-{
- struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
-
- dbg("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
-
- if (ctx->m2m_ctx)
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
-}
-
-static void fimc_lock(struct vb2_queue *vq)
-{
- struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
- mutex_lock(&ctx->fimc_dev->lock);
-}
-
-static void fimc_unlock(struct vb2_queue *vq)
-{
- struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
- mutex_unlock(&ctx->fimc_dev->lock);
-}
-
-static struct vb2_ops fimc_qops = {
- .queue_setup = fimc_queue_setup,
- .buf_prepare = fimc_buf_prepare,
- .buf_queue = fimc_buf_queue,
- .wait_prepare = fimc_unlock,
- .wait_finish = fimc_lock,
- .stop_streaming = stop_streaming,
- .start_streaming = start_streaming,
-};
-
/*
* V4L2 controls handling
*/
#define ctrl_to_ctx(__ctrl) \
- container_of((__ctrl)->handler, struct fimc_ctx, ctrl_handler)
+ container_of((__ctrl)->handler, struct fimc_ctx, ctrls.handler)
static int __fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_ctrl *ctrl)
{
struct fimc_dev *fimc = ctx->fimc_dev;
- struct samsung_fimc_variant *variant = fimc->variant;
+ struct fimc_variant *variant = fimc->variant;
unsigned int flags = FIMC_DST_FMT | FIMC_SRC_FMT;
int ret = 0;
@@ -815,7 +549,14 @@ static int __fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_ctrl *ctrl)
case V4L2_CID_ALPHA_COMPONENT:
ctx->d_frame.alpha = ctrl->val;
break;
+
+ case V4L2_CID_COLORFX:
+ ret = fimc_set_color_effect(ctx, ctrl->val);
+ if (ret)
+ return ret;
+ break;
}
+
ctx->state |= FIMC_PARAMS;
set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
return 0;
@@ -827,9 +568,9 @@ static int fimc_s_ctrl(struct v4l2_ctrl *ctrl)
unsigned long flags;
int ret;
- spin_lock_irqsave(&ctx->slock, flags);
+ spin_lock_irqsave(&ctx->fimc_dev->slock, flags);
ret = __fimc_s_ctrl(ctx, ctrl);
- spin_unlock_irqrestore(&ctx->slock, flags);
+ spin_unlock_irqrestore(&ctx->fimc_dev->slock, flags);
return ret;
}
@@ -840,71 +581,93 @@ static const struct v4l2_ctrl_ops fimc_ctrl_ops = {
int fimc_ctrls_create(struct fimc_ctx *ctx)
{
- struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+ struct fimc_variant *variant = ctx->fimc_dev->variant;
unsigned int max_alpha = fimc_get_alpha_mask(ctx->d_frame.fmt);
+ struct fimc_ctrls *ctrls = &ctx->ctrls;
+ struct v4l2_ctrl_handler *handler = &ctrls->handler;
- if (ctx->ctrls_rdy)
+ if (ctx->ctrls.ready)
return 0;
- v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
- ctx->ctrl_rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
+ v4l2_ctrl_handler_init(handler, 6);
+
+ ctrls->rotate = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
V4L2_CID_ROTATE, 0, 270, 90, 0);
- ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
+ ctrls->hflip = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
- ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
+ ctrls->vflip = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
+
if (variant->has_alpha)
- ctx->ctrl_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
- &fimc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT,
- 0, max_alpha, 1, 0);
+ ctrls->alpha = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT,
+ 0, max_alpha, 1, 0);
else
- ctx->ctrl_alpha = NULL;
+ ctrls->alpha = NULL;
- ctx->ctrls_rdy = ctx->ctrl_handler.error == 0;
+ ctrls->colorfx = v4l2_ctrl_new_std_menu(handler, &fimc_ctrl_ops,
+ V4L2_CID_COLORFX, V4L2_COLORFX_SET_CBCR,
+ ~0x983f, V4L2_COLORFX_NONE);
- return ctx->ctrl_handler.error;
+ ctrls->colorfx_cbcr = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+ V4L2_CID_COLORFX_CBCR, 0, 0xffff, 1, 0);
+
+ ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
+
+ if (!handler->error) {
+ v4l2_ctrl_cluster(3, &ctrls->colorfx);
+ ctrls->ready = true;
+ }
+
+ return handler->error;
}
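
[Editor's note] In v4l2_ctrl_new_std_menu() the mask argument marks menu items to skip, so passing ~0x983f leaves only the effects this hardware is expected to handle selectable; the colour-effect controls are additionally grouped with v4l2_ctrl_cluster() so the handler treats them as one unit. Assuming the standard enum v4l2_colorfx numbering, 0x983f appears to decompose as sketched below; note that SKETCH is left selectable even though the switch in fimc_set_color_effect() above does not handle it.

	#include <linux/bitops.h>
	#include <linux/videodev2.h>

	/* Hedged reconstruction of the 0x983f menu mask, assuming the
	 * standard enum v4l2_colorfx values; not a macro from the driver. */
	#define FIMC_COLORFX_MASK						\
		(BIT(V4L2_COLORFX_NONE)       | BIT(V4L2_COLORFX_BW)         |	\
		 BIT(V4L2_COLORFX_SEPIA)      | BIT(V4L2_COLORFX_NEGATIVE)   |	\
		 BIT(V4L2_COLORFX_EMBOSS)     | BIT(V4L2_COLORFX_SKETCH)     |	\
		 BIT(V4L2_COLORFX_ART_FREEZE) | BIT(V4L2_COLORFX_SILHOUETTE) |	\
		 BIT(V4L2_COLORFX_SET_CBCR))	/* == 0x983f */
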
void fimc_ctrls_delete(struct fimc_ctx *ctx)
{
- if (ctx->ctrls_rdy) {
- v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- ctx->ctrls_rdy = false;
- ctx->ctrl_alpha = NULL;
+ struct fimc_ctrls *ctrls = &ctx->ctrls;
+
+ if (ctrls->ready) {
+ v4l2_ctrl_handler_free(&ctrls->handler);
+ ctrls->ready = false;
+ ctrls->alpha = NULL;
}
}
void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
{
unsigned int has_alpha = ctx->d_frame.fmt->flags & FMT_HAS_ALPHA;
+ struct fimc_ctrls *ctrls = &ctx->ctrls;
- if (!ctx->ctrls_rdy)
+ if (!ctrls->ready)
return;
- mutex_lock(&ctx->ctrl_handler.lock);
- v4l2_ctrl_activate(ctx->ctrl_rotate, active);
- v4l2_ctrl_activate(ctx->ctrl_hflip, active);
- v4l2_ctrl_activate(ctx->ctrl_vflip, active);
- if (ctx->ctrl_alpha)
- v4l2_ctrl_activate(ctx->ctrl_alpha, active && has_alpha);
+ mutex_lock(&ctrls->handler.lock);
+ v4l2_ctrl_activate(ctrls->rotate, active);
+ v4l2_ctrl_activate(ctrls->hflip, active);
+ v4l2_ctrl_activate(ctrls->vflip, active);
+ v4l2_ctrl_activate(ctrls->colorfx, active);
+ if (ctrls->alpha)
+ v4l2_ctrl_activate(ctrls->alpha, active && has_alpha);
if (active) {
- ctx->rotation = ctx->ctrl_rotate->val;
- ctx->hflip = ctx->ctrl_hflip->val;
- ctx->vflip = ctx->ctrl_vflip->val;
+ fimc_set_color_effect(ctx, ctrls->colorfx->cur.val);
+ ctx->rotation = ctrls->rotate->val;
+ ctx->hflip = ctrls->hflip->val;
+ ctx->vflip = ctrls->vflip->val;
} else {
+ ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
ctx->rotation = 0;
ctx->hflip = 0;
ctx->vflip = 0;
}
- mutex_unlock(&ctx->ctrl_handler.lock);
+ mutex_unlock(&ctrls->handler.lock);
}
/* Update maximum value of the alpha color control */
void fimc_alpha_ctrl_update(struct fimc_ctx *ctx)
{
struct fimc_dev *fimc = ctx->fimc_dev;
- struct v4l2_ctrl *ctrl = ctx->ctrl_alpha;
+ struct v4l2_ctrl *ctrl = ctx->ctrls.alpha;
if (ctrl == NULL || !fimc->variant->has_alpha)
return;
@@ -918,39 +681,6 @@ void fimc_alpha_ctrl_update(struct fimc_ctx *ctx)
v4l2_ctrl_unlock(ctrl);
}
-/*
- * V4L2 ioctl handlers
- */
-static int fimc_m2m_querycap(struct file *file, void *fh,
- struct v4l2_capability *cap)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- struct fimc_dev *fimc = ctx->fimc_dev;
-
- strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
- strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
- cap->bus_info[0] = 0;
- cap->capabilities = V4L2_CAP_STREAMING |
- V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
-
- return 0;
-}
-
-static int fimc_m2m_enum_fmt_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- struct fimc_fmt *fmt;
-
- fmt = fimc_find_format(NULL, NULL, get_m2m_fmt_flags(f->type),
- f->index);
- if (!fmt)
- return -EINVAL;
-
- strncpy(f->description, fmt->name, sizeof(f->description) - 1);
- f->pixelformat = fmt->fourcc;
- return 0;
-}
-
int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
@@ -1029,18 +759,6 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
}
}
-static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
- struct v4l2_format *f)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- struct fimc_frame *frame = ctx_get_frame(ctx, f->type);
-
- if (IS_ERR(frame))
- return PTR_ERR(frame);
-
- return fimc_fill_format(frame, f);
-}
-
/**
* fimc_find_format - lookup fimc color format by fourcc or media bus format
* @pixelformat: fourcc to match, ignored if null
@@ -1073,535 +791,10 @@ struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
return def_fmt;
}
-static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
-{
- struct fimc_dev *fimc = ctx->fimc_dev;
- struct samsung_fimc_variant *variant = fimc->variant;
- struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct fimc_fmt *fmt;
- u32 max_w, mod_x, mod_y;
-
- if (!IS_M2M(f->type))
- return -EINVAL;
-
- dbg("w: %d, h: %d", pix->width, pix->height);
-
- fmt = fimc_find_format(&pix->pixelformat, NULL,
- get_m2m_fmt_flags(f->type), 0);
- if (WARN(fmt == NULL, "Pixel format lookup failed"))
- return -EINVAL;
-
- if (pix->field == V4L2_FIELD_ANY)
- pix->field = V4L2_FIELD_NONE;
- else if (pix->field != V4L2_FIELD_NONE)
- return -EINVAL;
-
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- max_w = variant->pix_limit->scaler_dis_w;
- mod_x = ffs(variant->min_inp_pixsize) - 1;
- } else {
- max_w = variant->pix_limit->out_rot_dis_w;
- mod_x = ffs(variant->min_out_pixsize) - 1;
- }
-
- if (tiled_fmt(fmt)) {
- mod_x = 6; /* 64 x 32 pixels tile */
- mod_y = 5;
- } else {
- if (variant->min_vsize_align == 1)
- mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
- else
- mod_y = ffs(variant->min_vsize_align) - 1;
- }
-
- v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
- &pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
-
- fimc_adjust_mplane_format(fmt, pix->width, pix->height, &f->fmt.pix_mp);
- return 0;
-}
-
-static int fimc_m2m_try_fmt_mplane(struct file *file, void *fh,
- struct v4l2_format *f)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- return fimc_try_fmt_mplane(ctx, f);
-}
-
-static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
- struct v4l2_format *f)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- struct fimc_dev *fimc = ctx->fimc_dev;
- struct vb2_queue *vq;
- struct fimc_frame *frame;
- struct v4l2_pix_format_mplane *pix;
- int i, ret = 0;
-
- ret = fimc_try_fmt_mplane(ctx, f);
- if (ret)
- return ret;
-
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
-
- if (vb2_is_busy(vq)) {
- v4l2_err(fimc->m2m.vfd, "queue (%d) busy\n", f->type);
- return -EBUSY;
- }
-
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- frame = &ctx->s_frame;
- else
- frame = &ctx->d_frame;
-
- pix = &f->fmt.pix_mp;
- frame->fmt = fimc_find_format(&pix->pixelformat, NULL,
- get_m2m_fmt_flags(f->type), 0);
- if (!frame->fmt)
- return -EINVAL;
-
- /* Update RGB Alpha control state and value range */
- fimc_alpha_ctrl_update(ctx);
-
- for (i = 0; i < frame->fmt->colplanes; i++) {
- frame->payload[i] =
- (pix->width * pix->height * frame->fmt->depth[i]) / 8;
- }
-
- fimc_fill_frame(frame, f);
-
- ctx->scaler.enabled = 1;
-
- if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- fimc_ctx_state_lock_set(FIMC_PARAMS | FIMC_DST_FMT, ctx);
- else
- fimc_ctx_state_lock_set(FIMC_PARAMS | FIMC_SRC_FMT, ctx);
-
- dbg("f_w: %d, f_h: %d", frame->f_width, frame->f_height);
-
- return 0;
-}
-
-static int fimc_m2m_reqbufs(struct file *file, void *fh,
- struct v4l2_requestbuffers *reqbufs)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int fimc_m2m_querybuf(struct file *file, void *fh,
- struct v4l2_buffer *buf)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int fimc_m2m_qbuf(struct file *file, void *fh,
- struct v4l2_buffer *buf)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int fimc_m2m_dqbuf(struct file *file, void *fh,
- struct v4l2_buffer *buf)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int fimc_m2m_streamon(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- /* The source and target color format need to be set */
- if (V4L2_TYPE_IS_OUTPUT(type)) {
- if (!fimc_ctx_state_is_set(FIMC_SRC_FMT, ctx))
- return -EINVAL;
- } else if (!fimc_ctx_state_is_set(FIMC_DST_FMT, ctx)) {
- return -EINVAL;
- }
-
- return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int fimc_m2m_streamoff(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
-static int fimc_m2m_cropcap(struct file *file, void *fh,
- struct v4l2_cropcap *cr)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- struct fimc_frame *frame;
-
- frame = ctx_get_frame(ctx, cr->type);
- if (IS_ERR(frame))
- return PTR_ERR(frame);
-
- cr->bounds.left = 0;
- cr->bounds.top = 0;
- cr->bounds.width = frame->o_width;
- cr->bounds.height = frame->o_height;
- cr->defrect = cr->bounds;
-
- return 0;
-}
-
-static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- struct fimc_frame *frame;
-
- frame = ctx_get_frame(ctx, cr->type);
- if (IS_ERR(frame))
- return PTR_ERR(frame);
-
- cr->c.left = frame->offs_h;
- cr->c.top = frame->offs_v;
- cr->c.width = frame->width;
- cr->c.height = frame->height;
-
- return 0;
-}
-
-static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
-{
- struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_frame *f;
- u32 min_size, halign, depth = 0;
- int i;
-
- if (cr->c.top < 0 || cr->c.left < 0) {
- v4l2_err(fimc->m2m.vfd,
- "doesn't support negative values for top & left\n");
- return -EINVAL;
- }
- if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- f = &ctx->d_frame;
- else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- f = &ctx->s_frame;
- else
- return -EINVAL;
-
- min_size = (f == &ctx->s_frame) ?
- fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
-
- /* Get pixel alignment constraints. */
- if (fimc->variant->min_vsize_align == 1)
- halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
- else
- halign = ffs(fimc->variant->min_vsize_align) - 1;
-
- for (i = 0; i < f->fmt->colplanes; i++)
- depth += f->fmt->depth[i];
-
- v4l_bound_align_image(&cr->c.width, min_size, f->o_width,
- ffs(min_size) - 1,
- &cr->c.height, min_size, f->o_height,
- halign, 64/(ALIGN(depth, 8)));
-
- /* adjust left/top if cropping rectangle is out of bounds */
- if (cr->c.left + cr->c.width > f->o_width)
- cr->c.left = f->o_width - cr->c.width;
- if (cr->c.top + cr->c.height > f->o_height)
- cr->c.top = f->o_height - cr->c.height;
-
- cr->c.left = round_down(cr->c.left, min_size);
- cr->c.top = round_down(cr->c.top, fimc->variant->hor_offs_align);
-
- dbg("l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
- cr->c.left, cr->c.top, cr->c.width, cr->c.height,
- f->f_width, f->f_height);
-
- return 0;
-}
-
-static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_frame *f;
- int ret;
-
- ret = fimc_m2m_try_crop(ctx, cr);
- if (ret)
- return ret;
-
- f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
- &ctx->s_frame : &ctx->d_frame;
-
- /* Check to see if scaling ratio is within supported range */
- if (fimc_ctx_state_is_set(FIMC_DST_FMT | FIMC_SRC_FMT, ctx)) {
- if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- ret = fimc_check_scaler_ratio(ctx, cr->c.width,
- cr->c.height, ctx->d_frame.width,
- ctx->d_frame.height, ctx->rotation);
- } else {
- ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
- ctx->s_frame.height, cr->c.width,
- cr->c.height, ctx->rotation);
- }
- if (ret) {
- v4l2_err(fimc->m2m.vfd, "Out of scaler range\n");
- return -EINVAL;
- }
- }
-
- f->offs_h = cr->c.left;
- f->offs_v = cr->c.top;
- f->width = cr->c.width;
- f->height = cr->c.height;
-
- fimc_ctx_state_lock_set(FIMC_PARAMS, ctx);
-
- return 0;
-}
-
-static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
- .vidioc_querycap = fimc_m2m_querycap,
-
- .vidioc_enum_fmt_vid_cap_mplane = fimc_m2m_enum_fmt_mplane,
- .vidioc_enum_fmt_vid_out_mplane = fimc_m2m_enum_fmt_mplane,
-
- .vidioc_g_fmt_vid_cap_mplane = fimc_m2m_g_fmt_mplane,
- .vidioc_g_fmt_vid_out_mplane = fimc_m2m_g_fmt_mplane,
-
- .vidioc_try_fmt_vid_cap_mplane = fimc_m2m_try_fmt_mplane,
- .vidioc_try_fmt_vid_out_mplane = fimc_m2m_try_fmt_mplane,
-
- .vidioc_s_fmt_vid_cap_mplane = fimc_m2m_s_fmt_mplane,
- .vidioc_s_fmt_vid_out_mplane = fimc_m2m_s_fmt_mplane,
-
- .vidioc_reqbufs = fimc_m2m_reqbufs,
- .vidioc_querybuf = fimc_m2m_querybuf,
-
- .vidioc_qbuf = fimc_m2m_qbuf,
- .vidioc_dqbuf = fimc_m2m_dqbuf,
-
- .vidioc_streamon = fimc_m2m_streamon,
- .vidioc_streamoff = fimc_m2m_streamoff,
-
- .vidioc_g_crop = fimc_m2m_g_crop,
- .vidioc_s_crop = fimc_m2m_s_crop,
- .vidioc_cropcap = fimc_m2m_cropcap
-
-};
-
-static int queue_init(void *priv, struct vb2_queue *src_vq,
- struct vb2_queue *dst_vq)
-{
- struct fimc_ctx *ctx = priv;
- int ret;
-
- memset(src_vq, 0, sizeof(*src_vq));
- src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
- src_vq->drv_priv = ctx;
- src_vq->ops = &fimc_qops;
- src_vq->mem_ops = &vb2_dma_contig_memops;
- src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
-
- ret = vb2_queue_init(src_vq);
- if (ret)
- return ret;
-
- memset(dst_vq, 0, sizeof(*dst_vq));
- dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
- dst_vq->drv_priv = ctx;
- dst_vq->ops = &fimc_qops;
- dst_vq->mem_ops = &vb2_dma_contig_memops;
- dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
-
- return vb2_queue_init(dst_vq);
-}
-
-static int fimc_m2m_open(struct file *file)
-{
- struct fimc_dev *fimc = video_drvdata(file);
- struct fimc_ctx *ctx;
- int ret;
-
- dbg("pid: %d, state: 0x%lx, refcnt: %d",
- task_pid_nr(current), fimc->state, fimc->vid_cap.refcnt);
-
- /*
- * Return if the corresponding video capture node
- * is already opened.
- */
- if (fimc->vid_cap.refcnt > 0)
- return -EBUSY;
-
- ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
- v4l2_fh_init(&ctx->fh, fimc->m2m.vfd);
- ctx->fimc_dev = fimc;
-
- /* Default color format */
- ctx->s_frame.fmt = &fimc_formats[0];
- ctx->d_frame.fmt = &fimc_formats[0];
-
- ret = fimc_ctrls_create(ctx);
- if (ret)
- goto error_fh;
-
- /* Use separate control handler per file handle */
- ctx->fh.ctrl_handler = &ctx->ctrl_handler;
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
-
- /* Setup the device context for memory-to-memory mode */
- ctx->state = FIMC_CTX_M2M;
- ctx->flags = 0;
- ctx->in_path = FIMC_DMA;
- ctx->out_path = FIMC_DMA;
- spin_lock_init(&ctx->slock);
-
- ctx->m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
- if (IS_ERR(ctx->m2m_ctx)) {
- ret = PTR_ERR(ctx->m2m_ctx);
- goto error_c;
- }
-
- if (fimc->m2m.refcnt++ == 0)
- set_bit(ST_M2M_RUN, &fimc->state);
- return 0;
-
-error_c:
- fimc_ctrls_delete(ctx);
-error_fh:
- v4l2_fh_del(&ctx->fh);
- v4l2_fh_exit(&ctx->fh);
- kfree(ctx);
- return ret;
-}
-
-static int fimc_m2m_release(struct file *file)
-{
- struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
- struct fimc_dev *fimc = ctx->fimc_dev;
-
- dbg("pid: %d, state: 0x%lx, refcnt= %d",
- task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
-
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
- fimc_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
- v4l2_fh_exit(&ctx->fh);
-
- if (--fimc->m2m.refcnt <= 0)
- clear_bit(ST_M2M_RUN, &fimc->state);
- kfree(ctx);
- return 0;
-}
-
-static unsigned int fimc_m2m_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
-
- return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
-}
-
-
-static int fimc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
-
- return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
-}
-
-static const struct v4l2_file_operations fimc_m2m_fops = {
- .owner = THIS_MODULE,
- .open = fimc_m2m_open,
- .release = fimc_m2m_release,
- .poll = fimc_m2m_poll,
- .unlocked_ioctl = video_ioctl2,
- .mmap = fimc_m2m_mmap,
-};
-
-static struct v4l2_m2m_ops m2m_ops = {
- .device_run = fimc_dma_run,
- .job_abort = fimc_job_abort,
-};
-
-int fimc_register_m2m_device(struct fimc_dev *fimc,
- struct v4l2_device *v4l2_dev)
-{
- struct video_device *vfd;
- struct platform_device *pdev;
- int ret = 0;
-
- if (!fimc)
- return -ENODEV;
-
- pdev = fimc->pdev;
- fimc->v4l2_dev = v4l2_dev;
-
- vfd = video_device_alloc();
- if (!vfd) {
- v4l2_err(v4l2_dev, "Failed to allocate video device\n");
- return -ENOMEM;
- }
-
- vfd->fops = &fimc_m2m_fops;
- vfd->ioctl_ops = &fimc_m2m_ioctl_ops;
- vfd->v4l2_dev = v4l2_dev;
- vfd->minor = -1;
- vfd->release = video_device_release;
- vfd->lock = &fimc->lock;
-
- snprintf(vfd->name, sizeof(vfd->name), "%s.m2m", dev_name(&pdev->dev));
- video_set_drvdata(vfd, fimc);
-
- fimc->m2m.vfd = vfd;
- fimc->m2m.m2m_dev = v4l2_m2m_init(&m2m_ops);
- if (IS_ERR(fimc->m2m.m2m_dev)) {
- v4l2_err(v4l2_dev, "failed to initialize v4l2-m2m device\n");
- ret = PTR_ERR(fimc->m2m.m2m_dev);
- goto err_init;
- }
-
- ret = media_entity_init(&vfd->entity, 0, NULL, 0);
- if (!ret)
- return 0;
-
- v4l2_m2m_release(fimc->m2m.m2m_dev);
-err_init:
- video_device_release(fimc->m2m.vfd);
- return ret;
-}
-
-void fimc_unregister_m2m_device(struct fimc_dev *fimc)
-{
- if (!fimc)
- return;
-
- if (fimc->m2m.m2m_dev)
- v4l2_m2m_release(fimc->m2m.m2m_dev);
- if (fimc->m2m.vfd) {
- media_entity_cleanup(&fimc->m2m.vfd->entity);
- /* Can also be called if video device wasn't registered */
- video_unregister_device(fimc->m2m.vfd);
- }
-}
-
static void fimc_clk_put(struct fimc_dev *fimc)
{
int i;
- for (i = 0; i < fimc->num_clocks; i++) {
+ for (i = 0; i < MAX_FIMC_CLOCKS; i++) {
if (IS_ERR_OR_NULL(fimc->clock[i]))
continue;
clk_unprepare(fimc->clock[i]);
@@ -1614,7 +807,7 @@ static int fimc_clk_get(struct fimc_dev *fimc)
{
int i, ret;
- for (i = 0; i < fimc->num_clocks; i++) {
+ for (i = 0; i < MAX_FIMC_CLOCKS; i++) {
fimc->clock[i] = clk_get(&fimc->pdev->dev, fimc_clocks[i]);
if (IS_ERR(fimc->clock[i]))
goto err;
@@ -1672,15 +865,12 @@ static int fimc_m2m_resume(struct fimc_dev *fimc)
static int fimc_probe(struct platform_device *pdev)
{
+ struct fimc_drvdata *drv_data = fimc_get_drvdata(pdev);
+ struct s5p_platform_fimc *pdata;
struct fimc_dev *fimc;
struct resource *res;
- struct samsung_fimc_driverdata *drv_data;
- struct s5p_platform_fimc *pdata;
int ret = 0;
- drv_data = (struct samsung_fimc_driverdata *)
- platform_get_device_id(pdev)->driver_data;
-
if (pdev->id >= drv_data->num_entities) {
dev_err(&pdev->dev, "Invalid platform device id: %d\n",
pdev->id);
@@ -1714,28 +904,29 @@ static int fimc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to get IRQ resource\n");
return -ENXIO;
}
- fimc->irq = res->start;
- fimc->num_clocks = MAX_FIMC_CLOCKS;
ret = fimc_clk_get(fimc);
if (ret)
return ret;
clk_set_rate(fimc->clock[CLK_BUS], drv_data->lclk_frequency);
clk_enable(fimc->clock[CLK_BUS]);
- platform_set_drvdata(pdev, fimc);
-
- ret = devm_request_irq(&pdev->dev, fimc->irq, fimc_irq_handler,
- 0, pdev->name, fimc);
+ ret = devm_request_irq(&pdev->dev, res->start, fimc_irq_handler,
+ 0, dev_name(&pdev->dev), fimc);
if (ret) {
dev_err(&pdev->dev, "failed to install irq (%d)\n", ret);
goto err_clk;
}
+ ret = fimc_initialize_capture_subdev(fimc);
+ if (ret)
+ goto err_clk;
+
+ platform_set_drvdata(pdev, fimc);
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
- goto err_clk;
+ goto err_sd;
/* Initialize contiguous memory allocator */
fimc->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
if (IS_ERR(fimc->alloc_ctx)) {
@@ -1747,9 +938,10 @@ static int fimc_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
return 0;
-
err_pm:
pm_runtime_put(&pdev->dev);
+err_sd:
+ fimc_unregister_capture_subdev(fimc);
err_clk:
fimc_clk_put(fimc);
return ret;
@@ -1834,6 +1026,7 @@ static int __devexit fimc_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
+ fimc_unregister_capture_subdev(fimc);
vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
clk_disable(fimc->clock[CLK_BUS]);
@@ -1879,7 +1072,7 @@ static struct fimc_pix_limit s5p_pix_limit[4] = {
},
};
-static struct samsung_fimc_variant fimc0_variant_s5p = {
+static struct fimc_variant fimc0_variant_s5p = {
.has_inp_rot = 1,
.has_out_rot = 1,
.has_cam_if = 1,
@@ -1891,17 +1084,17 @@ static struct samsung_fimc_variant fimc0_variant_s5p = {
.pix_limit = &s5p_pix_limit[0],
};
-static struct samsung_fimc_variant fimc2_variant_s5p = {
+static struct fimc_variant fimc2_variant_s5p = {
.has_cam_if = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
.min_vsize_align = 16,
.out_buf_count = 4,
- .pix_limit = &s5p_pix_limit[1],
+ .pix_limit = &s5p_pix_limit[1],
};
-static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
+static struct fimc_variant fimc0_variant_s5pv210 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
@@ -1914,7 +1107,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
.pix_limit = &s5p_pix_limit[1],
};
-static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
+static struct fimc_variant fimc1_variant_s5pv210 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
@@ -1928,7 +1121,7 @@ static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
.pix_limit = &s5p_pix_limit[2],
};
-static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
+static struct fimc_variant fimc2_variant_s5pv210 = {
.has_cam_if = 1,
.pix_hoff = 1,
.min_inp_pixsize = 16,
@@ -1939,7 +1132,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
.pix_limit = &s5p_pix_limit[2],
};
-static struct samsung_fimc_variant fimc0_variant_exynos4 = {
+static struct fimc_variant fimc0_variant_exynos4 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
@@ -1955,7 +1148,7 @@ static struct samsung_fimc_variant fimc0_variant_exynos4 = {
.pix_limit = &s5p_pix_limit[1],
};
-static struct samsung_fimc_variant fimc3_variant_exynos4 = {
+static struct fimc_variant fimc3_variant_exynos4 = {
.pix_hoff = 1,
.has_cam_if = 1,
.has_cistatus2 = 1,
@@ -1970,7 +1163,7 @@ static struct samsung_fimc_variant fimc3_variant_exynos4 = {
};
/* S5PC100 */
-static struct samsung_fimc_driverdata fimc_drvdata_s5p = {
+static struct fimc_drvdata fimc_drvdata_s5p = {
.variant = {
[0] = &fimc0_variant_s5p,
[1] = &fimc0_variant_s5p,
@@ -1981,7 +1174,7 @@ static struct samsung_fimc_driverdata fimc_drvdata_s5p = {
};
/* S5PV210, S5PC110 */
-static struct samsung_fimc_driverdata fimc_drvdata_s5pv210 = {
+static struct fimc_drvdata fimc_drvdata_s5pv210 = {
.variant = {
[0] = &fimc0_variant_s5pv210,
[1] = &fimc1_variant_s5pv210,
@@ -1991,8 +1184,8 @@ static struct samsung_fimc_driverdata fimc_drvdata_s5pv210 = {
.lclk_frequency = 166000000UL,
};
-/* S5PV310, S5PC210 */
-static struct samsung_fimc_driverdata fimc_drvdata_exynos4 = {
+/* EXYNOS4210, S5PV310, S5PC210 */
+static struct fimc_drvdata fimc_drvdata_exynos4 = {
.variant = {
[0] = &fimc0_variant_exynos4,
[1] = &fimc0_variant_exynos4,
@@ -2036,7 +1229,7 @@ static struct platform_driver fimc_driver = {
int __init fimc_register_driver(void)
{
- return platform_driver_probe(&fimc_driver, fimc_probe);
+ return platform_driver_register(&fimc_driver);
}
void __exit fimc_unregister_driver(void)
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index 84fd83550bd7..95b27ae5cf27 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/videodev2.h>
#include <linux/io.h>
+#include <asm/sizes.h>
#include <media/media-entity.h>
#include <media/videobuf2-core.h>
@@ -26,8 +27,6 @@
#include <media/v4l2-mediabus.h>
#include <media/s5p_fimc.h>
-#include "regs-fimc.h"
-
#define err(fmt, args...) \
printk(KERN_ERR "%s:%d: " fmt "\n", __func__, __LINE__, ##args)
@@ -78,26 +77,31 @@ enum fimc_dev_flags {
#define fimc_capture_busy(dev) test_bit(ST_CAPT_BUSY, &(dev)->state)
enum fimc_datapath {
- FIMC_CAMERA,
- FIMC_DMA,
- FIMC_LCDFIFO,
- FIMC_WRITEBACK
+ FIMC_IO_NONE,
+ FIMC_IO_CAMERA,
+ FIMC_IO_DMA,
+ FIMC_IO_LCDFIFO,
+ FIMC_IO_WRITEBACK,
+ FIMC_IO_ISP,
};
enum fimc_color_fmt {
- S5P_FIMC_RGB444 = 0x10,
- S5P_FIMC_RGB555,
- S5P_FIMC_RGB565,
- S5P_FIMC_RGB666,
- S5P_FIMC_RGB888,
- S5P_FIMC_RGB30_LOCAL,
- S5P_FIMC_YCBCR420 = 0x20,
- S5P_FIMC_YCBYCR422,
- S5P_FIMC_YCRYCB422,
- S5P_FIMC_CBYCRY422,
- S5P_FIMC_CRYCBY422,
- S5P_FIMC_YCBCR444_LOCAL,
- S5P_FIMC_JPEG = 0x40,
+ FIMC_FMT_RGB444 = 0x10,
+ FIMC_FMT_RGB555,
+ FIMC_FMT_RGB565,
+ FIMC_FMT_RGB666,
+ FIMC_FMT_RGB888,
+ FIMC_FMT_RGB30_LOCAL,
+ FIMC_FMT_YCBCR420 = 0x20,
+ FIMC_FMT_YCBYCR422,
+ FIMC_FMT_YCRYCB422,
+ FIMC_FMT_CBYCRY422,
+ FIMC_FMT_CRYCBY422,
+ FIMC_FMT_YCBCR444_LOCAL,
+ FIMC_FMT_JPEG = 0x40,
+ FIMC_FMT_RAW8 = 0x80,
+ FIMC_FMT_RAW10,
+ FIMC_FMT_RAW12,
};
#define fimc_fmt_is_rgb(x) (!!((x) & 0x10))
@@ -106,24 +110,11 @@ enum fimc_color_fmt {
#define IS_M2M(__strt) ((__strt) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE || \
__strt == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-/* Cb/Cr chrominance components order for 2 plane Y/CbCr 4:2:2 formats. */
-#define S5P_FIMC_LSB_CRCB S5P_CIOCTRL_ORDER422_2P_LSB_CRCB
-
-/* The embedded image effect selection */
-#define S5P_FIMC_EFFECT_ORIGINAL S5P_CIIMGEFF_FIN_BYPASS
-#define S5P_FIMC_EFFECT_ARBITRARY S5P_CIIMGEFF_FIN_ARBITRARY
-#define S5P_FIMC_EFFECT_NEGATIVE S5P_CIIMGEFF_FIN_NEGATIVE
-#define S5P_FIMC_EFFECT_ARTFREEZE S5P_CIIMGEFF_FIN_ARTFREEZE
-#define S5P_FIMC_EFFECT_EMBOSSING S5P_CIIMGEFF_FIN_EMBOSSING
-#define S5P_FIMC_EFFECT_SIKHOUETTE S5P_CIIMGEFF_FIN_SILHOUETTE
-
/* The hardware context state. */
#define FIMC_PARAMS (1 << 0)
-#define FIMC_SRC_ADDR (1 << 1)
-#define FIMC_DST_ADDR (1 << 2)
#define FIMC_SRC_FMT (1 << 3)
#define FIMC_DST_FMT (1 << 4)
-#define FIMC_DST_CROP (1 << 5)
+#define FIMC_COMPOSE (1 << 5)
#define FIMC_CTX_M2M (1 << 16)
#define FIMC_CTX_CAP (1 << 17)
#define FIMC_CTX_SHUT (1 << 18)
@@ -333,7 +324,7 @@ struct fimc_vid_cap {
struct fimc_ctx *ctx;
struct vb2_alloc_ctx *alloc_ctx;
struct video_device *vfd;
- struct v4l2_subdev *subdev;
+ struct v4l2_subdev subdev;
struct media_pad vd_pad;
struct v4l2_mbus_framefmt mf;
struct media_pad sd_pads[FIMC_SD_PADS_NUM];
@@ -370,8 +361,7 @@ struct fimc_pix_limit {
};
/**
- * struct samsung_fimc_variant - camera interface variant information
- *
+ * struct fimc_variant - FIMC device variant information
* @pix_hoff: indicate whether horizontal offset is in pixels or in bytes
* @has_inp_rot: set if has input rotator
* @has_out_rot: set if has output rotator
@@ -386,7 +376,7 @@ struct fimc_pix_limit {
* @min_vsize_align: minimum vertical pixel size alignment
* @out_buf_count: the number of buffers in output DMA sequence
*/
-struct samsung_fimc_variant {
+struct fimc_variant {
unsigned int pix_hoff:1;
unsigned int has_inp_rot:1;
unsigned int has_out_rot:1;
@@ -403,23 +393,19 @@ struct samsung_fimc_variant {
};
/**
- * struct samsung_fimc_driverdata - per device type driver data for init time.
- *
- * @variant: the variant information for this driver.
- * @dev_cnt: number of fimc sub-devices available in SoC
- * @lclk_frequency: fimc bus clock frequency
+ * struct fimc_drvdata - per device type driver data
+ * @variant: variant information for this device
+ * @num_entities: number of fimc instances available in a SoC
+ * @lclk_frequency: local bus clock frequency
*/
-struct samsung_fimc_driverdata {
- struct samsung_fimc_variant *variant[FIMC_MAX_DEVS];
- unsigned long lclk_frequency;
- int num_entities;
+struct fimc_drvdata {
+ struct fimc_variant *variant[FIMC_MAX_DEVS];
+ int num_entities;
+ unsigned long lclk_frequency;
};
-struct fimc_pipeline {
- struct media_pipeline *pipe;
- struct v4l2_subdev *sensor;
- struct v4l2_subdev *csis;
-};
+#define fimc_get_drvdata(_pdev) \
+ ((struct fimc_drvdata *) platform_get_device_id(_pdev)->driver_data)
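+
+For orientation, the macro above only resolves correctly when the driver's platform_device_id table points each entry's driver_data at one of the fimc_drvdata structures defined in fimc-core.c above; a minimal sketch of such a table (entry names assumed for illustration, not taken from this patch):
+
+	static const struct platform_device_id fimc_driver_ids[] = {
+		{
+			.name		= "s5p-fimc",	/* assumed device name */
+			.driver_data	= (unsigned long)&fimc_drvdata_s5p,
+		}, {
+			.name		= "exynos4-fimc",	/* assumed device name */
+			.driver_data	= (unsigned long)&fimc_drvdata_exynos4,
+		},
+		{ },	/* sentinel */
+	};
+
+With a table of this shape passed via the platform driver's .id_table field, platform_get_device_id(pdev) returns the matching entry and fimc_get_drvdata() simply casts its driver_data back to a struct fimc_drvdata pointer.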
struct fimc_ctx;
@@ -431,10 +417,8 @@ struct fimc_ctx;
* @pdata: pointer to the device platform data
* @variant: the IP variant information
* @id: FIMC device index (0..FIMC_MAX_DEVS)
- * @num_clocks: the number of clocks managed by this device instance
* @clock: clocks required for FIMC operation
* @regs: the mapped hardware registers
- * @irq: FIMC interrupt number
* @irq_queue: interrupt handler waitqueue
* @v4l2_dev: root v4l2_device
* @m2m: memory-to-memory V4L2 device information
@@ -448,12 +432,10 @@ struct fimc_dev {
struct mutex lock;
struct platform_device *pdev;
struct s5p_platform_fimc *pdata;
- struct samsung_fimc_variant *variant;
+ struct fimc_variant *variant;
u16 id;
- u16 num_clocks;
struct clk *clock[MAX_FIMC_CLOCKS];
void __iomem *regs;
- int irq;
wait_queue_head_t irq_queue;
struct v4l2_device *v4l2_dev;
struct fimc_m2m_device m2m;
@@ -464,8 +446,31 @@ struct fimc_dev {
};
/**
+ * struct fimc_ctrls - v4l2 controls structure
+ * @handler: the control handler
+ * @colorfx: image effect control
+ * @colorfx_cbcr: Cb/Cr coefficients control
+ * @rotate: image rotation control
+ * @hflip: horizontal flip control
+ * @vflip: vertical flip control
+ * @alpha: RGB alpha control
+ * @ready: true if @handler is initialized
+ */
+struct fimc_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct {
+ struct v4l2_ctrl *colorfx;
+ struct v4l2_ctrl *colorfx_cbcr;
+ };
+ struct v4l2_ctrl *rotate;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *alpha;
+ bool ready;
+};
+
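+To show how this structure is typically populated, here is a minimal sketch using the standard v4l2 control framework; fimc_ctrls_sketch() and fimc_ctrl_ops are assumed names used only for illustration, and error unwinding is omitted:
+
+	static int fimc_ctrls_sketch(struct fimc_ctx *ctx)
+	{
+		struct fimc_ctrls *ctrls = &ctx->ctrls;
+		struct v4l2_ctrl_handler *handler = &ctrls->handler;
+
+		/* Hint: roughly how many controls will be added */
+		v4l2_ctrl_handler_init(handler, 4);
+
+		ctrls->rotate = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+						  V4L2_CID_ROTATE, 0, 270, 90, 0);
+		ctrls->hflip = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+						 V4L2_CID_HFLIP, 0, 1, 1, 0);
+		ctrls->vflip = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+						 V4L2_CID_VFLIP, 0, 1, 1, 0);
+		ctrls->alpha = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+						 V4L2_CID_ALPHA_COMPONENT,
+						 0, 255, 1, 0);
+		if (handler->error)
+			return handler->error;
+
+		ctrls->ready = true;
+		return 0;
+	}
+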
+/**
* fimc_ctx - the device context data
- * @slock: spinlock protecting this data structure
* @s_frame: source frame properties
* @d_frame: destination frame properties
* @out_order_1p: output 1-plane YCBCR order
@@ -484,15 +489,9 @@ struct fimc_dev {
* @fimc_dev: the FIMC device this context applies to
* @m2m_ctx: memory-to-memory device context
* @fh: v4l2 file handle
- * @ctrl_handler: v4l2 controls handler
- * @ctrl_rotate image rotation control
- * @ctrl_hflip horizontal flip control
- * @ctrl_vflip vertical flip control
- * @ctrl_alpha RGB alpha control
- * @ctrls_rdy: true if the control handler is initialized
+ * @ctrls: v4l2 controls structure
*/
struct fimc_ctx {
- spinlock_t slock;
struct fimc_frame s_frame;
struct fimc_frame d_frame;
u32 out_order_1p;
@@ -511,12 +510,7 @@ struct fimc_ctx {
struct fimc_dev *fimc_dev;
struct v4l2_m2m_ctx *m2m_ctx;
struct v4l2_fh fh;
- struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_ctrl *ctrl_rotate;
- struct v4l2_ctrl *ctrl_hflip;
- struct v4l2_ctrl *ctrl_vflip;
- struct v4l2_ctrl *ctrl_alpha;
- bool ctrls_rdy;
+ struct fimc_ctrls ctrls;
};
#define fh_to_ctx(__fh) container_of(__fh, struct fimc_ctx, fh)
@@ -560,13 +554,13 @@ static inline bool fimc_capture_active(struct fimc_dev *fimc)
return ret;
}
-static inline void fimc_ctx_state_lock_set(u32 state, struct fimc_ctx *ctx)
+static inline void fimc_ctx_state_set(u32 state, struct fimc_ctx *ctx)
{
unsigned long flags;
- spin_lock_irqsave(&ctx->slock, flags);
+ spin_lock_irqsave(&ctx->fimc_dev->slock, flags);
ctx->state |= state;
- spin_unlock_irqrestore(&ctx->slock, flags);
+ spin_unlock_irqrestore(&ctx->fimc_dev->slock, flags);
}
static inline bool fimc_ctx_state_is_set(u32 mask, struct fimc_ctx *ctx)
@@ -574,9 +568,9 @@ static inline bool fimc_ctx_state_is_set(u32 mask, struct fimc_ctx *ctx)
unsigned long flags;
bool ret;
- spin_lock_irqsave(&ctx->slock, flags);
+ spin_lock_irqsave(&ctx->fimc_dev->slock, flags);
ret = (ctx->state & mask) == mask;
- spin_unlock_irqrestore(&ctx->slock, flags);
+ spin_unlock_irqrestore(&ctx->fimc_dev->slock, flags);
return ret;
}
@@ -589,61 +583,13 @@ static inline int tiled_fmt(struct fimc_fmt *fmt)
static inline int fimc_get_alpha_mask(struct fimc_fmt *fmt)
{
switch (fmt->color) {
- case S5P_FIMC_RGB444: return 0x0f;
- case S5P_FIMC_RGB555: return 0x01;
- case S5P_FIMC_RGB888: return 0xff;
+ case FIMC_FMT_RGB444: return 0x0f;
+ case FIMC_FMT_RGB555: return 0x01;
+ case FIMC_FMT_RGB888: return 0xff;
default: return 0;
};
}
-static inline void fimc_hw_clear_irq(struct fimc_dev *dev)
-{
- u32 cfg = readl(dev->regs + S5P_CIGCTRL);
- cfg |= S5P_CIGCTRL_IRQ_CLR;
- writel(cfg, dev->regs + S5P_CIGCTRL);
-}
-
-static inline void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on)
-{
- u32 cfg = readl(dev->regs + S5P_CISCCTRL);
- if (on)
- cfg |= S5P_CISCCTRL_SCALERSTART;
- else
- cfg &= ~S5P_CISCCTRL_SCALERSTART;
- writel(cfg, dev->regs + S5P_CISCCTRL);
-}
-
-static inline void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on)
-{
- u32 cfg = readl(dev->regs + S5P_MSCTRL);
- if (on)
- cfg |= S5P_MSCTRL_ENVID;
- else
- cfg &= ~S5P_MSCTRL_ENVID;
- writel(cfg, dev->regs + S5P_MSCTRL);
-}
-
-static inline void fimc_hw_dis_capture(struct fimc_dev *dev)
-{
- u32 cfg = readl(dev->regs + S5P_CIIMGCPT);
- cfg &= ~(S5P_CIIMGCPT_IMGCPTEN | S5P_CIIMGCPT_IMGCPTEN_SC);
- writel(cfg, dev->regs + S5P_CIIMGCPT);
-}
-
-/**
- * fimc_hw_set_dma_seq - configure output DMA buffer sequence
- * @mask: each bit corresponds to one of 32 output buffer registers set
- * 1 to include buffer in the sequence, 0 to disable
- *
- * This function mask output DMA ring buffers, i.e. it allows to configure
- * which of the output buffer address registers will be used by the DMA
- * engine.
- */
-static inline void fimc_hw_set_dma_seq(struct fimc_dev *dev, u32 mask)
-{
- writel(mask, dev->regs + S5P_CIFCNTSEQ);
-}
-
static inline struct fimc_frame *ctx_get_frame(struct fimc_ctx *ctx,
enum v4l2_buf_type type)
{
@@ -665,48 +611,6 @@ static inline struct fimc_frame *ctx_get_frame(struct fimc_ctx *ctx,
return frame;
}
-/* Return an index to the buffer actually being written. */
-static inline u32 fimc_hw_get_frame_index(struct fimc_dev *dev)
-{
- u32 reg;
-
- if (dev->variant->has_cistatus2) {
- reg = readl(dev->regs + S5P_CISTATUS2) & 0x3F;
- return reg > 0 ? --reg : reg;
- } else {
- reg = readl(dev->regs + S5P_CISTATUS);
- return (reg & S5P_CISTATUS_FRAMECNT_MASK) >>
- S5P_CISTATUS_FRAMECNT_SHIFT;
- }
-}
-
-/* -----------------------------------------------------*/
-/* fimc-reg.c */
-void fimc_hw_reset(struct fimc_dev *fimc);
-void fimc_hw_set_rotation(struct fimc_ctx *ctx);
-void fimc_hw_set_target_format(struct fimc_ctx *ctx);
-void fimc_hw_set_out_dma(struct fimc_ctx *ctx);
-void fimc_hw_en_lastirq(struct fimc_dev *fimc, int enable);
-void fimc_hw_en_irq(struct fimc_dev *fimc, int enable);
-void fimc_hw_set_prescaler(struct fimc_ctx *ctx);
-void fimc_hw_set_mainscaler(struct fimc_ctx *ctx);
-void fimc_hw_en_capture(struct fimc_ctx *ctx);
-void fimc_hw_set_effect(struct fimc_ctx *ctx, bool active);
-void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx);
-void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
-void fimc_hw_set_input_path(struct fimc_ctx *ctx);
-void fimc_hw_set_output_path(struct fimc_ctx *ctx);
-void fimc_hw_set_input_addr(struct fimc_dev *fimc, struct fimc_addr *paddr);
-void fimc_hw_set_output_addr(struct fimc_dev *fimc, struct fimc_addr *paddr,
- int index);
-int fimc_hw_set_camera_source(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam);
-int fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f);
-int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam);
-int fimc_hw_set_camera_type(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam);
-
/* -----------------------------------------------------*/
/* fimc-core.c */
int fimc_vidioc_enum_fmt_mplane(struct file *file, void *priv,
@@ -720,6 +624,7 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
struct v4l2_pix_format_mplane *pix);
struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
unsigned int mask, int index);
+struct fimc_fmt *fimc_get_format(unsigned int index);
int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
int dw, int dh, int rotation);
@@ -730,7 +635,7 @@ int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f);
void fimc_set_yuv_order(struct fimc_ctx *ctx);
void fimc_fill_frame(struct fimc_frame *frame, struct v4l2_format *f);
-void fimc_capture_irq_handler(struct fimc_dev *fimc, bool done);
+void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf);
int fimc_register_m2m_device(struct fimc_dev *fimc,
struct v4l2_device *v4l2_dev);
@@ -739,33 +644,18 @@ int fimc_register_driver(void);
void fimc_unregister_driver(void);
/* -----------------------------------------------------*/
+/* fimc-m2m.c */
+void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state);
+
+/* -----------------------------------------------------*/
/* fimc-capture.c */
-int fimc_register_capture_device(struct fimc_dev *fimc,
- struct v4l2_device *v4l2_dev);
-void fimc_unregister_capture_device(struct fimc_dev *fimc);
+int fimc_initialize_capture_subdev(struct fimc_dev *fimc);
+void fimc_unregister_capture_subdev(struct fimc_dev *fimc);
int fimc_capture_ctrls_create(struct fimc_dev *fimc);
-int fimc_vid_cap_buf_queue(struct fimc_dev *fimc,
- struct fimc_vid_buffer *fimc_vb);
void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
void *arg);
int fimc_capture_suspend(struct fimc_dev *fimc);
int fimc_capture_resume(struct fimc_dev *fimc);
-int fimc_capture_config_update(struct fimc_ctx *ctx);
-
-/* Locking: the caller holds fimc->slock */
-static inline void fimc_activate_capture(struct fimc_ctx *ctx)
-{
- fimc_hw_enable_scaler(ctx->fimc_dev, ctx->scaler.enabled);
- fimc_hw_en_capture(ctx);
-}
-
-static inline void fimc_deactivate_capture(struct fimc_dev *fimc)
-{
- fimc_hw_en_lastirq(fimc, true);
- fimc_hw_dis_capture(fimc);
- fimc_hw_enable_scaler(fimc, false);
- fimc_hw_en_lastirq(fimc, false);
-}
/*
* Buffer list manipulation functions. Must be called with fimc.slock held.
diff --git a/drivers/media/video/s5p-fimc/fimc-lite-reg.c b/drivers/media/video/s5p-fimc/fimc-lite-reg.c
new file mode 100644
index 000000000000..419adfb7cdf9
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-lite-reg.c
@@ -0,0 +1,300 @@
+/*
+ * Register interface file for EXYNOS FIMC-LITE (camera interface) driver
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <media/s5p_fimc.h>
+
+#include "fimc-lite-reg.h"
+#include "fimc-lite.h"
+#include "fimc-core.h"
+
+#define FLITE_RESET_TIMEOUT 50 /* in ms */
+
+void flite_hw_reset(struct fimc_lite *dev)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(FLITE_RESET_TIMEOUT);
+ u32 cfg;
+
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ cfg |= FLITE_REG_CIGCTRL_SWRST_REQ;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ while (time_is_after_jiffies(end)) {
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ if (cfg & FLITE_REG_CIGCTRL_SWRST_RDY)
+ break;
+ usleep_range(1000, 5000);
+ }
+
+ cfg |= FLITE_REG_CIGCTRL_SWRST;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+}
+
+void flite_hw_clear_pending_irq(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CISTATUS);
+ cfg &= ~FLITE_REG_CISTATUS_IRQ_CAM;
+ writel(cfg, dev->regs + FLITE_REG_CISTATUS);
+}
+
+u32 flite_hw_get_interrupt_source(struct fimc_lite *dev)
+{
+ u32 intsrc = readl(dev->regs + FLITE_REG_CISTATUS);
+ return intsrc & FLITE_REG_CISTATUS_IRQ_MASK;
+}
+
+void flite_hw_clear_last_capture_end(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CISTATUS2);
+ cfg &= ~FLITE_REG_CISTATUS2_LASTCAPEND;
+ writel(cfg, dev->regs + FLITE_REG_CISTATUS2);
+}
+
+void flite_hw_set_interrupt_mask(struct fimc_lite *dev)
+{
+ u32 cfg, intsrc;
+
+ /* Select interrupts to be enabled for each output mode */
+ if (dev->out_path == FIMC_IO_DMA) {
+ intsrc = FLITE_REG_CIGCTRL_IRQ_OVFEN |
+ FLITE_REG_CIGCTRL_IRQ_LASTEN |
+ FLITE_REG_CIGCTRL_IRQ_STARTEN;
+ } else {
+ /* An output to the FIMC-IS */
+ intsrc = FLITE_REG_CIGCTRL_IRQ_OVFEN |
+ FLITE_REG_CIGCTRL_IRQ_LASTEN;
+ }
+
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ cfg |= FLITE_REG_CIGCTRL_IRQ_DISABLE_MASK;
+ cfg &= ~intsrc;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+}
+
+void flite_hw_capture_start(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIIMGCPT);
+ cfg |= FLITE_REG_CIIMGCPT_IMGCPTEN;
+ writel(cfg, dev->regs + FLITE_REG_CIIMGCPT);
+}
+
+void flite_hw_capture_stop(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIIMGCPT);
+ cfg &= ~FLITE_REG_CIIMGCPT_IMGCPTEN;
+ writel(cfg, dev->regs + FLITE_REG_CIIMGCPT);
+}
+
+/*
+ * Test pattern (color bars) enable/disable. External sensor
+ * pixel clock must be active for the test pattern to work.
+ */
+void flite_hw_set_test_pattern(struct fimc_lite *dev, bool on)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ if (on)
+ cfg |= FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR;
+ else
+ cfg &= ~FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+}
+
+static const u32 src_pixfmt_map[8][3] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCBYCR,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { V4L2_MBUS_FMT_YVYU8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCRYCB,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { V4L2_MBUS_FMT_UYVY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CBYCRY,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CRYCBY,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { V4L2_MBUS_FMT_SGRBG8_1X8, 0, FLITE_REG_CIGCTRL_RAW8 },
+ { V4L2_MBUS_FMT_SGRBG10_1X10, 0, FLITE_REG_CIGCTRL_RAW10 },
+ { V4L2_MBUS_FMT_SGRBG12_1X12, 0, FLITE_REG_CIGCTRL_RAW12 },
+ { V4L2_MBUS_FMT_JPEG_1X8, 0, FLITE_REG_CIGCTRL_USER(1) },
+};
+
+/* Set camera input pixel format and resolution */
+void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
+{
+ enum v4l2_mbus_pixelcode pixelcode = dev->fmt->mbus_code;
+ unsigned int i = ARRAY_SIZE(src_pixfmt_map);
+ u32 cfg;
+
+ while (--i) {
+ if (src_pixfmt_map[i][0] == pixelcode)
+ break;
+ }
+
+ if (i == 0 && src_pixfmt_map[i][0] != pixelcode) {
+ v4l2_err(dev->vfd,
+ "Unsupported pixel code, falling back to %#08x\n",
+ src_pixfmt_map[i][0]);
+ }
+
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ cfg &= ~FLITE_REG_CIGCTRL_FMT_MASK;
+ cfg |= src_pixfmt_map[i][2];
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ cfg = readl(dev->regs + FLITE_REG_CISRCSIZE);
+ cfg &= ~(FLITE_REG_CISRCSIZE_ORDER422_MASK |
+ FLITE_REG_CISRCSIZE_SIZE_CAM_MASK);
+ cfg |= (f->f_width << 16) | f->f_height;
+ cfg |= src_pixfmt_map[i][1];
+ writel(cfg, dev->regs + FLITE_REG_CISRCSIZE);
+}
+
+/* Set the camera host input window offsets (cropping) */
+void flite_hw_set_window_offset(struct fimc_lite *dev, struct flite_frame *f)
+{
+ u32 hoff2, voff2;
+ u32 cfg;
+
+ cfg = readl(dev->regs + FLITE_REG_CIWDOFST);
+ cfg &= ~FLITE_REG_CIWDOFST_OFST_MASK;
+ cfg |= (f->rect.left << 16) | f->rect.top;
+ cfg |= FLITE_REG_CIWDOFST_WINOFSEN;
+ writel(cfg, dev->regs + FLITE_REG_CIWDOFST);
+
+ hoff2 = f->f_width - f->rect.width - f->rect.left;
+ voff2 = f->f_height - f->rect.height - f->rect.top;
+
+ cfg = (hoff2 << 16) | voff2;
+ writel(cfg, dev->regs + FLITE_REG_CIWDOFST2);
+}
+
+/* Select camera port (A, B) */
+static void flite_hw_set_camera_port(struct fimc_lite *dev, int id)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGENERAL);
+ if (id == 0)
+ cfg &= ~FLITE_REG_CIGENERAL_CAM_B;
+ else
+ cfg |= FLITE_REG_CIGENERAL_CAM_B;
+ writel(cfg, dev->regs + FLITE_REG_CIGENERAL);
+}
+
+/* Select serial or parallel bus, camera port (A, B) and set signal polarities */
+void flite_hw_set_camera_bus(struct fimc_lite *dev,
+ struct s5p_fimc_isp_info *s_info)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ unsigned int flags = s_info->flags;
+
+ if (s_info->bus_type != FIMC_MIPI_CSI2) {
+ cfg &= ~(FLITE_REG_CIGCTRL_SELCAM_MIPI |
+ FLITE_REG_CIGCTRL_INVPOLPCLK |
+ FLITE_REG_CIGCTRL_INVPOLVSYNC |
+ FLITE_REG_CIGCTRL_INVPOLHREF);
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ cfg |= FLITE_REG_CIGCTRL_INVPOLPCLK;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ cfg |= FLITE_REG_CIGCTRL_INVPOLVSYNC;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ cfg |= FLITE_REG_CIGCTRL_INVPOLHREF;
+ } else {
+ cfg |= FLITE_REG_CIGCTRL_SELCAM_MIPI;
+ }
+
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ flite_hw_set_camera_port(dev, s_info->mux_id);
+}
+
+void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
+{
+ static const u32 pixcode[4][2] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, FLITE_REG_CIODMAFMT_YCBYCR },
+ { V4L2_MBUS_FMT_YVYU8_2X8, FLITE_REG_CIODMAFMT_YCRYCB },
+ { V4L2_MBUS_FMT_UYVY8_2X8, FLITE_REG_CIODMAFMT_CBYCRY },
+ { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY },
+ };
+ u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
+ unsigned int i = ARRAY_SIZE(pixcode);
+
+ while (--i)
+ if (pixcode[i][0] == dev->fmt->mbus_code)
+ break;
+ cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK;
+ writel(cfg | pixcode[i][1], dev->regs + FLITE_REG_CIODMAFMT);
+}
+
+void flite_hw_set_dma_window(struct fimc_lite *dev, struct flite_frame *f)
+{
+ u32 cfg;
+
+ /* Maximum output pixel size */
+ cfg = readl(dev->regs + FLITE_REG_CIOCAN);
+ cfg &= ~FLITE_REG_CIOCAN_MASK;
+ cfg |= (f->f_height << 16) | f->f_width;
+ writel(cfg, dev->regs + FLITE_REG_CIOCAN);
+
+ /* DMA offsets */
+ cfg = readl(dev->regs + FLITE_REG_CIOOFF);
+ cfg &= ~FLITE_REG_CIOOFF_MASK;
+ cfg |= (f->rect.top << 16) | f->rect.left;
+ writel(cfg, dev->regs + FLITE_REG_CIOOFF);
+}
+
+/* Enable/disable output DMA, set output pixel size and offsets (composition) */
+void flite_hw_set_output_dma(struct fimc_lite *dev, struct flite_frame *f,
+ bool enable)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+
+ if (!enable) {
+ cfg |= FLITE_REG_CIGCTRL_ODMA_DISABLE;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+ return;
+ }
+
+ cfg &= ~FLITE_REG_CIGCTRL_ODMA_DISABLE;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ flite_hw_set_out_order(dev, f);
+ flite_hw_set_dma_window(dev, f);
+}
+
+void flite_hw_dump_regs(struct fimc_lite *dev, const char *label)
+{
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { 0x00, "CISRCSIZE" },
+ { 0x04, "CIGCTRL" },
+ { 0x08, "CIIMGCPT" },
+ { 0x0c, "CICPTSEQ" },
+ { 0x10, "CIWDOFST" },
+ { 0x14, "CIWDOFST2" },
+ { 0x18, "CIODMAFMT" },
+ { 0x20, "CIOCAN" },
+ { 0x24, "CIOOFF" },
+ { 0x30, "CIOSA" },
+ { 0x40, "CISTATUS" },
+ { 0x44, "CISTATUS2" },
+ { 0xf0, "CITHOLD" },
+ { 0xfc, "CIGENERAL" },
+ };
+ u32 i;
+
+ pr_info("--- %s ---\n", label);
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 cfg = readl(dev->regs + registers[i].offset);
+ pr_info("%s: %s:\t0x%08x\n", __func__, registers[i].name, cfg);
+ }
+}
diff --git a/drivers/media/video/s5p-fimc/fimc-lite-reg.h b/drivers/media/video/s5p-fimc/fimc-lite-reg.h
new file mode 100644
index 000000000000..adb9e9e6f3c2
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-lite-reg.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_LITE_REG_H_
+#define FIMC_LITE_REG_H_
+
+#include "fimc-lite.h"
+
+/* Camera Source size */
+#define FLITE_REG_CISRCSIZE 0x00
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_YCBYCR (0 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_YCRYCB (1 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_CBYCRY (2 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_CRYCBY (3 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_MASK (0x3 << 14)
+#define FLITE_REG_CISRCSIZE_SIZE_CAM_MASK (0x3fff << 16 | 0x3fff)
+
+/* Global control */
+#define FLITE_REG_CIGCTRL 0x04
+#define FLITE_REG_CIGCTRL_YUV422_1P (0x1e << 24)
+#define FLITE_REG_CIGCTRL_RAW8 (0x2a << 24)
+#define FLITE_REG_CIGCTRL_RAW10 (0x2b << 24)
+#define FLITE_REG_CIGCTRL_RAW12 (0x2c << 24)
+#define FLITE_REG_CIGCTRL_RAW14 (0x2d << 24)
+/* User defined formats. x = 0...15 */
+#define FLITE_REG_CIGCTRL_USER(x) ((0x30 + x - 1) << 24)
+#define FLITE_REG_CIGCTRL_FMT_MASK (0x3f << 24)
+#define FLITE_REG_CIGCTRL_SHADOWMASK_DISABLE (1 << 21)
+#define FLITE_REG_CIGCTRL_ODMA_DISABLE (1 << 20)
+#define FLITE_REG_CIGCTRL_SWRST_REQ (1 << 19)
+#define FLITE_REG_CIGCTRL_SWRST_RDY (1 << 18)
+#define FLITE_REG_CIGCTRL_SWRST (1 << 17)
+#define FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR (1 << 15)
+#define FLITE_REG_CIGCTRL_INVPOLPCLK (1 << 14)
+#define FLITE_REG_CIGCTRL_INVPOLVSYNC (1 << 13)
+#define FLITE_REG_CIGCTRL_INVPOLHREF (1 << 12)
+/* Interrupts mask bits (1 disables an interrupt) */
+#define FLITE_REG_CIGCTRL_IRQ_LASTEN (1 << 8)
+#define FLITE_REG_CIGCTRL_IRQ_ENDEN (1 << 7)
+#define FLITE_REG_CIGCTRL_IRQ_STARTEN (1 << 6)
+#define FLITE_REG_CIGCTRL_IRQ_OVFEN (1 << 5)
+#define FLITE_REG_CIGCTRL_IRQ_DISABLE_MASK (0xf << 5)
+#define FLITE_REG_CIGCTRL_SELCAM_MIPI (1 << 3)
+
+/* Image Capture Enable */
+#define FLITE_REG_CIIMGCPT 0x08
+#define FLITE_REG_CIIMGCPT_IMGCPTEN (1 << 31)
+#define FLITE_REG_CIIMGCPT_CPT_FREN (1 << 25)
+#define FLITE_REG_CIIMGCPT_CPT_MOD_FRCNT (1 << 18)
+#define FLITE_REG_CIIMGCPT_CPT_MOD_FREN (0 << 18)
+
+/* Capture Sequence */
+#define FLITE_REG_CICPTSEQ 0x0c
+
+/* Camera Window Offset */
+#define FLITE_REG_CIWDOFST 0x10
+#define FLITE_REG_CIWDOFST_WINOFSEN (1 << 31)
+#define FLITE_REG_CIWDOFST_CLROVIY (1 << 31)
+#define FLITE_REG_CIWDOFST_CLROVFICB (1 << 15)
+#define FLITE_REG_CIWDOFST_CLROVFICR (1 << 14)
+#define FLITE_REG_CIWDOFST_OFST_MASK ((0x1fff << 16) | 0x1fff)
+
+/* Camera Window Offset2 */
+#define FLITE_REG_CIWDOFST2 0x14
+
+/* Camera Output DMA Format */
+#define FLITE_REG_CIODMAFMT 0x18
+#define FLITE_REG_CIODMAFMT_RAW_CON (1 << 15)
+#define FLITE_REG_CIODMAFMT_PACK12 (1 << 14)
+#define FLITE_REG_CIODMAFMT_CRYCBY (0 << 4)
+#define FLITE_REG_CIODMAFMT_CBYCRY (1 << 4)
+#define FLITE_REG_CIODMAFMT_YCRYCB (2 << 4)
+#define FLITE_REG_CIODMAFMT_YCBYCR (3 << 4)
+#define FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK (0x3 << 4)
+
+/* Camera Output Canvas */
+#define FLITE_REG_CIOCAN 0x20
+#define FLITE_REG_CIOCAN_MASK ((0x3fff << 16) | 0x3fff)
+
+/* Camera Output DMA Offset */
+#define FLITE_REG_CIOOFF 0x24
+#define FLITE_REG_CIOOFF_MASK ((0x3fff << 16) | 0x3fff)
+
+/* Camera Output DMA Start Address */
+#define FLITE_REG_CIOSA 0x30
+
+/* Camera Status */
+#define FLITE_REG_CISTATUS 0x40
+#define FLITE_REG_CISTATUS_MIPI_VVALID (1 << 22)
+#define FLITE_REG_CISTATUS_MIPI_HVALID (1 << 21)
+#define FLITE_REG_CISTATUS_MIPI_DVALID (1 << 20)
+#define FLITE_REG_CISTATUS_ITU_VSYNC (1 << 14)
+#define FLITE_REG_CISTATUS_ITU_HREFF (1 << 13)
+#define FLITE_REG_CISTATUS_OVFIY (1 << 10)
+#define FLITE_REG_CISTATUS_OVFICB (1 << 9)
+#define FLITE_REG_CISTATUS_OVFICR (1 << 8)
+#define FLITE_REG_CISTATUS_IRQ_SRC_OVERFLOW (1 << 7)
+#define FLITE_REG_CISTATUS_IRQ_SRC_LASTCAPEND (1 << 6)
+#define FLITE_REG_CISTATUS_IRQ_SRC_FRMSTART (1 << 5)
+#define FLITE_REG_CISTATUS_IRQ_SRC_FRMEND (1 << 4)
+#define FLITE_REG_CISTATUS_IRQ_CAM (1 << 0)
+#define FLITE_REG_CISTATUS_IRQ_MASK (0xf << 4)
+
+/* Camera Status2 */
+#define FLITE_REG_CISTATUS2 0x44
+#define FLITE_REG_CISTATUS2_LASTCAPEND (1 << 1)
+#define FLITE_REG_CISTATUS2_FRMEND (1 << 0)
+
+/* Qos Threshold */
+#define FLITE_REG_CITHOLD 0xf0
+#define FLITE_REG_CITHOLD_W_QOS_EN (1 << 30)
+
+/* Camera General Purpose */
+#define FLITE_REG_CIGENERAL 0xfc
+/* b0: 1 - camera B, 0 - camera A */
+#define FLITE_REG_CIGENERAL_CAM_B (1 << 0)
+
+/* ----------------------------------------------------------------------------
+ * Function declarations
+ */
+void flite_hw_reset(struct fimc_lite *dev);
+void flite_hw_clear_pending_irq(struct fimc_lite *dev);
+u32 flite_hw_get_interrupt_source(struct fimc_lite *dev);
+void flite_hw_clear_last_capture_end(struct fimc_lite *dev);
+void flite_hw_set_interrupt_mask(struct fimc_lite *dev);
+void flite_hw_capture_start(struct fimc_lite *dev);
+void flite_hw_capture_stop(struct fimc_lite *dev);
+void flite_hw_set_camera_bus(struct fimc_lite *dev,
+ struct s5p_fimc_isp_info *s_info);
+void flite_hw_set_camera_polarity(struct fimc_lite *dev,
+ struct s5p_fimc_isp_info *cam);
+void flite_hw_set_window_offset(struct fimc_lite *dev, struct flite_frame *f);
+void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f);
+
+void flite_hw_set_output_dma(struct fimc_lite *dev, struct flite_frame *f,
+ bool enable);
+void flite_hw_set_dma_window(struct fimc_lite *dev, struct flite_frame *f);
+void flite_hw_set_test_pattern(struct fimc_lite *dev, bool on);
+void flite_hw_dump_regs(struct fimc_lite *dev, const char *label);
+
+static inline void flite_hw_set_output_addr(struct fimc_lite *dev, u32 paddr)
+{
+ writel(paddr, dev->regs + FLITE_REG_CIOSA);
+}
+#endif /* FIMC_LITE_REG_H */
diff --git a/drivers/media/video/s5p-fimc/fimc-lite.c b/drivers/media/video/s5p-fimc/fimc-lite.c
new file mode 100644
index 000000000000..400d701aef04
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-lite.c
@@ -0,0 +1,1576 @@
+/*
+ * Samsung EXYNOS FIMC-LITE (camera host interface) driver
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "fimc-mdevice.h"
+#include "fimc-core.h"
+#include "fimc-lite-reg.h"
+
+static int debug;
+module_param(debug, int, 0644);
+
+static const struct fimc_fmt fimc_lite_formats[] = {
+ {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCBYCR422,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ }, {
+ .name = "YUV 4:2:2 packed, CbYCrY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = { 16 },
+ .color = FIMC_FMT_CBYCRY422,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
+ }, {
+ .name = "YUV 4:2:2 packed, CrYCbY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .depth = { 16 },
+ .color = FIMC_FMT_CRYCBY422,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_VYUY8_2X8,
+ }, {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCRYCB422,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_YVYU8_2X8,
+ }, {
+ .name = "RAW8 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .depth = { 8 },
+ .color = FIMC_FMT_RAW8,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_SGRBG8_1X8,
+ }, {
+ .name = "RAW10 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .depth = { 10 },
+ .color = FIMC_FMT_RAW10,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ }, {
+ .name = "RAW12 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .depth = { 12 },
+ .color = FIMC_FMT_RAW12,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12,
+ },
+};
+
+/**
+ * fimc_lite_find_format - lookup fimc color format by fourcc or media bus code
+ * @pixelformat: fourcc to match, ignored if null
+ * @mbus_code: media bus code to match, ignored if null
+ * @index: index to the fimc_lite_formats array, ignored if negative
+ */
+static const struct fimc_fmt *fimc_lite_find_format(const u32 *pixelformat,
+ const u32 *mbus_code, int index)
+{
+ const struct fimc_fmt *fmt, *def_fmt = NULL;
+ unsigned int i;
+ int id = 0;
+
+ if (index >= (int)ARRAY_SIZE(fimc_lite_formats))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(fimc_lite_formats); ++i) {
+ fmt = &fimc_lite_formats[i];
+ if (pixelformat && fmt->fourcc == *pixelformat)
+ return fmt;
+ if (mbus_code && fmt->mbus_code == *mbus_code)
+ return fmt;
+ if (index == id)
+ def_fmt = fmt;
+ id++;
+ }
+ return def_fmt;
+}
+
+static int fimc_lite_hw_init(struct fimc_lite *fimc)
+{
+ struct fimc_pipeline *pipeline = &fimc->pipeline;
+ struct fimc_sensor_info *sensor;
+ unsigned long flags;
+
+ if (pipeline->subdevs[IDX_SENSOR] == NULL)
+ return -ENXIO;
+
+ if (fimc->fmt == NULL)
+ return -EINVAL;
+
+ sensor = v4l2_get_subdev_hostdata(pipeline->subdevs[IDX_SENSOR]);
+ spin_lock_irqsave(&fimc->slock, flags);
+
+ flite_hw_set_camera_bus(fimc, sensor->pdata);
+ flite_hw_set_source_format(fimc, &fimc->inp_frame);
+ flite_hw_set_window_offset(fimc, &fimc->inp_frame);
+ flite_hw_set_output_dma(fimc, &fimc->out_frame, true);
+ flite_hw_set_interrupt_mask(fimc);
+ flite_hw_set_test_pattern(fimc, fimc->test_pattern->val);
+
+ if (debug > 0)
+ flite_hw_dump_regs(fimc, __func__);
+
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return 0;
+}
+
+/*
+ * Reinitialize the driver so it is ready to start streaming again.
+ * Set fimc->state to indicate the stream-off and hardware shutdown state.
+ * If not suspending (@suspend is false), return any buffers to videobuf2.
+ * Otherwise put any owned buffers onto the pending buffers queue, so they
+ * can be requeued once the device is resumed. Also perform a FIMC
+ * software reset and disable streaming on the whole pipeline if required.
+ */
+static int fimc_lite_reinit(struct fimc_lite *fimc, bool suspend)
+{
+ struct flite_buffer *buf;
+ unsigned long flags;
+ bool streaming;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ streaming = fimc->state & (1 << ST_SENSOR_STREAM);
+
+ fimc->state &= ~(1 << ST_FLITE_RUN | 1 << ST_FLITE_OFF |
+ 1 << ST_FLITE_STREAM | 1 << ST_SENSOR_STREAM);
+ if (suspend)
+ fimc->state |= (1 << ST_FLITE_SUSPENDED);
+ else
+ fimc->state &= ~(1 << ST_FLITE_PENDING |
+ 1 << ST_FLITE_SUSPENDED);
+
+ /* Release unused buffers */
+ while (!suspend && !list_empty(&fimc->pending_buf_q)) {
+ buf = fimc_lite_pending_queue_pop(fimc);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ /* If suspending put unused buffers onto pending queue */
+ while (!list_empty(&fimc->active_buf_q)) {
+ buf = fimc_lite_active_queue_pop(fimc);
+ if (suspend)
+ fimc_lite_pending_queue_add(fimc, buf);
+ else
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ flite_hw_reset(fimc);
+
+ if (!streaming)
+ return 0;
+
+ return fimc_pipeline_s_stream(&fimc->pipeline, 0);
+}
+
+static int fimc_lite_stop_capture(struct fimc_lite *fimc, bool suspend)
+{
+ unsigned long flags;
+
+ if (!fimc_lite_active(fimc))
+ return 0;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_bit(ST_FLITE_OFF, &fimc->state);
+ flite_hw_capture_stop(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ wait_event_timeout(fimc->irq_queue,
+ !test_bit(ST_FLITE_OFF, &fimc->state),
+ (2*HZ/10)); /* 200 ms */
+
+ return fimc_lite_reinit(fimc, suspend);
+}
+
+/* Must be called with fimc.slock spinlock held. */
+static void fimc_lite_config_update(struct fimc_lite *fimc)
+{
+ flite_hw_set_window_offset(fimc, &fimc->inp_frame);
+ flite_hw_set_dma_window(fimc, &fimc->out_frame);
+ flite_hw_set_test_pattern(fimc, fimc->test_pattern->val);
+ clear_bit(ST_FLITE_CONFIG, &fimc->state);
+}
+
+static irqreturn_t flite_irq_handler(int irq, void *priv)
+{
+ struct fimc_lite *fimc = priv;
+ struct flite_buffer *vbuf;
+ unsigned long flags;
+ struct timeval *tv;
+ struct timespec ts;
+ u32 intsrc;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+
+ intsrc = flite_hw_get_interrupt_source(fimc);
+ flite_hw_clear_pending_irq(fimc);
+
+ if (test_and_clear_bit(ST_FLITE_OFF, &fimc->state)) {
+ wake_up(&fimc->irq_queue);
+ goto done;
+ }
+
+ if (intsrc & FLITE_REG_CISTATUS_IRQ_SRC_OVERFLOW) {
+ clear_bit(ST_FLITE_RUN, &fimc->state);
+ fimc->events.data_overflow++;
+ }
+
+ if (intsrc & FLITE_REG_CISTATUS_IRQ_SRC_LASTCAPEND) {
+ flite_hw_clear_last_capture_end(fimc);
+ clear_bit(ST_FLITE_STREAM, &fimc->state);
+ wake_up(&fimc->irq_queue);
+ }
+
+ if (fimc->out_path != FIMC_IO_DMA)
+ goto done;
+
+ if ((intsrc & FLITE_REG_CISTATUS_IRQ_SRC_FRMSTART) &&
+ test_bit(ST_FLITE_RUN, &fimc->state) &&
+ !list_empty(&fimc->active_buf_q) &&
+ !list_empty(&fimc->pending_buf_q)) {
+ vbuf = fimc_lite_active_queue_pop(fimc);
+ ktime_get_ts(&ts);
+ tv = &vbuf->vb.v4l2_buf.timestamp;
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ vbuf->vb.v4l2_buf.sequence = fimc->frame_count++;
+ vb2_buffer_done(&vbuf->vb, VB2_BUF_STATE_DONE);
+
+ vbuf = fimc_lite_pending_queue_pop(fimc);
+ flite_hw_set_output_addr(fimc, vbuf->paddr);
+ fimc_lite_active_queue_add(fimc, vbuf);
+ }
+
+ if (test_bit(ST_FLITE_CONFIG, &fimc->state))
+ fimc_lite_config_update(fimc);
+
+ if (list_empty(&fimc->pending_buf_q)) {
+ flite_hw_capture_stop(fimc);
+ clear_bit(ST_FLITE_STREAM, &fimc->state);
+ }
+done:
+ set_bit(ST_FLITE_RUN, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return IRQ_HANDLED;
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fimc_lite *fimc = q->drv_priv;
+ int ret;
+
+ fimc->frame_count = 0;
+
+ ret = fimc_lite_hw_init(fimc);
+ if (ret) {
+ fimc_lite_reinit(fimc, false);
+ return ret;
+ }
+
+ set_bit(ST_FLITE_PENDING, &fimc->state);
+
+ if (!list_empty(&fimc->active_buf_q) &&
+ !test_and_set_bit(ST_FLITE_STREAM, &fimc->state)) {
+ flite_hw_capture_start(fimc);
+
+ if (!test_and_set_bit(ST_SENSOR_STREAM, &fimc->state))
+ fimc_pipeline_s_stream(&fimc->pipeline, 1);
+ }
+ if (debug > 0)
+ flite_hw_dump_regs(fimc, __func__);
+
+ return 0;
+}
+
+static int stop_streaming(struct vb2_queue *q)
+{
+ struct fimc_lite *fimc = q->drv_priv;
+
+ if (!fimc_lite_active(fimc))
+ return -EINVAL;
+
+ return fimc_lite_stop_capture(fimc, false);
+}
+
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *allocators[])
+{
+ const struct v4l2_pix_format_mplane *pixm = NULL;
+ struct fimc_lite *fimc = vq->drv_priv;
+ struct flite_frame *frame = &fimc->out_frame;
+ const struct fimc_fmt *fmt = fimc->fmt;
+ unsigned long wh;
+ int i;
+
+ if (pfmt) {
+ pixm = &pfmt->fmt.pix_mp;
+ fmt = fimc_lite_find_format(&pixm->pixelformat, NULL, -1);
+ wh = pixm->width * pixm->height;
+ } else {
+ wh = frame->f_width * frame->f_height;
+ }
+
+ if (fmt == NULL)
+ return -EINVAL;
+
+ *num_planes = fmt->memplanes;
+
+ for (i = 0; i < fmt->memplanes; i++) {
+ unsigned int size = (wh * fmt->depth[i]) / 8;
+ if (pixm)
+ sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
+ else
+ sizes[i] = size;
+ allocators[i] = fimc->alloc_ctx;
+ }
+
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct fimc_lite *fimc = vq->drv_priv;
+ int i;
+
+ if (fimc->fmt == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < fimc->fmt->memplanes; i++) {
+ unsigned long size = fimc->payload[i];
+
+ if (vb2_plane_size(vb, i) < size) {
+ v4l2_err(fimc->vfd,
+ "User buffer too small (%ld < %ld)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct flite_buffer *buf
+ = container_of(vb, struct flite_buffer, vb);
+ struct fimc_lite *fimc = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ buf->paddr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ if (!test_bit(ST_FLITE_SUSPENDED, &fimc->state) &&
+ !test_bit(ST_FLITE_STREAM, &fimc->state) &&
+ list_empty(&fimc->active_buf_q)) {
+ flite_hw_set_output_addr(fimc, buf->paddr);
+ fimc_lite_active_queue_add(fimc, buf);
+ } else {
+ fimc_lite_pending_queue_add(fimc, buf);
+ }
+
+ if (vb2_is_streaming(&fimc->vb_queue) &&
+ !list_empty(&fimc->pending_buf_q) &&
+ !test_and_set_bit(ST_FLITE_STREAM, &fimc->state)) {
+ flite_hw_capture_start(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (!test_and_set_bit(ST_SENSOR_STREAM, &fimc->state))
+ fimc_pipeline_s_stream(&fimc->pipeline, 1);
+ return;
+ }
+ spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static void fimc_lock(struct vb2_queue *vq)
+{
+ struct fimc_lite *fimc = vb2_get_drv_priv(vq);
+ mutex_lock(&fimc->lock);
+}
+
+static void fimc_unlock(struct vb2_queue *vq)
+{
+ struct fimc_lite *fimc = vb2_get_drv_priv(vq);
+ mutex_unlock(&fimc->lock);
+}
+
+static const struct vb2_ops fimc_lite_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .wait_prepare = fimc_unlock,
+ .wait_finish = fimc_lock,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
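+
+For context, a minimal sketch of how these ops are typically wired into a vb2 queue; this is not the driver's actual initialization path and the io_modes choice is an assumption:
+
+	static int fimc_lite_queue_sketch(struct fimc_lite *fimc)
+	{
+		struct vb2_queue *q = &fimc->vb_queue;
+
+		memset(q, 0, sizeof(*q));
+		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+		q->io_modes = VB2_MMAP | VB2_USERPTR;	/* assumed */
+		q->ops = &fimc_lite_qops;
+		q->mem_ops = &vb2_dma_contig_memops;
+		q->buf_struct_size = sizeof(struct flite_buffer);
+		q->drv_priv = fimc;
+
+		return vb2_queue_init(q);
+	}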
+
+static void fimc_lite_clear_event_counters(struct fimc_lite *fimc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ memset(&fimc->events, 0, sizeof(fimc->events));
+ spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static int fimc_lite_open(struct file *file)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ int ret = v4l2_fh_open(file);
+
+ if (ret)
+ return ret;
+
+ set_bit(ST_FLITE_IN_USE, &fimc->state);
+ pm_runtime_get_sync(&fimc->pdev->dev);
+
+ if (++fimc->ref_count != 1 || fimc->out_path != FIMC_IO_DMA)
+ return ret;
+
+ ret = fimc_pipeline_initialize(&fimc->pipeline, &fimc->vfd->entity,
+ true);
+ if (ret < 0) {
+ v4l2_err(fimc->vfd, "Video pipeline initialization failed\n");
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ fimc->ref_count--;
+ v4l2_fh_release(file);
+ clear_bit(ST_FLITE_IN_USE, &fimc->state);
+ }
+
+ fimc_lite_clear_event_counters(fimc);
+ return ret;
+}
+
+static int fimc_lite_close(struct file *file)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+
+ if (--fimc->ref_count == 0 && fimc->out_path == FIMC_IO_DMA) {
+ clear_bit(ST_FLITE_IN_USE, &fimc->state);
+ fimc_lite_stop_capture(fimc, false);
+ fimc_pipeline_shutdown(&fimc->pipeline);
+ clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
+ }
+
+ pm_runtime_put(&fimc->pdev->dev);
+
+ if (fimc->ref_count == 0)
+ vb2_queue_release(&fimc->vb_queue);
+
+ return v4l2_fh_release(file);
+}
+
+static unsigned int fimc_lite_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ return vb2_poll(&fimc->vb_queue, file, wait);
+}
+
+static int fimc_lite_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ return vb2_mmap(&fimc->vb_queue, vma);
+}
+
+static const struct v4l2_file_operations fimc_lite_fops = {
+ .owner = THIS_MODULE,
+ .open = fimc_lite_open,
+ .release = fimc_lite_close,
+ .poll = fimc_lite_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = fimc_lite_mmap,
+};
+
+/*
+ * Format and crop negotiation helpers
+ */
+
+static const struct fimc_fmt *fimc_lite_try_format(struct fimc_lite *fimc,
+ u32 *width, u32 *height,
+ u32 *code, u32 *fourcc, int pad)
+{
+ struct flite_variant *variant = fimc->variant;
+ const struct fimc_fmt *fmt;
+
+ fmt = fimc_lite_find_format(fourcc, code, 0);
+ if (WARN_ON(!fmt))
+ return NULL;
+
+ if (code)
+ *code = fmt->mbus_code;
+ if (fourcc)
+ *fourcc = fmt->fourcc;
+
+ if (pad == FLITE_SD_PAD_SINK) {
+ v4l_bound_align_image(width, 8, variant->max_width,
+ ffs(variant->out_width_align) - 1,
+ height, 0, variant->max_height, 0, 0);
+ } else {
+ v4l_bound_align_image(width, 8, fimc->inp_frame.rect.width,
+ ffs(variant->out_width_align) - 1,
+ height, 0, fimc->inp_frame.rect.height,
+ 0, 0);
+ }
+
+ v4l2_dbg(1, debug, &fimc->subdev, "code: 0x%x, %dx%d\n",
+ code ? *code : 0, *width, *height);
+
+ return fmt;
+}
+
+static void fimc_lite_try_crop(struct fimc_lite *fimc, struct v4l2_rect *r)
+{
+ struct flite_frame *frame = &fimc->inp_frame;
+
+ v4l_bound_align_image(&r->width, 0, frame->f_width, 0,
+ &r->height, 0, frame->f_height, 0, 0);
+
+ /* Adjust left/top if cropping rectangle got out of bounds */
+ r->left = clamp_t(u32, r->left, 0, frame->f_width - r->width);
+ r->left = round_down(r->left, fimc->variant->win_hor_offs_align);
+ r->top = clamp_t(u32, r->top, 0, frame->f_height - r->height);
+
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, sink fmt: %dx%d",
+ r->left, r->top, r->width, r->height,
+ frame->f_width, frame->f_height);
+}
+
+static void fimc_lite_try_compose(struct fimc_lite *fimc, struct v4l2_rect *r)
+{
+ struct flite_frame *frame = &fimc->out_frame;
+ struct v4l2_rect *crop_rect = &fimc->inp_frame.rect;
+
+ /*
+ * Scaling is not supported, so the compose rectangle size is kept
+ * equal to the size of the sink crop rectangle.
+ */
+ r->width = crop_rect->width;
+ r->height = crop_rect->height;
+
+ /* Adjust left/top if the composing rectangle got out of bounds */
+ r->left = clamp_t(u32, r->left, 0, frame->f_width - r->width);
+ r->left = round_down(r->left, fimc->variant->out_hor_offs_align);
+ r->top = clamp_t(u32, r->top, 0, fimc->out_frame.f_height - r->height);
+
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, source fmt: %dx%d",
+ r->left, r->top, r->width, r->height,
+ frame->f_width, frame->f_height);
+}
+
+/*
+ * Video node ioctl operations
+ */
+static int fimc_vidioc_querycap_capture(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, FIMC_LITE_DRV_NAME, sizeof(cap->driver));
+ cap->bus_info[0] = 0;
+ cap->card[0] = 0;
+ cap->capabilities = V4L2_CAP_STREAMING;
+ return 0;
+}
+
+static int fimc_lite_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ const struct fimc_fmt *fmt;
+
+ if (f->index >= ARRAY_SIZE(fimc_lite_formats))
+ return -EINVAL;
+
+ fmt = &fimc_lite_formats[f->index];
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int fimc_lite_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt = &pixm->plane_fmt[0];
+ struct flite_frame *frame = &fimc->out_frame;
+ const struct fimc_fmt *fmt = fimc->fmt;
+
+ plane_fmt->bytesperline = (frame->f_width * fmt->depth[0]) / 8;
+ plane_fmt->sizeimage = plane_fmt->bytesperline * frame->f_height;
+
+ pixm->num_planes = fmt->memplanes;
+ pixm->pixelformat = fmt->fourcc;
+ pixm->width = frame->f_width;
+ pixm->height = frame->f_height;
+ pixm->field = V4L2_FIELD_NONE;
+ pixm->colorspace = V4L2_COLORSPACE_JPEG;
+ return 0;
+}
+
+static int fimc_lite_try_fmt(struct fimc_lite *fimc,
+ struct v4l2_pix_format_mplane *pixm,
+ const struct fimc_fmt **ffmt)
+{
+ struct flite_variant *variant = fimc->variant;
+ u32 bpl = pixm->plane_fmt[0].bytesperline;
+ const struct fimc_fmt *fmt;
+
+ fmt = fimc_lite_find_format(&pixm->pixelformat, NULL, 0);
+ if (WARN_ON(fmt == NULL))
+ return -EINVAL;
+ if (ffmt)
+ *ffmt = fmt;
+ v4l_bound_align_image(&pixm->width, 8, variant->max_width,
+ ffs(variant->out_width_align) - 1,
+ &pixm->height, 0, variant->max_height, 0, 0);
+
+ if ((bpl == 0 || ((bpl * 8) / fmt->depth[0]) < pixm->width))
+ pixm->plane_fmt[0].bytesperline = (pixm->width *
+ fmt->depth[0]) / 8;
+
+ if (pixm->plane_fmt[0].sizeimage == 0)
+ pixm->plane_fmt[0].sizeimage = (pixm->width * pixm->height *
+ fmt->depth[0]) / 8;
+ pixm->num_planes = fmt->memplanes;
+ pixm->pixelformat = fmt->fourcc;
+ pixm->colorspace = V4L2_COLORSPACE_JPEG;
+ pixm->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+static int fimc_lite_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+
+ return fimc_lite_try_fmt(fimc, &f->fmt.pix_mp, NULL);
+}
+
+static int fimc_lite_s_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct flite_frame *frame = &fimc->out_frame;
+ const struct fimc_fmt *fmt = NULL;
+ int ret;
+
+ if (vb2_is_busy(&fimc->vb_queue))
+ return -EBUSY;
+
+ ret = fimc_lite_try_fmt(fimc, &f->fmt.pix_mp, &fmt);
+ if (ret < 0)
+ return ret;
+
+ fimc->fmt = fmt;
+ fimc->payload[0] = max((pixm->width * pixm->height * fmt->depth[0]) / 8,
+ pixm->plane_fmt[0].sizeimage);
+ frame->f_width = pixm->width;
+ frame->f_height = pixm->height;
+
+ return 0;
+}
+
+static int fimc_pipeline_validate(struct fimc_lite *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->subdev;
+ struct v4l2_subdev_format sink_fmt, src_fmt;
+ struct media_pad *pad;
+ int ret;
+
+ while (1) {
+ /* Retrieve format at the sink pad */
+ pad = &sd->entity.pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+ /* Don't call FIMC subdev operation to avoid nested locking */
+ if (sd == &fimc->subdev) {
+ struct flite_frame *ff = &fimc->out_frame;
+ sink_fmt.format.width = ff->f_width;
+ sink_fmt.format.height = ff->f_height;
+ sink_fmt.format.code = fimc->fmt->mbus_code;
+ } else {
+ sink_fmt.pad = pad->index;
+ sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
+ &sink_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+ }
+ /* Retrieve format at the source pad */
+ pad = media_entity_remote_source(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ src_fmt.pad = pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ if (src_fmt.format.width != sink_fmt.format.width ||
+ src_fmt.format.height != sink_fmt.format.height ||
+ src_fmt.format.code != sink_fmt.format.code)
+ return -EPIPE;
+ }
+ return 0;
+}
+
+static int fimc_lite_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct v4l2_subdev *sensor = fimc->pipeline.subdevs[IDX_SENSOR];
+ struct fimc_pipeline *p = &fimc->pipeline;
+ int ret;
+
+ if (fimc_lite_active(fimc))
+ return -EBUSY;
+
+ media_entity_pipeline_start(&sensor->entity, p->m_pipeline);
+
+ ret = fimc_pipeline_validate(fimc);
+ if (ret) {
+ media_entity_pipeline_stop(&sensor->entity);
+ return ret;
+ }
+
+ return vb2_streamon(&fimc->vb_queue, type);
+}
+
+static int fimc_lite_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct v4l2_subdev *sd = fimc->pipeline.subdevs[IDX_SENSOR];
+ int ret;
+
+ ret = vb2_streamoff(&fimc->vb_queue, type);
+ if (ret == 0)
+ media_entity_pipeline_stop(&sd->entity);
+ return ret;
+}
+
+static int fimc_lite_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ int ret;
+
+ reqbufs->count = max_t(u32, FLITE_REQ_BUFS_MIN, reqbufs->count);
+ ret = vb2_reqbufs(&fimc->vb_queue, reqbufs);
+ if (!ret)
+ fimc->reqbufs_count = reqbufs->count;
+
+ return ret;
+}
+
+static int fimc_lite_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+
+ return vb2_querybuf(&fimc->vb_queue, buf);
+}
+
+static int fimc_lite_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+
+ return vb2_qbuf(&fimc->vb_queue, buf);
+}
+
+static int fimc_lite_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+
+ return vb2_dqbuf(&fimc->vb_queue, buf, file->f_flags & O_NONBLOCK);
+}
+
+static int fimc_lite_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+
+ return vb2_create_bufs(&fimc->vb_queue, create);
+}
+
+static int fimc_lite_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *b)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+
+ return vb2_prepare_buf(&fimc->vb_queue, b);
+}
+
+/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
+static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ if (a->left < b->left || a->top < b->top)
+ return 0;
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+
+ return 1;
+}
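+
+To make the selection flag handling below concrete (values assumed): with V4L2_SEL_FLAG_LE, a requested 1000x600 compose rectangle that fimc_lite_try_compose() shrinks to 960x540 at the same origin still succeeds, since the adjusted rectangle is enclosed in the requested one; the same request with V4L2_SEL_FLAG_GE returns -ERANGE, because the requested rectangle is not enclosed in the adjusted one.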
+
+static int fimc_lite_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct flite_frame *f = &fimc->out_frame;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = f->f_width;
+ sel->r.height = f->f_height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE_ACTIVE:
+ sel->r = f->rect;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int fimc_lite_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct flite_frame *f = &fimc->out_frame;
+ struct v4l2_rect rect = sel->r;
+ unsigned long flags;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+ sel->target != V4L2_SEL_TGT_COMPOSE_ACTIVE)
+ return -EINVAL;
+
+ fimc_lite_try_compose(fimc, &rect);
+
+ if ((sel->flags & V4L2_SEL_FLAG_LE) &&
+ !enclosed_rectangle(&rect, &sel->r))
+ return -ERANGE;
+
+ if ((sel->flags & V4L2_SEL_FLAG_GE) &&
+ !enclosed_rectangle(&sel->r, &rect))
+ return -ERANGE;
+
+ sel->r = rect;
+ spin_lock_irqsave(&fimc->slock, flags);
+ f->rect = rect;
+ set_bit(ST_FLITE_CONFIG, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops fimc_lite_ioctl_ops = {
+ .vidioc_querycap = fimc_vidioc_querycap_capture,
+ .vidioc_enum_fmt_vid_cap_mplane = fimc_lite_enum_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = fimc_lite_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = fimc_lite_s_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = fimc_lite_g_fmt_mplane,
+ .vidioc_g_selection = fimc_lite_g_selection,
+ .vidioc_s_selection = fimc_lite_s_selection,
+ .vidioc_reqbufs = fimc_lite_reqbufs,
+ .vidioc_querybuf = fimc_lite_querybuf,
+ .vidioc_prepare_buf = fimc_lite_prepare_buf,
+ .vidioc_create_bufs = fimc_lite_create_bufs,
+ .vidioc_qbuf = fimc_lite_qbuf,
+ .vidioc_dqbuf = fimc_lite_dqbuf,
+ .vidioc_streamon = fimc_lite_streamon,
+ .vidioc_streamoff = fimc_lite_streamoff,
+};
+
+/* Capture subdev media entity operations */
+static int fimc_lite_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ unsigned int remote_ent_type = media_entity_type(remote->entity);
+
+ if (WARN_ON(fimc == NULL))
+ return 0;
+
+ v4l2_dbg(1, debug, sd, "%s: %s --> %s, flags: 0x%x. source_id: 0x%x",
+ __func__, local->entity->name, remote->entity->name,
+ flags, fimc->source_subdev_grp_id);
+
+ switch (local->index) {
+ case FIMC_SD_PAD_SINK:
+ if (remote_ent_type != MEDIA_ENT_T_V4L2_SUBDEV)
+ return -EINVAL;
+
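+ /* Only a single active link to the sink pad is allowed */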
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (fimc->source_subdev_grp_id != 0)
+ return -EBUSY;
+ fimc->source_subdev_grp_id = sd->grp_id;
+ return 0;
+ }
+
+ fimc->source_subdev_grp_id = 0;
+ break;
+
+ case FIMC_SD_PAD_SOURCE:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ fimc->out_path = FIMC_IO_NONE;
+ return 0;
+ }
+ if (remote_ent_type == MEDIA_ENT_T_V4L2_SUBDEV)
+ fimc->out_path = FIMC_IO_ISP;
+ else
+ fimc->out_path = FIMC_IO_DMA;
+ break;
+
+ default:
+ v4l2_err(sd, "Invalid pad index\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations fimc_lite_subdev_media_ops = {
+ .link_setup = fimc_lite_link_setup,
+};
+
+static int fimc_lite_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct fimc_fmt *fmt;
+
+ fmt = fimc_lite_find_format(NULL, NULL, code->index);
+ if (!fmt)
+ return -EINVAL;
+ code->code = fmt->mbus_code;
+ return 0;
+}
+
+static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct flite_frame *f = &fimc->out_frame;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ fmt->format = *mf;
+ return 0;
+ }
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+
+ mutex_lock(&fimc->lock);
+ mf->code = fimc->fmt->mbus_code;
+
+ if (fmt->pad == FLITE_SD_PAD_SINK) {
+ /* full camera input frame size */
+ mf->width = f->f_width;
+ mf->height = f->f_height;
+ } else {
+ /* crop size */
+ mf->width = f->rect.width;
+ mf->height = f->rect.height;
+ }
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct flite_frame *sink = &fimc->inp_frame;
+ const struct fimc_fmt *ffmt;
+
+ v4l2_dbg(1, debug, sd, "pad%d: code: 0x%x, %dx%d",
+ fmt->pad, mf->code, mf->width, mf->height);
+
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mutex_lock(&fimc->lock);
+
+ if ((fimc->out_path == FIMC_IO_ISP && sd->entity.stream_count > 0) ||
+ (fimc->out_path == FIMC_IO_DMA && vb2_is_busy(&fimc->vb_queue))) {
+ mutex_unlock(&fimc->lock);
+ return -EBUSY;
+ }
+
+ ffmt = fimc_lite_try_format(fimc, &mf->width, &mf->height,
+ &mf->code, NULL, fmt->pad);
+
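+ /* For TRY formats update only the file handle's pad configuration */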
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = fmt->format;
+ mutex_unlock(&fimc->lock);
+ return 0;
+ }
+
+ if (fmt->pad == FLITE_SD_PAD_SINK) {
+ sink->f_width = mf->width;
+ sink->f_height = mf->height;
+ fimc->fmt = ffmt;
+ /* Set sink crop rectangle */
+ sink->rect.width = mf->width;
+ sink->rect.height = mf->height;
+ sink->rect.left = 0;
+ sink->rect.top = 0;
+ /* Reset source crop rectangle */
+ fimc->out_frame.rect = sink->rect;
+ } else {
+ /* Allow changing format only on sink pad */
+ mf->code = fimc->fmt->mbus_code;
+ mf->width = sink->rect.width;
+ mf->height = sink->rect.height;
+ }
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct flite_frame *f = &fimc->inp_frame;
+
+ if ((sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL &&
+ sel->target != V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS) ||
+ sel->pad != FLITE_SD_PAD_SINK)
+ return -EINVAL;
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ sel->r = *v4l2_subdev_get_try_crop(fh, sel->pad);
+ return 0;
+ }
+
+ mutex_lock(&fimc->lock);
+ if (sel->target == V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL) {
+ sel->r = f->rect;
+ } else {
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = f->f_width;
+ sel->r.height = f->f_height;
+ }
+ mutex_unlock(&fimc->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d",
+ __func__, f->rect.left, f->rect.top, f->rect.width,
+ f->rect.height, f->f_width, f->f_height);
+
+ return 0;
+}
+
+static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct flite_frame *f = &fimc->inp_frame;
+ int ret = 0;
+
+ if (sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL ||
+ sel->pad != FLITE_SD_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&fimc->lock);
+ fimc_lite_try_crop(fimc, &sel->r);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_crop(fh, sel->pad) = sel->r;
+ } else {
+ unsigned long flags;
+ spin_lock_irqsave(&fimc->slock, flags);
+ f->rect = sel->r;
+ /* Same crop rectangle on the source pad */
+ fimc->out_frame.rect = sel->r;
+ set_bit(ST_FLITE_CONFIG, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ }
+ mutex_unlock(&fimc->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d",
+ __func__, f->rect.left, f->rect.top, f->rect.width,
+ f->rect.height, f->f_width, f->f_height);
+
+ return ret;
+}
+
+static int fimc_lite_subdev_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+ if (fimc->out_path == FIMC_IO_DMA)
+ return -ENOIOCTLCMD;
+
+ /* TODO: */
+
+ return 0;
+}
+
+static int fimc_lite_subdev_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+ if (fimc->out_path == FIMC_IO_DMA)
+ return -ENOIOCTLCMD;
+
+ /* TODO: */
+
+ return 0;
+}
+
+static int fimc_lite_log_status(struct v4l2_subdev *sd)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+ flite_hw_dump_regs(fimc, __func__);
+ return 0;
+}
+
+static int fimc_lite_subdev_registered(struct v4l2_subdev *sd)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct vb2_queue *q = &fimc->vb_queue;
+ struct video_device *vfd;
+ int ret;
+
+ fimc->fmt = &fimc_lite_formats[0];
+ fimc->out_path = FIMC_IO_DMA;
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(sd->v4l2_dev, "Failed to allocate video device\n");
+ return -ENOMEM;
+ }
+
+ snprintf(vfd->name, sizeof(vfd->name), "fimc-lite.%d.capture",
+ fimc->index);
+
+ vfd->fops = &fimc_lite_fops;
+ vfd->ioctl_ops = &fimc_lite_ioctl_ops;
+ vfd->v4l2_dev = sd->v4l2_dev;
+ vfd->minor = -1;
+ vfd->release = video_device_release;
+ vfd->lock = &fimc->lock;
+ fimc->vfd = vfd;
+ fimc->ref_count = 0;
+ fimc->reqbufs_count = 0;
+
+ INIT_LIST_HEAD(&fimc->pending_buf_q);
+ INIT_LIST_HEAD(&fimc->active_buf_q);
+
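+ /* Set up the videobuf2 queue used by the capture video node */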
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = &fimc_lite_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct flite_buffer);
+ q->drv_priv = fimc;
+
+ vb2_queue_init(q);
+
+ fimc->vd_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_init(&vfd->entity, 1, &fimc->vd_pad, 0);
+ if (ret)
+ goto err;
+
+ video_set_drvdata(vfd, fimc);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_vd;
+
+ v4l2_info(sd->v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+ return 0;
+
+ err_vd:
+ media_entity_cleanup(&vfd->entity);
+ err:
+ video_device_release(vfd);
+ return ret;
+}
+
+static void fimc_lite_subdev_unregistered(struct v4l2_subdev *sd)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+ if (fimc == NULL)
+ return;
+
+ if (fimc->vfd) {
+ video_unregister_device(fimc->vfd);
+ media_entity_cleanup(&fimc->vfd->entity);
+ fimc->vfd = NULL;
+ }
+}
+
+static const struct v4l2_subdev_internal_ops fimc_lite_subdev_internal_ops = {
+ .registered = fimc_lite_subdev_registered,
+ .unregistered = fimc_lite_subdev_unregistered,
+};
+
+static const struct v4l2_subdev_pad_ops fimc_lite_subdev_pad_ops = {
+ .enum_mbus_code = fimc_lite_subdev_enum_mbus_code,
+ .get_selection = fimc_lite_subdev_get_selection,
+ .set_selection = fimc_lite_subdev_set_selection,
+ .get_fmt = fimc_lite_subdev_get_fmt,
+ .set_fmt = fimc_lite_subdev_set_fmt,
+};
+
+static const struct v4l2_subdev_video_ops fimc_lite_subdev_video_ops = {
+ .s_stream = fimc_lite_subdev_s_stream,
+};
+
+static const struct v4l2_subdev_core_ops fimc_lite_core_ops = {
+ .s_power = fimc_lite_subdev_s_power,
+ .log_status = fimc_lite_log_status,
+};
+
+static struct v4l2_subdev_ops fimc_lite_subdev_ops = {
+ .core = &fimc_lite_core_ops,
+ .video = &fimc_lite_subdev_video_ops,
+ .pad = &fimc_lite_subdev_pad_ops,
+};
+
+static int fimc_lite_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fimc_lite *fimc = container_of(ctrl->handler, struct fimc_lite,
+ ctrl_handler);
+ set_bit(ST_FLITE_CONFIG, &fimc->state);
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops fimc_lite_ctrl_ops = {
+ .s_ctrl = fimc_lite_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config fimc_lite_ctrl = {
+ .ops = &fimc_lite_ctrl_ops,
+ .id = V4L2_CTRL_CLASS_USER | 0x1001,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Test Pattern 640x480",
+};
+
+static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc)
+{
+ struct v4l2_ctrl_handler *handler = &fimc->ctrl_handler;
+ struct v4l2_subdev *sd = &fimc->subdev;
+ int ret;
+
+ v4l2_subdev_init(sd, &fimc_lite_subdev_ops);
+ sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, sizeof(sd->name), "FIMC-LITE.%d", fimc->index);
+
+ fimc->subdev_pads[FIMC_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ fimc->subdev_pads[FIMC_SD_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&sd->entity, FIMC_SD_PADS_NUM,
+ fimc->subdev_pads, 0);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_handler_init(handler, 1);
+ fimc->test_pattern = v4l2_ctrl_new_custom(handler, &fimc_lite_ctrl,
+ NULL);
+ if (handler->error) {
+ media_entity_cleanup(&sd->entity);
+ return handler->error;
+ }
+
+ sd->ctrl_handler = handler;
+ sd->internal_ops = &fimc_lite_subdev_internal_ops;
+ sd->entity.ops = &fimc_lite_subdev_media_ops;
+ v4l2_set_subdevdata(sd, fimc);
+
+ return 0;
+}
+
+static void fimc_lite_unregister_capture_subdev(struct fimc_lite *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->subdev;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&fimc->ctrl_handler);
+ v4l2_set_subdevdata(sd, NULL);
+}
+
+static void fimc_lite_clk_put(struct fimc_lite *fimc)
+{
+ if (IS_ERR_OR_NULL(fimc->clock))
+ return;
+
+ clk_unprepare(fimc->clock);
+ clk_put(fimc->clock);
+ fimc->clock = NULL;
+}
+
+static int fimc_lite_clk_get(struct fimc_lite *fimc)
+{
+ int ret;
+
+ fimc->clock = clk_get(&fimc->pdev->dev, FLITE_CLK_NAME);
+ if (IS_ERR(fimc->clock))
+ return PTR_ERR(fimc->clock);
+
+ ret = clk_prepare(fimc->clock);
+ if (ret < 0) {
+ clk_put(fimc->clock);
+ fimc->clock = NULL;
+ }
+ return ret;
+}
+
+static int __devinit fimc_lite_probe(struct platform_device *pdev)
+{
+ struct flite_drvdata *drv_data = fimc_lite_get_drvdata(pdev);
+ struct fimc_lite *fimc;
+ struct resource *res;
+ int ret;
+
+ fimc = devm_kzalloc(&pdev->dev, sizeof(*fimc), GFP_KERNEL);
+ if (!fimc)
+ return -ENOMEM;
+
+ fimc->index = pdev->id;
+ fimc->variant = drv_data->variant[fimc->index];
+ fimc->pdev = pdev;
+
+ init_waitqueue_head(&fimc->irq_queue);
+ spin_lock_init(&fimc->slock);
+ mutex_init(&fimc->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fimc->regs = devm_request_and_ioremap(&pdev->dev, res);
+ if (fimc->regs == NULL) {
+ dev_err(&pdev->dev, "Failed to obtain io memory\n");
+ return -ENOENT;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Failed to get IRQ resource\n");
+ return -ENXIO;
+ }
+
+ ret = fimc_lite_clk_get(fimc);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(&pdev->dev, res->start, flite_irq_handler,
+ 0, dev_name(&pdev->dev), fimc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
+ goto err_clk;
+ }
+
+ /* The video node will be created within the subdev's registered() op */
+ ret = fimc_lite_create_capture_subdev(fimc);
+ if (ret)
+ goto err_clk;
+
+ platform_set_drvdata(pdev, fimc);
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto err_sd;
+
+ fimc->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(fimc->alloc_ctx)) {
+ ret = PTR_ERR(fimc->alloc_ctx);
+ goto err_pm;
+ }
+ pm_runtime_put(&pdev->dev);
+
+ dev_dbg(&pdev->dev, "FIMC-LITE.%d registered successfully\n",
+ fimc->index);
+ return 0;
+err_pm:
+ pm_runtime_put(&pdev->dev);
+err_sd:
+ fimc_lite_unregister_capture_subdev(fimc);
+err_clk:
+ fimc_lite_clk_put(fimc);
+ return ret;
+}
+
+static int fimc_lite_runtime_resume(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+
+ clk_enable(fimc->clock);
+ return 0;
+}
+
+static int fimc_lite_runtime_suspend(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+
+ clk_disable(fimc->clock);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_lite_resume(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+ struct flite_buffer *buf;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ if (!test_and_clear_bit(ST_LPM, &fimc->state) ||
+ !test_bit(ST_FLITE_IN_USE, &fimc->state)) {
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return 0;
+ }
+ flite_hw_reset(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (!test_and_clear_bit(ST_FLITE_SUSPENDED, &fimc->state))
+ return 0;
+
+ INIT_LIST_HEAD(&fimc->active_buf_q);
+ fimc_pipeline_initialize(&fimc->pipeline, &fimc->vfd->entity, false);
+ fimc_lite_hw_init(fimc);
+ clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
+
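+ /* Re-submit any buffers that were left pending before suspend */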
+ for (i = 0; i < fimc->reqbufs_count; i++) {
+ if (list_empty(&fimc->pending_buf_q))
+ break;
+ buf = fimc_lite_pending_queue_pop(fimc);
+ buffer_queue(&buf->vb);
+ }
+ return 0;
+}
+
+static int fimc_lite_suspend(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+ bool suspend = test_bit(ST_FLITE_IN_USE, &fimc->state);
+ int ret;
+
+ if (test_and_set_bit(ST_LPM, &fimc->state))
+ return 0;
+
+ ret = fimc_lite_stop_capture(fimc, suspend);
+ if (ret)
+ return ret;
+
+ return fimc_pipeline_shutdown(&fimc->pipeline);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int __devexit fimc_lite_remove(struct platform_device *pdev)
+{
+ struct fimc_lite *fimc = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ fimc_lite_unregister_capture_subdev(fimc);
+ vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
+ fimc_lite_clk_put(fimc);
+
+ dev_info(dev, "Driver unloaded\n");
+ return 0;
+}
+
+static struct flite_variant fimc_lite0_variant_exynos4 = {
+ .max_width = 8192,
+ .max_height = 8192,
+ .out_width_align = 8,
+ .win_hor_offs_align = 2,
+ .out_hor_offs_align = 8,
+};
+
+/* EXYNOS4212, EXYNOS4412 */
+static struct flite_drvdata fimc_lite_drvdata_exynos4 = {
+ .variant = {
+ [0] = &fimc_lite0_variant_exynos4,
+ [1] = &fimc_lite0_variant_exynos4,
+ },
+};
+
+static struct platform_device_id fimc_lite_driver_ids[] = {
+ {
+ .name = "exynos-fimc-lite",
+ .driver_data = (unsigned long)&fimc_lite_drvdata_exynos4,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, fimc_lite_driver_ids);
+
+static const struct dev_pm_ops fimc_lite_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_lite_suspend, fimc_lite_resume)
+ SET_RUNTIME_PM_OPS(fimc_lite_runtime_suspend, fimc_lite_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver fimc_lite_driver = {
+ .probe = fimc_lite_probe,
+ .remove = __devexit_p(fimc_lite_remove),
+ .id_table = fimc_lite_driver_ids,
+ .driver = {
+ .name = FIMC_LITE_DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &fimc_lite_pm_ops,
+ }
+};
+module_platform_driver(fimc_lite_driver);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" FIMC_LITE_DRV_NAME);
diff --git a/drivers/media/video/s5p-fimc/fimc-lite.h b/drivers/media/video/s5p-fimc/fimc-lite.h
new file mode 100644
index 000000000000..44424eee81d8
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-lite.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_LITE_H_
+#define FIMC_LITE_H_
+
+#include <asm/sizes.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/s5p_fimc.h>
+
+#include "fimc-core.h"
+
+#define FIMC_LITE_DRV_NAME "exynos-fimc-lite"
+#define FLITE_CLK_NAME "flite"
+#define FIMC_LITE_MAX_DEVS 2
+#define FLITE_REQ_BUFS_MIN 2
+
+/* Bit index definitions for struct fimc_lite::state */
+enum {
+ ST_FLITE_LPM,
+ ST_FLITE_PENDING,
+ ST_FLITE_RUN,
+ ST_FLITE_STREAM,
+ ST_FLITE_SUSPENDED,
+ ST_FLITE_OFF,
+ ST_FLITE_IN_USE,
+ ST_FLITE_CONFIG,
+ ST_SENSOR_STREAM,
+};
+
+#define FLITE_SD_PAD_SINK 0
+#define FLITE_SD_PAD_SOURCE 1
+#define FLITE_SD_PADS_NUM 2
+
+struct flite_variant {
+ unsigned short max_width;
+ unsigned short max_height;
+ unsigned short out_width_align;
+ unsigned short win_hor_offs_align;
+ unsigned short out_hor_offs_align;
+};
+
+struct flite_drvdata {
+ struct flite_variant *variant[FIMC_LITE_MAX_DEVS];
+};
+
+#define fimc_lite_get_drvdata(_pdev) \
+ ((struct flite_drvdata *) platform_get_device_id(_pdev)->driver_data)
+
+struct fimc_lite_events {
+ unsigned int data_overflow;
+};
+
+#define FLITE_MAX_PLANES 1
+
+/**
+ * struct flite_frame - source/target frame properties
+ * @f_width: full pixel width
+ * @f_height: full pixel height
+ * @rect: crop/composition rectangle
+ */
+struct flite_frame {
+ u16 f_width;
+ u16 f_height;
+ struct v4l2_rect rect;
+};
+
+/**
+ * struct flite_buffer - video buffer structure
+ * @vb: vb2 buffer
+ * @list: list head for the buffers queue
+ * @paddr: precalculated physical address
+ */
+struct flite_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+ dma_addr_t paddr;
+};
+
+/**
+ * struct fimc_lite - fimc lite structure
+ * @pdev: pointer to FIMC-LITE platform device
+ * @variant: variant information for this IP
+ * @v4l2_dev: pointer to the top level v4l2_device
+ * @vfd: video device node
+ * @fh: v4l2 file handle
+ * @alloc_ctx: videobuf2 memory allocator context
+ * @subdev: FIMC-LITE subdev
+ * @vd_pad: media (sink) pad for the capture video node
+ * @subdev_pads: the subdev media pads
+ * @ctrl_handler: v4l2 control handler
+ * @test_pattern: test pattern control
+ * @index: FIMC-LITE platform device index
+ * @pipeline: video capture pipeline data structure
+ * @slock: spinlock protecting this data structure and the hw registers
+ * @lock: mutex serializing video device and the subdev operations
+ * @clock: FIMC-LITE gate clock
+ * @regs: memory mapped io registers
+ * @irq_queue: interrupt handler waitqueue
+ * @fmt: pointer to color format description structure
+ * @payload: image size in bytes (w x h x bpp)
+ * @inp_frame: camera input frame structure
+ * @out_frame: DMA output frame structure
+ * @out_path: output data path (DMA or FIFO)
+ * @source_subdev_grp_id: source subdev group id
+ * @state: driver state flags
+ * @pending_buf_q: pending buffers queue head
+ * @active_buf_q: the queue head of buffers scheduled in hardware
+ * @vb_queue: vb2 buffers queue
+ * @active_buf_count: number of video buffers scheduled in hardware
+ * @frame_count: the captured frames counter
+ * @reqbufs_count: the number of buffers requested with REQBUFS ioctl
+ * @ref_count: driver's private reference counter
+ */
+struct fimc_lite {
+ struct platform_device *pdev;
+ struct flite_variant *variant;
+ struct v4l2_device *v4l2_dev;
+ struct video_device *vfd;
+ struct v4l2_fh fh;
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct v4l2_subdev subdev;
+ struct media_pad vd_pad;
+ struct media_pad subdev_pads[FLITE_SD_PADS_NUM];
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *test_pattern;
+ u32 index;
+ struct fimc_pipeline pipeline;
+
+ struct mutex lock;
+ spinlock_t slock;
+
+ struct clk *clock;
+ void __iomem *regs;
+ wait_queue_head_t irq_queue;
+
+ const struct fimc_fmt *fmt;
+ unsigned long payload[FLITE_MAX_PLANES];
+ struct flite_frame inp_frame;
+ struct flite_frame out_frame;
+ enum fimc_datapath out_path;
+ unsigned int source_subdev_grp_id;
+
+ unsigned long state;
+ struct list_head pending_buf_q;
+ struct list_head active_buf_q;
+ struct vb2_queue vb_queue;
+ unsigned int frame_count;
+ unsigned int reqbufs_count;
+ int ref_count;
+
+ struct fimc_lite_events events;
+};
+
+static inline bool fimc_lite_active(struct fimc_lite *fimc)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ ret = fimc->state & (1 << ST_FLITE_RUN) ||
+ fimc->state & (1 << ST_FLITE_PENDING);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return ret;
+}
+
+static inline void fimc_lite_active_queue_add(struct fimc_lite *dev,
+ struct flite_buffer *buf)
+{
+ list_add_tail(&buf->list, &dev->active_buf_q);
+}
+
+static inline struct flite_buffer *fimc_lite_active_queue_pop(
+ struct fimc_lite *dev)
+{
+ struct flite_buffer *buf = list_entry(dev->active_buf_q.next,
+ struct flite_buffer, list);
+ list_del(&buf->list);
+ return buf;
+}
+
+static inline void fimc_lite_pending_queue_add(struct fimc_lite *dev,
+ struct flite_buffer *buf)
+{
+ list_add_tail(&buf->list, &dev->pending_buf_q);
+}
+
+static inline struct flite_buffer *fimc_lite_pending_queue_pop(
+ struct fimc_lite *dev)
+{
+ struct flite_buffer *buf = list_entry(dev->pending_buf_q.next,
+ struct flite_buffer, list);
+ list_del(&buf->list);
+ return buf;
+}
+
+#endif /* FIMC_LITE_H_ */
diff --git a/drivers/media/video/s5p-fimc/fimc-m2m.c b/drivers/media/video/s5p-fimc/fimc-m2m.c
new file mode 100644
index 000000000000..4c58e0570962
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-m2m.c
@@ -0,0 +1,824 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC series FIMC (video postprocessor) driver
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "fimc-core.h"
+#include "fimc-reg.h"
+#include "fimc-mdevice.h"
+
+
+static unsigned int get_m2m_fmt_flags(unsigned int stream_type)
+{
+ if (stream_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return FMT_FLAGS_M2M_IN;
+ else
+ return FMT_FLAGS_M2M_OUT;
+}
+
+void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
+{
+ struct vb2_buffer *src_vb, *dst_vb;
+
+ if (!ctx || !ctx->m2m_ctx)
+ return;
+
+ src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+
+ if (src_vb && dst_vb) {
+ v4l2_m2m_buf_done(src_vb, vb_state);
+ v4l2_m2m_buf_done(dst_vb, vb_state);
+ v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
+ ctx->m2m_ctx);
+ }
+}
+
+/* Complete the transaction which has been scheduled for execution. */
+static int fimc_m2m_shutdown(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ int ret;
+
+ if (!fimc_m2m_pending(fimc))
+ return 0;
+
+ fimc_ctx_state_set(FIMC_CTX_SHUT, ctx);
+
+ ret = wait_event_timeout(fimc->irq_queue,
+ !fimc_ctx_state_is_set(FIMC_CTX_SHUT, ctx),
+ FIMC_SHUTDOWN_TIMEOUT);
+
+ return ret == 0 ? -ETIMEDOUT : ret;
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+ int ret;
+
+ ret = pm_runtime_get_sync(&ctx->fimc_dev->pdev->dev);
+ return ret > 0 ? 0 : ret;
+}
+
+static int stop_streaming(struct vb2_queue *q)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+ int ret;
+
+ ret = fimc_m2m_shutdown(ctx);
+ if (ret == -ETIMEDOUT)
+ fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+
+ pm_runtime_put(&ctx->fimc_dev->pdev->dev);
+ return 0;
+}
+
+static void fimc_device_run(void *priv)
+{
+ struct vb2_buffer *vb = NULL;
+ struct fimc_ctx *ctx = priv;
+ struct fimc_frame *sf, *df;
+ struct fimc_dev *fimc;
+ unsigned long flags;
+ u32 ret;
+
+ if (WARN(!ctx, "Null context\n"))
+ return;
+
+ fimc = ctx->fimc_dev;
+ spin_lock_irqsave(&fimc->slock, flags);
+
+ set_bit(ST_M2M_PEND, &fimc->state);
+ sf = &ctx->s_frame;
+ df = &ctx->d_frame;
+
+ if (ctx->state & FIMC_PARAMS) {
+ /* Prepare the DMA offsets for scaler */
+ fimc_prepare_dma_offset(ctx, sf);
+ fimc_prepare_dma_offset(ctx, df);
+ }
+
+ vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ ret = fimc_prepare_addr(ctx, vb, sf, &sf->paddr);
+ if (ret)
+ goto dma_unlock;
+
+ vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ ret = fimc_prepare_addr(ctx, vb, df, &df->paddr);
+ if (ret)
+ goto dma_unlock;
+
+ /* Reconfigure hardware if the context has changed. */
+ if (fimc->m2m.ctx != ctx) {
+ ctx->state |= FIMC_PARAMS;
+ fimc->m2m.ctx = ctx;
+ }
+
+ if (ctx->state & FIMC_PARAMS) {
+ fimc_set_yuv_order(ctx);
+ fimc_hw_set_input_path(ctx);
+ fimc_hw_set_in_dma(ctx);
+ ret = fimc_set_scaler_info(ctx);
+ if (ret)
+ goto dma_unlock;
+ fimc_hw_set_prescaler(ctx);
+ fimc_hw_set_mainscaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_hw_set_effect(ctx);
+ fimc_hw_set_out_dma(ctx);
+ if (fimc->variant->has_alpha)
+ fimc_hw_set_rgb_alpha(ctx);
+ fimc_hw_set_output_path(ctx);
+ }
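+ /* Program the source and destination DMA addresses */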
+ fimc_hw_set_input_addr(fimc, &sf->paddr);
+ fimc_hw_set_output_addr(fimc, &df->paddr, -1);
+
+ fimc_activate_capture(ctx);
+ ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP |
+ FIMC_SRC_FMT | FIMC_DST_FMT);
+ fimc_hw_activate_input_dma(fimc, true);
+
+dma_unlock:
+ spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static void fimc_job_abort(void *priv)
+{
+ fimc_m2m_shutdown(priv);
+}
+
+static int fimc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *allocators[])
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+ struct fimc_frame *f;
+ int i;
+
+ f = ctx_get_frame(ctx, vq->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ /*
+ * Return the number of non-contiguous planes (plane buffers)
+ * depending on the configured color format.
+ */
+ if (!f->fmt)
+ return -EINVAL;
+
+ *num_planes = f->fmt->memplanes;
+ for (i = 0; i < f->fmt->memplanes; i++) {
+ sizes[i] = (f->f_width * f->f_height * f->fmt->depth[i]) / 8;
+ allocators[i] = ctx->fimc_dev->alloc_ctx;
+ }
+ return 0;
+}
+
+static int fimc_buf_prepare(struct vb2_buffer *vb)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct fimc_frame *frame;
+ int i;
+
+ frame = ctx_get_frame(ctx, vb->vb2_queue->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ for (i = 0; i < frame->fmt->memplanes; i++)
+ vb2_set_plane_payload(vb, i, frame->payload[i]);
+
+ return 0;
+}
+
+static void fimc_buf_queue(struct vb2_buffer *vb)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ dbg("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
+
+ if (ctx->m2m_ctx)
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+static void fimc_lock(struct vb2_queue *vq)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+ mutex_lock(&ctx->fimc_dev->lock);
+}
+
+static void fimc_unlock(struct vb2_queue *vq)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+ mutex_unlock(&ctx->fimc_dev->lock);
+}
+
+static struct vb2_ops fimc_qops = {
+ .queue_setup = fimc_queue_setup,
+ .buf_prepare = fimc_buf_prepare,
+ .buf_queue = fimc_buf_queue,
+ .wait_prepare = fimc_unlock,
+ .wait_finish = fimc_lock,
+ .stop_streaming = stop_streaming,
+ .start_streaming = start_streaming,
+};
+
+/*
+ * V4L2 ioctl handlers
+ */
+static int fimc_m2m_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+
+ strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
+ strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
+ cap->bus_info[0] = 0;
+ cap->capabilities = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+
+ return 0;
+}
+
+static int fimc_m2m_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct fimc_fmt *fmt;
+
+ fmt = fimc_find_format(NULL, NULL, get_m2m_fmt_flags(f->type),
+ f->index);
+ if (!fmt)
+ return -EINVAL;
+
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_frame *frame = ctx_get_frame(ctx, f->type);
+
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ return fimc_fill_format(frame, f);
+}
+
+static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_variant *variant = fimc->variant;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct fimc_fmt *fmt;
+ u32 max_w, mod_x, mod_y;
+
+ if (!IS_M2M(f->type))
+ return -EINVAL;
+
+ dbg("w: %d, h: %d", pix->width, pix->height);
+
+ fmt = fimc_find_format(&pix->pixelformat, NULL,
+ get_m2m_fmt_flags(f->type), 0);
+ if (WARN(fmt == NULL, "Pixel format lookup failed"))
+ return -EINVAL;
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+ else if (pix->field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ max_w = variant->pix_limit->scaler_dis_w;
+ mod_x = ffs(variant->min_inp_pixsize) - 1;
+ } else {
+ max_w = variant->pix_limit->out_rot_dis_w;
+ mod_x = ffs(variant->min_out_pixsize) - 1;
+ }
+
+ if (tiled_fmt(fmt)) {
+ mod_x = 6; /* 64 x 32 pixels tile */
+ mod_y = 5;
+ } else {
+ if (variant->min_vsize_align == 1)
+ mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
+ else
+ mod_y = ffs(variant->min_vsize_align) - 1;
+ }
+
+ v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
+ &pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
+
+ fimc_adjust_mplane_format(fmt, pix->width, pix->height, &f->fmt.pix_mp);
+ return 0;
+}
+
+static int fimc_m2m_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ return fimc_try_fmt_mplane(ctx, f);
+}
+
+static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct vb2_queue *vq;
+ struct fimc_frame *frame;
+ struct v4l2_pix_format_mplane *pix;
+ int i, ret = 0;
+
+ ret = fimc_try_fmt_mplane(ctx, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(fimc->m2m.vfd, "queue (%d) busy\n", f->type);
+ return -EBUSY;
+ }
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ frame = &ctx->s_frame;
+ else
+ frame = &ctx->d_frame;
+
+ pix = &f->fmt.pix_mp;
+ frame->fmt = fimc_find_format(&pix->pixelformat, NULL,
+ get_m2m_fmt_flags(f->type), 0);
+ if (!frame->fmt)
+ return -EINVAL;
+
+ /* Update RGB Alpha control state and value range */
+ fimc_alpha_ctrl_update(ctx);
+
+ for (i = 0; i < frame->fmt->colplanes; i++) {
+ frame->payload[i] =
+ (pix->width * pix->height * frame->fmt->depth[i]) / 8;
+ }
+
+ fimc_fill_frame(frame, f);
+
+ ctx->scaler.enabled = 1;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ fimc_ctx_state_set(FIMC_PARAMS | FIMC_DST_FMT, ctx);
+ else
+ fimc_ctx_state_set(FIMC_PARAMS | FIMC_SRC_FMT, ctx);
+
+ dbg("f_w: %d, f_h: %d", frame->f_width, frame->f_height);
+
+ return 0;
+}
+
+static int fimc_m2m_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int fimc_m2m_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int fimc_m2m_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int fimc_m2m_dqbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int fimc_m2m_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ /* The source and target color formats need to be set */
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ if (!fimc_ctx_state_is_set(FIMC_SRC_FMT, ctx))
+ return -EINVAL;
+ } else if (!fimc_ctx_state_is_set(FIMC_DST_FMT, ctx)) {
+ return -EINVAL;
+ }
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int fimc_m2m_streamoff(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static int fimc_m2m_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cr)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_frame *frame;
+
+ frame = ctx_get_frame(ctx, cr->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ cr->bounds.left = 0;
+ cr->bounds.top = 0;
+ cr->bounds.width = frame->o_width;
+ cr->bounds.height = frame->o_height;
+ cr->defrect = cr->bounds;
+
+ return 0;
+}
+
+static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_frame *frame;
+
+ frame = ctx_get_frame(ctx, cr->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ cr->c.left = frame->offs_h;
+ cr->c.top = frame->offs_v;
+ cr->c.width = frame->width;
+ cr->c.height = frame->height;
+
+ return 0;
+}
+
+static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_frame *f;
+ u32 min_size, halign, depth = 0;
+ int i;
+
+ if (cr->c.top < 0 || cr->c.left < 0) {
+ v4l2_err(fimc->m2m.vfd,
+ "doesn't support negative values for top & left\n");
+ return -EINVAL;
+ }
+ if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ f = &ctx->d_frame;
+ else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ f = &ctx->s_frame;
+ else
+ return -EINVAL;
+
+ min_size = (f == &ctx->s_frame) ?
+ fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
+
+ /* Get pixel alignment constraints. */
+ if (fimc->variant->min_vsize_align == 1)
+ halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
+ else
+ halign = ffs(fimc->variant->min_vsize_align) - 1;
+
+ for (i = 0; i < f->fmt->colplanes; i++)
+ depth += f->fmt->depth[i];
+
+ v4l_bound_align_image(&cr->c.width, min_size, f->o_width,
+ ffs(min_size) - 1,
+ &cr->c.height, min_size, f->o_height,
+ halign, 64/(ALIGN(depth, 8)));
+
+ /* adjust left/top if cropping rectangle is out of bounds */
+ if (cr->c.left + cr->c.width > f->o_width)
+ cr->c.left = f->o_width - cr->c.width;
+ if (cr->c.top + cr->c.height > f->o_height)
+ cr->c.top = f->o_height - cr->c.height;
+
+ cr->c.left = round_down(cr->c.left, min_size);
+ cr->c.top = round_down(cr->c.top, fimc->variant->hor_offs_align);
+
+ dbg("l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
+ cr->c.left, cr->c.top, cr->c.width, cr->c.height,
+ f->f_width, f->f_height);
+
+ return 0;
+}
+
+static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_frame *f;
+ int ret;
+
+ ret = fimc_m2m_try_crop(ctx, cr);
+ if (ret)
+ return ret;
+
+ f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
+ &ctx->s_frame : &ctx->d_frame;
+
+ /* Check to see if scaling ratio is within supported range */
+ if (fimc_ctx_state_is_set(FIMC_DST_FMT | FIMC_SRC_FMT, ctx)) {
+ if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = fimc_check_scaler_ratio(ctx, cr->c.width,
+ cr->c.height, ctx->d_frame.width,
+ ctx->d_frame.height, ctx->rotation);
+ } else {
+ ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
+ ctx->s_frame.height, cr->c.width,
+ cr->c.height, ctx->rotation);
+ }
+ if (ret) {
+ v4l2_err(fimc->m2m.vfd, "Out of scaler range\n");
+ return -EINVAL;
+ }
+ }
+
+ f->offs_h = cr->c.left;
+ f->offs_v = cr->c.top;
+ f->width = cr->c.width;
+ f->height = cr->c.height;
+
+ fimc_ctx_state_set(FIMC_PARAMS, ctx);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
+ .vidioc_querycap = fimc_m2m_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = fimc_m2m_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = fimc_m2m_enum_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = fimc_m2m_g_fmt_mplane,
+ .vidioc_g_fmt_vid_out_mplane = fimc_m2m_g_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = fimc_m2m_try_fmt_mplane,
+ .vidioc_try_fmt_vid_out_mplane = fimc_m2m_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = fimc_m2m_s_fmt_mplane,
+ .vidioc_s_fmt_vid_out_mplane = fimc_m2m_s_fmt_mplane,
+ .vidioc_reqbufs = fimc_m2m_reqbufs,
+ .vidioc_querybuf = fimc_m2m_querybuf,
+ .vidioc_qbuf = fimc_m2m_qbuf,
+ .vidioc_dqbuf = fimc_m2m_dqbuf,
+ .vidioc_streamon = fimc_m2m_streamon,
+ .vidioc_streamoff = fimc_m2m_streamoff,
+ .vidioc_g_crop = fimc_m2m_g_crop,
+ .vidioc_s_crop = fimc_m2m_s_crop,
+ .vidioc_cropcap = fimc_m2m_cropcap,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct fimc_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &fimc_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &fimc_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int fimc_m2m_open(struct file *file)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_ctx *ctx;
+ int ret;
+
+ dbg("pid: %d, state: 0x%lx, refcnt: %d",
+ task_pid_nr(current), fimc->state, fimc->vid_cap.refcnt);
+
+ /*
+ * Return if the corresponding video capture node
+ * is already open.
+ */
+ if (fimc->vid_cap.refcnt > 0)
+ return -EBUSY;
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ v4l2_fh_init(&ctx->fh, fimc->m2m.vfd);
+ ctx->fimc_dev = fimc;
+
+ /* Default color format */
+ ctx->s_frame.fmt = fimc_get_format(0);
+ ctx->d_frame.fmt = fimc_get_format(0);
+
+ ret = fimc_ctrls_create(ctx);
+ if (ret)
+ goto error_fh;
+
+ /* Use separate control handler per file handle */
+ ctx->fh.ctrl_handler = &ctx->ctrls.handler;
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ /* Setup the device context for memory-to-memory mode */
+ ctx->state = FIMC_CTX_M2M;
+ ctx->flags = 0;
+ ctx->in_path = FIMC_IO_DMA;
+ ctx->out_path = FIMC_IO_DMA;
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ ret = PTR_ERR(ctx->m2m_ctx);
+ goto error_c;
+ }
+
+ if (fimc->m2m.refcnt++ == 0)
+ set_bit(ST_M2M_RUN, &fimc->state);
+ return 0;
+
+error_c:
+ fimc_ctrls_delete(ctx);
+error_fh:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+}
+
+static int fimc_m2m_release(struct file *file)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+
+ dbg("pid: %d, state: 0x%lx, refcnt= %d",
+ task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
+
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ fimc_ctrls_delete(ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+ if (--fimc->m2m.refcnt <= 0)
+ clear_bit(ST_M2M_RUN, &fimc->state);
+ kfree(ctx);
+ return 0;
+}
+
+static unsigned int fimc_m2m_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
+
+ return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+}
+
+
+static int fimc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
+
+ return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations fimc_m2m_fops = {
+ .owner = THIS_MODULE,
+ .open = fimc_m2m_open,
+ .release = fimc_m2m_release,
+ .poll = fimc_m2m_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = fimc_m2m_mmap,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+ .device_run = fimc_device_run,
+ .job_abort = fimc_job_abort,
+};
+
+int fimc_register_m2m_device(struct fimc_dev *fimc,
+ struct v4l2_device *v4l2_dev)
+{
+ struct video_device *vfd;
+ struct platform_device *pdev;
+ int ret = 0;
+
+ if (!fimc)
+ return -ENODEV;
+
+ pdev = fimc->pdev;
+ fimc->v4l2_dev = v4l2_dev;
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(v4l2_dev, "Failed to allocate video device\n");
+ return -ENOMEM;
+ }
+
+ vfd->fops = &fimc_m2m_fops;
+ vfd->ioctl_ops = &fimc_m2m_ioctl_ops;
+ vfd->v4l2_dev = v4l2_dev;
+ vfd->minor = -1;
+ vfd->release = video_device_release;
+ vfd->lock = &fimc->lock;
+ /*
+ * Locking in file operations other than ioctl should be done by the
+ * driver, not the V4L2 core.
+ * This driver needs auditing so that this flag can be removed.
+ */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
+
+ snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.m2m", fimc->id);
+ video_set_drvdata(vfd, fimc);
+
+ fimc->m2m.vfd = vfd;
+ fimc->m2m.m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(fimc->m2m.m2m_dev)) {
+ v4l2_err(v4l2_dev, "failed to initialize v4l2-m2m device\n");
+ ret = PTR_ERR(fimc->m2m.m2m_dev);
+ goto err_init;
+ }
+
+ ret = media_entity_init(&vfd->entity, 0, NULL, 0);
+ if (ret)
+ goto err_me;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_vd;
+
+ v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+ return 0;
+
+err_vd:
+ media_entity_cleanup(&vfd->entity);
+err_me:
+ v4l2_m2m_release(fimc->m2m.m2m_dev);
+err_init:
+ video_device_release(fimc->m2m.vfd);
+ return ret;
+}
+
+void fimc_unregister_m2m_device(struct fimc_dev *fimc)
+{
+ if (!fimc)
+ return;
+
+ if (fimc->m2m.m2m_dev)
+ v4l2_m2m_release(fimc->m2m.m2m_dev);
+ if (fimc->m2m.vfd) {
+ media_entity_cleanup(&fimc->m2m.vfd->entity);
+ /* Can also be called if video device wasn't registered */
+ video_unregister_device(fimc->m2m.vfd);
+ }
+}
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
index 62ed37e40149..6753c45631b8 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -25,6 +25,7 @@
#include <media/media-device.h>
#include "fimc-core.h"
+#include "fimc-lite.h"
#include "fimc-mdevice.h"
#include "mipi-csis.h"
@@ -37,22 +38,46 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
*
* Caller holds the graph mutex.
*/
-void fimc_pipeline_prepare(struct fimc_dev *fimc, struct media_entity *me)
+void fimc_pipeline_prepare(struct fimc_pipeline *p, struct media_entity *me)
{
- struct media_entity_graph graph;
+ struct media_pad *pad = &me->pads[0];
struct v4l2_subdev *sd;
+ int i;
- media_entity_graph_walk_start(&graph, me);
+ for (i = 0; i < IDX_MAX; i++)
+ p->subdevs[i] = NULL;
- while ((me = media_entity_graph_walk_next(&graph))) {
- if (media_entity_type(me) != MEDIA_ENT_T_V4L2_SUBDEV)
- continue;
- sd = media_entity_to_v4l2_subdev(me);
+ while (1) {
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ /* source pad */
+ pad = media_entity_remote_source(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
- if (sd->grp_id == SENSOR_GROUP_ID)
- fimc->pipeline.sensor = sd;
- else if (sd->grp_id == CSIS_GROUP_ID)
- fimc->pipeline.csis = sd;
+ switch (sd->grp_id) {
+ case SENSOR_GROUP_ID:
+ p->subdevs[IDX_SENSOR] = sd;
+ break;
+ case CSIS_GROUP_ID:
+ p->subdevs[IDX_CSIS] = sd;
+ break;
+ case FLITE_GROUP_ID:
+ p->subdevs[IDX_FLITE] = sd;
+ break;
+ case FIMC_GROUP_ID:
+ /* No need to control FIMC subdev through subdev ops */
+ break;
+ default:
+ pr_warn("%s: Unknown subdev grp_id: %#x\n",
+ __func__, sd->grp_id);
+ }
+ /* sink pad */
+ pad = &sd->entity.pads[0];
}
}
@@ -85,30 +110,27 @@ static int __subdev_set_power(struct v4l2_subdev *sd, int on)
/**
* fimc_pipeline_s_power - change power state of all pipeline subdevs
* @fimc: fimc device terminating the pipeline
- * @state: 1 to enable power or 0 for power down
+ * @state: true to power on, false to power off
*
- * Need to be called with the graph mutex held.
+ * Needs to be called with the graph mutex held.
*/
-int fimc_pipeline_s_power(struct fimc_dev *fimc, int state)
+int fimc_pipeline_s_power(struct fimc_pipeline *p, bool state)
{
- int ret = 0;
+ unsigned int i;
+ int ret;
- if (fimc->pipeline.sensor == NULL)
+ if (p->subdevs[IDX_SENSOR] == NULL)
return -ENXIO;
- if (state) {
- ret = __subdev_set_power(fimc->pipeline.csis, 1);
- if (ret && ret != -ENXIO)
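+ /* Power the subdevs up in reverse index order and down in ascending order */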
+ for (i = 0; i < IDX_MAX; i++) {
+ unsigned int idx = state ? (IDX_MAX - 1) - i : i;
+
+ ret = __subdev_set_power(p->subdevs[idx], state);
+ if (ret < 0 && ret != -ENXIO)
return ret;
- return __subdev_set_power(fimc->pipeline.sensor, 1);
}
- ret = __subdev_set_power(fimc->pipeline.sensor, 0);
- if (ret)
- return ret;
- ret = __subdev_set_power(fimc->pipeline.csis, 0);
-
- return ret == -ENXIO ? 0 : ret;
+ return 0;
}
/**
@@ -119,32 +141,36 @@ int fimc_pipeline_s_power(struct fimc_dev *fimc, int state)
*
* This function must be called with the graph mutex held.
*/
-static int __fimc_pipeline_initialize(struct fimc_dev *fimc,
+static int __fimc_pipeline_initialize(struct fimc_pipeline *p,
struct media_entity *me, bool prep)
{
int ret;
if (prep)
- fimc_pipeline_prepare(fimc, me);
- if (fimc->pipeline.sensor == NULL)
+ fimc_pipeline_prepare(p, me);
+
+ if (p->subdevs[IDX_SENSOR] == NULL)
return -EINVAL;
- ret = fimc_md_set_camclk(fimc->pipeline.sensor, true);
+
+ ret = fimc_md_set_camclk(p->subdevs[IDX_SENSOR], true);
if (ret)
return ret;
- return fimc_pipeline_s_power(fimc, 1);
+
+ return fimc_pipeline_s_power(p, 1);
}
-int fimc_pipeline_initialize(struct fimc_dev *fimc, struct media_entity *me,
+int fimc_pipeline_initialize(struct fimc_pipeline *p, struct media_entity *me,
bool prep)
{
int ret;
mutex_lock(&me->parent->graph_mutex);
- ret = __fimc_pipeline_initialize(fimc, me, prep);
+ ret = __fimc_pipeline_initialize(p, me, prep);
mutex_unlock(&me->parent->graph_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(fimc_pipeline_initialize);
/**
* __fimc_pipeline_shutdown - disable the sensor clock and pipeline power
@@ -154,52 +180,55 @@ int fimc_pipeline_initialize(struct fimc_dev *fimc, struct media_entity *me,
* sensor clock.
* Called with the graph mutex held.
*/
-int __fimc_pipeline_shutdown(struct fimc_dev *fimc)
+int __fimc_pipeline_shutdown(struct fimc_pipeline *p)
{
int ret = 0;
- if (fimc->pipeline.sensor) {
- ret = fimc_pipeline_s_power(fimc, 0);
- fimc_md_set_camclk(fimc->pipeline.sensor, false);
+ if (p->subdevs[IDX_SENSOR]) {
+ ret = fimc_pipeline_s_power(p, 0);
+ fimc_md_set_camclk(p->subdevs[IDX_SENSOR], false);
}
return ret == -ENXIO ? 0 : ret;
}
-int fimc_pipeline_shutdown(struct fimc_dev *fimc)
+int fimc_pipeline_shutdown(struct fimc_pipeline *p)
{
- struct media_entity *me = &fimc->vid_cap.vfd->entity;
+ struct media_entity *me = &p->subdevs[IDX_SENSOR]->entity;
int ret;
mutex_lock(&me->parent->graph_mutex);
- ret = __fimc_pipeline_shutdown(fimc);
+ ret = __fimc_pipeline_shutdown(p);
mutex_unlock(&me->parent->graph_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(fimc_pipeline_shutdown);
/**
* fimc_pipeline_s_stream - invoke s_stream on pipeline subdevs
- * @fimc: fimc device terminating the pipeline
+ * @pipeline: video pipeline structure
* @on: passed as the s_stream call argument
*/
-int fimc_pipeline_s_stream(struct fimc_dev *fimc, int on)
+int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
{
- struct fimc_pipeline *p = &fimc->pipeline;
- int ret = 0;
+ int i, ret;
- if (p->sensor == NULL)
+ if (p->subdevs[IDX_SENSOR] == NULL)
return -ENODEV;
- if ((on && p->csis) || !on)
- ret = v4l2_subdev_call(on ? p->csis : p->sensor,
- video, s_stream, on);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
- if ((!on && p->csis) || on)
- ret = v4l2_subdev_call(on ? p->sensor : p->csis,
- video, s_stream, on);
- return ret == -ENOIOCTLCMD ? 0 : ret;
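+ /* Enable in reverse index order, disable in ascending order */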
+ for (i = 0; i < IDX_MAX; i++) {
+ unsigned int idx = on ? (IDX_MAX - 1) - i : i;
+
+ ret = v4l2_subdev_call(p->subdevs[idx], video, s_stream, on);
+
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+ }
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(fimc_pipeline_s_stream);
/*
* Sensor subdevice helper functions
@@ -214,14 +243,20 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
return NULL;
adapter = i2c_get_adapter(s_info->pdata->i2c_bus_num);
- if (!adapter)
- return NULL;
+ if (!adapter) {
+ v4l2_warn(&fmd->v4l2_dev,
+ "Failed to get I2C adapter %d, deferring probe\n",
+ s_info->pdata->i2c_bus_num);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter,
s_info->pdata->board_info, NULL);
if (IS_ERR_OR_NULL(sd)) {
i2c_put_adapter(adapter);
- v4l2_err(&fmd->v4l2_dev, "Failed to acquire subdev\n");
- return NULL;
+ v4l2_warn(&fmd->v4l2_dev,
+ "Failed to acquire subdev %s, deferring probe\n",
+ s_info->pdata->board_info->type);
+ return ERR_PTR(-EPROBE_DEFER);
}
v4l2_set_subdev_hostdata(sd, s_info);
sd->grp_id = SENSOR_GROUP_ID;
@@ -269,13 +304,22 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
fmd->num_sensors = num_clients;
for (i = 0; i < num_clients; i++) {
+ struct v4l2_subdev *sd;
+
fmd->sensor[i].pdata = &pdata->isp_info[i];
ret = __fimc_md_set_camclk(fmd, &fmd->sensor[i], true);
if (ret)
break;
- fmd->sensor[i].subdev =
- fimc_md_register_sensor(fmd, &fmd->sensor[i]);
+ sd = fimc_md_register_sensor(fmd, &fmd->sensor[i]);
ret = __fimc_md_set_camclk(fmd, &fmd->sensor[i], false);
+
+ if (!IS_ERR(sd)) {
+ fmd->sensor[i].subdev = sd;
+ } else {
+ fmd->sensor[i].subdev = NULL;
+ ret = PTR_ERR(sd);
+ break;
+ }
if (ret)
break;
}
@@ -289,21 +333,50 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
static int fimc_register_callback(struct device *dev, void *p)
{
struct fimc_dev *fimc = dev_get_drvdata(dev);
+ struct v4l2_subdev *sd = &fimc->vid_cap.subdev;
struct fimc_md *fmd = p;
- int ret;
+ int ret = 0;
if (!fimc || !fimc->pdev)
return 0;
+
if (fimc->pdev->id < 0 || fimc->pdev->id >= FIMC_MAX_DEVS)
return 0;
fmd->fimc[fimc->pdev->id] = fimc;
- ret = fimc_register_m2m_device(fimc, &fmd->v4l2_dev);
- if (ret)
- return ret;
- ret = fimc_register_capture_device(fimc, &fmd->v4l2_dev);
- if (!ret)
- fimc->vid_cap.user_subdev_api = fmd->user_subdev_api;
+ sd->grp_id = FIMC_GROUP_ID;
+
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ if (ret) {
+ v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.%d (%d)\n",
+ fimc->id, ret);
+ }
+
+ return ret;
+}
+
+static int fimc_lite_register_callback(struct device *dev, void *p)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+ struct v4l2_subdev *sd = &fimc->subdev;
+ struct fimc_md *fmd = p;
+ int ret;
+
+ if (fimc == NULL)
+ return 0;
+
+ if (fimc->index >= FIMC_LITE_MAX_DEVS)
+ return 0;
+
+ fmd->fimc_lite[fimc->index] = fimc;
+ sd->grp_id = FLITE_GROUP_ID;
+
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ if (ret) {
+ v4l2_err(&fmd->v4l2_dev,
+ "Failed to register FIMC-LITE.%d (%d)\n",
+ fimc->index, ret);
+ }
return ret;
}
@@ -336,22 +409,56 @@ static int csis_register_callback(struct device *dev, void *p)
*/
static int fimc_md_register_platform_entities(struct fimc_md *fmd)
{
+ struct s5p_platform_fimc *pdata = fmd->pdev->dev.platform_data;
struct device_driver *driver;
- int ret;
+ int ret, i;
driver = driver_find(FIMC_MODULE_NAME, &platform_bus_type);
- if (!driver)
- return -ENODEV;
+ if (!driver) {
+ v4l2_warn(&fmd->v4l2_dev,
+ "%s driver not found, deffering probe\n",
+ FIMC_MODULE_NAME);
+ return -EPROBE_DEFER;
+ }
+
ret = driver_for_each_device(driver, NULL, fmd,
fimc_register_callback);
if (ret)
return ret;
- driver = driver_find(CSIS_DRIVER_NAME, &platform_bus_type);
- if (driver)
+ driver = driver_find(FIMC_LITE_DRV_NAME, &platform_bus_type);
+ if (driver && try_module_get(driver->owner)) {
ret = driver_for_each_device(driver, NULL, fmd,
- csis_register_callback);
- return ret;
+ fimc_lite_register_callback);
+ if (ret)
+ return ret;
+ module_put(driver->owner);
+ }
+ /*
+ * Check if there is any sensor on the MIPI-CSI2 bus and,
+ * if not, skip loading the s5p-csis module.
+ */
+ if (pdata == NULL)
+ return 0;
+ for (i = 0; i < pdata->num_clients; i++) {
+ if (pdata->isp_info[i].bus_type == FIMC_MIPI_CSI2) {
+ ret = 1;
+ break;
+ }
+ }
+ if (!ret)
+ return 0;
+
+ driver = driver_find(CSIS_DRIVER_NAME, &platform_bus_type);
+ if (!driver || !try_module_get(driver->owner)) {
+ v4l2_warn(&fmd->v4l2_dev,
+ "%s driver not found, deffering probe\n",
+ CSIS_DRIVER_NAME);
+ return -EPROBE_DEFER;
+ }
+
+ return driver_for_each_device(driver, NULL, fmd,
+ csis_register_callback);
}
static void fimc_md_unregister_entities(struct fimc_md *fmd)
@@ -361,14 +468,20 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
for (i = 0; i < FIMC_MAX_DEVS; i++) {
if (fmd->fimc[i] == NULL)
continue;
- fimc_unregister_m2m_device(fmd->fimc[i]);
- fimc_unregister_capture_device(fmd->fimc[i]);
+ v4l2_device_unregister_subdev(&fmd->fimc[i]->vid_cap.subdev);
fmd->fimc[i] = NULL;
}
+ for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
+ if (fmd->fimc_lite[i] == NULL)
+ continue;
+ v4l2_device_unregister_subdev(&fmd->fimc_lite[i]->subdev);
+ fmd->fimc_lite[i] = NULL;
+ }
for (i = 0; i < CSIS_MAX_ENTITIES; i++) {
if (fmd->csis[i].sd == NULL)
continue;
v4l2_device_unregister_subdev(fmd->csis[i].sd);
+ module_put(fmd->csis[i].sd->owner);
fmd->csis[i].sd = NULL;
}
for (i = 0; i < fmd->num_sensors; i++) {
@@ -379,35 +492,6 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
}
}
-static int fimc_md_register_video_nodes(struct fimc_md *fmd)
-{
- struct video_device *vdev;
- int i, ret = 0;
-
- for (i = 0; i < FIMC_MAX_DEVS && !ret; i++) {
- if (!fmd->fimc[i])
- continue;
-
- vdev = fmd->fimc[i]->m2m.vfd;
- if (vdev) {
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
- if (ret)
- break;
- v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
- vdev->name, video_device_node_name(vdev));
- }
-
- vdev = fmd->fimc[i]->vid_cap.vfd;
- if (vdev == NULL)
- continue;
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
- v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
- vdev->name, video_device_node_name(vdev));
- }
-
- return ret;
-}
-
/**
* __fimc_md_create_fimc_links - create links to all FIMC entities
* @fmd: fimc media device
@@ -416,29 +500,29 @@ static int fimc_md_register_video_nodes(struct fimc_md *fmd)
* @pad: the source entity pad index
* @fimc_id: index of the fimc device for which link should be enabled
*/
-static int __fimc_md_create_fimc_links(struct fimc_md *fmd,
- struct media_entity *source,
- struct v4l2_subdev *sensor,
- int pad, int fimc_id)
+static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
+ struct media_entity *source,
+ struct v4l2_subdev *sensor,
+ int pad, int fimc_id)
{
struct fimc_sensor_info *s_info;
struct media_entity *sink;
- unsigned int flags;
+ unsigned int flags = 0;
int ret, i;
for (i = 0; i < FIMC_MAX_DEVS; i++) {
if (!fmd->fimc[i])
- break;
+ continue;
/*
* Some FIMC variants are not fitted with camera capture
* interface. Skip creating a link from sensor for those.
*/
- if (sensor->grp_id == SENSOR_GROUP_ID &&
- !fmd->fimc[i]->variant->has_cam_if)
+ if (!fmd->fimc[i]->variant->has_cam_if)
continue;
flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
- sink = &fmd->fimc[i]->vid_cap.subdev->entity;
+
+ sink = &fmd->fimc[i]->vid_cap.subdev.entity;
ret = media_entity_create_link(source, pad, sink,
FIMC_SD_PAD_SINK, flags);
if (ret)
@@ -453,7 +537,7 @@ static int __fimc_md_create_fimc_links(struct fimc_md *fmd,
v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]",
source->name, flags ? '=' : '-', sink->name);
- if (flags == 0)
+ if (flags == 0 || sensor == NULL)
continue;
s_info = v4l2_get_subdev_hostdata(sensor);
if (!WARN_ON(s_info == NULL)) {
@@ -463,9 +547,55 @@ static int __fimc_md_create_fimc_links(struct fimc_md *fmd,
spin_unlock_irqrestore(&fmd->slock, irq_flags);
}
}
+
+ for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
+ if (!fmd->fimc_lite[i])
+ continue;
+
+ flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
+
+ sink = &fmd->fimc_lite[i]->subdev.entity;
+ ret = media_entity_create_link(source, pad, sink,
+ FLITE_SD_PAD_SINK, flags);
+ if (ret)
+ return ret;
+
+ /* Notify FIMC-LITE subdev entity */
+ ret = media_entity_call(sink, link_setup, &sink->pads[0],
+ &source->pads[pad], flags);
+ if (ret)
+ break;
+
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]",
+ source->name, flags ? '=' : '-', sink->name);
+ }
return 0;
}
+/* Create links from FIMC-LITE source pads to other entities */
+static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
+{
+ struct media_entity *source, *sink;
+ unsigned int flags = MEDIA_LNK_FL_ENABLED;
+ int i, ret = 0;
+
+ for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
+ struct fimc_lite *fimc = fmd->fimc_lite[i];
+ if (fimc == NULL)
+ continue;
+ source = &fimc->subdev.entity;
+ sink = &fimc->vfd->entity;
+ /* FIMC-LITE's subdev and video node */
+ ret = media_entity_create_link(source, FIMC_SD_PAD_SOURCE,
+ sink, 0, flags);
+ if (ret)
+ break;
+ /* TODO: create links to other entities */
+ }
+
+ return ret;
+}
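Both link-building helpers reduce to the same media-controller call: create a link from a source pad of one entity to a sink pad of another, with flags selecting whether the link starts out enabled (or, for the subdev-to-video-node links created later, immutable). A stripped-down sketch with placeholder pad indexes:

	#include <media/media-entity.h>

	/* Hypothetical helper: wire source pad 1 to sink pad 0, link enabled. */
	static int demo_link_entities(struct media_entity *src,
				      struct media_entity *sink)
	{
		return media_entity_create_link(src, 1, sink, 0,
						MEDIA_LNK_FL_ENABLED);
	}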
+
/**
* fimc_md_create_links - create default links between registered entities
*
@@ -522,8 +652,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
v4l2_info(&fmd->v4l2_dev, "created link [%s] => [%s]",
sensor->entity.name, csis->entity.name);
- source = &csis->entity;
- pad = CSIS_PAD_SOURCE;
+ source = NULL;
break;
case FIMC_ITU_601...FIMC_ITU_656:
@@ -539,15 +668,27 @@ static int fimc_md_create_links(struct fimc_md *fmd)
if (source == NULL)
continue;
- ret = __fimc_md_create_fimc_links(fmd, source, sensor, pad,
- fimc_id++);
+ ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor,
+ pad, fimc_id++);
}
+
+ fimc_id = 0;
+ for (i = 0; i < ARRAY_SIZE(fmd->csis); i++) {
+ if (fmd->csis[i].sd == NULL)
+ continue;
+ source = &fmd->csis[i].sd->entity;
+ pad = CSIS_PAD_SOURCE;
+
+ ret = __fimc_md_create_fimc_sink_links(fmd, source, NULL,
+ pad, fimc_id++);
+ }
+
/* Create immutable links between each FIMC's subdev and video node */
flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
for (i = 0; i < FIMC_MAX_DEVS; i++) {
if (!fmd->fimc[i])
continue;
- source = &fmd->fimc[i]->vid_cap.subdev->entity;
+ source = &fmd->fimc[i]->vid_cap.subdev.entity;
sink = &fmd->fimc[i]->vid_cap.vfd->entity;
ret = media_entity_create_link(source, FIMC_SD_PAD_SOURCE,
sink, 0, flags);
@@ -555,7 +696,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
break;
}
- return ret;
+ return __fimc_md_create_flite_source_links(fmd);
}
/*
@@ -663,24 +804,40 @@ int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on)
static int fimc_md_link_notify(struct media_pad *source,
struct media_pad *sink, u32 flags)
{
+ struct fimc_lite *fimc_lite = NULL;
+ struct fimc_dev *fimc = NULL;
+ struct fimc_pipeline *pipeline;
struct v4l2_subdev *sd;
- struct fimc_dev *fimc;
int ret = 0;
if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
return 0;
sd = media_entity_to_v4l2_subdev(sink->entity);
- fimc = v4l2_get_subdevdata(sd);
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- ret = __fimc_pipeline_shutdown(fimc);
- fimc->pipeline.sensor = NULL;
- fimc->pipeline.csis = NULL;
+ switch (sd->grp_id) {
+ case FLITE_GROUP_ID:
+ fimc_lite = v4l2_get_subdevdata(sd);
+ pipeline = &fimc_lite->pipeline;
+ break;
+ case FIMC_GROUP_ID:
+ fimc = v4l2_get_subdevdata(sd);
+ pipeline = &fimc->pipeline;
+ break;
+ default:
+ return 0;
+ }
- mutex_lock(&fimc->lock);
- fimc_ctrls_delete(fimc->vid_cap.ctx);
- mutex_unlock(&fimc->lock);
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ret = __fimc_pipeline_shutdown(pipeline);
+ pipeline->subdevs[IDX_SENSOR] = NULL;
+ pipeline->subdevs[IDX_CSIS] = NULL;
+
+ if (fimc) {
+ mutex_lock(&fimc->lock);
+ fimc_ctrls_delete(fimc->vid_cap.ctx);
+ mutex_unlock(&fimc->lock);
+ }
return ret;
}
/*
@@ -688,14 +845,23 @@ static int fimc_md_link_notify(struct media_pad *source,
* pipeline is already in use, i.e. its video node is opened.
* Recreate the controls destroyed during the link deactivation.
*/
- mutex_lock(&fimc->lock);
- if (fimc->vid_cap.refcnt > 0) {
- ret = __fimc_pipeline_initialize(fimc, source->entity, true);
+ if (fimc) {
+ mutex_lock(&fimc->lock);
+ if (fimc->vid_cap.refcnt > 0) {
+ ret = __fimc_pipeline_initialize(pipeline,
+ source->entity, true);
if (!ret)
ret = fimc_capture_ctrls_create(fimc);
+ }
+ mutex_unlock(&fimc->lock);
+ } else {
+ mutex_lock(&fimc_lite->lock);
+ if (fimc_lite->ref_count > 0) {
+ ret = __fimc_pipeline_initialize(pipeline,
+ source->entity, true);
+ }
+ mutex_unlock(&fimc_lite->lock);
}
- mutex_unlock(&fimc->lock);
-
return ret ? -EPIPE : ret;
}
@@ -744,7 +910,7 @@ static ssize_t fimc_md_sysfs_store(struct device *dev,
static DEVICE_ATTR(subdev_conf_mode, S_IWUSR | S_IRUGO,
fimc_md_sysfs_show, fimc_md_sysfs_store);
-static int __devinit fimc_md_probe(struct platform_device *pdev)
+static int fimc_md_probe(struct platform_device *pdev)
{
struct v4l2_device *v4l2_dev;
struct fimc_md *fmd;
@@ -776,42 +942,48 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
ret = media_device_register(&fmd->media_dev);
if (ret < 0) {
v4l2_err(v4l2_dev, "Failed to register media device: %d\n", ret);
- goto err2;
+ goto err_md;
}
ret = fimc_md_get_clocks(fmd);
if (ret)
- goto err3;
+ goto err_clk;
fmd->user_subdev_api = false;
+
+ /* Protect the media graph while we're registering entities */
+ mutex_lock(&fmd->media_dev.graph_mutex);
+
ret = fimc_md_register_platform_entities(fmd);
if (ret)
- goto err3;
+ goto err_unlock;
if (pdev->dev.platform_data) {
ret = fimc_md_register_sensor_entities(fmd);
if (ret)
- goto err3;
+ goto err_unlock;
}
ret = fimc_md_create_links(fmd);
if (ret)
- goto err3;
+ goto err_unlock;
ret = v4l2_device_register_subdev_nodes(&fmd->v4l2_dev);
if (ret)
- goto err3;
- ret = fimc_md_register_video_nodes(fmd);
- if (ret)
- goto err3;
+ goto err_unlock;
ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
- if (!ret) {
- platform_set_drvdata(pdev, fmd);
- return 0;
- }
-err3:
+ if (ret)
+ goto err_unlock;
+
+ platform_set_drvdata(pdev, fmd);
+ mutex_unlock(&fmd->media_dev.graph_mutex);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&fmd->media_dev.graph_mutex);
+err_clk:
media_device_unregister(&fmd->media_dev);
fimc_md_put_clocks(fmd);
fimc_md_unregister_entities(fmd);
-err2:
+err_md:
v4l2_device_unregister(&fmd->v4l2_dev);
return ret;
}
@@ -841,10 +1013,12 @@ static struct platform_driver fimc_md_driver = {
int __init fimc_md_init(void)
{
int ret;
+
request_module("s5p-csis");
ret = fimc_register_driver();
if (ret)
return ret;
+
return platform_driver_register(&fimc_md_driver);
}
void __exit fimc_md_exit(void)
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.h b/drivers/media/video/s5p-fimc/fimc-mdevice.h
index da3780823e7d..3b8a3492a176 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.h
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 - 2012 Samsung Electronics Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,12 +18,15 @@
#include <media/v4l2-subdev.h>
#include "fimc-core.h"
+#include "fimc-lite.h"
#include "mipi-csis.h"
-/* Group IDs of sensor, MIPI CSIS and the writeback subdevs. */
+/* Group IDs of sensor, MIPI-CSIS, FIMC-LITE and the writeback subdevs. */
#define SENSOR_GROUP_ID (1 << 8)
#define CSIS_GROUP_ID (1 << 9)
#define WRITEBACK_GROUP_ID (1 << 10)
+#define FIMC_GROUP_ID (1 << 11)
+#define FLITE_GROUP_ID (1 << 12)
#define FIMC_MAX_SENSORS 8
#define FIMC_MAX_CAMCLKS 2
@@ -73,6 +76,7 @@ struct fimc_md {
struct fimc_sensor_info sensor[FIMC_MAX_SENSORS];
int num_sensors;
struct fimc_camclk_info camclk[FIMC_MAX_CAMCLKS];
+ struct fimc_lite *fimc_lite[FIMC_LITE_MAX_DEVS];
struct fimc_dev *fimc[FIMC_MAX_DEVS];
struct media_device media_dev;
struct v4l2_device v4l2_dev;
@@ -108,11 +112,11 @@ static inline void fimc_md_graph_unlock(struct fimc_dev *fimc)
}
int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on);
-void fimc_pipeline_prepare(struct fimc_dev *fimc, struct media_entity *me);
-int fimc_pipeline_initialize(struct fimc_dev *fimc, struct media_entity *me,
+void fimc_pipeline_prepare(struct fimc_pipeline *p, struct media_entity *me);
+int fimc_pipeline_initialize(struct fimc_pipeline *p, struct media_entity *me,
bool resume);
-int fimc_pipeline_shutdown(struct fimc_dev *fimc);
-int fimc_pipeline_s_power(struct fimc_dev *fimc, int state);
-int fimc_pipeline_s_stream(struct fimc_dev *fimc, int state);
+int fimc_pipeline_shutdown(struct fimc_pipeline *p);
+int fimc_pipeline_s_power(struct fimc_pipeline *p, bool state);
+int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool state);
#endif
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.c b/drivers/media/video/s5p-fimc/fimc-reg.c
index 15466d0529c1..1fc4ce8446f5 100644
--- a/drivers/media/video/s5p-fimc/fimc-reg.c
+++ b/drivers/media/video/s5p-fimc/fimc-reg.c
@@ -1,9 +1,8 @@
/*
* Register interface file for Samsung Camera Interface (FIMC) driver
*
- * Copyright (c) 2010 Samsung Electronics
- *
- * Sylwester Nawrocki, s.nawrocki@samsung.com
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki, <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -12,9 +11,9 @@
#include <linux/io.h>
#include <linux/delay.h>
-#include <mach/map.h>
#include <media/s5p_fimc.h>
+#include "fimc-reg.h"
#include "fimc-core.h"
@@ -22,19 +21,19 @@ void fimc_hw_reset(struct fimc_dev *dev)
{
u32 cfg;
- cfg = readl(dev->regs + S5P_CISRCFMT);
- cfg |= S5P_CISRCFMT_ITU601_8BIT;
- writel(cfg, dev->regs + S5P_CISRCFMT);
+ cfg = readl(dev->regs + FIMC_REG_CISRCFMT);
+ cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
+ writel(cfg, dev->regs + FIMC_REG_CISRCFMT);
/* Software reset. */
- cfg = readl(dev->regs + S5P_CIGCTRL);
- cfg |= (S5P_CIGCTRL_SWRST | S5P_CIGCTRL_IRQ_LEVEL);
- writel(cfg, dev->regs + S5P_CIGCTRL);
+ cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+ cfg |= (FIMC_REG_CIGCTRL_SWRST | FIMC_REG_CIGCTRL_IRQ_LEVEL);
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
udelay(10);
- cfg = readl(dev->regs + S5P_CIGCTRL);
- cfg &= ~S5P_CIGCTRL_SWRST;
- writel(cfg, dev->regs + S5P_CIGCTRL);
+ cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+ cfg &= ~FIMC_REG_CIGCTRL_SWRST;
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
if (dev->variant->out_buf_count > 4)
fimc_hw_set_dma_seq(dev, 0xF);
@@ -42,32 +41,32 @@ void fimc_hw_reset(struct fimc_dev *dev)
static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
{
- u32 flip = S5P_MSCTRL_FLIP_NORMAL;
+ u32 flip = FIMC_REG_MSCTRL_FLIP_NORMAL;
if (ctx->hflip)
- flip = S5P_MSCTRL_FLIP_X_MIRROR;
+ flip = FIMC_REG_MSCTRL_FLIP_X_MIRROR;
if (ctx->vflip)
- flip = S5P_MSCTRL_FLIP_Y_MIRROR;
+ flip = FIMC_REG_MSCTRL_FLIP_Y_MIRROR;
if (ctx->rotation <= 90)
return flip;
- return (flip ^ S5P_MSCTRL_FLIP_180) & S5P_MSCTRL_FLIP_180;
+ return (flip ^ FIMC_REG_MSCTRL_FLIP_180) & FIMC_REG_MSCTRL_FLIP_180;
}
static u32 fimc_hw_get_target_flip(struct fimc_ctx *ctx)
{
- u32 flip = S5P_CITRGFMT_FLIP_NORMAL;
+ u32 flip = FIMC_REG_CITRGFMT_FLIP_NORMAL;
if (ctx->hflip)
- flip |= S5P_CITRGFMT_FLIP_X_MIRROR;
+ flip |= FIMC_REG_CITRGFMT_FLIP_X_MIRROR;
if (ctx->vflip)
- flip |= S5P_CITRGFMT_FLIP_Y_MIRROR;
+ flip |= FIMC_REG_CITRGFMT_FLIP_Y_MIRROR;
if (ctx->rotation <= 90)
return flip;
- return (flip ^ S5P_CITRGFMT_FLIP_180) & S5P_CITRGFMT_FLIP_180;
+ return (flip ^ FIMC_REG_CITRGFMT_FLIP_180) & FIMC_REG_CITRGFMT_FLIP_180;
}
void fimc_hw_set_rotation(struct fimc_ctx *ctx)
@@ -75,9 +74,9 @@ void fimc_hw_set_rotation(struct fimc_ctx *ctx)
u32 cfg, flip;
struct fimc_dev *dev = ctx->fimc_dev;
- cfg = readl(dev->regs + S5P_CITRGFMT);
- cfg &= ~(S5P_CITRGFMT_INROT90 | S5P_CITRGFMT_OUTROT90 |
- S5P_CITRGFMT_FLIP_180);
+ cfg = readl(dev->regs + FIMC_REG_CITRGFMT);
+ cfg &= ~(FIMC_REG_CITRGFMT_INROT90 | FIMC_REG_CITRGFMT_OUTROT90 |
+ FIMC_REG_CITRGFMT_FLIP_180);
/*
* The input and output rotator cannot work simultaneously.
@@ -85,21 +84,21 @@ void fimc_hw_set_rotation(struct fimc_ctx *ctx)
* in direct fifo output mode.
*/
if (ctx->rotation == 90 || ctx->rotation == 270) {
- if (ctx->out_path == FIMC_LCDFIFO)
- cfg |= S5P_CITRGFMT_INROT90;
+ if (ctx->out_path == FIMC_IO_LCDFIFO)
+ cfg |= FIMC_REG_CITRGFMT_INROT90;
else
- cfg |= S5P_CITRGFMT_OUTROT90;
+ cfg |= FIMC_REG_CITRGFMT_OUTROT90;
}
- if (ctx->out_path == FIMC_DMA) {
+ if (ctx->out_path == FIMC_IO_DMA) {
cfg |= fimc_hw_get_target_flip(ctx);
- writel(cfg, dev->regs + S5P_CITRGFMT);
+ writel(cfg, dev->regs + FIMC_REG_CITRGFMT);
} else {
/* LCD FIFO path */
- flip = readl(dev->regs + S5P_MSCTRL);
- flip &= ~S5P_MSCTRL_FLIP_MASK;
+ flip = readl(dev->regs + FIMC_REG_MSCTRL);
+ flip &= ~FIMC_REG_MSCTRL_FLIP_MASK;
flip |= fimc_hw_get_in_flip(ctx);
- writel(flip, dev->regs + S5P_MSCTRL);
+ writel(flip, dev->regs + FIMC_REG_MSCTRL);
}
}
@@ -110,43 +109,40 @@ void fimc_hw_set_target_format(struct fimc_ctx *ctx)
struct fimc_frame *frame = &ctx->d_frame;
dbg("w= %d, h= %d color: %d", frame->width,
- frame->height, frame->fmt->color);
+ frame->height, frame->fmt->color);
- cfg = readl(dev->regs + S5P_CITRGFMT);
- cfg &= ~(S5P_CITRGFMT_FMT_MASK | S5P_CITRGFMT_HSIZE_MASK |
- S5P_CITRGFMT_VSIZE_MASK);
+ cfg = readl(dev->regs + FIMC_REG_CITRGFMT);
+ cfg &= ~(FIMC_REG_CITRGFMT_FMT_MASK | FIMC_REG_CITRGFMT_HSIZE_MASK |
+ FIMC_REG_CITRGFMT_VSIZE_MASK);
switch (frame->fmt->color) {
- case S5P_FIMC_RGB444...S5P_FIMC_RGB888:
- cfg |= S5P_CITRGFMT_RGB;
+ case FIMC_FMT_RGB444...FIMC_FMT_RGB888:
+ cfg |= FIMC_REG_CITRGFMT_RGB;
break;
- case S5P_FIMC_YCBCR420:
- cfg |= S5P_CITRGFMT_YCBCR420;
+ case FIMC_FMT_YCBCR420:
+ cfg |= FIMC_REG_CITRGFMT_YCBCR420;
break;
- case S5P_FIMC_YCBYCR422...S5P_FIMC_CRYCBY422:
+ case FIMC_FMT_YCBYCR422...FIMC_FMT_CRYCBY422:
if (frame->fmt->colplanes == 1)
- cfg |= S5P_CITRGFMT_YCBCR422_1P;
+ cfg |= FIMC_REG_CITRGFMT_YCBCR422_1P;
else
- cfg |= S5P_CITRGFMT_YCBCR422;
+ cfg |= FIMC_REG_CITRGFMT_YCBCR422;
break;
default:
break;
}
- if (ctx->rotation == 90 || ctx->rotation == 270) {
- cfg |= S5P_CITRGFMT_HSIZE(frame->height);
- cfg |= S5P_CITRGFMT_VSIZE(frame->width);
- } else {
-
- cfg |= S5P_CITRGFMT_HSIZE(frame->width);
- cfg |= S5P_CITRGFMT_VSIZE(frame->height);
- }
+ if (ctx->rotation == 90 || ctx->rotation == 270)
+ cfg |= (frame->height << 16) | frame->width;
+ else
+ cfg |= (frame->width << 16) | frame->height;
- writel(cfg, dev->regs + S5P_CITRGFMT);
+ writel(cfg, dev->regs + FIMC_REG_CITRGFMT);
- cfg = readl(dev->regs + S5P_CITAREA) & ~S5P_CITAREA_MASK;
+ cfg = readl(dev->regs + FIMC_REG_CITAREA);
+ cfg &= ~FIMC_REG_CITAREA_MASK;
cfg |= (frame->width * frame->height);
- writel(cfg, dev->regs + S5P_CITAREA);
+ writel(cfg, dev->regs + FIMC_REG_CITAREA);
}
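The rewritten register accesses drop the S5P_*_HSIZE/VSIZE helper macros in favour of packing the two dimensions directly, horizontal size in the upper half-word and vertical size in the lower one (matching the 0xfff masks defined in fimc-reg.h). A small sketch of the encoding, with the helper name made up for illustration:

	#include <linux/types.h>

	/* Pack width into bits 16.. and height into bits 0.. of a size register. */
	static inline u32 demo_pack_size(u32 width, u32 height)
	{
		return (width << 16) | height;
	}
	/* e.g. demo_pack_size(1280, 720) == 0x050002d0 */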
static void fimc_hw_set_out_dma_size(struct fimc_ctx *ctx)
@@ -155,87 +151,82 @@ static void fimc_hw_set_out_dma_size(struct fimc_ctx *ctx)
struct fimc_frame *frame = &ctx->d_frame;
u32 cfg;
- cfg = S5P_ORIG_SIZE_HOR(frame->f_width);
- cfg |= S5P_ORIG_SIZE_VER(frame->f_height);
- writel(cfg, dev->regs + S5P_ORGOSIZE);
+ cfg = (frame->f_height << 16) | frame->f_width;
+ writel(cfg, dev->regs + FIMC_REG_ORGOSIZE);
/* Select color space conversion equation (HD/SD size).*/
- cfg = readl(dev->regs + S5P_CIGCTRL);
+ cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
if (frame->f_width >= 1280) /* HD */
- cfg |= S5P_CIGCTRL_CSC_ITU601_709;
+ cfg |= FIMC_REG_CIGCTRL_CSC_ITU601_709;
else /* SD */
- cfg &= ~S5P_CIGCTRL_CSC_ITU601_709;
- writel(cfg, dev->regs + S5P_CIGCTRL);
+ cfg &= ~FIMC_REG_CIGCTRL_CSC_ITU601_709;
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
}
void fimc_hw_set_out_dma(struct fimc_ctx *ctx)
{
- u32 cfg;
struct fimc_dev *dev = ctx->fimc_dev;
struct fimc_frame *frame = &ctx->d_frame;
struct fimc_dma_offset *offset = &frame->dma_offset;
struct fimc_fmt *fmt = frame->fmt;
+ u32 cfg;
/* Set the input dma offsets. */
- cfg = 0;
- cfg |= S5P_CIO_OFFS_HOR(offset->y_h);
- cfg |= S5P_CIO_OFFS_VER(offset->y_v);
- writel(cfg, dev->regs + S5P_CIOYOFF);
+ cfg = (offset->y_v << 16) | offset->y_h;
+ writel(cfg, dev->regs + FIMC_REG_CIOYOFF);
- cfg = 0;
- cfg |= S5P_CIO_OFFS_HOR(offset->cb_h);
- cfg |= S5P_CIO_OFFS_VER(offset->cb_v);
- writel(cfg, dev->regs + S5P_CIOCBOFF);
+ cfg = (offset->cb_v << 16) | offset->cb_h;
+ writel(cfg, dev->regs + FIMC_REG_CIOCBOFF);
- cfg = 0;
- cfg |= S5P_CIO_OFFS_HOR(offset->cr_h);
- cfg |= S5P_CIO_OFFS_VER(offset->cr_v);
- writel(cfg, dev->regs + S5P_CIOCROFF);
+ cfg = (offset->cr_v << 16) | offset->cr_h;
+ writel(cfg, dev->regs + FIMC_REG_CIOCROFF);
fimc_hw_set_out_dma_size(ctx);
/* Configure chroma components order. */
- cfg = readl(dev->regs + S5P_CIOCTRL);
+ cfg = readl(dev->regs + FIMC_REG_CIOCTRL);
- cfg &= ~(S5P_CIOCTRL_ORDER2P_MASK | S5P_CIOCTRL_ORDER422_MASK |
- S5P_CIOCTRL_YCBCR_PLANE_MASK | S5P_CIOCTRL_RGB16FMT_MASK);
+ cfg &= ~(FIMC_REG_CIOCTRL_ORDER2P_MASK |
+ FIMC_REG_CIOCTRL_ORDER422_MASK |
+ FIMC_REG_CIOCTRL_YCBCR_PLANE_MASK |
+ FIMC_REG_CIOCTRL_RGB16FMT_MASK);
if (fmt->colplanes == 1)
cfg |= ctx->out_order_1p;
else if (fmt->colplanes == 2)
- cfg |= ctx->out_order_2p | S5P_CIOCTRL_YCBCR_2PLANE;
+ cfg |= ctx->out_order_2p | FIMC_REG_CIOCTRL_YCBCR_2PLANE;
else if (fmt->colplanes == 3)
- cfg |= S5P_CIOCTRL_YCBCR_3PLANE;
+ cfg |= FIMC_REG_CIOCTRL_YCBCR_3PLANE;
- if (fmt->color == S5P_FIMC_RGB565)
- cfg |= S5P_CIOCTRL_RGB565;
- else if (fmt->color == S5P_FIMC_RGB555)
- cfg |= S5P_CIOCTRL_ARGB1555;
- else if (fmt->color == S5P_FIMC_RGB444)
- cfg |= S5P_CIOCTRL_ARGB4444;
+ if (fmt->color == FIMC_FMT_RGB565)
+ cfg |= FIMC_REG_CIOCTRL_RGB565;
+ else if (fmt->color == FIMC_FMT_RGB555)
+ cfg |= FIMC_REG_CIOCTRL_ARGB1555;
+ else if (fmt->color == FIMC_FMT_RGB444)
+ cfg |= FIMC_REG_CIOCTRL_ARGB4444;
- writel(cfg, dev->regs + S5P_CIOCTRL);
+ writel(cfg, dev->regs + FIMC_REG_CIOCTRL);
}
static void fimc_hw_en_autoload(struct fimc_dev *dev, int enable)
{
- u32 cfg = readl(dev->regs + S5P_ORGISIZE);
+ u32 cfg = readl(dev->regs + FIMC_REG_ORGISIZE);
if (enable)
- cfg |= S5P_CIREAL_ISIZE_AUTOLOAD_EN;
+ cfg |= FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN;
else
- cfg &= ~S5P_CIREAL_ISIZE_AUTOLOAD_EN;
- writel(cfg, dev->regs + S5P_ORGISIZE);
+ cfg &= ~FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN;
+ writel(cfg, dev->regs + FIMC_REG_ORGISIZE);
}
void fimc_hw_en_lastirq(struct fimc_dev *dev, int enable)
{
- u32 cfg = readl(dev->regs + S5P_CIOCTRL);
+ u32 cfg = readl(dev->regs + FIMC_REG_CIOCTRL);
if (enable)
- cfg |= S5P_CIOCTRL_LASTIRQ_ENABLE;
+ cfg |= FIMC_REG_CIOCTRL_LASTIRQ_ENABLE;
else
- cfg &= ~S5P_CIOCTRL_LASTIRQ_ENABLE;
- writel(cfg, dev->regs + S5P_CIOCTRL);
+ cfg &= ~FIMC_REG_CIOCTRL_LASTIRQ_ENABLE;
+ writel(cfg, dev->regs + FIMC_REG_CIOCTRL);
}
void fimc_hw_set_prescaler(struct fimc_ctx *ctx)
@@ -245,15 +236,13 @@ void fimc_hw_set_prescaler(struct fimc_ctx *ctx)
u32 cfg, shfactor;
shfactor = 10 - (sc->hfactor + sc->vfactor);
+ cfg = shfactor << 28;
- cfg = S5P_CISCPRERATIO_SHFACTOR(shfactor);
- cfg |= S5P_CISCPRERATIO_HOR(sc->pre_hratio);
- cfg |= S5P_CISCPRERATIO_VER(sc->pre_vratio);
- writel(cfg, dev->regs + S5P_CISCPRERATIO);
+ cfg |= (sc->pre_hratio << 16) | sc->pre_vratio;
+ writel(cfg, dev->regs + FIMC_REG_CISCPRERATIO);
- cfg = S5P_CISCPREDST_WIDTH(sc->pre_dst_width);
- cfg |= S5P_CISCPREDST_HEIGHT(sc->pre_dst_height);
- writel(cfg, dev->regs + S5P_CISCPREDST);
+ cfg = (sc->pre_dst_width << 16) | sc->pre_dst_height;
+ writel(cfg, dev->regs + FIMC_REG_CISCPREDST);
}
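As a quick worked example of the pre-scaler packing above (values illustrative): with hfactor = 2 and vfactor = 1, shfactor = 10 - 3 = 7, so cfg starts at 7 << 28 = 0x70000000; pre_hratio = 4 and pre_vratio = 2 then yield cfg = 0x70000000 | (4 << 16) | 2 = 0x70040002, which is written to FIMC_REG_CISCPRERATIO.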
static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
@@ -263,93 +252,95 @@ static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
struct fimc_frame *src_frame = &ctx->s_frame;
struct fimc_frame *dst_frame = &ctx->d_frame;
- u32 cfg = readl(dev->regs + S5P_CISCCTRL);
+ u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
- cfg &= ~(S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE |
- S5P_CISCCTRL_SCALEUP_H | S5P_CISCCTRL_SCALEUP_V |
- S5P_CISCCTRL_SCALERBYPASS | S5P_CISCCTRL_ONE2ONE |
- S5P_CISCCTRL_INRGB_FMT_MASK | S5P_CISCCTRL_OUTRGB_FMT_MASK |
- S5P_CISCCTRL_INTERLACE | S5P_CISCCTRL_RGB_EXT);
+ cfg &= ~(FIMC_REG_CISCCTRL_CSCR2Y_WIDE | FIMC_REG_CISCCTRL_CSCY2R_WIDE |
+ FIMC_REG_CISCCTRL_SCALEUP_H | FIMC_REG_CISCCTRL_SCALEUP_V |
+ FIMC_REG_CISCCTRL_SCALERBYPASS | FIMC_REG_CISCCTRL_ONE2ONE |
+ FIMC_REG_CISCCTRL_INRGB_FMT_MASK | FIMC_REG_CISCCTRL_OUTRGB_FMT_MASK |
+ FIMC_REG_CISCCTRL_INTERLACE | FIMC_REG_CISCCTRL_RGB_EXT);
if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
- cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
+ cfg |= (FIMC_REG_CISCCTRL_CSCR2Y_WIDE |
+ FIMC_REG_CISCCTRL_CSCY2R_WIDE);
if (!sc->enabled)
- cfg |= S5P_CISCCTRL_SCALERBYPASS;
+ cfg |= FIMC_REG_CISCCTRL_SCALERBYPASS;
if (sc->scaleup_h)
- cfg |= S5P_CISCCTRL_SCALEUP_H;
+ cfg |= FIMC_REG_CISCCTRL_SCALEUP_H;
if (sc->scaleup_v)
- cfg |= S5P_CISCCTRL_SCALEUP_V;
+ cfg |= FIMC_REG_CISCCTRL_SCALEUP_V;
if (sc->copy_mode)
- cfg |= S5P_CISCCTRL_ONE2ONE;
+ cfg |= FIMC_REG_CISCCTRL_ONE2ONE;
- if (ctx->in_path == FIMC_DMA) {
+ if (ctx->in_path == FIMC_IO_DMA) {
switch (src_frame->fmt->color) {
- case S5P_FIMC_RGB565:
- cfg |= S5P_CISCCTRL_INRGB_FMT_RGB565;
+ case FIMC_FMT_RGB565:
+ cfg |= FIMC_REG_CISCCTRL_INRGB_FMT_RGB565;
break;
- case S5P_FIMC_RGB666:
- cfg |= S5P_CISCCTRL_INRGB_FMT_RGB666;
+ case FIMC_FMT_RGB666:
+ cfg |= FIMC_REG_CISCCTRL_INRGB_FMT_RGB666;
break;
- case S5P_FIMC_RGB888:
- cfg |= S5P_CISCCTRL_INRGB_FMT_RGB888;
+ case FIMC_FMT_RGB888:
+ cfg |= FIMC_REG_CISCCTRL_INRGB_FMT_RGB888;
break;
}
}
- if (ctx->out_path == FIMC_DMA) {
+ if (ctx->out_path == FIMC_IO_DMA) {
u32 color = dst_frame->fmt->color;
- if (color >= S5P_FIMC_RGB444 && color <= S5P_FIMC_RGB565)
- cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB565;
- else if (color == S5P_FIMC_RGB666)
- cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB666;
- else if (color == S5P_FIMC_RGB888)
- cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB888;
+ if (color >= FIMC_FMT_RGB444 && color <= FIMC_FMT_RGB565)
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB565;
+ else if (color == FIMC_FMT_RGB666)
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB666;
+ else if (color == FIMC_FMT_RGB888)
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888;
} else {
- cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB888;
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888;
if (ctx->flags & FIMC_SCAN_MODE_INTERLACED)
- cfg |= S5P_CISCCTRL_INTERLACE;
+ cfg |= FIMC_REG_CISCCTRL_INTERLACE;
}
- writel(cfg, dev->regs + S5P_CISCCTRL);
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
}
void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- struct samsung_fimc_variant *variant = dev->variant;
+ struct fimc_variant *variant = dev->variant;
struct fimc_scaler *sc = &ctx->scaler;
u32 cfg;
dbg("main_hratio= 0x%X main_vratio= 0x%X",
- sc->main_hratio, sc->main_vratio);
+ sc->main_hratio, sc->main_vratio);
fimc_hw_set_scaler(ctx);
- cfg = readl(dev->regs + S5P_CISCCTRL);
- cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
+ cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+ cfg &= ~(FIMC_REG_CISCCTRL_MHRATIO_MASK |
+ FIMC_REG_CISCCTRL_MVRATIO_MASK);
if (variant->has_mainscaler_ext) {
- cfg |= S5P_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
- cfg |= S5P_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
- writel(cfg, dev->regs + S5P_CISCCTRL);
+ cfg |= FIMC_REG_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
+ cfg |= FIMC_REG_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
- cfg = readl(dev->regs + S5P_CIEXTEN);
+ cfg = readl(dev->regs + FIMC_REG_CIEXTEN);
- cfg &= ~(S5P_CIEXTEN_MVRATIO_EXT_MASK |
- S5P_CIEXTEN_MHRATIO_EXT_MASK);
- cfg |= S5P_CIEXTEN_MHRATIO_EXT(sc->main_hratio);
- cfg |= S5P_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
- writel(cfg, dev->regs + S5P_CIEXTEN);
+ cfg &= ~(FIMC_REG_CIEXTEN_MVRATIO_EXT_MASK |
+ FIMC_REG_CIEXTEN_MHRATIO_EXT_MASK);
+ cfg |= FIMC_REG_CIEXTEN_MHRATIO_EXT(sc->main_hratio);
+ cfg |= FIMC_REG_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
+ writel(cfg, dev->regs + FIMC_REG_CIEXTEN);
} else {
- cfg |= S5P_CISCCTRL_MHRATIO(sc->main_hratio);
- cfg |= S5P_CISCCTRL_MVRATIO(sc->main_vratio);
- writel(cfg, dev->regs + S5P_CISCCTRL);
+ cfg |= FIMC_REG_CISCCTRL_MHRATIO(sc->main_hratio);
+ cfg |= FIMC_REG_CISCCTRL_MVRATIO(sc->main_vratio);
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
}
}
@@ -357,40 +348,41 @@ void fimc_hw_en_capture(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- u32 cfg = readl(dev->regs + S5P_CIIMGCPT);
+ u32 cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
- if (ctx->out_path == FIMC_DMA) {
+ if (ctx->out_path == FIMC_IO_DMA) {
/* one shot mode */
- cfg |= S5P_CIIMGCPT_CPT_FREN_ENABLE | S5P_CIIMGCPT_IMGCPTEN;
+ cfg |= FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE |
+ FIMC_REG_CIIMGCPT_IMGCPTEN;
} else {
/* Continuous frame capture mode (freerun). */
- cfg &= ~(S5P_CIIMGCPT_CPT_FREN_ENABLE |
- S5P_CIIMGCPT_CPT_FRMOD_CNT);
- cfg |= S5P_CIIMGCPT_IMGCPTEN;
+ cfg &= ~(FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE |
+ FIMC_REG_CIIMGCPT_CPT_FRMOD_CNT);
+ cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN;
}
if (ctx->scaler.enabled)
- cfg |= S5P_CIIMGCPT_IMGCPTEN_SC;
+ cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN_SC;
- writel(cfg | S5P_CIIMGCPT_IMGCPTEN, dev->regs + S5P_CIIMGCPT);
+ cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN;
+ writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
}
-void fimc_hw_set_effect(struct fimc_ctx *ctx, bool active)
+void fimc_hw_set_effect(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
struct fimc_effect *effect = &ctx->effect;
u32 cfg = 0;
- if (active) {
- cfg |= S5P_CIIMGEFF_IE_SC_AFTER | S5P_CIIMGEFF_IE_ENABLE;
+ if (effect->type != FIMC_REG_CIIMGEFF_FIN_BYPASS) {
+ cfg |= FIMC_REG_CIIMGEFF_IE_SC_AFTER |
+ FIMC_REG_CIIMGEFF_IE_ENABLE;
cfg |= effect->type;
- if (effect->type == S5P_FIMC_EFFECT_ARBITRARY) {
- cfg |= S5P_CIIMGEFF_PAT_CB(effect->pat_cb);
- cfg |= S5P_CIIMGEFF_PAT_CR(effect->pat_cr);
- }
+ if (effect->type == FIMC_REG_CIIMGEFF_FIN_ARBITRARY)
+ cfg |= (effect->pat_cb << 13) | effect->pat_cr;
}
- writel(cfg, dev->regs + S5P_CIIMGEFF);
+ writel(cfg, dev->regs + FIMC_REG_CIIMGEFF);
}
void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx)
@@ -402,10 +394,10 @@ void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx)
if (!(frame->fmt->flags & FMT_HAS_ALPHA))
return;
- cfg = readl(dev->regs + S5P_CIOCTRL);
- cfg &= ~S5P_CIOCTRL_ALPHA_OUT_MASK;
+ cfg = readl(dev->regs + FIMC_REG_CIOCTRL);
+ cfg &= ~FIMC_REG_CIOCTRL_ALPHA_OUT_MASK;
cfg |= (frame->alpha << 4);
- writel(cfg, dev->regs + S5P_CIOCTRL);
+ writel(cfg, dev->regs + FIMC_REG_CIOCTRL);
}
static void fimc_hw_set_in_dma_size(struct fimc_ctx *ctx)
@@ -415,16 +407,14 @@ static void fimc_hw_set_in_dma_size(struct fimc_ctx *ctx)
u32 cfg_o = 0;
u32 cfg_r = 0;
- if (FIMC_LCDFIFO == ctx->out_path)
- cfg_r |= S5P_CIREAL_ISIZE_AUTOLOAD_EN;
+ if (FIMC_IO_LCDFIFO == ctx->out_path)
+ cfg_r |= FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN;
- cfg_o |= S5P_ORIG_SIZE_HOR(frame->f_width);
- cfg_o |= S5P_ORIG_SIZE_VER(frame->f_height);
- cfg_r |= S5P_CIREAL_ISIZE_WIDTH(frame->width);
- cfg_r |= S5P_CIREAL_ISIZE_HEIGHT(frame->height);
+ cfg_o |= (frame->f_height << 16) | frame->f_width;
+ cfg_r |= (frame->height << 16) | frame->width;
- writel(cfg_o, dev->regs + S5P_ORGISIZE);
- writel(cfg_r, dev->regs + S5P_CIREAL_ISIZE);
+ writel(cfg_o, dev->regs + FIMC_REG_ORGISIZE);
+ writel(cfg_r, dev->regs + FIMC_REG_CIREAL_ISIZE);
}
void fimc_hw_set_in_dma(struct fimc_ctx *ctx)
@@ -435,80 +425,77 @@ void fimc_hw_set_in_dma(struct fimc_ctx *ctx)
u32 cfg;
/* Set the pixel offsets. */
- cfg = S5P_CIO_OFFS_HOR(offset->y_h);
- cfg |= S5P_CIO_OFFS_VER(offset->y_v);
- writel(cfg, dev->regs + S5P_CIIYOFF);
+ cfg = (offset->y_v << 16) | offset->y_h;
+ writel(cfg, dev->regs + FIMC_REG_CIIYOFF);
- cfg = S5P_CIO_OFFS_HOR(offset->cb_h);
- cfg |= S5P_CIO_OFFS_VER(offset->cb_v);
- writel(cfg, dev->regs + S5P_CIICBOFF);
+ cfg = (offset->cb_v << 16) | offset->cb_h;
+ writel(cfg, dev->regs + FIMC_REG_CIICBOFF);
- cfg = S5P_CIO_OFFS_HOR(offset->cr_h);
- cfg |= S5P_CIO_OFFS_VER(offset->cr_v);
- writel(cfg, dev->regs + S5P_CIICROFF);
+ cfg = (offset->cr_v << 16) | offset->cr_h;
+ writel(cfg, dev->regs + FIMC_REG_CIICROFF);
/* Input original and real size. */
fimc_hw_set_in_dma_size(ctx);
/* Use DMA autoload only in FIFO mode. */
- fimc_hw_en_autoload(dev, ctx->out_path == FIMC_LCDFIFO);
+ fimc_hw_en_autoload(dev, ctx->out_path == FIMC_IO_LCDFIFO);
/* Set the input DMA to process single frame only. */
- cfg = readl(dev->regs + S5P_MSCTRL);
- cfg &= ~(S5P_MSCTRL_INFORMAT_MASK
- | S5P_MSCTRL_IN_BURST_COUNT_MASK
- | S5P_MSCTRL_INPUT_MASK
- | S5P_MSCTRL_C_INT_IN_MASK
- | S5P_MSCTRL_2P_IN_ORDER_MASK);
+ cfg = readl(dev->regs + FIMC_REG_MSCTRL);
+ cfg &= ~(FIMC_REG_MSCTRL_INFORMAT_MASK
+ | FIMC_REG_MSCTRL_IN_BURST_COUNT_MASK
+ | FIMC_REG_MSCTRL_INPUT_MASK
+ | FIMC_REG_MSCTRL_C_INT_IN_MASK
+ | FIMC_REG_MSCTRL_2P_IN_ORDER_MASK);
- cfg |= (S5P_MSCTRL_IN_BURST_COUNT(4)
- | S5P_MSCTRL_INPUT_MEMORY
- | S5P_MSCTRL_FIFO_CTRL_FULL);
+ cfg |= (FIMC_REG_MSCTRL_IN_BURST_COUNT(4)
+ | FIMC_REG_MSCTRL_INPUT_MEMORY
+ | FIMC_REG_MSCTRL_FIFO_CTRL_FULL);
switch (frame->fmt->color) {
- case S5P_FIMC_RGB565...S5P_FIMC_RGB888:
- cfg |= S5P_MSCTRL_INFORMAT_RGB;
+ case FIMC_FMT_RGB565...FIMC_FMT_RGB888:
+ cfg |= FIMC_REG_MSCTRL_INFORMAT_RGB;
break;
- case S5P_FIMC_YCBCR420:
- cfg |= S5P_MSCTRL_INFORMAT_YCBCR420;
+ case FIMC_FMT_YCBCR420:
+ cfg |= FIMC_REG_MSCTRL_INFORMAT_YCBCR420;
if (frame->fmt->colplanes == 2)
- cfg |= ctx->in_order_2p | S5P_MSCTRL_C_INT_IN_2PLANE;
+ cfg |= ctx->in_order_2p | FIMC_REG_MSCTRL_C_INT_IN_2PLANE;
else
- cfg |= S5P_MSCTRL_C_INT_IN_3PLANE;
+ cfg |= FIMC_REG_MSCTRL_C_INT_IN_3PLANE;
break;
- case S5P_FIMC_YCBYCR422...S5P_FIMC_CRYCBY422:
+ case FIMC_FMT_YCBYCR422...FIMC_FMT_CRYCBY422:
if (frame->fmt->colplanes == 1) {
cfg |= ctx->in_order_1p
- | S5P_MSCTRL_INFORMAT_YCBCR422_1P;
+ | FIMC_REG_MSCTRL_INFORMAT_YCBCR422_1P;
} else {
- cfg |= S5P_MSCTRL_INFORMAT_YCBCR422;
+ cfg |= FIMC_REG_MSCTRL_INFORMAT_YCBCR422;
if (frame->fmt->colplanes == 2)
cfg |= ctx->in_order_2p
- | S5P_MSCTRL_C_INT_IN_2PLANE;
+ | FIMC_REG_MSCTRL_C_INT_IN_2PLANE;
else
- cfg |= S5P_MSCTRL_C_INT_IN_3PLANE;
+ cfg |= FIMC_REG_MSCTRL_C_INT_IN_3PLANE;
}
break;
default:
break;
}
- writel(cfg, dev->regs + S5P_MSCTRL);
+ writel(cfg, dev->regs + FIMC_REG_MSCTRL);
/* Input/output DMA linear/tiled mode. */
- cfg = readl(dev->regs + S5P_CIDMAPARAM);
- cfg &= ~S5P_CIDMAPARAM_TILE_MASK;
+ cfg = readl(dev->regs + FIMC_REG_CIDMAPARAM);
+ cfg &= ~FIMC_REG_CIDMAPARAM_TILE_MASK;
if (tiled_fmt(ctx->s_frame.fmt))
- cfg |= S5P_CIDMAPARAM_R_64X32;
+ cfg |= FIMC_REG_CIDMAPARAM_R_64X32;
if (tiled_fmt(ctx->d_frame.fmt))
- cfg |= S5P_CIDMAPARAM_W_64X32;
+ cfg |= FIMC_REG_CIDMAPARAM_W_64X32;
- writel(cfg, dev->regs + S5P_CIDMAPARAM);
+ writel(cfg, dev->regs + FIMC_REG_CIDMAPARAM);
}
@@ -516,40 +503,40 @@ void fimc_hw_set_input_path(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- u32 cfg = readl(dev->regs + S5P_MSCTRL);
- cfg &= ~S5P_MSCTRL_INPUT_MASK;
+ u32 cfg = readl(dev->regs + FIMC_REG_MSCTRL);
+ cfg &= ~FIMC_REG_MSCTRL_INPUT_MASK;
- if (ctx->in_path == FIMC_DMA)
- cfg |= S5P_MSCTRL_INPUT_MEMORY;
+ if (ctx->in_path == FIMC_IO_DMA)
+ cfg |= FIMC_REG_MSCTRL_INPUT_MEMORY;
else
- cfg |= S5P_MSCTRL_INPUT_EXTCAM;
+ cfg |= FIMC_REG_MSCTRL_INPUT_EXTCAM;
- writel(cfg, dev->regs + S5P_MSCTRL);
+ writel(cfg, dev->regs + FIMC_REG_MSCTRL);
}
void fimc_hw_set_output_path(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- u32 cfg = readl(dev->regs + S5P_CISCCTRL);
- cfg &= ~S5P_CISCCTRL_LCDPATHEN_FIFO;
- if (ctx->out_path == FIMC_LCDFIFO)
- cfg |= S5P_CISCCTRL_LCDPATHEN_FIFO;
- writel(cfg, dev->regs + S5P_CISCCTRL);
+ u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+ cfg &= ~FIMC_REG_CISCCTRL_LCDPATHEN_FIFO;
+ if (ctx->out_path == FIMC_IO_LCDFIFO)
+ cfg |= FIMC_REG_CISCCTRL_LCDPATHEN_FIFO;
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
}
void fimc_hw_set_input_addr(struct fimc_dev *dev, struct fimc_addr *paddr)
{
- u32 cfg = readl(dev->regs + S5P_CIREAL_ISIZE);
- cfg |= S5P_CIREAL_ISIZE_ADDR_CH_DIS;
- writel(cfg, dev->regs + S5P_CIREAL_ISIZE);
+ u32 cfg = readl(dev->regs + FIMC_REG_CIREAL_ISIZE);
+ cfg |= FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS;
+ writel(cfg, dev->regs + FIMC_REG_CIREAL_ISIZE);
- writel(paddr->y, dev->regs + S5P_CIIYSA(0));
- writel(paddr->cb, dev->regs + S5P_CIICBSA(0));
- writel(paddr->cr, dev->regs + S5P_CIICRSA(0));
+ writel(paddr->y, dev->regs + FIMC_REG_CIIYSA(0));
+ writel(paddr->cb, dev->regs + FIMC_REG_CIICBSA(0));
+ writel(paddr->cr, dev->regs + FIMC_REG_CIICRSA(0));
- cfg &= ~S5P_CIREAL_ISIZE_ADDR_CH_DIS;
- writel(cfg, dev->regs + S5P_CIREAL_ISIZE);
+ cfg &= ~FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS;
+ writel(cfg, dev->regs + FIMC_REG_CIREAL_ISIZE);
}
void fimc_hw_set_output_addr(struct fimc_dev *dev,
@@ -557,9 +544,9 @@ void fimc_hw_set_output_addr(struct fimc_dev *dev,
{
int i = (index == -1) ? 0 : index;
do {
- writel(paddr->y, dev->regs + S5P_CIOYSA(i));
- writel(paddr->cb, dev->regs + S5P_CIOCBSA(i));
- writel(paddr->cr, dev->regs + S5P_CIOCRSA(i));
+ writel(paddr->y, dev->regs + FIMC_REG_CIOYSA(i));
+ writel(paddr->cb, dev->regs + FIMC_REG_CIOCBSA(i));
+ writel(paddr->cr, dev->regs + FIMC_REG_CIOCRSA(i));
dbg("dst_buf[%d]: 0x%X, cb: 0x%X, cr: 0x%X",
i, paddr->y, paddr->cb, paddr->cr);
} while (index == -1 && ++i < FIMC_MAX_OUT_BUFS);
@@ -568,32 +555,45 @@ void fimc_hw_set_output_addr(struct fimc_dev *dev,
int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
struct s5p_fimc_isp_info *cam)
{
- u32 cfg = readl(fimc->regs + S5P_CIGCTRL);
+ u32 cfg = readl(fimc->regs + FIMC_REG_CIGCTRL);
- cfg &= ~(S5P_CIGCTRL_INVPOLPCLK | S5P_CIGCTRL_INVPOLVSYNC |
- S5P_CIGCTRL_INVPOLHREF | S5P_CIGCTRL_INVPOLHSYNC |
- S5P_CIGCTRL_INVPOLFIELD);
+ cfg &= ~(FIMC_REG_CIGCTRL_INVPOLPCLK | FIMC_REG_CIGCTRL_INVPOLVSYNC |
+ FIMC_REG_CIGCTRL_INVPOLHREF | FIMC_REG_CIGCTRL_INVPOLHSYNC |
+ FIMC_REG_CIGCTRL_INVPOLFIELD);
if (cam->flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
- cfg |= S5P_CIGCTRL_INVPOLPCLK;
+ cfg |= FIMC_REG_CIGCTRL_INVPOLPCLK;
if (cam->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
- cfg |= S5P_CIGCTRL_INVPOLVSYNC;
+ cfg |= FIMC_REG_CIGCTRL_INVPOLVSYNC;
if (cam->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
- cfg |= S5P_CIGCTRL_INVPOLHREF;
+ cfg |= FIMC_REG_CIGCTRL_INVPOLHREF;
if (cam->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
- cfg |= S5P_CIGCTRL_INVPOLHSYNC;
+ cfg |= FIMC_REG_CIGCTRL_INVPOLHSYNC;
if (cam->flags & V4L2_MBUS_FIELD_EVEN_LOW)
- cfg |= S5P_CIGCTRL_INVPOLFIELD;
+ cfg |= FIMC_REG_CIGCTRL_INVPOLFIELD;
- writel(cfg, fimc->regs + S5P_CIGCTRL);
+ writel(cfg, fimc->regs + FIMC_REG_CIGCTRL);
return 0;
}
+struct mbus_pixfmt_desc {
+ u32 pixelcode;
+ u32 cisrcfmt;
+ u16 bus_width;
+};
+
+static const struct mbus_pixfmt_desc pix_desc[] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCBYCR, 8 },
+ { V4L2_MBUS_FMT_YVYU8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCRYCB, 8 },
+ { V4L2_MBUS_FMT_VYUY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CRYCBY, 8 },
+ { V4L2_MBUS_FMT_UYVY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CBYCRY, 8 },
+};
+
int fimc_hw_set_camera_source(struct fimc_dev *fimc,
struct s5p_fimc_isp_info *cam)
{
@@ -602,18 +602,6 @@ int fimc_hw_set_camera_source(struct fimc_dev *fimc,
u32 bus_width;
int i;
- static const struct {
- u32 pixelcode;
- u32 cisrcfmt;
- u16 bus_width;
- } pix_desc[] = {
- { V4L2_MBUS_FMT_YUYV8_2X8, S5P_CISRCFMT_ORDER422_YCBYCR, 8 },
- { V4L2_MBUS_FMT_YVYU8_2X8, S5P_CISRCFMT_ORDER422_YCRYCB, 8 },
- { V4L2_MBUS_FMT_VYUY8_2X8, S5P_CISRCFMT_ORDER422_CRYCBY, 8 },
- { V4L2_MBUS_FMT_UYVY8_2X8, S5P_CISRCFMT_ORDER422_CBYCRY, 8 },
- /* TODO: Add pixel codes for 16-bit bus width */
- };
-
if (cam->bus_type == FIMC_ITU_601 || cam->bus_type == FIMC_ITU_656) {
for (i = 0; i < ARRAY_SIZE(pix_desc); i++) {
if (fimc->vid_cap.mf.code == pix_desc[i].pixelcode) {
@@ -632,41 +620,37 @@ int fimc_hw_set_camera_source(struct fimc_dev *fimc,
if (cam->bus_type == FIMC_ITU_601) {
if (bus_width == 8)
- cfg |= S5P_CISRCFMT_ITU601_8BIT;
+ cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
else if (bus_width == 16)
- cfg |= S5P_CISRCFMT_ITU601_16BIT;
+ cfg |= FIMC_REG_CISRCFMT_ITU601_16BIT;
} /* else defaults to ITU-R BT.656 8-bit */
} else if (cam->bus_type == FIMC_MIPI_CSI2) {
if (fimc_fmt_is_jpeg(f->fmt->color))
- cfg |= S5P_CISRCFMT_ITU601_8BIT;
+ cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
}
- cfg |= S5P_CISRCFMT_HSIZE(f->o_width) | S5P_CISRCFMT_VSIZE(f->o_height);
- writel(cfg, fimc->regs + S5P_CISRCFMT);
+ cfg |= (f->o_width << 16) | f->o_height;
+ writel(cfg, fimc->regs + FIMC_REG_CISRCFMT);
return 0;
}
-
-int fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f)
+void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f)
{
u32 hoff2, voff2;
- u32 cfg = readl(fimc->regs + S5P_CIWDOFST);
+ u32 cfg = readl(fimc->regs + FIMC_REG_CIWDOFST);
- cfg &= ~(S5P_CIWDOFST_HOROFF_MASK | S5P_CIWDOFST_VEROFF_MASK);
- cfg |= S5P_CIWDOFST_OFF_EN |
- S5P_CIWDOFST_HOROFF(f->offs_h) |
- S5P_CIWDOFST_VEROFF(f->offs_v);
+ cfg &= ~(FIMC_REG_CIWDOFST_HOROFF_MASK | FIMC_REG_CIWDOFST_VEROFF_MASK);
+ cfg |= FIMC_REG_CIWDOFST_OFF_EN |
+ (f->offs_h << 16) | f->offs_v;
- writel(cfg, fimc->regs + S5P_CIWDOFST);
+ writel(cfg, fimc->regs + FIMC_REG_CIWDOFST);
/* See CIWDOFSTn register description in the datasheet for details. */
hoff2 = f->o_width - f->width - f->offs_h;
voff2 = f->o_height - f->height - f->offs_v;
- cfg = S5P_CIWDOFST2_HOROFF(hoff2) | S5P_CIWDOFST2_VEROFF(voff2);
-
- writel(cfg, fimc->regs + S5P_CIWDOFST2);
- return 0;
+ cfg = (hoff2 << 16) | voff2;
+ writel(cfg, fimc->regs + FIMC_REG_CIWDOFST2);
}
int fimc_hw_set_camera_type(struct fimc_dev *fimc,
@@ -674,28 +658,29 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
{
u32 cfg, tmp;
struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ u32 csis_data_alignment = 32;
- cfg = readl(fimc->regs + S5P_CIGCTRL);
+ cfg = readl(fimc->regs + FIMC_REG_CIGCTRL);
/* Select ITU B interface, disable Writeback path and test pattern. */
- cfg &= ~(S5P_CIGCTRL_TESTPAT_MASK | S5P_CIGCTRL_SELCAM_ITU_A |
- S5P_CIGCTRL_SELCAM_MIPI | S5P_CIGCTRL_CAMIF_SELWB |
- S5P_CIGCTRL_SELCAM_MIPI_A | S5P_CIGCTRL_CAM_JPEG);
+ cfg &= ~(FIMC_REG_CIGCTRL_TESTPAT_MASK | FIMC_REG_CIGCTRL_SELCAM_ITU_A |
+ FIMC_REG_CIGCTRL_SELCAM_MIPI | FIMC_REG_CIGCTRL_CAMIF_SELWB |
+ FIMC_REG_CIGCTRL_SELCAM_MIPI_A | FIMC_REG_CIGCTRL_CAM_JPEG);
if (cam->bus_type == FIMC_MIPI_CSI2) {
- cfg |= S5P_CIGCTRL_SELCAM_MIPI;
+ cfg |= FIMC_REG_CIGCTRL_SELCAM_MIPI;
if (cam->mux_id == 0)
- cfg |= S5P_CIGCTRL_SELCAM_MIPI_A;
+ cfg |= FIMC_REG_CIGCTRL_SELCAM_MIPI_A;
/* TODO: add remaining supported formats. */
switch (vid_cap->mf.code) {
case V4L2_MBUS_FMT_VYUY8_2X8:
- tmp = S5P_CSIIMGFMT_YCBCR422_8BIT;
+ tmp = FIMC_REG_CSIIMGFMT_YCBCR422_8BIT;
break;
case V4L2_MBUS_FMT_JPEG_1X8:
- tmp = S5P_CSIIMGFMT_USER(1);
- cfg |= S5P_CIGCTRL_CAM_JPEG;
+ tmp = FIMC_REG_CSIIMGFMT_USER(1);
+ cfg |= FIMC_REG_CIGCTRL_CAM_JPEG;
break;
default:
v4l2_err(fimc->vid_cap.vfd,
@@ -703,21 +688,86 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
vid_cap->mf.code);
return -EINVAL;
}
- tmp |= (cam->csi_data_align == 32) << 8;
+ tmp |= (csis_data_alignment == 32) << 8;
- writel(tmp, fimc->regs + S5P_CSIIMGFMT);
+ writel(tmp, fimc->regs + FIMC_REG_CSIIMGFMT);
} else if (cam->bus_type == FIMC_ITU_601 ||
cam->bus_type == FIMC_ITU_656) {
if (cam->mux_id == 0) /* ITU-A, ITU-B: 0, 1 */
- cfg |= S5P_CIGCTRL_SELCAM_ITU_A;
+ cfg |= FIMC_REG_CIGCTRL_SELCAM_ITU_A;
} else if (cam->bus_type == FIMC_LCD_WB) {
- cfg |= S5P_CIGCTRL_CAMIF_SELWB;
+ cfg |= FIMC_REG_CIGCTRL_CAMIF_SELWB;
} else {
err("invalid camera bus type selected\n");
return -EINVAL;
}
- writel(cfg, fimc->regs + S5P_CIGCTRL);
+ writel(cfg, fimc->regs + FIMC_REG_CIGCTRL);
return 0;
}
+
+void fimc_hw_clear_irq(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+ cfg |= FIMC_REG_CIGCTRL_IRQ_CLR;
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
+}
+
+void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+ if (on)
+ cfg |= FIMC_REG_CISCCTRL_SCALERSTART;
+ else
+ cfg &= ~FIMC_REG_CISCCTRL_SCALERSTART;
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+}
+
+void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_MSCTRL);
+ if (on)
+ cfg |= FIMC_REG_MSCTRL_ENVID;
+ else
+ cfg &= ~FIMC_REG_MSCTRL_ENVID;
+ writel(cfg, dev->regs + FIMC_REG_MSCTRL);
+}
+
+void fimc_hw_dis_capture(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
+ cfg &= ~(FIMC_REG_CIIMGCPT_IMGCPTEN | FIMC_REG_CIIMGCPT_IMGCPTEN_SC);
+ writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
+}
+
+/* Return an index to the buffer actually being written. */
+u32 fimc_hw_get_frame_index(struct fimc_dev *dev)
+{
+ u32 reg;
+
+ if (dev->variant->has_cistatus2) {
+ reg = readl(dev->regs + FIMC_REG_CISTATUS2) & 0x3F;
+ return reg > 0 ? --reg : reg;
+ }
+
+ reg = readl(dev->regs + FIMC_REG_CISTATUS);
+
+ return (reg & FIMC_REG_CISTATUS_FRAMECNT_MASK) >>
+ FIMC_REG_CISTATUS_FRAMECNT_SHIFT;
+}
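A sketch of how the frame index readback above might be consumed in the capture interrupt path; the handler name is hypothetical and buffer-queue handling is omitted:

	static void demo_frame_done(struct fimc_dev *dev)
	{
		/* Index of the output address slot the DMA engine is writing now. */
		u32 idx = fimc_hw_get_frame_index(dev);

		/* A real handler would complete the matching videobuf2 buffer. */
		(void)idx;
	}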
+
+/* Locking: the caller holds fimc->slock */
+void fimc_activate_capture(struct fimc_ctx *ctx)
+{
+ fimc_hw_enable_scaler(ctx->fimc_dev, ctx->scaler.enabled);
+ fimc_hw_en_capture(ctx);
+}
+
+void fimc_deactivate_capture(struct fimc_dev *fimc)
+{
+ fimc_hw_en_lastirq(fimc, true);
+ fimc_hw_dis_capture(fimc);
+ fimc_hw_enable_scaler(fimc, false);
+ fimc_hw_en_lastirq(fimc, false);
+}
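A minimal sketch of the locking contract noted above fimc_activate_capture(), assuming the fimc_dev spinlock is taken around capture (de)activation as elsewhere in the driver; the wrapper name is made up:

	#include <linux/spinlock.h>

	static void demo_start_capture(struct fimc_ctx *ctx)
	{
		struct fimc_dev *fimc = ctx->fimc_dev;
		unsigned long flags;

		spin_lock_irqsave(&fimc->slock, flags);
		fimc_activate_capture(ctx);
		spin_unlock_irqrestore(&fimc->slock, flags);
	}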
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.h b/drivers/media/video/s5p-fimc/fimc-reg.h
new file mode 100644
index 000000000000..579ac8ac03de
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-reg.h
@@ -0,0 +1,326 @@
+/*
+ * Samsung camera host interface (FIMC) registers definition
+ *
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_REG_H_
+#define FIMC_REG_H_
+
+#include "fimc-core.h"
+
+/* Input source format */
+#define FIMC_REG_CISRCFMT 0x00
+#define FIMC_REG_CISRCFMT_ITU601_8BIT (1 << 31)
+#define FIMC_REG_CISRCFMT_ITU601_16BIT (1 << 29)
+#define FIMC_REG_CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define FIMC_REG_CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define FIMC_REG_CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define FIMC_REG_CISRCFMT_ORDER422_CRYCBY (3 << 14)
+
+/* Window offset */
+#define FIMC_REG_CIWDOFST 0x04
+#define FIMC_REG_CIWDOFST_OFF_EN (1 << 31)
+#define FIMC_REG_CIWDOFST_CLROVFIY (1 << 30)
+#define FIMC_REG_CIWDOFST_CLROVRLB (1 << 29)
+#define FIMC_REG_CIWDOFST_HOROFF_MASK (0x7ff << 16)
+#define FIMC_REG_CIWDOFST_CLROVFICB (1 << 15)
+#define FIMC_REG_CIWDOFST_CLROVFICR (1 << 14)
+#define FIMC_REG_CIWDOFST_VEROFF_MASK (0xfff << 0)
+
+/* Global control */
+#define FIMC_REG_CIGCTRL 0x08
+#define FIMC_REG_CIGCTRL_SWRST (1 << 31)
+#define FIMC_REG_CIGCTRL_CAMRST_A (1 << 30)
+#define FIMC_REG_CIGCTRL_SELCAM_ITU_A (1 << 29)
+#define FIMC_REG_CIGCTRL_TESTPAT_NORMAL (0 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_COLOR_BAR (1 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_HOR_INC (2 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_VER_INC (3 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_MASK (3 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_SHIFT 27
+#define FIMC_REG_CIGCTRL_INVPOLPCLK (1 << 26)
+#define FIMC_REG_CIGCTRL_INVPOLVSYNC (1 << 25)
+#define FIMC_REG_CIGCTRL_INVPOLHREF (1 << 24)
+#define FIMC_REG_CIGCTRL_IRQ_OVFEN (1 << 22)
+#define FIMC_REG_CIGCTRL_HREF_MASK (1 << 21)
+#define FIMC_REG_CIGCTRL_IRQ_LEVEL (1 << 20)
+#define FIMC_REG_CIGCTRL_IRQ_CLR (1 << 19)
+#define FIMC_REG_CIGCTRL_IRQ_ENABLE (1 << 16)
+#define FIMC_REG_CIGCTRL_SHDW_DISABLE (1 << 12)
+#define FIMC_REG_CIGCTRL_CAM_JPEG (1 << 8)
+#define FIMC_REG_CIGCTRL_SELCAM_MIPI_A (1 << 7)
+#define FIMC_REG_CIGCTRL_CAMIF_SELWB (1 << 6)
+/* 0 - ITU601; 1 - ITU709 */
+#define FIMC_REG_CIGCTRL_CSC_ITU601_709 (1 << 5)
+#define FIMC_REG_CIGCTRL_INVPOLHSYNC (1 << 4)
+#define FIMC_REG_CIGCTRL_SELCAM_MIPI (1 << 3)
+#define FIMC_REG_CIGCTRL_INVPOLFIELD (1 << 1)
+#define FIMC_REG_CIGCTRL_INTERLACE (1 << 0)
+
+/* Window offset 2 */
+#define FIMC_REG_CIWDOFST2 0x14
+#define FIMC_REG_CIWDOFST2_HOROFF_MASK (0xfff << 16)
+#define FIMC_REG_CIWDOFST2_VEROFF_MASK (0xfff << 0)
+
+/* Output DMA Y/Cb/Cr plane start addresses */
+#define FIMC_REG_CIOYSA(n) (0x18 + (n) * 4)
+#define FIMC_REG_CIOCBSA(n) (0x28 + (n) * 4)
+#define FIMC_REG_CIOCRSA(n) (0x38 + (n) * 4)
+
+/* Target image format */
+#define FIMC_REG_CITRGFMT 0x48
+#define FIMC_REG_CITRGFMT_INROT90 (1 << 31)
+#define FIMC_REG_CITRGFMT_YCBCR420 (0 << 29)
+#define FIMC_REG_CITRGFMT_YCBCR422 (1 << 29)
+#define FIMC_REG_CITRGFMT_YCBCR422_1P (2 << 29)
+#define FIMC_REG_CITRGFMT_RGB (3 << 29)
+#define FIMC_REG_CITRGFMT_FMT_MASK (3 << 29)
+#define FIMC_REG_CITRGFMT_HSIZE_MASK (0xfff << 16)
+#define FIMC_REG_CITRGFMT_FLIP_SHIFT 14
+#define FIMC_REG_CITRGFMT_FLIP_NORMAL (0 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_180 (3 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_MASK (3 << 14)
+#define FIMC_REG_CITRGFMT_OUTROT90 (1 << 13)
+#define FIMC_REG_CITRGFMT_VSIZE_MASK (0xfff << 0)
+
+/* Output DMA control */
+#define FIMC_REG_CIOCTRL 0x4c
+#define FIMC_REG_CIOCTRL_ORDER422_MASK (3 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_CRYCBY (0 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_CBYCRY (1 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_YCRYCB (2 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_YCBYCR (3 << 0)
+#define FIMC_REG_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
+#define FIMC_REG_CIOCTRL_YCBCR_3PLANE (0 << 3)
+#define FIMC_REG_CIOCTRL_YCBCR_2PLANE (1 << 3)
+#define FIMC_REG_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
+#define FIMC_REG_CIOCTRL_ALPHA_OUT_MASK (0xff << 4)
+#define FIMC_REG_CIOCTRL_RGB16FMT_MASK (3 << 16)
+#define FIMC_REG_CIOCTRL_RGB565 (0 << 16)
+#define FIMC_REG_CIOCTRL_ARGB1555 (1 << 16)
+#define FIMC_REG_CIOCTRL_ARGB4444 (2 << 16)
+#define FIMC_REG_CIOCTRL_ORDER2P_SHIFT 24
+#define FIMC_REG_CIOCTRL_ORDER2P_MASK (3 << 24)
+#define FIMC_REG_CIOCTRL_ORDER422_2P_LSB_CRCB (0 << 24)
+
+/* Pre-scaler control 1 */
+#define FIMC_REG_CISCPRERATIO 0x50
+
+#define FIMC_REG_CISCPREDST 0x54
+
+/* Main scaler control */
+#define FIMC_REG_CISCCTRL 0x58
+#define FIMC_REG_CISCCTRL_SCALERBYPASS (1 << 31)
+#define FIMC_REG_CISCCTRL_SCALEUP_H (1 << 30)
+#define FIMC_REG_CISCCTRL_SCALEUP_V (1 << 29)
+#define FIMC_REG_CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define FIMC_REG_CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define FIMC_REG_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define FIMC_REG_CISCCTRL_INTERLACE (1 << 25)
+#define FIMC_REG_CISCCTRL_SCALERSTART (1 << 15)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_MASK (3 << 13)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_MASK (3 << 11)
+#define FIMC_REG_CISCCTRL_RGB_EXT (1 << 10)
+#define FIMC_REG_CISCCTRL_ONE2ONE (1 << 9)
+#define FIMC_REG_CISCCTRL_MHRATIO(x) ((x) << 16)
+#define FIMC_REG_CISCCTRL_MVRATIO(x) ((x) << 0)
+#define FIMC_REG_CISCCTRL_MHRATIO_MASK (0x1ff << 16)
+#define FIMC_REG_CISCCTRL_MVRATIO_MASK (0x1ff << 0)
+#define FIMC_REG_CISCCTRL_MHRATIO_EXT(x) (((x) >> 6) << 16)
+#define FIMC_REG_CISCCTRL_MVRATIO_EXT(x) (((x) >> 6) << 0)
+
+/* Target area */
+#define FIMC_REG_CITAREA 0x5c
+#define FIMC_REG_CITAREA_MASK 0x0fffffff
+
+/* General status */
+#define FIMC_REG_CISTATUS 0x64
+#define FIMC_REG_CISTATUS_OVFIY (1 << 31)
+#define FIMC_REG_CISTATUS_OVFICB (1 << 30)
+#define FIMC_REG_CISTATUS_OVFICR (1 << 29)
+#define FIMC_REG_CISTATUS_VSYNC (1 << 28)
+#define FIMC_REG_CISTATUS_FRAMECNT_MASK (3 << 26)
+#define FIMC_REG_CISTATUS_FRAMECNT_SHIFT 26
+#define FIMC_REG_CISTATUS_WINOFF_EN (1 << 25)
+#define FIMC_REG_CISTATUS_IMGCPT_EN (1 << 22)
+#define FIMC_REG_CISTATUS_IMGCPT_SCEN (1 << 21)
+#define FIMC_REG_CISTATUS_VSYNC_A (1 << 20)
+#define FIMC_REG_CISTATUS_VSYNC_B (1 << 19)
+#define FIMC_REG_CISTATUS_OVRLB (1 << 18)
+#define FIMC_REG_CISTATUS_FRAME_END (1 << 17)
+#define FIMC_REG_CISTATUS_LASTCAPT_END (1 << 16)
+#define FIMC_REG_CISTATUS_VVALID_A (1 << 15)
+#define FIMC_REG_CISTATUS_VVALID_B (1 << 14)
+
+/* Indexes to the last and the currently processed buffer. */
+#define FIMC_REG_CISTATUS2 0x68
+
+/* Image capture control */
+#define FIMC_REG_CIIMGCPT 0xc0
+#define FIMC_REG_CIIMGCPT_IMGCPTEN (1 << 31)
+#define FIMC_REG_CIIMGCPT_IMGCPTEN_SC (1 << 30)
+#define FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
+#define FIMC_REG_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Frame capture sequence */
+#define FIMC_REG_CICPTSEQ 0xc4
+
+/* Image effect */
+#define FIMC_REG_CIIMGEFF 0xd0
+#define FIMC_REG_CIIMGEFF_IE_ENABLE (1 << 30)
+#define FIMC_REG_CIIMGEFF_IE_SC_BEFORE (0 << 29)
+#define FIMC_REG_CIIMGEFF_IE_SC_AFTER (1 << 29)
+#define FIMC_REG_CIIMGEFF_FIN_BYPASS (0 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_MASK (7 << 26)
+#define FIMC_REG_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | 0xff)
+
+/* Input DMA Y/Cb/Cr plane start address 0/1 */
+#define FIMC_REG_CIIYSA(n) (0xd4 + (n) * 0x70)
+#define FIMC_REG_CIICBSA(n) (0xd8 + (n) * 0x70)
+#define FIMC_REG_CIICRSA(n) (0xdc + (n) * 0x70)
+
+/* Real input DMA image size */
+#define FIMC_REG_CIREAL_ISIZE 0xf8
+#define FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN (1 << 31)
+#define FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS (1 << 30)
+
+/* Input DMA control */
+#define FIMC_REG_MSCTRL 0xfc
+#define FIMC_REG_MSCTRL_IN_BURST_COUNT_MASK (0xf << 24)
+#define FIMC_REG_MSCTRL_2P_IN_ORDER_MASK (3 << 16)
+#define FIMC_REG_MSCTRL_2P_IN_ORDER_SHIFT 16
+#define FIMC_REG_MSCTRL_C_INT_IN_3PLANE (0 << 15)
+#define FIMC_REG_MSCTRL_C_INT_IN_2PLANE (1 << 15)
+#define FIMC_REG_MSCTRL_C_INT_IN_MASK (1 << 15)
+#define FIMC_REG_MSCTRL_FLIP_SHIFT 13
+#define FIMC_REG_MSCTRL_FLIP_MASK (3 << 13)
+#define FIMC_REG_MSCTRL_FLIP_NORMAL (0 << 13)
+#define FIMC_REG_MSCTRL_FLIP_X_MIRROR (1 << 13)
+#define FIMC_REG_MSCTRL_FLIP_Y_MIRROR (2 << 13)
+#define FIMC_REG_MSCTRL_FLIP_180 (3 << 13)
+#define FIMC_REG_MSCTRL_FIFO_CTRL_FULL (1 << 12)
+#define FIMC_REG_MSCTRL_ORDER422_SHIFT 4
+#define FIMC_REG_MSCTRL_ORDER422_YCBYCR (0 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_CBYCRY (1 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_YCRYCB (2 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_CRYCBY (3 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_MASK (3 << 4)
+#define FIMC_REG_MSCTRL_INPUT_EXTCAM (0 << 3)
+#define FIMC_REG_MSCTRL_INPUT_MEMORY (1 << 3)
+#define FIMC_REG_MSCTRL_INPUT_MASK (1 << 3)
+#define FIMC_REG_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_YCBCR422_1P (2 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_RGB (3 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_MASK (3 << 1)
+#define FIMC_REG_MSCTRL_ENVID (1 << 0)
+#define FIMC_REG_MSCTRL_IN_BURST_COUNT(x) ((x) << 24)
+
+/* Output DMA Y/Cb/Cr offset */
+#define FIMC_REG_CIOYOFF 0x168
+#define FIMC_REG_CIOCBOFF 0x16c
+#define FIMC_REG_CIOCROFF 0x170
+
+/* Input DMA Y/Cb/Cr offset */
+#define FIMC_REG_CIIYOFF 0x174
+#define FIMC_REG_CIICBOFF 0x178
+#define FIMC_REG_CIICROFF 0x17c
+
+/* Input DMA original image size */
+#define FIMC_REG_ORGISIZE 0x180
+
+/* Output DMA original image size */
+#define FIMC_REG_ORGOSIZE 0x184
+
+/* Real output DMA image size (extension register) */
+#define FIMC_REG_CIEXTEN 0x188
+#define FIMC_REG_CIEXTEN_MHRATIO_EXT(x) (((x) & 0x3f) << 10)
+#define FIMC_REG_CIEXTEN_MVRATIO_EXT(x) ((x) & 0x3f)
+#define FIMC_REG_CIEXTEN_MHRATIO_EXT_MASK (0x3f << 10)
+#define FIMC_REG_CIEXTEN_MVRATIO_EXT_MASK 0x3f
+
+#define FIMC_REG_CIDMAPARAM 0x18c
+#define FIMC_REG_CIDMAPARAM_R_LINEAR (0 << 29)
+#define FIMC_REG_CIDMAPARAM_R_64X32 (3 << 29)
+#define FIMC_REG_CIDMAPARAM_W_LINEAR (0 << 13)
+#define FIMC_REG_CIDMAPARAM_W_64X32 (3 << 13)
+#define FIMC_REG_CIDMAPARAM_TILE_MASK ((3 << 29) | (3 << 13))
+
+/* MIPI CSI image format */
+#define FIMC_REG_CSIIMGFMT 0x194
+#define FIMC_REG_CSIIMGFMT_YCBCR422_8BIT 0x1e
+#define FIMC_REG_CSIIMGFMT_RAW8 0x2a
+#define FIMC_REG_CSIIMGFMT_RAW10 0x2b
+#define FIMC_REG_CSIIMGFMT_RAW12 0x2c
+/* User defined formats. x = 0...16. */
+#define FIMC_REG_CSIIMGFMT_USER(x) (0x30 + (x) - 1)
+
+/* Output frame buffer sequence mask */
+#define FIMC_REG_CIFCNTSEQ 0x1fc
+
+/*
+ * Function declarations
+ */
+void fimc_hw_reset(struct fimc_dev *fimc);
+void fimc_hw_set_rotation(struct fimc_ctx *ctx);
+void fimc_hw_set_target_format(struct fimc_ctx *ctx);
+void fimc_hw_set_out_dma(struct fimc_ctx *ctx);
+void fimc_hw_en_lastirq(struct fimc_dev *fimc, int enable);
+void fimc_hw_en_irq(struct fimc_dev *fimc, int enable);
+void fimc_hw_set_prescaler(struct fimc_ctx *ctx);
+void fimc_hw_set_mainscaler(struct fimc_ctx *ctx);
+void fimc_hw_en_capture(struct fimc_ctx *ctx);
+void fimc_hw_set_effect(struct fimc_ctx *ctx);
+void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx);
+void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
+void fimc_hw_set_input_path(struct fimc_ctx *ctx);
+void fimc_hw_set_output_path(struct fimc_ctx *ctx);
+void fimc_hw_set_input_addr(struct fimc_dev *fimc, struct fimc_addr *paddr);
+void fimc_hw_set_output_addr(struct fimc_dev *fimc, struct fimc_addr *paddr,
+ int index);
+int fimc_hw_set_camera_source(struct fimc_dev *fimc,
+ struct s5p_fimc_isp_info *cam);
+void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f);
+int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
+ struct s5p_fimc_isp_info *cam);
+int fimc_hw_set_camera_type(struct fimc_dev *fimc,
+ struct s5p_fimc_isp_info *cam);
+void fimc_hw_clear_irq(struct fimc_dev *dev);
+void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on);
+void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on);
+void fimc_hw_dis_capture(struct fimc_dev *dev);
+u32 fimc_hw_get_frame_index(struct fimc_dev *dev);
+void fimc_activate_capture(struct fimc_ctx *ctx);
+void fimc_deactivate_capture(struct fimc_dev *fimc);
+
+/**
+ * fimc_hw_set_dma_seq - configure output DMA buffer sequence
+ * @dev: fimc device for which to configure the buffer sequence
+ * @mask: bitmask for the DMA output buffer registers, set to 0 to skip a buffer
+ *
+ * This function masks the output DMA ring buffers; it selects which of the
+ * 32 available output buffer address registers will be used by the DMA
+ * engine.
+ */
+static inline void fimc_hw_set_dma_seq(struct fimc_dev *dev, u32 mask)
+{
+ writel(mask, dev->regs + FIMC_REG_CIFCNTSEQ);
+}
+
+#endif /* FIMC_REG_H_ */
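The fimc_hw_set_dma_seq() helper above selects which of the 32 output address slots the DMA engine cycles through. A sketch mirroring the existing fimc_hw_reset() call, which enables only the first four slots; the wrapper is illustrative:

	#include "fimc-reg.h"

	static void demo_limit_out_buffers(struct fimc_dev *dev)
	{
		/* Bits 0..3 set: use output buffer address registers 0..3 only. */
		fimc_hw_set_dma_seq(dev, 0xF);
	}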
diff --git a/drivers/media/video/s5p-fimc/mipi-csis.c b/drivers/media/video/s5p-fimc/mipi-csis.c
index f44f690397f7..2f73d9e3d0b7 100644
--- a/drivers/media/video/s5p-fimc/mipi-csis.c
+++ b/drivers/media/video/s5p-fimc/mipi-csis.c
@@ -127,20 +127,24 @@ struct csis_state {
* multiple of 2^pix_width_alignment
* @code: corresponding media bus code
* @fmt_reg: S5PCSIS_CONFIG register value
+ * @data_alignment: MIPI-CSI data alignment in bits
*/
struct csis_pix_format {
unsigned int pix_width_alignment;
enum v4l2_mbus_pixelcode code;
u32 fmt_reg;
+ u8 data_alignment;
};
static const struct csis_pix_format s5pcsis_formats[] = {
{
.code = V4L2_MBUS_FMT_VYUY8_2X8,
.fmt_reg = S5PCSIS_CFG_FMT_YCBCR422_8BIT,
+ .data_alignment = 32,
}, {
.code = V4L2_MBUS_FMT_JPEG_1X8,
.fmt_reg = S5PCSIS_CFG_FMT_USER(1),
+ .data_alignment = 32,
},
};
@@ -239,7 +243,7 @@ static void s5pcsis_set_params(struct csis_state *state)
s5pcsis_set_hsync_settle(state, pdata->hs_settle);
val = s5pcsis_read(state, S5PCSIS_CTRL);
- if (pdata->alignment == 32)
+ if (state->csis_fmt->data_alignment == 32)
val |= S5PCSIS_CTRL_ALIGN_32BIT;
else /* 24-bits */
val &= ~S5PCSIS_CTRL_ALIGN_32BIT;
@@ -711,19 +715,8 @@ static struct platform_driver s5pcsis_driver = {
},
};
-static int __init s5pcsis_init(void)
-{
- return platform_driver_probe(&s5pcsis_driver, s5pcsis_probe);
-}
-
-static void __exit s5pcsis_exit(void)
-{
- platform_driver_unregister(&s5pcsis_driver);
-}
-
-module_init(s5pcsis_init);
-module_exit(s5pcsis_exit);
+module_platform_driver(s5pcsis_driver);
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
-MODULE_DESCRIPTION("S5P/EXYNOS4 MIPI CSI receiver driver");
+MODULE_DESCRIPTION("Samsung S5P/EXYNOS SoC MIPI-CSI2 receiver driver");
MODULE_LICENSE("GPL");
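
With this change the data alignment lives in the format table rather than in platform data, so the driver can resolve it with a simple table lookup. A sketch of such a lookup, assuming the s5pcsis_formats[] array declared above (helper name and signature are illustrative, not necessarily the driver's own):

static const struct csis_pix_format *find_csis_format(enum v4l2_mbus_pixelcode code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(s5pcsis_formats); i++)
		if (s5pcsis_formats[i].code == code)
			return &s5pcsis_formats[i];

	return NULL; /* unsupported media bus code */
}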
diff --git a/drivers/media/video/s5p-fimc/regs-fimc.h b/drivers/media/video/s5p-fimc/regs-fimc.h
deleted file mode 100644
index c7a5bc51d571..000000000000
--- a/drivers/media/video/s5p-fimc/regs-fimc.h
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Register definition file for Samsung Camera Interface (FIMC) driver
- *
- * Copyright (c) 2010 Samsung Electronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef REGS_FIMC_H_
-#define REGS_FIMC_H_
-
-/* Input source format */
-#define S5P_CISRCFMT 0x00
-#define S5P_CISRCFMT_ITU601_8BIT (1 << 31)
-#define S5P_CISRCFMT_ITU601_16BIT (1 << 29)
-#define S5P_CISRCFMT_ORDER422_YCBYCR (0 << 14)
-#define S5P_CISRCFMT_ORDER422_YCRYCB (1 << 14)
-#define S5P_CISRCFMT_ORDER422_CBYCRY (2 << 14)
-#define S5P_CISRCFMT_ORDER422_CRYCBY (3 << 14)
-#define S5P_CISRCFMT_HSIZE(x) ((x) << 16)
-#define S5P_CISRCFMT_VSIZE(x) ((x) << 0)
-
-/* Window offset */
-#define S5P_CIWDOFST 0x04
-#define S5P_CIWDOFST_OFF_EN (1 << 31)
-#define S5P_CIWDOFST_CLROVFIY (1 << 30)
-#define S5P_CIWDOFST_CLROVRLB (1 << 29)
-#define S5P_CIWDOFST_HOROFF_MASK (0x7ff << 16)
-#define S5P_CIWDOFST_CLROVFICB (1 << 15)
-#define S5P_CIWDOFST_CLROVFICR (1 << 14)
-#define S5P_CIWDOFST_HOROFF(x) ((x) << 16)
-#define S5P_CIWDOFST_VEROFF(x) ((x) << 0)
-#define S5P_CIWDOFST_VEROFF_MASK (0xfff << 0)
-
-/* Global control */
-#define S5P_CIGCTRL 0x08
-#define S5P_CIGCTRL_SWRST (1 << 31)
-#define S5P_CIGCTRL_CAMRST_A (1 << 30)
-#define S5P_CIGCTRL_SELCAM_ITU_A (1 << 29)
-#define S5P_CIGCTRL_TESTPAT_NORMAL (0 << 27)
-#define S5P_CIGCTRL_TESTPAT_COLOR_BAR (1 << 27)
-#define S5P_CIGCTRL_TESTPAT_HOR_INC (2 << 27)
-#define S5P_CIGCTRL_TESTPAT_VER_INC (3 << 27)
-#define S5P_CIGCTRL_TESTPAT_MASK (3 << 27)
-#define S5P_CIGCTRL_TESTPAT_SHIFT (27)
-#define S5P_CIGCTRL_INVPOLPCLK (1 << 26)
-#define S5P_CIGCTRL_INVPOLVSYNC (1 << 25)
-#define S5P_CIGCTRL_INVPOLHREF (1 << 24)
-#define S5P_CIGCTRL_IRQ_OVFEN (1 << 22)
-#define S5P_CIGCTRL_HREF_MASK (1 << 21)
-#define S5P_CIGCTRL_IRQ_LEVEL (1 << 20)
-#define S5P_CIGCTRL_IRQ_CLR (1 << 19)
-#define S5P_CIGCTRL_IRQ_ENABLE (1 << 16)
-#define S5P_CIGCTRL_SHDW_DISABLE (1 << 12)
-#define S5P_CIGCTRL_CAM_JPEG (1 << 8)
-#define S5P_CIGCTRL_SELCAM_MIPI_A (1 << 7)
-#define S5P_CIGCTRL_CAMIF_SELWB (1 << 6)
-/* 0 - ITU601; 1 - ITU709 */
-#define S5P_CIGCTRL_CSC_ITU601_709 (1 << 5)
-#define S5P_CIGCTRL_INVPOLHSYNC (1 << 4)
-#define S5P_CIGCTRL_SELCAM_MIPI (1 << 3)
-#define S5P_CIGCTRL_INVPOLFIELD (1 << 1)
-#define S5P_CIGCTRL_INTERLACE (1 << 0)
-
-/* Window offset 2 */
-#define S5P_CIWDOFST2 0x14
-#define S5P_CIWDOFST2_HOROFF_MASK (0xfff << 16)
-#define S5P_CIWDOFST2_VEROFF_MASK (0xfff << 0)
-#define S5P_CIWDOFST2_HOROFF(x) ((x) << 16)
-#define S5P_CIWDOFST2_VEROFF(x) ((x) << 0)
-
-/* Output DMA Y/Cb/Cr plane start addresses */
-#define S5P_CIOYSA(n) (0x18 + (n) * 4)
-#define S5P_CIOCBSA(n) (0x28 + (n) * 4)
-#define S5P_CIOCRSA(n) (0x38 + (n) * 4)
-
-/* Target image format */
-#define S5P_CITRGFMT 0x48
-#define S5P_CITRGFMT_INROT90 (1 << 31)
-#define S5P_CITRGFMT_YCBCR420 (0 << 29)
-#define S5P_CITRGFMT_YCBCR422 (1 << 29)
-#define S5P_CITRGFMT_YCBCR422_1P (2 << 29)
-#define S5P_CITRGFMT_RGB (3 << 29)
-#define S5P_CITRGFMT_FMT_MASK (3 << 29)
-#define S5P_CITRGFMT_HSIZE_MASK (0xfff << 16)
-#define S5P_CITRGFMT_FLIP_SHIFT (14)
-#define S5P_CITRGFMT_FLIP_NORMAL (0 << 14)
-#define S5P_CITRGFMT_FLIP_X_MIRROR (1 << 14)
-#define S5P_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
-#define S5P_CITRGFMT_FLIP_180 (3 << 14)
-#define S5P_CITRGFMT_FLIP_MASK (3 << 14)
-#define S5P_CITRGFMT_OUTROT90 (1 << 13)
-#define S5P_CITRGFMT_VSIZE_MASK (0xfff << 0)
-#define S5P_CITRGFMT_HSIZE(x) ((x) << 16)
-#define S5P_CITRGFMT_VSIZE(x) ((x) << 0)
-
-/* Output DMA control */
-#define S5P_CIOCTRL 0x4c
-#define S5P_CIOCTRL_ORDER422_MASK (3 << 0)
-#define S5P_CIOCTRL_ORDER422_CRYCBY (0 << 0)
-#define S5P_CIOCTRL_ORDER422_CBYCRY (1 << 0)
-#define S5P_CIOCTRL_ORDER422_YCRYCB (2 << 0)
-#define S5P_CIOCTRL_ORDER422_YCBYCR (3 << 0)
-#define S5P_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
-#define S5P_CIOCTRL_YCBCR_3PLANE (0 << 3)
-#define S5P_CIOCTRL_YCBCR_2PLANE (1 << 3)
-#define S5P_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
-#define S5P_CIOCTRL_ALPHA_OUT_MASK (0xff << 4)
-#define S5P_CIOCTRL_RGB16FMT_MASK (3 << 16)
-#define S5P_CIOCTRL_RGB565 (0 << 16)
-#define S5P_CIOCTRL_ARGB1555 (1 << 16)
-#define S5P_CIOCTRL_ARGB4444 (2 << 16)
-#define S5P_CIOCTRL_ORDER2P_SHIFT (24)
-#define S5P_CIOCTRL_ORDER2P_MASK (3 << 24)
-#define S5P_CIOCTRL_ORDER422_2P_LSB_CRCB (0 << 24)
-
-/* Pre-scaler control 1 */
-#define S5P_CISCPRERATIO 0x50
-#define S5P_CISCPRERATIO_SHFACTOR(x) ((x) << 28)
-#define S5P_CISCPRERATIO_HOR(x) ((x) << 16)
-#define S5P_CISCPRERATIO_VER(x) ((x) << 0)
-
-#define S5P_CISCPREDST 0x54
-#define S5P_CISCPREDST_WIDTH(x) ((x) << 16)
-#define S5P_CISCPREDST_HEIGHT(x) ((x) << 0)
-
-/* Main scaler control */
-#define S5P_CISCCTRL 0x58
-#define S5P_CISCCTRL_SCALERBYPASS (1 << 31)
-#define S5P_CISCCTRL_SCALEUP_H (1 << 30)
-#define S5P_CISCCTRL_SCALEUP_V (1 << 29)
-#define S5P_CISCCTRL_CSCR2Y_WIDE (1 << 28)
-#define S5P_CISCCTRL_CSCY2R_WIDE (1 << 27)
-#define S5P_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
-#define S5P_CISCCTRL_INTERLACE (1 << 25)
-#define S5P_CISCCTRL_SCALERSTART (1 << 15)
-#define S5P_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
-#define S5P_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
-#define S5P_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
-#define S5P_CISCCTRL_INRGB_FMT_MASK (3 << 13)
-#define S5P_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
-#define S5P_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
-#define S5P_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
-#define S5P_CISCCTRL_OUTRGB_FMT_MASK (3 << 11)
-#define S5P_CISCCTRL_RGB_EXT (1 << 10)
-#define S5P_CISCCTRL_ONE2ONE (1 << 9)
-#define S5P_CISCCTRL_MHRATIO(x) ((x) << 16)
-#define S5P_CISCCTRL_MVRATIO(x) ((x) << 0)
-#define S5P_CISCCTRL_MHRATIO_MASK (0x1ff << 16)
-#define S5P_CISCCTRL_MVRATIO_MASK (0x1ff << 0)
-#define S5P_CISCCTRL_MHRATIO_EXT(x) (((x) >> 6) << 16)
-#define S5P_CISCCTRL_MVRATIO_EXT(x) (((x) >> 6) << 0)
-
-/* Target area */
-#define S5P_CITAREA 0x5c
-#define S5P_CITAREA_MASK 0x0fffffff
-
-/* General status */
-#define S5P_CISTATUS 0x64
-#define S5P_CISTATUS_OVFIY (1 << 31)
-#define S5P_CISTATUS_OVFICB (1 << 30)
-#define S5P_CISTATUS_OVFICR (1 << 29)
-#define S5P_CISTATUS_VSYNC (1 << 28)
-#define S5P_CISTATUS_FRAMECNT_MASK (3 << 26)
-#define S5P_CISTATUS_FRAMECNT_SHIFT 26
-#define S5P_CISTATUS_WINOFF_EN (1 << 25)
-#define S5P_CISTATUS_IMGCPT_EN (1 << 22)
-#define S5P_CISTATUS_IMGCPT_SCEN (1 << 21)
-#define S5P_CISTATUS_VSYNC_A (1 << 20)
-#define S5P_CISTATUS_VSYNC_B (1 << 19)
-#define S5P_CISTATUS_OVRLB (1 << 18)
-#define S5P_CISTATUS_FRAME_END (1 << 17)
-#define S5P_CISTATUS_LASTCAPT_END (1 << 16)
-#define S5P_CISTATUS_VVALID_A (1 << 15)
-#define S5P_CISTATUS_VVALID_B (1 << 14)
-
-/* Indexes to the last and the currently processed buffer. */
-#define S5P_CISTATUS2 0x68
-
-/* Image capture control */
-#define S5P_CIIMGCPT 0xc0
-#define S5P_CIIMGCPT_IMGCPTEN (1 << 31)
-#define S5P_CIIMGCPT_IMGCPTEN_SC (1 << 30)
-#define S5P_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
-#define S5P_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
-
-/* Frame capture sequence */
-#define S5P_CICPTSEQ 0xc4
-
-/* Image effect */
-#define S5P_CIIMGEFF 0xd0
-#define S5P_CIIMGEFF_IE_ENABLE (1 << 30)
-#define S5P_CIIMGEFF_IE_SC_BEFORE (0 << 29)
-#define S5P_CIIMGEFF_IE_SC_AFTER (1 << 29)
-#define S5P_CIIMGEFF_FIN_BYPASS (0 << 26)
-#define S5P_CIIMGEFF_FIN_ARBITRARY (1 << 26)
-#define S5P_CIIMGEFF_FIN_NEGATIVE (2 << 26)
-#define S5P_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
-#define S5P_CIIMGEFF_FIN_EMBOSSING (4 << 26)
-#define S5P_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
-#define S5P_CIIMGEFF_FIN_MASK (7 << 26)
-#define S5P_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0))
-#define S5P_CIIMGEFF_PAT_CB(x) ((x) << 13)
-#define S5P_CIIMGEFF_PAT_CR(x) ((x) << 0)
-
-/* Input DMA Y/Cb/Cr plane start address 0/1 */
-#define S5P_CIIYSA(n) (0xd4 + (n) * 0x70)
-#define S5P_CIICBSA(n) (0xd8 + (n) * 0x70)
-#define S5P_CIICRSA(n) (0xdc + (n) * 0x70)
-
-/* Real input DMA image size */
-#define S5P_CIREAL_ISIZE 0xf8
-#define S5P_CIREAL_ISIZE_AUTOLOAD_EN (1 << 31)
-#define S5P_CIREAL_ISIZE_ADDR_CH_DIS (1 << 30)
-#define S5P_CIREAL_ISIZE_HEIGHT(x) ((x) << 16)
-#define S5P_CIREAL_ISIZE_WIDTH(x) ((x) << 0)
-
-
-/* Input DMA control */
-#define S5P_MSCTRL 0xfc
-#define S5P_MSCTRL_IN_BURST_COUNT_MASK (0xF << 24)
-#define S5P_MSCTRL_2P_IN_ORDER_MASK (3 << 16)
-#define S5P_MSCTRL_2P_IN_ORDER_SHIFT 16
-#define S5P_MSCTRL_C_INT_IN_3PLANE (0 << 15)
-#define S5P_MSCTRL_C_INT_IN_2PLANE (1 << 15)
-#define S5P_MSCTRL_C_INT_IN_MASK (1 << 15)
-#define S5P_MSCTRL_FLIP_SHIFT 13
-#define S5P_MSCTRL_FLIP_MASK (3 << 13)
-#define S5P_MSCTRL_FLIP_NORMAL (0 << 13)
-#define S5P_MSCTRL_FLIP_X_MIRROR (1 << 13)
-#define S5P_MSCTRL_FLIP_Y_MIRROR (2 << 13)
-#define S5P_MSCTRL_FLIP_180 (3 << 13)
-#define S5P_MSCTRL_FIFO_CTRL_FULL (1 << 12)
-#define S5P_MSCTRL_ORDER422_SHIFT 4
-#define S5P_MSCTRL_ORDER422_YCBYCR (0 << 4)
-#define S5P_MSCTRL_ORDER422_CBYCRY (1 << 4)
-#define S5P_MSCTRL_ORDER422_YCRYCB (2 << 4)
-#define S5P_MSCTRL_ORDER422_CRYCBY (3 << 4)
-#define S5P_MSCTRL_ORDER422_MASK (3 << 4)
-#define S5P_MSCTRL_INPUT_EXTCAM (0 << 3)
-#define S5P_MSCTRL_INPUT_MEMORY (1 << 3)
-#define S5P_MSCTRL_INPUT_MASK (1 << 3)
-#define S5P_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
-#define S5P_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
-#define S5P_MSCTRL_INFORMAT_YCBCR422_1P (2 << 1)
-#define S5P_MSCTRL_INFORMAT_RGB (3 << 1)
-#define S5P_MSCTRL_INFORMAT_MASK (3 << 1)
-#define S5P_MSCTRL_ENVID (1 << 0)
-#define S5P_MSCTRL_IN_BURST_COUNT(x) ((x) << 24)
-
-/* Output DMA Y/Cb/Cr offset */
-#define S5P_CIOYOFF 0x168
-#define S5P_CIOCBOFF 0x16c
-#define S5P_CIOCROFF 0x170
-
-/* Input DMA Y/Cb/Cr offset */
-#define S5P_CIIYOFF 0x174
-#define S5P_CIICBOFF 0x178
-#define S5P_CIICROFF 0x17c
-
-#define S5P_CIO_OFFS_VER(x) ((x) << 16)
-#define S5P_CIO_OFFS_HOR(x) ((x) << 0)
-
-/* Input DMA original image size */
-#define S5P_ORGISIZE 0x180
-
-/* Output DMA original image size */
-#define S5P_ORGOSIZE 0x184
-
-#define S5P_ORIG_SIZE_VER(x) ((x) << 16)
-#define S5P_ORIG_SIZE_HOR(x) ((x) << 0)
-
-/* Real output DMA image size (extension register) */
-#define S5P_CIEXTEN 0x188
-#define S5P_CIEXTEN_MHRATIO_EXT(x) (((x) & 0x3f) << 10)
-#define S5P_CIEXTEN_MVRATIO_EXT(x) ((x) & 0x3f)
-#define S5P_CIEXTEN_MHRATIO_EXT_MASK (0x3f << 10)
-#define S5P_CIEXTEN_MVRATIO_EXT_MASK 0x3f
-
-#define S5P_CIDMAPARAM 0x18c
-#define S5P_CIDMAPARAM_R_LINEAR (0 << 29)
-#define S5P_CIDMAPARAM_R_64X32 (3 << 29)
-#define S5P_CIDMAPARAM_W_LINEAR (0 << 13)
-#define S5P_CIDMAPARAM_W_64X32 (3 << 13)
-#define S5P_CIDMAPARAM_TILE_MASK ((3 << 29) | (3 << 13))
-
-/* MIPI CSI image format */
-#define S5P_CSIIMGFMT 0x194
-#define S5P_CSIIMGFMT_YCBCR422_8BIT 0x1e
-#define S5P_CSIIMGFMT_RAW8 0x2a
-#define S5P_CSIIMGFMT_RAW10 0x2b
-#define S5P_CSIIMGFMT_RAW12 0x2c
-/* User defined formats. x = 0...16. */
-#define S5P_CSIIMGFMT_USER(x) (0x30 + x - 1)
-
-/* Output frame buffer sequence mask */
-#define S5P_CIFCNTSEQ 0x1FC
-
-#endif /* REGS_FIMC_H_ */
diff --git a/drivers/media/video/s5p-g2d/g2d.c b/drivers/media/video/s5p-g2d/g2d.c
index 789de74014e5..7c98ee7377ee 100644
--- a/drivers/media/video/s5p-g2d/g2d.c
+++ b/drivers/media/video/s5p-g2d/g2d.c
@@ -65,7 +65,7 @@ static struct g2d_fmt formats[] = {
};
#define NUM_FORMATS ARRAY_SIZE(formats)
-struct g2d_frame def_frame = {
+static struct g2d_frame def_frame = {
.width = DEFAULT_WIDTH,
.height = DEFAULT_HEIGHT,
.c_width = DEFAULT_WIDTH,
@@ -77,7 +77,7 @@ struct g2d_frame def_frame = {
.bottom = DEFAULT_HEIGHT,
};
-struct g2d_fmt *find_fmt(struct v4l2_format *f)
+static struct g2d_fmt *find_fmt(struct v4l2_format *f)
{
unsigned int i;
for (i = 0; i < NUM_FORMATS; i++) {
@@ -202,7 +202,7 @@ static const struct v4l2_ctrl_ops g2d_ctrl_ops = {
.s_ctrl = g2d_s_ctrl,
};
-int g2d_setup_ctrls(struct g2d_ctx *ctx)
+static int g2d_setup_ctrls(struct g2d_ctx *ctx)
{
struct g2d_dev *dev = ctx->dev;
@@ -546,11 +546,11 @@ static void job_abort(void *prv)
struct g2d_dev *dev = ctx->dev;
int ret;
- if (dev->curr == 0) /* No job currently running */
+ if (dev->curr == NULL) /* No job currently running */
return;
ret = wait_event_timeout(dev->irq_queue,
- dev->curr == 0,
+ dev->curr == NULL,
msecs_to_jiffies(G2D_TIMEOUT));
}
@@ -599,19 +599,19 @@ static irqreturn_t g2d_isr(int irq, void *prv)
g2d_clear_int(dev);
clk_disable(dev->gate);
- BUG_ON(ctx == 0);
+ BUG_ON(ctx == NULL);
src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
- BUG_ON(src == 0);
- BUG_ON(dst == 0);
+ BUG_ON(src == NULL);
+ BUG_ON(dst == NULL);
v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
- dev->curr = 0;
+ dev->curr = NULL;
wake_up(&dev->irq_queue);
return IRQ_HANDLED;
}
@@ -674,42 +674,27 @@ static int g2d_probe(struct platform_device *pdev)
struct resource *res;
int ret = 0;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
+
spin_lock_init(&dev->ctrl_lock);
mutex_init(&dev->mutex);
atomic_set(&dev->num_inst, 0);
init_waitqueue_head(&dev->irq_queue);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to find registers\n");
- ret = -ENOENT;
- goto free_dev;
- }
- dev->res_regs = request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev));
-
- if (!dev->res_regs) {
- dev_err(&pdev->dev, "failed to obtain register region\n");
- ret = -ENOENT;
- goto free_dev;
- }
-
- dev->regs = ioremap(res->start, resource_size(res));
- if (!dev->regs) {
- dev_err(&pdev->dev, "failed to map registers\n");
- ret = -ENOENT;
- goto rel_res_regs;
+ dev->regs = devm_request_and_ioremap(&pdev->dev, res);
+ if (dev->regs == NULL) {
+ dev_err(&pdev->dev, "Failed to obtain io memory\n");
+ return -ENOENT;
}
dev->clk = clk_get(&pdev->dev, "sclk_fimg2d");
if (IS_ERR_OR_NULL(dev->clk)) {
dev_err(&pdev->dev, "failed to get g2d clock\n");
- ret = -ENXIO;
- goto unmap_regs;
+ return -ENXIO;
}
ret = clk_prepare(dev->clk);
@@ -740,7 +725,8 @@ static int g2d_probe(struct platform_device *pdev)
dev->irq = res->start;
- ret = request_irq(dev->irq, g2d_isr, 0, pdev->name, dev);
+ ret = devm_request_irq(&pdev->dev, dev->irq, g2d_isr,
+ 0, pdev->name, dev);
if (ret) {
dev_err(&pdev->dev, "failed to install IRQ\n");
goto put_clk_gate;
@@ -749,7 +735,7 @@ static int g2d_probe(struct platform_device *pdev)
dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
if (IS_ERR(dev->alloc_ctx)) {
ret = PTR_ERR(dev->alloc_ctx);
- goto rel_irq;
+ goto unprep_clk_gate;
}
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
@@ -762,6 +748,10 @@ static int g2d_probe(struct platform_device *pdev)
goto unreg_v4l2_dev;
}
*vfd = g2d_videodev;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
vfd->lock = &dev->mutex;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
@@ -793,8 +783,6 @@ unreg_v4l2_dev:
v4l2_device_unregister(&dev->v4l2_dev);
alloc_ctx_cleanup:
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
-rel_irq:
- free_irq(dev->irq, dev);
unprep_clk_gate:
clk_unprepare(dev->gate);
put_clk_gate:
@@ -803,12 +791,7 @@ unprep_clk:
clk_unprepare(dev->clk);
put_clk:
clk_put(dev->clk);
-unmap_regs:
- iounmap(dev->regs);
-rel_res_regs:
- release_resource(dev->res_regs);
-free_dev:
- kfree(dev);
+
return ret;
}
@@ -821,14 +804,10 @@ static int g2d_remove(struct platform_device *pdev)
video_unregister_device(dev->vfd);
v4l2_device_unregister(&dev->v4l2_dev);
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
- free_irq(dev->irq, dev);
clk_unprepare(dev->gate);
clk_put(dev->gate);
clk_unprepare(dev->clk);
clk_put(dev->clk);
- iounmap(dev->regs);
- release_resource(dev->res_regs);
- kfree(dev);
return 0;
}
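
The g2d conversion above (and the jpeg and MFC conversions that follow) all replace manual resource handling with device-managed allocations, which removes most of the error-unwind labels from probe(). A condensed sketch of that probe shape under generic, illustrative names (assumes the usual platform_device/interrupt/io headers; devm_request_and_ioremap() is the helper the patch itself uses):

struct example_dev {
	void __iomem *regs;
	int irq;
};

static irqreturn_t example_isr(int irq, void *priv)
{
	/* a real handler would acknowledge the hardware here */
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct example_dev *dev;
	struct resource *res;
	int ret;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->regs = devm_request_and_ioremap(&pdev->dev, res);
	if (!dev->regs)
		return -ENOENT;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	dev->irq = ret;

	ret = devm_request_irq(&pdev->dev, dev->irq, example_isr, 0,
			       pdev->name, dev);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, dev);
	/* no cleanup labels: devm-managed resources are released
	 * automatically on probe failure and on driver removal */
	return 0;
}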
diff --git a/drivers/media/video/s5p-g2d/g2d.h b/drivers/media/video/s5p-g2d/g2d.h
index 1b82065aeaef..6b765b0216c5 100644
--- a/drivers/media/video/s5p-g2d/g2d.h
+++ b/drivers/media/video/s5p-g2d/g2d.h
@@ -23,7 +23,6 @@ struct g2d_dev {
spinlock_t ctrl_lock;
atomic_t num_inst;
struct vb2_alloc_ctx *alloc_ctx;
- struct resource *res_regs;
void __iomem *regs;
struct clk *clk;
struct clk *gate;
diff --git a/drivers/media/video/s5p-jpeg/jpeg-core.c b/drivers/media/video/s5p-jpeg/jpeg-core.c
index 5a49c307f9c1..28b5225d94f5 100644
--- a/drivers/media/video/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/video/s5p-jpeg/jpeg-core.c
@@ -813,7 +813,7 @@ static int s5p_jpeg_streamoff(struct file *file, void *priv,
return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
-int s5p_jpeg_g_selection(struct file *file, void *priv,
+static int s5p_jpeg_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
@@ -1290,7 +1290,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
int ret;
/* JPEG IP abstraction struct */
- jpeg = kzalloc(sizeof(struct s5p_jpeg), GFP_KERNEL);
+ jpeg = devm_kzalloc(&pdev->dev, sizeof(struct s5p_jpeg), GFP_KERNEL);
if (!jpeg)
return -ENOMEM;
@@ -1300,43 +1300,25 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
/* memory-mapped registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "cannot find IO resource\n");
- ret = -ENOENT;
- goto jpeg_alloc_rollback;
- }
-
- jpeg->ioarea = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (!jpeg->ioarea) {
- dev_err(&pdev->dev, "cannot request IO\n");
- ret = -ENXIO;
- goto jpeg_alloc_rollback;
- }
- jpeg->regs = ioremap(res->start, resource_size(res));
- if (!jpeg->regs) {
- dev_err(&pdev->dev, "cannot map IO\n");
- ret = -ENXIO;
- goto mem_region_rollback;
+ jpeg->regs = devm_request_and_ioremap(&pdev->dev, res);
+ if (jpeg->regs == NULL) {
+ dev_err(&pdev->dev, "Failed to obtain io memory\n");
+ return -ENOENT;
}
- dev_dbg(&pdev->dev, "registers %p (%p, %p)\n",
- jpeg->regs, jpeg->ioarea, res);
-
/* interrupt service routine registration */
jpeg->irq = ret = platform_get_irq(pdev, 0);
if (ret < 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
- goto ioremap_rollback;
+ return ret;
}
- ret = request_irq(jpeg->irq, s5p_jpeg_irq, 0,
- dev_name(&pdev->dev), jpeg);
-
+ ret = devm_request_irq(&pdev->dev, jpeg->irq, s5p_jpeg_irq, 0,
+ dev_name(&pdev->dev), jpeg);
if (ret) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpeg->irq);
- goto ioremap_rollback;
+ return ret;
}
/* clocks */
@@ -1344,7 +1326,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
if (IS_ERR(jpeg->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(jpeg->clk);
- goto request_irq_rollback;
+ return ret;
}
dev_dbg(&pdev->dev, "clock source %p\n", jpeg->clk);
clk_enable(jpeg->clk);
@@ -1386,6 +1368,10 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
jpeg->vfd_encoder->release = video_device_release;
jpeg->vfd_encoder->lock = &jpeg->lock;
jpeg->vfd_encoder->v4l2_dev = &jpeg->v4l2_dev;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &jpeg->vfd_encoder->flags);
ret = video_register_device(jpeg->vfd_encoder, VFL_TYPE_GRABBER, -1);
if (ret) {
@@ -1413,6 +1399,10 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
jpeg->vfd_decoder->release = video_device_release;
jpeg->vfd_decoder->lock = &jpeg->lock;
jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev;
+ /* Locking in file operations other than ioctl should be done by the driver,
+ not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &jpeg->vfd_decoder->flags);
ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
if (ret) {
@@ -1456,18 +1446,6 @@ clk_get_rollback:
clk_disable(jpeg->clk);
clk_put(jpeg->clk);
-request_irq_rollback:
- free_irq(jpeg->irq, jpeg);
-
-ioremap_rollback:
- iounmap(jpeg->regs);
-
-mem_region_rollback:
- release_resource(jpeg->ioarea);
- release_mem_region(jpeg->ioarea->start, resource_size(jpeg->ioarea));
-
-jpeg_alloc_rollback:
- kfree(jpeg);
return ret;
}
@@ -1488,14 +1466,6 @@ static int s5p_jpeg_remove(struct platform_device *pdev)
clk_disable(jpeg->clk);
clk_put(jpeg->clk);
- free_irq(jpeg->irq, jpeg);
-
- iounmap(jpeg->regs);
-
- release_resource(jpeg->ioarea);
- release_mem_region(jpeg->ioarea->start, resource_size(jpeg->ioarea));
- kfree(jpeg);
-
return 0;
}
diff --git a/drivers/media/video/s5p-jpeg/jpeg-core.h b/drivers/media/video/s5p-jpeg/jpeg-core.h
index 38d7367f7a6d..9d0cd2b76f61 100644
--- a/drivers/media/video/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/video/s5p-jpeg/jpeg-core.h
@@ -54,7 +54,6 @@
* @vfd_encoder: video device node for encoder mem2mem mode
* @vfd_decoder: video device node for decoder mem2mem mode
* @m2m_dev: v4l2 mem2mem device data
- * @ioarea: JPEG IP memory region
* @regs: JPEG IP registers mapping
* @irq: JPEG IP irq
* @clk: JPEG IP clock
@@ -70,7 +69,6 @@ struct s5p_jpeg {
struct video_device *vfd_decoder;
struct v4l2_m2m_dev *m2m_dev;
- struct resource *ioarea;
void __iomem *regs;
unsigned int irq;
struct clk *clk;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc.c b/drivers/media/video/s5p-mfc/s5p_mfc.c
index 83fe461af263..9bb68e7b5ae8 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc.c
@@ -70,7 +70,7 @@ static void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason,
wake_up(&dev->queue);
}
-void s5p_mfc_watchdog(unsigned long arg)
+static void s5p_mfc_watchdog(unsigned long arg)
{
struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;
@@ -373,7 +373,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx,
/* If no context is available then all necessary
* processing has been done. */
- if (ctx == 0)
+ if (ctx == NULL)
return;
dev = ctx->dev;
@@ -429,7 +429,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
struct s5p_mfc_dev *dev;
unsigned int guard_width, guard_height;
- if (ctx == 0)
+ if (ctx == NULL)
return;
dev = ctx->dev;
if (ctx->c_ops->post_seq_start) {
@@ -496,7 +496,7 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
struct s5p_mfc_dev *dev;
unsigned long flags;
- if (ctx == 0)
+ if (ctx == NULL)
return;
dev = ctx->dev;
s5p_mfc_clear_int_flags(dev);
@@ -772,7 +772,7 @@ err_queue_init:
err_init_hw:
s5p_mfc_release_firmware(dev);
err_alloc_fw:
- dev->ctx[ctx->num] = 0;
+ dev->ctx[ctx->num] = NULL;
del_timer_sync(&dev->watchdog_timer);
s5p_mfc_clock_off();
err_pwr_enable:
@@ -849,7 +849,7 @@ static int s5p_mfc_release(struct file *file)
}
mfc_debug(2, "Shutting down clock\n");
s5p_mfc_clock_off();
- dev->ctx[ctx->num] = 0;
+ dev->ctx[ctx->num] = NULL;
s5p_mfc_dec_ctrls_delete(ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
@@ -948,7 +948,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
int ret;
pr_debug("%s++\n", __func__);
- dev = kzalloc(sizeof *dev, GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof *dev, GFP_KERNEL);
if (!dev) {
dev_err(&pdev->dev, "Not enough memory for MFC device\n");
return -ENOMEM;
@@ -959,49 +959,35 @@ static int s5p_mfc_probe(struct platform_device *pdev)
dev->plat_dev = pdev;
if (!dev->plat_dev) {
dev_err(&pdev->dev, "No platform data specified\n");
- ret = -ENODEV;
- goto err_dev;
+ return -ENODEV;
}
ret = s5p_mfc_init_pm(dev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to get mfc clock source\n");
- goto err_clk;
+ return ret;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get memory region resource\n");
- ret = -ENOENT;
- goto err_res;
- }
- dev->mfc_mem = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (dev->mfc_mem == NULL) {
- dev_err(&pdev->dev, "failed to get memory region\n");
- ret = -ENOENT;
- goto err_mem_reg;
- }
- dev->regs_base = ioremap(dev->mfc_mem->start, resource_size(dev->mfc_mem));
+ dev->regs_base = devm_request_and_ioremap(&pdev->dev, res);
if (dev->regs_base == NULL) {
- dev_err(&pdev->dev, "failed to ioremap address region\n");
- ret = -ENOENT;
- goto err_ioremap;
+ dev_err(&pdev->dev, "Failed to obtain io memory\n");
+ return -ENOENT;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(&pdev->dev, "failed to get irq resource\n");
ret = -ENOENT;
- goto err_get_res;
+ goto err_res;
}
dev->irq = res->start;
- ret = request_irq(dev->irq, s5p_mfc_irq, IRQF_DISABLED, pdev->name,
- dev);
+ ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq,
+ IRQF_DISABLED, pdev->name, dev);
if (ret) {
dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
- goto err_req_irq;
+ goto err_res;
}
dev->mem_dev_l = device_find_child(&dev->plat_dev->dev, "s5p-mfc-l",
@@ -1009,20 +995,20 @@ static int s5p_mfc_probe(struct platform_device *pdev)
if (!dev->mem_dev_l) {
mfc_err("Mem child (L) device get failed\n");
ret = -ENODEV;
- goto err_find_child;
+ goto err_res;
}
dev->mem_dev_r = device_find_child(&dev->plat_dev->dev, "s5p-mfc-r",
match_child);
if (!dev->mem_dev_r) {
mfc_err("Mem child (R) device get failed\n");
ret = -ENODEV;
- goto err_find_child;
+ goto err_res;
}
dev->alloc_ctx[0] = vb2_dma_contig_init_ctx(dev->mem_dev_l);
if (IS_ERR_OR_NULL(dev->alloc_ctx[0])) {
ret = PTR_ERR(dev->alloc_ctx[0]);
- goto err_mem_init_ctx_0;
+ goto err_res;
}
dev->alloc_ctx[1] = vb2_dma_contig_init_ctx(dev->mem_dev_r);
if (IS_ERR_OR_NULL(dev->alloc_ctx[1])) {
@@ -1048,6 +1034,10 @@ static int s5p_mfc_probe(struct platform_device *pdev)
vfd->ioctl_ops = get_dec_v4l2_ioctl_ops();
vfd->release = video_device_release,
vfd->lock = &dev->mfc_mutex;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
vfd->v4l2_dev = &dev->v4l2_dev;
snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
dev->vfd_dec = vfd;
@@ -1072,6 +1062,8 @@ static int s5p_mfc_probe(struct platform_device *pdev)
vfd->ioctl_ops = get_enc_v4l2_ioctl_ops();
vfd->release = video_device_release,
vfd->lock = &dev->mfc_mutex;
+ /* This should not be necessary */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
vfd->v4l2_dev = &dev->v4l2_dev;
snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
dev->vfd_enc = vfd;
@@ -1110,22 +1102,9 @@ err_v4l2_dev_reg:
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
err_mem_init_ctx_1:
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
-err_mem_init_ctx_0:
-err_find_child:
- free_irq(dev->irq, dev);
-err_req_irq:
-err_get_res:
- iounmap(dev->regs_base);
- dev->regs_base = NULL;
-err_ioremap:
- release_resource(dev->mfc_mem);
- kfree(dev->mfc_mem);
-err_mem_reg:
err_res:
s5p_mfc_final_pm(dev);
-err_clk:
-err_dev:
- kfree(dev);
+
pr_debug("%s-- with error\n", __func__);
return ret;
@@ -1148,15 +1127,7 @@ static int __devexit s5p_mfc_remove(struct platform_device *pdev)
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
- free_irq(dev->irq, dev);
- iounmap(dev->regs_base);
- if (dev->mfc_mem) {
- release_resource(dev->mfc_mem);
- kfree(dev->mfc_mem);
- dev->mfc_mem = NULL;
- }
s5p_mfc_final_pm(dev);
- kfree(dev);
return 0;
}
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_common.h b/drivers/media/video/s5p-mfc/s5p_mfc_common.h
index 91146fa622e4..bd5706a6bad1 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_common.h
@@ -185,7 +185,6 @@ struct s5p_mfc_pm {
* @mem_dev_r: child device of the right memory bank (1)
* @regs_base: base address of the MFC hw registers
* @irq: irq resource
- * @mfc_mem: MFC registers memory resource
* @dec_ctrl_handler: control framework handler for decoding
* @enc_ctrl_handler: control framework handler for encoding
* @pm: power management control
@@ -221,7 +220,6 @@ struct s5p_mfc_dev {
struct device *mem_dev_r;
void __iomem *regs_base;
int irq;
- struct resource *mfc_mem;
struct v4l2_ctrl_handler dec_ctrl_handler;
struct v4l2_ctrl_handler enc_ctrl_handler;
struct s5p_mfc_pm pm;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
index f2481a85e0a2..08a5cfeaa59e 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
@@ -52,7 +52,7 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
s5p_mfc_bitproc_buf = vb2_dma_contig_memops.alloc(
dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], dev->fw_size);
if (IS_ERR(s5p_mfc_bitproc_buf)) {
- s5p_mfc_bitproc_buf = 0;
+ s5p_mfc_bitproc_buf = NULL;
mfc_err("Allocating bitprocessor buffer failed\n");
release_firmware(fw_blob);
return -ENOMEM;
@@ -63,7 +63,7 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
mfc_err("The base memory for bank 1 is not aligned to 128KB\n");
vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = 0;
+ s5p_mfc_bitproc_buf = NULL;
release_firmware(fw_blob);
return -EIO;
}
@@ -72,7 +72,7 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
mfc_err("Bitprocessor memory remap failed\n");
vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = 0;
+ s5p_mfc_bitproc_buf = NULL;
release_firmware(fw_blob);
return -EIO;
}
@@ -82,7 +82,7 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
if (IS_ERR(b_base)) {
vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = 0;
+ s5p_mfc_bitproc_buf = NULL;
mfc_err("Allocating bank2 base failed\n");
release_firmware(fw_blob);
return -ENOMEM;
@@ -94,7 +94,7 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
mfc_err("The base memory for bank 2 is not aligned to 128KB\n");
vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = 0;
+ s5p_mfc_bitproc_buf = NULL;
release_firmware(fw_blob);
return -EIO;
}
@@ -126,7 +126,7 @@ int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev)
release_firmware(fw_blob);
return -ENOMEM;
}
- if (s5p_mfc_bitproc_buf == 0 || s5p_mfc_bitproc_phys == 0) {
+ if (s5p_mfc_bitproc_buf == NULL || s5p_mfc_bitproc_phys == 0) {
mfc_err("MFC firmware is not allocated or was not mapped correctly\n");
release_firmware(fw_blob);
return -EINVAL;
@@ -146,9 +146,9 @@ int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev)
if (!s5p_mfc_bitproc_buf)
return -EINVAL;
vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
- s5p_mfc_bitproc_virt = 0;
+ s5p_mfc_bitproc_virt = NULL;
s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = 0;
+ s5p_mfc_bitproc_buf = NULL;
return 0;
}
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index dff9dc798795..acedb2004be3 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -1436,7 +1436,8 @@ static const struct v4l2_ctrl_ops s5p_mfc_enc_ctrl_ops = {
.s_ctrl = s5p_mfc_enc_s_ctrl,
};
-int vidioc_s_parm(struct file *file, void *priv, struct v4l2_streamparm *a)
+static int vidioc_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *a)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
@@ -1452,7 +1453,8 @@ int vidioc_s_parm(struct file *file, void *priv, struct v4l2_streamparm *a)
return 0;
}
-int vidioc_g_parm(struct file *file, void *priv, struct v4l2_streamparm *a)
+static int vidioc_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *a)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_opr.c b/drivers/media/video/s5p-mfc/s5p_mfc_opr.c
index e08b21c50ebf..e6217cbfa4a3 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_opr.c
@@ -43,7 +43,7 @@ int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx)
ctx->desc_buf = vb2_dma_contig_memops.alloc(
dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], DESC_BUF_SIZE);
if (IS_ERR_VALUE((int)ctx->desc_buf)) {
- ctx->desc_buf = 0;
+ ctx->desc_buf = NULL;
mfc_err("Allocating DESC buffer failed\n");
return -ENOMEM;
}
@@ -54,7 +54,7 @@ int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx)
if (desc_virt == NULL) {
vb2_dma_contig_memops.put(ctx->desc_buf);
ctx->desc_phys = 0;
- ctx->desc_buf = 0;
+ ctx->desc_buf = NULL;
mfc_err("Remapping DESC buffer failed\n");
return -ENOMEM;
}
@@ -69,7 +69,7 @@ void s5p_mfc_release_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
if (ctx->desc_phys) {
vb2_dma_contig_memops.put(ctx->desc_buf);
ctx->desc_phys = 0;
- ctx->desc_buf = 0;
+ ctx->desc_buf = NULL;
}
}
@@ -186,7 +186,7 @@ int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx)
ctx->bank1_buf = vb2_dma_contig_memops.alloc(
dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
if (IS_ERR(ctx->bank1_buf)) {
- ctx->bank1_buf = 0;
+ ctx->bank1_buf = NULL;
printk(KERN_ERR
"Buf alloc for decoding failed (port A)\n");
return -ENOMEM;
@@ -200,7 +200,7 @@ int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx)
ctx->bank2_buf = vb2_dma_contig_memops.alloc(
dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_size);
if (IS_ERR(ctx->bank2_buf)) {
- ctx->bank2_buf = 0;
+ ctx->bank2_buf = NULL;
mfc_err("Buf alloc for decoding failed (port B)\n");
return -ENOMEM;
}
@@ -216,13 +216,13 @@ void s5p_mfc_release_codec_buffers(struct s5p_mfc_ctx *ctx)
{
if (ctx->bank1_buf) {
vb2_dma_contig_memops.put(ctx->bank1_buf);
- ctx->bank1_buf = 0;
+ ctx->bank1_buf = NULL;
ctx->bank1_phys = 0;
ctx->bank1_size = 0;
}
if (ctx->bank2_buf) {
vb2_dma_contig_memops.put(ctx->bank2_buf);
- ctx->bank2_buf = 0;
+ ctx->bank2_buf = NULL;
ctx->bank2_phys = 0;
ctx->bank2_size = 0;
}
@@ -244,7 +244,7 @@ int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx)
if (IS_ERR(ctx->ctx_buf)) {
mfc_err("Allocating context buffer failed\n");
ctx->ctx_phys = 0;
- ctx->ctx_buf = 0;
+ ctx->ctx_buf = NULL;
return -ENOMEM;
}
ctx->ctx_phys = s5p_mfc_mem_cookie(
@@ -256,7 +256,7 @@ int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx)
mfc_err("Remapping instance buffer failed\n");
vb2_dma_contig_memops.put(ctx->ctx_buf);
ctx->ctx_phys = 0;
- ctx->ctx_buf = 0;
+ ctx->ctx_buf = NULL;
return -ENOMEM;
}
/* Zero content of the allocated memory */
@@ -265,7 +265,7 @@ int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx)
if (s5p_mfc_init_shm(ctx) < 0) {
vb2_dma_contig_memops.put(ctx->ctx_buf);
ctx->ctx_phys = 0;
- ctx->ctx_buf = 0;
+ ctx->ctx_buf = NULL;
return -ENOMEM;
}
return 0;
@@ -277,12 +277,12 @@ void s5p_mfc_release_instance_buffer(struct s5p_mfc_ctx *ctx)
if (ctx->ctx_buf) {
vb2_dma_contig_memops.put(ctx->ctx_buf);
ctx->ctx_phys = 0;
- ctx->ctx_buf = 0;
+ ctx->ctx_buf = NULL;
}
if (ctx->shm_alloc) {
vb2_dma_contig_memops.put(ctx->shm_alloc);
- ctx->shm_alloc = 0;
- ctx->shm = 0;
+ ctx->shm_alloc = NULL;
+ ctx->shm = NULL;
}
}
@@ -296,7 +296,7 @@ void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
}
/* Set registers for shared buffer */
-void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx)
+static void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
mfc_write(dev, ctx->shm_ofs, S5P_FIMV_SI_CH0_HOST_WR_ADR);
diff --git a/drivers/media/video/s5p-tv/hdmi_drv.c b/drivers/media/video/s5p-tv/hdmi_drv.c
index 4865d25a0e57..20cb6eef2979 100644
--- a/drivers/media/video/s5p-tv/hdmi_drv.c
+++ b/drivers/media/video/s5p-tv/hdmi_drv.c
@@ -42,7 +42,23 @@ MODULE_DESCRIPTION("Samsung HDMI");
MODULE_LICENSE("GPL");
/* default preset configured on probe */
-#define HDMI_DEFAULT_PRESET V4L2_DV_1080P60
+#define HDMI_DEFAULT_PRESET V4L2_DV_480P59_94
+
+struct hdmi_pulse {
+ u32 beg;
+ u32 end;
+};
+
+struct hdmi_timings {
+ struct hdmi_pulse hact;
+ u32 hsyn_pol; /* 0 - high, 1 - low */
+ struct hdmi_pulse hsyn;
+ u32 interlaced;
+ struct hdmi_pulse vact[2];
+ u32 vsyn_pol; /* 0 - high, 1 - low */
+ u32 vsyn_off;
+ struct hdmi_pulse vsyn[2];
+};
struct hdmi_resources {
struct clk *hdmi;
@@ -70,64 +86,15 @@ struct hdmi_device {
/** subdev of MHL interface */
struct v4l2_subdev *mhl_sd;
/** configuration of current graphic mode */
- const struct hdmi_preset_conf *cur_conf;
+ const struct hdmi_timings *cur_conf;
+ /** flag indicating that timings are dirty */
+ int cur_conf_dirty;
/** current preset */
u32 cur_preset;
/** other resources */
struct hdmi_resources res;
};
-struct hdmi_tg_regs {
- u8 cmd;
- u8 h_fsz_l;
- u8 h_fsz_h;
- u8 hact_st_l;
- u8 hact_st_h;
- u8 hact_sz_l;
- u8 hact_sz_h;
- u8 v_fsz_l;
- u8 v_fsz_h;
- u8 vsync_l;
- u8 vsync_h;
- u8 vsync2_l;
- u8 vsync2_h;
- u8 vact_st_l;
- u8 vact_st_h;
- u8 vact_sz_l;
- u8 vact_sz_h;
- u8 field_chg_l;
- u8 field_chg_h;
- u8 vact_st2_l;
- u8 vact_st2_h;
- u8 vsync_top_hdmi_l;
- u8 vsync_top_hdmi_h;
- u8 vsync_bot_hdmi_l;
- u8 vsync_bot_hdmi_h;
- u8 field_top_hdmi_l;
- u8 field_top_hdmi_h;
- u8 field_bot_hdmi_l;
- u8 field_bot_hdmi_h;
-};
-
-struct hdmi_core_regs {
- u8 h_blank[2];
- u8 v_blank[3];
- u8 h_v_line[3];
- u8 vsync_pol[1];
- u8 int_pro_mode[1];
- u8 v_blank_f[3];
- u8 h_sync_gen[3];
- u8 v_sync_gen1[3];
- u8 v_sync_gen2[3];
- u8 v_sync_gen3[3];
-};
-
-struct hdmi_preset_conf {
- struct hdmi_core_regs core;
- struct hdmi_tg_regs tg;
- struct v4l2_mbus_framefmt mbus_fmt;
-};
-
static struct platform_device_id hdmi_driver_types[] = {
{
.name = "s5pv210-hdmi",
@@ -165,6 +132,21 @@ void hdmi_writeb(struct hdmi_device *hdev, u32 reg_id, u8 value)
writeb(value, hdev->regs + reg_id);
}
+static inline
+void hdmi_writebn(struct hdmi_device *hdev, u32 reg_id, int n, u32 value)
+{
+ switch (n) {
+ default:
+ writeb(value >> 24, hdev->regs + reg_id + 12);
+ case 3:
+ writeb(value >> 16, hdev->regs + reg_id + 8);
+ case 2:
+ writeb(value >> 8, hdev->regs + reg_id + 4);
+ case 1:
+ writeb(value >> 0, hdev->regs + reg_id + 0);
+ }
+}
+
static inline u32 hdmi_read(struct hdmi_device *hdev, u32 reg_id)
{
return readl(hdev->regs + reg_id);
@@ -211,77 +193,72 @@ static void hdmi_reg_init(struct hdmi_device *hdev)
}
static void hdmi_timing_apply(struct hdmi_device *hdev,
- const struct hdmi_preset_conf *conf)
+ const struct hdmi_timings *t)
{
- const struct hdmi_core_regs *core = &conf->core;
- const struct hdmi_tg_regs *tg = &conf->tg;
-
/* setting core registers */
- hdmi_writeb(hdev, HDMI_H_BLANK_0, core->h_blank[0]);
- hdmi_writeb(hdev, HDMI_H_BLANK_1, core->h_blank[1]);
- hdmi_writeb(hdev, HDMI_V_BLANK_0, core->v_blank[0]);
- hdmi_writeb(hdev, HDMI_V_BLANK_1, core->v_blank[1]);
- hdmi_writeb(hdev, HDMI_V_BLANK_2, core->v_blank[2]);
- hdmi_writeb(hdev, HDMI_H_V_LINE_0, core->h_v_line[0]);
- hdmi_writeb(hdev, HDMI_H_V_LINE_1, core->h_v_line[1]);
- hdmi_writeb(hdev, HDMI_H_V_LINE_2, core->h_v_line[2]);
- hdmi_writeb(hdev, HDMI_VSYNC_POL, core->vsync_pol[0]);
- hdmi_writeb(hdev, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
- hdmi_writeb(hdev, HDMI_V_BLANK_F_0, core->v_blank_f[0]);
- hdmi_writeb(hdev, HDMI_V_BLANK_F_1, core->v_blank_f[1]);
- hdmi_writeb(hdev, HDMI_V_BLANK_F_2, core->v_blank_f[2]);
- hdmi_writeb(hdev, HDMI_H_SYNC_GEN_0, core->h_sync_gen[0]);
- hdmi_writeb(hdev, HDMI_H_SYNC_GEN_1, core->h_sync_gen[1]);
- hdmi_writeb(hdev, HDMI_H_SYNC_GEN_2, core->h_sync_gen[2]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+ hdmi_writebn(hdev, HDMI_H_BLANK_0, 2, t->hact.beg);
+ hdmi_writebn(hdev, HDMI_H_SYNC_GEN_0, 3,
+ (t->hsyn_pol << 20) | (t->hsyn.end << 10) | t->hsyn.beg);
+ hdmi_writeb(hdev, HDMI_VSYNC_POL, t->vsyn_pol);
+ hdmi_writebn(hdev, HDMI_V_BLANK_0, 3,
+ (t->vact[0].beg << 11) | t->vact[0].end);
+ hdmi_writebn(hdev, HDMI_V_SYNC_GEN_1_0, 3,
+ (t->vsyn[0].beg << 12) | t->vsyn[0].end);
+ if (t->interlaced) {
+ u32 vsyn_trans = t->hsyn.beg + t->vsyn_off;
+
+ hdmi_writeb(hdev, HDMI_INT_PRO_MODE, 1);
+ hdmi_writebn(hdev, HDMI_H_V_LINE_0, 3,
+ (t->hact.end << 12) | t->vact[1].end);
+ hdmi_writebn(hdev, HDMI_V_BLANK_F_0, 3,
+ (t->vact[1].end << 11) | t->vact[1].beg);
+ hdmi_writebn(hdev, HDMI_V_SYNC_GEN_2_0, 3,
+ (t->vsyn[1].beg << 12) | t->vsyn[1].end);
+ hdmi_writebn(hdev, HDMI_V_SYNC_GEN_3_0, 3,
+ (vsyn_trans << 12) | vsyn_trans);
+ } else {
+ hdmi_writeb(hdev, HDMI_INT_PRO_MODE, 0);
+ hdmi_writebn(hdev, HDMI_H_V_LINE_0, 3,
+ (t->hact.end << 12) | t->vact[0].end);
+ }
+
/* Timing generator registers */
- hdmi_writeb(hdev, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
- hdmi_writeb(hdev, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
- hdmi_writeb(hdev, HDMI_TG_HACT_ST_L, tg->hact_st_l);
- hdmi_writeb(hdev, HDMI_TG_HACT_ST_H, tg->hact_st_h);
- hdmi_writeb(hdev, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
- hdmi_writeb(hdev, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
- hdmi_writeb(hdev, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
- hdmi_writeb(hdev, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
- hdmi_writeb(hdev, HDMI_TG_VSYNC_L, tg->vsync_l);
- hdmi_writeb(hdev, HDMI_TG_VSYNC_H, tg->vsync_h);
- hdmi_writeb(hdev, HDMI_TG_VSYNC2_L, tg->vsync2_l);
- hdmi_writeb(hdev, HDMI_TG_VSYNC2_H, tg->vsync2_h);
- hdmi_writeb(hdev, HDMI_TG_VACT_ST_L, tg->vact_st_l);
- hdmi_writeb(hdev, HDMI_TG_VACT_ST_H, tg->vact_st_h);
- hdmi_writeb(hdev, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
- hdmi_writeb(hdev, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
- hdmi_writeb(hdev, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
- hdmi_writeb(hdev, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
- hdmi_writeb(hdev, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
- hdmi_writeb(hdev, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
- hdmi_writeb(hdev, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
- hdmi_writeb(hdev, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
- hdmi_writeb(hdev, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
- hdmi_writeb(hdev, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
- hdmi_writeb(hdev, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
- hdmi_writeb(hdev, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
- hdmi_writeb(hdev, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
- hdmi_writeb(hdev, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
+ hdmi_writebn(hdev, HDMI_TG_H_FSZ_L, 2, t->hact.end);
+ hdmi_writebn(hdev, HDMI_TG_HACT_ST_L, 2, t->hact.beg);
+ hdmi_writebn(hdev, HDMI_TG_HACT_SZ_L, 2, t->hact.end - t->hact.beg);
+ hdmi_writebn(hdev, HDMI_TG_VSYNC_L, 2, t->vsyn[0].beg);
+ hdmi_writebn(hdev, HDMI_TG_VACT_ST_L, 2, t->vact[0].beg);
+ hdmi_writebn(hdev, HDMI_TG_VACT_SZ_L, 2,
+ t->vact[0].end - t->vact[0].beg);
+ hdmi_writebn(hdev, HDMI_TG_VSYNC_TOP_HDMI_L, 2, t->vsyn[0].beg);
+ hdmi_writebn(hdev, HDMI_TG_FIELD_TOP_HDMI_L, 2, t->vsyn[0].beg);
+ if (t->interlaced) {
+ hdmi_write_mask(hdev, HDMI_TG_CMD, ~0, HDMI_TG_FIELD_EN);
+ hdmi_writebn(hdev, HDMI_TG_V_FSZ_L, 2, t->vact[1].end);
+ hdmi_writebn(hdev, HDMI_TG_VSYNC2_L, 2, t->vsyn[1].beg);
+ hdmi_writebn(hdev, HDMI_TG_FIELD_CHG_L, 2, t->vact[0].end);
+ hdmi_writebn(hdev, HDMI_TG_VACT_ST2_L, 2, t->vact[1].beg);
+ hdmi_writebn(hdev, HDMI_TG_VSYNC_BOT_HDMI_L, 2, t->vsyn[1].beg);
+ hdmi_writebn(hdev, HDMI_TG_FIELD_BOT_HDMI_L, 2, t->vsyn[1].beg);
+ } else {
+ hdmi_write_mask(hdev, HDMI_TG_CMD, 0, HDMI_TG_FIELD_EN);
+ hdmi_writebn(hdev, HDMI_TG_V_FSZ_L, 2, t->vact[0].end);
+ }
}
static int hdmi_conf_apply(struct hdmi_device *hdmi_dev)
{
struct device *dev = hdmi_dev->dev;
- const struct hdmi_preset_conf *conf = hdmi_dev->cur_conf;
+ const struct hdmi_timings *conf = hdmi_dev->cur_conf;
struct v4l2_dv_preset preset;
int ret;
dev_dbg(dev, "%s\n", __func__);
+ /* skip if conf is already synchronized with HW */
+ if (!hdmi_dev->cur_conf_dirty)
+ return 0;
+
/* reset hdmiphy */
hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
mdelay(10);
@@ -307,6 +284,8 @@ static int hdmi_conf_apply(struct hdmi_device *hdmi_dev)
/* setting core registers */
hdmi_timing_apply(hdmi_dev, conf);
+ hdmi_dev->cur_conf_dirty = 0;
+
return 0;
}
@@ -398,156 +377,126 @@ static void hdmi_dumpregs(struct hdmi_device *hdev, char *prefix)
#undef DUMPREG
}
-static const struct hdmi_preset_conf hdmi_conf_480p = {
- .core = {
- .h_blank = {0x8a, 0x00},
- .v_blank = {0x0d, 0x6a, 0x01},
- .h_v_line = {0x0d, 0xa2, 0x35},
- .vsync_pol = {0x01},
- .int_pro_mode = {0x00},
- .v_blank_f = {0x00, 0x00, 0x00},
- .h_sync_gen = {0x0e, 0x30, 0x11},
- .v_sync_gen1 = {0x0f, 0x90, 0x00},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0x5a, 0x03, /* h_fsz */
- 0x8a, 0x00, 0xd0, 0x02, /* hact */
- 0x0d, 0x02, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0xe0, 0x01, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- },
- .mbus_fmt = {
- .width = 720,
- .height = 480,
- .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
- .field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- },
+static const struct hdmi_timings hdmi_timings_480p = {
+ .hact = { .beg = 138, .end = 858 },
+ .hsyn_pol = 1,
+ .hsyn = { .beg = 16, .end = 16 + 62 },
+ .interlaced = 0,
+ .vact[0] = { .beg = 42 + 3, .end = 522 + 3 },
+ .vsyn_pol = 1,
+ .vsyn[0] = { .beg = 6 + 3, .end = 12 + 3},
};
-static const struct hdmi_preset_conf hdmi_conf_720p60 = {
- .core = {
- .h_blank = {0x72, 0x01},
- .v_blank = {0xee, 0xf2, 0x00},
- .h_v_line = {0xee, 0x22, 0x67},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
- .h_sync_gen = {0x6c, 0x50, 0x02},
- .v_sync_gen1 = {0x0a, 0x50, 0x00},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0x72, 0x06, /* h_fsz */
- 0x72, 0x01, 0x00, 0x05, /* hact */
- 0xee, 0x02, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x1e, 0x00, 0xd0, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- },
- .mbus_fmt = {
- .width = 1280,
- .height = 720,
- .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
- .field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- },
+static const struct hdmi_timings hdmi_timings_576p50 = {
+ .hact = { .beg = 144, .end = 864 },
+ .hsyn_pol = 1,
+ .hsyn = { .beg = 12, .end = 12 + 64 },
+ .interlaced = 0,
+ .vact[0] = { .beg = 44 + 5, .end = 620 + 5 },
+ .vsyn_pol = 1,
+ .vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
};
-static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
- .core = {
- .h_blank = {0xd0, 0x02},
- .v_blank = {0x65, 0x6c, 0x01},
- .h_v_line = {0x65, 0x04, 0xa5},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
- .h_sync_gen = {0x0e, 0xea, 0x08},
- .v_sync_gen1 = {0x09, 0x40, 0x00},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0x98, 0x08, /* h_fsz */
- 0x18, 0x01, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0x38, 0x04, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- },
- .mbus_fmt = {
- .width = 1920,
- .height = 1080,
- .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
- .field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- },
+static const struct hdmi_timings hdmi_timings_720p60 = {
+ .hact = { .beg = 370, .end = 1650 },
+ .hsyn_pol = 0,
+ .hsyn = { .beg = 110, .end = 110 + 40 },
+ .interlaced = 0,
+ .vact[0] = { .beg = 25 + 5, .end = 745 + 5 },
+ .vsyn_pol = 0,
+ .vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
};
-static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
- .core = {
- .h_blank = {0x18, 0x01},
- .v_blank = {0x65, 0x6c, 0x01},
- .h_v_line = {0x65, 0x84, 0x89},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
- .h_sync_gen = {0x56, 0x08, 0x02},
- .v_sync_gen1 = {0x09, 0x40, 0x00},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0x98, 0x08, /* h_fsz */
- 0x18, 0x01, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0x38, 0x04, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- },
- .mbus_fmt = {
- .width = 1920,
- .height = 1080,
- .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
- .field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- },
+static const struct hdmi_timings hdmi_timings_720p50 = {
+ .hact = { .beg = 700, .end = 1980 },
+ .hsyn_pol = 0,
+ .hsyn = { .beg = 440, .end = 440 + 40 },
+ .interlaced = 0,
+ .vact[0] = { .beg = 25 + 5, .end = 745 + 5 },
+ .vsyn_pol = 0,
+ .vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
+};
+
+static const struct hdmi_timings hdmi_timings_1080p24 = {
+ .hact = { .beg = 830, .end = 2750 },
+ .hsyn_pol = 0,
+ .hsyn = { .beg = 638, .end = 638 + 44 },
+ .interlaced = 0,
+ .vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
+ .vsyn_pol = 0,
+ .vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
+};
+
+static const struct hdmi_timings hdmi_timings_1080p60 = {
+ .hact = { .beg = 280, .end = 2200 },
+ .hsyn_pol = 0,
+ .hsyn = { .beg = 88, .end = 88 + 44 },
+ .interlaced = 0,
+ .vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
+ .vsyn_pol = 0,
+ .vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
+};
+
+static const struct hdmi_timings hdmi_timings_1080i60 = {
+ .hact = { .beg = 280, .end = 2200 },
+ .hsyn_pol = 0,
+ .hsyn = { .beg = 88, .end = 88 + 44 },
+ .interlaced = 1,
+ .vact[0] = { .beg = 20 + 2, .end = 560 + 2 },
+ .vact[1] = { .beg = 583 + 2, .end = 1123 + 2 },
+ .vsyn_pol = 0,
+ .vsyn_off = 1100,
+ .vsyn[0] = { .beg = 0 + 2, .end = 5 + 2},
+ .vsyn[1] = { .beg = 562 + 2, .end = 567 + 2},
+};
+
+static const struct hdmi_timings hdmi_timings_1080i50 = {
+ .hact = { .beg = 720, .end = 2640 },
+ .hsyn_pol = 0,
+ .hsyn = { .beg = 528, .end = 528 + 44 },
+ .interlaced = 1,
+ .vact[0] = { .beg = 20 + 2, .end = 560 + 2 },
+ .vact[1] = { .beg = 583 + 2, .end = 1123 + 2 },
+ .vsyn_pol = 0,
+ .vsyn_off = 1320,
+ .vsyn[0] = { .beg = 0 + 2, .end = 5 + 2},
+ .vsyn[1] = { .beg = 562 + 2, .end = 567 + 2},
+};
+
+static const struct hdmi_timings hdmi_timings_1080p50 = {
+ .hact = { .beg = 720, .end = 2640 },
+ .hsyn_pol = 0,
+ .hsyn = { .beg = 528, .end = 528 + 44 },
+ .interlaced = 0,
+ .vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
+ .vsyn_pol = 0,
+ .vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
};
static const struct {
u32 preset;
- const struct hdmi_preset_conf *conf;
-} hdmi_conf[] = {
- { V4L2_DV_480P59_94, &hdmi_conf_480p },
- { V4L2_DV_720P59_94, &hdmi_conf_720p60 },
- { V4L2_DV_1080P50, &hdmi_conf_1080p50 },
- { V4L2_DV_1080P30, &hdmi_conf_1080p60 },
- { V4L2_DV_1080P60, &hdmi_conf_1080p60 },
+ const struct hdmi_timings *timings;
+} hdmi_timings[] = {
+ { V4L2_DV_480P59_94, &hdmi_timings_480p },
+ { V4L2_DV_576P50, &hdmi_timings_576p50 },
+ { V4L2_DV_720P50, &hdmi_timings_720p50 },
+ { V4L2_DV_720P59_94, &hdmi_timings_720p60 },
+ { V4L2_DV_720P60, &hdmi_timings_720p60 },
+ { V4L2_DV_1080P24, &hdmi_timings_1080p24 },
+ { V4L2_DV_1080P30, &hdmi_timings_1080p60 },
+ { V4L2_DV_1080P50, &hdmi_timings_1080p50 },
+ { V4L2_DV_1080I50, &hdmi_timings_1080i50 },
+ { V4L2_DV_1080I60, &hdmi_timings_1080i60 },
+ { V4L2_DV_1080P60, &hdmi_timings_1080p60 },
};
-static const struct hdmi_preset_conf *hdmi_preset2conf(u32 preset)
+static const struct hdmi_timings *hdmi_preset2timings(u32 preset)
{
int i;
- for (i = 0; i < ARRAY_SIZE(hdmi_conf); ++i)
- if (hdmi_conf[i].preset == preset)
- return hdmi_conf[i].conf;
+ for (i = 0; i < ARRAY_SIZE(hdmi_timings); ++i)
+ if (hdmi_timings[i].preset == preset)
+ return hdmi_timings[i].timings;
return NULL;
}
@@ -559,6 +508,10 @@ static int hdmi_streamon(struct hdmi_device *hdev)
dev_dbg(dev, "%s\n", __func__);
+ ret = hdmi_conf_apply(hdev);
+ if (ret)
+ return ret;
+
ret = v4l2_subdev_call(hdev->phy_sd, video, s_stream, 1);
if (ret)
return ret;
@@ -671,14 +624,15 @@ static int hdmi_s_dv_preset(struct v4l2_subdev *sd,
{
struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
struct device *dev = hdev->dev;
- const struct hdmi_preset_conf *conf;
+ const struct hdmi_timings *conf;
- conf = hdmi_preset2conf(preset->preset);
+ conf = hdmi_preset2timings(preset->preset);
if (conf == NULL) {
dev_err(dev, "preset (%u) not supported\n", preset->preset);
return -EINVAL;
}
hdev->cur_conf = conf;
+ hdev->cur_conf_dirty = 1;
hdev->cur_preset = preset->preset;
return 0;
}
@@ -695,21 +649,32 @@ static int hdmi_g_mbus_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *fmt)
{
struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
- struct device *dev = hdev->dev;
+ const struct hdmi_timings *t = hdev->cur_conf;
- dev_dbg(dev, "%s\n", __func__);
+ dev_dbg(hdev->dev, "%s\n", __func__);
if (!hdev->cur_conf)
return -EINVAL;
- *fmt = hdev->cur_conf->mbus_fmt;
+ memset(fmt, 0, sizeof *fmt);
+ fmt->width = t->hact.end - t->hact.beg;
+ fmt->height = t->vact[0].end - t->vact[0].beg;
+ fmt->code = V4L2_MBUS_FMT_FIXED; /* means RGB888 */
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ if (t->interlaced) {
+ fmt->field = V4L2_FIELD_INTERLACED;
+ fmt->height *= 2;
+ } else {
+ fmt->field = V4L2_FIELD_NONE;
+ }
return 0;
}
static int hdmi_enum_dv_presets(struct v4l2_subdev *sd,
struct v4l2_dv_enum_preset *preset)
{
- if (preset->index >= ARRAY_SIZE(hdmi_conf))
+ if (preset->index >= ARRAY_SIZE(hdmi_timings))
return -EINVAL;
- return v4l_fill_dv_preset_info(hdmi_conf[preset->index].preset, preset);
+ return v4l_fill_dv_preset_info(hdmi_timings[preset->index].preset,
+ preset);
}
static const struct v4l2_subdev_core_ops hdmi_sd_core_ops = {
@@ -737,6 +702,8 @@ static int hdmi_runtime_suspend(struct device *dev)
dev_dbg(dev, "%s\n", __func__);
v4l2_subdev_call(hdev->mhl_sd, core, s_power, 0);
hdmi_resource_poweroff(&hdev->res);
+ /* flag that device context is lost */
+ hdev->cur_conf_dirty = 1;
return 0;
}
@@ -750,10 +717,6 @@ static int hdmi_runtime_resume(struct device *dev)
hdmi_resource_poweron(&hdev->res);
- ret = hdmi_conf_apply(hdev);
- if (ret)
- goto fail;
-
/* starting MHL */
ret = v4l2_subdev_call(hdev->mhl_sd, core, s_power, 1);
if (hdev->mhl_sd && ret)
@@ -993,7 +956,8 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
strlcpy(sd->name, "s5p-hdmi", sizeof sd->name);
hdmi_dev->cur_preset = HDMI_DEFAULT_PRESET;
/* FIXME: missing fail preset is not supported */
- hdmi_dev->cur_conf = hdmi_preset2conf(hdmi_dev->cur_preset);
+ hdmi_dev->cur_conf = hdmi_preset2timings(hdmi_dev->cur_preset);
+ hdmi_dev->cur_conf_dirty = 1;
/* storing subdev for call that have only access to struct device */
dev_set_drvdata(dev, sd);
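
The new hdmi_timings representation replaces the raw per-register byte tables; active width and height fall straight out of the pulse positions, as hdmi_g_mbus_fmt() above shows. A small sketch of that derivation (the helper name is hypothetical):

static void hdmi_timings_to_size(const struct hdmi_timings *t,
				 u32 *width, u32 *height)
{
	*width = t->hact.end - t->hact.beg;
	*height = t->vact[0].end - t->vact[0].beg;
	if (t->interlaced)
		*height *= 2;	/* report the full frame, i.e. both fields */
}

Applied to hdmi_timings_480p above this yields 858 - 138 = 720 by 525 - 45 = 480, the expected 720x480 active area.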
diff --git a/drivers/media/video/s5p-tv/hdmiphy_drv.c b/drivers/media/video/s5p-tv/hdmiphy_drv.c
index 0afef77747e5..f67b38631801 100644
--- a/drivers/media/video/s5p-tv/hdmiphy_drv.c
+++ b/drivers/media/video/s5p-tv/hdmiphy_drv.c
@@ -26,53 +26,188 @@ MODULE_DESCRIPTION("Samsung HDMI Physical interface driver");
MODULE_LICENSE("GPL");
struct hdmiphy_conf {
- u32 preset;
+ unsigned long pixclk;
const u8 *data;
};
-static const u8 hdmiphy_conf27[32] = {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
- 0x6B, 0x10, 0x02, 0x51, 0xDf, 0xF2, 0x54, 0x87,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xe3, 0x26, 0x00, 0x00, 0x00, 0x00,
+struct hdmiphy_ctx {
+ struct v4l2_subdev sd;
+ const struct hdmiphy_conf *conf_tab;
};
-static const u8 hdmiphy_conf74_175[32] = {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
- 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
+static const struct hdmiphy_conf hdmiphy_conf_s5pv210[] = {
+ { .pixclk = 27000000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x02, 0x52, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 27027000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+ 0x6B, 0x10, 0x02, 0x52, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE2, 0x26, 0x00, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 74176000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xEF, 0x5B,
+ 0x6D, 0x10, 0x01, 0x52, 0xEF, 0xF3, 0x54, 0xB9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xA5, 0x26, 0x01, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 74250000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xF8, 0x40,
+ 0x6A, 0x10, 0x01, 0x52, 0xFF, 0xF1, 0x54, 0xBA,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xA4, 0x26, 0x01, 0x00, 0x00, 0x00, }
+ },
+ { /* end marker */ }
};
-static const u8 hdmiphy_conf74_25[32] = {
- 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
- 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xe0,
- 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
+static const struct hdmiphy_conf hdmiphy_conf_exynos4210[] = {
+ { .pixclk = 27000000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 27027000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+ 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE2, 0x26, 0x00, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 74176000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xEF, 0x5B,
+ 0x6D, 0x10, 0x01, 0x51, 0xEF, 0xF3, 0x54, 0xB9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xA5, 0x26, 0x01, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 74250000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xF8, 0x40,
+ 0x6A, 0x10, 0x01, 0x51, 0xFF, 0xF1, 0x54, 0xBA,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xA4, 0x26, 0x01, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 148352000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xEF, 0x5B,
+ 0x6D, 0x18, 0x00, 0x51, 0xEF, 0xF3, 0x54, 0xB9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x11, 0x40, 0xA5, 0x26, 0x02, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 148500000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xF8, 0x40,
+ 0x6A, 0x18, 0x00, 0x51, 0xFF, 0xF1, 0x54, 0xBA,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x11, 0x40, 0xA4, 0x26, 0x02, 0x00, 0x00, 0x00, }
+ },
+ { /* end marker */ }
};
-static const u8 hdmiphy_conf148_5[32] = {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
- 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
- 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
+static const struct hdmiphy_conf hdmiphy_conf_exynos4212[] = {
+ { .pixclk = 27000000, .data = (u8 [32]) {
+ 0x01, 0x11, 0x2D, 0x75, 0x00, 0x01, 0x00, 0x08,
+ 0x82, 0x00, 0x0E, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
+ 0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x71,
+ 0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 27027000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x2D, 0x72, 0x00, 0x64, 0x12, 0x08,
+ 0x43, 0x20, 0x0E, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
+ 0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x71,
+ 0x54, 0xE2, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 74176000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x3E, 0x35, 0x00, 0x5B, 0xDE, 0x08,
+ 0x82, 0x20, 0x73, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
+ 0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x52,
+ 0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 74250000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x3E, 0x35, 0x00, 0x40, 0xF0, 0x08,
+ 0x82, 0x20, 0x73, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
+ 0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x52,
+ 0x54, 0xA4, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 148500000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x3E, 0x15, 0x00, 0x40, 0xF0, 0x08,
+ 0x82, 0x20, 0x73, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
+ 0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0xA4,
+ 0x54, 0x4A, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { /* end marker */ }
};
-static const struct hdmiphy_conf hdmiphy_conf[] = {
- { V4L2_DV_480P59_94, hdmiphy_conf27 },
- { V4L2_DV_1080P30, hdmiphy_conf74_175 },
- { V4L2_DV_720P59_94, hdmiphy_conf74_175 },
- { V4L2_DV_720P60, hdmiphy_conf74_25 },
- { V4L2_DV_1080P50, hdmiphy_conf148_5 },
- { V4L2_DV_1080P60, hdmiphy_conf148_5 },
+static const struct hdmiphy_conf hdmiphy_conf_exynos4412[] = {
+ { .pixclk = 27000000, .data = (u8 [32]) {
+ 0x01, 0x11, 0x2D, 0x75, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x00, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xE4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 27027000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x2D, 0x72, 0x40, 0x64, 0x12, 0x08,
+ 0x43, 0x20, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 74176000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x1F, 0x10, 0x40, 0x5B, 0xEF, 0x08,
+ 0x81, 0x20, 0xB9, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xA6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 74250000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x1F, 0x10, 0x40, 0x40, 0xF8, 0x08,
+ 0x81, 0x20, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 148500000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x1F, 0x00, 0x40, 0x40, 0xF8, 0x08,
+ 0x81, 0x20, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x4B, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { /* end marker */ }
};
-const u8 *hdmiphy_preset2conf(u32 preset)
+static inline struct hdmiphy_ctx *sd_to_ctx(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct hdmiphy_ctx, sd);
+}
+
+static unsigned long hdmiphy_preset_to_pixclk(u32 preset)
+{
+ static const unsigned long pixclk[] = {
+ [V4L2_DV_480P59_94] = 27000000,
+ [V4L2_DV_576P50] = 27000000,
+ [V4L2_DV_720P59_94] = 74176000,
+ [V4L2_DV_720P50] = 74250000,
+ [V4L2_DV_720P60] = 74250000,
+ [V4L2_DV_1080P24] = 74250000,
+ [V4L2_DV_1080P30] = 74250000,
+ [V4L2_DV_1080I50] = 74250000,
+ [V4L2_DV_1080I60] = 74250000,
+ [V4L2_DV_1080P50] = 148500000,
+ [V4L2_DV_1080P60] = 148500000,
+ };
+ if (preset < ARRAY_SIZE(pixclk))
+ return pixclk[preset];
+ else
+ return 0;
+}
+
+static const u8 *hdmiphy_find_conf(u32 preset, const struct hdmiphy_conf *conf)
{
- int i;
- for (i = 0; i < ARRAY_SIZE(hdmiphy_conf); ++i)
- if (hdmiphy_conf[i].preset == preset)
- return hdmiphy_conf[i].data;
+ unsigned long pixclk;
+
+ pixclk = hdmiphy_preset_to_pixclk(preset);
+ if (!pixclk)
+ return NULL;
+
+ for (; conf->pixclk; ++conf)
+ if (conf->pixclk == pixclk)
+ return conf->data;
return NULL;
}
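To see how the two new lookups compose, consider one concrete preset; the sketch below is an illustration only and assumes the Exynos4210 table has been selected through the i2c id's driver_data:

/* Minimal sketch: resolve V4L2_DV_720P60 against the Exynos4210 table. */
const u8 *data;

data = hdmiphy_find_conf(V4L2_DV_720P60, hdmiphy_conf_exynos4210);
/* hdmiphy_preset_to_pixclk() maps V4L2_DV_720P60 to 74250000 Hz, the scan
 * stops at the 74.25 MHz entry, and data points at its 32-byte register
 * block; an unknown preset (pixclk 0) or a missing entry yields NULL. */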
@@ -88,11 +223,12 @@ static int hdmiphy_s_dv_preset(struct v4l2_subdev *sd,
const u8 *data;
u8 buffer[32];
int ret;
+ struct hdmiphy_ctx *ctx = sd_to_ctx(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct device *dev = &client->dev;
dev_info(dev, "s_dv_preset(preset = %d)\n", preset->preset);
- data = hdmiphy_preset2conf(preset->preset);
+ data = hdmiphy_find_conf(preset->preset, ctx->conf_tab);
if (!data) {
dev_err(dev, "format not supported\n");
return -EINVAL;
@@ -146,21 +282,36 @@ static const struct v4l2_subdev_ops hdmiphy_ops = {
static int __devinit hdmiphy_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- static struct v4l2_subdev sd;
+ struct hdmiphy_ctx *ctx;
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->conf_tab = (struct hdmiphy_conf *)id->driver_data;
+ v4l2_i2c_subdev_init(&ctx->sd, client, &hdmiphy_ops);
- v4l2_i2c_subdev_init(&sd, client, &hdmiphy_ops);
dev_info(&client->dev, "probe successful\n");
return 0;
}
static int __devexit hdmiphy_remove(struct i2c_client *client)
{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hdmiphy_ctx *ctx = sd_to_ctx(sd);
+
+ kfree(ctx);
dev_info(&client->dev, "remove successful\n");
+
return 0;
}
static const struct i2c_device_id hdmiphy_id[] = {
- { "hdmiphy", 0 },
+ { "hdmiphy", (unsigned long)hdmiphy_conf_exynos4210 },
+ { "hdmiphy-s5pv210", (unsigned long)hdmiphy_conf_s5pv210 },
+ { "hdmiphy-exynos4210", (unsigned long)hdmiphy_conf_exynos4210 },
+ { "hdmiphy-exynos4212", (unsigned long)hdmiphy_conf_exynos4212 },
+ { "hdmiphy-exynos4412", (unsigned long)hdmiphy_conf_exynos4412 },
{ },
};
MODULE_DEVICE_TABLE(i2c, hdmiphy_id);
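Since the per-SoC register table now rides in the i2c_device_id's driver_data, board code only has to name the right variant. A hypothetical registration sketch follows; the slave address and bus number are assumptions and are board specific:

/* Hypothetical board registration; address and bus number are assumed. */
static struct i2c_board_info hdmiphy_board_info __initdata = {
	I2C_BOARD_INFO("hdmiphy-exynos4212", 0x38),	/* assumed address */
};

/* i2c_register_board_info(1, &hdmiphy_board_info, 1) would then make probe()
 * pick up hdmiphy_conf_exynos4212 through id->driver_data. */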
diff --git a/drivers/media/video/s5p-tv/mixer.h b/drivers/media/video/s5p-tv/mixer.h
index 1597078c4a50..ddb422e23550 100644
--- a/drivers/media/video/s5p-tv/mixer.h
+++ b/drivers/media/video/s5p-tv/mixer.h
@@ -226,6 +226,7 @@ struct mxr_resources {
/* event flags used */
enum mxr_devide_flags {
MXR_EVENT_VSYNC = 0,
+ MXR_EVENT_TOP = 1,
};
/** driver's instance */
@@ -293,7 +294,7 @@ int __devinit mxr_acquire_video(struct mxr_device *mdev,
struct mxr_output_conf *output_cont, int output_count);
/** releasing common video resources */
-void __devexit mxr_release_video(struct mxr_device *mdev);
+void mxr_release_video(struct mxr_device *mdev);
struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx);
struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx);
diff --git a/drivers/media/video/s5p-tv/mixer_drv.c b/drivers/media/video/s5p-tv/mixer_drv.c
index a2c0c25ad130..edca06592883 100644
--- a/drivers/media/video/s5p-tv/mixer_drv.c
+++ b/drivers/media/video/s5p-tv/mixer_drv.c
@@ -461,7 +461,7 @@ static struct platform_driver mxr_driver __refdata = {
static int __init mxr_init(void)
{
int i, ret;
- static const char banner[] __initdata = KERN_INFO
+ static const char banner[] __initconst = KERN_INFO
"Samsung TV Mixer driver, "
"(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
printk(banner);
diff --git a/drivers/media/video/s5p-tv/mixer_reg.c b/drivers/media/video/s5p-tv/mixer_reg.c
index 4800a3cbb297..3b1670a045f4 100644
--- a/drivers/media/video/s5p-tv/mixer_reg.c
+++ b/drivers/media/video/s5p-tv/mixer_reg.c
@@ -296,21 +296,25 @@ irqreturn_t mxr_irq_handler(int irq, void *dev_data)
/* wake up process waiting for VSYNC */
if (val & MXR_INT_STATUS_VSYNC) {
set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
+ /* toggle TOP field event if working in interlaced mode */
+ if (~mxr_read(mdev, MXR_CFG) & MXR_CFG_SCAN_PROGRASSIVE)
+ change_bit(MXR_EVENT_TOP, &mdev->event_flags);
wake_up(&mdev->event_queue);
- }
-
- /* clear interrupts */
- if (~val & MXR_INT_EN_VSYNC) {
/* the vsync interrupt uses different bits for read and clear */
- val &= ~MXR_INT_EN_VSYNC;
+ val &= ~MXR_INT_STATUS_VSYNC;
val |= MXR_INT_CLEAR_VSYNC;
}
+
+ /* clear interrupts */
mxr_write(mdev, MXR_INT_STATUS, val);
spin_unlock(&mdev->reg_slock);
/* leave on non-vsync event */
if (~val & MXR_INT_CLEAR_VSYNC)
return IRQ_HANDLED;
+ /* skip layer update on bottom field */
+ if (!test_bit(MXR_EVENT_TOP, &mdev->event_flags))
+ return IRQ_HANDLED;
for (i = 0; i < MXR_MAX_LAYERS; ++i)
mxr_irq_layer_handle(mdev->layer[i]);
return IRQ_HANDLED;
@@ -333,6 +337,7 @@ void mxr_reg_streamon(struct mxr_device *mdev)
/* start MIXER */
mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
+ set_bit(MXR_EVENT_TOP, &mdev->event_flags);
spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
diff --git a/drivers/media/video/s5p-tv/mixer_video.c b/drivers/media/video/s5p-tv/mixer_video.c
index f7ca5cc143c6..33fde2a763ec 100644
--- a/drivers/media/video/s5p-tv/mixer_video.c
+++ b/drivers/media/video/s5p-tv/mixer_video.c
@@ -140,7 +140,7 @@ fail:
return ret;
}
-void __devexit mxr_release_video(struct mxr_device *mdev)
+void mxr_release_video(struct mxr_device *mdev)
{
int i;
@@ -853,8 +853,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
*nplanes = fmt->num_subframes;
for (i = 0; i < fmt->num_subframes; ++i) {
alloc_ctxs[i] = layer->mdev->alloc_ctx;
- sizes[i] = PAGE_ALIGN(planes[i].sizeimage);
- mxr_dbg(mdev, "size[%d] = %08lx\n", i, sizes[i]);
+ sizes[i] = planes[i].sizeimage;
+ mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
}
if (*nbuffers == 0)
@@ -1069,6 +1069,10 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);
video_set_drvdata(&layer->vfd, layer);
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &layer->vfd.flags);
layer->vfd.lock = &layer->mutex;
layer->vfd.v4l2_dev = &mdev->v4l2_dev;
diff --git a/drivers/media/video/s5p-tv/regs-hdmi.h b/drivers/media/video/s5p-tv/regs-hdmi.h
index 33247d13e4c0..a889d1f57f28 100644
--- a/drivers/media/video/s5p-tv/regs-hdmi.h
+++ b/drivers/media/video/s5p-tv/regs-hdmi.h
@@ -140,6 +140,7 @@
#define HDMI_MODE_MASK (3 << 0)
/* HDMI_TG_CMD */
+#define HDMI_TG_FIELD_EN (1 << 1)
#define HDMI_TG_EN (1 << 0)
#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 53aae5968ffb..bc08f1dbc293 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -5080,6 +5080,36 @@ struct saa7134_board saa7134_boards[] = {
.gpio = 0x0200000,
},
},
+ [SAA7134_BOARD_ASUSTeK_PS3_100] = {
+ .name = "Asus My Cinema PS3-100",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tuner_config = 2,
+ .gpiomask = 1 << 21,
+ .mpeg = SAA7134_MPEG_DVB,
+ .inputs = {{
+ .name = name_tv,
+ .vmux = 1,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp,
+ .vmux = 0,
+ .amux = LINE2,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE2,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = TV,
+ .gpio = 0x0200000,
+ },
+ },
[SAA7134_BOARD_REAL_ANGEL_220] = {
.name = "Zogis Real Angel 220",
.audio_clock = 0x00187de7,
@@ -6877,6 +6907,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
.driver_data = SAA7134_BOARD_ASUSTeK_TIGER_3IN1,
}, {
.vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x1043,
+ .subdevice = 0x48cd,
+ .driver_data = SAA7134_BOARD_ASUSTeK_PS3_100,
+ }, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
.subvendor = 0x17de,
.subdevice = 0x7128,
@@ -7347,6 +7383,7 @@ int saa7134_board_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_KWORLD_TERMINATOR:
case SAA7134_BOARD_SEDNA_PC_TV_CARDBUS:
case SAA7134_BOARD_FLYDVBT_LR301:
+ case SAA7134_BOARD_ASUSTeK_PS3_100:
case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
case SAA7134_BOARD_ASUSTeK_P7131_ANALOG:
@@ -7811,6 +7848,14 @@ int saa7134_board_init2(struct saa7134_dev *dev)
i2c_transfer(&dev->i2c_adap, &msg, 1);
break;
}
+ case SAA7134_BOARD_ASUSTeK_PS3_100:
+ {
+ u8 data[] = { 0x3c, 0x33, 0x60};
+ struct i2c_msg msg = {.addr = 0x0b, .flags = 0, .buf = data,
+ .len = sizeof(data)};
+ i2c_transfer(&dev->i2c_adap, &msg, 1);
+ break;
+ }
case SAA7134_BOARD_FLYDVB_TRIO:
{
u8 temp = 0;
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index aaa5c97a7216..5dfd826d734e 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -881,6 +881,20 @@ static struct tda1004x_config asus_tiger_3in1_config = {
.request_firmware = philips_tda1004x_request_firmware
};
+static struct tda1004x_config asus_ps3_100_config = {
+ .demod_address = 0x0b,
+ .invert = 1,
+ .invert_oclk = 0,
+ .xtal_freq = TDA10046_XTAL_16M,
+ .agc_config = TDA10046_AGC_TDA827X,
+ .gpio_config = TDA10046_GP11_I,
+ .if_freq = TDA10046_FREQ_045,
+ .i2c_gate = 0x4b,
+ .tuner_address = 0x61,
+ .antenna_switch = 1,
+ .request_firmware = philips_tda1004x_request_firmware
+};
+
/* ------------------------------------------------------------------
* special case: this card uses saa713x GPIO22 for the mode switch
*/
@@ -1647,6 +1661,31 @@ static int dvb_init(struct saa7134_dev *dev)
&dev->i2c_adap, 0, 0) == NULL) {
wprintk("%s: Asus Tiger 3in1, no lnbp21"
" found!\n", __func__);
+ goto dettach_frontend;
+ }
+ }
+ }
+ break;
+ case SAA7134_BOARD_ASUSTeK_PS3_100:
+ if (!use_frontend) { /* terrestrial */
+ if (configure_tda827x_fe(dev, &asus_ps3_100_config,
+ &tda827x_cfg_2) < 0)
+ goto dettach_frontend;
+ } else { /* satellite */
+ fe0->dvb.frontend = dvb_attach(tda10086_attach,
+ &flydvbs, &dev->i2c_adap);
+ if (fe0->dvb.frontend) {
+ if (dvb_attach(tda826x_attach,
+ fe0->dvb.frontend, 0x60,
+ &dev->i2c_adap, 0) == NULL) {
+ wprintk("%s: Asus My Cinema PS3-100, no "
+ "tda826x found!\n", __func__);
+ goto dettach_frontend;
+ }
+ if (dvb_attach(lnbp21_attach, fe0->dvb.frontend,
+ &dev->i2c_adap, 0, 0) == NULL) {
+ wprintk("%s: Asus My Cinema PS3-100, no lnbp21"
+ " found!\n", __func__);
goto dettach_frontend;
}
}
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 48d2878699b7..05c6e217d8a7 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -753,6 +753,13 @@ int saa7134_input_init1(struct saa7134_dev *dev)
mask_keycode = 0xffff;
raw_decode = true;
break;
+ case SAA7134_BOARD_ASUSTeK_PS3_100:
+ ir_codes = RC_MAP_ASUS_PS3_100;
+ mask_keydown = 0x0040000;
+ mask_keyup = 0x0040000;
+ mask_keycode = 0xffff;
+ raw_decode = true;
+ break;
case SAA7134_BOARD_ENCORE_ENLTV:
case SAA7134_BOARD_ENCORE_ENLTV_FM:
ir_codes = RC_MAP_ENCORE_ENLTV;
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 417034eb6ad2..6de10b1e7251 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -2036,7 +2036,7 @@ static int saa7134_s_tuner(struct file *file, void *priv,
mode = dev->thread.mode;
if (UNSET == mode) {
rx = saa7134_tvaudio_getstereo(dev);
- mode = saa7134_tvaudio_rx2mode(t->rxsubchans);
+ mode = saa7134_tvaudio_rx2mode(rx);
}
if (mode != t->audmode)
dev->thread.mode = t->audmode;
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index f625060e6a0f..89c8333736a2 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -332,6 +332,7 @@ struct saa7134_card_ir {
#define SAA7134_BOARD_BEHOLD_503FM 187
#define SAA7134_BOARD_SENSORAY811_911 188
#define SAA7134_BOARD_KWORLD_PC150U 189
+#define SAA7134_BOARD_ASUSTeK_PS3_100 190
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
diff --git a/drivers/media/video/saa7164/saa7164-vbi.c b/drivers/media/video/saa7164/saa7164-vbi.c
index 273cf807401c..d8e6c8f14079 100644
--- a/drivers/media/video/saa7164/saa7164-vbi.c
+++ b/drivers/media/video/saa7164/saa7164-vbi.c
@@ -952,7 +952,7 @@ static int saa7164_vbi_start_streaming(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_vbi_stop_port(port);
- if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ if (result != SAA_OK) {
printk(KERN_ERR "%s() pause/forced stop transition "
"failed, res = 0x%x\n", __func__, result);
}
@@ -971,7 +971,7 @@ static int saa7164_vbi_start_streaming(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_vbi_acquire_port(port);
result = saa7164_vbi_stop_port(port);
- if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ if (result != SAA_OK) {
printk(KERN_ERR "%s() run/forced stop transition "
"failed, res = 0x%x\n", __func__, result);
}
diff --git a/drivers/media/video/saa7164/saa7164.h b/drivers/media/video/saa7164/saa7164.h
index 742b34103b5d..8d120e3baf70 100644
--- a/drivers/media/video/saa7164/saa7164.h
+++ b/drivers/media/video/saa7164/saa7164.h
@@ -611,11 +611,6 @@ extern unsigned int saa_debug;
printk(KERN_WARNING "%s: " fmt, dev->name, ## arg);\
} while (0)
-#define log_err(fmt, arg...)\
- do { \
- printk(KERN_ERROR "%s: " fmt, dev->name, ## arg);\
- } while (0)
-
#define saa7164_readl(reg) readl(dev->lmmio + ((reg) >> 2))
#define saa7164_writel(reg, value) writel((value), dev->lmmio + ((reg) >> 2))
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 424dfacd263a..0baaf94db7e0 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -210,27 +210,33 @@ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
struct soc_camera_device *icd = container_of(vq, struct soc_camera_device, vb2_vidq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
- int bytes_per_line;
- unsigned int height;
if (fmt) {
const struct soc_camera_format_xlate *xlate = soc_camera_xlate_by_fourcc(icd,
fmt->fmt.pix.pixelformat);
+ unsigned int bytes_per_line;
+ int ret;
+
if (!xlate)
return -EINVAL;
- bytes_per_line = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
- xlate->host_fmt);
- height = fmt->fmt.pix.height;
+
+ ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
+ xlate->host_fmt);
+ if (ret < 0)
+ return ret;
+
+ bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret);
+
+ ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line,
+ fmt->fmt.pix.height);
+ if (ret < 0)
+ return ret;
+
+ sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret);
} else {
/* Called from VIDIOC_REQBUFS or in compatibility mode */
- bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
- height = icd->user_height;
+ sizes[0] = icd->sizeimage;
}
- if (bytes_per_line < 0)
- return bytes_per_line;
-
- sizes[0] = bytes_per_line * height;
alloc_ctxs[0] = pcdev->alloc_ctx;
@@ -336,21 +342,15 @@ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
ceu_write(pcdev, top1, phys_addr_top);
if (V4L2_FIELD_NONE != pcdev->field) {
- if (planar)
- phys_addr_bottom = phys_addr_top + icd->user_width;
- else
- phys_addr_bottom = phys_addr_top +
- soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
+ phys_addr_bottom = phys_addr_top + icd->bytesperline;
ceu_write(pcdev, bottom1, phys_addr_bottom);
}
if (planar) {
- phys_addr_top += icd->user_width *
- icd->user_height;
+ phys_addr_top += icd->bytesperline * icd->user_height;
ceu_write(pcdev, top2, phys_addr_top);
if (V4L2_FIELD_NONE != pcdev->field) {
- phys_addr_bottom = phys_addr_top + icd->user_width;
+ phys_addr_bottom = phys_addr_top + icd->bytesperline;
ceu_write(pcdev, bottom2, phys_addr_bottom);
}
}
@@ -377,13 +377,8 @@ static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
unsigned long size;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
- if (bytes_per_line < 0)
- goto error;
-
- size = icd->user_height * bytes_per_line;
+ size = icd->sizeimage;
if (vb2_plane_size(vb, 0) < size) {
dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
@@ -682,10 +677,7 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
in_width *= 2;
left_offset *= 2;
}
- cdwdr_width = width;
} else {
- int bytes_per_line = soc_mbus_bytes_per_line(width,
- icd->current_fmt->host_fmt);
unsigned int w_factor;
switch (icd->current_fmt->host_fmt->packing) {
@@ -698,13 +690,10 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
in_width = cam->width * w_factor;
left_offset *= w_factor;
-
- if (bytes_per_line < 0)
- cdwdr_width = width;
- else
- cdwdr_width = bytes_per_line;
}
+ cdwdr_width = icd->bytesperline;
+
height = icd->user_height;
in_height = cam->height;
if (V4L2_FIELD_NONE != pcdev->field) {
@@ -881,11 +870,13 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
- value |= pcdev->is_16bit ? 1 << 12 : 0;
- /* CSI2 mode */
- if (pcdev->pdata->csi2)
+ if (pcdev->pdata->csi2) /* CSI2 mode */
value |= 3 << 12;
+ else if (pcdev->is_16bit)
+ value |= 1 << 12;
+ else if (pcdev->pdata->flags & SH_CEU_FLAG_LOWER_8BIT)
+ value |= 2 << 12;
ceu_write(pcdev, CAMCR, value);
@@ -964,24 +955,28 @@ static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_1_5X8,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C,
}, {
.fourcc = V4L2_PIX_FMT_NV21,
.name = "NV21",
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_1_5X8,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C,
}, {
.fourcc = V4L2_PIX_FMT_NV16,
.name = "NV16",
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
}, {
.fourcc = V4L2_PIX_FMT_NV61,
.name = "NV61",
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
},
};
@@ -1845,6 +1840,8 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
return 0;
}
+#define CEU_CHDW_MAX 8188U /* Maximum line stride */
+
static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
@@ -1863,8 +1860,12 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (!xlate) {
- dev_warn(icd->parent, "Format %x not found\n", pixfmt);
- return -EINVAL;
+ xlate = icd->current_fmt;
+ dev_dbg(icd->parent, "Format %x not found, keeping %x\n",
+ pixfmt, xlate->host_fmt->fourcc);
+ pixfmt = xlate->host_fmt->fourcc;
+ pix->pixelformat = pixfmt;
+ pix->colorspace = icd->colorspace;
}
/* FIXME: calculate using depth and bus width */
@@ -1923,10 +1924,20 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
pix->width = width;
if (mf.height > height)
pix->height = height;
+
+ pix->bytesperline = max(pix->bytesperline, pix->width);
+ pix->bytesperline = min(pix->bytesperline, CEU_CHDW_MAX);
+ pix->bytesperline &= ~3;
+ break;
+
+ default:
+ /* Configurable stride isn't supported in pass-through mode. */
+ pix->bytesperline = 0;
}
pix->width &= ~3;
pix->height &= ~3;
+ pix->sizeimage = 0;
dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n",
__func__, ret, pix->pixelformat, pix->width, pix->height);
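As a worked illustration of the stride clamping above (inputs assumed): a 1280-pixel-wide NV12 line with a requested bytesperline of 1285 survives the max() against the width and the CEU_CHDW_MAX cap, and the final masking rounds it down to 1284:

/* Illustration only, assumed inputs: width 1280, requested stride 1285. */
unsigned int stride = 1285;

if (stride < 1280)
	stride = 1280;			/* never narrower than one image line */
if (stride > CEU_CHDW_MAX)
	stride = CEU_CHDW_MAX;		/* hardware limit on the line stride  */
stride &= ~3;				/* CEU wants a multiple of 4 -> 1284  */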
@@ -2145,6 +2156,7 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
pcdev->ici.nr = pdev->id;
pcdev->ici.drv_name = dev_name(&pdev->dev);
pcdev->ici.ops = &sh_mobile_ceu_host_ops;
+ pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE;
pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
if (IS_ERR(pcdev->alloc_ctx)) {
diff --git a/drivers/media/video/sh_vou.c b/drivers/media/video/sh_vou.c
index 9644bd861abc..8fd1874382c6 100644
--- a/drivers/media/video/sh_vou.c
+++ b/drivers/media/video/sh_vou.c
@@ -1390,6 +1390,10 @@ static int __devinit sh_vou_probe(struct platform_device *pdev)
vdev->v4l2_dev = &vou_dev->v4l2_dev;
vdev->release = video_device_release;
vdev->lock = &vou_dev->fop_lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags);
vou_dev->vdev = vdev;
video_set_drvdata(vdev, vou_dev);
diff --git a/drivers/media/video/smiapp-pll.c b/drivers/media/video/smiapp-pll.c
new file mode 100644
index 000000000000..a2e41a21dc65
--- /dev/null
+++ b/drivers/media/video/smiapp-pll.c
@@ -0,0 +1,418 @@
+/*
+ * drivers/media/video/smiapp-pll.c
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/gcd.h>
+#include <linux/lcm.h>
+#include <linux/module.h>
+
+#include "smiapp-pll.h"
+
+/* Return an even number or one. */
+static inline uint32_t clk_div_even(uint32_t a)
+{
+ return max_t(uint32_t, 1, a & ~1);
+}
+
+/* Round up to an even number or one. */
+static inline uint32_t clk_div_even_up(uint32_t a)
+{
+ if (a == 1)
+ return 1;
+ return (a + 1) & ~1;
+}
+
+static inline uint32_t is_one_or_even(uint32_t a)
+{
+ if (a == 1)
+ return 1;
+ if (a & 1)
+ return 0;
+
+ return 1;
+}
+
+static int bounds_check(struct device *dev, uint32_t val,
+ uint32_t min, uint32_t max, char *str)
+{
+ if (val >= min && val <= max)
+ return 0;
+
+ dev_warn(dev, "%s out of bounds: %d (%d--%d)\n", str, val, min, max);
+
+ return -EINVAL;
+}
+
+static void print_pll(struct device *dev, struct smiapp_pll *pll)
+{
+ dev_dbg(dev, "pre_pll_clk_div\t%d\n", pll->pre_pll_clk_div);
+ dev_dbg(dev, "pll_multiplier \t%d\n", pll->pll_multiplier);
+ if (pll->flags != SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
+ dev_dbg(dev, "op_sys_clk_div \t%d\n", pll->op_sys_clk_div);
+ dev_dbg(dev, "op_pix_clk_div \t%d\n", pll->op_pix_clk_div);
+ }
+ dev_dbg(dev, "vt_sys_clk_div \t%d\n", pll->vt_sys_clk_div);
+ dev_dbg(dev, "vt_pix_clk_div \t%d\n", pll->vt_pix_clk_div);
+
+ dev_dbg(dev, "ext_clk_freq_hz \t%d\n", pll->ext_clk_freq_hz);
+ dev_dbg(dev, "pll_ip_clk_freq_hz \t%d\n", pll->pll_ip_clk_freq_hz);
+ dev_dbg(dev, "pll_op_clk_freq_hz \t%d\n", pll->pll_op_clk_freq_hz);
+ if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
+ dev_dbg(dev, "op_sys_clk_freq_hz \t%d\n",
+ pll->op_sys_clk_freq_hz);
+ dev_dbg(dev, "op_pix_clk_freq_hz \t%d\n",
+ pll->op_pix_clk_freq_hz);
+ }
+ dev_dbg(dev, "vt_sys_clk_freq_hz \t%d\n", pll->vt_sys_clk_freq_hz);
+ dev_dbg(dev, "vt_pix_clk_freq_hz \t%d\n", pll->vt_pix_clk_freq_hz);
+}
+
+int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
+ struct smiapp_pll *pll)
+{
+ uint32_t sys_div;
+ uint32_t best_pix_div = INT_MAX >> 1;
+ uint32_t vt_op_binning_div;
+ uint32_t lane_op_clock_ratio;
+ uint32_t mul, div;
+ uint32_t more_mul_min, more_mul_max;
+ uint32_t more_mul_factor;
+ uint32_t min_vt_div, max_vt_div, vt_div;
+ uint32_t min_sys_div, max_sys_div;
+ unsigned int i;
+ int rval;
+
+ if (pll->flags & SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE)
+ lane_op_clock_ratio = pll->lanes;
+ else
+ lane_op_clock_ratio = 1;
+ dev_dbg(dev, "lane_op_clock_ratio: %d\n", lane_op_clock_ratio);
+
+ dev_dbg(dev, "binning: %dx%d\n", pll->binning_horizontal,
+ pll->binning_vertical);
+
+ /* CSI transfers 2 bits per clock per lane; thus times 2 */
+ pll->pll_op_clk_freq_hz = pll->link_freq * 2
+ * (pll->lanes / lane_op_clock_ratio);
+
+ /* Figure out limits for pre-pll divider based on extclk */
+ dev_dbg(dev, "min / max pre_pll_clk_div: %d / %d\n",
+ limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
+ limits->max_pre_pll_clk_div =
+ min_t(uint16_t, limits->max_pre_pll_clk_div,
+ clk_div_even(pll->ext_clk_freq_hz /
+ limits->min_pll_ip_freq_hz));
+ limits->min_pre_pll_clk_div =
+ max_t(uint16_t, limits->min_pre_pll_clk_div,
+ clk_div_even_up(
+ DIV_ROUND_UP(pll->ext_clk_freq_hz,
+ limits->max_pll_ip_freq_hz)));
+ dev_dbg(dev, "pre-pll check: min / max pre_pll_clk_div: %d / %d\n",
+ limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
+
+ i = gcd(pll->pll_op_clk_freq_hz, pll->ext_clk_freq_hz);
+ mul = div_u64(pll->pll_op_clk_freq_hz, i);
+ div = pll->ext_clk_freq_hz / i;
+ dev_dbg(dev, "mul %d / div %d\n", mul, div);
+
+ limits->min_pre_pll_clk_div =
+ max_t(uint16_t, limits->min_pre_pll_clk_div,
+ clk_div_even_up(
+ DIV_ROUND_UP(mul * pll->ext_clk_freq_hz,
+ limits->max_pll_op_freq_hz)));
+ dev_dbg(dev, "pll_op check: min / max pre_pll_clk_div: %d / %d\n",
+ limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
+
+ if (limits->min_pre_pll_clk_div > limits->max_pre_pll_clk_div) {
+ dev_err(dev, "unable to compute pre_pll divisor\n");
+ return -EINVAL;
+ }
+
+ pll->pre_pll_clk_div = limits->min_pre_pll_clk_div;
+
+ /*
+ * Get pre_pll_clk_div so that our pll_op_clk_freq_hz won't be
+ * too high.
+ */
+ dev_dbg(dev, "pre_pll_clk_div %d\n", pll->pre_pll_clk_div);
+
+ /* Don't go above max pll multiplier. */
+ more_mul_max = limits->max_pll_multiplier / mul;
+ dev_dbg(dev, "more_mul_max: max_pll_multiplier check: %d\n",
+ more_mul_max);
+ /* Don't go above max pll op frequency. */
+ more_mul_max =
+ min_t(int,
+ more_mul_max,
+ limits->max_pll_op_freq_hz
+ / (pll->ext_clk_freq_hz / pll->pre_pll_clk_div * mul));
+ dev_dbg(dev, "more_mul_max: max_pll_op_freq_hz check: %d\n",
+ more_mul_max);
+ /* Don't go above the division capability of op sys clock divider. */
+ more_mul_max = min(more_mul_max,
+ limits->max_op_sys_clk_div * pll->pre_pll_clk_div
+ / div);
+ dev_dbg(dev, "more_mul_max: max_op_sys_clk_div check: %d\n",
+ more_mul_max);
+ /* Ensure we won't go above min_pll_multiplier. */
+ more_mul_max = min(more_mul_max,
+ DIV_ROUND_UP(limits->max_pll_multiplier, mul));
+ dev_dbg(dev, "more_mul_max: min_pll_multiplier check: %d\n",
+ more_mul_max);
+
+ /* Ensure we won't go below min_pll_op_freq_hz. */
+ more_mul_min = DIV_ROUND_UP(limits->min_pll_op_freq_hz,
+ pll->ext_clk_freq_hz / pll->pre_pll_clk_div
+ * mul);
+ dev_dbg(dev, "more_mul_min: min_pll_op_freq_hz check: %d\n",
+ more_mul_min);
+ /* Ensure we won't go below min_pll_multiplier. */
+ more_mul_min = max(more_mul_min,
+ DIV_ROUND_UP(limits->min_pll_multiplier, mul));
+ dev_dbg(dev, "more_mul_min: min_pll_multiplier check: %d\n",
+ more_mul_min);
+
+ if (more_mul_min > more_mul_max) {
+ dev_warn(dev,
+ "unable to compute more_mul_min and more_mul_max");
+ return -EINVAL;
+ }
+
+ more_mul_factor = lcm(div, pll->pre_pll_clk_div) / div;
+ dev_dbg(dev, "more_mul_factor: %d\n", more_mul_factor);
+ more_mul_factor = lcm(more_mul_factor, limits->min_op_sys_clk_div);
+ dev_dbg(dev, "more_mul_factor: min_op_sys_clk_div: %d\n",
+ more_mul_factor);
+ i = roundup(more_mul_min, more_mul_factor);
+ if (!is_one_or_even(i))
+ i <<= 1;
+
+ dev_dbg(dev, "final more_mul: %d\n", i);
+ if (i > more_mul_max) {
+ dev_warn(dev, "final more_mul is bad, max %d", more_mul_max);
+ return -EINVAL;
+ }
+
+ pll->pll_multiplier = mul * i;
+ pll->op_sys_clk_div = div * i / pll->pre_pll_clk_div;
+ dev_dbg(dev, "op_sys_clk_div: %d\n", pll->op_sys_clk_div);
+
+ pll->pll_ip_clk_freq_hz = pll->ext_clk_freq_hz
+ / pll->pre_pll_clk_div;
+
+ pll->pll_op_clk_freq_hz = pll->pll_ip_clk_freq_hz
+ * pll->pll_multiplier;
+
+ /* Derive op_sys_clk_freq_hz from pll_op_clk_freq_hz. */
+ pll->op_sys_clk_freq_hz =
+ pll->pll_op_clk_freq_hz / pll->op_sys_clk_div;
+
+ pll->op_pix_clk_div = pll->bits_per_pixel;
+ dev_dbg(dev, "op_pix_clk_div: %d\n", pll->op_pix_clk_div);
+
+ pll->op_pix_clk_freq_hz =
+ pll->op_sys_clk_freq_hz / pll->op_pix_clk_div;
+
+ /*
+ * Some sensors perform analogue binning and some do this
+ * digitally. The ones doing this digitally can roughly be
+ * found out using this formula. The ones doing this digitally
+ * should run at higher clock rate, so smaller divisor is used
+ * on video timing side.
+ */
+ if (limits->min_line_length_pck_bin > limits->min_line_length_pck
+ / pll->binning_horizontal)
+ vt_op_binning_div = pll->binning_horizontal;
+ else
+ vt_op_binning_div = 1;
+ dev_dbg(dev, "vt_op_binning_div: %d\n", vt_op_binning_div);
+
+ /*
+ * Profile 2 supports vt_pix_clk_div in [4, 10]
+ *
+ * Horizontal binning can be used as a base for difference in
+ * divisors. One must make sure that horizontal blanking is
+ * enough to accommodate the CSI-2 sync codes.
+ *
+ * Take scaling factor into account as well.
+ *
+ * Find absolute limits for the factor of vt divider.
+ */
+ dev_dbg(dev, "scale_m: %d\n", pll->scale_m);
+ min_vt_div = DIV_ROUND_UP(pll->op_pix_clk_div * pll->op_sys_clk_div
+ * pll->scale_n,
+ lane_op_clock_ratio * vt_op_binning_div
+ * pll->scale_m);
+
+ /* Find smallest and biggest allowed vt divisor. */
+ dev_dbg(dev, "min_vt_div: %d\n", min_vt_div);
+ min_vt_div = max(min_vt_div,
+ DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
+ limits->max_vt_pix_clk_freq_hz));
+ dev_dbg(dev, "min_vt_div: max_vt_pix_clk_freq_hz: %d\n",
+ min_vt_div);
+ min_vt_div = max_t(uint32_t, min_vt_div,
+ limits->min_vt_pix_clk_div
+ * limits->min_vt_sys_clk_div);
+ dev_dbg(dev, "min_vt_div: min_vt_clk_div: %d\n", min_vt_div);
+
+ max_vt_div = limits->max_vt_sys_clk_div * limits->max_vt_pix_clk_div;
+ dev_dbg(dev, "max_vt_div: %d\n", max_vt_div);
+ max_vt_div = min(max_vt_div,
+ DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
+ limits->min_vt_pix_clk_freq_hz));
+ dev_dbg(dev, "max_vt_div: min_vt_pix_clk_freq_hz: %d\n",
+ max_vt_div);
+
+ /*
+ * Find limits for sys_clk_div. Not all values are possible
+ * with all values of pix_clk_div.
+ */
+ min_sys_div = limits->min_vt_sys_clk_div;
+ dev_dbg(dev, "min_sys_div: %d\n", min_sys_div);
+ min_sys_div = max(min_sys_div,
+ DIV_ROUND_UP(min_vt_div,
+ limits->max_vt_pix_clk_div));
+ dev_dbg(dev, "min_sys_div: max_vt_pix_clk_div: %d\n", min_sys_div);
+ min_sys_div = max(min_sys_div,
+ pll->pll_op_clk_freq_hz
+ / limits->max_vt_sys_clk_freq_hz);
+ dev_dbg(dev, "min_sys_div: max_pll_op_clk_freq_hz: %d\n", min_sys_div);
+ min_sys_div = clk_div_even_up(min_sys_div);
+ dev_dbg(dev, "min_sys_div: one or even: %d\n", min_sys_div);
+
+ max_sys_div = limits->max_vt_sys_clk_div;
+ dev_dbg(dev, "max_sys_div: %d\n", max_sys_div);
+ max_sys_div = min(max_sys_div,
+ DIV_ROUND_UP(max_vt_div,
+ limits->min_vt_pix_clk_div));
+ dev_dbg(dev, "max_sys_div: min_vt_pix_clk_div: %d\n", max_sys_div);
+ max_sys_div = min(max_sys_div,
+ DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
+ limits->min_vt_pix_clk_freq_hz));
+ dev_dbg(dev, "max_sys_div: min_vt_pix_clk_freq_hz: %d\n", max_sys_div);
+
+ /*
+ * Find pix_div such that a legal pix_div * sys_div results
+ * in a value which is not smaller than div, the desired
+ * divisor.
+ */
+ for (vt_div = min_vt_div; vt_div <= max_vt_div;
+ vt_div += 2 - (vt_div & 1)) {
+ for (sys_div = min_sys_div;
+ sys_div <= max_sys_div;
+ sys_div += 2 - (sys_div & 1)) {
+ int pix_div = DIV_ROUND_UP(vt_div, sys_div);
+
+ if (pix_div < limits->min_vt_pix_clk_div
+ || pix_div > limits->max_vt_pix_clk_div) {
+ dev_dbg(dev,
+ "pix_div %d too small or too big (%d--%d)\n",
+ pix_div,
+ limits->min_vt_pix_clk_div,
+ limits->max_vt_pix_clk_div);
+ continue;
+ }
+
+ /* Check if this one is better. */
+ if (pix_div * sys_div
+ <= roundup(min_vt_div, best_pix_div))
+ best_pix_div = pix_div;
+ }
+ if (best_pix_div < INT_MAX >> 1)
+ break;
+ }
+
+ pll->vt_sys_clk_div = DIV_ROUND_UP(min_vt_div, best_pix_div);
+ pll->vt_pix_clk_div = best_pix_div;
+
+ pll->vt_sys_clk_freq_hz =
+ pll->pll_op_clk_freq_hz / pll->vt_sys_clk_div;
+ pll->vt_pix_clk_freq_hz =
+ pll->vt_sys_clk_freq_hz / pll->vt_pix_clk_div;
+
+ pll->pixel_rate_csi =
+ pll->op_pix_clk_freq_hz * lane_op_clock_ratio;
+
+ print_pll(dev, pll);
+
+ rval = bounds_check(dev, pll->pre_pll_clk_div,
+ limits->min_pre_pll_clk_div,
+ limits->max_pre_pll_clk_div, "pre_pll_clk_div");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->pll_ip_clk_freq_hz,
+ limits->min_pll_ip_freq_hz, limits->max_pll_ip_freq_hz,
+ "pll_ip_clk_freq_hz");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->pll_multiplier,
+ limits->min_pll_multiplier, limits->max_pll_multiplier,
+ "pll_multiplier");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->pll_op_clk_freq_hz,
+ limits->min_pll_op_freq_hz, limits->max_pll_op_freq_hz,
+ "pll_op_clk_freq_hz");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->op_sys_clk_div,
+ limits->min_op_sys_clk_div, limits->max_op_sys_clk_div,
+ "op_sys_clk_div");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->op_pix_clk_div,
+ limits->min_op_pix_clk_div, limits->max_op_pix_clk_div,
+ "op_pix_clk_div");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->op_sys_clk_freq_hz,
+ limits->min_op_sys_clk_freq_hz,
+ limits->max_op_sys_clk_freq_hz,
+ "op_sys_clk_freq_hz");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->op_pix_clk_freq_hz,
+ limits->min_op_pix_clk_freq_hz,
+ limits->max_op_pix_clk_freq_hz,
+ "op_pix_clk_freq_hz");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->vt_sys_clk_freq_hz,
+ limits->min_vt_sys_clk_freq_hz,
+ limits->max_vt_sys_clk_freq_hz,
+ "vt_sys_clk_freq_hz");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->vt_pix_clk_freq_hz,
+ limits->min_vt_pix_clk_freq_hz,
+ limits->max_vt_pix_clk_freq_hz,
+ "vt_pix_clk_freq_hz");
+
+ return rval;
+}
+EXPORT_SYMBOL_GPL(smiapp_pll_calculate);
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>");
+MODULE_DESCRIPTION("Generic SMIA/SMIA++ PLL calculator");
+MODULE_LICENSE("GPL");
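Reading the calculation above, the resulting clock tree is a simple divide/multiply chain; the sketch below uses assumed numbers (a 9.6 MHz external clock and arbitrarily chosen, in-range dividers) purely to show how the fields relate:

/* Illustration with assumed inputs; mirrors the formulas in the code above. */
uint32_t ext_clk_freq_hz = 9600000;			/* assumed xtal   */
uint16_t pre_pll_clk_div = 1, pll_multiplier = 93;
uint16_t op_sys_clk_div = 2, op_pix_clk_div = 10;	/* 10 bits/pixel  */

uint32_t pll_ip_clk_freq_hz = ext_clk_freq_hz / pre_pll_clk_div;   /*   9.6 MHz */
uint32_t pll_op_clk_freq_hz = pll_ip_clk_freq_hz * pll_multiplier; /* 892.8 MHz */
uint32_t op_sys_clk_freq_hz = pll_op_clk_freq_hz / op_sys_clk_div; /* 446.4 MHz */
uint32_t op_pix_clk_freq_hz = op_sys_clk_freq_hz / op_pix_clk_div; /* 44.64 MHz */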
diff --git a/drivers/media/video/smiapp-pll.h b/drivers/media/video/smiapp-pll.h
new file mode 100644
index 000000000000..9eab63f23afb
--- /dev/null
+++ b/drivers/media/video/smiapp-pll.h
@@ -0,0 +1,103 @@
+/*
+ * drivers/media/video/smiapp-pll.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef SMIAPP_PLL_H
+#define SMIAPP_PLL_H
+
+#include <linux/device.h>
+
+struct smiapp_pll {
+ uint8_t lanes;
+ uint8_t binning_horizontal;
+ uint8_t binning_vertical;
+ uint8_t scale_m;
+ uint8_t scale_n;
+ uint8_t bits_per_pixel;
+ uint16_t flags;
+ uint32_t link_freq;
+
+ uint16_t pre_pll_clk_div;
+ uint16_t pll_multiplier;
+ uint16_t op_sys_clk_div;
+ uint16_t op_pix_clk_div;
+ uint16_t vt_sys_clk_div;
+ uint16_t vt_pix_clk_div;
+
+ uint32_t ext_clk_freq_hz;
+ uint32_t pll_ip_clk_freq_hz;
+ uint32_t pll_op_clk_freq_hz;
+ uint32_t op_sys_clk_freq_hz;
+ uint32_t op_pix_clk_freq_hz;
+ uint32_t vt_sys_clk_freq_hz;
+ uint32_t vt_pix_clk_freq_hz;
+
+ uint32_t pixel_rate_csi;
+};
+
+struct smiapp_pll_limits {
+ /* Strict PLL limits */
+ uint32_t min_ext_clk_freq_hz;
+ uint32_t max_ext_clk_freq_hz;
+ uint16_t min_pre_pll_clk_div;
+ uint16_t max_pre_pll_clk_div;
+ uint32_t min_pll_ip_freq_hz;
+ uint32_t max_pll_ip_freq_hz;
+ uint16_t min_pll_multiplier;
+ uint16_t max_pll_multiplier;
+ uint32_t min_pll_op_freq_hz;
+ uint32_t max_pll_op_freq_hz;
+
+ uint16_t min_vt_sys_clk_div;
+ uint16_t max_vt_sys_clk_div;
+ uint32_t min_vt_sys_clk_freq_hz;
+ uint32_t max_vt_sys_clk_freq_hz;
+ uint16_t min_vt_pix_clk_div;
+ uint16_t max_vt_pix_clk_div;
+ uint32_t min_vt_pix_clk_freq_hz;
+ uint32_t max_vt_pix_clk_freq_hz;
+
+ uint16_t min_op_sys_clk_div;
+ uint16_t max_op_sys_clk_div;
+ uint32_t min_op_sys_clk_freq_hz;
+ uint32_t max_op_sys_clk_freq_hz;
+ uint16_t min_op_pix_clk_div;
+ uint16_t max_op_pix_clk_div;
+ uint32_t min_op_pix_clk_freq_hz;
+ uint32_t max_op_pix_clk_freq_hz;
+
+ /* Other relevant limits */
+ uint32_t min_line_length_pck_bin;
+ uint32_t min_line_length_pck;
+};
+
+/* normally the op pix clock covers all lanes in total */
+#define SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE (1 << 0)
+#define SMIAPP_PLL_FLAG_NO_OP_CLOCKS (1 << 1)
+
+struct device;
+
+int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
+ struct smiapp_pll *pll);
+
+#endif /* SMIAPP_PLL_H */
diff --git a/drivers/media/video/smiapp/Kconfig b/drivers/media/video/smiapp/Kconfig
new file mode 100644
index 000000000000..f7b35ff443bf
--- /dev/null
+++ b/drivers/media/video/smiapp/Kconfig
@@ -0,0 +1,6 @@
+config VIDEO_SMIAPP
+ tristate "SMIA++/SMIA sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select VIDEO_SMIAPP_PLL
+ ---help---
+ This is a generic driver for SMIA++/SMIA camera modules.
diff --git a/drivers/media/video/smiapp/Makefile b/drivers/media/video/smiapp/Makefile
new file mode 100644
index 000000000000..36b0cfa2c541
--- /dev/null
+++ b/drivers/media/video/smiapp/Makefile
@@ -0,0 +1,5 @@
+smiapp-objs += smiapp-core.o smiapp-regs.o \
+ smiapp-quirk.o smiapp-limits.o
+obj-$(CONFIG_VIDEO_SMIAPP) += smiapp.o
+
+ccflags-y += -Idrivers/media/video
diff --git a/drivers/media/video/smiapp/smiapp-core.c b/drivers/media/video/smiapp/smiapp-core.c
new file mode 100644
index 000000000000..f518026cb67b
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-core.c
@@ -0,0 +1,2894 @@
+/*
+ * drivers/media/video/smiapp/smiapp-core.c
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2010--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * Based on smiapp driver by Vimarsh Zutshi
+ * Based on jt8ev1.c by Vimarsh Zutshi
+ * Based on smia-sensor.c by Tuukka Toivonen <tuukkat76@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-device.h>
+
+#include "smiapp.h"
+
+#define SMIAPP_ALIGN_DIM(dim, flags) \
+ ((flags) & V4L2_SUBDEV_SEL_FLAG_SIZE_GE \
+ ? ALIGN((dim), 2) \
+ : (dim) & ~1)
+
+/*
+ * smiapp_module_idents - supported camera modules
+ */
+static const struct smiapp_module_ident smiapp_module_idents[] = {
+ SMIAPP_IDENT_L(0x01, 0x022b, -1, "vs6555"),
+ SMIAPP_IDENT_L(0x01, 0x022e, -1, "vw6558"),
+ SMIAPP_IDENT_L(0x07, 0x7698, -1, "ovm7698"),
+ SMIAPP_IDENT_L(0x0b, 0x4242, -1, "smiapp-003"),
+ SMIAPP_IDENT_L(0x0c, 0x208a, -1, "tcm8330md"),
+ SMIAPP_IDENT_LQ(0x0c, 0x2134, -1, "tcm8500md", &smiapp_tcm8500md_quirk),
+ SMIAPP_IDENT_L(0x0c, 0x213e, -1, "et8en2"),
+ SMIAPP_IDENT_L(0x0c, 0x2184, -1, "tcm8580md"),
+ SMIAPP_IDENT_LQ(0x0c, 0x560f, -1, "jt8ew9", &smiapp_jt8ew9_quirk),
+ SMIAPP_IDENT_LQ(0x10, 0x4141, -1, "jt8ev1", &smiapp_jt8ev1_quirk),
+ SMIAPP_IDENT_LQ(0x10, 0x4241, -1, "imx125es", &smiapp_imx125es_quirk),
+};
+
+/*
+ *
+ * Dynamic Capability Identification
+ *
+ */
+
+static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ u32 fmt_model_type, fmt_model_subtype, ncol_desc, nrow_desc;
+ unsigned int i;
+ int rval;
+ int line_count = 0;
+ int embedded_start = -1, embedded_end = -1;
+ int image_start = 0;
+
+ rval = smiapp_read(sensor, SMIAPP_REG_U8_FRAME_FORMAT_MODEL_TYPE,
+ &fmt_model_type);
+ if (rval)
+ return rval;
+
+ rval = smiapp_read(sensor, SMIAPP_REG_U8_FRAME_FORMAT_MODEL_SUBTYPE,
+ &fmt_model_subtype);
+ if (rval)
+ return rval;
+
+ ncol_desc = (fmt_model_subtype
+ & SMIAPP_FRAME_FORMAT_MODEL_SUBTYPE_NCOLS_MASK)
+ >> SMIAPP_FRAME_FORMAT_MODEL_SUBTYPE_NCOLS_SHIFT;
+ nrow_desc = fmt_model_subtype
+ & SMIAPP_FRAME_FORMAT_MODEL_SUBTYPE_NROWS_MASK;
+
+ dev_dbg(&client->dev, "format_model_type %s\n",
+ fmt_model_type == SMIAPP_FRAME_FORMAT_MODEL_TYPE_2BYTE
+ ? "2 byte" :
+ fmt_model_type == SMIAPP_FRAME_FORMAT_MODEL_TYPE_4BYTE
+ ? "4 byte" : "is simply bad");
+
+ for (i = 0; i < ncol_desc + nrow_desc; i++) {
+ u32 desc;
+ u32 pixelcode;
+ u32 pixels;
+ char *which;
+ char *what;
+
+ if (fmt_model_type == SMIAPP_FRAME_FORMAT_MODEL_TYPE_2BYTE) {
+ rval = smiapp_read(
+ sensor,
+ SMIAPP_REG_U16_FRAME_FORMAT_DESCRIPTOR_2(i),
+ &desc);
+ if (rval)
+ return rval;
+
+ pixelcode =
+ (desc
+ & SMIAPP_FRAME_FORMAT_DESC_2_PIXELCODE_MASK)
+ >> SMIAPP_FRAME_FORMAT_DESC_2_PIXELCODE_SHIFT;
+ pixels = desc & SMIAPP_FRAME_FORMAT_DESC_2_PIXELS_MASK;
+ } else if (fmt_model_type
+ == SMIAPP_FRAME_FORMAT_MODEL_TYPE_4BYTE) {
+ rval = smiapp_read(
+ sensor,
+ SMIAPP_REG_U32_FRAME_FORMAT_DESCRIPTOR_4(i),
+ &desc);
+ if (rval)
+ return rval;
+
+ pixelcode =
+ (desc
+ & SMIAPP_FRAME_FORMAT_DESC_4_PIXELCODE_MASK)
+ >> SMIAPP_FRAME_FORMAT_DESC_4_PIXELCODE_SHIFT;
+ pixels = desc & SMIAPP_FRAME_FORMAT_DESC_4_PIXELS_MASK;
+ } else {
+ dev_dbg(&client->dev,
+ "invalid frame format model type %d\n",
+ fmt_model_type);
+ return -EINVAL;
+ }
+
+ if (i < ncol_desc)
+ which = "columns";
+ else
+ which = "rows";
+
+ switch (pixelcode) {
+ case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_EMBEDDED:
+ what = "embedded";
+ break;
+ case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_DUMMY:
+ what = "dummy";
+ break;
+ case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_BLACK:
+ what = "black";
+ break;
+ case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_DARK:
+ what = "dark";
+ break;
+ case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_VISIBLE:
+ what = "visible";
+ break;
+ default:
+ what = "invalid";
+ dev_dbg(&client->dev, "pixelcode %d\n", pixelcode);
+ break;
+ }
+
+ dev_dbg(&client->dev, "%s pixels: %d %s\n",
+ what, pixels, which);
+
+ if (i < ncol_desc)
+ continue;
+
+ /* Handle row descriptors */
+ if (pixelcode
+ == SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_EMBEDDED) {
+ embedded_start = line_count;
+ } else {
+ if (pixelcode == SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_VISIBLE
+ || pixels >= sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES] / 2)
+ image_start = line_count;
+ if (embedded_start != -1 && embedded_end == -1)
+ embedded_end = line_count;
+ }
+ line_count += pixels;
+ }
+
+ if (embedded_start == -1 || embedded_end == -1) {
+ embedded_start = 0;
+ embedded_end = 0;
+ }
+
+ dev_dbg(&client->dev, "embedded data from lines %d to %d\n",
+ embedded_start, embedded_end);
+ dev_dbg(&client->dev, "image data starts at line %d\n", image_start);
+
+ return 0;
+}
+
+static int smiapp_pll_configure(struct smiapp_sensor *sensor)
+{
+ struct smiapp_pll *pll = &sensor->pll;
+ int rval;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_VT_PIX_CLK_DIV, pll->vt_pix_clk_div);
+ if (rval < 0)
+ return rval;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_VT_SYS_CLK_DIV, pll->vt_sys_clk_div);
+ if (rval < 0)
+ return rval;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_PRE_PLL_CLK_DIV, pll->pre_pll_clk_div);
+ if (rval < 0)
+ return rval;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_PLL_MULTIPLIER, pll->pll_multiplier);
+ if (rval < 0)
+ return rval;
+
+ /* Lane op clock ratio does not apply here. */
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U32_REQUESTED_LINK_BIT_RATE_MBPS,
+ DIV_ROUND_UP(pll->op_sys_clk_freq_hz, 1000000 / 256 / 256));
+ if (rval < 0 || sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
+ return rval;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_OP_PIX_CLK_DIV, pll->op_pix_clk_div);
+ if (rval < 0)
+ return rval;
+
+ return smiapp_write(
+ sensor, SMIAPP_REG_U16_OP_SYS_CLK_DIV, pll->op_sys_clk_div);
+}
+
+static int smiapp_pll_update(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ struct smiapp_pll_limits lim = {
+ .min_pre_pll_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_PRE_PLL_CLK_DIV],
+ .max_pre_pll_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_PRE_PLL_CLK_DIV],
+ .min_pll_ip_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_PLL_IP_FREQ_HZ],
+ .max_pll_ip_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_PLL_IP_FREQ_HZ],
+ .min_pll_multiplier = sensor->limits[SMIAPP_LIMIT_MIN_PLL_MULTIPLIER],
+ .max_pll_multiplier = sensor->limits[SMIAPP_LIMIT_MAX_PLL_MULTIPLIER],
+ .min_pll_op_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_PLL_OP_FREQ_HZ],
+ .max_pll_op_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_PLL_OP_FREQ_HZ],
+
+ .min_op_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV],
+ .max_op_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV],
+ .min_op_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV],
+ .max_op_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV],
+ .min_op_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_FREQ_HZ],
+ .max_op_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_FREQ_HZ],
+ .min_op_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_FREQ_HZ],
+ .max_op_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_FREQ_HZ],
+
+ .min_vt_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_DIV],
+ .max_vt_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_DIV],
+ .min_vt_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_DIV],
+ .max_vt_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_DIV],
+ .min_vt_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_FREQ_HZ],
+ .max_vt_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_FREQ_HZ],
+ .min_vt_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_FREQ_HZ],
+ .max_vt_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_FREQ_HZ],
+
+ .min_line_length_pck_bin = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN],
+ .min_line_length_pck = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK],
+ };
+ struct smiapp_pll *pll = &sensor->pll;
+ int rval;
+
+ memset(&sensor->pll, 0, sizeof(sensor->pll));
+
+ pll->lanes = sensor->platform_data->lanes;
+ pll->ext_clk_freq_hz = sensor->platform_data->ext_clk;
+
+ if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0) {
+ /*
+ * Fill in operational clock divisors limits from the
+ * video timing ones. On profile 0 sensors the
+ * requirements regarding them are essentially the
+ * same as on VT ones.
+ */
+ lim.min_op_sys_clk_div = lim.min_vt_sys_clk_div;
+ lim.max_op_sys_clk_div = lim.max_vt_sys_clk_div;
+ lim.min_op_pix_clk_div = lim.min_vt_pix_clk_div;
+ lim.max_op_pix_clk_div = lim.max_vt_pix_clk_div;
+ lim.min_op_sys_clk_freq_hz = lim.min_vt_sys_clk_freq_hz;
+ lim.max_op_sys_clk_freq_hz = lim.max_vt_sys_clk_freq_hz;
+ lim.min_op_pix_clk_freq_hz = lim.min_vt_pix_clk_freq_hz;
+ lim.max_op_pix_clk_freq_hz = lim.max_vt_pix_clk_freq_hz;
+ /* Profile 0 sensors have no separate OP clock branch. */
+ pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
+ }
+
+ if (smiapp_needs_quirk(sensor,
+ SMIAPP_QUIRK_FLAG_OP_PIX_CLOCK_PER_LANE))
+ pll->flags |= SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE;
+
+ pll->binning_horizontal = sensor->binning_horizontal;
+ pll->binning_vertical = sensor->binning_vertical;
+ pll->link_freq =
+ sensor->link_freq->qmenu_int[sensor->link_freq->val];
+ pll->scale_m = sensor->scale_m;
+ pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+ pll->bits_per_pixel = sensor->csi_format->compressed;
+
+ rval = smiapp_pll_calculate(&client->dev, &lim, pll);
+ if (rval < 0)
+ return rval;
+
+ sensor->pixel_rate_parray->cur.val64 = pll->vt_pix_clk_freq_hz;
+ sensor->pixel_rate_csi->cur.val64 = pll->pixel_rate_csi;
+
+ return 0;
+}
+
+
+/*
+ *
+ * V4L2 Controls handling
+ *
+ */
+
+static void __smiapp_update_exposure_limits(struct smiapp_sensor *sensor)
+{
+ struct v4l2_ctrl *ctrl = sensor->exposure;
+ int max;
+
+ max = sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height
+ + sensor->vblank->val
+ - sensor->limits[SMIAPP_LIMIT_COARSE_INTEGRATION_TIME_MAX_MARGIN];
+
+ ctrl->maximum = max;
+ if (ctrl->default_value > max)
+ ctrl->default_value = max;
+ if (ctrl->val > max)
+ ctrl->val = max;
+ if (ctrl->cur.val > max)
+ ctrl->cur.val = max;
+}
+
+/*
+ * Order matters.
+ *
+ * 1. Bits-per-pixel, descending.
+ * 2. Bits-per-pixel compressed, descending.
+ * 3. Pixel order, same as in pixel_order_str. Formats for all four pixel
+ * orders must be defined.
+ */
+static const struct smiapp_csi_data_format smiapp_csi_data_formats[] = {
+ { V4L2_MBUS_FMT_SGRBG12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_GRBG, },
+ { V4L2_MBUS_FMT_SRGGB12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_RGGB, },
+ { V4L2_MBUS_FMT_SBGGR12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_BGGR, },
+ { V4L2_MBUS_FMT_SGBRG12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_GBRG, },
+ { V4L2_MBUS_FMT_SGRBG10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_GRBG, },
+ { V4L2_MBUS_FMT_SRGGB10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_RGGB, },
+ { V4L2_MBUS_FMT_SBGGR10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_BGGR, },
+ { V4L2_MBUS_FMT_SGBRG10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_GBRG, },
+ { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_GRBG, },
+ { V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_RGGB, },
+ { V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_BGGR, },
+ { V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_GBRG, },
+ { V4L2_MBUS_FMT_SGRBG8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_GRBG, },
+ { V4L2_MBUS_FMT_SRGGB8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_RGGB, },
+ { V4L2_MBUS_FMT_SBGGR8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_BGGR, },
+ { V4L2_MBUS_FMT_SGBRG8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_GBRG, },
+};
+
+const char *pixel_order_str[] = { "GRBG", "RGGB", "BGGR", "GBRG" };
+
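+ /*
+ * Index of a format in smiapp_csi_data_formats[], derived from a
+ * pointer into the table. Because the table is grouped in fours,
+ * the two low bits of the index encode the pixel order.
+ */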
+#define to_csi_format_idx(fmt) (((unsigned long)(fmt) \
+ - (unsigned long)smiapp_csi_data_formats) \
+ / sizeof(*smiapp_csi_data_formats))
+
+static u32 smiapp_pixel_order(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int flip = 0;
+
+ if (sensor->hflip) {
+ if (sensor->hflip->val)
+ flip |= SMIAPP_IMAGE_ORIENTATION_HFLIP;
+
+ if (sensor->vflip->val)
+ flip |= SMIAPP_IMAGE_ORIENTATION_VFLIP;
+ }
+
+ flip ^= sensor->hvflip_inv_mask;
+
+ dev_dbg(&client->dev, "flip %d\n", flip);
+ return sensor->default_pixel_order ^ flip;
+}
+
+static void smiapp_update_mbus_formats(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int csi_format_idx =
+ to_csi_format_idx(sensor->csi_format) & ~3;
+ unsigned int internal_csi_format_idx =
+ to_csi_format_idx(sensor->internal_csi_format) & ~3;
+ unsigned int pixel_order = smiapp_pixel_order(sensor);
+
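+ /*
+ * default_mbus_frame_fmts has bits set only for the GRBG entries
+ * of the format table, so shifting it by the pixel order moves
+ * each bit onto the matching Bayer variant. Likewise, masking out
+ * the two low bits above selects the start of a four-entry group
+ * and adding the pixel order picks the right entry.
+ */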
+ sensor->mbus_frame_fmts =
+ sensor->default_mbus_frame_fmts << pixel_order;
+ sensor->csi_format =
+ &smiapp_csi_data_formats[csi_format_idx + pixel_order];
+ sensor->internal_csi_format =
+ &smiapp_csi_data_formats[internal_csi_format_idx
+ + pixel_order];
+
+ BUG_ON(max(internal_csi_format_idx, csi_format_idx) + pixel_order
+ >= ARRAY_SIZE(smiapp_csi_data_formats));
+ BUG_ON(min(internal_csi_format_idx, csi_format_idx) < 0);
+
+ dev_dbg(&client->dev, "new pixel order %s\n",
+ pixel_order_str[pixel_order]);
+}
+
+static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct smiapp_sensor *sensor =
+ container_of(ctrl->handler, struct smiapp_subdev, ctrl_handler)
+ ->sensor;
+ u32 orient = 0;
+ int exposure;
+ int rval;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ return smiapp_write(
+ sensor,
+ SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GLOBAL, ctrl->val);
+
+ case V4L2_CID_EXPOSURE:
+ return smiapp_write(
+ sensor,
+ SMIAPP_REG_U16_COARSE_INTEGRATION_TIME, ctrl->val);
+
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ if (sensor->streaming)
+ return -EBUSY;
+
+ if (sensor->hflip->val)
+ orient |= SMIAPP_IMAGE_ORIENTATION_HFLIP;
+
+ if (sensor->vflip->val)
+ orient |= SMIAPP_IMAGE_ORIENTATION_VFLIP;
+
+ orient ^= sensor->hvflip_inv_mask;
+ rval = smiapp_write(sensor,
+ SMIAPP_REG_U8_IMAGE_ORIENTATION,
+ orient);
+ if (rval < 0)
+ return rval;
+
+ smiapp_update_mbus_formats(sensor);
+
+ return 0;
+
+ case V4L2_CID_VBLANK:
+ exposure = sensor->exposure->val;
+
+ __smiapp_update_exposure_limits(sensor);
+
+ if (exposure > sensor->exposure->maximum) {
+ sensor->exposure->val =
+ sensor->exposure->maximum;
+ rval = smiapp_set_ctrl(
+ sensor->exposure);
+ if (rval < 0)
+ return rval;
+ }
+
+ return smiapp_write(
+ sensor, SMIAPP_REG_U16_FRAME_LENGTH_LINES,
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height
+ + ctrl->val);
+
+ case V4L2_CID_HBLANK:
+ return smiapp_write(
+ sensor, SMIAPP_REG_U16_LINE_LENGTH_PCK,
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width
+ + ctrl->val);
+
+ case V4L2_CID_LINK_FREQ:
+ if (sensor->streaming)
+ return -EBUSY;
+
+ return smiapp_pll_update(sensor);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ctrl_ops smiapp_ctrl_ops = {
+ .s_ctrl = smiapp_set_ctrl,
+};
+
+static int smiapp_init_controls(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int max;
+ int rval;
+
+ rval = v4l2_ctrl_handler_init(&sensor->pixel_array->ctrl_handler, 7);
+ if (rval)
+ return rval;
+ sensor->pixel_array->ctrl_handler.lock = &sensor->mutex;
+
+ sensor->analog_gain = v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_ANALOGUE_GAIN,
+ sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MIN],
+ sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MAX],
+ max(sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_STEP], 1U),
+ sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MIN]);
+
+ /* Exposure limits will be updated soon, use just something here. */
+ sensor->exposure = v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 0, 1, 0);
+
+ sensor->hflip = v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ sensor->vflip = v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ sensor->vblank = v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_VBLANK, 0, 1, 1, 0);
+
+ if (sensor->vblank)
+ sensor->vblank->flags |= V4L2_CTRL_FLAG_UPDATE;
+
+ sensor->hblank = v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_HBLANK, 0, 1, 1, 0);
+
+ if (sensor->hblank)
+ sensor->hblank->flags |= V4L2_CTRL_FLAG_UPDATE;
+
+ sensor->pixel_rate_parray = v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0, 0, 1, 0);
+
+ if (sensor->pixel_array->ctrl_handler.error) {
+ dev_err(&client->dev,
+ "pixel array controls initialization failed (%d)\n",
+ sensor->pixel_array->ctrl_handler.error);
+ rval = sensor->pixel_array->ctrl_handler.error;
+ goto error;
+ }
+
+ sensor->pixel_array->sd.ctrl_handler =
+ &sensor->pixel_array->ctrl_handler;
+
+ v4l2_ctrl_cluster(2, &sensor->hflip);
+
+ rval = v4l2_ctrl_handler_init(&sensor->src->ctrl_handler, 0);
+ if (rval)
+ goto error;
+ sensor->src->ctrl_handler.lock = &sensor->mutex;
+
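+ /*
+ * The op_sys_clock table is zero-terminated; count its entries to
+ * find the index of the last valid link frequency for the integer
+ * menu control below.
+ */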
+ for (max = 0; sensor->platform_data->op_sys_clock[max + 1]; max++);
+
+ sensor->link_freq = v4l2_ctrl_new_int_menu(
+ &sensor->src->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_LINK_FREQ, max, 0,
+ sensor->platform_data->op_sys_clock);
+
+ sensor->pixel_rate_csi = v4l2_ctrl_new_std(
+ &sensor->src->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0, 0, 1, 0);
+
+ if (sensor->src->ctrl_handler.error) {
+ dev_err(&client->dev,
+ "src controls initialization failed (%d)\n",
+ sensor->src->ctrl_handler.error);
+ rval = sensor->src->ctrl_handler.error;
+ goto error;
+ }
+
+ sensor->src->sd.ctrl_handler =
+ &sensor->src->ctrl_handler;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&sensor->pixel_array->ctrl_handler);
+ v4l2_ctrl_handler_free(&sensor->src->ctrl_handler);
+
+ return rval;
+}
+
+static void smiapp_free_controls(struct smiapp_sensor *sensor)
+{
+ unsigned int i;
+
+ for (i = 0; i < sensor->ssds_used; i++)
+ v4l2_ctrl_handler_free(&sensor->ssds[i].ctrl_handler);
+}
+
+static int smiapp_get_limits(struct smiapp_sensor *sensor, int const *limit,
+ unsigned int n)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int i;
+ u32 val;
+ int rval;
+
+ for (i = 0; i < n; i++) {
+ rval = smiapp_read(
+ sensor, smiapp_reg_limits[limit[i]].addr, &val);
+ if (rval)
+ return rval;
+ sensor->limits[limit[i]] = val;
+ dev_dbg(&client->dev, "0x%8.8x \"%s\" = %d, 0x%x\n",
+ smiapp_reg_limits[limit[i]].addr,
+ smiapp_reg_limits[limit[i]].what, val, val);
+ }
+
+ return 0;
+}
+
+static int smiapp_get_all_limits(struct smiapp_sensor *sensor)
+{
+ unsigned int i;
+ int rval;
+
+ for (i = 0; i < SMIAPP_LIMIT_LAST; i++) {
+ rval = smiapp_get_limits(sensor, &i, 1);
+ if (rval < 0)
+ return rval;
+ }
+
+ if (sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN] == 0)
+ smiapp_replace_limit(sensor, SMIAPP_LIMIT_SCALER_N_MIN, 16);
+
+ return 0;
+}
+
+static int smiapp_get_limits_binning(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ static u32 const limits[] = {
+ SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN,
+ SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN,
+ SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN,
+ SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN,
+ SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN,
+ SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN_BIN,
+ SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN,
+ };
+ static u32 const limits_replace[] = {
+ SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES,
+ SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES,
+ SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK,
+ SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK,
+ SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK,
+ SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN,
+ SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN,
+ };
+ unsigned int i;
+ int rval;
+
+ if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY] ==
+ SMIAPP_BINNING_CAPABILITY_NO) {
+ for (i = 0; i < ARRAY_SIZE(limits); i++)
+ sensor->limits[limits[i]] =
+ sensor->limits[limits_replace[i]];
+
+ return 0;
+ }
+
+ rval = smiapp_get_limits(sensor, limits, ARRAY_SIZE(limits));
+ if (rval < 0)
+ return rval;
+
+ /*
+ * Sanity check whether the binning limits are valid. If not,
+ * use the non-binning ones.
+ */
+ if (sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN]
+ && sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN]
+ && sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(limits); i++) {
+ dev_dbg(&client->dev,
+ "replace limit 0x%8.8x \"%s\" = %d, 0x%x\n",
+ smiapp_reg_limits[limits[i]].addr,
+ smiapp_reg_limits[limits[i]].what,
+ sensor->limits[limits_replace[i]],
+ sensor->limits[limits_replace[i]]);
+ sensor->limits[limits[i]] =
+ sensor->limits[limits_replace[i]];
+ }
+
+ return 0;
+}
+
+static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int type, n;
+ unsigned int i, pixel_order;
+ int rval;
+
+ rval = smiapp_read(
+ sensor, SMIAPP_REG_U8_DATA_FORMAT_MODEL_TYPE, &type);
+ if (rval)
+ return rval;
+
+ dev_dbg(&client->dev, "data_format_model_type %d\n", type);
+
+ rval = smiapp_read(sensor, SMIAPP_REG_U8_PIXEL_ORDER,
+ &pixel_order);
+ if (rval)
+ return rval;
+
+ if (pixel_order >= ARRAY_SIZE(pixel_order_str)) {
+ dev_dbg(&client->dev, "bad pixel order %d\n", pixel_order);
+ return -EINVAL;
+ }
+
+ dev_dbg(&client->dev, "pixel order %d (%s)\n", pixel_order,
+ pixel_order_str[pixel_order]);
+
+ switch (type) {
+ case SMIAPP_DATA_FORMAT_MODEL_TYPE_NORMAL:
+ n = SMIAPP_DATA_FORMAT_MODEL_TYPE_NORMAL_N;
+ break;
+ case SMIAPP_DATA_FORMAT_MODEL_TYPE_EXTENDED:
+ n = SMIAPP_DATA_FORMAT_MODEL_TYPE_EXTENDED_N;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sensor->default_pixel_order = pixel_order;
+ sensor->mbus_frame_fmts = 0;
+
+ for (i = 0; i < n; i++) {
+ unsigned int fmt, j;
+
+ rval = smiapp_read(
+ sensor,
+ SMIAPP_REG_U16_DATA_FORMAT_DESCRIPTOR(i), &fmt);
+ if (rval)
+ return rval;
+
+ dev_dbg(&client->dev, "bpp %d, compressed %d\n",
+ fmt >> 8, (u8)fmt);
+
+ for (j = 0; j < ARRAY_SIZE(smiapp_csi_data_formats); j++) {
+ const struct smiapp_csi_data_format *f =
+ &smiapp_csi_data_formats[j];
+
+ if (f->pixel_order != SMIAPP_PIXEL_ORDER_GRBG)
+ continue;
+
+ if (f->width != fmt >> 8 || f->compressed != (u8)fmt)
+ continue;
+
+ dev_dbg(&client->dev, "jolly good! %d\n", j);
+
+ sensor->default_mbus_frame_fmts |= 1 << j;
+ if (!sensor->csi_format) {
+ sensor->csi_format = f;
+ sensor->internal_csi_format = f;
+ }
+ }
+ }
+
+ if (!sensor->csi_format) {
+ dev_err(&client->dev, "no supported mbus code found\n");
+ return -EINVAL;
+ }
+
+ smiapp_update_mbus_formats(sensor);
+
+ return 0;
+}
+
+static void smiapp_update_blanking(struct smiapp_sensor *sensor)
+{
+ struct v4l2_ctrl *vblank = sensor->vblank;
+ struct v4l2_ctrl *hblank = sensor->hblank;
+
+ vblank->minimum =
+ max_t(int,
+ sensor->limits[SMIAPP_LIMIT_MIN_FRAME_BLANKING_LINES],
+ sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN] -
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height);
+ vblank->maximum =
+ sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN] -
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height;
+
+ vblank->val = clamp_t(int, vblank->val,
+ vblank->minimum, vblank->maximum);
+ vblank->default_value = vblank->minimum;
+ vblank->cur.val = vblank->val;
+
+ hblank->minimum =
+ max_t(int,
+ sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN] -
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width,
+ sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN]);
+ hblank->maximum =
+ sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN] -
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width;
+
+ hblank->val = clamp_t(int, hblank->val,
+ hblank->minimum, hblank->maximum);
+ hblank->default_value = hblank->minimum;
+ hblank->cur.val = hblank->val;
+
+ __smiapp_update_exposure_limits(sensor);
+}
+
+static int smiapp_update_mode(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int binning_mode;
+ int rval;
+
+ dev_dbg(&client->dev, "frame size: %dx%d\n",
+ sensor->src->crop[SMIAPP_PAD_SRC].width,
+ sensor->src->crop[SMIAPP_PAD_SRC].height);
+ dev_dbg(&client->dev, "csi format width: %d\n",
+ sensor->csi_format->width);
+
+ /* Binning has to be set up here; it affects limits */
+ if (sensor->binning_horizontal == 1 &&
+ sensor->binning_vertical == 1) {
+ binning_mode = 0;
+ } else {
+ u8 binning_type =
+ (sensor->binning_horizontal << 4)
+ | sensor->binning_vertical;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U8_BINNING_TYPE, binning_type);
+ if (rval < 0)
+ return rval;
+
+ binning_mode = 1;
+ }
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_BINNING_MODE, binning_mode);
+ if (rval < 0)
+ return rval;
+
+ /* Get updated limits due to binning */
+ rval = smiapp_get_limits_binning(sensor);
+ if (rval < 0)
+ return rval;
+
+ rval = smiapp_pll_update(sensor);
+ if (rval < 0)
+ return rval;
+
+ /* Output from pixel array, including blanking */
+ smiapp_update_blanking(sensor);
+
+ dev_dbg(&client->dev, "vblank\t\t%d\n", sensor->vblank->val);
+ dev_dbg(&client->dev, "hblank\t\t%d\n", sensor->hblank->val);
+
+ dev_dbg(&client->dev, "real timeperframe\t100/%d\n",
+ sensor->pll.vt_pix_clk_freq_hz /
+ ((sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width
+ + sensor->hblank->val) *
+ (sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height
+ + sensor->vblank->val) / 100));
+
+ return 0;
+}
+
+/*
+ *
+ * SMIA++ NVM handling
+ *
+ */
+static int smiapp_read_nvm(struct smiapp_sensor *sensor,
+ unsigned char *nvm)
+{
+ u32 i, s, p, np, v;
+ int rval = 0, rval2;
+
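+ /*
+ * Read the NVM one page at a time: select the page, enable the
+ * read through the data transfer interface, poll the status
+ * register until the page is ready and then copy it out of the
+ * data registers.
+ */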
+ np = sensor->nvm_size / SMIAPP_NVM_PAGE_SIZE;
+ for (p = 0; p < np; p++) {
+ rval = smiapp_write(
+ sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_PAGE_SELECT, p);
+ if (rval)
+ goto out;
+
+ rval = smiapp_write(sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL,
+ SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN |
+ SMIAPP_DATA_TRANSFER_IF_1_CTRL_RD_EN);
+ if (rval)
+ goto out;
+
+ for (i = 1000; i > 0; i--) {
+ rval = smiapp_read(
+ sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS, &s);
+
+ if (rval)
+ goto out;
+
+ if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY)
+ break;
+ }
+
+ if (!i) {
+ rval = -ETIMEDOUT;
+ goto out;
+ }
+
+ for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) {
+ rval = smiapp_read(
+ sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_0 + i,
+ &v);
+ if (rval)
+ goto out;
+
+ *nvm++ = v;
+ }
+ }
+
+out:
+ rval2 = smiapp_write(sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL, 0);
+ if (rval < 0)
+ return rval;
+ else
+ return rval2;
+}
+
+/*
+ *
+ * SMIA++ CCI address control
+ *
+ */
+static int smiapp_change_cci_addr(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int rval;
+ u32 val;
+
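+ /*
+ * Program the alternate CCI address while still talking to the
+ * sensor at its default address (the register takes the 8-bit
+ * form of the address, hence the shift), then switch the client
+ * over and read the register back to confirm the change.
+ */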
+ client->addr = sensor->platform_data->i2c_addr_dfl;
+
+ rval = smiapp_write(sensor,
+ SMIAPP_REG_U8_CCI_ADDRESS_CONTROL,
+ sensor->platform_data->i2c_addr_alt << 1);
+ if (rval)
+ return rval;
+
+ client->addr = sensor->platform_data->i2c_addr_alt;
+
+ /* verify addr change went ok */
+ rval = smiapp_read(sensor, SMIAPP_REG_U8_CCI_ADDRESS_CONTROL, &val);
+ if (rval)
+ return rval;
+
+ if (val != sensor->platform_data->i2c_addr_alt << 1)
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ *
+ * SMIA++ Mode Control
+ *
+ */
+static int smiapp_setup_flash_strobe(struct smiapp_sensor *sensor)
+{
+ struct smiapp_flash_strobe_parms *strobe_setup;
+ unsigned int ext_freq = sensor->platform_data->ext_clk;
+ u32 tmp;
+ u32 strobe_adjustment;
+ u32 strobe_width_high_rs;
+ int rval;
+
+ strobe_setup = sensor->platform_data->strobe_setup;
+
+ /*
+ * How to calculate registers related to strobe length. Please
+ * do not change, or if you do at least know what you're
+ * doing. :-)
+ *
+ * Sakari Ailus <sakari.ailus@maxwell.research.nokia.com> 2010-10-25
+ *
+ * flash_strobe_length [us] / 10^6 = (tFlash_strobe_width_ctrl
+ * / EXTCLK freq [Hz]) * flash_strobe_adjustment
+ *
+ * tFlash_strobe_width_ctrl E N, [1 - 0xffff]
+ * flash_strobe_adjustment E N, [1 - 0xff]
+ *
+ * The formula above is written as below to keep it on one
+ * line:
+ *
+ * l / 10^6 = w / e * a
+ *
+ * Let's mark w * a by x:
+ *
+ * x = w * a
+ *
+ * Thus, we get:
+ *
+ * x = l * e / 10^6
+ *
+ * The strobe width must be at least as long as requested,
+ * thus rounding upwards is needed.
+ *
+ * x = (l * e + 10^6 - 1) / 10^6
+ * -----------------------------
+ *
+ * Maximum possible accuracy is wanted at all times. Thus keep
+ * a as small as possible.
+ *
+ * Calculate a, assuming maximum w, with rounding upwards:
+ *
+ * a = (x + (2^16 - 1) - 1) / (2^16 - 1)
+ * -------------------------------------
+ *
+ * Thus, we also get w, with that a, with rounding upwards:
+ *
+ * w = (x + a - 1) / a
+ * -------------------
+ *
+ * To get limits:
+ *
+ * x E [1, (2^16 - 1) * (2^8 - 1)]
+ *
+ * Substituting maximum x to the original formula (with rounding),
+ * the maximum l is thus
+ *
+ * (2^16 - 1) * (2^8 - 1) * 10^6 = l * e + 10^6 - 1
+ *
+ * l = (10^6 * (2^16 - 1) * (2^8 - 1) - 10^6 + 1) / e
+ * --------------------------------------------------
+ *
+ * flash_strobe_length must be clamped between 1 and
+ * (10^6 * (2^16 - 1) * (2^8 - 1) - 10^6 + 1) / EXTCLK freq.
+ *
+ * Then,
+ *
+ * flash_strobe_adjustment = ((flash_strobe_length *
+ * EXTCLK freq + 10^6 - 1) / 10^6 + (2^16 - 1) - 1) / (2^16 - 1)
+ *
+ * tFlash_strobe_width_ctrl = ((flash_strobe_length *
+ * EXTCLK freq + 10^6 - 1) / 10^6 +
+ * flash_strobe_adjustment - 1) / flash_strobe_adjustment
+ */
+ tmp = div_u64(1000000ULL * ((1 << 16) - 1) * ((1 << 8) - 1) -
+ 1000000 + 1, ext_freq);
+ strobe_setup->strobe_width_high_us =
+ clamp_t(u32, strobe_setup->strobe_width_high_us, 1, tmp);
+
+ tmp = div_u64(((u64)strobe_setup->strobe_width_high_us * (u64)ext_freq +
+ 1000000 - 1), 1000000ULL);
+ strobe_adjustment = (tmp + (1 << 16) - 1 - 1) / ((1 << 16) - 1);
+ strobe_width_high_rs = (tmp + strobe_adjustment - 1) /
+ strobe_adjustment;
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_FLASH_MODE_RS,
+ strobe_setup->mode);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_FLASH_STROBE_ADJUSTMENT,
+ strobe_adjustment);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_HIGH_RS_CTRL,
+ strobe_width_high_rs);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_TFLASH_STROBE_DELAY_RS_CTRL,
+ strobe_setup->strobe_delay);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_FLASH_STROBE_START_POINT,
+ strobe_setup->stobe_start_point);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_FLASH_TRIGGER_RS,
+ strobe_setup->trigger);
+
+out:
+ sensor->platform_data->strobe_setup->trigger = 0;
+
+ return rval;
+}
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+static int smiapp_power_on(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int sleep;
+ int rval;
+
+ rval = regulator_enable(sensor->vana);
+ if (rval) {
+ dev_err(&client->dev, "failed to enable vana regulator\n");
+ return rval;
+ }
+ usleep_range(1000, 1000);
+
+ if (sensor->platform_data->set_xclk)
+ rval = sensor->platform_data->set_xclk(
+ &sensor->src->sd, sensor->platform_data->ext_clk);
+ else
+ rval = clk_enable(sensor->ext_clk);
+ if (rval < 0) {
+ dev_dbg(&client->dev, "failed to set xclk\n");
+ goto out_xclk_fail;
+ }
+ usleep_range(1000, 1000);
+
+ if (sensor->platform_data->xshutdown != SMIAPP_NO_XSHUTDOWN)
+ gpio_set_value(sensor->platform_data->xshutdown, 1);
+
+ sleep = SMIAPP_RESET_DELAY(sensor->platform_data->ext_clk);
+ usleep_range(sleep, sleep);
+
+ /*
+ * Failures to respond to the address change command have been noticed.
+ * Those failures seem to be caused by the sensor requiring a longer
+ * boot time than advertised. An additional 10ms delay seems to work
+ * around the issue, but the SMIA++ I2C write retry hack makes the delay
+ * unnecessary. The failures need to be investigated to find a proper
+ * fix, and a delay will likely need to be added here if the I2C write
+ * retry hack is reverted before the root cause of the boot time issue
+ * is found.
+ */
+
+ if (sensor->platform_data->i2c_addr_alt) {
+ rval = smiapp_change_cci_addr(sensor);
+ if (rval) {
+ dev_err(&client->dev, "cci address change error\n");
+ goto out_cci_addr_fail;
+ }
+ }
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_SOFTWARE_RESET,
+ SMIAPP_SOFTWARE_RESET);
+ if (rval < 0) {
+ dev_err(&client->dev, "software reset failed\n");
+ goto out_cci_addr_fail;
+ }
+
+ if (sensor->platform_data->i2c_addr_alt) {
+ rval = smiapp_change_cci_addr(sensor);
+ if (rval) {
+ dev_err(&client->dev, "cci address change error\n");
+ goto out_cci_addr_fail;
+ }
+ }
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_COMPRESSION_MODE,
+ SMIAPP_COMPRESSION_MODE_SIMPLE_PREDICTOR);
+ if (rval) {
+ dev_err(&client->dev, "compression mode set failed\n");
+ goto out_cci_addr_fail;
+ }
+
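+ /*
+ * The extclk frequency register takes the frequency in MHz as an
+ * 8.8 fixed point value, so divide the rate in Hz by 10^6 / 2^8;
+ * e.g. a 9.6 MHz clock yields 9600000 / 3906 = 2457 (0x999).
+ */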
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_EXTCLK_FREQUENCY_MHZ,
+ sensor->platform_data->ext_clk / (1000000 / (1 << 8)));
+ if (rval) {
+ dev_err(&client->dev, "extclk frequency set failed\n");
+ goto out_cci_addr_fail;
+ }
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_CSI_LANE_MODE,
+ sensor->platform_data->lanes - 1);
+ if (rval) {
+ dev_err(&client->dev, "csi lane mode set failed\n");
+ goto out_cci_addr_fail;
+ }
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_FAST_STANDBY_CTRL,
+ SMIAPP_FAST_STANDBY_CTRL_IMMEDIATE);
+ if (rval) {
+ dev_err(&client->dev, "fast standby set failed\n");
+ goto out_cci_addr_fail;
+ }
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_CSI_SIGNALLING_MODE,
+ sensor->platform_data->csi_signalling_mode);
+ if (rval) {
+ dev_err(&client->dev, "csi signalling mode set failed\n");
+ goto out_cci_addr_fail;
+ }
+
+ /* DPHY control done by sensor based on requested link rate */
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_DPHY_CTRL,
+ SMIAPP_DPHY_CTRL_UI);
+ if (rval < 0)
+ goto out_cci_addr_fail;
+
+ rval = smiapp_call_quirk(sensor, post_poweron);
+ if (rval) {
+ dev_err(&client->dev, "post_poweron quirks failed\n");
+ goto out_cci_addr_fail;
+ }
+
+ /* Are we still initialising...? If yes, return here. */
+ if (!sensor->pixel_array)
+ return 0;
+
+ rval = v4l2_ctrl_handler_setup(
+ &sensor->pixel_array->ctrl_handler);
+ if (rval)
+ goto out_cci_addr_fail;
+
+ rval = v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
+ if (rval)
+ goto out_cci_addr_fail;
+
+ mutex_lock(&sensor->mutex);
+ rval = smiapp_update_mode(sensor);
+ mutex_unlock(&sensor->mutex);
+ if (rval < 0)
+ goto out_cci_addr_fail;
+
+ return 0;
+
+out_cci_addr_fail:
+ if (sensor->platform_data->xshutdown != SMIAPP_NO_XSHUTDOWN)
+ gpio_set_value(sensor->platform_data->xshutdown, 0);
+ if (sensor->platform_data->set_xclk)
+ sensor->platform_data->set_xclk(&sensor->src->sd, 0);
+ else
+ clk_disable(sensor->ext_clk);
+
+out_xclk_fail:
+ regulator_disable(sensor->vana);
+ return rval;
+}
+
+static void smiapp_power_off(struct smiapp_sensor *sensor)
+{
+ /*
+ * Currently power/clock to the lens are enabled/disabled
+ * separately, but they are essentially the same signals. So if
+ * the sensor is powered off while the lens is powered on, the
+ * sensor does not really see a power off and the next cci address
+ * change will fail. Do a soft reset explicitly here to cover that
+ * case.
+ */
+ if (sensor->platform_data->i2c_addr_alt)
+ smiapp_write(sensor,
+ SMIAPP_REG_U8_SOFTWARE_RESET,
+ SMIAPP_SOFTWARE_RESET);
+
+ if (sensor->platform_data->xshutdown != SMIAPP_NO_XSHUTDOWN)
+ gpio_set_value(sensor->platform_data->xshutdown, 0);
+ if (sensor->platform_data->set_xclk)
+ sensor->platform_data->set_xclk(&sensor->src->sd, 0);
+ else
+ clk_disable(sensor->ext_clk);
+ usleep_range(5000, 5000);
+ regulator_disable(sensor->vana);
+ sensor->streaming = 0;
+}
+
+static int smiapp_set_power(struct v4l2_subdev *subdev, int on)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ int ret = 0;
+
+ mutex_lock(&sensor->power_mutex);
+
+ /*
+ * If the power count is modified from 0 to != 0 or from != 0
+ * to 0, update the power state.
+ */
+ if (!sensor->power_count == !on)
+ goto out;
+
+ if (on) {
+ /* Power on and perform initialisation. */
+ ret = smiapp_power_on(sensor);
+ if (ret < 0)
+ goto out;
+ } else {
+ smiapp_power_off(sensor);
+ }
+
+ /* Update the power count. */
+ sensor->power_count += on ? 1 : -1;
+ WARN_ON(sensor->power_count < 0);
+
+out:
+ mutex_unlock(&sensor->power_mutex);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Video stream management
+ */
+
+static int smiapp_start_streaming(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int rval;
+
+ mutex_lock(&sensor->mutex);
+
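+ /*
+ * The CSI data format register takes the uncompressed bit depth
+ * in the high byte and the compressed bit depth in the low byte,
+ * the same layout as the data format descriptors read in
+ * smiapp_get_mbus_formats().
+ */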
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_CSI_DATA_FORMAT,
+ (sensor->csi_format->width << 8) |
+ sensor->csi_format->compressed);
+ if (rval)
+ goto out;
+
+ rval = smiapp_pll_configure(sensor);
+ if (rval)
+ goto out;
+
+ /* Analog crop start coordinates */
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_X_ADDR_START,
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].left);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_Y_ADDR_START,
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].top);
+ if (rval < 0)
+ goto out;
+
+ /* Analog crop end coordinates */
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_X_ADDR_END,
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].left
+ + sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width - 1);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_Y_ADDR_END,
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].top
+ + sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height - 1);
+ if (rval < 0)
+ goto out;
+
+ /*
+ * Output from pixel array, including blanking, is set using
+ * controls below. No need to set here.
+ */
+
+ /* Digital crop */
+ if (sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY]
+ == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) {
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_DIGITAL_CROP_X_OFFSET,
+ sensor->scaler->crop[SMIAPP_PAD_SINK].left);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_DIGITAL_CROP_Y_OFFSET,
+ sensor->scaler->crop[SMIAPP_PAD_SINK].top);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_DIGITAL_CROP_IMAGE_WIDTH,
+ sensor->scaler->crop[SMIAPP_PAD_SINK].width);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_DIGITAL_CROP_IMAGE_HEIGHT,
+ sensor->scaler->crop[SMIAPP_PAD_SINK].height);
+ if (rval < 0)
+ goto out;
+ }
+
+ /* Scaling */
+ if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ != SMIAPP_SCALING_CAPABILITY_NONE) {
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_SCALING_MODE,
+ sensor->scaling_mode);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_SCALE_M,
+ sensor->scale_m);
+ if (rval < 0)
+ goto out;
+ }
+
+ /* Output size from sensor */
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_X_OUTPUT_SIZE,
+ sensor->src->crop[SMIAPP_PAD_SRC].width);
+ if (rval < 0)
+ goto out;
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_Y_OUTPUT_SIZE,
+ sensor->src->crop[SMIAPP_PAD_SRC].height);
+ if (rval < 0)
+ goto out;
+
+ if ((sensor->flash_capability &
+ (SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE |
+ SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE)) &&
+ sensor->platform_data->strobe_setup != NULL &&
+ sensor->platform_data->strobe_setup->trigger != 0) {
+ rval = smiapp_setup_flash_strobe(sensor);
+ if (rval)
+ goto out;
+ }
+
+ rval = smiapp_call_quirk(sensor, pre_streamon);
+ if (rval) {
+ dev_err(&client->dev, "pre_streamon quirks failed\n");
+ goto out;
+ }
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_MODE_SELECT,
+ SMIAPP_MODE_SELECT_STREAMING);
+
+out:
+ mutex_unlock(&sensor->mutex);
+
+ return rval;
+}
+
+static int smiapp_stop_streaming(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int rval;
+
+ mutex_lock(&sensor->mutex);
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_MODE_SELECT,
+ SMIAPP_MODE_SELECT_SOFTWARE_STANDBY);
+ if (rval)
+ goto out;
+
+ rval = smiapp_call_quirk(sensor, post_streamoff);
+ if (rval)
+ dev_err(&client->dev, "post_streamoff quirks failed\n");
+
+out:
+ mutex_unlock(&sensor->mutex);
+ return rval;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev video operations
+ */
+
+static int smiapp_set_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ int rval;
+
+ if (sensor->streaming == enable)
+ return 0;
+
+ if (enable) {
+ sensor->streaming = 1;
+ rval = smiapp_start_streaming(sensor);
+ if (rval < 0)
+ sensor->streaming = 0;
+ } else {
+ rval = smiapp_stop_streaming(sensor);
+ sensor->streaming = 0;
+ }
+
+ return rval;
+}
+
+static int smiapp_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ unsigned int i;
+ int idx = -1;
+ int rval = -EINVAL;
+
+ mutex_lock(&sensor->mutex);
+
+ dev_err(&client->dev, "subdev %s, pad %d, index %d\n",
+ subdev->name, code->pad, code->index);
+
+ if (subdev != &sensor->src->sd || code->pad != SMIAPP_PAD_SRC) {
+ if (code->index)
+ goto out;
+
+ code->code = sensor->internal_csi_format->code;
+ rval = 0;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
+ if (sensor->mbus_frame_fmts & (1 << i))
+ idx++;
+
+ if (idx == code->index) {
+ code->code = smiapp_csi_data_formats[i].code;
+ dev_err(&client->dev, "found index %d, i %d, code %x\n",
+ code->index, i, code->code);
+ rval = 0;
+ break;
+ }
+ }
+
+out:
+ mutex_unlock(&sensor->mutex);
+
+ return rval;
+}
+
+static u32 __smiapp_get_mbus_code(struct v4l2_subdev *subdev,
+ unsigned int pad)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+
+ if (subdev == &sensor->src->sd && pad == SMIAPP_PAD_SRC)
+ return sensor->csi_format->code;
+ else
+ return sensor->internal_csi_format->code;
+}
+
+static int __smiapp_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ fmt->format = *v4l2_subdev_get_try_format(fh, fmt->pad);
+ } else {
+ struct v4l2_rect *r;
+
+ if (fmt->pad == ssd->source_pad)
+ r = &ssd->crop[ssd->source_pad];
+ else
+ r = &ssd->sink_fmt;
+
+ fmt->format.code = __smiapp_get_mbus_code(subdev, fmt->pad);
+ fmt->format.width = r->width;
+ fmt->format.height = r->height;
+ }
+
+ return 0;
+}
+
+static int smiapp_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ int rval;
+
+ mutex_lock(&sensor->mutex);
+ rval = __smiapp_get_format(subdev, fh, fmt);
+ mutex_unlock(&sensor->mutex);
+
+ return rval;
+}
+
+static void smiapp_get_crop_compose(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_rect **crops,
+ struct v4l2_rect **comps, int which)
+{
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ unsigned int i;
+
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ if (crops)
+ for (i = 0; i < subdev->entity.num_pads; i++)
+ crops[i] = &ssd->crop[i];
+ if (comps)
+ *comps = &ssd->compose;
+ } else {
+ if (crops) {
+ for (i = 0; i < subdev->entity.num_pads; i++) {
+ crops[i] = v4l2_subdev_get_try_crop(fh, i);
+ BUG_ON(!crops[i]);
+ }
+ }
+ if (comps) {
+ *comps = v4l2_subdev_get_try_compose(fh,
+ SMIAPP_PAD_SINK);
+ BUG_ON(!*comps);
+ }
+ }
+}
+
+/* Changes require propagation only on sink pad. */
+static void smiapp_propagate(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh, int which,
+ int target)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ struct v4l2_rect *comp, *crops[SMIAPP_PADS];
+
+ smiapp_get_crop_compose(subdev, fh, crops, &comp, which);
+
+ switch (target) {
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ comp->width = crops[SMIAPP_PAD_SINK]->width;
+ comp->height = crops[SMIAPP_PAD_SINK]->height;
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ if (ssd == sensor->scaler) {
+ sensor->scale_m =
+ sensor->limits[
+ SMIAPP_LIMIT_SCALER_N_MIN];
+ sensor->scaling_mode =
+ SMIAPP_SCALING_MODE_NONE;
+ } else if (ssd == sensor->binner) {
+ sensor->binning_horizontal = 1;
+ sensor->binning_vertical = 1;
+ }
+ }
+ /* Fall through */
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ *crops[SMIAPP_PAD_SRC] = *comp;
+ break;
+ default:
+ BUG();
+ }
+}
+
+static const struct smiapp_csi_data_format
+*smiapp_validate_csi_data_format(struct smiapp_sensor *sensor, u32 code)
+{
+ const struct smiapp_csi_data_format *csi_format = sensor->csi_format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
+ if (sensor->mbus_frame_fmts & (1 << i)
+ && smiapp_csi_data_formats[i].code == code)
+ return &smiapp_csi_data_formats[i];
+ }
+
+ return csi_format;
+}
+
+static int smiapp_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ struct v4l2_rect *crops[SMIAPP_PADS];
+
+ mutex_lock(&sensor->mutex);
+
+ /*
+ * Media bus code is changeable on src subdev's source pad. On
+ * other source pads we just get format here.
+ */
+ if (fmt->pad == ssd->source_pad) {
+ u32 code = fmt->format.code;
+ int rval = __smiapp_get_format(subdev, fh, fmt);
+
+ if (!rval && subdev == &sensor->src->sd) {
+ const struct smiapp_csi_data_format *csi_format =
+ smiapp_validate_csi_data_format(sensor, code);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ sensor->csi_format = csi_format;
+ fmt->format.code = csi_format->code;
+ }
+
+ mutex_unlock(&sensor->mutex);
+ return rval;
+ }
+
+ /* Sink pad. Width and height are changeable here. */
+ fmt->format.code = __smiapp_get_mbus_code(subdev, fmt->pad);
+ fmt->format.width &= ~1;
+ fmt->format.height &= ~1;
+
+ fmt->format.width =
+ clamp(fmt->format.width,
+ sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE],
+ sensor->limits[SMIAPP_LIMIT_MAX_X_OUTPUT_SIZE]);
+ fmt->format.height =
+ clamp(fmt->format.height,
+ sensor->limits[SMIAPP_LIMIT_MIN_Y_OUTPUT_SIZE],
+ sensor->limits[SMIAPP_LIMIT_MAX_Y_OUTPUT_SIZE]);
+
+ smiapp_get_crop_compose(subdev, fh, crops, NULL, fmt->which);
+
+ crops[ssd->sink_pad]->left = 0;
+ crops[ssd->sink_pad]->top = 0;
+ crops[ssd->sink_pad]->width = fmt->format.width;
+ crops[ssd->sink_pad]->height = fmt->format.height;
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ ssd->sink_fmt = *crops[ssd->sink_pad];
+ smiapp_propagate(subdev, fh, fmt->which,
+ V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL);
+
+ mutex_unlock(&sensor->mutex);
+
+ return 0;
+}
+
+/*
+ * Calculate goodness of scaled image size compared to expected image
+ * size and flags provided.
+ */
+#define SCALING_GOODNESS 100000
+#define SCALING_GOODNESS_EXTREME 100000000
+static int scaling_goodness(struct v4l2_subdev *subdev, int w, int ask_w,
+ int h, int ask_h, u32 flags)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ int val = 0;
+
+ w &= ~1;
+ ask_w &= ~1;
+ h &= ~1;
+ ask_h &= ~1;
+
+ if (flags & V4L2_SUBDEV_SEL_FLAG_SIZE_GE) {
+ if (w < ask_w)
+ val -= SCALING_GOODNESS;
+ if (h < ask_h)
+ val -= SCALING_GOODNESS;
+ }
+
+ if (flags & V4L2_SUBDEV_SEL_FLAG_SIZE_LE) {
+ if (w > ask_w)
+ val -= SCALING_GOODNESS;
+ if (h > ask_h)
+ val -= SCALING_GOODNESS;
+ }
+
+ val -= abs(w - ask_w);
+ val -= abs(h - ask_h);
+
+ if (w < sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE])
+ val -= SCALING_GOODNESS_EXTREME;
+
+ dev_dbg(&client->dev, "w %d ask_w %d h %d ask_h %d goodness %d\n",
+ w, ask_w, h, ask_h, val);
+
+ return val;
+}
+
+static void smiapp_set_compose_binner(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel,
+ struct v4l2_rect **crops,
+ struct v4l2_rect *comp)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ unsigned int i;
+ unsigned int binh = 1, binv = 1;
+ unsigned int best = scaling_goodness(
+ subdev,
+ crops[SMIAPP_PAD_SINK]->width, sel->r.width,
+ crops[SMIAPP_PAD_SINK]->height, sel->r.height, sel->flags);
+
+ for (i = 0; i < sensor->nbinning_subtypes; i++) {
+ int this = scaling_goodness(
+ subdev,
+ crops[SMIAPP_PAD_SINK]->width
+ / sensor->binning_subtypes[i].horizontal,
+ sel->r.width,
+ crops[SMIAPP_PAD_SINK]->height
+ / sensor->binning_subtypes[i].vertical,
+ sel->r.height, sel->flags);
+
+ if (this > best) {
+ binh = sensor->binning_subtypes[i].horizontal;
+ binv = sensor->binning_subtypes[i].vertical;
+ best = this;
+ }
+ }
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ sensor->binning_vertical = binv;
+ sensor->binning_horizontal = binh;
+ }
+
+ sel->r.width = (crops[SMIAPP_PAD_SINK]->width / binh) & ~1;
+ sel->r.height = (crops[SMIAPP_PAD_SINK]->height / binv) & ~1;
+}
+
+/*
+ * Calculate best scaling ratio and mode for given output resolution.
+ *
+ * Try all of these: horizontal ratio, vertical ratio and smallest
+ * size possible (horizontally).
+ *
+ * Also try whether horizontal scaler or full scaler gives a better
+ * result.
+ */
+static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel,
+ struct v4l2_rect **crops,
+ struct v4l2_rect *comp)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ u32 min, max, a, b, max_m;
+ u32 scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+ int mode = SMIAPP_SCALING_MODE_HORIZONTAL;
+ u32 try[4];
+ u32 ntry = 0;
+ unsigned int i;
+ int best = INT_MIN;
+
+ sel->r.width = min_t(unsigned int, sel->r.width,
+ crops[SMIAPP_PAD_SINK]->width);
+ sel->r.height = min_t(unsigned int, sel->r.height,
+ crops[SMIAPP_PAD_SINK]->height);
+
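+ /*
+ * a and b are the scale factors (in units of SCALER_N_MIN) needed
+ * to reach the requested width and height; max_m is the largest
+ * factor that still keeps the output at least MIN_X_OUTPUT_SIZE
+ * wide. All three are then clamped to the scaler's M limits.
+ */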
+ a = crops[SMIAPP_PAD_SINK]->width
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN] / sel->r.width;
+ b = crops[SMIAPP_PAD_SINK]->height
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN] / sel->r.height;
+ max_m = crops[SMIAPP_PAD_SINK]->width
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN]
+ / sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE];
+
+ a = min(sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX],
+ max(a, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN]));
+ b = min(sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX],
+ max(b, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN]));
+ max_m = min(sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX],
+ max(max_m, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN]));
+
+ dev_dbg(&client->dev, "scaling: a %d b %d max_m %d\n", a, b, max_m);
+
+ min = min(max_m, min(a, b));
+ max = min(max_m, max(a, b));
+
+ try[ntry] = min;
+ ntry++;
+ if (min != max) {
+ try[ntry] = max;
+ ntry++;
+ }
+ if (max != max_m) {
+ try[ntry] = min + 1;
+ ntry++;
+ if (min != max) {
+ try[ntry] = max + 1;
+ ntry++;
+ }
+ }
+
+ for (i = 0; i < ntry; i++) {
+ int this = scaling_goodness(
+ subdev,
+ crops[SMIAPP_PAD_SINK]->width
+ / try[i]
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN],
+ sel->r.width,
+ crops[SMIAPP_PAD_SINK]->height,
+ sel->r.height,
+ sel->flags);
+
+ dev_dbg(&client->dev, "trying factor %d (%d)\n", try[i], i);
+
+ if (this > best) {
+ scale_m = try[i];
+ mode = SMIAPP_SCALING_MODE_HORIZONTAL;
+ best = this;
+ }
+
+ if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ == SMIAPP_SCALING_CAPABILITY_HORIZONTAL)
+ continue;
+
+ this = scaling_goodness(
+ subdev, crops[SMIAPP_PAD_SINK]->width
+ / try[i]
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN],
+ sel->r.width,
+ crops[SMIAPP_PAD_SINK]->height
+ / try[i]
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN],
+ sel->r.height,
+ sel->flags);
+
+ if (this > best) {
+ scale_m = try[i];
+ mode = SMIAPP_SCALING_MODE_BOTH;
+ best = this;
+ }
+ }
+
+ sel->r.width =
+ (crops[SMIAPP_PAD_SINK]->width
+ / scale_m
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN]) & ~1;
+ if (mode == SMIAPP_SCALING_MODE_BOTH)
+ sel->r.height =
+ (crops[SMIAPP_PAD_SINK]->height
+ / scale_m
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN])
+ & ~1;
+ else
+ sel->r.height = crops[SMIAPP_PAD_SINK]->height;
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ sensor->scale_m = scale_m;
+ sensor->scaling_mode = mode;
+ }
+}
+/* We're only called on source pads. This function sets scaling. */
+static int smiapp_set_compose(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ struct v4l2_rect *comp, *crops[SMIAPP_PADS];
+
+ smiapp_get_crop_compose(subdev, fh, crops, &comp, sel->which);
+
+ sel->r.top = 0;
+ sel->r.left = 0;
+
+ if (ssd == sensor->binner)
+ smiapp_set_compose_binner(subdev, fh, sel, crops, comp);
+ else
+ smiapp_set_compose_scaler(subdev, fh, sel, crops, comp);
+
+ *comp = sel->r;
+ smiapp_propagate(subdev, fh, sel->which,
+ V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ return smiapp_update_mode(sensor);
+
+ return 0;
+}
+
+static int __smiapp_sel_supported(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_selection *sel)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+
+ /* We only implement crop in three places. */
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ if (ssd == sensor->pixel_array
+ && sel->pad == SMIAPP_PA_PAD_SRC)
+ return 0;
+ if (ssd == sensor->src
+ && sel->pad == SMIAPP_PAD_SRC)
+ return 0;
+ if (ssd == sensor->scaler
+ && sel->pad == SMIAPP_PAD_SINK
+ && sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY]
+ == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP)
+ return 0;
+ return -EINVAL;
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS:
+ if (sel->pad == ssd->source_pad)
+ return -EINVAL;
+ if (ssd == sensor->binner)
+ return 0;
+ if (ssd == sensor->scaler
+ && sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ != SMIAPP_SCALING_CAPABILITY_NONE)
+ return 0;
+ /* Fall through */
+ default:
+ return -EINVAL;
+ }
+}
+
+static int smiapp_set_crop(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ struct v4l2_rect *src_size, *crops[SMIAPP_PADS];
+ struct v4l2_rect _r;
+
+ smiapp_get_crop_compose(subdev, fh, crops, NULL, sel->which);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ if (sel->pad == ssd->sink_pad)
+ src_size = &ssd->sink_fmt;
+ else
+ src_size = &ssd->compose;
+ } else {
+ if (sel->pad == ssd->sink_pad) {
+ _r.left = 0;
+ _r.top = 0;
+ _r.width = v4l2_subdev_get_try_format(fh, sel->pad)
+ ->width;
+ _r.height = v4l2_subdev_get_try_format(fh, sel->pad)
+ ->height;
+ src_size = &_r;
+ } else {
+ src_size =
+ v4l2_subdev_get_try_compose(
+ fh, ssd->sink_pad);
+ }
+ }
+
+ if (ssd == sensor->src && sel->pad == SMIAPP_PAD_SRC) {
+ sel->r.left = 0;
+ sel->r.top = 0;
+ }
+
+ sel->r.width = min(sel->r.width, src_size->width);
+ sel->r.height = min(sel->r.height, src_size->height);
+
+ sel->r.left = min(sel->r.left, src_size->width - sel->r.width);
+ sel->r.top = min(sel->r.top, src_size->height - sel->r.height);
+
+ *crops[sel->pad] = sel->r;
+
+ if (ssd != sensor->pixel_array && sel->pad == SMIAPP_PAD_SINK)
+ smiapp_propagate(subdev, fh, sel->which,
+ V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL);
+
+ return 0;
+}
+
+static int __smiapp_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ struct v4l2_rect *comp, *crops[SMIAPP_PADS];
+ struct v4l2_rect sink_fmt;
+ int ret;
+
+ ret = __smiapp_sel_supported(subdev, sel);
+ if (ret)
+ return ret;
+
+ smiapp_get_crop_compose(subdev, fh, crops, &comp, sel->which);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ sink_fmt = ssd->sink_fmt;
+ } else {
+ struct v4l2_mbus_framefmt *fmt =
+ v4l2_subdev_get_try_format(fh, ssd->sink_pad);
+
+ sink_fmt.left = 0;
+ sink_fmt.top = 0;
+ sink_fmt.width = fmt->width;
+ sink_fmt.height = fmt->height;
+ }
+
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ if (ssd == sensor->pixel_array) {
+ sel->r.width =
+ sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
+ sel->r.height =
+ sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
+ } else if (sel->pad == ssd->sink_pad) {
+ sel->r = sink_fmt;
+ } else {
+ sel->r = *comp;
+ }
+ break;
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS:
+ sel->r = *crops[sel->pad];
+ break;
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ sel->r = *comp;
+ break;
+ }
+
+ return 0;
+}
+
+static int smiapp_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ int rval;
+
+ mutex_lock(&sensor->mutex);
+ rval = __smiapp_get_selection(subdev, fh, sel);
+ mutex_unlock(&sensor->mutex);
+
+ return rval;
+}
+static int smiapp_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ int ret;
+
+ ret = __smiapp_sel_supported(subdev, sel);
+ if (ret)
+ return ret;
+
+ mutex_lock(&sensor->mutex);
+
+ sel->r.left = max(0, sel->r.left & ~1);
+ sel->r.top = max(0, sel->r.top & ~1);
+ sel->r.width = max(0, SMIAPP_ALIGN_DIM(sel->r.width, sel->flags));
+ sel->r.height = max(0, SMIAPP_ALIGN_DIM(sel->r.height, sel->flags));
+
+ sel->r.width = max_t(unsigned int,
+ sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE],
+ sel->r.width);
+ sel->r.height = max_t(unsigned int,
+ sensor->limits[SMIAPP_LIMIT_MIN_Y_OUTPUT_SIZE],
+ sel->r.height);
+
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ ret = smiapp_set_crop(subdev, fh, sel);
+ break;
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ ret = smiapp_set_compose(subdev, fh, sel);
+ break;
+ default:
+ BUG();
+ }
+
+ mutex_unlock(&sensor->mutex);
+ return ret;
+}
+
+static int smiapp_get_skip_frames(struct v4l2_subdev *subdev, u32 *frames)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+
+ *frames = sensor->frame_skip;
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * sysfs attributes
+ */
+
+static ssize_t
+smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(to_i2c_client(dev));
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ unsigned int nbytes;
+
+ if (!sensor->dev_init_done)
+ return -EBUSY;
+
+ if (!sensor->nvm_size) {
+ /* NVM not read yet - read it now */
+ sensor->nvm_size = sensor->platform_data->nvm_size;
+ if (smiapp_set_power(subdev, 1) < 0)
+ return -ENODEV;
+ if (smiapp_read_nvm(sensor, sensor->nvm)) {
+ dev_err(&client->dev, "nvm read failed\n");
+ smiapp_set_power(subdev, 0);
+ return -ENODEV;
+ }
+ smiapp_set_power(subdev, 0);
+ }
+ /*
+ * NVM is still way below a PAGE_SIZE, so we can safely
+ * assume this for now.
+ */
+ nbytes = min_t(unsigned int, sensor->nvm_size, PAGE_SIZE);
+ memcpy(buf, sensor->nvm, nbytes);
+
+ return nbytes;
+}
+static DEVICE_ATTR(nvm, S_IRUGO, smiapp_sysfs_nvm_read, NULL);
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev core operations
+ */
+
+static int smiapp_identify_module(struct v4l2_subdev *subdev)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct smiapp_module_info *minfo = &sensor->minfo;
+ unsigned int i;
+ int rval = 0;
+
+ minfo->name = SMIAPP_NAME;
+
+ /* Module info */
+ rval = smiapp_read_8only(sensor, SMIAPP_REG_U8_MANUFACTURER_ID,
+ &minfo->manufacturer_id);
+ if (!rval)
+ rval = smiapp_read_8only(sensor, SMIAPP_REG_U16_MODEL_ID,
+ &minfo->model_id);
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U8_REVISION_NUMBER_MAJOR,
+ &minfo->revision_number_major);
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U8_REVISION_NUMBER_MINOR,
+ &minfo->revision_number_minor);
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U8_MODULE_DATE_YEAR,
+ &minfo->module_year);
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U8_MODULE_DATE_MONTH,
+ &minfo->module_month);
+ if (!rval)
+ rval = smiapp_read_8only(sensor, SMIAPP_REG_U8_MODULE_DATE_DAY,
+ &minfo->module_day);
+
+ /* Sensor info */
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U8_SENSOR_MANUFACTURER_ID,
+ &minfo->sensor_manufacturer_id);
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U16_SENSOR_MODEL_ID,
+ &minfo->sensor_model_id);
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U8_SENSOR_REVISION_NUMBER,
+ &minfo->sensor_revision_number);
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U8_SENSOR_FIRMWARE_VERSION,
+ &minfo->sensor_firmware_version);
+
+ /* SMIA */
+ if (!rval)
+ rval = smiapp_read_8only(sensor, SMIAPP_REG_U8_SMIA_VERSION,
+ &minfo->smia_version);
+ if (!rval)
+ rval = smiapp_read_8only(sensor, SMIAPP_REG_U8_SMIAPP_VERSION,
+ &minfo->smiapp_version);
+
+ if (rval) {
+ dev_err(&client->dev, "sensor detection failed\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(&client->dev, "module 0x%2.2x-0x%4.4x\n",
+ minfo->manufacturer_id, minfo->model_id);
+
+ dev_dbg(&client->dev,
+ "module revision 0x%2.2x-0x%2.2x date %2.2d-%2.2d-%2.2d\n",
+ minfo->revision_number_major, minfo->revision_number_minor,
+ minfo->module_year, minfo->module_month, minfo->module_day);
+
+ dev_dbg(&client->dev, "sensor 0x%2.2x-0x%4.4x\n",
+ minfo->sensor_manufacturer_id, minfo->sensor_model_id);
+
+ dev_dbg(&client->dev,
+ "sensor revision 0x%2.2x firmware version 0x%2.2x\n",
+ minfo->sensor_revision_number, minfo->sensor_firmware_version);
+
+ dev_dbg(&client->dev, "smia version %2.2d smiapp version %2.2d\n",
+ minfo->smia_version, minfo->smiapp_version);
+
+ /*
+ * Some modules have bad data in the lvalues below. Hope the
+ * rvalues have better stuff. The lvalues are module
+ * parameters whereas the rvalues are sensor parameters.
+ */
+ if (!minfo->manufacturer_id && !minfo->model_id) {
+ minfo->manufacturer_id = minfo->sensor_manufacturer_id;
+ minfo->model_id = minfo->sensor_model_id;
+ minfo->revision_number_major = minfo->sensor_revision_number;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(smiapp_module_idents); i++) {
+ if (smiapp_module_idents[i].manufacturer_id
+ != minfo->manufacturer_id)
+ continue;
+ if (smiapp_module_idents[i].model_id != minfo->model_id)
+ continue;
+ if (smiapp_module_idents[i].flags
+ & SMIAPP_MODULE_IDENT_FLAG_REV_LE) {
+ if (smiapp_module_idents[i].revision_number_major
+ < minfo->revision_number_major)
+ continue;
+ } else {
+ if (smiapp_module_idents[i].revision_number_major
+ != minfo->revision_number_major)
+ continue;
+ }
+
+ minfo->name = smiapp_module_idents[i].name;
+ minfo->quirk = smiapp_module_idents[i].quirk;
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(smiapp_module_idents))
+ dev_warn(&client->dev,
+ "no quirks for this module; let's hope it's fully compliant\n");
+
+ dev_dbg(&client->dev, "the sensor is called %s, ident %2.2x%4.4x%2.2x\n",
+ minfo->name, minfo->manufacturer_id, minfo->model_id,
+ minfo->revision_number_major);
+
+ strlcpy(subdev->name, sensor->minfo.name, sizeof(subdev->name));
+
+ return 0;
+}
+
+static const struct v4l2_subdev_ops smiapp_ops;
+static const struct v4l2_subdev_internal_ops smiapp_internal_ops;
+static const struct media_entity_operations smiapp_entity_ops;
+
+static int smiapp_registered(struct v4l2_subdev *subdev)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct smiapp_subdev *last = NULL;
+ u32 tmp;
+ unsigned int i;
+ int rval;
+
+ sensor->vana = regulator_get(&client->dev, "VANA");
+ if (IS_ERR(sensor->vana)) {
+ dev_err(&client->dev, "could not get regulator for vana\n");
+ return -ENODEV;
+ }
+
+ if (!sensor->platform_data->set_xclk) {
+ sensor->ext_clk = clk_get(&client->dev,
+ sensor->platform_data->ext_clk_name);
+ if (IS_ERR(sensor->ext_clk)) {
+ dev_err(&client->dev, "could not get clock %s\n",
+ sensor->platform_data->ext_clk_name);
+ rval = -ENODEV;
+ goto out_clk_get;
+ }
+
+ rval = clk_set_rate(sensor->ext_clk,
+ sensor->platform_data->ext_clk);
+ if (rval < 0) {
+ dev_err(&client->dev,
+ "unable to set clock %s freq to %u\n",
+ sensor->platform_data->ext_clk_name,
+ sensor->platform_data->ext_clk);
+ rval = -ENODEV;
+ goto out_clk_set_rate;
+ }
+ }
+
+ if (sensor->platform_data->xshutdown != SMIAPP_NO_XSHUTDOWN) {
+ if (gpio_request_one(sensor->platform_data->xshutdown, 0,
+ "SMIA++ xshutdown") != 0) {
+ dev_err(&client->dev,
+ "unable to acquire reset gpio %d\n",
+ sensor->platform_data->xshutdown);
+ rval = -ENODEV;
+ goto out_clk_set_rate;
+ }
+ }
+
+ rval = smiapp_power_on(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_smiapp_power_on;
+ }
+
+ rval = smiapp_identify_module(subdev);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+
+ rval = smiapp_get_all_limits(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+
+ /*
+ * Handle Sensor Module orientation on the board.
+ *
+ * The application of H-FLIP and V-FLIP on the sensor is modified by
+ * the sensor orientation on the board.
+ *
+ * For SMIAPP_BOARD_SENSOR_ORIENT_180 the default behaviour is to set
+ * both H-FLIP and V-FLIP for normal operation which also implies
+ * that a set/unset operation for user space HFLIP and VFLIP v4l2
+ * controls will need to be internally inverted.
+ *
+ * Rotation also changes the bayer pattern.
+ */
+ if (sensor->platform_data->module_board_orient ==
+ SMIAPP_MODULE_BOARD_ORIENT_180)
+ sensor->hvflip_inv_mask = SMIAPP_IMAGE_ORIENTATION_HFLIP |
+ SMIAPP_IMAGE_ORIENTATION_VFLIP;
+
+ rval = smiapp_get_mbus_formats(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+
+ if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY]) {
+ u32 val;
+
+ rval = smiapp_read(sensor,
+ SMIAPP_REG_U8_BINNING_SUBTYPES, &val);
+ if (rval < 0) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+ sensor->nbinning_subtypes = min_t(u8, val,
+ SMIAPP_BINNING_SUBTYPES);
+
+ for (i = 0; i < sensor->nbinning_subtypes; i++) {
+ rval = smiapp_read(
+ sensor, SMIAPP_REG_U8_BINNING_TYPE_n(i), &val);
+ if (rval < 0) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+ sensor->binning_subtypes[i] =
+ *(struct smiapp_binning_subtype *)&val;
+
+ dev_dbg(&client->dev, "binning %xx%x\n",
+ sensor->binning_subtypes[i].horizontal,
+ sensor->binning_subtypes[i].vertical);
+ }
+ }
+ sensor->binning_horizontal = 1;
+ sensor->binning_vertical = 1;
+
+ /*
+ * SMIA++ NVM initialization - it will be read from the sensor
+ * when it is first requested by userspace.
+ */
+ if (sensor->minfo.smiapp_version && sensor->platform_data->nvm_size) {
+ sensor->nvm = kzalloc(sensor->platform_data->nvm_size,
+ GFP_KERNEL);
+ if (sensor->nvm == NULL) {
+ dev_err(&client->dev, "nvm buf allocation failed\n");
+ rval = -ENOMEM;
+ goto out_power_off;
+ }
+
+ if (device_create_file(&client->dev, &dev_attr_nvm) != 0) {
+ dev_err(&client->dev, "sysfs nvm entry failed\n");
+ rval = -EBUSY;
+ goto out_power_off;
+ }
+ }
+
+ rval = smiapp_call_quirk(sensor, limits);
+ if (rval) {
+ dev_err(&client->dev, "limits quirks failed\n");
+ goto out_nvm_release;
+ }
+
+ /* We consider this a profile 0 sensor if any of these are zero. */
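+ /*
+ * Profile 1 modules support horizontal scaling only; profile 2
+ * modules also support vertical scaling.
+ */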
+ if (!sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV] ||
+ !sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV] ||
+ !sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV] ||
+ !sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV]) {
+ sensor->minfo.smiapp_profile = SMIAPP_PROFILE_0;
+ } else if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ != SMIAPP_SCALING_CAPABILITY_NONE) {
+ if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ == SMIAPP_SCALING_CAPABILITY_HORIZONTAL)
+ sensor->minfo.smiapp_profile = SMIAPP_PROFILE_1;
+ else
+ sensor->minfo.smiapp_profile = SMIAPP_PROFILE_2;
+ sensor->scaler = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+ } else if (sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY]
+ == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) {
+ sensor->scaler = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+ }
+ sensor->binner = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+ sensor->pixel_array = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+
+ sensor->scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+
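+ /*
+ * Set up the sub-device chain: scaler (if any), binner and pixel
+ * array. The first one present is the externally visible source
+ * sub-device already initialised in probe(); the others are
+ * initialised here, linked source pad to the sink pad of the
+ * previously handled sub-device and registered with the same
+ * v4l2_device.
+ */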
+ for (i = 0; i < SMIAPP_SUBDEVS; i++) {
+ struct {
+ struct smiapp_subdev *ssd;
+ char *name;
+ } const __this[] = {
+ { sensor->scaler, "scaler", },
+ { sensor->binner, "binner", },
+ { sensor->pixel_array, "pixel array", },
+ }, *_this = &__this[i];
+ struct smiapp_subdev *this = _this->ssd;
+
+ if (!this)
+ continue;
+
+ if (this != sensor->src)
+ v4l2_subdev_init(&this->sd, &smiapp_ops);
+
+ this->sensor = sensor;
+
+ if (this == sensor->pixel_array) {
+ this->npads = 1;
+ } else {
+ this->npads = 2;
+ this->source_pad = 1;
+ }
+
+ snprintf(this->sd.name,
+ sizeof(this->sd.name), "%s %s",
+ sensor->minfo.name, _this->name);
+
+ this->sink_fmt.width =
+ sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
+ this->sink_fmt.height =
+ sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
+ this->compose.width = this->sink_fmt.width;
+ this->compose.height = this->sink_fmt.height;
+ this->crop[this->source_pad] = this->compose;
+ this->pads[this->source_pad].flags = MEDIA_PAD_FL_SOURCE;
+ if (this != sensor->pixel_array) {
+ this->crop[this->sink_pad] = this->compose;
+ this->pads[this->sink_pad].flags = MEDIA_PAD_FL_SINK;
+ }
+
+ this->sd.entity.ops = &smiapp_entity_ops;
+
+ if (last == NULL) {
+ last = this;
+ continue;
+ }
+
+ this->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ this->sd.internal_ops = &smiapp_internal_ops;
+ this->sd.owner = NULL;
+ v4l2_set_subdevdata(&this->sd, client);
+
+ rval = media_entity_init(&this->sd.entity,
+ this->npads, this->pads, 0);
+ if (rval) {
+ dev_err(&client->dev,
+ "media_entity_init failed\n");
+ goto out_nvm_release;
+ }
+
+ rval = media_entity_create_link(&this->sd.entity,
+ this->source_pad,
+ &last->sd.entity,
+ last->sink_pad,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (rval) {
+ dev_err(&client->dev,
+ "media_entity_create_link failed\n");
+ goto out_nvm_release;
+ }
+
+ rval = v4l2_device_register_subdev(sensor->src->sd.v4l2_dev,
+ &this->sd);
+ if (rval) {
+ dev_err(&client->dev,
+ "v4l2_device_register_subdev failed\n");
+ goto out_nvm_release;
+ }
+
+ last = this;
+ }
+
+ dev_dbg(&client->dev, "profile %d\n", sensor->minfo.smiapp_profile);
+
+ sensor->pixel_array->sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+
+ /* final steps */
+ smiapp_read_frame_fmt(sensor);
+ rval = smiapp_init_controls(sensor);
+ if (rval < 0)
+ goto out_nvm_release;
+
+ rval = smiapp_update_mode(sensor);
+ if (rval) {
+ dev_err(&client->dev, "update mode failed\n");
+ goto out_nvm_release;
+ }
+
+ sensor->streaming = false;
+ sensor->dev_init_done = true;
+
+ /* check flash capability */
+ rval = smiapp_read(sensor, SMIAPP_REG_U8_FLASH_MODE_CAPABILITY, &tmp);
+ if (rval)
+ goto out_nvm_release;
+ sensor->flash_capability = tmp;
+
+ smiapp_power_off(sensor);
+
+ return 0;
+
+out_nvm_release:
+ device_remove_file(&client->dev, &dev_attr_nvm);
+
+out_power_off:
+ kfree(sensor->nvm);
+ sensor->nvm = NULL;
+ smiapp_power_off(sensor);
+
+out_smiapp_power_on:
+ if (sensor->platform_data->xshutdown != SMIAPP_NO_XSHUTDOWN)
+ gpio_free(sensor->platform_data->xshutdown);
+
+out_clk_set_rate:
+ clk_put(sensor->ext_clk);
+ sensor->ext_clk = NULL;
+
+out_clk_get:
+ regulator_put(sensor->vana);
+ sensor->vana = NULL;
+ return rval;
+}
+
+static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct smiapp_subdev *ssd = to_smiapp_subdev(sd);
+ struct smiapp_sensor *sensor = ssd->sensor;
+ u32 mbus_code =
+ smiapp_csi_data_formats[smiapp_pixel_order(sensor)].code;
+ unsigned int i;
+
+ mutex_lock(&sensor->mutex);
+
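+ /*
+ * Initialise the per-file-handle (TRY) formats and rectangles to
+ * the full pixel array size in the sensor's native pixel order.
+ */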
+ for (i = 0; i < ssd->npads; i++) {
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_get_try_format(fh, i);
+ struct v4l2_rect *try_crop = v4l2_subdev_get_try_crop(fh, i);
+ struct v4l2_rect *try_comp;
+
+ try_fmt->width = sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
+ try_fmt->height = sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
+ try_fmt->code = mbus_code;
+
+ try_crop->top = 0;
+ try_crop->left = 0;
+ try_crop->width = try_fmt->width;
+ try_crop->height = try_fmt->height;
+
+ if (ssd != sensor->pixel_array)
+ continue;
+
+ try_comp = v4l2_subdev_get_try_compose(fh, i);
+ *try_comp = *try_crop;
+ }
+
+ mutex_unlock(&sensor->mutex);
+
+ return smiapp_set_power(sd, 1);
+}
+
+static int smiapp_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return smiapp_set_power(sd, 0);
+}
+
+static const struct v4l2_subdev_video_ops smiapp_video_ops = {
+ .s_stream = smiapp_set_stream,
+};
+
+static const struct v4l2_subdev_core_ops smiapp_core_ops = {
+ .s_power = smiapp_set_power,
+};
+
+static const struct v4l2_subdev_pad_ops smiapp_pad_ops = {
+ .enum_mbus_code = smiapp_enum_mbus_code,
+ .get_fmt = smiapp_get_format,
+ .set_fmt = smiapp_set_format,
+ .get_selection = smiapp_get_selection,
+ .set_selection = smiapp_set_selection,
+};
+
+static const struct v4l2_subdev_sensor_ops smiapp_sensor_ops = {
+ .g_skip_frames = smiapp_get_skip_frames,
+};
+
+static const struct v4l2_subdev_ops smiapp_ops = {
+ .core = &smiapp_core_ops,
+ .video = &smiapp_video_ops,
+ .pad = &smiapp_pad_ops,
+ .sensor = &smiapp_sensor_ops,
+};
+
+static const struct media_entity_operations smiapp_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops smiapp_internal_src_ops = {
+ .registered = smiapp_registered,
+ .open = smiapp_open,
+ .close = smiapp_close,
+};
+
+static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
+ .open = smiapp_open,
+ .close = smiapp_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * I2C Driver
+ */
+
+#ifdef CONFIG_PM
+
+static int smiapp_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ bool streaming;
+
+ BUG_ON(mutex_is_locked(&sensor->mutex));
+
+ if (sensor->power_count == 0)
+ return 0;
+
+ if (sensor->streaming)
+ smiapp_stop_streaming(sensor);
+
+ streaming = sensor->streaming;
+
+ smiapp_power_off(sensor);
+
+ /* save state for resume */
+ sensor->streaming = streaming;
+
+ return 0;
+}
+
+static int smiapp_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ int rval;
+
+ if (sensor->power_count == 0)
+ return 0;
+
+ rval = smiapp_power_on(sensor);
+ if (rval)
+ return rval;
+
+ if (sensor->streaming)
+ rval = smiapp_start_streaming(sensor);
+
+ return rval;
+}
+
+#else
+
+#define smiapp_suspend NULL
+#define smiapp_resume NULL
+
+#endif /* CONFIG_PM */
+
+static int smiapp_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct smiapp_sensor *sensor;
+ int rval;
+
+ if (client->dev.platform_data == NULL)
+ return -ENODEV;
+
+ sensor = kzalloc(sizeof(*sensor), GFP_KERNEL);
+ if (sensor == NULL)
+ return -ENOMEM;
+
+ sensor->platform_data = client->dev.platform_data;
+ mutex_init(&sensor->mutex);
+ mutex_init(&sensor->power_mutex);
+ sensor->src = &sensor->ssds[sensor->ssds_used];
+
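+ /*
+ * Only the externally visible source sub-device and its media
+ * entity are set up here; the rest of the initialisation is done
+ * in smiapp_registered() once this sub-device is registered with
+ * a v4l2_device.
+ */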
+ v4l2_i2c_subdev_init(&sensor->src->sd, client, &smiapp_ops);
+ sensor->src->sd.internal_ops = &smiapp_internal_src_ops;
+ sensor->src->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sensor->src->sensor = sensor;
+
+ sensor->src->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ rval = media_entity_init(&sensor->src->sd.entity, 2,
+ sensor->src->pads, 0);
+ if (rval < 0)
+ kfree(sensor);
+
+ return rval;
+}
+
+static int __exit smiapp_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ unsigned int i;
+
+ if (sensor->power_count) {
+ if (sensor->platform_data->xshutdown != SMIAPP_NO_XSHUTDOWN)
+ gpio_set_value(sensor->platform_data->xshutdown, 0);
+ if (sensor->platform_data->set_xclk)
+ sensor->platform_data->set_xclk(&sensor->src->sd, 0);
+ else
+ clk_disable(sensor->ext_clk);
+ sensor->power_count = 0;
+ }
+
+ if (sensor->nvm) {
+ device_remove_file(&client->dev, &dev_attr_nvm);
+ kfree(sensor->nvm);
+ }
+
+ for (i = 0; i < sensor->ssds_used; i++) {
+ media_entity_cleanup(&sensor->ssds[i].sd.entity);
+ v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
+ }
+ smiapp_free_controls(sensor);
+ if (sensor->platform_data->xshutdown != SMIAPP_NO_XSHUTDOWN)
+ gpio_free(sensor->platform_data->xshutdown);
+ if (sensor->ext_clk)
+ clk_put(sensor->ext_clk);
+ if (sensor->vana)
+ regulator_put(sensor->vana);
+
+ kfree(sensor);
+
+ return 0;
+}
+
+static const struct i2c_device_id smiapp_id_table[] = {
+ { SMIAPP_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, smiapp_id_table);
+
+static const struct dev_pm_ops smiapp_pm_ops = {
+ .suspend = smiapp_suspend,
+ .resume = smiapp_resume,
+};
+
+static struct i2c_driver smiapp_i2c_driver = {
+ .driver = {
+ .name = SMIAPP_NAME,
+ .pm = &smiapp_pm_ops,
+ },
+ .probe = smiapp_probe,
+ .remove = __exit_p(smiapp_remove),
+ .id_table = smiapp_id_table,
+};
+
+module_i2c_driver(smiapp_i2c_driver);
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>");
+MODULE_DESCRIPTION("Generic SMIA/SMIA++ camera module driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/smiapp/smiapp-limits.c b/drivers/media/video/smiapp/smiapp-limits.c
new file mode 100644
index 000000000000..0800e095724e
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-limits.c
@@ -0,0 +1,132 @@
+/*
+ * drivers/media/video/smiapp/smiapp-limits.c
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "smiapp.h"
+
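+/*
+ * The order of the entries in this table must match the SMIAPP_LIMIT_*
+ * index definitions in smiapp-limits.h; the numeric comments mark every
+ * fifth entry.
+ */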
+struct smiapp_reg_limits smiapp_reg_limits[] = {
+ { SMIAPP_REG_U16_ANALOGUE_GAIN_CAPABILITY, "analogue_gain_capability" }, /* 0 */
+ { SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_MIN, "analogue_gain_code_min" },
+ { SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_MAX, "analogue_gain_code_max" },
+ { SMIAPP_REG_U8_THS_ZERO_MIN, "ths_zero_min" },
+ { SMIAPP_REG_U8_TCLK_TRAIL_MIN, "tclk_trail_min" },
+ { SMIAPP_REG_U16_INTEGRATION_TIME_CAPABILITY, "integration_time_capability" }, /* 5 */
+ { SMIAPP_REG_U16_COARSE_INTEGRATION_TIME_MIN, "coarse_integration_time_min" },
+ { SMIAPP_REG_U16_COARSE_INTEGRATION_TIME_MAX_MARGIN, "coarse_integration_time_max_margin" },
+ { SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MIN, "fine_integration_time_min" },
+ { SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MAX_MARGIN, "fine_integration_time_max_margin" },
+ { SMIAPP_REG_U16_DIGITAL_GAIN_CAPABILITY, "digital_gain_capability" }, /* 10 */
+ { SMIAPP_REG_U16_DIGITAL_GAIN_MIN, "digital_gain_min" },
+ { SMIAPP_REG_U16_DIGITAL_GAIN_MAX, "digital_gain_max" },
+ { SMIAPP_REG_F32_MIN_EXT_CLK_FREQ_HZ, "min_ext_clk_freq_hz" },
+ { SMIAPP_REG_F32_MAX_EXT_CLK_FREQ_HZ, "max_ext_clk_freq_hz" },
+ { SMIAPP_REG_U16_MIN_PRE_PLL_CLK_DIV, "min_pre_pll_clk_div" }, /* 15 */
+ { SMIAPP_REG_U16_MAX_PRE_PLL_CLK_DIV, "max_pre_pll_clk_div" },
+ { SMIAPP_REG_F32_MIN_PLL_IP_FREQ_HZ, "min_pll_ip_freq_hz" },
+ { SMIAPP_REG_F32_MAX_PLL_IP_FREQ_HZ, "max_pll_ip_freq_hz" },
+ { SMIAPP_REG_U16_MIN_PLL_MULTIPLIER, "min_pll_multiplier" },
+ { SMIAPP_REG_U16_MAX_PLL_MULTIPLIER, "max_pll_multiplier" }, /* 20 */
+ { SMIAPP_REG_F32_MIN_PLL_OP_FREQ_HZ, "min_pll_op_freq_hz" },
+ { SMIAPP_REG_F32_MAX_PLL_OP_FREQ_HZ, "max_pll_op_freq_hz" },
+ { SMIAPP_REG_U16_MIN_VT_SYS_CLK_DIV, "min_vt_sys_clk_div" },
+ { SMIAPP_REG_U16_MAX_VT_SYS_CLK_DIV, "max_vt_sys_clk_div" },
+ { SMIAPP_REG_F32_MIN_VT_SYS_CLK_FREQ_HZ, "min_vt_sys_clk_freq_hz" }, /* 25 */
+ { SMIAPP_REG_F32_MAX_VT_SYS_CLK_FREQ_HZ, "max_vt_sys_clk_freq_hz" },
+ { SMIAPP_REG_F32_MIN_VT_PIX_CLK_FREQ_HZ, "min_vt_pix_clk_freq_hz" },
+ { SMIAPP_REG_F32_MAX_VT_PIX_CLK_FREQ_HZ, "max_vt_pix_clk_freq_hz" },
+ { SMIAPP_REG_U16_MIN_VT_PIX_CLK_DIV, "min_vt_pix_clk_div" },
+ { SMIAPP_REG_U16_MAX_VT_PIX_CLK_DIV, "max_vt_pix_clk_div" }, /* 30 */
+ { SMIAPP_REG_U16_MIN_FRAME_LENGTH_LINES, "min_frame_length_lines" },
+ { SMIAPP_REG_U16_MAX_FRAME_LENGTH_LINES, "max_frame_length_lines" },
+ { SMIAPP_REG_U16_MIN_LINE_LENGTH_PCK, "min_line_length_pck" },
+ { SMIAPP_REG_U16_MAX_LINE_LENGTH_PCK, "max_line_length_pck" },
+ { SMIAPP_REG_U16_MIN_LINE_BLANKING_PCK, "min_line_blanking_pck" }, /* 35 */
+ { SMIAPP_REG_U16_MIN_FRAME_BLANKING_LINES, "min_frame_blanking_lines" },
+ { SMIAPP_REG_U8_MIN_LINE_LENGTH_PCK_STEP_SIZE, "min_line_length_pck_step_size" },
+ { SMIAPP_REG_U16_MIN_OP_SYS_CLK_DIV, "min_op_sys_clk_div" },
+ { SMIAPP_REG_U16_MAX_OP_SYS_CLK_DIV, "max_op_sys_clk_div" },
+ { SMIAPP_REG_F32_MIN_OP_SYS_CLK_FREQ_HZ, "min_op_sys_clk_freq_hz" }, /* 40 */
+ { SMIAPP_REG_F32_MAX_OP_SYS_CLK_FREQ_HZ, "max_op_sys_clk_freq_hz" },
+ { SMIAPP_REG_U16_MIN_OP_PIX_CLK_DIV, "min_op_pix_clk_div" },
+ { SMIAPP_REG_U16_MAX_OP_PIX_CLK_DIV, "max_op_pix_clk_div" },
+ { SMIAPP_REG_F32_MIN_OP_PIX_CLK_FREQ_HZ, "min_op_pix_clk_freq_hz" },
+ { SMIAPP_REG_F32_MAX_OP_PIX_CLK_FREQ_HZ, "max_op_pix_clk_freq_hz" }, /* 45 */
+ { SMIAPP_REG_U16_X_ADDR_MIN, "x_addr_min" },
+ { SMIAPP_REG_U16_Y_ADDR_MIN, "y_addr_min" },
+ { SMIAPP_REG_U16_X_ADDR_MAX, "x_addr_max" },
+ { SMIAPP_REG_U16_Y_ADDR_MAX, "y_addr_max" },
+ { SMIAPP_REG_U16_MIN_X_OUTPUT_SIZE, "min_x_output_size" }, /* 50 */
+ { SMIAPP_REG_U16_MIN_Y_OUTPUT_SIZE, "min_y_output_size" },
+ { SMIAPP_REG_U16_MAX_X_OUTPUT_SIZE, "max_x_output_size" },
+ { SMIAPP_REG_U16_MAX_Y_OUTPUT_SIZE, "max_y_output_size" },
+ { SMIAPP_REG_U16_MIN_EVEN_INC, "min_even_inc" },
+ { SMIAPP_REG_U16_MAX_EVEN_INC, "max_even_inc" }, /* 55 */
+ { SMIAPP_REG_U16_MIN_ODD_INC, "min_odd_inc" },
+ { SMIAPP_REG_U16_MAX_ODD_INC, "max_odd_inc" },
+ { SMIAPP_REG_U16_SCALING_CAPABILITY, "scaling_capability" },
+ { SMIAPP_REG_U16_SCALER_M_MIN, "scaler_m_min" },
+ { SMIAPP_REG_U16_SCALER_M_MAX, "scaler_m_max" }, /* 60 */
+ { SMIAPP_REG_U16_SCALER_N_MIN, "scaler_n_min" },
+ { SMIAPP_REG_U16_SCALER_N_MAX, "scaler_n_max" },
+ { SMIAPP_REG_U16_SPATIAL_SAMPLING_CAPABILITY, "spatial_sampling_capability" },
+ { SMIAPP_REG_U8_DIGITAL_CROP_CAPABILITY, "digital_crop_capability" },
+ { SMIAPP_REG_U16_COMPRESSION_CAPABILITY, "compression_capability" }, /* 65 */
+ { SMIAPP_REG_U8_FIFO_SUPPORT_CAPABILITY, "fifo_support_capability" },
+ { SMIAPP_REG_U8_DPHY_CTRL_CAPABILITY, "dphy_ctrl_capability" },
+ { SMIAPP_REG_U8_CSI_LANE_MODE_CAPABILITY, "csi_lane_mode_capability" },
+ { SMIAPP_REG_U8_CSI_SIGNALLING_MODE_CAPABILITY, "csi_signalling_mode_capability" },
+ { SMIAPP_REG_U8_FAST_STANDBY_CAPABILITY, "fast_standby_capability" }, /* 70 */
+ { SMIAPP_REG_U8_CCI_ADDRESS_CONTROL_CAPABILITY, "cci_address_control_capability" },
+ { SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_1_LANE_MODE_MBPS, "max_per_lane_bitrate_1_lane_mode_mbps" },
+ { SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_2_LANE_MODE_MBPS, "max_per_lane_bitrate_2_lane_mode_mbps" },
+ { SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_3_LANE_MODE_MBPS, "max_per_lane_bitrate_3_lane_mode_mbps" },
+ { SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_4_LANE_MODE_MBPS, "max_per_lane_bitrate_4_lane_mode_mbps" }, /* 75 */
+ { SMIAPP_REG_U8_TEMP_SENSOR_CAPABILITY, "temp_sensor_capability" },
+ { SMIAPP_REG_U16_MIN_FRAME_LENGTH_LINES_BIN, "min_frame_length_lines_bin" },
+ { SMIAPP_REG_U16_MAX_FRAME_LENGTH_LINES_BIN, "max_frame_length_lines_bin" },
+ { SMIAPP_REG_U16_MIN_LINE_LENGTH_PCK_BIN, "min_line_length_pck_bin" },
+ { SMIAPP_REG_U16_MAX_LINE_LENGTH_PCK_BIN, "max_line_length_pck_bin" }, /* 80 */
+ { SMIAPP_REG_U16_MIN_LINE_BLANKING_PCK_BIN, "min_line_blanking_pck_bin" },
+ { SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MIN_BIN, "fine_integration_time_min_bin" },
+ { SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN, "fine_integration_time_max_margin_bin" },
+ { SMIAPP_REG_U8_BINNING_CAPABILITY, "binning_capability" },
+ { SMIAPP_REG_U8_BINNING_WEIGHTING_CAPABILITY, "binning_weighting_capability" }, /* 85 */
+ { SMIAPP_REG_U8_DATA_TRANSFER_IF_CAPABILITY, "data_transfer_if_capability" },
+ { SMIAPP_REG_U8_SHADING_CORRECTION_CAPABILITY, "shading_correction_capability" },
+ { SMIAPP_REG_U8_GREEN_IMBALANCE_CAPABILITY, "green_imbalance_capability" },
+ { SMIAPP_REG_U8_BLACK_LEVEL_CAPABILITY, "black_level_capability" },
+ { SMIAPP_REG_U8_MODULE_SPECIFIC_CORRECTION_CAPABILITY, "module_specific_correction_capability" }, /* 90 */
+ { SMIAPP_REG_U16_DEFECT_CORRECTION_CAPABILITY, "defect_correction_capability" },
+ { SMIAPP_REG_U16_DEFECT_CORRECTION_CAPABILITY_2, "defect_correction_capability_2" },
+ { SMIAPP_REG_U8_EDOF_CAPABILITY, "edof_capability" },
+ { SMIAPP_REG_U8_COLOUR_FEEDBACK_CAPABILITY, "colour_feedback_capability" },
+ { SMIAPP_REG_U8_ESTIMATION_MODE_CAPABILITY, "estimation_mode_capability" }, /* 95 */
+ { SMIAPP_REG_U8_ESTIMATION_ZONE_CAPABILITY, "estimation_zone_capability" },
+ { SMIAPP_REG_U16_CAPABILITY_TRDY_MIN, "capability_trdy_min" },
+ { SMIAPP_REG_U8_FLASH_MODE_CAPABILITY, "flash_mode_capability" },
+ { SMIAPP_REG_U8_ACTUATOR_CAPABILITY, "actuator_capability" },
+ { SMIAPP_REG_U8_BRACKETING_LUT_CAPABILITY_1, "bracketing_lut_capability_1" }, /* 100 */
+ { SMIAPP_REG_U8_BRACKETING_LUT_CAPABILITY_2, "bracketing_lut_capability_2" },
+ { SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_STEP, "analogue_gain_code_step" },
+ { 0, NULL },
+};
diff --git a/drivers/media/video/smiapp/smiapp-limits.h b/drivers/media/video/smiapp/smiapp-limits.h
new file mode 100644
index 000000000000..7f4836bb78db
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-limits.h
@@ -0,0 +1,128 @@
+/*
+ * drivers/media/video/smiapp/smiapp-limits.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
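+/*
+ * Indexes into the sensor limits array; these must be kept in sync with
+ * the smiapp_reg_limits[] table in smiapp-limits.c.
+ */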
+#define SMIAPP_LIMIT_ANALOGUE_GAIN_CAPABILITY 0
+#define SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MIN 1
+#define SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MAX 2
+#define SMIAPP_LIMIT_THS_ZERO_MIN 3
+#define SMIAPP_LIMIT_TCLK_TRAIL_MIN 4
+#define SMIAPP_LIMIT_INTEGRATION_TIME_CAPABILITY 5
+#define SMIAPP_LIMIT_COARSE_INTEGRATION_TIME_MIN 6
+#define SMIAPP_LIMIT_COARSE_INTEGRATION_TIME_MAX_MARGIN 7
+#define SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN 8
+#define SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN 9
+#define SMIAPP_LIMIT_DIGITAL_GAIN_CAPABILITY 10
+#define SMIAPP_LIMIT_DIGITAL_GAIN_MIN 11
+#define SMIAPP_LIMIT_DIGITAL_GAIN_MAX 12
+#define SMIAPP_LIMIT_MIN_EXT_CLK_FREQ_HZ 13
+#define SMIAPP_LIMIT_MAX_EXT_CLK_FREQ_HZ 14
+#define SMIAPP_LIMIT_MIN_PRE_PLL_CLK_DIV 15
+#define SMIAPP_LIMIT_MAX_PRE_PLL_CLK_DIV 16
+#define SMIAPP_LIMIT_MIN_PLL_IP_FREQ_HZ 17
+#define SMIAPP_LIMIT_MAX_PLL_IP_FREQ_HZ 18
+#define SMIAPP_LIMIT_MIN_PLL_MULTIPLIER 19
+#define SMIAPP_LIMIT_MAX_PLL_MULTIPLIER 20
+#define SMIAPP_LIMIT_MIN_PLL_OP_FREQ_HZ 21
+#define SMIAPP_LIMIT_MAX_PLL_OP_FREQ_HZ 22
+#define SMIAPP_LIMIT_MIN_VT_SYS_CLK_DIV 23
+#define SMIAPP_LIMIT_MAX_VT_SYS_CLK_DIV 24
+#define SMIAPP_LIMIT_MIN_VT_SYS_CLK_FREQ_HZ 25
+#define SMIAPP_LIMIT_MAX_VT_SYS_CLK_FREQ_HZ 26
+#define SMIAPP_LIMIT_MIN_VT_PIX_CLK_FREQ_HZ 27
+#define SMIAPP_LIMIT_MAX_VT_PIX_CLK_FREQ_HZ 28
+#define SMIAPP_LIMIT_MIN_VT_PIX_CLK_DIV 29
+#define SMIAPP_LIMIT_MAX_VT_PIX_CLK_DIV 30
+#define SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES 31
+#define SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES 32
+#define SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK 33
+#define SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK 34
+#define SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK 35
+#define SMIAPP_LIMIT_MIN_FRAME_BLANKING_LINES 36
+#define SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_STEP_SIZE 37
+#define SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV 38
+#define SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV 39
+#define SMIAPP_LIMIT_MIN_OP_SYS_CLK_FREQ_HZ 40
+#define SMIAPP_LIMIT_MAX_OP_SYS_CLK_FREQ_HZ 41
+#define SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV 42
+#define SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV 43
+#define SMIAPP_LIMIT_MIN_OP_PIX_CLK_FREQ_HZ 44
+#define SMIAPP_LIMIT_MAX_OP_PIX_CLK_FREQ_HZ 45
+#define SMIAPP_LIMIT_X_ADDR_MIN 46
+#define SMIAPP_LIMIT_Y_ADDR_MIN 47
+#define SMIAPP_LIMIT_X_ADDR_MAX 48
+#define SMIAPP_LIMIT_Y_ADDR_MAX 49
+#define SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE 50
+#define SMIAPP_LIMIT_MIN_Y_OUTPUT_SIZE 51
+#define SMIAPP_LIMIT_MAX_X_OUTPUT_SIZE 52
+#define SMIAPP_LIMIT_MAX_Y_OUTPUT_SIZE 53
+#define SMIAPP_LIMIT_MIN_EVEN_INC 54
+#define SMIAPP_LIMIT_MAX_EVEN_INC 55
+#define SMIAPP_LIMIT_MIN_ODD_INC 56
+#define SMIAPP_LIMIT_MAX_ODD_INC 57
+#define SMIAPP_LIMIT_SCALING_CAPABILITY 58
+#define SMIAPP_LIMIT_SCALER_M_MIN 59
+#define SMIAPP_LIMIT_SCALER_M_MAX 60
+#define SMIAPP_LIMIT_SCALER_N_MIN 61
+#define SMIAPP_LIMIT_SCALER_N_MAX 62
+#define SMIAPP_LIMIT_SPATIAL_SAMPLING_CAPABILITY 63
+#define SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY 64
+#define SMIAPP_LIMIT_COMPRESSION_CAPABILITY 65
+#define SMIAPP_LIMIT_FIFO_SUPPORT_CAPABILITY 66
+#define SMIAPP_LIMIT_DPHY_CTRL_CAPABILITY 67
+#define SMIAPP_LIMIT_CSI_LANE_MODE_CAPABILITY 68
+#define SMIAPP_LIMIT_CSI_SIGNALLING_MODE_CAPABILITY 69
+#define SMIAPP_LIMIT_FAST_STANDBY_CAPABILITY 70
+#define SMIAPP_LIMIT_CCI_ADDRESS_CONTROL_CAPABILITY 71
+#define SMIAPP_LIMIT_MAX_PER_LANE_BITRATE_1_LANE_MODE_MBPS 72
+#define SMIAPP_LIMIT_MAX_PER_LANE_BITRATE_2_LANE_MODE_MBPS 73
+#define SMIAPP_LIMIT_MAX_PER_LANE_BITRATE_3_LANE_MODE_MBPS 74
+#define SMIAPP_LIMIT_MAX_PER_LANE_BITRATE_4_LANE_MODE_MBPS 75
+#define SMIAPP_LIMIT_TEMP_SENSOR_CAPABILITY 76
+#define SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN 77
+#define SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN 78
+#define SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN 79
+#define SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN 80
+#define SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN 81
+#define SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN_BIN 82
+#define SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN 83
+#define SMIAPP_LIMIT_BINNING_CAPABILITY 84
+#define SMIAPP_LIMIT_BINNING_WEIGHTING_CAPABILITY 85
+#define SMIAPP_LIMIT_DATA_TRANSFER_IF_CAPABILITY 86
+#define SMIAPP_LIMIT_SHADING_CORRECTION_CAPABILITY 87
+#define SMIAPP_LIMIT_GREEN_IMBALANCE_CAPABILITY 88
+#define SMIAPP_LIMIT_BLACK_LEVEL_CAPABILITY 89
+#define SMIAPP_LIMIT_MODULE_SPECIFIC_CORRECTION_CAPABILITY 90
+#define SMIAPP_LIMIT_DEFECT_CORRECTION_CAPABILITY 91
+#define SMIAPP_LIMIT_DEFECT_CORRECTION_CAPABILITY_2 92
+#define SMIAPP_LIMIT_EDOF_CAPABILITY 93
+#define SMIAPP_LIMIT_COLOUR_FEEDBACK_CAPABILITY 94
+#define SMIAPP_LIMIT_ESTIMATION_MODE_CAPABILITY 95
+#define SMIAPP_LIMIT_ESTIMATION_ZONE_CAPABILITY 96
+#define SMIAPP_LIMIT_CAPABILITY_TRDY_MIN 97
+#define SMIAPP_LIMIT_FLASH_MODE_CAPABILITY 98
+#define SMIAPP_LIMIT_ACTUATOR_CAPABILITY 99
+#define SMIAPP_LIMIT_BRACKETING_LUT_CAPABILITY_1 100
+#define SMIAPP_LIMIT_BRACKETING_LUT_CAPABILITY_2 101
+#define SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_STEP 102
+#define SMIAPP_LIMIT_LAST 103
diff --git a/drivers/media/video/smiapp/smiapp-quirk.c b/drivers/media/video/smiapp/smiapp-quirk.c
new file mode 100644
index 000000000000..55e87950dcea
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-quirk.c
@@ -0,0 +1,306 @@
+/*
+ * drivers/media/video/smiapp/smiapp-quirk.c
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/delay.h>
+
+#include "smiapp.h"
+
+static int smiapp_write_8(struct smiapp_sensor *sensor, u16 reg, u8 val)
+{
+ return smiapp_write(sensor, (SMIA_REG_8BIT << 16) | reg, val);
+}
+
+static int smiapp_write_8s(struct smiapp_sensor *sensor,
+ struct smiapp_reg_8 *regs, int len)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int rval;
+
+ for (; len > 0; len--, regs++) {
+ rval = smiapp_write_8(sensor, regs->reg, regs->val);
+ if (rval < 0) {
+ dev_err(&client->dev,
+ "error %d writing reg 0x%4.4x, val 0x%2.2x\n",
+ rval, regs->reg, regs->val);
+ return rval;
+ }
+ }
+
+ return 0;
+}
+
+void smiapp_replace_limit(struct smiapp_sensor *sensor,
+ u32 limit, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+
+ dev_dbg(&client->dev, "quirk: 0x%8.8x \"%s\" = %d, 0x%x\n",
+ smiapp_reg_limits[limit].addr,
+ smiapp_reg_limits[limit].what, val, val);
+ sensor->limits[limit] = val;
+}
+
+int smiapp_replace_limit_at(struct smiapp_sensor *sensor,
+ u32 reg, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int i;
+
+ for (i = 0; smiapp_reg_limits[i].addr; i++) {
+ if ((smiapp_reg_limits[i].addr & 0xffff) != reg)
+ continue;
+
+ smiapp_replace_limit(sensor, i, val);
+
+ return 0;
+ }
+
+ dev_dbg(&client->dev, "quirk: bad register 0x%4.4x\n", reg);
+
+ return -EINVAL;
+}
+
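+/*
+ * Look up a register in the module quirk's static register list. Returns
+ * true and stores the quirk value in *val if an entry for the register is
+ * found.
+ */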
+bool smiapp_quirk_reg(struct smiapp_sensor *sensor,
+ u32 reg, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ const struct smia_reg *sreg;
+
+ if (!sensor->minfo.quirk)
+ return false;
+
+ sreg = sensor->minfo.quirk->regs;
+
+ if (!sreg)
+ return false;
+
+ while (sreg->type) {
+ u16 type = reg >> 16;
+ u16 reg16 = reg;
+
+ if (sreg->type != type || sreg->reg != reg16) {
+ sreg++;
+ continue;
+ }
+
+ switch ((u8)type) {
+ case SMIA_REG_8BIT:
+ dev_dbg(&client->dev, "quirk: 0x%8.8x: 0x%2.2x\n",
+ reg, sreg->val);
+ break;
+ case SMIA_REG_16BIT:
+ dev_dbg(&client->dev, "quirk: 0x%8.8x: 0x%4.4x\n",
+ reg, sreg->val);
+ break;
+ case SMIA_REG_32BIT:
+ dev_dbg(&client->dev, "quirk: 0x%8.8x: 0x%8.8x\n",
+ reg, sreg->val);
+ break;
+ }
+
+ *val = sreg->val;
+
+ return true;
+ }
+
+ return false;
+}
+
+static int jt8ew9_limits(struct smiapp_sensor *sensor)
+{
+ if (sensor->minfo.revision_number_major < 0x03)
+ sensor->frame_skip = 1;
+
+ /*
+ * Gain below 24 doesn't have any effect at all, but ~59 is
+ * needed for full dynamic range.
+ */
+ smiapp_replace_limit(sensor, SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MIN, 59);
+ smiapp_replace_limit(
+ sensor, SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MAX, 6000);
+
+ return 0;
+}
+
+static int jt8ew9_post_poweron(struct smiapp_sensor *sensor)
+{
+ struct smiapp_reg_8 regs[] = {
+ { 0x30a3, 0xd8 }, /* Output port control : LVDS ports only */
+ { 0x30ae, 0x00 }, /* 0x0307 pll_multiplier maximum value on PLL input 9.6 MHz (19.2 MHz is divided by pre_pll_div) */
+ { 0x30af, 0xd0 }, /* 0x0307 pll_multiplier maximum value on PLL input 9.6 MHz (19.2 MHz is divided by pre_pll_div) */
+ { 0x322d, 0x04 }, /* Adjusting Processing Image Size to Scaler Toshiba Recommendation Setting */
+ { 0x3255, 0x0f }, /* Horizontal Noise Reduction Control Toshiba Recommendation Setting */
+ { 0x3256, 0x15 }, /* Horizontal Noise Reduction Control Toshiba Recommendation Setting */
+ { 0x3258, 0x70 }, /* Analog Gain Control Toshiba Recommendation Setting */
+ { 0x3259, 0x70 }, /* Analog Gain Control Toshiba Recommendation Setting */
+ { 0x325f, 0x7c }, /* Analog Gain Control Toshiba Recommendation Setting */
+ { 0x3302, 0x06 }, /* Pixel Reference Voltage Control Toshiba Recommendation Setting */
+ { 0x3304, 0x00 }, /* Pixel Reference Voltage Control Toshiba Recommendation Setting */
+ { 0x3307, 0x22 }, /* Pixel Reference Voltage Control Toshiba Recommendation Setting */
+ { 0x3308, 0x8d }, /* Pixel Reference Voltage Control Toshiba Recommendation Setting */
+ { 0x331e, 0x0f }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */
+ { 0x3320, 0x30 }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */
+ { 0x3321, 0x11 }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */
+ { 0x3322, 0x98 }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */
+ { 0x3323, 0x64 }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */
+ { 0x3325, 0x83 }, /* Read Out Timing Control Toshiba Recommendation Setting */
+ { 0x3330, 0x18 }, /* Read Out Timing Control Toshiba Recommendation Setting */
+ { 0x333c, 0x01 }, /* Read Out Timing Control Toshiba Recommendation Setting */
+ { 0x3345, 0x2f }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */
+ { 0x33de, 0x38 }, /* Horizontal Noise Reduction Control Toshiba Recommendation Setting */
+ /* Taken from v03. No idea what the rest are. */
+ { 0x32e0, 0x05 },
+ { 0x32e1, 0x05 },
+ { 0x32e2, 0x04 },
+ { 0x32e5, 0x04 },
+ { 0x32e6, 0x04 },
+ };
+
+ return smiapp_write_8s(sensor, regs, ARRAY_SIZE(regs));
+}
+
+const struct smiapp_quirk smiapp_jt8ew9_quirk = {
+ .limits = jt8ew9_limits,
+ .post_poweron = jt8ew9_post_poweron,
+};
+
+static int imx125es_post_poweron(struct smiapp_sensor *sensor)
+{
+ /* Taken from v02. No idea what the other two are. */
+ struct smiapp_reg_8 regs[] = {
+ /*
+ * 0x3302: clk during frame blanking:
+ * 0x00 - HS mode, 0x01 - LP11
+ */
+ { 0x3302, 0x01 },
+ { 0x302d, 0x00 },
+ { 0x3b08, 0x8c },
+ };
+
+ return smiapp_write_8s(sensor, regs, ARRAY_SIZE(regs));
+}
+
+const struct smiapp_quirk smiapp_imx125es_quirk = {
+ .post_poweron = imx125es_post_poweron,
+};
+
+static int jt8ev1_limits(struct smiapp_sensor *sensor)
+{
+ smiapp_replace_limit(sensor, SMIAPP_LIMIT_X_ADDR_MAX, 4271);
+ smiapp_replace_limit(sensor,
+ SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN, 184);
+
+ return 0;
+}
+
+static int jt8ev1_post_poweron(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int rval;
+
+ struct smiapp_reg_8 regs[] = {
+ { 0x3031, 0xcd }, /* For digital binning (EQ_MONI) */
+ { 0x30a3, 0xd0 }, /* FLASH STROBE enable */
+ { 0x3237, 0x00 }, /* For control of pulse timing for ADC */
+ { 0x3238, 0x43 },
+ { 0x3301, 0x06 }, /* For analog bias for sensor */
+ { 0x3302, 0x06 },
+ { 0x3304, 0x00 },
+ { 0x3305, 0x88 },
+ { 0x332a, 0x14 },
+ { 0x332c, 0x6b },
+ { 0x3336, 0x01 },
+ { 0x333f, 0x1f },
+ { 0x3355, 0x00 },
+ { 0x3356, 0x20 },
+ { 0x33bf, 0x20 }, /* Adjust the FBC speed */
+ { 0x33c9, 0x20 },
+ { 0x33ce, 0x30 }, /* Adjust the parameter for logic function */
+ { 0x33cf, 0xec }, /* For Black sun */
+ { 0x3328, 0x80 }, /* Ugh. No idea what this is. */
+ };
+
+ struct smiapp_reg_8 regs_96[] = {
+ { 0x30ae, 0x00 }, /* For control of ADC clock */
+ { 0x30af, 0xd0 },
+ { 0x30b0, 0x01 },
+ };
+
+ rval = smiapp_write_8s(sensor, regs, ARRAY_SIZE(regs));
+ if (rval < 0)
+ return rval;
+
+ switch (sensor->platform_data->ext_clk) {
+ case 9600000:
+ return smiapp_write_8s(sensor, regs_96,
+ ARRAY_SIZE(regs_96));
+ default:
+ dev_warn(&client->dev, "no MSRs for %u Hz ext_clk\n",
+ sensor->platform_data->ext_clk);
+ return 0;
+ }
+}
+
+static int jt8ev1_pre_streamon(struct smiapp_sensor *sensor)
+{
+ return smiapp_write_8(sensor, 0x3328, 0x00);
+}
+
+static int jt8ev1_post_streamoff(struct smiapp_sensor *sensor)
+{
+ int rval;
+
+ /* Workaround: allows fast standby to work properly */
+ rval = smiapp_write_8(sensor, 0x3205, 0x04);
+ if (rval < 0)
+ return rval;
+
+ /* Wait for 1 ms + one line => 2 ms is likely enough */
+ usleep_range(2000, 2000);
+
+ /* Restore it */
+ rval = smiapp_write_8(sensor, 0x3205, 0x00);
+ if (rval < 0)
+ return rval;
+
+ return smiapp_write_8(sensor, 0x3328, 0x80);
+}
+
+const struct smiapp_quirk smiapp_jt8ev1_quirk = {
+ .limits = jt8ev1_limits,
+ .post_poweron = jt8ev1_post_poweron,
+ .pre_streamon = jt8ev1_pre_streamon,
+ .post_streamoff = jt8ev1_post_streamoff,
+ .flags = SMIAPP_QUIRK_FLAG_OP_PIX_CLOCK_PER_LANE,
+};
+
+static int tcm8500md_limits(struct smiapp_sensor *sensor)
+{
+ smiapp_replace_limit(sensor, SMIAPP_LIMIT_MIN_PLL_IP_FREQ_HZ, 2700000);
+
+ return 0;
+}
+
+const struct smiapp_quirk smiapp_tcm8500md_quirk = {
+ .limits = tcm8500md_limits,
+};
diff --git a/drivers/media/video/smiapp/smiapp-quirk.h b/drivers/media/video/smiapp/smiapp-quirk.h
new file mode 100644
index 000000000000..f4dcaabaefe7
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-quirk.h
@@ -0,0 +1,83 @@
+/*
+ * drivers/media/video/smiapp/smiapp-quirk.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __SMIAPP_QUIRK__
+#define __SMIAPP_QUIRK__
+
+struct smiapp_sensor;
+
+/**
+ * struct smiapp_quirk - quirks for sensors that deviate from SMIA++ standard
+ *
+ * @limits: Replace sensor->limits with values which can't be read from
+ * sensor registers. Called the first time the sensor is powered up.
+ * @post_poweron: Always called after the sensor has been fully powered on.
+ * @pre_streamon: Called just before streaming is enabled.
+ * @post_streamoff: Called right after stopping streaming.
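+ * @regs: Static quirk register values, looked up by smiapp_quirk_reg().
+ * @flags: SMIAPP_QUIRK_FLAG_* flags for this module.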
+ */
+struct smiapp_quirk {
+ int (*limits)(struct smiapp_sensor *sensor);
+ int (*post_poweron)(struct smiapp_sensor *sensor);
+ int (*pre_streamon)(struct smiapp_sensor *sensor);
+ int (*post_streamoff)(struct smiapp_sensor *sensor);
+ const struct smia_reg *regs;
+ unsigned long flags;
+};
+
+/* Normally the op pix clock is for all lanes in total. */
+#define SMIAPP_QUIRK_FLAG_OP_PIX_CLOCK_PER_LANE (1 << 0)
+#define SMIAPP_QUIRK_FLAG_8BIT_READ_ONLY (1 << 1)
+
+struct smiapp_reg_8 {
+ u16 reg;
+ u8 val;
+};
+
+void smiapp_replace_limit(struct smiapp_sensor *sensor,
+ u32 limit, u32 val);
+bool smiapp_quirk_reg(struct smiapp_sensor *sensor,
+ u32 reg, u32 *val);
+
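+/*
+ * Build a struct smia_reg quirk table entry from a SMIAPP_REG_*
+ * definition (access width in the high bits, 16-bit CCI address in the
+ * low bits) and a value.
+ */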
+#define SMIAPP_MK_QUIRK_REG(_reg, _val) \
+ { \
+ .type = (_reg >> 16), \
+ .reg = (u16)_reg, \
+ .val = _val, \
+ }
+
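+/* Call the named quirk hook if the module defines one; evaluates to 0 otherwise. */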
+#define smiapp_call_quirk(_sensor, _quirk, ...) \
+ (_sensor->minfo.quirk && \
+ _sensor->minfo.quirk->_quirk ? \
+ _sensor->minfo.quirk->_quirk(_sensor, ##__VA_ARGS__) : 0)
+
+#define smiapp_needs_quirk(_sensor, _quirk) \
+ (_sensor->minfo.quirk ? \
+ _sensor->minfo.quirk->flags & _quirk : 0)
+
+extern const struct smiapp_quirk smiapp_jt8ev1_quirk;
+extern const struct smiapp_quirk smiapp_imx125es_quirk;
+extern const struct smiapp_quirk smiapp_jt8ew9_quirk;
+extern const struct smiapp_quirk smiapp_tcm8500md_quirk;
+
+#endif /* __SMIAPP_QUIRK__ */
diff --git a/drivers/media/video/smiapp/smiapp-reg-defs.h b/drivers/media/video/smiapp/smiapp-reg-defs.h
new file mode 100644
index 000000000000..a089eb8161e1
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-reg-defs.h
@@ -0,0 +1,503 @@
+/*
+ * drivers/media/video/smiapp/smiapp-reg-defs.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+#define SMIAPP_REG_MK_U8(r) ((SMIA_REG_8BIT << 16) | (r))
+#define SMIAPP_REG_MK_U16(r) ((SMIA_REG_16BIT << 16) | (r))
+#define SMIAPP_REG_MK_U32(r) ((SMIA_REG_32BIT << 16) | (r))
+
+#define SMIAPP_REG_MK_F32(r) (SMIA_REG_FLAG_FLOAT | (SMIA_REG_32BIT << 16) | (r))
+
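+/*
+ * The definitions below encode the register access width (and the float
+ * flag for the F32 registers) in the bits above the 16-bit CCI address,
+ * so the width can be derived from the register definition itself.
+ */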
+#define SMIAPP_REG_U16_MODEL_ID SMIAPP_REG_MK_U16(0x0000)
+#define SMIAPP_REG_U8_REVISION_NUMBER_MAJOR SMIAPP_REG_MK_U8(0x0002)
+#define SMIAPP_REG_U8_MANUFACTURER_ID SMIAPP_REG_MK_U8(0x0003)
+#define SMIAPP_REG_U8_SMIA_VERSION SMIAPP_REG_MK_U8(0x0004)
+#define SMIAPP_REG_U8_FRAME_COUNT SMIAPP_REG_MK_U8(0x0005)
+#define SMIAPP_REG_U8_PIXEL_ORDER SMIAPP_REG_MK_U8(0x0006)
+#define SMIAPP_REG_U16_DATA_PEDESTAL SMIAPP_REG_MK_U16(0x0008)
+#define SMIAPP_REG_U8_PIXEL_DEPTH SMIAPP_REG_MK_U8(0x000c)
+#define SMIAPP_REG_U8_REVISION_NUMBER_MINOR SMIAPP_REG_MK_U8(0x0010)
+#define SMIAPP_REG_U8_SMIAPP_VERSION SMIAPP_REG_MK_U8(0x0011)
+#define SMIAPP_REG_U8_MODULE_DATE_YEAR SMIAPP_REG_MK_U8(0x0012)
+#define SMIAPP_REG_U8_MODULE_DATE_MONTH SMIAPP_REG_MK_U8(0x0013)
+#define SMIAPP_REG_U8_MODULE_DATE_DAY SMIAPP_REG_MK_U8(0x0014)
+#define SMIAPP_REG_U8_MODULE_DATE_PHASE SMIAPP_REG_MK_U8(0x0015)
+#define SMIAPP_REG_U16_SENSOR_MODEL_ID SMIAPP_REG_MK_U16(0x0016)
+#define SMIAPP_REG_U8_SENSOR_REVISION_NUMBER SMIAPP_REG_MK_U8(0x0018)
+#define SMIAPP_REG_U8_SENSOR_MANUFACTURER_ID SMIAPP_REG_MK_U8(0x0019)
+#define SMIAPP_REG_U8_SENSOR_FIRMWARE_VERSION SMIAPP_REG_MK_U8(0x001a)
+#define SMIAPP_REG_U32_SERIAL_NUMBER SMIAPP_REG_MK_U32(0x001c)
+#define SMIAPP_REG_U8_FRAME_FORMAT_MODEL_TYPE SMIAPP_REG_MK_U8(0x0040)
+#define SMIAPP_REG_U8_FRAME_FORMAT_MODEL_SUBTYPE SMIAPP_REG_MK_U8(0x0041)
+#define SMIAPP_REG_U16_FRAME_FORMAT_DESCRIPTOR_2(n) SMIAPP_REG_MK_U16(0x0042 + ((n) << 1)) /* 0 <= n <= 14 */
+#define SMIAPP_REG_U32_FRAME_FORMAT_DESCRIPTOR_4(n) SMIAPP_REG_MK_U32(0x0060 + ((n) << 2)) /* 0 <= n <= 7 */
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CAPABILITY SMIAPP_REG_MK_U16(0x0080)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_MIN SMIAPP_REG_MK_U16(0x0084)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_MAX SMIAPP_REG_MK_U16(0x0086)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_STEP SMIAPP_REG_MK_U16(0x0088)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_TYPE SMIAPP_REG_MK_U16(0x008a)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_M0 SMIAPP_REG_MK_U16(0x008c)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_C0 SMIAPP_REG_MK_U16(0x008e)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_M1 SMIAPP_REG_MK_U16(0x0090)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_C1 SMIAPP_REG_MK_U16(0x0092)
+#define SMIAPP_REG_U8_DATA_FORMAT_MODEL_TYPE SMIAPP_REG_MK_U8(0x00c0)
+#define SMIAPP_REG_U8_DATA_FORMAT_MODEL_SUBTYPE SMIAPP_REG_MK_U8(0x00c1)
+#define SMIAPP_REG_U16_DATA_FORMAT_DESCRIPTOR(n) SMIAPP_REG_MK_U16(0x00c2 + ((n) << 1))
+#define SMIAPP_REG_U8_MODE_SELECT SMIAPP_REG_MK_U8(0x0100)
+#define SMIAPP_REG_U8_IMAGE_ORIENTATION SMIAPP_REG_MK_U8(0x0101)
+#define SMIAPP_REG_U8_SOFTWARE_RESET SMIAPP_REG_MK_U8(0x0103)
+#define SMIAPP_REG_U8_GROUPED_PARAMETER_HOLD SMIAPP_REG_MK_U8(0x0104)
+#define SMIAPP_REG_U8_MASK_CORRUPTED_FRAMES SMIAPP_REG_MK_U8(0x0105)
+#define SMIAPP_REG_U8_FAST_STANDBY_CTRL SMIAPP_REG_MK_U8(0x0106)
+#define SMIAPP_REG_U8_CCI_ADDRESS_CONTROL SMIAPP_REG_MK_U8(0x0107)
+#define SMIAPP_REG_U8_2ND_CCI_IF_CONTROL SMIAPP_REG_MK_U8(0x0108)
+#define SMIAPP_REG_U8_2ND_CCI_ADDRESS_CONTROL SMIAPP_REG_MK_U8(0x0109)
+#define SMIAPP_REG_U8_CSI_CHANNEL_IDENTIFIER SMIAPP_REG_MK_U8(0x0110)
+#define SMIAPP_REG_U8_CSI_SIGNALLING_MODE SMIAPP_REG_MK_U8(0x0111)
+#define SMIAPP_REG_U16_CSI_DATA_FORMAT SMIAPP_REG_MK_U16(0x0112)
+#define SMIAPP_REG_U8_CSI_LANE_MODE SMIAPP_REG_MK_U8(0x0114)
+#define SMIAPP_REG_U8_CSI2_10_TO_8_DT SMIAPP_REG_MK_U8(0x0115)
+#define SMIAPP_REG_U8_CSI2_10_TO_7_DT SMIAPP_REG_MK_U8(0x0116)
+#define SMIAPP_REG_U8_CSI2_10_TO_6_DT SMIAPP_REG_MK_U8(0x0117)
+#define SMIAPP_REG_U8_CSI2_12_TO_8_DT SMIAPP_REG_MK_U8(0x0118)
+#define SMIAPP_REG_U8_CSI2_12_TO_7_DT SMIAPP_REG_MK_U8(0x0119)
+#define SMIAPP_REG_U8_CSI2_12_TO_6_DT SMIAPP_REG_MK_U8(0x011a)
+#define SMIAPP_REG_U8_CSI2_14_TO_10_DT SMIAPP_REG_MK_U8(0x011b)
+#define SMIAPP_REG_U8_CSI2_14_TO_8_DT SMIAPP_REG_MK_U8(0x011c)
+#define SMIAPP_REG_U8_CSI2_16_TO_10_DT SMIAPP_REG_MK_U8(0x011d)
+#define SMIAPP_REG_U8_CSI2_16_TO_8_DT SMIAPP_REG_MK_U8(0x011e)
+#define SMIAPP_REG_U8_GAIN_MODE SMIAPP_REG_MK_U8(0x0120)
+#define SMIAPP_REG_U16_VANA_VOLTAGE SMIAPP_REG_MK_U16(0x0130)
+#define SMIAPP_REG_U16_VDIG_VOLTAGE SMIAPP_REG_MK_U16(0x0132)
+#define SMIAPP_REG_U16_VIO_VOLTAGE SMIAPP_REG_MK_U16(0x0134)
+#define SMIAPP_REG_U16_EXTCLK_FREQUENCY_MHZ SMIAPP_REG_MK_U16(0x0136)
+#define SMIAPP_REG_U8_TEMP_SENSOR_CONTROL SMIAPP_REG_MK_U8(0x0138)
+#define SMIAPP_REG_U8_TEMP_SENSOR_MODE SMIAPP_REG_MK_U8(0x0139)
+#define SMIAPP_REG_U8_TEMP_SENSOR_OUTPUT SMIAPP_REG_MK_U8(0x013a)
+#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME SMIAPP_REG_MK_U16(0x0200)
+#define SMIAPP_REG_U16_COARSE_INTEGRATION_TIME SMIAPP_REG_MK_U16(0x0202)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GLOBAL SMIAPP_REG_MK_U16(0x0204)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GREENR SMIAPP_REG_MK_U16(0x0206)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_RED SMIAPP_REG_MK_U16(0x0208)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_BLUE SMIAPP_REG_MK_U16(0x020a)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GREENB SMIAPP_REG_MK_U16(0x020c)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_GREENR SMIAPP_REG_MK_U16(0x020e)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_RED SMIAPP_REG_MK_U16(0x0210)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_BLUE SMIAPP_REG_MK_U16(0x0212)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_GREENB SMIAPP_REG_MK_U16(0x0214)
+#define SMIAPP_REG_U16_VT_PIX_CLK_DIV SMIAPP_REG_MK_U16(0x0300)
+#define SMIAPP_REG_U16_VT_SYS_CLK_DIV SMIAPP_REG_MK_U16(0x0302)
+#define SMIAPP_REG_U16_PRE_PLL_CLK_DIV SMIAPP_REG_MK_U16(0x0304)
+#define SMIAPP_REG_U16_PLL_MULTIPLIER SMIAPP_REG_MK_U16(0x0306)
+#define SMIAPP_REG_U16_OP_PIX_CLK_DIV SMIAPP_REG_MK_U16(0x0308)
+#define SMIAPP_REG_U16_OP_SYS_CLK_DIV SMIAPP_REG_MK_U16(0x030a)
+#define SMIAPP_REG_U16_FRAME_LENGTH_LINES SMIAPP_REG_MK_U16(0x0340)
+#define SMIAPP_REG_U16_LINE_LENGTH_PCK SMIAPP_REG_MK_U16(0x0342)
+#define SMIAPP_REG_U16_X_ADDR_START SMIAPP_REG_MK_U16(0x0344)
+#define SMIAPP_REG_U16_Y_ADDR_START SMIAPP_REG_MK_U16(0x0346)
+#define SMIAPP_REG_U16_X_ADDR_END SMIAPP_REG_MK_U16(0x0348)
+#define SMIAPP_REG_U16_Y_ADDR_END SMIAPP_REG_MK_U16(0x034a)
+#define SMIAPP_REG_U16_X_OUTPUT_SIZE SMIAPP_REG_MK_U16(0x034c)
+#define SMIAPP_REG_U16_Y_OUTPUT_SIZE SMIAPP_REG_MK_U16(0x034e)
+#define SMIAPP_REG_U16_X_EVEN_INC SMIAPP_REG_MK_U16(0x0380)
+#define SMIAPP_REG_U16_X_ODD_INC SMIAPP_REG_MK_U16(0x0382)
+#define SMIAPP_REG_U16_Y_EVEN_INC SMIAPP_REG_MK_U16(0x0384)
+#define SMIAPP_REG_U16_Y_ODD_INC SMIAPP_REG_MK_U16(0x0386)
+#define SMIAPP_REG_U16_SCALING_MODE SMIAPP_REG_MK_U16(0x0400)
+#define SMIAPP_REG_U16_SPATIAL_SAMPLING SMIAPP_REG_MK_U16(0x0402)
+#define SMIAPP_REG_U16_SCALE_M SMIAPP_REG_MK_U16(0x0404)
+#define SMIAPP_REG_U16_SCALE_N SMIAPP_REG_MK_U16(0x0406)
+#define SMIAPP_REG_U16_DIGITAL_CROP_X_OFFSET SMIAPP_REG_MK_U16(0x0408)
+#define SMIAPP_REG_U16_DIGITAL_CROP_Y_OFFSET SMIAPP_REG_MK_U16(0x040a)
+#define SMIAPP_REG_U16_DIGITAL_CROP_IMAGE_WIDTH SMIAPP_REG_MK_U16(0x040c)
+#define SMIAPP_REG_U16_DIGITAL_CROP_IMAGE_HEIGHT SMIAPP_REG_MK_U16(0x040e)
+#define SMIAPP_REG_U16_COMPRESSION_MODE SMIAPP_REG_MK_U16(0x0500)
+#define SMIAPP_REG_U16_TEST_PATTERN_MODE SMIAPP_REG_MK_U16(0x0600)
+#define SMIAPP_REG_U16_TEST_DATA_RED SMIAPP_REG_MK_U16(0x0602)
+#define SMIAPP_REG_U16_TEST_DATA_GREENR SMIAPP_REG_MK_U16(0x0604)
+#define SMIAPP_REG_U16_TEST_DATA_BLUE SMIAPP_REG_MK_U16(0x0606)
+#define SMIAPP_REG_U16_TEST_DATA_GREENB SMIAPP_REG_MK_U16(0x0608)
+#define SMIAPP_REG_U16_HORIZONTAL_CURSOR_WIDTH SMIAPP_REG_MK_U16(0x060a)
+#define SMIAPP_REG_U16_HORIZONTAL_CURSOR_POSITION SMIAPP_REG_MK_U16(0x060c)
+#define SMIAPP_REG_U16_VERTICAL_CURSOR_WIDTH SMIAPP_REG_MK_U16(0x060e)
+#define SMIAPP_REG_U16_VERTICAL_CURSOR_POSITION SMIAPP_REG_MK_U16(0x0610)
+#define SMIAPP_REG_U16_FIFO_WATER_MARK_PIXELS SMIAPP_REG_MK_U16(0x0700)
+#define SMIAPP_REG_U8_TCLK_POST SMIAPP_REG_MK_U8(0x0800)
+#define SMIAPP_REG_U8_THS_PREPARE SMIAPP_REG_MK_U8(0x0801)
+#define SMIAPP_REG_U8_THS_ZERO_MIN SMIAPP_REG_MK_U8(0x0802)
+#define SMIAPP_REG_U8_THS_TRAIL SMIAPP_REG_MK_U8(0x0803)
+#define SMIAPP_REG_U8_TCLK_TRAIL_MIN SMIAPP_REG_MK_U8(0x0804)
+#define SMIAPP_REG_U8_TCLK_PREPARE SMIAPP_REG_MK_U8(0x0805)
+#define SMIAPP_REG_U8_TCLK_ZERO SMIAPP_REG_MK_U8(0x0806)
+#define SMIAPP_REG_U8_TLPX SMIAPP_REG_MK_U8(0x0807)
+#define SMIAPP_REG_U8_DPHY_CTRL SMIAPP_REG_MK_U8(0x0808)
+#define SMIAPP_REG_U32_REQUESTED_LINK_BIT_RATE_MBPS SMIAPP_REG_MK_U32(0x0820)
+#define SMIAPP_REG_U8_BINNING_MODE SMIAPP_REG_MK_U8(0x0900)
+#define SMIAPP_REG_U8_BINNING_TYPE SMIAPP_REG_MK_U8(0x0901)
+#define SMIAPP_REG_U8_BINNING_WEIGHTING SMIAPP_REG_MK_U8(0x0902)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL SMIAPP_REG_MK_U8(0x0a00)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS SMIAPP_REG_MK_U8(0x0a01)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_PAGE_SELECT SMIAPP_REG_MK_U8(0x0a02)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_0 SMIAPP_REG_MK_U8(0x0a04)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_1 SMIAPP_REG_MK_U8(0x0a05)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_2 SMIAPP_REG_MK_U8(0x0a06)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_3 SMIAPP_REG_MK_U8(0x0a07)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_4 SMIAPP_REG_MK_U8(0x0a08)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_5 SMIAPP_REG_MK_U8(0x0a09)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_12 SMIAPP_REG_MK_U8(0x0a10)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_13 SMIAPP_REG_MK_U8(0x0a11)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_14 SMIAPP_REG_MK_U8(0x0a12)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_15 SMIAPP_REG_MK_U8(0x0a13)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_16 SMIAPP_REG_MK_U8(0x0a14)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_17 SMIAPP_REG_MK_U8(0x0a15)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_18 SMIAPP_REG_MK_U8(0x0a16)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_19 SMIAPP_REG_MK_U8(0x0a17)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_20 SMIAPP_REG_MK_U8(0x0a18)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_21 SMIAPP_REG_MK_U8(0x0a19)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_22 SMIAPP_REG_MK_U8(0x0a1a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_23 SMIAPP_REG_MK_U8(0x0a1b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_24 SMIAPP_REG_MK_U8(0x0a1c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_25 SMIAPP_REG_MK_U8(0x0a1d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_26 SMIAPP_REG_MK_U8(0x0a1e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_27 SMIAPP_REG_MK_U8(0x0a1f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_28 SMIAPP_REG_MK_U8(0x0a20)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_29 SMIAPP_REG_MK_U8(0x0a21)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_30 SMIAPP_REG_MK_U8(0x0a22)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_31 SMIAPP_REG_MK_U8(0x0a23)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_32 SMIAPP_REG_MK_U8(0x0a24)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_33 SMIAPP_REG_MK_U8(0x0a25)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_34 SMIAPP_REG_MK_U8(0x0a26)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_35 SMIAPP_REG_MK_U8(0x0a27)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_36 SMIAPP_REG_MK_U8(0x0a28)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_37 SMIAPP_REG_MK_U8(0x0a29)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_38 SMIAPP_REG_MK_U8(0x0a2a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_39 SMIAPP_REG_MK_U8(0x0a2b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_40 SMIAPP_REG_MK_U8(0x0a2c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_41 SMIAPP_REG_MK_U8(0x0a2d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_42 SMIAPP_REG_MK_U8(0x0a2e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_43 SMIAPP_REG_MK_U8(0x0a2f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_44 SMIAPP_REG_MK_U8(0x0a30)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_45 SMIAPP_REG_MK_U8(0x0a31)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_46 SMIAPP_REG_MK_U8(0x0a32)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_47 SMIAPP_REG_MK_U8(0x0a33)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_48 SMIAPP_REG_MK_U8(0x0a34)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_49 SMIAPP_REG_MK_U8(0x0a35)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_50 SMIAPP_REG_MK_U8(0x0a36)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_51 SMIAPP_REG_MK_U8(0x0a37)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_52 SMIAPP_REG_MK_U8(0x0a38)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_53 SMIAPP_REG_MK_U8(0x0a39)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_54 SMIAPP_REG_MK_U8(0x0a3a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_55 SMIAPP_REG_MK_U8(0x0a3b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_56 SMIAPP_REG_MK_U8(0x0a3c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_57 SMIAPP_REG_MK_U8(0x0a3d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_58 SMIAPP_REG_MK_U8(0x0a3e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_59 SMIAPP_REG_MK_U8(0x0a3f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_60 SMIAPP_REG_MK_U8(0x0a40)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_61 SMIAPP_REG_MK_U8(0x0a41)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_62 SMIAPP_REG_MK_U8(0x0a42)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_63 SMIAPP_REG_MK_U8(0x0a43)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_CTRL SMIAPP_REG_MK_U8(0x0a44)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_STATUS SMIAPP_REG_MK_U8(0x0a45)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_PAGE_SELECT SMIAPP_REG_MK_U8(0x0a46)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_0 SMIAPP_REG_MK_U8(0x0a48)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_1 SMIAPP_REG_MK_U8(0x0a49)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_2 SMIAPP_REG_MK_U8(0x0a4a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_3 SMIAPP_REG_MK_U8(0x0a4b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_4 SMIAPP_REG_MK_U8(0x0a4c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_5 SMIAPP_REG_MK_U8(0x0a4d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_6 SMIAPP_REG_MK_U8(0x0a4e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_7 SMIAPP_REG_MK_U8(0x0a4f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_8 SMIAPP_REG_MK_U8(0x0a50)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_9 SMIAPP_REG_MK_U8(0x0a51)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_10 SMIAPP_REG_MK_U8(0x0a52)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_11 SMIAPP_REG_MK_U8(0x0a53)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_12 SMIAPP_REG_MK_U8(0x0a54)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_13 SMIAPP_REG_MK_U8(0x0a55)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_14 SMIAPP_REG_MK_U8(0x0a56)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_15 SMIAPP_REG_MK_U8(0x0a57)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_16 SMIAPP_REG_MK_U8(0x0a58)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_17 SMIAPP_REG_MK_U8(0x0a59)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_18 SMIAPP_REG_MK_U8(0x0a5a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_19 SMIAPP_REG_MK_U8(0x0a5b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_20 SMIAPP_REG_MK_U8(0x0a5c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_21 SMIAPP_REG_MK_U8(0x0a5d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_22 SMIAPP_REG_MK_U8(0x0a5e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_23 SMIAPP_REG_MK_U8(0x0a5f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_24 SMIAPP_REG_MK_U8(0x0a60)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_25 SMIAPP_REG_MK_U8(0x0a61)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_26 SMIAPP_REG_MK_U8(0x0a62)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_27 SMIAPP_REG_MK_U8(0x0a63)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_28 SMIAPP_REG_MK_U8(0x0a64)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_29 SMIAPP_REG_MK_U8(0x0a65)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_30 SMIAPP_REG_MK_U8(0x0a66)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_31 SMIAPP_REG_MK_U8(0x0a67)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_32 SMIAPP_REG_MK_U8(0x0a68)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_33 SMIAPP_REG_MK_U8(0x0a69)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_34 SMIAPP_REG_MK_U8(0x0a6a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_35 SMIAPP_REG_MK_U8(0x0a6b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_36 SMIAPP_REG_MK_U8(0x0a6c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_37 SMIAPP_REG_MK_U8(0x0a6d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_38 SMIAPP_REG_MK_U8(0x0a6e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_39 SMIAPP_REG_MK_U8(0x0a6f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_40 SMIAPP_REG_MK_U8(0x0a70)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_41 SMIAPP_REG_MK_U8(0x0a71)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_42 SMIAPP_REG_MK_U8(0x0a72)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_43 SMIAPP_REG_MK_U8(0x0a73)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_44 SMIAPP_REG_MK_U8(0x0a74)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_45 SMIAPP_REG_MK_U8(0x0a75)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_46 SMIAPP_REG_MK_U8(0x0a76)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_47 SMIAPP_REG_MK_U8(0x0a77)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_48 SMIAPP_REG_MK_U8(0x0a78)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_49 SMIAPP_REG_MK_U8(0x0a79)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_50 SMIAPP_REG_MK_U8(0x0a7a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_51 SMIAPP_REG_MK_U8(0x0a7b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_52 SMIAPP_REG_MK_U8(0x0a7c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_53 SMIAPP_REG_MK_U8(0x0a7d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_54 SMIAPP_REG_MK_U8(0x0a7e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_55 SMIAPP_REG_MK_U8(0x0a7f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_56 SMIAPP_REG_MK_U8(0x0a80)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_57 SMIAPP_REG_MK_U8(0x0a81)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_58 SMIAPP_REG_MK_U8(0x0a82)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_59 SMIAPP_REG_MK_U8(0x0a83)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_60 SMIAPP_REG_MK_U8(0x0a84)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_61 SMIAPP_REG_MK_U8(0x0a85)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_62 SMIAPP_REG_MK_U8(0x0a86)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_63 SMIAPP_REG_MK_U8(0x0a87)
+#define SMIAPP_REG_U8_SHADING_CORRECTION_ENABLE SMIAPP_REG_MK_U8(0x0b00)
+#define SMIAPP_REG_U8_LUMINANCE_CORRECTION_LEVEL SMIAPP_REG_MK_U8(0x0b01)
+#define SMIAPP_REG_U8_GREEN_IMBALANCE_FILTER_ENABLE SMIAPP_REG_MK_U8(0x0b02)
+#define SMIAPP_REG_U8_GREEN_IMBALANCE_FILTER_WEIGHT SMIAPP_REG_MK_U8(0x0b03)
+#define SMIAPP_REG_U8_BLACK_LEVEL_CORRECTION_ENABLE SMIAPP_REG_MK_U8(0x0b04)
+#define SMIAPP_REG_U8_MAPPED_COUPLET_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b05)
+#define SMIAPP_REG_U8_SINGLE_DEFECT_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b06)
+#define SMIAPP_REG_U8_SINGLE_DEFECT_CORRECT_WEIGHT SMIAPP_REG_MK_U8(0x0b07)
+#define SMIAPP_REG_U8_DYNAMIC_COUPLET_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b08)
+#define SMIAPP_REG_U8_DYNAMIC_COUPLET_CORRECT_WEIGHT SMIAPP_REG_MK_U8(0x0b09)
+#define SMIAPP_REG_U8_COMBINED_DEFECT_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b0a)
+#define SMIAPP_REG_U8_COMBINED_DEFECT_CORRECT_WEIGHT SMIAPP_REG_MK_U8(0x0b0b)
+#define SMIAPP_REG_U8_MODULE_SPECIFIC_CORRECTION_ENABLE SMIAPP_REG_MK_U8(0x0b0c)
+#define SMIAPP_REG_U8_MODULE_SPECIFIC_CORRECTION_WEIGHT SMIAPP_REG_MK_U8(0x0b0d)
+#define SMIAPP_REG_U8_MAPPED_LINE_DEFECT_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b0e)
+#define SMIAPP_REG_U8_MAPPED_LINE_DEFECT_CORRECT_ADJUST SMIAPP_REG_MK_U8(0x0b0f)
+#define SMIAPP_REG_U8_MAPPED_COUPLET_CORRECT_ADJUST SMIAPP_REG_MK_U8(0x0b10)
+#define SMIAPP_REG_U8_MAPPED_TRIPLET_DEFECT_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b11)
+#define SMIAPP_REG_U8_MAPPED_TRIPLET_DEFECT_CORRECT_ADJUST SMIAPP_REG_MK_U8(0x0b12)
+#define SMIAPP_REG_U8_DYNAMIC_TRIPLET_DEFECT_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b13)
+#define SMIAPP_REG_U8_DYNAMIC_TRIPLET_DEFECT_CORRECT_ADJUST SMIAPP_REG_MK_U8(0x0b14)
+#define SMIAPP_REG_U8_DYNAMIC_LINE_DEFECT_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b15)
+#define SMIAPP_REG_U8_DYNAMIC_LINE_DEFECT_CORRECT_ADJUST SMIAPP_REG_MK_U8(0x0b16)
+#define SMIAPP_REG_U8_EDOF_MODE SMIAPP_REG_MK_U8(0x0b80)
+#define SMIAPP_REG_U8_SHARPNESS SMIAPP_REG_MK_U8(0x0b83)
+#define SMIAPP_REG_U8_DENOISING SMIAPP_REG_MK_U8(0x0b84)
+#define SMIAPP_REG_U8_MODULE_SPECIFIC SMIAPP_REG_MK_U8(0x0b85)
+#define SMIAPP_REG_U16_DEPTH_OF_FIELD SMIAPP_REG_MK_U16(0x0b86)
+#define SMIAPP_REG_U16_FOCUS_DISTANCE SMIAPP_REG_MK_U16(0x0b88)
+#define SMIAPP_REG_U8_ESTIMATION_MODE_CTRL SMIAPP_REG_MK_U8(0x0b8a)
+#define SMIAPP_REG_U16_COLOUR_TEMPERATURE SMIAPP_REG_MK_U16(0x0b8c)
+#define SMIAPP_REG_U16_ABSOLUTE_GAIN_GREENR SMIAPP_REG_MK_U16(0x0b8e)
+#define SMIAPP_REG_U16_ABSOLUTE_GAIN_RED SMIAPP_REG_MK_U16(0x0b90)
+#define SMIAPP_REG_U16_ABSOLUTE_GAIN_BLUE SMIAPP_REG_MK_U16(0x0b92)
+#define SMIAPP_REG_U16_ABSOLUTE_GAIN_GREENB SMIAPP_REG_MK_U16(0x0b94)
+#define SMIAPP_REG_U8_ESTIMATION_ZONE_MODE SMIAPP_REG_MK_U8(0x0bc0)
+#define SMIAPP_REG_U16_FIXED_ZONE_WEIGHTING SMIAPP_REG_MK_U16(0x0bc2)
+#define SMIAPP_REG_U16_CUSTOM_ZONE_X_START SMIAPP_REG_MK_U16(0x0bc4)
+#define SMIAPP_REG_U16_CUSTOM_ZONE_Y_START SMIAPP_REG_MK_U16(0x0bc6)
+#define SMIAPP_REG_U16_CUSTOM_ZONE_WIDTH SMIAPP_REG_MK_U16(0x0bc8)
+#define SMIAPP_REG_U16_CUSTOM_ZONE_HEIGHT SMIAPP_REG_MK_U16(0x0bca)
+#define SMIAPP_REG_U8_GLOBAL_RESET_CTRL1 SMIAPP_REG_MK_U8(0x0c00)
+#define SMIAPP_REG_U8_GLOBAL_RESET_CTRL2 SMIAPP_REG_MK_U8(0x0c01)
+#define SMIAPP_REG_U8_GLOBAL_RESET_MODE_CONFIG_1 SMIAPP_REG_MK_U8(0x0c02)
+#define SMIAPP_REG_U8_GLOBAL_RESET_MODE_CONFIG_2 SMIAPP_REG_MK_U8(0x0c03)
+#define SMIAPP_REG_U16_TRDY_CTRL SMIAPP_REG_MK_U16(0x0c04)
+#define SMIAPP_REG_U16_TRDOUT_CTRL SMIAPP_REG_MK_U16(0x0c06)
+#define SMIAPP_REG_U16_TSHUTTER_STROBE_DELAY_CTRL SMIAPP_REG_MK_U16(0x0c08)
+#define SMIAPP_REG_U16_TSHUTTER_STROBE_WIDTH_CTRL SMIAPP_REG_MK_U16(0x0c0a)
+#define SMIAPP_REG_U16_TFLASH_STROBE_DELAY_CTRL SMIAPP_REG_MK_U16(0x0c0c)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_HIGH_CTRL SMIAPP_REG_MK_U16(0x0c0e)
+#define SMIAPP_REG_U16_TGRST_INTERVAL_CTRL SMIAPP_REG_MK_U16(0x0c10)
+#define SMIAPP_REG_U8_FLASH_STROBE_ADJUSTMENT SMIAPP_REG_MK_U8(0x0c12)
+#define SMIAPP_REG_U16_FLASH_STROBE_START_POINT SMIAPP_REG_MK_U16(0x0c14)
+#define SMIAPP_REG_U16_TFLASH_STROBE_DELAY_RS_CTRL SMIAPP_REG_MK_U16(0x0c16)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_HIGH_RS_CTRL SMIAPP_REG_MK_U16(0x0c18)
+#define SMIAPP_REG_U8_FLASH_MODE_RS SMIAPP_REG_MK_U8(0x0c1a)
+#define SMIAPP_REG_U8_FLASH_TRIGGER_RS SMIAPP_REG_MK_U8(0x0c1b)
+#define SMIAPP_REG_U8_FLASH_STATUS SMIAPP_REG_MK_U8(0x0c1c)
+#define SMIAPP_REG_U8_SA_STROBE_MODE SMIAPP_REG_MK_U8(0x0c1d)
+#define SMIAPP_REG_U16_SA_STROBE_START_POINT SMIAPP_REG_MK_U16(0x0c1e)
+#define SMIAPP_REG_U16_TSA_STROBE_DELAY_CTRL SMIAPP_REG_MK_U16(0x0c20)
+#define SMIAPP_REG_U16_TSA_STROBE_WIDTH_CTRL SMIAPP_REG_MK_U16(0x0c22)
+#define SMIAPP_REG_U8_SA_STROBE_TRIGGER SMIAPP_REG_MK_U8(0x0c24)
+#define SMIAPP_REG_U8_SPECIAL_ACTUATOR_STATUS SMIAPP_REG_MK_U8(0x0c25)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH2_HIGH_RS_CTRL SMIAPP_REG_MK_U16(0x0c26)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_LOW_RS_CTRL SMIAPP_REG_MK_U16(0x0c28)
+#define SMIAPP_REG_U8_TFLASH_STROBE_COUNT_RS_CTRL SMIAPP_REG_MK_U8(0x0c2a)
+#define SMIAPP_REG_U8_TFLASH_STROBE_COUNT_CTRL SMIAPP_REG_MK_U8(0x0c2b)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH2_HIGH_CTRL SMIAPP_REG_MK_U16(0x0c2c)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_LOW_CTRL SMIAPP_REG_MK_U16(0x0c2e)
+#define SMIAPP_REG_U8_LOW_LEVEL_CTRL SMIAPP_REG_MK_U8(0x0c80)
+#define SMIAPP_REG_U16_MAIN_TRIGGER_REF_POINT SMIAPP_REG_MK_U16(0x0c82)
+#define SMIAPP_REG_U16_MAIN_TRIGGER_T3 SMIAPP_REG_MK_U16(0x0c84)
+#define SMIAPP_REG_U8_MAIN_TRIGGER_COUNT SMIAPP_REG_MK_U8(0x0c86)
+#define SMIAPP_REG_U16_PHASE1_TRIGGER_T3 SMIAPP_REG_MK_U16(0x0c88)
+#define SMIAPP_REG_U8_PHASE1_TRIGGER_COUNT SMIAPP_REG_MK_U8(0x0c8a)
+#define SMIAPP_REG_U16_PHASE2_TRIGGER_T3 SMIAPP_REG_MK_U16(0x0c8c)
+#define SMIAPP_REG_U8_PHASE2_TRIGGER_COUNT SMIAPP_REG_MK_U8(0x0c8e)
+#define SMIAPP_REG_U8_MECH_SHUTTER_CTRL SMIAPP_REG_MK_U8(0x0d00)
+#define SMIAPP_REG_U8_OPERATION_MODE SMIAPP_REG_MK_U8(0x0d01)
+#define SMIAPP_REG_U8_ACT_STATE1 SMIAPP_REG_MK_U8(0x0d02)
+#define SMIAPP_REG_U8_ACT_STATE2 SMIAPP_REG_MK_U8(0x0d03)
+#define SMIAPP_REG_U16_FOCUS_CHANGE SMIAPP_REG_MK_U16(0x0d80)
+#define SMIAPP_REG_U16_FOCUS_CHANGE_CONTROL SMIAPP_REG_MK_U16(0x0d82)
+#define SMIAPP_REG_U16_FOCUS_CHANGE_NUMBER_PHASE1 SMIAPP_REG_MK_U16(0x0d84)
+#define SMIAPP_REG_U16_FOCUS_CHANGE_NUMBER_PHASE2 SMIAPP_REG_MK_U16(0x0d86)
+#define SMIAPP_REG_U8_STROBE_COUNT_PHASE1 SMIAPP_REG_MK_U8(0x0d88)
+#define SMIAPP_REG_U8_STROBE_COUNT_PHASE2 SMIAPP_REG_MK_U8(0x0d89)
+#define SMIAPP_REG_U8_POSITION SMIAPP_REG_MK_U8(0x0d8a)
+#define SMIAPP_REG_U8_BRACKETING_LUT_CONTROL SMIAPP_REG_MK_U8(0x0e00)
+#define SMIAPP_REG_U8_BRACKETING_LUT_MODE SMIAPP_REG_MK_U8(0x0e01)
+#define SMIAPP_REG_U8_BRACKETING_LUT_ENTRY_CONTROL SMIAPP_REG_MK_U8(0x0e02)
+#define SMIAPP_REG_U8_LUT_PARAMETERS_START SMIAPP_REG_MK_U8(0x0e10)
+#define SMIAPP_REG_U8_LUT_PARAMETERS_END SMIAPP_REG_MK_U8(0x0eff)
+#define SMIAPP_REG_U16_INTEGRATION_TIME_CAPABILITY SMIAPP_REG_MK_U16(0x1000)
+#define SMIAPP_REG_U16_COARSE_INTEGRATION_TIME_MIN SMIAPP_REG_MK_U16(0x1004)
+#define SMIAPP_REG_U16_COARSE_INTEGRATION_TIME_MAX_MARGIN SMIAPP_REG_MK_U16(0x1006)
+#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MIN SMIAPP_REG_MK_U16(0x1008)
+#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MAX_MARGIN SMIAPP_REG_MK_U16(0x100a)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_CAPABILITY SMIAPP_REG_MK_U16(0x1080)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_MIN SMIAPP_REG_MK_U16(0x1084)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_MAX SMIAPP_REG_MK_U16(0x1086)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_STEP_SIZE SMIAPP_REG_MK_U16(0x1088)
+#define SMIAPP_REG_F32_MIN_EXT_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1100)
+#define SMIAPP_REG_F32_MAX_EXT_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1104)
+#define SMIAPP_REG_U16_MIN_PRE_PLL_CLK_DIV SMIAPP_REG_MK_U16(0x1108)
+#define SMIAPP_REG_U16_MAX_PRE_PLL_CLK_DIV SMIAPP_REG_MK_U16(0x110a)
+#define SMIAPP_REG_F32_MIN_PLL_IP_FREQ_HZ SMIAPP_REG_MK_F32(0x110c)
+#define SMIAPP_REG_F32_MAX_PLL_IP_FREQ_HZ SMIAPP_REG_MK_F32(0x1110)
+#define SMIAPP_REG_U16_MIN_PLL_MULTIPLIER SMIAPP_REG_MK_U16(0x1114)
+#define SMIAPP_REG_U16_MAX_PLL_MULTIPLIER SMIAPP_REG_MK_U16(0x1116)
+#define SMIAPP_REG_F32_MIN_PLL_OP_FREQ_HZ SMIAPP_REG_MK_F32(0x1118)
+#define SMIAPP_REG_F32_MAX_PLL_OP_FREQ_HZ SMIAPP_REG_MK_F32(0x111c)
+#define SMIAPP_REG_U16_MIN_VT_SYS_CLK_DIV SMIAPP_REG_MK_U16(0x1120)
+#define SMIAPP_REG_U16_MAX_VT_SYS_CLK_DIV SMIAPP_REG_MK_U16(0x1122)
+#define SMIAPP_REG_F32_MIN_VT_SYS_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1124)
+#define SMIAPP_REG_F32_MAX_VT_SYS_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1128)
+#define SMIAPP_REG_F32_MIN_VT_PIX_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x112c)
+#define SMIAPP_REG_F32_MAX_VT_PIX_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1130)
+#define SMIAPP_REG_U16_MIN_VT_PIX_CLK_DIV SMIAPP_REG_MK_U16(0x1134)
+#define SMIAPP_REG_U16_MAX_VT_PIX_CLK_DIV SMIAPP_REG_MK_U16(0x1136)
+#define SMIAPP_REG_U16_MIN_FRAME_LENGTH_LINES SMIAPP_REG_MK_U16(0x1140)
+#define SMIAPP_REG_U16_MAX_FRAME_LENGTH_LINES SMIAPP_REG_MK_U16(0x1142)
+#define SMIAPP_REG_U16_MIN_LINE_LENGTH_PCK SMIAPP_REG_MK_U16(0x1144)
+#define SMIAPP_REG_U16_MAX_LINE_LENGTH_PCK SMIAPP_REG_MK_U16(0x1146)
+#define SMIAPP_REG_U16_MIN_LINE_BLANKING_PCK SMIAPP_REG_MK_U16(0x1148)
+#define SMIAPP_REG_U16_MIN_FRAME_BLANKING_LINES SMIAPP_REG_MK_U16(0x114a)
+#define SMIAPP_REG_U8_MIN_LINE_LENGTH_PCK_STEP_SIZE SMIAPP_REG_MK_U8(0x114c)
+#define SMIAPP_REG_U16_MIN_OP_SYS_CLK_DIV SMIAPP_REG_MK_U16(0x1160)
+#define SMIAPP_REG_U16_MAX_OP_SYS_CLK_DIV SMIAPP_REG_MK_U16(0x1162)
+#define SMIAPP_REG_F32_MIN_OP_SYS_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1164)
+#define SMIAPP_REG_F32_MAX_OP_SYS_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1168)
+#define SMIAPP_REG_U16_MIN_OP_PIX_CLK_DIV SMIAPP_REG_MK_U16(0x116c)
+#define SMIAPP_REG_U16_MAX_OP_PIX_CLK_DIV SMIAPP_REG_MK_U16(0x116e)
+#define SMIAPP_REG_F32_MIN_OP_PIX_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1170)
+#define SMIAPP_REG_F32_MAX_OP_PIX_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1174)
+#define SMIAPP_REG_U16_X_ADDR_MIN SMIAPP_REG_MK_U16(0x1180)
+#define SMIAPP_REG_U16_Y_ADDR_MIN SMIAPP_REG_MK_U16(0x1182)
+#define SMIAPP_REG_U16_X_ADDR_MAX SMIAPP_REG_MK_U16(0x1184)
+#define SMIAPP_REG_U16_Y_ADDR_MAX SMIAPP_REG_MK_U16(0x1186)
+#define SMIAPP_REG_U16_MIN_X_OUTPUT_SIZE SMIAPP_REG_MK_U16(0x1188)
+#define SMIAPP_REG_U16_MIN_Y_OUTPUT_SIZE SMIAPP_REG_MK_U16(0x118a)
+#define SMIAPP_REG_U16_MAX_X_OUTPUT_SIZE SMIAPP_REG_MK_U16(0x118c)
+#define SMIAPP_REG_U16_MAX_Y_OUTPUT_SIZE SMIAPP_REG_MK_U16(0x118e)
+#define SMIAPP_REG_U16_MIN_EVEN_INC SMIAPP_REG_MK_U16(0x11c0)
+#define SMIAPP_REG_U16_MAX_EVEN_INC SMIAPP_REG_MK_U16(0x11c2)
+#define SMIAPP_REG_U16_MIN_ODD_INC SMIAPP_REG_MK_U16(0x11c4)
+#define SMIAPP_REG_U16_MAX_ODD_INC SMIAPP_REG_MK_U16(0x11c6)
+#define SMIAPP_REG_U16_SCALING_CAPABILITY SMIAPP_REG_MK_U16(0x1200)
+#define SMIAPP_REG_U16_SCALER_M_MIN SMIAPP_REG_MK_U16(0x1204)
+#define SMIAPP_REG_U16_SCALER_M_MAX SMIAPP_REG_MK_U16(0x1206)
+#define SMIAPP_REG_U16_SCALER_N_MIN SMIAPP_REG_MK_U16(0x1208)
+#define SMIAPP_REG_U16_SCALER_N_MAX SMIAPP_REG_MK_U16(0x120a)
+#define SMIAPP_REG_U16_SPATIAL_SAMPLING_CAPABILITY SMIAPP_REG_MK_U16(0x120c)
+#define SMIAPP_REG_U8_DIGITAL_CROP_CAPABILITY SMIAPP_REG_MK_U8(0x120e)
+#define SMIAPP_REG_U16_COMPRESSION_CAPABILITY SMIAPP_REG_MK_U16(0x1300)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_REDINRED SMIAPP_REG_MK_U16(0x1400)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_GREENINRED SMIAPP_REG_MK_U16(0x1402)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_BLUEINRED SMIAPP_REG_MK_U16(0x1404)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_REDINGREEN SMIAPP_REG_MK_U16(0x1406)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_GREENINGREEN SMIAPP_REG_MK_U16(0x1408)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_BLUEINGREEN SMIAPP_REG_MK_U16(0x140a)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_REDINBLUE SMIAPP_REG_MK_U16(0x140c)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_GREENINBLUE SMIAPP_REG_MK_U16(0x140e)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_BLUEINBLUE SMIAPP_REG_MK_U16(0x1410)
+#define SMIAPP_REG_U16_FIFO_SIZE_PIXELS SMIAPP_REG_MK_U16(0x1500)
+#define SMIAPP_REG_U8_FIFO_SUPPORT_CAPABILITY SMIAPP_REG_MK_U8(0x1502)
+#define SMIAPP_REG_U8_DPHY_CTRL_CAPABILITY SMIAPP_REG_MK_U8(0x1600)
+#define SMIAPP_REG_U8_CSI_LANE_MODE_CAPABILITY SMIAPP_REG_MK_U8(0x1601)
+#define SMIAPP_REG_U8_CSI_SIGNALLING_MODE_CAPABILITY SMIAPP_REG_MK_U8(0x1602)
+#define SMIAPP_REG_U8_FAST_STANDBY_CAPABILITY SMIAPP_REG_MK_U8(0x1603)
+#define SMIAPP_REG_U8_CCI_ADDRESS_CONTROL_CAPABILITY SMIAPP_REG_MK_U8(0x1604)
+#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_1_LANE_MODE_MBPS SMIAPP_REG_MK_U32(0x1608)
+#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_2_LANE_MODE_MBPS SMIAPP_REG_MK_U32(0x160c)
+#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_3_LANE_MODE_MBPS SMIAPP_REG_MK_U32(0x1610)
+#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_4_LANE_MODE_MBPS SMIAPP_REG_MK_U32(0x1614)
+#define SMIAPP_REG_U8_TEMP_SENSOR_CAPABILITY SMIAPP_REG_MK_U8(0x1618)
+#define SMIAPP_REG_U16_MIN_FRAME_LENGTH_LINES_BIN SMIAPP_REG_MK_U16(0x1700)
+#define SMIAPP_REG_U16_MAX_FRAME_LENGTH_LINES_BIN SMIAPP_REG_MK_U16(0x1702)
+#define SMIAPP_REG_U16_MIN_LINE_LENGTH_PCK_BIN SMIAPP_REG_MK_U16(0x1704)
+#define SMIAPP_REG_U16_MAX_LINE_LENGTH_PCK_BIN SMIAPP_REG_MK_U16(0x1706)
+#define SMIAPP_REG_U16_MIN_LINE_BLANKING_PCK_BIN SMIAPP_REG_MK_U16(0x1708)
+#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MIN_BIN SMIAPP_REG_MK_U16(0x170a)
+#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN SMIAPP_REG_MK_U16(0x170c)
+#define SMIAPP_REG_U8_BINNING_CAPABILITY SMIAPP_REG_MK_U8(0x1710)
+#define SMIAPP_REG_U8_BINNING_WEIGHTING_CAPABILITY SMIAPP_REG_MK_U8(0x1711)
+#define SMIAPP_REG_U8_BINNING_SUBTYPES SMIAPP_REG_MK_U8(0x1712)
+#define SMIAPP_REG_U8_BINNING_TYPE_n(n) SMIAPP_REG_MK_U8(0x1713 + (n)) /* 1 <= n <= 237 */
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_CAPABILITY SMIAPP_REG_MK_U8(0x1800)
+#define SMIAPP_REG_U8_SHADING_CORRECTION_CAPABILITY SMIAPP_REG_MK_U8(0x1900)
+#define SMIAPP_REG_U8_GREEN_IMBALANCE_CAPABILITY SMIAPP_REG_MK_U8(0x1901)
+#define SMIAPP_REG_U8_BLACK_LEVEL_CAPABILITY SMIAPP_REG_MK_U8(0x1902)
+#define SMIAPP_REG_U8_MODULE_SPECIFIC_CORRECTION_CAPABILITY SMIAPP_REG_MK_U8(0x1903)
+#define SMIAPP_REG_U16_DEFECT_CORRECTION_CAPABILITY SMIAPP_REG_MK_U16(0x1904)
+#define SMIAPP_REG_U16_DEFECT_CORRECTION_CAPABILITY_2 SMIAPP_REG_MK_U16(0x1906)
+#define SMIAPP_REG_U8_EDOF_CAPABILITY SMIAPP_REG_MK_U8(0x1980)
+#define SMIAPP_REG_U8_ESTIMATION_FRAMES SMIAPP_REG_MK_U8(0x1981)
+#define SMIAPP_REG_U8_SUPPORTS_SHARPNESS_ADJ SMIAPP_REG_MK_U8(0x1982)
+#define SMIAPP_REG_U8_SUPPORTS_DENOISING_ADJ SMIAPP_REG_MK_U8(0x1983)
+#define SMIAPP_REG_U8_SUPPORTS_MODULE_SPECIFIC_ADJ SMIAPP_REG_MK_U8(0x1984)
+#define SMIAPP_REG_U8_SUPPORTS_DEPTH_OF_FIELD_ADJ SMIAPP_REG_MK_U8(0x1985)
+#define SMIAPP_REG_U8_SUPPORTS_FOCUS_DISTANCE_ADJ SMIAPP_REG_MK_U8(0x1986)
+#define SMIAPP_REG_U8_COLOUR_FEEDBACK_CAPABILITY SMIAPP_REG_MK_U8(0x1987)
+#define SMIAPP_REG_U8_EDOF_SUPPORT_AB_NXM SMIAPP_REG_MK_U8(0x1988)
+#define SMIAPP_REG_U8_ESTIMATION_MODE_CAPABILITY SMIAPP_REG_MK_U8(0x19c0)
+#define SMIAPP_REG_U8_ESTIMATION_ZONE_CAPABILITY SMIAPP_REG_MK_U8(0x19c1)
+#define SMIAPP_REG_U16_EST_DEPTH_OF_FIELD SMIAPP_REG_MK_U16(0x19c2)
+#define SMIAPP_REG_U16_EST_FOCUS_DISTANCE SMIAPP_REG_MK_U16(0x19c4)
+#define SMIAPP_REG_U16_CAPABILITY_TRDY_MIN SMIAPP_REG_MK_U16(0x1a00)
+#define SMIAPP_REG_U8_FLASH_MODE_CAPABILITY SMIAPP_REG_MK_U8(0x1a02)
+#define SMIAPP_REG_U16_MECH_SHUT_AND_ACT_START_ADDR SMIAPP_REG_MK_U16(0x1b02)
+#define SMIAPP_REG_U8_ACTUATOR_CAPABILITY SMIAPP_REG_MK_U8(0x1b04)
+#define SMIAPP_REG_U16_ACTUATOR_TYPE SMIAPP_REG_MK_U16(0x1b40)
+#define SMIAPP_REG_U8_AF_DEVICE_ADDRESS SMIAPP_REG_MK_U8(0x1b42)
+#define SMIAPP_REG_U16_FOCUS_CHANGE_ADDRESS SMIAPP_REG_MK_U16(0x1b44)
+#define SMIAPP_REG_U8_BRACKETING_LUT_CAPABILITY_1 SMIAPP_REG_MK_U8(0x1c00)
+#define SMIAPP_REG_U8_BRACKETING_LUT_CAPABILITY_2 SMIAPP_REG_MK_U8(0x1c01)
+#define SMIAPP_REG_U8_BRACKETING_LUT_SIZE SMIAPP_REG_MK_U8(0x1c02)
diff --git a/drivers/media/video/smiapp/smiapp-reg.h b/drivers/media/video/smiapp/smiapp-reg.h
new file mode 100644
index 000000000000..d0167aa17534
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-reg.h
@@ -0,0 +1,122 @@
+/*
+ * drivers/media/video/smiapp/smiapp-reg.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __SMIAPP_REG_H_
+#define __SMIAPP_REG_H_
+
+#include "smiapp-reg-defs.h"
+
+/* Image orientation register bits */
+#define SMIAPP_IMAGE_ORIENTATION_HFLIP (1 << 0)
+#define SMIAPP_IMAGE_ORIENTATION_VFLIP (1 << 1)
+
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN (1 << 0)
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_RD_EN (0 << 1)
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_WR_EN (1 << 1)
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_ERR_CLEAR (1 << 2)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY (1 << 0)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_WR_READY (1 << 1)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EDATA (1 << 2)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE (1 << 3)
+
+#define SMIAPP_SOFTWARE_RESET (1 << 0)
+
+#define SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE (1 << 0)
+#define SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE (1 << 1)
+
+#define SMIAPP_DPHY_CTRL_AUTOMATIC 0
+/* DPHY control based on REQUESTED_LINK_BIT_RATE_MBPS */
+#define SMIAPP_DPHY_CTRL_UI 1
+#define SMIAPP_DPHY_CTRL_REGISTER 2
+
+#define SMIAPP_COMPRESSION_MODE_SIMPLE_PREDICTOR 1
+#define SMIAPP_COMPRESSION_MODE_ADVANCED_PREDICTOR 2
+
+#define SMIAPP_MODE_SELECT_SOFTWARE_STANDBY 0
+#define SMIAPP_MODE_SELECT_STREAMING 1
+
+#define SMIAPP_SCALING_MODE_NONE 0
+#define SMIAPP_SCALING_MODE_HORIZONTAL 1
+#define SMIAPP_SCALING_MODE_BOTH 2
+
+#define SMIAPP_SCALING_CAPABILITY_NONE 0
+#define SMIAPP_SCALING_CAPABILITY_HORIZONTAL 1
+#define SMIAPP_SCALING_CAPABILITY_BOTH 2 /* horizontal/both */
+
+/* digital crop right before scaler */
+#define SMIAPP_DIGITAL_CROP_CAPABILITY_NONE 0
+#define SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP 1
+
+#define SMIAPP_BINNING_CAPABILITY_NO 0
+#define SMIAPP_BINNING_CAPABILITY_YES 1
+
+/* Maximum number of binning subtypes */
+#define SMIAPP_BINNING_SUBTYPES 253
+
+#define SMIAPP_PIXEL_ORDER_GRBG 0
+#define SMIAPP_PIXEL_ORDER_RGGB 1
+#define SMIAPP_PIXEL_ORDER_BGGR 2
+#define SMIAPP_PIXEL_ORDER_GBRG 3
+
+#define SMIAPP_DATA_FORMAT_MODEL_TYPE_NORMAL 1
+#define SMIAPP_DATA_FORMAT_MODEL_TYPE_EXTENDED 2
+#define SMIAPP_DATA_FORMAT_MODEL_TYPE_NORMAL_N 8
+#define SMIAPP_DATA_FORMAT_MODEL_TYPE_EXTENDED_N 16
+
+#define SMIAPP_FRAME_FORMAT_MODEL_TYPE_2BYTE 0x01
+#define SMIAPP_FRAME_FORMAT_MODEL_TYPE_4BYTE 0x02
+#define SMIAPP_FRAME_FORMAT_MODEL_SUBTYPE_NROWS_MASK 0x0f
+#define SMIAPP_FRAME_FORMAT_MODEL_SUBTYPE_NCOLS_MASK 0xf0
+#define SMIAPP_FRAME_FORMAT_MODEL_SUBTYPE_NCOLS_SHIFT 4
+
+#define SMIAPP_FRAME_FORMAT_DESC_2_PIXELCODE_MASK 0xf000
+#define SMIAPP_FRAME_FORMAT_DESC_2_PIXELCODE_SHIFT 12
+#define SMIAPP_FRAME_FORMAT_DESC_2_PIXELS_MASK 0x0fff
+
+#define SMIAPP_FRAME_FORMAT_DESC_4_PIXELCODE_MASK 0xf0000000
+#define SMIAPP_FRAME_FORMAT_DESC_4_PIXELCODE_SHIFT 28
+#define SMIAPP_FRAME_FORMAT_DESC_4_PIXELS_MASK 0x0000ffff
+
+#define SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_EMBEDDED 1
+#define SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_DUMMY 2
+#define SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_BLACK 3
+#define SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_DARK 4
+#define SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_VISIBLE 5
+
+#define SMIAPP_FAST_STANDBY_CTRL_COMPLETE_FRAMES 0
+#define SMIAPP_FAST_STANDBY_CTRL_IMMEDIATE 1
+
+/* Scaling N factor */
+#define SMIAPP_SCALE_N 16
+
+/* Image statistics registers */
+/* Registers 0x2000 to 0x2fff are reserved for future
+ * use for statistics features.
+ */
+
+/* Manufacturer Specific Registers: 0x3000 to 0x3fff
+ * The manufacturer specifies these as a black box.
+ */
+
+#endif /* __SMIAPP_REG_H_ */
diff --git a/drivers/media/video/smiapp/smiapp-regs.c b/drivers/media/video/smiapp/smiapp-regs.c
new file mode 100644
index 000000000000..b1812b17a407
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-regs.c
@@ -0,0 +1,273 @@
+/*
+ * drivers/media/video/smiapp/smiapp-regs.c
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+
+#include "smiapp.h"
+#include "smiapp-regs.h"
+
+static uint32_t float_to_u32_mul_1000000(struct i2c_client *client,
+ uint32_t phloat)
+{
+ int32_t exp;
+ uint64_t man;
+
+ if (phloat >= 0x80000000) {
+ dev_err(&client->dev, "this is a negative number\n");
+ return 0;
+ }
+
+ if (phloat == 0x7f800000)
+ return ~0; /* Inf. */
+
+ if ((phloat & 0x7f800000) == 0x7f800000) {
+ dev_err(&client->dev, "NaN or other special number\n");
+ return 0;
+ }
+
+ /* Valid cases begin here */
+ if (phloat == 0)
+ return 0; /* Valid zero */
+
+ if (phloat > 0x4f800000)
+ return ~0; /* larger than 4294967295 */
+
+ /*
+ * Unbias exponent (note how phloat is now guaranteed to
+ * have 0 in the high bit)
+ */
+ exp = ((int32_t)phloat >> 23) - 127;
+
+	/* Extract mantissa, add the implicit '1' bit and scale by 1000000 */
+ man = ((phloat & 0x7fffff) | 0x800000) * 1000000ULL;
+
+ if (exp < 0)
+ man >>= -exp;
+ else
+ man <<= exp;
+
+	man >>= 23; /* Remove the 2^23 mantissa scaling */
+
+ return man & 0xffffffff;
+}
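As a quick sanity check of the conversion above, here is a stand-alone user-space sketch of the same valid-range path (the kernel's guards for negative, NaN/Inf and overflowing inputs are omitted); 0x41200000, the IEEE-754 encoding of 10.0f, comes out as 10000000.

	#include <stdint.h>
	#include <stdio.h>

	/* Valid-range path of float_to_u32_mul_1000000(), illustration only. */
	static uint32_t smia_float_to_u32_mul_1000000(uint32_t phloat)
	{
		int32_t exp = ((int32_t)phloat >> 23) - 127;	/* unbias exponent */
		uint64_t man = ((phloat & 0x7fffff) | 0x800000) * 1000000ULL;

		if (exp < 0)
			man >>= -exp;
		else
			man <<= exp;

		return (uint32_t)(man >> 23);	/* drop the 2^23 mantissa scaling */
	}

	int main(void)
	{
		printf("%u\n", smia_float_to_u32_mul_1000000(0x41200000)); /* 10000000 */
		return 0;
	}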
+
+
+/*
+ * Read an 8/16/32-bit I2C register. The value is returned in 'val'.
+ * Returns zero if successful, or non-zero otherwise.
+ */
+static int ____smiapp_read(struct smiapp_sensor *sensor, u16 reg,
+ u16 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ struct i2c_msg msg;
+ unsigned char data[4];
+ u16 offset = reg;
+ int r;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8) (offset >> 8);
+ data[1] = (u8) offset;
+ r = i2c_transfer(client->adapter, &msg, 1);
+ if (r != 1) {
+ if (r >= 0)
+ r = -EBUSY;
+ goto err;
+ }
+
+ msg.len = len;
+ msg.flags = I2C_M_RD;
+ r = i2c_transfer(client->adapter, &msg, 1);
+ if (r != 1) {
+ if (r >= 0)
+ r = -EBUSY;
+ goto err;
+ }
+
+ *val = 0;
+ /* high byte comes first */
+ switch (len) {
+ case SMIA_REG_32BIT:
+ *val = (data[0] << 24) + (data[1] << 16) + (data[2] << 8) +
+ data[3];
+ break;
+ case SMIA_REG_16BIT:
+ *val = (data[0] << 8) + data[1];
+ break;
+ case SMIA_REG_8BIT:
+ *val = data[0];
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+
+err:
+ dev_err(&client->dev, "read from offset 0x%x error %d\n", offset, r);
+
+ return r;
+}
+
+/* Read a register using 8-bit access only. */
+static int ____smiapp_read_8only(struct smiapp_sensor *sensor, u16 reg,
+ u16 len, u32 *val)
+{
+ unsigned int i;
+ int rval;
+
+ *val = 0;
+
+ for (i = 0; i < len; i++) {
+ u32 val8;
+
+ rval = ____smiapp_read(sensor, reg + i, 1, &val8);
+ if (rval < 0)
+ return rval;
+ *val |= val8 << ((len - i - 1) << 3);
+ }
+
+ return 0;
+}
+
+/*
+ * Read an 8/16/32-bit I2C register. The value is returned in 'val'.
+ * Returns zero if successful, or non-zero otherwise.
+ */
+static int __smiapp_read(struct smiapp_sensor *sensor, u32 reg, u32 *val,
+ bool only8)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int len = (u8)(reg >> 16);
+ int rval;
+
+ if (len != SMIA_REG_8BIT && len != SMIA_REG_16BIT
+ && len != SMIA_REG_32BIT)
+ return -EINVAL;
+
+ if (smiapp_quirk_reg(sensor, reg, val))
+ goto found_quirk;
+
+ if (len == SMIA_REG_8BIT && !only8)
+ rval = ____smiapp_read(sensor, (u16)reg, len, val);
+ else
+ rval = ____smiapp_read_8only(sensor, (u16)reg, len, val);
+ if (rval < 0)
+ return rval;
+
+found_quirk:
+ if (reg & SMIA_REG_FLAG_FLOAT)
+ *val = float_to_u32_mul_1000000(client, *val);
+
+ return 0;
+}
+
+int smiapp_read(struct smiapp_sensor *sensor, u32 reg, u32 *val)
+{
+ return __smiapp_read(
+ sensor, reg, val,
+ smiapp_needs_quirk(sensor,
+ SMIAPP_QUIRK_FLAG_8BIT_READ_ONLY));
+}
+
+int smiapp_read_8only(struct smiapp_sensor *sensor, u32 reg, u32 *val)
+{
+ return __smiapp_read(sensor, reg, val, true);
+}
+
+/*
+ * Write to an 8/16/32-bit register.
+ * Returns zero if successful, or non-zero otherwise.
+ */
+int smiapp_write(struct smiapp_sensor *sensor, u32 reg, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ struct i2c_msg msg;
+ unsigned char data[6];
+ unsigned int retries;
+ unsigned int flags = reg >> 24;
+ unsigned int len = (u8)(reg >> 16);
+ u16 offset = reg;
+ int r;
+
+ if ((len != SMIA_REG_8BIT && len != SMIA_REG_16BIT &&
+ len != SMIA_REG_32BIT) || flags)
+ return -EINVAL;
+
+ msg.addr = client->addr;
+ msg.flags = 0; /* Write */
+ msg.len = 2 + len;
+ msg.buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8) (reg >> 8);
+ data[1] = (u8) (reg & 0xff);
+
+ switch (len) {
+ case SMIA_REG_8BIT:
+ data[2] = val;
+ break;
+ case SMIA_REG_16BIT:
+ data[2] = val >> 8;
+ data[3] = val;
+ break;
+ case SMIA_REG_32BIT:
+ data[2] = val >> 24;
+ data[3] = val >> 16;
+ data[4] = val >> 8;
+ data[5] = val;
+ break;
+ default:
+ BUG();
+ }
+
+ for (retries = 0; retries < 5; retries++) {
+ /*
+		 * For an unknown reason the sensor sometimes stops
+		 * responding. This retry loop is a temporary workaround
+		 * until the root cause is found.
+ */
+ r = i2c_transfer(client->adapter, &msg, 1);
+ if (r == 1) {
+ if (retries)
+ dev_err(&client->dev,
+ "sensor i2c stall encountered. "
+ "retries: %d\n", retries);
+ return 0;
+ }
+
+ usleep_range(2000, 2000);
+ }
+
+ dev_err(&client->dev,
+ "wrote 0x%x to offset 0x%x error %d\n", val, offset, r);
+
+ return r;
+}
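For context, the 'reg' argument handled by smiapp_read()/smiapp_write() above is a packed descriptor: bits 0-15 hold the CCI register address, bits 16-23 the access width in bytes (SMIA_REG_8BIT/16BIT/32BIT) and bits 24-31 flags such as SMIA_REG_FLAG_FLOAT. The SMIAPP_REG_MK_* helpers that build these values live in smiapp-reg-defs.h and are not reproduced here; the macro below is only a hypothetical stand-in to illustrate the encoding that the code above decodes.

	/* Hypothetical re-creation of the register encoding, illustration only. */
	#define EXAMPLE_REG_MK(width, flags, addr) \
		((u32)(flags) | ((u32)(width) << 16) | (u16)(addr))

	/* A 16-bit register at CCI address 0x1142, no flags. */
	#define EXAMPLE_REG_U16_MAX_FRAME_LENGTH_LINES \
		EXAMPLE_REG_MK(SMIA_REG_16BIT, 0, 0x1142)

	static int example_read_limit(struct smiapp_sensor *sensor, u32 *val)
	{
		/* smiapp_read() takes the 16-bit access width from bits 16-23;
		 * if SMIA_REG_FLAG_FLOAT were set it would also convert the
		 * result to value * 1000000 before returning it. */
		return smiapp_read(sensor, EXAMPLE_REG_U16_MAX_FRAME_LENGTH_LINES, val);
	}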
diff --git a/drivers/media/video/smiapp/smiapp-regs.h b/drivers/media/video/smiapp/smiapp-regs.h
new file mode 100644
index 000000000000..7f9013b47971
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-regs.h
@@ -0,0 +1,49 @@
+/*
+ * drivers/media/video/smiapp/smiapp-regs.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef SMIAPP_REGS_H
+#define SMIAPP_REGS_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+/* Use upper 8 bits of the type field for flags */
+#define SMIA_REG_FLAG_FLOAT (1 << 24)
+
+#define SMIA_REG_8BIT 1
+#define SMIA_REG_16BIT 2
+#define SMIA_REG_32BIT 4
+struct smia_reg {
+ u16 type;
+ u16 reg; /* 16-bit offset */
+ u32 val; /* 8/16/32-bit value */
+};
+
+struct smiapp_sensor;
+
+int smiapp_read(struct smiapp_sensor *sensor, u32 reg, u32 *val);
+int smiapp_read_8only(struct smiapp_sensor *sensor, u32 reg, u32 *val);
+int smiapp_write(struct smiapp_sensor *sensor, u32 reg, u32 val);
+
+#endif
diff --git a/drivers/media/video/smiapp/smiapp.h b/drivers/media/video/smiapp/smiapp.h
new file mode 100644
index 000000000000..587f7f11238d
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp.h
@@ -0,0 +1,252 @@
+/*
+ * drivers/media/video/smiapp/smiapp.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2010--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __SMIAPP_PRIV_H_
+#define __SMIAPP_PRIV_H_
+
+#include <linux/mutex.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+#include <media/smiapp.h>
+
+#include "smiapp-pll.h"
+#include "smiapp-reg.h"
+#include "smiapp-regs.h"
+#include "smiapp-quirk.h"
+
+/*
+ * Standard SMIA++ constants
+ */
+#define SMIA_VERSION_1 10
+#define SMIAPP_VERSION_0_8 8 /* Draft 0.8 */
+#define SMIAPP_VERSION_0_9 9 /* Draft 0.9 */
+#define SMIAPP_VERSION_1 10
+
+#define SMIAPP_PROFILE_0 0
+#define SMIAPP_PROFILE_1 1
+#define SMIAPP_PROFILE_2 2
+
+#define SMIAPP_NVM_PAGE_SIZE 64 /* bytes */
+
+#define SMIAPP_RESET_DELAY_CLOCKS 2400
+#define SMIAPP_RESET_DELAY(clk) \
+ (1000 + (SMIAPP_RESET_DELAY_CLOCKS * 1000 \
+ + (clk) / 1000 - 1) / ((clk) / 1000))
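A quick numeric check of the reset-delay macro above, assuming clk is the external clock frequency in Hz so that the result comes out in microseconds:

	#include <stdio.h>

	#define SMIAPP_RESET_DELAY_CLOCKS	2400
	#define SMIAPP_RESET_DELAY(clk)				\
		(1000 + (SMIAPP_RESET_DELAY_CLOCKS * 1000	\
			 + (clk) / 1000 - 1) / ((clk) / 1000))

	int main(void)
	{
		/* 2400 EXTCLK cycles at 9.6 MHz = 250 us (rounded up by the
		 * ceiling division), plus a fixed 1000 us margin. */
		printf("%d\n", SMIAPP_RESET_DELAY(9600000));	/* prints 1250 */
		return 0;
	}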
+
+#include "smiapp-limits.h"
+
+struct smiapp_quirk;
+
+#define SMIAPP_MODULE_IDENT_FLAG_REV_LE (1 << 0)
+
+struct smiapp_module_ident {
+ u8 manufacturer_id;
+ u16 model_id;
+ u8 revision_number_major;
+
+ u8 flags;
+
+ char *name;
+ const struct smiapp_quirk *quirk;
+};
+
+struct smiapp_module_info {
+ u32 manufacturer_id;
+ u32 model_id;
+ u32 revision_number_major;
+ u32 revision_number_minor;
+
+ u32 module_year;
+ u32 module_month;
+ u32 module_day;
+
+ u32 sensor_manufacturer_id;
+ u32 sensor_model_id;
+ u32 sensor_revision_number;
+ u32 sensor_firmware_version;
+
+ u32 smia_version;
+ u32 smiapp_version;
+
+ u32 smiapp_profile;
+
+ char *name;
+ const struct smiapp_quirk *quirk;
+};
+
+#define SMIAPP_IDENT_FQ(manufacturer, model, rev, fl, _name, _quirk) \
+ { .manufacturer_id = manufacturer, \
+ .model_id = model, \
+ .revision_number_major = rev, \
+ .flags = fl, \
+ .name = _name, \
+ .quirk = _quirk, }
+
+#define SMIAPP_IDENT_LQ(manufacturer, model, rev, _name, _quirk) \
+ { .manufacturer_id = manufacturer, \
+ .model_id = model, \
+ .revision_number_major = rev, \
+ .flags = SMIAPP_MODULE_IDENT_FLAG_REV_LE, \
+ .name = _name, \
+ .quirk = _quirk, }
+
+#define SMIAPP_IDENT_L(manufacturer, model, rev, _name) \
+ { .manufacturer_id = manufacturer, \
+ .model_id = model, \
+ .revision_number_major = rev, \
+ .flags = SMIAPP_MODULE_IDENT_FLAG_REV_LE, \
+ .name = _name, }
+
+#define SMIAPP_IDENT_Q(manufacturer, model, rev, _name, _quirk) \
+ { .manufacturer_id = manufacturer, \
+ .model_id = model, \
+ .revision_number_major = rev, \
+ .flags = 0, \
+ .name = _name, \
+ .quirk = _quirk, }
+
+#define SMIAPP_IDENT(manufacturer, model, rev, _name) \
+ { .manufacturer_id = manufacturer, \
+ .model_id = model, \
+ .revision_number_major = rev, \
+ .flags = 0, \
+ .name = _name, }
+
+struct smiapp_reg_limits {
+ u32 addr;
+ char *what;
+};
+
+extern struct smiapp_reg_limits smiapp_reg_limits[];
+
+struct smiapp_csi_data_format {
+ u32 code;
+ u8 width;
+ u8 compressed;
+ u8 pixel_order;
+};
+
+#define SMIAPP_SUBDEVS 3
+
+#define SMIAPP_PA_PAD_SRC 0
+#define SMIAPP_PAD_SINK 0
+#define SMIAPP_PAD_SRC 1
+#define SMIAPP_PADS 2
+
+struct smiapp_binning_subtype {
+ u8 horizontal:4;
+ u8 vertical:4;
+} __packed;
+
+struct smiapp_subdev {
+ struct v4l2_subdev sd;
+ struct media_pad pads[2];
+ struct v4l2_rect sink_fmt;
+ struct v4l2_rect crop[2];
+ struct v4l2_rect compose; /* compose on sink */
+ unsigned short sink_pad;
+ unsigned short source_pad;
+ int npads;
+ struct smiapp_sensor *sensor;
+ struct v4l2_ctrl_handler ctrl_handler;
+};
+
+/*
+ * struct smiapp_sensor - Main device structure
+ */
+struct smiapp_sensor {
+ /*
+ * "mutex" is used to serialise access to all fields here
+ * except v4l2_ctrls at the end of the struct. "mutex" is also
+ * used to serialise access to file handle specific
+ * information. The exception to this rule is the power_mutex
+ * below.
+ */
+ struct mutex mutex;
+ /*
+ * power_mutex is used to serialise power management related
+ * activities. Acquiring "mutex" at that time isn't necessary
+ * since there are no other users anyway.
+ */
+ struct mutex power_mutex;
+ struct smiapp_subdev ssds[SMIAPP_SUBDEVS];
+ u32 ssds_used;
+ struct smiapp_subdev *src;
+ struct smiapp_subdev *binner;
+ struct smiapp_subdev *scaler;
+ struct smiapp_subdev *pixel_array;
+ struct smiapp_platform_data *platform_data;
+ struct regulator *vana;
+ struct clk *ext_clk;
+ u32 limits[SMIAPP_LIMIT_LAST];
+ u8 nbinning_subtypes;
+ struct smiapp_binning_subtype binning_subtypes[SMIAPP_BINNING_SUBTYPES];
+ u32 mbus_frame_fmts;
+ const struct smiapp_csi_data_format *csi_format;
+ const struct smiapp_csi_data_format *internal_csi_format;
+ u32 default_mbus_frame_fmts;
+ int default_pixel_order;
+
+ u8 binning_horizontal;
+ u8 binning_vertical;
+
+ u8 scale_m;
+ u8 scaling_mode;
+
+ u8 hvflip_inv_mask; /* H/VFLIP inversion due to sensor orientation */
+ u8 flash_capability;
+ u8 frame_skip;
+
+ int power_count;
+
+ bool streaming;
+ bool dev_init_done;
+
+ u8 *nvm; /* nvm memory buffer */
+ unsigned int nvm_size; /* bytes */
+
+ struct smiapp_module_info minfo;
+
+ struct smiapp_pll pll;
+
+ /* Pixel array controls */
+ struct v4l2_ctrl *analog_gain;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *pixel_rate_parray;
+ /* src controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate_csi;
+};
+
+#define to_smiapp_subdev(_sd) \
+ container_of(_sd, struct smiapp_subdev, sd)
+
+#define to_smiapp_sensor(_sd) \
+ (to_smiapp_subdev(_sd)->sensor)
+
+#endif /* __SMIAPP_PRIV_H_ */
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index c2882fa5be85..19ea780b16ff 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -995,10 +995,8 @@ static int sn9c102_stop_transfer(struct sn9c102_device* cam)
static int sn9c102_stream_interrupt(struct sn9c102_device* cam)
{
- long timeout;
-
cam->stream = STREAM_INTERRUPT;
- timeout = wait_event_timeout(cam->wait_stream,
+ wait_event_timeout(cam->wait_stream,
(cam->stream == STREAM_OFF) ||
(cam->state & DEV_DISCONNECTED),
SN9C102_URB_TIMEOUT);
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index aedb970d13f6..0421bf9453b4 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -164,35 +164,38 @@ static int soc_camera_try_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ const struct soc_camera_format_xlate *xlate;
struct v4l2_pix_format *pix = &f->fmt.pix;
int ret;
dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
pixfmtstr(pix->pixelformat), pix->width, pix->height);
- pix->bytesperline = 0;
- pix->sizeimage = 0;
+ if (!(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) {
+ pix->bytesperline = 0;
+ pix->sizeimage = 0;
+ }
ret = ici->ops->try_fmt(icd, f);
if (ret < 0)
return ret;
- if (!pix->sizeimage) {
- if (!pix->bytesperline) {
- const struct soc_camera_format_xlate *xlate;
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate)
+ return -EINVAL;
+
+ ret = soc_mbus_bytes_per_line(pix->width, xlate->host_fmt);
+ if (ret < 0)
+ return ret;
+
+ pix->bytesperline = max_t(u32, pix->bytesperline, ret);
- xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
- if (!xlate)
- return -EINVAL;
+ ret = soc_mbus_image_size(xlate->host_fmt, pix->bytesperline,
+ pix->height);
+ if (ret < 0)
+ return ret;
- ret = soc_mbus_bytes_per_line(pix->width,
- xlate->host_fmt);
- if (ret > 0)
- pix->bytesperline = ret;
- }
- if (pix->bytesperline)
- pix->sizeimage = pix->bytesperline * pix->height;
- }
+ pix->sizeimage = max_t(u32, pix->sizeimage, ret);
return 0;
}
@@ -257,13 +260,13 @@ static int soc_camera_g_std(struct file *file, void *priv, v4l2_std_id *a)
return v4l2_subdev_call(sd, core, g_std, a);
}
-static int soc_camera_enum_fsizes(struct file *file, void *fh,
+static int soc_camera_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- return ici->ops->enum_fsizes(icd, fsize);
+ return ici->ops->enum_framesizes(icd, fsize);
}
static int soc_camera_reqbufs(struct file *file, void *priv,
@@ -1244,8 +1247,8 @@ static int default_s_parm(struct soc_camera_device *icd,
return v4l2_subdev_call(sd, video, s_parm, parm);
}
-static int default_enum_fsizes(struct soc_camera_device *icd,
- struct v4l2_frmsizeenum *fsize)
+static int default_enum_framesizes(struct soc_camera_device *icd,
+ struct v4l2_frmsizeenum *fsize)
{
int ret;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
@@ -1259,7 +1262,7 @@ static int default_enum_fsizes(struct soc_camera_device *icd,
/* map xlate-code to pixel_format, sensor only handle xlate-code*/
fsize_mbus.pixel_format = xlate->code;
- ret = v4l2_subdev_call(sd, video, enum_mbus_fsizes, &fsize_mbus);
+ ret = v4l2_subdev_call(sd, video, enum_framesizes, &fsize_mbus);
if (ret < 0)
return ret;
@@ -1298,8 +1301,8 @@ int soc_camera_host_register(struct soc_camera_host *ici)
ici->ops->set_parm = default_s_parm;
if (!ici->ops->get_parm)
ici->ops->get_parm = default_g_parm;
- if (!ici->ops->enum_fsizes)
- ici->ops->enum_fsizes = default_enum_fsizes;
+ if (!ici->ops->enum_framesizes)
+ ici->ops->enum_framesizes = default_enum_framesizes;
mutex_lock(&list_lock);
list_for_each_entry(ix, &hosts, list) {
@@ -1390,7 +1393,7 @@ static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = {
.vidioc_s_input = soc_camera_s_input,
.vidioc_s_std = soc_camera_s_std,
.vidioc_g_std = soc_camera_g_std,
- .vidioc_enum_framesizes = soc_camera_enum_fsizes,
+ .vidioc_enum_framesizes = soc_camera_enum_framesizes,
.vidioc_reqbufs = soc_camera_reqbufs,
.vidioc_querybuf = soc_camera_querybuf,
.vidioc_qbuf = soc_camera_qbuf,
@@ -1429,6 +1432,10 @@ static int video_dev_create(struct soc_camera_device *icd)
vdev->tvnorms = V4L2_STD_UNKNOWN;
vdev->ctrl_handler = &icd->ctrl_handler;
vdev->lock = &icd->video_lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags);
icd->vdev = vdev;
diff --git a/drivers/media/video/soc_mediabus.c b/drivers/media/video/soc_mediabus.c
index cf7f2194ded4..89dce097a827 100644
--- a/drivers/media/video/soc_mediabus.c
+++ b/drivers/media/video/soc_mediabus.c
@@ -24,6 +24,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_YVYU8_2X8,
@@ -33,6 +34,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_UYVY8_2X8,
@@ -42,6 +44,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_VYUY8_2X8,
@@ -51,6 +54,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
@@ -60,6 +64,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
@@ -69,6 +74,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB565_2X8_LE,
@@ -78,6 +84,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB565_2X8_BE,
@@ -87,6 +94,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SBGGR8_1X8,
@@ -96,6 +104,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SBGGR10_1X10,
@@ -105,6 +114,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 10,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_Y8_1X8,
@@ -114,6 +124,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_Y10_1X10,
@@ -123,6 +134,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 10,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE,
@@ -132,6 +144,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE,
@@ -141,6 +154,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADLO,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE,
@@ -150,6 +164,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE,
@@ -159,6 +174,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADLO,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_JPEG_1X8,
@@ -168,6 +184,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_VARIABLE,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE,
@@ -177,6 +194,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_YUYV8_1_5X8,
@@ -186,6 +204,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_1_5X8,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_YVYU8_1_5X8,
@@ -195,6 +214,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_1_5X8,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_UYVY8_1X16,
@@ -204,6 +224,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 16,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_VYUY8_1X16,
@@ -213,6 +234,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 16,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_YUYV8_1X16,
@@ -222,6 +244,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 16,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_YVYU8_1X16,
@@ -231,6 +254,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 16,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SGRBG8_1X8,
@@ -240,6 +264,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
@@ -249,6 +274,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SGBRG10_1X10,
@@ -258,6 +284,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 10,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SGRBG10_1X10,
@@ -267,6 +294,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 10,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SRGGB10_1X10,
@@ -276,6 +304,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 10,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SBGGR12_1X12,
@@ -285,6 +314,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 12,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SGBRG12_1X12,
@@ -294,6 +324,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 12,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SGRBG12_1X12,
@@ -303,6 +334,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 12,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SRGGB12_1X12,
@@ -312,6 +344,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 12,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
},
};
@@ -345,6 +378,9 @@ EXPORT_SYMBOL(soc_mbus_samples_per_pixel);
s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
{
+ if (mf->layout != SOC_MBUS_LAYOUT_PACKED)
+ return width * mf->bits_per_sample / 8;
+
switch (mf->packing) {
case SOC_MBUS_PACKING_NONE:
return width * mf->bits_per_sample / 8;
@@ -361,6 +397,24 @@ s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
}
EXPORT_SYMBOL(soc_mbus_bytes_per_line);
+s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
+ u32 bytes_per_line, u32 height)
+{
+ if (mf->layout == SOC_MBUS_LAYOUT_PACKED)
+ return bytes_per_line * height;
+
+ switch (mf->packing) {
+ case SOC_MBUS_PACKING_2X8_PADHI:
+ case SOC_MBUS_PACKING_2X8_PADLO:
+ return bytes_per_line * height * 2;
+ case SOC_MBUS_PACKING_1_5X8:
+ return bytes_per_line * height * 3 / 2;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(soc_mbus_image_size);
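A small stand-alone illustration of the arithmetic in soc_mbus_bytes_per_line() and the new soc_mbus_image_size(); the width and height are arbitrary, and the three cases correspond to the packed, 2X8-padded and 1_5X8 branches above.

	#include <stdio.h>

	int main(void)
	{
		unsigned int width = 640, height = 480;
		/* 8 bits per sample, so bytes_per_line is width * 8 / 8 = 640
		 * both for the packed case and for the non-packed early return. */
		unsigned int bytes_per_line = width * 8 / 8;

		/* SOC_MBUS_LAYOUT_PACKED: one plane, stride * height */
		printf("packed:         %u\n", bytes_per_line * height);

		/* non-packed layout, 2X8_PADHI/PADLO packing: twice that */
		printf("2x8 pad layout: %u\n", bytes_per_line * height * 2);

		/* non-packed layout, 1_5X8 packing: 1.5 bytes per pixel overall */
		printf("1.5x8 layout:   %u\n", bytes_per_line * height * 3 / 2);

		return 0;
	}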
+
const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
enum v4l2_mbus_pixelcode code,
const struct soc_mbus_lookup *lookup,
diff --git a/drivers/media/video/sta2x11_vip.c b/drivers/media/video/sta2x11_vip.c
new file mode 100644
index 000000000000..4c10205264d4
--- /dev/null
+++ b/drivers/media/video/sta2x11_vip.c
@@ -0,0 +1,1550 @@
+/*
+ * This is the driver for the STA2x11 Video Input Port.
+ *
+ * Copyright (C) 2010 WindRiver Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Author: Andreas Kies <andreas.kies@windriver.com>
+ * Vlad Lungu <vlad.lungu@windriver.com>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+
+#include <linux/videodev2.h>
+
+#include <linux/kmod.h>
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf-dma-contig.h>
+
+#include "sta2x11_vip.h"
+
+#define DRV_NAME "sta2x11_vip"
+#define DRV_VERSION "1.3"
+
+#ifndef PCI_DEVICE_ID_STMICRO_VIP
+#define PCI_DEVICE_ID_STMICRO_VIP 0xCC0D
+#endif
+
+#define MAX_FRAMES 4
+
+/* Register offsets */
+#define DVP_CTL 0x00
+#define DVP_TFO 0x04
+#define DVP_TFS 0x08
+#define DVP_BFO 0x0C
+#define DVP_BFS 0x10
+#define DVP_VTP 0x14
+#define DVP_VBP 0x18
+#define DVP_VMP 0x1C
+#define DVP_ITM 0x98
+#define DVP_ITS 0x9C
+#define DVP_STA 0xA0
+#define DVP_HLFLN 0xA8
+#define DVP_RGB 0xC0
+#define DVP_PKZ 0xF0
+
+/* Register fields */
+#define DVP_CTL_ENA 0x00000001
+#define DVP_CTL_RST 0x80000000
+#define DVP_CTL_DIS (~0x00040001)
+
+#define DVP_IT_VSB 0x00000008
+#define DVP_IT_VST 0x00000010
+#define DVP_IT_FIFO 0x00000020
+
+#define DVP_HLFLN_SD 0x00000001
+
+#define REG_WRITE(vip, reg, value) iowrite32((value), (vip->iomem)+(reg))
+#define REG_READ(vip, reg) ioread32((vip->iomem)+(reg))
+
+#define SAVE_COUNT 8
+#define AUX_COUNT 3
+#define IRQ_COUNT 1
+
+/**
+ * struct sta2x11_vip - All internal data for one instance of device
+ * @v4l2_dev: device registered in v4l layer
+ * @video_dev: properties of our device
+ * @pdev: PCI device
+ * @adapter: contains I2C adapter information
+ * @register_save_area: All relevant registers are saved here during suspend
+ * @decoder: contains information about the video decoder
+ * @format: pixel format, fixed UYVY
+ * @std: video standard (e.g. PAL/NTSC)
+ * @input: input line for video signal (0 or 1)
+ * @users: Number of concurrent opens of the device (max. 1)
+ * @disabled: Device is in power down state
+ * @mutex: ensures exclusive opening of device
+ * @slock: spinlock for exclusive access to registers
+ * @vb_vidq: queue maintained by videobuf layer
+ * @capture: linked list of capture buffers
+ * @active: struct videobuf_buffer currently being filled
+ * @started: device is ready to capture frames
+ * @closing: device will be shut down
+ * @tcount: Number of top frames
+ * @bcount: Number of bottom frames
+ * @overflow: Number of FIFO overflows
+ * @mem_spare: small spare buffer for an unused frame
+ * @dma_spare: DMA address of mem_spare
+ * @iomem: hardware base address
+ * @config: I2C and gpio config from platform
+ *
+ * All non-local data is accessed via this structure.
+ */
+
+struct sta2x11_vip {
+ struct v4l2_device v4l2_dev;
+ struct video_device *video_dev;
+ struct pci_dev *pdev;
+ struct i2c_adapter *adapter;
+ unsigned int register_save_area[IRQ_COUNT + SAVE_COUNT + AUX_COUNT];
+ struct v4l2_subdev *decoder;
+ struct v4l2_pix_format format;
+ v4l2_std_id std;
+ unsigned int input;
+ int users;
+ int disabled;
+ struct mutex mutex; /* exclusive access during open */
+ spinlock_t slock; /* spin lock for hardware and queue access */
+ struct videobuf_queue vb_vidq;
+ struct list_head capture;
+ struct videobuf_buffer *active;
+ int started, closing, tcount, bcount;
+ int overflow;
+ void *mem_spare;
+ dma_addr_t dma_spare;
+ void *iomem;
+ struct vip_config *config;
+};
+
+static const unsigned int registers_to_save[AUX_COUNT] = {
+ DVP_HLFLN, DVP_RGB, DVP_PKZ
+};
+
+static struct v4l2_pix_format formats_50[] = {
+ { /*PAL interlaced */
+ .width = 720,
+ .height = 576,
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .field = V4L2_FIELD_INTERLACED,
+ .bytesperline = 720 * 2,
+ .sizeimage = 720 * 2 * 576,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M},
+ { /*PAL top */
+ .width = 720,
+ .height = 288,
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .field = V4L2_FIELD_TOP,
+ .bytesperline = 720 * 2,
+ .sizeimage = 720 * 2 * 288,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M},
+ { /*PAL bottom */
+ .width = 720,
+ .height = 288,
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .field = V4L2_FIELD_BOTTOM,
+ .bytesperline = 720 * 2,
+ .sizeimage = 720 * 2 * 288,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M},
+
+};
+
+static struct v4l2_pix_format formats_60[] = {
+ { /*NTSC interlaced */
+ .width = 720,
+ .height = 480,
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .field = V4L2_FIELD_INTERLACED,
+ .bytesperline = 720 * 2,
+ .sizeimage = 720 * 2 * 480,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M},
+ { /*NTSC top */
+ .width = 720,
+ .height = 240,
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .field = V4L2_FIELD_TOP,
+ .bytesperline = 720 * 2,
+ .sizeimage = 720 * 2 * 240,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M},
+ { /*NTSC bottom */
+ .width = 720,
+ .height = 240,
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .field = V4L2_FIELD_BOTTOM,
+ .bytesperline = 720 * 2,
+ .sizeimage = 720 * 2 * 240,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M},
+};
+
+/**
+ * buf_setup - Get size and number of video buffers
+ * @vq: queue in videobuf
+ * @count: Number of buffers (1..MAX_FRAMES);
+ *	  0 uses the default value.
+ * @size: size of one buffer in bytes
+ *
+ * Returns the size and number of buffers;
+ * a preset count of 0 returns the default number.
+ * Return value: 0, always successful.
+ */
+static int buf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct sta2x11_vip *vip = vq->priv_data;
+
+ *size = vip->format.width * vip->format.height * 2;
+ if (0 == *count || MAX_FRAMES < *count)
+ *count = MAX_FRAMES;
+ return 0;
+};
+
+/**
+ * buf_prepare - prepare buffer for usage
+ * @vq: queue in videobuf layer
+ * @vb: buffer to be prepared
+ * @field: type of video data (interlaced/non-interlaced)
+ *
+ * Allocate or realloc buffer
+ * return value: 0, successful.
+ *
+ * -EINVAL, supplied buffer is too small.
+ *
+ * other, buffer could not be locked.
+ */
+static int buf_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct sta2x11_vip *vip = vq->priv_data;
+ int ret;
+
+ vb->size = vip->format.width * vip->format.height * 2;
+ if ((0 != vb->baddr) && (vb->bsize < vb->size))
+ return -EINVAL;
+ vb->width = vip->format.width;
+ vb->height = vip->format.height;
+ vb->field = field;
+
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret)
+ goto fail;
+ }
+ vb->state = VIDEOBUF_PREPARED;
+ return 0;
+fail:
+ videobuf_dma_contig_free(vq, vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+ return ret;
+}
+
+/**
+ * buf_queue - queue buffer for filling
+ * @vq: queue in videobuf layer
+ * @vb: buffer to be queued
+ *
+ * if capturing is already running, the buffer will be queued. Otherwise
+ * capture is started and the buffer is used directly.
+ */
+static void buf_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+ struct sta2x11_vip *vip = vq->priv_data;
+ u32 dma;
+
+ vb->state = VIDEOBUF_QUEUED;
+
+ if (vip->active) {
+ list_add_tail(&vb->queue, &vip->capture);
+ return;
+ }
+
+ vip->started = 1;
+ vip->tcount = 0;
+ vip->bcount = 0;
+ vip->active = vb;
+ vb->state = VIDEOBUF_ACTIVE;
+
+ dma = videobuf_to_dma_contig(vb);
+
+ REG_WRITE(vip, DVP_TFO, (0 << 16) | (0));
+ /* regardless of interlace mode, upper and lower frames start at zero */
+ REG_WRITE(vip, DVP_BFO, (0 << 16) | (0));
+
+ switch (vip->format.field) {
+ case V4L2_FIELD_INTERLACED:
+ REG_WRITE(vip, DVP_TFS,
+ ((vip->format.height / 2 - 1) << 16) |
+ (2 * vip->format.width - 1));
+ REG_WRITE(vip, DVP_BFS, ((vip->format.height / 2 - 1) << 16) |
+ (2 * vip->format.width - 1));
+ REG_WRITE(vip, DVP_VTP, dma);
+ REG_WRITE(vip, DVP_VBP, dma + vip->format.width * 2);
+ REG_WRITE(vip, DVP_VMP, 4 * vip->format.width);
+ break;
+ case V4L2_FIELD_TOP:
+ REG_WRITE(vip, DVP_TFS,
+ ((vip->format.height - 1) << 16) |
+ (2 * vip->format.width - 1));
+ REG_WRITE(vip, DVP_BFS, ((0) << 16) |
+ (2 * vip->format.width - 1));
+ REG_WRITE(vip, DVP_VTP, dma);
+ REG_WRITE(vip, DVP_VBP, dma);
+ REG_WRITE(vip, DVP_VMP, 2 * vip->format.width);
+ break;
+ case V4L2_FIELD_BOTTOM:
+ REG_WRITE(vip, DVP_TFS, ((0) << 16) |
+ (2 * vip->format.width - 1));
+ REG_WRITE(vip, DVP_BFS,
+ ((vip->format.height) << 16) |
+ (2 * vip->format.width - 1));
+ REG_WRITE(vip, DVP_VTP, dma);
+ REG_WRITE(vip, DVP_VBP, dma);
+ REG_WRITE(vip, DVP_VMP, 2 * vip->format.width);
+ break;
+
+ default:
+ pr_warning("VIP: unknown field format\n");
+ return;
+ }
+
+ REG_WRITE(vip, DVP_CTL, DVP_CTL_ENA);
+}
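
A note on the interlaced branch above (illustration only, not part of the driver): the register writes weave both fields into one contiguous UYVY buffer. The top field starts at the buffer base (DVP_VTP = dma), the bottom field one line later (DVP_VBP = dma + width * 2 bytes), and each field advances by two lines per step (DVP_VMP = 4 * width bytes). A small stand-alone C sketch of that address arithmetic; the DMA base value is a made-up placeholder:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t dma = 0x10000000;		/* hypothetical DMA base address */
	unsigned int width = 720, height = 576;	/* PAL interlaced frame */
	unsigned int size = width * 2 * height;	/* whole UYVY buffer */

	uint32_t top = dma;			/* DVP_VTP */
	uint32_t bottom = dma + width * 2;	/* DVP_VBP: one line below the top */
	unsigned int pitch = 4 * width;		/* DVP_VMP: skip the other field's line */
	unsigned int n;

	for (n = 0; n < 2; n++)			/* first two lines of each field */
		printf("top[%u]=%#lx bottom[%u]=%#lx\n",
		       n, (unsigned long)(top + n * pitch),
		       n, (unsigned long)(bottom + n * pitch));

	printf("buffer end: %#lx (%u bytes)\n", (unsigned long)dma + size, size);
	return 0;
}
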
+
+/**
+ * buf_release - release buffer
+ * @vq: queue in videobuf layer
+ * @vb: buffer to be released
+ *
+ * release buffer in videobuf layer
+ */
+static void buf_release(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+
+ videobuf_dma_contig_free(vq, vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static struct videobuf_queue_ops vip_qops = {
+ .buf_setup = buf_setup,
+ .buf_prepare = buf_prepare,
+ .buf_queue = buf_queue,
+ .buf_release = buf_release,
+};
+
+/**
+ * vip_open - open video device
+ * @file: descriptor of device
+ *
+ * open device, make sure it is only opened once.
+ * return value: 0, no error.
+ *
+ * -EBUSY, device is already opened
+ *
+ * -ENOMEM, no memory for auxiliary DMA buffer
+ */
+static int vip_open(struct file *file)
+{
+ struct video_device *dev = video_devdata(file);
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ mutex_lock(&vip->mutex);
+ vip->users++;
+
+ if (vip->users > 1) {
+ vip->users--;
+ mutex_unlock(&vip->mutex);
+ return -EBUSY;
+ }
+
+ file->private_data = dev;
+ vip->overflow = 0;
+ vip->started = 0;
+ vip->closing = 0;
+ vip->active = NULL;
+
+ INIT_LIST_HEAD(&vip->capture);
+ vip->mem_spare = dma_alloc_coherent(&vip->pdev->dev, 64,
+ &vip->dma_spare, GFP_KERNEL);
+ if (!vip->mem_spare) {
+ vip->users--;
+ mutex_unlock(&vip->mutex);
+ return -ENOMEM;
+ }
+
+ mutex_unlock(&vip->mutex);
+ videobuf_queue_dma_contig_init_cached(&vip->vb_vidq,
+ &vip_qops,
+ &vip->pdev->dev,
+ &vip->slock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_INTERLACED,
+ sizeof(struct videobuf_buffer),
+ vip, NULL);
+ REG_READ(vip, DVP_ITS);
+ REG_WRITE(vip, DVP_HLFLN, DVP_HLFLN_SD);
+ REG_WRITE(vip, DVP_ITM, DVP_IT_VSB | DVP_IT_VST);
+ REG_WRITE(vip, DVP_CTL, DVP_CTL_RST);
+ REG_WRITE(vip, DVP_CTL, 0);
+ REG_READ(vip, DVP_ITS);
+ return 0;
+}
+
+/**
+ * vip_close - close video device
+ * @file: descriptor of device
+ *
+ * close video device, wait until all pending operations are finished
+ * (at most MAX_FRAMES buffers pending)
+ * Turn off interrupts.
+ *
+ * return value: 0, always successful.
+ */
+static int vip_close(struct file *file)
+{
+ struct video_device *dev = video_devdata(file);
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ vip->closing = 1;
+ if (vip->active)
+ videobuf_waiton(&vip->vb_vidq, vip->active, 0, 0);
+ spin_lock_irq(&vip->slock);
+
+ REG_WRITE(vip, DVP_ITM, 0);
+ REG_WRITE(vip, DVP_CTL, DVP_CTL_RST);
+ REG_WRITE(vip, DVP_CTL, 0);
+ REG_READ(vip, DVP_ITS);
+
+ vip->started = 0;
+ vip->active = NULL;
+
+ spin_unlock_irq(&vip->slock);
+
+ videobuf_stop(&vip->vb_vidq);
+ videobuf_mmap_free(&vip->vb_vidq);
+
+ dma_free_coherent(&vip->pdev->dev, 64, vip->mem_spare, vip->dma_spare);
+ file->private_data = NULL;
+ mutex_lock(&vip->mutex);
+ vip->users--;
+ mutex_unlock(&vip->mutex);
+ return 0;
+}
+
+/**
+ * vip_read - read from video input
+ * @file: descriptor of device
+ * @data: user buffer
+ * @count: number of bytes to be read
+ * @ppos: position within stream
+ *
+ * read video data from video device.
+ * handling is done in generic videobuf layer
+ * return value: provided by videobuf layer
+ */
+static ssize_t vip_read(struct file *file, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct video_device *dev = file->private_data;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_read_stream(&vip->vb_vidq, data, count, ppos, 0,
+ file->f_flags & O_NONBLOCK);
+}
+
+/**
+ * vip_mmap - map user buffer
+ * @file: descriptor of device
+ * @vma: user buffer
+ *
+ * map the videobuf buffers into user space, including the DMA memory.
+ * handling is done in generic videobuf layer.
+ * return value: provided by videobuf layer
+ */
+static int vip_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct video_device *dev = file->private_data;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_mmap_mapper(&vip->vb_vidq, vma);
+}
+
+/**
+ * vip_poll - poll for event
+ * @file: descriptor of device
+ * @wait: contains events to be waited for
+ *
+ * wait for event related to video device.
+ * handling is done in generic videobuf layer.
+ * return value: provided by videobuf layer
+ */
+static unsigned int vip_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct video_device *dev = file->private_data;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_poll_stream(file, &vip->vb_vidq, wait);
+}
+
+/**
+ * vidioc_querycap - return capabilities of device
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @cap: contains return values
+ *
+ * the capabilities of the device are returned
+ *
+ * return value: 0, no error.
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ memset(cap, 0, sizeof(struct v4l2_capability));
+ strcpy(cap->driver, DRV_NAME);
+ strcpy(cap->card, DRV_NAME);
+ cap->version = 0;
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
+ pci_name(vip->pdev));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
+
+ return 0;
+}
+
+/**
+ * vidioc_s_std - set video standard
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @std: contains standard to be set
+ *
+ * the video standard is set
+ *
+ * return value: 0, no error.
+ *
+ * -EIO, no input signal detected
+ *
+ * other, returned from video DAC.
+ */
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+ v4l2_std_id oldstd = vip->std, newstd;
+ int status;
+
+ if (V4L2_STD_ALL == *std) {
+ v4l2_subdev_call(vip->decoder, core, s_std, *std);
+ ssleep(2);
+ v4l2_subdev_call(vip->decoder, video, querystd, &newstd);
+ v4l2_subdev_call(vip->decoder, video, g_input_status, &status);
+ if (status & V4L2_IN_ST_NO_SIGNAL)
+ return -EIO;
+ *std = vip->std = newstd;
+ if (oldstd != *std) {
+ if (V4L2_STD_525_60 & (*std))
+ vip->format = formats_60[0];
+ else
+ vip->format = formats_50[0];
+ }
+ return 0;
+ }
+
+ if (oldstd != *std) {
+ if (V4L2_STD_525_60 & (*std))
+ vip->format = formats_60[0];
+ else
+ vip->format = formats_50[0];
+ }
+
+ return v4l2_subdev_call(vip->decoder, core, s_std, *std);
+}
+
+/**
+ * vidioc_g_std - get video standard
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @std: contains return values
+ *
+ * the current video standard is returned
+ *
+ * return value: 0, no error.
+ */
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ *std = vip->std;
+ return 0;
+}
+
+/**
+ * vidioc_querystd - get possible video standards
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @std: contains return values
+ *
+ * all possible video standards are returned
+ *
+ * return value: delivered by video DAC routine.
+ */
+static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return v4l2_subdev_call(vip->decoder, video, querystd, std);
+
+}
+
+/**
+ * vidioc_queryctrl - get possible control settings
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @ctrl: contains return values
+ *
+ * return possible values for a control
+ * return value: delivered by video DAC routine.
+ */
+static int vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *ctrl)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return v4l2_subdev_call(vip->decoder, core, queryctrl, ctrl);
+}
+
+/**
+ * vidioc_g_ctrl - get control value
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @ctrl: contains return values
+ *
+ * return setting for a control value
+ * return value: delivered by video DAC routine.
+ */
+static int vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return v4l2_subdev_call(vip->decoder, core, g_ctrl, ctrl);
+}
+
+/**
+ * vidioc_s_ctrl - set control value
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @ctrl: contains value to be set
+ *
+ * set value for a specific control
+ * return value: delivered by video DAC routine.
+ */
+static int vidioc_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return v4l2_subdev_call(vip->decoder, core, s_ctrl, ctrl);
+}
+
+/**
+ * vidioc_enum_input - return name of input line
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @inp: contains return values
+ *
+ * the user-friendly name of the input line is returned
+ *
+ * return value: 0, no error.
+ *
+ * -EINVAL, input line number out of range
+ */
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ if (inp->index > 1)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = V4L2_STD_ALL;
+ sprintf(inp->name, "Camera %u", inp->index);
+
+ return 0;
+}
+
+/**
+ * vidioc_s_input - set input line
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @i: new input line number
+ *
+ * the current active input line is set
+ *
+ * return value: 0, no error.
+ *
+ * -EINVAL, line number out of range
+ */
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+ int ret;
+
+ if (i > 1)
+ return -EINVAL;
+ ret = v4l2_subdev_call(vip->decoder, video, s_routing, i, 0, 0);
+
+ if (!ret)
+ vip->input = i;
+
+ return 0;
+}
+
+/**
+ * vidioc_g_input - return input line
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @i: returned input line number
+ *
+ * the current active input line is returned
+ *
+ * return value: always 0.
+ */
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ *i = vip->input;
+ return 0;
+}
+
+/**
+ * vidioc_enum_fmt_vid_cap - return video capture format
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @f: returned format information
+ *
+ * returns name and format of video capture
+ * Only UYVY is supported by hardware.
+ *
+ * return value: 0 on success, -EINVAL if the index is not zero.
+ */
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+
+ if (f->index != 0)
+ return -EINVAL;
+
+ strcpy(f->description, "4:2:2, packed, UYVY");
+ f->pixelformat = V4L2_PIX_FMT_UYVY;
+ f->flags = 0;
+ return 0;
+}
+
+/**
+ * vidioc_try_fmt_vid_cap - check and adjust video capture format
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @f: new format
+ *
+ * the requested format, including width and field type, is checked
+ * and adjusted. The width is fixed to 720 (no scaling) and only UYVY
+ * is supported by this hardware.
+ * The minimum height is 200, the maximum is 576 (PAL).
+ *
+ * return value: 0, no error
+ *
+ * -EINVAL, pixel or field format not supported
+ *
+ */
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+ int interlace_lim;
+
+ if (V4L2_PIX_FMT_UYVY != f->fmt.pix.pixelformat)
+ return -EINVAL;
+
+ if (V4L2_STD_525_60 & vip->std)
+ interlace_lim = 240;
+ else
+ interlace_lim = 288;
+
+ switch (f->fmt.pix.field) {
+ case V4L2_FIELD_ANY:
+ if (interlace_lim < f->fmt.pix.height)
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ else
+ f->fmt.pix.field = V4L2_FIELD_BOTTOM;
+ break;
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ if (interlace_lim < f->fmt.pix.height)
+ f->fmt.pix.height = interlace_lim;
+ break;
+ case V4L2_FIELD_INTERLACED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ f->fmt.pix.height &= ~1;
+ if (2 * interlace_lim < f->fmt.pix.height)
+ f->fmt.pix.height = 2 * interlace_lim;
+ if (200 > f->fmt.pix.height)
+ f->fmt.pix.height = 200;
+ f->fmt.pix.width = 720;
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
+ f->fmt.pix.sizeimage = f->fmt.pix.width * 2 * f->fmt.pix.height;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ f->fmt.pix.priv = 0;
+ return 0;
+}
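
The final height adjustment above boils down to three rules: force an even line count, cap it at twice the per-field limit (2 * 240 = 480 for 60 Hz standards, 2 * 288 = 576 for 50 Hz), and floor it at 200 lines; width, bytesperline and sizeimage then follow from the fixed 720-pixel width. A user-space sketch of just that clamping step (the helper name is made up for illustration):

#include <stdio.h>

/* Mirrors the last height adjustment in vidioc_try_fmt_vid_cap:
 * interlace_lim is 240 for V4L2_STD_525_60, 288 otherwise. */
static unsigned int clamp_height(unsigned int h, unsigned int interlace_lim)
{
	h &= ~1u;			/* even number of lines */
	if (h > 2 * interlace_lim)
		h = 2 * interlace_lim;	/* at most a full frame */
	if (h < 200)
		h = 200;		/* minimum accepted height */
	return h;
}

int main(void)
{
	printf("%u\n", clamp_height(1080, 288));	/* -> 576 (PAL frame) */
	printf("%u\n", clamp_height(483, 240));		/* -> 480 (NTSC frame) */
	printf("%u\n", clamp_height(100, 288));		/* -> 200 */
	return 0;
}
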
+
+/**
+ * vidioc_s_fmt_vid_cap - set current video format parameters
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @f: returned format information
+ *
+ * set new capture format
+ * return value: 0, no error
+ *
+ * other, as returned by vidioc_try_fmt_vid_cap().
+ */
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+ int ret;
+
+ ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ memcpy(&vip->format, &f->fmt.pix, sizeof(struct v4l2_pix_format));
+ return 0;
+}
+
+/**
+ * vidioc_g_fmt_vid_cap - get current video format parameters
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @f: contains format information
+ *
+ * returns current video format parameters
+ *
+ * return value: 0, always successful
+ */
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ memcpy(&f->fmt.pix, &vip->format, sizeof(struct v4l2_pix_format));
+ return 0;
+}
+
+/**
+ * vidioc_reqbufs - request buffers
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @p: video buffer
+ *
+ * Handling is done in generic videobuf layer.
+ */
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_reqbufs(&vip->vb_vidq, p);
+}
+
+/**
+ * vidioc_querybuf - query buffer
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @p: video buffer
+ *
+ * query buffer state.
+ * Handling is done in generic videobuf layer.
+ */
+static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_querybuf(&vip->vb_vidq, p);
+}
+
+/**
+ * vidioc_qbuf - queue a buffer
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @p: video buffer
+ *
+ * Handling is done in generic videobuf layer.
+ */
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_qbuf(&vip->vb_vidq, p);
+}
+
+/**
+ * vidioc_dqbuf - dequeue a buffer
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @p: video buffer
+ *
+ * Handling is done in generic videobuf layer.
+ */
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_dqbuf(&vip->vb_vidq, p, file->f_flags & O_NONBLOCK);
+}
+
+/**
+ * vidioc_streamon - turn on streaming
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @type: type of capture
+ *
+ * turn on streaming.
+ * Handling is done in generic videobuf layer.
+ */
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_streamon(&vip->vb_vidq);
+}
+
+/**
+ * vidioc_streamoff - turn off streaming
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @type: type of capture
+ *
+ * turn off streaming.
+ * Handling is done in generic videobuf layer.
+ */
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_streamoff(&vip->vb_vidq);
+}
+
+static const struct v4l2_file_operations vip_fops = {
+ .owner = THIS_MODULE,
+ .open = vip_open,
+ .release = vip_close,
+ .ioctl = video_ioctl2,
+ .read = vip_read,
+ .mmap = vip_mmap,
+ .poll = vip_poll
+};
+
+static const struct v4l2_ioctl_ops vip_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_querystd = vidioc_querystd,
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+};
+
+static struct video_device video_dev_template = {
+ .name = DRV_NAME,
+ .release = video_device_release,
+ .fops = &vip_fops,
+ .ioctl_ops = &vip_ioctl_ops,
+ .tvnorms = V4L2_STD_ALL,
+};
+
+/**
+ * vip_irq - interrupt routine
+ * @irq: interrupt number (not used, the correct number is assumed)
+ * @vip: local data structure containing all information
+ *
+ * check for both frame interrupts set ( top and bottom ).
+ * check FIFO overflow, but limit number of log messages after open.
+ * signal a complete buffer if done.
+ * dequeue a new buffer if available.
+ * disable VIP if no buffer available.
+ *
+ * return value: IRQ_NONE, interrupt was not generated by VIP
+ *
+ * IRQ_HANDLED, interrupt done.
+ */
+static irqreturn_t vip_irq(int irq, struct sta2x11_vip *vip)
+{
+ u32 status, dma;
+ unsigned long flags;
+ struct videobuf_buffer *vb;
+
+ status = REG_READ(vip, DVP_ITS);
+
+ if (!status) {
+ pr_debug("VIP: irq ignored\n");
+ return IRQ_NONE;
+ }
+
+ if (!vip->started)
+ return IRQ_HANDLED;
+
+ if (status & DVP_IT_VSB)
+ vip->bcount++;
+
+ if (status & DVP_IT_VST)
+ vip->tcount++;
+
+ if ((DVP_IT_VSB | DVP_IT_VST) == (status & (DVP_IT_VST | DVP_IT_VSB))) {
+ /* this is bad, we are too slow, hope the condition is gone
+ * on the next frame */
+ pr_info("VIP: both irqs\n");
+ return IRQ_HANDLED;
+ }
+
+ if (status & DVP_IT_FIFO) {
+ if (5 > vip->overflow++)
+ pr_info("VIP: fifo overflow\n");
+ }
+
+ if (2 > vip->tcount)
+ return IRQ_HANDLED;
+
+ if (status & DVP_IT_VSB)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&vip->slock, flags);
+
+ REG_WRITE(vip, DVP_CTL, REG_READ(vip, DVP_CTL) & ~DVP_CTL_ENA);
+ if (vip->active) {
+ do_gettimeofday(&vip->active->ts);
+ vip->active->field_count++;
+ vip->active->state = VIDEOBUF_DONE;
+ wake_up(&vip->active->done);
+ vip->active = NULL;
+ }
+ if (!vip->closing) {
+ if (list_empty(&vip->capture))
+ goto done;
+
+ vb = list_first_entry(&vip->capture, struct videobuf_buffer,
+ queue);
+ if (NULL == vb) {
+ pr_info("VIP: no buffer\n");
+ goto done;
+ }
+ vb->state = VIDEOBUF_ACTIVE;
+ list_del(&vb->queue);
+ vip->active = vb;
+ dma = videobuf_to_dma_contig(vb);
+ switch (vip->format.field) {
+ case V4L2_FIELD_INTERLACED:
+ REG_WRITE(vip, DVP_VTP, dma);
+ REG_WRITE(vip, DVP_VBP, dma + vip->format.width * 2);
+ break;
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ REG_WRITE(vip, DVP_VTP, dma);
+ REG_WRITE(vip, DVP_VBP, dma);
+ break;
+ default:
+ pr_warning("VIP: unknown field format\n");
+ goto done;
+ break;
+ }
+ REG_WRITE(vip, DVP_CTL, REG_READ(vip, DVP_CTL) | DVP_CTL_ENA);
+ }
+done:
+ spin_unlock_irqrestore(&vip->slock, flags);
+ return IRQ_HANDLED;
+}
+
+/**
+ * vip_gpio_reserve - reserve gpio pin
+ * @dev: device
+ * @pin: GPIO pin number
+ * @dir: direction, input or output
+ * @name: GPIO pin name
+ *
+ */
+static int vip_gpio_reserve(struct device *dev, int pin, int dir,
+ const char *name)
+{
+ int ret;
+
+ if (pin == -1)
+ return 0;
+
+ ret = gpio_request(pin, name);
+ if (ret) {
+ dev_err(dev, "Failed to allocate pin %d (%s)\n", pin, name);
+ return ret;
+ }
+
+ ret = gpio_direction_output(pin, dir);
+ if (ret) {
+ dev_err(dev, "Failed to set direction for pin %d (%s)\n",
+ pin, name);
+ gpio_free(pin);
+ return ret;
+ }
+
+ ret = gpio_export(pin, false);
+ if (ret) {
+ dev_err(dev, "Failed to export pin %d (%s)\n", pin, name);
+ gpio_free(pin);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * vip_gpio_release - release gpio pin
+ * @dev: device
+ * @pin: GPIO pin number
+ * @name: GPIO pin name
+ *
+ */
+static void vip_gpio_release(struct device *dev, int pin, const char *name)
+{
+ if (pin != -1) {
+ dev_dbg(dev, "releasing pin %d (%s)\n", pin, name);
+ gpio_unexport(pin);
+ gpio_free(pin);
+ }
+}
+
+/**
+ * sta2x11_vip_init_one - init one instance of video device
+ * @pdev: PCI device
+ * @ent: (not used)
+ *
+ * allocate power and reset pins for the DAC.
+ * Reset the video DAC via its reset line.
+ * allocate memory for managing device
+ * request interrupt
+ * map IO region
+ * register device
+ * find and initialize video DAC
+ *
+ * return value: 0, no error
+ *
+ * -ENOMEM, no memory
+ *
+ * -ENODEV, device could not be detected or registered
+ */
+static int __devinit sta2x11_vip_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret;
+ struct sta2x11_vip *vip;
+ struct vip_config *config;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ config = dev_get_platdata(&pdev->dev);
+ if (!config) {
+ dev_info(&pdev->dev, "VIP slot disabled\n");
+ ret = -EINVAL;
+ goto disable;
+ }
+
+ ret = vip_gpio_reserve(&pdev->dev, config->pwr_pin, 0,
+ config->pwr_name);
+ if (ret)
+ goto disable;
+
+ if (config->reset_pin >= 0) {
+ ret = vip_gpio_reserve(&pdev->dev, config->reset_pin, 0,
+ config->reset_name);
+ if (ret) {
+ vip_gpio_release(&pdev->dev, config->pwr_pin,
+ config->pwr_name);
+ goto disable;
+ }
+ }
+
+ if (config->pwr_pin != -1) {
+ /* Datasheet says 5ms between PWR and RST */
+ usleep_range(5000, 25000);
+ ret = gpio_direction_output(config->pwr_pin, 1);
+ }
+
+ if (config->reset_pin != -1) {
+ /* Datasheet says 5ms between PWR and RST */
+ usleep_range(5000, 25000);
+ ret = gpio_direction_output(config->reset_pin, 1);
+ }
+ usleep_range(5000, 25000);
+
+ vip = kzalloc(sizeof(struct sta2x11_vip), GFP_KERNEL);
+ if (!vip) {
+ ret = -ENOMEM;
+ goto release_gpios;
+ }
+
+ vip->pdev = pdev;
+ vip->std = V4L2_STD_PAL;
+ vip->format = formats_50[0];
+ vip->config = config;
+
+ if (v4l2_device_register(&pdev->dev, &vip->v4l2_dev))
+ goto free_mem;
+
+ dev_dbg(&pdev->dev, "BAR #0 at 0x%lx 0x%lx irq %d\n",
+ (unsigned long)pci_resource_start(pdev, 0),
+ (unsigned long)pci_resource_len(pdev, 0), pdev->irq);
+
+ pci_set_master(pdev);
+
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret)
+ goto unreg;
+
+ vip->iomem = pci_iomap(pdev, 0, 0x100);
+ if (!vip->iomem) {
+ ret = -ENOMEM; /* FIXME */
+ goto release;
+ }
+
+ pci_enable_msi(pdev);
+
+ INIT_LIST_HEAD(&vip->capture);
+ spin_lock_init(&vip->slock);
+ mutex_init(&vip->mutex);
+ vip->started = 0;
+ vip->disabled = 0;
+
+ ret = request_irq(pdev->irq,
+ (irq_handler_t) vip_irq,
+ IRQF_SHARED, DRV_NAME, vip);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ ret = -ENODEV;
+ goto unmap;
+ }
+
+ vip->video_dev = video_device_alloc();
+ if (!vip->video_dev) {
+ ret = -ENOMEM;
+ goto release_irq;
+ }
+
+ *(vip->video_dev) = video_dev_template;
+ video_set_drvdata(vip->video_dev, vip);
+
+ ret = video_register_device(vip->video_dev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto vrelease;
+
+ vip->adapter = i2c_get_adapter(vip->config->i2c_id);
+ if (!vip->adapter) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "no I2C adapter found\n");
+ goto vunreg;
+ }
+
+ vip->decoder = v4l2_i2c_new_subdev(&vip->v4l2_dev, vip->adapter,
+ "adv7180", vip->config->i2c_addr,
+ NULL);
+ if (!vip->decoder) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "no decoder found\n");
+ goto vunreg;
+ }
+
+ i2c_put_adapter(vip->adapter);
+
+ v4l2_subdev_call(vip->decoder, core, init, 0);
+
+ pr_info("STA2X11 Video Input Port (VIP) loaded\n");
+ return 0;
+
+vunreg:
+ video_set_drvdata(vip->video_dev, NULL);
+vrelease:
+ if (video_is_registered(vip->video_dev))
+ video_unregister_device(vip->video_dev);
+ else
+ video_device_release(vip->video_dev);
+release_irq:
+ free_irq(pdev->irq, vip);
+ pci_disable_msi(pdev);
+unmap:
+ pci_iounmap(pdev, vip->iomem);
+ mutex_destroy(&vip->mutex);
+release:
+ pci_release_regions(pdev);
+unreg:
+ v4l2_device_unregister(&vip->v4l2_dev);
+free_mem:
+ kfree(vip);
+release_gpios:
+ vip_gpio_release(&pdev->dev, config->reset_pin, config->reset_name);
+ vip_gpio_release(&pdev->dev, config->pwr_pin, config->pwr_name);
+disable:
+ /*
+ * do not call pci_disable_device on sta2x11 because it breaks all
+ * other bus masters on this EP
+ */
+ return ret;
+}
+
+/**
+ * sta2x11_vip_remove_one - release device
+ * @pdev: PCI device
+ *
+ * Undo everything done in .._init_one
+ *
+ * unregister video device
+ * free interrupt
+ * unmap IO addresses
+ * free memory
+ * free GPIO pins
+ */
+static void __devexit sta2x11_vip_remove_one(struct pci_dev *pdev)
+{
+ struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
+ struct sta2x11_vip *vip =
+ container_of(v4l2_dev, struct sta2x11_vip, v4l2_dev);
+
+ video_set_drvdata(vip->video_dev, NULL);
+ video_unregister_device(vip->video_dev);
+ /* do not call video_device_release() here, it is already done */
+ free_irq(pdev->irq, vip);
+ pci_disable_msi(pdev);
+ pci_iounmap(pdev, vip->iomem);
+ pci_release_regions(pdev);
+
+ v4l2_device_unregister(&vip->v4l2_dev);
+ mutex_destroy(&vip->mutex);
+
+ vip_gpio_release(&pdev->dev, vip->config->pwr_pin,
+ vip->config->pwr_name);
+ vip_gpio_release(&pdev->dev, vip->config->reset_pin,
+ vip->config->reset_name);
+
+ kfree(vip);
+ /*
+ * do not call pci_disable_device on sta2x11 because it breaks all
+ * other bus masters on this EP
+ */
+}
+
+#ifdef CONFIG_PM
+
+/**
+ * sta2x11_vip_suspend - set device into power save mode
+ * @pdev: PCI device
+ * @state: new state of device
+ *
+ * all relevant registers are saved and an attempt to set a new state is made.
+ *
+ * return value: 0, always successful, even if the device could not be
+ * disabled or the hardware does not support the power-down mode
+ * (workaround for a hardware problem).
+ */
+static int sta2x11_vip_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
+ struct sta2x11_vip *vip =
+ container_of(v4l2_dev, struct sta2x11_vip, v4l2_dev);
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&vip->slock, flags);
+ vip->register_save_area[0] = REG_READ(vip, DVP_CTL);
+ REG_WRITE(vip, DVP_CTL, vip->register_save_area[0] & DVP_CTL_DIS);
+ vip->register_save_area[SAVE_COUNT] = REG_READ(vip, DVP_ITM);
+ REG_WRITE(vip, DVP_ITM, 0);
+ for (i = 1; i < SAVE_COUNT; i++)
+ vip->register_save_area[i] = REG_READ(vip, 4 * i);
+ for (i = 0; i < AUX_COUNT; i++)
+ vip->register_save_area[SAVE_COUNT + IRQ_COUNT + i] =
+ REG_READ(vip, registers_to_save[i]);
+ spin_unlock_irqrestore(&vip->slock, flags);
+ /* save pci state */
+ pci_save_state(pdev);
+ if (pci_set_power_state(pdev, pci_choose_state(pdev, state))) {
+ /*
+ * do not call pci_disable_device on sta2x11 because it
+ * breaks all other bus masters on this EP
+ */
+ vip->disabled = 1;
+ }
+
+ pr_info("VIP: suspend\n");
+ return 0;
+}
+
+/**
+ * sta2x11_vip_resume - resume device operation
+ * @pdev: PCI device
+ *
+ * re-enable device, set PCI state to powered and restore registers.
+ * resume normal device operation afterwards.
+ *
+ * return value: 0, no error.
+ *
+ * other, could not set device to power on state.
+ */
+static int sta2x11_vip_resume(struct pci_dev *pdev)
+{
+ struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
+ struct sta2x11_vip *vip =
+ container_of(v4l2_dev, struct sta2x11_vip, v4l2_dev);
+ unsigned long flags;
+ int ret, i;
+
+ pr_info("VIP: resume\n");
+ /* restore pci state */
+ if (vip->disabled) {
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ pr_warning("VIP: Can't enable device.\n");
+ return ret;
+ }
+ vip->disabled = 0;
+ }
+ ret = pci_set_power_state(pdev, PCI_D0);
+ if (ret) {
+ /*
+ * do not call pci_disable_device on sta2x11 because it
+ * breaks all other bus masters on this EP
+ */
+ pr_warning("VIP: Can't enable device.\n");
+ vip->disabled = 1;
+ return ret;
+ }
+
+ pci_restore_state(pdev);
+
+ spin_lock_irqsave(&vip->slock, flags);
+ for (i = 1; i < SAVE_COUNT; i++)
+ REG_WRITE(vip, 4 * i, vip->register_save_area[i]);
+ for (i = 0; i < AUX_COUNT; i++)
+ REG_WRITE(vip, registers_to_save[i],
+ vip->register_save_area[SAVE_COUNT + IRQ_COUNT + i]);
+ REG_WRITE(vip, DVP_CTL, vip->register_save_area[0]);
+ REG_WRITE(vip, DVP_ITM, vip->register_save_area[SAVE_COUNT]);
+ spin_unlock_irqrestore(&vip->slock, flags);
+ return 0;
+}
+
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(sta2x11_vip_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_VIP)},
+ {0,}
+};
+
+static struct pci_driver sta2x11_vip_driver = {
+ .name = DRV_NAME,
+ .probe = sta2x11_vip_init_one,
+ .remove = __devexit_p(sta2x11_vip_remove_one),
+ .id_table = sta2x11_vip_pci_tbl,
+#ifdef CONFIG_PM
+ .suspend = sta2x11_vip_suspend,
+ .resume = sta2x11_vip_resume,
+#endif
+};
+
+static int __init sta2x11_vip_init_module(void)
+{
+ return pci_register_driver(&sta2x11_vip_driver);
+}
+
+static void __exit sta2x11_vip_exit_module(void)
+{
+ pci_unregister_driver(&sta2x11_vip_driver);
+}
+
+#ifdef MODULE
+module_init(sta2x11_vip_init_module);
+module_exit(sta2x11_vip_exit_module);
+#else
+late_initcall_sync(sta2x11_vip_init_module);
+#endif
+
+MODULE_DESCRIPTION("STA2X11 Video Input Port driver");
+MODULE_AUTHOR("Wind River");
+MODULE_LICENSE("GPL v2");
+MODULE_SUPPORTED_DEVICE("sta2x11 video input");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, sta2x11_vip_pci_tbl);
diff --git a/drivers/media/video/sta2x11_vip.h b/drivers/media/video/sta2x11_vip.h
new file mode 100644
index 000000000000..4f81a13666eb
--- /dev/null
+++ b/drivers/media/video/sta2x11_vip.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2011 Wind River Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Anders Wallin <anders.wallin@windriver.com>
+ *
+ */
+
+#ifndef __STA2X11_VIP_H
+#define __STA2X11_VIP_H
+
+/**
+ * struct vip_config - video input configuration data
+ * @pwr_name: ADV powerdown name
+ * @pwr_pin: ADV powerdown pin
+ * @reset_name: ADV reset name
+ * @reset_pin: ADV reset pin
+ * @i2c_id: ID of the I2C adapter the ADV decoder is connected to
+ * @i2c_addr: I2C address of the ADV decoder
+ */
+struct vip_config {
+ const char *pwr_name;
+ int pwr_pin;
+ const char *reset_name;
+ int reset_pin;
+ int i2c_id;
+ int i2c_addr;
+};
+
+#endif /* __STA2X11_VIP_H */
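
The probe routine above fetches this structure with dev_get_platdata() and passes i2c_id and i2c_addr to v4l2_i2c_new_subdev() for the "adv7180" decoder, so platform code is expected to attach it to the VIP PCI device as platform data. A hedged sketch of what such board data could look like; the pin numbers, names and I2C values below are placeholders, not taken from any real board:

#include "sta2x11_vip.h"

/* Hypothetical board data; every value here is an example only. */
static struct vip_config example_vip_config = {
	.pwr_name	= "vip-adv-pwrdn",
	.pwr_pin	= 100,		/* use -1 if the line is not wired */
	.reset_name	= "vip-adv-reset",
	.reset_pin	= 101,		/* use -1 if the line is not wired */
	.i2c_id		= 0,		/* adapter number of the decoder's I2C bus */
	.i2c_addr	= 0x21,		/* decoder slave address, example value */
};
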
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index d427f8436c70..86a0fc56c330 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -38,13 +38,13 @@
#include "stk-webcam.h"
-static bool hflip = 1;
+static bool hflip;
module_param(hflip, bool, 0444);
-MODULE_PARM_DESC(hflip, "Horizontal image flip (mirror). Defaults to 1");
+MODULE_PARM_DESC(hflip, "Horizontal image flip (mirror). Defaults to 0");
-static bool vflip = 1;
+static bool vflip;
module_param(vflip, bool, 0444);
-MODULE_PARM_DESC(vflip, "Vertical image flip. Defaults to 1");
+MODULE_PARM_DESC(vflip, "Vertical image flip. Defaults to 0");
static int debug;
module_param(debug, int, 0444);
diff --git a/drivers/media/video/tda9840.c b/drivers/media/video/tda9840.c
index 465d7086babf..3d7ddd93282d 100644
--- a/drivers/media/video/tda9840.c
+++ b/drivers/media/video/tda9840.c
@@ -66,29 +66,53 @@ static void tda9840_write(struct v4l2_subdev *sd, u8 reg, u8 val)
val, reg);
}
+static int tda9840_status(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 byte;
+
+ if (1 != i2c_master_recv(client, &byte, 1)) {
+ v4l2_dbg(1, debug, sd,
+ "i2c_master_recv() failed\n");
+ return -EIO;
+ }
+
+ if (byte & 0x80) {
+ v4l2_dbg(1, debug, sd,
+ "TDA9840_DETECT: register contents invalid\n");
+ return -EINVAL;
+ }
+
+ v4l2_dbg(1, debug, sd, "TDA9840_DETECT: byte: 0x%02x\n", byte);
+ return byte & 0x60;
+}
+
static int tda9840_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *t)
{
+ int stat = tda9840_status(sd);
int byte;
if (t->index)
return -EINVAL;
- switch (t->audmode) {
- case V4L2_TUNER_MODE_STEREO:
- byte = TDA9840_SET_STEREO;
- break;
- case V4L2_TUNER_MODE_LANG1_LANG2:
- byte = TDA9840_SET_BOTH;
- break;
- case V4L2_TUNER_MODE_LANG1:
- byte = TDA9840_SET_LANG1;
- break;
- case V4L2_TUNER_MODE_LANG2:
- byte = TDA9840_SET_LANG2;
- break;
- default:
+ stat = stat < 0 ? 0 : stat;
+ if (stat == 0 || stat == 0x60) /* mono input */
byte = TDA9840_SET_MONO;
- break;
+ else if (stat == 0x40) /* stereo input */
+ byte = (t->audmode == V4L2_TUNER_MODE_MONO) ?
+ TDA9840_SET_MONO : TDA9840_SET_STEREO;
+ else { /* bilingual */
+ switch (t->audmode) {
+ case V4L2_TUNER_MODE_LANG1_LANG2:
+ byte = TDA9840_SET_BOTH;
+ break;
+ case V4L2_TUNER_MODE_LANG2:
+ byte = TDA9840_SET_LANG2;
+ break;
+ default:
+ byte = TDA9840_SET_LANG1;
+ break;
+ }
}
v4l2_dbg(1, debug, sd, "TDA9840_SWITCH: 0x%02x\n", byte);
tda9840_write(sd, SWITCH, byte);
@@ -97,25 +121,14 @@ static int tda9840_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *t)
static int tda9840_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *t)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 byte;
-
- t->rxsubchans = V4L2_TUNER_SUB_MONO;
- if (1 != i2c_master_recv(client, &byte, 1)) {
- v4l2_dbg(1, debug, sd,
- "i2c_master_recv() failed\n");
- return -EIO;
- }
+ int stat = tda9840_status(sd);
- if (byte & 0x80) {
- v4l2_dbg(1, debug, sd,
- "TDA9840_DETECT: register contents invalid\n");
- return -EINVAL;
- }
+ if (stat < 0)
+ return stat;
- v4l2_dbg(1, debug, sd, "TDA9840_DETECT: byte: 0x%02x\n", byte);
+ t->rxsubchans = V4L2_TUNER_SUB_MONO;
- switch (byte & 0x60) {
+ switch (stat & 0x60) {
case 0x00:
t->rxsubchans = V4L2_TUNER_SUB_MONO;
break;
diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c
index a794ae62aebf..bfbf9e56b0a4 100644
--- a/drivers/media/video/tlg2300/pd-video.c
+++ b/drivers/media/video/tlg2300/pd-video.c
@@ -150,7 +150,6 @@ static int vidioc_querycap(struct file *file, void *fh,
strcpy(cap->driver, "tele-video");
strcpy(cap->card, "Telegent Poseidon");
usb_make_path(p->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = KERNEL_VERSION(0, 0, 1);
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_STREAMING |
V4L2_CAP_READWRITE | V4L2_CAP_VBI_CAPTURE;
diff --git a/drivers/media/video/tm6000/tm6000-input.c b/drivers/media/video/tm6000/tm6000-input.c
index 859eb90e4d56..e80b7e190471 100644
--- a/drivers/media/video/tm6000/tm6000-input.c
+++ b/drivers/media/video/tm6000/tm6000-input.c
@@ -168,7 +168,6 @@ static void tm6000_ir_urb_received(struct urb *urb)
struct tm6000_IR *ir = dev->ir;
struct tm6000_ir_poll_result poll_result;
char *buf;
- int rc;
dprintk(2, "%s\n",__func__);
if (urb->status < 0 || urb->actual_length <= 0) {
@@ -192,7 +191,7 @@ static void tm6000_ir_urb_received(struct urb *urb)
dprintk(1, "%s, scancode: 0x%04x\n",__func__, poll_result.rc_data);
rc_keydown(ir->rc, poll_result.rc_data, 0);
- rc = usb_submit_urb(urb, GFP_ATOMIC);
+ usb_submit_urb(urb, GFP_ATOMIC);
/*
* Flash the led. We can't do it here, as it is running on IRQ context.
* So, use the scheduler to do it, in a few ms.
diff --git a/drivers/media/video/tm6000/tm6000-stds.c b/drivers/media/video/tm6000/tm6000-stds.c
index 9dc0831d813f..5e28d6a2412f 100644
--- a/drivers/media/video/tm6000/tm6000-stds.c
+++ b/drivers/media/video/tm6000/tm6000-stds.c
@@ -338,7 +338,6 @@ static int tm6000_set_audio_std(struct tm6000_core *dev)
uint8_t areg_02 = 0x04; /* GC1 Fixed gain 0dB */
uint8_t areg_05 = 0x01; /* Auto 4.5 = M Japan, Auto 6.5 = DK */
uint8_t areg_06 = 0x02; /* Auto de-emphasis, mannual channel mode */
- uint8_t nicam_flag = 0; /* No NICAM */
if (dev->radio) {
tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x00);
@@ -398,7 +397,6 @@ static int tm6000_set_audio_std(struct tm6000_core *dev)
} else {
areg_05 = 0x07;
}
- nicam_flag = 1;
break;
/* other */
case 3:
diff --git a/drivers/media/video/tm6000/tm6000-video.c b/drivers/media/video/tm6000/tm6000-video.c
index bc13db736e24..f7034df94e0a 100644
--- a/drivers/media/video/tm6000/tm6000-video.c
+++ b/drivers/media/video/tm6000/tm6000-video.c
@@ -169,7 +169,6 @@ static inline void get_next_buf(struct tm6000_dmaqueue *dma_q,
struct tm6000_buffer **buf)
{
struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
- char *outp;
if (list_empty(&dma_q->active)) {
dprintk(dev, V4L2_DEBUG_QUEUE, "No active queue to serve\n");
@@ -179,11 +178,6 @@ static inline void get_next_buf(struct tm6000_dmaqueue *dma_q,
*buf = list_entry(dma_q->active.next,
struct tm6000_buffer, vb.queue);
-
- /* Cleans up buffer - Useful for testing for frame/URB loss */
- outp = videobuf_to_vmalloc(&(*buf)->vb);
-
- return;
}
/*
@@ -211,7 +205,7 @@ static int copy_streams(u8 *data, unsigned long len,
{
struct tm6000_dmaqueue *dma_q = urb->context;
struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
- u8 *ptr = data, *endp = data+len, c;
+ u8 *ptr = data, *endp = data+len;
unsigned long header = 0;
int rc = 0;
unsigned int cmd, cpysize, pktsize, size, field, block, line, pos = 0;
@@ -264,7 +258,6 @@ static int copy_streams(u8 *data, unsigned long len,
}
/* split the header fields */
- c = (header >> 24) & 0xff;
size = ((header & 0x7e) << 1);
if (size > 0)
size -= 4;
@@ -889,7 +882,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->driver, "tm6000", sizeof(cap->driver));
strlcpy(cap->card, "Trident TVMaster TM5600/6000/6010", sizeof(cap->card));
- cap->version = TM6000_VERSION;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING |
V4L2_CAP_AUDIO |
@@ -1732,6 +1724,10 @@ static struct video_device *vdev_init(struct tm6000_core *dev,
vfd->release = video_device_release;
vfd->debug = tm6000_debug;
vfd->lock = &dev->lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
diff --git a/drivers/media/video/tm6000/tm6000.h b/drivers/media/video/tm6000/tm6000.h
index 27ba659cfa85..6df418658c9c 100644
--- a/drivers/media/video/tm6000/tm6000.h
+++ b/drivers/media/video/tm6000/tm6000.h
@@ -33,8 +33,6 @@
#include "dvb_frontend.h"
#include "dmxdev.h"
-#define TM6000_VERSION KERNEL_VERSION(0, 0, 2)
-
/* Inputs */
enum tm6000_itype {
TM6000_INPUT_TV = 1,
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index a5c6397ad591..3e050e12153b 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -1241,8 +1241,10 @@ static int tuner_log_status(struct v4l2_subdev *sd)
return 0;
}
-static int tuner_suspend(struct i2c_client *c, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int tuner_suspend(struct device *dev)
{
+ struct i2c_client *c = to_i2c_client(dev);
struct tuner *t = to_tuner(i2c_get_clientdata(c));
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
@@ -1254,8 +1256,9 @@ static int tuner_suspend(struct i2c_client *c, pm_message_t state)
return 0;
}
-static int tuner_resume(struct i2c_client *c)
+static int tuner_resume(struct device *dev)
{
+ struct i2c_client *c = to_i2c_client(dev);
struct tuner *t = to_tuner(i2c_get_clientdata(c));
tuner_dbg("resume\n");
@@ -1266,6 +1269,7 @@ static int tuner_resume(struct i2c_client *c)
return 0;
}
+#endif
static int tuner_command(struct i2c_client *client, unsigned cmd, void *arg)
{
@@ -1310,6 +1314,10 @@ static const struct v4l2_subdev_ops tuner_ops = {
* I2C structs and module init functions
*/
+static const struct dev_pm_ops tuner_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tuner_suspend, tuner_resume)
+};
+
static const struct i2c_device_id tuner_id[] = {
{ "tuner", }, /* autodetect */
{ }
@@ -1320,12 +1328,11 @@ static struct i2c_driver tuner_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "tuner",
+ .pm = &tuner_pm_ops,
},
.probe = tuner_probe,
.remove = tuner_remove,
.command = tuner_command,
- .suspend = tuner_suspend,
- .resume = tuner_resume,
.id_table = tuner_id,
};
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index 1326e11cf4a9..b7867427e5c4 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -822,7 +822,7 @@ static int tvp5150_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
if (index)
return -EINVAL;
- *code = V4L2_MBUS_FMT_YUYV8_2X8;
+ *code = V4L2_MBUS_FMT_UYVY8_2X8;
return 0;
}
@@ -830,23 +830,16 @@ static int tvp5150_mbus_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *f)
{
struct tvp5150 *decoder = to_tvp5150(sd);
- v4l2_std_id std;
if (f == NULL)
return -EINVAL;
tvp5150_reset(sd, 0);
- /* Calculate height and width based on current standard */
- if (decoder->norm == V4L2_STD_ALL)
- std = tvp5150_read_std(sd);
- else
- std = decoder->norm;
-
f->width = decoder->rect.width;
f->height = decoder->rect.height;
- f->code = V4L2_MBUS_FMT_YUYV8_2X8;
+ f->code = V4L2_MBUS_FMT_UYVY8_2X8;
f->field = V4L2_FIELD_SEQ_TB;
f->colorspace = V4L2_COLORSPACE_SMPTE170M;
diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c
index d7676d85c4df..fb6a5b57eb83 100644
--- a/drivers/media/video/tvp7002.c
+++ b/drivers/media/video/tvp7002.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/module.h>
+#include <linux/v4l2-dv-timings.h>
#include <media/tvp7002.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
@@ -328,6 +329,7 @@ static const struct i2c_reg_value tvp7002_parms_720P50[] = {
/* Preset definition for handling device operation */
struct tvp7002_preset_definition {
u32 preset;
+ struct v4l2_dv_timings timings;
const struct i2c_reg_value *p_settings;
enum v4l2_colorspace color_space;
enum v4l2_field scanmode;
@@ -341,6 +343,7 @@ struct tvp7002_preset_definition {
static const struct tvp7002_preset_definition tvp7002_presets[] = {
{
V4L2_DV_720P60,
+ V4L2_DV_BT_CEA_1280X720P60,
tvp7002_parms_720P60,
V4L2_COLORSPACE_REC709,
V4L2_FIELD_NONE,
@@ -351,6 +354,7 @@ static const struct tvp7002_preset_definition tvp7002_presets[] = {
},
{
V4L2_DV_1080I60,
+ V4L2_DV_BT_CEA_1920X1080I60,
tvp7002_parms_1080I60,
V4L2_COLORSPACE_REC709,
V4L2_FIELD_INTERLACED,
@@ -361,6 +365,7 @@ static const struct tvp7002_preset_definition tvp7002_presets[] = {
},
{
V4L2_DV_1080I50,
+ V4L2_DV_BT_CEA_1920X1080I50,
tvp7002_parms_1080I50,
V4L2_COLORSPACE_REC709,
V4L2_FIELD_INTERLACED,
@@ -371,6 +376,7 @@ static const struct tvp7002_preset_definition tvp7002_presets[] = {
},
{
V4L2_DV_720P50,
+ V4L2_DV_BT_CEA_1280X720P50,
tvp7002_parms_720P50,
V4L2_COLORSPACE_REC709,
V4L2_FIELD_NONE,
@@ -381,6 +387,7 @@ static const struct tvp7002_preset_definition tvp7002_presets[] = {
},
{
V4L2_DV_1080P60,
+ V4L2_DV_BT_CEA_1920X1080P60,
tvp7002_parms_1080P60,
V4L2_COLORSPACE_REC709,
V4L2_FIELD_NONE,
@@ -391,6 +398,7 @@ static const struct tvp7002_preset_definition tvp7002_presets[] = {
},
{
V4L2_DV_480P59_94,
+ V4L2_DV_BT_CEA_720X480P59_94,
tvp7002_parms_480P,
V4L2_COLORSPACE_SMPTE170M,
V4L2_FIELD_NONE,
@@ -401,6 +409,7 @@ static const struct tvp7002_preset_definition tvp7002_presets[] = {
},
{
V4L2_DV_576P50,
+ V4L2_DV_BT_CEA_720X576P50,
tvp7002_parms_576P,
V4L2_COLORSPACE_SMPTE170M,
V4L2_FIELD_NONE,
@@ -605,6 +614,35 @@ static int tvp7002_s_dv_preset(struct v4l2_subdev *sd,
return -EINVAL;
}
+static int tvp7002_s_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *dv_timings)
+{
+ struct tvp7002 *device = to_tvp7002(sd);
+ const struct v4l2_bt_timings *bt = &dv_timings->bt;
+ int i;
+
+ if (dv_timings->type != V4L2_DV_BT_656_1120)
+ return -EINVAL;
+ for (i = 0; i < NUM_PRESETS; i++) {
+ const struct v4l2_bt_timings *t = &tvp7002_presets[i].timings.bt;
+
+ if (!memcmp(bt, t, &bt->standards - &bt->width)) {
+ device->current_preset = &tvp7002_presets[i];
+ return tvp7002_write_inittab(sd, tvp7002_presets[i].p_settings);
+ }
+ }
+ return -EINVAL;
+}
+
+static int tvp7002_g_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *dv_timings)
+{
+ struct tvp7002 *device = to_tvp7002(sd);
+
+ *dv_timings = device->current_preset->timings;
+ return 0;
+}
+
/*
* tvp7002_s_ctrl() - Set a control
* @ctrl: ptr to v4l2_ctrl struct
@@ -666,11 +704,9 @@ static int tvp7002_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *f
* Returns the current DV preset by TVP7002. If no active input is
* detected, returns -EINVAL
*/
-static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
- struct v4l2_dv_preset *qpreset)
+static int tvp7002_query_dv(struct v4l2_subdev *sd, int *index)
{
const struct tvp7002_preset_definition *presets = tvp7002_presets;
- struct tvp7002 *device;
u8 progressive;
u32 lpfr;
u32 cpln;
@@ -679,12 +715,9 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
u8 lpf_msb;
u8 cpl_lsb;
u8 cpl_msb;
- int index;
-
- /* Return invalid preset if no active input is detected */
- qpreset->preset = V4L2_DV_INVALID;
- device = to_tvp7002(sd);
+ /* Return invalid index if no active input is detected */
+ *index = NUM_PRESETS;
/* Read standards from device registers */
tvp7002_read_err(sd, TVP7002_L_FRAME_STAT_LSBS, &lpf_lsb, &error);
@@ -705,8 +738,8 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
progressive = (lpf_msb & TVP7002_INPR_MASK) >> TVP7002_IP_SHIFT;
/* Do checking of video modes */
- for (index = 0; index < NUM_PRESETS; index++, presets++)
- if (lpfr == presets->lines_per_frame &&
+ for (*index = 0; *index < NUM_PRESETS; (*index)++, presets++)
+ if (lpfr == presets->lines_per_frame &&
progressive == presets->progressive) {
if (presets->cpl_min == 0xffff)
break;
@@ -714,17 +747,42 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
break;
}
- if (index == NUM_PRESETS) {
+ if (*index == NUM_PRESETS) {
v4l2_dbg(1, debug, sd, "detection failed: lpf = %x, cpl = %x\n",
lpfr, cpln);
- return 0;
+ return -ENOLINK;
}
- /* Set values in found preset */
- qpreset->preset = presets->preset;
-
/* Update lines per frame and clocks per line info */
- v4l2_dbg(1, debug, sd, "detected preset: %d\n", presets->preset);
+ v4l2_dbg(1, debug, sd, "detected preset: %d\n", *index);
+ return 0;
+}
+
+static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
+ struct v4l2_dv_preset *qpreset)
+{
+ int index;
+ int err = tvp7002_query_dv(sd, &index);
+
+ if (err || index == NUM_PRESETS) {
+ qpreset->preset = V4L2_DV_INVALID;
+ if (err == -ENOLINK)
+ err = 0;
+ return err;
+ }
+ qpreset->preset = tvp7002_presets[index].preset;
+ return 0;
+}
+
+static int tvp7002_query_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ int index;
+ int err = tvp7002_query_dv(sd, &index);
+
+ if (err)
+ return err;
+ *timings = tvp7002_presets[index].timings;
return 0;
}
@@ -894,6 +952,17 @@ static int tvp7002_enum_dv_presets(struct v4l2_subdev *sd,
return v4l_fill_dv_preset_info(tvp7002_presets[preset->index].preset, preset);
}
+static int tvp7002_enum_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_enum_dv_timings *timings)
+{
+ /* Check requested format index is within range */
+ if (timings->index >= NUM_PRESETS)
+ return -EINVAL;
+
+ timings->timings = tvp7002_presets[timings->index].timings;
+ return 0;
+}
+
static const struct v4l2_ctrl_ops tvp7002_ctrl_ops = {
.s_ctrl = tvp7002_s_ctrl,
};
@@ -920,6 +989,10 @@ static const struct v4l2_subdev_video_ops tvp7002_video_ops = {
.enum_dv_presets = tvp7002_enum_dv_presets,
.s_dv_preset = tvp7002_s_dv_preset,
.query_dv_preset = tvp7002_query_dv_preset,
+ .g_dv_timings = tvp7002_g_dv_timings,
+ .s_dv_timings = tvp7002_s_dv_timings,
+ .enum_dv_timings = tvp7002_enum_dv_timings,
+ .query_dv_timings = tvp7002_query_dv_timings,
.s_stream = tvp7002_s_stream,
.g_mbus_fmt = tvp7002_mbus_fmt,
.try_mbus_fmt = tvp7002_mbus_fmt,
diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
index f344411a4578..c9b2042f8bdf 100644
--- a/drivers/media/video/usbvision/usbvision-core.c
+++ b/drivers/media/video/usbvision/usbvision-core.c
@@ -601,13 +601,12 @@ static int usbvision_decompress(struct usb_usbvision *usbvision, unsigned char *
unsigned char *decompressed, int *start_pos,
int *block_typestart_pos, int len)
{
- int rest_pixel, idx, max_pos, pos, extra_pos, block_len, block_type_pos, block_type_len;
+ int rest_pixel, idx, pos, extra_pos, block_len, block_type_pos, block_type_len;
unsigned char block_byte, block_code, block_type, block_type_byte, integrator;
integrator = 0;
pos = *start_pos;
block_type_pos = *block_typestart_pos;
- max_pos = 396; /* pos + len; */
extra_pos = pos;
block_len = 0;
block_byte = 0;
@@ -702,7 +701,7 @@ static enum parse_state usbvision_parse_compress(struct usb_usbvision *usbvision
unsigned char strip_data[USBVISION_STRIP_LEN_MAX];
unsigned char strip_header[USBVISION_STRIP_HEADER_LEN];
int idx, idx_end, strip_len, strip_ptr, startblock_pos, block_pos, block_type_pos;
- int clipmask_index, bytes_per_pixel, rc;
+ int clipmask_index;
int image_size;
unsigned char rv, gv, bv;
static unsigned char *Y, *U, *V;
@@ -769,7 +768,6 @@ static enum parse_state usbvision_parse_compress(struct usb_usbvision *usbvision
return parse_state_next_frame;
}
- bytes_per_pixel = frame->v4l2_format.bytes_per_pixel;
clipmask_index = frame->curline * MAX_FRAME_WIDTH;
scratch_get(usbvision, strip_data, strip_len);
@@ -781,14 +779,14 @@ static enum parse_state usbvision_parse_compress(struct usb_usbvision *usbvision
usbvision->block_pos = block_pos;
- rc = usbvision_decompress(usbvision, strip_data, Y, &block_pos, &block_type_pos, idx_end);
+ usbvision_decompress(usbvision, strip_data, Y, &block_pos, &block_type_pos, idx_end);
if (strip_len > usbvision->max_strip_len)
usbvision->max_strip_len = strip_len;
if (frame->curline % 2)
- rc = usbvision_decompress(usbvision, strip_data, V, &block_pos, &block_type_pos, idx_end / 2);
+ usbvision_decompress(usbvision, strip_data, V, &block_pos, &block_type_pos, idx_end / 2);
else
- rc = usbvision_decompress(usbvision, strip_data, U, &block_pos, &block_type_pos, idx_end / 2);
+ usbvision_decompress(usbvision, strip_data, U, &block_pos, &block_type_pos, idx_end / 2);
if (block_pos > usbvision->comprblock_pos)
usbvision->comprblock_pos = block_pos;
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 5a74f5e07d7d..9bd8f084f348 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1296,6 +1296,10 @@ static struct video_device *usbvision_vdev_init(struct usb_usbvision *usbvision,
if (NULL == vdev)
return NULL;
*vdev = *vdev_template;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags);
vdev->lock = &usbvision->v4l2_lock;
vdev->v4l2_dev = &usbvision->v4l2_dev;
snprintf(vdev->name, sizeof(vdev->name), "%s", name);
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 0efd3b10b353..af26bbe6f76e 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -21,6 +21,7 @@
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/atomic.h>
+#include <media/v4l2-ctrls.h>
#include "uvcvideo.h"
@@ -420,6 +421,8 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
+ .master_id = V4L2_CID_HUE_AUTO,
+ .master_manual = 0,
},
{
.id = V4L2_CID_SATURATION,
@@ -492,6 +495,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
+ .slave_ids = { V4L2_CID_HUE, },
},
{
.id = V4L2_CID_EXPOSURE_AUTO,
@@ -504,6 +508,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.data_type = UVC_CTRL_DATA_TYPE_BITMASK,
.menu_info = exposure_auto_controls,
.menu_count = ARRAY_SIZE(exposure_auto_controls),
+ .slave_ids = { V4L2_CID_EXPOSURE_ABSOLUTE, },
},
{
.id = V4L2_CID_EXPOSURE_AUTO_PRIORITY,
@@ -524,6 +529,8 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
+ .master_id = V4L2_CID_EXPOSURE_AUTO,
+ .master_manual = V4L2_EXPOSURE_MANUAL,
},
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
@@ -534,6 +541,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
+ .slave_ids = { V4L2_CID_WHITE_BALANCE_TEMPERATURE, },
},
{
.id = V4L2_CID_WHITE_BALANCE_TEMPERATURE,
@@ -544,6 +552,8 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
+ .master_id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .master_manual = 0,
},
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
@@ -554,6 +564,8 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
+ .slave_ids = { V4L2_CID_BLUE_BALANCE,
+ V4L2_CID_RED_BALANCE },
},
{
.id = V4L2_CID_BLUE_BALANCE,
@@ -564,6 +576,8 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
+ .master_id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .master_manual = 0,
},
{
.id = V4L2_CID_RED_BALANCE,
@@ -574,6 +588,8 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 16,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
+ .master_id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .master_manual = 0,
},
{
.id = V4L2_CID_FOCUS_ABSOLUTE,
@@ -584,6 +600,8 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
+ .master_id = V4L2_CID_FOCUS_AUTO,
+ .master_manual = 0,
},
{
.id = V4L2_CID_FOCUS_AUTO,
@@ -594,6 +612,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
+ .slave_ids = { V4L2_CID_FOCUS_ABSOLUTE, },
},
{
.id = V4L2_CID_IRIS_ABSOLUTE,
@@ -899,25 +918,54 @@ static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain,
return 0;
}
-int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
- struct v4l2_queryctrl *v4l2_ctrl)
+static int __uvc_ctrl_get(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl, struct uvc_control_mapping *mapping,
+ s32 *value)
{
- struct uvc_control *ctrl;
- struct uvc_control_mapping *mapping;
struct uvc_menu_info *menu;
unsigned int i;
int ret;
- ret = mutex_lock_interruptible(&chain->ctrl_mutex);
- if (ret < 0)
- return -ERESTARTSYS;
+ if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0)
+ return -EINVAL;
- ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping);
- if (ctrl == NULL) {
- ret = -EINVAL;
- goto done;
+ if (!ctrl->loaded) {
+ ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id,
+ chain->dev->intfnum, ctrl->info.selector,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
+ ctrl->info.size);
+ if (ret < 0)
+ return ret;
+
+ ctrl->loaded = 1;
}
+ *value = mapping->get(mapping, UVC_GET_CUR,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+
+ if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
+ menu = mapping->menu_info;
+ for (i = 0; i < mapping->menu_count; ++i, ++menu) {
+ if (menu->value == *value) {
+ *value = i;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ struct v4l2_queryctrl *v4l2_ctrl)
+{
+ struct uvc_control_mapping *master_map = NULL;
+ struct uvc_control *master_ctrl = NULL;
+ struct uvc_menu_info *menu;
+ unsigned int i;
+
memset(v4l2_ctrl, 0, sizeof *v4l2_ctrl);
v4l2_ctrl->id = mapping->id;
v4l2_ctrl->type = mapping->v4l2_type;
@@ -929,10 +977,23 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ if (mapping->master_id)
+ __uvc_find_control(ctrl->entity, mapping->master_id,
+ &master_map, &master_ctrl, 0);
+ if (master_ctrl && (master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR)) {
+ s32 val;
+ int ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != mapping->master_manual)
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
+ }
+
if (!ctrl->cached) {
- ret = uvc_ctrl_populate_cache(chain, ctrl);
+ int ret = uvc_ctrl_populate_cache(chain, ctrl);
if (ret < 0)
- goto done;
+ return ret;
}
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_DEF) {
@@ -954,19 +1015,19 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
}
}
- goto done;
+ return 0;
case V4L2_CTRL_TYPE_BOOLEAN:
v4l2_ctrl->minimum = 0;
v4l2_ctrl->maximum = 1;
v4l2_ctrl->step = 1;
- goto done;
+ return 0;
case V4L2_CTRL_TYPE_BUTTON:
v4l2_ctrl->minimum = 0;
v4l2_ctrl->maximum = 0;
v4l2_ctrl->step = 0;
- goto done;
+ return 0;
default:
break;
@@ -984,6 +1045,27 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
v4l2_ctrl->step = mapping->get(mapping, UVC_GET_RES,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+ return 0;
+}
+
+int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
+ struct v4l2_queryctrl *v4l2_ctrl)
+{
+ struct uvc_control *ctrl;
+ struct uvc_control_mapping *mapping;
+ int ret;
+
+ ret = mutex_lock_interruptible(&chain->ctrl_mutex);
+ if (ret < 0)
+ return -ERESTARTSYS;
+
+ ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping);
+ if (ctrl == NULL) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = __uvc_query_v4l2_ctrl(chain, ctrl, mapping, v4l2_ctrl);
done:
mutex_unlock(&chain->ctrl_mutex);
return ret;
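
A hedged userspace sketch (not part of the patch) of what the new master/slave mappings buy: with V4L2_CID_FOCUS_AUTO enabled, querying its slave V4L2_CID_FOCUS_ABSOLUTE should now report V4L2_CTRL_FLAG_INACTIVE. fd is assumed to be an open uvc video node.

#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int focus_absolute_is_inactive(int fd)
{
	struct v4l2_queryctrl qc = { .id = V4L2_CID_FOCUS_ABSOLUTE };

	if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) < 0)
		return -1;	/* control not found or query failed */
	return !!(qc.flags & V4L2_CTRL_FLAG_INACTIVE);
}
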
@@ -1054,6 +1136,174 @@ done:
return ret;
}
+/* --------------------------------------------------------------------------
+ * Ctrl event handling
+ */
+
+static void uvc_ctrl_fill_event(struct uvc_video_chain *chain,
+ struct v4l2_event *ev,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ s32 value, u32 changes)
+{
+ struct v4l2_queryctrl v4l2_ctrl;
+
+ __uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl);
+
+ memset(ev->reserved, 0, sizeof(ev->reserved));
+ ev->type = V4L2_EVENT_CTRL;
+ ev->id = v4l2_ctrl.id;
+ ev->u.ctrl.value = value;
+ ev->u.ctrl.changes = changes;
+ ev->u.ctrl.type = v4l2_ctrl.type;
+ ev->u.ctrl.flags = v4l2_ctrl.flags;
+ ev->u.ctrl.minimum = v4l2_ctrl.minimum;
+ ev->u.ctrl.maximum = v4l2_ctrl.maximum;
+ ev->u.ctrl.step = v4l2_ctrl.step;
+ ev->u.ctrl.default_value = v4l2_ctrl.default_value;
+}
+
+static void uvc_ctrl_send_event(struct uvc_fh *handle,
+ struct uvc_control *ctrl, struct uvc_control_mapping *mapping,
+ s32 value, u32 changes)
+{
+ struct v4l2_subscribed_event *sev;
+ struct v4l2_event ev;
+
+ if (list_empty(&mapping->ev_subs))
+ return;
+
+ uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, value, changes);
+
+ list_for_each_entry(sev, &mapping->ev_subs, node) {
+ if (sev->fh && (sev->fh != &handle->vfh ||
+ (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK) ||
+ (changes & V4L2_EVENT_CTRL_CH_FLAGS)))
+ v4l2_event_queue_fh(sev->fh, &ev);
+ }
+}
+
+static void uvc_ctrl_send_slave_event(struct uvc_fh *handle,
+ struct uvc_control *master, u32 slave_id,
+ const struct v4l2_ext_control *xctrls, unsigned int xctrls_count)
+{
+ struct uvc_control_mapping *mapping = NULL;
+ struct uvc_control *ctrl = NULL;
+ u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
+ unsigned int i;
+ s32 val = 0;
+
+ /*
+ * We can skip sending an event for the slave if the slave
+ * is being modified in the same transaction.
+ */
+ for (i = 0; i < xctrls_count; i++) {
+ if (xctrls[i].id == slave_id)
+ return;
+ }
+
+ __uvc_find_control(master->entity, slave_id, &mapping, &ctrl, 0);
+ if (ctrl == NULL)
+ return;
+
+ if (__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0)
+ changes |= V4L2_EVENT_CTRL_CH_VALUE;
+
+ uvc_ctrl_send_event(handle, ctrl, mapping, val, changes);
+}
+
+static void uvc_ctrl_send_events(struct uvc_fh *handle,
+ const struct v4l2_ext_control *xctrls, unsigned int xctrls_count)
+{
+ struct uvc_control_mapping *mapping;
+ struct uvc_control *ctrl;
+ u32 changes = V4L2_EVENT_CTRL_CH_VALUE;
+ unsigned int i;
+ unsigned int j;
+
+ for (i = 0; i < xctrls_count; ++i) {
+ ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
+
+ for (j = 0; j < ARRAY_SIZE(mapping->slave_ids); ++j) {
+ if (!mapping->slave_ids[j])
+ break;
+ uvc_ctrl_send_slave_event(handle, ctrl,
+ mapping->slave_ids[j],
+ xctrls, xctrls_count);
+ }
+
+ /*
+ * If the master is being modified in the same transaction
+ * flags may change too.
+ */
+ if (mapping->master_id) {
+ for (j = 0; j < xctrls_count; j++) {
+ if (xctrls[j].id == mapping->master_id) {
+ changes |= V4L2_EVENT_CTRL_CH_FLAGS;
+ break;
+ }
+ }
+ }
+
+ uvc_ctrl_send_event(handle, ctrl, mapping, xctrls[i].value,
+ changes);
+ }
+}
+
+static int uvc_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
+{
+ struct uvc_fh *handle = container_of(sev->fh, struct uvc_fh, vfh);
+ struct uvc_control_mapping *mapping;
+ struct uvc_control *ctrl;
+ int ret;
+
+ ret = mutex_lock_interruptible(&handle->chain->ctrl_mutex);
+ if (ret < 0)
+ return -ERESTARTSYS;
+
+ ctrl = uvc_find_control(handle->chain, sev->id, &mapping);
+ if (ctrl == NULL) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ list_add_tail(&sev->node, &mapping->ev_subs);
+ if (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL) {
+ struct v4l2_event ev;
+ u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
+ s32 val = 0;
+
+ if (__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0)
+ changes |= V4L2_EVENT_CTRL_CH_VALUE;
+
+ uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, val,
+ changes);
+ /* Mark the queue as active, allowing this initial
+ event to be accepted. */
+ sev->elems = elems;
+ v4l2_event_queue_fh(sev->fh, &ev);
+ }
+
+done:
+ mutex_unlock(&handle->chain->ctrl_mutex);
+ return ret;
+}
+
+static void uvc_ctrl_del_event(struct v4l2_subscribed_event *sev)
+{
+ struct uvc_fh *handle = container_of(sev->fh, struct uvc_fh, vfh);
+
+ mutex_lock(&handle->chain->ctrl_mutex);
+ list_del(&sev->node);
+ mutex_unlock(&handle->chain->ctrl_mutex);
+}
+
+const struct v4l2_subscribed_event_ops uvc_ctrl_sub_ev_ops = {
+ .add = uvc_ctrl_add_event,
+ .del = uvc_ctrl_del_event,
+ .replace = v4l2_ctrl_replace,
+ .merge = v4l2_ctrl_merge,
+};
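
A hedged userspace sketch of exercising the new event plumbing: subscribe to V4L2_EVENT_CTRL for one control, ask for the initial state, and dequeue. The control chosen and the blocking DQEVENT are illustrative; the VIDIOC_SUBSCRIBE_EVENT handler added to uvc_v4l2.c later in this diff accepts exactly this subscription type.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int watch_exposure_auto(int fd)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_CTRL;
	sub.id = V4L2_CID_EXPOSURE_AUTO;
	sub.flags = V4L2_EVENT_SUB_FL_SEND_INITIAL;
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return -1;

	/* Blocks (fd opened without O_NONBLOCK) until the initial or a
	 * change event is queued. */
	if (ioctl(fd, VIDIOC_DQEVENT, &ev) < 0)
		return -1;
	return ev.u.ctrl.value;
}
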
/* --------------------------------------------------------------------------
* Control transactions
@@ -1101,9 +1351,12 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
/* Reset the loaded flag for auto-update controls that were
* marked as loaded in uvc_ctrl_get/uvc_ctrl_set to prevent
- * uvc_ctrl_get from using the cached value.
+ * uvc_ctrl_get from using the cached value, and for write-only
+ * controls to prevent uvc_ctrl_set from setting bits not
+ * explicitly set by the user.
*/
- if (ctrl->info.flags & UVC_CTRL_FLAG_AUTO_UPDATE)
+ if (ctrl->info.flags & UVC_CTRL_FLAG_AUTO_UPDATE ||
+ !(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
ctrl->loaded = 0;
if (!ctrl->dirty)
@@ -1131,8 +1384,11 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
return 0;
}
-int __uvc_ctrl_commit(struct uvc_video_chain *chain, int rollback)
+int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
+ const struct v4l2_ext_control *xctrls,
+ unsigned int xctrls_count)
{
+ struct uvc_video_chain *chain = handle->chain;
struct uvc_entity *entity;
int ret = 0;
@@ -1143,6 +1399,8 @@ int __uvc_ctrl_commit(struct uvc_video_chain *chain, int rollback)
goto done;
}
+ if (!rollback)
+ uvc_ctrl_send_events(handle, xctrls, xctrls_count);
done:
mutex_unlock(&chain->ctrl_mutex);
return ret;
@@ -1153,39 +1411,12 @@ int uvc_ctrl_get(struct uvc_video_chain *chain,
{
struct uvc_control *ctrl;
struct uvc_control_mapping *mapping;
- struct uvc_menu_info *menu;
- unsigned int i;
- int ret;
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
- if (ctrl == NULL || (ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0)
+ if (ctrl == NULL)
return -EINVAL;
- if (!ctrl->loaded) {
- ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id,
- chain->dev->intfnum, ctrl->info.selector,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- ctrl->info.size);
- if (ret < 0)
- return ret;
-
- ctrl->loaded = 1;
- }
-
- xctrl->value = mapping->get(mapping, UVC_GET_CUR,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
-
- if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
- menu = mapping->menu_info;
- for (i = 0; i < mapping->menu_count; ++i, ++menu) {
- if (menu->value == xctrl->value) {
- xctrl->value = i;
- break;
- }
- }
- }
-
- return 0;
+ return __uvc_ctrl_get(chain, ctrl, mapping, &xctrl->value);
}
int uvc_ctrl_set(struct uvc_video_chain *chain,
@@ -1641,6 +1872,8 @@ static int __uvc_ctrl_add_mapping(struct uvc_device *dev,
if (map == NULL)
return -ENOMEM;
+ INIT_LIST_HEAD(&map->ev_subs);
+
size = sizeof(*mapping->menu_info) * mapping->menu_count;
map->menu_info = kmemdup(mapping->menu_info, size, GFP_KERNEL);
if (map->menu_info == NULL) {
@@ -1653,7 +1886,6 @@ static int __uvc_ctrl_add_mapping(struct uvc_device *dev,
if (map->set == NULL)
map->set = uvc_set_le_value;
- map->ctrl = &ctrl->info;
list_add_tail(&map->list, &ctrl->info.mappings);
uvc_trace(UVC_TRACE_CONTROL,
"Adding mapping '%s' to control %pUl/%u.\n",
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 8f54e24e3f35..9288fbd5001b 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -207,6 +207,19 @@ int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
return ret;
}
+#ifndef CONFIG_MMU
+unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
+ unsigned long pgoff)
+{
+ unsigned long ret;
+
+ mutex_lock(&queue->mutex);
+ ret = vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
+ mutex_unlock(&queue->mutex);
+ return ret;
+}
+#endif
+
unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
poll_table *wait)
{
@@ -237,36 +250,6 @@ int uvc_queue_allocated(struct uvc_video_queue *queue)
return allocated;
}
-#ifndef CONFIG_MMU
-/*
- * Get unmapped area.
- *
- * NO-MMU arch need this function to make mmap() work correctly.
- */
-unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
- unsigned long pgoff)
-{
- struct uvc_buffer *buffer;
- unsigned int i;
- unsigned long ret;
-
- mutex_lock(&queue->mutex);
- for (i = 0; i < queue->count; ++i) {
- buffer = &queue->buffer[i];
- if ((buffer->buf.m.offset >> PAGE_SHIFT) == pgoff)
- break;
- }
- if (i == queue->count) {
- ret = -EINVAL;
- goto done;
- }
- ret = (unsigned long)buf->mem;
-done:
- mutex_unlock(&queue->mutex);
- return ret;
-}
-#endif
-
/*
* Enable or disable the video buffers queue.
*
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index ff2cdddf9bc6..759bef8897e9 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -25,6 +25,8 @@
#include <linux/atomic.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include "uvcvideo.h"
@@ -505,6 +507,8 @@ static int uvc_v4l2_open(struct file *file)
}
}
+ v4l2_fh_init(&handle->vfh, stream->vdev);
+ v4l2_fh_add(&handle->vfh);
handle->chain = stream->chain;
handle->stream = stream;
handle->state = UVC_HANDLE_PASSIVE;
@@ -528,6 +532,8 @@ static int uvc_v4l2_release(struct file *file)
/* Release the file handle. */
uvc_dismiss_privileges(handle);
+ v4l2_fh_del(&handle->vfh);
+ v4l2_fh_exit(&handle->vfh);
kfree(handle);
file->private_data = NULL;
@@ -584,7 +590,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return ret;
ret = uvc_ctrl_get(chain, &xctrl);
- uvc_ctrl_rollback(chain);
+ uvc_ctrl_rollback(handle);
if (ret >= 0)
ctrl->value = xctrl.value;
break;
@@ -605,10 +611,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
ret = uvc_ctrl_set(chain, &xctrl);
if (ret < 0) {
- uvc_ctrl_rollback(chain);
+ uvc_ctrl_rollback(handle);
return ret;
}
- ret = uvc_ctrl_commit(chain);
+ ret = uvc_ctrl_commit(handle, &xctrl, 1);
if (ret == 0)
ctrl->value = xctrl.value;
break;
@@ -630,13 +636,13 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
for (i = 0; i < ctrls->count; ++ctrl, ++i) {
ret = uvc_ctrl_get(chain, ctrl);
if (ret < 0) {
- uvc_ctrl_rollback(chain);
+ uvc_ctrl_rollback(handle);
ctrls->error_idx = i;
return ret;
}
}
ctrls->error_idx = 0;
- ret = uvc_ctrl_rollback(chain);
+ ret = uvc_ctrl_rollback(handle);
break;
}
@@ -654,7 +660,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
for (i = 0; i < ctrls->count; ++ctrl, ++i) {
ret = uvc_ctrl_set(chain, ctrl);
if (ret < 0) {
- uvc_ctrl_rollback(chain);
+ uvc_ctrl_rollback(handle);
ctrls->error_idx = i;
return ret;
}
@@ -663,9 +669,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
ctrls->error_idx = 0;
if (cmd == VIDIOC_S_EXT_CTRLS)
- ret = uvc_ctrl_commit(chain);
+ ret = uvc_ctrl_commit(handle,
+ ctrls->controls, ctrls->count);
else
- ret = uvc_ctrl_rollback(chain);
+ ret = uvc_ctrl_rollback(handle);
break;
}
@@ -687,7 +694,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
break;
}
pin = iterm->id;
- } else if (pin < selector->bNrInPins) {
+ } else if (index < selector->bNrInPins) {
pin = selector->baSourceID[index];
list_for_each_entry(iterm, &chain->entities, chain) {
if (!UVC_ENTITY_IS_ITERM(iterm))
@@ -990,6 +997,26 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return uvc_video_enable(stream, 0);
}
+ case VIDIOC_SUBSCRIBE_EVENT:
+ {
+ struct v4l2_event_subscription *sub = arg;
+
+ switch (sub->type) {
+ case V4L2_EVENT_CTRL:
+ return v4l2_event_subscribe(&handle->vfh, sub, 0,
+ &uvc_ctrl_sub_ev_ops);
+ default:
+ return -EINVAL;
+ }
+ }
+
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return v4l2_event_unsubscribe(&handle->vfh, arg);
+
+ case VIDIOC_DQEVENT:
+ return v4l2_event_dequeue(&handle->vfh, arg,
+ file->f_flags & O_NONBLOCK);
+
/* Analog video standards make no sense for digital cameras. */
case VIDIOC_ENUMSTD:
case VIDIOC_QUERYSTD:
@@ -1097,7 +1124,8 @@ static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
__put_user(kp->menu_count, &up->menu_count))
return -EFAULT;
- __clear_user(up->reserved, sizeof(up->reserved));
+ if (__clear_user(up->reserved, sizeof(up->reserved)))
+ return -EFAULT;
if (kp->menu_count == 0)
return 0;
@@ -1105,8 +1133,6 @@ static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
if (get_user(p, &up->menu_info))
return -EFAULT;
umenus = compat_ptr(p);
- if (!access_ok(VERIFY_WRITE, umenus, kp->menu_count * sizeof(*umenus)))
- return -EFAULT;
if (copy_in_user(umenus, kmenus, kp->menu_count * sizeof(*umenus)))
return -EFAULT;
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 67f88d85bb16..7c3d082505b7 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -13,6 +13,8 @@
#include <linux/videodev2.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
#include <media/videobuf2-core.h>
/* --------------------------------------------------------------------------
@@ -153,8 +155,7 @@ struct uvc_control_info {
struct uvc_control_mapping {
struct list_head list;
-
- struct uvc_control_info *ctrl;
+ struct list_head ev_subs;
__u32 id;
__u8 name[32];
@@ -169,6 +170,10 @@ struct uvc_control_mapping {
struct uvc_menu_info *menu_info;
__u32 menu_count;
+ __u32 master_id;
+ __s32 master_manual;
+ __u32 slave_ids[2];
+
__s32 (*get) (struct uvc_control_mapping *mapping, __u8 query,
const __u8 *data);
void (*set) (struct uvc_control_mapping *mapping, __s32 value,
@@ -524,6 +529,7 @@ enum uvc_handle_state {
};
struct uvc_fh {
+ struct v4l2_fh vfh;
struct uvc_video_chain *chain;
struct uvc_streaming *stream;
enum uvc_handle_state state;
@@ -643,6 +649,8 @@ extern int uvc_status_suspend(struct uvc_device *dev);
extern int uvc_status_resume(struct uvc_device *dev);
/* Controls */
+extern const struct v4l2_subscribed_event_ops uvc_ctrl_sub_ev_ops;
+
extern int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
struct v4l2_queryctrl *v4l2_ctrl);
extern int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
@@ -655,14 +663,18 @@ extern void uvc_ctrl_cleanup_device(struct uvc_device *dev);
extern int uvc_ctrl_resume_device(struct uvc_device *dev);
extern int uvc_ctrl_begin(struct uvc_video_chain *chain);
-extern int __uvc_ctrl_commit(struct uvc_video_chain *chain, int rollback);
-static inline int uvc_ctrl_commit(struct uvc_video_chain *chain)
+extern int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
+ const struct v4l2_ext_control *xctrls,
+ unsigned int xctrls_count);
+static inline int uvc_ctrl_commit(struct uvc_fh *handle,
+ const struct v4l2_ext_control *xctrls,
+ unsigned int xctrls_count)
{
- return __uvc_ctrl_commit(chain, 0);
+ return __uvc_ctrl_commit(handle, 0, xctrls, xctrls_count);
}
-static inline int uvc_ctrl_rollback(struct uvc_video_chain *chain)
+static inline int uvc_ctrl_rollback(struct uvc_fh *handle)
{
- return __uvc_ctrl_commit(chain, 1);
+ return __uvc_ctrl_commit(handle, 1, NULL, 0);
}
extern int uvc_ctrl_get(struct uvc_video_chain *chain,
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index 2829d256e4b7..5327ad3a6390 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -37,7 +37,7 @@ struct v4l2_clip32 {
struct v4l2_window32 {
struct v4l2_rect w;
- enum v4l2_field field;
+ __u32 field; /* enum v4l2_field */
__u32 chromakey;
compat_caddr_t clips; /* actually struct v4l2_clip32 * */
__u32 clipcount;
@@ -147,7 +147,7 @@ static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp,
}
struct v4l2_format32 {
- enum v4l2_buf_type type;
+ __u32 type; /* enum v4l2_buf_type */
union {
struct v4l2_pix_format pix;
struct v4l2_pix_format_mplane pix_mp;
@@ -170,7 +170,7 @@ struct v4l2_format32 {
struct v4l2_create_buffers32 {
__u32 index;
__u32 count;
- enum v4l2_memory memory;
+ __u32 memory; /* enum v4l2_memory */
struct v4l2_format32 format;
__u32 reserved[8];
};
@@ -311,16 +311,16 @@ struct v4l2_plane32 {
struct v4l2_buffer32 {
__u32 index;
- enum v4l2_buf_type type;
+ __u32 type; /* enum v4l2_buf_type */
__u32 bytesused;
__u32 flags;
- enum v4l2_field field;
+ __u32 field; /* enum v4l2_field */
struct compat_timeval timestamp;
struct v4l2_timecode timecode;
__u32 sequence;
/* memory location */
- enum v4l2_memory memory;
+ __u32 memory; /* enum v4l2_memory */
union {
__u32 offset;
compat_long_t userptr;
@@ -1023,6 +1023,9 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
case VIDIOC_UNSUBSCRIBE_EVENT:
case VIDIOC_CREATE_BUFS32:
case VIDIOC_PREPARE_BUF32:
+ case VIDIOC_ENUM_DV_TIMINGS:
+ case VIDIOC_QUERY_DV_TIMINGS:
+ case VIDIOC_DV_TIMINGS_CAP:
ret = do_video_ioctl(file, cmd, arg);
break;
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index 18015c0a8d31..9abd9abd4502 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -230,6 +230,19 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Aperture Priority Mode",
NULL
};
+ static const char * const camera_exposure_metering[] = {
+ "Average",
+ "Center Weighted",
+ "Spot",
+ NULL
+ };
+ static const char * const camera_auto_focus_range[] = {
+ "Auto",
+ "Normal",
+ "Macro",
+ "Infinity",
+ NULL
+ };
static const char * const colorfx[] = {
"None",
"Black & White",
@@ -241,6 +254,47 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Grass Green",
"Skin Whiten",
"Vivid",
+ "Aqua",
+ "Art Freeze",
+ "Silhouette",
+ "Solarization",
+ "Antique",
+ "Set Cb/Cr",
+ NULL
+ };
+ static const char * const auto_n_preset_white_balance[] = {
+ "Manual",
+ "Auto",
+ "Incandescent",
+ "Fluorescent",
+ "Fluorescent H",
+ "Horizon",
+ "Daylight",
+ "Flash",
+ "Cloudy",
+ "Shade",
+ NULL,
+ };
+ static const char * const camera_iso_sensitivity_auto[] = {
+ "Manual",
+ "Auto",
+ NULL
+ };
+ static const char * const scene_mode[] = {
+ "None",
+ "Backlight",
+ "Beach/Snow",
+ "Candle Light",
+ "Dusk/Dawn",
+ "Fall Colors",
+ "Fireworks",
+ "Landscape",
+ "Night",
+ "Party/Indoor",
+ "Portrait",
+ "Sports",
+ "Sunset",
+ "Text",
NULL
};
static const char * const tune_preemphasis[] = {
@@ -410,8 +464,18 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return camera_power_line_frequency;
case V4L2_CID_EXPOSURE_AUTO:
return camera_exposure_auto;
+ case V4L2_CID_EXPOSURE_METERING:
+ return camera_exposure_metering;
+ case V4L2_CID_AUTO_FOCUS_RANGE:
+ return camera_auto_focus_range;
case V4L2_CID_COLORFX:
return colorfx;
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ return auto_n_preset_white_balance;
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ return camera_iso_sensitivity_auto;
+ case V4L2_CID_SCENE_MODE:
+ return scene_mode;
case V4L2_CID_TUNE_PREEMPHASIS:
return tune_preemphasis;
case V4L2_CID_FLASH_LED_MODE:
@@ -493,6 +557,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers";
case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers";
case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
+ case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr";
/* MPEG controls */
/* Keep the order of the 'case's the same as in videodev2.h! */
@@ -590,13 +655,26 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute";
case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute";
case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative";
- case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic";
+ case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic Continuous";
case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute";
case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative";
case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous";
case V4L2_CID_PRIVACY: return "Privacy";
case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute";
case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative";
+ case V4L2_CID_AUTO_EXPOSURE_BIAS: return "Auto Exposure, Bias";
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return "White Balance, Auto & Preset";
+ case V4L2_CID_WIDE_DYNAMIC_RANGE: return "Wide Dynamic Range";
+ case V4L2_CID_IMAGE_STABILIZATION: return "Image Stabilization";
+ case V4L2_CID_ISO_SENSITIVITY: return "ISO Sensitivity";
+ case V4L2_CID_ISO_SENSITIVITY_AUTO: return "ISO Sensitivity, Auto";
+ case V4L2_CID_EXPOSURE_METERING: return "Exposure, Metering Mode";
+ case V4L2_CID_SCENE_MODE: return "Scene Mode";
+ case V4L2_CID_3A_LOCK: return "3A Lock";
+ case V4L2_CID_AUTO_FOCUS_START: return "Auto Focus, Start";
+ case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop";
+ case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status";
+ case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range";
/* FM Radio Modulator control */
/* Keep the order of the 'case's the same as in videodev2.h! */
@@ -644,6 +722,17 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_JPEG_COMPRESSION_QUALITY: return "Compression Quality";
case V4L2_CID_JPEG_ACTIVE_MARKER: return "Active Markers";
+ /* Image source controls */
+ case V4L2_CID_IMAGE_SOURCE_CLASS: return "Image Source Controls";
+ case V4L2_CID_VBLANK: return "Vertical Blanking";
+ case V4L2_CID_HBLANK: return "Horizontal Blanking";
+ case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain";
+
+ /* Image processing controls */
+ case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls";
+ case V4L2_CID_LINK_FREQ: return "Link Frequency";
+ case V4L2_CID_PIXEL_RATE: return "Pixel Rate";
+
default:
return NULL;
}
@@ -688,6 +777,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
+ case V4L2_CID_WIDE_DYNAMIC_RANGE:
+ case V4L2_CID_IMAGE_STABILIZATION:
*type = V4L2_CTRL_TYPE_BOOLEAN;
*min = 0;
*max = *step = 1;
@@ -696,6 +787,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_TILT_RESET:
case V4L2_CID_FLASH_STROBE:
case V4L2_CID_FLASH_STROBE_STOP:
+ case V4L2_CID_AUTO_FOCUS_START:
+ case V4L2_CID_AUTO_FOCUS_STOP:
*type = V4L2_CTRL_TYPE_BUTTON;
*flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
*min = *max = *step = *def = 0;
@@ -719,7 +812,9 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_STREAM_TYPE:
case V4L2_CID_MPEG_STREAM_VBI_FMT:
case V4L2_CID_EXPOSURE_AUTO:
+ case V4L2_CID_AUTO_FOCUS_RANGE:
case V4L2_CID_COLORFX:
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
case V4L2_CID_TUNE_PREEMPHASIS:
case V4L2_CID_FLASH_LED_MODE:
case V4L2_CID_FLASH_STROBE_SOURCE:
@@ -733,18 +828,30 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ case V4L2_CID_EXPOSURE_METERING:
+ case V4L2_CID_SCENE_MODE:
*type = V4L2_CTRL_TYPE_MENU;
break;
+ case V4L2_CID_LINK_FREQ:
+ *type = V4L2_CTRL_TYPE_INTEGER_MENU;
+ break;
case V4L2_CID_RDS_TX_PS_NAME:
case V4L2_CID_RDS_TX_RADIO_TEXT:
*type = V4L2_CTRL_TYPE_STRING;
break;
+ case V4L2_CID_ISO_SENSITIVITY:
+ case V4L2_CID_AUTO_EXPOSURE_BIAS:
+ *type = V4L2_CTRL_TYPE_INTEGER_MENU;
+ break;
case V4L2_CID_USER_CLASS:
case V4L2_CID_CAMERA_CLASS:
case V4L2_CID_MPEG_CLASS:
case V4L2_CID_FM_TX_CLASS:
case V4L2_CID_FLASH_CLASS:
case V4L2_CID_JPEG_CLASS:
+ case V4L2_CID_IMAGE_SOURCE_CLASS:
+ case V4L2_CID_IMAGE_PROC_CLASS:
*type = V4L2_CTRL_TYPE_CTRL_CLASS;
/* You can neither read nor write these */
*flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
@@ -759,6 +866,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
break;
case V4L2_CID_FLASH_FAULT:
case V4L2_CID_JPEG_ACTIVE_MARKER:
+ case V4L2_CID_3A_LOCK:
+ case V4L2_CID_AUTO_FOCUS_STATUS:
*type = V4L2_CTRL_TYPE_BITMASK;
break;
case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
@@ -768,8 +877,12 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
break;
case V4L2_CID_MPEG_VIDEO_DEC_FRAME:
case V4L2_CID_MPEG_VIDEO_DEC_PTS:
+ *flags |= V4L2_CTRL_FLAG_VOLATILE;
+ /* Fall through */
+ case V4L2_CID_PIXEL_RATE:
*type = V4L2_CTRL_TYPE_INTEGER64;
- *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ *min = *max = *step = *def = 0;
break;
default:
*type = V4L2_CTRL_TYPE_INTEGER;
@@ -817,6 +930,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
*flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
break;
case V4L2_CID_FLASH_STROBE_STATUS:
+ case V4L2_CID_AUTO_FOCUS_STATUS:
case V4L2_CID_FLASH_READY:
*flags |= V4L2_CTRL_FLAG_READ_ONLY;
break;
@@ -852,7 +966,8 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change
ev->u.ctrl.value64 = ctrl->cur.val64;
ev->u.ctrl.minimum = ctrl->minimum;
ev->u.ctrl.maximum = ctrl->maximum;
- if (ctrl->type == V4L2_CTRL_TYPE_MENU)
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU
+ || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
ev->u.ctrl.step = 1;
else
ev->u.ctrl.step = ctrl->step;
@@ -1083,10 +1198,13 @@ static int validate_new_int(const struct v4l2_ctrl *ctrl, s32 *pval)
return 0;
case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
if (val < ctrl->minimum || val > ctrl->maximum)
return -ERANGE;
- if (ctrl->qmenu[val][0] == '\0' ||
- (ctrl->menu_skip_mask & (1 << val)))
+ if (ctrl->menu_skip_mask & (1 << val))
+ return -EINVAL;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU &&
+ ctrl->qmenu[val][0] == '\0')
return -EINVAL;
return 0;
@@ -1114,6 +1232,7 @@ static int validate_new(const struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c
case V4L2_CTRL_TYPE_INTEGER:
case V4L2_CTRL_TYPE_BOOLEAN:
case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
case V4L2_CTRL_TYPE_BITMASK:
case V4L2_CTRL_TYPE_BUTTON:
case V4L2_CTRL_TYPE_CTRL_CLASS:
@@ -1152,7 +1271,8 @@ static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err)
int v4l2_ctrl_handler_init(struct v4l2_ctrl_handler *hdl,
unsigned nr_of_controls_hint)
{
- mutex_init(&hdl->lock);
+ hdl->lock = &hdl->_lock;
+ mutex_init(hdl->lock);
INIT_LIST_HEAD(&hdl->ctrls);
INIT_LIST_HEAD(&hdl->ctrl_refs);
hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
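
The handler lock becoming a pointer (defaulting to the embedded _lock) presumably exists so that a driver can make several handlers serialize on one mutex; a sketch of that usage, under that assumption and with illustrative names:

static int share_ctrl_lock(struct v4l2_ctrl_handler *main_hdl,
			   struct v4l2_ctrl_handler *aux_hdl)
{
	int ret = v4l2_ctrl_handler_init(main_hdl, 16);

	if (ret)
		return ret;
	ret = v4l2_ctrl_handler_init(aux_hdl, 4);
	if (ret)
		return ret;
	/* Both handlers now take the same mutex; add controls afterwards. */
	aux_hdl->lock = main_hdl->lock;
	return 0;
}
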
@@ -1173,7 +1293,7 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
if (hdl == NULL || hdl->buckets == NULL)
return;
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
/* Free all nodes */
list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
list_del(&ref->node);
@@ -1190,7 +1310,7 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
hdl->buckets = NULL;
hdl->cached = NULL;
hdl->error = 0;
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
}
EXPORT_SYMBOL(v4l2_ctrl_handler_free);
@@ -1255,9 +1375,9 @@ static struct v4l2_ctrl_ref *find_ref_lock(
struct v4l2_ctrl_ref *ref = NULL;
if (hdl) {
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
ref = find_ref(hdl, id);
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
}
return ref;
}
@@ -1304,7 +1424,7 @@ static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
INIT_LIST_HEAD(&new_ref->node);
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
/* Add immediately at the end of the list if the list is empty, or if
the last element in the list has a lower ID.
@@ -1334,7 +1454,7 @@ insert_in_hash:
hdl->buckets[bucket] = new_ref;
unlock:
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
return 0;
}
@@ -1343,7 +1463,8 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, const char *name, enum v4l2_ctrl_type type,
s32 min, s32 max, u32 step, s32 def,
- u32 flags, const char * const *qmenu, void *priv)
+ u32 flags, const char * const *qmenu,
+ const s64 *qmenu_int, void *priv)
{
struct v4l2_ctrl *ctrl;
unsigned sz_extra = 0;
@@ -1356,6 +1477,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
(type == V4L2_CTRL_TYPE_INTEGER && step == 0) ||
(type == V4L2_CTRL_TYPE_BITMASK && max == 0) ||
(type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
+ (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL) ||
(type == V4L2_CTRL_TYPE_STRING && max == 0)) {
handler_set_err(hdl, -ERANGE);
return NULL;
@@ -1366,6 +1488,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
}
if ((type == V4L2_CTRL_TYPE_INTEGER ||
type == V4L2_CTRL_TYPE_MENU ||
+ type == V4L2_CTRL_TYPE_INTEGER_MENU ||
type == V4L2_CTRL_TYPE_BOOLEAN) &&
(def < min || def > max)) {
handler_set_err(hdl, -ERANGE);
@@ -1400,7 +1523,10 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
ctrl->minimum = min;
ctrl->maximum = max;
ctrl->step = step;
- ctrl->qmenu = qmenu;
+ if (type == V4L2_CTRL_TYPE_MENU)
+ ctrl->qmenu = qmenu;
+ else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ ctrl->qmenu_int = qmenu_int;
ctrl->priv = priv;
ctrl->cur.val = ctrl->val = ctrl->default_value = def;
@@ -1414,9 +1540,9 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
kfree(ctrl);
return NULL;
}
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
list_add_tail(&ctrl->node, &hdl->ctrls);
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
return ctrl;
}
@@ -1427,6 +1553,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl *ctrl;
const char *name = cfg->name;
const char * const *qmenu = cfg->qmenu;
+ const s64 *qmenu_int = cfg->qmenu_int;
enum v4l2_ctrl_type type = cfg->type;
u32 flags = cfg->flags;
s32 min = cfg->min;
@@ -1438,18 +1565,24 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
&def, &flags);
- is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU);
+ is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU ||
+ cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU);
if (is_menu)
WARN_ON(step);
else
WARN_ON(cfg->menu_skip_mask);
- if (is_menu && qmenu == NULL)
+ if (cfg->type == V4L2_CTRL_TYPE_MENU && qmenu == NULL)
qmenu = v4l2_ctrl_get_menu(cfg->id);
+ else if (cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU &&
+ qmenu_int == NULL) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
ctrl = v4l2_ctrl_new(hdl, cfg->ops, cfg->id, name,
type, min, max,
is_menu ? cfg->menu_skip_mask : step,
- def, flags, qmenu, priv);
+ def, flags, qmenu, qmenu_int, priv);
if (ctrl)
ctrl->is_private = cfg->is_private;
return ctrl;
@@ -1466,12 +1599,13 @@ struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
u32 flags;
v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
- if (type == V4L2_CTRL_TYPE_MENU) {
+ if (type == V4L2_CTRL_TYPE_MENU
+ || type == V4L2_CTRL_TYPE_INTEGER_MENU) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
return v4l2_ctrl_new(hdl, ops, id, name, type,
- min, max, step, def, flags, NULL, NULL);
+ min, max, step, def, flags, NULL, NULL, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std);
@@ -1493,10 +1627,31 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
return NULL;
}
return v4l2_ctrl_new(hdl, ops, id, name, type,
- 0, max, mask, def, flags, qmenu, NULL);
+ 0, max, mask, def, flags, qmenu, NULL, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
+/* Helper function for standard integer menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, s32 max, s32 def, const s64 *qmenu_int)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ s32 min;
+ s32 step;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type != V4L2_CTRL_TYPE_INTEGER_MENU) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, id, name, type,
+ 0, max, 0, def, flags, NULL, qmenu_int, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
+
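
A hedged sketch of how a sensor driver might use the new helper for V4L2_CID_LINK_FREQ (which v4l2_ctrl_fill now types as an integer menu); the frequency table and sensor_ctrl_ops are made up for illustration:

static const s64 sensor_link_freqs[] = {
	148500000,
	297000000,
};

static int sensor_init_ctrls(struct v4l2_ctrl_handler *hdl,
			     const struct v4l2_ctrl_ops *sensor_ctrl_ops)
{
	v4l2_ctrl_new_int_menu(hdl, sensor_ctrl_ops, V4L2_CID_LINK_FREQ,
			       ARRAY_SIZE(sensor_link_freqs) - 1, 0,
			       sensor_link_freqs);
	return hdl->error;
}
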
/* Add a control from another handler to this handler */
struct v4l2_ctrl *v4l2_ctrl_add_ctrl(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl *ctrl)
@@ -1525,7 +1680,7 @@ int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
return 0;
if (hdl->error)
return hdl->error;
- mutex_lock(&add->lock);
+ mutex_lock(add->lock);
list_for_each_entry(ref, &add->ctrl_refs, node) {
struct v4l2_ctrl *ctrl = ref->ctrl;
@@ -1539,7 +1694,7 @@ int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
if (ret)
break;
}
- mutex_unlock(&add->lock);
+ mutex_unlock(add->lock);
return ret;
}
EXPORT_SYMBOL(v4l2_ctrl_add_handler);
@@ -1659,6 +1814,9 @@ static void log_ctrl(const struct v4l2_ctrl *ctrl,
case V4L2_CTRL_TYPE_MENU:
printk(KERN_CONT "%s", ctrl->qmenu[ctrl->cur.val]);
break;
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ printk(KERN_CONT "%lld", ctrl->qmenu_int[ctrl->cur.val]);
+ break;
case V4L2_CTRL_TYPE_BITMASK:
printk(KERN_CONT "0x%08x", ctrl->cur.val);
break;
@@ -1700,11 +1858,11 @@ void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
len = strlen(prefix);
if (len && prefix[len - 1] != ' ')
colon = ": ";
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
list_for_each_entry(ctrl, &hdl->ctrls, node)
if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
log_ctrl(ctrl, prefix, colon);
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
}
EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
@@ -1716,7 +1874,7 @@ int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
if (hdl == NULL)
return 0;
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
list_for_each_entry(ctrl, &hdl->ctrls, node)
ctrl->done = false;
@@ -1741,7 +1899,7 @@ int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
if (ret)
break;
}
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
return ret;
}
EXPORT_SYMBOL(v4l2_ctrl_handler_setup);
@@ -1756,7 +1914,7 @@ int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
if (hdl == NULL)
return -EINVAL;
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
/* Try to find it */
ref = find_ref(hdl, id);
@@ -1781,7 +1939,7 @@ int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
break;
}
}
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
if (!ref)
return -EINVAL;
@@ -1795,7 +1953,8 @@ int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
qc->minimum = ctrl->minimum;
qc->maximum = ctrl->maximum;
qc->default_value = ctrl->default_value;
- if (ctrl->type == V4L2_CTRL_TYPE_MENU)
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU
+ || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
qc->step = 1;
else
qc->step = ctrl->step;
@@ -1825,16 +1984,33 @@ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
qm->reserved = 0;
/* Sanity checks */
- if (ctrl->qmenu == NULL ||
- i < ctrl->minimum || i > ctrl->maximum)
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_MENU:
+ if (ctrl->qmenu == NULL)
+ return -EINVAL;
+ break;
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (ctrl->qmenu_int == NULL)
+ return -EINVAL;
+ break;
+ default:
return -EINVAL;
+ }
+
+ if (i < ctrl->minimum || i > ctrl->maximum)
+ return -EINVAL;
+
/* Use mask to see if this menu item should be skipped */
if (ctrl->menu_skip_mask & (1 << i))
return -EINVAL;
/* Empty menu items should also be skipped */
- if (ctrl->qmenu[i] == NULL || ctrl->qmenu[i][0] == '\0')
- return -EINVAL;
- strlcpy(qm->name, ctrl->qmenu[i], sizeof(qm->name));
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
+ if (ctrl->qmenu[i] == NULL || ctrl->qmenu[i][0] == '\0')
+ return -EINVAL;
+ strlcpy(qm->name, ctrl->qmenu[i], sizeof(qm->name));
+ } else {
+ qm->value = ctrl->qmenu_int[i];
+ }
return 0;
}
EXPORT_SYMBOL(v4l2_querymenu);
@@ -1940,7 +2116,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
belong to the same cluster. */
/* This has to be done with the handler lock taken. */
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
/* First zero the helper field in the master control references */
for (i = 0; i < cs->count; i++)
@@ -1962,7 +2138,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
/* Point the mref helper to the current helper struct. */
mref->helper = h;
}
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
return 0;
}
@@ -1996,7 +2172,8 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
return class_check(hdl, cs->ctrl_class);
if (cs->count > ARRAY_SIZE(helper)) {
- helpers = kmalloc(sizeof(helper[0]) * cs->count, GFP_KERNEL);
+ helpers = kmalloc_array(cs->count, sizeof(helper[0]),
+ GFP_KERNEL);
if (helpers == NULL)
return -ENOMEM;
}
@@ -2218,7 +2395,8 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
return class_check(hdl, cs->ctrl_class);
if (cs->count > ARRAY_SIZE(helper)) {
- helpers = kmalloc(sizeof(helper[0]) * cs->count, GFP_KERNEL);
+ helpers = kmalloc_array(cs->count, sizeof(helper[0]),
+ GFP_KERNEL);
if (!helpers)
return -ENOMEM;
}
@@ -2381,9 +2559,13 @@ int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
}
EXPORT_SYMBOL(v4l2_ctrl_s_ctrl);
-void v4l2_ctrl_add_event(struct v4l2_ctrl *ctrl,
- struct v4l2_subscribed_event *sev)
+static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
+
+ if (ctrl == NULL)
+ return -EINVAL;
+
v4l2_ctrl_lock(ctrl);
list_add_tail(&sev->node, &ctrl->ev_subs);
if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS &&
@@ -2394,20 +2576,46 @@ void v4l2_ctrl_add_event(struct v4l2_ctrl *ctrl,
if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY))
changes |= V4L2_EVENT_CTRL_CH_VALUE;
fill_event(&ev, ctrl, changes);
+ /* Mark the queue as active, allowing this initial
+ event to be accepted. */
+ sev->elems = elems;
v4l2_event_queue_fh(sev->fh, &ev);
}
v4l2_ctrl_unlock(ctrl);
+ return 0;
}
-EXPORT_SYMBOL(v4l2_ctrl_add_event);
-void v4l2_ctrl_del_event(struct v4l2_ctrl *ctrl,
- struct v4l2_subscribed_event *sev)
+static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev)
{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
+
v4l2_ctrl_lock(ctrl);
list_del(&sev->node);
v4l2_ctrl_unlock(ctrl);
}
-EXPORT_SYMBOL(v4l2_ctrl_del_event);
+
+void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new)
+{
+ u32 old_changes = old->u.ctrl.changes;
+
+ old->u.ctrl = new->u.ctrl;
+ old->u.ctrl.changes |= old_changes;
+}
+EXPORT_SYMBOL(v4l2_ctrl_replace);
+
+void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new)
+{
+ new->u.ctrl.changes |= old->u.ctrl.changes;
+}
+EXPORT_SYMBOL(v4l2_ctrl_merge);
+
+const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops = {
+ .add = v4l2_ctrl_add_event,
+ .del = v4l2_ctrl_del_event,
+ .replace = v4l2_ctrl_replace,
+ .merge = v4l2_ctrl_merge,
+};
+EXPORT_SYMBOL(v4l2_ctrl_sub_ev_ops);
int v4l2_ctrl_log_status(struct file *file, void *fh)
{
@@ -2425,7 +2633,7 @@ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
struct v4l2_event_subscription *sub)
{
if (sub->type == V4L2_EVENT_CTRL)
- return v4l2_event_subscribe(fh, sub, 0);
+ return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops);
return -EINVAL;
}
EXPORT_SYMBOL(v4l2_ctrl_subscribe_event);
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 70bec548d904..5ccbd4629f9c 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -274,11 +274,12 @@ static ssize_t v4l2_read(struct file *filp, char __user *buf,
if (!vdev->fops->read)
return -EINVAL;
- if (vdev->lock && mutex_lock_interruptible(vdev->lock))
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags) &&
+ mutex_lock_interruptible(vdev->lock))
return -ERESTARTSYS;
if (video_is_registered(vdev))
ret = vdev->fops->read(filp, buf, sz, off);
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
return ret;
}
@@ -291,11 +292,12 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf,
if (!vdev->fops->write)
return -EINVAL;
- if (vdev->lock && mutex_lock_interruptible(vdev->lock))
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags) &&
+ mutex_lock_interruptible(vdev->lock))
return -ERESTARTSYS;
if (video_is_registered(vdev))
ret = vdev->fops->write(filp, buf, sz, off);
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
return ret;
}
@@ -307,11 +309,11 @@ static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
if (!vdev->fops->poll)
return DEFAULT_POLLMASK;
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_lock(vdev->lock);
if (video_is_registered(vdev))
ret = vdev->fops->poll(filp, poll);
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
return ret;
}
@@ -322,11 +324,19 @@ static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
int ret = -ENODEV;
if (vdev->fops->unlocked_ioctl) {
- if (vdev->lock && mutex_lock_interruptible(vdev->lock))
- return -ERESTARTSYS;
+ bool locked = false;
+
+ if (vdev->lock) {
+ /* always lock unless the cmd is marked as "don't use lock" */
+ locked = !v4l2_is_known_ioctl(cmd) ||
+ !test_bit(_IOC_NR(cmd), vdev->disable_locking);
+
+ if (locked && mutex_lock_interruptible(vdev->lock))
+ return -ERESTARTSYS;
+ }
if (video_is_registered(vdev))
ret = vdev->fops->unlocked_ioctl(filp, cmd, arg);
- if (vdev->lock)
+ if (locked)
mutex_unlock(vdev->lock);
} else if (vdev->fops->ioctl) {
/* This code path is a replacement for the BKL. It is a major
@@ -391,11 +401,12 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
if (!vdev->fops->mmap)
return ret;
- if (vdev->lock && mutex_lock_interruptible(vdev->lock))
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags) &&
+ mutex_lock_interruptible(vdev->lock))
return -ERESTARTSYS;
if (video_is_registered(vdev))
ret = vdev->fops->mmap(filp, vm);
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
return ret;
}
@@ -418,7 +429,8 @@ static int v4l2_open(struct inode *inode, struct file *filp)
video_get(vdev);
mutex_unlock(&videodev_lock);
if (vdev->fops->open) {
- if (vdev->lock && mutex_lock_interruptible(vdev->lock)) {
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags) &&
+ mutex_lock_interruptible(vdev->lock)) {
ret = -ERESTARTSYS;
goto err;
}
@@ -426,7 +438,7 @@ static int v4l2_open(struct inode *inode, struct file *filp)
ret = vdev->fops->open(filp);
else
ret = -ENODEV;
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
}
@@ -444,10 +456,10 @@ static int v4l2_release(struct inode *inode, struct file *filp)
int ret = 0;
if (vdev->fops->release) {
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_lock(vdev->lock);
vdev->fops->release(filp);
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
}
/* decrease the refcount unconditionally since the release()
@@ -508,6 +520,175 @@ static int get_index(struct video_device *vdev)
return find_first_zero_bit(used, VIDEO_NUM_DEVICES);
}
+#define SET_VALID_IOCTL(ops, cmd, op) \
+ if (ops->op) \
+ set_bit(_IOC_NR(cmd), valid_ioctls)
+
+/* This determines which ioctls are actually implemented in the driver.
+ It's a one-time thing which simplifies video_ioctl2 as it can just do
+ a bit test.
+
+ Note that drivers can override this by setting bits to 1 in
+ vdev->valid_ioctls. If an ioctl is marked as 1 when this function is
+ called, then that ioctl will actually be marked as unimplemented.
+
+ It does that by first setting up the local valid_ioctls bitmap, and
+ at the end doing:
+
+ vdev->valid_ioctls = valid_ioctls & ~(vdev->valid_ioctls)
+ */
+static void determine_valid_ioctls(struct video_device *vdev)
+{
+ DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
+ const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
+
+ bitmap_zero(valid_ioctls, BASE_VIDIOC_PRIVATE);
+
+ SET_VALID_IOCTL(ops, VIDIOC_QUERYCAP, vidioc_querycap);
+ if (ops->vidioc_g_priority ||
+ test_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags))
+ set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls);
+ if (ops->vidioc_s_priority ||
+ test_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags))
+ set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls);
+ if (ops->vidioc_enum_fmt_vid_cap ||
+ ops->vidioc_enum_fmt_vid_out ||
+ ops->vidioc_enum_fmt_vid_cap_mplane ||
+ ops->vidioc_enum_fmt_vid_out_mplane ||
+ ops->vidioc_enum_fmt_vid_overlay ||
+ ops->vidioc_enum_fmt_type_private)
+ set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
+ if (ops->vidioc_g_fmt_vid_cap ||
+ ops->vidioc_g_fmt_vid_out ||
+ ops->vidioc_g_fmt_vid_cap_mplane ||
+ ops->vidioc_g_fmt_vid_out_mplane ||
+ ops->vidioc_g_fmt_vid_overlay ||
+ ops->vidioc_g_fmt_vbi_cap ||
+ ops->vidioc_g_fmt_vid_out_overlay ||
+ ops->vidioc_g_fmt_vbi_out ||
+ ops->vidioc_g_fmt_sliced_vbi_cap ||
+ ops->vidioc_g_fmt_sliced_vbi_out ||
+ ops->vidioc_g_fmt_type_private)
+ set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
+ if (ops->vidioc_s_fmt_vid_cap ||
+ ops->vidioc_s_fmt_vid_out ||
+ ops->vidioc_s_fmt_vid_cap_mplane ||
+ ops->vidioc_s_fmt_vid_out_mplane ||
+ ops->vidioc_s_fmt_vid_overlay ||
+ ops->vidioc_s_fmt_vbi_cap ||
+ ops->vidioc_s_fmt_vid_out_overlay ||
+ ops->vidioc_s_fmt_vbi_out ||
+ ops->vidioc_s_fmt_sliced_vbi_cap ||
+ ops->vidioc_s_fmt_sliced_vbi_out ||
+ ops->vidioc_s_fmt_type_private)
+ set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
+ if (ops->vidioc_try_fmt_vid_cap ||
+ ops->vidioc_try_fmt_vid_out ||
+ ops->vidioc_try_fmt_vid_cap_mplane ||
+ ops->vidioc_try_fmt_vid_out_mplane ||
+ ops->vidioc_try_fmt_vid_overlay ||
+ ops->vidioc_try_fmt_vbi_cap ||
+ ops->vidioc_try_fmt_vid_out_overlay ||
+ ops->vidioc_try_fmt_vbi_out ||
+ ops->vidioc_try_fmt_sliced_vbi_cap ||
+ ops->vidioc_try_fmt_sliced_vbi_out ||
+ ops->vidioc_try_fmt_type_private)
+ set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
+ SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FBUF, vidioc_s_fbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
+ SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
+ if (vdev->tvnorms)
+ set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
+ if (ops->vidioc_g_std || vdev->current_norm)
+ set_bit(_IOC_NR(VIDIOC_G_STD), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERYSTD, vidioc_querystd);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
+ SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output);
+ SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output);
+ /* Note: the control handler can also be passed through the filehandle,
+ and that can't be tested here. If the bit for these control ioctls
+ is set, then the ioctl is valid. But if it is 0, then it can still
+ be valid if the filehandle passed the control handler. */
+ if (vdev->ctrl_handler || ops->vidioc_queryctrl)
+ set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_g_ctrl || ops->vidioc_g_ext_ctrls)
+ set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_s_ctrl || ops->vidioc_s_ext_ctrls)
+ set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls)
+ set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls)
+ set_bit(_IOC_NR(VIDIOC_S_EXT_CTRLS), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_try_ext_ctrls)
+ set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_querymenu)
+ set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDIO, vidioc_enumaudio);
+ SET_VALID_IOCTL(ops, VIDIOC_G_AUDIO, vidioc_g_audio);
+ SET_VALID_IOCTL(ops, VIDIOC_S_AUDIO, vidioc_s_audio);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDOUT, vidioc_enumaudout);
+ SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout);
+ SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout);
+ SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator);
+ SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator);
+ if (ops->vidioc_g_crop || ops->vidioc_g_selection)
+ set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls);
+ if (ops->vidioc_s_crop || ops->vidioc_s_selection)
+ set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
+ SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
+ if (ops->vidioc_cropcap || ops->vidioc_g_selection)
+ set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp);
+ SET_VALID_IOCTL(ops, VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp);
+ SET_VALID_IOCTL(ops, VIDIOC_G_ENC_INDEX, vidioc_g_enc_index);
+ SET_VALID_IOCTL(ops, VIDIOC_ENCODER_CMD, vidioc_encoder_cmd);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd);
+ SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
+ if (ops->vidioc_g_parm || vdev->current_norm)
+ set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
+ SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
+ SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
+ SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status);
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ SET_VALID_IOCTL(ops, VIDIOC_DBG_G_REGISTER, vidioc_g_register);
+ SET_VALID_IOCTL(ops, VIDIOC_DBG_S_REGISTER, vidioc_s_register);
+#endif
+ SET_VALID_IOCTL(ops, VIDIOC_DBG_G_CHIP_IDENT, vidioc_g_chip_ident);
+ SET_VALID_IOCTL(ops, VIDIOC_S_HW_FREQ_SEEK, vidioc_s_hw_freq_seek);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_PRESETS, vidioc_enum_dv_presets);
+ SET_VALID_IOCTL(ops, VIDIOC_S_DV_PRESET, vidioc_s_dv_preset);
+ SET_VALID_IOCTL(ops, VIDIOC_G_DV_PRESET, vidioc_g_dv_preset);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_PRESET, vidioc_query_dv_preset);
+ SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
+ /* yes, really vidioc_subscribe_event */
+ SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
+ SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
+ SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event);
+ SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);
+ SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
+ bitmap_andnot(vdev->valid_ioctls, valid_ioctls, vdev->valid_ioctls,
+ BASE_VIDIOC_PRIVATE);
+}
+
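
Per the comment above, the bitmap is driver-overridable: a bit already set in vdev->valid_ioctls when this function runs ends up reported as unimplemented. A sketch of that override, done before video_register_device() and with an illustrative ioctl:

	/* Hide VIDIOC_G_STD on this node even though ops->vidioc_g_std exists. */
	set_bit(_IOC_NR(VIDIOC_G_STD), vdev->valid_ioctls);
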
/**
* __video_register_device - register video4linux devices
* @vdev: video device structure we want to register
@@ -654,6 +835,13 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
WARN_ON(video_device[vdev->minor] != NULL);
vdev->index = get_index(vdev);
mutex_unlock(&videodev_lock);
+ /* If no lock was passed, then make sure the LOCK_ALL_FOPS bit is
+  * clear and warn if it wasn't. */
+ if (vdev->lock == NULL)
+ WARN_ON(test_and_clear_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags));
+
+ if (vdev->ioctl_ops)
+ determine_valid_ioctls(vdev);
/* Part 3: Initialize the character device */
vdev->cdev = cdev_alloc();
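
Taken together, the v4l2-dev.c changes mean that the set of ioctls a device node will accept is computed once at registration time: only the vidioc_* callbacks actually present in the driver's v4l2_ioctl_ops (plus the special cases handled explicitly above) get a bit in vdev->valid_ioctls. A rough driver-side sketch of what feeds determine_valid_ioctls(); the mycam_* handlers are hypothetical:

#include <media/v4l2-ioctl.h>

/* Assigned to vdev->ioctl_ops before video_register_device().  Only
 * these callbacks are set, so only VIDIOC_QUERYCAP and the capture
 * G/S_FMT ioctls end up in vdev->valid_ioctls; everything else is
 * answered with -ENOTTY before the driver is ever entered. */
static const struct v4l2_ioctl_ops mycam_ioctl_ops = {
        .vidioc_querycap      = mycam_querycap,
        .vidioc_g_fmt_vid_cap = mycam_g_fmt_vid_cap,
        .vidioc_s_fmt_vid_cap = mycam_s_fmt_vid_cap,
};
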
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
index c26ad9637143..ef2a33c94045 100644
--- a/drivers/media/video/v4l2-event.c
+++ b/drivers/media/video/v4l2-event.c
@@ -25,7 +25,6 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
-#include <media/v4l2-ctrls.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -120,6 +119,14 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
if (sev == NULL)
return;
+ /*
+ * If the event has been added to the fh->subscribed list, but its
+ * add op has not completed yet, elems will be 0; treat this as
+ * not being subscribed.
+ */
+ if (!sev->elems)
+ return;
+
/* Increase event sequence number on fh. */
fh->sequence++;
@@ -132,14 +139,14 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
sev->first = sev_pos(sev, 1);
fh->navailable--;
if (sev->elems == 1) {
- if (sev->replace) {
- sev->replace(&kev->event, ev);
+ if (sev->ops && sev->ops->replace) {
+ sev->ops->replace(&kev->event, ev);
copy_payload = false;
}
- } else if (sev->merge) {
+ } else if (sev->ops && sev->ops->merge) {
struct v4l2_kevent *second_oldest =
sev->events + sev_pos(sev, 0);
- sev->merge(&kev->event, &second_oldest->event);
+ sev->ops->merge(&kev->event, &second_oldest->event);
}
}
@@ -195,24 +202,11 @@ int v4l2_event_pending(struct v4l2_fh *fh)
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
-static void ctrls_replace(struct v4l2_event *old, const struct v4l2_event *new)
-{
- u32 old_changes = old->u.ctrl.changes;
-
- old->u.ctrl = new->u.ctrl;
- old->u.ctrl.changes |= old_changes;
-}
-
-static void ctrls_merge(const struct v4l2_event *old, struct v4l2_event *new)
-{
- new->u.ctrl.changes |= old->u.ctrl.changes;
-}
-
int v4l2_event_subscribe(struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub, unsigned elems)
+ struct v4l2_event_subscription *sub, unsigned elems,
+ const struct v4l2_subscribed_event_ops *ops)
{
struct v4l2_subscribed_event *sev, *found_ev;
- struct v4l2_ctrl *ctrl = NULL;
unsigned long flags;
unsigned i;
@@ -221,11 +215,6 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
if (elems < 1)
elems = 1;
- if (sub->type == V4L2_EVENT_CTRL) {
- ctrl = v4l2_ctrl_find(fh->ctrl_handler, sub->id);
- if (ctrl == NULL)
- return -EINVAL;
- }
sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
if (!sev)
@@ -236,11 +225,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
sev->id = sub->id;
sev->flags = sub->flags;
sev->fh = fh;
- sev->elems = elems;
- if (ctrl) {
- sev->replace = ctrls_replace;
- sev->merge = ctrls_merge;
- }
+ sev->ops = ops;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -248,11 +233,22 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- /* v4l2_ctrl_add_event uses a mutex, so do this outside the spin lock */
- if (found_ev)
+ if (found_ev) {
kfree(sev);
- else if (ctrl)
- v4l2_ctrl_add_event(ctrl, sev);
+ return 0; /* Already listening */
+ }
+
+ if (sev->ops && sev->ops->add) {
+ int ret = sev->ops->add(sev, elems);
+ if (ret) {
+ sev->ops = NULL;
+ v4l2_event_unsubscribe(fh, sub);
+ return ret;
+ }
+ }
+
+ /* Mark as ready for use */
+ sev->elems = elems;
return 0;
}
@@ -306,12 +302,9 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
}
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- if (sev && sev->type == V4L2_EVENT_CTRL) {
- struct v4l2_ctrl *ctrl = v4l2_ctrl_find(fh->ctrl_handler, sev->id);
- if (ctrl)
- v4l2_ctrl_del_event(ctrl, sev);
- }
+ if (sev && sev->ops && sev->ops->del)
+ sev->ops->del(sev);
kfree(sev);
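
On the v4l2-event.c side, the control-specific replace/merge callbacks move behind a caller-supplied struct v4l2_subscribed_event_ops, and v4l2_event_subscribe() gains an ops argument. A minimal sketch of a subscriber using the new interface; the my_* names are placeholders for a driver-private event source:

#include <media/v4l2-event.h>

static int my_ev_add(struct v4l2_subscribed_event *sev, unsigned elems)
{
        /* runs after the subscription is on fh->subscribed but before
         * sev->elems is set, i.e. before events can be queued to it */
        return 0;
}

static void my_ev_del(struct v4l2_subscribed_event *sev)
{
        /* undo whatever my_ev_add() set up */
}

static const struct v4l2_subscribed_event_ops my_ev_ops = {
        .add = my_ev_add,
        .del = my_ev_del,
};

static int my_subscribe_event(struct v4l2_fh *fh,
                              struct v4l2_event_subscription *sub)
{
        return v4l2_event_subscribe(fh, sub, 8, &my_ev_ops);
}

The queue depth of 8 is an arbitrary choice here; v4l2_event_subscribe() clamps anything below 1 up to 1.
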
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 5b2ec1fd2d0a..91be4e871f43 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -55,19 +55,6 @@
memset((u8 *)(p) + offsetof(typeof(*(p)), field) + sizeof((p)->field), \
0, sizeof(*(p)) - offsetof(typeof(*(p)), field) - sizeof((p)->field))
-#define have_fmt_ops(foo) ( \
- ops->vidioc_##foo##_fmt_vid_cap || \
- ops->vidioc_##foo##_fmt_vid_out || \
- ops->vidioc_##foo##_fmt_vid_cap_mplane || \
- ops->vidioc_##foo##_fmt_vid_out_mplane || \
- ops->vidioc_##foo##_fmt_vid_overlay || \
- ops->vidioc_##foo##_fmt_vbi_cap || \
- ops->vidioc_##foo##_fmt_vid_out_overlay || \
- ops->vidioc_##foo##_fmt_vbi_out || \
- ops->vidioc_##foo##_fmt_sliced_vbi_cap || \
- ops->vidioc_##foo##_fmt_sliced_vbi_out || \
- ops->vidioc_##foo##_fmt_type_private)
-
struct std_descr {
v4l2_std_id std;
const char *descr;
@@ -195,93 +182,118 @@ static const char *v4l2_memory_names[] = {
/* ------------------------------------------------------------------ */
/* debug help functions */
-static const char *v4l2_ioctls[] = {
- [_IOC_NR(VIDIOC_QUERYCAP)] = "VIDIOC_QUERYCAP",
- [_IOC_NR(VIDIOC_RESERVED)] = "VIDIOC_RESERVED",
- [_IOC_NR(VIDIOC_ENUM_FMT)] = "VIDIOC_ENUM_FMT",
- [_IOC_NR(VIDIOC_G_FMT)] = "VIDIOC_G_FMT",
- [_IOC_NR(VIDIOC_S_FMT)] = "VIDIOC_S_FMT",
- [_IOC_NR(VIDIOC_REQBUFS)] = "VIDIOC_REQBUFS",
- [_IOC_NR(VIDIOC_QUERYBUF)] = "VIDIOC_QUERYBUF",
- [_IOC_NR(VIDIOC_G_FBUF)] = "VIDIOC_G_FBUF",
- [_IOC_NR(VIDIOC_S_FBUF)] = "VIDIOC_S_FBUF",
- [_IOC_NR(VIDIOC_OVERLAY)] = "VIDIOC_OVERLAY",
- [_IOC_NR(VIDIOC_QBUF)] = "VIDIOC_QBUF",
- [_IOC_NR(VIDIOC_DQBUF)] = "VIDIOC_DQBUF",
- [_IOC_NR(VIDIOC_STREAMON)] = "VIDIOC_STREAMON",
- [_IOC_NR(VIDIOC_STREAMOFF)] = "VIDIOC_STREAMOFF",
- [_IOC_NR(VIDIOC_G_PARM)] = "VIDIOC_G_PARM",
- [_IOC_NR(VIDIOC_S_PARM)] = "VIDIOC_S_PARM",
- [_IOC_NR(VIDIOC_G_STD)] = "VIDIOC_G_STD",
- [_IOC_NR(VIDIOC_S_STD)] = "VIDIOC_S_STD",
- [_IOC_NR(VIDIOC_ENUMSTD)] = "VIDIOC_ENUMSTD",
- [_IOC_NR(VIDIOC_ENUMINPUT)] = "VIDIOC_ENUMINPUT",
- [_IOC_NR(VIDIOC_G_CTRL)] = "VIDIOC_G_CTRL",
- [_IOC_NR(VIDIOC_S_CTRL)] = "VIDIOC_S_CTRL",
- [_IOC_NR(VIDIOC_G_TUNER)] = "VIDIOC_G_TUNER",
- [_IOC_NR(VIDIOC_S_TUNER)] = "VIDIOC_S_TUNER",
- [_IOC_NR(VIDIOC_G_AUDIO)] = "VIDIOC_G_AUDIO",
- [_IOC_NR(VIDIOC_S_AUDIO)] = "VIDIOC_S_AUDIO",
- [_IOC_NR(VIDIOC_QUERYCTRL)] = "VIDIOC_QUERYCTRL",
- [_IOC_NR(VIDIOC_QUERYMENU)] = "VIDIOC_QUERYMENU",
- [_IOC_NR(VIDIOC_G_INPUT)] = "VIDIOC_G_INPUT",
- [_IOC_NR(VIDIOC_S_INPUT)] = "VIDIOC_S_INPUT",
- [_IOC_NR(VIDIOC_G_OUTPUT)] = "VIDIOC_G_OUTPUT",
- [_IOC_NR(VIDIOC_S_OUTPUT)] = "VIDIOC_S_OUTPUT",
- [_IOC_NR(VIDIOC_ENUMOUTPUT)] = "VIDIOC_ENUMOUTPUT",
- [_IOC_NR(VIDIOC_G_AUDOUT)] = "VIDIOC_G_AUDOUT",
- [_IOC_NR(VIDIOC_S_AUDOUT)] = "VIDIOC_S_AUDOUT",
- [_IOC_NR(VIDIOC_G_MODULATOR)] = "VIDIOC_G_MODULATOR",
- [_IOC_NR(VIDIOC_S_MODULATOR)] = "VIDIOC_S_MODULATOR",
- [_IOC_NR(VIDIOC_G_FREQUENCY)] = "VIDIOC_G_FREQUENCY",
- [_IOC_NR(VIDIOC_S_FREQUENCY)] = "VIDIOC_S_FREQUENCY",
- [_IOC_NR(VIDIOC_CROPCAP)] = "VIDIOC_CROPCAP",
- [_IOC_NR(VIDIOC_G_CROP)] = "VIDIOC_G_CROP",
- [_IOC_NR(VIDIOC_S_CROP)] = "VIDIOC_S_CROP",
- [_IOC_NR(VIDIOC_G_SELECTION)] = "VIDIOC_G_SELECTION",
- [_IOC_NR(VIDIOC_S_SELECTION)] = "VIDIOC_S_SELECTION",
- [_IOC_NR(VIDIOC_G_JPEGCOMP)] = "VIDIOC_G_JPEGCOMP",
- [_IOC_NR(VIDIOC_S_JPEGCOMP)] = "VIDIOC_S_JPEGCOMP",
- [_IOC_NR(VIDIOC_QUERYSTD)] = "VIDIOC_QUERYSTD",
- [_IOC_NR(VIDIOC_TRY_FMT)] = "VIDIOC_TRY_FMT",
- [_IOC_NR(VIDIOC_ENUMAUDIO)] = "VIDIOC_ENUMAUDIO",
- [_IOC_NR(VIDIOC_ENUMAUDOUT)] = "VIDIOC_ENUMAUDOUT",
- [_IOC_NR(VIDIOC_G_PRIORITY)] = "VIDIOC_G_PRIORITY",
- [_IOC_NR(VIDIOC_S_PRIORITY)] = "VIDIOC_S_PRIORITY",
- [_IOC_NR(VIDIOC_G_SLICED_VBI_CAP)] = "VIDIOC_G_SLICED_VBI_CAP",
- [_IOC_NR(VIDIOC_LOG_STATUS)] = "VIDIOC_LOG_STATUS",
- [_IOC_NR(VIDIOC_G_EXT_CTRLS)] = "VIDIOC_G_EXT_CTRLS",
- [_IOC_NR(VIDIOC_S_EXT_CTRLS)] = "VIDIOC_S_EXT_CTRLS",
- [_IOC_NR(VIDIOC_TRY_EXT_CTRLS)] = "VIDIOC_TRY_EXT_CTRLS",
-#if 1
- [_IOC_NR(VIDIOC_ENUM_FRAMESIZES)] = "VIDIOC_ENUM_FRAMESIZES",
- [_IOC_NR(VIDIOC_ENUM_FRAMEINTERVALS)] = "VIDIOC_ENUM_FRAMEINTERVALS",
- [_IOC_NR(VIDIOC_G_ENC_INDEX)] = "VIDIOC_G_ENC_INDEX",
- [_IOC_NR(VIDIOC_ENCODER_CMD)] = "VIDIOC_ENCODER_CMD",
- [_IOC_NR(VIDIOC_TRY_ENCODER_CMD)] = "VIDIOC_TRY_ENCODER_CMD",
-
- [_IOC_NR(VIDIOC_DECODER_CMD)] = "VIDIOC_DECODER_CMD",
- [_IOC_NR(VIDIOC_TRY_DECODER_CMD)] = "VIDIOC_TRY_DECODER_CMD",
- [_IOC_NR(VIDIOC_DBG_S_REGISTER)] = "VIDIOC_DBG_S_REGISTER",
- [_IOC_NR(VIDIOC_DBG_G_REGISTER)] = "VIDIOC_DBG_G_REGISTER",
-
- [_IOC_NR(VIDIOC_DBG_G_CHIP_IDENT)] = "VIDIOC_DBG_G_CHIP_IDENT",
- [_IOC_NR(VIDIOC_S_HW_FREQ_SEEK)] = "VIDIOC_S_HW_FREQ_SEEK",
+
+struct v4l2_ioctl_info {
+ unsigned int ioctl;
+ u16 flags;
+ const char * const name;
+};
+
+/* This ioctl needs a priority check */
+#define INFO_FL_PRIO (1 << 0)
+/* This ioctl can still be valid if the filehandle provides a control handler. */
+#define INFO_FL_CTRL (1 << 1)
+
+#define IOCTL_INFO(_ioctl, _flags) [_IOC_NR(_ioctl)] = { \
+ .ioctl = _ioctl, \
+ .flags = _flags, \
+ .name = #_ioctl, \
+}
+
+static struct v4l2_ioctl_info v4l2_ioctls[] = {
+ IOCTL_INFO(VIDIOC_QUERYCAP, 0),
+ IOCTL_INFO(VIDIOC_ENUM_FMT, 0),
+ IOCTL_INFO(VIDIOC_G_FMT, 0),
+ IOCTL_INFO(VIDIOC_S_FMT, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_REQBUFS, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_QUERYBUF, 0),
+ IOCTL_INFO(VIDIOC_G_FBUF, 0),
+ IOCTL_INFO(VIDIOC_S_FBUF, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_OVERLAY, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_QBUF, 0),
+ IOCTL_INFO(VIDIOC_DQBUF, 0),
+ IOCTL_INFO(VIDIOC_STREAMON, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_STREAMOFF, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_PARM, 0),
+ IOCTL_INFO(VIDIOC_S_PARM, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_STD, 0),
+ IOCTL_INFO(VIDIOC_S_STD, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_ENUMSTD, 0),
+ IOCTL_INFO(VIDIOC_ENUMINPUT, 0),
+ IOCTL_INFO(VIDIOC_G_CTRL, INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_S_CTRL, INFO_FL_PRIO | INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_G_TUNER, 0),
+ IOCTL_INFO(VIDIOC_S_TUNER, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_AUDIO, 0),
+ IOCTL_INFO(VIDIOC_S_AUDIO, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_QUERYCTRL, INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_QUERYMENU, INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_G_INPUT, 0),
+ IOCTL_INFO(VIDIOC_S_INPUT, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_OUTPUT, 0),
+ IOCTL_INFO(VIDIOC_S_OUTPUT, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_ENUMOUTPUT, 0),
+ IOCTL_INFO(VIDIOC_G_AUDOUT, 0),
+ IOCTL_INFO(VIDIOC_S_AUDOUT, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_MODULATOR, 0),
+ IOCTL_INFO(VIDIOC_S_MODULATOR, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_FREQUENCY, 0),
+ IOCTL_INFO(VIDIOC_S_FREQUENCY, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_CROPCAP, 0),
+ IOCTL_INFO(VIDIOC_G_CROP, 0),
+ IOCTL_INFO(VIDIOC_S_CROP, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_SELECTION, 0),
+ IOCTL_INFO(VIDIOC_S_SELECTION, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_JPEGCOMP, 0),
+ IOCTL_INFO(VIDIOC_S_JPEGCOMP, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_QUERYSTD, 0),
+ IOCTL_INFO(VIDIOC_TRY_FMT, 0),
+ IOCTL_INFO(VIDIOC_ENUMAUDIO, 0),
+ IOCTL_INFO(VIDIOC_ENUMAUDOUT, 0),
+ IOCTL_INFO(VIDIOC_G_PRIORITY, 0),
+ IOCTL_INFO(VIDIOC_S_PRIORITY, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_SLICED_VBI_CAP, 0),
+ IOCTL_INFO(VIDIOC_LOG_STATUS, 0),
+ IOCTL_INFO(VIDIOC_G_EXT_CTRLS, INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_S_EXT_CTRLS, INFO_FL_PRIO | INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, 0),
+ IOCTL_INFO(VIDIOC_ENUM_FRAMESIZES, 0),
+ IOCTL_INFO(VIDIOC_ENUM_FRAMEINTERVALS, 0),
+ IOCTL_INFO(VIDIOC_G_ENC_INDEX, 0),
+ IOCTL_INFO(VIDIOC_ENCODER_CMD, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_TRY_ENCODER_CMD, 0),
+ IOCTL_INFO(VIDIOC_DECODER_CMD, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_TRY_DECODER_CMD, 0),
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ IOCTL_INFO(VIDIOC_DBG_S_REGISTER, 0),
+ IOCTL_INFO(VIDIOC_DBG_G_REGISTER, 0),
#endif
- [_IOC_NR(VIDIOC_ENUM_DV_PRESETS)] = "VIDIOC_ENUM_DV_PRESETS",
- [_IOC_NR(VIDIOC_S_DV_PRESET)] = "VIDIOC_S_DV_PRESET",
- [_IOC_NR(VIDIOC_G_DV_PRESET)] = "VIDIOC_G_DV_PRESET",
- [_IOC_NR(VIDIOC_QUERY_DV_PRESET)] = "VIDIOC_QUERY_DV_PRESET",
- [_IOC_NR(VIDIOC_S_DV_TIMINGS)] = "VIDIOC_S_DV_TIMINGS",
- [_IOC_NR(VIDIOC_G_DV_TIMINGS)] = "VIDIOC_G_DV_TIMINGS",
- [_IOC_NR(VIDIOC_DQEVENT)] = "VIDIOC_DQEVENT",
- [_IOC_NR(VIDIOC_SUBSCRIBE_EVENT)] = "VIDIOC_SUBSCRIBE_EVENT",
- [_IOC_NR(VIDIOC_UNSUBSCRIBE_EVENT)] = "VIDIOC_UNSUBSCRIBE_EVENT",
- [_IOC_NR(VIDIOC_CREATE_BUFS)] = "VIDIOC_CREATE_BUFS",
- [_IOC_NR(VIDIOC_PREPARE_BUF)] = "VIDIOC_PREPARE_BUF",
+ IOCTL_INFO(VIDIOC_DBG_G_CHIP_IDENT, 0),
+ IOCTL_INFO(VIDIOC_S_HW_FREQ_SEEK, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_ENUM_DV_PRESETS, 0),
+ IOCTL_INFO(VIDIOC_S_DV_PRESET, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_DV_PRESET, 0),
+ IOCTL_INFO(VIDIOC_QUERY_DV_PRESET, 0),
+ IOCTL_INFO(VIDIOC_S_DV_TIMINGS, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_DV_TIMINGS, 0),
+ IOCTL_INFO(VIDIOC_DQEVENT, 0),
+ IOCTL_INFO(VIDIOC_SUBSCRIBE_EVENT, 0),
+ IOCTL_INFO(VIDIOC_UNSUBSCRIBE_EVENT, 0),
+ IOCTL_INFO(VIDIOC_CREATE_BUFS, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_PREPARE_BUF, 0),
+ IOCTL_INFO(VIDIOC_ENUM_DV_TIMINGS, 0),
+ IOCTL_INFO(VIDIOC_QUERY_DV_TIMINGS, 0),
+ IOCTL_INFO(VIDIOC_DV_TIMINGS_CAP, 0),
};
#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
+bool v4l2_is_known_ioctl(unsigned int cmd)
+{
+ if (_IOC_NR(cmd) >= V4L2_IOCTLS)
+ return false;
+ return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd;
+}
+
/* Common ioctl debug function. This function can be used by
external ioctl callers as well as the internal V4L ioctl handling */
void v4l_printk_ioctl(unsigned int cmd)
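
One consequence of the INFO_FL_CTRL flag in the table above: a driver that uses the control framework exclusively can leave vidioc_g_ctrl/vidioc_s_ctrl and friends unset, and the control ioctls remain valid as long as the filehandle carries a control handler, which is exactly what the valid_ioctls check later in this patch tests for. A hedged sketch of the open() side; struct mycam_dev and its fields are assumptions, a real driver would normally embed the v4l2_fh in its own filehandle structure, and it is assumed (as in this kernel) that v4l2_fh_init() picks up the control handler from the video_device:

#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>

static int mycam_open(struct file *file)
{
        struct mycam_dev *dev = video_drvdata(file);
        struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);

        if (!fh)
                return -ENOMEM;
        /* the INFO_FL_CTRL path checks fh->ctrl_handler, which
         * v4l2_fh_init() takes from dev->vdev.ctrl_handler */
        v4l2_fh_init(fh, &dev->vdev);
        file->private_data = fh;
        v4l2_fh_add(fh);
        return 0;
}
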
@@ -297,7 +309,7 @@ void v4l_printk_ioctl(unsigned int cmd)
type = "v4l2";
break;
}
- printk("%s", v4l2_ioctls[_IOC_NR(cmd)]);
+ printk("%s", v4l2_ioctls[_IOC_NR(cmd)].name);
return;
default:
type = "unknown";
@@ -359,6 +371,34 @@ static inline void dbgrect(struct video_device *vfd, char *s,
r->width, r->height);
};
+static void dbgtimings(struct video_device *vfd,
+ const struct v4l2_dv_timings *p)
+{
+ switch (p->type) {
+ case V4L2_DV_BT_656_1120:
+ dbgarg2("bt-656/1120:interlaced=%d,"
+ " pixelclock=%lld,"
+ " width=%d, height=%d, polarities=%x,"
+ " hfrontporch=%d, hsync=%d,"
+ " hbackporch=%d, vfrontporch=%d,"
+ " vsync=%d, vbackporch=%d,"
+ " il_vfrontporch=%d, il_vsync=%d,"
+ " il_vbackporch=%d, standards=%x, flags=%x\n",
+ p->bt.interlaced, p->bt.pixelclock,
+ p->bt.width, p->bt.height,
+ p->bt.polarities, p->bt.hfrontporch,
+ p->bt.hsync, p->bt.hbackporch,
+ p->bt.vfrontporch, p->bt.vsync,
+ p->bt.vbackporch, p->bt.il_vfrontporch,
+ p->bt.il_vsync, p->bt.il_vbackporch,
+ p->bt.standards, p->bt.flags);
+ break;
+ default:
+ dbgarg2("Unknown type %d!\n", p->type);
+ break;
+ }
+}
+
static inline void v4l_print_pix_fmt(struct video_device *vfd,
struct v4l2_pix_format *fmt)
{
@@ -504,7 +544,6 @@ static long __video_do_ioctl(struct file *file,
void *fh = file->private_data;
struct v4l2_fh *vfh = NULL;
int use_fh_prio = 0;
- long ret_prio = 0;
long ret = -ENOTTY;
if (ops == NULL) {
@@ -513,19 +552,30 @@ static long __video_do_ioctl(struct file *file,
return ret;
}
- if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
- !(vfd->debug & V4L2_DEBUG_IOCTL_ARG)) {
- v4l_print_ioctl(vfd->name, cmd);
- printk(KERN_CONT "\n");
- }
-
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
vfh = file->private_data;
use_fh_prio = test_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
}
- if (use_fh_prio)
- ret_prio = v4l2_prio_check(vfd->prio, vfh->prio);
+ if (v4l2_is_known_ioctl(cmd)) {
+ struct v4l2_ioctl_info *info = &v4l2_ioctls[_IOC_NR(cmd)];
+
+ if (!test_bit(_IOC_NR(cmd), vfd->valid_ioctls) &&
+ !((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler))
+ return -ENOTTY;
+
+ if (use_fh_prio && (info->flags & INFO_FL_PRIO)) {
+ ret = v4l2_prio_check(vfd->prio, vfh->prio);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
+ !(vfd->debug & V4L2_DEBUG_IOCTL_ARG)) {
+ v4l_print_ioctl(vfd->name, cmd);
+ printk(KERN_CONT "\n");
+ }
switch (cmd) {
@@ -534,9 +584,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_capability *cap = (struct v4l2_capability *)arg;
- if (!ops->vidioc_querycap)
- break;
-
cap->version = LINUX_VERSION_CODE;
ret = ops->vidioc_querycap(file, fh, cap);
if (!ret)
@@ -570,14 +617,11 @@ static long __video_do_ioctl(struct file *file,
{
enum v4l2_priority *p = arg;
- if (!ops->vidioc_s_priority && !use_fh_prio)
- break;
dbgarg(cmd, "setting priority to %d\n", *p);
if (ops->vidioc_s_priority)
ret = ops->vidioc_s_priority(file, fh, *p);
else
- ret = ret_prio ? ret_prio :
- v4l2_prio_change(&vfd->v4l2_dev->prio,
+ ret = v4l2_prio_change(&vfd->v4l2_dev->prio,
&vfh->prio, *p);
break;
}
@@ -587,6 +631,7 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_fmtdesc *f = arg;
+ ret = -EINVAL;
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
if (likely(ops->vidioc_enum_fmt_vid_cap))
@@ -619,7 +664,7 @@ static long __video_do_ioctl(struct file *file,
default:
break;
}
- if (likely (!ret))
+ if (likely(!ret))
dbgarg(cmd, "index=%d, type=%d, flags=%d, "
"pixelformat=%c%c%c%c, description='%s'\n",
f->index, f->type, f->flags,
@@ -628,14 +673,6 @@ static long __video_do_ioctl(struct file *file,
(f->pixelformat >> 16) & 0xff,
(f->pixelformat >> 24) & 0xff,
f->description);
- else if (ret == -ENOTTY &&
- (ops->vidioc_enum_fmt_vid_cap ||
- ops->vidioc_enum_fmt_vid_out ||
- ops->vidioc_enum_fmt_vid_cap_mplane ||
- ops->vidioc_enum_fmt_vid_out_mplane ||
- ops->vidioc_enum_fmt_vid_overlay ||
- ops->vidioc_enum_fmt_type_private))
- ret = -EINVAL;
break;
}
case VIDIOC_G_FMT:
@@ -645,6 +682,7 @@ static long __video_do_ioctl(struct file *file,
/* FIXME: Should be one dump per type */
dbgarg(cmd, "type=%s\n", prt_names(f->type, v4l2_type_names));
+ ret = -EINVAL;
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
if (ops->vidioc_g_fmt_vid_cap)
@@ -706,21 +744,12 @@ static long __video_do_ioctl(struct file *file,
fh, f);
break;
}
- if (unlikely(ret == -ENOTTY && have_fmt_ops(g)))
- ret = -EINVAL;
-
break;
}
case VIDIOC_S_FMT:
{
struct v4l2_format *f = (struct v4l2_format *)arg;
- if (!have_fmt_ops(s))
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
ret = -EINVAL;
/* FIXME: Should be one dump per type */
@@ -804,6 +833,7 @@ static long __video_do_ioctl(struct file *file,
/* FIXME: Should be one dump per type */
dbgarg(cmd, "type=%s\n", prt_names(f->type,
v4l2_type_names));
+ ret = -EINVAL;
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
CLEAR_AFTER_FIELD(f, fmt.pix);
@@ -876,8 +906,6 @@ static long __video_do_ioctl(struct file *file,
fh, f);
break;
}
- if (unlikely(ret == -ENOTTY && have_fmt_ops(try)))
- ret = -EINVAL;
break;
}
/* FIXME: Those buf reqs could be handled here,
@@ -888,12 +916,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_requestbuffers *p = arg;
- if (!ops->vidioc_reqbufs)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
ret = check_fmt(ops, p->type);
if (ret)
break;
@@ -912,8 +934,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_buffer *p = arg;
- if (!ops->vidioc_querybuf)
- break;
ret = check_fmt(ops, p->type);
if (ret)
break;
@@ -927,8 +947,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_buffer *p = arg;
- if (!ops->vidioc_qbuf)
- break;
ret = check_fmt(ops, p->type);
if (ret)
break;
@@ -942,8 +960,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_buffer *p = arg;
- if (!ops->vidioc_dqbuf)
- break;
ret = check_fmt(ops, p->type);
if (ret)
break;
@@ -957,12 +973,6 @@ static long __video_do_ioctl(struct file *file,
{
int *i = arg;
- if (!ops->vidioc_overlay)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "value=%d\n", *i);
ret = ops->vidioc_overlay(file, fh, *i);
break;
@@ -971,8 +981,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_framebuffer *p = arg;
- if (!ops->vidioc_g_fbuf)
- break;
ret = ops->vidioc_g_fbuf(file, fh, arg);
if (!ret) {
dbgarg(cmd, "capability=0x%x, flags=%d, base=0x%08lx\n",
@@ -986,12 +994,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_framebuffer *p = arg;
- if (!ops->vidioc_s_fbuf)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "capability=0x%x, flags=%d, base=0x%08lx\n",
p->capability, p->flags, (unsigned long)p->base);
v4l_print_pix_fmt(vfd, &p->fmt);
@@ -1002,12 +1004,6 @@ static long __video_do_ioctl(struct file *file,
{
enum v4l2_buf_type i = *(int *)arg;
- if (!ops->vidioc_streamon)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "type=%s\n", prt_names(i, v4l2_type_names));
ret = ops->vidioc_streamon(file, fh, i);
break;
@@ -1016,12 +1012,6 @@ static long __video_do_ioctl(struct file *file,
{
enum v4l2_buf_type i = *(int *)arg;
- if (!ops->vidioc_streamoff)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "type=%s\n", prt_names(i, v4l2_type_names));
ret = ops->vidioc_streamoff(file, fh, i);
break;
@@ -1091,13 +1081,6 @@ static long __video_do_ioctl(struct file *file,
dbgarg(cmd, "std=%08Lx\n", (long long unsigned)*id);
- if (!ops->vidioc_s_std)
- break;
-
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
ret = -EINVAL;
norm = (*id) & vfd->tvnorms;
if (vfd->tvnorms && !norm) /* Check if std is supported */
@@ -1115,8 +1098,6 @@ static long __video_do_ioctl(struct file *file,
{
v4l2_std_id *p = arg;
- if (!ops->vidioc_querystd)
- break;
/*
* If nothing detected, it should return all supported
* Drivers just need to mask the std argument, in order
@@ -1150,9 +1131,6 @@ static long __video_do_ioctl(struct file *file,
if (ops->vidioc_s_dv_timings)
p->capabilities |= V4L2_IN_CAP_CUSTOM_TIMINGS;
- if (!ops->vidioc_enum_input)
- break;
-
ret = ops->vidioc_enum_input(file, fh, p);
if (!ret)
dbgarg(cmd, "index=%d, name=%s, type=%d, "
@@ -1168,8 +1146,6 @@ static long __video_do_ioctl(struct file *file,
{
unsigned int *i = arg;
- if (!ops->vidioc_g_input)
- break;
ret = ops->vidioc_g_input(file, fh, i);
if (!ret)
dbgarg(cmd, "value=%d\n", *i);
@@ -1179,12 +1155,6 @@ static long __video_do_ioctl(struct file *file,
{
unsigned int *i = arg;
- if (!ops->vidioc_s_input)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "value=%d\n", *i);
ret = ops->vidioc_s_input(file, fh, *i);
break;
@@ -1195,9 +1165,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_output *p = arg;
- if (!ops->vidioc_enum_output)
- break;
-
/*
* We set the flags for CAP_PRESETS, CAP_CUSTOM_TIMINGS &
* CAP_STD here based on ioctl handler provided by the
@@ -1224,8 +1191,6 @@ static long __video_do_ioctl(struct file *file,
{
unsigned int *i = arg;
- if (!ops->vidioc_g_output)
- break;
ret = ops->vidioc_g_output(file, fh, i);
if (!ret)
dbgarg(cmd, "value=%d\n", *i);
@@ -1235,12 +1200,6 @@ static long __video_do_ioctl(struct file *file,
{
unsigned int *i = arg;
- if (!ops->vidioc_s_output)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "value=%d\n", *i);
ret = ops->vidioc_s_output(file, fh, *i);
break;
@@ -1310,10 +1269,6 @@ static long __video_do_ioctl(struct file *file,
if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
!ops->vidioc_s_ctrl && !ops->vidioc_s_ext_ctrls)
break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "id=0x%x, value=%d\n", p->id, p->value);
@@ -1369,10 +1324,6 @@ static long __video_do_ioctl(struct file *file,
if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
!ops->vidioc_s_ext_ctrls)
break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
v4l_print_ext_ctrls(cmd, vfd, p, 1);
if (vfh && vfh->ctrl_handler)
ret = v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p);
@@ -1428,8 +1379,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_audio *p = arg;
- if (!ops->vidioc_enumaudio)
- break;
ret = ops->vidioc_enumaudio(file, fh, p);
if (!ret)
dbgarg(cmd, "index=%d, name=%s, capability=0x%x, "
@@ -1443,9 +1392,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_audio *p = arg;
- if (!ops->vidioc_g_audio)
- break;
-
ret = ops->vidioc_g_audio(file, fh, p);
if (!ret)
dbgarg(cmd, "index=%d, name=%s, capability=0x%x, "
@@ -1459,12 +1405,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_audio *p = arg;
- if (!ops->vidioc_s_audio)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "index=%d, name=%s, capability=0x%x, "
"mode=0x%x\n", p->index, p->name,
p->capability, p->mode);
@@ -1475,8 +1415,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_audioout *p = arg;
- if (!ops->vidioc_enumaudout)
- break;
dbgarg(cmd, "Enum for index=%d\n", p->index);
ret = ops->vidioc_enumaudout(file, fh, p);
if (!ret)
@@ -1489,9 +1427,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_audioout *p = arg;
- if (!ops->vidioc_g_audout)
- break;
-
ret = ops->vidioc_g_audout(file, fh, p);
if (!ret)
dbgarg2("index=%d, name=%s, capability=%d, "
@@ -1503,12 +1438,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_audioout *p = arg;
- if (!ops->vidioc_s_audout)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "index=%d, name=%s, capability=%d, "
"mode=%d\n", p->index, p->name,
p->capability, p->mode);
@@ -1520,8 +1449,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_modulator *p = arg;
- if (!ops->vidioc_g_modulator)
- break;
ret = ops->vidioc_g_modulator(file, fh, p);
if (!ret)
dbgarg(cmd, "index=%d, name=%s, "
@@ -1536,12 +1463,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_modulator *p = arg;
- if (!ops->vidioc_s_modulator)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "index=%d, name=%s, capability=%d, "
"rangelow=%d, rangehigh=%d, txsubchans=%d\n",
p->index, p->name, p->capability, p->rangelow,
@@ -1553,9 +1474,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_crop *p = arg;
- if (!ops->vidioc_g_crop && !ops->vidioc_g_selection)
- break;
-
dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
if (ops->vidioc_g_crop) {
@@ -1587,13 +1505,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_crop *p = arg;
- if (!ops->vidioc_s_crop && !ops->vidioc_s_selection)
- break;
-
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
dbgrect(vfd, "", &p->c);
@@ -1620,9 +1531,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_selection *p = arg;
- if (!ops->vidioc_g_selection)
- break;
-
dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
ret = ops->vidioc_g_selection(file, fh, p);
@@ -1634,13 +1542,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_selection *p = arg;
- if (!ops->vidioc_s_selection)
- break;
-
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
dbgrect(vfd, "", &p->r);
@@ -1653,9 +1554,6 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_cropcap *p = arg;
/* FIXME: Should also show v4l2_fract pixelaspect */
- if (!ops->vidioc_cropcap && !ops->vidioc_g_selection)
- break;
-
dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
if (ops->vidioc_cropcap) {
ret = ops->vidioc_cropcap(file, fh, p);
@@ -1699,9 +1597,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_jpegcompression *p = arg;
- if (!ops->vidioc_g_jpegcomp)
- break;
-
ret = ops->vidioc_g_jpegcomp(file, fh, p);
if (!ret)
dbgarg(cmd, "quality=%d, APPn=%d, "
@@ -1715,12 +1610,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_jpegcompression *p = arg;
- if (!ops->vidioc_g_jpegcomp)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "quality=%d, APPn=%d, APP_len=%d, "
"COM_len=%d, jpeg_markers=%d\n",
p->quality, p->APPn, p->APP_len,
@@ -1732,8 +1621,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_enc_idx *p = arg;
- if (!ops->vidioc_g_enc_index)
- break;
ret = ops->vidioc_g_enc_index(file, fh, p);
if (!ret)
dbgarg(cmd, "entries=%d, entries_cap=%d\n",
@@ -1744,12 +1631,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_encoder_cmd *p = arg;
- if (!ops->vidioc_encoder_cmd)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
ret = ops->vidioc_encoder_cmd(file, fh, p);
if (!ret)
dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
@@ -1759,8 +1640,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_encoder_cmd *p = arg;
- if (!ops->vidioc_try_encoder_cmd)
- break;
ret = ops->vidioc_try_encoder_cmd(file, fh, p);
if (!ret)
dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
@@ -1770,12 +1649,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_decoder_cmd *p = arg;
- if (!ops->vidioc_decoder_cmd)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
ret = ops->vidioc_decoder_cmd(file, fh, p);
if (!ret)
dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
@@ -1785,8 +1658,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_decoder_cmd *p = arg;
- if (!ops->vidioc_try_decoder_cmd)
- break;
ret = ops->vidioc_try_decoder_cmd(file, fh, p);
if (!ret)
dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
@@ -1796,8 +1667,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_streamparm *p = arg;
- if (!ops->vidioc_g_parm && !vfd->current_norm)
- break;
if (ops->vidioc_g_parm) {
ret = check_fmt(ops, p->type);
if (ret)
@@ -1825,12 +1694,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_streamparm *p = arg;
- if (!ops->vidioc_s_parm)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
ret = check_fmt(ops, p->type);
if (ret)
break;
@@ -1843,9 +1706,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_tuner *p = arg;
- if (!ops->vidioc_g_tuner)
- break;
-
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
ret = ops->vidioc_g_tuner(file, fh, p);
@@ -1864,12 +1724,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_tuner *p = arg;
- if (!ops->vidioc_s_tuner)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd, "index=%d, name=%s, type=%d, "
@@ -1887,9 +1741,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_frequency *p = arg;
- if (!ops->vidioc_g_frequency)
- break;
-
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
ret = ops->vidioc_g_frequency(file, fh, p);
@@ -1903,12 +1754,6 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_frequency *p = arg;
enum v4l2_tuner_type type;
- if (!ops->vidioc_s_frequency)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
@@ -1923,9 +1768,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_sliced_vbi_cap *p = arg;
- if (!ops->vidioc_g_sliced_vbi_cap)
- break;
-
/* Clear up to type; everything after type is zeroed already */
memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type));
@@ -1937,8 +1779,6 @@ static long __video_do_ioctl(struct file *file,
}
case VIDIOC_LOG_STATUS:
{
- if (!ops->vidioc_log_status)
- break;
if (vfd->v4l2_dev)
pr_info("%s: ================= START STATUS =================\n",
vfd->v4l2_dev->name);
@@ -1948,38 +1788,34 @@ static long __video_do_ioctl(struct file *file,
vfd->v4l2_dev->name);
break;
}
-#ifdef CONFIG_VIDEO_ADV_DEBUG
case VIDIOC_DBG_G_REGISTER:
{
+#ifdef CONFIG_VIDEO_ADV_DEBUG
struct v4l2_dbg_register *p = arg;
- if (ops->vidioc_g_register) {
- if (!capable(CAP_SYS_ADMIN))
- ret = -EPERM;
- else
- ret = ops->vidioc_g_register(file, fh, p);
- }
+ if (!capable(CAP_SYS_ADMIN))
+ ret = -EPERM;
+ else
+ ret = ops->vidioc_g_register(file, fh, p);
+#endif
break;
}
case VIDIOC_DBG_S_REGISTER:
{
+#ifdef CONFIG_VIDEO_ADV_DEBUG
struct v4l2_dbg_register *p = arg;
- if (ops->vidioc_s_register) {
- if (!capable(CAP_SYS_ADMIN))
- ret = -EPERM;
- else
- ret = ops->vidioc_s_register(file, fh, p);
- }
+ if (!capable(CAP_SYS_ADMIN))
+ ret = -EPERM;
+ else
+ ret = ops->vidioc_s_register(file, fh, p);
+#endif
break;
}
-#endif
case VIDIOC_DBG_G_CHIP_IDENT:
{
struct v4l2_dbg_chip_ident *p = arg;
- if (!ops->vidioc_g_chip_ident)
- break;
p->ident = V4L2_IDENT_NONE;
p->revision = 0;
ret = ops->vidioc_g_chip_ident(file, fh, p);
@@ -1992,12 +1828,6 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_hw_freq_seek *p = arg;
enum v4l2_tuner_type type;
- if (!ops->vidioc_s_hw_freq_seek)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd,
@@ -2013,9 +1843,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_frmsizeenum *p = arg;
- if (!ops->vidioc_enum_framesizes)
- break;
-
ret = ops->vidioc_enum_framesizes(file, fh, p);
dbgarg(cmd,
"index=%d, pixelformat=%c%c%c%c, type=%d ",
@@ -2049,9 +1876,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_frmivalenum *p = arg;
- if (!ops->vidioc_enum_frameintervals)
- break;
-
ret = ops->vidioc_enum_frameintervals(file, fh, p);
dbgarg(cmd,
"index=%d, pixelformat=%d, width=%d, height=%d, type=%d ",
@@ -2084,9 +1908,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_dv_enum_preset *p = arg;
- if (!ops->vidioc_enum_dv_presets)
- break;
-
ret = ops->vidioc_enum_dv_presets(file, fh, p);
if (!ret)
dbgarg(cmd,
@@ -2100,13 +1921,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_dv_preset *p = arg;
- if (!ops->vidioc_s_dv_preset)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
-
dbgarg(cmd, "preset=%d\n", p->preset);
ret = ops->vidioc_s_dv_preset(file, fh, p);
break;
@@ -2115,9 +1929,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_dv_preset *p = arg;
- if (!ops->vidioc_g_dv_preset)
- break;
-
ret = ops->vidioc_g_dv_preset(file, fh, p);
if (!ret)
dbgarg(cmd, "preset=%d\n", p->preset);
@@ -2127,9 +1938,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_dv_preset *p = arg;
- if (!ops->vidioc_query_dv_preset)
- break;
-
ret = ops->vidioc_query_dv_preset(file, fh, p);
if (!ret)
dbgarg(cmd, "preset=%d\n", p->preset);
@@ -2139,32 +1947,13 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_dv_timings *p = arg;
- if (!ops->vidioc_s_dv_timings)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
-
+ dbgtimings(vfd, p);
switch (p->type) {
case V4L2_DV_BT_656_1120:
- dbgarg2("bt-656/1120:interlaced=%d, pixelclock=%lld,"
- " width=%d, height=%d, polarities=%x,"
- " hfrontporch=%d, hsync=%d, hbackporch=%d,"
- " vfrontporch=%d, vsync=%d, vbackporch=%d,"
- " il_vfrontporch=%d, il_vsync=%d,"
- " il_vbackporch=%d\n",
- p->bt.interlaced, p->bt.pixelclock,
- p->bt.width, p->bt.height, p->bt.polarities,
- p->bt.hfrontporch, p->bt.hsync,
- p->bt.hbackporch, p->bt.vfrontporch,
- p->bt.vsync, p->bt.vbackporch,
- p->bt.il_vfrontporch, p->bt.il_vsync,
- p->bt.il_vbackporch);
ret = ops->vidioc_s_dv_timings(file, fh, p);
break;
default:
- dbgarg2("Unknown type %d!\n", p->type);
+ ret = -EINVAL;
break;
}
break;
@@ -2173,43 +1962,68 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_dv_timings *p = arg;
- if (!ops->vidioc_g_dv_timings)
+ ret = ops->vidioc_g_dv_timings(file, fh, p);
+ if (!ret)
+ dbgtimings(vfd, p);
+ break;
+ }
+ case VIDIOC_ENUM_DV_TIMINGS:
+ {
+ struct v4l2_enum_dv_timings *p = arg;
+
+ if (!ops->vidioc_enum_dv_timings)
break;
- ret = ops->vidioc_g_dv_timings(file, fh, p);
+ ret = ops->vidioc_enum_dv_timings(file, fh, p);
if (!ret) {
- switch (p->type) {
- case V4L2_DV_BT_656_1120:
- dbgarg2("bt-656/1120:interlaced=%d,"
- " pixelclock=%lld,"
- " width=%d, height=%d, polarities=%x,"
- " hfrontporch=%d, hsync=%d,"
- " hbackporch=%d, vfrontporch=%d,"
- " vsync=%d, vbackporch=%d,"
- " il_vfrontporch=%d, il_vsync=%d,"
- " il_vbackporch=%d\n",
- p->bt.interlaced, p->bt.pixelclock,
- p->bt.width, p->bt.height,
- p->bt.polarities, p->bt.hfrontporch,
- p->bt.hsync, p->bt.hbackporch,
- p->bt.vfrontporch, p->bt.vsync,
- p->bt.vbackporch, p->bt.il_vfrontporch,
- p->bt.il_vsync, p->bt.il_vbackporch);
- break;
- default:
- dbgarg2("Unknown type %d!\n", p->type);
- break;
- }
+ dbgarg(cmd, "index=%d: ", p->index);
+ dbgtimings(vfd, &p->timings);
}
break;
}
- case VIDIOC_DQEVENT:
+ case VIDIOC_QUERY_DV_TIMINGS:
{
- struct v4l2_event *ev = arg;
+ struct v4l2_dv_timings *p = arg;
+
+ if (!ops->vidioc_query_dv_timings)
+ break;
- if (!ops->vidioc_subscribe_event)
+ ret = ops->vidioc_query_dv_timings(file, fh, p);
+ if (!ret)
+ dbgtimings(vfd, p);
+ break;
+ }
+ case VIDIOC_DV_TIMINGS_CAP:
+ {
+ struct v4l2_dv_timings_cap *p = arg;
+
+ if (!ops->vidioc_dv_timings_cap)
break;
+ ret = ops->vidioc_dv_timings_cap(file, fh, p);
+ if (ret)
+ break;
+ switch (p->type) {
+ case V4L2_DV_BT_656_1120:
+ dbgarg(cmd,
+ "type=%d, width=%u-%u, height=%u-%u, "
+ "pixelclock=%llu-%llu, standards=%x, capabilities=%x ",
+ p->type,
+ p->bt.min_width, p->bt.max_width,
+ p->bt.min_height, p->bt.max_height,
+ p->bt.min_pixelclock, p->bt.max_pixelclock,
+ p->bt.standards, p->bt.capabilities);
+ break;
+ default:
+ dbgarg(cmd, "unknown type ");
+ break;
+ }
+ break;
+ }
+ case VIDIOC_DQEVENT:
+ {
+ struct v4l2_event *ev = arg;
+
ret = v4l2_event_dequeue(fh, ev, file->f_flags & O_NONBLOCK);
if (ret < 0) {
dbgarg(cmd, "no pending events?");
@@ -2226,9 +2040,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_event_subscription *sub = arg;
- if (!ops->vidioc_subscribe_event)
- break;
-
ret = ops->vidioc_subscribe_event(fh, sub);
if (ret < 0) {
dbgarg(cmd, "failed, ret=%ld", ret);
@@ -2241,9 +2052,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_event_subscription *sub = arg;
- if (!ops->vidioc_unsubscribe_event)
- break;
-
ret = ops->vidioc_unsubscribe_event(fh, sub);
if (ret < 0) {
dbgarg(cmd, "failed, ret=%ld", ret);
@@ -2256,12 +2064,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_create_buffers *create = arg;
- if (!ops->vidioc_create_bufs)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
ret = check_fmt(ops, create->format.type);
if (ret)
break;
@@ -2275,8 +2077,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_buffer *b = arg;
- if (!ops->vidioc_prepare_buf)
- break;
ret = check_fmt(ops, b->type);
if (ret)
break;
@@ -2289,7 +2089,9 @@ static long __video_do_ioctl(struct file *file,
default:
if (!ops->vidioc_default)
break;
- ret = ops->vidioc_default(file, fh, ret_prio >= 0, cmd, arg);
+ ret = ops->vidioc_default(file, fh, use_fh_prio ?
+ v4l2_prio_check(vfd->prio, vfh->prio) >= 0 : 0,
+ cmd, arg);
break;
} /* switch */
@@ -2463,7 +2265,9 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
err = -EFAULT;
goto out_array_args;
}
- if (err < 0)
+ /* VIDIOC_QUERY_DV_TIMINGS can return an error, but still have valid
+  * results that must be returned. */
+ if (err < 0 && cmd != VIDIOC_QUERY_DV_TIMINGS)
goto out;
out_array_args:
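
The big win of the INFO_FL_PRIO flag is visible in all the ret_prio deletions above: the priority check now happens once in __video_do_ioctl(), so drivers that set V4L2_FL_USE_FH_PRIO can drop their per-ioctl boilerplate. A sketch of a setter after the change; mycam_* is a hypothetical driver and mycam_select_input() a made-up helper:

static int mycam_s_input(struct file *file, void *fh, unsigned int i)
{
        struct mycam_dev *dev = video_drvdata(file);

        /* the core has already rejected lower-priority filehandles
         * with -EBUSY, so no v4l2_prio_check() is needed here */
        if (i > 0)
                return -EINVAL;
        return mycam_select_input(dev, i);
}
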
diff --git a/drivers/media/video/v4l2-subdev.c b/drivers/media/video/v4l2-subdev.c
index 6fe88e965a8c..db6e859b93d4 100644
--- a/drivers/media/video/v4l2-subdev.c
+++ b/drivers/media/video/v4l2-subdev.c
@@ -35,14 +35,9 @@
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- /* Allocate try format and crop in the same memory block */
- fh->try_fmt = kzalloc((sizeof(*fh->try_fmt) + sizeof(*fh->try_crop))
- * sd->entity.num_pads, GFP_KERNEL);
- if (fh->try_fmt == NULL)
+ fh->pad = kzalloc(sizeof(*fh->pad) * sd->entity.num_pads, GFP_KERNEL);
+ if (fh->pad == NULL)
return -ENOMEM;
-
- fh->try_crop = (struct v4l2_rect *)
- (fh->try_fmt + sd->entity.num_pads);
#endif
return 0;
}
@@ -50,9 +45,8 @@ static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- kfree(fh->try_fmt);
- fh->try_fmt = NULL;
- fh->try_crop = NULL;
+ kfree(fh->pad);
+ fh->pad = NULL;
#endif
}
@@ -234,6 +228,8 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_G_CROP: {
struct v4l2_subdev_crop *crop = arg;
+ struct v4l2_subdev_selection sel;
+ int rval;
if (crop->which != V4L2_SUBDEV_FORMAT_TRY &&
crop->which != V4L2_SUBDEV_FORMAT_ACTIVE)
@@ -242,11 +238,27 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (crop->pad >= sd->entity.num_pads)
return -EINVAL;
- return v4l2_subdev_call(sd, pad, get_crop, subdev_fh, crop);
+ rval = v4l2_subdev_call(sd, pad, get_crop, subdev_fh, crop);
+ if (rval != -ENOIOCTLCMD)
+ return rval;
+
+ memset(&sel, 0, sizeof(sel));
+ sel.which = crop->which;
+ sel.pad = crop->pad;
+ sel.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL;
+
+ rval = v4l2_subdev_call(
+ sd, pad, get_selection, subdev_fh, &sel);
+
+ crop->rect = sel.r;
+
+ return rval;
}
case VIDIOC_SUBDEV_S_CROP: {
struct v4l2_subdev_crop *crop = arg;
+ struct v4l2_subdev_selection sel;
+ int rval;
if (crop->which != V4L2_SUBDEV_FORMAT_TRY &&
crop->which != V4L2_SUBDEV_FORMAT_ACTIVE)
@@ -255,7 +267,22 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (crop->pad >= sd->entity.num_pads)
return -EINVAL;
- return v4l2_subdev_call(sd, pad, set_crop, subdev_fh, crop);
+ rval = v4l2_subdev_call(sd, pad, set_crop, subdev_fh, crop);
+ if (rval != -ENOIOCTLCMD)
+ return rval;
+
+ memset(&sel, 0, sizeof(sel));
+ sel.which = crop->which;
+ sel.pad = crop->pad;
+ sel.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL;
+ sel.r = crop->rect;
+
+ rval = v4l2_subdev_call(
+ sd, pad, set_selection, subdev_fh, &sel);
+
+ crop->rect = sel.r;
+
+ return rval;
}
case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
@@ -293,6 +320,34 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return v4l2_subdev_call(sd, pad, enum_frame_interval, subdev_fh,
fie);
}
+
+ case VIDIOC_SUBDEV_G_SELECTION: {
+ struct v4l2_subdev_selection *sel = arg;
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
+ sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (sel->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return v4l2_subdev_call(
+ sd, pad, get_selection, subdev_fh, sel);
+ }
+
+ case VIDIOC_SUBDEV_S_SELECTION: {
+ struct v4l2_subdev_selection *sel = arg;
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
+ sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (sel->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return v4l2_subdev_call(
+ sd, pad, set_selection, subdev_fh, sel);
+ }
#endif
default:
return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
@@ -332,6 +387,70 @@ const struct v4l2_file_operations v4l2_subdev_fops = {
.poll = subdev_poll,
};
+#ifdef CONFIG_MEDIA_CONTROLLER
+int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ if (source_fmt->format.width != sink_fmt->format.width
+ || source_fmt->format.height != sink_fmt->format.height
+ || source_fmt->format.code != sink_fmt->format.code)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);
+
+static int
+v4l2_subdev_link_validate_get_format(struct media_pad *pad,
+ struct v4l2_subdev_format *fmt)
+{
+ switch (media_entity_type(pad->entity)) {
+ case MEDIA_ENT_T_V4L2_SUBDEV:
+ fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt->pad = pad->index;
+ return v4l2_subdev_call(media_entity_to_v4l2_subdev(
+ pad->entity),
+ pad, get_fmt, NULL, fmt);
+ default:
+ WARN(1, "Driver bug! Wrong media entity type %d, entity %s\n",
+ media_entity_type(pad->entity), pad->entity->name);
+ /* Fall through */
+ case MEDIA_ENT_T_DEVNODE_V4L:
+ return -EINVAL;
+ }
+}
+
+int v4l2_subdev_link_validate(struct media_link *link)
+{
+ struct v4l2_subdev *sink;
+ struct v4l2_subdev_format sink_fmt, source_fmt;
+ int rval;
+
+ rval = v4l2_subdev_link_validate_get_format(
+ link->source, &source_fmt);
+ if (rval < 0)
+ return 0;
+
+ rval = v4l2_subdev_link_validate_get_format(
+ link->sink, &sink_fmt);
+ if (rval < 0)
+ return 0;
+
+ sink = media_entity_to_v4l2_subdev(link->sink->entity);
+
+ rval = v4l2_subdev_call(sink, pad, link_validate, link,
+ &source_fmt, &sink_fmt);
+ if (rval != -ENOIOCTLCMD)
+ return rval;
+
+ return v4l2_subdev_link_validate_default(
+ sink, link, &source_fmt, &sink_fmt);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
+#endif /* CONFIG_MEDIA_CONTROLLER */
+
void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
{
INIT_LIST_HEAD(&sd->list);
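
With the compatibility code above, a newly written subdev only needs the selection ops: VIDIOC_SUBDEV_G_CROP and VIDIOC_SUBDEV_S_CROP are translated into get_selection/set_selection calls with the V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL target whenever the crop ops return -ENOIOCTLCMD. A sketch with my_sd_* standing in for a real driver and my_sd_crop_rect() a hypothetical helper:

#include <media/v4l2-subdev.h>

static int my_sd_get_selection(struct v4l2_subdev *sd,
                               struct v4l2_subdev_fh *fh,
                               struct v4l2_subdev_selection *sel)
{
        if (sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL)
                return -EINVAL;
        /* return the TRY or ACTIVE crop rectangle for sel->pad,
         * depending on sel->which */
        sel->r = my_sd_crop_rect(sd, fh, sel->pad, sel->which);
        return 0;
}

static const struct v4l2_subdev_pad_ops my_sd_pad_ops = {
        .get_selection = my_sd_get_selection,
        /* no .get_crop/.set_crop: the old crop ioctls reach the
         * selection op through the fallback above */
};

The same idea applies to link checking: a subdev that is happy with the default width/height/code comparison can point its entity's link_validate operation (added on the media-controller side in the same series) at v4l2_subdev_link_validate.
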
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index 20f7237b8242..308e150a39bc 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -18,6 +18,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-chip-ident.h>
+#include <media/ov7670.h>
#include <media/videobuf-dma-sg.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -1347,11 +1348,21 @@ static __devinit bool viacam_serial_is_enabled(void)
return false;
}
+static struct ov7670_config sensor_cfg = {
+ /* The XO-1.5 (only known user) clocks the camera at 90MHz. */
+ .clock_speed = 90,
+};
+
static __devinit int viacam_probe(struct platform_device *pdev)
{
int ret;
struct i2c_adapter *sensor_adapter;
struct viafb_dev *viadev = pdev->dev.platform_data;
+ struct i2c_board_info ov7670_info = {
+ .type = "ov7670",
+ .addr = 0x42 >> 1,
+ .platform_data = &sensor_cfg,
+ };
/*
* Note that there are actually two capture channels on
@@ -1433,8 +1444,8 @@ static __devinit int viacam_probe(struct platform_device *pdev)
* is OLPC-specific. 0x42 assumption is ov7670-specific.
*/
sensor_adapter = viafb_find_i2c_adapter(VIA_PORT_31);
- cam->sensor = v4l2_i2c_new_subdev(&cam->v4l2_dev, sensor_adapter,
- "ov7670", 0x42 >> 1, NULL);
+ cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev, sensor_adapter,
+ &ov7670_info, NULL);
if (cam->sensor == NULL) {
dev_err(&pdev->dev, "Unable to find the sensor!\n");
ret = -ENODEV;
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index de4fa4eb8844..ffdf59cfe405 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -1129,6 +1129,7 @@ unsigned int videobuf_poll_stream(struct file *file,
struct videobuf_queue *q,
poll_table *wait)
{
+ unsigned long req_events = poll_requested_events(wait);
struct videobuf_buffer *buf = NULL;
unsigned int rc = 0;
@@ -1137,7 +1138,7 @@ unsigned int videobuf_poll_stream(struct file *file,
if (!list_empty(&q->stream))
buf = list_entry(q->stream.next,
struct videobuf_buffer, stream);
- } else {
+ } else if (req_events & (POLLIN | POLLRDNORM)) {
if (!q->reading)
__videobuf_read_start(q);
if (!q->reading) {
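
Because videobuf_poll_stream() now looks at poll_requested_events(), an application that polls only for events (POLLPRI) no longer starts read() streaming as a side effect. A userspace sketch, assuming the fd already has an event subscription set up:

#include <poll.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void wait_for_event(int video_fd)
{
        struct pollfd pfd = { .fd = video_fd, .events = POLLPRI };
        struct v4l2_event ev;

        if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
                ioctl(video_fd, VIDIOC_DQEVENT, &ev);
}
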
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index c9691115f2d2..b6b5cc1a43cb 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -27,6 +27,7 @@ struct videobuf_dma_contig_memory {
u32 magic;
void *vaddr;
dma_addr_t dma_handle;
+ bool cached;
unsigned long size;
};
@@ -37,8 +38,58 @@ struct videobuf_dma_contig_memory {
BUG(); \
}
-static void
-videobuf_vm_open(struct vm_area_struct *vma)
+static int __videobuf_dc_alloc(struct device *dev,
+ struct videobuf_dma_contig_memory *mem,
+ unsigned long size, unsigned long flags)
+{
+ mem->size = size;
+ if (mem->cached) {
+ mem->vaddr = alloc_pages_exact(mem->size, flags | GFP_DMA);
+ if (mem->vaddr) {
+ int err;
+
+ mem->dma_handle = dma_map_single(dev, mem->vaddr,
+ mem->size,
+ DMA_FROM_DEVICE);
+ err = dma_mapping_error(dev, mem->dma_handle);
+ if (err) {
+ dev_err(dev, "dma_map_single failed\n");
+
+ free_pages_exact(mem->vaddr, mem->size);
+ mem->vaddr = NULL;
+ return err;
+ }
+ }
+ } else
+ mem->vaddr = dma_alloc_coherent(dev, mem->size,
+ &mem->dma_handle, flags);
+
+ if (!mem->vaddr) {
+ dev_err(dev, "memory alloc size %ld failed\n", mem->size);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);
+
+ return 0;
+}
+
+static void __videobuf_dc_free(struct device *dev,
+ struct videobuf_dma_contig_memory *mem)
+{
+ if (mem->cached) {
+ if (!mem->vaddr)
+ return;
+ dma_unmap_single(dev, mem->dma_handle, mem->size,
+ DMA_FROM_DEVICE);
+ free_pages_exact(mem->vaddr, mem->size);
+ } else
+ dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
+
+ mem->vaddr = NULL;
+}
+
+static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
@@ -91,12 +142,11 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dev_dbg(q->dev, "buf[%d] freeing %p\n",
i, mem->vaddr);
- dma_free_coherent(q->dev, mem->size,
- mem->vaddr, mem->dma_handle);
+ __videobuf_dc_free(q->dev, mem);
mem->vaddr = NULL;
}
- q->bufs[i]->map = NULL;
+ q->bufs[i]->map = NULL;
q->bufs[i]->baddr = 0;
}
@@ -107,8 +157,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
}
static const struct vm_operations_struct videobuf_vm_ops = {
- .open = videobuf_vm_open,
- .close = videobuf_vm_close,
+ .open = videobuf_vm_open,
+ .close = videobuf_vm_close,
};
/**
@@ -178,26 +228,38 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
pages_done++;
}
- out_up:
+out_up:
up_read(&current->mm->mmap_sem);
return ret;
}
-static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size, bool cached)
{
struct videobuf_dma_contig_memory *mem;
struct videobuf_buffer *vb;
vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
if (vb) {
- mem = vb->priv = ((char *)vb) + size;
+ vb->priv = ((char *)vb) + size;
+ mem = vb->priv;
mem->magic = MAGIC_DC_MEM;
+ mem->cached = cached;
}
return vb;
}
+static struct videobuf_buffer *__videobuf_alloc_uncached(size_t size)
+{
+ return __videobuf_alloc_vb(size, false);
+}
+
+static struct videobuf_buffer *__videobuf_alloc_cached(size_t size)
+{
+ return __videobuf_alloc_vb(size, true);
+}
+
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
@@ -235,28 +297,32 @@ static int __videobuf_iolock(struct videobuf_queue *q,
return videobuf_dma_contig_user_get(mem, vb);
/* allocate memory for the read() method */
- mem->size = PAGE_ALIGN(vb->size);
- mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
- &mem->dma_handle, GFP_KERNEL);
- if (!mem->vaddr) {
- dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
- mem->size);
+ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
+ GFP_KERNEL))
return -ENOMEM;
- }
-
- dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
- mem->vaddr, mem->size);
break;
case V4L2_MEMORY_OVERLAY:
default:
- dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
- __func__);
+ dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
return -EINVAL;
}
return 0;
}
+static int __videobuf_sync(struct videobuf_queue *q,
+ struct videobuf_buffer *buf)
+{
+ struct videobuf_dma_contig_memory *mem = buf->priv;
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+ dma_sync_single_for_cpu(q->dev, mem->dma_handle, mem->size,
+ DMA_FROM_DEVICE);
+
+ return 0;
+}
+
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
struct videobuf_buffer *buf,
struct vm_area_struct *vma)
@@ -265,6 +331,8 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
struct videobuf_mapping *map;
int retval;
unsigned long size;
+ unsigned long pos, start = vma->vm_start;
+ struct page *page;
dev_dbg(q->dev, "%s\n", __func__);
@@ -282,41 +350,50 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
- mem->size = PAGE_ALIGN(buf->bsize);
- mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
- &mem->dma_handle, GFP_KERNEL);
- if (!mem->vaddr) {
- dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
- mem->size);
+ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
+ GFP_KERNEL | __GFP_COMP))
goto error;
- }
- dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
- mem->vaddr, mem->size);
/* Try to remap memory */
size = vma->vm_end - vma->vm_start;
size = (size < mem->size) ? size : mem->size;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- retval = remap_pfn_range(vma, vma->vm_start,
- mem->dma_handle >> PAGE_SHIFT,
- size, vma->vm_page_prot);
- if (retval) {
- dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
- dma_free_coherent(q->dev, mem->size,
- mem->vaddr, mem->dma_handle);
- goto error;
+ if (!mem->cached)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ pos = (unsigned long)mem->vaddr;
+
+ while (size > 0) {
+ page = virt_to_page((void *)pos);
+ if (!page) {
+ dev_err(q->dev, "mmap: virt_to_page failed\n");
+ __videobuf_dc_free(q->dev, mem);
+ goto error;
+ }
+ retval = vm_insert_page(vma, start, page);
+ if (retval) {
+ dev_err(q->dev, "mmap: insert failed with error %d\n",
+ retval);
+ __videobuf_dc_free(q->dev, mem);
+ goto error;
+ }
+ start += PAGE_SIZE;
+ pos += PAGE_SIZE;
+
+ if (size > PAGE_SIZE)
+ size -= PAGE_SIZE;
+ else
+ size = 0;
}
- vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND;
+ vma->vm_ops = &videobuf_vm_ops;
+ vma->vm_flags |= VM_DONTEXPAND;
vma->vm_private_data = map;
dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
map, q, vma->vm_start, vma->vm_end,
- (long int)buf->bsize,
- vma->vm_pgoff, buf->i);
+ (long int)buf->bsize, vma->vm_pgoff, buf->i);
videobuf_vm_open(vma);
@@ -328,12 +405,20 @@ error:
}
static struct videobuf_qtype_ops qops = {
- .magic = MAGIC_QTYPE_OPS,
+ .magic = MAGIC_QTYPE_OPS,
+ .alloc_vb = __videobuf_alloc_uncached,
+ .iolock = __videobuf_iolock,
+ .mmap_mapper = __videobuf_mmap_mapper,
+ .vaddr = __videobuf_to_vaddr,
+};
- .alloc_vb = __videobuf_alloc_vb,
- .iolock = __videobuf_iolock,
- .mmap_mapper = __videobuf_mmap_mapper,
- .vaddr = __videobuf_to_vaddr,
+static struct videobuf_qtype_ops qops_cached = {
+ .magic = MAGIC_QTYPE_OPS,
+ .alloc_vb = __videobuf_alloc_cached,
+ .iolock = __videobuf_iolock,
+ .sync = __videobuf_sync,
+ .mmap_mapper = __videobuf_mmap_mapper,
+ .vaddr = __videobuf_to_vaddr,
};
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
@@ -351,6 +436,20 @@ void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
+void videobuf_queue_dma_contig_init_cached(struct videobuf_queue *q,
+ const struct videobuf_queue_ops *ops,
+ struct device *dev,
+ spinlock_t *irqlock,
+ enum v4l2_buf_type type,
+ enum v4l2_field field,
+ unsigned int msize,
+ void *priv, struct mutex *ext_lock)
+{
+ videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
+ priv, &qops_cached, ext_lock);
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init_cached);
+
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
@@ -389,7 +488,7 @@ void videobuf_dma_contig_free(struct videobuf_queue *q,
/* read() method */
if (mem->vaddr) {
- dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
+ __videobuf_dc_free(q->dev, mem);
mem->vaddr = NULL;
}
}
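
Drivers opt into the cached behaviour through the new init helper; the argument list mirrors videobuf_queue_dma_contig_init(), and the extra .sync op ensures dma_sync_single_for_cpu() runs before a dequeued buffer is handed back to the CPU. A sketch of the call, typically made from open() or probe(); cam, mycam_videobuf_ops and struct mycam_buffer are driver-side assumptions:

videobuf_queue_dma_contig_init_cached(&cam->vb_vidq,
                                      &mycam_videobuf_ops,
                                      &pdev->dev, &cam->slock,
                                      V4L2_BUF_TYPE_VIDEO_CAPTURE,
                                      V4L2_FIELD_NONE,
                                      sizeof(struct mycam_buffer),
                                      cam, NULL);
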
diff --git a/drivers/media/video/videobuf-dvb.c b/drivers/media/video/videobuf-dvb.c
index 59cb54aa2946..94d83a41381b 100644
--- a/drivers/media/video/videobuf-dvb.c
+++ b/drivers/media/video/videobuf-dvb.c
@@ -45,7 +45,6 @@ static int videobuf_dvb_thread(void *data)
struct videobuf_dvb *dvb = data;
struct videobuf_buffer *buf;
unsigned long flags;
- int err;
void *outp;
dprintk("dvb thread started\n");
@@ -57,7 +56,7 @@ static int videobuf_dvb_thread(void *data)
buf = list_entry(dvb->dvbq.stream.next,
struct videobuf_buffer, stream);
list_del(&buf->stream);
- err = videobuf_waiton(&dvb->dvbq, buf, 0, 1);
+ videobuf_waiton(&dvb->dvbq, buf, 0, 1);
/* no more feeds left or stop_feed() asked us to quit */
if (0 == dvb->nfeeds)
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index 2e8f1df775b6..9d4e9edbd2e7 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -19,6 +19,9 @@
#include <linux/slab.h>
#include <linux/sched.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <media/videobuf2-core.h>
static int debug;
@@ -1642,32 +1645,46 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q);
* For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
* will be reported as available for writing.
*
+ * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
+ * pending events.
+ *
* The return values from this function are intended to be directly returned
* from poll handler in driver.
*/
unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
- unsigned long flags;
- unsigned int ret;
+ struct video_device *vfd = video_devdata(file);
+ unsigned long req_events = poll_requested_events(wait);
struct vb2_buffer *vb = NULL;
+ unsigned int res = 0;
+ unsigned long flags;
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
+ struct v4l2_fh *fh = file->private_data;
+
+ if (v4l2_event_pending(fh))
+ res = POLLPRI;
+ else if (req_events & POLLPRI)
+ poll_wait(file, &fh->wait, wait);
+ }
/*
* Start file I/O emulator only if streaming API has not been used yet.
*/
if (q->num_buffers == 0 && q->fileio == NULL) {
- if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ)) {
- ret = __vb2_init_fileio(q, 1);
- if (ret)
- return POLLERR;
+ if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
+ (req_events & (POLLIN | POLLRDNORM))) {
+ if (__vb2_init_fileio(q, 1))
+ return res | POLLERR;
}
- if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE)) {
- ret = __vb2_init_fileio(q, 0);
- if (ret)
- return POLLERR;
+ if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
+ (req_events & (POLLOUT | POLLWRNORM))) {
+ if (__vb2_init_fileio(q, 0))
+ return res | POLLERR;
/*
* Write to OUTPUT queue can be done immediately.
*/
- return POLLOUT | POLLWRNORM;
+ return res | POLLOUT | POLLWRNORM;
}
}
@@ -1675,7 +1692,7 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
* There is nothing to wait for if no buffers have already been queued.
*/
if (list_empty(&q->queued_list))
- return POLLERR;
+ return res | POLLERR;
poll_wait(file, &q->done_wq, wait);
@@ -1690,10 +1707,11 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
if (vb && (vb->state == VB2_BUF_STATE_DONE
|| vb->state == VB2_BUF_STATE_ERROR)) {
- return (V4L2_TYPE_IS_OUTPUT(q->type)) ? POLLOUT | POLLWRNORM :
- POLLIN | POLLRDNORM;
+ return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
+ res | POLLOUT | POLLWRNORM :
+ res | POLLIN | POLLRDNORM;
}
- return 0;
+ return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);
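The rewritten vb2_poll() accumulates its result in res so that an event-only wakeup (POLLPRI) is never lost to the later buffer checks, and it only starts the read/write emulator when the caller actually asked for the matching readiness bits. A small standalone sketch of composing such a mask; the boolean inputs are placeholders for the driver-side checks:

/* Sketch: build a poll mask by OR-ing bits together, never overwriting the
 * event bit. Illustrative only, not the vb2 code. */
#define _XOPEN_SOURCE 700
#include <poll.h>
#include <stdbool.h>
#include <stdio.h>

static unsigned int compose_poll_mask(bool event_pending, bool is_output,
				      bool buffer_done)
{
	unsigned int res = 0;

	if (event_pending)
		res |= POLLPRI;
	if (buffer_done)
		res |= is_output ? (POLLOUT | POLLWRNORM)
				 : (POLLIN | POLLRDNORM);
	return res;
}

int main(void)
{
	printf("event only: 0x%x\n", compose_poll_mask(true, false, false));
	printf("event + capture buffer: 0x%x\n",
	       compose_poll_mask(true, false, true));
	return 0;
}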
@@ -1839,7 +1857,6 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
* (multiplane buffers are not supported).
*/
if (q->bufs[0]->num_planes != 1) {
- fileio->req.count = 0;
ret = -EBUSY;
goto err_reqbufs;
}
@@ -1886,6 +1903,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
return ret;
err_reqbufs:
+ fileio->req.count = 0;
vb2_reqbufs(q, &fileio->req);
err_kfree:
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 5e8b0710105b..0960d7f0d394 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -95,6 +95,16 @@ static struct vivi_fmt formats[] = {
.depth = 16,
},
{
+ .name = "4:2:2, packed, YVYU",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .depth = 16,
+ },
+ {
+ .name = "4:2:2, packed, VYUY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .depth = 16,
+ },
+ {
.name = "RGB565 (LE)",
.fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
.depth = 16,
@@ -114,6 +124,26 @@ static struct vivi_fmt formats[] = {
.fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
.depth = 16,
},
+ {
+ .name = "RGB24 (LE)",
+ .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
+ .depth = 24,
+ },
+ {
+ .name = "RGB24 (BE)",
+ .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
+ .depth = 24,
+ },
+ {
+ .name = "RGB32 (LE)",
+ .fourcc = V4L2_PIX_FMT_RGB32, /* argb */
+ .depth = 32,
+ },
+ {
+ .name = "RGB32 (BE)",
+ .fourcc = V4L2_PIX_FMT_BGR32, /* bgra */
+ .depth = 32,
+ },
};
static struct vivi_fmt *get_format(struct v4l2_format *f)
@@ -170,6 +200,7 @@ struct vivi_dev {
struct v4l2_ctrl *gain;
};
struct v4l2_ctrl *volume;
+ struct v4l2_ctrl *alpha;
struct v4l2_ctrl *button;
struct v4l2_ctrl *boolean;
struct v4l2_ctrl *int32;
@@ -177,6 +208,7 @@ struct vivi_dev {
struct v4l2_ctrl *menu;
struct v4l2_ctrl *string;
struct v4l2_ctrl *bitmask;
+ struct v4l2_ctrl *int_menu;
spinlock_t slock;
struct mutex mutex;
@@ -203,8 +235,10 @@ struct vivi_dev {
enum v4l2_field field;
unsigned int field_count;
- u8 bars[9][3];
- u8 line[MAX_WIDTH * 4];
+ u8 bars[9][3];
+ u8 line[MAX_WIDTH * 8];
+ unsigned int pixelsize;
+ u8 alpha_component;
};
/* ------------------------------------------------------------------
@@ -283,6 +317,8 @@ static void precalculate_bars(struct vivi_dev *dev)
switch (dev->fmt->fourcc) {
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
is_yuv = 1;
break;
case V4L2_PIX_FMT_RGB565:
@@ -297,6 +333,11 @@ static void precalculate_bars(struct vivi_dev *dev)
g >>= 3;
b >>= 3;
break;
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ break;
}
if (is_yuv) {
@@ -316,9 +357,11 @@ static void precalculate_bars(struct vivi_dev *dev)
#define TSTAMP_INPUT_X 10
#define TSTAMP_MIN_X (54 + TSTAMP_INPUT_X)
-static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos)
+/* 'odd' is true for pixels 1, 3, 5, etc. and false for pixels 0, 2, 4, etc. */
+static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos, bool odd)
{
u8 r_y, g_u, b_v;
+ u8 alpha = dev->alpha_component;
int color;
u8 *p;
@@ -326,46 +369,56 @@ static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos)
g_u = dev->bars[colorpos][1]; /* G or precalculated U */
b_v = dev->bars[colorpos][2]; /* B or precalculated V */
- for (color = 0; color < 4; color++) {
+ for (color = 0; color < dev->pixelsize; color++) {
p = buf + color;
switch (dev->fmt->fourcc) {
case V4L2_PIX_FMT_YUYV:
switch (color) {
case 0:
- case 2:
*p = r_y;
break;
case 1:
- *p = g_u;
- break;
- case 3:
- *p = b_v;
+ *p = odd ? b_v : g_u;
break;
}
break;
case V4L2_PIX_FMT_UYVY:
switch (color) {
+ case 0:
+ *p = odd ? b_v : g_u;
+ break;
case 1:
- case 3:
*p = r_y;
break;
+ }
+ break;
+ case V4L2_PIX_FMT_YVYU:
+ switch (color) {
case 0:
- *p = g_u;
+ *p = r_y;
break;
- case 2:
- *p = b_v;
+ case 1:
+ *p = odd ? g_u : b_v;
+ break;
+ }
+ break;
+ case V4L2_PIX_FMT_VYUY:
+ switch (color) {
+ case 0:
+ *p = odd ? g_u : b_v;
+ break;
+ case 1:
+ *p = r_y;
break;
}
break;
case V4L2_PIX_FMT_RGB565:
switch (color) {
case 0:
- case 2:
*p = (g_u << 5) | b_v;
break;
case 1:
- case 3:
*p = (r_y << 3) | (g_u >> 3);
break;
}
@@ -373,11 +426,9 @@ static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos)
case V4L2_PIX_FMT_RGB565X:
switch (color) {
case 0:
- case 2:
*p = (r_y << 3) | (g_u >> 3);
break;
case 1:
- case 3:
*p = (g_u << 5) | b_v;
break;
}
@@ -385,24 +436,78 @@ static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos)
case V4L2_PIX_FMT_RGB555:
switch (color) {
case 0:
- case 2:
*p = (g_u << 5) | b_v;
break;
case 1:
- case 3:
- *p = (r_y << 2) | (g_u >> 3);
+ *p = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
break;
}
break;
case V4L2_PIX_FMT_RGB555X:
switch (color) {
case 0:
+ *p = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
+ break;
+ case 1:
+ *p = (g_u << 5) | b_v;
+ break;
+ }
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ switch (color) {
+ case 0:
+ *p = r_y;
+ break;
+ case 1:
+ *p = g_u;
+ break;
case 2:
- *p = (r_y << 2) | (g_u >> 3);
+ *p = b_v;
+ break;
+ }
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ switch (color) {
+ case 0:
+ *p = b_v;
break;
case 1:
+ *p = g_u;
+ break;
+ case 2:
+ *p = r_y;
+ break;
+ }
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ switch (color) {
+ case 0:
+ *p = alpha;
+ break;
+ case 1:
+ *p = r_y;
+ break;
+ case 2:
+ *p = g_u;
+ break;
case 3:
- *p = (g_u << 5) | b_v;
+ *p = b_v;
+ break;
+ }
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ switch (color) {
+ case 0:
+ *p = b_v;
+ break;
+ case 1:
+ *p = g_u;
+ break;
+ case 2:
+ *p = r_y;
+ break;
+ case 3:
+ *p = alpha;
break;
}
break;
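The per-pixel switch above comes down to the byte order of the four packed 4:2:2 variants, where two horizontally adjacent pixels share one U/V pair; that sharing is why gen_twopix() now takes the 'odd' flag. A standalone sketch that packs one pixel pair for each FourCC, using the byte layouts as documented for V4L2 packed YUV; it is an illustration, not the driver code:

/* Pack two adjacent pixels (sharing one U/V pair) into four bytes. */
#include <stdint.h>
#include <stdio.h>

#define FOURCC(a, b, c, d) \
	((uint32_t)(a) | ((uint32_t)(b) << 8) | \
	 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

static void pack_pair(uint32_t fourcc, uint8_t y0, uint8_t y1,
		      uint8_t u, uint8_t v, uint8_t out[4])
{
	if (fourcc == FOURCC('Y', 'U', 'Y', 'V')) {
		out[0] = y0; out[1] = u;  out[2] = y1; out[3] = v;
	} else if (fourcc == FOURCC('Y', 'V', 'Y', 'U')) {
		out[0] = y0; out[1] = v;  out[2] = y1; out[3] = u;
	} else if (fourcc == FOURCC('U', 'Y', 'V', 'Y')) {
		out[0] = u;  out[1] = y0; out[2] = v;  out[3] = y1;
	} else {				/* 'VYUY' */
		out[0] = v;  out[1] = y0; out[2] = u;  out[3] = y1;
	}
}

int main(void)
{
	uint8_t buf[4];

	pack_pair(FOURCC('U', 'Y', 'V', 'Y'), 0x10, 0x20, 0x80, 0x90, buf);
	printf("UYVY pair: %02x %02x %02x %02x\n",
	       buf[0], buf[1], buf[2], buf[3]);
	return 0;
}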
@@ -414,10 +519,10 @@ static void precalculate_line(struct vivi_dev *dev)
{
int w;
- for (w = 0; w < dev->width * 2; w += 2) {
- int colorpos = (w / (dev->width / 8) % 8);
+ for (w = 0; w < dev->width * 2; w++) {
+ int colorpos = w / (dev->width / 8) % 8;
- gen_twopix(dev, dev->line + w * 2, colorpos);
+ gen_twopix(dev, dev->line + w * dev->pixelsize, colorpos, w & 1);
}
}
@@ -433,7 +538,7 @@ static void gen_text(struct vivi_dev *dev, char *basep,
/* Print stream time */
for (line = y; line < y + 16; line++) {
int j = 0;
- char *pos = basep + line * dev->width * 2 + x * 2;
+ char *pos = basep + line * dev->width * dev->pixelsize + x * dev->pixelsize;
char *s;
for (s = text; *s; s++) {
@@ -443,9 +548,9 @@ static void gen_text(struct vivi_dev *dev, char *basep,
for (i = 0; i < 7; i++, j++) {
/* Draw white font on black background */
if (chr & (1 << (7 - i)))
- gen_twopix(dev, pos + j * 2, WHITE);
+ gen_twopix(dev, pos + j * dev->pixelsize, WHITE, (x+y) & 1);
else
- gen_twopix(dev, pos + j * 2, TEXT_BLACK);
+ gen_twopix(dev, pos + j * dev->pixelsize, TEXT_BLACK, (x+y) & 1);
}
}
}
@@ -466,7 +571,9 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
return;
for (h = 0; h < hmax; h++)
- memcpy(vbuf + h * wmax * 2, dev->line + (dev->mv_count % wmax) * 2, wmax * 2);
+ memcpy(vbuf + h * wmax * dev->pixelsize,
+ dev->line + (dev->mv_count % wmax) * dev->pixelsize,
+ wmax * dev->pixelsize);
/* Updates stream time */
@@ -484,15 +591,16 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
gen_text(dev, vbuf, line++ * 16, 16, str);
gain = v4l2_ctrl_g_ctrl(dev->gain);
- mutex_lock(&dev->ctrl_handler.lock);
+ mutex_lock(dev->ctrl_handler.lock);
snprintf(str, sizeof(str), " brightness %3d, contrast %3d, saturation %3d, hue %d ",
dev->brightness->cur.val,
dev->contrast->cur.val,
dev->saturation->cur.val,
dev->hue->cur.val);
gen_text(dev, vbuf, line++ * 16, 16, str);
- snprintf(str, sizeof(str), " autogain %d, gain %3d, volume %3d ",
- dev->autogain->cur.val, gain, dev->volume->cur.val);
+ snprintf(str, sizeof(str), " autogain %d, gain %3d, volume %3d, alpha 0x%02x ",
+ dev->autogain->cur.val, gain, dev->volume->cur.val,
+ dev->alpha->cur.val);
gen_text(dev, vbuf, line++ * 16, 16, str);
snprintf(str, sizeof(str), " int32 %d, int64 %lld, bitmask %08x ",
dev->int32->cur.val,
@@ -503,8 +611,12 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
dev->boolean->cur.val,
dev->menu->qmenu[dev->menu->cur.val],
dev->string->cur.string);
- mutex_unlock(&dev->ctrl_handler.lock);
gen_text(dev, vbuf, line++ * 16, 16, str);
+ snprintf(str, sizeof(str), " integer_menu %lld, value %d ",
+ dev->int_menu->qmenu_int[dev->int_menu->cur.val],
+ dev->int_menu->cur.val);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+ mutex_unlock(dev->ctrl_handler.lock);
if (dev->button_pressed) {
dev->button_pressed--;
snprintf(str, sizeof(str), " button pressed!");
@@ -657,7 +769,7 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
struct vivi_dev *dev = vb2_get_drv_priv(vq);
unsigned long size;
- size = dev->width * dev->height * 2;
+ size = dev->width * dev->height * dev->pixelsize;
if (0 == *nbuffers)
*nbuffers = 32;
@@ -721,7 +833,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
dev->height < 32 || dev->height > MAX_HEIGHT)
return -EINVAL;
- size = dev->width * dev->height * 2;
+ size = dev->width * dev->height * dev->pixelsize;
if (vb2_plane_size(vb, 0) < size) {
dprintk(dev, 1, "%s data will not fit into plane (%lu < %lu)\n",
__func__, vb2_plane_size(vb, 0), size);
@@ -915,6 +1027,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
dev->fmt = get_format(f);
+ dev->pixelsize = dev->fmt->depth / 8;
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
dev->field = f->fmt.pix.field;
@@ -1016,8 +1129,15 @@ static int vivi_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct vivi_dev *dev = container_of(ctrl->handler, struct vivi_dev, ctrl_handler);
- if (ctrl == dev->button)
- dev->button_pressed = 30;
+ switch (ctrl->id) {
+ case V4L2_CID_ALPHA_COMPONENT:
+ dev->alpha_component = ctrl->val;
+ break;
+ default:
+ if (ctrl == dev->button)
+ dev->button_pressed = 30;
+ break;
+ }
return 0;
}
@@ -1039,17 +1159,10 @@ static unsigned int
vivi_poll(struct file *file, struct poll_table_struct *wait)
{
struct vivi_dev *dev = video_drvdata(file);
- struct v4l2_fh *fh = file->private_data;
struct vb2_queue *q = &dev->vb_vidq;
- unsigned int res;
dprintk(dev, 1, "%s\n", __func__);
- res = vb2_poll(q, file, wait);
- if (v4l2_event_pending(fh))
- res |= POLLPRI;
- else
- poll_wait(file, &fh->wait, wait);
- return res;
+ return vb2_poll(q, file, wait);
}
static int vivi_close(struct file *file)
@@ -1165,6 +1278,22 @@ static const struct v4l2_ctrl_config vivi_ctrl_bitmask = {
.step = 0,
};
+static const s64 vivi_ctrl_int_menu_values[] = {
+ 1, 1, 2, 3, 5, 8, 13, 21, 42,
+};
+
+static const struct v4l2_ctrl_config vivi_ctrl_int_menu = {
+ .ops = &vivi_ctrl_ops,
+ .id = VIVI_CID_CUSTOM_BASE + 7,
+ .name = "Integer menu",
+ .type = V4L2_CTRL_TYPE_INTEGER_MENU,
+ .min = 1,
+ .max = 8,
+ .def = 4,
+ .menu_skip_mask = 0x02,
+ .qmenu_int = vivi_ctrl_int_menu_values,
+};
+
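For an integer menu control, min and max bound the valid indices into qmenu_int and each set bit in menu_skip_mask removes the index at the same position, so the configuration above allows indices 1 to 8 except index 1. A short sketch of that check, assuming the usual V4L2 rule that bit i of the skip mask disables menu index i:

/* Sketch: enumerate the menu indices a config like the one above allows. */
#include <stdint.h>
#include <stdio.h>

static int menu_index_valid(int idx, int min, int max, uint64_t skip_mask)
{
	return idx >= min && idx <= max && !(skip_mask & (1ULL << idx));
}

int main(void)
{
	static const long long qmenu_int[] = { 1, 1, 2, 3, 5, 8, 13, 21, 42 };
	int i;

	for (i = 0; i < 9; i++)
		if (menu_index_valid(i, 1, 8, 0x02))
			printf("index %d -> %lld\n", i, qmenu_int[i]);
	return 0;
}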
static const struct v4l2_file_operations vivi_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
@@ -1252,6 +1381,7 @@ static int __init vivi_create_instance(int inst)
dev->fmt = &formats[0];
dev->width = 640;
dev->height = 480;
+ dev->pixelsize = dev->fmt->depth / 8;
hdl = &dev->ctrl_handler;
v4l2_ctrl_handler_init(hdl, 11);
dev->volume = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
@@ -1268,6 +1398,8 @@ static int __init vivi_create_instance(int inst)
V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
dev->gain = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
V4L2_CID_GAIN, 0, 255, 1, 100);
+ dev->alpha = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 0);
dev->button = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_button, NULL);
dev->int32 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int32, NULL);
dev->int64 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int64, NULL);
@@ -1275,6 +1407,7 @@ static int __init vivi_create_instance(int inst)
dev->menu = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_menu, NULL);
dev->string = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_string, NULL);
dev->bitmask = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_bitmask, NULL);
+ dev->int_menu = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int_menu, NULL);
if (hdl->error) {
ret = hdl->error;
goto unreg_dev;
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index 7fd7ac567e1a..db2a6003a1c3 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -62,6 +62,9 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
#include <linux/parport.h>
/*#define DEBUG*/ /* Undef me for production */
@@ -104,6 +107,7 @@
struct w9966 {
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler hdl;
unsigned char dev_state;
unsigned char i2c_state;
unsigned short ppmode;
@@ -567,7 +571,8 @@ static int cam_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, cam->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, W9966_DRIVERNAME, sizeof(vcap->card));
strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
- vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -595,67 +600,25 @@ static int cam_s_input(struct file *file, void *fh, unsigned int inp)
return (inp > 0) ? -EINVAL : 0;
}
-static int cam_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
+static int cam_s_ctrl(struct v4l2_ctrl *ctrl)
{
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
- case V4L2_CID_CONTRAST:
- return v4l2_ctrl_query_fill(qc, -64, 64, 1, 64);
- case V4L2_CID_SATURATION:
- return v4l2_ctrl_query_fill(qc, -64, 64, 1, 64);
- case V4L2_CID_HUE:
- return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
- }
- return -EINVAL;
-}
-
-static int cam_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct w9966 *cam = video_drvdata(file);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = cam->brightness;
- break;
- case V4L2_CID_CONTRAST:
- ctrl->value = cam->contrast;
- break;
- case V4L2_CID_SATURATION:
- ctrl->value = cam->color;
- break;
- case V4L2_CID_HUE:
- ctrl->value = cam->hue;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int cam_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct w9966 *cam = video_drvdata(file);
+ struct w9966 *cam =
+ container_of(ctrl->handler, struct w9966, hdl);
int ret = 0;
mutex_lock(&cam->lock);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- cam->brightness = ctrl->value;
+ cam->brightness = ctrl->val;
break;
case V4L2_CID_CONTRAST:
- cam->contrast = ctrl->value;
+ cam->contrast = ctrl->val;
break;
case V4L2_CID_SATURATION:
- cam->color = ctrl->value;
+ cam->color = ctrl->val;
break;
case V4L2_CID_HUE:
- cam->hue = ctrl->value;
+ cam->hue = ctrl->val;
break;
default:
ret = -EINVAL;
@@ -813,6 +776,9 @@ out:
static const struct v4l2_file_operations w9966_fops = {
.owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
.unlocked_ioctl = video_ioctl2,
.read = w9966_v4l_read,
};
@@ -822,13 +788,17 @@ static const struct v4l2_ioctl_ops w9966_ioctl_ops = {
.vidioc_g_input = cam_g_input,
.vidioc_s_input = cam_s_input,
.vidioc_enum_input = cam_enum_input,
- .vidioc_queryctrl = cam_queryctrl,
- .vidioc_g_ctrl = cam_g_ctrl,
- .vidioc_s_ctrl = cam_s_ctrl,
.vidioc_enum_fmt_vid_cap = cam_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = cam_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = cam_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = cam_try_fmt_vid_cap,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_ctrl_ops cam_ctrl_ops = {
+ .s_ctrl = cam_s_ctrl,
};
@@ -849,6 +819,20 @@ static int w9966_init(struct w9966 *cam, struct parport *port)
v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
return -1;
}
+
+ v4l2_ctrl_handler_init(&cam->hdl, 4);
+ v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops,
+ V4L2_CID_CONTRAST, -64, 64, 1, 64);
+ v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops,
+ V4L2_CID_SATURATION, -64, 64, 1, 64);
+ v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ if (cam->hdl.error) {
+ v4l2_err(v4l2_dev, "couldn't register controls\n");
+ return -1;
+ }
cam->pport = port;
cam->brightness = 128;
cam->contrast = 64;
@@ -898,6 +882,8 @@ static int w9966_init(struct w9966 *cam, struct parport *port)
cam->vdev.fops = &w9966_fops;
cam->vdev.ioctl_ops = &w9966_ioctl_ops;
cam->vdev.release = video_device_release_empty;
+ cam->vdev.ctrl_handler = &cam->hdl;
+ set_bit(V4L2_FL_USE_FH_PRIO, &cam->vdev.flags);
video_set_drvdata(&cam->vdev, cam);
mutex_init(&cam->lock);
@@ -923,6 +909,8 @@ static void w9966_term(struct w9966 *cam)
w9966_set_state(cam, W9966_STATE_VDEV, 0);
}
+ v4l2_ctrl_handler_free(&cam->hdl);
+
/* Terminate from IEEE1284 mode and release pdev block */
if (w9966_get_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
w9966_pdev_claim(cam);
diff --git a/drivers/media/video/zoran/zoran_device.c b/drivers/media/video/zoran/zoran_device.c
index e86173bd1327..a4cd504b8eee 100644
--- a/drivers/media/video/zoran/zoran_device.c
+++ b/drivers/media/video/zoran/zoran_device.c
@@ -542,11 +542,9 @@ void write_overlay_mask(struct zoran_fh *fh, struct v4l2_clip *vp, int count)
u32 *mask;
int x, y, width, height;
unsigned i, j, k;
- u32 reg;
/* fill mask with one bits */
memset(fh->overlay_mask, ~0, mask_line_size * 4 * BUZ_MAX_HEIGHT);
- reg = 0;
for (i = 0; i < count; ++i) {
/* pick up local copy of clip */
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index 4c09ab781ec3..c57310931810 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -1131,8 +1131,14 @@ static int setup_fbuffer(struct zoran_fh *fh,
}
-static int setup_window(struct zoran_fh *fh, int x, int y, int width, int height,
- struct v4l2_clip __user *clips, int clipcount, void __user *bitmap)
+static int setup_window(struct zoran_fh *fh,
+ int x,
+ int y,
+ int width,
+ int height,
+ struct v4l2_clip __user *clips,
+ unsigned int clipcount,
+ void __user *bitmap)
{
struct zoran *zr = fh->zr;
struct v4l2_clip *vcp = NULL;
@@ -1155,6 +1161,14 @@ static int setup_window(struct zoran_fh *fh, int x, int y, int width, int height
return -EINVAL;
}
+ if (clipcount > 2048) {
+ dprintk(1,
+ KERN_ERR
+ "%s: %s - invalid clipcount\n",
+ ZR_DEVNAME(zr), __func__);
+ return -EINVAL;
+ }
+
/*
* The video front end needs 4-byte aligned line sizes; we correct that
silently here if necessary
@@ -1218,7 +1232,7 @@ static int setup_window(struct zoran_fh *fh, int x, int y, int width, int height
(width * height + 7) / 8)) {
return -EFAULT;
}
- } else if (clipcount > 0) {
+ } else if (clipcount) {
/* write our own bitmap from the clips */
vcp = vmalloc(sizeof(struct v4l2_clip) * (clipcount + 4));
if (vcp == NULL) {
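The new clipcount bound in setup_window() above protects the later vmalloc(sizeof(struct v4l2_clip) * (clipcount + 4)) from absurd user-supplied counts. A minimal user-space sketch of the same validate-before-allocate pattern; the 2048 limit and the struct are placeholders taken from the hunk, not shared code:

/* Reject an unreasonable element count before sizing an allocation from it. */
#include <stdio.h>
#include <stdlib.h>

struct clip { int x, y, w, h; };

static struct clip *alloc_clips(unsigned int clipcount)
{
	if (clipcount > 2048) {		/* sanity limit, as in the hunk above */
		fprintf(stderr, "invalid clipcount %u\n", clipcount);
		return NULL;
	}
	/* four spare entries, as the driver reserves for its border clips */
	return calloc(clipcount + 4, sizeof(struct clip));
}

int main(void)
{
	struct clip *c = alloc_clips(16);

	if (c)
		puts("allocation ok");
	free(c);
	return 0;
}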
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index cd2e39fc4bf0..e44cb330bbc8 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -507,14 +507,12 @@ static void zr364xx_fillbuff(struct zr364xx_camera *cam,
const char *tmpbuf;
char *vbuf = videobuf_to_vmalloc(&buf->vb);
unsigned long last_frame;
- struct zr364xx_framei *frm;
if (!vbuf)
return;
last_frame = cam->last_frame;
if (last_frame != -1) {
- frm = &cam->buffer.frame[last_frame];
tmpbuf = (const char *)cam->buffer.frame[last_frame].lpvbits;
switch (buf->fmt->fourcc) {
case V4L2_PIX_FMT_JPEG:
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a5c591ffe395..d99db5623acf 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1653,7 +1653,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
unsigned long port;
u32 msize;
u32 psize;
- u8 revision;
int r = -ENODEV;
struct pci_dev *pdev;
@@ -1670,8 +1669,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
return r;
}
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
-
if (sizeof(dma_addr_t) > 4) {
const uint64_t required_mask = dma_get_required_mask
(&pdev->dev);
@@ -1779,7 +1776,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
MPT_ADAPTER *ioc;
u8 cb_idx;
int r = -ENODEV;
- u8 revision;
u8 pcixcmd;
static int mpt_ids = 0;
#ifdef CONFIG_PROC_FS
@@ -1887,8 +1883,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
ioc->name, &ioc->facts, &ioc->pfacts[0]));
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
- mpt_get_product_name(pdev->vendor, pdev->device, revision, ioc->prod_name);
+ mpt_get_product_name(pdev->vendor, pdev->device, pdev->revision,
+ ioc->prod_name);
switch (pdev->device)
{
@@ -1903,7 +1899,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
break;
case MPI_MANUFACTPAGE_DEVICEID_FC929X:
- if (revision < XL_929) {
+ if (pdev->revision < XL_929) {
/* 929X Chip Fix. Set Split transactions level
* for PCIX. Set MOST bits to zero.
*/
@@ -1934,7 +1930,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
/* 1030 Chip Fix. Disable Split transactions
* for PCIX. Set MOST bits to zero if Rev < C0( = 8).
*/
- if (revision < C0_1030) {
+ if (pdev->revision < C0_1030) {
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
@@ -6483,6 +6479,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
printk(MYIOC_s_INFO_FMT "%s: host reset in"
" progress mpt_config timed out.!!\n",
__func__, ioc->name);
+ mutex_unlock(&ioc->mptbase_cmds.mutex);
return -EFAULT;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
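The mutex_unlock() added before the early return in mpt_config() fixes a classic lock leak: a timeout path bailed out while still holding mptbase_cmds.mutex. A standalone sketch of the bug class and its fix, using pthreads purely for illustration:

/* Every early return from a locked section must drop the lock again. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cmd_mutex = PTHREAD_MUTEX_INITIALIZER;

static int issue_command(int timed_out)
{
	pthread_mutex_lock(&cmd_mutex);

	if (timed_out) {
		/* the bug was returning here without this unlock */
		pthread_mutex_unlock(&cmd_mutex);
		return -1;
	}

	/* ... normal completion path ... */
	pthread_mutex_unlock(&cmd_mutex);
	return 0;
}

int main(void)
{
	printf("timed out: %d\n", issue_command(1));
	printf("ok: %d\n", issue_command(0));
	return 0;
}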
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 6e6e16aab9da..b383b6961e59 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1250,7 +1250,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
int iocnum;
unsigned int port;
int cim_rev;
- u8 revision;
struct scsi_device *sdev;
VirtDevice *vdevice;
@@ -1324,8 +1323,7 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
pdev = (struct pci_dev *) ioc->pcidev;
karg->pciId = pdev->device;
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
- karg->hwRev = revision;
+ karg->hwRev = pdev->revision;
karg->subSystemDevice = pdev->subsystem_device;
karg->subSystemVendor = pdev->subsystem_vendor;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f4b4dad77391..e129c820df7d 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -106,6 +106,19 @@ config UCB1400_CORE
To compile this driver as a module, choose M here: the
module will be called ucb1400_core.
+config MFD_LM3533
+ tristate "LM3533 Lighting Power chip"
+ depends on I2C
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Say yes here to enable support for National Semiconductor / TI
+ LM3533 Lighting Power chips.
+
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the LED,
+ backlight or ambient-light-sensor functionality of the device.
+
config TPS6105X
tristate "TPS61050/61052 Boost Converters"
depends on I2C
@@ -177,8 +190,8 @@ config MFD_TPS65910
bool "TPS65910 Power Management chip"
depends on I2C=y && GPIOLIB
select MFD_CORE
- select GPIO_TPS65910
select REGMAP_I2C
+ select IRQ_DOMAIN
help
If you say yes here you get support for the TPS65910 series of
Power Management chips.
@@ -409,6 +422,19 @@ config PMIC_ADP5520
individual components like LCD backlight, LEDs, GPIOs and Keypad
under the corresponding menus.
+config MFD_MAX77693
+ bool "Maxim Semiconductor MAX77693 PMIC Support"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Say yes here to add support for the Maxim Semiconductor MAX77693.
+ This is a companion Power Management IC with Flash, Haptic, Charger,
+ and MUIC (Micro USB Interface Controller) controls on chip.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
config MFD_MAX8925
bool "Maxim Semiconductor MAX8925 PMIC Support"
depends on I2C=y && GENERIC_HARDIRQS
@@ -454,9 +480,9 @@ config MFD_S5M_CORE
of the device
config MFD_WM8400
- tristate "Support Wolfson Microelectronics WM8400"
+ bool "Support Wolfson Microelectronics WM8400"
select MFD_CORE
- depends on I2C
+ depends on I2C=y
select REGMAP_I2C
help
Support for the Wolfson Microelectronics WM8400 PMIC and audio
@@ -473,6 +499,7 @@ config MFD_WM831X_I2C
select MFD_CORE
select MFD_WM831X
select REGMAP_I2C
+ select IRQ_DOMAIN
depends on I2C=y && GENERIC_HARDIRQS
help
Support for the Wolfson Microelectronics WM831x and WM832x PMICs
@@ -485,6 +512,7 @@ config MFD_WM831X_SPI
select MFD_CORE
select MFD_WM831X
select REGMAP_SPI
+ select IRQ_DOMAIN
depends on SPI_MASTER && GENERIC_HARDIRQS
help
Support for the Wolfson Microelectronics WM831x and WM832x PMICs
@@ -597,17 +625,32 @@ config MFD_MC13783
tristate
config MFD_MC13XXX
- tristate "Support Freescale MC13783 and MC13892"
- depends on SPI_MASTER
+ tristate
+ depends on SPI_MASTER || I2C
select MFD_CORE
select MFD_MC13783
help
- Support for the Freescale (Atlas) PMIC and audio CODECs
- MC13783 and MC13892.
- This driver provides common support for accessing the device,
+ Enable support for the Freescale MC13783 and MC13892 PMICs.
+ This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the
functionality of the device.
+config MFD_MC13XXX_SPI
+ tristate "Freescale MC13783 and MC13892 SPI interface"
+ depends on SPI_MASTER
+ select REGMAP_SPI
+ select MFD_MC13XXX
+ help
+ Select this if your MC13xxx is connected via an SPI bus.
+
+config MFD_MC13XXX_I2C
+ tristate "Freescale MC13892 I2C interface"
+ depends on I2C
+ select REGMAP_I2C
+ select MFD_MC13XXX
+ help
+ Select this if your MC13xxx is connected via an I2C bus.
+
config ABX500_CORE
bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
default y if ARCH_U300 || ARCH_U8500
@@ -651,7 +694,7 @@ config EZX_PCAP
config AB8500_CORE
bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
- depends on GENERIC_HARDIRQS && ABX500_CORE
+ depends on GENERIC_HARDIRQS && ABX500_CORE && MFD_DB8500_PRCMU
select MFD_CORE
help
Select this option to enable access to AB8500 power management
@@ -722,6 +765,16 @@ config LPC_SCH
LPC bridge function of the Intel SCH provides support for
System Management Bus and General Purpose I/O.
+config LPC_ICH
+ tristate "Intel ICH LPC"
+ depends on PCI
+ select MFD_CORE
+ help
+ The LPC bridge function of the Intel ICH provides support for
+ many functional units. This driver provides needed support for
+ other drivers to control these functions, currently GPIO and
+ watchdog.
+
config MFD_RDC321X
tristate "Support for RDC-R321x southbridge"
select MFD_CORE
@@ -854,6 +907,11 @@ config MFD_RC5T583
Additional drivers must be enabled in order to use the
different functionality of the device.
+config MFD_STA2X11
+ bool "STA2X11 multi function device support"
+ depends on STA2X11
+ select MFD_CORE
+
config MFD_ANATOP
bool "Support for Freescale i.MX on-chip ANATOP controller"
depends on SOC_IMX6Q
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 43672b87805a..75f6ed68a4b9 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
obj-$(CONFIG_MFD_TI_SSP) += ti-ssp.o
+obj-$(CONFIG_MFD_STA2X11) += sta2x11-mfd.o
obj-$(CONFIG_MFD_STMPE) += stmpe.o
obj-$(CONFIG_STMPE_I2C) += stmpe-i2c.o
obj-$(CONFIG_STMPE_SPI) += stmpe-spi.o
@@ -54,6 +55,8 @@ obj-$(CONFIG_TWL6030_PWM) += twl6030-pwm.o
obj-$(CONFIG_TWL6040_CORE) += twl6040-core.o twl6040-irq.o
obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o
+obj-$(CONFIG_MFD_MC13XXX_SPI) += mc13xxx-spi.o
+obj-$(CONFIG_MFD_MC13XXX_I2C) += mc13xxx-i2c.o
obj-$(CONFIG_MFD_CORE) += mfd-core.o
@@ -75,6 +78,7 @@ obj-$(CONFIG_PMIC_DA9052) += da9052-core.o
obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o
obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o
+obj-$(CONFIG_MFD_MAX77693) += max77693.o max77693-irq.o
max8925-objs := max8925-core.o max8925-i2c.o
obj-$(CONFIG_MFD_MAX8925) += max8925.o
obj-$(CONFIG_MFD_MAX8997) += max8997.o max8997-irq.o
@@ -87,15 +91,15 @@ obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
obj-$(CONFIG_ABX500_CORE) += abx500-core.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
-obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
-# ab8500-i2c need to come after db8500-prcmu (which provides the channel)
-obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
+# ab8500-core needs to come after db8500-prcmu (which provides the channel)
+obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
obj-$(CONFIG_LPC_SCH) += lpc_sch.o
+obj-$(CONFIG_LPC_ICH) += lpc_ich.o
obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o
obj-$(CONFIG_MFD_JANZ_CMODIO) += janz-cmodio.o
obj-$(CONFIG_MFD_JZ4740_ADC) += jz4740-adc.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 1f08704f7ae8..dac0e2998603 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -18,7 +18,10 @@
#include <linux/mfd/core.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/dbx500-prcmu.h>
#include <linux/regulator/ab8500.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
/*
* Interrupt register offsets
@@ -91,12 +94,24 @@
#define AB8500_IT_MASK23_REG 0x56
#define AB8500_IT_MASK24_REG 0x57
+/*
+ * latch hierarchy registers
+ */
+#define AB8500_IT_LATCHHIER1_REG 0x60
+#define AB8500_IT_LATCHHIER2_REG 0x61
+#define AB8500_IT_LATCHHIER3_REG 0x62
+
+#define AB8500_IT_LATCHHIER_NUM 3
+
#define AB8500_REV_REG 0x80
#define AB8500_IC_NAME_REG 0x82
#define AB8500_SWITCH_OFF_STATUS 0x00
#define AB8500_TURN_ON_STATUS 0x00
+static bool no_bm; /* No battery management */
+module_param(no_bm, bool, S_IRUGO);
+
#define AB9540_MODEM_CTRL2_REG 0x23
#define AB9540_MODEM_CTRL2_SWDBBRSTN_BIT BIT(2)
@@ -125,6 +140,41 @@ static const char ab8500_version_str[][7] = {
[AB8500_VERSION_AB8540] = "AB8540",
};
+static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
+{
+ int ret;
+
+ ret = prcmu_abb_write((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
+ if (ret < 0)
+ dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+ return ret;
+}
+
+static int ab8500_i2c_write_masked(struct ab8500 *ab8500, u16 addr, u8 mask,
+ u8 data)
+{
+ int ret;
+
+ ret = prcmu_abb_write_masked((u8)(addr >> 8), (u8)(addr & 0xFF), &data,
+ &mask, 1);
+ if (ret < 0)
+ dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+ return ret;
+}
+
+static int ab8500_i2c_read(struct ab8500 *ab8500, u16 addr)
+{
+ int ret;
+ u8 data;
+
+ ret = prcmu_abb_read((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
+ if (ret < 0) {
+ dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+ return ret;
+ }
+ return (int)data;
+}
+
static int ab8500_get_chip_id(struct device *dev)
{
struct ab8500 *ab8500;
@@ -161,9 +211,13 @@ static int set_register_interruptible(struct ab8500 *ab8500, u8 bank,
static int ab8500_set_register(struct device *dev, u8 bank,
u8 reg, u8 value)
{
+ int ret;
struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
- return set_register_interruptible(ab8500, bank, reg, value);
+ atomic_inc(&ab8500->transfer_ongoing);
+ ret = set_register_interruptible(ab8500, bank, reg, value);
+ atomic_dec(&ab8500->transfer_ongoing);
+ return ret;
}
static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -192,9 +246,13 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
static int ab8500_get_register(struct device *dev, u8 bank,
u8 reg, u8 *value)
{
+ int ret;
struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
- return get_register_interruptible(ab8500, bank, reg, value);
+ atomic_inc(&ab8500->transfer_ongoing);
+ ret = get_register_interruptible(ab8500, bank, reg, value);
+ atomic_dec(&ab8500->transfer_ongoing);
+ return ret;
}
static int mask_and_set_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -241,11 +299,14 @@ out:
static int ab8500_mask_and_set_register(struct device *dev,
u8 bank, u8 reg, u8 bitmask, u8 bitvalues)
{
+ int ret;
struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
- return mask_and_set_register_interruptible(ab8500, bank, reg,
- bitmask, bitvalues);
-
+ atomic_inc(&ab8500->transfer_ongoing);
+ ret = mask_and_set_register_interruptible(ab8500, bank, reg,
+ bitmask, bitvalues);
+ atomic_dec(&ab8500->transfer_ongoing);
+ return ret;
}
static struct abx500_ops ab8500_ops = {
@@ -264,6 +325,7 @@ static void ab8500_irq_lock(struct irq_data *data)
struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
mutex_lock(&ab8500->irq_lock);
+ atomic_inc(&ab8500->transfer_ongoing);
}
static void ab8500_irq_sync_unlock(struct irq_data *data)
@@ -292,7 +354,7 @@ static void ab8500_irq_sync_unlock(struct irq_data *data)
reg = AB8500_IT_MASK1_REG + ab8500->irq_reg_offset[i];
set_register_interruptible(ab8500, AB8500_INTERRUPT, reg, new);
}
-
+ atomic_dec(&ab8500->transfer_ongoing);
mutex_unlock(&ab8500->irq_lock);
}
@@ -325,6 +387,90 @@ static struct irq_chip ab8500_irq_chip = {
.irq_unmask = ab8500_irq_unmask,
};
+static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
+ int latch_offset, u8 latch_val)
+{
+ int int_bit = __ffs(latch_val);
+ int line, i;
+
+ do {
+ int_bit = __ffs(latch_val);
+
+ for (i = 0; i < ab8500->mask_size; i++)
+ if (ab8500->irq_reg_offset[i] == latch_offset)
+ break;
+
+ if (i >= ab8500->mask_size) {
+ dev_err(ab8500->dev, "Register offset 0x%2x not declared\n",
+ latch_offset);
+ return -ENXIO;
+ }
+
+ line = (i << 3) + int_bit;
+ latch_val &= ~(1 << int_bit);
+
+ handle_nested_irq(ab8500->irq_base + line);
+ } while (latch_val);
+
+ return 0;
+}
+
+static int ab8500_handle_hierarchical_latch(struct ab8500 *ab8500,
+ int hier_offset, u8 hier_val)
+{
+ int latch_bit, status;
+ u8 latch_offset, latch_val;
+
+ do {
+ latch_bit = __ffs(hier_val);
+ latch_offset = (hier_offset << 3) + latch_bit;
+
+ /* Fix inconsistent ITFromLatch25 bit mapping... */
+ if (unlikely(latch_offset == 17))
+ latch_offset = 24;
+
+ status = get_register_interruptible(ab8500,
+ AB8500_INTERRUPT,
+ AB8500_IT_LATCH1_REG + latch_offset,
+ &latch_val);
+ if (status < 0 || latch_val == 0)
+ goto discard;
+
+ status = ab8500_handle_hierarchical_line(ab8500,
+ latch_offset, latch_val);
+ if (status < 0)
+ return status;
+discard:
+ hier_val &= ~(1 << latch_bit);
+ } while (hier_val);
+
+ return 0;
+}
+
+static irqreturn_t ab8500_hierarchical_irq(int irq, void *dev)
+{
+ struct ab8500 *ab8500 = dev;
+ u8 i;
+
+ dev_vdbg(ab8500->dev, "interrupt\n");
+
+ /* Hierarchical interrupt version */
+ for (i = 0; i < AB8500_IT_LATCHHIER_NUM; i++) {
+ int status;
+ u8 hier_val;
+
+ status = get_register_interruptible(ab8500, AB8500_INTERRUPT,
+ AB8500_IT_LATCHHIER1_REG + i, &hier_val);
+ if (status < 0 || hier_val == 0)
+ continue;
+
+ status = ab8500_handle_hierarchical_latch(ab8500, i, hier_val);
+ if (status < 0)
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
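The hierarchical path reads up to three LATCHHIER registers, walks their set bits to find which latch registers are non-zero, and then walks each latch's bits to dispatch the individual interrupt lines. A standalone sketch of that two-level bit walk; register contents here are made-up values and the helper names are not the kernel's:

/* Two-level decode: an outer register says which latch registers fired,
 * each latch register says which interrupt lines fired. */
#include <stdio.h>

static int first_set_bit(unsigned int v)
{
	return __builtin_ctz(v);	/* v must be non-zero */
}

static void dispatch_line(int line)
{
	printf("handle nested irq line %d\n", line);
}

static void decode(unsigned int hier_val, const unsigned char latch[8])
{
	while (hier_val) {
		int latch_idx = first_set_bit(hier_val);
		unsigned int latch_val = latch[latch_idx];

		while (latch_val) {
			int bit = first_set_bit(latch_val);

			dispatch_line(latch_idx * 8 + bit);
			latch_val &= ~(1u << bit);
		}
		hier_val &= ~(1u << latch_idx);
	}
}

int main(void)
{
	unsigned char latch[8] = { 0 };

	latch[1] = 0x05;		/* lines 8 and 10 pending */
	decode(0x02, latch);		/* only latch register 1 is non-zero */
	return 0;
}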
static irqreturn_t ab8500_irq(int irq, void *dev)
{
struct ab8500 *ab8500 = dev;
@@ -332,6 +478,8 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
dev_vdbg(ab8500->dev, "interrupt\n");
+ atomic_inc(&ab8500->transfer_ongoing);
+
for (i = 0; i < ab8500->mask_size; i++) {
int regoffset = ab8500->irq_reg_offset[i];
int status;
@@ -355,9 +503,10 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
handle_nested_irq(ab8500->irq_base + line);
value &= ~(1 << bit);
+
} while (value);
}
-
+ atomic_dec(&ab8500->transfer_ongoing);
return IRQ_HANDLED;
}
@@ -411,6 +560,14 @@ static void ab8500_irq_remove(struct ab8500 *ab8500)
}
}
+int ab8500_suspend(struct ab8500 *ab8500)
+{
+ if (atomic_read(&ab8500->transfer_ongoing))
+ return -EINVAL;
+ else
+ return 0;
+}
+
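The register accessors now bump transfer_ongoing around every PRCMU transfer, and ab8500_suspend() refuses to suspend while the counter is non-zero. A user-space sketch of that veto pattern, with C11 atomics standing in for the kernel's atomic_t; illustrative only:

/* An atomic in-flight counter lets a suspend hook veto suspend while a
 * transfer is still running. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int transfer_ongoing;

static void do_transfer(void)
{
	atomic_fetch_add(&transfer_ongoing, 1);
	/* ... talk to the hardware ... */
	atomic_fetch_sub(&transfer_ongoing, 1);
}

static int try_suspend(void)
{
	return atomic_load(&transfer_ongoing) ? -1 /* -EINVAL analogue */ : 0;
}

int main(void)
{
	do_transfer();
	printf("suspend allowed: %s\n", try_suspend() == 0 ? "yes" : "no");
	return 0;
}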
/* AB8500 GPIO Resources */
static struct resource __devinitdata ab8500_gpio_resources[] = {
{
@@ -744,6 +901,39 @@ static struct resource __devinitdata ab8500_usb_resources[] = {
},
};
+static struct resource __devinitdata ab8505_iddet_resources[] = {
+ {
+ .name = "KeyDeglitch",
+ .start = AB8505_INT_KEYDEGLITCH,
+ .end = AB8505_INT_KEYDEGLITCH,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "KP",
+ .start = AB8505_INT_KP,
+ .end = AB8505_INT_KP,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "IKP",
+ .start = AB8505_INT_IKP,
+ .end = AB8505_INT_IKP,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "IKR",
+ .start = AB8505_INT_IKR,
+ .end = AB8505_INT_IKR,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "KeyStuck",
+ .start = AB8505_INT_KEYSTUCK,
+ .end = AB8505_INT_KEYSTUCK,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct resource __devinitdata ab8500_temp_resources[] = {
{
.name = "AB8500_TEMP_WARM",
@@ -778,35 +968,11 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
.resources = ab8500_rtc_resources,
},
{
- .name = "ab8500-charger",
- .num_resources = ARRAY_SIZE(ab8500_charger_resources),
- .resources = ab8500_charger_resources,
- },
- {
- .name = "ab8500-btemp",
- .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
- .resources = ab8500_btemp_resources,
- },
- {
- .name = "ab8500-fg",
- .num_resources = ARRAY_SIZE(ab8500_fg_resources),
- .resources = ab8500_fg_resources,
- },
- {
- .name = "ab8500-chargalg",
- .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
- .resources = ab8500_chargalg_resources,
- },
- {
.name = "ab8500-acc-det",
.num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
.resources = ab8500_av_acc_detect_resources,
},
{
- .name = "ab8500-codec",
- },
-
- {
.name = "ab8500-poweron-key",
.num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
.resources = ab8500_poweronkey_db_resources,
@@ -834,6 +1000,29 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
},
};
+static struct mfd_cell __devinitdata ab8500_bm_devs[] = {
+ {
+ .name = "ab8500-charger",
+ .num_resources = ARRAY_SIZE(ab8500_charger_resources),
+ .resources = ab8500_charger_resources,
+ },
+ {
+ .name = "ab8500-btemp",
+ .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
+ .resources = ab8500_btemp_resources,
+ },
+ {
+ .name = "ab8500-fg",
+ .num_resources = ARRAY_SIZE(ab8500_fg_resources),
+ .resources = ab8500_fg_resources,
+ },
+ {
+ .name = "ab8500-chargalg",
+ .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
+ .resources = ab8500_chargalg_resources,
+ },
+};
+
static struct mfd_cell __devinitdata ab8500_devs[] = {
{
.name = "ab8500-gpio",
@@ -845,6 +1034,9 @@ static struct mfd_cell __devinitdata ab8500_devs[] = {
.num_resources = ARRAY_SIZE(ab8500_usb_resources),
.resources = ab8500_usb_resources,
},
+ {
+ .name = "ab8500-codec",
+ },
};
static struct mfd_cell __devinitdata ab9540_devs[] = {
@@ -858,6 +1050,18 @@ static struct mfd_cell __devinitdata ab9540_devs[] = {
.num_resources = ARRAY_SIZE(ab8500_usb_resources),
.resources = ab8500_usb_resources,
},
+ {
+ .name = "ab9540-codec",
+ },
+};
+
+/* Device list common to ab9540 and ab8505 */
+static struct mfd_cell __devinitdata ab9540_ab8505_devs[] = {
+ {
+ .name = "ab-iddet",
+ .num_resources = ARRAY_SIZE(ab8505_iddet_resources),
+ .resources = ab8505_iddet_resources,
+ },
};
static ssize_t show_chip_id(struct device *dev,
@@ -1003,18 +1207,66 @@ static struct attribute_group ab9540_attr_group = {
.attrs = ab9540_sysfs_entries,
};
-int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
+static const struct of_device_id ab8500_match[] = {
+ {
+ .compatible = "stericsson,ab8500",
+ .data = (void *)AB8500_VERSION_AB8500,
+ },
+ {},
+};
+
+static int __devinit ab8500_probe(struct platform_device *pdev)
{
- struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev);
+ struct ab8500_platform_data *plat = dev_get_platdata(&pdev->dev);
+ const struct platform_device_id *platid = platform_get_device_id(pdev);
+ enum ab8500_version version = AB8500_VERSION_UNDEFINED;
+ struct device_node *np = pdev->dev.of_node;
+ struct ab8500 *ab8500;
+ struct resource *resource;
int ret;
int i;
u8 value;
+ ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
+ if (!ab8500)
+ return -ENOMEM;
+
if (plat)
ab8500->irq_base = plat->irq_base;
+ else if (np)
+ ret = of_property_read_u32(np, "stericsson,irq-base", &ab8500->irq_base);
+
+ if (!ab8500->irq_base) {
+ dev_info(&pdev->dev, "couldn't find irq-base\n");
+ ret = -EINVAL;
+ goto out_free_ab8500;
+ }
+
+ ab8500->dev = &pdev->dev;
+
+ resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!resource) {
+ ret = -ENODEV;
+ goto out_free_ab8500;
+ }
+
+ ab8500->irq = resource->start;
+
+ ab8500->read = ab8500_i2c_read;
+ ab8500->write = ab8500_i2c_write;
+ ab8500->write_masked = ab8500_i2c_write_masked;
mutex_init(&ab8500->lock);
mutex_init(&ab8500->irq_lock);
+ atomic_set(&ab8500->transfer_ongoing, 0);
+
+ platform_set_drvdata(pdev, ab8500);
+
+ if (platid)
+ version = platid->driver_data;
+ else if (np)
+ version = (unsigned int)
+ of_match_device(ab8500_match, &pdev->dev)->data;
if (version != AB8500_VERSION_UNDEFINED)
ab8500->version = version;
@@ -1022,7 +1274,7 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
ret = get_register_interruptible(ab8500, AB8500_MISC,
AB8500_IC_NAME_REG, &value);
if (ret < 0)
- return ret;
+ goto out_free_ab8500;
ab8500->version = value;
}
@@ -1030,7 +1282,7 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
ret = get_register_interruptible(ab8500, AB8500_MISC,
AB8500_REV_REG, &value);
if (ret < 0)
- return ret;
+ goto out_free_ab8500;
ab8500->chip_id = value;
@@ -1105,30 +1357,57 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
if (ret)
goto out_freeoldmask;
- ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq,
- IRQF_ONESHOT | IRQF_NO_SUSPEND,
- "ab8500", ab8500);
+ /* Activate this feature only in ab9540
+ * until tests are done on ab8500 1p2 or later. */
+ if (is_ab9540(ab8500))
+ ret = request_threaded_irq(ab8500->irq, NULL,
+ ab8500_hierarchical_irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "ab8500", ab8500);
+ else
+ ret = request_threaded_irq(ab8500->irq, NULL,
+ ab8500_irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "ab8500", ab8500);
if (ret)
goto out_removeirq;
}
- ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
- ARRAY_SIZE(abx500_common_devs), NULL,
- ab8500->irq_base);
+ if (!np) {
+ ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
+ ARRAY_SIZE(abx500_common_devs), NULL,
+ ab8500->irq_base);
- if (ret)
- goto out_freeirq;
+ if (ret)
+ goto out_freeirq;
+
+ if (is_ab9540(ab8500))
+ ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
+ ARRAY_SIZE(ab9540_devs), NULL,
+ ab8500->irq_base);
+ else
+ ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
+ ARRAY_SIZE(ab8500_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ goto out_freeirq;
- if (is_ab9540(ab8500))
- ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
- ARRAY_SIZE(ab9540_devs), NULL,
- ab8500->irq_base);
- else
- ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
- ARRAY_SIZE(ab9540_devs), NULL,
- ab8500->irq_base);
- if (ret)
- goto out_freeirq;
+ if (is_ab9540(ab8500) || is_ab8505(ab8500))
+ ret = mfd_add_devices(ab8500->dev, 0, ab9540_ab8505_devs,
+ ARRAY_SIZE(ab9540_ab8505_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ goto out_freeirq;
+ }
+
+ if (!no_bm) {
+ /* Add battery management devices */
+ ret = mfd_add_devices(ab8500->dev, 0, ab8500_bm_devs,
+ ARRAY_SIZE(ab8500_bm_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ dev_err(ab8500->dev, "error adding bm devices\n");
+ }
if (is_ab9540(ab8500))
ret = sysfs_create_group(&ab8500->dev->kobj,
@@ -1151,12 +1430,16 @@ out_freeoldmask:
kfree(ab8500->oldmask);
out_freemask:
kfree(ab8500->mask);
+out_free_ab8500:
+ kfree(ab8500);
return ret;
}
-int __devexit ab8500_exit(struct ab8500 *ab8500)
+static int __devexit ab8500_remove(struct platform_device *pdev)
{
+ struct ab8500 *ab8500 = platform_get_drvdata(pdev);
+
if (is_ab9540(ab8500))
sysfs_remove_group(&ab8500->dev->kobj, &ab9540_attr_group);
else
@@ -1168,10 +1451,42 @@ int __devexit ab8500_exit(struct ab8500 *ab8500)
}
kfree(ab8500->oldmask);
kfree(ab8500->mask);
+ kfree(ab8500);
return 0;
}
+static const struct platform_device_id ab8500_id[] = {
+ { "ab8500-core", AB8500_VERSION_AB8500 },
+ { "ab8505-i2c", AB8500_VERSION_AB8505 },
+ { "ab9540-i2c", AB8500_VERSION_AB9540 },
+ { "ab8540-i2c", AB8500_VERSION_AB8540 },
+ { }
+};
+
+static struct platform_driver ab8500_core_driver = {
+ .driver = {
+ .name = "ab8500-core",
+ .owner = THIS_MODULE,
+ .of_match_table = ab8500_match,
+ },
+ .probe = ab8500_probe,
+ .remove = __devexit_p(ab8500_remove),
+ .id_table = ab8500_id,
+};
+
+static int __init ab8500_core_init(void)
+{
+ return platform_driver_register(&ab8500_core_driver);
+}
+
+static void __exit ab8500_core_exit(void)
+{
+ platform_driver_unregister(&ab8500_core_driver);
+}
+arch_initcall(ab8500_core_init);
+module_exit(ab8500_core_exit);
+
MODULE_AUTHOR("Mattias Wallin, Srinidhi Kasagar, Rabin Vincent");
MODULE_DESCRIPTION("AB8500 MFD core");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 9a0211aa8897..50c4c89ab220 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -608,10 +608,16 @@ static int __devexit ab8500_debug_remove(struct platform_device *plf)
return 0;
}
+static const struct of_device_id ab8500_debug_match[] = {
+ { .compatible = "stericsson,ab8500-debug", },
+ {}
+};
+
static struct platform_driver ab8500_debug_driver = {
.driver = {
.name = "ab8500-debug",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_debug_match,
},
.probe = ab8500_debug_probe,
.remove = __devexit_p(ab8500_debug_remove)
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index c39fc716e1dc..b86fd8e1ec3f 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -584,7 +584,7 @@ static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
gpadc->irq = platform_get_irq_byname(pdev, "SW_CONV_END");
if (gpadc->irq < 0) {
- dev_err(gpadc->dev, "failed to get platform irq-%d\n",
+ dev_err(&pdev->dev, "failed to get platform irq-%d\n",
gpadc->irq);
ret = gpadc->irq;
goto fail;
@@ -648,12 +648,18 @@ static int __devexit ab8500_gpadc_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ab8500_gpadc_match[] = {
+ { .compatible = "stericsson,ab8500-gpadc", },
+ {}
+};
+
static struct platform_driver ab8500_gpadc_driver = {
.probe = ab8500_gpadc_probe,
.remove = __devexit_p(ab8500_gpadc_remove),
.driver = {
.name = "ab8500-gpadc",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_gpadc_match,
},
};
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
deleted file mode 100644
index b83045f102be..000000000000
--- a/drivers/mfd/ab8500-i2c.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson.
- * License Terms: GNU General Public License v2
- * This file was based on drivers/mfd/ab8500-spi.c
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/abx500/ab8500.h>
-#include <linux/mfd/dbx500-prcmu.h>
-
-static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
-{
- int ret;
-
- ret = prcmu_abb_write((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
- if (ret < 0)
- dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
- return ret;
-}
-
-static int ab8500_i2c_write_masked(struct ab8500 *ab8500, u16 addr, u8 mask,
- u8 data)
-{
- int ret;
-
- ret = prcmu_abb_write_masked((u8)(addr >> 8), (u8)(addr & 0xFF), &data,
- &mask, 1);
- if (ret < 0)
- dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
- return ret;
-}
-
-static int ab8500_i2c_read(struct ab8500 *ab8500, u16 addr)
-{
- int ret;
- u8 data;
-
- ret = prcmu_abb_read((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
- if (ret < 0) {
- dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
- return ret;
- }
- return (int)data;
-}
-
-static int __devinit ab8500_i2c_probe(struct platform_device *plf)
-{
- const struct platform_device_id *platid = platform_get_device_id(plf);
- struct ab8500 *ab8500;
- struct resource *resource;
- int ret;
-
- ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
- if (!ab8500)
- return -ENOMEM;
-
- ab8500->dev = &plf->dev;
-
- resource = platform_get_resource(plf, IORESOURCE_IRQ, 0);
- if (!resource) {
- kfree(ab8500);
- return -ENODEV;
- }
-
- ab8500->irq = resource->start;
-
- ab8500->read = ab8500_i2c_read;
- ab8500->write = ab8500_i2c_write;
- ab8500->write_masked = ab8500_i2c_write_masked;
-
- platform_set_drvdata(plf, ab8500);
-
- ret = ab8500_init(ab8500, platid->driver_data);
- if (ret)
- kfree(ab8500);
-
-
- return ret;
-}
-
-static int __devexit ab8500_i2c_remove(struct platform_device *plf)
-{
- struct ab8500 *ab8500 = platform_get_drvdata(plf);
-
- ab8500_exit(ab8500);
- kfree(ab8500);
-
- return 0;
-}
-
-static const struct platform_device_id ab8500_id[] = {
- { "ab8500-i2c", AB8500_VERSION_AB8500 },
- { "ab8505-i2c", AB8500_VERSION_AB8505 },
- { "ab9540-i2c", AB8500_VERSION_AB9540 },
- { "ab8540-i2c", AB8500_VERSION_AB8540 },
- { }
-};
-
-static struct platform_driver ab8500_i2c_driver = {
- .driver = {
- .name = "ab8500-i2c",
- .owner = THIS_MODULE,
- },
- .probe = ab8500_i2c_probe,
- .remove = __devexit_p(ab8500_i2c_remove),
- .id_table = ab8500_id,
-};
-
-static int __init ab8500_i2c_init(void)
-{
- return platform_driver_register(&ab8500_i2c_driver);
-}
-
-static void __exit ab8500_i2c_exit(void)
-{
- platform_driver_unregister(&ab8500_i2c_driver);
-}
-arch_initcall(ab8500_i2c_init);
-module_exit(ab8500_i2c_exit);
-
-MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com");
-MODULE_DESCRIPTION("AB8500 Core access via PRCMU I2C");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index c28d4eb1eff0..5a3e51ccf258 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -61,10 +61,16 @@ static int __devexit ab8500_sysctrl_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ab8500_sysctrl_match[] = {
+ { .compatible = "stericsson,ab8500-sysctrl", },
+ {}
+};
+
static struct platform_driver ab8500_sysctrl_driver = {
.driver = {
.name = "ab8500-sysctrl",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_sysctrl_match,
},
.probe = ab8500_sysctrl_probe,
.remove = __devexit_p(ab8500_sysctrl_remove),
diff --git a/drivers/mfd/anatop-mfd.c b/drivers/mfd/anatop-mfd.c
index 2af42480635e..6da06341f6c9 100644
--- a/drivers/mfd/anatop-mfd.c
+++ b/drivers/mfd/anatop-mfd.c
@@ -41,39 +41,26 @@
#include <linux/of_address.h>
#include <linux/mfd/anatop.h>
-u32 anatop_get_bits(struct anatop *adata, u32 addr, int bit_shift,
- int bit_width)
+u32 anatop_read_reg(struct anatop *adata, u32 addr)
{
- u32 val, mask;
-
- if (bit_width == 32)
- mask = ~0;
- else
- mask = (1 << bit_width) - 1;
-
- val = readl(adata->ioreg + addr);
- val = (val >> bit_shift) & mask;
-
- return val;
+ return readl(adata->ioreg + addr);
}
-EXPORT_SYMBOL_GPL(anatop_get_bits);
+EXPORT_SYMBOL_GPL(anatop_read_reg);
-void anatop_set_bits(struct anatop *adata, u32 addr, int bit_shift,
- int bit_width, u32 data)
+void anatop_write_reg(struct anatop *adata, u32 addr, u32 data, u32 mask)
{
- u32 val, mask;
+ u32 val;
- if (bit_width == 32)
- mask = ~0;
- else
- mask = (1 << bit_width) - 1;
+ data &= mask;
spin_lock(&adata->reglock);
- val = readl(adata->ioreg + addr) & ~(mask << bit_shift);
- writel((data << bit_shift) | val, adata->ioreg + addr);
+ val = readl(adata->ioreg + addr);
+ val &= ~mask;
+ val |= data;
+ writel(val, adata->ioreg + addr);
spin_unlock(&adata->reglock);
}
-EXPORT_SYMBOL_GPL(anatop_set_bits);
+EXPORT_SYMBOL_GPL(anatop_write_reg);
static const struct of_device_id of_anatop_match[] = {
{ .compatible = "fsl,imx6q-anatop", },
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 1582c3d95257..383421bf5760 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -353,12 +353,28 @@ static int asic3_gpio_irq_type(struct irq_data *data, unsigned int type)
return 0;
}
+static int asic3_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct asic3 *asic = irq_data_get_irq_chip_data(data);
+ u32 bank, index;
+ u16 bit;
+
+ bank = asic3_irq_to_bank(asic, data->irq);
+ index = asic3_irq_to_index(asic, data->irq);
+ bit = 1 << index;
+
+ asic3_set_register(asic, bank + ASIC3_GPIO_SLEEP_MASK, bit, !on);
+
+ return 0;
+}
+
static struct irq_chip asic3_gpio_irq_chip = {
.name = "ASIC3-GPIO",
.irq_ack = asic3_mask_gpio_irq,
.irq_mask = asic3_mask_gpio_irq,
.irq_unmask = asic3_unmask_gpio_irq,
.irq_set_type = asic3_gpio_irq_type,
+ .irq_set_wake = asic3_gpio_irq_set_wake,
};
static struct irq_chip asic3_irq_chip = {
@@ -529,7 +545,7 @@ static int asic3_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct asic3 *asic = container_of(chip, struct asic3, gpio);
- return (offset < ASIC3_NUM_GPIOS) ? asic->irq_base + offset : -ENXIO;
+ return asic->irq_base + offset;
}
static __init int asic3_gpio_probe(struct platform_device *pdev,
@@ -894,10 +910,13 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
asic3_mmc_resources[0].start >>= asic->bus_shift;
asic3_mmc_resources[0].end >>= asic->bus_shift;
- ret = mfd_add_devices(&pdev->dev, pdev->id,
+ if (pdata->clock_rate) {
+ ds1wm_pdata.clock_rate = pdata->clock_rate;
+ ret = mfd_add_devices(&pdev->dev, pdev->id,
&asic3_cell_ds1wm, 1, mem, asic->irq_base);
- if (ret < 0)
- goto out;
+ if (ret < 0)
+ goto out;
+ }
if (mem_sdio && (irq >= 0)) {
ret = mfd_add_devices(&pdev->dev, pdev->id,
@@ -1000,6 +1019,9 @@ static int __init asic3_probe(struct platform_device *pdev)
asic3_mfd_probe(pdev, pdata, mem);
+ asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
+ (ASIC3_EXTCF_CF0_BUF_EN|ASIC3_EXTCF_CF0_PWAIT_EN), 1);
+
dev_info(asic->dev, "ASIC3 Core driver\n");
return 0;
@@ -1021,6 +1043,9 @@ static int __devexit asic3_remove(struct platform_device *pdev)
int ret;
struct asic3 *asic = platform_get_drvdata(pdev);
+ asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
+ (ASIC3_EXTCF_CF0_BUF_EN|ASIC3_EXTCF_CF0_PWAIT_EN), 0);
+
asic3_mfd_remove(pdev);
ret = asic3_gpio_remove(pdev);
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index 315fef5d466a..3419e726de47 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -186,18 +186,7 @@ static struct pci_driver cs5535_mfd_driver = {
.remove = __devexit_p(cs5535_mfd_remove),
};
-static int __init cs5535_mfd_init(void)
-{
- return pci_register_driver(&cs5535_mfd_driver);
-}
-
-static void __exit cs5535_mfd_exit(void)
-{
- pci_unregister_driver(&cs5535_mfd_driver);
-}
-
-module_init(cs5535_mfd_init);
-module_exit(cs5535_mfd_exit);
+module_pci_driver(cs5535_mfd_driver);
MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_DESCRIPTION("MFD driver for CS5535/CS5536 southbridge's ISA PCI device");
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 7776aff46269..1f1313c90573 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -318,6 +318,135 @@ static bool da9052_reg_volatile(struct device *dev, unsigned int reg)
}
}
+/*
+ * The TBAT look-up table is computed from the R90 register (an 8-bit
+ * register) reading as below. The battery temperature is in
+ * millidegrees Centigrade:
+ * TBAT = (1/(t1 + 1/298) - 273) * 1000 mC
+ * where t1 = (1/B) * ln((ADCval * 2.5) / (R25 * ITBAT * 255))
+ * Default values are R25 = 10e3, B = 3380, ITBAT = 50e-6
+ * Example:
+ * R25 = 10e3, B = 3380, ITBAT = 50e-6, ADCVAL = 62 (decimal) gives
+ * TBAT = 20015 millidegrees Centigrade
+ */
+static const int32_t tbat_lookup[255] = {
+ 183258, 144221, 124334, 111336, 101826, 94397, 88343, 83257,
+ 78889, 75071, 71688, 68656, 65914, 63414, 61120, 59001,
+ 57036, 55204, 53490, 51881, 50364, 48931, 47574, 46285,
+ 45059, 43889, 42772, 41703, 40678, 39694, 38748, 37838,
+ 36961, 36115, 35297, 34507, 33743, 33002, 32284, 31588,
+ 30911, 30254, 29615, 28994, 28389, 27799, 27225, 26664,
+ 26117, 25584, 25062, 24553, 24054, 23567, 23091, 22624,
+ 22167, 21719, 21281, 20851, 20429, 20015, 19610, 19211,
+ 18820, 18436, 18058, 17688, 17323, 16965, 16612, 16266,
+ 15925, 15589, 15259, 14933, 14613, 14298, 13987, 13681,
+ 13379, 13082, 12788, 12499, 12214, 11933, 11655, 11382,
+ 11112, 10845, 10582, 10322, 10066, 9812, 9562, 9315,
+ 9071, 8830, 8591, 8356, 8123, 7893, 7665, 7440,
+ 7218, 6998, 6780, 6565, 6352, 6141, 5933, 5726,
+ 5522, 5320, 5120, 4922, 4726, 4532, 4340, 4149,
+ 3961, 3774, 3589, 3406, 3225, 3045, 2867, 2690,
+ 2516, 2342, 2170, 2000, 1831, 1664, 1498, 1334,
+ 1171, 1009, 849, 690, 532, 376, 221, 67,
+ -84, -236, -386, -535, -683, -830, -975, -1119,
+ -1263, -1405, -1546, -1686, -1825, -1964, -2101, -2237,
+ -2372, -2506, -2639, -2771, -2902, -3033, -3162, -3291,
+ -3418, -3545, -3671, -3796, -3920, -4044, -4166, -4288,
+ -4409, -4529, -4649, -4767, -4885, -5002, -5119, -5235,
+ -5349, -5464, -5577, -5690, -5802, -5913, -6024, -6134,
+ -6244, -6352, -6461, -6568, -6675, -6781, -6887, -6992,
+ -7096, -7200, -7303, -7406, -7508, -7609, -7710, -7810,
+ -7910, -8009, -8108, -8206, -8304, -8401, -8497, -8593,
+ -8689, -8784, -8878, -8972, -9066, -9159, -9251, -9343,
+ -9435, -9526, -9617, -9707, -9796, -9886, -9975, -10063,
+ -10151, -10238, -10325, -10412, -10839, -10923, -11007, -11090,
+ -11173, -11256, -11338, -11420, -11501, -11583, -11663, -11744,
+ -11823, -11903, -11982
+};
+
+static const u8 chan_mux[DA9052_ADC_VBBAT + 1] = {
+ [DA9052_ADC_VDDOUT] = DA9052_ADC_MAN_MUXSEL_VDDOUT,
+ [DA9052_ADC_ICH] = DA9052_ADC_MAN_MUXSEL_ICH,
+ [DA9052_ADC_TBAT] = DA9052_ADC_MAN_MUXSEL_TBAT,
+ [DA9052_ADC_VBAT] = DA9052_ADC_MAN_MUXSEL_VBAT,
+ [DA9052_ADC_IN4] = DA9052_ADC_MAN_MUXSEL_AD4,
+ [DA9052_ADC_IN5] = DA9052_ADC_MAN_MUXSEL_AD5,
+ [DA9052_ADC_IN6] = DA9052_ADC_MAN_MUXSEL_AD6,
+ [DA9052_ADC_VBBAT] = DA9052_ADC_MAN_MUXSEL_VBBAT
+};
+
+int da9052_adc_manual_read(struct da9052 *da9052, unsigned char channel)
+{
+ int ret;
+ unsigned short calc_data;
+ unsigned short data;
+ unsigned char mux_sel;
+
+ if (channel > DA9052_ADC_VBBAT)
+ return -EINVAL;
+
+ mutex_lock(&da9052->auxadc_lock);
+
+ /* Channel gets activated on enabling the Conversion bit */
+ mux_sel = chan_mux[channel] | DA9052_ADC_MAN_MAN_CONV;
+
+ ret = da9052_reg_write(da9052, DA9052_ADC_MAN_REG, mux_sel);
+ if (ret < 0)
+ goto err;
+
+ /* Wait for an interrupt */
+ if (!wait_for_completion_timeout(&da9052->done,
+ msecs_to_jiffies(500))) {
+ dev_err(da9052->dev,
+ "timeout waiting for ADC conversion interrupt\n");
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ ret = da9052_reg_read(da9052, DA9052_ADC_RES_H_REG);
+ if (ret < 0)
+ goto err;
+
+ calc_data = (unsigned short)ret;
+ data = calc_data << 2;
+
+ ret = da9052_reg_read(da9052, DA9052_ADC_RES_L_REG);
+ if (ret < 0)
+ goto err;
+
+ calc_data = (unsigned short)(ret & DA9052_ADC_RES_LSB);
+ data |= calc_data;
+
+ ret = data;
+
+err:
+ mutex_unlock(&da9052->auxadc_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(da9052_adc_manual_read);
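A minimal sketch of how a hypothetical sub-driver could consume the new manual-read helper; the caller context is illustrative and error handling is trimmed:

#include <linux/device.h>
#include <linux/mfd/da9052/da9052.h>

static int example_read_vbat(struct da9052 *da9052)
{
	int code;

	/* Negative errno on failure, otherwise the 10-bit conversion result
	 * assembled from the RES_H/RES_L registers above. */
	code = da9052_adc_manual_read(da9052, DA9052_ADC_VBAT);
	if (code < 0)
		return code;

	dev_dbg(da9052->dev, "VBAT ADC code: %d (0..1023)\n", code);
	return 0;
}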
+
+static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
+{
+ struct da9052 *da9052 = irq_data;
+
+ complete(&da9052->done);
+
+ return IRQ_HANDLED;
+}
+
+int da9052_adc_read_temp(struct da9052 *da9052)
+{
+ int tbat;
+
+ tbat = da9052_reg_read(da9052, DA9052_TBAT_RES_REG);
+ if (tbat <= 0)
+ return tbat;
+
+ /* ARRAY_SIZE check is not needed since TBAT is an 8-bit register */
+ return tbat_lookup[tbat - 1];
+}
+EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
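To make the lookup concrete: the ADCVAL = 62 example from the comment above lands on tbat_lookup[61], which is 20015, i.e. about 20 °C expressed in millidegrees. A tiny hypothetical caller (note that the return value does not distinguish a negative errno from a genuinely sub-zero temperature):

static void example_log_battery_temp(struct da9052 *da9052)
{
	int temp_mc = da9052_adc_read_temp(da9052);

	/* A raw TBAT code of 62 maps to tbat_lookup[61], i.e. 20015 mC. */
	dev_info(da9052->dev, "battery temperature: %d mC\n", temp_mc);
}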
+
static struct resource da9052_rtc_resource = {
.name = "ALM",
.start = DA9052_IRQ_ALARM,
@@ -646,6 +775,9 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
struct irq_desc *desc;
int ret;
+ mutex_init(&da9052->auxadc_lock);
+ init_completion(&da9052->done);
+
if (pdata && pdata->init != NULL)
pdata->init(da9052);
@@ -665,6 +797,12 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
da9052->irq_base = regmap_irq_chip_get_base(da9052->irq_data);
+ ret = request_threaded_irq(DA9052_IRQ_ADC_EOM, NULL, da9052_auxadc_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "adc irq", da9052);
+ if (ret != 0)
+ dev_err(da9052->dev, "DA9052 ADC IRQ failed ret=%d\n", ret);
+
ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
ARRAY_SIZE(da9052_subdev_info), NULL, 0);
if (ret)
@@ -673,6 +811,7 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
return 0;
err:
+ free_irq(DA9052_IRQ_ADC_EOM, da9052);
mfd_remove_devices(da9052->dev);
regmap_err:
return ret;
@@ -680,6 +819,7 @@ regmap_err:
void da9052_device_exit(struct da9052 *da9052)
{
+ free_irq(DA9052_IRQ_ADC_EOM, da9052);
regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
mfd_remove_devices(da9052->dev);
}
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 36b88e395499..82c9d6450286 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -22,6 +22,11 @@
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
+#ifdef CONFIG_OF
+#include <linux/of.h>
+#include <linux/of_device.h>
+#endif
+
static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
{
int reg_val, ret;
@@ -41,13 +46,31 @@ static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
return 0;
}
+static struct i2c_device_id da9052_i2c_id[] = {
+ {"da9052", DA9052},
+ {"da9053-aa", DA9053_AA},
+ {"da9053-ba", DA9053_BA},
+ {"da9053-bb", DA9053_BB},
+ {}
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id dialog_dt_ids[] = {
+ { .compatible = "dlg,da9052", .data = &da9052_i2c_id[0] },
+ { .compatible = "dlg,da9053-aa", .data = &da9052_i2c_id[1] },
+ { .compatible = "dlg,da9053-ab", .data = &da9052_i2c_id[2] },
+ { .compatible = "dlg,da9053-bb", .data = &da9052_i2c_id[3] },
+ { /* sentinel */ }
+};
+#endif
+
static int __devinit da9052_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct da9052 *da9052;
int ret;
- da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+ da9052 = devm_kzalloc(&client->dev, sizeof(struct da9052), GFP_KERNEL);
if (!da9052)
return -ENOMEM;
@@ -55,8 +78,7 @@ static int __devinit da9052_i2c_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_info(&client->dev, "Error in %s:i2c_check_functionality\n",
__func__);
- ret = -ENODEV;
- goto err;
+ return -ENODEV;
}
da9052->dev = &client->dev;
@@ -64,29 +86,39 @@ static int __devinit da9052_i2c_probe(struct i2c_client *client,
i2c_set_clientdata(client, da9052);
- da9052->regmap = regmap_init_i2c(client, &da9052_regmap_config);
+ da9052->regmap = devm_regmap_init_i2c(client, &da9052_regmap_config);
if (IS_ERR(da9052->regmap)) {
ret = PTR_ERR(da9052->regmap);
dev_err(&client->dev, "Failed to allocate register map: %d\n",
ret);
- goto err;
+ return ret;
}
ret = da9052_i2c_enable_multiwrite(da9052);
if (ret < 0)
- goto err_regmap;
+ return ret;
+
+#ifdef CONFIG_OF
+ if (!id) {
+ struct device_node *np = client->dev.of_node;
+ const struct of_device_id *deviceid;
+
+ deviceid = of_match_node(dialog_dt_ids, np);
+ id = (const struct i2c_device_id *)deviceid->data;
+ }
+#endif
+
+ if (!id) {
+ ret = -ENODEV;
+ dev_err(&client->dev, "id is null.\n");
+ return ret;
+ }
ret = da9052_device_init(da9052, id->driver_data);
if (ret != 0)
- goto err_regmap;
+ return ret;
return 0;
-
-err_regmap:
- regmap_exit(da9052->regmap);
-err:
- kfree(da9052);
- return ret;
}
static int __devexit da9052_i2c_remove(struct i2c_client *client)
@@ -94,20 +126,9 @@ static int __devexit da9052_i2c_remove(struct i2c_client *client)
struct da9052 *da9052 = i2c_get_clientdata(client);
da9052_device_exit(da9052);
- regmap_exit(da9052->regmap);
- kfree(da9052);
-
return 0;
}
-static struct i2c_device_id da9052_i2c_id[] = {
- {"da9052", DA9052},
- {"da9053-aa", DA9053_AA},
- {"da9053-ba", DA9053_BA},
- {"da9053-bb", DA9053_BB},
- {}
-};
-
static struct i2c_driver da9052_i2c_driver = {
.probe = da9052_i2c_probe,
.remove = __devexit_p(da9052_i2c_remove),
@@ -115,6 +136,9 @@ static struct i2c_driver da9052_i2c_driver = {
.driver = {
.name = "da9052",
.owner = THIS_MODULE,
+#ifdef CONFIG_OF
+ .of_match_table = dialog_dt_ids,
+#endif
},
};
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
index 6faf149e8d94..dbeadc5a6436 100644
--- a/drivers/mfd/da9052-spi.c
+++ b/drivers/mfd/da9052-spi.c
@@ -25,8 +25,9 @@ static int __devinit da9052_spi_probe(struct spi_device *spi)
{
int ret;
const struct spi_device_id *id = spi_get_device_id(spi);
- struct da9052 *da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+ struct da9052 *da9052;
+ da9052 = devm_kzalloc(&spi->dev, sizeof(struct da9052), GFP_KERNEL);
if (!da9052)
return -ENOMEM;
@@ -42,25 +43,19 @@ static int __devinit da9052_spi_probe(struct spi_device *spi)
da9052_regmap_config.read_flag_mask = 1;
da9052_regmap_config.write_flag_mask = 0;
- da9052->regmap = regmap_init_spi(spi, &da9052_regmap_config);
+ da9052->regmap = devm_regmap_init_spi(spi, &da9052_regmap_config);
if (IS_ERR(da9052->regmap)) {
ret = PTR_ERR(da9052->regmap);
dev_err(&spi->dev, "Failed to allocate register map: %d\n",
ret);
- goto err;
+ return ret;
}
ret = da9052_device_init(da9052, id->driver_data);
if (ret != 0)
- goto err_regmap;
+ return ret;
return 0;
-
-err_regmap:
- regmap_exit(da9052->regmap);
-err:
- kfree(da9052);
- return ret;
}
static int __devexit da9052_spi_remove(struct spi_device *spi)
@@ -68,9 +63,6 @@ static int __devexit da9052_spi_remove(struct spi_device *spi)
struct da9052 *da9052 = dev_get_drvdata(&spi->dev);
da9052_device_exit(da9052);
- regmap_exit(da9052->regmap);
- kfree(da9052);
-
return 0;
}
@@ -88,7 +80,6 @@ static struct spi_driver da9052_spi_driver = {
.id_table = da9052_spi_id,
.driver = {
.name = "da9052",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
};
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 5be32489714f..671c8bc14bbc 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2720,6 +2720,7 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = {
REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
+ REGULATOR_SUPPLY("v-i2c", "nmk-i2c.4"),
/* "v-mmc" changed to "vcore" in the mainline kernel */
REGULATOR_SUPPLY("vcore", "sdi0"),
REGULATOR_SUPPLY("vcore", "sdi1"),
@@ -2958,9 +2959,10 @@ static struct mfd_cell db8500_prcmu_devs[] = {
* prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
*
*/
-static int __init db8500_prcmu_probe(struct platform_device *pdev)
+static int __devinit db8500_prcmu_probe(struct platform_device *pdev)
{
- int err = 0;
+ struct device_node *np = pdev->dev.of_node;
+ int irq = 0, err = 0;
if (ux500_is_svp())
return -ENODEV;
@@ -2970,8 +2972,14 @@ static int __init db8500_prcmu_probe(struct platform_device *pdev)
/* Clean up the mailbox interrupts after pre-kernel code. */
writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
- err = request_threaded_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler,
- prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
+ if (np)
+ irq = platform_get_irq(pdev, 0);
+
+ if (!np || irq <= 0)
+ irq = IRQ_DB8500_PRCMU1;
+
+ err = request_threaded_irq(irq, prcmu_irq_handler,
+ prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
if (err < 0) {
pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n");
err = -EBUSY;
@@ -2981,14 +2989,16 @@ static int __init db8500_prcmu_probe(struct platform_device *pdev)
if (cpu_is_u8500v20_or_later())
prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
- err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
- ARRAY_SIZE(db8500_prcmu_devs), NULL,
- 0);
+ if (!np) {
+ err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
+ ARRAY_SIZE(db8500_prcmu_devs), NULL, 0);
+ if (err) {
+ pr_err("prcmu: Failed to add subdevices\n");
+ return err;
+ }
+ }
- if (err)
- pr_err("prcmu: Failed to add subdevices\n");
- else
- pr_info("DB8500 PRCMU initialized\n");
+ pr_info("DB8500 PRCMU initialized\n");
no_irq_return:
return err;
@@ -2999,11 +3009,12 @@ static struct platform_driver db8500_prcmu_driver = {
.name = "db8500-prcmu",
.owner = THIS_MODULE,
},
+ .probe = db8500_prcmu_probe,
};
static int __init db8500_prcmu_init(void)
{
- return platform_driver_probe(&db8500_prcmu_driver, db8500_prcmu_probe);
+ return platform_driver_register(&db8500_prcmu_driver);
}
arch_initcall(db8500_prcmu_init);
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index b76657eb0c51..59df5584cb58 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -406,7 +406,7 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
return -ENXIO;
}
- msic = kzalloc(sizeof(*msic), GFP_KERNEL);
+ msic = devm_kzalloc(&pdev->dev, sizeof(*msic), GFP_KERNEL);
if (!msic)
return -ENOMEM;
@@ -421,21 +421,13 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "failed to get SRAM iomem resource\n");
- ret = -ENODEV;
- goto fail_free_msic;
+ return -ENODEV;
}
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!res) {
- ret = -EBUSY;
- goto fail_free_msic;
- }
-
- msic->irq_base = ioremap_nocache(res->start, resource_size(res));
+ msic->irq_base = devm_request_and_ioremap(&pdev->dev, res);
if (!msic->irq_base) {
dev_err(&pdev->dev, "failed to map SRAM memory\n");
- ret = -ENOMEM;
- goto fail_release_region;
+ return -ENOMEM;
}
platform_set_drvdata(pdev, msic);
@@ -443,7 +435,7 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
ret = intel_msic_init_devices(msic);
if (ret) {
dev_err(&pdev->dev, "failed to initialize MSIC devices\n");
- goto fail_unmap_mem;
+ return ret;
}
dev_info(&pdev->dev, "Intel MSIC version %c%d (vendor %#x)\n",
@@ -451,27 +443,14 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
msic->vendor);
return 0;
-
-fail_unmap_mem:
- iounmap(msic->irq_base);
-fail_release_region:
- release_mem_region(res->start, resource_size(res));
-fail_free_msic:
- kfree(msic);
-
- return ret;
}
static int __devexit intel_msic_remove(struct platform_device *pdev)
{
struct intel_msic *msic = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
intel_msic_remove_devices(msic);
platform_set_drvdata(pdev, NULL);
- iounmap(msic->irq_base);
- release_mem_region(res->start, resource_size(res));
- kfree(msic);
return 0;
}
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index a9223ed1b7c5..2ea99989551a 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -283,23 +283,8 @@ static struct pci_driver cmodio_pci_driver = {
.remove = __devexit_p(cmodio_pci_remove),
};
-/*
- * Module Init / Exit
- */
-
-static int __init cmodio_init(void)
-{
- return pci_register_driver(&cmodio_pci_driver);
-}
-
-static void __exit cmodio_exit(void)
-{
- pci_unregister_driver(&cmodio_pci_driver);
-}
+module_pci_driver(cmodio_pci_driver);
MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("Janz CMOD-IO PCI MODULbus Carrier Board Driver");
MODULE_LICENSE("GPL");
-
-module_init(cmodio_init);
-module_exit(cmodio_exit);
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
new file mode 100644
index 000000000000..0b2879b87fd9
--- /dev/null
+++ b/drivers/mfd/lm3533-core.c
@@ -0,0 +1,667 @@
+/*
+ * lm3533-core.c -- LM3533 Core
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_BOOST_OVP_MASK 0x06
+#define LM3533_BOOST_OVP_SHIFT 1
+
+#define LM3533_BOOST_FREQ_MASK 0x01
+#define LM3533_BOOST_FREQ_SHIFT 0
+
+#define LM3533_BL_ID_MASK 1
+#define LM3533_LED_ID_MASK 3
+#define LM3533_BL_ID_MAX 1
+#define LM3533_LED_ID_MAX 3
+
+#define LM3533_HVLED_ID_MAX 2
+#define LM3533_LVLED_ID_MAX 5
+
+#define LM3533_REG_OUTPUT_CONF1 0x10
+#define LM3533_REG_OUTPUT_CONF2 0x11
+#define LM3533_REG_BOOST_PWM 0x2c
+
+#define LM3533_REG_MAX 0xb2
+
+
+static struct mfd_cell lm3533_als_devs[] = {
+ {
+ .name = "lm3533-als",
+ .id = -1,
+ },
+};
+
+static struct mfd_cell lm3533_bl_devs[] = {
+ {
+ .name = "lm3533-backlight",
+ .id = 0,
+ },
+ {
+ .name = "lm3533-backlight",
+ .id = 1,
+ },
+};
+
+static struct mfd_cell lm3533_led_devs[] = {
+ {
+ .name = "lm3533-leds",
+ .id = 0,
+ },
+ {
+ .name = "lm3533-leds",
+ .id = 1,
+ },
+ {
+ .name = "lm3533-leds",
+ .id = 2,
+ },
+ {
+ .name = "lm3533-leds",
+ .id = 3,
+ },
+};
+
+int lm3533_read(struct lm3533 *lm3533, u8 reg, u8 *val)
+{
+ int tmp;
+ int ret;
+
+ ret = regmap_read(lm3533->regmap, reg, &tmp);
+ if (ret < 0) {
+ dev_err(lm3533->dev, "failed to read register %02x: %d\n",
+ reg, ret);
+ return ret;
+ }
+
+ *val = tmp;
+
+ dev_dbg(lm3533->dev, "read [%02x]: %02x\n", reg, *val);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_read);
+
+int lm3533_write(struct lm3533 *lm3533, u8 reg, u8 val)
+{
+ int ret;
+
+ dev_dbg(lm3533->dev, "write [%02x]: %02x\n", reg, val);
+
+ ret = regmap_write(lm3533->regmap, reg, val);
+ if (ret < 0) {
+ dev_err(lm3533->dev, "failed to write register %02x: %d\n",
+ reg, ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_write);
+
+int lm3533_update(struct lm3533 *lm3533, u8 reg, u8 val, u8 mask)
+{
+ int ret;
+
+ dev_dbg(lm3533->dev, "update [%02x]: %02x/%02x\n", reg, val, mask);
+
+ ret = regmap_update_bits(lm3533->regmap, reg, mask, val);
+ if (ret < 0) {
+ dev_err(lm3533->dev, "failed to update register %02x: %d\n",
+ reg, ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_update);
+
+static int lm3533_set_boost_freq(struct lm3533 *lm3533,
+ enum lm3533_boost_freq freq)
+{
+ int ret;
+
+ ret = lm3533_update(lm3533, LM3533_REG_BOOST_PWM,
+ freq << LM3533_BOOST_FREQ_SHIFT,
+ LM3533_BOOST_FREQ_MASK);
+ if (ret)
+ dev_err(lm3533->dev, "failed to set boost frequency\n");
+
+ return ret;
+}
+
+
+static int lm3533_set_boost_ovp(struct lm3533 *lm3533,
+ enum lm3533_boost_ovp ovp)
+{
+ int ret;
+
+ ret = lm3533_update(lm3533, LM3533_REG_BOOST_PWM,
+ ovp << LM3533_BOOST_OVP_SHIFT,
+ LM3533_BOOST_OVP_MASK);
+ if (ret)
+ dev_err(lm3533->dev, "failed to set boost ovp\n");
+
+ return ret;
+}
+
+/*
+ * HVLED output config -- output hvled controlled by backlight bl
+ */
+static int lm3533_set_hvled_config(struct lm3533 *lm3533, u8 hvled, u8 bl)
+{
+ u8 val;
+ u8 mask;
+ int shift;
+ int ret;
+
+ if (hvled == 0 || hvled > LM3533_HVLED_ID_MAX)
+ return -EINVAL;
+
+ if (bl > LM3533_BL_ID_MAX)
+ return -EINVAL;
+
+ shift = hvled - 1;
+ mask = LM3533_BL_ID_MASK << shift;
+ val = bl << shift;
+
+ ret = lm3533_update(lm3533, LM3533_REG_OUTPUT_CONF1, val, mask);
+ if (ret)
+ dev_err(lm3533->dev, "failed to set hvled config\n");
+
+ return ret;
+}
+
+/*
+ * LVLED output config -- output lvled controlled by LED led
+ */
+static int lm3533_set_lvled_config(struct lm3533 *lm3533, u8 lvled, u8 led)
+{
+ u8 reg;
+ u8 val;
+ u8 mask;
+ int shift;
+ int ret;
+
+ if (lvled == 0 || lvled > LM3533_LVLED_ID_MAX)
+ return -EINVAL;
+
+ if (led > LM3533_LED_ID_MAX)
+ return -EINVAL;
+
+ if (lvled < 4) {
+ reg = LM3533_REG_OUTPUT_CONF1;
+ shift = 2 * lvled;
+ } else {
+ reg = LM3533_REG_OUTPUT_CONF2;
+ shift = 2 * (lvled - 4);
+ }
+
+ mask = LM3533_LED_ID_MASK << shift;
+ val = led << shift;
+
+ ret = lm3533_update(lm3533, reg, val, mask);
+ if (ret)
+ dev_err(lm3533->dev, "failed to set lvled config\n");
+
+ return ret;
+}
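From the shift arithmetic above, OUTPUT_CONF1 carries HVLED1/2 in bits 0-1 and LVLED1-3 in bit pairs 2-3, 4-5 and 6-7, while OUTPUT_CONF2 carries LVLED4/5 in bit pairs 0-1 and 2-3. A short hypothetical example of the resulting register writes (as if called from within this file, since the helpers are static):

static int example_route_outputs(struct lm3533 *lm3533)
{
	int ret;

	/* HVLED2 -> backlight 1: OUTPUT_CONF1, mask 0x02, value 1 << 1 */
	ret = lm3533_set_hvled_config(lm3533, 2, 1);
	if (ret)
		return ret;

	/* LVLED5 -> LED 3: OUTPUT_CONF2, mask 0x0c, value 3 << 2 */
	return lm3533_set_lvled_config(lm3533, 5, 3);
}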
+
+static void lm3533_enable(struct lm3533 *lm3533)
+{
+ if (gpio_is_valid(lm3533->gpio_hwen))
+ gpio_set_value(lm3533->gpio_hwen, 1);
+}
+
+static void lm3533_disable(struct lm3533 *lm3533)
+{
+ if (gpio_is_valid(lm3533->gpio_hwen))
+ gpio_set_value(lm3533->gpio_hwen, 0);
+}
+
+enum lm3533_attribute_type {
+ LM3533_ATTR_TYPE_BACKLIGHT,
+ LM3533_ATTR_TYPE_LED,
+};
+
+struct lm3533_device_attribute {
+ struct device_attribute dev_attr;
+ enum lm3533_attribute_type type;
+ union {
+ struct {
+ u8 id;
+ } output;
+ } u;
+};
+
+#define to_lm3533_dev_attr(_attr) \
+ container_of(_attr, struct lm3533_device_attribute, dev_attr)
+
+static ssize_t show_output(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533 *lm3533 = dev_get_drvdata(dev);
+ struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(attr);
+ int id = lattr->u.output.id;
+ u8 reg;
+ u8 val;
+ u8 mask;
+ int shift;
+ int ret;
+
+ if (lattr->type == LM3533_ATTR_TYPE_BACKLIGHT) {
+ reg = LM3533_REG_OUTPUT_CONF1;
+ shift = id - 1;
+ mask = LM3533_BL_ID_MASK << shift;
+ } else {
+ if (id < 4) {
+ reg = LM3533_REG_OUTPUT_CONF1;
+ shift = 2 * id;
+ } else {
+ reg = LM3533_REG_OUTPUT_CONF2;
+ shift = 2 * (id - 4);
+ }
+ mask = LM3533_LED_ID_MASK << shift;
+ }
+
+ ret = lm3533_read(lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ val = (val & mask) >> shift;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_output(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lm3533 *lm3533 = dev_get_drvdata(dev);
+ struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(attr);
+ int id = lattr->u.output.id;
+ u8 val;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val))
+ return -EINVAL;
+
+ if (lattr->type == LM3533_ATTR_TYPE_BACKLIGHT)
+ ret = lm3533_set_hvled_config(lm3533, id, val);
+ else
+ ret = lm3533_set_lvled_config(lm3533, id, val);
+
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+#define LM3533_OUTPUT_ATTR(_name, _mode, _show, _store, _type, _id) \
+ struct lm3533_device_attribute lm3533_dev_attr_##_name = \
+ { .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .type = _type, \
+ .u.output = { .id = _id }, }
+
+#define LM3533_OUTPUT_ATTR_RW(_name, _type, _id) \
+ LM3533_OUTPUT_ATTR(output_##_name, S_IRUGO | S_IWUSR, \
+ show_output, store_output, _type, _id)
+
+#define LM3533_OUTPUT_HVLED_ATTR_RW(_nr) \
+ LM3533_OUTPUT_ATTR_RW(hvled##_nr, LM3533_ATTR_TYPE_BACKLIGHT, _nr)
+#define LM3533_OUTPUT_LVLED_ATTR_RW(_nr) \
+ LM3533_OUTPUT_ATTR_RW(lvled##_nr, LM3533_ATTR_TYPE_LED, _nr)
+/*
+ * Output config:
+ *
+ * output_hvled<nr> 0-1
+ * output_lvled<nr> 0-3
+ */
+static LM3533_OUTPUT_HVLED_ATTR_RW(1);
+static LM3533_OUTPUT_HVLED_ATTR_RW(2);
+static LM3533_OUTPUT_LVLED_ATTR_RW(1);
+static LM3533_OUTPUT_LVLED_ATTR_RW(2);
+static LM3533_OUTPUT_LVLED_ATTR_RW(3);
+static LM3533_OUTPUT_LVLED_ATTR_RW(4);
+static LM3533_OUTPUT_LVLED_ATTR_RW(5);
+
+static struct attribute *lm3533_attributes[] = {
+ &lm3533_dev_attr_output_hvled1.dev_attr.attr,
+ &lm3533_dev_attr_output_hvled2.dev_attr.attr,
+ &lm3533_dev_attr_output_lvled1.dev_attr.attr,
+ &lm3533_dev_attr_output_lvled2.dev_attr.attr,
+ &lm3533_dev_attr_output_lvled3.dev_attr.attr,
+ &lm3533_dev_attr_output_lvled4.dev_attr.attr,
+ &lm3533_dev_attr_output_lvled5.dev_attr.attr,
+ NULL,
+};
+
+#define to_dev_attr(_attr) \
+ container_of(_attr, struct device_attribute, attr)
+
+static umode_t lm3533_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct lm3533 *lm3533 = dev_get_drvdata(dev);
+ struct device_attribute *dattr = to_dev_attr(attr);
+ struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(dattr);
+ enum lm3533_attribute_type type = lattr->type;
+ umode_t mode = attr->mode;
+
+ if (!lm3533->have_backlights && type == LM3533_ATTR_TYPE_BACKLIGHT)
+ mode = 0;
+ else if (!lm3533->have_leds && type == LM3533_ATTR_TYPE_LED)
+ mode = 0;
+
+ return mode;
+}
+
+static struct attribute_group lm3533_attribute_group = {
+ .is_visible = lm3533_attr_is_visible,
+ .attrs = lm3533_attributes
+};
+
+static int __devinit lm3533_device_als_init(struct lm3533 *lm3533)
+{
+ struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ int ret;
+
+ if (!pdata->als)
+ return 0;
+
+ lm3533_als_devs[0].platform_data = pdata->als;
+ lm3533_als_devs[0].pdata_size = sizeof(*pdata->als);
+
+ ret = mfd_add_devices(lm3533->dev, 0, lm3533_als_devs, 1, NULL, 0);
+ if (ret) {
+ dev_err(lm3533->dev, "failed to add ALS device\n");
+ return ret;
+ }
+
+ lm3533->have_als = 1;
+
+ return 0;
+}
+
+static int __devinit lm3533_device_bl_init(struct lm3533 *lm3533)
+{
+ struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ int i;
+ int ret;
+
+ if (!pdata->backlights || pdata->num_backlights == 0)
+ return 0;
+
+ if (pdata->num_backlights > ARRAY_SIZE(lm3533_bl_devs))
+ pdata->num_backlights = ARRAY_SIZE(lm3533_bl_devs);
+
+ for (i = 0; i < pdata->num_backlights; ++i) {
+ lm3533_bl_devs[i].platform_data = &pdata->backlights[i];
+ lm3533_bl_devs[i].pdata_size = sizeof(pdata->backlights[i]);
+ }
+
+ ret = mfd_add_devices(lm3533->dev, 0, lm3533_bl_devs,
+ pdata->num_backlights, NULL, 0);
+ if (ret) {
+ dev_err(lm3533->dev, "failed to add backlight devices\n");
+ return ret;
+ }
+
+ lm3533->have_backlights = 1;
+
+ return 0;
+}
+
+static int __devinit lm3533_device_led_init(struct lm3533 *lm3533)
+{
+ struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ int i;
+ int ret;
+
+ if (!pdata->leds || pdata->num_leds == 0)
+ return 0;
+
+ if (pdata->num_leds > ARRAY_SIZE(lm3533_led_devs))
+ pdata->num_leds = ARRAY_SIZE(lm3533_led_devs);
+
+ for (i = 0; i < pdata->num_leds; ++i) {
+ lm3533_led_devs[i].platform_data = &pdata->leds[i];
+ lm3533_led_devs[i].pdata_size = sizeof(pdata->leds[i]);
+ }
+
+ ret = mfd_add_devices(lm3533->dev, 0, lm3533_led_devs,
+ pdata->num_leds, NULL, 0);
+ if (ret) {
+ dev_err(lm3533->dev, "failed to add LED devices\n");
+ return ret;
+ }
+
+ lm3533->have_leds = 1;
+
+ return 0;
+}
+
+static int __devinit lm3533_device_setup(struct lm3533 *lm3533,
+ struct lm3533_platform_data *pdata)
+{
+ int ret;
+
+ ret = lm3533_set_boost_freq(lm3533, pdata->boost_freq);
+ if (ret)
+ return ret;
+
+ ret = lm3533_set_boost_ovp(lm3533, pdata->boost_ovp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __devinit lm3533_device_init(struct lm3533 *lm3533)
+{
+ struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ int ret;
+
+ dev_dbg(lm3533->dev, "%s\n", __func__);
+
+ if (!pdata) {
+ dev_err(lm3533->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ lm3533->gpio_hwen = pdata->gpio_hwen;
+
+ dev_set_drvdata(lm3533->dev, lm3533);
+
+ if (gpio_is_valid(lm3533->gpio_hwen)) {
+ ret = gpio_request_one(lm3533->gpio_hwen, GPIOF_OUT_INIT_LOW,
+ "lm3533-hwen");
+ if (ret < 0) {
+ dev_err(lm3533->dev,
+ "failed to request HWEN GPIO %d\n",
+ lm3533->gpio_hwen);
+ return ret;
+ }
+ }
+
+ lm3533_enable(lm3533);
+
+ ret = lm3533_device_setup(lm3533, pdata);
+ if (ret)
+ goto err_disable;
+
+ lm3533_device_als_init(lm3533);
+ lm3533_device_bl_init(lm3533);
+ lm3533_device_led_init(lm3533);
+
+ ret = sysfs_create_group(&lm3533->dev->kobj, &lm3533_attribute_group);
+ if (ret < 0) {
+ dev_err(lm3533->dev, "failed to create sysfs attributes\n");
+ goto err_unregister;
+ }
+
+ return 0;
+
+err_unregister:
+ mfd_remove_devices(lm3533->dev);
+err_disable:
+ lm3533_disable(lm3533);
+ if (gpio_is_valid(lm3533->gpio_hwen))
+ gpio_free(lm3533->gpio_hwen);
+
+ return ret;
+}
+
+static void __devexit lm3533_device_exit(struct lm3533 *lm3533)
+{
+ dev_dbg(lm3533->dev, "%s\n", __func__);
+
+ sysfs_remove_group(&lm3533->dev->kobj, &lm3533_attribute_group);
+
+ mfd_remove_devices(lm3533->dev);
+ lm3533_disable(lm3533);
+ if (gpio_is_valid(lm3533->gpio_hwen))
+ gpio_free(lm3533->gpio_hwen);
+}
+
+static bool lm3533_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x10 ... 0x2c:
+ case 0x30 ... 0x38:
+ case 0x40 ... 0x45:
+ case 0x50 ... 0x57:
+ case 0x60 ... 0x6e:
+ case 0x70 ... 0x75:
+ case 0x80 ... 0x85:
+ case 0x90 ... 0x95:
+ case 0xa0 ... 0xa5:
+ case 0xb0 ... 0xb2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool lm3533_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x34 ... 0x36: /* zone */
+ case 0x37 ... 0x38: /* adc */
+ case 0xb0 ... 0xb1: /* fault */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool lm3533_precious_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x34: /* zone */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = LM3533_REG_MAX,
+ .readable_reg = lm3533_readable_register,
+ .volatile_reg = lm3533_volatile_register,
+ .precious_reg = lm3533_precious_register,
+};
+
+static int __devinit lm3533_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct lm3533 *lm3533;
+ int ret;
+
+ dev_dbg(&i2c->dev, "%s\n", __func__);
+
+ lm3533 = devm_kzalloc(&i2c->dev, sizeof(*lm3533), GFP_KERNEL);
+ if (!lm3533)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, lm3533);
+
+ lm3533->regmap = devm_regmap_init_i2c(i2c, &regmap_config);
+ if (IS_ERR(lm3533->regmap))
+ return PTR_ERR(lm3533->regmap);
+
+ lm3533->dev = &i2c->dev;
+ lm3533->irq = i2c->irq;
+
+ ret = lm3533_device_init(lm3533);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __devexit lm3533_i2c_remove(struct i2c_client *i2c)
+{
+ struct lm3533 *lm3533 = i2c_get_clientdata(i2c);
+
+ dev_dbg(&i2c->dev, "%s\n", __func__);
+
+ lm3533_device_exit(lm3533);
+
+ return 0;
+}
+
+static const struct i2c_device_id lm3533_i2c_ids[] = {
+ { "lm3533", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, lm3533_i2c_ids);
+
+static struct i2c_driver lm3533_i2c_driver = {
+ .driver = {
+ .name = "lm3533",
+ .owner = THIS_MODULE,
+ },
+ .id_table = lm3533_i2c_ids,
+ .probe = lm3533_i2c_probe,
+ .remove = __devexit_p(lm3533_i2c_remove),
+};
+
+static int __init lm3533_i2c_init(void)
+{
+ return i2c_add_driver(&lm3533_i2c_driver);
+}
+subsys_initcall(lm3533_i2c_init);
+
+static void __exit lm3533_i2c_exit(void)
+{
+ i2c_del_driver(&lm3533_i2c_driver);
+}
+module_exit(lm3533_i2c_exit);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/lm3533-ctrlbank.c b/drivers/mfd/lm3533-ctrlbank.c
new file mode 100644
index 000000000000..a4cb7a5220a7
--- /dev/null
+++ b/drivers/mfd/lm3533-ctrlbank.c
@@ -0,0 +1,148 @@
+/*
+ * lm3533-ctrlbank.c -- LM3533 Generic Control Bank interface
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_MAX_CURRENT_MIN 5000
+#define LM3533_MAX_CURRENT_MAX 29800
+#define LM3533_MAX_CURRENT_STEP 800
+
+#define LM3533_BRIGHTNESS_MAX 255
+#define LM3533_PWM_MAX 0x3f
+
+#define LM3533_REG_PWM_BASE 0x14
+#define LM3533_REG_MAX_CURRENT_BASE 0x1f
+#define LM3533_REG_CTRLBANK_ENABLE 0x27
+#define LM3533_REG_BRIGHTNESS_BASE 0x40
+
+
+static inline u8 lm3533_ctrlbank_get_reg(struct lm3533_ctrlbank *cb, u8 base)
+{
+ return base + cb->id;
+}
+
+int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb)
+{
+ u8 mask;
+ int ret;
+
+ dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
+
+ mask = 1 << cb->id;
+ ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE,
+ mask, mask);
+ if (ret)
+ dev_err(cb->dev, "failed to enable ctrlbank %d\n", cb->id);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_enable);
+
+int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb)
+{
+ u8 mask;
+ int ret;
+
+ dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
+
+ mask = 1 << cb->id;
+ ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE, 0, mask);
+ if (ret)
+ dev_err(cb->dev, "failed to disable ctrlbank %d\n", cb->id);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_disable);
+
+/*
+ * Full-scale current.
+ *
+ * imax 5000 - 29800 uA (800 uA step)
+ */
+int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb, u16 imax)
+{
+ u8 reg;
+ u8 val;
+ int ret;
+
+ if (imax < LM3533_MAX_CURRENT_MIN || imax > LM3533_MAX_CURRENT_MAX)
+ return -EINVAL;
+
+ val = (imax - LM3533_MAX_CURRENT_MIN) / LM3533_MAX_CURRENT_STEP;
+
+ reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_MAX_CURRENT_BASE);
+ ret = lm3533_write(cb->lm3533, reg, val);
+ if (ret)
+ dev_err(cb->dev, "failed to set max current\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_max_current);
+
+#define lm3533_ctrlbank_set(_name, _NAME) \
+int lm3533_ctrlbank_set_##_name(struct lm3533_ctrlbank *cb, u8 val) \
+{ \
+ u8 reg; \
+ int ret; \
+ \
+ if (val > LM3533_##_NAME##_MAX) \
+ return -EINVAL; \
+ \
+ reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE); \
+ ret = lm3533_write(cb->lm3533, reg, val); \
+ if (ret) \
+ dev_err(cb->dev, "failed to set " #_name "\n"); \
+ \
+ return ret; \
+} \
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_##_name);
+
+#define lm3533_ctrlbank_get(_name, _NAME) \
+int lm3533_ctrlbank_get_##_name(struct lm3533_ctrlbank *cb, u8 *val) \
+{ \
+ u8 reg; \
+ int ret; \
+ \
+ reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE); \
+ ret = lm3533_read(cb->lm3533, reg, val); \
+ if (ret) \
+ dev_err(cb->dev, "failed to get " #_name "\n"); \
+ \
+ return ret; \
+} \
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_##_name);
+
+lm3533_ctrlbank_set(brightness, BRIGHTNESS);
+lm3533_ctrlbank_get(brightness, BRIGHTNESS);
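For reference, the brightness instance of the setter macro above expands to roughly the following (straight substitution of _name/_NAME):

int lm3533_ctrlbank_set_brightness(struct lm3533_ctrlbank *cb, u8 val)
{
	u8 reg;
	int ret;

	if (val > LM3533_BRIGHTNESS_MAX)
		return -EINVAL;

	reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_BRIGHTNESS_BASE);
	ret = lm3533_write(cb->lm3533, reg, val);
	if (ret)
		dev_err(cb->dev, "failed to set brightness\n");

	return ret;
}
EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_brightness);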
+
+/*
+ * PWM-input control mask:
+ *
+ * bit 5 - PWM-input enabled in Zone 4
+ * bit 4 - PWM-input enabled in Zone 3
+ * bit 3 - PWM-input enabled in Zone 2
+ * bit 2 - PWM-input enabled in Zone 1
+ * bit 1 - PWM-input enabled in Zone 0
+ * bit 0 - PWM-input enabled
+ */
+lm3533_ctrlbank_set(pwm, PWM);
+lm3533_ctrlbank_get(pwm, PWM);
+
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Control Bank interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
new file mode 100644
index 000000000000..027cc8f86132
--- /dev/null
+++ b/drivers/mfd/lpc_ich.c
@@ -0,0 +1,888 @@
+/*
+ * lpc_ich.c - LPC interface for Intel ICH
+ *
+ * LPC bridge function of the Intel ICH contains many other
+ * functional units, such as Interrupt controllers, Timers,
+ * Power Management, System Management, GPIO, RTC, and LPC
+ * Configuration Registers.
+ *
+ * This driver is derived from lpc_sch.
+ *
+ * Copyright (c) 2011 Extreme Engineering Solution, Inc.
+ * Author: Aaron Sierra <asierra@xes-inc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * This driver supports the following I/O Controller hubs:
+ * (See the intel documentation on http://developer.intel.com.)
+ * document number 290655-003, 290677-014: 82801AA (ICH), 82801AB (ICHO)
+ * document number 290687-002, 298242-027: 82801BA (ICH2)
+ * document number 290733-003, 290739-013: 82801CA (ICH3-S)
+ * document number 290716-001, 290718-007: 82801CAM (ICH3-M)
+ * document number 290744-001, 290745-025: 82801DB (ICH4)
+ * document number 252337-001, 252663-008: 82801DBM (ICH4-M)
+ * document number 273599-001, 273645-002: 82801E (C-ICH)
+ * document number 252516-001, 252517-028: 82801EB (ICH5), 82801ER (ICH5R)
+ * document number 300641-004, 300884-013: 6300ESB
+ * document number 301473-002, 301474-026: 82801F (ICH6)
+ * document number 313082-001, 313075-006: 631xESB, 632xESB
+ * document number 307013-003, 307014-024: 82801G (ICH7)
+ * document number 322896-001, 322897-001: NM10
+ * document number 313056-003, 313057-017: 82801H (ICH8)
+ * document number 316972-004, 316973-012: 82801I (ICH9)
+ * document number 319973-002, 319974-002: 82801J (ICH10)
+ * document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
+ * document number 320066-003, 320257-008: EP80597 (IICH)
+ * document number 324645-001, 324646-001: Cougar Point (CPT)
+ * document number TBD : Patsburg (PBG)
+ * document number TBD : DH89xxCC
+ * document number TBD : Panther Point
+ * document number TBD : Lynx Point
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/lpc_ich.h>
+
+#define ACPIBASE 0x40
+#define ACPIBASE_GPE_OFF 0x28
+#define ACPIBASE_GPE_END 0x2f
+#define ACPIBASE_SMI_OFF 0x30
+#define ACPIBASE_SMI_END 0x33
+#define ACPIBASE_TCO_OFF 0x60
+#define ACPIBASE_TCO_END 0x7f
+#define ACPICTRL 0x44
+
+#define ACPIBASE_GCS_OFF 0x3410
+#define ACPIBASE_GCS_END 0x3414
+
+#define GPIOBASE 0x48
+#define GPIOCTRL 0x4C
+
+#define RCBABASE 0xf0
+
+#define wdt_io_res(i) wdt_res(0, i)
+#define wdt_mem_res(i) wdt_res(ICH_RES_MEM_OFF, i)
+#define wdt_res(b, i) (&wdt_ich_res[(b) + (i)])
+
+static int lpc_ich_acpi_save = -1;
+static int lpc_ich_gpio_save = -1;
+
+static struct resource wdt_ich_res[] = {
+ /* ACPI - TCO */
+ {
+ .flags = IORESOURCE_IO,
+ },
+ /* ACPI - SMI */
+ {
+ .flags = IORESOURCE_IO,
+ },
+ /* GCS */
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource gpio_ich_res[] = {
+ /* GPIO */
+ {
+ .flags = IORESOURCE_IO,
+ },
+ /* ACPI - GPE0 */
+ {
+ .flags = IORESOURCE_IO,
+ },
+};
+
+enum lpc_cells {
+ LPC_WDT = 0,
+ LPC_GPIO,
+};
+
+static struct mfd_cell lpc_ich_cells[] = {
+ [LPC_WDT] = {
+ .name = "iTCO_wdt",
+ .num_resources = ARRAY_SIZE(wdt_ich_res),
+ .resources = wdt_ich_res,
+ .ignore_resource_conflicts = true,
+ },
+ [LPC_GPIO] = {
+ .name = "gpio_ich",
+ .num_resources = ARRAY_SIZE(gpio_ich_res),
+ .resources = gpio_ich_res,
+ .ignore_resource_conflicts = true,
+ },
+};
+
+/* chipset related info */
+enum lpc_chipsets {
+ LPC_ICH = 0, /* ICH */
+ LPC_ICH0, /* ICH0 */
+ LPC_ICH2, /* ICH2 */
+ LPC_ICH2M, /* ICH2-M */
+ LPC_ICH3, /* ICH3-S */
+ LPC_ICH3M, /* ICH3-M */
+ LPC_ICH4, /* ICH4 */
+ LPC_ICH4M, /* ICH4-M */
+ LPC_CICH, /* C-ICH */
+ LPC_ICH5, /* ICH5 & ICH5R */
+ LPC_6300ESB, /* 6300ESB */
+ LPC_ICH6, /* ICH6 & ICH6R */
+ LPC_ICH6M, /* ICH6-M */
+ LPC_ICH6W, /* ICH6W & ICH6RW */
+ LPC_631XESB, /* 631xESB/632xESB */
+ LPC_ICH7, /* ICH7 & ICH7R */
+ LPC_ICH7DH, /* ICH7DH */
+ LPC_ICH7M, /* ICH7-M & ICH7-U */
+ LPC_ICH7MDH, /* ICH7-M DH */
+ LPC_NM10, /* NM10 */
+ LPC_ICH8, /* ICH8 & ICH8R */
+ LPC_ICH8DH, /* ICH8DH */
+ LPC_ICH8DO, /* ICH8DO */
+ LPC_ICH8M, /* ICH8M */
+ LPC_ICH8ME, /* ICH8M-E */
+ LPC_ICH9, /* ICH9 */
+ LPC_ICH9R, /* ICH9R */
+ LPC_ICH9DH, /* ICH9DH */
+ LPC_ICH9DO, /* ICH9DO */
+ LPC_ICH9M, /* ICH9M */
+ LPC_ICH9ME, /* ICH9M-E */
+ LPC_ICH10, /* ICH10 */
+ LPC_ICH10R, /* ICH10R */
+ LPC_ICH10D, /* ICH10D */
+ LPC_ICH10DO, /* ICH10DO */
+ LPC_PCH, /* PCH Desktop Full Featured */
+ LPC_PCHM, /* PCH Mobile Full Featured */
+ LPC_P55, /* P55 */
+ LPC_PM55, /* PM55 */
+ LPC_H55, /* H55 */
+ LPC_QM57, /* QM57 */
+ LPC_H57, /* H57 */
+ LPC_HM55, /* HM55 */
+ LPC_Q57, /* Q57 */
+ LPC_HM57, /* HM57 */
+ LPC_PCHMSFF, /* PCH Mobile SFF Full Featured */
+ LPC_QS57, /* QS57 */
+ LPC_3400, /* 3400 */
+ LPC_3420, /* 3420 */
+ LPC_3450, /* 3450 */
+ LPC_EP80579, /* EP80579 */
+ LPC_CPT, /* Cougar Point */
+ LPC_CPTD, /* Cougar Point Desktop */
+ LPC_CPTM, /* Cougar Point Mobile */
+ LPC_PBG, /* Patsburg */
+ LPC_DH89XXCC, /* DH89xxCC */
+ LPC_PPT, /* Panther Point */
+ LPC_LPT, /* Lynx Point */
+};
+
+struct lpc_ich_info lpc_chipset_info[] __devinitdata = {
+ [LPC_ICH] = {
+ .name = "ICH",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH0] = {
+ .name = "ICH0",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH2] = {
+ .name = "ICH2",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH2M] = {
+ .name = "ICH2-M",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH3] = {
+ .name = "ICH3-S",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH3M] = {
+ .name = "ICH3-M",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH4] = {
+ .name = "ICH4",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH4M] = {
+ .name = "ICH4-M",
+ .iTCO_version = 1,
+ },
+ [LPC_CICH] = {
+ .name = "C-ICH",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH5] = {
+ .name = "ICH5 or ICH5R",
+ .iTCO_version = 1,
+ },
+ [LPC_6300ESB] = {
+ .name = "6300ESB",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH6] = {
+ .name = "ICH6 or ICH6R",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V6_GPIO,
+ },
+ [LPC_ICH6M] = {
+ .name = "ICH6-M",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V6_GPIO,
+ },
+ [LPC_ICH6W] = {
+ .name = "ICH6W or ICH6RW",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V6_GPIO,
+ },
+ [LPC_631XESB] = {
+ .name = "631xESB/632xESB",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V6_GPIO,
+ },
+ [LPC_ICH7] = {
+ .name = "ICH7 or ICH7R",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH7DH] = {
+ .name = "ICH7DH",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH7M] = {
+ .name = "ICH7-M or ICH7-U",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH7MDH] = {
+ .name = "ICH7-M DH",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_NM10] = {
+ .name = "NM10",
+ .iTCO_version = 2,
+ },
+ [LPC_ICH8] = {
+ .name = "ICH8 or ICH8R",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH8DH] = {
+ .name = "ICH8DH",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH8DO] = {
+ .name = "ICH8DO",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH8M] = {
+ .name = "ICH8M",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH8ME] = {
+ .name = "ICH8M-E",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH9] = {
+ .name = "ICH9",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH9R] = {
+ .name = "ICH9R",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH9DH] = {
+ .name = "ICH9DH",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH9DO] = {
+ .name = "ICH9DO",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH9M] = {
+ .name = "ICH9M",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH9ME] = {
+ .name = "ICH9M-E",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH10] = {
+ .name = "ICH10",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V10CONS_GPIO,
+ },
+ [LPC_ICH10R] = {
+ .name = "ICH10R",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V10CONS_GPIO,
+ },
+ [LPC_ICH10D] = {
+ .name = "ICH10D",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V10CORP_GPIO,
+ },
+ [LPC_ICH10DO] = {
+ .name = "ICH10DO",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V10CORP_GPIO,
+ },
+ [LPC_PCH] = {
+ .name = "PCH Desktop Full Featured",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_PCHM] = {
+ .name = "PCH Mobile Full Featured",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_P55] = {
+ .name = "P55",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_PM55] = {
+ .name = "PM55",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_H55] = {
+ .name = "H55",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_QM57] = {
+ .name = "QM57",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_H57] = {
+ .name = "H57",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_HM55] = {
+ .name = "HM55",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_Q57] = {
+ .name = "Q57",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_HM57] = {
+ .name = "HM57",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_PCHMSFF] = {
+ .name = "PCH Mobile SFF Full Featured",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_QS57] = {
+ .name = "QS57",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_3400] = {
+ .name = "3400",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_3420] = {
+ .name = "3420",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_3450] = {
+ .name = "3450",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_EP80579] = {
+ .name = "EP80579",
+ .iTCO_version = 2,
+ },
+ [LPC_CPT] = {
+ .name = "Cougar Point",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_CPTD] = {
+ .name = "Cougar Point Desktop",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_CPTM] = {
+ .name = "Cougar Point Mobile",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_PBG] = {
+ .name = "Patsburg",
+ .iTCO_version = 2,
+ },
+ [LPC_DH89XXCC] = {
+ .name = "DH89xxCC",
+ .iTCO_version = 2,
+ },
+ [LPC_PPT] = {
+ .name = "Panther Point",
+ .iTCO_version = 2,
+ },
+ [LPC_LPT] = {
+ .name = "Lynx Point",
+ .iTCO_version = 2,
+ },
+};
+
+/*
+ * This data only exists for exporting the supported PCI ids
+ * via MODULE_DEVICE_TABLE. We do not actually register a
+ * pci_driver, because the I/O Controller Hub has also other
+ * functions that probably will be registered by other drivers.
+ */
+static DEFINE_PCI_DEVICE_TABLE(lpc_ich_ids) = {
+ { PCI_VDEVICE(INTEL, 0x2410), LPC_ICH},
+ { PCI_VDEVICE(INTEL, 0x2420), LPC_ICH0},
+ { PCI_VDEVICE(INTEL, 0x2440), LPC_ICH2},
+ { PCI_VDEVICE(INTEL, 0x244c), LPC_ICH2M},
+ { PCI_VDEVICE(INTEL, 0x2480), LPC_ICH3},
+ { PCI_VDEVICE(INTEL, 0x248c), LPC_ICH3M},
+ { PCI_VDEVICE(INTEL, 0x24c0), LPC_ICH4},
+ { PCI_VDEVICE(INTEL, 0x24cc), LPC_ICH4M},
+ { PCI_VDEVICE(INTEL, 0x2450), LPC_CICH},
+ { PCI_VDEVICE(INTEL, 0x24d0), LPC_ICH5},
+ { PCI_VDEVICE(INTEL, 0x25a1), LPC_6300ESB},
+ { PCI_VDEVICE(INTEL, 0x2640), LPC_ICH6},
+ { PCI_VDEVICE(INTEL, 0x2641), LPC_ICH6M},
+ { PCI_VDEVICE(INTEL, 0x2642), LPC_ICH6W},
+ { PCI_VDEVICE(INTEL, 0x2670), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2671), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2672), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2673), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2674), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2675), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2676), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2677), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2678), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2679), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267a), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267b), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267c), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267d), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267e), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267f), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x27b8), LPC_ICH7},
+ { PCI_VDEVICE(INTEL, 0x27b0), LPC_ICH7DH},
+ { PCI_VDEVICE(INTEL, 0x27b9), LPC_ICH7M},
+ { PCI_VDEVICE(INTEL, 0x27bd), LPC_ICH7MDH},
+ { PCI_VDEVICE(INTEL, 0x27bc), LPC_NM10},
+ { PCI_VDEVICE(INTEL, 0x2810), LPC_ICH8},
+ { PCI_VDEVICE(INTEL, 0x2812), LPC_ICH8DH},
+ { PCI_VDEVICE(INTEL, 0x2814), LPC_ICH8DO},
+ { PCI_VDEVICE(INTEL, 0x2815), LPC_ICH8M},
+ { PCI_VDEVICE(INTEL, 0x2811), LPC_ICH8ME},
+ { PCI_VDEVICE(INTEL, 0x2918), LPC_ICH9},
+ { PCI_VDEVICE(INTEL, 0x2916), LPC_ICH9R},
+ { PCI_VDEVICE(INTEL, 0x2912), LPC_ICH9DH},
+ { PCI_VDEVICE(INTEL, 0x2914), LPC_ICH9DO},
+ { PCI_VDEVICE(INTEL, 0x2919), LPC_ICH9M},
+ { PCI_VDEVICE(INTEL, 0x2917), LPC_ICH9ME},
+ { PCI_VDEVICE(INTEL, 0x3a18), LPC_ICH10},
+ { PCI_VDEVICE(INTEL, 0x3a16), LPC_ICH10R},
+ { PCI_VDEVICE(INTEL, 0x3a1a), LPC_ICH10D},
+ { PCI_VDEVICE(INTEL, 0x3a14), LPC_ICH10DO},
+ { PCI_VDEVICE(INTEL, 0x3b00), LPC_PCH},
+ { PCI_VDEVICE(INTEL, 0x3b01), LPC_PCHM},
+ { PCI_VDEVICE(INTEL, 0x3b02), LPC_P55},
+ { PCI_VDEVICE(INTEL, 0x3b03), LPC_PM55},
+ { PCI_VDEVICE(INTEL, 0x3b06), LPC_H55},
+ { PCI_VDEVICE(INTEL, 0x3b07), LPC_QM57},
+ { PCI_VDEVICE(INTEL, 0x3b08), LPC_H57},
+ { PCI_VDEVICE(INTEL, 0x3b09), LPC_HM55},
+ { PCI_VDEVICE(INTEL, 0x3b0a), LPC_Q57},
+ { PCI_VDEVICE(INTEL, 0x3b0b), LPC_HM57},
+ { PCI_VDEVICE(INTEL, 0x3b0d), LPC_PCHMSFF},
+ { PCI_VDEVICE(INTEL, 0x3b0f), LPC_QS57},
+ { PCI_VDEVICE(INTEL, 0x3b12), LPC_3400},
+ { PCI_VDEVICE(INTEL, 0x3b14), LPC_3420},
+ { PCI_VDEVICE(INTEL, 0x3b16), LPC_3450},
+ { PCI_VDEVICE(INTEL, 0x5031), LPC_EP80579},
+ { PCI_VDEVICE(INTEL, 0x1c41), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c42), LPC_CPTD},
+ { PCI_VDEVICE(INTEL, 0x1c43), LPC_CPTM},
+ { PCI_VDEVICE(INTEL, 0x1c44), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c45), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c46), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c47), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c48), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c49), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4a), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4b), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4c), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4d), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4e), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4f), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c50), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c51), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c52), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c53), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c54), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c55), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c56), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c57), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c58), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c59), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5a), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5b), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5c), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5d), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5e), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5f), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1d40), LPC_PBG},
+ { PCI_VDEVICE(INTEL, 0x1d41), LPC_PBG},
+ { PCI_VDEVICE(INTEL, 0x2310), LPC_DH89XXCC},
+ { PCI_VDEVICE(INTEL, 0x1e40), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e41), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e42), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e43), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e44), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e45), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e46), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e47), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e48), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e49), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4a), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4b), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4c), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4d), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4e), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4f), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e50), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e51), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e52), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e53), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e54), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e55), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e56), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e57), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e58), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e59), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5a), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5b), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5c), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5d), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5e), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5f), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x8c40), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c41), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c42), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c43), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c44), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c45), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c46), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c47), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c48), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c49), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4a), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4b), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4c), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4d), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4e), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4f), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c50), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c51), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c52), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c53), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c54), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c55), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c56), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c57), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c58), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c59), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5a), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5b), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5c), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5d), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5e), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5f), LPC_LPT},
+ { 0, }, /* End of list */
+};
+MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
+
+static void lpc_ich_restore_config_space(struct pci_dev *dev)
+{
+ if (lpc_ich_acpi_save >= 0) {
+ pci_write_config_byte(dev, ACPICTRL, lpc_ich_acpi_save);
+ lpc_ich_acpi_save = -1;
+ }
+
+ if (lpc_ich_gpio_save >= 0) {
+ pci_write_config_byte(dev, GPIOCTRL, lpc_ich_gpio_save);
+ lpc_ich_gpio_save = -1;
+ }
+}
+
+static void __devinit lpc_ich_enable_acpi_space(struct pci_dev *dev)
+{
+ u8 reg_save;
+
+ pci_read_config_byte(dev, ACPICTRL, &reg_save);
+ pci_write_config_byte(dev, ACPICTRL, reg_save | 0x10);
+ lpc_ich_acpi_save = reg_save;
+}
+
+static void __devinit lpc_ich_enable_gpio_space(struct pci_dev *dev)
+{
+ u8 reg_save;
+
+ pci_read_config_byte(dev, GPIOCTRL, &reg_save);
+ pci_write_config_byte(dev, GPIOCTRL, reg_save | 0x10);
+ lpc_ich_gpio_save = reg_save;
+}
+
+static void __devinit lpc_ich_finalize_cell(struct mfd_cell *cell,
+ const struct pci_device_id *id)
+{
+ cell->platform_data = &lpc_chipset_info[id->driver_data];
+ cell->pdata_size = sizeof(struct lpc_ich_info);
+}
+
+static int __devinit lpc_ich_init_gpio(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ u32 base_addr_cfg;
+ u32 base_addr;
+ int ret;
+ bool acpi_conflict = false;
+ struct resource *res;
+
+ /* Setup power management base register */
+ pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
+ base_addr = base_addr_cfg & 0x0000ff80;
+ if (!base_addr) {
+ dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+ lpc_ich_cells[LPC_GPIO].num_resources--;
+ goto gpe0_done;
+ }
+
+ res = &gpio_ich_res[ICH_RES_GPE0];
+ res->start = base_addr + ACPIBASE_GPE_OFF;
+ res->end = base_addr + ACPIBASE_GPE_END;
+ ret = acpi_check_resource_conflict(res);
+ if (ret) {
+ /*
+ * This isn't fatal for the GPIO, but we have to make sure that
+ * the platform_device subsystem doesn't see this resource
+ * or it will register an invalid region.
+ */
+ lpc_ich_cells[LPC_GPIO].num_resources--;
+ acpi_conflict = true;
+ } else {
+ lpc_ich_enable_acpi_space(dev);
+ }
+
+gpe0_done:
+ /* Setup GPIO base register */
+ pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
+ base_addr = base_addr_cfg & 0x0000ff80;
+ if (!base_addr) {
+ dev_err(&dev->dev, "I/O space for GPIO uninitialized\n");
+ ret = -ENODEV;
+ goto gpio_done;
+ }
+
+ /* Older devices provide fewer GPIO and have a smaller resource size. */
+ res = &gpio_ich_res[ICH_RES_GPIO];
+ res->start = base_addr;
+ switch (lpc_chipset_info[id->driver_data].gpio_version) {
+ case ICH_V5_GPIO:
+ case ICH_V10CORP_GPIO:
+ res->end = res->start + 128 - 1;
+ break;
+ default:
+ res->end = res->start + 64 - 1;
+ break;
+ }
+
+ ret = acpi_check_resource_conflict(res);
+ if (ret) {
+ /* this isn't necessarily fatal for the GPIO */
+ acpi_conflict = true;
+ goto gpio_done;
+ }
+ lpc_ich_enable_gpio_space(dev);
+
+ lpc_ich_finalize_cell(&lpc_ich_cells[LPC_GPIO], id);
+ ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO],
+ 1, NULL, 0);
+
+gpio_done:
+ if (acpi_conflict)
+ pr_warn("Resource conflict(s) found affecting %s\n",
+ lpc_ich_cells[LPC_GPIO].name);
+ return ret;
+}
+
+static int __devinit lpc_ich_init_wdt(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ u32 base_addr_cfg;
+ u32 base_addr;
+ int ret;
+ bool acpi_conflict = false;
+ struct resource *res;
+
+ /* Setup power management base register */
+ pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
+ base_addr = base_addr_cfg & 0x0000ff80;
+ if (!base_addr) {
+ dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+ ret = -ENODEV;
+ goto wdt_done;
+ }
+
+ res = wdt_io_res(ICH_RES_IO_TCO);
+ res->start = base_addr + ACPIBASE_TCO_OFF;
+ res->end = base_addr + ACPIBASE_TCO_END;
+ ret = acpi_check_resource_conflict(res);
+ if (ret) {
+ acpi_conflict = true;
+ goto wdt_done;
+ }
+
+ res = wdt_io_res(ICH_RES_IO_SMI);
+ res->start = base_addr + ACPIBASE_SMI_OFF;
+ res->end = base_addr + ACPIBASE_SMI_END;
+ ret = acpi_check_resource_conflict(res);
+ if (ret) {
+ acpi_conflict = true;
+ goto wdt_done;
+ }
+ lpc_ich_enable_acpi_space(dev);
+
+ /*
+ * Get the Memory-Mapped GCS register. To get access to it
+ * we have to read RCBA from PCI Config space 0xf0 and use
+ * it as base. GCS = RCBA + ICH6_GCS(0x3410).
+ */
+ if (lpc_chipset_info[id->driver_data].iTCO_version == 2) {
+ pci_read_config_dword(dev, RCBABASE, &base_addr_cfg);
+ base_addr = base_addr_cfg & 0xffffc000;
+ if (!(base_addr_cfg & 1)) {
+ pr_err("RCBA is disabled by hardware/BIOS, "
+ "device disabled\n");
+ ret = -ENODEV;
+ goto wdt_done;
+ }
+ res = wdt_mem_res(ICH_RES_MEM_GCS);
+ res->start = base_addr + ACPIBASE_GCS_OFF;
+ res->end = base_addr + ACPIBASE_GCS_END;
+ ret = acpi_check_resource_conflict(res);
+ if (ret) {
+ acpi_conflict = true;
+ goto wdt_done;
+ }
+ }
+
+ lpc_ich_finalize_cell(&lpc_ich_cells[LPC_WDT], id);
+ ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT],
+ 1, NULL, 0);
+
+wdt_done:
+ if (acpi_conflict)
+ pr_warn("Resource conflict(s) found affecting %s\n",
+ lpc_ich_cells[LPC_WDT].name);
+ return ret;
+}
+
+static int __devinit lpc_ich_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ int ret;
+ bool cell_added = false;
+
+ ret = lpc_ich_init_wdt(dev, id);
+ if (!ret)
+ cell_added = true;
+
+ ret = lpc_ich_init_gpio(dev, id);
+ if (!ret)
+ cell_added = true;
+
+ /*
+ * All we care about is whether at least one cell registered
+ * successfully; if none did, undo the config-space changes and bail.
+ */
+ if (!cell_added) {
+ lpc_ich_restore_config_space(dev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __devexit lpc_ich_remove(struct pci_dev *dev)
+{
+ mfd_remove_devices(&dev->dev);
+ lpc_ich_restore_config_space(dev);
+}
+
+static struct pci_driver lpc_ich_driver = {
+ .name = "lpc_ich",
+ .id_table = lpc_ich_ids,
+ .probe = lpc_ich_probe,
+ .remove = __devexit_p(lpc_ich_remove),
+};
+
+static int __init lpc_ich_init(void)
+{
+ return pci_register_driver(&lpc_ich_driver);
+}
+
+static void __exit lpc_ich_exit(void)
+{
+ pci_unregister_driver(&lpc_ich_driver);
+}
+
+module_init(lpc_ich_init);
+module_exit(lpc_ich_exit);
+
+MODULE_AUTHOR("Aaron Sierra <asierra@xes-inc.com>");
+MODULE_DESCRIPTION("LPC interface for Intel ICH");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index abc421364a45..9f20abc5e393 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -36,6 +36,7 @@
#define GPIOBASE 0x44
#define GPIO_IO_SIZE 64
+#define GPIO_IO_SIZE_CENTERTON 128
#define WDTBASE 0x84
#define WDT_IO_SIZE 64
@@ -68,7 +69,7 @@ static struct resource wdt_sch_resource = {
static struct mfd_cell tunnelcreek_cells[] = {
{
- .name = "tunnelcreek_wdt",
+ .name = "ie6xx_wdt",
.num_resources = 1,
.resources = &wdt_sch_resource,
},
@@ -77,6 +78,7 @@ static struct mfd_cell tunnelcreek_cells[] = {
static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CENTERTON_ILB) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, lpc_sch_ids);
@@ -115,7 +117,11 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
}
gpio_sch_resource.start = base_addr;
- gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
+
+ if (id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB)
+ gpio_sch_resource.end = base_addr + GPIO_IO_SIZE_CENTERTON - 1;
+ else
+ gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
for (i=0; i < ARRAY_SIZE(lpc_sch_cells); i++)
lpc_sch_cells[i].id = id->device;
@@ -125,7 +131,8 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
if (ret)
goto out_dev;
- if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC) {
+ if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC
+ || id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB) {
pci_read_config_dword(dev, WDTBASE, &base_addr_cfg);
if (!(base_addr_cfg & (1 << 31))) {
dev_err(&dev->dev, "Decode of the WDT I/O range disabled\n");
@@ -167,18 +174,7 @@ static struct pci_driver lpc_sch_driver = {
.remove = __devexit_p(lpc_sch_remove),
};
-static int __init lpc_sch_init(void)
-{
- return pci_register_driver(&lpc_sch_driver);
-}
-
-static void __exit lpc_sch_exit(void)
-{
- pci_unregister_driver(&lpc_sch_driver);
-}
-
-module_init(lpc_sch_init);
-module_exit(lpc_sch_exit);
+module_pci_driver(lpc_sch_driver);
MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>");
MODULE_DESCRIPTION("LPC interface for Intel Poulsbo SCH");
diff --git a/drivers/mfd/max77693-irq.c b/drivers/mfd/max77693-irq.c
new file mode 100644
index 000000000000..2b403569e0a6
--- /dev/null
+++ b/drivers/mfd/max77693-irq.c
@@ -0,0 +1,309 @@
+/*
+ * max77693-irq.c - Interrupt controller support for MAX77693
+ *
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8997-irq.c
+ */
+
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-private.h>
+
+static const u8 max77693_mask_reg[] = {
+ [LED_INT] = MAX77693_LED_REG_FLASH_INT_MASK,
+ [TOPSYS_INT] = MAX77693_PMIC_REG_TOPSYS_INT_MASK,
+ [CHG_INT] = MAX77693_CHG_REG_CHG_INT_MASK,
+ [MUIC_INT1] = MAX77693_MUIC_REG_INTMASK1,
+ [MUIC_INT2] = MAX77693_MUIC_REG_INTMASK2,
+ [MUIC_INT3] = MAX77693_MUIC_REG_INTMASK3,
+};
+
+static struct regmap *max77693_get_regmap(struct max77693_dev *max77693,
+ enum max77693_irq_source src)
+{
+ switch (src) {
+ case LED_INT ... CHG_INT:
+ return max77693->regmap;
+ case MUIC_INT1 ... MUIC_INT3:
+ return max77693->regmap_muic;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+struct max77693_irq_data {
+ int mask;
+ enum max77693_irq_source group;
+};
+
+#define DECLARE_IRQ(idx, _group, _mask) \
+ [(idx)] = { .group = (_group), .mask = (_mask) }
+static const struct max77693_irq_data max77693_irqs[] = {
+ DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_OPEN, LED_INT, 1 << 0),
+ DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_SHORT, LED_INT, 1 << 1),
+ DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_OPEN, LED_INT, 1 << 2),
+ DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_SHORT, LED_INT, 1 << 3),
+ DECLARE_IRQ(MAX77693_LED_IRQ_MAX_FLASH, LED_INT, 1 << 4),
+
+ DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T120C_INT, TOPSYS_INT, 1 << 0),
+ DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T140C_INT, TOPSYS_INT, 1 << 1),
+ DECLARE_IRQ(MAX77693_TOPSYS_IRQ_LOWSYS_INT, TOPSYS_INT, 1 << 3),
+
+ DECLARE_IRQ(MAX77693_CHG_IRQ_BYP_I, CHG_INT, 1 << 0),
+ DECLARE_IRQ(MAX77693_CHG_IRQ_THM_I, CHG_INT, 1 << 2),
+ DECLARE_IRQ(MAX77693_CHG_IRQ_BAT_I, CHG_INT, 1 << 3),
+ DECLARE_IRQ(MAX77693_CHG_IRQ_CHG_I, CHG_INT, 1 << 4),
+ DECLARE_IRQ(MAX77693_CHG_IRQ_CHGIN_I, CHG_INT, 1 << 6),
+
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC, MUIC_INT1, 1 << 0),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_LOW, MUIC_INT1, 1 << 1),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_ERR, MUIC_INT1, 1 << 2),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC1K, MUIC_INT1, 1 << 3),
+
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGTYP, MUIC_INT2, 1 << 0),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGDETREUN, MUIC_INT2, 1 << 1),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DCDTMR, MUIC_INT2, 1 << 2),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DXOVP, MUIC_INT2, 1 << 3),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VBVOLT, MUIC_INT2, 1 << 4),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VIDRM, MUIC_INT2, 1 << 5),
+
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_EOC, MUIC_INT3, 1 << 0),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CGMBC, MUIC_INT3, 1 << 1),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_OVP, MUIC_INT3, 1 << 2),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR, MUIC_INT3, 1 << 3),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CHG_ENABLED, MUIC_INT3, 1 << 4),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_BAT_DET, MUIC_INT3, 1 << 5),
+};
+
+static void max77693_irq_lock(struct irq_data *data)
+{
+ struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+
+ mutex_lock(&max77693->irqlock);
+}
+
+static void max77693_irq_sync_unlock(struct irq_data *data)
+{
+ struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+ int i;
+
+ for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
+ u8 mask_reg = max77693_mask_reg[i];
+ struct regmap *map = max77693_get_regmap(max77693, i);
+
+ if (mask_reg == MAX77693_REG_INVALID ||
+ IS_ERR_OR_NULL(map))
+ continue;
+ max77693->irq_masks_cache[i] = max77693->irq_masks_cur[i];
+
+ max77693_write_reg(map, max77693_mask_reg[i],
+ max77693->irq_masks_cur[i]);
+ }
+
+ mutex_unlock(&max77693->irqlock);
+}
+
+static inline const struct max77693_irq_data *
+irq_to_max77693_irq(struct max77693_dev *max77693, int irq)
+{
+ return &max77693_irqs[irq];
+}
+
+static void max77693_irq_mask(struct irq_data *data)
+{
+ struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+ const struct max77693_irq_data *irq_data =
+ irq_to_max77693_irq(max77693, data->irq);
+
+ if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
+ max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
+ else
+ max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
+}
+
+static void max77693_irq_unmask(struct irq_data *data)
+{
+ struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+ const struct max77693_irq_data *irq_data =
+ irq_to_max77693_irq(max77693, data->irq);
+
+ if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
+ max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
+ else
+ max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
+}
+
+static struct irq_chip max77693_irq_chip = {
+ .name = "max77693",
+ .irq_bus_lock = max77693_irq_lock,
+ .irq_bus_sync_unlock = max77693_irq_sync_unlock,
+ .irq_mask = max77693_irq_mask,
+ .irq_unmask = max77693_irq_unmask,
+};
+
+#define MAX77693_IRQSRC_CHG (1 << 0)
+#define MAX77693_IRQSRC_TOP (1 << 1)
+#define MAX77693_IRQSRC_FLASH (1 << 2)
+#define MAX77693_IRQSRC_MUIC (1 << 3)
+static irqreturn_t max77693_irq_thread(int irq, void *data)
+{
+ struct max77693_dev *max77693 = data;
+ u8 irq_reg[MAX77693_IRQ_GROUP_NR] = {};
+ u8 irq_src;
+ int ret;
+ int i, cur_irq;
+
+ ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_INTSRC,
+ &irq_src);
+ if (ret < 0) {
+ dev_err(max77693->dev, "Failed to read interrupt source: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ if (irq_src & MAX77693_IRQSRC_CHG)
+ /* CHG_INT */
+ ret = max77693_read_reg(max77693->regmap, MAX77693_CHG_REG_CHG_INT,
+ &irq_reg[CHG_INT]);
+
+ if (irq_src & MAX77693_IRQSRC_TOP)
+ /* TOPSYS_INT */
+ ret = max77693_read_reg(max77693->regmap,
+ MAX77693_PMIC_REG_TOPSYS_INT, &irq_reg[TOPSYS_INT]);
+
+ if (irq_src & MAX77693_IRQSRC_FLASH)
+ /* LED_INT */
+ ret = max77693_read_reg(max77693->regmap,
+ MAX77693_LED_REG_FLASH_INT, &irq_reg[LED_INT]);
+
+ if (irq_src & MAX77693_IRQSRC_MUIC)
+ /* MUIC INT1 ~ INT3 */
+ max77693_bulk_read(max77693->regmap, MAX77693_MUIC_REG_INT1,
+ MAX77693_NUM_IRQ_MUIC_REGS, &irq_reg[MUIC_INT1]);
+
+ /* Apply masking */
+ for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
+ if (i >= MUIC_INT1 && i <= MUIC_INT3)
+ irq_reg[i] &= max77693->irq_masks_cur[i];
+ else
+ irq_reg[i] &= ~max77693->irq_masks_cur[i];
+ }
+
+ /* Report */
+ for (i = 0; i < MAX77693_IRQ_NR; i++) {
+ if (irq_reg[max77693_irqs[i].group] & max77693_irqs[i].mask) {
+ cur_irq = irq_find_mapping(max77693->irq_domain, i);
+ if (cur_irq)
+ handle_nested_irq(cur_irq);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+int max77693_irq_resume(struct max77693_dev *max77693)
+{
+ if (max77693->irq)
+ max77693_irq_thread(0, max77693);
+
+ return 0;
+}
+
+static int max77693_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct max77693_dev *max77693 = d->host_data;
+
+ irq_set_chip_data(irq, max77693);
+ irq_set_chip_and_handler(irq, &max77693_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+ return 0;
+}
+
+static struct irq_domain_ops max77693_irq_domain_ops = {
+ .map = max77693_irq_domain_map,
+};
+
+int max77693_irq_init(struct max77693_dev *max77693)
+{
+ struct irq_domain *domain;
+ int i;
+ int ret;
+
+ mutex_init(&max77693->irqlock);
+
+ /* Mask individual interrupt sources */
+ for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
+ struct regmap *map;
+ /* MUIC IRQs: 0 = masked, 1 = unmasked */
+ /* Other IRQs: 1 = masked, 0 = unmasked */
+ if (i >= MUIC_INT1 && i <= MUIC_INT3) {
+ max77693->irq_masks_cur[i] = 0x00;
+ max77693->irq_masks_cache[i] = 0x00;
+ } else {
+ max77693->irq_masks_cur[i] = 0xff;
+ max77693->irq_masks_cache[i] = 0xff;
+ }
+ map = max77693_get_regmap(max77693, i);
+
+ if (IS_ERR_OR_NULL(map))
+ continue;
+ if (max77693_mask_reg[i] == MAX77693_REG_INVALID)
+ continue;
+ if (i >= MUIC_INT1 && i <= MUIC_INT3)
+ max77693_write_reg(map, max77693_mask_reg[i], 0x00);
+ else
+ max77693_write_reg(map, max77693_mask_reg[i], 0xff);
+ }
+
+ domain = irq_domain_add_linear(NULL, MAX77693_IRQ_NR,
+ &max77693_irq_domain_ops, max77693);
+ if (!domain) {
+ dev_err(max77693->dev, "could not create irq domain\n");
+ return -ENODEV;
+ }
+ max77693->irq_domain = domain;
+
+ ret = request_threaded_irq(max77693->irq, NULL, max77693_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "max77693-irq", max77693);
+
+ if (ret)
+ dev_err(max77693->dev, "Failed to request IRQ %d: %d\n",
+ max77693->irq, ret);
+
+ return 0;
+}
+
+void max77693_irq_exit(struct max77693_dev *max77693)
+{
+ if (max77693->irq)
+ free_irq(max77693->irq, max77693);
+}
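
The interrupt controller above maps its sources through a linear irq_domain and delivers them with handle_nested_irq() from the chip's threaded handler. A minimal, hypothetical sketch (not part of this patch) of how a max77693 sub-device driver could hook one of those interrupts, assuming it can reach the parent struct max77693_dev (for example via dev_get_drvdata() on the parent device):

static irqreturn_t example_adc_handler(int irq, void *data)
{
        /* runs in the parent driver's IRQ thread via handle_nested_irq() */
        return IRQ_HANDLED;
}

static int example_request_adc_irq(struct max77693_dev *max77693)
{
        /* create (or look up) the virq for the MUIC ADC hardware interrupt */
        int virq = irq_create_mapping(max77693->irq_domain,
                                      MAX77693_MUIC_IRQ_INT1_ADC);

        if (!virq)
                return -EINVAL;

        /* nested irqs must be requested with a threaded handler */
        return request_threaded_irq(virq, NULL, example_adc_handler,
                                    0, "example-muic-adc", max77693);
}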
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
new file mode 100644
index 000000000000..e9e4278722f3
--- /dev/null
+++ b/drivers/mfd/max77693.c
@@ -0,0 +1,249 @@
+/*
+ * max77693.c - mfd core driver for the MAX 77693
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8997.c
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-private.h>
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+
+#define I2C_ADDR_PMIC (0xCC >> 1) /* Charger, Flash LED */
+#define I2C_ADDR_MUIC (0x4A >> 1)
+#define I2C_ADDR_HAPTIC (0x90 >> 1)
+
+static struct mfd_cell max77693_devs[] = {
+ { .name = "max77693-pmic", },
+ { .name = "max77693-charger", },
+ { .name = "max77693-flash", },
+ { .name = "max77693-muic", },
+ { .name = "max77693-haptic", },
+};
+
+int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(map, reg, &val);
+ *dest = val;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_read_reg);
+
+int max77693_bulk_read(struct regmap *map, u8 reg, int count, u8 *buf)
+{
+ int ret;
+
+ ret = regmap_bulk_read(map, reg, buf, count);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_bulk_read);
+
+int max77693_write_reg(struct regmap *map, u8 reg, u8 value)
+{
+ int ret;
+
+ ret = regmap_write(map, reg, value);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_write_reg);
+
+int max77693_bulk_write(struct regmap *map, u8 reg, int count, u8 *buf)
+{
+ int ret;
+
+ ret = regmap_bulk_write(map, reg, buf, count);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_bulk_write);
+
+int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask)
+{
+ int ret;
+
+ ret = regmap_update_bits(map, reg, mask, val);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_update_reg);
+
+static const struct regmap_config max77693_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77693_PMIC_REG_END,
+};
+
+static int max77693_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct max77693_dev *max77693;
+ struct max77693_platform_data *pdata = i2c->dev.platform_data;
+ u8 reg_data;
+ int ret = 0;
+
+ max77693 = devm_kzalloc(&i2c->dev,
+ sizeof(struct max77693_dev), GFP_KERNEL);
+ if (max77693 == NULL)
+ return -ENOMEM;
+
+ max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
+ if (IS_ERR(max77693->regmap)) {
+ ret = PTR_ERR(max77693->regmap);
+ dev_err(max77693->dev,"failed to allocate register map: %d\n",
+ ret);
+ goto err_regmap;
+ }
+
+ i2c_set_clientdata(i2c, max77693);
+ max77693->dev = &i2c->dev;
+ max77693->i2c = i2c;
+ max77693->irq = i2c->irq;
+ max77693->type = id->driver_data;
+
+ if (!pdata)
+ goto err_regmap;
+
+ max77693->wakeup = pdata->wakeup;
+
+ mutex_init(&max77693->iolock);
+
+ if (max77693_read_reg(max77693->regmap,
+ MAX77693_PMIC_REG_PMIC_ID2, &reg_data) < 0) {
+ dev_err(max77693->dev, "device not found on this channel\n");
+ ret = -ENODEV;
+ goto err_regmap;
+ } else
+ dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
+
+ max77693->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
+ i2c_set_clientdata(max77693->muic, max77693);
+
+ max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
+ i2c_set_clientdata(max77693->haptic, max77693);
+
+ ret = max77693_irq_init(max77693);
+ if (ret < 0)
+ goto err_mfd;
+
+ pm_runtime_set_active(max77693->dev);
+
+ ret = mfd_add_devices(max77693->dev, -1, max77693_devs,
+ ARRAY_SIZE(max77693_devs), NULL, 0);
+ if (ret < 0)
+ goto err_mfd;
+
+ device_init_wakeup(max77693->dev, pdata->wakeup);
+
+ return ret;
+
+err_mfd:
+ i2c_unregister_device(max77693->muic);
+ i2c_unregister_device(max77693->haptic);
+err_regmap:
+
+ return ret;
+}
+
+static int max77693_i2c_remove(struct i2c_client *i2c)
+{
+ struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(max77693->dev);
+ i2c_unregister_device(max77693->muic);
+ i2c_unregister_device(max77693->haptic);
+
+ return 0;
+}
+
+static const struct i2c_device_id max77693_i2c_id[] = {
+ { "max77693", TYPE_MAX77693 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max77693_i2c_id);
+
+static int max77693_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(dev))
+ irq_set_irq_wake(max77693->irq, 1);
+ return 0;
+}
+
+static int max77693_resume(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(dev))
+ irq_set_irq_wake(max77693->irq, 0);
+ return max77693_irq_resume(max77693);
+}
+
+static const struct dev_pm_ops max77693_pm = {
+ .suspend = max77693_suspend,
+ .resume = max77693_resume,
+};
+
+static struct i2c_driver max77693_i2c_driver = {
+ .driver = {
+ .name = "max77693",
+ .owner = THIS_MODULE,
+ .pm = &max77693_pm,
+ },
+ .probe = max77693_i2c_probe,
+ .remove = max77693_i2c_remove,
+ .id_table = max77693_i2c_id,
+};
+
+static int __init max77693_i2c_init(void)
+{
+ return i2c_add_driver(&max77693_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(max77693_i2c_init);
+
+static void __exit max77693_i2c_exit(void)
+{
+ i2c_del_driver(&max77693_i2c_driver);
+}
+module_exit(max77693_i2c_exit);
+
+MODULE_DESCRIPTION("MAXIM 77693 multi-function core driver");
+MODULE_AUTHOR("SangYoung, Son <hello.son@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 738722cdecaa..f0ea3b8b3e4a 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -15,24 +15,13 @@
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
-#include <linux/spi/spi.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mc13xxx.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
-struct mc13xxx {
- struct spi_device *spidev;
- struct mutex lock;
- int irq;
- int flags;
-
- irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
- void *irqdata[MC13XXX_NUM_IRQ];
-
- int adcflags;
-};
+#include "mc13xxx.h"
#define MC13XXX_IRQSTAT0 0
#define MC13XXX_IRQSTAT0_ADCDONEI (1 << 0)
@@ -139,34 +128,29 @@ struct mc13xxx {
#define MC13XXX_ADC2 45
-#define MC13XXX_NUMREGS 0x3f
-
void mc13xxx_lock(struct mc13xxx *mc13xxx)
{
if (!mutex_trylock(&mc13xxx->lock)) {
- dev_dbg(&mc13xxx->spidev->dev, "wait for %s from %pf\n",
+ dev_dbg(mc13xxx->dev, "wait for %s from %pf\n",
__func__, __builtin_return_address(0));
mutex_lock(&mc13xxx->lock);
}
- dev_dbg(&mc13xxx->spidev->dev, "%s from %pf\n",
+ dev_dbg(mc13xxx->dev, "%s from %pf\n",
__func__, __builtin_return_address(0));
}
EXPORT_SYMBOL(mc13xxx_lock);
void mc13xxx_unlock(struct mc13xxx *mc13xxx)
{
- dev_dbg(&mc13xxx->spidev->dev, "%s from %pf\n",
+ dev_dbg(mc13xxx->dev, "%s from %pf\n",
__func__, __builtin_return_address(0));
mutex_unlock(&mc13xxx->lock);
}
EXPORT_SYMBOL(mc13xxx_unlock);
-#define MC13XXX_REGOFFSET_SHIFT 25
int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
{
- struct spi_transfer t;
- struct spi_message m;
int ret;
BUG_ON(!mutex_is_locked(&mc13xxx->lock));
@@ -174,84 +158,35 @@ int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
if (offset > MC13XXX_NUMREGS)
return -EINVAL;
- *val = offset << MC13XXX_REGOFFSET_SHIFT;
-
- memset(&t, 0, sizeof(t));
-
- t.tx_buf = val;
- t.rx_buf = val;
- t.len = sizeof(u32);
-
- spi_message_init(&m);
- spi_message_add_tail(&t, &m);
-
- ret = spi_sync(mc13xxx->spidev, &m);
-
- /* error in message.status implies error return from spi_sync */
- BUG_ON(!ret && m.status);
+ ret = regmap_read(mc13xxx->regmap, offset, val);
+ dev_vdbg(mc13xxx->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
- if (ret)
- return ret;
-
- *val &= 0xffffff;
-
- dev_vdbg(&mc13xxx->spidev->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL(mc13xxx_reg_read);
int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val)
{
- u32 buf;
- struct spi_transfer t;
- struct spi_message m;
- int ret;
-
BUG_ON(!mutex_is_locked(&mc13xxx->lock));
- dev_vdbg(&mc13xxx->spidev->dev, "[0x%02x] <- 0x%06x\n", offset, val);
+ dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x\n", offset, val);
if (offset > MC13XXX_NUMREGS || val > 0xffffff)
return -EINVAL;
- buf = 1 << 31 | offset << MC13XXX_REGOFFSET_SHIFT | val;
-
- memset(&t, 0, sizeof(t));
-
- t.tx_buf = &buf;
- t.rx_buf = &buf;
- t.len = sizeof(u32);
-
- spi_message_init(&m);
- spi_message_add_tail(&t, &m);
-
- ret = spi_sync(mc13xxx->spidev, &m);
-
- BUG_ON(!ret && m.status);
-
- if (ret)
- return ret;
-
- return 0;
+ return regmap_write(mc13xxx->regmap, offset, val);
}
EXPORT_SYMBOL(mc13xxx_reg_write);
int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset,
u32 mask, u32 val)
{
- int ret;
- u32 valread;
-
+ BUG_ON(!mutex_is_locked(&mc13xxx->lock));
BUG_ON(val & ~mask);
+ dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x (mask: 0x%06x)\n",
+ offset, val, mask);
- ret = mc13xxx_reg_read(mc13xxx, offset, &valread);
- if (ret)
- return ret;
-
- valread = (valread & ~mask) | val;
-
- return mc13xxx_reg_write(mc13xxx, offset, valread);
+ return regmap_update_bits(mc13xxx->regmap, offset, mask, val);
}
EXPORT_SYMBOL(mc13xxx_reg_rmw);
@@ -439,7 +374,7 @@ static int mc13xxx_irq_handle(struct mc13xxx *mc13xxx,
if (handled == IRQ_HANDLED)
num_handled++;
} else {
- dev_err(&mc13xxx->spidev->dev,
+ dev_err(mc13xxx->dev,
"BUG: irq %u but no handler\n",
baseirq + irq);
@@ -475,25 +410,23 @@ static irqreturn_t mc13xxx_irq_thread(int irq, void *data)
return IRQ_RETVAL(handled);
}
-enum mc13xxx_id {
- MC13XXX_ID_MC13783,
- MC13XXX_ID_MC13892,
- MC13XXX_ID_INVALID,
-};
-
static const char *mc13xxx_chipname[] = {
[MC13XXX_ID_MC13783] = "mc13783",
[MC13XXX_ID_MC13892] = "mc13892",
};
#define maskval(reg, mask) (((reg) & (mask)) >> __ffs(mask))
-static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
+static int mc13xxx_identify(struct mc13xxx *mc13xxx)
{
u32 icid;
u32 revision;
- const char *name;
int ret;
+ /*
+ * Get the generation ID from register 46, as apparently some older
+ * IC revisions only have this info at this location. Newer ICs seem
+ * to have it in both places.
+ */
ret = mc13xxx_reg_read(mc13xxx, 46, &icid);
if (ret)
return ret;
@@ -502,26 +435,23 @@ static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
switch (icid) {
case 2:
- *id = MC13XXX_ID_MC13783;
- name = "mc13783";
+ mc13xxx->ictype = MC13XXX_ID_MC13783;
break;
case 7:
- *id = MC13XXX_ID_MC13892;
- name = "mc13892";
+ mc13xxx->ictype = MC13XXX_ID_MC13892;
break;
default:
- *id = MC13XXX_ID_INVALID;
+ mc13xxx->ictype = MC13XXX_ID_INVALID;
break;
}
- if (*id == MC13XXX_ID_MC13783 || *id == MC13XXX_ID_MC13892) {
+ if (mc13xxx->ictype == MC13XXX_ID_MC13783 ||
+ mc13xxx->ictype == MC13XXX_ID_MC13892) {
ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
- if (ret)
- return ret;
- dev_info(&mc13xxx->spidev->dev, "%s: rev: %d.%d, "
+ dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
"fin: %d, fab: %d, icid: %d/%d\n",
- mc13xxx_chipname[*id],
+ mc13xxx_chipname[mc13xxx->ictype],
maskval(revision, MC13XXX_REVISION_REVFULL),
maskval(revision, MC13XXX_REVISION_REVMETAL),
maskval(revision, MC13XXX_REVISION_FIN),
@@ -530,26 +460,12 @@ static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
maskval(revision, MC13XXX_REVISION_ICIDCODE));
}
- if (*id != MC13XXX_ID_INVALID) {
- const struct spi_device_id *devid =
- spi_get_device_id(mc13xxx->spidev);
- if (!devid || devid->driver_data != *id)
- dev_warn(&mc13xxx->spidev->dev, "device id doesn't "
- "match auto detection!\n");
- }
-
- return 0;
+ return (mc13xxx->ictype == MC13XXX_ID_INVALID) ? -ENODEV : 0;
}
static const char *mc13xxx_get_chipname(struct mc13xxx *mc13xxx)
{
- const struct spi_device_id *devid =
- spi_get_device_id(mc13xxx->spidev);
-
- if (!devid)
- return NULL;
-
- return mc13xxx_chipname[devid->driver_data];
+ return mc13xxx_chipname[mc13xxx->ictype];
}
int mc13xxx_get_flags(struct mc13xxx *mc13xxx)
@@ -592,7 +508,7 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
};
init_completion(&adcdone_data.done);
- dev_dbg(&mc13xxx->spidev->dev, "%s\n", __func__);
+ dev_dbg(mc13xxx->dev, "%s\n", __func__);
mc13xxx_lock(mc13xxx);
@@ -637,7 +553,8 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
adc1 |= ato << MC13783_ADC1_ATO_SHIFT;
if (atox)
adc1 |= MC13783_ADC1_ATOX;
- dev_dbg(&mc13xxx->spidev->dev, "%s: request irq\n", __func__);
+
+ dev_dbg(mc13xxx->dev, "%s: request irq\n", __func__);
mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE,
mc13xxx_handler_adcdone, __func__, &adcdone_data);
mc13xxx_irq_ack(mc13xxx, MC13XXX_IRQ_ADCDONE);
@@ -695,7 +612,7 @@ static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
if (!cell.name)
return -ENOMEM;
- return mfd_add_devices(&mc13xxx->spidev->dev, -1, &cell, 1, NULL, 0);
+ return mfd_add_devices(mc13xxx->dev, -1, &cell, 1, NULL, 0);
}
static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
@@ -706,7 +623,7 @@ static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
#ifdef CONFIG_OF
static int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
{
- struct device_node *np = mc13xxx->spidev->dev.of_node;
+ struct device_node *np = mc13xxx->dev->of_node;
if (!np)
return -ENODEV;
@@ -732,55 +649,15 @@ static inline int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
}
#endif
-static const struct spi_device_id mc13xxx_device_id[] = {
- {
- .name = "mc13783",
- .driver_data = MC13XXX_ID_MC13783,
- }, {
- .name = "mc13892",
- .driver_data = MC13XXX_ID_MC13892,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
-
-static const struct of_device_id mc13xxx_dt_ids[] = {
- { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
- { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
-
-static int mc13xxx_probe(struct spi_device *spi)
+int mc13xxx_common_init(struct mc13xxx *mc13xxx,
+ struct mc13xxx_platform_data *pdata, int irq)
{
- const struct of_device_id *of_id;
- struct spi_driver *sdrv = to_spi_driver(spi->dev.driver);
- struct mc13xxx *mc13xxx;
- struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
- enum mc13xxx_id id;
int ret;
- of_id = of_match_device(mc13xxx_dt_ids, &spi->dev);
- if (of_id)
- sdrv->id_table = &mc13xxx_device_id[(enum mc13xxx_id) of_id->data];
-
- mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
- if (!mc13xxx)
- return -ENOMEM;
-
- dev_set_drvdata(&spi->dev, mc13xxx);
- spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
- spi->bits_per_word = 32;
- spi_setup(spi);
-
- mc13xxx->spidev = spi;
-
- mutex_init(&mc13xxx->lock);
mc13xxx_lock(mc13xxx);
- ret = mc13xxx_identify(mc13xxx, &id);
- if (ret || id == MC13XXX_ID_INVALID)
+ ret = mc13xxx_identify(mc13xxx);
+ if (ret)
goto err_revision;
/* mask all irqs */
@@ -792,18 +669,19 @@ static int mc13xxx_probe(struct spi_device *spi)
if (ret)
goto err_mask;
- ret = request_threaded_irq(spi->irq, NULL, mc13xxx_irq_thread,
+ ret = request_threaded_irq(irq, NULL, mc13xxx_irq_thread,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "mc13xxx", mc13xxx);
if (ret) {
err_mask:
err_revision:
mc13xxx_unlock(mc13xxx);
- dev_set_drvdata(&spi->dev, NULL);
kfree(mc13xxx);
return ret;
}
+ mc13xxx->irq = irq;
+
mc13xxx_unlock(mc13xxx);
if (mc13xxx_probe_flags_dt(mc13xxx) < 0 && pdata)
@@ -838,42 +716,19 @@ err_revision:
return 0;
}
+EXPORT_SYMBOL_GPL(mc13xxx_common_init);
-static int __devexit mc13xxx_remove(struct spi_device *spi)
+void mc13xxx_common_cleanup(struct mc13xxx *mc13xxx)
{
- struct mc13xxx *mc13xxx = dev_get_drvdata(&spi->dev);
+ free_irq(mc13xxx->irq, mc13xxx);
- free_irq(mc13xxx->spidev->irq, mc13xxx);
+ mfd_remove_devices(mc13xxx->dev);
- mfd_remove_devices(&spi->dev);
+ regmap_exit(mc13xxx->regmap);
kfree(mc13xxx);
-
- return 0;
-}
-
-static struct spi_driver mc13xxx_driver = {
- .id_table = mc13xxx_device_id,
- .driver = {
- .name = "mc13xxx",
- .owner = THIS_MODULE,
- .of_match_table = mc13xxx_dt_ids,
- },
- .probe = mc13xxx_probe,
- .remove = __devexit_p(mc13xxx_remove),
-};
-
-static int __init mc13xxx_init(void)
-{
- return spi_register_driver(&mc13xxx_driver);
-}
-subsys_initcall(mc13xxx_init);
-
-static void __exit mc13xxx_exit(void)
-{
- spi_unregister_driver(&mc13xxx_driver);
}
-module_exit(mc13xxx_exit);
+EXPORT_SYMBOL_GPL(mc13xxx_common_cleanup);
MODULE_DESCRIPTION("Core driver for Freescale MC13XXX PMIC");
MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
new file mode 100644
index 000000000000..d22501dad6a6
--- /dev/null
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2009-2010 Creative Product Design
+ * Marc Reilly marc@cpdesign.com.au
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mc13xxx.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+
+#include "mc13xxx.h"
+
+static const struct i2c_device_id mc13xxx_i2c_device_id[] = {
+ {
+ .name = "mc13892",
+ .driver_data = MC13XXX_ID_MC13892,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(i2c, mc13xxx_i2c_device_id);
+
+static const struct of_device_id mc13xxx_dt_ids[] = {
+ {
+ .compatible = "fsl,mc13892",
+ .data = (void *) &mc13xxx_i2c_device_id[0],
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
+
+static struct regmap_config mc13xxx_regmap_i2c_config = {
+ .reg_bits = 8,
+ .val_bits = 24,
+
+ .max_register = MC13XXX_NUMREGS,
+
+ .cache_type = REGCACHE_NONE,
+};
+
+static int mc13xxx_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct of_device_id *of_id;
+ struct i2c_driver *idrv = to_i2c_driver(client->dev.driver);
+ struct mc13xxx *mc13xxx;
+ struct mc13xxx_platform_data *pdata = dev_get_platdata(&client->dev);
+ int ret;
+
+ of_id = of_match_device(mc13xxx_dt_ids, &client->dev);
+ if (of_id)
+ idrv->id_table = (const struct i2c_device_id *) of_id->data;
+
+ mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
+ if (!mc13xxx)
+ return -ENOMEM;
+
+ dev_set_drvdata(&client->dev, mc13xxx);
+
+ mc13xxx->dev = &client->dev;
+ mutex_init(&mc13xxx->lock);
+
+ mc13xxx->regmap = regmap_init_i2c(client, &mc13xxx_regmap_i2c_config);
+ if (IS_ERR(mc13xxx->regmap)) {
+ ret = PTR_ERR(mc13xxx->regmap);
+ dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
+ ret);
+ dev_set_drvdata(&client->dev, NULL);
+ kfree(mc13xxx);
+ return ret;
+ }
+
+ ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);
+
+ if (ret == 0 && (id->driver_data != mc13xxx->ictype))
+ dev_warn(mc13xxx->dev,
+ "device id doesn't match auto detection!\n");
+
+ return ret;
+}
+
+static int __devexit mc13xxx_i2c_remove(struct i2c_client *client)
+{
+ struct mc13xxx *mc13xxx = dev_get_drvdata(&client->dev);
+
+ mc13xxx_common_cleanup(mc13xxx);
+
+ return 0;
+}
+
+static struct i2c_driver mc13xxx_i2c_driver = {
+ .id_table = mc13xxx_i2c_device_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mc13xxx",
+ .of_match_table = mc13xxx_dt_ids,
+ },
+ .probe = mc13xxx_i2c_probe,
+ .remove = __devexit_p(mc13xxx_i2c_remove),
+};
+
+static int __init mc13xxx_i2c_init(void)
+{
+ return i2c_add_driver(&mc13xxx_i2c_driver);
+}
+subsys_initcall(mc13xxx_i2c_init);
+
+static void __exit mc13xxx_i2c_exit(void)
+{
+ i2c_del_driver(&mc13xxx_i2c_driver);
+}
+module_exit(mc13xxx_i2c_exit);
+
+MODULE_DESCRIPTION("i2c driver for Freescale MC13XXX PMIC");
+MODULE_AUTHOR("Marc Reilly <marc@cpdesign.com.au");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
new file mode 100644
index 000000000000..3fcdab3eb8eb
--- /dev/null
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2009-2010 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * loosely based on an earlier driver that has
+ * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mc13xxx.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+
+#include "mc13xxx.h"
+
+static const struct spi_device_id mc13xxx_device_id[] = {
+ {
+ .name = "mc13783",
+ .driver_data = MC13XXX_ID_MC13783,
+ }, {
+ .name = "mc13892",
+ .driver_data = MC13XXX_ID_MC13892,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
+
+static const struct of_device_id mc13xxx_dt_ids[] = {
+ { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
+ { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
+
+static struct regmap_config mc13xxx_regmap_spi_config = {
+ .reg_bits = 7,
+ .pad_bits = 1,
+ .val_bits = 24,
+
+ .max_register = MC13XXX_NUMREGS,
+
+ .cache_type = REGCACHE_NONE,
+};
+
+static int mc13xxx_spi_probe(struct spi_device *spi)
+{
+ const struct of_device_id *of_id;
+ struct spi_driver *sdrv = to_spi_driver(spi->dev.driver);
+ struct mc13xxx *mc13xxx;
+ struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
+ int ret;
+
+ of_id = of_match_device(mc13xxx_dt_ids, &spi->dev);
+ if (of_id)
+ sdrv->id_table = &mc13xxx_device_id[(enum mc13xxx_id) of_id->data];
+
+ mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
+ if (!mc13xxx)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi->dev, mc13xxx);
+ spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
+ spi->bits_per_word = 32;
+
+ mc13xxx->dev = &spi->dev;
+ mutex_init(&mc13xxx->lock);
+
+ mc13xxx->regmap = regmap_init_spi(spi, &mc13xxx_regmap_spi_config);
+ if (IS_ERR(mc13xxx->regmap)) {
+ ret = PTR_ERR(mc13xxx->regmap);
+ dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
+ ret);
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(mc13xxx);
+ return ret;
+ }
+
+ ret = mc13xxx_common_init(mc13xxx, pdata, spi->irq);
+
+ if (ret) {
+ dev_set_drvdata(&spi->dev, NULL);
+ } else {
+ const struct spi_device_id *devid =
+ spi_get_device_id(spi);
+ if (!devid || devid->driver_data != mc13xxx->ictype)
+ dev_warn(mc13xxx->dev,
+ "device id doesn't match auto detection!\n");
+ }
+
+ return ret;
+}
+
+static int __devexit mc13xxx_spi_remove(struct spi_device *spi)
+{
+ struct mc13xxx *mc13xxx = dev_get_drvdata(&spi->dev);
+
+ mc13xxx_common_cleanup(mc13xxx);
+
+ return 0;
+}
+
+static struct spi_driver mc13xxx_spi_driver = {
+ .id_table = mc13xxx_device_id,
+ .driver = {
+ .name = "mc13xxx",
+ .owner = THIS_MODULE,
+ .of_match_table = mc13xxx_dt_ids,
+ },
+ .probe = mc13xxx_spi_probe,
+ .remove = __devexit_p(mc13xxx_spi_remove),
+};
+
+static int __init mc13xxx_init(void)
+{
+ return spi_register_driver(&mc13xxx_spi_driver);
+}
+subsys_initcall(mc13xxx_init);
+
+static void __exit mc13xxx_exit(void)
+{
+ spi_unregister_driver(&mc13xxx_spi_driver);
+}
+module_exit(mc13xxx_exit);
+
+MODULE_DESCRIPTION("Core driver for Freescale MC13XXX PMIC");
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mc13xxx.h b/drivers/mfd/mc13xxx.h
new file mode 100644
index 000000000000..bbba06feea06
--- /dev/null
+++ b/drivers/mfd/mc13xxx.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Creative Product Design
+ * Marc Reilly <marc@cpdesign.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#ifndef __DRIVERS_MFD_MC13XXX_H
+#define __DRIVERS_MFD_MC13XXX_H
+
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/mfd/mc13xxx.h>
+
+enum mc13xxx_id {
+ MC13XXX_ID_MC13783,
+ MC13XXX_ID_MC13892,
+ MC13XXX_ID_INVALID,
+};
+
+#define MC13XXX_NUMREGS 0x3f
+
+struct mc13xxx {
+ struct regmap *regmap;
+
+ struct device *dev;
+ enum mc13xxx_id ictype;
+
+ struct mutex lock;
+ int irq;
+ int flags;
+
+ irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
+ void *irqdata[MC13XXX_NUM_IRQ];
+
+ int adcflags;
+};
+
+int mc13xxx_common_init(struct mc13xxx *mc13xxx,
+ struct mc13xxx_platform_data *pdata, int irq);
+
+void mc13xxx_common_cleanup(struct mc13xxx *mc13xxx);
+
+#endif /* __DRIVERS_MFD_MC13XXX_H */
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 189c2f07b83f..29c122bf28ea 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -204,7 +204,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
return -ENOENT;
}
- pcf = kzalloc(sizeof(*pcf), GFP_KERNEL);
+ pcf = devm_kzalloc(&client->dev, sizeof(*pcf), GFP_KERNEL);
if (!pcf)
return -ENOMEM;
@@ -212,12 +212,11 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
mutex_init(&pcf->lock);
- pcf->regmap = regmap_init_i2c(client, &pcf50633_regmap_config);
+ pcf->regmap = devm_regmap_init_i2c(client, &pcf50633_regmap_config);
if (IS_ERR(pcf->regmap)) {
ret = PTR_ERR(pcf->regmap);
- dev_err(pcf->dev, "Failed to allocate register map: %d\n",
- ret);
- goto err_free;
+ dev_err(pcf->dev, "Failed to allocate register map: %d\n", ret);
+ return ret;
}
i2c_set_clientdata(client, pcf);
@@ -228,7 +227,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
if (version < 0 || variant < 0) {
dev_err(pcf->dev, "Unable to probe pcf50633\n");
ret = -ENODEV;
- goto err_regmap;
+ return ret;
}
dev_info(pcf->dev, "Probed device version %d variant %d\n",
@@ -237,16 +236,11 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
pcf50633_irq_init(pcf, client->irq);
/* Create sub devices */
- pcf50633_client_dev_register(pcf, "pcf50633-input",
- &pcf->input_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-rtc",
- &pcf->rtc_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-mbc",
- &pcf->mbc_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-adc",
- &pcf->adc_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-backlight",
- &pcf->bl_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-input", &pcf->input_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-rtc", &pcf->rtc_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-mbc", &pcf->mbc_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-adc", &pcf->adc_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-backlight", &pcf->bl_pdev);
for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
@@ -274,13 +268,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
pdata->probe_done(pcf);
return 0;
-
-err_regmap:
- regmap_exit(pcf->regmap);
-err_free:
- kfree(pcf);
-
- return ret;
}
static int __devexit pcf50633_remove(struct i2c_client *client)
@@ -300,9 +287,6 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
platform_device_unregister(pcf->regulator_pdev[i]);
- regmap_exit(pcf->regmap);
- kfree(pcf);
-
return 0;
}
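
This conversion, like the rc5t583 and s5m changes below, moves allocations to the devm_* managed variants so the error and remove paths no longer free anything by hand. A minimal sketch of the resulting probe shape; the foo names and regmap settings are placeholders, not taken from the patch:

struct foo {
        struct regmap *regmap;
};

static const struct regmap_config foo_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
};

static int __devinit foo_probe(struct i2c_client *client,
                               const struct i2c_device_id *id)
{
        struct foo *foo;

        /* freed by the driver core when the device is unbound */
        foo = devm_kzalloc(&client->dev, sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return -ENOMEM;

        /* likewise released automatically; no regmap_exit() on any path */
        foo->regmap = devm_regmap_init_i2c(client, &foo_regmap_config);
        if (IS_ERR(foo->regmap))
                return PTR_ERR(foo->regmap);

        i2c_set_clientdata(client, foo);
        return 0;
}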
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index 44afae0a69ce..cdc1df7fa0e9 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -75,6 +75,7 @@ static struct deepsleep_control_data deepsleep_data[] = {
(RC5T583_EXT_PWRREQ1_CONTROL | RC5T583_EXT_PWRREQ2_CONTROL)
static struct mfd_cell rc5t583_subdevs[] = {
+ {.name = "rc5t583-gpio",},
{.name = "rc5t583-regulator",},
{.name = "rc5t583-rtc", },
{.name = "rc5t583-key", }
@@ -267,7 +268,7 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
rc5t583->dev = &i2c->dev;
i2c_set_clientdata(i2c, rc5t583);
- rc5t583->regmap = regmap_init_i2c(i2c, &rc5t583_regmap_config);
+ rc5t583->regmap = devm_regmap_init_i2c(i2c, &rc5t583_regmap_config);
if (IS_ERR(rc5t583->regmap)) {
ret = PTR_ERR(rc5t583->regmap);
dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret);
@@ -276,7 +277,7 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
ret = rc5t583_clear_ext_power_req(rc5t583, pdata);
if (ret < 0)
- goto err_irq_init;
+ return ret;
if (i2c->irq) {
ret = rc5t583_irq_init(rc5t583, i2c->irq, pdata->irq_base);
@@ -299,8 +300,6 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
err_add_devs:
if (irq_init_success)
rc5t583_irq_exit(rc5t583);
-err_irq_init:
- regmap_exit(rc5t583->regmap);
return ret;
}
@@ -310,7 +309,6 @@ static int __devexit rc5t583_i2c_remove(struct i2c_client *i2c)
mfd_remove_devices(rc5t583->dev);
rc5t583_irq_exit(rc5t583);
- regmap_exit(rc5t583->regmap);
return 0;
}
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
index 809bd4a61089..685d61e431ad 100644
--- a/drivers/mfd/rdc321x-southbridge.c
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -108,18 +108,7 @@ static struct pci_driver rdc321x_sb_driver = {
.remove = __devexit_p(rdc321x_sb_remove),
};
-static int __init rdc321x_sb_init(void)
-{
- return pci_register_driver(&rdc321x_sb_driver);
-}
-
-static void __exit rdc321x_sb_exit(void)
-{
- pci_unregister_driver(&rdc321x_sb_driver);
-}
-
-module_init(rdc321x_sb_init);
-module_exit(rdc321x_sb_exit);
+module_pci_driver(rdc321x_sb_driver);
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/s5m-core.c b/drivers/mfd/s5m-core.c
index 48949d998d10..dd170307e60e 100644
--- a/drivers/mfd/s5m-core.c
+++ b/drivers/mfd/s5m-core.c
@@ -114,12 +114,12 @@ static int s5m87xx_i2c_probe(struct i2c_client *i2c,
s5m87xx->wakeup = pdata->wakeup;
}
- s5m87xx->regmap = regmap_init_i2c(i2c, &s5m_regmap_config);
+ s5m87xx->regmap = devm_regmap_init_i2c(i2c, &s5m_regmap_config);
if (IS_ERR(s5m87xx->regmap)) {
ret = PTR_ERR(s5m87xx->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
ret);
- goto err;
+ return ret;
}
s5m87xx->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
@@ -159,7 +159,6 @@ err:
mfd_remove_devices(s5m87xx->dev);
s5m_irq_exit(s5m87xx);
i2c_unregister_device(s5m87xx->rtc);
- regmap_exit(s5m87xx->regmap);
return ret;
}
@@ -170,7 +169,6 @@ static int s5m87xx_i2c_remove(struct i2c_client *i2c)
mfd_remove_devices(s5m87xx->dev);
s5m_irq_exit(s5m87xx);
i2c_unregister_device(s5m87xx->rtc);
- regmap_exit(s5m87xx->regmap);
return 0;
}
diff --git a/drivers/mfd/sta2x11-mfd.c b/drivers/mfd/sta2x11-mfd.c
new file mode 100644
index 000000000000..d31fed07aefb
--- /dev/null
+++ b/drivers/mfd/sta2x11-mfd.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (c) 2009-2011 Wind River Systems, Inc.
+ * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/sta2x11-mfd.h>
+
+#include <asm/sta2x11.h>
+
+/* This describes STA2X11 MFD chip for us, we may have several */
+struct sta2x11_mfd {
+ struct sta2x11_instance *instance;
+ spinlock_t lock;
+ struct list_head list;
+ void __iomem *sctl_regs;
+ void __iomem *apbreg_regs;
+};
+
+static LIST_HEAD(sta2x11_mfd_list);
+
+/* Three functions to act on the list */
+static struct sta2x11_mfd *sta2x11_mfd_find(struct pci_dev *pdev)
+{
+ struct sta2x11_instance *instance;
+ struct sta2x11_mfd *mfd;
+
+ if (!pdev && !list_empty(&sta2x11_mfd_list)) {
+ pr_warning("%s: Unspecified device, "
+ "using first instance\n", __func__);
+ return list_entry(sta2x11_mfd_list.next,
+ struct sta2x11_mfd, list);
+ }
+
+ instance = sta2x11_get_instance(pdev);
+ if (!instance)
+ return NULL;
+ list_for_each_entry(mfd, &sta2x11_mfd_list, list) {
+ if (mfd->instance == instance)
+ return mfd;
+ }
+ return NULL;
+}
+
+static int __devinit sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
+{
+ struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+ struct sta2x11_instance *instance;
+
+ if (mfd)
+ return -EBUSY;
+ instance = sta2x11_get_instance(pdev);
+ if (!instance)
+ return -EINVAL;
+ mfd = kzalloc(sizeof(*mfd), flags);
+ if (!mfd)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&mfd->list);
+ spin_lock_init(&mfd->lock);
+ mfd->instance = instance;
+ list_add(&mfd->list, &sta2x11_mfd_list);
+ return 0;
+}
+
+static int __devexit mfd_remove(struct pci_dev *pdev)
+{
+ struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+
+ if (!mfd)
+ return -ENODEV;
+ list_del(&mfd->list);
+ kfree(mfd);
+ return 0;
+}
+
+/* These two functions are exported and are not expected to fail */
+u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+ struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+ u32 r;
+ unsigned long flags;
+
+ if (!mfd) {
+ dev_warn(&pdev->dev, ": can't access sctl regs\n");
+ return 0;
+ }
+ if (!mfd->sctl_regs) {
+ dev_warn(&pdev->dev, ": system ctl not initialized\n");
+ return 0;
+ }
+ spin_lock_irqsave(&mfd->lock, flags);
+ r = readl(mfd->sctl_regs + reg);
+ r &= ~mask;
+ r |= val;
+ if (mask)
+ writel(r, mfd->sctl_regs + reg);
+ spin_unlock_irqrestore(&mfd->lock, flags);
+ return r;
+}
+EXPORT_SYMBOL(sta2x11_sctl_mask);
+
+u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+ struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+ u32 r;
+ unsigned long flags;
+
+ if (!mfd) {
+ dev_warn(&pdev->dev, ": can't access apb regs\n");
+ return 0;
+ }
+ if (!mfd->apbreg_regs) {
+ dev_warn(&pdev->dev, ": apb bridge not initialized\n");
+ return 0;
+ }
+ spin_lock_irqsave(&mfd->lock, flags);
+ r = readl(mfd->apbreg_regs + reg);
+ r &= ~mask;
+ r |= val;
+ if (mask)
+ writel(r, mfd->apbreg_regs + reg);
+ spin_unlock_irqrestore(&mfd->lock, flags);
+ return r;
+}
+EXPORT_SYMBOL(sta2x11_apbreg_mask);
+
+/* Two debugfs files, for our registers (FIXME: one instance only) */
+#define REG(regname) {.name = #regname, .offset = SCTL_ ## regname}
+static struct debugfs_reg32 sta2x11_sctl_regs[] = {
+ REG(SCCTL), REG(ARMCFG), REG(SCPLLCTL), REG(SCPLLFCTRL),
+ REG(SCRESFRACT), REG(SCRESCTRL1), REG(SCRESXTRL2), REG(SCPEREN0),
+ REG(SCPEREN1), REG(SCPEREN2), REG(SCGRST), REG(SCPCIPMCR1),
+ REG(SCPCIPMCR2), REG(SCPCIPMSR1), REG(SCPCIPMSR2), REG(SCPCIPMSR3),
+ REG(SCINTREN), REG(SCRISR), REG(SCCLKSTAT0), REG(SCCLKSTAT1),
+ REG(SCCLKSTAT2), REG(SCRSTSTA),
+};
+#undef REG
+
+static struct debugfs_regset32 sctl_regset = {
+ .regs = sta2x11_sctl_regs,
+ .nregs = ARRAY_SIZE(sta2x11_sctl_regs),
+};
+
+#define REG(regname) {.name = #regname, .offset = regname}
+static struct debugfs_reg32 sta2x11_apbreg_regs[] = {
+ REG(APBREG_BSR), REG(APBREG_PAER), REG(APBREG_PWAC), REG(APBREG_PRAC),
+ REG(APBREG_PCG), REG(APBREG_PUR), REG(APBREG_EMU_PCG),
+};
+#undef REG
+
+static struct debugfs_regset32 apbreg_regset = {
+ .regs = sta2x11_apbreg_regs,
+ .nregs = ARRAY_SIZE(sta2x11_apbreg_regs),
+};
+
+static struct dentry *sta2x11_sctl_debugfs;
+static struct dentry *sta2x11_apbreg_debugfs;
+
+/* Probe for the two platform devices */
+static int sta2x11_sctl_probe(struct platform_device *dev)
+{
+ struct pci_dev **pdev;
+ struct sta2x11_mfd *mfd;
+ struct resource *res;
+
+ pdev = dev->dev.platform_data;
+ mfd = sta2x11_mfd_find(*pdev);
+ if (!mfd)
+ return -ENODEV;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ if (!request_mem_region(res->start, resource_size(res),
+ "sta2x11-sctl"))
+ return -EBUSY;
+
+ mfd->sctl_regs = ioremap(res->start, resource_size(res));
+ if (!mfd->sctl_regs) {
+ release_mem_region(res->start, resource_size(res));
+ return -ENOMEM;
+ }
+ sctl_regset.base = mfd->sctl_regs;
+ sta2x11_sctl_debugfs = debugfs_create_regset32("sta2x11-sctl",
+ S_IFREG | S_IRUGO,
+ NULL, &sctl_regset);
+ return 0;
+}
+
+static int sta2x11_apbreg_probe(struct platform_device *dev)
+{
+ struct pci_dev **pdev;
+ struct sta2x11_mfd *mfd;
+ struct resource *res;
+
+ pdev = dev->dev.platform_data;
+ dev_dbg(&dev->dev, "%s: pdata is %p\n", __func__, pdev);
+ dev_dbg(&dev->dev, "%s: *pdata is %p\n", __func__, *pdev);
+
+ mfd = sta2x11_mfd_find(*pdev);
+ if (!mfd)
+ return -ENODEV;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ if (!request_mem_region(res->start, resource_size(res),
+ "sta2x11-apbreg"))
+ return -EBUSY;
+
+ mfd->apbreg_regs = ioremap(res->start, resource_size(res));
+ if (!mfd->apbreg_regs) {
+ release_mem_region(res->start, resource_size(res));
+ return -ENOMEM;
+ }
+ dev_dbg(&dev->dev, "%s: regbase %p\n", __func__, mfd->apbreg_regs);
+
+ apbreg_regset.base = mfd->apbreg_regs;
+ sta2x11_apbreg_debugfs = debugfs_create_regset32("sta2x11-apbreg",
+ S_IFREG | S_IRUGO,
+ NULL, &apbreg_regset);
+ return 0;
+}
+
+/* The two platform drivers */
+static struct platform_driver sta2x11_sctl_platform_driver = {
+ .driver = {
+ .name = "sta2x11-sctl",
+ .owner = THIS_MODULE,
+ },
+ .probe = sta2x11_sctl_probe,
+};
+
+static int __init sta2x11_sctl_init(void)
+{
+ pr_info("%s\n", __func__);
+ return platform_driver_register(&sta2x11_sctl_platform_driver);
+}
+
+static struct platform_driver sta2x11_platform_driver = {
+ .driver = {
+ .name = "sta2x11-apbreg",
+ .owner = THIS_MODULE,
+ },
+ .probe = sta2x11_apbreg_probe,
+};
+
+static int __init sta2x11_apbreg_init(void)
+{
+ pr_info("%s\n", __func__);
+ return platform_driver_register(&sta2x11_platform_driver);
+}
+
+/*
+ * What follows is the PCI device that hosts the above two pdevs.
+ * Each logic block is 4kB and they are all consecutive: we use this info.
+ */
+
+/* Bar 0 */
+enum bar0_cells {
+ STA2X11_GPIO_0 = 0,
+ STA2X11_GPIO_1,
+ STA2X11_GPIO_2,
+ STA2X11_GPIO_3,
+ STA2X11_SCTL,
+ STA2X11_SCR,
+ STA2X11_TIME,
+};
+/* Bar 1 */
+enum bar1_cells {
+ STA2X11_APBREG = 0,
+};
+#define CELL_4K(_name, _cell) { \
+ .name = _name, \
+ .start = _cell * 4096, .end = _cell * 4096 + 4095, \
+ .flags = IORESOURCE_MEM, \
+ }
+
+static const __devinitconst struct resource gpio_resources[] = {
+ {
+ .name = "sta2x11_gpio", /* 4 consecutive cells, 1 driver */
+ .start = 0,
+ .end = (4 * 4096) - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+static const __devinitconst struct resource sctl_resources[] = {
+ CELL_4K("sta2x11-sctl", STA2X11_SCTL),
+};
+static const __devinitconst struct resource scr_resources[] = {
+ CELL_4K("sta2x11-scr", STA2X11_SCR),
+};
+static const __devinitconst struct resource time_resources[] = {
+ CELL_4K("sta2x11-time", STA2X11_TIME),
+};
+
+static const __devinitconst struct resource apbreg_resources[] = {
+ CELL_4K("sta2x11-apbreg", STA2X11_APBREG),
+};
+
+#define DEV(_name, _r) \
+ { .name = _name, .num_resources = ARRAY_SIZE(_r), .resources = _r, }
+
+static __devinitdata struct mfd_cell sta2x11_mfd_bar0[] = {
+ DEV("sta2x11-gpio", gpio_resources), /* offset 0: we add pdata later */
+ DEV("sta2x11-sctl", sctl_resources),
+ DEV("sta2x11-scr", scr_resources),
+ DEV("sta2x11-time", time_resources),
+};
+
+static __devinitdata struct mfd_cell sta2x11_mfd_bar1[] = {
+ DEV("sta2x11-apbreg", apbreg_resources),
+};
+
+static int sta2x11_mfd_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int sta2x11_mfd_resume(struct pci_dev *pdev)
+{
+ int err;
+
+ pci_set_power_state(pdev, 0);
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+ pci_restore_state(pdev);
+
+ return 0;
+}
+
+static int __devinit sta2x11_mfd_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ int err, i;
+ struct sta2x11_gpio_pdata *gpio_data;
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Can't enable device.\n");
+ return err;
+ }
+
+ err = pci_enable_msi(pdev);
+ if (err)
+ dev_info(&pdev->dev, "Enable msi failed\n");
+
+ /* Read gpio config data as pci device's platform data */
+ gpio_data = dev_get_platdata(&pdev->dev);
+ if (!gpio_data)
+ dev_warn(&pdev->dev, "no gpio configuration\n");
+
+ dev_dbg(&pdev->dev, "%s, gpio_data = %p (%p)\n", __func__,
+ gpio_data, &gpio_data);
+ dev_dbg(&pdev->dev, "%s, pdev = %p (%p)\n", __func__,
+ pdev, &pdev);
+
+ /* platform data is the pci device for all of them */
+ for (i = 0; i < ARRAY_SIZE(sta2x11_mfd_bar0); i++) {
+ sta2x11_mfd_bar0[i].pdata_size = sizeof(pdev);
+ sta2x11_mfd_bar0[i].platform_data = &pdev;
+ }
+ sta2x11_mfd_bar1[0].pdata_size = sizeof(pdev);
+ sta2x11_mfd_bar1[0].platform_data = &pdev;
+
+ /* Record this pdev before mfd_add_devices: their probe looks for it */
+ sta2x11_mfd_add(pdev, GFP_ATOMIC);
+
+
+ err = mfd_add_devices(&pdev->dev, -1,
+ sta2x11_mfd_bar0,
+ ARRAY_SIZE(sta2x11_mfd_bar0),
+ &pdev->resource[0],
+ 0);
+ if (err) {
+ dev_err(&pdev->dev, "mfd_add_devices[0] failed: %d\n", err);
+ goto err_disable;
+ }
+
+ err = mfd_add_devices(&pdev->dev, -1,
+ sta2x11_mfd_bar1,
+ ARRAY_SIZE(sta2x11_mfd_bar1),
+ &pdev->resource[1],
+ 0);
+ if (err) {
+ dev_err(&pdev->dev, "mfd_add_devices[1] failed: %d\n", err);
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ mfd_remove_devices(&pdev->dev);
+ pci_disable_device(pdev);
+ pci_disable_msi(pdev);
+ return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(sta2x11_mfd_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_GPIO)},
+ {0,},
+};
+
+static struct pci_driver sta2x11_mfd_driver = {
+ .name = "sta2x11-mfd",
+ .id_table = sta2x11_mfd_tbl,
+ .probe = sta2x11_mfd_probe,
+ .suspend = sta2x11_mfd_suspend,
+ .resume = sta2x11_mfd_resume,
+};
+
+static int __init sta2x11_mfd_init(void)
+{
+ pr_info("%s\n", __func__);
+ return pci_register_driver(&sta2x11_mfd_driver);
+}
+
+/*
+ * All of this must be ready before "normal" devices like MMCI appear.
+ * But the MFD (the PCI device) can't be probed too early. The following
+ * choice prepares the platform drivers very early and probes the PCI
+ * device later, but before other PCI devices.
+ */
+subsys_initcall(sta2x11_apbreg_init);
+subsys_initcall(sta2x11_sctl_init);
+rootfs_initcall(sta2x11_mfd_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wind River");
+MODULE_DESCRIPTION("STA2x11 mfd for GPIO, SCTL and APBREG");
+MODULE_DEVICE_TABLE(pci, sta2x11_mfd_tbl);
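A minimal sketch (not part of the patch) of what the CELL_4K() helper above expands to for the SCTL cell, given that STA2X11_SCTL is the fifth entry (value 4) of bar0_cells; the array name is made up for illustration:

	/* Illustrative expansion only; the driver itself uses CELL_4K() directly. */
	static const struct resource sctl_cell_expanded[] = {
		{
			.name  = "sta2x11-sctl",
			.start = 4 * 4096,		/* 16384: fifth 4 kB cell in BAR 0 */
			.end   = 4 * 4096 + 4095,	/* 20479 */
			.flags = IORESOURCE_MEM,
		},
	};
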
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index b58c43c7ea93..afd459013ecb 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -122,7 +122,6 @@ MODULE_DEVICE_TABLE(spi, stmpe_id);
static struct spi_driver stmpe_spi_driver = {
.driver = {
.name = "stmpe-spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &stmpe_dev_pm_ops,
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 47f802bf1848..396b9d1b6bd6 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -283,27 +283,24 @@ static int __devinit tps65090_i2c_probe(struct i2c_client *client,
}
}
- tps65090->rmap = regmap_init_i2c(tps65090->client,
- &tps65090_regmap_config);
+ tps65090->rmap = devm_regmap_init_i2c(tps65090->client,
+ &tps65090_regmap_config);
if (IS_ERR(tps65090->rmap)) {
- dev_err(&client->dev, "regmap_init failed with err: %ld\n",
- PTR_ERR(tps65090->rmap));
+ ret = PTR_ERR(tps65090->rmap);
+ dev_err(&client->dev, "regmap_init failed with err: %d\n", ret);
goto err_irq_exit;
- };
+ }
ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
ARRAY_SIZE(tps65090s), NULL, 0);
if (ret) {
dev_err(&client->dev, "add mfd devices failed with err: %d\n",
ret);
- goto err_regmap_exit;
+ goto err_irq_exit;
}
return 0;
-err_regmap_exit:
- regmap_exit(tps65090->rmap);
-
err_irq_exit:
if (client->irq)
free_irq(client->irq, tps65090);
@@ -316,29 +313,34 @@ static int __devexit tps65090_i2c_remove(struct i2c_client *client)
struct tps65090 *tps65090 = i2c_get_clientdata(client);
mfd_remove_devices(tps65090->dev);
- regmap_exit(tps65090->rmap);
if (client->irq)
free_irq(client->irq, tps65090);
return 0;
}
-#ifdef CONFIG_PM
-static int tps65090_i2c_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int tps65090_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
if (client->irq)
disable_irq(client->irq);
return 0;
}
-static int tps65090_i2c_resume(struct i2c_client *client)
+static int tps65090_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
if (client->irq)
enable_irq(client->irq);
return 0;
}
#endif
+static const struct dev_pm_ops tps65090_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tps65090_suspend, tps65090_resume)
+};
+
static const struct i2c_device_id tps65090_id_table[] = {
{ "tps65090", 0 },
{ },
@@ -349,13 +351,10 @@ static struct i2c_driver tps65090_driver = {
.driver = {
.name = "tps65090",
.owner = THIS_MODULE,
+ .pm = &tps65090_pm_ops,
},
.probe = tps65090_i2c_probe,
.remove = __devexit_p(tps65090_i2c_remove),
-#ifdef CONFIG_PM
- .suspend = tps65090_i2c_suspend,
- .resume = tps65090_i2c_resume,
-#endif
.id_table = tps65090_id_table,
};
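For context on the dev_pm_ops conversion above: with CONFIG_PM_SLEEP enabled, SET_SYSTEM_SLEEP_PM_OPS() wires the two given callbacks into all of the system-sleep hooks, roughly as sketched below (illustrative only, not part of the patch; the struct name is made up):

	static const struct dev_pm_ops tps65090_pm_ops_sketch = {
		.suspend  = tps65090_suspend,
		.freeze   = tps65090_suspend,
		.poweroff = tps65090_suspend,
		.resume   = tps65090_resume,
		.thaw     = tps65090_resume,
		.restore  = tps65090_resume,
	};
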
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index f7d854e4cc62..db194e433c08 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -96,7 +96,7 @@ EXPORT_SYMBOL_GPL(tps65217_reg_write);
* @val: Value to write.
* @level: Password protected level
*/
-int tps65217_update_bits(struct tps65217 *tps, unsigned int reg,
+static int tps65217_update_bits(struct tps65217 *tps, unsigned int reg,
unsigned int mask, unsigned int val, unsigned int level)
{
int ret;
@@ -150,7 +150,7 @@ static int __devinit tps65217_probe(struct i2c_client *client,
return -ENOMEM;
tps->pdata = pdata;
- tps->regmap = regmap_init_i2c(client, &tps65217_regmap_config);
+ tps->regmap = devm_regmap_init_i2c(client, &tps65217_regmap_config);
if (IS_ERR(tps->regmap)) {
ret = PTR_ERR(tps->regmap);
dev_err(tps->dev, "Failed to allocate register map: %d\n",
@@ -163,9 +163,9 @@ static int __devinit tps65217_probe(struct i2c_client *client,
ret = tps65217_reg_read(tps, TPS65217_REG_CHIPID, &version);
if (ret < 0) {
- dev_err(tps->dev, "Failed to read revision"
- " register: %d\n", ret);
- goto err_regmap;
+ dev_err(tps->dev, "Failed to read revision register: %d\n",
+ ret);
+ return ret;
}
dev_info(tps->dev, "TPS65217 ID %#x version 1.%d\n",
@@ -190,11 +190,6 @@ static int __devinit tps65217_probe(struct i2c_client *client,
}
return 0;
-
-err_regmap:
- regmap_exit(tps->regmap);
-
- return ret;
}
static int __devexit tps65217_remove(struct i2c_client *client)
@@ -205,8 +200,6 @@ static int __devexit tps65217_remove(struct i2c_client *client)
for (i = 0; i < TPS65217_NUM_REGULATOR; i++)
platform_device_unregister(tps->regulator_pdev[i]);
- regmap_exit(tps->regmap);
-
return 0;
}
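The same device-managed conversion appears in the tps65090 hunk above and in the tps65910, twl6040 and wm8350 hunks below: devm_regmap_init_i2c() ties the regmap's lifetime to the device, so probe error paths and remove() no longer call regmap_exit(). A minimal sketch of the resulting probe shape (illustrative only):

	tps->regmap = devm_regmap_init_i2c(client, &tps65217_regmap_config);
	if (IS_ERR(tps->regmap))
		return PTR_ERR(tps->regmap);	/* freed automatically on unbind */
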
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
index c9ed5c00a621..09aab3e4776d 100644
--- a/drivers/mfd/tps65910-irq.c
+++ b/drivers/mfd/tps65910-irq.c
@@ -20,15 +20,10 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/gpio.h>
#include <linux/mfd/tps65910.h>
-static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
- int irq)
-{
- return (irq - tps65910->irq_base);
-}
-
/*
* This is a threaded IRQ handler so can access I2C/SPI. Since all
* interrupts are clear on read the IRQ line will be reasserted and
@@ -41,28 +36,28 @@ static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
static irqreturn_t tps65910_irq(int irq, void *irq_data)
{
struct tps65910 *tps65910 = irq_data;
+ unsigned int reg;
u32 irq_sts;
u32 irq_mask;
- u8 reg;
int i;
- tps65910->read(tps65910, TPS65910_INT_STS, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_STS, &reg);
irq_sts = reg;
- tps65910->read(tps65910, TPS65910_INT_STS2, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_STS2, &reg);
irq_sts |= reg << 8;
switch (tps65910_chip_id(tps65910)) {
case TPS65911:
- tps65910->read(tps65910, TPS65910_INT_STS3, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_STS3, &reg);
irq_sts |= reg << 16;
}
- tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
irq_mask = reg;
- tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
irq_mask |= reg << 8;
switch (tps65910_chip_id(tps65910)) {
case TPS65911:
- tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
irq_mask |= reg << 16;
}
@@ -76,19 +71,19 @@ static irqreturn_t tps65910_irq(int irq, void *irq_data)
if (!(irq_sts & (1 << i)))
continue;
- handle_nested_irq(tps65910->irq_base + i);
+ handle_nested_irq(irq_find_mapping(tps65910->domain, i));
}
/* Write the STS register back to clear IRQs we handled */
reg = irq_sts & 0xFF;
irq_sts >>= 8;
- tps65910->write(tps65910, TPS65910_INT_STS, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_STS, reg);
reg = irq_sts & 0xFF;
- tps65910->write(tps65910, TPS65910_INT_STS2, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_STS2, reg);
switch (tps65910_chip_id(tps65910)) {
case TPS65911:
reg = irq_sts >> 8;
- tps65910->write(tps65910, TPS65910_INT_STS3, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_STS3, reg);
}
return IRQ_HANDLED;
@@ -105,27 +100,27 @@ static void tps65910_irq_sync_unlock(struct irq_data *data)
{
struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
u32 reg_mask;
- u8 reg;
+ unsigned int reg;
- tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
reg_mask = reg;
- tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
reg_mask |= reg << 8;
switch (tps65910_chip_id(tps65910)) {
case TPS65911:
- tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
reg_mask |= reg << 16;
}
if (tps65910->irq_mask != reg_mask) {
reg = tps65910->irq_mask & 0xFF;
- tps65910->write(tps65910, TPS65910_INT_MSK, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_MSK, reg);
reg = tps65910->irq_mask >> 8 & 0xFF;
- tps65910->write(tps65910, TPS65910_INT_MSK2, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_MSK2, reg);
switch (tps65910_chip_id(tps65910)) {
case TPS65911:
reg = tps65910->irq_mask >> 16;
- tps65910->write(tps65910, TPS65910_INT_MSK3, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_MSK3, reg);
}
}
mutex_unlock(&tps65910->irq_lock);
@@ -135,14 +130,14 @@ static void tps65910_irq_enable(struct irq_data *data)
{
struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
- tps65910->irq_mask &= ~( 1 << irq_to_tps65910_irq(tps65910, data->irq));
+ tps65910->irq_mask &= ~(1 << data->hwirq);
}
static void tps65910_irq_disable(struct irq_data *data)
{
struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
- tps65910->irq_mask |= ( 1 << irq_to_tps65910_irq(tps65910, data->irq));
+ tps65910->irq_mask |= (1 << data->hwirq);
}
#ifdef CONFIG_PM_SLEEP
@@ -164,10 +159,35 @@ static struct irq_chip tps65910_irq_chip = {
.irq_set_wake = tps65910_irq_set_wake,
};
+static int tps65910_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct tps65910 *tps65910 = h->host_data;
+
+ irq_set_chip_data(virq, tps65910);
+ irq_set_chip_and_handler(virq, &tps65910_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(virq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ irq_set_noprobe(virq);
+#endif
+
+ return 0;
+}
+
+static struct irq_domain_ops tps65910_domain_ops = {
+ .map = tps65910_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
int tps65910_irq_init(struct tps65910 *tps65910, int irq,
struct tps65910_platform_data *pdata)
{
- int ret, cur_irq;
+ int ret;
int flags = IRQF_ONESHOT;
if (!irq) {
@@ -175,17 +195,11 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
return -EINVAL;
}
- if (!pdata || !pdata->irq_base) {
- dev_warn(tps65910->dev, "No interrupt support, no IRQ base\n");
+ if (!pdata) {
+ dev_warn(tps65910->dev, "No interrupt support, no pdata\n");
return -EINVAL;
}
- tps65910->irq_mask = 0xFFFFFF;
-
- mutex_init(&tps65910->irq_lock);
- tps65910->chip_irq = irq;
- tps65910->irq_base = pdata->irq_base;
-
switch (tps65910_chip_id(tps65910)) {
case TPS65910:
tps65910->irq_num = TPS65910_NUM_IRQ;
@@ -195,22 +209,36 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
break;
}
- /* Register with genirq */
- for (cur_irq = tps65910->irq_base;
- cur_irq < tps65910->irq_num + tps65910->irq_base;
- cur_irq++) {
- irq_set_chip_data(cur_irq, tps65910);
- irq_set_chip_and_handler(cur_irq, &tps65910_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(cur_irq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
-#else
- irq_set_noprobe(cur_irq);
-#endif
+ if (pdata->irq_base > 0) {
+ pdata->irq_base = irq_alloc_descs(pdata->irq_base, 0,
+ tps65910->irq_num, -1);
+ if (pdata->irq_base < 0) {
+ dev_warn(tps65910->dev, "Failed to alloc IRQs: %d\n",
+ pdata->irq_base);
+ return pdata->irq_base;
+ }
+ }
+
+ tps65910->irq_mask = 0xFFFFFF;
+
+ mutex_init(&tps65910->irq_lock);
+ tps65910->chip_irq = irq;
+ tps65910->irq_base = pdata->irq_base;
+
+ if (pdata->irq_base > 0)
+ tps65910->domain = irq_domain_add_legacy(tps65910->dev->of_node,
+ tps65910->irq_num,
+ pdata->irq_base,
+ 0,
+ &tps65910_domain_ops, tps65910);
+ else
+ tps65910->domain = irq_domain_add_linear(tps65910->dev->of_node,
+ tps65910->irq_num,
+ &tps65910_domain_ops, tps65910);
+
+ if (!tps65910->domain) {
+ dev_err(tps65910->dev, "Failed to create IRQ domain\n");
+ return -ENOMEM;
}
ret = request_threaded_irq(irq, NULL, tps65910_irq, flags,
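With the IRQ domain in place, consumers translate a chip (hardware) interrupt number into a Linux virq through irq_find_mapping() instead of adding irq_base, as the handler above already does. A hedged sketch of how a child driver might request one of these interrupts; the helper name and the "tps65910-child" label are made up for illustration:

	static int tps65910_request_child_irq(struct tps65910 *tps65910, int hwirq,
					      irq_handler_t thread_fn, void *data)
	{
		int virq = irq_find_mapping(tps65910->domain, hwirq);

		if (!virq)
			return -ENXIO;
		return request_threaded_irq(virq, NULL, thread_fn, IRQF_ONESHOT,
					    "tps65910-child", data);
	}
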
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index bf2b25ebf2ca..be9e07b77325 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -19,13 +19,16 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/mfd/core.h>
#include <linux/regmap.h>
#include <linux/mfd/tps65910.h>
+#include <linux/of_device.h>
static struct mfd_cell tps65910s[] = {
{
+ .name = "tps65910-gpio",
+ },
+ {
.name = "tps65910-pmic",
},
{
@@ -37,30 +40,6 @@ static struct mfd_cell tps65910s[] = {
};
-static int tps65910_i2c_read(struct tps65910 *tps65910, u8 reg,
- int bytes, void *dest)
-{
- return regmap_bulk_read(tps65910->regmap, reg, dest, bytes);
-}
-
-static int tps65910_i2c_write(struct tps65910 *tps65910, u8 reg,
- int bytes, void *src)
-{
- return regmap_bulk_write(tps65910->regmap, reg, src, bytes);
-}
-
-int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, mask);
-}
-EXPORT_SYMBOL_GPL(tps65910_set_bits);
-
-int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, 0);
-}
-EXPORT_SYMBOL_GPL(tps65910_clear_bits);
-
static bool is_volatile_reg(struct device *dev, unsigned int reg)
{
struct tps65910 *tps65910 = dev_get_drvdata(dev);
@@ -85,80 +64,197 @@ static const struct regmap_config tps65910_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.volatile_reg = is_volatile_reg,
- .max_register = TPS65910_MAX_REGISTER,
- .num_reg_defaults_raw = TPS65910_MAX_REGISTER,
+ .max_register = TPS65910_MAX_REGISTER - 1,
.cache_type = REGCACHE_RBTREE,
};
-static int tps65910_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int __devinit tps65910_sleepinit(struct tps65910 *tps65910,
+ struct tps65910_board *pmic_pdata)
+{
+ struct device *dev = NULL;
+ int ret = 0;
+
+ dev = tps65910->dev;
+
+ if (!pmic_pdata->en_dev_slp)
+ return 0;
+
+ /* enabling SLEEP device state */
+ ret = tps65910_reg_set_bits(tps65910, TPS65910_DEVCTRL,
+ DEVCTRL_DEV_SLP_MASK);
+ if (ret < 0) {
+ dev_err(dev, "set dev_slp failed: %d\n", ret);
+ goto err_sleep_init;
+ }
+
+ /* Return if there is no sleep keepon data. */
+ if (!pmic_pdata->slp_keepon)
+ return 0;
+
+ if (pmic_pdata->slp_keepon->therm_keepon) {
+ ret = tps65910_reg_set_bits(tps65910,
+ TPS65910_SLEEP_KEEP_RES_ON,
+ SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK);
+ if (ret < 0) {
+ dev_err(dev, "set therm_keepon failed: %d\n", ret);
+ goto disable_dev_slp;
+ }
+ }
+
+ if (pmic_pdata->slp_keepon->clkout32k_keepon) {
+ ret = tps65910_reg_set_bits(tps65910,
+ TPS65910_SLEEP_KEEP_RES_ON,
+ SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK);
+ if (ret < 0) {
+ dev_err(dev, "set clkout32k_keepon failed: %d\n", ret);
+ goto disable_dev_slp;
+ }
+ }
+
+ if (pmic_pdata->slp_keepon->i2chs_keepon) {
+ ret = tps65910_reg_set_bits(tps65910,
+ TPS65910_SLEEP_KEEP_RES_ON,
+ SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK);
+ if (ret < 0) {
+ dev_err(dev, "set i2chs_keepon failed: %d\n", ret);
+ goto disable_dev_slp;
+ }
+ }
+
+ return 0;
+
+disable_dev_slp:
+ tps65910_reg_clear_bits(tps65910, TPS65910_DEVCTRL,
+ DEVCTRL_DEV_SLP_MASK);
+
+err_sleep_init:
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id tps65910_of_match[] = {
+ { .compatible = "ti,tps65910", .data = (void *)TPS65910},
+ { .compatible = "ti,tps65911", .data = (void *)TPS65911},
+ { },
+};
+MODULE_DEVICE_TABLE(of, tps65910_of_match);
+
+static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
+ int *chip_id)
+{
+ struct device_node *np = client->dev.of_node;
+ struct tps65910_board *board_info;
+ unsigned int prop;
+ const struct of_device_id *match;
+ int ret = 0;
+
+ match = of_match_device(tps65910_of_match, &client->dev);
+ if (!match) {
+ dev_err(&client->dev, "Failed to find matching dt id\n");
+ return NULL;
+ }
+
+ *chip_id = (int)match->data;
+
+ board_info = devm_kzalloc(&client->dev, sizeof(*board_info),
+ GFP_KERNEL);
+ if (!board_info) {
+ dev_err(&client->dev, "Failed to allocate pdata\n");
+ return NULL;
+ }
+
+ ret = of_property_read_u32(np, "ti,vmbch-threshold", &prop);
+ if (!ret)
+ board_info->vmbch_threshold = prop;
+ else if (*chip_id == TPS65911)
+ dev_warn(&client->dev, "VMBCH-Threshold not specified");
+
+ ret = of_property_read_u32(np, "ti,vmbch2-threshold", &prop);
+ if (!ret)
+ board_info->vmbch2_threshold = prop;
+ else if (*chip_id == TPS65911)
+ dev_warn(&client->dev, "VMBCH2-Threshold not specified");
+
+ board_info->irq = client->irq;
+ board_info->irq_base = -1;
+
+ return board_info;
+}
+#else
+static inline
+struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
+ int *chip_id)
+{
+ return NULL;
+}
+#endif
+
+static __devinit int tps65910_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct tps65910 *tps65910;
struct tps65910_board *pmic_plat_data;
+ struct tps65910_board *of_pmic_plat_data = NULL;
struct tps65910_platform_data *init_data;
int ret = 0;
+ int chip_id = id->driver_data;
pmic_plat_data = dev_get_platdata(&i2c->dev);
+
+ if (!pmic_plat_data && i2c->dev.of_node) {
+ pmic_plat_data = tps65910_parse_dt(i2c, &chip_id);
+ of_pmic_plat_data = pmic_plat_data;
+ }
+
if (!pmic_plat_data)
return -EINVAL;
- init_data = kzalloc(sizeof(struct tps65910_platform_data), GFP_KERNEL);
+ init_data = devm_kzalloc(&i2c->dev, sizeof(*init_data), GFP_KERNEL);
if (init_data == NULL)
return -ENOMEM;
- tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL);
- if (tps65910 == NULL) {
- kfree(init_data);
+ tps65910 = devm_kzalloc(&i2c->dev, sizeof(*tps65910), GFP_KERNEL);
+ if (tps65910 == NULL)
return -ENOMEM;
- }
+ tps65910->of_plat_data = of_pmic_plat_data;
i2c_set_clientdata(i2c, tps65910);
tps65910->dev = &i2c->dev;
tps65910->i2c_client = i2c;
- tps65910->id = id->driver_data;
- tps65910->read = tps65910_i2c_read;
- tps65910->write = tps65910_i2c_write;
+ tps65910->id = chip_id;
mutex_init(&tps65910->io_mutex);
- tps65910->regmap = regmap_init_i2c(i2c, &tps65910_regmap_config);
+ tps65910->regmap = devm_regmap_init_i2c(i2c, &tps65910_regmap_config);
if (IS_ERR(tps65910->regmap)) {
ret = PTR_ERR(tps65910->regmap);
dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret);
- goto regmap_err;
+ return ret;
}
ret = mfd_add_devices(tps65910->dev, -1,
tps65910s, ARRAY_SIZE(tps65910s),
NULL, 0);
- if (ret < 0)
- goto err;
+ if (ret < 0) {
+ dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
+ return ret;
+ }
init_data->irq = pmic_plat_data->irq;
init_data->irq_base = pmic_plat_data->irq_base;
- tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
-
tps65910_irq_init(tps65910, init_data->irq, init_data);
- kfree(init_data);
- return ret;
+ tps65910_sleepinit(tps65910, pmic_plat_data);
-err:
- regmap_exit(tps65910->regmap);
-regmap_err:
- kfree(tps65910);
- kfree(init_data);
return ret;
}
-static int tps65910_i2c_remove(struct i2c_client *i2c)
+static __devexit int tps65910_i2c_remove(struct i2c_client *i2c)
{
struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
tps65910_irq_exit(tps65910);
mfd_remove_devices(tps65910->dev);
- regmap_exit(tps65910->regmap);
- kfree(tps65910);
return 0;
}
@@ -175,9 +271,10 @@ static struct i2c_driver tps65910_i2c_driver = {
.driver = {
.name = "tps65910",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(tps65910_of_match),
},
.probe = tps65910_i2c_probe,
- .remove = tps65910_i2c_remove,
+ .remove = __devexit_p(tps65910_i2c_remove),
.id_table = tps65910_i2c_id,
};
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 5d656e814358..ad733d76207a 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -757,6 +757,7 @@ int twl4030_init_irq(struct device *dev, int irq_num)
dev_err(dev, "could not claim irq%d: %d\n", irq_num, status);
goto fail_rqirq;
}
+ enable_irq_wake(irq_num);
return irq_base;
fail_rqirq:
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
index 2d6bedadca09..4ded9e7aa246 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040-core.c
@@ -27,7 +27,12 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -35,8 +40,24 @@
#include <linux/err.h>
#include <linux/mfd/core.h>
#include <linux/mfd/twl6040.h>
+#include <linux/regulator/consumer.h>
#define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1)
+#define TWL6040_NUM_SUPPLIES (2)
+
+static bool twl6040_has_vibra(struct twl6040_platform_data *pdata,
+ struct device_node *node)
+{
+ if (pdata && pdata->vibra)
+ return true;
+
+#ifdef CONFIG_OF
+ if (of_find_node_by_name(node, "vibra"))
+ return true;
+#endif
+
+ return false;
+}
int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
{
@@ -502,17 +523,18 @@ static int __devinit twl6040_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct twl6040_platform_data *pdata = client->dev.platform_data;
+ struct device_node *node = client->dev.of_node;
struct twl6040 *twl6040;
struct mfd_cell *cell = NULL;
- int ret, children = 0;
+ int irq, ret, children = 0;
- if (!pdata) {
+ if (!pdata && !node) {
dev_err(&client->dev, "Platform data is missing\n");
return -EINVAL;
}
/* In order to operate correctly we need valid interrupt config */
- if (!client->irq || !pdata->irq_base) {
+ if (!client->irq) {
dev_err(&client->dev, "Invalid IRQ configuration\n");
return -EINVAL;
}
@@ -524,7 +546,7 @@ static int __devinit twl6040_probe(struct i2c_client *client,
goto err;
}
- twl6040->regmap = regmap_init_i2c(client, &twl6040_regmap_config);
+ twl6040->regmap = devm_regmap_init_i2c(client, &twl6040_regmap_config);
if (IS_ERR(twl6040->regmap)) {
ret = PTR_ERR(twl6040->regmap);
goto err;
@@ -532,9 +554,23 @@ static int __devinit twl6040_probe(struct i2c_client *client,
i2c_set_clientdata(client, twl6040);
+ twl6040->supplies[0].supply = "vio";
+ twl6040->supplies[1].supply = "v2v1";
+ ret = regulator_bulk_get(&client->dev, TWL6040_NUM_SUPPLIES,
+ twl6040->supplies);
+ if (ret != 0) {
+ dev_err(&client->dev, "Failed to get supplies: %d\n", ret);
+ goto regulator_get_err;
+ }
+
+ ret = regulator_bulk_enable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+ if (ret != 0) {
+ dev_err(&client->dev, "Failed to enable supplies: %d\n", ret);
+ goto power_err;
+ }
+
twl6040->dev = &client->dev;
twl6040->irq = client->irq;
- twl6040->irq_base = pdata->irq_base;
mutex_init(&twl6040->mutex);
mutex_init(&twl6040->io_mutex);
@@ -543,22 +579,26 @@ static int __devinit twl6040_probe(struct i2c_client *client,
twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV);
/* ERRATA: Automatic power-up is not possible in ES1.0 */
- if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0)
- twl6040->audpwron = pdata->audpwron_gpio;
- else
+ if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0) {
+ if (pdata)
+ twl6040->audpwron = pdata->audpwron_gpio;
+ else
+ twl6040->audpwron = of_get_named_gpio(node,
+ "ti,audpwron-gpio", 0);
+ } else
twl6040->audpwron = -EINVAL;
if (gpio_is_valid(twl6040->audpwron)) {
ret = gpio_request_one(twl6040->audpwron, GPIOF_OUT_INIT_LOW,
"audpwron");
if (ret)
- goto gpio1_err;
+ goto gpio_err;
}
/* codec interrupt */
ret = twl6040_irq_init(twl6040);
if (ret)
- goto gpio2_err;
+ goto irq_init_err;
ret = request_threaded_irq(twl6040->irq_base + TWL6040_IRQ_READY,
NULL, twl6040_naudint_handler, 0,
@@ -572,22 +612,27 @@ static int __devinit twl6040_probe(struct i2c_client *client,
/* dual-access registers controlled by I2C only */
twl6040_set_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_I2CSEL);
- if (pdata->codec) {
- int irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
-
- cell = &twl6040->cells[children];
- cell->name = "twl6040-codec";
- twl6040_codec_rsrc[0].start = irq;
- twl6040_codec_rsrc[0].end = irq;
- cell->resources = twl6040_codec_rsrc;
- cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
+ /*
+ * The main functionality of twl6040 is to provide audio on OMAP4+ systems,
+ * so the ASoC codec child is added whenever this driver has been loaded.
+ * The ASoC codec can work without pdata, so pass the platform_data only if
+ * it has been provided.
+ */
+ irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
+ cell = &twl6040->cells[children];
+ cell->name = "twl6040-codec";
+ twl6040_codec_rsrc[0].start = irq;
+ twl6040_codec_rsrc[0].end = irq;
+ cell->resources = twl6040_codec_rsrc;
+ cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
+ if (pdata && pdata->codec) {
cell->platform_data = pdata->codec;
cell->pdata_size = sizeof(*pdata->codec);
- children++;
}
+ children++;
- if (pdata->vibra) {
- int irq = twl6040->irq_base + TWL6040_IRQ_VIB;
+ if (twl6040_has_vibra(pdata, node)) {
+ irq = twl6040->irq_base + TWL6040_IRQ_VIB;
cell = &twl6040->cells[children];
cell->name = "twl6040-vibra";
@@ -596,21 +641,17 @@ static int __devinit twl6040_probe(struct i2c_client *client,
cell->resources = twl6040_vibra_rsrc;
cell->num_resources = ARRAY_SIZE(twl6040_vibra_rsrc);
- cell->platform_data = pdata->vibra;
- cell->pdata_size = sizeof(*pdata->vibra);
+ if (pdata && pdata->vibra) {
+ cell->platform_data = pdata->vibra;
+ cell->pdata_size = sizeof(*pdata->vibra);
+ }
children++;
}
- if (children) {
- ret = mfd_add_devices(&client->dev, -1, twl6040->cells,
- children, NULL, 0);
- if (ret)
- goto mfd_err;
- } else {
- dev_err(&client->dev, "No platform data found for children\n");
- ret = -ENODEV;
+ ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children,
+ NULL, 0);
+ if (ret)
goto mfd_err;
- }
return 0;
@@ -618,12 +659,15 @@ mfd_err:
free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
irq_err:
twl6040_irq_exit(twl6040);
-gpio2_err:
+irq_init_err:
if (gpio_is_valid(twl6040->audpwron))
gpio_free(twl6040->audpwron);
-gpio1_err:
+gpio_err:
+ regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+power_err:
+ regulator_bulk_free(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+regulator_get_err:
i2c_set_clientdata(client, NULL);
- regmap_exit(twl6040->regmap);
err:
return ret;
}
@@ -643,7 +687,9 @@ static int __devexit twl6040_remove(struct i2c_client *client)
mfd_remove_devices(&client->dev);
i2c_set_clientdata(client, NULL);
- regmap_exit(twl6040->regmap);
+
+ regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+ regulator_bulk_free(TWL6040_NUM_SUPPLIES, twl6040->supplies);
return 0;
}
diff --git a/drivers/mfd/twl6040-irq.c b/drivers/mfd/twl6040-irq.c
index b3f8ddaa28a8..4b42543da228 100644
--- a/drivers/mfd/twl6040-irq.c
+++ b/drivers/mfd/twl6040-irq.c
@@ -23,7 +23,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/err.h>
#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/mfd/core.h>
#include <linux/mfd/twl6040.h>
@@ -138,7 +141,8 @@ static irqreturn_t twl6040_irq_thread(int irq, void *data)
int twl6040_irq_init(struct twl6040 *twl6040)
{
- int cur_irq, ret;
+ struct device_node *node = twl6040->dev->of_node;
+ int i, nr_irqs, irq_base, ret;
u8 val;
mutex_init(&twl6040->irq_mutex);
@@ -148,21 +152,31 @@ int twl6040_irq_init(struct twl6040 *twl6040)
twl6040->irq_masks_cache = TWL6040_ALLINT_MSK;
twl6040_reg_write(twl6040, TWL6040_REG_INTMR, TWL6040_ALLINT_MSK);
+ nr_irqs = ARRAY_SIZE(twl6040_irqs);
+
+ irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
+ if (IS_ERR_VALUE(irq_base)) {
+ dev_err(twl6040->dev, "Failed to allocate IRQ descs\n");
+ return irq_base;
+ }
+ twl6040->irq_base = irq_base;
+
+ irq_domain_add_legacy(node, ARRAY_SIZE(twl6040_irqs), irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+
/* Register them with genirq */
- for (cur_irq = twl6040->irq_base;
- cur_irq < twl6040->irq_base + ARRAY_SIZE(twl6040_irqs);
- cur_irq++) {
- irq_set_chip_data(cur_irq, twl6040);
- irq_set_chip_and_handler(cur_irq, &twl6040_irq_chip,
+ for (i = irq_base; i < irq_base + nr_irqs; i++) {
+ irq_set_chip_data(i, twl6040);
+ irq_set_chip_and_handler(i, &twl6040_irq_chip,
handle_level_irq);
- irq_set_nested_thread(cur_irq, 1);
+ irq_set_nested_thread(i, 1);
/* ARM needs us to explicitly flag the IRQ as valid
* and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
+ set_irq_flags(i, IRQF_VALID);
#else
- irq_set_noprobe(cur_irq);
+ irq_set_noprobe(i);
#endif
}
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index b73cc15e0081..872aff21e4be 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -131,17 +131,7 @@ static struct pci_driver vx855_pci_driver = {
.remove = __devexit_p(vx855_remove),
};
-static int vx855_init(void)
-{
- return pci_register_driver(&vx855_pci_driver);
-}
-module_init(vx855_init);
-
-static void vx855_exit(void)
-{
- pci_unregister_driver(&vx855_pci_driver);
-}
-module_exit(vx855_exit);
+module_pci_driver(vx855_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
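module_pci_driver() generates the same registration boilerplate that the removed vx855_init()/vx855_exit() pair provided by hand, roughly equivalent to (illustrative expansion):

	static int __init vx855_pci_driver_init(void)
	{
		return pci_register_driver(&vx855_pci_driver);
	}
	module_init(vx855_pci_driver_init);

	static void __exit vx855_pci_driver_exit(void)
	{
		pci_unregister_driver(&vx855_pci_driver);
	}
	module_exit(vx855_pci_driver_exit);
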
diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
index 87210954a066..6ee3018d8653 100644
--- a/drivers/mfd/wm831x-auxadc.c
+++ b/drivers/mfd/wm831x-auxadc.c
@@ -280,11 +280,11 @@ void wm831x_auxadc_init(struct wm831x *wm831x)
mutex_init(&wm831x->auxadc_lock);
INIT_LIST_HEAD(&wm831x->auxadc_pending);
- if (wm831x->irq && wm831x->irq_base) {
+ if (wm831x->irq) {
wm831x->auxadc_read = wm831x_auxadc_read_irq;
- ret = request_threaded_irq(wm831x->irq_base +
- WM831X_IRQ_AUXADC_DATA,
+ ret = request_threaded_irq(wm831x_irq(wm831x,
+ WM831X_IRQ_AUXADC_DATA),
NULL, wm831x_auxadc_irq, 0,
"auxadc", wm831x);
if (ret < 0) {
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 838056c3493a..946698fd2dc6 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -614,8 +614,15 @@ int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg,
}
EXPORT_SYMBOL_GPL(wm831x_set_bits);
+static struct resource wm831x_io_parent = {
+ .start = 0,
+ .end = 0xffffffff,
+ .flags = IORESOURCE_IO,
+};
+
static struct resource wm831x_dcdc1_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_DC1_CONTROL_1,
.end = WM831X_DC1_DVS_CONTROL,
.flags = IORESOURCE_IO,
@@ -637,6 +644,7 @@ static struct resource wm831x_dcdc1_resources[] = {
static struct resource wm831x_dcdc2_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_DC2_CONTROL_1,
.end = WM831X_DC2_DVS_CONTROL,
.flags = IORESOURCE_IO,
@@ -657,6 +665,7 @@ static struct resource wm831x_dcdc2_resources[] = {
static struct resource wm831x_dcdc3_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_DC3_CONTROL_1,
.end = WM831X_DC3_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -671,6 +680,7 @@ static struct resource wm831x_dcdc3_resources[] = {
static struct resource wm831x_dcdc4_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_DC4_CONTROL,
.end = WM831X_DC4_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -685,6 +695,7 @@ static struct resource wm831x_dcdc4_resources[] = {
static struct resource wm8320_dcdc4_buck_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_DC4_CONTROL,
.end = WM832X_DC4_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -707,6 +718,7 @@ static struct resource wm831x_gpio_resources[] = {
static struct resource wm831x_isink1_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_CURRENT_SINK_1,
.end = WM831X_CURRENT_SINK_1,
.flags = IORESOURCE_IO,
@@ -720,6 +732,7 @@ static struct resource wm831x_isink1_resources[] = {
static struct resource wm831x_isink2_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_CURRENT_SINK_2,
.end = WM831X_CURRENT_SINK_2,
.flags = IORESOURCE_IO,
@@ -733,6 +746,7 @@ static struct resource wm831x_isink2_resources[] = {
static struct resource wm831x_ldo1_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO1_CONTROL,
.end = WM831X_LDO1_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -747,6 +761,7 @@ static struct resource wm831x_ldo1_resources[] = {
static struct resource wm831x_ldo2_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO2_CONTROL,
.end = WM831X_LDO2_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -761,6 +776,7 @@ static struct resource wm831x_ldo2_resources[] = {
static struct resource wm831x_ldo3_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO3_CONTROL,
.end = WM831X_LDO3_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -775,6 +791,7 @@ static struct resource wm831x_ldo3_resources[] = {
static struct resource wm831x_ldo4_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO4_CONTROL,
.end = WM831X_LDO4_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -789,6 +806,7 @@ static struct resource wm831x_ldo4_resources[] = {
static struct resource wm831x_ldo5_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO5_CONTROL,
.end = WM831X_LDO5_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -803,6 +821,7 @@ static struct resource wm831x_ldo5_resources[] = {
static struct resource wm831x_ldo6_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO6_CONTROL,
.end = WM831X_LDO6_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -817,6 +836,7 @@ static struct resource wm831x_ldo6_resources[] = {
static struct resource wm831x_ldo7_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO7_CONTROL,
.end = WM831X_LDO7_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -831,6 +851,7 @@ static struct resource wm831x_ldo7_resources[] = {
static struct resource wm831x_ldo8_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO8_CONTROL,
.end = WM831X_LDO8_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -845,6 +866,7 @@ static struct resource wm831x_ldo8_resources[] = {
static struct resource wm831x_ldo9_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO9_CONTROL,
.end = WM831X_LDO9_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -859,6 +881,7 @@ static struct resource wm831x_ldo9_resources[] = {
static struct resource wm831x_ldo10_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO10_CONTROL,
.end = WM831X_LDO10_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -873,6 +896,7 @@ static struct resource wm831x_ldo10_resources[] = {
static struct resource wm831x_ldo11_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO11_ON_CONTROL,
.end = WM831X_LDO11_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -974,6 +998,7 @@ static struct resource wm831x_rtc_resources[] = {
static struct resource wm831x_status1_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_STATUS_LED_1,
.end = WM831X_STATUS_LED_1,
.flags = IORESOURCE_IO,
@@ -982,6 +1007,7 @@ static struct resource wm831x_status1_resources[] = {
static struct resource wm831x_status2_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_STATUS_LED_2,
.end = WM831X_STATUS_LED_2,
.flags = IORESOURCE_IO,
@@ -1787,27 +1813,27 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8310:
ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8310_devs, ARRAY_SIZE(wm8310_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
break;
case WM8311:
ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8311_devs, ARRAY_SIZE(wm8311_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
if (!pdata || !pdata->disable_touch)
mfd_add_devices(wm831x->dev, wm831x_num,
touch_devs, ARRAY_SIZE(touch_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
break;
case WM8312:
ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8312_devs, ARRAY_SIZE(wm8312_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
if (!pdata || !pdata->disable_touch)
mfd_add_devices(wm831x->dev, wm831x_num,
touch_devs, ARRAY_SIZE(touch_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
break;
case WM8320:
@@ -1816,7 +1842,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8326:
ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8320_devs, ARRAY_SIZE(wm8320_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
break;
default:
@@ -1841,7 +1867,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
if (ret & WM831X_XTAL_ENA) {
ret = mfd_add_devices(wm831x->dev, wm831x_num,
rtc_devs, ARRAY_SIZE(rtc_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
if (ret != 0) {
dev_err(wm831x->dev, "Failed to add RTC: %d\n", ret);
goto err_irq;
@@ -1854,7 +1880,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
/* Treat errors as non-critical */
ret = mfd_add_devices(wm831x->dev, wm831x_num, backlight_devs,
ARRAY_SIZE(backlight_devs), NULL,
- wm831x->irq_base);
+ 0);
if (ret < 0)
dev_err(wm831x->dev, "Failed to add backlight: %d\n",
ret);
@@ -1883,8 +1909,7 @@ void wm831x_device_exit(struct wm831x *wm831x)
{
wm831x_otp_exit(wm831x);
mfd_remove_devices(wm831x->dev);
- if (wm831x->irq_base)
- free_irq(wm831x->irq_base + WM831X_IRQ_AUXADC_DATA, wm831x);
+ free_irq(wm831x_irq(wm831x, WM831X_IRQ_AUXADC_DATA), wm831x);
wm831x_irq_exit(wm831x);
}
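The wm831x_irq() helper used in the auxadc and device-exit hunks above is not shown in this diff; presumably (an assumption, not confirmed by the patch) it maps a chip interrupt number to a Linux virq through the new IRQ domain, along the lines of:

	static inline int wm831x_irq(struct wm831x *wm831x, int irq)
	{
		return irq_create_mapping(wm831x->irq_domain, irq);	/* sketch only */
	}
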
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index bec4d0539160..804e56ec99eb 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -18,6 +18,7 @@
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
@@ -328,7 +329,7 @@ static inline int irq_data_to_status_reg(struct wm831x_irq_data *irq_data)
static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x,
int irq)
{
- return &wm831x_irqs[irq - wm831x->irq_base];
+ return &wm831x_irqs[irq];
}
static void wm831x_irq_lock(struct irq_data *data)
@@ -374,7 +375,7 @@ static void wm831x_irq_enable(struct irq_data *data)
{
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
- data->irq);
+ data->hwirq);
wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
@@ -383,7 +384,7 @@ static void wm831x_irq_disable(struct irq_data *data)
{
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
- data->irq);
+ data->hwirq);
wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
}
@@ -393,7 +394,7 @@ static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
int irq;
- irq = data->irq - wm831x->irq_base;
+ irq = data->hwirq;
if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
/* Ignore internal-only IRQs */
@@ -412,22 +413,25 @@ static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
* do the update here as we can be called with the bus lock
* held.
*/
+ wm831x->gpio_level_low[irq] = false;
+ wm831x->gpio_level_high[irq] = false;
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_INT_MODE;
- wm831x->gpio_level[irq] = false;
break;
case IRQ_TYPE_EDGE_RISING:
wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
- wm831x->gpio_level[irq] = false;
break;
case IRQ_TYPE_EDGE_FALLING:
wm831x->gpio_update[irq] = 0x10000;
- wm831x->gpio_level[irq] = false;
break;
case IRQ_TYPE_LEVEL_HIGH:
wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
- wm831x->gpio_level[irq] = true;
+ wm831x->gpio_level_high[irq] = true;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ wm831x->gpio_update[irq] = 0x10000;
+ wm831x->gpio_level_low[irq] = true;
break;
default:
return -EINVAL;
@@ -469,9 +473,11 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
* descriptors.
*/
if (primary & WM831X_TCHPD_INT)
- handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHPD);
+ handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+ WM831X_IRQ_TCHPD));
if (primary & WM831X_TCHDATA_INT)
- handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHDATA);
+ handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+ WM831X_IRQ_TCHDATA));
primary &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT);
for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) {
@@ -507,16 +513,29 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
}
if (*status & wm831x_irqs[i].mask)
- handle_nested_irq(wm831x->irq_base + i);
+ handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+ i));
/* Simulate an edge triggered IRQ by polling the input
* status. This is sucky but improves interoperability.
*/
if (primary == WM831X_GP_INT &&
- wm831x->gpio_level[i - WM831X_IRQ_GPIO_1]) {
+ wm831x->gpio_level_high[i - WM831X_IRQ_GPIO_1]) {
ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
while (ret & 1 << (i - WM831X_IRQ_GPIO_1)) {
- handle_nested_irq(wm831x->irq_base + i);
+ handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+ i));
+ ret = wm831x_reg_read(wm831x,
+ WM831X_GPIO_LEVEL);
+ }
+ }
+
+ if (primary == WM831X_GP_INT &&
+ wm831x->gpio_level_low[i - WM831X_IRQ_GPIO_1]) {
+ ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
+ while (!(ret & 1 << (i - WM831X_IRQ_GPIO_1))) {
+ handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+ i));
ret = wm831x_reg_read(wm831x,
WM831X_GPIO_LEVEL);
}
@@ -527,10 +546,34 @@ out:
return IRQ_HANDLED;
}
+static int wm831x_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_chip_and_handler(virq, &wm831x_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(virq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ irq_set_noprobe(virq);
+#endif
+
+ return 0;
+}
+
+static struct irq_domain_ops wm831x_irq_domain_ops = {
+ .map = wm831x_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
int wm831x_irq_init(struct wm831x *wm831x, int irq)
{
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int i, cur_irq, ret;
+ struct irq_domain *domain;
+ int i, ret, irq_base;
mutex_init(&wm831x->irq_lock);
@@ -543,18 +586,33 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
}
/* Try to dynamically allocate IRQs if no base is specified */
- if (!pdata || !pdata->irq_base)
- wm831x->irq_base = -1;
+ if (pdata && pdata->irq_base) {
+ irq_base = irq_alloc_descs(pdata->irq_base, 0,
+ WM831X_NUM_IRQS, 0);
+ if (irq_base < 0) {
+ dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
+ irq_base);
+ irq_base = 0;
+ }
+ } else {
+ irq_base = 0;
+ }
+
+ if (irq_base)
+ domain = irq_domain_add_legacy(wm831x->dev->of_node,
+ ARRAY_SIZE(wm831x_irqs),
+ irq_base, 0,
+ &wm831x_irq_domain_ops,
+ wm831x);
else
- wm831x->irq_base = pdata->irq_base;
+ domain = irq_domain_add_linear(wm831x->dev->of_node,
+ ARRAY_SIZE(wm831x_irqs),
+ &wm831x_irq_domain_ops,
+ wm831x);
- wm831x->irq_base = irq_alloc_descs(wm831x->irq_base, 0,
- WM831X_NUM_IRQS, 0);
- if (wm831x->irq_base < 0) {
- dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
- wm831x->irq_base);
- wm831x->irq_base = 0;
- return 0;
+ if (!domain) {
+ dev_warn(wm831x->dev, "Failed to allocate IRQ domain\n");
+ return -EINVAL;
}
if (pdata && pdata->irq_cmos)
@@ -565,38 +623,22 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
wm831x_set_bits(wm831x, WM831X_IRQ_CONFIG,
WM831X_IRQ_OD, i);
- /* Try to flag /IRQ as a wake source; there are a number of
- * unconditional wake sources in the PMIC so this isn't
- * conditional but we don't actually care *too* much if it
- * fails.
- */
- ret = enable_irq_wake(irq);
- if (ret != 0) {
- dev_warn(wm831x->dev, "Can't enable IRQ as wake source: %d\n",
- ret);
- }
-
wm831x->irq = irq;
-
- /* Register them with genirq */
- for (cur_irq = wm831x->irq_base;
- cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base;
- cur_irq++) {
- irq_set_chip_data(cur_irq, wm831x);
- irq_set_chip_and_handler(cur_irq, &wm831x_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(cur_irq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
-#else
- irq_set_noprobe(cur_irq);
-#endif
- }
+ wm831x->irq_domain = domain;
if (irq) {
+ /* Try to flag /IRQ as a wake source; there are a number of
+ * unconditional wake sources in the PMIC so this isn't
+ * conditional but we don't actually care *too* much if it
+ * fails.
+ */
+ ret = enable_irq_wake(irq);
+ if (ret != 0) {
+ dev_warn(wm831x->dev,
+ "Can't enable IRQ as wake source: %d\n",
+ ret);
+ }
+
ret = request_threaded_irq(irq, NULL, wm831x_irq_thread,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"wm831x", wm831x);
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index dd1caaac55e4..8a9b11ca076a 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -20,6 +20,7 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/regmap.h>
#include <linux/workqueue.h>
#include <linux/mfd/wm8350/core.h>
@@ -74,7 +75,7 @@ static int wm8350_phys_read(struct wm8350 *wm8350, u8 reg, int num_regs,
int bytes = num_regs * 2;
dev_dbg(wm8350->dev, "volatile read\n");
- ret = wm8350->read_dev(wm8350, reg, bytes, (char *)dest);
+ ret = regmap_raw_read(wm8350->regmap, reg, dest, bytes);
for (i = reg; i < reg + num_regs; i++) {
/* Cache is CPU endian */
@@ -96,9 +97,6 @@ static int wm8350_read(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *dest)
int ret = 0;
int bytes = num_regs * 2;
- if (wm8350->read_dev == NULL)
- return -ENODEV;
-
if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) {
dev_err(wm8350->dev, "invalid reg %x\n",
reg + num_regs - 1);
@@ -149,9 +147,6 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
int end = reg + num_regs;
int bytes = num_regs * 2;
- if (wm8350->write_dev == NULL)
- return -ENODEV;
-
if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) {
dev_err(wm8350->dev, "invalid reg %x\n",
reg + num_regs - 1);
@@ -182,7 +177,7 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
}
/* Actually write it out */
- return wm8350->write_dev(wm8350, reg, bytes, (char *)src);
+ return regmap_raw_write(wm8350->regmap, reg, src, bytes);
}
/*
@@ -515,9 +510,8 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int type, int mode)
* a PMIC so the device may not be in a virgin state and we
* can't rely on the silicon values.
*/
- ret = wm8350->read_dev(wm8350, 0,
- sizeof(u16) * (WM8350_MAX_REGISTER + 1),
- wm8350->reg_cache);
+ ret = regmap_raw_read(wm8350->regmap, 0, wm8350->reg_cache,
+ sizeof(u16) * (WM8350_MAX_REGISTER + 1));
if (ret < 0) {
dev_err(wm8350->dev,
"failed to read initial cache values\n");
@@ -570,35 +564,30 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
struct wm8350_platform_data *pdata)
{
int ret;
- u16 id1, id2, mask_rev;
- u16 cust_id, mode, chip_rev;
+ unsigned int id1, id2, mask_rev;
+ unsigned int cust_id, mode, chip_rev;
dev_set_drvdata(wm8350->dev, wm8350);
/* get WM8350 revision and config mode */
- ret = wm8350->read_dev(wm8350, WM8350_RESET_ID, sizeof(id1), &id1);
+ ret = regmap_read(wm8350->regmap, WM8350_RESET_ID, &id1);
if (ret != 0) {
dev_err(wm8350->dev, "Failed to read ID: %d\n", ret);
goto err;
}
- ret = wm8350->read_dev(wm8350, WM8350_ID, sizeof(id2), &id2);
+ ret = regmap_read(wm8350->regmap, WM8350_ID, &id2);
if (ret != 0) {
dev_err(wm8350->dev, "Failed to read ID: %d\n", ret);
goto err;
}
- ret = wm8350->read_dev(wm8350, WM8350_REVISION, sizeof(mask_rev),
- &mask_rev);
+ ret = regmap_read(wm8350->regmap, WM8350_REVISION, &mask_rev);
if (ret != 0) {
dev_err(wm8350->dev, "Failed to read revision: %d\n", ret);
goto err;
}
- id1 = be16_to_cpu(id1);
- id2 = be16_to_cpu(id2);
- mask_rev = be16_to_cpu(mask_rev);
-
if (id1 != 0x6143) {
dev_err(wm8350->dev,
"Device with ID %x is not a WM8350\n", id1);
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index d955faaf27c4..a68aceb4e48c 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -15,47 +15,18 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/mfd/wm8350/core.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
-static int wm8350_i2c_read_device(struct wm8350 *wm8350, char reg,
- int bytes, void *dest)
-{
- int ret;
-
- ret = i2c_master_send(wm8350->i2c_client, &reg, 1);
- if (ret < 0)
- return ret;
- ret = i2c_master_recv(wm8350->i2c_client, dest, bytes);
- if (ret < 0)
- return ret;
- if (ret != bytes)
- return -EIO;
- return 0;
-}
-
-static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
- int bytes, void *src)
-{
- /* we add 1 byte for device register */
- u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
- int ret;
-
- if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
- return -EINVAL;
-
- msg[0] = reg;
- memcpy(&msg[1], src, bytes);
- ret = i2c_master_send(wm8350->i2c_client, msg, bytes + 1);
- if (ret < 0)
- return ret;
- if (ret != bytes + 1)
- return -EIO;
- return 0;
-}
+static const struct regmap_config wm8350_regmap = {
+ .reg_bits = 8,
+ .val_bits = 16,
+};
static int wm8350_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
@@ -67,20 +38,18 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
if (wm8350 == NULL)
return -ENOMEM;
+ wm8350->regmap = devm_regmap_init_i2c(i2c, &wm8350_regmap);
+ if (IS_ERR(wm8350->regmap)) {
+ ret = PTR_ERR(wm8350->regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
i2c_set_clientdata(i2c, wm8350);
wm8350->dev = &i2c->dev;
- wm8350->i2c_client = i2c;
- wm8350->read_dev = wm8350_i2c_read_device;
- wm8350->write_dev = wm8350_i2c_write_device;
-
- ret = wm8350_device_init(wm8350, i2c->irq, i2c->dev.platform_data);
- if (ret < 0)
- goto err;
-
- return ret;
-err:
- return ret;
+ return wm8350_device_init(wm8350, i2c->irq, i2c->dev.platform_data);
}
static int wm8350_i2c_remove(struct i2c_client *i2c)
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 1189a17f0f25..4b7d378551d5 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -23,136 +23,16 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-static struct {
- u16 readable; /* Mask of readable bits */
- u16 writable; /* Mask of writable bits */
- u16 vol; /* Mask of volatile bits */
- int is_codec; /* Register controlled by codec reset */
- u16 default_val; /* Value on reset */
-} reg_data[] = {
- { 0xFFFF, 0xFFFF, 0x0000, 0, 0x6172 }, /* R0 */
- { 0x7000, 0x0000, 0x8000, 0, 0x0000 }, /* R1 */
- { 0xFF17, 0xFF17, 0x0000, 0, 0x0000 }, /* R2 */
- { 0xEBF3, 0xEBF3, 0x0000, 1, 0x6000 }, /* R3 */
- { 0x3CF3, 0x3CF3, 0x0000, 1, 0x0000 }, /* R4 */
- { 0xF1F8, 0xF1F8, 0x0000, 1, 0x4050 }, /* R5 */
- { 0xFC1F, 0xFC1F, 0x0000, 1, 0x4000 }, /* R6 */
- { 0xDFDE, 0xDFDE, 0x0000, 1, 0x01C8 }, /* R7 */
- { 0xFCFC, 0xFCFC, 0x0000, 1, 0x0000 }, /* R8 */
- { 0xEFFF, 0xEFFF, 0x0000, 1, 0x0040 }, /* R9 */
- { 0xEFFF, 0xEFFF, 0x0000, 1, 0x0040 }, /* R10 */
- { 0x27F7, 0x27F7, 0x0000, 1, 0x0004 }, /* R11 */
- { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R12 */
- { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R13 */
- { 0x1FEF, 0x1FEF, 0x0000, 1, 0x0000 }, /* R14 */
- { 0x0163, 0x0163, 0x0000, 1, 0x0100 }, /* R15 */
- { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R16 */
- { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R17 */
- { 0x1FFF, 0x0FFF, 0x0000, 1, 0x0000 }, /* R18 */
- { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1000 }, /* R19 */
- { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1010 }, /* R20 */
- { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1010 }, /* R21 */
- { 0x0FDD, 0x0FDD, 0x0000, 1, 0x8000 }, /* R22 */
- { 0x1FFF, 0x1FFF, 0x0000, 1, 0x0800 }, /* R23 */
- { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R24 */
- { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R25 */
- { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R26 */
- { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R27 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R28 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R29 */
- { 0x0000, 0x0077, 0x0000, 1, 0x0066 }, /* R30 */
- { 0x0000, 0x0033, 0x0000, 1, 0x0022 }, /* R31 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0079 }, /* R32 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0079 }, /* R33 */
- { 0x0000, 0x0003, 0x0000, 1, 0x0003 }, /* R34 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0003 }, /* R35 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R36 */
- { 0x0000, 0x003F, 0x0000, 1, 0x0100 }, /* R37 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R38 */
- { 0x0000, 0x000F, 0x0000, 0, 0x0000 }, /* R39 */
- { 0x0000, 0x00FF, 0x0000, 1, 0x0000 }, /* R40 */
- { 0x0000, 0x01B7, 0x0000, 1, 0x0000 }, /* R41 */
- { 0x0000, 0x01B7, 0x0000, 1, 0x0000 }, /* R42 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R43 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R44 */
- { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R45 */
- { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R46 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R47 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R48 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R49 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R50 */
- { 0x0000, 0x01B3, 0x0000, 1, 0x0180 }, /* R51 */
- { 0x0000, 0x0077, 0x0000, 1, 0x0000 }, /* R52 */
- { 0x0000, 0x0077, 0x0000, 1, 0x0000 }, /* R53 */
- { 0x0000, 0x00FF, 0x0000, 1, 0x0000 }, /* R54 */
- { 0x0000, 0x0001, 0x0000, 1, 0x0000 }, /* R55 */
- { 0x0000, 0x003F, 0x0000, 1, 0x0000 }, /* R56 */
- { 0x0000, 0x004F, 0x0000, 1, 0x0000 }, /* R57 */
- { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R58 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R59 */
- { 0x1FFF, 0x1FFF, 0x0000, 1, 0x0000 }, /* R60 */
- { 0xFFFF, 0xFFFF, 0x0000, 1, 0x0000 }, /* R61 */
- { 0x03FF, 0x03FF, 0x0000, 1, 0x0000 }, /* R62 */
- { 0x007F, 0x007F, 0x0000, 1, 0x0000 }, /* R63 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R64 */
- { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R65 */
- { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R66 */
- { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R67 */
- { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R68 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R69 */
- { 0xFFFF, 0xFFFF, 0x0000, 0, 0x4400 }, /* R70 */
- { 0x23FF, 0x23FF, 0x0000, 0, 0x0000 }, /* R71 */
- { 0xFFFF, 0xFFFF, 0x0000, 0, 0x4400 }, /* R72 */
- { 0x23FF, 0x23FF, 0x0000, 0, 0x0000 }, /* R73 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R74 */
- { 0x000E, 0x000E, 0x0000, 0, 0x0008 }, /* R75 */
- { 0xE00F, 0xE00F, 0x0000, 0, 0x0000 }, /* R76 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R77 */
- { 0x03C0, 0x03C0, 0x0000, 0, 0x02C0 }, /* R78 */
- { 0xFFFF, 0x0000, 0xffff, 0, 0x0000 }, /* R79 */
- { 0xFFFF, 0xFFFF, 0x0000, 0, 0x0000 }, /* R80 */
- { 0xFFFF, 0x0000, 0xffff, 0, 0x0000 }, /* R81 */
- { 0x2BFF, 0x0000, 0xffff, 0, 0x0000 }, /* R82 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R83 */
- { 0x80FF, 0x80FF, 0x0000, 0, 0x00ff }, /* R84 */
-};
-
-static int wm8400_read(struct wm8400 *wm8400, u8 reg, int num_regs, u16 *dest)
+static bool wm8400_volatile(struct device *dev, unsigned int reg)
{
- int i, ret = 0;
-
- BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
-
- /* If there are any volatile reads then read back the entire block */
- for (i = reg; i < reg + num_regs; i++)
- if (reg_data[i].vol) {
- ret = regmap_bulk_read(wm8400->regmap, reg, dest,
- num_regs);
- return ret;
- }
-
- /* Otherwise use the cache */
- memcpy(dest, &wm8400->reg_cache[reg], num_regs * sizeof(u16));
-
- return 0;
-}
-
-static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
- u16 *src)
-{
- int ret, i;
-
- BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
-
- for (i = 0; i < num_regs; i++) {
- BUG_ON(!reg_data[reg + i].writable);
- wm8400->reg_cache[reg + i] = src[i];
- ret = regmap_write(wm8400->regmap, reg, src[i]);
- if (ret != 0)
- return ret;
+ switch (reg) {
+ case WM8400_INTERRUPT_STATUS_1:
+ case WM8400_INTERRUPT_LEVELS:
+ case WM8400_SHUTDOWN_REASON:
+ return true;
+ default:
+ return false;
}
-
- return 0;
}
/**
@@ -165,13 +45,12 @@ static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
*/
u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg)
{
- u16 val;
-
- mutex_lock(&wm8400->io_lock);
-
- wm8400_read(wm8400, reg, 1, &val);
+ unsigned int val;
+ int ret;
- mutex_unlock(&wm8400->io_lock);
+ ret = regmap_read(wm8400->regmap, reg, &val);
+ if (ret < 0)
+ return ret;
return val;
}
@@ -179,63 +58,10 @@ EXPORT_SYMBOL_GPL(wm8400_reg_read);
int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data)
{
- int ret;
-
- mutex_lock(&wm8400->io_lock);
-
- ret = wm8400_read(wm8400, reg, count, data);
-
- mutex_unlock(&wm8400->io_lock);
-
- return ret;
+ return regmap_bulk_read(wm8400->regmap, reg, data, count);
}
EXPORT_SYMBOL_GPL(wm8400_block_read);
-/**
- * wm8400_set_bits - Bitmask write
- *
- * @wm8400: Pointer to wm8400 control structure
- * @reg: Register to access
- * @mask: Mask of bits to change
- * @val: Value to set for masked bits
- */
-int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, u16 mask, u16 val)
-{
- u16 tmp;
- int ret;
-
- mutex_lock(&wm8400->io_lock);
-
- ret = wm8400_read(wm8400, reg, 1, &tmp);
- tmp = (tmp & ~mask) | val;
- if (ret == 0)
- ret = wm8400_write(wm8400, reg, 1, &tmp);
-
- mutex_unlock(&wm8400->io_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(wm8400_set_bits);
-
-/**
- * wm8400_reset_codec_reg_cache - Reset cached codec registers to
- * their default values.
- */
-void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
-{
- int i;
-
- mutex_lock(&wm8400->io_lock);
-
- /* Reset all codec registers to their initial value */
- for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
- if (reg_data[i].is_codec)
- wm8400->reg_cache[i] = reg_data[i].default_val;
-
- mutex_unlock(&wm8400->io_lock);
-}
-EXPORT_SYMBOL_GPL(wm8400_reset_codec_reg_cache);
-
static int wm8400_register_codec(struct wm8400 *wm8400)
{
struct mfd_cell cell = {
@@ -257,44 +83,24 @@ static int wm8400_register_codec(struct wm8400 *wm8400)
static int wm8400_init(struct wm8400 *wm8400,
struct wm8400_platform_data *pdata)
{
- u16 reg;
- int ret, i;
-
- mutex_init(&wm8400->io_lock);
+ unsigned int reg;
+ int ret;
dev_set_drvdata(wm8400->dev, wm8400);
/* Check that this is actually a WM8400 */
- ret = regmap_read(wm8400->regmap, WM8400_RESET_ID, &i);
+ ret = regmap_read(wm8400->regmap, WM8400_RESET_ID, &reg);
if (ret != 0) {
dev_err(wm8400->dev, "Chip ID register read failed\n");
return -EIO;
}
- if (i != reg_data[WM8400_RESET_ID].default_val) {
- dev_err(wm8400->dev, "Device is not a WM8400, ID is %x\n", i);
+ if (reg != 0x6172) {
+ dev_err(wm8400->dev, "Device is not a WM8400, ID is %x\n",
+ reg);
return -ENODEV;
}
- /* We don't know what state the hardware is in and since this
- * is a PMIC we can't reset it safely so initialise the register
- * cache from the hardware.
- */
- ret = regmap_raw_read(wm8400->regmap, 0, wm8400->reg_cache,
- ARRAY_SIZE(wm8400->reg_cache));
- if (ret != 0) {
- dev_err(wm8400->dev, "Register cache read failed\n");
- return -EIO;
- }
- for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
- wm8400->reg_cache[i] = be16_to_cpu(wm8400->reg_cache[i]);
-
- /* If the codec is in reset use hard coded values */
- if (!(wm8400->reg_cache[WM8400_POWER_MANAGEMENT_1] & WM8400_CODEC_ENA))
- for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
- if (reg_data[i].is_codec)
- wm8400->reg_cache[i] = reg_data[i].default_val;
-
- ret = wm8400_read(wm8400, WM8400_ID, 1, &reg);
+ ret = regmap_read(wm8400->regmap, WM8400_ID, &reg);
if (ret != 0) {
dev_err(wm8400->dev, "ID register read failed: %d\n", ret);
return ret;
@@ -334,8 +140,22 @@ static const struct regmap_config wm8400_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
.max_register = WM8400_REGISTER_COUNT - 1,
+
+ .volatile_reg = wm8400_volatile,
+
+ .cache_type = REGCACHE_RBTREE,
};
+/**
+ * wm8400_reset_codec_reg_cache - Reset cached codec registers to
+ * their default values.
+ */
+void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
+{
+ regmap_reinit_cache(wm8400->regmap, &wm8400_regmap_config);
+}
+EXPORT_SYMBOL_GPL(wm8400_reset_codec_reg_cache);
+
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
static int wm8400_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 9d7ca1e978fa..1e321d349777 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -500,7 +500,8 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
ret);
goto err_enable;
}
- wm8994->revision = ret;
+ wm8994->revision = ret & WM8994_CHIP_REV_MASK;
+ wm8994->cust_id = (ret & WM8994_CUST_ID_MASK) >> WM8994_CUST_ID_SHIFT;
switch (wm8994->type) {
case WM8994:
@@ -553,8 +554,8 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
break;
}
- dev_info(wm8994->dev, "%s revision %c\n", devname,
- 'A' + wm8994->revision);
+ dev_info(wm8994->dev, "%s revision %c CUST_ID %02x\n", devname,
+ 'A' + wm8994->revision, wm8994->cust_id);
switch (wm8994->type) {
case WM1811:
@@ -732,23 +733,7 @@ static struct i2c_driver wm8994_i2c_driver = {
.id_table = wm8994_i2c_id,
};
-static int __init wm8994_i2c_init(void)
-{
- int ret;
-
- ret = i2c_add_driver(&wm8994_i2c_driver);
- if (ret != 0)
- pr_err("Failed to register wm8994 I2C driver: %d\n", ret);
-
- return ret;
-}
-module_init(wm8994_i2c_init);
-
-static void __exit wm8994_i2c_exit(void)
-{
- i2c_del_driver(&wm8994_i2c_driver);
-}
-module_exit(wm8994_i2c_exit);
+module_i2c_driver(wm8994_i2c_driver);
MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c
index bfd25af6ecb1..52e9e2944940 100644
--- a/drivers/mfd/wm8994-regmap.c
+++ b/drivers/mfd/wm8994-regmap.c
@@ -1122,7 +1122,6 @@ static bool wm8994_volatile_register(struct device *dev, unsigned int reg)
case WM8994_RATE_STATUS:
case WM8958_MIC_DETECT_3:
case WM8994_DC_SERVO_4E:
- case WM8994_CHIP_REVISION:
case WM8994_INTERRUPT_STATUS_1:
case WM8994_INTERRUPT_STATUS_2:
return true;
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index d7a9aa14e5d5..042a8fe4efaa 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -142,10 +142,16 @@ static int __devexit ab8500_pwm_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ab8500_pwm_match[] = {
+ { .compatible = "stericsson,ab8500-pwm", },
+ {}
+};
+
static struct platform_driver ab8500_pwm_driver = {
.driver = {
.name = "ab8500-pwm",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_pwm_match,
},
.probe = ab8500_pwm_probe,
.remove = __devexit_p(ab8500_pwm_remove),
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index dabec556ebb8..dd2d374dcc7a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -384,7 +384,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
md = mmc_blk_get(bdev->bd_disk);
if (!md) {
err = -EINVAL;
- goto cmd_done;
+ goto cmd_err;
}
card = md->queue.card;
@@ -483,6 +483,7 @@ cmd_rel_host:
cmd_done:
mmc_blk_put(md);
+cmd_err:
kfree(idata->buf);
kfree(idata);
return err;
@@ -1283,7 +1284,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
int ret = 1, disable_multi = 0, retry = 0, type;
enum mmc_blk_status status;
struct mmc_queue_req *mq_rq;
- struct request *req;
+ struct request *req = rqc;
struct mmc_async_req *areq;
if (!rqc && !mq->mqrq_prev->req)
@@ -1291,6 +1292,16 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
do {
if (rqc) {
+ /*
+			 * When the 4KB native sector size is enabled, reads
+			 * and writes must be a multiple of 8 blocks
+ */
+ if ((brq->data.blocks & 0x07) &&
+ (card->ext_csd.data_sector_size == 4096)) {
+ pr_err("%s: Transfer size is not 4KB sector size aligned\n",
+ req->rq_disk->disk_name);
+ goto cmd_abort;
+ }
mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
areq = &mq->mqrq_cur->mmc_active;
} else
@@ -1538,7 +1549,12 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
"mmcblk%d%s", md->name_idx, subname ? subname : "");
- blk_queue_logical_block_size(md->queue.queue, 512);
+ if (mmc_card_mmc(card))
+ blk_queue_logical_block_size(md->queue.queue,
+ card->ext_csd.data_sector_size);
+ else
+ blk_queue_logical_block_size(md->queue.queue, 512);
+
set_capacity(md->disk, size);
if (mmc_host_cmd23(card->host)) {
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 996f8e36e23d..e360a979857d 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -96,7 +96,7 @@ static int mmc_queue_thread(void *d)
* on any queue on this host, and attempt to issue it. This may
* not be the queue we were asked to process.
*/
-static void mmc_request(struct request_queue *q)
+static void mmc_request_fn(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
@@ -171,12 +171,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
limit = *mmc_dev(host)->dma_mask;
mq->card = card;
- mq->queue = blk_init_queue(mmc_request, lock);
+ mq->queue = blk_init_queue(mmc_request_fn, lock);
if (!mq->queue)
return -ENOMEM;
- memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
- memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
mq->mqrq_cur = mqrq_cur;
mq->mqrq_prev = mqrq_prev;
mq->queue->queuedata = mq;
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index c60cee92a2b2..9b68933f27e7 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -122,6 +122,7 @@ static int mmc_bus_remove(struct device *dev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int mmc_bus_suspend(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
@@ -143,6 +144,7 @@ static int mmc_bus_resume(struct device *dev)
ret = drv->resume(card);
return ret;
}
+#endif
#ifdef CONFIG_PM_RUNTIME
diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c
index 2c14be73254c..f13e38deceac 100644
--- a/drivers/mmc/core/cd-gpio.c
+++ b/drivers/mmc/core/cd-gpio.c
@@ -73,6 +73,9 @@ void mmc_cd_gpio_free(struct mmc_host *host)
{
struct mmc_cd_gpio *cd = host->hotplug.handler_priv;
+ if (!cd)
+ return;
+
free_irq(host->hotplug.irq, host);
gpio_free(cd->gpio);
kfree(cd);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index ba821fe70bca..0b6141d29dbd 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -42,6 +42,7 @@
#include "sdio_ops.h"
static struct workqueue_struct *workqueue;
+static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
/*
* Enabling software CRCs on the data blocks can be a significant (30%)
@@ -1157,6 +1158,9 @@ static void mmc_power_up(struct mmc_host *host)
{
int bit;
+ if (host->ios.power_mode == MMC_POWER_ON)
+ return;
+
mmc_host_clk_hold(host);
/* If ocr is set, we use it */
@@ -1199,6 +1203,10 @@ static void mmc_power_up(struct mmc_host *host)
void mmc_power_off(struct mmc_host *host)
{
int err = 0;
+
+ if (host->ios.power_mode == MMC_POWER_OFF)
+ return;
+
mmc_host_clk_hold(host);
host->ios.clock = 0;
@@ -2005,7 +2013,6 @@ EXPORT_SYMBOL(mmc_detect_card_removed);
void mmc_rescan(struct work_struct *work)
{
- static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
int i;
@@ -2044,8 +2051,12 @@ void mmc_rescan(struct work_struct *work)
*/
mmc_bus_put(host);
- if (host->ops->get_cd && host->ops->get_cd(host) == 0)
+ if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
+ mmc_claim_host(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
goto out;
+ }
mmc_claim_host(host);
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
@@ -2063,7 +2074,8 @@ void mmc_rescan(struct work_struct *work)
void mmc_start_host(struct mmc_host *host)
{
- mmc_power_off(host);
+ host->f_init = max(freqs[0], host->f_min);
+ mmc_power_up(host);
mmc_detect_change(host, 0);
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 54df5adc0413..2d4a4b746750 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -235,6 +235,36 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
return err;
}
+static void mmc_select_card_type(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK;
+ unsigned int caps = host->caps, caps2 = host->caps2;
+ unsigned int hs_max_dtr = 0;
+
+ if (card_type & EXT_CSD_CARD_TYPE_26)
+ hs_max_dtr = MMC_HIGH_26_MAX_DTR;
+
+ if (caps & MMC_CAP_MMC_HIGHSPEED &&
+ card_type & EXT_CSD_CARD_TYPE_52)
+ hs_max_dtr = MMC_HIGH_52_MAX_DTR;
+
+ if ((caps & MMC_CAP_1_8V_DDR &&
+ card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) ||
+ (caps & MMC_CAP_1_2V_DDR &&
+ card_type & EXT_CSD_CARD_TYPE_DDR_1_2V))
+ hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
+
+ if ((caps2 & MMC_CAP2_HS200_1_8V_SDR &&
+ card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) ||
+ (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
+ card_type & EXT_CSD_CARD_TYPE_SDR_1_2V))
+ hs_max_dtr = MMC_HS200_MAX_DTR;
+
+ card->ext_csd.hs_max_dtr = hs_max_dtr;
+ card->ext_csd.card_type = card_type;
+}
+
/*
* Decode extended CSD.
*/
@@ -284,56 +314,9 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
mmc_card_set_blockaddr(card);
}
+
card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
- switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
- case EXT_CSD_CARD_TYPE_SDR_ALL:
- case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V:
- case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V:
- case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52:
- card->ext_csd.hs_max_dtr = 200000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200;
- break;
- case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL:
- case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V:
- case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V:
- case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52:
- card->ext_csd.hs_max_dtr = 200000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V;
- break;
- case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL:
- case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V:
- case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V:
- case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52:
- card->ext_csd.hs_max_dtr = 200000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V;
- break;
- case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
- EXT_CSD_CARD_TYPE_26:
- card->ext_csd.hs_max_dtr = 52000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
- break;
- case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
- EXT_CSD_CARD_TYPE_26:
- card->ext_csd.hs_max_dtr = 52000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
- break;
- case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
- EXT_CSD_CARD_TYPE_26:
- card->ext_csd.hs_max_dtr = 52000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
- break;
- case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
- card->ext_csd.hs_max_dtr = 52000000;
- break;
- case EXT_CSD_CARD_TYPE_26:
- card->ext_csd.hs_max_dtr = 26000000;
- break;
- default:
- /* MMC v4 spec says this cannot happen */
- pr_warning("%s: card is mmc v4 but doesn't "
- "support any high-speed modes.\n",
- mmc_hostname(card->host));
- }
+ mmc_select_card_type(card);
card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
card->ext_csd.raw_erase_timeout_mult =
@@ -533,6 +516,8 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
} else {
card->ext_csd.data_tag_unit_size = 0;
}
+ } else {
+ card->ext_csd.data_sector_size = 512;
}
out:
@@ -556,14 +541,10 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
err = mmc_get_ext_csd(card, &bw_ext_csd);
if (err || bw_ext_csd == NULL) {
- if (bus_width != MMC_BUS_WIDTH_1)
- err = -EINVAL;
+ err = -EINVAL;
goto out;
}
- if (bus_width == MMC_BUS_WIDTH_1)
- goto out;
-
/* only compare read only fields */
err = !((card->ext_csd.raw_partition_support ==
bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
@@ -736,6 +717,10 @@ static int mmc_select_powerclass(struct mmc_card *card,
card->ext_csd.generic_cmd6_time);
}
+ if (err)
+ pr_err("%s: power class selection for ext_csd_bus_width %d"
+ " failed\n", mmc_hostname(card->host), bus_width);
+
return err;
}
@@ -745,7 +730,7 @@ static int mmc_select_powerclass(struct mmc_card *card,
*/
static int mmc_select_hs200(struct mmc_card *card)
{
- int idx, err = 0;
+ int idx, err = -EINVAL;
struct mmc_host *host;
static unsigned ext_csd_bits[] = {
EXT_CSD_BUS_WIDTH_4,
@@ -761,10 +746,12 @@ static int mmc_select_hs200(struct mmc_card *card)
host = card->host;
if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
- host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
- if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0))
- err = mmc_set_signal_voltage(host,
- MMC_SIGNAL_VOLTAGE_180, 0);
+ host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0);
+
+ if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
+ host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 0);
/* If fails try again during next card power cycle */
if (err)
@@ -1117,9 +1104,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
if (err)
- pr_warning("%s: power class selection to bus width %d"
- " failed\n", mmc_hostname(card->host),
- 1 << bus_width);
+ goto err;
}
/*
@@ -1151,10 +1136,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
ext_csd);
if (err)
- pr_warning("%s: power class selection to "
- "bus width %d failed\n",
- mmc_hostname(card->host),
- 1 << bus_width);
+ goto err;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
@@ -1182,10 +1164,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
ext_csd);
if (err)
- pr_warning("%s: power class selection to "
- "bus width %d ddr %d failed\n",
- mmc_hostname(card->host),
- 1 << bus_width, ddr);
+ goto err;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2c7c83f832d2..13d0e95380ab 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -947,7 +947,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
}
if (!err && host->sdio_irqs)
- mmc_signal_sdio_irq(host);
+ wake_up_process(host->sdio_irq_thread);
mmc_release_host(host);
/*
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index f573e7f9f740..3d8ceb4084de 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -28,18 +28,20 @@
#include "sdio_ops.h"
-static int process_sdio_pending_irqs(struct mmc_card *card)
+static int process_sdio_pending_irqs(struct mmc_host *host)
{
+ struct mmc_card *card = host->card;
int i, ret, count;
unsigned char pending;
struct sdio_func *func;
/*
* Optimization, if there is only 1 function interrupt registered
- * call irq handler directly
+ * and we know an IRQ was signaled then call irq handler directly.
+ * Otherwise do the full probe.
*/
func = card->sdio_single_irq;
- if (func) {
+ if (func && host->sdio_irq_pending) {
func->irq_handler(func);
return 1;
}
@@ -116,7 +118,8 @@ static int sdio_irq_thread(void *_host)
ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
if (ret)
break;
- ret = process_sdio_pending_irqs(host->card);
+ ret = process_sdio_pending_irqs(host);
+ host->sdio_irq_pending = false;
mmc_release_host(host);
/*
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2bc06e7344db..aa131b32e3b2 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -278,10 +278,13 @@ choice
Choose which driver to use for the Atmel MCI Silicon
config MMC_AT91
- tristate "AT91 SD/MMC Card Interface support"
+ tristate "AT91 SD/MMC Card Interface support (DEPRECATED)"
depends on ARCH_AT91
help
- This selects the AT91 MCI controller.
+ This selects the AT91 MCI controller. This driver will
+	  be removed soon (for more information, have a look at
+ Documentation/feature-removal-schedule.txt). Please use
+ MMC_ATMEL_MCI.
If unsure, say N.
@@ -307,16 +310,6 @@ config MMC_ATMELMCI_DMA
If unsure, say N.
-config MMC_IMX
- tristate "Motorola i.MX Multimedia Card Interface support"
- depends on ARCH_MX1
- help
- This selects the Motorola i.MX Multimedia card Interface.
- If you have a i.MX platform with a Multimedia Card slot,
- say Y or M here.
-
- If unsure, say N.
-
config MMC_MSM
tristate "Qualcomm SDCC Controller Support"
depends on MMC && ARCH_MSM
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 3e7e26d08073..8922b06be925 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -4,7 +4,6 @@
obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
obj-$(CONFIG_MMC_PXA) += pxamci.o
-obj-$(CONFIG_MMC_IMX) += imxmmc.o
obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index e94476beca18..420aca642b14 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -45,19 +45,19 @@
#define ATMCI_DMA_THRESHOLD 16
enum {
- EVENT_CMD_COMPLETE = 0,
+ EVENT_CMD_RDY = 0,
EVENT_XFER_COMPLETE,
- EVENT_DATA_COMPLETE,
+ EVENT_NOTBUSY,
EVENT_DATA_ERROR,
};
enum atmel_mci_state {
STATE_IDLE = 0,
STATE_SENDING_CMD,
- STATE_SENDING_DATA,
- STATE_DATA_BUSY,
+ STATE_DATA_XFER,
+ STATE_WAITING_NOTBUSY,
STATE_SENDING_STOP,
- STATE_DATA_ERROR,
+ STATE_END_REQUEST,
};
enum atmci_xfer_dir {
@@ -78,6 +78,9 @@ struct atmel_mci_caps {
bool has_highspeed;
bool has_rwproof;
bool has_odd_clk_div;
+ bool has_bad_data_ordering;
+ bool need_reset_after_xfer;
+ bool need_blksz_mul_4;
};
struct atmel_mci_dma {
@@ -91,6 +94,11 @@ struct atmel_mci_dma {
* @regs: Pointer to MMIO registers.
* @sg: Scatterlist entry currently being processed by PIO or PDC code.
* @pio_offset: Offset into the current scatterlist entry.
+ * @buffer: Buffer used if we don't have the r/w proof capability. We
+ * don't have the time to switch pdc buffers so we have to use only
+ * one buffer for the full transaction.
+ * @buf_size: size of the buffer.
+ * @buf_phys_addr: physical address of the buffer, needed for pdc.
* @cur_slot: The slot which is currently using the controller.
* @mrq: The request currently being processed on @cur_slot,
* or NULL if the controller is idle.
@@ -116,6 +124,7 @@ struct atmel_mci_dma {
* @queue: List of slots waiting for access to the controller.
* @need_clock_update: Update the clock rate before the next request.
* @need_reset: Reset controller before next request.
+ * @timer: Software timeout timer to compensate for the data timeout error flag which cannot rise.
* @mode_reg: Value of the MR register.
* @cfg_reg: Value of the CFG register.
* @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
@@ -166,6 +175,9 @@ struct atmel_mci {
struct scatterlist *sg;
unsigned int pio_offset;
+ unsigned int *buffer;
+ unsigned int buf_size;
+ dma_addr_t buf_phys_addr;
struct atmel_mci_slot *cur_slot;
struct mmc_request *mrq;
@@ -189,6 +201,7 @@ struct atmel_mci {
bool need_clock_update;
bool need_reset;
+ struct timer_list timer;
u32 mode_reg;
u32 cfg_reg;
unsigned long bus_hz;
@@ -480,6 +493,32 @@ err:
dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
+static inline unsigned int atmci_get_version(struct atmel_mci *host)
+{
+ return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
+}
+
+static void atmci_timeout_timer(unsigned long data)
+{
+ struct atmel_mci *host;
+
+ host = (struct atmel_mci *)data;
+
+ dev_dbg(&host->pdev->dev, "software timeout\n");
+
+ if (host->mrq->cmd->data) {
+ host->mrq->cmd->data->error = -ETIMEDOUT;
+ host->data = NULL;
+ } else {
+ host->mrq->cmd->error = -ETIMEDOUT;
+ host->cmd = NULL;
+ }
+ host->need_reset = 1;
+ host->state = STATE_END_REQUEST;
+ smp_wmb();
+ tasklet_schedule(&host->tasklet);
+}
+
static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
unsigned int ns)
{
@@ -591,6 +630,7 @@ static void atmci_send_command(struct atmel_mci *host,
static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
{
+ dev_dbg(&host->pdev->dev, "send stop command\n");
atmci_send_command(host, data->stop, host->stop_cmdr);
atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
}
@@ -603,6 +643,7 @@ static void atmci_pdc_set_single_buf(struct atmel_mci *host,
enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
{
u32 pointer_reg, counter_reg;
+ unsigned int buf_size;
if (dir == XFER_RECEIVE) {
pointer_reg = ATMEL_PDC_RPR;
@@ -617,8 +658,15 @@ static void atmci_pdc_set_single_buf(struct atmel_mci *host,
counter_reg += ATMEL_PDC_SCND_BUF_OFF;
}
- atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
- if (host->data_size <= sg_dma_len(host->sg)) {
+ if (!host->caps.has_rwproof) {
+ buf_size = host->buf_size;
+ atmci_writel(host, pointer_reg, host->buf_phys_addr);
+ } else {
+ buf_size = sg_dma_len(host->sg);
+ atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
+ }
+
+ if (host->data_size <= buf_size) {
if (host->data_size & 0x3) {
/* If size is different from modulo 4, transfer bytes */
atmci_writel(host, counter_reg, host->data_size);
@@ -670,7 +718,20 @@ static void atmci_pdc_cleanup(struct atmel_mci *host)
*/
static void atmci_pdc_complete(struct atmel_mci *host)
{
+ int transfer_size = host->data->blocks * host->data->blksz;
+ int i;
+
atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
+
+ if ((!host->caps.has_rwproof)
+ && (host->data->flags & MMC_DATA_READ)) {
+ if (host->caps.has_bad_data_ordering)
+ for (i = 0; i < transfer_size; i++)
+ host->buffer[i] = swab32(host->buffer[i]);
+ sg_copy_from_buffer(host->data->sg, host->data->sg_len,
+ host->buffer, transfer_size);
+ }
+
atmci_pdc_cleanup(host);
/*
@@ -678,9 +739,10 @@ static void atmci_pdc_complete(struct atmel_mci *host)
* to send the stop command or waiting for NBUSY in this case.
*/
if (host->data) {
+ dev_dbg(&host->pdev->dev,
+ "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
tasklet_schedule(&host->tasklet);
- atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
}
@@ -716,6 +778,8 @@ static void atmci_dma_complete(void *arg)
* to send the stop command or waiting for NBUSY in this case.
*/
if (data) {
+ dev_dbg(&host->pdev->dev,
+ "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
tasklet_schedule(&host->tasklet);
@@ -791,6 +855,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
u32 iflags, tmp;
unsigned int sg_len;
enum dma_data_direction dir;
+ int i;
data->error = -EINPROGRESS;
@@ -806,7 +871,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
} else {
dir = DMA_TO_DEVICE;
- iflags |= ATMCI_ENDTX | ATMCI_TXBUFE;
+ iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
}
/* Set BLKLEN */
@@ -818,6 +883,16 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
/* Configure PDC */
host->data_size = data->blocks * data->blksz;
sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
+
+ if ((!host->caps.has_rwproof)
+ && (host->data->flags & MMC_DATA_WRITE)) {
+ sg_copy_to_buffer(host->data->sg, host->data->sg_len,
+ host->buffer, host->data_size);
+ if (host->caps.has_bad_data_ordering)
+ for (i = 0; i < host->data_size; i++)
+ host->buffer[i] = swab32(host->buffer[i]);
+ }
+
if (host->data_size)
atmci_pdc_set_both_buf(host,
((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
@@ -931,6 +1006,8 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
static void atmci_stop_transfer(struct atmel_mci *host)
{
+ dev_dbg(&host->pdev->dev,
+ "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
@@ -940,8 +1017,7 @@ static void atmci_stop_transfer(struct atmel_mci *host)
*/
static void atmci_stop_transfer_pdc(struct atmel_mci *host)
{
- atmci_set_pending(host, EVENT_XFER_COMPLETE);
- atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
}
static void atmci_stop_transfer_dma(struct atmel_mci *host)
@@ -953,6 +1029,8 @@ static void atmci_stop_transfer_dma(struct atmel_mci *host)
atmci_dma_cleanup(host);
} else {
/* Data transfer was stopped by the interrupt handler */
+ dev_dbg(&host->pdev->dev,
+ "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
@@ -977,9 +1055,12 @@ static void atmci_start_request(struct atmel_mci *host,
host->pending_events = 0;
host->completed_events = 0;
+ host->cmd_status = 0;
host->data_status = 0;
- if (host->need_reset) {
+ dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
+
+ if (host->need_reset || host->caps.need_reset_after_xfer) {
iflags = atmci_readl(host, ATMCI_IMR);
iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
@@ -994,7 +1075,7 @@ static void atmci_start_request(struct atmel_mci *host,
iflags = atmci_readl(host, ATMCI_IMR);
if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
- dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
+ dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
iflags);
if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
@@ -1043,6 +1124,8 @@ static void atmci_start_request(struct atmel_mci *host,
* prepared yet.)
*/
atmci_writel(host, ATMCI_IER, iflags);
+
+ mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
}
static void atmci_queue_request(struct atmel_mci *host,
@@ -1057,6 +1140,7 @@ static void atmci_queue_request(struct atmel_mci *host,
host->state = STATE_SENDING_CMD;
atmci_start_request(host, slot);
} else {
+ dev_dbg(&host->pdev->dev, "queue request\n");
list_add_tail(&slot->queue_node, &host->queue);
}
spin_unlock_bh(&host->lock);
@@ -1069,6 +1153,7 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct mmc_data *data;
WARN_ON(slot->mrq);
+ dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
/*
* We may "know" the card is gone even though there's still an
@@ -1308,6 +1393,8 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
host->state = STATE_IDLE;
}
+ del_timer(&host->timer);
+
spin_unlock(&host->lock);
mmc_request_done(prev_mmc, mrq);
spin_lock(&host->lock);
@@ -1330,21 +1417,13 @@ static void atmci_command_complete(struct atmel_mci *host,
cmd->error = -EILSEQ;
else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
cmd->error = -EIO;
- else
- cmd->error = 0;
-
- if (cmd->error) {
- dev_dbg(&host->pdev->dev,
- "command error: status=0x%08x\n", status);
-
- if (cmd->data) {
- host->stop_transfer(host);
- host->data = NULL;
- atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY
- | ATMCI_TXRDY | ATMCI_RXRDY
- | ATMCI_DATA_ERROR_FLAGS);
+ else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
+ if (host->caps.need_blksz_mul_4) {
+ cmd->error = -EINVAL;
+ host->need_reset = 1;
}
- }
+ } else
+ cmd->error = 0;
}
static void atmci_detect_change(unsigned long data)
@@ -1407,23 +1486,21 @@ static void atmci_detect_change(unsigned long data)
break;
case STATE_SENDING_CMD:
mrq->cmd->error = -ENOMEDIUM;
- if (!mrq->data)
- break;
- /* fall through */
- case STATE_SENDING_DATA:
+ if (mrq->data)
+ host->stop_transfer(host);
+ break;
+ case STATE_DATA_XFER:
mrq->data->error = -ENOMEDIUM;
host->stop_transfer(host);
break;
- case STATE_DATA_BUSY:
- case STATE_DATA_ERROR:
- if (mrq->data->error == -EINPROGRESS)
- mrq->data->error = -ENOMEDIUM;
- if (!mrq->stop)
- break;
- /* fall through */
+ case STATE_WAITING_NOTBUSY:
+ mrq->data->error = -ENOMEDIUM;
+ break;
case STATE_SENDING_STOP:
mrq->stop->error = -ENOMEDIUM;
break;
+ case STATE_END_REQUEST:
+ break;
}
atmci_request_end(host, mrq);
@@ -1451,7 +1528,6 @@ static void atmci_tasklet_func(unsigned long priv)
struct atmel_mci *host = (struct atmel_mci *)priv;
struct mmc_request *mrq = host->mrq;
struct mmc_data *data = host->data;
- struct mmc_command *cmd = host->cmd;
enum atmel_mci_state state = host->state;
enum atmel_mci_state prev_state;
u32 status;
@@ -1467,107 +1543,186 @@ static void atmci_tasklet_func(unsigned long priv)
do {
prev_state = state;
+ dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
switch (state) {
case STATE_IDLE:
break;
case STATE_SENDING_CMD:
+ /*
+ * Command has been sent, we are waiting for command
+			 * ready. Then three next states are possible:
+ * END_REQUEST by default, WAITING_NOTBUSY if it's a
+ * command needing it or DATA_XFER if there is data.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
if (!atmci_test_and_clear_pending(host,
- EVENT_CMD_COMPLETE))
+ EVENT_CMD_RDY))
break;
+ dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
host->cmd = NULL;
- atmci_set_completed(host, EVENT_CMD_COMPLETE);
+ atmci_set_completed(host, EVENT_CMD_RDY);
atmci_command_complete(host, mrq->cmd);
- if (!mrq->data || cmd->error) {
- atmci_request_end(host, host->mrq);
- goto unlock;
- }
+ if (mrq->data) {
+ dev_dbg(&host->pdev->dev,
+ "command with data transfer");
+ /*
+ * If there is a command error don't start
+ * data transfer.
+ */
+ if (mrq->cmd->error) {
+ host->stop_transfer(host);
+ host->data = NULL;
+ atmci_writel(host, ATMCI_IDR,
+ ATMCI_TXRDY | ATMCI_RXRDY
+ | ATMCI_DATA_ERROR_FLAGS);
+ state = STATE_END_REQUEST;
+ } else
+ state = STATE_DATA_XFER;
+ } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
+ dev_dbg(&host->pdev->dev,
+ "command response need waiting notbusy");
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ state = STATE_WAITING_NOTBUSY;
+ } else
+ state = STATE_END_REQUEST;
- prev_state = state = STATE_SENDING_DATA;
- /* fall through */
+ break;
- case STATE_SENDING_DATA:
+ case STATE_DATA_XFER:
if (atmci_test_and_clear_pending(host,
EVENT_DATA_ERROR)) {
- host->stop_transfer(host);
- if (data->stop)
- atmci_send_stop_cmd(host, data);
- state = STATE_DATA_ERROR;
+ dev_dbg(&host->pdev->dev, "set completed data error\n");
+ atmci_set_completed(host, EVENT_DATA_ERROR);
+ state = STATE_END_REQUEST;
break;
}
+ /*
+ * A data transfer is in progress. The event expected
+			 * to move to the next state depends on the data transfer
+			 * type (PDC or DMA). Once the transfer is done we can move
+ * to the next step which is WAITING_NOTBUSY in write
+ * case and directly SENDING_STOP in read case.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_XFER_COMPLETE))
break;
+ dev_dbg(&host->pdev->dev,
+ "(%s) set completed xfer complete\n",
+ __func__);
atmci_set_completed(host, EVENT_XFER_COMPLETE);
- prev_state = state = STATE_DATA_BUSY;
- /* fall through */
- case STATE_DATA_BUSY:
- if (!atmci_test_and_clear_pending(host,
- EVENT_DATA_COMPLETE))
- break;
-
- host->data = NULL;
- atmci_set_completed(host, EVENT_DATA_COMPLETE);
- status = host->data_status;
- if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) {
- if (status & ATMCI_DTOE) {
- dev_dbg(&host->pdev->dev,
- "data timeout error\n");
- data->error = -ETIMEDOUT;
- } else if (status & ATMCI_DCRCE) {
- dev_dbg(&host->pdev->dev,
- "data CRC error\n");
- data->error = -EILSEQ;
- } else {
- dev_dbg(&host->pdev->dev,
- "data FIFO error (status=%08x)\n",
- status);
- data->error = -EIO;
- }
+ if (host->data->flags & MMC_DATA_WRITE) {
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ state = STATE_WAITING_NOTBUSY;
+ } else if (host->mrq->stop) {
+ atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
+ atmci_send_stop_cmd(host, data);
+ state = STATE_SENDING_STOP;
} else {
+ host->data = NULL;
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
- atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS);
+ state = STATE_END_REQUEST;
}
+ break;
- if (!data->stop) {
- atmci_request_end(host, host->mrq);
- goto unlock;
- }
+ case STATE_WAITING_NOTBUSY:
+ /*
+			 * We can be in this state for two reasons: a command
+			 * requiring the not busy signal (stop command
+			 * included) or a write operation. In the latter case,
+			 * we need to send a stop command.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
+ if (!atmci_test_and_clear_pending(host,
+ EVENT_NOTBUSY))
+ break;
- prev_state = state = STATE_SENDING_STOP;
- if (!data->error)
- atmci_send_stop_cmd(host, data);
- /* fall through */
+ dev_dbg(&host->pdev->dev, "set completed not busy\n");
+ atmci_set_completed(host, EVENT_NOTBUSY);
+
+ if (host->data) {
+ /*
+ * For some commands such as CMD53, even if
+ * there is data transfer, there is no stop
+ * command to send.
+ */
+ if (host->mrq->stop) {
+ atmci_writel(host, ATMCI_IER,
+ ATMCI_CMDRDY);
+ atmci_send_stop_cmd(host, data);
+ state = STATE_SENDING_STOP;
+ } else {
+ host->data = NULL;
+ data->bytes_xfered = data->blocks
+ * data->blksz;
+ data->error = 0;
+ state = STATE_END_REQUEST;
+ }
+ } else
+ state = STATE_END_REQUEST;
+ break;
case STATE_SENDING_STOP:
+ /*
+ * In this state, it is important to set host->data to
+ * NULL (which is tested in the waiting notbusy state)
+ * in order to go to the end request state instead of
+ * sending stop again.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
if (!atmci_test_and_clear_pending(host,
- EVENT_CMD_COMPLETE))
+ EVENT_CMD_RDY))
break;
+ dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
host->cmd = NULL;
+ host->data = NULL;
+ data->bytes_xfered = data->blocks * data->blksz;
+ data->error = 0;
atmci_command_complete(host, mrq->stop);
- atmci_request_end(host, host->mrq);
- goto unlock;
+ if (mrq->stop->error) {
+ host->stop_transfer(host);
+ atmci_writel(host, ATMCI_IDR,
+ ATMCI_TXRDY | ATMCI_RXRDY
+ | ATMCI_DATA_ERROR_FLAGS);
+ state = STATE_END_REQUEST;
+ } else {
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ state = STATE_WAITING_NOTBUSY;
+ }
+ break;
- case STATE_DATA_ERROR:
- if (!atmci_test_and_clear_pending(host,
- EVENT_XFER_COMPLETE))
- break;
+ case STATE_END_REQUEST:
+ atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
+ | ATMCI_DATA_ERROR_FLAGS);
+ status = host->data_status;
+ if (unlikely(status)) {
+ host->stop_transfer(host);
+ host->data = NULL;
+ if (status & ATMCI_DTOE) {
+ data->error = -ETIMEDOUT;
+ } else if (status & ATMCI_DCRCE) {
+ data->error = -EILSEQ;
+ } else {
+ data->error = -EIO;
+ }
+ }
- state = STATE_DATA_BUSY;
+ atmci_request_end(host, host->mrq);
+ state = STATE_IDLE;
break;
}
} while (state != prev_state);
host->state = state;
-unlock:
spin_unlock(&host->lock);
}
@@ -1620,9 +1775,6 @@ static void atmci_read_data_pio(struct atmel_mci *host)
| ATMCI_DATA_ERROR_FLAGS));
host->data_status = status;
data->bytes_xfered += nbytes;
- smp_wmb();
- atmci_set_pending(host, EVENT_DATA_ERROR);
- tasklet_schedule(&host->tasklet);
return;
}
} while (status & ATMCI_RXRDY);
@@ -1691,9 +1843,6 @@ static void atmci_write_data_pio(struct atmel_mci *host)
| ATMCI_DATA_ERROR_FLAGS));
host->data_status = status;
data->bytes_xfered += nbytes;
- smp_wmb();
- atmci_set_pending(host, EVENT_DATA_ERROR);
- tasklet_schedule(&host->tasklet);
return;
}
} while (status & ATMCI_TXRDY);
@@ -1711,16 +1860,6 @@ done:
atmci_set_pending(host, EVENT_XFER_COMPLETE);
}
-static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
-{
- atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
-
- host->cmd_status = status;
- smp_wmb();
- atmci_set_pending(host, EVENT_CMD_COMPLETE);
- tasklet_schedule(&host->tasklet);
-}
-
static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
{
int i;
@@ -1748,17 +1887,21 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
break;
if (pending & ATMCI_DATA_ERROR_FLAGS) {
+ dev_dbg(&host->pdev->dev, "IRQ: data error\n");
atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
- | ATMCI_RXRDY | ATMCI_TXRDY);
- pending &= atmci_readl(host, ATMCI_IMR);
+ | ATMCI_RXRDY | ATMCI_TXRDY
+ | ATMCI_ENDRX | ATMCI_ENDTX
+ | ATMCI_RXBUFF | ATMCI_TXBUFE);
host->data_status = status;
+ dev_dbg(&host->pdev->dev, "set pending data error\n");
smp_wmb();
atmci_set_pending(host, EVENT_DATA_ERROR);
tasklet_schedule(&host->tasklet);
}
if (pending & ATMCI_TXBUFE) {
+ dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
/*
@@ -1774,6 +1917,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
atmci_pdc_complete(host);
}
} else if (pending & ATMCI_ENDTX) {
+ dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
if (host->data_size) {
@@ -1784,6 +1928,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
}
if (pending & ATMCI_RXBUFF) {
+ dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
/*
@@ -1799,6 +1944,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
atmci_pdc_complete(host);
}
} else if (pending & ATMCI_ENDRX) {
+ dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
if (host->data_size) {
@@ -1808,23 +1954,44 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
}
}
+ /*
+		 * The first MCI IP versions, mainly the ones with PDC, have
+		 * issues with the notbusy signal: it cannot be obtained after
+		 * a data transmission unless a stop command has been sent.
+ * The appropriate workaround is to use the BLKE signal.
+ */
+ if (pending & ATMCI_BLKE) {
+ dev_dbg(&host->pdev->dev, "IRQ: blke\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
+ smp_wmb();
+ dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+ atmci_set_pending(host, EVENT_NOTBUSY);
+ tasklet_schedule(&host->tasklet);
+ }
if (pending & ATMCI_NOTBUSY) {
- atmci_writel(host, ATMCI_IDR,
- ATMCI_DATA_ERROR_FLAGS | ATMCI_NOTBUSY);
- if (!host->data_status)
- host->data_status = status;
+ dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
smp_wmb();
- atmci_set_pending(host, EVENT_DATA_COMPLETE);
+ dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+ atmci_set_pending(host, EVENT_NOTBUSY);
tasklet_schedule(&host->tasklet);
}
+
if (pending & ATMCI_RXRDY)
atmci_read_data_pio(host);
if (pending & ATMCI_TXRDY)
atmci_write_data_pio(host);
- if (pending & ATMCI_CMDRDY)
- atmci_cmd_interrupt(host, status);
+ if (pending & ATMCI_CMDRDY) {
+ dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
+ host->cmd_status = status;
+ smp_wmb();
+ dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
+ atmci_set_pending(host, EVENT_CMD_RDY);
+ tasklet_schedule(&host->tasklet);
+ }
if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
atmci_sdio_interrupt(host, status);
@@ -1877,13 +2044,26 @@ static int __init atmci_init_slot(struct atmel_mci *host,
mmc->caps |= MMC_CAP_SDIO_IRQ;
if (host->caps.has_highspeed)
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
- if (slot_data->bus_width >= 4)
+ /*
+ * Without the read/write proof capability, it is strongly suggested to
+ * use only one bit for data to prevent fifo underruns and overruns
+ * which will corrupt data.
+ */
+ if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
mmc->caps |= MMC_CAP_4_BIT_DATA;
- mmc->max_segs = 64;
- mmc->max_req_size = 32768 * 512;
- mmc->max_blk_size = 32768;
- mmc->max_blk_count = 512;
+ if (atmci_get_version(host) < 0x200) {
+ mmc->max_segs = 256;
+ mmc->max_blk_size = 4095;
+ mmc->max_blk_count = 256;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
+ } else {
+ mmc->max_segs = 64;
+ mmc->max_req_size = 32768 * 512;
+ mmc->max_blk_size = 32768;
+ mmc->max_blk_count = 512;
+ }
/* Assume card is present initially */
set_bit(ATMCI_CARD_PRESENT, &slot->flags);
@@ -2007,11 +2187,6 @@ static bool atmci_configure_dma(struct atmel_mci *host)
}
}
-static inline unsigned int atmci_get_version(struct atmel_mci *host)
-{
- return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
-}
-
/*
* HSMCI (High Speed MCI) module is not fully compatible with MCI module.
* HSMCI provides DMA support and a new config register but no more supports
@@ -2032,6 +2207,9 @@ static void __init atmci_get_cap(struct atmel_mci *host)
host->caps.has_highspeed = 0;
host->caps.has_rwproof = 0;
host->caps.has_odd_clk_div = 0;
+ host->caps.has_bad_data_ordering = 1;
+ host->caps.need_reset_after_xfer = 1;
+ host->caps.need_blksz_mul_4 = 1;
/* keep only major version number */
switch (version & 0xf00) {
@@ -2051,7 +2229,11 @@ static void __init atmci_get_cap(struct atmel_mci *host)
host->caps.has_highspeed = 1;
case 0x200:
host->caps.has_rwproof = 1;
+ host->caps.need_blksz_mul_4 = 0;
case 0x100:
+ host->caps.has_bad_data_ordering = 0;
+ host->caps.need_reset_after_xfer = 0;
+ case 0x0:
break;
default:
host->caps.has_pdc = 0;
@@ -2138,14 +2320,20 @@ static int __init atmci_probe(struct platform_device *pdev)
if (pdata->slot[0].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[0],
0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
- if (!ret)
+ if (!ret) {
nr_slots++;
+ host->buf_size = host->slot[0]->mmc->max_req_size;
+ }
}
if (pdata->slot[1].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[1],
1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
- if (!ret)
+ if (!ret) {
nr_slots++;
+ if (host->slot[1]->mmc->max_req_size > host->buf_size)
+ host->buf_size =
+ host->slot[1]->mmc->max_req_size;
+ }
}
if (!nr_slots) {
@@ -2153,6 +2341,19 @@ static int __init atmci_probe(struct platform_device *pdev)
goto err_init_slot;
}
+ if (!host->caps.has_rwproof) {
+ host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
+ &host->buf_phys_addr,
+ GFP_KERNEL);
+ if (!host->buffer) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "buffer allocation failed\n");
+ goto err_init_slot;
+ }
+ }
+
+ setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+
dev_info(&pdev->dev,
"Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
host->mapbase, irq, nr_slots);
@@ -2179,6 +2380,10 @@ static int __exit atmci_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
+ if (host->buffer)
+ dma_free_coherent(&pdev->dev, host->buf_size,
+ host->buffer, host->buf_phys_addr);
+
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i])
atmci_cleanup_slot(host->slot[i], i);
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index c1f3673ae1ef..7cf6c624bf73 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1533,4 +1533,5 @@ module_exit(davinci_mmcsd_exit);
MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
+MODULE_ALIAS("platform:davinci_mmc");
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index ab3fc4617107..9bbf45f8c538 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -100,8 +100,6 @@ struct dw_mci_slot {
int last_detect_state;
};
-static struct workqueue_struct *dw_mci_card_workqueue;
-
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
@@ -859,10 +857,10 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
int_mask = mci_readl(host, INTMASK);
if (enb) {
mci_writel(host, INTMASK,
- (int_mask | (1 << SDMMC_INT_SDIO(slot->id))));
+ (int_mask | SDMMC_INT_SDIO(slot->id)));
} else {
mci_writel(host, INTMASK,
- (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id))));
+ (int_mask & ~SDMMC_INT_SDIO(slot->id)));
}
}
@@ -1605,7 +1603,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & SDMMC_INT_CD) {
mci_writel(host, RINTSTS, SDMMC_INT_CD);
- queue_work(dw_mci_card_workqueue, &host->card_work);
+ queue_work(host->card_workqueue, &host->card_work);
}
/* Handle SDIO Interrupts */
@@ -1844,7 +1842,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
* Card may have been plugged in prior to boot so we
* need to run the detect tasklet
*/
- queue_work(dw_mci_card_workqueue, &host->card_work);
+ queue_work(host->card_workqueue, &host->card_work);
return 0;
}
@@ -2021,9 +2019,9 @@ int dw_mci_probe(struct dw_mci *host)
mci_writel(host, CLKSRC, 0);
tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
- dw_mci_card_workqueue = alloc_workqueue("dw-mci-card",
+ host->card_workqueue = alloc_workqueue("dw-mci-card",
WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
- if (!dw_mci_card_workqueue)
+ if (!host->card_workqueue)
goto err_dmaunmap;
INIT_WORK(&host->card_work, dw_mci_work_routine_card);
ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host);
@@ -2085,7 +2083,7 @@ err_init_slot:
free_irq(host->irq, host);
err_workqueue:
- destroy_workqueue(dw_mci_card_workqueue);
+ destroy_workqueue(host->card_workqueue);
err_dmaunmap:
if (host->use_dma && host->dma_ops->exit)
@@ -2119,7 +2117,7 @@ void dw_mci_remove(struct dw_mci *host)
mci_writel(host, CLKSRC, 0);
free_irq(host->irq, host);
- destroy_workqueue(dw_mci_card_workqueue);
+ destroy_workqueue(host->card_workqueue);
dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
if (host->use_dma && host->dma_ops->exit)
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
deleted file mode 100644
index ea0f3cedef21..000000000000
--- a/drivers/mmc/host/imxmmc.c
+++ /dev/null
@@ -1,1169 +0,0 @@
-/*
- * linux/drivers/mmc/host/imxmmc.c - Motorola i.MX MMCI driver
- *
- * Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de>
- * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
- *
- * derived from pxamci.c by Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/dma-mapping.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/card.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <asm/dma.h>
-#include <asm/irq.h>
-#include <asm/sizes.h>
-#include <mach/mmc.h>
-#include <mach/imx-dma.h>
-
-#include "imxmmc.h"
-
-#define DRIVER_NAME "imx-mmc"
-
-#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \
- INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \
- INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO)
-
-struct imxmci_host {
- struct mmc_host *mmc;
- spinlock_t lock;
- struct resource *res;
- void __iomem *base;
- int irq;
- imx_dmach_t dma;
- volatile unsigned int imask;
- unsigned int power_mode;
- unsigned int present;
- struct imxmmc_platform_data *pdata;
-
- struct mmc_request *req;
- struct mmc_command *cmd;
- struct mmc_data *data;
-
- struct timer_list timer;
- struct tasklet_struct tasklet;
- unsigned int status_reg;
- unsigned long pending_events;
- /* Next two fields are there for CPU driven transfers to overcome SDHC deficiencies */
- u16 *data_ptr;
- unsigned int data_cnt;
- atomic_t stuck_timeout;
-
- unsigned int dma_nents;
- unsigned int dma_size;
- unsigned int dma_dir;
- int dma_allocated;
-
- unsigned char actual_bus_width;
-
- int prev_cmd_code;
-
- struct clk *clk;
-};
-
-#define IMXMCI_PEND_IRQ_b 0
-#define IMXMCI_PEND_DMA_END_b 1
-#define IMXMCI_PEND_DMA_ERR_b 2
-#define IMXMCI_PEND_WAIT_RESP_b 3
-#define IMXMCI_PEND_DMA_DATA_b 4
-#define IMXMCI_PEND_CPU_DATA_b 5
-#define IMXMCI_PEND_CARD_XCHG_b 6
-#define IMXMCI_PEND_SET_INIT_b 7
-#define IMXMCI_PEND_STARTED_b 8
-
-#define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b)
-#define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b)
-#define IMXMCI_PEND_DMA_ERR_m (1 << IMXMCI_PEND_DMA_ERR_b)
-#define IMXMCI_PEND_WAIT_RESP_m (1 << IMXMCI_PEND_WAIT_RESP_b)
-#define IMXMCI_PEND_DMA_DATA_m (1 << IMXMCI_PEND_DMA_DATA_b)
-#define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b)
-#define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b)
-#define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b)
-#define IMXMCI_PEND_STARTED_m (1 << IMXMCI_PEND_STARTED_b)
-
-static void imxmci_stop_clock(struct imxmci_host *host)
-{
- int i = 0;
- u16 reg;
-
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg & ~STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
- while (i < 0x1000) {
- if (!(i & 0x7f)) {
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg | STR_STP_CLK_STOP_CLK,
- host->base + MMC_REG_STR_STP_CLK);
- }
-
- reg = readw(host->base + MMC_REG_STATUS);
- if (!(reg & STATUS_CARD_BUS_CLK_RUN)) {
- /* Check twice before cut */
- reg = readw(host->base + MMC_REG_STATUS);
- if (!(reg & STATUS_CARD_BUS_CLK_RUN))
- return;
- }
-
- i++;
- }
- dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
-}
-
-static int imxmci_start_clock(struct imxmci_host *host)
-{
- unsigned int trials = 0;
- unsigned int delay_limit = 128;
- unsigned long flags;
- u16 reg;
-
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg & ~STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);
-
- clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
-
- /*
- * Command start of the clock, this usually succeeds in less
- * then 6 delay loops, but during card detection (low clockrate)
- * it takes up to 5000 delay loops and sometimes fails for the first time
- */
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg | STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
-
- do {
- unsigned int delay = delay_limit;
-
- while (delay--) {
- reg = readw(host->base + MMC_REG_STATUS);
- if (reg & STATUS_CARD_BUS_CLK_RUN) {
- /* Check twice before cut */
- reg = readw(host->base + MMC_REG_STATUS);
- if (reg & STATUS_CARD_BUS_CLK_RUN)
- return 0;
- }
-
- if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
- return 0;
- }
-
- local_irq_save(flags);
- /*
- * Ensure, that request is not doubled under all possible circumstances.
- * It is possible, that cock running state is missed, because some other
- * IRQ or schedule delays this function execution and the clocks has
- * been already stopped by other means (response processing, SDHC HW)
- */
- if (!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) {
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg | STR_STP_CLK_START_CLK,
- host->base + MMC_REG_STR_STP_CLK);
- }
- local_irq_restore(flags);
-
- } while (++trials < 256);
-
- dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");
-
- return -1;
-}
-
-static void imxmci_softreset(struct imxmci_host *host)
-{
- int i;
-
- /* reset sequence */
- writew(0x08, host->base + MMC_REG_STR_STP_CLK);
- writew(0x0D, host->base + MMC_REG_STR_STP_CLK);
-
- for (i = 0; i < 8; i++)
- writew(0x05, host->base + MMC_REG_STR_STP_CLK);
-
- writew(0xff, host->base + MMC_REG_RES_TO);
- writew(512, host->base + MMC_REG_BLK_LEN);
- writew(1, host->base + MMC_REG_NOB);
-}
-
-static int imxmci_busy_wait_for_status(struct imxmci_host *host,
- unsigned int *pstat, unsigned int stat_mask,
- int timeout, const char *where)
-{
- int loops = 0;
-
- while (!(*pstat & stat_mask)) {
- loops += 2;
- if (loops >= timeout) {
- dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n",
- where, *pstat, stat_mask);
- return -1;
- }
- udelay(2);
- *pstat |= readw(host->base + MMC_REG_STATUS);
- }
- if (!loops)
- return 0;
-
- /* The busy-wait is expected here for clocks < 8 MHz due to SDHC hardware flaws */
- if (!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock >= 8000000))
- dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
- loops, where, *pstat, stat_mask);
- return loops;
-}
-
-static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
-{
- unsigned int nob = data->blocks;
- unsigned int blksz = data->blksz;
- unsigned int datasz = nob * blksz;
- int i;
-
- if (data->flags & MMC_DATA_STREAM)
- nob = 0xffff;
-
- host->data = data;
- data->bytes_xfered = 0;
-
- writew(nob, host->base + MMC_REG_NOB);
- writew(blksz, host->base + MMC_REG_BLK_LEN);
-
- /*
- * DMA cannot be used for small block sizes; we have to use CPU-driven transfers instead.
- * We are in big trouble for non-512-byte transfers anyway, according to the note in
- * paragraph 20.6.7 of the User Manual, but we need to be able to transfer the SCR at least.
- * The situation is even more complex in reality. The SDHC is not able to handle
- * partial FIFO fills and reads well. The length has to be rounded up to a burst size multiple.
- * This is required for the SCR read at least.
- */
- if (datasz < 512) {
- host->dma_size = datasz;
- if (data->flags & MMC_DATA_READ) {
- host->dma_dir = DMA_FROM_DEVICE;
-
- /* Hack to enable reading the SCR */
- writew(1, host->base + MMC_REG_NOB);
- writew(512, host->base + MMC_REG_BLK_LEN);
- } else {
- host->dma_dir = DMA_TO_DEVICE;
- }
-
- /* Convert back to virtual address */
- host->data_ptr = (u16 *)sg_virt(data->sg);
- host->data_cnt = 0;
-
- clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
- set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
-
- return;
- }
-
- if (data->flags & MMC_DATA_READ) {
- host->dma_dir = DMA_FROM_DEVICE;
- host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, host->dma_dir);
-
- imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_READ);
-
- /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
- CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
- } else {
- host->dma_dir = DMA_TO_DEVICE;
-
- host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, host->dma_dir);
-
- imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_WRITE);
-
- /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
- CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
- }
-
-#if 1 /* This code is here only for consistency checking and can be disabled in the future */
- host->dma_size = 0;
- for (i = 0; i < host->dma_nents; i++)
- host->dma_size += data->sg[i].length;
-
- if (datasz > host->dma_size) {
- dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dma_size\n",
- datasz, host->dma_size);
- }
-#endif
-
- host->dma_size = datasz;
-
- wmb();
-
- set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
- clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
-
- /* start the DMA engine for reads; writes are delayed until after the initial response */
- if (host->dma_dir == DMA_FROM_DEVICE)
- imx_dma_enable(host->dma);
-}
-
-static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat)
-{
- unsigned long flags;
- u32 imask;
-
- WARN_ON(host->cmd != NULL);
- host->cmd = cmd;
-
- /* Ensure that the clock is stopped, else command programming and start fail */
- imxmci_stop_clock(host);
-
- if (cmd->flags & MMC_RSP_BUSY)
- cmdat |= CMD_DAT_CONT_BUSY;
-
- switch (mmc_resp_type(cmd)) {
- case MMC_RSP_R1: /* short CRC, OPCODE */
- case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
- cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1;
- break;
- case MMC_RSP_R2: /* long 136 bit + CRC */
- cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2;
- break;
- case MMC_RSP_R3: /* short */
- cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3;
- break;
- default:
- break;
- }
-
- if (test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events))
- cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */
-
- if (host->actual_bus_width == MMC_BUS_WIDTH_4)
- cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
-
- writew(cmd->opcode, host->base + MMC_REG_CMD);
- writew(cmd->arg >> 16, host->base + MMC_REG_ARGH);
- writew(cmd->arg & 0xffff, host->base + MMC_REG_ARGL);
- writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);
-
- atomic_set(&host->stuck_timeout, 0);
- set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events);
-
-
- imask = IMXMCI_INT_MASK_DEFAULT;
- imask &= ~INT_MASK_END_CMD_RES;
- if (cmdat & CMD_DAT_CONT_DATA_ENABLE) {
- /* imask &= ~INT_MASK_BUF_READY; */
- imask &= ~INT_MASK_DATA_TRAN;
- if (cmdat & CMD_DAT_CONT_WRITE)
- imask &= ~INT_MASK_WRITE_OP_DONE;
- if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
- imask &= ~INT_MASK_BUF_READY;
- }
-
- spin_lock_irqsave(&host->lock, flags);
- host->imask = imask;
- writew(host->imask, host->base + MMC_REG_INT_MASK);
- spin_unlock_irqrestore(&host->lock, flags);
-
- dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n",
- cmd->opcode, cmd->opcode, imask);
-
- imxmci_start_clock(host);
-}
-
-static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
-
- host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m |
- IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m);
-
- host->imask = IMXMCI_INT_MASK_DEFAULT;
- writew(host->imask, host->base + MMC_REG_INT_MASK);
-
- spin_unlock_irqrestore(&host->lock, flags);
-
- if (req && req->cmd)
- host->prev_cmd_code = req->cmd->opcode;
-
- host->req = NULL;
- host->cmd = NULL;
- host->data = NULL;
- mmc_request_done(host->mmc, req);
-}
-
-static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
-{
- struct mmc_data *data = host->data;
- int data_error;
-
- if (test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
- imx_dma_disable(host->dma);
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
- host->dma_dir);
- }
-
- if (stat & STATUS_ERR_MASK) {
- dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", stat);
- if (stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
- data->error = -EILSEQ;
- else if (stat & STATUS_TIME_OUT_READ)
- data->error = -ETIMEDOUT;
- else
- data->error = -EIO;
- } else {
- data->bytes_xfered = host->dma_size;
- }
-
- data_error = data->error;
-
- host->data = NULL;
-
- return data_error;
-}
-
-static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
-{
- struct mmc_command *cmd = host->cmd;
- int i;
- u32 a, b, c;
- struct mmc_data *data = host->data;
-
- if (!cmd)
- return 0;
-
- host->cmd = NULL;
-
- if (stat & STATUS_TIME_OUT_RESP) {
- dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
- cmd->error = -ETIMEDOUT;
- } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
- dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
- cmd->error = -EILSEQ;
- }
-
- if (cmd->flags & MMC_RSP_PRESENT) {
- if (cmd->flags & MMC_RSP_136) {
- for (i = 0; i < 4; i++) {
- a = readw(host->base + MMC_REG_RES_FIFO);
- b = readw(host->base + MMC_REG_RES_FIFO);
- cmd->resp[i] = a << 16 | b;
- }
- } else {
- a = readw(host->base + MMC_REG_RES_FIFO);
- b = readw(host->base + MMC_REG_RES_FIFO);
- c = readw(host->base + MMC_REG_RES_FIFO);
- cmd->resp[0] = a << 24 | b << 8 | c >> 8;
- }
- }
-
- dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n",
- cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error);
-
- if (data && !cmd->error && !(stat & STATUS_ERR_MASK)) {
- if (host->req->data->flags & MMC_DATA_WRITE) {
-
- /* Wait for FIFO to be empty before starting DMA write */
-
- stat = readw(host->base + MMC_REG_STATUS);
- if (imxmci_busy_wait_for_status(host, &stat,
- STATUS_APPL_BUFF_FE,
- 40, "imxmci_cmd_done DMA WR") < 0) {
- cmd->error = -EIO;
- imxmci_finish_data(host, stat);
- if (host->req)
- imxmci_finish_request(host, host->req);
- dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n",
- stat);
- return 0;
- }
-
- if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
- imx_dma_enable(host->dma);
- }
- } else {
- struct mmc_request *req;
- imxmci_stop_clock(host);
- req = host->req;
-
- if (data)
- imxmci_finish_data(host, stat);
-
- if (req)
- imxmci_finish_request(host, req);
- else
- dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
- }
-
- return 1;
-}
-
-static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
-{
- struct mmc_data *data = host->data;
- int data_error;
-
- if (!data)
- return 0;
-
- data_error = imxmci_finish_data(host, stat);
-
- if (host->req->stop) {
- imxmci_stop_clock(host);
- imxmci_start_cmd(host, host->req->stop, 0);
- } else {
- struct mmc_request *req;
- req = host->req;
- if (req)
- imxmci_finish_request(host, req);
- else
- dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n");
- }
-
- return 1;
-}
-
-static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
-{
- int i;
- int burst_len;
- int trans_done = 0;
- unsigned int stat = *pstat;
-
- if (host->actual_bus_width != MMC_BUS_WIDTH_4)
- burst_len = 16;
- else
- burst_len = 64;
-
- /* This is unfortunately required */
- dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
- stat);
-
- udelay(20); /* required for clocks < 8 MHz */
-
- if (host->dma_dir == DMA_FROM_DEVICE) {
- imxmci_busy_wait_for_status(host, &stat,
- STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE |
- STATUS_TIME_OUT_READ,
- 50, "imxmci_cpu_driven_data read");
-
- while ((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
- !(stat & STATUS_TIME_OUT_READ) &&
- (host->data_cnt < 512)) {
-
- udelay(20); /* required for clocks < 8 MHz */
-
- for (i = burst_len; i >= 2 ; i -= 2) {
- u16 data;
- data = readw(host->base + MMC_REG_BUFFER_ACCESS);
- udelay(10); /* required for clocks < 8 MHz */
- if (host->data_cnt+2 <= host->dma_size) {
- *(host->data_ptr++) = data;
- } else {
- if (host->data_cnt < host->dma_size)
- *(u8 *)(host->data_ptr) = data;
- }
- host->data_cnt += 2;
- }
-
- stat = readw(host->base + MMC_REG_STATUS);
-
- dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
- host->data_cnt, burst_len, stat);
- }
-
- if ((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
- trans_done = 1;
-
- if (host->dma_size & 0x1ff)
- stat &= ~STATUS_CRC_READ_ERR;
-
- if (stat & STATUS_TIME_OUT_READ) {
- dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n",
- stat);
- trans_done = -1;
- }
-
- } else {
- imxmci_busy_wait_for_status(host, &stat,
- STATUS_APPL_BUFF_FE,
- 20, "imxmci_cpu_driven_data write");
-
- while ((stat & STATUS_APPL_BUFF_FE) &&
- (host->data_cnt < host->dma_size)) {
- if (burst_len >= host->dma_size - host->data_cnt) {
- burst_len = host->dma_size - host->data_cnt;
- host->data_cnt = host->dma_size;
- trans_done = 1;
- } else {
- host->data_cnt += burst_len;
- }
-
- for (i = burst_len; i > 0 ; i -= 2)
- writew(*(host->data_ptr++), host->base + MMC_REG_BUFFER_ACCESS);
-
- stat = readw(host->base + MMC_REG_STATUS);
-
- dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n",
- burst_len, stat);
- }
- }
-
- *pstat = stat;
-
- return trans_done;
-}
-
-static void imxmci_dma_irq(int dma, void *devid)
-{
- struct imxmci_host *host = devid;
- u32 stat = readw(host->base + MMC_REG_STATUS);
-
- atomic_set(&host->stuck_timeout, 0);
- host->status_reg = stat;
- set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
- tasklet_schedule(&host->tasklet);
-}
-
-static irqreturn_t imxmci_irq(int irq, void *devid)
-{
- struct imxmci_host *host = devid;
- u32 stat = readw(host->base + MMC_REG_STATUS);
- int handled = 1;
-
- writew(host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT,
- host->base + MMC_REG_INT_MASK);
-
- atomic_set(&host->stuck_timeout, 0);
- host->status_reg = stat;
- set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
- set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
- tasklet_schedule(&host->tasklet);
-
- return IRQ_RETVAL(handled);
-}
-
-static void imxmci_tasklet_fnc(unsigned long data)
-{
- struct imxmci_host *host = (struct imxmci_host *)data;
- u32 stat;
- unsigned int data_dir_mask = 0; /* STATUS_WR_CRC_ERROR_CODE_MASK */
- int timeout = 0;
-
- if (atomic_read(&host->stuck_timeout) > 4) {
- char *what;
- timeout = 1;
- stat = readw(host->base + MMC_REG_STATUS);
- host->status_reg = stat;
- if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
- if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
- what = "RESP+DMA";
- else
- what = "RESP";
- else
- if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
- if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
- what = "DATA";
- else
- what = "DMA";
- else
- what = "???";
-
- dev_err(mmc_dev(host->mmc),
- "%s TIMEOUT, hardware stuck, STATUS = 0x%04x IMASK = 0x%04x\n",
- what, stat,
- readw(host->base + MMC_REG_INT_MASK));
- dev_err(mmc_dev(host->mmc),
- "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
- readw(host->base + MMC_REG_CMD_DAT_CONT),
- readw(host->base + MMC_REG_BLK_LEN),
- readw(host->base + MMC_REG_NOB),
- CCR(host->dma));
- dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
- host->cmd ? host->cmd->opcode : 0,
- host->prev_cmd_code,
- 1 << host->actual_bus_width, host->dma_size);
- }
-
- if (!host->present || timeout)
- host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
- STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;
-
- if (test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
- clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
-
- stat = readw(host->base + MMC_REG_STATUS);
- /*
- * This is not required in theory, but there is a chance of missing some flag
- * that is cleared automatically by the mask write; the original Freescale code
- * keeps stat from IRQ time, so we do too
- */
- stat |= host->status_reg;
-
- if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
- stat &= ~STATUS_CRC_READ_ERR;
-
- if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
- imxmci_busy_wait_for_status(host, &stat,
- STATUS_END_CMD_RESP | STATUS_ERR_MASK,
- 20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
- }
-
- if (stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
- if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
- imxmci_cmd_done(host, stat);
- if (host->data && (stat & STATUS_ERR_MASK))
- imxmci_data_done(host, stat);
- }
-
- if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
- stat |= readw(host->base + MMC_REG_STATUS);
- if (imxmci_cpu_driven_data(host, &stat)) {
- if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
- imxmci_cmd_done(host, stat);
- atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
- &host->pending_events);
- imxmci_data_done(host, stat);
- }
- }
- }
-
- if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
- !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
-
- stat = readw(host->base + MMC_REG_STATUS);
- /* Same as above */
- stat |= host->status_reg;
-
- if (host->dma_dir == DMA_TO_DEVICE)
- data_dir_mask = STATUS_WRITE_OP_DONE;
- else
- data_dir_mask = STATUS_DATA_TRANS_DONE;
-
- if (stat & data_dir_mask) {
- clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
- imxmci_data_done(host, stat);
- }
- }
-
- if (test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {
-
- if (host->cmd)
- imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);
-
- if (host->data)
- imxmci_data_done(host, STATUS_TIME_OUT_READ |
- STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);
-
- if (host->req)
- imxmci_finish_request(host, host->req);
-
- mmc_detect_change(host->mmc, msecs_to_jiffies(100));
-
- }
-}
-
-static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req)
-{
- struct imxmci_host *host = mmc_priv(mmc);
- unsigned int cmdat;
-
- WARN_ON(host->req != NULL);
-
- host->req = req;
-
- cmdat = 0;
-
- if (req->data) {
- imxmci_setup_data(host, req->data);
-
- cmdat |= CMD_DAT_CONT_DATA_ENABLE;
-
- if (req->data->flags & MMC_DATA_WRITE)
- cmdat |= CMD_DAT_CONT_WRITE;
-
- if (req->data->flags & MMC_DATA_STREAM)
- cmdat |= CMD_DAT_CONT_STREAM_BLOCK;
- }
-
- imxmci_start_cmd(host, req->cmd, cmdat);
-}
-
-#define CLK_RATE 19200000
-
-static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
-{
- struct imxmci_host *host = mmc_priv(mmc);
- int prescaler;
-
- if (ios->bus_width == MMC_BUS_WIDTH_4) {
- host->actual_bus_width = MMC_BUS_WIDTH_4;
- imx_gpio_mode(PB11_PF_SD_DAT3);
- BLR(host->dma) = 0; /* burst 64 byte read/write */
- } else {
- host->actual_bus_width = MMC_BUS_WIDTH_1;
- imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
- BLR(host->dma) = 16; /* burst 16 byte read/write */
- }
-
- if (host->power_mode != ios->power_mode) {
- switch (ios->power_mode) {
- case MMC_POWER_OFF:
- break;
- case MMC_POWER_UP:
- set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
- break;
- case MMC_POWER_ON:
- break;
- }
- host->power_mode = ios->power_mode;
- }
-
- if (ios->clock) {
- unsigned int clk;
- u16 reg;
-
- /* The prescaler is 5 for PERCLK2 equal to 96 MHz,
- * so 96 MHz / 5 = 19.2 MHz
- */
- clk = clk_get_rate(host->clk);
- prescaler = (clk + (CLK_RATE * 7) / 8) / CLK_RATE;
- switch (prescaler) {
- case 0:
- case 1: prescaler = 0;
- break;
- case 2: prescaler = 1;
- break;
- case 3: prescaler = 2;
- break;
- case 4: prescaler = 4;
- break;
- default:
- case 5: prescaler = 5;
- break;
- }
-
- dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n",
- clk, prescaler);
-
- for (clk = 0; clk < 8; clk++) {
- int x;
- x = CLK_RATE / (1 << clk);
- if (x <= ios->clock)
- break;
- }
-
- /* enable controller */
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg | STR_STP_CLK_ENABLE,
- host->base + MMC_REG_STR_STP_CLK);
-
- imxmci_stop_clock(host);
- writew((prescaler << 3) | clk, host->base + MMC_REG_CLK_RATE);
- /*
- * To my understanding, the clock should not be started here, because it would
- * trigger the SDHC sequencer and send the last or a random command to the card
- */
- /* imxmci_start_clock(host); */
-
- dev_dbg(mmc_dev(host->mmc),
- "MMC_CLK_RATE: 0x%08x\n",
- readw(host->base + MMC_REG_CLK_RATE));
- } else {
- imxmci_stop_clock(host);
- }
-}
-
-static int imxmci_get_ro(struct mmc_host *mmc)
-{
- struct imxmci_host *host = mmc_priv(mmc);
-
- if (host->pdata && host->pdata->get_ro)
- return !!host->pdata->get_ro(mmc_dev(mmc));
- /*
- * Board doesn't support read only detection; let the mmc core
- * decide what to do.
- */
- return -ENOSYS;
-}
-
-
-static const struct mmc_host_ops imxmci_ops = {
- .request = imxmci_request,
- .set_ios = imxmci_set_ios,
- .get_ro = imxmci_get_ro,
-};
-
-static void imxmci_check_status(unsigned long data)
-{
- struct imxmci_host *host = (struct imxmci_host *)data;
-
- if (host->pdata && host->pdata->card_present &&
- host->pdata->card_present(mmc_dev(host->mmc)) != host->present) {
- host->present ^= 1;
- dev_info(mmc_dev(host->mmc), "card %s\n",
- host->present ? "inserted" : "removed");
-
- set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events);
- tasklet_schedule(&host->tasklet);
- }
-
- if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) ||
- test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
- atomic_inc(&host->stuck_timeout);
- if (atomic_read(&host->stuck_timeout) > 4)
- tasklet_schedule(&host->tasklet);
- } else {
- atomic_set(&host->stuck_timeout, 0);
-
- }
-
- mod_timer(&host->timer, jiffies + (HZ>>1));
-}
-
-static int __init imxmci_probe(struct platform_device *pdev)
-{
- struct mmc_host *mmc;
- struct imxmci_host *host = NULL;
- struct resource *r;
- int ret = 0, irq;
- u16 rev_no;
-
- pr_info("i.MX mmc driver\n");
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
- if (!r || irq < 0)
- return -ENXIO;
-
- r = request_mem_region(r->start, resource_size(r), pdev->name);
- if (!r)
- return -EBUSY;
-
- mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
- if (!mmc) {
- ret = -ENOMEM;
- goto out;
- }
-
- mmc->ops = &imxmci_ops;
- mmc->f_min = 150000;
- mmc->f_max = CLK_RATE/2;
- mmc->ocr_avail = MMC_VDD_32_33;
- mmc->caps = MMC_CAP_4_BIT_DATA;
-
- /* MMC core transfer sizes tunable parameters */
- mmc->max_segs = 64;
- mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
- mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
- mmc->max_blk_size = 2048;
- mmc->max_blk_count = 65535;
-
- host = mmc_priv(mmc);
- host->base = ioremap(r->start, resource_size(r));
- if (!host->base) {
- ret = -ENOMEM;
- goto out;
- }
-
- host->mmc = mmc;
- host->dma_allocated = 0;
- host->pdata = pdev->dev.platform_data;
- if (!host->pdata)
- dev_warn(&pdev->dev, "No platform data provided!\n");
-
- spin_lock_init(&host->lock);
- host->res = r;
- host->irq = irq;
-
- host->clk = clk_get(&pdev->dev, "perclk2");
- if (IS_ERR(host->clk)) {
- ret = PTR_ERR(host->clk);
- goto out;
- }
- clk_enable(host->clk);
-
- imx_gpio_mode(PB8_PF_SD_DAT0);
- imx_gpio_mode(PB9_PF_SD_DAT1);
- imx_gpio_mode(PB10_PF_SD_DAT2);
- /* Configured as GPIO with pull-up to ensure the right MMC card mode */
- /* Switched to PB11_PF_SD_DAT3 if a 4-bit bus is configured */
- imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
- /* imx_gpio_mode(PB11_PF_SD_DAT3); */
- imx_gpio_mode(PB12_PF_SD_CLK);
- imx_gpio_mode(PB13_PF_SD_CMD);
-
- imxmci_softreset(host);
-
- rev_no = readw(host->base + MMC_REG_REV_NO);
- if (rev_no != 0x390) {
- dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
- readw(host->base + MMC_REG_REV_NO));
- goto out;
- }
-
- /* recommended in data sheet */
- writew(0x2db4, host->base + MMC_REG_READ_TO);
-
- host->imask = IMXMCI_INT_MASK_DEFAULT;
- writew(host->imask, host->base + MMC_REG_INT_MASK);
-
- host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
- if(host->dma < 0) {
- dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
- ret = -EBUSY;
- goto out;
- }
- host->dma_allocated = 1;
- imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
- RSSR(host->dma) = DMA_REQ_SDHC;
-
- tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
- host->status_reg=0;
- host->pending_events=0;
-
- ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host);
- if (ret)
- goto out;
-
- if (host->pdata && host->pdata->card_present)
- host->present = host->pdata->card_present(mmc_dev(mmc));
- else /* if there is no way to detect, assume that the card is present */
- host->present = 1;
-
- init_timer(&host->timer);
- host->timer.data = (unsigned long)host;
- host->timer.function = imxmci_check_status;
- add_timer(&host->timer);
- mod_timer(&host->timer, jiffies + (HZ >> 1));
-
- platform_set_drvdata(pdev, mmc);
-
- mmc_add_host(mmc);
-
- return 0;
-
-out:
- if (host) {
- if (host->dma_allocated) {
- imx_dma_free(host->dma);
- host->dma_allocated = 0;
- }
- if (host->clk) {
- clk_disable(host->clk);
- clk_put(host->clk);
- }
- if (host->base)
- iounmap(host->base);
- }
- if (mmc)
- mmc_free_host(mmc);
- release_mem_region(r->start, resource_size(r));
- return ret;
-}
-
-static int __exit imxmci_remove(struct platform_device *pdev)
-{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
-
- platform_set_drvdata(pdev, NULL);
-
- if (mmc) {
- struct imxmci_host *host = mmc_priv(mmc);
-
- tasklet_disable(&host->tasklet);
-
- del_timer_sync(&host->timer);
- mmc_remove_host(mmc);
-
- free_irq(host->irq, host);
- iounmap(host->base);
- if (host->dma_allocated) {
- imx_dma_free(host->dma);
- host->dma_allocated = 0;
- }
-
- tasklet_kill(&host->tasklet);
-
- clk_disable(host->clk);
- clk_put(host->clk);
-
- release_mem_region(host->res->start, resource_size(host->res));
-
- mmc_free_host(mmc);
- }
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct mmc_host *mmc = platform_get_drvdata(dev);
- int ret = 0;
-
- if (mmc)
- ret = mmc_suspend_host(mmc);
-
- return ret;
-}
-
-static int imxmci_resume(struct platform_device *dev)
-{
- struct mmc_host *mmc = platform_get_drvdata(dev);
- struct imxmci_host *host;
- int ret = 0;
-
- if (mmc) {
- host = mmc_priv(mmc);
- if (host)
- set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
- ret = mmc_resume_host(mmc);
- }
-
- return ret;
-}
-#else
-#define imxmci_suspend NULL
-#define imxmci_resume NULL
-#endif /* CONFIG_PM */
-
-static struct platform_driver imxmci_driver = {
- .remove = __exit_p(imxmci_remove),
- .suspend = imxmci_suspend,
- .resume = imxmci_resume,
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- }
-};
-
-static int __init imxmci_init(void)
-{
- return platform_driver_probe(&imxmci_driver, imxmci_probe);
-}
-
-static void __exit imxmci_exit(void)
-{
- platform_driver_unregister(&imxmci_driver);
-}
-
-module_init(imxmci_init);
-module_exit(imxmci_exit);
-
-MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
-MODULE_AUTHOR("Sascha Hauer, Pengutronix");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-mmc");
diff --git a/drivers/mmc/host/imxmmc.h b/drivers/mmc/host/imxmmc.h
deleted file mode 100644
index 09d5d4ee3a77..000000000000
--- a/drivers/mmc/host/imxmmc.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#define MMC_REG_STR_STP_CLK 0x00
-#define MMC_REG_STATUS 0x04
-#define MMC_REG_CLK_RATE 0x08
-#define MMC_REG_CMD_DAT_CONT 0x0C
-#define MMC_REG_RES_TO 0x10
-#define MMC_REG_READ_TO 0x14
-#define MMC_REG_BLK_LEN 0x18
-#define MMC_REG_NOB 0x1C
-#define MMC_REG_REV_NO 0x20
-#define MMC_REG_INT_MASK 0x24
-#define MMC_REG_CMD 0x28
-#define MMC_REG_ARGH 0x2C
-#define MMC_REG_ARGL 0x30
-#define MMC_REG_RES_FIFO 0x34
-#define MMC_REG_BUFFER_ACCESS 0x38
-
-#define STR_STP_CLK_IPG_CLK_GATE_DIS (1<<15)
-#define STR_STP_CLK_IPG_PERCLK_GATE_DIS (1<<14)
-#define STR_STP_CLK_ENDIAN (1<<5)
-#define STR_STP_CLK_RESET (1<<3)
-#define STR_STP_CLK_ENABLE (1<<2)
-#define STR_STP_CLK_START_CLK (1<<1)
-#define STR_STP_CLK_STOP_CLK (1<<0)
-#define STATUS_CARD_PRESENCE (1<<15)
-#define STATUS_SDIO_INT_ACTIVE (1<<14)
-#define STATUS_END_CMD_RESP (1<<13)
-#define STATUS_WRITE_OP_DONE (1<<12)
-#define STATUS_DATA_TRANS_DONE (1<<11)
-#define STATUS_WR_CRC_ERROR_CODE_MASK (3<<10)
-#define STATUS_CARD_BUS_CLK_RUN (1<<8)
-#define STATUS_APPL_BUFF_FF (1<<7)
-#define STATUS_APPL_BUFF_FE (1<<6)
-#define STATUS_RESP_CRC_ERR (1<<5)
-#define STATUS_CRC_READ_ERR (1<<3)
-#define STATUS_CRC_WRITE_ERR (1<<2)
-#define STATUS_TIME_OUT_RESP (1<<1)
-#define STATUS_TIME_OUT_READ (1<<0)
-#define STATUS_ERR_MASK 0x2f
-#define CLK_RATE_PRESCALER(x) ((x) & 0x7)
-#define CLK_RATE_CLK_RATE(x) (((x) & 0x7) << 3)
-#define CMD_DAT_CONT_CMD_RESP_LONG_OFF (1<<12)
-#define CMD_DAT_CONT_STOP_READWAIT (1<<11)
-#define CMD_DAT_CONT_START_READWAIT (1<<10)
-#define CMD_DAT_CONT_BUS_WIDTH_1 (0<<8)
-#define CMD_DAT_CONT_BUS_WIDTH_4 (2<<8)
-#define CMD_DAT_CONT_INIT (1<<7)
-#define CMD_DAT_CONT_BUSY (1<<6)
-#define CMD_DAT_CONT_STREAM_BLOCK (1<<5)
-#define CMD_DAT_CONT_WRITE (1<<4)
-#define CMD_DAT_CONT_DATA_ENABLE (1<<3)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R1 (1)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R2 (2)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R3 (3)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R4 (4)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R5 (5)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R6 (6)
-#define INT_MASK_AUTO_CARD_DETECT (1<<6)
-#define INT_MASK_DAT0_EN (1<<5)
-#define INT_MASK_SDIO (1<<4)
-#define INT_MASK_BUF_READY (1<<3)
-#define INT_MASK_END_CMD_RES (1<<2)
-#define INT_MASK_WRITE_OP_DONE (1<<1)
-#define INT_MASK_DATA_TRAN (1<<0)
-#define INT_ALL (0x7f)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index b6f38421d541..f0fcce40cd8d 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
@@ -25,6 +26,7 @@
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -1207,21 +1209,76 @@ static const struct mmc_host_ops mmci_ops = {
.get_cd = mmci_get_cd,
};
+#ifdef CONFIG_OF
+static void mmci_dt_populate_generic_pdata(struct device_node *np,
+ struct mmci_platform_data *pdata)
+{
+ int bus_width = 0;
+
+ pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
+ if (!pdata->gpio_wp)
+ pdata->gpio_wp = -1;
+
+ pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
+ if (!pdata->gpio_cd)
+ pdata->gpio_cd = -1;
+
+ if (of_get_property(np, "cd-inverted", NULL))
+ pdata->cd_invert = true;
+ else
+ pdata->cd_invert = false;
+
+ of_property_read_u32(np, "max-frequency", &pdata->f_max);
+ if (!pdata->f_max)
+ pr_warn("%s has no 'max-frequency' property\n", np->full_name);
+
+ if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
+ pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
+ if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
+ pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;
+
+ of_property_read_u32(np, "bus-width", &bus_width);
+ switch (bus_width) {
+ case 0 :
+ /* No bus-width supplied. */
+ break;
+ case 4 :
+ pdata->capabilities |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 8 :
+ pdata->capabilities |= MMC_CAP_8_BIT_DATA;
+ break;
+ default :
+ pr_warn("%s: Unsupported bus width\n", np->full_name);
+ }
+}
+#else
+static void mmci_dt_populate_generic_pdata(struct device_node *np,
+ struct mmci_platform_data *pdata)
+{
+ return;
+}
+#endif
+
static int __devinit mmci_probe(struct amba_device *dev,
const struct amba_id *id)
{
struct mmci_platform_data *plat = dev->dev.platform_data;
+ struct device_node *np = dev->dev.of_node;
struct variant_data *variant = id->data;
struct mmci_host *host;
struct mmc_host *mmc;
int ret;
- /* must have platform data */
- if (!plat) {
- ret = -EINVAL;
- goto out;
+ /* Must have platform data or Device Tree. */
+ if (!plat && !np) {
+ dev_err(&dev->dev, "No plat data or DT found\n");
+ return -EINVAL;
}
+ if (np)
+ mmci_dt_populate_generic_pdata(np, plat);
+
ret = amba_request_regions(dev, DRIVER_NAME);
if (ret)
goto out;
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index eeb8cd125b0c..3b9136c1a475 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/irq.h>
+#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/mmc/host.h>
@@ -51,6 +52,7 @@ struct mvsd_host {
struct device *dev;
struct resource *res;
int irq;
+ struct clk *clk;
int gpio_card_detect;
int gpio_write_protect;
};
@@ -770,6 +772,13 @@ static int __init mvsd_probe(struct platform_device *pdev)
} else
host->irq = irq;
+ /* Not all platforms can gate the clock, so it is not
+ an error if the clock does not exist. */
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(host->clk)) {
+ clk_prepare_enable(host->clk);
+ }
+
if (mvsd_data->gpio_card_detect) {
ret = gpio_request(mvsd_data->gpio_card_detect,
DRIVER_NAME " cd");
@@ -854,6 +863,11 @@ static int __exit mvsd_remove(struct platform_device *pdev)
mvsd_power_down(host);
iounmap(host->base);
release_resource(host->res);
+
+ if (!IS_ERR(host->clk)) {
+ clk_disable_unprepare(host->clk);
+ clk_put(host->clk);
+ }
mmc_free_host(mmc);
}
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index b2058b432320..28ed52d58f7f 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -136,7 +136,8 @@ struct mxcmci_host {
u16 rev_no;
unsigned int cmdat;
- struct clk *clk;
+ struct clk *clk_ipg;
+ struct clk *clk_per;
int clock;
@@ -672,7 +673,7 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
{
unsigned int divider;
int prescaler = 0;
- unsigned int clk_in = clk_get_rate(host->clk);
+ unsigned int clk_in = clk_get_rate(host->clk_per);
while (prescaler <= 0x800) {
for (divider = 1; divider <= 0xF; divider++) {
@@ -900,12 +901,20 @@ static int mxcmci_probe(struct platform_device *pdev)
host->res = r;
host->irq = irq;
- host->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(host->clk)) {
- ret = PTR_ERR(host->clk);
+ host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(host->clk_ipg)) {
+ ret = PTR_ERR(host->clk_ipg);
goto out_iounmap;
}
- clk_enable(host->clk);
+
+ host->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(host->clk_per)) {
+ ret = PTR_ERR(host->clk_per);
+ goto out_iounmap;
+ }
+
+ clk_prepare_enable(host->clk_per);
+ clk_prepare_enable(host->clk_ipg);
mxcmci_softreset(host);
@@ -917,8 +926,8 @@ static int mxcmci_probe(struct platform_device *pdev)
goto out_clk_put;
}
- mmc->f_min = clk_get_rate(host->clk) >> 16;
- mmc->f_max = clk_get_rate(host->clk) >> 1;
+ mmc->f_min = clk_get_rate(host->clk_per) >> 16;
+ mmc->f_max = clk_get_rate(host->clk_per) >> 1;
/* recommended in data sheet */
writew(0x2db4, host->base + MMC_REG_READ_TO);
@@ -967,8 +976,8 @@ out_free_dma:
if (host->dma)
dma_release_channel(host->dma);
out_clk_put:
- clk_disable(host->clk);
- clk_put(host->clk);
+ clk_disable_unprepare(host->clk_per);
+ clk_disable_unprepare(host->clk_ipg);
out_iounmap:
iounmap(host->base);
out_free:
@@ -999,8 +1008,8 @@ static int mxcmci_remove(struct platform_device *pdev)
if (host->dma)
dma_release_channel(host->dma);
- clk_disable(host->clk);
- clk_put(host->clk);
+ clk_disable_unprepare(host->clk_per);
+ clk_disable_unprepare(host->clk_ipg);
release_mem_region(host->res->start, resource_size(host->res));
@@ -1018,7 +1027,8 @@ static int mxcmci_suspend(struct device *dev)
if (mmc)
ret = mmc_suspend_host(mmc);
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk_per);
+ clk_disable_unprepare(host->clk_ipg);
return ret;
}
@@ -1029,7 +1039,8 @@ static int mxcmci_resume(struct device *dev)
struct mxcmci_host *host = mmc_priv(mmc);
int ret = 0;
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk_per);
+ clk_prepare_enable(host->clk_ipg);
if (mmc)
ret = mmc_resume_host(mmc);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index bb03ddda481d..34a90266ab11 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -23,6 +23,9 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -40,18 +43,15 @@
#include <linux/module.h>
#include <linux/fsl/mxs-dma.h>
#include <linux/pinctrl/consumer.h>
-
-#include <mach/mxs.h>
-#include <mach/common.h>
-#include <mach/mmc.h>
+#include <linux/stmp_device.h>
+#include <linux/mmc/mxs-mmc.h>
#define DRIVER_NAME "mxs-mmc"
/* card detect polling timeout */
#define MXS_MMC_DETECT_TIMEOUT (HZ/2)
-#define SSP_VERSION_LATEST 4
-#define ssp_is_old() (host->version < SSP_VERSION_LATEST)
+#define ssp_is_old(host) ((host)->devid == IMX23_MMC)
/* SSP registers */
#define HW_SSP_CTRL0 0x000
@@ -86,14 +86,14 @@
#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4)
#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0)
#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf)
-#define HW_SSP_TIMING (ssp_is_old() ? 0x050 : 0x070)
+#define HW_SSP_TIMING(h) (ssp_is_old(h) ? 0x050 : 0x070)
#define BP_SSP_TIMING_TIMEOUT (16)
#define BM_SSP_TIMING_TIMEOUT (0xffff << 16)
#define BP_SSP_TIMING_CLOCK_DIVIDE (8)
#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8)
#define BP_SSP_TIMING_CLOCK_RATE (0)
#define BM_SSP_TIMING_CLOCK_RATE (0xff)
-#define HW_SSP_CTRL1 (ssp_is_old() ? 0x060 : 0x080)
+#define HW_SSP_CTRL1(h) (ssp_is_old(h) ? 0x060 : 0x080)
#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31)
#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30)
#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29)
@@ -116,15 +116,13 @@
#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4)
#define BP_SSP_CTRL1_SSP_MODE (0)
#define BM_SSP_CTRL1_SSP_MODE (0xf)
-#define HW_SSP_SDRESP0 (ssp_is_old() ? 0x080 : 0x0a0)
-#define HW_SSP_SDRESP1 (ssp_is_old() ? 0x090 : 0x0b0)
-#define HW_SSP_SDRESP2 (ssp_is_old() ? 0x0a0 : 0x0c0)
-#define HW_SSP_SDRESP3 (ssp_is_old() ? 0x0b0 : 0x0d0)
-#define HW_SSP_STATUS (ssp_is_old() ? 0x0c0 : 0x100)
+#define HW_SSP_SDRESP0(h) (ssp_is_old(h) ? 0x080 : 0x0a0)
+#define HW_SSP_SDRESP1(h) (ssp_is_old(h) ? 0x090 : 0x0b0)
+#define HW_SSP_SDRESP2(h) (ssp_is_old(h) ? 0x0a0 : 0x0c0)
+#define HW_SSP_SDRESP3(h) (ssp_is_old(h) ? 0x0b0 : 0x0d0)
+#define HW_SSP_STATUS(h) (ssp_is_old(h) ? 0x0c0 : 0x100)
#define BM_SSP_STATUS_CARD_DETECT (1 << 28)
#define BM_SSP_STATUS_SDIO_IRQ (1 << 17)
-#define HW_SSP_VERSION (cpu_is_mx23() ? 0x110 : 0x130)
-#define BP_SSP_VERSION_MAJOR (24)
#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field)
@@ -139,6 +137,11 @@
#define SSP_PIO_NUM 3
+enum mxs_mmc_id {
+ IMX23_MMC,
+ IMX28_MMC,
+};
+
struct mxs_mmc_host {
struct mmc_host *mmc;
struct mmc_request *mrq;
@@ -146,9 +149,7 @@ struct mxs_mmc_host {
struct mmc_data *data;
void __iomem *base;
- int irq;
- struct resource *res;
- struct resource *dma_res;
+ int dma_channel;
struct clk *clk;
unsigned int clk_rate;
@@ -158,32 +159,28 @@ struct mxs_mmc_host {
enum dma_transfer_direction slave_dirn;
u32 ssp_pio_words[SSP_PIO_NUM];
- unsigned int version;
+ enum mxs_mmc_id devid;
unsigned char bus_width;
spinlock_t lock;
int sdio_irq_en;
+ int wp_gpio;
};
static int mxs_mmc_get_ro(struct mmc_host *mmc)
{
struct mxs_mmc_host *host = mmc_priv(mmc);
- struct mxs_mmc_platform_data *pdata =
- mmc_dev(host->mmc)->platform_data;
-
- if (!pdata)
- return -EFAULT;
- if (!gpio_is_valid(pdata->wp_gpio))
+ if (!gpio_is_valid(host->wp_gpio))
return -EINVAL;
- return gpio_get_value(pdata->wp_gpio);
+ return gpio_get_value(host->wp_gpio);
}
static int mxs_mmc_get_cd(struct mmc_host *mmc)
{
struct mxs_mmc_host *host = mmc_priv(mmc);
- return !(readl(host->base + HW_SSP_STATUS) &
+ return !(readl(host->base + HW_SSP_STATUS(host)) &
BM_SSP_STATUS_CARD_DETECT);
}
@@ -191,7 +188,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
{
u32 ctrl0, ctrl1;
- mxs_reset_block(host->base);
+ stmp_reset_block(host->base);
ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
@@ -207,7 +204,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
BF_SSP(2, TIMING_CLOCK_DIVIDE) |
BF_SSP(0, TIMING_CLOCK_RATE),
- host->base + HW_SSP_TIMING);
+ host->base + HW_SSP_TIMING(host));
if (host->sdio_irq_en) {
ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
@@ -215,7 +212,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
}
writel(ctrl0, host->base + HW_SSP_CTRL0);
- writel(ctrl1, host->base + HW_SSP_CTRL1);
+ writel(ctrl1, host->base + HW_SSP_CTRL1(host));
}
static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
@@ -229,12 +226,12 @@ static void mxs_mmc_request_done(struct mxs_mmc_host *host)
if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
if (mmc_resp_type(cmd) & MMC_RSP_136) {
- cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0);
- cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1);
- cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2);
- cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3);
+ cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0(host));
+ cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1(host));
+ cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2(host));
+ cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3(host));
} else {
- cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0);
+ cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0(host));
}
}
@@ -277,9 +274,9 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
spin_lock(&host->lock);
- stat = readl(host->base + HW_SSP_CTRL1);
+ stat = readl(host->base + HW_SSP_CTRL1(host));
writel(stat & MXS_MMC_IRQ_BITS,
- host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+ host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
mmc_signal_sdio_irq(host->mmc);
@@ -485,7 +482,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
blocks = 1;
/* xfer count, block size and count need to be set differently */
- if (ssp_is_old()) {
+ if (ssp_is_old(host)) {
ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
@@ -509,10 +506,10 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
/* set the timeout count */
timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns);
- val = readl(host->base + HW_SSP_TIMING);
+ val = readl(host->base + HW_SSP_TIMING(host));
val &= ~(BM_SSP_TIMING_TIMEOUT);
val |= BF_SSP(timeout, TIMING_TIMEOUT);
- writel(val, host->base + HW_SSP_TIMING);
+ writel(val, host->base + HW_SSP_TIMING(host));
/* pio */
host->ssp_pio_words[0] = ctrl0;
@@ -598,11 +595,11 @@ static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
- val = readl(host->base + HW_SSP_TIMING);
+ val = readl(host->base + HW_SSP_TIMING(host));
val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE);
val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE);
- writel(val, host->base + HW_SSP_TIMING);
+ writel(val, host->base + HW_SSP_TIMING(host));
host->clk_rate = ssp_sck;
@@ -637,18 +634,19 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
if (enable) {
writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
- host->base + HW_SSP_CTRL0 + MXS_SET_ADDR);
+ host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
- host->base + HW_SSP_CTRL1 + MXS_SET_ADDR);
+ host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET);
- if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
+ if (readl(host->base + HW_SSP_STATUS(host)) &
+ BM_SSP_STATUS_SDIO_IRQ)
mmc_signal_sdio_irq(host->mmc);
} else {
writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
- host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR);
+ host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
- host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+ host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -669,7 +667,7 @@ static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
if (!mxs_dma_is_apbh(chan))
return false;
- if (chan->chan_id != host->dma_res->start)
+ if (chan->chan_id != host->dma_channel)
return false;
chan->private = &host->dma_data;
@@ -677,11 +675,34 @@ static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
return true;
}
+static struct platform_device_id mxs_mmc_ids[] = {
+ {
+ .name = "imx23-mmc",
+ .driver_data = IMX23_MMC,
+ }, {
+ .name = "imx28-mmc",
+ .driver_data = IMX28_MMC,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxs_mmc_ids);
+
+static const struct of_device_id mxs_mmc_dt_ids[] = {
+ { .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_MMC, },
+ { .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_MMC, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
+
static int mxs_mmc_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(mxs_mmc_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
struct mxs_mmc_host *host;
struct mmc_host *mmc;
- struct resource *iores, *dmares, *r;
+ struct resource *iores, *dmares;
struct mxs_mmc_platform_data *pdata;
struct pinctrl *pinctrl;
int ret = 0, irq_err, irq_dma;
@@ -691,46 +712,51 @@ static int mxs_mmc_probe(struct platform_device *pdev)
dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
irq_err = platform_get_irq(pdev, 0);
irq_dma = platform_get_irq(pdev, 1);
- if (!iores || !dmares || irq_err < 0 || irq_dma < 0)
+ if (!iores || irq_err < 0 || irq_dma < 0)
return -EINVAL;
- r = request_mem_region(iores->start, resource_size(iores), pdev->name);
- if (!r)
- return -EBUSY;
-
mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
- if (!mmc) {
- ret = -ENOMEM;
- goto out_release_mem;
- }
+ if (!mmc)
+ return -ENOMEM;
host = mmc_priv(mmc);
- host->base = ioremap(r->start, resource_size(r));
+ host->base = devm_request_and_ioremap(&pdev->dev, iores);
if (!host->base) {
- ret = -ENOMEM;
+ ret = -EADDRNOTAVAIL;
goto out_mmc_free;
}
- /* only the major version matters */
- host->version = readl(host->base + HW_SSP_VERSION) >>
- BP_SSP_VERSION_MAJOR;
+ if (np) {
+ host->devid = (enum mxs_mmc_id) of_id->data;
+ /*
+ * TODO: This is a temporary solution and should be changed
+ * to use generic DMA binding later when the helpers get in.
+ */
+ ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
+ &host->dma_channel);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to get dma channel\n");
+ goto out_mmc_free;
+ }
+ } else {
+ host->devid = pdev->id_entry->driver_data;
+ host->dma_channel = dmares->start;
+ }
host->mmc = mmc;
- host->res = r;
- host->dma_res = dmares;
- host->irq = irq_err;
host->sdio_irq_en = 0;
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl)) {
ret = PTR_ERR(pinctrl);
- goto out_iounmap;
+ goto out_mmc_free;
}
host->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
- goto out_iounmap;
+ goto out_mmc_free;
}
clk_prepare_enable(host->clk);
@@ -752,11 +778,20 @@ static int mxs_mmc_probe(struct platform_device *pdev)
MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
pdata = mmc_dev(host->mmc)->platform_data;
- if (pdata) {
+ if (!pdata) {
+ u32 bus_width = 0;
+ of_property_read_u32(np, "bus-width", &bus_width);
+ if (bus_width == 4)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+ else if (bus_width == 8)
+ mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
+ host->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
+ } else {
if (pdata->flags & SLOTF_8_BIT_CAPABLE)
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
if (pdata->flags & SLOTF_4_BIT_CAPABLE)
mmc->caps |= MMC_CAP_4_BIT_DATA;
+ host->wp_gpio = pdata->wp_gpio;
}
mmc->f_min = 400000;
@@ -765,13 +800,14 @@ static int mxs_mmc_probe(struct platform_device *pdev)
mmc->max_segs = 52;
mmc->max_blk_size = 1 << 0xf;
- mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff;
- mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff;
+ mmc->max_blk_count = (ssp_is_old(host)) ? 0xff : 0xffffff;
+ mmc->max_req_size = (ssp_is_old(host)) ? 0xffff : 0xffffffff;
mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
platform_set_drvdata(pdev, mmc);
- ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host);
+ ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
+ DRIVER_NAME, host);
if (ret)
goto out_free_dma;
@@ -779,26 +815,20 @@ static int mxs_mmc_probe(struct platform_device *pdev)
ret = mmc_add_host(mmc);
if (ret)
- goto out_free_irq;
+ goto out_free_dma;
dev_info(mmc_dev(host->mmc), "initialized\n");
return 0;
-out_free_irq:
- free_irq(host->irq, host);
out_free_dma:
if (host->dmach)
dma_release_channel(host->dmach);
out_clk_put:
clk_disable_unprepare(host->clk);
clk_put(host->clk);
-out_iounmap:
- iounmap(host->base);
out_mmc_free:
mmc_free_host(mmc);
-out_release_mem:
- release_mem_region(iores->start, resource_size(iores));
return ret;
}
@@ -806,12 +836,9 @@ static int mxs_mmc_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct mxs_mmc_host *host = mmc_priv(mmc);
- struct resource *res = host->res;
mmc_remove_host(mmc);
- free_irq(host->irq, host);
-
platform_set_drvdata(pdev, NULL);
if (host->dmach)
@@ -820,12 +847,8 @@ static int mxs_mmc_remove(struct platform_device *pdev)
clk_disable_unprepare(host->clk);
clk_put(host->clk);
- iounmap(host->base);
-
mmc_free_host(mmc);
- release_mem_region(res->start, resource_size(res));
-
return 0;
}
@@ -865,11 +888,13 @@ static const struct dev_pm_ops mxs_mmc_pm_ops = {
static struct platform_driver mxs_mmc_driver = {
.probe = mxs_mmc_probe,
.remove = mxs_mmc_remove,
+ .id_table = mxs_mmc_ids,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &mxs_mmc_pm_ops,
+ .of_match_table = mxs_mmc_dt_ids,
#endif
},
};
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 887c0e598cf3..552196c764d4 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -169,11 +169,11 @@ struct mmc_omap_host {
struct timer_list clk_timer;
spinlock_t clk_lock; /* for changing enabled state */
unsigned int fclk_enabled:1;
+ struct workqueue_struct *mmc_omap_wq;
struct omap_mmc_platform_data *pdata;
};
-static struct workqueue_struct *mmc_omap_wq;
static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
{
@@ -291,7 +291,7 @@ static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
host->next_slot = new_slot;
host->mmc = new_slot->mmc;
spin_unlock_irqrestore(&host->slot_lock, flags);
- queue_work(mmc_omap_wq, &host->slot_release_work);
+ queue_work(host->mmc_omap_wq, &host->slot_release_work);
return;
}
@@ -459,7 +459,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
}
host->stop_data = data;
- queue_work(mmc_omap_wq, &host->send_stop_work);
+ queue_work(host->mmc_omap_wq, &host->send_stop_work);
}
static void
@@ -639,7 +639,7 @@ mmc_omap_cmd_timer(unsigned long data)
OMAP_MMC_WRITE(host, IE, 0);
disable_irq(host->irq);
host->abort = 1;
- queue_work(mmc_omap_wq, &host->cmd_abort_work);
+ queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
}
spin_unlock_irqrestore(&host->slot_lock, flags);
}
@@ -828,7 +828,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
host->abort = 1;
OMAP_MMC_WRITE(host, IE, 0);
disable_irq_nosync(host->irq);
- queue_work(mmc_omap_wq, &host->cmd_abort_work);
+ queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
return IRQ_HANDLED;
}
@@ -1389,13 +1389,13 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
tasklet_kill(&slot->cover_tasklet);
del_timer_sync(&slot->cover_timer);
- flush_workqueue(mmc_omap_wq);
+ flush_workqueue(slot->host->mmc_omap_wq);
mmc_remove_host(mmc);
mmc_free_host(mmc);
}
-static int __init mmc_omap_probe(struct platform_device *pdev)
+static int __devinit mmc_omap_probe(struct platform_device *pdev)
{
struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
struct mmc_omap_host *host = NULL;
@@ -1497,6 +1497,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
+ host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
+ if (!host->mmc_omap_wq)
+ goto err_plat_cleanup;
+
return 0;
err_plat_cleanup:
@@ -1518,7 +1522,7 @@ err_free_mem_region:
return ret;
}
-static int mmc_omap_remove(struct platform_device *pdev)
+static int __devexit mmc_omap_remove(struct platform_device *pdev)
{
struct mmc_omap_host *host = platform_get_drvdata(pdev);
int i;
@@ -1542,6 +1546,7 @@ static int mmc_omap_remove(struct platform_device *pdev)
iounmap(host->virt_base);
release_mem_region(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1);
+ destroy_workqueue(host->mmc_omap_wq);
kfree(host);
@@ -1599,7 +1604,8 @@ static int mmc_omap_resume(struct platform_device *pdev)
#endif
static struct platform_driver mmc_omap_driver = {
- .remove = mmc_omap_remove,
+ .probe = mmc_omap_probe,
+ .remove = __devexit_p(mmc_omap_remove),
.suspend = mmc_omap_suspend,
.resume = mmc_omap_resume,
.driver = {
@@ -1608,29 +1614,7 @@ static struct platform_driver mmc_omap_driver = {
},
};
-static int __init mmc_omap_init(void)
-{
- int ret;
-
- mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
- if (!mmc_omap_wq)
- return -ENOMEM;
-
- ret = platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
- if (ret)
- destroy_workqueue(mmc_omap_wq);
- return ret;
-}
-
-static void __exit mmc_omap_exit(void)
-{
- platform_driver_unregister(&mmc_omap_driver);
- destroy_workqueue(mmc_omap_wq);
-}
-
-module_init(mmc_omap_init);
-module_exit(mmc_omap_exit);
-
+module_platform_driver(mmc_omap_driver);
MODULE_DESCRIPTION("OMAP Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 56d4499d4388..9a7a60aeb19e 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -85,12 +85,14 @@
#define BRR_ENABLE (1 << 5)
#define DTO_ENABLE (1 << 20)
#define INIT_STREAM (1 << 1)
+#define ACEN_ACMD12 (1 << 2)
#define DP_SELECT (1 << 21)
#define DDIR (1 << 4)
#define DMA_EN 0x1
#define MSBS (1 << 5)
#define BCE (1 << 1)
#define FOUR_BIT (1 << 1)
+#define DDR (1 << 19)
#define DW8 (1 << 5)
#define CC 0x1
#define TC 0x02
@@ -115,6 +117,7 @@
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
+#define AUTO_CMD12 (1 << 0) /* Auto CMD12 support */
/*
* One controller can have multiple slots, like on some omap boards using
* omap.c controller driver. Luckily this is not currently done on any known
@@ -167,7 +170,6 @@ struct omap_hsmmc_host {
int use_dma, dma_ch;
int dma_line_tx, dma_line_rx;
int slot_id;
- int got_dbclk;
int response_busy;
int context_loss;
int vdd;
@@ -175,6 +177,7 @@ struct omap_hsmmc_host {
int reqs_blocked;
int use_reg;
int req_in_progress;
+ unsigned int flags;
struct omap_hsmmc_next next_data;
struct omap_mmc_platform_data *pdata;
@@ -520,6 +523,10 @@ static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
u32 con;
con = OMAP_HSMMC_READ(host->base, CON);
+ if (ios->timing == MMC_TIMING_UHS_DDR50)
+ con |= DDR; /* configure in DDR mode */
+ else
+ con &= ~DDR;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_8:
OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
@@ -766,6 +773,8 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
cmdtype = 0x3;
cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);
+ if ((host->flags & AUTO_CMD12) && mmc_op_multi(cmd->opcode))
+ cmdreg |= ACEN_ACMD12;
if (data) {
cmdreg |= DP_SELECT | MSBS | BCE;
@@ -796,11 +805,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
{
int dma_ch;
+ unsigned long flags;
- spin_lock(&host->irq_lock);
+ spin_lock_irqsave(&host->irq_lock, flags);
host->req_in_progress = 0;
dma_ch = host->dma_ch;
- spin_unlock(&host->irq_lock);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
omap_hsmmc_disable_irq(host);
/* Do not complete the request if DMA is still in progress */
@@ -837,11 +847,14 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
else
data->bytes_xfered = 0;
- if (!data->stop) {
+ if (data->stop && ((!(host->flags & AUTO_CMD12)) || data->error)) {
+ omap_hsmmc_start_command(host, data->stop, NULL);
+ } else {
+ if (data->stop)
+ data->stop->resp[0] = OMAP_HSMMC_READ(host->base,
+ RSP76);
omap_hsmmc_request_done(host, data->mrq);
- return;
}
- omap_hsmmc_start_command(host, data->stop, NULL);
}
/*
@@ -874,13 +887,14 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
{
int dma_ch;
+ unsigned long flags;
host->data->error = errno;
- spin_lock(&host->irq_lock);
+ spin_lock_irqsave(&host->irq_lock, flags);
dma_ch = host->dma_ch;
host->dma_ch = -1;
- spin_unlock(&host->irq_lock);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
if (host->use_dma && dma_ch != -1) {
dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
@@ -1082,7 +1096,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
/* Disable the clocks */
pm_runtime_put_sync(host->dev);
- if (host->got_dbclk)
+ if (host->dbclk)
clk_disable(host->dbclk);
/* Turn the power off */
@@ -1093,7 +1107,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
vdd);
pm_runtime_get_sync(host->dev);
- if (host->got_dbclk)
+ if (host->dbclk)
clk_enable(host->dbclk);
if (ret != 0)
@@ -1234,6 +1248,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
struct omap_hsmmc_host *host = cb_data;
struct mmc_data *data;
int dma_ch, req_in_progress;
+ unsigned long flags;
if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
@@ -1241,9 +1256,9 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
return;
}
- spin_lock(&host->irq_lock);
+ spin_lock_irqsave(&host->irq_lock, flags);
if (host->dma_ch < 0) {
- spin_unlock(&host->irq_lock);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
return;
}
@@ -1253,7 +1268,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
/* Fire up the next transfer. */
omap_hsmmc_config_dma_params(host, data,
data->sg + host->dma_sg_idx);
- spin_unlock(&host->irq_lock);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
return;
}
@@ -1264,7 +1279,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
req_in_progress = host->req_in_progress;
dma_ch = host->dma_ch;
host->dma_ch = -1;
- spin_unlock(&host->irq_lock);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
omap_free_dma(dma_ch);
@@ -1766,7 +1781,7 @@ static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
pdata->slots[0].nonremovable = true;
pdata->slots[0].no_regulator_off_init = true;
}
- of_property_read_u32(np, "ti,bus-width", &bus_width);
+ of_property_read_u32(np, "bus-width", &bus_width);
if (bus_width == 4)
pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA;
else if (bus_width == 8)
@@ -1844,6 +1859,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
host->mapbase = res->start + pdata->reg_offset;
host->base = ioremap(host->mapbase, SZ_4K);
host->power_mode = MMC_POWER_OFF;
+ host->flags = AUTO_CMD12;
host->next_data.cookie = 1;
platform_set_drvdata(pdev, host);
@@ -1885,21 +1901,17 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
omap_hsmmc_context_save(host);
- if (cpu_is_omap2430()) {
- host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
- /*
- * MMC can still work without debounce clock.
- */
- if (IS_ERR(host->dbclk))
- dev_warn(mmc_dev(host->mmc),
- "Failed to get debounce clock\n");
- else
- host->got_dbclk = 1;
-
- if (host->got_dbclk)
- if (clk_enable(host->dbclk) != 0)
- dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
- " clk failed\n");
+ host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
+ /*
+ * MMC can still work without debounce clock.
+ */
+ if (IS_ERR(host->dbclk)) {
+ dev_warn(mmc_dev(host->mmc), "Failed to get debounce clk\n");
+ host->dbclk = NULL;
+ } else if (clk_enable(host->dbclk) != 0) {
+ dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
+ clk_put(host->dbclk);
+ host->dbclk = NULL;
}
/* Since we do only SG emulation, we can have as many segs
@@ -1969,7 +1981,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
ret = request_threaded_irq(mmc_slot(host).card_detect_irq,
NULL,
omap_hsmmc_detect,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
mmc_hostname(mmc), host);
if (ret) {
dev_dbg(mmc_dev(host->mmc),
@@ -2019,7 +2031,7 @@ err_irq:
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
clk_put(host->fclk);
- if (host->got_dbclk) {
+ if (host->dbclk) {
clk_disable(host->dbclk);
clk_put(host->dbclk);
}
@@ -2030,7 +2042,9 @@ err1:
err_alloc:
omap_hsmmc_gpio_free(pdata);
err:
- release_mem_region(res->start, resource_size(res));
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
return ret;
}
@@ -2052,7 +2066,7 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
clk_put(host->fclk);
- if (host->got_dbclk) {
+ if (host->dbclk) {
clk_disable(host->dbclk);
clk_put(host->dbclk);
}
@@ -2110,7 +2124,7 @@ static int omap_hsmmc_suspend(struct device *dev)
OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
}
- if (host->got_dbclk)
+ if (host->dbclk)
clk_disable(host->dbclk);
err:
pm_runtime_put_sync(host->dev);
@@ -2131,7 +2145,7 @@ static int omap_hsmmc_resume(struct device *dev)
pm_runtime_get_sync(host->dev);
- if (host->got_dbclk)
+ if (host->dbclk)
clk_enable(host->dbclk);
if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d190d04636a7..ebbe984e5d00 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -71,6 +71,9 @@ struct pltfm_imx_data {
enum imx_esdhc_type devtype;
struct pinctrl *pinctrl;
struct esdhc_platform_data boarddata;
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+ struct clk *clk_per;
};
static struct platform_device_id imx_esdhc_devtype[] = {
@@ -404,7 +407,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
if (!np)
return -ENODEV;
- if (of_get_property(np, "fsl,card-wired", NULL))
+ if (of_get_property(np, "non-removable", NULL))
boarddata->cd_type = ESDHC_CD_PERMANENT;
if (of_get_property(np, "fsl,cd-controller", NULL))
@@ -439,7 +442,6 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
struct esdhc_platform_data *boarddata;
- struct clk *clk;
int err;
struct pltfm_imx_data *imx_data;
@@ -460,14 +462,29 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
imx_data->devtype = pdev->id_entry->driver_data;
pltfm_host->priv = imx_data;
- clk = clk_get(mmc_dev(host->mmc), NULL);
- if (IS_ERR(clk)) {
- dev_err(mmc_dev(host->mmc), "clk err\n");
- err = PTR_ERR(clk);
+ imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(imx_data->clk_ipg)) {
+ err = PTR_ERR(imx_data->clk_ipg);
goto err_clk_get;
}
- clk_prepare_enable(clk);
- pltfm_host->clk = clk;
+
+ imx_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(imx_data->clk_ahb)) {
+ err = PTR_ERR(imx_data->clk_ahb);
+ goto err_clk_get;
+ }
+
+ imx_data->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(imx_data->clk_per)) {
+ err = PTR_ERR(imx_data->clk_per);
+ goto err_clk_get;
+ }
+
+ pltfm_host->clk = imx_data->clk_per;
+
+ clk_prepare_enable(imx_data->clk_per);
+ clk_prepare_enable(imx_data->clk_ipg);
+ clk_prepare_enable(imx_data->clk_ahb);
imx_data->pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(imx_data->pinctrl)) {
@@ -567,8 +584,9 @@ no_card_detect_irq:
no_card_detect_pin:
no_board_data:
pin_err:
- clk_disable_unprepare(pltfm_host->clk);
- clk_put(pltfm_host->clk);
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
+ clk_disable_unprepare(imx_data->clk_ahb);
err_clk_get:
kfree(imx_data);
err_imx_data:
@@ -594,8 +612,10 @@ static int __devexit sdhci_esdhc_imx_remove(struct platform_device *pdev)
gpio_free(boarddata->cd_gpio);
}
- clk_disable_unprepare(pltfm_host->clk);
- clk_put(pltfm_host->clk);
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
+ clk_disable_unprepare(imx_data->clk_ahb);
+
kfree(imx_data);
sdhci_pltfm_free(pdev);
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index c5c2a48bdd94..d9a4ef4f1ed0 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -42,7 +42,8 @@ static struct sdhci_ops sdhci_pltfm_ops = {
#ifdef CONFIG_OF
static bool sdhci_of_wp_inverted(struct device_node *np)
{
- if (of_get_property(np, "sdhci,wp-inverted", NULL))
+ if (of_get_property(np, "sdhci,wp-inverted", NULL) ||
+ of_get_property(np, "wp-inverted", NULL))
return true;
/* Old device trees don't have the wp-inverted property. */
@@ -59,13 +60,16 @@ void sdhci_get_of_property(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
const __be32 *clk;
+ u32 bus_width;
int size;
if (of_device_is_available(np)) {
if (of_get_property(np, "sdhci,auto-cmd12", NULL))
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
- if (of_get_property(np, "sdhci,1-bit-only", NULL))
+ if (of_get_property(np, "sdhci,1-bit-only", NULL) ||
+ (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
+ bus_width == 1))
host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
if (sdhci_of_wp_inverted(np))
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 6dfa82e03c7e..1fe32dfa7cd4 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -75,8 +75,6 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
struct spear_sdhci *sdhci;
int ret;
- BUG_ON(pdev == NULL);
-
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem) {
ret = -ENOMEM;
@@ -84,18 +82,18 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
goto err;
}
- if (!request_mem_region(iomem->start, resource_size(iomem),
- "spear-sdhci")) {
+ if (!devm_request_mem_region(&pdev->dev, iomem->start,
+ resource_size(iomem), "spear-sdhci")) {
ret = -EBUSY;
dev_dbg(&pdev->dev, "cannot request region\n");
goto err;
}
- sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL);
+ sdhci = devm_kzalloc(&pdev->dev, sizeof(*sdhci), GFP_KERNEL);
if (!sdhci) {
ret = -ENOMEM;
dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
- goto err_kzalloc;
+ goto err;
}
/* clk enable */
@@ -103,13 +101,13 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
if (IS_ERR(sdhci->clk)) {
ret = PTR_ERR(sdhci->clk);
dev_dbg(&pdev->dev, "Error getting clock\n");
- goto err_clk_get;
+ goto err;
}
ret = clk_enable(sdhci->clk);
if (ret) {
dev_dbg(&pdev->dev, "Error enabling clock\n");
- goto err_clk_enb;
+ goto put_clk;
}
/* overwrite platform_data */
@@ -124,7 +122,7 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
if (IS_ERR(host)) {
ret = PTR_ERR(host);
dev_dbg(&pdev->dev, "error allocating host\n");
- goto err_alloc_host;
+ goto disable_clk;
}
host->hw_name = "sdhci";
@@ -132,17 +130,18 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
host->irq = platform_get_irq(pdev, 0);
host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
- host->ioaddr = ioremap(iomem->start, resource_size(iomem));
+ host->ioaddr = devm_ioremap(&pdev->dev, iomem->start,
+ resource_size(iomem));
if (!host->ioaddr) {
ret = -ENOMEM;
dev_dbg(&pdev->dev, "failed to remap registers\n");
- goto err_ioremap;
+ goto free_host;
}
ret = sdhci_add_host(host);
if (ret) {
dev_dbg(&pdev->dev, "error adding host\n");
- goto err_add_host;
+ goto free_host;
}
platform_set_drvdata(pdev, host);
@@ -161,11 +160,12 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
if (sdhci->data->card_power_gpio >= 0) {
int val = 0;
- ret = gpio_request(sdhci->data->card_power_gpio, "sdhci");
+ ret = devm_gpio_request(&pdev->dev,
+ sdhci->data->card_power_gpio, "sdhci");
if (ret < 0) {
dev_dbg(&pdev->dev, "gpio request fail: %d\n",
sdhci->data->card_power_gpio);
- goto err_pgpio_request;
+ goto set_drvdata;
}
if (sdhci->data->power_always_enb)
@@ -177,60 +177,48 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
if (ret) {
dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
sdhci->data->card_power_gpio);
- goto err_pgpio_direction;
+ goto set_drvdata;
}
}
if (sdhci->data->card_int_gpio >= 0) {
- ret = gpio_request(sdhci->data->card_int_gpio, "sdhci");
+ ret = devm_gpio_request(&pdev->dev, sdhci->data->card_int_gpio,
+ "sdhci");
if (ret < 0) {
dev_dbg(&pdev->dev, "gpio request fail: %d\n",
sdhci->data->card_int_gpio);
- goto err_igpio_request;
+ goto set_drvdata;
}
ret = gpio_direction_input(sdhci->data->card_int_gpio);
if (ret) {
dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
sdhci->data->card_int_gpio);
- goto err_igpio_direction;
+ goto set_drvdata;
}
- ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio),
+ ret = devm_request_irq(&pdev->dev,
+ gpio_to_irq(sdhci->data->card_int_gpio),
sdhci_gpio_irq, IRQF_TRIGGER_LOW,
mmc_hostname(host->mmc), pdev);
if (ret) {
dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
sdhci->data->card_int_gpio);
- goto err_igpio_request_irq;
+ goto set_drvdata;
}
}
return 0;
-err_igpio_request_irq:
-err_igpio_direction:
- if (sdhci->data->card_int_gpio >= 0)
- gpio_free(sdhci->data->card_int_gpio);
-err_igpio_request:
-err_pgpio_direction:
- if (sdhci->data->card_power_gpio >= 0)
- gpio_free(sdhci->data->card_power_gpio);
-err_pgpio_request:
+set_drvdata:
platform_set_drvdata(pdev, NULL);
sdhci_remove_host(host, 1);
-err_add_host:
- iounmap(host->ioaddr);
-err_ioremap:
+free_host:
sdhci_free_host(host);
-err_alloc_host:
+disable_clk:
clk_disable(sdhci->clk);
-err_clk_enb:
+put_clk:
clk_put(sdhci->clk);
-err_clk_get:
- kfree(sdhci);
-err_kzalloc:
- release_mem_region(iomem->start, resource_size(iomem));
err:
dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
return ret;
@@ -239,35 +227,19 @@ err:
static int __devexit sdhci_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
- struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
- int dead;
+ int dead = 0;
u32 scratch;
- if (sdhci->data) {
- if (sdhci->data->card_int_gpio >= 0) {
- free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
- gpio_free(sdhci->data->card_int_gpio);
- }
-
- if (sdhci->data->card_power_gpio >= 0)
- gpio_free(sdhci->data->card_power_gpio);
- }
-
platform_set_drvdata(pdev, NULL);
- dead = 0;
scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
if (scratch == (u32)-1)
dead = 1;
sdhci_remove_host(host, dead);
- iounmap(host->ioaddr);
sdhci_free_host(host);
clk_disable(sdhci->clk);
clk_put(sdhci->clk);
- kfree(sdhci);
- if (iomem)
- release_mem_region(iomem->start, resource_size(iomem));
return 0;
}
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index ff5a16991939..b38d8a78f6a0 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -32,8 +32,13 @@
#include "sdhci-pltfm.h"
+/* Tegra SDHOST controller vendor register definitions */
+#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120
+#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
+
#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
+#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
struct sdhci_tegra_soc_data {
struct sdhci_pltfm_data *pdata;
@@ -120,6 +125,25 @@ static irqreturn_t carddetect_irq(int irq, void *data)
return IRQ_HANDLED;
};
+static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+
+ if (!(mask & SDHCI_RESET_ALL))
+ return;
+
+ /* Erratum: Enable SDHCI spec v3.00 support */
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) {
+ u32 misc_ctrl;
+
+ misc_ctrl = sdhci_readb(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
+ sdhci_writeb(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+ }
+}
+
static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -148,6 +172,7 @@ static struct sdhci_ops tegra_sdhci_ops = {
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
.platform_8bit_width = tegra_sdhci_8bit,
+ .platform_reset_exit = tegra_sdhci_reset_exit,
};
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
@@ -178,6 +203,7 @@ static struct sdhci_pltfm_data sdhci_tegra30_pdata = {
static struct sdhci_tegra_soc_data soc_data_tegra30 = {
.pdata = &sdhci_tegra30_pdata,
+ .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300,
};
#endif
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index ccefdebeff14..e626732aff77 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -680,8 +680,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
}
if (count >= 0xF) {
- pr_warning("%s: Too large timeout requested for CMD%d!\n",
- mmc_hostname(host->mmc), cmd->opcode);
+ pr_warning("%s: Too large timeout 0x%x requested for CMD%d!\n",
+ mmc_hostname(host->mmc), count, cmd->opcode);
count = 0xE;
}
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index b5401e355745..c03456f17004 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -19,9 +19,9 @@
#include <linux/mtd/cfi.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
+#include <linux/of.h>
#include <lantiq_soc.h>
-#include <lantiq_platform.h>
/*
* The NOR flash is connected to the same external bus unit (EBU) as PCI.
@@ -44,8 +44,9 @@ struct ltq_mtd {
struct map_info *map;
};
-static char ltq_map_name[] = "ltq_nor";
-static const char *ltq_probe_types[] __devinitconst = { "cmdlinepart", NULL };
+static const char ltq_map_name[] = "ltq_nor";
+static const char *ltq_probe_types[] __devinitconst = {
+ "cmdlinepart", "ofpart", NULL };
static map_word
ltq_read16(struct map_info *map, unsigned long adr)
@@ -108,42 +109,38 @@ ltq_copy_to(struct map_info *map, unsigned long to,
spin_unlock_irqrestore(&ebu_lock, flags);
}
-static int __init
+static int __devinit
ltq_mtd_probe(struct platform_device *pdev)
{
- struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev);
+ struct mtd_part_parser_data ppdata;
struct ltq_mtd *ltq_mtd;
- struct resource *res;
struct cfi_private *cfi;
int err;
+ if (of_machine_is_compatible("lantiq,falcon") &&
+ (ltq_boot_select() != BS_FLASH)) {
+ dev_err(&pdev->dev, "invalid bootstrap options\n");
+ return -ENODEV;
+ }
+
ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL);
platform_set_drvdata(pdev, ltq_mtd);
ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!ltq_mtd->res) {
- dev_err(&pdev->dev, "failed to get memory resource");
+ dev_err(&pdev->dev, "failed to get memory resource\n");
err = -ENOENT;
goto err_out;
}
- res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start,
- resource_size(ltq_mtd->res), dev_name(&pdev->dev));
- if (!ltq_mtd->res) {
- dev_err(&pdev->dev, "failed to request mem resource");
- err = -EBUSY;
- goto err_out;
- }
-
ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
- ltq_mtd->map->phys = res->start;
- ltq_mtd->map->size = resource_size(res);
- ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev,
- ltq_mtd->map->phys, ltq_mtd->map->size);
+ ltq_mtd->map->phys = ltq_mtd->res->start;
+ ltq_mtd->map->size = resource_size(ltq_mtd->res);
+ ltq_mtd->map->virt = devm_request_and_ioremap(&pdev->dev, ltq_mtd->res);
if (!ltq_mtd->map->virt) {
- dev_err(&pdev->dev, "failed to ioremap!\n");
- err = -ENOMEM;
- goto err_free;
+ dev_err(&pdev->dev, "failed to remap mem resource\n");
+ err = -EBUSY;
+ goto err_out;
}
ltq_mtd->map->name = ltq_map_name;
@@ -169,9 +166,9 @@ ltq_mtd_probe(struct platform_device *pdev)
cfi->addr_unlock1 ^= 1;
cfi->addr_unlock2 ^= 1;
- err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, NULL,
- ltq_mtd_data->parts,
- ltq_mtd_data->nr_parts);
+ ppdata.of_node = pdev->dev.of_node;
+ err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types,
+ &ppdata, NULL, 0);
if (err) {
dev_err(&pdev->dev, "failed to add partitions\n");
goto err_destroy;
@@ -204,32 +201,23 @@ ltq_mtd_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ltq_mtd_match[] = {
+ { .compatible = "lantiq,nor" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_mtd_match);
+
static struct platform_driver ltq_mtd_driver = {
+ .probe = ltq_mtd_probe,
.remove = __devexit_p(ltq_mtd_remove),
.driver = {
- .name = "ltq_nor",
+ .name = "ltq-nor",
.owner = THIS_MODULE,
+ .of_match_table = ltq_mtd_match,
},
};
-static int __init
-init_ltq_mtd(void)
-{
- int ret = platform_driver_probe(&ltq_mtd_driver, ltq_mtd_probe);
-
- if (ret)
- pr_err("ltq_nor: error registering platform driver");
- return ret;
-}
-
-static void __exit
-exit_ltq_mtd(void)
-{
- platform_driver_unregister(&ltq_mtd_driver);
-}
-
-module_init(init_ltq_mtd);
-module_exit(exit_ltq_mtd);
+module_platform_driver(ltq_mtd_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index cc0678a967c1..9e374e9bd296 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -690,7 +690,7 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
if (chip == -1) {
/* Disable the NFC clock */
if (host->clk_act) {
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
host->clk_act = 0;
}
return;
@@ -698,7 +698,7 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
if (!host->clk_act) {
/* Enable the NFC clock */
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
host->clk_act = 1;
}
@@ -1078,7 +1078,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
goto eclk;
}
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
host->clk_act = 1;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 0f50ef38b87b..513dc88a05ca 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -17,6 +17,8 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
#include <asm/io.h>
#include <asm/sizes.h>
#include <mach/hardware.h>
@@ -79,6 +81,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
struct nand_chip *nc;
struct orion_nand_data *board;
struct resource *res;
+ struct clk *clk;
void __iomem *io_base;
int ret = 0;
u32 val = 0;
@@ -155,6 +158,14 @@ static int __init orion_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mtd);
+ /* Not all platforms can gate the clock, so it is not
+ an error if the clock does not exist. */
+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_prepare_enable(clk);
+ clk_put(clk);
+ }
+
if (nand_scan(mtd, 1)) {
ret = -ENXIO;
goto no_dev;
@@ -184,6 +195,7 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct nand_chip *nc = mtd->priv;
+ struct clk *clk;
nand_release(mtd);
@@ -191,6 +203,12 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
kfree(nc);
+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ }
+
return 0;
}
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index ec03b401620a..9c755db6b16d 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1131,7 +1131,6 @@ static irqreturn_t
e100rxtx_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
- struct net_local *np = netdev_priv(dev);
unsigned long irqbits;
/*
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 8f2cf8c09e2d..ff7f4c5115a1 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -207,7 +207,8 @@ struct fec_enet_private {
struct net_device *netdev;
- struct clk *clk;
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
unsigned char *tx_bounce[TX_RING_SIZE];
@@ -1065,7 +1066,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* Reference Manual has an error on this, and gets fixed on i.MX6Q
* document.
*/
- fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000);
+ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
fep->phy_speed--;
fep->phy_speed <<= 1;
@@ -1618,12 +1619,20 @@ fec_probe(struct platform_device *pdev)
goto failed_pin;
}
- fep->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(fep->clk)) {
- ret = PTR_ERR(fep->clk);
+ fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(fep->clk_ipg)) {
+ ret = PTR_ERR(fep->clk_ipg);
goto failed_clk;
}
- clk_prepare_enable(fep->clk);
+
+ fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(fep->clk_ahb)) {
+ ret = PTR_ERR(fep->clk_ahb);
+ goto failed_clk;
+ }
+
+ clk_prepare_enable(fep->clk_ahb);
+ clk_prepare_enable(fep->clk_ipg);
ret = fec_enet_init(ndev);
if (ret)
@@ -1646,8 +1655,8 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_init:
- clk_disable_unprepare(fep->clk);
- clk_put(fep->clk);
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
failed_pin:
failed_clk:
for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1680,8 +1689,8 @@ fec_drv_remove(struct platform_device *pdev)
if (irq > 0)
free_irq(irq, ndev);
}
- clk_disable_unprepare(fep->clk);
- clk_put(fep->clk);
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
iounmap(fep->hwp);
free_netdev(ndev);
@@ -1705,7 +1714,8 @@ fec_suspend(struct device *dev)
fec_stop(ndev);
netif_device_detach(ndev);
}
- clk_disable_unprepare(fep->clk);
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
return 0;
}
@@ -1716,7 +1726,8 @@ fec_resume(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
- clk_prepare_enable(fep->clk);
+ clk_prepare_enable(fep->clk_ahb);
+ clk_prepare_enable(fep->clk_ipg);
if (netif_running(ndev)) {
fec_restart(ndev, fep->full_duplex);
netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 1adb0245b9dd..0741aded9eb0 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1082,7 +1082,7 @@ static int gfar_probe(struct platform_device *ofdev)
if (dev->features & NETIF_F_IP_CSUM ||
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
- dev->hard_header_len += GMAC_FCB_LEN;
+ dev->needed_headroom = GMAC_FCB_LEN;
/* Program the isrg regs only if number of grps > 1 */
if (priv->num_grps > 1) {
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index c8950da60e6b..04d901d0ff63 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -57,6 +57,7 @@
#include <linux/types.h>
#include <linux/inet_lro.h>
#include <linux/slab.h>
+#include <linux/clk.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";
@@ -289,10 +290,10 @@ struct mv643xx_eth_shared_private {
/*
* Hardware-specific parameters.
*/
- unsigned int t_clk;
int extended_rx_coal_limit;
int tx_bw_control;
int tx_csum_limit;
+
};
#define TX_BW_CONTROL_ABSENT 0
@@ -431,6 +432,12 @@ struct mv643xx_eth_private {
int tx_desc_sram_size;
int txq_count;
struct tx_queue txq[8];
+
+ /*
+ * Hardware-specific parameters.
+ */
+ struct clk *clk;
+ unsigned int t_clk;
};
@@ -1010,7 +1017,7 @@ static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
int mtu;
int bucket_size;
- token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
+ token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
if (token_rate > 1023)
token_rate = 1023;
@@ -1042,7 +1049,7 @@ static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
int token_rate;
int bucket_size;
- token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
+ token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
if (token_rate > 1023)
token_rate = 1023;
@@ -1309,7 +1316,7 @@ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
temp = (val & 0x003fff00) >> 8;
temp *= 64000000;
- do_div(temp, mp->shared->t_clk);
+ do_div(temp, mp->t_clk);
return (unsigned int)temp;
}
@@ -1319,7 +1326,7 @@ static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
u64 temp;
u32 val;
- temp = (u64)usec * mp->shared->t_clk;
+ temp = (u64)usec * mp->t_clk;
temp += 31999999;
do_div(temp, 64000000);
@@ -1345,7 +1352,7 @@ static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
temp *= 64000000;
- do_div(temp, mp->shared->t_clk);
+ do_div(temp, mp->t_clk);
return (unsigned int)temp;
}
@@ -1354,7 +1361,7 @@ static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
u64 temp;
- temp = (u64)usec * mp->shared->t_clk;
+ temp = (u64)usec * mp->t_clk;
temp += 31999999;
do_div(temp, 64000000);
@@ -2663,10 +2670,6 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
if (dram)
mv643xx_eth_conf_mbus_windows(msp, dram);
- /*
- * Detect hardware parameters.
- */
- msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
pd->tx_csum_limit : 9 * 1024;
infer_hw_params(msp);
@@ -2891,6 +2894,18 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mp->dev = dev;
+ /*
+ * Get the clk rate, if there is one, otherwise use the default.
+ */
+ mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
+ if (!IS_ERR(mp->clk)) {
+ clk_prepare_enable(mp->clk);
+ mp->t_clk = clk_get_rate(mp->clk);
+ } else {
+ mp->t_clk = 133000000;
+ printk(KERN_WARNING "Unable to get clock");
+ }
+
set_params(mp, pd);
netif_set_real_num_tx_queues(dev, mp->txq_count);
netif_set_real_num_rx_queues(dev, mp->rxq_count);
@@ -2979,6 +2994,11 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
if (mp->phy != NULL)
phy_detach(mp->phy);
cancel_work_sync(&mp->tx_timeout_task);
+
+ if (!IS_ERR(mp->clk)) {
+ clk_disable_unprepare(mp->clk);
+ clk_put(mp->clk);
+ }
free_netdev(mp->dev);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4de73643fec6..d1827e887f4e 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1096,20 +1096,20 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
- goto err_out;
+ goto err_out_disable_dev;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
- goto err_out;
+ goto err_out_disable_dev;
}
/* IO Size check */
if (pci_resource_len(pdev, bar) < io_size) {
dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
err = -EIO;
- goto err_out;
+ goto err_out_disable_dev;
}
pci_set_master(pdev);
@@ -1117,7 +1117,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(struct r6040_private));
if (!dev) {
err = -ENOMEM;
- goto err_out;
+ goto err_out_disable_dev;
}
SET_NETDEV_DEV(dev, &pdev->dev);
lp = netdev_priv(dev);
@@ -1233,11 +1233,15 @@ err_out_mdio_irq:
err_out_mdio:
mdiobus_free(lp->mii_bus);
err_out_unmap:
+ netif_napi_del(&lp->napi);
+ pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
err_out_free_dev:
free_netdev(dev);
+err_out_disable_dev:
+ pci_disable_device(pdev);
err_out:
return err;
}
@@ -1251,6 +1255,9 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
mdiobus_unregister(lp->mii_bus);
kfree(lp->mii_bus->irq);
mdiobus_free(lp->mii_bus);
+ netif_napi_del(&lp->napi);
+ pci_set_drvdata(pdev, NULL);
+ pci_iounmap(pdev, lp->base);
pci_release_regions(pdev);
free_netdev(dev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index be3c22179161..667169b82526 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1101,8 +1101,12 @@ static int sh_eth_rx(struct net_device *ndev)
/* Restart Rx engine if stopped. */
/* If we don't need to check status, don't. -KDU */
- if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
+ if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
+ /* fix the values for the next reception */
+ mdp->cur_rx = mdp->dirty_rx = (sh_eth_read(ndev, RDFAR) -
+ sh_eth_read(ndev, RDLAR)) >> 4;
sh_eth_write(ndev, EDRRR_R, EDRRR);
+ }
return 0;
}
@@ -1199,8 +1203,6 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
/* Receive Descriptor Empty int */
ndev->stats.rx_over_errors++;
- if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
- sh_eth_write(ndev, EDRRR_R, EDRRR);
if (netif_msg_rx_err(mdp))
dev_err(&ndev->dev, "Receive Descriptor Empty\n");
}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index dab9c6f671ec..1466e5d2af44 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2390,11 +2390,11 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
retval = smsc911x_request_resources(pdev);
if (retval)
- goto out_return_resources;
+ goto out_request_resources_fail;
retval = smsc911x_enable_resources(pdev);
if (retval)
- goto out_disable_resources;
+ goto out_enable_resources_fail;
if (pdata->ioaddr == NULL) {
SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
@@ -2501,8 +2501,9 @@ out_free_irq:
free_irq(dev->irq, dev);
out_disable_resources:
(void)smsc911x_disable_resources(pdev);
-out_return_resources:
+out_enable_resources_fail:
smsc911x_free_resources(pdev);
+out_request_resources_fail:
platform_set_drvdata(pdev, NULL);
iounmap(pdata->ioaddr);
free_netdev(dev);
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index b42252c4bec8..1b173a6145d6 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -51,7 +51,7 @@ config TI_DAVINCI_CPDMA
config TI_CPSW
tristate "TI CPSW Switch Support"
- depends on ARM && (ARCH_DAVINCI || SOC_OMAPAM33XX)
+ depends on ARM && (ARCH_DAVINCI || SOC_AM33XX)
select TI_DAVINCI_CPDMA
select TI_DAVINCI_MDIO
---help---
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 71e2b0523bc2..3ae80eccd0ef 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -35,6 +35,7 @@
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
+#include <linux/if_vlan.h>
#define DRIVER_VERSION "22-Dec-2011"
#define DRIVER_NAME "asix"
@@ -321,7 +322,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 0;
}
- if ((size > dev->net->mtu + ETH_HLEN) ||
+ if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
(size + offset > skb->len)) {
netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
size);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 380dbea6109d..3b206786b5e7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -547,6 +547,8 @@ static const struct usb_device_id products[] = {
{QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
{QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
{QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+ {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
+ {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
{ } /* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9ce6995e8d08..5214b1eceb95 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1231,11 +1231,6 @@ static int virtnet_freeze(struct virtio_device *vdev)
vi->config_enable = false;
mutex_unlock(&vi->config_lock);
- virtqueue_disable_cb(vi->rvq);
- virtqueue_disable_cb(vi->svq);
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
- virtqueue_disable_cb(vi->cvq);
-
netif_device_detach(vi->dev);
cancel_delayed_work_sync(&vi->refill);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 0ba81a66061f..fbaa30930076 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2415,6 +2415,22 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
* Initialization routines *
\*************************/
+static const struct ieee80211_iface_limit if_limits[] = {
+ { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) },
+ { .max = 4, .types =
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination if_comb = {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+};
+
int __devinit
ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
{
@@ -2436,6 +2452,9 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
+ hw->wiphy->iface_combinations = &if_comb;
+ hw->wiphy->n_iface_combinations = 1;
+
/* SW support for IBSS_RSN is provided by mac80211 */
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 28a65d3a03d0..b869a358ce43 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -693,8 +693,8 @@ ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
ie, 2 + vif->ssid_len + beacon_ie_len,
0, GFP_KERNEL);
if (bss)
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added bss %pM to "
- "cfg80211\n", bssid);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "added bss %pM to cfg80211\n", bssid);
kfree(ie);
} else
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n");
@@ -882,6 +882,32 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
vif->sme_state = SME_DISCONNECTED;
}
+static int ath6kl_set_probed_ssids(struct ath6kl *ar,
+ struct ath6kl_vif *vif,
+ struct cfg80211_ssid *ssids, int n_ssids)
+{
+ u8 i;
+
+ if (n_ssids > MAX_PROBED_SSID_INDEX)
+ return -EINVAL;
+
+ for (i = 0; i < n_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
+ ssids[i].ssid_len ?
+ SPECIFIC_SSID_FLAG : ANY_SSID_FLAG,
+ ssids[i].ssid_len,
+ ssids[i].ssid);
+ }
+
+ /* Make sure no old entries are left behind */
+ for (i = n_ssids; i < MAX_PROBED_SSID_INDEX; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
+ DISABLE_SSID_FLAG, 0, NULL);
+ }
+
+ return 0;
+}
+
static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_scan_request *request)
{
@@ -899,36 +925,25 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
if (!ar->usr_bss_filter) {
clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
- ret = ath6kl_wmi_bssfilter_cmd(
- ar->wmi, vif->fw_vif_idx,
- (test_bit(CONNECTED, &vif->flags) ?
- ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0);
+ ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ ALL_BSS_FILTER, 0);
if (ret) {
ath6kl_err("couldn't set bss filtering\n");
return ret;
}
}
- if (request->n_ssids && request->ssids[0].ssid_len) {
- u8 i;
-
- if (request->n_ssids > (MAX_PROBED_SSID_INDEX - 1))
- request->n_ssids = MAX_PROBED_SSID_INDEX - 1;
-
- for (i = 0; i < request->n_ssids; i++)
- ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
- i + 1, SPECIFIC_SSID_FLAG,
- request->ssids[i].ssid_len,
- request->ssids[i].ssid);
- }
+ ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
+ request->n_ssids);
+ if (ret < 0)
+ return ret;
/* this also clears IE in fw if it's not set */
ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
WMI_FRAME_PROBE_REQ,
request->ie, request->ie_len);
if (ret) {
- ath6kl_err("failed to set Probe Request appie for "
- "scan");
+ ath6kl_err("failed to set Probe Request appie for scan");
return ret;
}
@@ -945,8 +960,7 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
channels = kzalloc(n_channels * sizeof(u16), GFP_KERNEL);
if (channels == NULL) {
- ath6kl_warn("failed to set scan channels, "
- "scan all channels");
+ ath6kl_warn("failed to set scan channels, scan all channels");
n_channels = 0;
}
@@ -1018,6 +1032,20 @@ out:
vif->scan_req = NULL;
}
+void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
+ enum wmi_phy_mode mode)
+{
+ enum nl80211_channel_type type;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "channel switch notify nw_type %d freq %d mode %d\n",
+ vif->nw_type, freq, mode);
+
+ type = (mode == WMI_11G_HT20) ? NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT;
+
+ cfg80211_ch_switch_notify(vif->ndev, freq, type);
+}
+
static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_index, bool pairwise,
const u8 *mac_addr,
@@ -1111,9 +1139,8 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
ar->ap_mode_bkey.key_len = key->key_len;
memcpy(ar->ap_mode_bkey.key, key->key, key->key_len);
if (!test_bit(CONNECTED, &vif->flags)) {
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay initial group "
- "key configuration until AP mode has been "
- "started\n");
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "Delay initial group key configuration until AP mode has been started\n");
/*
* The key will be set in ath6kl_connect_ap_mode() once
* the connected event is received from the target.
@@ -1129,8 +1156,8 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
* the AP mode has properly started
* (ath6kl_install_statioc_wep_keys).
*/
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay WEP key configuration "
- "until AP mode has been started\n");
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "Delay WEP key configuration until AP mode has been started\n");
vif->wep_key_list[key_index].key_len = key->key_len;
memcpy(vif->wep_key_list[key_index].key, key->key,
key->key_len);
@@ -1962,8 +1989,7 @@ static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif)
sizeof(discvr_pattern), discvr_offset,
discvr_pattern, discvr_mask);
if (ret) {
- ath6kl_err("failed to add WOW mDNS/SSDP/LLMNR "
- "pattern\n");
+ ath6kl_err("failed to add WOW mDNS/SSDP/LLMNR pattern\n");
return ret;
}
}
@@ -2031,6 +2057,10 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
u8 index = 0;
__be32 ips[MAX_IP_ADDRS];
+ /* The FW currently can't support multi-vif WoW properly. */
+ if (ar->num_vif > 1)
+ return -EIO;
+
vif = ath6kl_vif_first(ar);
if (!vif)
return -EIO;
@@ -2044,6 +2074,13 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
return -EINVAL;
+ if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags)) {
+ ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, false);
+ if (ret)
+ return ret;
+ }
+
/* Clear existing WOW patterns */
for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++)
ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx,
@@ -2147,8 +2184,8 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
ATH6KL_HOST_MODE_AWAKE);
if (ret) {
- ath6kl_warn("Failed to configure host sleep mode for "
- "wow resume: %d\n", ret);
+ ath6kl_warn("Failed to configure host sleep mode for wow resume: %d\n",
+ ret);
ar->state = ATH6KL_STATE_WOW;
return ret;
}
@@ -2172,6 +2209,13 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
ar->state = ATH6KL_STATE_ON;
+ if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags)) {
+ ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, true);
+ if (ret)
+ return ret;
+ }
+
netif_wake_queue(vif->ndev);
return 0;
@@ -2186,8 +2230,10 @@ static int ath6kl_cfg80211_deepsleep_suspend(struct ath6kl *ar)
if (!vif)
return -EIO;
- if (!ath6kl_cfg80211_ready(vif))
+ if (!test_bit(WMI_READY, &ar->flag)) {
+ ath6kl_err("deepsleep failed as wmi is not ready\n");
return -EIO;
+ }
ath6kl_cfg80211_stop_all(ar);
@@ -2447,6 +2493,24 @@ static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
band, htcap);
}
+static int ath6kl_restore_htcap(struct ath6kl_vif *vif)
+{
+ struct wiphy *wiphy = vif->ar->wiphy;
+ int band, ret = 0;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!wiphy->bands[band])
+ continue;
+
+ ret = ath6kl_set_htcap(vif, band,
+ wiphy->bands[band]->ht_cap.ht_supported);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
static bool ath6kl_is_p2p_ie(const u8 *pos)
{
return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 &&
@@ -2568,28 +2632,34 @@ static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
/* skip element id and length */
rsn_ie += 2;
- /* skip version, group cipher */
- if (rsn_ie_len < 6)
+ /* skip version */
+ if (rsn_ie_len < 2)
return -EINVAL;
- rsn_ie += 6;
- rsn_ie_len -= 6;
+ rsn_ie += 2;
+ rsn_ie_len -= 2;
+
+ /* skip group cipher suite */
+ if (rsn_ie_len < 4)
+ return 0;
+ rsn_ie += 4;
+ rsn_ie_len -= 4;
/* skip pairwise cipher suite */
if (rsn_ie_len < 2)
- return -EINVAL;
- cnt = *((u16 *) rsn_ie);
+ return 0;
+ cnt = get_unaligned_le16(rsn_ie);
rsn_ie += (2 + cnt * 4);
rsn_ie_len -= (2 + cnt * 4);
/* skip akm suite */
if (rsn_ie_len < 2)
- return -EINVAL;
- cnt = *((u16 *) rsn_ie);
+ return 0;
+ cnt = get_unaligned_le16(rsn_ie);
rsn_ie += (2 + cnt * 4);
rsn_ie_len -= (2 + cnt * 4);
if (rsn_ie_len < 2)
- return -EINVAL;
+ return 0;
memcpy(rsn_capab, rsn_ie, 2);
@@ -2766,6 +2836,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
return res;
}
+ memcpy(&vif->profile, &p, sizeof(p));
res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p);
if (res < 0)
return res;
@@ -2801,13 +2872,7 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
clear_bit(CONNECTED, &vif->flags);
/* Restore ht setting in firmware */
- if (ath6kl_set_htcap(vif, IEEE80211_BAND_2GHZ, true))
- return -EIO;
-
- if (ath6kl_set_htcap(vif, IEEE80211_BAND_5GHZ, true))
- return -EIO;
-
- return 0;
+ return ath6kl_restore_htcap(vif);
}
static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
@@ -3081,7 +3146,6 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
struct ath6kl_vif *vif = netdev_priv(dev);
u16 interval;
int ret;
- u8 i;
if (ar->state != ATH6KL_STATE_ON)
return -EIO;
@@ -3089,29 +3153,23 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
if (vif->sme_state != SME_DISCONNECTED)
return -EBUSY;
+ /* The FW currently can't support multi-vif WoW properly. */
+ if (ar->num_vif > 1)
+ return -EIO;
+
ath6kl_cfg80211_scan_complete_event(vif, true);
- for (i = 0; i < ar->wiphy->max_sched_scan_ssids; i++) {
- ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
- i, DISABLE_SSID_FLAG,
- 0, NULL);
- }
+ ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
+ request->n_ssids);
+ if (ret < 0)
+ return ret;
/* fw uses seconds, also make sure that it's >0 */
interval = max_t(u16, 1, request->interval / 1000);
ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
interval, interval,
- 10, 0, 0, 0, 3, 0, 0, 0);
-
- if (request->n_ssids && request->ssids[0].ssid_len) {
- for (i = 0; i < request->n_ssids; i++) {
- ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
- i, SPECIFIC_SSID_FLAG,
- request->ssids[i].ssid_len,
- request->ssids[i].ssid);
- }
- }
+ vif->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);
ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
ATH6KL_WOW_MODE_ENABLE,
@@ -3271,8 +3329,7 @@ void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0)
- ath6kl_warn("ath6kl_deep_sleep_enable: "
- "wmi_powermode_cmd failed\n");
+ ath6kl_warn("ath6kl_deep_sleep_enable: wmi_powermode_cmd failed\n");
return;
}
@@ -3352,6 +3409,7 @@ struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
vif->next_mode = nw_type;
vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
+ vif->bg_scan_period = 0;
vif->htcap.ht_enable = true;
memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
@@ -3393,6 +3451,7 @@ err:
int ath6kl_cfg80211_init(struct ath6kl *ar)
{
struct wiphy *wiphy = ar->wiphy;
+ bool band_2gig = false, band_5gig = false, ht = false;
int ret;
wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
@@ -3413,8 +3472,46 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
/* max num of ssids that can be probed during scanning */
wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
- wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
- wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+ switch (ar->hw.cap) {
+ case WMI_11AN_CAP:
+ ht = true;
+ case WMI_11A_CAP:
+ band_5gig = true;
+ break;
+ case WMI_11GN_CAP:
+ ht = true;
+ case WMI_11G_CAP:
+ band_2gig = true;
+ break;
+ case WMI_11AGN_CAP:
+ ht = true;
+ case WMI_11AG_CAP:
+ band_2gig = true;
+ band_5gig = true;
+ break;
+ default:
+ ath6kl_err("invalid phy capability!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Even if the fw has HT support, advertise HT cap only when
+ * the firmware has support to override the RSN capability; otherwise
+ * the 4-way handshake would fail.
+ */
+ if (!(ht &&
+ test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
+ ar->fw_capabilities))) {
+ ath6kl_band_2ghz.ht_cap.cap = 0;
+ ath6kl_band_2ghz.ht_cap.ht_supported = false;
+ ath6kl_band_5ghz.ht_cap.cap = 0;
+ ath6kl_band_5ghz.ht_cap.ht_supported = false;
+ }
+ if (band_2gig)
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+ if (band_5gig)
+ wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wiphy->cipher_suites = cipher_suites;
@@ -3430,7 +3527,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
wiphy->wowlan.pattern_min_len = 1;
wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
- wiphy->max_sched_scan_ssids = 10;
+ wiphy->max_sched_scan_ssids = MAX_PROBED_SSID_INDEX;
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
WIPHY_FLAG_HAVE_AP_SME |
@@ -3447,8 +3544,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
ar->wiphy->probe_resp_offload =
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
- NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
- NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
ret = wiphy_register(wiphy);
if (ret < 0) {
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index c5def436417f..5ea8cbb79f43 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -28,6 +28,8 @@ enum ath6kl_cfg_suspend_mode {
struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
enum nl80211_iftype type,
u8 fw_vif_idx, u8 nw_type);
+void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
+ enum wmi_phy_mode mode);
void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted);
void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 9d67964a51dd..4d9c6f142698 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -126,9 +126,9 @@ struct ath6kl_fw_ie {
#define AR6003_HW_2_0_FIRMWARE_FILE "athwlan.bin.z77"
#define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "athtcmd_ram.bin"
#define AR6003_HW_2_0_PATCH_FILE "data.patch.bin"
-#define AR6003_HW_2_0_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin"
+#define AR6003_HW_2_0_BOARD_DATA_FILE AR6003_HW_2_0_FW_DIR "/bdata.bin"
#define AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE \
- "ath6k/AR6003/hw2.0/bdata.SD31.bin"
+ AR6003_HW_2_0_FW_DIR "/bdata.SD31.bin"
/* AR6003 3.0 definitions */
#define AR6003_HW_2_1_1_VERSION 0x30000582
@@ -139,25 +139,33 @@ struct ath6kl_fw_ie {
#define AR6003_HW_2_1_1_UTF_FIRMWARE_FILE "utf.bin"
#define AR6003_HW_2_1_1_TESTSCRIPT_FILE "nullTestFlow.bin"
#define AR6003_HW_2_1_1_PATCH_FILE "data.patch.bin"
-#define AR6003_HW_2_1_1_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin"
+#define AR6003_HW_2_1_1_BOARD_DATA_FILE AR6003_HW_2_1_1_FW_DIR "/bdata.bin"
#define AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE \
- "ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
+ AR6003_HW_2_1_1_FW_DIR "/bdata.SD31.bin"
/* AR6004 1.0 definitions */
#define AR6004_HW_1_0_VERSION 0x30000623
#define AR6004_HW_1_0_FW_DIR "ath6k/AR6004/hw1.0"
#define AR6004_HW_1_0_FIRMWARE_FILE "fw.ram.bin"
-#define AR6004_HW_1_0_BOARD_DATA_FILE "ath6k/AR6004/hw1.0/bdata.bin"
+#define AR6004_HW_1_0_BOARD_DATA_FILE AR6004_HW_1_0_FW_DIR "/bdata.bin"
#define AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE \
- "ath6k/AR6004/hw1.0/bdata.DB132.bin"
+ AR6004_HW_1_0_FW_DIR "/bdata.DB132.bin"
/* AR6004 1.1 definitions */
#define AR6004_HW_1_1_VERSION 0x30000001
#define AR6004_HW_1_1_FW_DIR "ath6k/AR6004/hw1.1"
#define AR6004_HW_1_1_FIRMWARE_FILE "fw.ram.bin"
-#define AR6004_HW_1_1_BOARD_DATA_FILE "ath6k/AR6004/hw1.1/bdata.bin"
+#define AR6004_HW_1_1_BOARD_DATA_FILE AR6004_HW_1_1_FW_DIR "/bdata.bin"
#define AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE \
- "ath6k/AR6004/hw1.1/bdata.DB132.bin"
+ AR6004_HW_1_1_FW_DIR "/bdata.DB132.bin"
+
+/* AR6004 1.2 definitions */
+#define AR6004_HW_1_2_VERSION 0x300007e8
+#define AR6004_HW_1_2_FW_DIR "ath6k/AR6004/hw1.2"
+#define AR6004_HW_1_2_FIRMWARE_FILE "fw.ram.bin"
+#define AR6004_HW_1_2_BOARD_DATA_FILE AR6004_HW_1_2_FW_DIR "/bdata.bin"
+#define AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE \
+ AR6004_HW_1_2_FW_DIR "/bdata.bin"
/* Per STA data, used in AP mode */
#define STA_PS_AWAKE BIT(0)
@@ -502,6 +510,8 @@ enum ath6kl_vif_state {
WLAN_ENABLED,
STATS_UPDATE_PEND,
HOST_SLEEP_MODE_CMD_PROCESSED,
+ NETDEV_MCAST_ALL_ON,
+ NETDEV_MCAST_ALL_OFF,
};
struct ath6kl_vif {
@@ -549,9 +559,11 @@ struct ath6kl_vif {
u16 assoc_bss_beacon_int;
u16 listen_intvl_t;
u16 bmiss_time_t;
+ u16 bg_scan_period;
u8 assoc_bss_dtim_period;
struct net_device_stats net_stats;
struct target_stats target_stats;
+ struct wmi_connect_cmd profile;
struct list_head mc_filter;
};
@@ -640,6 +652,7 @@ struct ath6kl {
u8 sta_list_index;
struct ath6kl_req_key ap_mode_bkey;
struct sk_buff_head mcastpsq;
+ u32 want_ch_switch;
/*
* FIXME: protects access to mcastpsq but is actually useless as
@@ -672,6 +685,7 @@ struct ath6kl {
u32 refclk_hz;
u32 uarttx_pin;
u32 testscript_addr;
+ enum wmi_phy_cap cap;
struct ath6kl_hw_fw {
const char *dir;
@@ -805,7 +819,8 @@ void aggr_reset_state(struct aggr_info_conn *aggr_conn);
struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr);
struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
-void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver);
+void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver,
+ enum wmi_phy_cap cap);
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
enum htc_endpoint_id eid);
void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 1b76aff78508..15cfe30e54fd 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -401,8 +401,10 @@ static ssize_t ath6kl_fwlog_block_read(struct file *file,
ret = wait_for_completion_interruptible(
&ar->debug.fwlog_completion);
- if (ret == -ERESTARTSYS)
+ if (ret == -ERESTARTSYS) {
+ vfree(buf);
return ret;
+ }
spin_lock(&ar->debug.fwlog_queue.lock);
}
@@ -1570,10 +1572,15 @@ static ssize_t ath6kl_bgscan_int_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
+ struct ath6kl_vif *vif;
u16 bgscan_int;
char buf[32];
ssize_t len;
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
@@ -1585,6 +1592,8 @@ static ssize_t ath6kl_bgscan_int_write(struct file *file,
if (bgscan_int == 0)
bgscan_int = 0xffff;
+ vif->bg_scan_period = bgscan_int;
+
ath6kl_wmi_scanparams_cmd(ar->wmi, 0, 0, 0, bgscan_int, 0, 0, 0, 3,
0, 0, 0);
@@ -1809,6 +1818,7 @@ int ath6kl_debug_init_fs(struct ath6kl *ar)
void ath6kl_debug_cleanup(struct ath6kl *ar)
{
skb_queue_purge(&ar->debug.fwlog_queue);
+ complete(&ar->debug.fwlog_completion);
kfree(ar->debug.roam_tbl);
}
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 065e61516d7a..2798624d3a9d 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -83,10 +83,7 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
* never goes inactive EVER.
*/
cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
- } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
- /* this is the lowest priority data endpoint */
- /* FIXME: this looks fishy, check */
- cred_info->lowestpri_ep_dist = cur_ep_dist->list;
+ }
/*
* Streams have to be created (explicit | implicit) for all
@@ -100,6 +97,13 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
*/
}
+ /*
+ * The ath6kl_credit_seek function uses list_for_each_entry_reverse
+ * to walk through the whole ep list. Therefore assign this
+ * lowestpri_ep_dist only after walking through the ep_list.
+ */
+ cred_info->lowestpri_ep_dist = cur_ep_dist->list;
+
WARN_ON(cred_info->cur_free_credits <= 0);
list_for_each_entry(cur_ep_dist, ep_list, list) {
@@ -758,7 +762,7 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
u32 txb_mask;
u8 ac = WMM_NUM_AC;
- if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) ||
+ if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
(WMI_CONTROL_SVC != endpoint->svc_id))
ac = target->dev->ar->ep2ac_map[endpoint->eid];
@@ -793,16 +797,17 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
* itself
*/
txb_mask = ((1 << ac) - 1);
- /*
- * when the scatter request resources drop below a
- * certain threshold, disable Tx bundling for all
- * AC's with priority lower than the current requesting
- * AC. Otherwise re-enable Tx bundling for them
- */
- if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
- target->tx_bndl_mask &= ~txb_mask;
- else
- target->tx_bndl_mask |= txb_mask;
+
+ /*
+ * when the scatter request resources drop below a
+ * certain threshold, disable Tx bundling for all
+ * AC's with priority lower than the current requesting
+ * AC. Otherwise re-enable Tx bundling for them
+ */
+ if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
+ target->tx_bndl_mask &= ~txb_mask;
+ else
+ target->tx_bndl_mask |= txb_mask;
}
ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
@@ -849,6 +854,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
int bundle_sent;
int n_pkts_bundle;
u8 ac = WMM_NUM_AC;
+ int status;
spin_lock_bh(&target->tx_lock);
@@ -866,7 +872,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
*/
INIT_LIST_HEAD(&txq);
- if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) ||
+ if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
(WMI_CONTROL_SVC != endpoint->svc_id))
ac = target->dev->ar->ep2ac_map[endpoint->eid];
@@ -910,7 +916,12 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
0, packet->info.tx.seqno);
- ath6kl_htc_tx_issue(target, packet);
+ status = ath6kl_htc_tx_issue(target, packet);
+
+ if (status) {
+ packet->status = status;
+ packet->completion(packet->context, packet);
+ }
}
spin_lock_bh(&target->tx_lock);
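Two behavioural fixes in the htc_mbox.c hunks are easy to miss in the noise: the service-ID test switches from || to && so the endpoint-to-AC lookup is skipped only for the two control services (with ||, the condition held for every endpoint), and a failed ath6kl_htc_tx_issue() now completes the packet with its error status instead of silently leaking it. A small sketch of the corrected predicate, with hypothetical service IDs:

#include <stdbool.h>
#include <stdio.h>

enum { CTRL_SVC = 1, WMI_SVC = 2, DATA_SVC = 3 };	/* hypothetical IDs */

/* With the old ||, this returned true for every endpoint; with && it is
 * false only for the two control services, matching the fixed code. */
static bool needs_ac_lookup(int svc_id)
{
	return (svc_id != CTRL_SVC) && (svc_id != WMI_SVC);
}

int main(void)
{
	printf("ctrl:%d wmi:%d data:%d\n",
	       needs_ac_lookup(CTRL_SVC),	/* 0 */
	       needs_ac_lookup(WMI_SVC),	/* 0 */
	       needs_ac_lookup(DATA_SVC));	/* 1 */
	return 0;
}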
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index b277b3446882..f9626c723693 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -108,8 +108,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
/* get packet at head, but don't remove it */
packet = list_first_entry(&ep->txq, struct htc_packet, list);
- if (packet == NULL)
- break;
ath6kl_dbg(ATH6KL_DBG_HTC,
"%s: got head packet:0x%p , queue depth: %d\n",
@@ -803,8 +801,6 @@ static int htc_send_packets_multiple(struct htc_target *target,
/* get first packet to find out which ep the packets will go into */
packet = list_first_entry(pkt_queue, struct htc_packet, list);
- if (packet == NULL)
- return -EINVAL;
if (packet->endpoint >= ENDPOINT_MAX) {
WARN_ON_ONCE(1);
@@ -1382,6 +1378,9 @@ static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
/* copy all the callbacks */
ep->ep_cb = conn_req->ep_cb;
+ /* initialize tx_drop_packet_threshold */
+ ep->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
+
status = ath6kl_hif_pipe_map_service(ar, ep->svc_id,
&ep->pipe.pipeid_ul,
&ep->pipe.pipeid_dl);
@@ -1636,10 +1635,6 @@ static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target,
return -EINVAL;
first = list_first_entry(pkt_queue, struct htc_packet, list);
- if (first == NULL) {
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
if (first->endpoint >= ENDPOINT_MAX) {
WARN_ON_ONCE(1);
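The htc_pipe.c hunks delete NULL checks after list_first_entry(); that macro is plain pointer arithmetic on head->next and can never evaluate to NULL, so the removed branches were dead code (emptiness has to be checked separately, which the callers already do). A compact sketch of why, with a simplified macro:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct pkt { int id; struct list_head list; };

/* Simplified list_first_entry(): container_of() applied to head->next;
 * it always yields an address, never NULL. */
#define first_entry(head, type, member) \
	((type *)((char *)((head)->next) - offsetof(type, member)))

int main(void)
{
	struct list_head q;
	struct pkt p = { .id = 42 };

	q.next = q.prev = &p.list;
	p.list.next = p.list.prev = &q;

	printf("first id = %d\n", first_entry(&q, struct pkt, list)->id);
	return 0;
}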
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 29ef50ea07d5..7eb0515f458a 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -119,6 +119,24 @@ static const struct ath6kl_hw hw_list[] = {
.fw_board = AR6004_HW_1_1_BOARD_DATA_FILE,
.fw_default_board = AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE,
},
+ {
+ .id = AR6004_HW_1_2_VERSION,
+ .name = "ar6004 hw 1.2",
+ .dataset_patch_addr = 0x436ecc,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x437000,
+ .reserved_ram_size = 9216,
+ .board_addr = 0x435c00,
+ .refclk_hz = 40000000,
+ .uarttx_pin = 11,
+
+ .fw = {
+ .dir = AR6004_HW_1_2_FW_DIR,
+ .fw = AR6004_HW_1_2_FIRMWARE_FILE,
+ },
+ .fw_board = AR6004_HW_1_2_BOARD_DATA_FILE,
+ .fw_default_board = AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE,
+ },
};
/*
@@ -445,9 +463,9 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
P2P_FLAG_MACADDR_REQ |
P2P_FLAG_HMODEL_REQ);
if (ret) {
- ath6kl_dbg(ATH6KL_DBG_TRC, "failed to request P2P "
- "capabilities (%d) - assuming P2P not "
- "supported\n", ret);
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "failed to request P2P capabilities (%d) - assuming P2P not supported\n",
+ ret);
ar->p2p = false;
}
}
@@ -456,8 +474,9 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
/* Enable Probe Request reporting for P2P */
ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, idx, true);
if (ret) {
- ath6kl_dbg(ATH6KL_DBG_TRC, "failed to enable Probe "
- "Request reporting (%d)\n", ret);
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "failed to enable Probe Request reporting (%d)\n",
+ ret);
}
}
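The init.c hunk adds an AR6004 hw 1.2 entry to the hardware parameter table keyed by .id. The hypothetical lookup loop below shows how such a table is typically consumed; the field values and helper name are illustrative, not the driver's actual API:

#include <stddef.h>
#include <stdio.h>

struct hw_params { unsigned int id; const char *name; };

static const struct hw_params hw_list[] = {
	{ 0x1f, "ar6004 hw 1.1" },
	{ 0x20, "ar6004 hw 1.2" },	/* values made up for the sketch */
};

static const struct hw_params *find_hw(unsigned int target_ver)
{
	size_t i;

	for (i = 0; i < sizeof(hw_list) / sizeof(hw_list[0]); i++)
		if (hw_list[i].id == target_ver)
			return &hw_list[i];
	return NULL;		/* unsupported target version */
}

int main(void)
{
	const struct hw_params *hw = find_hw(0x20);

	printf("%s\n", hw ? hw->name : "unsupported target");
	return 0;
}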
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 4d818f96c415..e5524470529c 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -421,8 +421,8 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
if (!ik->valid)
break;
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed addkey for "
- "the initial group key for AP mode\n");
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "Delayed addkey for the initial group key for AP mode\n");
memset(key_rsc, 0, sizeof(key_rsc));
res = ath6kl_wmi_addkey_cmd(
ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type,
@@ -430,12 +430,19 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
ik->key,
KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG);
if (res) {
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed "
- "addkey failed: %d\n", res);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "Delayed addkey failed: %d\n", res);
}
break;
}
+ if (ar->want_ch_switch & (1 << vif->fw_vif_idx)) {
+ ar->want_ch_switch &= ~(1 << vif->fw_vif_idx);
+ /* we actually don't know the phymode, default to HT20 */
+ ath6kl_cfg80211_ch_switch_notify(vif, channel,
+ WMI_11G_HT20);
+ }
+
ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
set_bit(CONNECTED, &vif->flags);
netif_carrier_on(vif->ndev);
@@ -541,7 +548,8 @@ void ath6kl_disconnect(struct ath6kl_vif *vif)
/* WMI Event handlers */
-void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
+void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver,
+ enum wmi_phy_cap cap)
{
struct ath6kl *ar = devt;
@@ -551,6 +559,7 @@ void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
ar->version.wlan_ver = sw_ver;
ar->version.abi_ver = abi_ver;
+ ar->hw.cap = cap;
snprintf(ar->wiphy->fw_version,
sizeof(ar->wiphy->fw_version),
@@ -584,6 +593,45 @@ void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status);
}
+static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
+{
+
+ struct ath6kl *ar = vif->ar;
+
+ vif->next_chan = channel;
+ vif->profile.ch = cpu_to_le16(channel);
+
+ switch (vif->nw_type) {
+ case AP_NETWORK:
+ return ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx,
+ &vif->profile);
+ default:
+ ath6kl_err("won't switch channels nw_type=%d\n", vif->nw_type);
+ return -ENOTSUPP;
+ }
+}
+
+static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel)
+{
+
+ struct ath6kl_vif *vif;
+ int res = 0;
+
+ if (!ar->want_ch_switch)
+ return;
+
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (ar->want_ch_switch & (1 << vif->fw_vif_idx))
+ res = ath6kl_commit_ch_switch(vif, channel);
+
+ if (res)
+ ath6kl_err("channel switch failed nw_type %d res %d\n",
+ vif->nw_type, res);
+ }
+ spin_unlock_bh(&ar->list_lock);
+}
+
void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
u16 listen_int, u16 beacon_int,
enum network_type net_type, u8 beacon_ie_len,
@@ -601,9 +649,11 @@ void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
memcpy(vif->bssid, bssid, sizeof(vif->bssid));
vif->bss_ch = channel;
- if ((vif->nw_type == INFRA_NETWORK))
+ if ((vif->nw_type == INFRA_NETWORK)) {
ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
vif->listen_intvl_t, 0);
+ ath6kl_check_ch_switch(ar, channel);
+ }
netif_wake_queue(vif->ndev);
@@ -926,6 +976,11 @@ void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
struct ath6kl *ar = vif->ar;
if (vif->nw_type == AP_NETWORK) {
+ /* disconnect due to other STA vif switching channels */
+ if (reason == BSS_DISCONNECTED &&
+ prot_reason_status == WMI_AP_REASON_STA_ROAM)
+ ar->want_ch_switch |= 1 << vif->fw_vif_idx;
+
if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
return;
@@ -1090,7 +1145,7 @@ static int ath6kl_set_features(struct net_device *dev,
static void ath6kl_set_multicast_list(struct net_device *ndev)
{
struct ath6kl_vif *vif = netdev_priv(ndev);
- bool mc_all_on = false, mc_all_off = false;
+ bool mc_all_on = false;
int mc_count = netdev_mc_count(ndev);
struct netdev_hw_addr *ha;
bool found;
@@ -1102,24 +1157,41 @@ static void ath6kl_set_multicast_list(struct net_device *ndev)
!test_bit(WLAN_ENABLED, &vif->flags))
return;
+ /* Decide whether the multicast-all filter should be enabled. */
mc_all_on = !!(ndev->flags & IFF_PROMISC) ||
!!(ndev->flags & IFF_ALLMULTI) ||
!!(mc_count > ATH6K_MAX_MC_FILTERS_PER_LIST);
- mc_all_off = !(ndev->flags & IFF_MULTICAST) || mc_count == 0;
+ if (mc_all_on)
+ set_bit(NETDEV_MCAST_ALL_ON, &vif->flags);
+ else
+ clear_bit(NETDEV_MCAST_ALL_ON, &vif->flags);
+
+ mc_all_on = mc_all_on || (vif->ar->state == ATH6KL_STATE_ON);
- if (mc_all_on || mc_all_off) {
- /* Enable/disable all multicast */
- ath6kl_dbg(ATH6KL_DBG_TRC, "%s multicast filter\n",
- mc_all_on ? "enabling" : "disabling");
- ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, vif->fw_vif_idx,
+ if (!(ndev->flags & IFF_MULTICAST)) {
+ mc_all_on = false;
+ set_bit(NETDEV_MCAST_ALL_OFF, &vif->flags);
+ } else {
+ clear_bit(NETDEV_MCAST_ALL_OFF, &vif->flags);
+ }
+
+ /* Enable/disable "multicast-all" filter */
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s multicast-all filter\n",
+ mc_all_on ? "enabling" : "disabling");
+
+ ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, vif->fw_vif_idx,
mc_all_on);
- if (ret)
- ath6kl_warn("Failed to %s multicast receive\n",
- mc_all_on ? "enable" : "disable");
+ if (ret) {
+ ath6kl_warn("Failed to %s multicast-all receive\n",
+ mc_all_on ? "enable" : "disable");
return;
}
+ if (test_bit(NETDEV_MCAST_ALL_ON, &vif->flags))
+ return;
+
+ /* Keep the driver and firmware mcast list in sync. */
list_for_each_entry_safe(mc_filter, tmp, &vif->mc_filter, list) {
found = false;
netdev_for_each_mc_addr(ha, ndev) {
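The main.c hunks thread a channel switch through a per-vif bitmask: a disconnect whose reason is WMI_AP_REASON_STA_ROAM sets the bit for that interface, and a later connect event commits the switch and clears it. A small sketch of the set/test/clear bookkeeping, outside any driver context:

#include <stdio.h>

static unsigned long want_ch_switch;	/* one bit per interface index */

static void note_sta_roam(int vif_idx)
{
	want_ch_switch |= 1UL << vif_idx;
}

static void on_connect(int vif_idx, int channel)
{
	if (want_ch_switch & (1UL << vif_idx)) {
		want_ch_switch &= ~(1UL << vif_idx);
		printf("vif %d: switch to channel %d\n", vif_idx, channel);
	}
}

int main(void)
{
	note_sta_roam(1);
	on_connect(0, 2437);	/* bit not set for vif 0: nothing happens */
	on_connect(1, 2437);	/* vif 1: switch to channel 2437 */
	return 0;
}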
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 44ea7a742101..05b95405f7b5 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -552,7 +552,7 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
- if (!bus_req)
+ if (WARN_ON_ONCE(!bus_req))
return -ENOMEM;
bus_req->address = address;
@@ -915,6 +915,9 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
}
cut_pwr:
+ if (func->card && func->card->host)
+ func->card->host->pm_flags &= ~MMC_PM_KEEP_POWER;
+
return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
}
@@ -985,9 +988,8 @@ static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
}
if (status) {
- ath6kl_err("%s: failed to write initial bytes of 0x%x "
- "to window reg: 0x%X\n", __func__,
- addr, reg_addr);
+ ath6kl_err("%s: failed to write initial bytes of 0x%x to window reg: 0x%X\n",
+ __func__, addr, reg_addr);
return status;
}
@@ -1076,8 +1078,8 @@ static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
(u8 *)&ar->bmi.cmd_credits, 4,
HIF_RD_SYNC_BYTE_INC);
if (ret) {
- ath6kl_err("Unable to decrement the command credit "
- "count register: %d\n", ret);
+ ath6kl_err("Unable to decrement the command credit count register: %d\n",
+ ret);
return ret;
}
@@ -1457,3 +1459,6 @@ MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_2_FW_DIR "/" AR6004_HW_1_2_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 82f2f5cb475b..67206aedea6c 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -362,15 +362,11 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
skb, skb->data, skb->len);
/* If target is not associated */
- if (!test_bit(CONNECTED, &vif->flags)) {
- dev_kfree_skb(skb);
- return 0;
- }
+ if (!test_bit(CONNECTED, &vif->flags))
+ goto fail_tx;
- if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON)) {
- dev_kfree_skb(skb);
- return 0;
- }
+ if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
+ goto fail_tx;
if (!test_bit(WMI_READY, &ar->flag))
goto fail_tx;
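The txrx.c hunk funnels every pre-transmit failure through the existing fail_tx label instead of duplicating the free-and-return-0 sequence at each check. A sketch of the single-exit shape, with placeholder state flags rather than the driver's bits:

#include <stdbool.h>
#include <stdlib.h>

static bool connected;		/* placeholder state flags */
static bool powered_on;

static int data_tx(void *frame)
{
	if (!connected)
		goto fail_tx;
	if (!powered_on)
		goto fail_tx;

	/* ... hand the frame to the transmit path here ... */
	return 0;

fail_tx:
	free(frame);		/* one place to drop the frame */
	return 0;		/* the driver still reports success */
}

int main(void)
{
	return data_tx(malloc(64));
}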
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 44a795f14da9..3740c3d6ab88 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -1037,6 +1037,14 @@ static void ath6kl_usb_stop(struct ath6kl *ar)
hif_stop(ar);
}
+static void ath6kl_usb_cleanup_scatter(struct ath6kl *ar)
+{
+ /*
+ * USB does not support scatter requests, so there is nothing to clean up.
+ */
+ return;
+}
+
static const struct ath6kl_hif_ops ath6kl_usb_ops = {
.diag_read32 = ath6kl_usb_diag_read32,
.diag_write32 = ath6kl_usb_diag_write32,
@@ -1049,6 +1057,7 @@ static const struct ath6kl_hif_ops ath6kl_usb_ops = {
.pipe_get_default = ath6kl_usb_get_default_pipe,
.pipe_map_service = ath6kl_usb_map_service_pipe,
.pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number,
+ .cleanup_scatter = ath6kl_usb_cleanup_scatter,
};
/* ath6kl usb driver registered functions */
@@ -1208,3 +1217,6 @@ MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_2_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE);
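The usb.c hunk installs an explicitly empty cleanup_scatter op, presumably so common code can invoke the callback on every HIF back end without a NULL check. A sketch of that ops-table convention:

#include <stdio.h>

struct hif_ops {
	void (*cleanup_scatter)(void *ar);
};

static void usb_cleanup_scatter(void *ar)
{
	(void)ar;	/* USB has no scatter resources; nothing to do */
}

static const struct hif_ops usb_ops = {
	.cleanup_scatter = usb_cleanup_scatter,
};

static void core_shutdown(const struct hif_ops *ops, void *ar)
{
	ops->cleanup_scatter(ar);	/* safe: the op is never NULL */
	printf("shutdown done\n");
}

int main(void)
{
	core_shutdown(&usb_ops, NULL);
	return 0;
}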
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 7c8a9977faf5..ee8ec2394c2c 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -16,6 +16,7 @@
*/
#include <linux/ip.h>
+#include <linux/in.h>
#include "core.h"
#include "debug.h"
#include "testmode.h"
@@ -289,6 +290,13 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
layer2_priority);
} else
usr_pri = layer2_priority & 0x7;
+
+ /*
+ * Queue EAPOL frames in the same WMM_AC_VO queue
+ * as management frames.
+ */
+ if (skb->protocol == cpu_to_be16(ETH_P_PAE))
+ usr_pri = WMI_VOICE_USER_PRIORITY;
}
/*
@@ -460,8 +468,9 @@ static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
freq, dur);
chan = ieee80211_get_channel(ar->wiphy, freq);
if (!chan) {
- ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: Unknown channel "
- "(freq=%u)\n", freq);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "remain_on_chnl: Unknown channel (freq=%u)\n",
+ freq);
return -EINVAL;
}
id = vif->last_roc_id;
@@ -488,12 +497,14 @@ static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
ev = (struct wmi_cancel_remain_on_chnl_event *) datap;
freq = le32_to_cpu(ev->freq);
dur = le32_to_cpu(ev->duration);
- ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: freq=%u dur=%u "
- "status=%u\n", freq, dur, ev->status);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "cancel_remain_on_chnl: freq=%u dur=%u status=%u\n",
+ freq, dur, ev->status);
chan = ieee80211_get_channel(ar->wiphy, freq);
if (!chan) {
- ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: Unknown "
- "channel (freq=%u)\n", freq);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "cancel_remain_on_chnl: Unknown channel (freq=%u)\n",
+ freq);
return -EINVAL;
}
if (vif->last_cancel_roc_id &&
@@ -548,12 +559,12 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
freq = le32_to_cpu(ev->freq);
dlen = le16_to_cpu(ev->len);
if (datap + len < ev->data + dlen) {
- ath6kl_err("invalid wmi_p2p_rx_probe_req_event: "
- "len=%d dlen=%u\n", len, dlen);
+ ath6kl_err("invalid wmi_p2p_rx_probe_req_event: len=%d dlen=%u\n",
+ len, dlen);
return -EINVAL;
}
- ath6kl_dbg(ATH6KL_DBG_WMI, "rx_probe_req: len=%u freq=%u "
- "probe_req_report=%d\n",
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "rx_probe_req: len=%u freq=%u probe_req_report=%d\n",
dlen, freq, vif->probe_req_report);
if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
@@ -592,8 +603,8 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
freq = le32_to_cpu(ev->freq);
dlen = le16_to_cpu(ev->len);
if (datap + len < ev->data + dlen) {
- ath6kl_err("invalid wmi_rx_action_event: "
- "len=%d dlen=%u\n", len, dlen);
+ ath6kl_err("invalid wmi_rx_action_event: len=%d dlen=%u\n",
+ len, dlen);
return -EINVAL;
}
ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
@@ -687,7 +698,7 @@ static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len)
ath6kl_ready_event(wmi->parent_dev, ev->mac_addr,
le32_to_cpu(ev->sw_version),
- le32_to_cpu(ev->abi_version));
+ le32_to_cpu(ev->abi_version), ev->phy_cap);
return 0;
}
@@ -777,16 +788,15 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len,
/* AP mode start/STA connected event */
struct net_device *dev = vif->ndev;
if (memcmp(dev->dev_addr, ev->u.ap_bss.bssid, ETH_ALEN) == 0) {
- ath6kl_dbg(ATH6KL_DBG_WMI, "%s: freq %d bssid %pM "
- "(AP started)\n",
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "%s: freq %d bssid %pM (AP started)\n",
__func__, le16_to_cpu(ev->u.ap_bss.ch),
ev->u.ap_bss.bssid);
ath6kl_connect_ap_mode_bss(
vif, le16_to_cpu(ev->u.ap_bss.ch));
} else {
- ath6kl_dbg(ATH6KL_DBG_WMI, "%s: aid %u mac_addr %pM "
- "auth=%u keymgmt=%u cipher=%u apsd_info=%u "
- "(STA connected)\n",
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "%s: aid %u mac_addr %pM auth=%u keymgmt=%u cipher=%u apsd_info=%u (STA connected)\n",
__func__, ev->u.ap_sta.aid,
ev->u.ap_sta.mac_addr,
ev->u.ap_sta.auth,
@@ -1229,8 +1239,9 @@ static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
ev = (struct wmi_neighbor_report_event *) datap;
if (sizeof(*ev) + ev->num_neighbors * sizeof(struct wmi_neighbor_info)
> len) {
- ath6kl_dbg(ATH6KL_DBG_WMI, "truncated neighbor event "
- "(num=%d len=%d)\n", ev->num_neighbors, len);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "truncated neighbor event (num=%d len=%d)\n",
+ ev->num_neighbors, len);
return -EINVAL;
}
for (i = 0; i < ev->num_neighbors; i++) {
@@ -1814,12 +1825,14 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
u32 home_dwell_time, u32 force_scan_interval,
s8 num_chan, u16 *ch_list, u32 no_cck, u32 *rates)
{
+ struct ieee80211_supported_band *sband;
struct sk_buff *skb;
struct wmi_begin_scan_cmd *sc;
- s8 size;
+ s8 size, *supp_rates;
int i, band, ret;
struct ath6kl *ar = wmi->parent_dev;
int num_rates;
+ u32 ratemask;
size = sizeof(struct wmi_begin_scan_cmd);
@@ -1846,10 +1859,13 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
sc->num_ch = num_chan;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
- struct ieee80211_supported_band *sband =
- ar->wiphy->bands[band];
- u32 ratemask = rates[band];
- u8 *supp_rates = sc->supp_rates[band].rates;
+ sband = ar->wiphy->bands[band];
+
+ if (!sband)
+ continue;
+
+ ratemask = rates[band];
+ supp_rates = sc->supp_rates[band].rates;
num_rates = 0;
for (i = 0; i < sband->n_bitrates; i++) {
@@ -2129,8 +2145,8 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
struct wmi_add_cipher_key_cmd *cmd;
int ret;
- ath6kl_dbg(ATH6KL_DBG_WMI, "addkey cmd: key_index=%u key_type=%d "
- "key_usage=%d key_len=%d key_op_ctrl=%d\n",
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "addkey cmd: key_index=%u key_type=%d key_usage=%d key_len=%d key_op_ctrl=%d\n",
key_index, key_type, key_usage, key_len, key_op_ctrl);
if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) ||
@@ -3047,8 +3063,8 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
res = ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_CONFIG_COMMIT_CMDID,
NO_SYNC_WMIFLAG);
- ath6kl_dbg(ATH6KL_DBG_WMI, "%s: nw_type=%u auth_mode=%u ch=%u "
- "ctrl_flags=0x%x-> res=%d\n",
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "%s: nw_type=%u auth_mode=%u ch=%u ctrl_flags=0x%x-> res=%d\n",
__func__, p->nw_type, p->auth_mode, le16_to_cpu(p->ch),
le32_to_cpu(p->ctrl_flags), res);
return res;
@@ -3208,8 +3224,9 @@ int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
if (!skb)
return -ENOMEM;
- ath6kl_dbg(ATH6KL_DBG_WMI, "set_appie_cmd: mgmt_frm_type=%u "
- "ie_len=%u\n", mgmt_frm_type, ie_len);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "set_appie_cmd: mgmt_frm_type=%u ie_len=%u\n",
+ mgmt_frm_type, ie_len);
p = (struct wmi_set_appie_cmd *) skb->data;
p->mgmt_frm_type = mgmt_frm_type;
p->ie_len = ie_len;
@@ -3310,8 +3327,9 @@ static int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id,
wmi->last_mgmt_tx_frame = buf;
wmi->last_mgmt_tx_frame_len = data_len;
- ath6kl_dbg(ATH6KL_DBG_WMI, "send_action_cmd: id=%u freq=%u wait=%u "
- "len=%u\n", id, freq, wait, data_len);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "send_action_cmd: id=%u freq=%u wait=%u len=%u\n",
+ id, freq, wait, data_len);
p = (struct wmi_send_action_cmd *) skb->data;
p->id = cpu_to_le32(id);
p->freq = cpu_to_le32(freq);
@@ -3348,8 +3366,9 @@ static int __ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id,
wmi->last_mgmt_tx_frame = buf;
wmi->last_mgmt_tx_frame_len = data_len;
- ath6kl_dbg(ATH6KL_DBG_WMI, "send_action_cmd: id=%u freq=%u wait=%u "
- "len=%u\n", id, freq, wait, data_len);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "send_action_cmd: id=%u freq=%u wait=%u len=%u\n",
+ id, freq, wait, data_len);
p = (struct wmi_send_mgmt_cmd *) skb->data;
p->id = cpu_to_le32(id);
p->freq = cpu_to_le32(freq);
@@ -3402,8 +3421,9 @@ int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
if (!skb)
return -ENOMEM;
- ath6kl_dbg(ATH6KL_DBG_WMI, "send_probe_response_cmd: freq=%u dst=%pM "
- "len=%u\n", freq, dst, data_len);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "send_probe_response_cmd: freq=%u dst=%pM len=%u\n",
+ freq, dst, data_len);
p = (struct wmi_p2p_probe_response_cmd *) skb->data;
p->freq = cpu_to_le32(freq);
memcpy(p->destination_addr, dst, ETH_ALEN);
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index d3d2ab5c1689..9076bec3a2ba 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -106,6 +106,8 @@ struct wmi_data_sync_bufs {
#define WMM_AC_VI 2 /* video */
#define WMM_AC_VO 3 /* voice */
+#define WMI_VOICE_USER_PRIORITY 0x7
+
struct wmi {
u16 stream_exist_for_ac[WMM_NUM_AC];
u8 fat_pipe_exist;
@@ -1151,6 +1153,7 @@ enum wmi_phy_mode {
WMI_11AG_MODE = 0x3,
WMI_11B_MODE = 0x4,
WMI_11GONLY_MODE = 0x5,
+ WMI_11G_HT20 = 0x6,
};
#define WMI_MAX_CHANNELS 32
@@ -1416,6 +1419,16 @@ struct wmi_ready_event_2 {
u8 phy_cap;
} __packed;
+/* WMI_PHY_CAPABILITY */
+enum wmi_phy_cap {
+ WMI_11A_CAP = 0x01,
+ WMI_11G_CAP = 0x02,
+ WMI_11AG_CAP = 0x03,
+ WMI_11AN_CAP = 0x04,
+ WMI_11GN_CAP = 0x05,
+ WMI_11AGN_CAP = 0x06,
+};
+
/* Connect Event */
struct wmi_connect_event {
union {
@@ -1468,6 +1481,17 @@ enum wmi_disconnect_reason {
IBSS_MERGE = 0xe,
};
+/* AP mode disconnect proto_reasons */
+enum ap_disconnect_reason {
+ WMI_AP_REASON_STA_LEFT = 101,
+ WMI_AP_REASON_FROM_HOST = 102,
+ WMI_AP_REASON_COMM_TIMEOUT = 103,
+ WMI_AP_REASON_MAX_STA = 104,
+ WMI_AP_REASON_ACL = 105,
+ WMI_AP_REASON_STA_ROAM = 106,
+ WMI_AP_REASON_DFS_CHANNEL = 107,
+};
+
#define ATH6KL_COUNTRY_RD_SHIFT 16
struct ath6kl_wmi_regdomain {
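The wmi.c and wmi.h hunks bump EAPOL frames (ethertype ETH_P_PAE) to the new WMI_VOICE_USER_PRIORITY so key-handshake frames ride the same high-priority queue as management traffic instead of the best-effort data queue. A sketch of the classifier override, with the two constants mirrored from the patch:

#include <stdint.h>
#include <stdio.h>

#define ETH_P_PAE		0x888E	/* EAPOL ethertype */
#define VOICE_USER_PRIORITY	0x7	/* mirrors WMI_VOICE_USER_PRIORITY */

static uint8_t classify(uint16_t ethertype, uint8_t layer2_priority)
{
	uint8_t usr_pri = layer2_priority & 0x7;

	if (ethertype == ETH_P_PAE)
		usr_pri = VOICE_USER_PRIORITY;	/* handshake frames jump the queue */

	return usr_pri;
}

int main(void)
{
	printf("data pri=%u eapol pri=%u\n",
	       classify(0x0800, 0), classify(ETH_P_PAE, 0));
	return 0;
}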
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index a0387a027db0..9fdd70fcaf5b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -892,34 +892,6 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
}
-static bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath9k_rtt_hist *hist;
- u32 *table;
- int i;
- bool restore;
-
- if (!ah->caldata)
- return false;
-
- hist = &ah->caldata->rtt_hist;
- if (!hist->num_readings)
- return false;
-
- ar9003_hw_rtt_enable(ah);
- ar9003_hw_rtt_set_mask(ah, 0x00);
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (!(ah->rxchainmask & (1 << i)))
- continue;
- table = &hist->table[i][hist->num_readings][0];
- ar9003_hw_rtt_load_hist(ah, i, table);
- }
- restore = ar9003_hw_rtt_force_restore(ah);
- ar9003_hw_rtt_disable(ah);
-
- return restore;
-}
-
static bool ar9003_hw_init_cal(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -942,9 +914,10 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
if (!ar9003_hw_rtt_restore(ah, chan))
run_rtt_cal = true;
- ath_dbg(common, CALIBRATE, "RTT restore %s\n",
- run_rtt_cal ? "failed" : "succeed");
+ if (run_rtt_cal)
+ ath_dbg(common, CALIBRATE, "RTT calibration to be done\n");
}
+
run_agc_cal = run_rtt_cal;
if (run_rtt_cal) {
@@ -1069,17 +1042,14 @@ skip_tx_iqcal:
#undef CL_TAB_ENTRY
if (run_rtt_cal && caldata) {
- struct ath9k_rtt_hist *hist = &caldata->rtt_hist;
- if (is_reusable && (hist->num_readings < RTT_HIST_MAX)) {
- u32 *table;
+ if (is_reusable) {
+ if (!ath9k_hw_rfbus_req(ah))
+ ath_err(ath9k_hw_common(ah),
+ "Could not stop baseband\n");
+ else
+ ar9003_hw_rtt_fill_hist(ah);
- hist->num_readings++;
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (!(ah->rxchainmask & (1 << i)))
- continue;
- table = &hist->table[i][hist->num_readings][0];
- ar9003_hw_rtt_fill_hist(ah, i, table);
- }
+ ath9k_hw_rfbus_done(ah);
}
ar9003_hw_rtt_disable(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index ac53d901801d..dfb0441f406c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3809,7 +3809,7 @@ static bool is_pmu_set(struct ath_hw *ah, u32 pmu_reg, int pmu_set)
return true;
}
-static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
{
int internal_regulator =
ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 2505ac44f0c1..8396d150ce01 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -334,4 +334,7 @@ u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
struct ath9k_channel *chan);
+
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah);
+
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 3cac293a2849..ffbb180f91e1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -756,7 +756,7 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (caldata) {
caldata->done_txiqcal_once = false;
caldata->done_txclcal_once = false;
- caldata->rtt_hist.num_readings = 0;
+ caldata->rtt_done = false;
}
if (!ath9k_hw_init_cal(ah, chan))
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
index 458bedf0b0ae..74de3539c2c8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "hw-ops.h"
#include "ar9003_phy.h"
#include "ar9003_rtt.h"
@@ -69,7 +70,7 @@ bool ar9003_hw_rtt_force_restore(struct ath_hw *ah)
}
static void ar9003_hw_rtt_load_hist_entry(struct ath_hw *ah, u8 chain,
- u32 index, u32 data28)
+ u32 index, u32 data28)
{
u32 val;
@@ -100,12 +101,21 @@ static void ar9003_hw_rtt_load_hist_entry(struct ath_hw *ah, u8 chain,
RTT_ACCESS_TIMEOUT);
}
-void ar9003_hw_rtt_load_hist(struct ath_hw *ah, u8 chain, u32 *table)
+void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
{
- int i;
+ int chain, i;
- for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++)
- ar9003_hw_rtt_load_hist_entry(ah, chain, i, table[i]);
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->rxchainmask & (1 << chain)))
+ continue;
+ for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
+ ar9003_hw_rtt_load_hist_entry(ah, chain, i,
+ ah->caldata->rtt_table[chain][i]);
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "Load RTT value at idx %d, chain %d: 0x%x\n",
+ i, chain, ah->caldata->rtt_table[chain][i]);
+ }
+ }
}
static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index)
@@ -128,27 +138,71 @@ static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index)
RTT_ACCESS_TIMEOUT))
return RTT_BAD_VALUE;
- val = REG_READ(ah, AR_PHY_RTT_TABLE_SW_INTF_1_B(chain));
+ val = MS(REG_READ(ah, AR_PHY_RTT_TABLE_SW_INTF_1_B(chain)),
+ AR_PHY_RTT_SW_RTT_TABLE_DATA);
+
return val;
}
-void ar9003_hw_rtt_fill_hist(struct ath_hw *ah, u8 chain, u32 *table)
+void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
{
- int i;
+ int chain, i;
+
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->rxchainmask & (1 << chain)))
+ continue;
+ for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
+ ah->caldata->rtt_table[chain][i] =
+ ar9003_hw_rtt_fill_hist_entry(ah, chain, i);
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "RTT value at idx %d, chain %d is: 0x%x\n",
+ i, chain, ah->caldata->rtt_table[chain][i]);
+ }
+ }
- for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++)
- table[i] = ar9003_hw_rtt_fill_hist_entry(ah, chain, i);
+ ah->caldata->rtt_done = true;
}
void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
{
- int i, j;
+ int chain, i;
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (!(ah->rxchainmask & (1 << i)))
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->rxchainmask & (1 << chain)))
continue;
- for (j = 0; j < MAX_RTT_TABLE_ENTRY; j++)
- ar9003_hw_rtt_load_hist_entry(ah, i, j, 0);
+ for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++)
+ ar9003_hw_rtt_load_hist_entry(ah, chain, i, 0);
}
+
+ if (ah->caldata)
+ ah->caldata->rtt_done = false;
+}
+
+bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ bool restore;
+
+ if (!ah->caldata)
+ return false;
+
+ if (!ah->caldata->rtt_done)
+ return false;
+
+ ar9003_hw_rtt_enable(ah);
+ ar9003_hw_rtt_set_mask(ah, 0x10);
+
+ if (!ath9k_hw_rfbus_req(ah)) {
+ ath_err(ath9k_hw_common(ah), "Could not stop baseband\n");
+ restore = false;
+ goto fail;
+ }
+
+ ar9003_hw_rtt_load_hist(ah);
+ restore = ar9003_hw_rtt_force_restore(ah);
+
+fail:
+ ath9k_hw_rfbus_done(ah);
+ ar9003_hw_rtt_disable(ah);
+ return restore;
}
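The RTT rework above replaces the multi-reading history with a single cached table per chain in caldata, and both the fill and load loops walk only the chains whose bit is set in rxchainmask. A sketch of that walk, with the patch's table dimensions and a dummy register read:

#include <stdio.h>
#include <stdint.h>

#define MAX_CHAINS		3	/* AR9300_MAX_CHAINS */
#define MAX_RTT_TABLE_ENTRY	6

static uint32_t rtt_table[MAX_CHAINS][MAX_RTT_TABLE_ENTRY];

static uint32_t read_hw_entry(int chain, int idx)	/* stand-in for the register read */
{
	return (uint32_t)(chain * 0x100 + idx);
}

static void fill_hist(unsigned int rxchainmask)
{
	int chain, i;

	for (chain = 0; chain < MAX_CHAINS; chain++) {
		if (!(rxchainmask & (1u << chain)))
			continue;	/* skip chains not present */
		for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++)
			rtt_table[chain][i] = read_hw_entry(chain, i);
	}
}

int main(void)
{
	fill_hist(0x1);		/* single-chain device: only chain 0 cached */
	printf("chain0[5]=0x%x chain1[0]=0x%x\n",
	       (unsigned int)rtt_table[0][5], (unsigned int)rtt_table[1][0]);
	return 0;
}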
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.h b/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
index 030758d087d6..a43b30d723a4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
@@ -21,8 +21,9 @@ void ar9003_hw_rtt_enable(struct ath_hw *ah);
void ar9003_hw_rtt_disable(struct ath_hw *ah);
void ar9003_hw_rtt_set_mask(struct ath_hw *ah, u32 rtt_mask);
bool ar9003_hw_rtt_force_restore(struct ath_hw *ah);
-void ar9003_hw_rtt_load_hist(struct ath_hw *ah, u8 chain, u32 *table);
-void ar9003_hw_rtt_fill_hist(struct ath_hw *ah, u8 chain, u32 *table);
+void ar9003_hw_rtt_load_hist(struct ath_hw *ah);
+void ar9003_hw_rtt_fill_hist(struct ath_hw *ah);
void ar9003_hw_rtt_clear_hist(struct ath_hw *ah);
+bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan);
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index f11d9b2677fd..1bd3a3d22101 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -18,7 +19,7 @@
#define INITVALS_9330_1P1_H
static const u32 ar9331_1p1_baseband_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
{0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
@@ -27,10 +28,10 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
{0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
{0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
- {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009e04, 0x00202020, 0x00202020, 0x00202020, 0x00202020},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
{0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
- {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e14, 0x31365d5e, 0x3136605e, 0x3136605e, 0x31365d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
@@ -55,7 +56,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
+ {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
@@ -63,7 +64,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
};
static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -155,7 +156,7 @@ static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
};
static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52},
{0x0000a2e0, 0xffb31c84, 0xffb31c84, 0xffb31c84, 0xffb31c84},
@@ -245,7 +246,7 @@ static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
};
static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -377,14 +378,14 @@ static const u32 ar9331_1p1_radio_core[][2] = {
{0x000160b4, 0x92480040},
{0x000160c0, 0x006db6db},
{0x000160c4, 0x0186db60},
- {0x000160c8, 0x6db6db6c},
+ {0x000160c8, 0x6db4db6c},
{0x000160cc, 0x6de6c300},
{0x000160d0, 0x14500820},
{0x00016100, 0x04cb0001},
{0x00016104, 0xfff80015},
{0x00016108, 0x00080010},
{0x0001610c, 0x00170000},
- {0x00016140, 0x10804000},
+ {0x00016140, 0x10800000},
{0x00016144, 0x01884080},
{0x00016148, 0x000080c0},
{0x00016280, 0x01000015},
@@ -417,7 +418,7 @@ static const u32 ar9331_1p1_radio_core[][2] = {
};
static const u32 ar9331_1p1_soc_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
};
@@ -691,7 +692,7 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
};
static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -783,7 +784,7 @@ static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
};
static const u32 ar9331_1p1_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
{0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
@@ -973,26 +974,27 @@ static const u32 ar9331_1p1_mac_core[][2] = {
static const u32 ar9331_common_rx_gain_1p1[][2] = {
/* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x01910190},
- {0x0000a030, 0x01930192},
- {0x0000a034, 0x01950194},
- {0x0000a038, 0x038a0196},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
+ {0x00009e18, 0x05000000},
+ {0x0000a000, 0x00060005},
+ {0x0000a004, 0x00810080},
+ {0x0000a008, 0x00830082},
+ {0x0000a00c, 0x00850084},
+ {0x0000a010, 0x01820181},
+ {0x0000a014, 0x01840183},
+ {0x0000a018, 0x01880185},
+ {0x0000a01c, 0x018a0189},
+ {0x0000a020, 0x02850284},
+ {0x0000a024, 0x02890288},
+ {0x0000a028, 0x028b028a},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
{0x0000a050, 0x00000000},
{0x0000a054, 0x00000000},
{0x0000a058, 0x00000000},
@@ -1005,15 +1007,15 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
{0x0000a074, 0x00000000},
{0x0000a078, 0x00000000},
{0x0000a07c, 0x00000000},
- {0x0000a080, 0x22222229},
- {0x0000a084, 0x1d1d1d1d},
- {0x0000a088, 0x1d1d1d1d},
- {0x0000a08c, 0x1d1d1d1d},
- {0x0000a090, 0x171d1d1d},
- {0x0000a094, 0x11111717},
- {0x0000a098, 0x00030311},
- {0x0000a09c, 0x00000000},
- {0x0000a0a0, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x24242428},
+ {0x0000a098, 0x171e1e1e},
+ {0x0000a09c, 0x02020b0b},
+ {0x0000a0a0, 0x02020202},
{0x0000a0a4, 0x00000000},
{0x0000a0a8, 0x00000000},
{0x0000a0ac, 0x00000000},
@@ -1021,27 +1023,27 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
{0x0000a0b4, 0x00000000},
{0x0000a0b8, 0x00000000},
{0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
+ {0x0000a0c0, 0x22072208},
+ {0x0000a0c4, 0x22052206},
+ {0x0000a0c8, 0x22032204},
+ {0x0000a0cc, 0x22012202},
+ {0x0000a0d0, 0x221f2200},
+ {0x0000a0d4, 0x221d221e},
+ {0x0000a0d8, 0x33023303},
+ {0x0000a0dc, 0x33003301},
+ {0x0000a0e0, 0x331e331f},
+ {0x0000a0e4, 0x4402331d},
+ {0x0000a0e8, 0x44004401},
+ {0x0000a0ec, 0x441e441f},
+ {0x0000a0f0, 0x55025503},
+ {0x0000a0f4, 0x55005501},
+ {0x0000a0f8, 0x551e551f},
+ {0x0000a0fc, 0x6602551d},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
{0x0000a114, 0x00000000},
{0x0000a118, 0x00000000},
{0x0000a11c, 0x00000000},
@@ -1054,26 +1056,26 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
{0x0000a138, 0x00000000},
{0x0000a13c, 0x00000000},
{0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
{0x0000a194, 0x00000000},
{0x0000a198, 0x00000000},
{0x0000a19c, 0x00000000},
@@ -1100,14 +1102,14 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
{0x0000a1f0, 0x00000396},
{0x0000a1f4, 0x00000396},
{0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
+ {0x0000a1fc, 0x00000296},
};
static const u32 ar9331_common_tx_gain_offset1_1[][1] = {
- {0},
- {3},
- {0},
- {0},
+ {0x00000000},
+ {0x00000003},
+ {0x00000000},
+ {0x00000000},
};
static const u32 ar9331_1p1_chansel_xtal_25M[] = {
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index f84477c5ebb1..7db1890448f2 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1468,6 +1468,9 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
return false;
ah->chip_fullsleep = false;
+
+ if (AR_SREV_9330(ah))
+ ar9003_hw_internal_regulator_apply(ah);
ath9k_hw_init_pll(ah, chan);
ath9k_hw_set_rfmode(ah, chan);
@@ -1702,10 +1705,10 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
* For AR9462, make sure that calibration data for
* re-using are present.
*/
- if (AR_SREV_9462(ah) && (!ah->caldata ||
- !ah->caldata->done_txiqcal_once ||
- !ah->caldata->done_txclcal_once ||
- !ah->caldata->rtt_hist.num_readings))
+ if (AR_SREV_9462(ah) && (ah->caldata &&
+ (!ah->caldata->done_txiqcal_once ||
+ !ah->caldata->done_txclcal_once ||
+ !ah->caldata->rtt_done)))
goto fail;
ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n",
@@ -1941,7 +1944,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (caldata) {
caldata->done_txiqcal_once = false;
caldata->done_txclcal_once = false;
- caldata->rtt_hist.num_readings = 0;
}
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 828b9bbc456d..b620c557c2a6 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -348,12 +348,6 @@ enum ath9k_int {
CHANNEL_HT40MINUS)
#define MAX_RTT_TABLE_ENTRY 6
-#define RTT_HIST_MAX 3
-struct ath9k_rtt_hist {
- u32 table[AR9300_MAX_CHAINS][RTT_HIST_MAX][MAX_RTT_TABLE_ENTRY];
- u8 num_readings;
-};
-
#define MAX_IQCAL_MEASUREMENT 8
#define MAX_CL_TAB_ENTRY 16
@@ -363,6 +357,7 @@ struct ath9k_hw_cal_data {
int32_t CalValid;
int8_t iCoff;
int8_t qCoff;
+ bool rtt_done;
bool paprd_done;
bool nfcal_pending;
bool nfcal_interference;
@@ -373,8 +368,8 @@ struct ath9k_hw_cal_data {
u32 num_measures[AR9300_MAX_CHAINS];
int tx_corr_coeff[MAX_IQCAL_MEASUREMENT][AR9300_MAX_CHAINS];
u32 tx_clcal[AR9300_MAX_CHAINS][MAX_CL_TAB_ENTRY];
+ u32 rtt_table[AR9300_MAX_CHAINS][MAX_RTT_TABLE_ENTRY];
struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
- struct ath9k_rtt_hist rtt_hist;
};
struct ath9k_channel {
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dfa78e8b6470..4de4473776ac 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -239,7 +239,7 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- bool ret;
+ bool ret = true;
ieee80211_stop_queues(sc->hw);
@@ -250,11 +250,12 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
ath9k_debug_samp_bb_mac(sc);
ath9k_hw_disable_interrupts(ah);
- ret = ath_drain_all_txq(sc, retry_tx);
-
if (!ath_stoprecv(sc))
ret = false;
+ if (!ath_drain_all_txq(sc, retry_tx))
+ ret = false;
+
if (!flush) {
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_rx_tasklet(sc, 1, true);
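The ath_prepare_reset() hunk stops overwriting ret with the result of the last step; ret starts at true and is downgraded whenever either the receive stop or the queue drain fails, so a later success can no longer mask an earlier failure. A sketch of the accumulate-failures pattern with dummy steps:

#include <stdbool.h>
#include <stdio.h>

static bool stop_recv(void)  { return false; }	/* pretend this step fails */
static bool drain_txq(void)  { return true; }

static bool prepare_reset(void)
{
	bool ret = true;

	if (!stop_recv())
		ret = false;	/* remember the failure... */
	if (!drain_txq())
		ret = false;	/* ...even if later steps succeed */

	return ret;
}

int main(void)
{
	printf("reset ok: %d\n", prepare_reset());	/* prints 0 */
	return 0;
}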
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 23eaa1b26ebe..d59dd01d6cde 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -64,7 +64,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ bool dequeue);
enum {
MCS_HT20,
@@ -811,7 +812,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
fi = get_frame_info(skb);
bf = fi->bf;
if (!fi->bf)
- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
if (!bf)
continue;
@@ -1726,7 +1727,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
return;
}
- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
if (!bf)
return;
@@ -1753,7 +1754,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
bf = fi->bf;
if (!bf)
- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
if (!bf)
return;
@@ -1814,7 +1815,8 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool dequeue)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_frame_info *fi = get_frame_info(skb);
@@ -1863,6 +1865,8 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
return bf;
error:
+ if (dequeue)
+ __skb_unlink(skb, &tid->buf_q);
dev_kfree_skb_any(skb);
return NULL;
}
@@ -1893,7 +1897,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
*/
ath_tx_send_ampdu(sc, tid, skb, txctl);
} else {
- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
if (!bf)
return;
diff --git a/drivers/net/wireless/b43/bus.c b/drivers/net/wireless/b43/bus.c
index 424692df239d..565fdbdd6915 100644
--- a/drivers/net/wireless/b43/bus.c
+++ b/drivers/net/wireless/b43/bus.c
@@ -107,11 +107,9 @@ struct b43_bus_dev *b43_bus_dev_bcma_init(struct bcma_device *core)
dev->dma_dev = core->dma_dev;
dev->irq = core->irq;
- /*
dev->board_vendor = core->bus->boardinfo.vendor;
dev->board_type = core->bus->boardinfo.type;
- dev->board_rev = core->bus->boardinfo.rev;
- */
+ dev->board_rev = core->bus->sprom.board_rev;
dev->chip_id = core->bus->chipinfo.id;
dev->chip_rev = core->bus->chipinfo.rev;
@@ -210,7 +208,7 @@ struct b43_bus_dev *b43_bus_dev_ssb_init(struct ssb_device *sdev)
dev->board_vendor = sdev->bus->boardinfo.vendor;
dev->board_type = sdev->bus->boardinfo.type;
- dev->board_rev = sdev->bus->boardinfo.rev;
+ dev->board_rev = sdev->bus->sprom.board_rev;
dev->chip_id = sdev->bus->chip_id;
dev->chip_rev = sdev->bus->chip_rev;
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index b5f1b91002bb..777cd74921d7 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1109,7 +1109,7 @@ static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
#ifdef CONFIG_B43_SSB
if (dev->dev->bus_type == B43_BUS_SSB &&
dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
- !(dev->dev->sdev->bus->host_pci->is_pcie &&
+ !(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
return 1;
#endif
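The dma.c hunk swaps the host_pci->is_pcie field for the pci_is_pcie() accessor. A sketch of the general accessor-over-raw-flag pattern follows; the struct and field names are illustrative and the real helper wraps the kernel's own bookkeeping:

#include <stdbool.h>
#include <stdio.h>

struct pci_dev_sketch {
	unsigned int pcie_cap_off;	/* 0 if no PCIe capability was found */
};

static bool dev_is_pcie(const struct pci_dev_sketch *dev)
{
	return dev->pcie_cap_off != 0;	/* derive, rather than cache a bool */
}

int main(void)
{
	struct pci_dev_sketch legacy = { .pcie_cap_off = 0 };
	struct pci_dev_sketch pcie   = { .pcie_cap_off = 0x60 };

	printf("legacy:%d pcie:%d\n", dev_is_pcie(&legacy), dev_is_pcie(&pcie));
	return 0;
}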
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 617afc8211b2..5a39b226b2e3 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5243,10 +5243,10 @@ static void b43_sprom_fixup(struct ssb_bus *bus)
/* boardflags workarounds */
if (bus->boardinfo.vendor == SSB_BOARDVENDOR_DELL &&
- bus->chip_id == 0x4301 && bus->boardinfo.rev == 0x74)
+ bus->chip_id == 0x4301 && bus->sprom.board_rev == 0x74)
bus->sprom.boardflags_lo |= B43_BFL_BTCOEXIST;
if (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE &&
- bus->boardinfo.type == 0x4E && bus->boardinfo.rev > 0x40)
+ bus->boardinfo.type == 0x4E && bus->sprom.board_rev > 0x40)
bus->sprom.boardflags_lo |= B43_BFL_PACTRL;
if (bus->bustype == SSB_BUSTYPE_PCI) {
pdev = bus->host_pci;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1be214b815fb..cd9c9bc186d9 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1573,8 +1573,6 @@ static void b43legacy_request_firmware(struct work_struct *work)
const char *filename;
int err;
- /* do dummy read */
- ssb_read32(dev->dev, SSB_TMSHIGH);
if (!fw->ucode) {
if (rev == 2)
filename = "ucode2";
@@ -3781,7 +3779,7 @@ static void b43legacy_sprom_fixup(struct ssb_bus *bus)
/* boardflags workarounds */
if (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE &&
bus->boardinfo.type == 0x4E &&
- bus->boardinfo.rev > 0x40)
+ bus->sprom.board_rev > 0x40)
bus->sprom.boardflags_lo |= B43legacy_BFL_PACTRL;
}
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 950334197f40..995c7d0c212a 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -408,7 +408,7 @@ static void b43legacy_phy_setupg(struct b43legacy_wldev *dev)
if (is_bcm_board_vendor(dev) &&
(dev->dev->bus->boardinfo.type == 0x0416) &&
- (dev->dev->bus->boardinfo.rev == 0x0017))
+ (dev->dev->bus->sprom.board_rev == 0x0017))
return;
b43legacy_ilt_write(dev, 0x5001, 0x0002);
@@ -424,7 +424,7 @@ static void b43legacy_phy_setupg(struct b43legacy_wldev *dev)
if (is_bcm_board_vendor(dev) &&
(dev->dev->bus->boardinfo.type == 0x0416) &&
- (dev->dev->bus->boardinfo.rev == 0x0017))
+ (dev->dev->bus->sprom.board_rev == 0x0017))
return;
b43legacy_ilt_write(dev, 0x0401, 0x0002);
diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c
index fcbafcd603cc..896177690394 100644
--- a/drivers/net/wireless/b43legacy/radio.c
+++ b/drivers/net/wireless/b43legacy/radio.c
@@ -1998,7 +1998,7 @@ u16 b43legacy_default_radio_attenuation(struct b43legacy_wldev *dev)
if (phy->type == B43legacy_PHYTYPE_G) {
if (is_bcm_board_vendor(dev) &&
dev->dev->bus->boardinfo.type == 0x421 &&
- dev->dev->bus->boardinfo.rev >= 30)
+ dev->dev->bus->sprom.board_rev >= 30)
att = 3;
else if (is_bcm_board_vendor(dev) &&
dev->dev->bus->boardinfo.type == 0x416)
@@ -2008,7 +2008,7 @@ u16 b43legacy_default_radio_attenuation(struct b43legacy_wldev *dev)
} else {
if (is_bcm_board_vendor(dev) &&
dev->dev->bus->boardinfo.type == 0x421 &&
- dev->dev->bus->boardinfo.rev >= 30)
+ dev->dev->bus->sprom.board_rev >= 30)
att = 7;
else
att = 6;
@@ -2018,7 +2018,7 @@ u16 b43legacy_default_radio_attenuation(struct b43legacy_wldev *dev)
if (phy->type == B43legacy_PHYTYPE_G) {
if (is_bcm_board_vendor(dev) &&
dev->dev->bus->boardinfo.type == 0x421 &&
- dev->dev->bus->boardinfo.rev >= 30)
+ dev->dev->bus->sprom.board_rev >= 30)
att = 3;
else if (is_bcm_board_vendor(dev) &&
dev->dev->bus->boardinfo.type ==
@@ -2052,9 +2052,9 @@ u16 b43legacy_default_radio_attenuation(struct b43legacy_wldev *dev)
}
if (is_bcm_board_vendor(dev) &&
dev->dev->bus->boardinfo.type == 0x421) {
- if (dev->dev->bus->boardinfo.rev < 0x43)
+ if (dev->dev->bus->sprom.board_rev < 0x43)
att = 2;
- else if (dev->dev->bus->boardinfo.rev < 0x51)
+ else if (dev->dev->bus->sprom.board_rev < 0x51)
att = 3;
}
if (att == 0xFFFF)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 4add7da24681..e2480d196276 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -85,18 +85,15 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
sdiodev->irq_wake = true;
/* must configure SDIO_CCCR_IENx to enable irq */
- data = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_0,
- SDIO_CCCR_IENx, &ret);
+ data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_IENx,
- data, &ret);
+ brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
/* redirect, configure and enable io for interrupt signal */
data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH)
data |= SDIO_SEPINT_ACT_HI;
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_BRCM_SEPINT,
- data, &ret);
+ brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
return 0;
}
@@ -105,9 +102,8 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
brcmf_dbg(TRACE, "Entering\n");
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_BRCM_SEPINT,
- 0, NULL);
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_IENx, 0, NULL);
+ brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
+ brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
if (sdiodev->irq_wake) {
disable_irq_wake(sdiodev->irq);
@@ -158,153 +154,147 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
}
#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
-u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_dev *sdiodev, uint fnc_num, u32 addr,
- int *err)
-{
- int status;
- s32 retry = 0;
- u8 data = 0;
-
- do {
- if (retry) /* wait for 1 ms till bus get settled down */
- udelay(1000);
- status = brcmf_sdioh_request_byte(sdiodev, SDIOH_READ, fnc_num,
- addr, (u8 *) &data);
- } while (status != 0
- && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
- if (err)
- *err = status;
-
- brcmf_dbg(INFO, "fun = %d, addr = 0x%x, u8data = 0x%x\n",
- fnc_num, addr, data);
-
- return data;
-}
-
-void
-brcmf_sdcard_cfg_write(struct brcmf_sdio_dev *sdiodev, uint fnc_num, u32 addr,
- u8 data, int *err)
-{
- int status;
- s32 retry = 0;
-
- do {
- if (retry) /* wait for 1 ms till bus get settled down */
- udelay(1000);
- status = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, fnc_num,
- addr, (u8 *) &data);
- } while (status != 0
- && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
- if (err)
- *err = status;
-
- brcmf_dbg(INFO, "fun = %d, addr = 0x%x, u8data = 0x%x\n",
- fnc_num, addr, data);
-}
-
int
brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
{
- int err = 0;
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
- (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
- if (!err)
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_SBADDRMID,
- (address >> 16) & SBSDIO_SBADDRMID_MASK,
- &err);
- if (!err)
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_SBADDRHIGH,
- (address >> 24) & SBSDIO_SBADDRHIGH_MASK,
- &err);
+ int err = 0, i;
+ u8 addr[3];
+ s32 retry;
+
+ addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
+ addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
+ addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;
+
+ for (i = 0; i < 3; i++) {
+ retry = 0;
+ do {
+ if (retry)
+ usleep_range(1000, 2000);
+ err = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE,
+ SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW + i,
+ &addr[i]);
+ } while (err != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
+
+ if (err) {
+ brcmf_dbg(ERROR, "failed at addr:0x%0x\n",
+ SBSDIO_FUNC1_SBADDRLOW + i);
+ break;
+ }
+ }
return err;
}
-u32 brcmf_sdcard_reg_read(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size)
+static int
+brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ void *data, bool write)
{
- int status;
- u32 word = 0;
- uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
-
- brcmf_dbg(INFO, "fun = 1, addr = 0x%x\n", addr);
-
- if (bar0 != sdiodev->sbwad) {
- if (brcmf_sdcard_set_sbaddr_window(sdiodev, bar0))
- return 0xFFFFFFFF;
+ u8 func_num, reg_size;
+ u32 bar;
+ s32 retry = 0;
+ int ret;
- sdiodev->sbwad = bar0;
+ /*
+ * figure out how to access the register based on the address range
+ * 0x00 ~ 0x7FF: function 0 CCCR and FBR
+ * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
+ * The rest: function 1 silicon backplane core registers
+ */
+ if ((addr & ~REG_F0_REG_MASK) == 0) {
+ func_num = SDIO_FUNC_0;
+ reg_size = 1;
+ } else if ((addr & ~REG_F1_MISC_MASK) == 0) {
+ func_num = SDIO_FUNC_1;
+ reg_size = 1;
+ } else {
+ func_num = SDIO_FUNC_1;
+ reg_size = 4;
+
+ /* Set the window for SB core register */
+ bar = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ if (bar != sdiodev->sbwad) {
+ ret = brcmf_sdcard_set_sbaddr_window(sdiodev, bar);
+ if (ret != 0) {
+ memset(data, 0xFF, reg_size);
+ return ret;
+ }
+ sdiodev->sbwad = bar;
+ }
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
}
- addr &= SBSDIO_SB_OFT_ADDR_MASK;
- if (size == 4)
- addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+ do {
+ if (!write)
+ memset(data, 0, reg_size);
+ if (retry) /* wait 1 ms for the bus to settle down */
+ usleep_range(1000, 2000);
+ if (reg_size == 1)
+ ret = brcmf_sdioh_request_byte(sdiodev, write,
+ func_num, addr, data);
+ else
+ ret = brcmf_sdioh_request_word(sdiodev, write,
+ func_num, addr, data, 4);
+ } while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
- status = brcmf_sdioh_request_word(sdiodev, SDIOH_READ, SDIO_FUNC_1,
- addr, &word, size);
+ if (ret != 0)
+ brcmf_dbg(ERROR, "failed with %d\n", ret);
- sdiodev->regfail = (status != 0);
+ return ret;
+}
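
The comment block in brcmf_sdio_regrw_helper() describes the three address ranges the helper distinguishes. Below is a stand-alone sketch of that decode, using the REG_F0_REG_MASK/REG_F1_MISC_MASK values this patch adds to sdio_host.h; it is illustrative only and not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define REG_F0_REG_MASK  0x7FF       /* values from the new sdio_host.h */
#define REG_F1_MISC_MASK 0x1FFFF

static void decode(uint32_t addr)
{
	unsigned int func, width;

	if ((addr & ~(uint32_t)REG_F0_REG_MASK) == 0) {
		func = 0; width = 1;     /* function 0 CCCR/FBR, byte access */
	} else if ((addr & ~(uint32_t)REG_F1_MISC_MASK) == 0) {
		func = 1; width = 1;     /* function 1 miscellaneous registers */
	} else {
		func = 1; width = 4;     /* backplane core register via window */
	}
	printf("0x%08x -> F%u, %u-byte access\n", addr, func, width);
}

int main(void)
{
	decode(0x00000004);   /* CCCR register */
	decode(0x00010009);   /* F1 misc register */
	decode(0x18000000);   /* backplane address */
	return 0;
}
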
- brcmf_dbg(INFO, "u32data = 0x%x\n", word);
+u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+{
+ u8 data;
+ int retval;
- /* if ok, return appropriately masked word */
- if (status == 0) {
- switch (size) {
- case sizeof(u8):
- return word & 0xff;
- case sizeof(u16):
- return word & 0xffff;
- case sizeof(u32):
- return word;
- default:
- sdiodev->regfail = true;
+ brcmf_dbg(INFO, "addr:0x%08x\n", addr);
+ retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+ brcmf_dbg(INFO, "data:0x%02x\n", data);
- }
- }
+ if (ret)
+ *ret = retval;
- /* otherwise, bad sdio access or invalid size */
- brcmf_dbg(ERROR, "error reading addr 0x%04x size %d\n", addr, size);
- return 0xFFFFFFFF;
+ return data;
}
-u32 brcmf_sdcard_reg_write(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size,
- u32 data)
+u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
- int status;
- uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
- int err = 0;
+ u32 data;
+ int retval;
- brcmf_dbg(INFO, "fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
- addr, size * 8, data);
+ brcmf_dbg(INFO, "addr:0x%08x\n", addr);
+ retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+ brcmf_dbg(INFO, "data:0x%08x\n", data);
- if (bar0 != sdiodev->sbwad) {
- err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
- if (err)
- return err;
+ if (ret)
+ *ret = retval;
- sdiodev->sbwad = bar0;
- }
+ return data;
+}
- addr &= SBSDIO_SB_OFT_ADDR_MASK;
- if (size == 4)
- addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
- status =
- brcmf_sdioh_request_word(sdiodev, SDIOH_WRITE, SDIO_FUNC_1,
- addr, &data, size);
- sdiodev->regfail = (status != 0);
+void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ u8 data, int *ret)
+{
+ int retval;
- if (status == 0)
- return 0;
+ brcmf_dbg(INFO, "addr:0x%08x, data:0x%02x\n", addr, data);
+ retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
- brcmf_dbg(ERROR, "error writing 0x%08x to addr 0x%04x size %d\n",
- data, addr, size);
- return 0xFFFFFFFF;
+ if (ret)
+ *ret = retval;
}
-bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev)
+void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ u32 data, int *ret)
{
- return sdiodev->regfail;
+ int retval;
+
+ brcmf_dbg(INFO, "addr:0x%08x, data:0x%08x\n", addr, data);
+ retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
+
+ if (ret)
+ *ret = retval;
}
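
The four accessors above replace the old cfg_read/cfg_write and reg_read/reg_write pairs; the error pointer may be NULL when the caller does not care. A rough user-space sketch of the read-modify-write idiom used at the call sites later in this patch, with the device mocked by an array; regrb()/regwb() are stand-ins for brcmf_sdio_regrb()/brcmf_sdio_regwb(), and the register offset and flag values are made up for the demo:

#include <stdint.h>
#include <stdio.h>

#define DEVICE_CTL         0x09      /* hypothetical register offset */
#define DEVCTL_CA_INT_ONLY 0x04      /* hypothetical flag bit */

static uint8_t regs[256];            /* mock register file */

static uint8_t regrb(uint32_t addr, int *err)
{
	if (err)
		*err = 0;
	return regs[addr];
}

static void regwb(uint32_t addr, uint8_t val, int *err)
{
	if (err)
		*err = 0;
	regs[addr] = val;
}

int main(void)
{
	int err;
	uint8_t devctl;

	devctl = regrb(DEVICE_CTL, &err);          /* read   */
	if (!err) {
		devctl |= DEVCTL_CA_INT_ONLY;      /* modify */
		regwb(DEVICE_CTL, devctl, &err);   /* write  */
	}
	printf("devctl=0x%02x err=%d\n", regs[DEVICE_CTL], err);
	return 0;
}
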
static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index dd07d33a927c..82f51dbd0d66 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -346,43 +346,17 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
return status;
}
-/* Read client card reg */
-static int
-brcmf_sdioh_card_regread(struct brcmf_sdio_dev *sdiodev, int func, u32 regaddr,
- int regsize, u32 *data)
-{
-
- if ((func == 0) || (regsize == 1)) {
- u8 temp = 0;
-
- brcmf_sdioh_request_byte(sdiodev, SDIOH_READ, func, regaddr,
- &temp);
- *data = temp;
- *data &= 0xff;
- brcmf_dbg(DATA, "byte read data=0x%02x\n", *data);
- } else {
- brcmf_sdioh_request_word(sdiodev, SDIOH_READ, func, regaddr,
- data, regsize);
- if (regsize == 2)
- *data &= 0xffff;
-
- brcmf_dbg(DATA, "word read data=0x%08x\n", *data);
- }
-
- return SUCCESS;
-}
-
static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr)
{
/* read 24 bits and return valid 17 bit addr */
- int i;
+ int i, ret;
u32 scratch, regdata;
__le32 scratch_le;
u8 *ptr = (u8 *)&scratch_le;
for (i = 0; i < 3; i++) {
- if ((brcmf_sdioh_card_regread(sdiodev, 0, regaddr, 1,
- &regdata)) != SUCCESS)
+ regdata = brcmf_sdio_regrl(sdiodev, regaddr, &ret);
+ if (ret != 0)
brcmf_dbg(ERROR, "Can't read!\n");
*ptr++ = (u8) regdata;
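
For context, brcmf_sdioh_get_cisaddr() above reads the 24-bit CIS pointer one byte at a time (LSB first) and keeps only the valid 17 address bits, as its comment notes. A stand-alone sketch of that assembly, illustrative only; the 0x1FFFF mask follows from the "17 bit" comment and is an assumption here:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t bytes[3] = { 0x34, 0x12, 0x01 };   /* bytes as read, LSB first */
	uint32_t cisaddr = 0;
	int i;

	for (i = 0; i < 3; i++)
		cisaddr |= (uint32_t)bytes[i] << (8 * i);

	cisaddr &= 0x1FFFF;                        /* keep the valid 17 bits */
	printf("CIS pointer = 0x%05x\n", cisaddr);
	return 0;
}
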
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 149ee67beb2e..1dbf2be478c8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -629,43 +629,29 @@ static bool data_ok(struct brcmf_sdio *bus)
* Reads a register in the SDIO hardware block. This block occupies a series of
* addresses on the 32-bit backplane bus.
*/
-static void
-r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
+static int
+r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
{
u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
- *retryvar = 0;
- do {
- *regvar = brcmf_sdcard_reg_read(bus->sdiodev,
- bus->ci->c_inf[idx].base + reg_offset,
- sizeof(u32));
- } while (brcmf_sdcard_regfail(bus->sdiodev) &&
- (++(*retryvar) <= retry_limit));
- if (*retryvar) {
- bus->regfails += (*retryvar-1);
- if (*retryvar > retry_limit) {
- brcmf_dbg(ERROR, "FAILED READ %Xh\n", reg_offset);
- *regvar = 0;
- }
- }
+ int ret;
+
+ *regvar = brcmf_sdio_regrl(bus->sdiodev,
+ bus->ci->c_inf[idx].base + offset, &ret);
+
+ return ret;
}
-static void
-w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset, u32 *retryvar)
+static int
+w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
{
u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
- *retryvar = 0;
- do {
- brcmf_sdcard_reg_write(bus->sdiodev,
- bus->ci->c_inf[idx].base + reg_offset,
- sizeof(u32), regval);
- } while (brcmf_sdcard_regfail(bus->sdiodev) &&
- (++(*retryvar) <= retry_limit));
- if (*retryvar) {
- bus->regfails += (*retryvar-1);
- if (*retryvar > retry_limit)
- brcmf_dbg(ERROR, "FAILED REGISTER WRITE %Xh\n",
- reg_offset);
- }
+ int ret;
+
+ brcmf_sdio_regwl(bus->sdiodev,
+ bus->ci->c_inf[idx].base + reg_offset,
+ regval, &ret);
+
+ return ret;
}
#define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
@@ -697,16 +683,16 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
clkreq =
bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkreq, &err);
if (err) {
brcmf_dbg(ERROR, "HT Avail request error: %d\n", err);
return -EBADE;
}
/* Check current status */
- clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ clkctl = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
brcmf_dbg(ERROR, "HT Avail read error: %d\n", err);
return -EBADE;
@@ -715,9 +701,8 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
/* Go to pending and await interrupt if appropriate */
if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
/* Allow only clock-available interrupt */
- devctl = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
if (err) {
brcmf_dbg(ERROR, "Devctl error setting CA: %d\n",
err);
@@ -725,30 +710,28 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
}
devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, devctl, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
brcmf_dbg(INFO, "CLKCTL: set PENDING\n");
bus->clkstate = CLK_PENDING;
return 0;
} else if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
- devctl =
- brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ devctl = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, devctl, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
}
/* Otherwise, wait here (polling) for HT Avail */
timeout = jiffies +
msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
- clkctl = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- &err);
+ clkctl = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ &err);
if (time_after(jiffies, timeout))
break;
else
@@ -781,17 +764,16 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
- devctl = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, devctl, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
}
bus->clkstate = CLK_SDONLY;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkreq, &err);
brcmf_dbg(INFO, "CLKCTL: turned OFF\n");
if (err) {
brcmf_dbg(ERROR, "Failed access turning clock off: %d\n",
@@ -874,7 +856,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
{
- uint retries = 0;
+ int ret;
brcmf_dbg(INFO, "request %s (currently %s)\n",
sleep ? "SLEEP" : "WAKE",
@@ -894,22 +876,20 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
/* Tell device to start using OOB wakeup */
- w_sdreg32(bus, SMB_USE_OOB,
- offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
- if (retries > retry_limit)
+ ret = w_sdreg32(bus, SMB_USE_OOB,
+ offsetof(struct sdpcmd_regs, tosbmailbox));
+ if (ret != 0)
brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n");
/* Turn off our contribution to the HT clock request */
brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
/* Isolate the bus */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL,
- SBSDIO_DEVCTL_PADS_ISO, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ SBSDIO_DEVCTL_PADS_ISO, NULL);
/* Change state */
bus->sleeping = true;
@@ -917,21 +897,20 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
} else {
/* Waking up: bus power up is ok, set local state */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ 0, NULL);
/* Make sure the controller has the bus up */
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
/* Send misc interrupt to indicate OOB not needed */
- w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, tosbmailboxdata),
- &retries);
- if (retries <= retry_limit)
- w_sdreg32(bus, SMB_DEV_INT,
- offsetof(struct sdpcmd_regs, tosbmailbox),
- &retries);
-
- if (retries > retry_limit)
+ ret = w_sdreg32(bus, 0,
+ offsetof(struct sdpcmd_regs, tosbmailboxdata));
+ if (ret == 0)
+ ret = w_sdreg32(bus, SMB_DEV_INT,
+ offsetof(struct sdpcmd_regs, tosbmailbox));
+
+ if (ret != 0)
brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP TO CLEAR OOB!!\n");
/* Make sure we have SD bus access */
@@ -955,17 +934,17 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
u32 intstatus = 0;
u32 hmb_data;
u8 fcbits;
- uint retries = 0;
+ int ret;
brcmf_dbg(TRACE, "Enter\n");
/* Read mailbox data and ack that we did so */
- r_sdreg32(bus, &hmb_data,
- offsetof(struct sdpcmd_regs, tohostmailboxdata), &retries);
+ ret = r_sdreg32(bus, &hmb_data,
+ offsetof(struct sdpcmd_regs, tohostmailboxdata));
- if (retries <= retry_limit)
+ if (ret == 0)
w_sdreg32(bus, SMB_INT_ACK,
- offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
+ offsetof(struct sdpcmd_regs, tosbmailbox));
bus->f1regdata += 2;
/* Dongle recomposed rx frames, accept them again */
@@ -1040,17 +1019,16 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
if (abort)
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_FRAMECTRL,
- SFC_RF_TERM, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_RF_TERM, &err);
bus->f1regdata++;
/* Wait until the packet has been flushed (device/FIFO stable) */
for (lastrbc = retries = 0xffff; retries > 0; retries--) {
- hi = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_RFRAMEBCHI, NULL);
- lo = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_RFRAMEBCLO, NULL);
+ hi = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_RFRAMEBCHI, &err);
+ lo = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_RFRAMEBCLO, &err);
bus->f1regdata += 2;
if ((hi == 0) && (lo == 0))
@@ -1070,11 +1048,11 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
if (rtx) {
bus->rxrtx++;
- w_sdreg32(bus, SMB_NAK,
- offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
+ err = w_sdreg32(bus, SMB_NAK,
+ offsetof(struct sdpcmd_regs, tosbmailbox));
bus->f1regdata++;
- if (retries <= retry_limit)
+ if (err == 0)
bus->rxskip = true;
}
@@ -1082,7 +1060,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
bus->nextlen = 0;
/* If we can't reach the device, signal failure */
- if (err || brcmf_sdcard_regfail(bus->sdiodev))
+ if (err)
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
@@ -2178,21 +2156,16 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
bus->tx_sderrs++;
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM,
- NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
bus->f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
- hi = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCHI,
- NULL);
- lo = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCLO,
- NULL);
+ hi = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
bus->f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
@@ -2219,7 +2192,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt;
u32 intstatus = 0;
- uint retries = 0;
int ret = 0, prec_out;
uint cnt = 0;
uint datalen;
@@ -2249,11 +2221,11 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
/* In poll mode, need to check for other events */
if (!bus->intr && cnt) {
/* Check device status, signal pending interrupt */
- r_sdreg32(bus, &intstatus,
- offsetof(struct sdpcmd_regs, intstatus),
- &retries);
+ ret = r_sdreg32(bus, &intstatus,
+ offsetof(struct sdpcmd_regs,
+ intstatus));
bus->f2txdata++;
- if (brcmf_sdcard_regfail(bus->sdiodev))
+ if (ret != 0)
break;
if (intstatus & bus->hostintmask)
bus->ipend = true;
@@ -2275,7 +2247,6 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
{
u32 local_hostintmask;
u8 saveclk;
- uint retries;
int err;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
@@ -2303,7 +2274,7 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
/* Disable and clear interrupts at the chip level also */
- w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries);
+ w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
local_hostintmask = bus->hostintmask;
bus->hostintmask = 0;
@@ -2311,24 +2282,23 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
/* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ saveclk = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (!err) {
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
}
if (err)
brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
/* Turn off the bus (F2), free any pending packets */
brcmf_dbg(INTR, "disable SDIO interrupts\n");
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, SDIO_FUNC_ENABLE_1,
+ NULL);
/* Clear any pending interrupts now that F2 is disabled */
w_sdreg32(bus, local_hostintmask,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
+ offsetof(struct sdpcmd_regs, intstatus));
/* Turn off the backplane clock (only) */
brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
@@ -2373,12 +2343,12 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
{
u32 intstatus, newstatus = 0;
- uint retries = 0;
uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
uint txlimit = bus->txbound; /* Tx frames to send before resched */
uint framecnt = 0; /* Temporary counter of tx/rx frames */
bool rxdone = true; /* Flag for no more read data */
bool resched = false; /* Flag indicating resched wanted */
+ int err;
brcmf_dbg(TRACE, "Enter\n");
@@ -2389,13 +2359,12 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
/* If waiting for HTAVAIL, check status */
if (bus->clkstate == CLK_PENDING) {
- int err;
u8 clkctl, devctl = 0;
#ifdef DEBUG
/* Check for inconsistent device control */
- devctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
if (err) {
brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err);
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
@@ -2403,8 +2372,8 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
#endif /* DEBUG */
/* Read CSR, if clock on switch to AVAIL, else ignore */
- clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ clkctl = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
brcmf_dbg(ERROR, "error reading CSR: %d\n",
err);
@@ -2415,17 +2384,16 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
devctl, clkctl);
if (SBSDIO_HTAV(clkctl)) {
- devctl = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
if (err) {
brcmf_dbg(ERROR, "error reading DEVCTL: %d\n",
err);
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, devctl, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
if (err) {
brcmf_dbg(ERROR, "error writing DEVCTL: %d\n",
err);
@@ -2447,17 +2415,17 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
/* Pending interrupt indicates new device status */
if (bus->ipend) {
bus->ipend = false;
- r_sdreg32(bus, &newstatus,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
+ err = r_sdreg32(bus, &newstatus,
+ offsetof(struct sdpcmd_regs, intstatus));
bus->f1regdata++;
- if (brcmf_sdcard_regfail(bus->sdiodev))
+ if (err != 0)
newstatus = 0;
newstatus &= bus->hostintmask;
bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
if (newstatus) {
- w_sdreg32(bus, newstatus,
- offsetof(struct sdpcmd_regs, intstatus),
- &retries);
+ err = w_sdreg32(bus, newstatus,
+ offsetof(struct sdpcmd_regs,
+ intstatus));
bus->f1regdata++;
}
}
@@ -2472,11 +2440,11 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
*/
if (intstatus & I_HMB_FC_CHANGE) {
intstatus &= ~I_HMB_FC_CHANGE;
- w_sdreg32(bus, I_HMB_FC_CHANGE,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
+ err = w_sdreg32(bus, I_HMB_FC_CHANGE,
+ offsetof(struct sdpcmd_regs, intstatus));
- r_sdreg32(bus, &newstatus,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
+ err = r_sdreg32(bus, &newstatus,
+ offsetof(struct sdpcmd_regs, intstatus));
bus->f1regdata += 2;
bus->fcstate =
!!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
@@ -2546,21 +2514,18 @@ clkwait:
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM,
- NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, &err);
bus->f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
- hi = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCHI,
- NULL);
- lo = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCLO,
- NULL);
+ hi = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCHI,
+ &err);
+ lo = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCLO,
+ &err);
bus->f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
@@ -2587,10 +2552,8 @@ clkwait:
else await next interrupt */
/* On failed register access, all bets are off:
no resched or interrupts */
- if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) ||
- brcmf_sdcard_regfail(bus->sdiodev)) {
- brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n",
- brcmf_sdcard_regfail(bus->sdiodev));
+ if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
+ brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation\n");
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
bus->intstatus = 0;
} else if (bus->clkstate == CLK_PENDING) {
@@ -2886,19 +2849,16 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
bus->f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
- hi = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCHI,
- NULL);
- lo = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCLO,
- NULL);
+ hi = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
bus->f1regdata += 2;
if (hi == 0 && lo == 0)
break;
@@ -3188,7 +3148,6 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
{
- uint retries;
int bcmerror = 0;
struct chip_info *ci = bus->ci;
@@ -3222,7 +3181,7 @@ static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
}
w_sdreg32(bus, 0xFFFFFFFF,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
+ offsetof(struct sdpcmd_regs, intstatus));
ci->resetcore(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
@@ -3444,7 +3403,6 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
unsigned long timeout;
- uint retries = 0;
u8 ready, enable;
int err, ret = 0;
u8 saveclk;
@@ -3472,13 +3430,11 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
goto exit;
/* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk =
- brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ saveclk = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (!err) {
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
}
if (err) {
brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
@@ -3487,17 +3443,16 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
/* Enable function 2 (frame transfers) */
w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
- offsetof(struct sdpcmd_regs, tosbmailboxdata), &retries);
+ offsetof(struct sdpcmd_regs, tosbmailboxdata));
enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- enable, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
timeout = jiffies + msecs_to_jiffies(BRCMF_WAIT_F2RDY);
ready = 0;
while (enable != ready) {
- ready = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_0,
- SDIO_CCCR_IORx, NULL);
+ ready = brcmf_sdio_regrb(bus->sdiodev,
+ SDIO_CCCR_IORx, NULL);
if (time_after(jiffies, timeout))
break;
else if (time_after(jiffies, timeout - BRCMF_WAIT_F2RDY + 50))
@@ -3512,21 +3467,18 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
/* Set up the interrupt mask and enable interrupts */
bus->hostintmask = HOSTINTMASK;
w_sdreg32(bus, bus->hostintmask,
- offsetof(struct sdpcmd_regs, hostintmask), &retries);
+ offsetof(struct sdpcmd_regs, hostintmask));
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_WATERMARK, 8, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
} else {
/* Disable F2 again */
enable = SDIO_FUNC_ENABLE_1;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0,
- SDIO_CCCR_IOEx, enable, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
ret = -ENODEV;
}
/* Restore previous clock setting */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
if (ret == 0) {
ret = brcmf_sdio_intr_register(bus->sdiodev);
@@ -3606,9 +3558,9 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
if (!bus->dpc_sched) {
u8 devpend;
- devpend = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_0, SDIO_CCCR_INTx,
- NULL);
+ devpend = brcmf_sdio_regrb(bus->sdiodev,
+ SDIO_CCCR_INTx,
+ NULL);
intstatus =
devpend & (INTR_STATUS_FUNC1 |
INTR_STATUS_FUNC2);
@@ -3732,24 +3684,18 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
bus->alp_only = true;
- /* Return the window to backplane enumeration space for core access */
- if (brcmf_sdcard_set_sbaddr_window(bus->sdiodev, SI_ENUM_BASE))
- brcmf_dbg(ERROR, "FAILED to return to SI_ENUM_BASE\n");
-
pr_debug("F1 signature read @0x18000000=0x%4x\n",
- brcmf_sdcard_reg_read(bus->sdiodev, SI_ENUM_BASE, 4));
+ brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
/*
* Force PLL off until brcmf_sdio_chip_attach()
* programs PLL control regs
*/
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- BRCMF_INIT_CLKCTL1, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ BRCMF_INIT_CLKCTL1, &err);
if (!err)
- clkctl =
- brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ clkctl = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
@@ -3782,9 +3728,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
reg_addr = bus->ci->c_inf[idx].base +
offsetof(struct sdpcmd_regs, corecontrol);
- reg_val = brcmf_sdcard_reg_read(bus->sdiodev, reg_addr, sizeof(u32));
- brcmf_sdcard_reg_write(bus->sdiodev, reg_addr, sizeof(u32),
- reg_val | CC_BPRESEN);
+ reg_val = brcmf_sdio_regrl(bus->sdiodev, reg_addr, NULL);
+ brcmf_sdio_regwl(bus->sdiodev, reg_addr, reg_val | CC_BPRESEN, NULL);
brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
@@ -3809,16 +3754,15 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
/* Disable F2 to clear any intermediate frame state on the dongle */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx,
+ SDIO_FUNC_ENABLE_1, NULL);
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
bus->sleeping = false;
bus->rxflow = false;
/* Done with backplane-dependent accesses, can drop clock... */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
/* ...and initialize clock/power states */
bus->clkstate = CLK_SDONLY;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index 1534efc21631..f8e1f1c84d08 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -93,8 +93,9 @@ brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbidhigh), 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbidhigh),
+ NULL);
return SBCOREREV(regdata);
}
@@ -118,8 +119,9 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
return (SSB_TMSLOW_CLOCK == regdata);
@@ -135,13 +137,13 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ NULL);
ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
- regdata = brcmf_sdcard_reg_read(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ NULL);
ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
return ret;
@@ -151,84 +153,85 @@ static void
brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci, u16 coreid)
{
- u32 regdata;
+ u32 regdata, base;
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ base = ci->c_inf[idx].base;
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
if (regdata & SSB_TMSLOW_RESET)
return;
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
/*
* set target reject and spin until busy is clear
* (preserve core-specific bits)
*/
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- 4, regdata | SSB_TMSLOW_REJECT);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
+ NULL);
+ brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+ regdata | SSB_TMSLOW_REJECT, NULL);
+
+ regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
+ NULL);
udelay(1);
- SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4) &
+ SPINWAIT((brcmf_sdio_regrl(sdiodev,
+ CORE_SB(base, sbtmstatehigh),
+ NULL) &
SSB_TMSHIGH_BUSY), 100000);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(base, sbtmstatehigh),
+ NULL);
if (regdata & SSB_TMSHIGH_BUSY)
brcmf_dbg(ERROR, "core state still busy\n");
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
+ regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbidlow),
+ NULL);
if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4) |
- SSB_IMSTATE_REJECT;
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
- regdata);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL);
+ regdata |= SSB_IMSTATE_REJECT;
+ brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbimstate),
+ regdata, NULL);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL);
udelay(1);
- SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
+ SPINWAIT((brcmf_sdio_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL) &
SSB_IMSTATE_BUSY), 100000);
}
/* set reset and reject while enabling the clocks */
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
- (SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
- SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ regdata = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+ SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
+ brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+ regdata, NULL);
+ regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
+ NULL);
udelay(10);
/* clear the initiator reject bit */
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
+ regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbidlow),
+ NULL);
if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
- ~SSB_IMSTATE_REJECT;
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
- regdata);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL);
+ regdata &= ~SSB_IMSTATE_REJECT;
+ brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbimstate),
+ regdata, NULL);
}
}
/* leave reset and reject asserted */
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
- (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+ brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+ (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
udelay(1);
}
@@ -242,20 +245,19 @@ brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
/* if core is already in reset, just return */
- regdata = brcmf_sdcard_reg_read(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ NULL);
if ((regdata & BCMA_RESET_CTL_RESET) != 0)
return;
- brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- 4, 0);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, 0, NULL);
+ regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ NULL);
udelay(10);
- brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- 4, BCMA_RESET_CTL_RESET);
+ brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ BCMA_RESET_CTL_RESET, NULL);
udelay(1);
}
@@ -279,41 +281,47 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
* set reset while enabling the clock and
* forcing them on throughout the core
*/
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ brcmf_sdio_regwl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
+ NULL);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
udelay(1);
/* clear any serror */
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
+ NULL);
if (regdata & SSB_TMSHIGH_SERR)
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4, 0);
+ brcmf_sdio_regwl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
+ 0, NULL);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate),
+ NULL);
if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
- regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO));
+ brcmf_sdio_regwl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate),
+ regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
+ NULL);
/* clear reset and allow it to propagate throughout the core */
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ brcmf_sdio_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
udelay(1);
/* leave clock enabled */
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- 4, SSB_TMSLOW_CLOCK);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ brcmf_sdio_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ SSB_TMSLOW_CLOCK, NULL);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
udelay(1);
}
@@ -330,18 +338,18 @@ brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
brcmf_sdio_ai_coredisable(sdiodev, ci, coreid);
/* now do initialization sequence */
- brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- 4, BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
- brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- 4, 0);
+ brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
+ regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ NULL);
+ brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ 0, NULL);
udelay(1);
- brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- 4, BCMA_IOCTL_CLK);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ BCMA_IOCTL_CLK, NULL);
+ regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ NULL);
udelay(1);
}
@@ -358,8 +366,9 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
*/
ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
ci->c_inf[0].base = regs;
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, chipid), 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, chipid),
+ NULL);
ci->chip = regdata & CID_ID_MASK;
ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
@@ -428,8 +437,7 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
/* Try forcing SDIO core to do ALPAvail request only */
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
if (err) {
brcmf_dbg(ERROR, "error writing for HT off\n");
return err;
@@ -437,8 +445,8 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
/* If register supported, wait for ALPAvail and then force ALP */
/* This may take up to 15 milliseconds */
- clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ clkval = brcmf_sdio_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
if ((clkval & ~SBSDIO_AVBITS) != clkset) {
brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
@@ -446,8 +454,8 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
return -EACCES;
}
- SPINWAIT(((clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ SPINWAIT(((clkval = brcmf_sdio_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
!SBSDIO_ALPAV(clkval)),
PMU_MAX_TRANSITION_DLY);
if (!SBSDIO_ALPAV(clkval)) {
@@ -457,13 +465,11 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
}
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
udelay(65);
/* Also, disable the extra SDIO pull-ups */
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+ brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
return 0;
}
@@ -472,18 +478,22 @@ static void
brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci)
{
+ u32 base = ci->c_inf[0].base;
+
/* get chipcommon rev */
ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
/* get chipcommon capabilities */
- ci->c_inf[0].caps =
- brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, capabilities), 4);
+ ci->c_inf[0].caps = brcmf_sdio_regrl(sdiodev,
+ CORE_CC_REG(base, capabilities),
+ NULL);
/* get pmu caps & rev */
if (ci->c_inf[0].caps & CC_CAP_PMU) {
- ci->pmucaps = brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, pmucapabilities), 4);
+ ci->pmucaps =
+ brcmf_sdio_regrl(sdiodev,
+ CORE_CC_REG(base, pmucapabilities),
+ NULL);
ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
}
@@ -523,10 +533,10 @@ int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
brcmf_sdio_chip_buscoresetup(sdiodev, ci);
- brcmf_sdcard_reg_write(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, gpiopullup), 4, 0);
- brcmf_sdcard_reg_write(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, gpiopulldown), 4, 0);
+ brcmf_sdio_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
+ 0, NULL);
+ brcmf_sdio_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
+ 0, NULL);
*ci_ptr = ci;
return 0;
@@ -562,6 +572,7 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
u32 str_mask = 0;
u32 str_shift = 0;
char chn[8];
+ u32 base = ci->c_inf[0].base;
if (!(ci->c_inf[0].caps & CC_CAP_PMU))
return;
@@ -591,17 +602,17 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
}
}
- brcmf_sdcard_reg_write(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr),
- 4, 1);
- cc_data_temp = brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr), 4);
+ brcmf_sdio_regwl(sdiodev, CORE_CC_REG(base, chipcontrol_addr),
+ 1, NULL);
+ cc_data_temp =
+ brcmf_sdio_regrl(sdiodev,
+ CORE_CC_REG(base, chipcontrol_addr),
+ NULL);
cc_data_temp &= ~str_mask;
drivestrength_sel <<= str_shift;
cc_data_temp |= drivestrength_sel;
- brcmf_sdcard_reg_write(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr),
- 4, cc_data_temp);
+ brcmf_sdio_regwl(sdiodev, CORE_CC_REG(base, chipcontrol_addr),
+ cc_data_temp, NULL);
brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n",
drivestrength, cc_data_temp);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 7010eaf71f99..29bf78d264e0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -40,6 +40,10 @@
/* Maximum number of I/O funcs */
#define SDIOD_MAX_IOFUNCS 7
+/* mask of register map */
+#define REG_F0_REG_MASK 0x7FF
+#define REG_F1_MISC_MASK 0x1FFFF
+
/* as of sdiod rev 0, supports 3 functions */
#define SBSDIO_NUM_FUNCTION 3
@@ -142,7 +146,6 @@ struct brcmf_sdio_dev {
u8 num_funcs; /* Supported funcs on client */
u32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
u32 sbwad; /* Save backplane window address */
- bool regfail; /* status of last reg_r/w call */
void *bus;
atomic_t suspend; /* suspend flag */
wait_queue_head_t request_byte_wait;
@@ -164,31 +167,13 @@ struct brcmf_sdio_dev {
extern int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
extern int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
-/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
- * fn: function number
- * addr: unmodified SDIO-space address
- * data: data byte to write
- * err: pointer to error code (or NULL)
- */
-extern u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_dev *sdiodev, uint func,
- u32 addr, int *err);
-extern void brcmf_sdcard_cfg_write(struct brcmf_sdio_dev *sdiodev, uint func,
- u32 addr, u8 data, int *err);
-
-/* Synchronous access to device (client) core registers via CMD53 to F1.
- * addr: backplane address (i.e. >= regsva from attach)
- * size: register width in bytes (2 or 4)
- * data: data for register write
- */
-extern u32
-brcmf_sdcard_reg_read(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size);
-
-extern u32
-brcmf_sdcard_reg_write(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size,
- u32 data);
-
-/* Indicate if last reg read/write failed */
-extern bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev);
+/* sdio device register access interface */
+extern u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+extern u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+extern void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ u8 data, int *ret);
+extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ u32 data, int *ret);
/* Buffer transfer to/from device (client) core via cmd53.
* fn: function number
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index c5a34ffe6459..a299d42da8e7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -28,6 +28,7 @@
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <linux/usb.h>
+#include <linux/vmalloc.h>
#include <net/cfg80211.h>
#include <defs.h>
@@ -1239,7 +1240,7 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
return -EINVAL;
}
- devinfo->image = kmalloc(fw->size, GFP_ATOMIC); /* plus nvram */
+ devinfo->image = vmalloc(fw->size); /* plus nvram */
if (!devinfo->image)
return -ENOMEM;
@@ -1603,7 +1604,7 @@ static struct usb_driver brcmf_usbdrvr = {
void brcmf_usb_exit(void)
{
usb_deregister(&brcmf_usbdrvr);
- kfree(g_image.data);
+ vfree(g_image.data);
g_image.data = NULL;
g_image.len = 0;
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
index c2eb2d0af386..e227c4c68ef9 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
@@ -39,10 +39,7 @@ BRCMSMAC_OFILES := \
phy/phytbl_lcn.o \
phy/phytbl_n.o \
phy/phy_qmath.o \
- otp.o \
- srom.o \
dma.o \
- nicpci.o \
brcms_trace_events.o
MODULEPFX := brcmsmac
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index c93ea35bceec..6d8b7213643a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -19,7 +19,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
-#include <linux/pci.h>
#include <defs.h>
#include <chipcommon.h>
@@ -29,8 +28,6 @@
#include "types.h"
#include "pub.h"
#include "pmu.h"
-#include "srom.h"
-#include "nicpci.h"
#include "aiutils.h"
/* slow_clk_ctl */
@@ -321,7 +318,6 @@
#define IS_SIM(chippkg) \
((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
-#define PCI(sih) (ai_get_buscoretype(sih) == PCI_CORE_ID)
#define PCIE(sih) (ai_get_buscoretype(sih) == PCIE_CORE_ID)
#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID))
@@ -454,36 +450,9 @@ struct aidmp {
u32 componentid3; /* 0xffc */
};
-/* return true if PCIE capability exists in the pci config space */
-static bool ai_ispcie(struct si_info *sii)
-{
- u8 cap_ptr;
-
- cap_ptr =
- pcicore_find_pci_capability(sii->pcibus, PCI_CAP_ID_EXP, NULL,
- NULL);
- if (!cap_ptr)
- return false;
-
- return true;
-}
-
-static bool ai_buscore_prep(struct si_info *sii)
-{
- /* kludge to enable the clock on the 4306 which lacks a slowclock */
- if (!ai_ispcie(sii))
- ai_clkctl_xtal(&sii->pub, XTAL | PLL, ON);
- return true;
-}
-
static bool
ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
{
- struct bcma_device *pci = NULL;
- struct bcma_device *pcie = NULL;
- struct bcma_device *core;
-
-
/* no cores found, bail out */
if (cc->bus->nr_cores == 0)
return false;
@@ -492,8 +461,7 @@ ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
sii->pub.ccrev = cc->id.rev;
/* get chipcommon chipstatus */
- if (ai_get_ccrev(&sii->pub) >= 11)
- sii->chipst = bcma_read32(cc, CHIPCREGOFFS(chipstatus));
+ sii->chipst = bcma_read32(cc, CHIPCREGOFFS(chipstatus));
/* get chipcommon capabilities */
sii->pub.cccaps = bcma_read32(cc, CHIPCREGOFFS(capabilities));
@@ -506,64 +474,18 @@ ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
}
/* figure out buscore */
- list_for_each_entry(core, &cc->bus->cores, list) {
- uint cid, crev;
-
- cid = core->id.id;
- crev = core->id.rev;
-
- if (cid == PCI_CORE_ID) {
- pci = core;
- } else if (cid == PCIE_CORE_ID) {
- pcie = core;
- }
- }
-
- if (pci && pcie) {
- if (ai_ispcie(sii))
- pci = NULL;
- else
- pcie = NULL;
- }
- if (pci) {
- sii->buscore = pci;
- } else if (pcie) {
- sii->buscore = pcie;
- }
-
- /* fixup necessary chip/core configurations */
- if (!sii->pch) {
- sii->pch = pcicore_init(&sii->pub, sii->icbus->drv_pci.core);
- if (sii->pch == NULL)
- return false;
- }
- if (ai_pci_fixcfg(&sii->pub))
- return false;
+ sii->buscore = ai_findcore(&sii->pub, PCIE_CORE_ID, 0);
return true;
}
-/*
- * get boardtype and boardrev
- */
-static __used void ai_nvram_process(struct si_info *sii)
-{
- uint w = 0;
-
- /* do a pci config read to get subsystem id and subvendor id */
- pci_read_config_dword(sii->pcibus, PCI_SUBSYSTEM_VENDOR_ID, &w);
-
- sii->pub.boardvendor = w & 0xffff;
- sii->pub.boardtype = (w >> 16) & 0xffff;
-}
-
static struct si_info *ai_doattach(struct si_info *sii,
struct bcma_bus *pbus)
{
struct si_pub *sih = &sii->pub;
u32 w, savewin;
struct bcma_device *cc;
- uint socitype;
+ struct ssb_sprom *sprom = &pbus->sprom;
savewin = 0;
@@ -573,38 +495,15 @@ static struct si_info *ai_doattach(struct si_info *sii,
/* switch to Chipcommon core */
cc = pbus->drv_cc.core;
- /* bus/core/clk setup for register access */
- if (!ai_buscore_prep(sii))
- return NULL;
+ sih->chip = pbus->chipinfo.id;
+ sih->chiprev = pbus->chipinfo.rev;
+ sih->chippkg = pbus->chipinfo.pkg;
+ sih->boardvendor = pbus->boardinfo.vendor;
+ sih->boardtype = pbus->boardinfo.type;
- /*
- * ChipID recognition.
- * We assume we can read chipid at offset 0 from the regs arg.
- * If we add other chiptypes (or if we need to support old sdio
- * hosts w/o chipcommon), some way of recognizing them needs to
- * be added here.
- */
- w = bcma_read32(cc, CHIPCREGOFFS(chipid));
- socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
- /* Might as wll fill in chip id rev & pkg */
- sih->chip = w & CID_ID_MASK;
- sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
- sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
-
- /* scan for cores */
- if (socitype != SOCI_AI)
- return NULL;
-
- SI_MSG("Found chip type AI (0x%08x)\n", w);
if (!ai_buscore_setup(sii, cc))
goto exit;
- /* Init nvram from sprom/otp if they exist */
- if (srom_var_init(&sii->pub))
- goto exit;
-
- ai_nvram_process(sii);
-
/* === NVRAM, clock is ready === */
bcma_write32(cc, CHIPCREGOFFS(gpiopullup), 0);
bcma_write32(cc, CHIPCREGOFFS(gpiopulldown), 0);
@@ -617,15 +516,13 @@ static struct si_info *ai_doattach(struct si_info *sii,
}
/* setup the GPIO based LED powersave register */
- w = getintvar(sih, BRCMS_SROM_LEDDC);
+ w = (sprom->leddc_on_time << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
+ (sprom->leddc_off_time << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT);
if (w == 0)
w = DEFAULT_GPIOTIMERVAL;
ai_cc_reg(sih, offsetof(struct chipcregs, gpiotimerval),
~0, w);
- if (PCIE(sih))
- pcicore_attach(sii->pch, SI_DOATTACH);
-
if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) {
/*
* enable 12 mA drive strength for 43224 and
@@ -659,9 +556,6 @@ static struct si_info *ai_doattach(struct si_info *sii,
return sii;
exit:
- if (sii->pch)
- pcicore_deinit(sii->pch);
- sii->pch = NULL;
return NULL;
}
@@ -700,11 +594,6 @@ void ai_detach(struct si_pub *sih)
if (sii == NULL)
return;
- if (sii->pch)
- pcicore_deinit(sii->pch);
- sii->pch = NULL;
-
- srom_free_vars(sih);
kfree(sii);
}
@@ -755,21 +644,7 @@ uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
/* return the slow clock source - LPO, XTAL, or PCI */
static uint ai_slowclk_src(struct si_pub *sih, struct bcma_device *cc)
{
- struct si_info *sii;
- u32 val;
-
- sii = (struct si_info *)sih;
- if (ai_get_ccrev(&sii->pub) < 6) {
- pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT,
- &val);
- if (val & PCI_CFG_GPIO_SCS)
- return SCC_SS_PCI;
- return SCC_SS_XTAL;
- } else if (ai_get_ccrev(&sii->pub) < 10) {
- return bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
- SCC_SS_MASK;
- } else /* Insta-clock */
- return SCC_SS_XTAL;
+ return SCC_SS_XTAL;
}
/*
@@ -779,36 +654,12 @@ static uint ai_slowclk_src(struct si_pub *sih, struct bcma_device *cc)
static uint ai_slowclk_freq(struct si_pub *sih, bool max_freq,
struct bcma_device *cc)
{
- u32 slowclk;
uint div;
- slowclk = ai_slowclk_src(sih, cc);
- if (ai_get_ccrev(sih) < 6) {
- if (slowclk == SCC_SS_PCI)
- return max_freq ? (PCIMAXFREQ / 64)
- : (PCIMINFREQ / 64);
- else
- return max_freq ? (XTALMAXFREQ / 32)
- : (XTALMINFREQ / 32);
- } else if (ai_get_ccrev(sih) < 10) {
- div = 4 *
- (((bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
- SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
- if (slowclk == SCC_SS_LPO)
- return max_freq ? LPOMAXFREQ : LPOMINFREQ;
- else if (slowclk == SCC_SS_XTAL)
- return max_freq ? (XTALMAXFREQ / div)
- : (XTALMINFREQ / div);
- else if (slowclk == SCC_SS_PCI)
- return max_freq ? (PCIMAXFREQ / div)
- : (PCIMINFREQ / div);
- } else {
- /* Chipc rev 10 is InstaClock */
- div = bcma_read32(cc, CHIPCREGOFFS(system_clk_ctl));
- div = 4 * ((div >> SYCC_CD_SHIFT) + 1);
- return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
- }
- return 0;
+ /* Chipc rev 10 is InstaClock */
+ div = bcma_read32(cc, CHIPCREGOFFS(system_clk_ctl));
+ div = 4 * ((div >> SYCC_CD_SHIFT) + 1);
+ return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
}
static void
@@ -831,8 +682,7 @@ ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc)
/* Starting with 4318 it is ILP that is used for the delays */
slowmaxfreq =
- ai_slowclk_freq(sih,
- (ai_get_ccrev(sih) >= 10) ? false : true, cc);
+ ai_slowclk_freq(sih, false, cc);
pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
@@ -854,9 +704,8 @@ void ai_clkctl_init(struct si_pub *sih)
return;
/* set all Instaclk chip ILP to 1 MHz */
- if (ai_get_ccrev(sih) >= 10)
- bcma_maskset32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_CD_MASK,
- (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+ bcma_maskset32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_CD_MASK,
+ (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
ai_clkctl_setdelay(sih, cc);
}
@@ -891,140 +740,6 @@ u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
return fpdelay;
}
-/* turn primary xtal and/or pll off/on */
-int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
-{
- struct si_info *sii;
- u32 in, out, outen;
-
- sii = (struct si_info *)sih;
-
- /* pcie core doesn't have any mapping to control the xtal pu */
- if (PCIE(sih))
- return -1;
-
- pci_read_config_dword(sii->pcibus, PCI_GPIO_IN, &in);
- pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT, &out);
- pci_read_config_dword(sii->pcibus, PCI_GPIO_OUTEN, &outen);
-
- /*
- * Avoid glitching the clock if GPRS is already using it.
- * We can't actually read the state of the PLLPD so we infer it
- * by the value of XTAL_PU which *is* readable via gpioin.
- */
- if (on && (in & PCI_CFG_GPIO_XTAL))
- return 0;
-
- if (what & XTAL)
- outen |= PCI_CFG_GPIO_XTAL;
- if (what & PLL)
- outen |= PCI_CFG_GPIO_PLL;
-
- if (on) {
- /* turn primary xtal on */
- if (what & XTAL) {
- out |= PCI_CFG_GPIO_XTAL;
- if (what & PLL)
- out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pcibus,
- PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->pcibus,
- PCI_GPIO_OUTEN, outen);
- udelay(XTAL_ON_DELAY);
- }
-
- /* turn pll on */
- if (what & PLL) {
- out &= ~PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pcibus,
- PCI_GPIO_OUT, out);
- mdelay(2);
- }
- } else {
- if (what & XTAL)
- out &= ~PCI_CFG_GPIO_XTAL;
- if (what & PLL)
- out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pcibus,
- PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->pcibus,
- PCI_GPIO_OUTEN, outen);
- }
-
- return 0;
-}
-
-/* clk control mechanism through chipcommon, no policy checking */
-static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
-{
- struct bcma_device *cc;
- u32 scc;
-
- /* chipcommon cores prior to rev6 don't support dynamic clock control */
- if (ai_get_ccrev(&sii->pub) < 6)
- return false;
-
- cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
-
- if (!(ai_get_cccaps(&sii->pub) & CC_CAP_PWR_CTL) &&
- (ai_get_ccrev(&sii->pub) < 20))
- return mode == CLK_FAST;
-
- switch (mode) {
- case CLK_FAST: /* FORCEHT, fast (pll) clock */
- if (ai_get_ccrev(&sii->pub) < 10) {
- /*
- * don't forget to force xtal back
- * on before we clear SCC_DYN_XTAL..
- */
- ai_clkctl_xtal(&sii->pub, XTAL, ON);
- bcma_maskset32(cc, CHIPCREGOFFS(slow_clk_ctl),
- (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
- } else if (ai_get_ccrev(&sii->pub) < 20) {
- bcma_set32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_HR);
- } else {
- bcma_set32(cc, CHIPCREGOFFS(clk_ctl_st), CCS_FORCEHT);
- }
-
- /* wait for the PLL */
- if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
- u32 htavail = CCS_HTAVAIL;
- SPINWAIT(((bcma_read32(cc, CHIPCREGOFFS(clk_ctl_st)) &
- htavail) == 0), PMU_MAX_TRANSITION_DLY);
- } else {
- udelay(PLL_DELAY);
- }
- break;
-
- case CLK_DYNAMIC: /* enable dynamic clock control */
- if (ai_get_ccrev(&sii->pub) < 10) {
- scc = bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl));
- scc &= ~(SCC_FS | SCC_IP | SCC_XC);
- if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
- scc |= SCC_XC;
- bcma_write32(cc, CHIPCREGOFFS(slow_clk_ctl), scc);
-
- /*
- * for dynamic control, we have to
- * release our xtal_pu "force on"
- */
- if (scc & SCC_XC)
- ai_clkctl_xtal(&sii->pub, XTAL, OFF);
- } else if (ai_get_ccrev(&sii->pub) < 20) {
- /* Instaclock */
- bcma_mask32(cc, CHIPCREGOFFS(system_clk_ctl), ~SYCC_HR);
- } else {
- bcma_mask32(cc, CHIPCREGOFFS(clk_ctl_st), ~CCS_FORCEHT);
- }
- break;
-
- default:
- break;
- }
-
- return mode == CLK_FAST;
-}
-
/*
* clock control policy function through chipcommon
*
@@ -1033,133 +748,53 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
* this is a wrapper over the next internal function
* to allow flexible policy settings for outside caller
*/
-bool ai_clkctl_cc(struct si_pub *sih, uint mode)
+bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode)
{
struct si_info *sii;
+ struct bcma_device *cc;
sii = (struct si_info *)sih;
- /* chipcommon cores prior to rev6 don't support dynamic clock control */
- if (ai_get_ccrev(sih) < 6)
- return false;
-
if (PCI_FORCEHT(sih))
- return mode == CLK_FAST;
+ return mode == BCMA_CLKMODE_FAST;
- return _ai_clkctl_cc(sii, mode);
+ cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
+ bcma_core_set_clockmode(cc, mode);
+ return mode == BCMA_CLKMODE_FAST;
}
void ai_pci_up(struct si_pub *sih)
{
struct si_info *sii;
+ struct bcma_device *cc;
sii = (struct si_info *)sih;
- if (PCI_FORCEHT(sih))
- _ai_clkctl_cc(sii, CLK_FAST);
+ if (PCI_FORCEHT(sih)) {
+ cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
+ bcma_core_set_clockmode(cc, BCMA_CLKMODE_FAST);
+ }
if (PCIE(sih))
- pcicore_up(sii->pch, SI_PCIUP);
-
-}
-
-/* Unconfigure and/or apply various WARs when system is going to sleep mode */
-void ai_pci_sleep(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
-
- pcicore_sleep(sii->pch);
+ bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, true);
}
/* Unconfigure and/or apply various WARs when going down */
void ai_pci_down(struct si_pub *sih)
{
struct si_info *sii;
+ struct bcma_device *cc;
sii = (struct si_info *)sih;
/* release FORCEHT since chip is going to "down" state */
- if (PCI_FORCEHT(sih))
- _ai_clkctl_cc(sii, CLK_DYNAMIC);
-
- pcicore_down(sii->pch, SI_PCIDOWN);
-}
-
-/*
- * Configure the pci core for pci client (NIC) action
- * coremask is the bitvec of cores by index to be enabled.
- */
-void ai_pci_setup(struct si_pub *sih, uint coremask)
-{
- struct si_info *sii;
- u32 w;
-
- sii = (struct si_info *)sih;
-
- /*
- * Enable sb->pci interrupts. Assume
- * PCI rev 2.3 support was added in pci core rev 6 and things changed..
- */
- if (PCIE(sih) || (PCI(sih) && (ai_get_buscorerev(sih) >= 6))) {
- /* pci config write to set this core bit in PCIIntMask */
- pci_read_config_dword(sii->pcibus, PCI_INT_MASK, &w);
- w |= (coremask << PCI_SBIM_SHIFT);
- pci_write_config_dword(sii->pcibus, PCI_INT_MASK, w);
- }
-
- if (PCI(sih)) {
- pcicore_pci_setup(sii->pch);
+ if (PCI_FORCEHT(sih)) {
+ cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
+ bcma_core_set_clockmode(cc, BCMA_CLKMODE_DYNAMIC);
}
-}
-/*
- * Fixup SROMless PCI device's configuration.
- * The current core may be changed upon return.
- */
-int ai_pci_fixcfg(struct si_pub *sih)
-{
- struct si_info *sii = (struct si_info *)sih;
-
- /* Fixup PI in SROM shadow area to enable the correct PCI core access */
- /* check 'pi' is correct and fix it if not */
- pcicore_fixcfg(sii->pch);
- pcicore_hwup(sii->pch);
- return 0;
-}
-
-/* mask&set gpiocontrol bits */
-u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val, u8 priority)
-{
- uint regoff;
-
- regoff = offsetof(struct chipcregs, gpiocontrol);
- return ai_cc_reg(sih, regoff, mask, val);
-}
-
-void ai_chipcontrl_epa4331(struct si_pub *sih, bool on)
-{
- struct bcma_device *cc;
- u32 val;
-
- cc = ai_findcore(sih, CC_CORE_ID, 0);
-
- if (on) {
- if (ai_get_chippkg(sih) == 9 || ai_get_chippkg(sih) == 0xb)
- /* Ext PA Controls for 4331 12x9 Package */
- bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
- CCTRL4331_EXTPA_EN |
- CCTRL4331_EXTPA_ON_GPIO2_5);
- else
- /* Ext PA Controls for 4331 12x12 Package */
- bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
- CCTRL4331_EXTPA_EN);
- } else {
- val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
- bcma_mask32(cc, CHIPCREGOFFS(chipcontrol),
- ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5));
- }
+ if (PCIE(sih))
+ bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, false);
}
/* Enable BT-COEX & Ex-PA for 4313 */
@@ -1181,6 +816,9 @@ bool ai_deviceremoved(struct si_pub *sih)
sii = (struct si_info *)sih;
+ if (sii->icbus->hosttype != BCMA_HOSTTYPE_PCI)
+ return false;
+
pci_read_config_dword(sii->pcibus, PCI_VENDOR_ID, &w);
if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
return true;
@@ -1188,45 +826,6 @@ bool ai_deviceremoved(struct si_pub *sih)
return false;
}
-bool ai_is_sprom_available(struct si_pub *sih)
-{
- struct si_info *sii = (struct si_info *)sih;
-
- if (ai_get_ccrev(sih) >= 31) {
- struct bcma_device *cc;
- u32 sromctrl;
-
- if ((ai_get_cccaps(sih) & CC_CAP_SROM) == 0)
- return false;
-
- cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- sromctrl = bcma_read32(cc, CHIPCREGOFFS(sromcontrol));
- return sromctrl & SRC_PRESENT;
- }
-
- switch (ai_get_chip_id(sih)) {
- case BCM4313_CHIP_ID:
- return (sii->chipst & CST4313_SPROM_PRESENT) != 0;
- default:
- return true;
- }
-}
-
-bool ai_is_otp_disabled(struct si_pub *sih)
-{
- struct si_info *sii = (struct si_info *)sih;
-
- switch (ai_get_chip_id(sih)) {
- case BCM4313_CHIP_ID:
- return (sii->chipst & CST4313_OTP_PRESENT) == 0;
- /* These chips always have their OTP on */
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- default:
- return false;
- }
-}
-
uint ai_get_buscoretype(struct si_pub *sih)
{
struct si_info *sii = (struct si_info *)sih;
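For reference, the gpiotimerval word that aiutils.c now writes is built from the two SPROM LED duty-cycle fields instead of the old BRCMS_SROM_LEDDC variable. A minimal sketch of that composition, using only names that appear in the hunk above; the helper name brcms_leddc_word() is illustrative only, since the driver performs the same computation inline in ai_doattach():

	/* Sketch: compose the LED duty-cycle word from the SPROM fields,
	 * falling back to the driver default when both fields are zero. */
	static u32 brcms_leddc_word(const struct ssb_sprom *sprom)
	{
		u32 w = (sprom->leddc_on_time << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
			(sprom->leddc_off_time << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT);

		return w ? w : DEFAULT_GPIOTIMERVAL;
	}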
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index f84c6f781692..d9f04a683bdb 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -113,10 +113,6 @@
#define XTAL 0x1 /* primary crystal oscillator (2050) */
#define PLL 0x2 /* main chip pll */
-/* clkctl clk mode */
-#define CLK_FAST 0 /* force fast (pll) clock */
-#define CLK_DYNAMIC 2 /* enable dynamic clock control */
-
/* GPIO usage priorities */
#define GPIO_DRV_PRIORITY 0 /* Driver */
#define GPIO_APP_PRIORITY 1 /* Application */
@@ -172,9 +168,7 @@ struct si_info {
struct si_pub pub; /* back plane public state (must be first) */
struct bcma_bus *icbus; /* handle to soc interconnect bus */
struct pci_dev *pcibus; /* handle to pci bus */
- struct pcicore_info *pch; /* PCI/E core handle */
struct bcma_device *buscore;
- struct list_head var_list; /* list of srom variables */
u32 chipst; /* chip status */
};
@@ -197,38 +191,20 @@ extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
extern struct si_pub *ai_attach(struct bcma_bus *pbus);
extern void ai_detach(struct si_pub *sih);
extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
-extern void ai_pci_setup(struct si_pub *sih, uint coremask);
extern void ai_clkctl_init(struct si_pub *sih);
extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
extern bool ai_clkctl_cc(struct si_pub *sih, uint mode);
-extern int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on);
extern bool ai_deviceremoved(struct si_pub *sih);
-extern u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val,
- u8 priority);
-
-/* OTP status */
-extern bool ai_is_otp_disabled(struct si_pub *sih);
-
-/* SPROM availability */
-extern bool ai_is_sprom_available(struct si_pub *sih);
-extern void ai_pci_sleep(struct si_pub *sih);
extern void ai_pci_down(struct si_pub *sih);
extern void ai_pci_up(struct si_pub *sih);
-extern int ai_pci_fixcfg(struct si_pub *sih);
-extern void ai_chipcontrl_epa4331(struct si_pub *sih, bool on);
/* Enable Ex-PA for 4313 */
extern void ai_epa_4313war(struct si_pub *sih);
extern uint ai_get_buscoretype(struct si_pub *sih);
extern uint ai_get_buscorerev(struct si_pub *sih);
-static inline int ai_get_ccrev(struct si_pub *sih)
-{
- return sih->ccrev;
-}
-
static inline u32 ai_get_cccaps(struct si_pub *sih)
{
return sih->cccaps;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/antsel.c b/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
index a47ce25cb9a2..55e12c327911 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
@@ -108,7 +108,7 @@ brcms_c_antsel_init_cfg(struct antsel_info *asi, struct brcms_antselcfg *antsel,
struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc)
{
struct antsel_info *asi;
- struct si_pub *sih = wlc->hw->sih;
+ struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
asi = kzalloc(sizeof(struct antsel_info), GFP_ATOMIC);
if (!asi)
@@ -118,7 +118,7 @@ struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc)
asi->pub = wlc->pub;
asi->antsel_type = ANTSEL_NA;
asi->antsel_avail = false;
- asi->antsel_antswitch = (u8) getintvar(sih, BRCMS_SROM_ANTSWITCH);
+ asi->antsel_antswitch = sprom->antswitch;
if ((asi->pub->sromrev >= 4) && (asi->antsel_antswitch != 0)) {
switch (asi->antsel_antswitch) {
@@ -128,12 +128,12 @@ struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc)
/* 4321/2 board with 2x3 switch logic */
asi->antsel_type = ANTSEL_2x3;
/* Antenna selection availability */
- if (((u16) getintvar(sih, BRCMS_SROM_AA2G) == 7) ||
- ((u16) getintvar(sih, BRCMS_SROM_AA5G) == 7)) {
+ if ((sprom->ant_available_bg == 7) ||
+ (sprom->ant_available_a == 7)) {
asi->antsel_avail = true;
} else if (
- (u16) getintvar(sih, BRCMS_SROM_AA2G) == 3 ||
- (u16) getintvar(sih, BRCMS_SROM_AA5G) == 3) {
+ sprom->ant_available_bg == 3 ||
+ sprom->ant_available_a == 3) {
asi->antsel_avail = false;
} else {
asi->antsel_avail = false;
@@ -146,8 +146,8 @@ struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc)
break;
}
} else if ((asi->pub->sromrev == 4) &&
- ((u16) getintvar(sih, BRCMS_SROM_AA2G) == 7) &&
- ((u16) getintvar(sih, BRCMS_SROM_AA5G) == 0)) {
+ (sprom->ant_available_bg == 7) &&
+ (sprom->ant_available_a == 0)) {
/* hack to match old 4321CB2 cards with 2of3 antenna switch */
asi->antsel_type = ANTSEL_2x3;
asi->antsel_avail = true;
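The antenna-selection rework above replaces the BRCMS_SROM_AA2G/AA5G lookups with direct reads of the ssb_sprom fields. A small illustrative helper showing that mapping, assuming the field names used in the hunk (the helper itself does not exist in the driver):

	/* Sketch: per-band antenna-availability lookup from the SPROM,
	 * equivalent to the old AA2G/AA5G getintvar() reads. */
	static u8 brcms_ant_avail(const struct ssb_sprom *sprom, bool is_5g)
	{
		return is_5g ? sprom->ant_available_a : sprom->ant_available_bg;
	}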
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 0efe88e25a9a..eb77ac3cfb6b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -1110,7 +1110,7 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
char country_abbrev[BRCM_CNTRY_BUF_SZ];
const struct country_info *country;
struct brcms_pub *pub = wlc->pub;
- char *ccode;
+ struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
@@ -1122,9 +1122,8 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
wlc->cmi = wlc_cm;
/* store the country code for passing up as a regulatory hint */
- ccode = getvar(wlc->hw->sih, BRCMS_SROM_CCODE);
- if (ccode && brcms_c_country_valid(ccode))
- strncpy(wlc->pub->srom_ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
+ if (sprom->alpha2 && brcms_c_country_valid(sprom->alpha2))
+ strncpy(wlc->pub->srom_ccode, sprom->alpha2, sizeof(sprom->alpha2));
/*
* internal country information which must match
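One detail worth noting in the channel.c hunk: ssb_sprom::alpha2 is a fixed two-character array rather than a NUL-terminated string, so the copy into srom_ccode is bounded by sizeof(sprom->alpha2) and relies on the destination buffer being zero-initialised. A hedged sketch of an equivalent copy that makes the termination explicit (brcms_copy_ccode() is hypothetical, and BRCM_CNTRY_BUF_SZ is assumed to be larger than two bytes):

	/* Sketch: copy the two-letter regulatory code and terminate it. */
	static void brcms_copy_ccode(char dst[BRCM_CNTRY_BUF_SZ],
				     const struct ssb_sprom *sprom)
	{
		memcpy(dst, sprom->alpha2, sizeof(sprom->alpha2));
		dst[sizeof(sprom->alpha2)] = '\0';
	}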
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index aa15558f75c8..50f92a0b7c41 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -25,7 +25,6 @@
#include <linux/bcma/bcma.h>
#include <net/mac80211.h>
#include <defs.h>
-#include "nicpci.h"
#include "phy/phy_int.h"
#include "d11.h"
#include "channel.h"
@@ -770,7 +769,7 @@ void brcms_dpc(unsigned long data)
* Precondition: Since this function is called in brcms_pci_probe() context,
* no locking is required.
*/
-static int brcms_request_fw(struct brcms_info *wl, struct pci_dev *pdev)
+static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev)
{
int status;
struct device *device = &pdev->dev;
@@ -1022,7 +1021,7 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
spin_lock_init(&wl->isr_lock);
/* prepare ucode */
- if (brcms_request_fw(wl, pdev->bus->host_pci) < 0) {
+ if (brcms_request_fw(wl, pdev) < 0) {
wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
"%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
brcms_release_fw(wl);
@@ -1043,12 +1042,12 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
wl->pub->ieee_hw = hw;
/* register our interrupt handler */
- if (request_irq(pdev->bus->host_pci->irq, brcms_isr,
+ if (request_irq(pdev->irq, brcms_isr,
IRQF_SHARED, KBUILD_MODNAME, wl)) {
wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit);
goto fail;
}
- wl->irq = pdev->bus->host_pci->irq;
+ wl->irq = pdev->irq;
/* register module */
brcms_c_module_register(wl->pub, "linux", wl, NULL);
@@ -1098,7 +1097,7 @@ static int __devinit brcms_bcma_probe(struct bcma_device *pdev)
dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n",
pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class,
- pdev->bus->host_pci->irq);
+ pdev->irq);
if ((pdev->id.manuf != BCMA_MANUF_BCM) ||
(pdev->id.id != BCMA_CORE_80211))
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index b4d92792c502..19db4052c44c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -1219,7 +1219,7 @@ static void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw)
}
/* control chip clock to save power, enable dynamic clock or force fast clock */
-static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
+static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, enum bcma_clkmode mode)
{
if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU) {
/* new chips with PMU, CCS_FORCEHT will distribute the HT clock
@@ -1229,7 +1229,7 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
*/
if (wlc_hw->clk) {
- if (mode == CLK_FAST) {
+ if (mode == BCMA_CLKMODE_FAST) {
bcma_set32(wlc_hw->d11core,
D11REGOFFS(clk_ctl_st),
CCS_FORCEHT);
@@ -1260,7 +1260,7 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
~CCS_FORCEHT);
}
}
- wlc_hw->forcefastclk = (mode == CLK_FAST);
+ wlc_hw->forcefastclk = (mode == BCMA_CLKMODE_FAST);
} else {
/* old chips w/o PMU, force HT through cc,
@@ -1567,7 +1567,7 @@ void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
wlc_phy_bw_state_set(wlc_hw->band->pi, bw);
@@ -1576,7 +1576,7 @@ void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
/* restore the clk */
if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_DYNAMIC);
}
static void brcms_b_upd_synthpu(struct brcms_hardware *wlc_hw)
@@ -1882,27 +1882,20 @@ static bool brcms_c_validboardtype(struct brcms_hardware *wlc_hw)
return true;
}
-static char *brcms_c_get_macaddr(struct brcms_hardware *wlc_hw)
+static void brcms_c_get_macaddr(struct brcms_hardware *wlc_hw, u8 etheraddr[ETH_ALEN])
{
- enum brcms_srom_id var_id = BRCMS_SROM_MACADDR;
- char *macaddr;
+ struct ssb_sprom *sprom = &wlc_hw->d11core->bus->sprom;
/* If macaddr exists, use it (Sromrev4, CIS, ...). */
- macaddr = getvar(wlc_hw->sih, var_id);
- if (macaddr != NULL)
- return macaddr;
+ if (!is_zero_ether_addr(sprom->il0mac)) {
+ memcpy(etheraddr, sprom->il0mac, 6);
+ return;
+ }
if (wlc_hw->_nbands > 1)
- var_id = BRCMS_SROM_ET1MACADDR;
+ memcpy(etheraddr, sprom->et1mac, 6);
else
- var_id = BRCMS_SROM_IL0MACADDR;
-
- macaddr = getvar(wlc_hw->sih, var_id);
- if (macaddr == NULL)
- wiphy_err(wlc_hw->wlc->wiphy, "wl%d: wlc_get_macaddr: macaddr "
- "getvar(%d) not found\n", wlc_hw->unit, var_id);
-
- return macaddr;
+ memcpy(etheraddr, sprom->il0mac, 6);
}
/* power both the pll and external oscillator on/off */
@@ -1917,9 +1910,6 @@ static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
if (!want && wlc_hw->pllreq)
return;
- if (wlc_hw->sih)
- ai_clkctl_xtal(wlc_hw->sih, XTAL | PLL, want);
-
wlc_hw->sbclk = want;
if (!wlc_hw->sbclk) {
wlc_hw->clk = false;
@@ -2004,7 +1994,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
/* reset the dma engines except first time thru */
if (bcma_core_is_enabled(wlc_hw->d11core)) {
@@ -2053,7 +2043,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
brcms_c_mctrl_reset(wlc_hw);
if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU)
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
brcms_b_phy_reset(wlc_hw);
@@ -2065,7 +2055,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
/* restore the clk setting */
if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_DYNAMIC);
}
/* txfifo sizes need to be modified (increased) since the newer cores
@@ -2218,7 +2208,7 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc)
gm |= gc |= BOARD_GPIO_PACTRL;
/* apply to gpiocontrol register */
- ai_gpiocontrol(wlc_hw->sih, gm, gc, GPIO_DRV_PRIORITY);
+ bcma_chipco_gpio_control(&wlc_hw->d11core->bus->drv_cc, gm, gc);
}
static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
@@ -3371,7 +3361,7 @@ static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) {
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
/* disable interrupts */
macintmask = brcms_intrsoff(wlc->wl);
@@ -3405,7 +3395,7 @@ static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) {
/* restore the clk */
if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_DYNAMIC);
}
static void brcms_c_set_phy_chanspec(struct brcms_c_info *wlc,
@@ -4436,17 +4426,22 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
uint unit, bool piomode)
{
struct brcms_hardware *wlc_hw;
- char *macaddr = NULL;
uint err = 0;
uint j;
bool wme = false;
struct shared_phy_params sha_params;
struct wiphy *wiphy = wlc->wiphy;
struct pci_dev *pcidev = core->bus->host_pci;
+ struct ssb_sprom *sprom = &core->bus->sprom;
- BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
- pcidev->vendor,
- pcidev->device);
+ if (core->bus->hosttype == BCMA_HOSTTYPE_PCI)
+ BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
+ pcidev->vendor,
+ pcidev->device);
+ else
+ BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
+ core->bus->boardinfo.vendor,
+ core->bus->boardinfo.type);
wme = true;
@@ -4472,7 +4467,8 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
}
/* verify again the device is supported */
- if (!brcms_c_chipmatch(pcidev->vendor, pcidev->device)) {
+ if (core->bus->hosttype == BCMA_HOSTTYPE_PCI &&
+ !brcms_c_chipmatch(pcidev->vendor, pcidev->device)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
"vendor/device (0x%x/0x%x)\n",
unit, pcidev->vendor, pcidev->device);
@@ -4480,8 +4476,13 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
goto fail;
}
- wlc_hw->vendorid = pcidev->vendor;
- wlc_hw->deviceid = pcidev->device;
+ if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
+ wlc_hw->vendorid = pcidev->vendor;
+ wlc_hw->deviceid = pcidev->device;
+ } else {
+ wlc_hw->vendorid = core->bus->boardinfo.vendor;
+ wlc_hw->deviceid = core->bus->boardinfo.type;
+ }
wlc_hw->d11core = core;
wlc_hw->corerev = core->id.rev;
@@ -4501,7 +4502,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
* is still false; But it will be called again inside wlc_corereset,
* after d11 is out of reset.
*/
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
if (!brcms_b_validate_chip_access(wlc_hw)) {
@@ -4512,7 +4513,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
}
/* get the board rev, used just below */
- j = getintvar(wlc_hw->sih, BRCMS_SROM_BOARDREV);
+ j = sprom->board_rev;
/* promote srom boardrev of 0xFF to 1 */
if (j == BOARDREV_PROMOTABLE)
j = BOARDREV_PROMOTED;
@@ -4525,11 +4526,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
err = 15;
goto fail;
}
- wlc_hw->sromrev = (u8) getintvar(wlc_hw->sih, BRCMS_SROM_REV);
- wlc_hw->boardflags = (u32) getintvar(wlc_hw->sih,
- BRCMS_SROM_BOARDFLAGS);
- wlc_hw->boardflags2 = (u32) getintvar(wlc_hw->sih,
- BRCMS_SROM_BOARDFLAGS2);
+ wlc_hw->sromrev = sprom->revision;
+ wlc_hw->boardflags = sprom->boardflags_lo + (sprom->boardflags_hi << 16);
+ wlc_hw->boardflags2 = sprom->boardflags2_lo + (sprom->boardflags2_hi << 16);
if (wlc_hw->boardflags & BFL_NOPLLDOWN)
brcms_b_pllreq(wlc_hw, true, BRCMS_PLLREQ_SHARED);
@@ -4702,25 +4701,18 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
*/
/* init etheraddr state variables */
- macaddr = brcms_c_get_macaddr(wlc_hw);
- if (macaddr == NULL) {
- wiphy_err(wiphy, "wl%d: brcms_b_attach: macaddr not found\n",
- unit);
- err = 21;
- goto fail;
- }
- if (!mac_pton(macaddr, wlc_hw->etheraddr) ||
- is_broadcast_ether_addr(wlc_hw->etheraddr) ||
+ brcms_c_get_macaddr(wlc_hw, wlc_hw->etheraddr);
+
+ if (is_broadcast_ether_addr(wlc_hw->etheraddr) ||
is_zero_ether_addr(wlc_hw->etheraddr)) {
- wiphy_err(wiphy, "wl%d: brcms_b_attach: bad macaddr %s\n",
- unit, macaddr);
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: bad macaddr\n",
+ unit);
err = 22;
goto fail;
}
- BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
- wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih),
- macaddr);
+ BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x\n",
+ wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih));
return err;
@@ -4770,16 +4762,16 @@ static bool brcms_c_attach_stf_ant_init(struct brcms_c_info *wlc)
int aa;
uint unit;
int bandtype;
- struct si_pub *sih = wlc->hw->sih;
+ struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
unit = wlc->pub->unit;
bandtype = wlc->band->bandtype;
/* get antennas available */
if (bandtype == BRCM_BAND_5G)
- aa = (s8) getintvar(sih, BRCMS_SROM_AA5G);
+ aa = sprom->ant_available_a;
else
- aa = (s8) getintvar(sih, BRCMS_SROM_AA2G);
+ aa = sprom->ant_available_bg;
if ((aa < 1) || (aa > 15)) {
wiphy_err(wlc->wiphy, "wl%d: %s: Invalid antennas available in"
@@ -4799,9 +4791,9 @@ static bool brcms_c_attach_stf_ant_init(struct brcms_c_info *wlc)
/* Compute Antenna Gain */
if (bandtype == BRCM_BAND_5G)
- wlc->band->antgain = (s8) getintvar(sih, BRCMS_SROM_AG1);
+ wlc->band->antgain = sprom->antenna_gain.a1;
else
- wlc->band->antgain = (s8) getintvar(sih, BRCMS_SROM_AG0);
+ wlc->band->antgain = sprom->antenna_gain.a0;
brcms_c_attach_antgain_init(wlc);
@@ -4952,15 +4944,6 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
callbacks = 0;
- if (wlc_hw->sih) {
- /*
- * detach interrupt sync mechanism since interrupt is disabled
- * and per-port interrupt object may has been freed. this must
- * be done before sb core switch
- */
- ai_pci_sleep(wlc_hw->sih);
- }
-
brcms_b_detach_dmapio(wlc_hw);
band = wlc_hw->band;
@@ -5047,9 +5030,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
*/
brcms_b_xtal(wlc_hw, ON);
ai_clkctl_init(wlc_hw->sih);
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
-
- ai_pci_fixcfg(wlc_hw->sih);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
/*
* TODO: test suspend/resume
@@ -5078,8 +5059,6 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
{
- uint coremask;
-
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
/*
@@ -5088,15 +5067,14 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
*/
brcms_b_xtal(wlc_hw, ON);
ai_clkctl_init(wlc_hw->sih);
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
/*
* Configure pci/pcmcia here instead of in brcms_c_attach()
* to allow mfg hotswap: down, hotswap (chip power cycle), up.
*/
- coremask = (1 << wlc_hw->wlc->core->coreidx);
-
- ai_pci_setup(wlc_hw->sih, coremask);
+ bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci, wlc_hw->d11core,
+ true);
/*
* Need to read the hwradio status here to cover the case where the
@@ -5126,7 +5104,7 @@ static int brcms_b_up_finish(struct brcms_hardware *wlc_hw)
wlc_phy_hw_state_upd(wlc_hw->band->pi, true);
/* FULLY enable dynamic power control and d11 core interrupt */
- brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_DYNAMIC);
brcms_intrson(wlc_hw->wlc->wl);
return 0;
}
@@ -5267,7 +5245,7 @@ static int brcms_b_bmac_down_prep(struct brcms_hardware *wlc_hw)
brcms_intrsoff(wlc_hw->wlc->wl);
/* ensure we're running on the pll clock again */
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
}
/* down phy at the last of this stage */
callbacks += wlc_phy_down(wlc_hw->band->pi);
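In the main.c changes above, the SROM-derived state (board revision, board flags, MAC address) now comes straight from struct ssb_sprom. A minimal sketch of the boardflags reassembly, equivalent to the additions in brcms_b_attach() since the lo/hi halves occupy disjoint bits (the helper is illustrative, not part of the driver):

	/* Sketch: rebuild the 32-bit boardflags words from the SPROM halves. */
	static void brcms_get_boardflags(struct brcms_hardware *wlc_hw,
					 const struct ssb_sprom *sprom)
	{
		wlc_hw->boardflags = sprom->boardflags_lo |
				     ((u32)sprom->boardflags_hi << 16);
		wlc_hw->boardflags2 = sprom->boardflags2_lo |
				      ((u32)sprom->boardflags2_hi << 16);
	}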
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
deleted file mode 100644
index 7fad6dc19258..000000000000
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
+++ /dev/null
@@ -1,826 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-
-#include <defs.h>
-#include <soc.h>
-#include <chipcommon.h>
-#include "aiutils.h"
-#include "pub.h"
-#include "nicpci.h"
-
-/* SPROM offsets */
-#define SRSH_ASPM_OFFSET 4 /* word 4 */
-#define SRSH_ASPM_ENB 0x18 /* bit 3, 4 */
-#define SRSH_ASPM_L1_ENB 0x10 /* bit 4 */
-#define SRSH_ASPM_L0s_ENB 0x8 /* bit 3 */
-
-#define SRSH_PCIE_MISC_CONFIG 5 /* word 5 */
-#define SRSH_L23READY_EXIT_NOPERST 0x8000 /* bit 15 */
-#define SRSH_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */
-#define SRSH_CLKREQ_ENB 0x0800 /* bit 11 */
-#define SRSH_BD_OFFSET 6 /* word 6 */
-
-/* chipcontrol */
-#define CHIPCTRL_4321_PLL_DOWN 0x800000/* serdes PLL down override */
-
-/* MDIO control */
-#define MDIOCTL_DIVISOR_MASK 0x7f /* clock to be used on MDIO */
-#define MDIOCTL_DIVISOR_VAL 0x2
-#define MDIOCTL_PREAM_EN 0x80 /* Enable preamble sequnce */
-#define MDIOCTL_ACCESS_DONE 0x100 /* Transaction complete */
-
-/* MDIO Data */
-#define MDIODATA_MASK 0x0000ffff /* data 2 bytes */
-#define MDIODATA_TA 0x00020000 /* Turnaround */
-
-#define MDIODATA_REGADDR_SHF 18 /* Regaddr shift */
-#define MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */
-#define MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */
-#define MDIODATA_DEVADDR_MASK 0x0f800000
- /* Physmedia devaddr Mask */
-
-/* MDIO Data for older revisions < 10 */
-#define MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift */
-#define MDIODATA_REGADDR_MASK_OLD 0x003c0000
- /* Regaddr Mask */
-#define MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift */
-#define MDIODATA_DEVADDR_MASK_OLD 0x0fc00000
- /* Physmedia devaddr Mask */
-
-/* Transactions flags */
-#define MDIODATA_WRITE 0x10000000
-#define MDIODATA_READ 0x20000000
-#define MDIODATA_START 0x40000000
-
-#define MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */
-#define MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */
-
-/* serdes regs (rev < 10) */
-#define MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */
-#define MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
-#define MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */
-
-/* SERDES RX registers */
-#define SERDES_RX_CTRL 1 /* Rx cntrl */
-#define SERDES_RX_TIMER1 2 /* Rx Timer1 */
-#define SERDES_RX_CDR 6 /* CDR */
-#define SERDES_RX_CDRBW 7 /* CDR BW */
-/* SERDES RX control register */
-#define SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */
-#define SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */
-
-/* SERDES PLL registers */
-#define SERDES_PLL_CTRL 1 /* PLL control reg */
-#define PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */
-
-/* Linkcontrol reg offset in PCIE Cap */
-#define PCIE_CAP_LINKCTRL_OFFSET 16 /* offset in pcie cap */
-#define PCIE_CAP_LCREG_ASPML0s 0x01 /* ASPM L0s in linkctrl */
-#define PCIE_CAP_LCREG_ASPML1 0x02 /* ASPM L1 in linkctrl */
-#define PCIE_CLKREQ_ENAB 0x100 /* CLKREQ Enab in linkctrl */
-
-#define PCIE_ASPM_ENAB 3 /* ASPM L0s & L1 in linkctrl */
-#define PCIE_ASPM_L1_ENAB 2 /* ASPM L0s & L1 in linkctrl */
-#define PCIE_ASPM_L0s_ENAB 1 /* ASPM L0s & L1 in linkctrl */
-#define PCIE_ASPM_DISAB 0 /* ASPM L0s & L1 in linkctrl */
-
-/* Power management threshold */
-#define PCIE_L1THRESHOLDTIME_MASK 0xFF00 /* bits 8 - 15 */
-#define PCIE_L1THRESHOLDTIME_SHIFT 8 /* PCIE_L1THRESHOLDTIME_SHIFT */
-#define PCIE_L1THRESHOLD_WARVAL 0x72 /* WAR value */
-#define PCIE_ASPMTIMER_EXTEND 0x01000000
- /* > rev7:
- * enable extend ASPM timer
- */
-
-/* different register spaces to access thru pcie indirect access */
-#define PCIE_CONFIGREGS 1 /* Access to config space */
-#define PCIE_PCIEREGS 2 /* Access to pcie registers */
-
-/* PCIE protocol PHY diagnostic registers */
-#define PCIE_PLP_STATUSREG 0x204 /* Status */
-
-/* Status reg PCIE_PLP_STATUSREG */
-#define PCIE_PLP_POLARITYINV_STAT 0x10
-
-/* PCIE protocol DLLP diagnostic registers */
-#define PCIE_DLLP_LCREG 0x100 /* Link Control */
-#define PCIE_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */
-
-/* PCIE protocol TLP diagnostic registers */
-#define PCIE_TLP_WORKAROUNDSREG 0x004 /* TLP Workarounds */
-
-/* Sonics to PCI translation types */
-#define SBTOPCI_PREF 0x4 /* prefetch enable */
-#define SBTOPCI_BURST 0x8 /* burst enable */
-#define SBTOPCI_RC_READMULTI 0x20 /* memory read multiple */
-
-#define PCI_CLKRUN_DSBL 0x8000 /* Bit 15 forceClkrun */
-
-/* PCI core index in SROM shadow area */
-#define SRSH_PI_OFFSET 0 /* first word */
-#define SRSH_PI_MASK 0xf000 /* bit 15:12 */
-#define SRSH_PI_SHIFT 12 /* bit 15:12 */
-
-#define PCIREGOFFS(field) offsetof(struct sbpciregs, field)
-#define PCIEREGOFFS(field) offsetof(struct sbpcieregs, field)
-
-/* Sonics side: PCI core and host control registers */
-struct sbpciregs {
- u32 control; /* PCI control */
- u32 PAD[3];
- u32 arbcontrol; /* PCI arbiter control */
- u32 clkrun; /* Clkrun Control (>=rev11) */
- u32 PAD[2];
- u32 intstatus; /* Interrupt status */
- u32 intmask; /* Interrupt mask */
- u32 sbtopcimailbox; /* Sonics to PCI mailbox */
- u32 PAD[9];
- u32 bcastaddr; /* Sonics broadcast address */
- u32 bcastdata; /* Sonics broadcast data */
- u32 PAD[2];
- u32 gpioin; /* ro: gpio input (>=rev2) */
- u32 gpioout; /* rw: gpio output (>=rev2) */
- u32 gpioouten; /* rw: gpio output enable (>= rev2) */
- u32 gpiocontrol; /* rw: gpio control (>= rev2) */
- u32 PAD[36];
- u32 sbtopci0; /* Sonics to PCI translation 0 */
- u32 sbtopci1; /* Sonics to PCI translation 1 */
- u32 sbtopci2; /* Sonics to PCI translation 2 */
- u32 PAD[189];
- u32 pcicfg[4][64]; /* 0x400 - 0x7FF, PCI Cfg Space (>=rev8) */
- u16 sprom[36]; /* SPROM shadow Area */
- u32 PAD[46];
-};
-
-/* SB side: PCIE core and host control registers */
-struct sbpcieregs {
- u32 control; /* host mode only */
- u32 PAD[2];
- u32 biststatus; /* bist Status: 0x00C */
- u32 gpiosel; /* PCIE gpio sel: 0x010 */
- u32 gpioouten; /* PCIE gpio outen: 0x14 */
- u32 PAD[2];
- u32 intstatus; /* Interrupt status: 0x20 */
- u32 intmask; /* Interrupt mask: 0x24 */
- u32 sbtopcimailbox; /* sb to pcie mailbox: 0x028 */
- u32 PAD[53];
- u32 sbtopcie0; /* sb to pcie translation 0: 0x100 */
- u32 sbtopcie1; /* sb to pcie translation 1: 0x104 */
- u32 sbtopcie2; /* sb to pcie translation 2: 0x108 */
- u32 PAD[5];
-
- /* pcie core supports in direct access to config space */
- u32 configaddr; /* pcie config space access: Address field: 0x120 */
- u32 configdata; /* pcie config space access: Data field: 0x124 */
-
- /* mdio access to serdes */
- u32 mdiocontrol; /* controls the mdio access: 0x128 */
- u32 mdiodata; /* Data to the mdio access: 0x12c */
-
- /* pcie protocol phy/dllp/tlp register indirect access mechanism */
- u32 pcieindaddr; /* indirect access to
- * the internal register: 0x130
- */
- u32 pcieinddata; /* Data to/from the internal regsiter: 0x134 */
-
- u32 clkreqenctrl; /* >= rev 6, Clkreq rdma control : 0x138 */
- u32 PAD[177];
- u32 pciecfg[4][64]; /* 0x400 - 0x7FF, PCIE Cfg Space */
- u16 sprom[64]; /* SPROM shadow Area */
-};
-
-struct pcicore_info {
- struct bcma_device *core;
- struct si_pub *sih; /* System interconnect handle */
- struct pci_dev *dev;
- u8 pciecap_lcreg_offset;/* PCIE capability LCreg offset
- * in the config space
- */
- bool pcie_pr42767;
- u8 pcie_polarity;
- u8 pcie_war_aspm_ovr; /* Override ASPM/Clkreq settings */
-
- u8 pmecap_offset; /* PM Capability offset in the config space */
- bool pmecap; /* Capable of generating PME */
-};
-
-#define PCIE_ASPM(sih) \
- ((ai_get_buscoretype(sih) == PCIE_CORE_ID) && \
- ((ai_get_buscorerev(sih) >= 3) && \
- (ai_get_buscorerev(sih) <= 5)))
-
-
-/* delay needed between the mdio control/ mdiodata register data access */
-static void pr28829_delay(void)
-{
- udelay(10);
-}
-
-/* Initialize the PCI core.
- * It's caller's responsibility to make sure that this is done only once
- */
-struct pcicore_info *pcicore_init(struct si_pub *sih, struct bcma_device *core)
-{
- struct pcicore_info *pi;
-
- /* alloc struct pcicore_info */
- pi = kzalloc(sizeof(struct pcicore_info), GFP_ATOMIC);
- if (pi == NULL)
- return NULL;
-
- pi->sih = sih;
- pi->dev = core->bus->host_pci;
- pi->core = core;
-
- if (core->id.id == PCIE_CORE_ID) {
- u8 cap_ptr;
- cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP,
- NULL, NULL);
- pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
- }
- return pi;
-}
-
-void pcicore_deinit(struct pcicore_info *pch)
-{
- kfree(pch);
-}
-
-/* return cap_offset if requested capability exists in the PCI config space */
-/* Note that it's caller's responsibility to make sure it's a pci bus */
-u8
-pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
- unsigned char *buf, u32 *buflen)
-{
- u8 cap_id;
- u8 cap_ptr = 0;
- u32 bufsize;
- u8 byte_val;
-
- /* check for Header type 0 */
- pci_read_config_byte(dev, PCI_HEADER_TYPE, &byte_val);
- if ((byte_val & 0x7f) != PCI_HEADER_TYPE_NORMAL)
- goto end;
-
- /* check if the capability pointer field exists */
- pci_read_config_byte(dev, PCI_STATUS, &byte_val);
- if (!(byte_val & PCI_STATUS_CAP_LIST))
- goto end;
-
- pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr);
- /* check if the capability pointer is 0x00 */
- if (cap_ptr == 0x00)
- goto end;
-
- /* loop thru the capability list
- * and see if the pcie capability exists
- */
-
- pci_read_config_byte(dev, cap_ptr, &cap_id);
-
- while (cap_id != req_cap_id) {
- pci_read_config_byte(dev, cap_ptr + 1, &cap_ptr);
- if (cap_ptr == 0x00)
- break;
- pci_read_config_byte(dev, cap_ptr, &cap_id);
- }
- if (cap_id != req_cap_id)
- goto end;
-
- /* found the caller requested capability */
- if (buf != NULL && buflen != NULL) {
- u8 cap_data;
-
- bufsize = *buflen;
- if (!bufsize)
- goto end;
- *buflen = 0;
- /* copy the capability data excluding cap ID and next ptr */
- cap_data = cap_ptr + 2;
- if ((bufsize + cap_data) > PCI_SZPCR)
- bufsize = PCI_SZPCR - cap_data;
- *buflen = bufsize;
- while (bufsize--) {
- pci_read_config_byte(dev, cap_data, buf);
- cap_data++;
- buf++;
- }
- }
-end:
- return cap_ptr;
-}
-
-/* ***** Register Access API */
-static uint
-pcie_readreg(struct bcma_device *core, uint addrtype, uint offset)
-{
- uint retval = 0xFFFFFFFF;
-
- switch (addrtype) {
- case PCIE_CONFIGREGS:
- bcma_write32(core, PCIEREGOFFS(configaddr), offset);
- (void)bcma_read32(core, PCIEREGOFFS(configaddr));
- retval = bcma_read32(core, PCIEREGOFFS(configdata));
- break;
- case PCIE_PCIEREGS:
- bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
- (void)bcma_read32(core, PCIEREGOFFS(pcieindaddr));
- retval = bcma_read32(core, PCIEREGOFFS(pcieinddata));
- break;
- }
-
- return retval;
-}
-
-static uint pcie_writereg(struct bcma_device *core, uint addrtype,
- uint offset, uint val)
-{
- switch (addrtype) {
- case PCIE_CONFIGREGS:
- bcma_write32(core, PCIEREGOFFS(configaddr), offset);
- bcma_write32(core, PCIEREGOFFS(configdata), val);
- break;
- case PCIE_PCIEREGS:
- bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
- bcma_write32(core, PCIEREGOFFS(pcieinddata), val);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
-{
- uint mdiodata, i = 0;
- uint pcie_serdes_spinwait = 200;
-
- mdiodata = (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
- (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
- (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) |
- (blk << 4));
- bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
-
- pr28829_delay();
- /* retry till the transaction is complete */
- while (i < pcie_serdes_spinwait) {
- if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
- MDIOCTL_ACCESS_DONE)
- break;
-
- udelay(1000);
- i++;
- }
-
- if (i >= pcie_serdes_spinwait)
- return false;
-
- return true;
-}
-
-static int
-pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
- uint *val)
-{
- uint mdiodata;
- uint i = 0;
- uint pcie_serdes_spinwait = 10;
-
- /* enable mdio access to SERDES */
- bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol),
- MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
-
- if (ai_get_buscorerev(pi->sih) >= 10) {
- /* new serdes is slower in rw,
- * using two layers of reg address mapping
- */
- if (!pcie_mdiosetblock(pi, physmedia))
- return 1;
- mdiodata = ((MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
- (regaddr << MDIODATA_REGADDR_SHF));
- pcie_serdes_spinwait *= 20;
- } else {
- mdiodata = ((physmedia << MDIODATA_DEVADDR_SHF_OLD) |
- (regaddr << MDIODATA_REGADDR_SHF_OLD));
- }
-
- if (!write)
- mdiodata |= (MDIODATA_START | MDIODATA_READ | MDIODATA_TA);
- else
- mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
- *val);
-
- bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
-
- pr28829_delay();
-
- /* retry till the transaction is complete */
- while (i < pcie_serdes_spinwait) {
- if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
- MDIOCTL_ACCESS_DONE) {
- if (!write) {
- pr28829_delay();
- *val = (bcma_read32(pi->core,
- PCIEREGOFFS(mdiodata)) &
- MDIODATA_MASK);
- }
- /* Disable mdio access to SERDES */
- bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
- return 0;
- }
- udelay(1000);
- i++;
- }
-
- /* Timed out. Disable mdio access to SERDES. */
- bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
- return 1;
-}
-
-/* use the mdio interface to read from mdio slaves */
-static int
-pcie_mdioread(struct pcicore_info *pi, uint physmedia, uint regaddr,
- uint *regval)
-{
- return pcie_mdioop(pi, physmedia, regaddr, false, regval);
-}
-
-/* use the mdio interface to write to mdio slaves */
-static int
-pcie_mdiowrite(struct pcicore_info *pi, uint physmedia, uint regaddr, uint val)
-{
- return pcie_mdioop(pi, physmedia, regaddr, true, &val);
-}
-
-/* ***** Support functions ***** */
-static u8 pcie_clkreq(struct pcicore_info *pi, u32 mask, u32 val)
-{
- u32 reg_val;
- u8 offset;
-
- offset = pi->pciecap_lcreg_offset;
- if (!offset)
- return 0;
-
- pci_read_config_dword(pi->dev, offset, &reg_val);
- /* set operation */
- if (mask) {
- if (val)
- reg_val |= PCIE_CLKREQ_ENAB;
- else
- reg_val &= ~PCIE_CLKREQ_ENAB;
- pci_write_config_dword(pi->dev, offset, reg_val);
- pci_read_config_dword(pi->dev, offset, &reg_val);
- }
- if (reg_val & PCIE_CLKREQ_ENAB)
- return 1;
- else
- return 0;
-}
-
-static void pcie_extendL1timer(struct pcicore_info *pi, bool extend)
-{
- u32 w;
- struct si_pub *sih = pi->sih;
-
- if (ai_get_buscoretype(sih) != PCIE_CORE_ID ||
- ai_get_buscorerev(sih) < 7)
- return;
-
- w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
- if (extend)
- w |= PCIE_ASPMTIMER_EXTEND;
- else
- w &= ~PCIE_ASPMTIMER_EXTEND;
- pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
- w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
-}
-
-/* centralized clkreq control policy */
-static void pcie_clkreq_upd(struct pcicore_info *pi, uint state)
-{
- struct si_pub *sih = pi->sih;
-
- switch (state) {
- case SI_DOATTACH:
- if (PCIE_ASPM(sih))
- pcie_clkreq(pi, 1, 0);
- break;
- case SI_PCIDOWN:
- /* turn on serdes PLL down */
- if (ai_get_buscorerev(sih) == 6) {
- ai_cc_reg(sih,
- offsetof(struct chipcregs, chipcontrol_addr),
- ~0, 0);
- ai_cc_reg(sih,
- offsetof(struct chipcregs, chipcontrol_data),
- ~0x40, 0);
- } else if (pi->pcie_pr42767) {
- pcie_clkreq(pi, 1, 1);
- }
- break;
- case SI_PCIUP:
- /* turn off serdes PLL down */
- if (ai_get_buscorerev(sih) == 6) {
- ai_cc_reg(sih,
- offsetof(struct chipcregs, chipcontrol_addr),
- ~0, 0);
- ai_cc_reg(sih,
- offsetof(struct chipcregs, chipcontrol_data),
- ~0x40, 0x40);
- } else if (PCIE_ASPM(sih)) { /* disable clkreq */
- pcie_clkreq(pi, 1, 0);
- }
- break;
- }
-}
-
-/* ***** PCI core WARs ***** */
-/* Done only once at attach time */
-static void pcie_war_polarity(struct pcicore_info *pi)
-{
- u32 w;
-
- if (pi->pcie_polarity != 0)
- return;
-
- w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
-
- /* Detect the current polarity at attach and force that polarity and
- * disable changing the polarity
- */
- if ((w & PCIE_PLP_POLARITYINV_STAT) == 0)
- pi->pcie_polarity = SERDES_RX_CTRL_FORCE;
- else
- pi->pcie_polarity = (SERDES_RX_CTRL_FORCE |
- SERDES_RX_CTRL_POLARITY);
-}
-
-/* enable ASPM and CLKREQ if srom doesn't have it */
-/* Needs to happen when update to shadow SROM is needed
- * : Coming out of 'standby'/'hibernate'
- * : If pcie_war_aspm_ovr state changed
- */
-static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
-{
- struct si_pub *sih = pi->sih;
- u16 val16;
- u32 w;
-
- if (!PCIE_ASPM(sih))
- return;
-
- /* bypass this on QT or VSIM */
- val16 = bcma_read16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]));
-
- val16 &= ~SRSH_ASPM_ENB;
- if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
- val16 |= SRSH_ASPM_ENB;
- else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L1_ENAB)
- val16 |= SRSH_ASPM_L1_ENB;
- else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
- val16 |= SRSH_ASPM_L0s_ENB;
-
- bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]), val16);
-
- pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
- w &= ~PCIE_ASPM_ENAB;
- w |= pi->pcie_war_aspm_ovr;
- pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);
-
- val16 = bcma_read16(pi->core,
- PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]));
-
- if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
- val16 |= SRSH_CLKREQ_ENB;
- pi->pcie_pr42767 = true;
- } else
- val16 &= ~SRSH_CLKREQ_ENB;
-
- bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]),
- val16);
-}
-
-/* Apply the polarity determined at the start */
-/* Needs to happen when coming out of 'standby'/'hibernate' */
-static void pcie_war_serdes(struct pcicore_info *pi)
-{
- u32 w = 0;
-
- if (pi->pcie_polarity != 0)
- pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CTRL,
- pi->pcie_polarity);
-
- pcie_mdioread(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, &w);
- if (w & PLL_CTRL_FREQDET_EN) {
- w &= ~PLL_CTRL_FREQDET_EN;
- pcie_mdiowrite(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, w);
- }
-}
-
-/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
-/* Needs to happen when coming out of 'standby'/'hibernate' */
-static void pcie_misc_config_fixup(struct pcicore_info *pi)
-{
- u16 val16;
-
- val16 = bcma_read16(pi->core,
- PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]));
-
- if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) {
- val16 |= SRSH_L23READY_EXIT_NOPERST;
- bcma_write16(pi->core,
- PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]), val16);
- }
-}
-
-/* quick hack for testing */
-/* Needs to happen when coming out of 'standby'/'hibernate' */
-static void pcie_war_noplldown(struct pcicore_info *pi)
-{
- /* turn off serdes PLL down */
- ai_cc_reg(pi->sih, offsetof(struct chipcregs, chipcontrol),
- CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
-
- /* clear srom shadow backdoor */
- bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_BD_OFFSET]), 0);
-}
-
-/* Needs to happen when coming out of 'standby'/'hibernate' */
-static void pcie_war_pci_setup(struct pcicore_info *pi)
-{
- struct si_pub *sih = pi->sih;
- u32 w;
-
- if (ai_get_buscorerev(sih) == 0 || ai_get_buscorerev(sih) == 1) {
- w = pcie_readreg(pi->core, PCIE_PCIEREGS,
- PCIE_TLP_WORKAROUNDSREG);
- w |= 0x8;
- pcie_writereg(pi->core, PCIE_PCIEREGS,
- PCIE_TLP_WORKAROUNDSREG, w);
- }
-
- if (ai_get_buscorerev(sih) == 1) {
- w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
- w |= 0x40;
- pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
- }
-
- if (ai_get_buscorerev(sih) == 0) {
- pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
- pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
- pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
- } else if (PCIE_ASPM(sih)) {
- /* Change the L1 threshold for better performance */
- w = pcie_readreg(pi->core, PCIE_PCIEREGS,
- PCIE_DLLP_PMTHRESHREG);
- w &= ~PCIE_L1THRESHOLDTIME_MASK;
- w |= PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT;
- pcie_writereg(pi->core, PCIE_PCIEREGS,
- PCIE_DLLP_PMTHRESHREG, w);
-
- pcie_war_serdes(pi);
-
- pcie_war_aspm_clkreq(pi);
- } else if (ai_get_buscorerev(pi->sih) == 7)
- pcie_war_noplldown(pi);
-
- /* Note that the fix is actually in the SROM,
- * that's why this is open-ended
- */
- if (ai_get_buscorerev(pi->sih) >= 6)
- pcie_misc_config_fixup(pi);
-}
-
-/* ***** Functions called during driver state changes ***** */
-void pcicore_attach(struct pcicore_info *pi, int state)
-{
- struct si_pub *sih = pi->sih;
- u32 bfl2 = (u32)getintvar(sih, BRCMS_SROM_BOARDFLAGS2);
-
- /* Determine if this board needs override */
- if (PCIE_ASPM(sih)) {
- if (bfl2 & BFL2_PCIEWAR_OVR)
- pi->pcie_war_aspm_ovr = PCIE_ASPM_DISAB;
- else
- pi->pcie_war_aspm_ovr = PCIE_ASPM_ENAB;
- }
-
- /* These need to happen in this order only */
- pcie_war_polarity(pi);
-
- pcie_war_serdes(pi);
-
- pcie_war_aspm_clkreq(pi);
-
- pcie_clkreq_upd(pi, state);
-
-}
-
-void pcicore_hwup(struct pcicore_info *pi)
-{
- if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
- return;
-
- pcie_war_pci_setup(pi);
-}
-
-void pcicore_up(struct pcicore_info *pi, int state)
-{
- if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
- return;
-
- /* Restore L1 timer for better performance */
- pcie_extendL1timer(pi, true);
-
- pcie_clkreq_upd(pi, state);
-}
-
-/* When the device is going to enter D3 state
- * (or the system is going to enter S3/S4 states)
- */
-void pcicore_sleep(struct pcicore_info *pi)
-{
- u32 w;
-
- if (!pi || !PCIE_ASPM(pi->sih))
- return;
-
- pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
- w &= ~PCIE_CAP_LCREG_ASPML1;
- pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);
-
- pi->pcie_pr42767 = false;
-}
-
-void pcicore_down(struct pcicore_info *pi, int state)
-{
- if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
- return;
-
- pcie_clkreq_upd(pi, state);
-
- /* Reduce L1 timer for better power savings */
- pcie_extendL1timer(pi, false);
-}
-
-void pcicore_fixcfg(struct pcicore_info *pi)
-{
- struct bcma_device *core = pi->core;
- u16 val16;
- uint regoff;
-
- switch (pi->core->id.id) {
- case BCMA_CORE_PCI:
- regoff = PCIREGOFFS(sprom[SRSH_PI_OFFSET]);
- break;
-
- case BCMA_CORE_PCIE:
- regoff = PCIEREGOFFS(sprom[SRSH_PI_OFFSET]);
- break;
-
- default:
- return;
- }
-
- val16 = bcma_read16(pi->core, regoff);
- if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) !=
- (u16)core->core_index) {
- val16 = ((u16)core->core_index << SRSH_PI_SHIFT) |
- (val16 & ~SRSH_PI_MASK);
- bcma_write16(pi->core, regoff, val16);
- }
-}
-
-/* precondition: current core is pci core */
-void
-pcicore_pci_setup(struct pcicore_info *pi)
-{
- bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
- SBTOPCI_PREF | SBTOPCI_BURST);
-
- if (pi->core->id.rev >= 11) {
- bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
- SBTOPCI_RC_READMULTI);
- bcma_set32(pi->core, PCIREGOFFS(clkrun), PCI_CLKRUN_DSBL);
- (void)bcma_read32(pi->core, PCIREGOFFS(clkrun));
- }
-}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
deleted file mode 100644
index 9fc3ead540a8..000000000000
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_NICPCI_H_
-#define _BRCM_NICPCI_H_
-
-#include "types.h"
-
-/* PCI configuration address space size */
-#define PCI_SZPCR 256
-
-/* Brcm PCI configuration registers */
-/* backplane address space accessed by BAR0 */
-#define PCI_BAR0_WIN 0x80
-/* sprom property control */
-#define PCI_SPROM_CONTROL 0x88
-/* mask of PCI and other cores interrupts */
-#define PCI_INT_MASK 0x94
-/* backplane core interrupt mask bits offset */
-#define PCI_SBIM_SHIFT 8
-/* backplane address space accessed by second 4KB of BAR0 */
-#define PCI_BAR0_WIN2 0xac
-/* pci config space gpio input (>=rev3) */
-#define PCI_GPIO_IN 0xb0
-/* pci config space gpio output (>=rev3) */
-#define PCI_GPIO_OUT 0xb4
-/* pci config space gpio output enable (>=rev3) */
-#define PCI_GPIO_OUTEN 0xb8
-
-/* bar0 + 4K accesses external sprom */
-#define PCI_BAR0_SPROM_OFFSET (4 * 1024)
-/* bar0 + 6K accesses pci core registers */
-#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024)
-/*
- * pci core SB registers are at the end of the
- * 8KB window, so their address is the "regular"
- * address plus 4K
- */
-#define PCI_BAR0_PCISBR_OFFSET (4 * 1024)
-/* bar0 window size Match with corerev 13 */
-#define PCI_BAR0_WINSZ (16 * 1024)
-/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */
-/* bar0 + 8K accesses pci/pcie core registers */
-#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024)
-/* bar0 + 12K accesses chipc core registers */
-#define PCI_16KB0_CCREGS_OFFSET (12 * 1024)
-
-struct sbpciregs;
-struct sbpcieregs;
-
-extern struct pcicore_info *pcicore_init(struct si_pub *sih,
- struct bcma_device *core);
-extern void pcicore_deinit(struct pcicore_info *pch);
-extern void pcicore_attach(struct pcicore_info *pch, int state);
-extern void pcicore_hwup(struct pcicore_info *pch);
-extern void pcicore_up(struct pcicore_info *pch, int state);
-extern void pcicore_sleep(struct pcicore_info *pch);
-extern void pcicore_down(struct pcicore_info *pch, int state);
-extern u8 pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
- unsigned char *buf, u32 *buflen);
-extern void pcicore_fixcfg(struct pcicore_info *pch);
-extern void pcicore_pci_setup(struct pcicore_info *pch);
-
-#endif /* _BRCM_NICPCI_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/otp.c b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
deleted file mode 100644
index f1ca12625860..000000000000
--- a/drivers/net/wireless/brcm80211/brcmsmac/otp.c
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/io.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-
-#include <brcm_hw_ids.h>
-#include <chipcommon.h>
-#include "aiutils.h"
-#include "otp.h"
-
-#define OTPS_GUP_MASK 0x00000f00
-#define OTPS_GUP_SHIFT 8
-/* h/w subregion is programmed */
-#define OTPS_GUP_HW 0x00000100
-/* s/w subregion is programmed */
-#define OTPS_GUP_SW 0x00000200
-/* chipid/pkgopt subregion is programmed */
-#define OTPS_GUP_CI 0x00000400
-/* fuse subregion is programmed */
-#define OTPS_GUP_FUSE 0x00000800
-
-/* Fields in otpprog in rev >= 21 */
-#define OTPP_COL_MASK 0x000000ff
-#define OTPP_COL_SHIFT 0
-#define OTPP_ROW_MASK 0x0000ff00
-#define OTPP_ROW_SHIFT 8
-#define OTPP_OC_MASK 0x0f000000
-#define OTPP_OC_SHIFT 24
-#define OTPP_READERR 0x10000000
-#define OTPP_VALUE_MASK 0x20000000
-#define OTPP_VALUE_SHIFT 29
-#define OTPP_START_BUSY 0x80000000
-#define OTPP_READ 0x40000000
-
-/* Opcodes for OTPP_OC field */
-#define OTPPOC_READ 0
-#define OTPPOC_BIT_PROG 1
-#define OTPPOC_VERIFY 3
-#define OTPPOC_INIT 4
-#define OTPPOC_SET 5
-#define OTPPOC_RESET 6
-#define OTPPOC_OCST 7
-#define OTPPOC_ROW_LOCK 8
-#define OTPPOC_PRESCN_TEST 9
-
-#define OTPTYPE_IPX(ccrev) ((ccrev) == 21 || (ccrev) >= 23)
-
-#define OTPP_TRIES 10000000 /* # of tries for OTPP */
-
-#define MAXNUMRDES 9 /* Maximum OTP redundancy entries */
-
-/* Fixed size subregions sizes in words */
-#define OTPGU_CI_SZ 2
-
-struct otpinfo;
-
-/* OTP function struct */
-struct otp_fn_s {
- int (*init)(struct si_pub *sih, struct otpinfo *oi);
- int (*read_region)(struct otpinfo *oi, int region, u16 *data,
- uint *wlen);
-};
-
-struct otpinfo {
- struct bcma_device *core; /* chipc core */
- const struct otp_fn_s *fn; /* OTP functions */
- struct si_pub *sih; /* Saved sb handle */
-
- /* IPX OTP section */
- u16 wsize; /* Size of otp in words */
- u16 rows; /* Geometry */
- u16 cols; /* Geometry */
- u32 status; /* Flag bits (lock/prog/rv).
- * (Reflected only when OTP is power cycled)
- */
- u16 hwbase; /* hardware subregion offset */
- u16 hwlim; /* hardware subregion boundary */
- u16 swbase; /* software subregion offset */
- u16 swlim; /* software subregion boundary */
- u16 fbase; /* fuse subregion offset */
- u16 flim; /* fuse subregion boundary */
- int otpgu_base; /* offset to General Use Region */
-};
-
-/* OTP layout */
-/* CC revs 21, 24 and 27 OTP General Use Region word offset */
-#define REVA4_OTPGU_BASE 12
-
-/* CC revs 23, 25, 26, 28 and above OTP General Use Region word offset */
-#define REVB8_OTPGU_BASE 20
-
-/* CC rev 36 OTP General Use Region word offset */
-#define REV36_OTPGU_BASE 12
-
-/* Subregion word offsets in General Use region */
-#define OTPGU_HSB_OFF 0
-#define OTPGU_SFB_OFF 1
-#define OTPGU_CI_OFF 2
-#define OTPGU_P_OFF 3
-#define OTPGU_SROM_OFF 4
-
-/* Flag bit offsets in General Use region */
-#define OTPGU_HWP_OFF 60
-#define OTPGU_SWP_OFF 61
-#define OTPGU_CIP_OFF 62
-#define OTPGU_FUSEP_OFF 63
-#define OTPGU_CIP_MSK 0x4000
-#define OTPGU_P_MSK 0xf000
-#define OTPGU_P_SHIFT (OTPGU_HWP_OFF % 16)
-
-/* OTP Size */
-#define OTP_SZ_FU_324 ((roundup(324, 8))/8) /* 324 bits */
-#define OTP_SZ_FU_288 (288/8) /* 288 bits */
-#define OTP_SZ_FU_216 (216/8) /* 216 bits */
-#define OTP_SZ_FU_72 (72/8) /* 72 bits */
-#define OTP_SZ_CHECKSUM (16/8) /* 16 bits */
-#define OTP4315_SWREG_SZ 178 /* 178 bytes */
-#define OTP_SZ_FU_144 (144/8) /* 144 bits */
-
-static u16
-ipxotp_otpr(struct otpinfo *oi, uint wn)
-{
- return bcma_read16(oi->core,
- CHIPCREGOFFS(sromotp[wn]));
-}
-
-/*
- * Calculate max HW/SW region byte size by subtracting fuse region
- * and checksum size; osizew is oi->wsize (OTP size - GU size) in words
- */
-static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
-{
- int ret = 0;
-
- switch (ai_get_chip_id(sih)) {
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
- break;
- case BCM4313_CHIP_ID:
- ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
- break;
- default:
- break; /* Don't know about this chip */
- }
-
- return ret;
-}
-
-static void _ipxotp_init(struct otpinfo *oi)
-{
- uint k;
- u32 otpp, st;
- int ccrev = ai_get_ccrev(oi->sih);
-
-
- /*
- * record word offset of General Use Region
- * for various chipcommon revs
- */
- if (ccrev == 21 || ccrev == 24
- || ccrev == 27) {
- oi->otpgu_base = REVA4_OTPGU_BASE;
- } else if (ccrev == 36) {
- /*
- * OTP size greater than or equal to 2KB (128 words),
- * otpgu_base is similar to rev23
- */
- if (oi->wsize >= 128)
- oi->otpgu_base = REVB8_OTPGU_BASE;
- else
- oi->otpgu_base = REV36_OTPGU_BASE;
- } else if (ccrev == 23 || ccrev >= 25) {
- oi->otpgu_base = REVB8_OTPGU_BASE;
- }
-
- /* First issue an init command so the status is up to date */
- otpp =
- OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK);
-
- bcma_write32(oi->core, CHIPCREGOFFS(otpprog), otpp);
- st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
- for (k = 0; (st & OTPP_START_BUSY) && (k < OTPP_TRIES); k++)
- st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
- if (k >= OTPP_TRIES)
- return;
-
- /* Read OTP lock bits and subregion programmed indication bits */
- oi->status = bcma_read32(oi->core, CHIPCREGOFFS(otpstatus));
-
- if ((ai_get_chip_id(oi->sih) == BCM43224_CHIP_ID)
- || (ai_get_chip_id(oi->sih) == BCM43225_CHIP_ID)) {
- u32 p_bits;
- p_bits = (ipxotp_otpr(oi, oi->otpgu_base + OTPGU_P_OFF) &
- OTPGU_P_MSK) >> OTPGU_P_SHIFT;
- oi->status |= (p_bits << OTPS_GUP_SHIFT);
- }
-
- /*
- * h/w region base and fuse region limit are fixed to
- * the top and the bottom of the general use region.
- * Everything else can be flexible.
- */
- oi->hwbase = oi->otpgu_base + OTPGU_SROM_OFF;
- oi->hwlim = oi->wsize;
- if (oi->status & OTPS_GUP_HW) {
- oi->hwlim =
- ipxotp_otpr(oi, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
- oi->swbase = oi->hwlim;
- } else
- oi->swbase = oi->hwbase;
-
- /* subtract fuse and checksum from beginning */
- oi->swlim = ipxotp_max_rgnsz(oi->sih, oi->wsize) / 2;
-
- if (oi->status & OTPS_GUP_SW) {
- oi->swlim =
- ipxotp_otpr(oi, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
- oi->fbase = oi->swlim;
- } else
- oi->fbase = oi->swbase;
-
- oi->flim = oi->wsize;
-}
-
-static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
-{
- /* Make sure we're running IPX OTP */
- if (!OTPTYPE_IPX(ai_get_ccrev(sih)))
- return -EBADE;
-
- /* Make sure OTP is not disabled */
- if (ai_is_otp_disabled(sih))
- return -EBADE;
-
- /* Check for otp size */
- switch ((ai_get_cccaps(sih) & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
- case 0:
- /* Nothing there */
- return -EBADE;
- case 1: /* 32x64 */
- oi->rows = 32;
- oi->cols = 64;
- oi->wsize = 128;
- break;
- case 2: /* 64x64 */
- oi->rows = 64;
- oi->cols = 64;
- oi->wsize = 256;
- break;
- case 5: /* 96x64 */
- oi->rows = 96;
- oi->cols = 64;
- oi->wsize = 384;
- break;
- case 7: /* 16x64 *//* 1024 bits */
- oi->rows = 16;
- oi->cols = 64;
- oi->wsize = 64;
- break;
- default:
- /* Don't know the geometry */
- return -EBADE;
- }
-
- /* Retrieve OTP region info */
- _ipxotp_init(oi);
- return 0;
-}
-
-static int
-ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
-{
- uint base, i, sz;
-
- /* Validate region selection */
- switch (region) {
- case OTP_HW_RGN:
- sz = (uint) oi->hwlim - oi->hwbase;
- if (!(oi->status & OTPS_GUP_HW)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->hwbase;
- break;
- case OTP_SW_RGN:
- sz = ((uint) oi->swlim - oi->swbase);
- if (!(oi->status & OTPS_GUP_SW)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->swbase;
- break;
- case OTP_CI_RGN:
- sz = OTPGU_CI_SZ;
- if (!(oi->status & OTPS_GUP_CI)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->otpgu_base + OTPGU_CI_OFF;
- break;
- case OTP_FUSE_RGN:
- sz = (uint) oi->flim - oi->fbase;
- if (!(oi->status & OTPS_GUP_FUSE)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->fbase;
- break;
- case OTP_ALL_RGN:
- sz = ((uint) oi->flim - oi->hwbase);
- if (!(oi->status & (OTPS_GUP_HW | OTPS_GUP_SW))) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->hwbase;
- break;
- default:
- return -EINVAL;
- }
-
- /* Read the data */
- for (i = 0; i < sz; i++)
- data[i] = ipxotp_otpr(oi, base + i);
-
- *wlen = sz;
- return 0;
-}
-
-static const struct otp_fn_s ipxotp_fn = {
- (int (*)(struct si_pub *, struct otpinfo *)) ipxotp_init,
- (int (*)(struct otpinfo *, int, u16 *, uint *)) ipxotp_read_region,
-};
-
-static int otp_init(struct si_pub *sih, struct otpinfo *oi)
-{
- int ret;
-
- memset(oi, 0, sizeof(struct otpinfo));
-
- oi->core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
-
- if (OTPTYPE_IPX(ai_get_ccrev(sih)))
- oi->fn = &ipxotp_fn;
-
- if (oi->fn == NULL)
- return -EBADE;
-
- oi->sih = sih;
-
- ret = (oi->fn->init)(sih, oi);
-
- return ret;
-}
-
-int
-otp_read_region(struct si_pub *sih, int region, u16 *data, uint *wlen) {
- struct otpinfo otpinfo;
- struct otpinfo *oi = &otpinfo;
- int err = 0;
-
- if (ai_is_otp_disabled(sih)) {
- err = -EPERM;
- goto out;
- }
-
- err = otp_init(sih, oi);
- if (err)
- goto out;
-
- err = ((oi)->fn->read_region)(oi, region, data, wlen);
-
- out:
- return err;
-}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/otp.h b/drivers/net/wireless/brcm80211/brcmsmac/otp.h
deleted file mode 100644
index 6b6d31cf9569..000000000000
--- a/drivers/net/wireless/brcm80211/brcmsmac/otp.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_OTP_H_
-#define _BRCM_OTP_H_
-
-#include "types.h"
-
-/* OTP regions */
-#define OTP_HW_RGN 1
-#define OTP_SW_RGN 2
-#define OTP_CI_RGN 4
-#define OTP_FUSE_RGN 8
-/* From h/w region to end of OTP including checksum */
-#define OTP_ALL_RGN 0xf
-
-/* OTP Size */
-#define OTP_SZ_MAX (6144/8) /* maximum bytes in one CIS */
-
-extern int otp_read_region(struct si_pub *sih, int region, u16 *data,
- uint *wlen);
-
-#endif /* _BRCM_OTP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index 0fce56235f38..abfd78822fb8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -4817,28 +4817,23 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
s8 txpwr = 0;
int i;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- struct phy_shim_info *shim = pi->sh->physhim;
+ struct ssb_sprom *sprom = &pi->d11core->bus->sprom;
if (CHSPEC_IS2G(pi->radio_chanspec)) {
u16 cckpo = 0;
u32 offset_ofdm, offset_mcs;
- pi_lcn->lcnphy_tr_isolation_mid =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_TRISO2G);
+ pi_lcn->lcnphy_tr_isolation_mid = sprom->fem.ghz2.tr_iso;
- pi_lcn->lcnphy_rx_power_offset =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_RXPO2G);
+ pi_lcn->lcnphy_rx_power_offset = sprom->rxpo2g;
- pi->txpa_2g[0] = (s16)wlapi_getintvar(shim, BRCMS_SROM_PA0B0);
- pi->txpa_2g[1] = (s16)wlapi_getintvar(shim, BRCMS_SROM_PA0B1);
- pi->txpa_2g[2] = (s16)wlapi_getintvar(shim, BRCMS_SROM_PA0B2);
+ pi->txpa_2g[0] = sprom->pa0b0;
+ pi->txpa_2g[1] = sprom->pa0b1;
+ pi->txpa_2g[2] = sprom->pa0b2;
- pi_lcn->lcnphy_rssi_vf =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_RSSISMF2G);
- pi_lcn->lcnphy_rssi_vc =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_RSSISMC2G);
- pi_lcn->lcnphy_rssi_gs =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_RSSISAV2G);
+ pi_lcn->lcnphy_rssi_vf = sprom->rssismf2g;
+ pi_lcn->lcnphy_rssi_vc = sprom->rssismc2g;
+ pi_lcn->lcnphy_rssi_gs = sprom->rssisav2g;
pi_lcn->lcnphy_rssi_vf_lowtemp = pi_lcn->lcnphy_rssi_vf;
pi_lcn->lcnphy_rssi_vc_lowtemp = pi_lcn->lcnphy_rssi_vc;
@@ -4848,7 +4843,7 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
pi_lcn->lcnphy_rssi_vc_hightemp = pi_lcn->lcnphy_rssi_vc;
pi_lcn->lcnphy_rssi_gs_hightemp = pi_lcn->lcnphy_rssi_gs;
- txpwr = (s8)wlapi_getintvar(shim, BRCMS_SROM_MAXP2GA0);
+ txpwr = sprom->core_pwr_info[0].maxpwr_2g;
pi->tx_srom_max_2g = txpwr;
for (i = 0; i < PWRTBL_NUM_COEFF; i++) {
@@ -4856,8 +4851,8 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
pi->txpa_2g_high_temp[i] = pi->txpa_2g[i];
}
- cckpo = (u16)wlapi_getintvar(shim, BRCMS_SROM_CCK2GPO);
- offset_ofdm = (u32)wlapi_getintvar(shim, BRCMS_SROM_OFDM2GPO);
+ cckpo = sprom->cck2gpo;
+ offset_ofdm = sprom->ofdm2gpo;
if (cckpo) {
uint max_pwr_chan = txpwr;
@@ -4876,7 +4871,7 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
} else {
u8 opo = 0;
- opo = (u8)wlapi_getintvar(shim, BRCMS_SROM_OPO);
+ opo = sprom->opo;
for (i = TXP_FIRST_CCK; i <= TXP_LAST_CCK; i++)
pi->tx_srom_max_rate_2g[i] = txpwr;
@@ -4886,12 +4881,8 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
((offset_ofdm & 0xf) * 2);
offset_ofdm >>= 4;
}
- offset_mcs =
- wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO1) << 16;
- offset_mcs |=
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO0);
+ offset_mcs = sprom->mcs2gpo[1] << 16;
+ offset_mcs |= sprom->mcs2gpo[0];
pi_lcn->lcnphy_mcs20_po = offset_mcs;
for (i = TXP_FIRST_SISO_MCS_20;
i <= TXP_LAST_SISO_MCS_20; i++) {
@@ -4901,25 +4892,17 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
}
}
- pi_lcn->lcnphy_rawtempsense =
- (u16)wlapi_getintvar(shim, BRCMS_SROM_RAWTEMPSENSE);
- pi_lcn->lcnphy_measPower =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_MEASPOWER);
- pi_lcn->lcnphy_tempsense_slope =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_TEMPSENSE_SLOPE);
- pi_lcn->lcnphy_hw_iqcal_en =
- (bool)wlapi_getintvar(shim, BRCMS_SROM_HW_IQCAL_EN);
- pi_lcn->lcnphy_iqcal_swp_dis =
- (bool)wlapi_getintvar(shim, BRCMS_SROM_IQCAL_SWP_DIS);
- pi_lcn->lcnphy_tempcorrx =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_TEMPCORRX);
- pi_lcn->lcnphy_tempsense_option =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_TEMPSENSE_OPTION);
- pi_lcn->lcnphy_freqoffset_corr =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_FREQOFFSET_CORR);
- if ((u8)wlapi_getintvar(shim, BRCMS_SROM_AA2G) > 1)
+ pi_lcn->lcnphy_rawtempsense = sprom->rawtempsense;
+ pi_lcn->lcnphy_measPower = sprom->measpower;
+ pi_lcn->lcnphy_tempsense_slope = sprom->tempsense_slope;
+ pi_lcn->lcnphy_hw_iqcal_en = sprom->hw_iqcal_en;
+ pi_lcn->lcnphy_iqcal_swp_dis = sprom->iqcal_swp_dis;
+ pi_lcn->lcnphy_tempcorrx = sprom->tempcorrx;
+ pi_lcn->lcnphy_tempsense_option = sprom->tempsense_option;
+ pi_lcn->lcnphy_freqoffset_corr = sprom->freqoffset_corr;
+ if (sprom->ant_available_bg > 1)
wlc_phy_ant_rxdiv_set((struct brcms_phy_pub *) pi,
- (u8) wlapi_getintvar(shim, BRCMS_SROM_AA2G));
+ sprom->ant_available_bg);
}
pi_lcn->lcnphy_cck_dig_filt_type = -1;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index 812b6e38526e..13b261517cce 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -14386,30 +14386,30 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
{
u16 bw40po, cddpo, stbcpo, bwduppo;
uint band_num;
- struct phy_shim_info *shim = pi->sh->physhim;
+ struct ssb_sprom *sprom = &pi->d11core->bus->sprom;
if (pi->sh->sromrev >= 9)
return;
- bw40po = (u16) wlapi_getintvar(shim, BRCMS_SROM_BW40PO);
+ bw40po = sprom->bw40po;
pi->bw402gpo = bw40po & 0xf;
pi->bw405gpo = (bw40po & 0xf0) >> 4;
pi->bw405glpo = (bw40po & 0xf00) >> 8;
pi->bw405ghpo = (bw40po & 0xf000) >> 12;
- cddpo = (u16) wlapi_getintvar(shim, BRCMS_SROM_CDDPO);
+ cddpo = sprom->cddpo;
pi->cdd2gpo = cddpo & 0xf;
pi->cdd5gpo = (cddpo & 0xf0) >> 4;
pi->cdd5glpo = (cddpo & 0xf00) >> 8;
pi->cdd5ghpo = (cddpo & 0xf000) >> 12;
- stbcpo = (u16) wlapi_getintvar(shim, BRCMS_SROM_STBCPO);
+ stbcpo = sprom->stbcpo;
pi->stbc2gpo = stbcpo & 0xf;
pi->stbc5gpo = (stbcpo & 0xf0) >> 4;
pi->stbc5glpo = (stbcpo & 0xf00) >> 8;
pi->stbc5ghpo = (stbcpo & 0xf000) >> 12;
- bwduppo = (u16) wlapi_getintvar(shim, BRCMS_SROM_BWDUPPO);
+ bwduppo = sprom->bwduppo;
pi->bwdup2gpo = bwduppo & 0xf;
pi->bwdup5gpo = (bwduppo & 0xf0) >> 4;
pi->bwdup5glpo = (bwduppo & 0xf00) >> 8;
@@ -14419,242 +14419,137 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
band_num++) {
switch (band_num) {
case 0:
-
pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_2g =
- (s8) wlapi_getintvar(shim,
- BRCMS_SROM_MAXP2GA0);
+ sprom->core_pwr_info[0].maxpwr_2g;
pi->nphy_pwrctrl_info[PHY_CORE_1].max_pwr_2g =
- (s8) wlapi_getintvar(shim,
- BRCMS_SROM_MAXP2GA1);
+ sprom->core_pwr_info[1].maxpwr_2g;
pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_2g_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA2GW0A0);
+ sprom->core_pwr_info[0].pa_2g[0];
pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_2g_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA2GW0A1);
+ sprom->core_pwr_info[1].pa_2g[0];
pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_2g_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA2GW1A0);
+ sprom->core_pwr_info[0].pa_2g[1];
pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_2g_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA2GW1A1);
+ sprom->core_pwr_info[1].pa_2g[1];
pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_2g_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA2GW2A0);
+ sprom->core_pwr_info[0].pa_2g[2];
pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_2g_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA2GW2A1);
+ sprom->core_pwr_info[1].pa_2g[2];
pi->nphy_pwrctrl_info[PHY_CORE_0].idle_targ_2g =
- (s8) wlapi_getintvar(shim, BRCMS_SROM_ITT2GA0);
+ sprom->core_pwr_info[0].itssi_2g;
pi->nphy_pwrctrl_info[PHY_CORE_1].idle_targ_2g =
- (s8) wlapi_getintvar(shim, BRCMS_SROM_ITT2GA1);
-
- pi->cck2gpo = (u16) wlapi_getintvar(shim,
- BRCMS_SROM_CCK2GPO);
-
- pi->ofdm2gpo =
- (u32) wlapi_getintvar(shim,
- BRCMS_SROM_OFDM2GPO);
-
- pi->mcs2gpo[0] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO0);
- pi->mcs2gpo[1] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO1);
- pi->mcs2gpo[2] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO2);
- pi->mcs2gpo[3] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO3);
- pi->mcs2gpo[4] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO4);
- pi->mcs2gpo[5] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO5);
- pi->mcs2gpo[6] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO6);
- pi->mcs2gpo[7] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO7);
+ sprom->core_pwr_info[1].itssi_2g;
+
+ pi->cck2gpo = sprom->cck2gpo;
+
+ pi->ofdm2gpo = sprom->ofdm2gpo;
+
+ pi->mcs2gpo[0] = sprom->mcs2gpo[0];
+ pi->mcs2gpo[1] = sprom->mcs2gpo[1];
+ pi->mcs2gpo[2] = sprom->mcs2gpo[2];
+ pi->mcs2gpo[3] = sprom->mcs2gpo[3];
+ pi->mcs2gpo[4] = sprom->mcs2gpo[4];
+ pi->mcs2gpo[5] = sprom->mcs2gpo[5];
+ pi->mcs2gpo[6] = sprom->mcs2gpo[6];
+ pi->mcs2gpo[7] = sprom->mcs2gpo[7];
break;
case 1:
pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_5gm =
- (s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP5GA0);
+ sprom->core_pwr_info[0].maxpwr_5g;
pi->nphy_pwrctrl_info[PHY_CORE_1].max_pwr_5gm =
- (s8) wlapi_getintvar(shim,
- BRCMS_SROM_MAXP5GA1);
+ sprom->core_pwr_info[1].maxpwr_5g;
pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_5gm_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GW0A0);
+ sprom->core_pwr_info[0].pa_5g[0];
pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_5gm_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GW0A1);
+ sprom->core_pwr_info[1].pa_5g[0];
pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_5gm_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GW1A0);
+ sprom->core_pwr_info[0].pa_5g[1];
pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_5gm_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GW1A1);
+ sprom->core_pwr_info[1].pa_5g[1];
pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_5gm_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GW2A0);
+ sprom->core_pwr_info[0].pa_5g[2];
pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_5gm_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GW2A1);
+ sprom->core_pwr_info[1].pa_5g[2];
pi->nphy_pwrctrl_info[PHY_CORE_0].idle_targ_5gm =
- (s8) wlapi_getintvar(shim, BRCMS_SROM_ITT5GA0);
+ sprom->core_pwr_info[0].itssi_5g;
pi->nphy_pwrctrl_info[PHY_CORE_1].idle_targ_5gm =
- (s8) wlapi_getintvar(shim, BRCMS_SROM_ITT5GA1);
-
- pi->ofdm5gpo =
- (u32) wlapi_getintvar(shim,
- BRCMS_SROM_OFDM5GPO);
-
- pi->mcs5gpo[0] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO0);
- pi->mcs5gpo[1] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO1);
- pi->mcs5gpo[2] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO2);
- pi->mcs5gpo[3] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO3);
- pi->mcs5gpo[4] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO4);
- pi->mcs5gpo[5] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO5);
- pi->mcs5gpo[6] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO6);
- pi->mcs5gpo[7] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO7);
+ sprom->core_pwr_info[1].itssi_5g;
+
+ pi->ofdm5gpo = sprom->ofdm5gpo;
+
+ pi->mcs5gpo[0] = sprom->mcs5gpo[0];
+ pi->mcs5gpo[1] = sprom->mcs5gpo[1];
+ pi->mcs5gpo[2] = sprom->mcs5gpo[2];
+ pi->mcs5gpo[3] = sprom->mcs5gpo[3];
+ pi->mcs5gpo[4] = sprom->mcs5gpo[4];
+ pi->mcs5gpo[5] = sprom->mcs5gpo[5];
+ pi->mcs5gpo[6] = sprom->mcs5gpo[6];
+ pi->mcs5gpo[7] = sprom->mcs5gpo[7];
break;
case 2:
pi->nphy_pwrctrl_info[0].max_pwr_5gl =
- (s8) wlapi_getintvar(shim,
- BRCMS_SROM_MAXP5GLA0);
+ sprom->core_pwr_info[0].maxpwr_5gl;
pi->nphy_pwrctrl_info[1].max_pwr_5gl =
- (s8) wlapi_getintvar(shim,
- BRCMS_SROM_MAXP5GLA1);
+ sprom->core_pwr_info[1].maxpwr_5gl;
pi->nphy_pwrctrl_info[0].pwrdet_5gl_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GLW0A0);
+ sprom->core_pwr_info[0].pa_5gl[0];
pi->nphy_pwrctrl_info[1].pwrdet_5gl_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GLW0A1);
+ sprom->core_pwr_info[1].pa_5gl[0];
pi->nphy_pwrctrl_info[0].pwrdet_5gl_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GLW1A0);
+ sprom->core_pwr_info[0].pa_5gl[1];
pi->nphy_pwrctrl_info[1].pwrdet_5gl_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GLW1A1);
+ sprom->core_pwr_info[1].pa_5gl[1];
pi->nphy_pwrctrl_info[0].pwrdet_5gl_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GLW2A0);
+ sprom->core_pwr_info[0].pa_5gl[2];
pi->nphy_pwrctrl_info[1].pwrdet_5gl_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GLW2A1);
+ sprom->core_pwr_info[1].pa_5gl[2];
pi->nphy_pwrctrl_info[0].idle_targ_5gl = 0;
pi->nphy_pwrctrl_info[1].idle_targ_5gl = 0;
- pi->ofdm5glpo =
- (u32) wlapi_getintvar(shim,
- BRCMS_SROM_OFDM5GLPO);
-
- pi->mcs5glpo[0] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO0);
- pi->mcs5glpo[1] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO1);
- pi->mcs5glpo[2] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO2);
- pi->mcs5glpo[3] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO3);
- pi->mcs5glpo[4] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO4);
- pi->mcs5glpo[5] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO5);
- pi->mcs5glpo[6] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO6);
- pi->mcs5glpo[7] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO7);
+ pi->ofdm5glpo = sprom->ofdm5glpo;
+
+ pi->mcs5glpo[0] = sprom->mcs5glpo[0];
+ pi->mcs5glpo[1] = sprom->mcs5glpo[1];
+ pi->mcs5glpo[2] = sprom->mcs5glpo[2];
+ pi->mcs5glpo[3] = sprom->mcs5glpo[3];
+ pi->mcs5glpo[4] = sprom->mcs5glpo[4];
+ pi->mcs5glpo[5] = sprom->mcs5glpo[5];
+ pi->mcs5glpo[6] = sprom->mcs5glpo[6];
+ pi->mcs5glpo[7] = sprom->mcs5glpo[7];
break;
case 3:
pi->nphy_pwrctrl_info[0].max_pwr_5gh =
- (s8) wlapi_getintvar(shim,
- BRCMS_SROM_MAXP5GHA0);
+ sprom->core_pwr_info[0].maxpwr_5gh;
pi->nphy_pwrctrl_info[1].max_pwr_5gh =
- (s8) wlapi_getintvar(shim,
- BRCMS_SROM_MAXP5GHA1);
+ sprom->core_pwr_info[1].maxpwr_5gh;
pi->nphy_pwrctrl_info[0].pwrdet_5gh_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GHW0A0);
+ sprom->core_pwr_info[0].pa_5gh[0];
pi->nphy_pwrctrl_info[1].pwrdet_5gh_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GHW0A1);
+ sprom->core_pwr_info[1].pa_5gh[0];
pi->nphy_pwrctrl_info[0].pwrdet_5gh_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GHW1A0);
+ sprom->core_pwr_info[0].pa_5gh[1];
pi->nphy_pwrctrl_info[1].pwrdet_5gh_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GHW1A1);
+ sprom->core_pwr_info[1].pa_5gh[1];
pi->nphy_pwrctrl_info[0].pwrdet_5gh_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GHW2A0);
+ sprom->core_pwr_info[0].pa_5gh[2];
pi->nphy_pwrctrl_info[1].pwrdet_5gh_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GHW2A1);
+ sprom->core_pwr_info[1].pa_5gh[2];
pi->nphy_pwrctrl_info[0].idle_targ_5gh = 0;
pi->nphy_pwrctrl_info[1].idle_targ_5gh = 0;
- pi->ofdm5ghpo =
- (u32) wlapi_getintvar(shim,
- BRCMS_SROM_OFDM5GHPO);
-
- pi->mcs5ghpo[0] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO0);
- pi->mcs5ghpo[1] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO1);
- pi->mcs5ghpo[2] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO2);
- pi->mcs5ghpo[3] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO3);
- pi->mcs5ghpo[4] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO4);
- pi->mcs5ghpo[5] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO5);
- pi->mcs5ghpo[6] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO6);
- pi->mcs5ghpo[7] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO7);
+ pi->ofdm5ghpo = sprom->ofdm5ghpo;
+
+ pi->mcs5ghpo[0] = sprom->mcs5ghpo[0];
+ pi->mcs5ghpo[1] = sprom->mcs5ghpo[1];
+ pi->mcs5ghpo[2] = sprom->mcs5ghpo[2];
+ pi->mcs5ghpo[3] = sprom->mcs5ghpo[3];
+ pi->mcs5ghpo[4] = sprom->mcs5ghpo[4];
+ pi->mcs5ghpo[5] = sprom->mcs5ghpo[5];
+ pi->mcs5ghpo[6] = sprom->mcs5ghpo[6];
+ pi->mcs5ghpo[7] = sprom->mcs5ghpo[7];
break;
}
}
@@ -14664,45 +14559,34 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
{
- struct phy_shim_info *shim = pi->sh->physhim;
-
- pi->antswitch = (u8) wlapi_getintvar(shim, BRCMS_SROM_ANTSWITCH);
- pi->aa2g = (u8) wlapi_getintvar(shim, BRCMS_SROM_AA2G);
- pi->aa5g = (u8) wlapi_getintvar(shim, BRCMS_SROM_AA5G);
-
- pi->srom_fem2g.tssipos = (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TSSIPOS2G);
- pi->srom_fem2g.extpagain = (u8) wlapi_getintvar(shim,
- BRCMS_SROM_EXTPAGAIN2G);
- pi->srom_fem2g.pdetrange = (u8) wlapi_getintvar(shim,
- BRCMS_SROM_PDETRANGE2G);
- pi->srom_fem2g.triso = (u8) wlapi_getintvar(shim, BRCMS_SROM_TRISO2G);
- pi->srom_fem2g.antswctrllut =
- (u8) wlapi_getintvar(shim, BRCMS_SROM_ANTSWCTL2G);
-
- pi->srom_fem5g.tssipos = (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TSSIPOS5G);
- pi->srom_fem5g.extpagain = (u8) wlapi_getintvar(shim,
- BRCMS_SROM_EXTPAGAIN5G);
- pi->srom_fem5g.pdetrange = (u8) wlapi_getintvar(shim,
- BRCMS_SROM_PDETRANGE5G);
- pi->srom_fem5g.triso = (u8) wlapi_getintvar(shim, BRCMS_SROM_TRISO5G);
- if (wlapi_getvar(shim, BRCMS_SROM_ANTSWCTL5G))
- pi->srom_fem5g.antswctrllut =
- (u8) wlapi_getintvar(shim, BRCMS_SROM_ANTSWCTL5G);
+ struct ssb_sprom *sprom = &pi->d11core->bus->sprom;
+
+ pi->antswitch = sprom->antswitch;
+ pi->aa2g = sprom->ant_available_bg;
+ pi->aa5g = sprom->ant_available_a;
+
+ pi->srom_fem2g.tssipos = sprom->fem.ghz2.tssipos;
+ pi->srom_fem2g.extpagain = sprom->fem.ghz2.extpa_gain;
+ pi->srom_fem2g.pdetrange = sprom->fem.ghz2.pdet_range;
+ pi->srom_fem2g.triso = sprom->fem.ghz2.tr_iso;
+ pi->srom_fem2g.antswctrllut = sprom->fem.ghz2.antswlut;
+
+ pi->srom_fem5g.tssipos = sprom->fem.ghz5.tssipos;
+ pi->srom_fem5g.extpagain = sprom->fem.ghz5.extpa_gain;
+ pi->srom_fem5g.pdetrange = sprom->fem.ghz5.pdet_range;
+ pi->srom_fem5g.triso = sprom->fem.ghz5.tr_iso;
+ if (sprom->fem.ghz5.antswlut)
+ pi->srom_fem5g.antswctrllut = sprom->fem.ghz5.antswlut;
else
- pi->srom_fem5g.antswctrllut =
- (u8) wlapi_getintvar(shim, BRCMS_SROM_ANTSWCTL2G);
+ pi->srom_fem5g.antswctrllut = sprom->fem.ghz2.antswlut;
wlc_phy_txpower_ipa_upd(pi);
- pi->phy_txcore_disable_temp =
- (s16) wlapi_getintvar(shim, BRCMS_SROM_TEMPTHRESH);
+ pi->phy_txcore_disable_temp = sprom->tempthresh;
if (pi->phy_txcore_disable_temp == 0)
pi->phy_txcore_disable_temp = PHY_CHAIN_TX_DISABLE_TEMP;
- pi->phy_tempsense_offset = (s8) wlapi_getintvar(shim,
- BRCMS_SROM_TEMPOFFSET);
+ pi->phy_tempsense_offset = sprom->tempoffset;
if (pi->phy_tempsense_offset != 0) {
if (pi->phy_tempsense_offset >
(NPHY_SROM_TEMPSHIFT + NPHY_SROM_MAXTEMPOFFSET))
@@ -14717,8 +14601,7 @@ static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
pi->phy_txcore_enable_temp =
pi->phy_txcore_disable_temp - PHY_HYSTERESIS_DELTATEMP;
- pi->phycal_tempdelta =
- (u8) wlapi_getintvar(shim, BRCMS_SROM_PHYCAL_TEMPDELTA);
+ pi->phycal_tempdelta = sprom->phycal_tempdelta;
if (pi->phycal_tempdelta > NPHY_CAL_MAXTEMPDELTA)
pi->phycal_tempdelta = 0;
@@ -21460,7 +21343,7 @@ void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init)
write_phy_reg(pi, 0xc8, 0x0);
write_phy_reg(pi, 0xc9, 0x0);
- ai_gpiocontrol(pi->sh->sih, mask, mask, GPIO_DRV_PRIORITY);
+ bcma_chipco_gpio_control(&pi->d11core->bus->drv_cc, mask, mask);
mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
mc &= ~MCTL_GPOUT_SEL_MASK;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c
index 5926854f62e2..a0de5db0cd64 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c
@@ -214,12 +214,3 @@ wlapi_copyto_objmem(struct phy_shim_info *physhim, uint offset, const void *buf,
{
brcms_b_copyto_objmem(physhim->wlc_hw, offset, buf, l, sel);
}
-
-char *wlapi_getvar(struct phy_shim_info *physhim, enum brcms_srom_id id)
-{
- return getvar(physhim->wlc_hw->sih, id);
-}
-int wlapi_getintvar(struct phy_shim_info *physhim, enum brcms_srom_id id)
-{
- return getintvar(physhim->wlc_hw->sih, id);
-}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
index 9168c459b185..2c5b66b75970 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
@@ -175,8 +175,5 @@ extern void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint,
extern void wlapi_high_update_phy_mode(struct phy_shim_info *physhim,
u32 phy_mode);
extern u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
-extern char *wlapi_getvar(struct phy_shim_info *physhim, enum brcms_srom_id id);
-extern int wlapi_getintvar(struct phy_shim_info *physhim,
- enum brcms_srom_id id);
#endif /* _BRCM_PHY_SHIM_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index f0038ad7d7bf..aa5d67f8d874 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -22,232 +22,6 @@
#include "types.h"
#include "defs.h"
-enum brcms_srom_id {
- BRCMS_SROM_NULL,
- BRCMS_SROM_CONT,
- BRCMS_SROM_AA2G,
- BRCMS_SROM_AA5G,
- BRCMS_SROM_AG0,
- BRCMS_SROM_AG1,
- BRCMS_SROM_AG2,
- BRCMS_SROM_AG3,
- BRCMS_SROM_ANTSWCTL2G,
- BRCMS_SROM_ANTSWCTL5G,
- BRCMS_SROM_ANTSWITCH,
- BRCMS_SROM_BOARDFLAGS2,
- BRCMS_SROM_BOARDFLAGS,
- BRCMS_SROM_BOARDNUM,
- BRCMS_SROM_BOARDREV,
- BRCMS_SROM_BOARDTYPE,
- BRCMS_SROM_BW40PO,
- BRCMS_SROM_BWDUPPO,
- BRCMS_SROM_BXA2G,
- BRCMS_SROM_BXA5G,
- BRCMS_SROM_CC,
- BRCMS_SROM_CCK2GPO,
- BRCMS_SROM_CCKBW202GPO,
- BRCMS_SROM_CCKBW20UL2GPO,
- BRCMS_SROM_CCODE,
- BRCMS_SROM_CDDPO,
- BRCMS_SROM_DEVID,
- BRCMS_SROM_ET1MACADDR,
- BRCMS_SROM_EXTPAGAIN2G,
- BRCMS_SROM_EXTPAGAIN5G,
- BRCMS_SROM_FREQOFFSET_CORR,
- BRCMS_SROM_HW_IQCAL_EN,
- BRCMS_SROM_IL0MACADDR,
- BRCMS_SROM_IQCAL_SWP_DIS,
- BRCMS_SROM_LEDBH0,
- BRCMS_SROM_LEDBH1,
- BRCMS_SROM_LEDBH2,
- BRCMS_SROM_LEDBH3,
- BRCMS_SROM_LEDDC,
- BRCMS_SROM_LEGOFDM40DUPPO,
- BRCMS_SROM_LEGOFDMBW202GPO,
- BRCMS_SROM_LEGOFDMBW205GHPO,
- BRCMS_SROM_LEGOFDMBW205GLPO,
- BRCMS_SROM_LEGOFDMBW205GMPO,
- BRCMS_SROM_LEGOFDMBW20UL2GPO,
- BRCMS_SROM_LEGOFDMBW20UL5GHPO,
- BRCMS_SROM_LEGOFDMBW20UL5GLPO,
- BRCMS_SROM_LEGOFDMBW20UL5GMPO,
- BRCMS_SROM_MACADDR,
- BRCMS_SROM_MCS2GPO0,
- BRCMS_SROM_MCS2GPO1,
- BRCMS_SROM_MCS2GPO2,
- BRCMS_SROM_MCS2GPO3,
- BRCMS_SROM_MCS2GPO4,
- BRCMS_SROM_MCS2GPO5,
- BRCMS_SROM_MCS2GPO6,
- BRCMS_SROM_MCS2GPO7,
- BRCMS_SROM_MCS32PO,
- BRCMS_SROM_MCS5GHPO0,
- BRCMS_SROM_MCS5GHPO1,
- BRCMS_SROM_MCS5GHPO2,
- BRCMS_SROM_MCS5GHPO3,
- BRCMS_SROM_MCS5GHPO4,
- BRCMS_SROM_MCS5GHPO5,
- BRCMS_SROM_MCS5GHPO6,
- BRCMS_SROM_MCS5GHPO7,
- BRCMS_SROM_MCS5GLPO0,
- BRCMS_SROM_MCS5GLPO1,
- BRCMS_SROM_MCS5GLPO2,
- BRCMS_SROM_MCS5GLPO3,
- BRCMS_SROM_MCS5GLPO4,
- BRCMS_SROM_MCS5GLPO5,
- BRCMS_SROM_MCS5GLPO6,
- BRCMS_SROM_MCS5GLPO7,
- BRCMS_SROM_MCS5GPO0,
- BRCMS_SROM_MCS5GPO1,
- BRCMS_SROM_MCS5GPO2,
- BRCMS_SROM_MCS5GPO3,
- BRCMS_SROM_MCS5GPO4,
- BRCMS_SROM_MCS5GPO5,
- BRCMS_SROM_MCS5GPO6,
- BRCMS_SROM_MCS5GPO7,
- BRCMS_SROM_MCSBW202GPO,
- BRCMS_SROM_MCSBW205GHPO,
- BRCMS_SROM_MCSBW205GLPO,
- BRCMS_SROM_MCSBW205GMPO,
- BRCMS_SROM_MCSBW20UL2GPO,
- BRCMS_SROM_MCSBW20UL5GHPO,
- BRCMS_SROM_MCSBW20UL5GLPO,
- BRCMS_SROM_MCSBW20UL5GMPO,
- BRCMS_SROM_MCSBW402GPO,
- BRCMS_SROM_MCSBW405GHPO,
- BRCMS_SROM_MCSBW405GLPO,
- BRCMS_SROM_MCSBW405GMPO,
- BRCMS_SROM_MEASPOWER,
- BRCMS_SROM_OFDM2GPO,
- BRCMS_SROM_OFDM5GHPO,
- BRCMS_SROM_OFDM5GLPO,
- BRCMS_SROM_OFDM5GPO,
- BRCMS_SROM_OPO,
- BRCMS_SROM_PA0B0,
- BRCMS_SROM_PA0B1,
- BRCMS_SROM_PA0B2,
- BRCMS_SROM_PA0ITSSIT,
- BRCMS_SROM_PA0MAXPWR,
- BRCMS_SROM_PA1B0,
- BRCMS_SROM_PA1B1,
- BRCMS_SROM_PA1B2,
- BRCMS_SROM_PA1HIB0,
- BRCMS_SROM_PA1HIB1,
- BRCMS_SROM_PA1HIB2,
- BRCMS_SROM_PA1HIMAXPWR,
- BRCMS_SROM_PA1ITSSIT,
- BRCMS_SROM_PA1LOB0,
- BRCMS_SROM_PA1LOB1,
- BRCMS_SROM_PA1LOB2,
- BRCMS_SROM_PA1LOMAXPWR,
- BRCMS_SROM_PA1MAXPWR,
- BRCMS_SROM_PDETRANGE2G,
- BRCMS_SROM_PDETRANGE5G,
- BRCMS_SROM_PHYCAL_TEMPDELTA,
- BRCMS_SROM_RAWTEMPSENSE,
- BRCMS_SROM_REGREV,
- BRCMS_SROM_REV,
- BRCMS_SROM_RSSISAV2G,
- BRCMS_SROM_RSSISAV5G,
- BRCMS_SROM_RSSISMC2G,
- BRCMS_SROM_RSSISMC5G,
- BRCMS_SROM_RSSISMF2G,
- BRCMS_SROM_RSSISMF5G,
- BRCMS_SROM_RXCHAIN,
- BRCMS_SROM_RXPO2G,
- BRCMS_SROM_RXPO5G,
- BRCMS_SROM_STBCPO,
- BRCMS_SROM_TEMPCORRX,
- BRCMS_SROM_TEMPOFFSET,
- BRCMS_SROM_TEMPSENSE_OPTION,
- BRCMS_SROM_TEMPSENSE_SLOPE,
- BRCMS_SROM_TEMPTHRESH,
- BRCMS_SROM_TRI2G,
- BRCMS_SROM_TRI5GH,
- BRCMS_SROM_TRI5GL,
- BRCMS_SROM_TRI5G,
- BRCMS_SROM_TRISO2G,
- BRCMS_SROM_TRISO5G,
- BRCMS_SROM_TSSIPOS2G,
- BRCMS_SROM_TSSIPOS5G,
- BRCMS_SROM_TXCHAIN,
- /*
- * per-path identifiers (see srom.c)
- */
- BRCMS_SROM_ITT2GA0,
- BRCMS_SROM_ITT2GA1,
- BRCMS_SROM_ITT2GA2,
- BRCMS_SROM_ITT2GA3,
- BRCMS_SROM_ITT5GA0,
- BRCMS_SROM_ITT5GA1,
- BRCMS_SROM_ITT5GA2,
- BRCMS_SROM_ITT5GA3,
- BRCMS_SROM_MAXP2GA0,
- BRCMS_SROM_MAXP2GA1,
- BRCMS_SROM_MAXP2GA2,
- BRCMS_SROM_MAXP2GA3,
- BRCMS_SROM_MAXP5GA0,
- BRCMS_SROM_MAXP5GA1,
- BRCMS_SROM_MAXP5GA2,
- BRCMS_SROM_MAXP5GA3,
- BRCMS_SROM_MAXP5GHA0,
- BRCMS_SROM_MAXP5GHA1,
- BRCMS_SROM_MAXP5GHA2,
- BRCMS_SROM_MAXP5GHA3,
- BRCMS_SROM_MAXP5GLA0,
- BRCMS_SROM_MAXP5GLA1,
- BRCMS_SROM_MAXP5GLA2,
- BRCMS_SROM_MAXP5GLA3,
- BRCMS_SROM_PA2GW0A0,
- BRCMS_SROM_PA2GW0A1,
- BRCMS_SROM_PA2GW0A2,
- BRCMS_SROM_PA2GW0A3,
- BRCMS_SROM_PA2GW1A0,
- BRCMS_SROM_PA2GW1A1,
- BRCMS_SROM_PA2GW1A2,
- BRCMS_SROM_PA2GW1A3,
- BRCMS_SROM_PA2GW2A0,
- BRCMS_SROM_PA2GW2A1,
- BRCMS_SROM_PA2GW2A2,
- BRCMS_SROM_PA2GW2A3,
- BRCMS_SROM_PA5GHW0A0,
- BRCMS_SROM_PA5GHW0A1,
- BRCMS_SROM_PA5GHW0A2,
- BRCMS_SROM_PA5GHW0A3,
- BRCMS_SROM_PA5GHW1A0,
- BRCMS_SROM_PA5GHW1A1,
- BRCMS_SROM_PA5GHW1A2,
- BRCMS_SROM_PA5GHW1A3,
- BRCMS_SROM_PA5GHW2A0,
- BRCMS_SROM_PA5GHW2A1,
- BRCMS_SROM_PA5GHW2A2,
- BRCMS_SROM_PA5GHW2A3,
- BRCMS_SROM_PA5GLW0A0,
- BRCMS_SROM_PA5GLW0A1,
- BRCMS_SROM_PA5GLW0A2,
- BRCMS_SROM_PA5GLW0A3,
- BRCMS_SROM_PA5GLW1A0,
- BRCMS_SROM_PA5GLW1A1,
- BRCMS_SROM_PA5GLW1A2,
- BRCMS_SROM_PA5GLW1A3,
- BRCMS_SROM_PA5GLW2A0,
- BRCMS_SROM_PA5GLW2A1,
- BRCMS_SROM_PA5GLW2A2,
- BRCMS_SROM_PA5GLW2A3,
- BRCMS_SROM_PA5GW0A0,
- BRCMS_SROM_PA5GW0A1,
- BRCMS_SROM_PA5GW0A2,
- BRCMS_SROM_PA5GW0A3,
- BRCMS_SROM_PA5GW1A0,
- BRCMS_SROM_PA5GW1A1,
- BRCMS_SROM_PA5GW1A2,
- BRCMS_SROM_PA5GW1A3,
- BRCMS_SROM_PA5GW2A0,
- BRCMS_SROM_PA5GW2A1,
- BRCMS_SROM_PA5GW2A2,
- BRCMS_SROM_PA5GW2A3,
-};
-
#define BRCMS_NUMRATES 16 /* max # of rates in a rateset */
/* phy types */
@@ -565,8 +339,6 @@ extern void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
struct ieee80211_sta *sta, u16 tid);
extern void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
u8 ba_wsize, uint max_rx_ampdu_bytes);
-extern char *getvar(struct si_pub *sih, enum brcms_srom_id id);
-extern int getintvar(struct si_pub *sih, enum brcms_srom_id id);
extern int brcms_c_module_register(struct brcms_pub *pub,
const char *name, struct brcms_info *hdl,
int (*down_fn)(void *handle));
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.c b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
deleted file mode 100644
index b96f4b9d74bd..000000000000
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.c
+++ /dev/null
@@ -1,980 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/io.h>
-#include <linux/etherdevice.h>
-#include <linux/crc8.h>
-#include <stdarg.h>
-
-#include <chipcommon.h>
-#include <brcmu_utils.h>
-#include "pub.h"
-#include "nicpci.h"
-#include "aiutils.h"
-#include "otp.h"
-#include "srom.h"
-#include "soc.h"
-
-/*
- * SROM CRC8 polynomial value:
- *
- * x^8 + x^7 +x^6 + x^4 + x^2 + 1
- */
-#define SROM_CRC8_POLY 0xAB
-
-/* Maximum srom: 6 Kilobits == 768 bytes */
-#define SROM_MAX 768
-
-/* PCI fields */
-#define PCI_F0DEVID 48
-
-#define SROM_WORDS 64
-
-#define SROM_SSID 2
-
-#define SROM_WL1LHMAXP 29
-
-#define SROM_WL1LPAB0 30
-#define SROM_WL1LPAB1 31
-#define SROM_WL1LPAB2 32
-
-#define SROM_WL1HPAB0 33
-#define SROM_WL1HPAB1 34
-#define SROM_WL1HPAB2 35
-
-#define SROM_MACHI_IL0 36
-#define SROM_MACMID_IL0 37
-#define SROM_MACLO_IL0 38
-#define SROM_MACHI_ET1 42
-#define SROM_MACMID_ET1 43
-#define SROM_MACLO_ET1 44
-
-#define SROM_BXARSSI2G 40
-#define SROM_BXARSSI5G 41
-
-#define SROM_TRI52G 42
-#define SROM_TRI5GHL 43
-
-#define SROM_RXPO52G 45
-
-#define SROM_AABREV 46
-/* Fields in AABREV */
-#define SROM_BR_MASK 0x00ff
-#define SROM_CC_MASK 0x0f00
-#define SROM_CC_SHIFT 8
-#define SROM_AA0_MASK 0x3000
-#define SROM_AA0_SHIFT 12
-#define SROM_AA1_MASK 0xc000
-#define SROM_AA1_SHIFT 14
-
-#define SROM_WL0PAB0 47
-#define SROM_WL0PAB1 48
-#define SROM_WL0PAB2 49
-
-#define SROM_LEDBH10 50
-#define SROM_LEDBH32 51
-
-#define SROM_WL10MAXP 52
-
-#define SROM_WL1PAB0 53
-#define SROM_WL1PAB1 54
-#define SROM_WL1PAB2 55
-
-#define SROM_ITT 56
-
-#define SROM_BFL 57
-#define SROM_BFL2 28
-
-#define SROM_AG10 58
-
-#define SROM_CCODE 59
-
-#define SROM_OPO 60
-
-#define SROM_CRCREV 63
-
-#define SROM4_WORDS 220
-
-#define SROM4_TXCHAIN_MASK 0x000f
-#define SROM4_RXCHAIN_MASK 0x00f0
-#define SROM4_SWITCH_MASK 0xff00
-
-/* Per-path fields */
-#define MAX_PATH_SROM 4
-
-#define SROM4_CRCREV 219
-
-/* SROM Rev 8: Make space for a 48-word hardware header for PCIe rev >= 6.
- * This is a combined srom for both MIMO and SISO boards, usable in
- * the .130 4-kilobit OTP with hardware redundancy.
- */
-#define SROM8_BREV 65
-
-#define SROM8_BFL0 66
-#define SROM8_BFL1 67
-#define SROM8_BFL2 68
-#define SROM8_BFL3 69
-
-#define SROM8_MACHI 70
-#define SROM8_MACMID 71
-#define SROM8_MACLO 72
-
-#define SROM8_CCODE 73
-#define SROM8_REGREV 74
-
-#define SROM8_LEDBH10 75
-#define SROM8_LEDBH32 76
-
-#define SROM8_LEDDC 77
-
-#define SROM8_AA 78
-
-#define SROM8_AG10 79
-#define SROM8_AG32 80
-
-#define SROM8_TXRXC 81
-
-#define SROM8_BXARSSI2G 82
-#define SROM8_BXARSSI5G 83
-#define SROM8_TRI52G 84
-#define SROM8_TRI5GHL 85
-#define SROM8_RXPO52G 86
-
-#define SROM8_FEM2G 87
-#define SROM8_FEM5G 88
-#define SROM8_FEM_ANTSWLUT_MASK 0xf800
-#define SROM8_FEM_ANTSWLUT_SHIFT 11
-#define SROM8_FEM_TR_ISO_MASK 0x0700
-#define SROM8_FEM_TR_ISO_SHIFT 8
-#define SROM8_FEM_PDET_RANGE_MASK 0x00f8
-#define SROM8_FEM_PDET_RANGE_SHIFT 3
-#define SROM8_FEM_EXTPA_GAIN_MASK 0x0006
-#define SROM8_FEM_EXTPA_GAIN_SHIFT 1
-#define SROM8_FEM_TSSIPOS_MASK 0x0001
-#define SROM8_FEM_TSSIPOS_SHIFT 0
-
-#define SROM8_THERMAL 89
-
-/* Temp sense related entries */
-#define SROM8_MPWR_RAWTS 90
-#define SROM8_TS_SLP_OPT_CORRX 91
-/* FOC: frequency offset correction, HWIQ: H/W IQCAL enable,
- * IQSWP: IQ CAL swap disable */
-#define SROM8_FOC_HWIQ_IQSWP 92
-
-/* Temperature delta for PHY calibration */
-#define SROM8_PHYCAL_TEMPDELTA 93
-
-/* Per-path offsets & fields */
-#define SROM8_PATH0 96
-#define SROM8_PATH1 112
-#define SROM8_PATH2 128
-#define SROM8_PATH3 144
-
-#define SROM8_2G_ITT_MAXP 0
-#define SROM8_2G_PA 1
-#define SROM8_5G_ITT_MAXP 4
-#define SROM8_5GLH_MAXP 5
-#define SROM8_5G_PA 6
-#define SROM8_5GL_PA 9
-#define SROM8_5GH_PA 12
-
-/* All the myriad power offsets */
-#define SROM8_2G_CCKPO 160
-
-#define SROM8_2G_OFDMPO 161
-#define SROM8_5G_OFDMPO 163
-#define SROM8_5GL_OFDMPO 165
-#define SROM8_5GH_OFDMPO 167
-
-#define SROM8_2G_MCSPO 169
-#define SROM8_5G_MCSPO 177
-#define SROM8_5GL_MCSPO 185
-#define SROM8_5GH_MCSPO 193
-
-#define SROM8_CDDPO 201
-#define SROM8_STBCPO 202
-#define SROM8_BW40PO 203
-#define SROM8_BWDUPPO 204
-
-/* SISO PA parameters are in the path0 spaces */
-#define SROM8_SISO 96
-
-/* Legacy names for SISO PA parameters */
-#define SROM8_W0_ITTMAXP (SROM8_SISO + SROM8_2G_ITT_MAXP)
-#define SROM8_W0_PAB0 (SROM8_SISO + SROM8_2G_PA)
-#define SROM8_W0_PAB1 (SROM8_SISO + SROM8_2G_PA + 1)
-#define SROM8_W0_PAB2 (SROM8_SISO + SROM8_2G_PA + 2)
-#define SROM8_W1_ITTMAXP (SROM8_SISO + SROM8_5G_ITT_MAXP)
-#define SROM8_W1_MAXP_LCHC (SROM8_SISO + SROM8_5GLH_MAXP)
-#define SROM8_W1_PAB0 (SROM8_SISO + SROM8_5G_PA)
-#define SROM8_W1_PAB1 (SROM8_SISO + SROM8_5G_PA + 1)
-#define SROM8_W1_PAB2 (SROM8_SISO + SROM8_5G_PA + 2)
-#define SROM8_W1_PAB0_LC (SROM8_SISO + SROM8_5GL_PA)
-#define SROM8_W1_PAB1_LC (SROM8_SISO + SROM8_5GL_PA + 1)
-#define SROM8_W1_PAB2_LC (SROM8_SISO + SROM8_5GL_PA + 2)
-#define SROM8_W1_PAB0_HC (SROM8_SISO + SROM8_5GH_PA)
-#define SROM8_W1_PAB1_HC (SROM8_SISO + SROM8_5GH_PA + 1)
-#define SROM8_W1_PAB2_HC (SROM8_SISO + SROM8_5GH_PA + 2)
-
-/* SROM REV 9 */
-#define SROM9_2GPO_CCKBW20 160
-#define SROM9_2GPO_CCKBW20UL 161
-#define SROM9_2GPO_LOFDMBW20 162
-#define SROM9_2GPO_LOFDMBW20UL 164
-
-#define SROM9_5GLPO_LOFDMBW20 166
-#define SROM9_5GLPO_LOFDMBW20UL 168
-#define SROM9_5GMPO_LOFDMBW20 170
-#define SROM9_5GMPO_LOFDMBW20UL 172
-#define SROM9_5GHPO_LOFDMBW20 174
-#define SROM9_5GHPO_LOFDMBW20UL 176
-
-#define SROM9_2GPO_MCSBW20 178
-#define SROM9_2GPO_MCSBW20UL 180
-#define SROM9_2GPO_MCSBW40 182
-
-#define SROM9_5GLPO_MCSBW20 184
-#define SROM9_5GLPO_MCSBW20UL 186
-#define SROM9_5GLPO_MCSBW40 188
-#define SROM9_5GMPO_MCSBW20 190
-#define SROM9_5GMPO_MCSBW20UL 192
-#define SROM9_5GMPO_MCSBW40 194
-#define SROM9_5GHPO_MCSBW20 196
-#define SROM9_5GHPO_MCSBW20UL 198
-#define SROM9_5GHPO_MCSBW40 200
-
-#define SROM9_PO_MCS32 202
-#define SROM9_PO_LOFDM40DUP 203
-
-/* SROM flags (see sromvar_t) */
-
-/* value continues as described by the next entry */
-#define SRFL_MORE 1
-#define SRFL_NOFFS 2 /* value bits can't be all one's */
-#define SRFL_PRHEX 4 /* value is in hexadecimal format */
-#define SRFL_PRSIGN 8 /* value is in signed decimal format */
-#define SRFL_CCODE 0x10 /* value is in country code format */
-#define SRFL_ETHADDR 0x20 /* value is an Ethernet address */
-#define SRFL_LEDDC 0x40 /* value is an LED duty cycle */
-/* do not generate an nvram param, entry is for mfgc */
-#define SRFL_NOVAR 0x80
-
-/* Max. nvram variable table size */
-#define MAXSZ_NVRAM_VARS 4096
-
-/*
- * indicates type of value.
- */
-enum brcms_srom_var_type {
- BRCMS_SROM_STRING,
- BRCMS_SROM_SNUMBER,
- BRCMS_SROM_UNUMBER
-};
-
-/*
- * storage type for srom variable.
- *
- * var_list: for linked list operations.
- * varid: identifier of the variable.
- * var_type: type of variable.
- * buf: variable value when var_type == BRCMS_SROM_STRING.
- * uval: unsigned variable value when var_type == BRCMS_SROM_UNUMBER.
- * sval: signed variable value when var_type == BRCMS_SROM_SNUMBER.
- */
-struct brcms_srom_list_head {
- struct list_head var_list;
- enum brcms_srom_id varid;
- enum brcms_srom_var_type var_type;
- union {
- char buf[0];
- u32 uval;
- s32 sval;
- };
-};
-
-struct brcms_sromvar {
- enum brcms_srom_id varid;
- u32 revmask;
- u32 flags;
- u16 off;
- u16 mask;
-};
-
-struct brcms_varbuf {
- char *base; /* pointer to buffer base */
- char *buf; /* pointer to current position */
- unsigned int size; /* current (residual) size in bytes */
-};
-
-/*
- * Assumptions:
- * - Ethernet address spans across 3 consecutive words
- *
- * Table rules:
- * - Add multiple entries next to each other if a value spans across multiple
- * words (even multiple fields in the same word) with each entry except the
- * last having its SRFL_MORE bit set.
- * - Ethernet address entry does not follow above rule and must not have
- * SRFL_MORE bit set. Its SRFL_ETHADDR bit implies it takes multiple words.
- * - The last entry's name field must be NULL to indicate the end of the table.
- * Other entries must have non-NULL name.
- */
-static const struct brcms_sromvar pci_sromvars[] = {
- {BRCMS_SROM_DEVID, 0xffffff00, SRFL_PRHEX | SRFL_NOVAR, PCI_F0DEVID,
- 0xffff},
- {BRCMS_SROM_BOARDREV, 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL0,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM8_BFL1, 0xffff},
- {BRCMS_SROM_BOARDFLAGS2, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL2,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM8_BFL3, 0xffff},
- {BRCMS_SROM_BOARDTYPE, 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff},
- {BRCMS_SROM_BOARDNUM, 0xffffff00, 0, SROM8_MACLO, 0xffff},
- {BRCMS_SROM_REGREV, 0xffffff00, 0, SROM8_REGREV, 0x00ff},
- {BRCMS_SROM_LEDBH0, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff},
- {BRCMS_SROM_LEDBH1, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0xff00},
- {BRCMS_SROM_LEDBH2, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff},
- {BRCMS_SROM_LEDBH3, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0xff00},
- {BRCMS_SROM_PA0B0, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff},
- {BRCMS_SROM_PA0B1, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff},
- {BRCMS_SROM_PA0B2, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff},
- {BRCMS_SROM_PA0ITSSIT, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0xff00},
- {BRCMS_SROM_PA0MAXPWR, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0x00ff},
- {BRCMS_SROM_OPO, 0xffffff00, 0, SROM8_2G_OFDMPO, 0x00ff},
- {BRCMS_SROM_AA2G, 0xffffff00, 0, SROM8_AA, 0x00ff},
- {BRCMS_SROM_AA5G, 0xffffff00, 0, SROM8_AA, 0xff00},
- {BRCMS_SROM_AG0, 0xffffff00, 0, SROM8_AG10, 0x00ff},
- {BRCMS_SROM_AG1, 0xffffff00, 0, SROM8_AG10, 0xff00},
- {BRCMS_SROM_AG2, 0xffffff00, 0, SROM8_AG32, 0x00ff},
- {BRCMS_SROM_AG3, 0xffffff00, 0, SROM8_AG32, 0xff00},
- {BRCMS_SROM_PA1B0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff},
- {BRCMS_SROM_PA1B1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff},
- {BRCMS_SROM_PA1B2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff},
- {BRCMS_SROM_PA1LOB0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0_LC, 0xffff},
- {BRCMS_SROM_PA1LOB1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1_LC, 0xffff},
- {BRCMS_SROM_PA1LOB2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2_LC, 0xffff},
- {BRCMS_SROM_PA1HIB0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0_HC, 0xffff},
- {BRCMS_SROM_PA1HIB1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1_HC, 0xffff},
- {BRCMS_SROM_PA1HIB2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2_HC, 0xffff},
- {BRCMS_SROM_PA1ITSSIT, 0xffffff00, 0, SROM8_W1_ITTMAXP, 0xff00},
- {BRCMS_SROM_PA1MAXPWR, 0xffffff00, 0, SROM8_W1_ITTMAXP, 0x00ff},
- {BRCMS_SROM_PA1LOMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0xff00},
- {BRCMS_SROM_PA1HIMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0x00ff},
- {BRCMS_SROM_BXA2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x1800},
- {BRCMS_SROM_RSSISAV2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x0700},
- {BRCMS_SROM_RSSISMC2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x00f0},
- {BRCMS_SROM_RSSISMF2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x000f},
- {BRCMS_SROM_BXA5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x1800},
- {BRCMS_SROM_RSSISAV5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x0700},
- {BRCMS_SROM_RSSISMC5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x00f0},
- {BRCMS_SROM_RSSISMF5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x000f},
- {BRCMS_SROM_TRI2G, 0xffffff00, 0, SROM8_TRI52G, 0x00ff},
- {BRCMS_SROM_TRI5G, 0xffffff00, 0, SROM8_TRI52G, 0xff00},
- {BRCMS_SROM_TRI5GL, 0xffffff00, 0, SROM8_TRI5GHL, 0x00ff},
- {BRCMS_SROM_TRI5GH, 0xffffff00, 0, SROM8_TRI5GHL, 0xff00},
- {BRCMS_SROM_RXPO2G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff},
- {BRCMS_SROM_RXPO5G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00},
- {BRCMS_SROM_TXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
- SROM4_TXCHAIN_MASK},
- {BRCMS_SROM_RXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
- SROM4_RXCHAIN_MASK},
- {BRCMS_SROM_ANTSWITCH, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
- SROM4_SWITCH_MASK},
- {BRCMS_SROM_TSSIPOS2G, 0xffffff00, 0, SROM8_FEM2G,
- SROM8_FEM_TSSIPOS_MASK},
- {BRCMS_SROM_EXTPAGAIN2G, 0xffffff00, 0, SROM8_FEM2G,
- SROM8_FEM_EXTPA_GAIN_MASK},
- {BRCMS_SROM_PDETRANGE2G, 0xffffff00, 0, SROM8_FEM2G,
- SROM8_FEM_PDET_RANGE_MASK},
- {BRCMS_SROM_TRISO2G, 0xffffff00, 0, SROM8_FEM2G, SROM8_FEM_TR_ISO_MASK},
- {BRCMS_SROM_ANTSWCTL2G, 0xffffff00, 0, SROM8_FEM2G,
- SROM8_FEM_ANTSWLUT_MASK},
- {BRCMS_SROM_TSSIPOS5G, 0xffffff00, 0, SROM8_FEM5G,
- SROM8_FEM_TSSIPOS_MASK},
- {BRCMS_SROM_EXTPAGAIN5G, 0xffffff00, 0, SROM8_FEM5G,
- SROM8_FEM_EXTPA_GAIN_MASK},
- {BRCMS_SROM_PDETRANGE5G, 0xffffff00, 0, SROM8_FEM5G,
- SROM8_FEM_PDET_RANGE_MASK},
- {BRCMS_SROM_TRISO5G, 0xffffff00, 0, SROM8_FEM5G, SROM8_FEM_TR_ISO_MASK},
- {BRCMS_SROM_ANTSWCTL5G, 0xffffff00, 0, SROM8_FEM5G,
- SROM8_FEM_ANTSWLUT_MASK},
- {BRCMS_SROM_TEMPTHRESH, 0xffffff00, 0, SROM8_THERMAL, 0xff00},
- {BRCMS_SROM_TEMPOFFSET, 0xffffff00, 0, SROM8_THERMAL, 0x00ff},
-
- {BRCMS_SROM_CCODE, 0xffffff00, SRFL_CCODE, SROM8_CCODE, 0xffff},
- {BRCMS_SROM_MACADDR, 0xffffff00, SRFL_ETHADDR, SROM8_MACHI, 0xffff},
- {BRCMS_SROM_LEDDC, 0xffffff00, SRFL_NOFFS | SRFL_LEDDC, SROM8_LEDDC,
- 0xffff},
- {BRCMS_SROM_RAWTEMPSENSE, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
- 0x01ff},
- {BRCMS_SROM_MEASPOWER, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
- 0xfe00},
- {BRCMS_SROM_TEMPSENSE_SLOPE, 0xffffff00, SRFL_PRHEX,
- SROM8_TS_SLP_OPT_CORRX, 0x00ff},
- {BRCMS_SROM_TEMPCORRX, 0xffffff00, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX,
- 0xfc00},
- {BRCMS_SROM_TEMPSENSE_OPTION, 0xffffff00, SRFL_PRHEX,
- SROM8_TS_SLP_OPT_CORRX, 0x0300},
- {BRCMS_SROM_FREQOFFSET_CORR, 0xffffff00, SRFL_PRHEX,
- SROM8_FOC_HWIQ_IQSWP, 0x000f},
- {BRCMS_SROM_IQCAL_SWP_DIS, 0xffffff00, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP,
- 0x0010},
- {BRCMS_SROM_HW_IQCAL_EN, 0xffffff00, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP,
- 0x0020},
- {BRCMS_SROM_PHYCAL_TEMPDELTA, 0xffffff00, 0, SROM8_PHYCAL_TEMPDELTA,
- 0x00ff},
-
- {BRCMS_SROM_CCK2GPO, 0x00000100, 0, SROM8_2G_CCKPO, 0xffff},
- {BRCMS_SROM_OFDM2GPO, 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM8_2G_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_OFDM5GPO, 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM8_5G_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_OFDM5GLPO, 0x00000100, SRFL_MORE, SROM8_5GL_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_OFDM5GHPO, 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_MCS2GPO0, 0x00000100, 0, SROM8_2G_MCSPO, 0xffff},
- {BRCMS_SROM_MCS2GPO1, 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS2GPO2, 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS2GPO3, 0x00000100, 0, SROM8_2G_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS2GPO4, 0x00000100, 0, SROM8_2G_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS2GPO5, 0x00000100, 0, SROM8_2G_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS2GPO6, 0x00000100, 0, SROM8_2G_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS2GPO7, 0x00000100, 0, SROM8_2G_MCSPO + 7, 0xffff},
- {BRCMS_SROM_MCS5GPO0, 0x00000100, 0, SROM8_5G_MCSPO, 0xffff},
- {BRCMS_SROM_MCS5GPO1, 0x00000100, 0, SROM8_5G_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS5GPO2, 0x00000100, 0, SROM8_5G_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS5GPO3, 0x00000100, 0, SROM8_5G_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS5GPO4, 0x00000100, 0, SROM8_5G_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS5GPO5, 0x00000100, 0, SROM8_5G_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS5GPO6, 0x00000100, 0, SROM8_5G_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS5GPO7, 0x00000100, 0, SROM8_5G_MCSPO + 7, 0xffff},
- {BRCMS_SROM_MCS5GLPO0, 0x00000100, 0, SROM8_5GL_MCSPO, 0xffff},
- {BRCMS_SROM_MCS5GLPO1, 0x00000100, 0, SROM8_5GL_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS5GLPO2, 0x00000100, 0, SROM8_5GL_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS5GLPO3, 0x00000100, 0, SROM8_5GL_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS5GLPO4, 0x00000100, 0, SROM8_5GL_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS5GLPO5, 0x00000100, 0, SROM8_5GL_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS5GLPO6, 0x00000100, 0, SROM8_5GL_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS5GLPO7, 0x00000100, 0, SROM8_5GL_MCSPO + 7, 0xffff},
- {BRCMS_SROM_MCS5GHPO0, 0x00000100, 0, SROM8_5GH_MCSPO, 0xffff},
- {BRCMS_SROM_MCS5GHPO1, 0x00000100, 0, SROM8_5GH_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS5GHPO2, 0x00000100, 0, SROM8_5GH_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS5GHPO3, 0x00000100, 0, SROM8_5GH_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS5GHPO4, 0x00000100, 0, SROM8_5GH_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS5GHPO5, 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS5GHPO6, 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS5GHPO7, 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff},
- {BRCMS_SROM_CDDPO, 0x00000100, 0, SROM8_CDDPO, 0xffff},
- {BRCMS_SROM_STBCPO, 0x00000100, 0, SROM8_STBCPO, 0xffff},
- {BRCMS_SROM_BW40PO, 0x00000100, 0, SROM8_BW40PO, 0xffff},
- {BRCMS_SROM_BWDUPPO, 0x00000100, 0, SROM8_BWDUPPO, 0xffff},
-
- /* power per rate from sromrev 9 */
- {BRCMS_SROM_CCKBW202GPO, 0xfffffe00, 0, SROM9_2GPO_CCKBW20, 0xffff},
- {BRCMS_SROM_CCKBW20UL2GPO, 0xfffffe00, 0, SROM9_2GPO_CCKBW20UL, 0xffff},
- {BRCMS_SROM_LEGOFDMBW202GPO, 0xfffffe00, SRFL_MORE,
- SROM9_2GPO_LOFDMBW20, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_2GPO_LOFDMBW20 + 1, 0xffff},
- {BRCMS_SROM_LEGOFDMBW20UL2GPO, 0xfffffe00, SRFL_MORE,
- SROM9_2GPO_LOFDMBW20UL, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_2GPO_LOFDMBW20UL + 1, 0xffff},
- {BRCMS_SROM_LEGOFDMBW205GLPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GLPO_LOFDMBW20, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GLPO_LOFDMBW20 + 1, 0xffff},
- {BRCMS_SROM_LEGOFDMBW20UL5GLPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GLPO_LOFDMBW20UL, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GLPO_LOFDMBW20UL + 1, 0xffff},
- {BRCMS_SROM_LEGOFDMBW205GMPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GMPO_LOFDMBW20, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GMPO_LOFDMBW20 + 1, 0xffff},
- {BRCMS_SROM_LEGOFDMBW20UL5GMPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GMPO_LOFDMBW20UL, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GMPO_LOFDMBW20UL + 1, 0xffff},
- {BRCMS_SROM_LEGOFDMBW205GHPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GHPO_LOFDMBW20, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GHPO_LOFDMBW20 + 1, 0xffff},
- {BRCMS_SROM_LEGOFDMBW20UL5GHPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GHPO_LOFDMBW20UL, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GHPO_LOFDMBW20UL + 1, 0xffff},
- {BRCMS_SROM_MCSBW202GPO, 0xfffffe00, SRFL_MORE, SROM9_2GPO_MCSBW20,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_2GPO_MCSBW20 + 1, 0xffff},
- {BRCMS_SROM_MCSBW20UL2GPO, 0xfffffe00, SRFL_MORE, SROM9_2GPO_MCSBW20UL,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_2GPO_MCSBW20UL + 1, 0xffff},
- {BRCMS_SROM_MCSBW402GPO, 0xfffffe00, SRFL_MORE, SROM9_2GPO_MCSBW40,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_2GPO_MCSBW40 + 1, 0xffff},
- {BRCMS_SROM_MCSBW205GLPO, 0xfffffe00, SRFL_MORE, SROM9_5GLPO_MCSBW20,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GLPO_MCSBW20 + 1, 0xffff},
- {BRCMS_SROM_MCSBW20UL5GLPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GLPO_MCSBW20UL, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GLPO_MCSBW20UL + 1, 0xffff},
- {BRCMS_SROM_MCSBW405GLPO, 0xfffffe00, SRFL_MORE, SROM9_5GLPO_MCSBW40,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GLPO_MCSBW40 + 1, 0xffff},
- {BRCMS_SROM_MCSBW205GMPO, 0xfffffe00, SRFL_MORE, SROM9_5GMPO_MCSBW20,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GMPO_MCSBW20 + 1, 0xffff},
- {BRCMS_SROM_MCSBW20UL5GMPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GMPO_MCSBW20UL, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GMPO_MCSBW20UL + 1, 0xffff},
- {BRCMS_SROM_MCSBW405GMPO, 0xfffffe00, SRFL_MORE, SROM9_5GMPO_MCSBW40,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GMPO_MCSBW40 + 1, 0xffff},
- {BRCMS_SROM_MCSBW205GHPO, 0xfffffe00, SRFL_MORE, SROM9_5GHPO_MCSBW20,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GHPO_MCSBW20 + 1, 0xffff},
- {BRCMS_SROM_MCSBW20UL5GHPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GHPO_MCSBW20UL, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GHPO_MCSBW20UL + 1, 0xffff},
- {BRCMS_SROM_MCSBW405GHPO, 0xfffffe00, SRFL_MORE, SROM9_5GHPO_MCSBW40,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GHPO_MCSBW40 + 1, 0xffff},
- {BRCMS_SROM_MCS32PO, 0xfffffe00, 0, SROM9_PO_MCS32, 0xffff},
- {BRCMS_SROM_LEGOFDM40DUPPO, 0xfffffe00, 0, SROM9_PO_LOFDM40DUP, 0xffff},
-
- {BRCMS_SROM_NULL, 0, 0, 0, 0}
-};
-
-static const struct brcms_sromvar perpath_pci_sromvars[] = {
- {BRCMS_SROM_MAXP2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0x00ff},
- {BRCMS_SROM_ITT2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0xff00},
- {BRCMS_SROM_ITT5GA0, 0xffffff00, 0, SROM8_5G_ITT_MAXP, 0xff00},
- {BRCMS_SROM_PA2GW0A0, 0xffffff00, SRFL_PRHEX, SROM8_2G_PA, 0xffff},
- {BRCMS_SROM_PA2GW1A0, 0xffffff00, SRFL_PRHEX, SROM8_2G_PA + 1, 0xffff},
- {BRCMS_SROM_PA2GW2A0, 0xffffff00, SRFL_PRHEX, SROM8_2G_PA + 2, 0xffff},
- {BRCMS_SROM_MAXP5GA0, 0xffffff00, 0, SROM8_5G_ITT_MAXP, 0x00ff},
- {BRCMS_SROM_MAXP5GHA0, 0xffffff00, 0, SROM8_5GLH_MAXP, 0x00ff},
- {BRCMS_SROM_MAXP5GLA0, 0xffffff00, 0, SROM8_5GLH_MAXP, 0xff00},
- {BRCMS_SROM_PA5GW0A0, 0xffffff00, SRFL_PRHEX, SROM8_5G_PA, 0xffff},
- {BRCMS_SROM_PA5GW1A0, 0xffffff00, SRFL_PRHEX, SROM8_5G_PA + 1, 0xffff},
- {BRCMS_SROM_PA5GW2A0, 0xffffff00, SRFL_PRHEX, SROM8_5G_PA + 2, 0xffff},
- {BRCMS_SROM_PA5GLW0A0, 0xffffff00, SRFL_PRHEX, SROM8_5GL_PA, 0xffff},
- {BRCMS_SROM_PA5GLW1A0, 0xffffff00, SRFL_PRHEX, SROM8_5GL_PA + 1,
- 0xffff},
- {BRCMS_SROM_PA5GLW2A0, 0xffffff00, SRFL_PRHEX, SROM8_5GL_PA + 2,
- 0xffff},
- {BRCMS_SROM_PA5GHW0A0, 0xffffff00, SRFL_PRHEX, SROM8_5GH_PA, 0xffff},
- {BRCMS_SROM_PA5GHW1A0, 0xffffff00, SRFL_PRHEX, SROM8_5GH_PA + 1,
- 0xffff},
- {BRCMS_SROM_PA5GHW2A0, 0xffffff00, SRFL_PRHEX, SROM8_5GH_PA + 2,
- 0xffff},
- {BRCMS_SROM_NULL, 0, 0, 0, 0}
-};
-
-/* crc table has the same contents for every device instance, so it can be
- * shared between devices. */
-static u8 brcms_srom_crc8_table[CRC8_TABLE_SIZE];
-
-static uint mask_shift(u16 mask)
-{
- uint i;
- for (i = 0; i < (sizeof(mask) << 3); i++) {
- if (mask & (1 << i))
- return i;
- }
- return 0;
-}
-
-static uint mask_width(u16 mask)
-{
- int i;
- for (i = (sizeof(mask) << 3) - 1; i >= 0; i--) {
- if (mask & (1 << i))
- return (uint) (i - mask_shift(mask) + 1);
- }
- return 0;
-}
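The two helpers above reduce a 16-bit mask to a field position (index of the lowest set bit) and a field width (span up to the highest set bit). A minimal user-space sketch of the same idea; the demo_* names are hypothetical and not part of the driver:

#include <assert.h>
#include <stdint.h>

/* index of the lowest set bit, 0 if the mask is empty */
static unsigned int demo_mask_shift(uint16_t mask)
{
	unsigned int i;

	for (i = 0; i < 16; i++)
		if (mask & (1u << i))
			return i;
	return 0;
}

/* number of bits between the lowest and highest set bit, inclusive */
static unsigned int demo_mask_width(uint16_t mask)
{
	int i;

	for (i = 15; i >= 0; i--)
		if (mask & (1u << i))
			return (unsigned int)(i - demo_mask_shift(mask) + 1);
	return 0;
}

int main(void)
{
	uint16_t w = 0xa5f3;	/* a raw SROM word */

	/* 0xff00 covers bits 8..15: shift 8, width 8 */
	assert(demo_mask_shift(0xff00) == 8);
	assert(demo_mask_width(0xff00) == 8);
	/* extracting the masked field, as the parser below does */
	assert(((w & 0xff00) >> demo_mask_shift(0xff00)) == 0xa5);
	return 0;
}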
-
-static inline void le16_to_cpu_buf(u16 *buf, uint nwords)
-{
- while (nwords--)
- *(buf + nwords) = le16_to_cpu(*(__le16 *)(buf + nwords));
-}
-
-static inline void cpu_to_le16_buf(u16 *buf, uint nwords)
-{
- while (nwords--)
- *(__le16 *)(buf + nwords) = cpu_to_le16(*(buf + nwords));
-}
-
-/*
- * convert binary srom data into linked list of srom variable items.
- */
-static int
-_initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
-{
- struct brcms_srom_list_head *entry;
- enum brcms_srom_id id;
- u16 w;
- u32 val = 0;
- const struct brcms_sromvar *srv;
- uint width;
- uint flags;
- u32 sr = (1 << sromrev);
- uint p;
- uint pb = SROM8_PATH0;
- const uint psz = SROM8_PATH1 - SROM8_PATH0;
-
- /* first store the srom revision */
- entry = kzalloc(sizeof(struct brcms_srom_list_head), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- entry->varid = BRCMS_SROM_REV;
- entry->var_type = BRCMS_SROM_UNUMBER;
- entry->uval = sromrev;
- list_add(&entry->var_list, var_list);
-
- for (srv = pci_sromvars; srv->varid != BRCMS_SROM_NULL; srv++) {
- enum brcms_srom_var_type type;
- u8 ea[ETH_ALEN];
- u8 extra_space = 0;
-
- if ((srv->revmask & sr) == 0)
- continue;
-
- flags = srv->flags;
- id = srv->varid;
-
-		/* This entry is for mfgc only. Don't generate a param for it. */
- if (flags & SRFL_NOVAR)
- continue;
-
- if (flags & SRFL_ETHADDR) {
- /*
- * stored in string format XX:XX:XX:XX:XX:XX (17 chars)
- */
- ea[0] = (srom[srv->off] >> 8) & 0xff;
- ea[1] = srom[srv->off] & 0xff;
- ea[2] = (srom[srv->off + 1] >> 8) & 0xff;
- ea[3] = srom[srv->off + 1] & 0xff;
- ea[4] = (srom[srv->off + 2] >> 8) & 0xff;
- ea[5] = srom[srv->off + 2] & 0xff;
- /* 17 characters + string terminator - union size */
- extra_space = 18 - sizeof(s32);
- type = BRCMS_SROM_STRING;
- } else {
- w = srom[srv->off];
- val = (w & srv->mask) >> mask_shift(srv->mask);
- width = mask_width(srv->mask);
-
- while (srv->flags & SRFL_MORE) {
- srv++;
- if (srv->off == 0)
- continue;
-
- w = srom[srv->off];
-				val += ((w & srv->mask) >>
-					mask_shift(srv->mask)) << width;
- width += mask_width(srv->mask);
- }
-
- if ((flags & SRFL_NOFFS)
- && ((int)val == (1 << width) - 1))
- continue;
-
- if (flags & SRFL_CCODE) {
- type = BRCMS_SROM_STRING;
- } else if (flags & SRFL_LEDDC) {
-				/* LED Powersave duty cycle has to be scaled:
-				 * oncount in bits 31:24, offcount in bits 15:8
- */
- u32 w32 = /* oncount */
- (((val >> 8) & 0xff) << 24) |
- /* offcount */
- (((val & 0xff)) << 8);
- type = BRCMS_SROM_UNUMBER;
- val = w32;
- } else if ((flags & SRFL_PRSIGN)
- && (val & (1 << (width - 1)))) {
- type = BRCMS_SROM_SNUMBER;
- val |= ~0 << width;
- } else
- type = BRCMS_SROM_UNUMBER;
- }
-
- entry = kzalloc(sizeof(struct brcms_srom_list_head) +
- extra_space, GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
- entry->varid = id;
- entry->var_type = type;
- if (flags & SRFL_ETHADDR) {
- snprintf(entry->buf, 18, "%pM", ea);
- } else if (flags & SRFL_CCODE) {
- if (val == 0)
- entry->buf[0] = '\0';
- else
- snprintf(entry->buf, 3, "%c%c",
- (val >> 8), (val & 0xff));
- } else {
- entry->uval = val;
- }
-
- list_add(&entry->var_list, var_list);
- }
-
- for (p = 0; p < MAX_PATH_SROM; p++) {
- for (srv = perpath_pci_sromvars;
- srv->varid != BRCMS_SROM_NULL; srv++) {
- if ((srv->revmask & sr) == 0)
- continue;
-
- if (srv->flags & SRFL_NOVAR)
- continue;
-
- w = srom[pb + srv->off];
- val = (w & srv->mask) >> mask_shift(srv->mask);
- width = mask_width(srv->mask);
-
- /* Cheating: no per-path var is more than
- * 1 word */
- if ((srv->flags & SRFL_NOFFS)
- && ((int)val == (1 << width) - 1))
- continue;
-
- entry =
- kzalloc(sizeof(struct brcms_srom_list_head),
- GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
- entry->varid = srv->varid+p;
- entry->var_type = BRCMS_SROM_UNUMBER;
- entry->uval = val;
- list_add(&entry->var_list, var_list);
- }
- pb += psz;
- }
- return 0;
-}
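Inside the parser above, variables flagged SRFL_MORE span several consecutive 16-bit SROM words; each continuation word is shifted up by the width accumulated so far. A small sketch of that assembly, under the assumption of two full-width (mask 0xffff) descriptors:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* two consecutive SROM words holding the low and high halves
	 * of one 32-bit value (both descriptors use mask 0xffff) */
	uint16_t srom[2] = { 0x3344, 0x1122 };
	unsigned int width = 16;		/* width consumed by the first word */
	uint32_t val;

	val = srom[0];				/* first descriptor: bits 0..15 */
	val |= (uint32_t)srom[1] << width;	/* SRFL_MORE continuation: bits 16..31 */

	assert(val == 0x11223344);
	return 0;
}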
-
-/*
- * The crc check is done on a little-endian array, we need
- * to switch the bytes around before checking crc (and
- * then switch it back).
- */
-static int do_crc_check(u16 *buf, unsigned nwords)
-{
- u8 crc;
-
- cpu_to_le16_buf(buf, nwords);
- crc = crc8(brcms_srom_crc8_table, (void *)buf, nwords << 1, CRC8_INIT_VALUE);
- le16_to_cpu_buf(buf, nwords);
-
- return crc == CRC8_GOOD_VALUE(brcms_srom_crc8_table);
-}
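The comment above is the whole point: the CRC-8 was computed over the SROM's little-endian byte stream, so the host-order u16 buffer has to be presented to the CRC routine byte-for-byte in that order and swapped back afterwards. A hedged sketch of the byte layout involved; words_to_le_bytes is a made-up helper, not the kernel's cpu_to_le16_buf:

#include <assert.h>
#include <stdint.h>

/* lay host-order 16-bit words out in little-endian byte order */
static void words_to_le_bytes(const uint16_t *words, unsigned int nwords,
			      uint8_t *out)
{
	unsigned int i;

	for (i = 0; i < nwords; i++) {
		out[2 * i] = words[i] & 0xff;		/* low byte first */
		out[2 * i + 1] = words[i] >> 8;		/* then high byte */
	}
}

int main(void)
{
	uint16_t words[2] = { 0x1234, 0xabcd };
	uint8_t bytes[4];

	words_to_le_bytes(words, 2, bytes);
	/* the CRC must see 34 12 cd ab regardless of host endianness */
	assert(bytes[0] == 0x34 && bytes[1] == 0x12);
	assert(bytes[2] == 0xcd && bytes[3] == 0xab);
	return 0;
}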
-
-/*
- * Read in and validate sprom.
- * Return 0 on success, nonzero on error.
- */
-static int
-sprom_read_pci(struct si_pub *sih, u16 *buf, uint nwords, bool check_crc)
-{
- int err = 0;
- uint i;
- struct bcma_device *core;
- uint sprom_offset;
-
- /* determine core to read */
- if (ai_get_ccrev(sih) < 32) {
- core = ai_findcore(sih, BCMA_CORE_80211, 0);
- sprom_offset = PCI_BAR0_SPROM_OFFSET;
- } else {
- core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- sprom_offset = CHIPCREGOFFS(sromotp);
- }
-
- /* read the sprom */
- for (i = 0; i < nwords; i++)
- buf[i] = bcma_read16(core, sprom_offset+i*2);
-
- if (buf[0] == 0xffff)
- /*
- * The hardware thinks that an srom that starts with
- * 0xffff is blank, regardless of the rest of the
- * content, so declare it bad.
- */
- return -ENODATA;
-
- if (check_crc && !do_crc_check(buf, nwords))
- err = -EIO;
-
- return err;
-}
-
-static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords)
-{
- u8 *otp;
- uint sz = OTP_SZ_MAX / 2; /* size in words */
- int err = 0;
-
- otp = kzalloc(OTP_SZ_MAX, GFP_ATOMIC);
- if (otp == NULL)
- return -ENOMEM;
-
- err = otp_read_region(sih, OTP_HW_RGN, (u16 *) otp, &sz);
-
- sz = min_t(uint, sz, nwords);
- memcpy(buf, otp, sz * 2);
-
- kfree(otp);
-
- /* Check CRC */
- if (buf[0] == 0xffff)
- /* The hardware thinks that an srom that starts with 0xffff
- * is blank, regardless of the rest of the content, so declare
- * it bad.
- */
- return -ENODATA;
-
- /* fixup the endianness so crc8 will pass */
- cpu_to_le16_buf(buf, sz);
- if (crc8(brcms_srom_crc8_table, (u8 *) buf, sz * 2,
- CRC8_INIT_VALUE) != CRC8_GOOD_VALUE(brcms_srom_crc8_table))
- err = -EIO;
- else
- /* now correct the endianness of the byte array */
- le16_to_cpu_buf(buf, sz);
-
- return err;
-}
-
-/*
- * Initialize nonvolatile variable table from sprom.
- * Return 0 on success, nonzero on error.
- */
-int srom_var_init(struct si_pub *sih)
-{
- u16 *srom;
- u8 sromrev = 0;
- u32 sr;
- int err = 0;
-
- /*
-	 * Apply CRC over SROM content regardless of whether the SROM is present or not.
- */
- srom = kmalloc(SROM_MAX, GFP_ATOMIC);
- if (!srom)
- return -ENOMEM;
-
- crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY);
- if (ai_is_sprom_available(sih)) {
- err = sprom_read_pci(sih, srom, SROM4_WORDS, true);
-
- if (err == 0)
- /* srom read and passed crc */
- /* top word of sprom contains version and crc8 */
- sromrev = srom[SROM4_CRCREV] & 0xff;
- } else {
- /* Use OTP if SPROM not available */
- err = otp_read_pci(sih, srom, SROM4_WORDS);
- if (err == 0)
-			/* OTP only contains SROM rev8/rev9 for now */
- sromrev = srom[SROM4_CRCREV] & 0xff;
- }
-
- if (!err) {
- struct si_info *sii = (struct si_info *)sih;
-
- /* Bitmask for the sromrev */
- sr = 1 << sromrev;
-
- /*
- * srom version check: Current valid versions: 8, 9
- */
- if ((sr & 0x300) == 0) {
- err = -EINVAL;
- goto errout;
- }
-
- INIT_LIST_HEAD(&sii->var_list);
-
- /* parse SROM into name=value pairs. */
- err = _initvars_srom_pci(sromrev, srom, &sii->var_list);
- if (err)
- srom_free_vars(sih);
- }
-
-errout:
- kfree(srom);
- return err;
-}
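The version check above depends on 0x300 being BIT(8) | BIT(9), so only sromrev 8 and 9 get past it. A tiny self-check of that bitmask, outside the driver:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t rev;

	for (rev = 0; rev < 16; rev++) {
		uint32_t sr = 1u << rev;		/* bitmask for the sromrev */
		int accepted = (sr & 0x300) != 0;	/* the driver's test */

		assert(accepted == (rev == 8 || rev == 9));
	}
	return 0;
}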
-
-void srom_free_vars(struct si_pub *sih)
-{
- struct si_info *sii;
- struct brcms_srom_list_head *entry, *next;
-
- sii = (struct si_info *)sih;
- list_for_each_entry_safe(entry, next, &sii->var_list, var_list) {
- list_del(&entry->var_list);
- kfree(entry);
- }
-}
-
-/*
- * Search the name=value vars for a specific one and return its value.
- * Returns NULL if not found.
- */
-char *getvar(struct si_pub *sih, enum brcms_srom_id id)
-{
- struct si_info *sii;
- struct brcms_srom_list_head *entry;
-
- sii = (struct si_info *)sih;
-
- list_for_each_entry(entry, &sii->var_list, var_list)
- if (entry->varid == id)
- return &entry->buf[0];
-
- /* nothing found */
- return NULL;
-}
-
-/*
- * Search the vars for a specific one and return its value as
- * an integer. Returns 0 if not found.
- */
-int getintvar(struct si_pub *sih, enum brcms_srom_id id)
-{
- struct si_info *sii;
- struct brcms_srom_list_head *entry;
- unsigned long res;
-
- sii = (struct si_info *)sih;
-
- list_for_each_entry(entry, &sii->var_list, var_list)
- if (entry->varid == id) {
- if (entry->var_type == BRCMS_SROM_SNUMBER ||
- entry->var_type == BRCMS_SROM_UNUMBER)
- return (int)entry->sval;
- else if (!kstrtoul(&entry->buf[0], 0, &res))
- return (int)res;
- }
-
- return 0;
-}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.h b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
deleted file mode 100644
index f2a58f262c99..000000000000
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_SROM_H_
-#define _BRCM_SROM_H_
-
-#include "types.h"
-
-/* Prototypes */
-extern int srom_var_init(struct si_pub *sih);
-extern void srom_free_vars(struct si_pub *sih);
-
-extern int srom_read(struct si_pub *sih, uint bus, void *curmap,
- uint byteoff, uint nbytes, u16 *buf, bool check_crc);
-
-#endif /* _BRCM_SROM_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/brcm80211/brcmsmac/stf.c
index d8f528eb180c..ed1d1aa71d2d 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/stf.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.c
@@ -370,9 +370,11 @@ void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc)
void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc)
{
+ struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
+
/* get available rx/tx chains */
- wlc->stf->hw_txchain = (u8) getintvar(wlc->hw->sih, BRCMS_SROM_TXCHAIN);
- wlc->stf->hw_rxchain = (u8) getintvar(wlc->hw->sih, BRCMS_SROM_RXCHAIN);
+ wlc->stf->hw_txchain = sprom->txchain;
+ wlc->stf->hw_rxchain = sprom->rxchain;
/* these parameter are intended to be used for all PHY types */
if (wlc->stf->hw_txchain == 0 || wlc->stf->hw_txchain == 0xf) {
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index db6c6e528022..2463c0626438 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -137,11 +137,3 @@ config IWLWIFI_EXPERIMENTAL_MFP
even if the microcode doesn't advertise it.
Say Y only if you want to experiment with MFP.
-
-config IWLWIFI_UCODE16
- bool "support uCode 16.0"
- depends on IWLWIFI
- help
- This option enables support for uCode version 16.0.
-
- Say Y if you want to use 16.0 microcode.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 406f297a9a56..d615eacbf050 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -18,7 +18,6 @@ iwlwifi-objs += iwl-notif-wait.o
iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
-iwlwifi-$(CONFIG_IWLWIFI_UCODE16) += iwl-phy-db.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 7f793417c787..8133105ac645 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -79,7 +79,7 @@ static const struct iwl_base_params iwl2000_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.hd_v2 = true,
};
@@ -97,7 +97,7 @@ static const struct iwl_base_params iwl2030_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.hd_v2 = true,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 381b02cf339c..19f7ee84ae89 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -86,7 +86,7 @@ static const struct iwl_base_params iwl6000_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
};
static const struct iwl_base_params iwl6050_base_params = {
@@ -102,7 +102,7 @@ static const struct iwl_base_params iwl6050_base_params = {
.chain_noise_scale = 1500,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 1024,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
};
static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -118,7 +118,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
};
static const struct iwl_ht_params iwl6000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 01dc44267317..e55ec6c8a920 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <net/mac80211.h>
#include "iwl-dev.h"
#include "iwl-io.h"
@@ -273,9 +274,20 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
return;
}
+ /*
+	 * There are situations where BT needs to take over for receive while
+	 * the STA must respond to the AP's frame(s). In that case, reduce the
+	 * tx power of the required response frames so that concurrent BT
+	 * receive and WiFi transmit (BT - ANT A, WiFi - ANT B) can coexist
+	 * without interfering with one another.
+	 *
+	 * The reduced tx power applies to control frames only (ACK/Back/CTS)
+	 * when indicated by the BT config command.
+ */
basic.kill_ack_mask = priv->kill_ack_mask;
basic.kill_cts_mask = priv->kill_cts_mask;
- basic.reduce_txpower = priv->reduced_txpower;
+ if (priv->reduced_txpower)
+ basic.reduce_txpower = IWLAGN_BT_REDUCED_TX_PWR;
basic.valid = priv->bt_valid;
/*
@@ -589,13 +601,31 @@ static bool iwlagn_set_kill_msk(struct iwl_priv *priv,
return need_update;
}
+/*
+ * Upon RSSI changes, send a bt config command with the following changes:
+ * 1. enable/disable "reduced control frames tx power"
+ * 2. update the "kill_ack_mask" and "kill_cts_mask"
+ *
+ * If "reduced tx power" is enabled, uCode shall
+ * 1. reduce the ACK/Back/CTS rate to 6Mbps
+ * 2. not use duplicate 20/40MHz mode
+ */
static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
struct iwl_bt_uart_msg *uart_msg)
{
bool need_update = false;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ int ave_rssi;
+ ave_rssi = ieee80211_ave_rssi(ctx->vif);
+ if (!ave_rssi) {
+ /* no rssi data, no changes to reduce tx power */
+ IWL_DEBUG_COEX(priv, "no rssi data available\n");
+ return need_update;
+ }
if (!priv->reduced_txpower &&
!iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
+ (ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD) &&
(uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
BT_UART_MSG_FRAME3OBEX_MSK)) &&
!(uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
@@ -606,13 +636,14 @@ static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
need_update = true;
} else if (priv->reduced_txpower &&
(iwl_is_associated(priv, IWL_RXON_CTX_PAN) ||
+ (ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD) ||
(uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK)) ||
!(uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
BT_UART_MSG_FRAME3OBEX_MSK)))) {
/* disable reduced tx power */
priv->reduced_txpower = false;
- priv->bt_valid &= ~IWLAGN_BT_VALID_REDUCED_TX_PWR;
+ priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR;
need_update = true;
}
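The hunk above gates "reduced tx power" on the average RSSI with separate enable and disable thresholds, which gives the decision a hysteresis band. A sketch of that state machine; the DEMO_* values are invented for illustration only, since the real BT_ENABLE/DISABLE_REDUCED_TXPOWER_THRESHOLD constants are defined in a header not shown in this diff:

#include <assert.h>
#include <stdbool.h>

#define DEMO_ENABLE_THRESHOLD	(-62)	/* hypothetical, dBm */
#define DEMO_DISABLE_THRESHOLD	(-65)	/* hypothetical, dBm */

static bool update_reduced_txpower(bool reduced, int ave_rssi)
{
	if (!reduced && ave_rssi > DEMO_ENABLE_THRESHOLD)
		return true;	/* strong signal: cheaper control frames suffice */
	if (reduced && ave_rssi < DEMO_DISABLE_THRESHOLD)
		return false;	/* weak signal: back to full power */
	return reduced;		/* inside the hysteresis band: keep current state */
}

int main(void)
{
	assert(update_reduced_txpower(false, -60) == true);
	assert(update_reduced_txpower(true, -63) == true);	/* band holds the state */
	assert(update_reduced_txpower(true, -70) == false);
	return 0;
}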
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 51e1a69ffdda..8cebd7c363fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -884,6 +884,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
(priv->bt_full_concurrent != full_concurrent)) {
priv->bt_full_concurrent = full_concurrent;
+ priv->last_bt_traffic_load = priv->bt_traffic_load;
/* Update uCode's rate table. */
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 74fbee627306..0a3aa7c83003 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -61,6 +61,10 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
RXON_FILTER_ACCEPT_GRP_MSK;
break;
+ case NL80211_IFTYPE_MONITOR:
+ ctx->staging.dev_type = RXON_DEV_TYPE_SNIFFER;
+ break;
+
default:
IWL_ERR(priv, "Unsupported interface type %d\n",
ctx->vif->type);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index b31584e87bc7..aea07aab3c9e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -772,7 +772,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
~IWL_STA_DRIVER_ACTIVE;
priv->stations[i].used &=
~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_bh(&priv->sta_lock);
+ continue;
}
/*
* Rate scaling has already been initialized, send
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index f2e9f298a947..3366e2e2f00f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -590,11 +590,17 @@ turn_off:
spin_unlock_bh(&priv->sta_lock);
if (test_bit(txq_id, priv->agg_q_alloc)) {
- /* If the transport didn't know that we wanted to start
- * agreggation, don't tell it that we want to stop them
+ /*
+		 * If the transport didn't know that we wanted to start
+		 * aggregation, don't tell it that we want to stop it.
+		 * This can happen when we don't get the addBA response on
+		 * time, or we didn't have time to drain the AC queues.
*/
- if (agg_state != IWL_AGG_STARTING)
+ if (agg_state == IWL_AGG_ON)
iwl_trans_tx_agg_disable(priv->trans, txq_id);
+ else
+ IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
+ agg_state);
iwlagn_dealloc_agg_txq(priv, txq_id);
}
@@ -1300,10 +1306,11 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
(u8 *) &ba_resp->sta_addr_lo32,
ba_resp->sta_id);
IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
- "scd_flow = %d, scd_ssn = %d\n",
+ "scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
(unsigned long long)le64_to_cpu(ba_resp->bitmap),
- scd_flow, ba_resp_scd_ssn);
+ scd_flow, ba_resp_scd_ssn, ba_resp->txed,
+ ba_resp->txed_2_done);
/* Mark that the expected block-ack response arrived */
agg->wait_for_ba = false;
@@ -1319,8 +1326,6 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
*/
ba_resp->txed = ba_resp->txed_2_done;
}
- IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
- ba_resp->txed, ba_resp->txed_2_done);
priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 8d7637083fcf..ec36e2b020b6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -603,7 +603,7 @@ void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
- BIT(NL80211_IFTYPE_ADHOC);
+ BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MONITOR);
priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
BIT(NL80211_IFTYPE_STATION);
priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 83a6930f3658..9af6a239b384 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -1910,6 +1910,8 @@ enum iwl_bt_kill_idx {
IWLAGN_BT_VALID_REDUCED_TX_PWR | \
IWLAGN_BT_VALID_3W_LUT)
+#define IWLAGN_BT_REDUCED_TX_PWR BIT(0)
+
#define IWLAGN_BT_DECISION_LUT_SIZE 12
struct iwl_basic_bt_cmd {
@@ -1923,6 +1925,10 @@ struct iwl_basic_bt_cmd {
u8 bt3_timer_t2_value;
__le16 bt4_reaction_time; /* unused */
__le32 bt3_lookup_table[IWLAGN_BT_DECISION_LUT_SIZE];
+ /*
+ * bit 0: use reduced tx power for control frame
+ * bit 1 - 7: reserved
+ */
u8 reduce_txpower;
u8 reserved;
__le16 valid;
@@ -2272,7 +2278,6 @@ struct iwl_ssid_ie {
#define IWL_GOOD_CRC_TH_DISABLED 0
#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
-#define IWL_MAX_SCAN_SIZE 1024
#define IWL_MAX_CMD_SIZE 4096
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 3c72bad0ae56..d742900969ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -657,17 +657,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
-static int alloc_pci_desc(struct iwl_drv *drv,
- struct iwl_firmware_pieces *pieces,
- enum iwl_ucode_type type)
+static int iwl_alloc_ucode(struct iwl_drv *drv,
+ struct iwl_firmware_pieces *pieces,
+ enum iwl_ucode_type type)
{
int i;
for (i = 0;
i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
i++)
if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
- get_sec(pieces, type, i)))
- return -1;
+ get_sec(pieces, type, i)))
+ return -ENOMEM;
return 0;
}
@@ -825,8 +825,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
- if (alloc_pci_desc(drv, &pieces, i))
- goto err_pci_alloc;
+ if (iwl_alloc_ucode(drv, &pieces, i))
+ goto out_free_fw;
/* Now that we can no longer fail, copy information */
@@ -866,7 +866,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
if (!drv->op_mode)
- goto out_unbind;
+ goto out_free_fw;
return;
@@ -877,7 +877,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
goto out_unbind;
return;
- err_pci_alloc:
+ out_free_fw:
IWL_ERR(drv, "failed to allocate pci memory\n");
iwl_dealloc_ucode(drv);
release_firmware(ucode_raw);
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index d33cc9cc7d3f..ab2f4d7500a4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -150,6 +150,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
IEEE80211_HW_QUEUE_CONTROL |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+ IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_SCAN_WHILE_IDLE;
hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
@@ -223,8 +224,8 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
- /* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
+ /* we create the 802.11 header and a max-length SSID element */
+ hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 34;
/*
* We don't use all queues: 4 and 9 are unused and any
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
deleted file mode 100644
index f166955340fe..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ /dev/null
@@ -1,288 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include "iwl-debug.h"
-#include "iwl-dev.h"
-
-#include "iwl-phy-db.h"
-
-#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
-
-struct iwl_phy_db *iwl_phy_db_init(struct device *dev)
-{
- struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
- GFP_KERNEL);
-
- if (!phy_db)
- return phy_db;
-
- phy_db->dev = dev;
-
- /* TODO: add default values of the phy db. */
- return phy_db;
-}
-
-/*
- * get phy db section: returns a pointer to a phy db section specified by
- * type and channel group id.
- */
-static struct iwl_phy_db_entry *
-iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type,
- u16 chg_id)
-{
- if (!phy_db || type < 0 || type >= IWL_PHY_DB_MAX)
- return NULL;
-
- switch (type) {
- case IWL_PHY_DB_CFG:
- return &phy_db->cfg;
- case IWL_PHY_DB_CALIB_NCH:
- return &phy_db->calib_nch;
- case IWL_PHY_DB_CALIB_CH:
- return &phy_db->calib_ch;
- case IWL_PHY_DB_CALIB_CHG_PAPD:
- if (chg_id < 0 || chg_id >= IWL_NUM_PAPD_CH_GROUPS)
- return NULL;
- return &phy_db->calib_ch_group_papd[chg_id];
- case IWL_PHY_DB_CALIB_CHG_TXP:
- if (chg_id < 0 || chg_id >= IWL_NUM_TXP_CH_GROUPS)
- return NULL;
- return &phy_db->calib_ch_group_txp[chg_id];
- default:
- return NULL;
- }
- return NULL;
-}
-
-static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type,
- u16 chg_id)
-{
- struct iwl_phy_db_entry *entry =
- iwl_phy_db_get_section(phy_db, type, chg_id);
- if (!entry)
- return;
-
- kfree(entry->data);
- entry->data = NULL;
- entry->size = 0;
-}
-
-void iwl_phy_db_free(struct iwl_phy_db *phy_db)
-{
- int i;
-
- if (!phy_db)
- return;
-
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
- for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
- for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
-
- kfree(phy_db);
-}
-
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 *data,
- u16 size, gfp_t alloc_ctx)
-{
- struct iwl_phy_db_entry *entry;
- u16 chg_id = 0;
-
- if (!phy_db)
- return -EINVAL;
-
- if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
- type == IWL_PHY_DB_CALIB_CHG_TXP)
- chg_id = le16_to_cpup((__le16 *)data);
-
- entry = iwl_phy_db_get_section(phy_db, type, chg_id);
- if (!entry)
- return -EINVAL;
-
- kfree(entry->data);
- entry->data = kmemdup(data, size, alloc_ctx);
- if (!entry->data) {
- entry->size = 0;
- return -ENOMEM;
- }
-
- entry->size = size;
-
- if (type == IWL_PHY_DB_CALIB_CH) {
- phy_db->channel_num = le32_to_cpup((__le32 *)data);
- phy_db->channel_size =
- (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
- }
-
- return 0;
-}
-
-static int is_valid_channel(u16 ch_id)
-{
- if (ch_id <= 14 ||
- (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
- (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
- (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
- return 1;
- return 0;
-}
-
-static u8 ch_id_to_ch_index(u16 ch_id)
-{
- if (WARN_ON(!is_valid_channel(ch_id)))
- return 0xff;
-
- if (ch_id <= 14)
- return ch_id - 1;
- if (ch_id <= 64)
- return (ch_id + 20) / 4;
- if (ch_id <= 140)
- return (ch_id - 12) / 4;
- return (ch_id - 13) / 4;
-}
-
-
-static u16 channel_id_to_papd(u16 ch_id)
-{
- if (WARN_ON(!is_valid_channel(ch_id)))
- return 0xff;
-
- if (1 <= ch_id && ch_id <= 14)
- return 0;
- if (36 <= ch_id && ch_id <= 64)
- return 1;
- if (100 <= ch_id && ch_id <= 140)
- return 2;
- return 3;
-}
-
-static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
-{
- struct iwl_phy_db_chg_txp *txp_chg;
- int i;
- u8 ch_index = ch_id_to_ch_index(ch_id);
- if (ch_index == 0xff)
- return 0xff;
-
- for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
- txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
- if (!txp_chg)
- return 0xff;
- /*
-		 * Look for the first channel group whose max channel is
-		 * higher than the wanted channel.
- */
- if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
- return i;
- }
- return 0xff;
-}
-
-int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 **data,
- u16 *size, u16 ch_id)
-{
- struct iwl_phy_db_entry *entry;
- u32 channel_num;
- u32 channel_size;
- u16 ch_group_id = 0;
- u16 index;
-
- if (!phy_db)
- return -EINVAL;
-
- /* find wanted channel group */
- if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
- ch_group_id = channel_id_to_papd(ch_id);
- else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
- ch_group_id = channel_id_to_txp(phy_db, ch_id);
-
- entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
- if (!entry)
- return -EINVAL;
-
- if (type == IWL_PHY_DB_CALIB_CH) {
- index = ch_id_to_ch_index(ch_id);
- channel_num = phy_db->channel_num;
- channel_size = phy_db->channel_size;
- if (index >= channel_num) {
- IWL_ERR(phy_db, "Wrong channel number %d", ch_id);
- return -EINVAL;
- }
- *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
- *size = channel_size;
- } else {
- *data = entry->data;
- *size = entry->size;
- }
- return 0;
-}
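The removed helpers above fold a channel id (1-14, then 36-64, 100-140 and 145-165 in steps of four) onto a dense calibration index. A standalone restatement, for illustration only, that checks the mapping really is contiguous:

#include <assert.h>
#include <stdint.h>

static int demo_is_valid_channel(uint16_t ch)
{
	return ch <= 14 ||
	       (36 <= ch && ch <= 64 && ch % 4 == 0) ||
	       (100 <= ch && ch <= 140 && ch % 4 == 0) ||
	       (145 <= ch && ch <= 165 && ch % 4 == 1);
}

static uint8_t demo_ch_index(uint16_t ch)
{
	if (ch <= 14)
		return ch - 1;		/* 1..14    -> 0..13  */
	if (ch <= 64)
		return (ch + 20) / 4;	/* 36..64   -> 14..21 */
	if (ch <= 140)
		return (ch - 12) / 4;	/* 100..140 -> 22..32 */
	return (ch - 13) / 4;		/* 145..165 -> 33..38 */
}

int main(void)
{
	uint16_t ch;
	int expected = 0;

	/* valid channel ids land on a dense 0..38 index range, in order */
	for (ch = 1; ch <= 165; ch++)
		if (demo_is_valid_channel(ch))
			assert(demo_ch_index(ch) == expected++);
	assert(expected == 39);
	return 0;
}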
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
deleted file mode 100644
index c34c6a9303ab..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#ifndef __IWL_PHYDB_H__
-#define __IWL_PHYDB_H__
-
-#include <linux/types.h>
-
-#define IWL_NUM_PAPD_CH_GROUPS 4
-#define IWL_NUM_TXP_CH_GROUPS 8
-
-struct iwl_phy_db_entry {
- u16 size;
- u8 *data;
-};
-
-struct iwl_shared;
-
-/**
- * struct iwl_phy_db - stores phy configuration and calibration data.
- *
- * @cfg: phy configuration.
- * @calib_nch: non channel specific calibration data.
- * @calib_ch: channel specific calibration data.
- * @calib_ch_group_papd: calibration data related to papd channel group.
- * @calib_ch_group_txp: calibration data related to tx power channel group.
- */
-struct iwl_phy_db {
- struct iwl_phy_db_entry cfg;
- struct iwl_phy_db_entry calib_nch;
- struct iwl_phy_db_entry calib_ch;
- struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
- struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
-
- u32 channel_num;
- u32 channel_size;
-
- /* for an access to the logger */
- struct device *dev;
-};
-
-enum iwl_phy_db_section_type {
- IWL_PHY_DB_CFG = 1,
- IWL_PHY_DB_CALIB_NCH,
- IWL_PHY_DB_CALIB_CH,
- IWL_PHY_DB_CALIB_CHG_PAPD,
- IWL_PHY_DB_CALIB_CHG_TXP,
- IWL_PHY_DB_MAX
-};
-
-/* for parsing of tx power channel group data that comes from the firmware*/
-struct iwl_phy_db_chg_txp {
- __le32 space;
- __le16 max_channel_idx;
-} __packed;
-
-struct iwl_phy_db *iwl_phy_db_init(struct device *dev);
-
-void iwl_phy_db_free(struct iwl_phy_db *phy_db);
-
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 *data,
- u16 size, gfp_t alloc_ctx);
-
-int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 **data,
- u16 *size, u16 ch_id);
-
-#endif /* __IWL_PHYDB_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 8352265dbc4b..544ddf17f5bd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -253,6 +253,8 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
skip, period);
+	/* The power level here is 0-4 (used as an array index), but the user
+	 * expects to see 1-5 (according to the spec). */
IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
}
@@ -308,10 +310,12 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
priv->power_data.debug_sleep_level_override,
dtimper);
else {
+		/* Note that the user parameter is 1-5 (according to the spec),
+		 * but we pass 0-4 because it acts as an array index. */
if (iwlwifi_mod_params.power_level > IWL_POWER_INDEX_1 &&
- iwlwifi_mod_params.power_level <= IWL_POWER_INDEX_5)
+ iwlwifi_mod_params.power_level <= IWL_POWER_NUM)
iwl_static_sleep_cmd(priv, cmd,
- iwlwifi_mod_params.power_level, dtimper);
+ iwlwifi_mod_params.power_level - 1, dtimper);
else
iwl_static_sleep_cmd(priv, cmd,
IWL_POWER_INDEX_1, dtimper);
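The change above keeps the power_level module parameter 1-based, as documented, and converts to the 0-based table index only when the sleep command is built. A trivial sketch of that convention:

#include <assert.h>

int main(void)
{
	int user_level;

	for (user_level = 1; user_level <= 5; user_level++) {
		int index = user_level - 1;	/* what iwl_static_sleep_cmd() receives */

		assert(index >= 0 && index <= 4);	/* valid array index */
		assert(index + 1 == user_level);	/* what the debug log prints back */
	}
	return 0;
}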
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index a8437a6bc18e..031d8e21f82f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -52,6 +52,7 @@
#define IWL_PASSIVE_DWELL_TIME_52 (10)
#define IWL_PASSIVE_DWELL_BASE (100)
#define IWL_CHANNEL_TUNE_TIME 5
+#define MAX_SCAN_CHANNEL 50
static int iwl_send_scan_abort(struct iwl_priv *priv)
{
@@ -616,7 +617,8 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
*/
static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
- const u8 *ies, int ie_len, int left)
+ const u8 *ies, int ie_len, const u8 *ssid,
+ u8 ssid_len, int left)
{
int len = 0;
u8 *pos = NULL;
@@ -638,14 +640,18 @@ static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
/* ...next IE... */
pos = &frame->u.probe_req.variable[0];
- /* fill in our indirect SSID IE */
- left -= 2;
+ /* fill in our SSID IE */
+ left -= ssid_len + 2;
if (left < 0)
return 0;
*pos++ = WLAN_EID_SSID;
- *pos++ = 0;
+ *pos++ = ssid_len;
+ if (ssid && ssid_len) {
+ memcpy(pos, ssid, ssid_len);
+ pos += ssid_len;
+ }
- len += 2;
+ len += ssid_len + 2;
if (WARN_ON(left < ie_len))
return len;
@@ -679,6 +685,15 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
u8 active_chains;
u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
int ret;
+ int scan_cmd_size = sizeof(struct iwl_scan_cmd) +
+ MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) +
+ priv->fw->ucode_capa.max_probe_length;
+ const u8 *ssid = NULL;
+ u8 ssid_len = 0;
+
+ if (WARN_ON_ONCE(priv->scan_request &&
+ priv->scan_request->n_channels > MAX_SCAN_CHANNEL))
+ return -EINVAL;
lockdep_assert_held(&priv->mutex);
@@ -686,8 +701,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
ctx = iwl_rxon_ctx_from_vif(vif);
if (!priv->scan_cmd) {
- priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+ priv->scan_cmd = kmalloc(scan_cmd_size, GFP_KERNEL);
if (!priv->scan_cmd) {
IWL_DEBUG_SCAN(priv,
"fail to allocate memory for scan\n");
@@ -695,7 +709,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
}
}
scan = priv->scan_cmd;
- memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
+ memset(scan, 0, scan_cmd_size);
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
@@ -746,10 +760,18 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
if (priv->scan_request->n_ssids) {
int i, p = 0;
IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
+ /*
+		 * The highest priority SSID is inserted into the
+ * probe request template.
+ */
+ ssid_len = priv->scan_request->ssids[0].ssid_len;
+ ssid = priv->scan_request->ssids[0].ssid;
+
+ /*
+		 * Invert the order of the SSIDs; the firmware will invert
+		 * it back.
+ */
+ for (i = priv->scan_request->n_ssids - 1; i >= 1; i--) {
scan->direct_scan[p].id = WLAN_EID_SSID;
scan->direct_scan[p].len =
priv->scan_request->ssids[i].ssid_len;
@@ -883,7 +905,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
vif->addr,
priv->scan_request->ie,
priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ ssid, ssid_len,
+ scan_cmd_size - sizeof(*scan));
break;
case IWL_SCAN_RADIO_RESET:
case IWL_SCAN_ROC:
@@ -891,7 +914,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
cmd_len = iwl_fill_probe_req(
(struct ieee80211_mgmt *)scan->data,
iwl_bcast_addr, NULL, 0,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ NULL, 0,
+ scan_cmd_size - sizeof(*scan));
break;
default:
BUG();
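With IWL_MAX_SCAN_SIZE gone, the scan command buffer is now sized for the worst case: the fixed command header, MAX_SCAN_CHANNEL channel descriptors, plus the maximum probe request template. A rough sketch of that arithmetic with made-up struct sizes; the real ones come from iwl-commands.h and the ucode capabilities:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_scan_cmd     { uint8_t fixed[60]; };	/* hypothetical size */
struct demo_scan_channel { uint8_t fixed[20]; };	/* hypothetical size */

#define DEMO_MAX_SCAN_CHANNEL	50

int main(void)
{
	size_t max_probe_length = 200;	/* assumed firmware-reported value */
	size_t scan_cmd_size = sizeof(struct demo_scan_cmd) +
			       DEMO_MAX_SCAN_CHANNEL * sizeof(struct demo_scan_channel) +
			       max_probe_length;

	printf("worst-case scan command: %zu bytes\n", scan_cmd_size);
	return 0;
}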
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 6213c05a4b52..e959207c630a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -347,7 +347,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
int sta_id, int tid, int frame_limit, u16 ssn);
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- int index, enum dma_data_direction dma_dir);
+ enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
struct sk_buff_head *skbs);
int iwl_queue_space(const struct iwl_queue *q);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 21a8a672fbb2..a8750238ee09 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -204,33 +204,39 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
for (i = 1; i < num_tbs; i++)
dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
iwl_tfd_tb_get_len(tfd, i), dma_dir);
+
+ tfd->num_tbs = 0;
}
/**
* iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
* @trans - transport private data
* @txq - tx queue
- * @index - the index of the TFD to be freed
- *@dma_dir - the direction of the DMA mapping
+ * @dma_dir - the direction of the DMA mapping
*
* Does NOT advance any TFD circular buffer read/write indexes
* Does NOT free the TFD itself (which is within circular buffer)
*/
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- int index, enum dma_data_direction dma_dir)
+ enum dma_data_direction dma_dir)
{
struct iwl_tfd *tfd_tmp = txq->tfds;
+ /* rd_ptr is bounded by n_bd and idx is bounded by n_window */
+ int rd_ptr = txq->q.read_ptr;
+ int idx = get_cmd_index(&txq->q, rd_ptr);
+
lockdep_assert_held(&txq->lock);
- iwlagn_unmap_tfd(trans, &txq->entries[index].meta,
- &tfd_tmp[index], dma_dir);
+ /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
+ iwlagn_unmap_tfd(trans, &txq->entries[idx].meta,
+ &tfd_tmp[rd_ptr], dma_dir);
/* free SKB */
if (txq->entries) {
struct sk_buff *skb;
- skb = txq->entries[index].skb;
+ skb = txq->entries[idx].skb;
/* Can be called from irqs-disabled context
* If skb is not NULL, it means that the whole queue is being
@@ -238,7 +244,7 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
*/
if (skb) {
iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[index].skb = NULL;
+ txq->entries[idx].skb = NULL;
}
}
}
@@ -973,7 +979,7 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
- iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
+ iwlagn_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
freed++;
}
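The fix above separates the two index spaces: the TFD ring holds q->n_bd descriptors, while txq->entries only has q->n_window metadata slots, so the raw read pointer has to be folded through get_cmd_index() before it can index the entries array. A sketch of that folding, assuming power-of-two sizes as the PCIe code uses; demo_cmd_index is a hypothetical stand-in:

#include <assert.h>

/* wrap a ring position into the smaller window of software entries */
static int demo_cmd_index(int read_ptr, int n_window)
{
	return read_ptr & (n_window - 1);	/* n_window is a power of two */
}

int main(void)
{
	int n_bd = 256, n_window = 64;
	int rd_ptr;

	for (rd_ptr = 0; rd_ptr < n_bd; rd_ptr++) {
		int idx = demo_cmd_index(rd_ptr, n_window);

		assert(idx >= 0 && idx < n_window);	/* safe entries[] access */
	}
	return 0;
}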
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 2e57161854b9..ec6fb395b84d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -435,9 +435,7 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
spin_lock_bh(&txq->lock);
while (q->write_ptr != q->read_ptr) {
- /* The read_ptr needs to bound by q->n_window */
- iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
- dma_dir);
+ iwlagn_txq_free_tfd(trans, txq, dma_dir);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}
spin_unlock_bh(&txq->lock);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 03c0c6b1372c..fb787df01666 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -746,6 +746,11 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
hwsim_check_sta_magic(txi->control.sta);
ieee80211_tx_info_clear_status(txi);
+
+ /* frame was transmitted at most favorable rate at first attempt */
+ txi->control.rates[0].count = 1;
+ txi->control.rates[1].idx = -1;
+
if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack)
txi->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(hw, skb);
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index 5c1a46bf1e11..3f66ebb0a630 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -29,6 +29,8 @@ mwifiex-y += scan.o
mwifiex-y += join.o
mwifiex-y += sta_ioctl.o
mwifiex-y += sta_cmd.o
+mwifiex-y += uap_cmd.o
+mwifiex-y += ie.o
mwifiex-y += sta_cmdresp.o
mwifiex-y += sta_event.o
mwifiex-y += sta_tx.o
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index c78ea873a63a..87671446e24b 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -20,6 +20,23 @@
#include "cfg80211.h"
#include "main.h"
+static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
+ {
+ .max = 1, .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1, .types = BIT(NL80211_IFTYPE_AP),
+ },
+};
+
+static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
+ .limits = mwifiex_ap_sta_limits,
+ .num_different_channels = 1,
+ .n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+ .max_interfaces = MWIFIEX_MAX_BSS_NUM,
+ .beacon_int_infra_match = true,
+};
+
/*
* This function maps the nl802.11 channel type into driver channel type.
*
@@ -67,7 +84,7 @@ mwifiex_is_alg_wep(u32 cipher)
/*
* This function retrieves the private structure from kernel wiphy structure.
*/
-static void *mwifiex_cfg80211_get_priv(struct wiphy *wiphy)
+static void *mwifiex_cfg80211_get_adapter(struct wiphy *wiphy)
{
return (void *) (*(unsigned long *) wiphy_priv(wiphy));
}
@@ -80,8 +97,10 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index, bool pairwise, const u8 *mac_addr)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
+ const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
- if (mwifiex_set_encode(priv, NULL, 0, key_index, 1)) {
+ if (mwifiex_set_encode(priv, NULL, 0, key_index, peer_mac, 1)) {
wiphy_err(wiphy, "deleting the crypto keys\n");
return -EFAULT;
}
@@ -98,7 +117,8 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
enum nl80211_tx_power_setting type,
int mbm)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_private *priv;
struct mwifiex_power_cfg power_cfg;
int dbm = MBM_TO_DBM(mbm);
@@ -109,6 +129,8 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
power_cfg.is_power_auto = 1;
}
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+
return mwifiex_set_tx_power(priv, &power_cfg);
}
@@ -148,7 +170,7 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
if (!priv->sec_info.wep_enabled)
return 0;
- if (mwifiex_set_encode(priv, NULL, 0, key_index, 0)) {
+ if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) {
wiphy_err(wiphy, "set default Tx key index\n");
return -EFAULT;
}
@@ -165,9 +187,11 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
struct key_params *params)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
+ const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
if (mwifiex_set_encode(priv, params->key, params->key_len,
- key_index, 0)) {
+ key_index, peer_mac, 0)) {
wiphy_err(wiphy, "crypto keys added\n");
return -EFAULT;
}
@@ -192,13 +216,13 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
enum ieee80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
- struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_private *priv;
struct mwifiex_802_11d_domain_reg *domain_info = &adapter->domain_reg;
/* Set country code */
- domain_info->country_code[0] = priv->country_code[0];
- domain_info->country_code[1] = priv->country_code[1];
+ domain_info->country_code[0] = adapter->country_code[0];
+ domain_info->country_code[1] = adapter->country_code[1];
domain_info->country_code[2] = ' ';
band = mwifiex_band_to_radio_type(adapter->config_bands);
@@ -250,6 +274,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
domain_info->no_of_triplet = no_of_triplet;
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+
if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
HostCmd_ACT_GEN_SET, 0, NULL)) {
wiphy_err(wiphy, "11D: setting domain info in FW\n");
@@ -272,12 +298,12 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
static int mwifiex_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
- wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for domain"
- " %c%c\n", request->alpha2[0], request->alpha2[1]);
+ wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for %c%c\n",
+ request->alpha2[0], request->alpha2[1]);
- memcpy(priv->country_code, request->alpha2, sizeof(request->alpha2));
+ memcpy(adapter->country_code, request->alpha2, sizeof(request->alpha2));
switch (request->initiator) {
case NL80211_REGDOM_SET_BY_DRIVER:
@@ -361,33 +387,10 @@ mwifiex_set_rf_channel(struct mwifiex_private *priv,
if (mwifiex_bss_set_channel(priv, &cfp))
return -EFAULT;
- return mwifiex_drv_change_adhoc_chan(priv, cfp.channel);
-}
-
-/*
- * CFG802.11 operation handler to set channel.
- *
- * This function can only be used when station is not connected.
- */
-static int
-mwifiex_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
-{
- struct mwifiex_private *priv;
-
- if (dev)
- priv = mwifiex_netdev_get_priv(dev);
+ if (priv->bss_type == MWIFIEX_BSS_TYPE_STA)
+ return mwifiex_drv_change_adhoc_chan(priv, cfp.channel);
else
- priv = mwifiex_cfg80211_get_priv(wiphy);
-
- if (priv->media_connected) {
- wiphy_err(wiphy, "This setting is valid only when station "
- "is not connected\n");
- return -EINVAL;
- }
-
- return mwifiex_set_rf_channel(priv, chan, channel_type);
+ return mwifiex_uap_set_channel(priv, cfp.channel);
}
/*
@@ -399,18 +402,13 @@ mwifiex_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
static int
mwifiex_set_frag(struct mwifiex_private *priv, u32 frag_thr)
{
- int ret;
-
if (frag_thr < MWIFIEX_FRAG_MIN_VALUE ||
frag_thr > MWIFIEX_FRAG_MAX_VALUE)
- return -EINVAL;
-
- /* Send request to firmware */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_SET, FRAG_THRESH_I,
- &frag_thr);
+ frag_thr = MWIFIEX_FRAG_MAX_VALUE;
- return ret;
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_SET, FRAG_THRESH_I,
+ &frag_thr);
}
/*
@@ -439,19 +437,85 @@ mwifiex_set_rts(struct mwifiex_private *priv, u32 rts_thr)
static int
mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
- int ret = 0;
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_private *priv;
+ struct mwifiex_uap_bss_param *bss_cfg;
+ int ret, bss_started, i;
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+
+ switch (priv->bss_role) {
+ case MWIFIEX_BSS_ROLE_UAP:
+ bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param),
+ GFP_KERNEL);
+ if (!bss_cfg)
+ return -ENOMEM;
+
+ mwifiex_set_sys_config_invalid_data(bss_cfg);
+
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD)
+ bss_cfg->rts_threshold = wiphy->rts_threshold;
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
+ bss_cfg->frag_threshold = wiphy->frag_threshold;
+ if (changed & WIPHY_PARAM_RETRY_LONG)
+ bss_cfg->retry_limit = wiphy->retry_long;
+
+ bss_started = priv->bss_started;
+
+ ret = mwifiex_send_cmd_sync(priv,
+ HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0,
+ NULL);
+ if (ret) {
+ wiphy_err(wiphy, "Failed to stop the BSS\n");
+ kfree(bss_cfg);
+ return ret;
+ }
- if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
- ret = mwifiex_set_rts(priv, wiphy->rts_threshold);
- if (ret)
- return ret;
- }
+ ret = mwifiex_send_cmd_async(priv,
+ HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_BSS_PARAMS_I, bss_cfg);
- if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
- ret = mwifiex_set_frag(priv, wiphy->frag_threshold);
+ kfree(bss_cfg);
- return ret;
+ if (ret) {
+ wiphy_err(wiphy, "Failed to set bss config\n");
+ return ret;
+ }
+
+ if (!bss_started)
+ break;
+
+ ret = mwifiex_send_cmd_async(priv,
+ HostCmd_CMD_UAP_BSS_START,
+ HostCmd_ACT_GEN_SET, 0,
+ NULL);
+ if (ret) {
+ wiphy_err(wiphy, "Failed to start BSS\n");
+ return ret;
+ }
+
+ break;
+ case MWIFIEX_BSS_ROLE_STA:
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
+ ret = mwifiex_set_rts(priv,
+ wiphy->rts_threshold);
+ if (ret)
+ return ret;
+ }
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
+ ret = mwifiex_set_frag(priv,
+ wiphy->frag_threshold);
+ if (ret)
+ return ret;
+ }
+ break;
+ }
+ }
+
+ return 0;
}
/*
@@ -466,31 +530,59 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
int ret;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- if (priv->bss_mode == type) {
- wiphy_warn(wiphy, "already set to required type\n");
- return 0;
- }
-
- priv->bss_mode = type;
-
- switch (type) {
+ switch (dev->ieee80211_ptr->iftype) {
case NL80211_IFTYPE_ADHOC:
- dev->ieee80211_ptr->iftype = NL80211_IFTYPE_ADHOC;
- wiphy_dbg(wiphy, "info: setting interface type to adhoc\n");
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ wiphy_warn(wiphy, "%s: kept type as IBSS\n", dev->name);
+ case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */
+ return 0;
+ case NL80211_IFTYPE_AP:
+ default:
+ wiphy_err(wiphy, "%s: changing to %d not supported\n",
+ dev->name, type);
+ return -EOPNOTSUPP;
+ }
break;
case NL80211_IFTYPE_STATION:
- dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
- wiphy_dbg(wiphy, "info: setting interface type to managed\n");
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name);
+ case NL80211_IFTYPE_STATION: /* This shouldn't happen */
+ return 0;
+ case NL80211_IFTYPE_AP:
+ default:
+ wiphy_err(wiphy, "%s: changing to %d not supported\n",
+ dev->name, type);
+ return -EOPNOTSUPP;
+ }
+ break;
+ case NL80211_IFTYPE_AP:
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ wiphy_warn(wiphy, "%s: kept type as AP\n", dev->name);
+ case NL80211_IFTYPE_AP: /* This shouldn't happen */
+ return 0;
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ default:
+ wiphy_err(wiphy, "%s: changing to %d not supported\n",
+ dev->name, type);
+ return -EOPNOTSUPP;
+ }
break;
- case NL80211_IFTYPE_UNSPECIFIED:
- dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
- wiphy_dbg(wiphy, "info: setting interface type to auto\n");
- return 0;
default:
- wiphy_err(wiphy, "unknown interface type: %d\n", type);
- return -EINVAL;
+ wiphy_err(wiphy, "%s: unknown iftype: %d\n",
+ dev->name, dev->ieee80211_ptr->iftype);
+ return -EOPNOTSUPP;
}
+ dev->ieee80211_ptr->iftype = type;
+ priv->bss_mode = type;
mwifiex_deauthenticate(priv, NULL);
priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM;
@@ -804,6 +896,90 @@ static int mwifiex_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
return 0;
}
+/* cfg80211 operation handler for stop_ap.
+ * This function stops the BSS running on the uAP interface.
+ */
+static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (mwifiex_del_mgmt_ies(priv))
+ wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
+
+ if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL)) {
+ wiphy_err(wiphy, "Failed to stop the BSS\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* cfg80211 operation handler for start_ap.
+ * This function sets the beacon period, DTIM period, SSID and security
+ * into the AP config structure.
+ * The AP is configured with these settings and the BSS is started.
+ */
+static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_ap_settings *params)
+{
+ struct mwifiex_uap_bss_param *bss_cfg;
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP)
+ return -1;
+ if (mwifiex_set_mgmt_ies(priv, params))
+ return -1;
+
+ bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param), GFP_KERNEL);
+ if (!bss_cfg)
+ return -ENOMEM;
+
+ mwifiex_set_sys_config_invalid_data(bss_cfg);
+
+ if (params->beacon_interval)
+ bss_cfg->beacon_period = params->beacon_interval;
+ if (params->dtim_period)
+ bss_cfg->dtim_period = params->dtim_period;
+
+ if (params->ssid && params->ssid_len) {
+ memcpy(bss_cfg->ssid.ssid, params->ssid, params->ssid_len);
+ bss_cfg->ssid.ssid_len = params->ssid_len;
+ }
+
+ if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
+ kfree(bss_cfg);
+ wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
+ return -1;
+ }
+
+ if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL)) {
+ wiphy_err(wiphy, "Failed to stop the BSS\n");
+ kfree(bss_cfg);
+ return -1;
+ }
+
+ if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_BSS_PARAMS_I, bss_cfg)) {
+ wiphy_err(wiphy, "Failed to set the SSID\n");
+ kfree(bss_cfg);
+ return -1;
+ }
+
+ kfree(bss_cfg);
+
+ if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_BSS_START,
+ HostCmd_ACT_GEN_SET, 0, NULL)) {
+ wiphy_err(wiphy, "Failed to start the BSS\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/*
* CFG802.11 operation handler for disconnection request.
*
@@ -923,7 +1099,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
priv->wep_key_curr_index = 0;
priv->sec_info.encryption_mode = 0;
priv->sec_info.is_authtype_auto = 0;
- ret = mwifiex_set_encode(priv, NULL, 0, 0, 1);
+ ret = mwifiex_set_encode(priv, NULL, 0, 0, NULL, 1);
if (mode == NL80211_IFTYPE_ADHOC) {
/* "privacy" is set only for ad-hoc mode */
@@ -971,7 +1147,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
" with key len %d\n", sme->key_len);
priv->wep_key_curr_index = sme->key_idx;
ret = mwifiex_set_encode(priv, sme->key, sme->key_len,
- sme->key_idx, 0);
+ sme->key_idx, NULL, 0);
}
}
done:
@@ -1050,6 +1226,11 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
goto done;
}
+ if (priv->bss_mode == NL80211_IFTYPE_AP) {
+ wiphy_err(wiphy, "skip association request for AP interface\n");
+ goto done;
+ }
+
wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
(char *) sme->ssid, sme->bssid);
@@ -1283,15 +1464,12 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
u32 *flags,
struct vif_params *params)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
- struct mwifiex_adapter *adapter;
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_private *priv;
struct net_device *dev;
void *mdev_priv;
+ struct wireless_dev *wdev;
- if (!priv)
- return NULL;
-
- adapter = priv->adapter;
if (!adapter)
return NULL;
@@ -1299,12 +1477,21 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
+ priv = adapter->priv[MWIFIEX_BSS_TYPE_STA];
if (priv->bss_mode) {
- wiphy_err(wiphy, "cannot create multiple"
- " station/adhoc interfaces\n");
+ wiphy_err(wiphy,
+ "cannot create multiple sta/adhoc ifaces\n");
return NULL;
}
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!wdev)
+ return NULL;
+
+ wdev->wiphy = wiphy;
+ priv->wdev = wdev;
+ wdev->iftype = NL80211_IFTYPE_STATION;
+
if (type == NL80211_IFTYPE_UNSPECIFIED)
priv->bss_mode = NL80211_IFTYPE_STATION;
else
@@ -1312,11 +1499,36 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
priv->bss_type = MWIFIEX_BSS_TYPE_STA;
priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
- priv->bss_priority = 0;
+ priv->bss_priority = MWIFIEX_BSS_ROLE_STA;
priv->bss_role = MWIFIEX_BSS_ROLE_STA;
priv->bss_num = 0;
break;
+ case NL80211_IFTYPE_AP:
+ priv = adapter->priv[MWIFIEX_BSS_TYPE_UAP];
+
+ if (priv->bss_mode) {
+ wiphy_err(wiphy, "Can't create multiple AP interfaces");
+ return NULL;
+ }
+
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!wdev)
+ return NULL;
+
+ priv->wdev = wdev;
+ wdev->wiphy = wiphy;
+ wdev->iftype = NL80211_IFTYPE_AP;
+
+ priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
+ priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
+ priv->bss_priority = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_started = 0;
+ priv->bss_num = 0;
+ priv->bss_mode = type;
+
+ break;
default:
wiphy_err(wiphy, "type not supported\n");
return NULL;
@@ -1329,6 +1541,15 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
goto error;
}
+ mwifiex_init_priv_params(priv, dev);
+ priv->netdev = dev;
+
+ mwifiex_setup_ht_caps(&wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap, priv);
+
+ if (adapter->config_bands & BAND_A)
+ mwifiex_setup_ht_caps(
+ &wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap, priv);
+
dev_net_set(dev, wiphy_net(wiphy));
dev->ieee80211_ptr = priv->wdev;
dev->ieee80211_ptr->iftype = priv->bss_mode;
@@ -1343,9 +1564,6 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
mdev_priv = netdev_priv(dev);
*((unsigned long *) mdev_priv) = (unsigned long) priv;
- priv->netdev = dev;
- mwifiex_init_priv_params(priv, dev);
-
SET_NETDEV_DEV(dev, adapter->dev);
/* Register network device */
@@ -1417,7 +1635,6 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.get_station = mwifiex_cfg80211_get_station,
.dump_station = mwifiex_cfg80211_dump_station,
.set_wiphy_params = mwifiex_cfg80211_set_wiphy_params,
- .set_channel = mwifiex_cfg80211_set_channel,
.join_ibss = mwifiex_cfg80211_join_ibss,
.leave_ibss = mwifiex_cfg80211_leave_ibss,
.add_key = mwifiex_cfg80211_add_key,
@@ -1426,6 +1643,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.set_power_mgmt = mwifiex_cfg80211_set_power_mgmt,
.set_tx_power = mwifiex_cfg80211_set_tx_power,
.set_bitrate_mask = mwifiex_cfg80211_set_bitrate_mask,
+ .start_ap = mwifiex_cfg80211_start_ap,
+ .stop_ap = mwifiex_cfg80211_stop_ap,
.set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
};
@@ -1436,82 +1655,67 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
* default parameters and handler function pointers, and finally
* registers the device.
*/
-int mwifiex_register_cfg80211(struct mwifiex_private *priv)
+
+int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
{
int ret;
void *wdev_priv;
- struct wireless_dev *wdev;
- struct ieee80211_sta_ht_cap *ht_info;
+ struct wiphy *wiphy;
+ struct mwifiex_private *priv = adapter->priv[MWIFIEX_BSS_TYPE_STA];
u8 *country_code;
- wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
- if (!wdev) {
- dev_err(priv->adapter->dev, "%s: allocating wireless device\n",
- __func__);
- return -ENOMEM;
- }
- wdev->wiphy =
- wiphy_new(&mwifiex_cfg80211_ops,
- sizeof(struct mwifiex_private *));
- if (!wdev->wiphy) {
- kfree(wdev);
+ /* create a new wiphy for use with cfg80211 */
+ wiphy = wiphy_new(&mwifiex_cfg80211_ops,
+ sizeof(struct mwifiex_adapter *));
+ if (!wiphy) {
+ dev_err(adapter->dev, "%s: creating new wiphy\n", __func__);
return -ENOMEM;
}
- wdev->iftype = NL80211_IFTYPE_STATION;
- wdev->wiphy->max_scan_ssids = 10;
- wdev->wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
- wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
-
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
- ht_info = &wdev->wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap;
- mwifiex_setup_ht_caps(ht_info, priv);
-
- if (priv->adapter->config_bands & BAND_A) {
- wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &mwifiex_band_5ghz;
- ht_info = &wdev->wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap;
- mwifiex_setup_ht_caps(ht_info, priv);
- } else {
- wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
- }
+ wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
+ wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
+
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
+ if (adapter->config_bands & BAND_A)
+ wiphy->bands[IEEE80211_BAND_5GHZ] = &mwifiex_band_5ghz;
+ else
+ wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+
+ wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
+ wiphy->n_iface_combinations = 1;
/* Initialize cipher suits */
- wdev->wiphy->cipher_suites = mwifiex_cipher_suites;
- wdev->wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites);
+ wiphy->cipher_suites = mwifiex_cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites);
- memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
- wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_CUSTOM_REGULATORY;
/* Reserve space for mwifiex specific private data for BSS */
- wdev->wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
-
- wdev->wiphy->reg_notifier = mwifiex_reg_notifier;
+ wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
- /* Set struct mwifiex_private pointer in wiphy_priv */
- wdev_priv = wiphy_priv(wdev->wiphy);
+ wiphy->reg_notifier = mwifiex_reg_notifier;
- *(unsigned long *) wdev_priv = (unsigned long) priv;
+ /* Set struct mwifiex_adapter pointer in wiphy_priv */
+ wdev_priv = wiphy_priv(wiphy);
+ *(unsigned long *)wdev_priv = (unsigned long)adapter;
- set_wiphy_dev(wdev->wiphy, (struct device *) priv->adapter->dev);
+ set_wiphy_dev(wiphy, (struct device *)priv->adapter->dev);
- ret = wiphy_register(wdev->wiphy);
+ ret = wiphy_register(wiphy);
if (ret < 0) {
- dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n",
- __func__);
- wiphy_free(wdev->wiphy);
- kfree(wdev);
+ dev_err(adapter->dev,
+ "%s: wiphy_register failed: %d\n", __func__, ret);
+ wiphy_free(wiphy);
return ret;
- } else {
- dev_dbg(priv->adapter->dev,
- "info: successfully registered wiphy device\n");
}
-
country_code = mwifiex_11d_code_2_region(priv->adapter->region_code);
- if (country_code && regulatory_hint(wdev->wiphy, country_code))
- dev_err(priv->adapter->dev,
- "%s: regulatory_hint failed\n", __func__);
-
- priv->wdev = wdev;
+ if (country_code && regulatory_hint(wiphy, country_code))
+ dev_err(adapter->dev, "regulatory_hint() failed\n");
+ adapter->wiphy = wiphy;
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/cfg80211.h b/drivers/net/wireless/mwifiex/cfg80211.h
index 76c76c60438b..c5848934f111 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.h
+++ b/drivers/net/wireless/mwifiex/cfg80211.h
@@ -24,6 +24,6 @@
#include "main.h"
-int mwifiex_register_cfg80211(struct mwifiex_private *);
+int mwifiex_register_cfg80211(struct mwifiex_adapter *);
#endif
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 1710beffb93a..51e023ec1de4 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -440,6 +440,11 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
do_gettimeofday(&tstamp);
dev_dbg(adapter->dev, "event: %lu.%lu: cause: %#x\n",
tstamp.tv_sec, tstamp.tv_usec, eventcause);
+ } else {
+ /* Handle PS_SLEEP/AWAKE events on STA */
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+ if (!priv)
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
}
ret = mwifiex_process_sta_event(priv);
@@ -540,8 +545,20 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
/* Prepare command */
if (cmd_no) {
- ret = mwifiex_sta_prepare_cmd(priv, cmd_no, cmd_action,
- cmd_oid, data_buf, cmd_ptr);
+ switch (cmd_no) {
+ case HostCmd_CMD_UAP_SYS_CONFIG:
+ case HostCmd_CMD_UAP_BSS_START:
+ case HostCmd_CMD_UAP_BSS_STOP:
+ ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action,
+ cmd_oid, data_buf,
+ cmd_ptr);
+ break;
+ default:
+ ret = mwifiex_sta_prepare_cmd(priv, cmd_no, cmd_action,
+ cmd_oid, data_buf,
+ cmd_ptr);
+ break;
+ }
} else {
ret = mwifiex_cmd_host_cmd(priv, cmd_ptr, data_buf);
cmd_node->cmd_flag |= CMD_F_HOSTCMD;
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index d04aba4131dc..f918f66e5e27 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -28,7 +28,7 @@
#include <linux/ieee80211.h>
-#define MWIFIEX_MAX_BSS_NUM (1)
+#define MWIFIEX_MAX_BSS_NUM (2)
#define MWIFIEX_MIN_DATA_HEADER_LEN 36 /* sizeof(mwifiex_txpd)
* + 4 byte alignment
@@ -55,11 +55,17 @@
#define MWIFIEX_RX_DATA_BUF_SIZE (4 * 1024)
#define MWIFIEX_RX_CMD_BUF_SIZE (2 * 1024)
+#define MAX_BEACON_PERIOD (4000)
+#define MIN_BEACON_PERIOD (50)
+#define MAX_DTIM_PERIOD (100)
+#define MIN_DTIM_PERIOD (1)
+
#define MWIFIEX_RTS_MIN_VALUE (0)
#define MWIFIEX_RTS_MAX_VALUE (2347)
#define MWIFIEX_FRAG_MIN_VALUE (256)
#define MWIFIEX_FRAG_MAX_VALUE (2346)
+#define MWIFIEX_RETRY_LIMIT 14
#define MWIFIEX_SDIO_BLOCK_SIZE 256
#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
@@ -92,6 +98,11 @@ struct mwifiex_fw_image {
u32 fw_len;
};
+struct mwifiex_802_11_ssid {
+ u32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+};
+
struct mwifiex_wait_queue {
wait_queue_head_t wait;
int status;
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 5f6adeb9b950..9f674bbebe65 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -93,6 +93,20 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define CAL_SNR(RSSI, NF) ((s16)((s16)(RSSI)-(s16)(NF)))
+#define UAP_BSS_PARAMS_I 0
+#define UAP_CUSTOM_IE_I 1
+#define MWIFIEX_AUTO_IDX_MASK 0xffff
+#define MWIFIEX_DELETE_MASK 0x0000
+#define MGMT_MASK_ASSOC_REQ 0x01
+#define MGMT_MASK_REASSOC_REQ 0x04
+#define MGMT_MASK_ASSOC_RESP 0x02
+#define MGMT_MASK_REASSOC_RESP 0x08
+#define MGMT_MASK_PROBE_REQ 0x10
+#define MGMT_MASK_PROBE_RESP 0x20
+#define MGMT_MASK_BEACON 0x100
+
+#define TLV_TYPE_UAP_SSID 0x0000
+
#define PROPRIETARY_TLV_BASE_ID 0x0100
#define TLV_TYPE_KEY_MATERIAL (PROPRIETARY_TLV_BASE_ID + 0)
#define TLV_TYPE_CHANLIST (PROPRIETARY_TLV_BASE_ID + 1)
@@ -104,14 +118,26 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19)
#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22)
#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
+#define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32)
#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
+#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44)
+#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
+#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51)
+#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
+#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
+#define TLV_TYPE_UAP_AKMP (PROPRIETARY_TLV_BASE_ID + 65)
+#define TLV_TYPE_UAP_FRAG_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 70)
#define TLV_TYPE_RATE_DROP_CONTROL (PROPRIETARY_TLV_BASE_ID + 82)
#define TLV_TYPE_RATE_SCOPE (PROPRIETARY_TLV_BASE_ID + 83)
#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84)
+#define TLV_TYPE_UAP_RETRY_LIMIT (PROPRIETARY_TLV_BASE_ID + 93)
#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94)
+#define TLV_TYPE_UAP_MGMT_FRAME (PROPRIETARY_TLV_BASE_ID + 104)
#define TLV_TYPE_MGMT_IE (PROPRIETARY_TLV_BASE_ID + 105)
#define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113)
#define TLV_TYPE_PS_PARAM (PROPRIETARY_TLV_BASE_ID + 114)
+#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
+#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -209,6 +235,9 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_RSSI_INFO 0x00a4
#define HostCmd_CMD_FUNC_INIT 0x00a9
#define HostCmd_CMD_FUNC_SHUTDOWN 0x00aa
+#define HostCmd_CMD_UAP_SYS_CONFIG 0x00b0
+#define HostCmd_CMD_UAP_BSS_START 0x00b1
+#define HostCmd_CMD_UAP_BSS_STOP 0x00b2
#define HostCmd_CMD_11N_CFG 0x00cd
#define HostCmd_CMD_11N_ADDBA_REQ 0x00ce
#define HostCmd_CMD_11N_ADDBA_RSP 0x00cf
@@ -223,6 +252,19 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_SET_BSS_MODE 0x00f7
#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa
+#define PROTOCOL_NO_SECURITY 0x01
+#define PROTOCOL_STATIC_WEP 0x02
+#define PROTOCOL_WPA 0x08
+#define PROTOCOL_WPA2 0x20
+#define PROTOCOL_WPA2_MIXED 0x28
+#define PROTOCOL_EAP 0x40
+#define KEY_MGMT_NONE 0x04
+#define KEY_MGMT_PSK 0x02
+#define KEY_MGMT_EAP 0x01
+#define CIPHER_TKIP 0x04
+#define CIPHER_AES_CCMP 0x08
+#define VALID_CIPHER_BITMAP 0x0c
+
enum ENH_PS_MODES {
EN_PS = 1,
DIS_PS = 2,
@@ -313,15 +355,20 @@ enum ENH_PS_MODES {
#define EVENT_DATA_SNR_HIGH 0x00000027
#define EVENT_LINK_QUALITY 0x00000028
#define EVENT_PORT_RELEASE 0x0000002b
+#define EVENT_UAP_STA_DEAUTH 0x0000002c
+#define EVENT_UAP_STA_ASSOC 0x0000002d
+#define EVENT_UAP_BSS_START 0x0000002e
#define EVENT_PRE_BEACON_LOST 0x00000031
#define EVENT_ADDBA 0x00000033
#define EVENT_DELBA 0x00000034
#define EVENT_BA_STREAM_TIEMOUT 0x00000037
#define EVENT_AMSDU_AGGR_CTRL 0x00000042
+#define EVENT_UAP_BSS_IDLE 0x00000043
+#define EVENT_UAP_BSS_ACTIVE 0x00000044
#define EVENT_WEP_ICV_ERR 0x00000046
#define EVENT_HS_ACT_REQ 0x00000047
#define EVENT_BW_CHANGE 0x00000048
-
+#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
#define EVENT_HOSTWAKE_STAIE 0x0000004d
#define EVENT_ID_MASK 0xffff
@@ -1103,6 +1150,101 @@ struct host_cmd_ds_802_11_eeprom_access {
u8 value;
} __packed;
+struct host_cmd_tlv {
+ __le16 type;
+ __le16 len;
+} __packed;
+
+struct mwifiex_assoc_event {
+ u8 sta_addr[ETH_ALEN];
+ __le16 type;
+ __le16 len;
+ __le16 frame_control;
+ __le16 cap_info;
+ __le16 listen_interval;
+ u8 data[0];
+} __packed;
+
+struct host_cmd_ds_sys_config {
+ __le16 action;
+ u8 tlv[0];
+};
+
+struct host_cmd_tlv_akmp {
+ struct host_cmd_tlv tlv;
+ __le16 key_mgmt;
+ __le16 key_mgmt_operation;
+} __packed;
+
+struct host_cmd_tlv_pwk_cipher {
+ struct host_cmd_tlv tlv;
+ __le16 proto;
+ u8 cipher;
+ u8 reserved;
+} __packed;
+
+struct host_cmd_tlv_gwk_cipher {
+ struct host_cmd_tlv tlv;
+ u8 cipher;
+ u8 reserved;
+} __packed;
+
+struct host_cmd_tlv_passphrase {
+ struct host_cmd_tlv tlv;
+ u8 passphrase[0];
+} __packed;
+
+struct host_cmd_tlv_auth_type {
+ struct host_cmd_tlv tlv;
+ u8 auth_type;
+} __packed;
+
+struct host_cmd_tlv_encrypt_protocol {
+ struct host_cmd_tlv tlv;
+ __le16 proto;
+} __packed;
+
+struct host_cmd_tlv_ssid {
+ struct host_cmd_tlv tlv;
+ u8 ssid[0];
+} __packed;
+
+struct host_cmd_tlv_beacon_period {
+ struct host_cmd_tlv tlv;
+ __le16 period;
+} __packed;
+
+struct host_cmd_tlv_dtim_period {
+ struct host_cmd_tlv tlv;
+ u8 period;
+} __packed;
+
+struct host_cmd_tlv_frag_threshold {
+ struct host_cmd_tlv tlv;
+ __le16 frag_thr;
+} __packed;
+
+struct host_cmd_tlv_rts_threshold {
+ struct host_cmd_tlv tlv;
+ __le16 rts_thr;
+} __packed;
+
+struct host_cmd_tlv_retry_limit {
+ struct host_cmd_tlv tlv;
+ u8 limit;
+} __packed;
+
+struct host_cmd_tlv_mac_addr {
+ struct host_cmd_tlv tlv;
+ u8 mac_addr[ETH_ALEN];
+} __packed;
+
+struct host_cmd_tlv_channel_band {
+ struct host_cmd_tlv tlv;
+ u8 band_config;
+ u8 channel;
+} __packed;
+
struct host_cmd_ds_802_11_rf_channel {
__le16 action;
__le16 current_channel;
@@ -1167,6 +1309,20 @@ struct host_cmd_ds_802_11_subsc_evt {
__le16 events;
} __packed;
+struct mwifiex_ie {
+ __le16 ie_index;
+ __le16 mgmt_subtype_mask;
+ __le16 ie_length;
+ u8 ie_buffer[IEEE_MAX_IE_SIZE];
+} __packed;
+
+#define MAX_MGMT_IE_INDEX 16
+struct mwifiex_ie_list {
+ __le16 type;
+ __le16 len;
+ struct mwifiex_ie ie_list[MAX_MGMT_IE_INDEX];
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -1217,6 +1373,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_pcie_details pcie_host_spec;
struct host_cmd_ds_802_11_eeprom_access eeprom;
struct host_cmd_ds_802_11_subsc_evt subsc_evt;
+ struct host_cmd_ds_sys_config uap_sys_config;
} params;
} __packed;
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
new file mode 100644
index 000000000000..ceb82cd749cc
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -0,0 +1,396 @@
+/*
+ * Marvell Wireless LAN device driver: management IE handling - setting and
+ * deleting IEs.
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "main.h"
+
+/* This function checks if the current IE index is used by any other interface.
+ * Return: -1: yes, current IE index is used by someone else.
+ * 0: no, current IE index is NOT used by other interface.
+ */
+static int
+mwifiex_ie_index_used_by_other_intf(struct mwifiex_private *priv, u16 idx)
+{
+ int i;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_ie *ie;
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ if (adapter->priv[i] != priv) {
+ ie = &adapter->priv[i]->mgmt_ie[idx];
+ if (ie->mgmt_subtype_mask && ie->ie_length)
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* Get unused IE index. This index will be used for setting new IE */
+static int
+mwifiex_ie_get_autoidx(struct mwifiex_private *priv, u16 subtype_mask,
+ struct mwifiex_ie *ie, u16 *index)
+{
+ u16 mask, len, i;
+
+ for (i = 0; i < priv->adapter->max_mgmt_ie_index; i++) {
+ mask = le16_to_cpu(priv->mgmt_ie[i].mgmt_subtype_mask);
+ len = le16_to_cpu(priv->mgmt_ie[i].ie_length) +
+ le16_to_cpu(ie->ie_length);
+
+ if (mask == MWIFIEX_AUTO_IDX_MASK)
+ continue;
+
+ if (mask == subtype_mask) {
+ if (len > IEEE_MAX_IE_SIZE)
+ continue;
+
+ *index = i;
+ return 0;
+ }
+
+ if (!priv->mgmt_ie[i].ie_length) {
+ if (mwifiex_ie_index_used_by_other_intf(priv, i))
+ continue;
+
+ *index = i;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+/* This function prepares IE data buffer for command to be sent to FW */
+static int
+mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
+ struct mwifiex_ie_list *ie_list)
+{
+ u16 travel_len, index, mask;
+ s16 input_len;
+ struct mwifiex_ie *ie;
+ u8 *tmp;
+
+ input_len = le16_to_cpu(ie_list->len);
+ travel_len = sizeof(struct host_cmd_tlv);
+
+ ie_list->len = 0;
+
+ while (input_len > 0) {
+ ie = (struct mwifiex_ie *)(((u8 *)ie_list) + travel_len);
+ input_len -= le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE;
+ travel_len += le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE;
+
+ index = le16_to_cpu(ie->ie_index);
+ mask = le16_to_cpu(ie->mgmt_subtype_mask);
+
+ if (index == MWIFIEX_AUTO_IDX_MASK) {
+ /* automatic addition */
+ if (mwifiex_ie_get_autoidx(priv, mask, ie, &index))
+ return -1;
+ if (index == MWIFIEX_AUTO_IDX_MASK)
+ return -1;
+
+ tmp = (u8 *)&priv->mgmt_ie[index].ie_buffer;
+ tmp += le16_to_cpu(priv->mgmt_ie[index].ie_length);
+ memcpy(tmp, &ie->ie_buffer, le16_to_cpu(ie->ie_length));
+ le16_add_cpu(&priv->mgmt_ie[index].ie_length,
+ le16_to_cpu(ie->ie_length));
+ priv->mgmt_ie[index].ie_index = cpu_to_le16(index);
+ priv->mgmt_ie[index].mgmt_subtype_mask =
+ cpu_to_le16(mask);
+
+ ie->ie_index = cpu_to_le16(index);
+ ie->ie_length = priv->mgmt_ie[index].ie_length;
+ memcpy(&ie->ie_buffer, &priv->mgmt_ie[index].ie_buffer,
+ le16_to_cpu(priv->mgmt_ie[index].ie_length));
+ } else {
+ if (mask != MWIFIEX_DELETE_MASK)
+ return -1;
+ /*
+ * Check if this index is being used on any
+ * other interface.
+ */
+ if (mwifiex_ie_index_used_by_other_intf(priv, index))
+ return -1;
+
+ ie->ie_length = 0;
+ memcpy(&priv->mgmt_ie[index], ie,
+ sizeof(struct mwifiex_ie));
+ }
+
+ le16_add_cpu(&ie_list->len,
+ le16_to_cpu(priv->mgmt_ie[index].ie_length) +
+ MWIFIEX_IE_HDR_SIZE);
+ }
+
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
+ return mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_CUSTOM_IE_I, ie_list);
+
+ return 0;
+}
+
+/* Copy individual custom IEs for beacon, probe response and assoc response
+ * and prepare a single structure for IE setting.
+ * This function also updates the IE indices allocated by the driver.
+ */
+static int
+mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
+ struct mwifiex_ie *beacon_ie, u16 *beacon_idx,
+ struct mwifiex_ie *pr_ie, u16 *probe_idx,
+ struct mwifiex_ie *ar_ie, u16 *assoc_idx)
+{
+ struct mwifiex_ie_list *ap_custom_ie;
+ u8 *pos;
+ u16 len;
+ int ret;
+
+ ap_custom_ie = kzalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!ap_custom_ie)
+ return -ENOMEM;
+
+ ap_custom_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
+ pos = (u8 *)ap_custom_ie->ie_list;
+
+ if (beacon_ie) {
+ len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
+ le16_to_cpu(beacon_ie->ie_length);
+ memcpy(pos, beacon_ie, len);
+ pos += len;
+ le16_add_cpu(&ap_custom_ie->len, len);
+ }
+ if (pr_ie) {
+ len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
+ le16_to_cpu(pr_ie->ie_length);
+ memcpy(pos, pr_ie, len);
+ pos += len;
+ le16_add_cpu(&ap_custom_ie->len, len);
+ }
+ if (ar_ie) {
+ len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
+ le16_to_cpu(ar_ie->ie_length);
+ memcpy(pos, ar_ie, len);
+ pos += len;
+ le16_add_cpu(&ap_custom_ie->len, len);
+ }
+
+ ret = mwifiex_update_autoindex_ies(priv, ap_custom_ie);
+
+ pos = (u8 *)(&ap_custom_ie->ie_list[0].ie_index);
+ if (beacon_ie && *beacon_idx == MWIFIEX_AUTO_IDX_MASK) {
+ /* save beacon ie index after auto-indexing */
+ *beacon_idx = le16_to_cpu(ap_custom_ie->ie_list[0].ie_index);
+ len = sizeof(*beacon_ie) - IEEE_MAX_IE_SIZE +
+ le16_to_cpu(beacon_ie->ie_length);
+ pos += len;
+ }
+ if (pr_ie && le16_to_cpu(pr_ie->ie_index) == MWIFIEX_AUTO_IDX_MASK) {
+ /* save probe resp ie index after auto-indexing */
+ *probe_idx = *((u16 *)pos);
+ len = sizeof(*pr_ie) - IEEE_MAX_IE_SIZE +
+ le16_to_cpu(pr_ie->ie_length);
+ pos += len;
+ }
+ if (ar_ie && le16_to_cpu(ar_ie->ie_index) == MWIFIEX_AUTO_IDX_MASK)
+ /* save assoc resp ie index after auto-indexing */
+ *assoc_idx = *((u16 *)pos);
+
+ return ret;
+}
+
+/* This function parses different IEs - tail IEs, beacon IEs, probe response
+ * IEs and association response IEs - from cfg80211_ap_settings and sets
+ * these IEs in the FW.
+ */
+int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
+ struct cfg80211_ap_settings *params)
+{
+ struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL;
+ struct mwifiex_ie *ar_ie = NULL, *rsn_ie = NULL;
+ struct ieee_types_header *ie = NULL;
+ u16 beacon_idx = MWIFIEX_AUTO_IDX_MASK, pr_idx = MWIFIEX_AUTO_IDX_MASK;
+ u16 ar_idx = MWIFIEX_AUTO_IDX_MASK, rsn_idx = MWIFIEX_AUTO_IDX_MASK;
+ u16 mask;
+ int ret = 0;
+
+ if (params->beacon.tail && params->beacon.tail_len) {
+ ie = (void *)cfg80211_find_ie(WLAN_EID_RSN, params->beacon.tail,
+ params->beacon.tail_len);
+ if (ie) {
+ rsn_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!rsn_ie)
+ return -ENOMEM;
+
+ rsn_ie->ie_index = cpu_to_le16(rsn_idx);
+ mask = MGMT_MASK_BEACON | MGMT_MASK_PROBE_RESP |
+ MGMT_MASK_ASSOC_RESP;
+ rsn_ie->mgmt_subtype_mask = cpu_to_le16(mask);
+ rsn_ie->ie_length = cpu_to_le16(ie->len + 2);
+ memcpy(rsn_ie->ie_buffer, ie, ie->len + 2);
+
+ if (mwifiex_update_uap_custom_ie(priv, rsn_ie, &rsn_idx,
+ NULL, NULL,
+ NULL, NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+ priv->rsn_idx = rsn_idx;
+ }
+ }
+
+ if (params->beacon.beacon_ies && params->beacon.beacon_ies_len) {
+ beacon_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!beacon_ie) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ beacon_ie->ie_index = cpu_to_le16(beacon_idx);
+ beacon_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON);
+ beacon_ie->ie_length =
+ cpu_to_le16(params->beacon.beacon_ies_len);
+ memcpy(beacon_ie->ie_buffer, params->beacon.beacon_ies,
+ params->beacon.beacon_ies_len);
+ }
+
+ if (params->beacon.proberesp_ies && params->beacon.proberesp_ies_len) {
+ pr_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!pr_ie) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ pr_ie->ie_index = cpu_to_le16(pr_idx);
+ pr_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_PROBE_RESP);
+ pr_ie->ie_length =
+ cpu_to_le16(params->beacon.proberesp_ies_len);
+ memcpy(pr_ie->ie_buffer, params->beacon.proberesp_ies,
+ params->beacon.proberesp_ies_len);
+ }
+
+ if (params->beacon.assocresp_ies && params->beacon.assocresp_ies_len) {
+ ar_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!ar_ie) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ ar_ie->ie_index = cpu_to_le16(ar_idx);
+ mask = MGMT_MASK_ASSOC_RESP | MGMT_MASK_REASSOC_RESP;
+ ar_ie->mgmt_subtype_mask = cpu_to_le16(mask);
+ ar_ie->ie_length =
+ cpu_to_le16(params->beacon.assocresp_ies_len);
+ memcpy(ar_ie->ie_buffer, params->beacon.assocresp_ies,
+ params->beacon.assocresp_ies_len);
+ }
+
+ if (beacon_ie || pr_ie || ar_ie) {
+ ret = mwifiex_update_uap_custom_ie(priv, beacon_ie,
+ &beacon_idx, pr_ie,
+ &pr_idx, ar_ie, &ar_idx);
+ if (ret)
+ goto done;
+ }
+
+ priv->beacon_idx = beacon_idx;
+ priv->proberesp_idx = pr_idx;
+ priv->assocresp_idx = ar_idx;
+
+done:
+ kfree(beacon_ie);
+ kfree(pr_ie);
+ kfree(ar_ie);
+ kfree(rsn_ie);
+
+ return ret;
+}
+
+/* This function removes management IE set */
+int mwifiex_del_mgmt_ies(struct mwifiex_private *priv)
+{
+ struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL;
+ struct mwifiex_ie *ar_ie = NULL, *rsn_ie = NULL;
+ int ret = 0;
+
+ if (priv->rsn_idx != MWIFIEX_AUTO_IDX_MASK) {
+ rsn_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!rsn_ie)
+ return -ENOMEM;
+
+ rsn_ie->ie_index = cpu_to_le16(priv->rsn_idx);
+ rsn_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
+ rsn_ie->ie_length = 0;
+ if (mwifiex_update_uap_custom_ie(priv, rsn_ie, &priv->rsn_idx,
+ NULL, &priv->proberesp_idx,
+ NULL, &priv->assocresp_idx)) {
+ ret = -1;
+ goto done;
+ }
+
+ priv->rsn_idx = MWIFIEX_AUTO_IDX_MASK;
+ }
+
+ if (priv->beacon_idx != MWIFIEX_AUTO_IDX_MASK) {
+ beacon_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!beacon_ie) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ beacon_ie->ie_index = cpu_to_le16(priv->beacon_idx);
+ beacon_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
+ beacon_ie->ie_length = 0;
+ }
+ if (priv->proberesp_idx != MWIFIEX_AUTO_IDX_MASK) {
+ pr_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!pr_ie) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ pr_ie->ie_index = cpu_to_le16(priv->proberesp_idx);
+ pr_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
+ pr_ie->ie_length = 0;
+ }
+ if (priv->assocresp_idx != MWIFIEX_AUTO_IDX_MASK) {
+ ar_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!ar_ie) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ ar_ie->ie_index = cpu_to_le16(priv->assocresp_idx);
+ ar_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
+ ar_ie->ie_length = 0;
+ }
+
+ if (beacon_ie || pr_ie || ar_ie)
+ ret = mwifiex_update_uap_custom_ie(priv,
+ beacon_ie, &priv->beacon_idx,
+ pr_ie, &priv->proberesp_idx,
+ ar_ie, &priv->assocresp_idx);
+
+done:
+ kfree(beacon_ie);
+ kfree(pr_ie);
+ kfree(ar_ie);
+ kfree(rsn_ie);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index d440c3eb640b..c1cb004db913 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -279,6 +279,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
adapter->arp_filter_size = 0;
adapter->channel_type = NL80211_CHAN_HT20;
+ adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
}
/*
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index f0f95524e96b..e6be6ee75951 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -62,6 +62,36 @@ enum {
BAND_AN = 16,
};
+#define MWIFIEX_WPA_PASSHPHRASE_LEN 64
+struct wpa_param {
+ u8 pairwise_cipher_wpa;
+ u8 pairwise_cipher_wpa2;
+ u8 group_cipher;
+ u32 length;
+ u8 passphrase[MWIFIEX_WPA_PASSHPHRASE_LEN];
+};
+
+#define KEY_MGMT_ON_HOST 0x03
+#define MWIFIEX_AUTH_MODE_AUTO 0xFF
+#define BAND_CONFIG_MANUAL 0x00
+struct mwifiex_uap_bss_param {
+ u8 channel;
+ u8 band_cfg;
+ u16 rts_threshold;
+ u16 frag_threshold;
+ u8 retry_limit;
+ struct mwifiex_802_11_ssid ssid;
+ u8 bcast_ssid_ctl;
+ u8 radio_ctl;
+ u8 dtim_period;
+ u16 beacon_period;
+ u16 auth_mode;
+ u16 protocol;
+ u16 key_mgmt;
+ u16 key_mgmt_operation;
+ struct wpa_param wpa_cfg;
+};
+
enum {
ADHOC_IDLE,
ADHOC_STARTED,
@@ -269,6 +299,8 @@ struct mwifiex_ds_read_eeprom {
#define IEEE_MAX_IE_SIZE 256
+#define MWIFIEX_IE_HDR_SIZE (sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE)
+
struct mwifiex_ds_misc_gen_ie {
u32 type;
u32 len;
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 8a390982463e..d6b4fb04011f 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1374,22 +1374,28 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
*
* In case of infra mode, it sends a deauthentication request, and
* in case of ad-hoc mode, a stop network request is sent to the firmware.
+ * In AP mode, a command to stop the BSS is sent to the firmware.
*/
int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
{
- int ret = 0;
+ if (!priv->media_connected)
+ return 0;
- if (priv->media_connected) {
- if (priv->bss_mode == NL80211_IFTYPE_STATION) {
- ret = mwifiex_deauthenticate_infra(priv, mac);
- } else if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
- ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_AD_HOC_STOP,
- HostCmd_ACT_GEN_SET, 0, NULL);
- }
+ switch (priv->bss_mode) {
+ case NL80211_IFTYPE_STATION:
+ return mwifiex_deauthenticate_infra(priv, mac);
+ case NL80211_IFTYPE_ADHOC:
+ return mwifiex_send_cmd_sync(priv,
+ HostCmd_CMD_802_11_AD_HOC_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL);
+ case NL80211_IFTYPE_AP:
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL);
+ default:
+ break;
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(mwifiex_deauthenticate);
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index be0f0e583f75..3192855c31c0 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -64,17 +64,17 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
adapter->priv_num = 0;
- /* Allocate memory for private structure */
- adapter->priv[0] = kzalloc(sizeof(struct mwifiex_private), GFP_KERNEL);
- if (!adapter->priv[0]) {
- dev_err(adapter->dev,
- "%s: failed to alloc priv[0]\n", __func__);
- goto error;
- }
-
- adapter->priv_num++;
+ for (i = 0; i < MWIFIEX_MAX_BSS_NUM; i++) {
+ /* Allocate memory for private structure */
+ adapter->priv[i] =
+ kzalloc(sizeof(struct mwifiex_private), GFP_KERNEL);
+ if (!adapter->priv[i])
+ goto error;
- adapter->priv[0]->adapter = adapter;
+ adapter->priv[i]->adapter = adapter;
+ adapter->priv[i]->bss_priority = i;
+ adapter->priv_num++;
+ }
mwifiex_init_lock_list(adapter);
init_timer(&adapter->cmd_timer);
@@ -349,19 +349,26 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
goto done;
- priv = adapter->priv[0];
- if (mwifiex_register_cfg80211(priv) != 0) {
+ priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
+ if (mwifiex_register_cfg80211(adapter)) {
dev_err(adapter->dev, "cannot register with cfg80211\n");
goto err_init_fw;
}
rtnl_lock();
/* Create station interface by default */
- if (!mwifiex_add_virtual_intf(priv->wdev->wiphy, "mlan%d",
+ if (!mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d",
NL80211_IFTYPE_STATION, NULL, NULL)) {
dev_err(adapter->dev, "cannot create default STA interface\n");
goto err_add_intf;
}
+
+ /* Create AP interface by default */
+ if (!mwifiex_add_virtual_intf(adapter->wiphy, "uap%d",
+ NL80211_IFTYPE_AP, NULL, NULL)) {
+ dev_err(adapter->dev, "cannot create default AP interface\n");
+ goto err_add_intf;
+ }
rtnl_unlock();
mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
@@ -369,7 +376,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto done;
err_add_intf:
- mwifiex_del_virtual_intf(priv->wdev->wiphy, priv->netdev);
+ mwifiex_del_virtual_intf(adapter->wiphy, priv->netdev);
rtnl_unlock();
err_init_fw:
pr_debug("info: %s: unregister device\n", __func__);
@@ -633,6 +640,12 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
priv->current_key_index = 0;
priv->media_connected = false;
memset(&priv->nick_name, 0, sizeof(priv->nick_name));
+ memset(priv->mgmt_ie, 0,
+ sizeof(struct mwifiex_ie) * MAX_MGMT_IE_INDEX);
+ priv->beacon_idx = MWIFIEX_AUTO_IDX_MASK;
+ priv->proberesp_idx = MWIFIEX_AUTO_IDX_MASK;
+ priv->assocresp_idx = MWIFIEX_AUTO_IDX_MASK;
+ priv->rsn_idx = MWIFIEX_AUTO_IDX_MASK;
priv->num_tx_timeout = 0;
memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
}
@@ -830,19 +843,21 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
rtnl_lock();
if (priv->wdev && priv->netdev)
- mwifiex_del_virtual_intf(priv->wdev->wiphy,
- priv->netdev);
+ mwifiex_del_virtual_intf(adapter->wiphy, priv->netdev);
rtnl_unlock();
}
priv = adapter->priv[0];
- if (!priv)
+ if (!priv || !priv->wdev)
goto exit_remove;
- if (priv->wdev) {
- wiphy_unregister(priv->wdev->wiphy);
- wiphy_free(priv->wdev->wiphy);
- kfree(priv->wdev);
+ wiphy_unregister(priv->wdev->wiphy);
+ wiphy_free(priv->wdev->wiphy);
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (priv)
+ kfree(priv->wdev);
}
mwifiex_terminate_workqueue(adapter);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 324ad390cacd..bd3b0bf94b9e 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -116,6 +116,7 @@ enum {
#define MAX_FREQUENCY_BAND_BG 2484
#define MWIFIEX_EVENT_HEADER_LEN 4
+#define MWIFIEX_UAP_EVENT_EXTRA_HEADER 2
#define MWIFIEX_TYPE_LEN 4
#define MWIFIEX_USB_TYPE_CMD 0xF00DFACE
@@ -370,6 +371,7 @@ struct mwifiex_private {
u8 bss_role;
u8 bss_priority;
u8 bss_num;
+ u8 bss_started;
u8 frame_type;
u8 curr_addr[ETH_ALEN];
u8 media_connected;
@@ -470,12 +472,16 @@ struct mwifiex_private {
struct cfg80211_scan_request *scan_request;
struct mwifiex_user_scan_cfg *user_scan_cfg;
u8 cfg_bssid[6];
- u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
struct wps wps;
u8 scan_block;
s32 cqm_rssi_thold;
u32 cqm_rssi_hyst;
u8 subsc_evt_rssi_state;
+ struct mwifiex_ie mgmt_ie[MAX_MGMT_IE_INDEX];
+ u16 beacon_idx;
+ u16 proberesp_idx;
+ u16 assocresp_idx;
+ u16 rsn_idx;
};
enum mwifiex_ba_status {
@@ -571,6 +577,7 @@ struct mwifiex_adapter {
char fw_name[32];
int winner;
struct device *dev;
+ struct wiphy *wiphy;
bool surprise_removed;
u32 fw_release_number;
u16 init_wait_q_woken;
@@ -677,6 +684,8 @@ struct mwifiex_adapter {
struct cmd_ctrl_node *cmd_queued;
spinlock_t queue_lock; /* lock for tx queues */
struct completion fw_load;
+ u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
+ u16 max_mgmt_ie_index;
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -760,6 +769,9 @@ int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
int mwifiex_sta_prepare_cmd(struct mwifiex_private *, uint16_t cmd_no,
u16 cmd_action, u32 cmd_oid,
void *data_buf, void *cmd_buf);
+int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
+ u16 cmd_action, u32 cmd_oid,
+ void *data_buf, void *cmd_buf);
int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no,
struct host_cmd_ds_command *resp);
int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *,
@@ -820,6 +832,9 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
int is_command_pending(struct mwifiex_adapter *adapter);
void mwifiex_init_priv_params(struct mwifiex_private *priv,
struct net_device *dev);
+int mwifiex_set_secure_params(struct mwifiex_private *priv,
+ struct mwifiex_uap_bss_param *bss_config,
+ struct cfg80211_ap_settings *params);
/*
* This function checks if the queuing is RA based or not.
@@ -933,7 +948,8 @@ int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
int mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, u16 channel);
int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
- int key_len, u8 key_index, int disable);
+ int key_len, u8 key_index, const u8 *mac_addr,
+ int disable);
int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
@@ -969,6 +985,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
int mwifiex_main_process(struct mwifiex_adapter *);
+int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel);
int mwifiex_bss_set_channel(struct mwifiex_private *,
struct mwifiex_chan_freq_power *cfp);
int mwifiex_get_bss_info(struct mwifiex_private *,
@@ -986,6 +1003,11 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
u32 *flags, struct vif_params *params);
int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev);
+void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config);
+
+int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
+ struct cfg80211_ap_settings *params);
+int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
u8 *mwifiex_11d_code_2_region(u8 code);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 87ed2a1f6cd9..40e025da6bc2 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -498,7 +498,8 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
{
struct host_cmd_ds_802_11_key_material *key_material =
&cmd->params.key_material;
- u16 key_param_len = 0;
+ struct host_cmd_tlv_mac_addr *tlv_mac;
+ u16 key_param_len = 0, cmd_size;
int ret = 0;
const u8 bc_mac[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
@@ -614,11 +615,26 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
cpu_to_le16((u16) enc_key->key_len +
KEYPARAMSET_FIXED_LEN);
- key_param_len = (u16) (enc_key->key_len + KEYPARAMSET_FIXED_LEN)
+ key_param_len = (u16)(enc_key->key_len + KEYPARAMSET_FIXED_LEN)
+ sizeof(struct mwifiex_ie_types_header);
cmd->size = cpu_to_le16(sizeof(key_material->action) + S_DS_GEN
+ key_param_len);
+
+ if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
+ tlv_mac = (void *)((u8 *)&key_material->key_param_set +
+ key_param_len);
+ tlv_mac->tlv.type = cpu_to_le16(TLV_TYPE_STA_MAC_ADDR);
+ tlv_mac->tlv.len = cpu_to_le16(ETH_ALEN);
+ memcpy(tlv_mac->mac_addr, enc_key->mac_addr, ETH_ALEN);
+ cmd_size = key_param_len + S_DS_GEN +
+ sizeof(key_material->action) +
+ sizeof(struct host_cmd_tlv_mac_addr);
+ } else {
+ cmd_size = key_param_len + S_DS_GEN +
+ sizeof(key_material->action);
+ }
+ cmd->size = cpu_to_le16(cmd_size);
}
return ret;
@@ -1248,13 +1264,15 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (ret)
return -1;
- /* Enable IEEE PS by default */
- priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_PS_MODE_ENH,
- EN_AUTO_PS, BITMAP_STA_PS, NULL);
- if (ret)
- return -1;
+ if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
+ /* Enable IEEE PS by default */
+ priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
+ ret = mwifiex_send_cmd_async(
+ priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+ EN_AUTO_PS, BITMAP_STA_PS, NULL);
+ if (ret)
+ return -1;
+ }
}
/* get tx rate */
@@ -1270,12 +1288,14 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (ret)
return -1;
- /* set ibss coalescing_status */
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
- HostCmd_ACT_GEN_SET, 0, &enable);
- if (ret)
- return -1;
+ if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
+ /* set ibss coalescing_status */
+ ret = mwifiex_send_cmd_async(
+ priv, HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
+ HostCmd_ACT_GEN_SET, 0, &enable);
+ if (ret)
+ return -1;
+ }
memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl));
amsdu_aggr_ctrl.enable = true;
@@ -1293,7 +1313,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (ret)
return -1;
- if (first_sta && (priv->adapter->iface_type != MWIFIEX_USB)) {
+ if (first_sta && priv->adapter->iface_type != MWIFIEX_USB &&
+ priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
/* Enable auto deep sleep */
auto_ds.auto_ds = DEEP_SLEEP_ON;
auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
@@ -1305,12 +1326,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
return -1;
}
- /* Send cmd to FW to enable/disable 11D function */
- state_11d = ENABLE_11D;
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_SET, DOT11D_I, &state_11d);
- if (ret)
- dev_err(priv->adapter->dev, "11D: failed to enable 11D\n");
+ if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
+ /* Send cmd to FW to enable/disable 11D function */
+ state_11d = ENABLE_11D;
+ ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_SET, DOT11D_I,
+ &state_11d);
+ if (ret)
+ dev_err(priv->adapter->dev,
+ "11D: failed to enable 11D\n");
+ }
/* Send cmd to FW to configure 11n specific configuration
* (Short GI, Channel BW, Green field support etc.) for transmit
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 3aa54243dea9..a79ed9bd9695 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -944,6 +944,14 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
ret = mwifiex_ret_subsc_evt(priv, resp, data_buf);
break;
+ case HostCmd_CMD_UAP_SYS_CONFIG:
+ break;
+ case HostCmd_CMD_UAP_BSS_START:
+ priv->bss_started = 1;
+ break;
+ case HostCmd_CMD_UAP_BSS_STOP:
+ priv->bss_started = 0;
+ break;
default:
dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index f6bbb9307f86..4ace5a3dcd23 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -184,8 +184,10 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
int mwifiex_process_sta_event(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
- int ret = 0;
+ int len, ret = 0;
u32 eventcause = adapter->event_cause;
+ struct station_info sinfo;
+ struct mwifiex_assoc_event *event;
switch (eventcause) {
case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
@@ -402,6 +404,53 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_HOSTWAKE_STAIE:
dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause);
break;
+
+ case EVENT_UAP_STA_ASSOC:
+ skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
+ memset(&sinfo, 0, sizeof(sinfo));
+ event = (struct mwifiex_assoc_event *)adapter->event_skb->data;
+ if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
+ len = -1;
+
+ if (ieee80211_is_assoc_req(event->frame_control))
+ len = 0;
+ else if (ieee80211_is_reassoc_req(event->frame_control))
+ /* There will be ETH_ALEN bytes of
+ * current_ap_addr before the re-assoc ies.
+ */
+ len = ETH_ALEN;
+
+ if (len != -1) {
+ sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+ sinfo.assoc_req_ies = (u8 *)&event->data[len];
+ len = (u8 *)sinfo.assoc_req_ies -
+ (u8 *)&event->frame_control;
+ sinfo.assoc_req_ies_len =
+ le16_to_cpu(event->len) - (u16)len;
+ }
+ }
+ cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
+ GFP_KERNEL);
+ break;
+ case EVENT_UAP_STA_DEAUTH:
+ skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
+ cfg80211_del_sta(priv->netdev, adapter->event_skb->data,
+ GFP_KERNEL);
+ break;
+ case EVENT_UAP_BSS_IDLE:
+ priv->media_connected = false;
+ break;
+ case EVENT_UAP_BSS_ACTIVE:
+ priv->media_connected = true;
+ break;
+ case EVENT_UAP_BSS_START:
+ dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+ memcpy(priv->netdev->dev_addr, adapter->event_body+2, ETH_ALEN);
+ break;
+ case EVENT_UAP_MIC_COUNTERMEASURES:
+ /* For future development */
+ dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+ break;
default:
dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 58970e0f7d13..106c449477b2 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -462,7 +462,7 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
info->bss_chan = bss_desc->channel;
- memcpy(info->country_code, priv->country_code,
+ memcpy(info->country_code, adapter->country_code,
IEEE80211_COUNTRY_STRING_LEN);
info->media_connected = priv->media_connected;
@@ -1219,7 +1219,8 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
* with requisite parameters and calls the IOCTL handler.
*/
int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
- int key_len, u8 key_index, int disable)
+ int key_len, u8 key_index,
+ const u8 *mac_addr, int disable)
{
struct mwifiex_ds_encrypt_key encrypt_key;
@@ -1229,8 +1230,12 @@ int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
encrypt_key.key_index = key_index;
if (key_len)
memcpy(encrypt_key.key_material, key, key_len);
+ if (mac_addr)
+ memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
} else {
encrypt_key.key_disable = true;
+ if (mac_addr)
+ memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
}
return mwifiex_sec_ioctl_encrypt_key(priv, &encrypt_key);
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
new file mode 100644
index 000000000000..76dfbc42a732
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -0,0 +1,432 @@
+/*
+ * Marvell Wireless LAN device driver: AP specific command handling
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "main.h"
+
+/* This function parses security-related parameters from cfg80211_ap_settings
+ * and sets them in the FW-understandable bss_config structure.
+ */
+int mwifiex_set_secure_params(struct mwifiex_private *priv,
+ struct mwifiex_uap_bss_param *bss_config,
+ struct cfg80211_ap_settings *params)
+{
+ int i;
+
+ switch (params->auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ bss_config->auth_mode = WLAN_AUTH_OPEN;
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ bss_config->auth_mode = WLAN_AUTH_SHARED_KEY;
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ bss_config->auth_mode = WLAN_AUTH_LEAP;
+ break;
+ default:
+ bss_config->auth_mode = MWIFIEX_AUTH_MODE_AUTO;
+ break;
+ }
+
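+ /* Tell the firmware that key management is handled on the host
+ * (e.g. by hostapd) rather than offloaded to the firmware.
+ */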
+ bss_config->key_mgmt_operation |= KEY_MGMT_ON_HOST;
+
+ for (i = 0; i < params->crypto.n_akm_suites; i++) {
+ switch (params->crypto.akm_suites[i]) {
+ case WLAN_AKM_SUITE_8021X:
+ if (params->crypto.wpa_versions &
+ NL80211_WPA_VERSION_1) {
+ bss_config->protocol = PROTOCOL_WPA;
+ bss_config->key_mgmt = KEY_MGMT_EAP;
+ }
+ if (params->crypto.wpa_versions &
+ NL80211_WPA_VERSION_2) {
+ bss_config->protocol = PROTOCOL_WPA2;
+ bss_config->key_mgmt = KEY_MGMT_EAP;
+ }
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ if (params->crypto.wpa_versions &
+ NL80211_WPA_VERSION_1) {
+ bss_config->protocol = PROTOCOL_WPA;
+ bss_config->key_mgmt = KEY_MGMT_PSK;
+ }
+ if (params->crypto.wpa_versions &
+ NL80211_WPA_VERSION_2) {
+ bss_config->protocol = PROTOCOL_WPA2;
+ bss_config->key_mgmt = KEY_MGMT_PSK;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ for (i = 0; i < params->crypto.n_ciphers_pairwise; i++) {
+ switch (params->crypto.ciphers_pairwise[i]) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ bss_config->wpa_cfg.pairwise_cipher_wpa = CIPHER_TKIP;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ bss_config->wpa_cfg.pairwise_cipher_wpa2 =
+ CIPHER_AES_CCMP;
+ break;
+ default:
+ break;
+ }
+ }
+
+ switch (params->crypto.cipher_group) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ bss_config->wpa_cfg.group_cipher = CIPHER_TKIP;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ bss_config->wpa_cfg.group_cipher = CIPHER_AES_CCMP;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* This function initializes some mwifiex_uap_bss_param variables to
+ * out-of-range values so that the FW can ignore them. They may or may not
+ * be updated to valid ones at a later stage.
+ */
+void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config)
+{
+ config->bcast_ssid_ctl = 0x7F;
+ config->radio_ctl = 0x7F;
+ config->dtim_period = 0x7F;
+ config->beacon_period = 0x7FFF;
+ config->auth_mode = 0x7F;
+ config->rts_threshold = 0x7FFF;
+ config->frag_threshold = 0x7FFF;
+ config->retry_limit = 0x7F;
+}
+
+/* This function parses BSS-related parameters from the configuration
+ * structure and prepares TLVs, which are appended to the command buffer.
+ */
+static int
+mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
+{
+ struct host_cmd_tlv_dtim_period *dtim_period;
+ struct host_cmd_tlv_beacon_period *beacon_period;
+ struct host_cmd_tlv_ssid *ssid;
+ struct host_cmd_tlv_channel_band *chan_band;
+ struct host_cmd_tlv_frag_threshold *frag_threshold;
+ struct host_cmd_tlv_rts_threshold *rts_threshold;
+ struct host_cmd_tlv_retry_limit *retry_limit;
+ struct host_cmd_tlv_pwk_cipher *pwk_cipher;
+ struct host_cmd_tlv_gwk_cipher *gwk_cipher;
+ struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
+ struct host_cmd_tlv_auth_type *auth_type;
+ struct host_cmd_tlv_passphrase *passphrase;
+ struct host_cmd_tlv_akmp *tlv_akmp;
+ struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
+ u16 cmd_size = *param_size;
+
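+ /* Each parameter below that passes validation is appended as a
+ * type/length/value triplet: tlv.len covers only the payload
+ * (struct size minus the host_cmd_tlv header), while cmd_size and
+ * the tlv pointer advance past header plus payload.
+ */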
+ if (bss_cfg->ssid.ssid_len) {
+ ssid = (struct host_cmd_tlv_ssid *)tlv;
+ ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
+ ssid->tlv.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len);
+ memcpy(ssid->ssid, bss_cfg->ssid.ssid, bss_cfg->ssid.ssid_len);
+ cmd_size += sizeof(struct host_cmd_tlv) +
+ bss_cfg->ssid.ssid_len;
+ tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len;
+ }
+ if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) {
+ chan_band = (struct host_cmd_tlv_channel_band *)tlv;
+ chan_band->tlv.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
+ chan_band->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_channel_band) -
+ sizeof(struct host_cmd_tlv));
+ chan_band->band_config = bss_cfg->band_cfg;
+ chan_band->channel = bss_cfg->channel;
+ cmd_size += sizeof(struct host_cmd_tlv_channel_band);
+ tlv += sizeof(struct host_cmd_tlv_channel_band);
+ }
+ if (bss_cfg->beacon_period >= MIN_BEACON_PERIOD &&
+ bss_cfg->beacon_period <= MAX_BEACON_PERIOD) {
+ beacon_period = (struct host_cmd_tlv_beacon_period *)tlv;
+ beacon_period->tlv.type =
+ cpu_to_le16(TLV_TYPE_UAP_BEACON_PERIOD);
+ beacon_period->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_beacon_period) -
+ sizeof(struct host_cmd_tlv));
+ beacon_period->period = cpu_to_le16(bss_cfg->beacon_period);
+ cmd_size += sizeof(struct host_cmd_tlv_beacon_period);
+ tlv += sizeof(struct host_cmd_tlv_beacon_period);
+ }
+ if (bss_cfg->dtim_period >= MIN_DTIM_PERIOD &&
+ bss_cfg->dtim_period <= MAX_DTIM_PERIOD) {
+ dtim_period = (struct host_cmd_tlv_dtim_period *)tlv;
+ dtim_period->tlv.type = cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD);
+ dtim_period->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_dtim_period) -
+ sizeof(struct host_cmd_tlv));
+ dtim_period->period = bss_cfg->dtim_period;
+ cmd_size += sizeof(struct host_cmd_tlv_dtim_period);
+ tlv += sizeof(struct host_cmd_tlv_dtim_period);
+ }
+ if (bss_cfg->rts_threshold <= MWIFIEX_RTS_MAX_VALUE) {
+ rts_threshold = (struct host_cmd_tlv_rts_threshold *)tlv;
+ rts_threshold->tlv.type =
+ cpu_to_le16(TLV_TYPE_UAP_RTS_THRESHOLD);
+ rts_threshold->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_rts_threshold) -
+ sizeof(struct host_cmd_tlv));
+ rts_threshold->rts_thr = cpu_to_le16(bss_cfg->rts_threshold);
+ cmd_size += sizeof(struct host_cmd_tlv_rts_threshold);
+ tlv += sizeof(struct host_cmd_tlv_rts_threshold);
+ }
+ if ((bss_cfg->frag_threshold >= MWIFIEX_FRAG_MIN_VALUE) &&
+ (bss_cfg->frag_threshold <= MWIFIEX_FRAG_MAX_VALUE)) {
+ frag_threshold = (struct host_cmd_tlv_frag_threshold *)tlv;
+ frag_threshold->tlv.type =
+ cpu_to_le16(TLV_TYPE_UAP_FRAG_THRESHOLD);
+ frag_threshold->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_frag_threshold) -
+ sizeof(struct host_cmd_tlv));
+ frag_threshold->frag_thr = cpu_to_le16(bss_cfg->frag_threshold);
+ cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
+ tlv += sizeof(struct host_cmd_tlv_frag_threshold);
+ }
+ if (bss_cfg->retry_limit <= MWIFIEX_RETRY_LIMIT) {
+ retry_limit = (struct host_cmd_tlv_retry_limit *)tlv;
+ retry_limit->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT);
+ retry_limit->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_retry_limit) -
+ sizeof(struct host_cmd_tlv));
+ retry_limit->limit = (u8)bss_cfg->retry_limit;
+ cmd_size += sizeof(struct host_cmd_tlv_retry_limit);
+ tlv += sizeof(struct host_cmd_tlv_retry_limit);
+ }
+ if ((bss_cfg->protocol & PROTOCOL_WPA) ||
+ (bss_cfg->protocol & PROTOCOL_WPA2) ||
+ (bss_cfg->protocol & PROTOCOL_EAP)) {
+ tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
+ tlv_akmp->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
+ tlv_akmp->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
+ sizeof(struct host_cmd_tlv));
+ tlv_akmp->key_mgmt_operation =
+ cpu_to_le16(bss_cfg->key_mgmt_operation);
+ tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
+ cmd_size += sizeof(struct host_cmd_tlv_akmp);
+ tlv += sizeof(struct host_cmd_tlv_akmp);
+
+ if (bss_cfg->wpa_cfg.pairwise_cipher_wpa &
+ VALID_CIPHER_BITMAP) {
+ pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
+ pwk_cipher->tlv.type =
+ cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+ pwk_cipher->tlv.len = cpu_to_le16(
+ sizeof(struct host_cmd_tlv_pwk_cipher) -
+ sizeof(struct host_cmd_tlv));
+ pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
+ pwk_cipher->cipher =
+ bss_cfg->wpa_cfg.pairwise_cipher_wpa;
+ cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
+ tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
+ }
+ if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 &
+ VALID_CIPHER_BITMAP) {
+ pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
+ pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+ pwk_cipher->tlv.len = cpu_to_le16(
+ sizeof(struct host_cmd_tlv_pwk_cipher) -
+ sizeof(struct host_cmd_tlv));
+ pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
+ pwk_cipher->cipher =
+ bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
+ cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
+ tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
+ }
+ if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
+ gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
+ gwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
+ gwk_cipher->tlv.len = cpu_to_le16(
+ sizeof(struct host_cmd_tlv_gwk_cipher) -
+ sizeof(struct host_cmd_tlv));
+ gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
+ cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
+ tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
+ }
+ if (bss_cfg->wpa_cfg.length) {
+ passphrase = (struct host_cmd_tlv_passphrase *)tlv;
+ passphrase->tlv.type =
+ cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
+ passphrase->tlv.len =
+ cpu_to_le16(bss_cfg->wpa_cfg.length);
+ memcpy(passphrase->passphrase,
+ bss_cfg->wpa_cfg.passphrase,
+ bss_cfg->wpa_cfg.length);
+ cmd_size += sizeof(struct host_cmd_tlv) +
+ bss_cfg->wpa_cfg.length;
+ tlv += sizeof(struct host_cmd_tlv) +
+ bss_cfg->wpa_cfg.length;
+ }
+ }
+ if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) ||
+ (bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) {
+ auth_type = (struct host_cmd_tlv_auth_type *)tlv;
+ auth_type->tlv.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
+ auth_type->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_auth_type) -
+ sizeof(struct host_cmd_tlv));
+ auth_type->auth_type = (u8)bss_cfg->auth_mode;
+ cmd_size += sizeof(struct host_cmd_tlv_auth_type);
+ tlv += sizeof(struct host_cmd_tlv_auth_type);
+ }
+ if (bss_cfg->protocol) {
+ encrypt_protocol = (struct host_cmd_tlv_encrypt_protocol *)tlv;
+ encrypt_protocol->tlv.type =
+ cpu_to_le16(TLV_TYPE_UAP_ENCRY_PROTOCOL);
+ encrypt_protocol->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_encrypt_protocol)
+ - sizeof(struct host_cmd_tlv));
+ encrypt_protocol->proto = cpu_to_le16(bss_cfg->protocol);
+ cmd_size += sizeof(struct host_cmd_tlv_encrypt_protocol);
+ tlv += sizeof(struct host_cmd_tlv_encrypt_protocol);
+ }
+
+ *param_size = cmd_size;
+
+ return 0;
+}
+
+/* This function parses custom IEs from the IE list and prepares the
+ * command buffer.
+ */
+static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size)
+{
+ struct mwifiex_ie_list *ap_ie = cmd_buf;
+ struct host_cmd_tlv *tlv_ie = (struct host_cmd_tlv *)tlv;
+
+ if (!ap_ie || !ap_ie->len || !ap_ie->ie_list)
+ return -1;
+
+ *ie_size += le16_to_cpu(ap_ie->len) + sizeof(struct host_cmd_tlv);
+
+ tlv_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
+ tlv_ie->len = ap_ie->len;
+ tlv += sizeof(struct host_cmd_tlv);
+
+ memcpy(tlv, ap_ie->ie_list, le16_to_cpu(ap_ie->len));
+
+ return 0;
+}
+
+/* Parse the AP config structure and prepare a TLV-based command structure
+ * to be sent to the FW for uAP configuration.
+ */
+static int
+mwifiex_cmd_uap_sys_config(struct host_cmd_ds_command *cmd, u16 cmd_action,
+ u32 type, void *cmd_buf)
+{
+ u8 *tlv;
+ u16 cmd_size, param_size, ie_size;
+ struct host_cmd_ds_sys_config *sys_cfg;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_UAP_SYS_CONFIG);
+ cmd_size = (u16)(sizeof(struct host_cmd_ds_sys_config) + S_DS_GEN);
+ sys_cfg = (struct host_cmd_ds_sys_config *)&cmd->params.uap_sys_config;
+ sys_cfg->action = cpu_to_le16(cmd_action);
+ tlv = sys_cfg->tlv;
+
+ switch (type) {
+ case UAP_BSS_PARAMS_I:
+ param_size = cmd_size;
+ if (mwifiex_uap_bss_param_prepare(tlv, cmd_buf, &param_size))
+ return -1;
+ cmd->size = cpu_to_le16(param_size);
+ break;
+ case UAP_CUSTOM_IE_I:
+ ie_size = cmd_size;
+ if (mwifiex_uap_custom_ie_prepare(tlv, cmd_buf, &ie_size))
+ return -1;
+ cmd->size = cpu_to_le16(ie_size);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+/* This function prepares the AP specific commands before sending them
+ * to the firmware.
+ * This is a generic function which calls specific command preparation
+ * routines based upon the command number.
+ */
+int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
+ u16 cmd_action, u32 type,
+ void *data_buf, void *cmd_buf)
+{
+ struct host_cmd_ds_command *cmd = cmd_buf;
+
+ switch (cmd_no) {
+ case HostCmd_CMD_UAP_SYS_CONFIG:
+ if (mwifiex_cmd_uap_sys_config(cmd, cmd_action, type, data_buf))
+ return -1;
+ break;
+ case HostCmd_CMD_UAP_BSS_START:
+ case HostCmd_CMD_UAP_BSS_STOP:
+ cmd->command = cpu_to_le16(cmd_no);
+ cmd->size = cpu_to_le16(S_DS_GEN);
+ break;
+ default:
+ dev_err(priv->adapter->dev,
+ "PREP_CMD: unknown cmd %#x\n", cmd_no);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* This function sets the RF channel for the AP.
+ *
+ * It populates the channel information in the AP config structure
+ * and sends a command to configure it in the AP.
+ */
+int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel)
+{
+ struct mwifiex_uap_bss_param *bss_cfg;
+ struct wiphy *wiphy = priv->wdev->wiphy;
+
+ bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param), GFP_KERNEL);
+ if (!bss_cfg)
+ return -ENOMEM;
+
+ bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
+ bss_cfg->channel = channel;
+
+ if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_BSS_PARAMS_I, bss_cfg)) {
+ wiphy_err(wiphy, "Failed to set the uAP channel\n");
+ kfree(bss_cfg);
+ return -1;
+ }
+
+ kfree(bss_cfg);
+ return 0;
+}
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 429a1dee2d26..f3fc65515857 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -885,6 +885,10 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
tid_ptr = &(priv_tmp)->wmm.
tid_tbl_ptr[tos_to_tid[i]];
+ /* For non-STA interfaces, ra_list_curr may be NULL */
+ if (!tid_ptr->ra_list_curr)
+ continue;
+
spin_lock_irqsave(&tid_ptr->tid_tbl_lock,
flags);
is_list_empty =
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index c5404eb82b2f..2e9e6af21362 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -505,9 +505,6 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev);
-static int rndis_set_channel(struct wiphy *wiphy, struct net_device *dev,
- struct ieee80211_channel *chan, enum nl80211_channel_type channel_type);
-
static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index, bool pairwise, const u8 *mac_addr,
struct key_params *params);
@@ -549,7 +546,6 @@ static const struct cfg80211_ops rndis_config_ops = {
.disconnect = rndis_disconnect,
.join_ibss = rndis_join_ibss,
.leave_ibss = rndis_leave_ibss,
- .set_channel = rndis_set_channel,
.add_key = rndis_add_key,
.del_key = rndis_del_key,
.set_default_key = rndis_set_default_key,
@@ -2398,16 +2394,6 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
return deauthenticate(usbdev);
}
-static int rndis_set_channel(struct wiphy *wiphy, struct net_device *netdev,
- struct ieee80211_channel *chan, enum nl80211_channel_type channel_type)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
-
- return set_channel(usbdev,
- ieee80211_frequency_to_channel(chan->center_freq));
-}
-
static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index, bool pairwise, const u8 *mac_addr,
struct key_params *params)
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 931331d95217..cad25bfebd7a 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -1192,6 +1192,7 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x5390) },
{ PCI_DEVICE(0x1814, 0x5392) },
{ PCI_DEVICE(0x1814, 0x539a) },
+ { PCI_DEVICE(0x1814, 0x539b) },
{ PCI_DEVICE(0x1814, 0x539f) },
#endif
{ 0, }
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 1b851f650e07..e2750a12c6f1 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -260,6 +260,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
}
if (wl->irq) {
+ irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
if (ret < 0) {
wl1251_error("request_irq() failed: %d", ret);
@@ -267,7 +268,6 @@ static int wl1251_sdio_probe(struct sdio_func *func,
}
irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
- disable_irq(wl->irq);
wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 6248c354fc5c..87f6305bda2c 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -281,6 +281,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
wl->use_eeprom = pdata->use_eeprom;
+ irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
if (ret < 0) {
wl1251_error("request_irq() failed: %d", ret);
@@ -289,8 +290,6 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
- disable_irq(wl->irq);
-
ret = wl1251_init_ieee80211(wl);
if (ret)
goto out_irq;
diff --git a/drivers/net/wireless/ti/wl12xx/Kconfig b/drivers/net/wireless/ti/wl12xx/Kconfig
index 5b92329122c4..c2183594655a 100644
--- a/drivers/net/wireless/ti/wl12xx/Kconfig
+++ b/drivers/net/wireless/ti/wl12xx/Kconfig
@@ -1,5 +1,6 @@
config WL12XX
tristate "TI wl12xx support"
+ depends on MAC80211
select WLCORE
---help---
This module adds support for wireless adapters based on TI wl1271,
diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig
index 9d04c38938bc..54156b0b5c2d 100644
--- a/drivers/net/wireless/ti/wlcore/Kconfig
+++ b/drivers/net/wireless/ti/wlcore/Kconfig
@@ -1,6 +1,6 @@
config WLCORE
tristate "TI wlcore support"
- depends on WL_TI && GENERIC_HARDIRQS
+ depends on WL_TI && GENERIC_HARDIRQS && MAC80211
depends on INET
select FW_LOADER
---help---
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index 5912541a925e..f3d6fa508269 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -1714,3 +1714,85 @@ out:
return ret;
}
+
+#ifdef CONFIG_PM
+/* Set the global behaviour of RX filters - On/Off + default action */
+int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
+ enum rx_filter_action action)
+{
+ struct acx_default_rx_filter *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx default rx filter en: %d act: %d",
+ enable, action);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx)
+ return -ENOMEM;
+
+ acx->enable = enable;
+ acx->default_action = action;
+
+ ret = wl1271_cmd_configure(wl, ACX_ENABLE_RX_DATA_FILTER, acx,
+ sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx default rx filter enable failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+/* Configure or disable a specific RX filter pattern */
+int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
+ struct wl12xx_rx_filter *filter)
+{
+ struct acx_rx_filter_cfg *acx;
+ int fields_size = 0;
+ int acx_size;
+ int ret;
+
+ WARN_ON(enable && !filter);
+ WARN_ON(index >= WL1271_MAX_RX_FILTERS);
+
+ wl1271_debug(DEBUG_ACX,
+ "acx set rx filter idx: %d enable: %d filter: %p",
+ index, enable, filter);
+
+ if (enable) {
+ fields_size = wl1271_rx_filter_get_fields_size(filter);
+
+ wl1271_debug(DEBUG_ACX, "act: %d num_fields: %d field_size: %d",
+ filter->action, filter->num_fields, fields_size);
+ }
+
+ acx_size = ALIGN(sizeof(*acx) + fields_size, 4);
+ acx = kzalloc(acx_size, GFP_KERNEL);
+
+ if (!acx)
+ return -ENOMEM;
+
+ acx->enable = enable;
+ acx->index = index;
+
+ if (enable) {
+ acx->num_fields = filter->num_fields;
+ acx->action = filter->action;
+ wl1271_rx_filter_flatten_fields(filter, acx->fields);
+ }
+
+ wl1271_dump(DEBUG_ACX, "RX_FILTER: ", acx, acx_size);
+
+ ret = wl1271_cmd_configure(wl, ACX_SET_RX_DATA_FILTER, acx, acx_size);
+ if (ret < 0) {
+ wl1271_warning("setting rx filter failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index b2f88831b7a9..e6a74869a5ff 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -1147,6 +1147,32 @@ struct wl12xx_acx_config_hangover {
u8 padding[2];
} __packed;
+
+struct acx_default_rx_filter {
+ struct acx_header header;
+ u8 enable;
+
+ /* action of type FILTER_XXX */
+ u8 default_action;
+
+ u8 pad[2];
+} __packed;
+
+
+struct acx_rx_filter_cfg {
+ struct acx_header header;
+
+ u8 enable;
+
+ /* 0 - WL1271_MAX_RX_FILTERS-1 */
+ u8 index;
+
+ u8 action;
+
+ u8 num_fields;
+ u8 fields[0];
+} __packed;
+
enum {
ACX_WAKE_UP_CONDITIONS = 0x0000,
ACX_MEM_CFG = 0x0001,
@@ -1305,4 +1331,10 @@ int wl1271_acx_fm_coex(struct wl1271 *wl);
int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
int wl12xx_acx_config_hangover(struct wl1271 *wl);
+#ifdef CONFIG_PM
+int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
+ enum rx_filter_action action);
+int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
+ struct wl12xx_rx_filter *filter);
+#endif /* CONFIG_PM */
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
index 3a2207db5405..9b98230f84ce 100644
--- a/drivers/net/wireless/ti/wlcore/boot.c
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -72,7 +72,7 @@ static int wlcore_boot_fw_version(struct wl1271 *wl)
struct wl1271_static_data *static_data;
int ret;
- static_data = kmalloc(sizeof(*static_data), GFP_DMA);
+ static_data = kmalloc(sizeof(*static_data), GFP_KERNEL | GFP_DMA);
if (!static_data) {
wl1271_error("Couldn't allocate memory for static data!");
return -ENOMEM;
@@ -413,6 +413,7 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
/* unmask required mbox events */
wl->event_mask = BSS_LOSE_EVENT_ID |
+ REGAINED_BSS_EVENT_ID |
SCAN_COMPLETE_EVENT_ID |
ROLE_STOP_COMPLETE_EVENT_ID |
RSSI_SNR_TRIGGER_0_EVENT_ID |
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 5c4716c6f040..5b128a971449 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -123,7 +123,9 @@ static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
unsigned long timeout;
int ret = 0;
- events_vector = kmalloc(sizeof(*events_vector), GFP_DMA);
+ events_vector = kmalloc(sizeof(*events_vector), GFP_KERNEL | GFP_DMA);
+ if (!events_vector)
+ return -ENOMEM;
timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
@@ -1034,7 +1036,7 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
skb_reserve(skb, sizeof(*hdr) + WL1271_EXTRA_SPACE_MAX);
tmpl = (struct wl12xx_arp_rsp_template *)skb_put(skb, sizeof(*tmpl));
- memset(tmpl, 0, sizeof(tmpl));
+ memset(tmpl, 0, sizeof(*tmpl));
/* llc layer */
memcpy(tmpl->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
@@ -1083,7 +1085,7 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
/* mac80211 header */
hdr = (struct ieee80211_hdr_3addr *)skb_push(skb, sizeof(*hdr));
- memset(hdr, 0, sizeof(hdr));
+ memset(hdr, 0, sizeof(*hdr));
fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS;
if (wlvif->sta.qos)
fc |= IEEE80211_STYPE_QOS_DATA;
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 292632ddf890..28e2a633c3be 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -103,7 +103,6 @@ static int wl1271_event_process(struct wl1271 *wl)
struct ieee80211_vif *vif;
struct wl12xx_vif *wlvif;
u32 vector;
- bool beacon_loss = false;
bool disconnect_sta = false;
unsigned long sta_bitmap = 0;
@@ -141,20 +140,23 @@ static int wl1271_event_process(struct wl1271 *wl)
mbox->soft_gemini_sense_info);
/*
- * The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon
- * filtering) is enabled. Without PSM, the stack will receive all
- * beacons and can detect beacon loss by itself.
- *
- * As there's possibility that the driver disables PSM before receiving
- * BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
- *
+ * We are a HW_MONITOR device. On beacon loss, queue the connection
+ * loss work; cancel it on a REGAINED event.
*/
if (vector & BSS_LOSE_EVENT_ID) {
/* TODO: check for multi-role */
+ int delay = wl->conf.conn.synch_fail_thold *
+ wl->conf.conn.bss_lose_timeout;
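+ /* Give the firmware synch_fail_thold * bss_lose_timeout ms to
+ * regain the BSS before reporting connection loss to mac80211.
+ */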
wl1271_info("Beacon loss detected.");
+ cancel_delayed_work_sync(&wl->connection_loss_work);
+ ieee80211_queue_delayed_work(wl->hw, &wl->connection_loss_work,
+ msecs_to_jiffies(delay));
+ }
- /* indicate to the stack, that beacons have been lost */
- beacon_loss = true;
+ if (vector & REGAINED_BSS_EVENT_ID) {
+ /* TODO: check for multi-role */
+ wl1271_info("Beacon regained.");
+ cancel_delayed_work_sync(&wl->connection_loss_work);
}
if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
@@ -257,13 +259,6 @@ static int wl1271_event_process(struct wl1271 *wl)
rcu_read_unlock();
}
}
-
- if (beacon_loss)
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- vif = wl12xx_wlvif_to_vif(wlvif);
- ieee80211_connection_loss(vif);
- }
-
return 0;
}
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 2b0f987660c6..acef93390d3d 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1120,6 +1120,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
cancel_work_sync(&wl->recovery_work);
cancel_delayed_work_sync(&wl->elp_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
+ cancel_delayed_work_sync(&wl->connection_loss_work);
mutex_lock(&wl->mutex);
wl1271_power_off(wl);
@@ -1261,8 +1262,270 @@ static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
#ifdef CONFIG_PM
+static int
+wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
+{
+ int num_fields = 0, in_field = 0, fields_size = 0;
+ int i, pattern_len = 0;
+
+ if (!p->mask) {
+ wl1271_warning("No mask in WoWLAN pattern");
+ return -EINVAL;
+ }
+
+ /*
+ * The pattern is broken up into segments of bytes at different offsets
+ * that need to be checked by the FW filter. Each segment is called
+ * a field in the FW API. We verify that the total number of fields
+ * required for this pattern won't exceed the FW limit (8 fields)
+ * and that the total fields buffer won't exceed the FW size limit.
+ * Note that if there's a pattern which crosses Ethernet/IP header
+ * boundary a new field is required.
+ */
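+ /* Illustrative example: with mask bits 0-1 and 14-16 set, the loop
+ * below yields two fields - a 2-byte field in the Ethernet header
+ * and a 3-byte field at the start of the IP header - so
+ * num_fields = 2 and fields_size = 5 + 2 * RX_FILTER_FIELD_OVERHEAD.
+ */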
+ for (i = 0; i < p->pattern_len; i++) {
+ if (test_bit(i, (unsigned long *)p->mask)) {
+ if (!in_field) {
+ in_field = 1;
+ pattern_len = 1;
+ } else {
+ if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
+ num_fields++;
+ fields_size += pattern_len +
+ RX_FILTER_FIELD_OVERHEAD;
+ pattern_len = 1;
+ } else
+ pattern_len++;
+ }
+ } else {
+ if (in_field) {
+ in_field = 0;
+ fields_size += pattern_len +
+ RX_FILTER_FIELD_OVERHEAD;
+ num_fields++;
+ }
+ }
+ }
+
+ if (in_field) {
+ fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
+ num_fields++;
+ }
+
+ if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
+ wl1271_warning("RX Filter too complex. Too many segments");
+ return -EINVAL;
+ }
+
+ if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
+ wl1271_warning("RX filter pattern is too big");
+ return -E2BIG;
+ }
+
+ return 0;
+}
+
+struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
+{
+ return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
+}
+
+void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
+{
+ int i;
+
+ if (filter == NULL)
+ return;
+
+ for (i = 0; i < filter->num_fields; i++)
+ kfree(filter->fields[i].pattern);
+
+ kfree(filter);
+}
+
+int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
+ u16 offset, u8 flags,
+ u8 *pattern, u8 len)
+{
+ struct wl12xx_rx_filter_field *field;
+
+ if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
+ wl1271_warning("Max fields per RX filter. can't alloc another");
+ return -EINVAL;
+ }
+
+ field = &filter->fields[filter->num_fields];
+
+ field->pattern = kzalloc(len, GFP_KERNEL);
+ if (!field->pattern) {
+ wl1271_warning("Failed to allocate RX filter pattern");
+ return -ENOMEM;
+ }
+
+ filter->num_fields++;
+
+ field->offset = cpu_to_le16(offset);
+ field->flags = flags;
+ field->len = len;
+ memcpy(field->pattern, pattern, len);
+
+ return 0;
+}
+
+int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
+{
+ int i, fields_size = 0;
+
+ for (i = 0; i < filter->num_fields; i++)
+ fields_size += filter->fields[i].len +
+ sizeof(struct wl12xx_rx_filter_field) -
+ sizeof(u8 *);
+
+ return fields_size;
+}
+
+void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
+ u8 *buf)
+{
+ int i;
+ struct wl12xx_rx_filter_field *field;
+
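+ /* Serialize each field as its fixed header (the struct minus the
+ * pattern pointer) immediately followed by the pattern bytes,
+ * matching the size computed by wl1271_rx_filter_get_fields_size().
+ */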
+ for (i = 0; i < filter->num_fields; i++) {
+ field = (struct wl12xx_rx_filter_field *)buf;
+
+ field->offset = filter->fields[i].offset;
+ field->flags = filter->fields[i].flags;
+ field->len = filter->fields[i].len;
+
+ memcpy(&field->pattern, filter->fields[i].pattern, field->len);
+ buf += sizeof(struct wl12xx_rx_filter_field) -
+ sizeof(u8 *) + field->len;
+ }
+}
+
+/*
+ * Allocates an RX filter and returns it through f.
+ * The caller must free it with wl1271_rx_filter_free().
+ */
+static int wl1271_convert_wowlan_pattern_to_rx_filter(
+ struct cfg80211_wowlan_trig_pkt_pattern *p,
+ struct wl12xx_rx_filter **f)
+{
+ int i, j, ret = 0;
+ struct wl12xx_rx_filter *filter;
+ u16 offset;
+ u8 flags, len;
+
+ filter = wl1271_rx_filter_alloc();
+ if (!filter) {
+ wl1271_warning("Failed to alloc rx filter");
+ ret = -ENOMEM;
+ goto err;
+ }
+
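+ /* Walk the mask: each maximal run of set bits becomes one filter
+ * field, split at the Ethernet/IP header boundary so that offsets
+ * stay relative to the proper header.
+ */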
+ i = 0;
+ while (i < p->pattern_len) {
+ if (!test_bit(i, (unsigned long *)p->mask)) {
+ i++;
+ continue;
+ }
+
+ for (j = i; j < p->pattern_len; j++) {
+ if (!test_bit(j, (unsigned long *)p->mask))
+ break;
+
+ if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
+ j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
+ break;
+ }
+
+ if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
+ offset = i;
+ flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
+ } else {
+ offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
+ flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
+ }
+
+ len = j - i;
+
+ ret = wl1271_rx_filter_alloc_field(filter,
+ offset,
+ flags,
+ &p->pattern[i], len);
+ if (ret)
+ goto err;
+
+ i = j;
+ }
+
+ filter->action = FILTER_SIGNAL;
+
+ *f = filter;
+ return 0;
+
+err:
+ wl1271_rx_filter_free(filter);
+ *f = NULL;
+
+ return ret;
+}
+
+static int wl1271_configure_wowlan(struct wl1271 *wl,
+ struct cfg80211_wowlan *wow)
+{
+ int i, ret;
+
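+ /* No specific patterns requested: disable the default RX data
+ * filter and clear any previously configured pattern filters.
+ */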
+ if (!wow || wow->any || !wow->n_patterns) {
+ wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
+ wl1271_rx_filter_clear_all(wl);
+ return 0;
+ }
+
+ if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
+ return -EINVAL;
+
+ /* Validate all incoming patterns before clearing current FW state */
+ for (i = 0; i < wow->n_patterns; i++) {
+ ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
+ if (ret) {
+ wl1271_warning("Bad wowlan pattern %d", i);
+ return ret;
+ }
+ }
+
+ wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
+ wl1271_rx_filter_clear_all(wl);
+
+ /* Translate WoWLAN patterns into filters */
+ for (i = 0; i < wow->n_patterns; i++) {
+ struct cfg80211_wowlan_trig_pkt_pattern *p;
+ struct wl12xx_rx_filter *filter = NULL;
+
+ p = &wow->patterns[i];
+
+ ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
+ if (ret) {
+ wl1271_warning("Failed to create an RX filter from "
+ "wowlan pattern %d", i);
+ goto out;
+ }
+
+ ret = wl1271_rx_filter_enable(wl, i, 1, filter);
+
+ wl1271_rx_filter_free(filter);
+ if (ret)
+ goto out;
+ }
+
+ ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
+
+out:
+ return ret;
+}
+
static int wl1271_configure_suspend_sta(struct wl1271 *wl,
- struct wl12xx_vif *wlvif)
+ struct wl12xx_vif *wlvif,
+ struct cfg80211_wowlan *wow)
{
int ret = 0;
@@ -1273,6 +1536,7 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
if (ret < 0)
goto out;
+ wl1271_configure_wowlan(wl, wow);
ret = wl1271_acx_wake_up_conditions(wl, wlvif,
wl->conf.conn.suspend_wake_up_event,
wl->conf.conn.suspend_listen_interval);
@@ -1308,10 +1572,11 @@ out:
}
static int wl1271_configure_suspend(struct wl1271 *wl,
- struct wl12xx_vif *wlvif)
+ struct wl12xx_vif *wlvif,
+ struct cfg80211_wowlan *wow)
{
if (wlvif->bss_type == BSS_TYPE_STA_BSS)
- return wl1271_configure_suspend_sta(wl, wlvif);
+ return wl1271_configure_suspend_sta(wl, wlvif, wow);
if (wlvif->bss_type == BSS_TYPE_AP_BSS)
return wl1271_configure_suspend_ap(wl, wlvif);
return 0;
@@ -1332,6 +1597,8 @@ static void wl1271_configure_resume(struct wl1271 *wl,
return;
if (is_sta) {
+ wl1271_configure_wowlan(wl, NULL);
+
ret = wl1271_acx_wake_up_conditions(wl, wlvif,
wl->conf.conn.wake_up_event,
wl->conf.conn.listen_interval);
@@ -1355,15 +1622,16 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
- WARN_ON(!wow || !wow->any);
+ WARN_ON(!wow);
wl1271_tx_flush(wl);
mutex_lock(&wl->mutex);
wl->wow_enabled = true;
wl12xx_for_each_wlvif(wl, wlvif) {
- ret = wl1271_configure_suspend(wl, wlvif);
+ ret = wl1271_configure_suspend(wl, wlvif, wow);
if (ret < 0) {
+ mutex_unlock(&wl->mutex);
wl1271_warning("couldn't prepare device to suspend");
return ret;
}
@@ -1487,6 +1755,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
cancel_work_sync(&wl->tx_work);
cancel_delayed_work_sync(&wl->elp_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
+ cancel_delayed_work_sync(&wl->connection_loss_work);
/* let's notify MAC80211 about the remaining pending TX frames */
wl12xx_tx_reset(wl, true);
@@ -3439,6 +3708,9 @@ sta_not_found:
do_join = true;
set_assoc = true;
+ /* Cancel connection_loss_work */
+ cancel_delayed_work_sync(&wl->connection_loss_work);
+
/*
* use basic rates from AP, and determine lowest rate
* to use with control frames.
@@ -4549,6 +4821,34 @@ static struct bin_attribute fwlog_attr = {
.read = wl1271_sysfs_read_fwlog,
};
+static void wl1271_connection_loss_work(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct wl1271 *wl;
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
+
+ dwork = container_of(work, struct delayed_work, work);
+ wl = container_of(dwork, struct wl1271, connection_loss_work);
+
+ wl1271_info("Connection loss work.");
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ /* Call mac80211 connection loss */
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ goto out;
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_connection_loss(vif);
+ }
+out:
+ mutex_unlock(&wl->mutex);
+}
+
static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
u32 oui, u32 nic, int n)
{
@@ -4804,6 +5104,8 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
+ INIT_DELAYED_WORK(&wl->connection_loss_work,
+ wl1271_connection_loss_work);
wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
if (!wl->freezable_wq) {
@@ -4861,7 +5163,7 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
goto err_dummy_packet;
}
- wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_DMA);
+ wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_KERNEL | GFP_DMA);
if (!wl->mbox) {
ret = -ENOMEM;
goto err_fwlog;
@@ -5003,9 +5305,14 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
if (!ret) {
wl->irq_wake_enabled = true;
device_init_wakeup(wl->dev, 1);
- if (pdata->pwr_in_suspend)
+ if (pdata->pwr_in_suspend) {
wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
-
+ wl->hw->wiphy->wowlan.n_patterns =
+ WL1271_MAX_RX_FILTERS;
+ wl->hw->wiphy->wowlan.pattern_min_len = 1;
+ wl->hw->wiphy->wowlan.pattern_max_len =
+ WL1271_RX_FILTER_MAX_PATTERN_SIZE;
+ }
}
disable_irq(wl->irq);
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 89bd9385e90b..d6a3c6b07827 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -278,3 +278,41 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
wl12xx_rearm_rx_streaming(wl, active_hlids);
}
+
+#ifdef CONFIG_PM
+int wl1271_rx_filter_enable(struct wl1271 *wl,
+ int index, bool enable,
+ struct wl12xx_rx_filter *filter)
+{
+ int ret;
+
+ if (wl->rx_filter_enabled[index] == enable) {
+ wl1271_warning("Request to enable an already "
+ "enabled rx filter %d", index);
+ return 0;
+ }
+
+ ret = wl1271_acx_set_rx_filter(wl, index, enable, filter);
+
+ if (ret) {
+ wl1271_error("Failed to %s rx data filter %d (err=%d)",
+ enable ? "enable" : "disable", index, ret);
+ return ret;
+ }
+
+ wl->rx_filter_enabled[index] = enable;
+
+ return 0;
+}
+
+void wl1271_rx_filter_clear_all(struct wl1271 *wl)
+{
+ int i;
+
+ for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) {
+ if (!wl->rx_filter_enabled[i])
+ continue;
+ wl1271_rx_filter_enable(wl, i, 0, NULL);
+ }
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index 6e129e2a8546..e9a162a864ca 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -138,5 +138,9 @@ struct wl1271_rx_descriptor {
void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
+int wl1271_rx_filter_enable(struct wl1271 *wl,
+ int index, bool enable,
+ struct wl12xx_rx_filter *filter);
+void wl1271_rx_filter_clear_all(struct wl1271 *wl);
#endif
diff --git a/drivers/net/wireless/ti/wlcore/wl12xx.h b/drivers/net/wireless/ti/wlcore/wl12xx.h
index a9b220c43e54..f12bdf745180 100644
--- a/drivers/net/wireless/ti/wlcore/wl12xx.h
+++ b/drivers/net/wireless/ti/wlcore/wl12xx.h
@@ -279,6 +279,39 @@ struct wl1271_link {
u8 ba_bitmap;
};
+#define WL1271_MAX_RX_FILTERS 5
+#define WL1271_RX_FILTER_MAX_FIELDS 8
+
+#define WL1271_RX_FILTER_ETH_HEADER_SIZE 14
+#define WL1271_RX_FILTER_MAX_FIELDS_SIZE 95
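+/* Serialized size of a field header: the field struct minus its pattern
+ * pointer (the pattern bytes follow the header inline in the ACX buffer).
+ */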
+#define RX_FILTER_FIELD_OVERHEAD \
+ (sizeof(struct wl12xx_rx_filter_field) - sizeof(u8 *))
+#define WL1271_RX_FILTER_MAX_PATTERN_SIZE \
+ (WL1271_RX_FILTER_MAX_FIELDS_SIZE - RX_FILTER_FIELD_OVERHEAD)
+
+#define WL1271_RX_FILTER_FLAG_MASK BIT(0)
+#define WL1271_RX_FILTER_FLAG_IP_HEADER 0
+#define WL1271_RX_FILTER_FLAG_ETHERNET_HEADER BIT(1)
+
+enum rx_filter_action {
+ FILTER_DROP = 0,
+ FILTER_SIGNAL = 1,
+ FILTER_FW_HANDLE = 2
+};
+
+struct wl12xx_rx_filter_field {
+ __le16 offset;
+ u8 len;
+ u8 flags;
+ u8 *pattern;
+} __packed;
+
+struct wl12xx_rx_filter {
+ u8 action;
+ int num_fields;
+ struct wl12xx_rx_filter_field fields[WL1271_RX_FILTER_MAX_FIELDS];
+};
+
struct wl1271_station {
u8 hlid;
};
@@ -439,6 +472,14 @@ int wl1271_plt_stop(struct wl1271 *wl);
int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl12xx_queue_recovery_work(struct wl1271 *wl);
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
+int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
+ u16 offset, u8 flags,
+ u8 *pattern, u8 len);
+void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter);
+struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void);
+int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter);
+void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
+ u8 *buf);
#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 39f9fadfebd9..0b3f0b586f4b 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -243,6 +243,9 @@ struct wl1271 {
struct wl1271_scan scan;
struct delayed_work scan_complete_work;
+ /* Connection loss work */
+ struct delayed_work connection_loss_work;
+
bool sched_scanning;
/* The current band */
@@ -349,6 +352,9 @@ struct wl1271 {
/* size of the private FW status data */
size_t fw_status_priv_len;
+
+ /* RX Data filter rule state - enabled/disabled */
+ bool rx_filter_enabled[WL1271_MAX_RX_FILTERS];
};
int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 2596401308a8..f4a6fcaeffb1 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -325,8 +325,7 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
unsigned int count;
int i, copy_off;
- count = DIV_ROUND_UP(
- offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);
+ count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
copy_off = skb_headlen(skb) % PAGE_SIZE;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 0ebbb1906c30..2027afe405fe 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1962,9 +1962,6 @@ static int __init netif_init(void)
if (!xen_domain())
return -ENODEV;
- if (xen_initial_domain())
- return 0;
-
if (xen_hvm_domain() && !xen_platform_pci_unplug)
return -ENODEV;
@@ -1977,9 +1974,6 @@ module_init(netif_init);
static void __exit netif_exit(void)
{
- if (xen_initial_domain())
- return;
-
xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 5af959274d4e..3b20b73ee649 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -17,6 +17,19 @@ config PN544_NFC
To compile this driver as a module, choose m here. The module will
be called pn544.
+config PN544_HCI_NFC
+ tristate "HCI PN544 NFC driver"
+ depends on I2C && NFC_SHDLC
+ select CRC_CCITT
+ default n
+ ---help---
+ NXP PN544 i2c driver.
+ This is a driver based on the SHDLC and HCI NFC kernel layers and
+ will thus not work with the NXP libnfc library.
+
+ To compile this driver as a module, choose m here. The module will
+ be called pn544_hci.
+
config NFC_PN533
tristate "NXP PN533 USB driver"
depends on USB
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index ab99e8572f02..473e44cef612 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_PN544_NFC) += pn544.o
+obj-$(CONFIG_PN544_HCI_NFC) += pn544_hci.o
obj-$(CONFIG_NFC_PN533) += pn533.o
obj-$(CONFIG_NFC_WILINK) += nfcwilink.o
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index e6ec16d92e65..19110f0eb15f 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -394,9 +394,6 @@ static void pn533_wq_cmd_complete(struct work_struct *work)
struct pn533_frame *in_frame;
int rc;
- if (dev == NULL)
- return;
-
in_frame = dev->wq_in_frame;
if (dev->wq_in_error)
@@ -1194,8 +1191,8 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
return rc;
}
-static int pn533_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
- u32 protocol)
+static int pn533_activate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target, u32 protocol)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
int rc;
@@ -1243,7 +1240,8 @@ static int pn533_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
return 0;
}
-static void pn533_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx)
+static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
u8 tg;
@@ -1351,7 +1349,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
return 0;
}
-static int pn533_dep_link_up(struct nfc_dev *nfc_dev, int target_idx,
+static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
u8 comm_mode, u8* gb, size_t gb_len)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
@@ -1552,10 +1550,9 @@ error:
return 0;
}
-static int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
- struct sk_buff *skb,
- data_exchange_cb_t cb,
- void *cb_context)
+static int pn533_data_exchange(struct nfc_dev *nfc_dev,
+ struct nfc_target *target, struct sk_buff *skb,
+ data_exchange_cb_t cb, void *cb_context)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
struct pn533_frame *out_frame, *in_frame;
diff --git a/drivers/nfc/pn544_hci.c b/drivers/nfc/pn544_hci.c
new file mode 100644
index 000000000000..281f18c2fb82
--- /dev/null
+++ b/drivers/nfc/pn544_hci.c
@@ -0,0 +1,947 @@
+/*
+ * HCI based Driver for NXP PN544 NFC Chip
+ *
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/crc-ccitt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+
+#include <linux/nfc.h>
+#include <net/nfc/hci.h>
+#include <net/nfc/shdlc.h>
+
+#include <linux/nfc/pn544.h>
+
+#define DRIVER_DESC "HCI NFC driver for PN544"
+
+#define PN544_HCI_DRIVER_NAME "pn544_hci"
+
+/* Timing restrictions (ms) */
+#define PN544_HCI_RESETVEN_TIME 30
+
+static struct i2c_device_id pn544_hci_id_table[] = {
+ {"pn544", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pn544_hci_id_table);
+
+#define HCI_MODE 0
+#define FW_MODE 1
+
+/* framing in HCI mode */
+#define PN544_HCI_LLC_LEN 1
+#define PN544_HCI_LLC_CRC 2
+#define PN544_HCI_LLC_LEN_CRC (PN544_HCI_LLC_LEN + PN544_HCI_LLC_CRC)
+#define PN544_HCI_LLC_MIN_SIZE (1 + PN544_HCI_LLC_LEN_CRC)
+#define PN544_HCI_LLC_MAX_PAYLOAD 29
+#define PN544_HCI_LLC_MAX_SIZE (PN544_HCI_LLC_LEN_CRC + 1 + \
+ PN544_HCI_LLC_MAX_PAYLOAD)
+
+enum pn544_state {
+ PN544_ST_COLD,
+ PN544_ST_FW_READY,
+ PN544_ST_READY,
+};
+
+#define FULL_VERSION_LEN 11
+
+/* Proprietary commands */
+#define PN544_WRITE 0x3f
+
+/* Proprietary gates, events, commands and registers */
+
+/* NFC_HCI_RF_READER_A_GATE additional registers and commands */
+#define PN544_RF_READER_A_AUTO_ACTIVATION 0x10
+#define PN544_RF_READER_A_CMD_CONTINUE_ACTIVATION 0x12
+#define PN544_MIFARE_CMD 0x21
+
+/* Commands that apply to all RF readers */
+#define PN544_RF_READER_CMD_PRESENCE_CHECK 0x30
+#define PN544_RF_READER_CMD_ACTIVATE_NEXT 0x32
+
+/* NFC_HCI_ID_MGMT_GATE additional registers */
+#define PN544_ID_MGMT_FULL_VERSION_SW 0x10
+
+#define PN544_RF_READER_ISO15693_GATE 0x12
+
+#define PN544_RF_READER_F_GATE 0x14
+#define PN544_FELICA_ID 0x04
+#define PN544_FELICA_RAW 0x20
+
+#define PN544_RF_READER_JEWEL_GATE 0x15
+#define PN544_JEWEL_RAW_CMD 0x23
+
+#define PN544_RF_READER_NFCIP1_INITIATOR_GATE 0x30
+#define PN544_RF_READER_NFCIP1_TARGET_GATE 0x31
+
+#define PN544_SYS_MGMT_GATE 0x90
+#define PN544_SYS_MGMT_INFO_NOTIFICATION 0x02
+
+#define PN544_POLLING_LOOP_MGMT_GATE 0x94
+#define PN544_PL_RDPHASES 0x06
+#define PN544_PL_EMULATION 0x07
+#define PN544_PL_NFCT_DEACTIVATED 0x09
+
+#define PN544_SWP_MGMT_GATE 0xA0
+
+#define PN544_NFC_WI_MGMT_GATE 0xA1
+
+static u8 pn544_custom_gates[] = {
+ PN544_SYS_MGMT_GATE,
+ PN544_SWP_MGMT_GATE,
+ PN544_POLLING_LOOP_MGMT_GATE,
+ PN544_NFC_WI_MGMT_GATE,
+ PN544_RF_READER_F_GATE,
+ PN544_RF_READER_JEWEL_GATE,
+ PN544_RF_READER_ISO15693_GATE,
+ PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+ PN544_RF_READER_NFCIP1_TARGET_GATE
+};
+
+/* Largest headroom needed for outgoing custom commands */
+#define PN544_CMDS_HEADROOM 2
+
+struct pn544_hci_info {
+ struct i2c_client *i2c_dev;
+ struct nfc_shdlc *shdlc;
+
+ enum pn544_state state;
+
+ struct mutex info_lock;
+
+ unsigned int gpio_en;
+ unsigned int gpio_irq;
+ unsigned int gpio_fw;
+ unsigned int en_polarity;
+
+ int hard_fault; /*
+ * < 0 if hardware error occurred (e.g. i2c err)
+ * and prevents normal operation.
+ */
+};
+
+static void pn544_hci_platform_init(struct pn544_hci_info *info)
+{
+ int polarity, retry, ret;
+ char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
+ int count = sizeof(rset_cmd);
+
+ pr_info(DRIVER_DESC ": %s\n", __func__);
+ dev_info(&info->i2c_dev->dev, "Detecting nfc_en polarity\n");
+
+ /* Disable fw download */
+ gpio_set_value(info->gpio_fw, 0);
+
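+ /* Probe both nfc_en polarities: power-cycle the chip and send the
+ * reset frame up to three times; a fully acknowledged i2c write
+ * (ret == count) means the chip is powered, so this polarity wins.
+ */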
+ for (polarity = 0; polarity < 2; polarity++) {
+ info->en_polarity = polarity;
+ retry = 3;
+ while (retry--) {
+ /* power off */
+ gpio_set_value(info->gpio_en, !info->en_polarity);
+ usleep_range(10000, 15000);
+
+ /* power on */
+ gpio_set_value(info->gpio_en, info->en_polarity);
+ usleep_range(10000, 15000);
+
+ /* send reset */
+ dev_dbg(&info->i2c_dev->dev, "Sending reset cmd\n");
+ ret = i2c_master_send(info->i2c_dev, rset_cmd, count);
+ if (ret == count) {
+ dev_info(&info->i2c_dev->dev,
+ "nfc_en polarity : active %s\n",
+ (polarity == 0 ? "low" : "high"));
+ goto out;
+ }
+ }
+ }
+
+ dev_err(&info->i2c_dev->dev,
+ "Could not detect nfc_en polarity, fallback to active high\n");
+
+out:
+ gpio_set_value(info->gpio_en, !info->en_polarity);
+}
+
+static int pn544_hci_enable(struct pn544_hci_info *info, int mode)
+{
+ pr_info(DRIVER_DESC ": %s\n", __func__);
+
+ gpio_set_value(info->gpio_fw, 0);
+ gpio_set_value(info->gpio_en, info->en_polarity);
+ usleep_range(10000, 15000);
+
+ return 0;
+}
+
+static void pn544_hci_disable(struct pn544_hci_info *info)
+{
+ pr_info(DRIVER_DESC ": %s\n", __func__);
+
+ gpio_set_value(info->gpio_fw, 0);
+ gpio_set_value(info->gpio_en, !info->en_polarity);
+ usleep_range(10000, 15000);
+
+ gpio_set_value(info->gpio_en, info->en_polarity);
+ usleep_range(10000, 15000);
+
+ gpio_set_value(info->gpio_en, !info->en_polarity);
+ usleep_range(10000, 15000);
+}
+
+static int pn544_hci_i2c_write(struct i2c_client *client, u8 *buf, int len)
+{
+ int r;
+
+ usleep_range(3000, 6000);
+
+ r = i2c_master_send(client, buf, len);
+
+ if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+ usleep_range(6000, 10000);
+ r = i2c_master_send(client, buf, len);
+ }
+
+ if (r >= 0 && r != len)
+ r = -EREMOTEIO;
+
+ return r;
+}
+
+static int check_crc(u8 *buf, int buflen)
+{
+ int len;
+ u16 crc;
+
+ len = buf[0] + 1;
+ crc = crc_ccitt(0xffff, buf, len - 2);
+ crc = ~crc;
+
+ if (buf[len - 2] != (crc & 0xff) || buf[len - 1] != (crc >> 8)) {
+ pr_err(PN544_HCI_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n",
+ crc, buf[len - 1], buf[len - 2]);
+
+ pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
+ print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
+ 16, 2, buf, buflen, false);
+ return -EPERM;
+ }
+ return 0;
+}
+
+/*
+ * Reads an shdlc frame and returns it in a newly allocated sk_buff. Guarantees
+ * that i2c bus will be flushed and that next read will start on a new frame.
+ * The returned skb contains only the LLC header and payload.
+ * Returns:
+ * -EREMOTEIO : i2c read error (fatal)
+ * -EBADMSG : frame was incorrect and discarded
+ * -ENOMEM : cannot allocate skb, frame dropped
+ */
+static int pn544_hci_i2c_read(struct i2c_client *client, struct sk_buff **skb)
+{
+ int r;
+ u8 len;
+ u8 tmp[PN544_HCI_LLC_MAX_SIZE - 1];
+
+ r = i2c_master_recv(client, &len, 1);
+ if (r != 1) {
+ dev_err(&client->dev, "cannot read len byte\n");
+ return -EREMOTEIO;
+ }
+
+ if ((len < (PN544_HCI_LLC_MIN_SIZE - 1)) ||
+ (len > (PN544_HCI_LLC_MAX_SIZE - 1))) {
+ dev_err(&client->dev, "invalid len byte\n");
+ r = -EBADMSG;
+ goto flush;
+ }
+
+ *skb = alloc_skb(1 + len, GFP_KERNEL);
+ if (*skb == NULL) {
+ r = -ENOMEM;
+ goto flush;
+ }
+
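+ /* Keep the length byte in the skb: check_crc() computes the CRC over
+ * len + payload; the byte is stripped again once the frame is valid.
+ */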
+ *skb_put(*skb, 1) = len;
+
+ r = i2c_master_recv(client, skb_put(*skb, len), len);
+ if (r != len) {
+ kfree_skb(*skb);
+ return -EREMOTEIO;
+ }
+
+ r = check_crc((*skb)->data, (*skb)->len);
+ if (r != 0) {
+ kfree_skb(*skb);
+ r = -EBADMSG;
+ goto flush;
+ }
+
+ skb_pull(*skb, 1);
+ skb_trim(*skb, (*skb)->len - 2);
+
+ usleep_range(3000, 6000);
+
+ return 0;
+
+flush:
+ if (i2c_master_recv(client, tmp, sizeof(tmp)) < 0)
+ r = -EREMOTEIO;
+
+ usleep_range(3000, 6000);
+
+ return r;
+}
+
+/*
+ * Reads an shdlc frame from the chip. This is not as straightforward as it
+ * seems. There are cases where we could lose the frame start synchronization.
+ * The frame format is len-data-crc, and corruption can occur anywhere while
+ * transiting on i2c bus, such that we could read an invalid len.
+ * In order to recover synchronization with the next frame, we must be sure
+ * to read the real amount of data without using the len byte. We do this by
+ * assuming the following:
+ * - the chip will always present only one single complete frame on the bus
+ * before triggering the interrupt
+ * - the chip will not present a new frame until we have completely read
+ * the previous one (or until we have handled the interrupt).
+ * The tricky case is when we read a corrupted len that is less than the real
+ * len. We must detect this here in order to determine that we need to flush
+ * the bus. This is the reason why we check the crc here.
+ */
+static irqreturn_t pn544_hci_irq_thread_fn(int irq, void *dev_id)
+{
+ struct pn544_hci_info *info = dev_id;
+ struct i2c_client *client = info->i2c_dev;
+ struct sk_buff *skb = NULL;
+ int r;
+
+ BUG_ON(!info);
+ BUG_ON(irq != info->i2c_dev->irq);
+
+ dev_dbg(&client->dev, "IRQ\n");
+
+ if (info->hard_fault != 0)
+ return IRQ_HANDLED;
+
+ r = pn544_hci_i2c_read(client, &skb);
+ if (r == -EREMOTEIO) {
+ info->hard_fault = r;
+
+ nfc_shdlc_recv_frame(info->shdlc, NULL);
+
+ return IRQ_HANDLED;
+ } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
+ return IRQ_HANDLED;
+ }
+
+ nfc_shdlc_recv_frame(info->shdlc, skb);
+
+ return IRQ_HANDLED;
+}
+
+static int pn544_hci_open(struct nfc_shdlc *shdlc)
+{
+ struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc);
+ int r = 0;
+
+ mutex_lock(&info->info_lock);
+
+ if (info->state != PN544_ST_COLD) {
+ r = -EBUSY;
+ goto out;
+ }
+
+ r = pn544_hci_enable(info, HCI_MODE);
+
+out:
+ mutex_unlock(&info->info_lock);
+ return r;
+}
+
+static void pn544_hci_close(struct nfc_shdlc *shdlc)
+{
+ struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc);
+
+ mutex_lock(&info->info_lock);
+
+ if (info->state == PN544_ST_COLD)
+ goto out;
+
+ pn544_hci_disable(info);
+
+out:
+ mutex_unlock(&info->info_lock);
+}
+
+static int pn544_hci_ready(struct nfc_shdlc *shdlc)
+{
+ struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
+ struct sk_buff *skb;
+ static struct hw_config {
+ u8 adr[2];
+ u8 value;
+ } hw_config[] = {
+ {{0x9f, 0x9a}, 0x00},
+
+ {{0x98, 0x10}, 0xbc},
+
+ {{0x9e, 0x71}, 0x00},
+
+ {{0x98, 0x09}, 0x00},
+
+ {{0x9e, 0xb4}, 0x00},
+
+ {{0x9e, 0xd9}, 0xff},
+ {{0x9e, 0xda}, 0xff},
+ {{0x9e, 0xdb}, 0x23},
+ {{0x9e, 0xdc}, 0x21},
+ {{0x9e, 0xdd}, 0x22},
+ {{0x9e, 0xde}, 0x24},
+
+ {{0x9c, 0x01}, 0x08},
+
+ {{0x9e, 0xaa}, 0x01},
+
+ {{0x9b, 0xd1}, 0x0d},
+ {{0x9b, 0xd2}, 0x24},
+ {{0x9b, 0xd3}, 0x0a},
+ {{0x9b, 0xd4}, 0x22},
+ {{0x9b, 0xd5}, 0x08},
+ {{0x9b, 0xd6}, 0x1e},
+ {{0x9b, 0xdd}, 0x1c},
+
+ {{0x9b, 0x84}, 0x13},
+ {{0x99, 0x81}, 0x7f},
+ {{0x99, 0x31}, 0x70},
+
+ {{0x98, 0x00}, 0x3f},
+
+ {{0x9f, 0x09}, 0x00},
+
+ {{0x9f, 0x0a}, 0x05},
+
+ {{0x9e, 0xd1}, 0xa1},
+ {{0x99, 0x23}, 0x00},
+
+ {{0x9e, 0x74}, 0x80},
+
+ {{0x9f, 0x28}, 0x10},
+
+ {{0x9f, 0x35}, 0x14},
+
+ {{0x9f, 0x36}, 0x60},
+
+ {{0x9c, 0x31}, 0x00},
+
+ {{0x9c, 0x32}, 0xc8},
+
+ {{0x9c, 0x19}, 0x40},
+
+ {{0x9c, 0x1a}, 0x40},
+
+ {{0x9c, 0x0c}, 0x00},
+
+ {{0x9c, 0x0d}, 0x00},
+
+ {{0x9c, 0x12}, 0x00},
+
+ {{0x9c, 0x13}, 0x00},
+
+ {{0x98, 0xa2}, 0x0e},
+
+ {{0x98, 0x93}, 0x40},
+
+ {{0x98, 0x7d}, 0x02},
+ {{0x98, 0x7e}, 0x00},
+ {{0x9f, 0xc8}, 0x01},
+ };
+ struct hw_config *p = hw_config;
+ int count = ARRAY_SIZE(hw_config);
+ struct sk_buff *res_skb;
+ u8 param[4];
+ int r;
+
+ param[0] = 0;
+ while (count--) {
+ param[1] = p->adr[0];
+ param[2] = p->adr[1];
+ param[3] = p->value;
+
+ r = nfc_hci_send_cmd(hdev, PN544_SYS_MGMT_GATE, PN544_WRITE,
+ param, 4, &res_skb);
+ if (r < 0)
+ return r;
+
+ if (res_skb->len != 1) {
+ kfree_skb(res_skb);
+ return -EPROTO;
+ }
+
+ if (res_skb->data[0] != p->value) {
+ kfree_skb(res_skb);
+ return -EIO;
+ }
+
+ kfree_skb(res_skb);
+
+ p++;
+ }
+
+ param[0] = NFC_HCI_UICC_HOST_ID;
+ r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
+ NFC_HCI_ADMIN_WHITELIST, param, 1);
+ if (r < 0)
+ return r;
+
+ param[0] = 0x3d;
+ r = nfc_hci_set_param(hdev, PN544_SYS_MGMT_GATE,
+ PN544_SYS_MGMT_INFO_NOTIFICATION, param, 1);
+ if (r < 0)
+ return r;
+
+ param[0] = 0x0;
+ r = nfc_hci_set_param(hdev, NFC_HCI_RF_READER_A_GATE,
+ PN544_RF_READER_A_AUTO_ACTIVATION, param, 1);
+ if (r < 0)
+ return r;
+
+ r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+ if (r < 0)
+ return r;
+
+ param[0] = 0x1;
+ r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
+ PN544_PL_NFCT_DEACTIVATED, param, 1);
+ if (r < 0)
+ return r;
+
+ param[0] = 0x0;
+ r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
+ PN544_PL_RDPHASES, param, 1);
+ if (r < 0)
+ return r;
+
+ r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
+ PN544_ID_MGMT_FULL_VERSION_SW, &skb);
+ if (r < 0)
+ return r;
+
+ if (skb->len != FULL_VERSION_LEN) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ print_hex_dump(KERN_DEBUG, "FULL VERSION SOFTWARE INFO: ",
+ DUMP_PREFIX_NONE, 16, 1,
+ skb->data, FULL_VERSION_LEN, false);
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int pn544_hci_xmit(struct nfc_shdlc *shdlc, struct sk_buff *skb)
+{
+ struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc);
+ struct i2c_client *client = info->i2c_dev;
+
+ if (info->hard_fault != 0)
+ return info->hard_fault;
+
+ return pn544_hci_i2c_write(client, skb->data, skb->len);
+}
+
+static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, u32 protocols)
+{
+ struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
+ u8 phases = 0;
+ int r;
+ u8 duration[2];
+ u8 activated;
+
+ pr_info(DRIVER_DESC ": %s protocols = %d\n", __func__, protocols);
+
+ r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+ if (r < 0)
+ return r;
+
+ duration[0] = 0x18;
+ duration[1] = 0x6a;
+ r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
+ PN544_PL_EMULATION, duration, 2);
+ if (r < 0)
+ return r;
+
+ activated = 0;
+ r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
+ PN544_PL_NFCT_DEACTIVATED, &activated, 1);
+ if (r < 0)
+ return r;
+
+ if (protocols & (NFC_PROTO_ISO14443_MASK | NFC_PROTO_MIFARE_MASK |
+ NFC_PROTO_JEWEL_MASK))
+ phases |= 1; /* Type A */
+ if (protocols & NFC_PROTO_FELICA_MASK) {
+ phases |= (1 << 2); /* Type F 212 */
+ phases |= (1 << 3); /* Type F 424 */
+ }
+
+ phases |= (1 << 5); /* NFC active */
+
+ r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
+ PN544_PL_RDPHASES, &phases, 1);
+ if (r < 0)
+ return r;
+
+ r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
+ if (r < 0)
+ nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+
+ return r;
+}
+
+static int pn544_hci_target_from_gate(struct nfc_shdlc *shdlc, u8 gate,
+ struct nfc_target *target)
+{
+ switch (gate) {
+ case PN544_RF_READER_F_GATE:
+ target->supported_protocols = NFC_PROTO_FELICA_MASK;
+ break;
+ case PN544_RF_READER_JEWEL_GATE:
+ target->supported_protocols = NFC_PROTO_JEWEL_MASK;
+ target->sens_res = 0x0c00;
+ break;
+ default:
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int pn544_hci_complete_target_discovered(struct nfc_shdlc *shdlc,
+ u8 gate,
+ struct nfc_target *target)
+{
+ struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
+ struct sk_buff *uid_skb;
+ int r = 0;
+
+ if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) {
+ if (target->nfcid1_len != 4 && target->nfcid1_len != 7 &&
+ target->nfcid1_len != 10)
+ return -EPROTO;
+
+ r = nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
+ PN544_RF_READER_CMD_ACTIVATE_NEXT,
+ target->nfcid1, target->nfcid1_len, NULL);
+ } else if (target->supported_protocols & NFC_PROTO_FELICA_MASK) {
+ r = nfc_hci_get_param(hdev, PN544_RF_READER_F_GATE,
+ PN544_FELICA_ID, &uid_skb);
+ if (r < 0)
+ return r;
+
+ if (uid_skb->len != 8) {
+ kfree_skb(uid_skb);
+ return -EPROTO;
+ }
+
+ r = nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE,
+ PN544_RF_READER_CMD_ACTIVATE_NEXT,
+ uid_skb->data, uid_skb->len, NULL);
+ kfree_skb(uid_skb);
+ } else if (target->supported_protocols & NFC_PROTO_ISO14443_MASK) {
+ /*
+ * TODO: maybe other ISO 14443 cards require some kind of continue
+ * activation, but for now this is the only one we have seen.
+ */
+ if (target->sens_res == 0x4403) /* Type 4 Mifare DESFire */
+ r = nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
+ PN544_RF_READER_A_CMD_CONTINUE_ACTIVATION,
+ NULL, 0, NULL);
+ }
+
+ return r;
+}
+
+#define MIFARE_CMD_AUTH_KEY_A 0x60
+#define MIFARE_CMD_AUTH_KEY_B 0x61
+#define MIFARE_CMD_HEADER 2
+#define MIFARE_UID_LEN 4
+#define MIFARE_KEY_LEN 6
+#define MIFARE_CMD_LEN 12
+/*
+ * Returns:
+ * <= 0: the driver handled the data exchange
+ * 1: the driver does not handle this case specially; do the standard processing
+ */
+static int pn544_hci_data_exchange(struct nfc_shdlc *shdlc,
+ struct nfc_target *target,
+ struct sk_buff *skb,
+ struct sk_buff **res_skb)
+{
+ struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
+ int r;
+
+ pr_info(DRIVER_DESC ": %s for gate=%d\n", __func__,
+ target->hci_reader_gate);
+
+ switch (target->hci_reader_gate) {
+ case NFC_HCI_RF_READER_A_GATE:
+ if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) {
+ /*
+ * The pn544 seems to expect the key and UID swapped in
+ * MIFARE authentication commands.
+ */
+ if (skb->len == MIFARE_CMD_LEN &&
+ (skb->data[0] == MIFARE_CMD_AUTH_KEY_A ||
+ skb->data[0] == MIFARE_CMD_AUTH_KEY_B)) {
+ u8 uid[MIFARE_UID_LEN];
+ u8 *data = skb->data + MIFARE_CMD_HEADER;
+
+ memcpy(uid, data + MIFARE_KEY_LEN,
+ MIFARE_UID_LEN);
+ memmove(data + MIFARE_UID_LEN, data,
+ MIFARE_KEY_LEN);
+ memcpy(data, uid, MIFARE_UID_LEN);
+ }
+
+ return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+ PN544_MIFARE_CMD,
+ skb->data, skb->len, res_skb);
+ } else
+ return 1;
+ case PN544_RF_READER_F_GATE:
+ *skb_push(skb, 1) = 0;
+ *skb_push(skb, 1) = 0;
+
+ r = nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+ PN544_FELICA_RAW,
+ skb->data, skb->len, res_skb);
+ if (r == 0)
+ skb_pull(*res_skb, 1);
+ return r;
+ case PN544_RF_READER_JEWEL_GATE:
+ return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+ PN544_JEWEL_RAW_CMD,
+ skb->data, skb->len, res_skb);
+ default:
+ return 1;
+ }
+}
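/*
 * Standalone sketch (not part of the patch) of the key/UID reordering that
 * pn544_hci_data_exchange() applies to MIFARE authenticate frames. The
 * 12-byte layout (cmd, block number, 6-byte key, 4-byte UID) follows the
 * MIFARE_* constants above; the helper itself is illustrative only.
 */
#include <stdint.h>
#include <string.h>

static void sketch_swap_key_uid(uint8_t frame[12])
{
	uint8_t uid[4];
	uint8_t *data = frame + 2;	/* skip the cmd and block bytes    */

	memcpy(uid, data + 6, 4);	/* save the trailing UID           */
	memmove(data + 4, data, 6);	/* shift the 6-byte key to the end */
	memcpy(data, uid, 4);		/* the UID now precedes the key    */
}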
+
+static int pn544_hci_check_presence(struct nfc_shdlc *shdlc,
+ struct nfc_target *target)
+{
+ struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
+
+ return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+ PN544_RF_READER_CMD_PRESENCE_CHECK,
+ NULL, 0, NULL);
+}
+
+static struct nfc_shdlc_ops pn544_shdlc_ops = {
+ .open = pn544_hci_open,
+ .close = pn544_hci_close,
+ .hci_ready = pn544_hci_ready,
+ .xmit = pn544_hci_xmit,
+ .start_poll = pn544_hci_start_poll,
+ .target_from_gate = pn544_hci_target_from_gate,
+ .complete_target_discovered = pn544_hci_complete_target_discovered,
+ .data_exchange = pn544_hci_data_exchange,
+ .check_presence = pn544_hci_check_presence,
+};
+
+static int __devinit pn544_hci_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pn544_hci_info *info;
+ struct pn544_nfc_platform_data *pdata;
+ int r = 0;
+ u32 protocols;
+ struct nfc_hci_init_data init_data;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+ dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "Need I2C_FUNC_I2C\n");
+ return -ENODEV;
+ }
+
+ info = kzalloc(sizeof(struct pn544_hci_info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&client->dev,
+ "Cannot allocate memory for pn544_hci_info.\n");
+ r = -ENOMEM;
+ goto err_info_alloc;
+ }
+
+ info->i2c_dev = client;
+ info->state = PN544_ST_COLD;
+ mutex_init(&info->info_lock);
+ i2c_set_clientdata(client, info);
+
+ pdata = client->dev.platform_data;
+ if (pdata == NULL) {
+ dev_err(&client->dev, "No platform data\n");
+ r = -EINVAL;
+ goto err_pdata;
+ }
+
+ if (pdata->request_resources == NULL) {
+ dev_err(&client->dev, "request_resources() missing\n");
+ r = -EINVAL;
+ goto err_pdata;
+ }
+
+ r = pdata->request_resources(client);
+ if (r) {
+ dev_err(&client->dev, "Cannot get platform resources\n");
+ goto err_pdata;
+ }
+
+ info->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
+ info->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET);
+ info->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ);
+
+ pn544_hci_platform_init(info);
+
+ r = request_threaded_irq(client->irq, NULL, pn544_hci_irq_thread_fn,
+ IRQF_TRIGGER_RISING, PN544_HCI_DRIVER_NAME,
+ info);
+ if (r < 0) {
+ dev_err(&client->dev, "Unable to register IRQ handler\n");
+ goto err_rti;
+ }
+
+ init_data.gate_count = ARRAY_SIZE(pn544_custom_gates);
+
+ memcpy(init_data.gates, pn544_custom_gates,
+ ARRAY_SIZE(pn544_custom_gates));
+
+ /*
+ * TODO: The session id must include the driver name plus some persistent
+ * bus address info to discriminate between two identical chips.
+ */
+ strcpy(init_data.session_id, "ID544HCI");
+
+ protocols = NFC_PROTO_JEWEL_MASK |
+ NFC_PROTO_MIFARE_MASK |
+ NFC_PROTO_FELICA_MASK |
+ NFC_PROTO_ISO14443_MASK |
+ NFC_PROTO_NFC_DEP_MASK;
+
+ info->shdlc = nfc_shdlc_allocate(&pn544_shdlc_ops,
+ &init_data, protocols,
+ PN544_CMDS_HEADROOM, 0,
+ PN544_HCI_LLC_MAX_PAYLOAD,
+ dev_name(&client->dev));
+ if (!info->shdlc) {
+ dev_err(&client->dev, "Cannot allocate nfc shdlc.\n");
+ r = -ENOMEM;
+ goto err_allocshdlc;
+ }
+
+ nfc_shdlc_set_clientdata(info->shdlc, info);
+
+ return 0;
+
+err_allocshdlc:
+ free_irq(client->irq, info);
+
+err_rti:
+ if (pdata->free_resources != NULL)
+ pdata->free_resources();
+
+err_pdata:
+ kfree(info);
+
+err_info_alloc:
+ return r;
+}
+
+static __devexit int pn544_hci_remove(struct i2c_client *client)
+{
+ struct pn544_hci_info *info = i2c_get_clientdata(client);
+ struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ nfc_shdlc_free(info->shdlc);
+
+ if (info->state != PN544_ST_COLD) {
+ if (pdata->disable)
+ pdata->disable();
+ }
+
+ free_irq(client->irq, info);
+ if (pdata->free_resources)
+ pdata->free_resources();
+
+ kfree(info);
+
+ return 0;
+}
+
+static struct i2c_driver pn544_hci_driver = {
+ .driver = {
+ .name = PN544_HCI_DRIVER_NAME,
+ },
+ .probe = pn544_hci_probe,
+ .id_table = pn544_hci_id_table,
+ .remove = __devexit_p(pn544_hci_remove),
+};
+
+static int __init pn544_hci_init(void)
+{
+ int r;
+
+ pr_debug(DRIVER_DESC ": %s\n", __func__);
+
+ r = i2c_add_driver(&pn544_hci_driver);
+ if (r) {
+ pr_err(PN544_HCI_DRIVER_NAME ": driver registration failed\n");
+ return r;
+ }
+
+ return 0;
+}
+
+static void __exit pn544_hci_exit(void)
+{
+ i2c_del_driver(&pn544_hci_driver);
+}
+
+module_init(pn544_hci_init);
+module_exit(pn544_hci_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 8e84ce9765a9..dfba3e64d595 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -51,12 +51,6 @@ config OF_IRQ
config OF_DEVICE
def_bool y
-config OF_GPIO
- def_bool y
- depends on GPIOLIB && !SPARC
- help
- OpenFirmware GPIO accessors
-
config OF_I2C
def_tristate I2C
depends on I2C && !SPARC
@@ -67,12 +61,6 @@ config OF_NET
depends on NETDEVICES
def_bool y
-config OF_SPI
- def_tristate SPI
- depends on SPI && !SPARC
- help
- OpenFirmware SPI accessors
-
config OF_MDIO
def_tristate PHYLIB
depends on PHYLIB
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index aa90e602c8a7..e027f444d10c 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -4,10 +4,8 @@ obj-$(CONFIG_OF_PROMTREE) += pdt.o
obj-$(CONFIG_OF_ADDRESS) += address.o
obj-$(CONFIG_OF_IRQ) += irq.o
obj-$(CONFIG_OF_DEVICE) += device.o platform.o
-obj-$(CONFIG_OF_GPIO) += gpio.o
obj-$(CONFIG_OF_I2C) += of_i2c.o
obj-$(CONFIG_OF_NET) += of_net.o
-obj-$(CONFIG_OF_SPI) += of_spi.o
obj-$(CONFIG_OF_SELFTEST) += selftest.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_PCI) += of_pci.o
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index f37fbeb66a44..1e173f357674 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -90,8 +90,22 @@ struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
if (!dev)
return NULL;
- return to_i2c_client(dev);
+ return i2c_verify_client(dev);
}
EXPORT_SYMBOL(of_find_i2c_device_by_node);
+/* must call put_device() when done with returned i2c_adapter device */
+struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&i2c_bus_type, NULL, node,
+ of_dev_node_match);
+ if (!dev)
+ return NULL;
+
+ return i2c_verify_adapter(dev);
+}
+EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
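/*
 * Usage sketch (not part of the patch) for the helper added above: resolve a
 * device-tree node to its registered i2c_adapter and drop the reference when
 * done, as the comment requires. It assumes the matching prototype lives in
 * <linux/of_i2c.h>; the caller and error policy are hypothetical.
 */
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/of_i2c.h>

static int sketch_lookup_adapter(struct device_node *np)
{
	struct i2c_adapter *adap;

	adap = of_find_i2c_adapter_by_node(np);
	if (!adap)
		return -EPROBE_DEFER;	/* adapter not registered yet */

	/* ... perform transfers through adap here ... */

	put_device(&adap->dev);		/* balances the bus_find_device() get */
	return 0;
}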
+
MODULE_LICENSE("GPL");
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index 93125163dea2..677053813211 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -15,7 +15,7 @@
* PCI tree until a device node is found, at which point it will finish
* resolving using the OF tree walking.
*/
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
{
struct device_node *dn, *ppnode;
struct pci_dev *ppdev;
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c
deleted file mode 100644
index 6dbc074e4876..000000000000
--- a/drivers/of/of_spi.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * SPI OF support routines
- * Copyright (C) 2008 Secret Lab Technologies Ltd.
- *
- * Support routines for deriving SPI device attachments from the device
- * tree.
- */
-
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/device.h>
-#include <linux/spi/spi.h>
-#include <linux/of_irq.h>
-#include <linux/of_spi.h>
-
-/**
- * of_register_spi_devices - Register child devices onto the SPI bus
- * @master: Pointer to spi_master device
- *
- * Registers an spi_device for each child node of master node which has a 'reg'
- * property.
- */
-void of_register_spi_devices(struct spi_master *master)
-{
- struct spi_device *spi;
- struct device_node *nc;
- const __be32 *prop;
- int rc;
- int len;
-
- if (!master->dev.of_node)
- return;
-
- for_each_child_of_node(master->dev.of_node, nc) {
- /* Alloc an spi_device */
- spi = spi_alloc_device(master);
- if (!spi) {
- dev_err(&master->dev, "spi_device alloc error for %s\n",
- nc->full_name);
- spi_dev_put(spi);
- continue;
- }
-
- /* Select device driver */
- if (of_modalias_node(nc, spi->modalias,
- sizeof(spi->modalias)) < 0) {
- dev_err(&master->dev, "cannot find modalias for %s\n",
- nc->full_name);
- spi_dev_put(spi);
- continue;
- }
-
- /* Device address */
- prop = of_get_property(nc, "reg", &len);
- if (!prop || len < sizeof(*prop)) {
- dev_err(&master->dev, "%s has no 'reg' property\n",
- nc->full_name);
- spi_dev_put(spi);
- continue;
- }
- spi->chip_select = be32_to_cpup(prop);
-
- /* Mode (clock phase/polarity/etc.) */
- if (of_find_property(nc, "spi-cpha", NULL))
- spi->mode |= SPI_CPHA;
- if (of_find_property(nc, "spi-cpol", NULL))
- spi->mode |= SPI_CPOL;
- if (of_find_property(nc, "spi-cs-high", NULL))
- spi->mode |= SPI_CS_HIGH;
-
- /* Device speed */
- prop = of_get_property(nc, "spi-max-frequency", &len);
- if (!prop || len < sizeof(*prop)) {
- dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
- nc->full_name);
- spi_dev_put(spi);
- continue;
- }
- spi->max_speed_hz = be32_to_cpup(prop);
-
- /* IRQ */
- spi->irq = irq_of_parse_and_map(nc, 0);
-
- /* Store a pointer to the node in the device structure */
- of_node_get(nc);
- spi->dev.of_node = nc;
-
- /* Register the new device */
- request_module(spi->modalias);
- rc = spi_add_device(spi);
- if (rc) {
- dev_err(&master->dev, "spi_device register error %s\n",
- nc->full_name);
- spi_dev_put(spi);
- }
-
- }
-}
-EXPORT_SYMBOL(of_register_spi_devices);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index a55e248618cd..86c63fe45d11 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -27,6 +27,7 @@
#include <linux/security.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
+#include <linux/vgaarb.h>
#include "pci.h"
static int sysfs_initialized; /* = 0 */
@@ -417,6 +418,10 @@ static ssize_t
boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *vga_dev = vga_default_device();
+
+ if (vga_dev)
+ return sprintf(buf, "%u\n", (pdev == vga_dev));
return sprintf(buf, "%u\n",
!!(pdev->resource[PCI_ROM_RESOURCE].flags &
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 8f169002dc7e..447e83472c01 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2370,7 +2370,7 @@ void pci_enable_acs(struct pci_dev *dev)
* number is always 0 (see the Implementation Note in section 2.2.8.1 of
* the PCI Express Base Specification, Revision 2.1)
*/
-u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
{
int slot;
diff --git a/drivers/pinctrl/spear/Kconfig b/drivers/pinctrl/spear/Kconfig
index 6a2596b4f359..91558791e766 100644
--- a/drivers/pinctrl/spear/Kconfig
+++ b/drivers/pinctrl/spear/Kconfig
@@ -31,4 +31,14 @@ config PINCTRL_SPEAR320
depends on MACH_SPEAR320
select PINCTRL_SPEAR3XX
+config PINCTRL_SPEAR1310
+ bool "ST Microelectronics SPEAr1310 SoC pin controller driver"
+ depends on MACH_SPEAR1310
+ select PINCTRL_SPEAR
+
+config PINCTRL_SPEAR1340
+ bool "ST Microelectronics SPEAr1340 SoC pin controller driver"
+ depends on MACH_SPEAR1340
+ select PINCTRL_SPEAR
+
endif
diff --git a/drivers/pinctrl/spear/Makefile b/drivers/pinctrl/spear/Makefile
index 15dcb85da22d..b28a7ba22443 100644
--- a/drivers/pinctrl/spear/Makefile
+++ b/drivers/pinctrl/spear/Makefile
@@ -5,3 +5,5 @@ obj-$(CONFIG_PINCTRL_SPEAR3XX) += pinctrl-spear3xx.o
obj-$(CONFIG_PINCTRL_SPEAR300) += pinctrl-spear300.o
obj-$(CONFIG_PINCTRL_SPEAR310) += pinctrl-spear310.o
obj-$(CONFIG_PINCTRL_SPEAR320) += pinctrl-spear320.o
+obj-$(CONFIG_PINCTRL_SPEAR1310) += pinctrl-spear1310.o
+obj-$(CONFIG_PINCTRL_SPEAR1340) += pinctrl-spear1340.o
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index 47a6b5b72f90..9155783bb47f 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -139,4 +139,255 @@ void __devinit pmx_init_addr(struct spear_pinctrl_machdata *machdata, u16 reg);
int __devinit spear_pinctrl_probe(struct platform_device *pdev,
struct spear_pinctrl_machdata *machdata);
int __devexit spear_pinctrl_remove(struct platform_device *pdev);
+
+#define SPEAR_PIN_0_TO_101 \
+ PINCTRL_PIN(0, "PLGPIO0"), \
+ PINCTRL_PIN(1, "PLGPIO1"), \
+ PINCTRL_PIN(2, "PLGPIO2"), \
+ PINCTRL_PIN(3, "PLGPIO3"), \
+ PINCTRL_PIN(4, "PLGPIO4"), \
+ PINCTRL_PIN(5, "PLGPIO5"), \
+ PINCTRL_PIN(6, "PLGPIO6"), \
+ PINCTRL_PIN(7, "PLGPIO7"), \
+ PINCTRL_PIN(8, "PLGPIO8"), \
+ PINCTRL_PIN(9, "PLGPIO9"), \
+ PINCTRL_PIN(10, "PLGPIO10"), \
+ PINCTRL_PIN(11, "PLGPIO11"), \
+ PINCTRL_PIN(12, "PLGPIO12"), \
+ PINCTRL_PIN(13, "PLGPIO13"), \
+ PINCTRL_PIN(14, "PLGPIO14"), \
+ PINCTRL_PIN(15, "PLGPIO15"), \
+ PINCTRL_PIN(16, "PLGPIO16"), \
+ PINCTRL_PIN(17, "PLGPIO17"), \
+ PINCTRL_PIN(18, "PLGPIO18"), \
+ PINCTRL_PIN(19, "PLGPIO19"), \
+ PINCTRL_PIN(20, "PLGPIO20"), \
+ PINCTRL_PIN(21, "PLGPIO21"), \
+ PINCTRL_PIN(22, "PLGPIO22"), \
+ PINCTRL_PIN(23, "PLGPIO23"), \
+ PINCTRL_PIN(24, "PLGPIO24"), \
+ PINCTRL_PIN(25, "PLGPIO25"), \
+ PINCTRL_PIN(26, "PLGPIO26"), \
+ PINCTRL_PIN(27, "PLGPIO27"), \
+ PINCTRL_PIN(28, "PLGPIO28"), \
+ PINCTRL_PIN(29, "PLGPIO29"), \
+ PINCTRL_PIN(30, "PLGPIO30"), \
+ PINCTRL_PIN(31, "PLGPIO31"), \
+ PINCTRL_PIN(32, "PLGPIO32"), \
+ PINCTRL_PIN(33, "PLGPIO33"), \
+ PINCTRL_PIN(34, "PLGPIO34"), \
+ PINCTRL_PIN(35, "PLGPIO35"), \
+ PINCTRL_PIN(36, "PLGPIO36"), \
+ PINCTRL_PIN(37, "PLGPIO37"), \
+ PINCTRL_PIN(38, "PLGPIO38"), \
+ PINCTRL_PIN(39, "PLGPIO39"), \
+ PINCTRL_PIN(40, "PLGPIO40"), \
+ PINCTRL_PIN(41, "PLGPIO41"), \
+ PINCTRL_PIN(42, "PLGPIO42"), \
+ PINCTRL_PIN(43, "PLGPIO43"), \
+ PINCTRL_PIN(44, "PLGPIO44"), \
+ PINCTRL_PIN(45, "PLGPIO45"), \
+ PINCTRL_PIN(46, "PLGPIO46"), \
+ PINCTRL_PIN(47, "PLGPIO47"), \
+ PINCTRL_PIN(48, "PLGPIO48"), \
+ PINCTRL_PIN(49, "PLGPIO49"), \
+ PINCTRL_PIN(50, "PLGPIO50"), \
+ PINCTRL_PIN(51, "PLGPIO51"), \
+ PINCTRL_PIN(52, "PLGPIO52"), \
+ PINCTRL_PIN(53, "PLGPIO53"), \
+ PINCTRL_PIN(54, "PLGPIO54"), \
+ PINCTRL_PIN(55, "PLGPIO55"), \
+ PINCTRL_PIN(56, "PLGPIO56"), \
+ PINCTRL_PIN(57, "PLGPIO57"), \
+ PINCTRL_PIN(58, "PLGPIO58"), \
+ PINCTRL_PIN(59, "PLGPIO59"), \
+ PINCTRL_PIN(60, "PLGPIO60"), \
+ PINCTRL_PIN(61, "PLGPIO61"), \
+ PINCTRL_PIN(62, "PLGPIO62"), \
+ PINCTRL_PIN(63, "PLGPIO63"), \
+ PINCTRL_PIN(64, "PLGPIO64"), \
+ PINCTRL_PIN(65, "PLGPIO65"), \
+ PINCTRL_PIN(66, "PLGPIO66"), \
+ PINCTRL_PIN(67, "PLGPIO67"), \
+ PINCTRL_PIN(68, "PLGPIO68"), \
+ PINCTRL_PIN(69, "PLGPIO69"), \
+ PINCTRL_PIN(70, "PLGPIO70"), \
+ PINCTRL_PIN(71, "PLGPIO71"), \
+ PINCTRL_PIN(72, "PLGPIO72"), \
+ PINCTRL_PIN(73, "PLGPIO73"), \
+ PINCTRL_PIN(74, "PLGPIO74"), \
+ PINCTRL_PIN(75, "PLGPIO75"), \
+ PINCTRL_PIN(76, "PLGPIO76"), \
+ PINCTRL_PIN(77, "PLGPIO77"), \
+ PINCTRL_PIN(78, "PLGPIO78"), \
+ PINCTRL_PIN(79, "PLGPIO79"), \
+ PINCTRL_PIN(80, "PLGPIO80"), \
+ PINCTRL_PIN(81, "PLGPIO81"), \
+ PINCTRL_PIN(82, "PLGPIO82"), \
+ PINCTRL_PIN(83, "PLGPIO83"), \
+ PINCTRL_PIN(84, "PLGPIO84"), \
+ PINCTRL_PIN(85, "PLGPIO85"), \
+ PINCTRL_PIN(86, "PLGPIO86"), \
+ PINCTRL_PIN(87, "PLGPIO87"), \
+ PINCTRL_PIN(88, "PLGPIO88"), \
+ PINCTRL_PIN(89, "PLGPIO89"), \
+ PINCTRL_PIN(90, "PLGPIO90"), \
+ PINCTRL_PIN(91, "PLGPIO91"), \
+ PINCTRL_PIN(92, "PLGPIO92"), \
+ PINCTRL_PIN(93, "PLGPIO93"), \
+ PINCTRL_PIN(94, "PLGPIO94"), \
+ PINCTRL_PIN(95, "PLGPIO95"), \
+ PINCTRL_PIN(96, "PLGPIO96"), \
+ PINCTRL_PIN(97, "PLGPIO97"), \
+ PINCTRL_PIN(98, "PLGPIO98"), \
+ PINCTRL_PIN(99, "PLGPIO99"), \
+ PINCTRL_PIN(100, "PLGPIO100"), \
+ PINCTRL_PIN(101, "PLGPIO101")
+
+#define SPEAR_PIN_102_TO_245 \
+ PINCTRL_PIN(102, "PLGPIO102"), \
+ PINCTRL_PIN(103, "PLGPIO103"), \
+ PINCTRL_PIN(104, "PLGPIO104"), \
+ PINCTRL_PIN(105, "PLGPIO105"), \
+ PINCTRL_PIN(106, "PLGPIO106"), \
+ PINCTRL_PIN(107, "PLGPIO107"), \
+ PINCTRL_PIN(108, "PLGPIO108"), \
+ PINCTRL_PIN(109, "PLGPIO109"), \
+ PINCTRL_PIN(110, "PLGPIO110"), \
+ PINCTRL_PIN(111, "PLGPIO111"), \
+ PINCTRL_PIN(112, "PLGPIO112"), \
+ PINCTRL_PIN(113, "PLGPIO113"), \
+ PINCTRL_PIN(114, "PLGPIO114"), \
+ PINCTRL_PIN(115, "PLGPIO115"), \
+ PINCTRL_PIN(116, "PLGPIO116"), \
+ PINCTRL_PIN(117, "PLGPIO117"), \
+ PINCTRL_PIN(118, "PLGPIO118"), \
+ PINCTRL_PIN(119, "PLGPIO119"), \
+ PINCTRL_PIN(120, "PLGPIO120"), \
+ PINCTRL_PIN(121, "PLGPIO121"), \
+ PINCTRL_PIN(122, "PLGPIO122"), \
+ PINCTRL_PIN(123, "PLGPIO123"), \
+ PINCTRL_PIN(124, "PLGPIO124"), \
+ PINCTRL_PIN(125, "PLGPIO125"), \
+ PINCTRL_PIN(126, "PLGPIO126"), \
+ PINCTRL_PIN(127, "PLGPIO127"), \
+ PINCTRL_PIN(128, "PLGPIO128"), \
+ PINCTRL_PIN(129, "PLGPIO129"), \
+ PINCTRL_PIN(130, "PLGPIO130"), \
+ PINCTRL_PIN(131, "PLGPIO131"), \
+ PINCTRL_PIN(132, "PLGPIO132"), \
+ PINCTRL_PIN(133, "PLGPIO133"), \
+ PINCTRL_PIN(134, "PLGPIO134"), \
+ PINCTRL_PIN(135, "PLGPIO135"), \
+ PINCTRL_PIN(136, "PLGPIO136"), \
+ PINCTRL_PIN(137, "PLGPIO137"), \
+ PINCTRL_PIN(138, "PLGPIO138"), \
+ PINCTRL_PIN(139, "PLGPIO139"), \
+ PINCTRL_PIN(140, "PLGPIO140"), \
+ PINCTRL_PIN(141, "PLGPIO141"), \
+ PINCTRL_PIN(142, "PLGPIO142"), \
+ PINCTRL_PIN(143, "PLGPIO143"), \
+ PINCTRL_PIN(144, "PLGPIO144"), \
+ PINCTRL_PIN(145, "PLGPIO145"), \
+ PINCTRL_PIN(146, "PLGPIO146"), \
+ PINCTRL_PIN(147, "PLGPIO147"), \
+ PINCTRL_PIN(148, "PLGPIO148"), \
+ PINCTRL_PIN(149, "PLGPIO149"), \
+ PINCTRL_PIN(150, "PLGPIO150"), \
+ PINCTRL_PIN(151, "PLGPIO151"), \
+ PINCTRL_PIN(152, "PLGPIO152"), \
+ PINCTRL_PIN(153, "PLGPIO153"), \
+ PINCTRL_PIN(154, "PLGPIO154"), \
+ PINCTRL_PIN(155, "PLGPIO155"), \
+ PINCTRL_PIN(156, "PLGPIO156"), \
+ PINCTRL_PIN(157, "PLGPIO157"), \
+ PINCTRL_PIN(158, "PLGPIO158"), \
+ PINCTRL_PIN(159, "PLGPIO159"), \
+ PINCTRL_PIN(160, "PLGPIO160"), \
+ PINCTRL_PIN(161, "PLGPIO161"), \
+ PINCTRL_PIN(162, "PLGPIO162"), \
+ PINCTRL_PIN(163, "PLGPIO163"), \
+ PINCTRL_PIN(164, "PLGPIO164"), \
+ PINCTRL_PIN(165, "PLGPIO165"), \
+ PINCTRL_PIN(166, "PLGPIO166"), \
+ PINCTRL_PIN(167, "PLGPIO167"), \
+ PINCTRL_PIN(168, "PLGPIO168"), \
+ PINCTRL_PIN(169, "PLGPIO169"), \
+ PINCTRL_PIN(170, "PLGPIO170"), \
+ PINCTRL_PIN(171, "PLGPIO171"), \
+ PINCTRL_PIN(172, "PLGPIO172"), \
+ PINCTRL_PIN(173, "PLGPIO173"), \
+ PINCTRL_PIN(174, "PLGPIO174"), \
+ PINCTRL_PIN(175, "PLGPIO175"), \
+ PINCTRL_PIN(176, "PLGPIO176"), \
+ PINCTRL_PIN(177, "PLGPIO177"), \
+ PINCTRL_PIN(178, "PLGPIO178"), \
+ PINCTRL_PIN(179, "PLGPIO179"), \
+ PINCTRL_PIN(180, "PLGPIO180"), \
+ PINCTRL_PIN(181, "PLGPIO181"), \
+ PINCTRL_PIN(182, "PLGPIO182"), \
+ PINCTRL_PIN(183, "PLGPIO183"), \
+ PINCTRL_PIN(184, "PLGPIO184"), \
+ PINCTRL_PIN(185, "PLGPIO185"), \
+ PINCTRL_PIN(186, "PLGPIO186"), \
+ PINCTRL_PIN(187, "PLGPIO187"), \
+ PINCTRL_PIN(188, "PLGPIO188"), \
+ PINCTRL_PIN(189, "PLGPIO189"), \
+ PINCTRL_PIN(190, "PLGPIO190"), \
+ PINCTRL_PIN(191, "PLGPIO191"), \
+ PINCTRL_PIN(192, "PLGPIO192"), \
+ PINCTRL_PIN(193, "PLGPIO193"), \
+ PINCTRL_PIN(194, "PLGPIO194"), \
+ PINCTRL_PIN(195, "PLGPIO195"), \
+ PINCTRL_PIN(196, "PLGPIO196"), \
+ PINCTRL_PIN(197, "PLGPIO197"), \
+ PINCTRL_PIN(198, "PLGPIO198"), \
+ PINCTRL_PIN(199, "PLGPIO199"), \
+ PINCTRL_PIN(200, "PLGPIO200"), \
+ PINCTRL_PIN(201, "PLGPIO201"), \
+ PINCTRL_PIN(202, "PLGPIO202"), \
+ PINCTRL_PIN(203, "PLGPIO203"), \
+ PINCTRL_PIN(204, "PLGPIO204"), \
+ PINCTRL_PIN(205, "PLGPIO205"), \
+ PINCTRL_PIN(206, "PLGPIO206"), \
+ PINCTRL_PIN(207, "PLGPIO207"), \
+ PINCTRL_PIN(208, "PLGPIO208"), \
+ PINCTRL_PIN(209, "PLGPIO209"), \
+ PINCTRL_PIN(210, "PLGPIO210"), \
+ PINCTRL_PIN(211, "PLGPIO211"), \
+ PINCTRL_PIN(212, "PLGPIO212"), \
+ PINCTRL_PIN(213, "PLGPIO213"), \
+ PINCTRL_PIN(214, "PLGPIO214"), \
+ PINCTRL_PIN(215, "PLGPIO215"), \
+ PINCTRL_PIN(216, "PLGPIO216"), \
+ PINCTRL_PIN(217, "PLGPIO217"), \
+ PINCTRL_PIN(218, "PLGPIO218"), \
+ PINCTRL_PIN(219, "PLGPIO219"), \
+ PINCTRL_PIN(220, "PLGPIO220"), \
+ PINCTRL_PIN(221, "PLGPIO221"), \
+ PINCTRL_PIN(222, "PLGPIO222"), \
+ PINCTRL_PIN(223, "PLGPIO223"), \
+ PINCTRL_PIN(224, "PLGPIO224"), \
+ PINCTRL_PIN(225, "PLGPIO225"), \
+ PINCTRL_PIN(226, "PLGPIO226"), \
+ PINCTRL_PIN(227, "PLGPIO227"), \
+ PINCTRL_PIN(228, "PLGPIO228"), \
+ PINCTRL_PIN(229, "PLGPIO229"), \
+ PINCTRL_PIN(230, "PLGPIO230"), \
+ PINCTRL_PIN(231, "PLGPIO231"), \
+ PINCTRL_PIN(232, "PLGPIO232"), \
+ PINCTRL_PIN(233, "PLGPIO233"), \
+ PINCTRL_PIN(234, "PLGPIO234"), \
+ PINCTRL_PIN(235, "PLGPIO235"), \
+ PINCTRL_PIN(236, "PLGPIO236"), \
+ PINCTRL_PIN(237, "PLGPIO237"), \
+ PINCTRL_PIN(238, "PLGPIO238"), \
+ PINCTRL_PIN(239, "PLGPIO239"), \
+ PINCTRL_PIN(240, "PLGPIO240"), \
+ PINCTRL_PIN(241, "PLGPIO241"), \
+ PINCTRL_PIN(242, "PLGPIO242"), \
+ PINCTRL_PIN(243, "PLGPIO243"), \
+ PINCTRL_PIN(244, "PLGPIO244"), \
+ PINCTRL_PIN(245, "PLGPIO245")
+
#endif /* __PINMUX_SPEAR_H__ */
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
new file mode 100644
index 000000000000..fff168be7f00
--- /dev/null
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -0,0 +1,2198 @@
+/*
+ * Driver for the ST Microelectronics SPEAr1310 pinmux
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "pinctrl-spear.h"
+
+#define DRIVER_NAME "spear1310-pinmux"
+
+/* pins */
+static const struct pinctrl_pin_desc spear1310_pins[] = {
+ SPEAR_PIN_0_TO_101,
+ SPEAR_PIN_102_TO_245,
+};
+
+/* registers */
+#define PERIP_CFG 0x32C
+ #define MCIF_SEL_SHIFT 3
+ #define MCIF_SEL_SD (0x1 << MCIF_SEL_SHIFT)
+ #define MCIF_SEL_CF (0x2 << MCIF_SEL_SHIFT)
+ #define MCIF_SEL_XD (0x3 << MCIF_SEL_SHIFT)
+ #define MCIF_SEL_MASK (0x3 << MCIF_SEL_SHIFT)
+
+#define PCIE_SATA_CFG 0x3A4
+ #define PCIE_SATA2_SEL_PCIE (0 << 31)
+ #define PCIE_SATA1_SEL_PCIE (0 << 30)
+ #define PCIE_SATA0_SEL_PCIE (0 << 29)
+ #define PCIE_SATA2_SEL_SATA (1 << 31)
+ #define PCIE_SATA1_SEL_SATA (1 << 30)
+ #define PCIE_SATA0_SEL_SATA (1 << 29)
+ #define SATA2_CFG_TX_CLK_EN (1 << 27)
+ #define SATA2_CFG_RX_CLK_EN (1 << 26)
+ #define SATA2_CFG_POWERUP_RESET (1 << 25)
+ #define SATA2_CFG_PM_CLK_EN (1 << 24)
+ #define SATA1_CFG_TX_CLK_EN (1 << 23)
+ #define SATA1_CFG_RX_CLK_EN (1 << 22)
+ #define SATA1_CFG_POWERUP_RESET (1 << 21)
+ #define SATA1_CFG_PM_CLK_EN (1 << 20)
+ #define SATA0_CFG_TX_CLK_EN (1 << 19)
+ #define SATA0_CFG_RX_CLK_EN (1 << 18)
+ #define SATA0_CFG_POWERUP_RESET (1 << 17)
+ #define SATA0_CFG_PM_CLK_EN (1 << 16)
+ #define PCIE2_CFG_DEVICE_PRESENT (1 << 11)
+ #define PCIE2_CFG_POWERUP_RESET (1 << 10)
+ #define PCIE2_CFG_CORE_CLK_EN (1 << 9)
+ #define PCIE2_CFG_AUX_CLK_EN (1 << 8)
+ #define PCIE1_CFG_DEVICE_PRESENT (1 << 7)
+ #define PCIE1_CFG_POWERUP_RESET (1 << 6)
+ #define PCIE1_CFG_CORE_CLK_EN (1 << 5)
+ #define PCIE1_CFG_AUX_CLK_EN (1 << 4)
+ #define PCIE0_CFG_DEVICE_PRESENT (1 << 3)
+ #define PCIE0_CFG_POWERUP_RESET (1 << 2)
+ #define PCIE0_CFG_CORE_CLK_EN (1 << 1)
+ #define PCIE0_CFG_AUX_CLK_EN (1 << 0)
+
+#define PAD_FUNCTION_EN_0 0x650
+ #define PMX_UART0_MASK (1 << 1)
+ #define PMX_I2C0_MASK (1 << 2)
+ #define PMX_I2S0_MASK (1 << 3)
+ #define PMX_SSP0_MASK (1 << 4)
+ #define PMX_CLCD1_MASK (1 << 5)
+ #define PMX_EGPIO00_MASK (1 << 6)
+ #define PMX_EGPIO01_MASK (1 << 7)
+ #define PMX_EGPIO02_MASK (1 << 8)
+ #define PMX_EGPIO03_MASK (1 << 9)
+ #define PMX_EGPIO04_MASK (1 << 10)
+ #define PMX_EGPIO05_MASK (1 << 11)
+ #define PMX_EGPIO06_MASK (1 << 12)
+ #define PMX_EGPIO07_MASK (1 << 13)
+ #define PMX_EGPIO08_MASK (1 << 14)
+ #define PMX_EGPIO09_MASK (1 << 15)
+ #define PMX_SMI_MASK (1 << 16)
+ #define PMX_NAND8_MASK (1 << 17)
+ #define PMX_GMIICLK_MASK (1 << 18)
+ #define PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK (1 << 19)
+ #define PMX_RXCLK_RDV_TXEN_D03_MASK (1 << 20)
+ #define PMX_GMIID47_MASK (1 << 21)
+ #define PMX_MDC_MDIO_MASK (1 << 22)
+ #define PMX_MCI_DATA8_15_MASK (1 << 23)
+ #define PMX_NFAD23_MASK (1 << 24)
+ #define PMX_NFAD24_MASK (1 << 25)
+ #define PMX_NFAD25_MASK (1 << 26)
+ #define PMX_NFCE3_MASK (1 << 27)
+ #define PMX_NFWPRT3_MASK (1 << 28)
+ #define PMX_NFRSTPWDWN0_MASK (1 << 29)
+ #define PMX_NFRSTPWDWN1_MASK (1 << 30)
+ #define PMX_NFRSTPWDWN2_MASK (1 << 31)
+
+#define PAD_FUNCTION_EN_1 0x654
+ #define PMX_NFRSTPWDWN3_MASK (1 << 0)
+ #define PMX_SMINCS2_MASK (1 << 1)
+ #define PMX_SMINCS3_MASK (1 << 2)
+ #define PMX_CLCD2_MASK (1 << 3)
+ #define PMX_KBD_ROWCOL68_MASK (1 << 4)
+ #define PMX_EGPIO10_MASK (1 << 5)
+ #define PMX_EGPIO11_MASK (1 << 6)
+ #define PMX_EGPIO12_MASK (1 << 7)
+ #define PMX_EGPIO13_MASK (1 << 8)
+ #define PMX_EGPIO14_MASK (1 << 9)
+ #define PMX_EGPIO15_MASK (1 << 10)
+ #define PMX_UART0_MODEM_MASK (1 << 11)
+ #define PMX_GPT0_TMR0_MASK (1 << 12)
+ #define PMX_GPT0_TMR1_MASK (1 << 13)
+ #define PMX_GPT1_TMR0_MASK (1 << 14)
+ #define PMX_GPT1_TMR1_MASK (1 << 15)
+ #define PMX_I2S1_MASK (1 << 16)
+ #define PMX_KBD_ROWCOL25_MASK (1 << 17)
+ #define PMX_NFIO8_15_MASK (1 << 18)
+ #define PMX_KBD_COL1_MASK (1 << 19)
+ #define PMX_NFCE1_MASK (1 << 20)
+ #define PMX_KBD_COL0_MASK (1 << 21)
+ #define PMX_NFCE2_MASK (1 << 22)
+ #define PMX_KBD_ROW1_MASK (1 << 23)
+ #define PMX_NFWPRT1_MASK (1 << 24)
+ #define PMX_KBD_ROW0_MASK (1 << 25)
+ #define PMX_NFWPRT2_MASK (1 << 26)
+ #define PMX_MCIDATA0_MASK (1 << 27)
+ #define PMX_MCIDATA1_MASK (1 << 28)
+ #define PMX_MCIDATA2_MASK (1 << 29)
+ #define PMX_MCIDATA3_MASK (1 << 30)
+ #define PMX_MCIDATA4_MASK (1 << 31)
+
+#define PAD_FUNCTION_EN_2 0x658
+ #define PMX_MCIDATA5_MASK (1 << 0)
+ #define PMX_MCIDATA6_MASK (1 << 1)
+ #define PMX_MCIDATA7_MASK (1 << 2)
+ #define PMX_MCIDATA1SD_MASK (1 << 3)
+ #define PMX_MCIDATA2SD_MASK (1 << 4)
+ #define PMX_MCIDATA3SD_MASK (1 << 5)
+ #define PMX_MCIADDR0ALE_MASK (1 << 6)
+ #define PMX_MCIADDR1CLECLK_MASK (1 << 7)
+ #define PMX_MCIADDR2_MASK (1 << 8)
+ #define PMX_MCICECF_MASK (1 << 9)
+ #define PMX_MCICEXD_MASK (1 << 10)
+ #define PMX_MCICESDMMC_MASK (1 << 11)
+ #define PMX_MCICDCF1_MASK (1 << 12)
+ #define PMX_MCICDCF2_MASK (1 << 13)
+ #define PMX_MCICDXD_MASK (1 << 14)
+ #define PMX_MCICDSDMMC_MASK (1 << 15)
+ #define PMX_MCIDATADIR_MASK (1 << 16)
+ #define PMX_MCIDMARQWP_MASK (1 << 17)
+ #define PMX_MCIIORDRE_MASK (1 << 18)
+ #define PMX_MCIIOWRWE_MASK (1 << 19)
+ #define PMX_MCIRESETCF_MASK (1 << 20)
+ #define PMX_MCICS0CE_MASK (1 << 21)
+ #define PMX_MCICFINTR_MASK (1 << 22)
+ #define PMX_MCIIORDY_MASK (1 << 23)
+ #define PMX_MCICS1_MASK (1 << 24)
+ #define PMX_MCIDMAACK_MASK (1 << 25)
+ #define PMX_MCISDCMD_MASK (1 << 26)
+ #define PMX_MCILEDS_MASK (1 << 27)
+ #define PMX_TOUCH_XY_MASK (1 << 28)
+ #define PMX_SSP0_CS0_MASK (1 << 29)
+ #define PMX_SSP0_CS1_2_MASK (1 << 30)
+
+/* combined macros */
+#define PMX_GMII_MASK (PMX_GMIICLK_MASK | \
+ PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK | \
+ PMX_RXCLK_RDV_TXEN_D03_MASK | \
+ PMX_GMIID47_MASK | PMX_MDC_MDIO_MASK)
+
+#define PMX_EGPIO_0_GRP_MASK (PMX_EGPIO00_MASK | PMX_EGPIO01_MASK | \
+ PMX_EGPIO02_MASK | \
+ PMX_EGPIO03_MASK | PMX_EGPIO04_MASK | \
+ PMX_EGPIO05_MASK | PMX_EGPIO06_MASK | \
+ PMX_EGPIO07_MASK | PMX_EGPIO08_MASK | \
+ PMX_EGPIO09_MASK)
+#define PMX_EGPIO_1_GRP_MASK (PMX_EGPIO10_MASK | PMX_EGPIO11_MASK | \
+ PMX_EGPIO12_MASK | PMX_EGPIO13_MASK | \
+ PMX_EGPIO14_MASK | PMX_EGPIO15_MASK)
+
+#define PMX_KEYBOARD_6X6_MASK (PMX_KBD_ROW0_MASK | PMX_KBD_ROW1_MASK | \
+ PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL0_MASK | \
+ PMX_KBD_COL1_MASK)
+
+#define PMX_NAND8BIT_0_MASK (PMX_NAND8_MASK | PMX_NFAD23_MASK | \
+ PMX_NFAD24_MASK | PMX_NFAD25_MASK | \
+ PMX_NFWPRT3_MASK | PMX_NFRSTPWDWN0_MASK | \
+ PMX_NFRSTPWDWN1_MASK | PMX_NFRSTPWDWN2_MASK | \
+ PMX_NFCE3_MASK)
+#define PMX_NAND8BIT_1_MASK PMX_NFRSTPWDWN3_MASK
+
+#define PMX_NAND16BIT_1_MASK (PMX_KBD_ROWCOL25_MASK | PMX_NFIO8_15_MASK)
+#define PMX_NAND_4CHIPS_MASK (PMX_NFCE1_MASK | PMX_NFCE2_MASK | \
+ PMX_NFWPRT1_MASK | PMX_NFWPRT2_MASK | \
+ PMX_KBD_ROW0_MASK | PMX_KBD_ROW1_MASK | \
+ PMX_KBD_COL0_MASK | PMX_KBD_COL1_MASK)
+
+#define PMX_MCIFALL_1_MASK 0xF8000000
+#define PMX_MCIFALL_2_MASK 0x0FFFFFFF
+
+#define PMX_PCI_REG1_MASK (PMX_SMINCS2_MASK | PMX_SMINCS3_MASK | \
+ PMX_CLCD2_MASK | PMX_KBD_ROWCOL68_MASK | \
+ PMX_EGPIO_1_GRP_MASK | PMX_GPT0_TMR0_MASK | \
+ PMX_GPT0_TMR1_MASK | PMX_GPT1_TMR0_MASK | \
+ PMX_GPT1_TMR1_MASK | PMX_I2S1_MASK | \
+ PMX_NFCE2_MASK)
+#define PMX_PCI_REG2_MASK (PMX_TOUCH_XY_MASK | PMX_SSP0_CS0_MASK | \
+ PMX_SSP0_CS1_2_MASK)
+
+#define PMX_SMII_0_1_2_MASK (PMX_CLCD2_MASK | PMX_KBD_ROWCOL68_MASK)
+#define PMX_RGMII_REG0_MASK (PMX_MCI_DATA8_15_MASK | \
+ PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK | \
+ PMX_GMIID47_MASK)
+#define PMX_RGMII_REG1_MASK (PMX_KBD_ROWCOL68_MASK | PMX_EGPIO_1_GRP_MASK |\
+ PMX_KBD_ROW1_MASK | PMX_NFWPRT1_MASK | \
+ PMX_KBD_ROW0_MASK | PMX_NFWPRT2_MASK)
+#define PMX_RGMII_REG2_MASK (PMX_TOUCH_XY_MASK | PMX_SSP0_CS0_MASK | \
+ PMX_SSP0_CS1_2_MASK)
+
+#define PCIE_CFG_VAL(x) (PCIE_SATA##x##_SEL_PCIE | \
+ PCIE##x##_CFG_AUX_CLK_EN | \
+ PCIE##x##_CFG_CORE_CLK_EN | \
+ PCIE##x##_CFG_POWERUP_RESET | \
+ PCIE##x##_CFG_DEVICE_PRESENT)
+#define SATA_CFG_VAL(x) (PCIE_SATA##x##_SEL_SATA | \
+ SATA##x##_CFG_PM_CLK_EN | \
+ SATA##x##_CFG_POWERUP_RESET | \
+ SATA##x##_CFG_RX_CLK_EN | \
+ SATA##x##_CFG_TX_CLK_EN)
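/*
 * Expansion sketch (not part of the patch): the ## token pasting above turns
 * the port index into the matching bit names, so PCIE_CFG_VAL(1) expands to
 *
 *	(PCIE_SATA1_SEL_PCIE | PCIE1_CFG_AUX_CLK_EN | PCIE1_CFG_CORE_CLK_EN |
 *	 PCIE1_CFG_POWERUP_RESET | PCIE1_CFG_DEVICE_PRESENT)
 *
 * i.e. the full PCIe enable value for port 1 in PCIE_SATA_CFG. The helper
 * below is purely illustrative.
 */
static inline u32 sketch_pcie1_cfg_val(void)
{
	return PCIE_CFG_VAL(1);
}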
+
+/* Pad multiplexing for i2c0 device */
+static const unsigned i2c0_pins[] = { 102, 103 };
+static struct spear_muxreg i2c0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_I2C0_MASK,
+ .val = PMX_I2C0_MASK,
+ },
+};
+
+static struct spear_modemux i2c0_modemux[] = {
+ {
+ .muxregs = i2c0_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c0_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c0_pingroup = {
+ .name = "i2c0_grp",
+ .pins = i2c0_pins,
+ .npins = ARRAY_SIZE(i2c0_pins),
+ .modemuxs = i2c0_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c0_modemux),
+};
+
+static const char *const i2c0_grps[] = { "i2c0_grp" };
+static struct spear_function i2c0_function = {
+ .name = "i2c0",
+ .groups = i2c0_grps,
+ .ngroups = ARRAY_SIZE(i2c0_grps),
+};
+
+/* Pad multiplexing for ssp0 device */
+static const unsigned ssp0_pins[] = { 109, 110, 111, 112 };
+static struct spear_muxreg ssp0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_SSP0_MASK,
+ .val = PMX_SSP0_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_modemux[] = {
+ {
+ .muxregs = ssp0_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_pingroup = {
+ .name = "ssp0_grp",
+ .pins = ssp0_pins,
+ .npins = ARRAY_SIZE(ssp0_pins),
+ .modemuxs = ssp0_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_modemux),
+};
+
+/* Pad multiplexing for ssp0_cs0 device */
+static const unsigned ssp0_cs0_pins[] = { 96 };
+static struct spear_muxreg ssp0_cs0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_SSP0_CS0_MASK,
+ .val = PMX_SSP0_CS0_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_cs0_modemux[] = {
+ {
+ .muxregs = ssp0_cs0_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_cs0_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_cs0_pingroup = {
+ .name = "ssp0_cs0_grp",
+ .pins = ssp0_cs0_pins,
+ .npins = ARRAY_SIZE(ssp0_cs0_pins),
+ .modemuxs = ssp0_cs0_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_cs0_modemux),
+};
+
+/* ssp0_cs1_2 device */
+static const unsigned ssp0_cs1_2_pins[] = { 94, 95 };
+static struct spear_muxreg ssp0_cs1_2_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_SSP0_CS1_2_MASK,
+ .val = PMX_SSP0_CS1_2_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_cs1_2_modemux[] = {
+ {
+ .muxregs = ssp0_cs1_2_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_cs1_2_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_cs1_2_pingroup = {
+ .name = "ssp0_cs1_2_grp",
+ .pins = ssp0_cs1_2_pins,
+ .npins = ARRAY_SIZE(ssp0_cs1_2_pins),
+ .modemuxs = ssp0_cs1_2_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_cs1_2_modemux),
+};
+
+static const char *const ssp0_grps[] = { "ssp0_grp", "ssp0_cs0_grp",
+ "ssp0_cs1_2_grp" };
+static struct spear_function ssp0_function = {
+ .name = "ssp0",
+ .groups = ssp0_grps,
+ .ngroups = ARRAY_SIZE(ssp0_grps),
+};
+
+/* Pad multiplexing for i2s0 device */
+static const unsigned i2s0_pins[] = { 104, 105, 106, 107, 108 };
+static struct spear_muxreg i2s0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_I2S0_MASK,
+ .val = PMX_I2S0_MASK,
+ },
+};
+
+static struct spear_modemux i2s0_modemux[] = {
+ {
+ .muxregs = i2s0_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2s0_muxreg),
+ },
+};
+
+static struct spear_pingroup i2s0_pingroup = {
+ .name = "i2s0_grp",
+ .pins = i2s0_pins,
+ .npins = ARRAY_SIZE(i2s0_pins),
+ .modemuxs = i2s0_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2s0_modemux),
+};
+
+static const char *const i2s0_grps[] = { "i2s0_grp" };
+static struct spear_function i2s0_function = {
+ .name = "i2s0",
+ .groups = i2s0_grps,
+ .ngroups = ARRAY_SIZE(i2s0_grps),
+};
+
+/* Pad multiplexing for i2s1 device */
+static const unsigned i2s1_pins[] = { 0, 1, 2, 3 };
+static struct spear_muxreg i2s1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_I2S1_MASK,
+ .val = PMX_I2S1_MASK,
+ },
+};
+
+static struct spear_modemux i2s1_modemux[] = {
+ {
+ .muxregs = i2s1_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2s1_muxreg),
+ },
+};
+
+static struct spear_pingroup i2s1_pingroup = {
+ .name = "i2s1_grp",
+ .pins = i2s1_pins,
+ .npins = ARRAY_SIZE(i2s1_pins),
+ .modemuxs = i2s1_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2s1_modemux),
+};
+
+static const char *const i2s1_grps[] = { "i2s1_grp" };
+static struct spear_function i2s1_function = {
+ .name = "i2s1",
+ .groups = i2s1_grps,
+ .ngroups = ARRAY_SIZE(i2s1_grps),
+};
+
+/* Pad multiplexing for clcd device */
+static const unsigned clcd_pins[] = { 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142 };
+static struct spear_muxreg clcd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = PMX_CLCD1_MASK,
+ },
+};
+
+static struct spear_modemux clcd_modemux[] = {
+ {
+ .muxregs = clcd_muxreg,
+ .nmuxregs = ARRAY_SIZE(clcd_muxreg),
+ },
+};
+
+static struct spear_pingroup clcd_pingroup = {
+ .name = "clcd_grp",
+ .pins = clcd_pins,
+ .npins = ARRAY_SIZE(clcd_pins),
+ .modemuxs = clcd_modemux,
+ .nmodemuxs = ARRAY_SIZE(clcd_modemux),
+};
+
+static const unsigned clcd_high_res_pins[] = { 30, 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53 };
+static struct spear_muxreg clcd_high_res_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_CLCD2_MASK,
+ .val = PMX_CLCD2_MASK,
+ },
+};
+
+static struct spear_modemux clcd_high_res_modemux[] = {
+ {
+ .muxregs = clcd_high_res_muxreg,
+ .nmuxregs = ARRAY_SIZE(clcd_high_res_muxreg),
+ },
+};
+
+static struct spear_pingroup clcd_high_res_pingroup = {
+ .name = "clcd_high_res_grp",
+ .pins = clcd_high_res_pins,
+ .npins = ARRAY_SIZE(clcd_high_res_pins),
+ .modemuxs = clcd_high_res_modemux,
+ .nmodemuxs = ARRAY_SIZE(clcd_high_res_modemux),
+};
+
+static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res_grp" };
+static struct spear_function clcd_function = {
+ .name = "clcd",
+ .groups = clcd_grps,
+ .ngroups = ARRAY_SIZE(clcd_grps),
+};
+
+static const unsigned arm_gpio_pins[] = { 18, 19, 20, 21, 22, 23, 143, 144, 145,
+ 146, 147, 148, 149, 150, 151, 152 };
+static struct spear_muxreg arm_gpio_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_EGPIO_0_GRP_MASK,
+ .val = PMX_EGPIO_0_GRP_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_EGPIO_1_GRP_MASK,
+ .val = PMX_EGPIO_1_GRP_MASK,
+ },
+};
+
+static struct spear_modemux arm_gpio_modemux[] = {
+ {
+ .muxregs = arm_gpio_muxreg,
+ .nmuxregs = ARRAY_SIZE(arm_gpio_muxreg),
+ },
+};
+
+static struct spear_pingroup arm_gpio_pingroup = {
+ .name = "arm_gpio_grp",
+ .pins = arm_gpio_pins,
+ .npins = ARRAY_SIZE(arm_gpio_pins),
+ .modemuxs = arm_gpio_modemux,
+ .nmodemuxs = ARRAY_SIZE(arm_gpio_modemux),
+};
+
+static const char *const arm_gpio_grps[] = { "arm_gpio_grp" };
+static struct spear_function arm_gpio_function = {
+ .name = "arm_gpio",
+ .groups = arm_gpio_grps,
+ .ngroups = ARRAY_SIZE(arm_gpio_grps),
+};
+
+/* Pad multiplexing for smi 2 chips device */
+static const unsigned smi_2_chips_pins[] = { 153, 154, 155, 156, 157 };
+static struct spear_muxreg smi_2_chips_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_SMI_MASK,
+ .val = PMX_SMI_MASK,
+ },
+};
+
+static struct spear_modemux smi_2_chips_modemux[] = {
+ {
+ .muxregs = smi_2_chips_muxreg,
+ .nmuxregs = ARRAY_SIZE(smi_2_chips_muxreg),
+ },
+};
+
+static struct spear_pingroup smi_2_chips_pingroup = {
+ .name = "smi_2_chips_grp",
+ .pins = smi_2_chips_pins,
+ .npins = ARRAY_SIZE(smi_2_chips_pins),
+ .modemuxs = smi_2_chips_modemux,
+ .nmodemuxs = ARRAY_SIZE(smi_2_chips_modemux),
+};
+
+static const unsigned smi_4_chips_pins[] = { 54, 55 };
+static struct spear_muxreg smi_4_chips_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_SMI_MASK,
+ .val = PMX_SMI_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+ .val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+ },
+};
+
+static struct spear_modemux smi_4_chips_modemux[] = {
+ {
+ .muxregs = smi_4_chips_muxreg,
+ .nmuxregs = ARRAY_SIZE(smi_4_chips_muxreg),
+ },
+};
+
+static struct spear_pingroup smi_4_chips_pingroup = {
+ .name = "smi_4_chips_grp",
+ .pins = smi_4_chips_pins,
+ .npins = ARRAY_SIZE(smi_4_chips_pins),
+ .modemuxs = smi_4_chips_modemux,
+ .nmodemuxs = ARRAY_SIZE(smi_4_chips_modemux),
+};
+
+static const char *const smi_grps[] = { "smi_2_chips_grp", "smi_4_chips_grp" };
+static struct spear_function smi_function = {
+ .name = "smi",
+ .groups = smi_grps,
+ .ngroups = ARRAY_SIZE(smi_grps),
+};
+
+/* Pad multiplexing for gmii device */
+static const unsigned gmii_pins[] = { 173, 174, 175, 176, 177, 178, 179, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200 };
+static struct spear_muxreg gmii_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_GMII_MASK,
+ .val = PMX_GMII_MASK,
+ },
+};
+
+static struct spear_modemux gmii_modemux[] = {
+ {
+ .muxregs = gmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(gmii_muxreg),
+ },
+};
+
+static struct spear_pingroup gmii_pingroup = {
+ .name = "gmii_grp",
+ .pins = gmii_pins,
+ .npins = ARRAY_SIZE(gmii_pins),
+ .modemuxs = gmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(gmii_modemux),
+};
+
+static const char *const gmii_grps[] = { "gmii_grp" };
+static struct spear_function gmii_function = {
+ .name = "gmii",
+ .groups = gmii_grps,
+ .ngroups = ARRAY_SIZE(gmii_grps),
+};
+
+/* Pad multiplexing for rgmii device */
+static const unsigned rgmii_pins[] = { 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 175,
+ 180, 181, 182, 183, 185, 188, 193, 194, 195, 196, 197, 198, 211, 212 };
+static struct spear_muxreg rgmii_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_RGMII_REG0_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_RGMII_REG1_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_RGMII_REG2_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux rgmii_modemux[] = {
+ {
+ .muxregs = rgmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(rgmii_muxreg),
+ },
+};
+
+static struct spear_pingroup rgmii_pingroup = {
+ .name = "rgmii_grp",
+ .pins = rgmii_pins,
+ .npins = ARRAY_SIZE(rgmii_pins),
+ .modemuxs = rgmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(rgmii_modemux),
+};
+
+static const char *const rgmii_grps[] = { "rgmii_grp" };
+static struct spear_function rgmii_function = {
+ .name = "rgmii",
+ .groups = rgmii_grps,
+ .ngroups = ARRAY_SIZE(rgmii_grps),
+};
+
+/* Pad multiplexing for smii_0_1_2 device */
+static const unsigned smii_0_1_2_pins[] = { 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55 };
+static struct spear_muxreg smii_0_1_2_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_SMII_0_1_2_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux smii_0_1_2_modemux[] = {
+ {
+ .muxregs = smii_0_1_2_muxreg,
+ .nmuxregs = ARRAY_SIZE(smii_0_1_2_muxreg),
+ },
+};
+
+static struct spear_pingroup smii_0_1_2_pingroup = {
+ .name = "smii_0_1_2_grp",
+ .pins = smii_0_1_2_pins,
+ .npins = ARRAY_SIZE(smii_0_1_2_pins),
+ .modemuxs = smii_0_1_2_modemux,
+ .nmodemuxs = ARRAY_SIZE(smii_0_1_2_modemux),
+};
+
+static const char *const smii_0_1_2_grps[] = { "smii_0_1_2_grp" };
+static struct spear_function smii_0_1_2_function = {
+ .name = "smii_0_1_2",
+ .groups = smii_0_1_2_grps,
+ .ngroups = ARRAY_SIZE(smii_0_1_2_grps),
+};
+
+/* Pad multiplexing for ras_mii_txclk device */
+static const unsigned ras_mii_txclk_pins[] = { 98, 99 };
+static struct spear_muxreg ras_mii_txclk_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_NFCE2_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux ras_mii_txclk_modemux[] = {
+ {
+ .muxregs = ras_mii_txclk_muxreg,
+ .nmuxregs = ARRAY_SIZE(ras_mii_txclk_muxreg),
+ },
+};
+
+static struct spear_pingroup ras_mii_txclk_pingroup = {
+ .name = "ras_mii_txclk_grp",
+ .pins = ras_mii_txclk_pins,
+ .npins = ARRAY_SIZE(ras_mii_txclk_pins),
+ .modemuxs = ras_mii_txclk_modemux,
+ .nmodemuxs = ARRAY_SIZE(ras_mii_txclk_modemux),
+};
+
+static const char *const ras_mii_txclk_grps[] = { "ras_mii_txclk_grp" };
+static struct spear_function ras_mii_txclk_function = {
+ .name = "ras_mii_txclk",
+ .groups = ras_mii_txclk_grps,
+ .ngroups = ARRAY_SIZE(ras_mii_txclk_grps),
+};
+
+/* Pad multiplexing for nand 8bit device (cs0 only) */
+static const unsigned nand_8bit_pins[] = { 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
+ 83, 84, 85, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 212 };
+static struct spear_muxreg nand_8bit_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_NAND8BIT_0_MASK,
+ .val = PMX_NAND8BIT_0_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_NAND8BIT_1_MASK,
+ .val = PMX_NAND8BIT_1_MASK,
+ },
+};
+
+static struct spear_modemux nand_8bit_modemux[] = {
+ {
+ .muxregs = nand_8bit_muxreg,
+ .nmuxregs = ARRAY_SIZE(nand_8bit_muxreg),
+ },
+};
+
+static struct spear_pingroup nand_8bit_pingroup = {
+ .name = "nand_8bit_grp",
+ .pins = nand_8bit_pins,
+ .npins = ARRAY_SIZE(nand_8bit_pins),
+ .modemuxs = nand_8bit_modemux,
+ .nmodemuxs = ARRAY_SIZE(nand_8bit_modemux),
+};
+
+/* Pad multiplexing for nand 16bit device */
+static const unsigned nand_16bit_pins[] = { 201, 202, 203, 204, 207, 208, 209,
+ 210 };
+static struct spear_muxreg nand_16bit_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_NAND16BIT_1_MASK,
+ .val = PMX_NAND16BIT_1_MASK,
+ },
+};
+
+static struct spear_modemux nand_16bit_modemux[] = {
+ {
+ .muxregs = nand_16bit_muxreg,
+ .nmuxregs = ARRAY_SIZE(nand_16bit_muxreg),
+ },
+};
+
+static struct spear_pingroup nand_16bit_pingroup = {
+ .name = "nand_16bit_grp",
+ .pins = nand_16bit_pins,
+ .npins = ARRAY_SIZE(nand_16bit_pins),
+ .modemuxs = nand_16bit_modemux,
+ .nmodemuxs = ARRAY_SIZE(nand_16bit_modemux),
+};
+
+/* Pad multiplexing for nand 4 chips */
+static const unsigned nand_4_chips_pins[] = { 205, 206, 211, 212 };
+static struct spear_muxreg nand_4_chips_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_NAND_4CHIPS_MASK,
+ .val = PMX_NAND_4CHIPS_MASK,
+ },
+};
+
+static struct spear_modemux nand_4_chips_modemux[] = {
+ {
+ .muxregs = nand_4_chips_muxreg,
+ .nmuxregs = ARRAY_SIZE(nand_4_chips_muxreg),
+ },
+};
+
+static struct spear_pingroup nand_4_chips_pingroup = {
+ .name = "nand_4_chips_grp",
+ .pins = nand_4_chips_pins,
+ .npins = ARRAY_SIZE(nand_4_chips_pins),
+ .modemuxs = nand_4_chips_modemux,
+ .nmodemuxs = ARRAY_SIZE(nand_4_chips_modemux),
+};
+
+static const char *const nand_grps[] = { "nand_8bit_grp", "nand_16bit_grp",
+ "nand_4_chips_grp" };
+static struct spear_function nand_function = {
+ .name = "nand",
+ .groups = nand_grps,
+ .ngroups = ARRAY_SIZE(nand_grps),
+};
+
+/* Pad multiplexing for keyboard_6x6 device */
+static const unsigned keyboard_6x6_pins[] = { 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212 };
+static struct spear_muxreg keyboard_6x6_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_KEYBOARD_6X6_MASK | PMX_NFIO8_15_MASK |
+ PMX_NFCE1_MASK | PMX_NFCE2_MASK | PMX_NFWPRT1_MASK |
+ PMX_NFWPRT2_MASK,
+ .val = PMX_KEYBOARD_6X6_MASK,
+ },
+};
+
+static struct spear_modemux keyboard_6x6_modemux[] = {
+ {
+ .muxregs = keyboard_6x6_muxreg,
+ .nmuxregs = ARRAY_SIZE(keyboard_6x6_muxreg),
+ },
+};
+
+static struct spear_pingroup keyboard_6x6_pingroup = {
+ .name = "keyboard_6x6_grp",
+ .pins = keyboard_6x6_pins,
+ .npins = ARRAY_SIZE(keyboard_6x6_pins),
+ .modemuxs = keyboard_6x6_modemux,
+ .nmodemuxs = ARRAY_SIZE(keyboard_6x6_modemux),
+};
+
+/* Pad multiplexing for keyboard_rowcol6_8 device */
+static const unsigned keyboard_rowcol6_8_pins[] = { 24, 25, 26, 27, 28, 29 };
+static struct spear_muxreg keyboard_rowcol6_8_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_KBD_ROWCOL68_MASK,
+ .val = PMX_KBD_ROWCOL68_MASK,
+ },
+};
+
+static struct spear_modemux keyboard_rowcol6_8_modemux[] = {
+ {
+ .muxregs = keyboard_rowcol6_8_muxreg,
+ .nmuxregs = ARRAY_SIZE(keyboard_rowcol6_8_muxreg),
+ },
+};
+
+static struct spear_pingroup keyboard_rowcol6_8_pingroup = {
+ .name = "keyboard_rowcol6_8_grp",
+ .pins = keyboard_rowcol6_8_pins,
+ .npins = ARRAY_SIZE(keyboard_rowcol6_8_pins),
+ .modemuxs = keyboard_rowcol6_8_modemux,
+ .nmodemuxs = ARRAY_SIZE(keyboard_rowcol6_8_modemux),
+};
+
+static const char *const keyboard_grps[] = { "keyboard_6x6_grp",
+ "keyboard_rowcol6_8_grp" };
+static struct spear_function keyboard_function = {
+ .name = "keyboard",
+ .groups = keyboard_grps,
+ .ngroups = ARRAY_SIZE(keyboard_grps),
+};
+
+/* Pad multiplexing for uart0 device */
+static const unsigned uart0_pins[] = { 100, 101 };
+static struct spear_muxreg uart0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_UART0_MASK,
+ .val = PMX_UART0_MASK,
+ },
+};
+
+static struct spear_modemux uart0_modemux[] = {
+ {
+ .muxregs = uart0_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart0_muxreg),
+ },
+};
+
+static struct spear_pingroup uart0_pingroup = {
+ .name = "uart0_grp",
+ .pins = uart0_pins,
+ .npins = ARRAY_SIZE(uart0_pins),
+ .modemuxs = uart0_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart0_modemux),
+};
+
+/* Pad multiplexing for uart0_modem device */
+static const unsigned uart0_modem_pins[] = { 12, 13, 14, 15, 16, 17 };
+static struct spear_muxreg uart0_modem_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_UART0_MODEM_MASK,
+ .val = PMX_UART0_MODEM_MASK,
+ },
+};
+
+static struct spear_modemux uart0_modem_modemux[] = {
+ {
+ .muxregs = uart0_modem_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart0_modem_muxreg),
+ },
+};
+
+static struct spear_pingroup uart0_modem_pingroup = {
+ .name = "uart0_modem_grp",
+ .pins = uart0_modem_pins,
+ .npins = ARRAY_SIZE(uart0_modem_pins),
+ .modemuxs = uart0_modem_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart0_modem_modemux),
+};
+
+static const char *const uart0_grps[] = { "uart0_grp", "uart0_modem_grp" };
+static struct spear_function uart0_function = {
+ .name = "uart0",
+ .groups = uart0_grps,
+ .ngroups = ARRAY_SIZE(uart0_grps),
+};
+
+/* Pad multiplexing for gpt0_tmr0 device */
+static const unsigned gpt0_tmr0_pins[] = { 10, 11 };
+static struct spear_muxreg gpt0_tmr0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_GPT0_TMR0_MASK,
+ .val = PMX_GPT0_TMR0_MASK,
+ },
+};
+
+static struct spear_modemux gpt0_tmr0_modemux[] = {
+ {
+ .muxregs = gpt0_tmr0_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt0_tmr0_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt0_tmr0_pingroup = {
+ .name = "gpt0_tmr0_grp",
+ .pins = gpt0_tmr0_pins,
+ .npins = ARRAY_SIZE(gpt0_tmr0_pins),
+ .modemuxs = gpt0_tmr0_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt0_tmr0_modemux),
+};
+
+/* Pad multiplexing for gpt0_tmr1 device */
+static const unsigned gpt0_tmr1_pins[] = { 8, 9 };
+static struct spear_muxreg gpt0_tmr1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_GPT0_TMR1_MASK,
+ .val = PMX_GPT0_TMR1_MASK,
+ },
+};
+
+static struct spear_modemux gpt0_tmr1_modemux[] = {
+ {
+ .muxregs = gpt0_tmr1_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt0_tmr1_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt0_tmr1_pingroup = {
+ .name = "gpt0_tmr1_grp",
+ .pins = gpt0_tmr1_pins,
+ .npins = ARRAY_SIZE(gpt0_tmr1_pins),
+ .modemuxs = gpt0_tmr1_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt0_tmr1_modemux),
+};
+
+static const char *const gpt0_grps[] = { "gpt0_tmr0_grp", "gpt0_tmr1_grp" };
+static struct spear_function gpt0_function = {
+ .name = "gpt0",
+ .groups = gpt0_grps,
+ .ngroups = ARRAY_SIZE(gpt0_grps),
+};
+
+/* Pad multiplexing for gpt1_tmr0 device */
+static const unsigned gpt1_tmr0_pins[] = { 6, 7 };
+static struct spear_muxreg gpt1_tmr0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_GPT1_TMR0_MASK,
+ .val = PMX_GPT1_TMR0_MASK,
+ },
+};
+
+static struct spear_modemux gpt1_tmr0_modemux[] = {
+ {
+ .muxregs = gpt1_tmr0_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt1_tmr0_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt1_tmr0_pingroup = {
+ .name = "gpt1_tmr0_grp",
+ .pins = gpt1_tmr0_pins,
+ .npins = ARRAY_SIZE(gpt1_tmr0_pins),
+ .modemuxs = gpt1_tmr0_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt1_tmr0_modemux),
+};
+
+/* Pad multiplexing for gpt1_tmr1 device */
+static const unsigned gpt1_tmr1_pins[] = { 4, 5 };
+static struct spear_muxreg gpt1_tmr1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_GPT1_TMR1_MASK,
+ .val = PMX_GPT1_TMR1_MASK,
+ },
+};
+
+static struct spear_modemux gpt1_tmr1_modemux[] = {
+ {
+ .muxregs = gpt1_tmr1_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt1_tmr1_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt1_tmr1_pingroup = {
+ .name = "gpt1_tmr1_grp",
+ .pins = gpt1_tmr1_pins,
+ .npins = ARRAY_SIZE(gpt1_tmr1_pins),
+ .modemuxs = gpt1_tmr1_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt1_tmr1_modemux),
+};
+
+static const char *const gpt1_grps[] = { "gpt1_tmr1_grp", "gpt1_tmr0_grp" };
+static struct spear_function gpt1_function = {
+ .name = "gpt1",
+ .groups = gpt1_grps,
+ .ngroups = ARRAY_SIZE(gpt1_grps),
+};
+
+/* Pad multiplexing for mcif device */
+static const unsigned mcif_pins[] = { 86, 87, 88, 89, 90, 91, 92, 93, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245 };
+#define MCIF_MUXREG \
+ { \
+ .reg = PAD_FUNCTION_EN_0, \
+ .mask = PMX_MCI_DATA8_15_MASK, \
+ .val = PMX_MCI_DATA8_15_MASK, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_1, \
+ .mask = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \
+ PMX_NFWPRT2_MASK, \
+ .val = PMX_MCIFALL_1_MASK, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_2, \
+ .mask = PMX_MCIFALL_2_MASK, \
+ .val = PMX_MCIFALL_2_MASK, \
+ }
+
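+/*
+ * The MCIF pads are shared by the sdhci, cf and xd groups below. Each group
+ * reuses MCIF_MUXREG and differs only in the MCIF_SEL_* value it programs
+ * into PERIP_CFG.
+ */
+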
+/* sdhci device */
+static struct spear_muxreg sdhci_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_SD,
+ },
+};
+
+static struct spear_modemux sdhci_modemux[] = {
+ {
+ .muxregs = sdhci_muxreg,
+ .nmuxregs = ARRAY_SIZE(sdhci_muxreg),
+ },
+};
+
+static struct spear_pingroup sdhci_pingroup = {
+ .name = "sdhci_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = sdhci_modemux,
+ .nmodemuxs = ARRAY_SIZE(sdhci_modemux),
+};
+
+static const char *const sdhci_grps[] = { "sdhci_grp" };
+static struct spear_function sdhci_function = {
+ .name = "sdhci",
+ .groups = sdhci_grps,
+ .ngroups = ARRAY_SIZE(sdhci_grps),
+};
+
+/* cf device */
+static struct spear_muxreg cf_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_CF,
+ },
+};
+
+static struct spear_modemux cf_modemux[] = {
+ {
+ .muxregs = cf_muxreg,
+ .nmuxregs = ARRAY_SIZE(cf_muxreg),
+ },
+};
+
+static struct spear_pingroup cf_pingroup = {
+ .name = "cf_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = cf_modemux,
+ .nmodemuxs = ARRAY_SIZE(cf_modemux),
+};
+
+static const char *const cf_grps[] = { "cf_grp" };
+static struct spear_function cf_function = {
+ .name = "cf",
+ .groups = cf_grps,
+ .ngroups = ARRAY_SIZE(cf_grps),
+};
+
+/* xd device */
+static struct spear_muxreg xd_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_XD,
+ },
+};
+
+static struct spear_modemux xd_modemux[] = {
+ {
+ .muxregs = xd_muxreg,
+ .nmuxregs = ARRAY_SIZE(xd_muxreg),
+ },
+};
+
+static struct spear_pingroup xd_pingroup = {
+ .name = "xd_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = xd_modemux,
+ .nmodemuxs = ARRAY_SIZE(xd_modemux),
+};
+
+static const char *const xd_grps[] = { "xd_grp" };
+static struct spear_function xd_function = {
+ .name = "xd",
+ .groups = xd_grps,
+ .ngroups = ARRAY_SIZE(xd_grps),
+};
+
+/* Pad multiplexing for touch_xy device */
+static const unsigned touch_xy_pins[] = { 97 };
+static struct spear_muxreg touch_xy_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_TOUCH_XY_MASK,
+ .val = PMX_TOUCH_XY_MASK,
+ },
+};
+
+static struct spear_modemux touch_xy_modemux[] = {
+ {
+ .muxregs = touch_xy_muxreg,
+ .nmuxregs = ARRAY_SIZE(touch_xy_muxreg),
+ },
+};
+
+static struct spear_pingroup touch_xy_pingroup = {
+ .name = "touch_xy_grp",
+ .pins = touch_xy_pins,
+ .npins = ARRAY_SIZE(touch_xy_pins),
+ .modemuxs = touch_xy_modemux,
+ .nmodemuxs = ARRAY_SIZE(touch_xy_modemux),
+};
+
+static const char *const touch_xy_grps[] = { "touch_xy_grp" };
+static struct spear_function touch_xy_function = {
+ .name = "touchscreen",
+ .groups = touch_xy_grps,
+ .ngroups = ARRAY_SIZE(touch_xy_grps),
+};
+
+/* Pad multiplexing for uart1 device */
+/* Muxed with I2C */
+static const unsigned uart1_dis_i2c_pins[] = { 102, 103 };
+static struct spear_muxreg uart1_dis_i2c_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_I2C0_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux uart1_dis_i2c_modemux[] = {
+ {
+ .muxregs = uart1_dis_i2c_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart1_dis_i2c_muxreg),
+ },
+};
+
+static struct spear_pingroup uart_1_dis_i2c_pingroup = {
+ .name = "uart1_disable_i2c_grp",
+ .pins = uart1_dis_i2c_pins,
+ .npins = ARRAY_SIZE(uart1_dis_i2c_pins),
+ .modemuxs = uart1_dis_i2c_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart1_dis_i2c_modemux),
+};
+
+/* Muxed with SD/MMC */
+static const unsigned uart1_dis_sd_pins[] = { 214, 215 };
+static struct spear_muxreg uart1_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_MCIDATA1_MASK |
+ PMX_MCIDATA2_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux uart1_dis_sd_modemux[] = {
+ {
+ .muxregs = uart1_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart1_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup uart_1_dis_sd_pingroup = {
+ .name = "uart1_disable_sd_grp",
+ .pins = uart1_dis_sd_pins,
+ .npins = ARRAY_SIZE(uart1_dis_sd_pins),
+ .modemuxs = uart1_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart1_dis_sd_modemux),
+};
+
+static const char *const uart1_grps[] = { "uart1_disable_i2c_grp",
+ "uart1_disable_sd_grp" };
+static struct spear_function uart1_function = {
+ .name = "uart1",
+ .groups = uart1_grps,
+ .ngroups = ARRAY_SIZE(uart1_grps),
+};
+
+/* Pad multiplexing for uart2_3 device */
+static const unsigned uart2_3_pins[] = { 104, 105, 106, 107 };
+static struct spear_muxreg uart2_3_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_I2S0_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux uart2_3_modemux[] = {
+ {
+ .muxregs = uart2_3_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart2_3_muxreg),
+ },
+};
+
+static struct spear_pingroup uart_2_3_pingroup = {
+ .name = "uart2_3_grp",
+ .pins = uart2_3_pins,
+ .npins = ARRAY_SIZE(uart2_3_pins),
+ .modemuxs = uart2_3_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart2_3_modemux),
+};
+
+static const char *const uart2_3_grps[] = { "uart2_3_grp" };
+static struct spear_function uart2_3_function = {
+ .name = "uart2_3",
+ .groups = uart2_3_grps,
+ .ngroups = ARRAY_SIZE(uart2_3_grps),
+};
+
+/* Pad multiplexing for uart4 device */
+static const unsigned uart4_pins[] = { 108, 113 };
+static struct spear_muxreg uart4_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_I2S0_MASK | PMX_CLCD1_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux uart4_modemux[] = {
+ {
+ .muxregs = uart4_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart4_muxreg),
+ },
+};
+
+static struct spear_pingroup uart_4_pingroup = {
+ .name = "uart4_grp",
+ .pins = uart4_pins,
+ .npins = ARRAY_SIZE(uart4_pins),
+ .modemuxs = uart4_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart4_modemux),
+};
+
+static const char *const uart4_grps[] = { "uart4_grp" };
+static struct spear_function uart4_function = {
+ .name = "uart4",
+ .groups = uart4_grps,
+ .ngroups = ARRAY_SIZE(uart4_grps),
+};
+
+/* Pad multiplexing for uart5 device */
+static const unsigned uart5_pins[] = { 114, 115 };
+static struct spear_muxreg uart5_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux uart5_modemux[] = {
+ {
+ .muxregs = uart5_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart5_muxreg),
+ },
+};
+
+static struct spear_pingroup uart_5_pingroup = {
+ .name = "uart5_grp",
+ .pins = uart5_pins,
+ .npins = ARRAY_SIZE(uart5_pins),
+ .modemuxs = uart5_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart5_modemux),
+};
+
+static const char *const uart5_grps[] = { "uart5_grp" };
+static struct spear_function uart5_function = {
+ .name = "uart5",
+ .groups = uart5_grps,
+ .ngroups = ARRAY_SIZE(uart5_grps),
+};
+
+/* Pad multiplexing for rs485_0_1_tdm_0_1 device */
+static const unsigned rs485_0_1_tdm_0_1_pins[] = { 116, 117, 118, 119, 120, 121,
+ 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137 };
+static struct spear_muxreg rs485_0_1_tdm_0_1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux rs485_0_1_tdm_0_1_modemux[] = {
+ {
+ .muxregs = rs485_0_1_tdm_0_1_muxreg,
+ .nmuxregs = ARRAY_SIZE(rs485_0_1_tdm_0_1_muxreg),
+ },
+};
+
+static struct spear_pingroup rs485_0_1_tdm_0_1_pingroup = {
+ .name = "rs485_0_1_tdm_0_1_grp",
+ .pins = rs485_0_1_tdm_0_1_pins,
+ .npins = ARRAY_SIZE(rs485_0_1_tdm_0_1_pins),
+ .modemuxs = rs485_0_1_tdm_0_1_modemux,
+ .nmodemuxs = ARRAY_SIZE(rs485_0_1_tdm_0_1_modemux),
+};
+
+static const char *const rs485_0_1_tdm_0_1_grps[] = { "rs485_0_1_tdm_0_1_grp" };
+static struct spear_function rs485_0_1_tdm_0_1_function = {
+ .name = "rs485_0_1_tdm_0_1",
+ .groups = rs485_0_1_tdm_0_1_grps,
+ .ngroups = ARRAY_SIZE(rs485_0_1_tdm_0_1_grps),
+};
+
+/* Pad multiplexing for i2c_1_2 device */
+static const unsigned i2c_1_2_pins[] = { 138, 139, 140, 141 };
+static struct spear_muxreg i2c_1_2_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c_1_2_modemux[] = {
+ {
+ .muxregs = i2c_1_2_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c_1_2_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c_1_2_pingroup = {
+ .name = "i2c_1_2_grp",
+ .pins = i2c_1_2_pins,
+ .npins = ARRAY_SIZE(i2c_1_2_pins),
+ .modemuxs = i2c_1_2_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c_1_2_modemux),
+};
+
+static const char *const i2c_1_2_grps[] = { "i2c_1_2_grp" };
+static struct spear_function i2c_1_2_function = {
+ .name = "i2c_1_2",
+ .groups = i2c_1_2_grps,
+ .ngroups = ARRAY_SIZE(i2c_1_2_grps),
+};
+
+/* Pad multiplexing for i2c3_dis_smi_clcd device */
+/* Muxed with SMI & CLCD */
+static const unsigned i2c3_dis_smi_clcd_pins[] = { 142, 153 };
+static struct spear_muxreg i2c3_dis_smi_clcd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_CLCD1_MASK | PMX_SMI_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c3_dis_smi_clcd_modemux[] = {
+ {
+ .muxregs = i2c3_dis_smi_clcd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c3_dis_smi_clcd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c3_dis_smi_clcd_pingroup = {
+ .name = "i2c3_dis_smi_clcd_grp",
+ .pins = i2c3_dis_smi_clcd_pins,
+ .npins = ARRAY_SIZE(i2c3_dis_smi_clcd_pins),
+ .modemuxs = i2c3_dis_smi_clcd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c3_dis_smi_clcd_modemux),
+};
+
+/* Pad multiplexing for i2c3_dis_sd_i2s0 device */
+/* Muxed with SD/MMC & I2S1 */
+static const unsigned i2c3_dis_sd_i2s0_pins[] = { 0, 216 };
+static struct spear_muxreg i2c3_dis_sd_i2s0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c3_dis_sd_i2s0_modemux[] = {
+ {
+ .muxregs = i2c3_dis_sd_i2s0_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c3_dis_sd_i2s0_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c3_dis_sd_i2s0_pingroup = {
+ .name = "i2c3_dis_sd_i2s0_grp",
+ .pins = i2c3_dis_sd_i2s0_pins,
+ .npins = ARRAY_SIZE(i2c3_dis_sd_i2s0_pins),
+ .modemuxs = i2c3_dis_sd_i2s0_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c3_dis_sd_i2s0_modemux),
+};
+
+static const char *const i2c3_grps[] = { "i2c3_dis_smi_clcd_grp",
+ "i2c3_dis_sd_i2s0_grp" };
+static struct spear_function i2c3_function = {
+ .name = "i2c3_i2s1",
+ .groups = i2c3_grps,
+ .ngroups = ARRAY_SIZE(i2c3_grps),
+};
+
+/* Pad multiplexing for i2c_4_5_dis_smi device */
+/* Muxed with SMI */
+static const unsigned i2c_4_5_dis_smi_pins[] = { 154, 155, 156, 157 };
+static struct spear_muxreg i2c_4_5_dis_smi_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_SMI_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c_4_5_dis_smi_modemux[] = {
+ {
+ .muxregs = i2c_4_5_dis_smi_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c_4_5_dis_smi_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c_4_5_dis_smi_pingroup = {
+ .name = "i2c_4_5_dis_smi_grp",
+ .pins = i2c_4_5_dis_smi_pins,
+ .npins = ARRAY_SIZE(i2c_4_5_dis_smi_pins),
+ .modemuxs = i2c_4_5_dis_smi_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c_4_5_dis_smi_modemux),
+};
+
+/* Pad multiplexing for i2c4_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned i2c4_dis_sd_pins[] = { 217, 218 };
+static struct spear_muxreg i2c4_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_MCIDATA4_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCIDATA5_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c4_dis_sd_modemux[] = {
+ {
+ .muxregs = i2c4_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c4_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c4_dis_sd_pingroup = {
+ .name = "i2c4_dis_sd_grp",
+ .pins = i2c4_dis_sd_pins,
+ .npins = ARRAY_SIZE(i2c4_dis_sd_pins),
+ .modemuxs = i2c4_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c4_dis_sd_modemux),
+};
+
+/* Pad multiplexing for i2c5_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned i2c5_dis_sd_pins[] = { 219, 220 };
+static struct spear_muxreg i2c5_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCIDATA6_MASK |
+ PMX_MCIDATA7_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c5_dis_sd_modemux[] = {
+ {
+ .muxregs = i2c5_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c5_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c5_dis_sd_pingroup = {
+ .name = "i2c5_dis_sd_grp",
+ .pins = i2c5_dis_sd_pins,
+ .npins = ARRAY_SIZE(i2c5_dis_sd_pins),
+ .modemuxs = i2c5_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c5_dis_sd_modemux),
+};
+
+static const char *const i2c_4_5_grps[] = { "i2c5_dis_sd_grp",
+ "i2c4_dis_sd_grp", "i2c_4_5_dis_smi_grp" };
+static struct spear_function i2c_4_5_function = {
+ .name = "i2c_4_5",
+ .groups = i2c_4_5_grps,
+ .ngroups = ARRAY_SIZE(i2c_4_5_grps),
+};
+
+/* Pad multiplexing for i2c_6_7_dis_kbd device */
+/* Muxed with KBD */
+static const unsigned i2c_6_7_dis_kbd_pins[] = { 207, 208, 209, 210 };
+static struct spear_muxreg i2c_6_7_dis_kbd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_KBD_ROWCOL25_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c_6_7_dis_kbd_modemux[] = {
+ {
+ .muxregs = i2c_6_7_dis_kbd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c_6_7_dis_kbd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c_6_7_dis_kbd_pingroup = {
+ .name = "i2c_6_7_dis_kbd_grp",
+ .pins = i2c_6_7_dis_kbd_pins,
+ .npins = ARRAY_SIZE(i2c_6_7_dis_kbd_pins),
+ .modemuxs = i2c_6_7_dis_kbd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c_6_7_dis_kbd_modemux),
+};
+
+/* Pad multiplexing for i2c6_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned i2c6_dis_sd_pins[] = { 236, 237 };
+static struct spear_muxreg i2c6_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCIIORDRE_MASK |
+ PMX_MCIIOWRWE_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c6_dis_sd_modemux[] = {
+ {
+ .muxregs = i2c6_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c6_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c6_dis_sd_pingroup = {
+ .name = "i2c6_dis_sd_grp",
+ .pins = i2c6_dis_sd_pins,
+ .npins = ARRAY_SIZE(i2c6_dis_sd_pins),
+ .modemuxs = i2c6_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c6_dis_sd_modemux),
+};
+
+/* Pad multiplexing for i2c7_dis_sd device */
+static const unsigned i2c7_dis_sd_pins[] = { 238, 239 };
+static struct spear_muxreg i2c7_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCIRESETCF_MASK |
+ PMX_MCICS0CE_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c7_dis_sd_modemux[] = {
+ {
+ .muxregs = i2c7_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c7_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c7_dis_sd_pingroup = {
+ .name = "i2c7_dis_sd_grp",
+ .pins = i2c7_dis_sd_pins,
+ .npins = ARRAY_SIZE(i2c7_dis_sd_pins),
+ .modemuxs = i2c7_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c7_dis_sd_modemux),
+};
+
+static const char *const i2c_6_7_grps[] = { "i2c6_dis_sd_grp",
+ "i2c7_dis_sd_grp", "i2c_6_7_dis_kbd_grp" };
+static struct spear_function i2c_6_7_function = {
+ .name = "i2c_6_7",
+ .groups = i2c_6_7_grps,
+ .ngroups = ARRAY_SIZE(i2c_6_7_grps),
+};
+
+/* Pad multiplexing for can0_dis_nor device */
+/* Muxed with NOR */
+static const unsigned can0_dis_nor_pins[] = { 56, 57 };
+static struct spear_muxreg can0_dis_nor_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_NFRSTPWDWN2_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_NFRSTPWDWN3_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux can0_dis_nor_modemux[] = {
+ {
+ .muxregs = can0_dis_nor_muxreg,
+ .nmuxregs = ARRAY_SIZE(can0_dis_nor_muxreg),
+ },
+};
+
+static struct spear_pingroup can0_dis_nor_pingroup = {
+ .name = "can0_dis_nor_grp",
+ .pins = can0_dis_nor_pins,
+ .npins = ARRAY_SIZE(can0_dis_nor_pins),
+ .modemuxs = can0_dis_nor_modemux,
+ .nmodemuxs = ARRAY_SIZE(can0_dis_nor_modemux),
+};
+
+/* Pad multiplexing for can0_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned can0_dis_sd_pins[] = { 240, 241 };
+static struct spear_muxreg can0_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux can0_dis_sd_modemux[] = {
+ {
+ .muxregs = can0_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(can0_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup can0_dis_sd_pingroup = {
+ .name = "can0_dis_sd_grp",
+ .pins = can0_dis_sd_pins,
+ .npins = ARRAY_SIZE(can0_dis_sd_pins),
+ .modemuxs = can0_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(can0_dis_sd_modemux),
+};
+
+static const char *const can0_grps[] = { "can0_dis_nor_grp", "can0_dis_sd_grp"
+};
+static struct spear_function can0_function = {
+ .name = "can0",
+ .groups = can0_grps,
+ .ngroups = ARRAY_SIZE(can0_grps),
+};
+
+/* Pad multiplexing for can1_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned can1_dis_sd_pins[] = { 242, 243 };
+static struct spear_muxreg can1_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux can1_dis_sd_modemux[] = {
+ {
+ .muxregs = can1_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(can1_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup can1_dis_sd_pingroup = {
+ .name = "can1_dis_sd_grp",
+ .pins = can1_dis_sd_pins,
+ .npins = ARRAY_SIZE(can1_dis_sd_pins),
+ .modemuxs = can1_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(can1_dis_sd_modemux),
+};
+
+/* Pad multiplexing for can1_dis_kbd device */
+/* Muxed with KBD */
+static const unsigned can1_dis_kbd_pins[] = { 201, 202 };
+static struct spear_muxreg can1_dis_kbd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_KBD_ROWCOL25_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux can1_dis_kbd_modemux[] = {
+ {
+ .muxregs = can1_dis_kbd_muxreg,
+ .nmuxregs = ARRAY_SIZE(can1_dis_kbd_muxreg),
+ },
+};
+
+static struct spear_pingroup can1_dis_kbd_pingroup = {
+ .name = "can1_dis_kbd_grp",
+ .pins = can1_dis_kbd_pins,
+ .npins = ARRAY_SIZE(can1_dis_kbd_pins),
+ .modemuxs = can1_dis_kbd_modemux,
+ .nmodemuxs = ARRAY_SIZE(can1_dis_kbd_modemux),
+};
+
+static const char *const can1_grps[] = { "can1_dis_sd_grp", "can1_dis_kbd_grp"
+};
+static struct spear_function can1_function = {
+ .name = "can1",
+ .groups = can1_grps,
+ .ngroups = ARRAY_SIZE(can1_grps),
+};
+
+/* Pad multiplexing for pci device */
+static const unsigned pci_sata_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 };
+#define PCI_SATA_MUXREG \
+ { \
+ .reg = PAD_FUNCTION_EN_0, \
+ .mask = PMX_MCI_DATA8_15_MASK, \
+ .val = 0, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_1, \
+ .mask = PMX_PCI_REG1_MASK, \
+ .val = 0, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_2, \
+ .mask = PMX_PCI_REG2_MASK, \
+ .val = 0, \
+ }
+
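+/*
+ * The pci_sata_pins pad set is shared by the three PCIe and three SATA
+ * instances. Each pcieN/sataN group below appends a PCIE_SATA_CFG write with
+ * PCIE_CFG_VAL(n) or SATA_CFG_VAL(n) to PCI_SATA_MUXREG to select the
+ * controller driving these pads.
+ */
+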
+/* pad multiplexing for pcie0 device */
+static struct spear_muxreg pcie0_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = PCIE_CFG_VAL(0),
+ .val = PCIE_CFG_VAL(0),
+ },
+};
+
+static struct spear_modemux pcie0_modemux[] = {
+ {
+ .muxregs = pcie0_muxreg,
+ .nmuxregs = ARRAY_SIZE(pcie0_muxreg),
+ },
+};
+
+static struct spear_pingroup pcie0_pingroup = {
+ .name = "pcie0_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = pcie0_modemux,
+ .nmodemuxs = ARRAY_SIZE(pcie0_modemux),
+};
+
+/* pad multiplexing for pcie1 device */
+static struct spear_muxreg pcie1_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = PCIE_CFG_VAL(1),
+ .val = PCIE_CFG_VAL(1),
+ },
+};
+
+static struct spear_modemux pcie1_modemux[] = {
+ {
+ .muxregs = pcie1_muxreg,
+ .nmuxregs = ARRAY_SIZE(pcie1_muxreg),
+ },
+};
+
+static struct spear_pingroup pcie1_pingroup = {
+ .name = "pcie1_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = pcie1_modemux,
+ .nmodemuxs = ARRAY_SIZE(pcie1_modemux),
+};
+
+/* pad multiplexing for pcie2 device */
+static struct spear_muxreg pcie2_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = PCIE_CFG_VAL(2),
+ .val = PCIE_CFG_VAL(2),
+ },
+};
+
+static struct spear_modemux pcie2_modemux[] = {
+ {
+ .muxregs = pcie2_muxreg,
+ .nmuxregs = ARRAY_SIZE(pcie2_muxreg),
+ },
+};
+
+static struct spear_pingroup pcie2_pingroup = {
+ .name = "pcie2_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = pcie2_modemux,
+ .nmodemuxs = ARRAY_SIZE(pcie2_modemux),
+};
+
+static const char *const pci_grps[] = { "pcie0_grp", "pcie1_grp", "pcie2_grp" };
+static struct spear_function pci_function = {
+ .name = "pci",
+ .groups = pci_grps,
+ .ngroups = ARRAY_SIZE(pci_grps),
+};
+
+/* pad multiplexing for sata0 device */
+static struct spear_muxreg sata0_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = SATA_CFG_VAL(0),
+ .val = SATA_CFG_VAL(0),
+ },
+};
+
+static struct spear_modemux sata0_modemux[] = {
+ {
+ .muxregs = sata0_muxreg,
+ .nmuxregs = ARRAY_SIZE(sata0_muxreg),
+ },
+};
+
+static struct spear_pingroup sata0_pingroup = {
+ .name = "sata0_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = sata0_modemux,
+ .nmodemuxs = ARRAY_SIZE(sata0_modemux),
+};
+
+/* pad multiplexing for sata1 device */
+static struct spear_muxreg sata1_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = SATA_CFG_VAL(1),
+ .val = SATA_CFG_VAL(1),
+ },
+};
+
+static struct spear_modemux sata1_modemux[] = {
+ {
+ .muxregs = sata1_muxreg,
+ .nmuxregs = ARRAY_SIZE(sata1_muxreg),
+ },
+};
+
+static struct spear_pingroup sata1_pingroup = {
+ .name = "sata1_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = sata1_modemux,
+ .nmodemuxs = ARRAY_SIZE(sata1_modemux),
+};
+
+/* pad multiplexing for sata2 device */
+static struct spear_muxreg sata2_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = SATA_CFG_VAL(2),
+ .val = SATA_CFG_VAL(2),
+ },
+};
+
+static struct spear_modemux sata2_modemux[] = {
+ {
+ .muxregs = sata2_muxreg,
+ .nmuxregs = ARRAY_SIZE(sata2_muxreg),
+ },
+};
+
+static struct spear_pingroup sata2_pingroup = {
+ .name = "sata2_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = sata2_modemux,
+ .nmodemuxs = ARRAY_SIZE(sata2_modemux),
+};
+
+static const char *const sata_grps[] = { "sata0_grp", "sata1_grp", "sata2_grp"
+};
+static struct spear_function sata_function = {
+ .name = "sata",
+ .groups = sata_grps,
+ .ngroups = ARRAY_SIZE(sata_grps),
+};
+
+/* Pad multiplexing for ssp1_dis_kbd device */
+static const unsigned ssp1_dis_kbd_pins[] = { 203, 204, 205, 206 };
+static struct spear_muxreg ssp1_dis_kbd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK |
+ PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK |
+ PMX_NFCE2_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux ssp1_dis_kbd_modemux[] = {
+ {
+ .muxregs = ssp1_dis_kbd_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp1_dis_kbd_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp1_dis_kbd_pingroup = {
+ .name = "ssp1_dis_kbd_grp",
+ .pins = ssp1_dis_kbd_pins,
+ .npins = ARRAY_SIZE(ssp1_dis_kbd_pins),
+ .modemuxs = ssp1_dis_kbd_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp1_dis_kbd_modemux),
+};
+
+/* Pad multiplexing for ssp1_dis_sd device */
+static const unsigned ssp1_dis_sd_pins[] = { 224, 226, 227, 228 };
+static struct spear_muxreg ssp1_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK |
+ PMX_MCICECF_MASK | PMX_MCICEXD_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux ssp1_dis_sd_modemux[] = {
+ {
+ .muxregs = ssp1_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp1_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp1_dis_sd_pingroup = {
+ .name = "ssp1_dis_sd_grp",
+ .pins = ssp1_dis_sd_pins,
+ .npins = ARRAY_SIZE(ssp1_dis_sd_pins),
+ .modemuxs = ssp1_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp1_dis_sd_modemux),
+};
+
+static const char *const ssp1_grps[] = { "ssp1_dis_kbd_grp",
+ "ssp1_dis_sd_grp" };
+static struct spear_function ssp1_function = {
+ .name = "ssp1",
+ .groups = ssp1_grps,
+ .ngroups = ARRAY_SIZE(ssp1_grps),
+};
+
+/* Pad multiplexing for gpt64 device */
+static const unsigned gpt64_pins[] = { 230, 231, 232, 245 };
+static struct spear_muxreg gpt64_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK
+ | PMX_MCILEDS_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux gpt64_modemux[] = {
+ {
+ .muxregs = gpt64_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt64_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt64_pingroup = {
+ .name = "gpt64_grp",
+ .pins = gpt64_pins,
+ .npins = ARRAY_SIZE(gpt64_pins),
+ .modemuxs = gpt64_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt64_modemux),
+};
+
+static const char *const gpt64_grps[] = { "gpt64_grp" };
+static struct spear_function gpt64_function = {
+ .name = "gpt64",
+ .groups = gpt64_grps,
+ .ngroups = ARRAY_SIZE(gpt64_grps),
+};
+
+/* pingroups */
+static struct spear_pingroup *spear1310_pingroups[] = {
+ &i2c0_pingroup,
+ &ssp0_pingroup,
+ &i2s0_pingroup,
+ &i2s1_pingroup,
+ &clcd_pingroup,
+ &clcd_high_res_pingroup,
+ &arm_gpio_pingroup,
+ &smi_2_chips_pingroup,
+ &smi_4_chips_pingroup,
+ &gmii_pingroup,
+ &rgmii_pingroup,
+ &smii_0_1_2_pingroup,
+ &ras_mii_txclk_pingroup,
+ &nand_8bit_pingroup,
+ &nand_16bit_pingroup,
+ &nand_4_chips_pingroup,
+ &keyboard_6x6_pingroup,
+ &keyboard_rowcol6_8_pingroup,
+ &uart0_pingroup,
+ &uart0_modem_pingroup,
+ &gpt0_tmr0_pingroup,
+ &gpt0_tmr1_pingroup,
+ &gpt1_tmr0_pingroup,
+ &gpt1_tmr1_pingroup,
+ &sdhci_pingroup,
+ &cf_pingroup,
+ &xd_pingroup,
+ &touch_xy_pingroup,
+ &ssp0_cs0_pingroup,
+ &ssp0_cs1_2_pingroup,
+ &uart_1_dis_i2c_pingroup,
+ &uart_1_dis_sd_pingroup,
+ &uart_2_3_pingroup,
+ &uart_4_pingroup,
+ &uart_5_pingroup,
+ &rs485_0_1_tdm_0_1_pingroup,
+ &i2c_1_2_pingroup,
+ &i2c3_dis_smi_clcd_pingroup,
+ &i2c3_dis_sd_i2s0_pingroup,
+ &i2c_4_5_dis_smi_pingroup,
+ &i2c4_dis_sd_pingroup,
+ &i2c5_dis_sd_pingroup,
+ &i2c_6_7_dis_kbd_pingroup,
+ &i2c6_dis_sd_pingroup,
+ &i2c7_dis_sd_pingroup,
+ &can0_dis_nor_pingroup,
+ &can0_dis_sd_pingroup,
+ &can1_dis_sd_pingroup,
+ &can1_dis_kbd_pingroup,
+ &pcie0_pingroup,
+ &pcie1_pingroup,
+ &pcie2_pingroup,
+ &sata0_pingroup,
+ &sata1_pingroup,
+ &sata2_pingroup,
+ &ssp1_dis_kbd_pingroup,
+ &ssp1_dis_sd_pingroup,
+ &gpt64_pingroup,
+};
+
+/* functions */
+static struct spear_function *spear1310_functions[] = {
+ &i2c0_function,
+ &ssp0_function,
+ &i2s0_function,
+ &i2s1_function,
+ &clcd_function,
+ &arm_gpio_function,
+ &smi_function,
+ &gmii_function,
+ &rgmii_function,
+ &smii_0_1_2_function,
+ &ras_mii_txclk_function,
+ &nand_function,
+ &keyboard_function,
+ &uart0_function,
+ &gpt0_function,
+ &gpt1_function,
+ &sdhci_function,
+ &cf_function,
+ &xd_function,
+ &touch_xy_function,
+ &uart1_function,
+ &uart2_3_function,
+ &uart4_function,
+ &uart5_function,
+ &rs485_0_1_tdm_0_1_function,
+ &i2c_1_2_function,
+ &i2c3_function,
+ &i2c_4_5_function,
+ &i2c_6_7_function,
+ &can0_function,
+ &can1_function,
+ &pci_function,
+ &sata_function,
+ &ssp1_function,
+ &gpt64_function,
+};
+
+static struct spear_pinctrl_machdata spear1310_machdata = {
+ .pins = spear1310_pins,
+ .npins = ARRAY_SIZE(spear1310_pins),
+ .groups = spear1310_pingroups,
+ .ngroups = ARRAY_SIZE(spear1310_pingroups),
+ .functions = spear1310_functions,
+ .nfunctions = ARRAY_SIZE(spear1310_functions),
+ .modes_supported = false,
+};
+
+static struct of_device_id spear1310_pinctrl_of_match[] __devinitdata = {
+ {
+ .compatible = "st,spear1310-pinmux",
+ },
+ {},
+};
+
+static int __devinit spear1310_pinctrl_probe(struct platform_device *pdev)
+{
+ return spear_pinctrl_probe(pdev, &spear1310_machdata);
+}
+
+static int __devexit spear1310_pinctrl_remove(struct platform_device *pdev)
+{
+ return spear_pinctrl_remove(pdev);
+}
+
+static struct platform_driver spear1310_pinctrl_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = spear1310_pinctrl_of_match,
+ },
+ .probe = spear1310_pinctrl_probe,
+ .remove = __devexit_p(spear1310_pinctrl_remove),
+};
+
+static int __init spear1310_pinctrl_init(void)
+{
+ return platform_driver_register(&spear1310_pinctrl_driver);
+}
+arch_initcall(spear1310_pinctrl_init);
+
+static void __exit spear1310_pinctrl_exit(void)
+{
+ platform_driver_unregister(&spear1310_pinctrl_driver);
+}
+module_exit(spear1310_pinctrl_exit);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
new file mode 100644
index 000000000000..a8ab2a6f51bf
--- /dev/null
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -0,0 +1,1989 @@
+/*
+ * Driver for the ST Microelectronics SPEAr1340 pinmux
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "pinctrl-spear.h"
+
+#define DRIVER_NAME "spear1340-pinmux"
+
+/* pins */
+static const struct pinctrl_pin_desc spear1340_pins[] = {
+ SPEAR_PIN_0_TO_101,
+ SPEAR_PIN_102_TO_245,
+ PINCTRL_PIN(246, "PLGPIO246"),
+ PINCTRL_PIN(247, "PLGPIO247"),
+ PINCTRL_PIN(248, "PLGPIO248"),
+ PINCTRL_PIN(249, "PLGPIO249"),
+ PINCTRL_PIN(250, "PLGPIO250"),
+ PINCTRL_PIN(251, "PLGPIO251"),
+};
+
+/* In SPEAr1340 there are two levels of pad muxing */
+/* - pads as gpio OR peripherals */
+#define PAD_FUNCTION_EN_1 0x668
+#define PAD_FUNCTION_EN_2 0x66C
+#define PAD_FUNCTION_EN_3 0x670
+#define PAD_FUNCTION_EN_4 0x674
+#define PAD_FUNCTION_EN_5 0x690
+#define PAD_FUNCTION_EN_6 0x694
+#define PAD_FUNCTION_EN_7 0x698
+#define PAD_FUNCTION_EN_8 0x69C
+
+/* - If peripherals, then primary OR alternate peripheral */
+#define PAD_SHARED_IP_EN_1 0x6A0
+#define PAD_SHARED_IP_EN_2 0x6A4
+
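+/*
+ * Example: pad 17 is first routed to peripherals via
+ * PWM1_AND_KBD_COL5_REG0_MASK in PAD_FUNCTION_EN_1, and KBD_COL5_MASK in
+ * PAD_SHARED_IP_EN_1 then selects between keyboard column 5 (bit set) and
+ * pwm1 (bit cleared), as done by the keyboard_col5 and pwm1 groups below.
+ */
+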
+/*
+ * Macros for first level of pmx - pads as gpio OR peripherals. There are 8
+ * registers with 32 bits each for handling gpio pads; register 8 has only 26
+ * relevant bits.
+ */
+/* macros for making pads as gpios */
+#define PADS_AS_GPIO_REG0_MASK 0xFFFFFFFE
+#define PADS_AS_GPIO_REGS_MASK 0xFFFFFFFF
+#define PADS_AS_GPIO_REG7_MASK 0x07FFFFFF
+
+/* macros for making pads as peripherals */
+#define FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK 0x00000FFE
+#define UART0_ENH_AND_GPT_REG0_MASK 0x0003F000
+#define PWM1_AND_KBD_COL5_REG0_MASK 0x00040000
+#define I2C1_REG0_MASK 0x01080000
+#define SPDIF_IN_REG0_MASK 0x00100000
+#define PWM2_AND_GPT0_TMR0_CPT_REG0_MASK 0x00400000
+#define PWM3_AND_GPT0_TMR1_CLK_REG0_MASK 0x00800000
+#define PWM0_AND_SSP0_CS1_REG0_MASK 0x02000000
+#define VIP_AND_CAM3_REG0_MASK 0xFC200000
+#define VIP_AND_CAM3_REG1_MASK 0x0000000F
+#define VIP_REG1_MASK 0x00001EF0
+#define VIP_AND_CAM2_REG1_MASK 0x007FE100
+#define VIP_AND_CAM1_REG1_MASK 0xFF800000
+#define VIP_AND_CAM1_REG2_MASK 0x00000003
+#define VIP_AND_CAM0_REG2_MASK 0x00001FFC
+#define SMI_REG2_MASK 0x0021E000
+#define SSP0_REG2_MASK 0x001E0000
+#define TS_AND_SSP0_CS2_REG2_MASK 0x00400000
+#define UART0_REG2_MASK 0x01800000
+#define UART1_REG2_MASK 0x06000000
+#define I2S_IN_REG2_MASK 0xF8000000
+#define DEVS_GRP_AND_MIPHY_DBG_REG3_MASK 0x000001FE
+#define I2S_OUT_REG3_MASK 0x000001EF
+#define I2S_IN_REG3_MASK 0x00000010
+#define GMAC_REG3_MASK 0xFFFFFE00
+#define GMAC_REG4_MASK 0x0000001F
+#define DEVS_GRP_AND_MIPHY_DBG_REG4_MASK 0x7FFFFF20
+#define SSP0_CS3_REG4_MASK 0x00000020
+#define I2C0_REG4_MASK 0x000000C0
+#define CEC0_REG4_MASK 0x00000100
+#define CEC1_REG4_MASK 0x00000200
+#define SPDIF_OUT_REG4_MASK 0x00000400
+#define CLCD_REG4_MASK 0x7FFFF800
+#define CLCD_AND_ARM_TRACE_REG4_MASK 0x80000000
+#define CLCD_AND_ARM_TRACE_REG5_MASK 0xFFFFFFFF
+#define CLCD_AND_ARM_TRACE_REG6_MASK 0x00000001
+#define FSMC_PNOR_AND_MCIF_REG6_MASK 0x073FFFFE
+#define MCIF_REG6_MASK 0xF8C00000
+#define MCIF_REG7_MASK 0x000043FF
+#define FSMC_8BIT_REG7_MASK 0x07FFBC00
+
+/* other registers */
+#define PERIP_CFG 0x42C
+ /* PERIP_CFG register masks */
+ #define SSP_CS_CTL_HW 0
+ #define SSP_CS_CTL_SW 1
+ #define SSP_CS_CTL_MASK 1
+ #define SSP_CS_CTL_SHIFT 21
+ #define SSP_CS_VAL_MASK 1
+ #define SSP_CS_VAL_SHIFT 20
+ #define SSP_CS_SEL_CS0 0
+ #define SSP_CS_SEL_CS1 1
+ #define SSP_CS_SEL_CS2 2
+ #define SSP_CS_SEL_MASK 3
+ #define SSP_CS_SEL_SHIFT 18
+
+ #define I2S_CHNL_2_0 (0)
+ #define I2S_CHNL_3_1 (1)
+ #define I2S_CHNL_5_1 (2)
+ #define I2S_CHNL_7_1 (3)
+ #define I2S_CHNL_PLAY_SHIFT (4)
+ #define I2S_CHNL_PLAY_MASK (3 << 4)
+ #define I2S_CHNL_REC_SHIFT (6)
+ #define I2S_CHNL_REC_MASK (3 << 6)
+
+ #define SPDIF_OUT_ENB_MASK (1 << 2)
+ #define SPDIF_OUT_ENB_SHIFT 2
+
+ #define MCIF_SEL_SD 1
+ #define MCIF_SEL_CF 2
+ #define MCIF_SEL_XD 3
+ #define MCIF_SEL_MASK 3
+ #define MCIF_SEL_SHIFT 0
+
+#define GMAC_CLK_CFG 0x248
+ #define GMAC_PHY_IF_GMII_VAL (0 << 3)
+ #define GMAC_PHY_IF_RGMII_VAL (1 << 3)
+ #define GMAC_PHY_IF_SGMII_VAL (2 << 3)
+ #define GMAC_PHY_IF_RMII_VAL (4 << 3)
+ #define GMAC_PHY_IF_SEL_MASK (7 << 3)
+ #define GMAC_PHY_INPUT_ENB_VAL 0
+ #define GMAC_PHY_SYNT_ENB_VAL 1
+ #define GMAC_PHY_CLK_MASK 1
+ #define GMAC_PHY_CLK_SHIFT 2
+ #define GMAC_PHY_125M_PAD_VAL 0
+ #define GMAC_PHY_PLL2_VAL 1
+ #define GMAC_PHY_OSC3_VAL 2
+ #define GMAC_PHY_INPUT_CLK_MASK 3
+ #define GMAC_PHY_INPUT_CLK_SHIFT 0
+
+#define PCIE_SATA_CFG 0x424
+ /* PCIE CFG masks */
+ #define PCIE_CFG_DEVICE_PRESENT (1 << 11)
+ #define PCIE_CFG_POWERUP_RESET (1 << 10)
+ #define PCIE_CFG_CORE_CLK_EN (1 << 9)
+ #define PCIE_CFG_AUX_CLK_EN (1 << 8)
+ #define SATA_CFG_TX_CLK_EN (1 << 4)
+ #define SATA_CFG_RX_CLK_EN (1 << 3)
+ #define SATA_CFG_POWERUP_RESET (1 << 2)
+ #define SATA_CFG_PM_CLK_EN (1 << 1)
+ #define PCIE_SATA_SEL_PCIE (0)
+ #define PCIE_SATA_SEL_SATA (1)
+ #define SATA_PCIE_CFG_MASK 0xF1F
+ #define PCIE_CFG_VAL (PCIE_SATA_SEL_PCIE | PCIE_CFG_AUX_CLK_EN | \
+ PCIE_CFG_CORE_CLK_EN | PCIE_CFG_POWERUP_RESET |\
+ PCIE_CFG_DEVICE_PRESENT)
+ #define SATA_CFG_VAL (PCIE_SATA_SEL_SATA | SATA_CFG_PM_CLK_EN | \
+ SATA_CFG_POWERUP_RESET | SATA_CFG_RX_CLK_EN | \
+ SATA_CFG_TX_CLK_EN)
+
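+/*
+ * Note: unlike SPEAr1310, where PCIE_CFG_VAL()/SATA_CFG_VAL() take a
+ * controller index, the PCIE_CFG_VAL/SATA_CFG_VAL values above are single
+ * fixed configurations written to PCIE_SATA_CFG.
+ */
+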
+/* Macros for second level of pmx - pads as primary OR alternate peripheral */
+/* Write 0 to enable FSMC_16_BIT */
+#define KBD_ROW_COL_MASK (1 << 0)
+
+/* Write 0 to enable UART0_ENH */
+#define GPT_MASK (1 << 1) /* Only clk & cpt */
+
+/* Write 0 to enable PWM1 */
+#define KBD_COL5_MASK (1 << 2)
+
+/* Write 0 to enable PWM2 */
+#define GPT0_TMR0_CPT_MASK (1 << 3) /* Only clk & cpt */
+
+/* Write 0 to enable PWM3 */
+#define GPT0_TMR1_CLK_MASK (1 << 4) /* Only clk & cpt */
+
+/* Write 0 to enable PWM0 */
+#define SSP0_CS1_MASK (1 << 5)
+
+/* Write 0 to enable VIP */
+#define CAM3_MASK (1 << 6)
+
+/* Write 0 to enable VIP */
+#define CAM2_MASK (1 << 7)
+
+/* Write 0 to enable VIP */
+#define CAM1_MASK (1 << 8)
+
+/* Write 0 to enable VIP */
+#define CAM0_MASK (1 << 9)
+
+/* Write 0 to enable TS */
+#define SSP0_CS2_MASK (1 << 10)
+
+/* Write 0 to enable FSMC PNOR */
+#define MCIF_MASK (1 << 11)
+
+/* Write 0 to enable CLCD */
+#define ARM_TRACE_MASK (1 << 12)
+
+/* Write 0 to enable I2S, SSP0_CS2, CEC0/1, SPDIF out and CLCD */
+#define MIPHY_DBG_MASK (1 << 13)
+
+/*
+ * Pad multiplexing for making all pads as gpios. This is done to override the
+ * values passed from the bootloader and start from scratch.
+ */
+static const unsigned pads_as_gpio_pins[] = { 251 };
+static struct spear_muxreg pads_as_gpio_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PADS_AS_GPIO_REG0_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_4,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_6,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_7,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_8,
+ .mask = PADS_AS_GPIO_REG7_MASK,
+ .val = 0x0,
+ },
+};
+
+static struct spear_modemux pads_as_gpio_modemux[] = {
+ {
+ .muxregs = pads_as_gpio_muxreg,
+ .nmuxregs = ARRAY_SIZE(pads_as_gpio_muxreg),
+ },
+};
+
+static struct spear_pingroup pads_as_gpio_pingroup = {
+ .name = "pads_as_gpio_grp",
+ .pins = pads_as_gpio_pins,
+ .npins = ARRAY_SIZE(pads_as_gpio_pins),
+ .modemuxs = pads_as_gpio_modemux,
+ .nmodemuxs = ARRAY_SIZE(pads_as_gpio_modemux),
+};
+
+static const char *const pads_as_gpio_grps[] = { "pads_as_gpio_grp" };
+static struct spear_function pads_as_gpio_function = {
+ .name = "pads_as_gpio",
+ .groups = pads_as_gpio_grps,
+ .ngroups = ARRAY_SIZE(pads_as_gpio_grps),
+};
+
+/* Pad multiplexing for fsmc_8bit device */
+static const unsigned fsmc_8bit_pins[] = { 233, 234, 235, 236, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249 };
+static struct spear_muxreg fsmc_8bit_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_8,
+ .mask = FSMC_8BIT_REG7_MASK,
+ .val = FSMC_8BIT_REG7_MASK,
+ }
+};
+
+static struct spear_modemux fsmc_8bit_modemux[] = {
+ {
+ .muxregs = fsmc_8bit_muxreg,
+ .nmuxregs = ARRAY_SIZE(fsmc_8bit_muxreg),
+ },
+};
+
+static struct spear_pingroup fsmc_8bit_pingroup = {
+ .name = "fsmc_8bit_grp",
+ .pins = fsmc_8bit_pins,
+ .npins = ARRAY_SIZE(fsmc_8bit_pins),
+ .modemuxs = fsmc_8bit_modemux,
+ .nmodemuxs = ARRAY_SIZE(fsmc_8bit_modemux),
+};
+
+/* Pad multiplexing for fsmc_16bit device */
+static const unsigned fsmc_16bit_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+static struct spear_muxreg fsmc_16bit_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = KBD_ROW_COL_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+ .val = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+ },
+};
+
+static struct spear_modemux fsmc_16bit_modemux[] = {
+ {
+ .muxregs = fsmc_16bit_muxreg,
+ .nmuxregs = ARRAY_SIZE(fsmc_16bit_muxreg),
+ },
+};
+
+static struct spear_pingroup fsmc_16bit_pingroup = {
+ .name = "fsmc_16bit_grp",
+ .pins = fsmc_16bit_pins,
+ .npins = ARRAY_SIZE(fsmc_16bit_pins),
+ .modemuxs = fsmc_16bit_modemux,
+ .nmodemuxs = ARRAY_SIZE(fsmc_16bit_modemux),
+};
+
+/* pad multiplexing for fsmc_pnor device */
+static const unsigned fsmc_pnor_pins[] = { 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 215, 216, 217 };
+static struct spear_muxreg fsmc_pnor_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = MCIF_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_7,
+ .mask = FSMC_PNOR_AND_MCIF_REG6_MASK,
+ .val = FSMC_PNOR_AND_MCIF_REG6_MASK,
+ },
+};
+
+static struct spear_modemux fsmc_pnor_modemux[] = {
+ {
+ .muxregs = fsmc_pnor_muxreg,
+ .nmuxregs = ARRAY_SIZE(fsmc_pnor_muxreg),
+ },
+};
+
+static struct spear_pingroup fsmc_pnor_pingroup = {
+ .name = "fsmc_pnor_grp",
+ .pins = fsmc_pnor_pins,
+ .npins = ARRAY_SIZE(fsmc_pnor_pins),
+ .modemuxs = fsmc_pnor_modemux,
+ .nmodemuxs = ARRAY_SIZE(fsmc_pnor_modemux),
+};
+
+static const char *const fsmc_grps[] = { "fsmc_8bit_grp", "fsmc_16bit_grp",
+ "fsmc_pnor_grp" };
+static struct spear_function fsmc_function = {
+ .name = "fsmc",
+ .groups = fsmc_grps,
+ .ngroups = ARRAY_SIZE(fsmc_grps),
+};
+
+/* pad multiplexing for keyboard rows-cols device */
+static const unsigned keyboard_row_col_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10 };
+static struct spear_muxreg keyboard_row_col_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = KBD_ROW_COL_MASK,
+ .val = KBD_ROW_COL_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+ .val = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+ },
+};
+
+static struct spear_modemux keyboard_row_col_modemux[] = {
+ {
+ .muxregs = keyboard_row_col_muxreg,
+ .nmuxregs = ARRAY_SIZE(keyboard_row_col_muxreg),
+ },
+};
+
+static struct spear_pingroup keyboard_row_col_pingroup = {
+ .name = "keyboard_row_col_grp",
+ .pins = keyboard_row_col_pins,
+ .npins = ARRAY_SIZE(keyboard_row_col_pins),
+ .modemuxs = keyboard_row_col_modemux,
+ .nmodemuxs = ARRAY_SIZE(keyboard_row_col_modemux),
+};
+
+/* pad multiplexing for keyboard col5 device */
+static const unsigned keyboard_col5_pins[] = { 17 };
+static struct spear_muxreg keyboard_col5_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = KBD_COL5_MASK,
+ .val = KBD_COL5_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM1_AND_KBD_COL5_REG0_MASK,
+ .val = PWM1_AND_KBD_COL5_REG0_MASK,
+ },
+};
+
+static struct spear_modemux keyboard_col5_modemux[] = {
+ {
+ .muxregs = keyboard_col5_muxreg,
+ .nmuxregs = ARRAY_SIZE(keyboard_col5_muxreg),
+ },
+};
+
+static struct spear_pingroup keyboard_col5_pingroup = {
+ .name = "keyboard_col5_grp",
+ .pins = keyboard_col5_pins,
+ .npins = ARRAY_SIZE(keyboard_col5_pins),
+ .modemuxs = keyboard_col5_modemux,
+ .nmodemuxs = ARRAY_SIZE(keyboard_col5_modemux),
+};
+
+static const char *const keyboard_grps[] = { "keyboard_row_col_grp",
+ "keyboard_col5_grp" };
+static struct spear_function keyboard_function = {
+ .name = "keyboard",
+ .groups = keyboard_grps,
+ .ngroups = ARRAY_SIZE(keyboard_grps),
+};
+
+/* pad multiplexing for spdif_in device */
+static const unsigned spdif_in_pins[] = { 19 };
+static struct spear_muxreg spdif_in_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = SPDIF_IN_REG0_MASK,
+ .val = SPDIF_IN_REG0_MASK,
+ },
+};
+
+static struct spear_modemux spdif_in_modemux[] = {
+ {
+ .muxregs = spdif_in_muxreg,
+ .nmuxregs = ARRAY_SIZE(spdif_in_muxreg),
+ },
+};
+
+static struct spear_pingroup spdif_in_pingroup = {
+ .name = "spdif_in_grp",
+ .pins = spdif_in_pins,
+ .npins = ARRAY_SIZE(spdif_in_pins),
+ .modemuxs = spdif_in_modemux,
+ .nmodemuxs = ARRAY_SIZE(spdif_in_modemux),
+};
+
+static const char *const spdif_in_grps[] = { "spdif_in_grp" };
+static struct spear_function spdif_in_function = {
+ .name = "spdif_in",
+ .groups = spdif_in_grps,
+ .ngroups = ARRAY_SIZE(spdif_in_grps),
+};
+
+/* pad multiplexing for spdif_out device */
+static const unsigned spdif_out_pins[] = { 137 };
+static struct spear_muxreg spdif_out_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = SPDIF_OUT_REG4_MASK,
+ .val = SPDIF_OUT_REG4_MASK,
+ }, {
+ .reg = PERIP_CFG,
+ .mask = SPDIF_OUT_ENB_MASK,
+ .val = SPDIF_OUT_ENB_MASK,
+ }
+};
+
+static struct spear_modemux spdif_out_modemux[] = {
+ {
+ .muxregs = spdif_out_muxreg,
+ .nmuxregs = ARRAY_SIZE(spdif_out_muxreg),
+ },
+};
+
+static struct spear_pingroup spdif_out_pingroup = {
+ .name = "spdif_out_grp",
+ .pins = spdif_out_pins,
+ .npins = ARRAY_SIZE(spdif_out_pins),
+ .modemuxs = spdif_out_modemux,
+ .nmodemuxs = ARRAY_SIZE(spdif_out_modemux),
+};
+
+static const char *const spdif_out_grps[] = { "spdif_out_grp" };
+static struct spear_function spdif_out_function = {
+ .name = "spdif_out",
+ .groups = spdif_out_grps,
+ .ngroups = ARRAY_SIZE(spdif_out_grps),
+};
+
+/* pad multiplexing for gpt_0_1 device */
+static const unsigned gpt_0_1_pins[] = { 11, 12, 13, 14, 15, 16, 21, 22 };
+static struct spear_muxreg gpt_0_1_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = GPT_MASK | GPT0_TMR0_CPT_MASK | GPT0_TMR1_CLK_MASK,
+ .val = GPT_MASK | GPT0_TMR0_CPT_MASK | GPT0_TMR1_CLK_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = UART0_ENH_AND_GPT_REG0_MASK |
+ PWM2_AND_GPT0_TMR0_CPT_REG0_MASK |
+ PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+ .val = UART0_ENH_AND_GPT_REG0_MASK |
+ PWM2_AND_GPT0_TMR0_CPT_REG0_MASK |
+ PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+ },
+};
+
+static struct spear_modemux gpt_0_1_modemux[] = {
+ {
+ .muxregs = gpt_0_1_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt_0_1_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt_0_1_pingroup = {
+ .name = "gpt_0_1_grp",
+ .pins = gpt_0_1_pins,
+ .npins = ARRAY_SIZE(gpt_0_1_pins),
+ .modemuxs = gpt_0_1_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt_0_1_modemux),
+};
+
+static const char *const gpt_0_1_grps[] = { "gpt_0_1_grp" };
+static struct spear_function gpt_0_1_function = {
+ .name = "gpt_0_1",
+ .groups = gpt_0_1_grps,
+ .ngroups = ARRAY_SIZE(gpt_0_1_grps),
+};
+
+/* pad multiplexing for pwm0 device */
+static const unsigned pwm0_pins[] = { 24 };
+static struct spear_muxreg pwm0_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = SSP0_CS1_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM0_AND_SSP0_CS1_REG0_MASK,
+ .val = PWM0_AND_SSP0_CS1_REG0_MASK,
+ },
+};
+
+static struct spear_modemux pwm0_modemux[] = {
+ {
+ .muxregs = pwm0_muxreg,
+ .nmuxregs = ARRAY_SIZE(pwm0_muxreg),
+ },
+};
+
+static struct spear_pingroup pwm0_pingroup = {
+ .name = "pwm0_grp",
+ .pins = pwm0_pins,
+ .npins = ARRAY_SIZE(pwm0_pins),
+ .modemuxs = pwm0_modemux,
+ .nmodemuxs = ARRAY_SIZE(pwm0_modemux),
+};
+
+/* pad multiplexing for pwm1 device */
+static const unsigned pwm1_pins[] = { 17 };
+static struct spear_muxreg pwm1_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = KBD_COL5_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM1_AND_KBD_COL5_REG0_MASK,
+ .val = PWM1_AND_KBD_COL5_REG0_MASK,
+ },
+};
+
+static struct spear_modemux pwm1_modemux[] = {
+ {
+ .muxregs = pwm1_muxreg,
+ .nmuxregs = ARRAY_SIZE(pwm1_muxreg),
+ },
+};
+
+static struct spear_pingroup pwm1_pingroup = {
+ .name = "pwm1_grp",
+ .pins = pwm1_pins,
+ .npins = ARRAY_SIZE(pwm1_pins),
+ .modemuxs = pwm1_modemux,
+ .nmodemuxs = ARRAY_SIZE(pwm1_modemux),
+};
+
+/* pad multiplexing for pwm2 device */
+static const unsigned pwm2_pins[] = { 21 };
+static struct spear_muxreg pwm2_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = GPT0_TMR0_CPT_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM2_AND_GPT0_TMR0_CPT_REG0_MASK,
+ .val = PWM2_AND_GPT0_TMR0_CPT_REG0_MASK,
+ },
+};
+
+static struct spear_modemux pwm2_modemux[] = {
+ {
+ .muxregs = pwm2_muxreg,
+ .nmuxregs = ARRAY_SIZE(pwm2_muxreg),
+ },
+};
+
+static struct spear_pingroup pwm2_pingroup = {
+ .name = "pwm2_grp",
+ .pins = pwm2_pins,
+ .npins = ARRAY_SIZE(pwm2_pins),
+ .modemuxs = pwm2_modemux,
+ .nmodemuxs = ARRAY_SIZE(pwm2_modemux),
+};
+
+/* pad multiplexing for pwm3 device */
+static const unsigned pwm3_pins[] = { 22 };
+static struct spear_muxreg pwm3_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = GPT0_TMR1_CLK_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+ .val = PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+ },
+};
+
+static struct spear_modemux pwm3_modemux[] = {
+ {
+ .muxregs = pwm3_muxreg,
+ .nmuxregs = ARRAY_SIZE(pwm3_muxreg),
+ },
+};
+
+static struct spear_pingroup pwm3_pingroup = {
+ .name = "pwm3_grp",
+ .pins = pwm3_pins,
+ .npins = ARRAY_SIZE(pwm3_pins),
+ .modemuxs = pwm3_modemux,
+ .nmodemuxs = ARRAY_SIZE(pwm3_modemux),
+};
+
+static const char *const pwm_grps[] = { "pwm0_grp", "pwm1_grp", "pwm2_grp",
+ "pwm3_grp" };
+static struct spear_function pwm_function = {
+ .name = "pwm",
+ .groups = pwm_grps,
+ .ngroups = ARRAY_SIZE(pwm_grps),
+};
+
+/* pad multiplexing for vip_mux device */
+static const unsigned vip_mux_pins[] = { 35, 36, 37, 38, 40, 41, 42, 43 };
+static struct spear_muxreg vip_mux_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_REG1_MASK,
+ .val = VIP_REG1_MASK,
+ },
+};
+
+static struct spear_modemux vip_mux_modemux[] = {
+ {
+ .muxregs = vip_mux_muxreg,
+ .nmuxregs = ARRAY_SIZE(vip_mux_muxreg),
+ },
+};
+
+static struct spear_pingroup vip_mux_pingroup = {
+ .name = "vip_mux_grp",
+ .pins = vip_mux_pins,
+ .npins = ARRAY_SIZE(vip_mux_pins),
+ .modemuxs = vip_mux_modemux,
+ .nmodemuxs = ARRAY_SIZE(vip_mux_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam0 (disables cam0) device */
+static const unsigned vip_mux_cam0_pins[] = { 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75 };
+static struct spear_muxreg vip_mux_cam0_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM0_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = VIP_AND_CAM0_REG2_MASK,
+ .val = VIP_AND_CAM0_REG2_MASK,
+ },
+};
+
+static struct spear_modemux vip_mux_cam0_modemux[] = {
+ {
+ .muxregs = vip_mux_cam0_muxreg,
+ .nmuxregs = ARRAY_SIZE(vip_mux_cam0_muxreg),
+ },
+};
+
+static struct spear_pingroup vip_mux_cam0_pingroup = {
+ .name = "vip_mux_cam0_grp",
+ .pins = vip_mux_cam0_pins,
+ .npins = ARRAY_SIZE(vip_mux_cam0_pins),
+ .modemuxs = vip_mux_cam0_modemux,
+ .nmodemuxs = ARRAY_SIZE(vip_mux_cam0_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam1 (disables cam1) device */
+static const unsigned vip_mux_cam1_pins[] = { 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64 };
+static struct spear_muxreg vip_mux_cam1_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM1_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM1_REG1_MASK,
+ .val = VIP_AND_CAM1_REG1_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = VIP_AND_CAM1_REG2_MASK,
+ .val = VIP_AND_CAM1_REG2_MASK,
+ },
+};
+
+static struct spear_modemux vip_mux_cam1_modemux[] = {
+ {
+ .muxregs = vip_mux_cam1_muxreg,
+ .nmuxregs = ARRAY_SIZE(vip_mux_cam1_muxreg),
+ },
+};
+
+static struct spear_pingroup vip_mux_cam1_pingroup = {
+ .name = "vip_mux_cam1_grp",
+ .pins = vip_mux_cam1_pins,
+ .npins = ARRAY_SIZE(vip_mux_cam1_pins),
+ .modemuxs = vip_mux_cam1_modemux,
+ .nmodemuxs = ARRAY_SIZE(vip_mux_cam1_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam2 (disables cam2) device */
+static const unsigned vip_mux_cam2_pins[] = { 39, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53 };
+static struct spear_muxreg vip_mux_cam2_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM2_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM2_REG1_MASK,
+ .val = VIP_AND_CAM2_REG1_MASK,
+ },
+};
+
+static struct spear_modemux vip_mux_cam2_modemux[] = {
+ {
+ .muxregs = vip_mux_cam2_muxreg,
+ .nmuxregs = ARRAY_SIZE(vip_mux_cam2_muxreg),
+ },
+};
+
+static struct spear_pingroup vip_mux_cam2_pingroup = {
+ .name = "vip_mux_cam2_grp",
+ .pins = vip_mux_cam2_pins,
+ .npins = ARRAY_SIZE(vip_mux_cam2_pins),
+ .modemuxs = vip_mux_cam2_modemux,
+ .nmodemuxs = ARRAY_SIZE(vip_mux_cam2_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam3 (disables cam3) device */
+static const unsigned vip_mux_cam3_pins[] = { 20, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34 };
+static struct spear_muxreg vip_mux_cam3_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM3_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = VIP_AND_CAM3_REG0_MASK,
+ .val = VIP_AND_CAM3_REG0_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM3_REG1_MASK,
+ .val = VIP_AND_CAM3_REG1_MASK,
+ },
+};
+
+static struct spear_modemux vip_mux_cam3_modemux[] = {
+ {
+ .muxregs = vip_mux_cam3_muxreg,
+ .nmuxregs = ARRAY_SIZE(vip_mux_cam3_muxreg),
+ },
+};
+
+static struct spear_pingroup vip_mux_cam3_pingroup = {
+ .name = "vip_mux_cam3_grp",
+ .pins = vip_mux_cam3_pins,
+ .npins = ARRAY_SIZE(vip_mux_cam3_pins),
+ .modemuxs = vip_mux_cam3_modemux,
+ .nmodemuxs = ARRAY_SIZE(vip_mux_cam3_modemux),
+};
+
+static const char *const vip_grps[] = { "vip_mux_grp", "vip_mux_cam0_grp" ,
+ "vip_mux_cam1_grp" , "vip_mux_cam2_grp", "vip_mux_cam3_grp" };
+static struct spear_function vip_function = {
+ .name = "vip",
+ .groups = vip_grps,
+ .ngroups = ARRAY_SIZE(vip_grps),
+};
+
+/* pad multiplexing for cam0 device */
+static const unsigned cam0_pins[] = { 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75
+};
+static struct spear_muxreg cam0_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM0_MASK,
+ .val = CAM0_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = VIP_AND_CAM0_REG2_MASK,
+ .val = VIP_AND_CAM0_REG2_MASK,
+ },
+};
+
+static struct spear_modemux cam0_modemux[] = {
+ {
+ .muxregs = cam0_muxreg,
+ .nmuxregs = ARRAY_SIZE(cam0_muxreg),
+ },
+};
+
+static struct spear_pingroup cam0_pingroup = {
+ .name = "cam0_grp",
+ .pins = cam0_pins,
+ .npins = ARRAY_SIZE(cam0_pins),
+ .modemuxs = cam0_modemux,
+ .nmodemuxs = ARRAY_SIZE(cam0_modemux),
+};
+
+static const char *const cam0_grps[] = { "cam0_grp" };
+static struct spear_function cam0_function = {
+ .name = "cam0",
+ .groups = cam0_grps,
+ .ngroups = ARRAY_SIZE(cam0_grps),
+};
+
+/* pad multiplexing for cam1 device */
+static const unsigned cam1_pins[] = { 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
+};
+static struct spear_muxreg cam1_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM1_MASK,
+ .val = CAM1_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM1_REG1_MASK,
+ .val = VIP_AND_CAM1_REG1_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = VIP_AND_CAM1_REG2_MASK,
+ .val = VIP_AND_CAM1_REG2_MASK,
+ },
+};
+
+static struct spear_modemux cam1_modemux[] = {
+ {
+ .muxregs = cam1_muxreg,
+ .nmuxregs = ARRAY_SIZE(cam1_muxreg),
+ },
+};
+
+static struct spear_pingroup cam1_pingroup = {
+ .name = "cam1_grp",
+ .pins = cam1_pins,
+ .npins = ARRAY_SIZE(cam1_pins),
+ .modemuxs = cam1_modemux,
+ .nmodemuxs = ARRAY_SIZE(cam1_modemux),
+};
+
+static const char *const cam1_grps[] = { "cam1_grp" };
+static struct spear_function cam1_function = {
+ .name = "cam1",
+ .groups = cam1_grps,
+ .ngroups = ARRAY_SIZE(cam1_grps),
+};
+
+/* pad multiplexing for cam2 device */
+static const unsigned cam2_pins[] = { 39, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53
+};
+static struct spear_muxreg cam2_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM2_MASK,
+ .val = CAM2_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM2_REG1_MASK,
+ .val = VIP_AND_CAM2_REG1_MASK,
+ },
+};
+
+static struct spear_modemux cam2_modemux[] = {
+ {
+ .muxregs = cam2_muxreg,
+ .nmuxregs = ARRAY_SIZE(cam2_muxreg),
+ },
+};
+
+static struct spear_pingroup cam2_pingroup = {
+ .name = "cam2_grp",
+ .pins = cam2_pins,
+ .npins = ARRAY_SIZE(cam2_pins),
+ .modemuxs = cam2_modemux,
+ .nmodemuxs = ARRAY_SIZE(cam2_modemux),
+};
+
+static const char *const cam2_grps[] = { "cam2_grp" };
+static struct spear_function cam2_function = {
+ .name = "cam2",
+ .groups = cam2_grps,
+ .ngroups = ARRAY_SIZE(cam2_grps),
+};
+
+/* pad multiplexing for cam3 device */
+static const unsigned cam3_pins[] = { 20, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34
+};
+static struct spear_muxreg cam3_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM3_MASK,
+ .val = CAM3_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = VIP_AND_CAM3_REG0_MASK,
+ .val = VIP_AND_CAM3_REG0_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM3_REG1_MASK,
+ .val = VIP_AND_CAM3_REG1_MASK,
+ },
+};
+
+static struct spear_modemux cam3_modemux[] = {
+ {
+ .muxregs = cam3_muxreg,
+ .nmuxregs = ARRAY_SIZE(cam3_muxreg),
+ },
+};
+
+static struct spear_pingroup cam3_pingroup = {
+ .name = "cam3_grp",
+ .pins = cam3_pins,
+ .npins = ARRAY_SIZE(cam3_pins),
+ .modemuxs = cam3_modemux,
+ .nmodemuxs = ARRAY_SIZE(cam3_modemux),
+};
+
+static const char *const cam3_grps[] = { "cam3_grp" };
+static struct spear_function cam3_function = {
+ .name = "cam3",
+ .groups = cam3_grps,
+ .ngroups = ARRAY_SIZE(cam3_grps),
+};
+
+/* pad multiplexing for smi device */
+static const unsigned smi_pins[] = { 76, 77, 78, 79, 84 };
+static struct spear_muxreg smi_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = SMI_REG2_MASK,
+ .val = SMI_REG2_MASK,
+ },
+};
+
+static struct spear_modemux smi_modemux[] = {
+ {
+ .muxregs = smi_muxreg,
+ .nmuxregs = ARRAY_SIZE(smi_muxreg),
+ },
+};
+
+static struct spear_pingroup smi_pingroup = {
+ .name = "smi_grp",
+ .pins = smi_pins,
+ .npins = ARRAY_SIZE(smi_pins),
+ .modemuxs = smi_modemux,
+ .nmodemuxs = ARRAY_SIZE(smi_modemux),
+};
+
+static const char *const smi_grps[] = { "smi_grp" };
+static struct spear_function smi_function = {
+ .name = "smi",
+ .groups = smi_grps,
+ .ngroups = ARRAY_SIZE(smi_grps),
+};
+
+/* pad multiplexing for ssp0 device */
+static const unsigned ssp0_pins[] = { 80, 81, 82, 83 };
+static struct spear_muxreg ssp0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = SSP0_REG2_MASK,
+ .val = SSP0_REG2_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_modemux[] = {
+ {
+ .muxregs = ssp0_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_pingroup = {
+ .name = "ssp0_grp",
+ .pins = ssp0_pins,
+ .npins = ARRAY_SIZE(ssp0_pins),
+ .modemuxs = ssp0_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_modemux),
+};
+
+/* pad multiplexing for ssp0_cs1 device */
+static const unsigned ssp0_cs1_pins[] = { 24 };
+static struct spear_muxreg ssp0_cs1_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = SSP0_CS1_MASK,
+ .val = SSP0_CS1_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM0_AND_SSP0_CS1_REG0_MASK,
+ .val = PWM0_AND_SSP0_CS1_REG0_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_cs1_modemux[] = {
+ {
+ .muxregs = ssp0_cs1_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_cs1_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_cs1_pingroup = {
+ .name = "ssp0_cs1_grp",
+ .pins = ssp0_cs1_pins,
+ .npins = ARRAY_SIZE(ssp0_cs1_pins),
+ .modemuxs = ssp0_cs1_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_cs1_modemux),
+};
+
+/* pad multiplexing for ssp0_cs2 device */
+static const unsigned ssp0_cs2_pins[] = { 85 };
+static struct spear_muxreg ssp0_cs2_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = SSP0_CS2_MASK,
+ .val = SSP0_CS2_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = TS_AND_SSP0_CS2_REG2_MASK,
+ .val = TS_AND_SSP0_CS2_REG2_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_cs2_modemux[] = {
+ {
+ .muxregs = ssp0_cs2_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_cs2_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_cs2_pingroup = {
+ .name = "ssp0_cs2_grp",
+ .pins = ssp0_cs2_pins,
+ .npins = ARRAY_SIZE(ssp0_cs2_pins),
+ .modemuxs = ssp0_cs2_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_cs2_modemux),
+};
+
+/* pad multiplexing for ssp0_cs3 device */
+static const unsigned ssp0_cs3_pins[] = { 132 };
+static struct spear_muxreg ssp0_cs3_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = SSP0_CS3_REG4_MASK,
+ .val = SSP0_CS3_REG4_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_cs3_modemux[] = {
+ {
+ .muxregs = ssp0_cs3_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_cs3_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_cs3_pingroup = {
+ .name = "ssp0_cs3_grp",
+ .pins = ssp0_cs3_pins,
+ .npins = ARRAY_SIZE(ssp0_cs3_pins),
+ .modemuxs = ssp0_cs3_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_cs3_modemux),
+};
+
+static const char *const ssp0_grps[] = { "ssp0_grp", "ssp0_cs1_grp",
+ "ssp0_cs2_grp", "ssp0_cs3_grp" };
+static struct spear_function ssp0_function = {
+ .name = "ssp0",
+ .groups = ssp0_grps,
+ .ngroups = ARRAY_SIZE(ssp0_grps),
+};
+
+/* pad multiplexing for uart0 device */
+static const unsigned uart0_pins[] = { 86, 87 };
+static struct spear_muxreg uart0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = UART0_REG2_MASK,
+ .val = UART0_REG2_MASK,
+ },
+};
+
+static struct spear_modemux uart0_modemux[] = {
+ {
+ .muxregs = uart0_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart0_muxreg),
+ },
+};
+
+static struct spear_pingroup uart0_pingroup = {
+ .name = "uart0_grp",
+ .pins = uart0_pins,
+ .npins = ARRAY_SIZE(uart0_pins),
+ .modemuxs = uart0_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart0_modemux),
+};
+
+/* pad multiplexing for uart0_enh device */
+static const unsigned uart0_enh_pins[] = { 11, 12, 13, 14, 15, 16 };
+static struct spear_muxreg uart0_enh_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = GPT_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = UART0_ENH_AND_GPT_REG0_MASK,
+ .val = UART0_ENH_AND_GPT_REG0_MASK,
+ },
+};
+
+static struct spear_modemux uart0_enh_modemux[] = {
+ {
+ .muxregs = uart0_enh_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart0_enh_muxreg),
+ },
+};
+
+static struct spear_pingroup uart0_enh_pingroup = {
+ .name = "uart0_enh_grp",
+ .pins = uart0_enh_pins,
+ .npins = ARRAY_SIZE(uart0_enh_pins),
+ .modemuxs = uart0_enh_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart0_enh_modemux),
+};
+
+static const char *const uart0_grps[] = { "uart0_grp", "uart0_enh_grp" };
+static struct spear_function uart0_function = {
+ .name = "uart0",
+ .groups = uart0_grps,
+ .ngroups = ARRAY_SIZE(uart0_grps),
+};
+
+/* pad multiplexing for uart1 device */
+static const unsigned uart1_pins[] = { 88, 89 };
+static struct spear_muxreg uart1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = UART1_REG2_MASK,
+ .val = UART1_REG2_MASK,
+ },
+};
+
+static struct spear_modemux uart1_modemux[] = {
+ {
+ .muxregs = uart1_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart1_muxreg),
+ },
+};
+
+static struct spear_pingroup uart1_pingroup = {
+ .name = "uart1_grp",
+ .pins = uart1_pins,
+ .npins = ARRAY_SIZE(uart1_pins),
+ .modemuxs = uart1_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart1_modemux),
+};
+
+static const char *const uart1_grps[] = { "uart1_grp" };
+static struct spear_function uart1_function = {
+ .name = "uart1",
+ .groups = uart1_grps,
+ .ngroups = ARRAY_SIZE(uart1_grps),
+};
+
+/* pad multiplexing for i2s_in device */
+static const unsigned i2s_in_pins[] = { 90, 91, 92, 93, 94, 99 };
+static struct spear_muxreg i2s_in_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = I2S_IN_REG2_MASK,
+ .val = I2S_IN_REG2_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_4,
+ .mask = I2S_IN_REG3_MASK,
+ .val = I2S_IN_REG3_MASK,
+ },
+};
+
+static struct spear_modemux i2s_in_modemux[] = {
+ {
+ .muxregs = i2s_in_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2s_in_muxreg),
+ },
+};
+
+static struct spear_pingroup i2s_in_pingroup = {
+ .name = "i2s_in_grp",
+ .pins = i2s_in_pins,
+ .npins = ARRAY_SIZE(i2s_in_pins),
+ .modemuxs = i2s_in_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2s_in_modemux),
+};
+
+/* pad multiplexing for i2s_out device */
+static const unsigned i2s_out_pins[] = { 95, 96, 97, 98, 100, 101, 102, 103 };
+static struct spear_muxreg i2s_out_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_4,
+ .mask = I2S_OUT_REG3_MASK,
+ .val = I2S_OUT_REG3_MASK,
+ },
+};
+
+static struct spear_modemux i2s_out_modemux[] = {
+ {
+ .muxregs = i2s_out_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2s_out_muxreg),
+ },
+};
+
+static struct spear_pingroup i2s_out_pingroup = {
+ .name = "i2s_out_grp",
+ .pins = i2s_out_pins,
+ .npins = ARRAY_SIZE(i2s_out_pins),
+ .modemuxs = i2s_out_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2s_out_modemux),
+};
+
+static const char *const i2s_grps[] = { "i2s_in_grp", "i2s_out_grp" };
+static struct spear_function i2s_function = {
+ .name = "i2s",
+ .groups = i2s_grps,
+ .ngroups = ARRAY_SIZE(i2s_grps),
+};
+
+/* pad multiplexing for gmac device */
+static const unsigned gmac_pins[] = { 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
+ 126, 127, 128, 129, 130, 131 };
+#define GMAC_MUXREG \
+ { \
+ .reg = PAD_FUNCTION_EN_4, \
+ .mask = GMAC_REG3_MASK, \
+ .val = GMAC_REG3_MASK, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_5, \
+ .mask = GMAC_REG4_MASK, \
+ .val = GMAC_REG4_MASK, \
+ }
+
+/* pad multiplexing for gmii device */
+static struct spear_muxreg gmii_muxreg[] = {
+ GMAC_MUXREG,
+ {
+ .reg = GMAC_CLK_CFG,
+ .mask = GMAC_PHY_IF_SEL_MASK,
+ .val = GMAC_PHY_IF_GMII_VAL,
+ },
+};
+
+static struct spear_modemux gmii_modemux[] = {
+ {
+ .muxregs = gmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(gmii_muxreg),
+ },
+};
+
+static struct spear_pingroup gmii_pingroup = {
+ .name = "gmii_grp",
+ .pins = gmac_pins,
+ .npins = ARRAY_SIZE(gmac_pins),
+ .modemuxs = gmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(gmii_modemux),
+};
+
+/* pad multiplexing for rgmii device */
+static struct spear_muxreg rgmii_muxreg[] = {
+ GMAC_MUXREG,
+ {
+ .reg = GMAC_CLK_CFG,
+ .mask = GMAC_PHY_IF_SEL_MASK,
+ .val = GMAC_PHY_IF_RGMII_VAL,
+ },
+};
+
+static struct spear_modemux rgmii_modemux[] = {
+ {
+ .muxregs = rgmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(rgmii_muxreg),
+ },
+};
+
+static struct spear_pingroup rgmii_pingroup = {
+ .name = "rgmii_grp",
+ .pins = gmac_pins,
+ .npins = ARRAY_SIZE(gmac_pins),
+ .modemuxs = rgmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(rgmii_modemux),
+};
+
+/* pad multiplexing for rmii device */
+static struct spear_muxreg rmii_muxreg[] = {
+ GMAC_MUXREG,
+ {
+ .reg = GMAC_CLK_CFG,
+ .mask = GMAC_PHY_IF_SEL_MASK,
+ .val = GMAC_PHY_IF_RMII_VAL,
+ },
+};
+
+static struct spear_modemux rmii_modemux[] = {
+ {
+ .muxregs = rmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(rmii_muxreg),
+ },
+};
+
+static struct spear_pingroup rmii_pingroup = {
+ .name = "rmii_grp",
+ .pins = gmac_pins,
+ .npins = ARRAY_SIZE(gmac_pins),
+ .modemuxs = rmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(rmii_modemux),
+};
+
+/* pad multiplexing for sgmii device */
+static struct spear_muxreg sgmii_muxreg[] = {
+ GMAC_MUXREG,
+ {
+ .reg = GMAC_CLK_CFG,
+ .mask = GMAC_PHY_IF_SEL_MASK,
+ .val = GMAC_PHY_IF_SGMII_VAL,
+ },
+};
+
+static struct spear_modemux sgmii_modemux[] = {
+ {
+ .muxregs = sgmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(sgmii_muxreg),
+ },
+};
+
+static struct spear_pingroup sgmii_pingroup = {
+ .name = "sgmii_grp",
+ .pins = gmac_pins,
+ .npins = ARRAY_SIZE(gmac_pins),
+ .modemuxs = sgmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(sgmii_modemux),
+};
+
+static const char *const gmac_grps[] = { "gmii_grp", "rgmii_grp", "rmii_grp",
+ "sgmii_grp" };
+static struct spear_function gmac_function = {
+ .name = "gmac",
+ .groups = gmac_grps,
+ .ngroups = ARRAY_SIZE(gmac_grps),
+};
+
+/* pad multiplexing for i2c0 device */
+static const unsigned i2c0_pins[] = { 133, 134 };
+static struct spear_muxreg i2c0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = I2C0_REG4_MASK,
+ .val = I2C0_REG4_MASK,
+ },
+};
+
+static struct spear_modemux i2c0_modemux[] = {
+ {
+ .muxregs = i2c0_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c0_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c0_pingroup = {
+ .name = "i2c0_grp",
+ .pins = i2c0_pins,
+ .npins = ARRAY_SIZE(i2c0_pins),
+ .modemuxs = i2c0_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c0_modemux),
+};
+
+static const char *const i2c0_grps[] = { "i2c0_grp" };
+static struct spear_function i2c0_function = {
+ .name = "i2c0",
+ .groups = i2c0_grps,
+ .ngroups = ARRAY_SIZE(i2c0_grps),
+};
+
+/* pad multiplexing for i2c1 device */
+static const unsigned i2c1_pins[] = { 18, 23 };
+static struct spear_muxreg i2c1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = I2C1_REG0_MASK,
+ .val = I2C1_REG0_MASK,
+ },
+};
+
+static struct spear_modemux i2c1_modemux[] = {
+ {
+ .muxregs = i2c1_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c1_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c1_pingroup = {
+ .name = "i2c1_grp",
+ .pins = i2c1_pins,
+ .npins = ARRAY_SIZE(i2c1_pins),
+ .modemuxs = i2c1_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c1_modemux),
+};
+
+static const char *const i2c1_grps[] = { "i2c1_grp" };
+static struct spear_function i2c1_function = {
+ .name = "i2c1",
+ .groups = i2c1_grps,
+ .ngroups = ARRAY_SIZE(i2c1_grps),
+};
+
+/* pad multiplexing for cec0 device */
+static const unsigned cec0_pins[] = { 135 };
+static struct spear_muxreg cec0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = CEC0_REG4_MASK,
+ .val = CEC0_REG4_MASK,
+ },
+};
+
+static struct spear_modemux cec0_modemux[] = {
+ {
+ .muxregs = cec0_muxreg,
+ .nmuxregs = ARRAY_SIZE(cec0_muxreg),
+ },
+};
+
+static struct spear_pingroup cec0_pingroup = {
+ .name = "cec0_grp",
+ .pins = cec0_pins,
+ .npins = ARRAY_SIZE(cec0_pins),
+ .modemuxs = cec0_modemux,
+ .nmodemuxs = ARRAY_SIZE(cec0_modemux),
+};
+
+static const char *const cec0_grps[] = { "cec0_grp" };
+static struct spear_function cec0_function = {
+ .name = "cec0",
+ .groups = cec0_grps,
+ .ngroups = ARRAY_SIZE(cec0_grps),
+};
+
+/* pad multiplexing for cec1 device */
+static const unsigned cec1_pins[] = { 136 };
+static struct spear_muxreg cec1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = CEC1_REG4_MASK,
+ .val = CEC1_REG4_MASK,
+ },
+};
+
+static struct spear_modemux cec1_modemux[] = {
+ {
+ .muxregs = cec1_muxreg,
+ .nmuxregs = ARRAY_SIZE(cec1_muxreg),
+ },
+};
+
+static struct spear_pingroup cec1_pingroup = {
+ .name = "cec1_grp",
+ .pins = cec1_pins,
+ .npins = ARRAY_SIZE(cec1_pins),
+ .modemuxs = cec1_modemux,
+ .nmodemuxs = ARRAY_SIZE(cec1_modemux),
+};
+
+static const char *const cec1_grps[] = { "cec1_grp" };
+static struct spear_function cec1_function = {
+ .name = "cec1",
+ .groups = cec1_grps,
+ .ngroups = ARRAY_SIZE(cec1_grps),
+};
+
+/* pad multiplexing for mcif devices */
+static const unsigned mcif_pins[] = { 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 237 };
+#define MCIF_MUXREG \
+ { \
+ .reg = PAD_SHARED_IP_EN_1, \
+ .mask = MCIF_MASK, \
+ .val = MCIF_MASK, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_7, \
+ .mask = FSMC_PNOR_AND_MCIF_REG6_MASK | MCIF_REG6_MASK, \
+ .val = FSMC_PNOR_AND_MCIF_REG6_MASK | MCIF_REG6_MASK, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_8, \
+ .mask = MCIF_REG7_MASK, \
+ .val = MCIF_REG7_MASK, \
+ }
+
+/* Pad multiplexing for sdhci device */
+static struct spear_muxreg sdhci_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_SD,
+ },
+};
+
+static struct spear_modemux sdhci_modemux[] = {
+ {
+ .muxregs = sdhci_muxreg,
+ .nmuxregs = ARRAY_SIZE(sdhci_muxreg),
+ },
+};
+
+static struct spear_pingroup sdhci_pingroup = {
+ .name = "sdhci_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = sdhci_modemux,
+ .nmodemuxs = ARRAY_SIZE(sdhci_modemux),
+};
+
+static const char *const sdhci_grps[] = { "sdhci_grp" };
+static struct spear_function sdhci_function = {
+ .name = "sdhci",
+ .groups = sdhci_grps,
+ .ngroups = ARRAY_SIZE(sdhci_grps),
+};
+
+/* Pad multiplexing for cf device */
+static struct spear_muxreg cf_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_CF,
+ },
+};
+
+static struct spear_modemux cf_modemux[] = {
+ {
+ .muxregs = cf_muxreg,
+ .nmuxregs = ARRAY_SIZE(cf_muxreg),
+ },
+};
+
+static struct spear_pingroup cf_pingroup = {
+ .name = "cf_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = cf_modemux,
+ .nmodemuxs = ARRAY_SIZE(cf_modemux),
+};
+
+static const char *const cf_grps[] = { "cf_grp" };
+static struct spear_function cf_function = {
+ .name = "cf",
+ .groups = cf_grps,
+ .ngroups = ARRAY_SIZE(cf_grps),
+};
+
+/* Pad multiplexing for xd device */
+static struct spear_muxreg xd_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_XD,
+ },
+};
+
+static struct spear_modemux xd_modemux[] = {
+ {
+ .muxregs = xd_muxreg,
+ .nmuxregs = ARRAY_SIZE(xd_muxreg),
+ },
+};
+
+static struct spear_pingroup xd_pingroup = {
+ .name = "xd_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = xd_modemux,
+ .nmodemuxs = ARRAY_SIZE(xd_modemux),
+};
+
+static const char *const xd_grps[] = { "xd_grp" };
+static struct spear_function xd_function = {
+ .name = "xd",
+ .groups = xd_grps,
+ .ngroups = ARRAY_SIZE(xd_grps),
+};
+
+/* pad multiplexing for clcd device */
+static const unsigned clcd_pins[] = { 138, 139, 140, 141, 142, 143, 144, 145,
+ 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173,
+ 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191 };
+static struct spear_muxreg clcd_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = ARM_TRACE_MASK | MIPHY_DBG_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK,
+ .val = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_6,
+ .mask = CLCD_AND_ARM_TRACE_REG5_MASK,
+ .val = CLCD_AND_ARM_TRACE_REG5_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_7,
+ .mask = CLCD_AND_ARM_TRACE_REG6_MASK,
+ .val = CLCD_AND_ARM_TRACE_REG6_MASK,
+ },
+};
+
+static struct spear_modemux clcd_modemux[] = {
+ {
+ .muxregs = clcd_muxreg,
+ .nmuxregs = ARRAY_SIZE(clcd_muxreg),
+ },
+};
+
+static struct spear_pingroup clcd_pingroup = {
+ .name = "clcd_grp",
+ .pins = clcd_pins,
+ .npins = ARRAY_SIZE(clcd_pins),
+ .modemuxs = clcd_modemux,
+ .nmodemuxs = ARRAY_SIZE(clcd_modemux),
+};
+
+static const char *const clcd_grps[] = { "clcd_grp" };
+static struct spear_function clcd_function = {
+ .name = "clcd",
+ .groups = clcd_grps,
+ .ngroups = ARRAY_SIZE(clcd_grps),
+};
+
+/* pad multiplexing for arm_trace device */
+static const unsigned arm_trace_pins[] = { 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200 };
+static struct spear_muxreg arm_trace_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = ARM_TRACE_MASK,
+ .val = ARM_TRACE_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = CLCD_AND_ARM_TRACE_REG4_MASK,
+ .val = CLCD_AND_ARM_TRACE_REG4_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_6,
+ .mask = CLCD_AND_ARM_TRACE_REG5_MASK,
+ .val = CLCD_AND_ARM_TRACE_REG5_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_7,
+ .mask = CLCD_AND_ARM_TRACE_REG6_MASK,
+ .val = CLCD_AND_ARM_TRACE_REG6_MASK,
+ },
+};
+
+static struct spear_modemux arm_trace_modemux[] = {
+ {
+ .muxregs = arm_trace_muxreg,
+ .nmuxregs = ARRAY_SIZE(arm_trace_muxreg),
+ },
+};
+
+static struct spear_pingroup arm_trace_pingroup = {
+ .name = "arm_trace_grp",
+ .pins = arm_trace_pins,
+ .npins = ARRAY_SIZE(arm_trace_pins),
+ .modemuxs = arm_trace_modemux,
+ .nmodemuxs = ARRAY_SIZE(arm_trace_modemux),
+};
+
+static const char *const arm_trace_grps[] = { "arm_trace_grp" };
+static struct spear_function arm_trace_function = {
+ .name = "arm_trace",
+ .groups = arm_trace_grps,
+ .ngroups = ARRAY_SIZE(arm_trace_grps),
+};
+
+/* pad multiplexing for miphy_dbg device */
+static const unsigned miphy_dbg_pins[] = { 96, 97, 98, 99, 100, 101, 102, 103,
+ 132, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157 };
+static struct spear_muxreg miphy_dbg_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = MIPHY_DBG_MASK,
+ .val = MIPHY_DBG_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = DEVS_GRP_AND_MIPHY_DBG_REG4_MASK,
+ .val = DEVS_GRP_AND_MIPHY_DBG_REG4_MASK,
+ },
+};
+
+static struct spear_modemux miphy_dbg_modemux[] = {
+ {
+ .muxregs = miphy_dbg_muxreg,
+ .nmuxregs = ARRAY_SIZE(miphy_dbg_muxreg),
+ },
+};
+
+static struct spear_pingroup miphy_dbg_pingroup = {
+ .name = "miphy_dbg_grp",
+ .pins = miphy_dbg_pins,
+ .npins = ARRAY_SIZE(miphy_dbg_pins),
+ .modemuxs = miphy_dbg_modemux,
+ .nmodemuxs = ARRAY_SIZE(miphy_dbg_modemux),
+};
+
+static const char *const miphy_dbg_grps[] = { "miphy_dbg_grp" };
+static struct spear_function miphy_dbg_function = {
+ .name = "miphy_dbg",
+ .groups = miphy_dbg_grps,
+ .ngroups = ARRAY_SIZE(miphy_dbg_grps),
+};
+
+/* pad multiplexing for pcie device */
+static const unsigned pcie_pins[] = { 250 };
+static struct spear_muxreg pcie_muxreg[] = {
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = SATA_PCIE_CFG_MASK,
+ .val = PCIE_CFG_VAL,
+ },
+};
+
+static struct spear_modemux pcie_modemux[] = {
+ {
+ .muxregs = pcie_muxreg,
+ .nmuxregs = ARRAY_SIZE(pcie_muxreg),
+ },
+};
+
+static struct spear_pingroup pcie_pingroup = {
+ .name = "pcie_grp",
+ .pins = pcie_pins,
+ .npins = ARRAY_SIZE(pcie_pins),
+ .modemuxs = pcie_modemux,
+ .nmodemuxs = ARRAY_SIZE(pcie_modemux),
+};
+
+static const char *const pcie_grps[] = { "pcie_grp" };
+static struct spear_function pcie_function = {
+ .name = "pcie",
+ .groups = pcie_grps,
+ .ngroups = ARRAY_SIZE(pcie_grps),
+};
+
+/* pad multiplexing for sata device */
+static const unsigned sata_pins[] = { 250 };
+static struct spear_muxreg sata_muxreg[] = {
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = SATA_PCIE_CFG_MASK,
+ .val = SATA_CFG_VAL,
+ },
+};
+
+static struct spear_modemux sata_modemux[] = {
+ {
+ .muxregs = sata_muxreg,
+ .nmuxregs = ARRAY_SIZE(sata_muxreg),
+ },
+};
+
+static struct spear_pingroup sata_pingroup = {
+ .name = "sata_grp",
+ .pins = sata_pins,
+ .npins = ARRAY_SIZE(sata_pins),
+ .modemuxs = sata_modemux,
+ .nmodemuxs = ARRAY_SIZE(sata_modemux),
+};
+
+static const char *const sata_grps[] = { "sata_grp" };
+static struct spear_function sata_function = {
+ .name = "sata",
+ .groups = sata_grps,
+ .ngroups = ARRAY_SIZE(sata_grps),
+};
+
+/* pingroups */
+static struct spear_pingroup *spear1340_pingroups[] = {
+ &pads_as_gpio_pingroup,
+ &fsmc_8bit_pingroup,
+ &fsmc_16bit_pingroup,
+ &fsmc_pnor_pingroup,
+ &keyboard_row_col_pingroup,
+ &keyboard_col5_pingroup,
+ &spdif_in_pingroup,
+ &spdif_out_pingroup,
+ &gpt_0_1_pingroup,
+ &pwm0_pingroup,
+ &pwm1_pingroup,
+ &pwm2_pingroup,
+ &pwm3_pingroup,
+ &vip_mux_pingroup,
+ &vip_mux_cam0_pingroup,
+ &vip_mux_cam1_pingroup,
+ &vip_mux_cam2_pingroup,
+ &vip_mux_cam3_pingroup,
+ &cam0_pingroup,
+ &cam1_pingroup,
+ &cam2_pingroup,
+ &cam3_pingroup,
+ &smi_pingroup,
+ &ssp0_pingroup,
+ &ssp0_cs1_pingroup,
+ &ssp0_cs2_pingroup,
+ &ssp0_cs3_pingroup,
+ &uart0_pingroup,
+ &uart0_enh_pingroup,
+ &uart1_pingroup,
+ &i2s_in_pingroup,
+ &i2s_out_pingroup,
+ &gmii_pingroup,
+ &rgmii_pingroup,
+ &rmii_pingroup,
+ &sgmii_pingroup,
+ &i2c0_pingroup,
+ &i2c1_pingroup,
+ &cec0_pingroup,
+ &cec1_pingroup,
+ &sdhci_pingroup,
+ &cf_pingroup,
+ &xd_pingroup,
+ &clcd_pingroup,
+ &arm_trace_pingroup,
+ &miphy_dbg_pingroup,
+ &pcie_pingroup,
+ &sata_pingroup,
+};
+
+/* functions */
+static struct spear_function *spear1340_functions[] = {
+ &pads_as_gpio_function,
+ &fsmc_function,
+ &keyboard_function,
+ &spdif_in_function,
+ &spdif_out_function,
+ &gpt_0_1_function,
+ &pwm_function,
+ &vip_function,
+ &cam0_function,
+ &cam1_function,
+ &cam2_function,
+ &cam3_function,
+ &smi_function,
+ &ssp0_function,
+ &uart0_function,
+ &uart1_function,
+ &i2s_function,
+ &gmac_function,
+ &i2c0_function,
+ &i2c1_function,
+ &cec0_function,
+ &cec1_function,
+ &sdhci_function,
+ &cf_function,
+ &xd_function,
+ &clcd_function,
+ &arm_trace_function,
+ &miphy_dbg_function,
+ &pcie_function,
+ &sata_function,
+};
+
+static struct spear_pinctrl_machdata spear1340_machdata = {
+ .pins = spear1340_pins,
+ .npins = ARRAY_SIZE(spear1340_pins),
+ .groups = spear1340_pingroups,
+ .ngroups = ARRAY_SIZE(spear1340_pingroups),
+ .functions = spear1340_functions,
+ .nfunctions = ARRAY_SIZE(spear1340_functions),
+ .modes_supported = false,
+};
+
+static struct of_device_id spear1340_pinctrl_of_match[] __devinitdata = {
+ {
+ .compatible = "st,spear1340-pinmux",
+ },
+ {},
+};
+
+static int __devinit spear1340_pinctrl_probe(struct platform_device *pdev)
+{
+ return spear_pinctrl_probe(pdev, &spear1340_machdata);
+}
+
+static int __devexit spear1340_pinctrl_remove(struct platform_device *pdev)
+{
+ return spear_pinctrl_remove(pdev);
+}
+
+static struct platform_driver spear1340_pinctrl_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = spear1340_pinctrl_of_match,
+ },
+ .probe = spear1340_pinctrl_probe,
+ .remove = __devexit_p(spear1340_pinctrl_remove),
+};
+
+static int __init spear1340_pinctrl_init(void)
+{
+ return platform_driver_register(&spear1340_pinctrl_driver);
+}
+arch_initcall(spear1340_pinctrl_init);
+
+static void __exit spear1340_pinctrl_exit(void)
+{
+ platform_driver_unregister(&spear1340_pinctrl_driver);
+}
+module_exit(spear1340_pinctrl_exit);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
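
The new SPEAr1340 file above repeats one declaration pattern per peripheral: a muxreg list naming the registers and bits to flip, wrapped in a modemux, exposed as a pingroup, and finally tied to a pinmux function by group name. The fragment below is only a minimal sketch of that pattern for a hypothetical "foo" device, relying on the driver-local spear_* structures shown above; the pin numbers and FOO_REG0_MASK are invented placeholders, not part of the patch.

/*
 * Sketch, not part of the patch: the per-device pattern used throughout
 * pinctrl-spear1340.c. "foo", its pins and FOO_REG0_MASK are placeholders.
 */
static const unsigned foo_pins[] = { 1, 2 };
static struct spear_muxreg foo_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,	/* register to poke */
		.mask = FOO_REG0_MASK,		/* placeholder mask */
		.val = FOO_REG0_MASK,		/* value to enable the pads */
	},
};

static struct spear_modemux foo_modemux[] = {
	{
		.muxregs = foo_muxreg,
		.nmuxregs = ARRAY_SIZE(foo_muxreg),
	},
};

static struct spear_pingroup foo_pingroup = {
	.name = "foo_grp",
	.pins = foo_pins,
	.npins = ARRAY_SIZE(foo_pins),
	.modemuxs = foo_modemux,
	.nmodemuxs = ARRAY_SIZE(foo_modemux),
};

static const char *const foo_grps[] = { "foo_grp" };
static struct spear_function foo_function = {
	.name = "foo",
	.groups = foo_grps,
	.ngroups = ARRAY_SIZE(foo_grps),
};

To be usable, such a group and function would also have to be added to the spear1340_pingroups[] and spear1340_functions[] tables so the core SPEAr pinctrl code can enumerate them.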
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c
index 832049a8b1c9..91c883bc46a6 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.c
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c
@@ -15,108 +15,7 @@
/* pins */
static const struct pinctrl_pin_desc spear3xx_pins[] = {
- PINCTRL_PIN(0, "PLGPIO0"),
- PINCTRL_PIN(1, "PLGPIO1"),
- PINCTRL_PIN(2, "PLGPIO2"),
- PINCTRL_PIN(3, "PLGPIO3"),
- PINCTRL_PIN(4, "PLGPIO4"),
- PINCTRL_PIN(5, "PLGPIO5"),
- PINCTRL_PIN(6, "PLGPIO6"),
- PINCTRL_PIN(7, "PLGPIO7"),
- PINCTRL_PIN(8, "PLGPIO8"),
- PINCTRL_PIN(9, "PLGPIO9"),
- PINCTRL_PIN(10, "PLGPIO10"),
- PINCTRL_PIN(11, "PLGPIO11"),
- PINCTRL_PIN(12, "PLGPIO12"),
- PINCTRL_PIN(13, "PLGPIO13"),
- PINCTRL_PIN(14, "PLGPIO14"),
- PINCTRL_PIN(15, "PLGPIO15"),
- PINCTRL_PIN(16, "PLGPIO16"),
- PINCTRL_PIN(17, "PLGPIO17"),
- PINCTRL_PIN(18, "PLGPIO18"),
- PINCTRL_PIN(19, "PLGPIO19"),
- PINCTRL_PIN(20, "PLGPIO20"),
- PINCTRL_PIN(21, "PLGPIO21"),
- PINCTRL_PIN(22, "PLGPIO22"),
- PINCTRL_PIN(23, "PLGPIO23"),
- PINCTRL_PIN(24, "PLGPIO24"),
- PINCTRL_PIN(25, "PLGPIO25"),
- PINCTRL_PIN(26, "PLGPIO26"),
- PINCTRL_PIN(27, "PLGPIO27"),
- PINCTRL_PIN(28, "PLGPIO28"),
- PINCTRL_PIN(29, "PLGPIO29"),
- PINCTRL_PIN(30, "PLGPIO30"),
- PINCTRL_PIN(31, "PLGPIO31"),
- PINCTRL_PIN(32, "PLGPIO32"),
- PINCTRL_PIN(33, "PLGPIO33"),
- PINCTRL_PIN(34, "PLGPIO34"),
- PINCTRL_PIN(35, "PLGPIO35"),
- PINCTRL_PIN(36, "PLGPIO36"),
- PINCTRL_PIN(37, "PLGPIO37"),
- PINCTRL_PIN(38, "PLGPIO38"),
- PINCTRL_PIN(39, "PLGPIO39"),
- PINCTRL_PIN(40, "PLGPIO40"),
- PINCTRL_PIN(41, "PLGPIO41"),
- PINCTRL_PIN(42, "PLGPIO42"),
- PINCTRL_PIN(43, "PLGPIO43"),
- PINCTRL_PIN(44, "PLGPIO44"),
- PINCTRL_PIN(45, "PLGPIO45"),
- PINCTRL_PIN(46, "PLGPIO46"),
- PINCTRL_PIN(47, "PLGPIO47"),
- PINCTRL_PIN(48, "PLGPIO48"),
- PINCTRL_PIN(49, "PLGPIO49"),
- PINCTRL_PIN(50, "PLGPIO50"),
- PINCTRL_PIN(51, "PLGPIO51"),
- PINCTRL_PIN(52, "PLGPIO52"),
- PINCTRL_PIN(53, "PLGPIO53"),
- PINCTRL_PIN(54, "PLGPIO54"),
- PINCTRL_PIN(55, "PLGPIO55"),
- PINCTRL_PIN(56, "PLGPIO56"),
- PINCTRL_PIN(57, "PLGPIO57"),
- PINCTRL_PIN(58, "PLGPIO58"),
- PINCTRL_PIN(59, "PLGPIO59"),
- PINCTRL_PIN(60, "PLGPIO60"),
- PINCTRL_PIN(61, "PLGPIO61"),
- PINCTRL_PIN(62, "PLGPIO62"),
- PINCTRL_PIN(63, "PLGPIO63"),
- PINCTRL_PIN(64, "PLGPIO64"),
- PINCTRL_PIN(65, "PLGPIO65"),
- PINCTRL_PIN(66, "PLGPIO66"),
- PINCTRL_PIN(67, "PLGPIO67"),
- PINCTRL_PIN(68, "PLGPIO68"),
- PINCTRL_PIN(69, "PLGPIO69"),
- PINCTRL_PIN(70, "PLGPIO70"),
- PINCTRL_PIN(71, "PLGPIO71"),
- PINCTRL_PIN(72, "PLGPIO72"),
- PINCTRL_PIN(73, "PLGPIO73"),
- PINCTRL_PIN(74, "PLGPIO74"),
- PINCTRL_PIN(75, "PLGPIO75"),
- PINCTRL_PIN(76, "PLGPIO76"),
- PINCTRL_PIN(77, "PLGPIO77"),
- PINCTRL_PIN(78, "PLGPIO78"),
- PINCTRL_PIN(79, "PLGPIO79"),
- PINCTRL_PIN(80, "PLGPIO80"),
- PINCTRL_PIN(81, "PLGPIO81"),
- PINCTRL_PIN(82, "PLGPIO82"),
- PINCTRL_PIN(83, "PLGPIO83"),
- PINCTRL_PIN(84, "PLGPIO84"),
- PINCTRL_PIN(85, "PLGPIO85"),
- PINCTRL_PIN(86, "PLGPIO86"),
- PINCTRL_PIN(87, "PLGPIO87"),
- PINCTRL_PIN(88, "PLGPIO88"),
- PINCTRL_PIN(89, "PLGPIO89"),
- PINCTRL_PIN(90, "PLGPIO90"),
- PINCTRL_PIN(91, "PLGPIO91"),
- PINCTRL_PIN(92, "PLGPIO92"),
- PINCTRL_PIN(93, "PLGPIO93"),
- PINCTRL_PIN(94, "PLGPIO94"),
- PINCTRL_PIN(95, "PLGPIO95"),
- PINCTRL_PIN(96, "PLGPIO96"),
- PINCTRL_PIN(97, "PLGPIO97"),
- PINCTRL_PIN(98, "PLGPIO98"),
- PINCTRL_PIN(99, "PLGPIO99"),
- PINCTRL_PIN(100, "PLGPIO100"),
- PINCTRL_PIN(101, "PLGPIO101"),
+ SPEAR_PIN_0_TO_101,
};
/* firda_pins */
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index ee79ce64d9df..57787d87d9a4 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1104,6 +1104,7 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
mutex_init(&dev->mutex);
+ memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
dev->backlight_dev = backlight_device_register("toshiba",
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 99dc29f2f2f2..e3a3b4956f08 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -1,5 +1,5 @@
menuconfig POWER_SUPPLY
- tristate "Power supply class support"
+ bool "Power supply class support"
help
Say Y here to enable power supply class support. This allows
power supply (batteries, AC, USB) monitoring by userspace
@@ -77,7 +77,7 @@ config BATTERY_DS2780
Say Y here to enable support for batteries with ds2780 chip.
config BATTERY_DS2781
- tristate "2781 battery driver"
+ tristate "DS2781 battery driver"
depends on HAS_IOMEM
select W1
select W1_SLAVE_DS2781
@@ -181,14 +181,15 @@ config BATTERY_MAX17040
to operate with a single lithium cell
config BATTERY_MAX17042
- tristate "Maxim MAX17042/8997/8966 Fuel Gauge"
+ tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
depends on I2C
help
MAX17042 is a fuel-gauge system for lithium-ion (Li+) batteries
in handheld and portable equipment. The MAX17042 is configured
to operate with a single lithium cell. MAX8997 and MAX8966 are
multi-function devices that include fuel gauges that are compatible
- with MAX17042.
+ with MAX17042. This driver also supports the max17047/50 chips, which are
+ improved versions of the max17042.
config BATTERY_Z2
tristate "Z2 battery driver"
@@ -291,6 +292,7 @@ config CHARGER_MAX8998
config CHARGER_SMB347
tristate "Summit Microelectronics SMB347 Battery Charger"
depends on I2C
+ select REGMAP_I2C
help
Say Y to include support for Summit Microelectronics SMB347
Battery Charger.
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
index d8bb99394ac0..bba3ccac72fe 100644
--- a/drivers/power/ab8500_btemp.c
+++ b/drivers/power/ab8500_btemp.c
@@ -964,10 +964,15 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
{
int irq, i, ret = 0;
u8 val;
- struct abx500_bm_plat_data *plat_data;
+ struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+ struct ab8500_btemp *di;
+
+ if (!plat_data) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
- struct ab8500_btemp *di =
- kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL);
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
if (!di)
return -ENOMEM;
@@ -977,7 +982,6 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
/* get btemp specific platform data */
- plat_data = pdev->dev.platform_data;
di->pdata = plat_data->btemp;
if (!di->pdata) {
dev_err(di->dev, "no btemp platform data supplied\n");
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index e2b4accbec88..d2303d0b7c75 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -2534,10 +2534,15 @@ static int __devexit ab8500_charger_remove(struct platform_device *pdev)
static int __devinit ab8500_charger_probe(struct platform_device *pdev)
{
int irq, i, charger_status, ret = 0;
- struct abx500_bm_plat_data *plat_data;
+ struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+ struct ab8500_charger *di;
- struct ab8500_charger *di =
- kzalloc(sizeof(struct ab8500_charger), GFP_KERNEL);
+ if (!plat_data) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
if (!di)
return -ENOMEM;
@@ -2550,9 +2555,7 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
spin_lock_init(&di->usb_state.usb_lock);
/* get charger specific platform data */
- plat_data = pdev->dev.platform_data;
di->pdata = plat_data->charger;
-
if (!di->pdata) {
dev_err(di->dev, "no charger platform data supplied\n");
ret = -EINVAL;
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index c22f2f05657e..bf022255994c 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -2446,10 +2446,15 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
{
int i, irq;
int ret = 0;
- struct abx500_bm_plat_data *plat_data;
+ struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+ struct ab8500_fg *di;
+
+ if (!plat_data) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
- struct ab8500_fg *di =
- kzalloc(sizeof(struct ab8500_fg), GFP_KERNEL);
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
if (!di)
return -ENOMEM;
@@ -2461,7 +2466,6 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
/* get fg specific platform data */
- plat_data = pdev->dev.platform_data;
di->pdata = plat_data->fg;
if (!di->pdata) {
dev_err(di->dev, "no fg platform data supplied\n");
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 9eca9f1ff0ea..86935ec18954 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -23,6 +23,16 @@
#include <linux/power/charger-manager.h>
#include <linux/regulator/consumer.h>
+static const char * const default_event_names[] = {
+ [CM_EVENT_UNKNOWN] = "Unknown",
+ [CM_EVENT_BATT_FULL] = "Battery Full",
+ [CM_EVENT_BATT_IN] = "Battery Inserted",
+ [CM_EVENT_BATT_OUT] = "Battery Pulled Out",
+ [CM_EVENT_EXT_PWR_IN_OUT] = "External Power Attach/Detach",
+ [CM_EVENT_CHG_START_STOP] = "Charging Start/Stop",
+ [CM_EVENT_OTHERS] = "Other battery events"
+};
+
/*
* Regard CM_JIFFIES_SMALL jiffies is small enough to ignore for
* delayed works so that we can run delayed works with CM_JIFFIES_SMALL
@@ -57,6 +67,12 @@ static bool cm_suspended;
static bool cm_rtc_set;
static unsigned long cm_suspend_duration_ms;
+/* About normal (not suspended) monitoring */
+static unsigned long polling_jiffy = ULONG_MAX; /* ULONG_MAX: no polling */
+static unsigned long next_polling; /* Next appointed polling time */
+static struct workqueue_struct *cm_wq; /* init at driver add */
+static struct delayed_work cm_monitor_work; /* init at driver add */
+
/* Global charger-manager description */
static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
@@ -71,6 +87,11 @@ static bool is_batt_present(struct charger_manager *cm)
int i, ret;
switch (cm->desc->battery_present) {
+ case CM_BATTERY_PRESENT:
+ present = true;
+ break;
+ case CM_NO_BATTERY:
+ break;
case CM_FUEL_GAUGE:
ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
POWER_SUPPLY_PROP_PRESENT, &val);
@@ -279,6 +300,26 @@ static int try_charger_enable(struct charger_manager *cm, bool enable)
}
/**
+ * try_charger_restart - Restart charging.
+ * @cm: the Charger Manager representing the battery.
+ *
+ * Restart charging by turning off and on the charger.
+ */
+static int try_charger_restart(struct charger_manager *cm)
+{
+ int err;
+
+ if (cm->emergency_stop)
+ return -EAGAIN;
+
+ err = try_charger_enable(cm, false);
+ if (err)
+ return err;
+
+ return try_charger_enable(cm, true);
+}
+
+/**
* uevent_notify - Let users know something has changed.
* @cm: the Charger Manager representing the battery.
* @event: the event string.
@@ -334,6 +375,46 @@ static void uevent_notify(struct charger_manager *cm, const char *event)
}
/**
+ * fullbatt_vchk - Check voltage drop some time after the "FULL" event.
+ * @work: the work_struct appointing the function
+ *
+ * If a user has designated "fullbatt_vchkdrop_ms/uV" values with
+ * charger_desc, Charger Manager checks voltage drop after the battery
+ * "FULL" event. It checks whether the voltage has dropped more than
+ * fullbatt_vchkdrop_uV by calling this function after fullbatt_vchkdrop_ms.
+ */
+static void fullbatt_vchk(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct charger_manager *cm = container_of(dwork,
+ struct charger_manager, fullbatt_vchk_work);
+ struct charger_desc *desc = cm->desc;
+ int batt_uV, err, diff;
+
+ /* remove the appointment for fullbatt_vchk */
+ cm->fullbatt_vchk_jiffies_at = 0;
+
+ if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
+ return;
+
+ err = get_batt_uV(cm, &batt_uV);
+ if (err) {
+ dev_err(cm->dev, "%s: get_batt_uV error(%d).\n", __func__, err);
+ return;
+ }
+
+ diff = cm->fullbatt_vchk_uV;
+ diff -= batt_uV;
+
+ dev_dbg(cm->dev, "VBATT dropped %duV after full-batt.\n", diff);
+
+ if (diff > desc->fullbatt_vchkdrop_uV) {
+ try_charger_restart(cm);
+ uevent_notify(cm, "Recharge");
+ }
+}
+
+/**
* _cm_monitor - Monitor the temperature and return true for exceptions.
* @cm: the Charger Manager representing the battery.
*
@@ -392,6 +473,131 @@ static bool cm_monitor(void)
return stop;
}
+/**
+ * _setup_polling - Setup the next instance of polling.
+ * @work: work_struct of the function _setup_polling.
+ */
+static void _setup_polling(struct work_struct *work)
+{
+ unsigned long min = ULONG_MAX;
+ struct charger_manager *cm;
+ bool keep_polling = false;
+ unsigned long _next_polling;
+
+ mutex_lock(&cm_list_mtx);
+
+ list_for_each_entry(cm, &cm_list, entry) {
+ if (is_polling_required(cm) && cm->desc->polling_interval_ms) {
+ keep_polling = true;
+
+ if (min > cm->desc->polling_interval_ms)
+ min = cm->desc->polling_interval_ms;
+ }
+ }
+
+ polling_jiffy = msecs_to_jiffies(min);
+ if (polling_jiffy <= CM_JIFFIES_SMALL)
+ polling_jiffy = CM_JIFFIES_SMALL + 1;
+
+ if (!keep_polling)
+ polling_jiffy = ULONG_MAX;
+ if (polling_jiffy == ULONG_MAX)
+ goto out;
+
+ WARN(cm_wq == NULL, "charger-manager: workqueue not initialized"
+ ". try it later. %s\n", __func__);
+
+ _next_polling = jiffies + polling_jiffy;
+
+ if (!delayed_work_pending(&cm_monitor_work) ||
+ (delayed_work_pending(&cm_monitor_work) &&
+ time_after(next_polling, _next_polling))) {
+ cancel_delayed_work_sync(&cm_monitor_work);
+ next_polling = jiffies + polling_jiffy;
+ queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
+ }
+
+out:
+ mutex_unlock(&cm_list_mtx);
+}
+static DECLARE_WORK(setup_polling, _setup_polling);
+
+/**
+ * cm_monitor_poller - The Monitor / Poller.
+ * @work: work_struct of the function cm_monitor_poller
+ *
+ * During non-suspended state, cm_monitor_poller is used to poll and monitor
+ * the batteries.
+ */
+static void cm_monitor_poller(struct work_struct *work)
+{
+ cm_monitor();
+ schedule_work(&setup_polling);
+}
+
+/**
+ * fullbatt_handler - Event handler for CM_EVENT_BATT_FULL
+ * @cm: the Charger Manager representing the battery.
+ */
+static void fullbatt_handler(struct charger_manager *cm)
+{
+ struct charger_desc *desc = cm->desc;
+
+ if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
+ goto out;
+
+ if (cm_suspended)
+ device_set_wakeup_capable(cm->dev, true);
+
+ if (delayed_work_pending(&cm->fullbatt_vchk_work))
+ cancel_delayed_work(&cm->fullbatt_vchk_work);
+ queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+ msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
+ cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
+ desc->fullbatt_vchkdrop_ms);
+
+ if (cm->fullbatt_vchk_jiffies_at == 0)
+ cm->fullbatt_vchk_jiffies_at = 1;
+
+out:
+ dev_info(cm->dev, "EVENT_HANDLE: Battery Fully Charged.\n");
+ uevent_notify(cm, default_event_names[CM_EVENT_BATT_FULL]);
+}
+
+/**
+ * battout_handler - Event handler for CM_EVENT_BATT_OUT
+ * @cm: the Charger Manager representing the battery.
+ */
+static void battout_handler(struct charger_manager *cm)
+{
+ if (cm_suspended)
+ device_set_wakeup_capable(cm->dev, true);
+
+ if (!is_batt_present(cm)) {
+ dev_emerg(cm->dev, "Battery Pulled Out!\n");
+ uevent_notify(cm, default_event_names[CM_EVENT_BATT_OUT]);
+ } else {
+ uevent_notify(cm, "Battery Reinserted?");
+ }
+}
+
+/**
+ * misc_event_handler - Handler for other events
+ * @cm: the Charger Manager representing the battery.
+ * @type: the type of charger event.
+ */
+static void misc_event_handler(struct charger_manager *cm,
+ enum cm_event_types type)
+{
+ if (cm_suspended)
+ device_set_wakeup_capable(cm->dev, true);
+
+ if (!delayed_work_pending(&cm_monitor_work) &&
+ is_polling_required(cm) && cm->desc->polling_interval_ms)
+ schedule_work(&setup_polling);
+ uevent_notify(cm, default_event_names[type]);
+}
+
static int charger_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -613,6 +819,21 @@ static bool cm_setup_timer(void)
mutex_lock(&cm_list_mtx);
list_for_each_entry(cm, &cm_list, entry) {
+ unsigned int fbchk_ms = 0;
+
+ /* fullbatt_vchk is required. setup timer for that */
+ if (cm->fullbatt_vchk_jiffies_at) {
+ fbchk_ms = jiffies_to_msecs(cm->fullbatt_vchk_jiffies_at
+ - jiffies);
+ if (time_is_before_eq_jiffies(
+ cm->fullbatt_vchk_jiffies_at) ||
+ msecs_to_jiffies(fbchk_ms) < CM_JIFFIES_SMALL) {
+ fullbatt_vchk(&cm->fullbatt_vchk_work.work);
+ fbchk_ms = 0;
+ }
+ }
+ CM_MIN_VALID(wakeup_ms, fbchk_ms);
+
/* Skip if polling is not required for this CM */
if (!is_polling_required(cm) && !cm->emergency_stop)
continue;
@@ -672,6 +893,23 @@ static bool cm_setup_timer(void)
return false;
}
+static void _cm_fbchk_in_suspend(struct charger_manager *cm)
+{
+ unsigned long jiffy_now = jiffies;
+
+ if (!cm->fullbatt_vchk_jiffies_at)
+ return;
+
+ if (g_desc && g_desc->assume_timer_stops_in_suspend)
+ jiffy_now += msecs_to_jiffies(cm_suspend_duration_ms);
+
+ /* Execute now if it's going to be executed not too long after */
+ jiffy_now += CM_JIFFIES_SMALL;
+
+ if (time_after_eq(jiffy_now, cm->fullbatt_vchk_jiffies_at))
+ fullbatt_vchk(&cm->fullbatt_vchk_work.work);
+}
+
/**
* cm_suspend_again - Determine whether suspend again or not
*
@@ -693,6 +931,8 @@ bool cm_suspend_again(void)
ret = true;
mutex_lock(&cm_list_mtx);
list_for_each_entry(cm, &cm_list, entry) {
+ _cm_fbchk_in_suspend(cm);
+
if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) ||
cm->status_save_batt != is_batt_present(cm)) {
ret = false;
@@ -796,6 +1036,21 @@ static int charger_manager_probe(struct platform_device *pdev)
memcpy(cm->desc, desc, sizeof(struct charger_desc));
cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */
+ /*
+ * The following two do not need to be errors.
+ * Users may intentionally ignore those two features.
+ */
+ if (desc->fullbatt_uV == 0) {
+ dev_info(&pdev->dev, "Ignoring full-battery voltage threshold"
+ " as it is not supplied.");
+ }
+ if (!desc->fullbatt_vchkdrop_ms || !desc->fullbatt_vchkdrop_uV) {
+ dev_info(&pdev->dev, "Disabling full-battery voltage drop "
+ "checking mechanism as it is not supplied.");
+ desc->fullbatt_vchkdrop_ms = 0;
+ desc->fullbatt_vchkdrop_uV = 0;
+ }
+
if (!desc->charger_regulators || desc->num_charger_regulators < 1) {
ret = -EINVAL;
dev_err(&pdev->dev, "charger_regulators undefined.\n");
@@ -903,6 +1158,8 @@ static int charger_manager_probe(struct platform_device *pdev)
cm->charger_psy.num_properties++;
}
+ INIT_DELAYED_WORK(&cm->fullbatt_vchk_work, fullbatt_vchk);
+
ret = power_supply_register(NULL, &cm->charger_psy);
if (ret) {
dev_err(&pdev->dev, "Cannot register charger-manager with"
@@ -928,6 +1185,15 @@ static int charger_manager_probe(struct platform_device *pdev)
list_add(&cm->entry, &cm_list);
mutex_unlock(&cm_list_mtx);
+ /*
+ * Charger-manager is capable of waking up the system from sleep
+ * when an event happens through cm_notify_event()
+ */
+ device_init_wakeup(&pdev->dev, true);
+ device_set_wakeup_capable(&pdev->dev, false);
+
+ schedule_work(&setup_polling);
+
return 0;
err_chg_enable:
@@ -958,9 +1224,17 @@ static int __devexit charger_manager_remove(struct platform_device *pdev)
list_del(&cm->entry);
mutex_unlock(&cm_list_mtx);
+ if (work_pending(&setup_polling))
+ cancel_work_sync(&setup_polling);
+ if (delayed_work_pending(&cm_monitor_work))
+ cancel_delayed_work_sync(&cm_monitor_work);
+
regulator_bulk_free(desc->num_charger_regulators,
desc->charger_regulators);
power_supply_unregister(&cm->charger_psy);
+
+ try_charger_enable(cm, false);
+
kfree(cm->charger_psy.properties);
kfree(cm->charger_stat);
kfree(cm->desc);
@@ -975,6 +1249,18 @@ static const struct platform_device_id charger_manager_id[] = {
};
MODULE_DEVICE_TABLE(platform, charger_manager_id);
+static int cm_suspend_noirq(struct device *dev)
+{
+ int ret = 0;
+
+ if (device_may_wakeup(dev)) {
+ device_set_wakeup_capable(dev, false);
+ ret = -EAGAIN;
+ }
+
+ return ret;
+}
+
static int cm_suspend_prepare(struct device *dev)
{
struct charger_manager *cm = dev_get_drvdata(dev);
@@ -1000,6 +1286,8 @@ static int cm_suspend_prepare(struct device *dev)
cm_suspended = true;
}
+ if (delayed_work_pending(&cm->fullbatt_vchk_work))
+ cancel_delayed_work(&cm->fullbatt_vchk_work);
cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
cm->status_save_batt = is_batt_present(cm);
@@ -1027,11 +1315,40 @@ static void cm_suspend_complete(struct device *dev)
cm_rtc_set = false;
}
+ /* Re-enqueue delayed work (fullbatt_vchk_work) */
+ if (cm->fullbatt_vchk_jiffies_at) {
+ unsigned long delay = 0;
+ unsigned long now = jiffies + CM_JIFFIES_SMALL;
+
+ if (time_after_eq(now, cm->fullbatt_vchk_jiffies_at)) {
+ delay = (unsigned long)((long)now
+ - (long)(cm->fullbatt_vchk_jiffies_at));
+ delay = jiffies_to_msecs(delay);
+ } else {
+ delay = 0;
+ }
+
+ /*
+ * Account for cm_suspend_duration_ms if
+ * assume_timer_stops_in_suspend is active
+ */
+ if (g_desc && g_desc->assume_timer_stops_in_suspend) {
+ if (delay > cm_suspend_duration_ms)
+ delay -= cm_suspend_duration_ms;
+ else
+ delay = 0;
+ }
+
+ queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+ msecs_to_jiffies(delay));
+ }
+ device_set_wakeup_capable(cm->dev, false);
uevent_notify(cm, NULL);
}
static const struct dev_pm_ops charger_manager_pm = {
.prepare = cm_suspend_prepare,
+ .suspend_noirq = cm_suspend_noirq,
.complete = cm_suspend_complete,
};
@@ -1048,16 +1365,91 @@ static struct platform_driver charger_manager_driver = {
static int __init charger_manager_init(void)
{
+ cm_wq = create_freezable_workqueue("charger_manager");
+ INIT_DELAYED_WORK(&cm_monitor_work, cm_monitor_poller);
+
return platform_driver_register(&charger_manager_driver);
}
late_initcall(charger_manager_init);
static void __exit charger_manager_cleanup(void)
{
+ destroy_workqueue(cm_wq);
+ cm_wq = NULL;
+
platform_driver_unregister(&charger_manager_driver);
}
module_exit(charger_manager_cleanup);
+/**
+ * find_power_supply - find the associated power_supply of charger
+ * @cm: the Charger Manager representing the battery
+ * @psy: pointer to instance of charger's power_supply
+ */
+static bool find_power_supply(struct charger_manager *cm,
+ struct power_supply *psy)
+{
+ int i;
+ bool found = false;
+
+ for (i = 0; cm->charger_stat[i]; i++) {
+ if (psy == cm->charger_stat[i]) {
+ found = true;
+ break;
+ }
+ }
+
+ return found;
+}
+
+/**
+ * cm_notify_event - charger driver notifies Charger Manager of a charger event
+ * @psy: pointer to instance of charger's power_supply
+ * @type: type of charger event
+ * @msg: optional message passed to the uevent_notify function
+ */
+void cm_notify_event(struct power_supply *psy, enum cm_event_types type,
+ char *msg)
+{
+ struct charger_manager *cm;
+ bool found_power_supply = false;
+
+ if (psy == NULL)
+ return;
+
+ mutex_lock(&cm_list_mtx);
+ list_for_each_entry(cm, &cm_list, entry) {
+ found_power_supply = find_power_supply(cm, psy);
+ if (found_power_supply)
+ break;
+ }
+ mutex_unlock(&cm_list_mtx);
+
+ if (!found_power_supply)
+ return;
+
+ switch (type) {
+ case CM_EVENT_BATT_FULL:
+ fullbatt_handler(cm);
+ break;
+ case CM_EVENT_BATT_OUT:
+ battout_handler(cm);
+ break;
+ case CM_EVENT_BATT_IN:
+ case CM_EVENT_EXT_PWR_IN_OUT ... CM_EVENT_CHG_START_STOP:
+ misc_event_handler(cm, type);
+ break;
+ case CM_EVENT_UNKNOWN:
+ case CM_EVENT_OTHERS:
+ uevent_notify(cm, msg ? msg : default_event_names[type]);
+ break;
+ default:
+ dev_err(cm->dev, "%s type not specified.\n", __func__);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(cm_notify_event);
+
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("Charger Manager");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/ds2781_battery.c b/drivers/power/ds2781_battery.c
index ca0d653d0a7a..975684a40f15 100644
--- a/drivers/power/ds2781_battery.c
+++ b/drivers/power/ds2781_battery.c
@@ -643,9 +643,7 @@ static ssize_t ds2781_read_param_eeprom_bin(struct file *filp,
struct power_supply *psy = to_power_supply(dev);
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
- count = min_t(loff_t, count,
- DS2781_EEPROM_BLOCK1_END -
- DS2781_EEPROM_BLOCK1_START + 1 - off);
+ count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
return ds2781_read_block(dev_info, buf,
DS2781_EEPROM_BLOCK1_START + off, count);
@@ -661,9 +659,7 @@ static ssize_t ds2781_write_param_eeprom_bin(struct file *filp,
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
int ret;
- count = min_t(loff_t, count,
- DS2781_EEPROM_BLOCK1_END -
- DS2781_EEPROM_BLOCK1_START + 1 - off);
+ count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
ret = ds2781_write(dev_info, buf,
DS2781_EEPROM_BLOCK1_START + off, count);
@@ -682,7 +678,7 @@ static struct bin_attribute ds2781_param_eeprom_bin_attr = {
.name = "param_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
- .size = DS2781_EEPROM_BLOCK1_END - DS2781_EEPROM_BLOCK1_START + 1,
+ .size = DS2781_PARAM_EEPROM_SIZE,
.read = ds2781_read_param_eeprom_bin,
.write = ds2781_write_param_eeprom_bin,
};
@@ -696,9 +692,7 @@ static ssize_t ds2781_read_user_eeprom_bin(struct file *filp,
struct power_supply *psy = to_power_supply(dev);
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
- count = min_t(loff_t, count,
- DS2781_EEPROM_BLOCK0_END -
- DS2781_EEPROM_BLOCK0_START + 1 - off);
+ count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
return ds2781_read_block(dev_info, buf,
DS2781_EEPROM_BLOCK0_START + off, count);
@@ -715,9 +709,7 @@ static ssize_t ds2781_write_user_eeprom_bin(struct file *filp,
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
int ret;
- count = min_t(loff_t, count,
- DS2781_EEPROM_BLOCK0_END -
- DS2781_EEPROM_BLOCK0_START + 1 - off);
+ count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
ret = ds2781_write(dev_info, buf,
DS2781_EEPROM_BLOCK0_START + off, count);
@@ -736,7 +728,7 @@ static struct bin_attribute ds2781_user_eeprom_bin_attr = {
.name = "user_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
- .size = DS2781_EEPROM_BLOCK0_END - DS2781_EEPROM_BLOCK0_START + 1,
+ .size = DS2781_USER_EEPROM_SIZE,
.read = ds2781_read_user_eeprom_bin,
.write = ds2781_write_user_eeprom_bin,
};
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 39eb50f35f09..e5ccd2979773 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -474,13 +474,13 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
fail2:
power_supply_unregister(&isp->psy);
fail1:
+ isp1704_charger_set_power(isp, 0);
usb_put_transceiver(isp->phy);
fail0:
kfree(isp);
dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
- isp1704_charger_set_power(isp, 0);
return ret;
}
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 04620c2cb388..140788b309f8 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/pm.h>
#include <linux/mod_devicetable.h>
#include <linux/power_supply.h>
#include <linux/power/max17042_battery.h>
@@ -61,9 +62,13 @@
#define dP_ACC_100 0x1900
#define dP_ACC_200 0x3200
+#define MAX17042_IC_VERSION 0x0092
+#define MAX17047_IC_VERSION 0x00AC /* same for max17050 */
+
struct max17042_chip {
struct i2c_client *client;
struct power_supply battery;
+ enum max170xx_chip_type chip_type;
struct max17042_platform_data *pdata;
struct work_struct work;
int init_complete;
@@ -105,6 +110,7 @@ static enum power_supply_property max17042_battery_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_TEMP,
@@ -150,7 +156,10 @@ static int max17042_get_property(struct power_supply *psy,
val->intval *= 20000; /* Units of LSB = 20mV */
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+ if (chip->chip_type == MAX17042)
+ ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+ else
+ ret = max17042_read_reg(chip->client, MAX17047_V_empty);
if (ret < 0)
return ret;
@@ -171,6 +180,13 @@ static int max17042_get_property(struct power_supply *psy,
val->intval = ret * 625 / 8;
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ ret = max17042_read_reg(chip->client, MAX17042_OCVInternal);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret * 625 / 8;
+ break;
case POWER_SUPPLY_PROP_CAPACITY:
ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
if (ret < 0)
@@ -325,11 +341,10 @@ static inline int max17042_model_data_compare(struct max17042_chip *chip,
static int max17042_init_model(struct max17042_chip *chip)
{
int ret;
- int table_size =
- sizeof(chip->pdata->config_data->cell_char_tbl)/sizeof(u16);
+ int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
u16 *temp_data;
- temp_data = kzalloc(table_size, GFP_KERNEL);
+ temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
if (!temp_data)
return -ENOMEM;
@@ -354,12 +369,11 @@ static int max17042_init_model(struct max17042_chip *chip)
static int max17042_verify_model_lock(struct max17042_chip *chip)
{
int i;
- int table_size =
- sizeof(chip->pdata->config_data->cell_char_tbl);
+ int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
u16 *temp_data;
int ret = 0;
- temp_data = kzalloc(table_size, GFP_KERNEL);
+ temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
if (!temp_data)
return -ENOMEM;
@@ -382,6 +396,9 @@ static void max17042_write_config_regs(struct max17042_chip *chip)
max17042_write_reg(chip->client, MAX17042_FilterCFG,
config->filter_cfg);
max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg);
+ if (chip->chip_type == MAX17047)
+ max17042_write_reg(chip->client, MAX17047_FullSOCThr,
+ config->full_soc_thresh);
}
static void max17042_write_custom_regs(struct max17042_chip *chip)
@@ -392,12 +409,23 @@ static void max17042_write_custom_regs(struct max17042_chip *chip)
config->rcomp0);
max17042_write_verify_reg(chip->client, MAX17042_TempCo,
config->tcompc0);
- max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
- config->empty_tempco);
- max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
- config->kempty0);
max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
config->ichgt_term);
+ if (chip->chip_type == MAX17042) {
+ max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
+ config->empty_tempco);
+ max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
+ config->kempty0);
+ } else {
+ max17042_write_verify_reg(chip->client, MAX17047_QRTbl00,
+ config->qrtbl00);
+ max17042_write_verify_reg(chip->client, MAX17047_QRTbl10,
+ config->qrtbl10);
+ max17042_write_verify_reg(chip->client, MAX17047_QRTbl20,
+ config->qrtbl20);
+ max17042_write_verify_reg(chip->client, MAX17047_QRTbl30,
+ config->qrtbl30);
+ }
}
static void max17042_update_capacity_regs(struct max17042_chip *chip)
@@ -453,6 +481,8 @@ static void max17042_load_new_capacity_params(struct max17042_chip *chip)
config->design_cap);
max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
config->fullcapnom);
+ /* Update SOC register with new SOC */
+ max17042_write_reg(chip->client, MAX17042_RepSOC, vfSoc);
}
/*
@@ -489,20 +519,28 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
max17042_override_por(client, MAX17042_FullCAP, config->fullcap);
max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom);
- max17042_override_por(client, MAX17042_SOC_empty, config->socempty);
+ if (chip->chip_type == MAX17042)
+ max17042_override_por(client, MAX17042_SOC_empty,
+ config->socempty);
max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty);
max17042_override_por(client, MAX17042_dQacc, config->dqacc);
max17042_override_por(client, MAX17042_dPacc, config->dpacc);
- max17042_override_por(client, MAX17042_V_empty, config->vempty);
+ if (chip->chip_type == MAX17042)
+ max17042_override_por(client, MAX17042_V_empty, config->vempty);
+ else
+ max17042_override_por(client, MAX17047_V_empty, config->vempty);
max17042_override_por(client, MAX17042_TempNom, config->temp_nom);
max17042_override_por(client, MAX17042_TempLim, config->temp_lim);
max17042_override_por(client, MAX17042_FCTC, config->fctc);
max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0);
max17042_override_por(client, MAX17042_TempCo, config->tcompc0);
- max17042_override_por(client, MAX17042_EmptyTempCo,
- config->empty_tempco);
- max17042_override_por(client, MAX17042_K_empty0, config->kempty0);
+ if (chip->chip_type) {
+ max17042_override_por(client, MAX17042_EmptyTempCo,
+ config->empty_tempco);
+ max17042_override_por(client, MAX17042_K_empty0,
+ config->kempty0);
+ }
}
static int max17042_init_chip(struct max17042_chip *chip)
@@ -659,7 +697,19 @@ static int __devinit max17042_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
- chip->battery.name = "max17042_battery";
+ ret = max17042_read_reg(chip->client, MAX17042_DevName);
+ if (ret == MAX17042_IC_VERSION) {
+ dev_dbg(&client->dev, "chip type max17042 detected\n");
+ chip->chip_type = MAX17042;
+ } else if (ret == MAX17047_IC_VERSION) {
+ dev_dbg(&client->dev, "chip type max17047/50 detected\n");
+ chip->chip_type = MAX17047;
+ } else {
+ dev_err(&client->dev, "device version mismatch: %x\n", ret);
+ return -EIO;
+ }
+
+ chip->battery.name = "max170xx_battery";
chip->battery.type = POWER_SUPPLY_TYPE_BATTERY;
chip->battery.get_property = max17042_get_property;
chip->battery.properties = max17042_battery_props;
@@ -683,6 +733,12 @@ static int __devinit max17042_probe(struct i2c_client *client,
max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
}
+ ret = power_supply_register(&client->dev, &chip->battery);
+ if (ret) {
+ dev_err(&client->dev, "failed: power supply register\n");
+ return ret;
+ }
+
if (client->irq) {
ret = request_threaded_irq(client->irq, NULL,
max17042_thread_handler,
@@ -693,13 +749,14 @@ static int __devinit max17042_probe(struct i2c_client *client,
reg |= CONFIG_ALRT_BIT_ENBL;
max17042_write_reg(client, MAX17042_CONFIG, reg);
max17042_set_soc_threshold(chip, 1);
- } else
+ } else {
+ client->irq = 0;
dev_err(&client->dev, "%s(): cannot get IRQ\n",
__func__);
+ }
}
reg = max17042_read_reg(chip->client, MAX17042_STATUS);
-
if (reg & STATUS_POR_BIT) {
INIT_WORK(&chip->work, max17042_init_worker);
schedule_work(&chip->work);
@@ -707,23 +764,65 @@ static int __devinit max17042_probe(struct i2c_client *client,
chip->init_complete = 1;
}
- ret = power_supply_register(&client->dev, &chip->battery);
- if (ret)
- dev_err(&client->dev, "failed: power supply register\n");
- return ret;
+ return 0;
}
static int __devexit max17042_remove(struct i2c_client *client)
{
struct max17042_chip *chip = i2c_get_clientdata(client);
+ if (client->irq)
+ free_irq(client->irq, chip);
power_supply_unregister(&chip->battery);
return 0;
}
+#ifdef CONFIG_PM
+static int max17042_suspend(struct device *dev)
+{
+ struct max17042_chip *chip = dev_get_drvdata(dev);
+
+ /*
+ * disable the irq and enable the irq_wake
+ * capability on the interrupt line.
+ */
+ if (chip->client->irq) {
+ disable_irq(chip->client->irq);
+ enable_irq_wake(chip->client->irq);
+ }
+
+ return 0;
+}
+
+static int max17042_resume(struct device *dev)
+{
+ struct max17042_chip *chip = dev_get_drvdata(dev);
+
+ if (chip->client->irq) {
+ disable_irq_wake(chip->client->irq);
+ enable_irq(chip->client->irq);
+ /* re-program the SOC thresholds to 1% change */
+ max17042_set_soc_threshold(chip, 1);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops max17042_pm_ops = {
+ .suspend = max17042_suspend,
+ .resume = max17042_resume,
+};
+
+#define MAX17042_PM_OPS (&max17042_pm_ops)
+#else
+#define MAX17042_PM_OPS NULL
+#endif
+
#ifdef CONFIG_OF
static const struct of_device_id max17042_dt_match[] = {
{ .compatible = "maxim,max17042" },
+ { .compatible = "maxim,max17047" },
+ { .compatible = "maxim,max17050" },
{ },
};
MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -731,6 +830,8 @@ MODULE_DEVICE_TABLE(of, max17042_dt_match);
static const struct i2c_device_id max17042_id[] = {
{ "max17042", 0 },
+ { "max17047", 1 },
+ { "max17050", 2 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max17042_id);
@@ -739,6 +840,7 @@ static struct i2c_driver max17042_i2c_driver = {
.driver = {
.name = "max17042",
.of_match_table = of_match_ptr(max17042_dt_match),
+ .pm = MAX17042_PM_OPS,
},
.probe = max17042_probe,
.remove = __devexit_p(max17042_remove),
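With the detection code above, one driver now binds to all three parts, and the id table and DT match list gain "max17047" and "max17050" entries. As a usage illustration only (the bus number, GPIO and the 0x36 slave address are assumptions, not taken from the patch), a board file could instantiate the gauge under one of the new IDs like this:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/i2c.h>
	#include <linux/gpio.h>

	/* Hypothetical wiring: I2C bus 1, ALRT pin on GPIO 42, slave address 0x36. */
	static struct i2c_board_info __initdata board_fuel_gauge[] = {
		{
			I2C_BOARD_INFO("max17047", 0x36),	/* matches the new id table entry */
		},
	};

	static int __init board_register_fuel_gauge(void)
	{
		board_fuel_gauge[0].irq = gpio_to_irq(42);	/* optional ALRT interrupt */
		return i2c_register_board_info(1, board_fuel_gauge,
					       ARRAY_SIZE(board_fuel_gauge));
	}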
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 4368e7d61316..4150747f9186 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -146,6 +146,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(voltage_min_design),
POWER_SUPPLY_ATTR(voltage_now),
POWER_SUPPLY_ATTR(voltage_avg),
+ POWER_SUPPLY_ATTR(voltage_ocv),
POWER_SUPPLY_ATTR(current_max),
POWER_SUPPLY_ATTR(current_now),
POWER_SUPPLY_ATTR(current_avg),
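The single line added to power_supply_attrs[] exposes the open-circuit-voltage property as a voltage_ocv sysfs file. A minimal sketch of how a fuel-gauge driver would report it from its get_property callback follows; foo_chip and foo_read_ocv_uv() are placeholders, not code from any driver in this series:

	#include <linux/kernel.h>
	#include <linux/power_supply.h>

	struct foo_chip {
		struct power_supply battery;
		/* hardware access state would live here */
	};

	/* Placeholder for whatever register read yields the open-circuit voltage. */
	static int foo_read_ocv_uv(struct foo_chip *chip);

	static int foo_get_property(struct power_supply *psy,
				    enum power_supply_property psp,
				    union power_supply_propval *val)
	{
		struct foo_chip *chip = container_of(psy, struct foo_chip, battery);

		switch (psp) {
		case POWER_SUPPLY_PROP_VOLTAGE_OCV:
			val->intval = foo_read_ocv_uv(chip);	/* reported in microvolts */
			return 0;
		default:
			return -EINVAL;
		}
	}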
diff --git a/drivers/power/sbs-battery.c b/drivers/power/sbs-battery.c
index 06b659d91790..a5b6849d4123 100644
--- a/drivers/power/sbs-battery.c
+++ b/drivers/power/sbs-battery.c
@@ -89,7 +89,7 @@ static const struct chip_data {
[REG_CURRENT] =
SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767),
[REG_CAPACITY] =
- SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100),
+ SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0D, 0, 100),
[REG_REMAINING_CAPACITY] =
SBS_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
[REG_REMAINING_CAPACITY_CHARGE] =
diff --git a/drivers/power/smb347-charger.c b/drivers/power/smb347-charger.c
index ce1694d1a365..f8eedd8a676f 100644
--- a/drivers/power/smb347-charger.c
+++ b/drivers/power/smb347-charger.c
@@ -11,7 +11,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/debugfs.h>
+#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -21,7 +21,7 @@
#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <linux/power/smb347-charger.h>
-#include <linux/seq_file.h>
+#include <linux/regmap.h>
/*
* Configuration registers. These are mirrored to volatile RAM and can be
@@ -39,6 +39,7 @@
#define CFG_CURRENT_LIMIT_DC_SHIFT 4
#define CFG_CURRENT_LIMIT_USB_MASK 0x0f
#define CFG_FLOAT_VOLTAGE 0x03
+#define CFG_FLOAT_VOLTAGE_FLOAT_MASK 0x3f
#define CFG_FLOAT_VOLTAGE_THRESHOLD_MASK 0xc0
#define CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT 6
#define CFG_STAT 0x05
@@ -113,29 +114,31 @@
#define STAT_C_CHARGER_ERROR BIT(6)
#define STAT_E 0x3f
+#define SMB347_MAX_REGISTER 0x3f
+
/**
* struct smb347_charger - smb347 charger instance
* @lock: protects concurrent access to online variables
- * @client: pointer to i2c client
+ * @dev: pointer to device
+ * @regmap: pointer to driver regmap
* @mains: power_supply instance for AC/DC power
* @usb: power_supply instance for USB power
* @battery: power_supply instance for battery
* @mains_online: is AC/DC input connected
* @usb_online: is USB input connected
* @charging_enabled: is charging enabled
- * @dentry: for debugfs
* @pdata: pointer to platform data
*/
struct smb347_charger {
struct mutex lock;
- struct i2c_client *client;
+ struct device *dev;
+ struct regmap *regmap;
struct power_supply mains;
struct power_supply usb;
struct power_supply battery;
bool mains_online;
bool usb_online;
bool charging_enabled;
- struct dentry *dentry;
const struct smb347_charger_platform_data *pdata;
};
@@ -193,14 +196,6 @@ static const unsigned int ccc_tbl[] = {
1200000,
};
-/* Convert register value to current using lookup table */
-static int hw_to_current(const unsigned int *tbl, size_t size, unsigned int val)
-{
- if (val >= size)
- return -EINVAL;
- return tbl[val];
-}
-
/* Convert current to register value using lookup table */
static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
{
@@ -212,43 +207,22 @@ static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
return i > 0 ? i - 1 : -EINVAL;
}
-static int smb347_read(struct smb347_charger *smb, u8 reg)
-{
- int ret;
-
- ret = i2c_smbus_read_byte_data(smb->client, reg);
- if (ret < 0)
- dev_warn(&smb->client->dev, "failed to read reg 0x%x: %d\n",
- reg, ret);
- return ret;
-}
-
-static int smb347_write(struct smb347_charger *smb, u8 reg, u8 val)
-{
- int ret;
-
- ret = i2c_smbus_write_byte_data(smb->client, reg, val);
- if (ret < 0)
- dev_warn(&smb->client->dev, "failed to write reg 0x%x: %d\n",
- reg, ret);
- return ret;
-}
-
/**
- * smb347_update_status - updates the charging status
+ * smb347_update_ps_status - refreshes the power source status
* @smb: pointer to smb347 charger instance
*
- * Function checks status of the charging and updates internal state
- * accordingly. Returns %0 if there is no change in status, %1 if the
- * status has changed and negative errno in case of failure.
+ * Function checks whether any power source is connected to the charger and
+ * updates internal state accordingly. If there is a change from the previous
+ * state the function returns %1, otherwise %0, and a negative errno in case
+ * of error.
*/
-static int smb347_update_status(struct smb347_charger *smb)
+static int smb347_update_ps_status(struct smb347_charger *smb)
{
bool usb = false;
bool dc = false;
+ unsigned int val;
int ret;
- ret = smb347_read(smb, IRQSTAT_E);
+ ret = regmap_read(smb->regmap, IRQSTAT_E, &val);
if (ret < 0)
return ret;
@@ -257,9 +231,9 @@ static int smb347_update_status(struct smb347_charger *smb)
* platform data _and_ whether corresponding undervoltage is set.
*/
if (smb->pdata->use_mains)
- dc = !(ret & IRQSTAT_E_DCIN_UV_STAT);
+ dc = !(val & IRQSTAT_E_DCIN_UV_STAT);
if (smb->pdata->use_usb)
- usb = !(ret & IRQSTAT_E_USBIN_UV_STAT);
+ usb = !(val & IRQSTAT_E_USBIN_UV_STAT);
mutex_lock(&smb->lock);
ret = smb->mains_online != dc || smb->usb_online != usb;
@@ -271,15 +245,15 @@ static int smb347_update_status(struct smb347_charger *smb)
}
/*
- * smb347_is_online - returns whether input power source is connected
+ * smb347_is_ps_online - returns whether input power source is connected
* @smb: pointer to smb347 charger instance
*
* Returns %true if input power source is connected. Note that this is
* dependent on what platform has configured for usable power sources. For
- * example if USB is disabled, this will return %false even if the USB
- * cable is connected.
+ * example if USB is disabled, this will return %false even if the USB cable
+ * is connected.
*/
-static bool smb347_is_online(struct smb347_charger *smb)
+static bool smb347_is_ps_online(struct smb347_charger *smb)
{
bool ret;
@@ -299,16 +273,17 @@ static bool smb347_is_online(struct smb347_charger *smb)
*/
static int smb347_charging_status(struct smb347_charger *smb)
{
+ unsigned int val;
int ret;
- if (!smb347_is_online(smb))
+ if (!smb347_is_ps_online(smb))
return 0;
- ret = smb347_read(smb, STAT_C);
+ ret = regmap_read(smb->regmap, STAT_C, &val);
if (ret < 0)
return 0;
- return (ret & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
+ return (val & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
}
static int smb347_charging_set(struct smb347_charger *smb, bool enable)
@@ -316,27 +291,17 @@ static int smb347_charging_set(struct smb347_charger *smb, bool enable)
int ret = 0;
if (smb->pdata->enable_control != SMB347_CHG_ENABLE_SW) {
- dev_dbg(&smb->client->dev,
- "charging enable/disable in SW disabled\n");
+ dev_dbg(smb->dev, "charging enable/disable in SW disabled\n");
return 0;
}
mutex_lock(&smb->lock);
if (smb->charging_enabled != enable) {
- ret = smb347_read(smb, CMD_A);
- if (ret < 0)
- goto out;
-
- smb->charging_enabled = enable;
-
- if (enable)
- ret |= CMD_A_CHG_ENABLED;
- else
- ret &= ~CMD_A_CHG_ENABLED;
-
- ret = smb347_write(smb, CMD_A, ret);
+ ret = regmap_update_bits(smb->regmap, CMD_A, CMD_A_CHG_ENABLED,
+ enable ? CMD_A_CHG_ENABLED : 0);
+ if (!ret)
+ smb->charging_enabled = enable;
}
-out:
mutex_unlock(&smb->lock);
return ret;
}
@@ -351,7 +316,7 @@ static inline int smb347_charging_disable(struct smb347_charger *smb)
return smb347_charging_set(smb, false);
}
-static int smb347_update_online(struct smb347_charger *smb)
+static int smb347_start_stop_charging(struct smb347_charger *smb)
{
int ret;
@@ -360,16 +325,14 @@ static int smb347_update_online(struct smb347_charger *smb)
* disable or enable the charging. We do it manually because it
* depends on how the platform has configured the valid inputs.
*/
- if (smb347_is_online(smb)) {
+ if (smb347_is_ps_online(smb)) {
ret = smb347_charging_enable(smb);
if (ret < 0)
- dev_err(&smb->client->dev,
- "failed to enable charging\n");
+ dev_err(smb->dev, "failed to enable charging\n");
} else {
ret = smb347_charging_disable(smb);
if (ret < 0)
- dev_err(&smb->client->dev,
- "failed to disable charging\n");
+ dev_err(smb->dev, "failed to disable charging\n");
}
return ret;
@@ -377,112 +340,120 @@ static int smb347_update_online(struct smb347_charger *smb)
static int smb347_set_charge_current(struct smb347_charger *smb)
{
- int ret, val;
-
- ret = smb347_read(smb, CFG_CHARGE_CURRENT);
- if (ret < 0)
- return ret;
+ int ret;
if (smb->pdata->max_charge_current) {
- val = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
+ ret = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
smb->pdata->max_charge_current);
- if (val < 0)
- return val;
+ if (ret < 0)
+ return ret;
- ret &= ~CFG_CHARGE_CURRENT_FCC_MASK;
- ret |= val << CFG_CHARGE_CURRENT_FCC_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+ CFG_CHARGE_CURRENT_FCC_MASK,
+ ret << CFG_CHARGE_CURRENT_FCC_SHIFT);
+ if (ret < 0)
+ return ret;
}
if (smb->pdata->pre_charge_current) {
- val = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
+ ret = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
smb->pdata->pre_charge_current);
- if (val < 0)
- return val;
+ if (ret < 0)
+ return ret;
- ret &= ~CFG_CHARGE_CURRENT_PCC_MASK;
- ret |= val << CFG_CHARGE_CURRENT_PCC_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+ CFG_CHARGE_CURRENT_PCC_MASK,
+ ret << CFG_CHARGE_CURRENT_PCC_SHIFT);
+ if (ret < 0)
+ return ret;
}
if (smb->pdata->termination_current) {
- val = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
+ ret = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
smb->pdata->termination_current);
- if (val < 0)
- return val;
+ if (ret < 0)
+ return ret;
- ret &= ~CFG_CHARGE_CURRENT_TC_MASK;
- ret |= val;
+ ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+ CFG_CHARGE_CURRENT_TC_MASK, ret);
+ if (ret < 0)
+ return ret;
}
- return smb347_write(smb, CFG_CHARGE_CURRENT, ret);
+ return 0;
}
static int smb347_set_current_limits(struct smb347_charger *smb)
{
- int ret, val;
-
- ret = smb347_read(smb, CFG_CURRENT_LIMIT);
- if (ret < 0)
- return ret;
+ int ret;
if (smb->pdata->mains_current_limit) {
- val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+ ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
smb->pdata->mains_current_limit);
- if (val < 0)
- return val;
+ if (ret < 0)
+ return ret;
- ret &= ~CFG_CURRENT_LIMIT_DC_MASK;
- ret |= val << CFG_CURRENT_LIMIT_DC_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
+ CFG_CURRENT_LIMIT_DC_MASK,
+ ret << CFG_CURRENT_LIMIT_DC_SHIFT);
+ if (ret < 0)
+ return ret;
}
if (smb->pdata->usb_hc_current_limit) {
- val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+ ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
smb->pdata->usb_hc_current_limit);
- if (val < 0)
- return val;
+ if (ret < 0)
+ return ret;
- ret &= ~CFG_CURRENT_LIMIT_USB_MASK;
- ret |= val;
+ ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
+ CFG_CURRENT_LIMIT_USB_MASK, ret);
+ if (ret < 0)
+ return ret;
}
- return smb347_write(smb, CFG_CURRENT_LIMIT, ret);
+ return 0;
}
static int smb347_set_voltage_limits(struct smb347_charger *smb)
{
- int ret, val;
-
- ret = smb347_read(smb, CFG_FLOAT_VOLTAGE);
- if (ret < 0)
- return ret;
+ int ret;
if (smb->pdata->pre_to_fast_voltage) {
- val = smb->pdata->pre_to_fast_voltage;
+ ret = smb->pdata->pre_to_fast_voltage;
/* uV */
- val = clamp_val(val, 2400000, 3000000) - 2400000;
- val /= 200000;
+ ret = clamp_val(ret, 2400000, 3000000) - 2400000;
+ ret /= 200000;
- ret &= ~CFG_FLOAT_VOLTAGE_THRESHOLD_MASK;
- ret |= val << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
+ CFG_FLOAT_VOLTAGE_THRESHOLD_MASK,
+ ret << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT);
+ if (ret < 0)
+ return ret;
}
if (smb->pdata->max_charge_voltage) {
- val = smb->pdata->max_charge_voltage;
+ ret = smb->pdata->max_charge_voltage;
/* uV */
- val = clamp_val(val, 3500000, 4500000) - 3500000;
- val /= 20000;
+ ret = clamp_val(ret, 3500000, 4500000) - 3500000;
+ ret /= 20000;
- ret |= val;
+ ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
+ CFG_FLOAT_VOLTAGE_FLOAT_MASK, ret);
+ if (ret < 0)
+ return ret;
}
- return smb347_write(smb, CFG_FLOAT_VOLTAGE, ret);
+ return 0;
}
static int smb347_set_temp_limits(struct smb347_charger *smb)
{
bool enable_therm_monitor = false;
- int ret, val;
+ int ret = 0;
+ int val;
if (smb->pdata->chip_temp_threshold) {
val = smb->pdata->chip_temp_threshold;
@@ -491,22 +462,13 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
val = clamp_val(val, 100, 130) - 100;
val /= 10;
- ret = smb347_read(smb, CFG_OTG);
- if (ret < 0)
- return ret;
-
- ret &= ~CFG_OTG_TEMP_THRESHOLD_MASK;
- ret |= val << CFG_OTG_TEMP_THRESHOLD_SHIFT;
-
- ret = smb347_write(smb, CFG_OTG, ret);
+ ret = regmap_update_bits(smb->regmap, CFG_OTG,
+ CFG_OTG_TEMP_THRESHOLD_MASK,
+ val << CFG_OTG_TEMP_THRESHOLD_SHIFT);
if (ret < 0)
return ret;
}
- ret = smb347_read(smb, CFG_TEMP_LIMIT);
- if (ret < 0)
- return ret;
-
if (smb->pdata->soft_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
val = smb->pdata->soft_cold_temp_limit;
@@ -515,8 +477,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
/* this goes from higher to lower so invert the value */
val = ~val & 0x3;
- ret &= ~CFG_TEMP_LIMIT_SOFT_COLD_MASK;
- ret |= val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+ CFG_TEMP_LIMIT_SOFT_COLD_MASK,
+ val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT);
+ if (ret < 0)
+ return ret;
enable_therm_monitor = true;
}
@@ -527,8 +492,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
val = clamp_val(val, 40, 55) - 40;
val /= 5;
- ret &= ~CFG_TEMP_LIMIT_SOFT_HOT_MASK;
- ret |= val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+ CFG_TEMP_LIMIT_SOFT_HOT_MASK,
+ val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT);
+ if (ret < 0)
+ return ret;
enable_therm_monitor = true;
}
@@ -541,8 +509,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
/* this goes from higher to lower so invert the value */
val = ~val & 0x3;
- ret &= ~CFG_TEMP_LIMIT_HARD_COLD_MASK;
- ret |= val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+ CFG_TEMP_LIMIT_HARD_COLD_MASK,
+ val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT);
+ if (ret < 0)
+ return ret;
enable_therm_monitor = true;
}
@@ -553,16 +524,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
val = clamp_val(val, 50, 65) - 50;
val /= 5;
- ret &= ~CFG_TEMP_LIMIT_HARD_HOT_MASK;
- ret |= val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+ CFG_TEMP_LIMIT_HARD_HOT_MASK,
+ val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT);
+ if (ret < 0)
+ return ret;
enable_therm_monitor = true;
}
- ret = smb347_write(smb, CFG_TEMP_LIMIT, ret);
- if (ret < 0)
- return ret;
-
/*
* If any of the temperature limits are set, we also enable the
* thermistor monitoring.
@@ -574,25 +544,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
* depending on the configuration.
*/
if (enable_therm_monitor) {
- ret = smb347_read(smb, CFG_THERM);
- if (ret < 0)
- return ret;
-
- ret &= ~CFG_THERM_MONITOR_DISABLED;
-
- ret = smb347_write(smb, CFG_THERM, ret);
+ ret = regmap_update_bits(smb->regmap, CFG_THERM,
+ CFG_THERM_MONITOR_DISABLED, 0);
if (ret < 0)
return ret;
}
if (smb->pdata->suspend_on_hard_temp_limit) {
- ret = smb347_read(smb, CFG_SYSOK);
- if (ret < 0)
- return ret;
-
- ret &= ~CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED;
-
- ret = smb347_write(smb, CFG_SYSOK, ret);
+ ret = regmap_update_bits(smb->regmap, CFG_SYSOK,
+ CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED, 0);
if (ret < 0)
return ret;
}
@@ -601,17 +561,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
SMB347_SOFT_TEMP_COMPENSATE_DEFAULT) {
val = smb->pdata->soft_temp_limit_compensation & 0x3;
- ret = smb347_read(smb, CFG_THERM);
+ ret = regmap_update_bits(smb->regmap, CFG_THERM,
+ CFG_THERM_SOFT_HOT_COMPENSATION_MASK,
+ val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT);
if (ret < 0)
return ret;
- ret &= ~CFG_THERM_SOFT_HOT_COMPENSATION_MASK;
- ret |= val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT;
-
- ret &= ~CFG_THERM_SOFT_COLD_COMPENSATION_MASK;
- ret |= val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT;
-
- ret = smb347_write(smb, CFG_THERM, ret);
+ ret = regmap_update_bits(smb->regmap, CFG_THERM,
+ CFG_THERM_SOFT_COLD_COMPENSATION_MASK,
+ val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT);
if (ret < 0)
return ret;
}
@@ -622,14 +580,9 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
if (val < 0)
return val;
- ret = smb347_read(smb, CFG_OTG);
- if (ret < 0)
- return ret;
-
- ret &= ~CFG_OTG_CC_COMPENSATION_MASK;
- ret |= (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT;
-
- ret = smb347_write(smb, CFG_OTG, ret);
+ ret = regmap_update_bits(smb->regmap, CFG_OTG,
+ CFG_OTG_CC_COMPENSATION_MASK,
+ (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT);
if (ret < 0)
return ret;
}
@@ -648,22 +601,13 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
*/
static int smb347_set_writable(struct smb347_charger *smb, bool writable)
{
- int ret;
-
- ret = smb347_read(smb, CMD_A);
- if (ret < 0)
- return ret;
-
- if (writable)
- ret |= CMD_A_ALLOW_WRITE;
- else
- ret &= ~CMD_A_ALLOW_WRITE;
-
- return smb347_write(smb, CMD_A, ret);
+ return regmap_update_bits(smb->regmap, CMD_A, CMD_A_ALLOW_WRITE,
+ writable ? CMD_A_ALLOW_WRITE : 0);
}
static int smb347_hw_init(struct smb347_charger *smb)
{
+ unsigned int val;
int ret;
ret = smb347_set_writable(smb, true);
@@ -692,34 +636,19 @@ static int smb347_hw_init(struct smb347_charger *smb)
/* If USB charging is disabled we put the USB in suspend mode */
if (!smb->pdata->use_usb) {
- ret = smb347_read(smb, CMD_A);
- if (ret < 0)
- goto fail;
-
- ret |= CMD_A_SUSPEND_ENABLED;
-
- ret = smb347_write(smb, CMD_A, ret);
+ ret = regmap_update_bits(smb->regmap, CMD_A,
+ CMD_A_SUSPEND_ENABLED,
+ CMD_A_SUSPEND_ENABLED);
if (ret < 0)
goto fail;
}
- ret = smb347_read(smb, CFG_OTHER);
- if (ret < 0)
- goto fail;
-
/*
* If configured by platform data, we enable hardware Auto-OTG
* support for driving VBUS. Otherwise we disable it.
*/
- ret &= ~CFG_OTHER_RID_MASK;
- if (smb->pdata->use_usb_otg)
- ret |= CFG_OTHER_RID_ENABLED_AUTO_OTG;
-
- ret = smb347_write(smb, CFG_OTHER, ret);
- if (ret < 0)
- goto fail;
-
- ret = smb347_read(smb, CFG_PIN);
+ ret = regmap_update_bits(smb->regmap, CFG_OTHER, CFG_OTHER_RID_MASK,
+ smb->pdata->use_usb_otg ? CFG_OTHER_RID_ENABLED_AUTO_OTG : 0);
if (ret < 0)
goto fail;
@@ -728,32 +657,33 @@ static int smb347_hw_init(struct smb347_charger *smb)
* command register unless pin control is specified in the platform
* data.
*/
- ret &= ~CFG_PIN_EN_CTRL_MASK;
-
switch (smb->pdata->enable_control) {
- case SMB347_CHG_ENABLE_SW:
- /* Do nothing, 0 means i2c control */
- break;
case SMB347_CHG_ENABLE_PIN_ACTIVE_LOW:
- ret |= CFG_PIN_EN_CTRL_ACTIVE_LOW;
+ val = CFG_PIN_EN_CTRL_ACTIVE_LOW;
break;
case SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH:
- ret |= CFG_PIN_EN_CTRL_ACTIVE_HIGH;
+ val = CFG_PIN_EN_CTRL_ACTIVE_HIGH;
+ break;
+ default:
+ val = 0;
break;
}
- /* Disable Automatic Power Source Detection (APSD) interrupt. */
- ret &= ~CFG_PIN_EN_APSD_IRQ;
+ ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CTRL_MASK,
+ val);
+ if (ret < 0)
+ goto fail;
- ret = smb347_write(smb, CFG_PIN, ret);
+ /* Disable Automatic Power Source Detection (APSD) interrupt. */
+ ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_APSD_IRQ, 0);
if (ret < 0)
goto fail;
- ret = smb347_update_status(smb);
+ ret = smb347_update_ps_status(smb);
if (ret < 0)
goto fail;
- ret = smb347_update_online(smb);
+ ret = smb347_start_stop_charging(smb);
fail:
smb347_set_writable(smb, false);
@@ -763,24 +693,25 @@ fail:
static irqreturn_t smb347_interrupt(int irq, void *data)
{
struct smb347_charger *smb = data;
- int stat_c, irqstat_e, irqstat_c;
- irqreturn_t ret = IRQ_NONE;
+ unsigned int stat_c, irqstat_e, irqstat_c;
+ bool handled = false;
+ int ret;
- stat_c = smb347_read(smb, STAT_C);
- if (stat_c < 0) {
- dev_warn(&smb->client->dev, "reading STAT_C failed\n");
+ ret = regmap_read(smb->regmap, STAT_C, &stat_c);
+ if (ret < 0) {
+ dev_warn(smb->dev, "reading STAT_C failed\n");
return IRQ_NONE;
}
- irqstat_c = smb347_read(smb, IRQSTAT_C);
- if (irqstat_c < 0) {
- dev_warn(&smb->client->dev, "reading IRQSTAT_C failed\n");
+ ret = regmap_read(smb->regmap, IRQSTAT_C, &irqstat_c);
+ if (ret < 0) {
+ dev_warn(smb->dev, "reading IRQSTAT_C failed\n");
return IRQ_NONE;
}
- irqstat_e = smb347_read(smb, IRQSTAT_E);
- if (irqstat_e < 0) {
- dev_warn(&smb->client->dev, "reading IRQSTAT_E failed\n");
+ ret = regmap_read(smb->regmap, IRQSTAT_E, &irqstat_e);
+ if (ret < 0) {
+ dev_warn(smb->dev, "reading IRQSTAT_E failed\n");
return IRQ_NONE;
}
@@ -789,13 +720,11 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
* disable charging.
*/
if (stat_c & STAT_C_CHARGER_ERROR) {
- dev_err(&smb->client->dev,
- "error in charger, disabling charging\n");
+ dev_err(smb->dev, "error in charger, disabling charging\n");
smb347_charging_disable(smb);
power_supply_changed(&smb->battery);
-
- ret = IRQ_HANDLED;
+ handled = true;
}
/*
@@ -806,7 +735,7 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
if (irqstat_c & (IRQSTAT_C_TERMINATION_IRQ | IRQSTAT_C_TAPER_IRQ)) {
if (irqstat_c & IRQSTAT_C_TERMINATION_STAT)
power_supply_changed(&smb->battery);
- ret = IRQ_HANDLED;
+ handled = true;
}
/*
@@ -814,15 +743,17 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
* was connected or disconnected.
*/
if (irqstat_e & (IRQSTAT_E_USBIN_UV_IRQ | IRQSTAT_E_DCIN_UV_IRQ)) {
- if (smb347_update_status(smb) > 0) {
- smb347_update_online(smb);
- power_supply_changed(&smb->mains);
- power_supply_changed(&smb->usb);
+ if (smb347_update_ps_status(smb) > 0) {
+ smb347_start_stop_charging(smb);
+ if (smb->pdata->use_mains)
+ power_supply_changed(&smb->mains);
+ if (smb->pdata->use_usb)
+ power_supply_changed(&smb->usb);
}
- ret = IRQ_HANDLED;
+ handled = true;
}
- return ret;
+ return handled ? IRQ_HANDLED : IRQ_NONE;
}
static int smb347_irq_set(struct smb347_charger *smb, bool enable)
@@ -839,41 +770,18 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
* - termination current reached
* - charger error
*/
- if (enable) {
- ret = smb347_write(smb, CFG_FAULT_IRQ, CFG_FAULT_IRQ_DCIN_UV);
- if (ret < 0)
- goto fail;
-
- ret = smb347_write(smb, CFG_STATUS_IRQ,
- CFG_STATUS_IRQ_TERMINATION_OR_TAPER);
- if (ret < 0)
- goto fail;
-
- ret = smb347_read(smb, CFG_PIN);
- if (ret < 0)
- goto fail;
-
- ret |= CFG_PIN_EN_CHARGER_ERROR;
-
- ret = smb347_write(smb, CFG_PIN, ret);
- } else {
- ret = smb347_write(smb, CFG_FAULT_IRQ, 0);
- if (ret < 0)
- goto fail;
-
- ret = smb347_write(smb, CFG_STATUS_IRQ, 0);
- if (ret < 0)
- goto fail;
-
- ret = smb347_read(smb, CFG_PIN);
- if (ret < 0)
- goto fail;
-
- ret &= ~CFG_PIN_EN_CHARGER_ERROR;
+ ret = regmap_update_bits(smb->regmap, CFG_FAULT_IRQ, 0xff,
+ enable ? CFG_FAULT_IRQ_DCIN_UV : 0);
+ if (ret < 0)
+ goto fail;
- ret = smb347_write(smb, CFG_PIN, ret);
- }
+ ret = regmap_update_bits(smb->regmap, CFG_STATUS_IRQ, 0xff,
+ enable ? CFG_STATUS_IRQ_TERMINATION_OR_TAPER : 0);
+ if (ret < 0)
+ goto fail;
+ ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CHARGER_ERROR,
+ enable ? CFG_PIN_EN_CHARGER_ERROR : 0);
fail:
smb347_set_writable(smb, false);
return ret;
@@ -889,18 +797,18 @@ static inline int smb347_irq_disable(struct smb347_charger *smb)
return smb347_irq_set(smb, false);
}
-static int smb347_irq_init(struct smb347_charger *smb)
+static int smb347_irq_init(struct smb347_charger *smb,
+ struct i2c_client *client)
{
const struct smb347_charger_platform_data *pdata = smb->pdata;
int ret, irq = gpio_to_irq(pdata->irq_gpio);
- ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, smb->client->name);
+ ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
if (ret < 0)
goto fail;
ret = request_threaded_irq(irq, NULL, smb347_interrupt,
- IRQF_TRIGGER_FALLING, smb->client->name,
- smb);
+ IRQF_TRIGGER_FALLING, client->name, smb);
if (ret < 0)
goto fail_gpio;
@@ -912,23 +820,14 @@ static int smb347_irq_init(struct smb347_charger *smb)
* Configure the STAT output to be suitable for interrupts: disable
* all other output (except interrupts) and make it active low.
*/
- ret = smb347_read(smb, CFG_STAT);
- if (ret < 0)
- goto fail_readonly;
-
- ret &= ~CFG_STAT_ACTIVE_HIGH;
- ret |= CFG_STAT_DISABLED;
-
- ret = smb347_write(smb, CFG_STAT, ret);
- if (ret < 0)
- goto fail_readonly;
-
- ret = smb347_irq_enable(smb);
+ ret = regmap_update_bits(smb->regmap, CFG_STAT,
+ CFG_STAT_ACTIVE_HIGH | CFG_STAT_DISABLED,
+ CFG_STAT_DISABLED);
if (ret < 0)
goto fail_readonly;
smb347_set_writable(smb, false);
- smb->client->irq = irq;
+ client->irq = irq;
return 0;
fail_readonly:
@@ -938,7 +837,7 @@ fail_irq:
fail_gpio:
gpio_free(pdata->irq_gpio);
fail:
- smb->client->irq = 0;
+ client->irq = 0;
return ret;
}
@@ -987,13 +886,13 @@ static int smb347_battery_get_property(struct power_supply *psy,
const struct smb347_charger_platform_data *pdata = smb->pdata;
int ret;
- ret = smb347_update_status(smb);
+ ret = smb347_update_ps_status(smb);
if (ret < 0)
return ret;
switch (prop) {
case POWER_SUPPLY_PROP_STATUS:
- if (!smb347_is_online(smb)) {
+ if (!smb347_is_ps_online(smb)) {
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
break;
}
@@ -1004,7 +903,7 @@ static int smb347_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CHARGE_TYPE:
- if (!smb347_is_online(smb))
+ if (!smb347_is_ps_online(smb))
return -ENODATA;
/*
@@ -1036,44 +935,6 @@ static int smb347_battery_get_property(struct power_supply *psy,
val->intval = pdata->battery_info.voltage_max_design;
break;
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- if (!smb347_is_online(smb))
- return -ENODATA;
- ret = smb347_read(smb, STAT_A);
- if (ret < 0)
- return ret;
-
- ret &= STAT_A_FLOAT_VOLTAGE_MASK;
- if (ret > 0x3d)
- ret = 0x3d;
-
- val->intval = 3500000 + ret * 20000;
- break;
-
- case POWER_SUPPLY_PROP_CURRENT_NOW:
- if (!smb347_is_online(smb))
- return -ENODATA;
-
- ret = smb347_read(smb, STAT_B);
- if (ret < 0)
- return ret;
-
- /*
- * The current value is composition of FCC and PCC values
- * and we can detect which table to use from bit 5.
- */
- if (ret & 0x20) {
- val->intval = hw_to_current(fcc_tbl,
- ARRAY_SIZE(fcc_tbl),
- ret & 7);
- } else {
- ret >>= 3;
- val->intval = hw_to_current(pcc_tbl,
- ARRAY_SIZE(pcc_tbl),
- ret & 7);
- }
- break;
-
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
val->intval = pdata->battery_info.charge_full_design;
break;
@@ -1095,64 +956,58 @@ static enum power_supply_property smb347_battery_properties[] = {
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_MODEL_NAME,
};
-static int smb347_debugfs_show(struct seq_file *s, void *data)
+static bool smb347_volatile_reg(struct device *dev, unsigned int reg)
{
- struct smb347_charger *smb = s->private;
- int ret;
- u8 reg;
-
- seq_printf(s, "Control registers:\n");
- seq_printf(s, "==================\n");
- for (reg = CFG_CHARGE_CURRENT; reg <= CFG_ADDRESS; reg++) {
- ret = smb347_read(smb, reg);
- seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
- }
- seq_printf(s, "\n");
-
- seq_printf(s, "Command registers:\n");
- seq_printf(s, "==================\n");
- ret = smb347_read(smb, CMD_A);
- seq_printf(s, "0x%02x:\t0x%02x\n", CMD_A, ret);
- ret = smb347_read(smb, CMD_B);
- seq_printf(s, "0x%02x:\t0x%02x\n", CMD_B, ret);
- ret = smb347_read(smb, CMD_C);
- seq_printf(s, "0x%02x:\t0x%02x\n", CMD_C, ret);
- seq_printf(s, "\n");
-
- seq_printf(s, "Interrupt status registers:\n");
- seq_printf(s, "===========================\n");
- for (reg = IRQSTAT_A; reg <= IRQSTAT_F; reg++) {
- ret = smb347_read(smb, reg);
- seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
- }
- seq_printf(s, "\n");
-
- seq_printf(s, "Status registers:\n");
- seq_printf(s, "=================\n");
- for (reg = STAT_A; reg <= STAT_E; reg++) {
- ret = smb347_read(smb, reg);
- seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
+ switch (reg) {
+ case IRQSTAT_A:
+ case IRQSTAT_C:
+ case IRQSTAT_E:
+ case IRQSTAT_F:
+ case STAT_A:
+ case STAT_B:
+ case STAT_C:
+ case STAT_E:
+ return true;
}
- return 0;
+ return false;
}
-static int smb347_debugfs_open(struct inode *inode, struct file *file)
+static bool smb347_readable_reg(struct device *dev, unsigned int reg)
{
- return single_open(file, smb347_debugfs_show, inode->i_private);
+ switch (reg) {
+ case CFG_CHARGE_CURRENT:
+ case CFG_CURRENT_LIMIT:
+ case CFG_FLOAT_VOLTAGE:
+ case CFG_STAT:
+ case CFG_PIN:
+ case CFG_THERM:
+ case CFG_SYSOK:
+ case CFG_OTHER:
+ case CFG_OTG:
+ case CFG_TEMP_LIMIT:
+ case CFG_FAULT_IRQ:
+ case CFG_STATUS_IRQ:
+ case CFG_ADDRESS:
+ case CMD_A:
+ case CMD_B:
+ case CMD_C:
+ return true;
+ }
+
+ return smb347_volatile_reg(dev, reg);
}
-static const struct file_operations smb347_debugfs_fops = {
- .open = smb347_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+static const struct regmap_config smb347_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = SMB347_MAX_REGISTER,
+ .volatile_reg = smb347_volatile_reg,
+ .readable_reg = smb347_readable_reg,
};
static int smb347_probe(struct i2c_client *client,
@@ -1178,28 +1033,45 @@ static int smb347_probe(struct i2c_client *client,
i2c_set_clientdata(client, smb);
mutex_init(&smb->lock);
- smb->client = client;
+ smb->dev = &client->dev;
smb->pdata = pdata;
+ smb->regmap = devm_regmap_init_i2c(client, &smb347_regmap);
+ if (IS_ERR(smb->regmap))
+ return PTR_ERR(smb->regmap);
+
ret = smb347_hw_init(smb);
if (ret < 0)
return ret;
- smb->mains.name = "smb347-mains";
- smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
- smb->mains.get_property = smb347_mains_get_property;
- smb->mains.properties = smb347_mains_properties;
- smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
- smb->mains.supplied_to = battery;
- smb->mains.num_supplicants = ARRAY_SIZE(battery);
-
- smb->usb.name = "smb347-usb";
- smb->usb.type = POWER_SUPPLY_TYPE_USB;
- smb->usb.get_property = smb347_usb_get_property;
- smb->usb.properties = smb347_usb_properties;
- smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
- smb->usb.supplied_to = battery;
- smb->usb.num_supplicants = ARRAY_SIZE(battery);
+ if (smb->pdata->use_mains) {
+ smb->mains.name = "smb347-mains";
+ smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
+ smb->mains.get_property = smb347_mains_get_property;
+ smb->mains.properties = smb347_mains_properties;
+ smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
+ smb->mains.supplied_to = battery;
+ smb->mains.num_supplicants = ARRAY_SIZE(battery);
+ ret = power_supply_register(dev, &smb->mains);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (smb->pdata->use_usb) {
+ smb->usb.name = "smb347-usb";
+ smb->usb.type = POWER_SUPPLY_TYPE_USB;
+ smb->usb.get_property = smb347_usb_get_property;
+ smb->usb.properties = smb347_usb_properties;
+ smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
+ smb->usb.supplied_to = battery;
+ smb->usb.num_supplicants = ARRAY_SIZE(battery);
+ ret = power_supply_register(dev, &smb->usb);
+ if (ret < 0) {
+ if (smb->pdata->use_mains)
+ power_supply_unregister(&smb->mains);
+ return ret;
+ }
+ }
smb->battery.name = "smb347-battery";
smb->battery.type = POWER_SUPPLY_TYPE_BATTERY;
@@ -1207,20 +1079,13 @@ static int smb347_probe(struct i2c_client *client,
smb->battery.properties = smb347_battery_properties;
smb->battery.num_properties = ARRAY_SIZE(smb347_battery_properties);
- ret = power_supply_register(dev, &smb->mains);
- if (ret < 0)
- return ret;
-
- ret = power_supply_register(dev, &smb->usb);
- if (ret < 0) {
- power_supply_unregister(&smb->mains);
- return ret;
- }
ret = power_supply_register(dev, &smb->battery);
if (ret < 0) {
- power_supply_unregister(&smb->usb);
- power_supply_unregister(&smb->mains);
+ if (smb->pdata->use_usb)
+ power_supply_unregister(&smb->usb);
+ if (smb->pdata->use_mains)
+ power_supply_unregister(&smb->mains);
return ret;
}
@@ -1229,15 +1094,15 @@ static int smb347_probe(struct i2c_client *client,
* interrupt support here.
*/
if (pdata->irq_gpio >= 0) {
- ret = smb347_irq_init(smb);
+ ret = smb347_irq_init(smb, client);
if (ret < 0) {
dev_warn(dev, "failed to initialize IRQ: %d\n", ret);
dev_warn(dev, "disabling IRQ support\n");
+ } else {
+ smb347_irq_enable(smb);
}
}
- smb->dentry = debugfs_create_file("smb347-regs", S_IRUSR, NULL, smb,
- &smb347_debugfs_fops);
return 0;
}
@@ -1245,9 +1110,6 @@ static int smb347_remove(struct i2c_client *client)
{
struct smb347_charger *smb = i2c_get_clientdata(client);
- if (!IS_ERR_OR_NULL(smb->dentry))
- debugfs_remove(smb->dentry);
-
if (client->irq) {
smb347_irq_disable(smb);
free_irq(client->irq, smb);
@@ -1255,8 +1117,10 @@ static int smb347_remove(struct i2c_client *client)
}
power_supply_unregister(&smb->battery);
- power_supply_unregister(&smb->usb);
- power_supply_unregister(&smb->mains);
+ if (smb->pdata->use_usb)
+ power_supply_unregister(&smb->usb);
+ if (smb->pdata->use_mains)
+ power_supply_unregister(&smb->mains);
return 0;
}
@@ -1275,17 +1139,7 @@ static struct i2c_driver smb347_driver = {
.id_table = smb347_id,
};
-static int __init smb347_init(void)
-{
- return i2c_add_driver(&smb347_driver);
-}
-module_init(smb347_init);
-
-static void __exit smb347_exit(void)
-{
- i2c_del_driver(&smb347_driver);
-}
-module_exit(smb347_exit);
+module_i2c_driver(smb347_driver);
MODULE_AUTHOR("Bruce E. Robertson <bruce.e.robertson@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index 987332b71d8d..fc1ad9551182 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -565,7 +565,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
goto err_usb;
}
- irq = platform_get_irq_byname(pdev, "SYSLO");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
IRQF_TRIGGER_RISING, "System power low",
power);
@@ -575,7 +575,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
goto err_battery;
}
- irq = platform_get_irq_byname(pdev, "PWR SRC");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
IRQF_TRIGGER_RISING, "Power source",
power);
@@ -586,7 +586,9 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
}
for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
- irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
+ irq = wm831x_irq(wm831x,
+ platform_get_irq_byname(pdev,
+ wm831x_bat_irqs[i]));
ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
IRQF_TRIGGER_RISING,
wm831x_bat_irqs[i],
@@ -606,10 +608,10 @@ err_bat_irq:
irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
free_irq(irq, power);
}
- irq = platform_get_irq_byname(pdev, "PWR SRC");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
free_irq(irq, power);
err_syslo:
- irq = platform_get_irq_byname(pdev, "SYSLO");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
free_irq(irq, power);
err_battery:
if (power->have_battery)
@@ -626,17 +628,20 @@ err_kmalloc:
static __devexit int wm831x_power_remove(struct platform_device *pdev)
{
struct wm831x_power *wm831x_power = platform_get_drvdata(pdev);
+ struct wm831x *wm831x = wm831x_power->wm831x;
int irq, i;
for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
- irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
+ irq = wm831x_irq(wm831x,
+ platform_get_irq_byname(pdev,
+ wm831x_bat_irqs[i]));
free_irq(irq, wm831x_power);
}
- irq = platform_get_irq_byname(pdev, "PWR SRC");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
free_irq(irq, wm831x_power);
- irq = platform_get_irq_byname(pdev, "SYSLO");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
free_irq(irq, wm831x_power);
if (wm831x_power->have_battery)
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index bc8719238793..6194d35ebb97 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -22,6 +22,20 @@ config RAPIDIO_ENABLE_RX_TX_PORTS
ports for Input/Output direction to allow other traffic
than Maintenance transfers.
+config RAPIDIO_DMA_ENGINE
+ bool "DMA Engine support for RapidIO"
+ depends on RAPIDIO
+ select DMADEVICES
+ select DMA_ENGINE
+ help
+ Say Y here if you want to use the DMA Engine framework for RapidIO
+ data transfers to/from target RIO devices. RapidIO uses NREAD and
+ NWRITE (NWRITE_R, SWRITE) requests to transfer data between local
+ memory and memory on a remote target device. You need a DMA controller
+ capable of performing data transfers to/from RapidIO.
+
+ If you are unsure about this, say Y here.
+
config RAPIDIO_DEBUG
bool "RapidIO subsystem debug messages"
depends on RAPIDIO
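RAPIDIO_DMA_ENGINE only selects the generic dmaengine core; the tsi721 additions later in this series register the controller's BDMA channels with it. A RapidIO client then obtains a channel through the usual dmaengine request path. The sketch below shows only that generic step; the DMA_SLAVE capability and the NULL filter are assumptions, and the RapidIO-specific descriptor preparation is not shown:

	#include <linux/dmaengine.h>

	static struct dma_chan *foo_get_rio_dma_chan(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);	/* assumed capability for RIO transfers */

		/* NULL filter: accept any channel that advertises the capability. */
		return dma_request_channel(mask, NULL, NULL);
	}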
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile
index 3b7b4e2dff7c..7b62860f34f8 100644
--- a/drivers/rapidio/devices/Makefile
+++ b/drivers/rapidio/devices/Makefile
@@ -3,3 +3,6 @@
#
obj-$(CONFIG_RAPIDIO_TSI721) += tsi721.o
+ifeq ($(CONFIG_RAPIDIO_DMA_ENGINE),y)
+obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_dma.o
+endif
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 30d2072f480b..722246cf20ab 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -108,6 +108,7 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
u16 destid, u8 hopcount, u32 offset, int len,
u32 *data, int do_wr)
{
+ void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
struct tsi721_dma_desc *bd_ptr;
u32 rd_count, swr_ptr, ch_stat;
int i, err = 0;
@@ -116,10 +117,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
return -EINVAL;
- bd_ptr = priv->bdma[TSI721_DMACH_MAINT].bd_base;
+ bd_ptr = priv->mdma.bd_base;
- rd_count = ioread32(
- priv->regs + TSI721_DMAC_DRDCNT(TSI721_DMACH_MAINT));
+ rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
/* Initialize DMA descriptor */
bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
@@ -134,19 +134,18 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
mb();
/* Start DMA operation */
- iowrite32(rd_count + 2,
- priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
- ioread32(priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+ iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT);
+ ioread32(regs + TSI721_DMAC_DWRCNT);
i = 0;
/* Wait until DMA transfer is finished */
- while ((ch_stat = ioread32(priv->regs +
- TSI721_DMAC_STS(TSI721_DMACH_MAINT))) & TSI721_DMAC_STS_RUN) {
+ while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
+ & TSI721_DMAC_STS_RUN) {
udelay(1);
if (++i >= 5000000) {
dev_dbg(&priv->pdev->dev,
"%s : DMA[%d] read timeout ch_status=%x\n",
- __func__, TSI721_DMACH_MAINT, ch_stat);
+ __func__, priv->mdma.ch_id, ch_stat);
if (!do_wr)
*data = 0xffffffff;
err = -EIO;
@@ -162,13 +161,10 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
__func__, ch_stat);
dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
- iowrite32(TSI721_DMAC_INT_ALL,
- priv->regs + TSI721_DMAC_INT(TSI721_DMACH_MAINT));
- iowrite32(TSI721_DMAC_CTL_INIT,
- priv->regs + TSI721_DMAC_CTL(TSI721_DMACH_MAINT));
+ iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
+ iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
udelay(10);
- iowrite32(0, priv->regs +
- TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+ iowrite32(0, regs + TSI721_DMAC_DWRCNT);
udelay(1);
if (!do_wr)
*data = 0xffffffff;
@@ -184,8 +180,8 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
* NOTE: Skipping check and clear FIFO entries because we are waiting
* for transfer to be completed.
*/
- swr_ptr = ioread32(priv->regs + TSI721_DMAC_DSWP(TSI721_DMACH_MAINT));
- iowrite32(swr_ptr, priv->regs + TSI721_DMAC_DSRP(TSI721_DMACH_MAINT));
+ swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
+ iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
err_out:
return err;
@@ -541,6 +537,22 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
tsi721_pw_handler(mport);
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ if (dev_int & TSI721_DEV_INT_BDMA_CH) {
+ int ch;
+
+ if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
+ dev_dbg(&priv->pdev->dev,
+ "IRQ from DMA channel 0x%08x\n", dev_ch_int);
+
+ for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
+ if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
+ continue;
+ tsi721_bdma_handler(&priv->bdma[ch]);
+ }
+ }
+ }
+#endif
return IRQ_HANDLED;
}
@@ -553,18 +565,26 @@ static void tsi721_interrupts_init(struct tsi721_device *priv)
priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
iowrite32(TSI721_SR_CHINT_IDBQRCV,
priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
- iowrite32(TSI721_INT_SR2PC_CHAN(IDB_QUEUE),
- priv->regs + TSI721_DEV_CHAN_INTE);
/* Enable SRIO MAC interrupts */
iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
priv->regs + TSI721_RIO_EM_DEV_INT_EN);
+ /* Enable interrupts from channels in use */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
+ (TSI721_INT_BDMA_CHAN_M &
+ ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
+#else
+ intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
+#endif
+ iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE);
+
if (priv->flags & TSI721_USING_MSIX)
intr = TSI721_DEV_INT_SRIO;
else
intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
- TSI721_DEV_INT_SMSG_CH;
+ TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
iowrite32(intr, priv->regs + TSI721_DEV_INTE);
ioread32(priv->regs + TSI721_DEV_INTE);
@@ -715,12 +735,29 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
TSI721_MSIX_OMSG_INT(i);
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ /*
+ * Initialize MSI-X entries for Block DMA Engine:
+ * this driver supports XXX DMA channels
+ * (one is reserved for SRIO maintenance transactions)
+ */
+ for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+ entries[TSI721_VECT_DMA0_DONE + i].entry =
+ TSI721_MSIX_DMACH_DONE(i);
+ entries[TSI721_VECT_DMA0_INT + i].entry =
+ TSI721_MSIX_DMACH_INT(i);
+ }
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
if (err) {
if (err > 0)
dev_info(&priv->pdev->dev,
"Only %d MSI-X vectors available, "
"not using MSI-X\n", err);
+ else
+ dev_err(&priv->pdev->dev,
+ "Failed to enable MSI-X (err=%d)\n", err);
return err;
}
@@ -760,6 +797,22 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
i, pci_name(priv->pdev));
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+ priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
+ entries[TSI721_VECT_DMA0_DONE + i].vector;
+ snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
+ IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
+ i, pci_name(priv->pdev));
+
+ priv->msix[TSI721_VECT_DMA0_INT + i].vector =
+ entries[TSI721_VECT_DMA0_INT + i].vector;
+ snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
+ IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
+ i, pci_name(priv->pdev));
+ }
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
return 0;
}
#endif /* CONFIG_PCI_MSI */
@@ -888,20 +941,34 @@ static void tsi721_doorbell_free(struct tsi721_device *priv)
priv->idb_base = NULL;
}
-static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
+/**
+ * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
+ * @priv: pointer to tsi721 private data
+ *
+ * Initialize BDMA channel allocated for RapidIO maintenance read/write
+ * request generation.
+ *
+ * Returns %0 on success or %-ENOMEM on failure.
+ */
+static int tsi721_bdma_maint_init(struct tsi721_device *priv)
{
struct tsi721_dma_desc *bd_ptr;
u64 *sts_ptr;
dma_addr_t bd_phys, sts_phys;
int sts_size;
- int bd_num = priv->bdma[chnum].bd_num;
+ int bd_num = 2;
+ void __iomem *regs;
- dev_dbg(&priv->pdev->dev, "Init Block DMA Engine, CH%d\n", chnum);
+ dev_dbg(&priv->pdev->dev,
+ "Init Block DMA Engine for Maintenance requests, CH%d\n",
+ TSI721_DMACH_MAINT);
/*
* Initialize DMA channel for maintenance requests
*/
+ priv->mdma.ch_id = TSI721_DMACH_MAINT;
+ regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
+
/* Allocate space for DMA descriptors */
bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
bd_num * sizeof(struct tsi721_dma_desc),
@@ -909,8 +976,9 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
if (!bd_ptr)
return -ENOMEM;
- priv->bdma[chnum].bd_phys = bd_phys;
- priv->bdma[chnum].bd_base = bd_ptr;
+ priv->mdma.bd_num = bd_num;
+ priv->mdma.bd_phys = bd_phys;
+ priv->mdma.bd_base = bd_ptr;
dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
bd_ptr, (unsigned long long)bd_phys);
@@ -927,13 +995,13 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
dma_free_coherent(&priv->pdev->dev,
bd_num * sizeof(struct tsi721_dma_desc),
bd_ptr, bd_phys);
- priv->bdma[chnum].bd_base = NULL;
+ priv->mdma.bd_base = NULL;
return -ENOMEM;
}
- priv->bdma[chnum].sts_phys = sts_phys;
- priv->bdma[chnum].sts_base = sts_ptr;
- priv->bdma[chnum].sts_size = sts_size;
+ priv->mdma.sts_phys = sts_phys;
+ priv->mdma.sts_base = sts_ptr;
+ priv->mdma.sts_size = sts_size;
dev_dbg(&priv->pdev->dev,
"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
@@ -946,83 +1014,61 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
/* Setup DMA descriptor pointers */
- iowrite32(((u64)bd_phys >> 32),
- priv->regs + TSI721_DMAC_DPTRH(chnum));
+ iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH);
iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
- priv->regs + TSI721_DMAC_DPTRL(chnum));
+ regs + TSI721_DMAC_DPTRL);
/* Setup descriptor status FIFO */
- iowrite32(((u64)sts_phys >> 32),
- priv->regs + TSI721_DMAC_DSBH(chnum));
+ iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
- priv->regs + TSI721_DMAC_DSBL(chnum));
+ regs + TSI721_DMAC_DSBL);
iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
- priv->regs + TSI721_DMAC_DSSZ(chnum));
+ regs + TSI721_DMAC_DSSZ);
/* Clear interrupt bits */
- iowrite32(TSI721_DMAC_INT_ALL,
- priv->regs + TSI721_DMAC_INT(chnum));
+ iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
- ioread32(priv->regs + TSI721_DMAC_INT(chnum));
+ ioread32(regs + TSI721_DMAC_INT);
/* Toggle DMA channel initialization */
- iowrite32(TSI721_DMAC_CTL_INIT, priv->regs + TSI721_DMAC_CTL(chnum));
- ioread32(priv->regs + TSI721_DMAC_CTL(chnum));
+ iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
+ ioread32(regs + TSI721_DMAC_CTL);
udelay(10);
return 0;
}
-static int tsi721_bdma_ch_free(struct tsi721_device *priv, int chnum)
+static int tsi721_bdma_maint_free(struct tsi721_device *priv)
{
u32 ch_stat;
+ struct tsi721_bdma_maint *mdma = &priv->mdma;
+ void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);
- if (priv->bdma[chnum].bd_base == NULL)
+ if (mdma->bd_base == NULL)
return 0;
/* Check if DMA channel still running */
- ch_stat = ioread32(priv->regs + TSI721_DMAC_STS(chnum));
+ ch_stat = ioread32(regs + TSI721_DMAC_STS);
if (ch_stat & TSI721_DMAC_STS_RUN)
return -EFAULT;
/* Put DMA channel into init state */
- iowrite32(TSI721_DMAC_CTL_INIT,
- priv->regs + TSI721_DMAC_CTL(chnum));
+ iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
/* Free space allocated for DMA descriptors */
dma_free_coherent(&priv->pdev->dev,
- priv->bdma[chnum].bd_num * sizeof(struct tsi721_dma_desc),
- priv->bdma[chnum].bd_base, priv->bdma[chnum].bd_phys);
- priv->bdma[chnum].bd_base = NULL;
+ mdma->bd_num * sizeof(struct tsi721_dma_desc),
+ mdma->bd_base, mdma->bd_phys);
+ mdma->bd_base = NULL;
/* Free space allocated for status FIFO */
dma_free_coherent(&priv->pdev->dev,
- priv->bdma[chnum].sts_size * sizeof(struct tsi721_dma_sts),
- priv->bdma[chnum].sts_base, priv->bdma[chnum].sts_phys);
- priv->bdma[chnum].sts_base = NULL;
- return 0;
-}
-
-static int tsi721_bdma_init(struct tsi721_device *priv)
-{
- /* Initialize BDMA channel allocated for RapidIO maintenance read/write
- * request generation
- */
- priv->bdma[TSI721_DMACH_MAINT].bd_num = 2;
- if (tsi721_bdma_ch_init(priv, TSI721_DMACH_MAINT)) {
- dev_err(&priv->pdev->dev, "Unable to initialize maintenance DMA"
- " channel %d, aborting\n", TSI721_DMACH_MAINT);
- return -ENOMEM;
- }
-
+ mdma->sts_size * sizeof(struct tsi721_dma_sts),
+ mdma->sts_base, mdma->sts_phys);
+ mdma->sts_base = NULL;
return 0;
}
-static void tsi721_bdma_free(struct tsi721_device *priv)
-{
- tsi721_bdma_ch_free(priv, TSI721_DMACH_MAINT);
-}
-
/* Enable Inbound Messaging Interrupts */
static void
tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
@@ -2035,7 +2081,8 @@ static void tsi721_disable_ints(struct tsi721_device *priv)
/* Disable all BDMA Channel interrupts */
for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
- iowrite32(0, priv->regs + TSI721_DMAC_INTE(ch));
+ iowrite32(0,
+ priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);
/* Disable all general BDMA interrupts */
iowrite32(0, priv->regs + TSI721_BDMA_INTE);
@@ -2104,6 +2151,7 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
mport->phy_type = RIO_PHY_SERIAL;
mport->priv = (void *)priv;
mport->phys_efptr = 0x100;
+ priv->mport = mport;
INIT_LIST_HEAD(&mport->dbells);
@@ -2129,17 +2177,21 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
if (!err) {
tsi721_interrupts_init(priv);
ops->pwenable = tsi721_pw_enable;
- } else
+ } else {
dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
"vector %02X err=0x%x\n", pdev->irq, err);
+ goto err_exit;
+ }
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ tsi721_register_dma(priv);
+#endif
/* Enable SRIO link */
iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
TSI721_DEVCTL_SRBOOT_CMPL,
priv->regs + TSI721_DEVCTL);
rio_register_mport(mport);
- priv->mport = mport;
if (mport->host_deviceid >= 0)
iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
@@ -2149,6 +2201,11 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
return 0;
+
+err_exit:
+ kfree(mport);
+ kfree(ops);
+ return err;
}
static int __devinit tsi721_probe(struct pci_dev *pdev,
@@ -2294,7 +2351,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
tsi721_init_pc2sr_mapping(priv);
tsi721_init_sr2pc_mapping(priv);
- if (tsi721_bdma_init(priv)) {
+ if (tsi721_bdma_maint_init(priv)) {
dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
err = -ENOMEM;
goto err_unmap_bars;
@@ -2319,7 +2376,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
err_free_consistent:
tsi721_doorbell_free(priv);
err_free_bdma:
- tsi721_bdma_free(priv);
+ tsi721_bdma_maint_free(priv);
err_unmap_bars:
if (priv->regs)
iounmap(priv->regs);
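The tsi721.c rework replaces per-channel register macros such as TSI721_DMAC_CTL(x) with a single TSI721_DMAC_BASE(x) plus fixed offsets (defined in the tsi721.h hunk below), so a channel's register window is resolved once into a void __iomem pointer. A condensed sketch of the resulting access pattern, mirroring the clear/init sequence the driver uses (foo_dmac_reinit is an illustrative helper, not part of the patch):

	#include <linux/delay.h>
	#include <linux/io.h>

	/* Sketch: reset one BDMA channel using the new base + offset addressing. */
	static void foo_dmac_reinit(struct tsi721_device *priv, int ch)
	{
		void __iomem *regs = priv->regs + TSI721_DMAC_BASE(ch);

		iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);	/* clear ints */
		iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
		ioread32(regs + TSI721_DMAC_CTL);	/* read back, as the driver does */
		udelay(10);
	}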
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 1c226b31af13..59de9d7be346 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -167,6 +167,8 @@
#define TSI721_DEV_INTE 0x29840
#define TSI721_DEV_INT 0x29844
#define TSI721_DEV_INTSET 0x29848
+#define TSI721_DEV_INT_BDMA_CH 0x00002000
+#define TSI721_DEV_INT_BDMA_NCH 0x00001000
#define TSI721_DEV_INT_SMSG_CH 0x00000800
#define TSI721_DEV_INT_SMSG_NCH 0x00000400
#define TSI721_DEV_INT_SR2PC_CH 0x00000200
@@ -181,6 +183,8 @@
#define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x)))
#define TSI721_INT_OMSG_CHAN_M 0x0000ff00
#define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x)))
+#define TSI721_INT_BDMA_CHAN_M 0x000000ff
+#define TSI721_INT_BDMA_CHAN(x) (1 << (x))
/*
* PC2SR block registers
@@ -235,14 +239,16 @@
* x = 0..7
*/
-#define TSI721_DMAC_DWRCNT(x) (0x51000 + (x) * 0x1000)
-#define TSI721_DMAC_DRDCNT(x) (0x51004 + (x) * 0x1000)
+#define TSI721_DMAC_BASE(x) (0x51000 + (x) * 0x1000)
-#define TSI721_DMAC_CTL(x) (0x51008 + (x) * 0x1000)
+#define TSI721_DMAC_DWRCNT 0x000
+#define TSI721_DMAC_DRDCNT 0x004
+
+#define TSI721_DMAC_CTL 0x008
#define TSI721_DMAC_CTL_SUSP 0x00000002
#define TSI721_DMAC_CTL_INIT 0x00000001
-#define TSI721_DMAC_INT(x) (0x5100c + (x) * 0x1000)
+#define TSI721_DMAC_INT 0x00c
#define TSI721_DMAC_INT_STFULL 0x00000010
#define TSI721_DMAC_INT_DONE 0x00000008
#define TSI721_DMAC_INT_SUSP 0x00000004
@@ -250,34 +256,33 @@
#define TSI721_DMAC_INT_IOFDONE 0x00000001
#define TSI721_DMAC_INT_ALL 0x0000001f
-#define TSI721_DMAC_INTSET(x) (0x51010 + (x) * 0x1000)
+#define TSI721_DMAC_INTSET 0x010
-#define TSI721_DMAC_STS(x) (0x51014 + (x) * 0x1000)
+#define TSI721_DMAC_STS 0x014
#define TSI721_DMAC_STS_ABORT 0x00400000
#define TSI721_DMAC_STS_RUN 0x00200000
#define TSI721_DMAC_STS_CS 0x001f0000
-#define TSI721_DMAC_INTE(x) (0x51018 + (x) * 0x1000)
+#define TSI721_DMAC_INTE 0x018
-#define TSI721_DMAC_DPTRL(x) (0x51024 + (x) * 0x1000)
+#define TSI721_DMAC_DPTRL 0x024
#define TSI721_DMAC_DPTRL_MASK 0xffffffe0
-#define TSI721_DMAC_DPTRH(x) (0x51028 + (x) * 0x1000)
+#define TSI721_DMAC_DPTRH 0x028
-#define TSI721_DMAC_DSBL(x) (0x5102c + (x) * 0x1000)
+#define TSI721_DMAC_DSBL 0x02c
#define TSI721_DMAC_DSBL_MASK 0xffffffc0
-#define TSI721_DMAC_DSBH(x) (0x51030 + (x) * 0x1000)
+#define TSI721_DMAC_DSBH 0x030
-#define TSI721_DMAC_DSSZ(x) (0x51034 + (x) * 0x1000)
+#define TSI721_DMAC_DSSZ 0x034
#define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f
#define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4)
-
-#define TSI721_DMAC_DSRP(x) (0x51038 + (x) * 0x1000)
+#define TSI721_DMAC_DSRP 0x038
#define TSI721_DMAC_DSRP_MASK 0x0007ffff
-#define TSI721_DMAC_DSWP(x) (0x5103c + (x) * 0x1000)
+#define TSI721_DMAC_DSWP 0x03c
#define TSI721_DMAC_DSWP_MASK 0x0007ffff
#define TSI721_BDMA_INTE 0x5f000
@@ -612,6 +617,8 @@ enum dma_rtype {
#define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */
#define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */
+#define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */
+
#define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0)
enum tsi721_smsg_int_flag {
@@ -626,7 +633,48 @@ enum tsi721_smsg_int_flag {
/* Structures */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+struct tsi721_tx_desc {
+ struct dma_async_tx_descriptor txd;
+ struct tsi721_dma_desc *hw_desc;
+ u16 destid;
+ /* low 64-bits of 66-bit RIO address */
+ u64 rio_addr;
+ /* upper 2-bits of 66-bit RIO address */
+ u8 rio_addr_u;
+ bool interrupt;
+ struct list_head desc_node;
+ struct list_head tx_list;
+};
+
struct tsi721_bdma_chan {
+ int id;
+ void __iomem *regs;
+ int bd_num; /* number of buffer descriptors */
+ void *bd_base; /* start of DMA descriptors */
+ dma_addr_t bd_phys;
+ void *sts_base; /* start of DMA BD status FIFO */
+ dma_addr_t sts_phys;
+ int sts_size;
+ u32 sts_rdptr;
+ u32 wr_count;
+ u32 wr_count_next;
+
+ struct dma_chan dchan;
+ struct tsi721_tx_desc *tx_desc;
+ spinlock_t lock;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+ dma_cookie_t completed_cookie;
+ struct tasklet_struct tasklet;
+};
+
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
+struct tsi721_bdma_maint {
+ int ch_id; /* BDMA channel number */
int bd_num; /* number of buffer descriptors */
void *bd_base; /* start of DMA descriptors */
dma_addr_t bd_phys;
@@ -721,6 +769,24 @@ enum tsi721_msix_vect {
TSI721_VECT_IMB1_INT,
TSI721_VECT_IMB2_INT,
TSI721_VECT_IMB3_INT,
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ TSI721_VECT_DMA0_DONE,
+ TSI721_VECT_DMA1_DONE,
+ TSI721_VECT_DMA2_DONE,
+ TSI721_VECT_DMA3_DONE,
+ TSI721_VECT_DMA4_DONE,
+ TSI721_VECT_DMA5_DONE,
+ TSI721_VECT_DMA6_DONE,
+ TSI721_VECT_DMA7_DONE,
+ TSI721_VECT_DMA0_INT,
+ TSI721_VECT_DMA1_INT,
+ TSI721_VECT_DMA2_INT,
+ TSI721_VECT_DMA3_INT,
+ TSI721_VECT_DMA4_INT,
+ TSI721_VECT_DMA5_INT,
+ TSI721_VECT_DMA6_INT,
+ TSI721_VECT_DMA7_INT,
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
TSI721_VECT_MAX
};
@@ -754,7 +820,11 @@ struct tsi721_device {
u32 pw_discard_count;
/* BDMA Engine */
+ struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM];
+#endif
/* Inbound Messaging */
int imsg_init[TSI721_IMSG_CHNUM];
@@ -765,4 +835,9 @@ struct tsi721_device {
struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM];
};
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan);
+extern int __devinit tsi721_register_dma(struct tsi721_device *priv);
+#endif
+
#endif
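
The register macro rework above replaces the old absolute per-channel offsets (TSI721_DMAC_CTL(x) and friends) with one TSI721_DMAC_BASE(ch) block base plus small fixed offsets, so each channel can cache a single pointer to its register block. A minimal sketch of the resulting access pattern, not part of the patch (assumes the usual linux/io.h accessors and only the names defined in the hunks above):

/* Illustrative only: the channel block base is computed once and every
 * channel register is then a small fixed offset from it.
 */
static void example_kick_channel(struct tsi721_device *priv, int ch)
{
	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(ch);

	/* old style: iowrite32(..., priv->regs + TSI721_DMAC_CTL(ch)); */
	iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
	ioread32(regs + TSI721_DMAC_CTL);	/* flush the posted write */
}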
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
new file mode 100644
index 000000000000..92e06a5c62ec
--- /dev/null
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -0,0 +1,823 @@
+/*
+ * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
+ *
+ * Copyright 2011 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/delay.h>
+
+#include "tsi721.h"
+
+static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct tsi721_bdma_chan, dchan);
+}
+
+static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
+{
+ return container_of(ddev, struct rio_mport, dma)->priv;
+}
+
+static inline
+struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct tsi721_tx_desc, txd);
+}
+
+static inline
+struct tsi721_tx_desc *tsi721_dma_first_active(
+ struct tsi721_bdma_chan *bdma_chan)
+{
+ return list_first_entry(&bdma_chan->active_list,
+ struct tsi721_tx_desc, desc_node);
+}
+
+static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
+{
+ struct tsi721_dma_desc *bd_ptr;
+ struct device *dev = bdma_chan->dchan.device->dev;
+ u64 *sts_ptr;
+ dma_addr_t bd_phys;
+ dma_addr_t sts_phys;
+ int sts_size;
+ int bd_num = bdma_chan->bd_num;
+
+ dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);
+
+ /* Allocate space for DMA descriptors */
+ bd_ptr = dma_zalloc_coherent(dev,
+ bd_num * sizeof(struct tsi721_dma_desc),
+ &bd_phys, GFP_KERNEL);
+ if (!bd_ptr)
+ return -ENOMEM;
+
+ bdma_chan->bd_phys = bd_phys;
+ bdma_chan->bd_base = bd_ptr;
+
+ dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
+ bd_ptr, (unsigned long long)bd_phys);
+
+ /* Allocate space for descriptor status FIFO */
+ sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
+ bd_num : TSI721_DMA_MINSTSSZ;
+ sts_size = roundup_pow_of_two(sts_size);
+ sts_ptr = dma_zalloc_coherent(dev,
+ sts_size * sizeof(struct tsi721_dma_sts),
+ &sts_phys, GFP_KERNEL);
+ if (!sts_ptr) {
+ /* Free space allocated for DMA descriptors */
+ dma_free_coherent(dev,
+ bd_num * sizeof(struct tsi721_dma_desc),
+ bd_ptr, bd_phys);
+ bdma_chan->bd_base = NULL;
+ return -ENOMEM;
+ }
+
+ bdma_chan->sts_phys = sts_phys;
+ bdma_chan->sts_base = sts_ptr;
+ bdma_chan->sts_size = sts_size;
+
+ dev_dbg(dev,
+ "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
+ sts_ptr, (unsigned long long)sts_phys, sts_size);
+
+ /* Initialize DMA descriptors ring */
+ bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
+ bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
+ TSI721_DMAC_DPTRL_MASK);
+ bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
+
+ /* Setup DMA descriptor pointers */
+ iowrite32(((u64)bd_phys >> 32),
+ bdma_chan->regs + TSI721_DMAC_DPTRH);
+ iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
+ bdma_chan->regs + TSI721_DMAC_DPTRL);
+
+ /* Setup descriptor status FIFO */
+ iowrite32(((u64)sts_phys >> 32),
+ bdma_chan->regs + TSI721_DMAC_DSBH);
+ iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
+ bdma_chan->regs + TSI721_DMAC_DSBL);
+ iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
+ bdma_chan->regs + TSI721_DMAC_DSSZ);
+
+ /* Clear interrupt bits */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INT);
+
+ ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+
+ /* Toggle DMA channel initialization */
+ iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
+ ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
+ bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
+ bdma_chan->sts_rdptr = 0;
+ udelay(10);
+
+ return 0;
+}
+
+static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
+{
+ u32 ch_stat;
+
+ if (bdma_chan->bd_base == NULL)
+ return 0;
+
+ /* Check if DMA channel still running */
+ ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+ if (ch_stat & TSI721_DMAC_STS_RUN)
+ return -EFAULT;
+
+ /* Put DMA channel into init state */
+ iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
+
+ /* Free space allocated for DMA descriptors */
+ dma_free_coherent(bdma_chan->dchan.device->dev,
+ bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
+ bdma_chan->bd_base, bdma_chan->bd_phys);
+ bdma_chan->bd_base = NULL;
+
+ /* Free space allocated for status FIFO */
+ dma_free_coherent(bdma_chan->dchan.device->dev,
+ bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
+ bdma_chan->sts_base, bdma_chan->sts_phys);
+ bdma_chan->sts_base = NULL;
+ return 0;
+}
+
+static void
+tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
+{
+ if (enable) {
+ /* Clear pending BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INT);
+ ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+ /* Enable BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INTE);
+ } else {
+ /* Disable BDMA channel interrupts */
+ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+ /* Clear pending BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INT);
+ }
+
+}
+
+static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
+{
+ u32 sts;
+
+ sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+ return ((sts & TSI721_DMAC_STS_RUN) == 0);
+}
+
+void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
+{
+ /* Disable BDMA channel interrupts */
+ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+
+ tasklet_schedule(&bdma_chan->tasklet);
+}
+
+#ifdef CONFIG_PCI_MSI
+/**
+ * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
+ *
+ * Handles BDMA channel interrupts signaled using MSI-X.
+ */
+static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
+{
+ struct tsi721_bdma_chan *bdma_chan = ptr;
+
+ tsi721_bdma_handler(bdma_chan);
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_PCI_MSI */
+
+/* Must be called with the spinlock held */
+static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
+{
+ if (!tsi721_dma_is_idle(bdma_chan)) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "BUG: Attempt to start non-idle channel\n");
+ return;
+ }
+
+ if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "BUG: Attempt to start DMA with no BDs ready\n");
+ return;
+ }
+
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "tx_chan: %p, chan: %d, regs: %p\n",
+ bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);
+
+ iowrite32(bdma_chan->wr_count_next,
+ bdma_chan->regs + TSI721_DMAC_DWRCNT);
+ ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);
+
+ bdma_chan->wr_count = bdma_chan->wr_count_next;
+}
+
+static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
+ struct tsi721_tx_desc *desc)
+{
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "Put desc: %p into free list\n", desc);
+
+ if (desc) {
+ spin_lock_bh(&bdma_chan->lock);
+ list_splice_init(&desc->tx_list, &bdma_chan->free_list);
+ list_add(&desc->desc_node, &bdma_chan->free_list);
+ bdma_chan->wr_count_next = bdma_chan->wr_count;
+ spin_unlock_bh(&bdma_chan->lock);
+ }
+}
+
+static
+struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
+{
+ struct tsi721_tx_desc *tx_desc, *_tx_desc;
+ struct tsi721_tx_desc *ret = NULL;
+ int i;
+
+ spin_lock_bh(&bdma_chan->lock);
+ list_for_each_entry_safe(tx_desc, _tx_desc,
+ &bdma_chan->free_list, desc_node) {
+ if (async_tx_test_ack(&tx_desc->txd)) {
+ list_del(&tx_desc->desc_node);
+ ret = tx_desc;
+ break;
+ }
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "desc %p not ACKed\n", tx_desc);
+ }
+
+ i = bdma_chan->wr_count_next % bdma_chan->bd_num;
+ if (i == bdma_chan->bd_num - 1) {
+ i = 0;
+ bdma_chan->wr_count_next++; /* skip link descriptor */
+ }
+
+ bdma_chan->wr_count_next++;
+ tx_desc->txd.phys = bdma_chan->bd_phys +
+ i * sizeof(struct tsi721_dma_desc);
+ tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
+
+ spin_unlock_bh(&bdma_chan->lock);
+
+ return ret;
+}
+
+static int
+tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
+ struct tsi721_tx_desc *desc, struct scatterlist *sg,
+ enum dma_rtype rtype, u32 sys_size)
+{
+ struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
+ u64 rio_addr;
+
+ if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "SG element is too large\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
+ (u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
+ sg_dma_len(sg));
+
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "bd_ptr = %p did=%d raddr=0x%llx\n",
+ bd_ptr, desc->destid, desc->rio_addr);
+
+ /* Initialize DMA descriptor */
+ bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
+ (rtype << 19) | desc->destid);
+ if (desc->interrupt)
+ bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
+ bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
+ (sys_size << 26) | sg_dma_len(sg));
+ rio_addr = (desc->rio_addr >> 2) |
+ ((u64)(desc->rio_addr_u & 0x3) << 62);
+ bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
+ bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
+ bd_ptr->t1.bufptr_lo = cpu_to_le32(
+ (u64)sg_dma_address(sg) & 0xffffffff);
+ bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
+ bd_ptr->t1.s_dist = 0;
+ bd_ptr->t1.s_size = 0;
+
+ return 0;
+}
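
The descriptor builder above splits the 66-bit RapidIO address across two fields: the two least-significant bits land in the byte-count word, while the remaining bits (including the two rio_addr_u bits) form the RADDR pair. A worked sketch of that packing with an illustrative address (the values below are made up for the example, not taken from the patch):

/*
 *   rio_addr   = 0x0000000123456787   (low 64 bits of the 66-bit address)
 *   rio_addr_u = 0x1                  (upper 2 bits)
 *
 *   bcount[31:30] = rio_addr & 0x3                    = 0x3
 *   raddr         = (rio_addr >> 2) |
 *                   ((u64)(rio_addr_u & 0x3) << 62)   = 0x4000000048d159e1
 *   raddr_lo      = 0x48d159e1
 *   raddr_hi      = 0x40000000
 */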
+
+static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
+ struct tsi721_tx_desc *desc)
+{
+ struct dma_async_tx_descriptor *txd = &desc->txd;
+ dma_async_tx_callback callback = txd->callback;
+ void *param = txd->callback_param;
+
+ list_splice_init(&desc->tx_list, &bdma_chan->free_list);
+ list_move(&desc->desc_node, &bdma_chan->free_list);
+ bdma_chan->completed_cookie = txd->cookie;
+
+ if (callback)
+ callback(param);
+}
+
+static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
+{
+ struct tsi721_tx_desc *desc, *_d;
+ LIST_HEAD(list);
+
+ BUG_ON(!tsi721_dma_is_idle(bdma_chan));
+
+ if (!list_empty(&bdma_chan->queue))
+ tsi721_start_dma(bdma_chan);
+
+ list_splice_init(&bdma_chan->active_list, &list);
+ list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);
+
+ list_for_each_entry_safe(desc, _d, &list, desc_node)
+ tsi721_dma_chain_complete(bdma_chan, desc);
+}
+
+static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
+{
+ u32 srd_ptr;
+ u64 *sts_ptr;
+ int i, j;
+
+ /* Check and clear descriptor status FIFO entries */
+ srd_ptr = bdma_chan->sts_rdptr;
+ sts_ptr = bdma_chan->sts_base;
+ j = srd_ptr * 8;
+ while (sts_ptr[j]) {
+ for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
+ sts_ptr[j] = 0;
+
+ ++srd_ptr;
+ srd_ptr %= bdma_chan->sts_size;
+ j = srd_ptr * 8;
+ }
+
+ iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
+ bdma_chan->sts_rdptr = srd_ptr;
+}
+
+static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
+{
+ if (list_empty(&bdma_chan->active_list) ||
+ list_is_singular(&bdma_chan->active_list)) {
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "%s: Active_list empty\n", __func__);
+ tsi721_dma_complete_all(bdma_chan);
+ } else {
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "%s: Active_list NOT empty\n", __func__);
+ tsi721_dma_chain_complete(bdma_chan,
+ tsi721_dma_first_active(bdma_chan));
+ tsi721_start_dma(bdma_chan);
+ }
+}
+
+static void tsi721_dma_tasklet(unsigned long data)
+{
+ struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
+ u32 dmac_int, dmac_sts;
+
+ dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+ dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
+ __func__, bdma_chan->id, dmac_int);
+ /* Clear channel interrupts */
+ iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);
+
+ if (dmac_int & TSI721_DMAC_INT_ERR) {
+ dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+ dev_err(bdma_chan->dchan.device->dev,
+ "%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
+ __func__, bdma_chan->id, dmac_sts);
+ }
+
+ if (dmac_int & TSI721_DMAC_INT_STFULL) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "%s: DMAC%d descriptor status FIFO is full\n",
+ __func__, bdma_chan->id);
+ }
+
+ if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
+ tsi721_clr_stat(bdma_chan);
+ spin_lock(&bdma_chan->lock);
+ tsi721_advance_work(bdma_chan);
+ spin_unlock(&bdma_chan->lock);
+ }
+
+ /* Re-Enable BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
+}
+
+static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&bdma_chan->lock);
+
+ cookie = txd->chan->cookie;
+ if (++cookie < 0)
+ cookie = 1;
+ txd->chan->cookie = cookie;
+ txd->cookie = cookie;
+
+ if (list_empty(&bdma_chan->active_list)) {
+ list_add_tail(&desc->desc_node, &bdma_chan->active_list);
+ tsi721_start_dma(bdma_chan);
+ } else {
+ list_add_tail(&desc->desc_node, &bdma_chan->queue);
+ }
+
+ spin_unlock_bh(&bdma_chan->lock);
+ return cookie;
+}
+
+static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+#ifdef CONFIG_PCI_MSI
+ struct tsi721_device *priv = to_tsi721(dchan->device);
+#endif
+ struct tsi721_tx_desc *desc = NULL;
+ LIST_HEAD(tmp_list);
+ int i;
+ int rc;
+
+ if (bdma_chan->bd_base)
+ return bdma_chan->bd_num - 1;
+
+ /* Initialize BDMA channel */
+ if (tsi721_bdma_ch_init(bdma_chan)) {
+ dev_err(dchan->device->dev, "Unable to initialize data DMA"
+ " channel %d, aborting\n", bdma_chan->id);
+ return -ENOMEM;
+ }
+
+ /* Allocate matching number of logical descriptors */
+ desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
+ GFP_KERNEL);
+ if (!desc) {
+ dev_err(dchan->device->dev,
+ "Failed to allocate logical descriptors\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ bdma_chan->tx_desc = desc;
+
+ for (i = 0; i < bdma_chan->bd_num - 1; i++) {
+ dma_async_tx_descriptor_init(&desc[i].txd, dchan);
+ desc[i].txd.tx_submit = tsi721_tx_submit;
+ desc[i].txd.flags = DMA_CTRL_ACK;
+ INIT_LIST_HEAD(&desc[i].tx_list);
+ list_add_tail(&desc[i].desc_node, &tmp_list);
+ }
+
+ spin_lock_bh(&bdma_chan->lock);
+ list_splice(&tmp_list, &bdma_chan->free_list);
+ bdma_chan->completed_cookie = dchan->cookie = 1;
+ spin_unlock_bh(&bdma_chan->lock);
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ /* Request interrupt service if we are in MSI-X mode */
+ rc = request_irq(
+ priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector,
+ tsi721_bdma_msix, 0,
+ priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].irq_name,
+ (void *)bdma_chan);
+
+ if (rc) {
+ dev_dbg(dchan->device->dev,
+ "Unable to allocate MSI-X interrupt for "
+ "BDMA%d-DONE\n", bdma_chan->id);
+ goto err_out;
+ }
+
+ rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].vector,
+ tsi721_bdma_msix, 0,
+ priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].irq_name,
+ (void *)bdma_chan);
+
+ if (rc) {
+ dev_dbg(dchan->device->dev,
+ "Unable to allocate MSI-X interrupt for "
+ "BDMA%d-INT\n", bdma_chan->id);
+ free_irq(
+ priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector,
+ (void *)bdma_chan);
+ rc = -EIO;
+ goto err_out;
+ }
+ }
+#endif /* CONFIG_PCI_MSI */
+
+ tasklet_enable(&bdma_chan->tasklet);
+ tsi721_bdma_interrupt_enable(bdma_chan, 1);
+
+ return bdma_chan->bd_num - 1;
+
+err_out:
+ kfree(desc);
+ tsi721_bdma_ch_free(bdma_chan);
+ return rc;
+}
+
+static void tsi721_free_chan_resources(struct dma_chan *dchan)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+#ifdef CONFIG_PCI_MSI
+ struct tsi721_device *priv = to_tsi721(dchan->device);
+#endif
+ LIST_HEAD(list);
+
+ dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+ if (bdma_chan->bd_base == NULL)
+ return;
+
+ BUG_ON(!list_empty(&bdma_chan->active_list));
+ BUG_ON(!list_empty(&bdma_chan->queue));
+
+ tasklet_disable(&bdma_chan->tasklet);
+
+ spin_lock_bh(&bdma_chan->lock);
+ list_splice_init(&bdma_chan->free_list, &list);
+ spin_unlock_bh(&bdma_chan->lock);
+
+ tsi721_bdma_interrupt_enable(bdma_chan, 0);
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector, (void *)bdma_chan);
+ free_irq(priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].vector, (void *)bdma_chan);
+ }
+#endif /* CONFIG_PCI_MSI */
+
+ tsi721_bdma_ch_free(bdma_chan);
+ kfree(bdma_chan->tx_desc);
+}
+
+static
+enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_completed;
+ int ret;
+
+ spin_lock_bh(&bdma_chan->lock);
+ last_completed = bdma_chan->completed_cookie;
+ last_used = dchan->cookie;
+ spin_unlock_bh(&bdma_chan->lock);
+
+ ret = dma_async_is_complete(cookie, last_completed, last_used);
+
+ dma_set_tx_state(txstate, last_completed, last_used, 0);
+
+ dev_dbg(dchan->device->dev,
+ "%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
+ __func__, ret, last_completed, last_used);
+
+ return ret;
+}
+
+static void tsi721_issue_pending(struct dma_chan *dchan)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+
+ dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+ if (tsi721_dma_is_idle(bdma_chan)) {
+ spin_lock_bh(&bdma_chan->lock);
+ tsi721_advance_work(bdma_chan);
+ spin_unlock_bh(&bdma_chan->lock);
+ } else
+ dev_dbg(dchan->device->dev,
+ "%s: DMA channel still busy\n", __func__);
+}
+
+static
+struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, unsigned long flags,
+ void *tinfo)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+ struct tsi721_tx_desc *desc = NULL;
+ struct tsi721_tx_desc *first = NULL;
+ struct scatterlist *sg;
+ struct rio_dma_ext *rext = tinfo;
+ u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
+ unsigned int i;
+ u32 sys_size = dma_to_mport(dchan->device)->sys_size;
+ enum dma_rtype rtype;
+
+ if (!sgl || !sg_len) {
+ dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
+ return NULL;
+ }
+
+ if (dir == DMA_DEV_TO_MEM)
+ rtype = NREAD;
+ else if (dir == DMA_MEM_TO_DEV) {
+ switch (rext->wr_type) {
+ case RDW_ALL_NWRITE:
+ rtype = ALL_NWRITE;
+ break;
+ case RDW_ALL_NWRITE_R:
+ rtype = ALL_NWRITE_R;
+ break;
+ case RDW_LAST_NWRITE_R:
+ default:
+ rtype = LAST_NWRITE_R;
+ break;
+ }
+ } else {
+ dev_err(dchan->device->dev,
+ "%s: Unsupported DMA direction option\n", __func__);
+ return NULL;
+ }
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ int err;
+
+ dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
+ desc = tsi721_desc_get(bdma_chan);
+ if (!desc) {
+ dev_err(dchan->device->dev,
+ "Not enough descriptors available\n");
+ goto err_desc_get;
+ }
+
+ if (sg_is_last(sg))
+ desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
+ else
+ desc->interrupt = false;
+
+ desc->destid = rext->destid;
+ desc->rio_addr = rio_addr;
+ desc->rio_addr_u = 0;
+
+ err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
+ if (err) {
+ dev_err(dchan->device->dev,
+ "Failed to build desc: %d\n", err);
+ goto err_desc_get;
+ }
+
+ rio_addr += sg_dma_len(sg);
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->desc_node, &first->tx_list);
+ }
+
+ first->txd.cookie = -EBUSY;
+ desc->txd.flags = flags;
+
+ return &first->txd;
+
+err_desc_get:
+ tsi721_desc_put(bdma_chan, first);
+ return NULL;
+}
+
+static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+ struct tsi721_tx_desc *desc, *_d;
+ LIST_HEAD(list);
+
+ dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ spin_lock_bh(&bdma_chan->lock);
+
+ /* make sure to stop the transfer */
+ iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);
+
+ list_splice_init(&bdma_chan->active_list, &list);
+ list_splice_init(&bdma_chan->queue, &list);
+
+ list_for_each_entry_safe(desc, _d, &list, desc_node)
+ tsi721_dma_chain_complete(bdma_chan, desc);
+
+ spin_unlock_bh(&bdma_chan->lock);
+
+ return 0;
+}
+
+int __devinit tsi721_register_dma(struct tsi721_device *priv)
+{
+ int i;
+ int nr_channels = TSI721_DMA_MAXCH;
+ int err;
+ struct rio_mport *mport = priv->mport;
+
+ mport->dma.dev = &priv->pdev->dev;
+ mport->dma.chancnt = nr_channels;
+
+ INIT_LIST_HEAD(&mport->dma.channels);
+
+ for (i = 0; i < nr_channels; i++) {
+ struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
+
+ if (i == TSI721_DMACH_MAINT)
+ continue;
+
+ bdma_chan->bd_num = 64;
+ bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
+
+ bdma_chan->dchan.device = &mport->dma;
+ bdma_chan->dchan.cookie = 1;
+ bdma_chan->dchan.chan_id = i;
+ bdma_chan->id = i;
+
+ spin_lock_init(&bdma_chan->lock);
+
+ INIT_LIST_HEAD(&bdma_chan->active_list);
+ INIT_LIST_HEAD(&bdma_chan->queue);
+ INIT_LIST_HEAD(&bdma_chan->free_list);
+
+ tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
+ (unsigned long)bdma_chan);
+ tasklet_disable(&bdma_chan->tasklet);
+ list_add_tail(&bdma_chan->dchan.device_node,
+ &mport->dma.channels);
+ }
+
+ dma_cap_zero(mport->dma.cap_mask);
+ dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
+ dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);
+
+ mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
+ mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
+ mport->dma.device_tx_status = tsi721_tx_status;
+ mport->dma.device_issue_pending = tsi721_issue_pending;
+ mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
+ mport->dma.device_control = tsi721_device_control;
+
+ err = dma_async_device_register(&mport->dma);
+ if (err)
+ dev_err(&priv->pdev->dev, "Failed to register DMA device\n");
+
+ return err;
+}
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 86c9a091a2ff..c40665a4fa33 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1121,6 +1121,87 @@ int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
return 0;
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+static bool rio_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct rio_dev *rdev = arg;
+
+ /* Check that DMA device belongs to the right MPORT */
+ return (rdev->net->hport ==
+ container_of(chan->device, struct rio_mport, dma));
+}
+
+/**
+ * rio_request_dma - request RapidIO capable DMA channel that supports
+ * specified target RapidIO device.
+ * @rdev: RIO device control structure
+ *
+ * Returns pointer to allocated DMA channel or NULL if failed.
+ */
+struct dma_chan *rio_request_dma(struct rio_dev *rdev)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *dchan;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dchan = dma_request_channel(mask, rio_chan_filter, rdev);
+
+ return dchan;
+}
+EXPORT_SYMBOL_GPL(rio_request_dma);
+
+/**
+ * rio_release_dma - release specified DMA channel
+ * @dchan: DMA channel to release
+ */
+void rio_release_dma(struct dma_chan *dchan)
+{
+ dma_release_channel(dchan);
+}
+EXPORT_SYMBOL_GPL(rio_release_dma);
+
+/**
+ * rio_dma_prep_slave_sg - RapidIO specific wrapper
+ * for device_prep_slave_sg callback defined by DMAENGINE.
+ * @rdev: RIO device control structure
+ * @dchan: DMA channel to configure
+ * @data: RIO specific data descriptor
+ * @direction: DMA data transfer direction (TO or FROM the device)
+ * @flags: dmaengine defined flags
+ *
+ * Initializes RapidIO capable DMA channel for the specified data transfer.
+ * Uses DMA channel private extension to pass information related to remote
+ * target RIO device.
+ * Returns pointer to DMA transaction descriptor or NULL if failed.
+ */
+struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
+ struct dma_chan *dchan, struct rio_dma_data *data,
+ enum dma_transfer_direction direction, unsigned long flags)
+{
+ struct dma_async_tx_descriptor *txd = NULL;
+ struct rio_dma_ext rio_ext;
+
+ if (dchan->device->device_prep_slave_sg == NULL) {
+ pr_err("%s: prep_rio_sg == NULL\n", __func__);
+ return NULL;
+ }
+
+ rio_ext.destid = rdev->destid;
+ rio_ext.rio_addr_u = data->rio_addr_u;
+ rio_ext.rio_addr = data->rio_addr;
+ rio_ext.wr_type = data->wr_type;
+
+ txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
+ direction, flags, &rio_ext);
+
+ return txd;
+}
+EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
+
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
static void rio_fixup_device(struct rio_dev *dev)
{
}
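
A minimal client-side sketch of the rio_request_dma()/rio_dma_prep_slave_sg() API added above, assuming the caller already holds a valid struct rio_dev and a DMA-mapped scatterlist described by struct rio_dma_data; the cookie handling uses the generic dmaengine helpers, nothing Tsi721-specific:

/* Hypothetical RapidIO client: one NWRITE_R-style transfer to a remote target. */
static int example_rio_write(struct rio_dev *rdev, struct rio_dma_data *data)
{
	struct dma_chan *dchan;
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	dchan = rio_request_dma(rdev);
	if (!dchan)
		return -ENODEV;

	txd = rio_dma_prep_slave_sg(rdev, dchan, data,
				    DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!txd) {
		rio_release_dma(dchan);
		return -EIO;
	}

	cookie = dmaengine_submit(txd);		/* -> tsi721_tx_submit() */
	dma_async_issue_pending(dchan);		/* -> tsi721_issue_pending() */

	/* real code would use the txd callback or a wait queue instead */
	while (dma_async_is_tx_complete(dchan, cookie, NULL, NULL) != DMA_SUCCESS)
		cpu_relax();

	rio_release_dma(dchan);
	return 0;
}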
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 49b2112b0486..3660bace123c 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -47,7 +47,7 @@ static int anatop_set_voltage(struct regulator_dev *reg, int min_uV,
int max_uV, unsigned *selector)
{
struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
- u32 val, sel;
+ u32 val, sel, mask;
int uv;
uv = min_uV;
@@ -71,11 +71,10 @@ static int anatop_set_voltage(struct regulator_dev *reg, int min_uV,
val = anatop_reg->min_bit_val + sel;
*selector = sel;
dev_dbg(&reg->dev, "%s: calculated val %d\n", __func__, val);
- anatop_set_bits(anatop_reg->mfd,
- anatop_reg->control_reg,
- anatop_reg->vol_bit_shift,
- anatop_reg->vol_bit_width,
- val);
+ mask = ((1 << anatop_reg->vol_bit_width) - 1) <<
+ anatop_reg->vol_bit_shift;
+ val <<= anatop_reg->vol_bit_shift;
+ anatop_write_reg(anatop_reg->mfd, anatop_reg->control_reg, val, mask);
return 0;
}
@@ -88,10 +87,9 @@ static int anatop_get_voltage_sel(struct regulator_dev *reg)
if (!anatop_reg->control_reg)
return -ENOTSUPP;
- val = anatop_get_bits(anatop_reg->mfd,
- anatop_reg->control_reg,
- anatop_reg->vol_bit_shift,
- anatop_reg->vol_bit_width);
+ val = anatop_read_reg(anatop_reg->mfd, anatop_reg->control_reg);
+ val = (val & ((1 << anatop_reg->vol_bit_width) - 1)) >>
+ anatop_reg->vol_bit_shift;
return val - anatop_reg->min_bit_val;
}
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 4e01a423471b..6bf864b4bdf6 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -331,21 +331,16 @@ struct tps65910_reg {
static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg)
{
- u8 val;
+ unsigned int val;
int err;
- err = pmic->mfd->read(pmic->mfd, reg, 1, &val);
+ err = tps65910_reg_read(pmic->mfd, reg, &val);
if (err)
return err;
return val;
}
-static inline int tps65910_write(struct tps65910_reg *pmic, u8 reg, u8 val)
-{
- return pmic->mfd->write(pmic->mfd, reg, 1, &val);
-}
-
static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
u8 set_mask, u8 clear_mask)
{
@@ -362,7 +357,7 @@ static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
data &= ~clear_mask;
data |= set_mask;
- err = tps65910_write(pmic, reg, data);
+ err = tps65910_reg_write(pmic->mfd, reg, data);
if (err)
dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
@@ -371,7 +366,7 @@ out:
return err;
}
-static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg)
+static int tps65910_reg_read_locked(struct tps65910_reg *pmic, u8 reg)
{
int data;
@@ -385,13 +380,13 @@ static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg)
return data;
}
-static int tps65910_reg_write(struct tps65910_reg *pmic, u8 reg, u8 val)
+static int tps65910_reg_write_locked(struct tps65910_reg *pmic, u8 reg, u8 val)
{
int err;
mutex_lock(&pmic->mutex);
- err = tps65910_write(pmic, reg, val);
+ err = tps65910_reg_write(pmic->mfd, reg, val);
if (err < 0)
dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
@@ -490,9 +485,9 @@ static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
LDO_ST_MODE_BIT);
case REGULATOR_MODE_IDLE:
value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT;
- return tps65910_set_bits(mfd, reg, value);
+ return tps65910_reg_set_bits(mfd, reg, value);
case REGULATOR_MODE_STANDBY:
- return tps65910_clear_bits(mfd, reg, LDO_ST_ON_BIT);
+ return tps65910_reg_clear_bits(mfd, reg, LDO_ST_ON_BIT);
}
return -EINVAL;
@@ -507,7 +502,7 @@ static unsigned int tps65910_get_mode(struct regulator_dev *dev)
if (reg < 0)
return reg;
- value = tps65910_reg_read(pmic, reg);
+ value = tps65910_reg_read_locked(pmic, reg);
if (value < 0)
return value;
@@ -527,28 +522,28 @@ static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
switch (id) {
case TPS65910_REG_VDD1:
- opvsel = tps65910_reg_read(pmic, TPS65910_VDD1_OP);
- mult = tps65910_reg_read(pmic, TPS65910_VDD1);
+ opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_OP);
+ mult = tps65910_reg_read_locked(pmic, TPS65910_VDD1);
mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
- srvsel = tps65910_reg_read(pmic, TPS65910_VDD1_SR);
+ srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_SR);
sr = opvsel & VDD1_OP_CMD_MASK;
opvsel &= VDD1_OP_SEL_MASK;
srvsel &= VDD1_SR_SEL_MASK;
vselmax = 75;
break;
case TPS65910_REG_VDD2:
- opvsel = tps65910_reg_read(pmic, TPS65910_VDD2_OP);
- mult = tps65910_reg_read(pmic, TPS65910_VDD2);
+ opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_OP);
+ mult = tps65910_reg_read_locked(pmic, TPS65910_VDD2);
mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT;
- srvsel = tps65910_reg_read(pmic, TPS65910_VDD2_SR);
+ srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_SR);
sr = opvsel & VDD2_OP_CMD_MASK;
opvsel &= VDD2_OP_SEL_MASK;
srvsel &= VDD2_SR_SEL_MASK;
vselmax = 75;
break;
case TPS65911_REG_VDDCTRL:
- opvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_OP);
- srvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_SR);
+ opvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_OP);
+ srvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_SR);
sr = opvsel & VDDCTRL_OP_CMD_MASK;
opvsel &= VDDCTRL_OP_SEL_MASK;
srvsel &= VDDCTRL_SR_SEL_MASK;
@@ -588,7 +583,7 @@ static int tps65910_get_voltage_sel(struct regulator_dev *dev)
if (reg < 0)
return reg;
- value = tps65910_reg_read(pmic, reg);
+ value = tps65910_reg_read_locked(pmic, reg);
if (value < 0)
return value;
@@ -625,7 +620,7 @@ static int tps65911_get_voltage_sel(struct regulator_dev *dev)
reg = pmic->get_ctrl_reg(id);
- value = tps65910_reg_read(pmic, reg);
+ value = tps65910_reg_read_locked(pmic, reg);
switch (id) {
case TPS65911_REG_LDO1:
@@ -670,7 +665,7 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
tps65910_modify_bits(pmic, TPS65910_VDD1,
(dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
VDD1_VGAIN_SEL_MASK);
- tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
+ tps65910_reg_write_locked(pmic, TPS65910_VDD1_OP, vsel);
break;
case TPS65910_REG_VDD2:
dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
@@ -681,11 +676,11 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
tps65910_modify_bits(pmic, TPS65910_VDD2,
(dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
VDD1_VGAIN_SEL_MASK);
- tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel);
+ tps65910_reg_write_locked(pmic, TPS65910_VDD2_OP, vsel);
break;
case TPS65911_REG_VDDCTRL:
vsel = selector + 3;
- tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel);
+ tps65910_reg_write_locked(pmic, TPS65911_VDDCTRL_OP, vsel);
}
return 0;
@@ -936,10 +931,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
/* External EN1 control */
if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1)
- ret = tps65910_set_bits(mfd,
+ ret = tps65910_reg_set_bits(mfd,
TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
else
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
if (ret < 0) {
dev_err(mfd->dev,
@@ -949,10 +944,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
/* External EN2 control */
if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2)
- ret = tps65910_set_bits(mfd,
+ ret = tps65910_reg_set_bits(mfd,
TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
else
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
if (ret < 0) {
dev_err(mfd->dev,
@@ -964,10 +959,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
if ((tps65910_chip_id(mfd) == TPS65910) &&
(id >= TPS65910_REG_VDIG1)) {
if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3)
- ret = tps65910_set_bits(mfd,
+ ret = tps65910_reg_set_bits(mfd,
TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
else
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
if (ret < 0) {
dev_err(mfd->dev,
@@ -979,10 +974,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
/* Return if no external control is selected */
if (!(ext_sleep_config & EXT_SLEEP_CONTROL)) {
/* Clear all sleep controls */
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
if (!ret)
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
if (ret < 0)
dev_err(mfd->dev,
@@ -1001,32 +996,33 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
(tps65910_chip_id(mfd) == TPS65911))) {
int op_reg_add = pmic->get_ctrl_reg(id) + 1;
int sr_reg_add = pmic->get_ctrl_reg(id) + 2;
- int opvsel = tps65910_reg_read(pmic, op_reg_add);
- int srvsel = tps65910_reg_read(pmic, sr_reg_add);
+ int opvsel = tps65910_reg_read_locked(pmic, op_reg_add);
+ int srvsel = tps65910_reg_read_locked(pmic, sr_reg_add);
if (opvsel & VDD1_OP_CMD_MASK) {
u8 reg_val = srvsel & VDD1_OP_SEL_MASK;
- ret = tps65910_reg_write(pmic, op_reg_add, reg_val);
+ ret = tps65910_reg_write_locked(pmic, op_reg_add,
+ reg_val);
if (ret < 0) {
dev_err(mfd->dev,
"Error in configuring op register\n");
return ret;
}
}
- ret = tps65910_reg_write(pmic, sr_reg_add, 0);
+ ret = tps65910_reg_write_locked(pmic, sr_reg_add, 0);
if (ret < 0) {
dev_err(mfd->dev, "Error in settting sr register\n");
return ret;
}
}
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
if (!ret) {
if (ext_sleep_config & TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP)
- ret = tps65910_set_bits(mfd,
+ ret = tps65910_reg_set_bits(mfd,
TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
else
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
}
if (ret < 0)
@@ -1177,7 +1173,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmic);
/* Give control of all register to control port */
- tps65910_set_bits(pmic->mfd, TPS65910_DEVCTRL,
+ tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL,
DEVCTRL_SR_CTL_I2C_SEL_MASK);
switch(tps65910_chip_id(tps65910)) {
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index a885911bb5fc..099da11e989f 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -535,7 +535,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq_byname(pdev, "UV");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
@@ -544,7 +544,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
goto err_regulator;
}
- irq = platform_get_irq_byname(pdev, "HC");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC"));
ret = request_threaded_irq(irq, NULL, wm831x_dcdc_oc_irq,
IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
@@ -558,7 +558,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
return 0;
err_uv:
- free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
+ dcdc);
err_regulator:
regulator_unregister(dcdc->regulator);
err:
@@ -570,11 +571,14 @@ err:
static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
{
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
+ struct wm831x *wm831x = dcdc->wm831x;
platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq_byname(pdev, "HC"), dcdc);
- free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC")),
+ dcdc);
+ free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
+ dcdc);
regulator_unregister(dcdc->regulator);
if (dcdc->dvs_gpio)
gpio_free(dcdc->dvs_gpio);
@@ -726,7 +730,7 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq_byname(pdev, "UV");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
@@ -751,7 +755,8 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
+ dcdc);
regulator_unregister(dcdc->regulator);
return 0;
@@ -859,7 +864,7 @@ static __devinit int wm831x_boostp_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq_byname(pdev, "UV");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
IRQF_TRIGGER_RISING, dcdc->name,
dcdc);
@@ -885,7 +890,8 @@ static __devexit int wm831x_boostp_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
+ dcdc);
regulator_unregister(dcdc->regulator);
return 0;
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index b50ab778b098..0d207c297714 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -202,7 +202,7 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq(pdev, 0);
+ irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
ret = request_threaded_irq(irq, NULL, wm831x_isink_irq,
IRQF_TRIGGER_RISING, isink->name, isink);
if (ret != 0) {
@@ -227,7 +227,7 @@ static __devexit int wm831x_isink_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq(pdev, 0), isink);
+ free_irq(wm831x_irq(isink->wm831x, platform_get_irq(pdev, 0)), isink);
regulator_unregister(isink->regulator);
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index aa1f8b3fbe16..a9a28d8ac185 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -321,7 +321,7 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq_byname(pdev, "UV");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
IRQF_TRIGGER_RISING, ldo->name,
ldo);
@@ -347,7 +347,8 @@ static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
+ free_irq(wm831x_irq(ldo->wm831x,
+ platform_get_irq_byname(pdev, "UV")), ldo);
regulator_unregister(ldo->regulator);
return 0;
@@ -582,7 +583,7 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq_byname(pdev, "UV");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
IRQF_TRIGGER_RISING, ldo->name, ldo);
if (ret != 0) {
@@ -605,7 +606,8 @@ static __devexit int wm831x_aldo_remove(struct platform_device *pdev)
{
struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
- free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
+ free_irq(wm831x_irq(ldo->wm831x, platform_get_irq_byname(pdev, "UV")),
+ ldo);
regulator_unregister(ldo->regulator);
return 0;
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index d6f8adaa26ef..8ea7bccc7100 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -78,7 +78,7 @@ typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int avail);
* the recovery of the remote processor.
*/
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
- unsigned long iova, int flags)
+ unsigned long iova, int flags, void *token)
{
dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);
@@ -117,7 +117,7 @@ static int rproc_enable_iommu(struct rproc *rproc)
return -ENOMEM;
}
- iommu_set_fault_handler(domain, rproc_iommu_fault);
+ iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);
ret = iommu_attach_device(domain, dev);
if (ret) {
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4161bfe462cd..08cbdb900a18 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -620,27 +620,6 @@ config RTC_DRV_MSM6242
This driver can also be built as a module. If so, the module
will be called rtc-msm6242.
-config RTC_DRV_IMXDI
- tristate "Freescale IMX DryIce Real Time Clock"
- depends on ARCH_MX25
- depends on RTC_CLASS
- help
- Support for Freescale IMX DryIce RTC
-
- This driver can also be built as a module, if so, the module
- will be called "rtc-imxdi".
-
-config RTC_MXC
- tristate "Freescale MXC Real Time Clock"
- depends on ARCH_MXC
- depends on RTC_CLASS
- help
- If you say yes here you get support for the Freescale MXC
- RTC module.
-
- This driver can also be built as a module, if so, the module
- will be called "rtc-mxc".
-
config RTC_DRV_BQ4802
tristate "TI BQ4802"
help
@@ -738,6 +717,16 @@ config RTC_DRV_DAVINCI
This driver can also be built as a module. If so, the module
will be called rtc-davinci.
+config RTC_DRV_IMXDI
+ tristate "Freescale IMX DryIce Real Time Clock"
+ depends on SOC_IMX25
+ depends on RTC_CLASS
+ help
+ Support for Freescale IMX DryIce RTC
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-imxdi".
+
config RTC_DRV_OMAP
tristate "TI OMAP1"
depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
@@ -1087,4 +1076,15 @@ config RTC_DRV_LOONGSON1
This driver can also be built as a module. If so, the module
will be called rtc-ls1x.
+config RTC_DRV_MXC
+ tristate "Freescale MXC Real Time Clock"
+ depends on ARCH_MXC
+ depends on RTC_CLASS
+ help
+ If you say yes here you get support for the Freescale MXC
+ RTC module.
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-mxc".
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 727ae7786e6c..2973921c30d8 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -61,7 +61,7 @@ obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o
obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o
obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
-obj-$(CONFIG_RTC_MXC) += rtc-mxc.o
+obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX8925) += rtc-max8925.o
obj-$(CONFIG_RTC_DRV_MAX8998) += rtc-max8998.o
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index c293d0cdb104..836710ce750e 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -17,8 +17,7 @@
#include <linux/string.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
-
-
+#include <linux/rtc/ds1307.h>
/*
* We can't determine type by probing, but if we expect pre-Linux code
@@ -92,7 +91,8 @@ enum ds_type {
# define DS1337_BIT_A2I 0x02
# define DS1337_BIT_A1I 0x01
#define DS1339_REG_ALARM1_SECS 0x07
-#define DS1339_REG_TRICKLE 0x10
+
+#define DS13XX_TRICKLE_CHARGER_MAGIC 0xa0
#define RX8025_REG_CTRL1 0x0e
# define RX8025_BIT_2412 0x20
@@ -124,6 +124,7 @@ struct chip_desc {
unsigned alarm:1;
u16 nvram_offset;
u16 nvram_size;
+ u16 trickle_charger_reg;
};
static const struct chip_desc chips[last_ds_type] = {
@@ -140,6 +141,13 @@ static const struct chip_desc chips[last_ds_type] = {
},
[ds_1339] = {
.alarm = 1,
+ .trickle_charger_reg = 0x10,
+ },
+ [ds_1340] = {
+ .trickle_charger_reg = 0x08,
+ },
+ [ds_1388] = {
+ .trickle_charger_reg = 0x0a,
},
[ds_3231] = {
.alarm = 1,
@@ -619,6 +627,7 @@ static int __devinit ds1307_probe(struct i2c_client *client,
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int want_irq = false;
unsigned char *buf;
+ struct ds1307_platform_data *pdata = client->dev.platform_data;
static const int bbsqi_bitpos[] = {
[ds_1337] = 0,
[ds_1339] = DS1339_BIT_BBSQI,
@@ -637,7 +646,10 @@ static int __devinit ds1307_probe(struct i2c_client *client,
ds1307->client = client;
ds1307->type = id->driver_data;
- ds1307->offset = 0;
+
+ if (pdata && pdata->trickle_charger_setup && chip->trickle_charger_reg)
+ i2c_smbus_write_byte_data(client, chip->trickle_charger_reg,
+ DS13XX_TRICKLE_CHARGER_MAGIC | pdata->trickle_charger_setup);
buf = ds1307->regs;
if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
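
A board-file sketch of how the new trickle-charger hook might be wired up; the struct and field names come from the hunk above, while the 0x05 setup value is an assumption following the DS1339 datasheet encoding (one diode, 250 ohm), and the helper macros from the new <linux/rtc/ds1307.h> header are not shown here:

/* Hypothetical board code: the driver ORs DS13XX_TRICKLE_CHARGER_MAGIC (0xa0)
 * into this value before writing the chip's trickle-charger register.
 */
static struct ds1307_platform_data board_ds1339_pdata = {
	.trickle_charger_setup	= 0x05,
};

static struct i2c_board_info board_rtc_info __initdata = {
	I2C_BOARD_INFO("ds1339", 0x68),
	.platform_data		= &board_ds1339_pdata,
};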
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 14a42a1edc66..9602278ff988 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -127,7 +127,7 @@ static const struct attribute_group ep93xx_rtc_sysfs_files = {
.attrs = ep93xx_rtc_attrs,
};
-static int __init ep93xx_rtc_probe(struct platform_device *pdev)
+static int __devinit ep93xx_rtc_probe(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc;
struct resource *res;
@@ -174,7 +174,7 @@ exit:
return err;
}
-static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
+static int __devexit ep93xx_rtc_remove(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
@@ -186,31 +186,19 @@ static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:ep93xx-rtc");
-
static struct platform_driver ep93xx_rtc_driver = {
.driver = {
.name = "ep93xx-rtc",
.owner = THIS_MODULE,
},
- .remove = __exit_p(ep93xx_rtc_remove),
+ .probe = ep93xx_rtc_probe,
+ .remove = __devexit_p(ep93xx_rtc_remove),
};
-static int __init ep93xx_rtc_init(void)
-{
- return platform_driver_probe(&ep93xx_rtc_driver, ep93xx_rtc_probe);
-}
-
-static void __exit ep93xx_rtc_exit(void)
-{
- platform_driver_unregister(&ep93xx_rtc_driver);
-}
+module_platform_driver(ep93xx_rtc_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("EP93XX RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
-module_init(ep93xx_rtc_init);
-module_exit(ep93xx_rtc_exit);
+MODULE_ALIAS("platform:ep93xx-rtc");
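
module_platform_driver() replaces the hand-rolled init/exit wrappers deleted above; roughly, the macro expands to the same boilerplate (sketch of the generic helper, not code from this patch):

/* Approximately what module_platform_driver(ep93xx_rtc_driver) generates: */
static int __init ep93xx_rtc_driver_init(void)
{
	return platform_driver_register(&ep93xx_rtc_driver);
}
module_init(ep93xx_rtc_driver_init);

static void __exit ep93xx_rtc_driver_exit(void)
{
	platform_driver_unregister(&ep93xx_rtc_driver);
}
module_exit(ep93xx_rtc_driver_exit);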
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index d93a9608b1f0..891cd6c61d0a 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -405,7 +405,7 @@ static int dryice_rtc_probe(struct platform_device *pdev)
imxdi->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(imxdi->clk))
return PTR_ERR(imxdi->clk);
- clk_enable(imxdi->clk);
+ clk_prepare_enable(imxdi->clk);
/*
* Initialize dryice hardware
@@ -470,7 +470,7 @@ static int dryice_rtc_probe(struct platform_device *pdev)
return 0;
err:
- clk_disable(imxdi->clk);
+ clk_disable_unprepare(imxdi->clk);
clk_put(imxdi->clk);
return rc;
@@ -487,7 +487,7 @@ static int __devexit dryice_rtc_remove(struct platform_device *pdev)
rtc_device_unregister(imxdi->rtc);
- clk_disable(imxdi->clk);
+ clk_disable_unprepare(imxdi->clk);
clk_put(imxdi->clk);
return 0;
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index 63c72189c64b..d5218553741f 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -19,6 +19,7 @@
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/of.h>
/*
* Clock and Power control register offsets
@@ -386,13 +387,22 @@ static const struct dev_pm_ops lpc32xx_rtc_pm_ops = {
#define LPC32XX_RTC_PM_OPS NULL
#endif
+#ifdef CONFIG_OF
+static const struct of_device_id lpc32xx_rtc_match[] = {
+ { .compatible = "nxp,lpc3220-rtc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_rtc_match);
+#endif
+
static struct platform_driver lpc32xx_rtc_driver = {
.probe = lpc32xx_rtc_probe,
.remove = __devexit_p(lpc32xx_rtc_remove),
.driver = {
.name = RTC_NAME,
.owner = THIS_MODULE,
- .pm = LPC32XX_RTC_PM_OPS
+ .pm = LPC32XX_RTC_PM_OPS,
+ .of_match_table = of_match_ptr(lpc32xx_rtc_match),
},
};
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
index 10f1c29436ec..efab3d48cb15 100644
--- a/drivers/rtc/rtc-m41t93.c
+++ b/drivers/rtc/rtc-m41t93.c
@@ -48,6 +48,7 @@ static inline int m41t93_set_reg(struct spi_device *spi, u8 addr, u8 data)
static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
{
struct spi_device *spi = to_spi_device(dev);
+ int tmp;
u8 buf[9] = {0x80}; /* write cmd + 8 data bytes */
u8 * const data = &buf[1]; /* ptr to first data byte */
@@ -62,6 +63,30 @@ static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
+ tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
+ if (tmp < 0)
+ return tmp;
+
+ if (tmp & M41T93_FLAG_OF) {
+ dev_warn(&spi->dev, "OF bit is set, resetting.\n");
+ m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
+
+ tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
+ if (tmp < 0) {
+ return tmp;
+ } else if (tmp & M41T93_FLAG_OF) {
+ /* OF cannot be immediately reset: oscillator has to be
+ * restarted. */
+ u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
+
+ dev_warn(&spi->dev,
+ "OF bit is still set, kickstarting clock.\n");
+ m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
+ reset_osc &= ~M41T93_FLAG_ST;
+ m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
+ }
+ }
+
data[M41T93_REG_SSEC] = 0;
data[M41T93_REG_ST_SEC] = bin2bcd(tm->tm_sec);
data[M41T93_REG_MIN] = bin2bcd(tm->tm_min);
@@ -89,10 +114,7 @@ static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
1. halt bit (HT) is set: the clock is running but update of readout
registers has been disabled due to power failure. This is normal
case after poweron. Time is valid after resetting HT bit.
- 2. oscillator fail bit (OF) is set. Oscillator has be stopped and
- time is invalid:
- a) OF can be immeditely reset.
- b) OF cannot be immediately reset: oscillator has to be restarted.
+ 2. oscillator fail bit (OF) is set: time is invalid.
*/
tmp = spi_w8r8(spi, M41T93_REG_ALM_HOUR_HT);
if (tmp < 0)
@@ -110,21 +132,7 @@ static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
if (tmp & M41T93_FLAG_OF) {
ret = -EINVAL;
- dev_warn(&spi->dev, "OF bit is set, resetting.\n");
- m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
-
- tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
- if (tmp < 0)
- return tmp;
- else if (tmp & M41T93_FLAG_OF) {
- u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
-
- dev_warn(&spi->dev,
- "OF bit is still set, kickstarting clock.\n");
- m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
- reset_osc &= ~M41T93_FLAG_ST;
- m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
- }
+ dev_warn(&spi->dev, "OF bit is set, write time to restart.\n");
}
if (tmp & M41T93_FLAG_BL)
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index bc0677de1996..97a3284bb7c6 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -64,6 +64,7 @@ struct pcf8563 {
* 1970...2069.
*/
int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
+ int voltage_low; /* indicates if a low voltage was detected */
};
/*
@@ -86,9 +87,11 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
return -EIO;
}
- if (buf[PCF8563_REG_SC] & PCF8563_SC_LV)
+ if (buf[PCF8563_REG_SC] & PCF8563_SC_LV) {
+ pcf8563->voltage_low = 1;
dev_info(&client->dev,
"low voltage detected, date/time is not reliable.\n");
+ }
dev_dbg(&client->dev,
"%s: raw data is st1=%02x, st2=%02x, sec=%02x, min=%02x, hr=%02x, "
@@ -173,6 +176,44 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
return 0;
}
+#ifdef CONFIG_RTC_INTF_DEV
+static int pcf8563_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct pcf8563 *pcf8563 = i2c_get_clientdata(to_i2c_client(dev));
+ struct rtc_time tm;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ if (pcf8563->voltage_low)
+ dev_info(dev, "low voltage detected, date/time is not reliable.\n");
+
+ if (copy_to_user((void __user *)arg, &pcf8563->voltage_low,
+ sizeof(int)))
+ return -EFAULT;
+ return 0;
+ case RTC_VL_CLR:
+ /*
+ * Clear the VL bit in the seconds register in case
+ * the time has not been set already (which would
+ * have cleared it). This does not really matter
+ * because of the cached voltage_low value but do it
+ * anyway for consistency.
+ */
+ if (pcf8563_get_datetime(to_i2c_client(dev), &tm))
+ pcf8563_set_datetime(to_i2c_client(dev), &tm);
+
+ /* Clear the cached value. */
+ pcf8563->voltage_low = 0;
+
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+#else
+#define pcf8563_rtc_ioctl NULL
+#endif
+
static int pcf8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
return pcf8563_get_datetime(to_i2c_client(dev), tm);
@@ -184,6 +225,7 @@ static int pcf8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
}
static const struct rtc_class_ops pcf8563_rtc_ops = {
+ .ioctl = pcf8563_rtc_ioctl,
.read_time = pcf8563_rtc_read_time,
.set_time = pcf8563_rtc_set_time,
};
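
For context (not part of the patch): the new ioctl hook exposes the cached flag through the standard RTC character device, and RTC_VL_READ copies a plain int back to user space while RTC_VL_CLR clears the latched state. A minimal user-space sketch of how the two commands could be exercised, assuming the chip is bound to /dev/rtc0 and with error handling trimmed:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	int low = 0;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/rtc0");
		return 1;
	}

	/* RTC_VL_READ fills an int: non-zero means low voltage was seen. */
	if (ioctl(fd, RTC_VL_READ, &low) == 0 && low) {
		printf("low voltage was detected, time may be unreliable\n");
		ioctl(fd, RTC_VL_CLR, 0);	/* acknowledge and clear the flag */
	}

	close(fd);
	return 0;
}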
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index f027c063fb20..cc0533994f6e 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -220,17 +220,9 @@ static irqreturn_t pl031_interrupt(int irq, void *dev_id)
unsigned long events = 0;
rtcmis = readl(ldata->base + RTC_MIS);
- if (rtcmis) {
- writel(rtcmis, ldata->base + RTC_ICR);
-
- if (rtcmis & RTC_BIT_AI)
- events |= (RTC_AF | RTC_IRQF);
-
- /* Timer interrupt is only available in ST variants */
- if ((rtcmis & RTC_BIT_PI) &&
- (ldata->hw_designer == AMBA_VENDOR_ST))
- events |= (RTC_PF | RTC_IRQF);
-
+ if (rtcmis & RTC_BIT_AI) {
+ writel(RTC_BIT_AI, ldata->base + RTC_ICR);
+ events |= (RTC_AF | RTC_IRQF);
rtc_update_irq(ldata->rtc, 1, events);
return IRQ_HANDLED;
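
For reference, a generic sketch of the interrupt-handler pattern this change moves toward: read the masked status, acknowledge only the bit this handler owns, and (assuming the unshown remainder of the function falls through to IRQ_NONE, as the upstream driver does) let the core's spurious-interrupt detection handle the rest. MYDEV_* names and offsets are placeholders, not pl031 registers:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/rtc.h>

#define MYDEV_MIS	0x10	/* masked interrupt status (placeholder) */
#define MYDEV_ICR	0x14	/* interrupt clear register (placeholder) */
#define MYDEV_BIT_ALARM	0x01

struct mydev {
	void __iomem *base;
	struct rtc_device *rtc;
};

static irqreturn_t mydev_interrupt(int irq, void *dev_id)
{
	struct mydev *md = dev_id;
	u32 status = readl(md->base + MYDEV_MIS);

	if (!(status & MYDEV_BIT_ALARM))
		return IRQ_NONE;	/* not ours: do not ack, do not claim */

	writel(MYDEV_BIT_ALARM, md->base + MYDEV_ICR);	/* ack our bit only */
	rtc_update_irq(md->rtc, 1, RTC_AF | RTC_IRQF);
	return IRQ_HANDLED;
}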
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 3f3a29752369..7e6af0b22f17 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -670,6 +670,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
#define s3c_rtc_resume NULL
#endif
+#ifdef CONFIG_OF
static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
[TYPE_S3C2410] = { TYPE_S3C2410 },
[TYPE_S3C2416] = { TYPE_S3C2416 },
@@ -677,7 +678,6 @@ static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
[TYPE_S3C64XX] = { TYPE_S3C64XX },
};
-#ifdef CONFIG_OF
static const struct of_device_id s3c_rtc_dt_match[] = {
{
.compatible = "samsung,s3c2410-rtc",
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index e38da0dc4187..1f76320e545b 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>
@@ -519,6 +520,14 @@ static void spear_rtc_shutdown(struct platform_device *pdev)
clk_disable(config->clk);
}
+#ifdef CONFIG_OF
+static const struct of_device_id spear_rtc_id_table[] = {
+ { .compatible = "st,spear600-rtc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spear_rtc_id_table);
+#endif
+
static struct platform_driver spear_rtc_driver = {
.probe = spear_rtc_probe,
.remove = __devexit_p(spear_rtc_remove),
@@ -527,6 +536,7 @@ static struct platform_driver spear_rtc_driver = {
.shutdown = spear_rtc_shutdown,
.driver = {
.name = "rtc-spear",
+ .of_match_table = of_match_ptr(spear_rtc_id_table),
},
};
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 75259fe38602..c006025cecc8 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -309,7 +309,8 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- info = kzalloc(sizeof(struct tegra_rtc_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct tegra_rtc_info),
+ GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -317,29 +318,18 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
if (!res) {
dev_err(&pdev->dev,
"Unable to allocate resources for device.\n");
- ret = -EBUSY;
- goto err_free_info;
+ return -EBUSY;
}
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev,
- "Unable to request mem region for device.\n");
- ret = -EBUSY;
- goto err_free_info;
+ info->rtc_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!info->rtc_base) {
+ dev_err(&pdev->dev, "Unable to request mem region and grab IOs for device.\n");
+ return -EBUSY;
}
info->tegra_rtc_irq = platform_get_irq(pdev, 0);
- if (info->tegra_rtc_irq <= 0) {
- ret = -EBUSY;
- goto err_release_mem_region;
- }
-
- info->rtc_base = ioremap_nocache(res->start, resource_size(res));
- if (!info->rtc_base) {
- dev_err(&pdev->dev, "Unable to grab IOs for device.\n");
- ret = -EBUSY;
- goto err_release_mem_region;
- }
+ if (info->tegra_rtc_irq <= 0)
+ return -EBUSY;
/* set context info. */
info->pdev = pdev;
@@ -362,11 +352,12 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Unable to register device (err=%d).\n",
ret);
- goto err_iounmap;
+ return ret;
}
- ret = request_irq(info->tegra_rtc_irq, tegra_rtc_irq_handler,
- IRQF_TRIGGER_HIGH, "rtc alarm", &pdev->dev);
+ ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq,
+ tegra_rtc_irq_handler, IRQF_TRIGGER_HIGH,
+ "rtc alarm", &pdev->dev);
if (ret) {
dev_err(&pdev->dev,
"Unable to request interrupt for device (err=%d).\n",
@@ -380,12 +371,6 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
err_dev_unreg:
rtc_device_unregister(info->rtc_dev);
-err_iounmap:
- iounmap(info->rtc_base);
-err_release_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_info:
- kfree(info);
return ret;
}
@@ -393,17 +378,8 @@ err_free_info:
static int __devexit tegra_rtc_remove(struct platform_device *pdev)
{
struct tegra_rtc_info *info = platform_get_drvdata(pdev);
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EBUSY;
- free_irq(info->tegra_rtc_irq, &pdev->dev);
rtc_device_unregister(info->rtc_dev);
- iounmap(info->rtc_base);
- release_mem_region(res->start, resource_size(res));
- kfree(info);
platform_set_drvdata(pdev, NULL);
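
For context, a condensed sketch of the managed-resource probe style this Tegra conversion adopts: devm_kzalloc(), devm_request_and_ioremap() and devm_request_irq() all tie their lifetime to the struct device, so failure paths become plain returns and remove() no longer frees anything by hand. The mydrv names below are placeholders, not Tegra code:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>

struct mydrv_data {
	void __iomem *base;
	int irq;
};

static irqreturn_t mydrv_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int __devinit mydrv_probe(struct platform_device *pdev)
{
	struct mydrv_data *d;
	struct resource *res;
	int ret;

	d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	d->base = devm_request_and_ioremap(&pdev->dev, res);
	if (!d->base)
		return -EBUSY;		/* region request + ioremap in one call */

	d->irq = platform_get_irq(pdev, 0);
	if (d->irq <= 0)
		return -EBUSY;

	ret = devm_request_irq(&pdev->dev, d->irq, mydrv_irq, 0,
			       dev_name(&pdev->dev), d);
	if (ret)
		return ret;		/* everything above is released automatically */

	platform_set_drvdata(pdev, d);
	return 0;
}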
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index 3b6e6a67e765..59c6245e0421 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -396,7 +396,7 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_rtc *wm831x_rtc;
- int alm_irq = platform_get_irq_byname(pdev, "ALM");
+ int alm_irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "ALM"));
int ret = 0;
wm831x_rtc = devm_kzalloc(&pdev->dev, sizeof(*wm831x_rtc), GFP_KERNEL);
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 33a6743ddc55..c05da00583f0 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -10,8 +10,6 @@
#ifndef DASD_INT_H
#define DASD_INT_H
-#ifdef __KERNEL__
-
/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
@@ -791,6 +789,4 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
#define dasd_eer_enabled(d) (0)
#endif /* CONFIG_DASD_ERR */
-#endif /* __KERNEL__ */
-
#endif /* DASD_H */
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 36506366158d..766cb7b19b40 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -17,6 +17,7 @@
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
@@ -38,7 +39,8 @@ struct read_info_sccb {
u64 facilities; /* 48-55 */
u8 _reserved2[84 - 56]; /* 56-83 */
u8 fac84; /* 84 */
- u8 _reserved3[91 - 85]; /* 85-90 */
+ u8 fac85; /* 85 */
+ u8 _reserved3[91 - 86]; /* 86-90 */
u8 flags; /* 91 */
u8 _reserved4[100 - 92]; /* 92-99 */
u32 rnsize2; /* 100-103 */
@@ -51,6 +53,7 @@ static int __initdata early_read_info_sccb_valid;
u64 sclp_facilities;
static u8 sclp_fac84;
+static u8 sclp_fac85;
static unsigned long long rzm;
static unsigned long long rnmax;
@@ -112,6 +115,7 @@ void __init sclp_facilities_detect(void)
sccb = &early_read_info_sccb;
sclp_facilities = sccb->facilities;
sclp_fac84 = sccb->fac84;
+ sclp_fac85 = sccb->fac85;
rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
rzm <<= 20;
@@ -127,6 +131,12 @@ unsigned long long sclp_get_rzm(void)
return rzm;
}
+u8 sclp_get_fac85(void)
+{
+ return sclp_fac85;
+}
+EXPORT_SYMBOL_GPL(sclp_get_fac85);
+
/*
* This function will be called after sclp_facilities_detect(), which gets
* called from early.c code. Therefore the sccb should have valid contents.
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 69e6c50d4cfb..50f7115990ff 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -211,7 +211,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
sccb.evbuf.event_qual = EQ_STORE_DATA;
sccb.evbuf.data_id = DI_FCP_DUMP;
sccb.evbuf.event_id = 4712;
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
sccb.evbuf.asa_size = ASA_SIZE_64;
#else
sccb.evbuf.asa_size = ASA_SIZE_32;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 01bb04cd9e75..2a096795b9aa 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -571,13 +571,12 @@ free_cmd:
static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
int iscsi_cmd, int size)
{
- cmd->va = pci_alloc_consistent(phba->ctrl.pdev, sizeof(size),
- &cmd->dma);
+ cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
if (!cmd->va) {
SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
return -ENOMEM;
}
- memset(cmd->va, 0, sizeof(size));
+ memset(cmd->va, 0, size);
cmd->size = size;
be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
return 0;
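
The fix above addresses the classic sizeof-on-a-length-variable slip: sizeof(size) is the size of the int, not the number of bytes requested, so only four bytes were being allocated and cleared. A stand-alone illustration of the bug class in plain user-space C (not driver code):

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

int main(void)
{
	int size = 4096;			/* requested buffer length */
	char *buf = malloc(size);

	if (!buf)
		return 1;

	printf("sizeof(size) = %zu, size = %d\n", sizeof(size), size);

	/* Wrong: clears only sizeof(int) bytes (typically 4). */
	memset(buf, 0, sizeof(size));

	/* Right: clears the full allocation. */
	memset(buf, 0, size);

	free(buf);
	return 0;
}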
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 8b6c6bf7837e..b83927440171 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -426,6 +426,23 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
vshost = vport->drv_port.im_port->shost;
fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
+ fc_host_supported_classes(vshost) = FC_COS_CLASS3;
+
+ memset(fc_host_supported_fc4s(vshost), 0,
+ sizeof(fc_host_supported_fc4s(vshost)));
+
+ /* For FCP type 0x08 */
+ if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
+ fc_host_supported_fc4s(vshost)[2] = 1;
+
+ /* For fibre channel services type 0x20 */
+ fc_host_supported_fc4s(vshost)[7] = 1;
+
+ fc_host_supported_speeds(vshost) =
+ bfad_im_supported_speeds(&bfad->bfa);
+ fc_host_maxframe_size(vshost) =
+ bfa_fcport_get_maxfrsize(&bfad->bfa);
+
fc_vport->dd_data = vport;
vport->drv_port.im_port->fc_vport = fc_vport;
} else if (rc == BFA_STATUS_INVALID_WWN)
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 3153923f5b60..1ac09afe35ee 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -987,7 +987,7 @@ done:
return 0;
}
-static u32
+u32
bfad_im_supported_speeds(struct bfa_s *bfa)
{
struct bfa_ioc_attr_s *ioc_attr;
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 0814367ef101..f6c1023e502a 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -37,6 +37,7 @@ int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
struct bfad_im_port_s *im_port, struct device *dev);
void bfad_im_scsi_host_free(struct bfad_s *bfad,
struct bfad_im_port_s *im_port);
+u32 bfad_im_supported_speeds(struct bfa_s *bfa);
#define MAX_FCP_TARGET 1024
#define MAX_FCP_LUN 16384
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index a4953ef9e53a..0578fa0dc14b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "1.0.10"
+#define BNX2FC_VERSION "1.0.11"
#define PFX "bnx2fc: "
@@ -228,13 +228,16 @@ struct bnx2fc_interface {
struct packet_type fip_packet_type;
struct workqueue_struct *timer_work_queue;
struct kref kref;
- struct fcoe_ctlr ctlr;
u8 vlan_enabled;
int vlan_id;
bool enabled;
};
-#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
+#define bnx2fc_from_ctlr(x) \
+ ((struct bnx2fc_interface *)((x) + 1))
+
+#define bnx2fc_to_ctlr(x) \
+ ((struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1))
struct bnx2fc_lport {
struct list_head list;
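
The replacement macros assume the struct fcoe_ctlr is allocated immediately in front of the struct bnx2fc_interface, which is what the fcoe_ctlr_device_add() call later in this patch arranges by sizing the allocation as sizeof(*interface) + sizeof(struct fcoe_ctlr); plain pointer arithmetic then converts between the two. A minimal stand-alone sketch of the pattern, with illustrative names only:

#include <stdlib.h>

struct ctlr { int id; };
struct iface { int vlan_id; };

/* Private data lives directly after the generic structure. */
#define iface_from_ctlr(c)	((struct iface *)((c) + 1))
#define ctlr_from_iface(i)	((struct ctlr *)(i) - 1)

int main(void)
{
	struct ctlr *c = calloc(1, sizeof(struct ctlr) + sizeof(struct iface));
	struct iface *i;

	if (!c)
		return 1;

	i = iface_from_ctlr(c);		/* forward: controller -> private data */
	i->vlan_id = 42;

	ctlr_from_iface(i)->id = 7;	/* reverse: private data -> controller */

	free(c);
	return 0;
}

The round-trip only works because both structures come from the single allocation; the macros carry no type safety of their own, so the layout contract has to hold everywhere they are used.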
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index ce0ce3e32f33..bdbbb13b8534 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -854,7 +854,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
struct fc_exch *exch = fc_seq_exch(seq);
struct fc_lport *lport = exch->lp;
u8 *mac;
- struct fc_frame_header *fh;
u8 op;
if (IS_ERR(fp))
@@ -862,13 +861,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
mac = fr_cb(fp)->granted_mac;
if (is_zero_ether_addr(mac)) {
- fh = fc_frame_header_get(fp);
- if (fh->fh_type != FC_TYPE_ELS) {
- printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
- "fh_type != FC_TYPE_ELS\n");
- fc_frame_free(fp);
- return;
- }
op = fc_frame_payload_op(fp);
if (lport->vport) {
if (op == ELS_LS_RJT) {
@@ -878,12 +870,10 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
return;
}
}
- if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
- fc_frame_free(fp);
- return;
- }
+ fcoe_ctlr_recv_flogi(fip, lport, fp);
}
- fip->update_mac(lport, mac);
+ if (!is_zero_ether_addr(mac))
+ fip->update_mac(lport, mac);
done:
fc_lport_flogi_resp(seq, fp, lport);
}
@@ -910,7 +900,7 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
{
struct fcoe_port *port = lport_priv(lport);
struct bnx2fc_interface *interface = port->priv;
- struct fcoe_ctlr *fip = &interface->ctlr;
+ struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
struct fc_frame_header *fh = fc_frame_header_get(fp);
switch (op) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index c1c6a92a0b98..f52f668fd247 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Jan 22, 2011"
+#define DRV_MODULE_RELDATE "Apr 24, 2012"
static char version[] __devinitdata =
@@ -54,6 +54,7 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb;
static struct libfc_function_template bnx2fc_libfc_fcn_templ;
static struct scsi_host_template bnx2fc_shost_template;
static struct fc_function_template bnx2fc_transport_function;
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ;
static struct fc_function_template bnx2fc_vport_xport_function;
static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
@@ -88,6 +89,7 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport);
static void bnx2fc_stop(struct bnx2fc_interface *interface);
static int __init bnx2fc_mod_init(void);
static void __exit bnx2fc_mod_exit(void);
+static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
unsigned int bnx2fc_debug_level;
module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@@ -118,6 +120,41 @@ static void bnx2fc_get_lesb(struct fc_lport *lport,
__fcoe_get_lesb(lport, fc_lesb, netdev);
}
+static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+ struct net_device *netdev = bnx2fc_netdev(fip->lp);
+ struct fcoe_fc_els_lesb *fcoe_lesb;
+ struct fc_els_lesb fc_lesb;
+
+ __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+ fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+ ctlr_dev->lesb.lesb_link_fail =
+ ntohl(fcoe_lesb->lesb_link_fail);
+ ctlr_dev->lesb.lesb_vlink_fail =
+ ntohl(fcoe_lesb->lesb_vlink_fail);
+ ctlr_dev->lesb.lesb_miss_fka =
+ ntohl(fcoe_lesb->lesb_miss_fka);
+ ctlr_dev->lesb.lesb_symb_err =
+ ntohl(fcoe_lesb->lesb_symb_err);
+ ctlr_dev->lesb.lesb_err_block =
+ ntohl(fcoe_lesb->lesb_err_block);
+ ctlr_dev->lesb.lesb_fcs_error =
+ ntohl(fcoe_lesb->lesb_fcs_error);
+}
+EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
+
+static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+ struct fcoe_ctlr_device *ctlr_dev =
+ fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+ fcf_dev->vlan_id = fcoe->vlan_id;
+}
+
static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
{
struct fcoe_percpu_s *bg;
@@ -244,6 +281,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
struct sk_buff *skb;
struct fc_frame_header *fh;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
struct bnx2fc_hba *hba;
struct fcoe_port *port;
struct fcoe_hdr *hp;
@@ -256,6 +294,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
port = (struct fcoe_port *)lport_priv(lport);
interface = port->priv;
+ ctlr = bnx2fc_to_ctlr(interface);
hba = interface->hba;
fh = fc_frame_header_get(fp);
@@ -268,12 +307,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
}
if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
- if (!interface->ctlr.sel_fcf) {
+ if (!ctlr->sel_fcf) {
BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
kfree_skb(skb);
return -EINVAL;
}
- if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb))
+ if (fcoe_ctlr_els_send(ctlr, lport, skb))
return 0;
}
@@ -346,14 +385,14 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
/* fill up mac and fcoe headers */
eh = eth_hdr(skb);
eh->h_proto = htons(ETH_P_FCOE);
- if (interface->ctlr.map_dest)
+ if (ctlr->map_dest)
fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
else
/* insert GW address */
- memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN);
+ memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
- if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN))
- memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN);
+ if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+ memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
else
memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
@@ -403,6 +442,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
{
struct fc_lport *lport;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
struct fc_frame_header *fh;
struct fcoe_rcv_info *fr;
struct fcoe_percpu_s *bg;
@@ -410,7 +450,8 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
interface = container_of(ptype, struct bnx2fc_interface,
fcoe_packet_type);
- lport = interface->ctlr.lp;
+ ctlr = bnx2fc_to_ctlr(interface);
+ lport = ctlr->lp;
if (unlikely(lport == NULL)) {
printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
@@ -758,11 +799,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
{
struct bnx2fc_hba *hba;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
u64 wwnn, wwpn;
port = lport_priv(lport);
interface = port->priv;
+ ctlr = bnx2fc_to_ctlr(interface);
hba = interface->hba;
/* require support for get_pauseparam ethtool op. */
@@ -781,13 +824,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
if (!lport->vport) {
if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
- wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+ wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
1, 0);
BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
fc_set_wwnn(lport, wwnn);
if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
- wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+ wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
2, 0);
BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
@@ -824,6 +867,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
struct fc_lport *lport;
struct fc_lport *vport;
struct bnx2fc_interface *interface, *tmp;
+ struct fcoe_ctlr *ctlr;
int wait_for_upload = 0;
u32 link_possible = 1;
@@ -874,7 +918,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
if (interface->hba != hba)
continue;
- lport = interface->ctlr.lp;
+ ctlr = bnx2fc_to_ctlr(interface);
+ lport = ctlr->lp;
BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
interface->netdev->name, event);
@@ -889,8 +934,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
* on a stale vlan
*/
if (interface->enabled)
- fcoe_ctlr_link_up(&interface->ctlr);
- } else if (fcoe_ctlr_link_down(&interface->ctlr)) {
+ fcoe_ctlr_link_up(ctlr);
+ } else if (fcoe_ctlr_link_down(ctlr)) {
mutex_lock(&lport->lp_mutex);
list_for_each_entry(vport, &lport->vports, list)
fc_host_port_type(vport->host) =
@@ -995,9 +1040,11 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
struct net_device *orig_dev)
{
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
interface = container_of(ptype, struct bnx2fc_interface,
fip_packet_type);
- fcoe_ctlr_recv(&interface->ctlr, skb);
+ ctlr = bnx2fc_to_ctlr(interface);
+ fcoe_ctlr_recv(ctlr, skb);
return 0;
}
@@ -1155,6 +1202,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
{
struct net_device *netdev = interface->netdev;
struct net_device *physdev = interface->hba->phys_dev;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct netdev_hw_addr *ha;
int sel_san_mac = 0;
@@ -1169,7 +1217,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
(is_valid_ether_addr(ha->addr))) {
- memcpy(interface->ctlr.ctl_src_addr, ha->addr,
+ memcpy(ctlr->ctl_src_addr, ha->addr,
ETH_ALEN);
sel_san_mac = 1;
BNX2FC_MISC_DBG("Found SAN MAC\n");
@@ -1224,19 +1272,23 @@ static void bnx2fc_release_transport(void)
static void bnx2fc_interface_release(struct kref *kref)
{
+ struct fcoe_ctlr_device *ctlr_dev;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
struct net_device *netdev;
interface = container_of(kref, struct bnx2fc_interface, kref);
BNX2FC_MISC_DBG("Interface is being released\n");
+ ctlr = bnx2fc_to_ctlr(interface);
+ ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
netdev = interface->netdev;
/* tear-down FIP controller */
if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
- fcoe_ctlr_destroy(&interface->ctlr);
+ fcoe_ctlr_destroy(ctlr);
- kfree(interface);
+ fcoe_ctlr_device_delete(ctlr_dev);
dev_put(netdev);
module_put(THIS_MODULE);
@@ -1329,33 +1381,40 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
struct net_device *netdev,
enum fip_state fip_mode)
{
+ struct fcoe_ctlr_device *ctlr_dev;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
+ int size;
int rc = 0;
- interface = kzalloc(sizeof(*interface), GFP_KERNEL);
- if (!interface) {
+ size = (sizeof(*interface) + sizeof(struct fcoe_ctlr));
+ ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ,
+ size);
+ if (!ctlr_dev) {
printk(KERN_ERR PFX "Unable to allocate interface structure\n");
return NULL;
}
+ ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ interface = fcoe_ctlr_priv(ctlr);
dev_hold(netdev);
kref_init(&interface->kref);
interface->hba = hba;
interface->netdev = netdev;
/* Initialize FIP */
- fcoe_ctlr_init(&interface->ctlr, fip_mode);
- interface->ctlr.send = bnx2fc_fip_send;
- interface->ctlr.update_mac = bnx2fc_update_src_mac;
- interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
+ fcoe_ctlr_init(ctlr, fip_mode);
+ ctlr->send = bnx2fc_fip_send;
+ ctlr->update_mac = bnx2fc_update_src_mac;
+ ctlr->get_src_addr = bnx2fc_get_src_mac;
set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
rc = bnx2fc_interface_setup(interface);
if (!rc)
return interface;
- fcoe_ctlr_destroy(&interface->ctlr);
+ fcoe_ctlr_destroy(ctlr);
dev_put(netdev);
- kfree(interface);
+ fcoe_ctlr_device_delete(ctlr_dev);
return NULL;
}
@@ -1373,6 +1432,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
struct device *parent, int npiv)
{
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct fc_lport *lport, *n_port;
struct fcoe_port *port;
struct Scsi_Host *shost;
@@ -1383,7 +1443,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
if (!blport) {
- BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n");
+ BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n");
return NULL;
}
@@ -1479,7 +1539,8 @@ static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface)
static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
{
- struct fc_lport *lport = interface->ctlr.lp;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+ struct fc_lport *lport = ctlr->lp;
struct fcoe_port *port = lport_priv(lport);
struct bnx2fc_hba *hba = interface->hba;
@@ -1519,7 +1580,8 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
{
- struct fc_lport *lport = interface->ctlr.lp;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+ struct fc_lport *lport = ctlr->lp;
struct fcoe_port *port = lport_priv(lport);
bnx2fc_interface_cleanup(interface);
@@ -1543,13 +1605,15 @@ static int bnx2fc_destroy(struct net_device *netdev)
{
struct bnx2fc_interface *interface = NULL;
struct workqueue_struct *timer_work_queue;
+ struct fcoe_ctlr *ctlr;
int rc = 0;
rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
interface = bnx2fc_interface_lookup(netdev);
- if (!interface || !interface->ctlr.lp) {
+ ctlr = bnx2fc_to_ctlr(interface);
+ if (!interface || !ctlr->lp) {
rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
goto netdev_err;
@@ -1646,6 +1710,7 @@ static void bnx2fc_ulp_start(void *handle)
{
struct bnx2fc_hba *hba = handle;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
struct fc_lport *lport;
mutex_lock(&bnx2fc_dev_lock);
@@ -1657,7 +1722,8 @@ static void bnx2fc_ulp_start(void *handle)
list_for_each_entry(interface, &if_list, list) {
if (interface->hba == hba) {
- lport = interface->ctlr.lp;
+ ctlr = bnx2fc_to_ctlr(interface);
+ lport = ctlr->lp;
/* Kick off Fabric discovery*/
printk(KERN_ERR PFX "ulp_init: start discovery\n");
lport->tt.frame_send = bnx2fc_xmit;
@@ -1677,13 +1743,14 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport)
static void bnx2fc_stop(struct bnx2fc_interface *interface)
{
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct fc_lport *lport;
struct fc_lport *vport;
if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
return;
- lport = interface->ctlr.lp;
+ lport = ctlr->lp;
bnx2fc_port_shutdown(lport);
mutex_lock(&lport->lp_mutex);
@@ -1692,7 +1759,7 @@ static void bnx2fc_stop(struct bnx2fc_interface *interface)
FC_PORTTYPE_UNKNOWN;
mutex_unlock(&lport->lp_mutex);
fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
- fcoe_ctlr_link_down(&interface->ctlr);
+ fcoe_ctlr_link_down(ctlr);
fcoe_clean_pending_queue(lport);
}
@@ -1804,6 +1871,7 @@ exit:
static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
{
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct fc_lport *lport;
int wait_cnt = 0;
@@ -1814,18 +1882,18 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
return;
}
- lport = interface->ctlr.lp;
+ lport = ctlr->lp;
BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
if (!bnx2fc_link_ok(lport) && interface->enabled) {
BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
- fcoe_ctlr_link_up(&interface->ctlr);
+ fcoe_ctlr_link_up(ctlr);
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
}
/* wait for the FCF to be selected before issuing FLOGI */
- while (!interface->ctlr.sel_fcf) {
+ while (!ctlr->sel_fcf) {
msleep(250);
/* give up after 3 secs */
if (++wait_cnt > 12)
@@ -1889,19 +1957,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
static int bnx2fc_disable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
int rc = 0;
rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
interface = bnx2fc_interface_lookup(netdev);
- if (!interface || !interface->ctlr.lp) {
+ ctlr = bnx2fc_to_ctlr(interface);
+ if (!interface || !ctlr->lp) {
rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
} else {
interface->enabled = false;
- fcoe_ctlr_link_down(&interface->ctlr);
- fcoe_clean_pending_queue(interface->ctlr.lp);
+ fcoe_ctlr_link_down(ctlr);
+ fcoe_clean_pending_queue(ctlr->lp);
}
mutex_unlock(&bnx2fc_dev_lock);
@@ -1913,17 +1983,19 @@ static int bnx2fc_disable(struct net_device *netdev)
static int bnx2fc_enable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
int rc = 0;
rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
interface = bnx2fc_interface_lookup(netdev);
- if (!interface || !interface->ctlr.lp) {
+ ctlr = bnx2fc_to_ctlr(interface);
+ if (!interface || !ctlr->lp) {
rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
- } else if (!bnx2fc_link_ok(interface->ctlr.lp)) {
- fcoe_ctlr_link_up(&interface->ctlr);
+ } else if (!bnx2fc_link_ok(ctlr->lp)) {
+ fcoe_ctlr_link_up(ctlr);
interface->enabled = true;
}
@@ -1944,6 +2016,7 @@ static int bnx2fc_enable(struct net_device *netdev)
*/
static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
{
+ struct fcoe_ctlr *ctlr;
struct bnx2fc_interface *interface;
struct bnx2fc_hba *hba;
struct net_device *phys_dev;
@@ -2010,6 +2083,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
goto ifput_err;
}
+ ctlr = bnx2fc_to_ctlr(interface);
interface->vlan_id = vlan_id;
interface->vlan_enabled = 1;
@@ -2035,10 +2109,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
lport->boot_time = jiffies;
/* Make this master N_port */
- interface->ctlr.lp = lport;
+ ctlr->lp = lport;
if (!bnx2fc_link_ok(lport)) {
- fcoe_ctlr_link_up(&interface->ctlr);
+ fcoe_ctlr_link_up(ctlr);
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
}
@@ -2439,6 +2513,19 @@ static void __exit bnx2fc_mod_exit(void)
module_init(bnx2fc_mod_init);
module_exit(bnx2fc_mod_exit);
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
+ .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+ .get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
+ .get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
+ .get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
+ .get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
+ .get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
+ .get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
+
+ .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+ .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
+};
+
static struct fc_function_template bnx2fc_transport_function = {
.show_host_node_name = 1,
.show_host_port_name = 1,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index afd570962b8c..2ca6bfe4ce5e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -167,6 +167,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
{
struct fc_lport *lport = port->lport;
struct bnx2fc_interface *interface = port->priv;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct bnx2fc_hba *hba = interface->hba;
struct kwqe *kwqe_arr[4];
struct fcoe_kwqe_conn_offload1 ofld_req1;
@@ -314,13 +315,13 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
- ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
+ ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
/* fcf mac */
- ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
- ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
- ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
- ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
- ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
+ ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
+ ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+ ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+ ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+ ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -351,6 +352,7 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
{
struct kwqe *kwqe_arr[2];
struct bnx2fc_interface *interface = port->priv;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct bnx2fc_hba *hba = interface->hba;
struct fcoe_kwqe_conn_enable_disable enbl_req;
struct fc_lport *lport = port->lport;
@@ -374,12 +376,12 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
- enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
- enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
- enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
- enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
- enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
- enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
+ enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
+ enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
+ enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+ enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+ enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+ enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
port_id = fc_host_port_id(lport->host);
if (port_id != tgt->sid) {
@@ -419,6 +421,7 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
struct bnx2fc_interface *interface = port->priv;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct bnx2fc_hba *hba = interface->hba;
struct fcoe_kwqe_conn_enable_disable disable_req;
struct kwqe *kwqe_arr[2];
@@ -440,12 +443,12 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
- disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
- disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
- disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
- disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
- disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
- disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
+ disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
+ disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
+ disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+ disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+ disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+ disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
port_id = tgt->sid;
disable_req.s_id[0] = (port_id & 0x000000FF);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index e897ce975bb8..4f7453b9e41e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -810,8 +810,22 @@ retry_tmf:
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
- if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
+ if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
+ if (io_req->on_tmf_queue) {
+ list_del_init(&io_req->link);
+ io_req->on_tmf_queue = 0;
+ }
+ io_req->wait_for_comp = 1;
+ bnx2fc_initiate_cleanup(io_req);
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = wait_for_completion_timeout(&io_req->tm_done,
+ BNX2FC_FW_TIMEOUT);
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req->wait_for_comp = 0;
+ if (!rc)
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
spin_unlock_bh(&tgt->tgt_lock);
@@ -1089,6 +1103,48 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}
+int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fc_rport_priv *rdata = tgt->rdata;
+ int logo_issued;
+ int rc = SUCCESS;
+ int wait_cnt = 0;
+
+ BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
+ tgt->flags);
+ logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ io_req->wait_for_comp = 1;
+ bnx2fc_initiate_cleanup(io_req);
+
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ wait_for_completion(&io_req->tm_done);
+
+ io_req->wait_for_comp = 0;
+ /*
+ * release the reference taken in eh_abort to allow the
+ * target to re-login after flushing IOs
+ */
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+ if (!logo_issued) {
+ clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ do {
+ msleep(BNX2FC_RELOGIN_WAIT_TIME);
+ if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
+ rc = FAILED;
+ break;
+ }
+ } while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
+ }
+ spin_lock_bh(&tgt->tgt_lock);
+ return rc;
+}
/**
* bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
* SCSI command
@@ -1103,10 +1159,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
struct fc_rport_libfc_priv *rp = rport->dd_data;
struct bnx2fc_cmd *io_req;
struct fc_lport *lport;
- struct fc_rport_priv *rdata;
struct bnx2fc_rport *tgt;
- int logo_issued;
- int wait_cnt = 0;
int rc = FAILED;
@@ -1183,58 +1236,31 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
list_add_tail(&io_req->link, &tgt->io_retire_queue);
init_completion(&io_req->tm_done);
- io_req->wait_for_comp = 1;
- if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
- /* Cancel the current timer running on this io_req */
- if (cancel_delayed_work(&io_req->timeout_work))
- kref_put(&io_req->refcount,
- bnx2fc_cmd_release); /* drop timer hold */
- set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
- rc = bnx2fc_initiate_abts(io_req);
- } else {
+ if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
"already in abts processing\n", io_req->xid);
if (cancel_delayed_work(&io_req->timeout_work))
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
- bnx2fc_initiate_cleanup(io_req);
+ rc = bnx2fc_expl_logo(lport, io_req);
+ goto out;
+ }
+ /* Cancel the current timer running on this io_req */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+ set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
+ io_req->wait_for_comp = 1;
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc == FAILED) {
+ bnx2fc_initiate_cleanup(io_req);
spin_unlock_bh(&tgt->tgt_lock);
-
wait_for_completion(&io_req->tm_done);
-
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
- rdata = io_req->tgt->rdata;
- logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
- &tgt->flags);
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
-
- if (!logo_issued) {
- BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
- tgt->flags);
- mutex_lock(&lport->disc.disc_mutex);
- lport->tt.rport_logoff(rdata);
- mutex_unlock(&lport->disc.disc_mutex);
- do {
- msleep(BNX2FC_RELOGIN_WAIT_TIME);
- /*
- * If session not recovered, let SCSI-ml
- * escalate error recovery.
- */
- if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
- return FAILED;
- } while (!test_bit(BNX2FC_FLAG_SESSION_READY,
- &tgt->flags));
- }
- return SUCCESS;
- }
- if (rc == FAILED) {
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
- return rc;
+ goto done;
}
spin_unlock_bh(&tgt->tgt_lock);
@@ -1247,7 +1273,8 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
/* Let the scsi-ml try to recover this command */
printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
io_req->xid);
- rc = FAILED;
+ rc = bnx2fc_expl_logo(lport, io_req);
+ goto out;
} else {
/*
* We come here even when there was a race condition
@@ -1259,9 +1286,10 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
bnx2fc_scsi_done(io_req, DID_ABORT);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
-
+done:
/* release the reference taken in eh_abort */
kref_put(&io_req->refcount, bnx2fc_cmd_release);
+out:
spin_unlock_bh(&tgt->tgt_lock);
return rc;
}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index c1800b531270..082a25c3117e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -185,6 +185,16 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
BUG_ON(rc);
}
+ list_for_each_safe(list, tmp, &tgt->active_tm_queue) {
+ i++;
+ io_req = (struct bnx2fc_cmd *)list;
+ list_del_init(&io_req->link);
+ io_req->on_tmf_queue = 0;
+ BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
+ if (io_req->wait_for_comp)
+ complete(&io_req->tm_done);
+ }
+
list_for_each_safe(list, tmp, &tgt->els_queue) {
i++;
io_req = (struct bnx2fc_cmd *)list;
@@ -213,8 +223,17 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
- if (cancel_delayed_work(&io_req->timeout_work))
+ if (cancel_delayed_work(&io_req->timeout_work)) {
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags)) {
+ /* Handle eh_abort timeout */
+ BNX2FC_IO_DBG(io_req, "eh_abort for IO "
+ "in retire_q\n");
+ if (io_req->wait_for_comp)
+ complete(&io_req->tm_done);
+ }
kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
}
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
index f6d37d0271f7..aed0f5db3668 100644
--- a/drivers/scsi/fcoe/Makefile
+++ b/drivers/scsi/fcoe/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_FCOE) += fcoe.o
obj-$(CONFIG_LIBFCOE) += libfcoe.o
-libfcoe-objs := fcoe_ctlr.o fcoe_transport.o
+libfcoe-objs := fcoe_ctlr.o fcoe_transport.o fcoe_sysfs.o
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 76e3d0b5bfa6..fe30b1b65e1d 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -41,6 +41,7 @@
#include <scsi/fc/fc_encaps.h>
#include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
@@ -150,6 +151,21 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
+
+static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
+ .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+ .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
+
+ .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+ .get_fcoe_fcf_vlan_id = fcoe_fcf_get_vlan_id,
+};
static struct libfc_function_template fcoe_libfc_fcn_templ = {
.frame_send = fcoe_xmit,
@@ -282,7 +298,7 @@ static struct scsi_host_template fcoe_shost_template = {
static int fcoe_interface_setup(struct fcoe_interface *fcoe,
struct net_device *netdev)
{
- struct fcoe_ctlr *fip = &fcoe->ctlr;
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
struct netdev_hw_addr *ha;
struct net_device *real_dev;
u8 flogi_maddr[ETH_ALEN];
@@ -366,7 +382,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
enum fip_state fip_mode)
{
+ struct fcoe_ctlr_device *ctlr_dev;
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
+ int size;
int err;
if (!try_module_get(THIS_MODULE)) {
@@ -376,27 +395,32 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
goto out;
}
- fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
- if (!fcoe) {
- FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
+ size = sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface);
+ ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
+ size);
+ if (!ctlr_dev) {
+ FCOE_DBG("Failed to add fcoe_ctlr_device\n");
fcoe = ERR_PTR(-ENOMEM);
goto out_putmod;
}
+ ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ fcoe = fcoe_ctlr_priv(ctlr);
+
dev_hold(netdev);
/*
* Initialize FIP.
*/
- fcoe_ctlr_init(&fcoe->ctlr, fip_mode);
- fcoe->ctlr.send = fcoe_fip_send;
- fcoe->ctlr.update_mac = fcoe_update_src_mac;
- fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
+ fcoe_ctlr_init(ctlr, fip_mode);
+ ctlr->send = fcoe_fip_send;
+ ctlr->update_mac = fcoe_update_src_mac;
+ ctlr->get_src_addr = fcoe_get_src_mac;
err = fcoe_interface_setup(fcoe, netdev);
if (err) {
- fcoe_ctlr_destroy(&fcoe->ctlr);
- kfree(fcoe);
+ fcoe_ctlr_destroy(ctlr);
+ fcoe_ctlr_device_delete(ctlr_dev);
dev_put(netdev);
fcoe = ERR_PTR(err);
goto out_putmod;
@@ -419,7 +443,7 @@ out:
static void fcoe_interface_remove(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
- struct fcoe_ctlr *fip = &fcoe->ctlr;
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
u8 flogi_maddr[ETH_ALEN];
const struct net_device_ops *ops;
@@ -462,7 +486,8 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
- struct fcoe_ctlr *fip = &fcoe->ctlr;
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
+ struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
rtnl_lock();
if (!fcoe->removed)
@@ -472,8 +497,8 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
/* Release the self-reference taken during fcoe_interface_create() */
/* tear-down the FCoE controller */
fcoe_ctlr_destroy(fip);
- scsi_host_put(fcoe->ctlr.lp->host);
- kfree(fcoe);
+ scsi_host_put(fip->lp->host);
+ fcoe_ctlr_device_delete(ctlr_dev);
dev_put(netdev);
module_put(THIS_MODULE);
}
@@ -493,9 +518,11 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
struct net_device *orig_dev)
{
struct fcoe_interface *fcoe;
+ struct fcoe_ctlr *ctlr;
fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
- fcoe_ctlr_recv(&fcoe->ctlr, skb);
+ ctlr = fcoe_to_ctlr(fcoe);
+ fcoe_ctlr_recv(ctlr, skb);
return 0;
}
@@ -645,11 +672,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
u32 mfs;
u64 wwnn, wwpn;
struct fcoe_interface *fcoe;
+ struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
/* Setup lport private data to point to fcoe softc */
port = lport_priv(lport);
fcoe = port->priv;
+ ctlr = fcoe_to_ctlr(fcoe);
/*
* Determine max frame size based on underlying device and optional
@@ -676,10 +705,10 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
if (!lport->vport) {
if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
- wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
+ wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0);
fc_set_wwnn(lport, wwnn);
if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
- wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
+ wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
2, 0);
fc_set_wwpn(lport, wwpn);
}
@@ -1056,6 +1085,7 @@ static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
struct device *parent, int npiv)
{
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
struct net_device *netdev = fcoe->netdev;
struct fc_lport *lport, *n_port;
struct fcoe_port *port;
@@ -1119,7 +1149,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
}
/* Initialize the library */
- rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1);
+ rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1);
if (rc) {
FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
"interface\n");
@@ -1386,6 +1416,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
{
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_frame_header *fh;
struct fcoe_percpu_s *fps;
@@ -1393,7 +1424,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
unsigned int cpu;
fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
- lport = fcoe->ctlr.lp;
+ ctlr = fcoe_to_ctlr(fcoe);
+ lport = ctlr->lp;
if (unlikely(!lport)) {
FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
goto err2;
@@ -1409,8 +1441,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
eh = eth_hdr(skb);
- if (is_fip_mode(&fcoe->ctlr) &&
- compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
+ if (is_fip_mode(ctlr) &&
+ compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
eh->h_source);
goto err;
@@ -1544,6 +1576,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
unsigned int elen; /* eth header, may include vlan */
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
u8 sof, eof;
struct fcoe_hdr *hp;
@@ -1559,7 +1592,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
}
if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
- fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
+ fcoe_ctlr_els_send(ctlr, lport, skb))
return 0;
sof = fr_sof(fp);
@@ -1623,12 +1656,12 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
/* fill up mac and fcoe headers */
eh = eth_hdr(skb);
eh->h_proto = htons(ETH_P_FCOE);
- memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
- if (fcoe->ctlr.map_dest)
+ memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
+ if (ctlr->map_dest)
memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
- if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
- memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
+ if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+ memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
else
memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
@@ -1677,6 +1710,7 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
static inline int fcoe_filter_frames(struct fc_lport *lport,
struct fc_frame *fp)
{
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_frame_header *fh;
struct sk_buff *skb = (struct sk_buff *)fp;
@@ -1698,7 +1732,8 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
return 0;
fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
- if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
+ ctlr = fcoe_to_ctlr(fcoe);
+ if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
return -EINVAL;
@@ -1877,6 +1912,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
struct dcb_app_type *entry = ptr;
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct net_device *netdev;
struct fcoe_port *port;
@@ -1894,6 +1930,8 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
if (!fcoe)
return NOTIFY_OK;
+ ctlr = fcoe_to_ctlr(fcoe);
+
if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
prio = ffs(entry->app.priority) - 1;
else
@@ -1904,10 +1942,10 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
if (entry->app.protocol == ETH_P_FIP ||
entry->app.protocol == ETH_P_FCOE)
- fcoe->ctlr.priority = prio;
+ ctlr->priority = prio;
if (entry->app.protocol == ETH_P_FCOE) {
- port = lport_priv(fcoe->ctlr.lp);
+ port = lport_priv(ctlr->lp);
port->priority = prio;
}
@@ -1929,6 +1967,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
{
struct fc_lport *lport = NULL;
struct net_device *netdev = ptr;
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fcoe_port *port;
struct fcoe_dev_stats *stats;
@@ -1938,7 +1977,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
list_for_each_entry(fcoe, &fcoe_hostlist, list) {
if (fcoe->netdev == netdev) {
- lport = fcoe->ctlr.lp;
+ ctlr = fcoe_to_ctlr(fcoe);
+ lport = ctlr->lp;
break;
}
}
@@ -1967,7 +2007,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
break;
case NETDEV_UNREGISTER:
list_del(&fcoe->list);
- port = lport_priv(fcoe->ctlr.lp);
+ port = lport_priv(ctlr->lp);
queue_work(fcoe_wq, &port->destroy_work);
goto out;
break;
@@ -1982,8 +2022,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
fcoe_link_speed_update(lport);
if (link_possible && !fcoe_link_ok(lport))
- fcoe_ctlr_link_up(&fcoe->ctlr);
- else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
+ fcoe_ctlr_link_up(ctlr);
+ else if (fcoe_ctlr_link_down(ctlr)) {
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->LinkFailureCount++;
put_cpu();
@@ -2003,6 +2043,7 @@ out:
*/
static int fcoe_disable(struct net_device *netdev)
{
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
int rc = 0;
@@ -2013,8 +2054,9 @@ static int fcoe_disable(struct net_device *netdev)
rtnl_unlock();
if (fcoe) {
- fcoe_ctlr_link_down(&fcoe->ctlr);
- fcoe_clean_pending_queue(fcoe->ctlr.lp);
+ ctlr = fcoe_to_ctlr(fcoe);
+ fcoe_ctlr_link_down(ctlr);
+ fcoe_clean_pending_queue(ctlr->lp);
} else
rc = -ENODEV;
@@ -2032,6 +2074,7 @@ static int fcoe_disable(struct net_device *netdev)
*/
static int fcoe_enable(struct net_device *netdev)
{
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
int rc = 0;
@@ -2040,11 +2083,17 @@ static int fcoe_enable(struct net_device *netdev)
fcoe = fcoe_hostlist_lookup_port(netdev);
rtnl_unlock();
- if (!fcoe)
+ if (!fcoe) {
rc = -ENODEV;
- else if (!fcoe_link_ok(fcoe->ctlr.lp))
- fcoe_ctlr_link_up(&fcoe->ctlr);
+ goto out;
+ }
+
+ ctlr = fcoe_to_ctlr(fcoe);
+
+ if (!fcoe_link_ok(ctlr->lp))
+ fcoe_ctlr_link_up(ctlr);
+out:
mutex_unlock(&fcoe_config_mutex);
return rc;
}
@@ -2059,6 +2108,7 @@ static int fcoe_enable(struct net_device *netdev)
*/
static int fcoe_destroy(struct net_device *netdev)
{
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_lport *lport;
struct fcoe_port *port;
@@ -2071,7 +2121,8 @@ static int fcoe_destroy(struct net_device *netdev)
rc = -ENODEV;
goto out_nodev;
}
- lport = fcoe->ctlr.lp;
+ ctlr = fcoe_to_ctlr(fcoe);
+ lport = ctlr->lp;
port = lport_priv(lport);
list_del(&fcoe->list);
queue_work(fcoe_wq, &port->destroy_work);
@@ -2126,7 +2177,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
int dcbx;
u8 fup, up;
struct net_device *netdev = fcoe->realdev;
- struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+ struct fcoe_port *port = lport_priv(ctlr->lp);
struct dcb_app app = {
.priority = 0,
.protocol = ETH_P_FCOE
@@ -2149,7 +2201,7 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
}
port->priority = ffs(up) ? ffs(up) - 1 : 0;
- fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+ ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
}
#endif
}
@@ -2166,6 +2218,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
{
int rc = 0;
+ struct fcoe_ctlr_device *ctlr_dev;
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_lport *lport;
@@ -2184,7 +2238,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
goto out_nodev;
}
- lport = fcoe_if_create(fcoe, &netdev->dev, 0);
+ ctlr = fcoe_to_ctlr(fcoe);
+ ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
+ lport = fcoe_if_create(fcoe, &ctlr_dev->dev, 0);
if (IS_ERR(lport)) {
printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
netdev->name);
@@ -2195,7 +2251,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
}
/* Make this the "master" N_Port */
- fcoe->ctlr.lp = lport;
+ ctlr->lp = lport;
/* setup DCB priority attributes. */
fcoe_dcb_create(fcoe);
@@ -2208,7 +2264,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
fc_fabric_login(lport);
if (!fcoe_link_ok(lport)) {
rtnl_unlock();
- fcoe_ctlr_link_up(&fcoe->ctlr);
+ fcoe_ctlr_link_up(ctlr);
mutex_unlock(&fcoe_config_mutex);
return rc;
}
@@ -2320,11 +2376,12 @@ static int fcoe_reset(struct Scsi_Host *shost)
struct fc_lport *lport = shost_priv(shost);
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
- fcoe_ctlr_link_down(&fcoe->ctlr);
- fcoe_clean_pending_queue(fcoe->ctlr.lp);
- if (!fcoe_link_ok(fcoe->ctlr.lp))
- fcoe_ctlr_link_up(&fcoe->ctlr);
+ fcoe_ctlr_link_down(ctlr);
+ fcoe_clean_pending_queue(ctlr->lp);
+ if (!fcoe_link_ok(ctlr->lp))
+ fcoe_ctlr_link_up(ctlr);
return 0;
}
@@ -2359,10 +2416,12 @@ fcoe_hostlist_lookup_port(const struct net_device *netdev)
*/
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
{
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
fcoe = fcoe_hostlist_lookup_port(netdev);
- return (fcoe) ? fcoe->ctlr.lp : NULL;
+ ctlr = fcoe_to_ctlr(fcoe);
+ return (fcoe) ? ctlr->lp : NULL;
}
/**
@@ -2466,6 +2525,7 @@ module_init(fcoe_init);
static void __exit fcoe_exit(void)
{
struct fcoe_interface *fcoe, *tmp;
+ struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
unsigned int cpu;
@@ -2477,7 +2537,8 @@ static void __exit fcoe_exit(void)
rtnl_lock();
list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
list_del(&fcoe->list);
- port = lport_priv(fcoe->ctlr.lp);
+ ctlr = fcoe_to_ctlr(fcoe);
+ port = lport_priv(ctlr->lp);
queue_work(fcoe_wq, &port->destroy_work);
}
rtnl_unlock();
@@ -2573,7 +2634,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
{
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
- struct fcoe_ctlr *fip = &fcoe->ctlr;
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
struct fc_frame_header *fh = fc_frame_header_get(fp);
switch (op) {
@@ -2730,6 +2791,40 @@ static void fcoe_get_lesb(struct fc_lport *lport,
__fcoe_get_lesb(lport, fc_lesb, netdev);
}
+static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+ struct net_device *netdev = fcoe_netdev(fip->lp);
+ struct fcoe_fc_els_lesb *fcoe_lesb;
+ struct fc_els_lesb fc_lesb;
+
+ __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+ fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+ ctlr_dev->lesb.lesb_link_fail =
+ ntohl(fcoe_lesb->lesb_link_fail);
+ ctlr_dev->lesb.lesb_vlink_fail =
+ ntohl(fcoe_lesb->lesb_vlink_fail);
+ ctlr_dev->lesb.lesb_miss_fka =
+ ntohl(fcoe_lesb->lesb_miss_fka);
+ ctlr_dev->lesb.lesb_symb_err =
+ ntohl(fcoe_lesb->lesb_symb_err);
+ ctlr_dev->lesb.lesb_err_block =
+ ntohl(fcoe_lesb->lesb_err_block);
+ ctlr_dev->lesb.lesb_fcs_error =
+ ntohl(fcoe_lesb->lesb_fcs_error);
+}
+
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+ struct fcoe_ctlr_device *ctlr_dev =
+ fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+ fcf_dev->vlan_id = vlan_dev_vlan_id(fcoe->netdev);
+}
+
/**
* fcoe_set_port_id() - Callback from libfc when Port_ID is set.
* @lport: the local port
@@ -2747,7 +2842,8 @@ static void fcoe_set_port_id(struct fc_lport *lport,
{
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
- fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
+ fcoe_ctlr_recv_flogi(ctlr, lport, fp);
}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 96ac938d39cc..a624add4f8ec 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -68,7 +68,6 @@ do { \
* @netdev: The associated net device
* @fcoe_packet_type: FCoE packet type
* @fip_packet_type: FIP packet type
- * @ctlr: The FCoE controller (for FIP)
* @oem: The offload exchange manager for all local port
* instances associated with this port
* @removed: Indicates fcoe interface removed from net device
@@ -80,12 +79,15 @@ struct fcoe_interface {
struct net_device *realdev;
struct packet_type fcoe_packet_type;
struct packet_type fip_packet_type;
- struct fcoe_ctlr ctlr;
struct fc_exch_mgr *oem;
u8 removed;
};
-#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
+#define fcoe_to_ctlr(x) \
+ (struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1)
+
+#define fcoe_from_ctlr(x) \
+ ((struct fcoe_interface *)((x) + 1))
/**
* fcoe_netdev() - Return the net device associated with a local port
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 5a4c7250aa77..d68d57241ee6 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -160,6 +160,76 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
}
EXPORT_SYMBOL(fcoe_ctlr_init);
+static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
+{
+ struct fcoe_ctlr *fip = new->fip;
+ struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+ struct fcoe_fcf_device temp, *fcf_dev;
+ int rc = 0;
+
+ LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
+ new->fabric_name, new->fcf_mac);
+
+ mutex_lock(&ctlr_dev->lock);
+
+ temp.fabric_name = new->fabric_name;
+ temp.switch_name = new->switch_name;
+ temp.fc_map = new->fc_map;
+ temp.vfid = new->vfid;
+ memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
+ temp.priority = new->pri;
+ temp.fka_period = new->fka_period;
+ temp.selected = 0; /* default to unselected */
+
+ fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
+ if (unlikely(!fcf_dev)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * The fcoe_sysfs layer can return a CONNECTED fcf that
+ * has a priv (fcf was never deleted) or a CONNECTED fcf
+ * that doesn't have a priv (fcf was deleted). However,
+ * libfcoe will always delete FCFs before trying to add
+ * them. This is ensured because both recv_adv and
+ * age_fcfs are protected by the fcoe_ctlr's mutex.
+ * This means that we should never get a FCF with a
+ * non-NULL priv pointer.
+ */
+ BUG_ON(fcf_dev->priv);
+
+ fcf_dev->priv = new;
+ new->fcf_dev = fcf_dev;
+
+ list_add(&new->list, &fip->fcfs);
+ fip->fcf_count++;
+
+out:
+ mutex_unlock(&ctlr_dev->lock);
+ return rc;
+}
+
+static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
+{
+ struct fcoe_ctlr *fip = new->fip;
+ struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+ struct fcoe_fcf_device *fcf_dev;
+
+ list_del(&new->list);
+ fip->fcf_count--;
+
+ mutex_lock(&ctlr_dev->lock);
+
+ fcf_dev = fcoe_fcf_to_fcf_dev(new);
+ WARN_ON(!fcf_dev);
+ new->fcf_dev = NULL;
+ fcoe_fcf_device_delete(fcf_dev);
+ kfree(new);
+
+ mutex_unlock(&ctlr_dev->lock);
+}
+
/**
* fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller
* @fip: The FCoE controller whose FCFs are to be reset
@@ -173,10 +243,10 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
fip->sel_fcf = NULL;
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
- list_del(&fcf->list);
- kfree(fcf);
+ fcoe_sysfs_fcf_del(fcf);
}
- fip->fcf_count = 0;
+ WARN_ON(fip->fcf_count);
+
fip->sel_time = 0;
}
@@ -717,8 +787,11 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
unsigned long deadline;
unsigned long sel_time = 0;
+ struct list_head del_list;
struct fcoe_dev_stats *stats;
+ INIT_LIST_HEAD(&del_list);
+
stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
@@ -739,10 +812,13 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
if (time_after_eq(jiffies, deadline)) {
if (fip->sel_fcf == fcf)
fip->sel_fcf = NULL;
+ /*
+ * Move to delete list so we can call
+ * fcoe_sysfs_fcf_del (which can sleep)
+ * after the put_cpu().
+ */
list_del(&fcf->list);
- WARN_ON(!fip->fcf_count);
- fip->fcf_count--;
- kfree(fcf);
+ list_add(&fcf->list, &del_list);
stats->VLinkFailureCount++;
} else {
if (time_after(next_timer, deadline))
@@ -753,6 +829,12 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
}
}
put_cpu();
+
+ list_for_each_entry_safe(fcf, next, &del_list, list) {
+ /* Removes fcf from current list */
+ fcoe_sysfs_fcf_del(fcf);
+ }
+
if (sel_time && !fip->sel_fcf && !fip->sel_time) {
sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
fip->sel_time = sel_time;
@@ -903,23 +985,23 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fcoe_fcf *fcf;
struct fcoe_fcf new;
- struct fcoe_fcf *found;
unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
int first = 0;
int mtu_valid;
+ int found = 0;
+ int rc = 0;
if (fcoe_ctlr_parse_adv(fip, skb, &new))
return;
mutex_lock(&fip->ctlr_mutex);
first = list_empty(&fip->fcfs);
- found = NULL;
list_for_each_entry(fcf, &fip->fcfs, list) {
if (fcf->switch_name == new.switch_name &&
fcf->fabric_name == new.fabric_name &&
fcf->fc_map == new.fc_map &&
compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
- found = fcf;
+ found = 1;
break;
}
}
@@ -931,9 +1013,16 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
if (!fcf)
goto out;
- fip->fcf_count++;
memcpy(fcf, &new, sizeof(new));
- list_add(&fcf->list, &fip->fcfs);
+ fcf->fip = fip;
+ rc = fcoe_sysfs_fcf_add(fcf);
+ if (rc) {
+ printk(KERN_ERR "Failed to allocate sysfs instance "
+ "for FCF, fab %16.16llx mac %pM\n",
+ new.fabric_name, new.fcf_mac);
+ kfree(fcf);
+ goto out;
+ }
} else {
/*
* Update the FCF's keep-alive descriptor flags.
@@ -954,6 +1043,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
fcf->fka_period = new.fka_period;
memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN);
}
+
mtu_valid = fcoe_ctlr_mtu_valid(fcf);
fcf->time = jiffies;
if (!found)
@@ -996,6 +1086,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
time_before(fip->sel_time, fip->timer.expires))
mod_timer(&fip->timer, fip->sel_time);
}
+
out:
mutex_unlock(&fip->ctlr_mutex);
}
@@ -2718,9 +2809,9 @@ unlock:
/**
* fcoe_libfc_config() - Sets up libfc related properties for local port
- * @lp: The local port to configure libfc for
- * @fip: The FCoE controller in use by the local port
- * @tt: The libfc function template
+ * @lport: The local port to configure libfc for
+ * @fip: The FCoE controller in use by the local port
+ * @tt: The libfc function template
* @init_fcp: If non-zero, the FCP portion of libfc should be initialized
*
* Returns : 0 for success
@@ -2753,3 +2844,43 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
return 0;
}
EXPORT_SYMBOL_GPL(fcoe_libfc_config);
+
+void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
+{
+ struct fcoe_ctlr_device *ctlr_dev = fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+ struct fcoe_fcf *fcf;
+
+ mutex_lock(&fip->ctlr_mutex);
+ mutex_lock(&ctlr_dev->lock);
+
+ fcf = fcoe_fcf_device_priv(fcf_dev);
+ if (fcf)
+ fcf_dev->selected = (fcf == fip->sel_fcf) ? 1 : 0;
+ else
+ fcf_dev->selected = 0;
+
+ mutex_unlock(&ctlr_dev->lock);
+ mutex_unlock(&fip->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_fcf_get_selected);
+
+void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ mutex_lock(&ctlr->ctlr_mutex);
+ switch (ctlr->mode) {
+ case FIP_MODE_FABRIC:
+ ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
+ break;
+ case FIP_MODE_VN2VN:
+ ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
+ break;
+ default:
+ ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
+ break;
+ }
+ mutex_unlock(&ctlr->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
new file mode 100644
index 000000000000..2bc163198d33
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -0,0 +1,832 @@
+/*
+ * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+
+#include <scsi/fcoe_sysfs.h>
+
+static atomic_t ctlr_num;
+static atomic_t fcf_num;
+
+/*
+ * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
+ * should insulate the loss of a fcf.
+ */
+static unsigned int fcoe_fcf_dev_loss_tmo = 1800; /* seconds */
+
+module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo,
+ uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fcf_dev_loss_tmo,
+ "Maximum number of seconds that libfcoe should"
+ " insulate the loss of a fcf. Once this value is"
+ " exceeded, the fcf is removed.");
+
+/*
+ * These are used by the fcoe_*_show_function routines, they
+ * are intentionally placed in the .c file as they're not intended
+ * for use throughout the code.
+ */
+#define fcoe_ctlr_id(x) \
+ ((x)->id)
+#define fcoe_ctlr_work_q_name(x) \
+ ((x)->work_q_name)
+#define fcoe_ctlr_work_q(x) \
+ ((x)->work_q)
+#define fcoe_ctlr_devloss_work_q_name(x) \
+ ((x)->devloss_work_q_name)
+#define fcoe_ctlr_devloss_work_q(x) \
+ ((x)->devloss_work_q)
+#define fcoe_ctlr_mode(x) \
+ ((x)->mode)
+#define fcoe_ctlr_fcf_dev_loss_tmo(x) \
+ ((x)->fcf_dev_loss_tmo)
+#define fcoe_ctlr_link_fail(x) \
+ ((x)->lesb.lesb_link_fail)
+#define fcoe_ctlr_vlink_fail(x) \
+ ((x)->lesb.lesb_vlink_fail)
+#define fcoe_ctlr_miss_fka(x) \
+ ((x)->lesb.lesb_miss_fka)
+#define fcoe_ctlr_symb_err(x) \
+ ((x)->lesb.lesb_symb_err)
+#define fcoe_ctlr_err_block(x) \
+ ((x)->lesb.lesb_err_block)
+#define fcoe_ctlr_fcs_error(x) \
+ ((x)->lesb.lesb_fcs_error)
+#define fcoe_fcf_state(x) \
+ ((x)->state)
+#define fcoe_fcf_fabric_name(x) \
+ ((x)->fabric_name)
+#define fcoe_fcf_switch_name(x) \
+ ((x)->switch_name)
+#define fcoe_fcf_fc_map(x) \
+ ((x)->fc_map)
+#define fcoe_fcf_vfid(x) \
+ ((x)->vfid)
+#define fcoe_fcf_mac(x) \
+ ((x)->mac)
+#define fcoe_fcf_priority(x) \
+ ((x)->priority)
+#define fcoe_fcf_fka_period(x) \
+ ((x)->fka_period)
+#define fcoe_fcf_dev_loss_tmo(x) \
+ ((x)->dev_loss_tmo)
+#define fcoe_fcf_selected(x) \
+ ((x)->selected)
+#define fcoe_fcf_vlan_id(x) \
+ ((x)->vlan_id)
+
+/*
+ * dev_loss_tmo attribute
+ */
+static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
+{
+ int ret;
+
+ ret = kstrtoul(buf, 0, val);
+ if (ret || *val < 0)
+ return -EINVAL;
+ /*
+ * Check for overflow; dev_loss_tmo is u32
+ */
+ if (*val > UINT_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf,
+ unsigned long val)
+{
+ if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) ||
+ (fcf->state == FCOE_FCF_STATE_DISCONNECTED) ||
+ (fcf->state == FCOE_FCF_STATE_DELETED))
+ return -EBUSY;
+ /*
+ * Check for overflow; dev_loss_tmo is u32
+ */
+ if (val > UINT_MAX)
+ return -EINVAL;
+
+ fcoe_fcf_dev_loss_tmo(fcf) = val;
+ return 0;
+}
+
+#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store) \
+struct device_attribute device_attr_fcoe_##_prefix##_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+#define fcoe_ctlr_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
+ if (ctlr->f->get_fcoe_ctlr_##field) \
+ ctlr->f->get_fcoe_ctlr_##field(ctlr); \
+ return snprintf(buf, sz, format_string, \
+ cast fcoe_ctlr_##field(ctlr)); \
+}
+
+#define fcoe_fcf_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); \
+ if (ctlr->f->get_fcoe_fcf_##field) \
+ ctlr->f->get_fcoe_fcf_##field(fcf); \
+ return snprintf(buf, sz, format_string, \
+ cast fcoe_fcf_##field(fcf)); \
+}
+
+#define fcoe_ctlr_private_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
+ return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \
+}
+
+#define fcoe_fcf_private_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
+ return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \
+}
+
+#define fcoe_ctlr_private_rd_attr(field, format_string, sz) \
+ fcoe_ctlr_private_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
+ show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_ctlr_rd_attr(field, format_string, sz) \
+ fcoe_ctlr_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
+ show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_fcf_rd_attr(field, format_string, sz) \
+ fcoe_fcf_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
+ show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_fcf_private_rd_attr(field, format_string, sz) \
+ fcoe_fcf_private_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
+ show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast) \
+ fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \
+ static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
+ show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast) \
+ fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \
+ static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
+ show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_enum_name_search(title, table_type, table) \
+static const char *get_fcoe_##title##_name(enum table_type table_key) \
+{ \
+ int i; \
+ char *name = NULL; \
+ \
+ for (i = 0; i < ARRAY_SIZE(table); i++) { \
+ if (table[i].value == table_key) { \
+ name = table[i].name; \
+ break; \
+ } \
+ } \
+ return name; \
+}
+
+static struct {
+ enum fcf_state value;
+ char *name;
+} fcf_state_names[] = {
+ { FCOE_FCF_STATE_UNKNOWN, "Unknown" },
+ { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
+ { FCOE_FCF_STATE_CONNECTED, "Connected" },
+};
+fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
+#define FCOE_FCF_STATE_MAX_NAMELEN 50
+
+static ssize_t show_fcf_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+ const char *name;
+ name = get_fcoe_fcf_state_name(fcf->state);
+ if (!name)
+ return -EINVAL;
+ return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name);
+}
+static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
+
+static struct {
+ enum fip_conn_type value;
+ char *name;
+} fip_conn_type_names[] = {
+ { FIP_CONN_TYPE_UNKNOWN, "Unknown" },
+ { FIP_CONN_TYPE_FABRIC, "Fabric" },
+ { FIP_CONN_TYPE_VN2VN, "VN2VN" },
+};
+fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
+#define FCOE_CTLR_MODE_MAX_NAMELEN 50
+
+static ssize_t show_ctlr_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ const char *name;
+
+ if (ctlr->f->get_fcoe_ctlr_mode)
+ ctlr->f->get_fcoe_ctlr_mode(ctlr);
+
+ name = get_fcoe_ctlr_mode_name(ctlr->mode);
+ if (!name)
+ return -EINVAL;
+ return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
+ "%s\n", name);
+}
+static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
+ show_ctlr_mode, NULL);
+
+static ssize_t
+store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ struct fcoe_fcf_device *fcf;
+ unsigned long val;
+ int rc;
+
+ rc = fcoe_str_to_dev_loss(buf, &val);
+ if (rc)
+ return rc;
+
+ fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val;
+ mutex_lock(&ctlr->lock);
+ list_for_each_entry(fcf, &ctlr->fcfs, peers)
+ fcoe_fcf_set_dev_loss_tmo(fcf, val);
+ mutex_unlock(&ctlr->lock);
+ return count;
+}
+fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, );
+static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR,
+ show_fcoe_ctlr_device_fcf_dev_loss_tmo,
+ store_private_fcoe_ctlr_fcf_dev_loss_tmo);
+
+/* Link Error Status Block (LESB) */
+fcoe_ctlr_rd_attr(link_fail, "%u\n", 20);
+fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20);
+fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20);
+fcoe_ctlr_rd_attr(symb_err, "%u\n", 20);
+fcoe_ctlr_rd_attr(err_block, "%u\n", 20);
+fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20);
+
+fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
+fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long);
+fcoe_fcf_private_rd_attr(priority, "%u\n", 20);
+fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20);
+fcoe_fcf_private_rd_attr(vfid, "%u\n", 20);
+fcoe_fcf_private_rd_attr(mac, "%pM\n", 20);
+fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20);
+fcoe_fcf_rd_attr(selected, "%u\n", 20);
+fcoe_fcf_rd_attr(vlan_id, "%u\n", 20);
+
+fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, )
+static ssize_t
+store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+ unsigned long val;
+ int rc;
+
+ rc = fcoe_str_to_dev_loss(buf, &val);
+ if (rc)
+ return rc;
+
+ rc = fcoe_fcf_set_dev_loss_tmo(fcf, val);
+ if (rc)
+ return rc;
+ return count;
+}
+static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR,
+ show_fcoe_fcf_device_dev_loss_tmo,
+ store_fcoe_fcf_dev_loss_tmo);
+
+static struct attribute *fcoe_ctlr_lesb_attrs[] = {
+ &device_attr_fcoe_ctlr_link_fail.attr,
+ &device_attr_fcoe_ctlr_vlink_fail.attr,
+ &device_attr_fcoe_ctlr_miss_fka.attr,
+ &device_attr_fcoe_ctlr_symb_err.attr,
+ &device_attr_fcoe_ctlr_err_block.attr,
+ &device_attr_fcoe_ctlr_fcs_error.attr,
+ NULL,
+};
+
+static struct attribute_group fcoe_ctlr_lesb_attr_group = {
+ .name = "lesb",
+ .attrs = fcoe_ctlr_lesb_attrs,
+};
+
+static struct attribute *fcoe_ctlr_attrs[] = {
+ &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_ctlr_mode.attr,
+ NULL,
+};
+
+static struct attribute_group fcoe_ctlr_attr_group = {
+ .attrs = fcoe_ctlr_attrs,
+};
+
+static const struct attribute_group *fcoe_ctlr_attr_groups[] = {
+ &fcoe_ctlr_attr_group,
+ &fcoe_ctlr_lesb_attr_group,
+ NULL,
+};
+
+static struct attribute *fcoe_fcf_attrs[] = {
+ &device_attr_fcoe_fcf_fabric_name.attr,
+ &device_attr_fcoe_fcf_switch_name.attr,
+ &device_attr_fcoe_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_fcf_fc_map.attr,
+ &device_attr_fcoe_fcf_vfid.attr,
+ &device_attr_fcoe_fcf_mac.attr,
+ &device_attr_fcoe_fcf_priority.attr,
+ &device_attr_fcoe_fcf_fka_period.attr,
+ &device_attr_fcoe_fcf_state.attr,
+ &device_attr_fcoe_fcf_selected.attr,
+ &device_attr_fcoe_fcf_vlan_id.attr,
+ NULL
+};
+
+static struct attribute_group fcoe_fcf_attr_group = {
+ .attrs = fcoe_fcf_attrs,
+};
+
+static const struct attribute_group *fcoe_fcf_attr_groups[] = {
+ &fcoe_fcf_attr_group,
+ NULL,
+};
+
+struct bus_type fcoe_bus_type;
+
+static int fcoe_bus_match(struct device *dev,
+ struct device_driver *drv)
+{
+ if (dev->bus == &fcoe_bus_type)
+ return 1;
+ return 0;
+}
+
+/**
+ * fcoe_ctlr_device_release() - Release the FIP ctlr memory
+ * @dev: Pointer to the FIP ctlr's embedded device
+ *
+ * Called when the last FIP ctlr reference is released.
+ */
+static void fcoe_ctlr_device_release(struct device *dev)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ kfree(ctlr);
+}
+
+/**
+ * fcoe_fcf_device_release() - Release the FIP fcf memory
+ * @dev: Pointer to the fcf's embedded device
+ *
+ * Called when the last FIP fcf reference is released.
+ */
+static void fcoe_fcf_device_release(struct device *dev)
+{
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+ kfree(fcf);
+}
+
+struct device_type fcoe_ctlr_device_type = {
+ .name = "fcoe_ctlr",
+ .groups = fcoe_ctlr_attr_groups,
+ .release = fcoe_ctlr_device_release,
+};
+
+struct device_type fcoe_fcf_device_type = {
+ .name = "fcoe_fcf",
+ .groups = fcoe_fcf_attr_groups,
+ .release = fcoe_fcf_device_release,
+};
+
+struct bus_type fcoe_bus_type = {
+ .name = "fcoe",
+ .match = &fcoe_bus_match,
+};
+
+/**
+ * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue
+ * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed
+ */
+void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr)
+{
+ if (!fcoe_ctlr_work_q(ctlr)) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to flush work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+ return;
+ }
+
+ flush_workqueue(fcoe_ctlr_work_q(ctlr));
+}
+
+/**
+ * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue
+ * @ctlr: Pointer to the FIP ctlr who owns the workqueue
+ * @work: Work to queue for execution
+ *
+ * Return value:
+ * 1 on success / 0 already queued / < 0 for error
+ */
+int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr,
+ struct work_struct *work)
+{
+ if (unlikely(!fcoe_ctlr_work_q(ctlr))) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to queue work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+
+ return -EINVAL;
+ }
+
+ return queue_work(fcoe_ctlr_work_q(ctlr), work);
+}
+
+/**
+ * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue
+ * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed
+ */
+void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr)
+{
+ if (!fcoe_ctlr_devloss_work_q(ctlr)) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to flush work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+ return;
+ }
+
+ flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr));
+}
+
+/**
+ * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue
+ * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
+ * @work: Work to queue for execution
+ * @delay: jiffies to delay the work queuing
+ *
+ * Return value:
+ * 1 on success / 0 already queued / < 0 for error
+ */
+int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr,
+ struct delayed_work *work,
+ unsigned long delay)
+{
+ if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to queue work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+
+ return -EINVAL;
+ }
+
+ return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay);
+}
+
+static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
+ struct fcoe_fcf_device *old)
+{
+ if (new->switch_name == old->switch_name &&
+ new->fabric_name == old->fabric_name &&
+ new->fc_map == old->fc_map &&
+ compare_ether_addr(new->mac, old->mac) == 0)
+ return 1;
+ return 0;
+}
+
+/**
+ * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs
+ * @parent: The parent device to which the fcoe_ctlr instance
+ * should be attached
+ * @f: The LLD's FCoE sysfs function template pointer
+ * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD
+ *
+ * This routine allocates a FIP ctlr object with some additional memory
+ * for the LLD. The FIP ctlr is initialized, added to sysfs and then
+ * attributes are added to it.
+ */
+struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+ struct fcoe_sysfs_function_template *f,
+ int priv_size)
+{
+ struct fcoe_ctlr_device *ctlr;
+ int error = 0;
+
+ ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size,
+ GFP_KERNEL);
+ if (!ctlr)
+ goto out;
+
+ ctlr->id = atomic_inc_return(&ctlr_num) - 1;
+ ctlr->f = f;
+ INIT_LIST_HEAD(&ctlr->fcfs);
+ mutex_init(&ctlr->lock);
+ ctlr->dev.parent = parent;
+ ctlr->dev.bus = &fcoe_bus_type;
+ ctlr->dev.type = &fcoe_ctlr_device_type;
+
+ ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;
+
+ snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
+ "ctlr_wq_%d", ctlr->id);
+ ctlr->work_q = create_singlethread_workqueue(
+ ctlr->work_q_name);
+ if (!ctlr->work_q)
+ goto out_del;
+
+ snprintf(ctlr->devloss_work_q_name,
+ sizeof(ctlr->devloss_work_q_name),
+ "ctlr_dl_wq_%d", ctlr->id);
+ ctlr->devloss_work_q = create_singlethread_workqueue(
+ ctlr->devloss_work_q_name);
+ if (!ctlr->devloss_work_q)
+ goto out_del_q;
+
+ dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
+ error = device_register(&ctlr->dev);
+ if (error)
+ goto out_del_q2;
+
+ return ctlr;
+
+out_del_q2:
+ destroy_workqueue(ctlr->devloss_work_q);
+ ctlr->devloss_work_q = NULL;
+out_del_q:
+ destroy_workqueue(ctlr->work_q);
+ ctlr->work_q = NULL;
+out_del:
+ kfree(ctlr);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add);
+
+/**
+ * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs
+ * @ctlr: A pointer to the ctlr to be deleted
+ *
+ * Deletes a FIP ctlr and any fcfs attached
+ * to it. Deleting fcfs will cause their children
+ * to be deleted as well.
+ *
+ * The ctlr is detached from sysfs and its resources
+ * are freed (work q), but the memory is not freed
+ * until its last reference is released.
+ *
+ * This routine expects no locks to be held before
+ * calling.
+ *
+ * TODO: Currently there are no callbacks to clean up LLD data
+ * for a fcoe_fcf_device. LLDs must keep this in mind as they need
+ * to clean up each of their LLD data for all fcoe_fcf_device before
+ * calling fcoe_ctlr_device_delete.
+ */
+void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr)
+{
+ struct fcoe_fcf_device *fcf, *next;
+ /* Remove any attached fcfs */
+ mutex_lock(&ctlr->lock);
+ list_for_each_entry_safe(fcf, next,
+ &ctlr->fcfs, peers) {
+ list_del(&fcf->peers);
+ fcf->state = FCOE_FCF_STATE_DELETED;
+ fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
+ }
+ mutex_unlock(&ctlr->lock);
+
+ fcoe_ctlr_device_flush_work(ctlr);
+
+ destroy_workqueue(ctlr->devloss_work_q);
+ ctlr->devloss_work_q = NULL;
+ destroy_workqueue(ctlr->work_q);
+ ctlr->work_q = NULL;
+
+ device_unregister(&ctlr->dev);
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete);
+
+/**
+ * fcoe_fcf_device_final_delete() - Final delete routine
+ * @work: The FIP fcf's embedded work struct
+ *
+ * It is expected that the fcf has been removed from
+ * the FIP ctlr's list before calling this routine.
+ */
+static void fcoe_fcf_device_final_delete(struct work_struct *work)
+{
+ struct fcoe_fcf_device *fcf =
+ container_of(work, struct fcoe_fcf_device, delete_work);
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+
+ /*
+ * Cancel any outstanding timers. These should really exist
+ * only when rmmod'ing the LLDD and we're asking for
+ * immediate termination of the rports
+ */
+ if (!cancel_delayed_work(&fcf->dev_loss_work))
+ fcoe_ctlr_device_flush_devloss(ctlr);
+
+ device_unregister(&fcf->dev);
+}
+
+/**
+ * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires
+ * @work: The FIP fcf's embedded work struct
+ *
+ * Removes the fcf from the FIP ctlr's list of fcfs and
+ * queues the final deletion.
+ */
+static void fip_timeout_deleted_fcf(struct work_struct *work)
+{
+ struct fcoe_fcf_device *fcf =
+ container_of(work, struct fcoe_fcf_device, dev_loss_work.work);
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+
+ mutex_lock(&ctlr->lock);
+
+ /*
+ * If the fcf is deleted or reconnected before the timer
+ * fires, the devloss queue will be flushed, but the state will
+ * either be CONNECTED or DELETED. If that is the case we
+ * cancel deleting the fcf.
+ */
+ if (fcf->state != FCOE_FCF_STATE_DISCONNECTED)
+ goto out;
+
+ dev_printk(KERN_ERR, &fcf->dev,
+ "FIP fcf connection time out: removing fcf\n");
+
+ list_del(&fcf->peers);
+ fcf->state = FCOE_FCF_STATE_DELETED;
+ fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
+
+out:
+ mutex_unlock(&ctlr->lock);
+}
+
+/**
+ * fcoe_fcf_device_delete() - Delete a FIP fcf
+ * @fcf: Pointer to the fcf which is to be deleted
+ *
+ * Queues the FIP fcf on the devloss workqueue
+ *
+ * Expects the ctlr_attrs mutex to be held for fcf
+ * state change.
+ */
+void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf)
+{
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+ int timeout = fcf->dev_loss_tmo;
+
+ if (fcf->state != FCOE_FCF_STATE_CONNECTED)
+ return;
+
+ fcf->state = FCOE_FCF_STATE_DISCONNECTED;
+
+ /*
+ * FCF will only be re-connected by the LLD calling
+ * fcoe_fcf_device_add, and it should be setting up
+ * priv then.
+ */
+ fcf->priv = NULL;
+
+ fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work,
+ timeout * HZ);
+}
+EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete);
+
+/**
+ * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system
+ * @ctlr: The fcoe_ctlr_device that will be the fcoe_fcf_device parent
+ * @new_fcf: A temporary FCF used for lookups on the current list of fcfs
+ *
+ * Expects to be called with the ctlr->lock held
+ */
+struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
+ struct fcoe_fcf_device *new_fcf)
+{
+ struct fcoe_fcf_device *fcf;
+ int error = 0;
+
+ list_for_each_entry(fcf, &ctlr->fcfs, peers) {
+ if (fcoe_fcf_device_match(new_fcf, fcf)) {
+ if (fcf->state == FCOE_FCF_STATE_CONNECTED)
+ return fcf;
+
+ fcf->state = FCOE_FCF_STATE_CONNECTED;
+
+ if (!cancel_delayed_work(&fcf->dev_loss_work))
+ fcoe_ctlr_device_flush_devloss(ctlr);
+
+ return fcf;
+ }
+ }
+
+ fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC);
+ if (unlikely(!fcf))
+ goto out;
+
+ INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete);
+ INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf);
+
+ fcf->dev.parent = &ctlr->dev;
+ fcf->dev.bus = &fcoe_bus_type;
+ fcf->dev.type = &fcoe_fcf_device_type;
+ fcf->id = atomic_inc_return(&fcf_num) - 1;
+ fcf->state = FCOE_FCF_STATE_UNKNOWN;
+
+ fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
+
+ dev_set_name(&fcf->dev, "fcf_%d", fcf->id);
+
+ fcf->fabric_name = new_fcf->fabric_name;
+ fcf->switch_name = new_fcf->switch_name;
+ fcf->fc_map = new_fcf->fc_map;
+ fcf->vfid = new_fcf->vfid;
+ memcpy(fcf->mac, new_fcf->mac, ETH_ALEN);
+ fcf->priority = new_fcf->priority;
+ fcf->fka_period = new_fcf->fka_period;
+ fcf->selected = new_fcf->selected;
+
+ error = device_register(&fcf->dev);
+ if (error)
+ goto out_del;
+
+ fcf->state = FCOE_FCF_STATE_CONNECTED;
+ list_add_tail(&fcf->peers, &ctlr->fcfs);
+
+ return fcf;
+
+out_del:
+ kfree(fcf);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_fcf_device_add);
+
+int __init fcoe_sysfs_setup(void)
+{
+ int error;
+
+ atomic_set(&ctlr_num, 0);
+ atomic_set(&fcf_num, 0);
+
+ error = bus_register(&fcoe_bus_type);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+void __exit fcoe_sysfs_teardown(void)
+{
+ bus_unregister(&fcoe_bus_type);
+}
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 710e149d41b6..b46f43dced78 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -815,9 +815,17 @@ out_nodev:
*/
static int __init libfcoe_init(void)
{
- fcoe_transport_init();
+ int rc = 0;
- return 0;
+ rc = fcoe_transport_init();
+ if (rc)
+ return rc;
+
+ rc = fcoe_sysfs_setup();
+ if (rc)
+ fcoe_transport_exit();
+
+ return rc;
}
module_init(libfcoe_init);
@@ -826,6 +834,7 @@ module_init(libfcoe_init);
*/
static void __exit libfcoe_exit(void)
{
+ fcoe_sysfs_teardown();
fcoe_transport_exit();
}
module_exit(libfcoe_exit);
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 6208d562890d..317a7fdc3b82 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -25,3 +25,12 @@ config SCSI_QLA_FC
Firmware images can be retrieved from:
ftp://ftp.qlogic.com/outgoing/linux/firmware/
+
+config TCM_QLA2XXX
+ tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
+ depends on SCSI_QLA_FC && TARGET_CORE
+ select LIBFC
+ select BTREE
+ default n
+ ---help---
+ Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 5df782f4a097..dce7d788cdc9 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,5 +1,6 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
- qla_nx.o
+ qla_nx.o qla_target.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
+obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5926f5a87ea8..5ab953029f8d 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_target.h"
#include <linux/kthread.h>
#include <linux/vmalloc.h>
@@ -576,6 +577,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_block_requests(vha->host);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
if (IS_QLA82XX(ha)) {
+ ha->flags.isp82xx_no_md_cap = 1;
qla82xx_idc_lock(ha);
qla82xx_set_reset_owner(vha);
qla82xx_idc_unlock(ha);
@@ -585,7 +587,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_unblock_requests(vha->host);
break;
case 0x2025d:
- if (!IS_QLA81XX(ha))
+ if (!IS_QLA81XX(ha) || !IS_QLA8031(ha))
return -EPERM;
ql_log(ql_log_info, vha, 0x706f,
@@ -1105,9 +1107,8 @@ qla2x00_total_isp_aborts_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%d\n",
- ha->qla_stats.total_isp_aborts);
+ vha->qla_stats.total_isp_aborts);
}
static ssize_t
@@ -1154,7 +1155,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1537,7 +1538,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
dma_addr_t stats_dma;
struct fc_host_statistics *pfc_host_stat;
- pfc_host_stat = &ha->fc_host_stat;
+ pfc_host_stat = &vha->fc_host_stat;
memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
if (test_bit(UNLOADING, &vha->dpc_flags))
@@ -1580,8 +1581,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat->dumped_frames = stats->dumped_frames;
pfc_host_stat->nos_count = stats->nos_rcvd;
}
- pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
- pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
+ pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
+ pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
done_free:
dma_pool_free(ha->s_dma_pool, stats, stats_dma);
@@ -1737,6 +1738,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
fc_host_supported_speeds(vha->host) =
fc_host_supported_speeds(base_vha->host);
+ qlt_vport_create(vha, ha);
qla24xx_vport_disable(fc_vport, disable);
if (ha->flags.cpu_affinity_enabled) {
@@ -1951,12 +1953,16 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
- fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+ fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
+ (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
if (IS_CNA_CAPABLE(ha))
speed = FC_PORTSPEED_10GBIT;
+ else if (IS_QLA2031(ha))
+ speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
+ FC_PORTSPEED_4GBIT;
else if (IS_QLA25XX(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index bc3cc6d91117..c68883806c54 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -297,7 +297,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
fcport->d_id.b.al_pa =
bsg_job->request->rqst_data.h_els.port_id[0];
fcport->d_id.b.area =
@@ -483,7 +482,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
@@ -544,7 +542,7 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
int rval = 0;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
goto done_set_internal;
new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
@@ -586,7 +584,7 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
uint16_t new_config[4];
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
goto done_reset_internal;
memset(new_config, 0 , sizeof(new_config));
@@ -710,8 +708,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
if ((ha->current_topology == ISP_CFG_F ||
- (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
- ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
+ ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
elreq.options == EXTERNAL_LOOPBACK) {
@@ -1402,6 +1399,9 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
if (rval)
return rval;
+ /* Set the isp82xx_no_md_cap not to capture minidump */
+ ha->flags.isp82xx_no_md_cap = 1;
+
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
ha->optrom_region_size);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 62324a1d5573..fdee5611f3e2 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,27 +11,31 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0120 | 0x4b,0xba,0xfa |
- * | Mailbox commands | 0x113e | 0x112c-0x112e |
+ * | Module Init and Probe | 0x0122 | 0x4b,0xba,0xfa |
+ * | Mailbox commands | 0x1140 | 0x111a-0x111b |
+ * | | | 0x112c-0x112e |
* | | | 0x113a |
* | Device Discovery | 0x2086 | 0x2020-0x2022 |
* | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 |
* | | | 0x302d-0x302e |
- * | DPC Thread | 0x401c | |
- * | Async Events | 0x505d | 0x502b-0x502f |
+ * | DPC Thread | 0x401c | 0x4002,0x4013 |
+ * | Async Events | 0x505f | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
- * | Timer Routines | 0x6011 | 0x600e-0x600f |
+ * | Timer Routines | 0x6011 | |
* | User Space Interactions | 0x709f | 0x7018,0x702e, |
* | | | 0x7039,0x7045, |
* | | | 0x7073-0x7075, |
* | | | 0x708c |
* | Task Management | 0x803c | 0x8025-0x8026 |
* | | | 0x800b,0x8039 |
- * | AER/EEH | 0x900f | |
+ * | AER/EEH | 0x9011 | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb054 | 0xb053 |
+ * | ISP82XX Specific | 0xb054 | 0xb024 |
* | MultiQ | 0xc00c | |
* | Misc | 0xd010 | |
+ * | Target Mode | 0xe06f | |
+ * | Target Mode Management | 0xf071 | |
+ * | Target Mode Task Management | 0x1000b | |
* ----------------------------------------------------------------------
*/
@@ -379,6 +383,54 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
}
static inline void *
+qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
+ uint32_t **last_chain)
+{
+ struct qla2xxx_mqueue_chain *q;
+ struct qla2xxx_mqueue_header *qh;
+ uint32_t num_queues;
+ int que;
+ struct {
+ int length;
+ void *ring;
+ } aq, *aqp;
+
+ if (!ha->tgt.atio_q_length)
+ return ptr;
+
+ num_queues = 1;
+ aqp = &aq;
+ aqp->length = ha->tgt.atio_q_length;
+ aqp->ring = ha->tgt.atio_ring;
+
+ for (que = 0; que < num_queues; que++) {
+ /* aqp = ha->atio_q_map[que]; */
+ q = ptr;
+ *last_chain = &q->type;
+ q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+ q->chain_size = htonl(
+ sizeof(struct qla2xxx_mqueue_chain) +
+ sizeof(struct qla2xxx_mqueue_header) +
+ (aqp->length * sizeof(request_t)));
+ ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+ /* Add header. */
+ qh = ptr;
+ qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
+ qh->number = htonl(que);
+ qh->size = htonl(aqp->length * sizeof(request_t));
+ ptr += sizeof(struct qla2xxx_mqueue_header);
+
+ /* Add data. */
+ memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
+
+ ptr += aqp->length * sizeof(request_t);
+ }
+
+ return ptr;
+}
+
+static inline void *
qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
struct qla2xxx_mqueue_chain *q;
@@ -873,6 +925,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
struct qla24xx_fw_dump *fw;
uint32_t ext_mem_cnt;
void *nxt;
+ void *nxt_chain;
+ uint32_t *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
if (IS_QLA82XX(ha))
@@ -1091,6 +1145,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_copy_eft(ha, nxt);
+ nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+ if (last_chain) {
+ ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+ *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+ }
+
+ /* Adjust valid length. */
+ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
qla24xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
@@ -1399,6 +1463,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Chain entries -- started with MQ. */
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -1717,6 +1782,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Chain entries -- started with MQ. */
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -2218,6 +2284,7 @@ copy_queue:
/* Chain entries -- started with MQ. */
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 2157bdf1569a..f278df8cce0f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -244,6 +244,7 @@ struct qla2xxx_mqueue_header {
uint32_t queue;
#define TYPE_REQUEST_QUEUE 0x1
#define TYPE_RESPONSE_QUEUE 0x2
+#define TYPE_ATIO_QUEUE 0x3
uint32_t number;
uint32_t size;
};
@@ -339,3 +340,11 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
* not covered by upper categories
*/
+#define ql_dbg_verbose 0x00008000 /* More verbosity for each level
+ * This is to be used with other levels where
+ * more verbosity is required. It might not
+ * be applicable to all the levels.
+ */
+#define ql_dbg_tgt 0x00004000 /* Target mode */
+#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
+#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a2443031dbe7..39007f53aec0 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -186,6 +186,7 @@
#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
+#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
struct req_que;
@@ -1234,11 +1235,27 @@ typedef struct {
* ISP queue - response queue entry definition.
*/
typedef struct {
- uint8_t data[60];
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ uint8_t data[52];
uint32_t signature;
#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */
} response_t;
+/*
+ * ISP queue - ATIO queue entry definition.
+ */
+struct atio {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t data[58];
+ uint32_t signature;
+#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
+};
+
typedef union {
uint16_t extended;
struct {
@@ -1719,11 +1736,13 @@ typedef struct fc_port {
struct fc_rport *rport, *drport;
u32 supported_classes;
- uint16_t vp_idx;
uint8_t fc4_type;
uint8_t scan_state;
} fc_port_t;
+#define QLA_FCPORT_SCAN_NONE 0
+#define QLA_FCPORT_SCAN_FOUND 1
+
/*
* Fibre channel port/lun states.
*/
@@ -1747,6 +1766,7 @@ static const char * const port_state_str[] = {
#define FCF_LOGIN_NEEDED BIT_1
#define FCF_FCP2_DEVICE BIT_2
#define FCF_ASYNC_SENT BIT_3
+#define FCF_CONF_COMP_SUPPORTED BIT_4
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@@ -2419,6 +2439,40 @@ struct qlfc_fw {
uint32_t len;
};
+struct qlt_hw_data {
+ /* Protected by hw lock */
+ uint32_t enable_class_2:1;
+ uint32_t enable_explicit_conf:1;
+ uint32_t ini_mode_force_reverse:1;
+ uint32_t node_name_set:1;
+
+ dma_addr_t atio_dma; /* Physical address. */
+ struct atio *atio_ring; /* Base virtual address */
+ struct atio *atio_ring_ptr; /* Current address. */
+ uint16_t atio_ring_index; /* Current index. */
+ uint16_t atio_q_length;
+
+ void *target_lport_ptr;
+ struct qla_tgt_func_tmpl *tgt_ops;
+ struct qla_tgt *qla_tgt;
+ struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
+ uint16_t current_handle;
+
+ struct qla_tgt_vp_map *tgt_vp_map;
+ struct mutex tgt_mutex;
+ struct mutex tgt_host_action_mutex;
+
+ int saved_set;
+ uint16_t saved_exchange_count;
+ uint32_t saved_firmware_options_1;
+ uint32_t saved_firmware_options_2;
+ uint32_t saved_firmware_options_3;
+ uint8_t saved_firmware_options[2];
+ uint8_t saved_add_firmware_options[2];
+
+ uint8_t tgt_node_name[WWN_SIZE];
+};
+
/*
* Qlogic host adapter specific data structure.
*/
@@ -2460,7 +2514,9 @@ struct qla_hw_data {
uint32_t thermal_supported:1;
uint32_t isp82xx_reset_hdlr_active:1;
uint32_t isp82xx_reset_owner:1;
- /* 28 bits */
+ uint32_t isp82xx_no_md_cap:1;
+ uint32_t host_shutting_down:1;
+ /* 30 bits */
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -2804,7 +2860,6 @@ struct qla_hw_data {
/* ISP2322: red, green, amber. */
uint16_t zio_mode;
uint16_t zio_timer;
- struct fc_host_statistics fc_host_stat;
struct qla_msix_entry *msix_entries;
@@ -2817,7 +2872,6 @@ struct qla_hw_data {
int cur_vport_count;
struct qla_chip_state_84xx *cs84xx;
- struct qla_statistics qla_stats;
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
struct qlfc_fw fw_buf;
@@ -2863,6 +2917,8 @@ struct qla_hw_data {
dma_addr_t md_tmplt_hdr_dma;
void *md_dump;
uint32_t md_dump_size;
+
+ struct qlt_hw_data tgt;
};
/*
@@ -2920,6 +2976,7 @@ typedef struct scsi_qla_host {
#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
+#define SCR_PENDING 21 /* SCR in target mode */
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
@@ -2979,10 +3036,21 @@ typedef struct scsi_qla_host {
struct req_que *req;
int fw_heartbeat_counter;
int seconds_since_last_heartbeat;
+ struct fc_host_statistics fc_host_stat;
+ struct qla_statistics qla_stats;
atomic_t vref_count;
} scsi_qla_host_t;
+#define SET_VP_IDX 1
+#define SET_AL_PA 2
+#define RESET_VP_IDX 3
+#define RESET_AL_PA 4
+struct qla_tgt_vp_map {
+ uint8_t idx;
+ scsi_qla_host_t *vha;
+};
+
/*
* Macros to help code, maintain, etc.
*/
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9f065804bd12..9eacd2df111b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -175,6 +175,7 @@ extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_iocb.c source file.
*/
+
extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -188,6 +189,8 @@ extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
extern int qla24xx_dif_start_scsi(srb_t *);
+extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
+extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -239,6 +242,9 @@ extern int
qla2x00_init_firmware(scsi_qla_host_t *, uint16_t);
extern int
+qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *);
+
+extern int
qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
extern int
@@ -383,6 +389,8 @@ extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
extern void qla2x00_free_irqs(scsi_qla_host_t *);
extern int qla2x00_get_data_rate(scsi_qla_host_t *);
+extern char *qla2x00_get_link_speed_str(struct qla_hw_data *);
+
/*
* Global Function Prototypes in qla_sup.c source file.
*/
@@ -546,6 +554,7 @@ extern void qla2x00_sp_free(void *, void *);
extern void qla2x00_sp_timeout(unsigned long);
extern void qla2x00_bsg_job_done(void *, void *, int);
extern void qla2x00_bsg_sp_free(void *, void *);
+extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
/* Interrupt related */
extern irqreturn_t qla82xx_intr_handler(int, void *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 3128f80441f5..05260d25fe46 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_target.h"
static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
@@ -556,7 +557,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
- ct_req->req.rff_id.fc4_feature = BIT_1;
+ qlt_rff_id(vha, ct_req);
+
ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */
/* Execute MS IOCB */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b9465643396b..ca5084743135 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -17,6 +17,9 @@
#include <asm/prom.h>
#endif
+#include <target/target_core_base.h>
+#include "qla_target.h"
+
/*
* QLogic ISP2x00 Hardware Support Function Prototypes.
*/
@@ -518,7 +521,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
}
}
- rval = qla2x00_init_rings(vha);
+
+ if (qla_ini_mode_enabled(vha))
+ rval = qla2x00_init_rings(vha);
+
ha->flags.chip_reset_done = 1;
if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
@@ -1233,6 +1239,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mq_size += ha->max_rsp_queues *
(rsp->length * sizeof(response_t));
}
+ if (ha->tgt.atio_q_length)
+ mq_size += ha->tgt.atio_q_length * sizeof(request_t);
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
goto try_eft;
@@ -1696,6 +1704,12 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+ /* Setup ATIO queue dma pointers for target mode */
+ icb->atio_q_inpointer = __constant_cpu_to_le16(0);
+ icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
+ icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
+ icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
+
if (ha->mqenable || IS_QLA83XX(ha)) {
icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
icb->rid = __constant_cpu_to_le16(rid);
@@ -1739,6 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
}
+ qlt_24xx_config_rings(vha, reg);
+
/* PCI posting */
RD_REG_DWORD(&ioreg->hccr);
}
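For reference, the ATIO queue base address programmed a few lines up is written into the ICB as two little-endian 32-bit words via LSD()/MSD(). A standalone sketch of that split (endian conversion omitted; the function name is made up for illustration):

#include <stdint.h>

/* Split a 64-bit DMA address into the low/high 32-bit words an ICB
 * queue-address field expects, mirroring the LSD()/MSD() usage above.
 */
static void sketch_icb_set_queue_addr(uint32_t addr[2], uint64_t dma)
{
	addr[0] = (uint32_t)(dma & 0xffffffffu);   /* LSD(dma) */
	addr[1] = (uint32_t)(dma >> 32);           /* MSD(dma) */
}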
@@ -1794,6 +1810,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
spin_unlock(&ha->vport_slock);
+ ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+ ha->tgt.atio_ring_index = 0;
+ /* Initialize ATIO queue entries */
+ qlt_init_atio_q_entries(vha);
+
ha->isp_ops->config_rings(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2051,6 +2072,10 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
vha->d_id.b.area = area;
vha->d_id.b.al_pa = al_pa;
+ spin_lock(&ha->vport_slock);
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock(&ha->vport_slock);
+
if (!vha->flags.init_done)
ql_log(ql_log_info, vha, 0x2010,
"Topology - %s, Host Loop address 0x%x.\n",
@@ -2185,7 +2210,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
/* Reset NVRAM data. */
ql_log(ql_log_warn, vha, 0x0064,
- "Inconisistent NVRAM "
+ "Inconsistent NVRAM "
"detected: checksum=0x%x id=%c version=0x%x.\n",
chksum, nv->id[0], nv->nvram_version);
ql_log(ql_log_warn, vha, 0x0065,
@@ -2270,7 +2295,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
if (IS_QLA23XX(ha)) {
nv->firmware_options[0] |= BIT_2;
nv->firmware_options[0] &= ~BIT_3;
- nv->firmware_options[0] &= ~BIT_6;
+ nv->special_options[0] &= ~BIT_6;
nv->add_firmware_options[1] |= BIT_5 | BIT_4;
if (IS_QLA2300(ha)) {
@@ -2467,14 +2492,21 @@ qla2x00_rport_del(void *data)
{
fc_port_t *fcport = data;
struct fc_rport *rport;
+ scsi_qla_host_t *vha = fcport->vha;
unsigned long flags;
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
rport = fcport->drport ? fcport->drport: fcport->rport;
fcport->drport = NULL;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
- if (rport)
+ if (rport) {
fc_remote_port_delete(rport);
+ /*
+ * Release the target mode FC NEXUS in qla_target.c code
+ * if target mode is enabled.
+ */
+ qlt_fc_port_deleted(vha, fcport);
+ }
}
/**
@@ -2495,11 +2527,11 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
/* Setup fcport template structure. */
fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
fcport->port_type = FCT_UNKNOWN;
fcport->loop_id = FC_NO_LOOP_ID;
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
fcport->supported_classes = FC_COS_UNSPECIFIED;
+ fcport->scan_state = QLA_FCPORT_SCAN_NONE;
return fcport;
}
@@ -2726,7 +2758,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport->d_id.b.area = area;
new_fcport->d_id.b.al_pa = al_pa;
new_fcport->loop_id = loop_id;
- new_fcport->vp_idx = vha->vp_idx;
rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
ql_dbg(ql_dbg_disc, vha, 0x201a,
@@ -2760,10 +2791,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
if (!found) {
/* New device, add to fcports list. */
- if (vha->vp_idx) {
- new_fcport->vha = vha;
- new_fcport->vp_idx = vha->vp_idx;
- }
list_add_tail(&new_fcport->list, &vha->vp_fcports);
/* Allocate a new replacement fcport. */
@@ -2800,8 +2827,6 @@ cleanup_allocation:
static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
-#define LS_UNKNOWN 2
- static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
char *link_speed;
int rval;
uint16_t mb[4];
@@ -2829,11 +2854,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->port_name[6], fcport->port_name[7], rval,
fcport->fp_speed, mb[0], mb[1]);
} else {
- link_speed = link_speeds[LS_UNKNOWN];
- if (fcport->fp_speed < 5)
- link_speed = link_speeds[fcport->fp_speed];
- else if (fcport->fp_speed == 0x13)
- link_speed = link_speeds[5];
+ link_speed = qla2x00_get_link_speed_str(ha);
ql_dbg(ql_dbg_disc, vha, 0x2005,
"iIDMA adjusted to %s GB/s "
"on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
@@ -2864,6 +2885,12 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
"Unable to allocate fc remote port.\n");
return;
}
+ /*
+ * Create target mode FC NEXUS in qla_target.c if target mode is
+ * enabled.
+ */
+ qlt_fc_port_added(vha, fcport);
+
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
*((fc_port_t **)rport->dd_data) = fcport;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
@@ -2921,7 +2948,7 @@ static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
int rval;
- fc_port_t *fcport, *fcptemp;
+ fc_port_t *fcport;
uint16_t next_loopid;
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint16_t loop_id;
@@ -2959,7 +2986,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
0xfc, mb, BIT_1|BIT_0);
if (rval != QLA_SUCCESS) {
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- return rval;
+ break;
}
if (mb[0] != MBS_COMMAND_COMPLETE) {
ql_dbg(ql_dbg_disc, vha, 0x2042,
@@ -2991,21 +3018,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
-#define QLA_FCPORT_SCAN 1
-#define QLA_FCPORT_FOUND 2
-
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- fcport->scan_state = QLA_FCPORT_SCAN;
- }
-
rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
if (rval != QLA_SUCCESS)
break;
- /*
- * Logout all previous fabric devices marked lost, except
- * FCP2 devices.
- */
+ /* Add new ports to existing port list */
+ list_splice_tail_init(&new_fcports, &vha->vp_fcports);
+
+ /* Starting free loop ID. */
+ next_loopid = ha->min_external_loopid;
+
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
@@ -3013,7 +3035,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
- if (fcport->scan_state == QLA_FCPORT_SCAN &&
+ /* Logout lost/gone fabric devices (non-FCP2) */
+ if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
atomic_read(&fcport->state) == FCS_ONLINE) {
qla2x00_mark_device_lost(vha, fcport,
ql2xplogiabsentdevice, 0);
@@ -3026,78 +3049,30 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
fcport->d_id.b.domain,
fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- fcport->loop_id = FC_NO_LOOP_ID;
}
- }
- }
-
- /* Starting free loop ID. */
- next_loopid = ha->min_external_loopid;
-
- /*
- * Scan through our port list and login entries that need to be
- * logged in.
- */
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (atomic_read(&vha->loop_down_timer) ||
- test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
- break;
-
- if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
- (fcport->flags & FCF_LOGIN_NEEDED) == 0)
continue;
-
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(
- base_vha, fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- break;
- }
}
- /* Login and update database */
- qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
- }
-
- /* Exit if out of loop IDs. */
- if (rval != QLA_SUCCESS) {
- break;
- }
-
- /*
- * Login and add the new devices to our port list.
- */
- list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
- if (atomic_read(&vha->loop_down_timer) ||
- test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
- break;
-
- /* Find a new loop ID to use. */
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(base_vha, fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- break;
+ fcport->scan_state = QLA_FCPORT_SCAN_NONE;
+
+ /* Login fabric devices that need a login */
+ if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
+ atomic_read(&vha->loop_down_timer) == 0) {
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(
+ base_vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ continue;
+ }
+ }
}
/* Login and update database */
qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
-
- if (vha->vp_idx) {
- fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
- }
- list_move_tail(&fcport->list, &vha->vp_fcports);
}
} while (0);
- /* Free all new device structures not processed. */
- list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
- list_del(&fcport->list);
- kfree(fcport);
- }
-
if (rval) {
ql_dbg(ql_dbg_disc, vha, 0x2068,
"Configure fabric error exit rval=%d.\n", rval);
@@ -3287,7 +3262,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
WWN_SIZE))
continue;
- fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
found++;
@@ -3595,6 +3570,12 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
if (mb[10] & BIT_1)
fcport->supported_classes |= FC_COS_CLASS3;
+ if (IS_FWI2_CAPABLE(ha)) {
+ if (mb[10] & BIT_7)
+ fcport->flags |=
+ FCF_CONF_COMP_SUPPORTED;
+ }
+
rval = QLA_SUCCESS;
break;
} else if (mb[0] == MBS_LOOP_ID_USED) {
@@ -3841,7 +3822,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- ha->qla_stats.total_isp_aborts++;
+ vha->qla_stats.total_isp_aborts++;
ql_log(ql_log_info, vha, 0x00af,
"Performing ISP error recovery - ha=%p.\n", ha);
@@ -4066,6 +4047,7 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
+ unsigned long flags;
/* If firmware needs to be loaded */
if (qla2x00_isp_firmware(vha)) {
@@ -4090,6 +4072,16 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
vha->flags.online = 1;
+
+ /*
+ * Process any ATIO queue entries that came in
+ * while we weren't online.
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (qla_tgt_mode_enabled(vha))
+ qlt_24xx_process_atio_queue(vha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
/* Wait at most MAX_TARGET RSCNs for a stable link. */
wait_time = 256;
do {
@@ -4279,7 +4271,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
/* Reset NVRAM data. */
ql_log(ql_log_warn, vha, 0x006b,
- "Inconisistent NVRAM detected: checksum=0x%x id=%c "
+ "Inconsistent NVRAM detected: checksum=0x%x id=%c "
"version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
ql_log(ql_log_warn, vha, 0x006c,
"Falling back to functioning (yet invalid -- WWPN) "
@@ -4330,6 +4322,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
rval = 1;
}
+ if (!qla_ini_mode_enabled(vha)) {
+ /* Don't enable full login after initial LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ /* Don't enable LIP full login for initiator */
+ nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ }
+
+ qlt_24xx_config_nvram_stage1(vha, nv);
+
/* Reset Initialization control block */
memset(icb, 0, ha->init_cb_size);
@@ -4357,8 +4358,10 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
"QLA2462");
- /* Use alternate WWN? */
+ qlt_24xx_config_nvram_stage2(vha, icb);
+
if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
+ /* Use alternate WWN? */
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
}
@@ -5029,7 +5032,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
/* Reset NVRAM data. */
ql_log(ql_log_info, vha, 0x0073,
- "Inconisistent NVRAM detected: checksum=0x%x id=%c "
+ "Inconsistent NVRAM detected: checksum=0x%x id=%c "
"version=0x%x.\n", chksum, nv->id[0],
le16_to_cpu(nv->nvram_version));
ql_log(ql_log_info, vha, 0x0074,
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index eac950924497..70dbf53d9e0f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_target.h"
#include <linux/blkdev.h>
#include <linux/delay.h>
@@ -23,18 +24,17 @@ qla2x00_get_cmd_direction(srb_t *sp)
{
uint16_t cflags;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
cflags = 0;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cflags = CF_WRITE;
- sp->fcport->vha->hw->qla_stats.output_bytes +=
- scsi_bufflen(cmd);
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cflags = CF_READ;
- sp->fcport->vha->hw->qla_stats.input_bytes +=
- scsi_bufflen(cmd);
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
}
return (cflags);
}
@@ -385,9 +385,10 @@ qla2x00_start_scsi(srb_t *sp)
else
req->cnt = req->length -
(req->ring_index - cnt);
+ /* If still no head room then bail out */
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
}
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
/* Build command packet */
req->current_outstanding_cmd = handle;
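The change above folds the "still not enough request-queue slots?" test into the branch that has just refreshed req->cnt from the hardware out-pointer. The underlying circular-ring arithmetic, as a standalone sketch (names are illustrative; the two extra slots of headroom keep the producer index from catching the consumer index):

#include <stdint.h>

/* Free slots between the driver's in-pointer (ring_index) and the
 * hardware's out-pointer (out) for a ring of 'length' entries.
 */
static uint16_t sketch_ring_room(uint16_t length, uint16_t ring_index,
				 uint16_t out)
{
	if (ring_index < out)
		return out - ring_index;
	return length - (ring_index - out);
}

/* Typical use, mirroring the check above:
 *	if (sketch_ring_room(req_len, in, out) < req_cnt + 2)
 *		goto queuing_error;
 */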
@@ -470,7 +471,7 @@ queuing_error:
/**
* qla2x00_start_iocbs() - Execute the IOCB command
*/
-static void
+void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
struct qla_hw_data *ha = vha->hw;
@@ -571,6 +572,29 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
return (ret);
}
+/*
+ * qla2x00_issue_marker
+ *
+ * Issue marker
+ * Caller CAN have hardware lock held as specified by ha_locked parameter.
+ * Might release it, then reacquire.
+ */
+int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
+{
+ if (ha_locked) {
+ if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+ MK_SYNC_ALL) != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ } else {
+ if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+ MK_SYNC_ALL) != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ }
+ vha->marker_needed = 0;
+
+ return QLA_SUCCESS;
+}
+
/**
* qla24xx_calc_iocbs() - Determine number of Command Type 3 and
* Continuation Type 1 IOCBs to allocate.
@@ -629,11 +653,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->control_flags =
__constant_cpu_to_le16(CF_WRITE_DATA);
- ha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->control_flags =
__constant_cpu_to_le16(CF_READ_DATA);
- ha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
}
cur_seg = scsi_sglist(cmd);
@@ -745,13 +769,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
- sp->fcport->vha->hw->qla_stats.output_bytes +=
- scsi_bufflen(cmd);
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
- sp->fcport->vha->hw->qla_stats.input_bytes +=
- scsi_bufflen(cmd);
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
}
/* One DSD is available in the Command Type 3 IOCB */
@@ -1245,7 +1267,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
return QLA_SUCCESS;
}
- cmd_pkt->vp_index = sp->fcport->vp_idx;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -1502,9 +1524,9 @@ qla24xx_start_scsi(srb_t *sp)
else
req->cnt = req->length -
(req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
}
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
/* Build command packet. */
req->current_outstanding_cmd = handle;
@@ -1527,7 +1549,7 @@ qla24xx_start_scsi(srb_t *sp)
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vp_idx;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@@ -1717,11 +1739,10 @@ qla24xx_dif_start_scsi(srb_t *sp)
else
req->cnt = req->length -
(req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
}
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
-
status |= QDSS_GOT_Q_SPACE;
/* Build header part of command packet (excluding the OPCODE). */
@@ -1898,7 +1919,7 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
- logio->vp_index = sp->fcport->vp_idx;
+ logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
@@ -1922,7 +1943,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
- mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+ mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
@@ -1935,7 +1956,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
- logio->vp_index = sp->fcport->vp_idx;
+ logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
@@ -1952,7 +1973,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
- mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+ mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
/* Implicit: mbx->mbx10 = 0. */
}
@@ -1962,7 +1983,7 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- logio->vp_index = sp->fcport->vp_idx;
+ logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
@@ -1983,7 +2004,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
- mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+ mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
@@ -2009,7 +2030,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
tsk->port_id[0] = fcport->d_id.b.al_pa;
tsk->port_id[1] = fcport->d_id.b.area;
tsk->port_id[2] = fcport->d_id.b.domain;
- tsk->vp_index = fcport->vp_idx;
+ tsk->vp_index = fcport->vha->vp_idx;
if (flags == TCF_LUN_RESET) {
int_to_scsilun(lun, &tsk->lun);
@@ -2030,7 +2051,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->handle = sp->handle;
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
- els_iocb->vp_index = sp->fcport->vp_idx;
+ els_iocb->vp_index = sp->fcport->vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
@@ -2160,7 +2181,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
ct_iocb->handle = sp->handle;
ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- ct_iocb->vp_index = sp->fcport->vp_idx;
+ ct_iocb->vp_index = sp->fcport->vha->vp_idx;
ct_iocb->comp_status = __constant_cpu_to_le16(0);
ct_iocb->cmd_dsd_count =
@@ -2343,11 +2364,10 @@ sufficient_dsds:
else
req->cnt = req->length -
(req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
}
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
-
ctx = sp->u.scmd.ctx =
mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
if (!ctx) {
@@ -2362,7 +2382,7 @@ sufficient_dsds:
if (!ctx->fcp_cmnd) {
ql_log(ql_log_fatal, vha, 0x3011,
"Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
- goto queuing_error_fcp_cmnd;
+ goto queuing_error;
}
/* Initialize the DSD list and dma handle */
@@ -2400,7 +2420,7 @@ sufficient_dsds:
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vp_idx;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
/* Build IOCB segments */
if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
@@ -2489,7 +2509,7 @@ sufficient_dsds:
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vp_idx;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ce42288049b5..6f67a9d4998b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_target.h"
#include <linux/delay.h>
#include <linux/slab.h>
@@ -309,6 +310,28 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
"IDC failed to post ACK.\n");
}
+#define LS_UNKNOWN 2
+char *
+qla2x00_get_link_speed_str(struct qla_hw_data *ha)
+{
+ static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
+ char *link_speed;
+ int fw_speed = ha->link_data_rate;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ link_speed = link_speeds[0];
+ else if (fw_speed == 0x13)
+ link_speed = link_speeds[6];
+ else {
+ link_speed = link_speeds[LS_UNKNOWN];
+ if (fw_speed < 6)
+ link_speed =
+ link_speeds[fw_speed];
+ }
+
+ return link_speed;
+}
+
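qla2x00_get_link_speed_str() above centralizes the firmware-rate-to-string mapping that several call sites previously duplicated. A standalone sketch of the same mapping (the 0x13 code for 10 Gb/s and the table layout follow the function above; the rest is illustrative):

#include <stdio.h>

static const char *sketch_link_speed_str(int fw_speed, int is_2100_or_2200)
{
	static const char * const speeds[] =
	    { "1", "2", "?", "4", "8", "16", "10" };

	if (is_2100_or_2200)
		return speeds[0];              /* fixed 1 Gb/s parts */
	if (fw_speed == 0x13)
		return speeds[6];              /* 10 Gb/s */
	if (fw_speed >= 0 && fw_speed < 6)
		return speeds[fw_speed];
	return speeds[2];                      /* unknown */
}

int main(void)
{
	/* prints: 8 10 ? */
	printf("%s %s %s\n", sketch_link_speed_str(4, 0),
	    sketch_link_speed_str(0x13, 0), sketch_link_speed_str(9, 0));
	return 0;
}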
/**
* qla2x00_async_event() - Process asynchronous events.
* @ha: SCSI driver HA context
@@ -317,9 +340,6 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
-#define LS_UNKNOWN 2
- static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
- char *link_speed;
uint16_t handle_cnt;
uint16_t cnt, mbx;
uint32_t handles[5];
@@ -454,8 +474,8 @@ skip_rio:
case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
ql_dbg(ql_dbg_async, vha, 0x5008,
"Asynchronous WAKEUP_THRES.\n");
- break;
+ break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
ql_dbg(ql_dbg_async, vha, 0x5009,
"LIP occurred (%x).\n", mb[1]);
@@ -479,20 +499,14 @@ skip_rio:
break;
case MBA_LOOP_UP: /* Loop Up Event */
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- link_speed = link_speeds[0];
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
ha->link_data_rate = PORT_SPEED_1GB;
- } else {
- link_speed = link_speeds[LS_UNKNOWN];
- if (mb[1] < 6)
- link_speed = link_speeds[mb[1]];
- else if (mb[1] == 0x13)
- link_speed = link_speeds[6];
+ else
ha->link_data_rate = mb[1];
- }
ql_dbg(ql_dbg_async, vha, 0x500a,
- "LOOP UP detected (%s Gbps).\n", link_speed);
+ "LOOP UP detected (%s Gbps).\n",
+ qla2x00_get_link_speed_str(ha));
vha->flags.management_server_logged_in = 0;
qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -638,6 +652,8 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x5010,
"Port unavailable %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
+ ql_log(ql_log_warn, vha, 0x505e,
+ "Link is offline.\n");
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -670,12 +686,17 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x5011,
"Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
mb[1], mb[2], mb[3]);
+
+ qlt_async_event(mb[0], vha, mb);
break;
}
ql_dbg(ql_dbg_async, vha, 0x5012,
"Port database changed %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
+ ql_log(ql_log_warn, vha, 0x505f,
+ "Link is operational (%s Gbps).\n",
+ qla2x00_get_link_speed_str(ha));
/*
* Mark all devices as missing so we will login again.
@@ -684,8 +705,13 @@ skip_rio:
qla2x00_mark_all_devices_lost(vha, 1);
+ if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
+ set_bit(SCR_PENDING, &vha->dpc_flags);
+
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+
+ qlt_async_event(mb[0], vha, mb);
break;
case MBA_RSCN_UPDATE: /* State Change Registration */
@@ -807,6 +833,8 @@ skip_rio:
mb[0], mb[1], mb[2], mb[3]);
}
+ qlt_async_event(mb[0], vha, mb);
+
if (!vha->vp_idx && ha->num_vhosts)
qla2x00_alert_all_vps(rsp, mb);
}
@@ -1172,6 +1200,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
} else if (iop[0] & BIT_5)
fcport->port_type = FCT_INITIATOR;
+ if (iop[0] & BIT_7)
+ fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+
if (logio->io_parameter[7] || logio->io_parameter[8])
fcport->supported_classes |= FC_COS_CLASS2;
if (logio->io_parameter[9] || logio->io_parameter[10])
@@ -1986,6 +2017,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (pkt->entry_status != 0) {
qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
+
+ (void)qlt_24xx_process_response_error(vha, pkt);
+
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
wmb();
continue;
@@ -2016,6 +2050,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
case ELS_IOCB_TYPE:
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
break;
+ case ABTS_RECV_24XX:
+ /* ensure that the ATIO queue is empty */
+ qlt_24xx_process_atio_queue(vha);
+ case ABTS_RESP_24XX:
+ case CTIO_TYPE7:
+ case NOTIFY_ACK_TYPE:
+ qlt_response_pkt_all_vps(vha, (response_t *)pkt);
+ break;
case MARKER_TYPE:
/* Do nothing in this case, this check is to prevent it
* from falling into default case
@@ -2168,6 +2210,13 @@ qla24xx_intr_handler(int irq, void *dev_id)
case 0x14:
qla24xx_process_response_queue(vha, rsp);
break;
+ case 0x1C: /* ATIO queue updated */
+ qlt_24xx_process_atio_queue(vha);
+ break;
+ case 0x1D: /* ATIO and response queues updated */
+ qlt_24xx_process_atio_queue(vha);
+ qla24xx_process_response_queue(vha, rsp);
+ break;
default:
ql_dbg(ql_dbg_async, vha, 0x504f,
"Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -2312,6 +2361,13 @@ qla24xx_msix_default(int irq, void *dev_id)
case 0x14:
qla24xx_process_response_queue(vha, rsp);
break;
+ case 0x1C: /* ATIO queue updated */
+ qlt_24xx_process_atio_queue(vha);
+ break;
+ case 0x1D: /* ATIO and response queues updated */
+ qlt_24xx_process_atio_queue(vha);
+ qla24xx_process_response_queue(vha, rsp);
+ break;
default:
ql_dbg(ql_dbg_async, vha, 0x5051,
"Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -2564,7 +2620,15 @@ void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct rsp_que *rsp = ha->rsp_q_map[0];
+ struct rsp_que *rsp;
+
+ /*
+ * We need to check that ha->rsp_q_map is valid in case we are called
+ * from a probe failure context.
+ */
+ if (!ha->rsp_q_map || !ha->rsp_q_map[0])
+ return;
+ rsp = ha->rsp_q_map[0];
if (ha->flags.msix_enabled)
qla24xx_disable_msix(ha);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b4a23394a7bd..d5ce92c0a8fc 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_target.h"
#include <linux/delay.h>
#include <linux/gfp.h>
@@ -270,11 +271,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ictrl = RD_REG_WORD(&reg->isp.ictrl);
}
ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
- "MBX Command timeout for cmd %x.\n", command);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111a,
- "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111b,
- "mb[0] = 0x%x.\n", mb0);
+ "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+ "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
/*
@@ -320,7 +318,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
CRB_NIU_XG_PAUSE_CTL_P1);
}
ql_log(ql_log_info, base_vha, 0x101c,
- "Mailbox cmd timeout occured, cmd=0x%x, "
+ "Mailbox cmd timeout occurred, cmd=0x%x, "
"mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
"abort.\n", command, mcp->mb[0],
ha->flags.eeh_busy);
@@ -345,7 +343,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
CRB_NIU_XG_PAUSE_CTL_P1);
}
ql_log(ql_log_info, base_vha, 0x101e,
- "Mailbox cmd timeout occured, cmd=0x%x, "
+ "Mailbox cmd timeout occurred, cmd=0x%x, "
"mb[0]=0x%x. Scheduling ISP abort ",
command, mcp->mb[0]);
set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
@@ -390,7 +388,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
+ "Entered %s.\n", __func__);
if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -424,7 +423,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
ql_dbg(ql_dbg_mbx, vha, 0x1023,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
+ "Done %s.\n", __func__);
}
return rval;
@@ -454,7 +454,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
mcp->out_mb = MBX_0;
@@ -489,10 +490,11 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
if (IS_FWI2_CAPABLE(ha)) {
- ql_dbg(ql_dbg_mbx, vha, 0x1027,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
"Done exchanges=%x.\n", mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
+ "Done %s.\n", __func__);
}
}
@@ -523,7 +525,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
mcp->out_mb = MBX_0;
@@ -561,11 +564,11 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->fw_attributes_h = mcp->mb[15];
ha->fw_attributes_ext[0] = mcp->mb[16];
ha->fw_attributes_ext[1] = mcp->mb[17];
- ql_dbg(ql_dbg_mbx, vha, 0x1139,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
"%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
__func__, mcp->mb[15], mcp->mb[6]);
} else
- ql_dbg(ql_dbg_mbx, vha, 0x112f,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
"%s: FwAttributes [Upper] invalid, MB6:%04x\n",
__func__, mcp->mb[6]);
}
@@ -576,7 +579,8 @@ failed:
ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
+ "Done %s.\n", __func__);
}
return rval;
}
@@ -602,7 +606,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
mcp->out_mb = MBX_0;
@@ -620,7 +625,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
fwopts[2] = mcp->mb[2];
fwopts[3] = mcp->mb[3];
- ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
+ "Done %s.\n", __func__);
}
return rval;
@@ -648,7 +654,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
mcp->mb[1] = fwopts[1];
@@ -676,7 +683,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
"Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
+ "Done %s.\n", __func__);
}
return rval;
@@ -704,7 +712,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
mcp->mb[1] = 0xAAAA;
@@ -734,7 +743,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
+ "Done %s.\n", __func__);
}
return rval;
@@ -762,7 +772,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_VERIFY_CHECKSUM;
mcp->out_mb = MBX_0;
@@ -787,7 +798,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
"Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
(mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
+ "Done %s.\n", __func__);
}
return rval;
@@ -819,7 +831,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_IOCB_COMMAND_A64;
mcp->mb[1] = 0;
@@ -842,7 +855,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
/* Mask reserved bits. */
sts_entry->entry_status &=
IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
- ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
+ "Done %s.\n", __func__);
}
return rval;
@@ -884,7 +898,8 @@ qla2x00_abort_command(srb_t *sp)
struct req_que *req = vha->req;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
+ "Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -915,7 +930,8 @@ qla2x00_abort_command(srb_t *sp)
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
+ "Done %s.\n", __func__);
}
return rval;
@@ -934,7 +950,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
l = l;
vha = fcport->vha;
- ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
+ "Entered %s.\n", __func__);
req = vha->hw->req_q_map[0];
rsp = req->rsp;
@@ -955,7 +972,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
+ "Failed=%x.\n", rval);
}
/* Issue marker IOCB. */
@@ -965,7 +983,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
ql_dbg(ql_dbg_mbx, vha, 0x1040,
"Failed to issue marker IOCB (%x).\n", rval2);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
+ "Done %s.\n", __func__);
}
return rval;
@@ -983,7 +1002,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
vha = fcport->vha;
- ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
+ "Entered %s.\n", __func__);
req = vha->hw->req_q_map[0];
rsp = req->rsp;
@@ -1012,7 +1032,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
ql_dbg(ql_dbg_mbx, vha, 0x1044,
"Failed to issue marker IOCB (%x).\n", rval2);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1046,7 +1067,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
mcp->mb[9] = vha->vp_idx;
@@ -1074,7 +1096,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
+ "Done %s.\n", __func__);
if (IS_CNA_CAPABLE(vha->hw)) {
vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
@@ -1115,7 +1138,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_RETRY_COUNT;
mcp->out_mb = MBX_0;
@@ -1138,7 +1162,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
*tov = ratov;
}
- ql_dbg(ql_dbg_mbx, vha, 0x104b,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
"Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
}
@@ -1170,7 +1194,8 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
+ "Entered %s.\n", __func__);
if (IS_QLA82XX(ha) && ql2xdbwr)
qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
@@ -1213,9 +1238,100 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_get_node_name_list
+ * Issue get node name list mailbox command, kmalloc()
+ * and return the resulting list. Caller must kfree() it!
+ *
+ * Input:
+ * ha = adapter state pointer.
+ * out_data = resulting list
+ * out_len = length of the resulting list
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_port_24xx_data *list = NULL;
+ void *pmap;
+ mbx_cmd_t mc;
+ dma_addr_t pmap_dma;
+ ulong dma_size;
+ int rval, left;
+
+ left = 1;
+ while (left > 0) {
+ dma_size = left * sizeof(*list);
+ pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
+ &pmap_dma, GFP_KERNEL);
+ if (!pmap) {
+ ql_log(ql_log_warn, vha, 0x113f,
+ "%s(%ld): DMA Alloc failed of %ld\n",
+ __func__, vha->host_no, dma_size);
+ rval = QLA_MEMORY_ALLOC_FAILED;
+ goto out;
+ }
+
+ mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
+ mc.mb[1] = BIT_1 | BIT_3;
+ mc.mb[2] = MSW(pmap_dma);
+ mc.mb[3] = LSW(pmap_dma);
+ mc.mb[6] = MSW(MSD(pmap_dma));
+ mc.mb[7] = LSW(MSD(pmap_dma));
+ mc.mb[8] = dma_size;
+ mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
+ mc.in_mb = MBX_0|MBX_1;
+ mc.tov = 30;
+ mc.flags = MBX_DMA_IN;
+
+ rval = qla2x00_mailbox_command(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
+ (mc.mb[1] == 0xA)) {
+ left += le16_to_cpu(mc.mb[2]) /
+ sizeof(struct qla_port_24xx_data);
+ goto restart;
+ }
+ goto out_free;
+ }
+
+ left = 0;
+
+ list = kzalloc(dma_size, GFP_KERNEL);
+ if (!list) {
+ ql_log(ql_log_warn, vha, 0x1140,
+ "%s(%ld): failed to allocate node names list "
+ "structure.\n", __func__, vha->host_no);
+ rval = QLA_MEMORY_ALLOC_FAILED;
+ goto out_free;
+ }
+
+ memcpy(list, pmap, dma_size);
+restart:
+ dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
}
+ *out_data = list;
+ *out_len = dma_size;
+
+out:
+ return rval;
+
+out_free:
+ dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
return rval;
}
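qla2x00_get_node_name_list() above follows a grow-and-retry pattern: start with room for a single entry, and when the firmware rejects the command as too small (MBS_COMMAND_ERROR with mb[1] == 0xA) it also reports how much more room is required, so the DMA buffer is resized and the command reissued. A standalone sketch of that pattern, with a hypothetical query callback standing in for the mailbox command and plain heap memory standing in for the coherent DMA buffer:

#include <stdlib.h>

/* query() returns 0 on success; on failure it writes the total number
 * of entries it actually needs into *needed.  Both the callback and
 * its contract are made up for this sketch.
 */
typedef int (*sketch_query_t)(void *buf, size_t entries, size_t *needed);

static void *sketch_get_list(sketch_query_t query, size_t entry_size,
			     size_t *out_entries)
{
	size_t entries = 1, needed = 0;
	void *buf = NULL;

	for (;;) {
		void *tmp = realloc(buf, entries * entry_size);
		if (!tmp) {
			free(buf);
			return NULL;
		}
		buf = tmp;

		if (query(buf, entries, &needed) == 0)
			break;                   /* success */
		if (needed <= entries) {         /* hard failure, give up */
			free(buf);
			return NULL;
		}
		entries = needed;                /* grow and retry */
	}
	*out_entries = entries;
	return buf;                              /* caller must free() it */
}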
@@ -1246,7 +1362,8 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
dma_addr_t pd_dma;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
+ "Entered %s.\n", __func__);
pd24 = NULL;
pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
@@ -1326,6 +1443,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
fcport->port_type = FCT_INITIATOR;
else
fcport->port_type = FCT_TARGET;
+
+ /* Passback COS information. */
+ fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
+ FC_COS_CLASS2 : FC_COS_CLASS3;
+
+ if (pd24->prli_svc_param_word_3[0] & BIT_7)
+ fcport->flags |= FCF_CONF_COMP_SUPPORTED;
} else {
uint64_t zero = 0;
@@ -1378,7 +1502,8 @@ gpd_error_out:
"Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1407,7 +1532,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
mcp->out_mb = MBX_0;
@@ -1433,7 +1559,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1465,7 +1592,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_PORT_NAME;
mcp->mb[9] = vha->vp_idx;
@@ -1499,7 +1627,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
name[7] = LSB(mcp->mb[7]);
}
- ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1527,7 +1656,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
+ "Entered %s.\n", __func__);
if (IS_CNA_CAPABLE(vha->hw)) {
/* Logout across all FCFs. */
@@ -1564,7 +1694,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1596,9 +1727,10 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
+ "Entered %s.\n", __func__);
- ql_dbg(ql_dbg_mbx, vha, 0x105e,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
"Retry cnt=%d ratov=%d total tov=%d.\n",
vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
@@ -1622,7 +1754,8 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
rval, mcp->mb[0], mcp->mb[1]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1641,7 +1774,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
struct req_que *req;
struct rsp_que *rsp;
- ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
+ "Entered %s.\n", __func__);
if (ha->flags.cpu_affinity_enabled)
req = ha->req_q_map[0];
@@ -1715,7 +1849,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
break;
}
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
+ "Done %s.\n", __func__);
iop[0] = le32_to_cpu(lg->io_parameter[0]);
@@ -1733,6 +1868,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
mb[10] |= BIT_0; /* Class 2. */
if (lg->io_parameter[9] || lg->io_parameter[10])
mb[10] |= BIT_1; /* Class 3. */
+ if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
+ mb[10] |= BIT_7; /* Confirmed Completion
+ * Allowed
+ */
}
dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1770,7 +1909,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1818,7 +1958,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1849,7 +1990,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
+ "Entered %s.\n", __func__);
if (IS_FWI2_CAPABLE(ha))
return qla24xx_login_fabric(vha, fcport->loop_id,
@@ -1891,7 +2033,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
+ "Done %s.\n", __func__);
}
return (rval);
@@ -1908,7 +2051,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
struct req_que *req;
struct rsp_que *rsp;
- ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
+ "Entered %s.\n", __func__);
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
@@ -1952,7 +2096,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
le32_to_cpu(lg->io_parameter[1]));
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
+ "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1984,7 +2129,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
mcp->out_mb = MBX_1|MBX_0;
@@ -2007,7 +2153,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
"Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2035,7 +2182,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
@@ -2052,7 +2200,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2078,7 +2227,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
+ "Entered %s.\n", __func__);
if (id_list == NULL)
return QLA_FUNCTION_FAILED;
@@ -2110,7 +2260,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
} else {
*entries = mcp->mb[1];
- ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2138,7 +2289,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
mcp->out_mb = MBX_0;
@@ -2154,7 +2306,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
ql_dbg(ql_dbg_mbx, vha, 0x107d,
"Failed mb[0]=%x.\n", mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x107e,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
"Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
"mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
@@ -2201,7 +2353,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
dma_addr_t pmap_dma;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
+ "Entered %s.\n", __func__);
pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
if (pmap == NULL) {
@@ -2224,7 +2377,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval == QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x1081,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
"mb0/mb1=%x/%X FC/AL position map size (%x).\n",
mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
@@ -2238,7 +2391,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2267,7 +2421,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
uint32_t *siter, *diter, dwords;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_LINK_STATUS;
mcp->mb[2] = MSW(stats_dma);
@@ -2301,7 +2456,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
rval = QLA_FUNCTION_FAILED;
} else {
/* Copy over data -- firmware data is LE. */
- ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
+ "Done %s.\n", __func__);
dwords = offsetof(struct link_statistics, unused1) / 4;
siter = diter = &stats->link_fail_cnt;
while (dwords--)
@@ -2324,7 +2480,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
mbx_cmd_t *mcp = &mc;
uint32_t *siter, *diter, dwords;
- ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
mcp->mb[2] = MSW(stats_dma);
@@ -2346,7 +2503,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
"Failed mb[0]=%x.\n", mcp->mb[0]);
rval = QLA_FUNCTION_FAILED;
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
+ "Done %s.\n", __func__);
/* Copy over data -- firmware data is LE. */
dwords = sizeof(struct link_statistics) / 4;
siter = diter = &stats->link_fail_cnt;
@@ -2375,7 +2533,8 @@ qla24xx_abort_command(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
- ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
+ "Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -2404,7 +2563,7 @@ qla24xx_abort_command(srb_t *sp)
abt->port_id[0] = fcport->d_id.b.al_pa;
abt->port_id[1] = fcport->d_id.b.area;
abt->port_id[2] = fcport->d_id.b.domain;
- abt->vp_index = fcport->vp_idx;
+ abt->vp_index = fcport->vha->vp_idx;
abt->req_que_no = cpu_to_le16(req->id);
@@ -2423,7 +2582,8 @@ qla24xx_abort_command(srb_t *sp)
le16_to_cpu(abt->nport_handle));
rval = QLA_FUNCTION_FAILED;
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
+ "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2455,7 +2615,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
ha = vha->hw;
req = vha->req;
- ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
+ "Entered %s.\n", __func__);
if (ha->flags.cpu_affinity_enabled)
rsp = ha->rsp_q_map[tag + 1];
@@ -2478,7 +2639,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
- tsk->p.tsk.vp_index = fcport->vp_idx;
+ tsk->p.tsk.vp_index = fcport->vha->vp_idx;
if (type == TCF_LUN_RESET) {
int_to_scsilun(l, &tsk->p.tsk.lun);
host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
@@ -2504,7 +2665,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
} else if (le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID) {
if (le32_to_cpu(sts->rsp_data_len) < 4) {
- ql_dbg(ql_dbg_mbx, vha, 0x1097,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
"Ignoring inconsistent data length -- not enough "
"response info (%d).\n",
le32_to_cpu(sts->rsp_data_len));
@@ -2523,7 +2684,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
ql_dbg(ql_dbg_mbx, vha, 0x1099,
"Failed to issue marker IOCB (%x).\n", rval2);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
+ "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
@@ -2564,7 +2726,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
mcp->out_mb = MBX_0;
@@ -2576,7 +2739,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2596,7 +2760,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SERDES_PARAMS;
mcp->mb[1] = BIT_0;
@@ -2615,7 +2780,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2631,7 +2797,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_STOP_FIRMWARE;
mcp->mb[1] = 0;
@@ -2646,7 +2813,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
if (mcp->mb[0] == MBS_INVALID_COMMAND)
rval = QLA_INVALID_COMMAND;
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2660,7 +2828,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -2686,7 +2855,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2699,7 +2869,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -2719,7 +2890,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2733,7 +2905,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
+ "Entered %s.\n", __func__);
if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
!IS_QLA83XX(vha->hw))
@@ -2764,7 +2937,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
+ "Done %s.\n", __func__);
if (mb)
memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2782,7 +2956,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -2804,7 +2979,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
+ "Done %s.\n", __func__);
if (wr)
*wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2829,7 +3005,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
+ "Entered %s.\n", __func__);
if (!IS_IIDMA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -2854,7 +3031,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
+ "Done %s.\n", __func__);
if (port_speed)
*port_speed = mcp->mb[3];
}
@@ -2870,7 +3048,8 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
+ "Entered %s.\n", __func__);
if (!IS_IIDMA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -2897,9 +3076,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
}
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval);
+ ql_dbg(ql_dbg_mbx, vha, 0x10b4,
+ "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2915,24 +3096,25 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
scsi_qla_host_t *vp;
unsigned long flags;
- ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
+ "Entered %s.\n", __func__);
if (rptid_entry->entry_status != 0)
return;
if (rptid_entry->format == 0) {
- ql_dbg(ql_dbg_mbx, vha, 0x10b7,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
"Format 0 : Number of VPs setup %d, number of "
"VPs acquired %d.\n",
MSB(le16_to_cpu(rptid_entry->vp_count)),
LSB(le16_to_cpu(rptid_entry->vp_count)));
- ql_dbg(ql_dbg_mbx, vha, 0x10b8,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
"Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
} else if (rptid_entry->format == 1) {
vp_idx = LSB(stat);
- ql_dbg(ql_dbg_mbx, vha, 0x10b9,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
"Format 1: VP[%d] enabled - status %d - with "
"port id %02x%02x%02x.\n", vp_idx, MSB(stat),
rptid_entry->port_id[2], rptid_entry->port_id[1],
@@ -2999,7 +3181,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
/* This can be called by the parent */
- ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
+ "Entered %s.\n", __func__);
vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
if (!vpmod) {
@@ -3015,6 +3198,9 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
vpmod->vp_count = 1;
vpmod->vp_index1 = vha->vp_idx;
vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
+
+ qlt_modify_vp_config(vha, vpmod);
+
memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
vpmod->entry_count = 1;
@@ -3035,7 +3221,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
rval = QLA_FUNCTION_FAILED;
} else {
/* EMPTY */
- ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
+ "Done %s.\n", __func__);
fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
}
dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
@@ -3069,7 +3256,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
int vp_index = vha->vp_idx;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- ql_dbg(ql_dbg_mbx, vha, 0x10c1,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
"Entered %s enabling index %d.\n", __func__, vp_index);
if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
@@ -3112,7 +3299,8 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
le16_to_cpu(vce->comp_status));
rval = QLA_FUNCTION_FAILED;
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
+ "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -3149,14 +3337,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__);
-
- /*
- * This command is implicitly executed by firmware during login for the
- * physical hosts
- */
- if (vp_idx == 0)
- return QLA_FUNCTION_FAILED;
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
mcp->mb[1] = format;
@@ -3185,7 +3367,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
+ "Entered %s.\n", __func__);
if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
@@ -3219,7 +3402,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
ql_dbg(ql_dbg_mbx, vha, 0x1008,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3244,7 +3428,8 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
unsigned long flags;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
+ "Entered %s.\n", __func__);
mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
if (mn == NULL) {
@@ -3285,7 +3470,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
status[0] = le16_to_cpu(mn->p.rsp.comp_status);
status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
le16_to_cpu(mn->p.rsp.failure_code) : 0;
- ql_dbg(ql_dbg_mbx, vha, 0x10ce,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
"cs=%x fc=%x.\n", status[0], status[1]);
if (status[0] != CS_COMPLETE) {
@@ -3299,7 +3484,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
retry = 1;
}
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10d0,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
"Firmware updated to %x.\n",
le32_to_cpu(mn->p.rsp.fw_ver));
@@ -3316,9 +3501,11 @@ verify_done:
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
+ ql_dbg(ql_dbg_mbx, vha, 0x10d1,
+ "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3334,7 +3521,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
struct device_reg_25xxmq __iomem *reg;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
mcp->mb[1] = req->options;
@@ -3388,7 +3576,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
ql_dbg(ql_dbg_mbx, vha, 0x10d4,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3404,7 +3593,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
struct device_reg_25xxmq __iomem *reg;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
mcp->mb[1] = rsp->options;
@@ -3456,7 +3646,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
ql_dbg(ql_dbg_mbx, vha, 0x10d7,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3469,7 +3660,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_IDC_ACK;
memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
@@ -3483,7 +3675,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
ql_dbg(ql_dbg_mbx, vha, 0x10da,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3496,7 +3689,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
+ "Entered %s.\n", __func__);
if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -3514,7 +3708,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
+ "Done %s.\n", __func__);
*sector_size = mcp->mb[1];
}
@@ -3531,7 +3726,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
@@ -3547,7 +3743,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3563,7 +3760,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
@@ -3582,7 +3780,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
"Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3595,7 +3794,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_RESTART_MPI_FW;
mcp->out_mb = MBX_0;
@@ -3609,7 +3809,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3624,7 +3825,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
@@ -3654,7 +3856,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
ql_dbg(ql_dbg_mbx, vha, 0x10e9,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3669,7 +3872,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
@@ -3699,7 +3903,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
ql_dbg(ql_dbg_mbx, vha, 0x10ec,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3713,7 +3918,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
+ "Entered %s.\n", __func__);
if (!IS_CNA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -3735,7 +3941,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
"Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
+ "Done %s.\n", __func__);
*actual_size = mcp->mb[2] << 2;
@@ -3752,7 +3959,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
+ "Entered %s.\n", __func__);
if (!IS_CNA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -3775,7 +3983,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
"Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3788,7 +3997,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -3805,7 +4015,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
ql_dbg(ql_dbg_mbx, vha, 0x10f5,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
+ "Done %s.\n", __func__);
*data = mcp->mb[3] << 16 | mcp->mb[2];
}
@@ -3821,7 +4032,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mbx_cmd_t *mcp = &mc;
uint32_t iter_cnt = 0x1;
- ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
+ "Entered %s.\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
@@ -3865,7 +4077,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
"mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
mcp->mb[3], mcp->mb[18], mcp->mb[19]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
+ "Done %s.\n", __func__);
}
/* Copy mailbox information */
@@ -3882,7 +4095,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
+ "Entered %s.\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
@@ -3926,7 +4140,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
+ "Done %s.\n", __func__);
}
/* Copy mailbox information */
@@ -3941,7 +4156,7 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10fd,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
"Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
mcp->mb[0] = MBC_ISP84XX_RESET;
@@ -3955,7 +4170,8 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
if (rval != QLA_SUCCESS)
ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
else
- ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
+ "Done %s.\n", __func__);
return rval;
}
@@ -3967,7 +4183,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -3986,7 +4203,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
ql_dbg(ql_dbg_mbx, vha, 0x1101,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4003,7 +4221,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
rval = QLA_SUCCESS;
- ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
+ "Entered %s.\n", __func__);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
@@ -4046,7 +4265,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
ql_dbg(ql_dbg_mbx, vha, 0x1104,
"Failed=%x mb[0]=%x.\n", rval, mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4060,7 +4280,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
@@ -4078,7 +4299,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1107,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
+ "Done %s.\n", __func__);
if (mcp->mb[1] != 0x7)
ha->link_data_rate = mcp->mb[1];
}
@@ -4094,7 +4316,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
+ "Entered %s.\n", __func__);
if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
return QLA_FUNCTION_FAILED;
@@ -4113,7 +4336,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
/* Copy all bits to preserve original value */
memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
- ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
+ "Done %s.\n", __func__);
}
return rval;
}
@@ -4125,7 +4349,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SET_PORT_CONFIG;
/* Copy all bits to preserve original setting */
@@ -4140,7 +4365,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
ql_dbg(ql_dbg_mbx, vha, 0x110d,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else
- ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
+ "Done %s.\n", __func__);
return rval;
}
@@ -4155,7 +4381,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
+ "Entered %s.\n", __func__);
if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
return QLA_FUNCTION_FAILED;
@@ -4183,7 +4410,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4196,7 +4424,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
uint8_t byte;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
+ "Entered %s.\n", __func__);
/* Integer part */
rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
@@ -4216,7 +4445,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
}
*frac = (byte >> 6) * 25;
- ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
+ "Done %s.\n", __func__);
fail:
return rval;
}
@@ -4229,7 +4459,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
@@ -4248,7 +4479,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1016,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4262,7 +4494,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
+ "Entered %s.\n", __func__);
if (!IS_QLA82XX(ha))
return QLA_FUNCTION_FAILED;
@@ -4281,7 +4514,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x100c,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4295,7 +4529,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
int rval = QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
+ "Entered %s.\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
@@ -4318,7 +4553,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
(mcp->mb[1] << 16) | mcp->mb[0],
(mcp->mb[3] << 16) | mcp->mb[2]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
+ "Done %s.\n", __func__);
ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
if (!ha->md_template_size) {
ql_dbg(ql_dbg_mbx, vha, 0x1122,
@@ -4337,7 +4573,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
int rval = QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
+ "Entered %s.\n", __func__);
ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
@@ -4372,7 +4609,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
((mcp->mb[1] << 16) | mcp->mb[0]),
((mcp->mb[3] << 16) | mcp->mb[2]));
} else
- ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
+ "Done %s.\n", __func__);
return rval;
}
@@ -4387,7 +4625,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x1133, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
+ "Entered %s.\n", __func__);
memset(mcp, 0, sizeof(mbx_cmd_t));
mcp->mb[0] = MBC_SET_LED_CONFIG;
@@ -4412,7 +4651,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
ql_dbg(ql_dbg_mbx, vha, 0x1134,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1135, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4429,7 +4669,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x1136, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
+ "Entered %s.\n", __func__);
memset(mcp, 0, sizeof(mbx_cmd_t));
mcp->mb[0] = MBC_GET_LED_CONFIG;
@@ -4454,7 +4695,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
led_cfg[4] = mcp->mb[5];
led_cfg[5] = mcp->mb[6];
}
- ql_dbg(ql_dbg_mbx, vha, 0x1138, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4471,7 +4713,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
if (!IS_QLA82XX(ha))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x1127,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
"Entered %s.\n", __func__);
memset(mcp, 0, sizeof(mbx_cmd_t));
@@ -4491,7 +4733,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
ql_dbg(ql_dbg_mbx, vha, 0x1128,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1129,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
"Done %s.\n", __func__);
}
@@ -4509,7 +4751,8 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
if (!IS_QLA83XX(ha))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x1130, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_WRITE_REMOTE_REG;
mcp->mb[1] = LSW(reg);
@@ -4527,7 +4770,7 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
ql_dbg(ql_dbg_mbx, vha, 0x1131,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1132,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
"Done %s.\n", __func__);
}
@@ -4543,13 +4786,14 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
mbx_cmd_t *mcp = &mc;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- ql_dbg(ql_dbg_mbx, vha, 0x113b,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
"Implicit LOGO Unsupported.\n");
return QLA_FUNCTION_FAILED;
}
- ql_dbg(ql_dbg_mbx, vha, 0x113c, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
+ "Entering %s.\n", __func__);
/* Perform Implicit LOGO. */
mcp->mb[0] = MBC_PORT_LOGOUT;
@@ -4564,7 +4808,8 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
ql_dbg(ql_dbg_mbx, vha, 0x113d,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
else
- ql_dbg(ql_dbg_mbx, vha, 0x113e, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
+ "Done %s.\n", __func__);
return rval;
}
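
The qla_mbx.c hunks above consistently bump the routine "Entered %s"/"Done %s" trace messages from ql_dbg_mbx to ql_dbg_mbx + ql_dbg_verbose while leaving the failure messages at plain ql_dbg_mbx, so the per-call chatter is only emitted when verbose mailbox tracing is explicitly requested. A minimal, self-contained sketch of the level-mask gating this relies on follows; the numeric bit values and the ql_mask_match() helper are illustrative assumptions, since the real definitions live in qla_dbg.h/qla_dbg.c, which are not part of this excerpt.

/* Sketch of debug-level gating, assuming single-bit levels combined into one
 * module-wide mask (adding distinct bits is equivalent to OR'ing them).
 * The bit values below are illustrative only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ql_dbg_mbx	0x01000000u	/* illustrative value */
#define ql_dbg_verbose	0x08000000u	/* illustrative value */

static uint32_t ql2xextended_error_logging = ql_dbg_mbx;	/* user enabled mbx only */

static bool ql_mask_match(uint32_t level)
{
	/* Print only when every bit requested by the call site is enabled. */
	return (level & ql2xextended_error_logging) == level;
}

int main(void)
{
	printf("failure msg (ql_dbg_mbx): %d\n",
	    ql_mask_match(ql_dbg_mbx));				/* 1 -> printed */
	printf("trace msg (ql_dbg_mbx + ql_dbg_verbose): %d\n",
	    ql_mask_match(ql_dbg_mbx + ql_dbg_verbose));	/* 0 -> suppressed */
	return 0;
}

Under this rule the "Failed=..." diagnostics keep printing whenever the mailbox bit alone is set, while the entry/exit traces stay quiet unless the verbose bit is also enabled.
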
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index aa062a1b0ca4..3e8b32419e68 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -6,6 +6,7 @@
*/
#include "qla_def.h"
#include "qla_gbl.h"
+#include "qla_target.h"
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
@@ -49,6 +50,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->vport_slock, flags);
list_add_tail(&vha->list, &ha->vp_list);
+
+ qlt_update_vp_map(vha, SET_VP_IDX);
+
spin_unlock_irqrestore(&ha->vport_slock, flags);
mutex_unlock(&ha->vport_lock);
@@ -79,6 +83,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->vport_slock, flags);
}
list_del(&vha->list);
+ qlt_update_vp_map(vha, RESET_VP_IDX);
spin_unlock_irqrestore(&ha->vport_slock, flags);
vp_id = vha->vp_idx;
@@ -134,7 +139,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
ql_dbg(ql_dbg_vport, vha, 0xa001,
"Marking port dead, loop_id=0x%04x : %x.\n",
- fcport->loop_id, fcport->vp_idx);
+ fcport->loop_id, fcport->vha->vp_idx);
qla2x00_mark_device_lost(vha, fcport, 0, 0);
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@@ -150,6 +155,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ /* Remove port id from vp target map */
+ qlt_update_vp_map(vha, RESET_AL_PA);
+
qla2x00_mark_vp_devices_dead(vha);
atomic_set(&vha->vp_state, VP_FAILED);
vha->flags.management_server_logged_in = 0;
@@ -295,10 +303,8 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
- ql_dbg(ql_dbg_dpc, vha, 0x4012,
- "Entering %s.\n", __func__);
- ql_dbg(ql_dbg_dpc, vha, 0x4013,
- "vp_flags: 0x%lx.\n", vha->vp_flags);
+ ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
+ "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
qla2x00_do_work(vha);
@@ -348,7 +354,7 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
}
}
- ql_dbg(ql_dbg_dpc, vha, 0x401c,
+ ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
"Exiting %s.\n", __func__);
return 0;
}
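
The qla_mid.c hunks above wire each vport into a target-mode lookup map: qlt_update_vp_map(vha, SET_VP_IDX) on creation, RESET_VP_IDX on deletion, and RESET_AL_PA when a vport is disabled. The helper's body is not in this excerpt; the sketch below shows what it plausibly does, inferred from the qlt_find_host_by_d_id()/qlt_find_host_by_vp_idx() lookups added in qla_target.c near the end of this diff, so the type and field names should be read as assumptions.

/*
 * Plausible sketch of qlt_update_vp_map() for the commands used above;
 * the real body lives in qla_target.c outside this excerpt, so the map
 * type and field accesses are inferred and should be treated as assumptions.
 */
void qlt_update_vp_map_sketch(struct scsi_qla_host *vha, int cmd)
{
	struct qla_tgt_vp_map *map = vha->hw->tgt.tgt_vp_map;

	switch (cmd) {
	case SET_VP_IDX:	/* vport created: remember vha by its vp index */
		map[vha->vp_idx].vha = vha;
		break;
	case RESET_VP_IDX:	/* vport deleted: forget it */
		map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:	/* vport disabled: drop the AL_PA -> vp_idx entry */
		map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}

With such a map in place, the ATIO routing code can translate either a vp_index or the AL_PA byte of a destination port id back to the owning scsi_qla_host without walking the vport list from interrupt context.
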
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index de722a933438..caf627ba7fa8 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1190,12 +1190,12 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
}
/* Offset in flash = lower 16 bits
- * Number of enteries = upper 16 bits
+ * Number of entries = upper 16 bits
*/
offset = n & 0xffffU;
n = (n >> 16) & 0xffffU;
- /* number of addr/value pair should not exceed 1024 enteries */
+ /* number of addr/value pair should not exceed 1024 entries */
if (n >= 1024) {
ql_log(ql_log_fatal, vha, 0x0071,
"Card flash not initialized:n=0x%x.\n", n);
@@ -2050,7 +2050,7 @@ qla82xx_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- ql_log(ql_log_info, NULL, 0xb054,
+ ql_log(ql_log_info, NULL, 0xb053,
"%s: NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
@@ -2446,7 +2446,7 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
ql_log(ql_log_info, vha, 0x00a1,
- "Firmware loaded successully from flash.\n");
+ "Firmware loaded successfully from flash.\n");
return QLA_SUCCESS;
} else {
ql_log(ql_log_warn, vha, 0x0108,
@@ -2461,7 +2461,7 @@ try_blob_fw:
blob = ha->hablob = qla2x00_request_firmware(vha);
if (!blob) {
ql_log(ql_log_fatal, vha, 0x00a3,
- "Firmware image not preset.\n");
+ "Firmware image not present.\n");
goto fw_load_failed;
}
@@ -2689,7 +2689,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
if (!optrom) {
ql_log(ql_log_warn, vha, 0xb01b,
"Unable to allocate memory "
- "for optron burst write (%x KB).\n",
+ "for optrom burst write (%x KB).\n",
OPTROM_BURST_SIZE / 1024);
}
}
@@ -2960,9 +2960,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
* changing the state to DEV_READY
*/
ql_log(ql_log_info, vha, 0xb023,
- "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
- ql_log(ql_log_info, vha, 0xb024,
- "DRV_ACTIVE:%d DRV_STATE:%d.\n",
+ "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d "
+ "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
drv_active, drv_state);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_READY);
@@ -3129,7 +3128,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
if (ql2xmdenable) {
if (qla82xx_md_collect(vha))
ql_log(ql_log_warn, vha, 0xb02c,
- "Not able to collect minidump.\n");
+ "Minidump not collected.\n");
} else
ql_log(ql_log_warn, vha, 0xb04f,
"Minidump disabled.\n");
@@ -3160,11 +3159,11 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
"Firmware version differs "
"Previous version: %d:%d:%d - "
"New version: %d:%d:%d\n",
+ fw_major_version, fw_minor_version,
+ fw_subminor_version,
ha->fw_major_version,
ha->fw_minor_version,
- ha->fw_subminor_version,
- fw_major_version, fw_minor_version,
- fw_subminor_version);
+ ha->fw_subminor_version);
/* Release MiniDump resources */
qla82xx_md_free(vha);
/* ALlocate MiniDump resources */
@@ -3325,6 +3324,30 @@ exit:
return rval;
}
+static int qla82xx_check_temp(scsi_qla_host_t *vha)
+{
+ uint32_t temp, temp_state, temp_val;
+ struct qla_hw_data *ha = vha->hw;
+
+ temp = qla82xx_rd_32(ha, CRB_TEMP_STATE);
+ temp_state = qla82xx_get_temp_state(temp);
+ temp_val = qla82xx_get_temp_val(temp);
+
+ if (temp_state == QLA82XX_TEMP_PANIC) {
+ ql_log(ql_log_warn, vha, 0x600e,
+ "Device temperature %d degrees C exceeds "
+ " maximum allowed. Hardware has been shut down.\n",
+ temp_val);
+ return 1;
+ } else if (temp_state == QLA82XX_TEMP_WARN) {
+ ql_log(ql_log_warn, vha, 0x600f,
+ "Device temperature %d degrees C exceeds "
+ "operating range. Immediate action needed.\n",
+ temp_val);
+ }
+ return 0;
+}
+
void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
@@ -3347,18 +3370,20 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
/* don't poll if reset is going on */
if (!ha->flags.isp82xx_reset_hdlr_active) {
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- if (dev_state == QLA82XX_DEV_NEED_RESET &&
+ if (qla82xx_check_temp(vha)) {
+ set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+ ha->flags.isp82xx_fw_hung = 1;
+ qla82xx_clear_pending_mbx(vha);
+ } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x6001,
"Adapter reset needed.\n");
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x6002,
"Quiescent needed.\n");
set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
} else {
if (qla82xx_check_fw_alive(vha)) {
ql_dbg(ql_dbg_timer, vha, 0x6011,
@@ -3398,7 +3423,6 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
}
- qla2xxx_wake_dpc(vha);
ha->flags.isp82xx_fw_hung = 1;
ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
qla82xx_clear_pending_mbx(vha);
@@ -4113,6 +4137,14 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
goto md_failed;
}
+ if (ha->flags.isp82xx_no_md_cap) {
+ ql_log(ql_log_warn, vha, 0xb054,
+ "Forced reset from application, "
+ "ignore minidump capture\n");
+ ha->flags.isp82xx_no_md_cap = 0;
+ goto md_failed;
+ }
+
if (qla82xx_validate_template_chksum(vha)) {
ql_log(ql_log_info, vha, 0xb039,
"Template checksum validation error\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 4ac50e274661..6eb210e3cc63 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -26,6 +26,7 @@
#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
#define QLA82XX_DMA_SHIFT_VALUE 0x55555555
#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
@@ -561,7 +562,6 @@
#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158))
#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg))
-#define PCIE_CHICKEN3 (0x120c8)
#define PCIE_SETUP_FUNCTION (0x12040)
#define PCIE_SETUP_FUNCTION2 (0x12048)
@@ -1178,4 +1178,16 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
+#define qla82xx_get_temp_val(x) ((x) >> 16)
+#define qla82xx_get_temp_state(x) ((x) & 0xffff)
+#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
#endif
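
qla_nx.h above adds the CRB_TEMP_STATE register offset plus encode/decode macros that qla82xx_check_temp() (added in qla_nx.c earlier in this diff) uses from the watchdog: the low 16 bits carry the temperature state and the high 16 bits the temperature in degrees C. A small standalone illustration of the packing, reusing the macro bodies from the hunk above:

/*
 * Standalone illustration of the CRB_TEMP_STATE encoding used by
 * qla82xx_check_temp(): state in the low 16 bits, temperature (degrees C)
 * in the high 16 bits. The macro bodies are copied from the hunk above.
 */
#include <stdint.h>
#include <stdio.h>

#define qla82xx_get_temp_val(x)		((x) >> 16)
#define qla82xx_get_temp_state(x)	((x) & 0xffff)
#define qla82xx_encode_temp(val, state)	(((val) << 16) | (state))

enum { QLA82XX_TEMP_NORMAL = 0x1, QLA82XX_TEMP_WARN, QLA82XX_TEMP_PANIC };

int main(void)
{
	uint32_t reg = qla82xx_encode_temp(47, QLA82XX_TEMP_NORMAL);

	printf("temp=%u C state=%u\n",
	    qla82xx_get_temp_val(reg), qla82xx_get_temp_state(reg));
	/* prints: temp=47 C state=1 */
	return 0;
}

As the qla_nx.c watchdog hunk shows, QLA82XX_TEMP_PANIC marks the ISP unrecoverable and fails pending mailbox commands, while QLA82XX_TEMP_WARN only logs a warning.
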
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c9c56a8427f3..6d1d873a20e2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,12 +13,13 @@
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
-
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
+#include "qla_target.h"
+
/*
* Driver version
*/
@@ -40,6 +41,12 @@ static struct kmem_cache *ctx_cachep;
*/
int ql_errlev = ql_log_all;
+int ql2xenableclass2;
+module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xenableclass2,
+ "Specify if Class 2 operations are supported from the very "
+ "beginning. Default is 0 - class 2 not supported.");
+
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
@@ -255,6 +262,8 @@ struct scsi_host_template qla2xxx_driver_template = {
.max_sectors = 0xFFFF,
.shost_attrs = qla2x00_host_attrs,
+
+ .supported_mode = MODE_INITIATOR,
};
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
@@ -306,7 +315,8 @@ static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
/* -------------------------------------------------------------------------- */
-static int qla2x00_alloc_queues(struct qla_hw_data *ha)
+static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
+ struct rsp_que *rsp)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
@@ -324,6 +334,12 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
"Unable to allocate memory for response queue ptrs.\n");
goto fail_rsp_map;
}
+ /*
+ * Make sure we record at least the request and response queue zero in
+ * case we need to free them if part of the probe fails.
+ */
+ ha->rsp_q_map[0] = rsp;
+ ha->req_q_map[0] = req;
set_bit(0, ha->rsp_qid_map);
set_bit(0, ha->req_qid_map);
return 1;
@@ -642,12 +658,12 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure) {
- ql_dbg(ql_dbg_io, vha, 0x3001,
+ ql_dbg(ql_dbg_aer, vha, 0x9010,
"PCI Channel IO permanent failure, exiting "
"cmd=%p.\n", cmd);
cmd->result = DID_NO_CONNECT << 16;
} else {
- ql_dbg(ql_dbg_io, vha, 0x3002,
+ ql_dbg(ql_dbg_aer, vha, 0x9011,
"EEH_Busy, Requeuing the cmd=%p.\n", cmd);
cmd->result = DID_REQUEUE << 16;
}
@@ -657,7 +673,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
rval = fc_remote_port_chkready(rport);
if (rval) {
cmd->result = rval;
- ql_dbg(ql_dbg_io, vha, 0x3003,
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
"fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
cmd, rval);
goto qc24_fail_command;
@@ -1136,7 +1152,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
ret = FAILED;
ql_log(ql_log_info, vha, 0x8012,
- "BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun);
+ "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_fatal, vha, 0x8013,
@@ -2180,6 +2196,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
"Memory allocated for ha=%p.\n", ha);
ha->pdev = pdev;
+ ha->tgt.enable_class_2 = ql2xenableclass2;
/* Clear our data area */
ha->bars = bars;
@@ -2243,6 +2260,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
ha->gid_list_info_size = 8;
@@ -2258,6 +2276,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
ha->gid_list_info_size = 8;
@@ -2417,6 +2436,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
+que_init:
+ /* Alloc arrays of request and response ring ptrs */
+ if (!qla2x00_alloc_queues(ha, req, rsp)) {
+ ql_log(ql_log_fatal, base_vha, 0x003d,
+ "Failed to allocate memory for queue pointers..."
+ "aborting.\n");
+ goto probe_init_failed;
+ }
+
+ qlt_probe_one_stage1(base_vha, ha);
+
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
@@ -2424,20 +2454,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_save_state(pdev);
- /* Alloc arrays of request and response ring ptrs */
-que_init:
- if (!qla2x00_alloc_queues(ha)) {
- ql_log(ql_log_fatal, base_vha, 0x003d,
- "Failed to allocate memory for queue pointers.. aborting.\n");
- goto probe_init_failed;
- }
-
- ha->rsp_q_map[0] = rsp;
- ha->req_q_map[0] = req;
+ /* Assign back pointers */
rsp->req = req;
req->rsp = rsp;
- set_bit(0, ha->req_qid_map);
- set_bit(0, ha->rsp_qid_map);
+
/* FWI2-capable only. */
req->req_q_in = &ha->iobase->isp24.req_q_in;
req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -2514,6 +2534,14 @@ que_init:
ql_dbg(ql_dbg_init, base_vha, 0x00ee,
"DPC thread started successfully.\n");
+ /*
+ * If we're not coming up in initiator mode, we might sit for
+ * a while without waking up the dpc thread, which leads to a
+ * stuck process warning. So just kick the dpc once here and
+ * let the kthread start (and go back to sleep in qla2x00_do_dpc).
+ */
+ qla2xxx_wake_dpc(base_vha);
+
skip_dpc:
list_add_tail(&base_vha->list, &ha->vp_list);
base_vha->host->irq = ha->pdev->irq;
@@ -2559,7 +2587,11 @@ skip_dpc:
ql_dbg(ql_dbg_init, base_vha, 0x00f2,
"Init done and hba is online.\n");
- scsi_scan_host(host);
+ if (qla_ini_mode_enabled(base_vha))
+ scsi_scan_host(host);
+ else
+ ql_dbg(ql_dbg_init, base_vha, 0x0122,
+ "skipping scsi_scan_host() for non-initiator port\n");
qla2x00_alloc_sysfs_attr(base_vha);
@@ -2577,11 +2609,17 @@ skip_dpc:
base_vha->host_no,
ha->isp_ops->fw_version_str(base_vha, fw_str));
+ qlt_add_target(ha, base_vha);
+
return 0;
probe_init_failed:
qla2x00_free_req_que(ha, req);
+ ha->req_q_map[0] = NULL;
+ clear_bit(0, ha->req_qid_map);
qla2x00_free_rsp_que(ha, rsp);
+ ha->rsp_q_map[0] = NULL;
+ clear_bit(0, ha->rsp_qid_map);
ha->max_req_queues = ha->max_rsp_queues = 0;
probe_failed:
@@ -2621,6 +2659,22 @@ probe_out:
}
static void
+qla2x00_stop_dpc_thread(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct task_struct *t = ha->dpc_thread;
+
+ if (ha->dpc_thread == NULL)
+ return;
+ /*
+ * qla2xxx_wake_dpc checks for ->dpc_thread
+ * so we need to zero it out.
+ */
+ ha->dpc_thread = NULL;
+ kthread_stop(t);
+}
+
+static void
qla2x00_shutdown(struct pci_dev *pdev)
{
scsi_qla_host_t *vha;
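
The new qla2x00_stop_dpc_thread() helper above deliberately clears ha->dpc_thread before calling kthread_stop(), because the wake path only dereferences that pointer when it is still set. A rough sketch of the wake side, assuming it matches the existing qla2xxx_wake_dpc() in qla_os.c (unchanged by this patch and therefore not shown here):

/* Assumed shape of the wake helper; not part of this diff. Clearing
 * ha->dpc_thread first means a concurrent wake caller simply sees NULL
 * and returns instead of waking a thread that is being stopped. */
void qla2xxx_wake_dpc_sketch(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct task_struct *t = ha->dpc_thread;

	if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
		wake_up_process(t);
}
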
@@ -2663,9 +2717,18 @@ qla2x00_remove_one(struct pci_dev *pdev)
struct qla_hw_data *ha;
unsigned long flags;
+ /*
+ * If the PCI device is disabled that means that probe failed and any
+ * resources should be have cleaned up on probe exit.
+ */
+ if (!atomic_read(&pdev->enable_cnt))
+ return;
+
base_vha = pci_get_drvdata(pdev);
ha = base_vha->hw;
+ ha->flags.host_shutting_down = 1;
+
mutex_lock(&ha->vport_lock);
while (ha->cur_vport_count) {
struct Scsi_Host *scsi_host;
@@ -2719,6 +2782,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha->dpc_thread = NULL;
kthread_stop(t);
}
+ qlt_remove_target(ha, base_vha);
qla2x00_free_sysfs_attr(base_vha);
@@ -2770,17 +2834,7 @@ qla2x00_free_device(scsi_qla_host_t *vha)
if (vha->timer_active)
qla2x00_stop_timer(vha);
- /* Kill the kernel thread for this host */
- if (ha->dpc_thread) {
- struct task_struct *t = ha->dpc_thread;
-
- /*
- * qla2xxx_wake_dpc checks for ->dpc_thread
- * so we need to zero it out.
- */
- ha->dpc_thread = NULL;
- kthread_stop(t);
- }
+ qla2x00_stop_dpc_thread(vha);
qla25xx_delete_queues(vha);
@@ -2842,8 +2896,10 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
spin_unlock_irqrestore(vha->host->host_lock, flags);
set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
- } else
+ } else {
fc_remote_port_delete(rport);
+ qlt_fc_port_deleted(vha, fcport);
+ }
}
/*
@@ -2859,7 +2915,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
int do_login, int defer)
{
if (atomic_read(&fcport->state) == FCS_ONLINE &&
- vha->vp_idx == fcport->vp_idx) {
+ vha->vp_idx == fcport->vha->vp_idx) {
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
qla2x00_schedule_rport_del(vha, fcport, defer);
}
@@ -2908,7 +2964,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
fc_port_t *fcport;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
+ if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
continue;
/*
@@ -2921,7 +2977,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
if (defer)
qla2x00_schedule_rport_del(vha, fcport, defer);
- else if (vha->vp_idx == fcport->vp_idx)
+ else if (vha->vp_idx == fcport->vha->vp_idx)
qla2x00_schedule_rport_del(vha, fcport, defer);
}
}
@@ -2946,10 +3002,13 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->init_cb)
goto fail;
+ if (qlt_mem_alloc(ha) < 0)
+ goto fail_free_init_cb;
+
ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
if (!ha->gid_list)
- goto fail_free_init_cb;
+ goto fail_free_tgt_mem;
ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
if (!ha->srb_mempool)
@@ -3167,6 +3226,8 @@ fail_free_gid_list:
ha->gid_list_dma);
ha->gid_list = NULL;
ha->gid_list_dma = 0;
+fail_free_tgt_mem:
+ qlt_mem_free(ha);
fail_free_init_cb:
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
ha->init_cb_dma);
@@ -3282,6 +3343,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
if (ha->ctx_mempool)
mempool_destroy(ha->ctx_mempool);
+ qlt_mem_free(ha);
+
if (ha->init_cb)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
ha->init_cb, ha->init_cb_dma);
@@ -3311,6 +3374,10 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->gid_list = NULL;
ha->gid_list_dma = 0;
+
+ ha->tgt.atio_ring = NULL;
+ ha->tgt.atio_dma = 0;
+ ha->tgt.tgt_vp_map = NULL;
}
struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -3671,10 +3738,9 @@ qla2x00_do_dpc(void *data)
ha->dpc_active = 1;
- ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
- "DPC handler waking up.\n");
- ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
- "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
+ "DPC handler waking up, dpc_flags=0x%lx.\n",
+ base_vha->dpc_flags);
qla2x00_do_work(base_vha);
@@ -3740,6 +3806,16 @@ qla2x00_do_dpc(void *data)
clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
}
+ if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
+ int ret;
+ ret = qla2x00_send_change_request(base_vha, 0x3, 0);
+ if (ret != QLA_SUCCESS)
+ ql_log(ql_log_warn, base_vha, 0x121,
+ "Failed to enable receiving of RSCN "
+ "requests: 0x%x.\n", ret);
+ clear_bit(SCR_PENDING, &base_vha->dpc_flags);
+ }
+
if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
"Quiescence mode scheduled.\n");
@@ -4457,6 +4533,21 @@ qla2x00_module_init(void)
return -ENOMEM;
}
+ /* Initialize target kmem_cache and mem_pools */
+ ret = qlt_init();
+ if (ret < 0) {
+ kmem_cache_destroy(srb_cachep);
+ return ret;
+ } else if (ret > 0) {
+ /*
+ * If initiator mode is explicitly disabled by qlt_init(),
+ * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
+ * performing scsi_scan_target() during LOOP UP event.
+ */
+ qla2xxx_transport_functions.disable_target_scan = 1;
+ qla2xxx_transport_vport_functions.disable_target_scan = 1;
+ }
+
/* Derive version string. */
strcpy(qla2x00_version_str, QLA2XXX_VERSION);
if (ql2xextended_error_logging)
@@ -4468,6 +4559,7 @@ qla2x00_module_init(void)
kmem_cache_destroy(srb_cachep);
ql_log(ql_log_fatal, NULL, 0x0002,
"fc_attach_transport failed...Failing load!.\n");
+ qlt_exit();
return -ENODEV;
}
@@ -4481,6 +4573,7 @@ qla2x00_module_init(void)
fc_attach_transport(&qla2xxx_transport_vport_functions);
if (!qla2xxx_transport_vport_template) {
kmem_cache_destroy(srb_cachep);
+ qlt_exit();
fc_release_transport(qla2xxx_transport_template);
ql_log(ql_log_fatal, NULL, 0x0004,
"fc_attach_transport vport failed...Failing load!.\n");
@@ -4492,6 +4585,7 @@ qla2x00_module_init(void)
ret = pci_register_driver(&qla2xxx_pci_driver);
if (ret) {
kmem_cache_destroy(srb_cachep);
+ qlt_exit();
fc_release_transport(qla2xxx_transport_template);
fc_release_transport(qla2xxx_transport_vport_template);
ql_log(ql_log_fatal, NULL, 0x0006,
@@ -4511,6 +4605,7 @@ qla2x00_module_exit(void)
pci_unregister_driver(&qla2xxx_pci_driver);
qla2x00_release_firmware();
kmem_cache_destroy(srb_cachep);
+ qlt_exit();
if (ctx_cachep)
kmem_cache_destroy(ctx_cachep);
fc_release_transport(qla2xxx_transport_template);
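
With target mode compiled in, the probe path above only calls scsi_scan_host() when the port is running as an initiator, and qla2x00_module_init() lets qlt_init() disable transport-level target scanning when initiator mode is switched off via the qlini_mode parameter defined in qla_target.c below. The mode predicates themselves are declared outside this excerpt (presumably in qla_target.h); a minimal sketch of what they are expected to test, with the exact field names taken as assumptions:

/* Assumed mode predicates; the real inline helpers are not part of this
 * excerpt. They are expected to test the Scsi_Host active_mode bits that
 * the probe path configures via .supported_mode = MODE_INITIATOR and that
 * the qlt code adjusts when target mode is enabled. */
static inline bool qla_ini_mode_enabled_sketch(struct scsi_qla_host *vha)
{
	return vha->host->active_mode & MODE_INITIATOR;
}

static inline bool qla_tgt_mode_enabled_sketch(struct scsi_qla_host *vha)
{
	return vha->host->active_mode & MODE_TARGET;
}
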
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
new file mode 100644
index 000000000000..04f80ebf09eb
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -0,0 +1,4973 @@
+/*
+ * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
+ *
+ * based on qla2x00t.c code:
+ *
+ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ * Copyright (C) 2004 - 2005 Leonid Stoljar
+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ * Copyright (C) 2006 - 2010 ID7 Ltd.
+ *
+ * Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2
+ * of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+
+static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
+module_param(qlini_mode, charp, S_IRUGO);
+MODULE_PARM_DESC(qlini_mode,
+ "Determines when initiator mode will be enabled. Possible values: "
+ "\"exclusive\" - initiator mode will be enabled on load, "
+ "disabled on enabling target mode and then on disabling target mode "
+ "enabled back; "
+ "\"disabled\" - initiator mode will never be enabled; "
+ "\"enabled\" (default) - initiator mode will always stay enabled.");
+
+static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+
+/*
+ * From scsi/fc/fc_fcp.h
+ */
+enum fcp_resp_rsp_codes {
+ FCP_TMF_CMPL = 0,
+ FCP_DATA_LEN_INVALID = 1,
+ FCP_CMND_FIELDS_INVALID = 2,
+ FCP_DATA_PARAM_MISMATCH = 3,
+ FCP_TMF_REJECTED = 4,
+ FCP_TMF_FAILED = 5,
+ FCP_TMF_INVALID_LUN = 9,
+};
+
+/*
+ * fc_pri_ta from scsi/fc/fc_fcp.h
+ */
+#define FCP_PTA_SIMPLE 0 /* simple task attribute */
+#define FCP_PTA_HEADQ 1 /* head of queue task attribute */
+#define FCP_PTA_ORDERED 2 /* ordered task attribute */
+#define FCP_PTA_ACA 4 /* auto. contingent allegiance */
+#define FCP_PTA_MASK 7 /* mask for task attribute field */
+#define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
+#define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
+
+/*
+ * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
+ * must be called under HW lock and could unlock/lock it inside.
+ * It isn't an issue, since in the current implementation, at the time when
+ * those functions are called:
+ *
+ * - Either context is IRQ and only IRQ handler can modify HW data,
+ * including rings related fields,
+ *
+ * - Or access to target mode variables from struct qla_tgt doesn't
+ * cross those functions' boundaries, except tgt_stop, which is
+ * additionally protected by irq_cmd_count.
+ */
+/* Predefs for callbacks handed to qla2xxx LLD */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
+ struct atio_from_isp *pkt);
+static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
+static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+ int fn, void *iocb, int flags);
+static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
+ *cmd, struct atio_from_isp *atio, int ha_locked);
+static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
+ struct qla_tgt_srr_imm *imm, int ha_lock);
+/*
+ * Global Variables
+ */
+static struct kmem_cache *qla_tgt_cmd_cachep;
+static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
+static mempool_t *qla_tgt_mgmt_cmd_mempool;
+static struct workqueue_struct *qla_tgt_wq;
+static DEFINE_MUTEX(qla_tgt_mutex);
+static LIST_HEAD(qla_tgt_glist);
+
+/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
+static struct qla_tgt_sess *qlt_find_sess_by_port_name(
+ struct qla_tgt *tgt,
+ const uint8_t *port_name)
+{
+ struct qla_tgt_sess *sess;
+
+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+ if (!memcmp(sess->port_name, port_name, WWN_SIZE))
+ return sess;
+ }
+
+ return NULL;
+}
+
+/* Might release hw lock, then reacquire!! */
+static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
+{
+ /* Send marker if required */
+ if (unlikely(vha->marker_needed != 0)) {
+ int rc = qla2x00_issue_marker(vha, vha_locked);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03d,
+ "qla_target(%d): issue_marker() failed\n",
+ vha->vp_idx);
+ }
+ return rc;
+ }
+ return QLA_SUCCESS;
+}
+
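+/*
+ * Map the 24-bit destination ID of an incoming frame to the scsi_qla_host
+ * that owns it: the port itself if the al_pa matches directly, otherwise
+ * the NPIV vport looked up via the al_pa -> vp_idx map.
+ */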
+static inline
+struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
+ uint8_t *d_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint8_t vp_idx;
+
+ if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
+ return NULL;
+
+ if (vha->d_id.b.al_pa == d_id[2])
+ return vha;
+
+ BUG_ON(ha->tgt.tgt_vp_map == NULL);
+ vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
+ if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+ return ha->tgt.tgt_vp_map[vp_idx].vha;
+
+ return NULL;
+}
+
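+/* Map a virtual port index from an IOCB to the corresponding scsi_qla_host. */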
+static inline
+struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
+ uint16_t vp_idx)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->vp_idx == vp_idx)
+ return vha;
+
+ BUG_ON(ha->tgt.tgt_vp_map == NULL);
+ if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+ return ha->tgt.tgt_vp_map[vp_idx].vha;
+
+ return NULL;
+}
+
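+/*
+ * Dispatch an incoming ATIO to the scsi_qla_host it is addressed to
+ * (by d_id for ATIO_TYPE7, by vp_index for immediate notifies) and pass
+ * it on to qlt_24xx_atio_pkt().
+ */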
+void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio)
+{
+ switch (atio->u.raw.entry_type) {
+ case ATIO_TYPE7:
+ {
+ struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
+ atio->u.isp24.fcp_hdr.d_id);
+ if (unlikely(NULL == host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03e,
+ "qla_target(%d): Received ATIO_TYPE7 "
+ "with unknown d_id %x:%x:%x\n", vha->vp_idx,
+ atio->u.isp24.fcp_hdr.d_id[0],
+ atio->u.isp24.fcp_hdr.d_id[1],
+ atio->u.isp24.fcp_hdr.d_id[2]);
+ break;
+ }
+ qlt_24xx_atio_pkt(host, atio);
+ break;
+ }
+
+ case IMMED_NOTIFY_TYPE:
+ {
+ struct scsi_qla_host *host = vha;
+ struct imm_ntfy_from_isp *entry =
+ (struct imm_ntfy_from_isp *)atio;
+
+ if ((entry->u.isp24.vp_index != 0xFF) &&
+ (entry->u.isp24.nport_handle != 0xFFFF)) {
+ host = qlt_find_host_by_vp_idx(vha,
+ entry->u.isp24.vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03f,
+ "qla_target(%d): Received "
+ "ATIO (IMMED_NOTIFY_TYPE) "
+ "with unknown vp_index %d\n",
+ vha->vp_idx, entry->u.isp24.vp_index);
+ break;
+ }
+ }
+ qlt_24xx_atio_pkt(host, atio);
+ break;
+ }
+
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe040,
+ "qla_target(%d): Received unknown ATIO atio "
+ "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+ break;
+ }
+
+ return;
+}
+
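+/*
+ * Dispatch a target-mode response queue entry to the scsi_qla_host
+ * identified by its vp_index and pass it on to qlt_response_pkt().
+ */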
+void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
+{
+ switch (pkt->entry_type) {
+ case CTIO_TYPE7:
+ {
+ struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+ struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ entry->vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe041,
+ "qla_target(%d): Response pkt (CTIO_TYPE7) "
+ "received, with unknown vp_index %d\n",
+ vha->vp_idx, entry->vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case IMMED_NOTIFY_TYPE:
+ {
+ struct scsi_qla_host *host = vha;
+ struct imm_ntfy_from_isp *entry =
+ (struct imm_ntfy_from_isp *)pkt;
+
+ host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe042,
+ "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
+ "received, with unknown vp_index %d\n",
+ vha->vp_idx, entry->u.isp24.vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case NOTIFY_ACK_TYPE:
+ {
+ struct scsi_qla_host *host = vha;
+ struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+
+ if (0xFF != entry->u.isp24.vp_index) {
+ host = qlt_find_host_by_vp_idx(vha,
+ entry->u.isp24.vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe043,
+ "qla_target(%d): Response "
+ "pkt (NOTIFY_ACK_TYPE) "
+ "received, with unknown "
+ "vp_index %d\n", vha->vp_idx,
+ entry->u.isp24.vp_index);
+ break;
+ }
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case ABTS_RECV_24XX:
+ {
+ struct abts_recv_from_24xx *entry =
+ (struct abts_recv_from_24xx *)pkt;
+ struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ entry->vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe044,
+ "qla_target(%d): Response pkt "
+ "(ABTS_RECV_24XX) received, with unknown "
+ "vp_index %d\n", vha->vp_idx, entry->vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case ABTS_RESP_24XX:
+ {
+ struct abts_resp_to_24xx *entry =
+ (struct abts_resp_to_24xx *)pkt;
+ struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ entry->vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe045,
+ "qla_target(%d): Response pkt "
+ "(ABTS_RECV_24XX) received, with unknown "
+ "vp_index %d\n", vha->vp_idx, entry->vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ default:
+ qlt_response_pkt(vha, pkt);
+ break;
+ }
+
+}
+
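+/*
+ * Work handler that completes session teardown: releases the fabric
+ * se_session via tgt_ops->free_session(), frees the qla_tgt_sess and
+ * wakes up waiters once the target's session count drops to zero.
+ */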
+static void qlt_free_session_done(struct work_struct *work)
+{
+ struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
+ free_work);
+ struct qla_tgt *tgt = sess->tgt;
+ struct scsi_qla_host *vha = sess->vha;
+ struct qla_hw_data *ha = vha->hw;
+
+ BUG_ON(!tgt);
+ /*
+ * Release the target session for FC Nexus from fabric module code.
+ */
+ if (sess->se_sess != NULL)
+ ha->tgt.tgt_ops->free_session(sess);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
+ "Unregistration of sess %p finished\n", sess);
+
+ kfree(sess);
+ /*
+ * We need to protect against a race where tgt is freed before or
+ * inside wake_up()
+ */
+ tgt->sess_count--;
+ if (tgt->sess_count == 0)
+ wake_up_all(&tgt->waitQ);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+void qlt_unreg_sess(struct qla_tgt_sess *sess)
+{
+ struct scsi_qla_host *vha = sess->vha;
+
+ vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+
+ list_del(&sess->sess_list_entry);
+ if (sess->deleted)
+ list_del(&sess->del_list_entry);
+
+ INIT_WORK(&sess->free_work, qlt_free_session_done);
+ schedule_work(&sess->free_work);
+}
+EXPORT_SYMBOL(qlt_unreg_sess);
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ uint32_t unpacked_lun, lun = 0;
+ uint16_t loop_id;
+ int res = 0;
+ struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
+ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+
+ loop_id = le16_to_cpu(n->u.isp24.nport_handle);
+ if (loop_id == 0xFFFF) {
+#if 0 /* FIXME: Re-enable Global event handling.. */
+ /* Global event */
+ atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
+ qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
+ if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
+ sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
+ typeof(*sess), sess_list_entry);
+ switch (mcmd) {
+ case QLA_TGT_NEXUS_LOSS_SESS:
+ mcmd = QLA_TGT_NEXUS_LOSS;
+ break;
+ case QLA_TGT_ABORT_ALL_SESS:
+ mcmd = QLA_TGT_ABORT_ALL;
+ break;
+ case QLA_TGT_NEXUS_LOSS:
+ case QLA_TGT_ABORT_ALL:
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe046,
+ "qla_target(%d): Not allowed "
+ "command %x in %s", vha->vp_idx,
+ mcmd, __func__);
+ sess = NULL;
+ break;
+ }
+ } else
+ sess = NULL;
+#endif
+ } else {
+ sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe000,
+ "Using sess for qla_tgt_reset: %p\n", sess);
+ if (!sess) {
+ res = -ESRCH;
+ return res;
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe047,
+ "scsi(%ld): resetting (session %p from port "
+ "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
+ "mcmd %x, loop_id %d)\n", vha->host_no, sess,
+ sess->port_name[0], sess->port_name[1],
+ sess->port_name[2], sess->port_name[3],
+ sess->port_name[4], sess->port_name[5],
+ sess->port_name[6], sess->port_name[7],
+ mcmd, loop_id);
+
+ lun = a->u.isp24.fcp_cmnd.lun;
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
+ iocb, QLA24XX_MGMT_SEND_NACK);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
+ bool immediate)
+{
+ struct qla_tgt *tgt = sess->tgt;
+ uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
+
+ if (sess->deleted)
+ return;
+
+ ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+ "Scheduling sess %p for deletion\n", sess);
+ list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
+ sess->deleted = 1;
+
+ if (immediate)
+ dev_loss_tmo = 0;
+
+ sess->expires = jiffies + dev_loss_tmo * HZ;
+
+ ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
+ "qla_target(%d): session for port %02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
+ "deletion in %u secs (expires: %lu) immed: %d\n",
+ sess->vha->vp_idx,
+ sess->port_name[0], sess->port_name[1],
+ sess->port_name[2], sess->port_name[3],
+ sess->port_name[4], sess->port_name[5],
+ sess->port_name[6], sess->port_name[7],
+ sess->loop_id, dev_loss_tmo, sess->expires, immediate);
+
+ if (immediate)
+ schedule_delayed_work(&tgt->sess_del_work, 0);
+ else
+ schedule_delayed_work(&tgt->sess_del_work,
+ sess->expires - jiffies);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
+{
+ struct qla_tgt_sess *sess;
+
+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
+ qlt_schedule_sess_for_deletion(sess, true);
+
+ /* At this point tgt could be already dead */
+}
+
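+/*
+ * Look up the firmware loop ID for the given S_ID by walking the list of
+ * logged-in devices returned by qla2x00_get_id_list().  Returns 0 on
+ * success and a negative value if the port is not found.
+ */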
+static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
+ uint16_t *loop_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ dma_addr_t gid_list_dma;
+ struct gid_list_info *gid_list;
+ char *id_iter;
+ int res, rc, i;
+ uint16_t entries;
+
+ gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ &gid_list_dma, GFP_KERNEL);
+ if (!gid_list) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
+ "qla_target(%d): DMA Alloc failed of %u\n",
+ vha->vp_idx, qla2x00_gid_list_size(ha));
+ return -ENOMEM;
+ }
+
+ /* Get list of logged in devices */
+ rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
+ "qla_target(%d): get_id_list() failed: %x\n",
+ vha->vp_idx, rc);
+ res = -1;
+ goto out_free_id_list;
+ }
+
+ id_iter = (char *)gid_list;
+ res = -1;
+ for (i = 0; i < entries; i++) {
+ struct gid_list_info *gid = (struct gid_list_info *)id_iter;
+ if ((gid->al_pa == s_id[2]) &&
+ (gid->area == s_id[1]) &&
+ (gid->domain == s_id[0])) {
+ *loop_id = le16_to_cpu(gid->loop_id);
+ res = 0;
+ break;
+ }
+ id_iter += ha->gid_list_info_size;
+ }
+
+out_free_id_list:
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ gid_list, gid_list_dma);
+ return res;
+}
+
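+/*
+ * Check whether the port behind @sess is still logged in with the firmware.
+ * If so, refresh the session's s_id, loop_id and conf_compl_supported from
+ * the current port database and return true; otherwise return false.
+ * Retries if a global reset occurs during the lookup.
+ */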
+static bool qlt_check_fcport_exist(struct scsi_qla_host *vha,
+ struct qla_tgt_sess *sess)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_port_24xx_data *pmap24;
+ bool res, found = false;
+ int rc, i;
+ uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */
+ uint16_t entries;
+ void *pmap;
+ int pmap_len;
+ fc_port_t *fcport;
+ int global_resets;
+
+retry:
+ global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+
+ rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len);
+ if (rc != QLA_SUCCESS) {
+ res = false;
+ goto out;
+ }
+
+ pmap24 = pmap;
+ entries = pmap_len/sizeof(*pmap24);
+
+ for (i = 0; i < entries; ++i) {
+ if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) {
+ loop_id = le16_to_cpu(pmap24[i].loop_id);
+ found = true;
+ break;
+ }
+ }
+
+ kfree(pmap);
+
+ if (!found) {
+ res = false;
+ goto out;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046,
+ "qlt_check_fcport_exist(): loop_id %d", loop_id);
+
+ fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+ if (fcport == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047,
+ "qla_target(%d): Allocation of tmp FC port failed",
+ vha->vp_idx);
+ res = false;
+ goto out;
+ }
+
+ fcport->loop_id = loop_id;
+
+ rc = qla2x00_get_port_database(vha, fcport, 0);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048,
+ "qla_target(%d): Failed to retrieve fcport "
+ "information -- get_port_database() returned %x "
+ "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
+ res = false;
+ goto out_free_fcport;
+ }
+
+ if (global_resets !=
+ atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
+ "qla_target(%d): global reset during session discovery"
+ " (counter was %d, new %d), retrying",
+ vha->vp_idx, global_resets,
+ atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+ goto retry;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
+ "Updating sess %p s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, "
+ "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa,
+ sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);
+
+ sess->s_id = fcport->d_id;
+ sess->loop_id = fcport->loop_id;
+ sess->conf_compl_supported = !!(fcport->flags &
+ FCF_CONF_COMP_SUPPORTED);
+
+ res = true;
+
+out_free_fcport:
+ kfree(fcport);
+
+out:
+ return res;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_undelete_sess(struct qla_tgt_sess *sess)
+{
+ BUG_ON(!sess->deleted);
+
+ list_del(&sess->del_list_entry);
+ sess->deleted = 0;
+}
+
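+/*
+ * Delayed work that walks the deleted-session list and either cancels the
+ * deletion (if the firmware still knows the port) or shuts the session
+ * down once its dev_loss timeout has expired.
+ */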
+static void qlt_del_sess_work_fn(struct delayed_work *work)
+{
+ struct qla_tgt *tgt = container_of(work, struct qla_tgt,
+ sess_del_work);
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ while (!list_empty(&tgt->del_sess_list)) {
+ sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
+ del_list_entry);
+ if (time_after_eq(jiffies, sess->expires)) {
+ bool cancel;
+
+ qlt_undelete_sess(sess);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ cancel = qlt_check_fcport_exist(vha, sess);
+
+ if (cancel) {
+ if (sess->deleted) {
+ /*
+ * sess was again deleted while we were
+ * discovering it
+ */
+ spin_lock_irqsave(&ha->hardware_lock,
+ flags);
+ continue;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049,
+ "qla_target(%d): cancel deletion of "
+ "session for port %02x:%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x (loop ID %d), because "
+ " it isn't deleted by firmware",
+ vha->vp_idx, sess->port_name[0],
+ sess->port_name[1], sess->port_name[2],
+ sess->port_name[3], sess->port_name[4],
+ sess->port_name[5], sess->port_name[6],
+ sess->port_name[7], sess->loop_id);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
+ "Timeout: sess %p about to be deleted\n",
+ sess);
+ ha->tgt.tgt_ops->shutdown_sess(sess);
+ ha->tgt.tgt_ops->put_sess(sess);
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ } else {
+ schedule_delayed_work(&tgt->sess_del_work,
+ sess->expires - jiffies);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/*
+ * Adds an extra ref so the hw lock can be dropped after adding sess to the list.
+ * Caller must put it.
+ */
+static struct qla_tgt_sess *qlt_create_sess(
+ struct scsi_qla_host *vha,
+ fc_port_t *fcport,
+ bool local)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ unsigned long flags;
+ unsigned char be_sid[3];
+
+ /* Check to avoid double sessions */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
+ sess_list_entry) {
+ if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
+ "Double sess %p found (s_id %x:%x:%x, "
+ "loop_id %d), updating to d_id %x:%x:%x, "
+ "loop_id %d", sess, sess->s_id.b.domain,
+ sess->s_id.b.al_pa, sess->s_id.b.area,
+ sess->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.al_pa, fcport->d_id.b.area,
+ fcport->loop_id);
+
+ if (sess->deleted)
+ qlt_undelete_sess(sess);
+
+ kref_get(&sess->se_sess->sess_kref);
+ sess->s_id = fcport->d_id;
+ sess->loop_id = fcport->loop_id;
+ sess->conf_compl_supported = !!(fcport->flags &
+ FCF_CONF_COMP_SUPPORTED);
+ if (sess->local && !local)
+ sess->local = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return sess;
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
+ "qla_target(%u): session allocation failed, "
+ "all commands from port %02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
+ fcport->port_name[0], fcport->port_name[1],
+ fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5],
+ fcport->port_name[6], fcport->port_name[7]);
+
+ return NULL;
+ }
+ sess->tgt = ha->tgt.qla_tgt;
+ sess->vha = vha;
+ sess->s_id = fcport->d_id;
+ sess->loop_id = fcport->loop_id;
+ sess->local = local;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
+ "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
+ sess, ha->tgt.qla_tgt);
+
+ be_sid[0] = sess->s_id.b.domain;
+ be_sid[1] = sess->s_id.b.area;
+ be_sid[2] = sess->s_id.b.al_pa;
+ /*
+ * Determine if this fc_port->port_name is allowed to access
+ * target mode using explicit NodeACLs+MappedLUNs, or using
+ * TPG demo mode. If this is successful a target mode FC nexus
+ * is created.
+ */
+ if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+ &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
+ kfree(sess);
+ return NULL;
+ }
+ /*
+ * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
+ * access across ->hardware_lock reacquire.
+ */
+ kref_get(&sess->se_sess->sess_kref);
+
+ sess->conf_compl_supported = !!(fcport->flags &
+ FCF_CONF_COMP_SUPPORTED);
+ BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
+ memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
+ ha->tgt.qla_tgt->sess_count++;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
+ "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
+ " completion %ssupported) added\n",
+ vha->vp_idx, local ? "local " : "", fcport->port_name[0],
+ fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
+ fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
+ sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
+ "" : "not ");
+
+ return sess;
+}
+
+/*
+ * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+ */
+void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_sess *sess;
+ unsigned long flags;
+
+ if (!vha->hw->tgt.tgt_ops)
+ return;
+
+ if (!tgt || (fcport->port_type != FCT_INITIATOR))
+ return;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (tgt->tgt_stop) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+ sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ mutex_lock(&ha->tgt.tgt_mutex);
+ sess = qlt_create_sess(vha, fcport, false);
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ } else {
+ kref_get(&sess->se_sess->sess_kref);
+
+ if (sess->deleted) {
+ qlt_undelete_sess(sess);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
+ "qla_target(%u): %ssession for port %02x:"
+ "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
+ "reappeared\n", vha->vp_idx, sess->local ? "local "
+ : "", sess->port_name[0], sess->port_name[1],
+ sess->port_name[2], sess->port_name[3],
+ sess->port_name[4], sess->port_name[5],
+ sess->port_name[6], sess->port_name[7],
+ sess->loop_id);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
+ "Reappeared sess %p\n", sess);
+ }
+ sess->s_id = fcport->d_id;
+ sess->loop_id = fcport->loop_id;
+ sess->conf_compl_supported = !!(fcport->flags &
+ FCF_CONF_COMP_SUPPORTED);
+ }
+
+ if (sess && sess->local) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
+ "qla_target(%u): local session for "
+ "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+ "(loop ID %d) became global\n", vha->vp_idx,
+ fcport->port_name[0], fcport->port_name[1],
+ fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5],
+ fcport->port_name[6], fcport->port_name[7],
+ sess->loop_id);
+ sess->local = 0;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ha->tgt.tgt_ops->put_sess(sess);
+}
+
+void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_sess *sess;
+ unsigned long flags;
+
+ if (!vha->hw->tgt.tgt_ops)
+ return;
+
+ if (!tgt || (fcport->port_type != FCT_INITIATOR))
+ return;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (tgt->tgt_stop) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+ sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
+
+ sess->local = 1;
+ qlt_schedule_sess_for_deletion(sess, false);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
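+/*
+ * Returns true once all sessions of @tgt are gone; used as the
+ * wait_event() condition in qlt_stop_phase1().
+ */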
+static inline int test_tgt_sess_count(struct qla_tgt *tgt)
+{
+ struct qla_hw_data *ha = tgt->ha;
+ unsigned long flags;
+ int res;
+ /*
+ * We need to protect against a race where tgt is freed before or
+ * inside wake_up()
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
+ "tgt %p, empty(sess_list)=%d sess_count=%d\n",
+ tgt, list_empty(&tgt->sess_list), tgt->sess_count);
+ res = (tgt->sess_count == 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+}
+
+/* Called by tcm_qla2xxx configfs code */
+void qlt_stop_phase1(struct qla_tgt *tgt)
+{
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = tgt->ha;
+ unsigned long flags;
+
+ if (tgt->tgt_stop || tgt->tgt_stopped) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
+ "Already in tgt->tgt_stop or tgt_stopped state\n");
+ dump_stack();
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
+ vha->host_no, vha);
+ /*
+ * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
+ * Lock is needed, because we can still get an incoming packet.
+ */
+ mutex_lock(&ha->tgt.tgt_mutex);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ tgt->tgt_stop = 1;
+ qlt_clear_tgt_db(tgt, true);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ flush_delayed_work_sync(&tgt->sess_del_work);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
+ "Waiting for sess works (tgt %p)", tgt);
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ while (!list_empty(&tgt->sess_works_list)) {
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+ flush_scheduled_work();
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ }
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
+ "Waiting for tgt %p: list_empty(sess_list)=%d "
+ "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
+ tgt->sess_count);
+
+ wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+
+ /* Big hammer */
+ if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
+ qlt_disable_vha(vha);
+
+ /* Wait for sessions to clear out (just in case) */
+ wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+}
+EXPORT_SYMBOL(qlt_stop_phase1);
+
+/* Called by tcm_qla2xxx configfs code */
+void qlt_stop_phase2(struct qla_tgt *tgt)
+{
+ struct qla_hw_data *ha = tgt->ha;
+ unsigned long flags;
+
+ if (tgt->tgt_stopped) {
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
+ "Already in tgt->tgt_stopped state\n");
+ dump_stack();
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
+ "Waiting for %d IRQ commands to complete (tgt %p)",
+ tgt->irq_cmd_count, tgt);
+
+ mutex_lock(&ha->tgt.tgt_mutex);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ while (tgt->irq_cmd_count != 0) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ udelay(2);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ }
+ tgt->tgt_stop = 0;
+ tgt->tgt_stopped = 1;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
+ tgt);
+}
+EXPORT_SYMBOL(qlt_stop_phase2);
+
+/* Called from qlt_remove_target() -> qla2x00_remove_one() */
+void qlt_release(struct qla_tgt *tgt)
+{
+ struct qla_hw_data *ha = tgt->ha;
+
+ if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
+ qlt_stop_phase2(tgt);
+
+ ha->tgt.qla_tgt = NULL;
+
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
+ "Release of tgt %p finished\n", tgt);
+
+ kfree(tgt);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
+ const void *param, unsigned int param_size)
+{
+ struct qla_tgt_sess_work_param *prm;
+ unsigned long flags;
+
+ prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
+ if (!prm) {
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
+ "qla_target(%d): Unable to create session "
+ "work, command will be refused", 0);
+ return -ENOMEM;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
+ "Scheduling work (type %d, prm %p)"
+ " to find session for param %p (size %d, tgt %p)\n",
+ type, prm, param, param_size, tgt);
+
+ prm->type = type;
+ memcpy(&prm->tm_iocb, param, param_size);
+
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+ schedule_work(&tgt->sess_work);
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *ntfy,
+ uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
+ uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
+{
+ struct qla_hw_data *ha = vha->hw;
+ request_t *pkt;
+ struct nack_to_isp *nack;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+ return;
+
+ pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ if (!pkt) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe049,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", vha->vp_idx, __func__);
+ return;
+ }
+
+ if (ha->tgt.qla_tgt != NULL)
+ ha->tgt.qla_tgt->notify_ack_expected++;
+
+ pkt->entry_type = NOTIFY_ACK_TYPE;
+ pkt->entry_count = 1;
+
+ nack = (struct nack_to_isp *)pkt;
+ nack->ox_id = ntfy->ox_id;
+
+ nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+ if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+ nack->u.isp24.flags = ntfy->u.isp24.flags &
+ __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ }
+ nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+ nack->u.isp24.status = ntfy->u.isp24.status;
+ nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+ nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+ nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+ nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+ nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
+ nack->u.isp24.srr_reject_code = srr_reject_code;
+ nack->u.isp24.srr_reject_code_expl = srr_explan;
+ nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe005,
+ "qla_target(%d): Sending 24xx Notify Ack %d\n",
+ vha->vp_idx, nack->u.isp24.status);
+
+ qla2x00_start_iocbs(vha, vha->req);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
+ struct abts_recv_from_24xx *abts, uint32_t status,
+ bool ids_reversed)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct abts_resp_to_24xx *resp;
+ uint32_t f_ctl;
+ uint8_t *p;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe006,
+ "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
+ ha, abts, status);
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+ return;
+
+ resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+ if (!resp) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04a,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet", vha->vp_idx, __func__);
+ return;
+ }
+
+ resp->entry_type = ABTS_RESP_24XX;
+ resp->entry_count = 1;
+ resp->nport_handle = abts->nport_handle;
+ resp->vp_index = vha->vp_idx;
+ resp->sof_type = abts->sof_type;
+ resp->exchange_address = abts->exchange_address;
+ resp->fcp_hdr_le = abts->fcp_hdr_le;
+ f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+ F_CTL_LAST_SEQ | F_CTL_END_SEQ |
+ F_CTL_SEQ_INITIATIVE);
+ p = (uint8_t *)&f_ctl;
+ resp->fcp_hdr_le.f_ctl[0] = *p++;
+ resp->fcp_hdr_le.f_ctl[1] = *p++;
+ resp->fcp_hdr_le.f_ctl[2] = *p;
+ if (ids_reversed) {
+ resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
+ resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
+ resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
+ resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
+ resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
+ resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
+ } else {
+ resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
+ resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
+ resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
+ resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
+ resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
+ resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
+ }
+ resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
+ if (status == FCP_TMF_CMPL) {
+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
+ resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
+ resp->payload.ba_acct.low_seq_cnt = 0x0000;
+ resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+ resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
+ resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
+ } else {
+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
+ resp->payload.ba_rjt.reason_code =
+ BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
+ /* Other bytes are zero */
+ }
+
+ ha->tgt.qla_tgt->abts_resp_expected++;
+
+ qla2x00_start_iocbs(vha, vha->req);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
+ struct abts_resp_from_24xx_fw *entry)
+{
+ struct ctio7_to_24xx *ctio;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe007,
+ "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+ return;
+
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+ if (ctio == NULL) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04b,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", vha->vp_idx, __func__);
+ return;
+ }
+
+ /*
+ * On entry we have the firmware's response to the ABTS response we
+ * generated earlier, so the ID fields in it are reversed.
+ */
+
+ ctio->entry_type = CTIO_TYPE7;
+ ctio->entry_count = 1;
+ ctio->nport_handle = entry->nport_handle;
+ ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->vp_index = vha->vp_idx;
+ ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
+ ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
+ ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
+ ctio->exchange_addr = entry->exchange_addr_to_abort;
+ ctio->u.status1.flags =
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_TERMINATE);
+ ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
+
+ qla2x00_start_iocbs(vha, vha->req);
+
+ qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
+ FCP_TMF_CMPL, true);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+ struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_mgmt_cmd *mcmd;
+ int rc;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
+ "qla_target(%d): task abort (tag=%d)\n",
+ vha->vp_idx, abts->exchange_addr_to_abort);
+
+ mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+ if (mcmd == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
+ "qla_target(%d): %s: Allocation of ABORT cmd failed",
+ vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+ memset(mcmd, 0, sizeof(*mcmd));
+
+ mcmd->sess = sess;
+ memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
+
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
+ abts->exchange_addr_to_abort);
+ if (rc != 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
+ "qla_target(%d): tgt_ops->handle_tmr()"
+ " failed: %d", vha->vp_idx, rc);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+ struct abts_recv_from_24xx *abts)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ uint32_t tag = abts->exchange_addr_to_abort;
+ uint8_t s_id[3];
+ int rc;
+
+ if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
+ "qla_target(%d): ABTS: Abort Sequence not "
+ "supported\n", vha->vp_idx);
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ return;
+ }
+
+ if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
+ "qla_target(%d): ABTS: Unknown Exchange "
+ "Address received\n", vha->vp_idx);
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
+ "qla_target(%d): task abort (s_id=%x:%x:%x, "
+ "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
+ abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
+ le32_to_cpu(abts->fcp_hdr_le.parameter));
+
+ s_id[0] = abts->fcp_hdr_le.s_id[2];
+ s_id[1] = abts->fcp_hdr_le.s_id[1];
+ s_id[2] = abts->fcp_hdr_le.s_id[0];
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+ if (!sess) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
+ "qla_target(%d): task abort for non-existant session\n",
+ vha->vp_idx);
+ rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
+ QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
+ if (rc != 0) {
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
+ false);
+ }
+ return;
+ }
+
+ rc = __qlt_24xx_handle_abts(vha, abts, sess);
+ if (rc != 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
+ "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
+ vha->vp_idx, rc);
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ return;
+ }
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
+ struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
+{
+ struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
+ struct ctio7_to_24xx *ctio;
+
+ ql_dbg(ql_dbg_tgt, ha, 0xe008,
+ "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
+ ha, atio, resp_code);
+
+ /* Send marker if required */
+ if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
+ return;
+
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
+ if (ctio == NULL) {
+ ql_dbg(ql_dbg_tgt, ha, 0xe04c,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", ha->vp_idx, __func__);
+ return;
+ }
+
+ ctio->entry_type = CTIO_TYPE7;
+ ctio->entry_count = 1;
+ ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ ctio->nport_handle = mcmd->sess->loop_id;
+ ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->vp_index = ha->vp_idx;
+ ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_SEND_STATUS);
+ ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ ctio->u.status1.scsi_status =
+ __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
+ ctio->u.status1.response_len = __constant_cpu_to_le16(8);
+ ((uint32_t *)ctio->u.status1.sense_data)[0] = cpu_to_be32(resp_code);
+
+ qla2x00_start_iocbs(ha, ha->req);
+}
+
+void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+}
+EXPORT_SYMBOL(qlt_free_mcmd);
+
+/* callback from target fabric module code */
+void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
+{
+ struct scsi_qla_host *vha = mcmd->sess->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
+ "TM response mcmd (%p) status %#x state %#x",
+ mcmd, mcmd->fc_tm_rsp, mcmd->flags);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
+ qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
+ 0, 0, 0, 0, 0, 0);
+ else {
+ if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
+ qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
+ mcmd->fc_tm_rsp, false);
+ else
+ qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
+ mcmd->fc_tm_rsp);
+ }
+ /*
+ * Make the callback for ->free_mcmd() to queue_work() and invoke
+ * target_put_sess_cmd() to drop cmd_kref to 1. The final
+ * target_put_sess_cmd() call will be made from TFO->check_stop_free()
+ * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
+ * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
+ * qlt_xmit_tm_rsp() returns here..
+ */
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+EXPORT_SYMBOL(qlt_xmit_tm_rsp);
+
+/* No locks */
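+/*
+ * DMA-map the command's scatter-gather list and add to prm->req_cnt the
+ * number of continuation IOCBs needed for segments that don't fit in the
+ * command IOCB.
+ */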
+static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
+{
+ struct qla_tgt_cmd *cmd = prm->cmd;
+
+ BUG_ON(cmd->sg_cnt == 0);
+
+ prm->sg = (struct scatterlist *)cmd->sg;
+ prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
+ cmd->sg_cnt, cmd->dma_data_direction);
+ if (unlikely(prm->seg_cnt == 0))
+ goto out_err;
+
+ prm->cmd->sg_mapped = 1;
+
+ /*
+ * If there are more sg entries than the command IOCB can hold,
+ * we need to allocate continuation entries
+ */
+ if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
+ prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
+ prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);
+
+ ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
+ prm->seg_cnt, prm->req_cnt);
+ return 0;
+
+out_err:
+ ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
+ "qla_target(%d): PCI mapping failed: sg_cnt=%d",
+ 0, prm->cmd->sg_cnt);
+ return -1;
+}
+
+static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ BUG_ON(!cmd->sg_mapped);
+ pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+ cmd->sg_mapped = 0;
+}
+
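+/*
+ * Reserve @req_cnt entries on the request ring, refreshing the cached free
+ * count from the hardware out pointer when it looks too low.  Returns
+ * -EAGAIN if the ring does not have enough room.
+ */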
+static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
+ uint32_t req_cnt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ device_reg_t __iomem *reg = ha->iobase;
+ uint32_t cnt;
+
+ if (vha->req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe00a,
+ "Request ring circled: cnt=%d, vha->->ring_index=%d, "
+ "vha->req->cnt=%d, req_cnt=%d\n", cnt,
+ vha->req->ring_index, vha->req->cnt, req_cnt);
+ if (vha->req->ring_index < cnt)
+ vha->req->cnt = cnt - vha->req->ring_index;
+ else
+ vha->req->cnt = vha->req->length -
+ (vha->req->ring_index - cnt);
+ }
+
+ if (unlikely(vha->req->cnt < (req_cnt + 2))) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe00b,
+ "qla_target(%d): There is no room in the "
+ "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
+ "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
+ vha->req->cnt, req_cnt);
+ return -EAGAIN;
+ }
+ vha->req->cnt -= req_cnt;
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
+{
+ /* Adjust ring index. */
+ vha->req->ring_index++;
+ if (vha->req->ring_index == vha->req->length) {
+ vha->req->ring_index = 0;
+ vha->req->ring_ptr = vha->req->ring;
+ } else {
+ vha->req->ring_ptr++;
+ }
+ return (cont_entry_t *)vha->req->ring_ptr;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
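+/*
+ * Allocate the next free command handle (1..MAX_OUTSTANDING_COMMANDS) used
+ * to match a later CTIO completion back to its qla_tgt_cmd; returns
+ * QLA_TGT_NULL_HANDLE if every slot is in use.
+ */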
+static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t h;
+
+ h = ha->tgt.current_handle;
+ /* always increment cmd handle */
+ do {
+ ++h;
+ if (h > MAX_OUTSTANDING_COMMANDS)
+ h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
+ if (h == ha->tgt.current_handle) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04e,
+ "qla_target(%d): Ran out of "
+ "empty cmd slots in ha %p\n", vha->vp_idx, ha);
+ h = QLA_TGT_NULL_HANDLE;
+ break;
+ }
+ } while ((h == QLA_TGT_NULL_HANDLE) ||
+ (h == QLA_TGT_SKIP_HANDLE) ||
+ (ha->tgt.cmds[h-1] != NULL));
+
+ if (h != QLA_TGT_NULL_HANDLE)
+ ha->tgt.current_handle = h;
+
+ return h;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
+ struct scsi_qla_host *vha)
+{
+ uint32_t h;
+ struct ctio7_to_24xx *pkt;
+ struct qla_hw_data *ha = vha->hw;
+ struct atio_from_isp *atio = &prm->cmd->atio;
+
+ pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
+ prm->pkt = pkt;
+ memset(pkt, 0, sizeof(*pkt));
+
+ pkt->entry_type = CTIO_TYPE7;
+ pkt->entry_count = (uint8_t)prm->req_cnt;
+ pkt->vp_index = vha->vp_idx;
+
+ h = qlt_make_handle(vha);
+ if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+ /*
+ * CTIO type 7 from the firmware doesn't provide a way to
+ * know the initiator's LOOP ID, hence we can't find
+ * the session and, therefore, the command.
+ */
+ return -EAGAIN;
+ } else
+ ha->tgt.cmds[h-1] = prm->cmd;
+
+ pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
+ pkt->nport_handle = prm->cmd->loop_id;
+ pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ pkt->exchange_addr = atio->u.isp24.exchange_addr;
+ pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
+ pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe00c,
+ "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
+ vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
+ le16_to_cpu(pkt->u.status0.ox_id));
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there is a sufficient number of request entries to not drop it.
+ */
+static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
+ struct scsi_qla_host *vha)
+{
+ int cnt;
+ uint32_t *dword_ptr;
+ int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
+
+ /* Build continuation packets */
+ while (prm->seg_cnt > 0) {
+ cont_a64_entry_t *cont_pkt64 =
+ (cont_a64_entry_t *)qlt_get_req_pkt(vha);
+
+ /*
+ * Make sure that none of cont_pkt64's 64-bit specific
+ * fields are used for 32-bit addressing.  Cast to
+ * (cont_entry_t *) for that.
+ */
+
+ memset(cont_pkt64, 0, sizeof(*cont_pkt64));
+
+ cont_pkt64->entry_count = 1;
+ cont_pkt64->sys_define = 0;
+
+ if (enable_64bit_addressing) {
+ cont_pkt64->entry_type = CONTINUE_A64_TYPE;
+ dword_ptr =
+ (uint32_t *)&cont_pkt64->dseg_0_address;
+ } else {
+ cont_pkt64->entry_type = CONTINUE_TYPE;
+ dword_ptr =
+ (uint32_t *)&((cont_entry_t *)
+ cont_pkt64)->dseg_0_address;
+ }
+
+ /* Load continuation entry data segments */
+ for (cnt = 0;
+ cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
+ cnt++, prm->seg_cnt--) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_lo32
+ (sg_dma_address(prm->sg)));
+ if (enable_64bit_addressing) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_hi32
+ (sg_dma_address
+ (prm->sg)));
+ }
+ *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe00d,
+ "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
+ (long long unsigned int)
+ pci_dma_hi32(sg_dma_address(prm->sg)),
+ (long long unsigned int)
+ pci_dma_lo32(sg_dma_address(prm->sg)),
+ (int)sg_dma_len(prm->sg));
+
+ prm->sg = sg_next(prm->sg);
+ }
+ }
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there is a sufficient number of request entries to not drop it.
+ */
+static void qlt_load_data_segments(struct qla_tgt_prm *prm,
+ struct scsi_qla_host *vha)
+{
+ int cnt;
+ uint32_t *dword_ptr;
+ int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
+ struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe00e,
+ "iocb->scsi_status=%x, iocb->flags=%x\n",
+ le16_to_cpu(pkt24->u.status0.scsi_status),
+ le16_to_cpu(pkt24->u.status0.flags));
+
+ pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
+
+ /* Setup packet address segment pointer */
+ dword_ptr = pkt24->u.status0.dseg_0_address;
+
+ /* Set total data segment count */
+ if (prm->seg_cnt)
+ pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
+
+ if (prm->seg_cnt == 0) {
+ /* No data transfer */
+ *dword_ptr++ = 0;
+ *dword_ptr = 0;
+ return;
+ }
+
+ /* If scatter gather */
+ ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");
+
+ /* Load command entry data segments */
+ for (cnt = 0;
+ (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
+ cnt++, prm->seg_cnt--) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
+ if (enable_64bit_addressing) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_hi32(
+ sg_dma_address(prm->sg)));
+ }
+ *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe010,
+ "S/G Segment phys_addr=%llx:%llx, len=%d\n",
+ (long long unsigned int)pci_dma_hi32(sg_dma_address(
+ prm->sg)),
+ (long long unsigned int)pci_dma_lo32(sg_dma_address(
+ prm->sg)),
+ (int)sg_dma_len(prm->sg));
+
+ prm->sg = sg_next(prm->sg);
+ }
+
+ qlt_load_cont_data_segments(prm, vha);
+}
+
+static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
+{
+ return cmd->bufflen > 0;
+}
+
+/*
+ * Called without ha->hardware_lock held
+ */
+static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
+ struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
+ uint32_t *full_req_cnt)
+{
+ struct qla_tgt *tgt = cmd->tgt;
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (unlikely(cmd->aborted)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+ "qla_target(%d): terminating exchange "
+ "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
+ se_cmd, cmd->tag);
+
+ cmd->state = QLA_TGT_STATE_ABORTED;
+
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+
+ /* !! At this point cmd could be already freed !! */
+ return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
+ vha->vp_idx, cmd->tag);
+
+ prm->cmd = cmd;
+ prm->tgt = tgt;
+ prm->rq_result = scsi_status;
+ prm->sense_buffer = &cmd->sense_buffer[0];
+ prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
+ prm->sg = NULL;
+ prm->seg_cnt = -1;
+ prm->req_cnt = 1;
+ prm->add_status_pkt = 0;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
+ prm->rq_result, xmit_type);
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
+ return -EFAULT;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);
+
+ if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
+ if (qlt_pci_map_calc_cnt(prm) != 0)
+ return -EAGAIN;
+ }
+
+ *full_req_cnt = prm->req_cnt;
+
+ if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ prm->residual = se_cmd->residual_count;
+ ql_dbg(ql_dbg_tgt, vha, 0xe014,
+ "Residual underflow: %d (tag %d, "
+ "op %x, bufflen %d, rq_result %x)\n", prm->residual,
+ cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+ cmd->bufflen, prm->rq_result);
+ prm->rq_result |= SS_RESIDUAL_UNDER;
+ } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ prm->residual = se_cmd->residual_count;
+ ql_dbg(ql_dbg_tgt, vha, 0xe015,
+ "Residual overflow: %d (tag %d, "
+ "op %x, bufflen %d, rq_result %x)\n", prm->residual,
+ cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+ cmd->bufflen, prm->rq_result);
+ prm->rq_result |= SS_RESIDUAL_OVER;
+ }
+
+ if (xmit_type & QLA_TGT_XMIT_STATUS) {
+ /*
+ * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
+ * ignored in *xmit_response() below
+ */
+ if (qlt_has_data(cmd)) {
+ if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
+ (IS_FWI2_CAPABLE(ha) &&
+ (prm->rq_result != 0))) {
+ prm->add_status_pkt = 1;
+ (*full_req_cnt)++;
+ }
+ }
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe016,
+ "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
+ prm->req_cnt, *full_req_cnt, prm->add_status_pkt);
+
+ return 0;
+}
+
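+/*
+ * Decide whether the CTIO should request an explicit confirmation from the
+ * initiator: never when class 2 is enabled; for sense data it depends only
+ * on the initiator's confirmed-completion support, for plain status the
+ * enable_explicit_conf knob must be set as well.
+ */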
+static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
+ struct qla_tgt_cmd *cmd, int sending_sense)
+{
+ if (ha->tgt.enable_class_2)
+ return 0;
+
+ if (sending_sense)
+ return cmd->conf_compl_supported;
+ else
+ return ha->tgt.enable_explicit_conf &&
+ cmd->conf_compl_supported;
+}
+
+#ifdef CONFIG_QLA_TGT_DEBUG_SRR
+/*
+ * Original taken from the XFS code
+ */
+static unsigned long qlt_srr_random(void)
+{
+ static int Inited;
+ static unsigned long RandomValue;
+ static DEFINE_SPINLOCK(lock);
+ /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
+ register long rv;
+ register long lo;
+ register long hi;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lock, flags);
+ if (!Inited) {
+ RandomValue = jiffies;
+ Inited = 1;
+ }
+ rv = RandomValue;
+ hi = rv / 127773;
+ lo = rv % 127773;
+ rv = 16807 * lo - 2836 * hi;
+ if (rv <= 0)
+ rv += 2147483647;
+ RandomValue = rv;
+ spin_unlock_irqrestore(&lock, flags);
+ return rv;
+}
+
+static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
+{
+#if 0 /* This does not emulate real status packet loss, so it won't lead to an SRR */
+ if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
+ == 50) {
+ *xmit_type &= ~QLA_TGT_XMIT_STATUS;
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
+ "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
+ }
+#endif
+ /*
+ * It's currently not possible to simulate SRRs for FCP_WRITE without
+ * a physical link layer failure, so don't even try here..
+ */
+ if (cmd->dma_data_direction != DMA_FROM_DEVICE)
+ return;
+
+ if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
+ ((qlt_srr_random() % 100) == 20)) {
+ int i, leave = 0;
+ unsigned int tot_len = 0;
+
+ while (leave == 0)
+ leave = qlt_srr_random() % cmd->sg_cnt;
+
+ for (i = 0; i < leave; i++)
+ tot_len += cmd->sg[i].length;
+
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
+ "Cutting cmd %p (tag %d) buffer"
+ " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
+ " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
+ cmd->bufflen, cmd->sg_cnt);
+
+ cmd->bufflen = tot_len;
+ cmd->sg_cnt = leave;
+ }
+
+ if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
+ unsigned int offset = qlt_srr_random() % cmd->bufflen;
+
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
+ "Cutting cmd %p (tag %d) buffer head "
+ "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
+ cmd->bufflen);
+ if (offset == 0)
+ *xmit_type &= ~QLA_TGT_XMIT_DATA;
+ else if (qlt_set_data_offset(cmd, offset)) {
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
+ "qlt_set_data_offset() failed (tag %d)", cmd->tag);
+ }
+ }
+}
+#else
+static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
+{}
+#endif
+
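+/*
+ * Fill in the status portion of a CTIO7: send-status flag, residual and
+ * SCSI status; when valid sense data is present, switch to status mode 1
+ * and copy the sense bytes (as big-endian words) into the IOCB.
+ */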
+static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
+ struct qla_tgt_prm *prm)
+{
+ prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
+ (uint32_t)sizeof(ctio->u.status1.sense_data));
+ ctio->u.status0.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
+ if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
+ ctio->u.status0.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_EXPLICIT_CONFORM |
+ CTIO7_FLAGS_CONFORM_REQ);
+ }
+ ctio->u.status0.residual = cpu_to_le32(prm->residual);
+ ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
+ if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
+ int i;
+
+ if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
+ if (prm->cmd->se_cmd.scsi_status != 0) {
+ ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
+ "Skipping EXPLICIT_CONFORM and "
+ "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
+ "non GOOD status\n");
+ goto skip_explict_conf;
+ }
+ ctio->u.status1.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_EXPLICIT_CONFORM |
+ CTIO7_FLAGS_CONFORM_REQ);
+ }
+skip_explict_conf:
+ ctio->u.status1.flags &=
+ ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+ ctio->u.status1.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+ ctio->u.status1.scsi_status |=
+ __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
+ ctio->u.status1.sense_length =
+ cpu_to_le16(prm->sense_buffer_len);
+ for (i = 0; i < prm->sense_buffer_len/4; i++)
+ ((uint32_t *)ctio->u.status1.sense_data)[i] =
+ cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
+#if 0
+ if (unlikely((prm->sense_buffer_len % 4) != 0)) {
+ static int q;
+ if (q < 10) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04f,
+ "qla_target(%d): %d bytes of sense "
+ "lost", prm->tgt->ha->vp_idx,
+ prm->sense_buffer_len % 4);
+ q++;
+ }
+ }
+#endif
+ } else {
+ ctio->u.status1.flags &=
+ ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+ ctio->u.status1.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+ ctio->u.status1.sense_length = 0;
+ memset(ctio->u.status1.sense_data, 0,
+ sizeof(ctio->u.status1.sense_data));
+ }
+
+ /* Sense with len > 24, is it possible ??? */
+}
+
+/*
+ * Callback to set up a response with xmit_type QLA_TGT_XMIT_DATA and/or
+ * QLA_TGT_XMIT_STATUS for >= 24xx silicon
+ */
+int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+ uint8_t scsi_status)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct ctio7_to_24xx *pkt;
+ struct qla_tgt_prm prm;
+ uint32_t full_req_cnt = 0;
+ unsigned long flags = 0;
+ int res;
+
+ memset(&prm, 0, sizeof(prm));
+ qlt_check_srr_debug(cmd, &xmit_type);
+
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
+ "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
+ "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
+ 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);
+
+ res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
+ &full_req_cnt);
+ if (unlikely(res != 0)) {
+ if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
+ return 0;
+
+ return res;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Does the F/W have enough IOCBs for this request? */
+ res = qlt_check_reserve_free_req(vha, full_req_cnt);
+ if (unlikely(res))
+ goto out_unmap_unlock;
+
+ res = qlt_24xx_build_ctio_pkt(&prm, vha);
+ if (unlikely(res != 0))
+ goto out_unmap_unlock;
+
+
+ pkt = (struct ctio7_to_24xx *)prm.pkt;
+
+ if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
+ pkt->u.status0.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
+ CTIO7_FLAGS_STATUS_MODE_0);
+
+ qlt_load_data_segments(&prm, vha);
+
+ if (prm.add_status_pkt == 0) {
+ if (xmit_type & QLA_TGT_XMIT_STATUS) {
+ pkt->u.status0.scsi_status =
+ cpu_to_le16(prm.rq_result);
+ pkt->u.status0.residual =
+ cpu_to_le32(prm.residual);
+ pkt->u.status0.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_SEND_STATUS);
+ if (qlt_need_explicit_conf(ha, cmd, 0)) {
+ pkt->u.status0.flags |=
+ __constant_cpu_to_le16(
+ CTIO7_FLAGS_EXPLICIT_CONFORM |
+ CTIO7_FLAGS_CONFORM_REQ);
+ }
+ }
+
+ } else {
+ /*
+ * We have already made sure that there is a sufficient
+ * number of request entries to not drop the HW lock in
+ * req_pkt().
+ */
+ struct ctio7_to_24xx *ctio =
+ (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe019,
+ "Building additional status packet\n");
+
+ memcpy(ctio, pkt, sizeof(*ctio));
+ ctio->entry_count = 1;
+ ctio->dseg_count = 0;
+ ctio->u.status1.flags &= ~__constant_cpu_to_le16(
+ CTIO7_FLAGS_DATA_IN);
+
+ /* Real finish is ctio_m1's finish */
+ pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
+ pkt->u.status0.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_DONT_RET_CTIO);
+ qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
+ &prm);
+ pr_debug("Status CTIO7: %p\n", ctio);
+ }
+ } else
+ qlt_24xx_init_ctio_to_isp(pkt, &prm);
+
+
+ cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe01a,
+ "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
+ pkt, scsi_status);
+
+ qla2x00_start_iocbs(vha, vha->req);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return 0;
+
+out_unmap_unlock:
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL(qlt_xmit_response);
+
+int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
+{
+ struct ctio7_to_24xx *pkt;
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = cmd->tgt;
+ struct qla_tgt_prm prm;
+ unsigned long flags;
+ int res = 0;
+
+ memset(&prm, 0, sizeof(prm));
+ prm.cmd = cmd;
+ prm.tgt = tgt;
+ prm.sg = NULL;
+ prm.req_cnt = 1;
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
+ return -EIO;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
+ (int)vha->vp_idx);
+
+ /* Calculate number of entries and segments required */
+ if (qlt_pci_map_calc_cnt(&prm) != 0)
+ return -EAGAIN;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Does the F/W have enough IOCBs for this request? */
+ res = qlt_check_reserve_free_req(vha, prm.req_cnt);
+ if (res != 0)
+ goto out_unlock_free_unmap;
+
+ res = qlt_24xx_build_ctio_pkt(&prm, vha);
+ if (unlikely(res != 0))
+ goto out_unlock_free_unmap;
+ pkt = (struct ctio7_to_24xx *)prm.pkt;
+ pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
+ CTIO7_FLAGS_STATUS_MODE_0);
+ qlt_load_data_segments(&prm, vha);
+
+ cmd->state = QLA_TGT_STATE_NEED_DATA;
+
+ qla2x00_start_iocbs(vha, vha->req);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+
+out_unlock_free_unmap:
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL(qlt_rdy_to_xfer);
+
+/* If hardware_lock is held on entry, might drop it, then reacquire */
+/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
+static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd,
+ struct atio_from_isp *atio)
+{
+ struct ctio7_to_24xx *ctio24;
+ struct qla_hw_data *ha = vha->hw;
+ request_t *pkt;
+ int ret = 0;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
+
+ pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ if (pkt == NULL) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe050,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+
+ if (cmd != NULL) {
+ if (cmd->state < QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe051,
+ "qla_target(%d): Terminating cmd %p with "
+ "incorrect state %d\n", vha->vp_idx, cmd,
+ cmd->state);
+ } else
+ ret = 1;
+ }
+
+ pkt->entry_count = 1;
+ pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
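+ /* Build a CTIO type 7 with the TERMINATE flag to tear down the exchange */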
+ ctio24 = (struct ctio7_to_24xx *)pkt;
+ ctio24->entry_type = CTIO_TYPE7;
+ ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
+ ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio24->vp_index = vha->vp_idx;
+ ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_TERMINATE);
+ ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+
+ /* Most likely, the residual isn't needed */
+ ctio24->u.status1.residual = get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[
+ atio->u.isp24.fcp_cmnd.add_cdb_len]);
+ if (ctio24->u.status1.residual != 0)
+ ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
+ qla2x00_start_iocbs(vha, vha->req);
+ return ret;
+}
+
+static void qlt_send_term_exchange(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
+{
+ unsigned long flags;
+ int rc;
+
+ if (qlt_issue_marker(vha, ha_locked) < 0)
+ return;
+
+ if (ha_locked) {
+ rc = __qlt_send_term_exchange(vha, cmd, atio);
+ goto done;
+ }
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ rc = __qlt_send_term_exchange(vha, cmd, atio);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+done:
+ if (rc == 1) {
+ if (!ha_locked && !in_interrupt())
+ msleep(250); /* just in case */
+
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ }
+}
+
+void qlt_free_cmd(struct qla_tgt_cmd *cmd)
+{
+ BUG_ON(cmd->sg_mapped);
+
+ if (unlikely(cmd->free_sg))
+ kfree(cmd->sg);
+ kmem_cache_free(qla_tgt_cmd_cachep, cmd);
+}
+EXPORT_SYMBOL(qlt_free_cmd);
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd, void *ctio)
+{
+ struct qla_tgt_srr_ctio *sc;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_srr_imm *imm;
+
+ tgt->ctio_srr_id++;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
+ "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
+
+ if (!ctio) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
+ "qla_target(%d): SRR CTIO, but ctio is NULL\n",
+ vha->vp_idx);
+ return -EINVAL;
+ }
+
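+ /* Queue the CTIO SRR and pair it with the matching IMM SRR by srr_id */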
+ sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
+ if (sc != NULL) {
+ sc->cmd = cmd;
+ /* IRQ is already OFF */
+ spin_lock(&tgt->srr_lock);
+ sc->srr_id = tgt->ctio_srr_id;
+ list_add_tail(&sc->srr_list_entry,
+ &tgt->srr_ctio_list);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
+ "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
+ if (tgt->imm_srr_id == tgt->ctio_srr_id) {
+ int found = 0;
+ list_for_each_entry(imm, &tgt->srr_imm_list,
+ srr_list_entry) {
+ if (imm->srr_id == sc->srr_id) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
+ "Scheduling srr work\n");
+ schedule_work(&tgt->srr_work);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
+ "qla_target(%d): imm_srr_id "
+ "== ctio_srr_id (%d), but there is no "
+ "corresponding SRR IMM, deleting CTIO "
+ "SRR %p\n", vha->vp_idx,
+ tgt->ctio_srr_id, sc);
+ list_del(&sc->srr_list_entry);
+ spin_unlock(&tgt->srr_lock);
+
+ kfree(sc);
+ return -EINVAL;
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+ } else {
+ struct qla_tgt_srr_imm *ti;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
+ "qla_target(%d): Unable to allocate SRR CTIO entry\n",
+ vha->vp_idx);
+ spin_lock(&tgt->srr_lock);
+ list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
+ srr_list_entry) {
+ if (imm->srr_id == tgt->ctio_srr_id) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
+ "IMM SRR %p deleted (id %d)\n",
+ imm, imm->srr_id);
+ list_del(&imm->srr_list_entry);
+ qlt_reject_free_srr_imm(vha, imm, 1);
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
+ struct qla_tgt_cmd *cmd, uint32_t status)
+{
+ int term = 0;
+
+ if (ctio != NULL) {
+ struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
+ term = !(c->flags &
+ __constant_cpu_to_le16(OF_TERM_EXCH));
+ } else
+ term = 1;
+
+ if (term)
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+
+ return term;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
+ uint32_t handle)
+{
+ struct qla_hw_data *ha = vha->hw;
+
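+ /* Handles are 1-based; ha->tgt.cmds[] is indexed from 0 */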
+ handle--;
+ if (ha->tgt.cmds[handle] != NULL) {
+ struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
+ ha->tgt.cmds[handle] = NULL;
+ return cmd;
+ } else
+ return NULL;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
+ uint32_t handle, void *ctio)
+{
+ struct qla_tgt_cmd *cmd = NULL;
+
+ /* Clear out internal marks */
+ handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
+ CTIO_INTERMEDIATE_HANDLE_MARK);
+
+ if (handle != QLA_TGT_NULL_HANDLE) {
+ if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
+ "SKIP_HANDLE CTIO\n");
+ return NULL;
+ }
+ /* handle-1 is actually used */
+ if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe052,
+ "qla_target(%d): Wrong handle %x received\n",
+ vha->vp_idx, handle);
+ return NULL;
+ }
+ cmd = qlt_get_cmd(vha, handle);
+ if (unlikely(cmd == NULL)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe053,
+ "qla_target(%d): Suspicious: unable to "
+ "find the command with handle %x\n", vha->vp_idx,
+ handle);
+ return NULL;
+ }
+ } else if (ctio != NULL) {
+ /* We can't get loop ID from CTIO7 */
+ ql_dbg(ql_dbg_tgt, vha, 0xe054,
+ "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
+ "support NULL handles\n", vha->vp_idx);
+ return NULL;
+ }
+
+ return cmd;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
+ uint32_t status, void *ctio)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct se_cmd *se_cmd;
+ struct target_core_fabric_ops *tfo;
+ struct qla_tgt_cmd *cmd;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe01e,
+ "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
+ vha->vp_idx, ctio, status, handle);
+
+ if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
+ /* That could happen only in case of an error/reset/abort */
+ if (status != CTIO_SUCCESS) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
+ "Intermediate CTIO received"
+ " (status %x)\n", status);
+ }
+ return;
+ }
+
+ cmd = qlt_ctio_to_cmd(vha, handle, ctio);
+ if (cmd == NULL) {
+ if (status != CTIO_SUCCESS)
+ qlt_term_ctio_exchange(vha, ctio, NULL, status);
+ return;
+ }
+ se_cmd = &cmd->se_cmd;
+ tfo = se_cmd->se_tfo;
+
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
+ if (unlikely(status != CTIO_SUCCESS)) {
+ switch (status & 0xFFFF) {
+ case CTIO_LIP_RESET:
+ case CTIO_TARGET_RESET:
+ case CTIO_ABORTED:
+ case CTIO_TIMEOUT:
+ case CTIO_INVALID_RX_ID:
+ /* They are OK */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
+ "qla_target(%d): CTIO with "
+ "status %#x received, state %x, se_cmd %p, "
+ "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
+ "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
+ status, cmd->state, se_cmd);
+ break;
+
+ case CTIO_PORT_LOGGED_OUT:
+ case CTIO_PORT_UNAVAILABLE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
+ "qla_target(%d): CTIO with PORT LOGGED "
+ "OUT (29) or PORT UNAVAILABLE (28) status %x "
+ "received (state %x, se_cmd %p)\n", vha->vp_idx,
+ status, cmd->state, se_cmd);
+ break;
+
+ case CTIO_SRR_RECEIVED:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
+ "qla_target(%d): CTIO with SRR_RECEIVED"
+ " status %x received (state %x, se_cmd %p)\n",
+ vha->vp_idx, status, cmd->state, se_cmd);
+ if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
+ break;
+ else
+ return;
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
+ "qla_target(%d): CTIO with error status "
+ "0x%x received (state %x, se_cmd %p\n",
+ vha->vp_idx, status, cmd->state, se_cmd);
+ break;
+ }
+
+ if (cmd->state != QLA_TGT_STATE_NEED_DATA)
+ if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
+ return;
+ }
+
+ if (cmd->state == QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
+ } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ int rx_status = 0;
+
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+
+ if (unlikely(status != CTIO_SUCCESS))
+ rx_status = -EIO;
+ else
+ cmd->write_data_transferred = 1;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe020,
+ "Data received, context %x, rx_status %d\n",
+ 0x0, rx_status);
+
+ ha->tgt.tgt_ops->handle_data(cmd);
+ return;
+ } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
+ "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
+ "qla_target(%d): A command in state (%d) should "
+ "not return a CTIO complete\n", vha->vp_idx, cmd->state);
+ }
+
+ if (unlikely(status != CTIO_SUCCESS)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
+ dump_stack();
+ }
+
+ ha->tgt.tgt_ops->free_cmd(cmd);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+
+ if (likely(tgt == NULL)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe021,
+ "CTIO, but target mode not enabled"
+ " (ha %d %p handle %#x)", vha->vp_idx, ha, handle);
+ return;
+ }
+
+ tgt->irq_cmd_count++;
+ qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL);
+ tgt->irq_cmd_count--;
+}
+
+static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
+ uint8_t task_codes)
+{
+ int fcp_task_attr;
+
+ switch (task_codes) {
+ case ATIO_SIMPLE_QUEUE:
+ fcp_task_attr = MSG_SIMPLE_TAG;
+ break;
+ case ATIO_HEAD_OF_QUEUE:
+ fcp_task_attr = MSG_HEAD_TAG;
+ break;
+ case ATIO_ORDERED_QUEUE:
+ fcp_task_attr = MSG_ORDERED_TAG;
+ break;
+ case ATIO_ACA_QUEUE:
+ fcp_task_attr = MSG_ACA_TAG;
+ break;
+ case ATIO_UNTAGGED:
+ fcp_task_attr = MSG_SIMPLE_TAG;
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
+ "qla_target: unknown task code %x, use ORDERED instead\n",
+ task_codes);
+ fcp_task_attr = MSG_ORDERED_TAG;
+ break;
+ }
+
+ return fcp_task_attr;
+}
+
+static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
+ uint8_t *);
+/*
+ * Process context for I/O path into tcm_qla2xxx code
+ */
+static void qlt_do_work(struct work_struct *work)
+{
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ scsi_qla_host_t *vha = cmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_sess *sess = NULL;
+ struct atio_from_isp *atio = &cmd->atio;
+ unsigned char *cdb;
+ unsigned long flags;
+ uint32_t data_length;
+ int ret, fcp_task_attr, data_dir, bidi = 0;
+
+ if (tgt->tgt_stop)
+ goto out_term;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+ atio->u.isp24.fcp_hdr.s_id);
+ if (sess) {
+ if (unlikely(sess->tearing_down)) {
+ sess = NULL;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ goto out_term;
+ } else {
+ /*
+ * Do the extra kref_get() before dropping
+ * qla_hw_data->hardware_lock.
+ */
+ kref_get(&sess->se_sess->sess_kref);
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (unlikely(!sess)) {
+ uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+ "qla_target(%d): Unable to find wwn login"
+ " (s_id %x:%x:%x), trying to create it manually\n",
+ vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+
+ if (atio->u.raw.entry_count > 1) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+ "Dropping multy entry cmd %p\n", cmd);
+ goto out_term;
+ }
+
+ mutex_lock(&ha->tgt.tgt_mutex);
+ sess = qlt_make_local_sess(vha, s_id);
+ /* sess has an extra creation ref. */
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ if (!sess)
+ goto out_term;
+ }
+
+ cmd->sess = sess;
+ cmd->loop_id = sess->loop_id;
+ cmd->conf_compl_supported = sess->conf_compl_supported;
+
+ cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
+ cmd->tag = atio->u.isp24.exchange_addr;
+ cmd->unpacked_lun = scsilun_to_int(
+ (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
+
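+ /*
+ * Derive the DMA direction from the FCP_CMND read/write bits;
+ * both bits set means a bidirectional command.
+ */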
+ if (atio->u.isp24.fcp_cmnd.rddata &&
+ atio->u.isp24.fcp_cmnd.wrdata) {
+ bidi = 1;
+ data_dir = DMA_TO_DEVICE;
+ } else if (atio->u.isp24.fcp_cmnd.rddata)
+ data_dir = DMA_FROM_DEVICE;
+ else if (atio->u.isp24.fcp_cmnd.wrdata)
+ data_dir = DMA_TO_DEVICE;
+ else
+ data_dir = DMA_NONE;
+
+ fcp_task_attr = qlt_get_fcp_task_attr(vha,
+ atio->u.isp24.fcp_cmnd.task_attr);
+ data_length = be32_to_cpu(get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[
+ atio->u.isp24.fcp_cmnd.add_cdb_len]));
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe022,
+ "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
+ cmd, cmd->unpacked_lun, cmd->tag);
+
+ ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+ fcp_task_attr, data_dir, bidi);
+ if (ret != 0)
+ goto out_term;
+ /*
+ * Drop the extra session reference taken earlier in this function.
+ */
+ ha->tgt.tgt_ops->put_sess(sess);
+ return;
+
+out_term:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
+ /*
+ * cmd has not been sent to the target yet, so pass NULL as the second argument
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_cmd *cmd;
+
+ if (unlikely(tgt->tgt_stop)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+ "New command while device %p is shutting down\n", tgt);
+ return -EFAULT;
+ }
+
+ cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
+ if (!cmd) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
+ "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&cmd->cmd_list);
+
+ memcpy(&cmd->atio, atio, sizeof(*atio));
+ cmd->state = QLA_TGT_STATE_NEW;
+ cmd->tgt = ha->tgt.qla_tgt;
+ cmd->vha = vha;
+
+ INIT_WORK(&cmd->work, qlt_do_work);
+ queue_work(qla_tgt_wq, &cmd->work);
+ return 0;
+
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+ int fn, void *iocb, int flags)
+{
+ struct scsi_qla_host *vha = sess->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_mgmt_cmd *mcmd;
+ int res;
+ uint8_t tmr_func;
+
+ mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+ if (!mcmd) {
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
+ "qla_target(%d): Allocation of management "
+ "command failed, some commands and their data could "
+ "leak\n", vha->vp_idx);
+ return -ENOMEM;
+ }
+ memset(mcmd, 0, sizeof(*mcmd));
+ mcmd->sess = sess;
+
+ if (iocb) {
+ memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+ sizeof(mcmd->orig_iocb.imm_ntfy));
+ }
+ mcmd->tmr_func = fn;
+ mcmd->flags = flags;
+
+ switch (fn) {
+ case QLA_TGT_CLEAR_ACA:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
+ "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
+ tmr_func = TMR_CLEAR_ACA;
+ break;
+
+ case QLA_TGT_TARGET_RESET:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
+ "qla_target(%d): TARGET_RESET received\n",
+ sess->vha->vp_idx);
+ tmr_func = TMR_TARGET_WARM_RESET;
+ break;
+
+ case QLA_TGT_LUN_RESET:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
+ "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
+ tmr_func = TMR_LUN_RESET;
+ break;
+
+ case QLA_TGT_CLEAR_TS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
+ "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
+ tmr_func = TMR_CLEAR_TASK_SET;
+ break;
+
+ case QLA_TGT_ABORT_TS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
+ "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
+ tmr_func = TMR_ABORT_TASK_SET;
+ break;
+#if 0
+ case QLA_TGT_ABORT_ALL:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
+ "qla_target(%d): Doing ABORT_ALL_TASKS\n",
+ sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+
+ case QLA_TGT_ABORT_ALL_SESS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
+ "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
+ sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+
+ case QLA_TGT_NEXUS_LOSS_SESS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
+ "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
+ sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+
+ case QLA_TGT_NEXUS_LOSS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
+ "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+#endif
+ default:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
+ "qla_target(%d): Unknown task mgmt fn 0x%x\n",
+ sess->vha->vp_idx, fn);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -ENOSYS;
+ }
+
+ res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
+ if (res != 0) {
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
+ "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
+ sess->vha->vp_idx, res);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
+{
+ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt;
+ struct qla_tgt_sess *sess;
+ uint32_t lun, unpacked_lun;
+ int lun_size, fn;
+
+ tgt = ha->tgt.qla_tgt;
+
+ lun = a->u.isp24.fcp_cmnd.lun;
+ lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
+ fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+ a->u.isp24.fcp_hdr.s_id);
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ if (!sess) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
+ "qla_target(%d): task mgmt fn 0x%x for "
+ "non-existant session\n", vha->vp_idx, fn);
+ return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
+ sizeof(struct atio_from_isp));
+ }
+
+ return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int __qlt_abort_task(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
+{
+ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_mgmt_cmd *mcmd;
+ uint32_t lun, unpacked_lun;
+ int rc;
+
+ mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+ if (mcmd == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
+ "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
+ vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+ memset(mcmd, 0, sizeof(*mcmd));
+
+ mcmd->sess = sess;
+ memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+ sizeof(mcmd->orig_iocb.imm_ntfy));
+
+ lun = a->u.isp24.fcp_cmnd.lun;
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
+ le16_to_cpu(iocb->u.isp2x.seq_id));
+ if (rc != 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
+ "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
+ vha->vp_idx, rc);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_abort_task(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ int loop_id;
+
+ loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
+
+ sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+ if (sess == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
+ "qla_target(%d): task abort for unexisting "
+ "session\n", vha->vp_idx);
+ return qlt_sched_sess_work(ha->tgt.qla_tgt,
+ QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
+ }
+
+ return __qlt_abort_task(vha, iocb, sess);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int res = 0;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
+ "qla_target(%d): Port ID: 0x%02x:%02x:%02x"
+ " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0],
+ iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2],
+ iocb->u.isp24.status_subcode);
+
+ switch (iocb->u.isp24.status_subcode) {
+ case ELS_PLOGI:
+ case ELS_FLOGI:
+ case ELS_PRLI:
+ case ELS_LOGO:
+ case ELS_PRLO:
+ res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+ break;
+ case ELS_PDISC:
+ case ELS_ADISC:
+ {
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ if (tgt->link_reinit_iocb_pending) {
+ qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
+ 0, 0, 0, 0, 0, 0);
+ tgt->link_reinit_iocb_pending = 0;
+ }
+ res = 1; /* send notify ack */
+ break;
+ }
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
+ "qla_target(%d): Unsupported ELS command %x "
+ "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
+ res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+ break;
+ }
+
+ return res;
+}
+
+static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
+{
+ struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
+ size_t first_offset = 0, rem_offset = offset, tmp = 0;
+ int i, sg_srr_cnt, bufflen = 0;
+
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
+ "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
+ "cmd->sg_cnt: %u, direction: %d\n",
+ cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+
+ /*
+ * FIXME: Reject non zero SRR relative offset until we can test
+ * this code properly.
+ */
+ pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
+ return -1;
+
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
+ "Missing cmd->sg or zero cmd->sg_cnt in"
+ " qla_tgt_set_data_offset\n");
+ return -EINVAL;
+ }
+ /*
+ * Walk the current cmd->sg list until we locate the new sg_srr_start
+ */
+ for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
+ "sg[%d]: %p page: %p, length: %d, offset: %d\n",
+ i, sg, sg_page(sg), sg->length, sg->offset);
+
+ if ((sg->length + tmp) > offset) {
+ first_offset = rem_offset;
+ sg_srr_start = sg;
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
+ "Found matching sg[%d], using %p as sg_srr_start, "
+ "and using first_offset: %zu\n", i, sg,
+ first_offset);
+ break;
+ }
+ tmp += sg->length;
+ rem_offset -= sg->length;
+ }
+
+ if (!sg_srr_start) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
+ "Unable to locate sg_srr_start for offset: %u\n", offset);
+ return -EINVAL;
+ }
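+ /* Number of entries left from sg_srr_start to the end of the original list */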
+ sg_srr_cnt = (cmd->sg_cnt - i);
+
+ sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
+ if (!sg_srr) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
+ "Unable to allocate sgp\n");
+ return -ENOMEM;
+ }
+ sg_init_table(sg_srr, sg_srr_cnt);
+ sgp = &sg_srr[0];
+ /*
+ * Walk the remaining list for sg_srr_start, mapping to the newly
+ * allocated sg_srr taking first_offset into account.
+ */
+ for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
+ if (first_offset) {
+ sg_set_page(sgp, sg_page(sg),
+ (sg->length - first_offset), first_offset);
+ first_offset = 0;
+ } else {
+ sg_set_page(sgp, sg_page(sg), sg->length, 0);
+ }
+ bufflen += sgp->length;
+
+ sgp = sg_next(sgp);
+ if (!sgp)
+ break;
+ }
+
+ cmd->sg = sg_srr;
+ cmd->sg_cnt = sg_srr_cnt;
+ cmd->bufflen = bufflen;
+ cmd->offset += offset;
+ cmd->free_sg = 1;
+
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
+ cmd->sg_cnt);
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
+ cmd->bufflen);
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
+ cmd->offset);
+
+ BUG_ON(cmd->sg_cnt < 0);
+ BUG_ON(cmd->bufflen < 0);
+
+ return 0;
+}
+
+static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
+ uint32_t srr_rel_offs, int *xmit_type)
+{
+ int res = 0, rel_offs;
+
+ rel_offs = srr_rel_offs - cmd->offset;
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
+ srr_rel_offs, rel_offs);
+
+ *xmit_type = QLA_TGT_XMIT_ALL;
+
+ if (rel_offs < 0) {
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
+ "qla_target(%d): SRR rel_offs (%d) < 0",
+ cmd->vha->vp_idx, rel_offs);
+ res = -1;
+ } else if (rel_offs == cmd->bufflen)
+ *xmit_type = QLA_TGT_XMIT_STATUS;
+ else if (rel_offs > 0)
+ res = qlt_set_data_offset(cmd, rel_offs);
+
+ return res;
+}
+
+/* No locks, thread context */
+static void qlt_handle_srr(struct scsi_qla_host *vha,
+ struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
+{
+ struct imm_ntfy_from_isp *ntfy =
+ (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_cmd *cmd = sctio->cmd;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ unsigned long flags;
+ int xmit_type = 0, resp = 0;
+ uint32_t offset;
+ uint16_t srr_ui;
+
+ offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
+ srr_ui = ntfy->u.isp24.srr_ui;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
+ cmd, srr_ui);
+
+ switch (srr_ui) {
+ case SRR_IU_STATUS:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy,
+ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ xmit_type = QLA_TGT_XMIT_STATUS;
+ resp = 1;
+ break;
+ case SRR_IU_DATA_IN:
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
+ "Unable to process SRR_IU_DATA_IN due to"
+ " missing cmd->sg, state: %d\n", cmd->state);
+ dump_stack();
+ goto out_reject;
+ }
+ if (se_cmd->scsi_status != 0) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe02a,
+ "Rejecting SRR_IU_DATA_IN with non GOOD "
+ "scsi_status\n");
+ goto out_reject;
+ }
+ cmd->bufflen = se_cmd->data_length;
+
+ if (qlt_has_data(cmd)) {
+ if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+ goto out_reject;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy,
+ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ resp = 1;
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
+ "qla_target(%d): SRR for in data for cmd "
+ "without them (tag %d, SCSI status %d), "
+ "reject", vha->vp_idx, cmd->tag,
+ cmd->se_cmd.scsi_status);
+ goto out_reject;
+ }
+ break;
+ case SRR_IU_DATA_OUT:
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
+ "Unable to process SRR_IU_DATA_OUT due to"
+ " missing cmd->sg\n");
+ dump_stack();
+ goto out_reject;
+ }
+ if (se_cmd->scsi_status != 0) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe02b,
+ "Rejecting SRR_IU_DATA_OUT"
+ " with non GOOD scsi_status\n");
+ goto out_reject;
+ }
+ cmd->bufflen = se_cmd->data_length;
+
+ if (qlt_has_data(cmd)) {
+ if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+ goto out_reject;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy,
+ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (xmit_type & QLA_TGT_XMIT_DATA)
+ qlt_rdy_to_xfer(cmd);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
+ "qla_target(%d): SRR for out data for cmd "
+ "without them (tag %d, SCSI status %d), "
+ "reject", vha->vp_idx, cmd->tag,
+ cmd->se_cmd.scsi_status);
+ goto out_reject;
+ }
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
+ "qla_target(%d): Unknown srr_ui value %x",
+ vha->vp_idx, srr_ui);
+ goto out_reject;
+ }
+
+ /* Transmit response in case of status and data-in cases */
+ if (resp)
+ qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+
+ return;
+
+out_reject:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+ if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+ dump_stack();
+ } else
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
+ struct qla_tgt_srr_imm *imm, int ha_locked)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ kfree(imm);
+}
+
+static void qlt_handle_srr_work(struct work_struct *work)
+{
+ struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_tgt_srr_ctio *sctio;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
+ tgt);
+
+restart:
+ spin_lock_irqsave(&tgt->srr_lock, flags);
+ list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
+ struct qla_tgt_srr_imm *imm, *i, *ti;
+ struct qla_tgt_cmd *cmd;
+ struct se_cmd *se_cmd;
+
+ imm = NULL;
+ list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
+ srr_list_entry) {
+ if (i->srr_id == sctio->srr_id) {
+ list_del(&i->srr_list_entry);
+ if (imm) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
+ "qla_target(%d): There must be "
+ "only one IMM SRR per CTIO SRR "
+ "(IMM SRR %p, id %d, CTIO %p\n",
+ vha->vp_idx, i, i->srr_id, sctio);
+ qlt_reject_free_srr_imm(tgt->vha, i, 0);
+ } else
+ imm = i;
+ }
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
+ "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
+ sctio->srr_id);
+
+ if (imm == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
+ "Not found matching IMM for SRR CTIO (id %d)\n",
+ sctio->srr_id);
+ continue;
+ } else
+ list_del(&sctio->srr_list_entry);
+
+ spin_unlock_irqrestore(&tgt->srr_lock, flags);
+
+ cmd = sctio->cmd;
+ /*
+ * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
+ * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
+ * logic.
+ */
+ cmd->offset = 0;
+ if (cmd->free_sg) {
+ kfree(cmd->sg);
+ cmd->sg = NULL;
+ cmd->free_sg = 0;
+ }
+ se_cmd = &cmd->se_cmd;
+
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->sg = se_cmd->t_data_sg;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
+ "SRR cmd %p (se_cmd %p, tag %d, op %x), "
+ "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
+ se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
+
+ qlt_handle_srr(vha, sctio, imm);
+
+ kfree(imm);
+ kfree(sctio);
+ goto restart;
+ }
+ spin_unlock_irqrestore(&tgt->srr_lock, flags);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_tgt_srr_imm *imm;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_srr_ctio *sctio;
+
+ tgt->imm_srr_id++;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
+ vha->vp_idx);
+
+ imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
+ if (imm != NULL) {
+ memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
+
+ /* IRQ is already OFF */
+ spin_lock(&tgt->srr_lock);
+ imm->srr_id = tgt->imm_srr_id;
+ list_add_tail(&imm->srr_list_entry,
+ &tgt->srr_imm_list);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
+ "IMM NTFY SRR %p added (id %d, ui %x)\n",
+ imm, imm->srr_id, iocb->u.isp24.srr_ui);
+ if (tgt->imm_srr_id == tgt->ctio_srr_id) {
+ int found = 0;
+ list_for_each_entry(sctio, &tgt->srr_ctio_list,
+ srr_list_entry) {
+ if (sctio->srr_id == imm->srr_id) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
+ "Scheduling srr work\n");
+ schedule_work(&tgt->srr_work);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
+ "qla_target(%d): imm_srr_id "
+ "== ctio_srr_id (%d), but there is no "
+ "corresponding SRR CTIO, deleting IMM "
+ "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
+ imm);
+ list_del(&imm->srr_list_entry);
+
+ kfree(imm);
+
+ spin_unlock(&tgt->srr_lock);
+ goto out_reject;
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+ } else {
+ struct qla_tgt_srr_ctio *ts;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
+ "qla_target(%d): Unable to allocate SRR IMM "
+ "entry, SRR request will be rejected\n", vha->vp_idx);
+
+ /* IRQ is already OFF */
+ spin_lock(&tgt->srr_lock);
+ list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
+ srr_list_entry) {
+ if (sctio->srr_id == tgt->imm_srr_id) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
+ "CTIO SRR %p deleted (id %d)\n",
+ sctio, sctio->srr_id);
+ list_del(&sctio->srr_list_entry);
+ qlt_send_term_exchange(vha, sctio->cmd,
+ &sctio->cmd->atio, 1);
+ kfree(sctio);
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+ goto out_reject;
+ }
+
+ return;
+
+out_reject:
+ qlt_send_notify_ack(vha, iocb, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t add_flags = 0;
+ int send_notify_ack = 1;
+ uint16_t status;
+
+ status = le16_to_cpu(iocb->u.isp2x.status);
+ switch (status) {
+ case IMM_NTFY_LIP_RESET:
+ {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
+ "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
+ vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
+ iocb->u.isp24.status_subcode);
+
+ if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+ send_notify_ack = 0;
+ break;
+ }
+
+ case IMM_NTFY_LIP_LINK_REINIT:
+ {
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
+ "qla_target(%d): LINK REINIT (loop %#x, "
+ "subcode %x)\n", vha->vp_idx,
+ le16_to_cpu(iocb->u.isp24.nport_handle),
+ iocb->u.isp24.status_subcode);
+ if (tgt->link_reinit_iocb_pending) {
+ qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
+ 0, 0, 0, 0, 0, 0);
+ }
+ memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
+ tgt->link_reinit_iocb_pending = 1;
+ /*
+ * QLogic requires waiting after a LINK REINIT for possible
+ * PDISC or ADISC ELS commands.
+ */
+ send_notify_ack = 0;
+ break;
+ }
+
+ case IMM_NTFY_PORT_LOGOUT:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
+ "qla_target(%d): Port logout (loop "
+ "%#x, subcode %x)\n", vha->vp_idx,
+ le16_to_cpu(iocb->u.isp24.nport_handle),
+ iocb->u.isp24.status_subcode);
+
+ if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
+ send_notify_ack = 0;
+ /* The sessions will be cleared in the callback, if needed */
+ break;
+
+ case IMM_NTFY_GLBL_TPRLO:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
+ "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
+ if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+ send_notify_ack = 0;
+ /* The sessions will be cleared in the callback, if needed */
+ break;
+
+ case IMM_NTFY_PORT_CONFIG:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
+ "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
+ status);
+ if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+ send_notify_ack = 0;
+ /* The sessions will be cleared in the callback, if needed */
+ break;
+
+ case IMM_NTFY_GLBL_LOGO:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
+ "qla_target(%d): Link failure detected\n",
+ vha->vp_idx);
+ /* I_T nexus loss */
+ if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_IOCB_OVERFLOW:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
+ "qla_target(%d): Cannot provide requested "
+ "capability (IOCB overflowed the immediate notify "
+ "resource count)\n", vha->vp_idx);
+ break;
+
+ case IMM_NTFY_ABORT_TASK:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
+ "qla_target(%d): Abort Task (S %08x I %#x -> "
+ "L %#x)\n", vha->vp_idx,
+ le16_to_cpu(iocb->u.isp2x.seq_id),
+ GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
+ le16_to_cpu(iocb->u.isp2x.lun));
+ if (qlt_abort_task(vha, iocb) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_RESOURCE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
+ "qla_target(%d): Out of resources, host %ld\n",
+ vha->vp_idx, vha->host_no);
+ break;
+
+ case IMM_NTFY_MSG_RX:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
+ "qla_target(%d): Immediate notify task %x\n",
+ vha->vp_idx, iocb->u.isp2x.task_flags);
+ if (qlt_handle_task_mgmt(vha, iocb) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_ELS:
+ if (qlt_24xx_handle_els(vha, iocb) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_SRR:
+ qlt_prepare_srr_imm(vha, iocb);
+ send_notify_ack = 0;
+ break;
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
+ "qla_target(%d): Received unknown immediate "
+ "notify status %x\n", vha->vp_idx, status);
+ break;
+ }
+
+ if (send_notify_ack)
+ qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ * This function sends busy to ISP 2xxx or 24xx.
+ */
+static void qlt_send_busy(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint16_t status)
+{
+ struct ctio7_to_24xx *ctio24;
+ struct qla_hw_data *ha = vha->hw;
+ request_t *pkt;
+ struct qla_tgt_sess *sess = NULL;
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+ atio->u.isp24.fcp_hdr.s_id);
+ if (!sess) {
+ qlt_send_term_exchange(vha, NULL, atio, 1);
+ return;
+ }
+ /* Sending a marker isn't necessary, since we are called from the ISR */
+
+ pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ if (!pkt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet", vha->vp_idx, __func__);
+ return;
+ }
+
+ pkt->entry_count = 1;
+ pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
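+ /* Build a status-only CTIO type 7 carrying the requested SCSI status */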
+ ctio24 = (struct ctio7_to_24xx *)pkt;
+ ctio24->entry_type = CTIO_TYPE7;
+ ctio24->nport_handle = sess->loop_id;
+ ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio24->vp_index = vha->vp_idx;
+ ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
+ __constant_cpu_to_le16(
+ CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
+ CTIO7_FLAGS_DONT_RET_CTIO);
+ /*
+ * A CTIO from the firmware without an se_cmd doesn't provide enough
+ * info to retry it if explicit confirmation is used.
+ */
+ ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ ctio24->u.status1.scsi_status = cpu_to_le16(status);
+ ctio24->u.status1.residual = get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[
+ atio->u.isp24.fcp_cmnd.add_cdb_len]);
+ if (ctio24->u.status1.residual != 0)
+ ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
+ qla2x00_start_iocbs(vha, vha->req);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ int rc;
+
+ if (unlikely(tgt == NULL)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
+ "ATIO pkt, but no tgt (ha %p)", ha);
+ return;
+ }
+ ql_dbg(ql_dbg_tgt, vha, 0xe02c,
+ "qla_target(%d): ATIO pkt %p: type %02x count %02x",
+ vha->vp_idx, atio, atio->u.raw.entry_type,
+ atio->u.raw.entry_count);
+ /*
+ * In tgt_stop mode we should also allow all requests to pass.
+ * Otherwise, some commands can get stuck.
+ */
+
+ tgt->irq_cmd_count++;
+
+ switch (atio->u.raw.entry_type) {
+ case ATIO_TYPE7:
+ ql_dbg(ql_dbg_tgt, vha, 0xe02d,
+ "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
+ "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
+ vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
+ atio->u.isp24.fcp_cmnd.rddata,
+ atio->u.isp24.fcp_cmnd.wrdata,
+ atio->u.isp24.fcp_cmnd.add_cdb_len,
+ be32_to_cpu(get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[
+ atio->u.isp24.fcp_cmnd.add_cdb_len])),
+ atio->u.isp24.fcp_hdr.s_id[0],
+ atio->u.isp24.fcp_hdr.s_id[1],
+ atio->u.isp24.fcp_hdr.s_id[2]);
+
+ if (unlikely(atio->u.isp24.exchange_addr ==
+ ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe058,
+ "qla_target(%d): ATIO_TYPE7 "
+ "received with UNKNOWN exchange address, "
+ "sending QUEUE_FULL\n", vha->vp_idx);
+ qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
+ break;
+ }
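+ /* Commands without task management flags take the normal I/O path */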
+ if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
+ rc = qlt_handle_cmd_for_atio(vha, atio);
+ else
+ rc = qlt_handle_task_mgmt(vha, atio);
+ if (unlikely(rc != 0)) {
+ if (rc == -ESRCH) {
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+ qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+#else
+ qlt_send_term_exchange(vha, NULL, atio, 1);
+#endif
+ } else {
+ if (tgt->tgt_stop) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe059,
+ "qla_target: Unable to send "
+ "command to target for req, "
+ "ignoring.\n");
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05a,
+ "qla_target(%d): Unable to send "
+ "command to target, sending BUSY "
+ "status.\n", vha->vp_idx);
+ qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+ }
+ }
+ }
+ break;
+
+ case IMMED_NOTIFY_TYPE:
+ {
+ if (unlikely(atio->u.isp2x.entry_status != 0)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05b,
+ "qla_target(%d): Received ATIO packet %x "
+ "with error status %x\n", vha->vp_idx,
+ atio->u.raw.entry_type,
+ atio->u.isp2x.entry_status);
+ break;
+ }
+ ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
+ qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
+ break;
+ }
+
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05c,
+ "qla_target(%d): Received unknown ATIO atio "
+ "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+ break;
+ }
+
+ tgt->irq_cmd_count--;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+
+ if (unlikely(tgt == NULL)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05d,
+ "qla_target(%d): Response pkt %x received, but no "
+ "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe02f,
+ "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
+ "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
+ pkt->entry_count, pkt->entry_status, pkt->handle);
+
+ /*
+ * In tgt_stop mode we should also allow all requests to pass.
+ * Otherwise, some commands can get stuck.
+ */
+
+ tgt->irq_cmd_count++;
+
+ switch (pkt->entry_type) {
+ case CTIO_TYPE7:
+ {
+ struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
+ vha->vp_idx);
+ qlt_do_ctio_completion(vha, entry->handle,
+ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+ entry);
+ break;
+ }
+
+ case ACCEPT_TGT_IO_TYPE:
+ {
+ struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
+ int rc;
+ ql_dbg(ql_dbg_tgt, vha, 0xe031,
+ "ACCEPT_TGT_IO instance %d status %04x "
+ "lun %04x read/write %d data_length %04x "
+ "target_id %02x rx_id %04x\n ", vha->vp_idx,
+ le16_to_cpu(atio->u.isp2x.status),
+ le16_to_cpu(atio->u.isp2x.lun),
+ atio->u.isp2x.execution_codes,
+ le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
+ atio), atio->u.isp2x.rx_id);
+ if (atio->u.isp2x.status !=
+ __constant_cpu_to_le16(ATIO_CDB_VALID)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05e,
+ "qla_target(%d): ATIO with error "
+ "status %x received\n", vha->vp_idx,
+ le16_to_cpu(atio->u.isp2x.status));
+ break;
+ }
+ ql_dbg(ql_dbg_tgt, vha, 0xe032,
+ "FCP CDB: 0x%02x, sizeof(cdb): %lu",
+ atio->u.isp2x.cdb[0], (unsigned long
+ int)sizeof(atio->u.isp2x.cdb));
+
+ rc = qlt_handle_cmd_for_atio(vha, atio);
+ if (unlikely(rc != 0)) {
+ if (rc == -ESRCH) {
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+ qlt_send_busy(vha, atio, 0);
+#else
+ qlt_send_term_exchange(vha, NULL, atio, 1);
+#endif
+ } else {
+ if (tgt->tgt_stop) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send "
+ "command to target, sending TERM "
+ "EXCHANGE for rsp\n");
+ qlt_send_term_exchange(vha, NULL,
+ atio, 1);
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send "
+ "command to target, sending BUSY "
+ "status\n", vha->vp_idx);
+ qlt_send_busy(vha, atio, 0);
+ }
+ }
+ }
+ }
+ break;
+
+ case CONTINUE_TGT_IO_TYPE:
+ {
+ struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe033,
+ "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
+ qlt_do_ctio_completion(vha, entry->handle,
+ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+ entry);
+ break;
+ }
+
+ case CTIO_A64_TYPE:
+ {
+ struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
+ vha->vp_idx);
+ qlt_do_ctio_completion(vha, entry->handle,
+ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+ entry);
+ break;
+ }
+
+ case IMMED_NOTIFY_TYPE:
+ ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
+ qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
+ break;
+
+ case NOTIFY_ACK_TYPE:
+ if (tgt->notify_ack_expected > 0) {
+ struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe036,
+ "NOTIFY_ACK seq %08x status %x\n",
+ le16_to_cpu(entry->u.isp2x.seq_id),
+ le16_to_cpu(entry->u.isp2x.status));
+ tgt->notify_ack_expected--;
+ if (entry->u.isp2x.status !=
+ __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe061,
+ "qla_target(%d): NOTIFY_ACK "
+ "failed %x\n", vha->vp_idx,
+ le16_to_cpu(entry->u.isp2x.status));
+ }
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe062,
+ "qla_target(%d): Unexpected NOTIFY_ACK received\n",
+ vha->vp_idx);
+ }
+ break;
+
+ case ABTS_RECV_24XX:
+ ql_dbg(ql_dbg_tgt, vha, 0xe037,
+ "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
+ qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
+ break;
+
+ case ABTS_RESP_24XX:
+ if (tgt->abts_resp_expected > 0) {
+ struct abts_resp_from_24xx_fw *entry =
+ (struct abts_resp_from_24xx_fw *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe038,
+ "ABTS_RESP_24XX: compl_status %x\n",
+ entry->compl_status);
+ tgt->abts_resp_expected--;
+ if (le16_to_cpu(entry->compl_status) !=
+ ABTS_RESP_COMPL_SUCCESS) {
+ if ((entry->error_subcode1 == 0x1E) &&
+ (entry->error_subcode2 == 0)) {
+ /*
+ * We've got a race here: aborted
+ * exchange not terminated, i.e.
+ * the response for the aborted command was
+ * sent after the abort request was
+ * received but before it was processed.
+ * Unfortunately, the firmware has a
+ * silly requirement that all aborted
+ * exchanges must be explicitly
+ * terminated, otherwise it refuses to
+ * send responses for the abort
+ * requests. So, we have to
+ * (re)terminate the exchange and retry
+ * the abort response.
+ */
+ qlt_24xx_retry_term_exchange(vha,
+ entry);
+ } else
+ ql_dbg(ql_dbg_tgt, vha, 0xe063,
+ "qla_target(%d): ABTS_RESP_24XX "
+ "failed %x (subcode %x:%x)",
+ vha->vp_idx, entry->compl_status,
+ entry->error_subcode1,
+ entry->error_subcode2);
+ }
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe064,
+ "qla_target(%d): Unexpected ABTS_RESP_24XX "
+ "received\n", vha->vp_idx);
+ }
+ break;
+
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe065,
+ "qla_target(%d): Received unknown response pkt "
+ "type %x\n", vha->vp_idx, pkt->entry_type);
+ break;
+ }
+
+ tgt->irq_cmd_count--;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
+ uint16_t *mailbox)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ int reason_code;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe039,
+ "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
+ vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
+ ha->operating_mode, ha->current_topology);
+
+ if (!ha->tgt.tgt_ops)
+ return;
+
+ if (unlikely(tgt == NULL)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03a,
+ "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
+ return;
+ }
+
+ if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
+ IS_QLA2100(ha))
+ return;
+ /*
+ * In tgt_stop mode we should also allow all requests to pass.
+ * Otherwise, some commands can get stuck.
+ */
+
+ tgt->irq_cmd_count++;
+
+ switch (code) {
+ case MBA_RESET: /* Reset */
+ case MBA_SYSTEM_ERR: /* System Error */
+ case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
+ case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
+ "qla_target(%d): System error async event %#x "
+ "occured", vha->vp_idx, code);
+ break;
+ case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+
+ case MBA_LOOP_UP:
+ {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
+ "qla_target(%d): Async LOOP_UP occured "
+ "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
+ le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
+ le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+ if (tgt->link_reinit_iocb_pending) {
+ qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
+ 0, 0, 0, 0, 0, 0);
+ tgt->link_reinit_iocb_pending = 0;
+ }
+ break;
+ }
+
+ case MBA_LIP_OCCURRED:
+ case MBA_LOOP_DOWN:
+ case MBA_LIP_RESET:
+ case MBA_RSCN_UPDATE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
+ "qla_target(%d): Async event %#x occured "
+ "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, code,
+ le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
+ le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+ break;
+
+ case MBA_PORT_UPDATE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
+ "qla_target(%d): Port update async event %#x "
+ "occured: updating the ports database (m[1]=%x, m[2]=%x, "
+ "m[3]=%x, m[4]=%x)", vha->vp_idx, code,
+ le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
+ le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+ reason_code = le16_to_cpu(mailbox[2]);
+ if (reason_code == 0x4)
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
+ "Async MB 2: Got PLOGI Complete\n");
+ else if (reason_code == 0x7)
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
+ "Async MB 2: Port Logged Out\n");
+ break;
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
+ "qla_target(%d): Async event %#x occured: "
+ "ignore (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
+ code, le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
+ le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+ break;
+ }
+
+ tgt->irq_cmd_count--;
+}
+
+static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
+ uint16_t loop_id)
+{
+ fc_port_t *fcport;
+ int rc;
+
+ fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+ if (!fcport) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
+ "qla_target(%d): Allocation of tmp FC port failed",
+ vha->vp_idx);
+ return NULL;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
+
+ fcport->loop_id = loop_id;
+
+ rc = qla2x00_get_port_database(vha, fcport, 0);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
+ "qla_target(%d): Failed to retrieve fcport "
+ "information -- get_port_database() returned %x "
+ "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
+ kfree(fcport);
+ return NULL;
+ }
+
+ return fcport;
+}
+
+/* Must be called under tgt_mutex */
+static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
+ uint8_t *s_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ fc_port_t *fcport = NULL;
+ int rc, global_resets;
+ uint16_t loop_id = 0;
+
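+ /* Retry if a global target reset races with the port database lookup */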
+retry:
+ global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+
+ rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
+ if (rc != 0) {
+ if ((s_id[0] == 0xFF) &&
+ (s_id[1] == 0xFC)) {
+ /*
+ * This is the Domain Controller, so it should be
+ * OK to drop SCSI commands from it.
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
+ "Unable to find initiator with S_ID %x:%x:%x",
+ s_id[0], s_id[1], s_id[2]);
+ } else
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
+ "qla_target(%d): Unable to find "
+ "initiator with S_ID %x:%x:%x",
+ vha->vp_idx, s_id[0], s_id[1],
+ s_id[2]);
+ return NULL;
+ }
+
+ fcport = qlt_get_port_database(vha, loop_id);
+ if (!fcport)
+ return NULL;
+
+ if (global_resets !=
+ atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
+ "qla_target(%d): global reset during session discovery "
+ "(counter was %d, new %d), retrying", vha->vp_idx,
+ global_resets,
+ atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+ goto retry;
+ }
+
+ sess = qlt_create_sess(vha, fcport, true);
+
+ kfree(fcport);
+ return sess;
+}
+
+static void qlt_abort_work(struct qla_tgt *tgt,
+ struct qla_tgt_sess_work_param *prm)
+{
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ unsigned long flags;
+ uint8_t s_id[3];
+ int rc;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (tgt->tgt_stop)
+ goto out_term;
+
+ s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
+ s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
+ s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ mutex_lock(&ha->tgt.tgt_mutex);
+ sess = qlt_make_local_sess(vha, s_id);
+ /* sess has got an extra creation ref */
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (!sess)
+ goto out_term;
+ } else {
+ kref_get(&sess->se_sess->sess_kref);
+ }
+
+ if (tgt->tgt_stop)
+ goto out_term;
+
+ rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+ if (rc != 0)
+ goto out_term;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ha->tgt.tgt_ops->put_sess(sess);
+ return;
+
+out_term:
+ qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+}
+
+static void qlt_tmr_work(struct qla_tgt *tgt,
+ struct qla_tgt_sess_work_param *prm)
+{
+ struct atio_from_isp *a = &prm->tm_iocb2;
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ unsigned long flags;
+ uint8_t *s_id = NULL; /* to hide compiler warnings */
+ int rc;
+ uint32_t lun, unpacked_lun;
+ int lun_size, fn;
+ void *iocb;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (tgt->tgt_stop)
+ goto out_term;
+
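+ /*
+ * The ATIO fcp_hdr already carries the S_ID in wire order, so it can
+ * be passed to the session lookup directly.
+ */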
+ s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ mutex_lock(&ha->tgt.tgt_mutex);
+ sess = qlt_make_local_sess(vha, s_id);
+ /* sess has got an extra creation ref */
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (!sess)
+ goto out_term;
+ } else {
+ kref_get(&sess->se_sess->sess_kref);
+ }
+
+ iocb = a;
+ lun = a->u.isp24.fcp_cmnd.lun;
+ lun_size = sizeof(lun);
+ fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+ if (rc != 0)
+ goto out_term;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ha->tgt.tgt_ops->put_sess(sess);
+ return;
+
+out_term:
+ qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+}
+
+static void qlt_sess_work_fn(struct work_struct *work)
+{
+ struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
+ struct scsi_qla_host *vha = tgt->vha;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
+
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ while (!list_empty(&tgt->sess_works_list)) {
+ struct qla_tgt_sess_work_param *prm = list_entry(
+ tgt->sess_works_list.next, typeof(*prm),
+ sess_works_list_entry);
+
+ /*
+ * This work can be scheduled on several CPUs at a time, so we
+ * must delete the entry to eliminate double processing
+ */
+ list_del(&prm->sess_works_list_entry);
+
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+ switch (prm->type) {
+ case QLA_TGT_SESS_WORK_ABORT:
+ qlt_abort_work(tgt, prm);
+ break;
+ case QLA_TGT_SESS_WORK_TM:
+ qlt_tmr_work(tgt, prm);
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+
+ kfree(prm);
+ }
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+}
+
+/* Must be called under tgt_host_action_mutex */
+int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
+{
+ struct qla_tgt *tgt;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return 0;
+
+ ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
+ "Registering target for host %ld(%p)", base_vha->host_no, ha);
+
+ BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));
+
+ tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
+ if (!tgt) {
+ ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
+ "Unable to allocate struct qla_tgt\n");
+ return -ENOMEM;
+ }
+
+ if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
+ base_vha->host->hostt->supported_mode |= MODE_TARGET;
+
+ tgt->ha = ha;
+ tgt->vha = base_vha;
+ init_waitqueue_head(&tgt->waitQ);
+ INIT_LIST_HEAD(&tgt->sess_list);
+ INIT_LIST_HEAD(&tgt->del_sess_list);
+ INIT_DELAYED_WORK(&tgt->sess_del_work,
+ (void (*)(struct work_struct *))qlt_del_sess_work_fn);
+ spin_lock_init(&tgt->sess_work_lock);
+ INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
+ INIT_LIST_HEAD(&tgt->sess_works_list);
+ spin_lock_init(&tgt->srr_lock);
+ INIT_LIST_HEAD(&tgt->srr_ctio_list);
+ INIT_LIST_HEAD(&tgt->srr_imm_list);
+ INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
+ atomic_set(&tgt->tgt_global_resets_count, 0);
+
+ ha->tgt.qla_tgt = tgt;
+
+ ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
+ "qla_target(%d): using 64 Bit PCI addressing",
+ base_vha->vp_idx);
+ tgt->tgt_enable_64bit_addr = 1;
+ /* 3 is reserved */
+ tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
+ tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
+ tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
+
+ mutex_lock(&qla_tgt_mutex);
+ list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
+ mutex_unlock(&qla_tgt_mutex);
+
+ return 0;
+}
+
+/* Must be called under tgt_host_action_mutex */
+int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
+{
+ if (!ha->tgt.qla_tgt)
+ return 0;
+
+ mutex_lock(&qla_tgt_mutex);
+ list_del(&ha->tgt.qla_tgt->tgt_list_entry);
+ mutex_unlock(&qla_tgt_mutex);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
+ vha->host_no, ha);
+ qlt_release(ha->tgt.qla_tgt);
+
+ return 0;
+}
+
+static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
+ unsigned char *b)
+{
+ int i;
+
+ pr_debug("qla2xxx HW vha->node_name: ");
+ for (i = 0; i < WWN_SIZE; i++)
+ pr_debug("%02x ", vha->node_name[i]);
+ pr_debug("\n");
+ pr_debug("qla2xxx HW vha->port_name: ");
+ for (i = 0; i < WWN_SIZE; i++)
+ pr_debug("%02x ", vha->port_name[i]);
+ pr_debug("\n");
+
+ pr_debug("qla2xxx passed configfs WWPN: ");
+ put_unaligned_be64(wwpn, b);
+ for (i = 0; i < WWN_SIZE; i++)
+ pr_debug("%02x ", b[i]);
+ pr_debug("\n");
+}
+
+/**
+ * qlt_lport_register - register lport with external module
+ *
+ * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
+ * @wwpn: Passed FC target WWPN
+ * @callback: lport initialization callback for tcm_qla2xxx code
+ * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
+ */
+int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
+ int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
+{
+ struct qla_tgt *tgt;
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha;
+ struct Scsi_Host *host;
+ unsigned long flags;
+ int rc;
+ u8 b[WWN_SIZE];
+
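+ /*
+ * Walk the global list of registered qla_tgt instances and bind to the
+ * physical port whose WWPN matches the one passed in from the fabric
+ * module, taking an extra scsi_host reference on the match.
+ */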
+ mutex_lock(&qla_tgt_mutex);
+ list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
+ vha = tgt->vha;
+ ha = vha->hw;
+
+ host = vha->host;
+ if (!host)
+ continue;
+
+ if (ha->tgt.tgt_ops != NULL)
+ continue;
+
+ if (!(host->hostt->supported_mode & MODE_TARGET))
+ continue;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (host->active_mode & MODE_TARGET) {
+ pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
+ host->host_no);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ continue;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (!scsi_host_get(host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe068,
+ "Unable to scsi_host_get() for"
+ " qla2xxx scsi_host\n");
+ continue;
+ }
+ qlt_lport_dump(vha, wwpn, b);
+
+ if (memcmp(vha->port_name, b, WWN_SIZE)) {
+ scsi_host_put(host);
+ continue;
+ }
+ /*
+ * Setup passed parameters ahead of invoking callback
+ */
+ ha->tgt.tgt_ops = qla_tgt_ops;
+ ha->tgt.target_lport_ptr = target_lport_ptr;
+ rc = (*callback)(vha);
+ if (rc != 0) {
+ ha->tgt.tgt_ops = NULL;
+ ha->tgt.target_lport_ptr = NULL;
+ }
+ mutex_unlock(&qla_tgt_mutex);
+ return rc;
+ }
+ mutex_unlock(&qla_tgt_mutex);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(qlt_lport_register);
+
+/**
+ * qlt_lport_deregister - Deregister lport
+ *
+ * @vha: Registered scsi_qla_host pointer
+ */
+void qlt_lport_deregister(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct Scsi_Host *sh = vha->host;
+ /*
+ * Clear the target_lport_ptr and qla_tgt_func_tmpl ops pointers in qla_hw_data
+ */
+ ha->tgt.target_lport_ptr = NULL;
+ ha->tgt.tgt_ops = NULL;
+ /*
+ * Release the Scsi_Host reference for the underlying qla2xxx host
+ */
+ scsi_host_put(sh);
+}
+EXPORT_SYMBOL(qlt_lport_deregister);
+
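+/*
+ * Map the configured qlini_mode onto the SCSI host's active_mode: target
+ * mode replaces initiator mode when initiator mode is disabled or
+ * exclusive, and is added alongside it when both modes are enabled.
+ */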
+/* Must be called under HW lock */
+void qlt_set_mode(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ switch (ql2x_ini_mode) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ vha->host->active_mode = MODE_TARGET;
+ break;
+ case QLA2XXX_INI_MODE_ENABLED:
+ vha->host->active_mode |= MODE_TARGET;
+ break;
+ default:
+ break;
+ }
+
+ if (ha->tgt.ini_mode_force_reverse)
+ qla_reverse_ini_mode(vha);
+}
+
+/* Must be called under HW lock */
+void qlt_clear_mode(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ switch (ql2x_ini_mode) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ vha->host->active_mode = MODE_UNKNOWN;
+ break;
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ vha->host->active_mode = MODE_INITIATOR;
+ break;
+ case QLA2XXX_INI_MODE_ENABLED:
+ vha->host->active_mode &= ~MODE_TARGET;
+ break;
+ default:
+ break;
+ }
+
+ if (ha->tgt.ini_mode_force_reverse)
+ qla_reverse_ini_mode(vha);
+}
+
+/*
+ * qlt_enable_vha - NO LOCK HELD
+ *
+ * host_reset, bring up w/ Target Mode Enabled
+ */
+void
+qlt_enable_vha(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ unsigned long flags;
+
+ if (!tgt) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe069,
+ "Unable to locate qla_tgt pointer from"
+ " struct qla_hw_data\n");
+ dump_stack();
+ return;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ tgt->tgt_stopped = 0;
+ qlt_set_mode(vha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_hba_online(vha);
+}
+EXPORT_SYMBOL(qlt_enable_vha);
+
+/*
+ * qlt_disable_vha - NO LOCK HELD
+ *
+ * Disable Target Mode and reset the adapter
+ */
+void
+qlt_disable_vha(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ unsigned long flags;
+
+ if (!tgt) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe06a,
+ "Unable to locate qla_tgt pointer from"
+ " struct qla_hw_data\n");
+ dump_stack();
+ return;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_clear_mode(vha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_hba_online(vha);
+}
+
+/*
+ * Called from qla_init.c:qla24xx_vport_create() context to set up
+ * the target mode specific struct scsi_qla_host and struct qla_hw_data
+ * members.
+ */
+void
+qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
+{
+ if (!qla_tgt_mode_enabled(vha))
+ return;
+
+ mutex_init(&ha->tgt.tgt_mutex);
+ mutex_init(&ha->tgt.tgt_host_action_mutex);
+
+ qlt_clear_mode(vha);
+
+ /*
+ * NOTE: Currently the value is kept the same for <24xx and
+ * >=24xx ISPs. If it is necessary to change it,
+ * the check should be added for specific ISPs,
+ * assigning the value appropriately.
+ */
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+}
+
+void
+qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
+{
+ /*
+ * FC-4 Feature bit 0 indicates target functionality to the name server.
+ */
+ if (qla_tgt_mode_enabled(vha)) {
+ if (qla_ini_mode_enabled(vha))
+ ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
+ else
+ ct_req->req.rff_id.fc4_feature = BIT_0;
+ } else if (qla_ini_mode_enabled(vha)) {
+ ct_req->req.rff_id.fc4_feature = BIT_1;
+ }
+}
+
+/*
+ * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
+ * @vha: HA context
+ *
+ * Beginning of ATIO ring has initialization control block already built
+ * by nvram config routine.
+ */
+void
+qlt_init_atio_q_entries(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t cnt;
+ struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
+
+ if (!qla_tgt_mode_enabled(vha))
+ return;
+
+ for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
+ pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt++;
+ }
+
+}
+
+/*
+ * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
+ * @vha: SCSI driver HA context
+ */
+void
+qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ struct atio_from_isp *pkt;
+ int cnt, i;
+
+ if (!vha->flags.online)
+ return;
+
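+ /*
+ * Consume ATIO ring entries written by the firmware until a stale
+ * (already processed) entry is found, marking each one processed and
+ * advancing the ring index as we go.
+ */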
+ while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
+ pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+ cnt = pkt->u.raw.entry_count;
+
+ qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
+
+ for (i = 0; i < cnt; i++) {
+ ha->tgt.atio_ring_index++;
+ if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
+ ha->tgt.atio_ring_index = 0;
+ ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+ } else
+ ha->tgt.atio_ring_ptr++;
+
+ pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+ }
+ wmb();
+ }
+
+ /* Adjust ring index */
+ WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
+}
+
+void
+qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+/* FIXME: atio_q in/out for ha->mqenable=1..? */
+ if (ha->mqenable) {
+#if 0
+ WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
+ WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
+ RD_REG_DWORD(&reg->isp25mq.atio_q_out);
+#endif
+ } else {
+ /* Set up ATIO registers for target mode */
+ WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
+ WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
+ RD_REG_DWORD(&reg->isp24.atio_q_out);
+ }
+}
+
+void
+qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (qla_tgt_mode_enabled(vha)) {
+ if (!ha->tgt.saved_set) {
+ /* We save only once */
+ ha->tgt.saved_exchange_count = nv->exchange_count;
+ ha->tgt.saved_firmware_options_1 =
+ nv->firmware_options_1;
+ ha->tgt.saved_firmware_options_2 =
+ nv->firmware_options_2;
+ ha->tgt.saved_firmware_options_3 =
+ nv->firmware_options_3;
+ ha->tgt.saved_set = 1;
+ }
+
+ nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+
+ /* Enable target mode */
+ nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+
+ /* Disable ini mode, if requested */
+ if (!qla_ini_mode_enabled(vha))
+ nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
+
+ /* Disable Full Login after LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ /* Enable initial LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+ /* Enable FC Tape support */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+ /* Disable Full Login after LIP */
+ nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ /* Enable target PRLI control */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+ } else {
+ if (ha->tgt.saved_set) {
+ nv->exchange_count = ha->tgt.saved_exchange_count;
+ nv->firmware_options_1 =
+ ha->tgt.saved_firmware_options_1;
+ nv->firmware_options_2 =
+ ha->tgt.saved_firmware_options_2;
+ nv->firmware_options_3 =
+ ha->tgt.saved_firmware_options_3;
+ }
+ return;
+ }
+
+ /* out-of-order frames reassembly */
+ nv->firmware_options_3 |= BIT_6|BIT_9;
+
+ if (ha->tgt.enable_class_2) {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) =
+ FC_COS_CLASS2 | FC_COS_CLASS3;
+
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+ } else {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+ nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+ }
+}
+
+void
+qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
+ struct init_cb_24xx *icb)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->tgt.node_name_set) {
+ memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+ icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+ }
+}
+
+int
+qlt_24xx_process_response_error(struct scsi_qla_host *vha,
+ struct sts_entry_24xx *pkt)
+{
+ switch (pkt->entry_type) {
+ case ABTS_RECV_24XX:
+ case ABTS_RESP_24XX:
+ case CTIO_TYPE7:
+ case NOTIFY_ACK_TYPE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+void
+qlt_modify_vp_config(struct scsi_qla_host *vha,
+ struct vp_config_entry_24xx *vpmod)
+{
+ if (qla_tgt_mode_enabled(vha))
+ vpmod->options_idx1 &= ~BIT_5;
+ /* Disable ini mode, if requested */
+ if (!qla_ini_mode_enabled(vha))
+ vpmod->options_idx1 &= ~BIT_4;
+}
+
+void
+qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ mutex_init(&ha->tgt.tgt_mutex);
+ mutex_init(&ha->tgt.tgt_host_action_mutex);
+ qlt_clear_mode(base_vha);
+}
+
+int
+qlt_mem_alloc(struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return 0;
+
+ ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
+ MAX_MULTI_ID_FABRIC, GFP_KERNEL);
+ if (!ha->tgt.tgt_vp_map)
+ return -ENOMEM;
+
+ ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
+ (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
+ &ha->tgt.atio_dma, GFP_KERNEL);
+ if (!ha->tgt.atio_ring) {
+ kfree(ha->tgt.tgt_vp_map);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void
+qlt_mem_free(struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (ha->tgt.atio_ring) {
+ dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
+ sizeof(struct atio_from_isp), ha->tgt.atio_ring,
+ ha->tgt.atio_dma);
+ }
+ kfree(ha->tgt.tgt_vp_map);
+}
+
+/* vport_slock to be held by the caller */
+void
+qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ switch (cmd) {
+ case SET_VP_IDX:
+ vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
+ break;
+ case SET_AL_PA:
+ vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
+ break;
+ case RESET_VP_IDX:
+ vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
+ break;
+ case RESET_AL_PA:
+ vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
+ break;
+ }
+}
+
+static int __init qlt_parse_ini_mode(void)
+{
+ if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
+ ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+ else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
+ ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
+ else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
+ ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
+ else
+ return false;
+
+ return true;
+}
+
+int __init qlt_init(void)
+{
+ int ret;
+
+ if (!qlt_parse_ini_mode()) {
+ ql_log(ql_log_fatal, NULL, 0xe06b,
+ "qlt_parse_ini_mode() failed\n");
+ return -EINVAL;
+ }
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return 0;
+
+ qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
+ sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
+ NULL);
+ if (!qla_tgt_cmd_cachep) {
+ ql_log(ql_log_fatal, NULL, 0xe06c,
+ "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
+ return -ENOMEM;
+ }
+
+ qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
+ sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
+ qla_tgt_mgmt_cmd), 0, NULL);
+ if (!qla_tgt_mgmt_cmd_cachep) {
+ ql_log(ql_log_fatal, NULL, 0xe06d,
+ "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
+ mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
+ if (!qla_tgt_mgmt_cmd_mempool) {
+ ql_log(ql_log_fatal, NULL, 0xe06e,
+ "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
+ ret = -ENOMEM;
+ goto out_mgmt_cmd_cachep;
+ }
+
+ qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
+ if (!qla_tgt_wq) {
+ ql_log(ql_log_fatal, NULL, 0xe06f,
+ "alloc_workqueue for qla_tgt_wq failed\n");
+ ret = -ENOMEM;
+ goto out_cmd_mempool;
+ }
+ /*
+ * Return 1 to signal that initiator-mode is being disabled
+ */
+ return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
+
+out_cmd_mempool:
+ mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+out_mgmt_cmd_cachep:
+ kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+out:
+ kmem_cache_destroy(qla_tgt_cmd_cachep);
+ return ret;
+}
+
+void qlt_exit(void)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ destroy_workqueue(qla_tgt_wq);
+ mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+ kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+ kmem_cache_destroy(qla_tgt_cmd_cachep);
+}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
new file mode 100644
index 000000000000..9ec19bc2f0fe
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -0,0 +1,1005 @@
+/*
+ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ * Copyright (C) 2004 - 2005 Leonid Stoljar
+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
+ *
+ * Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * Additional file for the target driver support.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Global definitions shared with the target-mode portion of the driver.
+ */
+
+#ifndef __QLA_TARGET_H
+#define __QLA_TARGET_H
+
+#include "qla_def.h"
+
+/*
+ * Must be changed on any change in any initiator visible interfaces or
+ * data in the target add-on
+ */
+#define QLA2XXX_TARGET_MAGIC 269
+
+/*
+ * Must be changed on any change in any target visible interfaces or
+ * data in the initiator
+ */
+#define QLA2XXX_INITIATOR_MAGIC 57222
+
+#define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive"
+#define QLA2XXX_INI_MODE_STR_DISABLED "disabled"
+#define QLA2XXX_INI_MODE_STR_ENABLED "enabled"
+
+#define QLA2XXX_INI_MODE_EXCLUSIVE 0
+#define QLA2XXX_INI_MODE_DISABLED 1
+#define QLA2XXX_INI_MODE_ENABLED 2
+
+#define QLA2XXX_COMMAND_COUNT_INIT 250
+#define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250
+
+/*
+ * Used to mark which completion handles (for RIO Status IOCBs) belong to
+ * CTIOs vs. regular (non-target) I/O. This is checked in
+ * qla2x00_process_response_queue() to see if a handle coming back in a
+ * multi-complete should be routed to the tgt driver or handled by qla2xxx
+ */
+#define CTIO_COMPLETION_HANDLE_MARK BIT_29
+#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
+#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS"
+#endif
+#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
+
+/* Used to mark CTIO as intermediate */
+#define CTIO_INTERMEDIATE_HANDLE_MARK BIT_30
+
+#ifndef OF_SS_MODE_0
+/*
+ * ISP target entries - Flags bit definitions.
+ */
+#define OF_SS_MODE_0 0
+#define OF_SS_MODE_1 1
+#define OF_SS_MODE_2 2
+#define OF_SS_MODE_3 3
+
+#define OF_EXPL_CONF BIT_5 /* Explicit Confirmation Requested */
+#define OF_DATA_IN BIT_6 /* Data in to initiator */
+ /* (data from target to initiator) */
+#define OF_DATA_OUT BIT_7 /* Data out from initiator */
+ /* (data from initiator to target) */
+#define OF_NO_DATA (BIT_7 | BIT_6)
+#define OF_INC_RC BIT_8 /* Increment command resource count */
+#define OF_FAST_POST BIT_9 /* Enable mailbox fast posting. */
+#define OF_CONF_REQ BIT_13 /* Confirmation Requested */
+#define OF_TERM_EXCH BIT_14 /* Terminate exchange */
+#define OF_SSTS BIT_15 /* Send SCSI status */
+#endif
+
+#ifndef QLA_TGT_DATASEGS_PER_CMD32
+#define QLA_TGT_DATASEGS_PER_CMD32 3
+#define QLA_TGT_DATASEGS_PER_CONT32 7
+#define QLA_TGT_MAX_SG32(ql) \
+ (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD32 + \
+ QLA_TGT_DATASEGS_PER_CONT32*((ql) - 1)) : 0)
+
+#define QLA_TGT_DATASEGS_PER_CMD64 2
+#define QLA_TGT_DATASEGS_PER_CONT64 5
+#define QLA_TGT_MAX_SG64(ql) \
+ (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD64 + \
+ QLA_TGT_DATASEGS_PER_CONT64*((ql) - 1)) : 0)
+#endif
+
+#ifndef QLA_TGT_DATASEGS_PER_CMD_24XX
+#define QLA_TGT_DATASEGS_PER_CMD_24XX 1
+#define QLA_TGT_DATASEGS_PER_CONT_24XX 5
+#define QLA_TGT_MAX_SG_24XX(ql) \
+ (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
+ QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
+#endif
+#endif
+
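+/*
+ * Extract the loop ID carried in a legacy ISP2xxx IOCB, using the extended
+ * 16-bit form when the HBA supports extended IDs.
+ */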
+#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \
+ ? le16_to_cpu((iocb)->u.isp2x.target.extended) \
+ : (uint16_t)(iocb)->u.isp2x.target.id.standard)
+
+#ifndef IMMED_NOTIFY_TYPE
+#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */
+/*
+ * ISP queue - immediate notify entry structure definition.
+ * This is sent by the ISP to the Target driver.
+ * This IOCB reports events from the initiator that the target
+ * driver must handle immediately.
+ */
+struct imm_ntfy_from_isp {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ union {
+ struct {
+ uint32_t sys_define_2; /* System defined. */
+ target_id_t target;
+ uint16_t lun;
+ uint8_t target_id;
+ uint8_t reserved_1;
+ uint16_t status_modifier;
+ uint16_t status;
+ uint16_t task_flags;
+ uint16_t seq_id;
+ uint16_t srr_rx_id;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+#define SRR_IU_DATA_IN 0x1
+#define SRR_IU_DATA_OUT 0x5
+#define SRR_IU_STATUS 0x7
+ uint16_t srr_ox_id;
+ uint8_t reserved_2[28];
+ } isp2x;
+ struct {
+ uint32_t reserved;
+ uint16_t nport_handle;
+ uint16_t reserved_2;
+ uint16_t flags;
+#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
+#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
+ uint16_t srr_rx_id;
+ uint16_t status;
+ uint8_t status_subcode;
+ uint8_t reserved_3;
+ uint32_t exchange_address;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+ uint16_t srr_ox_id;
+ uint8_t reserved_4[19];
+ uint8_t vp_index;
+ uint32_t reserved_5;
+ uint8_t port_id[3];
+ uint8_t reserved_6;
+ } isp24;
+ } u;
+ uint16_t reserved_7;
+ uint16_t ox_id;
+} __packed;
+#endif
+
+#ifndef NOTIFY_ACK_TYPE
+#define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. */
+/*
+ * ISP queue - notify acknowledge entry structure definition.
+ * This is sent to the ISP from the target driver.
+ */
+struct nack_to_isp {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ union {
+ struct {
+ uint32_t sys_define_2; /* System defined. */
+ target_id_t target;
+ uint8_t target_id;
+ uint8_t reserved_1;
+ uint16_t flags;
+ uint16_t resp_code;
+ uint16_t status;
+ uint16_t task_flags;
+ uint16_t seq_id;
+ uint16_t srr_rx_id;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+ uint16_t srr_flags;
+ uint16_t srr_reject_code;
+ uint8_t srr_reject_vendor_uniq;
+ uint8_t srr_reject_code_expl;
+ uint8_t reserved_2[24];
+ } isp2x;
+ struct {
+ uint32_t handle;
+ uint16_t nport_handle;
+ uint16_t reserved_1;
+ uint16_t flags;
+ uint16_t srr_rx_id;
+ uint16_t status;
+ uint8_t status_subcode;
+ uint8_t reserved_3;
+ uint32_t exchange_address;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+ uint16_t srr_flags;
+ uint8_t reserved_4[19];
+ uint8_t vp_index;
+ uint8_t srr_reject_vendor_uniq;
+ uint8_t srr_reject_code_expl;
+ uint8_t srr_reject_code;
+ uint8_t reserved_5[5];
+ } isp24;
+ } u;
+ uint8_t reserved[2];
+ uint16_t ox_id;
+} __packed;
+#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
+
+#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9
+
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL 0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a
+
+#define NOTIFY_ACK_SUCCESS 0x01
+#endif
+
+#ifndef ACCEPT_TGT_IO_TYPE
+#define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */
+#endif
+
+#ifndef CONTINUE_TGT_IO_TYPE
+#define CONTINUE_TGT_IO_TYPE 0x17
+/*
+ * ISP queue - Continue Target I/O (CTIO) entry for status mode 0 structure.
+ * This structure is sent to the ISP 2xxx from target driver.
+ */
+struct ctio_to_2xxx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ target_id_t target;
+ uint16_t rx_id;
+ uint16_t flags;
+ uint16_t status;
+ uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
+ uint16_t dseg_count; /* Data segment count. */
+ uint32_t relative_offset;
+ uint32_t residual;
+ uint16_t reserved_1[3];
+ uint16_t scsi_status;
+ uint32_t transfer_length;
+ uint32_t dseg_0_address; /* Data segment 0 address. */
+ uint32_t dseg_0_length; /* Data segment 0 length. */
+ uint32_t dseg_1_address; /* Data segment 1 address. */
+ uint32_t dseg_1_length; /* Data segment 1 length. */
+ uint32_t dseg_2_address; /* Data segment 2 address. */
+ uint32_t dseg_2_length; /* Data segment 2 length. */
+} __packed;
+#define ATIO_PATH_INVALID 0x07
+#define ATIO_CANT_PROV_CAP 0x16
+#define ATIO_CDB_VALID 0x3D
+
+#define ATIO_EXEC_READ BIT_1
+#define ATIO_EXEC_WRITE BIT_0
+#endif
+
+#ifndef CTIO_A64_TYPE
+#define CTIO_A64_TYPE 0x1F
+#define CTIO_SUCCESS 0x01
+#define CTIO_ABORTED 0x02
+#define CTIO_INVALID_RX_ID 0x08
+#define CTIO_TIMEOUT 0x0B
+#define CTIO_LIP_RESET 0x0E
+#define CTIO_TARGET_RESET 0x17
+#define CTIO_PORT_UNAVAILABLE 0x28
+#define CTIO_PORT_LOGGED_OUT 0x29
+#define CTIO_PORT_CONF_CHANGED 0x2A
+#define CTIO_SRR_RECEIVED 0x45
+#endif
+
+#ifndef CTIO_RET_TYPE
+#define CTIO_RET_TYPE 0x17 /* CTIO return entry */
+#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
+
+struct fcp_hdr {
+ uint8_t r_ctl;
+ uint8_t d_id[3];
+ uint8_t cs_ctl;
+ uint8_t s_id[3];
+ uint8_t type;
+ uint8_t f_ctl[3];
+ uint8_t seq_id;
+ uint8_t df_ctl;
+ uint16_t seq_cnt;
+ uint16_t ox_id;
+ uint16_t rx_id;
+ uint32_t parameter;
+} __packed;
+
+struct fcp_hdr_le {
+ uint8_t d_id[3];
+ uint8_t r_ctl;
+ uint8_t s_id[3];
+ uint8_t cs_ctl;
+ uint8_t f_ctl[3];
+ uint8_t type;
+ uint16_t seq_cnt;
+ uint8_t df_ctl;
+ uint8_t seq_id;
+ uint16_t rx_id;
+ uint16_t ox_id;
+ uint32_t parameter;
+} __packed;
+
+#define F_CTL_EXCH_CONTEXT_RESP BIT_23
+#define F_CTL_SEQ_CONTEXT_RESIP BIT_22
+#define F_CTL_LAST_SEQ BIT_20
+#define F_CTL_END_SEQ BIT_19
+#define F_CTL_SEQ_INITIATIVE BIT_16
+
+#define R_CTL_BASIC_LINK_SERV 0x80
+#define R_CTL_B_ACC 0x4
+#define R_CTL_B_RJT 0x5
+
+struct atio7_fcp_cmnd {
+ uint64_t lun;
+ uint8_t cmnd_ref;
+ uint8_t task_attr:3;
+ uint8_t reserved:5;
+ uint8_t task_mgmt_flags;
+#define FCP_CMND_TASK_MGMT_CLEAR_ACA 6
+#define FCP_CMND_TASK_MGMT_TARGET_RESET 5
+#define FCP_CMND_TASK_MGMT_LU_RESET 4
+#define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET 2
+#define FCP_CMND_TASK_MGMT_ABORT_TASK_SET 1
+ uint8_t wrdata:1;
+ uint8_t rddata:1;
+ uint8_t add_cdb_len:6;
+ uint8_t cdb[16];
+ /*
+ * add_cdb is optional and can be absent from struct atio7_fcp_cmnd. Size 4
+ * only to make sizeof(struct atio7_fcp_cmnd) be as expected by
+ * BUILD_BUG_ON in qlt_init().
+ */
+ uint8_t add_cdb[4];
+ /* uint32_t data_length; */
+} __packed;
+
+/*
+ * ISP queue - Accept Target I/O (ATIO) type entry IOCB structure.
+ * This is sent from the ISP to the target driver.
+ */
+struct atio_from_isp {
+ union {
+ struct {
+ uint16_t entry_hdr;
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t sys_define_2; /* System defined. */
+ target_id_t target;
+ uint16_t rx_id;
+ uint16_t flags;
+ uint16_t status;
+ uint8_t command_ref;
+ uint8_t task_codes;
+ uint8_t task_flags;
+ uint8_t execution_codes;
+ uint8_t cdb[MAX_CMDSZ];
+ uint32_t data_length;
+ uint16_t lun;
+ uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */
+ uint16_t reserved_32[6];
+ uint16_t ox_id;
+ } isp2x;
+ struct {
+ uint16_t entry_hdr;
+ uint8_t fcp_cmnd_len_low;
+ uint8_t fcp_cmnd_len_high:4;
+ uint8_t attr:4;
+ uint32_t exchange_addr;
+#define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF
+ struct fcp_hdr fcp_hdr;
+ struct atio7_fcp_cmnd fcp_cmnd;
+ } isp24;
+ struct {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t data[58];
+ uint32_t signature;
+#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
+ } raw;
+ } u;
+} __packed;
+
+#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
+
+/*
+ * ISP queue - Continue Target I/O (CTIO) type 7 entry (for 24xx) structure.
+ * This structure is sent to the ISP 24xx from the target driver.
+ */
+
+struct ctio7_to_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ uint16_t nport_handle;
+#define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF
+ uint16_t timeout;
+ uint16_t dseg_count; /* Data segment count. */
+ uint8_t vp_index;
+ uint8_t add_flags;
+ uint8_t initiator_id[3];
+ uint8_t reserved;
+ uint32_t exchange_addr;
+ union {
+ struct {
+ uint16_t reserved1;
+ uint16_t flags;
+ uint32_t residual;
+ uint16_t ox_id;
+ uint16_t scsi_status;
+ uint32_t relative_offset;
+ uint32_t reserved2;
+ uint32_t transfer_length;
+ uint32_t reserved3;
+ /* Data segment 0 address. */
+ uint32_t dseg_0_address[2];
+ /* Data segment 0 length. */
+ uint32_t dseg_0_length;
+ } status0;
+ struct {
+ uint16_t sense_length;
+ uint16_t flags;
+ uint32_t residual;
+ uint16_t ox_id;
+ uint16_t scsi_status;
+ uint16_t response_len;
+ uint16_t reserved;
+ uint8_t sense_data[24];
+ } status1;
+ } u;
+} __packed;
+
+/*
+ * ISP queue - CTIO type 7 entry returned from the ISP 24xx
+ * to the target driver.
+ */
+struct ctio7_from_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ uint16_t status;
+ uint16_t timeout;
+ uint16_t dseg_count; /* Data segment count. */
+ uint8_t vp_index;
+ uint8_t reserved1[5];
+ uint32_t exchange_address;
+ uint16_t reserved2;
+ uint16_t flags;
+ uint32_t residual;
+ uint16_t ox_id;
+ uint16_t reserved3;
+ uint32_t relative_offset;
+ uint8_t reserved4[24];
+} __packed;
+
+/* CTIO7 flags values */
+#define CTIO7_FLAGS_SEND_STATUS BIT_15
+#define CTIO7_FLAGS_TERMINATE BIT_14
+#define CTIO7_FLAGS_CONFORM_REQ BIT_13
+#define CTIO7_FLAGS_DONT_RET_CTIO BIT_8
+#define CTIO7_FLAGS_STATUS_MODE_0 0
+#define CTIO7_FLAGS_STATUS_MODE_1 BIT_6
+#define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5
+#define CTIO7_FLAGS_CONFIRM_SATISF BIT_4
+#define CTIO7_FLAGS_DSD_PTR BIT_2
+#define CTIO7_FLAGS_DATA_IN BIT_1
+#define CTIO7_FLAGS_DATA_OUT BIT_0
+
+#define ELS_PLOGI 0x3
+#define ELS_FLOGI 0x4
+#define ELS_LOGO 0x5
+#define ELS_PRLI 0x20
+#define ELS_PRLO 0x21
+#define ELS_TPRLO 0x24
+#define ELS_PDISC 0x50
+#define ELS_ADISC 0x52
+
+/*
+ * ISP queue - ABTS received/response entries structure definition for 24xx.
+ */
+#define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */
+#define ABTS_RESP_24XX 0x55 /* ABTS response (for 24xx) */
+
+/*
+ * ISP queue - ABTS received IOCB entry structure definition for 24xx.
+ * The ABTS BLS received from the wire is sent to the
+ * target driver by the ISP 24xx.
+ * The IOCB is placed on the response queue.
+ */
+struct abts_recv_from_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint8_t reserved_1[6];
+ uint16_t nport_handle;
+ uint8_t reserved_2[2];
+ uint8_t vp_index;
+ uint8_t reserved_3:4;
+ uint8_t sof_type:4;
+ uint32_t exchange_address;
+ struct fcp_hdr_le fcp_hdr_le;
+ uint8_t reserved_4[16];
+ uint32_t exchange_addr_to_abort;
+} __packed;
+
+#define ABTS_PARAM_ABORT_SEQ BIT_0
+
+struct ba_acc_le {
+ uint16_t reserved;
+ uint8_t seq_id_last;
+ uint8_t seq_id_valid;
+#define SEQ_ID_VALID 0x80
+#define SEQ_ID_INVALID 0x00
+ uint16_t rx_id;
+ uint16_t ox_id;
+ uint16_t high_seq_cnt;
+ uint16_t low_seq_cnt;
+} __packed;
+
+struct ba_rjt_le {
+ uint8_t vendor_uniq;
+ uint8_t reason_expl;
+ uint8_t reason_code;
+#define BA_RJT_REASON_CODE_INVALID_COMMAND 0x1
+#define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM 0x9
+ uint8_t reserved;
+} __packed;
+
+/*
+ * ISP queue - ABTS Response IOCB entry structure definition for 24xx.
+ * The ABTS response to the ABTS received is sent by the
+ * target driver to the ISP 24xx.
+ * The IOCB is placed on the request queue.
+ */
+struct abts_resp_to_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle;
+ uint16_t reserved_1;
+ uint16_t nport_handle;
+ uint16_t control_flags;
+#define ABTS_CONTR_FLG_TERM_EXCHG BIT_0
+ uint8_t vp_index;
+ uint8_t reserved_3:4;
+ uint8_t sof_type:4;
+ uint32_t exchange_address;
+ struct fcp_hdr_le fcp_hdr_le;
+ union {
+ struct ba_acc_le ba_acct;
+ struct ba_rjt_le ba_rjt;
+ } __packed payload;
+ uint32_t reserved_4;
+ uint32_t exchange_addr_to_abort;
+} __packed;
+
+/*
+ * ISP queue - ABTS Response IOCB from ISP24xx Firmware entry structure.
+ * The ABTS response with completion status to the ABTS response
+ * (sent by the target driver to the ISP 24xx) is sent by the
+ * ISP24xx firmware to the target driver.
+ * The IOCB is placed on the response queue.
+ */
+struct abts_resp_from_24xx_fw {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle;
+ uint16_t compl_status;
+#define ABTS_RESP_COMPL_SUCCESS 0
+#define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31
+ uint16_t nport_handle;
+ uint16_t reserved_1;
+ uint8_t reserved_2;
+ uint8_t reserved_3:4;
+ uint8_t sof_type:4;
+ uint32_t exchange_address;
+ struct fcp_hdr_le fcp_hdr_le;
+ uint8_t reserved_4[8];
+ uint32_t error_subcode1;
+#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E
+ uint32_t error_subcode2;
+ uint32_t exchange_addr_to_abort;
+} __packed;
+
+/********************************************************************\
+ * Type Definitions used by initiator & target halves
+\********************************************************************/
+
+struct qla_tgt_mgmt_cmd;
+struct qla_tgt_sess;
+
+/*
+ * This structure provides a template of function calls that the
+ * target driver (from within qla_target.c) can issue to the
+ * target module (tcm_qla2xxx).
+ */
+struct qla_tgt_func_tmpl {
+
+ int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
+ unsigned char *, uint32_t, int, int, int);
+ int (*handle_data)(struct qla_tgt_cmd *);
+ int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
+ uint32_t);
+ void (*free_cmd)(struct qla_tgt_cmd *);
+ void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
+ void (*free_session)(struct qla_tgt_sess *);
+
+ int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
+ void *, uint8_t *, uint16_t);
+ struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
+ const uint16_t);
+ struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
+ const uint8_t *);
+ void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
+ void (*put_sess)(struct qla_tgt_sess *);
+ void (*shutdown_sess)(struct qla_tgt_sess *);
+};
+
+int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
+
+#include <target/target_core_base.h>
+
+#define QLA_TGT_TIMEOUT 10 /* in seconds */
+
+#define QLA_TGT_MAX_HW_PENDING_TIME 60 /* in seconds */
+
+/* Immediate notify status constants */
+#define IMM_NTFY_LIP_RESET 0x000E
+#define IMM_NTFY_LIP_LINK_REINIT 0x000F
+#define IMM_NTFY_IOCB_OVERFLOW 0x0016
+#define IMM_NTFY_ABORT_TASK 0x0020
+#define IMM_NTFY_PORT_LOGOUT 0x0029
+#define IMM_NTFY_PORT_CONFIG 0x002A
+#define IMM_NTFY_GLBL_TPRLO 0x002D
+#define IMM_NTFY_GLBL_LOGO 0x002E
+#define IMM_NTFY_RESOURCE 0x0034
+#define IMM_NTFY_MSG_RX 0x0036
+#define IMM_NTFY_SRR 0x0045
+#define IMM_NTFY_ELS 0x0046
+
+/* Immediate notify task flags */
+#define IMM_NTFY_TASK_MGMT_SHIFT 8
+
+#define QLA_TGT_CLEAR_ACA 0x40
+#define QLA_TGT_TARGET_RESET 0x20
+#define QLA_TGT_LUN_RESET 0x10
+#define QLA_TGT_CLEAR_TS 0x04
+#define QLA_TGT_ABORT_TS 0x02
+#define QLA_TGT_ABORT_ALL_SESS 0xFFFF
+#define QLA_TGT_ABORT_ALL 0xFFFE
+#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD
+#define QLA_TGT_NEXUS_LOSS 0xFFFC
+
+/* Notify Acknowledge flags */
+#define NOTIFY_ACK_RES_COUNT BIT_8
+#define NOTIFY_ACK_CLEAR_LIP_RESET BIT_5
+#define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4
+
+/* Command's states */
+#define QLA_TGT_STATE_NEW 0 /* New command + target processing */
+#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */
+#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
+#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
+#define QLA_TGT_STATE_ABORTED 4 /* Command aborted */
+
+/* Special handles */
+#define QLA_TGT_NULL_HANDLE 0
+#define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK)
+
+/* ATIO task_codes field */
+#define ATIO_SIMPLE_QUEUE 0
+#define ATIO_HEAD_OF_QUEUE 1
+#define ATIO_ORDERED_QUEUE 2
+#define ATIO_ACA_QUEUE 4
+#define ATIO_UNTAGGED 5
+
+/* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */
+#define FC_TM_SUCCESS 0
+#define FC_TM_BAD_FCP_DATA 1
+#define FC_TM_BAD_CMD 2
+#define FC_TM_FCP_DATA_MISMATCH 3
+#define FC_TM_REJECT 4
+#define FC_TM_FAILED 5
+
+/*
+ * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
+ * terminated, so no further action is needed and success should be returned
+ * to target.
+ */
+#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
+
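+/*
+ * Split a 64-bit DMA address into low/high 32-bit halves for the IOCB
+ * descriptors; the double 16-bit shift avoids an undefined 32-bit shift
+ * when dma_addr_t is only 32 bits wide.
+ */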
+#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
+#else
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) 0
+#endif
+
+#define QLA_TGT_SENSE_VALID(sense) ((sense != NULL) && \
+ (((const uint8_t *)(sense))[0] & 0x70) == 0x70)
+
+struct qla_port_24xx_data {
+ uint8_t port_name[WWN_SIZE];
+ uint16_t loop_id;
+ uint16_t reserved;
+};
+
+struct qla_tgt {
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha;
+
+ /*
+ * To sync between IRQ handlers and qlt_target_release(). Needed,
+ * because req_pkt() can drop/reacquire the HW lock inside. Protected by
+ * HW lock.
+ */
+ int irq_cmd_count;
+
+ int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;
+
+ /* Target's flags, serialized by pha->hardware_lock */
+ unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addr enabled */
+ unsigned int link_reinit_iocb_pending:1;
+
+ /*
+ * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex
+ * OR hardware_lock for reading.
+ */
+ int tgt_stop; /* the target mode driver is being stopped */
+ int tgt_stopped; /* the target mode driver has been stopped */
+
+ /* Count of sessions referring to this qla_tgt. Protected by hardware_lock. */
+ int sess_count;
+
+ /* Protected by hardware_lock. Addition also protected by tgt_mutex. */
+ struct list_head sess_list;
+
+ /* Protected by hardware_lock */
+ struct list_head del_sess_list;
+ struct delayed_work sess_del_work;
+
+ spinlock_t sess_work_lock;
+ struct list_head sess_works_list;
+ struct work_struct sess_work;
+
+ struct imm_ntfy_from_isp link_reinit_iocb;
+ wait_queue_head_t waitQ;
+ int notify_ack_expected;
+ int abts_resp_expected;
+ int modify_lun_expected;
+
+ int ctio_srr_id;
+ int imm_srr_id;
+ spinlock_t srr_lock;
+ struct list_head srr_ctio_list;
+ struct list_head srr_imm_list;
+ struct work_struct srr_work;
+
+ atomic_t tgt_global_resets_count;
+
+ struct list_head tgt_list_entry;
+};
+
+/*
+ * Equivalent to an I_T nexus (Initiator-Target)
+ */
+struct qla_tgt_sess {
+ uint16_t loop_id;
+ port_id_t s_id;
+
+ unsigned int conf_compl_supported:1;
+ unsigned int deleted:1;
+ unsigned int local:1;
+ unsigned int tearing_down:1;
+
+ struct se_session *se_sess;
+ struct scsi_qla_host *vha;
+ struct qla_tgt *tgt;
+
+ struct list_head sess_list_entry;
+ unsigned long expires;
+ struct list_head del_list_entry;
+
+ uint8_t port_name[WWN_SIZE];
+ struct work_struct free_work;
+};
+
+struct qla_tgt_cmd {
+ struct qla_tgt_sess *sess;
+ int state;
+ struct se_cmd se_cmd;
+ struct work_struct free_work;
+ struct work_struct work;
+ /* Sense buffer that will be mapped into outgoing status */
+ unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+
+ /* to save extra sess dereferences */
+ unsigned int conf_compl_supported:1;
+ unsigned int sg_mapped:1;
+ unsigned int free_sg:1;
+ unsigned int aborted:1; /* Needed in case of SRR */
+ unsigned int write_data_transferred:1;
+
+ struct scatterlist *sg; /* cmd data buffer SG vector */
+ int sg_cnt; /* SG segments count */
+ int bufflen; /* cmd buffer length */
+ int offset;
+ uint32_t tag;
+ uint32_t unpacked_lun;
+ enum dma_data_direction dma_data_direction;
+
+ uint16_t loop_id; /* to save extra sess dereferences */
+ struct qla_tgt *tgt; /* to save extra sess dereferences */
+ struct scsi_qla_host *vha;
+ struct list_head cmd_list;
+
+ struct atio_from_isp atio;
+};
+
+struct qla_tgt_sess_work_param {
+ struct list_head sess_works_list_entry;
+
+#define QLA_TGT_SESS_WORK_ABORT 1
+#define QLA_TGT_SESS_WORK_TM 2
+ int type;
+
+ union {
+ struct abts_recv_from_24xx abts;
+ struct imm_ntfy_from_isp tm_iocb;
+ struct atio_from_isp tm_iocb2;
+ };
+};
+
+struct qla_tgt_mgmt_cmd {
+ uint8_t tmr_func;
+ uint8_t fc_tm_rsp;
+ struct qla_tgt_sess *sess;
+ struct se_cmd se_cmd;
+ struct work_struct free_work;
+ unsigned int flags;
+#define QLA24XX_MGMT_SEND_NACK 1
+ union {
+ struct atio_from_isp atio;
+ struct imm_ntfy_from_isp imm_ntfy;
+ struct abts_recv_from_24xx abts;
+ } __packed orig_iocb;
+};
+
+struct qla_tgt_prm {
+ struct qla_tgt_cmd *cmd;
+ struct qla_tgt *tgt;
+ void *pkt;
+ struct scatterlist *sg; /* cmd data buffer SG vector */
+ int seg_cnt;
+ int req_cnt;
+ uint16_t rq_result;
+ uint16_t scsi_status;
+ unsigned char *sense_buffer;
+ int sense_buffer_len;
+ int residual;
+ int add_status_pkt;
+};
+
+struct qla_tgt_srr_imm {
+ struct list_head srr_list_entry;
+ int srr_id;
+ struct imm_ntfy_from_isp imm_ntfy;
+};
+
+struct qla_tgt_srr_ctio {
+ struct list_head srr_list_entry;
+ int srr_id;
+ struct qla_tgt_cmd *cmd;
+};
+
+#define QLA_TGT_XMIT_DATA 1
+#define QLA_TGT_XMIT_STATUS 2
+#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
+
+#include <linux/version.h>
+
+extern struct qla_tgt_data qla_target;
+/*
+ * Internal function prototypes
+ */
+void qlt_disable_vha(struct scsi_qla_host *);
+
+/*
+ * Function prototypes for qla_target.c logic used by qla2xxx LLD code.
+ */
+extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64,
+ int (*callback)(struct scsi_qla_host *), void *);
+extern void qlt_lport_deregister(struct scsi_qla_host *);
+extern void qlt_unreg_sess(struct qla_tgt_sess *);
+extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_set_mode(struct scsi_qla_host *ha);
+extern void qlt_clear_mode(struct scsi_qla_host *ha);
+extern int __init qlt_init(void);
+extern void qlt_exit(void);
+extern void qlt_update_vp_map(struct scsi_qla_host *, int);
+
+/*
+ * This macro is used during early initializations when host->active_mode
+ * is not set. Right now, ha value is ignored.
+ */
+#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+
+static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
+{
+ return ha->host->active_mode & MODE_TARGET;
+}
+
+static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha)
+{
+ return ha->host->active_mode & MODE_INITIATOR;
+}
+
+static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
+{
+ if (ha->host->active_mode & MODE_INITIATOR)
+ ha->host->active_mode &= ~MODE_INITIATOR;
+ else
+ ha->host->active_mode |= MODE_INITIATOR;
+}
+
+/*
+ * Exported symbols from qla_target.c LLD logic used by qla2xxx code.
+ */
+extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
+ struct atio_from_isp *);
+extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
+extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
+extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
+extern void qlt_ctio_completion(struct scsi_qla_host *, uint32_t);
+extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
+extern void qlt_enable_vha(struct scsi_qla_host *);
+extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
+extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
+extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
+extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
+extern void qlt_24xx_config_rings(struct scsi_qla_host *,
+ device_reg_t __iomem *);
+extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
+ struct nvram_24xx *);
+extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
+ struct init_cb_24xx *);
+extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
+ struct sts_entry_24xx *);
+extern void qlt_modify_vp_config(struct scsi_qla_host *,
+ struct vp_config_entry_24xx *);
+extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
+extern int qlt_mem_alloc(struct qla_hw_data *);
+extern void qlt_mem_free(struct qla_hw_data *);
+extern void qlt_stop_phase1(struct qla_tgt *);
+extern void qlt_stop_phase2(struct qla_tgt *);
+
+#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
new file mode 100644
index 000000000000..436598f57404
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -0,0 +1,1955 @@
+/*******************************************************************************
+ * This file contains the TCM implementation using the v4 configfs fabric
+ * infrastructure for QLogic target mode HBAs
+ *
+ * (c) Copyright 2010-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL)
+ * version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ *
+ * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contain code from
+ * the TCM_FC / Open-FCoE.org fabric module.
+ *
+ * Copyright (c) 2010 Cisco Systems, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+#include "tcm_qla2xxx.h"
+
+struct workqueue_struct *tcm_qla2xxx_free_wq;
+struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+
+static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+/*
+ * Parse WWN.
+ * If strict, we require lower-case hex and colon separators to be sure
+ * the name is the same as what would be generated by tcm_qla2xxx_format_wwn()
+ * so the name and wwn are mapped one-to-one.
+ */
+static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
+{
+ const char *cp;
+ char c;
+ u32 nibble;
+ u32 byte = 0;
+ u32 pos = 0;
+ u32 err;
+
+ *wwn = 0;
+ for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
+ c = *cp;
+ if (c == '\n' && cp[1] == '\0')
+ continue;
+ if (strict && pos++ == 2 && byte++ < 7) {
+ pos = 0;
+ if (c == ':')
+ continue;
+ err = 1;
+ goto fail;
+ }
+ if (c == '\0') {
+ err = 2;
+ if (strict && byte != 8)
+ goto fail;
+ return cp - name;
+ }
+ err = 3;
+ if (isdigit(c))
+ nibble = c - '0';
+ else if (isxdigit(c) && (islower(c) || !strict))
+ nibble = tolower(c) - 'a' + 10;
+ else
+ goto fail;
+ *wwn = (*wwn << 4) | nibble;
+ }
+ err = 4;
+fail:
+ pr_debug("err %u len %zu pos %u byte %u\n",
+ err, cp - name, pos, byte);
+ return -1;
+}
+
+static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
+{
+ u8 b[8];
+
+ put_unaligned_be64(wwn, b);
+ return snprintf(buf, len,
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+ b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
+}
+
+static char *tcm_qla2xxx_get_fabric_name(void)
+{
+ return "qla2xxx";
+}
+
+/*
+ * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
+ */
+static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
+{
+ unsigned int i, j;
+ int value;
+ u8 wwn[8];
+
+ memset(wwn, 0, sizeof(wwn));
+
+ /* Validate and store the new name */
+ for (i = 0, j = 0; i < 16; i++) {
+ value = hex_to_bin(*ns++);
+ if (value >= 0)
+ j = (j << 4) | value;
+ else
+ return -EINVAL;
+
+ if (i % 2) {
+ wwn[i/2] = j & 0xff;
+ j = 0;
+ }
+ }
+
+ *nm = wwn_to_u64(wwn);
+ return 0;
+}
+
+/*
+ * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
+ * store_fc_host_vport_create()
+ */
+static int tcm_qla2xxx_npiv_parse_wwn(
+ const char *name,
+ size_t count,
+ u64 *wwpn,
+ u64 *wwnn)
+{
+ unsigned int cnt = count;
+ int rc;
+
+ *wwpn = 0;
+ *wwnn = 0;
+
+ /* count may include a LF at end of string */
+ if (name[cnt-1] == '\n')
+ cnt--;
+
+ /* validate we have enough characters for WWPN */
+ if ((cnt != (16+1+16)) || (name[16] != ':'))
+ return -EINVAL;
+
+ rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
+ if (rc != 0)
+ return rc;
+
+ rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
+ if (rc != 0)
+ return rc;
+
+ return 0;
+}
+
+static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
+ u64 wwpn, u64 wwnn)
+{
+ u8 b[8], b2[8];
+
+ put_unaligned_be64(wwpn, b);
+ put_unaligned_be64(wwnn, b2);
+ return snprintf(buf, len,
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+ b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
+ b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
+}
+
+static char *tcm_qla2xxx_npiv_get_fabric_name(void)
+{
+ return "qla2xxx_npiv";
+}
+
+static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ u8 proto_id;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ proto_id = fc_get_fabric_proto_ident(se_tpg);
+ break;
+ }
+
+ return proto_id;
+}
+
+static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+ return &lport->lport_name[0];
+}
+
+static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+ return &lport->lport_npiv_name[0];
+}
+
+static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ return tpg->lport_tpgt;
+}
+
+static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static u32 tcm_qla2xxx_get_pr_transport_id(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ int ret = 0;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ break;
+ }
+
+ return ret;
+}
+
+static u32 tcm_qla2xxx_get_pr_transport_id_len(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ int ret = 0;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ break;
+ }
+
+ return ret;
+}
+
+static char *tcm_qla2xxx_parse_pr_out_transport_id(
+ struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ char *tid = NULL;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ break;
+ }
+
+ return tid;
+}
+
+static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
+}
+
+static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+}
+
+static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+}
+
+static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+}
+
+static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
+ struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_nacl *nacl;
+
+ nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
+ if (!nacl) {
+ pr_err("Unable to alocate struct tcm_qla2xxx_nacl\n");
+ return NULL;
+ }
+
+ return &nacl->se_node_acl;
+}
+
+static void tcm_qla2xxx_release_fabric_acl(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl)
+{
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+ kfree(nacl);
+}
+
+static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return tpg->lport_tpgt;
+}
+
+static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
+{
+ struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
+ struct qla_tgt_mgmt_cmd, free_work);
+
+ transport_generic_free_cmd(&mcmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_mcmd(), and will call
+ * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
+ * release callback. qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+ INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
+ queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
+}
+
+static void tcm_qla2xxx_complete_free(struct work_struct *work)
+{
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_cmd(), and will call
+ * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
+ * release callback. qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
+{
+ INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
+ queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+}
+
+/*
+ * Called from struct target_core_fabric_ops->check_stop_free() context
+ */
+static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
+{
+ return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+}
+
+/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release the underlying
+ * fabric descriptor for the @se_cmd command
+ */
+static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd;
+
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+ struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+ struct qla_tgt_mgmt_cmd, se_cmd);
+ qlt_free_mcmd(mcmd);
+ return;
+ }
+
+ cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+ qlt_free_cmd(cmd);
+}
+
+static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
+{
+ struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+ struct scsi_qla_host *vha;
+ unsigned long flags;
+
+ BUG_ON(!sess);
+ vha = sess->vha;
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ sess->tearing_down = 1;
+ target_splice_sess_cmd_list(se_sess);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+ return 1;
+}
+
+static void tcm_qla2xxx_close_session(struct se_session *se_sess)
+{
+ struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+ struct scsi_qla_host *vha;
+ unsigned long flags;
+
+ BUG_ON(!sess);
+ vha = sess->vha;
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ qlt_unreg_sess(sess);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
+static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+/*
+ * The LIO target core uses DMA_TO_DEVICE to mean that data is going
+ * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
+ * that data is coming from the target (eg handling a READ). However,
+ * this is just the opposite of what we have to tell the DMA mapping
+ * layer -- eg when handling a READ, the HBA will have to DMA the data
+ * out of memory so it can send it to the initiator, which means we
+ * need to use DMA_TO_DEVICE when we map the data.
+ */
+static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
+{
+ if (se_cmd->se_cmd_flags & SCF_BIDI)
+ return DMA_BIDIRECTIONAL;
+
+ switch (se_cmd->data_direction) {
+ case DMA_TO_DEVICE:
+ return DMA_FROM_DEVICE;
+ case DMA_FROM_DEVICE:
+ return DMA_TO_DEVICE;
+ case DMA_NONE:
+ default:
+ return DMA_NONE;
+ }
+}
+
+static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+
+ cmd->bufflen = se_cmd->data_length;
+ cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->sg = se_cmd->t_data_sg;
+
+ /*
+ * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
+ * the SGL mappings into PCIe memory for incoming FCP WRITE data.
+ */
+ return qlt_rdy_to_xfer(cmd);
+}
+
+static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
+{
+ unsigned long flags;
+ /*
+ * Check for WRITE_PENDING status to determine if we need to wait for
+ * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
+ */
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+ if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
+ se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+ wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
+ 3000);
+ return 0;
+ }
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+ return 0;
+}
+
+static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
+{
+ return;
+}
+
+static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+
+ return cmd->tag;
+}
+
+static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+/*
+ * Called from process context in qla_target.c:qlt_do_work() code
+ */
+static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+ unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
+ int data_dir, int bidi)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct se_session *se_sess;
+ struct qla_tgt_sess *sess;
+ int flags = TARGET_SCF_ACK_KREF;
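+ /*
+ * TARGET_SCF_ACK_KREF: the extra command reference taken by the core
+ * is dropped via target_put_sess_cmd() in tcm_qla2xxx_check_stop_free()
+ */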
+
+ if (bidi)
+ flags |= TARGET_SCF_BIDI_OP;
+
+ sess = cmd->sess;
+ if (!sess) {
+ pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
+ return -EINVAL;
+ }
+
+ se_sess = sess->se_sess;
+ if (!se_sess) {
+ pr_err("Unable to locate active struct se_session\n");
+ return -EINVAL;
+ }
+
+ target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
+ cmd->unpacked_lun, data_length, fcp_task_attr,
+ data_dir, flags);
+ return 0;
+}
+
+static void tcm_qla2xxx_do_rsp(struct work_struct *work)
+{
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ /*
+ * Dispatch ->queue_status from workqueue process context
+ */
+ transport_generic_request_failure(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_do_ctio_completion()
+ */
+static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ unsigned long flags;
+ /*
+ * Ensure that the complete FCP WRITE payload has been received.
+ * Otherwise return an exception via CHECK_CONDITION status.
+ */
+ if (!cmd->write_data_transferred) {
+ /*
+ * Check if se_cmd has already been aborted via LUN_RESET, and
+ * waiting upon completion in tcm_qla2xxx_write_pending_status()
+ */
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+ if (se_cmd->transport_state & CMD_T_ABORTED) {
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+ complete(&se_cmd->t_transport_stop_comp);
+ return 0;
+ }
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+ se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
+ INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp);
+ queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+ return 0;
+ }
+ /*
+ * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE
+ * status to the backstore processing thread.
+ */
+ return transport_generic_handle_data(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_issue_task_mgmt()
+ */
+int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
+ uint8_t tmr_func, uint32_t tag)
+{
+ struct qla_tgt_sess *sess = mcmd->sess;
+ struct se_cmd *se_cmd = &mcmd->se_cmd;
+
+ return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
+ tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
+}
+
+static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+
+ cmd->bufflen = se_cmd->data_length;
+ cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+ cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->sg = se_cmd->t_data_sg;
+ cmd->offset = 0;
+
+ /*
+ * Now queue completed DATA_IN to the qla2xxx LLD and response ring
+ */
+ return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
+ se_cmd->scsi_status);
+}
+
+static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+ int xmit_type = QLA_TGT_XMIT_STATUS;
+
+ cmd->bufflen = se_cmd->data_length;
+ cmd->sg = NULL;
+ cmd->sg_cnt = 0;
+ cmd->offset = 0;
+ cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+ cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+ if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+ /*
+ * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
+ * for qla_tgt_xmit_response LLD code
+ */
+ se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ se_cmd->residual_count = se_cmd->data_length;
+
+ cmd->bufflen = 0;
+ }
+ /*
+ * Now queue status response to qla2xxx LLD code and response ring
+ */
+ return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+}
+
+static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+ struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+ struct qla_tgt_mgmt_cmd, se_cmd);
+
+ pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
+ mcmd, se_tmr->function, se_tmr->response);
+ /*
+ * Do translation between TCM TM response codes and
+ * QLA2xxx FC TM response codes.
+ */
+ switch (se_tmr->response) {
+ case TMR_FUNCTION_COMPLETE:
+ mcmd->fc_tm_rsp = FC_TM_SUCCESS;
+ break;
+ case TMR_TASK_DOES_NOT_EXIST:
+ mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
+ break;
+ case TMR_FUNCTION_REJECTED:
+ mcmd->fc_tm_rsp = FC_TM_REJECT;
+ break;
+ case TMR_LUN_DOES_NOT_EXIST:
+ default:
+ mcmd->fc_tm_rsp = FC_TM_FAILED;
+ break;
+ }
+ /*
+ * Queue the TM response to QLA2xxx LLD to build a
+ * CTIO response packet.
+ */
+ qlt_xmit_tm_rsp(mcmd);
+
+ return 0;
+}
+
+static u16 tcm_qla2xxx_get_fabric_sense_len(void)
+{
+ return 0;
+}
+
+static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd,
+ u32 sense_length)
+{
+ return 0;
+}
+
+/* Local pointer to allocated TCM configfs fabric module */
+struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
+struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
+
+static int tcm_qla2xxx_setup_nacl_from_rport(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct tcm_qla2xxx_lport *lport,
+ struct tcm_qla2xxx_nacl *nacl,
+ u64 rport_wwnn)
+{
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct Scsi_Host *sh = vha->host;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
+ struct fc_rport *rport;
+ unsigned long flags;
+ void *node;
+ int rc;
+
+ /*
+ * Scan the existing rports, and create a session for the
+ * explicit NodeACL if a matching rport->node_name already
+ * exists.
+ */
+ spin_lock_irqsave(sh->host_lock, flags);
+ list_for_each_entry(rport, &fc_host->rports, peers) {
+ if (rport_wwnn != rport->node_name)
+ continue;
+
+ pr_debug("Located existing rport_wwpn and rport->node_name: 0x%016LX, port_id: 0x%04x\n",
+ rport->node_name, rport->port_id);
+ nacl->nport_id = rport->port_id;
+
+ spin_unlock_irqrestore(sh->host_lock, flags);
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ node = btree_lookup32(&lport->lport_fcport_map, rport->port_id);
+ if (node) {
+ rc = btree_update32(&lport->lport_fcport_map,
+ rport->port_id, se_nacl);
+ } else {
+ rc = btree_insert32(&lport->lport_fcport_map,
+ rport->port_id, se_nacl,
+ GFP_ATOMIC);
+ }
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+ if (rc) {
+ pr_err("Unable to insert se_nacl into fcport_map");
+ WARN_ON(rc > 0);
+ return rc;
+ }
+
+ pr_debug("Inserted into fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%08x\n",
+ se_nacl, rport_wwnn, nacl->nport_id);
+
+ return 1;
+ }
+ spin_unlock_irqrestore(sh->host_lock, flags);
+
+ return 0;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
+{
+ struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+ void *node;
+
+ pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
+
+ node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
+ WARN_ON(node && (node != se_nacl));
+
+ pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
+ se_nacl, nacl->nport_wwnn, nacl->nport_id);
+}
+
+static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
+{
+ target_put_session(sess->se_sess);
+}
+
+static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
+{
+ tcm_qla2xxx_shutdown_session(sess->se_sess);
+}
+
+static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
+ struct se_portal_group *se_tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct se_node_acl *se_nacl, *se_nacl_new;
+ struct tcm_qla2xxx_nacl *nacl;
+ u64 wwnn;
+ u32 qla2xxx_nexus_depth;
+ int rc;
+
+ if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
+ return ERR_PTR(-EINVAL);
+
+ se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
+ if (!se_nacl_new)
+ return ERR_PTR(-ENOMEM);
+/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
+ qla2xxx_nexus_depth = 1;
+
+ /*
+ * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+ * when converting a NodeACL from demo mode -> explicit
+ */
+ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+ name, qla2xxx_nexus_depth);
+ if (IS_ERR(se_nacl)) {
+ tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
+ return se_nacl;
+ }
+ /*
+ * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
+ */
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+ nacl->nport_wwnn = wwnn;
+ tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
+ /*
+ * Setup a se_nacl handle based on a matching struct fc_rport setup
+ * via drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+ */
+ rc = tcm_qla2xxx_setup_nacl_from_rport(se_tpg, se_nacl, lport,
+ nacl, wwnn);
+ if (rc < 0) {
+ tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
+ return ERR_PTR(rc);
+ }
+
+ return se_nacl;
+}
+
+static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
+{
+ struct se_portal_group *se_tpg = se_acl->se_tpg;
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+
+ core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
+ kfree(nacl);
+}
+
+/* Start items for tcm_qla2xxx_tpg_attrib_cit */
+
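+/*
+ * Generate configfs show/store handlers for a TPG attribute; the store
+ * handler parses the value with kstrtoul() and applies it through
+ * tcm_qla2xxx_set_attrib_##name().
+ */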
+#define DEF_QLA_TPG_ATTRIB(name) \
+ \
+static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \
+ struct se_portal_group *se_tpg, \
+ char *page) \
+{ \
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
+ struct tcm_qla2xxx_tpg, se_tpg); \
+ \
+ return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \
+} \
+ \
+static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \
+ struct se_portal_group *se_tpg, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
+ struct tcm_qla2xxx_tpg, se_tpg); \
+ unsigned long val; \
+ int ret; \
+ \
+ ret = kstrtoul(page, 0, &val); \
+ if (ret < 0) { \
+ pr_err("kstrtoul() failed with" \
+ " ret: %d\n", ret); \
+ return -EINVAL; \
+ } \
+ ret = tcm_qla2xxx_set_attrib_##name(tpg, val); \
+ \
+ return (!ret) ? count : -EINVAL; \
+}
+
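+/*
+ * Generate a tcm_qla2xxx_set_attrib_##_name() helper that only accepts
+ * the boolean values 0 and 1.
+ */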
+#define DEF_QLA_TPG_ATTR_BOOL(_name) \
+ \
+static int tcm_qla2xxx_set_attrib_##_name( \
+ struct tcm_qla2xxx_tpg *tpg, \
+ unsigned long val) \
+{ \
+ struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \
+ \
+ if ((val != 0) && (val != 1)) { \
+ pr_err("Illegal boolean value %lu\n", val); \
+ return -EINVAL; \
+ } \
+ \
+ a->_name = val; \
+ return 0; \
+}
+
+#define QLA_TPG_ATTR(_name, _mode) \
+ TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
+DEF_QLA_TPG_ATTRIB(generate_node_acls);
+QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
+DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
+QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
+QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
+QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
+ &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
+ &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
+ &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
+ &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
+ NULL,
+};
+
+/* End items for tcm_qla2xxx_tpg_attrib_cit */
+
+static ssize_t tcm_qla2xxx_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ atomic_read(&tpg->lport_tpg_enabled));
+}
+
+static ssize_t tcm_qla2xxx_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ unsigned long op;
+ int rc;
+
+ rc = kstrtoul(page, 0, &op);
+ if (rc < 0) {
+ pr_err("kstrtoul() returned %d\n", rc);
+ return -EINVAL;
+ }
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %lu\n", op);
+ return -EINVAL;
+ }
+
+ if (op) {
+ atomic_set(&tpg->lport_tpg_enabled, 1);
+ qlt_enable_vha(vha);
+ } else {
+ if (!ha->tgt.qla_tgt) {
+ pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
+ return -ENODEV;
+ }
+ atomic_set(&tpg->lport_tpg_enabled, 0);
+ qlt_stop_phase1(ha->tgt.qla_tgt);
+ }
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
+ &tcm_qla2xxx_tpg_enable.attr,
+ NULL,
+};
+
+static struct se_portal_group *tcm_qla2xxx_make_tpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct tcm_qla2xxx_tpg *tpg;
+ unsigned long tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ if (!lport->qla_npiv_vp && (tpgt != 1)) {
+ pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
+ return ERR_PTR(-ENOSYS);
+ }
+
+ tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+ if (!tpg) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ tpg->lport = lport;
+ tpg->lport_tpgt = tpgt;
+ /*
+ * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
+ * NodeACLs
+ */
+ QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
+ QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
+ QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
+
+ ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
+ &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ /*
+ * Setup local TPG=1 pointer for non NPIV mode.
+ */
+ if (lport->qla_npiv_vp == NULL)
+ lport->tpg_1 = tpg;
+
+ return &tpg->se_tpg;
+}
+
+static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct qla_hw_data *ha = vha->hw;
+ /*
+ * Call into qla_target.c LLD logic to shutdown the active
+ * FC Nexuses and disable target mode operation for this qla_hw_data
+ */
+ if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
+ qlt_stop_phase1(ha->tgt.qla_tgt);
+
+ core_tpg_deregister(se_tpg);
+ /*
+ * Clear local TPG=1 pointer for non NPIV mode.
+ */
+ if (lport->qla_npiv_vp == NULL)
+ lport->tpg_1 = NULL;
+
+ kfree(tpg);
+}
+
+static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct tcm_qla2xxx_tpg *tpg;
+ unsigned long tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+ if (!tpg) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ tpg->lport = lport;
+ tpg->lport_tpgt = tpgt;
+
+ ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
+ &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ return &tpg->se_tpg;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
+ scsi_qla_host_t *vha,
+ const uint8_t *s_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_lport *lport;
+ struct se_node_acl *se_nacl;
+ struct tcm_qla2xxx_nacl *nacl;
+ u32 key;
+
+ lport = ha->tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return NULL;
+ }
+
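+ /* Assemble the 24-bit FC Port_ID (domain, area, al_pa) used as btree key */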
+ key = (((unsigned long)s_id[0] << 16) |
+ ((unsigned long)s_id[1] << 8) |
+ (unsigned long)s_id[2]);
+ pr_debug("find_sess_by_s_id: 0x%06x\n", key);
+
+ se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
+ if (!se_nacl) {
+ pr_debug("Unable to locate s_id: 0x%06x\n", key);
+ return NULL;
+ }
+ pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
+ se_nacl, se_nacl->initiatorname);
+
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+ if (!nacl->qla_tgt_sess) {
+ pr_err("Unable to locate struct qla_tgt_sess\n");
+ return NULL;
+ }
+
+ return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_set_sess_by_s_id(
+ struct tcm_qla2xxx_lport *lport,
+ struct se_node_acl *new_se_nacl,
+ struct tcm_qla2xxx_nacl *nacl,
+ struct se_session *se_sess,
+ struct qla_tgt_sess *qla_tgt_sess,
+ uint8_t *s_id)
+{
+ u32 key;
+ void *slot;
+ int rc;
+
+ key = (((unsigned long)s_id[0] << 16) |
+ ((unsigned long)s_id[1] << 8) |
+ (unsigned long)s_id[2]);
+ pr_debug("set_sess_by_s_id: %06x\n", key);
+
+ slot = btree_lookup32(&lport->lport_fcport_map, key);
+ if (!slot) {
+ if (new_se_nacl) {
+ pr_debug("Setting up new fc_port entry to new_se_nacl\n");
+ nacl->nport_id = key;
+ rc = btree_insert32(&lport->lport_fcport_map, key,
+ new_se_nacl, GFP_ATOMIC);
+ if (rc)
+ printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
+ (int)key);
+ } else {
+ pr_debug("Wiping nonexisting fc_port entry\n");
+ }
+
+ qla_tgt_sess->se_sess = se_sess;
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (nacl->qla_tgt_sess) {
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
+ btree_remove32(&lport->lport_fcport_map, key);
+ nacl->qla_tgt_sess = NULL;
+ return;
+ }
+ pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
+ btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+ qla_tgt_sess->se_sess = se_sess;
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing existing fc_port entry\n");
+ btree_remove32(&lport->lport_fcport_map, key);
+ return;
+ }
+
+ pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
+ btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+ qla_tgt_sess->se_sess = se_sess;
+ nacl->qla_tgt_sess = qla_tgt_sess;
+
+ pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
+ nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
+ scsi_qla_host_t *vha,
+ const uint16_t loop_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_lport *lport;
+ struct se_node_acl *se_nacl;
+ struct tcm_qla2xxx_nacl *nacl;
+ struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+ lport = ha->tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return NULL;
+ }
+
+ pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
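+ /* Index directly into the flat per-loop_id map allocated in tcm_qla2xxx_init_lport() */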
+ fc_loopid = lport->lport_loopid_map + loop_id;
+ se_nacl = fc_loopid->se_nacl;
+ if (!se_nacl) {
+ pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
+ loop_id);
+ return NULL;
+ }
+
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+ if (!nacl->qla_tgt_sess) {
+ pr_err("Unable to locate struct qla_tgt_sess\n");
+ return NULL;
+ }
+
+ return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_set_sess_by_loop_id(
+ struct tcm_qla2xxx_lport *lport,
+ struct se_node_acl *new_se_nacl,
+ struct tcm_qla2xxx_nacl *nacl,
+ struct se_session *se_sess,
+ struct qla_tgt_sess *qla_tgt_sess,
+ uint16_t loop_id)
+{
+ struct se_node_acl *saved_nacl;
+ struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+ pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+ fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
+ lport->lport_loopid_map)[loop_id];
+
+ saved_nacl = fc_loopid->se_nacl;
+ if (!saved_nacl) {
+ pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
+ fc_loopid->se_nacl = new_se_nacl;
+ if (qla_tgt_sess->se_sess != se_sess)
+ qla_tgt_sess->se_sess = se_sess;
+ if (nacl->qla_tgt_sess != qla_tgt_sess)
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (nacl->qla_tgt_sess) {
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+ fc_loopid->se_nacl = NULL;
+ nacl->qla_tgt_sess = NULL;
+ return;
+ }
+
+ pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+ fc_loopid->se_nacl = new_se_nacl;
+ if (qla_tgt_sess->se_sess != se_sess)
+ qla_tgt_sess->se_sess = se_sess;
+ if (nacl->qla_tgt_sess != qla_tgt_sess)
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing fc_loopid->se_nacl\n");
+ fc_loopid->se_nacl = NULL;
+ return;
+ }
+
+ pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
+ fc_loopid->se_nacl = new_se_nacl;
+ if (qla_tgt_sess->se_sess != se_sess)
+ qla_tgt_sess->se_sess = se_sess;
+ if (nacl->qla_tgt_sess != qla_tgt_sess)
+ nacl->qla_tgt_sess = qla_tgt_sess;
+
+ pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
+ nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
+{
+ struct qla_tgt *tgt = sess->tgt;
+ struct qla_hw_data *ha = tgt->ha;
+ struct se_session *se_sess;
+ struct se_node_acl *se_nacl;
+ struct tcm_qla2xxx_lport *lport;
+ struct tcm_qla2xxx_nacl *nacl;
+ unsigned char be_sid[3];
+ unsigned long flags;
+
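+ /* Session teardown below can sleep, so this must run from process context */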
+ BUG_ON(in_interrupt());
+
+ se_sess = sess->se_sess;
+ if (!se_sess) {
+ pr_err("struct qla_tgt_sess->se_sess is NULL\n");
+ dump_stack();
+ return;
+ }
+ se_nacl = se_sess->se_node_acl;
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+ lport = ha->tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return;
+ }
+ target_wait_for_sess_cmds(se_sess, 0);
+ /*
+ * And now clear the se_nacl and session pointers from our HW lport
+ * mappings for fabric S_ID and LOOP_ID.
+ */
+ memset(&be_sid, 0, 3);
+ be_sid[0] = sess->s_id.b.domain;
+ be_sid[1] = sess->s_id.b.area;
+ be_sid[2] = sess->s_id.b.al_pa;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
+ sess, be_sid);
+ tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
+ sess, sess->loop_id);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ transport_deregister_session_configfs(sess->se_sess);
+ transport_deregister_session(sess->se_sess);
+}
+
+/*
+ * Called from qla_target.c:qlt_create_sess() via the registered
+ * qla_tgt_func_tmpl->check_initiator_node_acl() callback to locate
+ * struct se_node_acl
+ */
+static int tcm_qla2xxx_check_initiator_node_acl(
+ scsi_qla_host_t *vha,
+ unsigned char *fc_wwpn,
+ void *qla_tgt_sess,
+ uint8_t *s_id,
+ uint16_t loop_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_lport *lport;
+ struct tcm_qla2xxx_tpg *tpg;
+ struct tcm_qla2xxx_nacl *nacl;
+ struct se_portal_group *se_tpg;
+ struct se_node_acl *se_nacl;
+ struct se_session *se_sess;
+ struct qla_tgt_sess *sess = qla_tgt_sess;
+ unsigned char port_name[36];
+ unsigned long flags;
+
+ lport = ha->tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return -EINVAL;
+ }
+ /*
+ * Locate the TPG=1 reference..
+ */
+ tpg = lport->tpg_1;
+ if (!tpg) {
+ pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
+ return -EINVAL;
+ }
+ se_tpg = &tpg->se_tpg;
+
+ se_sess = transport_init_session();
+ if (IS_ERR(se_sess)) {
+ pr_err("Unable to initialize struct se_session\n");
+ return PTR_ERR(se_sess);
+ }
+ /*
+ * Format the FCP Initiator port_name into colon separated values to
+ * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
+ */
+ memset(&port_name, 0, 36);
+ snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+ fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
+ fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
+ /*
+ * Locate our struct se_node_acl either from an explicit NodeACL created
+ * via ConfigFS, or via running in TPG demo mode.
+ */
+ se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
+ port_name);
+ if (!se_sess->se_node_acl) {
+ transport_free_session(se_sess);
+ return -EINVAL;
+ }
+ se_nacl = se_sess->se_node_acl;
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+ /*
+ * And now setup the new se_nacl and session pointers into our HW lport
+ * mappings for fabric S_ID and LOOP_ID.
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
+ qla_tgt_sess, s_id);
+ tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
+ qla_tgt_sess, loop_id);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ /*
+ * Finally register the new FC Nexus with TCM
+ */
+ __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
+
+ return 0;
+}
+
+/*
+ * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
+ */
+static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
+ .handle_cmd = tcm_qla2xxx_handle_cmd,
+ .handle_data = tcm_qla2xxx_handle_data,
+ .handle_tmr = tcm_qla2xxx_handle_tmr,
+ .free_cmd = tcm_qla2xxx_free_cmd,
+ .free_mcmd = tcm_qla2xxx_free_mcmd,
+ .free_session = tcm_qla2xxx_free_session,
+ .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
+ .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
+ .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
+ .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
+ .put_sess = tcm_qla2xxx_put_sess,
+ .shutdown_sess = tcm_qla2xxx_shutdown_sess,
+};
+
+static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
+{
+ int rc;
+
+ rc = btree_init32(&lport->lport_fcport_map);
+ if (rc) {
+ pr_err("Unable to initialize lport->lport_fcport_map btree\n");
+ return rc;
+ }
+
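+ /* One lookup slot per possible 16-bit FC loop ID (65536 entries) */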
+ lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
+ 65536);
+ if (!lport->lport_loopid_map) {
+ pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
+ sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+ btree_destroy32(&lport->lport_fcport_map);
+ return -ENOMEM;
+ }
+ memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
+ * 65536);
+ pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
+ sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+ return 0;
+}
+
+static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_lport *lport;
+ /*
+ * Setup local pointer to vha, NPIV VP pointer (if present) and
+ * vha->tcm_lport pointer
+ */
+ lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
+ lport->qla_vha = vha;
+
+ return 0;
+}
+
+static struct se_wwn *tcm_qla2xxx_make_lport(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport;
+ u64 wwpn;
+ int ret = -ENODEV;
+
+ if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
+ return ERR_PTR(-EINVAL);
+
+ lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+ if (!lport) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ lport->lport_wwpn = wwpn;
+ tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
+ wwpn);
+
+ ret = tcm_qla2xxx_init_lport(lport);
+ if (ret != 0)
+ goto out;
+
+ ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
+ tcm_qla2xxx_lport_register_cb, lport);
+ if (ret != 0)
+ goto out_lport;
+
+ return &lport->lport_wwn;
+out_lport:
+ vfree(lport->lport_loopid_map);
+ btree_destroy32(&lport->lport_fcport_map);
+out:
+ kfree(lport);
+ return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct se_node_acl *node;
+ u32 key = 0;
+
+ /*
+ * Call into qla_target.c LLD logic to complete the
+ * shutdown of struct qla_tgt after the call to
+ * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
+ */
+ if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
+ qlt_stop_phase2(ha->tgt.qla_tgt);
+
+ qlt_lport_deregister(vha);
+
+ vfree(lport->lport_loopid_map);
+ btree_for_each_safe32(&lport->lport_fcport_map, key, node)
+ btree_remove32(&lport->lport_fcport_map, key);
+ btree_destroy32(&lport->lport_fcport_map);
+ kfree(lport);
+}
+
+static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport;
+ u64 npiv_wwpn, npiv_wwnn;
+ int ret;
+
+ if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name),
+ &npiv_wwpn, &npiv_wwnn) < 0)
+ return ERR_PTR(-EINVAL);
+
+ lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+ if (!lport) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ lport->lport_npiv_wwpn = npiv_wwpn;
+ lport->lport_npiv_wwnn = npiv_wwnn;
+ tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
+ TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
+
+/* FIXME: tcm_qla2xxx_npiv_make_lport */
+ ret = -ENOSYS;
+ if (ret != 0)
+ goto out;
+
+ return &lport->lport_wwn;
+out:
+ kfree(lport);
+ return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct Scsi_Host *sh = vha->host;
+ /*
+ * Notify libfc that we want to release the lport->npiv_vport
+ */
+ fc_vport_terminate(lport->npiv_vport);
+
+ scsi_host_put(sh);
+ kfree(lport);
+}
+
+
+static ssize_t tcm_qla2xxx_wwn_show_attr_version(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ return sprintf(page,
+ "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
+ UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+ utsname()->machine);
+}
+
+TF_WWN_ATTR_RO(tcm_qla2xxx, version);
+
+static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
+ &tcm_qla2xxx_wwn_version.attr,
+ NULL,
+};
+
+static struct target_core_fabric_ops tcm_qla2xxx_ops = {
+ .get_fabric_name = tcm_qla2xxx_get_fabric_name,
+ .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
+ .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
+ .tpg_get_tag = tcm_qla2xxx_get_tag,
+ .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
+ .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
+ .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect =
+ tcm_qla2xxx_check_demo_write_protect,
+ .tpg_check_prod_mode_write_protect =
+ tcm_qla2xxx_check_prod_write_protect,
+ .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+ .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
+ .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
+ .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
+ .new_cmd_map = NULL,
+ .check_stop_free = tcm_qla2xxx_check_stop_free,
+ .release_cmd = tcm_qla2xxx_release_cmd,
+ .shutdown_session = tcm_qla2xxx_shutdown_session,
+ .close_session = tcm_qla2xxx_close_session,
+ .sess_get_index = tcm_qla2xxx_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = tcm_qla2xxx_write_pending,
+ .write_pending_status = tcm_qla2xxx_write_pending_status,
+ .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
+ .get_task_tag = tcm_qla2xxx_get_task_tag,
+ .get_cmd_state = tcm_qla2xxx_get_cmd_state,
+ .queue_data_in = tcm_qla2xxx_queue_data_in,
+ .queue_status = tcm_qla2xxx_queue_status,
+ .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
+ .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
+ .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
+ /*
+ * Setup function pointers for generic logic in
+ * target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = tcm_qla2xxx_make_lport,
+ .fabric_drop_wwn = tcm_qla2xxx_drop_lport,
+ .fabric_make_tpg = tcm_qla2xxx_make_tpg,
+ .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
+ .fabric_post_link = NULL,
+ .fabric_pre_unlink = NULL,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
+ .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
+};
+
+static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
+ .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
+ .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
+ .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn,
+ .tpg_get_tag = tcm_qla2xxx_get_tag,
+ .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
+ .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = tcm_qla2xxx_check_false,
+ .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true,
+ .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
+ .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
+ .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+ .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
+ .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
+ .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
+ .release_cmd = tcm_qla2xxx_release_cmd,
+ .shutdown_session = tcm_qla2xxx_shutdown_session,
+ .close_session = tcm_qla2xxx_close_session,
+ .sess_get_index = tcm_qla2xxx_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = tcm_qla2xxx_write_pending,
+ .write_pending_status = tcm_qla2xxx_write_pending_status,
+ .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
+ .get_task_tag = tcm_qla2xxx_get_task_tag,
+ .get_cmd_state = tcm_qla2xxx_get_cmd_state,
+ .queue_data_in = tcm_qla2xxx_queue_data_in,
+ .queue_status = tcm_qla2xxx_queue_status,
+ .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
+ .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
+ .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
+ /*
+ * Setup function pointers for generic logic in
+ * target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = tcm_qla2xxx_npiv_make_lport,
+ .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport,
+ .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg,
+ .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
+ .fabric_post_link = NULL,
+ .fabric_pre_unlink = NULL,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
+ .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
+};
+
+static int tcm_qla2xxx_register_configfs(void)
+{
+ struct target_fabric_configfs *fabric, *npiv_fabric;
+ int ret;
+
+ pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
+ UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+ utsname()->machine);
+ /*
+ * Register the top level struct config_item_type with TCM core
+ */
+ fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
+ if (IS_ERR(fabric)) {
+ pr_err("target_fabric_configfs_init() failed\n");
+ return PTR_ERR(fabric);
+ }
+ /*
+ * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
+ */
+ fabric->tf_ops = tcm_qla2xxx_ops;
+ /*
+ * Setup default attribute lists for various fabric->tf_cit_tmpl
+ */
+ TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
+ tcm_qla2xxx_tpg_attrib_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ /*
+ * Register the fabric for use within TCM
+ */
+ ret = target_fabric_configfs_register(fabric);
+ if (ret < 0) {
+ pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+ return ret;
+ }
+ /*
+ * Setup our local pointer to *fabric
+ */
+ tcm_qla2xxx_fabric_configfs = fabric;
+ pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");
+
+ /*
+ * Register the top level struct config_item_type for NPIV with TCM core
+ */
+ npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
+ if (IS_ERR(npiv_fabric)) {
+ pr_err("target_fabric_configfs_init() failed\n");
+ ret = PTR_ERR(npiv_fabric);
+ goto out_fabric;
+ }
+ /*
+ * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
+ */
+ npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
+ /*
+ * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
+ */
+ TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ /*
+ * Register the npiv_fabric for use within TCM
+ */
+ ret = target_fabric_configfs_register(npiv_fabric);
+ if (ret < 0) {
+ pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+ goto out_fabric;
+ }
+ /*
+ * Setup our local pointer to *npiv_fabric
+ */
+ tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
+ pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");
+
+ tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
+ WQ_MEM_RECLAIM, 0);
+ if (!tcm_qla2xxx_free_wq) {
+ ret = -ENOMEM;
+ goto out_fabric_npiv;
+ }
+
+ tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
+ if (!tcm_qla2xxx_cmd_wq) {
+ ret = -ENOMEM;
+ goto out_free_wq;
+ }
+
+ return 0;
+
+out_free_wq:
+ destroy_workqueue(tcm_qla2xxx_free_wq);
+out_fabric_npiv:
+ target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+out_fabric:
+ target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+ return ret;
+}
+
+static void tcm_qla2xxx_deregister_configfs(void)
+{
+ destroy_workqueue(tcm_qla2xxx_cmd_wq);
+ destroy_workqueue(tcm_qla2xxx_free_wq);
+
+ target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+ tcm_qla2xxx_fabric_configfs = NULL;
+ pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");
+
+ target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+ tcm_qla2xxx_npiv_fabric_configfs = NULL;
+ pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
+}
+
+static int __init tcm_qla2xxx_init(void)
+{
+ int ret;
+
+ ret = tcm_qla2xxx_register_configfs();
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void __exit tcm_qla2xxx_exit(void)
+{
+ tcm_qla2xxx_deregister_configfs();
+}
+
+MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
+MODULE_LICENSE("GPL");
+module_init(tcm_qla2xxx_init);
+module_exit(tcm_qla2xxx_exit);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
new file mode 100644
index 000000000000..825498103352
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -0,0 +1,82 @@
+#include <target/target_core_base.h>
+#include <linux/btree.h>
+
+#define TCM_QLA2XXX_VERSION "v0.1"
+/* length of ASCII WWPNs including pad */
+#define TCM_QLA2XXX_NAMELEN 32
+/* length of ASCII NPIV 'WWPN+WWNN' including pad */
+#define TCM_QLA2XXX_NPIV_NAMELEN 66
+
+#include "qla_target.h"
+
+struct tcm_qla2xxx_nacl {
+ /* From libfc struct fc_rport->port_id */
+ u32 nport_id;
+ /* Binary World Wide unique Node Name for remote FC Initiator Nport */
+ u64 nport_wwnn;
+ /* ASCII formatted WWPN for FC Initiator Nport */
+ char nport_name[TCM_QLA2XXX_NAMELEN];
+ /* Pointer to qla_tgt_sess */
+ struct qla_tgt_sess *qla_tgt_sess;
+ /* Pointer to TCM FC nexus */
+ struct se_session *nport_nexus;
+ /* Returned by tcm_qla2xxx_make_nodeacl() */
+ struct se_node_acl se_node_acl;
+};
+
+struct tcm_qla2xxx_tpg_attrib {
+ int generate_node_acls;
+ int cache_dynamic_acls;
+ int demo_mode_write_protect;
+ int prod_mode_write_protect;
+};
+
+struct tcm_qla2xxx_tpg {
+ /* FC lport target portal group tag for TCM */
+ u16 lport_tpgt;
+ /* Atomic bit to determine TPG active status */
+ atomic_t lport_tpg_enabled;
+ /* Pointer back to tcm_qla2xxx_lport */
+ struct tcm_qla2xxx_lport *lport;
+ /* Used by tcm_qla2xxx_tpg_attrib_cit */
+ struct tcm_qla2xxx_tpg_attrib tpg_attrib;
+ /* Returned by tcm_qla2xxx_make_tpg() */
+ struct se_portal_group se_tpg;
+};
+
+#define QLA_TPG_ATTRIB(tpg) (&(tpg)->tpg_attrib)
+
+struct tcm_qla2xxx_fc_loopid {
+ struct se_node_acl *se_nacl;
+};
+
+struct tcm_qla2xxx_lport {
+ /* SCSI protocol the lport is providing */
+ u8 lport_proto_id;
+ /* Binary World Wide unique Port Name for FC Target Lport */
+ u64 lport_wwpn;
+ /* Binary World Wide unique Port Name for FC NPIV Target Lport */
+ u64 lport_npiv_wwpn;
+ /* Binary World Wide unique Node Name for FC NPIV Target Lport */
+ u64 lport_npiv_wwnn;
+ /* ASCII formatted WWPN for FC Target Lport */
+ char lport_name[TCM_QLA2XXX_NAMELEN];
+ /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
+ char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
+ /* map for fc_port pointers in 24-bit FC Port ID space */
+ struct btree_head32 lport_fcport_map;
+ /* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
+ struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
+ /* Pointer to struct scsi_qla_host from qla2xxx LLD */
+ struct scsi_qla_host *qla_vha;
+ /* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
+ struct scsi_qla_host *qla_npiv_vp;
+ /* Embedded struct qla_tgt for this FC Target Lport */
+ struct qla_tgt lport_qla_tgt;
+ /* Pointer to struct fc_vport for NPIV vport from libfc */
+ struct fc_vport *npiv_vport;
+ /* Pointer to TPG=1 for non NPIV mode */
+ struct tcm_qla2xxx_tpg *tpg_1;
+ /* Returned by tcm_qla2xxx_make_lport() */
+ struct se_wwn lport_wwn;
+};
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 0b0a7d42137d..c681b2a355e1 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -9,6 +9,140 @@
#include "ql4_glbl.h"
#include "ql4_dbg.h"
+static ssize_t
+qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *ba, char *buf, loff_t off,
+ size_t count)
+{
+ struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+
+ if (!is_qla8022(ha))
+ return -EINVAL;
+
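+ /* Dump data is exposed only after reading has been armed via the write handler */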
+ if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
+ return 0;
+
+ return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+ ha->fw_dump_size);
+}
+
+static ssize_t
+qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *ba, char *buf, loff_t off,
+ size_t count)
+{
+ struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ uint32_t dev_state;
+ long reading;
+ int ret = 0;
+
+ if (!is_qla8022(ha))
+ return -EINVAL;
+
+ if (off != 0)
+ return ret;
+
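+ /* Only the first character of the written value is significant */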
+ buf[1] = 0;
+ ret = kstrtol(buf, 10, &reading);
+ if (ret) {
+ ql4_printk(KERN_ERR, ha, "%s: Invalid input. Return err %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ switch (reading) {
+ case 0:
+ /* clear dump collection flags */
+ if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+ clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+ /* Reload minidump template */
+ qla4xxx_alloc_fw_dump(ha);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Firmware template reloaded\n"));
+ }
+ break;
+ case 1:
+ /* Set flag to read dump */
+ if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) &&
+ !test_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+ set_bit(AF_82XX_DUMP_READING, &ha->flags);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Raw firmware dump ready for read on (%ld).\n",
+ ha->host_no));
+ }
+ break;
+ case 2:
+ /* Reset HBA */
+ qla4_8xxx_idc_lock(ha);
+ dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ if (dev_state == QLA82XX_DEV_READY) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: Setting Need reset, reset_owner is 0x%x.\n",
+ __func__, ha->func_num);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_NEED_RESET);
+ set_bit(AF_82XX_RST_OWNER, &ha->flags);
+ } else
+ ql4_printk(KERN_INFO, ha,
+ "%s: Reset not performed as device state is 0x%x\n",
+ __func__, dev_state);
+
+ qla4_8xxx_idc_unlock(ha);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+
+ return count;
+}
+
+static struct bin_attribute sysfs_fw_dump_attr = {
+ .attr = {
+ .name = "fw_dump",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = 0,
+ .read = qla4_8xxx_sysfs_read_fw_dump,
+ .write = qla4_8xxx_sysfs_write_fw_dump,
+};
+
+static struct sysfs_entry {
+ char *name;
+ struct bin_attribute *attr;
+} bin_file_entries[] = {
+ { "fw_dump", &sysfs_fw_dump_attr },
+ { NULL },
+};
+
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha)
+{
+ struct Scsi_Host *host = ha->host;
+ struct sysfs_entry *iter;
+ int ret;
+
+ for (iter = bin_file_entries; iter->name; iter++) {
+ ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
+ iter->attr);
+ if (ret)
+ ql4_printk(KERN_ERR, ha,
+ "Unable to create sysfs %s binary attribute (%d).\n",
+ iter->name, ret);
+ }
+}
+
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha)
+{
+ struct Scsi_Host *host = ha->host;
+ struct sysfs_entry *iter;
+
+ for (iter = bin_file_entries; iter->name; iter++)
+ sysfs_remove_bin_file(&host->shost_gendev.kobj,
+ iter->attr);
+}
+
/* Scsi_Host attributes. */
static ssize_t
qla4xxx_fw_version_show(struct device *dev,
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 7f2492e88be7..96a5616a8fda 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -398,6 +398,16 @@ struct isp_operations {
int (*get_sys_info) (struct scsi_qla_host *);
};
+struct ql4_mdump_size_table {
+ uint32_t size;
+ uint32_t size_cmask_02;
+ uint32_t size_cmask_04;
+ uint32_t size_cmask_08;
+ uint32_t size_cmask_10;
+ uint32_t size_cmask_FF;
+ uint32_t version;
+};
+
/*qla4xxx ipaddress configuration details */
struct ipaddress_config {
uint16_t ipv4_options;
@@ -485,6 +495,10 @@ struct scsi_qla_host {
#define AF_EEH_BUSY 20 /* 0x00100000 */
#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
+#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */
+#define AF_82XX_RST_OWNER 25 /* 0x02000000 */
+#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
+
unsigned long dpc_flags;
#define DPC_RESET_HA 1 /* 0x00000002 */
@@ -662,6 +676,11 @@ struct scsi_qla_host {
uint32_t nx_dev_init_timeout;
uint32_t nx_reset_timeout;
+ void *fw_dump;
+ uint32_t fw_dump_size;
+ uint32_t fw_dump_capture_mask;
+ void *fw_dump_tmplt_hdr;
+ uint32_t fw_dump_tmplt_size;
struct completion mbx_intr_comp;
@@ -936,4 +955,7 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
#define PROCESS_ALL_AENS 0
#define FLUSH_DDB_CHANGED_AENS 1
+/* Defines for udev events */
+#define QL4_UEVENT_CODE_FW_DUMP 0
+
#endif /*_QLA4XXX_H */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 210cd1d64475..7240948fb929 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -385,6 +385,11 @@ struct qla_flt_region {
#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091
#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092
#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093
+#define MBOX_CMD_MINIDUMP 0x0129
+
+/* Minidump subcommand */
+#define MINIDUMP_GET_SIZE_SUBCOMMAND 0x00
+#define MINIDUMP_GET_TMPLT_SUBCOMMAND 0x01
/* Mailbox 1 */
#define FW_STATE_READY 0x0000
@@ -1190,4 +1195,27 @@ struct ql_iscsi_stats {
uint8_t reserved2[264]; /* 0x0308 - 0x040F */
};
+#define QLA82XX_DBG_STATE_ARRAY_LEN 16
+#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8
+#define QLA82XX_DBG_RSVD_ARRAY_LEN 8
+
+struct qla4_8xxx_minidump_template_hdr {
+ uint32_t entry_type;
+ uint32_t first_entry_offset;
+ uint32_t size_of_template;
+ uint32_t capture_debug_level;
+ uint32_t num_of_entries;
+ uint32_t version;
+ uint32_t driver_timestamp;
+ uint32_t checksum;
+
+ uint32_t driver_capture_mask;
+ uint32_t driver_info_word2;
+ uint32_t driver_info_word3;
+ uint32_t driver_info_word4;
+
+ uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
+ uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
+};
+
#endif /* _QLA4X_FW_H */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 910536667cf5..20b49d019043 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -196,10 +196,18 @@ int qla4xxx_bsg_request(struct bsg_job *bsg_job);
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+ dma_addr_t phys_addr);
+int qla4xxx_req_template_size(struct scsi_qla_host *ha);
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
+extern int ql4xmdcapmask;
+extern int ql4xenablemd;
extern struct device_attribute *qla4xxx_host_attrs[];
#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 90ee5d8fa731..bf36723b84e1 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -277,6 +277,94 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
return ipv4_wait|ipv6_wait;
}
+/**
+ * qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
+ * @ha: pointer to host adapter structure.
+ **/
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
+{
+ int status;
+ uint32_t capture_debug_level;
+ int hdr_entry_bit, k;
+ void *md_tmp;
+ dma_addr_t md_tmp_dma;
+ struct qla4_8xxx_minidump_template_hdr *md_hdr;
+
+ if (ha->fw_dump) {
+ ql4_printk(KERN_WARNING, ha,
+ "Firmware dump previously allocated.\n");
+ return;
+ }
+
+ status = qla4xxx_req_template_size(ha);
+ if (status != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Failed to get template size\n",
+ ha->host_no);
+ return;
+ }
+
+ clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+
+ /* Allocate memory for saving the template */
+ md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+ &md_tmp_dma, GFP_KERNEL);
+
+ /* Request template */
+ status = qla4xxx_get_minidump_template(ha, md_tmp_dma);
+ if (status != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Failed to get minidump template\n",
+ ha->host_no);
+ goto alloc_cleanup;
+ }
+
+ md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;
+
+ capture_debug_level = md_hdr->capture_debug_level;
+
+ /* Get capture mask based on module loadtime setting. */
+ if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F)
+ ha->fw_dump_capture_mask = ql4xmdcapmask;
+ else
+ ha->fw_dump_capture_mask = capture_debug_level;
+
+ md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Minimum num of entries = %d\n",
+ md_hdr->num_of_entries));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Dump template size = %d\n",
+ ha->fw_dump_tmplt_size));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Selected Capture mask =0x%x\n",
+ ha->fw_dump_capture_mask));
+
+ /* Calculate fw_dump_size */
+ for (hdr_entry_bit = 0x2, k = 1; (hdr_entry_bit & 0xFF);
+ hdr_entry_bit <<= 1, k++) {
+ if (hdr_entry_bit & ha->fw_dump_capture_mask)
+ ha->fw_dump_size += md_hdr->capture_size_array[k];
+ }
+
+ /* Total firmware dump size including command header */
+ ha->fw_dump_size += ha->fw_dump_tmplt_size;
+ ha->fw_dump = vmalloc(ha->fw_dump_size);
+ if (!ha->fw_dump)
+ goto alloc_cleanup;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Minidump Tempalate Size = 0x%x KB\n",
+ ha->fw_dump_tmplt_size));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Total Minidump size = 0x%x KB\n", ha->fw_dump_size));
+
+ memcpy(ha->fw_dump, md_tmp, ha->fw_dump_tmplt_size);
+ ha->fw_dump_tmplt_hdr = ha->fw_dump;
+
+alloc_cleanup:
+ dma_free_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+ md_tmp, md_tmp_dma);
+}
+
static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
{
uint32_t timeout_count;
@@ -445,9 +533,13 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
"control block\n", ha->host_no, __func__));
return status;
}
+
if (!qla4xxx_fw_ready(ha))
return status;
+ if (is_qla8022(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
+ qla4xxx_alloc_fw_dump(ha);
+
return qla4xxx_get_firmware_status(ha);
}
@@ -884,8 +976,8 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
- ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
+ ddb_entry->unblock_sess(ddb_entry->sess);
status = QLA_SUCCESS;
break;
case DDB_DS_SESSION_FAILED:
@@ -897,6 +989,7 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
}
break;
case DDB_DS_SESSION_ACTIVE:
+ case DDB_DS_DISCOVERY:
switch (state) {
case DDB_DS_SESSION_FAILED:
/*
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 7ac21dabbf22..cab8f665a41f 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -51,25 +51,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
}
}
- if (is_qla8022(ha)) {
- if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
- DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
- "prematurely completing mbx cmd as firmware "
- "recovery detected\n", ha->host_no, __func__));
- return status;
- }
- /* Do not send any mbx cmd if h/w is in failed state*/
- qla4_8xxx_idc_lock(ha);
- dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- qla4_8xxx_idc_unlock(ha);
- if (dev_state == QLA82XX_DEV_FAILED) {
- ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in "
- "failed state, do not send any mailbox commands\n",
- ha->host_no, __func__);
- return status;
- }
- }
-
if ((is_aer_supported(ha)) &&
(test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
@@ -96,6 +77,25 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
msleep(10);
}
+ if (is_qla8022(ha)) {
+ if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
+ ha->host_no, __func__));
+ goto mbox_exit;
+ }
+ /* Do not send any mbx cmd if h/w is in failed state*/
+ qla4_8xxx_idc_lock(ha);
+ dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla4_8xxx_idc_unlock(ha);
+ if (dev_state == QLA82XX_DEV_FAILED) {
+ ql4_printk(KERN_WARNING, ha,
+ "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
+ ha->host_no, __func__);
+ goto mbox_exit;
+ }
+ }
+
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->mbox_status_count = outCount;
@@ -270,6 +270,79 @@ mbox_exit:
return status;
}
+/**
+ * qla4xxx_get_minidump_template - Get the firmware template
+ * @ha: Pointer to host adapter structure.
+ * @phys_addr: dma address for template
+ *
+ * Obtain the minidump template from firmware during initialization
+ * as it may not be available when minidump is desired.
+ **/
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+ dma_addr_t phys_addr)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+ mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND;
+ mbox_cmd[2] = LSDW(phys_addr);
+ mbox_cmd[3] = MSDW(phys_addr);
+ mbox_cmd[4] = ha->fw_dump_tmplt_size;
+ mbox_cmd[5] = 0;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n",
+ ha->host_no, __func__, mbox_cmd[0],
+ mbox_sts[0], mbox_sts[1]));
+ }
+ return status;
+}
+
+/**
+ * qla4xxx_req_template_size - Get minidump template size from firmware.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_req_template_size(struct scsi_qla_host *ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+ mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status == QLA_SUCCESS) {
+ ha->fw_dump_tmplt_size = mbox_sts[1];
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: sts[0]=0x%04x, template size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n",
+ __func__, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5], mbox_sts[6], mbox_sts[7]));
+ if (ha->fw_dump_tmplt_size == 0)
+ status = QLA_ERROR;
+ } else {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n",
+ __func__, mbox_sts[0], mbox_sts[1]);
+ status = QLA_ERROR;
+ }
+
+ return status;
+}
+
void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
{
set_bit(AF_FW_RECOVERY, &ha->flags);
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e1e46b6dac75..228b67020d2c 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pci.h>
+#include <linux/ratelimit.h>
#include "ql4_def.h"
#include "ql4_glbl.h"
@@ -420,6 +421,38 @@ qla4_8xxx_rd_32(struct scsi_qla_host *ha, ulong off)
return data;
}
+/* Minidump related functions */
+static int qla4_8xxx_md_rw_32(struct scsi_qla_host *ha, uint32_t off,
+ u32 data, uint8_t flag)
+{
+ uint32_t win_read, off_value, rval = QLA_SUCCESS;
+
+ off_value = off & 0xFFFF0000;
+ writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+ /* Read back value to make sure write has gone through before trying
+ * to use it.
+ */
+ win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ if (win_read != off_value) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
+ __func__, off_value, win_read, off));
+ return QLA_ERROR;
+ }
+
+ off_value = off & 0x0000FFFF;
+
+ if (flag)
+ writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
+ ha->nx_pcibase));
+ else
+ rval = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
+ ha->nx_pcibase));
+
+ return rval;
+}
+
#define CRB_WIN_LOCK_TIMEOUT 100000000
int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
@@ -1252,9 +1285,9 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
}
if (j >= MAX_CTL_CHECK) {
- if (printk_ratelimit())
- ql4_printk(KERN_ERR, ha,
- "failed to read through agent\n");
+ printk_ratelimited(KERN_ERR
+ "%s: failed to read through agent\n",
+ __func__);
break;
}
@@ -1390,7 +1423,8 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
if (j >= MAX_CTL_CHECK) {
if (printk_ratelimit())
ql4_printk(KERN_ERR, ha,
- "failed to write through agent\n");
+ "%s: failed to read through agent\n",
+ __func__);
ret = -1;
break;
}
@@ -1462,6 +1496,8 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
drv_active |= (1 << (ha->func_num * 4));
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+ __func__, ha->host_no, drv_active);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
}
@@ -1472,6 +1508,8 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
drv_active &= ~(1 << (ha->func_num * 4));
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+ __func__, ha->host_no, drv_active);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
}
@@ -1497,6 +1535,8 @@ qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
drv_state |= (1 << (ha->func_num * 4));
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+ __func__, ha->host_no, drv_state);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
}
@@ -1507,6 +1547,8 @@ qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
drv_state &= ~(1 << (ha->func_num * 4));
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+ __func__, ha->host_no, drv_state);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
}
@@ -1601,6 +1643,629 @@ static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
qla4_8xxx_rom_unlock(ha);
}
+static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla82xx_minidump_entry_crb *crb_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ crb_hdr = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+ r_addr = crb_hdr->addr;
+ r_stride = crb_hdr->crb_strd.addr_stride;
+ loop_cnt = crb_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_addr);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ unsigned long p_wait, w_time, p_mask;
+ uint32_t c_value_w, c_value_r;
+ struct qla82xx_minidump_entry_cache *cache_hdr;
+ int rval = QLA_ERROR;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+ p_wait = cache_hdr->cache_ctrl.poll_wait;
+ p_mask = cache_hdr->cache_ctrl.poll_mask;
+
+ for (i = 0; i < loop_count; i++) {
+ qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
+
+ if (c_value_w)
+ qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+
+ if (p_mask) {
+ w_time = jiffies + p_wait;
+ do {
+ c_value_r = qla4_8xxx_md_rw_32(ha, c_addr,
+ 0, 0);
+ if ((c_value_r & p_mask) == 0) {
+ break;
+ } else if (time_after_eq(jiffies, w_time)) {
+ /* capturing dump failed */
+ return rval;
+ }
+ } while (1);
+ }
+
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr)
+{
+ struct qla82xx_minidump_entry_crb *crb_entry;
+ uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
+ uint32_t crb_addr;
+ unsigned long wtime;
+ struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
+ int i;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+ ha->fw_dump_tmplt_hdr;
+ crb_entry = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+
+ crb_addr = crb_entry->addr;
+ for (i = 0; i < crb_entry->op_count; i++) {
+ opcode = crb_entry->crb_ctrl.opcode;
+ if (opcode & QLA82XX_DBG_OPCODE_WR) {
+ qla4_8xxx_md_rw_32(ha, crb_addr,
+ crb_entry->value_1, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_WR;
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_RW) {
+ read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+ qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_RW;
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_AND) {
+ read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+ read_value &= crb_entry->value_2;
+ opcode &= ~QLA82XX_DBG_OPCODE_AND;
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ read_value |= crb_entry->value_3;
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+ qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+ read_value |= crb_entry->value_3;
+ qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+ poll_time = crb_entry->crb_strd.poll_timeout;
+ wtime = jiffies + poll_time;
+ read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+
+ do {
+ if ((read_value & crb_entry->value_2) ==
+ crb_entry->value_1)
+ break;
+ else if (time_after_eq(jiffies, wtime)) {
+ /* capturing dump failed */
+ rval = QLA_ERROR;
+ break;
+ } else
+ read_value = qla4_8xxx_md_rw_32(ha,
+ crb_addr, 0, 0);
+ } while (1);
+ opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else {
+ addr = crb_addr;
+ }
+
+ read_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+ index = crb_entry->crb_ctrl.state_index_v;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else {
+ addr = crb_addr;
+ }
+
+ if (crb_entry->crb_ctrl.state_index_v) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value =
+ tmplt_hdr->saved_state_array[index];
+ } else {
+ read_value = crb_entry->value_1;
+ }
+
+ qla4_8xxx_md_rw_32(ha, addr, read_value, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value = tmplt_hdr->saved_state_array[index];
+ read_value <<= crb_entry->crb_ctrl.shl;
+ read_value >>= crb_entry->crb_ctrl.shr;
+ if (crb_entry->value_2)
+ read_value &= crb_entry->value_2;
+ read_value |= crb_entry->value_3;
+ read_value += crb_entry->value_1;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+ }
+ crb_addr += crb_entry->crb_strd.addr_stride;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
+ return rval;
+}
+
+static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla82xx_minidump_entry_rdocm *ocm_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ ocm_hdr = (struct qla82xx_minidump_entry_rdocm *)entry_hdr;
+ r_addr = ocm_hdr->read_addr;
+ r_stride = ocm_hdr->read_addr_stride;
+ loop_cnt = ocm_hdr->op_count;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, r_stride, loop_cnt));
+
+ for (i = 0; i < loop_cnt; i++) {
+ r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
+ __func__, (loop_cnt * sizeof(uint32_t))));
+ *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+ struct qla82xx_minidump_entry_mux *mux_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ mux_hdr = (struct qla82xx_minidump_entry_mux *)entry_hdr;
+ r_addr = mux_hdr->read_addr;
+ s_addr = mux_hdr->select_addr;
+ s_stride = mux_hdr->select_value_stride;
+ s_value = mux_hdr->select_value;
+ loop_cnt = mux_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla4_8xxx_md_rw_32(ha, s_addr, s_value, 1);
+ r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(s_value);
+ *data_ptr++ = cpu_to_le32(r_value);
+ s_value += s_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ uint32_t c_value_w;
+ struct qla82xx_minidump_entry_cache *cache_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
+ for (i = 0; i < loop_count; i++) {
+ qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
+ qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t s_addr, r_addr;
+ uint32_t r_stride, r_value, r_cnt, qid = 0;
+ uint32_t i, k, loop_cnt;
+ struct qla82xx_minidump_entry_queue *q_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ q_hdr = (struct qla82xx_minidump_entry_queue *)entry_hdr;
+ s_addr = q_hdr->select_addr;
+ r_cnt = q_hdr->rd_strd.read_addr_cnt;
+ r_stride = q_hdr->rd_strd.read_addr_stride;
+ loop_cnt = q_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla4_8xxx_md_rw_32(ha, s_addr, qid, 1);
+ r_addr = q_hdr->read_addr;
+ for (k = 0; k < r_cnt; k++) {
+ r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ qid += q_hdr->q_strd.queue_id_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+#define MD_DIRECT_ROM_WINDOW 0x42110030
+#define MD_DIRECT_ROM_READ_BASE 0x42150000
+
+static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_value;
+ uint32_t i, loop_cnt;
+ struct qla82xx_minidump_entry_rdrom *rom_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ rom_hdr = (struct qla82xx_minidump_entry_rdrom *)entry_hdr;
+ r_addr = rom_hdr->read_addr;
+ loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: flash_addr: 0x%x, read_data_size: 0x%x\n",
+ __func__, r_addr, loop_cnt));
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla4_8xxx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
+ (r_addr & 0xFFFF0000), 1);
+ r_value = qla4_8xxx_md_rw_32(ha,
+ MD_DIRECT_ROM_READ_BASE +
+ (r_addr & 0x0000FFFF), 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += sizeof(uint32_t);
+ }
+ *d_ptr = data_ptr;
+}
+
+#define MD_MIU_TEST_AGT_CTRL 0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
+
+static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_value, r_data;
+ uint32_t i, j, loop_cnt;
+ struct qla82xx_minidump_entry_rdmem *m_hdr;
+ unsigned long flags;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ m_hdr = (struct qla82xx_minidump_entry_rdmem *)entry_hdr;
+ r_addr = m_hdr->read_addr;
+ loop_cnt = m_hdr->read_data_size/16;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size));
+
+ if (r_addr & 0xf) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: Read addr 0x%x not 16 bytes alligned\n",
+ __func__, r_addr));
+ return QLA_ERROR;
+ }
+
+ if (m_hdr->read_data_size % 16) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
+ __func__, m_hdr->read_data_size));
+ return QLA_ERROR;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size, loop_cnt));
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+ for (i = 0; i < loop_cnt; i++) {
+ qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
+ r_value = 0;
+ qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
+ r_value = MIU_TA_CTL_ENABLE;
+ qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+ r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+ qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ r_value = qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL,
+ 0, 0);
+ if ((r_value & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_ERR
+ "%s: failed to read through agent\n",
+ __func__);
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ return QLA_SUCCESS;
+ }
+
+ for (j = 0; j < 4; j++) {
+ r_data = qla4_8xxx_md_rw_32(ha,
+ MD_MIU_TEST_AGT_RDDATA[j],
+ 0, 0);
+ *data_ptr++ = cpu_to_le32(r_data);
+ }
+
+ r_addr += 16;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n",
+ __func__, (loop_cnt * 16)));
+
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+static void ql4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ int index)
+{
+ entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
+ ha->host_no, index, entry_hdr->entry_type,
+ entry_hdr->d_ctrl.entry_capture_mask));
+}
+
+/**
+ * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
+ * @ha: pointer to adapter structure
+ **/
+static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
+{
+ int num_entry_hdr = 0;
+ struct qla82xx_minidump_entry_hdr *entry_hdr;
+ struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
+ uint32_t *data_ptr;
+ uint32_t data_collected = 0;
+ int i, rval = QLA_ERROR;
+ uint64_t now;
+ uint32_t timestamp;
+
+ if (!ha->fw_dump) {
+ ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
+ __func__, ha->host_no);
+ return rval;
+ }
+
+ tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+ ha->fw_dump_tmplt_hdr;
+ data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump +
+ ha->fw_dump_tmplt_size);
+ data_collected += ha->fw_dump_tmplt_size;
+
+ num_entry_hdr = tmplt_hdr->num_of_entries;
+ ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n",
+ __func__, data_ptr);
+ ql4_printk(KERN_INFO, ha,
+ "[%s]: no of entry headers in Template: 0x%x\n",
+ __func__, num_entry_hdr);
+ ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n",
+ __func__, ha->fw_dump_capture_mask);
+ ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n",
+ __func__, ha->fw_dump_size, ha->fw_dump_size);
+
+ /* Update current timestamp before taking dump */
+ now = get_jiffies_64();
+ timestamp = (u32)(jiffies_to_msecs(now) / 1000);
+ tmplt_hdr->driver_timestamp = timestamp;
+
+ entry_hdr = (struct qla82xx_minidump_entry_hdr *)
+ (((uint8_t *)ha->fw_dump_tmplt_hdr) +
+ tmplt_hdr->first_entry_offset);
+
+ /* Walk through the entry headers - validate/perform required action */
+ for (i = 0; i < num_entry_hdr; i++) {
+ if (data_collected >= ha->fw_dump_size) {
+ ql4_printk(KERN_INFO, ha,
+ "Data collected: [0x%x], Total Dump size: [0x%x]\n",
+ data_collected, ha->fw_dump_size);
+ return rval;
+ }
+
+ if (!(entry_hdr->d_ctrl.entry_capture_mask &
+ ha->fw_dump_capture_mask)) {
+ entry_hdr->d_ctrl.driver_flags |=
+ QLA82XX_DBG_SKIPPED_FLAG;
+ goto skip_nxt_entry;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Data collected: [0x%x], Dump size left:[0x%x]\n",
+ data_collected,
+ (ha->fw_dump_size - data_collected)));
+
+ /* Decode the entry type and take required action to capture
+ * debug data
+ */
+ switch (entry_hdr->entry_type) {
+ case QLA82XX_RDEND:
+ ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ case QLA82XX_CNTRL:
+ rval = qla4_8xxx_minidump_process_control(ha,
+ entry_hdr);
+ if (rval != QLA_SUCCESS) {
+ ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_RDCRB:
+ qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_RDMEM:
+ rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_BOARD:
+ case QLA82XX_RDROM:
+ qla4_8xxx_minidump_process_rdrom(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_L2DTG:
+ case QLA82XX_L2ITG:
+ case QLA82XX_L2DAT:
+ case QLA82XX_L2INS:
+ rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_L1DAT:
+ case QLA82XX_L1INS:
+ qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_RDOCM:
+ qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_RDMUX:
+ qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_QUEUE:
+ qla4_8xxx_minidump_process_queue(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_RDNOP:
+ default:
+ ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ }
+
+ data_collected = (uint8_t *)data_ptr -
+ ((uint8_t *)((uint8_t *)ha->fw_dump +
+ ha->fw_dump_tmplt_size));
+skip_nxt_entry:
+ /* next entry in the template */
+ entry_hdr = (struct qla82xx_minidump_entry_hdr *)
+ (((uint8_t *)entry_hdr) +
+ entry_hdr->entry_size);
+ }
+
+ if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) {
+ ql4_printk(KERN_INFO, ha,
+ "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
+ data_collected, ha->fw_dump_size);
+ goto md_failed;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n",
+ __func__, i));
+md_failed:
+ return rval;
+}
+
+/**
+ * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready.
+ * @ha: pointer to adapter structure
+ * @code: uevent code to emit (QL4_UEVENT_CODE_FW_DUMP)
+ **/
+static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
+{
+ char event_string[40];
+ char *envp[] = { event_string, NULL };
+
+ switch (code) {
+ case QL4_UEVENT_CODE_FW_DUMP:
+ snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+ ha->host_no);
+ break;
+ default:
+		/* do nothing */
+ break;
+ }
+
+ kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp);
+}
+
/**
* qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
* @ha: pointer to adapter structure
@@ -1659,6 +2324,15 @@ dev_initialize:
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
qla4_8xxx_idc_unlock(ha);
+ if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
+ !test_and_set_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
+ if (!qla4_8xxx_collect_md_data(ha)) {
+ qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
+ } else {
+ ql4_printk(KERN_INFO, ha, "Unable to collect minidump\n");
+ clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+ }
+ }
rval = qla4_8xxx_try_start_fw(ha);
qla4_8xxx_idc_lock(ha);
@@ -1686,6 +2360,7 @@ static void
qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
{
uint32_t dev_state, drv_state, drv_active;
+ uint32_t active_mask = 0xFFFFFFFF;
unsigned long reset_timeout;
ql4_printk(KERN_INFO, ha,
@@ -1697,7 +2372,14 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
qla4_8xxx_idc_lock(ha);
}
- qla4_8xxx_set_rst_ready(ha);
+ if (!test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s(%ld): reset acknowledged\n",
+ __func__, ha->host_no));
+ qla4_8xxx_set_rst_ready(ha);
+ } else {
+ active_mask = (~(1 << (ha->func_num * 4)));
+ }
/* wait for 10 seconds for reset ack from all functions */
reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
@@ -1709,12 +2391,24 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
"%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
__func__, ha->host_no, drv_state, drv_active);
- while (drv_state != drv_active) {
+ while (drv_state != (drv_active & active_mask)) {
if (time_after_eq(jiffies, reset_timeout)) {
- printk("%s: RESET TIMEOUT!\n", DRIVER_NAME);
+ ql4_printk(KERN_INFO, ha,
+ "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
+ DRIVER_NAME, drv_state, drv_active);
break;
}
+ /*
+ * When reset_owner times out, check which functions
+ * acked/did not ack
+ */
+ if (test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+ ql4_printk(KERN_INFO, ha,
+ "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
+ __func__, ha->host_no, drv_state,
+ drv_active);
+ }
qla4_8xxx_idc_unlock(ha);
msleep(1000);
qla4_8xxx_idc_lock(ha);
@@ -1723,14 +2417,18 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
}
+ /* Clear RESET OWNER as we are not going to use it any further */
+ clear_bit(AF_82XX_RST_OWNER, &ha->flags);
+
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- ql4_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+ ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
+ dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
/* Force to DEV_COLD unless someone else is starting a reset */
if (dev_state != QLA82XX_DEV_INITIALIZING) {
ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+ qla4_8xxx_set_rst_ready(ha);
}
}
@@ -1765,8 +2463,9 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
}
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown"));
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -1775,15 +2474,19 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
while (1) {
if (time_after_eq(jiffies, dev_init_timeout)) {
- ql4_printk(KERN_WARNING, ha, "Device init failed!\n");
+ ql4_printk(KERN_WARNING, ha,
+ "%s: Device Init Failed 0x%x = %s\n",
+ DRIVER_NAME,
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
}
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- ql4_printk(KERN_INFO, ha,
- "2:Device state is 0x%x = %s\n", dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+ ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown");
/* NOTE: Make sure idc unlocked upon exit of switch statement */
switch (dev_state) {
@@ -2184,6 +2887,7 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_NEED_RESET);
+ set_bit(AF_82XX_RST_OWNER, &ha->flags);
} else
ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
@@ -2195,8 +2899,10 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
qla4_8xxx_clear_rst_ready(ha);
qla4_8xxx_idc_unlock(ha);
- if (rval == QLA_SUCCESS)
+ if (rval == QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_8xxx_isp_reset\n");
clear_bit(AF_FW_RECOVERY, &ha->flags);
+ }
return rval;
}
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index dc7500e47b8b..30258479f100 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -792,4 +792,196 @@ struct crb_addr_pair {
#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
+/* Minidump related */
+
+/* Entry Type Defines */
+#define QLA82XX_RDNOP 0
+#define QLA82XX_RDCRB 1
+#define QLA82XX_RDMUX 2
+#define QLA82XX_QUEUE 3
+#define QLA82XX_BOARD 4
+#define QLA82XX_RDOCM 6
+#define QLA82XX_PREGS 7
+#define QLA82XX_L1DTG 8
+#define QLA82XX_L1ITG 9
+#define QLA82XX_L1DAT 11
+#define QLA82XX_L1INS 12
+#define QLA82XX_L2DTG 21
+#define QLA82XX_L2ITG 22
+#define QLA82XX_L2DAT 23
+#define QLA82XX_L2INS 24
+#define QLA82XX_RDROM 71
+#define QLA82XX_RDMEM 72
+#define QLA82XX_CNTRL 98
+#define QLA82XX_RDEND 255
+
+/* Opcodes for Control Entries.
+ * These Flags are bit fields.
+ */
+#define QLA82XX_DBG_OPCODE_WR 0x01
+#define QLA82XX_DBG_OPCODE_RW 0x02
+#define QLA82XX_DBG_OPCODE_AND 0x04
+#define QLA82XX_DBG_OPCODE_OR 0x08
+#define QLA82XX_DBG_OPCODE_POLL 0x10
+#define QLA82XX_DBG_OPCODE_RDSTATE 0x20
+#define QLA82XX_DBG_OPCODE_WRSTATE 0x40
+#define QLA82XX_DBG_OPCODE_MDSTATE 0x80
+
+/* Driver Flags */
+#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
+#define QLA82XX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size
+ * mismatch */
+
+/* Driver_code is for driver to write some info about the entry
+ * currently not used.
+ */
+struct qla82xx_minidump_entry_hdr {
+ uint32_t entry_type;
+ uint32_t entry_size;
+ uint32_t entry_capture_size;
+ struct {
+ uint8_t entry_capture_mask;
+ uint8_t entry_code;
+ uint8_t driver_code;
+ uint8_t driver_flags;
+ } d_ctrl;
+};
+
+/* Read CRB entry header */
+struct qla82xx_minidump_entry_crb {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t addr;
+ struct {
+ uint8_t addr_stride;
+ uint8_t state_index_a;
+ uint16_t poll_timeout;
+ } crb_strd;
+ uint32_t data_size;
+ uint32_t op_count;
+
+ struct {
+ uint8_t opcode;
+ uint8_t state_index_v;
+ uint8_t shl;
+ uint8_t shr;
+ } crb_ctrl;
+
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t value_3;
+};
+
+struct qla82xx_minidump_entry_cache {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t tag_reg_addr;
+ struct {
+ uint16_t tag_value_stride;
+ uint16_t init_tag_value;
+ } addr_ctrl;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t control_addr;
+ struct {
+ uint16_t write_value;
+ uint8_t poll_mask;
+ uint8_t poll_wait;
+ } cache_ctrl;
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_1;
+ } read_ctrl;
+};
+
+/* Read OCM */
+struct qla82xx_minidump_entry_rdocm {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t rsvd_0;
+ uint32_t rsvd_1;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_2;
+ uint32_t rsvd_3;
+ uint32_t read_addr;
+ uint32_t read_addr_stride;
+};
+
+/* Read Memory */
+struct qla82xx_minidump_entry_rdmem {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+};
+
+/* Read ROM */
+struct qla82xx_minidump_entry_rdrom {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+};
+
+/* Mux entry */
+struct qla82xx_minidump_entry_mux {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t select_addr;
+ uint32_t rsvd_0;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t select_value;
+ uint32_t select_value_stride;
+ uint32_t read_addr;
+ uint32_t rsvd_1;
+};
+
+/* Queue entry */
+struct qla82xx_minidump_entry_queue {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t select_addr;
+ struct {
+ uint16_t queue_id_stride;
+ uint16_t rsvd_0;
+ } q_strd;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_1;
+ uint32_t rsvd_2;
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_3;
+ } rd_strd;
+};
+
+#define QLA82XX_MINIDUMP_OCM0_SIZE (256 * 1024)
+#define QLA82XX_MINIDUMP_L1C_SIZE (256 * 1024)
+#define QLA82XX_MINIDUMP_L2C_SIZE 1572864
+#define QLA82XX_MINIDUMP_COMMON_STR_SIZE 0
+#define QLA82XX_MINIDUMP_FCOE_STR_SIZE 0
+#define QLA82XX_MINIDUMP_MEM_SIZE 0
+#define QLA82XX_MAX_ENTRY_HDR 4
+
+struct qla82xx_minidump {
+ uint32_t md_ocm0_data[QLA82XX_MINIDUMP_OCM0_SIZE];
+ uint32_t md_l1c_data[QLA82XX_MINIDUMP_L1C_SIZE];
+ uint32_t md_l2c_data[QLA82XX_MINIDUMP_L2C_SIZE];
+ uint32_t md_cs_data[QLA82XX_MINIDUMP_COMMON_STR_SIZE];
+ uint32_t md_fcoes_data[QLA82XX_MINIDUMP_FCOE_STR_SIZE];
+ uint32_t md_mem_data[QLA82XX_MINIDUMP_MEM_SIZE];
+};
+
+#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
+#define RQST_TMPLT_SIZE 0x0
+#define RQST_TMPLT 0x1
+#define MD_DIRECT_ROM_WINDOW 0x42110030
+#define MD_DIRECT_ROM_READ_BASE 0x42150000
+#define MD_MIU_TEST_AGT_CTRL 0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
+
+static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
+ 0x410000AC, 0x410000B8, 0x410000BC };
#endif
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ee47820c30a6..cd15678f9ada 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -68,12 +68,34 @@ MODULE_PARM_DESC(ql4xmaxqdepth,
" Maximum queue depth to report for target devices.\n"
"\t\t Default: 32.");
+static int ql4xqfulltracking = 1;
+module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xqfulltracking,
+ " Enable or disable dynamic tracking and adjustment of\n"
+ "\t\t scsi device queue depth.\n"
+ "\t\t 0 - Disable.\n"
+ "\t\t 1 - Enable. (Default)");
+
static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
" Target Session Recovery Timeout.\n"
"\t\t Default: 120 sec.");
+int ql4xmdcapmask = 0x1F;
+module_param(ql4xmdcapmask, int, S_IRUGO);
+MODULE_PARM_DESC(ql4xmdcapmask,
+ " Set the Minidump driver capture mask level.\n"
+ "\t\t Default is 0x1F.\n"
+ "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
+
+int ql4xenablemd = 1;
+module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xenablemd,
+ " Set to enable minidump.\n"
+ "\t\t 0 - disable minidump\n"
+ "\t\t 1 - enable minidump (Default)");
+
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
* SCSI host template entry points
@@ -140,6 +162,8 @@ static int qla4xxx_slave_configure(struct scsi_device *device);
static void qla4xxx_slave_destroy(struct scsi_device *sdev);
static umode_t ql4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
+static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
+ int reason);
static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
QLA82XX_LEGACY_INTR_CONFIG;
@@ -159,6 +183,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.slave_configure = qla4xxx_slave_configure,
.slave_alloc = qla4xxx_slave_alloc,
.slave_destroy = qla4xxx_slave_destroy,
+ .change_queue_depth = qla4xxx_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
@@ -1555,19 +1580,53 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
- unsigned long flags;
+ unsigned long flags, wtime;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint32_t ddb_state;
+ int ret;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto destroy_session;
+ }
+
+ wtime = jiffies + (HZ * LOGOUT_TOV);
+ do {
+ ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (ret == QLA_ERROR)
+ goto destroy_session;
+
+ if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+ (ddb_state == DDB_DS_SESSION_FAILED))
+ goto destroy_session;
+
+ schedule_timeout_uninterruptible(HZ);
+ } while ((time_after(wtime, jiffies)));
+
+destroy_session:
qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
spin_lock_irqsave(&ha->hardware_lock, flags);
qla4xxx_free_ddb(ha, ddb_entry);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
iscsi_session_teardown(cls_sess);
+
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
}
static struct iscsi_cls_conn *
@@ -2220,6 +2279,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
ha->queues_dma);
+ if (ha->fw_dump)
+ vfree(ha->fw_dump);
+
ha->queues_len = 0;
ha->queues = NULL;
ha->queues_dma = 0;
@@ -2229,6 +2291,8 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
ha->response_dma = 0;
ha->shadow_regs = NULL;
ha->shadow_regs_dma = 0;
+ ha->fw_dump = NULL;
+ ha->fw_dump_size = 0;
/* Free srb pool. */
if (ha->srb_mempool)
@@ -5023,6 +5087,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
set_bit(AF_INIT_DONE, &ha->flags);
+ qla4_8xxx_alloc_sysfs_attr(ha);
+
printk(KERN_INFO
" QLogic iSCSI HBA Driver version: %s\n"
" QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
@@ -5149,6 +5215,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
iscsi_boot_destroy_kset(ha->boot_kset);
qla4xxx_destroy_fw_ddb_session(ha);
+ qla4_8xxx_free_sysfs_attr(ha);
scsi_remove_host(ha->host);
@@ -5217,6 +5284,15 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev)
scsi_deactivate_tcq(sdev, 1);
}
+static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
+ int reason)
+{
+ if (!ql4xqfulltracking)
+ return -EOPNOTSUPP;
+
+ return iscsi_change_queue_depth(sdev, qdepth, reason);
+}
+
/**
* qla4xxx_del_from_active_array - returns an active srb
* @ha: Pointer to host adapter structure.
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 97b30c108e36..cc1cc3518b87 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k16"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k17"
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62ddfd31d4ce..6dfb9785d345 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1378,16 +1378,19 @@ static int scsi_lld_busy(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost;
- struct scsi_target *starget;
if (!sdev)
return 0;
shost = sdev->host;
- starget = scsi_target(sdev);
- if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
- scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
+ /*
+ * Ignore host/starget busy state.
+ * Since block layer does not have a concept of fairness across
+ * multiple queues, congestion of host/starget needs to be handled
+ * in SCSI layer.
+ */
+ if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
return 1;
return 0;
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index f661a41fa4c6..d4201ded3b22 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -24,8 +24,11 @@ static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
err = scsi_device_quiesce(to_scsi_device(dev));
if (err == 0) {
drv = dev->driver;
- if (drv && drv->suspend)
+ if (drv && drv->suspend) {
err = drv->suspend(dev, msg);
+ if (err)
+ scsi_device_resume(to_scsi_device(dev));
+ }
}
dev_dbg(dev, "scsi suspend: %d\n", err);
return err;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 01b03744f1f9..2e5fe584aad3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -147,7 +147,7 @@ int scsi_complete_async_scans(void)
do {
if (list_empty(&scanning_hosts))
- return 0;
+ goto out;
/* If we can't get memory immediately, that's OK. Just
* sleep a little. Even if we never get memory, the async
* scans will finish eventually.
@@ -179,8 +179,11 @@ int scsi_complete_async_scans(void)
}
done:
spin_unlock(&async_scan_lock);
-
kfree(data);
+
+ out:
+ async_synchronize_full_domain(&scsi_sd_probe_domain);
+
return 0;
}
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
index 74708fcaf82f..ae7814874618 100644
--- a/drivers/scsi/scsi_wait_scan.c
+++ b/drivers/scsi/scsi_wait_scan.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/device.h>
-#include <scsi/scsi_scan.h>
+#include "scsi_priv.h"
static int __init wait_scan_init(void)
{
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 4e010b727818..6a4fd00117ca 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1836,7 +1836,7 @@ ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = pci_request_regions(pdev, UFSHCD);
if (err < 0) {
dev_err(&pdev->dev, "request regions failed\n");
- goto out_disable;
+ goto out_host_put;
}
hba->mmio_base = pci_ioremap_bar(pdev, 0);
@@ -1925,8 +1925,9 @@ out_iounmap:
iounmap(hba->mmio_base);
out_release_regions:
pci_release_regions(pdev);
-out_disable:
+out_host_put:
scsi_host_put(host);
+out_disable:
pci_clear_master(pdev);
pci_disable_device(pdev);
out_error:
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 00c024039c97..cd2fe350e724 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -311,7 +311,7 @@ config SPI_S3C24XX_FIQ
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
- depends on (ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
+ depends on (ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
select S3C64XX_DMA if ARCH_S3C64XX
help
SPI driver for Samsung S3C64XX and newer SoCs.
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index acc88b4d2869..249077e5cc48 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -216,9 +216,6 @@ static __devinit int ath79_spi_probe(struct platform_device *pdev)
if (pdata) {
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->num_chipselect;
- } else {
- master->bus_num = -1;
- master->num_chipselect = 1;
}
sp->bitbang.master = spi_master_get(master);
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index 6eee64a5d240..b2d4b9e4e010 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -25,12 +25,12 @@
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
-#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
@@ -78,10 +78,7 @@ struct mcfqspi {
wait_queue_head_t waitq;
- struct work_struct work;
- struct workqueue_struct *workq;
- spinlock_t lock;
- struct list_head msgq;
+ struct device *dev;
};
static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val)
@@ -303,120 +300,80 @@ static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count,
}
}
-static void mcfqspi_work(struct work_struct *work)
+static int mcfqspi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
{
- struct mcfqspi *mcfqspi = container_of(work, struct mcfqspi, work);
- unsigned long flags;
-
- spin_lock_irqsave(&mcfqspi->lock, flags);
- while (!list_empty(&mcfqspi->msgq)) {
- struct spi_message *msg;
- struct spi_device *spi;
- struct spi_transfer *xfer;
- int status = 0;
-
- msg = container_of(mcfqspi->msgq.next, struct spi_message,
- queue);
-
- list_del_init(&msg->queue);
- spin_unlock_irqrestore(&mcfqspi->lock, flags);
-
- spi = msg->spi;
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- bool cs_high = spi->mode & SPI_CS_HIGH;
- u16 qmr = MCFQSPI_QMR_MSTR;
-
- if (xfer->bits_per_word)
- qmr |= xfer->bits_per_word << 10;
- else
- qmr |= spi->bits_per_word << 10;
- if (spi->mode & SPI_CPHA)
- qmr |= MCFQSPI_QMR_CPHA;
- if (spi->mode & SPI_CPOL)
- qmr |= MCFQSPI_QMR_CPOL;
- if (xfer->speed_hz)
- qmr |= mcfqspi_qmr_baud(xfer->speed_hz);
- else
- qmr |= mcfqspi_qmr_baud(spi->max_speed_hz);
- mcfqspi_wr_qmr(mcfqspi, qmr);
-
- mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
-
- mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
- if ((xfer->bits_per_word ? xfer->bits_per_word :
- spi->bits_per_word) == 8)
- mcfqspi_transfer_msg8(mcfqspi, xfer->len,
- xfer->tx_buf,
- xfer->rx_buf);
- else
- mcfqspi_transfer_msg16(mcfqspi, xfer->len / 2,
- xfer->tx_buf,
- xfer->rx_buf);
- mcfqspi_wr_qir(mcfqspi, 0);
-
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
- if (xfer->cs_change) {
- if (!list_is_last(&xfer->transfer_list,
- &msg->transfers))
- mcfqspi_cs_deselect(mcfqspi,
- spi->chip_select,
- cs_high);
- } else {
- if (list_is_last(&xfer->transfer_list,
- &msg->transfers))
- mcfqspi_cs_deselect(mcfqspi,
- spi->chip_select,
- cs_high);
- }
- msg->actual_length += xfer->len;
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+ struct spi_transfer *t;
+ int status = 0;
+
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ bool cs_high = spi->mode & SPI_CS_HIGH;
+ u16 qmr = MCFQSPI_QMR_MSTR;
+
+ if (t->bits_per_word)
+ qmr |= t->bits_per_word << 10;
+ else
+ qmr |= spi->bits_per_word << 10;
+ if (spi->mode & SPI_CPHA)
+ qmr |= MCFQSPI_QMR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ qmr |= MCFQSPI_QMR_CPOL;
+ if (t->speed_hz)
+ qmr |= mcfqspi_qmr_baud(t->speed_hz);
+ else
+ qmr |= mcfqspi_qmr_baud(spi->max_speed_hz);
+ mcfqspi_wr_qmr(mcfqspi, qmr);
+
+ mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
+
+ mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
+ if ((t->bits_per_word ? t->bits_per_word :
+ spi->bits_per_word) == 8)
+ mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf,
+ t->rx_buf);
+ else
+ mcfqspi_transfer_msg16(mcfqspi, t->len / 2, t->tx_buf,
+ t->rx_buf);
+ mcfqspi_wr_qir(mcfqspi, 0);
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+ if (t->cs_change) {
+ if (!list_is_last(&t->transfer_list, &msg->transfers))
+ mcfqspi_cs_deselect(mcfqspi, spi->chip_select,
+ cs_high);
+ } else {
+ if (list_is_last(&t->transfer_list, &msg->transfers))
+ mcfqspi_cs_deselect(mcfqspi, spi->chip_select,
+ cs_high);
}
- msg->status = status;
- msg->complete(msg->context);
-
- spin_lock_irqsave(&mcfqspi->lock, flags);
+ msg->actual_length += t->len;
}
- spin_unlock_irqrestore(&mcfqspi->lock, flags);
+ msg->status = status;
+ spi_finalize_current_message(master);
+
+ return status;
+
}
-static int mcfqspi_transfer(struct spi_device *spi, struct spi_message *msg)
+static int mcfqspi_prepare_transfer_hw(struct spi_master *master)
{
- struct mcfqspi *mcfqspi;
- struct spi_transfer *xfer;
- unsigned long flags;
-
- mcfqspi = spi_master_get_devdata(spi->master);
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (xfer->bits_per_word && ((xfer->bits_per_word < 8)
- || (xfer->bits_per_word > 16))) {
- dev_dbg(&spi->dev,
- "%d bits per word is not supported\n",
- xfer->bits_per_word);
- goto fail;
- }
- if (xfer->speed_hz) {
- u32 real_speed = MCFQSPI_BUSCLK /
- mcfqspi_qmr_baud(xfer->speed_hz);
- if (real_speed != xfer->speed_hz)
- dev_dbg(&spi->dev,
- "using speed %d instead of %d\n",
- real_speed, xfer->speed_hz);
- }
- }
- msg->status = -EINPROGRESS;
- msg->actual_length = 0;
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
- spin_lock_irqsave(&mcfqspi->lock, flags);
- list_add_tail(&msg->queue, &mcfqspi->msgq);
- queue_work(mcfqspi->workq, &mcfqspi->work);
- spin_unlock_irqrestore(&mcfqspi->lock, flags);
+ pm_runtime_get_sync(mcfqspi->dev);
+
+ return 0;
+}
+
+static int mcfqspi_unprepare_transfer_hw(struct spi_master *master)
+{
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ pm_runtime_put_sync(mcfqspi->dev);
return 0;
-fail:
- msg->status = -EINVAL;
- return -EINVAL;
}
static int mcfqspi_setup(struct spi_device *spi)
@@ -502,21 +459,10 @@ static int __devinit mcfqspi_probe(struct platform_device *pdev)
}
clk_enable(mcfqspi->clk);
- mcfqspi->workq = create_singlethread_workqueue(dev_name(master->dev.parent));
- if (!mcfqspi->workq) {
- dev_dbg(&pdev->dev, "create_workqueue failed\n");
- status = -ENOMEM;
- goto fail4;
- }
- INIT_WORK(&mcfqspi->work, mcfqspi_work);
- spin_lock_init(&mcfqspi->lock);
- INIT_LIST_HEAD(&mcfqspi->msgq);
- init_waitqueue_head(&mcfqspi->waitq);
-
pdata = pdev->dev.platform_data;
if (!pdata) {
dev_dbg(&pdev->dev, "platform data is missing\n");
- goto fail5;
+ goto fail4;
}
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->num_chipselect;
@@ -525,28 +471,33 @@ static int __devinit mcfqspi_probe(struct platform_device *pdev)
status = mcfqspi_cs_setup(mcfqspi);
if (status) {
dev_dbg(&pdev->dev, "error initializing cs_control\n");
- goto fail5;
+ goto fail4;
}
+ init_waitqueue_head(&mcfqspi->waitq);
+ mcfqspi->dev = &pdev->dev;
+
master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
master->setup = mcfqspi_setup;
- master->transfer = mcfqspi_transfer;
+ master->transfer_one_message = mcfqspi_transfer_one_message;
+ master->prepare_transfer_hardware = mcfqspi_prepare_transfer_hw;
+ master->unprepare_transfer_hardware = mcfqspi_unprepare_transfer_hw;
platform_set_drvdata(pdev, master);
status = spi_register_master(master);
if (status) {
dev_dbg(&pdev->dev, "spi_register_master failed\n");
- goto fail6;
+ goto fail5;
}
+ pm_runtime_enable(mcfqspi->dev);
+
dev_info(&pdev->dev, "Coldfire QSPI bus driver\n");
return 0;
-fail6:
- mcfqspi_cs_teardown(mcfqspi);
fail5:
- destroy_workqueue(mcfqspi->workq);
+ mcfqspi_cs_teardown(mcfqspi);
fail4:
clk_disable(mcfqspi->clk);
clk_put(mcfqspi->clk);
@@ -570,12 +521,12 @@ static int __devexit mcfqspi_remove(struct platform_device *pdev)
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pm_runtime_disable(mcfqspi->dev);
/* disable the hardware (set the baud rate to 0) */
mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
platform_set_drvdata(pdev, NULL);
mcfqspi_cs_teardown(mcfqspi);
- destroy_workqueue(mcfqspi->workq);
clk_disable(mcfqspi->clk);
clk_put(mcfqspi->clk);
free_irq(mcfqspi->irq, mcfqspi);
@@ -587,11 +538,13 @@ static int __devexit mcfqspi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-
+#ifdef CONFIG_PM_SLEEP
static int mcfqspi_suspend(struct device *dev)
{
- struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
+ struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
clk_disable(mcfqspi->clk);
@@ -600,27 +553,47 @@ static int mcfqspi_suspend(struct device *dev)
static int mcfqspi_resume(struct device *dev)
{
- struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
+ struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ spi_master_resume(master);
clk_enable(mcfqspi->clk);
return 0;
}
+#endif
-static struct dev_pm_ops mcfqspi_dev_pm_ops = {
- .suspend = mcfqspi_suspend,
- .resume = mcfqspi_resume,
-};
+#ifdef CONFIG_PM_RUNTIME
+static int mcfqspi_runtime_suspend(struct device *dev)
+{
+ struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
-#define MCFQSPI_DEV_PM_OPS (&mcfqspi_dev_pm_ops)
-#else
-#define MCFQSPI_DEV_PM_OPS NULL
+ clk_disable(mcfqspi->clk);
+
+ return 0;
+}
+
+static int mcfqspi_runtime_resume(struct device *dev)
+{
+ struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
+
+ clk_enable(mcfqspi->clk);
+
+ return 0;
+}
#endif
+static const struct dev_pm_ops mcfqspi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(mcfqspi_suspend, mcfqspi_resume)
+ SET_RUNTIME_PM_OPS(mcfqspi_runtime_suspend, mcfqspi_runtime_resume,
+ NULL)
+};
+
static struct platform_driver mcfqspi_driver = {
.driver.name = DRIVER_NAME,
.driver.owner = THIS_MODULE,
- .driver.pm = MCFQSPI_DEV_PM_OPS,
+ .driver.pm = &mcfqspi_pm,
.probe = mcfqspi_probe,
.remove = __devexit_p(mcfqspi_remove),
};
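
For reference, the ColdFire QSPI hunks above replace the driver's private workqueue with the SPI core's message queue. The sketch below shows the minimal contract a transfer_one_message() callback follows in this kernel generation; it is illustrative only, not part of the patch, and example_do_transfer() is a hypothetical stand-in for the controller-specific PIO/DMA code.

/* Illustrative sketch of a queued-master callback; not from this patch. */
#include <linux/list.h>
#include <linux/spi/spi.h>

/* Hypothetical placeholder for the controller-specific PIO/DMA work. */
static int example_do_transfer(struct spi_device *spi, struct spi_transfer *t)
{
        return 0;
}

static int example_transfer_one_message(struct spi_master *master,
                                        struct spi_message *msg)
{
        struct spi_transfer *t;
        int status = 0;

        /*
         * The core has already dequeued msg and, if the controller was idle,
         * called ->prepare_transfer_hardware() before invoking this hook.
         */
        list_for_each_entry(t, &msg->transfers, transfer_list) {
                status = example_do_transfer(msg->spi, t);
                if (status)
                        break;
                msg->actual_length += t->len;
        }

        /*
         * Hand the message back to the core; it completes msg and pumps the
         * next one, calling ->unprepare_transfer_hardware() once idle again.
         */
        msg->status = status;
        spi_finalize_current_message(master);
        return status;
}
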
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 14f7cc9523f0..ff81abbb3066 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -164,18 +164,7 @@ static struct pci_driver dw_spi_driver = {
.resume = spi_resume,
};
-static int __init mrst_spi_init(void)
-{
- return pci_register_driver(&dw_spi_driver);
-}
-
-static void __exit mrst_spi_exit(void)
-{
- pci_unregister_driver(&dw_spi_driver);
-}
-
-module_init(mrst_spi_init);
-module_exit(mrst_spi_exit);
+module_pci_driver(dw_spi_driver);
MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
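
For reference, module_pci_driver() used above is a convenience macro that is roughly equivalent to the init/exit boilerplate removed by this hunk, along these lines (approximate expansion, not part of the patch):

/* Approximate expansion of module_pci_driver(dw_spi_driver). */
static int __init dw_spi_driver_init(void)
{
        return pci_register_driver(&dw_spi_driver);
}
module_init(dw_spi_driver_init);

static void __exit dw_spi_driver_exit(void)
{
        pci_unregister_driver(&dw_spi_driver);
}
module_exit(dw_spi_driver_exit);
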
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index e8055073e84d..f97f1d248800 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -76,7 +76,6 @@
* @clk: clock for the controller
* @regs_base: pointer to ioremap()'d registers
* @sspdr_phys: physical address of the SSPDR register
- * @irq: IRQ number used by the driver
* @min_rate: minimum clock rate (in Hz) supported by the controller
* @max_rate: maximum clock rate (in Hz) supported by the controller
* @running: is the queue running
@@ -114,7 +113,6 @@ struct ep93xx_spi {
struct clk *clk;
void __iomem *regs_base;
unsigned long sspdr_phys;
- int irq;
unsigned long min_rate;
unsigned long max_rate;
bool running;
@@ -1031,6 +1029,7 @@ static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
struct ep93xx_spi_info *info;
struct ep93xx_spi *espi;
struct resource *res;
+ int irq;
int error;
info = pdev->dev.platform_data;
@@ -1070,8 +1069,8 @@ static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
espi->pdev = pdev;
- espi->irq = platform_get_irq(pdev, 0);
- if (espi->irq < 0) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
error = -EBUSY;
dev_err(&pdev->dev, "failed to get irq resources\n");
goto fail_put_clock;
@@ -1084,26 +1083,20 @@ static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
goto fail_put_clock;
}
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!res) {
- dev_err(&pdev->dev, "unable to request iomem resources\n");
- error = -EBUSY;
- goto fail_put_clock;
- }
-
espi->sspdr_phys = res->start + SSPDR;
- espi->regs_base = ioremap(res->start, resource_size(res));
+
+ espi->regs_base = devm_request_and_ioremap(&pdev->dev, res);
if (!espi->regs_base) {
dev_err(&pdev->dev, "failed to map resources\n");
error = -ENODEV;
- goto fail_free_mem;
+ goto fail_put_clock;
}
- error = request_irq(espi->irq, ep93xx_spi_interrupt, 0,
- "ep93xx-spi", espi);
+ error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
+ 0, "ep93xx-spi", espi);
if (error) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto fail_unmap_regs;
+ goto fail_put_clock;
}
if (info->use_dma && ep93xx_spi_setup_dma(espi))
@@ -1128,7 +1121,7 @@ static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
- (unsigned long)res->start, espi->irq);
+ (unsigned long)res->start, irq);
return 0;
@@ -1136,11 +1129,6 @@ fail_free_queue:
destroy_workqueue(espi->wq);
fail_free_dma:
ep93xx_spi_release_dma(espi);
- free_irq(espi->irq, espi);
-fail_unmap_regs:
- iounmap(espi->regs_base);
-fail_free_mem:
- release_mem_region(res->start, resource_size(res));
fail_put_clock:
clk_put(espi->clk);
fail_release_master:
@@ -1154,7 +1142,6 @@ static int __devexit ep93xx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct ep93xx_spi *espi = spi_master_get_devdata(master);
- struct resource *res;
spin_lock_irq(&espi->lock);
espi->running = false;
@@ -1180,10 +1167,6 @@ static int __devexit ep93xx_spi_remove(struct platform_device *pdev)
spin_unlock_irq(&espi->lock);
ep93xx_spi_release_dma(espi);
- free_irq(espi->irq, espi);
- iounmap(espi->regs_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
clk_put(espi->clk);
platform_set_drvdata(pdev, NULL);
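
The ep93xx conversion above moves to device-managed resources, which is why free_irq(), iounmap() and release_mem_region() disappear from both the error paths and remove(). A hedged sketch of the devm pattern, with hypothetical names, for reference:

/* Sketch of devm-managed MMIO and IRQ setup in a platform probe. */
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;
        int irq, ret;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_request_and_ioremap(&pdev->dev, res);
        if (!base)
                return -ENODEV;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
                               dev_name(&pdev->dev), pdev);
        if (ret)
                return ret;

        /*
         * The region, the mapping and the IRQ are all released automatically
         * when probe fails later on or when the device is unbound, so no
         * explicit cleanup is needed in remove() or on the error paths.
         */
        return 0;
}
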
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7523a2429d09..27bdc47b5250 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -17,7 +17,6 @@
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <sysdev/fsl_soc.h>
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 2674fad7f68a..1503574b215a 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -22,7 +22,7 @@
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/of_platform.h>
-#include <linux/of_spi.h>
+#include <linux/spi/spi.h>
#include <sysdev/fsl_soc.h>
#include "spi-fsl-lib.h"
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 5f748c0d96bd..6a62934ca74c 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -933,7 +933,7 @@ err:
static void fsl_spi_cs_control(struct spi_device *spi, bool on)
{
- struct device *dev = spi->dev.parent;
+ struct device *dev = spi->dev.parent->parent;
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data);
u16 cs = spi->chip_select;
int gpio = pinfo->gpios[cs];
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 69c9a6601f45..47877d687614 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -86,7 +86,8 @@ struct spi_imx_data {
struct completion xfer_done;
void __iomem *base;
int irq;
- struct clk *clk;
+ struct clk *clk_per;
+ struct clk *clk_ipg;
unsigned long spi_clk;
unsigned int count;
@@ -853,15 +854,22 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
goto out_free_irq;
}
- spi_imx->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(spi_imx->clk)) {
- dev_err(&pdev->dev, "unable to get clock\n");
- ret = PTR_ERR(spi_imx->clk);
+ spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(spi_imx->clk_ipg)) {
+ ret = PTR_ERR(spi_imx->clk_ipg);
goto out_free_irq;
}
- clk_enable(spi_imx->clk);
- spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
+ spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(spi_imx->clk_per)) {
+ ret = PTR_ERR(spi_imx->clk_per);
+ goto out_free_irq;
+ }
+
+ clk_prepare_enable(spi_imx->clk_per);
+ clk_prepare_enable(spi_imx->clk_ipg);
+
+ spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
spi_imx->devtype_data->reset(spi_imx);
@@ -879,8 +887,8 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
return ret;
out_clk_put:
- clk_disable(spi_imx->clk);
- clk_put(spi_imx->clk);
+ clk_disable_unprepare(spi_imx->clk_per);
+ clk_disable_unprepare(spi_imx->clk_ipg);
out_free_irq:
free_irq(spi_imx->irq, spi_imx);
out_iounmap:
@@ -908,8 +916,8 @@ static int __devexit spi_imx_remove(struct platform_device *pdev)
spi_bitbang_stop(&spi_imx->bitbang);
writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_disable(spi_imx->clk);
- clk_put(spi_imx->clk);
+ clk_disable_unprepare(spi_imx->clk_per);
+ clk_disable_unprepare(spi_imx->clk_ipg);
free_irq(spi_imx->irq, spi_imx);
iounmap(spi_imx->base);
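
The i.MX hunks above adapt the driver to the common clock framework: the single clk is split into "ipg" and "per" clocks, clk_get() becomes devm_clk_get(), and each enable/disable is replaced by the prepare+enable pair. A minimal sketch of that idiom with one hypothetical clock name:

/* Sketch: gating one clock under the common clock framework. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_clk_setup(struct platform_device *pdev)
{
        struct clk *clk;
        int ret;

        /* Managed lookup: the reference is dropped automatically on unbind. */
        clk = devm_clk_get(&pdev->dev, "per");
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        /*
         * clk_prepare_enable() == clk_prepare() + clk_enable(); the prepare
         * step may sleep, so the combined call must not run in atomic context.
         */
        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        dev_info(&pdev->dev, "clock rate: %lu Hz\n", clk_get_rate(clk));

        /* Matching teardown for remove() and error paths. */
        clk_disable_unprepare(clk);
        return 0;
}
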
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index 933eb9d9ddd4..0759b5db9883 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -219,9 +219,6 @@ static void spi_lm70llp_attach(struct parport *p)
}
pp = spi_master_get_devdata(master);
- master->bus_num = -1; /* dynamic alloc of a bus number */
- master->num_chipselect = 1;
-
/*
* SPI and bitbang hookup.
*/
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 57633d963456..cb3a3830b0a5 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -433,7 +433,6 @@ static int __devinit mpc52xx_spi_probe(struct platform_device *op)
goto err_alloc;
}
- master->bus_num = -1;
master->setup = mpc52xx_spi_setup;
master->transfer = mpc52xx_spi_transfer;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
@@ -479,8 +478,6 @@ static int __devinit mpc52xx_spi_probe(struct platform_device *op)
gpio_direction_output(gpio_cs, 1);
ms->gpio_cs[i] = gpio_cs;
}
- } else {
- master->num_chipselect = 1;
}
spin_lock_init(&ms->lock);
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index bb9274c2526d..46ef5fe51db5 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -44,9 +44,7 @@
#include <plat/mcspi.h>
#define OMAP2_MCSPI_MAX_FREQ 48000000
-
-/* OMAP2 has 3 SPI controllers, while OMAP3 has 4 */
-#define OMAP2_MCSPI_MAX_CTRL 4
+#define SPI_AUTOSUSPEND_TIMEOUT 2000
#define OMAP2_MCSPI_REVISION 0x00
#define OMAP2_MCSPI_SYSSTATUS 0x14
@@ -111,19 +109,25 @@ struct omap2_mcspi_dma {
#define DMA_MIN_BYTES 160
+/*
+ * Used for context save and restore, structure members to be updated whenever
+ * corresponding registers are modified.
+ */
+struct omap2_mcspi_regs {
+ u32 modulctrl;
+ u32 wakeupenable;
+ struct list_head cs;
+};
+
struct omap2_mcspi {
- struct work_struct work;
- /* lock protects queue and registers */
- spinlock_t lock;
- struct list_head msg_queue;
struct spi_master *master;
/* Virtual base address of the controller */
void __iomem *base;
unsigned long phys;
/* SPI1 has 4 channels, while SPI2 has 2 */
struct omap2_mcspi_dma *dma_channels;
- struct device *dev;
- struct workqueue_struct *wq;
+ struct device *dev;
+ struct omap2_mcspi_regs ctx;
};
struct omap2_mcspi_cs {
@@ -135,17 +139,6 @@ struct omap2_mcspi_cs {
u32 chconf0;
};
-/* used for context save and restore, structure members to be updated whenever
- * corresponding registers are modified.
- */
-struct omap2_mcspi_regs {
- u32 modulctrl;
- u32 wakeupenable;
- struct list_head cs;
-};
-
-static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL];
-
#define MOD_REG_BIT(val, mask, set) do { \
if (set) \
val |= mask; \
@@ -236,9 +229,12 @@ static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
static void omap2_mcspi_set_master_mode(struct spi_master *master)
{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
u32 l;
- /* setup when switching from (reset default) slave mode
+ /*
+ * Setup when switching from (reset default) slave mode
* to single-channel master mode
*/
l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
@@ -247,29 +243,26 @@ static void omap2_mcspi_set_master_mode(struct spi_master *master)
MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
- omap2_mcspi_ctx[master->bus_num - 1].modulctrl = l;
+ ctx->modulctrl = l;
}
static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
{
- struct spi_master *spi_cntrl;
- struct omap2_mcspi_cs *cs;
- spi_cntrl = mcspi->master;
+ struct spi_master *spi_cntrl = mcspi->master;
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs;
/* McSPI: context restore */
- mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL,
- omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl);
-
- mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE,
- omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable);
+ mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
+ mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
- list_for_each_entry(cs, &omap2_mcspi_ctx[spi_cntrl->bus_num - 1].cs,
- node)
+ list_for_each_entry(cs, &ctx->cs, node)
__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}
static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
{
- pm_runtime_put_sync(mcspi->dev);
+ pm_runtime_mark_last_busy(mcspi->dev);
+ pm_runtime_put_autosuspend(mcspi->dev);
}
static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
@@ -277,6 +270,23 @@ static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
return pm_runtime_get_sync(mcspi->dev);
}
+static int omap2_prepare_transfer(struct spi_master *master)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+ pm_runtime_get_sync(mcspi->dev);
+ return 0;
+}
+
+static int omap2_unprepare_transfer(struct spi_master *master)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+ pm_runtime_mark_last_busy(mcspi->dev);
+ pm_runtime_put_autosuspend(mcspi->dev);
+ return 0;
+}
+
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
unsigned long timeout;
@@ -777,7 +787,8 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
static int omap2_mcspi_setup(struct spi_device *spi)
{
int ret;
- struct omap2_mcspi *mcspi;
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
struct omap2_mcspi_dma *mcspi_dma;
struct omap2_mcspi_cs *cs = spi->controller_state;
@@ -787,11 +798,10 @@ static int omap2_mcspi_setup(struct spi_device *spi)
return -EINVAL;
}
- mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->base = mcspi->base + spi->chip_select * 0x14;
@@ -799,8 +809,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
cs->chconf0 = 0;
spi->controller_state = cs;
/* Link this to context save list */
- list_add_tail(&cs->node,
- &omap2_mcspi_ctx[mcspi->master->bus_num - 1].cs);
+ list_add_tail(&cs->node, &ctx->cs);
}
if (mcspi_dma->dma_rx_channel == -1
@@ -833,7 +842,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
cs = spi->controller_state;
list_del(&cs->node);
- kfree(spi->controller_state);
}
if (spi->chip_select < spi->master->num_chipselect) {
@@ -850,144 +858,122 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
}
}
-static void omap2_mcspi_work(struct work_struct *work)
+static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
{
- struct omap2_mcspi *mcspi;
-
- mcspi = container_of(work, struct omap2_mcspi, work);
-
- if (omap2_mcspi_enable_clocks(mcspi) < 0)
- return;
-
- spin_lock_irq(&mcspi->lock);
/* We only enable one channel at a time -- the one whose message is
- * at the head of the queue -- although this controller would gladly
+ * -- although this controller would gladly
* arbitrate among multiple channels. This corresponds to "single
* channel" master mode. As a side effect, we need to manage the
* chipselect with the FORCE bit ... CS != channel enable.
*/
- while (!list_empty(&mcspi->msg_queue)) {
- struct spi_message *m;
- struct spi_device *spi;
- struct spi_transfer *t = NULL;
- int cs_active = 0;
- struct omap2_mcspi_cs *cs;
- struct omap2_mcspi_device_config *cd;
- int par_override = 0;
- int status = 0;
- u32 chconf;
-
- m = container_of(mcspi->msg_queue.next, struct spi_message,
- queue);
-
- list_del_init(&m->queue);
- spin_unlock_irq(&mcspi->lock);
-
- spi = m->spi;
- cs = spi->controller_state;
- cd = spi->controller_data;
- omap2_mcspi_set_enable(spi, 1);
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
- status = -EINVAL;
- break;
- }
- if (par_override || t->speed_hz || t->bits_per_word) {
- par_override = 1;
- status = omap2_mcspi_setup_transfer(spi, t);
- if (status < 0)
- break;
- if (!t->speed_hz && !t->bits_per_word)
- par_override = 0;
- }
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ int cs_active = 0;
+ struct omap2_mcspi_cs *cs;
+ struct omap2_mcspi_device_config *cd;
+ int par_override = 0;
+ int status = 0;
+ u32 chconf;
- if (!cs_active) {
- omap2_mcspi_force_cs(spi, 1);
- cs_active = 1;
- }
+ spi = m->spi;
+ cs = spi->controller_state;
+ cd = spi->controller_data;
- chconf = mcspi_cached_chconf0(spi);
- chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
- chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
+ omap2_mcspi_set_enable(spi, 1);
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
+ status = -EINVAL;
+ break;
+ }
+ if (par_override || t->speed_hz || t->bits_per_word) {
+ par_override = 1;
+ status = omap2_mcspi_setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ if (!t->speed_hz && !t->bits_per_word)
+ par_override = 0;
+ }
- if (t->tx_buf == NULL)
- chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
- else if (t->rx_buf == NULL)
- chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
-
- if (cd && cd->turbo_mode && t->tx_buf == NULL) {
- /* Turbo mode is for more than one word */
- if (t->len > ((cs->word_len + 7) >> 3))
- chconf |= OMAP2_MCSPI_CHCONF_TURBO;
- }
+ if (!cs_active) {
+ omap2_mcspi_force_cs(spi, 1);
+ cs_active = 1;
+ }
- mcspi_write_chconf0(spi, chconf);
+ chconf = mcspi_cached_chconf0(spi);
+ chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
+ chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
- if (t->len) {
- unsigned count;
+ if (t->tx_buf == NULL)
+ chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
+ else if (t->rx_buf == NULL)
+ chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
- /* RX_ONLY mode needs dummy data in TX reg */
- if (t->tx_buf == NULL)
- __raw_writel(0, cs->base
- + OMAP2_MCSPI_TX0);
+ if (cd && cd->turbo_mode && t->tx_buf == NULL) {
+ /* Turbo mode is for more than one word */
+ if (t->len > ((cs->word_len + 7) >> 3))
+ chconf |= OMAP2_MCSPI_CHCONF_TURBO;
+ }
- if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
- count = omap2_mcspi_txrx_dma(spi, t);
- else
- count = omap2_mcspi_txrx_pio(spi, t);
- m->actual_length += count;
+ mcspi_write_chconf0(spi, chconf);
- if (count != t->len) {
- status = -EIO;
- break;
- }
- }
+ if (t->len) {
+ unsigned count;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ /* RX_ONLY mode needs dummy data in TX reg */
+ if (t->tx_buf == NULL)
+ __raw_writel(0, cs->base
+ + OMAP2_MCSPI_TX0);
+
+ if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
+ count = omap2_mcspi_txrx_dma(spi, t);
+ else
+ count = omap2_mcspi_txrx_pio(spi, t);
+ m->actual_length += count;
- /* ignore the "leave it on after last xfer" hint */
- if (t->cs_change) {
- omap2_mcspi_force_cs(spi, 0);
- cs_active = 0;
+ if (count != t->len) {
+ status = -EIO;
+ break;
}
}
- /* Restore defaults if they were overriden */
- if (par_override) {
- par_override = 0;
- status = omap2_mcspi_setup_transfer(spi, NULL);
- }
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
- if (cs_active)
+ /* ignore the "leave it on after last xfer" hint */
+ if (t->cs_change) {
omap2_mcspi_force_cs(spi, 0);
+ cs_active = 0;
+ }
+ }
+ /* Restore defaults if they were overriden */
+ if (par_override) {
+ par_override = 0;
+ status = omap2_mcspi_setup_transfer(spi, NULL);
+ }
- omap2_mcspi_set_enable(spi, 0);
-
- m->status = status;
- m->complete(m->context);
+ if (cs_active)
+ omap2_mcspi_force_cs(spi, 0);
- spin_lock_irq(&mcspi->lock);
- }
+ omap2_mcspi_set_enable(spi, 0);
- spin_unlock_irq(&mcspi->lock);
+ m->status = status;
- omap2_mcspi_disable_clocks(mcspi);
}
-static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
+static int omap2_mcspi_transfer_one_message(struct spi_master *master,
+ struct spi_message *m)
{
struct omap2_mcspi *mcspi;
- unsigned long flags;
struct spi_transfer *t;
+ mcspi = spi_master_get_devdata(master);
m->actual_length = 0;
m->status = 0;
/* reject invalid messages and transfers */
- if (list_empty(&m->transfers) || !m->complete)
+ if (list_empty(&m->transfers))
return -EINVAL;
list_for_each_entry(t, &m->transfers, transfer_list) {
const void *tx_buf = t->tx_buf;
@@ -999,7 +985,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
|| (t->bits_per_word &&
( t->bits_per_word < 4
|| t->bits_per_word > 32))) {
- dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
+ dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
t->speed_hz,
len,
tx_buf ? "tx" : "",
@@ -1008,7 +994,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
return -EINVAL;
}
if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
- dev_dbg(&spi->dev, "speed_hz %d below minimum %d Hz\n",
+ dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n",
t->speed_hz,
OMAP2_MCSPI_MAX_FREQ >> 15);
return -EINVAL;
@@ -1018,51 +1004,46 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
continue;
if (tx_buf != NULL) {
- t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
+ t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
len, DMA_TO_DEVICE);
- if (dma_mapping_error(&spi->dev, t->tx_dma)) {
- dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
+ if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
+ dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
'T', len);
return -EINVAL;
}
}
if (rx_buf != NULL) {
- t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
+ t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&spi->dev, t->rx_dma)) {
- dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
+ if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
+ dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
'R', len);
if (tx_buf != NULL)
- dma_unmap_single(&spi->dev, t->tx_dma,
+ dma_unmap_single(mcspi->dev, t->tx_dma,
len, DMA_TO_DEVICE);
return -EINVAL;
}
}
}
- mcspi = spi_master_get_devdata(spi->master);
-
- spin_lock_irqsave(&mcspi->lock, flags);
- list_add_tail(&m->queue, &mcspi->msg_queue);
- queue_work(mcspi->wq, &mcspi->work);
- spin_unlock_irqrestore(&mcspi->lock, flags);
-
+ omap2_mcspi_work(mcspi, m);
+ spi_finalize_current_message(master);
return 0;
}
static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
{
struct spi_master *master = mcspi->master;
- u32 tmp;
- int ret = 0;
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ int ret = 0;
ret = omap2_mcspi_enable_clocks(mcspi);
if (ret < 0)
return ret;
- tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
- mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp);
- omap2_mcspi_ctx[master->bus_num - 1].wakeupenable = tmp;
+ mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
+ OMAP2_MCSPI_WAKEUPENABLE_WKEN);
+ ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
omap2_mcspi_set_master_mode(master);
omap2_mcspi_disable_clocks(mcspi);
@@ -1102,14 +1083,13 @@ static const struct of_device_id omap_mcspi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
-static int __init omap2_mcspi_probe(struct platform_device *pdev)
+static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct omap2_mcspi_platform_config *pdata;
struct omap2_mcspi *mcspi;
struct resource *r;
int status = 0, i;
- char wq_name[20];
u32 regs_offset = 0;
static int bus_num = 1;
struct device_node *node = pdev->dev.of_node;
@@ -1125,7 +1105,9 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = omap2_mcspi_setup;
- master->transfer = omap2_mcspi_transfer;
+ master->prepare_transfer_hardware = omap2_prepare_transfer;
+ master->unprepare_transfer_hardware = omap2_unprepare_transfer;
+ master->transfer_one_message = omap2_mcspi_transfer_one_message;
master->cleanup = omap2_mcspi_cleanup;
master->dev.of_node = node;
@@ -1150,13 +1132,6 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
mcspi = spi_master_get_devdata(master);
mcspi->master = master;
- sprintf(wq_name, "omap2_mcspi/%d", master->bus_num);
- mcspi->wq = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 1);
- if (mcspi->wq == NULL) {
- status = -ENOMEM;
- goto free_master;
- }
-
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
status = -ENODEV;
@@ -1166,32 +1141,24 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
r->start += regs_offset;
r->end += regs_offset;
mcspi->phys = r->start;
- if (!request_mem_region(r->start, resource_size(r),
- dev_name(&pdev->dev))) {
- status = -EBUSY;
- goto free_master;
- }
- mcspi->base = ioremap(r->start, resource_size(r));
+ mcspi->base = devm_request_and_ioremap(&pdev->dev, r);
if (!mcspi->base) {
dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
status = -ENOMEM;
- goto release_region;
+ goto free_master;
}
mcspi->dev = &pdev->dev;
- INIT_WORK(&mcspi->work, omap2_mcspi_work);
- spin_lock_init(&mcspi->lock);
- INIT_LIST_HEAD(&mcspi->msg_queue);
- INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs);
+ INIT_LIST_HEAD(&mcspi->ctx.cs);
mcspi->dma_channels = kcalloc(master->num_chipselect,
sizeof(struct omap2_mcspi_dma),
GFP_KERNEL);
if (mcspi->dma_channels == NULL)
- goto unmap_io;
+ goto free_master;
for (i = 0; i < master->num_chipselect; i++) {
char dma_ch_name[14];
@@ -1224,6 +1191,8 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
if (status < 0)
goto dma_chnl_free;
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_enable(&pdev->dev);
if (status || omap2_mcspi_master_setup(mcspi) < 0)
@@ -1241,23 +1210,17 @@ disable_pm:
pm_runtime_disable(&pdev->dev);
dma_chnl_free:
kfree(mcspi->dma_channels);
-unmap_io:
- iounmap(mcspi->base);
-release_region:
- release_mem_region(r->start, resource_size(r));
free_master:
kfree(master);
platform_set_drvdata(pdev, NULL);
return status;
}
-static int __exit omap2_mcspi_remove(struct platform_device *pdev)
+static int __devexit omap2_mcspi_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *dma_channels;
- struct resource *r;
- void __iomem *base;
master = dev_get_drvdata(&pdev->dev);
mcspi = spi_master_get_devdata(master);
@@ -1265,14 +1228,9 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
omap2_mcspi_disable_clocks(mcspi);
pm_runtime_disable(&pdev->dev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, resource_size(r));
- base = mcspi->base;
spi_unregister_master(master);
- iounmap(base);
kfree(dma_channels);
- destroy_workqueue(mcspi->wq);
platform_set_drvdata(pdev, NULL);
return 0;
@@ -1291,13 +1249,12 @@ static int omap2_mcspi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
- struct omap2_mcspi_cs *cs;
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs;
omap2_mcspi_enable_clocks(mcspi);
- list_for_each_entry(cs, &omap2_mcspi_ctx[master->bus_num - 1].cs,
- node) {
+ list_for_each_entry(cs, &ctx->cs, node) {
if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
-
/*
* We need to toggle CS state for OMAP take this
* change in account.
@@ -1327,21 +1284,9 @@ static struct platform_driver omap2_mcspi_driver = {
.pm = &omap2_mcspi_pm_ops,
.of_match_table = omap_mcspi_of_match,
},
- .remove = __exit_p(omap2_mcspi_remove),
+ .probe = omap2_mcspi_probe,
+ .remove = __devexit_p(omap2_mcspi_remove),
};
-
-static int __init omap2_mcspi_init(void)
-{
- return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
-}
-subsys_initcall(omap2_mcspi_init);
-
-static void __exit omap2_mcspi_exit(void)
-{
- platform_driver_unregister(&omap2_mcspi_driver);
-
-}
-module_exit(omap2_mcspi_exit);
-
+module_platform_driver(omap2_mcspi_driver);
MODULE_LICENSE("GPL");
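
The omap2-mcspi changes above switch runtime PM to the autosuspend model so the controller is not powered down between closely spaced messages; SPI_AUTOSUSPEND_TIMEOUT (2000 ms) sets the idle window. A sketch of the idiom, with hypothetical function names:

/* Sketch: runtime-PM autosuspend so the device stays up for a while after
 * the last put, instead of suspending immediately. */
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static void example_enable_runtime_pm(struct platform_device *pdev)
{
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);     /* ms */
        pm_runtime_enable(&pdev->dev);
}

static void example_io_path(struct device *dev)
{
        pm_runtime_get_sync(dev);       /* resume (or hold) the hardware */

        /* ... perform the transfer ... */

        /*
         * Instead of an immediate put, restart the idle timer and drop the
         * reference; the runtime suspend callback only runs once the device
         * has been idle for the autosuspend delay.
         */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
}
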
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index e496f799b7a9..dfd04e91fa6d 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -16,8 +16,8 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <linux/module.h>
+#include <linux/clk.h>
#include <asm/unaligned.h>
#define DRIVER_NAME "orion_spi"
@@ -46,6 +46,7 @@ struct orion_spi {
unsigned int max_speed;
unsigned int min_speed;
struct orion_spi_info *spi_info;
+ struct clk *clk;
};
static struct workqueue_struct *orion_spi_wq;
@@ -104,7 +105,7 @@ static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
orion_spi = spi_master_get_devdata(spi->master);
- tclk_hz = orion_spi->spi_info->tclk;
+ tclk_hz = clk_get_rate(orion_spi->clk);
/*
* the supported rates are: 4,6,8...30
@@ -450,6 +451,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
struct orion_spi *spi;
struct resource *r;
struct orion_spi_info *spi_info;
+ unsigned long tclk_hz;
int status = 0;
spi_info = pdev->dev.platform_data;
@@ -476,19 +478,28 @@ static int __init orion_spi_probe(struct platform_device *pdev)
spi->master = master;
spi->spi_info = spi_info;
- spi->max_speed = DIV_ROUND_UP(spi_info->tclk, 4);
- spi->min_speed = DIV_ROUND_UP(spi_info->tclk, 30);
+ spi->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(spi->clk)) {
+ status = PTR_ERR(spi->clk);
+ goto out;
+ }
+
+ clk_prepare(spi->clk);
+ clk_enable(spi->clk);
+ tclk_hz = clk_get_rate(spi->clk);
+ spi->max_speed = DIV_ROUND_UP(tclk_hz, 4);
+ spi->min_speed = DIV_ROUND_UP(tclk_hz, 30);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
status = -ENODEV;
- goto out;
+ goto out_rel_clk;
}
if (!request_mem_region(r->start, resource_size(r),
dev_name(&pdev->dev))) {
status = -EBUSY;
- goto out;
+ goto out_rel_clk;
}
spi->base = ioremap(r->start, SZ_1K);
@@ -508,7 +519,9 @@ static int __init orion_spi_probe(struct platform_device *pdev)
out_rel_mem:
release_mem_region(r->start, resource_size(r));
-
+out_rel_clk:
+ clk_disable_unprepare(spi->clk);
+ clk_put(spi->clk);
out:
spi_master_put(master);
return status;
@@ -526,6 +539,9 @@ static int __exit orion_spi_remove(struct platform_device *pdev)
cancel_work_sync(&spi->work);
+ clk_disable_unprepare(spi->clk);
+ clk_put(spi->clk);
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(r->start, resource_size(r));
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 98ec53285fc7..75ac9d48ef46 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -30,7 +30,6 @@
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/of_platform.h>
-#include <linux/of_spi.h>
#include <linux/of_gpio.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -467,9 +466,6 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op)
bbp->master->setup = spi_ppc4xx_setup;
bbp->master->cleanup = spi_ppc4xx_cleanup;
- /* Allocate bus num dynamically. */
- bbp->master->bus_num = -1;
-
/* the spi->mode bits understood by this driver: */
bbp->master->mode_bits =
SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST;
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 3fb44afe27b4..9f6ba34b172c 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -164,17 +164,7 @@ static struct pci_driver ce4100_spi_driver = {
.remove = __devexit_p(ce4100_spi_remove),
};
-static int __init ce4100_spi_init(void)
-{
- return pci_register_driver(&ce4100_spi_driver);
-}
-module_init(ce4100_spi_init);
-
-static void __exit ce4100_spi_exit(void)
-{
- pci_unregister_driver(&ce4100_spi_driver);
-}
-module_exit(ce4100_spi_exit);
+module_pci_driver(ce4100_spi_driver);
MODULE_DESCRIPTION("CE4100 PCI-SPI glue code for PXA's driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 354f170eab95..4894bde4bbff 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -31,7 +31,11 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
+#include <linux/spi/rspi.h>
#define RSPI_SPCR 0x00
#define RSPI_SSLP 0x01
@@ -141,6 +145,16 @@ struct rspi_data {
spinlock_t lock;
struct clk *clk;
unsigned char spsr;
+
+ /* for dmaengine */
+ struct sh_dmae_slave dma_tx;
+ struct sh_dmae_slave dma_rx;
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+ int irq;
+
+ unsigned dma_width_16bit:1;
+ unsigned dma_callbacked:1;
};
static void rspi_write8(struct rspi_data *rspi, u8 data, u16 offset)
@@ -265,11 +279,125 @@ static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
return 0;
}
-static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
+static void rspi_dma_complete(void *arg)
+{
+ struct rspi_data *rspi = arg;
+
+ rspi->dma_callbacked = 1;
+ wake_up_interruptible(&rspi->wait);
+}
+
+static int rspi_dma_map_sg(struct scatterlist *sg, void *buf, unsigned len,
+ struct dma_chan *chan,
+ enum dma_transfer_direction dir)
+{
+ sg_init_table(sg, 1);
+ sg_set_buf(sg, buf, len);
+ sg_dma_len(sg) = len;
+ return dma_map_sg(chan->device->dev, sg, 1, dir);
+}
+
+static void rspi_dma_unmap_sg(struct scatterlist *sg, struct dma_chan *chan,
+ enum dma_transfer_direction dir)
+{
+ dma_unmap_sg(chan->device->dev, sg, 1, dir);
+}
+
+static void rspi_memory_to_8bit(void *buf, const void *data, unsigned len)
+{
+ u16 *dst = buf;
+ const u8 *src = data;
+
+ while (len) {
+ *dst++ = (u16)(*src++);
+ len--;
+ }
+}
+
+static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len)
+{
+ u8 *dst = buf;
+ const u16 *src = data;
+
+ while (len) {
+ *dst++ = (u8)*src++;
+ len--;
+ }
+}
+
+static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
+{
+ struct scatterlist sg;
+ void *buf = NULL;
+ struct dma_async_tx_descriptor *desc;
+ unsigned len;
+ int ret = 0;
+
+ if (rspi->dma_width_16bit) {
+ /*
+ * If the DMAC bus width is 16-bit, the driver allocates a dummy
+ * buffer and converts the original data into DMAC data in the
+ * following format:
+ * original data: 1st byte, 2nd byte ...
+ * DMAC data: 1st byte, dummy, 2nd byte, dummy ...
+ */
+ len = t->len * 2;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ rspi_memory_to_8bit(buf, t->tx_buf, t->len);
+ } else {
+ len = t->len;
+ buf = (void *)t->tx_buf;
+ }
+
+ if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) {
+ ret = -EFAULT;
+ goto end_nomap;
+ }
+ desc = dmaengine_prep_slave_sg(rspi->chan_tx, &sg, 1, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ ret = -EIO;
+ goto end;
+ }
+
+ /*
+ * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
+ * called. So, the driver disables the IRQ during the DMA transfer.
+ */
+ disable_irq(rspi->irq);
+
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR);
+ rspi_enable_irq(rspi, SPCR_SPTIE);
+ rspi->dma_callbacked = 0;
+
+ desc->callback = rspi_dma_complete;
+ desc->callback_param = rspi;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(rspi->chan_tx);
+
+ ret = wait_event_interruptible_timeout(rspi->wait,
+ rspi->dma_callbacked, HZ);
+ if (ret > 0 && rspi->dma_callbacked)
+ ret = 0;
+ else if (!ret)
+ ret = -ETIMEDOUT;
+ rspi_disable_irq(rspi, SPCR_SPTIE);
+
+ enable_irq(rspi->irq);
+
+end:
+ rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE);
+end_nomap:
+ if (rspi->dma_width_16bit)
+ kfree(buf);
+
+ return ret;
+}
+
+static void rspi_receive_init(struct rspi_data *rspi)
{
- int remain = t->len;
- u8 *data;
unsigned char spsr;
spsr = rspi_read8(rspi, RSPI_SPSR);
@@ -278,6 +406,15 @@ static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
if (spsr & SPSR_OVRF)
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
RSPI_SPCR);
+}
+
+static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
+ struct spi_transfer *t)
+{
+ int remain = t->len;
+ u8 *data;
+
+ rspi_receive_init(rspi);
data = (u8 *)t->rx_buf;
while (remain > 0) {
@@ -307,6 +444,120 @@ static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
return 0;
}
+static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
+{
+ struct scatterlist sg, sg_dummy;
+ void *dummy = NULL, *rx_buf = NULL;
+ struct dma_async_tx_descriptor *desc, *desc_dummy;
+ unsigned len;
+ int ret = 0;
+
+ if (rspi->dma_width_16bit) {
+ /*
+ * If the DMAC bus width is 16-bit, the driver allocates a dummy
+ * buffer and finally converts the DMAC data back into the
+ * actual data in the following format:
+ * DMAC data: 1st byte, dummy, 2nd byte, dummy ...
+ * actual data: 1st byte, 2nd byte ...
+ */
+ len = t->len * 2;
+ rx_buf = kmalloc(len, GFP_KERNEL);
+ if (!rx_buf)
+ return -ENOMEM;
+ } else {
+ len = t->len;
+ rx_buf = t->rx_buf;
+ }
+
+ /* prepare dummy transfer to generate SPI clocks */
+ dummy = kzalloc(len, GFP_KERNEL);
+ if (!dummy) {
+ ret = -ENOMEM;
+ goto end_nomap;
+ }
+ if (!rspi_dma_map_sg(&sg_dummy, dummy, len, rspi->chan_tx,
+ DMA_TO_DEVICE)) {
+ ret = -EFAULT;
+ goto end_nomap;
+ }
+ desc_dummy = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_dummy, 1,
+ DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_dummy) {
+ ret = -EIO;
+ goto end_dummy_mapped;
+ }
+
+ /* prepare receive transfer */
+ if (!rspi_dma_map_sg(&sg, rx_buf, len, rspi->chan_rx,
+ DMA_FROM_DEVICE)) {
+ ret = -EFAULT;
+ goto end_dummy_mapped;
+
+ }
+ desc = dmaengine_prep_slave_sg(rspi->chan_rx, &sg, 1, DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ ret = -EIO;
+ goto end;
+ }
+
+ rspi_receive_init(rspi);
+
+ /*
+ * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
+ * called. So, the driver disables the IRQ during the DMA transfer.
+ */
+ disable_irq(rspi->irq);
+
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
+ rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
+ rspi->dma_callbacked = 0;
+
+ desc->callback = rspi_dma_complete;
+ desc->callback_param = rspi;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(rspi->chan_rx);
+
+ desc_dummy->callback = NULL; /* No callback */
+ dmaengine_submit(desc_dummy);
+ dma_async_issue_pending(rspi->chan_tx);
+
+ ret = wait_event_interruptible_timeout(rspi->wait,
+ rspi->dma_callbacked, HZ);
+ if (ret > 0 && rspi->dma_callbacked)
+ ret = 0;
+ else if (!ret)
+ ret = -ETIMEDOUT;
+ rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
+
+ enable_irq(rspi->irq);
+
+end:
+ rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
+end_dummy_mapped:
+ rspi_dma_unmap_sg(&sg_dummy, rspi->chan_tx, DMA_TO_DEVICE);
+end_nomap:
+ if (rspi->dma_width_16bit) {
+ if (!ret)
+ rspi_memory_from_8bit(t->rx_buf, rx_buf, t->len);
+ kfree(rx_buf);
+ }
+ kfree(dummy);
+
+ return ret;
+}
+
+static int rspi_is_dma(struct rspi_data *rspi, struct spi_transfer *t)
+{
+ if (t->tx_buf && rspi->chan_tx)
+ return 1;
+ /* If the module receives data by DMAC, it also needs TX DMAC */
+ if (t->rx_buf && rspi->chan_tx && rspi->chan_rx)
+ return 1;
+
+ return 0;
+}
+
static void rspi_work(struct work_struct *work)
{
struct rspi_data *rspi = container_of(work, struct rspi_data, ws);
@@ -325,12 +576,18 @@ static void rspi_work(struct work_struct *work)
list_for_each_entry(t, &mesg->transfers, transfer_list) {
if (t->tx_buf) {
- ret = rspi_send_pio(rspi, mesg, t);
+ if (rspi_is_dma(rspi, t))
+ ret = rspi_send_dma(rspi, t);
+ else
+ ret = rspi_send_pio(rspi, mesg, t);
if (ret < 0)
goto error;
}
if (t->rx_buf) {
- ret = rspi_receive_pio(rspi, mesg, t);
+ if (rspi_is_dma(rspi, t))
+ ret = rspi_receive_dma(rspi, t);
+ else
+ ret = rspi_receive_pio(rspi, mesg, t);
if (ret < 0)
goto error;
}
@@ -406,11 +663,58 @@ static irqreturn_t rspi_irq(int irq, void *_sr)
return ret;
}
+static bool rspi_filter(struct dma_chan *chan, void *filter_param)
+{
+ chan->private = filter_param;
+ return true;
+}
+
+static void __devinit rspi_request_dma(struct rspi_data *rspi,
+ struct platform_device *pdev)
+{
+ struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
+ dma_cap_mask_t mask;
+
+ if (!rspi_pd)
+ return;
+
+ rspi->dma_width_16bit = rspi_pd->dma_width_16bit;
+
+ /* If the module receives data by DMAC, it also needs TX DMAC */
+ if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ rspi->dma_rx.slave_id = rspi_pd->dma_rx_id;
+ rspi->chan_rx = dma_request_channel(mask, rspi_filter,
+ &rspi->dma_rx);
+ if (rspi->chan_rx)
+ dev_info(&pdev->dev, "Use DMA when rx.\n");
+ }
+ if (rspi_pd->dma_tx_id) {
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ rspi->dma_tx.slave_id = rspi_pd->dma_tx_id;
+ rspi->chan_tx = dma_request_channel(mask, rspi_filter,
+ &rspi->dma_tx);
+ if (rspi->chan_tx)
+ dev_info(&pdev->dev, "Use DMA when tx\n");
+ }
+}
+
+static void __devexit rspi_release_dma(struct rspi_data *rspi)
+{
+ if (rspi->chan_tx)
+ dma_release_channel(rspi->chan_tx);
+ if (rspi->chan_rx)
+ dma_release_channel(rspi->chan_rx);
+}
+
static int __devexit rspi_remove(struct platform_device *pdev)
{
struct rspi_data *rspi = dev_get_drvdata(&pdev->dev);
spi_unregister_master(rspi->master);
+ rspi_release_dma(rspi);
free_irq(platform_get_irq(pdev, 0), rspi);
clk_put(rspi->clk);
iounmap(rspi->addr);
@@ -483,6 +787,9 @@ static int __devinit rspi_probe(struct platform_device *pdev)
goto error3;
}
+ rspi->irq = irq;
+ rspi_request_dma(rspi, pdev);
+
ret = spi_register_master(master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
@@ -494,6 +801,7 @@ static int __devinit rspi_probe(struct platform_device *pdev)
return 0;
error4:
+ rspi_release_dma(rspi);
free_irq(irq, rspi);
error3:
clk_put(rspi->clk);
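
The rspi hunks above add dmaengine-based transfers next to the existing PIO paths. For orientation, a hedged sketch of the generic slave-DMA submission flow those hunks follow (names and the completion mechanism are illustrative):

/* Sketch of the dmaengine slave flow: prep -> submit -> issue pending. */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static void example_dma_done(void *arg)
{
        /* Runs from the DMA driver's completion context; typically wakes
         * a waitqueue or completes a struct completion. */
}

static int example_dma_submit(struct dma_chan *chan, struct scatterlist *sg,
                              enum dma_transfer_direction dir)
{
        struct dma_async_tx_descriptor *desc;

        /* Build a descriptor for an already dma_map_sg()'d scatterlist. */
        desc = dmaengine_prep_slave_sg(chan, sg, 1, dir,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -EIO;

        /* Attach the callback, queue the descriptor, start the engine. */
        desc->callback = example_dma_done;
        desc->callback_param = NULL;
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);

        /* The caller then waits for example_dma_done() before unmapping. */
        return 0;
}
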
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 52fe495bb32a..ecc3d9763d10 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -19,7 +19,7 @@
#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
-#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
#define DRIVER_NAME "sirfsoc_spi"
@@ -127,7 +127,7 @@ struct sirfsoc_spi {
void __iomem *base;
u32 ctrl_freq; /* SPI controller clock speed */
struct clk *clk;
- struct pinmux *pmx;
+ struct pinctrl *p;
/* rx & tx bufs from the spi_transfer */
const void *tx;
@@ -560,17 +560,15 @@ static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
- sspi->pmx = pinmux_get(&pdev->dev, NULL);
- ret = IS_ERR(sspi->pmx);
+ sspi->p = pinctrl_get_select_default(&pdev->dev);
+ ret = IS_ERR(sspi->p);
if (ret)
goto free_master;
- pinmux_enable(sspi->pmx);
-
sspi->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(sspi->clk)) {
ret = -EINVAL;
- goto free_pmx;
+ goto free_pin;
}
clk_enable(sspi->clk);
sspi->ctrl_freq = clk_get_rate(sspi->clk);
@@ -598,9 +596,8 @@ static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
free_clk:
clk_disable(sspi->clk);
clk_put(sspi->clk);
-free_pmx:
- pinmux_disable(sspi->pmx);
- pinmux_put(sspi->pmx);
+free_pin:
+ pinctrl_put(sspi->p);
free_master:
spi_master_put(master);
err_cs:
@@ -623,8 +620,7 @@ static int __devexit spi_sirfsoc_remove(struct platform_device *pdev)
}
clk_disable(sspi->clk);
clk_put(sspi->clk);
- pinmux_disable(sspi->pmx);
- pinmux_put(sspi->pmx);
+ pinctrl_put(sspi->p);
spi_master_put(master);
return 0;
}
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index ec47d3bdfd13..cd56dcf46320 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1438,7 +1438,6 @@ static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
plat_dev->id, data->io_remap_addr);
/* initialize members of SPI master */
- master->bus_num = -1;
master->num_chipselect = PCH_MAX_CS;
master->setup = pch_spi_setup;
master->transfer = pch_spi_transfer;
@@ -1779,7 +1778,7 @@ static struct pci_driver pch_spi_pcidev_driver = {
.name = "pch_spi",
.id_table = pch_spi_pcidev_id,
.probe = pch_spi_probe,
- .remove = pch_spi_remove,
+ .remove = __devexit_p(pch_spi_remove),
.suspend = pch_spi_suspend,
.resume = pch_spi_resume,
};
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 3d8f662e4fe9..1041cb83d67a 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2,6 +2,7 @@
* SPI init/core code
*
* Copyright (C) 2005 David Brownell
+ * Copyright (C) 2008 Secret Lab Technologies Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,15 +20,16 @@
*/
#include <linux/kernel.h>
+#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
-#include <linux/of_spi.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/sched.h>
@@ -530,7 +532,7 @@ static void spi_pump_messages(struct kthread_work *work)
/* Lock queue and check for queue work */
spin_lock_irqsave(&master->queue_lock, flags);
if (list_empty(&master->queue) || !master->running) {
- if (master->busy) {
+ if (master->busy && master->unprepare_transfer_hardware) {
ret = master->unprepare_transfer_hardware(master);
if (ret) {
spin_unlock_irqrestore(&master->queue_lock, flags);
@@ -560,7 +562,7 @@ static void spi_pump_messages(struct kthread_work *work)
master->busy = true;
spin_unlock_irqrestore(&master->queue_lock, flags);
- if (!was_busy) {
+ if (!was_busy && master->prepare_transfer_hardware) {
ret = master->prepare_transfer_hardware(master);
if (ret) {
dev_err(&master->dev,
@@ -798,6 +800,94 @@ err_init_queue:
/*-------------------------------------------------------------------------*/
+#if defined(CONFIG_OF) && !defined(CONFIG_SPARC)
+/**
+ * of_register_spi_devices() - Register child devices onto the SPI bus
+ * @master: Pointer to spi_master device
+ *
+ * Registers an spi_device for each child node of the master node which has a 'reg'
+ * property.
+ */
+static void of_register_spi_devices(struct spi_master *master)
+{
+ struct spi_device *spi;
+ struct device_node *nc;
+ const __be32 *prop;
+ int rc;
+ int len;
+
+ if (!master->dev.of_node)
+ return;
+
+ for_each_child_of_node(master->dev.of_node, nc) {
+ /* Alloc an spi_device */
+ spi = spi_alloc_device(master);
+ if (!spi) {
+ dev_err(&master->dev, "spi_device alloc error for %s\n",
+ nc->full_name);
+ spi_dev_put(spi);
+ continue;
+ }
+
+ /* Select device driver */
+ if (of_modalias_node(nc, spi->modalias,
+ sizeof(spi->modalias)) < 0) {
+ dev_err(&master->dev, "cannot find modalias for %s\n",
+ nc->full_name);
+ spi_dev_put(spi);
+ continue;
+ }
+
+ /* Device address */
+ prop = of_get_property(nc, "reg", &len);
+ if (!prop || len < sizeof(*prop)) {
+ dev_err(&master->dev, "%s has no 'reg' property\n",
+ nc->full_name);
+ spi_dev_put(spi);
+ continue;
+ }
+ spi->chip_select = be32_to_cpup(prop);
+
+ /* Mode (clock phase/polarity/etc.) */
+ if (of_find_property(nc, "spi-cpha", NULL))
+ spi->mode |= SPI_CPHA;
+ if (of_find_property(nc, "spi-cpol", NULL))
+ spi->mode |= SPI_CPOL;
+ if (of_find_property(nc, "spi-cs-high", NULL))
+ spi->mode |= SPI_CS_HIGH;
+
+ /* Device speed */
+ prop = of_get_property(nc, "spi-max-frequency", &len);
+ if (!prop || len < sizeof(*prop)) {
+ dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
+ nc->full_name);
+ spi_dev_put(spi);
+ continue;
+ }
+ spi->max_speed_hz = be32_to_cpup(prop);
+
+ /* IRQ */
+ spi->irq = irq_of_parse_and_map(nc, 0);
+
+ /* Store a pointer to the node in the device structure */
+ of_node_get(nc);
+ spi->dev.of_node = nc;
+
+ /* Register the new device */
+ request_module(spi->modalias);
+ rc = spi_add_device(spi);
+ if (rc) {
+ dev_err(&master->dev, "spi_device register error %s\n",
+ nc->full_name);
+ spi_dev_put(spi);
+ }
+
+ }
+}
+#else
+static void of_register_spi_devices(struct spi_master *master) { }
+#endif
+
static void spi_master_release(struct device *dev)
{
struct spi_master *master;
@@ -846,6 +936,8 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
return NULL;
device_initialize(&master->dev);
+ master->bus_num = -1;
+ master->num_chipselect = 1;
master->dev.class = &spi_master_class;
master->dev.parent = get_device(dev);
spi_master_set_devdata(master, &master[1]);
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index bad7ba517a1c..f551e5376147 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -29,6 +29,8 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4321) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4322) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43222) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4324) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4325) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4328) },
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index ed4124469a3a..e9d94968f394 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -178,6 +178,18 @@ err_pci:
#define SPEX(_outvar, _offset, _mask, _shift) \
SPEX16(_outvar, _offset, _mask, _shift)
+#define SPEX_ARRAY8(_field, _offset, _mask, _shift) \
+ do { \
+ SPEX(_field[0], _offset + 0, _mask, _shift); \
+ SPEX(_field[1], _offset + 2, _mask, _shift); \
+ SPEX(_field[2], _offset + 4, _mask, _shift); \
+ SPEX(_field[3], _offset + 6, _mask, _shift); \
+ SPEX(_field[4], _offset + 8, _mask, _shift); \
+ SPEX(_field[5], _offset + 10, _mask, _shift); \
+ SPEX(_field[6], _offset + 12, _mask, _shift); \
+ SPEX(_field[7], _offset + 14, _mask, _shift); \
+ } while (0)
+
static inline u8 ssb_crc8(u8 crc, u8 data)
{
@@ -360,8 +372,9 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
SPEX(et0mdcport, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0M, 14);
SPEX(et1mdcport, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1M, 15);
SPEX(board_rev, SSB_SPROM1_BINF, SSB_SPROM1_BINF_BREV, 0);
- SPEX(country_code, SSB_SPROM1_BINF, SSB_SPROM1_BINF_CCODE,
- SSB_SPROM1_BINF_CCODE_SHIFT);
+ if (out->revision == 1)
+ SPEX(country_code, SSB_SPROM1_BINF, SSB_SPROM1_BINF_CCODE,
+ SSB_SPROM1_BINF_CCODE_SHIFT);
SPEX(ant_available_a, SSB_SPROM1_BINF, SSB_SPROM1_BINF_ANTA,
SSB_SPROM1_BINF_ANTA_SHIFT);
SPEX(ant_available_bg, SSB_SPROM1_BINF, SSB_SPROM1_BINF_ANTBG,
@@ -387,6 +400,8 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
SPEX(boardflags_lo, SSB_SPROM1_BFLLO, 0xFFFF, 0);
if (out->revision >= 2)
SPEX(boardflags_hi, SSB_SPROM2_BFLHI, 0xFFFF, 0);
+ SPEX(alpha2[0], SSB_SPROM1_CCODE, 0xff00, 8);
+ SPEX(alpha2[1], SSB_SPROM1_CCODE, 0x00ff, 0);
/* Extract the antenna gain values. */
out->antenna_gain.a0 = r123_extract_antgain(out->revision, in,
@@ -455,14 +470,17 @@ static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
SPEX(et0phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET0A, 0);
SPEX(et1phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET1A,
SSB_SPROM4_ETHPHY_ET1A_SHIFT);
+ SPEX(board_rev, SSB_SPROM4_BOARDREV, 0xFFFF, 0);
if (out->revision == 4) {
- SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0);
+ SPEX(alpha2[0], SSB_SPROM4_CCODE, 0xff00, 8);
+ SPEX(alpha2[1], SSB_SPROM4_CCODE, 0x00ff, 0);
SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0);
SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0);
SPEX(boardflags2_lo, SSB_SPROM4_BFL2LO, 0xFFFF, 0);
SPEX(boardflags2_hi, SSB_SPROM4_BFL2HI, 0xFFFF, 0);
} else {
- SPEX(country_code, SSB_SPROM5_CCODE, 0xFFFF, 0);
+ SPEX(alpha2[0], SSB_SPROM5_CCODE, 0xff00, 8);
+ SPEX(alpha2[1], SSB_SPROM5_CCODE, 0x00ff, 0);
SPEX(boardflags_lo, SSB_SPROM5_BFLLO, 0xFFFF, 0);
SPEX(boardflags_hi, SSB_SPROM5_BFLHI, 0xFFFF, 0);
SPEX(boardflags2_lo, SSB_SPROM5_BFL2LO, 0xFFFF, 0);
@@ -525,7 +543,9 @@ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
v = in[SPOFF(SSB_SPROM8_IL0MAC) + i];
*(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
}
- SPEX(country_code, SSB_SPROM8_CCODE, 0xFFFF, 0);
+ SPEX(board_rev, SSB_SPROM8_BOARDREV, 0xFFFF, 0);
+ SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8);
+ SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0);
SPEX(boardflags_lo, SSB_SPROM8_BFLLO, 0xFFFF, 0);
SPEX(boardflags_hi, SSB_SPROM8_BFLHI, 0xFFFF, 0);
SPEX(boardflags2_lo, SSB_SPROM8_BFL2LO, 0xFFFF, 0);
@@ -655,6 +675,63 @@ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G,
SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+ SPEX(leddc_on_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_ON,
+ SSB_SPROM8_LEDDC_ON_SHIFT);
+ SPEX(leddc_off_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_OFF,
+ SSB_SPROM8_LEDDC_OFF_SHIFT);
+
+ SPEX(txchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_TXCHAIN,
+ SSB_SPROM8_TXRXC_TXCHAIN_SHIFT);
+ SPEX(rxchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_RXCHAIN,
+ SSB_SPROM8_TXRXC_RXCHAIN_SHIFT);
+ SPEX(antswitch, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_SWITCH,
+ SSB_SPROM8_TXRXC_SWITCH_SHIFT);
+
+ SPEX(opo, SSB_SPROM8_OFDM2GPO, 0x00ff, 0);
+
+ SPEX_ARRAY8(mcs2gpo, SSB_SPROM8_2G_MCSPO, ~0, 0);
+ SPEX_ARRAY8(mcs5gpo, SSB_SPROM8_5G_MCSPO, ~0, 0);
+ SPEX_ARRAY8(mcs5glpo, SSB_SPROM8_5GL_MCSPO, ~0, 0);
+ SPEX_ARRAY8(mcs5ghpo, SSB_SPROM8_5GH_MCSPO, ~0, 0);
+
+ SPEX(rawtempsense, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_RAWTEMP,
+ SSB_SPROM8_RAWTS_RAWTEMP_SHIFT);
+ SPEX(measpower, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_MEASPOWER,
+ SSB_SPROM8_RAWTS_MEASPOWER_SHIFT);
+ SPEX(tempsense_slope, SSB_SPROM8_OPT_CORRX,
+ SSB_SPROM8_OPT_CORRX_TEMP_SLOPE,
+ SSB_SPROM8_OPT_CORRX_TEMP_SLOPE_SHIFT);
+ SPEX(tempcorrx, SSB_SPROM8_OPT_CORRX, SSB_SPROM8_OPT_CORRX_TEMPCORRX,
+ SSB_SPROM8_OPT_CORRX_TEMPCORRX_SHIFT);
+ SPEX(tempsense_option, SSB_SPROM8_OPT_CORRX,
+ SSB_SPROM8_OPT_CORRX_TEMP_OPTION,
+ SSB_SPROM8_OPT_CORRX_TEMP_OPTION_SHIFT);
+ SPEX(freqoffset_corr, SSB_SPROM8_HWIQ_IQSWP,
+ SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR,
+ SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR_SHIFT);
+ SPEX(iqcal_swp_dis, SSB_SPROM8_HWIQ_IQSWP,
+ SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP,
+ SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT);
+ SPEX(hw_iqcal_en, SSB_SPROM8_HWIQ_IQSWP, SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL,
+ SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT);
+
+ SPEX(bw40po, SSB_SPROM8_BW40PO, ~0, 0);
+ SPEX(cddpo, SSB_SPROM8_CDDPO, ~0, 0);
+ SPEX(stbcpo, SSB_SPROM8_STBCPO, ~0, 0);
+ SPEX(bwduppo, SSB_SPROM8_BWDUPPO, ~0, 0);
+
+ SPEX(tempthresh, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_TRESH,
+ SSB_SPROM8_THERMAL_TRESH_SHIFT);
+ SPEX(tempoffset, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_OFFSET,
+ SSB_SPROM8_THERMAL_OFFSET_SHIFT);
+ SPEX(phycal_tempdelta, SSB_SPROM8_TEMPDELTA,
+ SSB_SPROM8_TEMPDELTA_PHYCAL,
+ SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT);
+ SPEX(temps_period, SSB_SPROM8_TEMPDELTA, SSB_SPROM8_TEMPDELTA_PERIOD,
+ SSB_SPROM8_TEMPDELTA_PERIOD_SHIFT);
+ SPEX(temps_hysteresis, SSB_SPROM8_TEMPDELTA,
+ SSB_SPROM8_TEMPDELTA_HYSTERESIS,
+ SSB_SPROM8_TEMPDELTA_HYSTERESIS_SHIFT);
sprom_extract_r458(out, in);
/* TODO - get remaining rev 8 stuff needed */
@@ -784,7 +861,6 @@ static void ssb_pci_get_boardinfo(struct ssb_bus *bus,
{
bi->vendor = bus->host_pci->subsystem_vendor;
bi->type = bus->host_pci->subsystem_device;
- bi->rev = bus->host_pci->revision;
}
int ssb_pci_get_invariants(struct ssb_bus *bus,
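
The SPROM hunks above replace the packed country_code field with two ISO 3166-1 alpha2 characters taken from the high and low byte of the CCODE word. A minimal user-space sketch of the same mask-and-shift extraction (the word value and variable names below are hypothetical, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t ccode = 0x5553;	/* hypothetical SPROM CCODE word: 'U' << 8 | 'S' */
	char alpha2[3];

	alpha2[0] = (ccode & 0xff00) >> 8;	/* high byte -> first letter  */
	alpha2[1] = (ccode & 0x00ff) >> 0;	/* low byte  -> second letter */
	alpha2[2] = '\0';

	printf("alpha2 = %s\n", alpha2);	/* prints "US" */
	return 0;
}
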
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 9f1f27e7c86e..e84dbecd0991 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
@@ -269,7 +270,7 @@ out:
return ret;
}
-static inline unsigned long calc_vm_may_flags(unsigned long prot)
+static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) |
_calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
@@ -363,11 +364,12 @@ static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
mutex_lock(&ashmem_mutex);
list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
- struct inode *inode = range->asma->file->f_dentry->d_inode;
loff_t start = range->pgstart * PAGE_SIZE;
- loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;
+ loff_t end = (range->pgend + 1) * PAGE_SIZE;
- vmtruncate_range(inode, start, end);
+ do_fallocate(range->asma->file,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ start, end - start);
range->purged = ASHMEM_WAS_PURGED;
lru_del(range);
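
The ashmem hunk above swaps vmtruncate_range() for hole punching with do_fallocate(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE) and makes the end offset exclusive, so the length is simply end - start. A hedged user-space sketch of the equivalent fallocate(2) call on a hypothetical scratch file (requires a filesystem that supports hole punching):

#define _GNU_SOURCE
#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	off_t start = 4096, end = 8192;		/* end is exclusive, as in the new ashmem code */
	int fd = open("scratch.dat", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, end) != 0)
		return 1;

	/* Deallocate the pages in [start, end) without changing the file size. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      start, end - start) != 0)
		perror("fallocate");

	close(fd);
	return 0;
}
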
diff --git a/drivers/staging/media/as102/as10x_cmd.c b/drivers/staging/media/as102/as10x_cmd.c
index 262bb94ad27e..a73df10982d0 100644
--- a/drivers/staging/media/as102/as10x_cmd.c
+++ b/drivers/staging/media/as102/as10x_cmd.c
@@ -31,7 +31,7 @@
*/
int as10x_cmd_turn_on(struct as10x_bus_adapter_t *adap)
{
- int error;
+ int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
@@ -54,8 +54,6 @@ int as10x_cmd_turn_on(struct as10x_bus_adapter_t *adap)
(uint8_t *) prsp,
sizeof(prsp->body.turn_on.rsp) +
HEADER_SIZE);
- } else {
- error = AS10X_CMD_ERROR;
}
if (error < 0)
@@ -77,7 +75,7 @@ out:
*/
int as10x_cmd_turn_off(struct as10x_bus_adapter_t *adap)
{
- int error;
+ int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
@@ -99,8 +97,6 @@ int as10x_cmd_turn_off(struct as10x_bus_adapter_t *adap)
sizeof(pcmd->body.turn_off.req) + HEADER_SIZE,
(uint8_t *) prsp,
sizeof(prsp->body.turn_off.rsp) + HEADER_SIZE);
- } else {
- error = AS10X_CMD_ERROR;
}
if (error < 0)
@@ -124,7 +120,7 @@ out:
int as10x_cmd_set_tune(struct as10x_bus_adapter_t *adap,
struct as10x_tune_args *ptune)
{
- int error;
+ int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *preq, *prsp;
ENTER();
@@ -159,8 +155,6 @@ int as10x_cmd_set_tune(struct as10x_bus_adapter_t *adap,
(uint8_t *) prsp,
sizeof(prsp->body.set_tune.rsp)
+ HEADER_SIZE);
- } else {
- error = AS10X_CMD_ERROR;
}
if (error < 0)
@@ -184,7 +178,7 @@ out:
int as10x_cmd_get_tune_status(struct as10x_bus_adapter_t *adap,
struct as10x_tune_status *pstatus)
{
- int error;
+ int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *preq, *prsp;
ENTER();
@@ -208,8 +202,6 @@ int as10x_cmd_get_tune_status(struct as10x_bus_adapter_t *adap,
sizeof(preq->body.get_tune_status.req) + HEADER_SIZE,
(uint8_t *) prsp,
sizeof(prsp->body.get_tune_status.rsp) + HEADER_SIZE);
- } else {
- error = AS10X_CMD_ERROR;
}
if (error < 0)
@@ -241,7 +233,7 @@ out:
*/
int as10x_cmd_get_tps(struct as10x_bus_adapter_t *adap, struct as10x_tps *ptps)
{
- int error;
+ int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
@@ -266,8 +258,6 @@ int as10x_cmd_get_tps(struct as10x_bus_adapter_t *adap, struct as10x_tps *ptps)
(uint8_t *) prsp,
sizeof(prsp->body.get_tps.rsp) +
HEADER_SIZE);
- } else {
- error = AS10X_CMD_ERROR;
}
if (error < 0)
@@ -305,7 +295,7 @@ out:
int as10x_cmd_get_demod_stats(struct as10x_bus_adapter_t *adap,
struct as10x_demod_stats *pdemod_stats)
{
- int error;
+ int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
@@ -330,8 +320,6 @@ int as10x_cmd_get_demod_stats(struct as10x_bus_adapter_t *adap,
(uint8_t *) prsp,
sizeof(prsp->body.get_demod_stats.rsp)
+ HEADER_SIZE);
- } else {
- error = AS10X_CMD_ERROR;
}
if (error < 0)
@@ -370,7 +358,7 @@ out:
int as10x_cmd_get_impulse_resp(struct as10x_bus_adapter_t *adap,
uint8_t *is_ready)
{
- int error;
+ int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
@@ -395,8 +383,6 @@ int as10x_cmd_get_impulse_resp(struct as10x_bus_adapter_t *adap,
(uint8_t *) prsp,
sizeof(prsp->body.get_impulse_rsp.rsp)
+ HEADER_SIZE);
- } else {
- error = AS10X_CMD_ERROR;
}
if (error < 0)
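
The as10x_cmd hunks above drop the repeated "else { error = AS10X_CMD_ERROR; }" branches by pre-initialising error to the failure code, so a skipped transfer falls straight through to the common error check. A stand-alone sketch of that pattern, using hypothetical names rather than the driver's own helpers:

#include <stdio.h>

#define CMD_ERROR (-1)			/* stand-in for AS10X_CMD_ERROR */

static int send_command(int link_ready)
{
	int error = CMD_ERROR;		/* assume failure until the transfer actually runs */

	if (link_ready)
		error = 0;		/* stand-in for the adapter's xfer_cmd() call */

	if (error < 0)
		fprintf(stderr, "command failed (%d)\n", error);
	return error;
}

int main(void)
{
	/* Without the link, the pre-initialised error value is reported. */
	return (send_command(0) == CMD_ERROR && send_command(1) == 0) ? 0 : 1;
}
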
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 280c84ec4cc2..c365cdf714ea 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -898,6 +898,10 @@ dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&pd->dmaq);
mutex_init(&pd->mux);
pd->vdev->lock = &pd->mux; /* for locking v4l2_file_operations */
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &pd->vdev->flags);
spin_lock_init(&pd->lock);
pd->csr2 = csr2_init;
pd->config = config_init;
diff --git a/drivers/staging/media/easycap/easycap_main.c b/drivers/staging/media/easycap/easycap_main.c
index 6f83d362ab0d..a1c45e4dcdce 100644
--- a/drivers/staging/media/easycap/easycap_main.c
+++ b/drivers/staging/media/easycap/easycap_main.c
@@ -700,214 +700,7 @@ static int videodev_release(struct video_device *pvideo_device)
JOM(4, "ending successfully\n");
return 0;
}
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
-/*****************************************************************************/
-/*--------------------------------------------------------------------------*/
-/*
- * THIS FUNCTION IS CALLED FROM WITHIN easycap_usb_disconnect() AND IS
- * PROTECTED BY SEMAPHORES SET AND CLEARED BY easycap_usb_disconnect().
- *
- * BY THIS STAGE THE DEVICE HAS ALREADY BEEN PHYSICALLY UNPLUGGED, SO
- * peasycap->pusb_device IS NO LONGER VALID.
- */
-/*---------------------------------------------------------------------------*/
-static void easycap_delete(struct kref *pkref)
-{
- struct easycap *peasycap;
- struct data_urb *pdata_urb;
- struct list_head *plist_head, *plist_next;
- int k, m, gone, kd;
- int allocation_video_urb;
- int allocation_video_page;
- int allocation_video_struct;
- int allocation_audio_urb;
- int allocation_audio_page;
- int allocation_audio_struct;
- int registered_video, registered_audio;
-
- peasycap = container_of(pkref, struct easycap, kref);
- if (!peasycap) {
- SAM("ERROR: peasycap is NULL: cannot perform deletions\n");
- return;
- }
- kd = easycap_isdongle(peasycap);
-/*---------------------------------------------------------------------------*/
-/*
- * FREE VIDEO.
- */
-/*---------------------------------------------------------------------------*/
- if (peasycap->purb_video_head) {
- m = 0;
- list_for_each(plist_head, peasycap->purb_video_head) {
- pdata_urb = list_entry(plist_head,
- struct data_urb, list_head);
- if (pdata_urb && pdata_urb->purb) {
- usb_free_urb(pdata_urb->purb);
- pdata_urb->purb = NULL;
- peasycap->allocation_video_urb--;
- m++;
- }
- }
-
- JOM(4, "%i video urbs freed\n", m);
-/*---------------------------------------------------------------------------*/
- JOM(4, "freeing video data_urb structures.\n");
- m = 0;
- list_for_each_safe(plist_head, plist_next,
- peasycap->purb_video_head) {
- pdata_urb = list_entry(plist_head,
- struct data_urb, list_head);
- if (pdata_urb) {
- peasycap->allocation_video_struct -=
- sizeof(struct data_urb);
- kfree(pdata_urb);
- m++;
- }
- }
- JOM(4, "%i video data_urb structures freed\n", m);
- JOM(4, "setting peasycap->purb_video_head=NULL\n");
- peasycap->purb_video_head = NULL;
- }
-/*---------------------------------------------------------------------------*/
- JOM(4, "freeing video isoc buffers.\n");
- m = 0;
- for (k = 0; k < VIDEO_ISOC_BUFFER_MANY; k++) {
- if (peasycap->video_isoc_buffer[k].pgo) {
- free_pages((unsigned long)
- peasycap->video_isoc_buffer[k].pgo,
- VIDEO_ISOC_ORDER);
- peasycap->video_isoc_buffer[k].pgo = NULL;
- peasycap->allocation_video_page -=
- BIT(VIDEO_ISOC_ORDER);
- m++;
- }
- }
- JOM(4, "isoc video buffers freed: %i pages\n",
- m * (0x01 << VIDEO_ISOC_ORDER));
-/*---------------------------------------------------------------------------*/
- JOM(4, "freeing video field buffers.\n");
- gone = 0;
- for (k = 0; k < FIELD_BUFFER_MANY; k++) {
- for (m = 0; m < FIELD_BUFFER_SIZE/PAGE_SIZE; m++) {
- if (peasycap->field_buffer[k][m].pgo) {
- free_page((unsigned long)
- peasycap->field_buffer[k][m].pgo);
- peasycap->field_buffer[k][m].pgo = NULL;
- peasycap->allocation_video_page -= 1;
- gone++;
- }
- }
- }
- JOM(4, "video field buffers freed: %i pages\n", gone);
-/*---------------------------------------------------------------------------*/
- JOM(4, "freeing video frame buffers.\n");
- gone = 0;
- for (k = 0; k < FRAME_BUFFER_MANY; k++) {
- for (m = 0; m < FRAME_BUFFER_SIZE/PAGE_SIZE; m++) {
- if (peasycap->frame_buffer[k][m].pgo) {
- free_page((unsigned long)
- peasycap->frame_buffer[k][m].pgo);
- peasycap->frame_buffer[k][m].pgo = NULL;
- peasycap->allocation_video_page -= 1;
- gone++;
- }
- }
- }
- JOM(4, "video frame buffers freed: %i pages\n", gone);
-/*---------------------------------------------------------------------------*/
-/*
- * FREE AUDIO.
- */
-/*---------------------------------------------------------------------------*/
- if (peasycap->purb_audio_head) {
- JOM(4, "freeing audio urbs\n");
- m = 0;
- list_for_each(plist_head, (peasycap->purb_audio_head)) {
- pdata_urb = list_entry(plist_head,
- struct data_urb, list_head);
- if (pdata_urb && pdata_urb->purb) {
- usb_free_urb(pdata_urb->purb);
- pdata_urb->purb = NULL;
- peasycap->allocation_audio_urb--;
- m++;
- }
- }
- JOM(4, "%i audio urbs freed\n", m);
-/*---------------------------------------------------------------------------*/
- JOM(4, "freeing audio data_urb structures.\n");
- m = 0;
- list_for_each_safe(plist_head, plist_next,
- peasycap->purb_audio_head) {
- pdata_urb = list_entry(plist_head,
- struct data_urb, list_head);
- if (pdata_urb) {
- peasycap->allocation_audio_struct -=
- sizeof(struct data_urb);
- kfree(pdata_urb);
- m++;
- }
- }
- JOM(4, "%i audio data_urb structures freed\n", m);
- JOM(4, "setting peasycap->purb_audio_head=NULL\n");
- peasycap->purb_audio_head = NULL;
- }
-/*---------------------------------------------------------------------------*/
- JOM(4, "freeing audio isoc buffers.\n");
- m = 0;
- for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
- if (peasycap->audio_isoc_buffer[k].pgo) {
- free_pages((unsigned long)
- (peasycap->audio_isoc_buffer[k].pgo),
- AUDIO_ISOC_ORDER);
- peasycap->audio_isoc_buffer[k].pgo = NULL;
- peasycap->allocation_audio_page -=
- BIT(AUDIO_ISOC_ORDER);
- m++;
- }
- }
- JOM(4, "easyoss_delete(): isoc audio buffers freed: %i pages\n",
- m * (0x01 << AUDIO_ISOC_ORDER));
-/*---------------------------------------------------------------------------*/
- JOM(4, "freeing easycap structure.\n");
- allocation_video_urb = peasycap->allocation_video_urb;
- allocation_video_page = peasycap->allocation_video_page;
- allocation_video_struct = peasycap->allocation_video_struct;
- registered_video = peasycap->registered_video;
- allocation_audio_urb = peasycap->allocation_audio_urb;
- allocation_audio_page = peasycap->allocation_audio_page;
- allocation_audio_struct = peasycap->allocation_audio_struct;
- registered_audio = peasycap->registered_audio;
-
- if (0 <= kd && DONGLE_MANY > kd) {
- if (mutex_lock_interruptible(&mutex_dongle)) {
- SAY("ERROR: cannot down mutex_dongle\n");
- } else {
- JOM(4, "locked mutex_dongle\n");
- easycapdc60_dongle[kd].peasycap = NULL;
- mutex_unlock(&mutex_dongle);
- JOM(4, "unlocked mutex_dongle\n");
- JOT(4, " null-->dongle[%i].peasycap\n", kd);
- allocation_video_struct -= sizeof(struct easycap);
- }
- } else {
- SAY("ERROR: cannot purge dongle[].peasycap");
- }
- kfree(peasycap);
-
-/*---------------------------------------------------------------------------*/
- SAY("%8i=video urbs after all deletions\n", allocation_video_urb);
- SAY("%8i=video pages after all deletions\n", allocation_video_page);
- SAY("%8i=video structs after all deletions\n", allocation_video_struct);
- SAY("%8i=video devices after all deletions\n", registered_video);
- SAY("%8i=audio urbs after all deletions\n", allocation_audio_urb);
- SAY("%8i=audio pages after all deletions\n", allocation_audio_page);
- SAY("%8i=audio structs after all deletions\n", allocation_audio_struct);
- SAY("%8i=audio devices after all deletions\n", registered_audio);
-
- JOT(4, "ending.\n");
- return;
-}
/*****************************************************************************/
static unsigned int easycap_poll(struct file *file, poll_table *wait)
{
@@ -2842,6 +2635,813 @@ static void easycap_complete(struct urb *purb)
return;
}
+static struct easycap *alloc_easycap(u8 bInterfaceNumber)
+{
+ struct easycap *peasycap;
+ int i;
+
+ peasycap = kzalloc(sizeof(struct easycap), GFP_KERNEL);
+ if (!peasycap) {
+ SAY("ERROR: Could not allocate peasycap\n");
+ return NULL;
+ }
+
+ if (mutex_lock_interruptible(&mutex_dongle)) {
+ SAY("ERROR: cannot lock mutex_dongle\n");
+ kfree(peasycap);
+ return NULL;
+ }
+
+ /* Find a free dongle in easycapdc60_dongle array */
+ for (i = 0; i < DONGLE_MANY; i++) {
+
+ if ((!easycapdc60_dongle[i].peasycap) &&
+ (!mutex_is_locked(&easycapdc60_dongle[i].mutex_video)) &&
+ (!mutex_is_locked(&easycapdc60_dongle[i].mutex_audio))) {
+
+ easycapdc60_dongle[i].peasycap = peasycap;
+ peasycap->isdongle = i;
+ JOM(8, "intf[%i]: peasycap-->easycap"
+ "_dongle[%i].peasycap\n",
+ bInterfaceNumber, i);
+ break;
+ }
+ }
+
+ mutex_unlock(&mutex_dongle);
+
+ if (i >= DONGLE_MANY) {
+ SAM("ERROR: too many dongles\n");
+ kfree(peasycap);
+ return NULL;
+ }
+
+ return peasycap;
+}
+
+static void free_easycap(struct easycap *peasycap)
+{
+ int allocation_video_urb;
+ int allocation_video_page;
+ int allocation_video_struct;
+ int allocation_audio_urb;
+ int allocation_audio_page;
+ int allocation_audio_struct;
+ int registered_video, registered_audio;
+ int kd;
+
+ JOM(4, "freeing easycap structure.\n");
+ allocation_video_urb = peasycap->allocation_video_urb;
+ allocation_video_page = peasycap->allocation_video_page;
+ allocation_video_struct = peasycap->allocation_video_struct;
+ registered_video = peasycap->registered_video;
+ allocation_audio_urb = peasycap->allocation_audio_urb;
+ allocation_audio_page = peasycap->allocation_audio_page;
+ allocation_audio_struct = peasycap->allocation_audio_struct;
+ registered_audio = peasycap->registered_audio;
+
+ kd = easycap_isdongle(peasycap);
+ if (0 <= kd && DONGLE_MANY > kd) {
+ if (mutex_lock_interruptible(&mutex_dongle)) {
+ SAY("ERROR: cannot down mutex_dongle\n");
+ } else {
+ JOM(4, "locked mutex_dongle\n");
+ easycapdc60_dongle[kd].peasycap = NULL;
+ mutex_unlock(&mutex_dongle);
+ JOM(4, "unlocked mutex_dongle\n");
+ JOT(4, " null-->dongle[%i].peasycap\n", kd);
+ allocation_video_struct -= sizeof(struct easycap);
+ }
+ } else {
+ SAY("ERROR: cannot purge dongle[].peasycap");
+ }
+
+ /* Free device structure */
+ kfree(peasycap);
+
+ SAY("%8i=video urbs after all deletions\n", allocation_video_urb);
+ SAY("%8i=video pages after all deletions\n", allocation_video_page);
+ SAY("%8i=video structs after all deletions\n", allocation_video_struct);
+ SAY("%8i=video devices after all deletions\n", registered_video);
+ SAY("%8i=audio urbs after all deletions\n", allocation_audio_urb);
+ SAY("%8i=audio pages after all deletions\n", allocation_audio_page);
+ SAY("%8i=audio structs after all deletions\n", allocation_audio_struct);
+ SAY("%8i=audio devices after all deletions\n", registered_audio);
+}
+
+/*
+ * FIXME: Identify the appropriate pointer peasycap for interfaces
+ * 1 and 2. The address of peasycap->pusb_device is reluctantly used
+ * for this purpose.
+ */
+static struct easycap *get_easycap(struct usb_device *usbdev,
+ u8 bInterfaceNumber)
+{
+ int i;
+ struct easycap *peasycap;
+
+ for (i = 0; i < DONGLE_MANY; i++) {
+ if (easycapdc60_dongle[i].peasycap->pusb_device == usbdev) {
+ peasycap = easycapdc60_dongle[i].peasycap;
+ JOT(8, "intf[%i]: dongle[%i].peasycap\n",
+ bInterfaceNumber, i);
+ break;
+ }
+ }
+ if (i >= DONGLE_MANY) {
+ SAY("ERROR: peasycap is unknown when probing interface %i\n",
+ bInterfaceNumber);
+ return NULL;
+ }
+ if (!peasycap) {
+ SAY("ERROR: peasycap is NULL when probing interface %i\n",
+ bInterfaceNumber);
+ return NULL;
+ }
+
+ return peasycap;
+}
+
+static void init_easycap(struct easycap *peasycap,
+ struct usb_device *usbdev,
+ struct usb_interface *intf,
+ u8 bInterfaceNumber)
+{
+ /* Save usb_device and usb_interface */
+ peasycap->pusb_device = usbdev;
+ peasycap->pusb_interface = intf;
+
+ peasycap->minor = -1;
+ kref_init(&peasycap->kref);
+ JOM(8, "intf[%i]: after kref_init(..._video) "
+ "%i=peasycap->kref.refcount.counter\n",
+ bInterfaceNumber, peasycap->kref.refcount.counter);
+
+ /* module params */
+ peasycap->gain = (s8)clamp(easycap_gain, 0, 31);
+
+ init_waitqueue_head(&peasycap->wq_video);
+ init_waitqueue_head(&peasycap->wq_audio);
+ init_waitqueue_head(&peasycap->wq_trigger);
+
+ peasycap->allocation_video_struct = sizeof(struct easycap);
+
+ peasycap->microphone = false;
+
+ peasycap->video_interface = -1;
+ peasycap->video_altsetting_on = -1;
+ peasycap->video_altsetting_off = -1;
+ peasycap->video_endpointnumber = -1;
+ peasycap->video_isoc_maxframesize = -1;
+ peasycap->video_isoc_buffer_size = -1;
+
+ peasycap->audio_interface = -1;
+ peasycap->audio_altsetting_on = -1;
+ peasycap->audio_altsetting_off = -1;
+ peasycap->audio_endpointnumber = -1;
+ peasycap->audio_isoc_maxframesize = -1;
+ peasycap->audio_isoc_buffer_size = -1;
+
+ peasycap->frame_buffer_many = FRAME_BUFFER_MANY;
+
+ peasycap->ntsc = easycap_ntsc;
+ JOM(8, "defaulting initially to %s\n",
+ easycap_ntsc ? "NTSC" : "PAL");
+}
+
+static int populate_inputset(struct easycap *peasycap)
+{
+ struct inputset *inputset;
+ struct easycap_format *peasycap_format;
+ struct v4l2_pix_format *pix;
+ int m, i, k, mask, fmtidx;
+ s32 value;
+
+ inputset = peasycap->inputset;
+
+ fmtidx = peasycap->ntsc ? NTSC_M : PAL_BGHIN;
+
+ m = 0;
+ mask = 0;
+ for (i = 0; easycap_standard[i].mask != 0xffff; i++) {
+ if (fmtidx == easycap_standard[i].v4l2_standard.index) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ inputset[k].standard_offset = i;
+ mask = easycap_standard[i].mask;
+ }
+ }
+
+ if (m != 1) {
+ SAM("ERROR: inputset->standard_offset unpopulated, %i=m\n", m);
+ return -ENOENT;
+ }
+
+ peasycap_format = &easycap_format[0];
+ m = 0;
+ for (i = 0; peasycap_format->v4l2_format.fmt.pix.width; i++) {
+ pix = &peasycap_format->v4l2_format.fmt.pix;
+ if (((peasycap_format->mask & 0x0F) == (mask & 0x0F))
+ && pix->field == V4L2_FIELD_NONE
+ && pix->pixelformat == V4L2_PIX_FMT_UYVY
+ && pix->width == 640 && pix->height == 480) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ inputset[k].format_offset = i;
+ break;
+ }
+ peasycap_format++;
+ }
+ if (m != 1) {
+ SAM("ERROR: inputset[]->format_offset unpopulated\n");
+ return -ENOENT;
+ }
+
+ m = 0;
+ for (i = 0; easycap_control[i].id != 0xffffffff; i++) {
+ value = easycap_control[i].default_value;
+ if (V4L2_CID_BRIGHTNESS == easycap_control[i].id) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ inputset[k].brightness = value;
+ } else if (V4L2_CID_CONTRAST == easycap_control[i].id) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ inputset[k].contrast = value;
+ } else if (V4L2_CID_SATURATION == easycap_control[i].id) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ inputset[k].saturation = value;
+ } else if (V4L2_CID_HUE == easycap_control[i].id) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ inputset[k].hue = value;
+ }
+ }
+
+ if (m != 4) {
+ SAM("ERROR: inputset[]->brightness underpopulated\n");
+ return -ENOENT;
+ }
+
+ for (k = 0; k < INPUT_MANY; k++)
+ inputset[k].input = k;
+ JOM(4, "populated inputset[]\n");
+
+ return 0;
+}
+
+static int alloc_framebuffers(struct easycap *peasycap)
+{
+ int i, j;
+ void *pbuf;
+
+ JOM(4, "allocating %i frame buffers of size %li\n",
+ FRAME_BUFFER_MANY, (long int)FRAME_BUFFER_SIZE);
+ JOM(4, ".... each scattered over %li pages\n",
+ FRAME_BUFFER_SIZE/PAGE_SIZE);
+
+ for (i = 0; i < FRAME_BUFFER_MANY; i++) {
+ for (j = 0; j < FRAME_BUFFER_SIZE/PAGE_SIZE; j++) {
+ if (peasycap->frame_buffer[i][j].pgo)
+ SAM("attempting to reallocate framebuffers\n");
+ else {
+ pbuf = (void *)__get_free_page(GFP_KERNEL);
+ if (!pbuf) {
+ SAM("ERROR: Could not allocate "
+ "framebuffer %i page %i\n", i, j);
+ return -ENOMEM;
+ }
+ peasycap->allocation_video_page += 1;
+ peasycap->frame_buffer[i][j].pgo = pbuf;
+ }
+ peasycap->frame_buffer[i][j].pto =
+ peasycap->frame_buffer[i][j].pgo;
+ }
+ }
+
+ peasycap->frame_fill = 0;
+ peasycap->frame_read = 0;
+ JOM(4, "allocation of frame buffers done: %i pages\n", i*j);
+
+ return 0;
+}
+
+static void free_framebuffers(struct easycap *peasycap)
+{
+ int k, m, gone;
+
+ JOM(4, "freeing video frame buffers.\n");
+ gone = 0;
+ for (k = 0; k < FRAME_BUFFER_MANY; k++) {
+ for (m = 0; m < FRAME_BUFFER_SIZE/PAGE_SIZE; m++) {
+ if (peasycap->frame_buffer[k][m].pgo) {
+ free_page((unsigned long)
+ peasycap->frame_buffer[k][m].pgo);
+ peasycap->frame_buffer[k][m].pgo = NULL;
+ peasycap->allocation_video_page -= 1;
+ gone++;
+ }
+ }
+ }
+ JOM(4, "video frame buffers freed: %i pages\n", gone);
+}
+
+static int alloc_fieldbuffers(struct easycap *peasycap)
+{
+ int i, j;
+ void *pbuf;
+
+ JOM(4, "allocating %i field buffers of size %li\n",
+ FIELD_BUFFER_MANY, (long int)FIELD_BUFFER_SIZE);
+ JOM(4, ".... each scattered over %li pages\n",
+ FIELD_BUFFER_SIZE/PAGE_SIZE);
+
+ for (i = 0; i < FIELD_BUFFER_MANY; i++) {
+ for (j = 0; j < FIELD_BUFFER_SIZE/PAGE_SIZE; j++) {
+ if (peasycap->field_buffer[i][j].pgo) {
+ SAM("ERROR: attempting to reallocate "
+ "fieldbuffers\n");
+ } else {
+ pbuf = (void *) __get_free_page(GFP_KERNEL);
+ if (!pbuf) {
+ SAM("ERROR: Could not allocate "
+ "fieldbuffer %i page %i\n", i, j);
+ return -ENOMEM;
+ }
+ peasycap->allocation_video_page += 1;
+ peasycap->field_buffer[i][j].pgo = pbuf;
+ }
+ peasycap->field_buffer[i][j].pto =
+ peasycap->field_buffer[i][j].pgo;
+ }
+ /* TODO: Hardcoded 0x0200 meaning? */
+ peasycap->field_buffer[i][0].kount = 0x0200;
+ }
+ peasycap->field_fill = 0;
+ peasycap->field_page = 0;
+ peasycap->field_read = 0;
+ JOM(4, "allocation of field buffers done: %i pages\n", i*j);
+
+ return 0;
+}
+
+static void free_fieldbuffers(struct easycap *peasycap)
+{
+ int k, m, gone;
+
+ JOM(4, "freeing video field buffers.\n");
+ gone = 0;
+ for (k = 0; k < FIELD_BUFFER_MANY; k++) {
+ for (m = 0; m < FIELD_BUFFER_SIZE/PAGE_SIZE; m++) {
+ if (peasycap->field_buffer[k][m].pgo) {
+ free_page((unsigned long)
+ peasycap->field_buffer[k][m].pgo);
+ peasycap->field_buffer[k][m].pgo = NULL;
+ peasycap->allocation_video_page -= 1;
+ gone++;
+ }
+ }
+ }
+ JOM(4, "video field buffers freed: %i pages\n", gone);
+}
+
+static int alloc_isocbuffers(struct easycap *peasycap)
+{
+ int i;
+ void *pbuf;
+
+ JOM(4, "allocating %i isoc video buffers of size %i\n",
+ VIDEO_ISOC_BUFFER_MANY,
+ peasycap->video_isoc_buffer_size);
+ JOM(4, ".... each occupying contiguous memory pages\n");
+
+ for (i = 0; i < VIDEO_ISOC_BUFFER_MANY; i++) {
+ pbuf = (void *)__get_free_pages(GFP_KERNEL,
+ VIDEO_ISOC_ORDER);
+ if (!pbuf) {
+ SAM("ERROR: Could not allocate isoc "
+ "video buffer %i\n", i);
+ return -ENOMEM;
+ }
+ peasycap->allocation_video_page += BIT(VIDEO_ISOC_ORDER);
+
+ peasycap->video_isoc_buffer[i].pgo = pbuf;
+ peasycap->video_isoc_buffer[i].pto =
+ pbuf + peasycap->video_isoc_buffer_size;
+ peasycap->video_isoc_buffer[i].kount = i;
+ }
+ JOM(4, "allocation of isoc video buffers done: %i pages\n",
+ i * (0x01 << VIDEO_ISOC_ORDER));
+ return 0;
+}
+
+static void free_isocbuffers(struct easycap *peasycap)
+{
+ int k, m;
+
+ JOM(4, "freeing video isoc buffers.\n");
+ m = 0;
+ for (k = 0; k < VIDEO_ISOC_BUFFER_MANY; k++) {
+ if (peasycap->video_isoc_buffer[k].pgo) {
+ free_pages((unsigned long)
+ peasycap->video_isoc_buffer[k].pgo,
+ VIDEO_ISOC_ORDER);
+ peasycap->video_isoc_buffer[k].pgo = NULL;
+ peasycap->allocation_video_page -=
+ BIT(VIDEO_ISOC_ORDER);
+ m++;
+ }
+ }
+ JOM(4, "isoc video buffers freed: %i pages\n",
+ m * (0x01 << VIDEO_ISOC_ORDER));
+}
+
+static int create_video_urbs(struct easycap *peasycap)
+{
+ struct urb *purb;
+ struct data_urb *pdata_urb;
+ int i, j;
+
+ JOM(4, "allocating %i struct urb.\n", VIDEO_ISOC_BUFFER_MANY);
+ JOM(4, "using %i=peasycap->video_isoc_framesperdesc\n",
+ peasycap->video_isoc_framesperdesc);
+ JOM(4, "using %i=peasycap->video_isoc_maxframesize\n",
+ peasycap->video_isoc_maxframesize);
+ JOM(4, "using %i=peasycap->video_isoc_buffer_sizen",
+ peasycap->video_isoc_buffer_size);
+
+ for (i = 0; i < VIDEO_ISOC_BUFFER_MANY; i++) {
+ purb = usb_alloc_urb(peasycap->video_isoc_framesperdesc,
+ GFP_KERNEL);
+ if (!purb) {
+ SAM("ERROR: usb_alloc_urb returned NULL for buffer "
+ "%i\n", i);
+ return -ENOMEM;
+ }
+
+ peasycap->allocation_video_urb += 1;
+ pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
+ if (!pdata_urb) {
+ SAM("ERROR: Could not allocate struct data_urb.\n");
+ return -ENOMEM;
+ }
+
+ peasycap->allocation_video_struct +=
+ sizeof(struct data_urb);
+
+ pdata_urb->purb = purb;
+ pdata_urb->isbuf = i;
+ pdata_urb->length = 0;
+ list_add_tail(&(pdata_urb->list_head),
+ peasycap->purb_video_head);
+
+ if (!i) {
+ JOM(4, "initializing video urbs thus:\n");
+ JOM(4, " purb->interval = 1;\n");
+ JOM(4, " purb->dev = peasycap->pusb_device;\n");
+ JOM(4, " purb->pipe = usb_rcvisocpipe"
+ "(peasycap->pusb_device,%i);\n",
+ peasycap->video_endpointnumber);
+ JOM(4, " purb->transfer_flags = URB_ISO_ASAP;\n");
+ JOM(4, " purb->transfer_buffer = peasycap->"
+ "video_isoc_buffer[.].pgo;\n");
+ JOM(4, " purb->transfer_buffer_length = %i;\n",
+ peasycap->video_isoc_buffer_size);
+ JOM(4, " purb->complete = easycap_complete;\n");
+ JOM(4, " purb->context = peasycap;\n");
+ JOM(4, " purb->start_frame = 0;\n");
+ JOM(4, " purb->number_of_packets = %i;\n",
+ peasycap->video_isoc_framesperdesc);
+ JOM(4, " for (j = 0; j < %i; j++)\n",
+ peasycap->video_isoc_framesperdesc);
+ JOM(4, " {\n");
+ JOM(4, " purb->iso_frame_desc[j].offset = j*%i;\n",
+ peasycap->video_isoc_maxframesize);
+ JOM(4, " purb->iso_frame_desc[j].length = %i;\n",
+ peasycap->video_isoc_maxframesize);
+ JOM(4, " }\n");
+ }
+
+ purb->interval = 1;
+ purb->dev = peasycap->pusb_device;
+ purb->pipe = usb_rcvisocpipe(peasycap->pusb_device,
+ peasycap->video_endpointnumber);
+
+ purb->transfer_flags = URB_ISO_ASAP;
+ purb->transfer_buffer = peasycap->video_isoc_buffer[i].pgo;
+ purb->transfer_buffer_length =
+ peasycap->video_isoc_buffer_size;
+
+ purb->complete = easycap_complete;
+ purb->context = peasycap;
+ purb->start_frame = 0;
+ purb->number_of_packets = peasycap->video_isoc_framesperdesc;
+
+ for (j = 0; j < peasycap->video_isoc_framesperdesc; j++) {
+ purb->iso_frame_desc[j].offset =
+ j * peasycap->video_isoc_maxframesize;
+ purb->iso_frame_desc[j].length =
+ peasycap->video_isoc_maxframesize;
+ }
+ }
+ JOM(4, "allocation of %i struct urb done.\n", i);
+ return 0;
+}
+
+static void free_video_urbs(struct easycap *peasycap)
+{
+ struct list_head *plist_head, *plist_next;
+ struct data_urb *pdata_urb;
+ int m;
+
+ if (peasycap->purb_video_head) {
+ m = 0;
+ list_for_each(plist_head, peasycap->purb_video_head) {
+ pdata_urb = list_entry(plist_head,
+ struct data_urb, list_head);
+ if (pdata_urb && pdata_urb->purb) {
+ usb_free_urb(pdata_urb->purb);
+ pdata_urb->purb = NULL;
+ peasycap->allocation_video_urb--;
+ m++;
+ }
+ }
+
+ JOM(4, "%i video urbs freed\n", m);
+ JOM(4, "freeing video data_urb structures.\n");
+ m = 0;
+ list_for_each_safe(plist_head, plist_next,
+ peasycap->purb_video_head) {
+ pdata_urb = list_entry(plist_head,
+ struct data_urb, list_head);
+ if (pdata_urb) {
+ peasycap->allocation_video_struct -=
+ sizeof(struct data_urb);
+ kfree(pdata_urb);
+ m++;
+ }
+ }
+ JOM(4, "%i video data_urb structures freed\n", m);
+ JOM(4, "setting peasycap->purb_video_head=NULL\n");
+ peasycap->purb_video_head = NULL;
+ }
+}
+
+static int alloc_audio_buffers(struct easycap *peasycap)
+{
+ void *pbuf;
+ int k;
+
+ JOM(4, "allocating %i isoc audio buffers of size %i\n",
+ AUDIO_ISOC_BUFFER_MANY,
+ peasycap->audio_isoc_buffer_size);
+ JOM(4, ".... each occupying contiguous memory pages\n");
+
+ for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
+ pbuf = (void *)__get_free_pages(GFP_KERNEL, AUDIO_ISOC_ORDER);
+ if (!pbuf) {
+ SAM("ERROR: Could not allocate isoc audio buffer %i\n",
+ k);
+ return -ENOMEM;
+ }
+ peasycap->allocation_audio_page += BIT(AUDIO_ISOC_ORDER);
+
+ peasycap->audio_isoc_buffer[k].pgo = pbuf;
+ peasycap->audio_isoc_buffer[k].pto =
+ pbuf + peasycap->audio_isoc_buffer_size;
+ peasycap->audio_isoc_buffer[k].kount = k;
+ }
+
+ JOM(4, "allocation of isoc audio buffers done.\n");
+ return 0;
+}
+
+static void free_audio_buffers(struct easycap *peasycap)
+{
+ int k, m;
+
+ JOM(4, "freeing audio isoc buffers.\n");
+ m = 0;
+ for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
+ if (peasycap->audio_isoc_buffer[k].pgo) {
+ free_pages((unsigned long)
+ (peasycap->audio_isoc_buffer[k].pgo),
+ AUDIO_ISOC_ORDER);
+ peasycap->audio_isoc_buffer[k].pgo = NULL;
+ peasycap->allocation_audio_page -=
+ BIT(AUDIO_ISOC_ORDER);
+ m++;
+ }
+ }
+ JOM(4, "easyoss_delete(): isoc audio buffers freed: %i pages\n",
+ m * (0x01 << AUDIO_ISOC_ORDER));
+}
+
+static int create_audio_urbs(struct easycap *peasycap)
+{
+ struct urb *purb;
+ struct data_urb *pdata_urb;
+ int k, j;
+
+ JOM(4, "allocating %i struct urb.\n", AUDIO_ISOC_BUFFER_MANY);
+ JOM(4, "using %i=peasycap->audio_isoc_framesperdesc\n",
+ peasycap->audio_isoc_framesperdesc);
+ JOM(4, "using %i=peasycap->audio_isoc_maxframesize\n",
+ peasycap->audio_isoc_maxframesize);
+ JOM(4, "using %i=peasycap->audio_isoc_buffer_size\n",
+ peasycap->audio_isoc_buffer_size);
+
+ for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
+ purb = usb_alloc_urb(peasycap->audio_isoc_framesperdesc,
+ GFP_KERNEL);
+ if (!purb) {
+ SAM("ERROR: usb_alloc_urb returned NULL for buffer "
+ "%i\n", k);
+ return -ENOMEM;
+ }
+ peasycap->allocation_audio_urb += 1;
+ pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
+ if (!pdata_urb) {
+ usb_free_urb(purb);
+ SAM("ERROR: Could not allocate struct data_urb.\n");
+ return -ENOMEM;
+ }
+ peasycap->allocation_audio_struct +=
+ sizeof(struct data_urb);
+
+ pdata_urb->purb = purb;
+ pdata_urb->isbuf = k;
+ pdata_urb->length = 0;
+ list_add_tail(&(pdata_urb->list_head),
+ peasycap->purb_audio_head);
+
+ if (!k) {
+ JOM(4, "initializing audio urbs thus:\n");
+ JOM(4, " purb->interval = 1;\n");
+ JOM(4, " purb->dev = peasycap->pusb_device;\n");
+ JOM(4, " purb->pipe = usb_rcvisocpipe(peasycap->"
+ "pusb_device,%i);\n",
+ peasycap->audio_endpointnumber);
+ JOM(4, " purb->transfer_flags = URB_ISO_ASAP;\n");
+ JOM(4, " purb->transfer_buffer = "
+ "peasycap->audio_isoc_buffer[.].pgo;\n");
+ JOM(4, " purb->transfer_buffer_length = %i;\n",
+ peasycap->audio_isoc_buffer_size);
+ JOM(4, " purb->complete = easycap_alsa_complete;\n");
+ JOM(4, " purb->context = peasycap;\n");
+ JOM(4, " purb->start_frame = 0;\n");
+ JOM(4, " purb->number_of_packets = %i;\n",
+ peasycap->audio_isoc_framesperdesc);
+ JOM(4, " for (j = 0; j < %i; j++)\n",
+ peasycap->audio_isoc_framesperdesc);
+ JOM(4, " {\n");
+ JOM(4, " purb->iso_frame_desc[j].offset = j*%i;\n",
+ peasycap->audio_isoc_maxframesize);
+ JOM(4, " purb->iso_frame_desc[j].length = %i;\n",
+ peasycap->audio_isoc_maxframesize);
+ JOM(4, " }\n");
+ }
+
+ purb->interval = 1;
+ purb->dev = peasycap->pusb_device;
+ purb->pipe = usb_rcvisocpipe(peasycap->pusb_device,
+ peasycap->audio_endpointnumber);
+ purb->transfer_flags = URB_ISO_ASAP;
+ purb->transfer_buffer = peasycap->audio_isoc_buffer[k].pgo;
+ purb->transfer_buffer_length =
+ peasycap->audio_isoc_buffer_size;
+ purb->complete = easycap_alsa_complete;
+ purb->context = peasycap;
+ purb->start_frame = 0;
+ purb->number_of_packets = peasycap->audio_isoc_framesperdesc;
+ for (j = 0; j < peasycap->audio_isoc_framesperdesc; j++) {
+ purb->iso_frame_desc[j].offset =
+ j * peasycap->audio_isoc_maxframesize;
+ purb->iso_frame_desc[j].length =
+ peasycap->audio_isoc_maxframesize;
+ }
+ }
+ JOM(4, "allocation of %i struct urb done.\n", k);
+ return 0;
+}
+
+static void free_audio_urbs(struct easycap *peasycap)
+{
+ struct list_head *plist_head, *plist_next;
+ struct data_urb *pdata_urb;
+ int m;
+
+ if (peasycap->purb_audio_head) {
+ JOM(4, "freeing audio urbs\n");
+ m = 0;
+ list_for_each(plist_head, (peasycap->purb_audio_head)) {
+ pdata_urb = list_entry(plist_head,
+ struct data_urb, list_head);
+ if (pdata_urb && pdata_urb->purb) {
+ usb_free_urb(pdata_urb->purb);
+ pdata_urb->purb = NULL;
+ peasycap->allocation_audio_urb--;
+ m++;
+ }
+ }
+ JOM(4, "%i audio urbs freed\n", m);
+ JOM(4, "freeing audio data_urb structures.\n");
+ m = 0;
+ list_for_each_safe(plist_head, plist_next,
+ peasycap->purb_audio_head) {
+ pdata_urb = list_entry(plist_head,
+ struct data_urb, list_head);
+ if (pdata_urb) {
+ peasycap->allocation_audio_struct -=
+ sizeof(struct data_urb);
+ kfree(pdata_urb);
+ m++;
+ }
+ }
+ JOM(4, "%i audio data_urb structures freed\n", m);
+ JOM(4, "setting peasycap->purb_audio_head=NULL\n");
+ peasycap->purb_audio_head = NULL;
+ }
+}
+
+static void config_easycap(struct easycap *peasycap,
+ u8 bInterfaceNumber,
+ u8 bInterfaceClass,
+ u8 bInterfaceSubClass)
+{
+ if ((USB_CLASS_VIDEO == bInterfaceClass) ||
+ (USB_CLASS_VENDOR_SPEC == bInterfaceClass)) {
+ if (-1 == peasycap->video_interface) {
+ peasycap->video_interface = bInterfaceNumber;
+ JOM(4, "setting peasycap->video_interface=%i\n",
+ peasycap->video_interface);
+ } else {
+ if (peasycap->video_interface != bInterfaceNumber) {
+ SAM("ERROR: attempting to reset "
+ "peasycap->video_interface\n");
+ SAM("...... continuing with "
+ "%i=peasycap->video_interface\n",
+ peasycap->video_interface);
+ }
+ }
+ } else if ((USB_CLASS_AUDIO == bInterfaceClass) &&
+ (USB_SUBCLASS_AUDIOSTREAMING == bInterfaceSubClass)) {
+ if (-1 == peasycap->audio_interface) {
+ peasycap->audio_interface = bInterfaceNumber;
+ JOM(4, "setting peasycap->audio_interface=%i\n",
+ peasycap->audio_interface);
+ } else {
+ if (peasycap->audio_interface != bInterfaceNumber) {
+ SAM("ERROR: attempting to reset "
+ "peasycap->audio_interface\n");
+ SAM("...... continuing with "
+ "%i=peasycap->audio_interface\n",
+ peasycap->audio_interface);
+ }
+ }
+ }
+}
+
+/*
+ * This function is called from within easycap_usb_disconnect() and is
+ * protected by semaphores set and cleared by easycap_usb_disconnect().
+ * By this stage the device has already been physically unplugged,
+ * so peasycap->pusb_device is no longer valid.
+ */
+static void easycap_delete(struct kref *pkref)
+{
+ struct easycap *peasycap;
+
+ peasycap = container_of(pkref, struct easycap, kref);
+ if (!peasycap) {
+ SAM("ERROR: peasycap is NULL: cannot perform deletions\n");
+ return;
+ }
+
+ /* Free video urbs */
+ free_video_urbs(peasycap);
+
+ /* Free video isoc buffers */
+ free_isocbuffers(peasycap);
+
+ /* Free video field buffers */
+ free_fieldbuffers(peasycap);
+
+ /* Free video frame buffers */
+ free_framebuffers(peasycap);
+
+ /* Free audio urbs */
+ free_audio_urbs(peasycap);
+
+ /* Free audio isoc buffers */
+ free_audio_buffers(peasycap);
+
+ free_easycap(peasycap);
+
+ JOT(4, "ending.\n");
+}
+
static const struct v4l2_file_operations v4l2_fops = {
.owner = THIS_MODULE,
.open = easycap_open_noinode,
@@ -2850,6 +3450,36 @@ static const struct v4l2_file_operations v4l2_fops = {
.mmap = easycap_mmap,
};
+static int easycap_register_video(struct easycap *peasycap)
+{
+ /*
+ * FIXME: This is believed to be harmless,
+ * but may well be unnecessary or wrong.
+ */
+ peasycap->video_device.v4l2_dev = NULL;
+
+ strcpy(&peasycap->video_device.name[0], "easycapdc60");
+ peasycap->video_device.fops = &v4l2_fops;
+ peasycap->video_device.minor = -1;
+ peasycap->video_device.release = (void *)(&videodev_release);
+
+ video_set_drvdata(&(peasycap->video_device), (void *)peasycap);
+
+ if (0 != (video_register_device(&(peasycap->video_device),
+ VFL_TYPE_GRABBER, -1))) {
+ videodev_release(&(peasycap->video_device));
+ return -ENODEV;
+ }
+
+ peasycap->registered_video++;
+
+ SAM("registered with videodev: %i=minor\n",
+ peasycap->video_device.minor);
+ peasycap->minor = peasycap->video_device.minor;
+
+ return 0;
+}
+
/*
* When the device is plugged, this function is called three times,
* one for each interface.
@@ -2861,24 +3491,15 @@ static int easycap_usb_probe(struct usb_interface *intf,
struct usb_host_interface *alt;
struct usb_endpoint_descriptor *ep;
struct usb_interface_descriptor *interface;
- struct urb *purb;
struct easycap *peasycap;
- int ndong;
- struct data_urb *pdata_urb;
- int i, j, k, m, rc;
+ int i, j, rc;
u8 bInterfaceNumber;
u8 bInterfaceClass;
u8 bInterfaceSubClass;
- void *pbuf;
int okalt[8], isokalt;
int okepn[8];
int okmps[8];
int maxpacketsize;
- u16 mask;
- s32 value;
- struct easycap_format *peasycap_format;
- int fmtidx;
- struct inputset *inputset;
usbdev = interface_to_usbdev(intf);
@@ -2916,76 +3537,16 @@ static int easycap_usb_probe(struct usb_interface *intf,
* interfaces 1 and 2 are probed.
*/
if (0 == bInterfaceNumber) {
- peasycap = kzalloc(sizeof(struct easycap), GFP_KERNEL);
- if (!peasycap) {
- SAY("ERROR: Could not allocate peasycap\n");
- return -ENOMEM;
- }
-
- /* Perform urgent initializations */
- peasycap->minor = -1;
- kref_init(&peasycap->kref);
- JOM(8, "intf[%i]: after kref_init(..._video) "
- "%i=peasycap->kref.refcount.counter\n",
- bInterfaceNumber, peasycap->kref.refcount.counter);
-
- /* module params */
- peasycap->gain = (s8)clamp(easycap_gain, 0, 31);
-
- init_waitqueue_head(&peasycap->wq_video);
- init_waitqueue_head(&peasycap->wq_audio);
- init_waitqueue_head(&peasycap->wq_trigger);
-
- if (mutex_lock_interruptible(&mutex_dongle)) {
- SAY("ERROR: cannot down mutex_dongle\n");
- return -ERESTARTSYS;
- }
-
- for (ndong = 0; ndong < DONGLE_MANY; ndong++) {
- if ((!easycapdc60_dongle[ndong].peasycap) &&
- (!mutex_is_locked(&easycapdc60_dongle
- [ndong].mutex_video)) &&
- (!mutex_is_locked(&easycapdc60_dongle
- [ndong].mutex_audio))) {
- easycapdc60_dongle[ndong].peasycap = peasycap;
- peasycap->isdongle = ndong;
- JOM(8, "intf[%i]: peasycap-->easycap"
- "_dongle[%i].peasycap\n",
- bInterfaceNumber, ndong);
- break;
- }
- }
-
- if (DONGLE_MANY <= ndong) {
- SAM("ERROR: too many dongles\n");
- mutex_unlock(&mutex_dongle);
+ /*
+ * Alloc structure and save it in a free slot in
+ * easycapdc60_dongle array
+ */
+ peasycap = alloc_easycap(bInterfaceNumber);
+ if (!peasycap)
return -ENOMEM;
- }
- mutex_unlock(&mutex_dongle);
-
- peasycap->allocation_video_struct = sizeof(struct easycap);
-
- /* and further initialize the structure */
- peasycap->pusb_device = usbdev;
- peasycap->pusb_interface = intf;
- peasycap->microphone = false;
-
- peasycap->video_interface = -1;
- peasycap->video_altsetting_on = -1;
- peasycap->video_altsetting_off = -1;
- peasycap->video_endpointnumber = -1;
- peasycap->video_isoc_maxframesize = -1;
- peasycap->video_isoc_buffer_size = -1;
-
- peasycap->audio_interface = -1;
- peasycap->audio_altsetting_on = -1;
- peasycap->audio_altsetting_off = -1;
- peasycap->audio_endpointnumber = -1;
- peasycap->audio_isoc_maxframesize = -1;
- peasycap->audio_isoc_buffer_size = -1;
-
- peasycap->frame_buffer_many = FRAME_BUFFER_MANY;
+ /* Perform basic struct initialization */
+ init_easycap(peasycap, usbdev, intf, bInterfaceNumber);
/* Dynamically fill in the available formats */
rc = easycap_video_fillin_formats();
@@ -2996,136 +3557,19 @@ static int easycap_usb_probe(struct usb_interface *intf,
JOM(4, "%i formats available\n", rc);
/* Populate easycap.inputset[] */
- inputset = peasycap->inputset;
- fmtidx = peasycap->ntsc ? NTSC_M : PAL_BGHIN;
- m = 0;
- mask = 0;
- for (i = 0; 0xFFFF != easycap_standard[i].mask; i++) {
- if (fmtidx == easycap_standard[i].v4l2_standard.index) {
- m++;
- for (k = 0; k < INPUT_MANY; k++)
- inputset[k].standard_offset = i;
-
- mask = easycap_standard[i].mask;
- }
- }
- if (1 != m) {
- SAM("ERROR: "
- "inputset->standard_offset unpopulated, %i=m\n", m);
- return -ENOENT;
- }
-
- peasycap_format = &easycap_format[0];
- m = 0;
- for (i = 0; peasycap_format->v4l2_format.fmt.pix.width; i++) {
- struct v4l2_pix_format *pix =
- &peasycap_format->v4l2_format.fmt.pix;
- if (((peasycap_format->mask & 0x0F) == (mask & 0x0F)) &&
- pix->field == V4L2_FIELD_NONE &&
- pix->pixelformat == V4L2_PIX_FMT_UYVY &&
- pix->width == 640 && pix->height == 480) {
- m++;
- for (k = 0; k < INPUT_MANY; k++)
- inputset[k].format_offset = i;
- break;
- }
- peasycap_format++;
- }
- if (1 != m) {
- SAM("ERROR: inputset[]->format_offset unpopulated\n");
- return -ENOENT;
- }
-
- m = 0;
- for (i = 0; 0xFFFFFFFF != easycap_control[i].id; i++) {
- value = easycap_control[i].default_value;
- if (V4L2_CID_BRIGHTNESS == easycap_control[i].id) {
- m++;
- for (k = 0; k < INPUT_MANY; k++)
- inputset[k].brightness = value;
- } else if (V4L2_CID_CONTRAST == easycap_control[i].id) {
- m++;
- for (k = 0; k < INPUT_MANY; k++)
- inputset[k].contrast = value;
- } else if (V4L2_CID_SATURATION == easycap_control[i].id) {
- m++;
- for (k = 0; k < INPUT_MANY; k++)
- inputset[k].saturation = value;
- } else if (V4L2_CID_HUE == easycap_control[i].id) {
- m++;
- for (k = 0; k < INPUT_MANY; k++)
- inputset[k].hue = value;
- }
- }
-
- if (4 != m) {
- SAM("ERROR: inputset[]->brightness underpopulated\n");
- return -ENOENT;
- }
- for (k = 0; k < INPUT_MANY; k++)
- inputset[k].input = k;
- JOM(4, "populated inputset[]\n");
+ rc = populate_inputset(peasycap);
+ if (rc < 0)
+ return rc;
JOM(4, "finished initialization\n");
} else {
-
- /*
- * FIXME: Identify the appropriate pointer
- * peasycap for interfaces 1 and 2.
- * The address of peasycap->pusb_device
- * is reluctantly used for this purpose.
- */
- for (ndong = 0; ndong < DONGLE_MANY; ndong++) {
- if (usbdev == easycapdc60_dongle[ndong].peasycap->
- pusb_device) {
- peasycap = easycapdc60_dongle[ndong].peasycap;
- JOT(8, "intf[%i]: dongle[%i].peasycap\n",
- bInterfaceNumber, ndong);
- break;
- }
- }
- if (DONGLE_MANY <= ndong) {
- SAY("ERROR: peasycap is unknown when probing interface %i\n",
- bInterfaceNumber);
+ peasycap = get_easycap(usbdev, bInterfaceNumber);
+ if (!peasycap)
return -ENODEV;
- }
- if (!peasycap) {
- SAY("ERROR: peasycap is NULL when probing interface %i\n",
- bInterfaceNumber);
- return -ENODEV;
- }
}
- if ((USB_CLASS_VIDEO == bInterfaceClass) ||
- (USB_CLASS_VENDOR_SPEC == bInterfaceClass)) {
- if (-1 == peasycap->video_interface) {
- peasycap->video_interface = bInterfaceNumber;
- JOM(4, "setting peasycap->video_interface=%i\n",
- peasycap->video_interface);
- } else {
- if (peasycap->video_interface != bInterfaceNumber) {
- SAM("ERROR: attempting to reset "
- "peasycap->video_interface\n");
- SAM("...... continuing with "
- "%i=peasycap->video_interface\n",
- peasycap->video_interface);
- }
- }
- } else if ((USB_CLASS_AUDIO == bInterfaceClass) &&
- (USB_SUBCLASS_AUDIOSTREAMING == bInterfaceSubClass)) {
- if (-1 == peasycap->audio_interface) {
- peasycap->audio_interface = bInterfaceNumber;
- JOM(4, "setting peasycap->audio_interface=%i\n",
- peasycap->audio_interface);
- } else {
- if (peasycap->audio_interface != bInterfaceNumber) {
- SAM("ERROR: attempting to reset "
- "peasycap->audio_interface\n");
- SAM("...... continuing with "
- "%i=peasycap->audio_interface\n",
- peasycap->audio_interface);
- }
- }
- }
+ config_easycap(peasycap, bInterfaceNumber,
+ bInterfaceClass,
+ bInterfaceSubClass);
/*
* Investigate all altsettings. This is done in detail
@@ -3368,173 +3812,23 @@ static int easycap_usb_probe(struct usb_interface *intf,
*/
INIT_LIST_HEAD(&(peasycap->urb_video_head));
peasycap->purb_video_head = &(peasycap->urb_video_head);
- JOM(4, "allocating %i frame buffers of size %li\n",
- FRAME_BUFFER_MANY, (long int)FRAME_BUFFER_SIZE);
- JOM(4, ".... each scattered over %li pages\n",
- FRAME_BUFFER_SIZE/PAGE_SIZE);
-
- for (k = 0; k < FRAME_BUFFER_MANY; k++) {
- for (m = 0; m < FRAME_BUFFER_SIZE/PAGE_SIZE; m++) {
- if (peasycap->frame_buffer[k][m].pgo)
- SAM("attempting to reallocate frame "
- " buffers\n");
- else {
- pbuf = (void *)__get_free_page(GFP_KERNEL);
- if (!pbuf) {
- SAM("ERROR: Could not allocate frame "
- "buffer %i page %i\n", k, m);
- return -ENOMEM;
- }
-
- peasycap->allocation_video_page += 1;
- peasycap->frame_buffer[k][m].pgo = pbuf;
- }
- peasycap->frame_buffer[k][m].pto =
- peasycap->frame_buffer[k][m].pgo;
- }
- }
-
- peasycap->frame_fill = 0;
- peasycap->frame_read = 0;
- JOM(4, "allocation of frame buffers done: %i pages\n", k *
- m);
- JOM(4, "allocating %i field buffers of size %li\n",
- FIELD_BUFFER_MANY, (long int)FIELD_BUFFER_SIZE);
- JOM(4, ".... each scattered over %li pages\n",
- FIELD_BUFFER_SIZE/PAGE_SIZE);
-
- for (k = 0; k < FIELD_BUFFER_MANY; k++) {
- for (m = 0; m < FIELD_BUFFER_SIZE/PAGE_SIZE; m++) {
- if (peasycap->field_buffer[k][m].pgo) {
- SAM("ERROR: attempting to reallocate "
- "field buffers\n");
- } else {
- pbuf = (void *) __get_free_page(GFP_KERNEL);
- if (!pbuf) {
- SAM("ERROR: Could not allocate field"
- " buffer %i page %i\n", k, m);
- return -ENOMEM;
- }
-
- peasycap->allocation_video_page += 1;
- peasycap->field_buffer[k][m].pgo = pbuf;
- }
- peasycap->field_buffer[k][m].pto =
- peasycap->field_buffer[k][m].pgo;
- }
- peasycap->field_buffer[k][0].kount = 0x0200;
- }
- peasycap->field_fill = 0;
- peasycap->field_page = 0;
- peasycap->field_read = 0;
- JOM(4, "allocation of field buffers done: %i pages\n", k *
- m);
- JOM(4, "allocating %i isoc video buffers of size %i\n",
- VIDEO_ISOC_BUFFER_MANY,
- peasycap->video_isoc_buffer_size);
- JOM(4, ".... each occupying contiguous memory pages\n");
-
- for (k = 0; k < VIDEO_ISOC_BUFFER_MANY; k++) {
- pbuf = (void *)__get_free_pages(GFP_KERNEL,
- VIDEO_ISOC_ORDER);
- if (!pbuf) {
- SAM("ERROR: Could not allocate isoc video buffer "
- "%i\n", k);
- return -ENOMEM;
- }
- peasycap->allocation_video_page +=
- BIT(VIDEO_ISOC_ORDER);
- peasycap->video_isoc_buffer[k].pgo = pbuf;
- peasycap->video_isoc_buffer[k].pto =
- pbuf + peasycap->video_isoc_buffer_size;
- peasycap->video_isoc_buffer[k].kount = k;
- }
- JOM(4, "allocation of isoc video buffers done: %i pages\n",
- k * (0x01 << VIDEO_ISOC_ORDER));
-
- /* Allocate and initialize multiple struct usb */
- JOM(4, "allocating %i struct urb.\n", VIDEO_ISOC_BUFFER_MANY);
- JOM(4, "using %i=peasycap->video_isoc_framesperdesc\n",
- peasycap->video_isoc_framesperdesc);
- JOM(4, "using %i=peasycap->video_isoc_maxframesize\n",
- peasycap->video_isoc_maxframesize);
- JOM(4, "using %i=peasycap->video_isoc_buffer_sizen",
- peasycap->video_isoc_buffer_size);
-
- for (k = 0; k < VIDEO_ISOC_BUFFER_MANY; k++) {
- purb = usb_alloc_urb(peasycap->video_isoc_framesperdesc,
- GFP_KERNEL);
- if (!purb) {
- SAM("ERROR: usb_alloc_urb returned NULL for buffer "
- "%i\n", k);
- return -ENOMEM;
- }
+ rc = alloc_framebuffers(peasycap);
+ if (rc < 0)
+ return rc;
- peasycap->allocation_video_urb += 1;
- pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
- if (!pdata_urb) {
- SAM("ERROR: Could not allocate struct data_urb.\n");
- return -ENOMEM;
- }
+ rc = alloc_fieldbuffers(peasycap);
+ if (rc < 0)
+ return rc;
- peasycap->allocation_video_struct +=
- sizeof(struct data_urb);
+ rc = alloc_isocbuffers(peasycap);
+ if (rc < 0)
+ return rc;
- pdata_urb->purb = purb;
- pdata_urb->isbuf = k;
- pdata_urb->length = 0;
- list_add_tail(&(pdata_urb->list_head),
- peasycap->purb_video_head);
-
- /* Initialize allocated urbs */
- if (!k) {
- JOM(4, "initializing video urbs thus:\n");
- JOM(4, " purb->interval = 1;\n");
- JOM(4, " purb->dev = peasycap->pusb_device;\n");
- JOM(4, " purb->pipe = usb_rcvisocpipe"
- "(peasycap->pusb_device,%i);\n",
- peasycap->video_endpointnumber);
- JOM(4, " purb->transfer_flags = URB_ISO_ASAP;\n");
- JOM(4, " purb->transfer_buffer = peasycap->"
- "video_isoc_buffer[.].pgo;\n");
- JOM(4, " purb->transfer_buffer_length = %i;\n",
- peasycap->video_isoc_buffer_size);
- JOM(4, " purb->complete = easycap_complete;\n");
- JOM(4, " purb->context = peasycap;\n");
- JOM(4, " purb->start_frame = 0;\n");
- JOM(4, " purb->number_of_packets = %i;\n",
- peasycap->video_isoc_framesperdesc);
- JOM(4, " for (j = 0; j < %i; j++)\n",
- peasycap->video_isoc_framesperdesc);
- JOM(4, " {\n");
- JOM(4, " purb->iso_frame_desc[j].offset = j*%i;\n",
- peasycap->video_isoc_maxframesize);
- JOM(4, " purb->iso_frame_desc[j].length = %i;\n",
- peasycap->video_isoc_maxframesize);
- JOM(4, " }\n");
- }
-
- purb->interval = 1;
- purb->dev = peasycap->pusb_device;
- purb->pipe = usb_rcvisocpipe(peasycap->pusb_device,
- peasycap->video_endpointnumber);
- purb->transfer_flags = URB_ISO_ASAP;
- purb->transfer_buffer = peasycap->video_isoc_buffer[k].pgo;
- purb->transfer_buffer_length =
- peasycap->video_isoc_buffer_size;
- purb->complete = easycap_complete;
- purb->context = peasycap;
- purb->start_frame = 0;
- purb->number_of_packets = peasycap->video_isoc_framesperdesc;
- for (j = 0; j < peasycap->video_isoc_framesperdesc; j++) {
- purb->iso_frame_desc[j].offset = j *
- peasycap->video_isoc_maxframesize;
- purb->iso_frame_desc[j].length =
- peasycap->video_isoc_maxframesize;
- }
- }
- JOM(4, "allocation of %i struct urb done.\n", k);
+ /* Allocate and initialize video urbs */
+ rc = create_video_urbs(peasycap);
+ if (rc < 0)
+ return rc;
/* Save pointer peasycap in this interface */
usb_set_intfdata(intf, peasycap);
@@ -3545,9 +3839,6 @@ static int easycap_usb_probe(struct usb_interface *intf,
* because some udev rules triggers easycap_open()
* immediately after registration, causing a clash.
*/
- peasycap->ntsc = easycap_ntsc;
- JOM(8, "defaulting initially to %s\n",
- easycap_ntsc ? "NTSC" : "PAL");
rc = reset(peasycap);
if (rc) {
SAM("ERROR: reset() rc = %i\n", rc);
@@ -3562,33 +3853,12 @@ static int easycap_usb_probe(struct usb_interface *intf,
JOM(4, "registered device instance: %s\n",
peasycap->v4l2_device.name);
- /*
- * FIXME: This is believed to be harmless,
- * but may well be unnecessary or wrong.
- */
- peasycap->video_device.v4l2_dev = NULL;
-
-
- strcpy(&peasycap->video_device.name[0], "easycapdc60");
- peasycap->video_device.fops = &v4l2_fops;
- peasycap->video_device.minor = -1;
- peasycap->video_device.release = (void *)(&videodev_release);
-
- video_set_drvdata(&(peasycap->video_device), (void *)peasycap);
-
- if (0 != (video_register_device(&(peasycap->video_device),
- VFL_TYPE_GRABBER, -1))) {
+ rc = easycap_register_video(peasycap);
+ if (rc < 0) {
dev_err(&intf->dev,
"Not able to register with videodev\n");
- videodev_release(&(peasycap->video_device));
return -ENODEV;
}
-
- peasycap->registered_video++;
- SAM("registered with videodev: %i=minor\n",
- peasycap->video_device.minor);
- peasycap->minor = peasycap->video_device.minor;
-
break;
}
/* 1: Audio control */
@@ -3711,109 +3981,14 @@ static int easycap_usb_probe(struct usb_interface *intf,
INIT_LIST_HEAD(&(peasycap->urb_audio_head));
peasycap->purb_audio_head = &(peasycap->urb_audio_head);
- JOM(4, "allocating %i isoc audio buffers of size %i\n",
- AUDIO_ISOC_BUFFER_MANY,
- peasycap->audio_isoc_buffer_size);
- JOM(4, ".... each occupying contiguous memory pages\n");
-
- for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
- pbuf = (void *)__get_free_pages(GFP_KERNEL,
- AUDIO_ISOC_ORDER);
- if (!pbuf) {
- SAM("ERROR: Could not allocate isoc audio buffer "
- "%i\n", k);
- return -ENOMEM;
- }
- peasycap->allocation_audio_page +=
- BIT(AUDIO_ISOC_ORDER);
-
- peasycap->audio_isoc_buffer[k].pgo = pbuf;
- peasycap->audio_isoc_buffer[k].pto = pbuf +
- peasycap->audio_isoc_buffer_size;
- peasycap->audio_isoc_buffer[k].kount = k;
- }
- JOM(4, "allocation of isoc audio buffers done.\n");
+ rc = alloc_audio_buffers(peasycap);
+ if (rc < 0)
+ return rc;
/* Allocate and initialize urbs */
- JOM(4, "allocating %i struct urb.\n", AUDIO_ISOC_BUFFER_MANY);
- JOM(4, "using %i=peasycap->audio_isoc_framesperdesc\n",
- peasycap->audio_isoc_framesperdesc);
- JOM(4, "using %i=peasycap->audio_isoc_maxframesize\n",
- peasycap->audio_isoc_maxframesize);
- JOM(4, "using %i=peasycap->audio_isoc_buffer_size\n",
- peasycap->audio_isoc_buffer_size);
-
- for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
- purb = usb_alloc_urb(peasycap->audio_isoc_framesperdesc,
- GFP_KERNEL);
- if (!purb) {
- SAM("ERROR: usb_alloc_urb returned NULL for buffer "
- "%i\n", k);
- return -ENOMEM;
- }
- peasycap->allocation_audio_urb += 1 ;
- pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
- if (!pdata_urb) {
- usb_free_urb(purb);
- SAM("ERROR: Could not allocate struct data_urb.\n");
- return -ENOMEM;
- }
- peasycap->allocation_audio_struct +=
- sizeof(struct data_urb);
-
- pdata_urb->purb = purb;
- pdata_urb->isbuf = k;
- pdata_urb->length = 0;
- list_add_tail(&(pdata_urb->list_head),
- peasycap->purb_audio_head);
-
- if (!k) {
- JOM(4, "initializing audio urbs thus:\n");
- JOM(4, " purb->interval = 1;\n");
- JOM(4, " purb->dev = peasycap->pusb_device;\n");
- JOM(4, " purb->pipe = usb_rcvisocpipe(peasycap->"
- "pusb_device,%i);\n",
- peasycap->audio_endpointnumber);
- JOM(4, " purb->transfer_flags = URB_ISO_ASAP;\n");
- JOM(4, " purb->transfer_buffer = "
- "peasycap->audio_isoc_buffer[.].pgo;\n");
- JOM(4, " purb->transfer_buffer_length = %i;\n",
- peasycap->audio_isoc_buffer_size);
- JOM(4, " purb->complete = easycap_alsa_complete;\n");
- JOM(4, " purb->context = peasycap;\n");
- JOM(4, " purb->start_frame = 0;\n");
- JOM(4, " purb->number_of_packets = %i;\n",
- peasycap->audio_isoc_framesperdesc);
- JOM(4, " for (j = 0; j < %i; j++)\n",
- peasycap->audio_isoc_framesperdesc);
- JOM(4, " {\n");
- JOM(4, " purb->iso_frame_desc[j].offset = j*%i;\n",
- peasycap->audio_isoc_maxframesize);
- JOM(4, " purb->iso_frame_desc[j].length = %i;\n",
- peasycap->audio_isoc_maxframesize);
- JOM(4, " }\n");
- }
-
- purb->interval = 1;
- purb->dev = peasycap->pusb_device;
- purb->pipe = usb_rcvisocpipe(peasycap->pusb_device,
- peasycap->audio_endpointnumber);
- purb->transfer_flags = URB_ISO_ASAP;
- purb->transfer_buffer = peasycap->audio_isoc_buffer[k].pgo;
- purb->transfer_buffer_length =
- peasycap->audio_isoc_buffer_size;
- purb->complete = easycap_alsa_complete;
- purb->context = peasycap;
- purb->start_frame = 0;
- purb->number_of_packets = peasycap->audio_isoc_framesperdesc;
- for (j = 0; j < peasycap->audio_isoc_framesperdesc; j++) {
- purb->iso_frame_desc[j].offset = j *
- peasycap->audio_isoc_maxframesize;
- purb->iso_frame_desc[j].length =
- peasycap->audio_isoc_maxframesize;
- }
- }
- JOM(4, "allocation of %i struct urb done.\n", k);
+ rc = create_audio_urbs(peasycap);
+ if (rc < 0)
+ return rc;
/* Save pointer peasycap in this interface */
usb_set_intfdata(intf, peasycap);
@@ -3843,15 +4018,13 @@ static int easycap_usb_probe(struct usb_interface *intf,
SAM("ends successfully for interface %i\n", bInterfaceNumber);
return 0;
}
-/*****************************************************************************/
-/*---------------------------------------------------------------------------*/
+
/*
- * WHEN THIS FUNCTION IS CALLED THE EasyCAP HAS ALREADY BEEN PHYSICALLY
- * UNPLUGGED. HENCE peasycap->pusb_device IS NO LONGER VALID.
- *
- * THIS FUNCTION AFFECTS ALSA. BEWARE.
+ * When this function is called the device has already been
+ * physically unplugged.
+ * Hence, peasycap->pusb_device is no longer valid.
+ * This function affects ALSA.
*/
-/*---------------------------------------------------------------------------*/
static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
{
struct usb_host_interface *pusb_host_interface;
@@ -3876,6 +4049,7 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
minor = pusb_interface->minor;
JOT(4, "intf[%i]: minor=%i\n", bInterfaceNumber, minor);
+ /* There is nothing to do for Interface Number 1 */
if (1 == bInterfaceNumber)
return;
@@ -3884,11 +4058,8 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
SAY("ERROR: peasycap is NULL\n");
return;
}
-/*---------------------------------------------------------------------------*/
-/*
- * IF THE WAIT QUEUES ARE NOT CLEARED A DEADLOCK IS POSSIBLE. BEWARE.
-*/
-/*---------------------------------------------------------------------------*/
+
+ /* If the waitqueues are not cleared a deadlock is possible */
peasycap->video_eof = 1;
peasycap->audio_eof = 1;
wake_up_interruptible(&(peasycap->wq_video));
@@ -3904,15 +4075,14 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
default:
break;
}
-/*--------------------------------------------------------------------------*/
-/*
- * DEREGISTER
- *
- * THIS PROCEDURE WILL BLOCK UNTIL easycap_poll(), VIDEO IOCTL AND AUDIO
- * IOCTL ARE ALL UNLOCKED. IF THIS IS NOT DONE AN Oops CAN OCCUR WHEN
- * AN EasyCAP IS UNPLUGGED WHILE THE URBS ARE RUNNING. BEWARE.
- */
-/*--------------------------------------------------------------------------*/
+
+ /*
+ * Deregister
+	 * This procedure will block until easycap_poll() and the
+	 * video and audio ioctls are all unlocked.
+	 * If this is not done an oops can occur when an EasyCAP
+	 * is unplugged while the URBs are running.
+ */
kd = easycap_isdongle(peasycap);
switch (bInterfaceNumber) {
case 0: {
@@ -3929,7 +4099,6 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
} else {
SAY("ERROR: %i=kd is bad: cannot lock dongle\n", kd);
}
-/*---------------------------------------------------------------------------*/
if (!peasycap->v4l2_device.name[0]) {
SAM("ERROR: peasycap->v4l2_device.name is empty\n");
if (0 <= kd && DONGLE_MANY > kd)
@@ -3945,7 +4114,6 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
JOM(4, "intf[%i]: video_unregister_device() minor=%i\n",
bInterfaceNumber, minor);
peasycap->registered_video--;
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
if (0 <= kd && DONGLE_MANY > kd) {
mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
@@ -3981,12 +4149,12 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
default:
break;
}
-/*---------------------------------------------------------------------------*/
-/*
- * CALL easycap_delete() IF NO REMAINING REFERENCES TO peasycap
- * (ALSO WHEN ALSA HAS BEEN IN USE)
- */
-/*---------------------------------------------------------------------------*/
+
+ /*
+ * If no remaining references to peasycap,
+ * call easycap_delete.
+	 * (Also when ALSA has been in use)
+ */
if (!peasycap->kref.refcount.counter) {
SAM("ERROR: peasycap->kref.refcount.counter is zero "
"so cannot call kref_put()\n");
@@ -4021,17 +4189,11 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
JOT(4, "unlocked dongle[%i].mutex_video\n", kd);
}
-/*---------------------------------------------------------------------------*/
JOM(4, "ends\n");
return;
}
-/*****************************************************************************/
-/*---------------------------------------------------------------------------*/
-/*
- * PARAMETERS APPLICABLE TO ENTIRE DRIVER, I.E. BOTH VIDEO AND AUDIO
- */
-/*---------------------------------------------------------------------------*/
+/* Devices supported by this driver */
static struct usb_device_id easycap_usb_device_id_table[] = {
{USB_DEVICE(USB_EASYCAP_VENDOR_ID, USB_EASYCAP_PRODUCT_ID)},
{ }
@@ -4066,14 +4228,11 @@ static int __init easycap_module_init(void)
return rc;
}
-/*****************************************************************************/
+
static void __exit easycap_module_exit(void)
{
usb_deregister(&easycap_usb_driver);
}
-/*****************************************************************************/
module_init(easycap_module_init);
module_exit(easycap_module_exit);
-
-/*****************************************************************************/
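[Editorial aside, not part of the patch above: the easycap probe hunks fold the
isochronous audio buffer and URB setup into two helpers whose bodies are not
shown in this excerpt. A minimal sketch of the intended call pattern, using the
hypothetical helper prototypes below, is:]

/* prototypes of helpers defined elsewhere in easycap_main.c (assumed) */
static int alloc_audio_buffers(struct easycap *peasycap);
static int create_audio_urbs(struct easycap *peasycap);

static int easycap_audio_setup(struct easycap *peasycap)
{
	int rc;

	rc = alloc_audio_buffers(peasycap);	/* return value must be captured */
	if (rc < 0)
		return rc;

	rc = create_audio_urbs(peasycap);	/* undone by the caller on failure */
	if (rc < 0)
		return rc;

	return 0;
}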
diff --git a/drivers/staging/media/go7007/go7007-v4l2.c b/drivers/staging/media/go7007/go7007-v4l2.c
index 3ef4cd8b4de3..c184ad30fbd8 100644
--- a/drivers/staging/media/go7007/go7007-v4l2.c
+++ b/drivers/staging/media/go7007/go7007-v4l2.c
@@ -76,7 +76,6 @@ static void abort_queued(struct go7007 *go)
static int go7007_streamoff(struct go7007 *go)
{
- int retval = -EINVAL;
unsigned long flags;
mutex_lock(&go->hw_lock);
@@ -87,7 +86,6 @@ static int go7007_streamoff(struct go7007 *go)
abort_queued(go);
spin_unlock_irqrestore(&go->spinlock, flags);
go7007_reset_encoder(go);
- retval = 0;
}
mutex_unlock(&go->hw_lock);
return 0;
diff --git a/drivers/staging/media/go7007/s2250-loader.c b/drivers/staging/media/go7007/s2250-loader.c
index 7c5af4f289b6..f1bd159ac195 100644
--- a/drivers/staging/media/go7007/s2250-loader.c
+++ b/drivers/staging/media/go7007/s2250-loader.c
@@ -165,3 +165,5 @@ module_usb_driver(s2250loader_driver);
MODULE_AUTHOR("");
MODULE_DESCRIPTION("firmware loader for Sensoray 2250/2251");
MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(S2250_LOADER_FIRMWARE);
+MODULE_FIRMWARE(S2250_FIRMWARE);
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index d7cf5ef076a5..2944fde89f44 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -70,10 +70,6 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
static int ir_open(void *data);
static void ir_close(void *data);
-/* Driver init/exit prototypes */
-static int __init imon_init(void);
-static void __exit imon_exit(void);
-
/*** G L O B A L S ***/
#define IMON_DATA_BUF_SZ 35
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 352a20229ca2..f4e4d9003f38 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -80,10 +80,6 @@ static ssize_t vfd_write(struct file *file, const char *buf,
static int ir_open(void *data);
static void ir_close(void *data);
-/* Driver init/exit prototypes */
-static int __init sasem_init(void);
-static void __exit sasem_exit(void);
-
/*** G L O B A L S ***/
#define SASEM_DATA_BUF_SZ 32
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
index 490a7f15604b..8b864afb40b6 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/staging/omapdrm/omap_crtc.c
@@ -36,12 +36,6 @@ struct omap_crtc {
struct drm_framebuffer *old_fb;
};
-static void omap_crtc_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size)
-{
- /* not supported.. at least not yet */
-}
-
static void omap_crtc_destroy(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -198,7 +192,6 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
}
static const struct drm_crtc_funcs omap_crtc_funcs = {
- .gamma_set = omap_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = omap_crtc_destroy,
.page_flip = omap_crtc_page_flip_locked,
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
index 0d2acca376ca..4beab9447ceb 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -58,7 +58,7 @@ static void omap_fb_output_poll_changed(struct drm_device *dev)
}
}
-static struct drm_mode_config_funcs omap_mode_config_funcs = {
+static const struct drm_mode_config_funcs omap_mode_config_funcs = {
.fb_create = omap_framebuffer_create,
.output_poll_changed = omap_fb_output_poll_changed,
};
@@ -726,7 +726,7 @@ static void dev_irq_uninstall(struct drm_device *dev)
DBG("irq_uninstall: dev=%p", dev);
}
-static struct vm_operations_struct omap_gem_vm_ops = {
+static const struct vm_operations_struct omap_gem_vm_ops = {
.fault = omap_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 59af3945ea85..65c7c62c7aae 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -633,7 +633,6 @@ static int ptmx_open(struct inode *inode, struct file *filp)
mutex_unlock(&devpts_mutex);
mutex_lock(&tty_mutex);
- mutex_lock(&devpts_mutex);
tty = tty_init_dev(ptm_driver, index);
if (IS_ERR(tty)) {
@@ -643,7 +642,6 @@ static int ptmx_open(struct inode *inode, struct file *filp)
/* The tty returned here is locked so we can safely
drop the mutex */
- mutex_unlock(&devpts_mutex);
mutex_unlock(&tty_mutex);
set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index ec206732f68c..4ef747307ecb 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -205,7 +205,8 @@ struct imx_port {
unsigned int irda_inv_rx:1;
unsigned int irda_inv_tx:1;
unsigned short trcv_delay; /* transceiver delay */
- struct clk *clk;
+ struct clk *clk_ipg;
+ struct clk *clk_per;
struct imx_uart_data *devdata;
};
@@ -673,7 +674,7 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
* RFDIV is set such way to satisfy requested uartclk value
*/
val = TXTL << 10 | RXTL;
- ufcr_rfdiv = (clk_get_rate(sport->clk) + sport->port.uartclk / 2)
+ ufcr_rfdiv = (clk_get_rate(sport->clk_per) + sport->port.uartclk / 2)
/ sport->port.uartclk;
if(!ufcr_rfdiv)
@@ -1286,7 +1287,7 @@ imx_console_get_options(struct imx_port *sport, int *baud,
else
ucfr_rfdiv = 6 - ucfr_rfdiv;
- uartclk = clk_get_rate(sport->clk);
+ uartclk = clk_get_rate(sport->clk_per);
uartclk /= ucfr_rfdiv;
{ /*
@@ -1511,14 +1512,22 @@ static int serial_imx_probe(struct platform_device *pdev)
goto unmap;
}
- sport->clk = clk_get(&pdev->dev, "uart");
- if (IS_ERR(sport->clk)) {
- ret = PTR_ERR(sport->clk);
+ sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(sport->clk_ipg)) {
+ ret = PTR_ERR(sport->clk_ipg);
goto unmap;
}
- clk_prepare_enable(sport->clk);
- sport->port.uartclk = clk_get_rate(sport->clk);
+ sport->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(sport->clk_per)) {
+ ret = PTR_ERR(sport->clk_per);
+ goto unmap;
+ }
+
+ clk_prepare_enable(sport->clk_per);
+ clk_prepare_enable(sport->clk_ipg);
+
+ sport->port.uartclk = clk_get_rate(sport->clk_per);
imx_ports[sport->port.line] = sport;
@@ -1539,8 +1548,8 @@ deinit:
if (pdata && pdata->exit)
pdata->exit(pdev);
clkput:
- clk_disable_unprepare(sport->clk);
- clk_put(sport->clk);
+ clk_disable_unprepare(sport->clk_per);
+ clk_disable_unprepare(sport->clk_ipg);
unmap:
iounmap(sport->port.membase);
free:
@@ -1558,11 +1567,10 @@ static int serial_imx_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- if (sport) {
- uart_remove_one_port(&imx_reg, &sport->port);
- clk_disable_unprepare(sport->clk);
- clk_put(sport->clk);
- }
+ uart_remove_one_port(&imx_reg, &sport->port);
+
+ clk_disable_unprepare(sport->clk_per);
+ clk_disable_unprepare(sport->clk_ipg);
if (pdata && pdata->exit)
pdata->exit(pdev);
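[Editorial aside: the imx conversion above replaces the single "uart" clock with
separate ipg/per clocks obtained via devm_clk_get(), so no clk_put() is needed on
the error or remove paths. A hedged sketch of that pattern, with placeholder
names, is:]

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk_ipg, *clk_per;

	clk_ipg = devm_clk_get(&pdev->dev, "ipg");	/* register/bus clock */
	if (IS_ERR(clk_ipg))
		return PTR_ERR(clk_ipg);

	clk_per = devm_clk_get(&pdev->dev, "per");	/* baud-rate reference */
	if (IS_ERR(clk_per))
		return PTR_ERR(clk_per);

	clk_prepare_enable(clk_per);
	clk_prepare_enable(clk_ipg);

	/* remove(): clk_disable_unprepare() both clocks; no clk_put() needed */
	return 0;
}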
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 96c1cacc7360..02da071fe1e7 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -31,16 +31,19 @@
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/gpio.h>
#include <lantiq_soc.h>
#define PORT_LTQ_ASC 111
#define MAXPORTS 2
#define UART_DUMMY_UER_RX 1
-#define DRVNAME "ltq_asc"
+#define DRVNAME "lantiq,asc"
#ifdef __BIG_ENDIAN
#define LTQ_ASC_TBUF (0x0020 + 3)
#define LTQ_ASC_RBUF (0x0024 + 3)
@@ -114,6 +117,9 @@ static DEFINE_SPINLOCK(ltq_asc_lock);
struct ltq_uart_port {
struct uart_port port;
+ /* clock used to derive divider */
+ struct clk *fpiclk;
+ /* clock gating of the ASC core */
struct clk *clk;
unsigned int tx_irq;
unsigned int rx_irq;
@@ -316,7 +322,9 @@ lqasc_startup(struct uart_port *port)
struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
int retval;
- port->uartclk = clk_get_rate(ltq_port->clk);
+ if (ltq_port->clk)
+ clk_enable(ltq_port->clk);
+ port->uartclk = clk_get_rate(ltq_port->fpiclk);
ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
port->membase + LTQ_ASC_CLC);
@@ -382,6 +390,8 @@ lqasc_shutdown(struct uart_port *port)
port->membase + LTQ_ASC_RXFCON);
ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
port->membase + LTQ_ASC_TXFCON);
+ if (ltq_port->clk)
+ clk_disable(ltq_port->clk);
}
static void
@@ -630,7 +640,7 @@ lqasc_console_setup(struct console *co, char *options)
port = &ltq_port->port;
- port->uartclk = clk_get_rate(ltq_port->clk);
+ port->uartclk = clk_get_rate(ltq_port->fpiclk);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -668,37 +678,32 @@ static struct uart_driver lqasc_reg = {
static int __init
lqasc_probe(struct platform_device *pdev)
{
+ struct device_node *node = pdev->dev.of_node;
struct ltq_uart_port *ltq_port;
struct uart_port *port;
- struct resource *mmres, *irqres;
- int tx_irq, rx_irq, err_irq;
- struct clk *clk;
+ struct resource *mmres, irqres[3];
+ int line = 0;
int ret;
mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!mmres || !irqres)
+ ret = of_irq_to_resource_table(node, irqres, 3);
+ if (!mmres || (ret != 3)) {
+ dev_err(&pdev->dev,
+ "failed to get memory/irq for serial port\n");
return -ENODEV;
+ }
- if (pdev->id >= MAXPORTS)
- return -EBUSY;
+ /* check if this is the console port */
+ if (mmres->start != CPHYSADDR(LTQ_EARLY_ASC))
+ line = 1;
- if (lqasc_port[pdev->id] != NULL)
+ if (lqasc_port[line]) {
+ dev_err(&pdev->dev, "port %d already allocated\n", line);
return -EBUSY;
-
- clk = clk_get(&pdev->dev, "fpi");
- if (IS_ERR(clk)) {
- pr_err("failed to get fpi clk\n");
- return -ENOENT;
}
- tx_irq = platform_get_irq_byname(pdev, "tx");
- rx_irq = platform_get_irq_byname(pdev, "rx");
- err_irq = platform_get_irq_byname(pdev, "err");
- if ((tx_irq < 0) | (rx_irq < 0) | (err_irq < 0))
- return -ENODEV;
-
- ltq_port = kzalloc(sizeof(struct ltq_uart_port), GFP_KERNEL);
+ ltq_port = devm_kzalloc(&pdev->dev, sizeof(struct ltq_uart_port),
+ GFP_KERNEL);
if (!ltq_port)
return -ENOMEM;
@@ -709,19 +714,26 @@ lqasc_probe(struct platform_device *pdev)
port->ops = &lqasc_pops;
port->fifosize = 16;
port->type = PORT_LTQ_ASC,
- port->line = pdev->id;
+ port->line = line;
port->dev = &pdev->dev;
-
- port->irq = tx_irq; /* unused, just to be backward-compatibe */
+ /* unused, just to be backward-compatible */
+ port->irq = irqres[0].start;
port->mapbase = mmres->start;
- ltq_port->clk = clk;
+ ltq_port->fpiclk = clk_get_fpi();
+ if (IS_ERR(ltq_port->fpiclk)) {
+ pr_err("failed to get fpi clk\n");
+ return -ENOENT;
+ }
- ltq_port->tx_irq = tx_irq;
- ltq_port->rx_irq = rx_irq;
- ltq_port->err_irq = err_irq;
+	/* not all asc ports have clock gates, let's ignore the return code */
+ ltq_port->clk = clk_get(&pdev->dev, NULL);
- lqasc_port[pdev->id] = ltq_port;
+ ltq_port->tx_irq = irqres[0].start;
+ ltq_port->rx_irq = irqres[1].start;
+ ltq_port->err_irq = irqres[2].start;
+
+ lqasc_port[line] = ltq_port;
platform_set_drvdata(pdev, ltq_port);
ret = uart_add_one_port(&lqasc_reg, port);
@@ -729,10 +741,17 @@ lqasc_probe(struct platform_device *pdev)
return ret;
}
+static const struct of_device_id ltq_asc_match[] = {
+ { .compatible = DRVNAME },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_asc_match);
+
static struct platform_driver lqasc_driver = {
.driver = {
.name = DRVNAME,
.owner = THIS_MODULE,
+ .of_match_table = ltq_asc_match,
},
};
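[Editorial aside: with the device-tree conversion above, the driver is bound
through an of_device_id table instead of a platform id. A generic sketch of that
binding, using a placeholder compatible string, is:]

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,example-uart" },	/* placeholder compatible */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, example_match);

static struct platform_driver example_driver = {
	.driver = {
		.name		= "example-uart",
		.owner		= THIS_MODULE,
		.of_match_table	= example_match,	/* enables DT probing */
	},
};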
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 0be8a2f00d0b..f76b1688c5c8 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/major.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 4001eee6c08d..92c00b24d0df 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -57,6 +57,7 @@
#include <linux/ioport.h>
#include <linux/irqflags.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/major.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 173a9000a6cb..ba8be396a621 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -894,6 +894,23 @@ int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
tty_ldisc_enable(tty);
return 0;
}
+
+static void tty_ldisc_kill(struct tty_struct *tty)
+{
+ mutex_lock(&tty->ldisc_mutex);
+ /*
+ * Now kill off the ldisc
+ */
+ tty_ldisc_close(tty, tty->ldisc);
+ tty_ldisc_put(tty->ldisc);
+ /* Force an oops if we mess this up */
+ tty->ldisc = NULL;
+
+ /* Ensure the next open requests the N_TTY ldisc */
+ tty_set_termios_ldisc(tty, N_TTY);
+ mutex_unlock(&tty->ldisc_mutex);
+}
+
/**
* tty_ldisc_release - release line discipline
* @tty: tty being shut down
@@ -912,27 +929,19 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
* race with the set_ldisc code path.
*/
- tty_unlock(tty);
+ tty_unlock_pair(tty, o_tty);
tty_ldisc_halt(tty);
tty_ldisc_flush_works(tty);
- tty_lock(tty);
-
- mutex_lock(&tty->ldisc_mutex);
- /*
- * Now kill off the ldisc
- */
- tty_ldisc_close(tty, tty->ldisc);
- tty_ldisc_put(tty->ldisc);
- /* Force an oops if we mess this up */
- tty->ldisc = NULL;
+ if (o_tty) {
+ tty_ldisc_halt(o_tty);
+ tty_ldisc_flush_works(o_tty);
+ }
+ tty_lock_pair(tty, o_tty);
- /* Ensure the next open requests the N_TTY ldisc */
- tty_set_termios_ldisc(tty, N_TTY);
- mutex_unlock(&tty->ldisc_mutex);
- /* This will need doing differently if we need to lock */
+ tty_ldisc_kill(tty);
if (o_tty)
- tty_ldisc_release(o_tty, NULL);
+ tty_ldisc_kill(o_tty);
/* And the memory resources remaining (buffers, termios) will be
disposed of when the kref hits zero */
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index 69adc80c98cd..67feac9e6ebb 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -6,11 +6,17 @@
/* Legacy tty mutex glue */
+enum {
+ TTY_MUTEX_NORMAL,
+ TTY_MUTEX_NESTED,
+};
+
/*
* Getting the big tty mutex.
*/
-void __lockfunc tty_lock(struct tty_struct *tty)
+static void __lockfunc tty_lock_nested(struct tty_struct *tty,
+ unsigned int subclass)
{
if (tty->magic != TTY_MAGIC) {
printk(KERN_ERR "L Bad %p\n", tty);
@@ -18,7 +24,12 @@ void __lockfunc tty_lock(struct tty_struct *tty)
return;
}
tty_kref_get(tty);
- mutex_lock(&tty->legacy_mutex);
+ mutex_lock_nested(&tty->legacy_mutex, subclass);
+}
+
+void __lockfunc tty_lock(struct tty_struct *tty)
+{
+ return tty_lock_nested(tty, TTY_MUTEX_NORMAL);
}
EXPORT_SYMBOL(tty_lock);
@@ -43,11 +54,11 @@ void __lockfunc tty_lock_pair(struct tty_struct *tty,
{
if (tty < tty2) {
tty_lock(tty);
- tty_lock(tty2);
+ tty_lock_nested(tty2, TTY_MUTEX_NESTED);
} else {
if (tty2 && tty2 != tty)
tty_lock(tty2);
- tty_lock(tty);
+ tty_lock_nested(tty, TTY_MUTEX_NESTED);
}
}
EXPORT_SYMBOL(tty_lock_pair);
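[Editorial aside: the tty_mutex change takes the two legacy mutexes of a pty pair
in a stable address order and annotates the second acquisition with a lockdep
subclass, so lockdep does not report it as recursive locking. A generic sketch of
that idiom, on hypothetical pair_obj structures, is:]

#include <linux/mutex.h>

struct pair_obj {
	struct mutex lock;
};

enum { PAIR_MUTEX_NORMAL, PAIR_MUTEX_NESTED };

/* Lock two related objects in address order; the second lock gets a
 * distinct lockdep subclass so the nesting is not flagged. */
static void pair_lock_both(struct pair_obj *a, struct pair_obj *b)
{
	if (a < b) {
		mutex_lock_nested(&a->lock, PAIR_MUTEX_NORMAL);
		mutex_lock_nested(&b->lock, PAIR_MUTEX_NESTED);
	} else {
		if (b && b != a)
			mutex_lock_nested(&b->lock, PAIR_MUTEX_NORMAL);
		mutex_lock_nested(&a->lock, PAIR_MUTEX_NESTED);
	}
}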
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
index 0cdf89d32a15..104ae9c81251 100644
--- a/drivers/usb/gadget/uvc_queue.c
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -543,7 +543,7 @@ done:
return ret;
}
-/* called with queue->irqlock held.. */
+/* called with &queue_irqlock held.. */
static struct uvc_buffer *
uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf)
{
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index 54d7ca559cb2..2ca9386d655b 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -296,7 +296,7 @@ uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
return -EINVAL;
- return v4l2_event_subscribe(&handle->vfh, arg, 2);
+ return v4l2_event_subscribe(&handle->vfh, arg, 2, NULL);
}
case VIDIOC_UNSUBSCRIBE_EVENT:
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index a797d51ecbe8..c778ffe4e4e5 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -32,7 +32,7 @@
#define ULPI_VIEWPORT_OFFSET 0x170
struct ehci_mxc_priv {
- struct clk *usbclk, *ahbclk, *phy1clk;
+ struct clk *usbclk, *ahbclk, *phyclk;
struct usb_hcd *hcd;
};
@@ -166,31 +166,26 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
}
/* enable clocks */
- priv->usbclk = clk_get(dev, "usb");
+ priv->usbclk = clk_get(dev, "ipg");
if (IS_ERR(priv->usbclk)) {
ret = PTR_ERR(priv->usbclk);
goto err_clk;
}
- clk_enable(priv->usbclk);
+ clk_prepare_enable(priv->usbclk);
- if (!cpu_is_mx35() && !cpu_is_mx25()) {
- priv->ahbclk = clk_get(dev, "usb_ahb");
- if (IS_ERR(priv->ahbclk)) {
- ret = PTR_ERR(priv->ahbclk);
- goto err_clk_ahb;
- }
- clk_enable(priv->ahbclk);
+ priv->ahbclk = clk_get(dev, "ahb");
+ if (IS_ERR(priv->ahbclk)) {
+ ret = PTR_ERR(priv->ahbclk);
+ goto err_clk_ahb;
}
+ clk_prepare_enable(priv->ahbclk);
/* "dr" device has its own clock on i.MX51 */
- if (cpu_is_mx51() && (pdev->id == 0)) {
- priv->phy1clk = clk_get(dev, "usb_phy1");
- if (IS_ERR(priv->phy1clk)) {
- ret = PTR_ERR(priv->phy1clk);
- goto err_clk_phy;
- }
- clk_enable(priv->phy1clk);
- }
+ priv->phyclk = clk_get(dev, "phy");
+ if (IS_ERR(priv->phyclk))
+ priv->phyclk = NULL;
+ if (priv->phyclk)
+ clk_prepare_enable(priv->phyclk);
/* call platform specific init function */
@@ -265,17 +260,15 @@ err_add:
if (pdata && pdata->exit)
pdata->exit(pdev);
err_init:
- if (priv->phy1clk) {
- clk_disable(priv->phy1clk);
- clk_put(priv->phy1clk);
- }
-err_clk_phy:
- if (priv->ahbclk) {
- clk_disable(priv->ahbclk);
- clk_put(priv->ahbclk);
+ if (priv->phyclk) {
+ clk_disable_unprepare(priv->phyclk);
+ clk_put(priv->phyclk);
}
+
+ clk_disable_unprepare(priv->ahbclk);
+ clk_put(priv->ahbclk);
err_clk_ahb:
- clk_disable(priv->usbclk);
+ clk_disable_unprepare(priv->usbclk);
clk_put(priv->usbclk);
err_clk:
iounmap(hcd->regs);
@@ -307,15 +300,14 @@ static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
usb_put_hcd(hcd);
platform_set_drvdata(pdev, NULL);
- clk_disable(priv->usbclk);
+ clk_disable_unprepare(priv->usbclk);
clk_put(priv->usbclk);
- if (priv->ahbclk) {
- clk_disable(priv->ahbclk);
- clk_put(priv->ahbclk);
- }
- if (priv->phy1clk) {
- clk_disable(priv->phy1clk);
- clk_put(priv->phy1clk);
+ clk_disable_unprepare(priv->ahbclk);
+ clk_put(priv->ahbclk);
+
+ if (priv->phyclk) {
+ clk_disable_unprepare(priv->phyclk);
+ clk_put(priv->phyclk);
}
kfree(priv);
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 6c6a5a3b4ea7..82de1073aa52 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mbus.h>
+#include <linux/clk.h>
#include <plat/ehci-orion.h>
#define rdl(off) __raw_readl(hcd->regs + (off))
@@ -198,6 +199,7 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
struct resource *res;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
+ struct clk *clk;
void __iomem *regs;
int irq, err;
@@ -238,6 +240,14 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
goto err2;
}
+ /* Not all platforms can gate the clock, so it is not
+	   an error if the clock does not exist. */
+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_prepare_enable(clk);
+ clk_put(clk);
+ }
+
hcd = usb_create_hcd(&ehci_orion_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
@@ -301,12 +311,18 @@ err1:
static int __exit ehci_orion_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct clk *clk;
usb_remove_hcd(hcd);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ }
return 0;
}
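[Editorial aside: ehci-orion treats its clock as optional above; a missing clock
is not a probe failure, only an existing one is gated. A small sketch of that
optional-clock pattern is:]

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static void example_enable_optional_clk(struct device *dev)
{
	struct clk *clk;

	/* Not every platform provides a gateable clock; a lookup failure
	 * is treated as "no clock", not as an error. */
	clk = clk_get(dev, NULL);
	if (!IS_ERR(clk)) {
		clk_prepare_enable(clk);
		clk_put(clk);	/* reference dropped, clock stays enabled */
	}
}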
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 4a44bf833611..68548236ec42 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -722,8 +722,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
}
}
- tegra->phy = tegra_usb_phy_open(instance, hcd->regs, pdata->phy_config,
- TEGRA_USB_PHY_MODE_HOST);
+ tegra->phy = tegra_usb_phy_open(&pdev->dev, instance, hcd->regs,
+ pdata->phy_config,
+ TEGRA_USB_PHY_MODE_HOST);
if (IS_ERR(tegra->phy)) {
dev_err(&pdev->dev, "Failed to open USB phy\n");
err = -ENXIO;
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index af16884491ed..fa2b03750316 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -184,6 +184,18 @@ config BACKLIGHT_GENERIC
known as the Corgi backlight driver. If you have a Sharp Zaurus
SL-C7xx, SL-Cxx00 or SL-6000x say y.
+config BACKLIGHT_LM3533
+ tristate "Backlight Driver for LM3533"
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on MFD_LM3533
+ help
+ Say Y to enable the backlight driver for National Semiconductor / TI
+ LM3533 Lighting Power chips.
+
+ The backlights can be controlled directly, through PWM input, or by
+ the ambient-light-sensor interface. The chip supports 256 brightness
+ levels.
+
config BACKLIGHT_LOCOMO
tristate "Sharp LOCOMO LCD/Backlight Driver"
depends on SHARP_LOCOMO
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 36855ae887d6..a2ac9cfbaf6b 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o
obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o
obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
+obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 4911ea7989c8..df5db99af23d 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -160,7 +160,7 @@ static ssize_t adp5520_store(struct device *dev, const char *buf,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -214,7 +214,7 @@ static ssize_t adp5520_bl_daylight_max_store(struct device *dev,
struct adp5520_bl *data = dev_get_drvdata(dev);
int ret;
- ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+ ret = kstrtoul(buf, 10, &data->cached_daylight_max);
if (ret < 0)
return ret;
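[Editorial aside: the backlight hunks in this series replace the deprecated
strict_strtoul() with kstrtoul(). A minimal sysfs store handler using it, with
placeholder names, might look like:]

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);	/* base 10, rejects trailing junk */
	if (ret)
		return ret;

	/* ... apply val to the device here ... */
	return count;
}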
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 550dbf0bb896..77d1fdba597f 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -222,7 +222,8 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
struct led_info *cur_led;
int ret, i;
- led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+ led = devm_kzalloc(&client->dev, sizeof(*led) * pdata->num_leds,
+ GFP_KERNEL);
if (led == NULL) {
dev_err(&client->dev, "failed to alloc memory\n");
return -ENOMEM;
@@ -236,7 +237,7 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
if (ret) {
dev_err(&client->dev, "failed to write\n");
- goto err_free;
+ return ret;
}
for (i = 0; i < pdata->num_leds; ++i) {
@@ -291,9 +292,6 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
cancel_work_sync(&led[i].work);
}
- err_free:
- kfree(led);
-
return ret;
}
@@ -309,7 +307,6 @@ static int __devexit adp8860_led_remove(struct i2c_client *client)
cancel_work_sync(&data->led[i].work);
}
- kfree(data->led);
return 0;
}
#else
@@ -451,7 +448,7 @@ static ssize_t adp8860_store(struct device *dev, const char *buf,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -501,7 +498,7 @@ static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct adp8860_bl *data = dev_get_drvdata(dev);
- int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+ int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
if (ret)
return ret;
@@ -608,7 +605,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
uint8_t reg_val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -675,13 +672,13 @@ static int __devinit adp8860_probe(struct i2c_client *client,
return -EINVAL;
}
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
ret = adp8860_read(client, ADP8860_MFDVID, &reg_val);
if (ret < 0)
- goto out2;
+ return ret;
switch (ADP8860_MANID(reg_val)) {
case ADP8863_MANUFID:
@@ -694,8 +691,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
break;
default:
dev_err(&client->dev, "failed to probe\n");
- ret = -ENODEV;
- goto out2;
+ return -ENODEV;
}
/* It's confirmed that the DEVID field is actually a REVID */
@@ -717,8 +713,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
&client->dev, data, &adp8860_bl_ops, &props);
if (IS_ERR(bl)) {
dev_err(&client->dev, "failed to register backlight\n");
- ret = PTR_ERR(bl);
- goto out2;
+ return PTR_ERR(bl);
}
bl->props.brightness = ADP8860_MAX_BRIGHTNESS;
@@ -756,8 +751,6 @@ out:
&adp8860_bl_attr_group);
out1:
backlight_device_unregister(bl);
-out2:
- kfree(data);
return ret;
}
@@ -776,7 +769,6 @@ static int __devexit adp8860_remove(struct i2c_client *client)
&adp8860_bl_attr_group);
backlight_device_unregister(data->bl);
- kfree(data);
return 0;
}
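[Editorial aside: the adp8860/adp8870 conversions rely on devm_kzalloc(), which
ties the allocation to the device lifetime, so the kfree() calls in the error
paths and in remove() can be dropped. A hedged sketch of the resulting probe
shape, with a placeholder data structure, is:]

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/slab.h>

struct example_data {
	int brightness;		/* placeholder driver state */
};

static int example_probe(struct i2c_client *client)
{
	struct example_data *data;

	/* Freed automatically when the device is unbound; no kfree()
	 * and no "goto out_free" label are needed. */
	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	i2c_set_clientdata(client, data);
	return 0;
}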
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 9be58c6f18f1..edf7f91c8e61 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -244,8 +244,8 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
struct led_info *cur_led;
int ret, i;
-
- led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
+ led = devm_kzalloc(&client->dev, pdata->num_leds * sizeof(*led),
+ GFP_KERNEL);
if (led == NULL) {
dev_err(&client->dev, "failed to alloc memory\n");
return -ENOMEM;
@@ -253,17 +253,17 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law);
if (ret)
- goto err_free;
+ return ret;
ret = adp8870_write(client, ADP8870_ISCT1,
(pdata->led_on_time & 0x3) << 6);
if (ret)
- goto err_free;
+ return ret;
ret = adp8870_write(client, ADP8870_ISCF,
FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
if (ret)
- goto err_free;
+ return ret;
for (i = 0; i < pdata->num_leds; ++i) {
cur_led = &pdata->leds[i];
@@ -317,9 +317,6 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
cancel_work_sync(&led[i].work);
}
- err_free:
- kfree(led);
-
return ret;
}
@@ -335,7 +332,6 @@ static int __devexit adp8870_led_remove(struct i2c_client *client)
cancel_work_sync(&data->led[i].work);
}
- kfree(data->led);
return 0;
}
#else
@@ -572,7 +568,7 @@ static ssize_t adp8870_store(struct device *dev, const char *buf,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -652,7 +648,7 @@ static ssize_t adp8870_bl_l1_daylight_max_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct adp8870_bl *data = dev_get_drvdata(dev);
- int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+ int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
if (ret)
return ret;
@@ -794,7 +790,7 @@ static ssize_t adp8870_bl_ambient_light_zone_store(struct device *dev,
uint8_t reg_val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -874,7 +870,7 @@ static int __devinit adp8870_probe(struct i2c_client *client,
return -ENODEV;
}
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -894,8 +890,7 @@ static int __devinit adp8870_probe(struct i2c_client *client,
&client->dev, data, &adp8870_bl_ops, &props);
if (IS_ERR(bl)) {
dev_err(&client->dev, "failed to register backlight\n");
- ret = PTR_ERR(bl);
- goto out2;
+ return PTR_ERR(bl);
}
data->bl = bl;
@@ -930,8 +925,6 @@ out:
&adp8870_bl_attr_group);
out1:
backlight_device_unregister(bl);
-out2:
- kfree(data);
return ret;
}
@@ -950,7 +943,6 @@ static int __devexit adp8870_remove(struct i2c_client *client)
&adp8870_bl_attr_group);
backlight_device_unregister(data->bl);
- kfree(data);
return 0;
}
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 7bdadc790117..3729238e7096 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -482,7 +482,7 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
struct backlight_device *bd = NULL;
struct backlight_properties props;
- lcd = kzalloc(sizeof(struct ams369fg06), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct ams369fg06), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -492,7 +492,7 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&spi->dev, "spi setup failed.\n");
- goto out_free_lcd;
+ return ret;
}
lcd->spi = spi;
@@ -501,15 +501,13 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL\n");
- goto out_free_lcd;
+ return -EFAULT;
}
ld = lcd_device_register("ams369fg06", &spi->dev, lcd,
&ams369fg06_lcd_ops);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto out_free_lcd;
- }
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
lcd->ld = ld;
@@ -547,8 +545,6 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
out_lcd_unregister:
lcd_device_unregister(ld);
-out_free_lcd:
- kfree(lcd);
return ret;
}
@@ -559,7 +555,6 @@ static int __devexit ams369fg06_remove(struct spi_device *spi)
ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
- kfree(lcd);
return 0;
}
@@ -619,7 +614,6 @@ static void ams369fg06_shutdown(struct spi_device *spi)
static struct spi_driver ams369fg06_driver = {
.driver = {
.name = "ams369fg06",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ams369fg06_probe,
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
index a523b255e124..9dc73ac3709a 100644
--- a/drivers/video/backlight/apple_bl.c
+++ b/drivers/video/backlight/apple_bl.c
@@ -16,6 +16,8 @@
* get at the firmware code in order to figure out what it's actually doing.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -25,6 +27,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/atomic.h>
+#include <linux/apple_bl.h>
static struct backlight_device *apple_backlight_device;
@@ -39,8 +42,6 @@ struct hw_data {
static const struct hw_data *hw_data;
-#define DRIVER "apple_backlight: "
-
/* Module parameters. */
static int debug;
module_param_named(debug, debug, int, 0644);
@@ -60,8 +61,7 @@ static int intel_chipset_send_intensity(struct backlight_device *bd)
int intensity = bd->props.brightness;
if (debug)
- printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
- intensity);
+ pr_debug("setting brightness to %d\n", intensity);
intel_chipset_set_brightness(intensity);
return 0;
@@ -76,8 +76,7 @@ static int intel_chipset_get_intensity(struct backlight_device *bd)
intensity = inb(0xb3) >> 4;
if (debug)
- printk(KERN_DEBUG DRIVER "read brightness of %d\n",
- intensity);
+ pr_debug("read brightness of %d\n", intensity);
return intensity;
}
@@ -107,8 +106,7 @@ static int nvidia_chipset_send_intensity(struct backlight_device *bd)
int intensity = bd->props.brightness;
if (debug)
- printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
- intensity);
+ pr_debug("setting brightness to %d\n", intensity);
nvidia_chipset_set_brightness(intensity);
return 0;
@@ -123,8 +121,7 @@ static int nvidia_chipset_get_intensity(struct backlight_device *bd)
intensity = inb(0x52f) >> 4;
if (debug)
- printk(KERN_DEBUG DRIVER "read brightness of %d\n",
- intensity);
+ pr_debug("read brightness of %d\n", intensity);
return intensity;
}
@@ -149,7 +146,7 @@ static int __devinit apple_bl_add(struct acpi_device *dev)
host = pci_get_bus_and_slot(0, 0);
if (!host) {
- printk(KERN_ERR DRIVER "unable to find PCI host\n");
+ pr_err("unable to find PCI host\n");
return -ENODEV;
}
@@ -161,7 +158,7 @@ static int __devinit apple_bl_add(struct acpi_device *dev)
pci_dev_put(host);
if (!hw_data) {
- printk(KERN_ERR DRIVER "unknown hardware\n");
+ pr_err("unknown hardware\n");
return -ENODEV;
}
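[Editorial aside: several backlight drivers here switch from
printk(KERN_ERR DRIVER ...) to pr_err()/pr_debug() with a pr_fmt() prefix, so
every message is tagged with the module name automatically. Sketch:]

/* must come before any header that pulls in printk declarations */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static void example_report(int intensity)
{
	pr_debug("setting brightness to %d\n", intensity);	/* "<module>: setting ..." */
	if (intensity < 0)
		pr_err("invalid brightness %d\n", intensity);
}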
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index bf5b1ece7160..297db2fa91f5 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -5,6 +5,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -123,7 +125,7 @@ static ssize_t backlight_store_power(struct device *dev,
rc = -ENXIO;
mutex_lock(&bd->ops_lock);
if (bd->ops) {
- pr_debug("backlight: set power to %lu\n", power);
+ pr_debug("set power to %lu\n", power);
if (bd->props.power != power) {
bd->props.power = power;
backlight_update_status(bd);
@@ -161,8 +163,7 @@ static ssize_t backlight_store_brightness(struct device *dev,
if (brightness > bd->props.max_brightness)
rc = -EINVAL;
else {
- pr_debug("backlight: set brightness to %lu\n",
- brightness);
+ pr_debug("set brightness to %lu\n", brightness);
bd->props.brightness = brightness;
backlight_update_status(bd);
rc = count;
@@ -378,8 +379,8 @@ static int __init backlight_class_init(void)
{
backlight_class = class_create(THIS_MODULE, "backlight");
if (IS_ERR(backlight_class)) {
- printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n",
- PTR_ERR(backlight_class));
+ pr_warn("Unable to create backlight class; errno = %ld\n",
+ PTR_ERR(backlight_class));
return PTR_ERR(backlight_class);
}
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 6dab13fe562e..23d732677ba1 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -544,7 +544,7 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
return -EINVAL;
}
- lcd = kzalloc(sizeof(struct corgi_lcd), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct corgi_lcd), GFP_KERNEL);
if (!lcd) {
dev_err(&spi->dev, "failed to allocate memory\n");
return -ENOMEM;
@@ -554,10 +554,9 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
lcd->lcd_dev = lcd_device_register("corgi_lcd", &spi->dev,
lcd, &corgi_lcd_ops);
- if (IS_ERR(lcd->lcd_dev)) {
- ret = PTR_ERR(lcd->lcd_dev);
- goto err_free_lcd;
- }
+ if (IS_ERR(lcd->lcd_dev))
+ return PTR_ERR(lcd->lcd_dev);
+
lcd->power = FB_BLANK_POWERDOWN;
lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA;
@@ -591,8 +590,6 @@ err_unregister_bl:
backlight_device_unregister(lcd->bl_dev);
err_unregister_lcd:
lcd_device_unregister(lcd->lcd_dev);
-err_free_lcd:
- kfree(lcd);
return ret;
}
@@ -613,7 +610,6 @@ static int __devexit corgi_lcd_remove(struct spi_device *spi)
corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->lcd_dev);
- kfree(lcd);
return 0;
}
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index 22489eb5f3e0..37bae801e23b 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -27,6 +27,8 @@
* Alan Hourihane <alanh-at-tungstengraphics-dot-com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -180,14 +182,13 @@ static int cr_backlight_probe(struct platform_device *pdev)
lpc_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
CRVML_DEVICE_LPC, NULL);
if (!lpc_dev) {
- printk("INTEL CARILLO RANCH LPC not found.\n");
+ pr_err("INTEL CARILLO RANCH LPC not found.\n");
return -ENODEV;
}
pci_read_config_byte(lpc_dev, CRVML_REG_GPIOEN, &dev_en);
if (!(dev_en & CRVML_GPIOEN_BIT)) {
- printk(KERN_ERR
- "Carillo Ranch GPIO device was not enabled.\n");
+ pr_err("Carillo Ranch GPIO device was not enabled.\n");
pci_dev_put(lpc_dev);
return -ENODEV;
}
@@ -270,7 +271,7 @@ static int __init cr_backlight_init(void)
return PTR_ERR(crp);
}
- printk("Carillo Ranch Backlight Driver Initialized.\n");
+ pr_info("Carillo Ranch Backlight Driver Initialized.\n");
return 0;
}
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 30e19681a30b..573c7ece0fde 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -136,6 +136,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2,
DA9034_WLED_ISET(pdata->output_current));
+ memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = max_brightness;
bl = backlight_device_register(pdev->name, data->da903x_dev, data,
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 9ce6170c1860..8c660fcd250d 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -106,7 +108,7 @@ static int genericbl_probe(struct platform_device *pdev)
generic_backlight_device = bd;
- printk("Generic Backlight Driver Initialized.\n");
+ pr_info("Generic Backlight Driver Initialized.\n");
return 0;
}
@@ -120,7 +122,7 @@ static int genericbl_remove(struct platform_device *pdev)
backlight_device_unregister(bd);
- printk("Generic Backlight Driver Unloaded\n");
+ pr_info("Generic Backlight Driver Unloaded\n");
return 0;
}
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
index 5118a9f029ab..6c9399341bcf 100644
--- a/drivers/video/backlight/ili9320.c
+++ b/drivers/video/backlight/ili9320.c
@@ -220,7 +220,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
/* allocate and initialse our state */
- ili = kzalloc(sizeof(struct ili9320), GFP_KERNEL);
+ ili = devm_kzalloc(&spi->dev, sizeof(struct ili9320), GFP_KERNEL);
if (ili == NULL) {
dev_err(dev, "no memory for device\n");
return -ENOMEM;
@@ -240,8 +240,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
lcd = lcd_device_register("ili9320", dev, ili, &ili9320_ops);
if (IS_ERR(lcd)) {
dev_err(dev, "failed to register lcd device\n");
- ret = PTR_ERR(lcd);
- goto err_free;
+ return PTR_ERR(lcd);
}
ili->lcd = lcd;
@@ -259,9 +258,6 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
err_unregister:
lcd_device_unregister(lcd);
- err_free:
- kfree(ili);
-
return ret;
}
@@ -272,7 +268,6 @@ int __devexit ili9320_remove(struct ili9320 *ili)
ili9320_power(ili, FB_BLANK_POWERDOWN);
lcd_device_unregister(ili->lcd);
- kfree(ili);
return 0;
}
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 2f8af5d786ab..16f593b64427 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/backlight.h>
#include <linux/device.h>
#include <linux/fb.h>
@@ -38,7 +40,7 @@ static int jornada_bl_get_brightness(struct backlight_device *bd)
ret = jornada_ssp_byte(GETBRIGHTNESS);
if (jornada_ssp_byte(GETBRIGHTNESS) != TXDUMMY) {
- printk(KERN_ERR "bl : get brightness timeout\n");
+ pr_err("get brightness timeout\n");
jornada_ssp_end();
return -ETIMEDOUT;
} else /* exchange txdummy for value */
@@ -59,7 +61,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
if ((bd->props.power != FB_BLANK_UNBLANK) || (bd->props.fb_blank != FB_BLANK_UNBLANK)) {
ret = jornada_ssp_byte(BRIGHTNESSOFF);
if (ret != TXDUMMY) {
- printk(KERN_INFO "bl : brightness off timeout\n");
+ pr_info("brightness off timeout\n");
/* turn off backlight */
PPSR &= ~PPC_LDD1;
PPDR |= PPC_LDD1;
@@ -70,7 +72,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
/* send command to our mcu */
if (jornada_ssp_byte(SETBRIGHTNESS) != TXDUMMY) {
- printk(KERN_INFO "bl : failed to set brightness\n");
+ pr_info("failed to set brightness\n");
ret = -ETIMEDOUT;
goto out;
}
@@ -81,7 +83,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
but due to physical layout it is equal to 0, so we simply
invert the value (MAX VALUE - NEW VALUE). */
if (jornada_ssp_byte(BL_MAX_BRIGHT - bd->props.brightness) != TXDUMMY) {
- printk(KERN_ERR "bl : set brightness failed\n");
+ pr_err("set brightness failed\n");
ret = -ETIMEDOUT;
}
@@ -113,7 +115,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
- printk(KERN_ERR "bl : failed to register device, err=%x\n", ret);
+ pr_err("failed to register device, err=%x\n", ret);
return ret;
}
@@ -125,7 +127,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
jornada_bl_update_status(bd);
platform_set_drvdata(pdev, bd);
- printk(KERN_INFO "HP Jornada 700 series backlight driver\n");
+ pr_info("HP Jornada 700 series backlight driver\n");
return 0;
}
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index 22d231a17e3c..635b30523fd5 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/device.h>
#include <linux/fb.h>
#include <linux/kernel.h>
@@ -44,7 +46,7 @@ static int jornada_lcd_get_contrast(struct lcd_device *dev)
jornada_ssp_start();
if (jornada_ssp_byte(GETCONTRAST) != TXDUMMY) {
- printk(KERN_ERR "lcd: get contrast failed\n");
+ pr_err("get contrast failed\n");
jornada_ssp_end();
return -ETIMEDOUT;
} else {
@@ -65,7 +67,7 @@ static int jornada_lcd_set_contrast(struct lcd_device *dev, int value)
/* push the new value */
if (jornada_ssp_byte(value) != TXDUMMY) {
- printk(KERN_ERR "lcd : set contrast failed\n");
+ pr_err("set contrast failed\n");
jornada_ssp_end();
return -ETIMEDOUT;
}
@@ -103,7 +105,7 @@ static int jornada_lcd_probe(struct platform_device *pdev)
if (IS_ERR(lcd_device)) {
ret = PTR_ERR(lcd_device);
- printk(KERN_ERR "lcd : failed to register device\n");
+ pr_err("failed to register device\n");
return ret;
}
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 6022b67285ec..40f606a86093 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
@@ -159,7 +161,8 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
return -EINVAL;
}
- priv = kzalloc(sizeof(struct l4f00242t03_priv), GFP_KERNEL);
+ priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv),
+ GFP_KERNEL);
if (priv == NULL) {
dev_err(&spi->dev, "No memory for this device.\n");
@@ -177,7 +180,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 reset gpio.\n");
- goto err;
+ return ret;
}
ret = gpio_request_one(pdata->data_enable_gpio, GPIOF_OUT_INIT_LOW,
@@ -185,7 +188,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 data en gpio.\n");
- goto err2;
+ goto err;
}
priv->io_reg = regulator_get(&spi->dev, "vdd");
@@ -193,7 +196,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
ret = PTR_ERR(priv->io_reg);
dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
__func__);
- goto err3;
+ goto err2;
}
priv->core_reg = regulator_get(&spi->dev, "vcore");
@@ -201,14 +204,14 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
ret = PTR_ERR(priv->core_reg);
dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
__func__);
- goto err4;
+ goto err3;
}
priv->ld = lcd_device_register("l4f00242t03",
&spi->dev, priv, &l4f_ops);
if (IS_ERR(priv->ld)) {
ret = PTR_ERR(priv->ld);
- goto err5;
+ goto err4;
}
/* Init the LCD */
@@ -220,16 +223,14 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
return 0;
-err5:
- regulator_put(priv->core_reg);
err4:
- regulator_put(priv->io_reg);
+ regulator_put(priv->core_reg);
err3:
- gpio_free(pdata->data_enable_gpio);
+ regulator_put(priv->io_reg);
err2:
- gpio_free(pdata->reset_gpio);
+ gpio_free(pdata->data_enable_gpio);
err:
- kfree(priv);
+ gpio_free(pdata->reset_gpio);
return ret;
}
@@ -250,8 +251,6 @@ static int __devexit l4f00242t03_remove(struct spi_device *spi)
regulator_put(priv->io_reg);
regulator_put(priv->core_reg);
- kfree(priv);
-
return 0;
}
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 79c1b0d609a8..a5d0d024bb92 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -5,6 +5,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -32,6 +34,8 @@ static int fb_notifier_callback(struct notifier_block *self,
case FB_EVENT_BLANK:
case FB_EVENT_MODE_CHANGE:
case FB_EVENT_MODE_CHANGE_ALL:
+ case FB_EARLY_EVENT_BLANK:
+ case FB_R_EARLY_EVENT_BLANK:
break;
default:
return 0;
@@ -46,6 +50,14 @@ static int fb_notifier_callback(struct notifier_block *self,
if (event == FB_EVENT_BLANK) {
if (ld->ops->set_power)
ld->ops->set_power(ld, *(int *)evdata->data);
+ } else if (event == FB_EARLY_EVENT_BLANK) {
+ if (ld->ops->early_set_power)
+ ld->ops->early_set_power(ld,
+ *(int *)evdata->data);
+ } else if (event == FB_R_EARLY_EVENT_BLANK) {
+ if (ld->ops->r_early_set_power)
+ ld->ops->r_early_set_power(ld,
+ *(int *)evdata->data);
} else {
if (ld->ops->set_mode)
ld->ops->set_mode(ld, evdata->data);
@@ -106,7 +118,7 @@ static ssize_t lcd_store_power(struct device *dev,
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_power) {
- pr_debug("lcd: set power to %lu\n", power);
+ pr_debug("set power to %lu\n", power);
ld->ops->set_power(ld, power);
rc = count;
}
@@ -142,7 +154,7 @@ static ssize_t lcd_store_contrast(struct device *dev,
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_contrast) {
- pr_debug("lcd: set contrast to %lu\n", contrast);
+ pr_debug("set contrast to %lu\n", contrast);
ld->ops->set_contrast(ld, contrast);
rc = count;
}
@@ -253,8 +265,8 @@ static int __init lcd_class_init(void)
{
lcd_class = class_create(THIS_MODULE, "lcd");
if (IS_ERR(lcd_class)) {
- printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n",
- PTR_ERR(lcd_class));
+ pr_warn("Unable to create backlight class; errno = %ld\n",
+ PTR_ERR(lcd_class));
return PTR_ERR(lcd_class);
}
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index efd352be21ae..58f517fb7d40 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -707,7 +707,7 @@ static int ld9040_probe(struct spi_device *spi)
struct backlight_device *bd = NULL;
struct backlight_properties props;
- lcd = kzalloc(sizeof(struct ld9040), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct ld9040), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -717,7 +717,7 @@ static int ld9040_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&spi->dev, "spi setup failed.\n");
- goto out_free_lcd;
+ return ret;
}
lcd->spi = spi;
@@ -726,7 +726,7 @@ static int ld9040_probe(struct spi_device *spi)
lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
- goto out_free_lcd;
+ return -EFAULT;
}
mutex_init(&lcd->lock);
@@ -734,13 +734,13 @@ static int ld9040_probe(struct spi_device *spi)
ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
if (ret) {
dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
- goto out_free_lcd;
+ return ret;
}
ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
- goto out_free_lcd;
+ goto out_free_regulator;
}
lcd->ld = ld;
@@ -782,10 +782,9 @@ static int ld9040_probe(struct spi_device *spi)
out_unregister_lcd:
lcd_device_unregister(lcd->ld);
-out_free_lcd:
+out_free_regulator:
regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
- kfree(lcd);
return ret;
}
@@ -797,7 +796,6 @@ static int __devexit ld9040_remove(struct spi_device *spi)
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
- kfree(lcd);
return 0;
}
@@ -846,7 +844,6 @@ static void ld9040_shutdown(struct spi_device *spi)
static struct spi_driver ld9040_driver = {
.driver = {
.name = "ld9040",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ld9040_probe,
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c
new file mode 100644
index 000000000000..bebeb63607db
--- /dev/null
+++ b/drivers/video/backlight/lm3533_bl.c
@@ -0,0 +1,423 @@
+/*
+ * lm3533-bl.c -- LM3533 Backlight driver
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_HVCTRLBANK_COUNT 2
+#define LM3533_BL_MAX_BRIGHTNESS 255
+
+#define LM3533_REG_CTRLBANK_AB_BCONF 0x1a
+
+
+struct lm3533_bl {
+ struct lm3533 *lm3533;
+ struct lm3533_ctrlbank cb;
+ struct backlight_device *bd;
+ int id;
+};
+
+
+static inline int lm3533_bl_get_ctrlbank_id(struct lm3533_bl *bl)
+{
+ return bl->id;
+}
+
+static int lm3533_bl_update_status(struct backlight_device *bd)
+{
+ struct lm3533_bl *bl = bl_get_data(bd);
+ int brightness = bd->props.brightness;
+
+ if (bd->props.power != FB_BLANK_UNBLANK)
+ brightness = 0;
+ if (bd->props.fb_blank != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ return lm3533_ctrlbank_set_brightness(&bl->cb, (u8)brightness);
+}
+
+static int lm3533_bl_get_brightness(struct backlight_device *bd)
+{
+ struct lm3533_bl *bl = bl_get_data(bd);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_brightness(&bl->cb, &val);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static const struct backlight_ops lm3533_bl_ops = {
+ .get_brightness = lm3533_bl_get_brightness,
+ .update_status = lm3533_bl_update_status,
+};
+
+static ssize_t show_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", bl->id);
+}
+
+static ssize_t show_als_channel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ unsigned channel = lm3533_bl_get_ctrlbank_id(bl);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", channel);
+}
+
+static ssize_t show_als_en(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ int ctrlbank = lm3533_bl_get_ctrlbank_id(bl);
+ u8 val;
+ u8 mask;
+ bool enable;
+ int ret;
+
+ ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val);
+ if (ret)
+ return ret;
+
+ mask = 1 << (2 * ctrlbank);
+ enable = val & mask;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", enable);
+}
+
+static ssize_t store_als_en(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ int ctrlbank = lm3533_bl_get_ctrlbank_id(bl);
+ int enable;
+ u8 val;
+ u8 mask;
+ int ret;
+
+ if (kstrtoint(buf, 0, &enable))
+ return -EINVAL;
+
+ mask = 1 << (2 * ctrlbank);
+
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val,
+ mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_linear(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ u8 val;
+ u8 mask;
+ int linear;
+ int ret;
+
+ ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val);
+ if (ret)
+ return ret;
+
+ mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1);
+
+ if (val & mask)
+ linear = 1;
+ else
+ linear = 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", linear);
+}
+
+static ssize_t store_linear(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ unsigned long linear;
+ u8 mask;
+ u8 val;
+ int ret;
+
+ if (kstrtoul(buf, 0, &linear))
+ return -EINVAL;
+
+ mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1);
+
+ if (linear)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val,
+ mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_pwm(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_pwm(&bl->cb, &val);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ u8 val;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val))
+ return -EINVAL;
+
+ ret = lm3533_ctrlbank_set_pwm(&bl->cb, val);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static LM3533_ATTR_RO(als_channel);
+static LM3533_ATTR_RW(als_en);
+static LM3533_ATTR_RO(id);
+static LM3533_ATTR_RW(linear);
+static LM3533_ATTR_RW(pwm);
+
+static struct attribute *lm3533_bl_attributes[] = {
+ &dev_attr_als_channel.attr,
+ &dev_attr_als_en.attr,
+ &dev_attr_id.attr,
+ &dev_attr_linear.attr,
+ &dev_attr_pwm.attr,
+ NULL,
+};
+
+static umode_t lm3533_bl_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ umode_t mode = attr->mode;
+
+ if (attr == &dev_attr_als_channel.attr ||
+ attr == &dev_attr_als_en.attr) {
+ if (!bl->lm3533->have_als)
+ mode = 0;
+ }
+
+ return mode;
+}
+
+static struct attribute_group lm3533_bl_attribute_group = {
+ .is_visible = lm3533_bl_attr_is_visible,
+ .attrs = lm3533_bl_attributes
+};
+
+static int __devinit lm3533_bl_setup(struct lm3533_bl *bl,
+ struct lm3533_bl_platform_data *pdata)
+{
+ int ret;
+
+ ret = lm3533_ctrlbank_set_max_current(&bl->cb, pdata->max_current);
+ if (ret)
+ return ret;
+
+ return lm3533_ctrlbank_set_pwm(&bl->cb, pdata->pwm);
+}
+
+static int __devinit lm3533_bl_probe(struct platform_device *pdev)
+{
+ struct lm3533 *lm3533;
+ struct lm3533_bl_platform_data *pdata;
+ struct lm3533_bl *bl;
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ int ret;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533 = dev_get_drvdata(pdev->dev.parent);
+ if (!lm3533)
+ return -EINVAL;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ if (pdev->id < 0 || pdev->id >= LM3533_HVCTRLBANK_COUNT) {
+ dev_err(&pdev->dev, "illegal backlight id %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+ if (!bl) {
+ dev_err(&pdev->dev,
+ "failed to allocate memory for backlight\n");
+ return -ENOMEM;
+ }
+
+ bl->lm3533 = lm3533;
+ bl->id = pdev->id;
+
+ bl->cb.lm3533 = lm3533;
+ bl->cb.id = lm3533_bl_get_ctrlbank_id(bl);
+ bl->cb.dev = NULL; /* until registered */
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = LM3533_BL_MAX_BRIGHTNESS;
+ props.brightness = pdata->default_brightness;
+ bd = backlight_device_register(pdata->name, pdev->dev.parent, bl,
+ &lm3533_bl_ops, &props);
+ if (IS_ERR(bd)) {
+ dev_err(&pdev->dev, "failed to register backlight device\n");
+ ret = PTR_ERR(bd);
+ goto err_free;
+ }
+
+ bl->bd = bd;
+ bl->cb.dev = &bl->bd->dev;
+
+ platform_set_drvdata(pdev, bl);
+
+ ret = sysfs_create_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to create sysfs attributes\n");
+ goto err_unregister;
+ }
+
+ backlight_update_status(bd);
+
+ ret = lm3533_bl_setup(bl, pdata);
+ if (ret)
+ goto err_sysfs_remove;
+
+ ret = lm3533_ctrlbank_enable(&bl->cb);
+ if (ret)
+ goto err_sysfs_remove;
+
+ return 0;
+
+err_sysfs_remove:
+ sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+err_unregister:
+ backlight_device_unregister(bd);
+err_free:
+ kfree(bl);
+
+ return ret;
+}
+
+static int __devexit lm3533_bl_remove(struct platform_device *pdev)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bd = bl->bd;
+
+ dev_dbg(&bd->dev, "%s\n", __func__);
+
+ bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.brightness = 0;
+
+ lm3533_ctrlbank_disable(&bl->cb);
+ sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+ backlight_device_unregister(bd);
+ kfree(bl);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lm3533_bl_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ return lm3533_ctrlbank_disable(&bl->cb);
+}
+
+static int lm3533_bl_resume(struct platform_device *pdev)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ return lm3533_ctrlbank_enable(&bl->cb);
+}
+#else
+#define lm3533_bl_suspend NULL
+#define lm3533_bl_resume NULL
+#endif
+
+static void lm3533_bl_shutdown(struct platform_device *pdev)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533_ctrlbank_disable(&bl->cb);
+}
+
+static struct platform_driver lm3533_bl_driver = {
+ .driver = {
+ .name = "lm3533-backlight",
+ .owner = THIS_MODULE,
+ },
+ .probe = lm3533_bl_probe,
+ .remove = __devexit_p(lm3533_bl_remove),
+ .shutdown = lm3533_bl_shutdown,
+ .suspend = lm3533_bl_suspend,
+ .resume = lm3533_bl_resume,
+};
+module_platform_driver(lm3533_bl_driver);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Backlight driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lm3533-backlight");
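A note on how this new driver is fed its configuration: lm3533_bl_probe() takes a struct lm3533_bl_platform_data through pdev->dev.platform_data and, together with lm3533_bl_setup(), only dereferences the name, default_brightness, max_current and pwm members, with pdev->id selecting one of the two high-voltage control banks. The snippet below is a hypothetical board-file sketch, not part of this patch; the field names follow the probe code above, while the values and the unit of max_current are assumptions.

#include <linux/mfd/lm3533.h>

/*
 * Hypothetical example only: field names follow lm3533_bl_probe() and
 * lm3533_bl_setup() above; the values and the unit of max_current are
 * assumptions, not taken from this patch.
 */
static struct lm3533_bl_platform_data board_backlight_pdata = {
	.name			= "lcd-backlight",
	.default_brightness	= 200,	/* of LM3533_BL_MAX_BRIGHTNESS (255) */
	.max_current		= 5000,	/* handed to lm3533_ctrlbank_set_max_current() */
	.pwm			= 0,	/* handed to lm3533_ctrlbank_set_pwm() */
};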
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index 4161f9e3982a..a9f2c36966f1 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -168,7 +168,8 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
goto err;
}
- st = kzalloc(sizeof(struct lms283gf05_state), GFP_KERNEL);
+ st = devm_kzalloc(&spi->dev, sizeof(struct lms283gf05_state),
+ GFP_KERNEL);
if (st == NULL) {
dev_err(&spi->dev, "No memory for device state\n");
ret = -ENOMEM;
@@ -178,7 +179,7 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
- goto err2;
+ goto err;
}
st->spi = spi;
@@ -193,8 +194,6 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
return 0;
-err2:
- kfree(st);
err:
if (pdata != NULL)
gpio_free(pdata->reset_gpio);
@@ -212,8 +211,6 @@ static int __devexit lms283gf05_remove(struct spi_device *spi)
if (pdata != NULL)
gpio_free(pdata->reset_gpio);
- kfree(st);
-
return 0;
}
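The lms283gf05 change above is the same conversion repeated throughout this series: kzalloc() becomes devm_kzalloc(), so the allocation is bound to the device and released automatically on probe failure or driver detach, and the kfree() calls in the error path and in remove() disappear. A generic sketch of the pattern, using made-up names and not taken from any of these drivers:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

/*
 * Illustrative only: struct foo_state and foo_probe() are invented names.
 * devm_kzalloc() ties the allocation to &spi->dev, so no kfree() or
 * error-path label is needed.
 */
struct foo_state {
	struct spi_device *spi;
};

static int foo_probe(struct spi_device *spi)
{
	struct foo_state *st;

	st = devm_kzalloc(&spi->dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;		/* nothing to unwind */

	st->spi = spi;
	spi_set_drvdata(spi, st);
	return 0;
}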
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 333949ff3265..6c0f1ac0d32a 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -232,23 +232,20 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
struct lcd_device *ld;
int ret;
- lcd = kzalloc(sizeof(struct ltv350qv), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct ltv350qv), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
lcd->spi = spi;
lcd->power = FB_BLANK_POWERDOWN;
- lcd->buffer = kzalloc(8, GFP_KERNEL);
- if (!lcd->buffer) {
- ret = -ENOMEM;
- goto out_free_lcd;
- }
+ lcd->buffer = devm_kzalloc(&spi->dev, 8, GFP_KERNEL);
+ if (!lcd->buffer)
+ return -ENOMEM;
ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto out_free_buffer;
- }
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
+
lcd->ld = ld;
ret = ltv350qv_power(lcd, FB_BLANK_UNBLANK);
@@ -261,10 +258,6 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
out_unregister:
lcd_device_unregister(ld);
-out_free_buffer:
- kfree(lcd->buffer);
-out_free_lcd:
- kfree(lcd);
return ret;
}
@@ -274,8 +267,6 @@ static int __devexit ltv350qv_remove(struct spi_device *spi)
ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->ld);
- kfree(lcd->buffer);
- kfree(lcd);
return 0;
}
@@ -310,7 +301,6 @@ static void ltv350qv_shutdown(struct spi_device *spi)
static struct spi_driver ltv350qv_driver = {
.driver = {
.name = "ltv350qv",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 0175bfb08a1c..bfdc5fbeaa11 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -168,7 +170,7 @@ static int omapbl_probe(struct platform_device *pdev)
dev->props.brightness = pdata->default_intensity;
omapbl_update_status(dev);
- printk(KERN_INFO "OMAP LCD backlight initialised\n");
+ pr_info("OMAP LCD backlight initialised\n");
return 0;
}
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index c65853cb9740..c092159f4383 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -111,6 +111,7 @@ static int __devinit pcf50633_bl_probe(struct platform_device *pdev)
if (!pcf_bl)
return -ENOMEM;
+ memset(&bl_props, 0, sizeof(bl_props));
bl_props.type = BACKLIGHT_RAW;
bl_props.max_brightness = 0x3f;
bl_props.power = FB_BLANK_UNBLANK;
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 6af183d6465e..69b35f02929e 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -15,6 +15,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -68,13 +70,13 @@ static int progearbl_probe(struct platform_device *pdev)
pmu_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL);
if (!pmu_dev) {
- printk("ALI M7101 PMU not found.\n");
+ pr_err("ALI M7101 PMU not found.\n");
return -ENODEV;
}
sb_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
if (!sb_dev) {
- printk("ALI 1533 SB not found.\n");
+ pr_err("ALI 1533 SB not found.\n");
ret = -ENODEV;
goto put_pmu;
}
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index e264f55b2574..6437ae474cf2 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -741,7 +741,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
struct backlight_device *bd = NULL;
struct backlight_properties props;
- lcd = kzalloc(sizeof(struct s6e63m0), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct s6e63m0), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -751,7 +751,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&spi->dev, "spi setup failed.\n");
- goto out_free_lcd;
+ return ret;
}
lcd->spi = spi;
@@ -760,14 +760,12 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
lcd->lcd_pd = (struct lcd_platform_data *)spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
- goto out_free_lcd;
+ return -EFAULT;
}
ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto out_free_lcd;
- }
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
lcd->ld = ld;
@@ -824,8 +822,6 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
out_lcd_unregister:
lcd_device_unregister(ld);
-out_free_lcd:
- kfree(lcd);
return ret;
}
@@ -838,7 +834,6 @@ static int __devexit s6e63m0_remove(struct spi_device *spi)
device_remove_file(&spi->dev, &dev_attr_gamma_mode);
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
- kfree(lcd);
return 0;
}
@@ -899,7 +894,6 @@ static void s6e63m0_shutdown(struct spi_device *spi)
static struct spi_driver s6e63m0_driver = {
.driver = {
.name = "s6e63m0",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = s6e63m0_probe,
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 2368b8e5f89e..02444d042cd5 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -349,7 +349,7 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
if (err)
return err;
- lcd = kzalloc(sizeof(struct tdo24m), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct tdo24m), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -357,11 +357,9 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
lcd->power = FB_BLANK_POWERDOWN;
lcd->mode = MODE_VGA; /* default to VGA */
- lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
- if (lcd->buf == NULL) {
- kfree(lcd);
+ lcd->buf = devm_kzalloc(&spi->dev, TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
+ if (lcd->buf == NULL)
return -ENOMEM;
- }
m = &lcd->msg;
x = &lcd->xfer;
@@ -383,15 +381,13 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
break;
default:
dev_err(&spi->dev, "Unsupported model");
- goto out_free;
+ return -EINVAL;
}
lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev,
lcd, &tdo24m_ops);
- if (IS_ERR(lcd->lcd_dev)) {
- err = PTR_ERR(lcd->lcd_dev);
- goto out_free;
- }
+ if (IS_ERR(lcd->lcd_dev))
+ return PTR_ERR(lcd->lcd_dev);
dev_set_drvdata(&spi->dev, lcd);
err = tdo24m_power(lcd, FB_BLANK_UNBLANK);
@@ -402,9 +398,6 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
out_unregister:
lcd_device_unregister(lcd->lcd_dev);
-out_free:
- kfree(lcd->buf);
- kfree(lcd);
return err;
}
@@ -414,8 +407,6 @@ static int __devexit tdo24m_remove(struct spi_device *spi)
tdo24m_power(lcd, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->lcd_dev);
- kfree(lcd->buf);
- kfree(lcd);
return 0;
}
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 2b241abced43..0d54e607e82d 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -82,8 +82,11 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct backlight_properties props;
- struct tosa_bl_data *data = kzalloc(sizeof(struct tosa_bl_data), GFP_KERNEL);
+ struct tosa_bl_data *data;
int ret = 0;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct tosa_bl_data),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -92,7 +95,7 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
ret = gpio_request(TOSA_GPIO_BL_C20MA, "backlight");
if (ret) {
dev_dbg(&data->bl->dev, "Unable to request gpio!\n");
- goto err_gpio_bl;
+ return ret;
}
ret = gpio_direction_output(TOSA_GPIO_BL_C20MA, 0);
if (ret)
@@ -122,8 +125,6 @@ err_reg:
data->bl = NULL;
err_gpio_dir:
gpio_free(TOSA_GPIO_BL_C20MA);
-err_gpio_bl:
- kfree(data);
return ret;
}
@@ -136,8 +137,6 @@ static int __devexit tosa_bl_remove(struct i2c_client *client)
gpio_free(TOSA_GPIO_BL_C20MA);
- kfree(data);
-
return 0;
}
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 2231aec23918..47823b8efff0 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -174,7 +174,8 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
int ret;
struct tosa_lcd_data *data;
- data = kzalloc(sizeof(struct tosa_lcd_data), GFP_KERNEL);
+ data = devm_kzalloc(&spi->dev, sizeof(struct tosa_lcd_data),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -187,7 +188,7 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0)
- goto err_spi;
+ return ret;
data->spi = spi;
dev_set_drvdata(&spi->dev, data);
@@ -224,8 +225,6 @@ err_gpio_dir:
gpio_free(TOSA_GPIO_TG_ON);
err_gpio_tg:
dev_set_drvdata(&spi->dev, NULL);
-err_spi:
- kfree(data);
return ret;
}
@@ -242,7 +241,6 @@ static int __devexit tosa_lcd_remove(struct spi_device *spi)
gpio_free(TOSA_GPIO_TG_ON);
dev_set_drvdata(&spi->dev, NULL);
- kfree(data);
return 0;
}
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 5d365deb5f82..9e5517a3a52b 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -194,6 +194,7 @@ static int wm831x_backlight_probe(struct platform_device *pdev)
data->current_brightness = 0;
data->isink_reg = isink_reg;
+ memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = max_isel;
bl = backlight_device_register("wm831x", &pdev->dev, data,
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 784139aed079..b4a632ada401 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -18,6 +18,8 @@
static bool request_mem_succeeded = false;
+static struct pci_dev *default_vga;
+
static struct fb_var_screeninfo efifb_defined __devinitdata = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
@@ -298,35 +300,72 @@ static struct fb_ops efifb_ops = {
.fb_imageblit = cfb_imageblit,
};
+struct pci_dev *vga_default_device(void)
+{
+ return default_vga;
+}
+EXPORT_SYMBOL_GPL(vga_default_device);
+
+void vga_set_default_device(struct pci_dev *pdev)
+{
+ default_vga = pdev;
+}
+
static int __init efifb_setup(char *options)
{
char *this_opt;
int i;
+ struct pci_dev *dev = NULL;
+
+ if (options && *options) {
+ while ((this_opt = strsep(&options, ",")) != NULL) {
+ if (!*this_opt) continue;
+
+ for (i = 0; i < M_UNKNOWN; i++) {
+ if (!strcmp(this_opt, dmi_list[i].optname) &&
+ dmi_list[i].base != 0) {
+ screen_info.lfb_base = dmi_list[i].base;
+ screen_info.lfb_linelength = dmi_list[i].stride;
+ screen_info.lfb_width = dmi_list[i].width;
+ screen_info.lfb_height = dmi_list[i].height;
+ }
+ }
+ if (!strncmp(this_opt, "base:", 5))
+ screen_info.lfb_base = simple_strtoul(this_opt+5, NULL, 0);
+ else if (!strncmp(this_opt, "stride:", 7))
+ screen_info.lfb_linelength = simple_strtoul(this_opt+7, NULL, 0) * 4;
+ else if (!strncmp(this_opt, "height:", 7))
+ screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
+ else if (!strncmp(this_opt, "width:", 6))
+ screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
+ }
+ }
- if (!options || !*options)
- return 0;
+ for_each_pci_dev(dev) {
+ int i;
- while ((this_opt = strsep(&options, ",")) != NULL) {
- if (!*this_opt) continue;
+ if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ continue;
- for (i = 0; i < M_UNKNOWN; i++) {
- if (!strcmp(this_opt, dmi_list[i].optname) &&
- dmi_list[i].base != 0) {
- screen_info.lfb_base = dmi_list[i].base;
- screen_info.lfb_linelength = dmi_list[i].stride;
- screen_info.lfb_width = dmi_list[i].width;
- screen_info.lfb_height = dmi_list[i].height;
- }
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ resource_size_t start, end;
+
+ if (!(pci_resource_flags(dev, i) & IORESOURCE_MEM))
+ continue;
+
+ start = pci_resource_start(dev, i);
+ end = pci_resource_end(dev, i);
+
+ if (!start || !end)
+ continue;
+
+ if (screen_info.lfb_base >= start &&
+ (screen_info.lfb_base + screen_info.lfb_size) < end)
+ default_vga = dev;
}
- if (!strncmp(this_opt, "base:", 5))
- screen_info.lfb_base = simple_strtoul(this_opt+5, NULL, 0);
- else if (!strncmp(this_opt, "stride:", 7))
- screen_info.lfb_linelength = simple_strtoul(this_opt+7, NULL, 0) * 4;
- else if (!strncmp(this_opt, "height:", 7))
- screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
- else if (!strncmp(this_opt, "width:", 6))
- screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
}
+
return 0;
}
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index c6ce416ab587..0dff12a1daef 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1046,20 +1046,29 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
int
fb_blank(struct fb_info *info, int blank)
{
- int ret = -EINVAL;
+ struct fb_event event;
+ int ret = -EINVAL, early_ret;
if (blank > FB_BLANK_POWERDOWN)
blank = FB_BLANK_POWERDOWN;
+ event.info = info;
+ event.data = &blank;
+
+ early_ret = fb_notifier_call_chain(FB_EARLY_EVENT_BLANK, &event);
+
if (info->fbops->fb_blank)
ret = info->fbops->fb_blank(blank, info);
- if (!ret) {
- struct fb_event event;
-
- event.info = info;
- event.data = &blank;
+ if (!ret)
fb_notifier_call_chain(FB_EVENT_BLANK, &event);
+ else {
+ /*
+ * If fb_blank() failed, revert the effects of
+ * the early blank event.
+ */
+ if (!early_ret)
+ fb_notifier_call_chain(FB_R_EARLY_EVENT_BLANK, &event);
}
return ret;
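The fb_blank() change above sends FB_EARLY_EVENT_BLANK before calling the driver's fb_blank hook and FB_R_EARLY_EVENT_BLANK if that hook fails, so clients can prepare for a blank and undo the preparation on failure. A sketch of such a client follows; only the event names come from this series, the handler body and the names example_* are assumptions.

#include <linux/fb.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Illustrative consumer of the early-blank notifications. */
static int example_fb_notify(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct fb_event *event = data;
	int blank = *(int *)event->data;

	switch (action) {
	case FB_EARLY_EVENT_BLANK:
		/* about to blank/unblank: prepare (e.g. gate a clock) */
		break;
	case FB_R_EARLY_EVENT_BLANK:
		/* the driver's fb_blank() failed: undo the preparation */
		break;
	case FB_EVENT_BLANK:
		pr_debug("blank state changed to %d\n", blank);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_fb_nb = {
	.notifier_call	= example_fb_notify,
};
/* registered elsewhere with fb_register_client(&example_fb_nb) */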
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index f135dbead07d..caad3689b4e6 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -131,7 +131,9 @@ struct imxfb_rgb {
struct imxfb_info {
struct platform_device *pdev;
void __iomem *regs;
- struct clk *clk;
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+ struct clk *clk_per;
/*
* These are the addresses we mapped
@@ -340,7 +342,7 @@ static int imxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
pr_debug("var->bits_per_pixel=%d\n", var->bits_per_pixel);
- lcd_clk = clk_get_rate(fbi->clk);
+ lcd_clk = clk_get_rate(fbi->clk_per);
tmp = var->pixclock * (unsigned long long)lcd_clk;
@@ -455,11 +457,17 @@ static int imxfb_bl_update_status(struct backlight_device *bl)
fbi->pwmr = (fbi->pwmr & ~0xFF) | brightness;
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- clk_enable(fbi->clk);
+ if (bl->props.fb_blank != FB_BLANK_UNBLANK) {
+ clk_prepare_enable(fbi->clk_ipg);
+ clk_prepare_enable(fbi->clk_ahb);
+ clk_prepare_enable(fbi->clk_per);
+ }
writel(fbi->pwmr, fbi->regs + LCDC_PWMR);
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- clk_disable(fbi->clk);
+ if (bl->props.fb_blank != FB_BLANK_UNBLANK) {
+ clk_disable_unprepare(fbi->clk_per);
+ clk_disable_unprepare(fbi->clk_ahb);
+ clk_disable_unprepare(fbi->clk_ipg);
+ }
return 0;
}
@@ -522,7 +530,9 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
*/
writel(RMCR_LCDC_EN_MX1, fbi->regs + LCDC_RMCR);
- clk_enable(fbi->clk);
+ clk_prepare_enable(fbi->clk_ipg);
+ clk_prepare_enable(fbi->clk_ahb);
+ clk_prepare_enable(fbi->clk_per);
if (fbi->backlight_power)
fbi->backlight_power(1);
@@ -539,7 +549,9 @@ static void imxfb_disable_controller(struct imxfb_info *fbi)
if (fbi->lcd_power)
fbi->lcd_power(0);
- clk_disable(fbi->clk);
+ clk_disable_unprepare(fbi->clk_per);
+ clk_disable_unprepare(fbi->clk_ipg);
+ clk_disable_unprepare(fbi->clk_ahb);
writel(0, fbi->regs + LCDC_RMCR);
}
@@ -770,10 +782,21 @@ static int __init imxfb_probe(struct platform_device *pdev)
goto failed_req;
}
- fbi->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(fbi->clk)) {
- ret = PTR_ERR(fbi->clk);
- dev_err(&pdev->dev, "unable to get clock: %d\n", ret);
+ fbi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(fbi->clk_ipg)) {
+ ret = PTR_ERR(fbi->clk_ipg);
+ goto failed_getclock;
+ }
+
+ fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(fbi->clk_ahb)) {
+ ret = PTR_ERR(fbi->clk_ahb);
+ goto failed_getclock;
+ }
+
+ fbi->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(fbi->clk_per)) {
+ ret = PTR_ERR(fbi->clk_per);
goto failed_getclock;
}
@@ -858,7 +881,6 @@ failed_platform_init:
failed_map:
iounmap(fbi->regs);
failed_ioremap:
- clk_put(fbi->clk);
failed_getclock:
release_mem_region(res->start, resource_size(res));
failed_req:
@@ -895,8 +917,6 @@ static int __devexit imxfb_remove(struct platform_device *pdev)
iounmap(fbi->regs);
release_mem_region(res->start, resource_size(res));
- clk_disable(fbi->clk);
- clk_put(fbi->clk);
platform_set_drvdata(pdev, NULL);
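The imxfb change above splits the single clock into ipg/ahb/per and switches to the clk_prepare_enable()/clk_disable_unprepare() pairs required by the common clock framework (prepare may sleep, enable must not). A generic sketch of that pattern under the same managed-resource style; only the clock name "per" is taken from the patch, the example_* names are assumptions.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_clk_on(struct device *dev, struct clk **clkp)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "per");	/* managed: no clk_put() needed */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
	if (ret)
		return ret;

	*clkp = clk;
	return 0;
}

static void example_clk_off(struct clk *clk)
{
	clk_disable_unprepare(clk);	/* reverse of the above */
}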
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 31b8f67477b7..217678e0b983 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -1243,6 +1243,7 @@ static int maven_probe(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_WORD_DATA |
I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_NOSTART |
I2C_FUNC_PROTOCOL_MANGLING))
goto ERROR0;
if (!(data = kzalloc(sizeof(*data), GFP_KERNEL))) {
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index d26f37ac69d8..74e7cf078505 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -532,6 +532,7 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
/*------- Backlight control --------*/
+ memset(&props, 0, sizeof(props));
props.fb_blank = FB_BLANK_UNBLANK;
props.power = FB_BLANK_UNBLANK;
props.type = BACKLIGHT_RAW;
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index a3b6a74c67a7..1cc61a700fa8 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -138,7 +138,7 @@ static int __devinit mxc_w1_probe(struct platform_device *pdev)
goto failed_ioremap;
}
- clk_enable(mdev->clk);
+ clk_prepare_enable(mdev->clk);
__raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
mdev->bus_master.data = mdev;
@@ -178,7 +178,7 @@ static int __devexit mxc_w1_remove(struct platform_device *pdev)
iounmap(mdev->regs);
release_mem_region(res->start, resource_size(res));
- clk_disable(mdev->clk);
+ clk_disable_unprepare(mdev->clk);
clk_put(mdev->clk);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index a18bf6358eb8..fe819b76de56 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -64,6 +64,18 @@ config SOFT_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called softdog.
+config DA9052_WATCHDOG
+ tristate "Dialog DA9052 Watchdog"
+ depends on PMIC_DA9052
+ select WATCHDOG_CORE
+ help
+ Support for the watchdog in the DA9052 PMIC. A watchdog trigger
+ causes a system reset.
+
+ Say Y here to include support for the DA9052 watchdog.
+ Alternatively say M to compile the driver as a module,
+ which will be called da9052_wdt.
+
config WM831X_WATCHDOG
tristate "WM831x watchdog"
depends on MFD_WM831X
@@ -87,6 +99,7 @@ config WM8350_WATCHDOG
config ARM_SP805_WATCHDOG
tristate "ARM SP805 Watchdog"
depends on ARM_AMBA
+ select WATCHDOG_CORE
help
ARM Primecell SP805 Watchdog timer. This will reboot your system when
the timeout is reached.
@@ -565,6 +578,7 @@ config INTEL_SCU_WATCHDOG
config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on (X86 || IA64) && PCI
+ select LPC_ICH
---help---
Hardware driver for the intel TCO timer based watchdog devices.
These drivers are included in the Intel 82801 I/O Controller
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 442bfbe0882a..572b39bed06a 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -163,6 +163,7 @@ obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o
obj-$(CONFIG_XEN_WDT) += xen_wdt.o
# Architecture Independent
+obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
new file mode 100644
index 000000000000..3f75129eb0a9
--- /dev/null
+++ b/drivers/watchdog/da9052_wdt.c
@@ -0,0 +1,251 @@
+/*
+ * System monitoring driver for DA9052 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Anthony Olech <Anthony.Olech@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+#include <linux/watchdog.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/da9052.h>
+
+#define DA9052_DEF_TIMEOUT 4
+#define DA9052_TWDMIN 256
+
+struct da9052_wdt_data {
+ struct watchdog_device wdt;
+ struct da9052 *da9052;
+ struct kref kref;
+ unsigned long jpast;
+};
+
+static const struct {
+ u8 reg_val;
+ int time; /* Seconds */
+} da9052_wdt_maps[] = {
+ { 1, 2 },
+ { 2, 4 },
+ { 3, 8 },
+ { 4, 16 },
+ { 5, 32 },
+ { 5, 33 }, /* Actual time 32.768s so include both 32s and 33s */
+ { 6, 65 },
+ { 6, 66 }, /* Actual time 65.536s so include both 65s and 66s */
+ { 7, 131 },
+};
+
+
+static void da9052_wdt_release_resources(struct kref *r)
+{
+ struct da9052_wdt_data *driver_data =
+ container_of(r, struct da9052_wdt_data, kref);
+
+ kfree(driver_data);
+}
+
+static int da9052_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct da9052 *da9052 = driver_data->da9052;
+ int ret, i;
+
+ /*
+ * Disable the watchdog timer before setting
+ * a new timeout.
+ */
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_TWDSCALE, 0);
+ if (ret < 0) {
+ dev_err(da9052->dev, "Failed to disable watchdog bit, %d\n",
+ ret);
+ return ret;
+ }
+ if (timeout) {
+ /*
+ * To change the timeout, da9052 needs to
+ * be disabled for at least 150 us.
+ */
+ udelay(150);
+
+ /* Set the desired timeout */
+ for (i = 0; i < ARRAY_SIZE(da9052_wdt_maps); i++)
+ if (da9052_wdt_maps[i].time == timeout)
+ break;
+
+ if (i == ARRAY_SIZE(da9052_wdt_maps))
+ ret = -EINVAL;
+ else
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_TWDSCALE,
+ da9052_wdt_maps[i].reg_val);
+ if (ret < 0) {
+ dev_err(da9052->dev,
+ "Failed to update timescale bit, %d\n", ret);
+ return ret;
+ }
+
+ wdt_dev->timeout = timeout;
+ driver_data->jpast = jiffies;
+ }
+
+ return 0;
+}
+
+static void da9052_wdt_ref(struct watchdog_device *wdt_dev)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+ kref_get(&driver_data->kref);
+}
+
+static void da9052_wdt_unref(struct watchdog_device *wdt_dev)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+ kref_put(&driver_data->kref, da9052_wdt_release_resources);
+}
+
+static int da9052_wdt_start(struct watchdog_device *wdt_dev)
+{
+ return da9052_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
+}
+
+static int da9052_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ return da9052_wdt_set_timeout(wdt_dev, 0);
+}
+
+static int da9052_wdt_ping(struct watchdog_device *wdt_dev)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct da9052 *da9052 = driver_data->da9052;
+ unsigned long msec, jnow = jiffies;
+ int ret;
+
+ /*
+ * The watchdog has a minimum window time, TWDMIN. Strobing the
+ * watchdog before that time has elapsed would cause an error.
+ */
+ msec = (jnow - driver_data->jpast) * 1000/HZ;
+ if (msec < DA9052_TWDMIN)
+ mdelay(msec);
+
+ /* Reset the watchdog timer */
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_WATCHDOG, 1 << 7);
+ if (ret < 0)
+ goto err_strobe;
+
+ /*
+ * FIXME: Reset the watchdog core; in general the PMIC
+ * is supposed to do this.
+ */
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_WATCHDOG, 0 << 7);
+err_strobe:
+ return ret;
+}
+
+static struct watchdog_info da9052_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = "DA9052 Watchdog",
+};
+
+static const struct watchdog_ops da9052_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = da9052_wdt_start,
+ .stop = da9052_wdt_stop,
+ .ping = da9052_wdt_ping,
+ .set_timeout = da9052_wdt_set_timeout,
+ .ref = da9052_wdt_ref,
+ .unref = da9052_wdt_unref,
+};
+
+
+static int __devinit da9052_wdt_probe(struct platform_device *pdev)
+{
+ struct da9052 *da9052 = dev_get_drvdata(pdev->dev.parent);
+ struct da9052_wdt_data *driver_data;
+ struct watchdog_device *da9052_wdt;
+ int ret;
+
+ driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
+ GFP_KERNEL);
+ if (!driver_data) {
+ dev_err(da9052->dev, "Unable to allocate watchdog device\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ driver_data->da9052 = da9052;
+
+ da9052_wdt = &driver_data->wdt;
+
+ da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
+ da9052_wdt->info = &da9052_wdt_info;
+ da9052_wdt->ops = &da9052_wdt_ops;
+ watchdog_set_drvdata(da9052_wdt, driver_data);
+
+ kref_init(&driver_data->kref);
+
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_TWDSCALE, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to disable watchdog bits, %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = watchdog_register_device(&driver_data->wdt);
+ if (ret != 0) {
+ dev_err(da9052->dev, "watchdog_register_device() failed: %d\n",
+ ret);
+ goto err;
+ }
+
+ dev_set_drvdata(&pdev->dev, driver_data);
+err:
+ return ret;
+}
+
+static int __devexit da9052_wdt_remove(struct platform_device *pdev)
+{
+ struct da9052_wdt_data *driver_data = dev_get_drvdata(&pdev->dev);
+
+ watchdog_unregister_device(&driver_data->wdt);
+ kref_put(&driver_data->kref, da9052_wdt_release_resources);
+
+ return 0;
+}
+
+static struct platform_driver da9052_wdt_driver = {
+ .probe = da9052_wdt_probe,
+ .remove = __devexit_p(da9052_wdt_remove),
+ .driver = {
+ .name = "da9052-watchdog",
+ },
+};
+
+module_platform_driver(da9052_wdt_driver);
+
+MODULE_AUTHOR("Anthony Olech <Anthony.Olech@diasemi.com>");
+MODULE_DESCRIPTION("DA9052 SM Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-watchdog");
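Once registered with the watchdog core, the driver above is driven from userspace through the standard /dev/watchdog interface: WDIOC_SETTIMEOUT ends up in da9052_wdt_set_timeout() (only values listed in da9052_wdt_maps are accepted) and WDIOC_KEEPALIVE in da9052_wdt_ping(). A minimal userspace sketch, assuming the usual device node name and an 8-second timeout, which is one of the mapped values:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 8;	/* must match an entry in da9052_wdt_maps */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout))
		perror("WDIOC_SETTIMEOUT");

	for (;;) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* ends up in da9052_wdt_ping() */
		sleep(timeout / 2);
	}
}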
diff --git a/drivers/watchdog/iTCO_vendor.h b/drivers/watchdog/iTCO_vendor.h
index 9e27e6422f66..3c57b45537a2 100644
--- a/drivers/watchdog/iTCO_vendor.h
+++ b/drivers/watchdog/iTCO_vendor.h
@@ -1,8 +1,8 @@
/* iTCO Vendor Specific Support hooks */
#ifdef CONFIG_ITCO_VENDOR_SUPPORT
-extern void iTCO_vendor_pre_start(unsigned long, unsigned int);
-extern void iTCO_vendor_pre_stop(unsigned long);
-extern void iTCO_vendor_pre_keepalive(unsigned long, unsigned int);
+extern void iTCO_vendor_pre_start(struct resource *, unsigned int);
+extern void iTCO_vendor_pre_stop(struct resource *);
+extern void iTCO_vendor_pre_keepalive(struct resource *, unsigned int);
extern void iTCO_vendor_pre_set_heartbeat(unsigned int);
extern int iTCO_vendor_check_noreboot_on(void);
#else
diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c
index 2721d29ce243..b6b2f90b5d44 100644
--- a/drivers/watchdog/iTCO_vendor_support.c
+++ b/drivers/watchdog/iTCO_vendor_support.c
@@ -35,11 +35,6 @@
#include "iTCO_vendor.h"
-/* iTCO defines */
-#define SMI_EN (acpibase + 0x30) /* SMI Control and Enable Register */
-#define TCOBASE (acpibase + 0x60) /* TCO base address */
-#define TCO1_STS (TCOBASE + 0x04) /* TCO1 Status Register */
-
/* List of vendor support modes */
/* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */
#define SUPERMICRO_OLD_BOARD 1
@@ -82,24 +77,24 @@ MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default="
* 20.6 seconds.
*/
-static void supermicro_old_pre_start(unsigned long acpibase)
+static void supermicro_old_pre_start(struct resource *smires)
{
unsigned long val32;
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
- val32 = inl(SMI_EN);
+ val32 = inl(smires->start);
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
- outl(val32, SMI_EN); /* Needed to activate watchdog */
+ outl(val32, smires->start); /* Needed to activate watchdog */
}
-static void supermicro_old_pre_stop(unsigned long acpibase)
+static void supermicro_old_pre_stop(struct resource *smires)
{
unsigned long val32;
/* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
- val32 = inl(SMI_EN);
+ val32 = inl(smires->start);
val32 |= 0x00002000; /* Turn on SMI clearing watchdog */
- outl(val32, SMI_EN); /* Needed to deactivate watchdog */
+ outl(val32, smires->start); /* Needed to deactivate watchdog */
}
/*
@@ -270,66 +265,66 @@ static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat)
* Don't use this fix if you don't need to!!!
*/
-static void broken_bios_start(unsigned long acpibase)
+static void broken_bios_start(struct resource *smires)
{
unsigned long val32;
- val32 = inl(SMI_EN);
+ val32 = inl(smires->start);
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI#
Bit 0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. */
val32 &= 0xffffdffe;
- outl(val32, SMI_EN);
+ outl(val32, smires->start);
}
-static void broken_bios_stop(unsigned long acpibase)
+static void broken_bios_stop(struct resource *smires)
{
unsigned long val32;
- val32 = inl(SMI_EN);
+ val32 = inl(smires->start);
/* Bit 13: TCO_EN -> 1 = Enables TCO logic generating an SMI#
Bit 0: GBL_SMI_EN -> 1 = Turn global SMI on again. */
val32 |= 0x00002001;
- outl(val32, SMI_EN);
+ outl(val32, smires->start);
}
/*
* Generic Support Functions
*/
-void iTCO_vendor_pre_start(unsigned long acpibase,
+void iTCO_vendor_pre_start(struct resource *smires,
unsigned int heartbeat)
{
switch (vendorsupport) {
case SUPERMICRO_OLD_BOARD:
- supermicro_old_pre_start(acpibase);
+ supermicro_old_pre_start(smires);
break;
case SUPERMICRO_NEW_BOARD:
supermicro_new_pre_start(heartbeat);
break;
case BROKEN_BIOS:
- broken_bios_start(acpibase);
+ broken_bios_start(smires);
break;
}
}
EXPORT_SYMBOL(iTCO_vendor_pre_start);
-void iTCO_vendor_pre_stop(unsigned long acpibase)
+void iTCO_vendor_pre_stop(struct resource *smires)
{
switch (vendorsupport) {
case SUPERMICRO_OLD_BOARD:
- supermicro_old_pre_stop(acpibase);
+ supermicro_old_pre_stop(smires);
break;
case SUPERMICRO_NEW_BOARD:
supermicro_new_pre_stop();
break;
case BROKEN_BIOS:
- broken_bios_stop(acpibase);
+ broken_bios_stop(smires);
break;
}
}
EXPORT_SYMBOL(iTCO_vendor_pre_stop);
-void iTCO_vendor_pre_keepalive(unsigned long acpibase, unsigned int heartbeat)
+void iTCO_vendor_pre_keepalive(struct resource *smires, unsigned int heartbeat)
{
if (vendorsupport == SUPERMICRO_NEW_BOARD)
supermicro_new_pre_set_heartbeat(heartbeat);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 9fecb95645a3..bc47e9012f37 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -66,316 +66,16 @@
#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
#include <linux/io.h> /* For inb/outb/... */
+#include <linux/mfd/core.h>
+#include <linux/mfd/lpc_ich.h>
#include "iTCO_vendor.h"
-/* TCO related info */
-enum iTCO_chipsets {
- TCO_ICH = 0, /* ICH */
- TCO_ICH0, /* ICH0 */
- TCO_ICH2, /* ICH2 */
- TCO_ICH2M, /* ICH2-M */
- TCO_ICH3, /* ICH3-S */
- TCO_ICH3M, /* ICH3-M */
- TCO_ICH4, /* ICH4 */
- TCO_ICH4M, /* ICH4-M */
- TCO_CICH, /* C-ICH */
- TCO_ICH5, /* ICH5 & ICH5R */
- TCO_6300ESB, /* 6300ESB */
- TCO_ICH6, /* ICH6 & ICH6R */
- TCO_ICH6M, /* ICH6-M */
- TCO_ICH6W, /* ICH6W & ICH6RW */
- TCO_631XESB, /* 631xESB/632xESB */
- TCO_ICH7, /* ICH7 & ICH7R */
- TCO_ICH7DH, /* ICH7DH */
- TCO_ICH7M, /* ICH7-M & ICH7-U */
- TCO_ICH7MDH, /* ICH7-M DH */
- TCO_NM10, /* NM10 */
- TCO_ICH8, /* ICH8 & ICH8R */
- TCO_ICH8DH, /* ICH8DH */
- TCO_ICH8DO, /* ICH8DO */
- TCO_ICH8M, /* ICH8M */
- TCO_ICH8ME, /* ICH8M-E */
- TCO_ICH9, /* ICH9 */
- TCO_ICH9R, /* ICH9R */
- TCO_ICH9DH, /* ICH9DH */
- TCO_ICH9DO, /* ICH9DO */
- TCO_ICH9M, /* ICH9M */
- TCO_ICH9ME, /* ICH9M-E */
- TCO_ICH10, /* ICH10 */
- TCO_ICH10R, /* ICH10R */
- TCO_ICH10D, /* ICH10D */
- TCO_ICH10DO, /* ICH10DO */
- TCO_PCH, /* PCH Desktop Full Featured */
- TCO_PCHM, /* PCH Mobile Full Featured */
- TCO_P55, /* P55 */
- TCO_PM55, /* PM55 */
- TCO_H55, /* H55 */
- TCO_QM57, /* QM57 */
- TCO_H57, /* H57 */
- TCO_HM55, /* HM55 */
- TCO_Q57, /* Q57 */
- TCO_HM57, /* HM57 */
- TCO_PCHMSFF, /* PCH Mobile SFF Full Featured */
- TCO_QS57, /* QS57 */
- TCO_3400, /* 3400 */
- TCO_3420, /* 3420 */
- TCO_3450, /* 3450 */
- TCO_EP80579, /* EP80579 */
- TCO_CPT, /* Cougar Point */
- TCO_CPTD, /* Cougar Point Desktop */
- TCO_CPTM, /* Cougar Point Mobile */
- TCO_PBG, /* Patsburg */
- TCO_DH89XXCC, /* DH89xxCC */
- TCO_PPT, /* Panther Point */
- TCO_LPT, /* Lynx Point */
-};
-
-static struct {
- char *name;
- unsigned int iTCO_version;
-} iTCO_chipset_info[] __devinitdata = {
- {"ICH", 1},
- {"ICH0", 1},
- {"ICH2", 1},
- {"ICH2-M", 1},
- {"ICH3-S", 1},
- {"ICH3-M", 1},
- {"ICH4", 1},
- {"ICH4-M", 1},
- {"C-ICH", 1},
- {"ICH5 or ICH5R", 1},
- {"6300ESB", 1},
- {"ICH6 or ICH6R", 2},
- {"ICH6-M", 2},
- {"ICH6W or ICH6RW", 2},
- {"631xESB/632xESB", 2},
- {"ICH7 or ICH7R", 2},
- {"ICH7DH", 2},
- {"ICH7-M or ICH7-U", 2},
- {"ICH7-M DH", 2},
- {"NM10", 2},
- {"ICH8 or ICH8R", 2},
- {"ICH8DH", 2},
- {"ICH8DO", 2},
- {"ICH8M", 2},
- {"ICH8M-E", 2},
- {"ICH9", 2},
- {"ICH9R", 2},
- {"ICH9DH", 2},
- {"ICH9DO", 2},
- {"ICH9M", 2},
- {"ICH9M-E", 2},
- {"ICH10", 2},
- {"ICH10R", 2},
- {"ICH10D", 2},
- {"ICH10DO", 2},
- {"PCH Desktop Full Featured", 2},
- {"PCH Mobile Full Featured", 2},
- {"P55", 2},
- {"PM55", 2},
- {"H55", 2},
- {"QM57", 2},
- {"H57", 2},
- {"HM55", 2},
- {"Q57", 2},
- {"HM57", 2},
- {"PCH Mobile SFF Full Featured", 2},
- {"QS57", 2},
- {"3400", 2},
- {"3420", 2},
- {"3450", 2},
- {"EP80579", 2},
- {"Cougar Point", 2},
- {"Cougar Point Desktop", 2},
- {"Cougar Point Mobile", 2},
- {"Patsburg", 2},
- {"DH89xxCC", 2},
- {"Panther Point", 2},
- {"Lynx Point", 2},
- {NULL, 0}
-};
-
-/*
- * This data only exists for exporting the supported PCI ids
- * via MODULE_DEVICE_TABLE. We do not actually register a
- * pci_driver, because the I/O Controller Hub has also other
- * functions that probably will be registered by other drivers.
- */
-static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
- { PCI_VDEVICE(INTEL, 0x2410), TCO_ICH},
- { PCI_VDEVICE(INTEL, 0x2420), TCO_ICH0},
- { PCI_VDEVICE(INTEL, 0x2440), TCO_ICH2},
- { PCI_VDEVICE(INTEL, 0x244c), TCO_ICH2M},
- { PCI_VDEVICE(INTEL, 0x2480), TCO_ICH3},
- { PCI_VDEVICE(INTEL, 0x248c), TCO_ICH3M},
- { PCI_VDEVICE(INTEL, 0x24c0), TCO_ICH4},
- { PCI_VDEVICE(INTEL, 0x24cc), TCO_ICH4M},
- { PCI_VDEVICE(INTEL, 0x2450), TCO_CICH},
- { PCI_VDEVICE(INTEL, 0x24d0), TCO_ICH5},
- { PCI_VDEVICE(INTEL, 0x25a1), TCO_6300ESB},
- { PCI_VDEVICE(INTEL, 0x2640), TCO_ICH6},
- { PCI_VDEVICE(INTEL, 0x2641), TCO_ICH6M},
- { PCI_VDEVICE(INTEL, 0x2642), TCO_ICH6W},
- { PCI_VDEVICE(INTEL, 0x2670), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2671), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2672), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2673), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2674), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2675), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2676), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2677), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2678), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2679), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267a), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267b), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267c), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267d), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267e), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267f), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x27b8), TCO_ICH7},
- { PCI_VDEVICE(INTEL, 0x27b0), TCO_ICH7DH},
- { PCI_VDEVICE(INTEL, 0x27b9), TCO_ICH7M},
- { PCI_VDEVICE(INTEL, 0x27bd), TCO_ICH7MDH},
- { PCI_VDEVICE(INTEL, 0x27bc), TCO_NM10},
- { PCI_VDEVICE(INTEL, 0x2810), TCO_ICH8},
- { PCI_VDEVICE(INTEL, 0x2812), TCO_ICH8DH},
- { PCI_VDEVICE(INTEL, 0x2814), TCO_ICH8DO},
- { PCI_VDEVICE(INTEL, 0x2815), TCO_ICH8M},
- { PCI_VDEVICE(INTEL, 0x2811), TCO_ICH8ME},
- { PCI_VDEVICE(INTEL, 0x2918), TCO_ICH9},
- { PCI_VDEVICE(INTEL, 0x2916), TCO_ICH9R},
- { PCI_VDEVICE(INTEL, 0x2912), TCO_ICH9DH},
- { PCI_VDEVICE(INTEL, 0x2914), TCO_ICH9DO},
- { PCI_VDEVICE(INTEL, 0x2919), TCO_ICH9M},
- { PCI_VDEVICE(INTEL, 0x2917), TCO_ICH9ME},
- { PCI_VDEVICE(INTEL, 0x3a18), TCO_ICH10},
- { PCI_VDEVICE(INTEL, 0x3a16), TCO_ICH10R},
- { PCI_VDEVICE(INTEL, 0x3a1a), TCO_ICH10D},
- { PCI_VDEVICE(INTEL, 0x3a14), TCO_ICH10DO},
- { PCI_VDEVICE(INTEL, 0x3b00), TCO_PCH},
- { PCI_VDEVICE(INTEL, 0x3b01), TCO_PCHM},
- { PCI_VDEVICE(INTEL, 0x3b02), TCO_P55},
- { PCI_VDEVICE(INTEL, 0x3b03), TCO_PM55},
- { PCI_VDEVICE(INTEL, 0x3b06), TCO_H55},
- { PCI_VDEVICE(INTEL, 0x3b07), TCO_QM57},
- { PCI_VDEVICE(INTEL, 0x3b08), TCO_H57},
- { PCI_VDEVICE(INTEL, 0x3b09), TCO_HM55},
- { PCI_VDEVICE(INTEL, 0x3b0a), TCO_Q57},
- { PCI_VDEVICE(INTEL, 0x3b0b), TCO_HM57},
- { PCI_VDEVICE(INTEL, 0x3b0d), TCO_PCHMSFF},
- { PCI_VDEVICE(INTEL, 0x3b0f), TCO_QS57},
- { PCI_VDEVICE(INTEL, 0x3b12), TCO_3400},
- { PCI_VDEVICE(INTEL, 0x3b14), TCO_3420},
- { PCI_VDEVICE(INTEL, 0x3b16), TCO_3450},
- { PCI_VDEVICE(INTEL, 0x5031), TCO_EP80579},
- { PCI_VDEVICE(INTEL, 0x1c41), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c42), TCO_CPTD},
- { PCI_VDEVICE(INTEL, 0x1c43), TCO_CPTM},
- { PCI_VDEVICE(INTEL, 0x1c44), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c45), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c46), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c47), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c48), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c49), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4a), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4b), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4c), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4d), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4e), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4f), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c50), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c51), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c52), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c53), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c54), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c55), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c56), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c57), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c58), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c59), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5a), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5b), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5c), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5d), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5e), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5f), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1d40), TCO_PBG},
- { PCI_VDEVICE(INTEL, 0x1d41), TCO_PBG},
- { PCI_VDEVICE(INTEL, 0x2310), TCO_DH89XXCC},
- { PCI_VDEVICE(INTEL, 0x1e40), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e41), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e42), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e43), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e44), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e45), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e46), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e47), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e48), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e49), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4a), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4b), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4c), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4d), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4e), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4f), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e50), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e51), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e52), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e53), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e54), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e55), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e56), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e57), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e58), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e59), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5a), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5b), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5c), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5d), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5e), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5f), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x8c40), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c41), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c42), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c43), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c44), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c45), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c46), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c47), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c48), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c49), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4a), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4b), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4c), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4d), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4e), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4f), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c50), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c51), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c52), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c53), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c54), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c55), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c56), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c57), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c58), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c59), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5a), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5b), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5c), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5d), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5e), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5f), TCO_LPT},
- { 0, }, /* End of list */
-};
-MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
-
/* Address definitions for the TCO */
/* TCO base address */
-#define TCOBASE (iTCO_wdt_private.ACPIBASE + 0x60)
+#define TCOBASE (iTCO_wdt_private.tco_res->start)
/* SMI Control and Enable Register */
-#define SMI_EN (iTCO_wdt_private.ACPIBASE + 0x30)
+#define SMI_EN (iTCO_wdt_private.smi_res->start)
#define TCO_RLD (TCOBASE + 0x00) /* TCO Timer Reload and Curr. Value */
#define TCOv1_TMR (TCOBASE + 0x01) /* TCOv1 Timer Initial Value */
@@ -393,19 +93,18 @@ static char expect_release;
static struct { /* this is private data for the iTCO_wdt device */
/* TCO version/generation */
unsigned int iTCO_version;
- /* The device's ACPIBASE address (TCOBASE = ACPIBASE+0x60) */
- unsigned long ACPIBASE;
+ struct resource *tco_res;
+ struct resource *smi_res;
+ struct resource *gcs_res;
/* NO_REBOOT flag is Memory-Mapped GCS register bit 5 (TCO version 2)*/
unsigned long __iomem *gcs;
/* the lock for io operations */
spinlock_t io_lock;
+ struct platform_device *dev;
/* the PCI-device */
struct pci_dev *pdev;
} iTCO_wdt_private;
-/* the watchdog platform device */
-static struct platform_device *iTCO_wdt_platform_device;
-
/* module parameters */
#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */
static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
@@ -485,7 +184,7 @@ static int iTCO_wdt_start(void)
spin_lock(&iTCO_wdt_private.io_lock);
- iTCO_vendor_pre_start(iTCO_wdt_private.ACPIBASE, heartbeat);
+ iTCO_vendor_pre_start(iTCO_wdt_private.smi_res, heartbeat);
/* disable chipset's NO_REBOOT bit */
if (iTCO_wdt_unset_NO_REBOOT_bit()) {
@@ -519,7 +218,7 @@ static int iTCO_wdt_stop(void)
spin_lock(&iTCO_wdt_private.io_lock);
- iTCO_vendor_pre_stop(iTCO_wdt_private.ACPIBASE);
+ iTCO_vendor_pre_stop(iTCO_wdt_private.smi_res);
/* Bit 11: TCO Timer Halt -> 1 = The TCO timer is disabled */
val = inw(TCO1_CNT);
@@ -541,7 +240,7 @@ static int iTCO_wdt_keepalive(void)
{
spin_lock(&iTCO_wdt_private.io_lock);
- iTCO_vendor_pre_keepalive(iTCO_wdt_private.ACPIBASE, heartbeat);
+ iTCO_vendor_pre_keepalive(iTCO_wdt_private.smi_res, heartbeat);
/* Reload the timer by writing to the TCO Timer Counter register */
if (iTCO_wdt_private.iTCO_version == 2)
@@ -786,83 +485,120 @@ static struct miscdevice iTCO_wdt_miscdev = {
* Init & exit routines
*/
-static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
- const struct pci_device_id *ent, struct platform_device *dev)
+static void __devexit iTCO_wdt_cleanup(void)
+{
+ /* Stop the timer before we leave */
+ if (!nowayout)
+ iTCO_wdt_stop();
+
+ /* Deregister */
+ misc_deregister(&iTCO_wdt_miscdev);
+
+ /* release resources */
+ release_region(iTCO_wdt_private.tco_res->start,
+ resource_size(iTCO_wdt_private.tco_res));
+ release_region(iTCO_wdt_private.smi_res->start,
+ resource_size(iTCO_wdt_private.smi_res));
+ if (iTCO_wdt_private.iTCO_version == 2) {
+ iounmap(iTCO_wdt_private.gcs);
+ release_mem_region(iTCO_wdt_private.gcs_res->start,
+ resource_size(iTCO_wdt_private.gcs_res));
+ }
+
+ iTCO_wdt_private.tco_res = NULL;
+ iTCO_wdt_private.smi_res = NULL;
+ iTCO_wdt_private.gcs_res = NULL;
+ iTCO_wdt_private.gcs = NULL;
+}
+
+static int __devinit iTCO_wdt_probe(struct platform_device *dev)
{
- int ret;
- u32 base_address;
- unsigned long RCBA;
+ int ret = -ENODEV;
unsigned long val32;
+ struct lpc_ich_info *ich_info = dev->dev.platform_data;
+
+ if (!ich_info)
+ goto out;
+
+ spin_lock_init(&iTCO_wdt_private.io_lock);
+
+ iTCO_wdt_private.tco_res =
+ platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_TCO);
+ if (!iTCO_wdt_private.tco_res)
+ goto out;
+
+ iTCO_wdt_private.smi_res =
+ platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_SMI);
+ if (!iTCO_wdt_private.smi_res)
+ goto out;
+
+ iTCO_wdt_private.iTCO_version = ich_info->iTCO_version;
+ iTCO_wdt_private.dev = dev;
+ iTCO_wdt_private.pdev = to_pci_dev(dev->dev.parent);
/*
- * Find the ACPI/PM base I/O address which is the base
- * for the TCO registers (TCOBASE=ACPIBASE + 0x60)
- * ACPIBASE is bits [15:7] from 0x40-0x43
+ * Get the Memory-Mapped GCS register; we need it for the
+ * NO_REBOOT flag (TCO v2).
*/
- pci_read_config_dword(pdev, 0x40, &base_address);
- base_address &= 0x0000ff80;
- if (base_address == 0x00000000) {
- /* Something's wrong here, ACPIBASE has to be set */
- pr_err("failed to get TCOBASE address, device disabled by hardware/BIOS\n");
- return -ENODEV;
- }
- iTCO_wdt_private.iTCO_version =
- iTCO_chipset_info[ent->driver_data].iTCO_version;
- iTCO_wdt_private.ACPIBASE = base_address;
- iTCO_wdt_private.pdev = pdev;
-
- /* Get the Memory-Mapped GCS register, we need it for the
- NO_REBOOT flag (TCO v2). To get access to it you have to
- read RCBA from PCI Config space 0xf0 and use it as base.
- GCS = RCBA + ICH6_GCS(0x3410). */
if (iTCO_wdt_private.iTCO_version == 2) {
- pci_read_config_dword(pdev, 0xf0, &base_address);
- if ((base_address & 1) == 0) {
- pr_err("RCBA is disabled by hardware/BIOS, device disabled\n");
- ret = -ENODEV;
+ iTCO_wdt_private.gcs_res = platform_get_resource(dev,
+ IORESOURCE_MEM,
+ ICH_RES_MEM_GCS);
+
+ if (!iTCO_wdt_private.gcs_res)
+ goto out;
+
+ if (!request_mem_region(iTCO_wdt_private.gcs_res->start,
+ resource_size(iTCO_wdt_private.gcs_res), dev->name)) {
+ ret = -EBUSY;
goto out;
}
- RCBA = base_address & 0xffffc000;
- iTCO_wdt_private.gcs = ioremap((RCBA + 0x3410), 4);
+ iTCO_wdt_private.gcs = ioremap(iTCO_wdt_private.gcs_res->start,
+ resource_size(iTCO_wdt_private.gcs_res));
+ if (!iTCO_wdt_private.gcs) {
+ ret = -EIO;
+ goto unreg_gcs;
+ }
}
/* Check chipset's NO_REBOOT bit */
if (iTCO_wdt_unset_NO_REBOOT_bit() && iTCO_vendor_check_noreboot_on()) {
pr_info("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n");
ret = -ENODEV; /* Cannot reset NO_REBOOT bit */
- goto out_unmap;
+ goto unmap_gcs;
}
/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
iTCO_wdt_set_NO_REBOOT_bit();
/* The TCO logic uses the TCO_EN bit in the SMI_EN register */
- if (!request_region(SMI_EN, 4, "iTCO_wdt")) {
- pr_err("I/O address 0x%04lx already in use, device disabled\n",
- SMI_EN);
- ret = -EIO;
- goto out_unmap;
+ if (!request_region(iTCO_wdt_private.smi_res->start,
+ resource_size(iTCO_wdt_private.smi_res), dev->name)) {
+ pr_err("I/O address 0x%04llx already in use, device disabled\n",
+ (u64)SMI_EN);
+ ret = -EBUSY;
+ goto unmap_gcs;
}
if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
- /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
+ /*
+ * Bit 13: TCO_EN -> 0
+ * Disables TCO logic generating an SMI#
+ */
val32 = inl(SMI_EN);
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
outl(val32, SMI_EN);
}
- /* The TCO I/O registers reside in a 32-byte range pointed to
- by the TCOBASE value */
- if (!request_region(TCOBASE, 0x20, "iTCO_wdt")) {
- pr_err("I/O address 0x%04lx already in use, device disabled\n",
- TCOBASE);
- ret = -EIO;
- goto unreg_smi_en;
+ if (!request_region(iTCO_wdt_private.tco_res->start,
+ resource_size(iTCO_wdt_private.tco_res), dev->name)) {
+ pr_err("I/O address 0x%04llx already in use, device disabled\n",
+ (u64)TCOBASE);
+ ret = -EBUSY;
+ goto unreg_smi;
}
- pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04lx)\n",
- iTCO_chipset_info[ent->driver_data].name,
- iTCO_chipset_info[ent->driver_data].iTCO_version,
- TCOBASE);
+ pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
+ ich_info->name, ich_info->iTCO_version, (u64)TCOBASE);
/* Clear out the (probably old) status */
outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */
@@ -883,7 +619,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
if (ret != 0) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, ret);
- goto unreg_region;
+ goto unreg_tco;
}
pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n",
@@ -891,62 +627,31 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
return 0;
-unreg_region:
- release_region(TCOBASE, 0x20);
-unreg_smi_en:
- release_region(SMI_EN, 4);
-out_unmap:
+unreg_tco:
+ release_region(iTCO_wdt_private.tco_res->start,
+ resource_size(iTCO_wdt_private.tco_res));
+unreg_smi:
+ release_region(iTCO_wdt_private.smi_res->start,
+ resource_size(iTCO_wdt_private.smi_res));
+unmap_gcs:
if (iTCO_wdt_private.iTCO_version == 2)
iounmap(iTCO_wdt_private.gcs);
-out:
- iTCO_wdt_private.ACPIBASE = 0;
- return ret;
-}
-
-static void __devexit iTCO_wdt_cleanup(void)
-{
- /* Stop the timer before we leave */
- if (!nowayout)
- iTCO_wdt_stop();
-
- /* Deregister */
- misc_deregister(&iTCO_wdt_miscdev);
- release_region(TCOBASE, 0x20);
- release_region(SMI_EN, 4);
+unreg_gcs:
if (iTCO_wdt_private.iTCO_version == 2)
- iounmap(iTCO_wdt_private.gcs);
- pci_dev_put(iTCO_wdt_private.pdev);
- iTCO_wdt_private.ACPIBASE = 0;
-}
-
-static int __devinit iTCO_wdt_probe(struct platform_device *dev)
-{
- int ret = -ENODEV;
- int found = 0;
- struct pci_dev *pdev = NULL;
- const struct pci_device_id *ent;
-
- spin_lock_init(&iTCO_wdt_private.io_lock);
-
- for_each_pci_dev(pdev) {
- ent = pci_match_id(iTCO_wdt_pci_tbl, pdev);
- if (ent) {
- found++;
- ret = iTCO_wdt_init(pdev, ent, dev);
- if (!ret)
- break;
- }
- }
-
- if (!found)
- pr_info("No device detected\n");
+ release_mem_region(iTCO_wdt_private.gcs_res->start,
+ resource_size(iTCO_wdt_private.gcs_res));
+out:
+ iTCO_wdt_private.tco_res = NULL;
+ iTCO_wdt_private.smi_res = NULL;
+ iTCO_wdt_private.gcs_res = NULL;
+ iTCO_wdt_private.gcs = NULL;
return ret;
}
static int __devexit iTCO_wdt_remove(struct platform_device *dev)
{
- if (iTCO_wdt_private.ACPIBASE)
+ if (iTCO_wdt_private.tco_res || iTCO_wdt_private.smi_res)
iTCO_wdt_cleanup();
return 0;
@@ -977,23 +682,11 @@ static int __init iTCO_wdt_init_module(void)
if (err)
return err;
- iTCO_wdt_platform_device = platform_device_register_simple(DRV_NAME,
- -1, NULL, 0);
- if (IS_ERR(iTCO_wdt_platform_device)) {
- err = PTR_ERR(iTCO_wdt_platform_device);
- goto unreg_platform_driver;
- }
-
return 0;
-
-unreg_platform_driver:
- platform_driver_unregister(&iTCO_wdt_driver);
- return err;
}
static void __exit iTCO_wdt_cleanup_module(void)
{
- platform_device_unregister(iTCO_wdt_platform_device);
platform_driver_unregister(&iTCO_wdt_driver);
pr_info("Watchdog Module Unloaded\n");
}
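
Editor's note: the iTCO_wdt rework above stops scanning the PCI bus itself and instead consumes I/O and memory resources handed down by the parent lpc_ich MFD driver. A minimal sketch of that consumer pattern follows; the driver name, resource index and register handling are illustrative assumptions, not taken from the patch.

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>

/* Hypothetical index into the resources the parent MFD cell publishes. */
#define EXAMPLE_RES_IO_TCO	0

static int example_wdt_probe(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_IO, EXAMPLE_RES_IO_TCO);
	if (!res)
		return -ENODEV;

	/* Claim the I/O range published by the MFD parent. */
	if (!request_region(res->start, resource_size(res), pdev->name))
		return -EBUSY;

	/* watchdog registers would be programmed via inl()/outl() on res->start */
	return 0;
}
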
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 7a2b734fcdc7..bcfab2b00ad2 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -121,7 +121,7 @@ static void imx2_wdt_start(void)
{
if (!test_and_set_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
/* at our first start we enable clock and do initialisations */
- clk_enable(imx2_wdt.clk);
+ clk_prepare_enable(imx2_wdt.clk);
imx2_wdt_setup();
} else /* delete the timer that pings the watchdog after close */
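
Editor's note: the one-line imx2_wdt change reflects the common clock framework rule that a clock must be prepared before it is enabled; clk_prepare_enable() and clk_disable_unprepare() bundle the two steps. A generic, hedged sketch of the pairing (the "wdog" clock name is a placeholder):

#include <linux/clk.h>
#include <linux/err.h>

static int example_enable_wdog_clock(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, "wdog");
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	*out = clk;
	return 0;
}

/* Tear-down mirrors the call above. */
static void example_disable_wdog_clock(struct clk *clk)
{
	clk_disable_unprepare(clk);
	clk_put(clk);
}
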
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index a9593a3a32a0..2e74c3a8ee58 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -13,14 +13,15 @@
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <lantiq.h>
+#include <lantiq_soc.h>
-/* Section 3.4 of the datasheet
+/*
+ * Section 3.4 of the datasheet
* The password sequence protects the WDT control register from unintended
* write actions, which might cause malfunction of the WDT.
*
@@ -70,7 +71,8 @@ ltq_wdt_disable(void)
{
/* write the first password magic */
ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
- /* write the second password magic with no config
+ /*
+ * write the second password magic with no config
* this turns the watchdog off
*/
ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR);
@@ -184,7 +186,7 @@ static struct miscdevice ltq_wdt_miscdev = {
.fops = &ltq_wdt_fops,
};
-static int __init
+static int __devinit
ltq_wdt_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -194,28 +196,27 @@ ltq_wdt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot obtain I/O memory region");
return -ENOENT;
}
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "cannot request I/O memory region");
- return -EBUSY;
- }
- ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
+
+ ltq_wdt_membase = devm_request_and_ioremap(&pdev->dev, res);
if (!ltq_wdt_membase) {
dev_err(&pdev->dev, "cannot remap I/O memory region\n");
return -ENOMEM;
}
/* we do not need to enable the clock as it is always running */
- clk = clk_get(&pdev->dev, "io");
- WARN_ON(!clk);
+ clk = clk_get_io();
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return -ENOENT;
+ }
ltq_io_region_clk_rate = clk_get_rate(clk);
clk_put(clk);
+ /* find out if the watchdog caused the last reboot */
if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST)
ltq_wdt_bootstatus = WDIOF_CARDRESET;
+ dev_info(&pdev->dev, "Init done\n");
return misc_register(&ltq_wdt_miscdev);
}
@@ -227,33 +228,26 @@ ltq_wdt_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ltq_wdt_match[] = {
+ { .compatible = "lantiq,wdt" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_wdt_match);
static struct platform_driver ltq_wdt_driver = {
+ .probe = ltq_wdt_probe,
.remove = __devexit_p(ltq_wdt_remove),
.driver = {
- .name = "ltq_wdt",
+ .name = "wdt",
.owner = THIS_MODULE,
+ .of_match_table = ltq_wdt_match,
},
};
-static int __init
-init_ltq_wdt(void)
-{
- return platform_driver_probe(&ltq_wdt_driver, ltq_wdt_probe);
-}
-
-static void __exit
-exit_ltq_wdt(void)
-{
- return platform_driver_unregister(&ltq_wdt_driver);
-}
-
-module_init(init_ltq_wdt);
-module_exit(exit_ltq_wdt);
+module_platform_driver(ltq_wdt_driver);
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
-
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Lantiq SoC Watchdog");
MODULE_LICENSE("GPL");
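
Editor's note: the lantiq conversion above swaps an open-coded init/exit pair for module_platform_driver() and adds a device-tree match table. A minimal skeleton of that shape is sketched below; all names and the compatible string are illustrative.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,example-wdt" },
	{},
};
MODULE_DEVICE_TABLE(of, example_match);

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove = example_remove,
	.driver = {
		.name = "example-wdt",
		.owner = THIS_MODULE,
		.of_match_table = example_match,
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
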
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 788aa158e78c..0f5736949c61 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -24,8 +24,8 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/spinlock.h>
+#include <linux/clk.h>
#include <mach/bridge-regs.h>
-#include <plat/orion_wdt.h>
/*
* Watchdog timer block registers.
@@ -41,6 +41,7 @@
static bool nowayout = WATCHDOG_NOWAYOUT;
static int heartbeat = -1; /* module parameter (seconds) */
static unsigned int wdt_max_duration; /* (seconds) */
+static struct clk *clk;
static unsigned int wdt_tclk;
static void __iomem *wdt_reg;
static unsigned long wdt_status;
@@ -237,16 +238,16 @@ static struct miscdevice orion_wdt_miscdev = {
static int __devinit orion_wdt_probe(struct platform_device *pdev)
{
- struct orion_wdt_platform_data *pdata = pdev->dev.platform_data;
struct resource *res;
int ret;
- if (pdata) {
- wdt_tclk = pdata->tclk;
- } else {
- pr_err("misses platform data\n");
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ printk(KERN_ERR "Orion Watchdog missing clock\n");
return -ENODEV;
}
+ clk_prepare_enable(clk);
+ wdt_tclk = clk_get_rate(clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -282,6 +283,9 @@ static int __devexit orion_wdt_remove(struct platform_device *pdev)
if (!ret)
orion_wdt_miscdev.parent = NULL;
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+
return ret;
}
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index bbb170e50055..afcd13676542 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -16,20 +16,17 @@
#include <linux/amba/bus.h>
#include <linux/bitops.h>
#include <linux/clk.h>
-#include <linux/fs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/math64.h>
-#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
#include <linux/watchdog.h>
/* default timeout in seconds */
@@ -56,6 +53,7 @@
/**
* struct sp805_wdt: sp805 wdt device structure
+ * @wdd: instance of struct watchdog_device
* @lock: spin lock protecting dev structure and io access
* @base: base address of wdt
* @clk: clock structure of wdt
@@ -65,24 +63,24 @@
* @timeout: current programmed timeout
*/
struct sp805_wdt {
+ struct watchdog_device wdd;
spinlock_t lock;
void __iomem *base;
struct clk *clk;
struct amba_device *adev;
- unsigned long status;
- #define WDT_BUSY 0
- #define WDT_CAN_BE_CLOSED 1
unsigned int load_val;
unsigned int timeout;
};
-/* local variables */
-static struct sp805_wdt *wdt;
static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Set to 1 to keep watchdog running after device release");
/* This routine finds the load value that will reset the system in the required timeout */
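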
-static void wdt_setload(unsigned int timeout)
+static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout)
{
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
u64 load, rate;
rate = clk_get_rate(wdt->clk);
@@ -103,11 +101,14 @@ static void wdt_setload(unsigned int timeout)
/* roundup timeout to closest positive integer value */
wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
spin_unlock(&wdt->lock);
+
+ return 0;
}
/* returns number of seconds left for reset to occur */
-static u32 wdt_timeleft(void)
+static unsigned int wdt_timeleft(struct watchdog_device *wdd)
{
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
u64 load, rate;
rate = clk_get_rate(wdt->clk);
@@ -123,166 +124,96 @@ static u32 wdt_timeleft(void)
return div_u64(load, rate);
}
-/* enables watchdog timers reset */
-static void wdt_enable(void)
+static int wdt_config(struct watchdog_device *wdd, bool ping)
{
- spin_lock(&wdt->lock);
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
+ int ret;
- writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
- writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
- writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
- writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL);
- writel_relaxed(LOCK, wdt->base + WDTLOCK);
+ if (!ping) {
+ ret = clk_prepare(wdt->clk);
+ if (ret) {
+ dev_err(&wdt->adev->dev, "clock prepare fail");
+ return ret;
+ }
- /* Flush posted writes. */
- readl_relaxed(wdt->base + WDTLOCK);
- spin_unlock(&wdt->lock);
-}
+ ret = clk_enable(wdt->clk);
+ if (ret) {
+ dev_err(&wdt->adev->dev, "clock enable fail");
+ clk_unprepare(wdt->clk);
+ return ret;
+ }
+ }
-/* disables watchdog timers reset */
-static void wdt_disable(void)
-{
spin_lock(&wdt->lock);
writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
- writel_relaxed(0, wdt->base + WDTCONTROL);
+ writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
+
+ if (!ping) {
+ writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
+ writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base +
+ WDTCONTROL);
+ }
+
writel_relaxed(LOCK, wdt->base + WDTLOCK);
/* Flush posted writes. */
readl_relaxed(wdt->base + WDTLOCK);
spin_unlock(&wdt->lock);
+
+ return 0;
}
-static ssize_t sp805_wdt_write(struct file *file, const char *data,
- size_t len, loff_t *ppos)
+static int wdt_ping(struct watchdog_device *wdd)
{
- if (len) {
- if (!nowayout) {
- size_t i;
-
- clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
-
- for (i = 0; i != len; i++) {
- char c;
-
- if (get_user(c, data + i))
- return -EFAULT;
- /* Check for Magic Close character */
- if (c == 'V') {
- set_bit(WDT_CAN_BE_CLOSED,
- &wdt->status);
- break;
- }
- }
- }
- wdt_enable();
- }
- return len;
+ return wdt_config(wdd, true);
}
-static const struct watchdog_info ident = {
- .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
- .identity = MODULE_NAME,
-};
-
-static long sp805_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+/* enables watchdog timers reset */
+static int wdt_enable(struct watchdog_device *wdd)
{
- int ret = -ENOTTY;
- unsigned int timeout;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = copy_to_user((struct watchdog_info *)arg, &ident,
- sizeof(ident)) ? -EFAULT : 0;
- break;
-
- case WDIOC_GETSTATUS:
- ret = put_user(0, (int *)arg);
- break;
-
- case WDIOC_KEEPALIVE:
- wdt_enable();
- ret = 0;
- break;
-
- case WDIOC_SETTIMEOUT:
- ret = get_user(timeout, (unsigned int *)arg);
- if (ret)
- break;
-
- wdt_setload(timeout);
-
- wdt_enable();
- /* Fall through */
-
- case WDIOC_GETTIMEOUT:
- ret = put_user(wdt->timeout, (unsigned int *)arg);
- break;
- case WDIOC_GETTIMELEFT:
- ret = put_user(wdt_timeleft(), (unsigned int *)arg);
- break;
- }
- return ret;
+ return wdt_config(wdd, false);
}
-static int sp805_wdt_open(struct inode *inode, struct file *file)
+/* disables watchdog timers reset */
+static int wdt_disable(struct watchdog_device *wdd)
{
- int ret = 0;
-
- if (test_and_set_bit(WDT_BUSY, &wdt->status))
- return -EBUSY;
-
- ret = clk_enable(wdt->clk);
- if (ret) {
- dev_err(&wdt->adev->dev, "clock enable fail");
- goto err;
- }
-
- wdt_enable();
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
- /* can not be closed, once enabled */
- clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
- return nonseekable_open(inode, file);
+ spin_lock(&wdt->lock);
-err:
- clear_bit(WDT_BUSY, &wdt->status);
- return ret;
-}
+ writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
+ writel_relaxed(0, wdt->base + WDTCONTROL);
+ writel_relaxed(LOCK, wdt->base + WDTLOCK);
-static int sp805_wdt_release(struct inode *inode, struct file *file)
-{
- if (!test_bit(WDT_CAN_BE_CLOSED, &wdt->status)) {
- clear_bit(WDT_BUSY, &wdt->status);
- dev_warn(&wdt->adev->dev, "Device closed unexpectedly\n");
- return 0;
- }
+ /* Flush posted writes. */
+ readl_relaxed(wdt->base + WDTLOCK);
+ spin_unlock(&wdt->lock);
- wdt_disable();
clk_disable(wdt->clk);
- clear_bit(WDT_BUSY, &wdt->status);
+ clk_unprepare(wdt->clk);
return 0;
}
-static const struct file_operations sp805_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = sp805_wdt_write,
- .unlocked_ioctl = sp805_wdt_ioctl,
- .open = sp805_wdt_open,
- .release = sp805_wdt_release,
+static const struct watchdog_info wdt_info = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = MODULE_NAME,
};
-static struct miscdevice sp805_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &sp805_wdt_fops,
+static const struct watchdog_ops wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = wdt_enable,
+ .stop = wdt_disable,
+ .ping = wdt_ping,
+ .set_timeout = wdt_setload,
+ .get_timeleft = wdt_timeleft,
};
static int __devinit
sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
{
+ struct sp805_wdt *wdt;
int ret = 0;
if (!devm_request_mem_region(&adev->dev, adev->res.start,
@@ -315,19 +246,26 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
}
wdt->adev = adev;
+ wdt->wdd.info = &wdt_info;
+ wdt->wdd.ops = &wdt_ops;
+
spin_lock_init(&wdt->lock);
- wdt_setload(DEFAULT_TIMEOUT);
+ watchdog_set_nowayout(&wdt->wdd, nowayout);
+ watchdog_set_drvdata(&wdt->wdd, wdt);
+ wdt_setload(&wdt->wdd, DEFAULT_TIMEOUT);
- ret = misc_register(&sp805_wdt_miscdev);
- if (ret < 0) {
- dev_warn(&adev->dev, "cannot register misc device\n");
- goto err_misc_register;
+ ret = watchdog_register_device(&wdt->wdd);
+ if (ret) {
+ dev_err(&adev->dev, "watchdog_register_device() failed: %d\n",
+ ret);
+ goto err_register;
}
+ amba_set_drvdata(adev, wdt);
dev_info(&adev->dev, "registration successful\n");
return 0;
-err_misc_register:
+err_register:
clk_put(wdt->clk);
err:
dev_err(&adev->dev, "Probe Failed!!!\n");
@@ -336,7 +274,11 @@ err:
static int __devexit sp805_wdt_remove(struct amba_device *adev)
{
- misc_deregister(&sp805_wdt_miscdev);
+ struct sp805_wdt *wdt = amba_get_drvdata(adev);
+
+ watchdog_unregister_device(&wdt->wdd);
+ amba_set_drvdata(adev, NULL);
+ watchdog_set_drvdata(&wdt->wdd, NULL);
clk_put(wdt->clk);
return 0;
@@ -345,28 +287,22 @@ static int __devexit sp805_wdt_remove(struct amba_device *adev)
#ifdef CONFIG_PM
static int sp805_wdt_suspend(struct device *dev)
{
- if (test_bit(WDT_BUSY, &wdt->status)) {
- wdt_disable();
- clk_disable(wdt->clk);
- }
+ struct sp805_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdd))
+ return wdt_disable(&wdt->wdd);
return 0;
}
static int sp805_wdt_resume(struct device *dev)
{
- int ret = 0;
+ struct sp805_wdt *wdt = dev_get_drvdata(dev);
- if (test_bit(WDT_BUSY, &wdt->status)) {
- ret = clk_enable(wdt->clk);
- if (ret) {
- dev_err(dev, "clock enable fail");
- return ret;
- }
- wdt_enable();
- }
+ if (watchdog_active(&wdt->wdd))
+ return wdt_enable(&wdt->wdd);
- return ret;
+ return 0;
}
#endif /* CONFIG_PM */
@@ -395,11 +331,6 @@ static struct amba_driver sp805_wdt_driver = {
module_amba_driver(sp805_wdt_driver);
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout,
- "Set to 1 to keep watchdog running after device release");
-
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
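
Editor's note: the sp805 rework above is a conversion from a private miscdevice to the generic watchdog framework: fill in a struct watchdog_device with a watchdog_ops table and call watchdog_register_device(). A stripped-down sketch of that registration, with hypothetical names and stubbed hardware accessors:

#include <linux/module.h>
#include <linux/watchdog.h>

static int example_wdt_start(struct watchdog_device *wdd)
{
	/* kick/arm the hardware here */
	return 0;
}

static int example_wdt_stop(struct watchdog_device *wdd)
{
	/* disable the hardware here */
	return 0;
}

static const struct watchdog_info example_wdt_info = {
	.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.identity = "example_wdt",
};

static const struct watchdog_ops example_wdt_ops = {
	.owner = THIS_MODULE,
	.start = example_wdt_start,
	.stop = example_wdt_stop,
};

static struct watchdog_device example_wdd = {
	.info = &example_wdt_info,
	.ops = &example_wdt_ops,
};

static int __init example_wdt_init(void)
{
	watchdog_set_nowayout(&example_wdd, false);
	return watchdog_register_device(&example_wdd);
}

static void __exit example_wdt_exit(void)
{
	watchdog_unregister_device(&example_wdd);
}

module_init(example_wdt_init);
module_exit(example_wdt_exit);
MODULE_LICENSE("GPL");
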
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index 5603e31afdab..aa50da3ccfe3 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -91,7 +91,7 @@ static inline void wdt_reset(void)
static void wdt_timer_tick(unsigned long data)
{
if (time_before(jiffies, next_heartbeat) ||
- (!test_bit(WDOG_ACTIVE, &wdt_dev.status))) {
+ (!watchdog_active(&wdt_dev))) {
wdt_reset();
mod_timer(&timer, jiffies + WDT_HEARTBEAT);
} else
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 14d768bfa267..6aa46a90ff02 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -34,8 +34,13 @@
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/watchdog.h> /* For watchdog specific items */
#include <linux/init.h> /* For __init/__exit/... */
+#include <linux/idr.h> /* For ida_* macros */
+#include <linux/err.h> /* For IS_ERR macros */
-#include "watchdog_dev.h" /* For watchdog_dev_register/... */
+#include "watchdog_core.h" /* For watchdog_dev_register/... */
+
+static DEFINE_IDA(watchdog_ida);
+static struct class *watchdog_class;
/**
* watchdog_register_device() - register a watchdog device
@@ -49,7 +54,7 @@
*/
int watchdog_register_device(struct watchdog_device *wdd)
{
- int ret;
+ int ret, id, devno;
if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
return -EINVAL;
@@ -74,10 +79,38 @@ int watchdog_register_device(struct watchdog_device *wdd)
* corrupted in a later stage then we expect a kernel panic!
*/
- /* We only support 1 watchdog device via the /dev/watchdog interface */
+ mutex_init(&wdd->lock);
+ id = ida_simple_get(&watchdog_ida, 0, MAX_DOGS, GFP_KERNEL);
+ if (id < 0)
+ return id;
+ wdd->id = id;
+
ret = watchdog_dev_register(wdd);
if (ret) {
- pr_err("error registering /dev/watchdog (err=%d)\n", ret);
+ ida_simple_remove(&watchdog_ida, id);
+ if (!(id == 0 && ret == -EBUSY))
+ return ret;
+
+ /* Retry in case a legacy watchdog module exists */
+ id = ida_simple_get(&watchdog_ida, 1, MAX_DOGS, GFP_KERNEL);
+ if (id < 0)
+ return id;
+ wdd->id = id;
+
+ ret = watchdog_dev_register(wdd);
+ if (ret) {
+ ida_simple_remove(&watchdog_ida, id);
+ return ret;
+ }
+ }
+
+ devno = wdd->cdev.dev;
+ wdd->dev = device_create(watchdog_class, wdd->parent, devno,
+ NULL, "watchdog%d", wdd->id);
+ if (IS_ERR(wdd->dev)) {
+ watchdog_dev_unregister(wdd);
+ ida_simple_remove(&watchdog_ida, id);
+ ret = PTR_ERR(wdd->dev);
return ret;
}
@@ -95,6 +128,7 @@ EXPORT_SYMBOL_GPL(watchdog_register_device);
void watchdog_unregister_device(struct watchdog_device *wdd)
{
int ret;
+ int devno = wdd->cdev.dev;
if (wdd == NULL)
return;
@@ -102,9 +136,41 @@ void watchdog_unregister_device(struct watchdog_device *wdd)
ret = watchdog_dev_unregister(wdd);
if (ret)
pr_err("error unregistering /dev/watchdog (err=%d)\n", ret);
+ device_destroy(watchdog_class, devno);
+ ida_simple_remove(&watchdog_ida, wdd->id);
+ wdd->dev = NULL;
}
EXPORT_SYMBOL_GPL(watchdog_unregister_device);
+static int __init watchdog_init(void)
+{
+ int err;
+
+ watchdog_class = class_create(THIS_MODULE, "watchdog");
+ if (IS_ERR(watchdog_class)) {
+ pr_err("couldn't create class\n");
+ return PTR_ERR(watchdog_class);
+ }
+
+ err = watchdog_dev_init();
+ if (err < 0) {
+ class_destroy(watchdog_class);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit watchdog_exit(void)
+{
+ watchdog_dev_exit();
+ class_destroy(watchdog_class);
+ ida_destroy(&watchdog_ida);
+}
+
+subsys_initcall(watchdog_init);
+module_exit(watchdog_exit);
+
MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
MODULE_DESCRIPTION("WatchDog Timer Driver Core");
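
Editor's note: the core change above hands out /dev/watchdogN numbers with an IDA. A generic sketch of that allocate/free pattern, independent of the watchdog core (names and the device limit are assumptions):

#include <linux/idr.h>
#include <linux/gfp.h>

#define EXAMPLE_MAX_DEVS	32

static DEFINE_IDA(example_ida);

static int example_alloc_id(void)
{
	/* returns an unused id in [0, EXAMPLE_MAX_DEVS) or a negative errno */
	return ida_simple_get(&example_ida, 0, EXAMPLE_MAX_DEVS, GFP_KERNEL);
}

static void example_free_id(int id)
{
	ida_simple_remove(&example_ida, id);
}
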
diff --git a/drivers/watchdog/watchdog_dev.h b/drivers/watchdog/watchdog_core.h
index bc7612be25ce..6c951418fca7 100644
--- a/drivers/watchdog/watchdog_dev.h
+++ b/drivers/watchdog/watchdog_core.h
@@ -26,8 +26,12 @@
* This material is provided "AS-IS" and at no charge.
*/
+#define MAX_DOGS 32 /* Maximum number of watchdog devices */
+
/*
* Functions/procedures to be called by the core
*/
-int watchdog_dev_register(struct watchdog_device *);
-int watchdog_dev_unregister(struct watchdog_device *);
+extern int watchdog_dev_register(struct watchdog_device *);
+extern int watchdog_dev_unregister(struct watchdog_device *);
+extern int __init watchdog_dev_init(void);
+extern void __exit watchdog_dev_exit(void);
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 8558da912c42..672d169bf1da 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -42,10 +42,12 @@
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
-/* make sure we only register one /dev/watchdog device */
-static unsigned long watchdog_dev_busy;
+#include "watchdog_core.h"
+
+/* the dev_t structure to store the dynamically allocated watchdog devices */
+static dev_t watchdog_devt;
/* the watchdog device behind /dev/watchdog */
-static struct watchdog_device *wdd;
+static struct watchdog_device *old_wdd;
/*
* watchdog_ping: ping the watchdog.
@@ -59,13 +61,26 @@ static struct watchdog_device *wdd;
static int watchdog_ping(struct watchdog_device *wddev)
{
- if (test_bit(WDOG_ACTIVE, &wddev->status)) {
- if (wddev->ops->ping)
- return wddev->ops->ping(wddev); /* ping the watchdog */
- else
- return wddev->ops->start(wddev); /* restart watchdog */
+ int err = 0;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_ping;
}
- return 0;
+
+ if (!watchdog_active(wddev))
+ goto out_ping;
+
+ if (wddev->ops->ping)
+ err = wddev->ops->ping(wddev); /* ping the watchdog */
+ else
+ err = wddev->ops->start(wddev); /* restart watchdog */
+
+out_ping:
+ mutex_unlock(&wddev->lock);
+ return err;
}
/*
@@ -79,16 +94,25 @@ static int watchdog_ping(struct watchdog_device *wddev)
static int watchdog_start(struct watchdog_device *wddev)
{
- int err;
+ int err = 0;
- if (!test_bit(WDOG_ACTIVE, &wddev->status)) {
- err = wddev->ops->start(wddev);
- if (err < 0)
- return err;
+ mutex_lock(&wddev->lock);
- set_bit(WDOG_ACTIVE, &wddev->status);
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_start;
}
- return 0;
+
+ if (watchdog_active(wddev))
+ goto out_start;
+
+ err = wddev->ops->start(wddev);
+ if (err == 0)
+ set_bit(WDOG_ACTIVE, &wddev->status);
+
+out_start:
+ mutex_unlock(&wddev->lock);
+ return err;
}
/*
@@ -103,22 +127,155 @@ static int watchdog_start(struct watchdog_device *wddev)
static int watchdog_stop(struct watchdog_device *wddev)
{
- int err = -EBUSY;
+ int err = 0;
- if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
- pr_info("%s: nowayout prevents watchdog to be stopped!\n",
- wddev->info->identity);
- return err;
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_stop;
}
- if (test_bit(WDOG_ACTIVE, &wddev->status)) {
- err = wddev->ops->stop(wddev);
- if (err < 0)
- return err;
+ if (!watchdog_active(wddev))
+ goto out_stop;
+ if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
+ dev_info(wddev->dev, "nowayout prevents watchdog being stopped!\n");
+ err = -EBUSY;
+ goto out_stop;
+ }
+
+ err = wddev->ops->stop(wddev);
+ if (err == 0)
clear_bit(WDOG_ACTIVE, &wddev->status);
+
+out_stop:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_get_status: wrapper to get the watchdog status
+ * @wddev: the watchdog device to get the status from
+ * @status: the status of the watchdog device
+ *
+ * Get the watchdog's status flags.
+ */
+
+static int watchdog_get_status(struct watchdog_device *wddev,
+ unsigned int *status)
+{
+ int err = 0;
+
+ *status = 0;
+ if (!wddev->ops->status)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_status;
}
- return 0;
+
+ *status = wddev->ops->status(wddev);
+
+out_status:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_set_timeout: set the watchdog timer timeout
+ * @wddev: the watchdog device to set the timeout for
+ * @timeout: timeout to set in seconds
+ */
+
+static int watchdog_set_timeout(struct watchdog_device *wddev,
+ unsigned int timeout)
+{
+ int err;
+
+ if ((wddev->ops->set_timeout == NULL) ||
+ !(wddev->info->options & WDIOF_SETTIMEOUT))
+ return -EOPNOTSUPP;
+
+ if ((wddev->max_timeout != 0) &&
+ (timeout < wddev->min_timeout || timeout > wddev->max_timeout))
+ return -EINVAL;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_timeout;
+ }
+
+ err = wddev->ops->set_timeout(wddev, timeout);
+
+out_timeout:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_get_timeleft: wrapper to get the time left before a reboot
+ * @wddev: the watchdog device to get the remaining time from
+ * @timeleft: the time that's left
+ *
+ * Get the time before a watchdog will reboot (if not pinged).
+ */
+
+static int watchdog_get_timeleft(struct watchdog_device *wddev,
+ unsigned int *timeleft)
+{
+ int err = 0;
+
+ *timeleft = 0;
+ if (!wddev->ops->get_timeleft)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_timeleft;
+ }
+
+ *timeleft = wddev->ops->get_timeleft(wddev);
+
+out_timeleft:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_ioctl_op: call the watchdog drivers ioctl op if defined
+ * @wddev: the watchdog device to do the ioctl on
+ * @cmd: watchdog command
+ * @arg: argument pointer
+ */
+
+static int watchdog_ioctl_op(struct watchdog_device *wddev, unsigned int cmd,
+ unsigned long arg)
+{
+ int err;
+
+ if (!wddev->ops->ioctl)
+ return -ENOIOCTLCMD;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_ioctl;
+ }
+
+ err = wddev->ops->ioctl(wddev, cmd, arg);
+
+out_ioctl:
+ mutex_unlock(&wddev->lock);
+ return err;
}
/*
@@ -136,6 +293,7 @@ static int watchdog_stop(struct watchdog_device *wddev)
static ssize_t watchdog_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
+ struct watchdog_device *wdd = file->private_data;
size_t i;
char c;
@@ -175,23 +333,24 @@ static ssize_t watchdog_write(struct file *file, const char __user *data,
static long watchdog_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
+ struct watchdog_device *wdd = file->private_data;
void __user *argp = (void __user *)arg;
int __user *p = argp;
unsigned int val;
int err;
- if (wdd->ops->ioctl) {
- err = wdd->ops->ioctl(wdd, cmd, arg);
- if (err != -ENOIOCTLCMD)
- return err;
- }
+ err = watchdog_ioctl_op(wdd, cmd, arg);
+ if (err != -ENOIOCTLCMD)
+ return err;
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user(argp, wdd->info,
sizeof(struct watchdog_info)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
- val = wdd->ops->status ? wdd->ops->status(wdd) : 0;
+ err = watchdog_get_status(wdd, &val);
+ if (err)
+ return err;
return put_user(val, p);
case WDIOC_GETBOOTSTATUS:
return put_user(wdd->bootstatus, p);
@@ -215,15 +374,9 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
watchdog_ping(wdd);
return 0;
case WDIOC_SETTIMEOUT:
- if ((wdd->ops->set_timeout == NULL) ||
- !(wdd->info->options & WDIOF_SETTIMEOUT))
- return -EOPNOTSUPP;
if (get_user(val, p))
return -EFAULT;
- if ((wdd->max_timeout != 0) &&
- (val < wdd->min_timeout || val > wdd->max_timeout))
- return -EINVAL;
- err = wdd->ops->set_timeout(wdd, val);
+ err = watchdog_set_timeout(wdd, val);
if (err < 0)
return err;
/* If the watchdog is active then we send a keepalive ping
@@ -237,21 +390,21 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
return -EOPNOTSUPP;
return put_user(wdd->timeout, p);
case WDIOC_GETTIMELEFT:
- if (!wdd->ops->get_timeleft)
- return -EOPNOTSUPP;
-
- return put_user(wdd->ops->get_timeleft(wdd), p);
+ err = watchdog_get_timeleft(wdd, &val);
+ if (err)
+ return err;
+ return put_user(val, p);
default:
return -ENOTTY;
}
}
/*
- * watchdog_open: open the /dev/watchdog device.
+ * watchdog_open: open the /dev/watchdog* devices.
* @inode: inode of device
* @file: file handle to device
*
- * When the /dev/watchdog device gets opened, we start the watchdog.
+ * When the /dev/watchdog* device gets opened, we start the watchdog.
* Watch out: the /dev/watchdog device is single open, so we make sure
* it can only be opened once.
*/
@@ -259,6 +412,13 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
static int watchdog_open(struct inode *inode, struct file *file)
{
int err = -EBUSY;
+ struct watchdog_device *wdd;
+
+ /* Get the corresponding watchdog device */
+ if (imajor(inode) == MISC_MAJOR)
+ wdd = old_wdd;
+ else
+ wdd = container_of(inode->i_cdev, struct watchdog_device, cdev);
/* the watchdog is single open! */
if (test_and_set_bit(WDOG_DEV_OPEN, &wdd->status))
@@ -275,6 +435,11 @@ static int watchdog_open(struct inode *inode, struct file *file)
if (err < 0)
goto out_mod;
+ file->private_data = wdd;
+
+ if (wdd->ops->ref)
+ wdd->ops->ref(wdd);
+
/* dev/watchdog is a virtual (and thus non-seekable) filesystem */
return nonseekable_open(inode, file);
@@ -286,9 +451,9 @@ out:
}
/*
- * watchdog_release: release the /dev/watchdog device.
- * @inode: inode of device
- * @file: file handle to device
+ * watchdog_release: release the watchdog device.
+ * @inode: inode of device
+ * @file: file handle to device
*
* This is the code for when /dev/watchdog gets closed. We will only
* stop the watchdog when we have received the magic char (and nowayout
@@ -297,6 +462,7 @@ out:
static int watchdog_release(struct inode *inode, struct file *file)
{
+ struct watchdog_device *wdd = file->private_data;
int err = -EBUSY;
/*
@@ -310,7 +476,10 @@ static int watchdog_release(struct inode *inode, struct file *file)
/* If the watchdog was not stopped, send a keepalive ping */
if (err < 0) {
- pr_crit("%s: watchdog did not stop!\n", wdd->info->identity);
+ mutex_lock(&wdd->lock);
+ if (!test_bit(WDOG_UNREGISTERED, &wdd->status))
+ dev_crit(wdd->dev, "watchdog did not stop!\n");
+ mutex_unlock(&wdd->lock);
watchdog_ping(wdd);
}
@@ -320,6 +489,10 @@ static int watchdog_release(struct inode *inode, struct file *file)
/* make sure that /dev/watchdog can be re-opened */
clear_bit(WDOG_DEV_OPEN, &wdd->status);
+ /* Note wdd may be gone after this, do not use after this! */
+ if (wdd->ops->unref)
+ wdd->ops->unref(wdd);
+
return 0;
}
@@ -338,62 +511,92 @@ static struct miscdevice watchdog_miscdev = {
};
/*
- * watchdog_dev_register:
+ * watchdog_dev_register: register a watchdog device
* @watchdog: watchdog device
*
- * Register a watchdog device as /dev/watchdog. /dev/watchdog
- * is actually a miscdevice and thus we set it up like that.
+ * Register a watchdog device including handling the legacy
+ * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
+ * thus we set it up like that.
*/
int watchdog_dev_register(struct watchdog_device *watchdog)
{
- int err;
-
- /* Only one device can register for /dev/watchdog */
- if (test_and_set_bit(0, &watchdog_dev_busy)) {
- pr_err("only one watchdog can use /dev/watchdog\n");
- return -EBUSY;
+ int err, devno;
+
+ if (watchdog->id == 0) {
+ watchdog_miscdev.parent = watchdog->parent;
+ err = misc_register(&watchdog_miscdev);
+ if (err != 0) {
+ pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
+ watchdog->info->identity, WATCHDOG_MINOR, err);
+ if (err == -EBUSY)
+ pr_err("%s: a legacy watchdog module is probably present.\n",
+ watchdog->info->identity);
+ return err;
+ }
+ old_wdd = watchdog;
}
- wdd = watchdog;
-
- err = misc_register(&watchdog_miscdev);
- if (err != 0) {
- pr_err("%s: cannot register miscdev on minor=%d (err=%d)\n",
- watchdog->info->identity, WATCHDOG_MINOR, err);
- goto out;
+ /* Fill in the data structures */
+ devno = MKDEV(MAJOR(watchdog_devt), watchdog->id);
+ cdev_init(&watchdog->cdev, &watchdog_fops);
+ watchdog->cdev.owner = watchdog->ops->owner;
+
+ /* Add the device */
+ err = cdev_add(&watchdog->cdev, devno, 1);
+ if (err) {
+ pr_err("watchdog%d unable to add device %d:%d\n",
+ watchdog->id, MAJOR(watchdog_devt), watchdog->id);
+ if (watchdog->id == 0) {
+ misc_deregister(&watchdog_miscdev);
+ old_wdd = NULL;
+ }
}
-
- return 0;
-
-out:
- wdd = NULL;
- clear_bit(0, &watchdog_dev_busy);
return err;
}
/*
- * watchdog_dev_unregister:
+ * watchdog_dev_unregister: unregister a watchdog device
* @watchdog: watchdog device
*
- * Deregister the /dev/watchdog device.
+ * Unregister the watchdog and if needed the legacy /dev/watchdog device.
*/
int watchdog_dev_unregister(struct watchdog_device *watchdog)
{
- /* Check that a watchdog device was registered in the past */
- if (!test_bit(0, &watchdog_dev_busy) || !wdd)
- return -ENODEV;
-
- /* We can only unregister the watchdog device that was registered */
- if (watchdog != wdd) {
- pr_err("%s: watchdog was not registered as /dev/watchdog\n",
- watchdog->info->identity);
- return -ENODEV;
+ mutex_lock(&watchdog->lock);
+ set_bit(WDOG_UNREGISTERED, &watchdog->status);
+ mutex_unlock(&watchdog->lock);
+
+ cdev_del(&watchdog->cdev);
+ if (watchdog->id == 0) {
+ misc_deregister(&watchdog_miscdev);
+ old_wdd = NULL;
}
-
- misc_deregister(&watchdog_miscdev);
- wdd = NULL;
- clear_bit(0, &watchdog_dev_busy);
return 0;
}
+
+/*
+ * watchdog_dev_init: init dev part of watchdog core
+ *
+ * Allocate a range of chardev nodes to use for watchdog devices
+ */
+
+int __init watchdog_dev_init(void)
+{
+ int err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
+ if (err < 0)
+ pr_err("watchdog: unable to allocate char dev region\n");
+ return err;
+}
+
+/*
+ * watchdog_dev_exit: exit dev part of watchdog core
+ *
+ * Release the range of chardev nodes used for watchdog devices
+ */
+
+void __exit watchdog_dev_exit(void)
+{
+ unregister_chrdev_region(watchdog_devt, MAX_DOGS);
+}
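
Editor's note: watchdog_dev_init() and watchdog_dev_register() above follow the standard dynamic char-device recipe: reserve a block of minors once, then cdev_init()/cdev_add() one node per device. A hedged, minimal version of that recipe (all names hypothetical, fops assumed to be defined elsewhere):

#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/fs.h>

#define EXAMPLE_MAX_DEVS	32

static dev_t example_devt;
static const struct file_operations example_fops;	/* assumed: filled in elsewhere */

static int __init example_chrdev_init(void)
{
	/* one-time reservation of EXAMPLE_MAX_DEVS minors */
	return alloc_chrdev_region(&example_devt, 0, EXAMPLE_MAX_DEVS, "example");
}

static int example_add_node(struct cdev *cdev, int id)
{
	dev_t devno = MKDEV(MAJOR(example_devt), id);

	cdev_init(cdev, &example_fops);
	cdev->owner = THIS_MODULE;
	return cdev_add(cdev, devno, 1);	/* node becomes live immediately */
}
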
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 9adc5be57b13..fc3488631136 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
obj-$(CONFIG_XEN_PVHVM) += platform-pci.o
obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
-obj-$(CONFIG_XEN_DOM0) += pci.o
+obj-$(CONFIG_XEN_DOM0) += pci.o acpi.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
diff --git a/drivers/xen/acpi.c b/drivers/xen/acpi.c
new file mode 100644
index 000000000000..119d42a2bf57
--- /dev/null
+++ b/drivers/xen/acpi.c
@@ -0,0 +1,62 @@
+/******************************************************************************
+ * acpi.c
+ * acpi file for domain 0 kernel
+ *
+ * Copyright (c) 2011 Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ * Copyright (c) 2011 Yu Ke ke.yu@intel.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <xen/acpi.h>
+#include <xen/interface/platform.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+int xen_acpi_notify_hypervisor_state(u8 sleep_state,
+ u32 pm1a_cnt, u32 pm1b_cnt)
+{
+ struct xen_platform_op op = {
+ .cmd = XENPF_enter_acpi_sleep,
+ .interface_version = XENPF_INTERFACE_VERSION,
+ .u = {
+ .enter_acpi_sleep = {
+ .pm1a_cnt_val = (u16)pm1a_cnt,
+ .pm1b_cnt_val = (u16)pm1b_cnt,
+ .sleep_state = sleep_state,
+ },
+ },
+ };
+
+ if ((pm1a_cnt & 0xffff0000) || (pm1b_cnt & 0xffff0000)) {
+ WARN(1, "Using more than 16bits of PM1A/B 0x%x/0x%x!"
+ "Email xen-devel@lists.xensource.com Thank you.\n", \
+ pm1a_cnt, pm1b_cnt);
+ return -1;
+ }
+
+ HYPERVISOR_dom0_op(&op);
+ return 1;
+}
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 0a8a17cd80be..6908e4ce2a0d 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -611,7 +611,7 @@ static void disable_pirq(struct irq_data *data)
disable_dynirq(data);
}
-static int find_irq_by_gsi(unsigned gsi)
+int xen_irq_from_gsi(unsigned gsi)
{
struct irq_info *info;
@@ -625,6 +625,7 @@ static int find_irq_by_gsi(unsigned gsi)
return -1;
}
+EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
/*
* Do not make any assumptions regarding the relationship between the
@@ -644,7 +645,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
mutex_lock(&irq_mapping_update_lock);
- irq = find_irq_by_gsi(gsi);
+ irq = xen_irq_from_gsi(gsi);
if (irq != -1) {
printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
irq, gsi);
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index f100ce20b16b..0bfc1ef11259 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -38,6 +38,7 @@
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/hardirq.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -426,10 +427,8 @@ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
nflags = *pflags;
do {
flags = nflags;
- if (flags & (GTF_reading|GTF_writing)) {
- printk(KERN_ALERT "WARNING: g.e. still in use!\n");
+ if (flags & (GTF_reading|GTF_writing))
return 0;
- }
} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
return 1;
@@ -458,12 +457,103 @@ static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
return 1;
}
-int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
return gnttab_interface->end_foreign_access_ref(ref, readonly);
}
+
+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+{
+ if (_gnttab_end_foreign_access_ref(ref, readonly))
+ return 1;
+ pr_warn("WARNING: g.e. %#x still in use!\n", ref);
+ return 0;
+}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
+struct deferred_entry {
+ struct list_head list;
+ grant_ref_t ref;
+ bool ro;
+ uint16_t warn_delay;
+ struct page *page;
+};
+static LIST_HEAD(deferred_list);
+static void gnttab_handle_deferred(unsigned long);
+static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);
+
+static void gnttab_handle_deferred(unsigned long unused)
+{
+ unsigned int nr = 10;
+ struct deferred_entry *first = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ while (nr--) {
+ struct deferred_entry *entry
+ = list_first_entry(&deferred_list,
+ struct deferred_entry, list);
+
+ if (entry == first)
+ break;
+ list_del(&entry->list);
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+ if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
+ put_free_entry(entry->ref);
+ if (entry->page) {
+ pr_debug("freeing g.e. %#x (pfn %#lx)\n",
+ entry->ref, page_to_pfn(entry->page));
+ __free_page(entry->page);
+ } else
+ pr_info("freeing g.e. %#x\n", entry->ref);
+ kfree(entry);
+ entry = NULL;
+ } else {
+ if (!--entry->warn_delay)
+ pr_info("g.e. %#x still pending\n",
+ entry->ref);
+ if (!first)
+ first = entry;
+ }
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ if (entry)
+ list_add_tail(&entry->list, &deferred_list);
+ else if (list_empty(&deferred_list))
+ break;
+ }
+ if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
+ deferred_timer.expires = jiffies + HZ;
+ add_timer(&deferred_timer);
+ }
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+
+static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
+ struct page *page)
+{
+ struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ const char *what = KERN_WARNING "leaking";
+
+ if (entry) {
+ unsigned long flags;
+
+ entry->ref = ref;
+ entry->ro = readonly;
+ entry->page = page;
+ entry->warn_delay = 60;
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ list_add_tail(&entry->list, &deferred_list);
+ if (!timer_pending(&deferred_timer)) {
+ deferred_timer.expires = jiffies + HZ;
+ add_timer(&deferred_timer);
+ }
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+ what = KERN_DEBUG "deferring";
+ }
+ printk("%s g.e. %#x (pfn %#lx)\n",
+ what, ref, page ? page_to_pfn(page) : -1);
+}
+
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
unsigned long page)
{
@@ -471,12 +561,9 @@ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
put_free_entry(ref);
if (page != 0)
free_page(page);
- } else {
- /* XXX This needs to be fixed so that the ref and page are
- placed on a list to be freed up later. */
- printk(KERN_WARNING
- "WARNING: leaking g.e. and page still in use!\n");
- }
+ } else
+ gnttab_add_deferred(ref, readonly,
+ page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
@@ -741,6 +828,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct page **pages, unsigned int count)
{
int i, ret;
+ bool lazy = false;
pte_t *pte;
unsigned long mfn;
@@ -751,6 +839,11 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
if (xen_feature(XENFEAT_auto_translated_physmap))
return ret;
+ if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+ arch_enter_lazy_mmu_mode();
+ lazy = true;
+ }
+
for (i = 0; i < count; i++) {
/* Do not add to override if the map failed. */
if (map_ops[i].status)
@@ -769,6 +862,9 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
return ret;
}
+ if (lazy)
+ arch_leave_lazy_mmu_mode();
+
return ret;
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
@@ -777,6 +873,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct page **pages, unsigned int count, bool clear_pte)
{
int i, ret;
+ bool lazy = false;
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
if (ret)
@@ -785,12 +882,20 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
if (xen_feature(XENFEAT_auto_translated_physmap))
return ret;
+ if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+ arch_enter_lazy_mmu_mode();
+ lazy = true;
+ }
+
for (i = 0; i < count; i++) {
ret = m2p_remove_override(pages[i], clear_pte);
if (ret)
return ret;
}
+ if (lazy)
+ arch_leave_lazy_mmu_mode();
+
return ret;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
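
Editor's note: the grant-table change above stops leaking still-in-use grant entries and instead parks them on a list that a timer retries about once a second. A generic sketch of that deferred-retry pattern, using the same pre-4.15 timer API as the patch; everything named "example" is an assumption.

#include <linux/timer.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

struct example_deferred {
	struct list_head list;
	unsigned int resource_id;	/* placeholder for whatever is still busy */
};

static LIST_HEAD(example_deferred_list);
static DEFINE_SPINLOCK(example_deferred_lock);
static void example_retry(unsigned long data);
static DEFINE_TIMER(example_timer, example_retry, 0, 0);

static void example_defer(unsigned int id)
{
	struct example_deferred *e = kmalloc(sizeof(*e), GFP_ATOMIC);
	unsigned long flags;

	if (!e)
		return;		/* caller must cope with leaking the resource */

	e->resource_id = id;
	spin_lock_irqsave(&example_deferred_lock, flags);
	list_add_tail(&e->list, &example_deferred_list);
	if (!timer_pending(&example_timer)) {
		example_timer.expires = jiffies + HZ;	/* retry in ~1s */
		add_timer(&example_timer);
	}
	spin_unlock_irqrestore(&example_deferred_lock, flags);
}

static void example_retry(unsigned long data)
{
	/* walk example_deferred_list here, re-arming the timer while any
	 * entry is still busy, as gnttab_handle_deferred() does above */
}
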
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 0b48579a9cd6..7ff2569e17ae 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -29,6 +29,7 @@
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
+#include <xen/xen.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 146c94897016..7d041cb6da26 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -105,6 +105,12 @@ static unsigned int selfballoon_interval __read_mostly = 5;
*/
static unsigned int selfballoon_min_usable_mb;
+/*
+ * Amount of RAM in MB to add to the target number of pages.
+ * Can be used to reserve some more room for caches and the like.
+ */
+static unsigned int selfballoon_reserved_mb;
+
static void selfballoon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
@@ -217,7 +223,8 @@ static void selfballoon_process(struct work_struct *work)
cur_pages = totalram_pages;
tgt_pages = cur_pages; /* default is no change */
goal_pages = percpu_counter_read_positive(&vm_committed_as) +
- totalreserve_pages;
+ totalreserve_pages +
+ MB2PAGES(selfballoon_reserved_mb);
#ifdef CONFIG_FRONTSWAP
/* allow space for frontswap pages to be repatriated */
if (frontswap_selfshrinking && frontswap_enabled)
@@ -397,6 +404,30 @@ static DEVICE_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
show_selfballoon_min_usable_mb,
store_selfballoon_min_usable_mb);
+SELFBALLOON_SHOW(selfballoon_reserved_mb, "%d\n",
+ selfballoon_reserved_mb);
+
+static ssize_t store_selfballoon_reserved_mb(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ selfballoon_reserved_mb = val;
+ return count;
+}
+
+static DEVICE_ATTR(selfballoon_reserved_mb, S_IRUGO | S_IWUSR,
+ show_selfballoon_reserved_mb,
+ store_selfballoon_reserved_mb);
+
#ifdef CONFIG_FRONTSWAP
SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking);
@@ -480,6 +511,7 @@ static struct attribute *selfballoon_attrs[] = {
&dev_attr_selfballoon_downhysteresis.attr,
&dev_attr_selfballoon_uphysteresis.attr,
&dev_attr_selfballoon_min_usable_mb.attr,
+ &dev_attr_selfballoon_reserved_mb.attr,
#ifdef CONFIG_FRONTSWAP
&dev_attr_frontswap_selfshrinking.attr,
&dev_attr_frontswap_hysteresis.attr,
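
Editor's note: the selfballoon change follows the usual sysfs attribute recipe: a show/store pair wrapped by DEVICE_ATTR() and listed in an attribute array. A generic version is sketched below; names are illustrative and kstrtoul() is used in place of the older strict_strtoul() seen in the patch.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/capability.h>

static unsigned int example_reserved_mb;

static ssize_t show_example_reserved_mb(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%u\n", example_reserved_mb);
}

static ssize_t store_example_reserved_mb(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoul(buf, 10, &val) || val == 0)
		return -EINVAL;
	example_reserved_mb = val;
	return count;
}

static DEVICE_ATTR(example_reserved_mb, S_IRUGO | S_IWUSR,
		   show_example_reserved_mb, store_example_reserved_mb);
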
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 2eff7a6aaa20..52fe7ad07666 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -234,3 +234,9 @@ int xb_init_comms(void)
return 0;
}
+
+void xb_deinit_comms(void)
+{
+ unbind_from_irqhandler(xenbus_irq, &xb_waitq);
+ xenbus_irq = 0;
+}
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
index 6e42800fa499..c8abd3b8a6c4 100644
--- a/drivers/xen/xenbus/xenbus_comms.h
+++ b/drivers/xen/xenbus/xenbus_comms.h
@@ -35,6 +35,7 @@
int xs_init(void);
int xb_init_comms(void);
+void xb_deinit_comms(void);
/* Low level routines. */
int xb_write(const void *data, unsigned len);
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
index 3d3be78c1093..be738c43104b 100644
--- a/drivers/xen/xenbus/xenbus_dev_backend.c
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -8,7 +8,11 @@
#include <xen/xen.h>
#include <xen/page.h>
+#include <xen/xenbus.h>
#include <xen/xenbus_dev.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <asm/xen/hypervisor.h>
#include "xenbus_comms.h"
@@ -22,6 +26,50 @@ static int xenbus_backend_open(struct inode *inode, struct file *filp)
return nonseekable_open(inode, filp);
}
+static long xenbus_alloc(domid_t domid)
+{
+ struct evtchn_alloc_unbound arg;
+ int err = -EEXIST;
+
+ xs_suspend();
+
+ /* If xenstored_ready is nonzero, that means we have already talked to
+ * xenstore and set up watches. These watches will be restored by
+ * xs_resume, but that requires communication over the port established
+ * below that is not visible to anyone until the ioctl returns.
+ *
+ * This can be resolved by splitting the ioctl into two parts
+ * (postponing the resume until xenstored is active) but this is
+ * unnecessarily complex for the intended use where xenstored is only
+ * started once - so return -EEXIST if it's already running.
+ */
+ if (xenstored_ready)
+ goto out_err;
+
+ gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid,
+ virt_to_mfn(xen_store_interface), 0 /* writable */);
+
+ arg.dom = DOMID_SELF;
+ arg.remote_dom = domid;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &arg);
+ if (err)
+ goto out_err;
+
+ if (xen_store_evtchn > 0)
+ xb_deinit_comms();
+
+ xen_store_evtchn = arg.port;
+
+ xs_resume();
+
+ return arg.port;
+
+ out_err:
+ xs_suspend_cancel();
+ return err;
+}
+
static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
if (!capable(CAP_SYS_ADMIN))
@@ -33,6 +81,9 @@ static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned l
return xen_store_evtchn;
return -ENODEV;
+ case IOCTL_XENBUS_BACKEND_SETUP:
+ return xenbus_alloc(data);
+
default:
return -ENOTTY;
}
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 014c8dd62962..57ccb7537dae 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -448,7 +448,7 @@ void v9fs_evict_inode(struct inode *inode)
struct v9fs_inode *v9inode = V9FS_I(inode);
truncate_inode_pages(inode->i_mapping, 0);
- end_writeback(inode);
+ clear_inode(inode);
filemap_fdatawrite(inode->i_mapping);
#ifdef CONFIG_9P_FSCACHE
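
Editor's note: this and the fs/ hunks that follow are a mechanical rename: end_writeback() became clear_inode(), with the same job of marking the inode clean and finished with writeback before it is destroyed. The usual shape of an ->evict_inode implementation after the rename, sketched for a hypothetical filesystem:

#include <linux/fs.h>
#include <linux/mm.h>

static void example_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);		/* formerly end_writeback(inode) */
	/* per-fs cleanup (free private data, release blocks) goes after this */
}
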
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 88a4b0b50058..8bc4a59f4e7e 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -264,7 +264,7 @@ affs_evict_inode(struct inode *inode)
}
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
affs_free_prealloc(inode);
cache_page = (unsigned long)AFFS_I(inode)->i_lc;
if (cache_page) {
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index d890ae3b2ce6..95cffd38239f 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -423,7 +423,7 @@ void afs_evict_inode(struct inode *inode)
ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
afs_give_up_callback(vnode);
diff --git a/fs/aio.c b/fs/aio.c
index e7f2fad7b4ce..8c7c8b805372 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1446,13 +1446,13 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
ret = compat_rw_copy_check_uvector(type,
(struct compat_iovec __user *)kiocb->ki_buf,
kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
- &kiocb->ki_iovec, 1);
+ &kiocb->ki_iovec);
else
#endif
ret = rw_copy_check_uvector(type,
(struct iovec __user *)kiocb->ki_buf,
kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
- &kiocb->ki_iovec, 1);
+ &kiocb->ki_iovec);
if (ret < 0)
goto out;
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 6e488ebe7784..8a4fed8ead30 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -100,7 +100,7 @@ static int autofs4_show_options(struct seq_file *m, struct dentry *root)
static void autofs4_evict_inode(struct inode *inode)
{
- end_writeback(inode);
+ clear_inode(inode);
kfree(inode->i_private);
}
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 37268c5bb98b..1b35d6bd06b0 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -292,7 +292,6 @@ static const struct inode_operations bad_inode_ops =
.getxattr = bad_inode_getxattr,
.listxattr = bad_inode_listxattr,
.removexattr = bad_inode_removexattr,
- /* truncate_range returns void */
};
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index e23dc7c8b884..9870417c26e7 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -174,7 +174,7 @@ static void bfs_evict_inode(struct inode *inode)
truncate_inode_pages(&inode->i_data, 0);
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
if (inode->i_nlink)
return;
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 613aa0618235..790b3cddca67 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -505,7 +505,7 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
static void bm_evict_inode(struct inode *inode)
{
- end_writeback(inode);
+ clear_inode(inode);
kfree(inode->i_private);
}
diff --git a/fs/bio.c b/fs/bio.c
index 84da88539046..73922abba832 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -19,12 +19,14 @@
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
+#include <linux/cgroup.h>
#include <scsi/sg.h> /* for struct sg_iovec */
#include <trace/events/block.h>
@@ -418,6 +420,7 @@ void bio_put(struct bio *bio)
* last put frees it
*/
if (atomic_dec_and_test(&bio->bi_cnt)) {
+ bio_disassociate_task(bio);
bio->bi_next = NULL;
bio->bi_destructor(bio);
}
@@ -1646,6 +1649,64 @@ bad:
}
EXPORT_SYMBOL(bioset_create);
+#ifdef CONFIG_BLK_CGROUP
+/**
+ * bio_associate_current - associate a bio with %current
+ * @bio: target bio
+ *
+ * Associate @bio with %current if it hasn't been associated yet. Block
+ * layer will treat @bio as if it were issued by %current no matter which
+ * task actually issues it.
+ *
+ * This function takes an extra reference of @task's io_context and blkcg
+ * which will be put when @bio is released. The caller must own @bio,
+ * ensure %current->io_context exists, and is responsible for synchronizing
+ * calls to this function.
+ */
+int bio_associate_current(struct bio *bio)
+{
+ struct io_context *ioc;
+ struct cgroup_subsys_state *css;
+
+ if (bio->bi_ioc)
+ return -EBUSY;
+
+ ioc = current->io_context;
+ if (!ioc)
+ return -ENOENT;
+
+ /* acquire active ref on @ioc and associate */
+ get_io_context_active(ioc);
+ bio->bi_ioc = ioc;
+
+ /* associate blkcg if exists */
+ rcu_read_lock();
+ css = task_subsys_state(current, blkio_subsys_id);
+ if (css && css_tryget(css))
+ bio->bi_css = css;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/**
+ * bio_disassociate_task - undo bio_associate_current()
+ * @bio: target bio
+ */
+void bio_disassociate_task(struct bio *bio)
+{
+ if (bio->bi_ioc) {
+ put_io_context(bio->bi_ioc);
+ bio->bi_ioc = NULL;
+ }
+ if (bio->bi_css) {
+ css_put(bio->bi_css);
+ bio->bi_css = NULL;
+ }
+}
+
+#endif /* CONFIG_BLK_CGROUP */
+
static void __init biovec_init_slabs(void)
{
int i;
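
Editor's note: bio_associate_current(), added above, lets a driver pin a bio to the io_context and blkcg of the task that created it, so the block layer charges the I/O to that task even if a worker thread submits the bio later. A hedged usage sketch; the helper name and calling context are assumptions, and the call only has an effect with CONFIG_BLK_CGROUP enabled.

#include <linux/bio.h>
#include <linux/kernel.h>

/*
 * Hypothetical helper: called in the context of the task that owns the I/O,
 * before the bio is handed to a worker thread for submission.
 */
static void example_prepare_punted_bio(struct bio *bio)
{
	/*
	 * Per the patch above, this needs current->io_context to exist and
	 * returns -EBUSY if the bio is already associated, -ENOENT if there
	 * is no io_context.
	 */
	if (bio_associate_current(bio))
		pr_debug("bio not associated, it will be charged to the submitter\n");
}
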
diff --git a/fs/block_dev.c b/fs/block_dev.c
index ba11c30f302d..c2bbe1fb1326 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -487,7 +487,7 @@ static void bdev_evict_inode(struct inode *inode)
struct list_head *p;
truncate_inode_pages(&inode->i_data, 0);
invalidate_inode_buffers(inode); /* is it needed here? */
- end_writeback(inode);
+ clear_inode(inode);
spin_lock(&bdev_lock);
while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
__bd_forget(list_entry(p, struct inode, i_devices));
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 61b16c641ce0..ceb7b9c9edcc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3756,7 +3756,7 @@ void btrfs_evict_inode(struct inode *inode)
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root, nr);
no_delete:
- end_writeback(inode);
+ clear_inode(inode);
return;
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ed72428d9c75..988d4f302e48 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -54,7 +54,6 @@ prepare_open_request(struct super_block *sb, int flags, int create_mode)
req->r_fmode = ceph_flags_to_mode(flags);
req->r_args.open.flags = cpu_to_le32(flags);
req->r_args.open.mode = cpu_to_le32(create_mode);
- req->r_args.open.preferred = cpu_to_le32(-1);
out:
return req;
}
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 790914a598dd..8e3fb69fbe62 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -26,8 +26,7 @@ static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
l.object_size = ceph_file_layout_object_size(ci->i_layout);
l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
- l.preferred_osd =
- (s32)le32_to_cpu(ci->i_layout.fl_pg_preferred);
+ l.preferred_osd = (s32)-1;
if (copy_to_user(arg, &l, sizeof(l)))
return -EFAULT;
}
@@ -35,6 +34,32 @@ static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
return err;
}
+static long __validate_layout(struct ceph_mds_client *mdsc,
+ struct ceph_ioctl_layout *l)
+{
+ int i, err;
+
+ /* validate striping parameters */
+ if ((l->object_size & ~PAGE_MASK) ||
+ (l->stripe_unit & ~PAGE_MASK) ||
+ ((unsigned)l->object_size % (unsigned)l->stripe_unit))
+ return -EINVAL;
+
+ /* make sure it's a valid data pool */
+ mutex_lock(&mdsc->mutex);
+ err = -EINVAL;
+ for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
+ if (mdsc->mdsmap->m_data_pg_pools[i] == l->data_pool) {
+ err = 0;
+ break;
+ }
+ mutex_unlock(&mdsc->mutex);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
{
struct inode *inode = file->f_dentry->d_inode;
@@ -44,52 +69,40 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
struct ceph_ioctl_layout l;
struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode);
struct ceph_ioctl_layout nl;
- int err, i;
+ int err;
if (copy_from_user(&l, arg, sizeof(l)))
return -EFAULT;
/* validate changed params against current layout */
err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT);
- if (!err) {
- nl.stripe_unit = ceph_file_layout_su(ci->i_layout);
- nl.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
- nl.object_size = ceph_file_layout_object_size(ci->i_layout);
- nl.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
- nl.preferred_osd =
- (s32)le32_to_cpu(ci->i_layout.fl_pg_preferred);
- } else
+ if (err)
return err;
+ memset(&nl, 0, sizeof(nl));
if (l.stripe_count)
nl.stripe_count = l.stripe_count;
+ else
+ nl.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
if (l.stripe_unit)
nl.stripe_unit = l.stripe_unit;
+ else
+ nl.stripe_unit = ceph_file_layout_su(ci->i_layout);
if (l.object_size)
nl.object_size = l.object_size;
+ else
+ nl.object_size = ceph_file_layout_object_size(ci->i_layout);
if (l.data_pool)
nl.data_pool = l.data_pool;
- if (l.preferred_osd)
- nl.preferred_osd = l.preferred_osd;
+ else
+ nl.data_pool = ceph_file_layout_pg_pool(ci->i_layout);
- if ((nl.object_size & ~PAGE_MASK) ||
- (nl.stripe_unit & ~PAGE_MASK) ||
- ((unsigned)nl.object_size % (unsigned)nl.stripe_unit))
- return -EINVAL;
+ /* this is obsolete, and always -1 */
+ nl.preferred_osd = le64_to_cpu(-1);
- /* make sure it's a valid data pool */
- if (l.data_pool > 0) {
- mutex_lock(&mdsc->mutex);
- err = -EINVAL;
- for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
- if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
- err = 0;
- break;
- }
- mutex_unlock(&mdsc->mutex);
- if (err)
- return err;
- }
+ err = __validate_layout(mdsc, &nl);
+ if (err)
+ return err;
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT,
USE_AUTH_MDS);
@@ -106,8 +119,6 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
req->r_args.setlayout.layout.fl_object_size =
cpu_to_le32(l.object_size);
req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool);
- req->r_args.setlayout.layout.fl_pg_preferred =
- cpu_to_le32(l.preferred_osd);
parent_inode = ceph_get_dentry_parent_inode(file->f_dentry);
err = ceph_mdsc_do_request(mdsc, parent_inode, req);
@@ -127,33 +138,16 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
struct inode *inode = file->f_dentry->d_inode;
struct ceph_mds_request *req;
struct ceph_ioctl_layout l;
- int err, i;
+ int err;
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
/* copy and validate */
if (copy_from_user(&l, arg, sizeof(l)))
return -EFAULT;
- if ((l.object_size & ~PAGE_MASK) ||
- (l.stripe_unit & ~PAGE_MASK) ||
- !l.stripe_unit ||
- (l.object_size &&
- (unsigned)l.object_size % (unsigned)l.stripe_unit))
- return -EINVAL;
-
- /* make sure it's a valid data pool */
- if (l.data_pool > 0) {
- mutex_lock(&mdsc->mutex);
- err = -EINVAL;
- for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
- if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
- err = 0;
- break;
- }
- mutex_unlock(&mdsc->mutex);
- if (err)
- return err;
- }
+ err = __validate_layout(mdsc, &l);
+ if (err)
+ return err;
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETDIRLAYOUT,
USE_AUTH_MDS);
@@ -171,8 +165,6 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
cpu_to_le32(l.object_size);
req->r_args.setlayout.layout.fl_pg_pool =
cpu_to_le32(l.data_pool);
- req->r_args.setlayout.layout.fl_pg_preferred =
- cpu_to_le32(l.preferred_osd);
err = ceph_mdsc_do_request(mdsc, inode, req);
ceph_mdsc_put_request(req);
diff --git a/fs/ceph/ioctl.h b/fs/ceph/ioctl.h
index be4a60487333..c77028afb1e1 100644
--- a/fs/ceph/ioctl.h
+++ b/fs/ceph/ioctl.h
@@ -34,6 +34,8 @@
struct ceph_ioctl_layout {
__u64 stripe_unit, stripe_count, object_size;
__u64 data_pool;
+
+ /* obsolete: new values are ignored; reads always return -1 */
__s64 preferred_osd;
};
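For context, userspace drives the layout ioctls above through struct ceph_ioctl_layout. The sketch below is illustrative only: print_layout() is a hypothetical helper, and it assumes the CEPH_IOC_GET_LAYOUT request code defined elsewhere in this header.

#include <stdio.h>
#include <sys/ioctl.h>
#include "ioctl.h"	/* fs/ceph/ioctl.h, for struct ceph_ioctl_layout */

static void print_layout(int fd)
{
	struct ceph_ioctl_layout l;

	if (ioctl(fd, CEPH_IOC_GET_LAYOUT, &l) < 0) {
		perror("CEPH_IOC_GET_LAYOUT");
		return;
	}
	/* preferred_osd now always reads back as -1, per the change above */
	printf("su=%llu sc=%llu os=%llu pool=%llu preferred_osd=%lld\n",
	       (unsigned long long)l.stripe_unit,
	       (unsigned long long)l.stripe_count,
	       (unsigned long long)l.object_size,
	       (unsigned long long)l.data_pool,
	       (long long)l.preferred_osd);
}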
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 89971e137aab..200bc87eceb1 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -334,10 +334,10 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
dout("mdsc put_session %p %d -> %d\n", s,
atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
if (atomic_dec_and_test(&s->s_ref)) {
- if (s->s_authorizer)
+ if (s->s_auth.authorizer)
s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer(
s->s_mdsc->fsc->client->monc.auth,
- s->s_authorizer);
+ s->s_auth.authorizer);
kfree(s);
}
}
@@ -3395,39 +3395,33 @@ out:
/*
* authentication
*/
-static int get_authorizer(struct ceph_connection *con,
- void **buf, int *len, int *proto,
- void **reply_buf, int *reply_len, int force_new)
+
+/*
+ * Note: returned pointer is the address of a structure that's
+ * managed separately. Caller must *not* attempt to free it.
+ */
+static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
+ int *proto, int force_new)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
- int ret = 0;
-
- if (force_new && s->s_authorizer) {
- ac->ops->destroy_authorizer(ac, s->s_authorizer);
- s->s_authorizer = NULL;
- }
- if (s->s_authorizer == NULL) {
- if (ac->ops->create_authorizer) {
- ret = ac->ops->create_authorizer(
- ac, CEPH_ENTITY_TYPE_MDS,
- &s->s_authorizer,
- &s->s_authorizer_buf,
- &s->s_authorizer_buf_len,
- &s->s_authorizer_reply_buf,
- &s->s_authorizer_reply_buf_len);
- if (ret)
- return ret;
- }
- }
+ struct ceph_auth_handshake *auth = &s->s_auth;
+ if (force_new && auth->authorizer) {
+ if (ac->ops && ac->ops->destroy_authorizer)
+ ac->ops->destroy_authorizer(ac, auth->authorizer);
+ auth->authorizer = NULL;
+ }
+ if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
+ int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
+ auth);
+ if (ret)
+ return ERR_PTR(ret);
+ }
*proto = ac->protocol;
- *buf = s->s_authorizer_buf;
- *len = s->s_authorizer_buf_len;
- *reply_buf = s->s_authorizer_reply_buf;
- *reply_len = s->s_authorizer_reply_buf_len;
- return 0;
+
+ return auth;
}
@@ -3437,7 +3431,7 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len)
struct ceph_mds_client *mdsc = s->s_mdsc;
struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
- return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
+ return ac->ops->verify_authorizer_reply(ac, s->s_auth.authorizer, len);
}
static int invalidate_authorizer(struct ceph_connection *con)
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 8c7c04ebb595..dd26846dd71d 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -11,6 +11,7 @@
#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/mdsmap.h>
+#include <linux/ceph/auth.h>
/*
* Some lock dependencies:
@@ -113,9 +114,7 @@ struct ceph_mds_session {
struct ceph_connection s_con;
- struct ceph_authorizer *s_authorizer;
- void *s_authorizer_buf, *s_authorizer_reply_buf;
- size_t s_authorizer_buf_len, s_authorizer_reply_buf_len;
+ struct ceph_auth_handshake s_auth;
/* protected by s_gen_ttl_lock */
spinlock_t s_gen_ttl_lock;
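The new s_auth field bundles the buffers that used to live directly in the session. Roughly, the ceph_auth_handshake type (added to include/linux/ceph/auth.h elsewhere in this series; the field names are reconstructed here, so treat this as a sketch) looks like:

struct ceph_auth_handshake {
	struct ceph_authorizer *authorizer;
	void *authorizer_buf;
	size_t authorizer_buf_len;
	void *authorizer_reply_buf;
	size_t authorizer_reply_buf_len;
};

get_authorizer() can then hand back a single pointer to this structure instead of filling five out-parameters.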
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index f04c0961f993..e5206fc76562 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -331,7 +331,7 @@ static int build_snap_context(struct ceph_snap_realm *realm)
/* alloc new snap context */
err = -ENOMEM;
- if (num > (ULONG_MAX - sizeof(*snapc)) / sizeof(u64))
+ if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
goto fail;
snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS);
if (!snapc)
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 35b86331d8a5..785cb3057c95 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -118,15 +118,6 @@ static size_t ceph_vxattrcb_file_layout(struct ceph_inode_info *ci, char *val,
(unsigned long long)ceph_file_layout_su(ci->i_layout),
(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
(unsigned long long)ceph_file_layout_object_size(ci->i_layout));
-
- if (ceph_file_layout_pg_preferred(ci->i_layout) >= 0) {
- val += ret;
- size -= ret;
- ret += snprintf(val, size, "preferred_osd=%lld\n",
- (unsigned long long)ceph_file_layout_pg_preferred(
- ci->i_layout));
- }
-
return ret;
}
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 2b243af70aa3..a08306a8bec9 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -158,3 +158,23 @@ config CIFS_NFSD_EXPORT
depends on CIFS && EXPERIMENTAL && BROKEN
help
Allows NFS server to export a CIFS mounted share (nfsd over cifs)
+
+config CIFS_SMB2
+ bool "SMB2 network file system support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && INET && BROKEN
+ select NLS
+ select KEYS
+ select FSCACHE
+ select DNS_RESOLVER
+
+ help
+ This enables experimental support for the SMB2 (Server Message Block
+ version 2) protocol. The SMB2 protocol is the successor to the
+ popular CIFS and SMB network file sharing protocols. SMB2 is the
+ native file sharing mechanism for recent versions of Windows
+ operating systems (since Vista). SMB2 enablement will eventually
+ allow users better performance, security and features than are
+ possible with cifs. Note that smb2 mount options are also simpler
+ (compared to cifs) due to protocol improvements.
+
+ Unless you are a developer or tester, say N.
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 005d524c3a4a..4b4127544349 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_CIFS) += cifs.o
cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \
link.o misc.o netmisc.o smbencrypt.o transport.o asn1.o \
cifs_unicode.o nterr.o xattr.o cifsencrypt.o \
- readdir.o ioctl.o sess.o export.o
+ readdir.o ioctl.o sess.o export.o smb1ops.o
cifs-$(CONFIG_CIFS_ACL) += cifsacl.o
@@ -15,3 +15,5 @@ cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o
cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o
+
+cifs-$(CONFIG_CIFS_SMB2) += smb2ops.o
diff --git a/fs/cifs/README b/fs/cifs/README
index b7d782bab797..22ab7b5b8da7 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -608,11 +608,6 @@ Stats Lists summary resource usage information as well as per
in the kernel configuration.
Configuration pseudo-files:
-MultiuserMount If set to one, more than one CIFS session to
- the same server ip address can be established
- if more than one uid accesses the same mount
- point and if the uids user/password mapping
- information is available. (default is 0)
PacketSigningEnabled If set to one, cifs packet signing is enabled
and will be used if the server requires
it. If set to two, cifs packet signing is
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 270464629416..e8140528ca5c 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -57,19 +57,21 @@ cifs_dump_mem(char *label, void *data, int length)
}
}
-#ifdef CONFIG_CIFS_DEBUG2
void cifs_dump_detail(void *buf)
{
+#ifdef CONFIG_CIFS_DEBUG2
struct smb_hdr *smb = (struct smb_hdr *)buf;
cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
smb->Command, smb->Status.CifsError,
smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
cERROR(1, "smb buf %p len %d", smb, smbCalcSize(smb));
+#endif /* CONFIG_CIFS_DEBUG2 */
}
void cifs_dump_mids(struct TCP_Server_Info *server)
{
+#ifdef CONFIG_CIFS_DEBUG2
struct list_head *tmp;
struct mid_q_entry *mid_entry;
@@ -102,8 +104,8 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
}
}
spin_unlock(&GlobalMid_Lock);
-}
#endif /* CONFIG_CIFS_DEBUG2 */
+}
#ifdef CONFIG_PROC_FS
static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
@@ -420,7 +422,6 @@ static struct proc_dir_entry *proc_fs_cifs;
static const struct file_operations cifsFYI_proc_fops;
static const struct file_operations cifs_lookup_cache_proc_fops;
static const struct file_operations traceSMB_proc_fops;
-static const struct file_operations cifs_multiuser_mount_proc_fops;
static const struct file_operations cifs_security_flags_proc_fops;
static const struct file_operations cifs_linux_ext_proc_fops;
@@ -440,8 +441,6 @@ cifs_proc_init(void)
proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
&cifs_linux_ext_proc_fops);
- proc_create("MultiuserMount", 0, proc_fs_cifs,
- &cifs_multiuser_mount_proc_fops);
proc_create("SecurityFlags", 0, proc_fs_cifs,
&cifs_security_flags_proc_fops);
proc_create("LookupCacheEnabled", 0, proc_fs_cifs,
@@ -460,7 +459,6 @@ cifs_proc_clean(void)
#ifdef CONFIG_CIFS_STATS
remove_proc_entry("Stats", proc_fs_cifs);
#endif
- remove_proc_entry("MultiuserMount", proc_fs_cifs);
remove_proc_entry("SecurityFlags", proc_fs_cifs);
remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
@@ -617,52 +615,6 @@ static const struct file_operations traceSMB_proc_fops = {
.write = traceSMB_proc_write,
};
-static int cifs_multiuser_mount_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%d\n", multiuser_mount);
- return 0;
-}
-
-static int cifs_multiuser_mount_proc_open(struct inode *inode, struct file *fh)
-{
- return single_open(fh, cifs_multiuser_mount_proc_show, NULL);
-}
-
-static ssize_t cifs_multiuser_mount_proc_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *ppos)
-{
- char c;
- int rc;
- static bool warned;
-
- rc = get_user(c, buffer);
- if (rc)
- return rc;
- if (c == '0' || c == 'n' || c == 'N')
- multiuser_mount = 0;
- else if (c == '1' || c == 'y' || c == 'Y') {
- multiuser_mount = 1;
- if (!warned) {
- warned = true;
- printk(KERN_WARNING "CIFS VFS: The legacy multiuser "
- "mount code is scheduled to be deprecated in "
- "3.5. Please switch to using the multiuser "
- "mount option.");
- }
- }
-
- return count;
-}
-
-static const struct file_operations cifs_multiuser_mount_proc_fops = {
- .owner = THIS_MODULE,
- .open = cifs_multiuser_mount_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cifs_multiuser_mount_proc_write,
-};
-
static int cifs_security_flags_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "0x%x\n", global_secflags);
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 566e0ae8dc2c..c0c68bb492d7 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -24,10 +24,10 @@
#define _H_CIFS_DEBUG
void cifs_dump_mem(char *label, void *data, int length);
-#ifdef CONFIG_CIFS_DEBUG2
-#define DBG2 2
void cifs_dump_detail(void *);
void cifs_dump_mids(struct TCP_Server_Info *);
+#ifdef CONFIG_CIFS_DEBUG2
+#define DBG2 2
#else
#define DBG2 0
#endif
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 541ef81f6ae8..8b6e344eb0ba 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -56,7 +56,6 @@ int traceSMB = 0;
bool enable_oplocks = true;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
-unsigned int multiuser_mount = 0;
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
@@ -125,7 +124,7 @@ cifs_read_super(struct super_block *sb)
goto out_no_root;
}
- /* do that *after* d_alloc_root() - we want NULL ->d_op for root here */
+ /* do that *after* d_make_root() - we want NULL ->d_op for root here */
if (cifs_sb_master_tcon(cifs_sb)->nocase)
sb->s_d_op = &cifs_ci_dentry_ops;
else
@@ -272,7 +271,7 @@ static void
cifs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
cifs_fscache_release_inode_cookie(inode);
}
@@ -329,6 +328,19 @@ cifs_show_security(struct seq_file *s, struct TCP_Server_Info *server)
seq_printf(s, "i");
}
+static void
+cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
+{
+ seq_printf(s, ",cache=");
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
+ seq_printf(s, "strict");
+ else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
+ seq_printf(s, "none");
+ else
+ seq_printf(s, "loose");
+}
+
/*
* cifs_show_options() is for displaying mount options in /proc/mounts.
* Not all settable options are displayed but most of the important
@@ -342,7 +354,9 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
struct sockaddr *srcaddr;
srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
+ seq_printf(s, ",vers=%s", tcon->ses->server->vals->version_string);
cifs_show_security(s, tcon->ses->server);
+ cifs_show_cache_flavor(s, cifs_sb);
seq_printf(s, ",unc=%s", tcon->treeName);
@@ -408,8 +422,6 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_printf(s, ",rwpidforward");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
seq_printf(s, ",forcemand");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
- seq_printf(s, ",directio");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
seq_printf(s, ",nouser_xattr");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
@@ -432,8 +444,6 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_printf(s, ",nostrictsync");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
seq_printf(s, ",noperm");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
- seq_printf(s, ",strictcache");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
seq_printf(s, ",backupuid=%u", cifs_sb->mnt_backupuid);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
@@ -945,7 +955,6 @@ cifs_init_once(void *inode)
struct cifsInodeInfo *cifsi = inode;
inode_init_once(&cifsi->vfs_inode);
- INIT_LIST_HEAD(&cifsi->llist);
mutex_init(&cifsi->lock_mutex);
}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 4ff6313f0a91..20350a93ed99 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -43,6 +43,7 @@
#define CIFS_MIN_RCV_POOL 4
+#define MAX_REOPEN_ATT 5 /* maximum number of attempts to reopen a file */
/*
* default attribute cache timeout (jiffies)
*/
@@ -150,6 +151,57 @@ struct cifs_cred {
*****************************************************************
*/
+enum smb_version {
+ Smb_1 = 1,
+ Smb_21,
+};
+
+struct mid_q_entry;
+struct TCP_Server_Info;
+struct cifsFileInfo;
+struct cifs_ses;
+
+struct smb_version_operations {
+ int (*send_cancel)(struct TCP_Server_Info *, void *,
+ struct mid_q_entry *);
+ bool (*compare_fids)(struct cifsFileInfo *, struct cifsFileInfo *);
+ /* setup request: allocate mid, sign message */
+ int (*setup_request)(struct cifs_ses *, struct kvec *, unsigned int,
+ struct mid_q_entry **);
+ /* check response: verify signature, map error */
+ int (*check_receive)(struct mid_q_entry *, struct TCP_Server_Info *,
+ bool);
+ void (*add_credits)(struct TCP_Server_Info *, const unsigned int);
+ void (*set_credits)(struct TCP_Server_Info *, const int);
+ int * (*get_credits_field)(struct TCP_Server_Info *);
+ /* data offset from read response message */
+ unsigned int (*read_data_offset)(char *);
+ /* data length from read response message */
+ unsigned int (*read_data_length)(char *);
+ /* map smb to linux error */
+ int (*map_error)(char *, bool);
+ /* find mid corresponding to the response message */
+ struct mid_q_entry * (*find_mid)(struct TCP_Server_Info *, char *);
+ void (*dump_detail)(void *);
+ /* verify the message */
+ int (*check_message)(char *, unsigned int);
+ bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+};
+
+struct smb_version_values {
+ char *version_string;
+ __u32 large_lock_type;
+ __u32 exclusive_lock_type;
+ __u32 shared_lock_type;
+ __u32 unlock_lock_type;
+ size_t header_size;
+ size_t max_header_size;
+ size_t read_rsp_size;
+};
+
+#define HEADER_SIZE(server) (server->vals->header_size)
+#define MAX_HEADER_SIZE(server) (server->vals->max_header_size)
+
struct smb_vol {
char *username;
char *password;
@@ -205,6 +257,8 @@ struct smb_vol {
bool sockopt_tcp_nodelay:1;
unsigned short int port;
unsigned long actimeo; /* attribute cache timeout (jiffies) */
+ struct smb_version_operations *ops;
+ struct smb_version_values *vals;
char *prepath;
struct sockaddr_storage srcaddr; /* allow binding to a local IP */
struct nls_table *local_nls;
@@ -242,6 +296,8 @@ struct TCP_Server_Info {
int srv_count; /* reference counter */
/* 15 character server name + 0x20 16th byte indicating type = srv */
char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
+ struct smb_version_operations *ops;
+ struct smb_version_values *vals;
enum statusEnum tcpStatus; /* what we think the status is */
char *hostname; /* hostname portion of UNC string */
struct socket *ssocket;
@@ -321,16 +377,6 @@ in_flight(struct TCP_Server_Info *server)
return num;
}
-static inline int*
-get_credits_field(struct TCP_Server_Info *server)
-{
- /*
- * This will change to switch statement when we reserve slots for echos
- * and oplock breaks.
- */
- return &server->credits;
-}
-
static inline bool
has_credits(struct TCP_Server_Info *server, int *credits)
{
@@ -341,16 +387,16 @@ has_credits(struct TCP_Server_Info *server, int *credits)
return num > 0;
}
-static inline size_t
-header_size(void)
+static inline void
+add_credits(struct TCP_Server_Info *server, const unsigned int add)
{
- return sizeof(struct smb_hdr);
+ server->ops->add_credits(server, add);
}
-static inline size_t
-max_header_size(void)
+static inline void
+set_credits(struct TCP_Server_Info *server, const int val)
{
- return MAX_CIFS_HDR_SIZE;
+ server->ops->set_credits(server, val);
}
/*
@@ -547,8 +593,7 @@ struct cifsLockInfo {
__u64 offset;
__u64 length;
__u32 pid;
- __u8 type;
- __u16 netfid;
+ __u32 type;
};
/*
@@ -573,6 +618,10 @@ struct cifs_search_info {
struct cifsFileInfo {
struct list_head tlist; /* pointer to next fid owned by tcon */
struct list_head flist; /* next fid (file instance) for this inode */
+ struct list_head llist; /*
+ * brlocks held by this fid, protected by
+ * lock_mutex from cifsInodeInfo structure
+ */
unsigned int uid; /* allows finding which FileInfo structure */
__u32 pid; /* process id who opened file */
__u16 netfid; /* file id from remote */
@@ -615,9 +664,12 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
*/
struct cifsInodeInfo {
- struct list_head llist; /* brlocks for this inode */
bool can_cache_brlcks;
- struct mutex lock_mutex; /* protect two fields above */
+ struct mutex lock_mutex; /*
+ * protects the field above and the
+ * llist of every cifsFileInfo
+ * structure on openFileList
+ */
/* BB add in lists for dirty pages i.e. write caching info for oplock */
struct list_head openFileList;
__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
@@ -703,7 +755,6 @@ static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
#endif
-struct mid_q_entry;
/*
* This is the prototype for the mid receive function. This function is for
@@ -1042,12 +1093,7 @@ GLOBAL_EXTERN atomic_t smBufAllocCount;
GLOBAL_EXTERN atomic_t midCount;
/* Misc globals */
-GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
- to be established on existing mount if we
- have the uid/password or Kerberos credential
- or equivalent for current user */
-/* enable or disable oplocks */
-GLOBAL_EXTERN bool enable_oplocks;
+GLOBAL_EXTERN bool enable_oplocks; /* enable or disable oplocks */
GLOBAL_EXTERN unsigned int lookupCacheEnabled;
GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent
with more secure ntlmssp2 challenge/resp */
@@ -1074,4 +1120,11 @@ void cifs_oplock_break(struct work_struct *work);
extern const struct slow_work_ops cifs_oplock_break_ops;
extern struct workqueue_struct *cifsiod_wq;
+/* Operations for different SMB versions */
+#define SMB1_VERSION_STRING "1.0"
+extern struct smb_version_operations smb1_operations;
+extern struct smb_version_values smb1_values;
+#define SMB21_VERSION_STRING "2.1"
+extern struct smb_version_operations smb21_operations;
+extern struct smb_version_values smb21_values;
#endif /* _CIFS_GLOB_H */
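To make the ops/vals split concrete, here is a sketch of the SMB1 instances declared above. The real definitions live in the new smb1ops.c; entries are reconstructed from helpers visible in this diff where possible, and the remaining names (send_nt_cancel, cifs_compare_fids, cifs_find_mid) and the lock-type constants marked below are assumptions.

struct smb_version_operations smb1_operations = {
	.send_cancel		= send_nt_cancel,		/* assumed name */
	.compare_fids		= cifs_compare_fids,		/* assumed name */
	.setup_request		= cifs_setup_request,
	.check_receive		= cifs_check_receive,
	.map_error		= map_smb_to_linux_error,
	.find_mid		= cifs_find_mid,		/* assumed name */
	.dump_detail		= cifs_dump_detail,
	.check_message		= checkSMB,
	.is_oplock_break	= is_valid_oplock_break,
	/*
	 * add_credits, set_credits, get_credits_field, read_data_offset
	 * and read_data_length become thin wrappers around the static
	 * helpers removed from cifsglob.h and cifssmb.c in this series.
	 */
};

struct smb_version_values smb1_values = {
	.version_string		= SMB1_VERSION_STRING,
	.large_lock_type	= LOCKING_ANDX_LARGE_FILES,
	.shared_lock_type	= LOCKING_ANDX_SHARED_LOCK,
	.exclusive_lock_type	= 0,				/* assumed */
	.unlock_lock_type	= 0,				/* assumed */
	.header_size		= sizeof(struct smb_hdr),
	.max_header_size	= MAX_CIFS_HDR_SIZE,
	.read_rsp_size		= sizeof(READ_RSP),
};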
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 96192c1e380a..5ec21ecf7980 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -78,6 +78,8 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
int * /* bytes returned */ , const int long_op);
extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
char *in_buf, int flags);
+extern int cifs_setup_request(struct cifs_ses *, struct kvec *, unsigned int,
+ struct mid_q_entry **);
extern int cifs_check_receive(struct mid_q_entry *mid,
struct TCP_Server_Info *server, bool log_error);
extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
@@ -88,9 +90,6 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
struct smb_hdr *in_buf ,
struct smb_hdr *out_buf,
int *bytes_returned);
-extern void cifs_add_credits(struct TCP_Server_Info *server,
- const unsigned int add);
-extern void cifs_set_credits(struct TCP_Server_Info *server, const int val);
extern int checkSMB(char *buf, unsigned int length);
extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
extern bool backup_cred(struct cifs_sb_info *);
@@ -192,11 +191,13 @@ extern int CIFSTCon(unsigned int xid, struct cifs_ses *ses,
extern int CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
const char *searchName, const struct nls_table *nls_codepage,
- __u16 *searchHandle, struct cifs_search_info *psrch_inf,
+ __u16 *searchHandle, __u16 search_flags,
+ struct cifs_search_info *psrch_inf,
int map, const char dirsep);
extern int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
- __u16 searchHandle, struct cifs_search_info *psrch_inf);
+ __u16 searchHandle, __u16 search_flags,
+ struct cifs_search_info *psrch_inf);
extern int CIFSFindClose(const int, struct cifs_tcon *tcon,
const __u16 search_handle);
@@ -464,6 +465,9 @@ extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8,
/* asynchronous read support */
struct cifs_readdata {
+ struct kref refcount;
+ struct list_head list;
+ struct completion done;
struct cifsFileInfo *cfile;
struct address_space *mapping;
__u64 offset;
@@ -472,12 +476,13 @@ struct cifs_readdata {
int result;
struct list_head pages;
struct work_struct work;
+ int (*marshal_iov) (struct cifs_readdata *rdata,
+ unsigned int remaining);
unsigned int nr_iov;
struct kvec iov[1];
};
-struct cifs_readdata *cifs_readdata_alloc(unsigned int nr_pages);
-void cifs_readdata_free(struct cifs_readdata *rdata);
+void cifs_readdata_release(struct kref *refcount);
int cifs_async_readv(struct cifs_readdata *rdata);
/* asynchronous write support */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index da2f5446fa7a..b5ad716b2642 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -87,7 +87,6 @@ static struct {
#endif /* CIFS_POSIX */
/* Forward declarations */
-static void cifs_readv_complete(struct work_struct *work);
/* Mark as invalid, all open files on tree connections since they
were closed when session to server was lost */
@@ -461,7 +460,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
server->maxReq = min_t(unsigned int,
le16_to_cpu(rsp->MaxMpxCount),
cifs_max_pending);
- cifs_set_credits(server, server->maxReq);
+ set_credits(server, server->maxReq);
server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
/* even though we do not use raw we might as well set this
@@ -569,7 +568,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
little endian */
server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount),
cifs_max_pending);
- cifs_set_credits(server, server->maxReq);
+ set_credits(server, server->maxReq);
/* probably no need to store and check maxvcs */
server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
@@ -721,7 +720,7 @@ cifs_echo_callback(struct mid_q_entry *mid)
struct TCP_Server_Info *server = mid->callback_data;
DeleteMidQEntry(mid);
- cifs_add_credits(server, 1);
+ add_credits(server, 1);
}
int
@@ -1385,28 +1384,6 @@ openRetry:
return rc;
}
-struct cifs_readdata *
-cifs_readdata_alloc(unsigned int nr_pages)
-{
- struct cifs_readdata *rdata;
-
- /* readdata + 1 kvec for each page */
- rdata = kzalloc(sizeof(*rdata) +
- sizeof(struct kvec) * nr_pages, GFP_KERNEL);
- if (rdata != NULL) {
- INIT_WORK(&rdata->work, cifs_readv_complete);
- INIT_LIST_HEAD(&rdata->pages);
- }
- return rdata;
-}
-
-void
-cifs_readdata_free(struct cifs_readdata *rdata)
-{
- cifsFileInfo_put(rdata->cfile);
- kfree(rdata);
-}
-
/*
* Discard any remaining data in the current SMB. To do this, we borrow the
* current bigbuf.
@@ -1423,7 +1400,7 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
length = cifs_read_from_socket(server, server->bigbuf,
min_t(unsigned int, remaining,
- CIFSMaxBufSize + max_header_size()));
+ CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
if (length < 0)
return length;
server->total_read += length;
@@ -1434,38 +1411,14 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
return 0;
}
-static inline size_t
-read_rsp_size(void)
-{
- return sizeof(READ_RSP);
-}
-
-static inline unsigned int
-read_data_offset(char *buf)
-{
- READ_RSP *rsp = (READ_RSP *)buf;
- return le16_to_cpu(rsp->DataOffset);
-}
-
-static inline unsigned int
-read_data_length(char *buf)
-{
- READ_RSP *rsp = (READ_RSP *)buf;
- return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
- le16_to_cpu(rsp->DataLength);
-}
-
static int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
int length, len;
- unsigned int data_offset, remaining, data_len;
+ unsigned int data_offset, data_len;
struct cifs_readdata *rdata = mid->callback_data;
char *buf = server->smallbuf;
unsigned int buflen = get_rfc1002_length(buf) + 4;
- u64 eof;
- pgoff_t eof_index;
- struct page *page, *tpage;
cFYI(1, "%s: mid=%llu offset=%llu bytes=%u", __func__,
mid->mid, rdata->offset, rdata->bytes);
@@ -1475,9 +1428,10 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
* can if there's not enough data. At this point, we've read down to
* the Mid.
*/
- len = min_t(unsigned int, buflen, read_rsp_size()) - header_size() + 1;
+ len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
+ HEADER_SIZE(server) + 1;
- rdata->iov[0].iov_base = buf + header_size() - 1;
+ rdata->iov[0].iov_base = buf + HEADER_SIZE(server) - 1;
rdata->iov[0].iov_len = len;
length = cifs_readv_from_socket(server, rdata->iov, 1, len);
@@ -1486,7 +1440,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
server->total_read += length;
/* Was the SMB read successful? */
- rdata->result = map_smb_to_linux_error(buf, false);
+ rdata->result = server->ops->map_error(buf, false);
if (rdata->result != 0) {
cFYI(1, "%s: server returned error %d", __func__,
rdata->result);
@@ -1494,14 +1448,15 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
/* Is there enough to get to the rest of the READ_RSP header? */
- if (server->total_read < read_rsp_size()) {
+ if (server->total_read < server->vals->read_rsp_size) {
cFYI(1, "%s: server returned short header. got=%u expected=%zu",
- __func__, server->total_read, read_rsp_size());
+ __func__, server->total_read,
+ server->vals->read_rsp_size);
rdata->result = -EIO;
return cifs_readv_discard(server, mid);
}
- data_offset = read_data_offset(buf) + 4;
+ data_offset = server->ops->read_data_offset(buf) + 4;
if (data_offset < server->total_read) {
/*
* win2k8 sometimes sends an offset of 0 when the read
@@ -1540,7 +1495,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
rdata->iov[0].iov_base, rdata->iov[0].iov_len);
/* how much data is in the response? */
- data_len = read_data_length(buf);
+ data_len = server->ops->read_data_length(buf);
if (data_offset + data_len > buflen) {
/* data_len is corrupt -- discard frame */
rdata->result = -EIO;
@@ -1548,64 +1503,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
/* marshal up the page array */
- len = 0;
- remaining = data_len;
- rdata->nr_iov = 1;
-
- /* determine the eof that the server (probably) has */
- eof = CIFS_I(rdata->mapping->host)->server_eof;
- eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
- cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
-
- list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
- if (remaining >= PAGE_CACHE_SIZE) {
- /* enough data to fill the page */
- rdata->iov[rdata->nr_iov].iov_base = kmap(page);
- rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
- cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
- rdata->nr_iov, page->index,
- rdata->iov[rdata->nr_iov].iov_base,
- rdata->iov[rdata->nr_iov].iov_len);
- ++rdata->nr_iov;
- len += PAGE_CACHE_SIZE;
- remaining -= PAGE_CACHE_SIZE;
- } else if (remaining > 0) {
- /* enough for partial page, fill and zero the rest */
- rdata->iov[rdata->nr_iov].iov_base = kmap(page);
- rdata->iov[rdata->nr_iov].iov_len = remaining;
- cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
- rdata->nr_iov, page->index,
- rdata->iov[rdata->nr_iov].iov_base,
- rdata->iov[rdata->nr_iov].iov_len);
- memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
- '\0', PAGE_CACHE_SIZE - remaining);
- ++rdata->nr_iov;
- len += remaining;
- remaining = 0;
- } else if (page->index > eof_index) {
- /*
- * The VFS will not try to do readahead past the
- * i_size, but it's possible that we have outstanding
- * writes with gaps in the middle and the i_size hasn't
- * caught up yet. Populate those with zeroed out pages
- * to prevent the VFS from repeatedly attempting to
- * fill them until the writes are flushed.
- */
- zero_user(page, 0, PAGE_CACHE_SIZE);
- list_del(&page->lru);
- lru_cache_add_file(page);
- flush_dcache_page(page);
- SetPageUptodate(page);
- unlock_page(page);
- page_cache_release(page);
- } else {
- /* no need to hold page hostage */
- list_del(&page->lru);
- lru_cache_add_file(page);
- unlock_page(page);
- page_cache_release(page);
- }
- }
+ len = rdata->marshal_iov(rdata, data_len);
+ data_len -= len;
/* issue the read if we have any iovecs left to fill */
if (rdata->nr_iov > 1) {
@@ -1621,7 +1520,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
rdata->bytes = length;
cFYI(1, "total_read=%u buflen=%u remaining=%u", server->total_read,
- buflen, remaining);
+ buflen, data_len);
/* discard anything left over */
if (server->total_read < buflen)
@@ -1632,33 +1531,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
static void
-cifs_readv_complete(struct work_struct *work)
-{
- struct cifs_readdata *rdata = container_of(work,
- struct cifs_readdata, work);
- struct page *page, *tpage;
-
- list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
- list_del(&page->lru);
- lru_cache_add_file(page);
-
- if (rdata->result == 0) {
- kunmap(page);
- flush_dcache_page(page);
- SetPageUptodate(page);
- }
-
- unlock_page(page);
-
- if (rdata->result == 0)
- cifs_readpage_to_fscache(rdata->mapping->host, page);
-
- page_cache_release(page);
- }
- cifs_readdata_free(rdata);
-}
-
-static void
cifs_readv_callback(struct mid_q_entry *mid)
{
struct cifs_readdata *rdata = mid->callback_data;
@@ -1691,7 +1563,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
queue_work(cifsiod_wq, &rdata->work);
DeleteMidQEntry(mid);
- cifs_add_credits(server, 1);
+ add_credits(server, 1);
}
/* cifs_async_readv - send an async read, and set up mid to handle result */
@@ -1744,12 +1616,15 @@ cifs_async_readv(struct cifs_readdata *rdata)
rdata->iov[0].iov_base = smb;
rdata->iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
+ kref_get(&rdata->refcount);
rc = cifs_call_async(tcon->ses->server, rdata->iov, 1,
cifs_readv_receive, cifs_readv_callback,
rdata, false);
if (rc == 0)
cifs_stats_inc(&tcon->num_reads);
+ else
+ kref_put(&rdata->refcount, cifs_readdata_release);
cifs_small_buf_release(smb);
return rc;
@@ -2135,7 +2010,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
queue_work(cifsiod_wq, &wdata->work);
DeleteMidQEntry(mid);
- cifs_add_credits(tcon->ses->server, 1);
+ add_credits(tcon->ses->server, 1);
}
/* cifs_async_writev - send an async write, and set up mid to handle result */
@@ -4344,7 +4219,7 @@ int
CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
const char *searchName,
const struct nls_table *nls_codepage,
- __u16 *pnetfid,
+ __u16 *pnetfid, __u16 search_flags,
struct cifs_search_info *psrch_inf, int remap, const char dirsep)
{
/* level 257 SMB_ */
@@ -4416,8 +4291,7 @@ findFirstRetry:
cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
ATTR_DIRECTORY);
pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
- pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END |
- CIFS_SEARCH_RETURN_RESUME);
+ pSMB->SearchFlags = cpu_to_le16(search_flags);
pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
/* BB what should we set StorageType to? Does it matter? BB */
@@ -4487,8 +4361,8 @@ findFirstRetry:
return rc;
}
-int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
- __u16 searchHandle, struct cifs_search_info *psrch_inf)
+int CIFSFindNext(const int xid, struct cifs_tcon *tcon, __u16 searchHandle,
+ __u16 search_flags, struct cifs_search_info *psrch_inf)
{
TRANSACTION2_FNEXT_REQ *pSMB = NULL;
TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
@@ -4531,8 +4405,7 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO));
pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
pSMB->ResumeKey = psrch_inf->resume_key;
- pSMB->SearchFlags =
- cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME);
+ pSMB->SearchFlags = cpu_to_le16(search_flags);
name_len = psrch_inf->resume_name_len;
params += name_len;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e0b56d7a19c5..ccafdedd0dbc 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/connect.c
*
- * Copyright (C) International Business Machines Corp., 2002,2009
+ * Copyright (C) International Business Machines Corp., 2002,2011
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -102,7 +102,7 @@ enum {
Opt_srcaddr, Opt_prefixpath,
Opt_iocharset, Opt_sockopt,
Opt_netbiosname, Opt_servern,
- Opt_ver, Opt_sec,
+ Opt_ver, Opt_vers, Opt_sec, Opt_cache,
/* Mount options to be ignored */
Opt_ignore,
@@ -210,9 +210,9 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_netbiosname, "netbiosname=%s" },
{ Opt_servern, "servern=%s" },
{ Opt_ver, "ver=%s" },
- { Opt_ver, "vers=%s" },
- { Opt_ver, "version=%s" },
+ { Opt_vers, "vers=%s" },
{ Opt_sec, "sec=%s" },
+ { Opt_cache, "cache=%s" },
{ Opt_ignore, "cred" },
{ Opt_ignore, "credentials" },
@@ -261,6 +261,26 @@ static const match_table_t cifs_secflavor_tokens = {
{ Opt_sec_err, NULL }
};
+/* cache flavors */
+enum {
+ Opt_cache_loose,
+ Opt_cache_strict,
+ Opt_cache_none,
+ Opt_cache_err
+};
+
+static const match_table_t cifs_cacheflavor_tokens = {
+ { Opt_cache_loose, "loose" },
+ { Opt_cache_strict, "strict" },
+ { Opt_cache_none, "none" },
+ { Opt_cache_err, NULL }
+};
+
+static const match_table_t cifs_smb_version_tokens = {
+ { Smb_1, SMB1_VERSION_STRING },
+ { Smb_21, SMB21_VERSION_STRING },
+};
+
static int ip_connect(struct TCP_Server_Info *server);
static int generic_ip_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
@@ -549,7 +569,7 @@ allocate_buffers(struct TCP_Server_Info *server)
}
} else if (server->large_buf) {
/* we are reusing a dirty large buf, clear its start */
- memset(server->bigbuf, 0, header_size());
+ memset(server->bigbuf, 0, HEADER_SIZE(server));
}
if (!server->smallbuf) {
@@ -563,7 +583,7 @@ allocate_buffers(struct TCP_Server_Info *server)
/* beginning of smb buffer is cleared in our buf_get */
} else {
/* if existing small buf clear beginning */
- memset(server->smallbuf, 0, header_size());
+ memset(server->smallbuf, 0, HEADER_SIZE(server));
}
return true;
@@ -764,25 +784,6 @@ is_smb_response(struct TCP_Server_Info *server, unsigned char type)
return false;
}
-static struct mid_q_entry *
-find_mid(struct TCP_Server_Info *server, char *buffer)
-{
- struct smb_hdr *buf = (struct smb_hdr *)buffer;
- struct mid_q_entry *mid;
-
- spin_lock(&GlobalMid_Lock);
- list_for_each_entry(mid, &server->pending_mid_q, qhead) {
- if (mid->mid == buf->Mid &&
- mid->mid_state == MID_REQUEST_SUBMITTED &&
- le16_to_cpu(mid->command) == buf->Command) {
- spin_unlock(&GlobalMid_Lock);
- return mid;
- }
- }
- spin_unlock(&GlobalMid_Lock);
- return NULL;
-}
-
void
dequeue_mid(struct mid_q_entry *mid, bool malformed)
{
@@ -934,7 +935,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
unsigned int pdu_length = get_rfc1002_length(buf);
/* make sure this will fit in a large buffer */
- if (pdu_length > CIFSMaxBufSize + max_header_size() - 4) {
+ if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) - 4) {
cERROR(1, "SMB response too long (%u bytes)",
pdu_length);
cifs_reconnect(server);
@@ -950,8 +951,8 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
/* now read the rest */
- length = cifs_read_from_socket(server, buf + header_size() - 1,
- pdu_length - header_size() + 1 + 4);
+ length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
+ pdu_length - HEADER_SIZE(server) + 1 + 4);
if (length < 0)
return length;
server->total_read += length;
@@ -967,7 +968,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
* 48 bytes is enough to display the header and a little bit
* into the payload for debugging purposes.
*/
- length = checkSMB(buf, server->total_read);
+ length = server->ops->check_message(buf, server->total_read);
if (length != 0)
cifs_dump_mem("Bad SMB: ", buf,
min_t(unsigned int, server->total_read, 48));
@@ -1025,7 +1026,7 @@ cifs_demultiplex_thread(void *p)
continue;
/* make sure we have enough to get to the MID */
- if (pdu_length < header_size() - 1 - 4) {
+ if (pdu_length < HEADER_SIZE(server) - 1 - 4) {
cERROR(1, "SMB response too short (%u bytes)",
pdu_length);
cifs_reconnect(server);
@@ -1035,12 +1036,12 @@ cifs_demultiplex_thread(void *p)
/* read down to the MID */
length = cifs_read_from_socket(server, buf + 4,
- header_size() - 1 - 4);
+ HEADER_SIZE(server) - 1 - 4);
if (length < 0)
continue;
server->total_read += length;
- mid_entry = find_mid(server, buf);
+ mid_entry = server->ops->find_mid(server, buf);
if (!mid_entry || !mid_entry->receive)
length = standard_receive3(server, mid_entry);
@@ -1057,12 +1058,13 @@ cifs_demultiplex_thread(void *p)
if (mid_entry != NULL) {
if (!mid_entry->multiRsp || mid_entry->multiEnd)
mid_entry->callback(mid_entry);
- } else if (!is_valid_oplock_break(buf, server)) {
+ } else if (!server->ops->is_oplock_break(buf, server)) {
cERROR(1, "No task to wake, unknown frame received! "
"NumMids %d", atomic_read(&midCount));
- cifs_dump_mem("Received Data is: ", buf, header_size());
+ cifs_dump_mem("Received Data is: ", buf,
+ HEADER_SIZE(server));
#ifdef CONFIG_CIFS_DEBUG2
- cifs_dump_detail(buf);
+ server->ops->dump_detail(buf);
cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */
@@ -1186,6 +1188,54 @@ static int cifs_parse_security_flavors(char *value,
}
static int
+cifs_parse_cache_flavor(char *value, struct smb_vol *vol)
+{
+ substring_t args[MAX_OPT_ARGS];
+
+ switch (match_token(value, cifs_cacheflavor_tokens, args)) {
+ case Opt_cache_loose:
+ vol->direct_io = false;
+ vol->strict_io = false;
+ break;
+ case Opt_cache_strict:
+ vol->direct_io = false;
+ vol->strict_io = true;
+ break;
+ case Opt_cache_none:
+ vol->direct_io = true;
+ vol->strict_io = false;
+ break;
+ default:
+ cERROR(1, "bad cache= option: %s", value);
+ return 1;
+ }
+ return 0;
+}
+
+static int
+cifs_parse_smb_version(char *value, struct smb_vol *vol)
+{
+ substring_t args[MAX_OPT_ARGS];
+
+ switch (match_token(value, cifs_smb_version_tokens, args)) {
+ case Smb_1:
+ vol->ops = &smb1_operations;
+ vol->vals = &smb1_values;
+ break;
+#ifdef CONFIG_CIFS_SMB2
+ case Smb_21:
+ vol->ops = &smb21_operations;
+ vol->vals = &smb21_values;
+ break;
+#endif
+ default:
+ cERROR(1, "Unknown vers= option specified: %s", value);
+ return 1;
+ }
+ return 0;
+}
+
+static int
cifs_parse_mount_options(const char *mountdata, const char *devname,
struct smb_vol *vol)
{
@@ -1203,6 +1253,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
char *string = NULL;
char *tmp_end, *value;
char delim;
+ bool cache_specified = false;
+ static bool cache_warned = false;
separator[0] = ',';
separator[1] = 0;
@@ -1236,6 +1288,10 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->actimeo = CIFS_DEF_ACTIMEO;
+ /* FIXME: add autonegotiation -- for now, SMB1 is default */
+ vol->ops = &smb1_operations;
+ vol->vals = &smb1_values;
+
if (!mountdata)
goto cifs_parse_mount_err;
@@ -1414,10 +1470,20 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->seal = 1;
break;
case Opt_direct:
- vol->direct_io = 1;
+ cache_specified = true;
+ vol->direct_io = true;
+ vol->strict_io = false;
+ cERROR(1, "The \"directio\" option will be removed in "
+ "3.7. Please switch to the \"cache=none\" "
+ "option.");
break;
case Opt_strictcache:
- vol->strict_io = 1;
+ cache_specified = true;
+ vol->direct_io = false;
+ vol->strict_io = true;
+ cERROR(1, "The \"strictcache\" option will be removed "
+ "in 3.7. Please switch to the \"cache=strict\" "
+ "option.");
break;
case Opt_noac:
printk(KERN_WARNING "CIFS: Mount option noac not "
@@ -1821,8 +1887,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (string == NULL)
goto out_nomem;
- if (strnicmp(string, "cifs", 4) == 0 ||
- strnicmp(string, "1", 1) == 0) {
+ if (strnicmp(string, "1", 1) == 0) {
/* This is the default */
break;
}
@@ -1830,6 +1895,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
printk(KERN_WARNING "CIFS: Invalid version"
" specified\n");
goto cifs_parse_mount_err;
+ case Opt_vers:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (cifs_parse_smb_version(string, vol) != 0)
+ goto cifs_parse_mount_err;
+ break;
case Opt_sec:
string = match_strdup(args);
if (string == NULL)
@@ -1838,6 +1911,15 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (cifs_parse_security_flavors(string, vol) != 0)
goto cifs_parse_mount_err;
break;
+ case Opt_cache:
+ cache_specified = true;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (cifs_parse_cache_flavor(string, vol) != 0)
+ goto cifs_parse_mount_err;
+ break;
default:
/*
* An option we don't recognize. Save it off for later
@@ -1881,6 +1963,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
printk(KERN_NOTICE "CIFS: ignoring forcegid mount option "
"specified with no gid= option.\n");
+ /* FIXME: remove this block in 3.7 */
+ if (!cache_specified && !cache_warned) {
+ cache_warned = true;
+ printk(KERN_NOTICE "CIFS: no cache= option specified, using "
+ "\"cache=loose\". This default will change "
+ "to \"cache=strict\" in 3.7.\n");
+ }
+
kfree(mountdata_copy);
return 0;
@@ -2041,6 +2131,9 @@ match_security(struct TCP_Server_Info *server, struct smb_vol *vol)
static int match_server(struct TCP_Server_Info *server, struct sockaddr *addr,
struct smb_vol *vol)
{
+ if ((server->vals != vol->vals) || (server->ops != vol->ops))
+ return 0;
+
if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
return 0;
@@ -2163,6 +2256,8 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
goto out_err;
}
+ tcp_ses->ops = volume_info->ops;
+ tcp_ses->vals = volume_info->vals;
cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
tcp_ses->hostname = extract_hostname(volume_info->UNC);
if (IS_ERR(tcp_ses->hostname)) {
@@ -3569,6 +3664,7 @@ cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
if (cifs_parse_mount_options(mount_data, devname, volume_info))
return -EINVAL;
+
if (volume_info->nullauth) {
cFYI(1, "Anonymous login");
kfree(volume_info->username);
@@ -4010,11 +4106,11 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
if (server->maxBuf != 0)
return 0;
- cifs_set_credits(server, 1);
+ set_credits(server, 1);
rc = CIFSSMBNegotiate(xid, ses);
if (rc == -EAGAIN) {
/* retry only once on 1st time connection */
- cifs_set_credits(server, 1);
+ set_credits(server, 1);
rc = CIFSSMBNegotiate(xid, ses);
if (rc == -EAGAIN)
rc = -EHOSTDOWN;
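As a usage note, the cache= and vers= parsing added above maps mount options onto the vol fields and onto the ops/vals tables as summarized below; the mount invocations are illustrative placeholders, not taken from the patch.

/*
 *   mount -t cifs //server/share /mnt -o cache=loose
 *                       direct_io=0, strict_io=0 (the current default)
 *   mount -t cifs //server/share /mnt -o cache=strict
 *                       direct_io=0, strict_io=1 (old "strictcache")
 *   mount -t cifs //server/share /mnt -o cache=none
 *                       direct_io=1, strict_io=0 (old "directio")
 *   mount -t cifs //server/share /mnt -o vers=1.0
 *                       smb1_operations / smb1_values (default)
 *   mount -t cifs //server/share /mnt -o vers=2.1
 *                       smb21_operations / smb21_values
 *                       (requires CONFIG_CIFS_SMB2)
 */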
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 81725e9286e9..253170dfa716 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -264,6 +264,7 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
pCifsFile->tlink = cifs_get_tlink(tlink);
mutex_init(&pCifsFile->fh_mutex);
INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
+ INIT_LIST_HEAD(&pCifsFile->llist);
spin_lock(&cifs_file_list_lock);
list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
@@ -334,9 +335,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
* is closed anyway.
*/
mutex_lock(&cifsi->lock_mutex);
- list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
- if (li->netfid != cifs_file->netfid)
- continue;
+ list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
list_del(&li->llist);
cifs_del_lock_waiters(li);
kfree(li);
@@ -645,7 +644,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
}
static struct cifsLockInfo *
-cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
+cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
struct cifsLockInfo *lock =
kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
@@ -654,7 +653,6 @@ cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
lock->offset = offset;
lock->length = length;
lock->type = type;
- lock->netfid = netfid;
lock->pid = current->tgid;
INIT_LIST_HEAD(&lock->blist);
init_waitqueue_head(&lock->block_q);
@@ -672,19 +670,20 @@ cifs_del_lock_waiters(struct cifsLockInfo *lock)
}
static bool
-__cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
- __u64 length, __u8 type, __u16 netfid,
- struct cifsLockInfo **conf_lock)
+cifs_find_fid_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
+ __u64 length, __u8 type, struct cifsFileInfo *cur,
+ struct cifsLockInfo **conf_lock)
{
- struct cifsLockInfo *li, *tmp;
+ struct cifsLockInfo *li;
+ struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
- list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
+ list_for_each_entry(li, &cfile->llist, llist) {
if (offset + length <= li->offset ||
offset >= li->offset + li->length)
continue;
- else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
- ((netfid == li->netfid && current->tgid == li->pid) ||
- type == li->type))
+ else if ((type & server->vals->shared_lock_type) &&
+ ((server->ops->compare_fids(cur, cfile) &&
+ current->tgid == li->pid) || type == li->type))
continue;
else {
*conf_lock = li;
@@ -695,11 +694,23 @@ __cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
}
static bool
-cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
- struct cifsLockInfo **conf_lock)
+cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
+ __u8 type, struct cifsLockInfo **conf_lock)
{
- return __cifs_find_lock_conflict(cinode, lock->offset, lock->length,
- lock->type, lock->netfid, conf_lock);
+ bool rc = false;
+ struct cifsFileInfo *fid, *tmp;
+ struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+
+ spin_lock(&cifs_file_list_lock);
+ list_for_each_entry_safe(fid, tmp, &cinode->openFileList, flist) {
+ rc = cifs_find_fid_lock_conflict(fid, offset, length, type,
+ cfile, conf_lock);
+ if (rc)
+ break;
+ }
+ spin_unlock(&cifs_file_list_lock);
+
+ return rc;
}
/*
@@ -710,22 +721,24 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
* the server or 1 otherwise.
*/
static int
-cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
- __u8 type, __u16 netfid, struct file_lock *flock)
+cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
+ __u8 type, struct file_lock *flock)
{
int rc = 0;
struct cifsLockInfo *conf_lock;
+ struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
bool exist;
mutex_lock(&cinode->lock_mutex);
- exist = __cifs_find_lock_conflict(cinode, offset, length, type, netfid,
- &conf_lock);
+ exist = cifs_find_lock_conflict(cfile, offset, length, type,
+ &conf_lock);
if (exist) {
flock->fl_start = conf_lock->offset;
flock->fl_end = conf_lock->offset + conf_lock->length - 1;
flock->fl_pid = conf_lock->pid;
- if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
+ if (conf_lock->type & server->vals->shared_lock_type)
flock->fl_type = F_RDLCK;
else
flock->fl_type = F_WRLCK;
@@ -739,10 +752,11 @@ cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
}
static void
-cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
+cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
+ struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
mutex_lock(&cinode->lock_mutex);
- list_add_tail(&lock->llist, &cinode->llist);
+ list_add_tail(&lock->llist, &cfile->llist);
mutex_unlock(&cinode->lock_mutex);
}
@@ -753,10 +767,11 @@ cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
* 3) -EACCESS, if there is a lock that prevents us and wait is false.
*/
static int
-cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
+cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
bool wait)
{
struct cifsLockInfo *conf_lock;
+ struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
bool exist;
int rc = 0;
@@ -764,9 +779,10 @@ try_again:
exist = false;
mutex_lock(&cinode->lock_mutex);
- exist = cifs_find_lock_conflict(cinode, lock, &conf_lock);
+ exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
+ lock->type, &conf_lock);
if (!exist && cinode->can_cache_brlcks) {
- list_add_tail(&lock->llist, &cinode->llist);
+ list_add_tail(&lock->llist, &cfile->llist);
mutex_unlock(&cinode->lock_mutex);
return rc;
}
@@ -888,7 +904,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
for (i = 0; i < 2; i++) {
cur = buf;
num = 0;
- list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
+ list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
if (li->type != types[i])
continue;
cur->Pid = cpu_to_le16(li->pid);
@@ -898,7 +914,8 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
if (++num == max_num) {
stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
- li->type, 0, num, buf);
+ (__u8)li->type, 0, num,
+ buf);
if (stored_rc)
rc = stored_rc;
cur = buf;
@@ -909,7 +926,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
if (num) {
stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
- types[i], 0, num, buf);
+ (__u8)types[i], 0, num, buf);
if (stored_rc)
rc = stored_rc;
}
@@ -1053,8 +1070,8 @@ cifs_push_locks(struct cifsFileInfo *cfile)
}
static void
-cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
- bool *wait_flag)
+cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
+ bool *wait_flag, struct TCP_Server_Info *server)
{
if (flock->fl_flags & FL_POSIX)
cFYI(1, "Posix");
@@ -1073,38 +1090,50 @@ cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
(~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
- *type = LOCKING_ANDX_LARGE_FILES;
+ *type = server->vals->large_lock_type;
if (flock->fl_type == F_WRLCK) {
cFYI(1, "F_WRLCK ");
+ *type |= server->vals->exclusive_lock_type;
*lock = 1;
} else if (flock->fl_type == F_UNLCK) {
cFYI(1, "F_UNLCK");
+ *type |= server->vals->unlock_lock_type;
*unlock = 1;
/* Check if unlock includes more than one lock range */
} else if (flock->fl_type == F_RDLCK) {
cFYI(1, "F_RDLCK");
- *type |= LOCKING_ANDX_SHARED_LOCK;
+ *type |= server->vals->shared_lock_type;
*lock = 1;
} else if (flock->fl_type == F_EXLCK) {
cFYI(1, "F_EXLCK");
+ *type |= server->vals->exclusive_lock_type;
*lock = 1;
} else if (flock->fl_type == F_SHLCK) {
cFYI(1, "F_SHLCK");
- *type |= LOCKING_ANDX_SHARED_LOCK;
+ *type |= server->vals->shared_lock_type;
*lock = 1;
} else
cFYI(1, "Unknown type of lock");
}
static int
-cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
+cifs_mandatory_lock(int xid, struct cifsFileInfo *cfile, __u64 offset,
+ __u64 length, __u32 type, int lock, int unlock, bool wait)
+{
+ return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->netfid,
+ current->tgid, length, offset, unlock, lock,
+ (__u8)type, wait, 0);
+}
+
+static int
+cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
bool wait_flag, bool posix_lck, int xid)
{
int rc = 0;
__u64 length = 1 + flock->fl_end - flock->fl_start;
struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct TCP_Server_Info *server = tcon->ses->server;
__u16 netfid = cfile->netfid;
if (posix_lck) {
@@ -1114,7 +1143,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
if (!rc)
return rc;
- if (type & LOCKING_ANDX_SHARED_LOCK)
+ if (type & server->vals->shared_lock_type)
posix_lock_type = CIFS_RDLCK;
else
posix_lock_type = CIFS_WRLCK;
@@ -1124,38 +1153,35 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
return rc;
}
- rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
- flock);
+ rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
if (!rc)
return rc;
/* BB we could chain these into one lock request BB */
- rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
- flock->fl_start, 0, 1, type, 0, 0);
+ rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length, type,
+ 1, 0, false);
if (rc == 0) {
- rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
- length, flock->fl_start, 1, 0,
- type, 0, 0);
+ rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+ type, 0, 1, false);
flock->fl_type = F_UNLCK;
if (rc != 0)
cERROR(1, "Error unlocking previously locked "
- "range %d during test of lock", rc);
+ "range %d during test of lock", rc);
return 0;
}
- if (type & LOCKING_ANDX_SHARED_LOCK) {
+ if (type & server->vals->shared_lock_type) {
flock->fl_type = F_WRLCK;
return 0;
}
- rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
- flock->fl_start, 0, 1,
- type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
+ rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+ type | server->vals->shared_lock_type, 1, 0,
+ false);
if (rc == 0) {
- rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
- length, flock->fl_start, 1, 0,
- type | LOCKING_ANDX_SHARED_LOCK,
- 0, 0);
+ rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+ type | server->vals->shared_lock_type,
+ 0, 1, false);
flock->fl_type = F_RDLCK;
if (rc != 0)
cERROR(1, "Error unlocking previously locked "
@@ -1212,15 +1238,13 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
for (i = 0; i < 2; i++) {
cur = buf;
num = 0;
- list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
+ list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
if (flock->fl_start > li->offset ||
(flock->fl_start + length) <
(li->offset + li->length))
continue;
if (current->tgid != li->pid)
continue;
- if (cfile->netfid != li->netfid)
- continue;
if (types[i] != li->type)
continue;
if (!cinode->can_cache_brlcks) {
@@ -1233,7 +1257,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
cpu_to_le32((u32)(li->offset>>32));
/*
* We need to save a lock here to let us add
- * it again to the inode list if the unlock
+ * it again to the file's list if the unlock
* range request fails on the server.
*/
list_move(&li->llist, &tmp_llist);
@@ -1247,10 +1271,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
* We failed on the unlock range
* request - add all locks from
* the tmp list to the head of
- * the inode list.
+ * the file's list.
*/
cifs_move_llist(&tmp_llist,
- &cinode->llist);
+ &cfile->llist);
rc = stored_rc;
} else
/*
@@ -1265,7 +1289,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
} else {
/*
* We can cache brlock requests - simply remove
- * a lock from the inode list.
+ * a lock from the file's list.
*/
list_del(&li->llist);
cifs_del_lock_waiters(li);
@@ -1276,7 +1300,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
types[i], num, 0, buf);
if (stored_rc) {
- cifs_move_llist(&tmp_llist, &cinode->llist);
+ cifs_move_llist(&tmp_llist, &cfile->llist);
rc = stored_rc;
} else
cifs_free_llist(&tmp_llist);
@@ -1289,14 +1313,14 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
}
static int
-cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
+cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
{
int rc = 0;
__u64 length = 1 + flock->fl_end - flock->fl_start;
struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
+ struct TCP_Server_Info *server = tcon->ses->server;
__u16 netfid = cfile->netfid;
if (posix_lck) {
@@ -1306,7 +1330,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
if (!rc || rc < 0)
return rc;
- if (type & LOCKING_ANDX_SHARED_LOCK)
+ if (type & server->vals->shared_lock_type)
posix_lock_type = CIFS_RDLCK;
else
posix_lock_type = CIFS_WRLCK;
@@ -1323,24 +1347,24 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
if (lock) {
struct cifsLockInfo *lock;
- lock = cifs_lock_init(flock->fl_start, length, type, netfid);
+ lock = cifs_lock_init(flock->fl_start, length, type);
if (!lock)
return -ENOMEM;
- rc = cifs_lock_add_if(cinode, lock, wait_flag);
+ rc = cifs_lock_add_if(cfile, lock, wait_flag);
if (rc < 0)
kfree(lock);
if (rc <= 0)
goto out;
- rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
- flock->fl_start, 0, 1, type, wait_flag, 0);
+ rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+ type, 1, 0, wait_flag);
if (rc) {
kfree(lock);
goto out;
}
- cifs_lock_add(cinode, lock);
+ cifs_lock_add(cfile, lock);
} else if (unlock)
rc = cifs_unlock_range(cfile, flock, xid);
@@ -1361,7 +1385,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
struct cifsInodeInfo *cinode;
struct cifsFileInfo *cfile;
__u16 netfid;
- __u8 type;
+ __u32 type;
rc = -EACCES;
xid = GetXid();
@@ -1370,11 +1394,13 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
"end: %lld", cmd, flock->fl_flags, flock->fl_type,
flock->fl_start, flock->fl_end);
- cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);
-
- cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
cfile = (struct cifsFileInfo *)file->private_data;
tcon = tlink_tcon(cfile->tlink);
+
+ cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
+ tcon->ses->server);
+
+ cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
netfid = cfile->netfid;
cinode = CIFS_I(file->f_path.dentry->d_inode);
@@ -1539,10 +1565,11 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
bool fsuid_only)
{
- struct cifsFileInfo *open_file;
+ struct cifsFileInfo *open_file, *inv_file = NULL;
struct cifs_sb_info *cifs_sb;
bool any_available = false;
int rc;
+ unsigned int refind = 0;
/* Having a null inode here (because mapping->host was set to zero by
the VFS or MM) should not happen but we had reports of an oops (due to
@@ -1562,40 +1589,25 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
spin_lock(&cifs_file_list_lock);
refind_writable:
+ if (refind > MAX_REOPEN_ATT) {
+ spin_unlock(&cifs_file_list_lock);
+ return NULL;
+ }
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
if (!any_available && open_file->pid != current->tgid)
continue;
if (fsuid_only && open_file->uid != current_fsuid())
continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
- cifsFileInfo_get(open_file);
-
if (!open_file->invalidHandle) {
/* found a good writable file */
+ cifsFileInfo_get(open_file);
spin_unlock(&cifs_file_list_lock);
return open_file;
+ } else {
+ if (!inv_file)
+ inv_file = open_file;
}
-
- spin_unlock(&cifs_file_list_lock);
-
- /* Had to unlock since following call can block */
- rc = cifs_reopen_file(open_file, false);
- if (!rc)
- return open_file;
-
- /* if it fails, try another handle if possible */
- cFYI(1, "wp failed on reopen file");
- cifsFileInfo_put(open_file);
-
- spin_lock(&cifs_file_list_lock);
-
- /* else we simply continue to the next entry. Thus
- we do not loop on reopen errors. If we
- can not reopen the file, for example if we
- reconnected to a server with another client
- racing to delete or lock the file we would not
- make progress if we restarted before the beginning
- of the loop here. */
}
}
/* couldn't find useable FH with same pid, try any available */
@@ -1603,7 +1615,30 @@ refind_writable:
any_available = true;
goto refind_writable;
}
+
+ if (inv_file) {
+ any_available = false;
+ cifsFileInfo_get(inv_file);
+ }
+
spin_unlock(&cifs_file_list_lock);
+
+ if (inv_file) {
+ rc = cifs_reopen_file(inv_file, false);
+ if (!rc)
+ return inv_file;
+ else {
+ spin_lock(&cifs_file_list_lock);
+ list_move_tail(&inv_file->flist,
+ &cifs_inode->openFileList);
+ spin_unlock(&cifs_file_list_lock);
+ cifsFileInfo_put(inv_file);
+ spin_lock(&cifs_file_list_lock);
+ ++refind;
+ goto refind_writable;
+ }
+ }
+
return NULL;
}
@@ -2339,24 +2374,224 @@ ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
return cifs_user_writev(iocb, iov, nr_segs, pos);
}
+static struct cifs_readdata *
+cifs_readdata_alloc(unsigned int nr_vecs, work_func_t complete)
+{
+ struct cifs_readdata *rdata;
+
+ rdata = kzalloc(sizeof(*rdata) +
+ sizeof(struct kvec) * nr_vecs, GFP_KERNEL);
+ if (rdata != NULL) {
+ kref_init(&rdata->refcount);
+ INIT_LIST_HEAD(&rdata->list);
+ init_completion(&rdata->done);
+ INIT_WORK(&rdata->work, complete);
+ INIT_LIST_HEAD(&rdata->pages);
+ }
+ return rdata;
+}
+
+void
+cifs_readdata_release(struct kref *refcount)
+{
+ struct cifs_readdata *rdata = container_of(refcount,
+ struct cifs_readdata, refcount);
+
+ if (rdata->cfile)
+ cifsFileInfo_put(rdata->cfile);
+
+ kfree(rdata);
+}
+
+static int
+cifs_read_allocate_pages(struct list_head *list, unsigned int npages)
+{
+ int rc = 0;
+ struct page *page, *tpage;
+ unsigned int i;
+
+ for (i = 0; i < npages; i++) {
+ page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
+ if (!page) {
+ rc = -ENOMEM;
+ break;
+ }
+ list_add(&page->lru, list);
+ }
+
+ if (rc) {
+ list_for_each_entry_safe(page, tpage, list, lru) {
+ list_del(&page->lru);
+ put_page(page);
+ }
+ }
+ return rc;
+}
+
+static void
+cifs_uncached_readdata_release(struct kref *refcount)
+{
+ struct page *page, *tpage;
+ struct cifs_readdata *rdata = container_of(refcount,
+ struct cifs_readdata, refcount);
+
+ list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+ list_del(&page->lru);
+ put_page(page);
+ }
+ cifs_readdata_release(refcount);
+}
+
+static int
+cifs_retry_async_readv(struct cifs_readdata *rdata)
+{
+ int rc;
+
+ do {
+ if (rdata->cfile->invalidHandle) {
+ rc = cifs_reopen_file(rdata->cfile, true);
+ if (rc != 0)
+ continue;
+ }
+ rc = cifs_async_readv(rdata);
+ } while (rc == -EAGAIN);
+
+ return rc;
+}
+
+/**
+ * cifs_readdata_to_iov - copy data from pages in response to an iovec
+ * @rdata: the readdata response with list of pages holding data
+ * @iov: vector in which we should copy the data
+ * @nr_segs: number of segments in vector
+ * @offset: offset into file of the first iovec
+ * @copied: used to return the amount of data copied to the iov
+ *
+ * This function copies data from a list of pages in a readdata response into
+ * an array of iovecs. It will first calculate where the data should go
+ * based on the info in the readdata and then copy the data into that spot.
+ */
+static ssize_t
+cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
+ unsigned long nr_segs, loff_t offset, ssize_t *copied)
+{
+ int rc = 0;
+ struct iov_iter ii;
+ size_t pos = rdata->offset - offset;
+ struct page *page, *tpage;
+ ssize_t remaining = rdata->bytes;
+ unsigned char *pdata;
+
+ /* set up iov_iter and advance to the correct offset */
+ iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
+ iov_iter_advance(&ii, pos);
+
+ *copied = 0;
+ list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+ ssize_t copy;
+
+ /* copy a whole page or whatever's left */
+ copy = min_t(ssize_t, remaining, PAGE_SIZE);
+
+ /* ...but limit it to whatever space is left in the iov */
+ copy = min_t(ssize_t, copy, iov_iter_count(&ii));
+
+ /* go while there's data to be copied and no errors */
+ if (copy && !rc) {
+ pdata = kmap(page);
+ rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
+ (int)copy);
+ kunmap(page);
+ if (!rc) {
+ *copied += copy;
+ remaining -= copy;
+ iov_iter_advance(&ii, copy);
+ }
+ }
+
+ list_del(&page->lru);
+ put_page(page);
+ }
+
+ return rc;
+}
+
+static void
+cifs_uncached_readv_complete(struct work_struct *work)
+{
+ struct cifs_readdata *rdata = container_of(work,
+ struct cifs_readdata, work);
+
+ /* if the result is non-zero then the pages weren't kmapped */
+ if (rdata->result == 0) {
+ struct page *page;
+
+ list_for_each_entry(page, &rdata->pages, lru)
+ kunmap(page);
+ }
+
+ complete(&rdata->done);
+ kref_put(&rdata->refcount, cifs_uncached_readdata_release);
+}
+
+static int
+cifs_uncached_read_marshal_iov(struct cifs_readdata *rdata,
+ unsigned int remaining)
+{
+ int len = 0;
+ struct page *page, *tpage;
+
+ rdata->nr_iov = 1;
+ list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+ if (remaining >= PAGE_SIZE) {
+ /* enough data to fill the page */
+ rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+ rdata->iov[rdata->nr_iov].iov_len = PAGE_SIZE;
+ cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+ rdata->nr_iov, page->index,
+ rdata->iov[rdata->nr_iov].iov_base,
+ rdata->iov[rdata->nr_iov].iov_len);
+ ++rdata->nr_iov;
+ len += PAGE_SIZE;
+ remaining -= PAGE_SIZE;
+ } else if (remaining > 0) {
+ /* enough for partial page, fill and zero the rest */
+ rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+ rdata->iov[rdata->nr_iov].iov_len = remaining;
+ cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+ rdata->nr_iov, page->index,
+ rdata->iov[rdata->nr_iov].iov_base,
+ rdata->iov[rdata->nr_iov].iov_len);
+ memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
+ '\0', PAGE_SIZE - remaining);
+ ++rdata->nr_iov;
+ len += remaining;
+ remaining = 0;
+ } else {
+ /* no need to hold page hostage */
+ list_del(&page->lru);
+ put_page(page);
+ }
+ }
+
+ return len;
+}
+
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
unsigned long nr_segs, loff_t *poffset)
{
- int rc;
- int xid;
- ssize_t total_read;
- unsigned int bytes_read = 0;
+ ssize_t rc;
size_t len, cur_len;
- int iov_offset = 0;
+ ssize_t total_read = 0;
+ loff_t offset = *poffset;
+ unsigned int npages;
struct cifs_sb_info *cifs_sb;
- struct cifs_tcon *pTcon;
+ struct cifs_tcon *tcon;
struct cifsFileInfo *open_file;
- struct smb_com_read_rsp *pSMBr;
- struct cifs_io_parms io_parms;
- char *read_data;
- unsigned int rsize;
- __u32 pid;
+ struct cifs_readdata *rdata, *tmp;
+ struct list_head rdata_list;
+ pid_t pid;
if (!nr_segs)
return 0;
@@ -2365,14 +2600,10 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
if (!len)
return 0;
- xid = GetXid();
+ INIT_LIST_HEAD(&rdata_list);
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
-
- /* FIXME: set up handlers for larger reads and/or convert to async */
- rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
-
open_file = file->private_data;
- pTcon = tlink_tcon(open_file->tlink);
+ tcon = tlink_tcon(open_file->tlink);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
@@ -2382,56 +2613,78 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
cFYI(1, "attempting read on write only file instance");
- for (total_read = 0; total_read < len; total_read += bytes_read) {
- cur_len = min_t(const size_t, len - total_read, rsize);
- rc = -EAGAIN;
- read_data = NULL;
+ do {
+ cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
+ npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
- while (rc == -EAGAIN) {
- int buf_type = CIFS_NO_BUFFER;
- if (open_file->invalidHandle) {
- rc = cifs_reopen_file(open_file, true);
- if (rc != 0)
- break;
- }
- io_parms.netfid = open_file->netfid;
- io_parms.pid = pid;
- io_parms.tcon = pTcon;
- io_parms.offset = *poffset;
- io_parms.length = cur_len;
- rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
- &read_data, &buf_type);
- pSMBr = (struct smb_com_read_rsp *)read_data;
- if (read_data) {
- char *data_offset = read_data + 4 +
- le16_to_cpu(pSMBr->DataOffset);
- if (memcpy_toiovecend(iov, data_offset,
- iov_offset, bytes_read))
- rc = -EFAULT;
- if (buf_type == CIFS_SMALL_BUFFER)
- cifs_small_buf_release(read_data);
- else if (buf_type == CIFS_LARGE_BUFFER)
- cifs_buf_release(read_data);
- read_data = NULL;
- iov_offset += bytes_read;
- }
+ /* allocate a readdata struct */
+ rdata = cifs_readdata_alloc(npages,
+ cifs_uncached_readv_complete);
+ if (!rdata) {
+ rc = -ENOMEM;
+ goto error;
}
- if (rc || (bytes_read == 0)) {
- if (total_read) {
- break;
- } else {
- FreeXid(xid);
- return rc;
+ rc = cifs_read_allocate_pages(&rdata->pages, npages);
+ if (rc)
+ goto error;
+
+ rdata->cfile = cifsFileInfo_get(open_file);
+ rdata->offset = offset;
+ rdata->bytes = cur_len;
+ rdata->pid = pid;
+ rdata->marshal_iov = cifs_uncached_read_marshal_iov;
+
+ rc = cifs_retry_async_readv(rdata);
+error:
+ if (rc) {
+ kref_put(&rdata->refcount,
+ cifs_uncached_readdata_release);
+ break;
+ }
+
+ list_add_tail(&rdata->list, &rdata_list);
+ offset += cur_len;
+ len -= cur_len;
+ } while (len > 0);
+
+ /* if at least one read request was sent successfully, reset rc */
+ if (!list_empty(&rdata_list))
+ rc = 0;
+
+ /* the loop below should proceed in the order of increasing offsets */
+restart_loop:
+ list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
+ if (!rc) {
+ ssize_t copied;
+
+ /* FIXME: freezable sleep too? */
+ rc = wait_for_completion_killable(&rdata->done);
+ if (rc)
+ rc = -EINTR;
+ else if (rdata->result)
+ rc = rdata->result;
+ else {
+ rc = cifs_readdata_to_iov(rdata, iov,
+ nr_segs, *poffset,
+ &copied);
+ total_read += copied;
+ }
+
+ /* resend call if it's a retryable error */
+ if (rc == -EAGAIN) {
+ rc = cifs_retry_async_readv(rdata);
+ goto restart_loop;
}
- } else {
- cifs_stats_bytes_read(pTcon, bytes_read);
- *poffset += bytes_read;
}
+ list_del_init(&rdata->list);
+ kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
- FreeXid(xid);
- return total_read;
+ cifs_stats_bytes_read(tcon, total_read);
+ *poffset += total_read;
+
+ return total_read ? total_read : rc;
}
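
For readers skimming the hunk above: the uncached read path now queues one cifs_readdata per rsize-sized chunk and only afterwards waits for the completions in offset order. A stripped-down sketch of that shape, using only names introduced in this patch (error handling, -EAGAIN resends, and most refcount handling omitted):

	/* sketch only -- condensed from cifs_iovec_read() above */
	do {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
		rdata = cifs_readdata_alloc(npages, cifs_uncached_readv_complete);
		cifs_read_allocate_pages(&rdata->pages, npages);
		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->marshal_iov = cifs_uncached_read_marshal_iov;
		cifs_retry_async_readv(rdata);	/* reopens and resends on -EAGAIN */
		list_add_tail(&rdata->list, &rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		wait_for_completion_killable(&rdata->done);
		cifs_readdata_to_iov(rdata, iov, nr_segs, *poffset, &copied);
		total_read += copied;
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}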
ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
@@ -2606,6 +2859,100 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
return rc;
}
+static void
+cifs_readv_complete(struct work_struct *work)
+{
+ struct cifs_readdata *rdata = container_of(work,
+ struct cifs_readdata, work);
+ struct page *page, *tpage;
+
+ list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+ list_del(&page->lru);
+ lru_cache_add_file(page);
+
+ if (rdata->result == 0) {
+ kunmap(page);
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ }
+
+ unlock_page(page);
+
+ if (rdata->result == 0)
+ cifs_readpage_to_fscache(rdata->mapping->host, page);
+
+ page_cache_release(page);
+ }
+ kref_put(&rdata->refcount, cifs_readdata_release);
+}
+
+static int
+cifs_readpages_marshal_iov(struct cifs_readdata *rdata, unsigned int remaining)
+{
+ int len = 0;
+ struct page *page, *tpage;
+ u64 eof;
+ pgoff_t eof_index;
+
+ /* determine the eof that the server (probably) has */
+ eof = CIFS_I(rdata->mapping->host)->server_eof;
+ eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
+ cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
+
+ rdata->nr_iov = 1;
+ list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+ if (remaining >= PAGE_CACHE_SIZE) {
+ /* enough data to fill the page */
+ rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+ rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
+ cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+ rdata->nr_iov, page->index,
+ rdata->iov[rdata->nr_iov].iov_base,
+ rdata->iov[rdata->nr_iov].iov_len);
+ ++rdata->nr_iov;
+ len += PAGE_CACHE_SIZE;
+ remaining -= PAGE_CACHE_SIZE;
+ } else if (remaining > 0) {
+ /* enough for partial page, fill and zero the rest */
+ rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+ rdata->iov[rdata->nr_iov].iov_len = remaining;
+ cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+ rdata->nr_iov, page->index,
+ rdata->iov[rdata->nr_iov].iov_base,
+ rdata->iov[rdata->nr_iov].iov_len);
+ memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
+ '\0', PAGE_CACHE_SIZE - remaining);
+ ++rdata->nr_iov;
+ len += remaining;
+ remaining = 0;
+ } else if (page->index > eof_index) {
+ /*
+ * The VFS will not try to do readahead past the
+ * i_size, but it's possible that we have outstanding
+ * writes with gaps in the middle and the i_size hasn't
+ * caught up yet. Populate those with zeroed out pages
+ * to prevent the VFS from repeatedly attempting to
+ * fill them until the writes are flushed.
+ */
+ zero_user(page, 0, PAGE_CACHE_SIZE);
+ list_del(&page->lru);
+ lru_cache_add_file(page);
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+ page_cache_release(page);
+ } else {
+ /* no need to hold page hostage */
+ list_del(&page->lru);
+ lru_cache_add_file(page);
+ unlock_page(page);
+ page_cache_release(page);
+ }
+ }
+
+ return len;
+}
+
static int cifs_readpages(struct file *file, struct address_space *mapping,
struct list_head *page_list, unsigned num_pages)
{
@@ -2708,7 +3055,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
nr_pages++;
}
- rdata = cifs_readdata_alloc(nr_pages);
+ rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
if (!rdata) {
/* best to give up if we're out of mem */
list_for_each_entry_safe(page, tpage, &tmplist, lru) {
@@ -2722,24 +3069,16 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
}
spin_lock(&cifs_file_list_lock);
- cifsFileInfo_get(open_file);
spin_unlock(&cifs_file_list_lock);
- rdata->cfile = open_file;
+ rdata->cfile = cifsFileInfo_get(open_file);
rdata->mapping = mapping;
rdata->offset = offset;
rdata->bytes = bytes;
rdata->pid = pid;
+ rdata->marshal_iov = cifs_readpages_marshal_iov;
list_splice_init(&tmplist, &rdata->pages);
- do {
- if (open_file->invalidHandle) {
- rc = cifs_reopen_file(open_file, true);
- if (rc != 0)
- continue;
- }
- rc = cifs_async_readv(rdata);
- } while (rc == -EAGAIN);
-
+ rc = cifs_retry_async_readv(rdata);
if (rc != 0) {
list_for_each_entry_safe(page, tpage, &rdata->pages,
lru) {
@@ -2748,9 +3087,11 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
unlock_page(page);
page_cache_release(page);
}
- cifs_readdata_free(rdata);
+ kref_put(&rdata->refcount, cifs_readdata_release);
break;
}
+
+ kref_put(&rdata->refcount, cifs_readdata_release);
}
return rc;
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 4221b5e48a42..6d2667f0c98c 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -51,7 +51,15 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
cifs_sb = CIFS_SB(inode->i_sb);
switch (command) {
+ static bool warned = false;
case CIFS_IOC_CHECKUMOUNT:
+ if (!warned) {
+ warned = true;
+ cERROR(1, "the CIFS_IOC_CHECKUMOUNT ioctl will "
+ "be deprecated in 3.7. Please "
+ "migrate away from the use of "
+ "umount.cifs");
+ }
cFYI(1, "User unmount attempted");
if (cifs_sb->mnt_uid == current_uid())
rc = 0;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index c29d1aa2c54f..e2552d2b2e42 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -306,8 +306,6 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
const struct cifs_tcon *treeCon, int word_count
/* length of fixed section (word count) in two byte units */)
{
- struct list_head *temp_item;
- struct cifs_ses *ses;
char *temp = (char *) buffer;
memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
@@ -337,51 +335,6 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
/* Uid is not converted */
buffer->Uid = treeCon->ses->Suid;
buffer->Mid = GetNextMid(treeCon->ses->server);
- if (multiuser_mount != 0) {
- /* For the multiuser case, there are few obvious technically */
- /* possible mechanisms to match the local linux user (uid) */
- /* to a valid remote smb user (smb_uid): */
- /* 1) Query Winbind (or other local pam/nss daemon */
- /* for userid/password/logon_domain or credential */
- /* 2) Query Winbind for uid to sid to username mapping */
- /* and see if we have a matching password for existing*/
- /* session for that user perhas getting password by */
- /* adding a new pam_cifs module that stores passwords */
- /* so that the cifs vfs can get at that for all logged*/
- /* on users */
- /* 3) (Which is the mechanism we have chosen) */
- /* Search through sessions to the same server for a */
- /* a match on the uid that was passed in on mount */
- /* with the current processes uid (or euid?) and use */
- /* that smb uid. If no existing smb session for */
- /* that uid found, use the default smb session ie */
- /* the smb session for the volume mounted which is */
- /* the same as would be used if the multiuser mount */
- /* flag were disabled. */
-
- /* BB Add support for establishing new tCon and SMB Session */
- /* with userid/password pairs found on the smb session */
- /* for other target tcp/ip addresses BB */
- if (current_fsuid() != treeCon->ses->linux_uid) {
- cFYI(1, "Multiuser mode and UID "
- "did not match tcon uid");
- spin_lock(&cifs_tcp_ses_lock);
- list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
- ses = list_entry(temp_item, struct cifs_ses, smb_ses_list);
- if (ses->linux_uid == current_fsuid()) {
- if (ses->server == treeCon->ses->server) {
- cFYI(1, "found matching uid substitute right smb_uid");
- buffer->Uid = ses->Suid;
- break;
- } else {
- /* BB eventually call cifs_setup_session here */
- cFYI(1, "local UID found but no smb sess with this server exists");
- }
- }
- }
- spin_unlock(&cifs_tcp_ses_lock);
- }
- }
}
if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
buffer->Flags2 |= SMBFLG2_DFS;
@@ -700,22 +653,3 @@ backup_cred(struct cifs_sb_info *cifs_sb)
return false;
}
-
-void
-cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add)
-{
- spin_lock(&server->req_lock);
- server->credits += add;
- server->in_flight--;
- spin_unlock(&server->req_lock);
- wake_up(&server->request_q);
-}
-
-void
-cifs_set_credits(struct TCP_Server_Info *server, const int val)
-{
- spin_lock(&server->req_lock);
- server->credits = val;
- server->oplocks = val > 1 ? enable_oplocks : false;
- spin_unlock(&server->req_lock);
-}
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index e2bbc683e018..0a8224d1c4c5 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -219,6 +219,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
static int initiate_cifs_search(const int xid, struct file *file)
{
+ __u16 search_flags;
int rc = 0;
char *full_path = NULL;
struct cifsFileInfo *cifsFile;
@@ -270,8 +271,12 @@ ffirst_retry:
cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
}
+ search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
+ if (backup_cred(cifs_sb))
+ search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
+
rc = CIFSFindFirst(xid, pTcon, full_path, cifs_sb->local_nls,
- &cifsFile->netfid, &cifsFile->srch_inf,
+ &cifsFile->netfid, search_flags, &cifsFile->srch_inf,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR, CIFS_DIR_SEP(cifs_sb));
if (rc == 0)
@@ -502,11 +507,13 @@ static int cifs_save_resume_key(const char *current_entry,
static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
struct file *file, char **ppCurrentEntry, int *num_to_ret)
{
+ __u16 search_flags;
int rc = 0;
int pos_in_buf = 0;
loff_t first_entry_in_buffer;
loff_t index_to_find = file->f_pos;
struct cifsFileInfo *cifsFile = file->private_data;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
/* check if index in the buffer */
if ((cifsFile == NULL) || (ppCurrentEntry == NULL) ||
@@ -560,10 +567,14 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
cifsFile);
}
+ search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
+ if (backup_cred(cifs_sb))
+ search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
+
while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
(rc == 0) && !cifsFile->srch_inf.endOfSearch) {
cFYI(1, "calling findnext2");
- rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
+ rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, search_flags,
&cifsFile->srch_inf);
/* FindFirst/Next set last_entry to NULL on malformed reply */
if (cifsFile->srch_inf.last_entry)
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
new file mode 100644
index 000000000000..d9d615fbed3f
--- /dev/null
+++ b/fs/cifs/smb1ops.c
@@ -0,0 +1,154 @@
+/*
+ * SMB1 (CIFS) version specific operations
+ *
+ * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2 as published
+ * by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+#include "cifspdu.h"
+
+/*
+ * An NT cancel request header looks just like the original request except:
+ *
+ * The Command is SMB_COM_NT_CANCEL
+ * The WordCount is zeroed out
+ * The ByteCount is zeroed out
+ *
+ * This function mangles an existing request buffer into a
+ * SMB_COM_NT_CANCEL request and then sends it.
+ */
+static int
+send_nt_cancel(struct TCP_Server_Info *server, void *buf,
+ struct mid_q_entry *mid)
+{
+ int rc = 0;
+ struct smb_hdr *in_buf = (struct smb_hdr *)buf;
+
+ /* -4 for RFC1001 length and +2 for BCC field */
+ in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
+ in_buf->Command = SMB_COM_NT_CANCEL;
+ in_buf->WordCount = 0;
+ put_bcc(0, in_buf);
+
+ mutex_lock(&server->srv_mutex);
+ rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+ if (rc) {
+ mutex_unlock(&server->srv_mutex);
+ return rc;
+ }
+ rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
+ mutex_unlock(&server->srv_mutex);
+
+ cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
+ in_buf->Mid, rc);
+
+ return rc;
+}
+
+static bool
+cifs_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
+{
+ return ob1->netfid == ob2->netfid;
+}
+
+static unsigned int
+cifs_read_data_offset(char *buf)
+{
+ READ_RSP *rsp = (READ_RSP *)buf;
+ return le16_to_cpu(rsp->DataOffset);
+}
+
+static unsigned int
+cifs_read_data_length(char *buf)
+{
+ READ_RSP *rsp = (READ_RSP *)buf;
+ return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
+ le16_to_cpu(rsp->DataLength);
+}
+
+static struct mid_q_entry *
+cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
+{
+ struct smb_hdr *buf = (struct smb_hdr *)buffer;
+ struct mid_q_entry *mid;
+
+ spin_lock(&GlobalMid_Lock);
+ list_for_each_entry(mid, &server->pending_mid_q, qhead) {
+ if (mid->mid == buf->Mid &&
+ mid->mid_state == MID_REQUEST_SUBMITTED &&
+ le16_to_cpu(mid->command) == buf->Command) {
+ spin_unlock(&GlobalMid_Lock);
+ return mid;
+ }
+ }
+ spin_unlock(&GlobalMid_Lock);
+ return NULL;
+}
+
+static void
+cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add)
+{
+ spin_lock(&server->req_lock);
+ server->credits += add;
+ server->in_flight--;
+ spin_unlock(&server->req_lock);
+ wake_up(&server->request_q);
+}
+
+static void
+cifs_set_credits(struct TCP_Server_Info *server, const int val)
+{
+ spin_lock(&server->req_lock);
+ server->credits = val;
+ server->oplocks = val > 1 ? enable_oplocks : false;
+ spin_unlock(&server->req_lock);
+}
+
+static int *
+cifs_get_credits_field(struct TCP_Server_Info *server)
+{
+ return &server->credits;
+}
+
+struct smb_version_operations smb1_operations = {
+ .send_cancel = send_nt_cancel,
+ .compare_fids = cifs_compare_fids,
+ .setup_request = cifs_setup_request,
+ .check_receive = cifs_check_receive,
+ .add_credits = cifs_add_credits,
+ .set_credits = cifs_set_credits,
+ .get_credits_field = cifs_get_credits_field,
+ .read_data_offset = cifs_read_data_offset,
+ .read_data_length = cifs_read_data_length,
+ .map_error = map_smb_to_linux_error,
+ .find_mid = cifs_find_mid,
+ .check_message = checkSMB,
+ .dump_detail = cifs_dump_detail,
+ .is_oplock_break = is_valid_oplock_break,
+};
+
+struct smb_version_values smb1_values = {
+ .version_string = SMB1_VERSION_STRING,
+ .large_lock_type = LOCKING_ANDX_LARGE_FILES,
+ .exclusive_lock_type = 0,
+ .shared_lock_type = LOCKING_ANDX_SHARED_LOCK,
+ .unlock_lock_type = 0,
+ .header_size = sizeof(struct smb_hdr),
+ .max_header_size = MAX_CIFS_HDR_SIZE,
+ .read_rsp_size = sizeof(READ_RSP),
+};
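
The two tables above are the dispatch targets for the server->ops and server->vals accesses used throughout the earlier hunks (compare_fids, send_cancel, shared_lock_type, and so on). As a hedged sketch of the intent -- the helper below is hypothetical and not part of this patch -- a server ends up bound to one ops/vals pair and every caller stays protocol-agnostic:

	/* hypothetical wiring; only the tables and the call sites exist in this patch */
	static void bind_smb1(struct TCP_Server_Info *server)
	{
		server->ops = &smb1_operations;
		server->vals = &smb1_values;
	}

	/* callers then never name the protocol directly, e.g.:   */
	/*   if (type & server->vals->shared_lock_type) ...       */
	/*   server->ops->send_cancel(server, buf, mid);          */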
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
new file mode 100644
index 000000000000..f065e89756a1
--- /dev/null
+++ b/fs/cifs/smb2ops.c
@@ -0,0 +1,27 @@
+/*
+ * SMB2 version specific operations
+ *
+ * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2 as published
+ * by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "cifsglob.h"
+
+struct smb_version_operations smb21_operations = {
+};
+
+struct smb_version_values smb21_values = {
+ .version_string = SMB21_VERSION_STRING,
+};
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 0961336513d5..1b36ffe6a47b 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -304,7 +304,8 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int optype,
static int
wait_for_free_request(struct TCP_Server_Info *server, const int optype)
{
- return wait_for_free_credits(server, optype, get_credits_field(server));
+ return wait_for_free_credits(server, optype,
+ server->ops->get_credits_field(server));
}
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
@@ -396,7 +397,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
rc = cifs_setup_async_request(server, iov, nvec, &mid);
if (rc) {
mutex_unlock(&server->srv_mutex);
- cifs_add_credits(server, 1);
+ add_credits(server, 1);
wake_up(&server->request_q);
return rc;
}
@@ -418,7 +419,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
return rc;
out_err:
delete_mid(mid);
- cifs_add_credits(server, 1);
+ add_credits(server, 1);
wake_up(&server->request_q);
return rc;
}
@@ -483,41 +484,11 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
return rc;
}
-/*
- * An NT cancel request header looks just like the original request except:
- *
- * The Command is SMB_COM_NT_CANCEL
- * The WordCount is zeroed out
- * The ByteCount is zeroed out
- *
- * This function mangles an existing request buffer into a
- * SMB_COM_NT_CANCEL request and then sends it.
- */
-static int
-send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
- struct mid_q_entry *mid)
+static inline int
+send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
{
- int rc = 0;
-
- /* -4 for RFC1001 length and +2 for BCC field */
- in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
- in_buf->Command = SMB_COM_NT_CANCEL;
- in_buf->WordCount = 0;
- put_bcc(0, in_buf);
-
- mutex_lock(&server->srv_mutex);
- rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
- if (rc) {
- mutex_unlock(&server->srv_mutex);
- return rc;
- }
- rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
- mutex_unlock(&server->srv_mutex);
-
- cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
- in_buf->Mid, rc);
-
- return rc;
+ return server->ops->send_cancel ?
+ server->ops->send_cancel(server, buf, mid) : 0;
}
int
@@ -544,7 +515,7 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
return map_smb_to_linux_error(mid->resp_buf, log_error);
}
-static int
+int
cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
unsigned int nvec, struct mid_q_entry **ret_mid)
{
@@ -607,12 +578,12 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
mutex_lock(&ses->server->srv_mutex);
- rc = cifs_setup_request(ses, iov, n_vec, &midQ);
+ rc = ses->server->ops->setup_request(ses, iov, n_vec, &midQ);
if (rc) {
mutex_unlock(&ses->server->srv_mutex);
cifs_small_buf_release(buf);
/* Update # of requests on wire to server */
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
@@ -636,13 +607,13 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
rc = wait_for_response(ses->server, midQ);
if (rc != 0) {
- send_nt_cancel(ses->server, (struct smb_hdr *)buf, midQ);
+ send_cancel(ses->server, buf, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
cifs_small_buf_release(buf);
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
spin_unlock(&GlobalMid_Lock);
@@ -652,7 +623,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0) {
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
@@ -670,14 +641,15 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
else
*pRespBufType = CIFS_SMALL_BUFFER;
- rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);
+ rc = ses->server->ops->check_receive(midQ, ses->server,
+ flags & CIFS_LOG_ERROR);
/* mark it so buf will not be freed by delete_mid */
if ((flags & CIFS_NO_RESP) == 0)
midQ->resp_buf = NULL;
out:
delete_mid(midQ);
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
@@ -727,7 +699,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
if (rc) {
mutex_unlock(&ses->server->srv_mutex);
/* Update # of requests on wire to server */
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
@@ -753,13 +725,13 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = wait_for_response(ses->server, midQ);
if (rc != 0) {
- send_nt_cancel(ses->server, in_buf, midQ);
+ send_cancel(ses->server, in_buf, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
spin_unlock(&GlobalMid_Lock);
@@ -767,7 +739,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0) {
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
@@ -783,7 +755,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_check_receive(midQ, ses->server, 0);
out:
delete_mid(midQ);
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
@@ -898,7 +870,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
if (in_buf->Command == SMB_COM_TRANSACTION2) {
/* POSIX lock. We send a NT_CANCEL SMB to cause the
blocking lock to return. */
- rc = send_nt_cancel(ses->server, in_buf, midQ);
+ rc = send_cancel(ses->server, in_buf, midQ);
if (rc) {
delete_mid(midQ);
return rc;
@@ -919,7 +891,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
rc = wait_for_response(ses->server, midQ);
if (rc) {
- send_nt_cancel(ses->server, in_buf, midQ);
+ send_cancel(ses->server, in_buf, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 2870597b5c9d..f1813120d753 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -244,7 +244,7 @@ static void coda_put_super(struct super_block *sb)
static void coda_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
coda_cache_clear_inode(inode);
}
diff --git a/fs/compat.c b/fs/compat.c
index 0781e619a62a..6556a9ce8a28 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -532,7 +532,7 @@ out:
ssize_t compat_rw_copy_check_uvector(int type,
const struct compat_iovec __user *uvector, unsigned long nr_segs,
unsigned long fast_segs, struct iovec *fast_pointer,
- struct iovec **ret_pointer, int check_access)
+ struct iovec **ret_pointer)
{
compat_ssize_t tot_len;
struct iovec *iov = *ret_pointer = fast_pointer;
@@ -579,7 +579,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
}
if (len < 0) /* size_t not fitting in compat_ssize_t .. */
goto out;
- if (check_access &&
+ if (type >= 0 &&
!access_ok(vrfy_dir(type), compat_ptr(buf), len)) {
ret = -EFAULT;
goto out;
@@ -1094,7 +1094,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
goto out;
tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs,
- UIO_FASTIOV, iovstack, &iov, 1);
+ UIO_FASTIOV, iovstack, &iov);
if (tot_len == 0) {
ret = 0;
goto out;
diff --git a/fs/dcache.c b/fs/dcache.c
index 8c1ab8fb5012..4435d8b32904 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3093,6 +3093,7 @@ static void __init dcache_init_early(void)
HASH_EARLY,
&d_hash_shift,
&d_hash_mask,
+ 0,
0);
for (loop = 0; loop < (1U << d_hash_shift); loop++)
@@ -3123,6 +3124,7 @@ static void __init dcache_init(void)
0,
&d_hash_shift,
&d_hash_mask,
+ 0,
0);
for (loop = 0; loop < (1U << d_hash_shift); loop++)
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 5dfafdd1dbd3..2340f6978d6e 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -20,6 +20,7 @@
#include <linux/namei.h>
#include <linux/debugfs.h>
#include <linux/io.h>
+#include <linux/slab.h>
static ssize_t default_read_file(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
@@ -520,6 +521,133 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
}
EXPORT_SYMBOL_GPL(debugfs_create_blob);
+struct array_data {
+ void *array;
+ u32 elements;
+};
+
+static int u32_array_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ return nonseekable_open(inode, file);
+}
+
+static size_t format_array(char *buf, size_t bufsize, const char *fmt,
+ u32 *array, u32 array_size)
+{
+ size_t ret = 0;
+ u32 i;
+
+ for (i = 0; i < array_size; i++) {
+ size_t len;
+
+ len = snprintf(buf, bufsize, fmt, array[i]);
+ len++; /* ' ' or '\n' */
+ ret += len;
+
+ if (buf) {
+ buf += len;
+ bufsize -= len;
+ buf[-1] = (i == array_size-1) ? '\n' : ' ';
+ }
+ }
+
+ ret++; /* \0 */
+ if (buf)
+ *buf = '\0';
+
+ return ret;
+}
+
+static char *format_array_alloc(const char *fmt, u32 *array,
+ u32 array_size)
+{
+ size_t len = format_array(NULL, 0, fmt, array, array_size);
+ char *ret;
+
+ ret = kmalloc(len, GFP_KERNEL);
+ if (ret == NULL)
+ return NULL;
+
+ format_array(ret, len, fmt, array, array_size);
+ return ret;
+}
+
+static ssize_t u32_array_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct array_data *data = inode->i_private;
+ size_t size;
+
+ if (*ppos == 0) {
+ if (file->private_data) {
+ kfree(file->private_data);
+ file->private_data = NULL;
+ }
+
+ file->private_data = format_array_alloc("%u", data->array,
+ data->elements);
+ }
+
+ size = 0;
+ if (file->private_data)
+ size = strlen(file->private_data);
+
+ return simple_read_from_buffer(buf, len, ppos,
+ file->private_data, size);
+}
+
+static int u32_array_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+
+ return 0;
+}
+
+static const struct file_operations u32_array_fops = {
+ .owner = THIS_MODULE,
+ .open = u32_array_open,
+ .release = u32_array_release,
+ .read = u32_array_read,
+ .llseek = no_llseek,
+};
+
+/**
+ * debugfs_create_u32_array - create a debugfs file that is used to read u32
+ * array.
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have.
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is %NULL, then the
+ * file will be created in the root of the debugfs filesystem.
+ * @array: u32 array that provides data.
+ * @elements: total number of elements in the array.
+ *
+ * This function creates a file in debugfs with the given name that exports
+ * @array as data. The file can be read if @mode permits it; writing and
+ * seeking within the file are not supported.
+ * Once the array is created, its size cannot be changed.
+ *
+ * The function returns a pointer to dentry on success. If debugfs is not
+ * enabled in the kernel, the value -%ENODEV will be returned.
+ */
+struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
+ struct dentry *parent,
+ u32 *array, u32 elements)
+{
+ struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
+
+ if (data == NULL)
+ return NULL;
+
+ data->array = array;
+ data->elements = elements;
+
+ return debugfs_create_file(name, mode, parent, data, &u32_array_fops);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_u32_array);
+
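
A brief usage sketch for the new helper; the file name, permissions, parent dentry, and backing array below are made up for illustration:

	/* illustrative only: expose a read-only u32 array under debugfs */
	static u32 sample_latencies[16];

	static struct dentry *sample_debugfs_init(struct dentry *parent)
	{
		/* readable via read(2); writes and seeks are not supported */
		return debugfs_create_u32_array("latencies", 0444, parent,
						sample_latencies,
						ARRAY_SIZE(sample_latencies));
	}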
#ifdef CONFIG_HAS_IOMEM
/*
diff --git a/fs/direct-io.c b/fs/direct-io.c
index f4aadd15b613..0c85fae37666 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -145,50 +145,6 @@ struct dio {
static struct kmem_cache *dio_cache __read_mostly;
-static void __inode_dio_wait(struct inode *inode)
-{
- wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
- DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
-
- do {
- prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
- if (atomic_read(&inode->i_dio_count))
- schedule();
- } while (atomic_read(&inode->i_dio_count));
- finish_wait(wq, &q.wait);
-}
-
-/**
- * inode_dio_wait - wait for outstanding DIO requests to finish
- * @inode: inode to wait for
- *
- * Waits for all pending direct I/O requests to finish so that we can
- * proceed with a truncate or equivalent operation.
- *
- * Must be called under a lock that serializes taking new references
- * to i_dio_count, usually by inode->i_mutex.
- */
-void inode_dio_wait(struct inode *inode)
-{
- if (atomic_read(&inode->i_dio_count))
- __inode_dio_wait(inode);
-}
-EXPORT_SYMBOL(inode_dio_wait);
-
-/*
- * inode_dio_done - signal finish of a direct I/O requests
- * @inode: inode the direct I/O happens on
- *
- * This is called once we've finished processing a direct I/O request,
- * and is used to wake up callers waiting for direct I/O to be quiesced.
- */
-void inode_dio_done(struct inode *inode)
-{
- if (atomic_dec_and_test(&inode->i_dio_count))
- wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
-}
-EXPORT_SYMBOL(inode_dio_done);
-
/*
* How many pages are in the queue?
*/
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 2dd946b636d2..e879cf8ff0b1 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -133,7 +133,7 @@ static int ecryptfs_statfs(struct dentry *dentry, struct kstatfs *buf)
static void ecryptfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
iput(ecryptfs_inode_to_lower(inode));
}
diff --git a/fs/eventfd.c b/fs/eventfd.c
index dba15fecf23e..d81b9f654086 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -46,20 +46,16 @@ struct eventfd_ctx {
* value, and we signal this as an overflow condition by returning a POLLERR
* to poll(2).
*
- * Returns @n in case of success, a non-negative number lower than @n in case
- * of overflow, or the following error codes:
- *
- * -EINVAL : The value of @n is negative.
+ * Returns the amount by which the counter was incremented. This will be less
+ * than @n if the counter has overflowed.
*/
-int eventfd_signal(struct eventfd_ctx *ctx, int n)
+__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
unsigned long flags;
- if (n < 0)
- return -EINVAL;
spin_lock_irqsave(&ctx->wqh.lock, flags);
if (ULLONG_MAX - ctx->count < n)
- n = (int) (ULLONG_MAX - ctx->count);
+ n = ULLONG_MAX - ctx->count;
ctx->count += n;
if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, POLLIN);
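
A worked example of the new clamping behaviour, mirroring the locked arithmetic above; the helper and the numbers are illustrative only, not kernel code:

	/* computes what eventfd_signal() would add and return for a given count */
	static __u64 eventfd_signal_would_return(__u64 count, __u64 n)
	{
		if (ULLONG_MAX - count < n)
			n = ULLONG_MAX - count;
		/* e.g. count = ULLONG_MAX - 2, n = 5  ->  returns 2 (< 5),
		 * telling the caller the counter has overflowed */
		return n;
	}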
diff --git a/fs/exofs/Kbuild b/fs/exofs/Kbuild
index 352ba149d23e..389ba8312d5d 100644
--- a/fs/exofs/Kbuild
+++ b/fs/exofs/Kbuild
@@ -16,5 +16,5 @@
libore-y := ore.o ore_raid.o
obj-$(CONFIG_ORE) += libore.o
-exofs-y := inode.o file.o symlink.o namei.o dir.o super.o
+exofs-y := inode.o file.o symlink.o namei.o dir.o super.o sys.o
obj-$(CONFIG_EXOFS_FS) += exofs.o
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index ca9d49665ef6..fffe86fd7a42 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -56,6 +56,9 @@
struct exofs_dev {
struct ore_dev ored;
unsigned did;
+ unsigned urilen;
+ uint8_t *uri;
+ struct kobject ed_kobj;
};
/*
* our extension to the in-memory superblock
@@ -73,6 +76,7 @@ struct exofs_sb_info {
struct ore_layout layout; /* Default files layout */
struct ore_comp one_comp; /* id & cred of partition id=0*/
struct ore_components oc; /* comps for the partition */
+ struct kobject s_kobj; /* holds per-sbi kobject */
};
/*
@@ -176,6 +180,16 @@ void exofs_make_credential(u8 cred_a[OSD_CAP_LEN],
const struct osd_obj_id *obj);
int exofs_sbi_write_stats(struct exofs_sb_info *sbi);
+/* sys.c */
+int exofs_sysfs_init(void);
+void exofs_sysfs_uninit(void);
+int exofs_sysfs_sb_add(struct exofs_sb_info *sbi,
+ struct exofs_dt_device_info *dt_dev);
+void exofs_sysfs_sb_del(struct exofs_sb_info *sbi);
+int exofs_sysfs_odev_add(struct exofs_dev *edev,
+ struct exofs_sb_info *sbi);
+void exofs_sysfs_dbg_print(void);
+
/*********************
* operation vectors *
*********************/
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index ea5e1f97806a..5badb0c039de 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -1473,7 +1473,7 @@ void exofs_evict_inode(struct inode *inode)
goto no_delete;
inode->i_size = 0;
- end_writeback(inode);
+ clear_inode(inode);
/* if we are deleting an obj that hasn't been created yet, wait.
* This also makes sure that create_done cannot be called with an
@@ -1503,5 +1503,5 @@ void exofs_evict_inode(struct inode *inode)
return;
no_delete:
- end_writeback(inode);
+ clear_inode(inode);
}
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 735ca06430ac..433783624d10 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -472,6 +472,7 @@ static void exofs_put_super(struct super_block *sb)
_exofs_print_device("Unmounting", NULL, ore_comp_dev(&sbi->oc, 0),
sbi->one_comp.obj.partition);
+ exofs_sysfs_sb_del(sbi);
bdi_destroy(&sbi->bdi);
exofs_free_sbi(sbi);
sb->s_fs_info = NULL;
@@ -632,6 +633,12 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
memcpy(&sbi->oc.ods[numdevs], &sbi->oc.ods[0],
(numdevs - 1) * sizeof(sbi->oc.ods[0]));
+ /* create a sysfs subdir under which we put the device table
+ * and cluster layout. A superblock is identified by the string:
+ * "dev[0].osdname"_"pid"
+ */
+ exofs_sysfs_sb_add(sbi, &dt->dt_dev_table[0]);
+
for (i = 0; i < numdevs; i++) {
struct exofs_fscb fscb;
struct osd_dev_info odi;
@@ -657,6 +664,7 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
eds[i].ored.od = fscb_od;
++sbi->oc.numdevs;
fscb_od = NULL;
+ exofs_sysfs_odev_add(&eds[i], sbi);
continue;
}
@@ -682,6 +690,7 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
odi.osdname);
goto out;
}
+ exofs_sysfs_odev_add(&eds[i], sbi);
/* TODO: verify other information is correct and FS-uuid
* matches. Benny what did you say about device table
@@ -745,7 +754,6 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
sbi->one_comp.obj.partition = opts->pid;
sbi->one_comp.obj.id = 0;
exofs_make_credential(sbi->one_comp.cred, &sbi->one_comp.obj);
- sbi->oc.numdevs = 1;
sbi->oc.single_comp = EC_SINGLE_COMP;
sbi->oc.comps = &sbi->one_comp;
@@ -804,6 +812,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
ore_comp_set_dev(&sbi->oc, 0, od);
+ sbi->oc.numdevs = 1;
}
__sbi_read_stats(sbi);
@@ -844,6 +853,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
}
+ exofs_sysfs_dbg_print();
_exofs_print_device("Mounting", opts->dev_name,
ore_comp_dev(&sbi->oc, 0),
sbi->one_comp.obj.partition);
@@ -1023,6 +1033,9 @@ static int __init init_exofs(void)
if (err)
goto out_d;
+ /* We don't fail if sysfs creation failed */
+ exofs_sysfs_init();
+
return 0;
out_d:
destroy_inodecache();
@@ -1032,6 +1045,7 @@ out:
static void __exit exit_exofs(void)
{
+ exofs_sysfs_uninit();
unregister_filesystem(&exofs_type);
destroy_inodecache();
}
diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c
new file mode 100644
index 000000000000..e32bc919e4e3
--- /dev/null
+++ b/fs/exofs/sys.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2012
+ * Sachin Bhamare <sbhamare@panasas.com>
+ * Boaz Harrosh <bharrosh@panasas.com>
+ *
+ * This file is part of exofs.
+ *
+ * exofs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published by
+ * the Free Software Foundation.
+ *
+ * exofs is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with exofs; if not, write to the:
+ * Free Software Foundation <licensing@fsf.org>
+ */
+
+#include <linux/kobject.h>
+#include <linux/device.h>
+
+#include "exofs.h"
+
+struct odev_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct exofs_dev *, char *);
+ ssize_t (*store)(struct exofs_dev *, const char *, size_t);
+};
+
+static ssize_t odev_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct exofs_dev *edp = container_of(kobj, struct exofs_dev, ed_kobj);
+ struct odev_attr *a = container_of(attr, struct odev_attr, attr);
+
+ return a->show ? a->show(edp, buf) : 0;
+}
+
+static ssize_t odev_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t len)
+{
+ struct exofs_dev *edp = container_of(kobj, struct exofs_dev, ed_kobj);
+ struct odev_attr *a = container_of(attr, struct odev_attr, attr);
+
+ return a->store ? a->store(edp, buf, len) : len;
+}
+
+static const struct sysfs_ops odev_attr_ops = {
+ .show = odev_attr_show,
+ .store = odev_attr_store,
+};
+
+
+static struct kset *exofs_kset;
+
+static ssize_t osdname_show(struct exofs_dev *edp, char *buf)
+{
+ struct osd_dev *odev = edp->ored.od;
+ const struct osd_dev_info *odi = osduld_device_info(odev);
+
+ return snprintf(buf, odi->osdname_len + 1, "%s", odi->osdname);
+}
+
+static ssize_t systemid_show(struct exofs_dev *edp, char *buf)
+{
+ struct osd_dev *odev = edp->ored.od;
+ const struct osd_dev_info *odi = osduld_device_info(odev);
+
+ memcpy(buf, odi->systemid, odi->systemid_len);
+ return odi->systemid_len;
+}
+
+static ssize_t uri_show(struct exofs_dev *edp, char *buf)
+{
+ return snprintf(buf, edp->urilen, "%s", edp->uri);
+}
+
+static ssize_t uri_store(struct exofs_dev *edp, const char *buf, size_t len)
+{
+ edp->urilen = strlen(buf) + 1;
+ edp->uri = krealloc(edp->uri, edp->urilen, GFP_KERNEL);
+ strncpy(edp->uri, buf, edp->urilen);
+ return edp->urilen;
+}
+
+#define OSD_ATTR(name, mode, show, store) \
+ static struct odev_attr odev_attr_##name = \
+ __ATTR(name, mode, show, store)
+
+OSD_ATTR(osdname, S_IRUGO, osdname_show, NULL);
+OSD_ATTR(systemid, S_IRUGO, systemid_show, NULL);
+OSD_ATTR(uri, S_IRWXU, uri_show, uri_store);
+
+static struct attribute *odev_attrs[] = {
+ &odev_attr_osdname.attr,
+ &odev_attr_systemid.attr,
+ &odev_attr_uri.attr,
+ NULL,
+};
+
+static struct kobj_type odev_ktype = {
+ .default_attrs = odev_attrs,
+ .sysfs_ops = &odev_attr_ops,
+};
+
+static struct kobj_type uuid_ktype = {
+};
+
+void exofs_sysfs_dbg_print(void)
+{
+#ifdef CONFIG_EXOFS_DEBUG
+ struct kobject *k_name, *k_tmp;
+
+ list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) {
+ printk(KERN_INFO "%s: name %s ref %d\n",
+ __func__, kobject_name(k_name),
+ (int)atomic_read(&k_name->kref.refcount));
+ }
+#endif
+}
+/*
+ * This function removes all kobjects under exofs_kset
+ * At the end of it, exofs_kset kobject will have a refcount
+ * of 1 which gets decremented only on exofs module unload
+ */
+void exofs_sysfs_sb_del(struct exofs_sb_info *sbi)
+{
+ struct kobject *k_name, *k_tmp;
+ struct kobject *s_kobj = &sbi->s_kobj;
+
+ list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) {
+ /* Remove all that are children of this SBI */
+ if (k_name->parent == s_kobj)
+ kobject_put(k_name);
+ }
+ kobject_put(s_kobj);
+}
+
+/*
+ * This function creates sysfs entries to hold the current exofs cluster
+ * instance (uniquely identified by osdname,pid tuple).
+ * This function gets called once per exofs mount instance.
+ */
+int exofs_sysfs_sb_add(struct exofs_sb_info *sbi,
+ struct exofs_dt_device_info *dt_dev)
+{
+ struct kobject *s_kobj;
+ int retval = 0;
+ uint64_t pid = sbi->one_comp.obj.partition;
+
+ /* allocate new uuid dirent */
+ s_kobj = &sbi->s_kobj;
+ s_kobj->kset = exofs_kset;
+ retval = kobject_init_and_add(s_kobj, &uuid_ktype,
+ &exofs_kset->kobj, "%s_%llx", dt_dev->osdname, pid);
+ if (retval) {
+ EXOFS_ERR("ERROR: Failed to create sysfs entry for "
+ "uuid-%s_%llx => %d\n", dt_dev->osdname, pid, retval);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int exofs_sysfs_odev_add(struct exofs_dev *edev, struct exofs_sb_info *sbi)
+{
+ struct kobject *d_kobj;
+ int retval = 0;
+
+ /* create an osd device group which contains the following attributes
+ * osdname, systemid & uri
+ */
+ d_kobj = &edev->ed_kobj;
+ d_kobj->kset = exofs_kset;
+ retval = kobject_init_and_add(d_kobj, &odev_ktype,
+ &sbi->s_kobj, "dev%u", edev->did);
+ if (retval) {
+ EXOFS_ERR("ERROR: Failed to create sysfs entry for "
+ "device dev%u\n", edev->did);
+ return retval;
+ }
+ return 0;
+}
+
+int exofs_sysfs_init(void)
+{
+ exofs_kset = kset_create_and_add("exofs", NULL, fs_kobj);
+ if (!exofs_kset) {
+ EXOFS_ERR("ERROR: kset_create_and_add exofs failed\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void exofs_sysfs_uninit(void)
+{
+ kset_unregister(exofs_kset);
+}
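
The new sys.c above builds a small sysfs hierarchy: a kset named "exofs" registered under fs_kobj, one "<osdname>_<pid>" kobject per mounted cluster, and one "dev<N>" kobject per OSD carrying the osdname, systemid and uri attributes - i.e. paths of the form /sys/fs/exofs/<osdname>_<pid>/dev0/osdname. A minimal userspace reader as a usage sketch (the paths are inferred from the kobject names above; illustration only):

    /* Read the osdname attribute of device 0 of one exofs cluster dir. */
    #include <stdio.h>

    int main(int argc, char **argv)
    {
            char path[512], buf[256];
            FILE *f;

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <osdname>_<pid>\n", argv[0]);
                    return 1;
            }
            snprintf(path, sizeof(path),
                     "/sys/fs/exofs/%s/dev0/osdname", argv[1]);
            f = fopen(path, "r");
            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("dev0 osdname: %s\n", buf);
            fclose(f);
            return 0;
    }
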
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 030c6d277e14..1c3613998862 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -165,7 +165,6 @@ static void release_blocks(struct super_block *sb, int count)
struct ext2_sb_info *sbi = EXT2_SB(sb);
percpu_counter_add(&sbi->s_freeblocks_counter, count);
- sb->s_dirt = 1;
}
}
@@ -180,7 +179,6 @@ static void group_adjust_blocks(struct super_block *sb, int group_no,
free_blocks = le16_to_cpu(desc->bg_free_blocks_count);
desc->bg_free_blocks_count = cpu_to_le16(free_blocks + count);
spin_unlock(sb_bgl_lock(sbi, group_no));
- sb->s_dirt = 1;
mark_buffer_dirty(bh);
}
}
@@ -479,7 +477,7 @@ void ext2_discard_reservation(struct inode *inode)
}
/**
- * ext2_free_blocks_sb() -- Free given blocks and update quota and i_blocks
+ * ext2_free_blocks() -- Free given blocks and update quota and i_blocks
* @inode: inode
* @block: start physical block to free
* @count: number of blocks to free
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 8b15cf8cef37..c13eb7b91a11 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -81,7 +81,6 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir)
spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));
if (dir)
percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter);
- sb->s_dirt = 1;
mark_buffer_dirty(bh);
}
@@ -543,7 +542,6 @@ got:
}
spin_unlock(sb_bgl_lock(sbi, group));
- sb->s_dirt = 1;
mark_buffer_dirty(bh2);
if (test_opt(sb, GRPID)) {
inode->i_mode = mode;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index f9fa95f8443d..264d315f6c47 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -90,7 +90,7 @@ void ext2_evict_inode(struct inode * inode)
}
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
ext2_discard_reservation(inode);
rsv = EXT2_I(inode)->i_block_alloc_info;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 38f816071ddb..b3621cb7ea31 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -130,9 +130,6 @@ static void ext2_put_super (struct super_block * sb)
dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
- if (sb->s_dirt)
- ext2_write_super(sb);
-
ext2_xattr_put_super(sb);
if (!(sb->s_flags & MS_RDONLY)) {
struct ext2_super_block *es = sbi->s_es;
@@ -307,7 +304,6 @@ static const struct super_operations ext2_sops = {
.write_inode = ext2_write_inode,
.evict_inode = ext2_evict_inode,
.put_super = ext2_put_super,
- .write_super = ext2_write_super,
.sync_fs = ext2_sync_fs,
.statfs = ext2_statfs,
.remount_fs = ext2_remount,
@@ -358,11 +354,6 @@ static struct dentry *ext2_fh_to_parent(struct super_block *sb, struct fid *fid,
ext2_nfs_get_inode);
}
-/* Yes, most of these are left as NULL!!
- * A NULL value implies the default, which works with ext2-like file
- * systems, but can be improved upon.
- * Currently only get_parent is required.
- */
static const struct export_operations ext2_export_ops = {
.fh_to_dentry = ext2_fh_to_dentry,
.fh_to_parent = ext2_fh_to_parent,
@@ -1176,7 +1167,6 @@ static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
if (wait)
sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
- sb->s_dirt = 0;
}
/*
@@ -1209,8 +1199,6 @@ void ext2_write_super(struct super_block *sb)
{
if (!(sb->s_flags & MS_RDONLY))
ext2_sync_fs(sb, 1);
- else
- sb->s_dirt = 0;
}
static int ext2_remount (struct super_block * sb, int * flags, char * data)
@@ -1456,7 +1444,6 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
struct buffer_head tmp_bh;
struct buffer_head *bh;
- mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;
@@ -1486,16 +1473,13 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
blk++;
}
out:
- if (len == towrite) {
- mutex_unlock(&inode->i_mutex);
+ if (len == towrite)
return err;
- }
if (inode->i_size < off+len-towrite)
i_size_write(inode, off+len-towrite);
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
- mutex_unlock(&inode->i_mutex);
return len - towrite;
}
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 6dcafc7efdfd..b6754dbbce3c 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -339,7 +339,6 @@ static void ext2_xattr_update_super_block(struct super_block *sb)
spin_lock(&EXT2_SB(sb)->s_lock);
EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
spin_unlock(&EXT2_SB(sb)->s_lock);
- sb->s_dirt = 1;
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}
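
The ext2 hunks above remove every sb->s_dirt assignment and the ->write_super hook: the superblock buffer is kept current by marking it dirty where it is modified, the flusher thread writes dirty buffers out on its own, and ext2_sync_fs()/ext2_put_super() still write the superblock explicitly at sync and unmount time. A sketch of the resulting pattern (hypothetical helper, not how ext2 tracks its counters, which live in percpu counters):

    #include <linux/buffer_head.h>
    #include "ext2.h"

    /* Change an on-disk superblock field and let writeback persist it;
     * no sb->s_dirt, no periodic ->write_super pass. */
    static void example_update_sb_state(struct super_block *sb, u16 state)
    {
            struct ext2_sb_info *sbi = EXT2_SB(sb);

            spin_lock(&sbi->s_lock);
            sbi->s_es->s_state = cpu_to_le16(state);
            spin_unlock(&sbi->s_lock);
            mark_buffer_dirty(sbi->s_sbh);
    }
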
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index cc761ad8fa57..92490e9f85ca 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -21,30 +21,15 @@
*
*/
+#include <linux/compat.h>
#include "ext3.h"
static unsigned char ext3_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
-static int ext3_readdir(struct file *, void *, filldir_t);
static int ext3_dx_readdir(struct file * filp,
void * dirent, filldir_t filldir);
-static int ext3_release_dir (struct inode * inode,
- struct file * filp);
-
-const struct file_operations ext3_dir_operations = {
- .llseek = generic_file_llseek,
- .read = generic_read_dir,
- .readdir = ext3_readdir, /* we take BKL. needed?*/
- .unlocked_ioctl = ext3_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ext3_compat_ioctl,
-#endif
- .fsync = ext3_sync_file, /* BKL held */
- .release = ext3_release_dir,
-};
-
static unsigned char get_dtype(struct super_block *sb, int filetype)
{
@@ -55,6 +40,25 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
return (ext3_filetype_table[filetype]);
}
+/**
+ * Check if the given dir-inode refers to an htree-indexed directory
+ * (or a directory which could potentially get converted to use htree
+ * indexing).
+ *
+ * Return 1 if it is a dx dir, 0 if not
+ */
+static int is_dx_dir(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+
+ if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
+ EXT3_FEATURE_COMPAT_DIR_INDEX) &&
+ ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
+ ((inode->i_size >> sb->s_blocksize_bits) == 1)))
+ return 1;
+
+ return 0;
+}
int ext3_check_dir_entry (const char * function, struct inode * dir,
struct ext3_dir_entry_2 * de,
@@ -94,18 +98,13 @@ static int ext3_readdir(struct file * filp,
unsigned long offset;
int i, stored;
struct ext3_dir_entry_2 *de;
- struct super_block *sb;
int err;
struct inode *inode = filp->f_path.dentry->d_inode;
+ struct super_block *sb = inode->i_sb;
int ret = 0;
int dir_has_error = 0;
- sb = inode->i_sb;
-
- if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
- EXT3_FEATURE_COMPAT_DIR_INDEX) &&
- ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
- ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
+ if (is_dx_dir(inode)) {
err = ext3_dx_readdir(filp, dirent, filldir);
if (err != ERR_BAD_DX_DIR) {
ret = err;
@@ -227,22 +226,87 @@ out:
return ret;
}
+static inline int is_32bit_api(void)
+{
+#ifdef CONFIG_COMPAT
+ return is_compat_task();
+#else
+ return (BITS_PER_LONG == 32);
+#endif
+}
+
/*
* These functions convert from the major/minor hash to an f_pos
- * value.
+ * value for dx directories
*
- * Currently we only use major hash numer. This is unfortunate, but
- * on 32-bit machines, the same VFS interface is used for lseek and
- * llseek, so if we use the 64 bit offset, then the 32-bit versions of
- * lseek/telldir/seekdir will blow out spectacularly, and from within
- * the ext2 low-level routine, we don't know if we're being called by
- * a 64-bit version of the system call or the 32-bit version of the
- * system call. Worse yet, NFSv2 only allows for a 32-bit readdir
- * cookie. Sigh.
+ * Upper layer (for example NFS) should specify FMODE_32BITHASH or
+ * FMODE_64BITHASH explicitly. On the other hand, we allow ext3 to be mounted
+ * directly on both 32-bit and 64-bit nodes, under such case, neither
+ * FMODE_32BITHASH nor FMODE_64BITHASH is specified.
*/
-#define hash2pos(major, minor) (major >> 1)
-#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff)
-#define pos2min_hash(pos) (0)
+static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
+{
+ if ((filp->f_mode & FMODE_32BITHASH) ||
+ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+ return major >> 1;
+ else
+ return ((__u64)(major >> 1) << 32) | (__u64)minor;
+}
+
+static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
+{
+ if ((filp->f_mode & FMODE_32BITHASH) ||
+ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+ return (pos << 1) & 0xffffffff;
+ else
+ return ((pos >> 32) << 1) & 0xffffffff;
+}
+
+static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
+{
+ if ((filp->f_mode & FMODE_32BITHASH) ||
+ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+ return 0;
+ else
+ return pos & 0xffffffff;
+}
+
+/*
+ * Return 32- or 64-bit end-of-file for dx directories
+ */
+static inline loff_t ext3_get_htree_eof(struct file *filp)
+{
+ if ((filp->f_mode & FMODE_32BITHASH) ||
+ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+ return EXT3_HTREE_EOF_32BIT;
+ else
+ return EXT3_HTREE_EOF_64BIT;
+}
+
+
+/*
+ * ext3_dir_llseek() calls generic_file_llseek[_size]() to handle both
+ * non-htree and htree directories, where the "offset" is in terms
+ * of the filename hash value instead of the byte offset.
+ *
+ * Because we may return a 64-bit hash that is well beyond s_maxbytes,
+ * we need to pass the max hash as the maximum allowable offset in
+ * the htree directory case.
+ *
+ * NOTE: offsets obtained *before* ext3_set_inode_flag(dir, EXT3_INODE_INDEX)
+ * will be invalid once the directory is converted into a dx directory
+ */
+loff_t ext3_dir_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct inode *inode = file->f_mapping->host;
+ int dx_dir = is_dx_dir(inode);
+
+ if (likely(dx_dir))
+ return generic_file_llseek_size(file, offset, origin,
+ ext3_get_htree_eof(file));
+ else
+ return generic_file_llseek(file, offset, origin);
+}
/*
* This structure holds the nodes of the red-black tree used to store
@@ -303,15 +367,16 @@ static void free_rb_tree_fname(struct rb_root *root)
}
-static struct dir_private_info *ext3_htree_create_dir_info(loff_t pos)
+static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp,
+ loff_t pos)
{
struct dir_private_info *p;
p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
if (!p)
return NULL;
- p->curr_hash = pos2maj_hash(pos);
- p->curr_minor_hash = pos2min_hash(pos);
+ p->curr_hash = pos2maj_hash(filp, pos);
+ p->curr_minor_hash = pos2min_hash(filp, pos);
return p;
}
@@ -401,7 +466,7 @@ static int call_filldir(struct file * filp, void * dirent,
printk("call_filldir: called with null fname?!?\n");
return 0;
}
- curr_pos = hash2pos(fname->hash, fname->minor_hash);
+ curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
while (fname) {
error = filldir(dirent, fname->name,
fname->name_len, curr_pos,
@@ -426,13 +491,13 @@ static int ext3_dx_readdir(struct file * filp,
int ret;
if (!info) {
- info = ext3_htree_create_dir_info(filp->f_pos);
+ info = ext3_htree_create_dir_info(filp, filp->f_pos);
if (!info)
return -ENOMEM;
filp->private_data = info;
}
- if (filp->f_pos == EXT3_HTREE_EOF)
+ if (filp->f_pos == ext3_get_htree_eof(filp))
return 0; /* EOF */
/* Some one has messed with f_pos; reset the world */
@@ -440,8 +505,8 @@ static int ext3_dx_readdir(struct file * filp,
free_rb_tree_fname(&info->root);
info->curr_node = NULL;
info->extra_fname = NULL;
- info->curr_hash = pos2maj_hash(filp->f_pos);
- info->curr_minor_hash = pos2min_hash(filp->f_pos);
+ info->curr_hash = pos2maj_hash(filp, filp->f_pos);
+ info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
}
/*
@@ -473,7 +538,7 @@ static int ext3_dx_readdir(struct file * filp,
if (ret < 0)
return ret;
if (ret == 0) {
- filp->f_pos = EXT3_HTREE_EOF;
+ filp->f_pos = ext3_get_htree_eof(filp);
break;
}
info->curr_node = rb_first(&info->root);
@@ -493,7 +558,7 @@ static int ext3_dx_readdir(struct file * filp,
info->curr_minor_hash = fname->minor_hash;
} else {
if (info->next_hash == ~0) {
- filp->f_pos = EXT3_HTREE_EOF;
+ filp->f_pos = ext3_get_htree_eof(filp);
break;
}
info->curr_hash = info->next_hash;
@@ -512,3 +577,15 @@ static int ext3_release_dir (struct inode * inode, struct file * filp)
return 0;
}
+
+const struct file_operations ext3_dir_operations = {
+ .llseek = ext3_dir_llseek,
+ .read = generic_read_dir,
+ .readdir = ext3_readdir,
+ .unlocked_ioctl = ext3_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ext3_compat_ioctl,
+#endif
+ .fsync = ext3_sync_file,
+ .release = ext3_release_dir,
+};
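
The hash2pos()/pos2maj_hash()/pos2min_hash() helpers added above pack the major and minor directory hashes into a single f_pos for 64-bit capable callers, while 32-bit tasks and FMODE_32BITHASH users keep the old major-only encoding. Since ext3fs_dirhash() always clears bit 0 of the major hash, the 64-bit packing round-trips exactly. A standalone userspace re-implementation of the 64-bit branch, for illustration only:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t hash2pos64(uint32_t major, uint32_t minor)
    {
            return ((uint64_t)(major >> 1) << 32) | minor;
    }

    static uint32_t pos2maj_hash64(uint64_t pos)
    {
            return ((pos >> 32) << 1) & 0xffffffff;
    }

    static uint32_t pos2min_hash64(uint64_t pos)
    {
            return pos & 0xffffffff;
    }

    int main(void)
    {
            /* the dirhash always has bit 0 clear, so the round trip is exact */
            uint32_t major = 0xdeadbeee, minor = 0x12345678;
            uint64_t pos = hash2pos64(major, minor);

            printf("f_pos           = 0x%016llx\n", (unsigned long long)pos);
            printf("major recovered = 0x%08x\n", pos2maj_hash64(pos));
            printf("minor recovered = 0x%08x\n", pos2min_hash64(pos));
            return 0;
    }
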
diff --git a/fs/ext3/ext3.h b/fs/ext3/ext3.h
index 7977973a24f0..e85ff15a060e 100644
--- a/fs/ext3/ext3.h
+++ b/fs/ext3/ext3.h
@@ -920,7 +920,11 @@ struct dx_hash_info
u32 *seed;
};
-#define EXT3_HTREE_EOF 0x7fffffff
+
+/* 32 and 64 bit signed EOF for dx directories */
+#define EXT3_HTREE_EOF_32BIT ((1UL << (32 - 1)) - 1)
+#define EXT3_HTREE_EOF_64BIT ((1ULL << (64 - 1)) - 1)
+
/*
* Control parameters used by ext3_htree_next_block
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
index d10231ddcf8a..ede315cdf126 100644
--- a/fs/ext3/hash.c
+++ b/fs/ext3/hash.c
@@ -198,8 +198,8 @@ int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
return -1;
}
hash = hash & ~1;
- if (hash == (EXT3_HTREE_EOF << 1))
- hash = (EXT3_HTREE_EOF-1) << 1;
+ if (hash == (EXT3_HTREE_EOF_32BIT << 1))
+ hash = (EXT3_HTREE_EOF_32BIT - 1) << 1;
hinfo->hash = hash;
hinfo->minor_hash = minor_hash;
return 0;
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index e3c39e4cec19..082afd78b107 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -180,8 +180,7 @@ error_return:
* It's OK to put directory into a group unless
* it has too many directories already (max_dirs) or
* it has too few free inodes left (min_inodes) or
- * it has too few free blocks left (min_blocks) or
- * it's already running too large debt (max_debt).
+ * it has too few free blocks left (min_blocks).
* Parent's group is preferred, if it doesn't satisfy these
* conditions we search cyclically through the rest. If none
* of the groups look good we just look for a group with more
@@ -191,21 +190,16 @@ error_return:
* when we allocate an inode, within 0--255.
*/
-#define INODE_COST 64
-#define BLOCK_COST 256
-
static int find_group_orlov(struct super_block *sb, struct inode *parent)
{
int parent_group = EXT3_I(parent)->i_block_group;
struct ext3_sb_info *sbi = EXT3_SB(sb);
- struct ext3_super_block *es = sbi->s_es;
int ngroups = sbi->s_groups_count;
int inodes_per_group = EXT3_INODES_PER_GROUP(sb);
unsigned int freei, avefreei;
ext3_fsblk_t freeb, avefreeb;
- ext3_fsblk_t blocks_per_dir;
unsigned int ndirs;
- int max_debt, max_dirs, min_inodes;
+ int max_dirs, min_inodes;
ext3_grpblk_t min_blocks;
int group = -1, i;
struct ext3_group_desc *desc;
@@ -242,20 +236,10 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
goto fallback;
}
- blocks_per_dir = (le32_to_cpu(es->s_blocks_count) - freeb) / ndirs;
-
max_dirs = ndirs / ngroups + inodes_per_group / 16;
min_inodes = avefreei - inodes_per_group / 4;
min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4;
- max_debt = EXT3_BLOCKS_PER_GROUP(sb) / max(blocks_per_dir, (ext3_fsblk_t)BLOCK_COST);
- if (max_debt * INODE_COST > inodes_per_group)
- max_debt = inodes_per_group / INODE_COST;
- if (max_debt > 255)
- max_debt = 255;
- if (max_debt == 0)
- max_debt = 1;
-
for (i = 0; i < ngroups; i++) {
group = (parent_group + i) % ngroups;
desc = ext3_get_group_desc (sb, group, NULL);
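
With the max_debt/INODE_COST/BLOCK_COST heuristic removed, find_group_orlov() accepts a block group for a new directory on just the three thresholds visible in the context above: not too many directories, enough free inodes and enough free blocks (the exact comparisons are in the loop that follows, outside this hunk). A worked example of the thresholds with made-up numbers, for illustration only:

    #include <stdio.h>

    int main(void)
    {
            unsigned ngroups = 8, blocks_per_group = 32768, inodes_per_group = 8192;
            unsigned ndirs = 1000;                  /* existing directories */
            unsigned freei = 40000, freeb = 150000; /* fs-wide free inodes/blocks */
            unsigned avefreei = freei / ngroups;
            unsigned avefreeb = freeb / ngroups;

            unsigned max_dirs   = ndirs / ngroups + inodes_per_group / 16;
            int      min_inodes = avefreei - inodes_per_group / 4;
            int      min_blocks = avefreeb - blocks_per_group / 4;

            printf("a group qualifies if: used_dirs < %u, free_inodes >= %d, "
                   "free_blocks >= %d\n", max_dirs, min_inodes, min_blocks);
            return 0;
    }
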
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index a09790a412b1..9a4a5c48b1c9 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -272,18 +272,18 @@ void ext3_evict_inode (struct inode *inode)
if (ext3_mark_inode_dirty(handle, inode)) {
/* If that failed, just dquot_drop() and be done with that */
dquot_drop(inode);
- end_writeback(inode);
+ clear_inode(inode);
} else {
ext3_xattr_delete_inode(handle, inode);
dquot_free_inode(inode);
dquot_drop(inode);
- end_writeback(inode);
+ clear_inode(inode);
ext3_free_inode(handle, inode);
}
ext3_journal_stop(handle);
return;
no_delete:
- end_writeback(inode);
+ clear_inode(inode);
dquot_drop(inode);
}
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 94ef7e616129..8c3a44b7c375 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -3015,7 +3015,6 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
(unsigned long long)off, (unsigned long long)len);
return -EIO;
}
- mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
bh = ext3_bread(handle, inode, blk, 1, &err);
if (!bh)
goto out;
@@ -3039,10 +3038,8 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
}
brelse(bh);
out:
- if (err) {
- mutex_unlock(&inode->i_mutex);
+ if (err)
return err;
- }
if (inode->i_size < off + len) {
i_size_write(inode, off + len);
EXT3_I(inode)->i_disksize = inode->i_size;
@@ -3050,7 +3047,6 @@ out:
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
ext3_mark_inode_dirty(handle, inode);
- mutex_unlock(&inode->i_mutex);
return len;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 436b4223df66..35b5954489ee 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1007,7 +1007,7 @@ static void destroy_inodecache(void)
void ext4_clear_inode(struct inode *inode)
{
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
dquot_drop(inode);
ext4_discard_preallocations(inode);
if (EXT4_I(inode)->jinode) {
@@ -4758,7 +4758,6 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
return -EIO;
}
- mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
bh = ext4_bread(handle, inode, blk, 1, &err);
if (!bh)
goto out;
@@ -4774,16 +4773,13 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
err = ext4_handle_dirty_metadata(handle, NULL, bh);
brelse(bh);
out:
- if (err) {
- mutex_unlock(&inode->i_mutex);
+ if (err)
return err;
- }
if (inode->i_size < off + len) {
i_size_write(inode, off + len);
EXT4_I(inode)->i_disksize = inode->i_size;
ext4_mark_inode_dirty(handle, inode);
}
- mutex_unlock(&inode->i_mutex);
return len;
}
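
The ext2/ext3/ext4 quota_write hunks drop the nested I_MUTEX_QUOTA locking: writes to the quota file only arrive through ->quota_write, and dquot I/O is already serialized at a higher level by the quota code's dqio_mutex, so the extra i_mutex nesting adds nothing. A hedged sketch of how that contract could be asserted in code (assumed helper, not part of this patch):

    #include <linux/fs.h>
    #include <linux/quotaops.h>

    static ssize_t example_quota_write(struct super_block *sb, int type,
                                       const char *data, size_t len, loff_t off)
    {
            /* dquot I/O is serialized by dqio_mutex, not by i_mutex */
            WARN_ON_ONCE(!mutex_is_locked(&sb_dqopt(sb)->dqio_mutex));

            /* ... copy 'data' into the quota file's buffers, dirty them and
             * grow i_size if needed, as in the hunks above ... */
            return len;
    }
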
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index aca191bd5f8f..6eaa28c98ad1 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -98,8 +98,8 @@ next:
*bh = sb_bread(sb, phys);
if (*bh == NULL) {
- fat_msg(sb, KERN_ERR, "Directory bread(block %llu) failed",
- (llu)phys);
+ fat_msg_ratelimit(sb, KERN_ERR,
+ "Directory bread(block %llu) failed", (llu)phys);
/* skip this block */
*pos = (iblock + 1) << sb->s_blocksize_bits;
goto next;
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 66994f316e18..fc35c5c69136 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -82,6 +82,7 @@ struct msdos_sb_info {
int fatent_shift;
struct fatent_operations *fatent_ops;
struct inode *fat_inode;
+ struct inode *fsinfo_inode;
struct ratelimit_state ratelimit;
@@ -334,6 +335,11 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...);
__fat_fs_error(sb, __ratelimit(&MSDOS_SB(sb)->ratelimit), fmt , ## args)
__printf(3, 4) __cold
void fat_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+#define fat_msg_ratelimit(sb, level, fmt, args...) \
+ do { \
+ if (__ratelimit(&MSDOS_SB(sb)->ratelimit)) \
+ fat_msg(sb, level, fmt, ## args); \
+ } while (0)
extern int fat_clusters_flush(struct super_block *sb);
extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster);
extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
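
fat_msg_ratelimit() reuses the per-superblock ratelimit_state that fat_fs_error() already uses, so a burst of identical failures (such as the directory bread error in the dir.c hunk above) cannot flood the log. The same primitive in a generic form (hypothetical driver-style sketch, not from this patch):

    #include <linux/ratelimit.h>
    #include <linux/printk.h>

    /* DEFAULT_RATELIMIT_INTERVAL/BURST are 5*HZ and 10: at most 10 messages
     * per 5 seconds, the rest are suppressed and counted. */
    static DEFINE_RATELIMIT_STATE(bad_block_rs, DEFAULT_RATELIMIT_INTERVAL,
                                  DEFAULT_RATELIMIT_BURST);

    static void report_bad_block(unsigned long long blocknr)
    {
            if (__ratelimit(&bad_block_rs))
                    pr_err("bread(block %llu) failed\n", blocknr);
    }
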
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 2e81ac0df7e2..31f08ab62c56 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -308,6 +308,16 @@ void fat_ent_access_init(struct super_block *sb)
}
}
+static void mark_fsinfo_dirty(struct super_block *sb)
+{
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
+
+ if (sb->s_flags & MS_RDONLY || sbi->fat_bits != 32)
+ return;
+
+ __mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
+}
+
static inline int fat_ent_update_ptr(struct super_block *sb,
struct fat_entry *fatent,
int offset, sector_t blocknr)
@@ -498,7 +508,6 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
sbi->prev_free = entry;
if (sbi->free_clusters != -1)
sbi->free_clusters--;
- sb->s_dirt = 1;
cluster[idx_clus] = entry;
idx_clus++;
@@ -520,11 +529,11 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
/* Couldn't allocate the free entries */
sbi->free_clusters = 0;
sbi->free_clus_valid = 1;
- sb->s_dirt = 1;
err = -ENOSPC;
out:
unlock_fat(sbi);
+ mark_fsinfo_dirty(sb);
fatent_brelse(&fatent);
if (!err) {
if (inode_needs_sync(inode))
@@ -549,7 +558,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
struct fat_entry fatent;
struct buffer_head *bhs[MAX_BUF_PER_PAGE];
int i, err, nr_bhs;
- int first_cl = cluster;
+ int first_cl = cluster, dirty_fsinfo = 0;
nr_bhs = 0;
fatent_init(&fatent);
@@ -587,7 +596,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
ops->ent_put(&fatent, FAT_ENT_FREE);
if (sbi->free_clusters != -1) {
sbi->free_clusters++;
- sb->s_dirt = 1;
+ dirty_fsinfo = 1;
}
if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
@@ -617,6 +626,8 @@ error:
for (i = 0; i < nr_bhs; i++)
brelse(bhs[i]);
unlock_fat(sbi);
+ if (dirty_fsinfo)
+ mark_fsinfo_dirty(sb);
return err;
}
@@ -677,7 +688,7 @@ int fat_count_free_clusters(struct super_block *sb)
}
sbi->free_clusters = free;
sbi->free_clus_valid = 1;
- sb->s_dirt = 1;
+ mark_fsinfo_dirty(sb);
fatent_brelse(&fatent);
out:
unlock_fat(sbi);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 21687e31acc0..c2973ea5df9a 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -454,42 +454,16 @@ static void fat_evict_inode(struct inode *inode)
fat_truncate_blocks(inode, 0);
}
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
fat_cache_inval_inode(inode);
fat_detach(inode);
}
-static void fat_write_super(struct super_block *sb)
-{
- lock_super(sb);
- sb->s_dirt = 0;
-
- if (!(sb->s_flags & MS_RDONLY))
- fat_clusters_flush(sb);
- unlock_super(sb);
-}
-
-static int fat_sync_fs(struct super_block *sb, int wait)
-{
- int err = 0;
-
- if (sb->s_dirt) {
- lock_super(sb);
- sb->s_dirt = 0;
- err = fat_clusters_flush(sb);
- unlock_super(sb);
- }
-
- return err;
-}
-
static void fat_put_super(struct super_block *sb)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
- if (sb->s_dirt)
- fat_write_super(sb);
-
+ iput(sbi->fsinfo_inode);
iput(sbi->fat_inode);
unload_nls(sbi->nls_disk);
@@ -661,7 +635,18 @@ retry:
static int fat_write_inode(struct inode *inode, struct writeback_control *wbc)
{
- return __fat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+ int err;
+
+ if (inode->i_ino == MSDOS_FSINFO_INO) {
+ struct super_block *sb = inode->i_sb;
+
+ lock_super(sb);
+ err = fat_clusters_flush(sb);
+ unlock_super(sb);
+ } else
+ err = __fat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+
+ return err;
}
int fat_sync_inode(struct inode *inode)
@@ -678,8 +663,6 @@ static const struct super_operations fat_sops = {
.write_inode = fat_write_inode,
.evict_inode = fat_evict_inode,
.put_super = fat_put_super,
- .write_super = fat_write_super,
- .sync_fs = fat_sync_fs,
.statfs = fat_statfs,
.remount_fs = fat_remount,
@@ -1244,6 +1227,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
void (*setup)(struct super_block *))
{
struct inode *root_inode = NULL, *fat_inode = NULL;
+ struct inode *fsinfo_inode = NULL;
struct buffer_head *bh;
struct fat_boot_sector *b;
struct msdos_sb_info *sbi;
@@ -1490,6 +1474,14 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
goto out_fail;
MSDOS_I(fat_inode)->i_pos = 0;
sbi->fat_inode = fat_inode;
+
+ fsinfo_inode = new_inode(sb);
+ if (!fsinfo_inode)
+ goto out_fail;
+ fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
+ sbi->fsinfo_inode = fsinfo_inode;
+ insert_inode_hash(fsinfo_inode);
+
root_inode = new_inode(sb);
if (!root_inode)
goto out_fail;
@@ -1516,6 +1508,8 @@ out_invalid:
fat_msg(sb, KERN_INFO, "Can't find a valid FAT filesystem");
out_fail:
+ if (fsinfo_inode)
+ iput(fsinfo_inode);
if (fat_inode)
iput(fat_inode);
unload_nls(sbi->nls_io);
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index cf9ef918a2a9..ef67c95f12d4 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -355,6 +355,6 @@ void
vxfs_evict_inode(struct inode *ip)
{
truncate_inode_pages(&ip->i_data, 0);
- end_writeback(ip);
+ clear_inode(ip);
call_rcu(&ip->i_rcu, vxfs_i_callback);
}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 539f36cf3e4a..8d2fb8c88cf3 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -231,11 +231,8 @@ static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
static void inode_sync_complete(struct inode *inode)
{
- /*
- * Prevent speculative execution through
- * spin_unlock(&wb->list_lock);
- */
-
+ inode->i_state &= ~I_SYNC;
+ /* Waiters must see I_SYNC cleared before being woken up */
smp_mb();
wake_up_bit(&inode->i_state, __I_SYNC);
}
@@ -329,10 +326,12 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc)
}
/*
- * Wait for writeback on an inode to complete.
+ * Wait for writeback on an inode to complete. Called with i_lock held.
+ * Caller must make sure inode cannot go away when we drop i_lock.
*/
-static void inode_wait_for_writeback(struct inode *inode,
- struct bdi_writeback *wb)
+static void __inode_wait_for_writeback(struct inode *inode)
+ __releases(inode->i_lock)
+ __acquires(inode->i_lock)
{
DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
wait_queue_head_t *wqh;
@@ -340,70 +339,119 @@ static void inode_wait_for_writeback(struct inode *inode,
wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
while (inode->i_state & I_SYNC) {
spin_unlock(&inode->i_lock);
- spin_unlock(&wb->list_lock);
__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
- spin_lock(&wb->list_lock);
spin_lock(&inode->i_lock);
}
}
/*
- * Write out an inode's dirty pages. Called under wb->list_lock and
- * inode->i_lock. Either the caller has an active reference on the inode or
- * the inode has I_WILL_FREE set.
- *
- * If `wait' is set, wait on the writeout.
- *
- * The whole writeout design is quite complex and fragile. We want to avoid
- * starvation of particular inodes when others are being redirtied, prevent
- * livelocks, etc.
+ * Wait for writeback on an inode to complete. Caller must have inode pinned.
*/
-static int
-writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
- struct writeback_control *wbc)
+void inode_wait_for_writeback(struct inode *inode)
{
- struct address_space *mapping = inode->i_mapping;
- long nr_to_write = wbc->nr_to_write;
- unsigned dirty;
- int ret;
+ spin_lock(&inode->i_lock);
+ __inode_wait_for_writeback(inode);
+ spin_unlock(&inode->i_lock);
+}
- assert_spin_locked(&wb->list_lock);
- assert_spin_locked(&inode->i_lock);
+/*
+ * Sleep until I_SYNC is cleared. This function must be called with i_lock
+ * held and drops it. It is aimed for callers not holding any inode reference
+ * so once i_lock is dropped, inode can go away.
+ */
+static void inode_sleep_on_writeback(struct inode *inode)
+ __releases(inode->i_lock)
+{
+ DEFINE_WAIT(wait);
+ wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
+ int sleep;
- if (!atomic_read(&inode->i_count))
- WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
- else
- WARN_ON(inode->i_state & I_WILL_FREE);
+ prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+ sleep = inode->i_state & I_SYNC;
+ spin_unlock(&inode->i_lock);
+ if (sleep)
+ schedule();
+ finish_wait(wqh, &wait);
+}
- if (inode->i_state & I_SYNC) {
+/*
+ * Find proper writeback list for the inode depending on its current state and
+ * possibly also change of its state while we were doing writeback. Here we
+ * handle things such as livelock prevention or fairness of writeback among
+ * inodes. This function can be called only by the flusher thread - no one else
+ * processes all inodes in writeback lists and requeueing inodes behind the
+ * flusher thread's back can have unexpected consequences.
+ */
+static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
+ struct writeback_control *wbc)
+{
+ if (inode->i_state & I_FREEING)
+ return;
+
+ /*
+ * Sync livelock prevention. Each inode is tagged and synced in one
+ * shot. If still dirty, it will be redirty_tail()'ed below. Update
+ * the dirty time to prevent enqueue and sync it again.
+ */
+ if ((inode->i_state & I_DIRTY) &&
+ (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
+ inode->dirtied_when = jiffies;
+
+ if (wbc->pages_skipped) {
/*
- * If this inode is locked for writeback and we are not doing
- * writeback-for-data-integrity, move it to b_more_io so that
- * writeback can proceed with the other inodes on s_io.
- *
- * We'll have another go at writing back this inode when we
- * completed a full scan of b_io.
+ * writeback is not making progress due to locked
+ * buffers. Skip this inode for now.
*/
- if (wbc->sync_mode != WB_SYNC_ALL) {
+ redirty_tail(inode, wb);
+ return;
+ }
+
+ if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
+ /*
+ * We didn't write back all the pages. nfs_writepages()
+ * sometimes bales out without doing anything.
+ */
+ if (wbc->nr_to_write <= 0) {
+ /* Slice used up. Queue for next turn. */
requeue_io(inode, wb);
- trace_writeback_single_inode_requeue(inode, wbc,
- nr_to_write);
- return 0;
+ } else {
+ /*
+ * Writeback blocked by something other than
+ * congestion. Delay the inode for some time to
+ * avoid spinning on the CPU (100% iowait)
+ * retrying writeback of the dirty page/inode
+ * that cannot be performed immediately.
+ */
+ redirty_tail(inode, wb);
}
-
+ } else if (inode->i_state & I_DIRTY) {
/*
- * It's a data-integrity sync. We must wait.
+ * Filesystems can dirty the inode during writeback operations,
+ * such as delayed allocation during submission or metadata
+ * updates after data IO completion.
*/
- inode_wait_for_writeback(inode, wb);
+ redirty_tail(inode, wb);
+ } else {
+ /* The inode is clean. Remove from writeback lists. */
+ list_del_init(&inode->i_wb_list);
}
+}
- BUG_ON(inode->i_state & I_SYNC);
+/*
+ * Write out an inode and its dirty pages. Do not update the writeback list
+ * linkage. That is left to the caller. The caller is also responsible for
+ * setting I_SYNC flag and calling inode_sync_complete() to clear it.
+ */
+static int
+__writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+ struct writeback_control *wbc)
+{
+ struct address_space *mapping = inode->i_mapping;
+ long nr_to_write = wbc->nr_to_write;
+ unsigned dirty;
+ int ret;
- /* Set I_SYNC, reset I_DIRTY_PAGES */
- inode->i_state |= I_SYNC;
- inode->i_state &= ~I_DIRTY_PAGES;
- spin_unlock(&inode->i_lock);
- spin_unlock(&wb->list_lock);
+ WARN_ON(!(inode->i_state & I_SYNC));
ret = do_writepages(mapping, wbc);
@@ -424,6 +472,9 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
* write_inode()
*/
spin_lock(&inode->i_lock);
+ /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
+ if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+ inode->i_state &= ~I_DIRTY_PAGES;
dirty = inode->i_state & I_DIRTY;
inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
spin_unlock(&inode->i_lock);
@@ -433,60 +484,67 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
if (ret == 0)
ret = err;
}
+ trace_writeback_single_inode(inode, wbc, nr_to_write);
+ return ret;
+}
+
+/*
+ * Write out an inode's dirty pages. Either the caller has an active reference
+ * on the inode or the inode has I_WILL_FREE set.
+ *
+ * This function is designed to be called for writing back one inode, which
+ * is done e.g. from filesystem code. The flusher thread instead uses
+ * __writeback_single_inode() and does more profound writeback list handling
+ * in writeback_sb_inodes().
+ */
+static int
+writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+ struct writeback_control *wbc)
+{
+ int ret = 0;
- spin_lock(&wb->list_lock);
spin_lock(&inode->i_lock);
- inode->i_state &= ~I_SYNC;
- if (!(inode->i_state & I_FREEING)) {
+ if (!atomic_read(&inode->i_count))
+ WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
+ else
+ WARN_ON(inode->i_state & I_WILL_FREE);
+
+ if (inode->i_state & I_SYNC) {
+ if (wbc->sync_mode != WB_SYNC_ALL)
+ goto out;
/*
- * Sync livelock prevention. Each inode is tagged and synced in
- * one shot. If still dirty, it will be redirty_tail()'ed below.
- * Update the dirty time to prevent enqueue and sync it again.
+ * It's a data-integrity sync. We must wait. Since callers hold
+ * inode reference or inode has I_WILL_FREE set, it cannot go
+ * away under us.
*/
- if ((inode->i_state & I_DIRTY) &&
- (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
- inode->dirtied_when = jiffies;
-
- if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
- /*
- * We didn't write back all the pages. nfs_writepages()
- * sometimes bales out without doing anything.
- */
- inode->i_state |= I_DIRTY_PAGES;
- if (wbc->nr_to_write <= 0) {
- /*
- * slice used up: queue for next turn
- */
- requeue_io(inode, wb);
- } else {
- /*
- * Writeback blocked by something other than
- * congestion. Delay the inode for some time to
- * avoid spinning on the CPU (100% iowait)
- * retrying writeback of the dirty page/inode
- * that cannot be performed immediately.
- */
- redirty_tail(inode, wb);
- }
- } else if (inode->i_state & I_DIRTY) {
- /*
- * Filesystems can dirty the inode during writeback
- * operations, such as delayed allocation during
- * submission or metadata updates after data IO
- * completion.
- */
- redirty_tail(inode, wb);
- } else {
- /*
- * The inode is clean. At this point we either have
- * a reference to the inode or it's on it's way out.
- * No need to add it back to the LRU.
- */
- list_del_init(&inode->i_wb_list);
- }
+ __inode_wait_for_writeback(inode);
}
+ WARN_ON(inode->i_state & I_SYNC);
+ /*
+ * Skip inode if it is clean. We don't want to mess with writeback
+ * lists in this function since the flusher thread may, for example, be doing
+ * sync in parallel, and if we move the inode, it could get skipped. So
+ * here we make sure inode is on some writeback list and leave it there
+ * unless we have completely cleaned the inode.
+ */
+ if (!(inode->i_state & I_DIRTY))
+ goto out;
+ inode->i_state |= I_SYNC;
+ spin_unlock(&inode->i_lock);
+
+ ret = __writeback_single_inode(inode, wb, wbc);
+
+ spin_lock(&wb->list_lock);
+ spin_lock(&inode->i_lock);
+ /*
+ * If inode is clean, remove it from writeback lists. Otherwise don't
+ * touch it. See comment above for explanation.
+ */
+ if (!(inode->i_state & I_DIRTY))
+ list_del_init(&inode->i_wb_list);
+ spin_unlock(&wb->list_lock);
inode_sync_complete(inode);
- trace_writeback_single_inode(inode, wbc, nr_to_write);
+out:
+ spin_unlock(&inode->i_lock);
return ret;
}
@@ -580,29 +638,57 @@ static long writeback_sb_inodes(struct super_block *sb,
redirty_tail(inode, wb);
continue;
}
- __iget(inode);
+ if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
+ /*
+ * If this inode is locked for writeback and we are not
+ * doing writeback-for-data-integrity, move it to
+ * b_more_io so that writeback can proceed with the
+ * other inodes on s_io.
+ *
+ * We'll have another go at writing back this inode
+ * when we completed a full scan of b_io.
+ */
+ spin_unlock(&inode->i_lock);
+ requeue_io(inode, wb);
+ trace_writeback_sb_inodes_requeue(inode);
+ continue;
+ }
+ spin_unlock(&wb->list_lock);
+
+ /*
+ * We already requeued the inode if it had I_SYNC set and we
+ * are doing WB_SYNC_NONE writeback. So this catches only the
+ * WB_SYNC_ALL case.
+ */
+ if (inode->i_state & I_SYNC) {
+ /* Wait for I_SYNC. This function drops i_lock... */
+ inode_sleep_on_writeback(inode);
+ /* Inode may be gone, start again */
+ continue;
+ }
+ inode->i_state |= I_SYNC;
+ spin_unlock(&inode->i_lock);
+
write_chunk = writeback_chunk_size(wb->bdi, work);
wbc.nr_to_write = write_chunk;
wbc.pages_skipped = 0;
- writeback_single_inode(inode, wb, &wbc);
+ /*
+ * We use I_SYNC to pin the inode in memory. While it is set
+ * evict_inode() will wait so the inode cannot be freed.
+ */
+ __writeback_single_inode(inode, wb, &wbc);
work->nr_pages -= write_chunk - wbc.nr_to_write;
wrote += write_chunk - wbc.nr_to_write;
+ spin_lock(&wb->list_lock);
+ spin_lock(&inode->i_lock);
if (!(inode->i_state & I_DIRTY))
wrote++;
- if (wbc.pages_skipped) {
- /*
- * writeback is not making progress due to locked
- * buffers. Skip this inode for now.
- */
- redirty_tail(inode, wb);
- }
+ requeue_inode(inode, wb, &wbc);
+ inode_sync_complete(inode);
spin_unlock(&inode->i_lock);
- spin_unlock(&wb->list_lock);
- iput(inode);
- cond_resched();
- spin_lock(&wb->list_lock);
+ cond_resched_lock(&wb->list_lock);
/*
* bail out to wb_writeback() often enough to check
* background threshold and other termination conditions.
@@ -796,8 +882,10 @@ static long wb_writeback(struct bdi_writeback *wb,
trace_writeback_wait(wb->bdi, work);
inode = wb_inode(wb->b_more_io.prev);
spin_lock(&inode->i_lock);
- inode_wait_for_writeback(inode, wb);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&wb->list_lock);
+ /* This function drops i_lock... */
+ inode_sleep_on_writeback(inode);
+ spin_lock(&wb->list_lock);
}
}
spin_unlock(&wb->list_lock);
@@ -1331,7 +1419,6 @@ EXPORT_SYMBOL(sync_inodes_sb);
int write_inode_now(struct inode *inode, int sync)
{
struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
- int ret;
struct writeback_control wbc = {
.nr_to_write = LONG_MAX,
.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
@@ -1343,12 +1430,7 @@ int write_inode_now(struct inode *inode, int sync)
wbc.nr_to_write = 0;
might_sleep();
- spin_lock(&wb->list_lock);
- spin_lock(&inode->i_lock);
- ret = writeback_single_inode(inode, wb, &wbc);
- spin_unlock(&inode->i_lock);
- spin_unlock(&wb->list_lock);
- return ret;
+ return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);
@@ -1365,15 +1447,7 @@ EXPORT_SYMBOL(write_inode_now);
*/
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
- struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
- int ret;
-
- spin_lock(&wb->list_lock);
- spin_lock(&inode->i_lock);
- ret = writeback_single_inode(inode, wb, wbc);
- spin_unlock(&inode->i_lock);
- spin_unlock(&wb->list_lock);
- return ret;
+ return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);
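
The core of this rewrite is that I_SYNC now pins the inode while the flusher writes it back: __writeback_single_inode() runs with I_SYNC set but without wb->list_lock, and both writeback_single_inode() and evict() (see the fs/inode.c hunk further below) simply wait for I_SYNC to clear. A rough userspace analogue of that wait/wake pairing, using a mutex and condition variable instead of the kernel's bit waitqueues and barriers (illustration only):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    #define BUSY 0x1                        /* stands in for I_SYNC */

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
    static unsigned int state = BUSY;       /* "inode" is under writeback */

    /* Flusher finishes writeback and wakes waiters, like
     * inode_sync_complete(). */
    static void *flusher(void *arg)
    {
            sleep(1);                       /* pretend to write pages */
            pthread_mutex_lock(&lock);
            state &= ~BUSY;
            pthread_cond_broadcast(&done);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    /* Like inode_wait_for_writeback(): do not tear the object down until
     * the flusher is done with it. */
    static void wait_for_writeback(void)
    {
            pthread_mutex_lock(&lock);
            while (state & BUSY)
                    pthread_cond_wait(&done, &lock);
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, flusher, NULL);
            wait_for_writeback();
            printf("writeback finished, safe to evict\n");
            pthread_join(&t, NULL);
            return 0;
    }
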
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 26783eb2b1fc..56f6dcf30768 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -122,7 +122,7 @@ static void fuse_destroy_inode(struct inode *inode)
static void fuse_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
if (inode->i_sb->s_flags & MS_ACTIVE) {
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 6172fa77ad59..713e621c240b 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1554,7 +1554,7 @@ out_unlock:
out:
/* Case 3 starts here */
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
gfs2_dir_hash_inval(ip);
ip->i_gl->gl_object = NULL;
flush_delayed_work_sync(&ip->i_gl->gl_work);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 737dbeb64320..761ec06354b4 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -532,7 +532,7 @@ out:
void hfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) {
HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
iput(HFS_I(inode)->rsrc_inode);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index ceb1c281eefb..a9bca4b8768b 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -154,7 +154,7 @@ static void hfsplus_evict_inode(struct inode *inode)
{
dprint(DBG_INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
if (HFSPLUS_IS_RSRC(inode)) {
HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
iput(HFSPLUS_I(inode)->rsrc_inode);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 07c516bfea76..2afa5bbccf9b 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -240,7 +240,7 @@ static struct inode *hostfs_alloc_inode(struct super_block *sb)
static void hostfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
if (HOSTFS_I(inode)->fd != -1) {
close_file(&HOSTFS_I(inode)->fd);
HOSTFS_I(inode)->fd = -1;
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
index 9ecde27d1e29..f49d1498aa2e 100644
--- a/fs/hpfs/buffer.c
+++ b/fs/hpfs/buffer.c
@@ -156,7 +156,6 @@ void hpfs_brelse4(struct quad_buffer_head *qbh)
void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
{
- PRINTK(("hpfs_mark_4buffers_dirty\n"));
memcpy(qbh->bh[0]->b_data, qbh->data, 512);
memcpy(qbh->bh[1]->b_data, qbh->data + 512, 512);
memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index de946170ebb1..6d2d5008fa43 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -35,13 +35,6 @@
#define CHKCOND(x,y) if (!(x)) printk y
-#ifdef DBG
-#define PRINTK(x) printk x
-#else
-#undef PRINTK
-#define PRINTK(x)
-#endif
-
struct hpfs_inode_info {
loff_t mmu_private;
ino_t i_parent_dir; /* (directories) gives fnode of parent dir */
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 3b2cec29972b..b43066cbdc6a 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -299,7 +299,7 @@ void hpfs_write_if_changed(struct inode *inode)
void hpfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
if (!inode->i_nlink) {
hpfs_lock(inode->i_sb);
hpfs_remove_fnode(inode->i_sb, inode->i_ino);
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index a80e45a690ac..d4f93b52cec5 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -614,7 +614,7 @@ static struct inode *hppfs_alloc_inode(struct super_block *sb)
void hppfs_evict_inode(struct inode *ino)
{
- end_writeback(ino);
+ clear_inode(ino);
dput(HPPFS_I(ino)->proc_dentry);
mntput(ino->i_sb->s_fs_info);
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 001ef01d2fe2..cc9281b6c628 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -393,7 +393,7 @@ static void truncate_hugepages(struct inode *inode, loff_t lstart)
static void hugetlbfs_evict_inode(struct inode *inode)
{
truncate_hugepages(inode, 0);
- end_writeback(inode);
+ clear_inode(inode);
}
static inline void
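
The end_writeback() -> clear_inode() rename above repeats mechanically across these filesystems; the fs/inode.c hunk below renames the helper itself, drops its inode_sync_wait() call, and instead makes evict() wait for any running writeback before ->evict_inode is invoked. A minimal sketch of what a simple ->evict_inode looks like under the new contract (hypothetical filesystem):

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* The VFS has already waited for I_SYNC, so no writeback wait is
     * needed here; clear_inode() just checks state and marks the inode
     * I_FREEING | I_CLEAR. */
    static void examplefs_evict_inode(struct inode *inode)
    {
            truncate_inode_pages(&inode->i_data, 0);
            clear_inode(inode);
            /* release filesystem-private state here */
    }
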
diff --git a/fs/inode.c b/fs/inode.c
index deb72f6c2b4f..c474c1d7062b 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -486,7 +486,7 @@ void __remove_inode_hash(struct inode *inode)
}
EXPORT_SYMBOL(__remove_inode_hash);
-void end_writeback(struct inode *inode)
+void clear_inode(struct inode *inode)
{
might_sleep();
/*
@@ -500,11 +500,10 @@ void end_writeback(struct inode *inode)
BUG_ON(!list_empty(&inode->i_data.private_list));
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(inode->i_state & I_CLEAR);
- inode_sync_wait(inode);
/* don't need i_lock here, no concurrent mods to i_state */
inode->i_state = I_FREEING | I_CLEAR;
}
-EXPORT_SYMBOL(end_writeback);
+EXPORT_SYMBOL(clear_inode);
/*
* Free the inode passed in, removing it from the lists it is still connected
@@ -531,12 +530,20 @@ static void evict(struct inode *inode)
inode_sb_list_del(inode);
+ /*
+ * Wait for flusher thread to be done with the inode so that filesystem
+ * does not start destroying it while writeback is still running. Since
+ * the inode has I_FREEING set, flusher thread won't start new work on
+ * the inode. We just have to wait for running writeback to finish.
+ */
+ inode_wait_for_writeback(inode);
+
if (op->evict_inode) {
op->evict_inode(inode);
} else {
if (inode->i_data.nrpages)
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
}
if (S_ISBLK(inode->i_mode) && inode->i_bdev)
bd_forget(inode);
@@ -1647,6 +1654,7 @@ void __init inode_init_early(void)
HASH_EARLY,
&i_hash_shift,
&i_hash_mask,
+ 0,
0);
for (loop = 0; loop < (1U << i_hash_shift); loop++)
@@ -1677,6 +1685,7 @@ void __init inode_init(void)
0,
&i_hash_shift,
&i_hash_mask,
+ 0,
0);
for (loop = 0; loop < (1U << i_hash_shift); loop++)
@@ -1739,3 +1748,50 @@ bool inode_owner_or_capable(const struct inode *inode)
return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);
+
+/*
+ * Direct i/o helper functions
+ */
+static void __inode_dio_wait(struct inode *inode)
+{
+ wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
+ DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
+
+ do {
+ prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&inode->i_dio_count))
+ schedule();
+ } while (atomic_read(&inode->i_dio_count));
+ finish_wait(wq, &q.wait);
+}
+
+/**
+ * inode_dio_wait - wait for outstanding DIO requests to finish
+ * @inode: inode to wait for
+ *
+ * Waits for all pending direct I/O requests to finish so that we can
+ * proceed with a truncate or equivalent operation.
+ *
+ * Must be called under a lock that serializes taking new references
+ * to i_dio_count, usually by inode->i_mutex.
+ */
+void inode_dio_wait(struct inode *inode)
+{
+ if (atomic_read(&inode->i_dio_count))
+ __inode_dio_wait(inode);
+}
+EXPORT_SYMBOL(inode_dio_wait);
+
+/*
+ * inode_dio_done - signal completion of a direct I/O request
+ * @inode: inode the direct I/O happens on
+ *
+ * This is called once we've finished processing a direct I/O request,
+ * and is used to wake up callers waiting for direct I/O to be quiesced.
+ */
+void inode_dio_done(struct inode *inode)
+{
+ if (atomic_dec_and_test(&inode->i_dio_count))
+ wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
+}
+EXPORT_SYMBOL(inode_dio_done);
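
inode_dio_wait()/inode_dio_done() land in fs/inode.c as the generic helpers for draining in-flight direct I/O, with the contract spelled out in the kerneldoc above: the caller must hold a lock that prevents new i_dio_count references, usually i_mutex. A hedged sketch of a typical caller, e.g. a truncate path (hypothetical filesystem, not from this patch):

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Assumes i_mutex is already held, as inode_dio_wait() requires. */
    static void example_truncate(struct inode *inode, loff_t newsize)
    {
            /* drain in-flight DIO so it cannot race with the size change */
            inode_dio_wait(inode);
            truncate_setsize(inode, newsize);
            /* ... free the now-unreachable blocks ... */
    }
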
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 5e6dbe8958fc..e50170ca7c33 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -50,7 +50,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
if (ioc) {
- ioc_ioprio_changed(ioc, ioprio);
+ ioc->ioprio = ioprio;
put_io_context(ioc);
}
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 05f0754f2b46..08c03044abdd 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -508,20 +508,19 @@ int cleanup_journal_tail(journal_t *journal)
/*
* We need to make sure that any blocks that were recently written out
* --- perhaps by log_do_checkpoint() --- are flushed out before we
- * drop the transactions from the journal. It's unlikely this will be
- * necessary, especially with an appropriately sized journal, but we
- * need this to guarantee correctness. Fortunately
- * cleanup_journal_tail() doesn't get called all that often.
+ * drop the transactions from the journal. Similarly we need to be sure
+ * the superblock makes it to disk before the next transaction starts reusing
+ * freed space (otherwise we could replay some blocks of the new
+ * transaction thinking they belong to the old one). So we use
+ * WRITE_FLUSH_FUA. It's unlikely this will be necessary, especially
+ * with an appropriately sized journal, but we need this to guarantee
+ * correctness. Fortunately cleanup_journal_tail() doesn't get called
+ * all that often.
*/
- if (journal->j_flags & JFS_BARRIER)
- blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+ journal_update_sb_log_tail(journal, first_tid, blocknr,
+ WRITE_FLUSH_FUA);
spin_lock(&journal->j_state_lock);
- if (!tid_gt(first_tid, journal->j_tail_sequence)) {
- spin_unlock(&journal->j_state_lock);
- /* Someone else cleaned up journal so return 0 */
- return 0;
- }
/* OK, update the superblock to recover the freed space.
* Physical blocks come first: have we wrapped beyond the end of
* the log? */
@@ -539,8 +538,6 @@ int cleanup_journal_tail(journal_t *journal)
journal->j_tail_sequence = first_tid;
journal->j_tail = blocknr;
spin_unlock(&journal->j_state_lock);
- if (!(journal->j_flags & JFS_ABORT))
- journal_update_superblock(journal, 1);
return 0;
}
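
The rewritten comment states the ordering rule behind this change: the new log tail must be durable on disk before the journal space it frees can be reused, so the tail update goes out as a single WRITE_FLUSH_FUA write through journal_update_sb_log_tail() rather than a separate cache flush. The real submission code is journal_write_superblock() in the journal.c hunk below; a generic sketch of that kind of synchronous FLUSH+FUA buffer write (assumed standalone helper, illustration only):

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    static int example_write_meta_fua(struct buffer_head *bh)
    {
            int ret;

            lock_buffer(bh);
            get_bh(bh);                             /* reference for the I/O */
            bh->b_end_io = end_buffer_write_sync;   /* unlocks bh, drops ref */
            ret = submit_bh(WRITE_FLUSH_FUA, bh);
            wait_on_buffer(bh);
            if (!ret && !buffer_uptodate(bh))
                    ret = -EIO;
            return ret;
    }
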
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index f2b9a571f4cf..52c15c776029 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -298,6 +298,7 @@ void journal_commit_transaction(journal_t *journal)
int tag_flag;
int i;
struct blk_plug plug;
+ int write_op = WRITE;
/*
* First job: lock down the current transaction and wait for
@@ -307,7 +308,16 @@ void journal_commit_transaction(journal_t *journal)
/* Do we need to erase the effects of a prior journal_flush? */
if (journal->j_flags & JFS_FLUSHED) {
jbd_debug(3, "super block updated\n");
- journal_update_superblock(journal, 1);
+ mutex_lock(&journal->j_checkpoint_mutex);
+ /*
+ * We hold j_checkpoint_mutex so tail cannot change under us.
+ * We don't need any special data guarantees for writing sb
+ * since journal is empty and it is ok for write to be
+ * flushed only with transaction commit.
+ */
+ journal_update_sb_log_tail(journal, journal->j_tail_sequence,
+ journal->j_tail, WRITE_SYNC);
+ mutex_unlock(&journal->j_checkpoint_mutex);
} else {
jbd_debug(3, "superblock not updated\n");
}
@@ -413,13 +423,16 @@ void journal_commit_transaction(journal_t *journal)
jbd_debug (3, "JBD: commit phase 2\n");
+ if (tid_geq(journal->j_commit_waited, commit_transaction->t_tid))
+ write_op = WRITE_SYNC;
+
/*
* Now start flushing things to disk, in the order they appear
* on the transaction lists. Data blocks go first.
*/
blk_start_plug(&plug);
err = journal_submit_data_buffers(journal, commit_transaction,
- WRITE_SYNC);
+ write_op);
blk_finish_plug(&plug);
/*
@@ -478,7 +491,7 @@ void journal_commit_transaction(journal_t *journal)
blk_start_plug(&plug);
- journal_write_revoke_records(journal, commit_transaction, WRITE_SYNC);
+ journal_write_revoke_records(journal, commit_transaction, write_op);
/*
* If we found any dirty or locked buffers, then we should have
@@ -649,7 +662,7 @@ start_journal_io:
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
bh->b_end_io = journal_end_buffer_io_sync;
- submit_bh(WRITE_SYNC, bh);
+ submit_bh(write_op, bh);
}
cond_resched();
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 0971e9217808..425c2f2cf170 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -563,6 +563,8 @@ int log_wait_commit(journal_t *journal, tid_t tid)
spin_unlock(&journal->j_state_lock);
#endif
spin_lock(&journal->j_state_lock);
+ if (!tid_geq(journal->j_commit_waited, tid))
+ journal->j_commit_waited = tid;
while (tid_gt(tid, journal->j_commit_sequence)) {
jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n",
tid, journal->j_commit_sequence);
@@ -921,8 +923,33 @@ static int journal_reset(journal_t *journal)
journal->j_max_transaction_buffers = journal->j_maxlen / 4;
- /* Add the dynamic fields and write it to disk. */
- journal_update_superblock(journal, 1);
+ /*
+ * As a special case, if the on-disk copy is already marked as needing
+ * no recovery (s_start == 0), then we can safely defer the superblock
+ * update until the next commit by setting JFS_FLUSHED. This avoids
+ * attempting a write to a potential-readonly device.
+ */
+ if (sb->s_start == 0) {
+ jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
+ "(start %u, seq %d, errno %d)\n",
+ journal->j_tail, journal->j_tail_sequence,
+ journal->j_errno);
+ journal->j_flags |= JFS_FLUSHED;
+ } else {
+ /* Lock here to make assertions happy... */
+ mutex_lock(&journal->j_checkpoint_mutex);
+ /*
+ * Update log tail information. We use WRITE_FUA since new
+ * transaction will start reusing journal space and so we
+ * must make sure information about current log tail is on
+ * disk before that.
+ */
+ journal_update_sb_log_tail(journal,
+ journal->j_tail_sequence,
+ journal->j_tail,
+ WRITE_FUA);
+ mutex_unlock(&journal->j_checkpoint_mutex);
+ }
return journal_start_thread(journal);
}
@@ -999,35 +1026,15 @@ int journal_create(journal_t *journal)
return journal_reset(journal);
}
-/**
- * void journal_update_superblock() - Update journal sb on disk.
- * @journal: The journal to update.
- * @wait: Set to '0' if you don't want to wait for IO completion.
- *
- * Update a journal's dynamic superblock fields and write it to disk,
- * optionally waiting for the IO to complete.
- */
-void journal_update_superblock(journal_t *journal, int wait)
+static void journal_write_superblock(journal_t *journal, int write_op)
{
- journal_superblock_t *sb = journal->j_superblock;
struct buffer_head *bh = journal->j_sb_buffer;
+ int ret;
- /*
- * As a special case, if the on-disk copy is already marked as needing
- * no recovery (s_start == 0) and there are no outstanding transactions
- * in the filesystem, then we can safely defer the superblock update
- * until the next commit by setting JFS_FLUSHED. This avoids
- * attempting a write to a potential-readonly device.
- */
- if (sb->s_start == 0 && journal->j_tail_sequence ==
- journal->j_transaction_sequence) {
- jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
- "(start %u, seq %d, errno %d)\n",
- journal->j_tail, journal->j_tail_sequence,
- journal->j_errno);
- goto out;
- }
-
+ trace_journal_write_superblock(journal, write_op);
+ if (!(journal->j_flags & JFS_BARRIER))
+ write_op &= ~(REQ_FUA | REQ_FLUSH);
+ lock_buffer(bh);
if (buffer_write_io_error(bh)) {
char b[BDEVNAME_SIZE];
/*
@@ -1045,42 +1052,100 @@ void journal_update_superblock(journal_t *journal, int wait)
set_buffer_uptodate(bh);
}
+ get_bh(bh);
+ bh->b_end_io = end_buffer_write_sync;
+ ret = submit_bh(write_op, bh);
+ wait_on_buffer(bh);
+ if (buffer_write_io_error(bh)) {
+ clear_buffer_write_io_error(bh);
+ set_buffer_uptodate(bh);
+ ret = -EIO;
+ }
+ if (ret) {
+ char b[BDEVNAME_SIZE];
+ printk(KERN_ERR "JBD: Error %d detected "
+ "when updating journal superblock for %s.\n",
+ ret, journal_dev_name(journal, b));
+ }
+}
+
+/**
+ * journal_update_sb_log_tail() - Update log tail in journal sb on disk.
+ * @journal: The journal to update.
+ * @tail_tid: TID of the new transaction at the tail of the log
+ * @tail_block: The first block of the transaction at the tail of the log
+ * @write_op: With which operation should we write the journal sb
+ *
+ * Update a journal's superblock information about log tail and write it to
+ * disk, waiting for the IO to complete.
+ */
+void journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
+ unsigned int tail_block, int write_op)
+{
+ journal_superblock_t *sb = journal->j_superblock;
+
+ BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
+ jbd_debug(1,"JBD: updating superblock (start %u, seq %u)\n",
+ tail_block, tail_tid);
+
+ sb->s_sequence = cpu_to_be32(tail_tid);
+ sb->s_start = cpu_to_be32(tail_block);
+
+ journal_write_superblock(journal, write_op);
+
+ /* Log is no longer empty */
+ spin_lock(&journal->j_state_lock);
+ WARN_ON(!sb->s_sequence);
+ journal->j_flags &= ~JFS_FLUSHED;
+ spin_unlock(&journal->j_state_lock);
+}
+
+/**
+ * mark_journal_empty() - Mark on disk journal as empty.
+ * @journal: The journal to update.
+ *
+ * Update a journal's dynamic superblock fields to show that journal is empty.
+ * Write updated superblock to disk waiting for IO to complete.
+ */
+static void mark_journal_empty(journal_t *journal)
+{
+ journal_superblock_t *sb = journal->j_superblock;
+
+ BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
spin_lock(&journal->j_state_lock);
- jbd_debug(1,"JBD: updating superblock (start %u, seq %d, errno %d)\n",
- journal->j_tail, journal->j_tail_sequence, journal->j_errno);
+ jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n",
+ journal->j_tail_sequence);
sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
- sb->s_start = cpu_to_be32(journal->j_tail);
- sb->s_errno = cpu_to_be32(journal->j_errno);
+ sb->s_start = cpu_to_be32(0);
spin_unlock(&journal->j_state_lock);
- BUFFER_TRACE(bh, "marking dirty");
- mark_buffer_dirty(bh);
- if (wait) {
- sync_dirty_buffer(bh);
- if (buffer_write_io_error(bh)) {
- char b[BDEVNAME_SIZE];
- printk(KERN_ERR "JBD: I/O error detected "
- "when updating journal superblock for %s.\n",
- journal_dev_name(journal, b));
- clear_buffer_write_io_error(bh);
- set_buffer_uptodate(bh);
- }
- } else
- write_dirty_buffer(bh, WRITE);
+ journal_write_superblock(journal, WRITE_FUA);
- trace_jbd_update_superblock_end(journal, wait);
-out:
- /* If we have just flushed the log (by marking s_start==0), then
- * any future commit will have to be careful to update the
- * superblock again to re-record the true start of the log. */
+ spin_lock(&journal->j_state_lock);
+ /* Log is empty */
+ journal->j_flags |= JFS_FLUSHED;
+ spin_unlock(&journal->j_state_lock);
+}
+
+/**
+ * journal_update_sb_errno() - Update error in the journal.
+ * @journal: The journal to update.
+ *
+ * Update a journal's errno. Write updated superblock to disk waiting for IO
+ * to complete.
+ */
+static void journal_update_sb_errno(journal_t *journal)
+{
+ journal_superblock_t *sb = journal->j_superblock;
spin_lock(&journal->j_state_lock);
- if (sb->s_start)
- journal->j_flags &= ~JFS_FLUSHED;
- else
- journal->j_flags |= JFS_FLUSHED;
+ jbd_debug(1, "JBD: updating superblock error (errno %d)\n",
+ journal->j_errno);
+ sb->s_errno = cpu_to_be32(journal->j_errno);
spin_unlock(&journal->j_state_lock);
+
+ journal_write_superblock(journal, WRITE_SYNC);
}
/*
@@ -1251,6 +1316,8 @@ int journal_destroy(journal_t *journal)
/* Force any old transactions to disk */
+ /* We cannot race with anybody but must keep assertions happy */
+ mutex_lock(&journal->j_checkpoint_mutex);
/* Totally anal locking here... */
spin_lock(&journal->j_list_lock);
while (journal->j_checkpoint_transactions != NULL) {
@@ -1266,16 +1333,14 @@ int journal_destroy(journal_t *journal)
if (journal->j_sb_buffer) {
if (!is_journal_aborted(journal)) {
- /* We can now mark the journal as empty. */
- journal->j_tail = 0;
journal->j_tail_sequence =
++journal->j_transaction_sequence;
- journal_update_superblock(journal, 1);
- } else {
+ mark_journal_empty(journal);
+ } else
err = -EIO;
- }
brelse(journal->j_sb_buffer);
}
+ mutex_unlock(&journal->j_checkpoint_mutex);
if (journal->j_inode)
iput(journal->j_inode);
@@ -1455,7 +1520,6 @@ int journal_flush(journal_t *journal)
{
int err = 0;
transaction_t *transaction = NULL;
- unsigned int old_tail;
spin_lock(&journal->j_state_lock);
@@ -1490,6 +1554,7 @@ int journal_flush(journal_t *journal)
if (is_journal_aborted(journal))
return -EIO;
+ mutex_lock(&journal->j_checkpoint_mutex);
cleanup_journal_tail(journal);
/* Finally, mark the journal as really needing no recovery.
@@ -1497,14 +1562,9 @@ int journal_flush(journal_t *journal)
* the magic code for a fully-recovered superblock. Any future
* commits of data to the journal will restore the current
* s_start value. */
+ mark_journal_empty(journal);
+ mutex_unlock(&journal->j_checkpoint_mutex);
spin_lock(&journal->j_state_lock);
- old_tail = journal->j_tail;
- journal->j_tail = 0;
- spin_unlock(&journal->j_state_lock);
- journal_update_superblock(journal, 1);
- spin_lock(&journal->j_state_lock);
- journal->j_tail = old_tail;
-
J_ASSERT(!journal->j_running_transaction);
J_ASSERT(!journal->j_committing_transaction);
J_ASSERT(!journal->j_checkpoint_transactions);
@@ -1544,8 +1604,12 @@ int journal_wipe(journal_t *journal, int write)
write ? "Clearing" : "Ignoring");
err = journal_skip_recovery(journal);
- if (write)
- journal_update_superblock(journal, 1);
+ if (write) {
+ /* Lock to make assertions happy... */
+ mutex_lock(&journal->j_checkpoint_mutex);
+ mark_journal_empty(journal);
+ mutex_unlock(&journal->j_checkpoint_mutex);
+ }
no_recovery:
return err;
@@ -1613,7 +1677,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
__journal_abort_hard(journal);
if (errno)
- journal_update_superblock(journal, 1);
+ journal_update_sb_errno(journal);
}
/**
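
The journal.c rework above funnels every superblock write through the new journal_write_superblock() helper, which owns the buffer locking and the synchronous submit_bh()/wait_on_buffer() sequence. Its callers pick the operation: journal_update_sb_log_tail() and mark_journal_empty() use WRITE_FUA under j_checkpoint_mutex (the tail must be durable before journal space is reused), while journal_update_sb_errno() uses WRITE_SYNC. The helper also applies one policy: if the journal was not mounted with barriers (JFS_BARRIER clear), the FUA/FLUSH bits are stripped before submission. A small userspace sketch of that downgrade step; the flag values below are stand-ins, not the kernel's REQ_*/WRITE_* definitions:

#include <stdio.h>

/* Stand-in flag values for illustration only. */
#define REQ_FLUSH   (1u << 0)
#define REQ_FUA     (1u << 1)
#define WRITE_SYNC  (1u << 2)
#define WRITE_FUA   (WRITE_SYNC | REQ_FLUSH | REQ_FUA)

static unsigned int effective_write_op(unsigned int write_op, int barriers)
{
	if (!barriers)                          /* JFS_BARRIER not set */
		write_op &= ~(REQ_FUA | REQ_FLUSH);
	return write_op;
}

int main(void)
{
	printf("%#x\n", effective_write_op(WRITE_FUA, 1)); /* 0x7: keeps FUA+FLUSH */
	printf("%#x\n", effective_write_op(WRITE_FUA, 0)); /* 0x4: plain sync write */
	return 0;
}
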
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index b2a7e5244e39..febc10db5ced 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1433,8 +1433,6 @@ int journal_stop(handle_t *handle)
}
}
- if (handle->h_sync)
- transaction->t_synchronous_commit = 1;
current->journal_info = NULL;
spin_lock(&journal->j_state_lock);
spin_lock(&transaction->t_handle_lock);
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index bb6f993ebca9..3d3092eda811 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -240,7 +240,7 @@ void jffs2_evict_inode (struct inode *inode)
jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
__func__, inode->i_ino, inode->i_mode);
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
jffs2_do_clear_inode(c, f);
}
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 77b69b27f825..4692bf3ca8cb 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -169,7 +169,7 @@ void jfs_evict_inode(struct inode *inode)
} else {
truncate_inode_pages(&inode->i_data, 0);
}
- end_writeback(inode);
+ clear_inode(inode);
dquot_drop(inode);
}
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index ba1dc2eebd1e..ca0a08001449 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -56,7 +56,7 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4;
int status;
- status = lockd_up();
+ status = lockd_up(nlm_init->net);
if (status < 0)
return ERR_PTR(status);
@@ -65,7 +65,7 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
nlm_init->hostname, nlm_init->noresvport,
nlm_init->net);
if (host == NULL) {
- lockd_down();
+ lockd_down(nlm_init->net);
return ERR_PTR(-ENOLCK);
}
@@ -80,8 +80,10 @@ EXPORT_SYMBOL_GPL(nlmclnt_init);
*/
void nlmclnt_done(struct nlm_host *host)
{
+ struct net *net = host->net;
+
nlmclnt_release_host(host);
- lockd_down();
+ lockd_down(net);
}
EXPORT_SYMBOL_GPL(nlmclnt_done);
@@ -220,11 +222,12 @@ reclaimer(void *ptr)
struct nlm_wait *block;
struct file_lock *fl, *next;
u32 nsmstate;
+ struct net *net = host->net;
allow_signal(SIGKILL);
down_write(&host->h_rwsem);
- lockd_up(); /* note: this cannot fail as lockd is already running */
+ lockd_up(net); /* note: this cannot fail as lockd is already running */
dprintk("lockd: reclaiming locks for host %s\n", host->h_name);
@@ -275,6 +278,6 @@ restart:
/* Release host handle after use */
nlmclnt_release_host(host);
- lockd_down();
+ lockd_down(net);
return 0;
}
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index f49b9afc4436..1ead0750cdbb 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -295,11 +295,10 @@ static void lockd_down_net(struct net *net)
/*
* Bring up the lockd process if it's not already up.
*/
-int lockd_up(void)
+int lockd_up(struct net *net)
{
struct svc_serv *serv;
int error = 0;
- struct net *net = current->nsproxy->net_ns;
mutex_lock(&nlmsvc_mutex);
/*
@@ -378,12 +377,12 @@ EXPORT_SYMBOL_GPL(lockd_up);
* Decrement the user count and bring down lockd if we're the last.
*/
void
-lockd_down(void)
+lockd_down(struct net *net)
{
mutex_lock(&nlmsvc_mutex);
if (nlmsvc_users) {
if (--nlmsvc_users) {
- lockd_down_net(current->nsproxy->net_ns);
+ lockd_down_net(net);
goto out;
}
} else {
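
The lockd changes above make lockd_up()/lockd_down() take the struct net they act on instead of reading current->nsproxy->net_ns, so shutdown targets the namespace the client was started in rather than whatever namespace the calling task happens to occupy. The nlmclnt_done() hunk also shows why the handle is captured first: the host that owns it is released before lockd_down() runs. A userspace-style sketch of that ordering, with simplified stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct net { int id; };                       /* stand-in for struct net */
struct nlm_host { struct net *net; };

static void nlmclnt_release_host(struct nlm_host *host)
{
	free(host);                           /* last reference: the host memory goes away */
}

static void lockd_down(struct net *net)
{
	printf("lockd_down on net %d\n", net->id);
}

static void nlmclnt_done(struct nlm_host *host)
{
	struct net *net = host->net;          /* capture before releasing the host */

	nlmclnt_release_host(host);
	lockd_down(net);                      /* host->net here would touch freed memory */
}

int main(void)
{
	static struct net init_net = { .id = 1 };
	struct nlm_host *host = malloc(sizeof(*host));

	if (!host)
		return 1;
	host->net = &init_net;
	nlmclnt_done(host);
	return 0;
}
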
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index e3ab5e5a904c..f1cb512c5019 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -2175,7 +2175,7 @@ void logfs_evict_inode(struct inode *inode)
}
}
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
/* Cheaper version of write_inode. All changes are concealed in
* aliases, which are moved back. No write to the medium happens.
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index fcb05d2c6b5f..2a503ad020d5 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -32,7 +32,7 @@ static void minix_evict_inode(struct inode *inode)
minix_truncate(inode);
}
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
if (!inode->i_nlink)
minix_free_inode(inode);
}
diff --git a/fs/namei.c b/fs/namei.c
index e70ebab9624b..c651f02c9fec 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/export.h>
+#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/namei.h>
@@ -1451,7 +1452,8 @@ EXPORT_SYMBOL(full_name_hash);
*/
static inline unsigned long hash_name(const char *name, unsigned int *hashp)
{
- unsigned long a, mask, hash, len;
+ unsigned long a, b, adata, bdata, mask, hash, len;
+ const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
hash = a = 0;
len = -sizeof(unsigned long);
@@ -1459,17 +1461,18 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp)
hash = (hash + a) * 9;
len += sizeof(unsigned long);
a = load_unaligned_zeropad(name+len);
- /* Do we have any NUL or '/' bytes in this word? */
- mask = has_zero(a) | has_zero(a ^ REPEAT_BYTE('/'));
- } while (!mask);
-
- /* The mask *below* the first high bit set */
- mask = (mask - 1) & ~mask;
- mask >>= 7;
- hash += a & mask;
+ b = a ^ REPEAT_BYTE('/');
+ } while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
+
+ adata = prep_zero_mask(a, adata, &constants);
+ bdata = prep_zero_mask(b, bdata, &constants);
+
+ mask = create_zero_mask(adata | bdata);
+
+ hash += a & zero_bytemask(mask);
*hashp = fold_hash(hash);
- return len + count_masked_bytes(mask);
+ return len + find_zero(mask);
}
#else
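
The hash_name() hunk above moves fs/namei.c onto the reworked word-at-a-time helpers: each iteration loads one unsigned long of the path and asks whether it contains a NUL terminator or a '/', by running has_zero() on the word and on the word XORed with repeated '/' bytes. A self-contained userspace sketch of the underlying zero-byte trick; this mirrors the idea, not the kernel's word-at-a-time API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ONES  0x0101010101010101ULL
#define HIGHS 0x8080808080808080ULL
#define REPEAT_BYTE(b) (ONES * (uint8_t)(b))

/* Nonzero iff some byte of x is zero: classic (x - 0x01..01) & ~x & 0x80..80. */
static uint64_t has_zero_byte(uint64_t x)
{
	return (x - ONES) & ~x & HIGHS;
}

int main(void)
{
	uint64_t word;

	memcpy(&word, "dir/file", 8);      /* one word of a path, no NUL inside */
	if (has_zero_byte(word) | has_zero_byte(word ^ REPEAT_BYTE('/')))
		puts("word contains a path terminator or '/'");

	memcpy(&word, "dirAfile", 8);
	if (!(has_zero_byte(word) | has_zero_byte(word ^ REPEAT_BYTE('/'))))
		puts("word is all ordinary name bytes");
	return 0;
}
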
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 87484fb8d177..333df07ae3bd 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -292,7 +292,7 @@ static void
ncp_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
if (S_ISDIR(inode->i_mode)) {
DDPRINTK("ncp_evict_inode: put directory %ld\n", inode->i_ino);
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 2a0e6c599147..f90f4f5cd421 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -29,9 +29,20 @@ config NFS_FS
If unsure, say N.
+config NFS_V2
+ bool "NFS client support for NFS version 2"
+ depends on NFS_FS
+ default y
+ help
+ This option enables support for version 2 of the NFS protocol
+ (RFC 1094) in the kernel's NFS client.
+
+ If unsure, say Y.
+
config NFS_V3
bool "NFS client support for NFS version 3"
depends on NFS_FS
+ default y
help
This option enables support for version 3 of the NFS protocol
(RFC 1813) in the kernel's NFS client.
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index b58613d0abb3..7ddd45d9f170 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -4,11 +4,12 @@
obj-$(CONFIG_NFS_FS) += nfs.o
-nfs-y := client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
- direct.o pagelist.o proc.o read.o symlink.o unlink.o \
+nfs-y := client.o dir.o file.o getroot.o inode.o super.o \
+ direct.o pagelist.o read.o symlink.o unlink.o \
write.o namespace.o mount_clnt.o \
dns_resolve.o cache_lib.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
+nfs-$(CONFIG_NFS_V2) += proc.o nfs2xdr.o
nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
nfs-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 7f6a23f0244e..7ae8a608956f 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -187,7 +187,6 @@ static void bl_end_io_read(struct bio *bio, int err)
struct parallel_io *par = bio->bi_private;
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;
do {
struct page *page = bvec->bv_page;
@@ -198,9 +197,12 @@ static void bl_end_io_read(struct bio *bio, int err)
SetPageUptodate(page);
} while (bvec >= bio->bi_io_vec);
if (!uptodate) {
- if (!rdata->pnfs_error)
- rdata->pnfs_error = -EIO;
- pnfs_set_lo_fail(rdata->lseg);
+ struct nfs_read_data *rdata = par->data;
+ struct nfs_pgio_header *header = rdata->header;
+
+ if (!header->pnfs_error)
+ header->pnfs_error = -EIO;
+ pnfs_set_lo_fail(header->lseg);
}
bio_put(bio);
put_parallel(par);
@@ -221,7 +223,7 @@ bl_end_par_io_read(void *data, int unused)
{
struct nfs_read_data *rdata = data;
- rdata->task.tk_status = rdata->pnfs_error;
+ rdata->task.tk_status = rdata->header->pnfs_error;
INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
schedule_work(&rdata->task.u.tk_work);
}
@@ -229,6 +231,7 @@ bl_end_par_io_read(void *data, int unused)
static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
+ struct nfs_pgio_header *header = rdata->header;
int i, hole;
struct bio *bio = NULL;
struct pnfs_block_extent *be = NULL, *cow_read = NULL;
@@ -239,7 +242,7 @@ bl_read_pagelist(struct nfs_read_data *rdata)
int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
- rdata->npages, f_offset, (unsigned int)rdata->args.count);
+ rdata->pages.npages, f_offset, (unsigned int)rdata->args.count);
par = alloc_parallel(rdata);
if (!par)
@@ -249,17 +252,17 @@ bl_read_pagelist(struct nfs_read_data *rdata)
isect = (sector_t) (f_offset >> SECTOR_SHIFT);
/* Code assumes extents are page-aligned */
- for (i = pg_index; i < rdata->npages; i++) {
+ for (i = pg_index; i < rdata->pages.npages; i++) {
if (!extent_length) {
/* We've used up the previous extent */
bl_put_extent(be);
bl_put_extent(cow_read);
bio = bl_submit_bio(READ, bio);
/* Get the next one */
- be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
+ be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
isect, &cow_read);
if (!be) {
- rdata->pnfs_error = -EIO;
+ header->pnfs_error = -EIO;
goto out;
}
extent_length = be->be_length -
@@ -282,11 +285,12 @@ bl_read_pagelist(struct nfs_read_data *rdata)
struct pnfs_block_extent *be_read;
be_read = (hole && cow_read) ? cow_read : be;
- bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
+ bio = bl_add_page_to_bio(bio, rdata->pages.npages - i,
+ READ,
isect, pages[i], be_read,
bl_end_io_read, par);
if (IS_ERR(bio)) {
- rdata->pnfs_error = PTR_ERR(bio);
+ header->pnfs_error = PTR_ERR(bio);
bio = NULL;
goto out;
}
@@ -294,9 +298,9 @@ bl_read_pagelist(struct nfs_read_data *rdata)
isect += PAGE_CACHE_SECTORS;
extent_length -= PAGE_CACHE_SECTORS;
}
- if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
+ if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
rdata->res.eof = 1;
- rdata->res.count = rdata->inode->i_size - f_offset;
+ rdata->res.count = header->inode->i_size - f_offset;
} else {
rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
}
@@ -345,7 +349,6 @@ static void bl_end_io_write_zero(struct bio *bio, int err)
struct parallel_io *par = bio->bi_private;
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
do {
struct page *page = bvec->bv_page;
@@ -358,9 +361,12 @@ static void bl_end_io_write_zero(struct bio *bio, int err)
} while (bvec >= bio->bi_io_vec);
if (unlikely(!uptodate)) {
- if (!wdata->pnfs_error)
- wdata->pnfs_error = -EIO;
- pnfs_set_lo_fail(wdata->lseg);
+ struct nfs_write_data *data = par->data;
+ struct nfs_pgio_header *header = data->header;
+
+ if (!header->pnfs_error)
+ header->pnfs_error = -EIO;
+ pnfs_set_lo_fail(header->lseg);
}
bio_put(bio);
put_parallel(par);
@@ -370,12 +376,13 @@ static void bl_end_io_write(struct bio *bio, int err)
{
struct parallel_io *par = bio->bi_private;
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
+ struct nfs_write_data *data = par->data;
+ struct nfs_pgio_header *header = data->header;
if (!uptodate) {
- if (!wdata->pnfs_error)
- wdata->pnfs_error = -EIO;
- pnfs_set_lo_fail(wdata->lseg);
+ if (!header->pnfs_error)
+ header->pnfs_error = -EIO;
+ pnfs_set_lo_fail(header->lseg);
}
bio_put(bio);
put_parallel(par);
@@ -391,9 +398,9 @@ static void bl_write_cleanup(struct work_struct *work)
dprintk("%s enter\n", __func__);
task = container_of(work, struct rpc_task, u.tk_work);
wdata = container_of(task, struct nfs_write_data, task);
- if (likely(!wdata->pnfs_error)) {
+ if (likely(!wdata->header->pnfs_error)) {
/* Marks for LAYOUTCOMMIT */
- mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
+ mark_extents_written(BLK_LSEG2EXT(wdata->header->lseg),
wdata->args.offset, wdata->args.count);
}
pnfs_ld_write_done(wdata);
@@ -404,12 +411,12 @@ static void bl_end_par_io_write(void *data, int num_se)
{
struct nfs_write_data *wdata = data;
- if (unlikely(wdata->pnfs_error)) {
- bl_free_short_extents(&BLK_LSEG2EXT(wdata->lseg)->bl_inval,
+ if (unlikely(wdata->header->pnfs_error)) {
+ bl_free_short_extents(&BLK_LSEG2EXT(wdata->header->lseg)->bl_inval,
num_se);
}
- wdata->task.tk_status = wdata->pnfs_error;
+ wdata->task.tk_status = wdata->header->pnfs_error;
wdata->verf.committed = NFS_FILE_SYNC;
INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
schedule_work(&wdata->task.u.tk_work);
@@ -540,6 +547,7 @@ check_page:
static enum pnfs_try_status
bl_write_pagelist(struct nfs_write_data *wdata, int sync)
{
+ struct nfs_pgio_header *header = wdata->header;
int i, ret, npg_zero, pg_index, last = 0;
struct bio *bio = NULL;
struct pnfs_block_extent *be = NULL, *cow_read = NULL;
@@ -552,7 +560,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
pgoff_t index;
u64 temp;
int npg_per_block =
- NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
+ NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
/* At this point, wdata->pages is a (sequential) list of nfs_pages.
@@ -566,7 +574,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
/* At this point, have to be more careful with error handling */
isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
- be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
+ be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read);
if (!be || !is_writable(be, isect)) {
dprintk("%s no matching extents!\n", __func__);
goto out_mds;
@@ -597,10 +605,10 @@ fill_invalid_ext:
dprintk("%s zero %dth page: index %lu isect %llu\n",
__func__, npg_zero, index,
(unsigned long long)isect);
- page = bl_find_get_zeroing_page(wdata->inode, index,
+ page = bl_find_get_zeroing_page(header->inode, index,
cow_read);
if (unlikely(IS_ERR(page))) {
- wdata->pnfs_error = PTR_ERR(page);
+ header->pnfs_error = PTR_ERR(page);
goto out;
} else if (page == NULL)
goto next_page;
@@ -612,7 +620,7 @@ fill_invalid_ext:
__func__, ret);
end_page_writeback(page);
page_cache_release(page);
- wdata->pnfs_error = ret;
+ header->pnfs_error = ret;
goto out;
}
if (likely(!bl_push_one_short_extent(be->be_inval)))
@@ -620,11 +628,11 @@ fill_invalid_ext:
else {
end_page_writeback(page);
page_cache_release(page);
- wdata->pnfs_error = -ENOMEM;
+ header->pnfs_error = -ENOMEM;
goto out;
}
/* FIXME: This should be done in bi_end_io */
- mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
+ mark_extents_written(BLK_LSEG2EXT(header->lseg),
page->index << PAGE_CACHE_SHIFT,
PAGE_CACHE_SIZE);
@@ -632,7 +640,7 @@ fill_invalid_ext:
isect, page, be,
bl_end_io_write_zero, par);
if (IS_ERR(bio)) {
- wdata->pnfs_error = PTR_ERR(bio);
+ header->pnfs_error = PTR_ERR(bio);
bio = NULL;
goto out;
}
@@ -647,16 +655,16 @@ next_page:
/* Middle pages */
pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
- for (i = pg_index; i < wdata->npages; i++) {
+ for (i = pg_index; i < wdata->pages.npages; i++) {
if (!extent_length) {
/* We've used up the previous extent */
bl_put_extent(be);
bio = bl_submit_bio(WRITE, bio);
/* Get the next one */
- be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
+ be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
isect, NULL);
if (!be || !is_writable(be, isect)) {
- wdata->pnfs_error = -EINVAL;
+ header->pnfs_error = -EINVAL;
goto out;
}
if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
@@ -664,7 +672,7 @@ next_page:
be->be_inval)))
par->bse_count++;
else {
- wdata->pnfs_error = -ENOMEM;
+ header->pnfs_error = -ENOMEM;
goto out;
}
}
@@ -677,15 +685,15 @@ next_page:
if (unlikely(ret)) {
dprintk("%s bl_mark_sectors_init fail %d\n",
__func__, ret);
- wdata->pnfs_error = ret;
+ header->pnfs_error = ret;
goto out;
}
}
- bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
+ bio = bl_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
isect, pages[i], be,
bl_end_io_write, par);
if (IS_ERR(bio)) {
- wdata->pnfs_error = PTR_ERR(bio);
+ header->pnfs_error = PTR_ERR(bio);
bio = NULL;
goto out;
}
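
The blocklayout conversions above follow the new read/write data layout: per-I/O state such as the inode, the layout segment and the first pNFS error now lives in a shared nfs_pgio_header reached through ->header, and the page count moves to pages.npages. A simplified userspace model of the "first error wins on the shared header" pattern visible in the hunks; the types and names here are stand-ins, not the kernel structures:

#include <stdio.h>

struct pgio_header {
	int pnfs_error;                    /* first error recorded wins */
};

struct read_piece {
	struct pgio_header *header;        /* shared by all pieces of one I/O */
	int bytes;
};

static void record_pnfs_error(struct read_piece *piece, int err)
{
	struct pgio_header *header = piece->header;

	if (!header->pnfs_error)
		header->pnfs_error = err;
}

int main(void)
{
	struct pgio_header hdr = { 0 };
	struct read_piece a = { &hdr, 4096 }, b = { &hdr, 4096 };

	record_pnfs_error(&a, -5);         /* -EIO from one piece */
	record_pnfs_error(&b, -22);        /* a later error does not overwrite it */
	printf("pnfs_error = %d\n", hdr.pnfs_error);
	return 0;
}
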
diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c
index a5c88a554d92..c96554245ccf 100644
--- a/fs/nfs/blocklayout/blocklayoutdev.c
+++ b/fs/nfs/blocklayout/blocklayoutdev.c
@@ -123,7 +123,7 @@ nfs4_blk_decode_device(struct nfs_server *server,
uint8_t *dataptr;
DECLARE_WAITQUEUE(wq, current);
int offset, len, i, rc;
- struct net *net = server->nfs_client->net;
+ struct net *net = server->nfs_client->cl_net;
struct nfs_net *nn = net_generic(net, nfs_net_id);
struct bl_dev_msg *reply = &nn->bl_mount_reply;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 60f7e4ec842c..7d108753af81 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -65,7 +65,7 @@ static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq);
static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
{
int ret = 0;
- struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+ struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
if (clp->rpc_ops->version != 4 || minorversion != 0)
return ret;
@@ -90,7 +90,9 @@ static bool nfs4_disable_idmapping = true;
* RPC cruft for NFS
*/
static const struct rpc_version *nfs_version[5] = {
+#ifdef CONFIG_NFS_V2
[2] = &nfs_version2,
+#endif
#ifdef CONFIG_NFS_V3
[3] = &nfs_version3,
#endif
@@ -129,6 +131,7 @@ const struct rpc_program nfsacl_program = {
#endif /* CONFIG_NFS_V3_ACL */
struct nfs_client_initdata {
+ unsigned long init_flags;
const char *hostname;
const struct sockaddr *addr;
size_t addrlen;
@@ -172,7 +175,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
clp->cl_rpcclient = ERR_PTR(-EINVAL);
clp->cl_proto = cl_init->proto;
- clp->net = get_net(cl_init->net);
+ clp->cl_net = get_net(cl_init->net);
#ifdef CONFIG_NFS_V4
err = nfs_get_cb_ident_idr(clp, cl_init->minorversion);
@@ -182,7 +185,6 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
spin_lock_init(&clp->cl_lock);
INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
- clp->cl_boot_time = CURRENT_TIME;
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
clp->cl_minorversion = cl_init->minorversion;
clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
@@ -207,6 +209,7 @@ static void nfs4_shutdown_session(struct nfs_client *clp)
if (nfs4_has_session(clp)) {
nfs4_deviceid_purge_client(clp);
nfs4_destroy_session(clp->cl_session);
+ nfs4_destroy_clientid(clp);
}
}
@@ -235,6 +238,9 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
nfs_idmap_delete(clp);
rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
+ kfree(clp->cl_serverowner);
+ kfree(clp->cl_serverscope);
+ kfree(clp->cl_implid);
}
/* idr_remove_all is not needed as all id's are removed by nfs_put_client */
@@ -248,7 +254,7 @@ void nfs_cleanup_cb_ident_idr(struct net *net)
/* nfs_client_lock held */
static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
{
- struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+ struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
if (clp->cl_cb_ident)
idr_remove(&nn->cb_ident_idr, clp->cl_cb_ident);
@@ -301,10 +307,8 @@ static void nfs_free_client(struct nfs_client *clp)
if (clp->cl_machine_cred != NULL)
put_rpccred(clp->cl_machine_cred);
- put_net(clp->net);
+ put_net(clp->cl_net);
kfree(clp->cl_hostname);
- kfree(clp->server_scope);
- kfree(clp->impl_id);
kfree(clp);
dprintk("<-- nfs_free_client()\n");
@@ -321,7 +325,7 @@ void nfs_put_client(struct nfs_client *clp)
return;
dprintk("--> nfs_put_client({%d})\n", atomic_read(&clp->cl_count));
- nn = net_generic(clp->net, nfs_net_id);
+ nn = net_generic(clp->cl_net, nfs_net_id);
if (atomic_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) {
list_del(&clp->cl_share_link);
@@ -456,6 +460,8 @@ static bool nfs4_cb_match_client(const struct sockaddr *addr,
clp->cl_cons_state == NFS_CS_SESSION_INITING))
return false;
+ smp_rmb();
+
/* Match the version and minorversion */
if (clp->rpc_ops->version != 4 ||
clp->cl_minorversion != minorversion)
@@ -504,6 +510,47 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
return NULL;
}
+static bool nfs_client_init_is_complete(const struct nfs_client *clp)
+{
+ return clp->cl_cons_state != NFS_CS_INITING;
+}
+
+int nfs_wait_client_init_complete(const struct nfs_client *clp)
+{
+ return wait_event_killable(nfs_client_active_wq,
+ nfs_client_init_is_complete(clp));
+}
+
+/*
+ * Found an existing client. Make sure it's ready before returning.
+ */
+static struct nfs_client *
+nfs_found_client(const struct nfs_client_initdata *cl_init,
+ struct nfs_client *clp)
+{
+ int error;
+
+ error = nfs_wait_client_init_complete(clp);
+ if (error < 0) {
+ nfs_put_client(clp);
+ return ERR_PTR(-ERESTARTSYS);
+ }
+
+ if (clp->cl_cons_state < NFS_CS_READY) {
+ error = clp->cl_cons_state;
+ nfs_put_client(clp);
+ return ERR_PTR(error);
+ }
+
+ smp_rmb();
+
+ BUG_ON(clp->cl_cons_state != NFS_CS_READY);
+
+ dprintk("<-- %s found nfs_client %p for %s\n",
+ __func__, clp, cl_init->hostname ?: "");
+ return clp;
+}
+
/*
* Look up a client by IP address and protocol version
* - creates a new record if one doesn't yet exist
@@ -512,11 +559,9 @@ static struct nfs_client *
nfs_get_client(const struct nfs_client_initdata *cl_init,
const struct rpc_timeout *timeparms,
const char *ip_addr,
- rpc_authflavor_t authflavour,
- int noresvport)
+ rpc_authflavor_t authflavour)
{
struct nfs_client *clp, *new = NULL;
- int error;
struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id);
dprintk("--> nfs_get_client(%s,v%u)\n",
@@ -527,60 +572,29 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
spin_lock(&nn->nfs_client_lock);
clp = nfs_match_client(cl_init);
- if (clp)
- goto found_client;
- if (new)
- goto install_client;
+ if (clp) {
+ spin_unlock(&nn->nfs_client_lock);
+ if (new)
+ nfs_free_client(new);
+ return nfs_found_client(cl_init, clp);
+ }
+ if (new) {
+ list_add(&new->cl_share_link, &nn->nfs_client_list);
+ spin_unlock(&nn->nfs_client_lock);
+ new->cl_flags = cl_init->init_flags;
+ return cl_init->rpc_ops->init_client(new,
+ timeparms, ip_addr,
+ authflavour);
+ }
spin_unlock(&nn->nfs_client_lock);
new = nfs_alloc_client(cl_init);
} while (!IS_ERR(new));
- dprintk("--> nfs_get_client() = %ld [failed]\n", PTR_ERR(new));
+ dprintk("<-- nfs_get_client() Failed to find %s (%ld)\n",
+ cl_init->hostname ?: "", PTR_ERR(new));
return new;
-
- /* install a new client and return with it unready */
-install_client:
- clp = new;
- list_add(&clp->cl_share_link, &nn->nfs_client_list);
- spin_unlock(&nn->nfs_client_lock);
-
- error = cl_init->rpc_ops->init_client(clp, timeparms, ip_addr,
- authflavour, noresvport);
- if (error < 0) {
- nfs_put_client(clp);
- return ERR_PTR(error);
- }
- dprintk("--> nfs_get_client() = %p [new]\n", clp);
- return clp;
-
- /* found an existing client
- * - make sure it's ready before returning
- */
-found_client:
- spin_unlock(&nn->nfs_client_lock);
-
- if (new)
- nfs_free_client(new);
-
- error = wait_event_killable(nfs_client_active_wq,
- clp->cl_cons_state < NFS_CS_INITING);
- if (error < 0) {
- nfs_put_client(clp);
- return ERR_PTR(-ERESTARTSYS);
- }
-
- if (clp->cl_cons_state < NFS_CS_READY) {
- error = clp->cl_cons_state;
- nfs_put_client(clp);
- return ERR_PTR(error);
- }
-
- BUG_ON(clp->cl_cons_state != NFS_CS_READY);
-
- dprintk("--> nfs_get_client() = %p [share]\n", clp);
- return clp;
}
/*
@@ -588,27 +602,12 @@ found_client:
*/
void nfs_mark_client_ready(struct nfs_client *clp, int state)
{
+ smp_wmb();
clp->cl_cons_state = state;
wake_up_all(&nfs_client_active_wq);
}
/*
- * With sessions, the client is not marked ready until after a
- * successful EXCHANGE_ID and CREATE_SESSION.
- *
- * Map errors cl_cons_state errors to EPROTONOSUPPORT to indicate
- * other versions of NFS can be tried.
- */
-int nfs4_check_client_ready(struct nfs_client *clp)
-{
- if (!nfs4_has_session(clp))
- return 0;
- if (clp->cl_cons_state < NFS_CS_READY)
- return -EPROTONOSUPPORT;
- return 0;
-}
-
-/*
* Initialise the timeout values for a connection
*/
static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
@@ -654,12 +653,11 @@ static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
*/
static int nfs_create_rpc_client(struct nfs_client *clp,
const struct rpc_timeout *timeparms,
- rpc_authflavor_t flavor,
- int discrtry, int noresvport)
+ rpc_authflavor_t flavor)
{
struct rpc_clnt *clnt = NULL;
struct rpc_create_args args = {
- .net = clp->net,
+ .net = clp->cl_net,
.protocol = clp->cl_proto,
.address = (struct sockaddr *)&clp->cl_addr,
.addrsize = clp->cl_addrlen,
@@ -670,9 +668,9 @@ static int nfs_create_rpc_client(struct nfs_client *clp,
.authflavor = flavor,
};
- if (discrtry)
+ if (test_bit(NFS_CS_DISCRTRY, &clp->cl_flags))
args.flags |= RPC_CLNT_CREATE_DISCRTRY;
- if (noresvport)
+ if (test_bit(NFS_CS_NORESVPORT, &clp->cl_flags))
args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
if (!IS_ERR(clp->cl_rpcclient))
@@ -713,7 +711,7 @@ static int nfs_start_lockd(struct nfs_server *server)
.nfs_version = clp->rpc_ops->version,
.noresvport = server->flags & NFS_MOUNT_NORESVPORT ?
1 : 0,
- .net = clp->net,
+ .net = clp->cl_net,
};
if (nlm_init.nfs_version > 3)
@@ -805,36 +803,43 @@ static int nfs_init_server_rpcclient(struct nfs_server *server,
return 0;
}
-/*
- * Initialise an NFS2 or NFS3 client
+/**
+ * nfs_init_client - Initialise an NFS2 or NFS3 client
+ *
+ * @clp: nfs_client to initialise
+ * @timeparms: timeout parameters for underlying RPC transport
+ * @ip_addr: IP presentation address (not used)
+ * @authflavor: authentication flavor for underlying RPC transport
+ *
+ * Returns pointer to an NFS client, or an ERR_PTR value.
*/
-int nfs_init_client(struct nfs_client *clp, const struct rpc_timeout *timeparms,
- const char *ip_addr, rpc_authflavor_t authflavour,
- int noresvport)
+struct nfs_client *nfs_init_client(struct nfs_client *clp,
+ const struct rpc_timeout *timeparms,
+ const char *ip_addr, rpc_authflavor_t authflavour)
{
int error;
if (clp->cl_cons_state == NFS_CS_READY) {
/* the client is already initialised */
dprintk("<-- nfs_init_client() = 0 [already %p]\n", clp);
- return 0;
+ return clp;
}
/*
* Create a client RPC handle for doing FSSTAT with UNIX auth only
* - RFC 2623, sec 2.3.2
*/
- error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX,
- 0, noresvport);
+ error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
if (error < 0)
goto error;
nfs_mark_client_ready(clp, NFS_CS_READY);
- return 0;
+ return clp;
error:
nfs_mark_client_ready(clp, error);
+ nfs_put_client(clp);
dprintk("<-- nfs_init_client() = xerror %d\n", error);
- return error;
+ return ERR_PTR(error);
}
/*
@@ -847,7 +852,7 @@ static int nfs_init_server(struct nfs_server *server,
.hostname = data->nfs_server.hostname,
.addr = (const struct sockaddr *)&data->nfs_server.address,
.addrlen = data->nfs_server.addrlen,
- .rpc_ops = &nfs_v2_clientops,
+ .rpc_ops = NULL,
.proto = data->nfs_server.protocol,
.net = data->net,
};
@@ -857,17 +862,28 @@ static int nfs_init_server(struct nfs_server *server,
dprintk("--> nfs_init_server()\n");
+ switch (data->version) {
+#ifdef CONFIG_NFS_V2
+ case 2:
+ cl_init.rpc_ops = &nfs_v2_clientops;
+ break;
+#endif
#ifdef CONFIG_NFS_V3
- if (data->version == 3)
+ case 3:
cl_init.rpc_ops = &nfs_v3_clientops;
+ break;
#endif
+ default:
+ return -EPROTONOSUPPORT;
+ }
nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
data->timeo, data->retrans);
+ if (data->flags & NFS_MOUNT_NORESVPORT)
+ set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
/* Allocate or find a client reference we can use */
- clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX,
- data->flags & NFS_MOUNT_NORESVPORT);
+ clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX);
if (IS_ERR(clp)) {
dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
return PTR_ERR(clp);
@@ -880,7 +896,7 @@ static int nfs_init_server(struct nfs_server *server,
server->options = data->options;
server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
- NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
+ NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR;
if (data->rsize)
server->rsize = nfs_block_size(data->rsize, NULL);
@@ -1048,7 +1064,7 @@ static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_serve
static void nfs_server_insert_lists(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
- struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+ struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
spin_lock(&nn->nfs_client_lock);
list_add_tail_rcu(&server->client_link, &clp->cl_superblocks);
@@ -1065,7 +1081,7 @@ static void nfs_server_remove_lists(struct nfs_server *server)
if (clp == NULL)
return;
- nn = net_generic(clp->net, nfs_net_id);
+ nn = net_generic(clp->cl_net, nfs_net_id);
spin_lock(&nn->nfs_client_lock);
list_del_rcu(&server->client_link);
if (list_empty(&clp->cl_superblocks))
@@ -1333,21 +1349,27 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
* so that the client back channel can find the
* nfs_client struct
*/
- clp->cl_cons_state = NFS_CS_SESSION_INITING;
+ nfs_mark_client_ready(clp, NFS_CS_SESSION_INITING);
}
#endif /* CONFIG_NFS_V4_1 */
return nfs4_init_callback(clp);
}
-/*
- * Initialise an NFS4 client record
+/**
+ * nfs4_init_client - Initialise an NFS4 client record
+ *
+ * @clp: nfs_client to initialise
+ * @timeparms: timeout parameters for underlying RPC transport
+ * @ip_addr: callback IP address in presentation format
+ * @authflavor: authentication flavor for underlying RPC transport
+ *
+ * Returns pointer to an NFS client, or an ERR_PTR value.
*/
-int nfs4_init_client(struct nfs_client *clp,
- const struct rpc_timeout *timeparms,
- const char *ip_addr,
- rpc_authflavor_t authflavour,
- int noresvport)
+struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+ const struct rpc_timeout *timeparms,
+ const char *ip_addr,
+ rpc_authflavor_t authflavour)
{
char buf[INET6_ADDRSTRLEN + 1];
int error;
@@ -1355,14 +1377,14 @@ int nfs4_init_client(struct nfs_client *clp,
if (clp->cl_cons_state == NFS_CS_READY) {
/* the client is initialised already */
dprintk("<-- nfs4_init_client() = 0 [already %p]\n", clp);
- return 0;
+ return clp;
}
/* Check NFS protocol revision and initialize RPC op vector */
clp->rpc_ops = &nfs_v4_clientops;
- error = nfs_create_rpc_client(clp, timeparms, authflavour,
- 1, noresvport);
+ __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+ error = nfs_create_rpc_client(clp, timeparms, authflavour);
if (error < 0)
goto error;
@@ -1395,12 +1417,13 @@ int nfs4_init_client(struct nfs_client *clp,
if (!nfs4_has_session(clp))
nfs_mark_client_ready(clp, NFS_CS_READY);
- return 0;
+ return clp;
error:
nfs_mark_client_ready(clp, error);
+ nfs_put_client(clp);
dprintk("<-- nfs4_init_client() = xerror %d\n", error);
- return error;
+ return ERR_PTR(error);
}
/*
@@ -1429,9 +1452,11 @@ static int nfs4_set_client(struct nfs_server *server,
dprintk("--> nfs4_set_client()\n");
+ if (server->flags & NFS_MOUNT_NORESVPORT)
+ set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+
/* Allocate or find a client reference we can use */
- clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour,
- server->flags & NFS_MOUNT_NORESVPORT);
+ clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour);
if (IS_ERR(clp)) {
error = PTR_ERR(clp);
goto error;
@@ -1465,8 +1490,8 @@ error:
* the MDS.
*/
struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
- const struct sockaddr *ds_addr,
- int ds_addrlen, int ds_proto)
+ const struct sockaddr *ds_addr, int ds_addrlen,
+ int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans)
{
struct nfs_client_initdata cl_init = {
.addr = ds_addr,
@@ -1474,14 +1499,9 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
.rpc_ops = &nfs_v4_clientops,
.proto = ds_proto,
.minorversion = mds_clp->cl_minorversion,
- .net = mds_clp->net,
- };
- struct rpc_timeout ds_timeout = {
- .to_initval = 15 * HZ,
- .to_maxval = 15 * HZ,
- .to_retries = 1,
- .to_exponential = 1,
+ .net = mds_clp->cl_net,
};
+ struct rpc_timeout ds_timeout;
struct nfs_client *clp;
/*
@@ -1489,8 +1509,9 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
* cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
* (section 13.1 RFC 5661).
*/
+ nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
- mds_clp->cl_rpcclient->cl_auth->au_flavor, 0);
+ mds_clp->cl_rpcclient->cl_auth->au_flavor);
dprintk("<-- %s %p\n", __func__, clp);
return clp;
@@ -1701,7 +1722,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
rpc_protocol(parent_server->client),
parent_server->client->cl_timeout,
parent_client->cl_mvops->minor_version,
- parent_client->net);
+ parent_client->cl_net);
if (error < 0)
goto error;
@@ -1805,6 +1826,7 @@ void nfs_clients_init(struct net *net)
idr_init(&nn->cb_ident_idr);
#endif
spin_lock_init(&nn->nfs_client_lock);
+ nn->boot_time = CURRENT_TIME;
}
#ifdef CONFIG_PROC_FS
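
The client.c rework above also changes the init_client calling convention: nfs_init_client() and nfs4_init_client() now return the nfs_client itself on success and an ERR_PTR-encoded errno on failure (dropping their reference first), which lets nfs_get_client() simply hand back whatever the per-version init routine produced. A userspace sketch of the pointer-encoded error convention; the helpers below are simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR, and the failure path is hypothetical:

#include <stdio.h>
#include <errno.h>

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-4095;
}

struct nfs_client { int ready; };

static struct nfs_client *init_client(struct nfs_client *clp, int fail)
{
	if (fail)
		return ERR_PTR(-EPROTONOSUPPORT);  /* illustrative failure */
	clp->ready = 1;
	return clp;                                /* success: the client itself */
}

int main(void)
{
	struct nfs_client clp = { 0 };
	struct nfs_client *res = init_client(&clp, 1);

	if (IS_ERR(res))
		printf("init failed: %ld\n", PTR_ERR(res));
	return 0;
}
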
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 89af1d269274..bd3a9601d32d 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -316,6 +316,10 @@ out:
* nfs_client_return_marked_delegations - return previously marked delegations
* @clp: nfs_client to process
*
+ * Note that this function is designed to be called by the state
+ * manager thread. For this reason, it cannot flush the dirty data,
+ * since that could deadlock in case of a state recovery error.
+ *
* Returns zero on success, or a negative errno value.
*/
int nfs_client_return_marked_delegations(struct nfs_client *clp)
@@ -340,11 +344,9 @@ restart:
server);
rcu_read_unlock();
- if (delegation != NULL) {
- filemap_flush(inode->i_mapping);
+ if (delegation != NULL)
err = __nfs_inode_return_delegation(inode,
delegation, 0);
- }
iput(inode);
if (!err)
goto restart;
@@ -380,6 +382,10 @@ void nfs_inode_return_delegation_noreclaim(struct inode *inode)
* nfs_inode_return_delegation - synchronously return a delegation
* @inode: inode to process
*
+ * This routine will always flush any dirty data to disk on the
+ * assumption that if we need to return the delegation, then
+ * we should stop caching.
+ *
* Returns zero on success, or a negative errno value.
*/
int nfs_inode_return_delegation(struct inode *inode)
@@ -389,10 +395,10 @@ int nfs_inode_return_delegation(struct inode *inode)
struct nfs_delegation *delegation;
int err = 0;
+ nfs_wb_all(inode);
if (rcu_access_pointer(nfsi->delegation) != NULL) {
delegation = nfs_detach_delegation(nfsi, server);
if (delegation != NULL) {
- nfs_wb_all(inode);
err = __nfs_inode_return_delegation(inode, delegation, 1);
}
}
@@ -538,6 +544,8 @@ int nfs_async_inode_return_delegation(struct inode *inode,
struct nfs_client *clp = server->nfs_client;
struct nfs_delegation *delegation;
+ filemap_flush(inode->i_mapping);
+
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index cd6a7a8dadae..72709c4193fa 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -66,6 +66,7 @@ static inline int nfs_have_delegation(struct inode *inode, fmode_t flags)
static inline int nfs_inode_return_delegation(struct inode *inode)
{
+ nfs_wb_all(inode);
return 0;
}
#endif
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index eedd24d0ad2e..0989a2099688 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -475,6 +475,29 @@ different:
}
static
+bool nfs_use_readdirplus(struct inode *dir, struct file *filp)
+{
+ if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS))
+ return false;
+ if (test_and_clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags))
+ return true;
+ if (filp->f_pos == 0)
+ return true;
+ return false;
+}
+
+/*
+ * This function is called by the lookup code to request the use of
+ * readdirplus to accelerate any future lookups in the same
+ * directory.
+ */
+static
+void nfs_advise_use_readdirplus(struct inode *dir)
+{
+ set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags);
+}
+
+static
void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
{
struct qstr filename = QSTR_INIT(entry->name, entry->len);
@@ -871,7 +894,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
desc->file = filp;
desc->dir_cookie = &dir_ctx->dir_cookie;
desc->decode = NFS_PROTO(inode)->decode_dirent;
- desc->plus = NFS_USE_READDIRPLUS(inode);
+ desc->plus = nfs_use_readdirplus(inode, filp) ? 1 : 0;
nfs_block_sillyrename(dentry);
res = nfs_revalidate_mapping(inode, filp->f_mapping);
@@ -1111,7 +1134,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
if (!inode) {
if (nfs_neg_need_reval(dir, dentry, nd))
goto out_bad;
- goto out_valid;
+ goto out_valid_noent;
}
if (is_bad_inode(inode)) {
@@ -1140,7 +1163,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
if (fhandle == NULL || fattr == NULL)
goto out_error;
- error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
if (error)
goto out_bad;
if (nfs_compare_fh(NFS_FH(inode), fhandle))
@@ -1153,6 +1176,9 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
out_set_verifier:
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
out_valid:
+ /* Success: notify readdir to use READDIRPLUS */
+ nfs_advise_use_readdirplus(dir);
+ out_valid_noent:
dput(parent);
dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is valid\n",
__func__, dentry->d_parent->d_name.name,
@@ -1296,7 +1322,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
parent = dentry->d_parent;
/* Protect against concurrent sillydeletes */
nfs_block_sillyrename(parent);
- error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
if (error == -ENOENT)
goto no_entry;
if (error < 0) {
@@ -1308,6 +1334,9 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
if (IS_ERR(res))
goto out_unblock_sillyrename;
+ /* Success: notify readdir to use READDIRPLUS */
+ nfs_advise_use_readdirplus(dir);
+
no_entry:
res = d_materialise_unique(dentry, inode);
if (res != NULL) {
@@ -1643,7 +1672,7 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
if (dentry->d_inode)
goto out;
if (fhandle->size == 0) {
- error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
if (error)
goto out_error;
}
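
The dir.c hunks above replace the old NFS_USE_READDIRPLUS(inode) check with nfs_use_readdirplus(): READDIRPLUS is requested when a recent lookup or revalidation in the directory has set the one-shot NFS_INO_ADVISE_RDPLUS hint via nfs_advise_use_readdirplus(), or when the reader is at the start of the directory. A small userspace model of that heuristic; the server-capability check is omitted and the atomic test_and_clear_bit() is simplified to a plain flag:

#include <stdbool.h>
#include <stdio.h>

struct dir_state {
	bool advise_rdplus;                     /* stands in for NFS_INO_ADVISE_RDPLUS */
	long f_pos;
};

static bool use_readdirplus(struct dir_state *dir)
{
	if (dir->advise_rdplus) {               /* test_and_clear_bit() in the kernel */
		dir->advise_rdplus = false;
		return true;
	}
	return dir->f_pos == 0;
}

int main(void)
{
	struct dir_state dir = { .advise_rdplus = false, .f_pos = 4096 };

	printf("%d\n", use_readdirplus(&dir));  /* 0: mid-directory, no advice */
	dir.advise_rdplus = true;               /* a lookup just succeeded here */
	printf("%d\n", use_readdirplus(&dir));  /* 1: advice consumed */
	printf("%d\n", use_readdirplus(&dir));  /* 0: the hint was one-shot */
	return 0;
}
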
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 481be7f7bdd3..ad2775d3e219 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -56,6 +56,7 @@
#include "internal.h"
#include "iostat.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -81,16 +82,19 @@ struct nfs_direct_req {
struct completion completion; /* wait for i/o completion */
/* commit state */
- struct list_head rewrite_list; /* saved nfs_write_data structs */
- struct nfs_write_data * commit_data; /* special write_data for commits */
+ struct nfs_mds_commit_info mds_cinfo; /* Storage for cinfo */
+ struct pnfs_ds_commit_info ds_cinfo; /* Storage for cinfo */
+ struct work_struct work;
int flags;
#define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */
struct nfs_writeverf verf; /* unstable write verifier */
};
+static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
+static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
-static const struct rpc_call_ops nfs_write_direct_ops;
+static void nfs_direct_write_schedule_work(struct work_struct *work);
static inline void get_dreq(struct nfs_direct_req *dreq)
{
@@ -124,22 +128,6 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_
return -EINVAL;
}
-static void nfs_direct_dirty_pages(struct page **pages, unsigned int pgbase, size_t count)
-{
- unsigned int npages;
- unsigned int i;
-
- if (count == 0)
- return;
- pages += (pgbase >> PAGE_SHIFT);
- npages = (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- for (i = 0; i < npages; i++) {
- struct page *page = pages[i];
- if (!PageCompound(page))
- set_page_dirty(page);
- }
-}
-
static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
unsigned int i;
@@ -147,26 +135,30 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
page_cache_release(pages[i]);
}
+void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
+ struct nfs_direct_req *dreq)
+{
+ cinfo->lock = &dreq->lock;
+ cinfo->mds = &dreq->mds_cinfo;
+ cinfo->ds = &dreq->ds_cinfo;
+ cinfo->dreq = dreq;
+ cinfo->completion_ops = &nfs_direct_commit_completion_ops;
+}
+
static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
struct nfs_direct_req *dreq;
- dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
+ dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
if (!dreq)
return NULL;
kref_init(&dreq->kref);
kref_get(&dreq->kref);
init_completion(&dreq->completion);
- INIT_LIST_HEAD(&dreq->rewrite_list);
- dreq->iocb = NULL;
- dreq->ctx = NULL;
- dreq->l_ctx = NULL;
+ INIT_LIST_HEAD(&dreq->mds_cinfo.list);
+ INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
spin_lock_init(&dreq->lock);
- atomic_set(&dreq->io_count, 0);
- dreq->count = 0;
- dreq->error = 0;
- dreq->flags = 0;
return dreq;
}
@@ -226,47 +218,80 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
nfs_direct_req_release(dreq);
}
-/*
- * We must hold a reference to all the pages in this direct read request
- * until the RPCs complete. This could be long *after* we are woken up in
- * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
- */
-static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
+static void nfs_direct_readpage_release(struct nfs_page *req)
{
- struct nfs_read_data *data = calldata;
-
- nfs_readpage_result(task, data);
+ dprintk("NFS: direct read done (%s/%lld %d@%lld)\n",
+ req->wb_context->dentry->d_inode->i_sb->s_id,
+ (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+ req->wb_bytes,
+ (long long)req_offset(req));
+ nfs_release_request(req);
}
-static void nfs_direct_read_release(void *calldata)
+static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
+ unsigned long bytes = 0;
+ struct nfs_direct_req *dreq = hdr->dreq;
- struct nfs_read_data *data = calldata;
- struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
- int status = data->task.tk_status;
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+ goto out_put;
spin_lock(&dreq->lock);
- if (unlikely(status < 0)) {
- dreq->error = status;
- spin_unlock(&dreq->lock);
- } else {
- dreq->count += data->res.count;
- spin_unlock(&dreq->lock);
- nfs_direct_dirty_pages(data->pagevec,
- data->args.pgbase,
- data->res.count);
- }
- nfs_direct_release_pages(data->pagevec, data->npages);
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
+ dreq->error = hdr->error;
+ else
+ dreq->count += hdr->good_bytes;
+ spin_unlock(&dreq->lock);
+ while (!list_empty(&hdr->pages)) {
+ struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+ struct page *page = req->wb_page;
+
+ if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
+ if (bytes > hdr->good_bytes)
+ zero_user(page, 0, PAGE_SIZE);
+ else if (hdr->good_bytes - bytes < PAGE_SIZE)
+ zero_user_segment(page,
+ hdr->good_bytes & ~PAGE_MASK,
+ PAGE_SIZE);
+ }
+ if (!PageCompound(page)) {
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+ if (bytes < hdr->good_bytes)
+ set_page_dirty(page);
+ } else
+ set_page_dirty(page);
+ }
+ bytes += req->wb_bytes;
+ nfs_list_remove_request(req);
+ nfs_direct_readpage_release(req);
+ }
+out_put:
if (put_dreq(dreq))
nfs_direct_complete(dreq);
- nfs_readdata_free(data);
+ hdr->release(hdr);
+}
+
+static void nfs_read_sync_pgio_error(struct list_head *head)
+{
+ struct nfs_page *req;
+
+ while (!list_empty(head)) {
+ req = nfs_list_entry(head->next);
+ nfs_list_remove_request(req);
+ nfs_release_request(req);
+ }
}
-static const struct rpc_call_ops nfs_read_direct_ops = {
- .rpc_call_prepare = nfs_read_prepare,
- .rpc_call_done = nfs_direct_read_result,
- .rpc_release = nfs_direct_read_release,
+static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
+{
+ get_dreq(hdr->dreq);
+}
+
+static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
+ .error_cleanup = nfs_read_sync_pgio_error,
+ .init_hdr = nfs_direct_pgio_init,
+ .completion = nfs_direct_read_completion,
};
/*
@@ -276,107 +301,82 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
* handled automatically by nfs_direct_read_result(). Otherwise, if
* no requests have been sent, just return an error.
*/
-static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
+static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
const struct iovec *iov,
loff_t pos)
{
+ struct nfs_direct_req *dreq = desc->pg_dreq;
struct nfs_open_context *ctx = dreq->ctx;
struct inode *inode = ctx->dentry->d_inode;
unsigned long user_addr = (unsigned long)iov->iov_base;
size_t count = iov->iov_len;
size_t rsize = NFS_SERVER(inode)->rsize;
- struct rpc_task *task;
- struct rpc_message msg = {
- .rpc_cred = ctx->cred,
- };
- struct rpc_task_setup task_setup_data = {
- .rpc_client = NFS_CLIENT(inode),
- .rpc_message = &msg,
- .callback_ops = &nfs_read_direct_ops,
- .workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
- };
unsigned int pgbase;
int result;
ssize_t started = 0;
+ struct page **pagevec = NULL;
+ unsigned int npages;
do {
- struct nfs_read_data *data;
size_t bytes;
+ int i;
pgbase = user_addr & ~PAGE_MASK;
- bytes = min(rsize,count);
+ bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);
result = -ENOMEM;
- data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes));
- if (unlikely(!data))
+ npages = nfs_page_array_len(pgbase, bytes);
+ if (!pagevec)
+ pagevec = kmalloc(npages * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!pagevec)
break;
-
down_read(&current->mm->mmap_sem);
result = get_user_pages(current, current->mm, user_addr,
- data->npages, 1, 0, data->pagevec, NULL);
+ npages, 1, 0, pagevec, NULL);
up_read(&current->mm->mmap_sem);
- if (result < 0) {
- nfs_readdata_free(data);
+ if (result < 0)
break;
- }
- if ((unsigned)result < data->npages) {
+ if ((unsigned)result < npages) {
bytes = result * PAGE_SIZE;
if (bytes <= pgbase) {
- nfs_direct_release_pages(data->pagevec, result);
- nfs_readdata_free(data);
+ nfs_direct_release_pages(pagevec, result);
break;
}
bytes -= pgbase;
- data->npages = result;
+ npages = result;
}
- get_dreq(dreq);
-
- data->req = (struct nfs_page *) dreq;
- data->inode = inode;
- data->cred = msg.rpc_cred;
- data->args.fh = NFS_FH(inode);
- data->args.context = ctx;
- data->args.lock_context = dreq->l_ctx;
- data->args.offset = pos;
- data->args.pgbase = pgbase;
- data->args.pages = data->pagevec;
- data->args.count = bytes;
- data->res.fattr = &data->fattr;
- data->res.eof = 0;
- data->res.count = bytes;
- nfs_fattr_init(&data->fattr);
- msg.rpc_argp = &data->args;
- msg.rpc_resp = &data->res;
-
- task_setup_data.task = &data->task;
- task_setup_data.callback_data = data;
- NFS_PROTO(inode)->read_setup(data, &msg);
-
- task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task))
- break;
- rpc_put_task(task);
-
- dprintk("NFS: %5u initiated direct read call "
- "(req %s/%Ld, %zu bytes @ offset %Lu)\n",
- data->task.tk_pid,
- inode->i_sb->s_id,
- (long long)NFS_FILEID(inode),
- bytes,
- (unsigned long long)data->args.offset);
-
- started += bytes;
- user_addr += bytes;
- pos += bytes;
- /* FIXME: Remove this unnecessary math from final patch */
- pgbase += bytes;
- pgbase &= ~PAGE_MASK;
- BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
-
- count -= bytes;
- } while (count != 0);
+ for (i = 0; i < npages; i++) {
+ struct nfs_page *req;
+ unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
+ /* XXX do we need to do the eof zeroing found in async_filler? */
+ req = nfs_create_request(dreq->ctx, dreq->inode,
+ pagevec[i],
+ pgbase, req_len);
+ if (IS_ERR(req)) {
+ result = PTR_ERR(req);
+ break;
+ }
+ req->wb_index = pos >> PAGE_SHIFT;
+ req->wb_offset = pos & ~PAGE_MASK;
+ if (!nfs_pageio_add_request(desc, req)) {
+ result = desc->pg_error;
+ nfs_release_request(req);
+ break;
+ }
+ pgbase = 0;
+ bytes -= req_len;
+ started += req_len;
+ user_addr += req_len;
+ pos += req_len;
+ count -= req_len;
+ }
+		/* The nfs_page structures now hold references to these pages */
+ nfs_direct_release_pages(pagevec, npages);
+ } while (count != 0 && result >= 0);
+
+ kfree(pagevec);
if (started)
return started;
@@ -388,15 +388,19 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
unsigned long nr_segs,
loff_t pos)
{
+ struct nfs_pageio_descriptor desc;
ssize_t result = -EINVAL;
size_t requested_bytes = 0;
unsigned long seg;
+ nfs_pageio_init_read(&desc, dreq->inode,
+ &nfs_direct_read_completion_ops);
get_dreq(dreq);
+ desc.pg_dreq = dreq;
for (seg = 0; seg < nr_segs; seg++) {
const struct iovec *vec = &iov[seg];
- result = nfs_direct_read_schedule_segment(dreq, vec, pos);
+ result = nfs_direct_read_schedule_segment(&desc, vec, pos);
if (result < 0)
break;
requested_bytes += result;
@@ -405,6 +409,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
pos += vec->iov_len;
}
+ nfs_pageio_complete(&desc);
+
/*
* If no bytes were started, return the error, and let the
* generic layer handle the completion.
@@ -441,104 +447,70 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
if (!result)
result = nfs_direct_wait(dreq);
+ NFS_I(inode)->read_io += result;
out_release:
nfs_direct_req_release(dreq);
out:
return result;
}
-static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
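+/* All direct writes are done: invalidate the page cache and end the dio. */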
+static void nfs_inode_dio_write_done(struct inode *inode)
{
- while (!list_empty(&dreq->rewrite_list)) {
- struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
- list_del(&data->pages);
- nfs_direct_release_pages(data->pagevec, data->npages);
- nfs_writedata_free(data);
- }
+ nfs_zap_mapping(inode, inode->i_mapping);
+ inode_dio_done(inode);
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
- struct inode *inode = dreq->inode;
- struct list_head *p;
- struct nfs_write_data *data;
- struct rpc_task *task;
- struct rpc_message msg = {
- .rpc_cred = dreq->ctx->cred,
- };
- struct rpc_task_setup task_setup_data = {
- .rpc_client = NFS_CLIENT(inode),
- .rpc_message = &msg,
- .callback_ops = &nfs_write_direct_ops,
- .workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
- };
+ struct nfs_pageio_descriptor desc;
+ struct nfs_page *req, *tmp;
+ LIST_HEAD(reqs);
+ struct nfs_commit_info cinfo;
+ LIST_HEAD(failed);
+
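+	/*
+	 * Collect every request still awaiting a COMMIT (pNFS and MDS lists)
+	 * and resend it as a stable write through the pageio layer.  Requests
+	 * the pageio layer refuses are parked on 'failed' and released below,
+	 * and the whole direct request is failed with -EIO.
+	 */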
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
+ spin_lock(cinfo.lock);
+ nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
+ spin_unlock(cinfo.lock);
dreq->count = 0;
get_dreq(dreq);
- list_for_each(p, &dreq->rewrite_list) {
- data = list_entry(p, struct nfs_write_data, pages);
-
- get_dreq(dreq);
-
- /* Use stable writes */
- data->args.stable = NFS_FILE_SYNC;
-
- /*
- * Reset data->res.
- */
- nfs_fattr_init(&data->fattr);
- data->res.count = data->args.count;
- memset(&data->verf, 0, sizeof(data->verf));
-
- /*
- * Reuse data->task; data->args should not have changed
- * since the original request was sent.
- */
- task_setup_data.task = &data->task;
- task_setup_data.callback_data = data;
- msg.rpc_argp = &data->args;
- msg.rpc_resp = &data->res;
- NFS_PROTO(inode)->write_setup(data, &msg);
-
- /*
- * We're called via an RPC callback, so BKL is already held.
- */
- task = rpc_run_task(&task_setup_data);
- if (!IS_ERR(task))
- rpc_put_task(task);
-
- dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
- data->task.tk_pid,
- inode->i_sb->s_id,
- (long long)NFS_FILEID(inode),
- data->args.count,
- (unsigned long long)data->args.offset);
+ nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE,
+ &nfs_direct_write_completion_ops);
+ desc.pg_dreq = dreq;
+
+ list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
+ if (!nfs_pageio_add_request(&desc, req)) {
+ nfs_list_add_request(req, &failed);
+ spin_lock(cinfo.lock);
+ dreq->flags = 0;
+ dreq->error = -EIO;
+ spin_unlock(cinfo.lock);
+ }
}
+ nfs_pageio_complete(&desc);
- if (put_dreq(dreq))
- nfs_direct_write_complete(dreq, inode);
-}
-
-static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
-{
- struct nfs_write_data *data = calldata;
+	while (!list_empty(&failed)) {
+		req = nfs_list_entry(failed.next);
+		nfs_list_remove_request(req);
+		nfs_unlock_and_release_request(req);
+	}
- /* Call the NFS version-specific code */
- NFS_PROTO(data->inode)->commit_done(task, data);
+ if (put_dreq(dreq))
+ nfs_direct_write_complete(dreq, dreq->inode);
}
-static void nfs_direct_commit_release(void *calldata)
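+/*
+ * A COMMIT has finished.  On an error or a changed write verifier, put the
+ * requests back on the commit list so they get resent through the MDS;
+ * otherwise just release them.  The direct request is completed once the
+ * last outstanding COMMIT has been processed.
+ */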
+static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
- struct nfs_write_data *data = calldata;
- struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+ struct nfs_direct_req *dreq = data->dreq;
+ struct nfs_commit_info cinfo;
+ struct nfs_page *req;
int status = data->task.tk_status;
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
if (status < 0) {
dprintk("NFS: %5u commit failed with error %d.\n",
- data->task.tk_pid, status);
+ data->task.tk_pid, status);
dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
@@ -546,62 +518,47 @@ static void nfs_direct_commit_release(void *calldata)
}
dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
- nfs_direct_write_complete(dreq, data->inode);
- nfs_commit_free(data);
+ while (!list_empty(&data->pages)) {
+ req = nfs_list_entry(data->pages.next);
+ nfs_list_remove_request(req);
+ if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
+ /* Note the rewrite will go through mds */
+ kref_get(&req->wb_kref);
+ nfs_mark_request_commit(req, NULL, &cinfo);
+ }
+ nfs_unlock_and_release_request(req);
+ }
+
+ if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
+ nfs_direct_write_complete(dreq, data->inode);
}
-static const struct rpc_call_ops nfs_commit_direct_ops = {
- .rpc_call_prepare = nfs_write_prepare,
- .rpc_call_done = nfs_direct_commit_result,
- .rpc_release = nfs_direct_commit_release,
+static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
+{
+ /* There is no lock to clear */
+}
+
+static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
+ .completion = nfs_direct_commit_complete,
+ .error_cleanup = nfs_direct_error_cleanup,
};
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
- struct nfs_write_data *data = dreq->commit_data;
- struct rpc_task *task;
- struct rpc_message msg = {
- .rpc_argp = &data->args,
- .rpc_resp = &data->res,
- .rpc_cred = dreq->ctx->cred,
- };
- struct rpc_task_setup task_setup_data = {
- .task = &data->task,
- .rpc_client = NFS_CLIENT(dreq->inode),
- .rpc_message = &msg,
- .callback_ops = &nfs_commit_direct_ops,
- .callback_data = data,
- .workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
- };
-
- data->inode = dreq->inode;
- data->cred = msg.rpc_cred;
-
- data->args.fh = NFS_FH(data->inode);
- data->args.offset = 0;
- data->args.count = 0;
- data->args.context = dreq->ctx;
- data->args.lock_context = dreq->l_ctx;
- data->res.count = 0;
- data->res.fattr = &data->fattr;
- data->res.verf = &data->verf;
- nfs_fattr_init(&data->fattr);
-
- NFS_PROTO(data->inode)->commit_setup(data, &msg);
-
- /* Note: task.tk_ops->rpc_release will free dreq->commit_data */
- dreq->commit_data = NULL;
-
- dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
-
- task = rpc_run_task(&task_setup_data);
- if (!IS_ERR(task))
- rpc_put_task(task);
+ int res;
+ struct nfs_commit_info cinfo;
+ LIST_HEAD(mds_list);
+
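+	/*
+	 * Hand everything on the commit lists to the generic commit code;
+	 * if that fails with -ENOMEM, fall back to resending the writes as
+	 * stable writes.
+	 */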
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
+ res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
+ if (res < 0) /* res == -ENOMEM */
+ nfs_direct_write_reschedule(dreq);
}
-static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
+static void nfs_direct_write_schedule_work(struct work_struct *work)
{
+ struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
int flags = dreq->flags;
dreq->flags = 0;
@@ -613,89 +570,32 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
nfs_direct_write_reschedule(dreq);
break;
default:
- if (dreq->commit_data != NULL)
- nfs_commit_free(dreq->commit_data);
- nfs_direct_free_writedata(dreq);
- nfs_zap_mapping(inode, inode->i_mapping);
+ nfs_inode_dio_write_done(dreq->inode);
nfs_direct_complete(dreq);
}
}
-static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
+static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
- dreq->commit_data = nfs_commitdata_alloc();
- if (dreq->commit_data != NULL)
- dreq->commit_data->req = (struct nfs_page *) dreq;
+ schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
}
+
#else
-static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
+static void nfs_direct_write_schedule_work(struct work_struct *work)
{
- dreq->commit_data = NULL;
}
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
- nfs_direct_free_writedata(dreq);
- nfs_zap_mapping(inode, inode->i_mapping);
+ nfs_inode_dio_write_done(inode);
nfs_direct_complete(dreq);
}
#endif
-static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
-{
- struct nfs_write_data *data = calldata;
-
- nfs_writeback_done(task, data);
-}
-
/*
* NB: Return the value of the first error return code. Subsequent
* errors after the first one are ignored.
*/
-static void nfs_direct_write_release(void *calldata)
-{
- struct nfs_write_data *data = calldata;
- struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
- int status = data->task.tk_status;
-
- spin_lock(&dreq->lock);
-
- if (unlikely(status < 0)) {
- /* An error has occurred, so we should not commit */
- dreq->flags = 0;
- dreq->error = status;
- }
- if (unlikely(dreq->error != 0))
- goto out_unlock;
-
- dreq->count += data->res.count;
-
- if (data->res.verf->committed != NFS_FILE_SYNC) {
- switch (dreq->flags) {
- case 0:
- memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
- dreq->flags = NFS_ODIRECT_DO_COMMIT;
- break;
- case NFS_ODIRECT_DO_COMMIT:
- if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
- dprintk("NFS: %5u write verify failed\n", data->task.tk_pid);
- dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
- }
- }
- }
-out_unlock:
- spin_unlock(&dreq->lock);
-
- if (put_dreq(dreq))
- nfs_direct_write_complete(dreq, data->inode);
-}
-
-static const struct rpc_call_ops nfs_write_direct_ops = {
- .rpc_call_prepare = nfs_write_prepare,
- .rpc_call_done = nfs_direct_write_result,
- .rpc_release = nfs_direct_write_release,
-};
-
/*
* For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
* operation. If nfs_writedata_alloc() or get_user_pages() fails,
@@ -703,132 +603,189 @@ static const struct rpc_call_ops nfs_write_direct_ops = {
* handled automatically by nfs_direct_write_result(). Otherwise, if
* no requests have been sent, just return an error.
*/
-static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
+static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
const struct iovec *iov,
- loff_t pos, int sync)
+ loff_t pos)
{
+ struct nfs_direct_req *dreq = desc->pg_dreq;
struct nfs_open_context *ctx = dreq->ctx;
struct inode *inode = ctx->dentry->d_inode;
unsigned long user_addr = (unsigned long)iov->iov_base;
size_t count = iov->iov_len;
- struct rpc_task *task;
- struct rpc_message msg = {
- .rpc_cred = ctx->cred,
- };
- struct rpc_task_setup task_setup_data = {
- .rpc_client = NFS_CLIENT(inode),
- .rpc_message = &msg,
- .callback_ops = &nfs_write_direct_ops,
- .workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
- };
size_t wsize = NFS_SERVER(inode)->wsize;
unsigned int pgbase;
int result;
ssize_t started = 0;
+ struct page **pagevec = NULL;
+ unsigned int npages;
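+	/*
+	 * Same scheme as the read side: for each wsize-sized chunk, pin the
+	 * user pages, wrap each page in a locked nfs_page and feed it to the
+	 * write pageio descriptor.
+	 */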
do {
- struct nfs_write_data *data;
size_t bytes;
+ int i;
pgbase = user_addr & ~PAGE_MASK;
- bytes = min(wsize,count);
+ bytes = min(max_t(size_t, wsize, PAGE_SIZE), count);
result = -ENOMEM;
- data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes));
- if (unlikely(!data))
+ npages = nfs_page_array_len(pgbase, bytes);
+ if (!pagevec)
+ pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
+ if (!pagevec)
break;
down_read(&current->mm->mmap_sem);
result = get_user_pages(current, current->mm, user_addr,
- data->npages, 0, 0, data->pagevec, NULL);
+ npages, 0, 0, pagevec, NULL);
up_read(&current->mm->mmap_sem);
- if (result < 0) {
- nfs_writedata_free(data);
+ if (result < 0)
break;
- }
- if ((unsigned)result < data->npages) {
+
+ if ((unsigned)result < npages) {
bytes = result * PAGE_SIZE;
if (bytes <= pgbase) {
- nfs_direct_release_pages(data->pagevec, result);
- nfs_writedata_free(data);
+ nfs_direct_release_pages(pagevec, result);
break;
}
bytes -= pgbase;
- data->npages = result;
+ npages = result;
}
- get_dreq(dreq);
-
- list_move_tail(&data->pages, &dreq->rewrite_list);
-
- data->req = (struct nfs_page *) dreq;
- data->inode = inode;
- data->cred = msg.rpc_cred;
- data->args.fh = NFS_FH(inode);
- data->args.context = ctx;
- data->args.lock_context = dreq->l_ctx;
- data->args.offset = pos;
- data->args.pgbase = pgbase;
- data->args.pages = data->pagevec;
- data->args.count = bytes;
- data->args.stable = sync;
- data->res.fattr = &data->fattr;
- data->res.count = bytes;
- data->res.verf = &data->verf;
- nfs_fattr_init(&data->fattr);
-
- task_setup_data.task = &data->task;
- task_setup_data.callback_data = data;
- msg.rpc_argp = &data->args;
- msg.rpc_resp = &data->res;
- NFS_PROTO(inode)->write_setup(data, &msg);
-
- task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task))
- break;
- rpc_put_task(task);
-
- dprintk("NFS: %5u initiated direct write call "
- "(req %s/%Ld, %zu bytes @ offset %Lu)\n",
- data->task.tk_pid,
- inode->i_sb->s_id,
- (long long)NFS_FILEID(inode),
- bytes,
- (unsigned long long)data->args.offset);
+ for (i = 0; i < npages; i++) {
+ struct nfs_page *req;
+ unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
- started += bytes;
- user_addr += bytes;
- pos += bytes;
-
- /* FIXME: Remove this useless math from the final patch */
- pgbase += bytes;
- pgbase &= ~PAGE_MASK;
- BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
+ req = nfs_create_request(dreq->ctx, dreq->inode,
+ pagevec[i],
+ pgbase, req_len);
+ if (IS_ERR(req)) {
+ result = PTR_ERR(req);
+ break;
+ }
+ nfs_lock_request(req);
+ req->wb_index = pos >> PAGE_SHIFT;
+ req->wb_offset = pos & ~PAGE_MASK;
+ if (!nfs_pageio_add_request(desc, req)) {
+ result = desc->pg_error;
+ nfs_unlock_and_release_request(req);
+ break;
+ }
+ pgbase = 0;
+ bytes -= req_len;
+ started += req_len;
+ user_addr += req_len;
+ pos += req_len;
+ count -= req_len;
+ }
+		/* The nfs_page structures now hold references to these pages */
+ nfs_direct_release_pages(pagevec, npages);
+ } while (count != 0 && result >= 0);
- count -= bytes;
- } while (count != 0);
+ kfree(pagevec);
if (started)
return started;
return result < 0 ? (ssize_t) result : -EFAULT;
}
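+/*
+ * Per-header WRITE completion.  Accumulate the byte count under dreq->lock
+ * and decide whether the data hit an error, must be resent, or needs a
+ * COMMIT (by comparing write verifiers); then either mark the requests for
+ * commit or simply release them.  Dropping the last dreq reference triggers
+ * nfs_direct_write_complete().
+ */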
+static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+{
+ struct nfs_direct_req *dreq = hdr->dreq;
+ struct nfs_commit_info cinfo;
+ int bit = -1;
+ struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+ goto out_put;
+
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
+
+ spin_lock(&dreq->lock);
+
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+ dreq->flags = 0;
+ dreq->error = hdr->error;
+ }
+ if (dreq->error != 0)
+ bit = NFS_IOHDR_ERROR;
+ else {
+ dreq->count += hdr->good_bytes;
+ if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ bit = NFS_IOHDR_NEED_RESCHED;
+ } else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
+ if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
+ bit = NFS_IOHDR_NEED_RESCHED;
+ else if (dreq->flags == 0) {
+ memcpy(&dreq->verf, &req->wb_verf,
+ sizeof(dreq->verf));
+ bit = NFS_IOHDR_NEED_COMMIT;
+ dreq->flags = NFS_ODIRECT_DO_COMMIT;
+ } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
+ if (memcmp(&dreq->verf, &req->wb_verf, sizeof(dreq->verf))) {
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ bit = NFS_IOHDR_NEED_RESCHED;
+ } else
+ bit = NFS_IOHDR_NEED_COMMIT;
+ }
+ }
+ }
+ spin_unlock(&dreq->lock);
+
+ while (!list_empty(&hdr->pages)) {
+ req = nfs_list_entry(hdr->pages.next);
+ nfs_list_remove_request(req);
+ switch (bit) {
+ case NFS_IOHDR_NEED_RESCHED:
+ case NFS_IOHDR_NEED_COMMIT:
+ kref_get(&req->wb_kref);
+ nfs_mark_request_commit(req, hdr->lseg, &cinfo);
+ }
+ nfs_unlock_and_release_request(req);
+ }
+
+out_put:
+ if (put_dreq(dreq))
+ nfs_direct_write_complete(dreq, hdr->inode);
+ hdr->release(hdr);
+}
+
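+/*
+ * Error cleanup callback for the direct-write pageio path: any requests
+ * still on the list are unlocked and released.
+ */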
+static void nfs_write_sync_pgio_error(struct list_head *head)
+{
+ struct nfs_page *req;
+
+ while (!list_empty(head)) {
+ req = nfs_list_entry(head->next);
+ nfs_list_remove_request(req);
+ nfs_unlock_and_release_request(req);
+ }
+}
+
+static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
+ .error_cleanup = nfs_write_sync_pgio_error,
+ .init_hdr = nfs_direct_pgio_init,
+ .completion = nfs_direct_write_completion,
+};
+
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
const struct iovec *iov,
unsigned long nr_segs,
- loff_t pos, int sync)
+ loff_t pos)
{
+ struct nfs_pageio_descriptor desc;
+ struct inode *inode = dreq->inode;
ssize_t result = 0;
size_t requested_bytes = 0;
unsigned long seg;
+ nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE,
+ &nfs_direct_write_completion_ops);
+ desc.pg_dreq = dreq;
get_dreq(dreq);
+ atomic_inc(&inode->i_dio_count);
for (seg = 0; seg < nr_segs; seg++) {
const struct iovec *vec = &iov[seg];
- result = nfs_direct_write_schedule_segment(dreq, vec,
- pos, sync);
+ result = nfs_direct_write_schedule_segment(&desc, vec, pos);
if (result < 0)
break;
requested_bytes += result;
@@ -836,12 +793,15 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
break;
pos += vec->iov_len;
}
+ nfs_pageio_complete(&desc);
+ NFS_I(dreq->inode)->write_io += desc.pg_bytes_written;
/*
* If no bytes were started, return the error, and let the
* generic layer handle the completion.
*/
if (requested_bytes == 0) {
+ inode_dio_done(inode);
nfs_direct_req_release(dreq);
return result < 0 ? result : -EIO;
}
@@ -858,16 +818,10 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
ssize_t result = -ENOMEM;
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct nfs_direct_req *dreq;
- size_t wsize = NFS_SERVER(inode)->wsize;
- int sync = NFS_UNSTABLE;
dreq = nfs_direct_req_alloc();
if (!dreq)
goto out;
- nfs_alloc_commit_data(dreq);
-
- if (dreq->commit_data == NULL || count <= wsize)
- sync = NFS_FILE_SYNC;
dreq->inode = inode;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
@@ -877,7 +831,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
- result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
+ result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos);
if (!result)
result = nfs_direct_wait(dreq);
out_release:
@@ -997,10 +951,15 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
task_io_account_write(count);
retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
+ if (retval > 0) {
+ struct inode *inode = mapping->host;
- if (retval > 0)
iocb->ki_pos = pos + retval;
-
+ spin_lock(&inode->i_lock);
+ if (i_size_read(inode) < iocb->ki_pos)
+ i_size_write(inode, iocb->ki_pos);
+ spin_unlock(&inode->i_lock);
+ }
out:
return retval;
}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index aa9b709fd328..56311ca5f9f8 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -174,6 +174,13 @@ nfs_file_flush(struct file *file, fl_owner_t id)
if ((file->f_mode & FMODE_WRITE) == 0)
return 0;
+ /*
+ * If we're holding a write delegation, then just start the i/o
+ * but don't wait for completion (or send a commit).
+ */
+ if (nfs_have_delegation(inode, FMODE_WRITE))
+ return filemap_fdatawrite(file->f_mapping);
+
/* Flush writes to the server and return any errors */
return vfs_fsync(file, 0);
}
@@ -417,6 +424,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
if (status < 0)
return status;
+ NFS_I(mapping->host)->write_io += copied;
return copied;
}
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index ae65c16b3670..c817787fbdb4 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -64,23 +64,12 @@ void nfs_fscache_release_client_cookie(struct nfs_client *clp)
* either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
* superblock across an automount point of some nature.
*/
-void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq,
- struct nfs_clone_mount *mntdata)
+void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
struct nfs_fscache_key *key, *xkey;
struct nfs_server *nfss = NFS_SB(sb);
struct rb_node **p, *parent;
- int diff, ulen;
-
- if (uniq) {
- ulen = strlen(uniq);
- } else if (mntdata) {
- struct nfs_server *mnt_s = NFS_SB(mntdata->sb);
- if (mnt_s->fscache_key) {
- uniq = mnt_s->fscache_key->key.uniquifier;
- ulen = mnt_s->fscache_key->key.uniq_len;
- }
- }
+ int diff;
if (!uniq) {
uniq = "";
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index b9c572d0679f..c5b11b53ff33 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -73,9 +73,7 @@ extern void nfs_fscache_unregister(void);
extern void nfs_fscache_get_client_cookie(struct nfs_client *);
extern void nfs_fscache_release_client_cookie(struct nfs_client *);
-extern void nfs_fscache_get_super_cookie(struct super_block *,
- const char *,
- struct nfs_clone_mount *);
+extern void nfs_fscache_get_super_cookie(struct super_block *, const char *, int);
extern void nfs_fscache_release_super_cookie(struct super_block *);
extern void nfs_fscache_init_inode_cookie(struct inode *);
@@ -172,12 +170,6 @@ static inline void nfs_fscache_unregister(void) {}
static inline void nfs_fscache_get_client_cookie(struct nfs_client *clp) {}
static inline void nfs_fscache_release_client_cookie(struct nfs_client *clp) {}
-static inline void nfs_fscache_get_super_cookie(
- struct super_block *sb,
- const char *uniq,
- struct nfs_clone_mount *mntdata)
-{
-}
static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
static inline void nfs_fscache_init_inode_cookie(struct inode *inode) {}
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 4ca6f5c8038e..8abfb19bd3aa 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -150,7 +150,7 @@ int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh)
goto out;
/* Start by getting the root filehandle from the server */
- ret = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo);
+ ret = nfs4_proc_get_rootfh(server, mntfh, &fsinfo);
if (ret < 0) {
dprintk("nfs4_get_rootfh: getroot error = %d\n", -ret);
goto out;
@@ -178,87 +178,4 @@ out:
return ret;
}
-/*
- * get an NFS4 root dentry from the root filehandle
- */
-struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh,
- const char *devname)
-{
- struct nfs_server *server = NFS_SB(sb);
- struct nfs_fattr *fattr = NULL;
- struct dentry *ret;
- struct inode *inode;
- void *name = kstrdup(devname, GFP_KERNEL);
- int error;
-
- dprintk("--> nfs4_get_root()\n");
-
- if (!name)
- return ERR_PTR(-ENOMEM);
-
- /* get the info about the server and filesystem */
- error = nfs4_server_capabilities(server, mntfh);
- if (error < 0) {
- dprintk("nfs_get_root: getcaps error = %d\n",
- -error);
- kfree(name);
- return ERR_PTR(error);
- }
-
- fattr = nfs_alloc_fattr();
- if (fattr == NULL) {
- kfree(name);
- return ERR_PTR(-ENOMEM);
- }
-
- /* get the actual root for this mount */
- error = server->nfs_client->rpc_ops->getattr(server, mntfh, fattr);
- if (error < 0) {
- dprintk("nfs_get_root: getattr error = %d\n", -error);
- ret = ERR_PTR(error);
- goto out;
- }
-
- if (fattr->valid & NFS_ATTR_FATTR_FSID &&
- !nfs_fsid_equal(&server->fsid, &fattr->fsid))
- memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
-
- inode = nfs_fhget(sb, mntfh, fattr);
- if (IS_ERR(inode)) {
- dprintk("nfs_get_root: get root inode failed\n");
- ret = ERR_CAST(inode);
- goto out;
- }
-
- error = nfs_superblock_set_dummy_root(sb, inode);
- if (error != 0) {
- ret = ERR_PTR(error);
- goto out;
- }
-
- /* root dentries normally start off anonymous and get spliced in later
- * if the dentry tree reaches them; however if the dentry already
- * exists, we'll pick it up at this point and use it as the root
- */
- ret = d_obtain_alias(inode);
- if (IS_ERR(ret)) {
- dprintk("nfs_get_root: get root dentry failed\n");
- goto out;
- }
-
- security_d_instantiate(ret, inode);
- spin_lock(&ret->d_lock);
- if (IS_ROOT(ret) && !(ret->d_flags & DCACHE_NFSFS_RENAMED)) {
- ret->d_fsdata = name;
- name = NULL;
- }
- spin_unlock(&ret->d_lock);
-out:
- if (name)
- kfree(name);
- nfs_free_fattr(fattr);
- dprintk("<-- nfs4_get_root()\n");
- return ret;
-}
-
#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index ba3019f5934c..b5b86a05059c 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -415,7 +415,7 @@ static int __nfs_idmap_register(struct dentry *dir,
static void nfs_idmap_unregister(struct nfs_client *clp,
struct rpc_pipe *pipe)
{
- struct net *net = clp->net;
+ struct net *net = clp->cl_net;
struct super_block *pipefs_sb;
pipefs_sb = rpc_get_sb_net(net);
@@ -429,7 +429,7 @@ static int nfs_idmap_register(struct nfs_client *clp,
struct idmap *idmap,
struct rpc_pipe *pipe)
{
- struct net *net = clp->net;
+ struct net *net = clp->cl_net;
struct super_block *pipefs_sb;
int err = 0;
@@ -530,9 +530,25 @@ static struct nfs_client *nfs_get_client_for_event(struct net *net, int event)
struct nfs_net *nn = net_generic(net, nfs_net_id);
struct dentry *cl_dentry;
struct nfs_client *clp;
+ int err;
+restart:
spin_lock(&nn->nfs_client_lock);
list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
+ /* Wait for initialisation to finish */
+ if (clp->cl_cons_state == NFS_CS_INITING) {
+ atomic_inc(&clp->cl_count);
+ spin_unlock(&nn->nfs_client_lock);
+ err = nfs_wait_client_init_complete(clp);
+ nfs_put_client(clp);
+ if (err)
+ return NULL;
+ goto restart;
+ }
+ /* Skip nfs_clients that failed to initialise */
+ if (clp->cl_cons_state < 0)
+ continue;
+ smp_rmb();
if (clp->rpc_ops != &nfs_v4_clientops)
continue;
cl_dentry = clp->cl_idmap->idmap_pipe->dentry;
@@ -640,20 +656,16 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
struct idmap_msg *im;
struct idmap *idmap = (struct idmap *)aux;
struct key *key = cons->key;
- int ret;
+ int ret = -ENOMEM;
/* msg and im are freed in idmap_pipe_destroy_msg */
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (IS_ERR(msg)) {
- ret = PTR_ERR(msg);
+ if (!msg)
goto out0;
- }
im = kmalloc(sizeof(*im), GFP_KERNEL);
- if (IS_ERR(im)) {
- ret = PTR_ERR(im);
+ if (!im)
goto out1;
- }
ret = nfs_idmap_prepare_message(key->description, im, msg);
if (ret < 0)
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e8bbfa5b3500..e605d695dbcb 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -121,7 +121,7 @@ static void nfs_clear_inode(struct inode *inode)
void nfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
nfs_clear_inode(inode);
}
@@ -285,9 +285,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
inode->i_mode = fattr->mode;
if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
&& nfs_server_capable(inode, NFS_CAP_MODE))
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
/* Why so? Because we want revalidate for devices/FIFOs, and
* that's precisely what we have in nfs_file_inode_operations.
*/
@@ -300,8 +298,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
inode->i_fop = &nfs_dir_operations;
inode->i_data.a_ops = &nfs_dir_aops;
- if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS))
- set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
/* Deal with crossing mountpoints */
if (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT ||
fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
@@ -327,6 +323,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
inode->i_gid = -2;
inode->i_blocks = 0;
memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
+ nfsi->write_io = 0;
+ nfsi->read_io = 0;
nfsi->read_cache_jiffies = fattr->time_start;
nfsi->attr_gencount = fattr->gencount;
@@ -337,24 +335,19 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
if (fattr->valid & NFS_ATTR_FATTR_MTIME)
inode->i_mtime = fattr->mtime;
else if (nfs_server_capable(inode, NFS_CAP_MTIME))
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_DATA;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
inode->i_ctime = fattr->ctime;
else if (nfs_server_capable(inode, NFS_CAP_CTIME))
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
inode->i_version = fattr->change_attr;
else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_DATA;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_SIZE)
inode->i_size = nfs_size_to_loff_t(fattr->size);
else
nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_DATA
| NFS_INO_REVAL_PAGECACHE;
if (fattr->valid & NFS_ATTR_FATTR_NLINK)
set_nlink(inode, fattr->nlink);
@@ -363,15 +356,11 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
if (fattr->valid & NFS_ATTR_FATTR_OWNER)
inode->i_uid = fattr->uid;
else if (nfs_server_capable(inode, NFS_CAP_OWNER))
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_GROUP)
inode->i_gid = fattr->gid;
else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
inode->i_blocks = fattr->du.nfs2.blocks;
if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
@@ -429,8 +418,10 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
return 0;
/* Write all dirty data */
- if (S_ISREG(inode->i_mode))
+ if (S_ISREG(inode->i_mode)) {
+ nfs_inode_dio_wait(inode);
nfs_wb_all(inode);
+ }
fattr = nfs_alloc_fattr();
if (fattr == NULL)
@@ -514,6 +505,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
/* Flush out writes to the server in order to update c/mtime. */
if (S_ISREG(inode->i_mode)) {
+ nfs_inode_dio_wait(inode);
err = filemap_write_and_wait(inode->i_mapping);
if (err)
goto out;
@@ -654,6 +646,7 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f
nfs_init_lock_context(&ctx->lock_context);
ctx->lock_context.open_context = ctx;
INIT_LIST_HEAD(&ctx->list);
+ ctx->mdsthreshold = NULL;
return ctx;
}
@@ -682,6 +675,7 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
put_rpccred(ctx->cred);
dput(ctx->dentry);
nfs_sb_deactive(sb);
+ kfree(ctx->mdsthreshold);
kfree(ctx);
}
@@ -870,6 +864,15 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
return 0;
}
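+/*
+ * Do we need to revalidate the inode attributes before trusting the page
+ * cache?  Not while a delegation covers the attributes.
+ */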
+static bool nfs_mapping_need_revalidate_inode(struct inode *inode)
+{
+ if (nfs_have_delegated_attributes(inode))
+ return false;
+ return (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE)
+ || nfs_attribute_timeout(inode)
+ || NFS_STALE(inode);
+}
+
/**
* nfs_revalidate_mapping - Revalidate the pagecache
* @inode - pointer to host inode
@@ -880,9 +883,7 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
struct nfs_inode *nfsi = NFS_I(inode);
int ret = 0;
- if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
- || nfs_attribute_cache_expired(inode)
- || NFS_STALE(inode)) {
+ if (nfs_mapping_need_revalidate_inode(inode)) {
ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
if (ret < 0)
goto out;
@@ -948,6 +949,8 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
unsigned long invalid = 0;
+ if (nfs_have_delegated_attributes(inode))
+ return 0;
/* Has the inode gone and changed behind our back? */
if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
return -EIO;
@@ -960,7 +963,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
/* Verify a few of the more important attributes */
if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+ invalid |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
cur_size = i_size_read(inode);
@@ -1279,14 +1282,26 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfs_display_fhandle_hash(NFS_FH(inode)),
atomic_read(&inode->i_count), fattr->valid);
- if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
- goto out_fileid;
+ if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) {
+ printk(KERN_ERR "NFS: server %s error: fileid changed\n"
+ "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
+ NFS_SERVER(inode)->nfs_client->cl_hostname,
+ inode->i_sb->s_id, (long long)nfsi->fileid,
+ (long long)fattr->fileid);
+ goto out_err;
+ }
/*
* Make sure the inode's type hasn't changed.
*/
- if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
- goto out_changed;
+ if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) {
+ /*
+ * Big trouble! The inode has become a different object.
+ */
+ printk(KERN_DEBUG "NFS: %s: inode %ld mode changed, %07o to %07o\n",
+ __func__, inode->i_ino, inode->i_mode, fattr->mode);
+ goto out_err;
+ }
server = NFS_SERVER(inode);
/* Update the fsid? */
@@ -1314,7 +1329,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if (inode->i_version != fattr->change_attr) {
dprintk("NFS: change_attr change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ invalid |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_DATA
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_PAGECACHE;
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
inode->i_version = fattr->change_attr;
@@ -1323,38 +1342,15 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
invalid |= save_cache_validity;
if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
- /* NFSv2/v3: Check if the mtime agrees */
- if (!timespec_equal(&inode->i_mtime, &fattr->mtime)) {
- dprintk("NFS: mtime change on server for file %s/%ld\n",
- inode->i_sb->s_id, inode->i_ino);
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
- if (S_ISDIR(inode->i_mode))
- nfs_force_lookup_revalidate(inode);
- memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
- }
+ memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
} else if (server->caps & NFS_CAP_MTIME)
invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_DATA
- | NFS_INO_REVAL_PAGECACHE
| NFS_INO_REVAL_FORCED);
if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
- /* If ctime has changed we should definitely clear access+acl caches */
- if (!timespec_equal(&inode->i_ctime, &fattr->ctime)) {
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
- /* and probably clear data for a directory too as utimes can cause
- * havoc with our cache.
- */
- if (S_ISDIR(inode->i_mode)) {
- invalid |= NFS_INO_INVALID_DATA;
- nfs_force_lookup_revalidate(inode);
- }
- memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
- }
+ memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
} else if (server->caps & NFS_CAP_CTIME)
invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL
| NFS_INO_REVAL_FORCED);
/* Check if our cached file size is stale */
@@ -1466,12 +1462,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfsi->cache_validity |= invalid;
return 0;
- out_changed:
- /*
- * Big trouble! The inode has become a different object.
- */
- printk(KERN_DEBUG "NFS: %s: inode %ld mode changed, %07o to %07o\n",
- __func__, inode->i_ino, inode->i_mode, fattr->mode);
out_err:
/*
* No need to worry about unhashing the dentry, as the
@@ -1480,13 +1470,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
*/
nfs_invalidate_inode(inode);
return -ESTALE;
-
- out_fileid:
- printk(KERN_ERR "NFS: server %s error: fileid changed\n"
- "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
- NFS_SERVER(inode)->nfs_client->cl_hostname, inode->i_sb->s_id,
- (long long)nfsi->fileid, (long long)fattr->fileid);
- goto out_err;
}
@@ -1500,7 +1483,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
void nfs4_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
pnfs_return_layout(inode);
pnfs_destroy_layout(NFS_I(inode));
/* If we are holding a delegation, return it! */
@@ -1547,7 +1530,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
nfsi->delegation_state = 0;
init_rwsem(&nfsi->rwsem);
nfsi->layout = NULL;
- atomic_set(&nfsi->commits_outstanding, 0);
+ atomic_set(&nfsi->commit_info.rpcs_out, 0);
#endif
}
@@ -1559,9 +1542,9 @@ static void init_once(void *foo)
INIT_LIST_HEAD(&nfsi->open_files);
INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
- INIT_LIST_HEAD(&nfsi->commit_list);
+ INIT_LIST_HEAD(&nfsi->commit_info.list);
nfsi->npages = 0;
- nfsi->ncommit = 0;
+ nfsi->commit_info.ncommit = 0;
atomic_set(&nfsi->silly_count, 1);
INIT_HLIST_HEAD(&nfsi->silly_list);
init_waitqueue_head(&nfsi->waitqueue);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index b777bdaba4c5..18f99ef71343 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -103,6 +103,7 @@ struct nfs_parsed_mount_data {
unsigned int version;
unsigned int minorversion;
char *fscache_uniq;
+ bool need_mount;
struct {
struct sockaddr_storage address;
@@ -167,11 +168,13 @@ extern struct nfs_server *nfs_clone_server(struct nfs_server *,
struct nfs_fh *,
struct nfs_fattr *,
rpc_authflavor_t);
+extern int nfs_wait_client_init_complete(const struct nfs_client *clp);
extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
-extern int nfs4_check_client_ready(struct nfs_client *clp);
extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
const struct sockaddr *ds_addr,
- int ds_addrlen, int ds_proto);
+ int ds_addrlen, int ds_proto,
+ unsigned int ds_timeo,
+ unsigned int ds_retrans);
#ifdef CONFIG_PROC_FS
extern int __init nfs_fs_proc_init(void);
extern void nfs_fs_proc_exit(void);
@@ -185,21 +188,11 @@ static inline void nfs_fs_proc_exit(void)
}
#endif
-/* nfs4namespace.c */
-#ifdef CONFIG_NFS_V4
-extern struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry);
-#else
-static inline
-struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
-{
- return ERR_PTR(-ENOENT);
-}
-#endif
-
/* callback_xdr.c */
extern struct svc_version nfs4_callback_version1;
extern struct svc_version nfs4_callback_version4;
+struct nfs_pageio_descriptor;
/* pagelist.c */
extern int __init nfs_init_nfspagecache(void);
extern void nfs_destroy_nfspagecache(void);
@@ -210,9 +203,13 @@ extern void nfs_destroy_writepagecache(void);
extern int __init nfs_init_directcache(void);
extern void nfs_destroy_directcache(void);
+extern bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount);
+extern void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr,
+ void (*release)(struct nfs_pgio_header *hdr));
+void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos);
/* nfs2xdr.c */
-extern int nfs_stat_to_errno(enum nfs_stat);
extern struct rpc_procinfo nfs_procedures[];
extern int nfs2_decode_dirent(struct xdr_stream *,
struct nfs_entry *, int);
@@ -237,14 +234,13 @@ extern const u32 nfs41_maxwrite_overhead;
extern struct rpc_procinfo nfs4_procedures[];
#endif
-extern int nfs4_init_ds_session(struct nfs_client *clp);
+extern int nfs4_init_ds_session(struct nfs_client *, unsigned long);
/* proc.c */
void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
-extern int nfs_init_client(struct nfs_client *clp,
+extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
const struct rpc_timeout *timeparms,
- const char *ip_addr, rpc_authflavor_t authflavour,
- int noresvport);
+ const char *ip_addr, rpc_authflavor_t authflavour);
/* dir.c */
extern int nfs_access_cache_shrinker(struct shrinker *shrink,
@@ -280,9 +276,10 @@ extern void nfs_sb_deactive(struct super_block *sb);
extern char *nfs_path(char **p, struct dentry *dentry,
char *buffer, ssize_t buflen);
extern struct vfsmount *nfs_d_automount(struct path *path);
-#ifdef CONFIG_NFS_V4
-rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
-#endif
+struct vfsmount *nfs_submount(struct nfs_server *, struct dentry *,
+ struct nfs_fh *, struct nfs_fattr *);
+struct vfsmount *nfs_do_submount(struct dentry *, struct nfs_fh *,
+ struct nfs_fattr *, rpc_authflavor_t);
/* getroot.c */
extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *,
@@ -294,46 +291,73 @@ extern struct dentry *nfs4_get_root(struct super_block *, struct nfs_fh *,
extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh);
#endif
-struct nfs_pageio_descriptor;
+struct nfs_pgio_completion_ops;
/* read.c */
-extern int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
- const struct rpc_call_ops *call_ops);
+extern struct nfs_read_header *nfs_readhdr_alloc(void);
+extern void nfs_readhdr_free(struct nfs_pgio_header *hdr);
+extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops);
+extern int nfs_initiate_read(struct rpc_clnt *clnt,
+ struct nfs_read_data *data,
+ const struct rpc_call_ops *call_ops, int flags);
extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
- struct list_head *head);
-
+ struct nfs_pgio_header *hdr);
extern void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
- struct inode *inode);
+ struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops);
extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
extern void nfs_readdata_release(struct nfs_read_data *rdata);
/* write.c */
+extern void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode, int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops);
+extern struct nfs_write_header *nfs_writehdr_alloc(void);
+extern void nfs_writehdr_free(struct nfs_pgio_header *hdr);
extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
- struct list_head *head);
+ struct nfs_pgio_header *hdr);
extern void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
- struct inode *inode, int ioflags);
+ struct inode *inode, int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops);
extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
extern void nfs_writedata_release(struct nfs_write_data *wdata);
-extern void nfs_commit_free(struct nfs_write_data *p);
-extern int nfs_initiate_write(struct nfs_write_data *data,
- struct rpc_clnt *clnt,
+extern void nfs_commit_free(struct nfs_commit_data *p);
+extern int nfs_initiate_write(struct rpc_clnt *clnt,
+ struct nfs_write_data *data,
const struct rpc_call_ops *call_ops,
- int how);
+ int how, int flags);
extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
-extern int nfs_initiate_commit(struct nfs_write_data *data,
- struct rpc_clnt *clnt,
+extern void nfs_commit_prepare(struct rpc_task *task, void *calldata);
+extern int nfs_initiate_commit(struct rpc_clnt *clnt,
+ struct nfs_commit_data *data,
const struct rpc_call_ops *call_ops,
- int how);
-extern void nfs_init_commit(struct nfs_write_data *data,
+ int how, int flags);
+extern void nfs_init_commit(struct nfs_commit_data *data,
struct list_head *head,
- struct pnfs_layout_segment *lseg);
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo);
+int nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
+ struct nfs_commit_info *cinfo, int max);
+int nfs_scan_commit(struct inode *inode, struct list_head *dst,
+ struct nfs_commit_info *cinfo);
+void nfs_mark_request_commit(struct nfs_page *req,
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo);
+int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
+ int how, struct nfs_commit_info *cinfo);
void nfs_retry_commit(struct list_head *page_list,
- struct pnfs_layout_segment *lseg);
-void nfs_commit_clear_lock(struct nfs_inode *nfsi);
-void nfs_commitdata_release(void *data);
-void nfs_commit_release_pages(struct nfs_write_data *data);
-void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head);
-void nfs_request_remove_commit_list(struct nfs_page *req);
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo);
+void nfs_commitdata_release(struct nfs_commit_data *data);
+void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
+ struct nfs_commit_info *cinfo);
+void nfs_request_remove_commit_list(struct nfs_page *req,
+ struct nfs_commit_info *cinfo);
+void nfs_init_cinfo(struct nfs_commit_info *cinfo,
+ struct inode *inode,
+ struct nfs_direct_req *dreq);
#ifdef CONFIG_MIGRATION
extern int nfs_migrate_page(struct address_space *,
@@ -342,15 +366,20 @@ extern int nfs_migrate_page(struct address_space *,
#define nfs_migrate_page NULL
#endif
+/* direct.c */
+void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
+ struct nfs_direct_req *dreq);
+static inline void nfs_inode_dio_wait(struct inode *inode)
+{
+ inode_dio_wait(inode);
+}
+
/* nfs4proc.c */
extern void __nfs4_read_done_cb(struct nfs_read_data *);
-extern void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data);
-extern int nfs4_init_client(struct nfs_client *clp,
+extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
const struct rpc_timeout *timeparms,
const char *ip_addr,
- rpc_authflavor_t authflavour,
- int noresvport);
-extern void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data);
+ rpc_authflavor_t authflavour);
extern int _nfs4_call_sync(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
@@ -466,3 +495,15 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
PAGE_SIZE - 1) >> PAGE_SHIFT;
}
+/*
+ * Convert a struct timespec into a 64-bit change attribute
+ *
+ * This does approximately the same thing as timespec_to_ns(),
+ * but for calculation efficiency, we multiply the seconds by
+ * 1024*1024*1024.
+ */
+static inline
+u64 nfs_timespec_to_change_attr(const struct timespec *ts)
+{
+ return ((u64)ts->tv_sec << 30) + ts->tv_nsec;
+}
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index d51868e5683c..08b9c93675da 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -26,11 +26,6 @@ static LIST_HEAD(nfs_automount_list);
static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
int nfs_mountpoint_expiry_timeout = 500 * HZ;
-static struct vfsmount *nfs_do_submount(struct dentry *dentry,
- struct nfs_fh *fh,
- struct nfs_fattr *fattr,
- rpc_authflavor_t authflavor);
-
/*
* nfs_path - reconstruct the path given an arbitrary dentry
* @base - used to return pointer to the end of devname part of path
@@ -118,64 +113,6 @@ Elong:
return ERR_PTR(-ENAMETOOLONG);
}
-#ifdef CONFIG_NFS_V4
-rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
-{
- struct gss_api_mech *mech;
- struct xdr_netobj oid;
- int i;
- rpc_authflavor_t pseudoflavor = RPC_AUTH_UNIX;
-
- for (i = 0; i < flavors->num_flavors; i++) {
- struct nfs4_secinfo_flavor *flavor;
- flavor = &flavors->flavors[i];
-
- if (flavor->flavor == RPC_AUTH_NULL || flavor->flavor == RPC_AUTH_UNIX) {
- pseudoflavor = flavor->flavor;
- break;
- } else if (flavor->flavor == RPC_AUTH_GSS) {
- oid.len = flavor->gss.sec_oid4.len;
- oid.data = flavor->gss.sec_oid4.data;
- mech = gss_mech_get_by_OID(&oid);
- if (!mech)
- continue;
- pseudoflavor = gss_svc_to_pseudoflavor(mech, flavor->gss.service);
- gss_mech_put(mech);
- break;
- }
- }
-
- return pseudoflavor;
-}
-
-static struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
- struct qstr *name,
- struct nfs_fh *fh,
- struct nfs_fattr *fattr)
-{
- int err;
-
- if (NFS_PROTO(dir)->version == 4)
- return nfs4_proc_lookup_mountpoint(dir, name, fh, fattr);
-
- err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
- if (err)
- return ERR_PTR(err);
- return rpc_clone_client(NFS_SERVER(dir)->client);
-}
-#else /* CONFIG_NFS_V4 */
-static inline struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
- struct qstr *name,
- struct nfs_fh *fh,
- struct nfs_fattr *fattr)
-{
- int err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
- if (err)
- return ERR_PTR(err);
- return rpc_clone_client(NFS_SERVER(dir)->client);
-}
-#endif /* CONFIG_NFS_V4 */
-
/*
* nfs_d_automount - Handle crossing a mountpoint on the server
* @path - The mountpoint
@@ -191,10 +128,9 @@ static inline struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
struct vfsmount *nfs_d_automount(struct path *path)
{
struct vfsmount *mnt;
- struct dentry *parent;
+ struct nfs_server *server = NFS_SERVER(path->dentry->d_inode);
struct nfs_fh *fh = NULL;
struct nfs_fattr *fattr = NULL;
- struct rpc_clnt *client;
dprintk("--> nfs_d_automount()\n");
@@ -210,21 +146,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
dprintk("%s: enter\n", __func__);
- /* Look it up again to get its attributes */
- parent = dget_parent(path->dentry);
- client = nfs_lookup_mountpoint(parent->d_inode, &path->dentry->d_name, fh, fattr);
- dput(parent);
- if (IS_ERR(client)) {
- mnt = ERR_CAST(client);
- goto out;
- }
-
- if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
- mnt = nfs_do_refmount(client, path->dentry);
- else
- mnt = nfs_do_submount(path->dentry, fh, fattr, client->cl_auth->au_flavor);
- rpc_shutdown_client(client);
-
+ mnt = server->nfs_client->rpc_ops->submount(server, path->dentry, fh, fattr);
if (IS_ERR(mnt))
goto out;
@@ -297,10 +219,8 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
* @authflavor - security flavor to use when performing the mount
*
*/
-static struct vfsmount *nfs_do_submount(struct dentry *dentry,
- struct nfs_fh *fh,
- struct nfs_fattr *fattr,
- rpc_authflavor_t authflavor)
+struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
+ struct nfs_fattr *fattr, rpc_authflavor_t authflavor)
{
struct nfs_clone_mount mountdata = {
.sb = dentry->d_sb,
@@ -333,3 +253,18 @@ out:
dprintk("<-- nfs_do_submount() = %p\n", mnt);
return mnt;
}
+
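+/*
+ * Generic ->submount implementation: look the mountpoint up again under its
+ * parent to refresh the filehandle and attributes, then hand off to
+ * nfs_do_submount() with the server's existing auth flavor.
+ */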
+struct vfsmount *nfs_submount(struct nfs_server *server, struct dentry *dentry,
+ struct nfs_fh *fh, struct nfs_fattr *fattr)
+{
+ int err;
+ struct dentry *parent = dget_parent(dentry);
+
+ /* Look it up again to get its attributes */
+ err = server->nfs_client->rpc_ops->lookup(parent->d_inode, &dentry->d_name, fh, fattr);
+ dput(parent);
+ if (err != 0)
+ return ERR_PTR(err);
+
+ return nfs_do_submount(dentry, fh, fattr, server->client->cl_auth->au_flavor);
+}
diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h
index aa14ec303e94..8a6394edb8b0 100644
--- a/fs/nfs/netns.h
+++ b/fs/nfs/netns.h
@@ -1,3 +1,7 @@
+/*
+ * NFS-private data for each "struct net". Accessed with net_generic().
+ */
+
#ifndef __NFS_NETNS_H__
#define __NFS_NETNS_H__
@@ -20,6 +24,7 @@ struct nfs_net {
struct idr cb_ident_idr; /* Protected by nfs_client_lock */
#endif
spinlock_t nfs_client_lock;
+ struct timespec boot_time;
};
extern int nfs_net_id;
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 1f56000fabbd..baf759bccd05 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -61,6 +61,7 @@
#define NFS_readdirres_sz (1)
#define NFS_statfsres_sz (1+NFS_info_sz)
+static int nfs_stat_to_errno(enum nfs_stat);
/*
* While encoding arguments, set up the reply buffer in advance to
@@ -313,6 +314,8 @@ static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
p = xdr_decode_time(p, &fattr->atime);
p = xdr_decode_time(p, &fattr->mtime);
xdr_decode_time(p, &fattr->ctime);
+ fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);
+
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
@@ -1109,7 +1112,7 @@ static const struct {
* Returns a local errno value, or -EIO if the NFS status code is
* not recognized. This function is used jointly by NFSv2 and NFSv3.
*/
-int nfs_stat_to_errno(enum nfs_stat status)
+static int nfs_stat_to_errno(enum nfs_stat status)
{
int i;
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 75c68299358e..2292a0fd2bff 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -142,7 +142,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs3_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+nfs3_proc_lookup(struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs3_diropargs arg = {
@@ -810,11 +810,13 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
static int nfs3_read_done(struct rpc_task *task, struct nfs_read_data *data)
{
- if (nfs3_async_handle_jukebox(task, data->inode))
+ struct inode *inode = data->header->inode;
+
+ if (nfs3_async_handle_jukebox(task, inode))
return -EAGAIN;
- nfs_invalidate_atime(data->inode);
- nfs_refresh_inode(data->inode, &data->fattr);
+ nfs_invalidate_atime(inode);
+ nfs_refresh_inode(inode, &data->fattr);
return 0;
}
@@ -830,10 +832,12 @@ static void nfs3_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_da
static int nfs3_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
- if (nfs3_async_handle_jukebox(task, data->inode))
+ struct inode *inode = data->header->inode;
+
+ if (nfs3_async_handle_jukebox(task, inode))
return -EAGAIN;
if (task->tk_status >= 0)
- nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr);
+ nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
return 0;
}
@@ -847,7 +851,12 @@ static void nfs3_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_
rpc_call_start(task);
}
-static int nfs3_commit_done(struct rpc_task *task, struct nfs_write_data *data)
+static void nfs3_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+{
+ rpc_call_start(task);
+}
+
+static int nfs3_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
{
if (nfs3_async_handle_jukebox(task, data->inode))
return -EAGAIN;
@@ -855,7 +864,7 @@ static int nfs3_commit_done(struct rpc_task *task, struct nfs_write_data *data)
return 0;
}
-static void nfs3_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+static void nfs3_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
{
msg->rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT];
}
@@ -875,6 +884,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
.file_inode_ops = &nfs3_file_inode_operations,
.file_ops = &nfs_file_operations,
.getroot = nfs3_proc_get_root,
+ .submount = nfs_submount,
.getattr = nfs3_proc_getattr,
.setattr = nfs3_proc_setattr,
.lookup = nfs3_proc_lookup,
@@ -906,6 +916,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
.write_rpc_prepare = nfs3_proc_write_rpc_prepare,
.write_done = nfs3_write_done,
.commit_setup = nfs3_proc_commit_setup,
+ .commit_rpc_prepare = nfs3_proc_commit_rpc_prepare,
.commit_done = nfs3_commit_done,
.lock = nfs3_proc_lock,
.clear_acl_cache = nfs3_forget_cached_acls,
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index a77cc9a3ce55..902de489ec9b 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -86,6 +86,8 @@
XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE))
#define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz)
+static int nfs3_stat_to_errno(enum nfs_stat);
+
/*
* Map file type to S_IFMT bits
*/
@@ -675,6 +677,7 @@ static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
p = xdr_decode_nfstime3(p, &fattr->atime);
p = xdr_decode_nfstime3(p, &fattr->mtime);
xdr_decode_nfstime3(p, &fattr->ctime);
+ fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);
fattr->valid |= NFS_ATTR_FATTR_V3;
return 0;
@@ -725,12 +728,14 @@ static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
goto out_overflow;
fattr->valid |= NFS_ATTR_FATTR_PRESIZE
+ | NFS_ATTR_FATTR_PRECHANGE
| NFS_ATTR_FATTR_PREMTIME
| NFS_ATTR_FATTR_PRECTIME;
p = xdr_decode_size3(p, &fattr->pre_size);
p = xdr_decode_nfstime3(p, &fattr->pre_mtime);
xdr_decode_nfstime3(p, &fattr->pre_ctime);
+ fattr->pre_change_attr = nfs_timespec_to_change_attr(&fattr->pre_ctime);
return 0;
out_overflow:
@@ -1287,7 +1292,7 @@ static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
* };
*/
static void encode_commit3args(struct xdr_stream *xdr,
- const struct nfs_writeargs *args)
+ const struct nfs_commitargs *args)
{
__be32 *p;
@@ -1300,7 +1305,7 @@ static void encode_commit3args(struct xdr_stream *xdr,
static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_writeargs *args)
+ const struct nfs_commitargs *args)
{
encode_commit3args(xdr, args);
}
@@ -1385,7 +1390,7 @@ static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1424,7 +1429,7 @@ static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1472,7 +1477,7 @@ out_default:
error = decode_post_op_attr(xdr, result->dir_attr);
if (unlikely(error))
goto out;
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1513,7 +1518,7 @@ static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1554,7 +1559,7 @@ static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1636,7 +1641,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1706,7 +1711,7 @@ static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1770,7 +1775,7 @@ out_default:
error = decode_wcc_data(xdr, result->dir_attr);
if (unlikely(error))
goto out;
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1809,7 +1814,7 @@ static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1853,7 +1858,7 @@ static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1896,7 +1901,7 @@ static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/**
@@ -2088,7 +2093,7 @@ out_default:
error = decode_post_op_attr(xdr, result->dir_attr);
if (unlikely(error))
goto out;
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -2156,7 +2161,7 @@ static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -2232,7 +2237,7 @@ static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -2295,7 +2300,7 @@ static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -2319,7 +2324,7 @@ out_status:
*/
static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_writeres *result)
+ struct nfs_commitres *result)
{
enum nfs_stat status;
int error;
@@ -2336,7 +2341,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
#ifdef CONFIG_NFS_V3_ACL
@@ -2401,7 +2406,7 @@ static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
@@ -2420,11 +2425,76 @@ static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
#endif /* CONFIG_NFS_V3_ACL */
+
+/*
+ * We need to translate between nfs status return values and
+ * the local errno values which may not be the same.
+ */
+static const struct {
+ int stat;
+ int errno;
+} nfs_errtbl[] = {
+ { NFS_OK, 0 },
+ { NFSERR_PERM, -EPERM },
+ { NFSERR_NOENT, -ENOENT },
+ { NFSERR_IO, -errno_NFSERR_IO},
+ { NFSERR_NXIO, -ENXIO },
+/* { NFSERR_EAGAIN, -EAGAIN }, */
+ { NFSERR_ACCES, -EACCES },
+ { NFSERR_EXIST, -EEXIST },
+ { NFSERR_XDEV, -EXDEV },
+ { NFSERR_NODEV, -ENODEV },
+ { NFSERR_NOTDIR, -ENOTDIR },
+ { NFSERR_ISDIR, -EISDIR },
+ { NFSERR_INVAL, -EINVAL },
+ { NFSERR_FBIG, -EFBIG },
+ { NFSERR_NOSPC, -ENOSPC },
+ { NFSERR_ROFS, -EROFS },
+ { NFSERR_MLINK, -EMLINK },
+ { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
+ { NFSERR_NOTEMPTY, -ENOTEMPTY },
+ { NFSERR_DQUOT, -EDQUOT },
+ { NFSERR_STALE, -ESTALE },
+ { NFSERR_REMOTE, -EREMOTE },
+#ifdef EWFLUSH
+ { NFSERR_WFLUSH, -EWFLUSH },
+#endif
+ { NFSERR_BADHANDLE, -EBADHANDLE },
+ { NFSERR_NOT_SYNC, -ENOTSYNC },
+ { NFSERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFSERR_NOTSUPP, -ENOTSUPP },
+ { NFSERR_TOOSMALL, -ETOOSMALL },
+ { NFSERR_SERVERFAULT, -EREMOTEIO },
+ { NFSERR_BADTYPE, -EBADTYPE },
+ { NFSERR_JUKEBOX, -EJUKEBOX },
+ { -1, -EIO }
+};
+
+/**
+ * nfs3_stat_to_errno - convert an NFS status code to a local errno
+ * @status: NFS status code to convert
+ *
+ * Returns a local errno value, or -EIO if the NFS status code is
+ * not recognized. This function is used jointly by NFSv2 and NFSv3.
+ */
+static int nfs3_stat_to_errno(enum nfs_stat status)
+{
+ int i;
+
+ for (i = 0; nfs_errtbl[i].stat != -1; i++) {
+ if (nfs_errtbl[i].stat == (int)status)
+ return nfs_errtbl[i].errno;
+ }
+ dprintk("NFS: Unrecognized nfs status value: %u\n", status);
+ return nfs_errtbl[i].errno;
+}
+
+
#define PROC(proc, argtype, restype, timer) \
[NFS3PROC_##proc] = { \
.p_proc = NFS3PROC_##proc, \
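
The nfs3_stat_to_errno() helper added above uses the usual sentinel-terminated lookup-table pattern, where the terminating row also supplies the default (-EIO) result for unrecognized status codes. A standalone sketch of that pattern, with hypothetical values in place of the NFS status codes:

#include <stdio.h>

/* Sentinel-terminated status-to-errno table; values are illustrative. */
struct stat_errno_map {
	int stat;
	int errno_val;
};

static const struct stat_errno_map errtbl[] = {
	{  0,  0 },	/* OK */
	{  1, -1 },	/* PERM  -> -EPERM  */
	{  2, -2 },	/* NOENT -> -ENOENT */
	{ -1, -5 },	/* sentinel: unknown status -> -EIO */
};

static int stat_to_errno(int status)
{
	int i;

	for (i = 0; errtbl[i].stat != -1; i++)
		if (errtbl[i].stat == status)
			return errtbl[i].errno_val;
	return errtbl[i].errno_val;	/* fall back to the sentinel entry */
}

int main(void)
{
	printf("%d %d\n", stat_to_errno(2), stat_to_errno(9999));
	return 0;
}
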
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 8d75021020b3..c6827f93ab57 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -24,6 +24,8 @@ enum nfs4_client_state {
NFS4CLNT_RECALL_SLOT,
NFS4CLNT_LEASE_CONFIRM,
NFS4CLNT_SERVER_SCOPE_MISMATCH,
+ NFS4CLNT_PURGE_STATE,
+ NFS4CLNT_BIND_CONN_TO_SESSION,
};
enum nfs4_session_state {
@@ -52,11 +54,6 @@ struct nfs4_minor_version_ops {
const struct nfs4_state_maintenance_ops *state_renewal_ops;
};
-struct nfs_unique_id {
- struct rb_node rb_node;
- __u64 id;
-};
-
#define NFS_SEQID_CONFIRMED 1
struct nfs_seqid_counter {
ktime_t create_time;
@@ -206,12 +203,18 @@ extern const struct dentry_operations nfs4_dentry_operations;
extern const struct inode_operations nfs4_dir_inode_operations;
/* nfs4namespace.c */
+rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
+struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *,
+ struct nfs_fh *, struct nfs_fattr *);
/* nfs4proc.c */
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
+extern int nfs4_proc_get_rootfh(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
+extern int nfs4_proc_bind_conn_to_session(struct nfs_client *, struct rpc_cred *cred);
extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
+extern int nfs4_destroy_clientid(struct nfs_client *clp);
extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
@@ -239,8 +242,8 @@ extern int nfs41_setup_sequence(struct nfs4_session *session,
struct rpc_task *task);
extern void nfs4_destroy_session(struct nfs4_session *session);
extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
-extern int nfs4_proc_create_session(struct nfs_client *);
-extern int nfs4_proc_destroy_session(struct nfs4_session *);
+extern int nfs4_proc_create_session(struct nfs_client *, struct rpc_cred *);
+extern int nfs4_proc_destroy_session(struct nfs4_session *, struct rpc_cred *);
extern int nfs4_init_session(struct nfs_server *server);
extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
struct nfs_fsinfo *fsinfo);
@@ -310,9 +313,9 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
#if defined(CONFIG_NFS_V4_1)
struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
-extern void nfs4_schedule_session_recovery(struct nfs4_session *);
+extern void nfs4_schedule_session_recovery(struct nfs4_session *, int);
#else
-static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
+static inline void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
{
}
#endif /* CONFIG_NFS_V4_1 */
@@ -334,7 +337,7 @@ extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs
extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
extern void nfs41_handle_recall_slot(struct nfs_client *clp);
extern void nfs41_handle_server_scope(struct nfs_client *,
- struct server_scope **);
+ struct nfs41_server_scope **);
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern void nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *,
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 5acfd9ea8a31..e1340293872c 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -82,29 +82,76 @@ filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset)
BUG();
}
+static void filelayout_reset_write(struct nfs_write_data *data)
+{
+ struct nfs_pgio_header *hdr = data->header;
+ struct rpc_task *task = &data->task;
+
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ dprintk("%s Reset task %5u for i/o through MDS "
+ "(req %s/%lld, %u bytes @ offset %llu)\n", __func__,
+ data->task.tk_pid,
+ hdr->inode->i_sb->s_id,
+ (long long)NFS_FILEID(hdr->inode),
+ data->args.count,
+ (unsigned long long)data->args.offset);
+
+ task->tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
+ &hdr->pages,
+ hdr->completion_ops);
+ }
+}
+
+static void filelayout_reset_read(struct nfs_read_data *data)
+{
+ struct nfs_pgio_header *hdr = data->header;
+ struct rpc_task *task = &data->task;
+
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ dprintk("%s Reset task %5u for i/o through MDS "
+ "(req %s/%lld, %u bytes @ offset %llu)\n", __func__,
+ data->task.tk_pid,
+ hdr->inode->i_sb->s_id,
+ (long long)NFS_FILEID(hdr->inode),
+ data->args.count,
+ (unsigned long long)data->args.offset);
+
+ task->tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
+ &hdr->pages,
+ hdr->completion_ops);
+ }
+}
+
static int filelayout_async_handle_error(struct rpc_task *task,
struct nfs4_state *state,
struct nfs_client *clp,
- int *reset)
+ struct pnfs_layout_segment *lseg)
{
- struct nfs_server *mds_server = NFS_SERVER(state->inode);
+ struct inode *inode = lseg->pls_layout->plh_inode;
+ struct nfs_server *mds_server = NFS_SERVER(inode);
+ struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
struct nfs_client *mds_client = mds_server->nfs_client;
+ struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
if (task->tk_status >= 0)
return 0;
- *reset = 0;
switch (task->tk_status) {
/* MDS state errors */
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
+ if (state == NULL)
+ break;
nfs_remove_bad_delegation(state->inode);
case -NFS4ERR_OPENMODE:
+ if (state == NULL)
+ break;
nfs4_schedule_stateid_recovery(mds_server, state);
goto wait_on_recovery;
case -NFS4ERR_EXPIRED:
- nfs4_schedule_stateid_recovery(mds_server, state);
+ if (state != NULL)
+ nfs4_schedule_stateid_recovery(mds_server, state);
nfs4_schedule_lease_recovery(mds_client);
goto wait_on_recovery;
/* DS session errors */
@@ -118,7 +165,7 @@ static int filelayout_async_handle_error(struct rpc_task *task,
dprintk("%s ERROR %d, Reset session. Exchangeid "
"flags 0x%x\n", __func__, task->tk_status,
clp->cl_exchange_flags);
- nfs4_schedule_session_recovery(clp->cl_session);
+ nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
break;
case -NFS4ERR_DELAY:
case -NFS4ERR_GRACE:
@@ -127,11 +174,48 @@ static int filelayout_async_handle_error(struct rpc_task *task,
break;
case -NFS4ERR_RETRY_UNCACHED_REP:
break;
+ /* Invalidate Layout errors */
+ case -NFS4ERR_PNFS_NO_LAYOUT:
+ case -ESTALE: /* mapped NFS4ERR_STALE */
+ case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
+ case -EISDIR: /* mapped NFS4ERR_ISDIR */
+ case -NFS4ERR_FHEXPIRED:
+ case -NFS4ERR_WRONG_TYPE:
+ dprintk("%s Invalid layout error %d\n", __func__,
+ task->tk_status);
+ /*
+ * Destroy layout so new i/o will get a new layout.
+ * Layout will not be destroyed until all current lseg
+ * references are put. Mark layout as invalid to resend failed
+ * i/o and all i/o waiting on the slot table to the MDS until
+ * layout is destroyed and a new valid layout is obtained.
+ */
+ set_bit(NFS_LAYOUT_INVALID,
+ &NFS_I(inode)->layout->plh_flags);
+ pnfs_destroy_layout(NFS_I(inode));
+ rpc_wake_up(&tbl->slot_tbl_waitq);
+ goto reset;
+ /* RPC connection errors */
+ case -ECONNREFUSED:
+ case -EHOSTDOWN:
+ case -EHOSTUNREACH:
+ case -ENETUNREACH:
+ case -EIO:
+ case -ETIMEDOUT:
+ case -EPIPE:
+ dprintk("%s DS connection error %d\n", __func__,
+ task->tk_status);
+ if (!filelayout_test_devid_invalid(devid))
+ _pnfs_return_layout(inode);
+ filelayout_mark_devid_invalid(devid);
+ rpc_wake_up(&tbl->slot_tbl_waitq);
+ nfs4_ds_disconnect(clp);
+ /* fall through */
default:
- dprintk("%s DS error. Retry through MDS %d\n", __func__,
+reset:
+ dprintk("%s Retry through MDS. Error %d\n", __func__,
task->tk_status);
- *reset = 1;
- break;
+ return -NFS4ERR_RESET_TO_MDS;
}
out:
task->tk_status = 0;
@@ -148,18 +232,17 @@ wait_on_recovery:
static int filelayout_read_done_cb(struct rpc_task *task,
struct nfs_read_data *data)
{
- int reset = 0;
+ struct nfs_pgio_header *hdr = data->header;
+ int err;
- dprintk("%s DS read\n", __func__);
+ err = filelayout_async_handle_error(task, data->args.context->state,
+ data->ds_clp, hdr->lseg);
- if (filelayout_async_handle_error(task, data->args.context->state,
- data->ds_clp, &reset) == -EAGAIN) {
- dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
- __func__, data->ds_clp, data->ds_clp->cl_session);
- if (reset) {
- pnfs_set_lo_fail(data->lseg);
- nfs4_reset_read(task, data);
- }
+ switch (err) {
+ case -NFS4ERR_RESET_TO_MDS:
+ filelayout_reset_read(data);
+ return task->tk_status;
+ case -EAGAIN:
rpc_restart_call_prepare(task);
return -EAGAIN;
}
@@ -175,13 +258,15 @@ static int filelayout_read_done_cb(struct rpc_task *task,
static void
filelayout_set_layoutcommit(struct nfs_write_data *wdata)
{
- if (FILELAYOUT_LSEG(wdata->lseg)->commit_through_mds ||
+ struct nfs_pgio_header *hdr = wdata->header;
+
+ if (FILELAYOUT_LSEG(hdr->lseg)->commit_through_mds ||
wdata->res.verf->committed == NFS_FILE_SYNC)
return;
pnfs_set_layoutcommit(wdata);
- dprintk("%s ionde %lu pls_end_pos %lu\n", __func__, wdata->inode->i_ino,
- (unsigned long) NFS_I(wdata->inode)->layout->plh_lwb);
+ dprintk("%s ionde %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
+ (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
}
/*
@@ -191,8 +276,14 @@ filelayout_set_layoutcommit(struct nfs_write_data *wdata)
*/
static void filelayout_read_prepare(struct rpc_task *task, void *data)
{
- struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+ struct nfs_read_data *rdata = data;
+ if (filelayout_reset_to_mds(rdata->header->lseg)) {
+ dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
+ filelayout_reset_read(rdata);
+ rpc_exit(task, 0);
+ return;
+ }
rdata->read_done_cb = filelayout_read_done_cb;
if (nfs41_setup_sequence(rdata->ds_clp->cl_session,
@@ -205,42 +296,47 @@ static void filelayout_read_prepare(struct rpc_task *task, void *data)
static void filelayout_read_call_done(struct rpc_task *task, void *data)
{
- struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+ struct nfs_read_data *rdata = data;
dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
+ if (test_bit(NFS_IOHDR_REDO, &rdata->header->flags) &&
+ task->tk_status == 0)
+ return;
+
/* Note this may cause RPC to be resent */
- rdata->mds_ops->rpc_call_done(task, data);
+ rdata->header->mds_ops->rpc_call_done(task, data);
}
static void filelayout_read_count_stats(struct rpc_task *task, void *data)
{
- struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+ struct nfs_read_data *rdata = data;
- rpc_count_iostats(task, NFS_SERVER(rdata->inode)->client->cl_metrics);
+ rpc_count_iostats(task, NFS_SERVER(rdata->header->inode)->client->cl_metrics);
}
static void filelayout_read_release(void *data)
{
- struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+ struct nfs_read_data *rdata = data;
- put_lseg(rdata->lseg);
- rdata->mds_ops->rpc_release(data);
+ nfs_put_client(rdata->ds_clp);
+ rdata->header->mds_ops->rpc_release(data);
}
static int filelayout_write_done_cb(struct rpc_task *task,
struct nfs_write_data *data)
{
- int reset = 0;
-
- if (filelayout_async_handle_error(task, data->args.context->state,
- data->ds_clp, &reset) == -EAGAIN) {
- dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
- __func__, data->ds_clp, data->ds_clp->cl_session);
- if (reset) {
- pnfs_set_lo_fail(data->lseg);
- nfs4_reset_write(task, data);
- }
+ struct nfs_pgio_header *hdr = data->header;
+ int err;
+
+ err = filelayout_async_handle_error(task, data->args.context->state,
+ data->ds_clp, hdr->lseg);
+
+ switch (err) {
+ case -NFS4ERR_RESET_TO_MDS:
+ filelayout_reset_write(data);
+ return task->tk_status;
+ case -EAGAIN:
rpc_restart_call_prepare(task);
return -EAGAIN;
}
@@ -250,7 +346,7 @@ static int filelayout_write_done_cb(struct rpc_task *task,
}
/* Fake up some data that will cause nfs_commit_release to retry the writes. */
-static void prepare_to_resend_writes(struct nfs_write_data *data)
+static void prepare_to_resend_writes(struct nfs_commit_data *data)
{
struct nfs_page *first = nfs_list_entry(data->pages.next);
@@ -261,19 +357,19 @@ static void prepare_to_resend_writes(struct nfs_write_data *data)
}
static int filelayout_commit_done_cb(struct rpc_task *task,
- struct nfs_write_data *data)
+ struct nfs_commit_data *data)
{
- int reset = 0;
-
- if (filelayout_async_handle_error(task, data->args.context->state,
- data->ds_clp, &reset) == -EAGAIN) {
- dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
- __func__, data->ds_clp, data->ds_clp->cl_session);
- if (reset) {
- prepare_to_resend_writes(data);
- pnfs_set_lo_fail(data->lseg);
- } else
- rpc_restart_call_prepare(task);
+ int err;
+
+ err = filelayout_async_handle_error(task, NULL, data->ds_clp,
+ data->lseg);
+
+ switch (err) {
+ case -NFS4ERR_RESET_TO_MDS:
+ prepare_to_resend_writes(data);
+ return -EAGAIN;
+ case -EAGAIN:
+ rpc_restart_call_prepare(task);
return -EAGAIN;
}
@@ -282,8 +378,14 @@ static int filelayout_commit_done_cb(struct rpc_task *task,
static void filelayout_write_prepare(struct rpc_task *task, void *data)
{
- struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+ struct nfs_write_data *wdata = data;
+ if (filelayout_reset_to_mds(wdata->header->lseg)) {
+ dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
+ filelayout_reset_write(wdata);
+ rpc_exit(task, 0);
+ return;
+ }
if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
&wdata->args.seq_args, &wdata->res.seq_res,
task))
@@ -294,36 +396,66 @@ static void filelayout_write_prepare(struct rpc_task *task, void *data)
static void filelayout_write_call_done(struct rpc_task *task, void *data)
{
- struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+ struct nfs_write_data *wdata = data;
+
+ if (test_bit(NFS_IOHDR_REDO, &wdata->header->flags) &&
+ task->tk_status == 0)
+ return;
/* Note this may cause RPC to be resent */
- wdata->mds_ops->rpc_call_done(task, data);
+ wdata->header->mds_ops->rpc_call_done(task, data);
}
static void filelayout_write_count_stats(struct rpc_task *task, void *data)
{
- struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+ struct nfs_write_data *wdata = data;
- rpc_count_iostats(task, NFS_SERVER(wdata->inode)->client->cl_metrics);
+ rpc_count_iostats(task, NFS_SERVER(wdata->header->inode)->client->cl_metrics);
}
static void filelayout_write_release(void *data)
{
- struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+ struct nfs_write_data *wdata = data;
+
+ nfs_put_client(wdata->ds_clp);
+ wdata->header->mds_ops->rpc_release(data);
+}
+
+static void filelayout_commit_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs_commit_data *wdata = data;
- put_lseg(wdata->lseg);
- wdata->mds_ops->rpc_release(data);
+ if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
+ &wdata->args.seq_args, &wdata->res.seq_res,
+ task))
+ return;
+
+ rpc_call_start(task);
+}
+
+static void filelayout_write_commit_done(struct rpc_task *task, void *data)
+{
+ struct nfs_commit_data *wdata = data;
+
+ /* Note this may cause RPC to be resent */
+ wdata->mds_ops->rpc_call_done(task, data);
+}
+
+static void filelayout_commit_count_stats(struct rpc_task *task, void *data)
+{
+ struct nfs_commit_data *cdata = data;
+
+ rpc_count_iostats(task, NFS_SERVER(cdata->inode)->client->cl_metrics);
}
-static void filelayout_commit_release(void *data)
+static void filelayout_commit_release(void *calldata)
{
- struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+ struct nfs_commit_data *data = calldata;
- nfs_commit_release_pages(wdata);
- if (atomic_dec_and_test(&NFS_I(wdata->inode)->commits_outstanding))
- nfs_commit_clear_lock(NFS_I(wdata->inode));
- put_lseg(wdata->lseg);
- nfs_commitdata_release(wdata);
+ data->completion_ops->completion(data);
+ put_lseg(data->lseg);
+ nfs_put_client(data->ds_clp);
+ nfs_commitdata_release(data);
}
static const struct rpc_call_ops filelayout_read_call_ops = {
@@ -341,16 +473,17 @@ static const struct rpc_call_ops filelayout_write_call_ops = {
};
static const struct rpc_call_ops filelayout_commit_call_ops = {
- .rpc_call_prepare = filelayout_write_prepare,
- .rpc_call_done = filelayout_write_call_done,
- .rpc_count_stats = filelayout_write_count_stats,
+ .rpc_call_prepare = filelayout_commit_prepare,
+ .rpc_call_done = filelayout_write_commit_done,
+ .rpc_count_stats = filelayout_commit_count_stats,
.rpc_release = filelayout_commit_release,
};
static enum pnfs_try_status
filelayout_read_pagelist(struct nfs_read_data *data)
{
- struct pnfs_layout_segment *lseg = data->lseg;
+ struct nfs_pgio_header *hdr = data->header;
+ struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
loff_t offset = data->args.offset;
u32 j, idx;
@@ -358,25 +491,20 @@ filelayout_read_pagelist(struct nfs_read_data *data)
int status;
dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
- __func__, data->inode->i_ino,
+ __func__, hdr->inode->i_ino,
data->args.pgbase, (size_t)data->args.count, offset);
- if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
- return PNFS_NOT_ATTEMPTED;
-
/* Retrieve the correct rpc_client for the byte range */
j = nfs4_fl_calc_j_index(lseg, offset);
idx = nfs4_fl_calc_ds_index(lseg, j);
ds = nfs4_fl_prepare_ds(lseg, idx);
- if (!ds) {
- /* Either layout fh index faulty, or ds connect failed */
- set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
- set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+ if (!ds)
return PNFS_NOT_ATTEMPTED;
- }
- dprintk("%s USE DS: %s\n", __func__, ds->ds_remotestr);
+ dprintk("%s USE DS: %s cl_count %d\n", __func__,
+ ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));
/* No multipath support. Use first DS */
+ atomic_inc(&ds->ds_clp->cl_count);
data->ds_clp = ds->ds_clp;
fh = nfs4_fl_select_ds_fh(lseg, j);
if (fh)
@@ -386,8 +514,8 @@ filelayout_read_pagelist(struct nfs_read_data *data)
data->mds_offset = offset;
/* Perform an asynchronous read to ds */
- status = nfs_initiate_read(data, ds->ds_clp->cl_rpcclient,
- &filelayout_read_call_ops);
+ status = nfs_initiate_read(ds->ds_clp->cl_rpcclient, data,
+ &filelayout_read_call_ops, RPC_TASK_SOFTCONN);
BUG_ON(status != 0);
return PNFS_ATTEMPTED;
}
@@ -396,32 +524,26 @@ filelayout_read_pagelist(struct nfs_read_data *data)
static enum pnfs_try_status
filelayout_write_pagelist(struct nfs_write_data *data, int sync)
{
- struct pnfs_layout_segment *lseg = data->lseg;
+ struct nfs_pgio_header *hdr = data->header;
+ struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
loff_t offset = data->args.offset;
u32 j, idx;
struct nfs_fh *fh;
int status;
- if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
- return PNFS_NOT_ATTEMPTED;
-
/* Retrieve the correct rpc_client for the byte range */
j = nfs4_fl_calc_j_index(lseg, offset);
idx = nfs4_fl_calc_ds_index(lseg, j);
ds = nfs4_fl_prepare_ds(lseg, idx);
- if (!ds) {
- printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n",
- __func__);
- set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
- set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+ if (!ds)
return PNFS_NOT_ATTEMPTED;
- }
- dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s\n", __func__,
- data->inode->i_ino, sync, (size_t) data->args.count, offset,
- ds->ds_remotestr);
+ dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d\n",
+ __func__, hdr->inode->i_ino, sync, (size_t) data->args.count,
+ offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));
data->write_done_cb = filelayout_write_done_cb;
+ atomic_inc(&ds->ds_clp->cl_count);
data->ds_clp = ds->ds_clp;
fh = nfs4_fl_select_ds_fh(lseg, j);
if (fh)
@@ -433,8 +555,9 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync)
data->args.offset = filelayout_get_dserver_offset(lseg, offset);
/* Perform an asynchronous write */
- status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient,
- &filelayout_write_call_ops, sync);
+ status = nfs_initiate_write(ds->ds_clp->cl_rpcclient, data,
+ &filelayout_write_call_ops, sync,
+ RPC_TASK_SOFTCONN);
BUG_ON(status != 0);
return PNFS_ATTEMPTED;
}
@@ -650,10 +773,65 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg)
dprintk("--> %s\n", __func__);
nfs4_fl_put_deviceid(fl->dsaddr);
- kfree(fl->commit_buckets);
+ /* This assumes a single RW lseg */
+ if (lseg->pls_range.iomode == IOMODE_RW) {
+ struct nfs4_filelayout *flo;
+
+ flo = FILELAYOUT_FROM_HDR(lseg->pls_layout);
+ flo->commit_info.nbuckets = 0;
+ kfree(flo->commit_info.buckets);
+ flo->commit_info.buckets = NULL;
+ }
_filelayout_free_lseg(fl);
}
+static int
+filelayout_alloc_commit_info(struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo,
+ gfp_t gfp_flags)
+{
+ struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
+ struct pnfs_commit_bucket *buckets;
+ int size;
+
+ if (fl->commit_through_mds)
+ return 0;
+ if (cinfo->ds->nbuckets != 0) {
+ /* This assumes there is only one IOMODE_RW lseg. What
+ * we really want to do is have a layout_hdr level
+ * dictionary of <multipath_list4, fh> keys, each
+ * associated with a struct list_head, populated by calls
+ * to filelayout_write_pagelist().
+ * */
+ return 0;
+ }
+
+ size = (fl->stripe_type == STRIPE_SPARSE) ?
+ fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
+
+ buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
+ gfp_flags);
+ if (!buckets)
+ return -ENOMEM;
+ else {
+ int i;
+
+ spin_lock(cinfo->lock);
+ if (cinfo->ds->nbuckets != 0)
+ kfree(buckets);
+ else {
+ cinfo->ds->buckets = buckets;
+ cinfo->ds->nbuckets = size;
+ for (i = 0; i < size; i++) {
+ INIT_LIST_HEAD(&buckets[i].written);
+ INIT_LIST_HEAD(&buckets[i].committing);
+ }
+ }
+ spin_unlock(cinfo->lock);
+ return 0;
+ }
+}
+
static struct pnfs_layout_segment *
filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
struct nfs4_layoutget_res *lgr,
@@ -673,29 +851,6 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
_filelayout_free_lseg(fl);
return NULL;
}
-
- /* This assumes there is only one IOMODE_RW lseg. What
- * we really want to do is have a layout_hdr level
- * dictionary of <multipath_list4, fh> keys, each
- * associated with a struct list_head, populated by calls
- * to filelayout_write_pagelist().
- * */
- if ((!fl->commit_through_mds) && (lgr->range.iomode == IOMODE_RW)) {
- int i;
- int size = (fl->stripe_type == STRIPE_SPARSE) ?
- fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
-
- fl->commit_buckets = kcalloc(size, sizeof(struct nfs4_fl_commit_bucket), gfp_flags);
- if (!fl->commit_buckets) {
- filelayout_free_lseg(&fl->generic_hdr);
- return NULL;
- }
- fl->number_of_buckets = size;
- for (i = 0; i < size; i++) {
- INIT_LIST_HEAD(&fl->commit_buckets[i].written);
- INIT_LIST_HEAD(&fl->commit_buckets[i].committing);
- }
- }
return &fl->generic_hdr;
}
@@ -716,8 +871,8 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
!nfs_generic_pg_test(pgio, prev, req))
return false;
- p_stripe = (u64)prev->wb_index << PAGE_CACHE_SHIFT;
- r_stripe = (u64)req->wb_index << PAGE_CACHE_SHIFT;
+ p_stripe = (u64)req_offset(prev);
+ r_stripe = (u64)req_offset(req);
stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;
do_div(p_stripe, stripe_unit);
@@ -732,6 +887,16 @@ filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
{
BUG_ON(pgio->pg_lseg != NULL);
+ if (req->wb_offset != req->wb_pgbase) {
+ /*
+ * Handling unaligned pages is difficult, because we have to
+ * somehow split a req in two in certain cases in the
+ * pg.test code. Avoid this by just not using pnfs
+ * in this case.
+ */
+ nfs_pageio_reset_read_mds(pgio);
+ return;
+ }
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
0,
@@ -747,8 +912,13 @@ static void
filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
+ struct nfs_commit_info cinfo;
+ int status;
+
BUG_ON(pgio->pg_lseg != NULL);
+ if (req->wb_offset != req->wb_pgbase)
+ goto out_mds;
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
0,
@@ -757,7 +927,17 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
GFP_NOFS);
/* If no lseg, fall back to write through mds */
if (pgio->pg_lseg == NULL)
- nfs_pageio_reset_write_mds(pgio);
+ goto out_mds;
+ nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
+ status = filelayout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
+ if (status < 0) {
+ put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ goto out_mds;
+ }
+ return;
+out_mds:
+ nfs_pageio_reset_write_mds(pgio);
}
static const struct nfs_pageio_ops filelayout_pg_read_ops = {
@@ -784,43 +964,42 @@ static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j)
* If this will make the bucket empty, it will need to put the lseg reference.
*/
static void
-filelayout_clear_request_commit(struct nfs_page *req)
+filelayout_clear_request_commit(struct nfs_page *req,
+ struct nfs_commit_info *cinfo)
{
struct pnfs_layout_segment *freeme = NULL;
- struct inode *inode = req->wb_context->dentry->d_inode;
- spin_lock(&inode->i_lock);
+ spin_lock(cinfo->lock);
if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
goto out;
+ cinfo->ds->nwritten--;
if (list_is_singular(&req->wb_list)) {
- struct pnfs_layout_segment *lseg;
+ struct pnfs_commit_bucket *bucket;
- /* From here we can find the bucket, but for the moment,
- * since there is only one relevant lseg...
- */
- list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
- if (lseg->pls_range.iomode == IOMODE_RW) {
- freeme = lseg;
- break;
- }
- }
+ bucket = list_first_entry(&req->wb_list,
+ struct pnfs_commit_bucket,
+ written);
+ freeme = bucket->wlseg;
+ bucket->wlseg = NULL;
}
out:
- nfs_request_remove_commit_list(req);
- spin_unlock(&inode->i_lock);
+ nfs_request_remove_commit_list(req, cinfo);
+ spin_unlock(cinfo->lock);
put_lseg(freeme);
}
static struct list_head *
filelayout_choose_commit_list(struct nfs_page *req,
- struct pnfs_layout_segment *lseg)
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
u32 i, j;
struct list_head *list;
+ struct pnfs_commit_bucket *buckets;
if (fl->commit_through_mds)
- return &NFS_I(req->wb_context->dentry->d_inode)->commit_list;
+ return &cinfo->mds->list;
/* Note that we are calling nfs4_fl_calc_j_index on each page
* that ends up being committed to a data server. An attractive
@@ -828,31 +1007,33 @@ filelayout_choose_commit_list(struct nfs_page *req,
* to store the value calculated in filelayout_write_pagelist
* and just use that here.
*/
- j = nfs4_fl_calc_j_index(lseg,
- (loff_t)req->wb_index << PAGE_CACHE_SHIFT);
+ j = nfs4_fl_calc_j_index(lseg, req_offset(req));
i = select_bucket_index(fl, j);
- list = &fl->commit_buckets[i].written;
+ buckets = cinfo->ds->buckets;
+ list = &buckets[i].written;
if (list_empty(list)) {
/* Non-empty buckets hold a reference on the lseg. That ref
* is normally transferred to the COMMIT call and released
* there. It could also be released if the last req is pulled
* off due to a rewrite, in which case it will be done in
- * filelayout_remove_commit_req
+ * filelayout_clear_request_commit
*/
- get_lseg(lseg);
+ buckets[i].wlseg = get_lseg(lseg);
}
set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+ cinfo->ds->nwritten++;
return list;
}
static void
filelayout_mark_request_commit(struct nfs_page *req,
- struct pnfs_layout_segment *lseg)
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
struct list_head *list;
- list = filelayout_choose_commit_list(req, lseg);
- nfs_request_add_commit_list(req, list);
+ list = filelayout_choose_commit_list(req, lseg, cinfo);
+ nfs_request_add_commit_list(req, list, cinfo);
}
static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
@@ -880,7 +1061,7 @@ select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
return flseg->fh_array[i];
}
-static int filelayout_initiate_commit(struct nfs_write_data *data, int how)
+static int filelayout_initiate_commit(struct nfs_commit_data *data, int how)
{
struct pnfs_layout_segment *lseg = data->lseg;
struct nfs4_pnfs_ds *ds;
@@ -890,135 +1071,138 @@ static int filelayout_initiate_commit(struct nfs_write_data *data, int how)
idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
ds = nfs4_fl_prepare_ds(lseg, idx);
if (!ds) {
- printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n",
- __func__);
- set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
- set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
prepare_to_resend_writes(data);
filelayout_commit_release(data);
return -EAGAIN;
}
- dprintk("%s ino %lu, how %d\n", __func__, data->inode->i_ino, how);
- data->write_done_cb = filelayout_commit_done_cb;
+ dprintk("%s ino %lu, how %d cl_count %d\n", __func__,
+ data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count));
+ data->commit_done_cb = filelayout_commit_done_cb;
+ atomic_inc(&ds->ds_clp->cl_count);
data->ds_clp = ds->ds_clp;
fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
if (fh)
data->args.fh = fh;
- return nfs_initiate_commit(data, ds->ds_clp->cl_rpcclient,
- &filelayout_commit_call_ops, how);
-}
-
-/*
- * This is only useful while we are using whole file layouts.
- */
-static struct pnfs_layout_segment *
-find_only_write_lseg_locked(struct inode *inode)
-{
- struct pnfs_layout_segment *lseg;
-
- list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
- if (lseg->pls_range.iomode == IOMODE_RW)
- return lseg;
- return NULL;
-}
-
-static struct pnfs_layout_segment *find_only_write_lseg(struct inode *inode)
-{
- struct pnfs_layout_segment *rv;
-
- spin_lock(&inode->i_lock);
- rv = find_only_write_lseg_locked(inode);
- if (rv)
- get_lseg(rv);
- spin_unlock(&inode->i_lock);
- return rv;
+ return nfs_initiate_commit(ds->ds_clp->cl_rpcclient, data,
+ &filelayout_commit_call_ops, how,
+ RPC_TASK_SOFTCONN);
}
static int
-filelayout_scan_ds_commit_list(struct nfs4_fl_commit_bucket *bucket, int max,
- spinlock_t *lock)
+transfer_commit_list(struct list_head *src, struct list_head *dst,
+ struct nfs_commit_info *cinfo, int max)
{
- struct list_head *src = &bucket->written;
- struct list_head *dst = &bucket->committing;
struct nfs_page *req, *tmp;
int ret = 0;
list_for_each_entry_safe(req, tmp, src, wb_list) {
if (!nfs_lock_request(req))
continue;
- if (cond_resched_lock(lock))
+ kref_get(&req->wb_kref);
+ if (cond_resched_lock(cinfo->lock))
list_safe_reset_next(req, tmp, wb_list);
- nfs_request_remove_commit_list(req);
+ nfs_request_remove_commit_list(req, cinfo);
clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
nfs_list_add_request(req, dst);
ret++;
- if (ret == max)
+ if ((ret == max) && !cinfo->dreq)
break;
}
return ret;
}
+static int
+filelayout_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
+ struct nfs_commit_info *cinfo,
+ int max)
+{
+ struct list_head *src = &bucket->written;
+ struct list_head *dst = &bucket->committing;
+ int ret;
+
+ ret = transfer_commit_list(src, dst, cinfo, max);
+ if (ret) {
+ cinfo->ds->nwritten -= ret;
+ cinfo->ds->ncommitting += ret;
+ bucket->clseg = bucket->wlseg;
+ if (list_empty(src))
+ bucket->wlseg = NULL;
+ else
+ get_lseg(bucket->clseg);
+ }
+ return ret;
+}
+
/* Move reqs from written to committing lists, returning count of number moved.
- * Note called with i_lock held.
+ * Note called with cinfo->lock held.
*/
-static int filelayout_scan_commit_lists(struct inode *inode, int max,
- spinlock_t *lock)
+static int filelayout_scan_commit_lists(struct nfs_commit_info *cinfo,
+ int max)
{
- struct pnfs_layout_segment *lseg;
- struct nfs4_filelayout_segment *fl;
int i, rv = 0, cnt;
- lseg = find_only_write_lseg_locked(inode);
- if (!lseg)
- goto out_done;
- fl = FILELAYOUT_LSEG(lseg);
- if (fl->commit_through_mds)
- goto out_done;
- for (i = 0; i < fl->number_of_buckets && max != 0; i++) {
- cnt = filelayout_scan_ds_commit_list(&fl->commit_buckets[i],
- max, lock);
+ for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
+ cnt = filelayout_scan_ds_commit_list(&cinfo->ds->buckets[i],
+ cinfo, max);
max -= cnt;
rv += cnt;
}
-out_done:
return rv;
}
+/* Pull everything off the committing lists and dump into @dst */
+static void filelayout_recover_commit_reqs(struct list_head *dst,
+ struct nfs_commit_info *cinfo)
+{
+ struct pnfs_commit_bucket *b;
+ int i;
+
+ /* NOTE cinfo->lock is NOT held, relying on fact that this is
+ * only called on single thread per dreq.
+ * Can't take the lock because need to do put_lseg
+ */
+ for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
+ if (transfer_commit_list(&b->written, dst, cinfo, 0)) {
+ BUG_ON(!list_empty(&b->written));
+ put_lseg(b->wlseg);
+ b->wlseg = NULL;
+ }
+ }
+ cinfo->ds->nwritten = 0;
+}
+
static unsigned int
-alloc_ds_commits(struct inode *inode, struct list_head *list)
+alloc_ds_commits(struct nfs_commit_info *cinfo, struct list_head *list)
{
- struct pnfs_layout_segment *lseg;
- struct nfs4_filelayout_segment *fl;
- struct nfs_write_data *data;
+ struct pnfs_ds_commit_info *fl_cinfo;
+ struct pnfs_commit_bucket *bucket;
+ struct nfs_commit_data *data;
int i, j;
unsigned int nreq = 0;
- /* Won't need this when non-whole file layout segments are supported
- * instead we will use a pnfs_layout_hdr structure */
- lseg = find_only_write_lseg(inode);
- if (!lseg)
- return 0;
- fl = FILELAYOUT_LSEG(lseg);
- for (i = 0; i < fl->number_of_buckets; i++) {
- if (list_empty(&fl->commit_buckets[i].committing))
+ fl_cinfo = cinfo->ds;
+ bucket = fl_cinfo->buckets;
+ for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
+ if (list_empty(&bucket->committing))
continue;
data = nfs_commitdata_alloc();
if (!data)
break;
data->ds_commit_index = i;
- data->lseg = lseg;
+ data->lseg = bucket->clseg;
+ bucket->clseg = NULL;
list_add(&data->pages, list);
nreq++;
}
/* Clean up on error */
- for (j = i; j < fl->number_of_buckets; j++) {
- if (list_empty(&fl->commit_buckets[i].committing))
+ for (j = i; j < fl_cinfo->nbuckets; j++, bucket++) {
+ if (list_empty(&bucket->committing))
continue;
- nfs_retry_commit(&fl->commit_buckets[i].committing, lseg);
- put_lseg(lseg); /* associated with emptying bucket */
+ nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo);
+ put_lseg(bucket->clseg);
+ bucket->clseg = NULL;
}
- put_lseg(lseg);
/* Caller will clean up entries put on list */
return nreq;
}
@@ -1026,9 +1210,9 @@ alloc_ds_commits(struct inode *inode, struct list_head *list)
/* This follows nfs_commit_list pretty closely */
static int
filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
- int how)
+ int how, struct nfs_commit_info *cinfo)
{
- struct nfs_write_data *data, *tmp;
+ struct nfs_commit_data *data, *tmp;
LIST_HEAD(list);
unsigned int nreq = 0;
@@ -1039,30 +1223,34 @@ filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
list_add(&data->pages, &list);
nreq++;
} else
- nfs_retry_commit(mds_pages, NULL);
+ nfs_retry_commit(mds_pages, NULL, cinfo);
}
- nreq += alloc_ds_commits(inode, &list);
+ nreq += alloc_ds_commits(cinfo, &list);
if (nreq == 0) {
- nfs_commit_clear_lock(NFS_I(inode));
+ cinfo->completion_ops->error_cleanup(NFS_I(inode));
goto out;
}
- atomic_add(nreq, &NFS_I(inode)->commits_outstanding);
+ atomic_add(nreq, &cinfo->mds->rpcs_out);
list_for_each_entry_safe(data, tmp, &list, pages) {
list_del_init(&data->pages);
if (!data->lseg) {
- nfs_init_commit(data, mds_pages, NULL);
- nfs_initiate_commit(data, NFS_CLIENT(inode),
- data->mds_ops, how);
+ nfs_init_commit(data, mds_pages, NULL, cinfo);
+ nfs_initiate_commit(NFS_CLIENT(inode), data,
+ data->mds_ops, how, 0);
} else {
- nfs_init_commit(data, &FILELAYOUT_LSEG(data->lseg)->commit_buckets[data->ds_commit_index].committing, data->lseg);
+ struct pnfs_commit_bucket *buckets;
+
+ buckets = cinfo->ds->buckets;
+ nfs_init_commit(data, &buckets[data->ds_commit_index].committing, data->lseg, cinfo);
filelayout_initiate_commit(data, how);
}
}
out:
+ cinfo->ds->ncommitting = 0;
return PNFS_ATTEMPTED;
}
@@ -1072,17 +1260,47 @@ filelayout_free_deveiceid_node(struct nfs4_deviceid_node *d)
nfs4_fl_free_deviceid(container_of(d, struct nfs4_file_layout_dsaddr, id_node));
}
+static struct pnfs_layout_hdr *
+filelayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
+{
+ struct nfs4_filelayout *flo;
+
+ flo = kzalloc(sizeof(*flo), gfp_flags);
+ return &flo->generic_hdr;
+}
+
+static void
+filelayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ kfree(FILELAYOUT_FROM_HDR(lo));
+}
+
+static struct pnfs_ds_commit_info *
+filelayout_get_ds_info(struct inode *inode)
+{
+ struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
+
+ if (layout == NULL)
+ return NULL;
+ else
+ return &FILELAYOUT_FROM_HDR(layout)->commit_info;
+}
+
static struct pnfs_layoutdriver_type filelayout_type = {
.id = LAYOUT_NFSV4_1_FILES,
.name = "LAYOUT_NFSV4_1_FILES",
.owner = THIS_MODULE,
+ .alloc_layout_hdr = filelayout_alloc_layout_hdr,
+ .free_layout_hdr = filelayout_free_layout_hdr,
.alloc_lseg = filelayout_alloc_lseg,
.free_lseg = filelayout_free_lseg,
.pg_read_ops = &filelayout_pg_read_ops,
.pg_write_ops = &filelayout_pg_write_ops,
+ .get_ds_info = &filelayout_get_ds_info,
.mark_request_commit = filelayout_mark_request_commit,
.clear_request_commit = filelayout_clear_request_commit,
.scan_commit_lists = filelayout_scan_commit_lists,
+ .recover_commit_reqs = filelayout_recover_commit_reqs,
.commit_pagelist = filelayout_commit_pagelist,
.read_pagelist = filelayout_read_pagelist,
.write_pagelist = filelayout_write_pagelist,
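
The filelayout_alloc_commit_info() helper added above populates the per-DS commit buckets lazily: it allocates the array outside the lock, then re-checks under cinfo->lock and frees its allocation if another writer already installed one. A userspace sketch of that allocate-then-recheck pattern, assuming a pthread mutex in place of the kernel spinlock and illustrative structure names:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct bucket {
	int nwritten;
};

struct commit_info {
	pthread_mutex_t lock;
	struct bucket *buckets;
	int nbuckets;
};

static int alloc_buckets(struct commit_info *ci, int size)
{
	struct bucket *buckets;

	if (ci->nbuckets != 0)		/* fast path: already populated */
		return 0;

	buckets = calloc(size, sizeof(*buckets));
	if (!buckets)
		return -1;

	pthread_mutex_lock(&ci->lock);
	if (ci->nbuckets != 0) {
		free(buckets);		/* lost the race; keep the winner's array */
	} else {
		ci->buckets = buckets;
		ci->nbuckets = size;
	}
	pthread_mutex_unlock(&ci->lock);
	return 0;
}

int main(void)
{
	struct commit_info ci = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

	alloc_buckets(&ci, 8);
	printf("nbuckets = %d\n", ci.nbuckets);
	free(ci.buckets);
	return 0;
}
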
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index 21190bb1f5e3..43fe802dd678 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -33,6 +33,13 @@
#include "pnfs.h"
/*
+ * Default data server connection timeout and retrans values.
+ * Set by module parameters dataserver_timeo and dataserver_retrans.
+ */
+#define NFS4_DEF_DS_TIMEO 60
+#define NFS4_DEF_DS_RETRANS 5
+
+/*
* Field testing shows we need to support up to 4096 stripe indices.
* We store each index as a u8 (u32 on the wire) to keep the memory footprint
* reasonable. This in turn means we support a maximum of 256
@@ -41,6 +48,9 @@
#define NFS4_PNFS_MAX_STRIPE_CNT 4096
#define NFS4_PNFS_MAX_MULTI_CNT 256 /* 256 fit into a u8 stripe_index */
+/* error codes for internal use */
+#define NFS4ERR_RESET_TO_MDS 12001
+
enum stripetype4 {
STRIPE_SPARSE = 1,
STRIPE_DENSE = 2
@@ -62,23 +72,14 @@ struct nfs4_pnfs_ds {
atomic_t ds_count;
};
-/* nfs4_file_layout_dsaddr flags */
-#define NFS4_DEVICE_ID_NEG_ENTRY 0x00000001
-
struct nfs4_file_layout_dsaddr {
struct nfs4_deviceid_node id_node;
- unsigned long flags;
u32 stripe_count;
u8 *stripe_indices;
u32 ds_num;
struct nfs4_pnfs_ds *ds_list[1];
};
-struct nfs4_fl_commit_bucket {
- struct list_head written;
- struct list_head committing;
-};
-
struct nfs4_filelayout_segment {
struct pnfs_layout_segment generic_hdr;
u32 stripe_type;
@@ -89,10 +90,19 @@ struct nfs4_filelayout_segment {
struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
unsigned int num_fh;
struct nfs_fh **fh_array;
- struct nfs4_fl_commit_bucket *commit_buckets; /* Sort commits to ds */
- int number_of_buckets;
};
+struct nfs4_filelayout {
+ struct pnfs_layout_hdr generic_hdr;
+ struct pnfs_ds_commit_info commit_info;
+};
+
+static inline struct nfs4_filelayout *
+FILELAYOUT_FROM_HDR(struct pnfs_layout_hdr *lo)
+{
+ return container_of(lo, struct nfs4_filelayout, generic_hdr);
+}
+
static inline struct nfs4_filelayout_segment *
FILELAYOUT_LSEG(struct pnfs_layout_segment *lseg)
{
@@ -107,6 +117,36 @@ FILELAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg)
return &FILELAYOUT_LSEG(lseg)->dsaddr->id_node;
}
+static inline void
+filelayout_mark_devid_invalid(struct nfs4_deviceid_node *node)
+{
+ u32 *p = (u32 *)&node->deviceid;
+
+ printk(KERN_WARNING "NFS: Deviceid [%x%x%x%x] marked out of use.\n",
+ p[0], p[1], p[2], p[3]);
+
+ set_bit(NFS_DEVICEID_INVALID, &node->flags);
+}
+
+static inline bool
+filelayout_test_layout_invalid(struct pnfs_layout_hdr *lo)
+{
+ return test_bit(NFS_LAYOUT_INVALID, &lo->plh_flags);
+}
+
+static inline bool
+filelayout_test_devid_invalid(struct nfs4_deviceid_node *node)
+{
+ return test_bit(NFS_DEVICEID_INVALID, &node->flags);
+}
+
+static inline bool
+filelayout_reset_to_mds(struct pnfs_layout_segment *lseg)
+{
+ return filelayout_test_devid_invalid(FILELAYOUT_DEVID_NODE(lseg)) ||
+ filelayout_test_layout_invalid(lseg->pls_layout);
+}
+
extern struct nfs_fh *
nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j);
@@ -119,5 +159,6 @@ extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
extern void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
struct nfs4_file_layout_dsaddr *
get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
+void nfs4_ds_disconnect(struct nfs_client *clp);
#endif /* FS_NFS_NFS4FILELAYOUT_H */
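
FILELAYOUT_FROM_HDR() above works by embedding the generic pnfs_layout_hdr at the start of the driver-private nfs4_filelayout and recovering the outer object with container_of(). A self-contained sketch of that embedding pattern, with illustrative structure names rather than the kernel types:

#include <stdio.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_hdr {
	int flags;
};

struct private_layout {
	struct generic_hdr generic_hdr;	/* embedded generic part */
	int nbuckets;			/* driver-private commit state */
};

static struct private_layout *from_hdr(struct generic_hdr *lo)
{
	return container_of(lo, struct private_layout, generic_hdr);
}

int main(void)
{
	struct private_layout flo = { .generic_hdr = { .flags = 1 }, .nbuckets = 4 };
	struct generic_hdr *lo = &flo.generic_hdr;

	printf("nbuckets via generic hdr: %d\n", from_hdr(lo)->nbuckets);
	return 0;
}
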
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index c9cff9adb2d3..a1fab8da7f03 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -30,12 +30,16 @@
#include <linux/nfs_fs.h>
#include <linux/vmalloc.h>
+#include <linux/module.h>
#include "internal.h"
#include "nfs4filelayout.h"
#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO;
+static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS;
+
/*
* Data server cache
*
@@ -145,6 +149,28 @@ _data_server_lookup_locked(const struct list_head *dsaddrs)
}
/*
+ * Lookup DS by nfs_client pointer. Zero data server client pointer
+ */
+void nfs4_ds_disconnect(struct nfs_client *clp)
+{
+ struct nfs4_pnfs_ds *ds;
+ struct nfs_client *found = NULL;
+
+ dprintk("%s clp %p\n", __func__, clp);
+ spin_lock(&nfs4_ds_cache_lock);
+ list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
+ if (ds->ds_clp && ds->ds_clp == clp) {
+ found = ds->ds_clp;
+ ds->ds_clp = NULL;
+ }
+ spin_unlock(&nfs4_ds_cache_lock);
+ if (found) {
+ set_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state);
+ nfs_put_client(clp);
+ }
+}
+
+/*
* Create an rpc connection to the nfs4_pnfs_ds data server
* Currently only supports IPv4 and IPv6 addresses
*/
@@ -165,8 +191,9 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
__func__, ds->ds_remotestr, da->da_remotestr);
clp = nfs4_set_ds_client(mds_srv->nfs_client,
- (struct sockaddr *)&da->da_addr,
- da->da_addrlen, IPPROTO_TCP);
+ (struct sockaddr *)&da->da_addr,
+ da->da_addrlen, IPPROTO_TCP,
+ dataserver_timeo, dataserver_retrans);
if (!IS_ERR(clp))
break;
}
@@ -176,28 +203,7 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
goto out;
}
- if ((clp->cl_exchange_flags & EXCHGID4_FLAG_MASK_PNFS) != 0) {
- if (!is_ds_client(clp)) {
- status = -ENODEV;
- goto out_put;
- }
- ds->ds_clp = clp;
- dprintk("%s [existing] server=%s\n", __func__,
- ds->ds_remotestr);
- goto out;
- }
-
- /*
- * Do not set NFS_CS_CHECK_LEASE_TIME instead set the DS lease to
- * be equal to the MDS lease. Renewal is scheduled in create_session.
- */
- spin_lock(&mds_srv->nfs_client->cl_lock);
- clp->cl_lease_time = mds_srv->nfs_client->cl_lease_time;
- spin_unlock(&mds_srv->nfs_client->cl_lock);
- clp->cl_last_renewal = jiffies;
-
- /* New nfs_client */
- status = nfs4_init_ds_session(clp);
+ status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time);
if (status)
goto out_put;
@@ -602,7 +608,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
mp_count = be32_to_cpup(p); /* multipath count */
for (j = 0; j < mp_count; j++) {
- da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->net,
+ da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->cl_net,
&stream, gfp_flags);
if (da)
list_add_tail(&da->da_node, &dsaddrs);
@@ -791,48 +797,42 @@ nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j)
return flseg->fh_array[i];
}
-static void
-filelayout_mark_devid_negative(struct nfs4_file_layout_dsaddr *dsaddr,
- int err, const char *ds_remotestr)
-{
- u32 *p = (u32 *)&dsaddr->id_node.deviceid;
-
- printk(KERN_ERR "NFS: data server %s connection error %d."
- " Deviceid [%x%x%x%x] marked out of use.\n",
- ds_remotestr, err, p[0], p[1], p[2], p[3]);
-
- spin_lock(&nfs4_ds_cache_lock);
- dsaddr->flags |= NFS4_DEVICE_ID_NEG_ENTRY;
- spin_unlock(&nfs4_ds_cache_lock);
-}
-
struct nfs4_pnfs_ds *
nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
{
struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
+ struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
+
+ if (filelayout_test_devid_invalid(devid))
+ return NULL;
if (ds == NULL) {
printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
__func__, ds_idx);
- return NULL;
+ goto mark_dev_invalid;
}
if (!ds->ds_clp) {
struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
int err;
- if (dsaddr->flags & NFS4_DEVICE_ID_NEG_ENTRY) {
- /* Already tried to connect, don't try again */
- dprintk("%s Deviceid marked out of use\n", __func__);
- return NULL;
- }
err = nfs4_ds_connect(s, ds);
- if (err) {
- filelayout_mark_devid_negative(dsaddr, err,
- ds->ds_remotestr);
- return NULL;
- }
+ if (err)
+ goto mark_dev_invalid;
}
return ds;
+
+mark_dev_invalid:
+ filelayout_mark_devid_invalid(devid);
+ return NULL;
}
+
+module_param(dataserver_retrans, uint, 0644);
+MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
+ "retries a request before it attempts further "
+ " recovery action.");
+module_param(dataserver_timeo, uint, 0644);
+MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
+ "NFSv4.1 client waits for a response from a "
+ " data server before it retries an NFS request.");
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index a7f3dedc4ec7..017b4b01a69c 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -132,6 +132,35 @@ static size_t nfs_parse_server_name(char *string, size_t len,
return ret;
}
+rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
+{
+ struct gss_api_mech *mech;
+ struct xdr_netobj oid;
+ int i;
+ rpc_authflavor_t pseudoflavor = RPC_AUTH_UNIX;
+
+ for (i = 0; i < flavors->num_flavors; i++) {
+ struct nfs4_secinfo_flavor *flavor;
+ flavor = &flavors->flavors[i];
+
+ if (flavor->flavor == RPC_AUTH_NULL || flavor->flavor == RPC_AUTH_UNIX) {
+ pseudoflavor = flavor->flavor;
+ break;
+ } else if (flavor->flavor == RPC_AUTH_GSS) {
+ oid.len = flavor->gss.sec_oid4.len;
+ oid.data = flavor->gss.sec_oid4.data;
+ mech = gss_mech_get_by_OID(&oid);
+ if (!mech)
+ continue;
+ pseudoflavor = gss_svc_to_pseudoflavor(mech, flavor->gss.service);
+ gss_mech_put(mech);
+ break;
+ }
+ }
+
+ return pseudoflavor;
+}
+
static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr *name)
{
struct page *page;
@@ -168,7 +197,7 @@ struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *clnt, struct inode *ino
rpc_authflavor_t flavor;
flavor = nfs4_negotiate_security(inode, name);
- if (flavor < 0)
+ if ((int)flavor < 0)
return ERR_PTR(flavor);
clone = rpc_clone_client(clnt);
@@ -300,7 +329,7 @@ out:
* @dentry - dentry of referral
*
*/
-struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
+static struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
{
struct vfsmount *mnt = ERR_PTR(-ENOMEM);
struct dentry *parent;
@@ -341,3 +370,25 @@ out:
dprintk("%s: done\n", __func__);
return mnt;
}
+
+struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry,
+ struct nfs_fh *fh, struct nfs_fattr *fattr)
+{
+ struct dentry *parent = dget_parent(dentry);
+ struct rpc_clnt *client;
+ struct vfsmount *mnt;
+
+ /* Look it up again to get its attributes and sec flavor */
+ client = nfs4_proc_lookup_mountpoint(parent->d_inode, &dentry->d_name, fh, fattr);
+ dput(parent);
+ if (IS_ERR(client))
+ return ERR_CAST(client);
+
+ if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
+ mnt = nfs_do_refmount(client, dentry);
+ else
+ mnt = nfs_do_submount(dentry, fh, fattr, client->cl_auth->au_flavor);
+
+ rpc_shutdown_client(client);
+ return mnt;
+}
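
The nfs_find_best_sec() helper added above scans the server's flavor list, accepts AUTH_NULL or AUTH_UNIX immediately, resolves GSS entries via their mechanism OID, and otherwise falls back to AUTH_UNIX. A simplified sketch of that selection loop, with stand-in constants and a stub in place of the sunrpc GSS helpers:

#include <stdio.h>

enum { FLAVOR_NULL = 0, FLAVOR_UNIX = 1, FLAVOR_GSS = 6 };

static int resolve_gss(int service)
{
	/* stand-in for gss_mech_get_by_OID()/gss_svc_to_pseudoflavor() */
	return service == 1 ? 390003 : -1;
}

static int find_best_flavor(const int *flavors, const int *services, int n)
{
	int best = FLAVOR_UNIX;		/* default, as in the patch */
	int i, gss;

	for (i = 0; i < n; i++) {
		if (flavors[i] == FLAVOR_NULL || flavors[i] == FLAVOR_UNIX)
			return flavors[i];
		if (flavors[i] == FLAVOR_GSS) {
			gss = resolve_gss(services[i]);
			if (gss < 0)
				continue;	/* unknown mechanism, keep looking */
			return gss;
		}
	}
	return best;
}

int main(void)
{
	int flavors[] = { FLAVOR_GSS, FLAVOR_UNIX };
	int services[] = { 1, 0 };

	printf("chosen pseudoflavor: %d\n", find_best_flavor(flavors, services, 2));
	return 0;
}
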
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ab985f6f0da8..d48dbefa0e71 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -64,6 +64,7 @@
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
+#include "netns.h"
#define NFSDBG_FACILITY NFSDBG_PROC
@@ -80,6 +81,7 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
+static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
@@ -101,6 +103,8 @@ static int nfs4_map_errors(int err)
case -NFS4ERR_BADOWNER:
case -NFS4ERR_BADNAME:
return -EINVAL;
+ case -NFS4ERR_SHARE_DENIED:
+ return -EACCES;
default:
dprintk("%s could not handle NFSv4 error %d\n",
__func__, -err);
@@ -304,7 +308,7 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR: %d Reset session\n", __func__,
errorcode);
- nfs4_schedule_session_recovery(clp->cl_session);
+ nfs4_schedule_session_recovery(clp->cl_session, errorcode);
exception->retry = 1;
break;
#endif /* defined(CONFIG_NFS_V4_1) */
@@ -772,7 +776,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
struct nfs_inode *nfsi = NFS_I(dir);
spin_lock(&dir->i_lock);
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
if (!cinfo->atomic || cinfo->before != dir->i_version)
nfs_force_lookup_revalidate(dir);
dir->i_version = cinfo->after;
@@ -788,7 +792,6 @@ struct nfs4_opendata {
struct nfs4_string owner_name;
struct nfs4_string group_name;
struct nfs_fattr f_attr;
- struct nfs_fattr dir_attr;
struct dentry *dir;
struct dentry *dentry;
struct nfs4_state_owner *owner;
@@ -804,12 +807,10 @@ struct nfs4_opendata {
static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
p->o_res.f_attr = &p->f_attr;
- p->o_res.dir_attr = &p->dir_attr;
p->o_res.seqid = p->o_arg.seqid;
p->c_res.seqid = p->c_arg.seqid;
p->o_res.server = p->o_arg.server;
nfs_fattr_init(&p->f_attr);
- nfs_fattr_init(&p->dir_attr);
nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}
@@ -843,7 +844,6 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
p->o_arg.name = &dentry->d_name;
p->o_arg.server = server;
p->o_arg.bitmask = server->attr_bitmask;
- p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
if (attrs != NULL && attrs->ia_valid != 0) {
__be32 verf[2];
@@ -1332,7 +1332,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
- nfs4_schedule_session_recovery(server->nfs_client->cl_session);
+ nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
goto out;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
@@ -1611,8 +1611,6 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
- nfs_refresh_inode(dir, o_res->dir_attr);
-
if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
status = _nfs4_proc_open_confirm(data);
if (status != 0)
@@ -1645,11 +1643,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
nfs_fattr_map_and_free_names(server, &data->f_attr);
- if (o_arg->open_flags & O_CREAT) {
+ if (o_arg->open_flags & O_CREAT)
update_changeattr(dir, &o_res->cinfo);
- nfs_post_op_update_inode(dir, o_res->dir_attr);
- } else
- nfs_refresh_inode(dir, o_res->dir_attr);
if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
server->caps &= ~NFS_CAP_POSIX_LOCK;
if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
@@ -1789,7 +1784,14 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct
/*
* Returns a referenced nfs4_state
*/
-static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
+static int _nfs4_do_open(struct inode *dir,
+ struct dentry *dentry,
+ fmode_t fmode,
+ int flags,
+ struct iattr *sattr,
+ struct rpc_cred *cred,
+ struct nfs4_state **res,
+ struct nfs4_threshold **ctx_th)
{
struct nfs4_state_owner *sp;
struct nfs4_state *state = NULL;
@@ -1814,6 +1816,11 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode
if (opendata == NULL)
goto err_put_state_owner;
+ if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
+ opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
+ if (!opendata->f_attr.mdsthreshold)
+ goto err_opendata_put;
+ }
if (dentry->d_inode != NULL)
opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
@@ -1839,11 +1846,19 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode
nfs_setattr_update_inode(state->inode, sattr);
nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
}
+
+ if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
+ *ctx_th = opendata->f_attr.mdsthreshold;
+ else
+ kfree(opendata->f_attr.mdsthreshold);
+ opendata->f_attr.mdsthreshold = NULL;
+
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
*res = state;
return 0;
err_opendata_put:
+ kfree(opendata->f_attr.mdsthreshold);
nfs4_opendata_put(opendata);
err_put_state_owner:
nfs4_put_state_owner(sp);
@@ -1853,14 +1868,21 @@ out_err:
}
-static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
+static struct nfs4_state *nfs4_do_open(struct inode *dir,
+ struct dentry *dentry,
+ fmode_t fmode,
+ int flags,
+ struct iattr *sattr,
+ struct rpc_cred *cred,
+ struct nfs4_threshold **ctx_th)
{
struct nfs4_exception exception = { };
struct nfs4_state *res;
int status;
do {
- status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res);
+ status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
+ &res, ctx_th);
if (status == 0)
break;
/* NOTE: BAD_SEQID means the server and client disagree about the
@@ -2184,7 +2206,8 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags
struct nfs4_state *state;
/* Protect against concurrent sillydeletes */
- state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred);
+ state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr,
+ ctx->cred, &ctx->mdsthreshold);
if (IS_ERR(state))
return ERR_CAST(state);
ctx->state = state;
@@ -2354,8 +2377,8 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
/*
* get the file handle for the "/" directory on the server
*/
-static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info)
+int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
{
int minor_version = server->nfs_client->cl_minorversion;
int status = nfs4_lookup_root(server, fhandle, info);
@@ -2372,6 +2395,31 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
return nfs4_map_errors(status);
}
+static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
+ struct nfs_fsinfo *info)
+{
+ int error;
+ struct nfs_fattr *fattr = info->fattr;
+
+ error = nfs4_server_capabilities(server, mntfh);
+ if (error < 0) {
+ dprintk("nfs4_get_root: getcaps error = %d\n", -error);
+ return error;
+ }
+
+ error = nfs4_proc_getattr(server, mntfh, fattr);
+ if (error < 0) {
+ dprintk("nfs4_get_root: getattr error = %d\n", -error);
+ return error;
+ }
+
+ if (fattr->valid & NFS_ATTR_FATTR_FSID &&
+ !nfs_fsid_equal(&server->fsid, &fattr->fsid))
+ memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
+
+ return error;
+}
+
/*
* Get locations and (maybe) other attributes of a referral.
* Note that we'll actually follow the referral later when
@@ -2578,7 +2626,7 @@ out:
return err;
}
-static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
int status;
@@ -2761,7 +2809,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
fmode = ctx->mode;
}
sattr->ia_mode &= ~current_umask();
- state = nfs4_do_open(dir, de, fmode, flags, sattr, cred);
+ state = nfs4_do_open(dir, de, fmode, flags, sattr, cred, NULL);
d_drop(dentry);
if (IS_ERR(state)) {
status = PTR_ERR(state);
@@ -2783,7 +2831,6 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
struct nfs_removeargs args = {
.fh = NFS_FH(dir),
.name = *name,
- .bitmask = server->attr_bitmask,
};
struct nfs_removeres res = {
.server = server,
@@ -2793,19 +2840,11 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
.rpc_argp = &args,
.rpc_resp = &res,
};
- int status = -ENOMEM;
-
- res.dir_attr = nfs_alloc_fattr();
- if (res.dir_attr == NULL)
- goto out;
+ int status;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
- if (status == 0) {
+ if (status == 0)
update_changeattr(dir, &res.cinfo);
- nfs_post_op_update_inode(dir, res.dir_attr);
- }
- nfs_free_fattr(res.dir_attr);
-out:
return status;
}
@@ -2827,7 +2866,6 @@ static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
struct nfs_removeargs *args = msg->rpc_argp;
struct nfs_removeres *res = msg->rpc_resp;
- args->bitmask = server->cache_consistency_bitmask;
res->server = server;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
@@ -2852,7 +2890,6 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
update_changeattr(dir, &res->cinfo);
- nfs_post_op_update_inode(dir, res->dir_attr);
return 1;
}
@@ -2863,7 +2900,6 @@ static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
struct nfs_renameres *res = msg->rpc_resp;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
- arg->bitmask = server->attr_bitmask;
res->server = server;
nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
}
@@ -2889,9 +2925,7 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
return 0;
update_changeattr(old_dir, &res->old_cinfo);
- nfs_post_op_update_inode(old_dir, res->old_fattr);
update_changeattr(new_dir, &res->new_cinfo);
- nfs_post_op_update_inode(new_dir, res->new_fattr);
return 1;
}
@@ -2904,7 +2938,6 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
.new_dir = NFS_FH(new_dir),
.old_name = old_name,
.new_name = new_name,
- .bitmask = server->attr_bitmask,
};
struct nfs_renameres res = {
.server = server,
@@ -2916,21 +2949,11 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
};
int status = -ENOMEM;
- res.old_fattr = nfs_alloc_fattr();
- res.new_fattr = nfs_alloc_fattr();
- if (res.old_fattr == NULL || res.new_fattr == NULL)
- goto out;
-
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
update_changeattr(old_dir, &res.old_cinfo);
- nfs_post_op_update_inode(old_dir, res.old_fattr);
update_changeattr(new_dir, &res.new_cinfo);
- nfs_post_op_update_inode(new_dir, res.new_fattr);
}
-out:
- nfs_free_fattr(res.new_fattr);
- nfs_free_fattr(res.old_fattr);
return status;
}
@@ -2968,18 +2991,15 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *
int status = -ENOMEM;
res.fattr = nfs_alloc_fattr();
- res.dir_attr = nfs_alloc_fattr();
- if (res.fattr == NULL || res.dir_attr == NULL)
+ if (res.fattr == NULL)
goto out;
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
update_changeattr(dir, &res.cinfo);
- nfs_post_op_update_inode(dir, res.dir_attr);
nfs_post_op_update_inode(inode, res.fattr);
}
out:
- nfs_free_fattr(res.dir_attr);
nfs_free_fattr(res.fattr);
return status;
}
@@ -3002,7 +3022,6 @@ struct nfs4_createdata {
struct nfs4_create_res res;
struct nfs_fh fh;
struct nfs_fattr fattr;
- struct nfs_fattr dir_fattr;
};
static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
@@ -3026,9 +3045,7 @@ static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
data->res.server = server;
data->res.fh = &data->fh;
data->res.fattr = &data->fattr;
- data->res.dir_fattr = &data->dir_fattr;
nfs_fattr_init(data->res.fattr);
- nfs_fattr_init(data->res.dir_fattr);
}
return data;
}
@@ -3039,7 +3056,6 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
&data->arg.seq_args, &data->res.seq_res, 1);
if (status == 0) {
update_changeattr(dir, &data->res.dir_cinfo);
- nfs_post_op_update_inode(dir, data->res.dir_fattr);
status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
}
return status;
@@ -3335,12 +3351,12 @@ static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
void __nfs4_read_done_cb(struct nfs_read_data *data)
{
- nfs_invalidate_atime(data->inode);
+ nfs_invalidate_atime(data->header->inode);
}
static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
{
- struct nfs_server *server = NFS_SERVER(data->inode);
+ struct nfs_server *server = NFS_SERVER(data->header->inode);
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
rpc_restart_call_prepare(task);
@@ -3375,7 +3391,7 @@ static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message
static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
{
- if (nfs4_setup_sequence(NFS_SERVER(data->inode),
+ if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
&data->args.seq_args,
&data->res.seq_res,
task))
@@ -3383,25 +3399,9 @@ static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_da
rpc_call_start(task);
}
-/* Reset the the nfs_read_data to send the read to the MDS. */
-void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
-{
- dprintk("%s Reset task for i/o through\n", __func__);
- put_lseg(data->lseg);
- data->lseg = NULL;
- /* offsets will differ in the dense stripe case */
- data->args.offset = data->mds_offset;
- data->ds_clp = NULL;
- data->args.fh = NFS_FH(data->inode);
- data->read_done_cb = nfs4_read_done_cb;
- task->tk_ops = data->mds_ops;
- rpc_task_reset_client(task, NFS_CLIENT(data->inode));
-}
-EXPORT_SYMBOL_GPL(nfs4_reset_read);
-
static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
- struct inode *inode = data->inode;
+ struct inode *inode = data->header->inode;
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
rpc_restart_call_prepare(task);
@@ -3409,7 +3409,7 @@ static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data
}
if (task->tk_status >= 0) {
renew_lease(NFS_SERVER(inode), data->timestamp);
- nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
+ nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
}
return 0;
}
@@ -3422,32 +3422,30 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
nfs4_write_done_cb(task, data);
}
-/* Reset the the nfs_write_data to send the write to the MDS. */
-void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
+static
+bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
{
- dprintk("%s Reset task for i/o through\n", __func__);
- put_lseg(data->lseg);
- data->lseg = NULL;
- data->ds_clp = NULL;
- data->write_done_cb = nfs4_write_done_cb;
- data->args.fh = NFS_FH(data->inode);
- data->args.bitmask = data->res.server->cache_consistency_bitmask;
- data->args.offset = data->mds_offset;
- data->res.fattr = &data->fattr;
- task->tk_ops = data->mds_ops;
- rpc_task_reset_client(task, NFS_CLIENT(data->inode));
+ const struct nfs_pgio_header *hdr = data->header;
+
+ /* Don't request attributes for pNFS or O_DIRECT writes */
+ if (data->ds_clp != NULL || hdr->dreq != NULL)
+ return false;
+ /* Otherwise, request attributes if and only if we don't hold
+ * a delegation
+ */
+ return nfs_have_delegation(hdr->inode, FMODE_READ) == 0;
}
-EXPORT_SYMBOL_GPL(nfs4_reset_write);
static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
- struct nfs_server *server = NFS_SERVER(data->inode);
+ struct nfs_server *server = NFS_SERVER(data->header->inode);
- if (data->lseg) {
+ if (!nfs4_write_need_cache_consistency_data(data)) {
data->args.bitmask = NULL;
data->res.fattr = NULL;
} else
data->args.bitmask = server->cache_consistency_bitmask;
+
if (!data->write_done_cb)
data->write_done_cb = nfs4_write_done_cb;
data->res.server = server;
@@ -3459,6 +3457,16 @@ static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_messag
static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
{
+ if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
+ &data->args.seq_args,
+ &data->res.seq_res,
+ task))
+ return;
+ rpc_call_start(task);
+}
+
+static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+{
if (nfs4_setup_sequence(NFS_SERVER(data->inode),
&data->args.seq_args,
&data->res.seq_res,
@@ -3467,7 +3475,7 @@ static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_
rpc_call_start(task);
}
-static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
+static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
{
struct inode *inode = data->inode;
@@ -3475,28 +3483,22 @@ static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *dat
rpc_restart_call_prepare(task);
return -EAGAIN;
}
- nfs_refresh_inode(inode, data->res.fattr);
return 0;
}
-static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
+static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
{
if (!nfs4_sequence_done(task, &data->res.seq_res))
return -EAGAIN;
- return data->write_done_cb(task, data);
+ return data->commit_done_cb(task, data);
}
-static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
{
struct nfs_server *server = NFS_SERVER(data->inode);
- if (data->lseg) {
- data->args.bitmask = NULL;
- data->res.fattr = NULL;
- } else
- data->args.bitmask = server->cache_consistency_bitmask;
- if (!data->write_done_cb)
- data->write_done_cb = nfs4_commit_done_cb;
+ if (data->commit_done_cb == NULL)
+ data->commit_done_cb = nfs4_commit_done_cb;
data->res.server = server;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
@@ -3905,7 +3907,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR %d, Reset session\n", __func__,
task->tk_status);
- nfs4_schedule_session_recovery(clp->cl_session);
+ nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
task->tk_status = 0;
return -EAGAIN;
#endif /* CONFIG_NFS_V4_1 */
@@ -3931,13 +3933,21 @@ wait_on_recovery:
return -EAGAIN;
}
-static void nfs4_construct_boot_verifier(struct nfs_client *clp,
- nfs4_verifier *bootverf)
+static void nfs4_init_boot_verifier(const struct nfs_client *clp,
+ nfs4_verifier *bootverf)
{
__be32 verf[2];
- verf[0] = htonl((u32)clp->cl_boot_time.tv_sec);
- verf[1] = htonl((u32)clp->cl_boot_time.tv_nsec);
+ if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
+ /* An impossible timestamp guarantees this value
+ * will never match a generated boot time. */
+ verf[0] = 0;
+ verf[1] = (__be32)(NSEC_PER_SEC + 1);
+ } else {
+ struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
+ verf[0] = (__be32)nn->boot_time.tv_sec;
+ verf[1] = (__be32)nn->boot_time.tv_nsec;
+ }
memcpy(bootverf->data, verf, sizeof(bootverf->data));
}
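
The boot verifier is only eight bytes: two 32-bit words taken from the recorded boot timestamp, or a deliberately impossible timestamp (nanoseconds greater than NSEC_PER_SEC) while state is being purged, so the purge verifier can never collide with one built from a real boot time. A small self-contained sketch of the same packing; using the current wall-clock time in place of the per-net boot time is an assumption of the sketch:

/* Illustrative 8-byte verifier packing; CLOCK_REALTIME "now" stands in
 * for the boot time the kernel records per network namespace. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

static void init_boot_verifier(unsigned char data[8], int purging)
{
	uint32_t verf[2];

	if (purging) {
		/* tv_nsec is always < NSEC_PER_SEC, so this pair can
		 * never match a verifier built from a real timestamp. */
		verf[0] = 0;
		verf[1] = (uint32_t)(NSEC_PER_SEC + 1);
	} else {
		struct timespec boot;

		clock_gettime(CLOCK_REALTIME, &boot);
		verf[0] = (uint32_t)boot.tv_sec;
		verf[1] = (uint32_t)boot.tv_nsec;
	}
	memcpy(data, verf, sizeof(verf));
}

int main(void)
{
	unsigned char v[8];
	int i;

	init_boot_verifier(v, 0);
	for (i = 0; i < 8; i++)
		printf("%02x", v[i]);
	printf("\n");
	return 0;
}
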
@@ -3960,7 +3970,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
int loop = 0;
int status;
- nfs4_construct_boot_verifier(clp, &sc_verifier);
+ nfs4_init_boot_verifier(clp, &sc_verifier);
for(;;) {
rcu_read_lock();
@@ -4104,7 +4114,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
data->args.fhandle = &data->fh;
data->args.stateid = &data->stateid;
- data->args.bitmask = server->attr_bitmask;
+ data->args.bitmask = server->cache_consistency_bitmask;
nfs_copy_fh(&data->fh, NFS_FH(inode));
nfs4_stateid_copy(&data->stateid, stateid);
data->res.fattr = &data->fattr;
@@ -4125,9 +4135,10 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
if (status != 0)
goto out;
status = data->rpc_status;
- if (status != 0)
- goto out;
- nfs_refresh_inode(inode, &data->fattr);
+ if (status == 0)
+ nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
+ else
+ nfs_refresh_inode(inode, &data->fattr);
out:
rpc_put_task(task);
return status;
@@ -4837,7 +4848,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
- nfs4_schedule_session_recovery(server->nfs_client->cl_session);
+ nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
goto out;
case -ERESTARTSYS:
/*
@@ -5079,7 +5090,8 @@ out_inval:
}
static bool
-nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
+nfs41_same_server_scope(struct nfs41_server_scope *a,
+ struct nfs41_server_scope *b)
{
if (a->server_scope_sz == b->server_scope_sz &&
memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
@@ -5089,6 +5101,61 @@ nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
}
/*
+ * nfs4_proc_bind_conn_to_session()
+ *
+ * The 4.1 client currently uses the same TCP connection for the
+ * fore and backchannel.
+ */
+int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ int status;
+ struct nfs41_bind_conn_to_session_res res;
+ struct rpc_message msg = {
+ .rpc_proc =
+ &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
+ .rpc_argp = clp,
+ .rpc_resp = &res,
+ .rpc_cred = cred,
+ };
+
+ dprintk("--> %s\n", __func__);
+ BUG_ON(clp == NULL);
+
+ res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
+ if (unlikely(res.session == NULL)) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+ if (status == 0) {
+ if (memcmp(res.session->sess_id.data,
+ clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
+ dprintk("NFS: %s: Session ID mismatch\n", __func__);
+ status = -EIO;
+ goto out_session;
+ }
+ if (res.dir != NFS4_CDFS4_BOTH) {
+ dprintk("NFS: %s: Unexpected direction from server\n",
+ __func__);
+ status = -EIO;
+ goto out_session;
+ }
+ if (res.use_conn_in_rdma_mode) {
+ dprintk("NFS: %s: Server returned RDMA mode = true\n",
+ __func__);
+ status = -EIO;
+ goto out_session;
+ }
+ }
+out_session:
+ kfree(res.session);
+out:
+ dprintk("<-- %s status= %d\n", __func__, status);
+ return status;
+}
+
+/*
* nfs4_proc_exchange_id()
*
* Since the clientid has expired, all compounds using sessions
@@ -5105,7 +5172,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
};
struct nfs41_exchange_id_res res = {
- .client = clp,
+ 0
};
int status;
struct rpc_message msg = {
@@ -5118,7 +5185,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
dprintk("--> %s\n", __func__);
BUG_ON(clp == NULL);
- nfs4_construct_boot_verifier(clp, &verifier);
+ nfs4_init_boot_verifier(clp, &verifier);
args.id_len = scnprintf(args.id, sizeof(args.id),
"%s/%s/%u",
@@ -5126,59 +5193,135 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
clp->cl_rpcclient->cl_nodename,
clp->cl_rpcclient->cl_auth->au_flavor);
- res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
- if (unlikely(!res.server_scope)) {
+ res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
+ GFP_NOFS);
+ if (unlikely(res.server_owner == NULL)) {
status = -ENOMEM;
goto out;
}
- res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL);
- if (unlikely(!res.impl_id)) {
+ res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
+ GFP_NOFS);
+ if (unlikely(res.server_scope == NULL)) {
+ status = -ENOMEM;
+ goto out_server_owner;
+ }
+
+ res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
+ if (unlikely(res.impl_id == NULL)) {
status = -ENOMEM;
goto out_server_scope;
}
status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
- if (!status)
- status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
+ if (status == 0)
+ status = nfs4_check_cl_exchange_flags(res.flags);
+
+ if (status == 0) {
+ clp->cl_clientid = res.clientid;
+ clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R);
+ if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R))
+ clp->cl_seqid = res.seqid;
+
+ kfree(clp->cl_serverowner);
+ clp->cl_serverowner = res.server_owner;
+ res.server_owner = NULL;
- if (!status) {
/* use the most recent implementation id */
- kfree(clp->impl_id);
- clp->impl_id = res.impl_id;
- } else
- kfree(res.impl_id);
+ kfree(clp->cl_implid);
+ clp->cl_implid = res.impl_id;
- if (!status) {
- if (clp->server_scope &&
- !nfs41_same_server_scope(clp->server_scope,
+ if (clp->cl_serverscope != NULL &&
+ !nfs41_same_server_scope(clp->cl_serverscope,
res.server_scope)) {
dprintk("%s: server_scope mismatch detected\n",
__func__);
set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
- kfree(clp->server_scope);
- clp->server_scope = NULL;
+ kfree(clp->cl_serverscope);
+ clp->cl_serverscope = NULL;
}
- if (!clp->server_scope) {
- clp->server_scope = res.server_scope;
+ if (clp->cl_serverscope == NULL) {
+ clp->cl_serverscope = res.server_scope;
goto out;
}
- }
+ } else
+ kfree(res.impl_id);
+out_server_owner:
+ kfree(res.server_owner);
out_server_scope:
kfree(res.server_scope);
out:
- if (clp->impl_id)
+ if (clp->cl_implid != NULL)
dprintk("%s: Server Implementation ID: "
"domain: %s, name: %s, date: %llu,%u\n",
- __func__, clp->impl_id->domain, clp->impl_id->name,
- clp->impl_id->date.seconds,
- clp->impl_id->date.nseconds);
+ __func__, clp->cl_implid->domain, clp->cl_implid->name,
+ clp->cl_implid->date.seconds,
+ clp->cl_implid->date.nseconds);
dprintk("<-- %s status= %d\n", __func__, status);
return status;
}
+static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
+ struct rpc_cred *cred)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
+ .rpc_argp = clp,
+ .rpc_cred = cred,
+ };
+ int status;
+
+ status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+ if (status)
+ pr_warn("NFS: Got error %d from the server %s on "
+ "DESTROY_CLIENTID.", status, clp->cl_hostname);
+ return status;
+}
+
+static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
+ struct rpc_cred *cred)
+{
+ unsigned int loop;
+ int ret;
+
+ for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
+ ret = _nfs4_proc_destroy_clientid(clp, cred);
+ switch (ret) {
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_CLIENTID_BUSY:
+ ssleep(1);
+ break;
+ default:
+ return ret;
+ }
+ }
+ return 0;
+}
+
+int nfs4_destroy_clientid(struct nfs_client *clp)
+{
+ struct rpc_cred *cred;
+ int ret = 0;
+
+ if (clp->cl_mvops->minor_version < 1)
+ goto out;
+ if (clp->cl_exchange_flags == 0)
+ goto out;
+ cred = nfs4_get_exchange_id_cred(clp);
+ ret = nfs4_proc_destroy_clientid(clp, cred);
+ if (cred)
+ put_rpccred(cred);
+ switch (ret) {
+ case 0:
+ case -NFS4ERR_STALE_CLIENTID:
+ clp->cl_exchange_flags = 0;
+ }
+out:
+ return ret;
+}
+
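
The DESTROY_CLIENTID helpers above retry the call a bounded number of times, sleeping one second whenever the server answers NFS4ERR_DELAY or NFS4ERR_CLIENTID_BUSY and stopping immediately on any other result. The same bounded-retry shape in a self-contained sketch; the fake RPC and the loop bound are placeholders, while the error numbers follow RFC 5661:

/* Illustrative bounded retry with a fixed one-second backoff; the RPC
 * is faked and MAX_LOOPS is a stand-in for NFS4_MAX_LOOP_ON_RECOVER. */
#include <stdio.h>
#include <unistd.h>

#define MAX_LOOPS		10
#define NFS4ERR_DELAY		(-10008)
#define NFS4ERR_CLIENTID_BUSY	(-10074)

static int fake_destroy_clientid(int attempt)
{
	/* Pretend the server is busy for the first two attempts. */
	return attempt < 2 ? NFS4ERR_CLIENTID_BUSY : 0;
}

int main(void)
{
	int attempt, ret = 0;

	for (attempt = 0; attempt < MAX_LOOPS; attempt++) {
		ret = fake_destroy_clientid(attempt);
		if (ret == NFS4ERR_DELAY || ret == NFS4ERR_CLIENTID_BUSY) {
			sleep(1);	/* transient: back off and retry */
			continue;
		}
		break;			/* success or a hard error */
	}
	printf("final status %d after %d attempt(s)\n", ret, attempt + 1);
	return 0;
}
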
struct nfs4_get_lease_time_data {
struct nfs4_get_lease_time_args *args;
struct nfs4_get_lease_time_res *res;
@@ -5399,8 +5542,12 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
void nfs4_destroy_session(struct nfs4_session *session)
{
struct rpc_xprt *xprt;
+ struct rpc_cred *cred;
- nfs4_proc_destroy_session(session);
+ cred = nfs4_get_exchange_id_cred(session->clp);
+ nfs4_proc_destroy_session(session, cred);
+ if (cred)
+ put_rpccred(cred);
rcu_read_lock();
xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
@@ -5510,7 +5657,8 @@ static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
return nfs4_verify_back_channel_attrs(args, session);
}
-static int _nfs4_proc_create_session(struct nfs_client *clp)
+static int _nfs4_proc_create_session(struct nfs_client *clp,
+ struct rpc_cred *cred)
{
struct nfs4_session *session = clp->cl_session;
struct nfs41_create_session_args args = {
@@ -5524,6 +5672,7 @@ static int _nfs4_proc_create_session(struct nfs_client *clp)
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
.rpc_argp = &args,
.rpc_resp = &res,
+ .rpc_cred = cred,
};
int status;
@@ -5548,7 +5697,7 @@ static int _nfs4_proc_create_session(struct nfs_client *clp)
* It is the responsibility of the caller to verify the session is
* expired before calling this routine.
*/
-int nfs4_proc_create_session(struct nfs_client *clp)
+int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
{
int status;
unsigned *ptr;
@@ -5556,7 +5705,7 @@ int nfs4_proc_create_session(struct nfs_client *clp)
dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
- status = _nfs4_proc_create_session(clp);
+ status = _nfs4_proc_create_session(clp, cred);
if (status)
goto out;
@@ -5578,10 +5727,15 @@ out:
* Issue the over-the-wire RPC DESTROY_SESSION.
* The caller must serialize access to this routine.
*/
-int nfs4_proc_destroy_session(struct nfs4_session *session)
+int nfs4_proc_destroy_session(struct nfs4_session *session,
+ struct rpc_cred *cred)
{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
+ .rpc_argp = session,
+ .rpc_cred = cred,
+ };
int status = 0;
- struct rpc_message msg;
dprintk("--> nfs4_proc_destroy_session\n");
@@ -5589,10 +5743,6 @@ int nfs4_proc_destroy_session(struct nfs4_session *session)
if (session->clp->cl_cons_state != NFS_CS_READY)
return status;
- msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
- msg.rpc_argp = session;
- msg.rpc_resp = NULL;
- msg.rpc_cred = NULL;
status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (status)
@@ -5604,53 +5754,79 @@ int nfs4_proc_destroy_session(struct nfs4_session *session)
return status;
}
+/*
+ * With sessions, the client is not marked ready until after a
+ * successful EXCHANGE_ID and CREATE_SESSION.
+ *
+ * Map cl_cons_state errors to EPROTONOSUPPORT to indicate
+ * other versions of NFS can be tried.
+ */
+static int nfs41_check_session_ready(struct nfs_client *clp)
+{
+ int ret;
+
+ if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
+ ret = nfs4_client_recover_expired_lease(clp);
+ if (ret)
+ return ret;
+ }
+ if (clp->cl_cons_state < NFS_CS_READY)
+ return -EPROTONOSUPPORT;
+ smp_rmb();
+ return 0;
+}
+
int nfs4_init_session(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_session *session;
unsigned int rsize, wsize;
- int ret;
if (!nfs4_has_session(clp))
return 0;
session = clp->cl_session;
- if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
- return 0;
+ spin_lock(&clp->cl_lock);
+ if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
- rsize = server->rsize;
- if (rsize == 0)
- rsize = NFS_MAX_FILE_IO_SIZE;
- wsize = server->wsize;
- if (wsize == 0)
- wsize = NFS_MAX_FILE_IO_SIZE;
+ rsize = server->rsize;
+ if (rsize == 0)
+ rsize = NFS_MAX_FILE_IO_SIZE;
+ wsize = server->wsize;
+ if (wsize == 0)
+ wsize = NFS_MAX_FILE_IO_SIZE;
- session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
- session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
+ session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
+ session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
+ }
+ spin_unlock(&clp->cl_lock);
- ret = nfs4_recover_expired_lease(server);
- if (!ret)
- ret = nfs4_check_client_ready(clp);
- return ret;
+ return nfs41_check_session_ready(clp);
}
-int nfs4_init_ds_session(struct nfs_client *clp)
+int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
{
struct nfs4_session *session = clp->cl_session;
int ret;
- if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
- return 0;
-
- ret = nfs4_client_recover_expired_lease(clp);
- if (!ret)
- /* Test for the DS role */
- if (!is_ds_client(clp))
- ret = -ENODEV;
- if (!ret)
- ret = nfs4_check_client_ready(clp);
- return ret;
+ spin_lock(&clp->cl_lock);
+ if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
+ /*
+ * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the
+ * DS lease to be equal to the MDS lease.
+ */
+ clp->cl_lease_time = lease_time;
+ clp->cl_last_renewal = jiffies;
+ }
+ spin_unlock(&clp->cl_lock);
+ ret = nfs41_check_session_ready(clp);
+ if (ret)
+ return ret;
+ /* Test for the DS role */
+ if (!is_ds_client(clp))
+ return -ENODEV;
+ return 0;
}
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
@@ -6557,6 +6733,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.file_inode_ops = &nfs4_file_inode_operations,
.file_ops = &nfs4_file_operations,
.getroot = nfs4_proc_get_root,
+ .submount = nfs4_submount,
.getattr = nfs4_proc_getattr,
.setattr = nfs4_proc_setattr,
.lookup = nfs4_proc_lookup,
@@ -6589,13 +6766,13 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.write_rpc_prepare = nfs4_proc_write_rpc_prepare,
.write_done = nfs4_write_done,
.commit_setup = nfs4_proc_commit_setup,
+ .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
.commit_done = nfs4_commit_done,
.lock = nfs4_proc_lock,
.clear_acl_cache = nfs4_zap_acl_attr,
.close_context = nfs4_close_context,
.open_context = nfs4_atomic_open,
.init_client = nfs4_init_client,
- .secinfo = nfs4_proc_secinfo,
};
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index dc484c0eae7f..6930bec91bca 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -49,7 +49,7 @@
#include "nfs4_fs.h"
#include "delegation.h"
-#define NFSDBG_FACILITY NFSDBG_PROC
+#define NFSDBG_FACILITY NFSDBG_STATE
void
nfs4_renew_state(struct work_struct *work)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 7f0fcfc1fe9d..c679b9ecef63 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -57,6 +57,8 @@
#include "internal.h"
#include "pnfs.h"
+#define NFSDBG_FACILITY NFSDBG_STATE
+
#define OPENOWNER_POOL_SIZE 8
const nfs4_stateid zero_stateid;
@@ -254,7 +256,7 @@ int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
goto out;
set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
do_confirm:
- status = nfs4_proc_create_session(clp);
+ status = nfs4_proc_create_session(clp, cred);
if (status != 0)
goto out;
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
@@ -1106,6 +1108,8 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp)
return;
if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
+ dprintk("%s: scheduling lease recovery for server %s\n", __func__,
+ clp->cl_hostname);
nfs4_schedule_state_manager(clp);
}
EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery);
@@ -1122,6 +1126,8 @@ static void nfs40_handle_cb_pathdown(struct nfs_client *clp)
{
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
nfs_expire_all_delegations(clp);
+ dprintk("%s: handling CB_PATHDOWN recovery for server %s\n", __func__,
+ clp->cl_hostname);
}
void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
@@ -1158,6 +1164,8 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4
struct nfs_client *clp = server->nfs_client;
nfs4_state_mark_reclaim_nograce(clp, state);
+ dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
+ clp->cl_hostname);
nfs4_schedule_state_manager(clp);
}
EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
@@ -1491,19 +1499,25 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_DEADSESSION:
- case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_SEQ_FALSE_RETRY:
case -NFS4ERR_SEQ_MISORDERED:
set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
/* Zero session reset errors */
break;
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+ break;
case -EKEYEXPIRED:
/* Nothing we can do */
nfs4_warn_keyexpired(clp->cl_hostname);
break;
default:
+ dprintk("%s: failed to handle error %d for server %s\n",
+ __func__, error, clp->cl_hostname);
return error;
}
+ dprintk("%s: handled error %d for server %s\n", __func__, error,
+ clp->cl_hostname);
return 0;
}
@@ -1572,34 +1586,82 @@ out:
return nfs4_recovery_handle_error(clp, status);
}
+/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
+ * on EXCHANGE_ID for v4.1
+ */
+static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
+{
+ switch (status) {
+ case -NFS4ERR_SEQ_MISORDERED:
+ if (test_and_set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state))
+ return -ESERVERFAULT;
+ /* Lease confirmation error: retry after purging the lease */
+ ssleep(1);
+ case -NFS4ERR_CLID_INUSE:
+ case -NFS4ERR_STALE_CLIENTID:
+ clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+ break;
+ case -EACCES:
+ if (clp->cl_machine_cred == NULL)
+ return -EACCES;
+ /* Handle case where the user hasn't set up machine creds */
+ nfs4_clear_machine_cred(clp);
+ case -NFS4ERR_DELAY:
+ case -ETIMEDOUT:
+ case -EAGAIN:
+ ssleep(1);
+ break;
+
+ case -NFS4ERR_MINOR_VERS_MISMATCH:
+ if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
+ nfs_mark_client_ready(clp, -EPROTONOSUPPORT);
+ dprintk("%s: exit with error %d for server %s\n",
+ __func__, -EPROTONOSUPPORT, clp->cl_hostname);
+ return -EPROTONOSUPPORT;
+ case -EKEYEXPIRED:
+ nfs4_warn_keyexpired(clp->cl_hostname);
+ case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
+ * in nfs4_exchange_id */
+ default:
+ dprintk("%s: exit with error %d for server %s\n", __func__,
+ status, clp->cl_hostname);
+ return status;
+ }
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ dprintk("%s: handled error %d for server %s\n", __func__, status,
+ clp->cl_hostname);
+ return 0;
+}
+
static int nfs4_reclaim_lease(struct nfs_client *clp)
{
struct rpc_cred *cred;
const struct nfs4_state_recovery_ops *ops =
clp->cl_mvops->reboot_recovery_ops;
- int status = -ENOENT;
+ int status;
cred = ops->get_clid_cred(clp);
- if (cred != NULL) {
- status = ops->establish_clid(clp, cred);
- put_rpccred(cred);
- /* Handle case where the user hasn't set up machine creds */
- if (status == -EACCES && cred == clp->cl_machine_cred) {
- nfs4_clear_machine_cred(clp);
- status = -EAGAIN;
- }
- if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
- status = -EPROTONOSUPPORT;
- }
- return status;
+ if (cred == NULL)
+ return -ENOENT;
+ status = ops->establish_clid(clp, cred);
+ put_rpccred(cred);
+ if (status != 0)
+ return nfs4_handle_reclaim_lease_error(clp, status);
+ return 0;
}
#ifdef CONFIG_NFS_V4_1
-void nfs4_schedule_session_recovery(struct nfs4_session *session)
+void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
{
struct nfs_client *clp = session->clp;
- set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ switch (err) {
+ default:
+ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ break;
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+ }
nfs4_schedule_lease_recovery(clp);
}
EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
@@ -1607,14 +1669,19 @@ EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
void nfs41_handle_recall_slot(struct nfs_client *clp)
{
set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
+ dprintk("%s: scheduling slot recall for server %s\n", __func__,
+ clp->cl_hostname);
nfs4_schedule_state_manager(clp);
}
static void nfs4_reset_all_state(struct nfs_client *clp)
{
if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
- clp->cl_boot_time = CURRENT_TIME;
+ set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
+ clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
nfs4_state_start_reclaim_nograce(clp);
+ dprintk("%s: scheduling reset of all state for server %s!\n",
+ __func__, clp->cl_hostname);
nfs4_schedule_state_manager(clp);
}
}
@@ -1623,33 +1690,50 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
{
if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
nfs4_state_start_reclaim_reboot(clp);
+ dprintk("%s: server %s rebooted!\n", __func__,
+ clp->cl_hostname);
nfs4_schedule_state_manager(clp);
}
}
static void nfs41_handle_state_revoked(struct nfs_client *clp)
{
- /* Temporary */
nfs4_reset_all_state(clp);
+ dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
}
static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
{
/* This will need to handle layouts too */
nfs_expire_all_delegations(clp);
+ dprintk("%s: Recallable state revoked on server %s!\n", __func__,
+ clp->cl_hostname);
}
-static void nfs41_handle_cb_path_down(struct nfs_client *clp)
+static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
{
nfs_expire_all_delegations(clp);
if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
nfs4_schedule_state_manager(clp);
+ dprintk("%s: server %s declared a backchannel fault\n", __func__,
+ clp->cl_hostname);
+}
+
+static void nfs41_handle_cb_path_down(struct nfs_client *clp)
+{
+ if (test_and_set_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
+ &clp->cl_state) == 0)
+ nfs4_schedule_state_manager(clp);
}
void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
{
if (!flags)
return;
+
+ dprintk("%s: \"%s\" (client ID %llx) flags=0x%08x\n",
+ __func__, clp->cl_hostname, clp->cl_clientid, flags);
+
if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
nfs41_handle_server_reboot(clp);
if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
@@ -1659,18 +1743,21 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
nfs41_handle_state_revoked(clp);
if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
nfs41_handle_recallable_state_revoked(clp);
- if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
- SEQ4_STATUS_BACKCHANNEL_FAULT |
- SEQ4_STATUS_CB_PATH_DOWN_SESSION))
+ if (flags & SEQ4_STATUS_BACKCHANNEL_FAULT)
+ nfs41_handle_backchannel_fault(clp);
+ else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+ SEQ4_STATUS_CB_PATH_DOWN_SESSION))
nfs41_handle_cb_path_down(clp);
}
static int nfs4_reset_session(struct nfs_client *clp)
{
+ struct rpc_cred *cred;
int status;
nfs4_begin_drain_session(clp);
- status = nfs4_proc_destroy_session(clp->cl_session);
+ cred = nfs4_get_exchange_id_cred(clp);
+ status = nfs4_proc_destroy_session(clp->cl_session, cred);
if (status && status != -NFS4ERR_BADSESSION &&
status != -NFS4ERR_DEADSESSION) {
status = nfs4_recovery_handle_error(clp, status);
@@ -1678,19 +1765,26 @@ static int nfs4_reset_session(struct nfs_client *clp)
}
memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
- status = nfs4_proc_create_session(clp);
+ status = nfs4_proc_create_session(clp, cred);
if (status) {
- status = nfs4_recovery_handle_error(clp, status);
+ dprintk("%s: session reset failed with status %d for server %s!\n",
+ __func__, status, clp->cl_hostname);
+ status = nfs4_handle_reclaim_lease_error(clp, status);
goto out;
}
clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
/* create_session negotiated new slot table */
clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
+ clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+ dprintk("%s: session reset was successful for server %s!\n",
+ __func__, clp->cl_hostname);
/* Let the state manager reestablish state */
if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
nfs41_setup_state_renewal(clp);
out:
+ if (cred)
+ put_rpccred(cred);
return status;
}
@@ -1722,37 +1816,41 @@ static int nfs4_recall_slot(struct nfs_client *clp)
return 0;
}
-#else /* CONFIG_NFS_V4_1 */
-static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
-static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
-static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
-#endif /* CONFIG_NFS_V4_1 */
-
-/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
- * on EXCHANGE_ID for v4.1
- */
-static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
+static int nfs4_bind_conn_to_session(struct nfs_client *clp)
{
- switch (status) {
- case -NFS4ERR_CLID_INUSE:
- case -NFS4ERR_STALE_CLIENTID:
- clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+ struct rpc_cred *cred;
+ int ret;
+
+ nfs4_begin_drain_session(clp);
+ cred = nfs4_get_exchange_id_cred(clp);
+ ret = nfs4_proc_bind_conn_to_session(clp, cred);
+ if (cred)
+ put_rpccred(cred);
+ clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+ switch (ret) {
+ case 0:
+ dprintk("%s: bind_conn_to_session was successful for server %s!\n",
+ __func__, clp->cl_hostname);
break;
case -NFS4ERR_DELAY:
- case -ETIMEDOUT:
- case -EAGAIN:
ssleep(1);
+ set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
break;
-
- case -EKEYEXPIRED:
- nfs4_warn_keyexpired(clp->cl_hostname);
- case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
- * in nfs4_exchange_id */
default:
- return;
+ return nfs4_recovery_handle_error(clp, ret);
}
- set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ return 0;
}
+#else /* CONFIG_NFS_V4_1 */
+static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
+static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
+static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
+
+static int nfs4_bind_conn_to_session(struct nfs_client *clp)
+{
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
static void nfs4_state_manager(struct nfs_client *clp)
{
@@ -1760,19 +1858,21 @@ static void nfs4_state_manager(struct nfs_client *clp)
/* Ensure exclusive access to NFSv4 state */
do {
+ if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
+ status = nfs4_reclaim_lease(clp);
+ if (status < 0)
+ goto out_error;
+ clear_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ }
+
if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
/* We're going to have to re-establish a clientid */
status = nfs4_reclaim_lease(clp);
- if (status) {
- nfs4_set_lease_expired(clp, status);
- if (test_bit(NFS4CLNT_LEASE_EXPIRED,
- &clp->cl_state))
- continue;
- if (clp->cl_cons_state ==
- NFS_CS_SESSION_INITING)
- nfs_mark_client_ready(clp, status);
+ if (status < 0)
goto out_error;
- }
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
+ continue;
clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH,
@@ -1803,6 +1903,15 @@ static void nfs4_state_manager(struct nfs_client *clp)
goto out_error;
}
+ /* Send BIND_CONN_TO_SESSION */
+ if (test_and_clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
+ &clp->cl_state) && nfs4_has_session(clp)) {
+ status = nfs4_bind_conn_to_session(clp);
+ if (status < 0)
+ goto out_error;
+ continue;
+ }
+
/* First recover reboot state... */
if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
status = nfs4_do_reclaim(clp,
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index c54aae364bee..ee4a74db95d0 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -53,9 +53,11 @@
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
+
#include "nfs4_fs.h"
#include "internal.h"
#include "pnfs.h"
+#include "netns.h"
#define NFSDBG_FACILITY NFSDBG_XDR
@@ -99,9 +101,12 @@ static int nfs4_stat_to_errno(int);
#define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2))
#define nfs4_owner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
#define nfs4_group_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
+/* We support only one layout type per file system */
+#define decode_mdsthreshold_maxsz (1 + 1 + nfs4_fattr_bitmap_maxsz + 1 + 8)
/* This is based on getfattr, which uses the most attributes: */
#define nfs4_fattr_value_maxsz (1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \
- 3 + 3 + 3 + nfs4_owner_maxsz + nfs4_group_maxsz))
+ 3 + 3 + 3 + nfs4_owner_maxsz + \
+ nfs4_group_maxsz + decode_mdsthreshold_maxsz))
#define nfs4_fattr_maxsz (nfs4_fattr_bitmap_maxsz + \
nfs4_fattr_value_maxsz)
#define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz)
@@ -321,8 +326,20 @@ static int nfs4_stat_to_errno(int);
1 /* csr_flags */ + \
decode_channel_attrs_maxsz + \
decode_channel_attrs_maxsz)
+#define encode_bind_conn_to_session_maxsz (op_encode_hdr_maxsz + \
+ /* bctsa_sessid */ \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
+ 1 /* bctsa_dir */ + \
+ 1 /* bctsa_use_conn_in_rdma_mode */)
+#define decode_bind_conn_to_session_maxsz (op_decode_hdr_maxsz + \
+ /* bctsr_sessid */ \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
+ 1 /* bctsr_dir */ + \
+ 1 /* bctsr_use_conn_in_rdma_mode */)
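
These maxsz constants are counted in 32-bit XDR words: the 16-byte session ID needs XDR_QUADLEN(16) = 4 words, and the direction and use_conn_in_rdma_mode fields take one word each, on top of the operation header. A quick standalone check of that arithmetic (the two macros are reproduced here only for the illustration):

/* Illustrative XDR word count for the BIND_CONN_TO_SESSION payload. */
#include <stdio.h>

#define XDR_QUADLEN(l)		(((l) + 3) >> 2)
#define NFS4_MAX_SESSIONID_LEN	16

int main(void)
{
	unsigned int words = XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN)	/* sessid */
			   + 1						/* dir */
			   + 1;			/* use_conn_in_rdma_mode */

	printf("%u words for the session id, %u payload words in total\n",
	       XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN), words);
	return 0;
}
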
#define encode_destroy_session_maxsz (op_encode_hdr_maxsz + 4)
#define decode_destroy_session_maxsz (op_decode_hdr_maxsz)
+#define encode_destroy_clientid_maxsz (op_encode_hdr_maxsz + 2)
+#define decode_destroy_clientid_maxsz (op_decode_hdr_maxsz)
#define encode_sequence_maxsz (op_encode_hdr_maxsz + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4)
#define decode_sequence_maxsz (op_decode_hdr_maxsz + \
@@ -421,30 +438,22 @@ static int nfs4_stat_to_errno(int);
#define NFS4_enc_commit_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
- encode_commit_maxsz + \
- encode_getattr_maxsz)
+ encode_commit_maxsz)
#define NFS4_dec_commit_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
- decode_commit_maxsz + \
- decode_getattr_maxsz)
+ decode_commit_maxsz)
#define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
- encode_savefh_maxsz + \
encode_open_maxsz + \
encode_getfh_maxsz + \
- encode_getattr_maxsz + \
- encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
- decode_savefh_maxsz + \
decode_open_maxsz + \
decode_getfh_maxsz + \
- decode_getattr_maxsz + \
- decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_open_confirm_sz \
(compound_encode_hdr_maxsz + \
@@ -595,47 +604,37 @@ static int nfs4_stat_to_errno(int);
#define NFS4_enc_remove_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
- encode_remove_maxsz + \
- encode_getattr_maxsz)
+ encode_remove_maxsz)
#define NFS4_dec_remove_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
- decode_remove_maxsz + \
- decode_getattr_maxsz)
+ decode_remove_maxsz)
#define NFS4_enc_rename_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_putfh_maxsz + \
- encode_rename_maxsz + \
- encode_getattr_maxsz + \
- encode_restorefh_maxsz + \
- encode_getattr_maxsz)
+ encode_rename_maxsz)
#define NFS4_dec_rename_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_putfh_maxsz + \
- decode_rename_maxsz + \
- decode_getattr_maxsz + \
- decode_restorefh_maxsz + \
- decode_getattr_maxsz)
+ decode_rename_maxsz)
#define NFS4_enc_link_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_putfh_maxsz + \
encode_link_maxsz + \
- decode_getattr_maxsz + \
encode_restorefh_maxsz + \
- decode_getattr_maxsz)
+ encode_getattr_maxsz)
#define NFS4_dec_link_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_putfh_maxsz + \
decode_link_maxsz + \
- decode_getattr_maxsz + \
decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_symlink_sz (compound_encode_hdr_maxsz + \
@@ -653,20 +652,14 @@ static int nfs4_stat_to_errno(int);
#define NFS4_enc_create_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
- encode_savefh_maxsz + \
encode_create_maxsz + \
encode_getfh_maxsz + \
- encode_getattr_maxsz + \
- encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_create_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
- decode_savefh_maxsz + \
decode_create_maxsz + \
decode_getfh_maxsz + \
- decode_getattr_maxsz + \
- decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_pathconf_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
@@ -738,6 +731,12 @@ static int nfs4_stat_to_errno(int);
decode_putfh_maxsz + \
decode_secinfo_maxsz)
#if defined(CONFIG_NFS_V4_1)
+#define NFS4_enc_bind_conn_to_session_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_bind_conn_to_session_maxsz)
+#define NFS4_dec_bind_conn_to_session_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_bind_conn_to_session_maxsz)
#define NFS4_enc_exchange_id_sz \
(compound_encode_hdr_maxsz + \
encode_exchange_id_maxsz)
@@ -754,6 +753,10 @@ static int nfs4_stat_to_errno(int);
encode_destroy_session_maxsz)
#define NFS4_dec_destroy_session_sz (compound_decode_hdr_maxsz + \
decode_destroy_session_maxsz)
+#define NFS4_enc_destroy_clientid_sz (compound_encode_hdr_maxsz + \
+ encode_destroy_clientid_maxsz)
+#define NFS4_dec_destroy_clientid_sz (compound_decode_hdr_maxsz + \
+ decode_destroy_clientid_maxsz)
#define NFS4_enc_sequence_sz \
(compound_decode_hdr_maxsz + \
encode_sequence_maxsz)
@@ -1103,7 +1106,7 @@ static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg
encode_nfs4_stateid(xdr, arg->stateid);
}
-static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr)
+static void encode_commit(struct xdr_stream *xdr, const struct nfs_commitargs *args, struct compound_hdr *hdr)
{
__be32 *p;
@@ -1194,6 +1197,16 @@ static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct c
bitmask[1] & nfs4_fattr_bitmap[1], hdr);
}
+static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask,
+ struct compound_hdr *hdr)
+{
+ encode_getattr_three(xdr,
+ bitmask[0] & nfs4_fattr_bitmap[0],
+ bitmask[1] & nfs4_fattr_bitmap[1],
+ bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD,
+ hdr);
+}
+
static void encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
{
encode_getattr_three(xdr,
@@ -1678,6 +1691,20 @@ static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, stru
#if defined(CONFIG_NFS_V4_1)
/* NFSv4.1 operations */
+static void encode_bind_conn_to_session(struct xdr_stream *xdr,
+ struct nfs4_session *session,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION,
+ decode_bind_conn_to_session_maxsz, hdr);
+ encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+ p = xdr_reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(NFS4_CDFC4_BACK_OR_BOTH);
+ *p = 0; /* use_conn_in_rdma_mode = False */
+}
+
static void encode_exchange_id(struct xdr_stream *xdr,
struct nfs41_exchange_id_args *args,
struct compound_hdr *hdr)
@@ -1726,6 +1753,7 @@ static void encode_create_session(struct xdr_stream *xdr,
char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
uint32_t len;
struct nfs_client *clp = args->client;
+ struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
u32 max_resp_sz_cached;
/*
@@ -1767,7 +1795,7 @@ static void encode_create_session(struct xdr_stream *xdr,
*p++ = cpu_to_be32(RPC_AUTH_UNIX); /* auth_sys */
/* authsys_parms rfc1831 */
- *p++ = cpu_to_be32((u32)clp->cl_boot_time.tv_nsec); /* stamp */
+ *p++ = (__be32)nn->boot_time.tv_nsec; /* stamp */
p = xdr_encode_opaque(p, machine_name, len);
*p++ = cpu_to_be32(0); /* UID */
*p++ = cpu_to_be32(0); /* GID */
@@ -1782,6 +1810,14 @@ static void encode_destroy_session(struct xdr_stream *xdr,
encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
}
+static void encode_destroy_clientid(struct xdr_stream *xdr,
+ uint64_t clientid,
+ struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_DESTROY_CLIENTID, decode_destroy_clientid_maxsz, hdr);
+ encode_uint64(xdr, clientid);
+}
+
static void encode_reclaim_complete(struct xdr_stream *xdr,
struct nfs41_reclaim_complete_args *args,
struct compound_hdr *hdr)
@@ -2064,7 +2100,6 @@ static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_remove(xdr, &args->name, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
@@ -2084,9 +2119,6 @@ static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_savefh(xdr, &hdr);
encode_putfh(xdr, args->new_dir, &hdr);
encode_rename(xdr, args->old_name, args->new_name, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
- encode_restorefh(xdr, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
@@ -2106,7 +2138,6 @@ static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_savefh(xdr, &hdr);
encode_putfh(xdr, args->dir_fh, &hdr);
encode_link(xdr, args->name, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
encode_restorefh(xdr, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
@@ -2125,12 +2156,9 @@ static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->dir_fh, &hdr);
- encode_savefh(xdr, &hdr);
encode_create(xdr, args, &hdr);
encode_getfh(xdr, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
- encode_restorefh(xdr, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
@@ -2191,12 +2219,9 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
- encode_savefh(xdr, &hdr);
encode_open(xdr, args, &hdr);
encode_getfh(xdr, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
- encode_restorefh(xdr, &hdr);
- encode_getfattr(xdr, args->dir_bitmask, &hdr);
+ encode_getfattr_open(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
@@ -2448,7 +2473,7 @@ static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
* a COMMIT request
*/
static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_writeargs *args)
+ struct nfs_commitargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
@@ -2458,8 +2483,6 @@ static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_commit(xdr, args, &hdr);
- if (args->bitmask)
- encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
@@ -2602,8 +2625,8 @@ static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fhandle, &hdr);
- encode_delegreturn(xdr, args->stateid, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_delegreturn(xdr, args->stateid, &hdr);
encode_nops(&hdr);
}
@@ -2651,6 +2674,22 @@ static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req,
#if defined(CONFIG_NFS_V4_1)
/*
+ * BIND_CONN_TO_SESSION request
+ */
+static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_client *clp)
+{
+ struct compound_hdr hdr = {
+ .minorversion = clp->cl_mvops->minor_version,
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_bind_conn_to_session(xdr, clp->cl_session, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
* EXCHANGE_ID request
*/
static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
@@ -2699,6 +2738,22 @@ static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
}
/*
+ * a DESTROY_CLIENTID request
+ */
+static void nfs4_xdr_enc_destroy_clientid(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_client *clp)
+{
+ struct compound_hdr hdr = {
+ .minorversion = clp->cl_mvops->minor_version,
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_destroy_clientid(xdr, clp->cl_clientid, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
* a SEQUENCE request
*/
static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
@@ -4102,7 +4157,7 @@ static int decode_verifier(struct xdr_stream *xdr, void *verifier)
return decode_opaque_fixed(xdr, verifier, NFS4_VERIFIER_SIZE);
}
-static int decode_commit(struct xdr_stream *xdr, struct nfs_writeres *res)
+static int decode_commit(struct xdr_stream *xdr, struct nfs_commitres *res)
{
int status;
@@ -4220,6 +4275,110 @@ xdr_error:
return status;
}
+static int decode_threshold_hint(struct xdr_stream *xdr,
+ uint32_t *bitmap,
+ uint64_t *res,
+ uint32_t hint_bit)
+{
+ __be32 *p;
+
+ *res = 0;
+ if (likely(bitmap[0] & hint_bit)) {
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
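Each hint in decode_threshold_hint() is an XDR hyper: a 64-bit value carried as two big-endian 32-bit words. A self-contained sketch of that decoding step:

#include <arpa/inet.h>
#include <stdint.h>

/* Decode an XDR hyper (two network-order 32-bit words) into a host uint64_t
 * and return the advanced stream position. */
static const uint32_t *decode_hyper(const uint32_t *p, uint64_t *out)
{
    *out = ((uint64_t)ntohl(p[0]) << 32) | ntohl(p[1]);
    return p + 2;
}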
+static int decode_first_threshold_item4(struct xdr_stream *xdr,
+ struct nfs4_threshold *res)
+{
+ __be32 *p, *savep;
+ uint32_t bitmap[3] = {0,}, attrlen;
+ int status;
+
+ /* layout type */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p)) {
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+ }
+ res->l_type = be32_to_cpup(p);
+
+ /* thi_hintset bitmap */
+ status = decode_attr_bitmap(xdr, bitmap);
+ if (status < 0)
+ goto xdr_error;
+
+ /* thi_hintlist length */
+ status = decode_attr_length(xdr, &attrlen, &savep);
+ if (status < 0)
+ goto xdr_error;
+ /* thi_hintlist */
+ status = decode_threshold_hint(xdr, bitmap, &res->rd_sz, THRESHOLD_RD);
+ if (status < 0)
+ goto xdr_error;
+ status = decode_threshold_hint(xdr, bitmap, &res->wr_sz, THRESHOLD_WR);
+ if (status < 0)
+ goto xdr_error;
+ status = decode_threshold_hint(xdr, bitmap, &res->rd_io_sz,
+ THRESHOLD_RD_IO);
+ if (status < 0)
+ goto xdr_error;
+ status = decode_threshold_hint(xdr, bitmap, &res->wr_io_sz,
+ THRESHOLD_WR_IO);
+ if (status < 0)
+ goto xdr_error;
+
+ status = verify_attr_len(xdr, savep, attrlen);
+ res->bm = bitmap[0];
+
+ dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
+ __func__, res->bm, res->rd_sz, res->wr_sz, res->rd_io_sz,
+ res->wr_io_sz);
+xdr_error:
+ dprintk("%s ret=%d!\n", __func__, status);
+ return status;
+}
+
+/*
+ * Thresholds on pNFS direct I/O vs. MDS I/O
+ */
+static int decode_attr_mdsthreshold(struct xdr_stream *xdr,
+ uint32_t *bitmap,
+ struct nfs4_threshold *res)
+{
+ __be32 *p;
+ int status = 0;
+ uint32_t num;
+
+ if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U)))
+ return -EIO;
+ if (likely(bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ num = be32_to_cpup(p);
+ if (num == 0)
+ return 0;
+ if (num > 1)
+ printk(KERN_INFO "%s: Warning: Multiple pNFS layout "
+ "drivers per filesystem not supported\n",
+ __func__);
+
+ status = decode_first_threshold_item4(xdr, res);
+ }
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
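decode_attr_mdsthreshold() first checks that no attribute bit below FATTR4_WORD2_MDSTHRESHOLD is still set in the bitmap word, because XDR attributes must be consumed in ascending bit order or the stream position would be wrong. A tiny sketch of that ordering check, assuming the attribute bit is a single power of two:

#include <stdbool.h>
#include <stdint.h>

/* True when every attribute bit below my_bit has already been consumed. */
static bool attrs_decoded_in_order(uint32_t word, uint32_t my_bit)
{
    return (word & (my_bit - 1u)) == 0;
}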
static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
struct nfs_fattr *fattr, struct nfs_fh *fh,
struct nfs4_fs_locations *fs_loc,
@@ -4326,6 +4485,10 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
goto xdr_error;
fattr->valid |= status;
+ status = decode_attr_mdsthreshold(xdr, bitmap, fattr->mdsthreshold);
+ if (status < 0)
+ goto xdr_error;
+
xdr_error:
dprintk("%s: xdr returned %d\n", __func__, -status);
return status;
@@ -5156,7 +5319,6 @@ static int decode_exchange_id(struct xdr_stream *xdr,
uint32_t dummy;
char *dummy_str;
int status;
- struct nfs_client *clp = res->client;
uint32_t impl_id_count;
status = decode_op_hdr(xdr, OP_EXCHANGE_ID);
@@ -5166,36 +5328,39 @@ static int decode_exchange_id(struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
- xdr_decode_hyper(p, &clp->cl_clientid);
+ xdr_decode_hyper(p, &res->clientid);
p = xdr_inline_decode(xdr, 12);
if (unlikely(!p))
goto out_overflow;
- clp->cl_seqid = be32_to_cpup(p++);
- clp->cl_exchange_flags = be32_to_cpup(p++);
+ res->seqid = be32_to_cpup(p++);
+ res->flags = be32_to_cpup(p++);
/* We ask for SP4_NONE */
dummy = be32_to_cpup(p);
if (dummy != SP4_NONE)
return -EIO;
- /* Throw away minor_id */
+ /* server_owner4.so_minor_id */
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
+ p = xdr_decode_hyper(p, &res->server_owner->minor_id);
- /* Throw away Major id */
+ /* server_owner4.so_major_id */
status = decode_opaque_inline(xdr, &dummy, &dummy_str);
if (unlikely(status))
return status;
+ if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
+ return -EIO;
+ memcpy(res->server_owner->major_id, dummy_str, dummy);
+ res->server_owner->major_id_sz = dummy;
- /* Save server_scope */
+ /* server_scope4 */
status = decode_opaque_inline(xdr, &dummy, &dummy_str);
if (unlikely(status))
return status;
-
if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
return -EIO;
-
memcpy(res->server_scope->server_scope, dummy_str, dummy);
res->server_scope->server_scope_sz = dummy;
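decode_exchange_id() now keeps the server_owner major_id and the server_scope instead of discarding them, but only after rejecting anything longer than the opaque limit. A hedged sketch of that bounded copy; the 1024-byte cap stands in for NFS4_OPAQUE_LIMIT and is an assumption:

#include <stdint.h>
#include <string.h>

#define OPAQUE_LIMIT 1024                /* assumed cap */

struct opaque_buf {
    uint32_t len;
    uint8_t  data[OPAQUE_LIMIT];
};

/* Copy a decoded opaque into a fixed buffer, refusing oversized input. */
static int store_opaque(struct opaque_buf *dst, const uint8_t *src, uint32_t len)
{
    if (len > OPAQUE_LIMIT)
        return -1;                       /* would overflow the destination */
    memcpy(dst->data, src, len);
    dst->len = len;
    return 0;
}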
@@ -5276,6 +5441,37 @@ static int decode_sessionid(struct xdr_stream *xdr, struct nfs4_sessionid *sid)
return decode_opaque_fixed(xdr, sid->data, NFS4_MAX_SESSIONID_LEN);
}
+static int decode_bind_conn_to_session(struct xdr_stream *xdr,
+ struct nfs41_bind_conn_to_session_res *res)
+{
+ __be32 *p;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION);
+ if (!status)
+ status = decode_sessionid(xdr, &res->session->sess_id);
+ if (unlikely(status))
+ return status;
+
+ /* dir flags, rdma mode bool */
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+
+ res->dir = be32_to_cpup(p++);
+ if (res->dir == 0 || res->dir > NFS4_CDFS4_BOTH)
+ return -EIO;
+ if (be32_to_cpup(p) == 0)
+ res->use_conn_in_rdma_mode = false;
+ else
+ res->use_conn_in_rdma_mode = true;
+
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
static int decode_create_session(struct xdr_stream *xdr,
struct nfs41_create_session_res *res)
{
@@ -5312,6 +5508,11 @@ static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
return decode_op_hdr(xdr, OP_DESTROY_SESSION);
}
+static int decode_destroy_clientid(struct xdr_stream *xdr, void *dummy)
+{
+ return decode_op_hdr(xdr, OP_DESTROY_CLIENTID);
+}
+
static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy)
{
return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE);
@@ -5800,9 +6001,6 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
if (status)
goto out;
status = decode_remove(xdr, &res->cinfo);
- if (status)
- goto out;
- decode_getfattr(xdr, res->dir_attr, res->server);
out:
return status;
}
@@ -5832,15 +6030,6 @@ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
if (status)
goto out;
status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo);
- if (status)
- goto out;
- /* Current FH is target directory */
- if (decode_getfattr(xdr, res->new_fattr, res->server))
- goto out;
- status = decode_restorefh(xdr);
- if (status)
- goto out;
- decode_getfattr(xdr, res->old_fattr, res->server);
out:
return status;
}
@@ -5876,8 +6065,6 @@ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
* Note order: OP_LINK leaves the directory as the current
* filehandle.
*/
- if (decode_getfattr(xdr, res->dir_attr, res->server))
- goto out;
status = decode_restorefh(xdr);
if (status)
goto out;
@@ -5904,21 +6091,13 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_savefh(xdr);
- if (status)
- goto out;
status = decode_create(xdr, &res->dir_cinfo);
if (status)
goto out;
status = decode_getfh(xdr, res->fh);
if (status)
goto out;
- if (decode_getfattr(xdr, res->fattr, res->server))
- goto out;
- status = decode_restorefh(xdr);
- if (status)
- goto out;
- decode_getfattr(xdr, res->dir_fattr, res->server);
+ decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -6075,19 +6254,12 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_savefh(xdr);
- if (status)
- goto out;
status = decode_open(xdr, res);
if (status)
goto out;
if (decode_getfh(xdr, &res->fh) != 0)
goto out;
- if (decode_getfattr(xdr, res->f_attr, res->server) != 0)
- goto out;
- if (decode_restorefh(xdr) != 0)
- goto out;
- decode_getfattr(xdr, res->dir_attr, res->server);
+ decode_getfattr(xdr, res->f_attr, res->server);
out:
return status;
}
@@ -6353,7 +6525,7 @@ out:
* Decode COMMIT response
*/
static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs_writeres *res)
+ struct nfs_commitres *res)
{
struct compound_hdr hdr;
int status;
@@ -6368,10 +6540,6 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
if (status)
goto out;
status = decode_commit(xdr, res);
- if (status)
- goto out;
- if (res->fattr)
- decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -6527,10 +6695,10 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
status = decode_putfh(xdr);
if (status != 0)
goto out;
- status = decode_delegreturn(xdr);
+ status = decode_getfattr(xdr, res->fattr, res->server);
if (status != 0)
goto out;
- decode_getfattr(xdr, res->fattr, res->server);
+ status = decode_delegreturn(xdr);
out:
return status;
}
@@ -6591,6 +6759,22 @@ out:
#if defined(CONFIG_NFS_V4_1)
/*
+ * Decode BIND_CONN_TO_SESSION response
+ */
+static int nfs4_xdr_dec_bind_conn_to_session(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_bind_conn_to_session(xdr, res);
+ return status;
+}
+
+/*
* Decode EXCHANGE_ID response
*/
static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp,
@@ -6639,6 +6823,22 @@ static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp,
}
/*
+ * Decode DESTROY_CLIENTID response
+ */
+static int nfs4_xdr_dec_destroy_clientid(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_destroy_clientid(xdr, res);
+ return status;
+}
+
+/*
* Decode SEQUENCE response
*/
static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
@@ -7085,6 +7285,9 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
PROC(GETDEVICELIST, enc_getdevicelist, dec_getdevicelist),
+ PROC(BIND_CONN_TO_SESSION,
+ enc_bind_conn_to_session, dec_bind_conn_to_session),
+ PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid),
#endif /* CONFIG_NFS_V4_1 */
};
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 4bff4a3dab46..b47277baebab 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -211,7 +211,7 @@ static void copy_single_comp(struct ore_components *oc, unsigned c,
memcpy(ocomp->cred, src_comp->oc_cap.cred, sizeof(ocomp->cred));
}
-int __alloc_objio_seg(unsigned numdevs, gfp_t gfp_flags,
+static int __alloc_objio_seg(unsigned numdevs, gfp_t gfp_flags,
struct objio_segment **pseg)
{
/* This is the in memory structure of the objio_segment
@@ -440,11 +440,12 @@ static void _read_done(struct ore_io_state *ios, void *private)
int objio_read_pagelist(struct nfs_read_data *rdata)
{
+ struct nfs_pgio_header *hdr = rdata->header;
struct objio_state *objios;
int ret;
- ret = objio_alloc_io_state(NFS_I(rdata->inode)->layout, true,
- rdata->lseg, rdata->args.pages, rdata->args.pgbase,
+ ret = objio_alloc_io_state(NFS_I(hdr->inode)->layout, true,
+ hdr->lseg, rdata->args.pages, rdata->args.pgbase,
rdata->args.offset, rdata->args.count, rdata,
GFP_KERNEL, &objios);
if (unlikely(ret))
@@ -483,12 +484,12 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
{
struct objio_state *objios = priv;
struct nfs_write_data *wdata = objios->oir.rpcdata;
+ struct address_space *mapping = wdata->header->inode->i_mapping;
pgoff_t index = offset / PAGE_SIZE;
- struct page *page = find_get_page(wdata->inode->i_mapping, index);
+ struct page *page = find_get_page(mapping, index);
if (!page) {
- page = find_or_create_page(wdata->inode->i_mapping,
- index, GFP_NOFS);
+ page = find_or_create_page(mapping, index, GFP_NOFS);
if (unlikely(!page)) {
dprintk("%s: grab_cache_page Failed index=0x%lx\n",
__func__, index);
@@ -518,11 +519,12 @@ static const struct _ore_r4w_op _r4w_op = {
int objio_write_pagelist(struct nfs_write_data *wdata, int how)
{
+ struct nfs_pgio_header *hdr = wdata->header;
struct objio_state *objios;
int ret;
- ret = objio_alloc_io_state(NFS_I(wdata->inode)->layout, false,
- wdata->lseg, wdata->args.pages, wdata->args.pgbase,
+ ret = objio_alloc_io_state(NFS_I(hdr->inode)->layout, false,
+ hdr->lseg, wdata->args.pages, wdata->args.pgbase,
wdata->args.offset, wdata->args.count, wdata, GFP_NOFS,
&objios);
if (unlikely(ret))
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
index 595c5fc21a19..874613545301 100644
--- a/fs/nfs/objlayout/objlayout.c
+++ b/fs/nfs/objlayout/objlayout.c
@@ -258,7 +258,7 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
if (status >= 0)
rdata->res.count = status;
else
- rdata->pnfs_error = status;
+ rdata->header->pnfs_error = status;
objlayout_iodone(oir);
/* must not use oir after this point */
@@ -279,12 +279,14 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
enum pnfs_try_status
objlayout_read_pagelist(struct nfs_read_data *rdata)
{
+ struct nfs_pgio_header *hdr = rdata->header;
+ struct inode *inode = hdr->inode;
loff_t offset = rdata->args.offset;
size_t count = rdata->args.count;
int err;
loff_t eof;
- eof = i_size_read(rdata->inode);
+ eof = i_size_read(inode);
if (unlikely(offset + count > eof)) {
if (offset >= eof) {
err = 0;
@@ -297,17 +299,17 @@ objlayout_read_pagelist(struct nfs_read_data *rdata)
}
rdata->res.eof = (offset + count) >= eof;
- _fix_verify_io_params(rdata->lseg, &rdata->args.pages,
+ _fix_verify_io_params(hdr->lseg, &rdata->args.pages,
&rdata->args.pgbase,
rdata->args.offset, rdata->args.count);
dprintk("%s: inode(%lx) offset 0x%llx count 0x%Zx eof=%d\n",
- __func__, rdata->inode->i_ino, offset, count, rdata->res.eof);
+ __func__, inode->i_ino, offset, count, rdata->res.eof);
err = objio_read_pagelist(rdata);
out:
if (unlikely(err)) {
- rdata->pnfs_error = err;
+ hdr->pnfs_error = err;
dprintk("%s: Returned Error %d\n", __func__, err);
return PNFS_NOT_ATTEMPTED;
}
@@ -340,7 +342,7 @@ objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
wdata->res.count = status;
wdata->verf.committed = oir->committed;
} else {
- wdata->pnfs_error = status;
+ wdata->header->pnfs_error = status;
}
objlayout_iodone(oir);
/* must not use oir after this point */
@@ -363,15 +365,16 @@ enum pnfs_try_status
objlayout_write_pagelist(struct nfs_write_data *wdata,
int how)
{
+ struct nfs_pgio_header *hdr = wdata->header;
int err;
- _fix_verify_io_params(wdata->lseg, &wdata->args.pages,
+ _fix_verify_io_params(hdr->lseg, &wdata->args.pages,
&wdata->args.pgbase,
wdata->args.offset, wdata->args.count);
err = objio_write_pagelist(wdata, how);
if (unlikely(err)) {
- wdata->pnfs_error = err;
+ hdr->pnfs_error = err;
dprintk("%s: Returned Error %d\n", __func__, err);
return PNFS_NOT_ATTEMPTED;
}
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index d21fceaa9f62..aed913c833f4 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -26,6 +26,47 @@
static struct kmem_cache *nfs_page_cachep;
+bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
+{
+ p->npages = pagecount;
+ if (pagecount <= ARRAY_SIZE(p->page_array))
+ p->pagevec = p->page_array;
+ else {
+ p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+ if (!p->pagevec)
+ p->npages = 0;
+ }
+ return p->pagevec != NULL;
+}
+
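nfs_pgarray_set() keeps small requests on the embedded page_array and only falls back to kcalloc() for larger page counts. The same pattern in plain C, with an illustrative inline size:

#include <stdbool.h>
#include <stdlib.h>

#define INLINE_PAGES 8                   /* illustrative embedded-array size */

struct page_vec {
    unsigned int npages;
    void **pagevec;                      /* points at inline_pages[] or a heap block */
    void *inline_pages[INLINE_PAGES];
};

/* Use the embedded array when it is big enough; allocate otherwise. */
static bool page_vec_set(struct page_vec *p, unsigned int pagecount)
{
    p->npages = pagecount;
    if (pagecount <= INLINE_PAGES) {
        p->pagevec = p->inline_pages;
    } else {
        p->pagevec = calloc(pagecount, sizeof(void *));
        if (!p->pagevec)
            p->npages = 0;
    }
    return p->pagevec != NULL;
}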
+void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr,
+ void (*release)(struct nfs_pgio_header *hdr))
+{
+ hdr->req = nfs_list_entry(desc->pg_list.next);
+ hdr->inode = desc->pg_inode;
+ hdr->cred = hdr->req->wb_context->cred;
+ hdr->io_start = req_offset(hdr->req);
+ hdr->good_bytes = desc->pg_count;
+ hdr->dreq = desc->pg_dreq;
+ hdr->release = release;
+ hdr->completion_ops = desc->pg_completion_ops;
+ if (hdr->completion_ops->init_hdr)
+ hdr->completion_ops->init_hdr(hdr);
+}
+
+void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
+{
+ spin_lock(&hdr->lock);
+ if (pos < hdr->io_start + hdr->good_bytes) {
+ set_bit(NFS_IOHDR_ERROR, &hdr->flags);
+ clear_bit(NFS_IOHDR_EOF, &hdr->flags);
+ hdr->good_bytes = pos - hdr->io_start;
+ hdr->error = error;
+ }
+ spin_unlock(&hdr->lock);
+}
+
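nfs_set_pgio_error() only ever moves good_bytes backwards: an error is recorded when it lands inside the currently-good range, so the header ends up describing the earliest failure. A lock-free userspace sketch of that bookkeeping (field names are hypothetical):

#include <stdint.h>

struct io_hdr {
    uint64_t io_start;                   /* byte offset where the I/O began */
    uint64_t good_bytes;                 /* bytes known good from io_start */
    int      error;
    int      failed;                     /* set once an error is recorded */
};

/* Record an error at byte position pos if it precedes the current good range. */
static void record_io_error(struct io_hdr *hdr, int error, uint64_t pos)
{
    if (pos < hdr->io_start + hdr->good_bytes) {
        hdr->failed = 1;
        hdr->good_bytes = pos - hdr->io_start;
        hdr->error = error;
    }
}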
static inline struct nfs_page *
nfs_page_alloc(void)
{
@@ -76,12 +117,8 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
* long write-back delay. This will be adjusted in
* update_nfs_request below if the region is not locked. */
req->wb_page = page;
- atomic_set(&req->wb_complete, 0);
req->wb_index = page->index;
page_cache_get(page);
- BUG_ON(PagePrivate(page));
- BUG_ON(!PageLocked(page));
- BUG_ON(page->mapping->host != inode);
req->wb_offset = offset;
req->wb_pgbase = offset;
req->wb_bytes = count;
@@ -104,6 +141,15 @@ void nfs_unlock_request(struct nfs_page *req)
clear_bit(PG_BUSY, &req->wb_flags);
smp_mb__after_clear_bit();
wake_up_bit(&req->wb_flags, PG_BUSY);
+}
+
+/**
+ * nfs_unlock_and_release_request - Unlock request and release the nfs_page
+ * @req: request to unlock and release
+ */
+void nfs_unlock_and_release_request(struct nfs_page *req)
+{
+ nfs_unlock_request(req);
nfs_release_request(req);
}
@@ -203,6 +249,7 @@ EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
struct inode *inode,
const struct nfs_pageio_ops *pg_ops,
+ const struct nfs_pgio_completion_ops *compl_ops,
size_t bsize,
int io_flags)
{
@@ -215,9 +262,11 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
desc->pg_recoalesce = 0;
desc->pg_inode = inode;
desc->pg_ops = pg_ops;
+ desc->pg_completion_ops = compl_ops;
desc->pg_ioflags = io_flags;
desc->pg_error = 0;
desc->pg_lseg = NULL;
+ desc->pg_dreq = NULL;
}
/**
@@ -241,12 +290,12 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
return false;
if (req->wb_context->state != prev->wb_context->state)
return false;
- if (req->wb_index != (prev->wb_index + 1))
- return false;
if (req->wb_pgbase != 0)
return false;
if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
return false;
+ if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
+ return false;
return pgio->pg_ops->pg_test(pgio, prev, req);
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 38512bcd2e98..b8323aa7b543 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -395,6 +395,9 @@ mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
dprintk("%s:Begin lo %p\n", __func__, lo);
if (list_empty(&lo->plh_segs)) {
+ /* Reset MDS Threshold I/O counters */
+ NFS_I(lo->plh_inode)->write_io = 0;
+ NFS_I(lo->plh_inode)->read_io = 0;
if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
put_layout_hdr_locked(lo);
return 0;
@@ -455,6 +458,7 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
spin_unlock(&nfsi->vfs_inode.i_lock);
pnfs_free_lseg_list(&tmp_list);
}
+EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
/*
* Called by the state manager to remove all layouts established under an
@@ -692,6 +696,7 @@ out:
dprintk("<-- %s status: %d\n", __func__, status);
return status;
}
+EXPORT_SYMBOL_GPL(_pnfs_return_layout);
bool pnfs_roc(struct inode *ino)
{
@@ -931,6 +936,81 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
}
/*
+ * Use mdsthreshold hints set at each OPEN to determine if I/O should go
+ * to the MDS or over pNFS
+ *
+ * The nfs_inode read_io and write_io fields are cumulative counters reset
+ * when there are no layout segments. Note that in pnfs_update_layout iomode
+ * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
+ * WRITE request.
+ *
+ * A return of true means use MDS I/O.
+ *
+ * From rfc 5661:
+ * If a file's size is smaller than the file size threshold, data accesses
+ * SHOULD be sent to the metadata server. If an I/O request has a length that
+ * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
+ * server. If both file size and I/O size are provided, the client SHOULD
+ * reach or exceed both thresholds before sending its read or write
+ * requests to the data server.
+ */
+static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
+ struct inode *ino, int iomode)
+{
+ struct nfs4_threshold *t = ctx->mdsthreshold;
+ struct nfs_inode *nfsi = NFS_I(ino);
+ loff_t fsize = i_size_read(ino);
+ bool size = false, size_set = false, io = false, io_set = false, ret = false;
+
+ if (t == NULL)
+ return ret;
+
+ dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
+ __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
+
+ switch (iomode) {
+ case IOMODE_READ:
+ if (t->bm & THRESHOLD_RD) {
+ dprintk("%s fsize %llu\n", __func__, fsize);
+ size_set = true;
+ if (fsize < t->rd_sz)
+ size = true;
+ }
+ if (t->bm & THRESHOLD_RD_IO) {
+ dprintk("%s nfsi->read_io %llu\n", __func__,
+ nfsi->read_io);
+ io_set = true;
+ if (nfsi->read_io < t->rd_io_sz)
+ io = true;
+ }
+ break;
+ case IOMODE_RW:
+ if (t->bm & THRESHOLD_WR) {
+ dprintk("%s fsize %llu\n", __func__, fsize);
+ size_set = true;
+ if (fsize < t->wr_sz)
+ size = true;
+ }
+ if (t->bm & THRESHOLD_WR_IO) {
+ dprintk("%s nfsi->write_io %llu\n", __func__,
+ nfsi->write_io);
+ io_set = true;
+ if (nfsi->write_io < t->wr_io_sz)
+ io = true;
+ }
+ break;
+ }
+ if (size_set && io_set) {
+ if (size && io)
+ ret = true;
+ } else if (size || io)
+ ret = true;
+
+ dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
+ return ret;
+}
+
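pnfs_within_mdsthreshold() applies the RFC 5661 rule quoted in the comment above: when both a size and an I/O threshold are given, both must be under their limits before I/O is steered to the MDS; if only one is given, that one decides. A standalone sketch of the read-side decision with made-up threshold values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rd_thresholds {
    bool     have_sz, have_io;
    uint64_t sz;                         /* file-size threshold for reads */
    uint64_t io;                         /* cumulative read-I/O threshold */
};

/* Return true when a read should go to the MDS rather than over pNFS. */
static bool read_via_mds(const struct rd_thresholds *t, uint64_t fsize, uint64_t read_io)
{
    bool size_ok = t->have_sz && fsize < t->sz;
    bool io_ok   = t->have_io && read_io < t->io;

    if (t->have_sz && t->have_io)
        return size_ok && io_ok;         /* both thresholds must be met */
    return size_ok || io_ok;             /* otherwise one is enough */
}

int main(void)
{
    struct rd_thresholds t = { true, true, 1 << 20, 64 << 10 };

    printf("small file, little I/O -> MDS? %d\n", read_via_mds(&t, 4096, 1024));
    printf("large file             -> MDS? %d\n", read_via_mds(&t, 8 << 20, 1024));
    return 0;
}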
+/*
* Layout segment is retrieved from the server if not cached.
* The appropriate layout segment is referenced and returned to the caller.
*/
@@ -957,6 +1037,10 @@ pnfs_update_layout(struct inode *ino,
if (!pnfs_enabled_sb(NFS_SERVER(ino)))
return NULL;
+
+ if (pnfs_within_mdsthreshold(ctx, ino, iomode))
+ return NULL;
+
spin_lock(&ino->i_lock);
lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
if (lo == NULL) {
@@ -1082,6 +1166,10 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r
{
BUG_ON(pgio->pg_lseg != NULL);
+ if (req->wb_offset != req->wb_pgbase) {
+ nfs_pageio_reset_read_mds(pgio);
+ return;
+ }
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
req_offset(req),
@@ -1100,6 +1188,10 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *
{
BUG_ON(pgio->pg_lseg != NULL);
+ if (req->wb_offset != req->wb_pgbase) {
+ nfs_pageio_reset_write_mds(pgio);
+ return;
+ }
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
req_offset(req),
@@ -1113,26 +1205,31 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
bool
-pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
+pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
struct nfs_server *server = NFS_SERVER(inode);
struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
if (ld == NULL)
return false;
- nfs_pageio_init(pgio, inode, ld->pg_read_ops, server->rsize, 0);
+ nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops,
+ server->rsize, 0);
return true;
}
bool
-pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
+pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
+ int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
struct nfs_server *server = NFS_SERVER(inode);
struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
if (ld == NULL)
return false;
- nfs_pageio_init(pgio, inode, ld->pg_write_ops, server->wsize, ioflags);
+ nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops,
+ server->wsize, ioflags);
return true;
}
@@ -1162,13 +1259,15 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
-static int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head)
+int pnfs_write_done_resend_to_mds(struct inode *inode,
+ struct list_head *head,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
struct nfs_pageio_descriptor pgio;
LIST_HEAD(failed);
/* Resend all requests through the MDS */
- nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE);
+ nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE, compl_ops);
while (!list_empty(head)) {
struct nfs_page *req = nfs_list_entry(head->next);
@@ -1188,30 +1287,37 @@ static int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *
}
return 0;
}
+EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
+
+static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
+{
+ struct nfs_pgio_header *hdr = data->header;
+
+ dprintk("pnfs write error = %d\n", hdr->pnfs_error);
+ if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
+ PNFS_LAYOUTRET_ON_ERROR) {
+ clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
+ pnfs_return_layout(hdr->inode);
+ }
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
+ data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
+ &hdr->pages,
+ hdr->completion_ops);
+}
/*
* Called by non rpc-based layout drivers
*/
void pnfs_ld_write_done(struct nfs_write_data *data)
{
- if (likely(!data->pnfs_error)) {
+ struct nfs_pgio_header *hdr = data->header;
+
+ if (!hdr->pnfs_error) {
pnfs_set_layoutcommit(data);
- data->mds_ops->rpc_call_done(&data->task, data);
- } else {
- dprintk("pnfs write error = %d\n", data->pnfs_error);
- if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
- PNFS_LAYOUTRET_ON_ERROR) {
- /* Don't lo_commit on error, Server will needs to
- * preform a file recovery.
- */
- clear_bit(NFS_INO_LAYOUTCOMMIT,
- &NFS_I(data->inode)->flags);
- pnfs_return_layout(data->inode);
- }
- data->task.tk_status = pnfs_write_done_resend_to_mds(data->inode, &data->pages);
- }
- put_lseg(data->lseg);
- data->mds_ops->rpc_release(data);
+ hdr->mds_ops->rpc_call_done(&data->task, data);
+ } else
+ pnfs_ld_handle_write_error(data);
+ hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
@@ -1219,12 +1325,13 @@ static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
struct nfs_write_data *data)
{
- list_splice_tail_init(&data->pages, &desc->pg_list);
- if (data->req && list_empty(&data->req->wb_list))
- nfs_list_add_request(data->req, &desc->pg_list);
- nfs_pageio_reset_write_mds(desc);
- desc->pg_recoalesce = 1;
- put_lseg(data->lseg);
+ struct nfs_pgio_header *hdr = data->header;
+
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ list_splice_tail_init(&hdr->pages, &desc->pg_list);
+ nfs_pageio_reset_write_mds(desc);
+ desc->pg_recoalesce = 1;
+ }
nfs_writedata_release(data);
}
@@ -1234,23 +1341,18 @@ pnfs_try_to_write_data(struct nfs_write_data *wdata,
struct pnfs_layout_segment *lseg,
int how)
{
- struct inode *inode = wdata->inode;
+ struct nfs_pgio_header *hdr = wdata->header;
+ struct inode *inode = hdr->inode;
enum pnfs_try_status trypnfs;
struct nfs_server *nfss = NFS_SERVER(inode);
- wdata->mds_ops = call_ops;
- wdata->lseg = get_lseg(lseg);
+ hdr->mds_ops = call_ops;
dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
inode->i_ino, wdata->args.count, wdata->args.offset, how);
-
trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
- if (trypnfs == PNFS_NOT_ATTEMPTED) {
- put_lseg(wdata->lseg);
- wdata->lseg = NULL;
- } else
+ if (trypnfs != PNFS_NOT_ATTEMPTED)
nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
-
dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
return trypnfs;
}
@@ -1266,7 +1368,7 @@ pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *he
while (!list_empty(head)) {
enum pnfs_try_status trypnfs;
- data = list_entry(head->next, struct nfs_write_data, list);
+ data = list_first_entry(head, struct nfs_write_data, list);
list_del_init(&data->list);
trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
@@ -1276,43 +1378,82 @@ pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *he
put_lseg(lseg);
}
+static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
+{
+ put_lseg(hdr->lseg);
+ nfs_writehdr_free(hdr);
+}
+
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
- LIST_HEAD(head);
+ struct nfs_write_header *whdr;
+ struct nfs_pgio_header *hdr;
int ret;
- ret = nfs_generic_flush(desc, &head);
- if (ret != 0) {
+ whdr = nfs_writehdr_alloc();
+ if (!whdr) {
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
put_lseg(desc->pg_lseg);
desc->pg_lseg = NULL;
- return ret;
+ return -ENOMEM;
}
- pnfs_do_multiple_writes(desc, &head, desc->pg_ioflags);
- return 0;
+ hdr = &whdr->header;
+ nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
+ hdr->lseg = get_lseg(desc->pg_lseg);
+ atomic_inc(&hdr->refcnt);
+ ret = nfs_generic_flush(desc, hdr);
+ if (ret != 0) {
+ put_lseg(desc->pg_lseg);
+ desc->pg_lseg = NULL;
+ } else
+ pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
+ if (atomic_dec_and_test(&hdr->refcnt))
+ hdr->completion_ops->completion(hdr);
+ return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
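The reworked pnfs_generic_pg_writepages() takes an extra reference on the header before the RPCs are handed off and only fires the completion callback when the last reference drops, so the header outlives every in-flight request. A minimal sketch of that refcount/complete pattern using C11 atomics (names are hypothetical):

#include <stdatomic.h>
#include <stdio.h>

struct hdr {
    atomic_int refcnt;
    void (*completion)(struct hdr *h);
};

static void hdr_get(struct hdr *h)
{
    atomic_fetch_add(&h->refcnt, 1);
}

/* Run the completion callback only when the final reference is dropped. */
static void hdr_put(struct hdr *h)
{
    if (atomic_fetch_sub(&h->refcnt, 1) == 1)
        h->completion(h);
}

static void done(struct hdr *h)
{
    printf("all requests finished, header %p complete\n", (void *)h);
}

int main(void)
{
    struct hdr h = { .completion = done };

    atomic_init(&h.refcnt, 0);
    hdr_get(&h);                         /* issuer's reference */
    hdr_get(&h);                         /* one in-flight request */
    hdr_put(&h);                         /* the request completes */
    hdr_put(&h);                         /* issuer drops its ref -> completion runs */
    return 0;
}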
-static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
+int pnfs_read_done_resend_to_mds(struct inode *inode,
+ struct list_head *head,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
struct nfs_pageio_descriptor pgio;
+ LIST_HEAD(failed);
- put_lseg(data->lseg);
- data->lseg = NULL;
- dprintk("pnfs write error = %d\n", data->pnfs_error);
- if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
- PNFS_LAYOUTRET_ON_ERROR)
- pnfs_return_layout(data->inode);
-
- nfs_pageio_init_read_mds(&pgio, data->inode);
-
- while (!list_empty(&data->pages)) {
- struct nfs_page *req = nfs_list_entry(data->pages.next);
+ /* Resend all requests through the MDS */
+ nfs_pageio_init_read_mds(&pgio, inode, compl_ops);
+ while (!list_empty(head)) {
+ struct nfs_page *req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- nfs_pageio_add_request(&pgio, req);
+ if (!nfs_pageio_add_request(&pgio, req))
+ nfs_list_add_request(req, &failed);
}
nfs_pageio_complete(&pgio);
+
+ if (!list_empty(&failed)) {
+ list_move(&failed, head);
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
+
+static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
+{
+ struct nfs_pgio_header *hdr = data->header;
+
+ dprintk("pnfs read error = %d\n", hdr->pnfs_error);
+ if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
+ PNFS_LAYOUTRET_ON_ERROR) {
+ clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
+ pnfs_return_layout(hdr->inode);
+ }
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
+ data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
+ &hdr->pages,
+ hdr->completion_ops);
}
/*
@@ -1320,13 +1461,14 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
*/
void pnfs_ld_read_done(struct nfs_read_data *data)
{
- if (likely(!data->pnfs_error)) {
+ struct nfs_pgio_header *hdr = data->header;
+
+ if (likely(!hdr->pnfs_error)) {
__nfs4_read_done_cb(data);
- data->mds_ops->rpc_call_done(&data->task, data);
+ hdr->mds_ops->rpc_call_done(&data->task, data);
} else
pnfs_ld_handle_read_error(data);
- put_lseg(data->lseg);
- data->mds_ops->rpc_release(data);
+ hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
@@ -1334,11 +1476,13 @@ static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
struct nfs_read_data *data)
{
- list_splice_tail_init(&data->pages, &desc->pg_list);
- if (data->req && list_empty(&data->req->wb_list))
- nfs_list_add_request(data->req, &desc->pg_list);
- nfs_pageio_reset_read_mds(desc);
- desc->pg_recoalesce = 1;
+ struct nfs_pgio_header *hdr = data->header;
+
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ list_splice_tail_init(&hdr->pages, &desc->pg_list);
+ nfs_pageio_reset_read_mds(desc);
+ desc->pg_recoalesce = 1;
+ }
nfs_readdata_release(data);
}
@@ -1350,23 +1494,19 @@ pnfs_try_to_read_data(struct nfs_read_data *rdata,
const struct rpc_call_ops *call_ops,
struct pnfs_layout_segment *lseg)
{
- struct inode *inode = rdata->inode;
+ struct nfs_pgio_header *hdr = rdata->header;
+ struct inode *inode = hdr->inode;
struct nfs_server *nfss = NFS_SERVER(inode);
enum pnfs_try_status trypnfs;
- rdata->mds_ops = call_ops;
- rdata->lseg = get_lseg(lseg);
+ hdr->mds_ops = call_ops;
dprintk("%s: Reading ino:%lu %u@%llu\n",
__func__, inode->i_ino, rdata->args.count, rdata->args.offset);
trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
- if (trypnfs == PNFS_NOT_ATTEMPTED) {
- put_lseg(rdata->lseg);
- rdata->lseg = NULL;
- } else {
+ if (trypnfs != PNFS_NOT_ATTEMPTED)
nfs_inc_stats(inode, NFSIOS_PNFS_READ);
- }
dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
return trypnfs;
}
@@ -1382,7 +1522,7 @@ pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *hea
while (!list_empty(head)) {
enum pnfs_try_status trypnfs;
- data = list_entry(head->next, struct nfs_read_data, list);
+ data = list_first_entry(head, struct nfs_read_data, list);
list_del_init(&data->list);
trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
@@ -1392,20 +1532,40 @@ pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *hea
put_lseg(lseg);
}
+static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
+{
+ put_lseg(hdr->lseg);
+ nfs_readhdr_free(hdr);
+}
+
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
- LIST_HEAD(head);
+ struct nfs_read_header *rhdr;
+ struct nfs_pgio_header *hdr;
int ret;
- ret = nfs_generic_pagein(desc, &head);
- if (ret != 0) {
+ rhdr = nfs_readhdr_alloc();
+ if (!rhdr) {
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+ ret = -ENOMEM;
put_lseg(desc->pg_lseg);
desc->pg_lseg = NULL;
return ret;
}
- pnfs_do_multiple_reads(desc, &head);
- return 0;
+ hdr = &rhdr->header;
+ nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
+ hdr->lseg = get_lseg(desc->pg_lseg);
+ atomic_inc(&hdr->refcnt);
+ ret = nfs_generic_pagein(desc, hdr);
+ if (ret != 0) {
+ put_lseg(desc->pg_lseg);
+ desc->pg_lseg = NULL;
+ } else
+ pnfs_do_multiple_reads(desc, &hdr->rpc_list);
+ if (atomic_dec_and_test(&hdr->refcnt))
+ hdr->completion_ops->completion(hdr);
+ return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
@@ -1438,30 +1598,32 @@ EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
- struct nfs_inode *nfsi = NFS_I(wdata->inode);
+ struct nfs_pgio_header *hdr = wdata->header;
+ struct inode *inode = hdr->inode;
+ struct nfs_inode *nfsi = NFS_I(inode);
loff_t end_pos = wdata->mds_offset + wdata->res.count;
bool mark_as_dirty = false;
- spin_lock(&nfsi->vfs_inode.i_lock);
+ spin_lock(&inode->i_lock);
if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
mark_as_dirty = true;
dprintk("%s: Set layoutcommit for inode %lu ",
- __func__, wdata->inode->i_ino);
+ __func__, inode->i_ino);
}
- if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &wdata->lseg->pls_flags)) {
+ if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
/* references matched in nfs4_layoutcommit_release */
- get_lseg(wdata->lseg);
+ get_lseg(hdr->lseg);
}
if (end_pos > nfsi->layout->plh_lwb)
nfsi->layout->plh_lwb = end_pos;
- spin_unlock(&nfsi->vfs_inode.i_lock);
+ spin_unlock(&inode->i_lock);
dprintk("%s: lseg %p end_pos %llu\n",
- __func__, wdata->lseg, nfsi->layout->plh_lwb);
+ __func__, hdr->lseg, nfsi->layout->plh_lwb);
/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
* will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
if (mark_as_dirty)
- mark_inode_dirty_sync(wdata->inode);
+ mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
@@ -1550,3 +1712,15 @@ out_free:
kfree(data);
goto out;
}
+
+struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
+{
+ struct nfs4_threshold *thp;
+
+ thp = kzalloc(sizeof(*thp), GFP_NOFS);
+ if (!thp) {
+ dprintk("%s mdsthreshold allocation failed\n", __func__);
+ return NULL;
+ }
+ return thp;
+}
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 442ebf68eeec..29fd23c0efdc 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -63,6 +63,7 @@ enum {
NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */
NFS_LAYOUT_ROC, /* some lseg had roc bit set */
NFS_LAYOUT_DESTROYED, /* no new use of layout allowed */
+ NFS_LAYOUT_INVALID, /* layout is being destroyed */
};
enum layoutdriver_policy_flags {
@@ -94,11 +95,20 @@ struct pnfs_layoutdriver_type {
const struct nfs_pageio_ops *pg_read_ops;
const struct nfs_pageio_ops *pg_write_ops;
+ struct pnfs_ds_commit_info *(*get_ds_info) (struct inode *inode);
void (*mark_request_commit) (struct nfs_page *req,
- struct pnfs_layout_segment *lseg);
- void (*clear_request_commit) (struct nfs_page *req);
- int (*scan_commit_lists) (struct inode *inode, int max, spinlock_t *lock);
- int (*commit_pagelist)(struct inode *inode, struct list_head *mds_pages, int how);
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo);
+ void (*clear_request_commit) (struct nfs_page *req,
+ struct nfs_commit_info *cinfo);
+ int (*scan_commit_lists) (struct nfs_commit_info *cinfo,
+ int max);
+ void (*recover_commit_reqs) (struct list_head *list,
+ struct nfs_commit_info *cinfo);
+ int (*commit_pagelist)(struct inode *inode,
+ struct list_head *mds_pages,
+ int how,
+ struct nfs_commit_info *cinfo);
/*
* Return PNFS_ATTEMPTED to indicate the layout code has attempted
@@ -168,8 +178,10 @@ extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
void get_layout_hdr(struct pnfs_layout_hdr *lo);
void put_lseg(struct pnfs_layout_segment *lseg);
-bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *);
-bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *, int);
+bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *,
+ const struct nfs_pgio_completion_ops *);
+bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *,
+ int, const struct nfs_pgio_completion_ops *);
void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32);
void unset_pnfs_layoutdriver(struct nfs_server *);
@@ -211,6 +223,11 @@ struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
gfp_t gfp_flags);
void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp);
+int pnfs_read_done_resend_to_mds(struct inode *inode, struct list_head *head,
+ const struct nfs_pgio_completion_ops *compl_ops);
+int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head,
+ const struct nfs_pgio_completion_ops *compl_ops);
+struct nfs4_threshold *pnfs_mdsthreshold_alloc(void);
/* nfs4_deviceid_flags */
enum {
@@ -261,49 +278,66 @@ static inline int pnfs_enabled_sb(struct nfs_server *nfss)
}
static inline int
-pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how,
+ struct nfs_commit_info *cinfo)
{
- if (!test_and_clear_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags))
+ if (cinfo->ds == NULL || cinfo->ds->ncommitting == 0)
return PNFS_NOT_ATTEMPTED;
- return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how);
+ return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how, cinfo);
+}
+
+static inline struct pnfs_ds_commit_info *
+pnfs_get_ds_info(struct inode *inode)
+{
+ struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
+
+ if (ld == NULL || ld->get_ds_info == NULL)
+ return NULL;
+ return ld->get_ds_info(inode);
}
static inline bool
-pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
struct inode *inode = req->wb_context->dentry->d_inode;
struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
if (lseg == NULL || ld->mark_request_commit == NULL)
return false;
- ld->mark_request_commit(req, lseg);
+ ld->mark_request_commit(req, lseg, cinfo);
return true;
}
static inline bool
-pnfs_clear_request_commit(struct nfs_page *req)
+pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
struct inode *inode = req->wb_context->dentry->d_inode;
struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
if (ld == NULL || ld->clear_request_commit == NULL)
return false;
- ld->clear_request_commit(req);
+ ld->clear_request_commit(req, cinfo);
return true;
}
static inline int
-pnfs_scan_commit_lists(struct inode *inode, int max, spinlock_t *lock)
+pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
+ int max)
{
- struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
- int ret;
-
- if (ld == NULL || ld->scan_commit_lists == NULL)
+ if (cinfo->ds == NULL || cinfo->ds->nwritten == 0)
return 0;
- ret = ld->scan_commit_lists(inode, max, lock);
- if (ret != 0)
- set_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags);
- return ret;
+ else
+ return NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists(cinfo, max);
+}
+
+static inline void
+pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list,
+ struct nfs_commit_info *cinfo)
+{
+ if (cinfo->ds == NULL || cinfo->ds->nwritten == 0)
+ return;
+ NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
}
/* Should the pNFS client commit and return the layout upon a setattr */
@@ -327,6 +361,14 @@ static inline int pnfs_return_layout(struct inode *ino)
return 0;
}
+static inline bool
+pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src,
+ struct nfs_server *nfss)
+{
+ return (dst && src && src->bm != 0 &&
+ nfss->pnfs_curr_ld->id == src->l_type);
+}
+
#ifdef NFS_DEBUG
void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id);
#else
@@ -396,45 +438,74 @@ static inline void unset_pnfs_layoutdriver(struct nfs_server *s)
{
}
-static inline bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
+static inline bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
return false;
}
-static inline bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
+static inline bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
return false;
}
static inline int
-pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how,
+ struct nfs_commit_info *cinfo)
{
return PNFS_NOT_ATTEMPTED;
}
+static inline struct pnfs_ds_commit_info *
+pnfs_get_ds_info(struct inode *inode)
+{
+ return NULL;
+}
+
static inline bool
-pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
return false;
}
static inline bool
-pnfs_clear_request_commit(struct nfs_page *req)
+pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
return false;
}
static inline int
-pnfs_scan_commit_lists(struct inode *inode, int max, spinlock_t *lock)
+pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
+ int max)
{
return 0;
}
+static inline void
+pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list,
+ struct nfs_commit_info *cinfo)
+{
+}
+
static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
return 0;
}
+static inline bool
+pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src,
+ struct nfs_server *nfss)
+{
+ return false;
+}
+
+static inline struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
+{
+ return NULL;
+}
+
#endif /* CONFIG_NFS_V4_1 */
#endif /* FS_NFS_PNFS_H */
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index d6408b6437de..a706b6bcc286 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -178,7 +178,7 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+nfs_proc_lookup(struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs_diropargs arg = {
@@ -640,12 +640,14 @@ nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
{
+ struct inode *inode = data->header->inode;
+
if (nfs_async_handle_expired_key(task))
return -EAGAIN;
- nfs_invalidate_atime(data->inode);
+ nfs_invalidate_atime(inode);
if (task->tk_status >= 0) {
- nfs_refresh_inode(data->inode, data->res.fattr);
+ nfs_refresh_inode(inode, data->res.fattr);
/* Emulate the eof flag, which isn't normally needed in NFSv2
* as it is guaranteed to always return the file attributes
*/
@@ -667,11 +669,13 @@ static void nfs_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_dat
static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
+ struct inode *inode = data->header->inode;
+
if (nfs_async_handle_expired_key(task))
return -EAGAIN;
if (task->tk_status >= 0)
- nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr);
+ nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
return 0;
}
@@ -687,8 +691,13 @@ static void nfs_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_d
rpc_call_start(task);
}
+static void nfs_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+{
+ BUG();
+}
+
static void
-nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+nfs_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
{
BUG();
}
@@ -732,6 +741,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
.file_inode_ops = &nfs_file_inode_operations,
.file_ops = &nfs_file_operations,
.getroot = nfs_proc_get_root,
+ .submount = nfs_submount,
.getattr = nfs_proc_getattr,
.setattr = nfs_proc_setattr,
.lookup = nfs_proc_lookup,
@@ -763,6 +773,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
.write_rpc_prepare = nfs_proc_write_rpc_prepare,
.write_done = nfs_write_done,
.commit_setup = nfs_proc_commit_setup,
+ .commit_rpc_prepare = nfs_proc_commit_rpc_prepare,
.lock = nfs_proc_lock,
.lock_check_bounds = nfs_lock_check_bounds,
.close_context = nfs_close_context,
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 0a4be28c2ea3..86ced7836214 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -30,43 +30,73 @@
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
static const struct nfs_pageio_ops nfs_pageio_read_ops;
-static const struct rpc_call_ops nfs_read_partial_ops;
-static const struct rpc_call_ops nfs_read_full_ops;
+static const struct rpc_call_ops nfs_read_common_ops;
+static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static struct kmem_cache *nfs_rdata_cachep;
-struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
+struct nfs_read_header *nfs_readhdr_alloc(void)
{
- struct nfs_read_data *p;
-
- p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
- if (p) {
- INIT_LIST_HEAD(&p->pages);
- p->npages = pagecount;
- if (pagecount <= ARRAY_SIZE(p->page_array))
- p->pagevec = p->page_array;
- else {
- p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
- if (!p->pagevec) {
- kmem_cache_free(nfs_rdata_cachep, p);
- p = NULL;
- }
- }
+ struct nfs_read_header *rhdr;
+
+ rhdr = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
+ if (rhdr) {
+ struct nfs_pgio_header *hdr = &rhdr->header;
+
+ INIT_LIST_HEAD(&hdr->pages);
+ INIT_LIST_HEAD(&hdr->rpc_list);
+ spin_lock_init(&hdr->lock);
+ atomic_set(&hdr->refcnt, 0);
+ }
+ return rhdr;
+}
+
+static struct nfs_read_data *nfs_readdata_alloc(struct nfs_pgio_header *hdr,
+ unsigned int pagecount)
+{
+ struct nfs_read_data *data, *prealloc;
+
+ prealloc = &container_of(hdr, struct nfs_read_header, header)->rpc_data;
+ if (prealloc->header == NULL)
+ data = prealloc;
+ else
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto out;
+
+ if (nfs_pgarray_set(&data->pages, pagecount)) {
+ data->header = hdr;
+ atomic_inc(&hdr->refcnt);
+ } else {
+ if (data != prealloc)
+ kfree(data);
+ data = NULL;
}
- return p;
+out:
+ return data;
}
-void nfs_readdata_free(struct nfs_read_data *p)
+void nfs_readhdr_free(struct nfs_pgio_header *hdr)
{
- if (p && (p->pagevec != &p->page_array[0]))
- kfree(p->pagevec);
- kmem_cache_free(nfs_rdata_cachep, p);
+ struct nfs_read_header *rhdr = container_of(hdr, struct nfs_read_header, header);
+
+ kmem_cache_free(nfs_rdata_cachep, rhdr);
}
void nfs_readdata_release(struct nfs_read_data *rdata)
{
+ struct nfs_pgio_header *hdr = rdata->header;
+ struct nfs_read_header *read_header = container_of(hdr, struct nfs_read_header, header);
+
put_nfs_open_context(rdata->args.context);
- nfs_readdata_free(rdata);
+ if (rdata->pages.pagevec != rdata->pages.page_array)
+ kfree(rdata->pages.pagevec);
+ if (rdata != &read_header->rpc_data)
+ kfree(rdata);
+ else
+ rdata->header = NULL;
+ if (atomic_dec_and_test(&hdr->refcnt))
+ hdr->completion_ops->completion(hdr);
}
static
@@ -78,39 +108,11 @@ int nfs_return_empty_page(struct page *page)
return 0;
}
-static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
-{
- unsigned int remainder = data->args.count - data->res.count;
- unsigned int base = data->args.pgbase + data->res.count;
- unsigned int pglen;
- struct page **pages;
-
- if (data->res.eof == 0 || remainder == 0)
- return;
- /*
- * Note: "remainder" can never be negative, since we check for
- * this in the XDR code.
- */
- pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
- base &= ~PAGE_CACHE_MASK;
- pglen = PAGE_CACHE_SIZE - base;
- for (;;) {
- if (remainder <= pglen) {
- zero_user(*pages, base, remainder);
- break;
- }
- zero_user(*pages, base, pglen);
- pages++;
- remainder -= pglen;
- pglen = PAGE_CACHE_SIZE;
- base = 0;
- }
-}
-
void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
- struct inode *inode)
+ struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
- nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
+ nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, compl_ops,
NFS_SERVER(inode)->rsize, 0);
}
@@ -121,11 +123,12 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
-static void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
- struct inode *inode)
+void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
- if (!pnfs_pageio_init_read(pgio, inode))
- nfs_pageio_init_read_mds(pgio, inode);
+ if (!pnfs_pageio_init_read(pgio, inode, compl_ops))
+ nfs_pageio_init_read_mds(pgio, inode, compl_ops);
}
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
@@ -146,9 +149,10 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
if (len < PAGE_CACHE_SIZE)
zero_user_segment(page, len, PAGE_CACHE_SIZE);
- nfs_pageio_init_read(&pgio, inode);
+ nfs_pageio_init_read(&pgio, inode, &nfs_async_read_completion_ops);
nfs_pageio_add_request(&pgio, new);
nfs_pageio_complete(&pgio);
+ NFS_I(inode)->read_io += pgio.pg_bytes_written;
return 0;
}
@@ -169,16 +173,49 @@ static void nfs_readpage_release(struct nfs_page *req)
nfs_release_request(req);
}
-int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
- const struct rpc_call_ops *call_ops)
+/* Note io was page aligned */
+static void nfs_read_completion(struct nfs_pgio_header *hdr)
+{
+ unsigned long bytes = 0;
+
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+ goto out;
+ while (!list_empty(&hdr->pages)) {
+ struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+ struct page *page = req->wb_page;
+
+ if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
+ if (bytes > hdr->good_bytes)
+ zero_user(page, 0, PAGE_SIZE);
+ else if (hdr->good_bytes - bytes < PAGE_SIZE)
+ zero_user_segment(page,
+ hdr->good_bytes & ~PAGE_MASK,
+ PAGE_SIZE);
+ }
+ bytes += req->wb_bytes;
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+ if (bytes <= hdr->good_bytes)
+ SetPageUptodate(page);
+ } else
+ SetPageUptodate(page);
+ nfs_list_remove_request(req);
+ nfs_readpage_release(req);
+ }
+out:
+ hdr->release(hdr);
+}
+
+int nfs_initiate_read(struct rpc_clnt *clnt,
+ struct nfs_read_data *data,
+ const struct rpc_call_ops *call_ops, int flags)
{
- struct inode *inode = data->inode;
+ struct inode *inode = data->header->inode;
int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
- .rpc_cred = data->cred,
+ .rpc_cred = data->header->cred,
};
struct rpc_task_setup task_setup_data = {
.task = &data->task,
@@ -187,7 +224,7 @@ int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
.callback_ops = call_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC | swap_flags,
+ .flags = RPC_TASK_ASYNC | swap_flags | flags,
};
/* Set up the initial task struct. */
@@ -212,19 +249,15 @@ EXPORT_SYMBOL_GPL(nfs_initiate_read);
/*
* Set up the NFS read request struct
*/
-static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
+static void nfs_read_rpcsetup(struct nfs_read_data *data,
unsigned int count, unsigned int offset)
{
- struct inode *inode = req->wb_context->dentry->d_inode;
-
- data->req = req;
- data->inode = inode;
- data->cred = req->wb_context->cred;
+ struct nfs_page *req = data->header->req;
- data->args.fh = NFS_FH(inode);
+ data->args.fh = NFS_FH(data->header->inode);
data->args.offset = req_offset(req) + offset;
data->args.pgbase = req->wb_pgbase + offset;
- data->args.pages = data->pagevec;
+ data->args.pages = data->pages.pagevec;
data->args.count = count;
data->args.context = get_nfs_open_context(req->wb_context);
data->args.lock_context = req->wb_lock_context;
@@ -238,9 +271,9 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
static int nfs_do_read(struct nfs_read_data *data,
const struct rpc_call_ops *call_ops)
{
- struct inode *inode = data->args.context->dentry->d_inode;
+ struct inode *inode = data->header->inode;
- return nfs_initiate_read(data, NFS_CLIENT(inode), call_ops);
+ return nfs_initiate_read(NFS_CLIENT(inode), data, call_ops, 0);
}
static int
@@ -253,7 +286,7 @@ nfs_do_multiple_reads(struct list_head *head,
while (!list_empty(head)) {
int ret2;
- data = list_entry(head->next, struct nfs_read_data, list);
+ data = list_first_entry(head, struct nfs_read_data, list);
list_del_init(&data->list);
ret2 = nfs_do_read(data, call_ops);
@@ -275,6 +308,24 @@ nfs_async_read_error(struct list_head *head)
}
}
+static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
+ .error_cleanup = nfs_async_read_error,
+ .completion = nfs_read_completion,
+};
+
+static void nfs_pagein_error(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
+{
+ set_bit(NFS_IOHDR_REDO, &hdr->flags);
+ while (!list_empty(&hdr->rpc_list)) {
+ struct nfs_read_data *data = list_first_entry(&hdr->rpc_list,
+ struct nfs_read_data, list);
+ list_del(&data->list);
+ nfs_readdata_release(data);
+ }
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+}
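/*
 * Sketch of how the error path above is consumed (inferred from this patch
 * only): NFS_IOHDR_REDO makes nfs_read_completion() skip its per-page
 * uptodate/zeroing handling, and ->error_cleanup() is left to deal with the
 * requests still queued on the descriptor's pg_list.
 */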
+
/*
* Generate multiple requests to fill a single page.
*
@@ -288,93 +339,95 @@ nfs_async_read_error(struct list_head *head)
* won't see the new data until our attribute cache is updated. This is more
* or less conventional NFS client behavior.
*/
-static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
{
- struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
+ struct nfs_page *req = hdr->req;
struct page *page = req->wb_page;
struct nfs_read_data *data;
size_t rsize = desc->pg_bsize, nbytes;
unsigned int offset;
- int requests = 0;
- int ret = 0;
-
- nfs_list_remove_request(req);
offset = 0;
nbytes = desc->pg_count;
do {
size_t len = min(nbytes,rsize);
- data = nfs_readdata_alloc(1);
- if (!data)
- goto out_bad;
- data->pagevec[0] = page;
- nfs_read_rpcsetup(req, data, len, offset);
- list_add(&data->list, res);
- requests++;
+ data = nfs_readdata_alloc(hdr, 1);
+ if (!data) {
+ nfs_pagein_error(desc, hdr);
+ return -ENOMEM;
+ }
+ data->pages.pagevec[0] = page;
+ nfs_read_rpcsetup(data, len, offset);
+ list_add(&data->list, &hdr->rpc_list);
nbytes -= len;
offset += len;
- } while(nbytes != 0);
- atomic_set(&req->wb_complete, requests);
- desc->pg_rpc_callops = &nfs_read_partial_ops;
- return ret;
-out_bad:
- while (!list_empty(res)) {
- data = list_entry(res->next, struct nfs_read_data, list);
- list_del(&data->list);
- nfs_readdata_release(data);
- }
- nfs_readpage_release(req);
- return -ENOMEM;
+ } while (nbytes != 0);
+
+ nfs_list_remove_request(req);
+ nfs_list_add_request(req, &hdr->pages);
+ desc->pg_rpc_callops = &nfs_read_common_ops;
+ return 0;
}
-static int nfs_pagein_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_pagein_one(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
{
struct nfs_page *req;
struct page **pages;
- struct nfs_read_data *data;
+ struct nfs_read_data *data;
struct list_head *head = &desc->pg_list;
- int ret = 0;
- data = nfs_readdata_alloc(nfs_page_array_len(desc->pg_base,
- desc->pg_count));
+ data = nfs_readdata_alloc(hdr, nfs_page_array_len(desc->pg_base,
+ desc->pg_count));
if (!data) {
- nfs_async_read_error(head);
- ret = -ENOMEM;
- goto out;
+ nfs_pagein_error(desc, hdr);
+ return -ENOMEM;
}
- pages = data->pagevec;
+ pages = data->pages.pagevec;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- nfs_list_add_request(req, &data->pages);
+ nfs_list_add_request(req, &hdr->pages);
*pages++ = req->wb_page;
}
- req = nfs_list_entry(data->pages.next);
- nfs_read_rpcsetup(req, data, desc->pg_count, 0);
- list_add(&data->list, res);
- desc->pg_rpc_callops = &nfs_read_full_ops;
-out:
- return ret;
+ nfs_read_rpcsetup(data, desc->pg_count, 0);
+ list_add(&data->list, &hdr->rpc_list);
+ desc->pg_rpc_callops = &nfs_read_common_ops;
+ return 0;
}
-int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, struct list_head *head)
+int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
{
if (desc->pg_bsize < PAGE_CACHE_SIZE)
- return nfs_pagein_multi(desc, head);
- return nfs_pagein_one(desc, head);
+ return nfs_pagein_multi(desc, hdr);
+ return nfs_pagein_one(desc, hdr);
}
static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
- LIST_HEAD(head);
+ struct nfs_read_header *rhdr;
+ struct nfs_pgio_header *hdr;
int ret;
- ret = nfs_generic_pagein(desc, &head);
+ rhdr = nfs_readhdr_alloc();
+ if (!rhdr) {
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+ return -ENOMEM;
+ }
+ hdr = &rhdr->header;
+ nfs_pgheader_init(desc, hdr, nfs_readhdr_free);
+ atomic_inc(&hdr->refcnt);
+ ret = nfs_generic_pagein(desc, hdr);
if (ret == 0)
- ret = nfs_do_multiple_reads(&head, desc->pg_rpc_callops);
+ ret = nfs_do_multiple_reads(&hdr->rpc_list,
+ desc->pg_rpc_callops);
+ if (atomic_dec_and_test(&hdr->refcnt))
+ hdr->completion_ops->completion(hdr);
return ret;
}
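/*
 * The refcounting above follows the usual "parent pin" idiom (sketch, not
 * part of the patch): the extra reference taken before nfs_generic_pagein()
 * keeps hdr alive while the child nfs_read_data structures are created and
 * their RPCs dispatched; each child is expected to take its own reference
 * (compare nfs_writedata_alloc() in the write.c hunks below), so
 *
 *	if (atomic_dec_and_test(&hdr->refcnt))
 *		hdr->completion_ops->completion(hdr);
 *
 * fires exactly once, either immediately when no RPC was issued or when the
 * last read releases its data.
 */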
@@ -389,20 +442,21 @@ static const struct nfs_pageio_ops nfs_pageio_read_ops = {
*/
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
+ struct inode *inode = data->header->inode;
int status;
dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
task->tk_status);
- status = NFS_PROTO(data->inode)->read_done(task, data);
+ status = NFS_PROTO(inode)->read_done(task, data);
if (status != 0)
return status;
- nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);
+ nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, data->res.count);
if (task->tk_status == -ESTALE) {
- set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
- nfs_mark_for_revalidate(data->inode);
+ set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
+ nfs_mark_for_revalidate(inode);
}
return 0;
}
@@ -412,15 +466,13 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
struct nfs_readargs *argp = &data->args;
struct nfs_readres *resp = &data->res;
- if (resp->eof || resp->count == argp->count)
- return;
-
/* This is a short read! */
- nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
+ nfs_inc_stats(data->header->inode, NFSIOS_SHORTREAD);
/* Has the server at least made some progress? */
- if (resp->count == 0)
+ if (resp->count == 0) {
+ nfs_set_pgio_error(data->header, -EIO, argp->offset);
return;
-
+ }
/* Yes, so retry the read at the end of the data */
data->mds_offset += resp->count;
argp->offset += resp->count;
@@ -429,114 +481,46 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
rpc_restart_call_prepare(task);
}
-/*
- * Handle a read reply that fills part of a page.
- */
-static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
+static void nfs_readpage_result_common(struct rpc_task *task, void *calldata)
{
struct nfs_read_data *data = calldata;
-
+ struct nfs_pgio_header *hdr = data->header;
+
+ /* Note: nfs_readpage_result() only returns 0 or -EAGAIN */
if (nfs_readpage_result(task, data) != 0)
return;
if (task->tk_status < 0)
- return;
-
- nfs_readpage_truncate_uninitialised_page(data);
- nfs_readpage_retry(task, data);
+ nfs_set_pgio_error(hdr, task->tk_status, data->args.offset);
+ else if (data->res.eof) {
+ loff_t bound;
+
+ bound = data->args.offset + data->res.count;
+ spin_lock(&hdr->lock);
+ if (bound < hdr->io_start + hdr->good_bytes) {
+ set_bit(NFS_IOHDR_EOF, &hdr->flags);
+ clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
+ hdr->good_bytes = bound - hdr->io_start;
+ }
+ spin_unlock(&hdr->lock);
+ } else if (data->res.count != data->args.count)
+ nfs_readpage_retry(task, data);
}
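/*
 * Worked example for the eof handling above (hypothetical numbers): a 16k
 * pageio with hdr->io_start == 0 and hdr->good_bytes == 16384.  If one of
 * the component reads at args.offset == 8192 comes back with res.count ==
 * 1000 and res.eof set, then bound == 9192 < 16384, so NFS_IOHDR_EOF is set
 * and good_bytes is clamped to 9192; nfs_read_completion() then zeroes
 * everything from byte 9192 onwards instead of retrying the short read.
 */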
-static void nfs_readpage_release_partial(void *calldata)
+static void nfs_readpage_release_common(void *calldata)
{
- struct nfs_read_data *data = calldata;
- struct nfs_page *req = data->req;
- struct page *page = req->wb_page;
- int status = data->task.tk_status;
-
- if (status < 0)
- set_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags);
-
- if (atomic_dec_and_test(&req->wb_complete)) {
- if (!test_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags))
- SetPageUptodate(page);
- nfs_readpage_release(req);
- }
nfs_readdata_release(calldata);
}
void nfs_read_prepare(struct rpc_task *task, void *calldata)
{
struct nfs_read_data *data = calldata;
- NFS_PROTO(data->inode)->read_rpc_prepare(task, data);
-}
-
-static const struct rpc_call_ops nfs_read_partial_ops = {
- .rpc_call_prepare = nfs_read_prepare,
- .rpc_call_done = nfs_readpage_result_partial,
- .rpc_release = nfs_readpage_release_partial,
-};
-
-static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
-{
- unsigned int count = data->res.count;
- unsigned int base = data->args.pgbase;
- struct page **pages;
-
- if (data->res.eof)
- count = data->args.count;
- if (unlikely(count == 0))
- return;
- pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
- base &= ~PAGE_CACHE_MASK;
- count += base;
- for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
- SetPageUptodate(*pages);
- if (count == 0)
- return;
- /* Was this a short read? */
- if (data->res.eof || data->res.count == data->args.count)
- SetPageUptodate(*pages);
-}
-
-/*
- * This is the callback from RPC telling us whether a reply was
- * received or some error occurred (timeout or socket shutdown).
- */
-static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
-{
- struct nfs_read_data *data = calldata;
-
- if (nfs_readpage_result(task, data) != 0)
- return;
- if (task->tk_status < 0)
- return;
- /*
- * Note: nfs_readpage_retry may change the values of
- * data->args. In the multi-page case, we therefore need
- * to ensure that we call nfs_readpage_set_pages_uptodate()
- * first.
- */
- nfs_readpage_truncate_uninitialised_page(data);
- nfs_readpage_set_pages_uptodate(data);
- nfs_readpage_retry(task, data);
-}
-
-static void nfs_readpage_release_full(void *calldata)
-{
- struct nfs_read_data *data = calldata;
-
- while (!list_empty(&data->pages)) {
- struct nfs_page *req = nfs_list_entry(data->pages.next);
-
- nfs_list_remove_request(req);
- nfs_readpage_release(req);
- }
- nfs_readdata_release(calldata);
+ NFS_PROTO(data->header->inode)->read_rpc_prepare(task, data);
}
-static const struct rpc_call_ops nfs_read_full_ops = {
+static const struct rpc_call_ops nfs_read_common_ops = {
.rpc_call_prepare = nfs_read_prepare,
- .rpc_call_done = nfs_readpage_result_full,
- .rpc_release = nfs_readpage_release_full,
+ .rpc_call_done = nfs_readpage_result_common,
+ .rpc_release = nfs_readpage_release_common,
};
/*
@@ -668,11 +652,12 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
if (ret == 0)
goto read_complete; /* all pages were read */
- nfs_pageio_init_read(&pgio, inode);
+ nfs_pageio_init_read(&pgio, inode, &nfs_async_read_completion_ops);
ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
nfs_pageio_complete(&pgio);
+ NFS_I(inode)->read_io += pgio.pg_bytes_written;
npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
@@ -684,7 +669,7 @@ out:
int __init nfs_init_readpagecache(void)
{
nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
- sizeof(struct nfs_read_data),
+ sizeof(struct nfs_read_header),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (nfs_rdata_cachep == NULL)
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 4ac7fca7e4bf..ff656c022684 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -66,6 +66,7 @@
#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_VFS
+#define NFS_TEXT_DATA 1
#ifdef CONFIG_NFS_V3
#define NFS_DEFAULT_VERSION 3
@@ -277,12 +278,22 @@ static match_table_t nfs_vers_tokens = {
{ Opt_vers_err, NULL }
};
+struct nfs_mount_info {
+ void (*fill_super)(struct super_block *, struct nfs_mount_info *);
+ int (*set_security)(struct super_block *, struct dentry *, struct nfs_mount_info *);
+ struct nfs_parsed_mount_data *parsed;
+ struct nfs_clone_mount *cloned;
+ struct nfs_fh *mntfh;
+};
+
static void nfs_umount_begin(struct super_block *);
static int nfs_statfs(struct dentry *, struct kstatfs *);
static int nfs_show_options(struct seq_file *, struct dentry *);
static int nfs_show_devname(struct seq_file *, struct dentry *);
static int nfs_show_path(struct seq_file *, struct dentry *);
static int nfs_show_stats(struct seq_file *, struct dentry *);
+static struct dentry *nfs_fs_mount_common(struct file_system_type *,
+ struct nfs_server *, int, const char *, struct nfs_mount_info *);
static struct dentry *nfs_fs_mount(struct file_system_type *,
int, const char *, void *);
static struct dentry *nfs_xdev_mount(struct file_system_type *fs_type,
@@ -323,12 +334,11 @@ static const struct super_operations nfs_sops = {
};
#ifdef CONFIG_NFS_V4
-static int nfs4_validate_text_mount_data(void *options,
+static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *);
+static int nfs4_validate_mount_data(void *options,
struct nfs_parsed_mount_data *args, const char *dev_name);
static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
- struct nfs_parsed_mount_data *data);
-static struct dentry *nfs4_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data);
+ struct nfs_mount_info *mount_info);
static struct dentry *nfs4_remote_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data);
static struct dentry *nfs4_xdev_mount(struct file_system_type *fs_type,
@@ -342,7 +352,7 @@ static void nfs4_kill_super(struct super_block *sb);
static struct file_system_type nfs4_fs_type = {
.owner = THIS_MODULE,
.name = "nfs4",
- .mount = nfs4_mount,
+ .mount = nfs_fs_mount,
.kill_sb = nfs4_kill_super,
.fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
};
@@ -786,8 +796,8 @@ static void show_pnfs(struct seq_file *m, struct nfs_server *server)
static void show_implementation_id(struct seq_file *m, struct nfs_server *nfss)
{
- if (nfss->nfs_client && nfss->nfs_client->impl_id) {
- struct nfs41_impl_id *impl_id = nfss->nfs_client->impl_id;
+ if (nfss->nfs_client && nfss->nfs_client->cl_implid) {
+ struct nfs41_impl_id *impl_id = nfss->nfs_client->cl_implid;
seq_printf(m, "\n\timpl_id:\tname='%s',domain='%s',"
"date='%llu,%u'",
impl_id->name, impl_id->domain,
@@ -938,7 +948,7 @@ static void nfs_umount_begin(struct super_block *sb)
rpc_killall_tasks(rpc);
}
-static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int version)
+static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void)
{
struct nfs_parsed_mount_data *data;
@@ -953,8 +963,8 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
data->nfs_server.protocol = XPRT_TRANSPORT_TCP;
data->auth_flavors[0] = RPC_AUTH_UNIX;
data->auth_flavor_len = 1;
- data->version = version;
data->minorversion = 0;
+ data->need_mount = true;
data->net = current->nsproxy->net_ns;
security_init_mnt_opts(&data->lsm_opts);
}
@@ -1674,8 +1684,8 @@ static int nfs_walk_authlist(struct nfs_parsed_mount_data *args,
* Use the remote server's MOUNT service to request the NFS file handle
* corresponding to the provided path.
*/
-static int nfs_try_mount(struct nfs_parsed_mount_data *args,
- struct nfs_fh *root_fh)
+static int nfs_request_mount(struct nfs_parsed_mount_data *args,
+ struct nfs_fh *root_fh)
{
rpc_authflavor_t server_authlist[NFS_MAX_SECFLAVORS];
unsigned int server_authlist_len = ARRAY_SIZE(server_authlist);
@@ -1738,6 +1748,26 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
return nfs_walk_authlist(args, &request);
}
+static struct dentry *nfs_try_mount(int flags, const char *dev_name,
+ struct nfs_mount_info *mount_info)
+{
+ int status;
+ struct nfs_server *server;
+
+ if (mount_info->parsed->need_mount) {
+ status = nfs_request_mount(mount_info->parsed, mount_info->mntfh);
+ if (status)
+ return ERR_PTR(status);
+ }
+
+ /* Get a volume representation */
+ server = nfs_create_server(mount_info->parsed, mount_info->mntfh);
+ if (IS_ERR(server))
+ return ERR_CAST(server);
+
+ return nfs_fs_mount_common(&nfs_fs_type, server, flags, dev_name, mount_info);
+}
+
/*
* Split "dev_name" into "hostname:export_path".
*
@@ -1826,10 +1856,10 @@ out_path:
* + breaking back: trying proto=udp after proto=tcp, v2 after v3,
* mountproto=tcp after mountproto=udp, and so on
*/
-static int nfs_validate_mount_data(void *options,
- struct nfs_parsed_mount_data *args,
- struct nfs_fh *mntfh,
- const char *dev_name)
+static int nfs23_validate_mount_data(void *options,
+ struct nfs_parsed_mount_data *args,
+ struct nfs_fh *mntfh,
+ const char *dev_name)
{
struct nfs_mount_data *data = (struct nfs_mount_data *)options;
struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
@@ -1883,6 +1913,7 @@ static int nfs_validate_mount_data(void *options,
args->acregmax = data->acregmax;
args->acdirmin = data->acdirmin;
args->acdirmax = data->acdirmax;
+ args->need_mount = false;
memcpy(sap, &data->addr, sizeof(data->addr));
args->nfs_server.addrlen = sizeof(data->addr);
@@ -1934,43 +1965,8 @@ static int nfs_validate_mount_data(void *options,
}
break;
- default: {
- int status;
-
- if (nfs_parse_mount_options((char *)options, args) == 0)
- return -EINVAL;
-
- if (!nfs_verify_server_address(sap))
- goto out_no_address;
-
- if (args->version == 4)
-#ifdef CONFIG_NFS_V4
- return nfs4_validate_text_mount_data(options,
- args, dev_name);
-#else
- goto out_v4_not_compiled;
-#endif
-
- nfs_set_port(sap, &args->nfs_server.port, 0);
-
- nfs_set_mount_transport_protocol(args);
-
- status = nfs_parse_devname(dev_name,
- &args->nfs_server.hostname,
- PAGE_SIZE,
- &args->nfs_server.export_path,
- NFS_MAXPATHLEN);
- if (!status)
- status = nfs_try_mount(args, mntfh);
-
- kfree(args->nfs_server.export_path);
- args->nfs_server.export_path = NULL;
-
- if (status)
- return status;
-
- break;
- }
+ default:
+ return NFS_TEXT_DATA;
}
#ifndef CONFIG_NFS_V3
@@ -1999,12 +1995,6 @@ out_v3_not_compiled:
return -EPROTONOSUPPORT;
#endif /* !CONFIG_NFS_V3 */
-#ifndef CONFIG_NFS_V4
-out_v4_not_compiled:
- dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
- return -EPROTONOSUPPORT;
-#endif /* !CONFIG_NFS_V4 */
-
out_nomem:
dfprintk(MOUNT, "NFS: not enough memory to handle mount options\n");
return -ENOMEM;
@@ -2018,6 +2008,82 @@ out_invalid_fh:
return -EINVAL;
}
+#ifdef CONFIG_NFS_V4
+static int nfs_validate_mount_data(struct file_system_type *fs_type,
+ void *options,
+ struct nfs_parsed_mount_data *args,
+ struct nfs_fh *mntfh,
+ const char *dev_name)
+{
+ if (fs_type == &nfs_fs_type)
+ return nfs23_validate_mount_data(options, args, mntfh, dev_name);
+ return nfs4_validate_mount_data(options, args, dev_name);
+}
+#else
+static int nfs_validate_mount_data(struct file_system_type *fs_type,
+ void *options,
+ struct nfs_parsed_mount_data *args,
+ struct nfs_fh *mntfh,
+ const char *dev_name)
+{
+ return nfs23_validate_mount_data(options, args, mntfh, dev_name);
+}
+#endif
+
+static int nfs_validate_text_mount_data(void *options,
+ struct nfs_parsed_mount_data *args,
+ const char *dev_name)
+{
+ int port = 0;
+ int max_namelen = PAGE_SIZE;
+ int max_pathlen = NFS_MAXPATHLEN;
+ struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
+
+ if (nfs_parse_mount_options((char *)options, args) == 0)
+ return -EINVAL;
+
+ if (!nfs_verify_server_address(sap))
+ goto out_no_address;
+
+ if (args->version == 4) {
+#ifdef CONFIG_NFS_V4
+ port = NFS_PORT;
+ max_namelen = NFS4_MAXNAMLEN;
+ max_pathlen = NFS4_MAXPATHLEN;
+ nfs_validate_transport_protocol(args);
+ nfs4_validate_mount_flags(args);
+#else
+ goto out_v4_not_compiled;
+#endif /* CONFIG_NFS_V4 */
+ } else
+ nfs_set_mount_transport_protocol(args);
+
+ nfs_set_port(sap, &args->nfs_server.port, port);
+
+ if (args->auth_flavor_len > 1)
+ goto out_bad_auth;
+
+ return nfs_parse_devname(dev_name,
+ &args->nfs_server.hostname,
+ max_namelen,
+ &args->nfs_server.export_path,
+ max_pathlen);
+
+#ifndef CONFIG_NFS_V4
+out_v4_not_compiled:
+ dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
+ return -EPROTONOSUPPORT;
+#endif /* !CONFIG_NFS_V4 */
+
+out_no_address:
+ dfprintk(MOUNT, "NFS: mount program didn't pass remote address\n");
+ return -EINVAL;
+
+out_bad_auth:
+ dfprintk(MOUNT, "NFS: Too many RPC auth flavours specified\n");
+ return -EINVAL;
+}
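/*
 * How the NFS_TEXT_DATA sentinel is meant to be consumed (sketch based on
 * the hunks in this patch): the binary-blob parsers,
 * nfs23_validate_mount_data() and nfs4_validate_mount_data(), now return
 * NFS_TEXT_DATA for any mount data version they do not recognise, and the
 * caller falls back to the common string parser above:
 *
 *	error = nfs_validate_mount_data(fs_type, raw_data, args, mntfh, dev_name);
 *	if (error == NFS_TEXT_DATA)
 *		error = nfs_validate_text_mount_data(raw_data, args, dev_name);
 *
 * which is exactly the sequence used by nfs_fs_mount() further down.
 */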
+
static int
nfs_compare_remount_data(struct nfs_server *nfss,
struct nfs_parsed_mount_data *data)
@@ -2129,8 +2195,9 @@ static inline void nfs_initialise_sb(struct super_block *sb)
* Finish setting up an NFS2/3 superblock
*/
static void nfs_fill_super(struct super_block *sb,
- struct nfs_parsed_mount_data *data)
+ struct nfs_mount_info *mount_info)
{
+ struct nfs_parsed_mount_data *data = mount_info->parsed;
struct nfs_server *server = NFS_SB(sb);
sb->s_blocksize_bits = 0;
@@ -2154,8 +2221,9 @@ static void nfs_fill_super(struct super_block *sb,
* Finish setting up a cloned NFS2/3 superblock
*/
static void nfs_clone_super(struct super_block *sb,
- const struct super_block *old_sb)
+ struct nfs_mount_info *mount_info)
{
+ const struct super_block *old_sb = mount_info->cloned->sb;
struct nfs_server *server = NFS_SB(sb);
sb->s_blocksize_bits = old_sb->s_blocksize_bits;
@@ -2278,52 +2346,70 @@ static int nfs_compare_super(struct super_block *sb, void *data)
return nfs_compare_mount_options(sb, server, mntflags);
}
+#ifdef CONFIG_NFS_FSCACHE
+static void nfs_get_cache_cookie(struct super_block *sb,
+ struct nfs_parsed_mount_data *parsed,
+ struct nfs_clone_mount *cloned)
+{
+ char *uniq = NULL;
+ int ulen = 0;
+
+ if (parsed && parsed->fscache_uniq) {
+ uniq = parsed->fscache_uniq;
+ ulen = strlen(parsed->fscache_uniq);
+ } else if (cloned) {
+ struct nfs_server *mnt_s = NFS_SB(cloned->sb);
+ if (mnt_s->fscache_key) {
+ uniq = mnt_s->fscache_key->key.uniquifier;
+ ulen = mnt_s->fscache_key->key.uniq_len;
+ }
+ }
+
+ nfs_fscache_get_super_cookie(sb, uniq, ulen);
+}
+#else
+static void nfs_get_cache_cookie(struct super_block *sb,
+ struct nfs_parsed_mount_data *parsed,
+ struct nfs_clone_mount *cloned)
+{
+}
+#endif
+
static int nfs_bdi_register(struct nfs_server *server)
{
return bdi_register_dev(&server->backing_dev_info, server->s_dev);
}
-static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data)
+static int nfs_set_sb_security(struct super_block *s, struct dentry *mntroot,
+ struct nfs_mount_info *mount_info)
+{
+ return security_sb_set_mnt_opts(s, &mount_info->parsed->lsm_opts);
+}
+
+static int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot,
+ struct nfs_mount_info *mount_info)
+{
+ /* clone any lsm security options from the parent to the new sb */
+ security_sb_clone_mnt_opts(mount_info->cloned->sb, s);
+ if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops)
+ return -ESTALE;
+ return 0;
+}
+
+static struct dentry *nfs_fs_mount_common(struct file_system_type *fs_type,
+ struct nfs_server *server,
+ int flags, const char *dev_name,
+ struct nfs_mount_info *mount_info)
{
- struct nfs_server *server = NULL;
struct super_block *s;
- struct nfs_parsed_mount_data *data;
- struct nfs_fh *mntfh;
struct dentry *mntroot = ERR_PTR(-ENOMEM);
int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
struct nfs_sb_mountdata sb_mntdata = {
.mntflags = flags,
+ .server = server,
};
int error;
- data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
- mntfh = nfs_alloc_fhandle();
- if (data == NULL || mntfh == NULL)
- goto out;
-
- /* Validate the mount data */
- error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
- if (error < 0) {
- mntroot = ERR_PTR(error);
- goto out;
- }
-
-#ifdef CONFIG_NFS_V4
- if (data->version == 4) {
- mntroot = nfs4_try_mount(flags, dev_name, data);
- goto out;
- }
-#endif /* CONFIG_NFS_V4 */
-
- /* Get a volume representation */
- server = nfs_create_server(data, mntfh);
- if (IS_ERR(server)) {
- mntroot = ERR_CAST(server);
- goto out;
- }
- sb_mntdata.server = server;
-
if (server->flags & NFS_MOUNT_UNSHARED)
compare_super = NULL;
@@ -2351,23 +2437,21 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
if (!s->s_root) {
/* initial superblock/root creation */
- nfs_fill_super(s, data);
- nfs_fscache_get_super_cookie(s, data->fscache_uniq, NULL);
+ mount_info->fill_super(s, mount_info);
+ nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
}
- mntroot = nfs_get_root(s, mntfh, dev_name);
+ mntroot = nfs_get_root(s, mount_info->mntfh, dev_name);
if (IS_ERR(mntroot))
goto error_splat_super;
- error = security_sb_set_mnt_opts(s, &data->lsm_opts);
+ error = mount_info->set_security(s, mntroot, mount_info);
if (error)
goto error_splat_root;
s->s_flags |= MS_ACTIVE;
out:
- nfs_free_parsed_mount_data(data);
- nfs_free_fhandle(mntfh);
return mntroot;
out_err_nosb:
@@ -2385,6 +2469,43 @@ error_splat_bdi:
goto out;
}
+static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data)
+{
+ struct nfs_mount_info mount_info = {
+ .fill_super = nfs_fill_super,
+ .set_security = nfs_set_sb_security,
+ };
+ struct dentry *mntroot = ERR_PTR(-ENOMEM);
+ int error;
+
+ mount_info.parsed = nfs_alloc_parsed_mount_data();
+ mount_info.mntfh = nfs_alloc_fhandle();
+ if (mount_info.parsed == NULL || mount_info.mntfh == NULL)
+ goto out;
+
+ /* Validate the mount data */
+ error = nfs_validate_mount_data(fs_type, raw_data, mount_info.parsed, mount_info.mntfh, dev_name);
+ if (error == NFS_TEXT_DATA)
+ error = nfs_validate_text_mount_data(raw_data, mount_info.parsed, dev_name);
+ if (error < 0) {
+ mntroot = ERR_PTR(error);
+ goto out;
+ }
+
+#ifdef CONFIG_NFS_V4
+ if (mount_info.parsed->version == 4)
+ mntroot = nfs4_try_mount(flags, dev_name, &mount_info);
+ else
+#endif /* CONFIG_NFS_V4 */
+ mntroot = nfs_try_mount(flags, dev_name, &mount_info);
+
+out:
+ nfs_free_parsed_mount_data(mount_info.parsed);
+ nfs_free_fhandle(mount_info.mntfh);
+ return mntroot;
+}
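/*
 * Summary sketch of the reworked mount plumbing (not part of the patch):
 * every entry point now just fills in a struct nfs_mount_info and lets
 * nfs_fs_mount_common() do the sget()/bdi/root work through two callbacks,
 * roughly
 *
 *	struct nfs_mount_info mount_info = {
 *		.fill_super   = nfs_fill_super,
 *		.set_security = nfs_set_sb_security,
 *	};
 *	mntroot = nfs_fs_mount_common(fs_type, server, flags, dev_name, &mount_info);
 *
 * with nfs_clone_super/nfs4_fill_super and nfs_clone_sb_security substituted
 * as appropriate.  nfs_fs_mount(), nfs_xdev_mount(), nfs4_remote_mount(),
 * nfs4_xdev_mount() and nfs4_remote_referral_mount() all reduce to this.
 */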
+
/*
* Ensure that we unregister the bdi before kill_anon_super
* releases the device name
@@ -2409,93 +2530,51 @@ static void nfs_kill_super(struct super_block *s)
}
/*
- * Clone an NFS2/3 server record on xdev traversal (FSID-change)
+ * Clone an NFS2/3/4 server record on xdev traversal (FSID-change)
*/
static struct dentry *
-nfs_xdev_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *raw_data)
+nfs_xdev_mount_common(struct file_system_type *fs_type, int flags,
+ const char *dev_name, struct nfs_mount_info *mount_info)
{
- struct nfs_clone_mount *data = raw_data;
- struct super_block *s;
+ struct nfs_clone_mount *data = mount_info->cloned;
struct nfs_server *server;
- struct dentry *mntroot;
- int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
- struct nfs_sb_mountdata sb_mntdata = {
- .mntflags = flags,
- };
+ struct dentry *mntroot = ERR_PTR(-ENOMEM);
int error;
- dprintk("--> nfs_xdev_mount()\n");
+ dprintk("--> nfs_xdev_mount_common()\n");
+
+ mount_info->mntfh = data->fh;
/* create a new volume representation */
server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
if (IS_ERR(server)) {
error = PTR_ERR(server);
- goto out_err_noserver;
- }
- sb_mntdata.server = server;
-
- if (server->flags & NFS_MOUNT_UNSHARED)
- compare_super = NULL;
-
- /* -o noac implies -o sync */
- if (server->flags & NFS_MOUNT_NOAC)
- sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
- /* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
- if (IS_ERR(s)) {
- error = PTR_ERR(s);
- goto out_err_nosb;
- }
-
- if (s->s_fs_info != server) {
- nfs_free_server(server);
- server = NULL;
- } else {
- error = nfs_bdi_register(server);
- if (error)
- goto error_splat_bdi;
- }
-
- if (!s->s_root) {
- /* initial superblock/root creation */
- nfs_clone_super(s, data->sb);
- nfs_fscache_get_super_cookie(s, NULL, data);
- }
-
- mntroot = nfs_get_root(s, data->fh, dev_name);
- if (IS_ERR(mntroot)) {
- error = PTR_ERR(mntroot);
- goto error_splat_super;
- }
- if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
- dput(mntroot);
- error = -ESTALE;
- goto error_splat_super;
+ goto out_err;
}
- s->s_flags |= MS_ACTIVE;
-
- /* clone any lsm security options from the parent to the new sb */
- security_sb_clone_mnt_opts(data->sb, s);
-
- dprintk("<-- nfs_xdev_mount() = 0\n");
+ mntroot = nfs_fs_mount_common(fs_type, server, flags, dev_name, mount_info);
+ dprintk("<-- nfs_xdev_mount_common() = 0\n");
+out:
return mntroot;
-out_err_nosb:
- nfs_free_server(server);
-out_err_noserver:
- dprintk("<-- nfs_xdev_mount() = %d [error]\n", error);
- return ERR_PTR(error);
+out_err:
+ dprintk("<-- nfs_xdev_mount_common() = %d [error]\n", error);
+ goto out;
+}
-error_splat_super:
- if (server && !s->s_root)
- bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
- deactivate_locked_super(s);
- dprintk("<-- nfs_xdev_mount() = %d [splat]\n", error);
- return ERR_PTR(error);
+/*
+ * Clone an NFS2/3 server record on xdev traversal (FSID-change)
+ */
+static struct dentry *
+nfs_xdev_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *raw_data)
+{
+ struct nfs_mount_info mount_info = {
+ .fill_super = nfs_clone_super,
+ .set_security = nfs_clone_sb_security,
+ .cloned = raw_data,
+ };
+ return nfs_xdev_mount_common(&nfs_fs_type, flags, dev_name, &mount_info);
}
#ifdef CONFIG_NFS_V4
@@ -2504,8 +2583,9 @@ error_splat_bdi:
* Finish setting up a cloned NFS4 superblock
*/
static void nfs4_clone_super(struct super_block *sb,
- const struct super_block *old_sb)
+ struct nfs_mount_info *mount_info)
{
+ const struct super_block *old_sb = mount_info->cloned->sb;
sb->s_blocksize_bits = old_sb->s_blocksize_bits;
sb->s_blocksize = old_sb->s_blocksize;
sb->s_maxbytes = old_sb->s_maxbytes;
@@ -2523,7 +2603,8 @@ static void nfs4_clone_super(struct super_block *sb,
/*
* Set up an NFS4 superblock
*/
-static void nfs4_fill_super(struct super_block *sb)
+static void nfs4_fill_super(struct super_block *sb,
+ struct nfs_mount_info *mount_info)
{
sb->s_time_gran = 1;
sb->s_op = &nfs4_sops;
@@ -2542,37 +2623,6 @@ static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *args)
NFS_MOUNT_LOCAL_FLOCK|NFS_MOUNT_LOCAL_FCNTL);
}
-static int nfs4_validate_text_mount_data(void *options,
- struct nfs_parsed_mount_data *args,
- const char *dev_name)
-{
- struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
-
- nfs_set_port(sap, &args->nfs_server.port, NFS_PORT);
-
- nfs_validate_transport_protocol(args);
-
- nfs4_validate_mount_flags(args);
-
- if (args->version != 4) {
- dfprintk(MOUNT,
- "NFS4: Illegal mount version\n");
- return -EINVAL;
- }
-
- if (args->auth_flavor_len > 1) {
- dfprintk(MOUNT,
- "NFS4: Too many RPC auth flavours specified\n");
- return -EINVAL;
- }
-
- return nfs_parse_devname(dev_name,
- &args->nfs_server.hostname,
- NFS4_MAXNAMLEN,
- &args->nfs_server.export_path,
- NFS4_MAXPATHLEN);
-}
-
/*
* Validate NFSv4 mount options
*/
@@ -2643,13 +2693,7 @@ static int nfs4_validate_mount_data(void *options,
break;
default:
- if (nfs_parse_mount_options((char *)options, args) == 0)
- return -EINVAL;
-
- if (!nfs_verify_server_address(sap))
- return -EINVAL;
-
- return nfs4_validate_text_mount_data(options, args, dev_name);
+ return NFS_TEXT_DATA;
}
return 0;
@@ -2673,91 +2717,26 @@ out_no_address:
*/
static struct dentry *
nfs4_remote_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *raw_data)
+ const char *dev_name, void *info)
{
- struct nfs_parsed_mount_data *data = raw_data;
- struct super_block *s;
+ struct nfs_mount_info *mount_info = info;
struct nfs_server *server;
- struct nfs_fh *mntfh;
- struct dentry *mntroot;
- int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
- struct nfs_sb_mountdata sb_mntdata = {
- .mntflags = flags,
- };
- int error = -ENOMEM;
+ struct dentry *mntroot = ERR_PTR(-ENOMEM);
- mntfh = nfs_alloc_fhandle();
- if (data == NULL || mntfh == NULL)
- goto out;
+ mount_info->fill_super = nfs4_fill_super;
+ mount_info->set_security = nfs_set_sb_security;
/* Get a volume representation */
- server = nfs4_create_server(data, mntfh);
+ server = nfs4_create_server(mount_info->parsed, mount_info->mntfh);
if (IS_ERR(server)) {
- error = PTR_ERR(server);
+ mntroot = ERR_CAST(server);
goto out;
}
- sb_mntdata.server = server;
- if (server->flags & NFS4_MOUNT_UNSHARED)
- compare_super = NULL;
-
- /* -o noac implies -o sync */
- if (server->flags & NFS_MOUNT_NOAC)
- sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
- /* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
- if (IS_ERR(s)) {
- error = PTR_ERR(s);
- goto out_free;
- }
-
- if (s->s_fs_info != server) {
- nfs_free_server(server);
- server = NULL;
- } else {
- error = nfs_bdi_register(server);
- if (error)
- goto error_splat_bdi;
- }
-
- if (!s->s_root) {
- /* initial superblock/root creation */
- nfs4_fill_super(s);
- nfs_fscache_get_super_cookie(s, data->fscache_uniq, NULL);
- }
-
- mntroot = nfs4_get_root(s, mntfh, dev_name);
- if (IS_ERR(mntroot)) {
- error = PTR_ERR(mntroot);
- goto error_splat_super;
- }
-
- error = security_sb_set_mnt_opts(s, &data->lsm_opts);
- if (error)
- goto error_splat_root;
-
- s->s_flags |= MS_ACTIVE;
-
- nfs_free_fhandle(mntfh);
- return mntroot;
+ mntroot = nfs_fs_mount_common(fs_type, server, flags, dev_name, mount_info);
out:
- nfs_free_fhandle(mntfh);
- return ERR_PTR(error);
-
-out_free:
- nfs_free_server(server);
- goto out;
-
-error_splat_root:
- dput(mntroot);
-error_splat_super:
- if (server && !s->s_root)
- bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
- deactivate_locked_super(s);
- goto out;
+ return mntroot;
}
static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
@@ -2869,17 +2848,18 @@ static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
}
static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
- struct nfs_parsed_mount_data *data)
+ struct nfs_mount_info *mount_info)
{
char *export_path;
struct vfsmount *root_mnt;
struct dentry *res;
+ struct nfs_parsed_mount_data *data = mount_info->parsed;
dfprintk(MOUNT, "--> nfs4_try_mount()\n");
export_path = data->nfs_server.export_path;
data->nfs_server.export_path = "/";
- root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, data,
+ root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
data->nfs_server.hostname);
data->nfs_server.export_path = export_path;
@@ -2891,38 +2871,6 @@ static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
return res;
}
-/*
- * Get the superblock for an NFS4 mountpoint
- */
-static struct dentry *nfs4_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data)
-{
- struct nfs_parsed_mount_data *data;
- int error = -ENOMEM;
- struct dentry *res = ERR_PTR(-ENOMEM);
-
- data = nfs_alloc_parsed_mount_data(4);
- if (data == NULL)
- goto out;
-
- /* Validate the mount data */
- error = nfs4_validate_mount_data(raw_data, data, dev_name);
- if (error < 0) {
- res = ERR_PTR(error);
- goto out;
- }
-
- res = nfs4_try_mount(flags, dev_name, data);
- if (IS_ERR(res))
- error = PTR_ERR(res);
-
-out:
- nfs_free_parsed_mount_data(data);
- dprintk("<-- nfs4_mount() = %d%s\n", error,
- error != 0 ? " [error]" : "");
- return res;
-}
-
static void nfs4_kill_super(struct super_block *sb)
{
struct nfs_server *server = NFS_SB(sb);
@@ -2942,181 +2890,43 @@ static struct dentry *
nfs4_xdev_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data)
{
- struct nfs_clone_mount *data = raw_data;
- struct super_block *s;
- struct nfs_server *server;
- struct dentry *mntroot;
- int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
- struct nfs_sb_mountdata sb_mntdata = {
- .mntflags = flags,
+ struct nfs_mount_info mount_info = {
+ .fill_super = nfs4_clone_super,
+ .set_security = nfs_clone_sb_security,
+ .cloned = raw_data,
};
- int error;
-
- dprintk("--> nfs4_xdev_mount()\n");
-
- /* create a new volume representation */
- server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
- if (IS_ERR(server)) {
- error = PTR_ERR(server);
- goto out_err_noserver;
- }
- sb_mntdata.server = server;
-
- if (server->flags & NFS4_MOUNT_UNSHARED)
- compare_super = NULL;
-
- /* -o noac implies -o sync */
- if (server->flags & NFS_MOUNT_NOAC)
- sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
- /* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
- if (IS_ERR(s)) {
- error = PTR_ERR(s);
- goto out_err_nosb;
- }
-
- if (s->s_fs_info != server) {
- nfs_free_server(server);
- server = NULL;
- } else {
- error = nfs_bdi_register(server);
- if (error)
- goto error_splat_bdi;
- }
-
- if (!s->s_root) {
- /* initial superblock/root creation */
- nfs4_clone_super(s, data->sb);
- nfs_fscache_get_super_cookie(s, NULL, data);
- }
-
- mntroot = nfs4_get_root(s, data->fh, dev_name);
- if (IS_ERR(mntroot)) {
- error = PTR_ERR(mntroot);
- goto error_splat_super;
- }
- if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
- dput(mntroot);
- error = -ESTALE;
- goto error_splat_super;
- }
-
- s->s_flags |= MS_ACTIVE;
-
- security_sb_clone_mnt_opts(data->sb, s);
-
- dprintk("<-- nfs4_xdev_mount() = 0\n");
- return mntroot;
-
-out_err_nosb:
- nfs_free_server(server);
-out_err_noserver:
- dprintk("<-- nfs4_xdev_mount() = %d [error]\n", error);
- return ERR_PTR(error);
-
-error_splat_super:
- if (server && !s->s_root)
- bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
- deactivate_locked_super(s);
- dprintk("<-- nfs4_xdev_mount() = %d [splat]\n", error);
- return ERR_PTR(error);
+ return nfs_xdev_mount_common(&nfs4_fs_type, flags, dev_name, &mount_info);
}
static struct dentry *
nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data)
{
- struct nfs_clone_mount *data = raw_data;
- struct super_block *s;
- struct nfs_server *server;
- struct dentry *mntroot;
- struct nfs_fh *mntfh;
- int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
- struct nfs_sb_mountdata sb_mntdata = {
- .mntflags = flags,
+ struct nfs_mount_info mount_info = {
+ .fill_super = nfs4_fill_super,
+ .set_security = nfs_clone_sb_security,
+ .cloned = raw_data,
};
- int error = -ENOMEM;
+ struct nfs_server *server;
+ struct dentry *mntroot = ERR_PTR(-ENOMEM);
dprintk("--> nfs4_referral_get_sb()\n");
- mntfh = nfs_alloc_fhandle();
- if (mntfh == NULL)
- goto out_err_nofh;
+ mount_info.mntfh = nfs_alloc_fhandle();
+ if (mount_info.cloned == NULL || mount_info.mntfh == NULL)
+ goto out;
/* create a new volume representation */
- server = nfs4_create_referral_server(data, mntfh);
+ server = nfs4_create_referral_server(mount_info.cloned, mount_info.mntfh);
if (IS_ERR(server)) {
- error = PTR_ERR(server);
- goto out_err_noserver;
- }
- sb_mntdata.server = server;
-
- if (server->flags & NFS4_MOUNT_UNSHARED)
- compare_super = NULL;
-
- /* -o noac implies -o sync */
- if (server->flags & NFS_MOUNT_NOAC)
- sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
- /* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
- if (IS_ERR(s)) {
- error = PTR_ERR(s);
- goto out_err_nosb;
- }
-
- if (s->s_fs_info != server) {
- nfs_free_server(server);
- server = NULL;
- } else {
- error = nfs_bdi_register(server);
- if (error)
- goto error_splat_bdi;
- }
-
- if (!s->s_root) {
- /* initial superblock/root creation */
- nfs4_fill_super(s);
- nfs_fscache_get_super_cookie(s, NULL, data);
- }
-
- mntroot = nfs4_get_root(s, mntfh, dev_name);
- if (IS_ERR(mntroot)) {
- error = PTR_ERR(mntroot);
- goto error_splat_super;
- }
- if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
- dput(mntroot);
- error = -ESTALE;
- goto error_splat_super;
+ mntroot = ERR_CAST(server);
+ goto out;
}
- s->s_flags |= MS_ACTIVE;
-
- security_sb_clone_mnt_opts(data->sb, s);
-
- nfs_free_fhandle(mntfh);
- dprintk("<-- nfs4_referral_get_sb() = 0\n");
+ mntroot = nfs_fs_mount_common(&nfs4_fs_type, server, flags, dev_name, &mount_info);
+out:
+ nfs_free_fhandle(mount_info.mntfh);
return mntroot;
-
-out_err_nosb:
- nfs_free_server(server);
-out_err_noserver:
- nfs_free_fhandle(mntfh);
-out_err_nofh:
- dprintk("<-- nfs4_referral_get_sb() = %d [error]\n", error);
- return ERR_PTR(error);
-
-error_splat_super:
- if (server && !s->s_root)
- bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
- deactivate_locked_super(s);
- nfs_free_fhandle(mntfh);
- dprintk("<-- nfs4_referral_get_sb() = %d [splat]\n", error);
- return ERR_PTR(error);
}
/*
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c07462320f6b..e6fe3d69d14c 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -39,20 +39,20 @@
/*
* Local function declarations
*/
-static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
- struct inode *inode, int ioflags);
static void nfs_redirty_request(struct nfs_page *req);
-static const struct rpc_call_ops nfs_write_partial_ops;
-static const struct rpc_call_ops nfs_write_full_ops;
+static const struct rpc_call_ops nfs_write_common_ops;
static const struct rpc_call_ops nfs_commit_ops;
+static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
+static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
+static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;
-struct nfs_write_data *nfs_commitdata_alloc(void)
+struct nfs_commit_data *nfs_commitdata_alloc(void)
{
- struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
+ struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
if (p) {
memset(p, 0, sizeof(*p));
@@ -62,46 +62,73 @@ struct nfs_write_data *nfs_commitdata_alloc(void)
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
-void nfs_commit_free(struct nfs_write_data *p)
+void nfs_commit_free(struct nfs_commit_data *p)
{
- if (p && (p->pagevec != &p->page_array[0]))
- kfree(p->pagevec);
mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);
-struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
+struct nfs_write_header *nfs_writehdr_alloc(void)
{
- struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
+ struct nfs_write_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
if (p) {
+ struct nfs_pgio_header *hdr = &p->header;
+
memset(p, 0, sizeof(*p));
- INIT_LIST_HEAD(&p->pages);
- p->npages = pagecount;
- if (pagecount <= ARRAY_SIZE(p->page_array))
- p->pagevec = p->page_array;
- else {
- p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
- if (!p->pagevec) {
- mempool_free(p, nfs_wdata_mempool);
- p = NULL;
- }
- }
+ INIT_LIST_HEAD(&hdr->pages);
+ INIT_LIST_HEAD(&hdr->rpc_list);
+ spin_lock_init(&hdr->lock);
+ atomic_set(&hdr->refcnt, 0);
}
return p;
}
-void nfs_writedata_free(struct nfs_write_data *p)
+static struct nfs_write_data *nfs_writedata_alloc(struct nfs_pgio_header *hdr,
+ unsigned int pagecount)
+{
+ struct nfs_write_data *data, *prealloc;
+
+ prealloc = &container_of(hdr, struct nfs_write_header, header)->rpc_data;
+ if (prealloc->header == NULL)
+ data = prealloc;
+ else
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto out;
+
+ if (nfs_pgarray_set(&data->pages, pagecount)) {
+ data->header = hdr;
+ atomic_inc(&hdr->refcnt);
+ } else {
+ if (data != prealloc)
+ kfree(data);
+ data = NULL;
+ }
+out:
+ return data;
+}
+
+void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
- if (p && (p->pagevec != &p->page_array[0]))
- kfree(p->pagevec);
- mempool_free(p, nfs_wdata_mempool);
+ struct nfs_write_header *whdr = container_of(hdr, struct nfs_write_header, header);
+ mempool_free(whdr, nfs_wdata_mempool);
}
void nfs_writedata_release(struct nfs_write_data *wdata)
{
+ struct nfs_pgio_header *hdr = wdata->header;
+ struct nfs_write_header *write_header = container_of(hdr, struct nfs_write_header, header);
+
put_nfs_open_context(wdata->args.context);
- nfs_writedata_free(wdata);
+ if (wdata->pages.pagevec != wdata->pages.page_array)
+ kfree(wdata->pages.pagevec);
+ if (wdata != &write_header->rpc_data)
+ kfree(wdata);
+ else
+ wdata->header = NULL;
+ if (atomic_dec_and_test(&hdr->refcnt))
+ hdr->completion_ops->completion(hdr);
}
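/*
 * Note on the allocator/release pairing above (sketch): the first
 * nfs_write_data for a header is the copy embedded in struct nfs_write_header
 * (rpc_data), handed out while its ->header is still NULL; later allocations
 * are kzalloc()ed.  Release mirrors that: the embedded copy is "freed" by
 * resetting wdata->header to NULL, extra copies are kfree()d, and each
 * release drops the hdr reference taken in nfs_writedata_alloc(), so
 * ->completion() runs once the last write data is gone.
 */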
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
@@ -203,7 +230,6 @@ static int nfs_set_page_writeback(struct page *page)
struct inode *inode = page->mapping->host;
struct nfs_server *nfss = NFS_SERVER(inode);
- page_cache_get(page);
if (atomic_long_inc_return(&nfss->writeback) >
NFS_CONGESTION_ON_THRESH) {
set_bdi_congested(&nfss->backing_dev_info,
@@ -219,7 +245,6 @@ static void nfs_end_page_writeback(struct page *page)
struct nfs_server *nfss = NFS_SERVER(inode);
end_page_writeback(page);
- page_cache_release(page);
if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}
@@ -235,10 +260,10 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblo
req = nfs_page_find_request_locked(page);
if (req == NULL)
break;
- if (nfs_lock_request_dontget(req))
+ if (nfs_lock_request(req))
break;
/* Note: If we hold the page lock, as is the case in nfs_writepage,
- * then the call to nfs_lock_request_dontget() will always
+ * then the call to nfs_lock_request() will always
* succeed provided that someone hasn't already marked the
* request as dirty (in which case we don't care).
*/
@@ -310,7 +335,8 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc
struct nfs_pageio_descriptor pgio;
int err;
- nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
+ nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
+ &nfs_async_write_completion_ops);
err = nfs_do_writepage(page, wbc, &pgio);
nfs_pageio_complete(&pgio);
if (err < 0)
@@ -353,7 +379,8 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
- nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
+ nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
+ &nfs_async_write_completion_ops);
err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
nfs_pageio_complete(&pgio);
@@ -379,7 +406,7 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
struct nfs_inode *nfsi = NFS_I(inode);
/* Lock the request! */
- nfs_lock_request_dontget(req);
+ nfs_lock_request(req);
spin_lock(&inode->i_lock);
if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
@@ -421,65 +448,88 @@ nfs_mark_request_dirty(struct nfs_page *req)
/**
* nfs_request_add_commit_list - add request to a commit list
* @req: pointer to a struct nfs_page
- * @head: commit list head
+ * @dst: commit list head
+ * @cinfo: holds list lock and accounting info
*
- * This sets the PG_CLEAN bit, updates the inode global count of
+ * This sets the PG_CLEAN bit, updates the cinfo count of
* number of outstanding requests requiring a commit as well as
* the MM page stats.
*
- * The caller must _not_ hold the inode->i_lock, but must be
+ * The caller must _not_ hold the cinfo->lock, but must be
* holding the nfs_page lock.
*/
void
-nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head)
+nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
+ struct nfs_commit_info *cinfo)
{
- struct inode *inode = req->wb_context->dentry->d_inode;
-
set_bit(PG_CLEAN, &(req)->wb_flags);
- spin_lock(&inode->i_lock);
- nfs_list_add_request(req, head);
- NFS_I(inode)->ncommit++;
- spin_unlock(&inode->i_lock);
- inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
- inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
- __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ spin_lock(cinfo->lock);
+ nfs_list_add_request(req, dst);
+ cinfo->mds->ncommit++;
+ spin_unlock(cinfo->lock);
+ if (!cinfo->dreq) {
+ inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+ inc_bdi_stat(req->wb_page->mapping->backing_dev_info,
+ BDI_RECLAIMABLE);
+ __mark_inode_dirty(req->wb_context->dentry->d_inode,
+ I_DIRTY_DATASYNC);
+ }
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
/**
* nfs_request_remove_commit_list - Remove request from a commit list
* @req: pointer to a nfs_page
+ * @cinfo: holds list lock and accounting info
*
- * This clears the PG_CLEAN bit, and updates the inode global count of
+ * This clears the PG_CLEAN bit, and updates the cinfo's count of
* number of outstanding requests requiring a commit
* It does not update the MM page stats.
*
- * The caller _must_ hold the inode->i_lock and the nfs_page lock.
+ * The caller _must_ hold the cinfo->lock and the nfs_page lock.
*/
void
-nfs_request_remove_commit_list(struct nfs_page *req)
+nfs_request_remove_commit_list(struct nfs_page *req,
+ struct nfs_commit_info *cinfo)
{
- struct inode *inode = req->wb_context->dentry->d_inode;
-
if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
return;
nfs_list_remove_request(req);
- NFS_I(inode)->ncommit--;
+ cinfo->mds->ncommit--;
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
+static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
+ struct inode *inode)
+{
+ cinfo->lock = &inode->i_lock;
+ cinfo->mds = &NFS_I(inode)->commit_info;
+ cinfo->ds = pnfs_get_ds_info(inode);
+ cinfo->dreq = NULL;
+ cinfo->completion_ops = &nfs_commit_completion_ops;
+}
+
+void nfs_init_cinfo(struct nfs_commit_info *cinfo,
+ struct inode *inode,
+ struct nfs_direct_req *dreq)
+{
+ if (dreq)
+ nfs_init_cinfo_from_dreq(cinfo, dreq);
+ else
+ nfs_init_cinfo_from_inode(cinfo, inode);
+}
+EXPORT_SYMBOL_GPL(nfs_init_cinfo);
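/*
 * What nfs_commit_info buys (sketch based on this patch): code that used to
 * reach for inode->i_lock and NFS_I(inode)->ncommit/commit_list now goes
 * through cinfo->lock and cinfo->mds, so the same commit-list helpers can be
 * pointed at a pNFS data-server list (cinfo->ds) or an O_DIRECT request
 * (cinfo->dreq).  The !cinfo->dreq checks in nfs_request_add_commit_list()
 * above and in nfs_scan_commit_list() below confine the MM unstable-page
 * accounting and the "stop at max" behaviour to the buffered path.
 */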
/*
* Add a request to the inode's commit list.
*/
-static void
-nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+void
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
- struct inode *inode = req->wb_context->dentry->d_inode;
-
- if (pnfs_mark_request_commit(req, lseg))
+ if (pnfs_mark_request_commit(req, lseg, cinfo))
return;
- nfs_request_add_commit_list(req, &NFS_I(inode)->commit_list);
+ nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
}
static void
@@ -494,11 +544,13 @@ nfs_clear_request_commit(struct nfs_page *req)
{
if (test_bit(PG_CLEAN, &req->wb_flags)) {
struct inode *inode = req->wb_context->dentry->d_inode;
+ struct nfs_commit_info cinfo;
- if (!pnfs_clear_request_commit(req)) {
- spin_lock(&inode->i_lock);
- nfs_request_remove_commit_list(req);
- spin_unlock(&inode->i_lock);
+ nfs_init_cinfo_from_inode(&cinfo, inode);
+ if (!pnfs_clear_request_commit(req, &cinfo)) {
+ spin_lock(cinfo.lock);
+ nfs_request_remove_commit_list(req, &cinfo);
+ spin_unlock(cinfo.lock);
}
nfs_clear_page_commit(req->wb_page);
}
@@ -508,28 +560,25 @@ static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
if (data->verf.committed == NFS_DATA_SYNC)
- return data->lseg == NULL;
- else
- return data->verf.committed != NFS_FILE_SYNC;
+ return data->header->lseg == NULL;
+ return data->verf.committed != NFS_FILE_SYNC;
}
-static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req,
- struct nfs_write_data *data)
+#else
+static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
+ struct inode *inode)
{
- if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
- nfs_mark_request_commit(req, data->lseg);
- return 1;
- }
- if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
- nfs_mark_request_dirty(req);
- return 1;
- }
- return 0;
}
-#else
-static void
-nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+
+void nfs_init_cinfo(struct nfs_commit_info *cinfo,
+ struct inode *inode,
+ struct nfs_direct_req *dreq)
+{
+}
+
+void
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
}
@@ -544,25 +593,57 @@ int nfs_write_need_commit(struct nfs_write_data *data)
return 0;
}
-static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req,
- struct nfs_write_data *data)
+#endif
+
+static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
- return 0;
+ struct nfs_commit_info cinfo;
+ unsigned long bytes = 0;
+
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+ goto out;
+ nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
+ while (!list_empty(&hdr->pages)) {
+ struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+
+ bytes += req->wb_bytes;
+ nfs_list_remove_request(req);
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
+ (hdr->good_bytes < bytes)) {
+ nfs_set_pageerror(req->wb_page);
+ nfs_context_set_write_error(req->wb_context, hdr->error);
+ goto remove_req;
+ }
+ if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
+ nfs_mark_request_dirty(req);
+ goto next;
+ }
+ if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
+ nfs_mark_request_commit(req, hdr->lseg, &cinfo);
+ goto next;
+ }
+remove_req:
+ nfs_inode_remove_request(req);
+next:
+ nfs_unlock_request(req);
+ nfs_end_page_writeback(req->wb_page);
+ nfs_release_request(req);
+ }
+out:
+ hdr->release(hdr);
}
-#endif
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-static int
-nfs_need_commit(struct nfs_inode *nfsi)
+static unsigned long
+nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
- return nfsi->ncommit > 0;
+ return cinfo->mds->ncommit;
}
-/* i_lock held by caller */
-static int
-nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
- spinlock_t *lock)
+/* cinfo->lock held by caller */
+int
+nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
+ struct nfs_commit_info *cinfo, int max)
{
struct nfs_page *req, *tmp;
int ret = 0;
@@ -570,12 +651,13 @@ nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
list_for_each_entry_safe(req, tmp, src, wb_list) {
if (!nfs_lock_request(req))
continue;
- if (cond_resched_lock(lock))
+ kref_get(&req->wb_kref);
+ if (cond_resched_lock(cinfo->lock))
list_safe_reset_next(req, tmp, wb_list);
- nfs_request_remove_commit_list(req);
+ nfs_request_remove_commit_list(req, cinfo);
nfs_list_add_request(req, dst);
ret++;
- if (ret == max)
+ if ((ret == max) && !cinfo->dreq)
break;
}
return ret;
@@ -584,37 +666,38 @@ nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
/*
* nfs_scan_commit - Scan an inode for commit requests
* @inode: NFS inode to scan
- * @dst: destination list
+ * @dst: mds destination list
+ * @cinfo: mds and ds lists of reqs ready to commit
*
* Moves requests from the inode's 'commit' request list.
* The requests are *not* checked to ensure that they form a contiguous set.
*/
-static int
-nfs_scan_commit(struct inode *inode, struct list_head *dst)
+int
+nfs_scan_commit(struct inode *inode, struct list_head *dst,
+ struct nfs_commit_info *cinfo)
{
- struct nfs_inode *nfsi = NFS_I(inode);
int ret = 0;
- spin_lock(&inode->i_lock);
- if (nfsi->ncommit > 0) {
+ spin_lock(cinfo->lock);
+ if (cinfo->mds->ncommit > 0) {
const int max = INT_MAX;
- ret = nfs_scan_commit_list(&nfsi->commit_list, dst, max,
- &inode->i_lock);
- ret += pnfs_scan_commit_lists(inode, max - ret,
- &inode->i_lock);
+ ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
+ cinfo, max);
+ ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(cinfo->lock);
return ret;
}
#else
-static inline int nfs_need_commit(struct nfs_inode *nfsi)
+static unsigned long nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
return 0;
}
-static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst)
+int nfs_scan_commit(struct inode *inode, struct list_head *dst,
+ struct nfs_commit_info *cinfo)
{
return 0;
}
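
The reworked nfs_scan_commit_list() above now pins each request with kref_get() while it sits on the destination list and skips the max cut-off for direct-I/O callers. A minimal userspace sketch of that scan-and-move idea, with invented names and none of the real locking, might look like this:

/*
 * Illustrative sketch only: walk a source list, take a reference on
 * each entry, move it to a destination list, and stop at 'max'
 * entries unless the caller asked for everything.
 */
#include <stdio.h>

struct item {
	int id;
	int refcount;           /* caller's reference, cf. kref_get() */
	struct item *next;
};

/* Move up to 'max' items from *src to *dst; return how many moved. */
static int scan_and_move(struct item **src, struct item **dst, int max,
			 int move_all)
{
	int moved = 0;

	while (*src && (move_all || moved < max)) {
		struct item *it = *src;

		*src = it->next;        /* unlink from source list  */
		it->refcount++;         /* hold a ref while on dst  */
		it->next = *dst;        /* push onto destination    */
		*dst = it;
		moved++;
	}
	return moved;
}

int main(void)
{
	struct item a = { 1, 1, NULL }, b = { 2, 1, &a }, c = { 3, 1, &b };
	struct item *src = &c, *dst = NULL;

	printf("moved %d items\n", scan_and_move(&src, &dst, 2, 0));
	return 0;
}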
@@ -659,7 +742,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
|| end < req->wb_offset)
goto out_flushme;
- if (nfs_lock_request_dontget(req))
+ if (nfs_lock_request(req))
break;
/* The request is locked, so wait and then retry */
@@ -729,7 +812,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
nfs_grow_file(page, offset, count);
nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
nfs_mark_request_dirty(req);
- nfs_unlock_request(req);
+ nfs_unlock_and_release_request(req);
return 0;
}
@@ -766,10 +849,14 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
* the PageUptodate() flag. In this case, we will need to turn off
* write optimisations that depend on the page contents being correct.
*/
-static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
+static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
- return PageUptodate(page) &&
- !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
+ if (nfs_have_delegated_attributes(inode))
+ goto out;
+ if (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE)
+ return false;
+out:
+ return PageUptodate(page) != 0;
}
/*
@@ -815,17 +902,6 @@ int nfs_updatepage(struct file *file, struct page *page,
return status;
}
-static void nfs_writepage_release(struct nfs_page *req,
- struct nfs_write_data *data)
-{
- struct page *page = req->wb_page;
-
- if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
- nfs_inode_remove_request(req);
- nfs_unlock_request(req);
- nfs_end_page_writeback(page);
-}
-
static int flush_task_priority(int how)
{
switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
@@ -837,18 +913,18 @@ static int flush_task_priority(int how)
return RPC_PRIORITY_NORMAL;
}
-int nfs_initiate_write(struct nfs_write_data *data,
- struct rpc_clnt *clnt,
+int nfs_initiate_write(struct rpc_clnt *clnt,
+ struct nfs_write_data *data,
const struct rpc_call_ops *call_ops,
- int how)
+ int how, int flags)
{
- struct inode *inode = data->inode;
+ struct inode *inode = data->header->inode;
int priority = flush_task_priority(how);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
- .rpc_cred = data->cred,
+ .rpc_cred = data->header->cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = clnt,
@@ -857,7 +933,7 @@ int nfs_initiate_write(struct nfs_write_data *data,
.callback_ops = call_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | flags,
.priority = priority,
};
int ret = 0;
@@ -892,26 +968,21 @@ EXPORT_SYMBOL_GPL(nfs_initiate_write);
/*
* Set up the argument/result storage required for the RPC call.
*/
-static void nfs_write_rpcsetup(struct nfs_page *req,
- struct nfs_write_data *data,
+static void nfs_write_rpcsetup(struct nfs_write_data *data,
unsigned int count, unsigned int offset,
- int how)
+ int how, struct nfs_commit_info *cinfo)
{
- struct inode *inode = req->wb_context->dentry->d_inode;
+ struct nfs_page *req = data->header->req;
/* Set up the RPC argument and reply structs
* NB: take care not to mess about with data->commit et al. */
- data->req = req;
- data->inode = inode = req->wb_context->dentry->d_inode;
- data->cred = req->wb_context->cred;
-
- data->args.fh = NFS_FH(inode);
+ data->args.fh = NFS_FH(data->header->inode);
data->args.offset = req_offset(req) + offset;
/* pnfs_set_layoutcommit needs this */
data->mds_offset = data->args.offset;
data->args.pgbase = req->wb_pgbase + offset;
- data->args.pages = data->pagevec;
+ data->args.pages = data->pages.pagevec;
data->args.count = count;
data->args.context = get_nfs_open_context(req->wb_context);
data->args.lock_context = req->wb_lock_context;
@@ -920,7 +991,7 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
case 0:
break;
case FLUSH_COND_STABLE:
- if (nfs_need_commit(NFS_I(inode)))
+ if (nfs_reqs_to_commit(cinfo))
break;
default:
data->args.stable = NFS_FILE_SYNC;
@@ -936,9 +1007,9 @@ static int nfs_do_write(struct nfs_write_data *data,
const struct rpc_call_ops *call_ops,
int how)
{
- struct inode *inode = data->args.context->dentry->d_inode;
+ struct inode *inode = data->header->inode;
- return nfs_initiate_write(data, NFS_CLIENT(inode), call_ops, how);
+ return nfs_initiate_write(NFS_CLIENT(inode), data, call_ops, how, 0);
}
static int nfs_do_multiple_writes(struct list_head *head,
@@ -951,7 +1022,7 @@ static int nfs_do_multiple_writes(struct list_head *head,
while (!list_empty(head)) {
int ret2;
- data = list_entry(head->next, struct nfs_write_data, list);
+ data = list_first_entry(head, struct nfs_write_data, list);
list_del_init(&data->list);
ret2 = nfs_do_write(data, call_ops, how);
@@ -967,31 +1038,60 @@ static int nfs_do_multiple_writes(struct list_head *head,
*/
static void nfs_redirty_request(struct nfs_page *req)
{
- struct page *page = req->wb_page;
-
nfs_mark_request_dirty(req);
nfs_unlock_request(req);
- nfs_end_page_writeback(page);
+ nfs_end_page_writeback(req->wb_page);
+ nfs_release_request(req);
+}
+
+static void nfs_async_write_error(struct list_head *head)
+{
+ struct nfs_page *req;
+
+ while (!list_empty(head)) {
+ req = nfs_list_entry(head->next);
+ nfs_list_remove_request(req);
+ nfs_redirty_request(req);
+ }
+}
+
+static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
+ .error_cleanup = nfs_async_write_error,
+ .completion = nfs_write_completion,
+};
+
+static void nfs_flush_error(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
+{
+ set_bit(NFS_IOHDR_REDO, &hdr->flags);
+ while (!list_empty(&hdr->rpc_list)) {
+ struct nfs_write_data *data = list_first_entry(&hdr->rpc_list,
+ struct nfs_write_data, list);
+ list_del(&data->list);
+ nfs_writedata_release(data);
+ }
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
}
/*
* Generate multiple small requests to write out a single
* contiguous dirty area on one page.
*/
-static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_flush_multi(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
{
- struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
+ struct nfs_page *req = hdr->req;
struct page *page = req->wb_page;
struct nfs_write_data *data;
size_t wsize = desc->pg_bsize, nbytes;
unsigned int offset;
int requests = 0;
- int ret = 0;
+ struct nfs_commit_info cinfo;
- nfs_list_remove_request(req);
+ nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
- (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit ||
+ (desc->pg_moreio || nfs_reqs_to_commit(&cinfo) ||
desc->pg_count > wsize))
desc->pg_ioflags &= ~FLUSH_COND_STABLE;
@@ -1001,28 +1101,22 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head
do {
size_t len = min(nbytes, wsize);
- data = nfs_writedata_alloc(1);
- if (!data)
- goto out_bad;
- data->pagevec[0] = page;
- nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);
- list_add(&data->list, res);
+ data = nfs_writedata_alloc(hdr, 1);
+ if (!data) {
+ nfs_flush_error(desc, hdr);
+ return -ENOMEM;
+ }
+ data->pages.pagevec[0] = page;
+ nfs_write_rpcsetup(data, len, offset, desc->pg_ioflags, &cinfo);
+ list_add(&data->list, &hdr->rpc_list);
requests++;
nbytes -= len;
offset += len;
} while (nbytes != 0);
- atomic_set(&req->wb_complete, requests);
- desc->pg_rpc_callops = &nfs_write_partial_ops;
- return ret;
-
-out_bad:
- while (!list_empty(res)) {
- data = list_entry(res->next, struct nfs_write_data, list);
- list_del(&data->list);
- nfs_writedata_release(data);
- }
- nfs_redirty_request(req);
- return -ENOMEM;
+ nfs_list_remove_request(req);
+ nfs_list_add_request(req, &hdr->pages);
+ desc->pg_rpc_callops = &nfs_write_common_ops;
+ return 0;
}
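
nfs_flush_multi() above carves one dirty page range into wsize-sized RPCs by walking an offset/length window. A standalone sketch of just that arithmetic, with made-up names and a print callback standing in for the real RPC setup:

/*
 * Sketch of the chunking loop, reduced to plain arithmetic.  'wsize'
 * stands for the server's maximum write size.
 */
#include <stdio.h>
#include <stddef.h>

static unsigned int split_range(size_t total, size_t wsize,
				void (*emit)(size_t offset, size_t len))
{
	size_t offset = 0, nbytes = total;
	unsigned int requests = 0;

	while (nbytes != 0) {
		size_t len = nbytes < wsize ? nbytes : wsize;

		emit(offset, len);      /* one RPC-sized chunk */
		offset += len;
		nbytes -= len;
		requests++;
	}
	return requests;
}

static void print_chunk(size_t offset, size_t len)
{
	printf("chunk at %zu, %zu bytes\n", offset, len);
}

int main(void)
{
	/* e.g. a 10000-byte dirty area against a 4096-byte wsize */
	printf("%u requests\n", split_range(10000, 4096, print_chunk));
	return 0;
}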
/*
@@ -1033,62 +1127,71 @@ out_bad:
* This is the case if nfs_updatepage detects a conflicting request
* that has been written but not committed.
*/
-static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_flush_one(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
{
struct nfs_page *req;
struct page **pages;
struct nfs_write_data *data;
struct list_head *head = &desc->pg_list;
- int ret = 0;
+ struct nfs_commit_info cinfo;
- data = nfs_writedata_alloc(nfs_page_array_len(desc->pg_base,
- desc->pg_count));
+ data = nfs_writedata_alloc(hdr, nfs_page_array_len(desc->pg_base,
+ desc->pg_count));
if (!data) {
- while (!list_empty(head)) {
- req = nfs_list_entry(head->next);
- nfs_list_remove_request(req);
- nfs_redirty_request(req);
- }
- ret = -ENOMEM;
- goto out;
+ nfs_flush_error(desc, hdr);
+ return -ENOMEM;
}
- pages = data->pagevec;
+
+ nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
+ pages = data->pages.pagevec;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- nfs_list_add_request(req, &data->pages);
+ nfs_list_add_request(req, &hdr->pages);
*pages++ = req->wb_page;
}
- req = nfs_list_entry(data->pages.next);
if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
- (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
+ (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
desc->pg_ioflags &= ~FLUSH_COND_STABLE;
/* Set up the argument struct */
- nfs_write_rpcsetup(req, data, desc->pg_count, 0, desc->pg_ioflags);
- list_add(&data->list, res);
- desc->pg_rpc_callops = &nfs_write_full_ops;
-out:
- return ret;
+ nfs_write_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
+ list_add(&data->list, &hdr->rpc_list);
+ desc->pg_rpc_callops = &nfs_write_common_ops;
+ return 0;
}
-int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct list_head *head)
+int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
{
if (desc->pg_bsize < PAGE_CACHE_SIZE)
- return nfs_flush_multi(desc, head);
- return nfs_flush_one(desc, head);
+ return nfs_flush_multi(desc, hdr);
+ return nfs_flush_one(desc, hdr);
}
static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
- LIST_HEAD(head);
+ struct nfs_write_header *whdr;
+ struct nfs_pgio_header *hdr;
int ret;
- ret = nfs_generic_flush(desc, &head);
+ whdr = nfs_writehdr_alloc();
+ if (!whdr) {
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+ return -ENOMEM;
+ }
+ hdr = &whdr->header;
+ nfs_pgheader_init(desc, hdr, nfs_writehdr_free);
+ atomic_inc(&hdr->refcnt);
+ ret = nfs_generic_flush(desc, hdr);
if (ret == 0)
- ret = nfs_do_multiple_writes(&head, desc->pg_rpc_callops,
- desc->pg_ioflags);
+ ret = nfs_do_multiple_writes(&hdr->rpc_list,
+ desc->pg_rpc_callops,
+ desc->pg_ioflags);
+ if (atomic_dec_and_test(&hdr->refcnt))
+ hdr->completion_ops->completion(hdr);
return ret;
}
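
nfs_generic_pg_writepages() pins the new pgio header with atomic_inc() before issuing the writes and runs the completion only when atomic_dec_and_test() drops the last reference. The following C11 sketch models that last-put-runs-completion idiom; the types and helpers here are illustrative, not the kernel API:

/*
 * Sketch: take an extra reference before issuing I/O, drop it
 * afterwards, and run the completion exactly once on the last put.
 */
#include <stdatomic.h>
#include <stdio.h>

struct pgio_header {
	atomic_int refcnt;
	void (*completion)(struct pgio_header *hdr);
};

static void hdr_get(struct pgio_header *hdr)
{
	atomic_fetch_add(&hdr->refcnt, 1);
}

static void hdr_put(struct pgio_header *hdr)
{
	/* atomic_dec_and_test() equivalent: last put runs completion */
	if (atomic_fetch_sub(&hdr->refcnt, 1) == 1)
		hdr->completion(hdr);
}

static void done(struct pgio_header *hdr)
{
	(void)hdr;
	printf("completion ran once\n");
}

int main(void)
{
	struct pgio_header hdr = { .completion = done };

	atomic_init(&hdr.refcnt, 1);    /* submitter's reference      */
	hdr_get(&hdr);                  /* pin across submission path */
	hdr_put(&hdr);                  /* outstanding RPC completes  */
	hdr_put(&hdr);                  /* submitter drops its ref    */
	return 0;
}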
@@ -1098,9 +1201,10 @@ static const struct nfs_pageio_ops nfs_pageio_write_ops = {
};
void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
- struct inode *inode, int ioflags)
+ struct inode *inode, int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
- nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
+ nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops, compl_ops,
NFS_SERVER(inode)->wsize, ioflags);
}
@@ -1111,80 +1215,27 @@ void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
-static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
- struct inode *inode, int ioflags)
+void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode, int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
- if (!pnfs_pageio_init_write(pgio, inode, ioflags))
- nfs_pageio_init_write_mds(pgio, inode, ioflags);
+ if (!pnfs_pageio_init_write(pgio, inode, ioflags, compl_ops))
+ nfs_pageio_init_write_mds(pgio, inode, ioflags, compl_ops);
}
-/*
- * Handle a write reply that flushed part of a page.
- */
-static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
+void nfs_write_prepare(struct rpc_task *task, void *calldata)
{
- struct nfs_write_data *data = calldata;
-
- dprintk("NFS: %5u write(%s/%lld %d@%lld)",
- task->tk_pid,
- data->req->wb_context->dentry->d_inode->i_sb->s_id,
- (long long)
- NFS_FILEID(data->req->wb_context->dentry->d_inode),
- data->req->wb_bytes, (long long)req_offset(data->req));
-
- nfs_writeback_done(task, data);
+ struct nfs_write_data *data = calldata;
+ NFS_PROTO(data->header->inode)->write_rpc_prepare(task, data);
}
-static void nfs_writeback_release_partial(void *calldata)
+void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
- struct nfs_write_data *data = calldata;
- struct nfs_page *req = data->req;
- struct page *page = req->wb_page;
- int status = data->task.tk_status;
+ struct nfs_commit_data *data = calldata;
- if (status < 0) {
- nfs_set_pageerror(page);
- nfs_context_set_write_error(req->wb_context, status);
- dprintk(", error = %d\n", status);
- goto out;
- }
-
- if (nfs_write_need_commit(data)) {
- struct inode *inode = page->mapping->host;
-
- spin_lock(&inode->i_lock);
- if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
- /* Do nothing we need to resend the writes */
- } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
- memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
- dprintk(" defer commit\n");
- } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
- set_bit(PG_NEED_RESCHED, &req->wb_flags);
- clear_bit(PG_NEED_COMMIT, &req->wb_flags);
- dprintk(" server reboot detected\n");
- }
- spin_unlock(&inode->i_lock);
- } else
- dprintk(" OK\n");
-
-out:
- if (atomic_dec_and_test(&req->wb_complete))
- nfs_writepage_release(req, data);
- nfs_writedata_release(calldata);
+ NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}
-void nfs_write_prepare(struct rpc_task *task, void *calldata)
-{
- struct nfs_write_data *data = calldata;
- NFS_PROTO(data->inode)->write_rpc_prepare(task, data);
-}
-
-static const struct rpc_call_ops nfs_write_partial_ops = {
- .rpc_call_prepare = nfs_write_prepare,
- .rpc_call_done = nfs_writeback_done_partial,
- .rpc_release = nfs_writeback_release_partial,
-};
-
/*
* Handle a write reply that flushes a whole page.
*
@@ -1192,59 +1243,37 @@ static const struct rpc_call_ops nfs_write_partial_ops = {
* writebacks since the page->count is kept > 1 for as long
* as the page has a write request pending.
*/
-static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
+static void nfs_writeback_done_common(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
nfs_writeback_done(task, data);
}
-static void nfs_writeback_release_full(void *calldata)
+static void nfs_writeback_release_common(void *calldata)
{
struct nfs_write_data *data = calldata;
+ struct nfs_pgio_header *hdr = data->header;
int status = data->task.tk_status;
+ struct nfs_page *req = hdr->req;
- /* Update attributes as result of writeback. */
- while (!list_empty(&data->pages)) {
- struct nfs_page *req = nfs_list_entry(data->pages.next);
- struct page *page = req->wb_page;
-
- nfs_list_remove_request(req);
-
- dprintk("NFS: %5u write (%s/%lld %d@%lld)",
- data->task.tk_pid,
- req->wb_context->dentry->d_inode->i_sb->s_id,
- (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
- req->wb_bytes,
- (long long)req_offset(req));
-
- if (status < 0) {
- nfs_set_pageerror(page);
- nfs_context_set_write_error(req->wb_context, status);
- dprintk(", error = %d\n", status);
- goto remove_request;
- }
-
- if (nfs_write_need_commit(data)) {
+ if ((status >= 0) && nfs_write_need_commit(data)) {
+ spin_lock(&hdr->lock);
+ if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags))
+ ; /* Do nothing */
+ else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags))
memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
- nfs_mark_request_commit(req, data->lseg);
- dprintk(" marked for commit\n");
- goto next;
- }
- dprintk(" OK\n");
-remove_request:
- nfs_inode_remove_request(req);
- next:
- nfs_unlock_request(req);
- nfs_end_page_writeback(page);
+ else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf)))
+ set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags);
+ spin_unlock(&hdr->lock);
}
- nfs_writedata_release(calldata);
+ nfs_writedata_release(data);
}
-static const struct rpc_call_ops nfs_write_full_ops = {
+static const struct rpc_call_ops nfs_write_common_ops = {
.rpc_call_prepare = nfs_write_prepare,
- .rpc_call_done = nfs_writeback_done_full,
- .rpc_release = nfs_writeback_release_full,
+ .rpc_call_done = nfs_writeback_done_common,
+ .rpc_release = nfs_writeback_release_common,
};
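
nfs_writeback_release_common() above decides between committing and rescheduling by comparing the verifier remembered for the request with the one in this reply; a mismatch suggests the server restarted. A small sketch of that comparison, assuming an opaque 8-byte verifier and invented helper names:

#include <stdio.h>
#include <string.h>

/* Stand-in for the opaque write/commit verifier. */
struct verifier { unsigned char data[8]; };

/*
 * A changed verifier between replies for the same data means the
 * server restarted, so "unstable" pages cannot simply be committed;
 * they have to be written again.
 */
static int needs_resched(const struct verifier *stored,
			 const struct verifier *reply)
{
	return memcmp(stored, reply, sizeof(*stored)) != 0;
}

int main(void)
{
	struct verifier v1 = { { 1, 2, 3, 4, 5, 6, 7, 8 } };
	struct verifier v2 = v1;

	v2.data[0] = 9;                 /* simulate a server restart */
	printf("same verifier: %d, changed verifier: %d\n",
	       needs_resched(&v1, &v1), needs_resched(&v1, &v2));
	return 0;
}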
@@ -1255,6 +1284,7 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
struct nfs_writeargs *argp = &data->args;
struct nfs_writeres *resp = &data->res;
+ struct inode *inode = data->header->inode;
int status;
dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
@@ -1267,10 +1297,10 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
* another writer had changed the file, but some applications
* depend on tighter cache coherency when writing.
*/
- status = NFS_PROTO(data->inode)->write_done(task, data);
+ status = NFS_PROTO(inode)->write_done(task, data);
if (status != 0)
return;
- nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
+ nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
@@ -1288,46 +1318,47 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
if (time_before(complain, jiffies)) {
dprintk("NFS: faulty NFS server %s:"
" (committed = %d) != (stable = %d)\n",
- NFS_SERVER(data->inode)->nfs_client->cl_hostname,
+ NFS_SERVER(inode)->nfs_client->cl_hostname,
resp->verf->committed, argp->stable);
complain = jiffies + 300 * HZ;
}
}
#endif
- /* Is this a short write? */
- if (task->tk_status >= 0 && resp->count < argp->count) {
+ if (task->tk_status < 0)
+ nfs_set_pgio_error(data->header, task->tk_status, argp->offset);
+ else if (resp->count < argp->count) {
static unsigned long complain;
- nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
+ /* This is a short write! */
+ nfs_inc_stats(inode, NFSIOS_SHORTWRITE);
/* Has the server at least made some progress? */
- if (resp->count != 0) {
- /* Was this an NFSv2 write or an NFSv3 stable write? */
- if (resp->verf->committed != NFS_UNSTABLE) {
- /* Resend from where the server left off */
- data->mds_offset += resp->count;
- argp->offset += resp->count;
- argp->pgbase += resp->count;
- argp->count -= resp->count;
- } else {
- /* Resend as a stable write in order to avoid
- * headaches in the case of a server crash.
- */
- argp->stable = NFS_FILE_SYNC;
+ if (resp->count == 0) {
+ if (time_before(complain, jiffies)) {
+ printk(KERN_WARNING
+ "NFS: Server wrote zero bytes, expected %u.\n",
+ argp->count);
+ complain = jiffies + 300 * HZ;
}
- rpc_restart_call_prepare(task);
+ nfs_set_pgio_error(data->header, -EIO, argp->offset);
+ task->tk_status = -EIO;
return;
}
- if (time_before(complain, jiffies)) {
- printk(KERN_WARNING
- "NFS: Server wrote zero bytes, expected %u.\n",
- argp->count);
- complain = jiffies + 300 * HZ;
+ /* Was this an NFSv2 write or an NFSv3 stable write? */
+ if (resp->verf->committed != NFS_UNSTABLE) {
+ /* Resend from where the server left off */
+ data->mds_offset += resp->count;
+ argp->offset += resp->count;
+ argp->pgbase += resp->count;
+ argp->count -= resp->count;
+ } else {
+ /* Resend as a stable write in order to avoid
+ * headaches in the case of a server crash.
+ */
+ argp->stable = NFS_FILE_SYNC;
}
- /* Can't do anything about it except throw an error. */
- task->tk_status = -EIO;
+ rpc_restart_call_prepare(task);
}
- return;
}
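
The rewritten short-write branch in nfs_writeback_done() advances the argument window by the number of bytes the server acknowledged and restarts the call. A worked standalone version of that bookkeeping, using a stand-in struct rather than the real nfs_writeargs:

#include <stdio.h>

struct write_args {
	unsigned long long offset;
	unsigned int pgbase;
	unsigned int count;
};

/* Returns 1 if a resend is needed, 0 if the write fully completed. */
static int advance_after_short_write(struct write_args *argp,
				     unsigned int written)
{
	if (written >= argp->count)
		return 0;               /* nothing left to resend */
	argp->offset += written;
	argp->pgbase += written;
	argp->count  -= written;
	return 1;
}

int main(void)
{
	struct write_args args = { 65536, 0, 8192 };

	if (advance_after_short_write(&args, 4096))
		printf("resend %u bytes at offset %llu\n",
		       args.count, args.offset);
	return 0;
}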
@@ -1347,26 +1378,23 @@ static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
return (ret < 0) ? ret : 1;
}
-void nfs_commit_clear_lock(struct nfs_inode *nfsi)
+static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
{
clear_bit(NFS_INO_COMMIT, &nfsi->flags);
smp_mb__after_clear_bit();
wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
}
-EXPORT_SYMBOL_GPL(nfs_commit_clear_lock);
-void nfs_commitdata_release(void *data)
+void nfs_commitdata_release(struct nfs_commit_data *data)
{
- struct nfs_write_data *wdata = data;
-
- put_nfs_open_context(wdata->args.context);
- nfs_commit_free(wdata);
+ put_nfs_open_context(data->context);
+ nfs_commit_free(data);
}
EXPORT_SYMBOL_GPL(nfs_commitdata_release);
-int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
+int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
const struct rpc_call_ops *call_ops,
- int how)
+ int how, int flags)
{
struct rpc_task *task;
int priority = flush_task_priority(how);
@@ -1382,7 +1410,7 @@ int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
.callback_ops = call_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | flags,
.priority = priority,
};
/* Set up the initial task struct. */
@@ -1403,9 +1431,10 @@ EXPORT_SYMBOL_GPL(nfs_initiate_commit);
/*
* Set up the argument/result storage required for the RPC call.
*/
-void nfs_init_commit(struct nfs_write_data *data,
- struct list_head *head,
- struct pnfs_layout_segment *lseg)
+void nfs_init_commit(struct nfs_commit_data *data,
+ struct list_head *head,
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
struct nfs_page *first = nfs_list_entry(head->next);
struct inode *inode = first->wb_context->dentry->d_inode;
@@ -1419,13 +1448,14 @@ void nfs_init_commit(struct nfs_write_data *data,
data->cred = first->wb_context->cred;
data->lseg = lseg; /* reference transferred */
data->mds_ops = &nfs_commit_ops;
+ data->completion_ops = cinfo->completion_ops;
+ data->dreq = cinfo->dreq;
data->args.fh = NFS_FH(data->inode);
/* Note: we always request a commit of the entire inode */
data->args.offset = 0;
data->args.count = 0;
- data->args.context = get_nfs_open_context(first->wb_context);
- data->res.count = 0;
+ data->context = get_nfs_open_context(first->wb_context);
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
nfs_fattr_init(&data->fattr);
@@ -1433,18 +1463,21 @@ void nfs_init_commit(struct nfs_write_data *data,
EXPORT_SYMBOL_GPL(nfs_init_commit);
void nfs_retry_commit(struct list_head *page_list,
- struct pnfs_layout_segment *lseg)
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
struct nfs_page *req;
while (!list_empty(page_list)) {
req = nfs_list_entry(page_list->next);
nfs_list_remove_request(req);
- nfs_mark_request_commit(req, lseg);
- dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
- dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
- BDI_RECLAIMABLE);
- nfs_unlock_request(req);
+ nfs_mark_request_commit(req, lseg, cinfo);
+ if (!cinfo->dreq) {
+ dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+ dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
+ BDI_RECLAIMABLE);
+ }
+ nfs_unlock_and_release_request(req);
}
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);
@@ -1453,9 +1486,10 @@ EXPORT_SYMBOL_GPL(nfs_retry_commit);
* Commit dirty pages
*/
static int
-nfs_commit_list(struct inode *inode, struct list_head *head, int how)
+nfs_commit_list(struct inode *inode, struct list_head *head, int how,
+ struct nfs_commit_info *cinfo)
{
- struct nfs_write_data *data;
+ struct nfs_commit_data *data;
data = nfs_commitdata_alloc();
@@ -1463,11 +1497,13 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
goto out_bad;
/* Set up the argument struct */
- nfs_init_commit(data, head, NULL);
- return nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how);
+ nfs_init_commit(data, head, NULL, cinfo);
+ atomic_inc(&cinfo->mds->rpcs_out);
+ return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops,
+ how, 0);
out_bad:
- nfs_retry_commit(head, NULL);
- nfs_commit_clear_lock(NFS_I(inode));
+ nfs_retry_commit(head, NULL, cinfo);
+ cinfo->completion_ops->error_cleanup(NFS_I(inode));
return -ENOMEM;
}
@@ -1476,7 +1512,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
*/
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
- struct nfs_write_data *data = calldata;
+ struct nfs_commit_data *data = calldata;
dprintk("NFS: %5u nfs_commit_done (status %d)\n",
task->tk_pid, task->tk_status);
@@ -1485,10 +1521,11 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
NFS_PROTO(data->inode)->commit_done(task, data);
}
-void nfs_commit_release_pages(struct nfs_write_data *data)
+static void nfs_commit_release_pages(struct nfs_commit_data *data)
{
struct nfs_page *req;
int status = data->task.tk_status;
+ struct nfs_commit_info cinfo;
while (!list_empty(&data->pages)) {
req = nfs_list_entry(data->pages.next);
@@ -1519,42 +1556,59 @@ void nfs_commit_release_pages(struct nfs_write_data *data)
dprintk(" mismatch\n");
nfs_mark_request_dirty(req);
next:
- nfs_unlock_request(req);
+ nfs_unlock_and_release_request(req);
}
+ nfs_init_cinfo(&cinfo, data->inode, data->dreq);
+ if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
+ nfs_commit_clear_lock(NFS_I(data->inode));
}
-EXPORT_SYMBOL_GPL(nfs_commit_release_pages);
static void nfs_commit_release(void *calldata)
{
- struct nfs_write_data *data = calldata;
+ struct nfs_commit_data *data = calldata;
- nfs_commit_release_pages(data);
- nfs_commit_clear_lock(NFS_I(data->inode));
+ data->completion_ops->completion(data);
nfs_commitdata_release(calldata);
}
static const struct rpc_call_ops nfs_commit_ops = {
- .rpc_call_prepare = nfs_write_prepare,
+ .rpc_call_prepare = nfs_commit_prepare,
.rpc_call_done = nfs_commit_done,
.rpc_release = nfs_commit_release,
};
+static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
+ .completion = nfs_commit_release_pages,
+ .error_cleanup = nfs_commit_clear_lock,
+};
+
+int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
+ int how, struct nfs_commit_info *cinfo)
+{
+ int status;
+
+ status = pnfs_commit_list(inode, head, how, cinfo);
+ if (status == PNFS_NOT_ATTEMPTED)
+ status = nfs_commit_list(inode, head, how, cinfo);
+ return status;
+}
+
int nfs_commit_inode(struct inode *inode, int how)
{
LIST_HEAD(head);
+ struct nfs_commit_info cinfo;
int may_wait = how & FLUSH_SYNC;
int res;
res = nfs_commit_set_lock(NFS_I(inode), may_wait);
if (res <= 0)
goto out_mark_dirty;
- res = nfs_scan_commit(inode, &head);
+ nfs_init_cinfo_from_inode(&cinfo, inode);
+ res = nfs_scan_commit(inode, &head, &cinfo);
if (res) {
int error;
- error = pnfs_commit_list(inode, &head, how);
- if (error == PNFS_NOT_ATTEMPTED)
- error = nfs_commit_list(inode, &head, how);
+ error = nfs_generic_commit_list(inode, &head, how, &cinfo);
if (error < 0)
return error;
if (!may_wait)
@@ -1585,14 +1639,14 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr
int ret = 0;
/* no commits means nothing needs to be done */
- if (!nfsi->ncommit)
+ if (!nfsi->commit_info.ncommit)
return ret;
if (wbc->sync_mode == WB_SYNC_NONE) {
/* Don't commit yet if this is a non-blocking flush and there
* are a lot of outstanding writes for this mapping.
*/
- if (nfsi->ncommit <= (nfsi->npages >> 1))
+ if (nfsi->commit_info.ncommit <= (nfsi->npages >> 1))
goto out_mark_dirty;
/* don't wait for the COMMIT response */
@@ -1665,7 +1719,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
req = nfs_page_find_request(page);
if (req == NULL)
break;
- if (nfs_lock_request_dontget(req)) {
+ if (nfs_lock_request(req)) {
nfs_clear_request_commit(req);
nfs_inode_remove_request(req);
/*
@@ -1673,7 +1727,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
* page as being dirty
*/
cancel_dirty_page(page, PAGE_CACHE_SIZE);
- nfs_unlock_request(req);
+ nfs_unlock_and_release_request(req);
break;
}
ret = nfs_wait_on_request(req);
@@ -1742,7 +1796,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
int __init nfs_init_writepagecache(void)
{
nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
- sizeof(struct nfs_write_data),
+ sizeof(struct nfs_write_header),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (nfs_wdata_cachep == NULL)
@@ -1753,6 +1807,13 @@ int __init nfs_init_writepagecache(void)
if (nfs_wdata_mempool == NULL)
return -ENOMEM;
+ nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
+ sizeof(struct nfs_commit_data),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (nfs_cdata_cachep == NULL)
+ return -ENOMEM;
+
nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
nfs_wdata_cachep);
if (nfs_commit_mempool == NULL)
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 8e9689abbc0c..dcb52b884519 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -15,11 +15,13 @@
#include <linux/namei.h>
#include <linux/module.h>
#include <linux/exportfs.h>
+#include <linux/sunrpc/svc_xprt.h>
#include <net/ipv6.h>
#include "nfsd.h"
#include "nfsfh.h"
+#include "netns.h"
#define NFSDDBG_FACILITY NFSDDBG_EXPORT
@@ -38,7 +40,6 @@ typedef struct svc_export svc_export;
#define EXPKEY_HASHBITS 8
#define EXPKEY_HASHMAX (1 << EXPKEY_HASHBITS)
#define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1)
-static struct cache_head *expkey_table[EXPKEY_HASHMAX];
static void expkey_put(struct kref *ref)
{
@@ -71,9 +72,9 @@ static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
return sunrpc_cache_pipe_upcall(cd, h, expkey_request);
}
-static struct svc_expkey *svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old);
-static struct svc_expkey *svc_expkey_lookup(struct svc_expkey *);
-static struct cache_detail svc_expkey_cache;
+static struct svc_expkey *svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new,
+ struct svc_expkey *old);
+static struct svc_expkey *svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *);
static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
{
@@ -131,7 +132,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
key.ek_fsidtype = fsidtype;
memcpy(key.ek_fsid, buf, len);
- ek = svc_expkey_lookup(&key);
+ ek = svc_expkey_lookup(cd, &key);
err = -ENOMEM;
if (!ek)
goto out;
@@ -145,7 +146,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
err = 0;
if (len == 0) {
set_bit(CACHE_NEGATIVE, &key.h.flags);
- ek = svc_expkey_update(&key, ek);
+ ek = svc_expkey_update(cd, &key, ek);
if (!ek)
err = -ENOMEM;
} else {
@@ -155,7 +156,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
dprintk("Found the path %s\n", buf);
- ek = svc_expkey_update(&key, ek);
+ ek = svc_expkey_update(cd, &key, ek);
if (!ek)
err = -ENOMEM;
path_put(&key.ek_path);
@@ -163,7 +164,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
cache_flush();
out:
if (ek)
- cache_put(&ek->h, &svc_expkey_cache);
+ cache_put(&ek->h, cd);
if (dom)
auth_domain_put(dom);
kfree(buf);
@@ -239,10 +240,9 @@ static struct cache_head *expkey_alloc(void)
return NULL;
}
-static struct cache_detail svc_expkey_cache = {
+static struct cache_detail svc_expkey_cache_template = {
.owner = THIS_MODULE,
.hash_size = EXPKEY_HASHMAX,
- .hash_table = expkey_table,
.name = "nfsd.fh",
.cache_put = expkey_put,
.cache_upcall = expkey_upcall,
@@ -268,13 +268,12 @@ svc_expkey_hash(struct svc_expkey *item)
}
static struct svc_expkey *
-svc_expkey_lookup(struct svc_expkey *item)
+svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *item)
{
struct cache_head *ch;
int hash = svc_expkey_hash(item);
- ch = sunrpc_cache_lookup(&svc_expkey_cache, &item->h,
- hash);
+ ch = sunrpc_cache_lookup(cd, &item->h, hash);
if (ch)
return container_of(ch, struct svc_expkey, h);
else
@@ -282,13 +281,13 @@ svc_expkey_lookup(struct svc_expkey *item)
}
static struct svc_expkey *
-svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old)
+svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new,
+ struct svc_expkey *old)
{
struct cache_head *ch;
int hash = svc_expkey_hash(new);
- ch = sunrpc_cache_update(&svc_expkey_cache, &new->h,
- &old->h, hash);
+ ch = sunrpc_cache_update(cd, &new->h, &old->h, hash);
if (ch)
return container_of(ch, struct svc_expkey, h);
else
@@ -299,8 +298,6 @@ svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old)
#define EXPORT_HASHBITS 8
#define EXPORT_HASHMAX (1<< EXPORT_HASHBITS)
-static struct cache_head *export_table[EXPORT_HASHMAX];
-
static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
{
int i;
@@ -525,6 +522,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
goto out1;
exp.ex_client = dom;
+ exp.cd = cd;
/* expiry */
err = -EINVAL;
@@ -672,6 +670,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
new->ex_fslocs.locations = NULL;
new->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = 0;
+ new->cd = item->cd;
}
static void export_update(struct cache_head *cnew, struct cache_head *citem)
@@ -707,10 +706,9 @@ static struct cache_head *svc_export_alloc(void)
return NULL;
}
-struct cache_detail svc_export_cache = {
+struct cache_detail svc_export_cache_template = {
.owner = THIS_MODULE,
.hash_size = EXPORT_HASHMAX,
- .hash_table = export_table,
.name = "nfsd.export",
.cache_put = svc_export_put,
.cache_upcall = svc_export_upcall,
@@ -739,8 +737,7 @@ svc_export_lookup(struct svc_export *exp)
struct cache_head *ch;
int hash = svc_export_hash(exp);
- ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h,
- hash);
+ ch = sunrpc_cache_lookup(exp->cd, &exp->h, hash);
if (ch)
return container_of(ch, struct svc_export, h);
else
@@ -753,9 +750,7 @@ svc_export_update(struct svc_export *new, struct svc_export *old)
struct cache_head *ch;
int hash = svc_export_hash(old);
- ch = sunrpc_cache_update(&svc_export_cache, &new->h,
- &old->h,
- hash);
+ ch = sunrpc_cache_update(old->cd, &new->h, &old->h, hash);
if (ch)
return container_of(ch, struct svc_export, h);
else
@@ -764,7 +759,8 @@ svc_export_update(struct svc_export *new, struct svc_export *old)
static struct svc_expkey *
-exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
+exp_find_key(struct cache_detail *cd, svc_client *clp, int fsid_type,
+ u32 *fsidv, struct cache_req *reqp)
{
struct svc_expkey key, *ek;
int err;
@@ -776,18 +772,18 @@ exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
key.ek_fsidtype = fsid_type;
memcpy(key.ek_fsid, fsidv, key_len(fsid_type));
- ek = svc_expkey_lookup(&key);
+ ek = svc_expkey_lookup(cd, &key);
if (ek == NULL)
return ERR_PTR(-ENOMEM);
- err = cache_check(&svc_expkey_cache, &ek->h, reqp);
+ err = cache_check(cd, &ek->h, reqp);
if (err)
return ERR_PTR(err);
return ek;
}
-static svc_export *exp_get_by_name(svc_client *clp, const struct path *path,
- struct cache_req *reqp)
+static svc_export *exp_get_by_name(struct cache_detail *cd, svc_client *clp,
+ const struct path *path, struct cache_req *reqp)
{
struct svc_export *exp, key;
int err;
@@ -797,11 +793,12 @@ static svc_export *exp_get_by_name(svc_client *clp, const struct path *path,
key.ex_client = clp;
key.ex_path = *path;
+ key.cd = cd;
exp = svc_export_lookup(&key);
if (exp == NULL)
return ERR_PTR(-ENOMEM);
- err = cache_check(&svc_export_cache, &exp->h, reqp);
+ err = cache_check(cd, &exp->h, reqp);
if (err)
return ERR_PTR(err);
return exp;
@@ -810,16 +807,17 @@ static svc_export *exp_get_by_name(svc_client *clp, const struct path *path,
/*
* Find the export entry for a given dentry.
*/
-static struct svc_export *exp_parent(svc_client *clp, struct path *path)
+static struct svc_export *exp_parent(struct cache_detail *cd, svc_client *clp,
+ struct path *path)
{
struct dentry *saved = dget(path->dentry);
- svc_export *exp = exp_get_by_name(clp, path, NULL);
+ svc_export *exp = exp_get_by_name(cd, clp, path, NULL);
while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(path->dentry)) {
struct dentry *parent = dget_parent(path->dentry);
dput(path->dentry);
path->dentry = parent;
- exp = exp_get_by_name(clp, path, NULL);
+ exp = exp_get_by_name(cd, clp, path, NULL);
}
dput(path->dentry);
path->dentry = saved;
@@ -834,13 +832,16 @@ static struct svc_export *exp_parent(svc_client *clp, struct path *path)
 * since it's harder to fool a kernel module than a user space program.
*/
int
-exp_rootfh(svc_client *clp, char *name, struct knfsd_fh *f, int maxsize)
+exp_rootfh(struct net *net, svc_client *clp, char *name,
+ struct knfsd_fh *f, int maxsize)
{
struct svc_export *exp;
struct path path;
struct inode *inode;
struct svc_fh fh;
int err;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct cache_detail *cd = nn->svc_export_cache;
err = -EPERM;
/* NB: we probably ought to check that it's NUL-terminated */
@@ -853,7 +854,7 @@ exp_rootfh(svc_client *clp, char *name, struct knfsd_fh *f, int maxsize)
dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n",
name, path.dentry, clp->name,
inode->i_sb->s_id, inode->i_ino);
- exp = exp_parent(clp, &path);
+ exp = exp_parent(cd, clp, &path);
if (IS_ERR(exp)) {
err = PTR_ERR(exp);
goto out;
@@ -875,16 +876,18 @@ out:
return err;
}
-static struct svc_export *exp_find(struct auth_domain *clp, int fsid_type,
+static struct svc_export *exp_find(struct cache_detail *cd,
+ struct auth_domain *clp, int fsid_type,
u32 *fsidv, struct cache_req *reqp)
{
struct svc_export *exp;
- struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);
+ struct nfsd_net *nn = net_generic(cd->net, nfsd_net_id);
+ struct svc_expkey *ek = exp_find_key(nn->svc_expkey_cache, clp, fsid_type, fsidv, reqp);
if (IS_ERR(ek))
return ERR_CAST(ek);
- exp = exp_get_by_name(clp, &ek->ek_path, reqp);
- cache_put(&ek->h, &svc_expkey_cache);
+ exp = exp_get_by_name(cd, clp, &ek->ek_path, reqp);
+ cache_put(&ek->h, nn->svc_expkey_cache);
if (IS_ERR(exp))
return ERR_CAST(exp);
@@ -926,12 +929,14 @@ struct svc_export *
rqst_exp_get_by_name(struct svc_rqst *rqstp, struct path *path)
{
struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
+ struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
+ struct cache_detail *cd = nn->svc_export_cache;
if (rqstp->rq_client == NULL)
goto gss;
/* First try the auth_unix client: */
- exp = exp_get_by_name(rqstp->rq_client, path, &rqstp->rq_chandle);
+ exp = exp_get_by_name(cd, rqstp->rq_client, path, &rqstp->rq_chandle);
if (PTR_ERR(exp) == -ENOENT)
goto gss;
if (IS_ERR(exp))
@@ -943,7 +948,7 @@ gss:
/* Otherwise, try falling back on gss client */
if (rqstp->rq_gssclient == NULL)
return exp;
- gssexp = exp_get_by_name(rqstp->rq_gssclient, path, &rqstp->rq_chandle);
+ gssexp = exp_get_by_name(cd, rqstp->rq_gssclient, path, &rqstp->rq_chandle);
if (PTR_ERR(gssexp) == -ENOENT)
return exp;
if (!IS_ERR(exp))
@@ -955,12 +960,15 @@ struct svc_export *
rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
{
struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
+ struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
+ struct cache_detail *cd = nn->svc_export_cache;
if (rqstp->rq_client == NULL)
goto gss;
/* First try the auth_unix client: */
- exp = exp_find(rqstp->rq_client, fsid_type, fsidv, &rqstp->rq_chandle);
+ exp = exp_find(cd, rqstp->rq_client, fsid_type,
+ fsidv, &rqstp->rq_chandle);
if (PTR_ERR(exp) == -ENOENT)
goto gss;
if (IS_ERR(exp))
@@ -972,7 +980,7 @@ gss:
/* Otherwise, try falling back on gss client */
if (rqstp->rq_gssclient == NULL)
return exp;
- gssexp = exp_find(rqstp->rq_gssclient, fsid_type, fsidv,
+ gssexp = exp_find(cd, rqstp->rq_gssclient, fsid_type, fsidv,
&rqstp->rq_chandle);
if (PTR_ERR(gssexp) == -ENOENT)
return exp;
@@ -1029,13 +1037,15 @@ exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
/* Iterator */
static void *e_start(struct seq_file *m, loff_t *pos)
- __acquires(svc_export_cache.hash_lock)
+ __acquires(((struct cache_detail *)m->private)->hash_lock)
{
loff_t n = *pos;
unsigned hash, export;
struct cache_head *ch;
-
- read_lock(&svc_export_cache.hash_lock);
+ struct cache_detail *cd = m->private;
+ struct cache_head **export_table = cd->hash_table;
+
+ read_lock(&cd->hash_lock);
if (!n--)
return SEQ_START_TOKEN;
hash = n >> 32;
@@ -1060,6 +1070,8 @@ static void *e_next(struct seq_file *m, void *p, loff_t *pos)
{
struct cache_head *ch = p;
int hash = (*pos >> 32);
+ struct cache_detail *cd = m->private;
+ struct cache_head **export_table = cd->hash_table;
if (p == SEQ_START_TOKEN)
hash = 0;
@@ -1082,9 +1094,11 @@ static void *e_next(struct seq_file *m, void *p, loff_t *pos)
}
static void e_stop(struct seq_file *m, void *p)
- __releases(svc_export_cache.hash_lock)
+ __releases(((struct cache_detail *)m->private)->hash_lock)
{
- read_unlock(&svc_export_cache.hash_lock);
+ struct cache_detail *cd = m->private;
+
+ read_unlock(&cd->hash_lock);
}
static struct flags {
@@ -1195,6 +1209,7 @@ static int e_show(struct seq_file *m, void *p)
{
struct cache_head *cp = p;
struct svc_export *exp = container_of(cp, struct svc_export, h);
+ struct cache_detail *cd = m->private;
if (p == SEQ_START_TOKEN) {
seq_puts(m, "# Version 1.1\n");
@@ -1203,10 +1218,10 @@ static int e_show(struct seq_file *m, void *p)
}
cache_get(&exp->h);
- if (cache_check(&svc_export_cache, &exp->h, NULL))
+ if (cache_check(cd, &exp->h, NULL))
return 0;
- cache_put(&exp->h, &svc_export_cache);
- return svc_export_show(m, &svc_export_cache, cp);
+ exp_put(exp);
+ return svc_export_show(m, cd, cp);
}
const struct seq_operations nfs_exports_op = {
@@ -1216,48 +1231,70 @@ const struct seq_operations nfs_exports_op = {
.show = e_show,
};
-
/*
* Initialize the exports module.
*/
int
-nfsd_export_init(void)
+nfsd_export_init(struct net *net)
{
int rv;
- dprintk("nfsd: initializing export module.\n");
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ dprintk("nfsd: initializing export module (net: %p).\n", net);
- rv = cache_register_net(&svc_export_cache, &init_net);
+ nn->svc_export_cache = cache_create_net(&svc_export_cache_template, net);
+ if (IS_ERR(nn->svc_export_cache))
+ return PTR_ERR(nn->svc_export_cache);
+ rv = cache_register_net(nn->svc_export_cache, net);
if (rv)
- return rv;
- rv = cache_register_net(&svc_expkey_cache, &init_net);
+ goto destroy_export_cache;
+
+ nn->svc_expkey_cache = cache_create_net(&svc_expkey_cache_template, net);
+ if (IS_ERR(nn->svc_expkey_cache)) {
+ rv = PTR_ERR(nn->svc_expkey_cache);
+ goto unregister_export_cache;
+ }
+ rv = cache_register_net(nn->svc_expkey_cache, net);
if (rv)
- cache_unregister_net(&svc_export_cache, &init_net);
- return rv;
+ goto destroy_expkey_cache;
+ return 0;
+destroy_expkey_cache:
+ cache_destroy_net(nn->svc_expkey_cache, net);
+unregister_export_cache:
+ cache_unregister_net(nn->svc_export_cache, net);
+destroy_export_cache:
+ cache_destroy_net(nn->svc_export_cache, net);
+ return rv;
}
/*
* Flush exports table - called when last nfsd thread is killed
*/
void
-nfsd_export_flush(void)
+nfsd_export_flush(struct net *net)
{
- cache_purge(&svc_expkey_cache);
- cache_purge(&svc_export_cache);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ cache_purge(nn->svc_expkey_cache);
+ cache_purge(nn->svc_export_cache);
}
/*
* Shutdown the exports module.
*/
void
-nfsd_export_shutdown(void)
+nfsd_export_shutdown(struct net *net)
{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- dprintk("nfsd: shutting down export module.\n");
+ dprintk("nfsd: shutting down export module (net: %p).\n", net);
- cache_unregister_net(&svc_expkey_cache, &init_net);
- cache_unregister_net(&svc_export_cache, &init_net);
- svcauth_unix_purge();
+ cache_unregister_net(nn->svc_expkey_cache, net);
+ cache_unregister_net(nn->svc_export_cache, net);
+ cache_destroy_net(nn->svc_expkey_cache, net);
+ cache_destroy_net(nn->svc_export_cache, net);
+ svcauth_unix_purge(net);
- dprintk("nfsd: export shutdown complete.\n");
+ dprintk("nfsd: export shutdown complete (net: %p).\n", net);
}
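
nfsd_export_init() above adopts the usual goto-unwind style: every setup step gains a label that tears down exactly what was already built, in reverse order. A self-contained sketch of the pattern with placeholder resources (nothing here is nfsd code):

#include <stdio.h>
#include <stdlib.h>

static void *create_a(void) { return malloc(1); }
static void *create_b(void) { return malloc(1); }
static int register_b(void *b) { return b ? 0 : -1; }

int init_two_caches(void **a_out, void **b_out)
{
	void *a, *b;
	int rv;

	a = create_a();
	if (!a)
		return -1;
	b = create_b();
	if (!b) {
		rv = -1;
		goto destroy_a;
	}
	rv = register_b(b);
	if (rv)
		goto destroy_b;
	*a_out = a;
	*b_out = b;
	return 0;

destroy_b:
	free(b);                /* undo only what was already built */
destroy_a:
	free(a);
	return rv;
}

int main(void)
{
	void *a, *b;

	if (init_two_caches(&a, &b) == 0) {
		puts("initialised");
		free(b);
		free(a);
	}
	return 0;
}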
diff --git a/fs/nfsd/idmap.h b/fs/nfsd/idmap.h
index 2f3be1321534..9d513efc01ba 100644
--- a/fs/nfsd/idmap.h
+++ b/fs/nfsd/idmap.h
@@ -42,14 +42,14 @@
#define IDMAP_NAMESZ 128
#ifdef CONFIG_NFSD_V4
-int nfsd_idmap_init(void);
-void nfsd_idmap_shutdown(void);
+int nfsd_idmap_init(struct net *);
+void nfsd_idmap_shutdown(struct net *);
#else
-static inline int nfsd_idmap_init(void)
+static inline int nfsd_idmap_init(struct net *net)
{
return 0;
}
-static inline void nfsd_idmap_shutdown(void)
+static inline void nfsd_idmap_shutdown(struct net *net)
{
}
#endif
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 12e0cff435b4..39365636b244 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -28,6 +28,12 @@ struct cld_net;
struct nfsd_net {
struct cld_net *cld_net;
+
+ struct cache_detail *svc_expkey_cache;
+ struct cache_detail *svc_export_cache;
+
+ struct cache_detail *idtoname_cache;
+ struct cache_detail *nametoid_cache;
};
extern int nfsd_net_id;
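
The new nfsd_net fields replace the old file-scope cache pointers with per-namespace state reached through net_generic(). As a rough userspace model only, with an invented registry in place of the real pernet machinery:

#include <stdio.h>
#include <stdlib.h>

struct nfsd_net_model {
	void *svc_expkey_cache;
	void *svc_export_cache;
};

#define MAX_NETNS 4
static struct nfsd_net_model *per_net[MAX_NETNS];

/* Stand-in for net_generic(net, nfsd_net_id). */
static struct nfsd_net_model *net_generic_model(int net_id)
{
	return per_net[net_id];
}

int main(void)
{
	/* two namespaces, each with its own (fake) caches */
	for (int i = 0; i < 2; i++) {
		per_net[i] = calloc(1, sizeof(*per_net[i]));
		per_net[i]->svc_export_cache = malloc(1);
		per_net[i]->svc_expkey_cache = malloc(1);
	}
	printf("net 0 export cache %p, net 1 export cache %p\n",
	       net_generic_model(0)->svc_export_cache,
	       net_generic_model(1)->svc_export_cache);
	for (int i = 0; i < 2; i++) {
		free(per_net[i]->svc_export_cache);
		free(per_net[i]->svc_expkey_cache);
		free(per_net[i]);
	}
	return 0;
}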
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 322d11ce06a4..286a7f8f2024 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -36,9 +36,11 @@
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/sunrpc/svc_xprt.h>
#include <net/net_namespace.h>
#include "idmap.h"
#include "nfsd.h"
+#include "netns.h"
/*
* Turn off idmapping when using AUTH_SYS.
@@ -107,8 +109,6 @@ ent_alloc(void)
* ID -> Name cache
*/
-static struct cache_head *idtoname_table[ENT_HASHMAX];
-
static uint32_t
idtoname_hash(struct ent *ent)
{
@@ -183,13 +183,13 @@ warn_no_idmapd(struct cache_detail *detail, int has_died)
static int idtoname_parse(struct cache_detail *, char *, int);
-static struct ent *idtoname_lookup(struct ent *);
-static struct ent *idtoname_update(struct ent *, struct ent *);
+static struct ent *idtoname_lookup(struct cache_detail *, struct ent *);
+static struct ent *idtoname_update(struct cache_detail *, struct ent *,
+ struct ent *);
-static struct cache_detail idtoname_cache = {
+static struct cache_detail idtoname_cache_template = {
.owner = THIS_MODULE,
.hash_size = ENT_HASHMAX,
- .hash_table = idtoname_table,
.name = "nfs4.idtoname",
.cache_put = ent_put,
.cache_upcall = idtoname_upcall,
@@ -244,7 +244,7 @@ idtoname_parse(struct cache_detail *cd, char *buf, int buflen)
goto out;
error = -ENOMEM;
- res = idtoname_lookup(&ent);
+ res = idtoname_lookup(cd, &ent);
if (!res)
goto out;
@@ -260,11 +260,11 @@ idtoname_parse(struct cache_detail *cd, char *buf, int buflen)
else
memcpy(ent.name, buf1, sizeof(ent.name));
error = -ENOMEM;
- res = idtoname_update(&ent, res);
+ res = idtoname_update(cd, &ent, res);
if (res == NULL)
goto out;
- cache_put(&res->h, &idtoname_cache);
+ cache_put(&res->h, cd);
error = 0;
out:
@@ -275,10 +275,9 @@ out:
static struct ent *
-idtoname_lookup(struct ent *item)
+idtoname_lookup(struct cache_detail *cd, struct ent *item)
{
- struct cache_head *ch = sunrpc_cache_lookup(&idtoname_cache,
- &item->h,
+ struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
idtoname_hash(item));
if (ch)
return container_of(ch, struct ent, h);
@@ -287,10 +286,9 @@ idtoname_lookup(struct ent *item)
}
static struct ent *
-idtoname_update(struct ent *new, struct ent *old)
+idtoname_update(struct cache_detail *cd, struct ent *new, struct ent *old)
{
- struct cache_head *ch = sunrpc_cache_update(&idtoname_cache,
- &new->h, &old->h,
+ struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h,
idtoname_hash(new));
if (ch)
return container_of(ch, struct ent, h);
@@ -303,8 +301,6 @@ idtoname_update(struct ent *new, struct ent *old)
* Name -> ID cache
*/
-static struct cache_head *nametoid_table[ENT_HASHMAX];
-
static inline int
nametoid_hash(struct ent *ent)
{
@@ -359,14 +355,14 @@ nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
return 0;
}
-static struct ent *nametoid_lookup(struct ent *);
-static struct ent *nametoid_update(struct ent *, struct ent *);
+static struct ent *nametoid_lookup(struct cache_detail *, struct ent *);
+static struct ent *nametoid_update(struct cache_detail *, struct ent *,
+ struct ent *);
static int nametoid_parse(struct cache_detail *, char *, int);
-static struct cache_detail nametoid_cache = {
+static struct cache_detail nametoid_cache_template = {
.owner = THIS_MODULE,
.hash_size = ENT_HASHMAX,
- .hash_table = nametoid_table,
.name = "nfs4.nametoid",
.cache_put = ent_put,
.cache_upcall = nametoid_upcall,
@@ -426,14 +422,14 @@ nametoid_parse(struct cache_detail *cd, char *buf, int buflen)
set_bit(CACHE_NEGATIVE, &ent.h.flags);
error = -ENOMEM;
- res = nametoid_lookup(&ent);
+ res = nametoid_lookup(cd, &ent);
if (res == NULL)
goto out;
- res = nametoid_update(&ent, res);
+ res = nametoid_update(cd, &ent, res);
if (res == NULL)
goto out;
- cache_put(&res->h, &nametoid_cache);
+ cache_put(&res->h, cd);
error = 0;
out:
kfree(buf1);
@@ -443,10 +439,9 @@ out:
static struct ent *
-nametoid_lookup(struct ent *item)
+nametoid_lookup(struct cache_detail *cd, struct ent *item)
{
- struct cache_head *ch = sunrpc_cache_lookup(&nametoid_cache,
- &item->h,
+ struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
nametoid_hash(item));
if (ch)
return container_of(ch, struct ent, h);
@@ -455,10 +450,9 @@ nametoid_lookup(struct ent *item)
}
static struct ent *
-nametoid_update(struct ent *new, struct ent *old)
+nametoid_update(struct cache_detail *cd, struct ent *new, struct ent *old)
{
- struct cache_head *ch = sunrpc_cache_update(&nametoid_cache,
- &new->h, &old->h,
+ struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h,
nametoid_hash(new));
if (ch)
return container_of(ch, struct ent, h);
@@ -471,34 +465,55 @@ nametoid_update(struct ent *new, struct ent *old)
*/
int
-nfsd_idmap_init(void)
+nfsd_idmap_init(struct net *net)
{
int rv;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- rv = cache_register_net(&idtoname_cache, &init_net);
+ nn->idtoname_cache = cache_create_net(&idtoname_cache_template, net);
+ if (IS_ERR(nn->idtoname_cache))
+ return PTR_ERR(nn->idtoname_cache);
+ rv = cache_register_net(nn->idtoname_cache, net);
if (rv)
- return rv;
- rv = cache_register_net(&nametoid_cache, &init_net);
+ goto destroy_idtoname_cache;
+ nn->nametoid_cache = cache_create_net(&nametoid_cache_template, net);
+ if (IS_ERR(nn->nametoid_cache)) {
+ rv = PTR_ERR(nn->idtoname_cache);
+ goto unregister_idtoname_cache;
+ }
+ rv = cache_register_net(nn->nametoid_cache, net);
if (rv)
- cache_unregister_net(&idtoname_cache, &init_net);
+ goto destroy_nametoid_cache;
+ return 0;
+
+destroy_nametoid_cache:
+ cache_destroy_net(nn->nametoid_cache, net);
+unregister_idtoname_cache:
+ cache_unregister_net(nn->idtoname_cache, net);
+destroy_idtoname_cache:
+ cache_destroy_net(nn->idtoname_cache, net);
return rv;
}
void
-nfsd_idmap_shutdown(void)
+nfsd_idmap_shutdown(struct net *net)
{
- cache_unregister_net(&idtoname_cache, &init_net);
- cache_unregister_net(&nametoid_cache, &init_net);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ cache_unregister_net(nn->idtoname_cache, net);
+ cache_unregister_net(nn->nametoid_cache, net);
+ cache_destroy_net(nn->idtoname_cache, net);
+ cache_destroy_net(nn->nametoid_cache, net);
}
static int
idmap_lookup(struct svc_rqst *rqstp,
- struct ent *(*lookup_fn)(struct ent *), struct ent *key,
- struct cache_detail *detail, struct ent **item)
+ struct ent *(*lookup_fn)(struct cache_detail *, struct ent *),
+ struct ent *key, struct cache_detail *detail, struct ent **item)
{
int ret;
- *item = lookup_fn(key);
+ *item = lookup_fn(detail, key);
if (!*item)
return -ENOMEM;
retry:
@@ -506,7 +521,7 @@ idmap_lookup(struct svc_rqst *rqstp,
if (ret == -ETIMEDOUT) {
struct ent *prev_item = *item;
- *item = lookup_fn(key);
+ *item = lookup_fn(detail, key);
if (*item != prev_item)
goto retry;
cache_put(&(*item)->h, detail);
@@ -531,19 +546,20 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen
.type = type,
};
int ret;
+ struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
if (namelen + 1 > sizeof(key.name))
return nfserr_badowner;
memcpy(key.name, name, namelen);
key.name[namelen] = '\0';
strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
- ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item);
+ ret = idmap_lookup(rqstp, nametoid_lookup, &key, nn->nametoid_cache, &item);
if (ret == -ENOENT)
return nfserr_badowner;
if (ret)
return nfserrno(ret);
*id = item->id;
- cache_put(&item->h, &nametoid_cache);
+ cache_put(&item->h, nn->nametoid_cache);
return 0;
}
@@ -555,9 +571,10 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
.type = type,
};
int ret;
+ struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
- ret = idmap_lookup(rqstp, idtoname_lookup, &key, &idtoname_cache, &item);
+ ret = idmap_lookup(rqstp, idtoname_lookup, &key, nn->idtoname_cache, &item);
if (ret == -ENOENT)
return sprintf(name, "%u", id);
if (ret)
@@ -565,7 +582,7 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
ret = strlen(item->name);
BUG_ON(ret > IDMAP_NAMESZ);
memcpy(name, item->name, ret);
- cache_put(&item->h, &idtoname_cache);
+ cache_put(&item->h, nn->idtoname_cache);
return ret;
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 7f71c69cdcdf..03f82c0bc35d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3155,10 +3155,17 @@ out:
static struct lock_manager nfsd4_manager = {
};
+static bool grace_ended;
+
static void
nfsd4_end_grace(void)
{
+ /* do nothing if grace period already ended */
+ if (grace_ended)
+ return;
+
dprintk("NFSD: end of grace period\n");
+ grace_ended = true;
nfsd4_record_grace_done(&init_net, boot_time);
locks_end_grace(&nfsd4_manager);
/*
@@ -3183,8 +3190,7 @@ nfs4_laundromat(void)
nfs4_lock_state();
dprintk("NFSD: laundromat service - starting\n");
- if (locks_in_grace())
- nfsd4_end_grace();
+ nfsd4_end_grace();
INIT_LIST_HEAD(&reaplist);
spin_lock(&client_lock);
list_for_each_safe(pos, next, &client_lru) {
@@ -4055,7 +4061,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfs4_openowner *open_sop = NULL;
struct nfs4_lockowner *lock_sop = NULL;
struct nfs4_ol_stateid *lock_stp;
- struct nfs4_file *fp;
struct file *filp = NULL;
struct file_lock file_lock;
struct file_lock conflock;
@@ -4123,7 +4128,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
}
lock_sop = lockowner(lock_stp->st_stateowner);
- fp = lock_stp->st_file;
lkflg = setlkflg(lock->lk_type);
status = nfs4_check_openmode(lock_stp, lkflg);
@@ -4715,6 +4719,7 @@ nfs4_state_start(void)
nfsd4_client_tracking_init(&init_net);
boot_time = get_seconds();
locks_start_grace(&nfsd4_manager);
+ grace_ended = false;
printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
nfsd4_grace);
ret = set_callback_cred();
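
The grace_ended flag introduced above makes nfsd4_end_grace() safe to call from every laundromat pass: only the first call after the grace period starts does the work. A tiny standalone illustration of that one-shot guard (no locking shown, names invented):

#include <stdio.h>
#include <stdbool.h>

static bool grace_ended;

static void end_grace(void)
{
	if (grace_ended)            /* already done, nothing to do */
		return;
	grace_ended = true;
	puts("grace period ended");
}

static void start_grace(void)
{
	grace_ended = false;        /* re-arm for the next period */
	puts("grace period started");
}

int main(void)
{
	start_grace();
	end_grace();
	end_grace();                /* second call is a no-op */
	return 0;
}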
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 2c53be6d3579..72699885ac48 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -127,7 +127,17 @@ static const struct file_operations transaction_ops = {
static int exports_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &nfs_exports_op);
+ int err;
+ struct seq_file *seq;
+ struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+
+ err = seq_open(file, &nfs_exports_op);
+ if (err)
+ return err;
+
+ seq = file->private_data;
+ seq->private = nn->svc_export_cache;
+ return 0;
}
static const struct file_operations exports_operations = {
@@ -345,7 +355,7 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
if (!dom)
return -ENOMEM;
- len = exp_rootfh(dom, path, &fh, maxsize);
+ len = exp_rootfh(&init_net, dom, path, &fh, maxsize);
auth_domain_put(dom);
if (len)
return len;
@@ -1127,7 +1137,34 @@ static int create_proc_exports_entry(void)
#endif
int nfsd_net_id;
+
+static __net_init int nfsd_init_net(struct net *net)
+{
+ int retval;
+
+ retval = nfsd_export_init(net);
+ if (retval)
+ goto out_export_error;
+ retval = nfsd_idmap_init(net);
+ if (retval)
+ goto out_idmap_error;
+ return 0;
+
+out_idmap_error:
+ nfsd_export_shutdown(net);
+out_export_error:
+ return retval;
+}
+
+static __net_exit void nfsd_exit_net(struct net *net)
+{
+ nfsd_idmap_shutdown(net);
+ nfsd_export_shutdown(net);
+}
+
static struct pernet_operations nfsd_net_ops = {
+ .init = nfsd_init_net,
+ .exit = nfsd_exit_net,
.id = &nfsd_net_id,
.size = sizeof(struct nfsd_net),
};
@@ -1154,16 +1191,10 @@ static int __init init_nfsd(void)
retval = nfsd_reply_cache_init();
if (retval)
goto out_free_stat;
- retval = nfsd_export_init();
- if (retval)
- goto out_free_cache;
nfsd_lockd_init(); /* lockd->nfsd callbacks */
- retval = nfsd_idmap_init();
- if (retval)
- goto out_free_lockd;
retval = create_proc_exports_entry();
if (retval)
- goto out_free_idmap;
+ goto out_free_lockd;
retval = register_filesystem(&nfsd_fs_type);
if (retval)
goto out_free_all;
@@ -1171,12 +1202,8 @@ static int __init init_nfsd(void)
out_free_all:
remove_proc_entry("fs/nfs/exports", NULL);
remove_proc_entry("fs/nfs", NULL);
-out_free_idmap:
- nfsd_idmap_shutdown();
out_free_lockd:
nfsd_lockd_shutdown();
- nfsd_export_shutdown();
-out_free_cache:
nfsd_reply_cache_shutdown();
out_free_stat:
nfsd_stat_shutdown();
@@ -1192,13 +1219,11 @@ out_unregister_notifier:
static void __exit exit_nfsd(void)
{
- nfsd_export_shutdown();
nfsd_reply_cache_shutdown();
remove_proc_entry("fs/nfs/exports", NULL);
remove_proc_entry("fs/nfs", NULL);
nfsd_stat_shutdown();
nfsd_lockd_shutdown();
- nfsd_idmap_shutdown();
nfsd4_free_slabs();
nfsd_fault_inject_cleanup();
unregister_filesystem(&nfsd_fs_type);
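
The nfsctl.c change above moves export and idmap cache setup into per-network-namespace .init/.exit hooks and registers them through pernet_operations with .id and .size set. A minimal sketch of that mechanism under the 3.x API (struct my_net, my_net_id and the rest of the names are illustrative, not nfsd's): the core allocates and zeroes .size bytes for every namespace, and net_generic(net, id) returns that namespace's private area.

#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct my_net {
	int counter;		/* hypothetical per-namespace state */
};

static int my_net_id;

static __net_init int my_net_init(struct net *net)
{
	struct my_net *mn = net_generic(net, my_net_id);

	mn->counter = 0;	/* area is preallocated and zeroed by the core */
	return 0;
}

static __net_exit void my_net_exit(struct net *net)
{
	/* nothing to free: the core releases the per-net area itself */
}

static struct pernet_operations my_net_ops = {
	.init = my_net_init,
	.exit = my_net_exit,
	.id   = &my_net_id,
	.size = sizeof(struct my_net),
};

static int __init my_init(void)
{
	return register_pernet_subsys(&my_net_ops);
}

static void __exit my_exit(void)
{
	unregister_pernet_subsys(&my_net_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
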
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 68454e75fce9..cc793005a87c 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -636,7 +636,7 @@ fh_put(struct svc_fh *fhp)
#endif
}
if (exp) {
- cache_put(&exp->h, &svc_export_cache);
+ exp_put(exp);
fhp->fh_export = NULL;
}
return;
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 28dfad39f0c5..cb4d51d8cbdb 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -220,7 +220,7 @@ static int nfsd_startup(unsigned short port, int nrservs)
ret = nfsd_init_socks(port);
if (ret)
goto out_racache;
- ret = lockd_up();
+ ret = lockd_up(&init_net);
if (ret)
goto out_racache;
ret = nfs4_state_start();
@@ -229,7 +229,7 @@ static int nfsd_startup(unsigned short port, int nrservs)
nfsd_up = true;
return 0;
out_lockd:
- lockd_down();
+ lockd_down(&init_net);
out_racache:
nfsd_racache_shutdown();
return ret;
@@ -246,7 +246,7 @@ static void nfsd_shutdown(void)
if (!nfsd_up)
return;
nfs4_state_shutdown();
- lockd_down();
+ lockd_down(&init_net);
nfsd_racache_shutdown();
nfsd_up = false;
}
@@ -261,7 +261,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
printk(KERN_WARNING "nfsd: last server has exited, flushing export "
"cache\n");
- nfsd_export_flush();
+ nfsd_export_flush(net);
}
void nfsd_reset_versions(void)
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 568666156ea4..c8bd9c3be7f7 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -2039,7 +2039,7 @@ nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
if (err)
goto out;
- offset = vfs_llseek(file, offset, 0);
+ offset = vfs_llseek(file, offset, SEEK_SET);
if (offset < 0) {
err = nfserrno((int)offset);
goto out_close;
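
The vfs.c hunk only replaces a magic 0 with SEEK_SET; the whence argument of vfs_llseek() takes the same SEEK_* values as userspace lseek(2). A trivial userspace illustration (the file path is arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);
	off_t pos;

	if (fd < 0)
		return 1;
	pos = lseek(fd, 0, SEEK_SET);	/* absolute offset, same as whence == 0 */
	printf("position after SEEK_SET: %lld\n", (long long)pos);
	pos = lseek(fd, 0, SEEK_END);	/* end of file, i.e. the file size */
	printf("file size: %lld\n", (long long)pos);
	close(fd);
	return 0;
}
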
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 26601529dc17..62cebc8e1a1f 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -37,6 +37,7 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* This function should be implemented when the writeback function
* will be implemented.
*/
+ struct the_nilfs *nilfs;
struct inode *inode = file->f_mapping->host;
int err;
@@ -45,18 +46,21 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
return err;
mutex_lock(&inode->i_mutex);
- if (!nilfs_inode_dirty(inode)) {
- mutex_unlock(&inode->i_mutex);
- return 0;
+ if (nilfs_inode_dirty(inode)) {
+ if (datasync)
+ err = nilfs_construct_dsync_segment(inode->i_sb, inode,
+ 0, LLONG_MAX);
+ else
+ err = nilfs_construct_segment(inode->i_sb);
}
-
- if (datasync)
- err = nilfs_construct_dsync_segment(inode->i_sb, inode, 0,
- LLONG_MAX);
- else
- err = nilfs_construct_segment(inode->i_sb);
-
mutex_unlock(&inode->i_mutex);
+
+ nilfs = inode->i_sb->s_fs_info;
+ if (!err && nilfs_test_opt(nilfs, BARRIER)) {
+ err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ if (err != -EIO)
+ err = 0;
+ }
return err;
}
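
The nilfs2 change above builds either a data-sync segment (datasync case) or a full segment, then flushes the block device cache when the BARRIER mount option is set. That split mirrors what userspace sees as fdatasync() versus fsync(); a small userspace analogue, illustrative only and not nilfs code:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_WRONLY | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "hello\n", 6) != 6)
		perror("write");
	if (fdatasync(fd))	/* data (and size) only; cheaper than fsync() */
		perror("fdatasync");
	if (fsync(fd))		/* data plus all metadata */
		perror("fsync");
	close(fd);
	return 0;
}
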
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 8f7b95ac1f7e..7cc64465ec26 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -734,7 +734,7 @@ void nilfs_evict_inode(struct inode *inode)
if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
if (inode->i_data.nrpages)
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
nilfs_clear_inode(inode);
return;
}
@@ -746,7 +746,7 @@ void nilfs_evict_inode(struct inode *inode)
/* TODO: some of the following operations may fail. */
nilfs_truncate_bmap(ii, 0);
nilfs_mark_inode_dirty(inode);
- end_writeback(inode);
+ clear_inode(inode);
ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
if (!ret)
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 2a70fce70c65..06658caa18bd 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -692,8 +692,14 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
if (ret < 0)
return ret;
+ nilfs = inode->i_sb->s_fs_info;
+ if (nilfs_test_opt(nilfs, BARRIER)) {
+ ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ if (ret == -EIO)
+ return ret;
+ }
+
if (argp != NULL) {
- nilfs = inode->i_sb->s_fs_info;
down_read(&nilfs->ns_segctor_sem);
cno = nilfs->ns_cno - 1;
up_read(&nilfs->ns_segctor_sem);
diff --git a/fs/nls/Kconfig b/fs/nls/Kconfig
index a39edc41becc..b5eac98fd7bd 100644
--- a/fs/nls/Kconfig
+++ b/fs/nls/Kconfig
@@ -30,7 +30,7 @@ config NLS_DEFAULT
cp949, cp950, cp1251, cp1255, euc-jp, euc-kr, gb2312, iso8859-1,
iso8859-2, iso8859-3, iso8859-4, iso8859-5, iso8859-6, iso8859-7,
iso8859-8, iso8859-9, iso8859-13, iso8859-14, iso8859-15,
- koi8-r, koi8-ru, koi8-u, sjis, tis-620, utf8.
+ koi8-r, koi8-ru, koi8-u, sjis, tis-620, macroman, utf8.
If you specify a wrong value, it will use the built-in NLS;
compatible with iso8859-1.
@@ -452,6 +452,161 @@ config NLS_KOI8_U
input/output character sets. Say Y here for the preferred Ukrainian
(koi8-u) and Belarusian (koi8-ru) character sets.
+config NLS_CODEPAGE_MACROMAN
+ tristate "Codepage macroman"
+ ---help---
+ The Apple HFS file system family can deal with filenames in
+ native language character sets. These character sets are stored in
+ so-called MAC codepages. You need to include the appropriate
+ codepage if you want to be able to read/write these filenames on
+ Mac partitions correctly. This does apply to the filenames
+ only, not to the file contents. You can include several codepages;
+ say Y here if you want to include the Mac codepage that is used for
+ much of Europe -- United Kingdom, Germany, Spain, Italy, and [add
+ more countries here].
+
+ If unsure, say Y.
+
+config NLS_CODEPAGE_MACCELTIC
+ tristate "Codepage macceltic"
+ ---help---
+ The Apple HFS file system family can deal with filenames in
+ native language character sets. These character sets are stored in
+ so-called MAC codepages. You need to include the appropriate
+ codepage if you want to be able to read/write these filenames on
+ Mac partitions correctly. This does apply to the filenames
+ only, not to the file contents. You can include several codepages;
+ say Y here if you want to include the Mac codepage that is used for
+ Celtic.
+
+ If unsure, say Y.
+
+config NLS_CODEPAGE_MACCENTEURO
+ tristate "Codepage maccenteuro"
+ ---help---
+ The Apple HFS file system family can deal with filenames in
+ native language character sets. These character sets are stored in
+ so-called MAC codepages. You need to include the appropriate
+ codepage if you want to be able to read/write these filenames on
+ Mac partitions correctly. This does apply to the filenames
+ only, not to the file contents. You can include several codepages;
+ say Y here if you want to include the Mac codepage that is used for
+ Central Europe.
+
+ If unsure, say Y.
+
+config NLS_CODEPAGE_MACCROATIAN
+ tristate "Codepage maccroatian"
+ ---help---
+ The Apple HFS file system family can deal with filenames in
+ native language character sets. These character sets are stored in
+ so-called MAC codepages. You need to include the appropriate
+ codepage if you want to be able to read/write these filenames on
+ Mac partitions correctly. This does apply to the filenames
+ only, not to the file contents. You can include several codepages;
+ say Y here if you want to include the Mac codepage that is used for
+ Croatian.
+
+ If unsure, say Y.
+
+config NLS_CODEPAGE_MACCYRILLIC
+ tristate "Codepage maccyrillic"
+ ---help---
+ The Apple HFS file system family can deal with filenames in
+ native language character sets. These character sets are stored in
+ so-called MAC codepages. You need to include the appropriate
+ codepage if you want to be able to read/write these filenames on
+ Mac partitions correctly. This does apply to the filenames
+ only, not to the file contents. You can include several codepages;
+ say Y here if you want to include the Mac codepage that is used for
+ Cyrillic.
+
+ If unsure, say Y.
+
+config NLS_CODEPAGE_MACGAELIC
+ tristate "Codepage macgaelic"
+ ---help---
+ The Apple HFS file system family can deal with filenames in
+ native language character sets. These character sets are stored in
+ so-called MAC codepages. You need to include the appropriate
+ codepage if you want to be able to read/write these filenames on
+ Mac partitions correctly. This does apply to the filenames
+ only, not to the file contents. You can include several codepages;
+ say Y here if you want to include the Mac codepage that is used for
+ Gaelic.
+
+ If unsure, say Y.
+
+config NLS_CODEPAGE_MACGREEK
+ tristate "Codepage macgreek"
+ ---help---
+ The Apple HFS file system family can deal with filenames in
+ native language character sets. These character sets are stored in
+ so-called MAC codepages. You need to include the appropriate
+ codepage if you want to be able to read/write these filenames on
+ Mac partitions correctly. This does apply to the filenames
+ only, not to the file contents. You can include several codepages;
+ say Y here if you want to include the Mac codepage that is used for
+ Greek.
+
+ If unsure, say Y.
+
+config NLS_CODEPAGE_MACICELAND
+ tristate "Codepage maciceland"
+ ---help---
+ The Apple HFS file system family can deal with filenames in
+ native language character sets. These character sets are stored in
+ so-called MAC codepages. You need to include the appropriate
+ codepage if you want to be able to read/write these filenames on
+ Mac partitions correctly. This does apply to the filenames
+ only, not to the file contents. You can include several codepages;
+ say Y here if you want to include the Mac codepage that is used for
+ Iceland.
+
+ If unsure, say Y.
+
+config NLS_CODEPAGE_MACINUIT
+ tristate "Codepage macinuit"
+ ---help---
+ The Apple HFS file system family can deal with filenames in
+ native language character sets. These character sets are stored in
+ so-called MAC codepages. You need to include the appropriate
+ codepage if you want to be able to read/write these filenames on
+ Mac partitions correctly. This does apply to the filenames
+ only, not to the file contents. You can include several codepages;
+ say Y here if you want to include the Mac codepage that is used for
+ Inuit.
+
+ If unsure, say Y.
+
+config NLS_CODEPAGE_MACROMANIAN
+ tristate "Codepage macromanian"
+ ---help---
+ The Apple HFS file system family can deal with filenames in
+ native language character sets. These character sets are stored in
+ so-called MAC codepages. You need to include the appropriate
+ codepage if you want to be able to read/write these filenames on
+ Mac partitions correctly. This does apply to the filenames
+ only, not to the file contents. You can include several codepages;
+ say Y here if you want to include the Mac codepage that is used for
+ Romanian.
+
+ If unsure, say Y.
+
+config NLS_CODEPAGE_MACTURKISH
+ tristate "Codepage macturkish"
+ ---help---
+ The Apple HFS file system family can deal with filenames in
+ native language character sets. These character sets are stored in
+ so-called MAC codepages. You need to include the appropriate
+ codepage if you want to be able to read/write these filenames on
+ Mac partitions correctly. This does apply to the filenames
+ only, not to the file contents. You can include several codepages;
+ say Y here if you want to include the Mac codepage that is used for
+ Turkish.
+
+ If unsure, say Y.
+
config NLS_UTF8
tristate "NLS UTF-8"
help
diff --git a/fs/nls/Makefile b/fs/nls/Makefile
index f499dd7c3905..b6b0550a7c80 100644
--- a/fs/nls/Makefile
+++ b/fs/nls/Makefile
@@ -2,6 +2,18 @@
# Makefile for native language support
#
+CONFIG_NLS_MACCELTIC=m
+CONFIG_NLS_MACCENTEURO=m
+CONFIG_NLS_MACCROATIAN=m
+CONFIG_NLS_MACCYRILLIC=m
+CONFIG_NLS_MACGAELIC=m
+CONFIG_NLS_MACGREEK=m
+CONFIG_NLS_MACICELAND=m
+CONFIG_NLS_MACINUIT=m
+CONFIG_NLS_MACROMANIAN=m
+CONFIG_NLS_MACROMAN=m
+CONFIG_NLS_MACTURKISH=m
+
obj-$(CONFIG_NLS) += nls_base.o
obj-$(CONFIG_NLS_CODEPAGE_437) += nls_cp437.o
@@ -42,3 +54,14 @@ obj-$(CONFIG_NLS_ISO8859_15) += nls_iso8859-15.o
obj-$(CONFIG_NLS_KOI8_R) += nls_koi8-r.o
obj-$(CONFIG_NLS_KOI8_U) += nls_koi8-u.o nls_koi8-ru.o
obj-$(CONFIG_NLS_UTF8) += nls_utf8.o
+obj-$(CONFIG_NLS_MACCELTIC) += nls_macceltic.o
+obj-$(CONFIG_NLS_MACCENTEURO) += nls_maccenteuro.o
+obj-$(CONFIG_NLS_MACCROATIAN) += nls_maccroatian.o
+obj-$(CONFIG_NLS_MACCYRILLIC) += nls_maccyrillic.o
+obj-$(CONFIG_NLS_MACGAELIC) += nls_macgaelic.o
+obj-$(CONFIG_NLS_MACGREEK) += nls_macgreek.o
+obj-$(CONFIG_NLS_MACICELAND) += nls_maciceland.o
+obj-$(CONFIG_NLS_MACINUIT) += nls_macinuit.o
+obj-$(CONFIG_NLS_MACROMANIAN) += nls_macromanian.o
+obj-$(CONFIG_NLS_MACROMAN) += nls_macroman.o
+obj-$(CONFIG_NLS_MACTURKISH) += nls_macturkish.o
diff --git a/fs/nls/nls_macceltic.c b/fs/nls/nls_macceltic.c
new file mode 100644
index 000000000000..95ac5b41ad1c
--- /dev/null
+++ b/fs/nls/nls_macceltic.c
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/nls_macceltic.c
+ *
+ * Charset macceltic translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+ /* 0x00 */
+ 0x0000, 0x0001, 0x0002, 0x0003,
+ 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b,
+ 0x000c, 0x000d, 0x000e, 0x000f,
+ /* 0x10 */
+ 0x0010, 0x0011, 0x0012, 0x0013,
+ 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b,
+ 0x001c, 0x001d, 0x001e, 0x001f,
+ /* 0x20 */
+ 0x0020, 0x0021, 0x0022, 0x0023,
+ 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b,
+ 0x002c, 0x002d, 0x002e, 0x002f,
+ /* 0x30 */
+ 0x0030, 0x0031, 0x0032, 0x0033,
+ 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b,
+ 0x003c, 0x003d, 0x003e, 0x003f,
+ /* 0x40 */
+ 0x0040, 0x0041, 0x0042, 0x0043,
+ 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b,
+ 0x004c, 0x004d, 0x004e, 0x004f,
+ /* 0x50 */
+ 0x0050, 0x0051, 0x0052, 0x0053,
+ 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b,
+ 0x005c, 0x005d, 0x005e, 0x005f,
+ /* 0x60 */
+ 0x0060, 0x0061, 0x0062, 0x0063,
+ 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b,
+ 0x006c, 0x006d, 0x006e, 0x006f,
+ /* 0x70 */
+ 0x0070, 0x0071, 0x0072, 0x0073,
+ 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b,
+ 0x007c, 0x007d, 0x007e, 0x007f,
+ /* 0x80 */
+ 0x00c4, 0x00c5, 0x00c7, 0x00c9,
+ 0x00d1, 0x00d6, 0x00dc, 0x00e1,
+ 0x00e0, 0x00e2, 0x00e4, 0x00e3,
+ 0x00e5, 0x00e7, 0x00e9, 0x00e8,
+ /* 0x90 */
+ 0x00ea, 0x00eb, 0x00ed, 0x00ec,
+ 0x00ee, 0x00ef, 0x00f1, 0x00f3,
+ 0x00f2, 0x00f4, 0x00f6, 0x00f5,
+ 0x00fa, 0x00f9, 0x00fb, 0x00fc,
+ /* 0xa0 */
+ 0x2020, 0x00b0, 0x00a2, 0x00a3,
+ 0x00a7, 0x2022, 0x00b6, 0x00df,
+ 0x00ae, 0x00a9, 0x2122, 0x00b4,
+ 0x00a8, 0x2260, 0x00c6, 0x00d8,
+ /* 0xb0 */
+ 0x221e, 0x00b1, 0x2264, 0x2265,
+ 0x00a5, 0x00b5, 0x2202, 0x2211,
+ 0x220f, 0x03c0, 0x222b, 0x00aa,
+ 0x00ba, 0x03a9, 0x00e6, 0x00f8,
+ /* 0xc0 */
+ 0x00bf, 0x00a1, 0x00ac, 0x221a,
+ 0x0192, 0x2248, 0x2206, 0x00ab,
+ 0x00bb, 0x2026, 0x00a0, 0x00c0,
+ 0x00c3, 0x00d5, 0x0152, 0x0153,
+ /* 0xd0 */
+ 0x2013, 0x2014, 0x201c, 0x201d,
+ 0x2018, 0x2019, 0x00f7, 0x25ca,
+ 0x00ff, 0x0178, 0x2044, 0x20ac,
+ 0x2039, 0x203a, 0x0176, 0x0177,
+ /* 0xe0 */
+ 0x2021, 0x00b7, 0x1ef2, 0x1ef3,
+ 0x2030, 0x00c2, 0x00ca, 0x00c1,
+ 0x00cb, 0x00c8, 0x00cd, 0x00ce,
+ 0x00cf, 0x00cc, 0x00d3, 0x00d4,
+ /* 0xf0 */
+ 0x2663, 0x00d2, 0x00da, 0x00db,
+ 0x00d9, 0x0131, 0x00dd, 0x00fd,
+ 0x0174, 0x0175, 0x1e84, 0x1e85,
+ 0x1e80, 0x1e81, 0x1e82, 0x1e83,
+};
+
+static const unsigned char page00[256] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+ 0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+ 0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+ 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+ 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+ 0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+ 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0xf6, 0x00, 0xa7, /* 0xd8-0xdf */
+ 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+ 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+ 0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+ 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0xf7, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0xf8, 0xf9, 0xde, 0xdf, /* 0x70-0x77 */
+ 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page1e[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0xfc, 0xfd, 0xfe, 0xff, 0xfa, 0xfb, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0xe2, 0xe3, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+ 0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+ 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+ 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page26[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+ page00, page01, NULL, page03, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, page1e, NULL,
+ page20, page21, page22, NULL, NULL, page25, page26, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x00-0x07 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x08-0x0f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x10-0x17 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x18-0x1f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x20-0x27 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x28-0x2f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x30-0x37 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x38-0x3f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x40-0x47 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x48-0x4f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x50-0x57 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x58-0x5f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x60-0x67 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x68-0x6f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x70-0x77 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x78-0x7f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x80-0x87 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x88-0x8f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x90-0x97 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x98-0x9f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa0-0xa7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa8-0xaf */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb0-0xb7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb8-0xbf */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc0-0xc7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc8-0xcf */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd0-0xd7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd8-0xdf */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe0-0xe7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe8-0xef */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0-0xf7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+ const unsigned char *uni2charset;
+ unsigned char cl = uni & 0x00ff;
+ unsigned char ch = (uni & 0xff00) >> 8;
+
+ if (boundlen <= 0)
+ return -ENAMETOOLONG;
+
+ uni2charset = page_uni2charset[ch];
+ if (uni2charset && uni2charset[cl])
+ out[0] = uni2charset[cl];
+ else
+ return -EINVAL;
+ return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+ *uni = charset2uni[*rawstring];
+ if (*uni == 0x0000)
+ return -EINVAL;
+ return 1;
+}
+
+static struct nls_table table = {
+ .charset = "macceltic",
+ .uni2char = uni2char,
+ .char2uni = char2uni,
+ .charset2lower = charset2lower,
+ .charset2upper = charset2upper,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_nls_macceltic(void)
+{
+ return register_nls(&table);
+}
+
+static void __exit exit_nls_macceltic(void)
+{
+ unregister_nls(&table);
+}
+
+module_init(init_nls_macceltic)
+module_exit(exit_nls_macceltic)
+
+MODULE_LICENSE("Dual BSD/GPL");
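
A hedged sketch, not part of the patch, of how a filesystem typically consumes a table like the one above: load it by the charset name it registers, convert each byte with ->char2uni(), and re-encode the code point as UTF-8 with utf32_to_utf8(). The module scaffold and the sample bytes are illustrative.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/nls.h>

static int __init nls_demo_init(void)
{
	/* 0x8a maps to U+00E4 (a with diaeresis) in the macceltic table above */
	static const unsigned char mac_name[] = { 'a', 0x8a, 0 };
	unsigned char utf8[16];
	struct nls_table *nls;
	wchar_t uni;
	int in = 0, out = 0, used;

	nls = load_nls("macceltic");
	if (!nls)
		return -EINVAL;	/* table not built in or module not loadable */

	while (mac_name[in]) {
		used = nls->char2uni(mac_name + in, 1, &uni);
		if (used < 0)
			break;
		in += used;
		used = utf32_to_utf8(uni, utf8 + out, sizeof(utf8) - out);
		if (used < 0)
			break;
		out += used;
	}
	utf8[out] = '\0';
	pr_info("nls demo: \"%s\" (%d bytes of UTF-8)\n", (char *)utf8, out);

	unload_nls(nls);
	return 0;
}

static void __exit nls_demo_exit(void)
{
}

module_init(nls_demo_init);
module_exit(nls_demo_exit);
MODULE_LICENSE("GPL");
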
diff --git a/fs/nls/nls_maccenteuro.c b/fs/nls/nls_maccenteuro.c
new file mode 100644
index 000000000000..ce0d57ef39f6
--- /dev/null
+++ b/fs/nls/nls_maccenteuro.c
@@ -0,0 +1,532 @@
+/*
+ * linux/fs/nls/nls_maccenteuro.c
+ *
+ * Charset maccenteuro translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+ /* 0x00 */
+ 0x0000, 0x0001, 0x0002, 0x0003,
+ 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b,
+ 0x000c, 0x000d, 0x000e, 0x000f,
+ /* 0x10 */
+ 0x0010, 0x0011, 0x0012, 0x0013,
+ 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b,
+ 0x001c, 0x001d, 0x001e, 0x001f,
+ /* 0x20 */
+ 0x0020, 0x0021, 0x0022, 0x0023,
+ 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b,
+ 0x002c, 0x002d, 0x002e, 0x002f,
+ /* 0x30 */
+ 0x0030, 0x0031, 0x0032, 0x0033,
+ 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b,
+ 0x003c, 0x003d, 0x003e, 0x003f,
+ /* 0x40 */
+ 0x0040, 0x0041, 0x0042, 0x0043,
+ 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b,
+ 0x004c, 0x004d, 0x004e, 0x004f,
+ /* 0x50 */
+ 0x0050, 0x0051, 0x0052, 0x0053,
+ 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b,
+ 0x005c, 0x005d, 0x005e, 0x005f,
+ /* 0x60 */
+ 0x0060, 0x0061, 0x0062, 0x0063,
+ 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b,
+ 0x006c, 0x006d, 0x006e, 0x006f,
+ /* 0x70 */
+ 0x0070, 0x0071, 0x0072, 0x0073,
+ 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b,
+ 0x007c, 0x007d, 0x007e, 0x007f,
+ /* 0x80 */
+ 0x00c4, 0x0100, 0x0101, 0x00c9,
+ 0x0104, 0x00d6, 0x00dc, 0x00e1,
+ 0x0105, 0x010c, 0x00e4, 0x010d,
+ 0x0106, 0x0107, 0x00e9, 0x0179,
+ /* 0x90 */
+ 0x017a, 0x010e, 0x00ed, 0x010f,
+ 0x0112, 0x0113, 0x0116, 0x00f3,
+ 0x0117, 0x00f4, 0x00f6, 0x00f5,
+ 0x00fa, 0x011a, 0x011b, 0x00fc,
+ /* 0xa0 */
+ 0x2020, 0x00b0, 0x0118, 0x00a3,
+ 0x00a7, 0x2022, 0x00b6, 0x00df,
+ 0x00ae, 0x00a9, 0x2122, 0x0119,
+ 0x00a8, 0x2260, 0x0123, 0x012e,
+ /* 0xb0 */
+ 0x012f, 0x012a, 0x2264, 0x2265,
+ 0x012b, 0x0136, 0x2202, 0x2211,
+ 0x0142, 0x013b, 0x013c, 0x013d,
+ 0x013e, 0x0139, 0x013a, 0x0145,
+ /* 0xc0 */
+ 0x0146, 0x0143, 0x00ac, 0x221a,
+ 0x0144, 0x0147, 0x2206, 0x00ab,
+ 0x00bb, 0x2026, 0x00a0, 0x0148,
+ 0x0150, 0x00d5, 0x0151, 0x014c,
+ /* 0xd0 */
+ 0x2013, 0x2014, 0x201c, 0x201d,
+ 0x2018, 0x2019, 0x00f7, 0x25ca,
+ 0x014d, 0x0154, 0x0155, 0x0158,
+ 0x2039, 0x203a, 0x0159, 0x0156,
+ /* 0xe0 */
+ 0x0157, 0x0160, 0x201a, 0x201e,
+ 0x0161, 0x015a, 0x015b, 0x00c1,
+ 0x0164, 0x0165, 0x00cd, 0x017d,
+ 0x017e, 0x016a, 0x00d3, 0x00d4,
+ /* 0xf0 */
+ 0x016b, 0x016e, 0x00da, 0x016f,
+ 0x0170, 0x0171, 0x0172, 0x0173,
+ 0x00dd, 0x00fd, 0x0137, 0x017b,
+ 0x0141, 0x017c, 0x0122, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0xca, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+ 0xac, 0xa9, 0x00, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+ 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0xe7, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x83, 0x00, 0x00, 0x00, 0xea, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0xf2, 0x00, 0x86, 0xf8, 0x00, 0xa7, /* 0xd8-0xdf */
+ 0x00, 0x87, 0x00, 0x00, 0x8a, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x8e, 0x00, 0x00, 0x00, 0x92, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x9c, 0x00, 0x9f, 0xf9, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+ 0x81, 0x82, 0x00, 0x00, 0x84, 0x88, 0x8c, 0x8d, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x89, 0x8b, 0x91, 0x93, /* 0x08-0x0f */
+ 0x00, 0x00, 0x94, 0x95, 0x00, 0x00, 0x96, 0x98, /* 0x10-0x17 */
+ 0xa2, 0xab, 0x9d, 0x9e, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0xfe, 0xae, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0xb1, 0xb4, 0x00, 0x00, 0xaf, 0xb0, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0xfa, /* 0x30-0x37 */
+ 0x00, 0xbd, 0xbe, 0xb9, 0xba, 0xbb, 0xbc, 0x00, /* 0x38-0x3f */
+ 0x00, 0xfc, 0xb8, 0xc1, 0xc4, 0xbf, 0xc0, 0xc5, /* 0x40-0x47 */
+ 0xcb, 0x00, 0x00, 0x00, 0xcf, 0xd8, 0x00, 0x00, /* 0x48-0x4f */
+ 0xcc, 0xce, 0x00, 0x00, 0xd9, 0xda, 0xdf, 0xe0, /* 0x50-0x57 */
+ 0xdb, 0xde, 0xe5, 0xe6, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0xe1, 0xe4, 0x00, 0x00, 0xe8, 0xe9, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0xed, 0xf0, 0x00, 0x00, 0xf1, 0xf3, /* 0x68-0x6f */
+ 0xf4, 0xf5, 0xf6, 0xf7, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x8f, 0x90, 0xfb, 0xfd, 0xeb, 0xec, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+ 0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+ 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+ page00, page01, page02, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ page20, page21, page22, NULL, NULL, page25, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+ const unsigned char *uni2charset;
+ unsigned char cl = uni & 0x00ff;
+ unsigned char ch = (uni & 0xff00) >> 8;
+
+ if (boundlen <= 0)
+ return -ENAMETOOLONG;
+
+ uni2charset = page_uni2charset[ch];
+ if (uni2charset && uni2charset[cl])
+ out[0] = uni2charset[cl];
+ else
+ return -EINVAL;
+ return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+ *uni = charset2uni[*rawstring];
+ if (*uni == 0x0000)
+ return -EINVAL;
+ return 1;
+}
+
+static struct nls_table table = {
+ .charset = "maccenteuro",
+ .uni2char = uni2char,
+ .char2uni = char2uni,
+ .charset2lower = charset2lower,
+ .charset2upper = charset2upper,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_nls_maccenteuro(void)
+{
+ return register_nls(&table);
+}
+
+static void __exit exit_nls_maccenteuro(void)
+{
+ unregister_nls(&table);
+}
+
+module_init(init_nls_maccenteuro)
+module_exit(exit_nls_maccenteuro)
+
+MODULE_LICENSE("Dual BSD/GPL");
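
[Editor's note, not part of the patch: each of these NLS modules resolves Unicode-to-charset conversions with a two-level lookup — the high byte of the code point selects one of the 256-entry page tables above (page00, page20, ...), the low byte indexes into that page, and a zero entry outside page00 means "no mapping", which is why uni2char() returns -EINVAL in that case. The stand-alone user-space sketch below illustrates only that lookup shape; the table names and the single mapping in it are made up for demonstration and do not reproduce the patch's data.]

/*
 * Illustrative sketch of the page-table lookup used by uni2char().
 * Build with any C99 compiler; the mapping below is hypothetical.
 */
#include <stdio.h>

typedef unsigned short wchar16;          /* stand-in for the kernel's 16-bit wchar_t */

static const unsigned char demo_page20[256] = {
	[0x2c] = 0xdb,                   /* pretend U+202C maps to 0xdb */
};

static const unsigned char *const demo_uni2charset[256] = {
	[0x20] = demo_page20,            /* only page 0x20 populated in this demo */
};

/* Same shape as the patch's uni2char(): 1 on success, -1 if unmapped. */
static int demo_uni2char(wchar16 uni, unsigned char *out)
{
	const unsigned char *page = demo_uni2charset[(uni >> 8) & 0xff];
	unsigned char cl = uni & 0xff;

	if (page && page[cl]) {
		*out = page[cl];
		return 1;
	}
	return -1;
}

int main(void)
{
	unsigned char c;

	if (demo_uni2char(0x202c, &c) == 1)
		printf("U+202C maps to 0x%02x\n", c);
	if (demo_uni2char(0x0416, &c) != 1)
		printf("U+0416 has no mapping in this demo table\n");
	return 0;
}

[The reverse direction, char2uni(), is a single direct index into charset2uni[], so no example is needed for it.]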
diff --git a/fs/nls/nls_maccroatian.c b/fs/nls/nls_maccroatian.c
new file mode 100644
index 000000000000..10b01c3eed66
--- /dev/null
+++ b/fs/nls/nls_maccroatian.c
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/nls_maccroatian.c
+ *
+ * Charset maccroatian translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+ /* 0x00 */
+ 0x0000, 0x0001, 0x0002, 0x0003,
+ 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b,
+ 0x000c, 0x000d, 0x000e, 0x000f,
+ /* 0x10 */
+ 0x0010, 0x0011, 0x0012, 0x0013,
+ 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b,
+ 0x001c, 0x001d, 0x001e, 0x001f,
+ /* 0x20 */
+ 0x0020, 0x0021, 0x0022, 0x0023,
+ 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b,
+ 0x002c, 0x002d, 0x002e, 0x002f,
+ /* 0x30 */
+ 0x0030, 0x0031, 0x0032, 0x0033,
+ 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b,
+ 0x003c, 0x003d, 0x003e, 0x003f,
+ /* 0x40 */
+ 0x0040, 0x0041, 0x0042, 0x0043,
+ 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b,
+ 0x004c, 0x004d, 0x004e, 0x004f,
+ /* 0x50 */
+ 0x0050, 0x0051, 0x0052, 0x0053,
+ 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b,
+ 0x005c, 0x005d, 0x005e, 0x005f,
+ /* 0x60 */
+ 0x0060, 0x0061, 0x0062, 0x0063,
+ 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b,
+ 0x006c, 0x006d, 0x006e, 0x006f,
+ /* 0x70 */
+ 0x0070, 0x0071, 0x0072, 0x0073,
+ 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b,
+ 0x007c, 0x007d, 0x007e, 0x007f,
+ /* 0x80 */
+ 0x00c4, 0x00c5, 0x00c7, 0x00c9,
+ 0x00d1, 0x00d6, 0x00dc, 0x00e1,
+ 0x00e0, 0x00e2, 0x00e4, 0x00e3,
+ 0x00e5, 0x00e7, 0x00e9, 0x00e8,
+ /* 0x90 */
+ 0x00ea, 0x00eb, 0x00ed, 0x00ec,
+ 0x00ee, 0x00ef, 0x00f1, 0x00f3,
+ 0x00f2, 0x00f4, 0x00f6, 0x00f5,
+ 0x00fa, 0x00f9, 0x00fb, 0x00fc,
+ /* 0xa0 */
+ 0x2020, 0x00b0, 0x00a2, 0x00a3,
+ 0x00a7, 0x2022, 0x00b6, 0x00df,
+ 0x00ae, 0x0160, 0x2122, 0x00b4,
+ 0x00a8, 0x2260, 0x017d, 0x00d8,
+ /* 0xb0 */
+ 0x221e, 0x00b1, 0x2264, 0x2265,
+ 0x2206, 0x00b5, 0x2202, 0x2211,
+ 0x220f, 0x0161, 0x222b, 0x00aa,
+ 0x00ba, 0x03a9, 0x017e, 0x00f8,
+ /* 0xc0 */
+ 0x00bf, 0x00a1, 0x00ac, 0x221a,
+ 0x0192, 0x2248, 0x0106, 0x00ab,
+ 0x010c, 0x2026, 0x00a0, 0x00c0,
+ 0x00c3, 0x00d5, 0x0152, 0x0153,
+ /* 0xd0 */
+ 0x0110, 0x2014, 0x201c, 0x201d,
+ 0x2018, 0x2019, 0x00f7, 0x25ca,
+ 0xf8ff, 0x00a9, 0x2044, 0x20ac,
+ 0x2039, 0x203a, 0x00c6, 0x00bb,
+ /* 0xe0 */
+ 0x2013, 0x00b7, 0x201a, 0x201e,
+ 0x2030, 0x00c2, 0x0107, 0x00c1,
+ 0x010d, 0x00c8, 0x00cd, 0x00ce,
+ 0x00cf, 0x00cc, 0x00d3, 0x00d4,
+ /* 0xf0 */
+ 0x0111, 0x00d2, 0x00da, 0x00db,
+ 0x00d9, 0x0131, 0x02c6, 0x02dc,
+ 0x00af, 0x03c0, 0x00cb, 0x02da,
+ 0x00b8, 0x00ca, 0x00e6, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0xca, 0xc1, 0xa2, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+ 0xac, 0xd9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+ 0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+ 0xfc, 0x00, 0xbc, 0xdf, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+ 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xde, 0x82, /* 0xc0-0xc7 */
+ 0xe9, 0x83, 0xfd, 0xfa, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+ 0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+ 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+ 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xfe, 0x8d, /* 0xe0-0xe7 */
+ 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+ 0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+ 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xe6, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0x00, 0x00, /* 0x08-0x0f */
+ 0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0xa9, 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xae, 0xbe, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0xfb, 0x00, 0xf7, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0xf9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0xe0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+ 0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+ 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xb4, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+ 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+ page00, page01, page02, page03, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ page20, page21, page22, NULL, NULL, page25, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ pagef8, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+ const unsigned char *uni2charset;
+ unsigned char cl = uni & 0x00ff;
+ unsigned char ch = (uni & 0xff00) >> 8;
+
+ if (boundlen <= 0)
+ return -ENAMETOOLONG;
+
+ uni2charset = page_uni2charset[ch];
+ if (uni2charset && uni2charset[cl])
+ out[0] = uni2charset[cl];
+ else
+ return -EINVAL;
+ return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+ *uni = charset2uni[*rawstring];
+ if (*uni == 0x0000)
+ return -EINVAL;
+ return 1;
+}
+
+static struct nls_table table = {
+ .charset = "maccroatian",
+ .uni2char = uni2char,
+ .char2uni = char2uni,
+ .charset2lower = charset2lower,
+ .charset2upper = charset2upper,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_nls_maccroatian(void)
+{
+ return register_nls(&table);
+}
+
+static void __exit exit_nls_maccroatian(void)
+{
+ unregister_nls(&table);
+}
+
+module_init(init_nls_maccroatian)
+module_exit(exit_nls_maccroatian)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_maccyrillic.c b/fs/nls/nls_maccyrillic.c
new file mode 100644
index 000000000000..318473fbb26c
--- /dev/null
+++ b/fs/nls/nls_maccyrillic.c
@@ -0,0 +1,497 @@
+/*
+ * linux/fs/nls/nls_maccyrillic.c
+ *
+ * Charset maccyrillic translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+ /* 0x00 */
+ 0x0000, 0x0001, 0x0002, 0x0003,
+ 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b,
+ 0x000c, 0x000d, 0x000e, 0x000f,
+ /* 0x10 */
+ 0x0010, 0x0011, 0x0012, 0x0013,
+ 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b,
+ 0x001c, 0x001d, 0x001e, 0x001f,
+ /* 0x20 */
+ 0x0020, 0x0021, 0x0022, 0x0023,
+ 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b,
+ 0x002c, 0x002d, 0x002e, 0x002f,
+ /* 0x30 */
+ 0x0030, 0x0031, 0x0032, 0x0033,
+ 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b,
+ 0x003c, 0x003d, 0x003e, 0x003f,
+ /* 0x40 */
+ 0x0040, 0x0041, 0x0042, 0x0043,
+ 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b,
+ 0x004c, 0x004d, 0x004e, 0x004f,
+ /* 0x50 */
+ 0x0050, 0x0051, 0x0052, 0x0053,
+ 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b,
+ 0x005c, 0x005d, 0x005e, 0x005f,
+ /* 0x60 */
+ 0x0060, 0x0061, 0x0062, 0x0063,
+ 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b,
+ 0x006c, 0x006d, 0x006e, 0x006f,
+ /* 0x70 */
+ 0x0070, 0x0071, 0x0072, 0x0073,
+ 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b,
+ 0x007c, 0x007d, 0x007e, 0x007f,
+ /* 0x80 */
+ 0x0410, 0x0411, 0x0412, 0x0413,
+ 0x0414, 0x0415, 0x0416, 0x0417,
+ 0x0418, 0x0419, 0x041a, 0x041b,
+ 0x041c, 0x041d, 0x041e, 0x041f,
+ /* 0x90 */
+ 0x0420, 0x0421, 0x0422, 0x0423,
+ 0x0424, 0x0425, 0x0426, 0x0427,
+ 0x0428, 0x0429, 0x042a, 0x042b,
+ 0x042c, 0x042d, 0x042e, 0x042f,
+ /* 0xa0 */
+ 0x2020, 0x00b0, 0x0490, 0x00a3,
+ 0x00a7, 0x2022, 0x00b6, 0x0406,
+ 0x00ae, 0x00a9, 0x2122, 0x0402,
+ 0x0452, 0x2260, 0x0403, 0x0453,
+ /* 0xb0 */
+ 0x221e, 0x00b1, 0x2264, 0x2265,
+ 0x0456, 0x00b5, 0x0491, 0x0408,
+ 0x0404, 0x0454, 0x0407, 0x0457,
+ 0x0409, 0x0459, 0x040a, 0x045a,
+ /* 0xc0 */
+ 0x0458, 0x0405, 0x00ac, 0x221a,
+ 0x0192, 0x2248, 0x2206, 0x00ab,
+ 0x00bb, 0x2026, 0x00a0, 0x040b,
+ 0x045b, 0x040c, 0x045c, 0x0455,
+ /* 0xd0 */
+ 0x2013, 0x2014, 0x201c, 0x201d,
+ 0x2018, 0x2019, 0x00f7, 0x201e,
+ 0x040e, 0x045e, 0x040f, 0x045f,
+ 0x2116, 0x0401, 0x0451, 0x044f,
+ /* 0xe0 */
+ 0x0430, 0x0431, 0x0432, 0x0433,
+ 0x0434, 0x0435, 0x0436, 0x0437,
+ 0x0438, 0x0439, 0x043a, 0x043b,
+ 0x043c, 0x043d, 0x043e, 0x043f,
+ /* 0xf0 */
+ 0x0440, 0x0441, 0x0442, 0x0443,
+ 0x0444, 0x0445, 0x0446, 0x0447,
+ 0x0448, 0x0449, 0x044a, 0x044b,
+ 0x044c, 0x044d, 0x044e, 0x20ac,
+};
+
+static const unsigned char page00[256] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0xca, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+ 0x00, 0xa9, 0x00, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+ 0xa1, 0xb1, 0x00, 0x00, 0x00, 0xb5, 0xa6, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd6, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page04[256] = {
+ 0x00, 0xdd, 0xab, 0xae, 0xb8, 0xc1, 0xa7, 0xba, /* 0x00-0x07 */
+ 0xb7, 0xbc, 0xbe, 0xcb, 0xcd, 0x00, 0xd8, 0xda, /* 0x08-0x0f */
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x10-0x17 */
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x18-0x1f */
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x20-0x27 */
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x28-0x2f */
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x30-0x37 */
+ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x38-0x3f */
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x40-0x47 */
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0x48-0x4f */
+ 0x00, 0xde, 0xac, 0xaf, 0xb9, 0xcf, 0xb4, 0xbb, /* 0x50-0x57 */
+ 0xc0, 0xbd, 0xbf, 0xcc, 0xce, 0x00, 0xd9, 0xdb, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0xa2, 0xb6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0xd7, 0x00, /* 0x18-0x1f */
+ 0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+ page00, page01, NULL, NULL, page04, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ page20, page21, page22, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+ const unsigned char *uni2charset;
+ unsigned char cl = uni & 0x00ff;
+ unsigned char ch = (uni & 0xff00) >> 8;
+
+ if (boundlen <= 0)
+ return -ENAMETOOLONG;
+
+ uni2charset = page_uni2charset[ch];
+ if (uni2charset && uni2charset[cl])
+ out[0] = uni2charset[cl];
+ else
+ return -EINVAL;
+ return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+ *uni = charset2uni[*rawstring];
+ if (*uni == 0x0000)
+ return -EINVAL;
+ return 1;
+}
+
+static struct nls_table table = {
+ .charset = "maccyrillic",
+ .uni2char = uni2char,
+ .char2uni = char2uni,
+ .charset2lower = charset2lower,
+ .charset2upper = charset2upper,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_nls_maccyrillic(void)
+{
+ return register_nls(&table);
+}
+
+static void __exit exit_nls_maccyrillic(void)
+{
+ unregister_nls(&table);
+}
+
+module_init(init_nls_maccyrillic)
+module_exit(exit_nls_maccyrillic)
+
+MODULE_LICENSE("Dual BSD/GPL");
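
[Editorial aside, not part of the patch: every NLS module in this series uses the same two-level lookup. uni2char() splits the 16-bit code point into a high byte ("page") and a low byte, indexes page_uni2charset[] with the high byte, then indexes the selected 256-entry page with the low byte; a NULL page or a 0x00 entry means "no mapping". char2uni() is a single 256-entry lookup the other way. The userspace sketch below illustrates only that mechanism; the demo_* names and tiny tables are made-up stand-ins, except the U+2122 -> 0xaa entry, which matches the maccyrillic page21 table above.]

#include <stdio.h>

typedef unsigned short wchar16;          /* stand-in for the kernel's 16-bit wchar_t */

static const unsigned char page21_demo[256] = {
	[0x22] = 0xaa,                   /* U+2122 TRADE MARK SIGN -> 0xaa, as in page21 above */
};

static const unsigned char *const pages_demo[256] = {
	[0x21] = page21_demo,            /* only page 0x21 is populated in this sketch */
};

/* Mirrors uni2char(): returns the 8-bit charset byte, or -1 if unmapped. */
static int demo_uni2char(wchar16 uni)
{
	const unsigned char *page = pages_demo[(uni >> 8) & 0xff];
	unsigned char cl = uni & 0xff;

	if (page && page[cl])
		return page[cl];
	return -1;                       /* the kernel code returns -EINVAL here */
}

int main(void)
{
	printf("U+2122 -> 0x%02x\n", demo_uni2char(0x2122));  /* prints 0xaa */
	printf("U+2603 -> %d\n", demo_uni2char(0x2603));      /* prints -1, no page 0x26 */
	return 0;
}

[End of aside; the patch continues below.]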
diff --git a/fs/nls/nls_macgaelic.c b/fs/nls/nls_macgaelic.c
new file mode 100644
index 000000000000..615d8e128f14
--- /dev/null
+++ b/fs/nls/nls_macgaelic.c
@@ -0,0 +1,567 @@
+/*
+ * linux/fs/nls/nls_macgaelic.c
+ *
+ * Charset macgaelic translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+ /* 0x00 */
+ 0x0000, 0x0001, 0x0002, 0x0003,
+ 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b,
+ 0x000c, 0x000d, 0x000e, 0x000f,
+ /* 0x10 */
+ 0x0010, 0x0011, 0x0012, 0x0013,
+ 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b,
+ 0x001c, 0x001d, 0x001e, 0x001f,
+ /* 0x20 */
+ 0x0020, 0x0021, 0x0022, 0x0023,
+ 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b,
+ 0x002c, 0x002d, 0x002e, 0x002f,
+ /* 0x30 */
+ 0x0030, 0x0031, 0x0032, 0x0033,
+ 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b,
+ 0x003c, 0x003d, 0x003e, 0x003f,
+ /* 0x40 */
+ 0x0040, 0x0041, 0x0042, 0x0043,
+ 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b,
+ 0x004c, 0x004d, 0x004e, 0x004f,
+ /* 0x50 */
+ 0x0050, 0x0051, 0x0052, 0x0053,
+ 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b,
+ 0x005c, 0x005d, 0x005e, 0x005f,
+ /* 0x60 */
+ 0x0060, 0x0061, 0x0062, 0x0063,
+ 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b,
+ 0x006c, 0x006d, 0x006e, 0x006f,
+ /* 0x70 */
+ 0x0070, 0x0071, 0x0072, 0x0073,
+ 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b,
+ 0x007c, 0x007d, 0x007e, 0x007f,
+ /* 0x80 */
+ 0x00c4, 0x00c5, 0x00c7, 0x00c9,
+ 0x00d1, 0x00d6, 0x00dc, 0x00e1,
+ 0x00e0, 0x00e2, 0x00e4, 0x00e3,
+ 0x00e5, 0x00e7, 0x00e9, 0x00e8,
+ /* 0x90 */
+ 0x00ea, 0x00eb, 0x00ed, 0x00ec,
+ 0x00ee, 0x00ef, 0x00f1, 0x00f3,
+ 0x00f2, 0x00f4, 0x00f6, 0x00f5,
+ 0x00fa, 0x00f9, 0x00fb, 0x00fc,
+ /* 0xa0 */
+ 0x2020, 0x00b0, 0x00a2, 0x00a3,
+ 0x00a7, 0x2022, 0x00b6, 0x00df,
+ 0x00ae, 0x00a9, 0x2122, 0x00b4,
+ 0x00a8, 0x2260, 0x00c6, 0x00d8,
+ /* 0xb0 */
+ 0x1e02, 0x00b1, 0x2264, 0x2265,
+ 0x1e03, 0x010a, 0x010b, 0x1e0a,
+ 0x1e0b, 0x1e1e, 0x1e1f, 0x0120,
+ 0x0121, 0x1e40, 0x00e6, 0x00f8,
+ /* 0xc0 */
+ 0x1e41, 0x1e56, 0x1e57, 0x027c,
+ 0x0192, 0x017f, 0x1e60, 0x00ab,
+ 0x00bb, 0x2026, 0x00a0, 0x00c0,
+ 0x00c3, 0x00d5, 0x0152, 0x0153,
+ /* 0xd0 */
+ 0x2013, 0x2014, 0x201c, 0x201d,
+ 0x2018, 0x2019, 0x1e61, 0x1e9b,
+ 0x00ff, 0x0178, 0x1e6a, 0x20ac,
+ 0x2039, 0x203a, 0x0176, 0x0177,
+ /* 0xe0 */
+ 0x1e6b, 0x00b7, 0x1ef2, 0x1ef3,
+ 0x204a, 0x00c2, 0x00ca, 0x00c1,
+ 0x00cb, 0x00c8, 0x00cd, 0x00ce,
+ 0x00cf, 0x00cc, 0x00d3, 0x00d4,
+ /* 0xf0 */
+ 0x2663, 0x00d2, 0x00da, 0x00db,
+ 0x00d9, 0x0131, 0x00dd, 0x00fd,
+ 0x0174, 0x0175, 0x1e84, 0x1e85,
+ 0x1e80, 0x1e81, 0x1e82, 0x1e83,
+};
+
+static const unsigned char page00[256] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0xca, 0x00, 0xa2, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
+ 0xac, 0xa9, 0x00, 0xc7, 0x00, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+ 0xa1, 0xb1, 0x00, 0x00, 0xab, 0x00, 0xa6, 0xe1, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+ 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+ 0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+ 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0xf6, 0x00, 0xa7, /* 0xd8-0xdf */
+ 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+ 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+ 0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0x00, /* 0xf0-0xf7 */
+ 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0xf7, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0xb5, 0xb6, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0xbb, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0xf8, 0xf9, 0xde, 0xdf, /* 0x70-0x77 */
+ 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page1e[256] = {
+ 0x00, 0x00, 0xb0, 0xb4, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0xb7, 0xb8, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0xba, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0xbd, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0xc2, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0xc6, 0xd6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0xda, 0xe0, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0xfc, 0xfd, 0xfe, 0xff, 0xfa, 0xfb, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0xe2, 0xe3, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+ 0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page26[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+ page00, page01, page02, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, page1e, NULL,
+ page20, page21, page22, NULL, NULL, NULL, page26, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x00-0x07 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x08-0x0f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x10-0x17 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x18-0x1f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x20-0x27 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x28-0x2f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x30-0x37 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x38-0x3f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x40-0x47 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x48-0x4f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x50-0x57 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x58-0x5f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x60-0x67 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x68-0x6f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x70-0x77 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x78-0x7f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x80-0x87 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x88-0x8f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x90-0x97 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x98-0x9f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa0-0xa7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa8-0xaf */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb0-0xb7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb8-0xbf */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc0-0xc7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc8-0xcf */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd0-0xd7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd8-0xdf */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe0-0xe7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe8-0xef */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0-0xf7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+ const unsigned char *uni2charset;
+ unsigned char cl = uni & 0x00ff;
+ unsigned char ch = (uni & 0xff00) >> 8;
+
+ if (boundlen <= 0)
+ return -ENAMETOOLONG;
+
+ uni2charset = page_uni2charset[ch];
+ if (uni2charset && uni2charset[cl])
+ out[0] = uni2charset[cl];
+ else
+ return -EINVAL;
+ return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+ *uni = charset2uni[*rawstring];
+ if (*uni == 0x0000)
+ return -EINVAL;
+ return 1;
+}
+
+static struct nls_table table = {
+ .charset = "macgaelic",
+ .uni2char = uni2char,
+ .char2uni = char2uni,
+ .charset2lower = charset2lower,
+ .charset2upper = charset2upper,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_nls_macgaelic(void)
+{
+ return register_nls(&table);
+}
+
+static void __exit exit_nls_macgaelic(void)
+{
+ unregister_nls(&table);
+}
+
+module_init(init_nls_macgaelic)
+module_exit(exit_nls_macgaelic)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_macgreek.c b/fs/nls/nls_macgreek.c
new file mode 100644
index 000000000000..79880f30494f
--- /dev/null
+++ b/fs/nls/nls_macgreek.c
@@ -0,0 +1,497 @@
+/*
+ * linux/fs/nls/nls_macgreek.c
+ *
+ * Charset macgreek translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+ /* 0x00 */
+ 0x0000, 0x0001, 0x0002, 0x0003,
+ 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b,
+ 0x000c, 0x000d, 0x000e, 0x000f,
+ /* 0x10 */
+ 0x0010, 0x0011, 0x0012, 0x0013,
+ 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b,
+ 0x001c, 0x001d, 0x001e, 0x001f,
+ /* 0x20 */
+ 0x0020, 0x0021, 0x0022, 0x0023,
+ 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b,
+ 0x002c, 0x002d, 0x002e, 0x002f,
+ /* 0x30 */
+ 0x0030, 0x0031, 0x0032, 0x0033,
+ 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b,
+ 0x003c, 0x003d, 0x003e, 0x003f,
+ /* 0x40 */
+ 0x0040, 0x0041, 0x0042, 0x0043,
+ 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b,
+ 0x004c, 0x004d, 0x004e, 0x004f,
+ /* 0x50 */
+ 0x0050, 0x0051, 0x0052, 0x0053,
+ 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b,
+ 0x005c, 0x005d, 0x005e, 0x005f,
+ /* 0x60 */
+ 0x0060, 0x0061, 0x0062, 0x0063,
+ 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b,
+ 0x006c, 0x006d, 0x006e, 0x006f,
+ /* 0x70 */
+ 0x0070, 0x0071, 0x0072, 0x0073,
+ 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b,
+ 0x007c, 0x007d, 0x007e, 0x007f,
+ /* 0x80 */
+ 0x00c4, 0x00b9, 0x00b2, 0x00c9,
+ 0x00b3, 0x00d6, 0x00dc, 0x0385,
+ 0x00e0, 0x00e2, 0x00e4, 0x0384,
+ 0x00a8, 0x00e7, 0x00e9, 0x00e8,
+ /* 0x90 */
+ 0x00ea, 0x00eb, 0x00a3, 0x2122,
+ 0x00ee, 0x00ef, 0x2022, 0x00bd,
+ 0x2030, 0x00f4, 0x00f6, 0x00a6,
+ 0x20ac, 0x00f9, 0x00fb, 0x00fc,
+ /* 0xa0 */
+ 0x2020, 0x0393, 0x0394, 0x0398,
+ 0x039b, 0x039e, 0x03a0, 0x00df,
+ 0x00ae, 0x00a9, 0x03a3, 0x03aa,
+ 0x00a7, 0x2260, 0x00b0, 0x00b7,
+ /* 0xb0 */
+ 0x0391, 0x00b1, 0x2264, 0x2265,
+ 0x00a5, 0x0392, 0x0395, 0x0396,
+ 0x0397, 0x0399, 0x039a, 0x039c,
+ 0x03a6, 0x03ab, 0x03a8, 0x03a9,
+ /* 0xc0 */
+ 0x03ac, 0x039d, 0x00ac, 0x039f,
+ 0x03a1, 0x2248, 0x03a4, 0x00ab,
+ 0x00bb, 0x2026, 0x00a0, 0x03a5,
+ 0x03a7, 0x0386, 0x0388, 0x0153,
+ /* 0xd0 */
+ 0x2013, 0x2015, 0x201c, 0x201d,
+ 0x2018, 0x2019, 0x00f7, 0x0389,
+ 0x038a, 0x038c, 0x038e, 0x03ad,
+ 0x03ae, 0x03af, 0x03cc, 0x038f,
+ /* 0xe0 */
+ 0x03cd, 0x03b1, 0x03b2, 0x03c8,
+ 0x03b4, 0x03b5, 0x03c6, 0x03b3,
+ 0x03b7, 0x03b9, 0x03be, 0x03ba,
+ 0x03bb, 0x03bc, 0x03bd, 0x03bf,
+ /* 0xf0 */
+ 0x03c0, 0x03ce, 0x03c1, 0x03c3,
+ 0x03c4, 0x03b8, 0x03c9, 0x03c2,
+ 0x03c7, 0x03c5, 0x03b6, 0x03ca,
+ 0x03cb, 0x0390, 0x03b0, 0x00ad,
+};
+
+static const unsigned char page00[256] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0xca, 0x00, 0x00, 0x92, 0x00, 0xb4, 0x9b, 0xac, /* 0xa0-0xa7 */
+ 0x8c, 0xa9, 0x00, 0xc7, 0xc2, 0xff, 0xa8, 0x00, /* 0xa8-0xaf */
+ 0xae, 0xb1, 0x82, 0x84, 0x00, 0x00, 0x00, 0xaf, /* 0xb0-0xb7 */
+ 0x00, 0x81, 0x00, 0xc8, 0x00, 0x97, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x83, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+ 0x88, 0x00, 0x89, 0x00, 0x8a, 0x00, 0x00, 0x8d, /* 0xe0-0xe7 */
+ 0x8f, 0x8e, 0x90, 0x91, 0x00, 0x00, 0x94, 0x95, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x99, 0x00, 0x9a, 0xd6, /* 0xf0-0xf7 */
+ 0x00, 0x9d, 0x00, 0x9e, 0x9f, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x8b, 0x87, 0xcd, 0x00, /* 0x80-0x87 */
+ 0xce, 0xd7, 0xd8, 0x00, 0xd9, 0x00, 0xda, 0xdf, /* 0x88-0x8f */
+ 0xfd, 0xb0, 0xb5, 0xa1, 0xa2, 0xb6, 0xb7, 0xb8, /* 0x90-0x97 */
+ 0xa3, 0xb9, 0xba, 0xa4, 0xbb, 0xc1, 0xa5, 0xc3, /* 0x98-0x9f */
+ 0xa6, 0xc4, 0x00, 0xaa, 0xc6, 0xcb, 0xbc, 0xcc, /* 0xa0-0xa7 */
+ 0xbe, 0xbf, 0xab, 0xbd, 0xc0, 0xdb, 0xdc, 0xdd, /* 0xa8-0xaf */
+ 0xfe, 0xe1, 0xe2, 0xe7, 0xe4, 0xe5, 0xfa, 0xe8, /* 0xb0-0xb7 */
+ 0xf5, 0xe9, 0xeb, 0xec, 0xed, 0xee, 0xea, 0xef, /* 0xb8-0xbf */
+ 0xf0, 0xf2, 0xf7, 0xf3, 0xf4, 0xf9, 0xe6, 0xf8, /* 0xc0-0xc7 */
+ 0xe3, 0xf6, 0xfb, 0xfc, 0xde, 0xe0, 0xf1, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0xd0, 0x00, 0xd1, 0x00, 0x00, /* 0x10-0x17 */
+ 0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+ 0xa0, 0x00, 0x96, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x98, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+ page00, page01, NULL, page03, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ page20, page21, page22, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+ const unsigned char *uni2charset;
+ unsigned char cl = uni & 0x00ff;
+ unsigned char ch = (uni & 0xff00) >> 8;
+
+ if (boundlen <= 0)
+ return -ENAMETOOLONG;
+
+ uni2charset = page_uni2charset[ch];
+ if (uni2charset && uni2charset[cl])
+ out[0] = uni2charset[cl];
+ else
+ return -EINVAL;
+ return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+ *uni = charset2uni[*rawstring];
+ if (*uni == 0x0000)
+ return -EINVAL;
+ return 1;
+}
+
+static struct nls_table table = {
+ .charset = "macgreek",
+ .uni2char = uni2char,
+ .char2uni = char2uni,
+ .charset2lower = charset2lower,
+ .charset2upper = charset2upper,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_nls_macgreek(void)
+{
+ return register_nls(&table);
+}
+
+static void __exit exit_nls_macgreek(void)
+{
+ unregister_nls(&table);
+}
+
+module_init(init_nls_macgreek)
+module_exit(exit_nls_macgreek)
+
+MODULE_LICENSE("Dual BSD/GPL");
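
The uni2char() helper in each of these generated modules performs the same two-level lookup: the high byte of the Unicode code point selects one of up to 256 "page" arrays (NULL when nothing in that page maps), and the low byte indexes into the selected page, with a stored 0x00 meaning "no exact mapping". The following standalone, userspace-only sketch is illustrative and not part of the patch; the demo_* tables and values are made up for demonstration and do not reproduce any real charset table above.

/*
 * Minimal sketch of the two-level uni2charset lookup used by the
 * generated NLS modules.  Hypothetical pages and mappings only.
 */
#include <stdio.h>

/* hypothetical page for U+00xx: pretend U+00B0 maps to byte 0xa1 */
static const unsigned char demo_page00[256] = {
	[0xb0] = 0xa1,
};

/* hypothetical page for U+20xx: pretend U+2022 maps to byte 0xa5 */
static const unsigned char demo_page20[256] = {
	[0x22] = 0xa5,
};

static const unsigned char *const demo_uni2charset[256] = {
	[0x00] = demo_page00,
	[0x20] = demo_page20,
	/* every other page stays NULL: nothing in it maps */
};

/* same shape as uni2char() above, minus the kernel error codes */
static int demo_uni2char(unsigned int uni, unsigned char *out)
{
	const unsigned char *page = demo_uni2charset[(uni >> 8) & 0xff];
	unsigned char cl = uni & 0xff;

	if (page && page[cl]) {
		*out = page[cl];
		return 1;		/* one output byte written */
	}
	return -1;			/* no exact mapping for this code point */
}

int main(void)
{
	unsigned char c;

	if (demo_uni2char(0x2022, &c) == 1)
		printf("U+2022 -> 0x%02x\n", c);	/* prints 0xa5 */
	if (demo_uni2char(0x0416, &c) < 0)
		printf("U+0416 has no mapping\n");
	return 0;
}

The reverse direction (char2uni) needs no paging: the 8-bit charset byte indexes charset2uni directly. Once register_nls() has run, filesystems that take an NLS table can presumably select these modules by the registered name (for example an iocharset=macgreek style mount option), which is the point of the table registration boilerplate at the end of each file.
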
diff --git a/fs/nls/nls_maciceland.c b/fs/nls/nls_maciceland.c
new file mode 100644
index 000000000000..1e688c59b252
--- /dev/null
+++ b/fs/nls/nls_maciceland.c
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/nls_maciceland.c
+ *
+ * Charset maciceland translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+ /* 0x00 */
+ 0x0000, 0x0001, 0x0002, 0x0003,
+ 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b,
+ 0x000c, 0x000d, 0x000e, 0x000f,
+ /* 0x10 */
+ 0x0010, 0x0011, 0x0012, 0x0013,
+ 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b,
+ 0x001c, 0x001d, 0x001e, 0x001f,
+ /* 0x20 */
+ 0x0020, 0x0021, 0x0022, 0x0023,
+ 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b,
+ 0x002c, 0x002d, 0x002e, 0x002f,
+ /* 0x30 */
+ 0x0030, 0x0031, 0x0032, 0x0033,
+ 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b,
+ 0x003c, 0x003d, 0x003e, 0x003f,
+ /* 0x40 */
+ 0x0040, 0x0041, 0x0042, 0x0043,
+ 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b,
+ 0x004c, 0x004d, 0x004e, 0x004f,
+ /* 0x50 */
+ 0x0050, 0x0051, 0x0052, 0x0053,
+ 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b,
+ 0x005c, 0x005d, 0x005e, 0x005f,
+ /* 0x60 */
+ 0x0060, 0x0061, 0x0062, 0x0063,
+ 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b,
+ 0x006c, 0x006d, 0x006e, 0x006f,
+ /* 0x70 */
+ 0x0070, 0x0071, 0x0072, 0x0073,
+ 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b,
+ 0x007c, 0x007d, 0x007e, 0x007f,
+ /* 0x80 */
+ 0x00c4, 0x00c5, 0x00c7, 0x00c9,
+ 0x00d1, 0x00d6, 0x00dc, 0x00e1,
+ 0x00e0, 0x00e2, 0x00e4, 0x00e3,
+ 0x00e5, 0x00e7, 0x00e9, 0x00e8,
+ /* 0x90 */
+ 0x00ea, 0x00eb, 0x00ed, 0x00ec,
+ 0x00ee, 0x00ef, 0x00f1, 0x00f3,
+ 0x00f2, 0x00f4, 0x00f6, 0x00f5,
+ 0x00fa, 0x00f9, 0x00fb, 0x00fc,
+ /* 0xa0 */
+ 0x00dd, 0x00b0, 0x00a2, 0x00a3,
+ 0x00a7, 0x2022, 0x00b6, 0x00df,
+ 0x00ae, 0x00a9, 0x2122, 0x00b4,
+ 0x00a8, 0x2260, 0x00c6, 0x00d8,
+ /* 0xb0 */
+ 0x221e, 0x00b1, 0x2264, 0x2265,
+ 0x00a5, 0x00b5, 0x2202, 0x2211,
+ 0x220f, 0x03c0, 0x222b, 0x00aa,
+ 0x00ba, 0x03a9, 0x00e6, 0x00f8,
+ /* 0xc0 */
+ 0x00bf, 0x00a1, 0x00ac, 0x221a,
+ 0x0192, 0x2248, 0x2206, 0x00ab,
+ 0x00bb, 0x2026, 0x00a0, 0x00c0,
+ 0x00c3, 0x00d5, 0x0152, 0x0153,
+ /* 0xd0 */
+ 0x2013, 0x2014, 0x201c, 0x201d,
+ 0x2018, 0x2019, 0x00f7, 0x25ca,
+ 0x00ff, 0x0178, 0x2044, 0x20ac,
+ 0x00d0, 0x00f0, 0x00de, 0x00fe,
+ /* 0xe0 */
+ 0x00fd, 0x00b7, 0x201a, 0x201e,
+ 0x2030, 0x00c2, 0x00ca, 0x00c1,
+ 0x00cb, 0x00c8, 0x00cd, 0x00ce,
+ 0x00cf, 0x00cc, 0x00d3, 0x00d4,
+ /* 0xf0 */
+ 0xf8ff, 0x00d2, 0x00da, 0x00db,
+ 0x00d9, 0x0131, 0x02c6, 0x02dc,
+ 0x00af, 0x02d8, 0x02d9, 0x02da,
+ 0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+ 0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+ 0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+ 0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+ 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+ 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+ 0xdc, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+ 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0xa0, 0xde, 0xa7, /* 0xd8-0xdf */
+ 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+ 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+ 0xdd, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+ 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0xe0, 0xdf, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+ 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+ 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+ page00, page01, page02, page03, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ page20, page21, page22, NULL, NULL, page25, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ pagef8, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+ const unsigned char *uni2charset;
+ unsigned char cl = uni & 0x00ff;
+ unsigned char ch = (uni & 0xff00) >> 8;
+
+ if (boundlen <= 0)
+ return -ENAMETOOLONG;
+
+ uni2charset = page_uni2charset[ch];
+ if (uni2charset && uni2charset[cl])
+ out[0] = uni2charset[cl];
+ else
+ return -EINVAL;
+ return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+ *uni = charset2uni[*rawstring];
+ if (*uni == 0x0000)
+ return -EINVAL;
+ return 1;
+}
+
+static struct nls_table table = {
+ .charset = "maciceland",
+ .uni2char = uni2char,
+ .char2uni = char2uni,
+ .charset2lower = charset2lower,
+ .charset2upper = charset2upper,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_nls_maciceland(void)
+{
+ return register_nls(&table);
+}
+
+static void __exit exit_nls_maciceland(void)
+{
+ unregister_nls(&table);
+}
+
+module_init(init_nls_maciceland)
+module_exit(exit_nls_maciceland)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_macinuit.c b/fs/nls/nls_macinuit.c
new file mode 100644
index 000000000000..f333d98941d6
--- /dev/null
+++ b/fs/nls/nls_macinuit.c
@@ -0,0 +1,532 @@
+/*
+ * linux/fs/nls/nls_macinuit.c
+ *
+ * Charset macinuit translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+ /* 0x00 */
+ 0x0000, 0x0001, 0x0002, 0x0003,
+ 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b,
+ 0x000c, 0x000d, 0x000e, 0x000f,
+ /* 0x10 */
+ 0x0010, 0x0011, 0x0012, 0x0013,
+ 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b,
+ 0x001c, 0x001d, 0x001e, 0x001f,
+ /* 0x20 */
+ 0x0020, 0x0021, 0x0022, 0x0023,
+ 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b,
+ 0x002c, 0x002d, 0x002e, 0x002f,
+ /* 0x30 */
+ 0x0030, 0x0031, 0x0032, 0x0033,
+ 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b,
+ 0x003c, 0x003d, 0x003e, 0x003f,
+ /* 0x40 */
+ 0x0040, 0x0041, 0x0042, 0x0043,
+ 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b,
+ 0x004c, 0x004d, 0x004e, 0x004f,
+ /* 0x50 */
+ 0x0050, 0x0051, 0x0052, 0x0053,
+ 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b,
+ 0x005c, 0x005d, 0x005e, 0x005f,
+ /* 0x60 */
+ 0x0060, 0x0061, 0x0062, 0x0063,
+ 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b,
+ 0x006c, 0x006d, 0x006e, 0x006f,
+ /* 0x70 */
+ 0x0070, 0x0071, 0x0072, 0x0073,
+ 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b,
+ 0x007c, 0x007d, 0x007e, 0x007f,
+ /* 0x80 */
+ 0x1403, 0x1404, 0x1405, 0x1406,
+ 0x140a, 0x140b, 0x1431, 0x1432,
+ 0x1433, 0x1434, 0x1438, 0x1439,
+ 0x1449, 0x144e, 0x144f, 0x1450,
+ /* 0x90 */
+ 0x1451, 0x1455, 0x1456, 0x1466,
+ 0x146d, 0x146e, 0x146f, 0x1470,
+ 0x1472, 0x1473, 0x1483, 0x148b,
+ 0x148c, 0x148d, 0x148e, 0x1490,
+ /* 0xa0 */
+ 0x1491, 0x00b0, 0x14a1, 0x14a5,
+ 0x14a6, 0x2022, 0x00b6, 0x14a7,
+ 0x00ae, 0x00a9, 0x2122, 0x14a8,
+ 0x14aa, 0x14ab, 0x14bb, 0x14c2,
+ /* 0xb0 */
+ 0x14c3, 0x14c4, 0x14c5, 0x14c7,
+ 0x14c8, 0x14d0, 0x14ef, 0x14f0,
+ 0x14f1, 0x14f2, 0x14f4, 0x14f5,
+ 0x1505, 0x14d5, 0x14d6, 0x14d7,
+ /* 0xc0 */
+ 0x14d8, 0x14da, 0x14db, 0x14ea,
+ 0x1528, 0x1529, 0x152a, 0x152b,
+ 0x152d, 0x2026, 0x00a0, 0x152e,
+ 0x153e, 0x1555, 0x1556, 0x1557,
+ /* 0xd0 */
+ 0x2013, 0x2014, 0x201c, 0x201d,
+ 0x2018, 0x2019, 0x1558, 0x1559,
+ 0x155a, 0x155d, 0x1546, 0x1547,
+ 0x1548, 0x1549, 0x154b, 0x154c,
+ /* 0xe0 */
+ 0x1550, 0x157f, 0x1580, 0x1581,
+ 0x1582, 0x1583, 0x1584, 0x1585,
+ 0x158f, 0x1590, 0x1591, 0x1592,
+ 0x1593, 0x1594, 0x1595, 0x1671,
+ /* 0xf0 */
+ 0x1672, 0x1673, 0x1674, 0x1675,
+ 0x1676, 0x1596, 0x15a0, 0x15a1,
+ 0x15a2, 0x15a3, 0x15a4, 0x15a5,
+ 0x15a6, 0x157c, 0x0141, 0x0142,
+};
+
+static const unsigned char page00[256] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0xca, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0xa9, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
+ 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0xfe, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page14[256] = {
+ 0x00, 0x00, 0x00, 0x80, 0x81, 0x82, 0x83, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x84, 0x85, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x86, 0x87, 0x88, 0x89, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x8a, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x8d, 0x8e, /* 0x48-0x4f */
+ 0x8f, 0x90, 0x00, 0x00, 0x00, 0x91, 0x92, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x95, 0x96, /* 0x68-0x6f */
+ 0x97, 0x00, 0x98, 0x99, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x9b, 0x9c, 0x9d, 0x9e, 0x00, /* 0x88-0x8f */
+ 0x9f, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0xa2, 0x00, 0x00, 0x00, 0xa3, 0xa4, 0xa7, /* 0xa0-0xa7 */
+ 0xab, 0x00, 0xac, 0xad, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0xae, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0xaf, 0xb0, 0xb1, 0xb2, 0x00, 0xb3, /* 0xc0-0xc7 */
+ 0xb4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0xb5, 0x00, 0x00, 0x00, 0x00, 0xbd, 0xbe, 0xbf, /* 0xd0-0xd7 */
+ 0xc0, 0x00, 0xc1, 0xc2, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0xb6, /* 0xe8-0xef */
+ 0xb7, 0xb8, 0xb9, 0x00, 0xba, 0xbb, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page15[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0xc4, 0xc5, 0xc6, 0xc7, 0x00, 0xc8, 0xcb, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0xdb, /* 0x40-0x47 */
+ 0xdc, 0xdd, 0x00, 0xde, 0xdf, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0xe0, 0x00, 0x00, 0x00, 0x00, 0xcd, 0xce, 0xcf, /* 0x50-0x57 */
+ 0xd6, 0xd7, 0xd8, 0x00, 0x00, 0xd9, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x00, 0xe1, /* 0x78-0x7f */
+ 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, /* 0x88-0x8f */
+ 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xf5, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page16[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+ page00, page01, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, page14, page15, page16, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ page20, page21, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x00-0x07 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x08-0x0f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x10-0x17 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x18-0x1f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x20-0x27 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x28-0x2f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x30-0x37 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x38-0x3f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x40-0x47 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x48-0x4f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x50-0x57 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x58-0x5f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x60-0x67 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x68-0x6f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x70-0x77 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x78-0x7f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x80-0x87 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x88-0x8f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x90-0x97 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x98-0x9f */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa0-0xa7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa8-0xaf */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb0-0xb7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb8-0xbf */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc0-0xc7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc8-0xcf */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd0-0xd7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd8-0xdf */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe0-0xe7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe8-0xef */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0-0xf7 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+ const unsigned char *uni2charset;
+ unsigned char cl = uni & 0x00ff;
+ unsigned char ch = (uni & 0xff00) >> 8;
+
+ if (boundlen <= 0)
+ return -ENAMETOOLONG;
+
+ uni2charset = page_uni2charset[ch];
+ if (uni2charset && uni2charset[cl])
+ out[0] = uni2charset[cl];
+ else
+ return -EINVAL;
+ return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+ *uni = charset2uni[*rawstring];
+ if (*uni == 0x0000)
+ return -EINVAL;
+ return 1;
+}
+
+static struct nls_table table = {
+ .charset = "macinuit",
+ .uni2char = uni2char,
+ .char2uni = char2uni,
+ .charset2lower = charset2lower,
+ .charset2upper = charset2upper,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_nls_macinuit(void)
+{
+ return register_nls(&table);
+}
+
+static void __exit exit_nls_macinuit(void)
+{
+ unregister_nls(&table);
+}
+
+module_init(init_nls_macinuit)
+module_exit(exit_nls_macinuit)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_macroman.c b/fs/nls/nls_macroman.c
new file mode 100644
index 000000000000..6315a857ab68
--- /dev/null
+++ b/fs/nls/nls_macroman.c
@@ -0,0 +1,637 @@
+/*
+ * linux/fs/nls/nls_macroman.c
+ *
+ * Charset macroman translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+ /* 0x00 */
+ 0x0000, 0x0001, 0x0002, 0x0003,
+ 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b,
+ 0x000c, 0x000d, 0x000e, 0x000f,
+ /* 0x10 */
+ 0x0010, 0x0011, 0x0012, 0x0013,
+ 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b,
+ 0x001c, 0x001d, 0x001e, 0x001f,
+ /* 0x20 */
+ 0x0020, 0x0021, 0x0022, 0x0023,
+ 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b,
+ 0x002c, 0x002d, 0x002e, 0x002f,
+ /* 0x30 */
+ 0x0030, 0x0031, 0x0032, 0x0033,
+ 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b,
+ 0x003c, 0x003d, 0x003e, 0x003f,
+ /* 0x40 */
+ 0x0040, 0x0041, 0x0042, 0x0043,
+ 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b,
+ 0x004c, 0x004d, 0x004e, 0x004f,
+ /* 0x50 */
+ 0x0050, 0x0051, 0x0052, 0x0053,
+ 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b,
+ 0x005c, 0x005d, 0x005e, 0x005f,
+ /* 0x60 */
+ 0x0060, 0x0061, 0x0062, 0x0063,
+ 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b,
+ 0x006c, 0x006d, 0x006e, 0x006f,
+ /* 0x70 */
+ 0x0070, 0x0071, 0x0072, 0x0073,
+ 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b,
+ 0x007c, 0x007d, 0x007e, 0x007f,
+ /* 0x80 */
+ 0x00c4, 0x00c5, 0x00c7, 0x00c9,
+ 0x00d1, 0x00d6, 0x00dc, 0x00e1,
+ 0x00e0, 0x00e2, 0x00e4, 0x00e3,
+ 0x00e5, 0x00e7, 0x00e9, 0x00e8,
+ /* 0x90 */
+ 0x00ea, 0x00eb, 0x00ed, 0x00ec,
+ 0x00ee, 0x00ef, 0x00f1, 0x00f3,
+ 0x00f2, 0x00f4, 0x00f6, 0x00f5,
+ 0x00fa, 0x00f9, 0x00fb, 0x00fc,
+ /* 0xa0 */
+ 0x2020, 0x00b0, 0x00a2, 0x00a3,
+ 0x00a7, 0x2022, 0x00b6, 0x00df,
+ 0x00ae, 0x00a9, 0x2122, 0x00b4,
+ 0x00a8, 0x2260, 0x00c6, 0x00d8,
+ /* 0xb0 */
+ 0x221e, 0x00b1, 0x2264, 0x2265,
+ 0x00a5, 0x00b5, 0x2202, 0x2211,
+ 0x220f, 0x03c0, 0x222b, 0x00aa,
+ 0x00ba, 0x03a9, 0x00e6, 0x00f8,
+ /* 0xc0 */
+ 0x00bf, 0x00a1, 0x00ac, 0x221a,
+ 0x0192, 0x2248, 0x2206, 0x00ab,
+ 0x00bb, 0x2026, 0x00a0, 0x00c0,
+ 0x00c3, 0x00d5, 0x0152, 0x0153,
+ /* 0xd0 */
+ 0x2013, 0x2014, 0x201c, 0x201d,
+ 0x2018, 0x2019, 0x00f7, 0x25ca,
+ 0x00ff, 0x0178, 0x2044, 0x20ac,
+ 0x2039, 0x203a, 0xfb01, 0xfb02,
+ /* 0xe0 */
+ 0x2021, 0x00b7, 0x201a, 0x201e,
+ 0x2030, 0x00c2, 0x00ca, 0x00c1,
+ 0x00cb, 0x00c8, 0x00cd, 0x00ce,
+ 0x00cf, 0x00cc, 0x00d3, 0x00d4,
+ /* 0xf0 */
+ 0xf8ff, 0x00d2, 0x00da, 0x00db,
+ 0x00d9, 0x0131, 0x02c6, 0x02dc,
+ 0x00af, 0x02d8, 0x02d9, 0x02da,
+ 0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+ 0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+ 0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+ 0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+ 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+ 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+ 0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+ 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+ 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+ 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+ 0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+ 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+ 0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+ 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+ 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+static const unsigned char pagefb[256] = {
+ 0x00, 0xde, 0xdf, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+ page00, page01, page02, page03, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ page20, page21, page22, NULL, NULL, page25, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ pagef8, NULL, NULL, pagefb, NULL, NULL, NULL, NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+ const unsigned char *uni2charset;
+ unsigned char cl = uni & 0x00ff;
+ unsigned char ch = (uni & 0xff00) >> 8;
+
+ if (boundlen <= 0)
+ return -ENAMETOOLONG;
+
+ uni2charset = page_uni2charset[ch];
+ if (uni2charset && uni2charset[cl])
+ out[0] = uni2charset[cl];
+ else
+ return -EINVAL;
+ return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+ *uni = charset2uni[*rawstring];
+ if (*uni == 0x0000)
+ return -EINVAL;
+ return 1;
+}
+
+static struct nls_table table = {
+ .charset = "macroman",
+ .uni2char = uni2char,
+ .char2uni = char2uni,
+ .charset2lower = charset2lower,
+ .charset2upper = charset2upper,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_nls_macroman(void)
+{
+ return register_nls(&table);
+}
+
+static void __exit exit_nls_macroman(void)
+{
+ unregister_nls(&table);
+}
+
+module_init(init_nls_macroman)
+module_exit(exit_nls_macroman)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_macromanian.c b/fs/nls/nls_macromanian.c
new file mode 100644
index 000000000000..b83c07a57d25
--- /dev/null
+++ b/fs/nls/nls_macromanian.c
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/nls_macromanian.c
+ *
+ * Charset macromanian translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+ /* 0x00 */
+ 0x0000, 0x0001, 0x0002, 0x0003,
+ 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b,
+ 0x000c, 0x000d, 0x000e, 0x000f,
+ /* 0x10 */
+ 0x0010, 0x0011, 0x0012, 0x0013,
+ 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b,
+ 0x001c, 0x001d, 0x001e, 0x001f,
+ /* 0x20 */
+ 0x0020, 0x0021, 0x0022, 0x0023,
+ 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b,
+ 0x002c, 0x002d, 0x002e, 0x002f,
+ /* 0x30 */
+ 0x0030, 0x0031, 0x0032, 0x0033,
+ 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b,
+ 0x003c, 0x003d, 0x003e, 0x003f,
+ /* 0x40 */
+ 0x0040, 0x0041, 0x0042, 0x0043,
+ 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b,
+ 0x004c, 0x004d, 0x004e, 0x004f,
+ /* 0x50 */
+ 0x0050, 0x0051, 0x0052, 0x0053,
+ 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b,
+ 0x005c, 0x005d, 0x005e, 0x005f,
+ /* 0x60 */
+ 0x0060, 0x0061, 0x0062, 0x0063,
+ 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b,
+ 0x006c, 0x006d, 0x006e, 0x006f,
+ /* 0x70 */
+ 0x0070, 0x0071, 0x0072, 0x0073,
+ 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b,
+ 0x007c, 0x007d, 0x007e, 0x007f,
+ /* 0x80 */
+ 0x00c4, 0x00c5, 0x00c7, 0x00c9,
+ 0x00d1, 0x00d6, 0x00dc, 0x00e1,
+ 0x00e0, 0x00e2, 0x00e4, 0x00e3,
+ 0x00e5, 0x00e7, 0x00e9, 0x00e8,
+ /* 0x90 */
+ 0x00ea, 0x00eb, 0x00ed, 0x00ec,
+ 0x00ee, 0x00ef, 0x00f1, 0x00f3,
+ 0x00f2, 0x00f4, 0x00f6, 0x00f5,
+ 0x00fa, 0x00f9, 0x00fb, 0x00fc,
+ /* 0xa0 */
+ 0x2020, 0x00b0, 0x00a2, 0x00a3,
+ 0x00a7, 0x2022, 0x00b6, 0x00df,
+ 0x00ae, 0x00a9, 0x2122, 0x00b4,
+ 0x00a8, 0x2260, 0x0102, 0x0218,
+ /* 0xb0 */
+ 0x221e, 0x00b1, 0x2264, 0x2265,
+ 0x00a5, 0x00b5, 0x2202, 0x2211,
+ 0x220f, 0x03c0, 0x222b, 0x00aa,
+ 0x00ba, 0x03a9, 0x0103, 0x0219,
+ /* 0xc0 */
+ 0x00bf, 0x00a1, 0x00ac, 0x221a,
+ 0x0192, 0x2248, 0x2206, 0x00ab,
+ 0x00bb, 0x2026, 0x00a0, 0x00c0,
+ 0x00c3, 0x00d5, 0x0152, 0x0153,
+ /* 0xd0 */
+ 0x2013, 0x2014, 0x201c, 0x201d,
+ 0x2018, 0x2019, 0x00f7, 0x25ca,
+ 0x00ff, 0x0178, 0x2044, 0x20ac,
+ 0x2039, 0x203a, 0x021a, 0x021b,
+ /* 0xe0 */
+ 0x2021, 0x00b7, 0x201a, 0x201e,
+ 0x2030, 0x00c2, 0x00ca, 0x00c1,
+ 0x00cb, 0x00c8, 0x00cd, 0x00ce,
+ 0x00cf, 0x00cc, 0x00d3, 0x00d4,
+ /* 0xf0 */
+ 0xf8ff, 0x00d2, 0x00da, 0x00db,
+ 0x00d9, 0x0131, 0x02c6, 0x02dc,
+ 0x00af, 0x02d8, 0x02d9, 0x02da,
+ 0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+ 0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+ 0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+ 0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+ 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0x00, 0x82, /* 0xc0-0xc7 */
+ 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+ 0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+ 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0x00, 0x8d, /* 0xe0-0xe7 */
+ 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+ 0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+ 0x00, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+ 0x00, 0x00, 0xae, 0xbe, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0xaf, 0xbf, 0xde, 0xdf, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+ 0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+ 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+ 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+ page00, page01, page02, page03, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ page20, page21, page22, NULL, NULL, page25, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ pagef8, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+ const unsigned char *uni2charset;
+ unsigned char cl = uni & 0x00ff;
+ unsigned char ch = (uni & 0xff00) >> 8;
+
+ if (boundlen <= 0)
+ return -ENAMETOOLONG;
+
+ uni2charset = page_uni2charset[ch];
+ if (uni2charset && uni2charset[cl])
+ out[0] = uni2charset[cl];
+ else
+ return -EINVAL;
+ return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+ *uni = charset2uni[*rawstring];
+ if (*uni == 0x0000)
+ return -EINVAL;
+ return 1;
+}
+
+static struct nls_table table = {
+ .charset = "macromanian",
+ .uni2char = uni2char,
+ .char2uni = char2uni,
+ .charset2lower = charset2lower,
+ .charset2upper = charset2upper,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_nls_macromanian(void)
+{
+ return register_nls(&table);
+}
+
+static void __exit exit_nls_macromanian(void)
+{
+ unregister_nls(&table);
+}
+
+module_init(init_nls_macromanian)
+module_exit(exit_nls_macromanian)
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/nls_macturkish.c b/fs/nls/nls_macturkish.c
new file mode 100644
index 000000000000..0cc2c6572826
--- /dev/null
+++ b/fs/nls/nls_macturkish.c
@@ -0,0 +1,602 @@
+/*
+ * linux/fs/nls/nls_macturkish.c
+ *
+ * Charset macturkish translation tables.
+ * Generated automatically from the Unicode and charset
+ * tables from the Unicode Organization (www.unicode.org).
+ * The Unicode to charset table has only exact mappings.
+ */
+
+/*
+ * COPYRIGHT AND PERMISSION NOTICE
+ *
+ * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
+ * the Terms of Use in http://www.unicode.org/copyright.html.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of the Unicode data files and any associated documentation (the "Data
+ * Files") or Unicode software and any associated documentation (the
+ * "Software") to deal in the Data Files or Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, and/or sell copies of the Data Files or Software, and
+ * to permit persons to whom the Data Files or Software are furnished to do
+ * so, provided that (a) the above copyright notice(s) and this permission
+ * notice appear with all copies of the Data Files or Software, (b) both the
+ * above copyright notice(s) and this permission notice appear in associated
+ * documentation, and (c) there is clear notice in each modified Data File or
+ * in the Software as well as in the documentation associated with the Data
+ * File(s) or Software that the data or software has been modified.
+ *
+ * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+ * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+ * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+ * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or other
+ * dealings in these Data Files or Software without prior written
+ * authorization of the copyright holder.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+static const wchar_t charset2uni[256] = {
+ /* 0x00 */
+ 0x0000, 0x0001, 0x0002, 0x0003,
+ 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b,
+ 0x000c, 0x000d, 0x000e, 0x000f,
+ /* 0x10 */
+ 0x0010, 0x0011, 0x0012, 0x0013,
+ 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b,
+ 0x001c, 0x001d, 0x001e, 0x001f,
+ /* 0x20 */
+ 0x0020, 0x0021, 0x0022, 0x0023,
+ 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b,
+ 0x002c, 0x002d, 0x002e, 0x002f,
+ /* 0x30 */
+ 0x0030, 0x0031, 0x0032, 0x0033,
+ 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b,
+ 0x003c, 0x003d, 0x003e, 0x003f,
+ /* 0x40 */
+ 0x0040, 0x0041, 0x0042, 0x0043,
+ 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b,
+ 0x004c, 0x004d, 0x004e, 0x004f,
+ /* 0x50 */
+ 0x0050, 0x0051, 0x0052, 0x0053,
+ 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b,
+ 0x005c, 0x005d, 0x005e, 0x005f,
+ /* 0x60 */
+ 0x0060, 0x0061, 0x0062, 0x0063,
+ 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006a, 0x006b,
+ 0x006c, 0x006d, 0x006e, 0x006f,
+ /* 0x70 */
+ 0x0070, 0x0071, 0x0072, 0x0073,
+ 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007a, 0x007b,
+ 0x007c, 0x007d, 0x007e, 0x007f,
+ /* 0x80 */
+ 0x00c4, 0x00c5, 0x00c7, 0x00c9,
+ 0x00d1, 0x00d6, 0x00dc, 0x00e1,
+ 0x00e0, 0x00e2, 0x00e4, 0x00e3,
+ 0x00e5, 0x00e7, 0x00e9, 0x00e8,
+ /* 0x90 */
+ 0x00ea, 0x00eb, 0x00ed, 0x00ec,
+ 0x00ee, 0x00ef, 0x00f1, 0x00f3,
+ 0x00f2, 0x00f4, 0x00f6, 0x00f5,
+ 0x00fa, 0x00f9, 0x00fb, 0x00fc,
+ /* 0xa0 */
+ 0x2020, 0x00b0, 0x00a2, 0x00a3,
+ 0x00a7, 0x2022, 0x00b6, 0x00df,
+ 0x00ae, 0x00a9, 0x2122, 0x00b4,
+ 0x00a8, 0x2260, 0x00c6, 0x00d8,
+ /* 0xb0 */
+ 0x221e, 0x00b1, 0x2264, 0x2265,
+ 0x00a5, 0x00b5, 0x2202, 0x2211,
+ 0x220f, 0x03c0, 0x222b, 0x00aa,
+ 0x00ba, 0x03a9, 0x00e6, 0x00f8,
+ /* 0xc0 */
+ 0x00bf, 0x00a1, 0x00ac, 0x221a,
+ 0x0192, 0x2248, 0x2206, 0x00ab,
+ 0x00bb, 0x2026, 0x00a0, 0x00c0,
+ 0x00c3, 0x00d5, 0x0152, 0x0153,
+ /* 0xd0 */
+ 0x2013, 0x2014, 0x201c, 0x201d,
+ 0x2018, 0x2019, 0x00f7, 0x25ca,
+ 0x00ff, 0x0178, 0x011e, 0x011f,
+ 0x0130, 0x0131, 0x015e, 0x015f,
+ /* 0xe0 */
+ 0x2021, 0x00b7, 0x201a, 0x201e,
+ 0x2030, 0x00c2, 0x00ca, 0x00c1,
+ 0x00cb, 0x00c8, 0x00cd, 0x00ce,
+ 0x00cf, 0x00cc, 0x00d3, 0x00d4,
+ /* 0xf0 */
+ 0xf8ff, 0x00d2, 0x00da, 0x00db,
+ 0x00d9, 0xf8a0, 0x02c6, 0x02dc,
+ 0x00af, 0x02d8, 0x02d9, 0x02da,
+ 0x00b8, 0x02dd, 0x02db, 0x02c7,
+};
+
+static const unsigned char page00[256] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
+ 0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
+ 0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
+ 0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
+ 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
+ 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
+ 0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
+ 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
+ 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
+ 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
+ 0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
+ 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
+};
+
+static const unsigned char page01[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0xdb, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xdf, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page02[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page03[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page20[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
+ 0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page21[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page22[256] = {
+ 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
+ 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char page25[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
+};
+
+static const unsigned char pagef8[256] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
+ 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
+};
+
+static const unsigned char *const page_uni2charset[256] = {
+ page00, page01, page02, page03, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ page20, page21, page22, NULL, NULL, page25, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ pagef8, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+};
+
+static const unsigned char charset2lower[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static const unsigned char charset2upper[256] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
+};
+
+static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
+{
+ const unsigned char *uni2charset;
+ unsigned char cl = uni & 0x00ff;
+ unsigned char ch = (uni & 0xff00) >> 8;
+
+ if (boundlen <= 0)
+ return -ENAMETOOLONG;
+
+ uni2charset = page_uni2charset[ch];
+ if (uni2charset && uni2charset[cl])
+ out[0] = uni2charset[cl];
+ else
+ return -EINVAL;
+ return 1;
+}
+
+static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
+{
+ *uni = charset2uni[*rawstring];
+ if (*uni == 0x0000)
+ return -EINVAL;
+ return 1;
+}
+
+static struct nls_table table = {
+ .charset = "macturkish",
+ .uni2char = uni2char,
+ .char2uni = char2uni,
+ .charset2lower = charset2lower,
+ .charset2upper = charset2upper,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_nls_macturkish(void)
+{
+ return register_nls(&table);
+}
+
+static void __exit exit_nls_macturkish(void)
+{
+ unregister_nls(&table);
+}
+
+module_init(init_nls_macturkish)
+module_exit(exit_nls_macturkish)
+
+MODULE_LICENSE("Dual BSD/GPL");
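Note: the two Mac NLS tables added above (nls_macromanian and nls_macturkish) share the same two-level lookup in uni2char(): the high byte of the wide character selects one of 256 optional page arrays, and the low byte indexes into the chosen page; a zero entry means the code point has no single-byte equivalent and the function returns -EINVAL. As a rough illustration only, the standalone userspace C sketch below reproduces that indexing scheme with a toy page that fills in just two slots (chosen to match the 0xc4/0xc7 entries of page00 above); everything named toy_* is hypothetical and not part of the kernel code.

#include <stdio.h>

/* Toy page for U+00xx: only two slots filled; 0 means "no mapping". */
static const unsigned char toy_page00[256] = {
	[0xc4] = 0x80,	/* U+00C4 -> 0x80, as in page00 above */
	[0xc7] = 0x82,	/* U+00C7 -> 0x82, as in page00 above */
};

/* One optional page per high byte; unpopulated pages stay NULL. */
static const unsigned char *const toy_uni2charset[256] = {
	[0x00] = toy_page00,
};

/* Mirrors the shape of the kernel helper: returns 1 on success, -1 if unmapped. */
static int toy_uni2char(unsigned int uni, unsigned char *out)
{
	const unsigned char *page = toy_uni2charset[(uni >> 8) & 0xff];
	unsigned char cl = uni & 0xff;

	if (page && page[cl]) {
		*out = page[cl];
		return 1;
	}
	return -1;
}

int main(void)
{
	unsigned char c;

	if (toy_uni2char(0x00c7, &c) == 1)
		printf("U+00C7 -> 0x%02x\n", c);
	if (toy_uni2char(0x20ac, &c) < 0)
		printf("U+20AC unmapped in the toy table\n");
	return 0;
}

Splitting the mapping into per-page arrays keeps the common case cheap: Unicode pages with no mappings stay NULL and cost one pointer each instead of a full 256-byte table.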
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 2eaa66652944..c6dbd3db6ca8 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -2258,7 +2258,7 @@ void ntfs_evict_big_inode(struct inode *vi)
ntfs_inode *ni = NTFS_I(vi);
truncate_inode_pages(&vi->i_data, 0);
- end_writeback(vi);
+ clear_inode(vi);
#ifdef NTFS_RW
if (NInoDirty(ni)) {
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 3b5825ef3193..e31d6ae013ab 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -367,7 +367,7 @@ static void dlmfs_evict_inode(struct inode *inode)
int status;
struct dlmfs_inode_private *ip;
- end_writeback(inode);
+ clear_inode(inode);
mlog(0, "inode %lu\n", inode->i_ino);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 17454a904d7b..735514ca400f 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -1069,7 +1069,7 @@ static void ocfs2_clear_inode(struct inode *inode)
int status;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
- end_writeback(inode);
+ clear_inode(inode);
trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno,
inode->i_nlink);
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index dbc842222589..e6213b3725d1 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -184,7 +184,7 @@ int omfs_sync_inode(struct inode *inode)
static void omfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
if (inode->i_nlink)
return;
diff --git a/fs/pipe.c b/fs/pipe.c
index fec5e4ad071a..95ebb56de494 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -693,7 +693,7 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return put_user(count, (int __user *)arg);
default:
- return -EINVAL;
+ return -ENOIOCTLCMD;
}
}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index dc4c5a7b9ece..c1c207c36cae 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -370,7 +370,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task, int whole)
{
unsigned long vsize, eip, esp, wchan = ~0UL;
- long priority, nice;
+ int priority, nice;
int tty_pgrp = -1, tty_nr = 0;
sigset_t sigign, sigcatch;
char state;
@@ -492,7 +492,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
seq_put_decimal_ull(m, ' ', 0);
seq_put_decimal_ull(m, ' ', start_time);
seq_put_decimal_ull(m, ' ', vsize);
- seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0);
+ seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
seq_put_decimal_ull(m, ' ', rsslim);
seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
@@ -517,9 +517,23 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
- seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0);
- seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0);
- seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0);
+
+ if (mm && permitted) {
+ seq_put_decimal_ull(m, ' ', mm->start_data);
+ seq_put_decimal_ull(m, ' ', mm->end_data);
+ seq_put_decimal_ull(m, ' ', mm->start_brk);
+ seq_put_decimal_ull(m, ' ', mm->arg_start);
+ seq_put_decimal_ull(m, ' ', mm->arg_end);
+ seq_put_decimal_ull(m, ' ', mm->env_start);
+ seq_put_decimal_ull(m, ' ', mm->env_end);
+ } else
+ seq_printf(m, " 0 0 0 0 0 0 0");
+
+ if (permitted)
+ seq_put_decimal_ll(m, ' ', task->exit_code);
+ else
+ seq_put_decimal_ll(m, ' ', 0);
+
seq_putc(m, '\n');
if (mm)
mmput(mm);
@@ -565,3 +579,126 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
return 0;
}
+
+#ifdef CONFIG_CHECKPOINT_RESTORE
+static struct pid *
+get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
+{
+ struct task_struct *start, *task;
+ struct pid *pid = NULL;
+
+ read_lock(&tasklist_lock);
+
+ start = pid_task(proc_pid(inode), PIDTYPE_PID);
+ if (!start)
+ goto out;
+
+ /*
+ * Let's try to continue searching first; this gives
+ * us a significant speedup on children-rich processes.
+ */
+ if (pid_prev) {
+ task = pid_task(pid_prev, PIDTYPE_PID);
+ if (task && task->real_parent == start &&
+ !(list_empty(&task->sibling))) {
+ if (list_is_last(&task->sibling, &start->children))
+ goto out;
+ task = list_first_entry(&task->sibling,
+ struct task_struct, sibling);
+ pid = get_pid(task_pid(task));
+ goto out;
+ }
+ }
+
+ /*
+ * Slow search case.
+ *
+ * We might miss some children here if children
+ * exit while we are not holding the lock, but
+ * this interface was never promised to be that
+ * accurate.
+ *
+ * "Just suppose that the parent sleeps, but N children
+ * exit after we printed their tids. Now the slow paths
+ * skips N extra children, we miss N tasks." (c)
+ *
+ * So one needs to stop or freeze the leader and all
+ * its children to get a precise result.
+ */
+ list_for_each_entry(task, &start->children, sibling) {
+ if (pos-- == 0) {
+ pid = get_pid(task_pid(task));
+ break;
+ }
+ }
+
+out:
+ read_unlock(&tasklist_lock);
+ return pid;
+}
+
+static int children_seq_show(struct seq_file *seq, void *v)
+{
+ struct inode *inode = seq->private;
+ pid_t pid;
+
+ pid = pid_nr_ns(v, inode->i_sb->s_fs_info);
+ return seq_printf(seq, "%d ", pid);
+}
+
+static void *children_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return get_children_pid(seq->private, NULL, *pos);
+}
+
+static void *children_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct pid *pid;
+
+ pid = get_children_pid(seq->private, v, *pos + 1);
+ put_pid(v);
+
+ ++*pos;
+ return pid;
+}
+
+static void children_seq_stop(struct seq_file *seq, void *v)
+{
+ put_pid(v);
+}
+
+static const struct seq_operations children_seq_ops = {
+ .start = children_seq_start,
+ .next = children_seq_next,
+ .stop = children_seq_stop,
+ .show = children_seq_show,
+};
+
+static int children_seq_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *m;
+ int ret;
+
+ ret = seq_open(file, &children_seq_ops);
+ if (ret)
+ return ret;
+
+ m = file->private_data;
+ m->private = inode;
+
+ return ret;
+}
+
+int children_seq_release(struct inode *inode, struct file *file)
+{
+ seq_release(inode, file);
+ return 0;
+}
+
+const struct file_operations proc_tid_children_operations = {
+ .open = children_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = children_seq_release,
+};
+#endif /* CONFIG_CHECKPOINT_RESTORE */
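As a usage note (not part of the patch): with CONFIG_CHECKPOINT_RESTORE enabled, the REG("children", ...) entry added to tid_base_stuff further down exposes this seq_file as /proc/<pid>/task/<tid>/children, and children_seq_show() prints the child PIDs as space-separated decimals. A minimal userspace sketch that parses that output, assuming a kernel with this patch applied:

#include <stdio.h>
#include <sys/types.h>

/* Illustrative only: print the children of one thread of a process. */
static int print_children(pid_t pid, pid_t tid)
{
	char path[64];
	FILE *f;
	int child;

	snprintf(path, sizeof(path), "/proc/%d/task/%d/children",
		 (int)pid, (int)tid);
	f = fopen(path, "r");
	if (!f)
		return -1;	/* no such file, e.g. CONFIG_CHECKPOINT_RESTORE off */

	while (fscanf(f, "%d", &child) == 1)
		printf("%d\n", child);

	fclose(f);
	return 0;
}

As the comment in get_children_pid() notes, the listing is only precise if the leader and its children are stopped or frozen while it is read.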
diff --git a/fs/proc/base.c b/fs/proc/base.c
index d2d3108a611c..616f41a7cde6 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -199,11 +199,6 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
return result;
}
-struct mm_struct *mm_for_maps(struct task_struct *task)
-{
- return mm_access(task, PTRACE_MODE_READ);
-}
-
static int proc_pid_cmdline(struct task_struct *task, char * buffer)
{
int res = 0;
@@ -243,7 +238,7 @@ out:
static int proc_pid_auxv(struct task_struct *task, char *buffer)
{
- struct mm_struct *mm = mm_for_maps(task);
+ struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
int res = PTR_ERR(mm);
if (mm && !IS_ERR(mm)) {
unsigned int nwords = 0;
@@ -411,12 +406,13 @@ static const struct file_operations proc_lstats_operations = {
static int proc_oom_score(struct task_struct *task, char *buffer)
{
+ unsigned long totalpages = totalram_pages + total_swap_pages;
unsigned long points = 0;
read_lock(&tasklist_lock);
if (pid_alive(task))
- points = oom_badness(task, NULL, NULL,
- totalram_pages + total_swap_pages);
+ points = oom_badness(task, NULL, NULL, totalpages) *
+ 1000 / totalpages;
read_unlock(&tasklist_lock);
return sprintf(buffer, "%lu\n", points);
}
@@ -678,7 +674,7 @@ static const struct file_operations proc_single_file_operations = {
.release = single_release,
};
-static int mem_open(struct inode* inode, struct file* file)
+static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
{
struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
struct mm_struct *mm;
@@ -686,7 +682,7 @@ static int mem_open(struct inode* inode, struct file* file)
if (!task)
return -ESRCH;
- mm = mm_access(task, PTRACE_MODE_ATTACH);
+ mm = mm_access(task, mode);
put_task_struct(task);
if (IS_ERR(mm))
@@ -706,6 +702,11 @@ static int mem_open(struct inode* inode, struct file* file)
return 0;
}
+static int mem_open(struct inode *inode, struct file *file)
+{
+ return __mem_open(inode, file, PTRACE_MODE_ATTACH);
+}
+
static ssize_t mem_rw(struct file *file, char __user *buf,
size_t count, loff_t *ppos, int write)
{
@@ -802,30 +803,29 @@ static const struct file_operations proc_mem_operations = {
.release = mem_release,
};
+static int environ_open(struct inode *inode, struct file *file)
+{
+ return __mem_open(inode, file, PTRACE_MODE_READ);
+}
+
static ssize_t environ_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
char *page;
unsigned long src = *ppos;
- int ret = -ESRCH;
- struct mm_struct *mm;
+ int ret = 0;
+ struct mm_struct *mm = file->private_data;
- if (!task)
- goto out_no_task;
+ if (!mm)
+ return 0;
- ret = -ENOMEM;
page = (char *)__get_free_page(GFP_TEMPORARY);
if (!page)
- goto out;
-
-
- mm = mm_for_maps(task);
- ret = PTR_ERR(mm);
- if (!mm || IS_ERR(mm))
- goto out_free;
+ return -ENOMEM;
ret = 0;
+ if (!atomic_inc_not_zero(&mm->mm_users))
+ goto free;
while (count > 0) {
int this_len, retval, max_len;
@@ -837,7 +837,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
this_len = (this_len > max_len) ? max_len : this_len;
- retval = access_process_vm(task, (mm->env_start + src),
+ retval = access_remote_vm(mm, (mm->env_start + src),
page, this_len, 0);
if (retval <= 0) {
@@ -856,19 +856,18 @@ static ssize_t environ_read(struct file *file, char __user *buf,
count -= retval;
}
*ppos = src;
-
mmput(mm);
-out_free:
+
+free:
free_page((unsigned long) page);
-out:
- put_task_struct(task);
-out_no_task:
return ret;
}
static const struct file_operations proc_environ_operations = {
+ .open = environ_open,
.read = environ_read,
.llseek = generic_file_llseek,
+ .release = mem_release,
};
static ssize_t oom_adjust_read(struct file *file, char __user *buf,
@@ -1849,7 +1848,7 @@ static const struct dentry_operations tid_fd_dentry_operations =
static struct dentry *proc_fd_instantiate(struct inode *dir,
struct dentry *dentry, struct task_struct *task, const void *ptr)
{
- unsigned fd = *(const unsigned *)ptr;
+ unsigned fd = (unsigned long)ptr;
struct inode *inode;
struct proc_inode *ei;
struct dentry *error = ERR_PTR(-ENOENT);
@@ -1886,7 +1885,7 @@ static struct dentry *proc_lookupfd_common(struct inode *dir,
if (fd == ~0U)
goto out;
- result = instantiate(dir, dentry, task, &fd);
+ result = instantiate(dir, dentry, task, (void *)(unsigned long)fd);
out:
put_task_struct(task);
out_no_task:
@@ -1929,21 +1928,22 @@ static int proc_readfd_common(struct file * filp, void * dirent,
fd++, filp->f_pos++) {
char name[PROC_NUMBUF];
int len;
+ int rv;
if (!fcheck_files(files, fd))
continue;
rcu_read_unlock();
len = snprintf(name, sizeof(name), "%d", fd);
- if (proc_fill_cache(filp, dirent, filldir,
- name, len, instantiate,
- p, &fd) < 0) {
- rcu_read_lock();
- break;
- }
+ rv = proc_fill_cache(filp, dirent, filldir,
+ name, len, instantiate, p,
+ (void *)(unsigned long)fd);
+ if (rv < 0)
+ goto out_fd_loop;
rcu_read_lock();
}
rcu_read_unlock();
+out_fd_loop:
put_files_struct(files);
}
out:
@@ -2023,11 +2023,8 @@ static int map_files_d_revalidate(struct dentry *dentry, struct nameidata *nd)
if (!task)
goto out_notask;
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
- goto out;
-
- mm = get_task_mm(task);
- if (!mm)
+ mm = mm_access(task, PTRACE_MODE_READ);
+ if (IS_ERR_OR_NULL(mm))
goto out;
if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
@@ -2356,7 +2353,7 @@ static const struct inode_operations proc_fd_inode_operations = {
static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
struct dentry *dentry, struct task_struct *task, const void *ptr)
{
- unsigned fd = *(unsigned *)ptr;
+ unsigned fd = (unsigned long)ptr;
struct inode *inode;
struct proc_inode *ei;
struct dentry *error = ERR_PTR(-ENOENT);
@@ -3403,6 +3400,9 @@ static const struct pid_entry tid_base_stuff[] = {
ONE("stat", S_IRUGO, proc_tid_stat),
ONE("statm", S_IRUGO, proc_pid_statm),
REG("maps", S_IRUGO, proc_tid_maps_operations),
+#ifdef CONFIG_CHECKPOINT_RESTORE
+ REG("children", S_IRUGO, proc_tid_children_operations),
+#endif
#ifdef CONFIG_NUMA
REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations),
#endif
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 554ecc54799f..7ac817b64a71 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -33,7 +33,7 @@ static void proc_evict_inode(struct inode *inode)
const struct proc_ns_operations *ns_ops;
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
/* Stop tracking associated processes */
put_pid(PROC_I(inode)->pid);
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 5f79bb8b4c60..eca4aca5b6e2 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -31,8 +31,6 @@ struct vmalloc_info {
unsigned long largest_chunk;
};
-extern struct mm_struct *mm_for_maps(struct task_struct *);
-
#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
extern void get_vmalloc_info(struct vmalloc_info *vmi);
@@ -56,6 +54,7 @@ extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task);
extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
+extern const struct file_operations proc_tid_children_operations;
extern const struct file_operations proc_pid_maps_operations;
extern const struct file_operations proc_tid_maps_operations;
extern const struct file_operations proc_pid_numa_maps_operations;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 1030a716d155..4540b8f76f16 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -125,7 +125,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
if (!priv->task)
return ERR_PTR(-ESRCH);
- mm = mm_for_maps(priv->task);
+ mm = mm_access(priv->task, PTRACE_MODE_READ);
if (!mm || IS_ERR(mm))
return mm;
down_read(&mm->mmap_sem);
@@ -393,6 +393,7 @@ struct mem_size_stats {
unsigned long anonymous;
unsigned long anonymous_thp;
unsigned long swap;
+ unsigned long nonlinear;
u64 pss;
};
@@ -402,24 +403,33 @@ static void smaps_pte_entry(pte_t ptent, unsigned long addr,
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = mss->vma;
- struct page *page;
+ pgoff_t pgoff = linear_page_index(vma, addr);
+ struct page *page = NULL;
int mapcount;
- if (is_swap_pte(ptent)) {
- mss->swap += ptent_size;
- return;
+ if (pte_present(ptent)) {
+ page = vm_normal_page(vma, addr, ptent);
+ } else if (is_swap_pte(ptent)) {
+ swp_entry_t swpent = pte_to_swp_entry(ptent);
+
+ if (!non_swap_entry(swpent))
+ mss->swap += ptent_size;
+ else if (is_migration_entry(swpent))
+ page = migration_entry_to_page(swpent);
+ } else if (pte_file(ptent)) {
+ if (pte_to_pgoff(ptent) != pgoff)
+ mss->nonlinear += ptent_size;
}
- if (!pte_present(ptent))
- return;
-
- page = vm_normal_page(vma, addr, ptent);
if (!page)
return;
if (PageAnon(page))
mss->anonymous += ptent_size;
+ if (page->index != pgoff)
+ mss->nonlinear += ptent_size;
+
mss->resident += ptent_size;
/* Accumulate the size in pages that have been accessed. */
if (pte_young(ptent) || PageReferenced(page))
@@ -521,6 +531,10 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
(vma->vm_flags & VM_LOCKED) ?
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
+ if (vma->vm_flags & VM_NONLINEAR)
+ seq_printf(m, "Nonlinear: %8lu kB\n",
+ mss.nonlinear >> 10);
+
if (m->count < m->size) /* vma is copied successfully */
m->version = (vma != get_gate_vma(task->mm))
? vma->vm_start : 0;
@@ -700,6 +714,7 @@ struct pagemapread {
#define PM_PRESENT PM_STATUS(4LL)
#define PM_SWAP PM_STATUS(2LL)
+#define PM_FILE PM_STATUS(1LL)
#define PM_NOT_PRESENT PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER 1
@@ -733,22 +748,33 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
return err;
}
-static u64 swap_pte_to_pagemap_entry(pte_t pte)
-{
- swp_entry_t e = pte_to_swp_entry(pte);
- return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
-}
-
-static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte)
+static void pte_to_pagemap_entry(pagemap_entry_t *pme,
+ struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
- if (is_swap_pte(pte))
- *pme = make_pme(PM_PFRAME(swap_pte_to_pagemap_entry(pte))
- | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP);
- else if (pte_present(pte))
- *pme = make_pme(PM_PFRAME(pte_pfn(pte))
- | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
- else
+ u64 frame, flags;
+ struct page *page = NULL;
+
+ if (pte_present(pte)) {
+ frame = pte_pfn(pte);
+ flags = PM_PRESENT;
+ page = vm_normal_page(vma, addr, pte);
+ } else if (is_swap_pte(pte)) {
+ swp_entry_t entry = pte_to_swp_entry(pte);
+
+ frame = swp_type(entry) |
+ (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
+ flags = PM_SWAP;
+ if (is_migration_entry(entry))
+ page = migration_entry_to_page(entry);
+ } else {
*pme = make_pme(PM_NOT_PRESENT);
+ return;
+ }
+
+ if (page && !PageAnon(page))
+ flags |= PM_FILE;
+
+ *pme = make_pme(PM_PFRAME(frame) | PM_PSHIFT(PAGE_SHIFT) | flags);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -784,7 +810,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
- if (pmd_trans_huge_lock(pmd, vma) == 1) {
+ if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
for (; addr != end; addr += PAGE_SIZE) {
unsigned long offset;
@@ -815,7 +841,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
if (vma && (vma->vm_start <= addr) &&
!is_vm_hugetlb_page(vma)) {
pte = pte_offset_map(pmd, addr);
- pte_to_pagemap_entry(&pme, *pte);
+ pte_to_pagemap_entry(&pme, vma, addr, *pte);
/* unmap before userspace copy */
pte_unmap(pte);
}
@@ -869,11 +895,11 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
* For each page in the address space, this file contains one 64-bit entry
* consisting of the following:
*
- * Bits 0-55 page frame number (PFN) if present
+ * Bits 0-54 page frame number (PFN) if present
* Bits 0-4 swap type if swapped
- * Bits 5-55 swap offset if swapped
+ * Bits 5-54 swap offset if swapped
* Bits 55-60 page shift (page size = 1<<page shift)
- * Bit 61 reserved for future use
+ * Bit 61 page is file-page or shared-anon
* Bit 62 page swapped
* Bit 63 page present
*
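A small userspace sketch (not part of the patch) of how the layout documented above decodes after this change; the new PM_FILE flag is bit 61, matching the "file-page or shared-anon" line:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: decode one 64-bit /proc/<pid>/pagemap entry. */
static void decode_pagemap_entry(uint64_t pme)
{
	int present = (pme >> 63) & 1;			/* bit 63 */
	int swapped = (pme >> 62) & 1;			/* bit 62 */
	int file_or_shared_anon = (pme >> 61) & 1;	/* bit 61 (PM_FILE) */
	unsigned int shift = (pme >> 55) & 0x3f;	/* bits 55-60 */
	uint64_t low = pme & ((1ULL << 55) - 1);	/* bits 0-54 */

	if (present)
		printf("pfn %llu, page size %u, %s\n",
		       (unsigned long long)low, 1u << shift,
		       file_or_shared_anon ? "file/shared-anon" : "anon");
	else if (swapped)
		printf("swap type %llu, swap offset %llu\n",
		       (unsigned long long)(low & 0x1f),	/* bits 0-4 */
		       (unsigned long long)(low >> 5));		/* bits 5-54 */
	else
		printf("not present\n");
}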
@@ -919,7 +945,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
if (!pm.buffer)
goto out_task;
- mm = mm_for_maps(task);
+ mm = mm_access(task, PTRACE_MODE_READ);
ret = PTR_ERR(mm);
if (!mm || IS_ERR(mm))
goto out_free;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 74fe164d1b23..1ccfa537f5f5 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -223,7 +223,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
if (!priv->task)
return ERR_PTR(-ESRCH);
- mm = mm_for_maps(priv->task);
+ mm = mm_access(priv->task, PTRACE_MODE_READ);
if (!mm || IS_ERR(mm)) {
put_task_struct(priv->task);
priv->task = NULL;
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 19507889bb7f..aeb19e68e086 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -85,7 +85,7 @@ static void pstore_evict_inode(struct inode *inode)
struct pstore_private *p = inode->i_private;
unsigned long flags;
- end_writeback(inode);
+ clear_inode(inode);
if (p) {
spin_lock_irqsave(&allpstore_lock, flags);
list_del(&p->list);
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index d69a1d1d7e15..10cbe841cb7e 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -116,15 +116,15 @@
* spinlock to internal buffers before writing.
*
* Lock ordering (including related VFS locks) is the following:
- * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
+ * dqonoff_mutex > i_mutex > journal_lock > dqptr_sem > dquot->dq_lock >
* dqio_mutex
+ * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc.
* The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
* dqptr_sem. But filesystem has to count with the fact that functions such as
* dquot_alloc_space() acquire dqptr_sem and they usually have to be called
* from inside a transaction to keep filesystem consistency after a crash. Also
* filesystems usually want to do some IO on dquot from ->mark_dirty which is
* called with dqptr_sem held.
- * i_mutex on quota files is special (it's below dqio_mutex)
*/
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
@@ -638,7 +638,7 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
dqstats_inc(DQST_SYNCS);
mutex_unlock(&dqopt->dqonoff_mutex);
- if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE))
+ if (!wait || (dqopt->flags & DQUOT_QUOTA_SYS_FILE))
return 0;
/* This is not very clever (and fast) but currently I don't know about
@@ -652,18 +652,17 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
* Now when everything is written we can discard the pagecache so
* that userspace sees the changes.
*/
- mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_lock(&dqopt->dqonoff_mutex);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
- mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
- I_MUTEX_QUOTA);
- truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
- mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
+ mutex_lock(&dqopt->files[cnt]->i_mutex);
+ truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
+ mutex_unlock(&dqopt->files[cnt]->i_mutex);
}
- mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_unlock(&dqopt->dqonoff_mutex);
return 0;
}
@@ -907,14 +906,14 @@ static void add_dquot_ref(struct super_block *sb, int type)
spin_unlock(&inode->i_lock);
continue;
}
-#ifdef CONFIG_QUOTA_DEBUG
- if (unlikely(inode_get_rsv_space(inode) > 0))
- reserved = 1;
-#endif
__iget(inode);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_sb_list_lock);
+#ifdef CONFIG_QUOTA_DEBUG
+ if (unlikely(inode_get_rsv_space(inode) > 0))
+ reserved = 1;
+#endif
iput(old_inode);
__dquot_initialize(inode, type);
@@ -2037,8 +2036,7 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
/* If quota was reenabled in the meantime, we have
* nothing to do */
if (!sb_has_quota_loaded(sb, cnt)) {
- mutex_lock_nested(&toputinode[cnt]->i_mutex,
- I_MUTEX_QUOTA);
+ mutex_lock(&toputinode[cnt]->i_mutex);
toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
S_NOATIME | S_NOQUOTA);
truncate_inode_pages(&toputinode[cnt]->i_data,
@@ -2133,7 +2131,7 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
/* We don't want quota and atime on quota files (deadlocks
* possible) Also nobody should write to the file - we use
* special IO operations which ignore the immutable bit. */
- mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
+ mutex_lock(&inode->i_mutex);
oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
S_NOQUOTA);
inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
@@ -2180,7 +2178,7 @@ out_file_init:
iput(inode);
out_lock:
if (oldflags != -1) {
- mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
+ mutex_lock(&inode->i_mutex);
/* Set the flags back (in the case of accidental quotaon()
* on a wrong file we don't want to mess up the flags) */
inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
diff --git a/fs/read_write.c b/fs/read_write.c
index ffc99d22e0a3..c20614f86c01 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -633,8 +633,7 @@ ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
unsigned long nr_segs, unsigned long fast_segs,
struct iovec *fast_pointer,
- struct iovec **ret_pointer,
- int check_access)
+ struct iovec **ret_pointer)
{
unsigned long seg;
ssize_t ret;
@@ -690,7 +689,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
ret = -EINVAL;
goto out;
}
- if (check_access
+ if (type >= 0
&& unlikely(!access_ok(vrfy_dir(type), buf, len))) {
ret = -EFAULT;
goto out;
@@ -723,7 +722,7 @@ static ssize_t do_readv_writev(int type, struct file *file,
}
ret = rw_copy_check_uvector(type, uvector, nr_segs,
- ARRAY_SIZE(iovstack), iovstack, &iov, 1);
+ ARRAY_SIZE(iovstack), iovstack, &iov);
if (ret <= 0)
goto out;
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 494c315c7417..59d06871a850 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -76,14 +76,14 @@ void reiserfs_evict_inode(struct inode *inode)
;
}
out:
- end_writeback(inode); /* note this must go after the journal_end to prevent deadlock */
+ clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */
dquot_drop(inode);
inode->i_blocks = 0;
reiserfs_write_unlock_once(inode->i_sb, depth);
return;
no_delete:
- end_writeback(inode);
+ clear_inode(inode);
dquot_drop(inode);
}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 8b7616ef06d8..c07b7d709447 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2270,7 +2270,6 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
(unsigned long long)off, (unsigned long long)len);
return -EIO;
}
- mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;
@@ -2302,16 +2301,13 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
blk++;
}
out:
- if (len == towrite) {
- mutex_unlock(&inode->i_mutex);
+ if (len == towrite)
return err;
- }
if (inode->i_size < off + len - towrite)
i_size_write(inode, off + len - towrite);
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
- mutex_unlock(&inode->i_mutex);
return len - towrite;
}
diff --git a/fs/splice.c b/fs/splice.c
index f8476841eb04..406ef2b792c2 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1388,7 +1388,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
*/
static int get_iovec_page_array(const struct iovec __user *iov,
unsigned int nr_vecs, struct page **pages,
- struct partial_page *partial, int aligned,
+ struct partial_page *partial, bool aligned,
unsigned int pipe_buffers)
{
int buffers = 0, error = 0;
@@ -1626,7 +1626,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
return -ENOMEM;
spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
- spd.partial, flags & SPLICE_F_GIFT,
+ spd.partial, false,
pipe->buffers);
if (spd.nr_pages <= 0)
ret = spd.nr_pages;
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 907c2b3af758..0ce3ccf7f401 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -310,7 +310,7 @@ void sysfs_evict_inode(struct inode *inode)
struct sysfs_dirent *sd = inode->i_private;
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
sysfs_put(sd);
}
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 3da5ce25faf0..08d0b2568cd3 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -316,7 +316,7 @@ static void sysv_evict_inode(struct inode *inode)
sysv_truncate(inode);
}
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
if (!inode->i_nlink)
sysv_free_inode(inode);
}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 001acccac0d6..5862dd9d2784 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -378,7 +378,7 @@ out:
smp_wmb();
}
done:
- end_writeback(inode);
+ clear_inode(inode);
}
static void ubifs_dirty_inode(struct inode *inode, int flags)
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 7d7528008359..873e1bab9c4c 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -80,7 +80,7 @@ void udf_evict_inode(struct inode *inode)
} else
truncate_inode_pages(&inode->i_data, 0);
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
inode->i_size != iinfo->i_lenExtents) {
udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 7cdd3953d67e..dd7c89d8a1c1 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -895,7 +895,7 @@ void ufs_evict_inode(struct inode * inode)
}
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
if (want_delete) {
lock_ufs(inode->i_sb);
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 0a9977983f92..d2bf974b1a2f 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -33,6 +33,7 @@ xfs-y += xfs_aops.o \
xfs_discard.o \
xfs_error.o \
xfs_export.o \
+ xfs_extent_busy.o \
xfs_file.o \
xfs_filestream.o \
xfs_fsops.o \
@@ -49,7 +50,6 @@ xfs-y += xfs_aops.o \
xfs_sync.o \
xfs_xattr.o \
xfs_rename.o \
- xfs_rw.o \
xfs_utils.o \
xfs_vnodeops.o \
kmem.o \
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 4805f009f923..44d65c1533c0 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -175,24 +175,6 @@ typedef struct xfs_agfl {
} xfs_agfl_t;
/*
- * Busy block/extent entry. Indexed by a rbtree in perag to mark blocks that
- * have been freed but whose transactions aren't committed to disk yet.
- *
- * Note that we use the transaction ID to record the transaction, not the
- * transaction structure itself. See xfs_alloc_busy_insert() for details.
- */
-struct xfs_busy_extent {
- struct rb_node rb_node; /* ag by-bno indexed search tree */
- struct list_head list; /* transaction busy extent list */
- xfs_agnumber_t agno;
- xfs_agblock_t bno;
- xfs_extlen_t length;
- unsigned int flags;
-#define XFS_ALLOC_BUSY_DISCARDED 0x01 /* undergoing a discard op. */
-#define XFS_ALLOC_BUSY_SKIP_DISCARD 0x02 /* do not discard */
-};
-
-/*
* Per-ag incore structure, copies of information in agf and agi,
* to improve the performance of allocation group selection.
*/
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 0f0df2759b09..229641fb8e67 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -20,7 +20,6 @@
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -32,6 +31,7 @@
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
+#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_trace.h"
@@ -47,8 +47,6 @@ STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
-STATIC void xfs_alloc_busy_trim(struct xfs_alloc_arg *,
- xfs_agblock_t, xfs_extlen_t, xfs_agblock_t *, xfs_extlen_t *);
/*
* Lookup the record equal to [bno, len] in the btree given by cur.
@@ -152,7 +150,7 @@ xfs_alloc_compute_aligned(
xfs_extlen_t len;
/* Trim busy sections out of found extent */
- xfs_alloc_busy_trim(args, foundbno, foundlen, &bno, &len);
+ xfs_extent_busy_trim(args, foundbno, foundlen, &bno, &len);
if (args->alignment > 1 && len >= args->minlen) {
xfs_agblock_t aligned_bno = roundup(bno, args->alignment);
@@ -536,7 +534,7 @@ xfs_alloc_ag_vextent(
if (error)
return error;
- ASSERT(!xfs_alloc_busy_search(args->mp, args->agno,
+ ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
args->agbno, args->len));
}
@@ -603,7 +601,7 @@ xfs_alloc_ag_vextent_exact(
/*
* Check for overlapping busy extents.
*/
- xfs_alloc_busy_trim(args, fbno, flen, &tbno, &tlen);
+ xfs_extent_busy_trim(args, fbno, flen, &tbno, &tlen);
/*
* Give up if the start of the extent is busy, or the freespace isn't
@@ -1391,7 +1389,7 @@ xfs_alloc_ag_vextent_small(
if (error)
goto error0;
if (fbno != NULLAGBLOCK) {
- xfs_alloc_busy_reuse(args->mp, args->agno, fbno, 1,
+ xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
args->userdata);
if (args->userdata) {
@@ -2496,579 +2494,8 @@ xfs_free_extent(
error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
if (!error)
- xfs_alloc_busy_insert(tp, args.agno, args.agbno, len, 0);
+ xfs_extent_busy_insert(tp, args.agno, args.agbno, len, 0);
error0:
xfs_perag_put(args.pag);
return error;
}
-
-void
-xfs_alloc_busy_insert(
- struct xfs_trans *tp,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- unsigned int flags)
-{
- struct xfs_busy_extent *new;
- struct xfs_busy_extent *busyp;
- struct xfs_perag *pag;
- struct rb_node **rbp;
- struct rb_node *parent = NULL;
-
- new = kmem_zalloc(sizeof(struct xfs_busy_extent), KM_MAYFAIL);
- if (!new) {
- /*
- * No Memory! Since it is now not possible to track the free
- * block, make this a synchronous transaction to insure that
- * the block is not reused before this transaction commits.
- */
- trace_xfs_alloc_busy_enomem(tp->t_mountp, agno, bno, len);
- xfs_trans_set_sync(tp);
- return;
- }
-
- new->agno = agno;
- new->bno = bno;
- new->length = len;
- INIT_LIST_HEAD(&new->list);
- new->flags = flags;
-
- /* trace before insert to be able to see failed inserts */
- trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len);
-
- pag = xfs_perag_get(tp->t_mountp, new->agno);
- spin_lock(&pag->pagb_lock);
- rbp = &pag->pagb_tree.rb_node;
- while (*rbp) {
- parent = *rbp;
- busyp = rb_entry(parent, struct xfs_busy_extent, rb_node);
-
- if (new->bno < busyp->bno) {
- rbp = &(*rbp)->rb_left;
- ASSERT(new->bno + new->length <= busyp->bno);
- } else if (new->bno > busyp->bno) {
- rbp = &(*rbp)->rb_right;
- ASSERT(bno >= busyp->bno + busyp->length);
- } else {
- ASSERT(0);
- }
- }
-
- rb_link_node(&new->rb_node, parent, rbp);
- rb_insert_color(&new->rb_node, &pag->pagb_tree);
-
- list_add(&new->list, &tp->t_busy);
- spin_unlock(&pag->pagb_lock);
- xfs_perag_put(pag);
-}
-
-/*
- * Search for a busy extent within the range of the extent we are about to
- * allocate. You need to be holding the busy extent tree lock when calling
- * xfs_alloc_busy_search(). This function returns 0 for no overlapping busy
- * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
- * match. This is done so that a non-zero return indicates an overlap that
- * will require a synchronous transaction, but it can still be
- * used to distinguish between a partial or exact match.
- */
-int
-xfs_alloc_busy_search(
- struct xfs_mount *mp,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
- xfs_extlen_t len)
-{
- struct xfs_perag *pag;
- struct rb_node *rbp;
- struct xfs_busy_extent *busyp;
- int match = 0;
-
- pag = xfs_perag_get(mp, agno);
- spin_lock(&pag->pagb_lock);
-
- rbp = pag->pagb_tree.rb_node;
-
- /* find closest start bno overlap */
- while (rbp) {
- busyp = rb_entry(rbp, struct xfs_busy_extent, rb_node);
- if (bno < busyp->bno) {
- /* may overlap, but exact start block is lower */
- if (bno + len > busyp->bno)
- match = -1;
- rbp = rbp->rb_left;
- } else if (bno > busyp->bno) {
- /* may overlap, but exact start block is higher */
- if (bno < busyp->bno + busyp->length)
- match = -1;
- rbp = rbp->rb_right;
- } else {
- /* bno matches busyp, length determines exact match */
- match = (busyp->length == len) ? 1 : -1;
- break;
- }
- }
- spin_unlock(&pag->pagb_lock);
- xfs_perag_put(pag);
- return match;
-}
-
-/*
- * The found free extent [fbno, fend] overlaps part or all of the given busy
- * extent. If the overlap covers the beginning, the end, or all of the busy
- * extent, the overlapping portion can be made unbusy and used for the
- * allocation. We can't split a busy extent because we can't modify a
- * transaction/CIL context busy list, but we can update an entries block
- * number or length.
- *
- * Returns true if the extent can safely be reused, or false if the search
- * needs to be restarted.
- */
-STATIC bool
-xfs_alloc_busy_update_extent(
- struct xfs_mount *mp,
- struct xfs_perag *pag,
- struct xfs_busy_extent *busyp,
- xfs_agblock_t fbno,
- xfs_extlen_t flen,
- bool userdata)
-{
- xfs_agblock_t fend = fbno + flen;
- xfs_agblock_t bbno = busyp->bno;
- xfs_agblock_t bend = bbno + busyp->length;
-
- /*
- * This extent is currently being discarded. Give the thread
- * performing the discard a chance to mark the extent unbusy
- * and retry.
- */
- if (busyp->flags & XFS_ALLOC_BUSY_DISCARDED) {
- spin_unlock(&pag->pagb_lock);
- delay(1);
- spin_lock(&pag->pagb_lock);
- return false;
- }
-
- /*
- * If there is a busy extent overlapping a user allocation, we have
- * no choice but to force the log and retry the search.
- *
- * Fortunately this does not happen during normal operation, but
- * only if the filesystem is very low on space and has to dip into
- * the AGFL for normal allocations.
- */
- if (userdata)
- goto out_force_log;
-
- if (bbno < fbno && bend > fend) {
- /*
- * Case 1:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +---------+
- * fbno fend
- */
-
- /*
- * We would have to split the busy extent to be able to track
- * it correct, which we cannot do because we would have to
- * modify the list of busy extents attached to the transaction
- * or CIL context, which is immutable.
- *
- * Force out the log to clear the busy extent and retry the
- * search.
- */
- goto out_force_log;
- } else if (bbno >= fbno && bend <= fend) {
- /*
- * Case 2:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +-----------------+
- * fbno fend
- *
- * Case 3:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +--------------------------+
- * fbno fend
- *
- * Case 4:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +--------------------------+
- * fbno fend
- *
- * Case 5:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +-----------------------------------+
- * fbno fend
- *
- */
-
- /*
- * The busy extent is fully covered by the extent we are
- * allocating, and can simply be removed from the rbtree.
- * However we cannot remove it from the immutable list
- * tracking busy extents in the transaction or CIL context,
- * so set the length to zero to mark it invalid.
- *
- * We also need to restart the busy extent search from the
- * tree root, because erasing the node can rearrange the
- * tree topology.
- */
- rb_erase(&busyp->rb_node, &pag->pagb_tree);
- busyp->length = 0;
- return false;
- } else if (fend < bend) {
- /*
- * Case 6:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +---------+
- * fbno fend
- *
- * Case 7:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +------------------+
- * fbno fend
- *
- */
- busyp->bno = fend;
- } else if (bbno < fbno) {
- /*
- * Case 8:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +-------------+
- * fbno fend
- *
- * Case 9:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +----------------------+
- * fbno fend
- */
- busyp->length = fbno - busyp->bno;
- } else {
- ASSERT(0);
- }
-
- trace_xfs_alloc_busy_reuse(mp, pag->pag_agno, fbno, flen);
- return true;
-
-out_force_log:
- spin_unlock(&pag->pagb_lock);
- xfs_log_force(mp, XFS_LOG_SYNC);
- trace_xfs_alloc_busy_force(mp, pag->pag_agno, fbno, flen);
- spin_lock(&pag->pagb_lock);
- return false;
-}
-
-
-/*
- * For a given extent [fbno, flen], make sure we can reuse it safely.
- */
-void
-xfs_alloc_busy_reuse(
- struct xfs_mount *mp,
- xfs_agnumber_t agno,
- xfs_agblock_t fbno,
- xfs_extlen_t flen,
- bool userdata)
-{
- struct xfs_perag *pag;
- struct rb_node *rbp;
-
- ASSERT(flen > 0);
-
- pag = xfs_perag_get(mp, agno);
- spin_lock(&pag->pagb_lock);
-restart:
- rbp = pag->pagb_tree.rb_node;
- while (rbp) {
- struct xfs_busy_extent *busyp =
- rb_entry(rbp, struct xfs_busy_extent, rb_node);
- xfs_agblock_t bbno = busyp->bno;
- xfs_agblock_t bend = bbno + busyp->length;
-
- if (fbno + flen <= bbno) {
- rbp = rbp->rb_left;
- continue;
- } else if (fbno >= bend) {
- rbp = rbp->rb_right;
- continue;
- }
-
- if (!xfs_alloc_busy_update_extent(mp, pag, busyp, fbno, flen,
- userdata))
- goto restart;
- }
- spin_unlock(&pag->pagb_lock);
- xfs_perag_put(pag);
-}
-
-/*
- * For a given extent [fbno, flen], search the busy extent list to find a
- * subset of the extent that is not busy. If *rlen is smaller than
- * args->minlen no suitable extent could be found, and the higher level
- * code needs to force out the log and retry the allocation.
- */
-STATIC void
-xfs_alloc_busy_trim(
- struct xfs_alloc_arg *args,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- xfs_agblock_t *rbno,
- xfs_extlen_t *rlen)
-{
- xfs_agblock_t fbno;
- xfs_extlen_t flen;
- struct rb_node *rbp;
-
- ASSERT(len > 0);
-
- spin_lock(&args->pag->pagb_lock);
-restart:
- fbno = bno;
- flen = len;
- rbp = args->pag->pagb_tree.rb_node;
- while (rbp && flen >= args->minlen) {
- struct xfs_busy_extent *busyp =
- rb_entry(rbp, struct xfs_busy_extent, rb_node);
- xfs_agblock_t fend = fbno + flen;
- xfs_agblock_t bbno = busyp->bno;
- xfs_agblock_t bend = bbno + busyp->length;
-
- if (fend <= bbno) {
- rbp = rbp->rb_left;
- continue;
- } else if (fbno >= bend) {
- rbp = rbp->rb_right;
- continue;
- }
-
- /*
- * If this is a metadata allocation, try to reuse the busy
- * extent instead of trimming the allocation.
- */
- if (!args->userdata &&
- !(busyp->flags & XFS_ALLOC_BUSY_DISCARDED)) {
- if (!xfs_alloc_busy_update_extent(args->mp, args->pag,
- busyp, fbno, flen,
- false))
- goto restart;
- continue;
- }
-
- if (bbno <= fbno) {
- /* start overlap */
-
- /*
- * Case 1:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +---------+
- * fbno fend
- *
- * Case 2:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +-------------+
- * fbno fend
- *
- * Case 3:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +-------------+
- * fbno fend
- *
- * Case 4:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +-----------------+
- * fbno fend
- *
- * No unbusy region in extent, return failure.
- */
- if (fend <= bend)
- goto fail;
-
- /*
- * Case 5:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +----------------------+
- * fbno fend
- *
- * Case 6:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +--------------------------+
- * fbno fend
- *
- * Needs to be trimmed to:
- * +-------+
- * fbno fend
- */
- fbno = bend;
- } else if (bend >= fend) {
- /* end overlap */
-
- /*
- * Case 7:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +------------------+
- * fbno fend
- *
- * Case 8:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +--------------------------+
- * fbno fend
- *
- * Needs to be trimmed to:
- * +-------+
- * fbno fend
- */
- fend = bbno;
- } else {
- /* middle overlap */
-
- /*
- * Case 9:
- * bbno bend
- * +BBBBBBBBBBBBBBBBB+
- * +-----------------------------------+
- * fbno fend
- *
- * Can be trimmed to:
- * +-------+ OR +-------+
- * fbno fend fbno fend
- *
- * Backward allocation leads to significant
- * fragmentation of directories, which degrades
- * directory performance, therefore we always want to
- * choose the option that produces forward allocation
- * patterns.
- * Preferring the lower bno extent will make the next
- * request use "fend" as the start of the next
- * allocation; if the segment is no longer busy at
- * that point, we'll get a contiguous allocation, but
- * even if it is still busy, we will get a forward
- * allocation.
- * We try to avoid choosing the segment at "bend",
- * because that can lead to the next allocation
- * taking the segment at "fbno", which would be a
- * backward allocation. We only use the segment at
- * "fbno" if it is much larger than the current
- * requested size, because in that case there's a
- * good chance subsequent allocations will be
- * contiguous.
- */
- if (bbno - fbno >= args->maxlen) {
- /* left candidate fits perfect */
- fend = bbno;
- } else if (fend - bend >= args->maxlen * 4) {
- /* right candidate has enough free space */
- fbno = bend;
- } else if (bbno - fbno >= args->minlen) {
- /* left candidate fits minimum requirement */
- fend = bbno;
- } else {
- goto fail;
- }
- }
-
- flen = fend - fbno;
- }
- spin_unlock(&args->pag->pagb_lock);
-
- if (fbno != bno || flen != len) {
- trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len,
- fbno, flen);
- }
- *rbno = fbno;
- *rlen = flen;
- return;
-fail:
- /*
- * Return a zero extent length as failure indications. All callers
- * re-check if the trimmed extent satisfies the minlen requirement.
- */
- spin_unlock(&args->pag->pagb_lock);
- trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
- *rbno = fbno;
- *rlen = 0;
-}
-
-static void
-xfs_alloc_busy_clear_one(
- struct xfs_mount *mp,
- struct xfs_perag *pag,
- struct xfs_busy_extent *busyp)
-{
- if (busyp->length) {
- trace_xfs_alloc_busy_clear(mp, busyp->agno, busyp->bno,
- busyp->length);
- rb_erase(&busyp->rb_node, &pag->pagb_tree);
- }
-
- list_del_init(&busyp->list);
- kmem_free(busyp);
-}
-
-/*
- * Remove all extents on the passed in list from the busy extents tree.
- * If do_discard is set skip extents that need to be discarded, and mark
- * these as undergoing a discard operation instead.
- */
-void
-xfs_alloc_busy_clear(
- struct xfs_mount *mp,
- struct list_head *list,
- bool do_discard)
-{
- struct xfs_busy_extent *busyp, *n;
- struct xfs_perag *pag = NULL;
- xfs_agnumber_t agno = NULLAGNUMBER;
-
- list_for_each_entry_safe(busyp, n, list, list) {
- if (busyp->agno != agno) {
- if (pag) {
- spin_unlock(&pag->pagb_lock);
- xfs_perag_put(pag);
- }
- pag = xfs_perag_get(mp, busyp->agno);
- spin_lock(&pag->pagb_lock);
- agno = busyp->agno;
- }
-
- if (do_discard && busyp->length &&
- !(busyp->flags & XFS_ALLOC_BUSY_SKIP_DISCARD))
- busyp->flags = XFS_ALLOC_BUSY_DISCARDED;
- else
- xfs_alloc_busy_clear_one(mp, pag, busyp);
- }
-
- if (pag) {
- spin_unlock(&pag->pagb_lock);
- xfs_perag_put(pag);
- }
-}
-
-/*
- * Callback for list_sort to sort busy extents by the AG they reside in.
- */
-int
-xfs_busy_extent_ag_cmp(
- void *priv,
- struct list_head *a,
- struct list_head *b)
-{
- return container_of(a, struct xfs_busy_extent, list)->agno -
- container_of(b, struct xfs_busy_extent, list)->agno;
-}
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 3a7e7d8f8ded..93be4a667ca1 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -23,7 +23,6 @@ struct xfs_btree_cur;
struct xfs_mount;
struct xfs_perag;
struct xfs_trans;
-struct xfs_busy_extent;
extern struct workqueue_struct *xfs_alloc_wq;
@@ -139,33 +138,6 @@ xfs_extlen_t
xfs_alloc_longest_free_extent(struct xfs_mount *mp,
struct xfs_perag *pag);
-#ifdef __KERNEL__
-void
-xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
- xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags);
-
-void
-xfs_alloc_busy_clear(struct xfs_mount *mp, struct list_head *list,
- bool do_discard);
-
-int
-xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
- xfs_agblock_t bno, xfs_extlen_t len);
-
-void
-xfs_alloc_busy_reuse(struct xfs_mount *mp, xfs_agnumber_t agno,
- xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata);
-
-int
-xfs_busy_extent_ag_cmp(void *priv, struct list_head *a, struct list_head *b);
-
-static inline void xfs_alloc_busy_sort(struct list_head *list)
-{
- list_sort(NULL, list, xfs_busy_extent_ag_cmp);
-}
-
-#endif /* __KERNEL__ */
-
/*
* Compute and fill in value of m_ag_maxlevels.
*/
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index ffb3386e45c1..f1647caace8f 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -18,9 +18,7 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -32,6 +30,7 @@
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
+#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_trace.h"
@@ -94,7 +93,7 @@ xfs_allocbt_alloc_block(
return 0;
}
- xfs_alloc_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);
+ xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);
xfs_trans_agbtree_delta(cur->bc_tp, 1);
new->s = cpu_to_be32(bno);
@@ -119,8 +118,8 @@ xfs_allocbt_free_block(
if (error)
return error;
- xfs_alloc_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
- XFS_ALLOC_BUSY_SKIP_DISCARD);
+ xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
+ XFS_EXTENT_BUSY_SKIP_DISCARD);
xfs_trans_agbtree_delta(cur->bc_tp, -1);
return 0;
}
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 0dbb9e70fe21..ae31c313a79e 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -16,9 +16,7 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
@@ -29,7 +27,6 @@
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
-#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
@@ -623,7 +620,7 @@ xfs_map_at_offset(
* or delayed allocate extent.
*/
STATIC int
-xfs_is_delayed_page(
+xfs_check_page_type(
struct page *page,
unsigned int type)
{
@@ -637,11 +634,11 @@ xfs_is_delayed_page(
bh = head = page_buffers(page);
do {
if (buffer_unwritten(bh))
- acceptable = (type == IO_UNWRITTEN);
+ acceptable += (type == IO_UNWRITTEN);
else if (buffer_delay(bh))
- acceptable = (type == IO_DELALLOC);
+ acceptable += (type == IO_DELALLOC);
else if (buffer_dirty(bh) && buffer_mapped(bh))
- acceptable = (type == IO_OVERWRITE);
+ acceptable += (type == IO_OVERWRITE);
else
break;
} while ((bh = bh->b_this_page) != head);
@@ -684,7 +681,7 @@ xfs_convert_page(
goto fail_unlock_page;
if (page->mapping != inode->i_mapping)
goto fail_unlock_page;
- if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
+ if (!xfs_check_page_type(page, (*ioendp)->io_type))
goto fail_unlock_page;
/*
@@ -834,7 +831,7 @@ xfs_aops_discard_page(
struct buffer_head *bh, *head;
loff_t offset = page_offset(page);
- if (!xfs_is_delayed_page(page, IO_DELALLOC))
+ if (!xfs_check_page_type(page, IO_DELALLOC))
goto out_invalidate;
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -1146,7 +1143,14 @@ __xfs_get_blocks(
if (!create && direct && offset >= i_size_read(inode))
return 0;
- if (create) {
+ /*
+ * Direct I/O is usually done on preallocated files, so try getting
+ * a block mapping without an exclusive lock first. For buffered
+ * writes we already have the exclusive iolock anyway, so avoiding
+ * a lock roundtrip here by taking the ilock exclusive from the
+ * beginning is a useful micro optimization.
+ */
+ if (create && !direct) {
lockmode = XFS_ILOCK_EXCL;
xfs_ilock(ip, lockmode);
} else {
@@ -1168,23 +1172,45 @@ __xfs_get_blocks(
(!nimaps ||
(imap.br_startblock == HOLESTARTBLOCK ||
imap.br_startblock == DELAYSTARTBLOCK))) {
- if (direct) {
+ if (direct || xfs_get_extsz_hint(ip)) {
+ /*
+ * Drop the ilock in preparation for starting the block
+ * allocation transaction. It will be retaken
+ * exclusively inside xfs_iomap_write_direct for the
+ * actual allocation.
+ */
+ xfs_iunlock(ip, lockmode);
error = xfs_iomap_write_direct(ip, offset, size,
&imap, nimaps);
+ if (error)
+ return -error;
+ new = 1;
} else {
+ /*
+ * Delalloc reservations do not require a transaction,
+ * so we can go on without dropping the lock here. If we
+ * are allocating a new delalloc block, set the new flag
+ * so the buffer is marked new and we know it is newly
+ * allocated if the write fails.
+ */
+ if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
+ new = 1;
error = xfs_iomap_write_delay(ip, offset, size, &imap);
+ if (error)
+ goto out_unlock;
+
+ xfs_iunlock(ip, lockmode);
}
- if (error)
- goto out_unlock;
trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
} else if (nimaps) {
trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
+ xfs_iunlock(ip, lockmode);
} else {
trace_xfs_get_blocks_notfound(ip, offset, size);
goto out_unlock;
}
- xfs_iunlock(ip, lockmode);
if (imap.br_startblock != HOLESTARTBLOCK &&
imap.br_startblock != DELAYSTARTBLOCK) {
@@ -1386,52 +1412,91 @@ out_destroy_ioend:
return ret;
}
+/*
+ * Punch out the delalloc blocks we have already allocated.
+ *
+ * Don't bother with xfs_setattr given that nothing can have made it to disk yet
+ * as the page is still locked at this point.
+ */
+STATIC void
+xfs_vm_kill_delalloc_range(
+ struct inode *inode,
+ loff_t start,
+ loff_t end)
+{
+ struct xfs_inode *ip = XFS_I(inode);
+ xfs_fileoff_t start_fsb;
+ xfs_fileoff_t end_fsb;
+ int error;
+
+ start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
+ end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
+ if (end_fsb <= start_fsb)
+ return;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
+ end_fsb - start_fsb);
+ if (error) {
+ /* something screwed, just bail */
+ if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ xfs_alert(ip->i_mount,
+ "xfs_vm_write_failed: unable to clean up ino %lld",
+ ip->i_ino);
+ }
+ }
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+}
+
STATIC void
xfs_vm_write_failed(
- struct address_space *mapping,
- loff_t to)
+ struct inode *inode,
+ struct page *page,
+ loff_t pos,
+ unsigned len)
{
- struct inode *inode = mapping->host;
+ loff_t block_offset = pos & PAGE_MASK;
+ loff_t block_start;
+ loff_t block_end;
+ loff_t from = pos & (PAGE_CACHE_SIZE - 1);
+ loff_t to = from + len;
+ struct buffer_head *bh, *head;
- if (to > inode->i_size) {
- /*
- * Punch out the delalloc blocks we have already allocated.
- *
- * Don't bother with xfs_setattr given that nothing can have
- * made it to disk yet as the page is still locked at this
- * point.
- */
- struct xfs_inode *ip = XFS_I(inode);
- xfs_fileoff_t start_fsb;
- xfs_fileoff_t end_fsb;
- int error;
+ ASSERT(block_offset + from == pos);
- truncate_pagecache(inode, to, inode->i_size);
+ head = page_buffers(page);
+ block_start = 0;
+ for (bh = head; bh != head || !block_start;
+ bh = bh->b_this_page, block_start = block_end,
+ block_offset += bh->b_size) {
+ block_end = block_start + bh->b_size;
- /*
- * Check if there are any blocks that are outside of i_size
- * that need to be trimmed back.
- */
- start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1;
- end_fsb = XFS_B_TO_FSB(ip->i_mount, to);
- if (end_fsb <= start_fsb)
- return;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
- end_fsb - start_fsb);
- if (error) {
- /* something screwed, just bail */
- if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- xfs_alert(ip->i_mount,
- "xfs_vm_write_failed: unable to clean up ino %lld",
- ip->i_ino);
- }
- }
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ /* skip buffers before the write */
+ if (block_end <= from)
+ continue;
+
+ /* if the buffer is after the write, we're done */
+ if (block_start >= to)
+ break;
+
+ if (!buffer_delay(bh))
+ continue;
+
+ if (!buffer_new(bh) && block_offset < i_size_read(inode))
+ continue;
+
+ xfs_vm_kill_delalloc_range(inode, block_offset,
+ block_offset + bh->b_size);
}
+
}
+/*
+ * This used to call block_write_begin(), but it unlocks and releases the page
+ * on error, and we need that page to be able to punch stale delalloc blocks out
+ * on failure. Hence we copy-n-waste it here and call xfs_vm_write_failed() at
+ * the appropriate point.
+ */
STATIC int
xfs_vm_write_begin(
struct file *file,
@@ -1442,15 +1507,40 @@ xfs_vm_write_begin(
struct page **pagep,
void **fsdata)
{
- int ret;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ struct page *page;
+ int status;
- ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS,
- pagep, xfs_get_blocks);
- if (unlikely(ret))
- xfs_vm_write_failed(mapping, pos + len);
- return ret;
+ ASSERT(len <= PAGE_CACHE_SIZE);
+
+ page = grab_cache_page_write_begin(mapping, index,
+ flags | AOP_FLAG_NOFS);
+ if (!page)
+ return -ENOMEM;
+
+ status = __block_write_begin(page, pos, len, xfs_get_blocks);
+ if (unlikely(status)) {
+ struct inode *inode = mapping->host;
+
+ xfs_vm_write_failed(inode, page, pos, len);
+ unlock_page(page);
+
+ if (pos + len > i_size_read(inode))
+ truncate_pagecache(inode, pos + len, i_size_read(inode));
+
+ page_cache_release(page);
+ page = NULL;
+ }
+
+ *pagep = page;
+ return status;
}
+/*
+ * On failure, we only need to kill delalloc blocks beyond EOF because they
+ * will never be written. For blocks within EOF, generic_write_end() zeros them
+ * so they are safe to leave alone and be written with all the other valid data.
+ */
STATIC int
xfs_vm_write_end(
struct file *file,
@@ -1463,9 +1553,19 @@ xfs_vm_write_end(
{
int ret;
+ ASSERT(len <= PAGE_CACHE_SIZE);
+
ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
- if (unlikely(ret < len))
- xfs_vm_write_failed(mapping, pos + len);
+ if (unlikely(ret < len)) {
+ struct inode *inode = mapping->host;
+ size_t isize = i_size_read(inode);
+ loff_t to = pos + len;
+
+ if (to > isize) {
+ truncate_pagecache(inode, to, isize);
+ xfs_vm_kill_delalloc_range(inode, isize, to);
+ }
+ }
return ret;
}
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 65d61b948ead..a17ff01b5adf 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -21,7 +21,6 @@
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -39,7 +38,6 @@
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
-#include "xfs_rw.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
@@ -1987,14 +1985,12 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
(map[i].br_startblock != HOLESTARTBLOCK));
dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
- error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno,
- blkcnt, XBF_LOCK | XBF_DONT_BLOCK,
- &bp);
+ error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
+ dblkno, blkcnt, 0, &bp);
if (error)
return(error);
- tmp = (valuelen < XFS_BUF_SIZE(bp))
- ? valuelen : XFS_BUF_SIZE(bp);
+ tmp = min_t(int, valuelen, BBTOB(bp->b_length));
xfs_buf_iomove(bp, 0, tmp, dst, XBRW_READ);
xfs_buf_relse(bp);
dst += tmp;
@@ -2097,6 +2093,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
lblkno = args->rmtblkno;
valuelen = args->valuelen;
while (valuelen > 0) {
+ int buflen;
+
/*
* Try to remember where we decided to put the value.
*/
@@ -2114,15 +2112,16 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
- bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt,
- XBF_LOCK | XBF_DONT_BLOCK);
+ bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, 0);
if (!bp)
return ENOMEM;
- tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen :
- XFS_BUF_SIZE(bp);
+
+ buflen = BBTOB(bp->b_length);
+ tmp = min_t(int, valuelen, buflen);
xfs_buf_iomove(bp, 0, tmp, src, XBRW_WRITE);
- if (tmp < XFS_BUF_SIZE(bp))
- xfs_buf_zero(bp, tmp, XFS_BUF_SIZE(bp) - tmp);
+ if (tmp < buflen)
+ xfs_buf_zero(bp, tmp, buflen - tmp);
+
error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */
xfs_buf_relse(bp);
if (error)
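
The rewritten xfs_attr_rmtval_set() loop above copies min(valuelen, BBTOB(bp->b_length)) bytes into each remote-value buffer and zeroes the tail so stale memory is never written to disk. A minimal userspace sketch of that copy-and-zero step follows; the value and buffer sizes are invented for illustration and the MIN macro stands in for the kernel's min_t().

#include <stdio.h>
#include <string.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	char value[10] = "attrvalue";	/* remaining attribute value */
	char buf[16];			/* one remote-value buffer */
	int valuelen = (int)strlen(value);
	int buflen = (int)sizeof(buf);

	/* Copy what fits, then zero the remainder of the buffer, as the
	 * xfs_attr_rmtval_set() hunk above does with xfs_buf_iomove() and
	 * xfs_buf_zero(). */
	int tmp = MIN(valuelen, buflen);
	memcpy(buf, value, tmp);
	if (tmp < buflen)
		memset(buf + tmp, 0, buflen - tmp);

	printf("copied %d bytes, zeroed %d bytes\n", tmp, buflen - tmp);
	return 0;
}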
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 76d93dc953e1..7d89d800f517 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -20,7 +20,6 @@
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -2983,7 +2982,7 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
map.br_blockcount);
bp = xfs_trans_get_buf(*trans,
dp->i_mount->m_ddev_targp,
- dblkno, dblkcnt, XBF_LOCK);
+ dblkno, dblkcnt, 0);
if (!bp)
return ENOMEM;
xfs_trans_binval(*trans, bp);
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 85e7e327bcd8..58b815ec8c91 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -41,7 +41,6 @@
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_attr_leaf.h"
-#include "xfs_rw.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
@@ -4527,7 +4526,7 @@ out_unreserve_blocks:
xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, alen, 0);
out_unreserve_quota:
if (XFS_IS_QUOTA_ON(mp))
- xfs_trans_unreserve_quota_nblks(NULL, ip, alen, 0, rt ?
+ xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
return error;
}
@@ -5621,8 +5620,20 @@ xfs_getbmap(
XFS_FSB_TO_BB(mp, map[i].br_blockcount);
out[cur_ext].bmv_unused1 = 0;
out[cur_ext].bmv_unused2 = 0;
- ASSERT(((iflags & BMV_IF_DELALLOC) != 0) ||
- (map[i].br_startblock != DELAYSTARTBLOCK));
+
+ /*
+ * delayed allocation extents that start beyond EOF can
+ * occur due to speculative EOF allocation when the
+ * delalloc extent is larger than the largest freespace
+ * extent at conversion time. These extents cannot be
+ * converted by data writeback, so can exist here even
+ * if we are not supposed to be finding delalloc
+ * extents.
+ */
+ if (map[i].br_startblock == DELAYSTARTBLOCK &&
+ map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
+ ASSERT((iflags & BMV_IF_DELALLOC) != 0);
+
if (map[i].br_startblock == HOLESTARTBLOCK &&
whichfork == XFS_ATTR_FORK) {
/* came to the end of attribute fork */
@@ -6157,3 +6168,16 @@ next_block:
return error;
}
+
+/*
+ * Convert the given file system block to a disk block. We have to treat it
+ * differently based on whether the file is a real time file or not, because the
+ * bmap code does.
+ */
+xfs_daddr_t
+xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
+{
+ return (XFS_IS_REALTIME_INODE(ip) ? \
+ (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
+ XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
+}
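
The new xfs_fsb_to_db() helper above exists because realtime files use a flat block address space (a plain FSB-to-BB shift), while regular files need XFS_FSB_TO_DADDR() to account for allocation-group geometry. The sketch below shows only the unit arithmetic of the realtime case; the 4096-byte filesystem block and 512-byte basic block sizes are assumed for illustration, not read from the patch.

#include <stdio.h>

int main(void)
{
	const unsigned blocklog = 12;	/* assumed 4096-byte fs blocks */
	const unsigned bbshift = 9;	/* 512-byte basic blocks (BBs) */
	unsigned long long fsb = 100;	/* example filesystem block number */

	/* FSB -> BB: each fs block covers 2^(blocklog - bbshift) BBs. */
	unsigned long long bb = fsb << (blocklog - bbshift);

	printf("fs block %llu starts at basic block %llu (%llu BBs per fs block)\n",
	       fsb, bb, 1ULL << (blocklog - bbshift));
	return 0;
}

The regular-file path additionally splits the FSB into an allocation group number and AG block before producing a daddr, which requires mount geometry that a standalone example cannot reproduce.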
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 89ee672d378a..803b56d7ce16 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -211,6 +211,9 @@ int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
int whichfork, int *count);
int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
xfs_fileoff_t start_fsb, xfs_fileoff_t length);
+
+xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb);
+
#endif /* __KERNEL__ */
#endif /* __XFS_BMAP_H__ */
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index e2f5d59cbeaf..862084a47a7e 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -20,7 +20,6 @@
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 1f19f03af9d3..e53e317b1582 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -20,7 +20,6 @@
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 6819b5163e33..172d3cc8f8cb 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -35,14 +35,12 @@
#include <linux/freezer.h>
#include "xfs_sb.h"
-#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
static kmem_zone_t *xfs_buf_zone;
-STATIC int xfsbufd(void *);
static struct workqueue_struct *xfslogd_workqueue;
@@ -57,11 +55,7 @@ static struct workqueue_struct *xfslogd_workqueue;
#endif
#define xb_to_gfp(flags) \
- ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
- ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
-
-#define xb_to_km(flags) \
- (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
+ ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
static inline int
@@ -71,11 +65,11 @@ xfs_buf_is_vmapped(
/*
* Return true if the buffer is vmapped.
*
- * The XBF_MAPPED flag is set if the buffer should be mapped, but the
- * code is clever enough to know it doesn't have to map a single page,
- * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
+ * b_addr is null if the buffer is not mapped, but the code is clever
+ * enough to know it doesn't have to map a single page, so the check has
+ * to be both for b_addr and bp->b_page_count > 1.
*/
- return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
+ return bp->b_addr && bp->b_page_count > 1;
}
static inline int
@@ -144,8 +138,17 @@ void
xfs_buf_stale(
struct xfs_buf *bp)
{
+ ASSERT(xfs_buf_islocked(bp));
+
bp->b_flags |= XBF_STALE;
- xfs_buf_delwri_dequeue(bp);
+
+ /*
+ * Clear the delwri status so that a delwri queue walker will not
+ * flush this buffer to disk now that it is stale. The delwri queue has
+ * a reference to the buffer, so this is safe to do.
+ */
+ bp->b_flags &= ~_XBF_DELWRI_Q;
+
atomic_set(&(bp)->b_lru_ref, 0);
if (!list_empty(&bp->b_lru)) {
struct xfs_buftarg *btp = bp->b_target;
@@ -164,22 +167,22 @@ xfs_buf_stale(
struct xfs_buf *
xfs_buf_alloc(
struct xfs_buftarg *target,
- xfs_off_t range_base,
- size_t range_length,
+ xfs_daddr_t blkno,
+ size_t numblks,
xfs_buf_flags_t flags)
{
struct xfs_buf *bp;
- bp = kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags));
+ bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
if (unlikely(!bp))
return NULL;
/*
- * We don't want certain flags to appear in b_flags.
+ * We don't want certain flags to appear in b_flags unless they are
+ * specifically set by later operations on the buffer.
*/
- flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
+ flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
- memset(bp, 0, sizeof(xfs_buf_t));
atomic_set(&bp->b_hold, 1);
atomic_set(&bp->b_lru_ref, 1);
init_completion(&bp->b_iowait);
@@ -189,14 +192,22 @@ xfs_buf_alloc(
sema_init(&bp->b_sema, 0); /* held, no waiters */
XB_SET_OWNER(bp);
bp->b_target = target;
- bp->b_file_offset = range_base;
+
/*
- * Set buffer_length and count_desired to the same value initially.
- * I/O routines should use count_desired, which will be the same in
+ * Set length and io_length to the same value initially.
+ * I/O routines should use io_length, which will be the same in
* most cases but may be reset (e.g. XFS recovery).
*/
- bp->b_buffer_length = bp->b_count_desired = range_length;
+ bp->b_length = numblks;
+ bp->b_io_length = numblks;
bp->b_flags = flags;
+
+ /*
+ * We do not set the block number here in the buffer because we have not
+ * finished initialising the buffer. We insert the buffer into the cache
+ * in this state, so this ensures that we are unable to do IO on a
+ * buffer that hasn't been fully initialised.
+ */
bp->b_bn = XFS_BUF_DADDR_NULL;
atomic_set(&bp->b_pin_count, 0);
init_waitqueue_head(&bp->b_waiters);
@@ -219,13 +230,12 @@ _xfs_buf_get_pages(
{
/* Make sure that we have a page list */
if (bp->b_pages == NULL) {
- bp->b_offset = xfs_buf_poff(bp->b_file_offset);
bp->b_page_count = page_count;
if (page_count <= XB_PAGES) {
bp->b_pages = bp->b_page_array;
} else {
bp->b_pages = kmem_alloc(sizeof(struct page *) *
- page_count, xb_to_km(flags));
+ page_count, KM_NOFS);
if (bp->b_pages == NULL)
return -ENOMEM;
}
@@ -288,11 +298,11 @@ xfs_buf_allocate_memory(
xfs_buf_t *bp,
uint flags)
{
- size_t size = bp->b_count_desired;
+ size_t size;
size_t nbytes, offset;
gfp_t gfp_mask = xb_to_gfp(flags);
unsigned short page_count, i;
- xfs_off_t end;
+ xfs_off_t start, end;
int error;
/*
@@ -300,15 +310,15 @@ xfs_buf_allocate_memory(
* the memory from the heap - there's no need for the complexity of
* page arrays to keep allocation down to order 0.
*/
- if (bp->b_buffer_length < PAGE_SIZE) {
- bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
+ size = BBTOB(bp->b_length);
+ if (size < PAGE_SIZE) {
+ bp->b_addr = kmem_alloc(size, KM_NOFS);
if (!bp->b_addr) {
/* low memory - use alloc_page loop instead */
goto use_alloc_page;
}
- if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
- PAGE_MASK) !=
+ if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
((unsigned long)bp->b_addr & PAGE_MASK)) {
/* b_addr spans two pages - use alloc_page instead */
kmem_free(bp->b_addr);
@@ -319,13 +329,14 @@ xfs_buf_allocate_memory(
bp->b_pages = bp->b_page_array;
bp->b_pages[0] = virt_to_page(bp->b_addr);
bp->b_page_count = 1;
- bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
+ bp->b_flags |= _XBF_KMEM;
return 0;
}
use_alloc_page:
- end = bp->b_file_offset + bp->b_buffer_length;
- page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
+ start = BBTOB(bp->b_bn) >> PAGE_SHIFT;
+ end = (BBTOB(bp->b_bn + bp->b_length) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ page_count = end - start;
error = _xfs_buf_get_pages(bp, page_count, flags);
if (unlikely(error))
return error;
@@ -388,8 +399,9 @@ _xfs_buf_map_pages(
if (bp->b_page_count == 1) {
/* A single page buffer is always mappable */
bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
- bp->b_flags |= XBF_MAPPED;
- } else if (flags & XBF_MAPPED) {
+ } else if (flags & XBF_UNMAPPED) {
+ bp->b_addr = NULL;
+ } else {
int retried = 0;
do {
@@ -403,7 +415,6 @@ _xfs_buf_map_pages(
if (!bp->b_addr)
return -ENOMEM;
bp->b_addr += bp->b_offset;
- bp->b_flags |= XBF_MAPPED;
}
return 0;
@@ -420,29 +431,27 @@ _xfs_buf_map_pages(
*/
xfs_buf_t *
_xfs_buf_find(
- xfs_buftarg_t *btp, /* block device target */
- xfs_off_t ioff, /* starting offset of range */
- size_t isize, /* length of range */
+ struct xfs_buftarg *btp,
+ xfs_daddr_t blkno,
+ size_t numblks,
xfs_buf_flags_t flags,
xfs_buf_t *new_bp)
{
- xfs_off_t range_base;
- size_t range_length;
+ size_t numbytes;
struct xfs_perag *pag;
struct rb_node **rbp;
struct rb_node *parent;
xfs_buf_t *bp;
- range_base = (ioff << BBSHIFT);
- range_length = (isize << BBSHIFT);
+ numbytes = BBTOB(numblks);
/* Check for IOs smaller than the sector size / not sector aligned */
- ASSERT(!(range_length < (1 << btp->bt_sshift)));
- ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
+ ASSERT(!(numbytes < (1 << btp->bt_sshift)));
+ ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
/* get tree root */
pag = xfs_perag_get(btp->bt_mount,
- xfs_daddr_to_agno(btp->bt_mount, ioff));
+ xfs_daddr_to_agno(btp->bt_mount, blkno));
/* walk tree */
spin_lock(&pag->pag_buf_lock);
@@ -453,20 +462,20 @@ _xfs_buf_find(
parent = *rbp;
bp = rb_entry(parent, struct xfs_buf, b_rbnode);
- if (range_base < bp->b_file_offset)
+ if (blkno < bp->b_bn)
rbp = &(*rbp)->rb_left;
- else if (range_base > bp->b_file_offset)
+ else if (blkno > bp->b_bn)
rbp = &(*rbp)->rb_right;
else {
/*
- * found a block offset match. If the range doesn't
+ * found a block number match. If the range doesn't
* match, the only way this is allowed is if the buffer
* in the cache is stale and the transaction that made
* it stale has not yet committed. i.e. we are
* reallocating a busy extent. Skip this buffer and
* continue searching to the right for an exact match.
*/
- if (bp->b_buffer_length != range_length) {
+ if (bp->b_length != numblks) {
ASSERT(bp->b_flags & XBF_STALE);
rbp = &(*rbp)->rb_right;
continue;
@@ -511,7 +520,7 @@ found:
*/
if (bp->b_flags & XBF_STALE) {
ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
- bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
+ bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
}
trace_xfs_buf_find(bp, flags, _RET_IP_);
@@ -526,63 +535,59 @@ found:
*/
struct xfs_buf *
xfs_buf_get(
- xfs_buftarg_t *target,/* target for buffer */
- xfs_off_t ioff, /* starting offset of range */
- size_t isize, /* length of range */
+ xfs_buftarg_t *target,
+ xfs_daddr_t blkno,
+ size_t numblks,
xfs_buf_flags_t flags)
{
struct xfs_buf *bp;
struct xfs_buf *new_bp;
int error = 0;
- bp = _xfs_buf_find(target, ioff, isize, flags, NULL);
+ bp = _xfs_buf_find(target, blkno, numblks, flags, NULL);
if (likely(bp))
goto found;
- new_bp = xfs_buf_alloc(target, ioff << BBSHIFT, isize << BBSHIFT,
- flags);
+ new_bp = xfs_buf_alloc(target, blkno, numblks, flags);
if (unlikely(!new_bp))
return NULL;
- bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
- if (!bp) {
+ error = xfs_buf_allocate_memory(new_bp, flags);
+ if (error) {
kmem_zone_free(xfs_buf_zone, new_bp);
return NULL;
}
- if (bp == new_bp) {
- error = xfs_buf_allocate_memory(bp, flags);
- if (error)
- goto no_buffer;
- } else
- kmem_zone_free(xfs_buf_zone, new_bp);
+ bp = _xfs_buf_find(target, blkno, numblks, flags, new_bp);
+ if (!bp) {
+ xfs_buf_free(new_bp);
+ return NULL;
+ }
+
+ if (bp != new_bp)
+ xfs_buf_free(new_bp);
/*
* Now we have a workable buffer, fill in the block number so
* that we can do IO on it.
*/
- bp->b_bn = ioff;
- bp->b_count_desired = bp->b_buffer_length;
+ bp->b_bn = blkno;
+ bp->b_io_length = bp->b_length;
found:
- if (!(bp->b_flags & XBF_MAPPED)) {
+ if (!bp->b_addr) {
error = _xfs_buf_map_pages(bp, flags);
if (unlikely(error)) {
xfs_warn(target->bt_mount,
"%s: failed to map pages\n", __func__);
- goto no_buffer;
+ xfs_buf_relse(bp);
+ return NULL;
}
}
XFS_STATS_INC(xb_get);
trace_xfs_buf_get(bp, flags, _RET_IP_);
return bp;
-
-no_buffer:
- if (flags & (XBF_LOCK | XBF_TRYLOCK))
- xfs_buf_unlock(bp);
- xfs_buf_rele(bp);
- return NULL;
}
STATIC int
@@ -590,32 +595,30 @@ _xfs_buf_read(
xfs_buf_t *bp,
xfs_buf_flags_t flags)
{
- int status;
-
- ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
+ ASSERT(!(flags & XBF_WRITE));
ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
- bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_READ_AHEAD);
+ bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
- status = xfs_buf_iorequest(bp);
- if (status || bp->b_error || (flags & XBF_ASYNC))
- return status;
+ xfs_buf_iorequest(bp);
+ if (flags & XBF_ASYNC)
+ return 0;
return xfs_buf_iowait(bp);
}
xfs_buf_t *
xfs_buf_read(
xfs_buftarg_t *target,
- xfs_off_t ioff,
- size_t isize,
+ xfs_daddr_t blkno,
+ size_t numblks,
xfs_buf_flags_t flags)
{
xfs_buf_t *bp;
flags |= XBF_READ;
- bp = xfs_buf_get(target, ioff, isize, flags);
+ bp = xfs_buf_get(target, blkno, numblks, flags);
if (bp) {
trace_xfs_buf_read(bp, flags, _RET_IP_);
@@ -627,7 +630,8 @@ xfs_buf_read(
* Read ahead call which is already satisfied,
* drop the buffer
*/
- goto no_buffer;
+ xfs_buf_relse(bp);
+ return NULL;
} else {
/* We do not want read in the flags */
bp->b_flags &= ~XBF_READ;
@@ -635,12 +639,6 @@ xfs_buf_read(
}
return bp;
-
- no_buffer:
- if (flags & (XBF_LOCK | XBF_TRYLOCK))
- xfs_buf_unlock(bp);
- xfs_buf_rele(bp);
- return NULL;
}
/*
@@ -650,14 +648,14 @@ xfs_buf_read(
void
xfs_buf_readahead(
xfs_buftarg_t *target,
- xfs_off_t ioff,
- size_t isize)
+ xfs_daddr_t blkno,
+ size_t numblks)
{
if (bdi_read_congested(target->bt_bdi))
return;
- xfs_buf_read(target, ioff, isize,
- XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
+ xfs_buf_read(target, blkno, numblks,
+ XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
}
/*
@@ -666,16 +664,15 @@ xfs_buf_readahead(
*/
struct xfs_buf *
xfs_buf_read_uncached(
- struct xfs_mount *mp,
struct xfs_buftarg *target,
xfs_daddr_t daddr,
- size_t length,
+ size_t numblks,
int flags)
{
xfs_buf_t *bp;
int error;
- bp = xfs_buf_get_uncached(target, length, flags);
+ bp = xfs_buf_get_uncached(target, numblks, flags);
if (!bp)
return NULL;
@@ -683,9 +680,9 @@ xfs_buf_read_uncached(
XFS_BUF_SET_ADDR(bp, daddr);
XFS_BUF_READ(bp);
- xfsbdstrat(mp, bp);
+ xfsbdstrat(target->bt_mount, bp);
error = xfs_buf_iowait(bp);
- if (error || bp->b_error) {
+ if (error) {
xfs_buf_relse(bp);
return NULL;
}
@@ -699,7 +696,7 @@ xfs_buf_read_uncached(
void
xfs_buf_set_empty(
struct xfs_buf *bp,
- size_t len)
+ size_t numblks)
{
if (bp->b_pages)
_xfs_buf_free_pages(bp);
@@ -707,10 +704,9 @@ xfs_buf_set_empty(
bp->b_pages = NULL;
bp->b_page_count = 0;
bp->b_addr = NULL;
- bp->b_file_offset = 0;
- bp->b_buffer_length = bp->b_count_desired = len;
+ bp->b_length = numblks;
+ bp->b_io_length = numblks;
bp->b_bn = XFS_BUF_DADDR_NULL;
- bp->b_flags &= ~XBF_MAPPED;
}
static inline struct page *
@@ -749,7 +745,7 @@ xfs_buf_associate_memory(
bp->b_pages = NULL;
bp->b_addr = mem;
- rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
+ rval = _xfs_buf_get_pages(bp, page_count, 0);
if (rval)
return rval;
@@ -760,9 +756,8 @@ xfs_buf_associate_memory(
pageaddr += PAGE_SIZE;
}
- bp->b_count_desired = len;
- bp->b_buffer_length = buflen;
- bp->b_flags |= XBF_MAPPED;
+ bp->b_io_length = BTOBB(len);
+ bp->b_length = BTOBB(buflen);
return 0;
}
@@ -770,17 +765,18 @@ xfs_buf_associate_memory(
xfs_buf_t *
xfs_buf_get_uncached(
struct xfs_buftarg *target,
- size_t len,
+ size_t numblks,
int flags)
{
- unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
+ unsigned long page_count;
int error, i;
xfs_buf_t *bp;
- bp = xfs_buf_alloc(target, 0, len, 0);
+ bp = xfs_buf_alloc(target, 0, numblks, 0);
if (unlikely(bp == NULL))
goto fail;
+ page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
error = _xfs_buf_get_pages(bp, page_count, 0);
if (error)
goto fail_free_buf;
@@ -792,7 +788,7 @@ xfs_buf_get_uncached(
}
bp->b_flags |= _XBF_PAGES;
- error = _xfs_buf_map_pages(bp, XBF_MAPPED);
+ error = _xfs_buf_map_pages(bp, 0);
if (unlikely(error)) {
xfs_warn(target->bt_mount,
"%s: failed to map pages\n", __func__);
@@ -855,7 +851,7 @@ xfs_buf_rele(
spin_unlock(&pag->pag_buf_lock);
} else {
xfs_buf_lru_del(bp);
- ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
+ ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
spin_unlock(&pag->pag_buf_lock);
xfs_perag_put(pag);
@@ -915,13 +911,6 @@ xfs_buf_lock(
trace_xfs_buf_lock_done(bp, _RET_IP_);
}
-/*
- * Releases the lock on the buffer object.
- * If the buffer is marked delwri but is not queued, do so before we
- * unlock the buffer as we need to set flags correctly. We also need to
- * take a reference for the delwri queue because the unlocker is going to
- * drop their's and they don't know we just queued it.
- */
void
xfs_buf_unlock(
struct xfs_buf *bp)
@@ -1008,9 +997,8 @@ xfs_buf_ioerror_alert(
const char *func)
{
xfs_alert(bp->b_target->bt_mount,
-"metadata I/O error: block 0x%llx (\"%s\") error %d buf count %zd",
- (__uint64_t)XFS_BUF_ADDR(bp), func,
- bp->b_error, XFS_BUF_COUNT(bp));
+"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
+ (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
}
int
@@ -1019,10 +1007,11 @@ xfs_bwrite(
{
int error;
+ ASSERT(xfs_buf_islocked(bp));
+
bp->b_flags |= XBF_WRITE;
- bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
+ bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
- xfs_buf_delwri_dequeue(bp);
xfs_bdstrat_cb(bp);
error = xfs_buf_iowait(bp);
@@ -1181,7 +1170,7 @@ _xfs_buf_ioapply(
int rw, map_i, total_nr_pages, nr_pages;
struct bio *bio;
int offset = bp->b_offset;
- int size = bp->b_count_desired;
+ int size = BBTOB(bp->b_io_length);
sector_t sector = bp->b_bn;
total_nr_pages = bp->b_page_count;
@@ -1229,7 +1218,7 @@ next_chunk:
break;
offset = 0;
- sector += nbytes >> BBSHIFT;
+ sector += BTOBB(nbytes);
size -= nbytes;
total_nr_pages--;
}
@@ -1248,13 +1237,13 @@ next_chunk:
}
}
-int
+void
xfs_buf_iorequest(
xfs_buf_t *bp)
{
trace_xfs_buf_iorequest(bp, _RET_IP_);
- ASSERT(!(bp->b_flags & XBF_DELWRI));
+ ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
if (bp->b_flags & XBF_WRITE)
xfs_buf_wait_unpin(bp);
@@ -1269,13 +1258,12 @@ xfs_buf_iorequest(
_xfs_buf_ioend(bp, 0);
xfs_buf_rele(bp);
- return 0;
}
/*
- * Waits for I/O to complete on the buffer supplied.
- * It returns immediately if no I/O is pending.
- * It returns the I/O error code, if any, or 0 if there was no error.
+ * Waits for I/O to complete on the buffer supplied. It returns immediately if
+ * no I/O is pending or there is already a pending error on the buffer. It
+ * returns the I/O error code, if any, or 0 if there was no error.
*/
int
xfs_buf_iowait(
@@ -1283,7 +1271,8 @@ xfs_buf_iowait(
{
trace_xfs_buf_iowait(bp, _RET_IP_);
- wait_for_completion(&bp->b_iowait);
+ if (!bp->b_error)
+ wait_for_completion(&bp->b_iowait);
trace_xfs_buf_iowait_done(bp, _RET_IP_);
return bp->b_error;
@@ -1296,7 +1285,7 @@ xfs_buf_offset(
{
struct page *page;
- if (bp->b_flags & XBF_MAPPED)
+ if (bp->b_addr)
return bp->b_addr + offset;
offset += bp->b_offset;
@@ -1315,27 +1304,30 @@ xfs_buf_iomove(
void *data, /* data address */
xfs_buf_rw_t mode) /* read/write/zero flag */
{
- size_t bend, cpoff, csize;
- struct page *page;
+ size_t bend;
bend = boff + bsize;
while (boff < bend) {
- page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
- cpoff = xfs_buf_poff(boff + bp->b_offset);
- csize = min_t(size_t,
- PAGE_SIZE-cpoff, bp->b_count_desired-boff);
+ struct page *page;
+ int page_index, page_offset, csize;
+
+ page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
+ page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
+ page = bp->b_pages[page_index];
+ csize = min_t(size_t, PAGE_SIZE - page_offset,
+ BBTOB(bp->b_io_length) - boff);
- ASSERT(((csize + cpoff) <= PAGE_SIZE));
+ ASSERT((csize + page_offset) <= PAGE_SIZE);
switch (mode) {
case XBRW_ZERO:
- memset(page_address(page) + cpoff, 0, csize);
+ memset(page_address(page) + page_offset, 0, csize);
break;
case XBRW_READ:
- memcpy(data, page_address(page) + cpoff, csize);
+ memcpy(data, page_address(page) + page_offset, csize);
break;
case XBRW_WRITE:
- memcpy(page_address(page) + cpoff, data, csize);
+ memcpy(page_address(page) + page_offset, data, csize);
}
boff += csize;
@@ -1435,11 +1427,9 @@ xfs_free_buftarg(
{
unregister_shrinker(&btp->bt_shrinker);
- xfs_flush_buftarg(btp, 1);
if (mp->m_flags & XFS_MOUNT_BARRIER)
xfs_blkdev_issue_flush(btp);
- kthread_stop(btp->bt_task);
kmem_free(btp);
}
@@ -1491,20 +1481,6 @@ xfs_setsize_buftarg(
return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}
-STATIC int
-xfs_alloc_delwri_queue(
- xfs_buftarg_t *btp,
- const char *fsname)
-{
- INIT_LIST_HEAD(&btp->bt_delwri_queue);
- spin_lock_init(&btp->bt_delwri_lock);
- btp->bt_flags = 0;
- btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
- if (IS_ERR(btp->bt_task))
- return PTR_ERR(btp->bt_task);
- return 0;
-}
-
xfs_buftarg_t *
xfs_alloc_buftarg(
struct xfs_mount *mp,
@@ -1527,8 +1503,6 @@ xfs_alloc_buftarg(
spin_lock_init(&btp->bt_lru_lock);
if (xfs_setsize_buftarg_early(btp, bdev))
goto error;
- if (xfs_alloc_delwri_queue(btp, fsname))
- goto error;
btp->bt_shrinker.shrink = xfs_buftarg_shrink;
btp->bt_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&btp->bt_shrinker);
@@ -1539,125 +1513,52 @@ error:
return NULL;
}
-
/*
- * Delayed write buffer handling
+ * Add a buffer to the delayed write list.
+ *
+ * This queues a buffer for writeout if it hasn't already been. Note that
+ * neither this routine nor the buffer list submission functions perform
+ * any internal synchronization. It is expected that the lists are thread-local
+ * to the callers.
+ *
+ * Returns true if we queued up the buffer, or false if it was already on
+ * the buffer list.
*/
-void
+bool
xfs_buf_delwri_queue(
- xfs_buf_t *bp)
+ struct xfs_buf *bp,
+ struct list_head *list)
{
- struct xfs_buftarg *btp = bp->b_target;
-
- trace_xfs_buf_delwri_queue(bp, _RET_IP_);
-
+ ASSERT(xfs_buf_islocked(bp));
ASSERT(!(bp->b_flags & XBF_READ));
- spin_lock(&btp->bt_delwri_lock);
- if (!list_empty(&bp->b_list)) {
- /* if already in the queue, move it to the tail */
- ASSERT(bp->b_flags & _XBF_DELWRI_Q);
- list_move_tail(&bp->b_list, &btp->bt_delwri_queue);
- } else {
- /* start xfsbufd as it is about to have something to do */
- if (list_empty(&btp->bt_delwri_queue))
- wake_up_process(bp->b_target->bt_task);
-
- atomic_inc(&bp->b_hold);
- bp->b_flags |= XBF_DELWRI | _XBF_DELWRI_Q | XBF_ASYNC;
- list_add_tail(&bp->b_list, &btp->bt_delwri_queue);
- }
- bp->b_queuetime = jiffies;
- spin_unlock(&btp->bt_delwri_lock);
-}
-
-void
-xfs_buf_delwri_dequeue(
- xfs_buf_t *bp)
-{
- int dequeued = 0;
-
- spin_lock(&bp->b_target->bt_delwri_lock);
- if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
- ASSERT(bp->b_flags & _XBF_DELWRI_Q);
- list_del_init(&bp->b_list);
- dequeued = 1;
+ /*
+ * If the buffer is already marked delwri it has already been queued up
+ * by someone else for immediate writeout. Just ignore it in that
+ * case.
+ */
+ if (bp->b_flags & _XBF_DELWRI_Q) {
+ trace_xfs_buf_delwri_queued(bp, _RET_IP_);
+ return false;
}
- bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
- spin_unlock(&bp->b_target->bt_delwri_lock);
-
- if (dequeued)
- xfs_buf_rele(bp);
-
- trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
-}
-
-/*
- * If a delwri buffer needs to be pushed before it has aged out, then promote
- * it to the head of the delwri queue so that it will be flushed on the next
- * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
- * than the age currently needed to flush the buffer. Hence the next time the
- * xfsbufd sees it is guaranteed to be considered old enough to flush.
- */
-void
-xfs_buf_delwri_promote(
- struct xfs_buf *bp)
-{
- struct xfs_buftarg *btp = bp->b_target;
- long age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;
- ASSERT(bp->b_flags & XBF_DELWRI);
- ASSERT(bp->b_flags & _XBF_DELWRI_Q);
+ trace_xfs_buf_delwri_queue(bp, _RET_IP_);
/*
- * Check the buffer age before locking the delayed write queue as we
- * don't need to promote buffers that are already past the flush age.
+ * If a buffer gets written out synchronously or marked stale while it
+ * is on a delwri list, we lazily remove it. To do this, the other party
+ * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
+ * It remains referenced and on the list. In a rare corner case it
+ * might get re-added to a delwri list after the synchronous writeout, in
+ * which case we just need to re-add the flag here.
*/
- if (bp->b_queuetime < jiffies - age)
- return;
- bp->b_queuetime = jiffies - age;
- spin_lock(&btp->bt_delwri_lock);
- list_move(&bp->b_list, &btp->bt_delwri_queue);
- spin_unlock(&btp->bt_delwri_lock);
-}
-
-/*
- * Move as many buffers as specified to the supplied list
- * idicating if we skipped any buffers to prevent deadlocks.
- */
-STATIC int
-xfs_buf_delwri_split(
- xfs_buftarg_t *target,
- struct list_head *list,
- unsigned long age)
-{
- xfs_buf_t *bp, *n;
- int skipped = 0;
- int force;
-
- force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
- INIT_LIST_HEAD(list);
- spin_lock(&target->bt_delwri_lock);
- list_for_each_entry_safe(bp, n, &target->bt_delwri_queue, b_list) {
- ASSERT(bp->b_flags & XBF_DELWRI);
-
- if (!xfs_buf_ispinned(bp) && xfs_buf_trylock(bp)) {
- if (!force &&
- time_before(jiffies, bp->b_queuetime + age)) {
- xfs_buf_unlock(bp);
- break;
- }
-
- bp->b_flags &= ~(XBF_DELWRI | _XBF_DELWRI_Q);
- bp->b_flags |= XBF_WRITE;
- list_move_tail(&bp->b_list, list);
- trace_xfs_buf_delwri_split(bp, _RET_IP_);
- } else
- skipped++;
+ bp->b_flags |= _XBF_DELWRI_Q;
+ if (list_empty(&bp->b_list)) {
+ atomic_inc(&bp->b_hold);
+ list_add_tail(&bp->b_list, list);
}
- spin_unlock(&target->bt_delwri_lock);
- return skipped;
+ return true;
}
/*
@@ -1683,99 +1584,109 @@ xfs_buf_cmp(
return 0;
}
-STATIC int
-xfsbufd(
- void *data)
+static int
+__xfs_buf_delwri_submit(
+ struct list_head *buffer_list,
+ struct list_head *io_list,
+ bool wait)
{
- xfs_buftarg_t *target = (xfs_buftarg_t *)data;
-
- current->flags |= PF_MEMALLOC;
-
- set_freezable();
+ struct blk_plug plug;
+ struct xfs_buf *bp, *n;
+ int pinned = 0;
+
+ list_for_each_entry_safe(bp, n, buffer_list, b_list) {
+ if (!wait) {
+ if (xfs_buf_ispinned(bp)) {
+ pinned++;
+ continue;
+ }
+ if (!xfs_buf_trylock(bp))
+ continue;
+ } else {
+ xfs_buf_lock(bp);
+ }
- do {
- long age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
- long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
- struct list_head tmp;
- struct blk_plug plug;
+ /*
+ * Someone else might have written the buffer synchronously or
+ * marked it stale in the meantime. In that case only the
+ * _XBF_DELWRI_Q flag got cleared, and we have to drop the
+ * reference and remove it from the list here.
+ */
+ if (!(bp->b_flags & _XBF_DELWRI_Q)) {
+ list_del_init(&bp->b_list);
+ xfs_buf_relse(bp);
+ continue;
+ }
- if (unlikely(freezing(current)))
- try_to_freeze();
+ list_move_tail(&bp->b_list, io_list);
+ trace_xfs_buf_delwri_split(bp, _RET_IP_);
+ }
- /* sleep for a long time if there is nothing to do. */
- if (list_empty(&target->bt_delwri_queue))
- tout = MAX_SCHEDULE_TIMEOUT;
- schedule_timeout_interruptible(tout);
+ list_sort(NULL, io_list, xfs_buf_cmp);
- xfs_buf_delwri_split(target, &tmp, age);
- list_sort(NULL, &tmp, xfs_buf_cmp);
+ blk_start_plug(&plug);
+ list_for_each_entry_safe(bp, n, io_list, b_list) {
+ bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
+ bp->b_flags |= XBF_WRITE;
- blk_start_plug(&plug);
- while (!list_empty(&tmp)) {
- struct xfs_buf *bp;
- bp = list_first_entry(&tmp, struct xfs_buf, b_list);
+ if (!wait) {
+ bp->b_flags |= XBF_ASYNC;
list_del_init(&bp->b_list);
- xfs_bdstrat_cb(bp);
}
- blk_finish_plug(&plug);
- } while (!kthread_should_stop());
+ xfs_bdstrat_cb(bp);
+ }
+ blk_finish_plug(&plug);
- return 0;
+ return pinned;
}
/*
- * Go through all incore buffers, and release buffers if they belong to
- * the given device. This is used in filesystem error handling to
- * preserve the consistency of its metadata.
+ * Write out a buffer list asynchronously.
+ *
+ * This will take the @buffer_list, write all non-locked and non-pinned buffers
+ * out and not wait for I/O completion on any of the buffers. This interface
+ * is only safely useable for callers that can track I/O completion by higher
+ * level means, e.g. AIL pushing as the @buffer_list is consumed in this
+ * function.
*/
int
-xfs_flush_buftarg(
- xfs_buftarg_t *target,
- int wait)
+xfs_buf_delwri_submit_nowait(
+ struct list_head *buffer_list)
{
- xfs_buf_t *bp;
- int pincount = 0;
- LIST_HEAD(tmp_list);
- LIST_HEAD(wait_list);
- struct blk_plug plug;
+ LIST_HEAD (io_list);
+ return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
+}
- flush_workqueue(xfslogd_workqueue);
+/*
+ * Write out a buffer list synchronously.
+ *
+ * This will take the @buffer_list, write all buffers out and wait for I/O
+ * completion on all of the buffers. @buffer_list is consumed by the function,
+ * so callers must have some other way of tracking buffers if they require such
+ * functionality.
+ */
+int
+xfs_buf_delwri_submit(
+ struct list_head *buffer_list)
+{
+ LIST_HEAD (io_list);
+ int error = 0, error2;
+ struct xfs_buf *bp;
- set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
- pincount = xfs_buf_delwri_split(target, &tmp_list, 0);
+ __xfs_buf_delwri_submit(buffer_list, &io_list, true);
- /*
- * Dropped the delayed write list lock, now walk the temporary list.
- * All I/O is issued async and then if we need to wait for completion
- * we do that after issuing all the IO.
- */
- list_sort(NULL, &tmp_list, xfs_buf_cmp);
+ /* Wait for IO to complete. */
+ while (!list_empty(&io_list)) {
+ bp = list_first_entry(&io_list, struct xfs_buf, b_list);
- blk_start_plug(&plug);
- while (!list_empty(&tmp_list)) {
- bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
- ASSERT(target == bp->b_target);
list_del_init(&bp->b_list);
- if (wait) {
- bp->b_flags &= ~XBF_ASYNC;
- list_add(&bp->b_list, &wait_list);
- }
- xfs_bdstrat_cb(bp);
- }
- blk_finish_plug(&plug);
-
- if (wait) {
- /* Wait for IO to complete. */
- while (!list_empty(&wait_list)) {
- bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
-
- list_del_init(&bp->b_list);
- xfs_buf_iowait(bp);
- xfs_buf_relse(bp);
- }
+ error2 = xfs_buf_iowait(bp);
+ xfs_buf_relse(bp);
+ if (!error)
+ error = error2;
}
- return pincount;
+ return error;
}
int __init
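
A recurring change throughout the xfs_buf.c diff above is that buffer sizes (b_length, b_io_length) are now carried in 512-byte basic blocks rather than bytes, with BBTOB()/BTOBB() doing the conversions. The page-count calculation in xfs_buf_allocate_memory() then becomes plain byte-range-to-page-range arithmetic. The standalone rendering below assumes 4096-byte pages and example block numbers; it mirrors the calculation rather than reproducing kernel code.

#include <stdio.h>

#define BBSHIFT		9			/* 512-byte basic blocks */
#define BBTOB(bbs)	((unsigned long long)(bbs) << BBSHIFT)
#define PAGE_SHIFT	12			/* assumed 4096-byte pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long long b_bn = 7;		/* example starting basic block */
	unsigned long long b_length = 16;	/* 16 BBs = 8192 bytes */

	/* Same calculation as the xfs_buf_allocate_memory() hunk above:
	 * first and one-past-last page covered by the buffer's byte range. */
	unsigned long long start = BBTOB(b_bn) >> PAGE_SHIFT;
	unsigned long long end = (BBTOB(b_bn + b_length) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printf("buffer spans pages [%llu, %llu), page_count = %llu\n",
	       start, end, end - start);
	return 0;
}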
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 5bf3be45f543..7f1d1392ce37 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -32,11 +32,6 @@
#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
-#define xfs_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE)
-#define xfs_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT)
-#define xfs_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT)
-#define xfs_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK)
-
typedef enum {
XBRW_READ = 1, /* transfer into target memory */
XBRW_WRITE = 2, /* transfer from target memory */
@@ -46,11 +41,9 @@ typedef enum {
#define XBF_READ (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD (1 << 2) /* asynchronous read-ahead */
-#define XBF_MAPPED (1 << 3) /* buffer mapped (b_addr valid) */
#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
-#define XBF_DELWRI (1 << 6) /* buffer has dirty pages */
-#define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */
+#define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */
/* I/O hints for the BIO layer */
#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */
@@ -58,14 +51,13 @@ typedef enum {
#define XBF_FLUSH (1 << 12)/* flush the disk cache before a write */
/* flags used only as arguments to access routines */
-#define XBF_LOCK (1 << 15)/* lock requested */
#define XBF_TRYLOCK (1 << 16)/* lock requested, but do not wait */
-#define XBF_DONT_BLOCK (1 << 17)/* do not block in current thread */
+#define XBF_UNMAPPED (1 << 17)/* do not map the buffer */
/* flags used only internally */
#define _XBF_PAGES (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM (1 << 21)/* backed by heap memory */
-#define _XBF_DELWRI_Q (1 << 22)/* buffer on delwri queue */
+#define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */
typedef unsigned int xfs_buf_flags_t;
@@ -73,25 +65,18 @@ typedef unsigned int xfs_buf_flags_t;
{ XBF_READ, "READ" }, \
{ XBF_WRITE, "WRITE" }, \
{ XBF_READ_AHEAD, "READ_AHEAD" }, \
- { XBF_MAPPED, "MAPPED" }, \
{ XBF_ASYNC, "ASYNC" }, \
{ XBF_DONE, "DONE" }, \
- { XBF_DELWRI, "DELWRI" }, \
{ XBF_STALE, "STALE" }, \
{ XBF_SYNCIO, "SYNCIO" }, \
{ XBF_FUA, "FUA" }, \
{ XBF_FLUSH, "FLUSH" }, \
- { XBF_LOCK, "LOCK" }, /* should never be set */\
- { XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\
- { XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\
+ { XBF_TRYLOCK, "TRYLOCK" }, /* should never be set */\
+ { XBF_UNMAPPED, "UNMAPPED" }, /* ditto */\
{ _XBF_PAGES, "PAGES" }, \
{ _XBF_KMEM, "KMEM" }, \
{ _XBF_DELWRI_Q, "DELWRI_Q" }
-typedef enum {
- XBT_FORCE_FLUSH = 0,
-} xfs_buftarg_flags_t;
-
typedef struct xfs_buftarg {
dev_t bt_dev;
struct block_device *bt_bdev;
@@ -101,12 +86,6 @@ typedef struct xfs_buftarg {
unsigned int bt_sshift;
size_t bt_smask;
- /* per device delwri queue */
- struct task_struct *bt_task;
- struct list_head bt_delwri_queue;
- spinlock_t bt_delwri_lock;
- unsigned long bt_flags;
-
/* LRU control structures */
struct shrinker bt_shrinker;
struct list_head bt_lru;
@@ -128,8 +107,8 @@ typedef struct xfs_buf {
* fast-path on locking.
*/
struct rb_node b_rbnode; /* rbtree node */
- xfs_off_t b_file_offset; /* offset in file */
- size_t b_buffer_length;/* size of buffer in bytes */
+ xfs_daddr_t b_bn; /* block number for I/O */
+ int b_length; /* size of buffer in BBs */
atomic_t b_hold; /* reference count */
atomic_t b_lru_ref; /* lru reclaim ref count */
xfs_buf_flags_t b_flags; /* status flags */
@@ -140,8 +119,6 @@ typedef struct xfs_buf {
struct list_head b_list;
struct xfs_perag *b_pag; /* contains rbtree root */
xfs_buftarg_t *b_target; /* buffer target (device) */
- xfs_daddr_t b_bn; /* block number for I/O */
- size_t b_count_desired;/* desired transfer size */
void *b_addr; /* virtual address of buffer */
struct work_struct b_iodone_work;
xfs_buf_iodone_t b_iodone; /* I/O completion function */
@@ -150,7 +127,7 @@ typedef struct xfs_buf {
struct xfs_trans *b_transp;
struct page **b_pages; /* array of page pointers */
struct page *b_page_array[XB_PAGES]; /* inline pages */
- unsigned long b_queuetime; /* time buffer was queued */
+ int b_io_length; /* IO size in BBs */
atomic_t b_pin_count; /* pin count */
atomic_t b_io_remaining; /* #outstanding I/O requests */
unsigned int b_page_count; /* size of page array */
@@ -163,26 +140,30 @@ typedef struct xfs_buf {
/* Finding and Reading Buffers */
-extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t,
- xfs_buf_flags_t, xfs_buf_t *);
+struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target, xfs_daddr_t blkno,
+ size_t numblks, xfs_buf_flags_t flags,
+ struct xfs_buf *new_bp);
#define xfs_incore(buftarg,blkno,len,lockit) \
_xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
-extern xfs_buf_t *xfs_buf_get(xfs_buftarg_t *, xfs_off_t, size_t,
- xfs_buf_flags_t);
-extern xfs_buf_t *xfs_buf_read(xfs_buftarg_t *, xfs_off_t, size_t,
- xfs_buf_flags_t);
-
-struct xfs_buf *xfs_buf_alloc(struct xfs_buftarg *, xfs_off_t, size_t,
- xfs_buf_flags_t);
-extern void xfs_buf_set_empty(struct xfs_buf *bp, size_t len);
-extern xfs_buf_t *xfs_buf_get_uncached(struct xfs_buftarg *, size_t, int);
-extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
-extern void xfs_buf_hold(xfs_buf_t *);
-extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t);
-struct xfs_buf *xfs_buf_read_uncached(struct xfs_mount *mp,
- struct xfs_buftarg *target,
- xfs_daddr_t daddr, size_t length, int flags);
+struct xfs_buf *xfs_buf_get(struct xfs_buftarg *target, xfs_daddr_t blkno,
+ size_t numblks, xfs_buf_flags_t flags);
+struct xfs_buf *xfs_buf_read(struct xfs_buftarg *target, xfs_daddr_t blkno,
+ size_t numblks, xfs_buf_flags_t flags);
+void xfs_buf_readahead(struct xfs_buftarg *target, xfs_daddr_t blkno,
+ size_t numblks);
+
+struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks);
+struct xfs_buf *xfs_buf_alloc(struct xfs_buftarg *target, xfs_daddr_t blkno,
+ size_t numblks, xfs_buf_flags_t flags);
+void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
+int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);
+
+struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
+ int flags);
+struct xfs_buf *xfs_buf_read_uncached(struct xfs_buftarg *target,
+ xfs_daddr_t daddr, size_t numblks, int flags);
+void xfs_buf_hold(struct xfs_buf *bp);
/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
@@ -204,7 +185,7 @@ extern int xfs_bdstrat_cb(struct xfs_buf *);
extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
-extern int xfs_buf_iorequest(xfs_buf_t *);
+extern void xfs_buf_iorequest(xfs_buf_t *);
extern int xfs_buf_iowait(xfs_buf_t *);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
xfs_buf_rw_t);
@@ -220,24 +201,22 @@ static inline int xfs_buf_geterror(xfs_buf_t *bp)
extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
/* Delayed Write Buffer Routines */
-extern void xfs_buf_delwri_queue(struct xfs_buf *);
-extern void xfs_buf_delwri_dequeue(struct xfs_buf *);
-extern void xfs_buf_delwri_promote(struct xfs_buf *);
+extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
+extern int xfs_buf_delwri_submit(struct list_head *);
+extern int xfs_buf_delwri_submit_nowait(struct list_head *);
/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);
#define XFS_BUF_ZEROFLAGS(bp) \
- ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI| \
+ ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
XBF_SYNCIO|XBF_FUA|XBF_FLUSH))
void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE)
#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XBF_STALE)
-#define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI)
-
#define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE)
#define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE)
#define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE)
@@ -256,12 +235,6 @@ void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_ADDR(bp) ((bp)->b_bn)
#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno))
-#define XFS_BUF_OFFSET(bp) ((bp)->b_file_offset)
-#define XFS_BUF_SET_OFFSET(bp, off) ((bp)->b_file_offset = (off))
-#define XFS_BUF_COUNT(bp) ((bp)->b_count_desired)
-#define XFS_BUF_SET_COUNT(bp, cnt) ((bp)->b_count_desired = (cnt))
-#define XFS_BUF_SIZE(bp) ((bp)->b_buffer_length)
-#define XFS_BUF_SET_SIZE(bp, cnt) ((bp)->b_buffer_length = (cnt))
static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
@@ -287,7 +260,6 @@ extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
-extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
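
The header changes above replace the old per-target delwri queue with a caller-owned list: buffers are queued onto a local list_head with xfs_buf_delwri_queue() and written with xfs_buf_delwri_submit() or xfs_buf_delwri_submit_nowait(). The sketch below shows the expected calling pattern; it assumes kernel context, is not compilable on its own, and the function name and buffer array are invented for illustration.

/* Hypothetical caller of the new list-based delwri API; only the
 * xfs_buf_delwri_* and xfs_buf_lock/unlock calls come from the patch. */
static int write_dirty_buffers(struct xfs_buf **bufs, int count)
{
	LIST_HEAD(buffer_list);		/* thread-local, so no locking needed */
	int i;

	for (i = 0; i < count; i++) {
		struct xfs_buf *bp = bufs[i];

		xfs_buf_lock(bp);
		/* Returns false if the buffer is already on some delwri list;
		 * either way it is safe to unlock and move on. */
		xfs_buf_delwri_queue(bp, &buffer_list);
		xfs_buf_unlock(bp);
	}

	/* Write everything queued above and wait for I/O completion;
	 * the list is consumed by the submission. */
	return xfs_buf_delwri_submit(&buffer_list);
}

This mirrors what the reworked xfs_buf_item_push() and the AIL pushing code do with the buffer_list they are handed.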
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index eac97ef81e2a..45df2b857d48 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -20,7 +20,6 @@
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -123,11 +122,11 @@ xfs_buf_item_log_check(
ASSERT(bip->bli_logged != NULL);
bp = bip->bli_buf;
- ASSERT(XFS_BUF_COUNT(bp) > 0);
+ ASSERT(bp->b_length > 0);
ASSERT(bp->b_addr != NULL);
orig = bip->bli_orig;
buffer = bp->b_addr;
- for (x = 0; x < XFS_BUF_COUNT(bp); x++) {
+ for (x = 0; x < BBTOB(bp->b_length); x++) {
if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) {
xfs_emerg(bp->b_mount,
"%s: bip %x buffer %x orig %x index %d",
@@ -418,7 +417,6 @@ xfs_buf_item_unpin(
if (freed && stale) {
ASSERT(bip->bli_flags & XFS_BLI_STALE);
ASSERT(xfs_buf_islocked(bp));
- ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
ASSERT(XFS_BUF_ISSTALE(bp));
ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
@@ -455,42 +453,42 @@ xfs_buf_item_unpin(
bp->b_iodone = NULL;
} else {
spin_lock(&ailp->xa_lock);
- xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
+ xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
xfs_buf_item_relse(bp);
ASSERT(bp->b_fspriv == NULL);
}
xfs_buf_relse(bp);
+ } else if (freed && remove) {
+ xfs_buf_lock(bp);
+ xfs_buf_ioerror(bp, EIO);
+ XFS_BUF_UNDONE(bp);
+ xfs_buf_stale(bp);
+ xfs_buf_ioend(bp, 0);
}
}
-/*
- * This is called to attempt to lock the buffer associated with this
- * buf log item. Don't sleep on the buffer lock. If we can't get
- * the lock right away, return 0. If we can get the lock, take a
- * reference to the buffer. If this is a delayed write buffer that
- * needs AIL help to be written back, invoke the pushbuf routine
- * rather than the normal success path.
- */
STATIC uint
-xfs_buf_item_trylock(
- struct xfs_log_item *lip)
+xfs_buf_item_push(
+ struct xfs_log_item *lip,
+ struct list_head *buffer_list)
{
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
struct xfs_buf *bp = bip->bli_buf;
+ uint rval = XFS_ITEM_SUCCESS;
if (xfs_buf_ispinned(bp))
return XFS_ITEM_PINNED;
if (!xfs_buf_trylock(bp))
return XFS_ITEM_LOCKED;
- /* take a reference to the buffer. */
- xfs_buf_hold(bp);
-
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- trace_xfs_buf_item_trylock(bip);
- if (XFS_BUF_ISDELAYWRITE(bp))
- return XFS_ITEM_PUSHBUF;
- return XFS_ITEM_SUCCESS;
+
+ trace_xfs_buf_item_push(bip);
+
+ if (!xfs_buf_delwri_queue(bp, buffer_list))
+ rval = XFS_ITEM_FLUSHING;
+ xfs_buf_unlock(bp);
+ return rval;
}
/*
@@ -603,49 +601,6 @@ xfs_buf_item_committed(
return lsn;
}
-/*
- * The buffer is locked, but is not a delayed write buffer. This happens
- * if we race with IO completion and hence we don't want to try to write it
- * again. Just release the buffer.
- */
-STATIC void
-xfs_buf_item_push(
- struct xfs_log_item *lip)
-{
- struct xfs_buf_log_item *bip = BUF_ITEM(lip);
- struct xfs_buf *bp = bip->bli_buf;
-
- ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
-
- trace_xfs_buf_item_push(bip);
-
- xfs_buf_relse(bp);
-}
-
-/*
- * The buffer is locked and is a delayed write buffer. Promote the buffer
- * in the delayed write queue as the caller knows that they must invoke
- * the xfsbufd to get this buffer written. We have to unlock the buffer
- * to allow the xfsbufd to write it, too.
- */
-STATIC bool
-xfs_buf_item_pushbuf(
- struct xfs_log_item *lip)
-{
- struct xfs_buf_log_item *bip = BUF_ITEM(lip);
- struct xfs_buf *bp = bip->bli_buf;
-
- ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- ASSERT(XFS_BUF_ISDELAYWRITE(bp));
-
- trace_xfs_buf_item_pushbuf(bip);
-
- xfs_buf_delwri_promote(bp);
- xfs_buf_relse(bp);
- return true;
-}
-
STATIC void
xfs_buf_item_committing(
struct xfs_log_item *lip,
@@ -661,11 +616,9 @@ static const struct xfs_item_ops xfs_buf_item_ops = {
.iop_format = xfs_buf_item_format,
.iop_pin = xfs_buf_item_pin,
.iop_unpin = xfs_buf_item_unpin,
- .iop_trylock = xfs_buf_item_trylock,
.iop_unlock = xfs_buf_item_unlock,
.iop_committed = xfs_buf_item_committed,
.iop_push = xfs_buf_item_push,
- .iop_pushbuf = xfs_buf_item_pushbuf,
.iop_committing = xfs_buf_item_committing
};
@@ -703,7 +656,8 @@ xfs_buf_item_init(
* truncate any pieces. map_size is the size of the
* bitmap needed to describe the chunks of the buffer.
*/
- chunks = (int)((XFS_BUF_COUNT(bp) + (XFS_BLF_CHUNK - 1)) >> XFS_BLF_SHIFT);
+ chunks = (int)((BBTOB(bp->b_length) + (XFS_BLF_CHUNK - 1)) >>
+ XFS_BLF_SHIFT);
map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT);
bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
@@ -713,7 +667,7 @@ xfs_buf_item_init(
xfs_buf_hold(bp);
bip->bli_format.blf_type = XFS_LI_BUF;
bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
- bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
+ bip->bli_format.blf_len = (ushort)bp->b_length;
bip->bli_format.blf_map_size = map_size;
#ifdef XFS_TRANS_DEBUG
@@ -725,9 +679,9 @@ xfs_buf_item_init(
* the buffer to indicate which bytes the callers have asked
* to have logged.
*/
- bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP);
- memcpy(bip->bli_orig, bp->b_addr, XFS_BUF_COUNT(bp));
- bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP);
+ bip->bli_orig = kmem_alloc(BBTOB(bp->b_length), KM_SLEEP);
+ memcpy(bip->bli_orig, bp->b_addr, BBTOB(bp->b_length));
+ bip->bli_logged = kmem_zalloc(BBTOB(bp->b_length) / NBBY, KM_SLEEP);
#endif
/*
@@ -984,20 +938,27 @@ xfs_buf_iodone_callbacks(
* If the write was asynchronous then no one will be looking for the
* error. Clear the error state and write the buffer out again.
*
- * During sync or umount we'll write all pending buffers again
- * synchronous, which will catch these errors if they keep hanging
- * around.
+ * XXX: This helps against transient write errors, but we need to find
+ * a way to shut the filesystem down if the writes keep failing.
+ *
+ * In practice we'll shut the filesystem down soon, as non-transient
+ * errors tend to affect the whole device and a failing log write
+ * will make us give up. But we really ought to do better here.
*/
if (XFS_BUF_ISASYNC(bp)) {
+ ASSERT(bp->b_iodone != NULL);
+
+ trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
+
xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
if (!XFS_BUF_ISSTALE(bp)) {
- xfs_buf_delwri_queue(bp);
- XFS_BUF_DONE(bp);
+ bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
+ xfs_bdstrat_cb(bp);
+ } else {
+ xfs_buf_relse(bp);
}
- ASSERT(bp->b_iodone != NULL);
- trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
- xfs_buf_relse(bp);
+
return;
}
@@ -1045,6 +1006,6 @@ xfs_buf_iodone(
* Either way, AIL is useless if we're forcing a shutdown.
*/
spin_lock(&ailp->xa_lock);
- xfs_trans_ail_delete(ailp, lip);
+ xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
xfs_buf_item_free(BUF_ITEM(lip));
}
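
xfs_buf_item_init() above now derives the logging bitmap size from b_length in basic blocks: the buffer is divided into XFS_BLF_CHUNK-sized pieces and one bit is kept per chunk. The standalone arithmetic below assumes 128-byte chunks and 32-bit bitmap words, which are illustrative values rather than ones taken from this diff.

#include <stdio.h>

int main(void)
{
	const unsigned bbshift = 9;		/* 512-byte basic blocks */
	const unsigned chunk_shift = 7;		/* assumed 128-byte logging chunks */
	const unsigned word_bits = 32;		/* assumed bitmap word size */

	unsigned b_length = 8;			/* buffer length in BBs (4096 bytes) */
	unsigned bytes = b_length << bbshift;

	/* Round the byte count up to whole chunks, then size the bitmap;
	 * the kernel formula "(chunks + NBWORD) >> BIT_TO_WORD_SHIFT" keeps
	 * a little slack, which this mirrors with the assumed 32-bit word. */
	unsigned chunks = (bytes + (1u << chunk_shift) - 1) >> chunk_shift;
	unsigned map_size = (chunks + word_bits) >> 5;	/* 5 == log2(32) */

	printf("%u bytes -> %u chunks -> %u bitmap words\n",
	       bytes, chunks, map_size);
	return 0;
}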
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 7f1a6f5b05a6..015b946c5808 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -20,7 +20,6 @@
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -2277,20 +2276,20 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
if (nbuf == 1) {
dabuf->nbuf = 1;
bp = bps[0];
- dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp));
+ dabuf->bbcount = bp->b_length;
dabuf->data = bp->b_addr;
dabuf->bps[0] = bp;
} else {
dabuf->nbuf = nbuf;
for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
dabuf->bps[i] = bp = bps[i];
- dabuf->bbcount += BTOBB(XFS_BUF_COUNT(bp));
+ dabuf->bbcount += bp->b_length;
}
dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
- for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) {
+ for (i = off = 0; i < nbuf; i++, off += BBTOB(bp->b_length)) {
bp = bps[i];
memcpy((char *)dabuf->data + off, bp->b_addr,
- XFS_BUF_COUNT(bp));
+ BBTOB(bp->b_length));
}
}
return dabuf;
@@ -2310,10 +2309,10 @@ xfs_da_buf_clean(xfs_dabuf_t *dabuf)
ASSERT(dabuf->nbuf > 1);
dabuf->dirty = 0;
for (i = off = 0; i < dabuf->nbuf;
- i++, off += XFS_BUF_COUNT(bp)) {
+ i++, off += BBTOB(bp->b_length)) {
bp = dabuf->bps[i];
memcpy(bp->b_addr, dabuf->data + off,
- XFS_BUF_COUNT(bp));
+ BBTOB(bp->b_length));
}
}
}
@@ -2356,10 +2355,10 @@ xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
}
dabuf->dirty = 1;
ASSERT(first <= last);
- for (i = off = 0; i < dabuf->nbuf; i++, off += XFS_BUF_COUNT(bp)) {
+ for (i = off = 0; i < dabuf->nbuf; i++, off += BBTOB(bp->b_length)) {
bp = dabuf->bps[i];
f = off;
- l = f + XFS_BUF_COUNT(bp) - 1;
+ l = f + BBTOB(bp->b_length) - 1;
if (f < first)
f = first;
if (l > last)
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 1137bbc5eccb..e00de08dc8ac 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -18,9 +18,7 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index a2e27010c7fb..67a250c36d41 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -18,7 +18,6 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index d3b63aefd01d..586732f2d80d 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -19,7 +19,6 @@
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
index 5bbe2a8a023f..2046988e9eb2 100644
--- a/fs/xfs/xfs_dir2_data.c
+++ b/fs/xfs/xfs_dir2_data.c
@@ -19,7 +19,6 @@
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 66e108f561a3..397ffbcbab1d 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -20,7 +20,6 @@
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 0179a41d9e5a..b0f26780449d 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -19,7 +19,6 @@
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index 79d05e84e296..19bf0c5e38f4 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -19,7 +19,6 @@
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 1ad3a4b8ca40..f9c3fe304a17 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -17,7 +17,6 @@
*/
#include "xfs.h"
#include "xfs_sb.h"
-#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
@@ -30,6 +29,7 @@
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
+#include "xfs_extent_busy.h"
#include "xfs_discard.h"
#include "xfs_trace.h"
@@ -118,7 +118,7 @@ xfs_trim_extents(
* If any blocks in the range are still busy, skip the
* discard and try again the next time.
*/
- if (xfs_alloc_busy_search(mp, agno, fbno, flen)) {
+ if (xfs_extent_busy_search(mp, agno, fbno, flen)) {
trace_xfs_discard_busy(mp, agno, fbno, flen);
goto next_extent;
}
@@ -212,7 +212,7 @@ xfs_discard_extents(
struct xfs_mount *mp,
struct list_head *list)
{
- struct xfs_busy_extent *busyp;
+ struct xfs_extent_busy *busyp;
int error = 0;
list_for_each_entry(busyp, list, list) {
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 1155208fa830..bf27fcca4843 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -19,7 +19,6 @@
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -857,7 +856,7 @@ xfs_qm_dqflush_done(
/* xfs_trans_ail_delete() drops the AIL lock. */
spin_lock(&ailp->xa_lock);
if (lip->li_lsn == qip->qli_flush_lsn)
- xfs_trans_ail_delete(ailp, lip);
+ xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
else
spin_unlock(&ailp->xa_lock);
}
@@ -878,8 +877,8 @@ xfs_qm_dqflush_done(
*/
int
xfs_qm_dqflush(
- xfs_dquot_t *dqp,
- uint flags)
+ struct xfs_dquot *dqp,
+ struct xfs_buf **bpp)
{
struct xfs_mount *mp = dqp->q_mount;
struct xfs_buf *bp;
@@ -891,25 +890,30 @@ xfs_qm_dqflush(
trace_xfs_dqflush(dqp);
- /*
- * If not dirty, or it's pinned and we are not supposed to block, nada.
- */
- if (!XFS_DQ_IS_DIRTY(dqp) ||
- ((flags & SYNC_TRYLOCK) && atomic_read(&dqp->q_pincount) > 0)) {
- xfs_dqfunlock(dqp);
- return 0;
- }
+ *bpp = NULL;
+
xfs_qm_dqunpin_wait(dqp);
/*
* This may have been unpinned because the filesystem is shutting
* down forcibly. If that's the case we must not write this dquot
- * to disk, because the log record didn't make it to disk!
+ * to disk, because the log record didn't make it to disk.
+ *
+ * We also have to remove the log item from the AIL in this case,
+ * as we wait for an empty AIL as part of the unmount process.
*/
if (XFS_FORCED_SHUTDOWN(mp)) {
+ struct xfs_log_item *lip = &dqp->q_logitem.qli_item;
dqp->dq_flags &= ~XFS_DQ_DIRTY;
- xfs_dqfunlock(dqp);
- return XFS_ERROR(EIO);
+
+ spin_lock(&mp->m_ail->xa_lock);
+ if (lip->li_flags & XFS_LI_IN_AIL)
+ xfs_trans_ail_delete(mp->m_ail, lip,
+ SHUTDOWN_CORRUPT_INCORE);
+ else
+ spin_unlock(&mp->m_ail->xa_lock);
+ error = XFS_ERROR(EIO);
+ goto out_unlock;
}
/*
@@ -917,11 +921,8 @@ xfs_qm_dqflush(
*/
error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen, 0, &bp);
- if (error) {
- ASSERT(error != ENOENT);
- xfs_dqfunlock(dqp);
- return error;
- }
+ if (error)
+ goto out_unlock;
/*
* Calculate the location of the dquot inside the buffer.
@@ -967,20 +968,13 @@ xfs_qm_dqflush(
xfs_log_force(mp, 0);
}
- if (flags & SYNC_WAIT)
- error = xfs_bwrite(bp);
- else
- xfs_buf_delwri_queue(bp);
-
- xfs_buf_relse(bp);
-
trace_xfs_dqflush_done(dqp);
+ *bpp = bp;
+ return 0;
- /*
- * dqp is still locked, but caller is free to unlock it now.
- */
- return error;
-
+out_unlock:
+ xfs_dqfunlock(dqp);
+ return XFS_ERROR(EIO);
}
/*
@@ -1011,39 +1005,6 @@ xfs_dqlock2(
}
}
-/*
- * Give the buffer a little push if it is incore and
- * wait on the flush lock.
- */
-void
-xfs_dqflock_pushbuf_wait(
- xfs_dquot_t *dqp)
-{
- xfs_mount_t *mp = dqp->q_mount;
- xfs_buf_t *bp;
-
- /*
- * Check to see if the dquot has been flushed delayed
- * write. If so, grab its buffer and send it
- * out immediately. We'll be able to acquire
- * the flush lock when the I/O completes.
- */
- bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno,
- mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
- if (!bp)
- goto out_lock;
-
- if (XFS_BUF_ISDELAYWRITE(bp)) {
- if (xfs_buf_ispinned(bp))
- xfs_log_force(mp, 0);
- xfs_buf_delwri_promote(bp);
- wake_up_process(bp->b_target->bt_task);
- }
- xfs_buf_relse(bp);
-out_lock:
- xfs_dqflock(dqp);
-}
-
int __init
xfs_qm_init(void)
{
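With this change xfs_qm_dqflush() no longer writes the dquot buffer itself; it hands the locked buffer back in *bpp and leaves the write to the caller. A minimal sketch of the new calling convention, assuming the existing helpers xfs_dqflock(), xfs_bwrite() and xfs_buf_relse() keep their current signatures (the error path relies on the out_unlock label above dropping the flush lock):

	struct xfs_buf	*bp;
	int		error;

	xfs_dqflock(dqp);			/* caller takes the flush lock */
	error = xfs_qm_dqflush(dqp, &bp);	/* flush into the backing buffer */
	if (error)
		return error;			/* flush lock already dropped */
	error = xfs_bwrite(bp);			/* or queue on a delwri list */
	xfs_buf_relse(bp);
	return error;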
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index ef9190bd8b30..7d20af27346d 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -141,7 +141,7 @@ static inline xfs_dquot_t *xfs_inode_dquot(struct xfs_inode *ip, int type)
extern int xfs_qm_dqread(struct xfs_mount *, xfs_dqid_t, uint,
uint, struct xfs_dquot **);
extern void xfs_qm_dqdestroy(xfs_dquot_t *);
-extern int xfs_qm_dqflush(xfs_dquot_t *, uint);
+extern int xfs_qm_dqflush(struct xfs_dquot *, struct xfs_buf **);
extern void xfs_qm_dqunpin_wait(xfs_dquot_t *);
extern void xfs_qm_adjust_dqtimers(xfs_mount_t *,
xfs_disk_dquot_t *);
@@ -152,7 +152,6 @@ extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *,
extern void xfs_qm_dqput(xfs_dquot_t *);
extern void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);
-extern void xfs_dqflock_pushbuf_wait(struct xfs_dquot *dqp);
static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
{
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 34baeae45265..57aa4b03720c 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -17,9 +17,7 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -108,38 +106,6 @@ xfs_qm_dquot_logitem_unpin(
wake_up(&dqp->q_pinwait);
}
-/*
- * Given the logitem, this writes the corresponding dquot entry to disk
- * asynchronously. This is called with the dquot entry securely locked;
- * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
- * at the end.
- */
-STATIC void
-xfs_qm_dquot_logitem_push(
- struct xfs_log_item *lip)
-{
- struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
- int error;
-
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
- ASSERT(!completion_done(&dqp->q_flush));
-
- /*
- * Since we were able to lock the dquot's flush lock and
- * we found it on the AIL, the dquot must be dirty. This
- * is because the dquot is removed from the AIL while still
- * holding the flush lock in xfs_dqflush_done(). Thus, if
- * we found it in the AIL and were able to obtain the flush
- * lock without sleeping, then there must not have been
- * anyone in the process of flushing the dquot.
- */
- error = xfs_qm_dqflush(dqp, SYNC_TRYLOCK);
- if (error)
- xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
- __func__, error, dqp);
- xfs_dqunlock(dqp);
-}
-
STATIC xfs_lsn_t
xfs_qm_dquot_logitem_committed(
struct xfs_log_item *lip,
@@ -171,67 +137,15 @@ xfs_qm_dqunpin_wait(
wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
}
-/*
- * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
- * the dquot is locked by us, but the flush lock isn't. So, here we are
- * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
- * If so, we want to push it out to help us take this item off the AIL as soon
- * as possible.
- *
- * We must not be holding the AIL lock at this point. Calling incore() to
- * search the buffer cache can be a time consuming thing, and AIL lock is a
- * spinlock.
- */
-STATIC bool
-xfs_qm_dquot_logitem_pushbuf(
- struct xfs_log_item *lip)
-{
- struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
- struct xfs_dquot *dqp = qlip->qli_dquot;
- struct xfs_buf *bp;
- bool ret = true;
-
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
-
- /*
- * If flushlock isn't locked anymore, chances are that the
- * inode flush completed and the inode was taken off the AIL.
- * So, just get out.
- */
- if (completion_done(&dqp->q_flush) ||
- !(lip->li_flags & XFS_LI_IN_AIL)) {
- xfs_dqunlock(dqp);
- return true;
- }
-
- bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
- dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
- xfs_dqunlock(dqp);
- if (!bp)
- return true;
- if (XFS_BUF_ISDELAYWRITE(bp))
- xfs_buf_delwri_promote(bp);
- if (xfs_buf_ispinned(bp))
- ret = false;
- xfs_buf_relse(bp);
- return ret;
-}
-
-/*
- * This is called to attempt to lock the dquot associated with this
- * dquot log item. Don't sleep on the dquot lock or the flush lock.
- * If the flush lock is already held, indicating that the dquot has
- * been or is in the process of being flushed, then see if we can
- * find the dquot's buffer in the buffer cache without sleeping. If
- * we can and it is marked delayed write, then we want to send it out.
- * We delay doing so until the push routine, though, to avoid sleeping
- * in any device strategy routines.
- */
STATIC uint
-xfs_qm_dquot_logitem_trylock(
- struct xfs_log_item *lip)
+xfs_qm_dquot_logitem_push(
+ struct xfs_log_item *lip,
+ struct list_head *buffer_list)
{
struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
+ struct xfs_buf *bp = NULL;
+ uint rval = XFS_ITEM_SUCCESS;
+ int error;
if (atomic_read(&dqp->q_pincount) > 0)
return XFS_ITEM_PINNED;
@@ -239,16 +153,41 @@ xfs_qm_dquot_logitem_trylock(
if (!xfs_dqlock_nowait(dqp))
return XFS_ITEM_LOCKED;
+ /*
+ * Re-check the pincount now that we stabilized the value by
+ * taking the quota lock.
+ */
+ if (atomic_read(&dqp->q_pincount) > 0) {
+ rval = XFS_ITEM_PINNED;
+ goto out_unlock;
+ }
+
+ /*
+ * Someone else is already flushing the dquot. Nothing we can do
+ * here but wait for the flush to finish and remove the item from
+ * the AIL.
+ */
if (!xfs_dqflock_nowait(dqp)) {
- /*
- * dquot has already been flushed to the backing buffer,
- * leave it locked, pushbuf routine will unlock it.
- */
- return XFS_ITEM_PUSHBUF;
+ rval = XFS_ITEM_FLUSHING;
+ goto out_unlock;
}
- ASSERT(lip->li_flags & XFS_LI_IN_AIL);
- return XFS_ITEM_SUCCESS;
+ spin_unlock(&lip->li_ailp->xa_lock);
+
+ error = xfs_qm_dqflush(dqp, &bp);
+ if (error) {
+ xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
+ __func__, error, dqp);
+ } else {
+ if (!xfs_buf_delwri_queue(bp, buffer_list))
+ rval = XFS_ITEM_FLUSHING;
+ xfs_buf_relse(bp);
+ }
+
+ spin_lock(&lip->li_ailp->xa_lock);
+out_unlock:
+ xfs_dqunlock(dqp);
+ return rval;
}
/*
@@ -299,11 +238,9 @@ static const struct xfs_item_ops xfs_dquot_item_ops = {
.iop_format = xfs_qm_dquot_logitem_format,
.iop_pin = xfs_qm_dquot_logitem_pin,
.iop_unpin = xfs_qm_dquot_logitem_unpin,
- .iop_trylock = xfs_qm_dquot_logitem_trylock,
.iop_unlock = xfs_qm_dquot_logitem_unlock,
.iop_committed = xfs_qm_dquot_logitem_committed,
.iop_push = xfs_qm_dquot_logitem_push,
- .iop_pushbuf = xfs_qm_dquot_logitem_pushbuf,
.iop_committing = xfs_qm_dquot_logitem_committing
};
@@ -398,11 +335,13 @@ xfs_qm_qoff_logitem_unpin(
}
/*
- * Quotaoff items have no locking, so just return success.
+ * There isn't much you can do to push a quotaoff item. It is simply
+ * stuck waiting for the log to be flushed to disk.
*/
STATIC uint
-xfs_qm_qoff_logitem_trylock(
- struct xfs_log_item *lip)
+xfs_qm_qoff_logitem_push(
+ struct xfs_log_item *lip,
+ struct list_head *buffer_list)
{
return XFS_ITEM_LOCKED;
}
@@ -429,17 +368,6 @@ xfs_qm_qoff_logitem_committed(
return lsn;
}
-/*
- * There isn't much you can do to push on an quotaoff item. It is simply
- * stuck waiting for the log to be flushed to disk.
- */
-STATIC void
-xfs_qm_qoff_logitem_push(
- struct xfs_log_item *lip)
-{
-}
-
-
STATIC xfs_lsn_t
xfs_qm_qoffend_logitem_committed(
struct xfs_log_item *lip,
@@ -454,7 +382,7 @@ xfs_qm_qoffend_logitem_committed(
* xfs_trans_ail_delete() drops the AIL lock.
*/
spin_lock(&ailp->xa_lock);
- xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs);
+ xfs_trans_ail_delete(ailp, &qfs->qql_item, SHUTDOWN_LOG_IO_ERROR);
kmem_free(qfs);
kmem_free(qfe);
@@ -487,7 +415,6 @@ static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
.iop_format = xfs_qm_qoff_logitem_format,
.iop_pin = xfs_qm_qoff_logitem_pin,
.iop_unpin = xfs_qm_qoff_logitem_unpin,
- .iop_trylock = xfs_qm_qoff_logitem_trylock,
.iop_unlock = xfs_qm_qoff_logitem_unlock,
.iop_committed = xfs_qm_qoffend_logitem_committed,
.iop_push = xfs_qm_qoff_logitem_push,
@@ -502,7 +429,6 @@ static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
.iop_format = xfs_qm_qoff_logitem_format,
.iop_pin = xfs_qm_qoff_logitem_pin,
.iop_unpin = xfs_qm_qoff_logitem_unpin,
- .iop_trylock = xfs_qm_qoff_logitem_trylock,
.iop_unlock = xfs_qm_qoff_logitem_unlock,
.iop_committed = xfs_qm_qoff_logitem_committed,
.iop_push = xfs_qm_qoff_logitem_push,
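The combined ->iop_push() now reports XFS_ITEM_SUCCESS, XFS_ITEM_PINNED, XFS_ITEM_LOCKED or XFS_ITEM_FLUSHING instead of the old trylock/pushbuf split. A simplified, hedged sketch of how an AIL pusher might act on those codes; the real xfsaild loop in xfs_trans_ail.c keeps extra bookkeeping (stuck counts, log forces, target LSNs):

	static void
	example_push_one(
		struct xfs_log_item	*lip,
		struct list_head	*buffer_list,
		bool			*need_log_force,
		long			*stuck)
	{
		switch (lip->li_ops->iop_push(lip, buffer_list)) {
		case XFS_ITEM_SUCCESS:
			/* flushed into a buffer queued on *buffer_list */
			break;
		case XFS_ITEM_PINNED:
			/* pinned in the log; force the log and retry later */
			*need_log_force = true;
			break;
		case XFS_ITEM_LOCKED:
		case XFS_ITEM_FLUSHING:
			/* held elsewhere or already under I/O; skip for now */
			(*stuck)++;
			break;
		}
	}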
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 39f06336b99d..610456054dc2 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -19,7 +19,6 @@
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 558910f5e3c0..2d25d19c4ea1 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -17,7 +17,6 @@
*/
#include "xfs.h"
#include "xfs_types.h"
-#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
new file mode 100644
index 000000000000..85e9f87a1a7c
--- /dev/null
+++ b/fs/xfs/xfs_extent_busy.c
@@ -0,0 +1,603 @@
+/*
+ * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2010 David Chinner.
+ * Copyright (c) 2011 Christoph Hellwig.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_types.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_inode.h"
+#include "xfs_extent_busy.h"
+#include "xfs_trace.h"
+
+void
+xfs_extent_busy_insert(
+ struct xfs_trans *tp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ unsigned int flags)
+{
+ struct xfs_extent_busy *new;
+ struct xfs_extent_busy *busyp;
+ struct xfs_perag *pag;
+ struct rb_node **rbp;
+ struct rb_node *parent = NULL;
+
+ new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_MAYFAIL);
+ if (!new) {
+ /*
+ * No Memory! Since it is now not possible to track the free
+		 * block, make this a synchronous transaction to ensure that
+ * the block is not reused before this transaction commits.
+ */
+ trace_xfs_extent_busy_enomem(tp->t_mountp, agno, bno, len);
+ xfs_trans_set_sync(tp);
+ return;
+ }
+
+ new->agno = agno;
+ new->bno = bno;
+ new->length = len;
+ INIT_LIST_HEAD(&new->list);
+ new->flags = flags;
+
+ /* trace before insert to be able to see failed inserts */
+ trace_xfs_extent_busy(tp->t_mountp, agno, bno, len);
+
+ pag = xfs_perag_get(tp->t_mountp, new->agno);
+ spin_lock(&pag->pagb_lock);
+ rbp = &pag->pagb_tree.rb_node;
+ while (*rbp) {
+ parent = *rbp;
+ busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);
+
+ if (new->bno < busyp->bno) {
+ rbp = &(*rbp)->rb_left;
+ ASSERT(new->bno + new->length <= busyp->bno);
+ } else if (new->bno > busyp->bno) {
+ rbp = &(*rbp)->rb_right;
+ ASSERT(bno >= busyp->bno + busyp->length);
+ } else {
+ ASSERT(0);
+ }
+ }
+
+ rb_link_node(&new->rb_node, parent, rbp);
+ rb_insert_color(&new->rb_node, &pag->pagb_tree);
+
+ list_add(&new->list, &tp->t_busy);
+ spin_unlock(&pag->pagb_lock);
+ xfs_perag_put(pag);
+}
+
+/*
+ * Search for a busy extent within the range of the extent we are about to
+ * allocate. You need to be holding the busy extent tree lock when calling
+ * xfs_extent_busy_search(). This function returns 0 for no overlapping busy
+ * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
+ * match. This is done so that a non-zero return indicates an overlap that
+ * will require a synchronous transaction, but it can still be
+ * used to distinguish between a partial and an exact match.
+ */
+int
+xfs_extent_busy_search(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len)
+{
+ struct xfs_perag *pag;
+ struct rb_node *rbp;
+ struct xfs_extent_busy *busyp;
+ int match = 0;
+
+ pag = xfs_perag_get(mp, agno);
+ spin_lock(&pag->pagb_lock);
+
+ rbp = pag->pagb_tree.rb_node;
+
+ /* find closest start bno overlap */
+ while (rbp) {
+ busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
+ if (bno < busyp->bno) {
+ /* may overlap, but exact start block is lower */
+ if (bno + len > busyp->bno)
+ match = -1;
+ rbp = rbp->rb_left;
+ } else if (bno > busyp->bno) {
+ /* may overlap, but exact start block is higher */
+ if (bno < busyp->bno + busyp->length)
+ match = -1;
+ rbp = rbp->rb_right;
+ } else {
+ /* bno matches busyp, length determines exact match */
+ match = (busyp->length == len) ? 1 : -1;
+ break;
+ }
+ }
+ spin_unlock(&pag->pagb_lock);
+ xfs_perag_put(pag);
+ return match;
+}
+
+/*
+ * The found free extent [fbno, fend] overlaps part or all of the given busy
+ * extent. If the overlap covers the beginning, the end, or all of the busy
+ * extent, the overlapping portion can be made unbusy and used for the
+ * allocation. We can't split a busy extent because we can't modify a
+ * transaction/CIL context busy list, but we can update an entry's block
+ * number or length.
+ *
+ * Returns true if the extent can safely be reused, or false if the search
+ * needs to be restarted.
+ */
+STATIC bool
+xfs_extent_busy_update_extent(
+ struct xfs_mount *mp,
+ struct xfs_perag *pag,
+ struct xfs_extent_busy *busyp,
+ xfs_agblock_t fbno,
+ xfs_extlen_t flen,
+ bool userdata)
+{
+ xfs_agblock_t fend = fbno + flen;
+ xfs_agblock_t bbno = busyp->bno;
+ xfs_agblock_t bend = bbno + busyp->length;
+
+ /*
+ * This extent is currently being discarded. Give the thread
+ * performing the discard a chance to mark the extent unbusy
+ * and retry.
+ */
+ if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
+ spin_unlock(&pag->pagb_lock);
+ delay(1);
+ spin_lock(&pag->pagb_lock);
+ return false;
+ }
+
+ /*
+ * If there is a busy extent overlapping a user allocation, we have
+ * no choice but to force the log and retry the search.
+ *
+ * Fortunately this does not happen during normal operation, but
+ * only if the filesystem is very low on space and has to dip into
+ * the AGFL for normal allocations.
+ */
+ if (userdata)
+ goto out_force_log;
+
+ if (bbno < fbno && bend > fend) {
+ /*
+ * Case 1:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +---------+
+ * fbno fend
+ */
+
+ /*
+ * We would have to split the busy extent to be able to track
+		 * it correctly, which we cannot do because we would have to
+ * modify the list of busy extents attached to the transaction
+ * or CIL context, which is immutable.
+ *
+ * Force out the log to clear the busy extent and retry the
+ * search.
+ */
+ goto out_force_log;
+ } else if (bbno >= fbno && bend <= fend) {
+ /*
+ * Case 2:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +-----------------+
+ * fbno fend
+ *
+ * Case 3:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +--------------------------+
+ * fbno fend
+ *
+ * Case 4:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +--------------------------+
+ * fbno fend
+ *
+ * Case 5:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +-----------------------------------+
+ * fbno fend
+ *
+ */
+
+ /*
+ * The busy extent is fully covered by the extent we are
+ * allocating, and can simply be removed from the rbtree.
+ * However we cannot remove it from the immutable list
+ * tracking busy extents in the transaction or CIL context,
+ * so set the length to zero to mark it invalid.
+ *
+ * We also need to restart the busy extent search from the
+ * tree root, because erasing the node can rearrange the
+ * tree topology.
+ */
+ rb_erase(&busyp->rb_node, &pag->pagb_tree);
+ busyp->length = 0;
+ return false;
+ } else if (fend < bend) {
+ /*
+ * Case 6:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +---------+
+ * fbno fend
+ *
+ * Case 7:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +------------------+
+ * fbno fend
+ *
+ */
+ busyp->bno = fend;
+ } else if (bbno < fbno) {
+ /*
+ * Case 8:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +-------------+
+ * fbno fend
+ *
+ * Case 9:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +----------------------+
+ * fbno fend
+ */
+ busyp->length = fbno - busyp->bno;
+ } else {
+ ASSERT(0);
+ }
+
+ trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
+ return true;
+
+out_force_log:
+ spin_unlock(&pag->pagb_lock);
+ xfs_log_force(mp, XFS_LOG_SYNC);
+ trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
+ spin_lock(&pag->pagb_lock);
+ return false;
+}
+
+
+/*
+ * For a given extent [fbno, flen], make sure we can reuse it safely.
+ */
+void
+xfs_extent_busy_reuse(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t fbno,
+ xfs_extlen_t flen,
+ bool userdata)
+{
+ struct xfs_perag *pag;
+ struct rb_node *rbp;
+
+ ASSERT(flen > 0);
+
+ pag = xfs_perag_get(mp, agno);
+ spin_lock(&pag->pagb_lock);
+restart:
+ rbp = pag->pagb_tree.rb_node;
+ while (rbp) {
+ struct xfs_extent_busy *busyp =
+ rb_entry(rbp, struct xfs_extent_busy, rb_node);
+ xfs_agblock_t bbno = busyp->bno;
+ xfs_agblock_t bend = bbno + busyp->length;
+
+ if (fbno + flen <= bbno) {
+ rbp = rbp->rb_left;
+ continue;
+ } else if (fbno >= bend) {
+ rbp = rbp->rb_right;
+ continue;
+ }
+
+ if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
+ userdata))
+ goto restart;
+ }
+ spin_unlock(&pag->pagb_lock);
+ xfs_perag_put(pag);
+}
+
+/*
+ * For a given extent [fbno, flen], search the busy extent list to find a
+ * subset of the extent that is not busy. If *rlen is smaller than
+ * args->minlen, no suitable extent could be found, and the higher level
+ * code needs to force out the log and retry the allocation.
+ */
+void
+xfs_extent_busy_trim(
+ struct xfs_alloc_arg *args,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ xfs_agblock_t *rbno,
+ xfs_extlen_t *rlen)
+{
+ xfs_agblock_t fbno;
+ xfs_extlen_t flen;
+ struct rb_node *rbp;
+
+ ASSERT(len > 0);
+
+ spin_lock(&args->pag->pagb_lock);
+restart:
+ fbno = bno;
+ flen = len;
+ rbp = args->pag->pagb_tree.rb_node;
+ while (rbp && flen >= args->minlen) {
+ struct xfs_extent_busy *busyp =
+ rb_entry(rbp, struct xfs_extent_busy, rb_node);
+ xfs_agblock_t fend = fbno + flen;
+ xfs_agblock_t bbno = busyp->bno;
+ xfs_agblock_t bend = bbno + busyp->length;
+
+ if (fend <= bbno) {
+ rbp = rbp->rb_left;
+ continue;
+ } else if (fbno >= bend) {
+ rbp = rbp->rb_right;
+ continue;
+ }
+
+ /*
+ * If this is a metadata allocation, try to reuse the busy
+ * extent instead of trimming the allocation.
+ */
+ if (!args->userdata &&
+ !(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
+ if (!xfs_extent_busy_update_extent(args->mp, args->pag,
+ busyp, fbno, flen,
+ false))
+ goto restart;
+ continue;
+ }
+
+ if (bbno <= fbno) {
+ /* start overlap */
+
+ /*
+ * Case 1:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +---------+
+ * fbno fend
+ *
+ * Case 2:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +-------------+
+ * fbno fend
+ *
+ * Case 3:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +-------------+
+ * fbno fend
+ *
+ * Case 4:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +-----------------+
+ * fbno fend
+ *
+ * No unbusy region in extent, return failure.
+ */
+ if (fend <= bend)
+ goto fail;
+
+ /*
+ * Case 5:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +----------------------+
+ * fbno fend
+ *
+ * Case 6:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +--------------------------+
+ * fbno fend
+ *
+ * Needs to be trimmed to:
+ * +-------+
+ * fbno fend
+ */
+ fbno = bend;
+ } else if (bend >= fend) {
+ /* end overlap */
+
+ /*
+ * Case 7:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +------------------+
+ * fbno fend
+ *
+ * Case 8:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +--------------------------+
+ * fbno fend
+ *
+ * Needs to be trimmed to:
+ * +-------+
+ * fbno fend
+ */
+ fend = bbno;
+ } else {
+ /* middle overlap */
+
+ /*
+ * Case 9:
+ * bbno bend
+ * +BBBBBBBBBBBBBBBBB+
+ * +-----------------------------------+
+ * fbno fend
+ *
+ * Can be trimmed to:
+ * +-------+ OR +-------+
+ * fbno fend fbno fend
+ *
+ * Backward allocation leads to significant
+ * fragmentation of directories, which degrades
+			 * directory performance, so we always want to
+ * choose the option that produces forward allocation
+ * patterns.
+ * Preferring the lower bno extent will make the next
+ * request use "fend" as the start of the next
+ * allocation; if the segment is no longer busy at
+ * that point, we'll get a contiguous allocation, but
+ * even if it is still busy, we will get a forward
+ * allocation.
+ * We try to avoid choosing the segment at "bend",
+ * because that can lead to the next allocation
+ * taking the segment at "fbno", which would be a
+ * backward allocation. We only use the segment at
+ * "fbno" if it is much larger than the current
+ * requested size, because in that case there's a
+ * good chance subsequent allocations will be
+ * contiguous.
+ */
+ if (bbno - fbno >= args->maxlen) {
+ /* left candidate fits perfect */
+ fend = bbno;
+ } else if (fend - bend >= args->maxlen * 4) {
+ /* right candidate has enough free space */
+ fbno = bend;
+ } else if (bbno - fbno >= args->minlen) {
+ /* left candidate fits minimum requirement */
+ fend = bbno;
+ } else {
+ goto fail;
+ }
+ }
+
+ flen = fend - fbno;
+ }
+ spin_unlock(&args->pag->pagb_lock);
+
+ if (fbno != bno || flen != len) {
+ trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len,
+ fbno, flen);
+ }
+ *rbno = fbno;
+ *rlen = flen;
+ return;
+fail:
+ /*
+	 * Return a zero extent length as a failure indication. All callers
+ * re-check if the trimmed extent satisfies the minlen requirement.
+ */
+ spin_unlock(&args->pag->pagb_lock);
+ trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
+ *rbno = fbno;
+ *rlen = 0;
+}
+
+STATIC void
+xfs_extent_busy_clear_one(
+ struct xfs_mount *mp,
+ struct xfs_perag *pag,
+ struct xfs_extent_busy *busyp)
+{
+ if (busyp->length) {
+ trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
+ busyp->length);
+ rb_erase(&busyp->rb_node, &pag->pagb_tree);
+ }
+
+ list_del_init(&busyp->list);
+ kmem_free(busyp);
+}
+
+/*
+ * Remove all extents on the passed in list from the busy extents tree.
+ * If do_discard is set skip extents that need to be discarded, and mark
+ * these as undergoing a discard operation instead.
+ */
+void
+xfs_extent_busy_clear(
+ struct xfs_mount *mp,
+ struct list_head *list,
+ bool do_discard)
+{
+ struct xfs_extent_busy *busyp, *n;
+ struct xfs_perag *pag = NULL;
+ xfs_agnumber_t agno = NULLAGNUMBER;
+
+ list_for_each_entry_safe(busyp, n, list, list) {
+ if (busyp->agno != agno) {
+ if (pag) {
+ spin_unlock(&pag->pagb_lock);
+ xfs_perag_put(pag);
+ }
+ pag = xfs_perag_get(mp, busyp->agno);
+ spin_lock(&pag->pagb_lock);
+ agno = busyp->agno;
+ }
+
+ if (do_discard && busyp->length &&
+ !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD))
+ busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
+ else
+ xfs_extent_busy_clear_one(mp, pag, busyp);
+ }
+
+ if (pag) {
+ spin_unlock(&pag->pagb_lock);
+ xfs_perag_put(pag);
+ }
+}
+
+/*
+ * Callback for list_sort to sort busy extents by the AG they reside in.
+ */
+int
+xfs_extent_busy_ag_cmp(
+ void *priv,
+ struct list_head *a,
+ struct list_head *b)
+{
+ return container_of(a, struct xfs_extent_busy, list)->agno -
+ container_of(b, struct xfs_extent_busy, list)->agno;
+}
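As the fail label above notes, callers of xfs_extent_busy_trim() must re-check minlen themselves. A hedged sketch of a caller along those lines (variable names tbno/tlen are hypothetical; the reaction of forcing the log before retrying follows the function's header comment):

	xfs_agblock_t	tbno;
	xfs_extlen_t	tlen;

	xfs_extent_busy_trim(args, fbno, flen, &tbno, &tlen);
	if (tlen < args->minlen) {
		/* nothing usable now; flush busy extents, then retry */
		xfs_log_force(args->mp, XFS_LOG_SYNC);
		return 0;
	}
	/* otherwise allocate from [tbno, tbno + tlen) */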
diff --git a/fs/xfs/xfs_extent_busy.h b/fs/xfs/xfs_extent_busy.h
new file mode 100644
index 000000000000..985412d65ba5
--- /dev/null
+++ b/fs/xfs/xfs_extent_busy.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2010 David Chinner.
+ * Copyright (c) 2011 Christoph Hellwig.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __XFS_EXTENT_BUSY_H__
+#define __XFS_EXTENT_BUSY_H__
+
+/*
+ * Busy block/extent entry. Indexed by an rbtree in perag to mark blocks that
+ * have been freed but whose transactions aren't committed to disk yet.
+ *
+ * Note that we use the transaction ID to record the transaction, not the
+ * transaction structure itself. See xfs_extent_busy_insert() for details.
+ */
+struct xfs_extent_busy {
+ struct rb_node rb_node; /* ag by-bno indexed search tree */
+ struct list_head list; /* transaction busy extent list */
+ xfs_agnumber_t agno;
+ xfs_agblock_t bno;
+ xfs_extlen_t length;
+ unsigned int flags;
+#define XFS_EXTENT_BUSY_DISCARDED 0x01 /* undergoing a discard op. */
+#define XFS_EXTENT_BUSY_SKIP_DISCARD 0x02 /* do not discard */
+};
+
+void
+xfs_extent_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
+ xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags);
+
+void
+xfs_extent_busy_clear(struct xfs_mount *mp, struct list_head *list,
+ bool do_discard);
+
+int
+xfs_extent_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
+ xfs_agblock_t bno, xfs_extlen_t len);
+
+void
+xfs_extent_busy_reuse(struct xfs_mount *mp, xfs_agnumber_t agno,
+ xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata);
+
+void
+xfs_extent_busy_trim(struct xfs_alloc_arg *args, xfs_agblock_t bno,
+ xfs_extlen_t len, xfs_agblock_t *rbno, xfs_extlen_t *rlen);
+
+int
+xfs_extent_busy_ag_cmp(void *priv, struct list_head *a, struct list_head *b);
+
+static inline void xfs_extent_busy_sort(struct list_head *list)
+{
+ list_sort(NULL, list, xfs_extent_busy_ag_cmp);
+}
+
+#endif /* __XFS_EXTENT_BUSY_H__ */
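Tying the pieces together: a transaction (or CIL context) accumulates busy extents on its list as blocks are freed, and when the commit reaches the log the list is sorted per AG and cleared. A hedged sketch of that commit-side usage, assuming only the fields visible in this patch (tp->t_busy, tp->t_mountp):

	/* at commit-completion time */
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy,
			      false /* no online discard in this sketch */);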
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 35c2aff38b20..feb36d7551ae 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -19,7 +19,6 @@
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_sb.h"
@@ -64,7 +63,8 @@ __xfs_efi_release(
if (!test_and_clear_bit(XFS_EFI_COMMITTED, &efip->efi_flags)) {
spin_lock(&ailp->xa_lock);
/* xfs_trans_ail_delete() drops the AIL lock. */
- xfs_trans_ail_delete(ailp, &efip->efi_item);
+ xfs_trans_ail_delete(ailp, &efip->efi_item,
+ SHUTDOWN_LOG_IO_ERROR);
xfs_efi_item_free(efip);
}
}
@@ -147,22 +147,20 @@ xfs_efi_item_unpin(
}
/*
- * Efi items have no locking or pushing. However, since EFIs are
- * pulled from the AIL when their corresponding EFDs are committed
- * to disk, their situation is very similar to being pinned. Return
- * XFS_ITEM_PINNED so that the caller will eventually flush the log.
- * This should help in getting the EFI out of the AIL.
+ * Efi items have no locking or pushing. However, since EFIs are pulled from
+ * the AIL when their corresponding EFDs are committed to disk, their situation
+ * is very similar to being pinned. Return XFS_ITEM_PINNED so that the caller
+ * will eventually flush the log. This should help in getting the EFI out of
+ * the AIL.
*/
STATIC uint
-xfs_efi_item_trylock(
- struct xfs_log_item *lip)
+xfs_efi_item_push(
+ struct xfs_log_item *lip,
+ struct list_head *buffer_list)
{
return XFS_ITEM_PINNED;
}
-/*
- * Efi items have no locking, so just return.
- */
STATIC void
xfs_efi_item_unlock(
struct xfs_log_item *lip)
@@ -190,17 +188,6 @@ xfs_efi_item_committed(
}
/*
- * There isn't much you can do to push on an efi item. It is simply
- * stuck waiting for all of its corresponding efd items to be
- * committed to disk.
- */
-STATIC void
-xfs_efi_item_push(
- struct xfs_log_item *lip)
-{
-}
-
-/*
* The EFI dependency tracking op doesn't do squat. It can't because
* it doesn't know where the free extent is coming from. The dependency
* tracking has to be handled by the "enclosing" metadata object. For
@@ -222,7 +209,6 @@ static const struct xfs_item_ops xfs_efi_item_ops = {
.iop_format = xfs_efi_item_format,
.iop_pin = xfs_efi_item_pin,
.iop_unpin = xfs_efi_item_unpin,
- .iop_trylock = xfs_efi_item_trylock,
.iop_unlock = xfs_efi_item_unlock,
.iop_committed = xfs_efi_item_committed,
.iop_push = xfs_efi_item_push,
@@ -404,19 +390,17 @@ xfs_efd_item_unpin(
}
/*
- * Efd items have no locking, so just return success.
+ * There isn't much you can do to push on an efd item. It is simply stuck
+ * waiting for the log to be flushed to disk.
*/
STATIC uint
-xfs_efd_item_trylock(
- struct xfs_log_item *lip)
+xfs_efd_item_push(
+ struct xfs_log_item *lip,
+ struct list_head *buffer_list)
{
- return XFS_ITEM_LOCKED;
+ return XFS_ITEM_PINNED;
}
-/*
- * Efd items have no locking or pushing, so return failure
- * so that the caller doesn't bother with us.
- */
STATIC void
xfs_efd_item_unlock(
struct xfs_log_item *lip)
@@ -451,16 +435,6 @@ xfs_efd_item_committed(
}
/*
- * There isn't much you can do to push on an efd item. It is simply
- * stuck waiting for the log to be flushed to disk.
- */
-STATIC void
-xfs_efd_item_push(
- struct xfs_log_item *lip)
-{
-}
-
-/*
* The EFD dependency tracking op doesn't do squat. It can't because
* it doesn't know where the free extent is coming from. The dependency
* tracking has to be handled by the "enclosing" metadata object. For
@@ -482,7 +456,6 @@ static const struct xfs_item_ops xfs_efd_item_ops = {
.iop_format = xfs_efd_item_format,
.iop_pin = xfs_efd_item_pin,
.iop_unpin = xfs_efd_item_unpin,
- .iop_trylock = xfs_efd_item_trylock,
.iop_unlock = xfs_efd_item_unlock,
.iop_committed = xfs_efd_item_committed,
.iop_push = xfs_efd_item_push,
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 54a67dd9ac0a..8d214b87f6bb 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -17,9 +17,7 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
@@ -396,114 +394,96 @@ xfs_file_splice_write(
}
/*
- * This routine is called to handle zeroing any space in the last
- * block of the file that is beyond the EOF. We do this since the
- * size is being increased without writing anything to that block
- * and we don't want anyone to read the garbage on the disk.
+ * This routine is called to handle zeroing any space in the last block of the
+ * file that is beyond the EOF. We do this since the size is being increased
+ * without writing anything to that block and we don't want to read the
+ * garbage on the disk.
*/
STATIC int /* error (positive) */
xfs_zero_last_block(
- xfs_inode_t *ip,
- xfs_fsize_t offset,
- xfs_fsize_t isize)
+ struct xfs_inode *ip,
+ xfs_fsize_t offset,
+ xfs_fsize_t isize)
{
- xfs_fileoff_t last_fsb;
- xfs_mount_t *mp = ip->i_mount;
- int nimaps;
- int zero_offset;
- int zero_len;
- int error = 0;
- xfs_bmbt_irec_t imap;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
- zero_offset = XFS_B_FSB_OFFSET(mp, isize);
- if (zero_offset == 0) {
- /*
- * There are no extra bytes in the last block on disk to
- * zero, so return.
- */
- return 0;
- }
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t last_fsb = XFS_B_TO_FSBT(mp, isize);
+ int zero_offset = XFS_B_FSB_OFFSET(mp, isize);
+ int zero_len;
+ int nimaps = 1;
+ int error = 0;
+ struct xfs_bmbt_irec imap;
- last_fsb = XFS_B_TO_FSBT(mp, isize);
- nimaps = 1;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error)
return error;
+
ASSERT(nimaps > 0);
+
/*
* If the block underlying isize is just a hole, then there
* is nothing to zero.
*/
- if (imap.br_startblock == HOLESTARTBLOCK) {
+ if (imap.br_startblock == HOLESTARTBLOCK)
return 0;
- }
- /*
- * Zero the part of the last block beyond the EOF, and write it
- * out sync. We need to drop the ilock while we do this so we
- * don't deadlock when the buffer cache calls back to us.
- */
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
zero_len = mp->m_sb.sb_blocksize - zero_offset;
if (isize + zero_len > offset)
zero_len = offset - isize;
- error = xfs_iozero(ip, isize, zero_len);
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- ASSERT(error >= 0);
- return error;
+ return xfs_iozero(ip, isize, zero_len);
}
/*
- * Zero any on disk space between the current EOF and the new,
- * larger EOF. This handles the normal case of zeroing the remainder
- * of the last block in the file and the unusual case of zeroing blocks
- * out beyond the size of the file. This second case only happens
- * with fixed size extents and when the system crashes before the inode
- * size was updated but after blocks were allocated. If fill is set,
- * then any holes in the range are filled and zeroed. If not, the holes
- * are left alone as holes.
+ * Zero any on disk space between the current EOF and the new, larger EOF.
+ *
+ * This handles the normal case of zeroing the remainder of the last block in
+ * the file and the unusual case of zeroing blocks out beyond the size of the
+ * file. This second case only happens with fixed size extents and when the
+ * system crashes before the inode size was updated but after blocks were
+ * allocated.
+ *
+ * Expects the iolock to be held exclusive, and will take the ilock internally.
*/
-
int /* error (positive) */
xfs_zero_eof(
- xfs_inode_t *ip,
- xfs_off_t offset, /* starting I/O offset */
- xfs_fsize_t isize) /* current inode size */
+ struct xfs_inode *ip,
+ xfs_off_t offset, /* starting I/O offset */
+ xfs_fsize_t isize) /* current inode size */
{
- xfs_mount_t *mp = ip->i_mount;
- xfs_fileoff_t start_zero_fsb;
- xfs_fileoff_t end_zero_fsb;
- xfs_fileoff_t zero_count_fsb;
- xfs_fileoff_t last_fsb;
- xfs_fileoff_t zero_off;
- xfs_fsize_t zero_len;
- int nimaps;
- int error = 0;
- xfs_bmbt_irec_t imap;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t start_zero_fsb;
+ xfs_fileoff_t end_zero_fsb;
+ xfs_fileoff_t zero_count_fsb;
+ xfs_fileoff_t last_fsb;
+ xfs_fileoff_t zero_off;
+ xfs_fsize_t zero_len;
+ int nimaps;
+ int error = 0;
+ struct xfs_bmbt_irec imap;
+
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(offset > isize);
/*
* First handle zeroing the block on which isize resides.
+ *
* We only zero a part of that block so it is handled specially.
*/
- error = xfs_zero_last_block(ip, offset, isize);
- if (error) {
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
- return error;
+ if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
+ error = xfs_zero_last_block(ip, offset, isize);
+ if (error)
+ return error;
}
/*
- * Calculate the range between the new size and the old
- * where blocks needing to be zeroed may exist. To get the
- * block where the last byte in the file currently resides,
- * we need to subtract one from the size and truncate back
- * to a block boundary. We subtract 1 in case the size is
- * exactly on a block boundary.
+ * Calculate the range between the new size and the old where blocks
+ * needing to be zeroed may exist.
+ *
+ * To get the block where the last byte in the file currently resides,
+ * we need to subtract one from the size and truncate back to a block
+ * boundary. We subtract 1 in case the size is exactly on a block
+ * boundary.
*/
last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
@@ -521,23 +501,18 @@ xfs_zero_eof(
while (start_zero_fsb <= end_zero_fsb) {
nimaps = 1;
zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
&imap, &nimaps, 0);
- if (error) {
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ if (error)
return error;
- }
+
ASSERT(nimaps > 0);
if (imap.br_state == XFS_EXT_UNWRITTEN ||
imap.br_startblock == HOLESTARTBLOCK) {
- /*
- * This loop handles initializing pages that were
- * partially initialized by the code below this
- * loop. It basically zeroes the part of the page
- * that sits on a hole and sets the page as P_HOLE
- * and calls remapf if it is a mapped file.
- */
start_zero_fsb = imap.br_startoff + imap.br_blockcount;
ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
continue;
@@ -545,11 +520,7 @@ xfs_zero_eof(
/*
* There are blocks we need to zero.
- * Drop the inode lock while we're doing the I/O.
- * We'll still have the iolock to protect us.
*/
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
@@ -557,22 +528,14 @@ xfs_zero_eof(
zero_len = offset - zero_off;
error = xfs_iozero(ip, zero_off, zero_len);
- if (error) {
- goto out_lock;
- }
+ if (error)
+ return error;
start_zero_fsb = imap.br_startoff + imap.br_blockcount;
ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
}
return 0;
-
-out_lock:
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- ASSERT(error >= 0);
- return error;
}
/*
@@ -593,35 +556,29 @@ xfs_file_aio_write_checks(
struct xfs_inode *ip = XFS_I(inode);
int error = 0;
- xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
restart:
error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
- if (error) {
- xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
+ if (error)
return error;
- }
/*
* If the offset is beyond the size of the file, we need to zero any
* blocks that fall between the existing EOF and the start of this
* write. If zeroing is needed and we are currently holding the
- * iolock shared, we need to update it to exclusive which involves
- * dropping all locks and relocking to maintain correct locking order.
- * If we do this, restart the function to ensure all checks and values
- * are still valid.
+	 * iolock shared, we need to update it to exclusive, which implies
+	 * redoing all the checks we have done so far.
*/
if (*pos > i_size_read(inode)) {
if (*iolock == XFS_IOLOCK_SHARED) {
- xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
+ xfs_rw_iunlock(ip, *iolock);
*iolock = XFS_IOLOCK_EXCL;
- xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
+ xfs_rw_ilock(ip, *iolock);
goto restart;
}
error = -xfs_zero_eof(ip, *pos, i_size_read(inode));
+ if (error)
+ return error;
}
- xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
- if (error)
- return error;
/*
* Updating the timestamps will grab the ilock again from
@@ -638,7 +595,6 @@ restart:
* people from modifying setuid and setgid binaries.
*/
return file_remove_suid(file);
-
}
/*
@@ -1007,8 +963,149 @@ xfs_vm_page_mkwrite(
return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}
+STATIC loff_t
+xfs_seek_data(
+ struct file *file,
+ loff_t start,
+ u32 type)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_bmbt_irec map[2];
+ int nmap = 2;
+ loff_t uninitialized_var(offset);
+ xfs_fsize_t isize;
+ xfs_fileoff_t fsbno;
+ xfs_filblks_t end;
+ uint lock;
+ int error;
+
+ lock = xfs_ilock_map_shared(ip);
+
+ isize = i_size_read(inode);
+ if (start >= isize) {
+ error = ENXIO;
+ goto out_unlock;
+ }
+
+ fsbno = XFS_B_TO_FSBT(mp, start);
+
+ /*
+ * Try to read extents from the first block indicated
+ * by fsbno to the end block of the file.
+ */
+ end = XFS_B_TO_FSB(mp, isize);
+
+ error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
+ XFS_BMAPI_ENTIRE);
+ if (error)
+ goto out_unlock;
+
+ /*
+	 * Treat an unwritten extent as a data extent since it might
+	 * contain dirty data in the page cache.
+ */
+ if (map[0].br_startblock != HOLESTARTBLOCK) {
+ offset = max_t(loff_t, start,
+ XFS_FSB_TO_B(mp, map[0].br_startoff));
+ } else {
+ if (nmap == 1) {
+ error = ENXIO;
+ goto out_unlock;
+ }
+
+ offset = max_t(loff_t, start,
+ XFS_FSB_TO_B(mp, map[1].br_startoff));
+ }
+
+ if (offset != file->f_pos)
+ file->f_pos = offset;
+
+out_unlock:
+ xfs_iunlock_map_shared(ip, lock);
+
+ if (error)
+ return -error;
+ return offset;
+}
+
+STATIC loff_t
+xfs_seek_hole(
+ struct file *file,
+ loff_t start,
+ u32 type)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ loff_t uninitialized_var(offset);
+ loff_t holeoff;
+ xfs_fsize_t isize;
+ xfs_fileoff_t fsbno;
+ uint lock;
+ int error;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -XFS_ERROR(EIO);
+
+ lock = xfs_ilock_map_shared(ip);
+
+ isize = i_size_read(inode);
+ if (start >= isize) {
+ error = ENXIO;
+ goto out_unlock;
+ }
+
+ fsbno = XFS_B_TO_FSBT(mp, start);
+ error = xfs_bmap_first_unused(NULL, ip, 1, &fsbno, XFS_DATA_FORK);
+ if (error)
+ goto out_unlock;
+
+ holeoff = XFS_FSB_TO_B(mp, fsbno);
+ if (holeoff <= start)
+ offset = start;
+ else {
+ /*
+ * xfs_bmap_first_unused() could return a value bigger than
+ * isize if there are no more holes past the supplied offset.
+ */
+ offset = min_t(loff_t, holeoff, isize);
+ }
+
+ if (offset != file->f_pos)
+ file->f_pos = offset;
+
+out_unlock:
+ xfs_iunlock_map_shared(ip, lock);
+
+ if (error)
+ return -error;
+ return offset;
+}
+
+STATIC loff_t
+xfs_file_llseek(
+ struct file *file,
+ loff_t offset,
+ int origin)
+{
+ switch (origin) {
+ case SEEK_END:
+ case SEEK_CUR:
+ case SEEK_SET:
+ return generic_file_llseek(file, offset, origin);
+ case SEEK_DATA:
+ return xfs_seek_data(file, offset, origin);
+ case SEEK_HOLE:
+ return xfs_seek_hole(file, offset, origin);
+ default:
+ return -EINVAL;
+ }
+}
+
const struct file_operations xfs_file_operations = {
- .llseek = generic_file_llseek,
+ .llseek = xfs_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.aio_read = xfs_file_aio_read,
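From user space the new llseek cases are reached through plain lseek(2). A minimal, self-contained example (not part of this patch; it only assumes a kernel and libc that define SEEK_DATA and SEEK_HOLE):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		int fd;
		off_t data, hole;

		if (argc != 2)
			return 1;
		fd = open(argv[1], O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		data = lseek(fd, 0, SEEK_DATA);		/* first data at/after 0 */
		if (data < 0) {
			perror("SEEK_DATA");		/* ENXIO: no data past offset */
			close(fd);
			return 1;
		}
		hole = lseek(fd, data, SEEK_HOLE);	/* first hole after that data */
		printf("data at %lld, next hole at %lld\n",
		       (long long)data, (long long)hole);
		close(fd);
		return 0;
	}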
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 1c6fdeb702ff..c25b094efbf7 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -18,8 +18,6 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
-#include "xfs_bit.h"
-#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
@@ -39,7 +37,6 @@
#include "xfs_itable.h"
#include "xfs_trans_space.h"
#include "xfs_rtalloc.h"
-#include "xfs_rw.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
@@ -147,9 +144,9 @@ xfs_growfs_data_private(
if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
return error;
dpct = pct - mp->m_sb.sb_imax_pct;
- bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
+ bp = xfs_buf_read_uncached(mp->m_ddev_targp,
XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
- BBTOB(XFS_FSS_TO_BB(mp, 1)), 0);
+ XFS_FSS_TO_BB(mp, 1), 0);
if (!bp)
return EIO;
xfs_buf_relse(bp);
@@ -193,7 +190,7 @@ xfs_growfs_data_private(
*/
bp = xfs_buf_get(mp->m_ddev_targp,
XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
- XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED);
+ XFS_FSS_TO_BB(mp, 1), 0);
if (!bp) {
error = ENOMEM;
goto error0;
@@ -230,7 +227,7 @@ xfs_growfs_data_private(
*/
bp = xfs_buf_get(mp->m_ddev_targp,
XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
- XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED);
+ XFS_FSS_TO_BB(mp, 1), 0);
if (!bp) {
error = ENOMEM;
goto error0;
@@ -259,8 +256,7 @@ xfs_growfs_data_private(
*/
bp = xfs_buf_get(mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
- BTOBB(mp->m_sb.sb_blocksize),
- XBF_LOCK | XBF_MAPPED);
+ BTOBB(mp->m_sb.sb_blocksize), 0);
if (!bp) {
error = ENOMEM;
goto error0;
@@ -286,8 +282,7 @@ xfs_growfs_data_private(
*/
bp = xfs_buf_get(mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
- BTOBB(mp->m_sb.sb_blocksize),
- XBF_LOCK | XBF_MAPPED);
+ BTOBB(mp->m_sb.sb_blocksize), 0);
if (!bp) {
error = ENOMEM;
goto error0;
@@ -314,8 +309,7 @@ xfs_growfs_data_private(
*/
bp = xfs_buf_get(mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
- BTOBB(mp->m_sb.sb_blocksize),
- XBF_LOCK | XBF_MAPPED);
+ BTOBB(mp->m_sb.sb_blocksize), 0);
if (!bp) {
error = ENOMEM;
goto error0;
@@ -405,7 +399,7 @@ xfs_growfs_data_private(
/* update secondary superblocks. */
for (agno = 1; agno < nagcount; agno++) {
- error = xfs_read_buf(mp, mp->m_ddev_targp,
+ error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
XFS_FSS_TO_BB(mp, 1), 0, &bp);
if (error) {
@@ -693,3 +687,63 @@ xfs_fs_goingdown(
return 0;
}
+
+/*
+ * Force a shutdown of the filesystem instantly while keeping the filesystem
+ * consistent. We don't do an unmount here; just shut down the shop and make sure
+ * that absolutely nothing persistent happens to this filesystem after this
+ * point.
+ */
+void
+xfs_do_force_shutdown(
+ xfs_mount_t *mp,
+ int flags,
+ char *fname,
+ int lnnum)
+{
+ int logerror;
+
+ logerror = flags & SHUTDOWN_LOG_IO_ERROR;
+
+ if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
+ xfs_notice(mp,
+ "%s(0x%x) called from line %d of file %s. Return address = 0x%p",
+ __func__, flags, lnnum, fname, __return_address);
+ }
+ /*
+ * No need to duplicate efforts.
+ */
+ if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
+ return;
+
+ /*
+ * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
+ * queue up anybody new on the log reservations, and wakes up
+ * everybody who's sleeping on log reservations to tell them
+ * the bad news.
+ */
+ if (xfs_log_force_umount(mp, logerror))
+ return;
+
+ if (flags & SHUTDOWN_CORRUPT_INCORE) {
+ xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
+ "Corruption of in-memory data detected. Shutting down filesystem");
+ if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
+ xfs_stack_trace();
+ } else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
+ if (logerror) {
+ xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
+ "Log I/O Error Detected. Shutting down filesystem");
+ } else if (flags & SHUTDOWN_DEVICE_REQ) {
+ xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
+ "All device paths lost. Shutting down filesystem");
+ } else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
+ xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
+ "I/O Error Detected. Shutting down filesystem");
+ }
+ }
+ if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
+ xfs_alert(mp,
+ "Please umount the filesystem and rectify the problem(s)");
+ }
+}
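Callers normally reach xfs_do_force_shutdown() through the xfs_force_shutdown() wrapper macro, which supplies __FILE__/__LINE__ for the diagnostic above. A hedged sketch of the typical pattern; the triggering condition is hypothetical:

	if (looks_corrupt)			/* hypothetical corruption check */
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

	/* later operations bail out once the shutdown flag is set */
	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);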
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index dad1a31aa4fc..177a21a7ac49 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -200,8 +200,7 @@ xfs_ialloc_inode_init(
*/
d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
- mp->m_bsize * blks_per_cluster,
- XBF_LOCK);
+ mp->m_bsize * blks_per_cluster, 0);
if (!fbuf)
return ENOMEM;
/*
@@ -610,6 +609,13 @@ xfs_ialloc_get_rec(
/*
* Visible inode allocation functions.
*/
+/*
+ * Find a free (set) bit in the inode bitmask.
+ */
+static inline int xfs_ialloc_find_free(xfs_inofree_t *fp)
+{
+ return xfs_lowbit64(*fp);
+}
/*
* Allocate an inode on disk.
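xfs_ialloc_find_free() just returns the index of the lowest set bit in the 64-bit per-chunk free mask. A hedged illustration of how an allocation path can turn that into an inode number, assuming rec is an xfs_inobt_rec_incore read from the inode btree with at least one free inode (locking and error handling omitted):

	struct xfs_inobt_rec_incore	rec;	/* filled from the inobt */
	int				offset;
	xfs_ino_t			ino;

	offset = xfs_ialloc_find_free(&rec.ir_free);
	ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
	rec.ir_free &= ~XFS_INOBT_MASK(offset);
	rec.ir_freecount--;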
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h
index 666a037398d6..65ac57c8063c 100644
--- a/fs/xfs/xfs_ialloc.h
+++ b/fs/xfs/xfs_ialloc.h
@@ -47,15 +47,6 @@ xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o)
}
/*
- * Find a free (set) bit in the inode bitmask.
- */
-static inline int xfs_ialloc_find_free(xfs_inofree_t *fp)
-{
- return xfs_lowbit64(*fp);
-}
-
-
-/*
* Allocate an inode on disk.
* Mode is used to tell whether the new inode will need space, and whether
* it is a directory.
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index c6a75815aea0..2b8b7a37aa18 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -20,7 +20,6 @@
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index bcc6c249b2c7..1bb4365e8c25 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -19,7 +19,6 @@
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_acl.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
@@ -123,23 +122,7 @@ xfs_inode_free(
xfs_idestroy_fork(ip, XFS_ATTR_FORK);
if (ip->i_itemp) {
- /*
- * Only if we are shutting down the fs will we see an
- * inode still in the AIL. If it is there, we should remove
- * it to prevent a use-after-free from occurring.
- */
- xfs_log_item_t *lip = &ip->i_itemp->ili_item;
- struct xfs_ail *ailp = lip->li_ailp;
-
- ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
- XFS_FORCED_SHUTDOWN(ip->i_mount));
- if (lip->li_flags & XFS_LI_IN_AIL) {
- spin_lock(&ailp->xa_lock);
- if (lip->li_flags & XFS_LI_IN_AIL)
- xfs_trans_ail_delete(ailp, lip);
- else
- spin_unlock(&ailp->xa_lock);
- }
+ ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
xfs_inode_item_destroy(ip);
ip->i_itemp = NULL;
}
@@ -334,9 +317,10 @@ xfs_iget_cache_miss(
/*
* Preload the radix tree so we can insert safely under the
* write spinlock. Note that we cannot sleep inside the preload
- * region.
+ * region. Since we can be called from transaction context, don't
+ * recurse into the file system.
*/
- if (radix_tree_preload(GFP_KERNEL)) {
+ if (radix_tree_preload(GFP_NOFS)) {
error = EAGAIN;
goto out_destroy;
}
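The switch to GFP_NOFS matters because the preload happens before taking the per-AG inode cache lock and may be reached from transaction context. A sketch of the general preload/insert pattern, using the same objects (pag, agino, ip) as xfs_iget_cache_miss:

	/* preload outside the lock, insert under it */
	if (radix_tree_preload(GFP_NOFS))
		return EAGAIN;

	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();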
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index bc46c0a133d3..a59eea09930a 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -20,7 +20,6 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
@@ -61,6 +60,20 @@ STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
+/*
+ * helper function to extract extent size hint from inode
+ */
+xfs_extlen_t
+xfs_get_extsz_hint(
+ struct xfs_inode *ip)
+{
+ if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
+ return ip->i_d.di_extsize;
+ if (XFS_IS_REALTIME_INODE(ip))
+ return ip->i_mount->m_sb.sb_rextsize;
+ return 0;
+}
+
#ifdef DEBUG
/*
* Make sure that the extents in the given memory buffer
@@ -137,6 +150,7 @@ xfs_imap_to_bp(
int ni;
xfs_buf_t *bp;
+ buf_flags |= XBF_UNMAPPED;
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
(int)imap->im_len, buf_flags, &bp);
if (error) {
@@ -226,7 +240,7 @@ xfs_inotobp(
if (error)
return error;
- error = xfs_imap_to_bp(mp, tp, &imap, &bp, XBF_LOCK, imap_flags);
+ error = xfs_imap_to_bp(mp, tp, &imap, &bp, 0, imap_flags);
if (error)
return error;
@@ -782,8 +796,7 @@ xfs_iread(
/*
* Get pointers to the on-disk inode and the buffer containing it.
*/
- error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp,
- XBF_LOCK, iget_flags);
+ error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, 0, iget_flags);
if (error)
return error;
dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
@@ -1342,7 +1355,7 @@ xfs_iunlink(
* Here we put the head pointer into our next pointer,
* and then we fall through to point the head at us.
*/
- error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
+ error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
if (error)
return error;
@@ -1423,7 +1436,7 @@ xfs_iunlink_remove(
* of dealing with the buffer when there is no need to
* change it.
*/
- error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
+ error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
if (error) {
xfs_warn(mp, "%s: xfs_itobp() returned error %d.",
__func__, error);
@@ -1484,7 +1497,7 @@ xfs_iunlink_remove(
* Now last_ibp points to the buffer previous to us on
* the unlinked list. Pull us from the list.
*/
- error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
+ error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
if (error) {
xfs_warn(mp, "%s: xfs_itobp(2) returned error %d.",
__func__, error);
@@ -1566,8 +1579,7 @@ xfs_ifree_cluster(
* to mark all the active inodes on the buffer stale.
*/
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
- mp->m_bsize * blks_per_cluster,
- XBF_LOCK);
+ mp->m_bsize * blks_per_cluster, 0);
if (!bp)
return ENOMEM;
@@ -1737,7 +1749,7 @@ xfs_ifree(
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XBF_LOCK);
+ error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0);
if (error)
return error;
@@ -2347,11 +2359,11 @@ cluster_corrupt_out:
*/
rcu_read_unlock();
/*
- * Clean up the buffer. If it was B_DELWRI, just release it --
+ * Clean up the buffer. If it was delwri, just release it --
* brelse can handle it with no problems. If not, shut down the
* filesystem before releasing the buffer.
*/
- bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp);
+ bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
if (bufwasdelwri)
xfs_buf_relse(bp);
@@ -2377,30 +2389,29 @@ cluster_corrupt_out:
/*
* Unlocks the flush lock
*/
- xfs_iflush_abort(iq);
+ xfs_iflush_abort(iq, false);
kmem_free(ilist);
xfs_perag_put(pag);
return XFS_ERROR(EFSCORRUPTED);
}
/*
- * xfs_iflush() will write a modified inode's changes out to the
- * inode's on disk home. The caller must have the inode lock held
- * in at least shared mode and the inode flush completion must be
- * active as well. The inode lock will still be held upon return from
- * the call and the caller is free to unlock it.
- * The inode flush will be completed when the inode reaches the disk.
- * The flags indicate how the inode's buffer should be written out.
+ * Flush dirty inode metadata into the backing buffer.
+ *
+ * The caller must have the inode lock and the inode flush lock held. The
+ * inode lock will still be held upon return to the caller, and the inode
+ * flush lock will be released after the inode has reached the disk.
+ *
+ * The caller must write out the buffer returned in *bpp and release it.
*/
int
xfs_iflush(
- xfs_inode_t *ip,
- uint flags)
+ struct xfs_inode *ip,
+ struct xfs_buf **bpp)
{
- xfs_inode_log_item_t *iip;
- xfs_buf_t *bp;
- xfs_dinode_t *dip;
- xfs_mount_t *mp;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_buf *bp;
+ struct xfs_dinode *dip;
int error;
XFS_STATS_INC(xs_iflush_count);
@@ -2410,25 +2421,8 @@ xfs_iflush(
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
- iip = ip->i_itemp;
- mp = ip->i_mount;
+ *bpp = NULL;
- /*
- * We can't flush the inode until it is unpinned, so wait for it if we
- * are allowed to block. We know no one new can pin it, because we are
- * holding the inode lock shared and you need to hold it exclusively to
- * pin the inode.
- *
- * If we are not allowed to block, force the log out asynchronously so
- * that when we come back the inode will be unpinned. If other inodes
- * in the same cluster are dirty, they will probably write the inode
- * out for us if they occur after the log force completes.
- */
- if (!(flags & SYNC_WAIT) && xfs_ipincount(ip)) {
- xfs_iunpin(ip);
- xfs_ifunlock(ip);
- return EAGAIN;
- }
xfs_iunpin_wait(ip);
/*
@@ -2447,20 +2441,20 @@ xfs_iflush(
/*
* This may have been unpinned because the filesystem is shutting
* down forcibly. If that's the case we must not write this inode
- * to disk, because the log record didn't make it to disk!
+ * to disk, because the log record didn't make it to disk.
+ *
+ * We also have to remove the log item from the AIL in this case,
+ * as we wait for an empty AIL as part of the unmount process.
*/
if (XFS_FORCED_SHUTDOWN(mp)) {
- if (iip)
- iip->ili_fields = 0;
- xfs_ifunlock(ip);
- return XFS_ERROR(EIO);
+ error = XFS_ERROR(EIO);
+ goto abort_out;
}
/*
* Get the buffer containing the on-disk inode.
*/
- error = xfs_itobp(mp, NULL, ip, &dip, &bp,
- (flags & SYNC_TRYLOCK) ? XBF_TRYLOCK : XBF_LOCK);
+ error = xfs_itobp(mp, NULL, ip, &dip, &bp, XBF_TRYLOCK);
if (error || !bp) {
xfs_ifunlock(ip);
return error;
@@ -2488,23 +2482,20 @@ xfs_iflush(
if (error)
goto cluster_corrupt_out;
- if (flags & SYNC_WAIT)
- error = xfs_bwrite(bp);
- else
- xfs_buf_delwri_queue(bp);
-
- xfs_buf_relse(bp);
- return error;
+ *bpp = bp;
+ return 0;
corrupt_out:
xfs_buf_relse(bp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
cluster_corrupt_out:
+ error = XFS_ERROR(EFSCORRUPTED);
+abort_out:
/*
* Unlocks the flush lock
*/
- xfs_iflush_abort(ip);
- return XFS_ERROR(EFSCORRUPTED);
+ xfs_iflush_abort(ip, false);
+ return error;
}
@@ -2706,27 +2697,6 @@ corrupt_out:
return XFS_ERROR(EFSCORRUPTED);
}
-void
-xfs_promote_inode(
- struct xfs_inode *ip)
-{
- struct xfs_buf *bp;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
-
- bp = xfs_incore(ip->i_mount->m_ddev_targp, ip->i_imap.im_blkno,
- ip->i_imap.im_len, XBF_TRYLOCK);
- if (!bp)
- return;
-
- if (XFS_BUF_ISDELAYWRITE(bp)) {
- xfs_buf_delwri_promote(bp);
- wake_up_process(ip->i_mount->m_ddev_targp->bt_task);
- }
-
- xfs_buf_relse(bp);
-}
-
/*
* Return a pointer to the extent record at file index idx.
*/
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 7fee3387e1c8..1efff36a75b6 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -529,11 +529,12 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
void xfs_iext_realloc(xfs_inode_t *, int, int);
void xfs_iunpin_wait(xfs_inode_t *);
-int xfs_iflush(xfs_inode_t *, uint);
-void xfs_promote_inode(struct xfs_inode *);
+int xfs_iflush(struct xfs_inode *, struct xfs_buf **);
void xfs_lock_inodes(xfs_inode_t **, int, uint);
void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
+xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
+
#define IHOLD(ip) \
do { \
ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 05d924efceaf..6cdbf90c6f7b 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -18,9 +18,7 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -480,25 +478,16 @@ xfs_inode_item_unpin(
wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
}
-/*
- * This is called to attempt to lock the inode associated with this
- * inode log item, in preparation for the push routine which does the actual
- * iflush. Don't sleep on the inode lock or the flush lock.
- *
- * If the flush lock is already held, indicating that the inode has
- * been or is in the process of being flushed, then (ideally) we'd like to
- * see if the inode's buffer is still incore, and if so give it a nudge.
- * We delay doing so until the pushbuf routine, though, to avoid holding
- * the AIL lock across a call to the blackhole which is the buffer cache.
- * Also we don't want to sleep in any device strategy routines, which can happen
- * if we do the subsequent bawrite in here.
- */
STATIC uint
-xfs_inode_item_trylock(
- struct xfs_log_item *lip)
+xfs_inode_item_push(
+ struct xfs_log_item *lip,
+ struct list_head *buffer_list)
{
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode;
+ struct xfs_buf *bp = NULL;
+ uint rval = XFS_ITEM_SUCCESS;
+ int error;
if (xfs_ipincount(ip) > 0)
return XFS_ITEM_PINNED;
@@ -506,30 +495,50 @@ xfs_inode_item_trylock(
if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
return XFS_ITEM_LOCKED;
+ /*
+ * Re-check the pincount now that we stabilized the value by
+ * taking the ilock.
+ */
+ if (xfs_ipincount(ip) > 0) {
+ rval = XFS_ITEM_PINNED;
+ goto out_unlock;
+ }
+
+ /*
+ * Someone else is already flushing the inode. Nothing we can do
+ * here but wait for the flush to finish and remove the item from
+ * the AIL.
+ */
if (!xfs_iflock_nowait(ip)) {
- /*
- * inode has already been flushed to the backing buffer,
- * leave it locked in shared mode, pushbuf routine will
- * unlock it.
- */
- return XFS_ITEM_PUSHBUF;
+ rval = XFS_ITEM_FLUSHING;
+ goto out_unlock;
}
- /* Stale items should force out the iclog */
+ /*
+ * Stale inode items should force out the iclog.
+ */
if (ip->i_flags & XFS_ISTALE) {
xfs_ifunlock(ip);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return XFS_ITEM_PINNED;
}
-#ifdef DEBUG
- if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- ASSERT(iip->ili_fields != 0);
- ASSERT(iip->ili_logged == 0);
- ASSERT(lip->li_flags & XFS_LI_IN_AIL);
+ ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
+ ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
+
+ spin_unlock(&lip->li_ailp->xa_lock);
+
+ error = xfs_iflush(ip, &bp);
+ if (!error) {
+ if (!xfs_buf_delwri_queue(bp, buffer_list))
+ rval = XFS_ITEM_FLUSHING;
+ xfs_buf_relse(bp);
}
-#endif
- return XFS_ITEM_SUCCESS;
+
+ spin_lock(&lip->li_ailp->xa_lock);
+out_unlock:
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ return rval;
}
/*
@@ -614,86 +623,6 @@ xfs_inode_item_committed(
}
/*
- * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK
- * failed to get the inode flush lock but did get the inode locked SHARED.
- * Here we're trying to see if the inode buffer is incore, and if so whether it's
- * marked delayed write. If that's the case, we'll promote it and that will
- * allow the caller to write the buffer by triggering the xfsbufd to run.
- */
-STATIC bool
-xfs_inode_item_pushbuf(
- struct xfs_log_item *lip)
-{
- struct xfs_inode_log_item *iip = INODE_ITEM(lip);
- struct xfs_inode *ip = iip->ili_inode;
- struct xfs_buf *bp;
- bool ret = true;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
-
- /*
- * If a flush is not in progress anymore, chances are that the
- * inode was taken off the AIL. So, just get out.
- */
- if (!xfs_isiflocked(ip) ||
- !(lip->li_flags & XFS_LI_IN_AIL)) {
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
- return true;
- }
-
- bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
- iip->ili_format.ilf_len, XBF_TRYLOCK);
-
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
- if (!bp)
- return true;
- if (XFS_BUF_ISDELAYWRITE(bp))
- xfs_buf_delwri_promote(bp);
- if (xfs_buf_ispinned(bp))
- ret = false;
- xfs_buf_relse(bp);
- return ret;
-}
-
-/*
- * This is called to asynchronously write the inode associated with this
- * inode log item out to disk. The inode will already have been locked by
- * a successful call to xfs_inode_item_trylock().
- */
-STATIC void
-xfs_inode_item_push(
- struct xfs_log_item *lip)
-{
- struct xfs_inode_log_item *iip = INODE_ITEM(lip);
- struct xfs_inode *ip = iip->ili_inode;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
- ASSERT(xfs_isiflocked(ip));
-
- /*
- * Since we were able to lock the inode's flush lock and
- * we found it on the AIL, the inode must be dirty. This
- * is because the inode is removed from the AIL while still
- * holding the flush lock in xfs_iflush_done(). Thus, if
- * we found it in the AIL and were able to obtain the flush
- * lock without sleeping, then there must not have been
- * anyone in the process of flushing the inode.
- */
- ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || iip->ili_fields != 0);
-
- /*
- * Push the inode to it's backing buffer. This will not remove the
- * inode from the AIL - a further push will be required to trigger a
- * buffer push. However, this allows all the dirty inodes to be pushed
- * to the buffer before it is pushed to disk. The buffer IO completion
- * will pull the inode from the AIL, mark it clean and unlock the flush
- * lock.
- */
- (void) xfs_iflush(ip, SYNC_TRYLOCK);
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
-}
-
-/*
* XXX rcc - this one really has to do something. Probably needs
* to stamp in a new field in the incore inode.
*/
@@ -713,11 +642,9 @@ static const struct xfs_item_ops xfs_inode_item_ops = {
.iop_format = xfs_inode_item_format,
.iop_pin = xfs_inode_item_pin,
.iop_unpin = xfs_inode_item_unpin,
- .iop_trylock = xfs_inode_item_trylock,
.iop_unlock = xfs_inode_item_unlock,
.iop_committed = xfs_inode_item_committed,
.iop_push = xfs_inode_item_push,
- .iop_pushbuf = xfs_inode_item_pushbuf,
.iop_committing = xfs_inode_item_committing
};
@@ -848,7 +775,8 @@ xfs_iflush_done(
ASSERT(i <= need_ail);
}
/* xfs_trans_ail_delete_bulk() drops the AIL lock. */
- xfs_trans_ail_delete_bulk(ailp, log_items, i);
+ xfs_trans_ail_delete_bulk(ailp, log_items, i,
+ SHUTDOWN_CORRUPT_INCORE);
}
@@ -869,16 +797,15 @@ xfs_iflush_done(
}
/*
- * This is the inode flushing abort routine. It is called
- * from xfs_iflush when the filesystem is shutting down to clean
- * up the inode state.
- * It is responsible for removing the inode item
- * from the AIL if it has not been re-logged, and unlocking the inode's
- * flush lock.
+ * This is the inode flushing abort routine. It is called from xfs_iflush when
+ * the filesystem is shutting down to clean up the inode state. It is
+ * responsible for removing the inode item from the AIL if it has not been
+ * re-logged, and unlocking the inode's flush lock.
*/
void
xfs_iflush_abort(
- xfs_inode_t *ip)
+ xfs_inode_t *ip,
+ bool stale)
{
xfs_inode_log_item_t *iip = ip->i_itemp;
@@ -888,7 +815,10 @@ xfs_iflush_abort(
spin_lock(&ailp->xa_lock);
if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
/* xfs_trans_ail_delete() drops the AIL lock. */
- xfs_trans_ail_delete(ailp, (xfs_log_item_t *)iip);
+ xfs_trans_ail_delete(ailp, &iip->ili_item,
+ stale ?
+ SHUTDOWN_LOG_IO_ERROR :
+ SHUTDOWN_CORRUPT_INCORE);
} else
spin_unlock(&ailp->xa_lock);
}
@@ -915,7 +845,7 @@ xfs_istale_done(
struct xfs_buf *bp,
struct xfs_log_item *lip)
{
- xfs_iflush_abort(INODE_ITEM(lip)->ili_inode);
+ xfs_iflush_abort(INODE_ITEM(lip)->ili_inode, true);
}
/*
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index 41d61c3b7a36..376d4d0b2635 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -165,7 +165,7 @@ extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *);
extern void xfs_inode_item_destroy(struct xfs_inode *);
extern void xfs_iflush_done(struct xfs_buf *, struct xfs_log_item *);
extern void xfs_istale_done(struct xfs_buf *, struct xfs_log_item *);
-extern void xfs_iflush_abort(struct xfs_inode *);
+extern void xfs_iflush_abort(struct xfs_inode *, bool);
extern int xfs_inode_item_format_convert(xfs_log_iovec_t *,
xfs_inode_log_format_t *);
diff --git a/fs/xfs/xfs_inum.h b/fs/xfs/xfs_inum.h
index b253c0ea5bec..90efdaf1706f 100644
--- a/fs/xfs/xfs_inum.h
+++ b/fs/xfs/xfs_inum.h
@@ -26,11 +26,6 @@
* high agno_log-agblklog-inopblog bits - 0
*/
-typedef __uint32_t xfs_agino_t; /* within allocation grp inode number */
-
-#define NULLFSINO ((xfs_ino_t)-1)
-#define NULLAGINO ((xfs_agino_t)-1)
-
struct xfs_mount;
#define XFS_INO_MASK(k) (__uint32_t)((1ULL << (k)) - 1)
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 91f8ff547ab3..3a05a41b5d76 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -17,9 +17,7 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index a849a5473aff..c4f2da0d2bf5 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -22,9 +22,7 @@
#include <asm/uaccess.h>
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 71a464503c43..aadfce6681ee 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -17,9 +17,7 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -37,7 +35,6 @@
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
-#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
@@ -142,11 +139,7 @@ xfs_iomap_write_direct(
int committed;
int error;
- /*
- * Make sure that the dquots are there. This doesn't hold
- * the ilock across a disk read.
- */
- error = xfs_qm_dqattach_locked(ip, 0);
+ error = xfs_qm_dqattach(ip, 0);
if (error)
return XFS_ERROR(error);
@@ -158,7 +151,7 @@ xfs_iomap_write_direct(
if ((offset + count) > XFS_ISIZE(ip)) {
error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
if (error)
- goto error_out;
+ return XFS_ERROR(error);
} else {
if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
last_fsb = MIN(last_fsb, (xfs_fileoff_t)
@@ -190,7 +183,6 @@ xfs_iomap_write_direct(
/*
* Allocate and setup the transaction
*/
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
error = xfs_trans_reserve(tp, resblks,
XFS_WRITE_LOG_RES(mp), resrtextents,
@@ -199,15 +191,16 @@ xfs_iomap_write_direct(
/*
* Check for running out of space, note: need lock to return
*/
- if (error)
+ if (error) {
xfs_trans_cancel(tp, 0);
+ return XFS_ERROR(error);
+ }
+
xfs_ilock(ip, XFS_ILOCK_EXCL);
- if (error)
- goto error_out;
error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
if (error)
- goto error1;
+ goto out_trans_cancel;
xfs_trans_ijoin(tp, ip, 0);
@@ -224,42 +217,39 @@ xfs_iomap_write_direct(
error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flag,
&firstfsb, 0, imap, &nimaps, &free_list);
if (error)
- goto error0;
+ goto out_bmap_cancel;
/*
* Complete the transaction
*/
error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error)
- goto error0;
+ goto out_bmap_cancel;
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
if (error)
- goto error_out;
+ goto out_unlock;
/*
* Copy any maps to caller's array and return any error.
*/
if (nimaps == 0) {
- error = ENOSPC;
- goto error_out;
+ error = XFS_ERROR(ENOSPC);
+ goto out_unlock;
}
- if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) {
+ if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
error = xfs_alert_fsblock_zero(ip, imap);
- goto error_out;
- }
- return 0;
+out_unlock:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
-error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
+out_bmap_cancel:
xfs_bmap_cancel(&free_list);
- xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
-
-error1: /* Just cancel transaction */
+ xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
+out_trans_cancel:
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
-
-error_out:
- return XFS_ERROR(error);
+ goto out_unlock;
}
/*
@@ -422,6 +412,15 @@ retry:
return error;
}
+ /*
+ * Make sure preallocation does not create extents beyond the range we
+ * actually support in this filesystem.
+ */
+ if (last_fsb > XFS_B_TO_FSB(mp, mp->m_maxioffset))
+ last_fsb = XFS_B_TO_FSB(mp, mp->m_maxioffset);
+
+ ASSERT(last_fsb > offset_fsb);
+
nimaps = XFS_WRITE_IMAPS;
error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
imap, &nimaps, XFS_BMAPI_ENTIRE);
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 3011b879f850..1a25fd802798 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -18,9 +18,7 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_acl.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -34,7 +32,6 @@
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
-#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
@@ -700,7 +697,7 @@ xfs_setattr_size(
xfs_off_t oldsize, newsize;
struct xfs_trans *tp;
int error;
- uint lock_flags;
+ uint lock_flags = 0;
uint commit_flags = 0;
trace_xfs_setattr(ip);
@@ -720,10 +717,10 @@ xfs_setattr_size(
ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID|
ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
- lock_flags = XFS_ILOCK_EXCL;
- if (!(flags & XFS_ATTR_NOLOCK))
+ if (!(flags & XFS_ATTR_NOLOCK)) {
lock_flags |= XFS_IOLOCK_EXCL;
- xfs_ilock(ip, lock_flags);
+ xfs_ilock(ip, lock_flags);
+ }
oldsize = inode->i_size;
newsize = iattr->ia_size;
@@ -746,7 +743,7 @@ xfs_setattr_size(
/*
* Make sure that the dquots are attached to the inode.
*/
- error = xfs_qm_dqattach_locked(ip, 0);
+ error = xfs_qm_dqattach(ip, 0);
if (error)
goto out_unlock;
@@ -768,8 +765,6 @@ xfs_setattr_size(
if (error)
goto out_unlock;
}
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- lock_flags &= ~XFS_ILOCK_EXCL;
/*
* We are going to log the inode size change in this transaction so
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index acc2bf264dab..eff577a9b67f 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -18,7 +18,6 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 6db1fef38bff..6b965bf450e4 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -18,9 +18,7 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -35,7 +33,6 @@
#include "xfs_trans_priv.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_rw.h"
#include "xfs_trace.h"
kmem_zone_t *xfs_log_ticket_zone;
@@ -916,27 +913,42 @@ xfs_log_need_covered(xfs_mount_t *mp)
* We may be holding the log iclog lock upon entering this routine.
*/
xfs_lsn_t
-xlog_assign_tail_lsn(
+xlog_assign_tail_lsn_locked(
struct xfs_mount *mp)
{
- xfs_lsn_t tail_lsn;
struct log *log = mp->m_log;
+ struct xfs_log_item *lip;
+ xfs_lsn_t tail_lsn;
+
+ assert_spin_locked(&mp->m_ail->xa_lock);
/*
* To make sure we always have a valid LSN for the log tail we keep
* track of the last LSN which was committed in log->l_last_sync_lsn,
- * and use that when the AIL was empty and xfs_ail_min_lsn returns 0.
- *
- * If the AIL has been emptied we also need to wake any process
- * waiting for this condition.
+ * and use that when the AIL was empty.
*/
- tail_lsn = xfs_ail_min_lsn(mp->m_ail);
- if (!tail_lsn)
+ lip = xfs_ail_min(mp->m_ail);
+ if (lip)
+ tail_lsn = lip->li_lsn;
+ else
tail_lsn = atomic64_read(&log->l_last_sync_lsn);
atomic64_set(&log->l_tail_lsn, tail_lsn);
return tail_lsn;
}
+xfs_lsn_t
+xlog_assign_tail_lsn(
+ struct xfs_mount *mp)
+{
+ xfs_lsn_t tail_lsn;
+
+ spin_lock(&mp->m_ail->xa_lock);
+ tail_lsn = xlog_assign_tail_lsn_locked(mp);
+ spin_unlock(&mp->m_ail->xa_lock);
+
+ return tail_lsn;
+}
+
/*
* Return the space in the log between the tail and the head. The head
* is passed in the cycle/bytes formal parms. In the special case where
@@ -1172,7 +1184,7 @@ xlog_alloc_log(xfs_mount_t *mp,
xlog_get_iclog_buffer_size(mp, log);
error = ENOMEM;
- bp = xfs_buf_alloc(mp->m_logdev_targp, 0, log->l_iclog_size, 0);
+ bp = xfs_buf_alloc(mp->m_logdev_targp, 0, BTOBB(log->l_iclog_size), 0);
if (!bp)
goto out_free_log;
bp->b_iodone = xlog_iodone;
@@ -1182,9 +1194,6 @@ xlog_alloc_log(xfs_mount_t *mp,
spin_lock_init(&log->l_icloglock);
init_waitqueue_head(&log->l_flush_wait);
- /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
- ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
-
iclogp = &log->l_iclog;
/*
* The amount of memory to allocate for the iclog structure is
@@ -1204,7 +1213,7 @@ xlog_alloc_log(xfs_mount_t *mp,
prev_iclog = iclog;
bp = xfs_buf_get_uncached(mp->m_logdev_targp,
- log->l_iclog_size, 0);
+ BTOBB(log->l_iclog_size), 0);
if (!bp)
goto out_free_iclog;
@@ -1224,7 +1233,7 @@ xlog_alloc_log(xfs_mount_t *mp,
head->h_fmt = cpu_to_be32(XLOG_FMT);
memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
- iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize;
+ iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize;
iclog->ic_state = XLOG_STATE_ACTIVE;
iclog->ic_log = log;
atomic_set(&iclog->ic_refcnt, 0);
@@ -1475,7 +1484,7 @@ xlog_sync(xlog_t *log,
} else {
iclog->ic_bwritecnt = 1;
}
- XFS_BUF_SET_COUNT(bp, count);
+ bp->b_io_length = BTOBB(count);
bp->b_fspriv = iclog;
XFS_BUF_ZEROFLAGS(bp);
XFS_BUF_ASYNC(bp);
@@ -1573,7 +1582,7 @@ xlog_dealloc_log(xlog_t *log)
* always need to ensure that the extra buffer does not point to memory
* owned by another log buffer before we free it.
*/
- xfs_buf_set_empty(log->l_xbuf, log->l_iclog_size);
+ xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size));
xfs_buf_free(log->l_xbuf);
iclog = log->l_iclog;
@@ -2932,6 +2941,7 @@ xfs_log_force(
{
int error;
+ trace_xfs_log_force(mp, 0);
error = _xfs_log_force(mp, flags, NULL);
if (error)
xfs_warn(mp, "%s: error %d returned.", __func__, error);
@@ -3080,6 +3090,7 @@ xfs_log_force_lsn(
{
int error;
+ trace_xfs_log_force(mp, lsn);
error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
if (error)
xfs_warn(mp, "%s: error %d returned.", __func__, error);
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 2c622bedb302..748d312850e2 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -152,6 +152,7 @@ int xfs_log_mount(struct xfs_mount *mp,
int num_bblocks);
int xfs_log_mount_finish(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
+xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void xfs_log_space_wake(struct xfs_mount *mp);
int xfs_log_notify(struct xfs_mount *mp,
struct xlog_in_core *iclog,
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index d4fadbe8ac90..7d6197c58493 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -18,9 +18,7 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log_priv.h"
@@ -29,61 +27,10 @@
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
+#include "xfs_extent_busy.h"
#include "xfs_discard.h"
/*
- * Perform initial CIL structure initialisation.
- */
-int
-xlog_cil_init(
- struct log *log)
-{
- struct xfs_cil *cil;
- struct xfs_cil_ctx *ctx;
-
- cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
- if (!cil)
- return ENOMEM;
-
- ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
- if (!ctx) {
- kmem_free(cil);
- return ENOMEM;
- }
-
- INIT_LIST_HEAD(&cil->xc_cil);
- INIT_LIST_HEAD(&cil->xc_committing);
- spin_lock_init(&cil->xc_cil_lock);
- init_rwsem(&cil->xc_ctx_lock);
- init_waitqueue_head(&cil->xc_commit_wait);
-
- INIT_LIST_HEAD(&ctx->committing);
- INIT_LIST_HEAD(&ctx->busy_extents);
- ctx->sequence = 1;
- ctx->cil = cil;
- cil->xc_ctx = ctx;
- cil->xc_current_sequence = ctx->sequence;
-
- cil->xc_log = log;
- log->l_cilp = cil;
- return 0;
-}
-
-void
-xlog_cil_destroy(
- struct log *log)
-{
- if (log->l_cilp->xc_ctx) {
- if (log->l_cilp->xc_ctx->ticket)
- xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
- kmem_free(log->l_cilp->xc_ctx);
- }
-
- ASSERT(list_empty(&log->l_cilp->xc_cil));
- kmem_free(log->l_cilp);
-}
-
-/*
* Allocate a new ticket. Failing to get a new ticket makes it really hard to
* recover, so we don't allow failure here. Also, we allocate in a context that
* we don't want to be issuing transactions from, so we need to tell the
@@ -390,8 +337,8 @@ xlog_cil_committed(
xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
ctx->start_lsn, abort);
- xfs_alloc_busy_sort(&ctx->busy_extents);
- xfs_alloc_busy_clear(mp, &ctx->busy_extents,
+ xfs_extent_busy_sort(&ctx->busy_extents);
+ xfs_extent_busy_clear(mp, &ctx->busy_extents,
(mp->m_flags & XFS_MOUNT_DISCARD) && !abort);
spin_lock(&ctx->cil->xc_cil_lock);
@@ -404,7 +351,7 @@ xlog_cil_committed(
ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);
xfs_discard_extents(mp, &ctx->busy_extents);
- xfs_alloc_busy_clear(mp, &ctx->busy_extents, false);
+ xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
}
kmem_free(ctx);
@@ -426,8 +373,7 @@ xlog_cil_committed(
*/
STATIC int
xlog_cil_push(
- struct log *log,
- xfs_lsn_t push_seq)
+ struct log *log)
{
struct xfs_cil *cil = log->l_cilp;
struct xfs_log_vec *lv;
@@ -443,39 +389,36 @@ xlog_cil_push(
struct xfs_log_iovec lhdr;
struct xfs_log_vec lvhdr = { NULL };
xfs_lsn_t commit_lsn;
+ xfs_lsn_t push_seq;
if (!cil)
return 0;
- ASSERT(!push_seq || push_seq <= cil->xc_ctx->sequence);
-
new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
new_ctx->ticket = xlog_cil_ticket_alloc(log);
- /*
- * Lock out transaction commit, but don't block for background pushes
- * unless we are well over the CIL space limit. See the definition of
- * XLOG_CIL_HARD_SPACE_LIMIT() for the full explanation of the logic
- * used here.
- */
- if (!down_write_trylock(&cil->xc_ctx_lock)) {
- if (!push_seq &&
- cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log))
- goto out_free_ticket;
- down_write(&cil->xc_ctx_lock);
- }
+ down_write(&cil->xc_ctx_lock);
ctx = cil->xc_ctx;
- /* check if we've anything to push */
- if (list_empty(&cil->xc_cil))
- goto out_skip;
+ spin_lock(&cil->xc_cil_lock);
+ push_seq = cil->xc_push_seq;
+ ASSERT(push_seq <= ctx->sequence);
- /* check for spurious background flush */
- if (!push_seq && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
+ /*
+ * Check if we've anything to push. If there is nothing, then we don't
+ * move on to a new sequence number and so we have to be able to push
+ * this sequence again later.
+ */
+ if (list_empty(&cil->xc_cil)) {
+ cil->xc_push_seq = 0;
+ spin_unlock(&cil->xc_cil_lock);
goto out_skip;
+ }
+ spin_unlock(&cil->xc_cil_lock);
+
	/* check for a previously pushed sequence */
- if (push_seq && push_seq < cil->xc_ctx->sequence)
+ if (push_seq < cil->xc_ctx->sequence)
goto out_skip;
/*
@@ -629,7 +572,6 @@ restart:
out_skip:
up_write(&cil->xc_ctx_lock);
-out_free_ticket:
xfs_log_ticket_put(new_ctx->ticket);
kmem_free(new_ctx);
return 0;
@@ -641,6 +583,82 @@ out_abort:
return XFS_ERROR(EIO);
}
+static void
+xlog_cil_push_work(
+ struct work_struct *work)
+{
+ struct xfs_cil *cil = container_of(work, struct xfs_cil,
+ xc_push_work);
+ xlog_cil_push(cil->xc_log);
+}
+
+/*
+ * We need to push CIL every so often so we don't cache more than we can fit in
+ * the log. The limit really is that a checkpoint can't be more than half the
+ * log (the current checkpoint is not allowed to overwrite the previous
+ * checkpoint), but commit latency and memory usage limit this to a smaller
+ * size.
+ */
+static void
+xlog_cil_push_background(
+ struct log *log)
+{
+ struct xfs_cil *cil = log->l_cilp;
+
+ /*
+	 * The CIL won't be empty because we are called while holding the
+	 * context lock, so whatever we added to the CIL will still be there.
+ */
+ ASSERT(!list_empty(&cil->xc_cil));
+
+ /*
+ * don't do a background push if we haven't used up all the
+ * space available yet.
+ */
+ if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
+ return;
+
+ spin_lock(&cil->xc_cil_lock);
+ if (cil->xc_push_seq < cil->xc_current_sequence) {
+ cil->xc_push_seq = cil->xc_current_sequence;
+ queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
+ }
+ spin_unlock(&cil->xc_cil_lock);
+
+}
+
+static void
+xlog_cil_push_foreground(
+ struct log *log,
+ xfs_lsn_t push_seq)
+{
+ struct xfs_cil *cil = log->l_cilp;
+
+ if (!cil)
+ return;
+
+ ASSERT(push_seq && push_seq <= cil->xc_current_sequence);
+
+ /* start on any pending background push to minimise wait time on it */
+ flush_work(&cil->xc_push_work);
+
+ /*
+ * If the CIL is empty or we've already pushed the sequence then
+ * there's no work we need to do.
+ */
+ spin_lock(&cil->xc_cil_lock);
+ if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
+ spin_unlock(&cil->xc_cil_lock);
+ return;
+ }
+
+ cil->xc_push_seq = push_seq;
+ spin_unlock(&cil->xc_cil_lock);
+
+ /* do the push now */
+ xlog_cil_push(log);
+}
+
/*
* Commit a transaction with the given vector to the Committed Item List.
*
@@ -667,7 +685,6 @@ xfs_log_commit_cil(
{
struct log *log = mp->m_log;
int log_flags = 0;
- int push = 0;
struct xfs_log_vec *log_vector;
if (flags & XFS_TRANS_RELEASE_LOG_RES)
@@ -719,21 +736,9 @@ xfs_log_commit_cil(
*/
xfs_trans_free_items(tp, *commit_lsn, 0);
- /* check for background commit before unlock */
- if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log))
- push = 1;
+ xlog_cil_push_background(log);
up_read(&log->l_cilp->xc_ctx_lock);
-
- /*
- * We need to push CIL every so often so we don't cache more than we
- * can fit in the log. The limit really is that a checkpoint can't be
- * more than half the log (the current checkpoint is not allowed to
- * overwrite the previous checkpoint), but commit latency and memory
- * usage limit this to a smaller size in most cases.
- */
- if (push)
- xlog_cil_push(log, 0);
return 0;
}
@@ -746,9 +751,6 @@ xfs_log_commit_cil(
*
* We return the current commit lsn to allow the callers to determine if a
* iclog flush is necessary following this call.
- *
- * XXX: Initially, just push the CIL unconditionally and return whatever
- * commit lsn is there. It'll be empty, so this is broken for now.
*/
xfs_lsn_t
xlog_cil_force_lsn(
@@ -766,8 +768,7 @@ xlog_cil_force_lsn(
* xlog_cil_push() handles racing pushes for the same sequence,
* so no need to deal with it here.
*/
- if (sequence == cil->xc_current_sequence)
- xlog_cil_push(log, sequence);
+ xlog_cil_push_foreground(log, sequence);
/*
* See if we can find a previous sequence still committing.
@@ -826,3 +827,57 @@ xfs_log_item_in_current_chkpt(
return false;
return true;
}
+
+/*
+ * Perform initial CIL structure initialisation.
+ */
+int
+xlog_cil_init(
+ struct log *log)
+{
+ struct xfs_cil *cil;
+ struct xfs_cil_ctx *ctx;
+
+ cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
+ if (!cil)
+ return ENOMEM;
+
+ ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
+ if (!ctx) {
+ kmem_free(cil);
+ return ENOMEM;
+ }
+
+ INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
+ INIT_LIST_HEAD(&cil->xc_cil);
+ INIT_LIST_HEAD(&cil->xc_committing);
+ spin_lock_init(&cil->xc_cil_lock);
+ init_rwsem(&cil->xc_ctx_lock);
+ init_waitqueue_head(&cil->xc_commit_wait);
+
+ INIT_LIST_HEAD(&ctx->committing);
+ INIT_LIST_HEAD(&ctx->busy_extents);
+ ctx->sequence = 1;
+ ctx->cil = cil;
+ cil->xc_ctx = ctx;
+ cil->xc_current_sequence = ctx->sequence;
+
+ cil->xc_log = log;
+ log->l_cilp = cil;
+ return 0;
+}
+
+void
+xlog_cil_destroy(
+ struct log *log)
+{
+ if (log->l_cilp->xc_ctx) {
+ if (log->l_cilp->xc_ctx->ticket)
+ xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
+ kmem_free(log->l_cilp->xc_ctx);
+ }
+
+ ASSERT(list_empty(&log->l_cilp->xc_cil));
+ kmem_free(log->l_cilp);
+}
+
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 2152900b79d4..735ff1ee53da 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -417,6 +417,8 @@ struct xfs_cil {
struct list_head xc_committing;
wait_queue_head_t xc_commit_wait;
xfs_lsn_t xc_current_sequence;
+ struct work_struct xc_push_work;
+ xfs_lsn_t xc_push_seq;
};
/*
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 8ecad5bad66c..ca386909131a 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -40,7 +40,6 @@
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
-#include "xfs_rw.h"
#include "xfs_utils.h"
#include "xfs_trace.h"
@@ -120,7 +119,7 @@ xlog_get_bp(
nbblks += log->l_sectBBsize;
nbblks = round_up(nbblks, log->l_sectBBsize);
- bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, BBTOB(nbblks), 0);
+ bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
if (bp)
xfs_buf_unlock(bp);
return bp;
@@ -146,7 +145,7 @@ xlog_align(
{
xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
- ASSERT(BBTOB(offset + nbblks) <= XFS_BUF_SIZE(bp));
+ ASSERT(offset + nbblks <= bp->b_length);
return bp->b_addr + BBTOB(offset);
}
@@ -174,11 +173,12 @@ xlog_bread_noalign(
nbblks = round_up(nbblks, log->l_sectBBsize);
ASSERT(nbblks > 0);
- ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
+ ASSERT(nbblks <= bp->b_length);
XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
XFS_BUF_READ(bp);
- XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
+ bp->b_io_length = nbblks;
+ bp->b_error = 0;
xfsbdstrat(log->l_mp, bp);
error = xfs_buf_iowait(bp);
@@ -218,7 +218,7 @@ xlog_bread_offset(
xfs_caddr_t offset)
{
xfs_caddr_t orig_offset = bp->b_addr;
- int orig_len = bp->b_buffer_length;
+ int orig_len = BBTOB(bp->b_length);
int error, error2;
error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
@@ -259,13 +259,14 @@ xlog_bwrite(
nbblks = round_up(nbblks, log->l_sectBBsize);
ASSERT(nbblks > 0);
- ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
+ ASSERT(nbblks <= bp->b_length);
XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
XFS_BUF_ZEROFLAGS(bp);
xfs_buf_hold(bp);
xfs_buf_lock(bp);
- XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
+ bp->b_io_length = nbblks;
+ bp->b_error = 0;
error = xfs_bwrite(bp);
if (error)
@@ -440,6 +441,8 @@ xlog_find_verify_cycle(
* a log sector, or we're out of luck.
*/
bufblks = 1 << ffs(nbblks);
+ while (bufblks > log->l_logBBsize)
+ bufblks >>= 1;
while (!(bp = xlog_get_bp(log, bufblks))) {
bufblks >>= 1;
if (bufblks < log->l_sectBBsize)
@@ -1225,6 +1228,8 @@ xlog_write_log_records(
* log sector, or we're out of luck.
*/
bufblks = 1 << ffs(blocks);
+ while (bufblks > log->l_logBBsize)
+ bufblks >>= 1;
while (!(bp = xlog_get_bp(log, bufblks))) {
bufblks >>= 1;
if (bufblks < sectbb)
@@ -1772,7 +1777,7 @@ xlog_recover_do_inode_buffer(
trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
- inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
+ inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
for (i = 0; i < inodes_per_buf; i++) {
next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
offsetof(xfs_dinode_t, di_next_unlinked);
@@ -1814,7 +1819,8 @@ xlog_recover_do_inode_buffer(
ASSERT(item->ri_buf[item_index].i_addr != NULL);
ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
- ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
+ ASSERT((reg_buf_offset + reg_buf_bytes) <=
+ BBTOB(bp->b_io_length));
/*
* The current logged region contains a copy of the
@@ -1873,8 +1879,8 @@ xlog_recover_do_reg_buffer(
ASSERT(nbits > 0);
ASSERT(item->ri_buf[i].i_addr != NULL);
ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
- ASSERT(XFS_BUF_COUNT(bp) >=
- ((uint)bit << XFS_BLF_SHIFT)+(nbits<<XFS_BLF_SHIFT));
+ ASSERT(BBTOB(bp->b_io_length) >=
+ ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
/*
* Do a sanity check if this is a dquot buffer. Just checking
@@ -2103,6 +2109,7 @@ xlog_recover_do_dquot_buffer(
STATIC int
xlog_recover_buffer_pass2(
xlog_t *log,
+ struct list_head *buffer_list,
xlog_recover_item_t *item)
{
xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
@@ -2123,9 +2130,9 @@ xlog_recover_buffer_pass2(
trace_xfs_log_recover_buf_recover(log, buf_f);
- buf_flags = XBF_LOCK;
- if (!(buf_f->blf_flags & XFS_BLF_INODE_BUF))
- buf_flags |= XBF_MAPPED;
+ buf_flags = 0;
+ if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
+ buf_flags |= XBF_UNMAPPED;
bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
buf_flags);
@@ -2166,14 +2173,14 @@ xlog_recover_buffer_pass2(
*/
if (XFS_DINODE_MAGIC ==
be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
- (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
+ (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
xfs_buf_stale(bp);
error = xfs_bwrite(bp);
} else {
ASSERT(bp->b_target->bt_mount == mp);
bp->b_iodone = xlog_recover_iodone;
- xfs_buf_delwri_queue(bp);
+ xfs_buf_delwri_queue(bp, buffer_list);
}
xfs_buf_relse(bp);
@@ -2183,6 +2190,7 @@ xlog_recover_buffer_pass2(
STATIC int
xlog_recover_inode_pass2(
xlog_t *log,
+ struct list_head *buffer_list,
xlog_recover_item_t *item)
{
xfs_inode_log_format_t *in_f;
@@ -2220,8 +2228,7 @@ xlog_recover_inode_pass2(
}
trace_xfs_log_recover_inode_recover(log, in_f);
- bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
- XBF_LOCK);
+ bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0);
if (!bp) {
error = ENOMEM;
goto error;
@@ -2436,7 +2443,7 @@ xlog_recover_inode_pass2(
write_inode_buffer:
ASSERT(bp->b_target->bt_mount == mp);
bp->b_iodone = xlog_recover_iodone;
- xfs_buf_delwri_queue(bp);
+ xfs_buf_delwri_queue(bp, buffer_list);
xfs_buf_relse(bp);
error:
if (need_free)
@@ -2477,6 +2484,7 @@ xlog_recover_quotaoff_pass1(
STATIC int
xlog_recover_dquot_pass2(
xlog_t *log,
+ struct list_head *buffer_list,
xlog_recover_item_t *item)
{
xfs_mount_t *mp = log->l_mp;
@@ -2530,14 +2538,11 @@ xlog_recover_dquot_pass2(
return XFS_ERROR(EIO);
ASSERT(dq_f->qlf_len == 1);
- error = xfs_read_buf(mp, mp->m_ddev_targp,
- dq_f->qlf_blkno,
- XFS_FSB_TO_BB(mp, dq_f->qlf_len),
- 0, &bp);
- if (error) {
- xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#3)");
+ error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
+ XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp);
+ if (error)
return error;
- }
+
ASSERT(bp);
ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
@@ -2558,7 +2563,7 @@ xlog_recover_dquot_pass2(
ASSERT(dq_f->qlf_size == 2);
ASSERT(bp->b_target->bt_mount == mp);
bp->b_iodone = xlog_recover_iodone;
- xfs_buf_delwri_queue(bp);
+ xfs_buf_delwri_queue(bp, buffer_list);
xfs_buf_relse(bp);
return (0);
@@ -2642,7 +2647,8 @@ xlog_recover_efd_pass2(
* xfs_trans_ail_delete() drops the
* AIL lock.
*/
- xfs_trans_ail_delete(ailp, lip);
+ xfs_trans_ail_delete(ailp, lip,
+ SHUTDOWN_CORRUPT_INCORE);
xfs_efi_item_free(efip);
spin_lock(&ailp->xa_lock);
break;
@@ -2712,21 +2718,22 @@ STATIC int
xlog_recover_commit_pass2(
struct log *log,
struct xlog_recover *trans,
+ struct list_head *buffer_list,
xlog_recover_item_t *item)
{
trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
- return xlog_recover_buffer_pass2(log, item);
+ return xlog_recover_buffer_pass2(log, buffer_list, item);
case XFS_LI_INODE:
- return xlog_recover_inode_pass2(log, item);
+ return xlog_recover_inode_pass2(log, buffer_list, item);
case XFS_LI_EFI:
return xlog_recover_efi_pass2(log, item, trans->r_lsn);
case XFS_LI_EFD:
return xlog_recover_efd_pass2(log, item);
case XFS_LI_DQUOT:
- return xlog_recover_dquot_pass2(log, item);
+ return xlog_recover_dquot_pass2(log, buffer_list, item);
case XFS_LI_QUOTAOFF:
/* nothing to do in pass2 */
return 0;
@@ -2750,8 +2757,9 @@ xlog_recover_commit_trans(
struct xlog_recover *trans,
int pass)
{
- int error = 0;
+ int error = 0, error2;
xlog_recover_item_t *item;
+ LIST_HEAD (buffer_list);
hlist_del(&trans->r_list);
@@ -2760,16 +2768,27 @@ xlog_recover_commit_trans(
return error;
list_for_each_entry(item, &trans->r_itemq, ri_list) {
- if (pass == XLOG_RECOVER_PASS1)
+ switch (pass) {
+ case XLOG_RECOVER_PASS1:
error = xlog_recover_commit_pass1(log, trans, item);
- else
- error = xlog_recover_commit_pass2(log, trans, item);
+ break;
+ case XLOG_RECOVER_PASS2:
+ error = xlog_recover_commit_pass2(log, trans,
+ &buffer_list, item);
+ break;
+ default:
+ ASSERT(0);
+ }
+
if (error)
- return error;
+ goto out;
}
xlog_recover_free_trans(trans);
- return 0;
+
+out:
+ error2 = xfs_buf_delwri_submit(&buffer_list);
+ return error ? error : error2;
}
STATIC int
@@ -3079,7 +3098,7 @@ xlog_recover_process_one_iunlink(
/*
* Get the on disk inode to find the next inode in the bucket.
*/
- error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XBF_LOCK);
+ error = xfs_itobp(mp, NULL, ip, &dip, &ibp, 0);
if (error)
goto fail_iput;
@@ -3639,11 +3658,8 @@ xlog_do_recover(
* First replay the images in the log.
*/
error = xlog_do_log_recovery(log, head_blk, tail_blk);
- if (error) {
+ if (error)
return error;
- }
-
- xfs_flush_buftarg(log->l_mp->m_ddev_targp, 1);
/*
* If IO errors happened during recovery, bail out.
@@ -3670,7 +3686,6 @@ xlog_do_recover(
bp = xfs_getsb(log->l_mp, 0);
XFS_BUF_UNDONE(bp);
ASSERT(!(XFS_BUF_ISWRITE(bp)));
- ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
XFS_BUF_READ(bp);
XFS_BUF_UNASYNC(bp);
xfsbdstrat(log->l_mp, bp);
diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c
index bd672def95ac..331cd9f83a7f 100644
--- a/fs/xfs/xfs_message.c
+++ b/fs/xfs/xfs_message.c
@@ -19,7 +19,6 @@
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 1ffead4b2296..536021fb3d4e 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -22,6 +22,7 @@
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
@@ -37,7 +38,6 @@
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
-#include "xfs_rw.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_utils.h"
@@ -683,8 +683,8 @@ xfs_readsb(xfs_mount_t *mp, int flags)
sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
reread:
- bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
- XFS_SB_DADDR, sector_size, 0);
+ bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
+ BTOBB(sector_size), 0);
if (!bp) {
if (loud)
xfs_warn(mp, "SB buffer read failed");
@@ -1032,9 +1032,9 @@ xfs_check_sizes(xfs_mount_t *mp)
xfs_warn(mp, "filesystem size mismatch detected");
return XFS_ERROR(EFBIG);
}
- bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
+ bp = xfs_buf_read_uncached(mp->m_ddev_targp,
d - XFS_FSS_TO_BB(mp, 1),
- BBTOB(XFS_FSS_TO_BB(mp, 1)), 0);
+ XFS_FSS_TO_BB(mp, 1), 0);
if (!bp) {
xfs_warn(mp, "last sector read failed");
return EIO;
@@ -1047,9 +1047,9 @@ xfs_check_sizes(xfs_mount_t *mp)
xfs_warn(mp, "log size mismatch detected");
return XFS_ERROR(EFBIG);
}
- bp = xfs_buf_read_uncached(mp, mp->m_logdev_targp,
+ bp = xfs_buf_read_uncached(mp->m_logdev_targp,
d - XFS_FSB_TO_BB(mp, 1),
- XFS_FSB_TO_B(mp, 1), 0);
+ XFS_FSB_TO_BB(mp, 1), 0);
if (!bp) {
xfs_warn(mp, "log device read failed");
return EIO;
@@ -1288,7 +1288,7 @@ xfs_mountfs(
XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
if (error) {
xfs_warn(mp, "log mount failed");
- goto out_free_perag;
+ goto out_fail_wait;
}
/*
@@ -1315,7 +1315,7 @@ xfs_mountfs(
!mp->m_sb.sb_inprogress) {
error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
if (error)
- goto out_free_perag;
+ goto out_fail_wait;
}
/*
@@ -1439,6 +1439,10 @@ xfs_mountfs(
IRELE(rip);
out_log_dealloc:
xfs_log_unmount(mp);
+ out_fail_wait:
+ if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
+ xfs_wait_buftarg(mp->m_logdev_targp);
+ xfs_wait_buftarg(mp->m_ddev_targp);
out_free_perag:
xfs_free_perag(mp);
out_remove_uuid:
@@ -1475,15 +1479,15 @@ xfs_unmountfs(
xfs_log_force(mp, XFS_LOG_SYNC);
/*
- * Do a delwri reclaim pass first so that as many dirty inodes are
- * queued up for IO as possible. Then flush the buffers before making
- * a synchronous path to catch all the remaining inodes are reclaimed.
- * This makes the reclaim process as quick as possible by avoiding
- * synchronous writeout and blocking on inodes already in the delwri
- * state as much as possible.
+ * Flush all pending changes from the AIL.
+ */
+ xfs_ail_push_all_sync(mp->m_ail);
+
+ /*
+ * And reclaim all inodes. At this point there should be no dirty
+ * inode, and none should be pinned or locked, but use synchronous
+ * reclaim just to be sure.
*/
- xfs_reclaim_inodes(mp, 0);
- xfs_flush_buftarg(mp->m_ddev_targp, 1);
xfs_reclaim_inodes(mp, SYNC_WAIT);
xfs_qm_unmount(mp);
@@ -1519,15 +1523,12 @@ xfs_unmountfs(
if (error)
xfs_warn(mp, "Unable to update superblock counters. "
"Freespace may not be correct on next mount.");
- xfs_unmountfs_writesb(mp);
/*
- * Make sure all buffers have been flushed and completed before
- * unmounting the log.
+ * At this point we might have modified the superblock again and thus
+	 * added an item to the AIL, so flush it again.
*/
- error = xfs_flush_buftarg(mp->m_ddev_targp, 1);
- if (error)
- xfs_warn(mp, "%d busy buffers during unmount.", error);
+ xfs_ail_push_all_sync(mp->m_ail);
xfs_wait_buftarg(mp->m_ddev_targp);
xfs_log_unmount_write(mp);
@@ -1588,36 +1589,6 @@ xfs_log_sbcount(xfs_mount_t *mp)
return error;
}
-int
-xfs_unmountfs_writesb(xfs_mount_t *mp)
-{
- xfs_buf_t *sbp;
- int error = 0;
-
- /*
- * skip superblock write if fs is read-only, or
- * if we are doing a forced umount.
- */
- if (!((mp->m_flags & XFS_MOUNT_RDONLY) ||
- XFS_FORCED_SHUTDOWN(mp))) {
-
- sbp = xfs_getsb(mp, 0);
-
- XFS_BUF_UNDONE(sbp);
- XFS_BUF_UNREAD(sbp);
- xfs_buf_delwri_dequeue(sbp);
- XFS_BUF_WRITE(sbp);
- XFS_BUF_UNASYNC(sbp);
- ASSERT(sbp->b_target == mp->m_ddev_targp);
- xfsbdstrat(mp, sbp);
- error = xfs_buf_iowait(sbp);
- if (error)
- xfs_buf_ioerror_alert(sbp, __func__);
- xfs_buf_relse(sbp);
- }
- return error;
-}
-
/*
* xfs_mod_sb() can be used to copy arbitrary changes to the
* in-core superblock into the superblock buffer to be logged.
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 9eba73887829..8b89c5ac72d9 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -214,6 +214,7 @@ typedef struct xfs_mount {
struct workqueue_struct *m_data_workqueue;
struct workqueue_struct *m_unwritten_workqueue;
+ struct workqueue_struct *m_cil_workqueue;
} xfs_mount_t;
/*
@@ -378,7 +379,6 @@ extern __uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int xfs_mountfs(xfs_mount_t *mp);
extern void xfs_unmountfs(xfs_mount_t *);
-extern int xfs_unmountfs_writesb(xfs_mount_t *);
extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
uint, int);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 55c6afedc879..249db1987764 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -19,7 +19,6 @@
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -65,7 +64,8 @@ STATIC int
xfs_qm_dquot_walk(
struct xfs_mount *mp,
int type,
- int (*execute)(struct xfs_dquot *dqp))
+ int (*execute)(struct xfs_dquot *dqp, void *data),
+ void *data)
{
struct xfs_quotainfo *qi = mp->m_quotainfo;
struct radix_tree_root *tree = XFS_DQUOT_TREE(qi, type);
@@ -97,7 +97,7 @@ restart:
next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
- error = execute(batch[i]);
+ error = execute(batch[i], data);
if (error == EAGAIN) {
skipped++;
continue;
@@ -129,7 +129,8 @@ restart:
*/
STATIC int
xfs_qm_dqpurge(
- struct xfs_dquot *dqp)
+ struct xfs_dquot *dqp,
+ void *data)
{
struct xfs_mount *mp = dqp->q_mount;
struct xfs_quotainfo *qi = mp->m_quotainfo;
@@ -153,21 +154,7 @@ xfs_qm_dqpurge(
dqp->dq_flags |= XFS_DQ_FREEING;
- /*
- * If we're turning off quotas, we have to make sure that, for
- * example, we don't delete quota disk blocks while dquots are
- * in the process of getting written to those disk blocks.
- * This dquot might well be on AIL, and we can't leave it there
- * if we're turning off quotas. Basically, we need this flush
- * lock, and are willing to block on it.
- */
- if (!xfs_dqflock_nowait(dqp)) {
- /*
- * Block on the flush lock after nudging dquot buffer,
- * if it is incore.
- */
- xfs_dqflock_pushbuf_wait(dqp);
- }
+ xfs_dqflock(dqp);
/*
* If we are turning this type of quotas off, we don't care
@@ -175,16 +162,21 @@ xfs_qm_dqpurge(
* we're unmounting, we do care, so we flush it and wait.
*/
if (XFS_DQ_IS_DIRTY(dqp)) {
- int error;
+ struct xfs_buf *bp = NULL;
+ int error;
/*
* We don't care about getting disk errors here. We need
* to purge this dquot anyway, so we go ahead regardless.
*/
- error = xfs_qm_dqflush(dqp, SYNC_WAIT);
- if (error)
+ error = xfs_qm_dqflush(dqp, &bp);
+ if (error) {
xfs_warn(mp, "%s: dquot %p flush failed",
__func__, dqp);
+ } else {
+ error = xfs_bwrite(bp);
+ xfs_buf_relse(bp);
+ }
xfs_dqflock(dqp);
}
@@ -226,11 +218,11 @@ xfs_qm_dqpurge_all(
uint flags)
{
if (flags & XFS_QMOPT_UQUOTA)
- xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge);
+ xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
if (flags & XFS_QMOPT_GQUOTA)
- xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge);
+ xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
if (flags & XFS_QMOPT_PQUOTA)
- xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge);
+ xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}
/*
@@ -483,6 +475,23 @@ done:
xfs_dqunlock(udq);
}
+static bool
+xfs_qm_need_dqattach(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+
+ if (!XFS_IS_QUOTA_RUNNING(mp))
+ return false;
+ if (!XFS_IS_QUOTA_ON(mp))
+ return false;
+ if (!XFS_NOT_DQATTACHED(mp, ip))
+ return false;
+ if (ip->i_ino == mp->m_sb.sb_uquotino ||
+ ip->i_ino == mp->m_sb.sb_gquotino)
+ return false;
+ return true;
+}
/*
* Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
@@ -500,11 +509,7 @@ xfs_qm_dqattach_locked(
uint nquotas = 0;
int error = 0;
- if (!XFS_IS_QUOTA_RUNNING(mp) ||
- !XFS_IS_QUOTA_ON(mp) ||
- !XFS_NOT_DQATTACHED(mp, ip) ||
- ip->i_ino == mp->m_sb.sb_uquotino ||
- ip->i_ino == mp->m_sb.sb_gquotino)
+ if (!xfs_qm_need_dqattach(ip))
return 0;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
@@ -575,6 +580,9 @@ xfs_qm_dqattach(
{
int error;
+ if (!xfs_qm_need_dqattach(ip))
+ return 0;
+
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_qm_dqattach_locked(ip, flags);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -855,15 +863,16 @@ xfs_qm_reset_dqcounts(
STATIC int
xfs_qm_dqiter_bufs(
- xfs_mount_t *mp,
- xfs_dqid_t firstid,
- xfs_fsblock_t bno,
- xfs_filblks_t blkcnt,
- uint flags)
+ struct xfs_mount *mp,
+ xfs_dqid_t firstid,
+ xfs_fsblock_t bno,
+ xfs_filblks_t blkcnt,
+ uint flags,
+ struct list_head *buffer_list)
{
- xfs_buf_t *bp;
- int error;
- int type;
+ struct xfs_buf *bp;
+ int error;
+ int type;
ASSERT(blkcnt > 0);
type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
@@ -887,7 +896,7 @@ xfs_qm_dqiter_bufs(
break;
xfs_qm_reset_dqcounts(mp, bp, firstid, type);
- xfs_buf_delwri_queue(bp);
+ xfs_buf_delwri_queue(bp, buffer_list);
xfs_buf_relse(bp);
/*
* goto the next block.
@@ -895,6 +904,7 @@ xfs_qm_dqiter_bufs(
bno++;
firstid += mp->m_quotainfo->qi_dqperchunk;
}
+
return error;
}
@@ -904,11 +914,12 @@ xfs_qm_dqiter_bufs(
*/
STATIC int
xfs_qm_dqiterate(
- xfs_mount_t *mp,
- xfs_inode_t *qip,
- uint flags)
+ struct xfs_mount *mp,
+ struct xfs_inode *qip,
+ uint flags,
+ struct list_head *buffer_list)
{
- xfs_bmbt_irec_t *map;
+ struct xfs_bmbt_irec *map;
int i, nmaps; /* number of map entries */
int error; /* return value */
xfs_fileoff_t lblkno;
@@ -975,21 +986,17 @@ xfs_qm_dqiterate(
* Iterate thru all the blks in the extent and
* reset the counters of all the dquots inside them.
*/
- if ((error = xfs_qm_dqiter_bufs(mp,
- firstid,
- map[i].br_startblock,
- map[i].br_blockcount,
- flags))) {
- break;
- }
+ error = xfs_qm_dqiter_bufs(mp, firstid,
+ map[i].br_startblock,
+ map[i].br_blockcount,
+ flags, buffer_list);
+ if (error)
+ goto out;
}
-
- if (error)
- break;
} while (nmaps > 0);
+out:
kmem_free(map);
-
return error;
}
@@ -1182,8 +1189,11 @@ error0:
STATIC int
xfs_qm_flush_one(
- struct xfs_dquot *dqp)
+ struct xfs_dquot *dqp,
+ void *data)
{
+ struct list_head *buffer_list = data;
+ struct xfs_buf *bp = NULL;
int error = 0;
xfs_dqlock(dqp);
@@ -1192,11 +1202,13 @@ xfs_qm_flush_one(
if (!XFS_DQ_IS_DIRTY(dqp))
goto out_unlock;
- if (!xfs_dqflock_nowait(dqp))
- xfs_dqflock_pushbuf_wait(dqp);
-
- error = xfs_qm_dqflush(dqp, 0);
+ xfs_dqflock(dqp);
+ error = xfs_qm_dqflush(dqp, &bp);
+ if (error)
+ goto out_unlock;
+ xfs_buf_delwri_queue(bp, buffer_list);
+ xfs_buf_relse(bp);
out_unlock:
xfs_dqunlock(dqp);
return error;
@@ -1215,6 +1227,7 @@ xfs_qm_quotacheck(
size_t structsz;
xfs_inode_t *uip, *gip;
uint flags;
+ LIST_HEAD (buffer_list);
count = INT_MAX;
structsz = 1;
@@ -1233,7 +1246,8 @@ xfs_qm_quotacheck(
*/
uip = mp->m_quotainfo->qi_uquotaip;
if (uip) {
- error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA);
+ error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
+ &buffer_list);
if (error)
goto error_return;
flags |= XFS_UQUOTA_CHKD;
@@ -1242,7 +1256,8 @@ xfs_qm_quotacheck(
gip = mp->m_quotainfo->qi_gquotaip;
if (gip) {
error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
- XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
+ XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA,
+ &buffer_list);
if (error)
goto error_return;
flags |= XFS_OQUOTA_CHKD;
@@ -1265,19 +1280,27 @@ xfs_qm_quotacheck(
* We've made all the changes that we need to make incore. Flush them
* down to disk buffers if everything was updated successfully.
*/
- if (XFS_IS_UQUOTA_ON(mp))
- error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one);
+ if (XFS_IS_UQUOTA_ON(mp)) {
+ error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
+ &buffer_list);
+ }
if (XFS_IS_GQUOTA_ON(mp)) {
- error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one);
+ error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
+ &buffer_list);
if (!error)
error = error2;
}
if (XFS_IS_PQUOTA_ON(mp)) {
- error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one);
+ error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
+ &buffer_list);
if (!error)
error = error2;
}
+ error2 = xfs_buf_delwri_submit(&buffer_list);
+ if (!error)
+ error = error2;
+
/*
* We can get this error if we couldn't do a dquot allocation inside
* xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
@@ -1291,15 +1314,6 @@ xfs_qm_quotacheck(
}
/*
- * We didn't log anything, because if we crashed, we'll have to
- * start the quotacheck from scratch anyway. However, we must make
- * sure that our dquot changes are secure before we put the
- * quotacheck'd stamp on the superblock. So, here we do a synchronous
- * flush.
- */
- xfs_flush_buftarg(mp->m_ddev_targp, 1);
-
- /*
* If one type of quotas is off, then it will lose its
* quotachecked status, since we won't be doing accounting for
* that type anymore.
@@ -1308,6 +1322,13 @@ xfs_qm_quotacheck(
mp->m_qflags |= flags;
error_return:
+ while (!list_empty(&buffer_list)) {
+ struct xfs_buf *bp =
+ list_first_entry(&buffer_list, struct xfs_buf, b_list);
+ list_del_init(&bp->b_list);
+ xfs_buf_relse(bp);
+ }
+
if (error) {
xfs_warn(mp,
"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
@@ -1424,6 +1445,7 @@ xfs_qm_dqfree_one(
STATIC void
xfs_qm_dqreclaim_one(
struct xfs_dquot *dqp,
+ struct list_head *buffer_list,
struct list_head *dispose_list)
{
struct xfs_mount *mp = dqp->q_mount;
@@ -1456,25 +1478,20 @@ xfs_qm_dqreclaim_one(
if (!xfs_dqflock_nowait(dqp))
goto out_busy;
- /*
- * We have the flush lock so we know that this is not in the
- * process of being flushed. So, if this is dirty, flush it
- * DELWRI so that we don't get a freelist infested with
- * dirty dquots.
- */
if (XFS_DQ_IS_DIRTY(dqp)) {
+ struct xfs_buf *bp = NULL;
+
trace_xfs_dqreclaim_dirty(dqp);
- /*
- * We flush it delayed write, so don't bother releasing the
- * freelist lock.
- */
- error = xfs_qm_dqflush(dqp, 0);
+ error = xfs_qm_dqflush(dqp, &bp);
if (error) {
xfs_warn(mp, "%s: dquot %p flush failed",
__func__, dqp);
+ goto out_busy;
}
+ xfs_buf_delwri_queue(bp, buffer_list);
+ xfs_buf_relse(bp);
/*
* Give the dquot another try on the freelist, as the
* flushing will take some time.
@@ -1518,8 +1535,10 @@ xfs_qm_shake(
struct xfs_quotainfo *qi =
container_of(shrink, struct xfs_quotainfo, qi_shrinker);
int nr_to_scan = sc->nr_to_scan;
+ LIST_HEAD (buffer_list);
LIST_HEAD (dispose_list);
struct xfs_dquot *dqp;
+ int error;
if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
return 0;
@@ -1532,15 +1551,20 @@ xfs_qm_shake(
break;
dqp = list_first_entry(&qi->qi_lru_list, struct xfs_dquot,
q_lru);
- xfs_qm_dqreclaim_one(dqp, &dispose_list);
+ xfs_qm_dqreclaim_one(dqp, &buffer_list, &dispose_list);
}
mutex_unlock(&qi->qi_lru_lock);
+ error = xfs_buf_delwri_submit(&buffer_list);
+ if (error)
+ xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
+
while (!list_empty(&dispose_list)) {
dqp = list_first_entry(&dispose_list, struct xfs_dquot, q_lru);
list_del_init(&dqp->q_lru);
xfs_qm_dqfree_one(dqp);
}
+
out:
return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure;
}
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index e6986b5d80d8..6b39115bf145 100644
--- a/fs/xfs/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
@@ -17,9 +17,7 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index c4f396e437a8..858a3b186110 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -22,7 +22,6 @@
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 7e76f537abb7..fed504fc2999 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -17,7 +17,6 @@
*/
#include "xfs.h"
#include "xfs_sb.h"
-#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index e44ef7ee8ce8..30ff5f401d28 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -19,7 +19,6 @@
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index ca4f31534a0a..92d4331cd4f1 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -20,7 +20,6 @@
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -34,7 +33,6 @@
#include "xfs_rtalloc.h"
#include "xfs_fsops.h"
#include "xfs_error.h"
-#include "xfs_rw.h"
#include "xfs_inode_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
@@ -1872,9 +1870,9 @@ xfs_growfs_rt(
/*
* Read in the last block of the device, make sure it exists.
*/
- bp = xfs_buf_read_uncached(mp, mp->m_rtdev_targp,
+ bp = xfs_buf_read_uncached(mp->m_rtdev_targp,
XFS_FSB_TO_BB(mp, nrblocks - 1),
- XFS_FSB_TO_B(mp, 1), 0);
+ XFS_FSB_TO_BB(mp, 1), 0);
if (!bp)
return EIO;
xfs_buf_relse(bp);
@@ -2219,9 +2217,9 @@ xfs_rtmount_init(
(unsigned long long) mp->m_sb.sb_rblocks);
return XFS_ERROR(EFBIG);
}
- bp = xfs_buf_read_uncached(mp, mp->m_rtdev_targp,
+ bp = xfs_buf_read_uncached(mp->m_rtdev_targp,
d - XFS_FSB_TO_BB(mp, 1),
- XFS_FSB_TO_B(mp, 1), 0);
+ XFS_FSB_TO_BB(mp, 1), 0);
if (!bp) {
xfs_warn(mp, "realtime device size check failed");
return EIO;
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
deleted file mode 100644
index 597d044a09a1..000000000000
--- a/fs/xfs/xfs_rw.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2000-2006 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_error.h"
-#include "xfs_rw.h"
-
-/*
- * Force a shutdown of the filesystem instantly while keeping
- * the filesystem consistent. We don't do an unmount here; just shutdown
- * the shop, make sure that absolutely nothing persistent happens to
- * this filesystem after this point.
- */
-void
-xfs_do_force_shutdown(
- xfs_mount_t *mp,
- int flags,
- char *fname,
- int lnnum)
-{
- int logerror;
-
- logerror = flags & SHUTDOWN_LOG_IO_ERROR;
-
- if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
- xfs_notice(mp,
- "%s(0x%x) called from line %d of file %s. Return address = 0x%p",
- __func__, flags, lnnum, fname, __return_address);
- }
- /*
- * No need to duplicate efforts.
- */
- if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
- return;
-
- /*
- * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
- * queue up anybody new on the log reservations, and wakes up
- * everybody who's sleeping on log reservations to tell them
- * the bad news.
- */
- if (xfs_log_force_umount(mp, logerror))
- return;
-
- if (flags & SHUTDOWN_CORRUPT_INCORE) {
- xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
- "Corruption of in-memory data detected. Shutting down filesystem");
- if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
- xfs_stack_trace();
- } else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
- if (logerror) {
- xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
- "Log I/O Error Detected. Shutting down filesystem");
- } else if (flags & SHUTDOWN_DEVICE_REQ) {
- xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
- "All device paths lost. Shutting down filesystem");
- } else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
- xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
- "I/O Error Detected. Shutting down filesystem");
- }
- }
- if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
- xfs_alert(mp,
- "Please umount the filesystem and rectify the problem(s)");
- }
-}
-
-/*
- * This isn't an absolute requirement, but it is
- * just a good idea to call xfs_read_buf instead of
- * directly doing a read_buf call. For one, we shouldn't
- * be doing this disk read if we are in SHUTDOWN state anyway,
- * so this stops that from happening. Secondly, this does all
- * the error checking stuff and the brelse if appropriate for
- * the caller, so the code can be a little leaner.
- */
-
-int
-xfs_read_buf(
- struct xfs_mount *mp,
- xfs_buftarg_t *target,
- xfs_daddr_t blkno,
- int len,
- uint flags,
- xfs_buf_t **bpp)
-{
- xfs_buf_t *bp;
- int error;
-
- if (!flags)
- flags = XBF_LOCK | XBF_MAPPED;
-
- bp = xfs_buf_read(target, blkno, len, flags);
- if (!bp)
- return XFS_ERROR(EIO);
- error = bp->b_error;
- if (!error && !XFS_FORCED_SHUTDOWN(mp)) {
- *bpp = bp;
- } else {
- *bpp = NULL;
- if (error) {
- xfs_buf_ioerror_alert(bp, __func__);
- } else {
- error = XFS_ERROR(EIO);
- }
- if (bp) {
- XFS_BUF_UNDONE(bp);
- xfs_buf_stale(bp);
- /*
- * brelse clears B_ERROR and b_error
- */
- xfs_buf_relse(bp);
- }
- }
- return (error);
-}
-
-/*
- * helper function to extract extent size hint from inode
- */
-xfs_extlen_t
-xfs_get_extsz_hint(
- struct xfs_inode *ip)
-{
- if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
- return ip->i_d.di_extsize;
- if (XFS_IS_REALTIME_INODE(ip))
- return ip->i_mount->m_sb.sb_rextsize;
- return 0;
-}
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h
deleted file mode 100644
index bbdb9ad6a4ba..000000000000
--- a/fs/xfs/xfs_rw.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2000-2006 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_RW_H__
-#define __XFS_RW_H__
-
-struct xfs_buf;
-struct xfs_inode;
-struct xfs_mount;
-
-/*
- * Convert the given file system block to a disk block.
- * We have to treat it differently based on whether the
- * file is a real time file or not, because the bmap code
- * does.
- */
-static inline xfs_daddr_t
-xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
-{
- return (XFS_IS_REALTIME_INODE(ip) ? \
- (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
- XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
-}
-
-/*
- * Prototypes for functions in xfs_rw.c.
- */
-extern int xfs_read_buf(struct xfs_mount *mp, xfs_buftarg_t *btp,
- xfs_daddr_t blkno, int len, uint flags,
- struct xfs_buf **bpp);
-extern xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
-
-#endif /* __XFS_RW_H__ */
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index dab9a5f6dfd6..0d9de41a7151 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -17,7 +17,6 @@
*/
#include "xfs.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
@@ -622,7 +621,7 @@ void
xfs_blkdev_issue_flush(
xfs_buftarg_t *buftarg)
{
- blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL);
+ blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
}
STATIC void
@@ -773,8 +772,14 @@ xfs_init_mount_workqueues(
if (!mp->m_unwritten_workqueue)
goto out_destroy_data_iodone_queue;
+ mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
+ WQ_MEM_RECLAIM, 0, mp->m_fsname);
+ if (!mp->m_cil_workqueue)
+ goto out_destroy_unwritten;
return 0;
+out_destroy_unwritten:
+ destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_data_iodone_queue:
destroy_workqueue(mp->m_data_workqueue);
out:
@@ -785,6 +790,7 @@ STATIC void
xfs_destroy_mount_workqueues(
struct xfs_mount *mp)
{
+ destroy_workqueue(mp->m_cil_workqueue);
destroy_workqueue(mp->m_data_workqueue);
destroy_workqueue(mp->m_unwritten_workqueue);
}
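
The workqueue hunks above extend the usual goto-unwind idiom: the new xfs-cil workqueue is allocated last and, on failure, jumps to a label that tears down the previously created queues in reverse order. Below is a standalone sketch of that idiom, using malloc/free as stand-ins for alloc_workqueue()/destroy_workqueue(); the struct and function names are illustrative only.

#include <stdlib.h>

struct mount_ctx {
	void	*data_wq;
	void	*unwritten_wq;
	void	*cil_wq;
};

static int init_workqueues(struct mount_ctx *mp)
{
	mp->data_wq = malloc(64);
	if (!mp->data_wq)
		goto out;
	mp->unwritten_wq = malloc(64);
	if (!mp->unwritten_wq)
		goto out_destroy_data;
	mp->cil_wq = malloc(64);
	if (!mp->cil_wq)
		goto out_destroy_unwritten;
	return 0;

out_destroy_unwritten:
	free(mp->unwritten_wq);
out_destroy_data:
	free(mp->data_wq);
out:
	return -1;
}

static void destroy_workqueues(struct mount_ctx *mp)
{
	/* tear down in the reverse order of creation, as the hunk above does */
	free(mp->cil_wq);
	free(mp->unwritten_wq);
	free(mp->data_wq);
}

int main(void)
{
	struct mount_ctx mp = { 0 };

	if (init_workqueues(&mp))
		return 1;
	destroy_workqueues(&mp);
	return 0;
}
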
@@ -926,7 +932,7 @@ xfs_fs_evict_inode(
trace_xfs_evict_inode(ip);
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
XFS_STATS_INC(vn_rele);
XFS_STATS_INC(vn_remove);
XFS_STATS_DEC(vn_active);
@@ -981,18 +987,9 @@ xfs_fs_put_super(
{
struct xfs_mount *mp = XFS_M(sb);
- xfs_syncd_stop(mp);
-
- /*
- * Blow away any referenced inode in the filestreams cache.
- * This can and will cause log traffic as inodes go inactive
- * here.
- */
xfs_filestream_unmount(mp);
-
- xfs_flush_buftarg(mp->m_ddev_targp, 1);
-
xfs_unmountfs(mp);
+ xfs_syncd_stop(mp);
xfs_freesb(mp);
xfs_icsb_destroy_counters(mp);
xfs_destroy_mount_workqueues(mp);
@@ -1072,7 +1069,7 @@ xfs_fs_statfs(
spin_unlock(&mp->m_sb_lock);
- if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
+ if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
(XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
xfs_qm_statvfs(ip, statp);
@@ -1362,31 +1359,32 @@ xfs_fs_fill_super(
sb->s_time_gran = 1;
set_posix_acl_flag(sb);
- error = xfs_mountfs(mp);
+ error = xfs_syncd_init(mp);
if (error)
goto out_filestream_unmount;
- error = xfs_syncd_init(mp);
+ error = xfs_mountfs(mp);
if (error)
- goto out_unmount;
+ goto out_syncd_stop;
root = igrab(VFS_I(mp->m_rootip));
if (!root) {
error = ENOENT;
- goto out_syncd_stop;
+ goto out_unmount;
}
if (is_bad_inode(root)) {
error = EINVAL;
- goto out_syncd_stop;
+ goto out_unmount;
}
sb->s_root = d_make_root(root);
if (!sb->s_root) {
error = ENOMEM;
- goto out_syncd_stop;
+ goto out_unmount;
}
return 0;
-
+ out_syncd_stop:
+ xfs_syncd_stop(mp);
out_filestream_unmount:
xfs_filestream_unmount(mp);
out_free_sb:
@@ -1403,19 +1401,10 @@ out_destroy_workqueues:
out:
return -error;
- out_syncd_stop:
- xfs_syncd_stop(mp);
out_unmount:
- /*
- * Blow away any referenced inode in the filestreams cache.
- * This can and will cause log traffic as inodes go inactive
- * here.
- */
xfs_filestream_unmount(mp);
-
- xfs_flush_buftarg(mp->m_ddev_targp, 1);
-
xfs_unmountfs(mp);
+ xfs_syncd_stop(mp);
goto out_free_sb;
}
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index 205ebcb34d9e..c9d3409c5ca3 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -18,7 +18,6 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
@@ -241,45 +240,6 @@ xfs_sync_inode_data(
return error;
}
-STATIC int
-xfs_sync_inode_attr(
- struct xfs_inode *ip,
- struct xfs_perag *pag,
- int flags)
-{
- int error = 0;
-
- xfs_ilock(ip, XFS_ILOCK_SHARED);
- if (xfs_inode_clean(ip))
- goto out_unlock;
- if (!xfs_iflock_nowait(ip)) {
- if (!(flags & SYNC_WAIT))
- goto out_unlock;
- xfs_iflock(ip);
- }
-
- if (xfs_inode_clean(ip)) {
- xfs_ifunlock(ip);
- goto out_unlock;
- }
-
- error = xfs_iflush(ip, flags);
-
- /*
- * We don't want to try again on non-blocking flushes that can't run
- * again immediately. If an inode really must be written, then that's
- * what the SYNC_WAIT flag is for.
- */
- if (error == EAGAIN) {
- ASSERT(!(flags & SYNC_WAIT));
- error = 0;
- }
-
- out_unlock:
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
- return error;
-}
-
/*
* Write out pagecache data for the whole filesystem.
*/
@@ -300,19 +260,6 @@ xfs_sync_data(
return 0;
}
-/*
- * Write out inode metadata (attributes) for the whole filesystem.
- */
-STATIC int
-xfs_sync_attr(
- struct xfs_mount *mp,
- int flags)
-{
- ASSERT((flags & ~SYNC_WAIT) == 0);
-
- return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags);
-}
-
STATIC int
xfs_sync_fsdata(
struct xfs_mount *mp)
@@ -350,7 +297,7 @@ xfs_sync_fsdata(
* First stage of freeze - no writers will make progress now we are here,
* so we flush delwri and delalloc buffers here, then wait for all I/O to
* complete. Data is frozen at that point. Metadata is not frozen,
- * transactions can still occur here so don't bother flushing the buftarg
+ * transactions can still occur here so don't bother emptying the AIL
* because it'll just get dirty again.
*/
int
@@ -365,47 +312,13 @@ xfs_quiesce_data(
/* write superblock and hoover up shutdown errors */
error = xfs_sync_fsdata(mp);
- /* make sure all delwri buffers are written out */
- xfs_flush_buftarg(mp->m_ddev_targp, 1);
-
/* mark the log as covered if needed */
if (xfs_log_need_covered(mp))
error2 = xfs_fs_log_dummy(mp);
- /* flush data-only devices */
- if (mp->m_rtdev_targp)
- xfs_flush_buftarg(mp->m_rtdev_targp, 1);
-
return error ? error : error2;
}
-STATIC void
-xfs_quiesce_fs(
- struct xfs_mount *mp)
-{
- int count = 0, pincount;
-
- xfs_reclaim_inodes(mp, 0);
- xfs_flush_buftarg(mp->m_ddev_targp, 0);
-
- /*
- * This loop must run at least twice. The first instance of the loop
- * will flush most meta data but that will generate more meta data
- * (typically directory updates). Which then must be flushed and
- * logged before we can write the unmount record. We also so sync
- * reclaim of inodes to catch any that the above delwri flush skipped.
- */
- do {
- xfs_reclaim_inodes(mp, SYNC_WAIT);
- xfs_sync_attr(mp, SYNC_WAIT);
- pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
- if (!pincount) {
- delay(50);
- count++;
- }
- } while (count < 2);
-}
-
/*
* Second stage of a quiesce. The data is already synced, now we have to take
* care of the metadata. New transactions are already blocked, so we need to
@@ -421,8 +334,12 @@ xfs_quiesce_attr(
while (atomic_read(&mp->m_active_trans) > 0)
delay(100);
- /* flush inodes and push all remaining buffers out to disk */
- xfs_quiesce_fs(mp);
+ /* reclaim inodes to do any IO before the freeze completes */
+ xfs_reclaim_inodes(mp, 0);
+ xfs_reclaim_inodes(mp, SYNC_WAIT);
+
+ /* flush all pending changes from the AIL */
+ xfs_ail_push_all_sync(mp->m_ail);
/*
* Just warn here till VFS can correctly support
@@ -436,7 +353,12 @@ xfs_quiesce_attr(
xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
"Frozen image may not be consistent.");
xfs_log_unmount_write(mp);
- xfs_unmountfs_writesb(mp);
+
+ /*
+ * At this point we might have modified the superblock again and thus
+ * added an item to the AIL, thus flush it again.
+ */
+ xfs_ail_push_all_sync(mp->m_ail);
}
static void
@@ -460,16 +382,27 @@ xfs_sync_worker(
struct xfs_mount, m_sync_work);
int error;
- if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
- /* dgc: errors ignored here */
- if (mp->m_super->s_frozen == SB_UNFROZEN &&
- xfs_log_need_covered(mp))
- error = xfs_fs_log_dummy(mp);
- else
- xfs_log_force(mp, 0);
-
- /* start pushing all the metadata that is currently dirty */
- xfs_ail_push_all(mp->m_ail);
+ /*
+ * We shouldn't write/force the log if we are in the mount/unmount
+ * process or on a read only filesystem. The workqueue still needs to be
+ * active in both cases, however, because it is used for inode reclaim
+ * during these times. Use the s_umount semaphore to provide exclusion
+ * with unmount.
+ */
+ if (down_read_trylock(&mp->m_super->s_umount)) {
+ if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
+ /* dgc: errors ignored here */
+ if (mp->m_super->s_frozen == SB_UNFROZEN &&
+ xfs_log_need_covered(mp))
+ error = xfs_fs_log_dummy(mp);
+ else
+ xfs_log_force(mp, 0);
+
+ /* start pushing all the metadata that is currently
+ * dirty */
+ xfs_ail_push_all(mp->m_ail);
+ }
+ up_read(&mp->m_super->s_umount);
}
/* queue us up again */
@@ -488,14 +421,6 @@ xfs_syncd_queue_reclaim(
struct xfs_mount *mp)
{
- /*
- * We can have inodes enter reclaim after we've shut down the syncd
- * workqueue during unmount, so don't allow reclaim work to be queued
- * during unmount.
- */
- if (!(mp->m_super->s_flags & MS_ACTIVE))
- return;
-
rcu_read_lock();
if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
@@ -564,7 +489,6 @@ xfs_syncd_init(
INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
xfs_syncd_queue_sync(mp);
- xfs_syncd_queue_reclaim(mp);
return 0;
}
@@ -702,11 +626,8 @@ xfs_reclaim_inode_grab(
}
/*
- * Inodes in different states need to be treated differently, and the return
- * value of xfs_iflush is not sufficient to get this right. The following table
- * lists the inode states and the reclaim actions necessary for non-blocking
- * reclaim:
- *
+ * Inodes in different states need to be treated differently. The following
+ * table lists the inode states and the reclaim actions necessary:
*
* inode state iflush ret required action
* --------------- ---------- ---------------
@@ -716,39 +637,31 @@ xfs_reclaim_inode_grab(
* stale, unpinned 0 reclaim
* clean, pinned(*) 0 requeue
* stale, pinned EAGAIN requeue
- * dirty, delwri ok 0 requeue
- * dirty, delwri blocked EAGAIN requeue
- * dirty, sync flush 0 reclaim
+ * dirty, async - requeue
+ * dirty, sync 0 reclaim
*
* (*) dgc: I don't think the clean, pinned state is possible but it gets
* handled anyway given the order of checks implemented.
*
- * As can be seen from the table, the return value of xfs_iflush() is not
- * sufficient to correctly decide the reclaim action here. The checks in
- * xfs_iflush() might look like duplicates, but they are not.
- *
* Also, because we get the flush lock first, we know that any inode that has
* been flushed delwri has had the flush completed by the time we check that
- * the inode is clean. The clean inode check needs to be done before flushing
- * the inode delwri otherwise we would loop forever requeuing clean inodes as
- * we cannot tell apart a successful delwri flush and a clean inode from the
- * return value of xfs_iflush().
+ * the inode is clean.
*
- * Note that because the inode is flushed delayed write by background
- * writeback, the flush lock may already be held here and waiting on it can
- * result in very long latencies. Hence for sync reclaims, where we wait on the
- * flush lock, the caller should push out delayed write inodes first before
- * trying to reclaim them to minimise the amount of time spent waiting. For
- * background relaim, we just requeue the inode for the next pass.
+ * Note that because the inode is flushed delayed write by AIL pushing, the
+ * flush lock may already be held here and waiting on it can result in very
+ * long latencies. Hence for sync reclaims, where we wait on the flush lock,
+ * the caller should push the AIL first before trying to reclaim inodes to
+ * minimise the amount of time spent waiting. For background reclaim, we only
+ * bother to reclaim clean inodes anyway.
*
* Hence the order of actions after gaining the locks should be:
* bad => reclaim
* shutdown => unpin and reclaim
- * pinned, delwri => requeue
+ * pinned, async => requeue
* pinned, sync => unpin
* stale => reclaim
* clean => reclaim
- * dirty, delwri => flush and requeue
+ * dirty, async => requeue
* dirty, sync => flush, wait and reclaim
*/
STATIC int
@@ -757,7 +670,8 @@ xfs_reclaim_inode(
struct xfs_perag *pag,
int sync_mode)
{
- int error;
+ struct xfs_buf *bp = NULL;
+ int error;
restart:
error = 0;
@@ -765,17 +679,6 @@ restart:
if (!xfs_iflock_nowait(ip)) {
if (!(sync_mode & SYNC_WAIT))
goto out;
-
- /*
- * If we only have a single dirty inode in a cluster there is
- * a fair chance that the AIL push may have pushed it into
- * the buffer, but xfsbufd won't touch it until 30 seconds
- * from now, and thus we will lock up here.
- *
- * Promote the inode buffer to the front of the delwri list
- * and wake up xfsbufd now.
- */
- xfs_promote_inode(ip);
xfs_iflock(ip);
}
@@ -783,13 +686,12 @@ restart:
goto reclaim;
if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
xfs_iunpin_wait(ip);
+ xfs_iflush_abort(ip, false);
goto reclaim;
}
if (xfs_ipincount(ip)) {
- if (!(sync_mode & SYNC_WAIT)) {
- xfs_ifunlock(ip);
- goto out;
- }
+ if (!(sync_mode & SYNC_WAIT))
+ goto out_ifunlock;
xfs_iunpin_wait(ip);
}
if (xfs_iflags_test(ip, XFS_ISTALE))
@@ -798,60 +700,42 @@ restart:
goto reclaim;
/*
+ * Never flush out dirty data during non-blocking reclaim, as it would
+ * just contend with AIL pushing trying to do the same job.
+ */
+ if (!(sync_mode & SYNC_WAIT))
+ goto out_ifunlock;
+
+ /*
* Now we have an inode that needs flushing.
*
- * We do a nonblocking flush here even if we are doing a SYNC_WAIT
- * reclaim as we can deadlock with inode cluster removal.
+ * Note that xfs_iflush will never block on the inode buffer lock, as
* xfs_ifree_cluster() can lock the inode buffer before it locks the
- * ip->i_lock, and we are doing the exact opposite here. As a result,
- * doing a blocking xfs_itobp() to get the cluster buffer will result
+ * ip->i_lock, and we are doing the exact opposite here. As a result,
+ * doing a blocking xfs_itobp() to get the cluster buffer would result
* in an ABBA deadlock with xfs_ifree_cluster().
*
 * As xfs_ifree_cluster() must gather all inodes that are active in the
* cache to mark them stale, if we hit this case we don't actually want
* to do IO here - we want the inode marked stale so we can simply
- * reclaim it. Hence if we get an EAGAIN error on a SYNC_WAIT flush,
- * just unlock the inode, back off and try again. Hopefully the next
- * pass through will see the stale flag set on the inode.
+ * reclaim it. Hence if we get an EAGAIN error here, just unlock the
+ * inode, back off and try again. Hopefully the next pass through will
+ * see the stale flag set on the inode.
*/
- error = xfs_iflush(ip, SYNC_TRYLOCK | sync_mode);
- if (sync_mode & SYNC_WAIT) {
- if (error == EAGAIN) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- /* backoff longer than in xfs_ifree_cluster */
- delay(2);
- goto restart;
- }
- xfs_iflock(ip);
- goto reclaim;
+ error = xfs_iflush(ip, &bp);
+ if (error == EAGAIN) {
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ /* backoff longer than in xfs_ifree_cluster */
+ delay(2);
+ goto restart;
}
- /*
- * When we have to flush an inode but don't have SYNC_WAIT set, we
- * flush the inode out using a delwri buffer and wait for the next
- * call into reclaim to find it in a clean state instead of waiting for
- * it now. We also don't return errors here - if the error is transient
- * then the next reclaim pass will flush the inode, and if the error
- * is permanent then the next sync reclaim will reclaim the inode and
- * pass on the error.
- */
- if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- xfs_warn(ip->i_mount,
- "inode 0x%llx background reclaim flush failed with %d",
- (long long)ip->i_ino, error);
+ if (!error) {
+ error = xfs_bwrite(bp);
+ xfs_buf_relse(bp);
}
-out:
- xfs_iflags_clear(ip, XFS_IRECLAIM);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- /*
- * We could return EAGAIN here to make reclaim rescan the inode tree in
- * a short while. However, this just burns CPU time scanning the tree
- * waiting for IO to complete and xfssyncd never goes back to the idle
- * state. Instead, return 0 to let the next scheduled background reclaim
- * attempt to reclaim the inode again.
- */
- return 0;
+ xfs_iflock(ip);
reclaim:
xfs_ifunlock(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -884,8 +768,21 @@ reclaim:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_inode_free(ip);
-
return error;
+
+out_ifunlock:
+ xfs_ifunlock(ip);
+out:
+ xfs_iflags_clear(ip, XFS_IRECLAIM);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ /*
+ * We could return EAGAIN here to make reclaim rescan the inode tree in
+ * a short while. However, this just burns CPU time scanning the tree
+ * waiting for IO to complete and xfssyncd never goes back to the idle
+ * state. Instead, return 0 to let the next scheduled background reclaim
+ * attempt to reclaim the inode again.
+ */
+ return 0;
}
/*
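
The rewritten comment in xfs_reclaim_inode() above reduces the reclaim policy to a small decision table. The following plain-C sketch expresses that table as a function, which is handy for checking the async/sync cases; the enum, struct, and helper are illustrative only, not kernel types.

#include <stdio.h>

enum action { REQUEUE, RECLAIM };

struct inode_state {
	int bad;
	int shutdown;
	int pinned;
	int stale;
	int clean;
	int sync_wait;			/* caller passed SYNC_WAIT */
};

static enum action reclaim_action(const struct inode_state *s)
{
	if (s->bad || s->shutdown)
		return RECLAIM;		/* shutdown: unpin, then reclaim */
	if (s->pinned && !s->sync_wait)
		return REQUEUE;		/* pinned, async => requeue */
	/* pinned, sync: unpin and fall through to the remaining checks */
	if (s->stale || s->clean)
		return RECLAIM;
	if (!s->sync_wait)
		return REQUEUE;		/* dirty, async => requeue */
	return RECLAIM;			/* dirty, sync => flush, wait, reclaim */
}

int main(void)
{
	struct inode_state dirty_async = { .sync_wait = 0 };
	struct inode_state dirty_sync  = { .sync_wait = 1 };

	printf("dirty, async -> %s\n",
	       reclaim_action(&dirty_async) == REQUEUE ? "requeue" : "reclaim");
	printf("dirty, sync  -> %s\n",
	       reclaim_action(&dirty_sync) == REQUEUE ? "requeue" : "reclaim");
	return 0;
}
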
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index 9010ce885e6a..624bedd81357 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -18,9 +18,7 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 06838c42b2a0..7cf9d3529e51 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -281,7 +281,7 @@ DECLARE_EVENT_CLASS(xfs_buf_class,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_daddr_t, bno)
- __field(size_t, buffer_length)
+ __field(int, nblks)
__field(int, hold)
__field(int, pincount)
__field(unsigned, lockval)
@@ -291,18 +291,18 @@ DECLARE_EVENT_CLASS(xfs_buf_class,
TP_fast_assign(
__entry->dev = bp->b_target->bt_dev;
__entry->bno = bp->b_bn;
- __entry->buffer_length = bp->b_buffer_length;
+ __entry->nblks = bp->b_length;
__entry->hold = atomic_read(&bp->b_hold);
__entry->pincount = atomic_read(&bp->b_pin_count);
__entry->lockval = bp->b_sema.count;
__entry->flags = bp->b_flags;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
+ TP_printk("dev %d:%d bno 0x%llx nblks 0x%x hold %d pincount %d "
"lock %d flags %s caller %pf",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->bno,
- __entry->buffer_length,
+ __entry->nblks,
__entry->hold,
__entry->pincount,
__entry->lockval,
@@ -328,7 +328,7 @@ DEFINE_BUF_EVENT(xfs_buf_unlock);
DEFINE_BUF_EVENT(xfs_buf_iowait);
DEFINE_BUF_EVENT(xfs_buf_iowait_done);
DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
-DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue);
+DEFINE_BUF_EVENT(xfs_buf_delwri_queued);
DEFINE_BUF_EVENT(xfs_buf_delwri_split);
DEFINE_BUF_EVENT(xfs_buf_get_uncached);
DEFINE_BUF_EVENT(xfs_bdstrat_shut);
@@ -362,7 +362,7 @@ DECLARE_EVENT_CLASS(xfs_buf_flags_class,
TP_fast_assign(
__entry->dev = bp->b_target->bt_dev;
__entry->bno = bp->b_bn;
- __entry->buffer_length = bp->b_buffer_length;
+ __entry->buffer_length = BBTOB(bp->b_length);
__entry->flags = flags;
__entry->hold = atomic_read(&bp->b_hold);
__entry->pincount = atomic_read(&bp->b_pin_count);
@@ -406,7 +406,7 @@ TRACE_EVENT(xfs_buf_ioerror,
TP_fast_assign(
__entry->dev = bp->b_target->bt_dev;
__entry->bno = bp->b_bn;
- __entry->buffer_length = bp->b_buffer_length;
+ __entry->buffer_length = BBTOB(bp->b_length);
__entry->hold = atomic_read(&bp->b_hold);
__entry->pincount = atomic_read(&bp->b_pin_count);
__entry->lockval = bp->b_sema.count;
@@ -450,7 +450,7 @@ DECLARE_EVENT_CLASS(xfs_buf_item_class,
__entry->bli_recur = bip->bli_recur;
__entry->bli_refcount = atomic_read(&bip->bli_refcount);
__entry->buf_bno = bip->bli_buf->b_bn;
- __entry->buf_len = bip->bli_buf->b_buffer_length;
+ __entry->buf_len = BBTOB(bip->bli_buf->b_length);
__entry->buf_flags = bip->bli_buf->b_flags;
__entry->buf_hold = atomic_read(&bip->bli_buf->b_hold);
__entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
@@ -486,12 +486,10 @@ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
-DEFINE_BUF_ITEM_EVENT(xfs_buf_item_trylock);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
-DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pushbuf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
@@ -876,15 +874,30 @@ DECLARE_EVENT_CLASS(xfs_log_item_class,
__print_flags(__entry->flags, "|", XFS_LI_FLAGS))
)
+TRACE_EVENT(xfs_log_force,
+ TP_PROTO(struct xfs_mount *mp, xfs_lsn_t lsn),
+ TP_ARGS(mp, lsn),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_lsn_t, lsn)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->lsn = lsn;
+ ),
+ TP_printk("dev %d:%d lsn 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->lsn)
+)
+
#define DEFINE_LOG_ITEM_EVENT(name) \
DEFINE_EVENT(xfs_log_item_class, name, \
TP_PROTO(struct xfs_log_item *lip), \
TP_ARGS(lip))
DEFINE_LOG_ITEM_EVENT(xfs_ail_push);
-DEFINE_LOG_ITEM_EVENT(xfs_ail_pushbuf);
-DEFINE_LOG_ITEM_EVENT(xfs_ail_pushbuf_pinned);
DEFINE_LOG_ITEM_EVENT(xfs_ail_pinned);
DEFINE_LOG_ITEM_EVENT(xfs_ail_locked);
+DEFINE_LOG_ITEM_EVENT(xfs_ail_flushing);
DECLARE_EVENT_CLASS(xfs_file_class,
@@ -1145,7 +1158,7 @@ TRACE_EVENT(xfs_bunmap,
);
-DECLARE_EVENT_CLASS(xfs_busy_class,
+DECLARE_EVENT_CLASS(xfs_extent_busy_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t agbno, xfs_extlen_t len),
TP_ARGS(mp, agno, agbno, len),
@@ -1168,17 +1181,17 @@ DECLARE_EVENT_CLASS(xfs_busy_class,
__entry->len)
);
#define DEFINE_BUSY_EVENT(name) \
-DEFINE_EVENT(xfs_busy_class, name, \
+DEFINE_EVENT(xfs_extent_busy_class, name, \
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
xfs_agblock_t agbno, xfs_extlen_t len), \
TP_ARGS(mp, agno, agbno, len))
-DEFINE_BUSY_EVENT(xfs_alloc_busy);
-DEFINE_BUSY_EVENT(xfs_alloc_busy_enomem);
-DEFINE_BUSY_EVENT(xfs_alloc_busy_force);
-DEFINE_BUSY_EVENT(xfs_alloc_busy_reuse);
-DEFINE_BUSY_EVENT(xfs_alloc_busy_clear);
+DEFINE_BUSY_EVENT(xfs_extent_busy);
+DEFINE_BUSY_EVENT(xfs_extent_busy_enomem);
+DEFINE_BUSY_EVENT(xfs_extent_busy_force);
+DEFINE_BUSY_EVENT(xfs_extent_busy_reuse);
+DEFINE_BUSY_EVENT(xfs_extent_busy_clear);
-TRACE_EVENT(xfs_alloc_busy_trim,
+TRACE_EVENT(xfs_extent_busy_trim,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t agbno, xfs_extlen_t len,
xfs_agblock_t tbno, xfs_extlen_t tlen),
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 103b00c90004..cdf896fcbfa4 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -19,9 +19,7 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -36,6 +34,7 @@
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
+#include "xfs_extent_busy.h"
#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_trans_priv.h"
@@ -608,8 +607,8 @@ STATIC void
xfs_trans_free(
struct xfs_trans *tp)
{
- xfs_alloc_busy_sort(&tp->t_busy);
- xfs_alloc_busy_clear(tp->t_mountp, &tp->t_busy, false);
+ xfs_extent_busy_sort(&tp->t_busy);
+ xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
atomic_dec(&tp->t_mountp->m_active_trans);
xfs_trans_free_dqinfo(tp);
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index f6118703f20d..7ab99e1898c8 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -345,11 +345,9 @@ struct xfs_item_ops {
void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
void (*iop_pin)(xfs_log_item_t *);
void (*iop_unpin)(xfs_log_item_t *, int remove);
- uint (*iop_trylock)(xfs_log_item_t *);
+ uint (*iop_push)(struct xfs_log_item *, struct list_head *);
void (*iop_unlock)(xfs_log_item_t *);
xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
- void (*iop_push)(xfs_log_item_t *);
- bool (*iop_pushbuf)(xfs_log_item_t *);
void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
};
@@ -357,20 +355,18 @@ struct xfs_item_ops {
#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp)
#define IOP_PIN(ip) (*(ip)->li_ops->iop_pin)(ip)
#define IOP_UNPIN(ip, remove) (*(ip)->li_ops->iop_unpin)(ip, remove)
-#define IOP_TRYLOCK(ip) (*(ip)->li_ops->iop_trylock)(ip)
+#define IOP_PUSH(ip, list) (*(ip)->li_ops->iop_push)(ip, list)
#define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip)
#define IOP_COMMITTED(ip, lsn) (*(ip)->li_ops->iop_committed)(ip, lsn)
-#define IOP_PUSH(ip) (*(ip)->li_ops->iop_push)(ip)
-#define IOP_PUSHBUF(ip) (*(ip)->li_ops->iop_pushbuf)(ip)
#define IOP_COMMITTING(ip, lsn) (*(ip)->li_ops->iop_committing)(ip, lsn)
/*
- * Return values for the IOP_TRYLOCK() routines.
+ * Return values for the IOP_PUSH() routines.
*/
-#define XFS_ITEM_SUCCESS 0
-#define XFS_ITEM_PINNED 1
-#define XFS_ITEM_LOCKED 2
-#define XFS_ITEM_PUSHBUF 3
+#define XFS_ITEM_SUCCESS 0
+#define XFS_ITEM_PINNED 1
+#define XFS_ITEM_LOCKED 2
+#define XFS_ITEM_FLUSHING 3
/*
* This is the type of function which can be given to xfs_trans_callback()
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 1dead07f092c..9c514483e599 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -20,7 +20,6 @@
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -79,7 +78,7 @@ xfs_ail_check(
* Return a pointer to the first item in the AIL. If the AIL is empty, then
* return NULL.
*/
-static xfs_log_item_t *
+xfs_log_item_t *
xfs_ail_min(
struct xfs_ail *ailp)
{
@@ -364,30 +363,31 @@ xfsaild_push(
xfs_log_item_t *lip;
xfs_lsn_t lsn;
xfs_lsn_t target;
- long tout = 10;
+ long tout;
int stuck = 0;
+ int flushing = 0;
int count = 0;
- int push_xfsbufd = 0;
/*
- * If last time we ran we encountered pinned items, force the log first
- * and wait for it before pushing again.
+ * If we encountered pinned items or did not finish writing out all
+ * buffers the last time we ran, force the log first and wait for it
+ * before pushing again.
*/
- spin_lock(&ailp->xa_lock);
- if (ailp->xa_last_pushed_lsn == 0 && ailp->xa_log_flush &&
- !list_empty(&ailp->xa_ail)) {
+ if (ailp->xa_log_flush && ailp->xa_last_pushed_lsn == 0 &&
+ (!list_empty_careful(&ailp->xa_buf_list) ||
+ xfs_ail_min_lsn(ailp))) {
ailp->xa_log_flush = 0;
- spin_unlock(&ailp->xa_lock);
+
XFS_STATS_INC(xs_push_ail_flush);
xfs_log_force(mp, XFS_LOG_SYNC);
- spin_lock(&ailp->xa_lock);
}
- target = ailp->xa_target;
+ spin_lock(&ailp->xa_lock);
lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn);
- if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
+ if (!lip) {
/*
- * AIL is empty or our push has reached the end.
+ * If the AIL is empty or our push has reached the end we are
+ * done now.
*/
xfs_trans_ail_cursor_done(ailp, &cur);
spin_unlock(&ailp->xa_lock);
@@ -396,54 +396,42 @@ xfsaild_push(
XFS_STATS_INC(xs_push_ail);
- /*
- * While the item we are looking at is below the given threshold
- * try to flush it out. We'd like not to stop until we've at least
- * tried to push on everything in the AIL with an LSN less than
- * the given threshold.
- *
- * However, we will stop after a certain number of pushes and wait
- * for a reduced timeout to fire before pushing further. This
- * prevents use from spinning when we can't do anything or there is
- * lots of contention on the AIL lists.
- */
lsn = lip->li_lsn;
+ target = ailp->xa_target;
while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
int lock_result;
+
/*
- * If we can lock the item without sleeping, unlock the AIL
- * lock and flush the item. Then re-grab the AIL lock so we
- * can look for the next item on the AIL. List changes are
- * handled by the AIL lookup functions internally
- *
- * If we can't lock the item, either its holder will flush it
- * or it is already being flushed or it is being relogged. In
- * any of these case it is being taken care of and we can just
- * skip to the next item in the list.
+ * Note that IOP_PUSH may unlock and reacquire the AIL lock. We
+ * rely on the AIL cursor implementation to be able to deal with
+ * the dropped lock.
*/
- lock_result = IOP_TRYLOCK(lip);
- spin_unlock(&ailp->xa_lock);
+ lock_result = IOP_PUSH(lip, &ailp->xa_buf_list);
switch (lock_result) {
case XFS_ITEM_SUCCESS:
XFS_STATS_INC(xs_push_ail_success);
trace_xfs_ail_push(lip);
- IOP_PUSH(lip);
ailp->xa_last_pushed_lsn = lsn;
break;
- case XFS_ITEM_PUSHBUF:
- XFS_STATS_INC(xs_push_ail_pushbuf);
- trace_xfs_ail_pushbuf(lip);
-
- if (!IOP_PUSHBUF(lip)) {
- trace_xfs_ail_pushbuf_pinned(lip);
- stuck++;
- ailp->xa_log_flush++;
- } else {
- ailp->xa_last_pushed_lsn = lsn;
- }
- push_xfsbufd = 1;
+ case XFS_ITEM_FLUSHING:
+ /*
+ * The item or its backing buffer is already being
+ * flushed. The typical reason for that is that an
+ * inode buffer is locked because we already pushed the
+ * updates to it as part of inode clustering.
+ *
+ * We do not want to stop flushing just because lots
+ * of items are already being flushed, but we need to
+ * re-try the flushing relatively soon if most of the
+ * AIL is being flushed.
+ */
+ XFS_STATS_INC(xs_push_ail_flushing);
+ trace_xfs_ail_flushing(lip);
+
+ flushing++;
+ ailp->xa_last_pushed_lsn = lsn;
break;
case XFS_ITEM_PINNED:
@@ -453,28 +441,22 @@ xfsaild_push(
stuck++;
ailp->xa_log_flush++;
break;
-
case XFS_ITEM_LOCKED:
XFS_STATS_INC(xs_push_ail_locked);
trace_xfs_ail_locked(lip);
+
stuck++;
break;
-
default:
ASSERT(0);
break;
}
- spin_lock(&ailp->xa_lock);
- /* should we bother continuing? */
- if (XFS_FORCED_SHUTDOWN(mp))
- break;
- ASSERT(mp->m_log);
-
count++;
/*
* Are there too many items we can't do anything with?
+ *
 * If we are skipping too many items because we can't flush
 * them or they are already being flushed, we back off and
 * give them time to complete whatever operation is being
@@ -496,42 +478,36 @@ xfsaild_push(
xfs_trans_ail_cursor_done(ailp, &cur);
spin_unlock(&ailp->xa_lock);
- if (push_xfsbufd) {
- /* we've got delayed write buffers to flush */
- wake_up_process(mp->m_ddev_targp->bt_task);
- }
+ if (xfs_buf_delwri_submit_nowait(&ailp->xa_buf_list))
+ ailp->xa_log_flush++;
- /* assume we have more work to do in a short while */
+ if (!count || XFS_LSN_CMP(lsn, target) >= 0) {
out_done:
- if (!count) {
- /* We're past our target or empty, so idle */
- ailp->xa_last_pushed_lsn = 0;
- ailp->xa_log_flush = 0;
-
- tout = 50;
- } else if (XFS_LSN_CMP(lsn, target) >= 0) {
/*
- * We reached the target so wait a bit longer for I/O to
- * complete and remove pushed items from the AIL before we
- * start the next scan from the start of the AIL.
+ * We reached the target or the AIL is empty, so wait a bit
+ * longer for I/O to complete and remove pushed items from the
+ * AIL before we start the next scan from the start of the AIL.
*/
tout = 50;
ailp->xa_last_pushed_lsn = 0;
- } else if ((stuck * 100) / count > 90) {
+ } else if (((stuck + flushing) * 100) / count > 90) {
/*
- * Either there is a lot of contention on the AIL or we
- * are stuck due to operations in progress. "Stuck" in this
- * case is defined as >90% of the items we tried to push
- * were stuck.
+ * Either there is a lot of contention on the AIL or we are
+ * stuck due to operations in progress. "Stuck" in this case
+ * is defined as >90% of the items we tried to push were stuck.
*
* Backoff a bit more to allow some I/O to complete before
- * restarting from the start of the AIL. This prevents us
- * from spinning on the same items, and if they are pinned will
- * all the restart to issue a log force to unpin the stuck
- * items.
+ * restarting from the start of the AIL. This prevents us from
+ * spinning on the same items, and if they are pinned will allow
+ * the restart to issue a log force to unpin the stuck items.
*/
tout = 20;
ailp->xa_last_pushed_lsn = 0;
+ } else {
+ /*
+ * Assume we have more work to do in a short while.
+ */
+ tout = 10;
}
return tout;
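
The tail of xfsaild_push() above picks the next sleep interval from how much of the scanned AIL was stuck or already flushing. The heuristic is small enough to lift out on its own; here is a standalone sketch with the same thresholds (the function name is illustrative).

#include <stdio.h>

static long pick_timeout(int count, int stuck, int flushing, int reached_target)
{
	if (!count || reached_target)
		return 50;	/* done or idle: wait for I/O to complete */
	if (((stuck + flushing) * 100) / count > 90)
		return 20;	/* >90% stuck/flushing: back off harder */
	return 10;		/* assume more work to do in a short while */
}

int main(void)
{
	printf("%ld\n", pick_timeout(100, 95, 0, 0));	/* prints 20 */
	printf("%ld\n", pick_timeout(100, 10, 5, 0));	/* prints 10 */
	printf("%ld\n", pick_timeout(0, 0, 0, 1));	/* prints 50 */
	return 0;
}
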
@@ -544,6 +520,8 @@ xfsaild(
struct xfs_ail *ailp = data;
long tout = 0; /* milliseconds */
+ current->flags |= PF_MEMALLOC;
+
while (!kthread_should_stop()) {
if (tout && tout <= 20)
__set_current_state(TASK_KILLABLE);
@@ -611,6 +589,30 @@ xfs_ail_push_all(
}
/*
+ * Push out all items in the AIL immediately and wait until the AIL is empty.
+ */
+void
+xfs_ail_push_all_sync(
+ struct xfs_ail *ailp)
+{
+ struct xfs_log_item *lip;
+ DEFINE_WAIT(wait);
+
+ spin_lock(&ailp->xa_lock);
+ while ((lip = xfs_ail_max(ailp)) != NULL) {
+ prepare_to_wait(&ailp->xa_empty, &wait, TASK_UNINTERRUPTIBLE);
+ ailp->xa_target = lip->li_lsn;
+ wake_up_process(ailp->xa_task);
+ spin_unlock(&ailp->xa_lock);
+ schedule();
+ spin_lock(&ailp->xa_lock);
+ }
+ spin_unlock(&ailp->xa_lock);
+
+ finish_wait(&ailp->xa_empty, &wait);
+}
+
+/*
* xfs_trans_ail_update - bulk AIL insertion operation.
*
* @xfs_trans_ail_update takes an array of log items that all need to be
@@ -667,11 +669,15 @@ xfs_trans_ail_update_bulk(
if (!list_empty(&tmp))
xfs_ail_splice(ailp, cur, &tmp, lsn);
- spin_unlock(&ailp->xa_lock);
- if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
- xlog_assign_tail_lsn(ailp->xa_mount);
+ if (mlip_changed) {
+ if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
+ xlog_assign_tail_lsn_locked(ailp->xa_mount);
+ spin_unlock(&ailp->xa_lock);
+
xfs_log_space_wake(ailp->xa_mount);
+ } else {
+ spin_unlock(&ailp->xa_lock);
}
}
@@ -700,7 +706,8 @@ void
xfs_trans_ail_delete_bulk(
struct xfs_ail *ailp,
struct xfs_log_item **log_items,
- int nr_items) __releases(ailp->xa_lock)
+ int nr_items,
+ int shutdown_type) __releases(ailp->xa_lock)
{
xfs_log_item_t *mlip;
int mlip_changed = 0;
@@ -718,7 +725,7 @@ xfs_trans_ail_delete_bulk(
xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
"%s: attempting to delete a log item that is not in the AIL",
__func__);
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ xfs_force_shutdown(mp, shutdown_type);
}
return;
}
@@ -729,28 +736,20 @@ xfs_trans_ail_delete_bulk(
if (mlip == lip)
mlip_changed = 1;
}
- spin_unlock(&ailp->xa_lock);
- if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
- xlog_assign_tail_lsn(ailp->xa_mount);
+ if (mlip_changed) {
+ if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
+ xlog_assign_tail_lsn_locked(ailp->xa_mount);
+ if (list_empty(&ailp->xa_ail))
+ wake_up_all(&ailp->xa_empty);
+ spin_unlock(&ailp->xa_lock);
+
xfs_log_space_wake(ailp->xa_mount);
+ } else {
+ spin_unlock(&ailp->xa_lock);
}
}
-/*
- * The active item list (AIL) is a doubly linked list of log
- * items sorted by ascending lsn. The base of the list is
- * a forw/back pointer pair embedded in the xfs mount structure.
- * The base is initialized with both pointers pointing to the
- * base. This case always needs to be distinguished, because
- * the base has no lsn to look at. We almost always insert
- * at the end of the list, so on inserts we search from the
- * end of the list to find where the new item belongs.
- */
-
-/*
- * Initialize the doubly linked list to point only to itself.
- */
int
xfs_trans_ail_init(
xfs_mount_t *mp)
@@ -765,6 +764,8 @@ xfs_trans_ail_init(
INIT_LIST_HEAD(&ailp->xa_ail);
INIT_LIST_HEAD(&ailp->xa_cursors);
spin_lock_init(&ailp->xa_lock);
+ INIT_LIST_HEAD(&ailp->xa_buf_list);
+ init_waitqueue_head(&ailp->xa_empty);
ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
ailp->xa_mount->m_fsname);
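
xfs_ail_push_all_sync(), added above, re-arms a wait on xa_empty, bumps the push target to the current AIL maximum, kicks the worker, and sleeps until the list drains. A userspace analogue of that wait-until-empty handshake is sketched below, with a pthread condition variable standing in for the kernel wait queue; the worker thread is illustrative, not xfsaild.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t	ail_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	ail_empty = PTHREAD_COND_INITIALIZER;
static int		nr_items = 5;

/* Stand-in for the AIL worker: retire one item per iteration. */
static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&ail_lock);
		if (!nr_items) {
			pthread_cond_broadcast(&ail_empty);	/* like wake_up_all() */
			pthread_mutex_unlock(&ail_lock);
			return NULL;
		}
		nr_items--;
		pthread_mutex_unlock(&ail_lock);
		usleep(1000);
	}
}

/* Analogue of xfs_ail_push_all_sync(): sleep until the list is empty. */
static void push_all_sync(void)
{
	pthread_mutex_lock(&ail_lock);
	while (nr_items)
		pthread_cond_wait(&ail_empty, &ail_lock);
	pthread_mutex_unlock(&ail_lock);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	push_all_sync();
	pthread_join(tid, NULL);
	printf("list drained\n");
	return 0;
}
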
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 1302d1d95a58..21c5a5e3700d 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -18,9 +18,7 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -33,7 +31,6 @@
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
-#include "xfs_rw.h"
#include "xfs_trace.h"
/*
@@ -56,7 +53,7 @@ xfs_trans_buf_item_match(
if (blip->bli_item.li_type == XFS_LI_BUF &&
blip->bli_buf->b_target == target &&
XFS_BUF_ADDR(blip->bli_buf) == blkno &&
- XFS_BUF_COUNT(blip->bli_buf) == len)
+ BBTOB(blip->bli_buf->b_length) == len)
return blip->bli_buf;
}
@@ -141,15 +138,11 @@ xfs_trans_get_buf(xfs_trans_t *tp,
xfs_buf_t *bp;
xfs_buf_log_item_t *bip;
- if (flags == 0)
- flags = XBF_LOCK | XBF_MAPPED;
-
/*
* Default to a normal get_buf() call if the tp is NULL.
*/
if (tp == NULL)
- return xfs_buf_get(target_dev, blkno, len,
- flags | XBF_DONT_BLOCK);
+ return xfs_buf_get(target_dev, blkno, len, flags);
/*
* If we find the buffer in the cache with this transaction
@@ -165,14 +158,6 @@ xfs_trans_get_buf(xfs_trans_t *tp,
XFS_BUF_DONE(bp);
}
- /*
- * If the buffer is stale then it was binval'ed
- * since last read. This doesn't matter since the
- * caller isn't allowed to use the data anyway.
- */
- else if (XFS_BUF_ISSTALE(bp))
- ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
-
ASSERT(bp->b_transp == tp);
bip = bp->b_fspriv;
ASSERT(bip != NULL);
@@ -182,15 +167,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
return (bp);
}
- /*
- * We always specify the XBF_DONT_BLOCK flag within a transaction
- * so that get_buf does not try to push out a delayed write buffer
- * which might cause another transaction to take place (if the
- * buffer was delayed alloc). Such recursive transactions can
- * easily deadlock with our current transaction as well as cause
- * us to run out of stack space.
- */
- bp = xfs_buf_get(target_dev, blkno, len, flags | XBF_DONT_BLOCK);
+ bp = xfs_buf_get(target_dev, blkno, len, flags);
if (bp == NULL) {
return NULL;
}
@@ -282,14 +259,13 @@ xfs_trans_read_buf(
xfs_buf_log_item_t *bip;
int error;
- if (flags == 0)
- flags = XBF_LOCK | XBF_MAPPED;
+ *bpp = NULL;
/*
* Default to a normal get_buf() call if the tp is NULL.
*/
if (tp == NULL) {
- bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
+ bp = xfs_buf_read(target, blkno, len, flags);
if (!bp)
return (flags & XBF_TRYLOCK) ?
EAGAIN : XFS_ERROR(ENOMEM);
@@ -297,6 +273,8 @@ xfs_trans_read_buf(
if (bp->b_error) {
error = bp->b_error;
xfs_buf_ioerror_alert(bp, __func__);
+ XFS_BUF_UNDONE(bp);
+ xfs_buf_stale(bp);
xfs_buf_relse(bp);
return error;
}
@@ -371,15 +349,7 @@ xfs_trans_read_buf(
return 0;
}
- /*
- * We always specify the XBF_DONT_BLOCK flag within a transaction
- * so that get_buf does not try to push out a delayed write buffer
- * which might cause another transaction to take place (if the
- * buffer was delayed alloc). Such recursive transactions can
- * easily deadlock with our current transaction as well as cause
- * us to run out of stack space.
- */
- bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
+ bp = xfs_buf_read(target, blkno, len, flags);
if (bp == NULL) {
*bpp = NULL;
return (flags & XBF_TRYLOCK) ?
@@ -418,19 +388,6 @@ xfs_trans_read_buf(
return 0;
shutdown_abort:
- /*
- * the theory here is that buffer is good but we're
- * bailing out because the filesystem is being forcibly
- * shut down. So we should leave the b_flags alone since
- * the buffer's not staled and just get out.
- */
-#if defined(DEBUG)
- if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
- xfs_notice(mp, "about to pop assert, bp == 0x%p", bp);
-#endif
- ASSERT((bp->b_flags & (XBF_STALE|XBF_DELWRI)) !=
- (XBF_STALE|XBF_DELWRI));
-
trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
xfs_buf_relse(bp);
*bpp = NULL;
@@ -606,7 +563,7 @@ xfs_trans_log_buf(xfs_trans_t *tp,
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
- ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp)));
+ ASSERT(first <= last && last < BBTOB(bp->b_length));
ASSERT(bp->b_iodone == NULL ||
bp->b_iodone == xfs_buf_iodone_callbacks);
@@ -626,8 +583,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
bp->b_iodone = xfs_buf_iodone_callbacks;
bip->bli_item.li_cb = xfs_buf_iodone;
- xfs_buf_delwri_queue(bp);
-
trace_xfs_trans_log_buf(bip);
/*
@@ -651,22 +606,33 @@ xfs_trans_log_buf(xfs_trans_t *tp,
/*
- * This called to invalidate a buffer that is being used within
- * a transaction. Typically this is because the blocks in the
- * buffer are being freed, so we need to prevent it from being
- * written out when we're done. Allowing it to be written again
- * might overwrite data in the free blocks if they are reallocated
- * to a file.
+ * Invalidate a buffer that is being used within a transaction.
+ *
+ * Typically this is because the blocks in the buffer are being freed, so we
+ * need to prevent it from being written out when we're done. Allowing it
+ * to be written again might overwrite data in the free blocks if they are
+ * reallocated to a file.
+ *
+ * We prevent the buffer from being written out by marking it stale. We can't
+ * get rid of the buf log item at this point because the buffer may still be
+ * pinned by another transaction. If that is the case, then we'll wait until
+ * the buffer is committed to disk for the last time (we can tell by the ref
+ * count) and free it in xfs_buf_item_unpin(). Until that happens we will
+ * keep the buffer locked so that the buffer and buf log item are not reused.
+ *
+ * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
+ * the buf item. This will be used at recovery time to determine that copies
+ * of the buffer in the log before this should not be replayed.
*
- * We prevent the buffer from being written out by clearing the
- * B_DELWRI flag. We can't always
- * get rid of the buf log item at this point, though, because
- * the buffer may still be pinned by another transaction. If that
- * is the case, then we'll wait until the buffer is committed to
- * disk for the last time (we can tell by the ref count) and
- * free it in xfs_buf_item_unpin(). Until it is cleaned up we
- * will keep the buffer locked so that the buffer and buf log item
- * are not reused.
+ * We mark the item descriptor and the transaction dirty so that we'll hold
+ * the buffer until after the commit.
+ *
+ * Since we're invalidating the buffer, we also clear the state about which
+ * parts of the buffer have been logged. We also clear the flag indicating
+ * that this is an inode buffer since the data in the buffer will no longer
+ * be valid.
+ *
+ * We set the stale bit in the buffer as well since we're getting rid of it.
*/
void
xfs_trans_binval(
@@ -686,7 +652,6 @@ xfs_trans_binval(
* If the buffer is already invalidated, then
* just return.
*/
- ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
ASSERT(XFS_BUF_ISSTALE(bp));
ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_INODE_BUF));
@@ -696,27 +661,8 @@ xfs_trans_binval(
return;
}
- /*
- * Clear the dirty bit in the buffer and set the STALE flag
- * in the buf log item. The STALE flag will be used in
- * xfs_buf_item_unpin() to determine if it should clean up
- * when the last reference to the buf item is given up.
- * We set the XFS_BLF_CANCEL flag in the buf log format structure
- * and log the buf item. This will be used at recovery time
- * to determine that copies of the buffer in the log before
- * this should not be replayed.
- * We mark the item descriptor and the transaction dirty so
- * that we'll hold the buffer until after the commit.
- *
- * Since we're invalidating the buffer, we also clear the state
- * about which parts of the buffer have been logged. We also
- * clear the flag indicating that this is an inode buffer since
- * the data in the buffer will no longer be valid.
- *
- * We set the stale bit in the buffer as well since we're getting
- * rid of it.
- */
xfs_buf_stale(bp);
+
bip->bli_flags |= XFS_BLI_STALE;
bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
bip->bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 279099717ed2..bcb60542fcf1 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -17,9 +17,7 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index f7590f5badea..8d71b16eccae 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -19,7 +19,6 @@
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 7a7442c03f2b..d2eee20d5f5b 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -18,9 +18,7 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 8ab2ced415f1..fb62377d1cbc 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -71,6 +71,8 @@ struct xfs_ail {
spinlock_t xa_lock;
xfs_lsn_t xa_last_pushed_lsn;
int xa_log_flush;
+ struct list_head xa_buf_list;
+ wait_queue_head_t xa_empty;
};
/*
@@ -90,18 +92,22 @@ xfs_trans_ail_update(
}
void xfs_trans_ail_delete_bulk(struct xfs_ail *ailp,
- struct xfs_log_item **log_items, int nr_items)
+ struct xfs_log_item **log_items, int nr_items,
+ int shutdown_type)
__releases(ailp->xa_lock);
static inline void
xfs_trans_ail_delete(
struct xfs_ail *ailp,
- xfs_log_item_t *lip) __releases(ailp->xa_lock)
+ xfs_log_item_t *lip,
+ int shutdown_type) __releases(ailp->xa_lock)
{
- xfs_trans_ail_delete_bulk(ailp, &lip, 1);
+ xfs_trans_ail_delete_bulk(ailp, &lip, 1, shutdown_type);
}
void xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
void xfs_ail_push_all(struct xfs_ail *);
+void xfs_ail_push_all_sync(struct xfs_ail *);
+struct xfs_log_item *xfs_ail_min(struct xfs_ail *ailp);
xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp);
struct xfs_log_item * xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h
index 65584b55607d..398cf681d025 100644
--- a/fs/xfs/xfs_types.h
+++ b/fs/xfs/xfs_types.h
@@ -57,6 +57,7 @@ typedef __uint64_t __psunsigned_t;
#endif /* __KERNEL__ */
typedef __uint32_t xfs_agblock_t; /* blockno in alloc. group */
+typedef __uint32_t xfs_agino_t; /* inode # within allocation grp */
typedef __uint32_t xfs_extlen_t; /* extent length in blocks */
typedef __uint32_t xfs_agnumber_t; /* allocation group number */
typedef __int32_t xfs_extnum_t; /* # of extents in a file */
@@ -101,6 +102,7 @@ typedef __uint64_t xfs_fileoff_t; /* block number in a file */
typedef __int64_t xfs_sfiloff_t; /* signed block number in a file */
typedef __uint64_t xfs_filblks_t; /* number of blocks in a file */
+
/*
* Null values for the types.
*/
@@ -120,6 +122,9 @@ typedef __uint64_t xfs_filblks_t; /* number of blocks in a file */
#define NULLCOMMITLSN ((xfs_lsn_t)-1)
+#define NULLFSINO ((xfs_ino_t)-1)
+#define NULLAGINO ((xfs_agino_t)-1)
+
/*
* Max values for extlen, extnum, aextnum.
*/
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index 79c05ac85bfe..4e5b9ad5cb97 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -18,9 +18,7 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
-#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 64981d7e7375..b6a82d817a82 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -21,7 +21,6 @@
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
-#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
@@ -39,7 +38,6 @@
#include "xfs_bmap.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
-#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
@@ -81,8 +79,7 @@ xfs_readlink_bmap(
d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
- bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt),
- XBF_LOCK | XBF_MAPPED | XBF_DONT_BLOCK);
+ bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0);
if (!bp)
return XFS_ERROR(ENOMEM);
error = bp->b_error;
@@ -1919,7 +1916,7 @@ xfs_alloc_file_space(
error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
xfs_bmap_cancel(&free_list);
- xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
+ xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
error1: /* Just cancel transaction */
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
@@ -1966,7 +1963,7 @@ xfs_zero_remaining_bytes(
bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
mp->m_rtdev_targp : mp->m_ddev_targp,
- mp->m_sb.sb_blocksize, XBF_DONT_BLOCK);
+ BTOBB(mp->m_sb.sb_blocksize), 0);
if (!bp)
return XFS_ERROR(ENOMEM);
@@ -2315,17 +2312,33 @@ xfs_change_file_space(
case XFS_IOC_ALLOCSP64:
case XFS_IOC_FREESP:
case XFS_IOC_FREESP64:
+ /*
+ * These operations actually do IO when extending the file, but
+ * the allocation is done separately to the zeroing that is
+ * done. This set of operations needs to be serialised against
+ * other IO operations, such as truncate and buffered IO. We
+ * need to take the IOLOCK here to serialise the allocation and
+ * zeroing IO to prevent other IOLOCK holders (e.g. getbmap,
+ * truncate, direct IO) from racing against the transient
+ * allocated but not written state we can have here.
+ */
+ xfs_ilock(ip, XFS_IOLOCK_EXCL);
if (startoffset > fsize) {
error = xfs_alloc_file_space(ip, fsize,
- startoffset - fsize, 0, attr_flags);
- if (error)
+ startoffset - fsize, 0,
+ attr_flags | XFS_ATTR_NOLOCK);
+ if (error) {
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
break;
+ }
}
iattr.ia_valid = ATTR_SIZE;
iattr.ia_size = startoffset;
- error = xfs_setattr_size(ip, &iattr, attr_flags);
+ error = xfs_setattr_size(ip, &iattr,
+ attr_flags | XFS_ATTR_NOLOCK);
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
if (error)
return error;
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 53f91b1ae53a..2c85a0f647b7 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -8,6 +8,7 @@ header-y += int-ll64.h
header-y += ioctl.h
header-y += ioctls.h
header-y += ipcbuf.h
+header-y += kvm_para.h
header-y += mman-common.h
header-y += mman.h
header-y += msgbuf.h
diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h
index 4ae54e07de83..a7b0914348fd 100644
--- a/include/asm-generic/bitsperlong.h
+++ b/include/asm-generic/bitsperlong.h
@@ -28,5 +28,9 @@
#error Inconsistent word size. Check asm/bitsperlong.h
#endif
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif
+
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_BITS_PER_LONG */
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
index 85a3ffaa0242..abfb2682de7f 100644
--- a/include/asm-generic/dma-coherent.h
+++ b/include/asm-generic/dma-coherent.h
@@ -3,13 +3,15 @@
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
- * These two functions are only for dma allocator.
+ * These three functions are only for the dma allocator.
* Don't use them in device drivers.
*/
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, size_t size, int *ret);
/*
* Standard interface
*/
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
new file mode 100644
index 000000000000..c544356b374b
--- /dev/null
+++ b/include/asm-generic/dma-contiguous.h
@@ -0,0 +1,28 @@
+#ifndef ASM_DMA_CONTIGUOUS_H
+#define ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/device.h>
+#include <linux/dma-contiguous.h>
+
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ if (dev && dev->cma_area)
+ return dev->cma_area;
+ return dma_contiguous_default_area;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
+{
+ if (dev)
+ dev->cma_area = cma;
+ if (!dev || !dma_contiguous_default_area)
+ dma_contiguous_default_area = cma;
+}
+
+#endif
+#endif
+
+#endif
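
The two inline helpers above encode the CMA lookup policy: an area assigned with dev_set_cma_area() wins, and dma_contiguous_default_area is the fallback; note that assigning an area while no default exists yet also installs it as the default. A minimal sketch of a caller, assuming the rest of this series provides dma_alloc_from_contiguous() in <linux/dma-contiguous.h> (the helper name below is made up):

#include <linux/device.h>
#include <linux/dma-contiguous.h>

/* Sketch: allocate 'count' pages from the CMA area that applies to @dev. */
static struct page *alloc_dev_cma_pages(struct device *dev, int count)
{
        struct cma *area = dev_get_cma_area(dev);   /* device area or default */

        if (!area)
                return NULL;            /* no CMA region configured anywhere */

        return dma_alloc_from_contiguous(dev, count, 0);
}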
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 5f52690c3c8f..365ea09ed3b0 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -142,9 +142,9 @@ extern int __must_check gpiochip_reserve(int start, int ngpio);
/* add/remove chips */
extern int gpiochip_add(struct gpio_chip *chip);
extern int __must_check gpiochip_remove(struct gpio_chip *chip);
-extern struct gpio_chip *gpiochip_find(const void *data,
+extern struct gpio_chip *gpiochip_find(void *data,
int (*match)(struct gpio_chip *chip,
- const void *data));
+ void *data));
/* Always use the library code for GPIO management calls,
@@ -179,6 +179,8 @@ extern void gpio_free_array(const struct gpio *array, size_t num);
/* bindings for managed devices that want to request gpios */
int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
+int devm_gpio_request_one(struct device *dev, unsigned gpio,
+ unsigned long flags, const char *label);
void devm_gpio_free(struct device *dev, unsigned int gpio);
#ifdef CONFIG_GPIO_SYSFS
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
new file mode 100644
index 000000000000..5cba37f9eae1
--- /dev/null
+++ b/include/asm-generic/kvm_para.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_GENERIC_KVM_PARA_H
+#define _ASM_GENERIC_KVM_PARA_H
+
+#ifdef __KERNEL__
+
+/*
+ * This function is used by architectures that support kvm to avoid issuing
+ * false soft lockup messages.
+ */
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+#endif /* __KERNEL__ */
+
+#endif
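
The generic stub above lets common code call kvm_check_and_clear_guest_paused() unconditionally; architectures with real kvm support return true when the host has paused the guest, which is precisely the situation a soft-lockup detector should not report. A minimal sketch of that caller pattern (the threshold and timestamp handling are placeholders, not the kernel watchdog's actual logic):

#include <linux/kvm_para.h>
#include <linux/printk.h>

#define SOFTLOCKUP_THRESH_SECS  20      /* placeholder threshold */

/* Sketch: skip the report if the apparent stall was a host-side pause. */
static void check_softlockup(unsigned long touch_ts, unsigned long now)
{
        if (now - touch_ts < SOFTLOCKUP_THRESH_SECS)
                return;

        if (kvm_check_and_clear_guest_paused())
                return;         /* the host paused us: not a real lockup */

        pr_emerg("BUG: soft lockup detected\n");
}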
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 125c54e98517..6f2b45a9b6bc 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -158,9 +158,8 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
#endif
#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp);
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
#endif
#ifndef __HAVE_ARCH_PTE_SAME
@@ -446,6 +445,18 @@ static inline int pmd_write(pmd_t pmd)
#endif /* __HAVE_ARCH_PMD_WRITE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#ifndef pmd_read_atomic
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+ /*
+ * Depend on compiler for an atomic pmd read. NOTE: this is
+ * only going to work if the pmdval_t isn't larger than
+ * an unsigned long.
+ */
+ return *pmdp;
+}
+#endif
+
/*
* This function is meant to be used by sites walking pagetables with
 * the mmap_sem held in read mode to protect against MADV_DONTNEED and
@@ -459,11 +470,17 @@ static inline int pmd_write(pmd_t pmd)
* undefined so behaving like if the pmd was none is safe (because it
* can return none anyway). The compiler level barrier() is critically
* important to compute the two checks atomically on the same pmdval.
+ *
+ * For 32bit kernels with a 64bit large pmd_t this automatically takes
+ * care of reading the pmd atomically to avoid SMP race conditions
+ * against pmd_populate() when the mmap_sem is held for reading by the
+ * caller (a special atomic read not done by "gcc" as in the generic
+ * version above, is also needed when THP is disabled because the page
+ * fault can populate the pmd from under us).
*/
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
- /* depend on compiler for an atomic pmd read */
- pmd_t pmdval = *pmd;
+ pmd_t pmdval = pmd_read_atomic(pmd);
/*
* The barrier will stabilize the pmdval in a register or on
* the stack so that it will stop changing under the code.
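
The generic pmd_read_atomic() relies on a plain dereference, which is only safe while pmdval_t fits in an unsigned long; 32-bit configurations with a 64-bit pmd are expected to provide their own version. A minimal sketch of the walker pattern the comment above describes, assuming the caller already holds mmap_sem for reading (walk_pte_range() is a placeholder for the pte-level work):

/* Sketch: handle one pmd entry safely against MADV_DONTNEED and THP. */
static int walk_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, unsigned long end)
{
        if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                return 0;       /* none, transhuge or corrupted: skip safely */

        /* the pmd points to a pte page that cannot go away under us here */
        return walk_pte_range(vma, pmd, addr, end);
}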
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
new file mode 100644
index 000000000000..3f21f1b72e45
--- /dev/null
+++ b/include/asm-generic/word-at-a-time.h
@@ -0,0 +1,52 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * This says "generic", but it's actually big-endian only.
+ * Little-endian can use more efficient versions of these
+ * interfaces, see for example
+ * arch/x86/include/asm/word-at-a-time.h
+ * for those.
+ */
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+ unsigned long mask = (val & c->low_bits) + c->low_bits;
+ return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+ long byte = 0;
+#ifdef CONFIG_64BIT
+ if (mask >> 32)
+ mask >>= 32;
+ else
+ byte = 4;
+#endif
+ if (mask >> 16)
+ mask >>= 16;
+ else
+ byte += 2;
+ return (mask >> 8) ? byte : byte + 1;
+}
+
+static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+ unsigned long rhs = val | c->low_bits;
+ *data = rhs;
+ return (val + c->high_bits) & ~rhs;
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
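
The big-endian generic above is used by feeding each word through has_zero() first, then refining the result with prep_zero_mask()/create_zero_mask() and find_zero() once a terminating byte is seen. A minimal sketch of a strlen-style loop, assuming the input is unsigned-long-aligned and NUL-terminated (real users add alignment and page-boundary handling on top):

#include <asm/word-at-a-time.h>         /* resolves to this header on generic arches */

static inline size_t waat_strlen(const unsigned long *p)
{
        const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
        unsigned long val, data, mask;
        size_t len = 0;

        for (;;) {
                val = *p++;
                if (has_zero(val, &data, &constants))
                        break;
                len += sizeof(unsigned long);
        }

        mask = prep_zero_mask(val, data, &constants);
        mask = create_zero_mask(mask);
        return len + find_zero(mask);   /* byte offset of the first zero */
}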
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 64ff02d5b730..e51035a3757f 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -730,6 +730,8 @@ struct drm_prime_handle {
#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
+#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
+#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
/**
* Device specific ioctls should only be in their respective headers
@@ -775,6 +777,10 @@ struct drm_event_vblank {
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+#define DRM_CAP_PRIME 0x5
+
+#define DRM_PRIME_CAP_IMPORT 0x1
+#define DRM_PRIME_CAP_EXPORT 0x2
/* typedef area */
#ifndef __KERNEL__
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index dd731043fecd..31ad880ca2ef 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -755,11 +755,11 @@ struct drm_driver {
* @dev: DRM device
* @crtc: counter to fetch
*
- * Driver callback for fetching a raw hardware vblank counter
- * for @crtc. If a device doesn't have a hardware counter, the
- * driver can simply return the value of drm_vblank_count and
- * make the enable_vblank() and disable_vblank() hooks into no-ops,
- * leaving interrupts enabled at all times.
+ * Driver callback for fetching a raw hardware vblank counter for @crtc.
+ * If a device doesn't have a hardware counter, the driver can simply
+ * return the value of drm_vblank_count. The DRM core will account for
+ * missed vblank events while interrupts were disabled based on system
+ * timestamps.
*
* Wraparound handling and loss of events due to modesetting is dealt
* with in the DRM core code.
@@ -941,7 +941,7 @@ struct drm_driver {
uint32_t handle);
/* Driver private ops for this object */
- struct vm_operations_struct *gem_vm_ops;
+ const struct vm_operations_struct *gem_vm_ops;
int major;
int minor;
@@ -1309,8 +1309,8 @@ extern int drm_release(struct inode *inode, struct file *filp);
/* Mapping support (drm_vm.h) */
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
-extern void drm_vm_open_locked(struct vm_area_struct *vma);
-extern void drm_vm_close_locked(struct vm_area_struct *vma);
+extern void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
+extern void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
/* Memory management support (drm_memory.h) */
@@ -1378,6 +1378,7 @@ extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
/* Cache management (drm_cache.c) */
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+void drm_clflush_virt_range(char *addr, unsigned long length);
/* Locking IOCTL support (drm_lock.h) */
extern int drm_lock(struct drm_device *dev, void *data,
@@ -1557,6 +1558,8 @@ extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
extern int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
+ dma_addr_t *addrs, int max_pages);
extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index e250eda4e3a8..73e45600f95d 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -36,6 +36,7 @@
struct drm_device;
struct drm_mode_set;
struct drm_framebuffer;
+struct drm_object_properties;
#define DRM_MODE_OBJECT_CRTC 0xcccccccc
@@ -50,6 +51,14 @@ struct drm_framebuffer;
struct drm_mode_object {
uint32_t id;
uint32_t type;
+ struct drm_object_properties *properties;
+};
+
+#define DRM_OBJECT_MAX_PROPERTY 16
+struct drm_object_properties {
+ int count;
+ uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
+ uint64_t values[DRM_OBJECT_MAX_PROPERTY];
};
/*
@@ -285,19 +294,16 @@ struct drm_plane;
/**
* drm_crtc_funcs - control CRTCs for a given device
- * @reset: reset CRTC after state has been invalidate (e.g. resume)
- * @dpms: control display power levels
* @save: save CRTC state
- * @resore: restore CRTC state
- * @lock: lock the CRTC
- * @unlock: unlock the CRTC
- * @shadow_allocate: allocate shadow pixmap
- * @shadow_create: create shadow pixmap for rotation support
- * @shadow_destroy: free shadow pixmap
- * @mode_fixup: fixup proposed mode
- * @mode_set: set the desired mode on the CRTC
+ * @restore: restore CRTC state
+ * @reset: reset CRTC after state has been invalidate (e.g. resume)
+ * @cursor_set: setup the cursor
+ * @cursor_move: move the cursor
* @gamma_set: specify color ramp for CRTC
- * @destroy: deinit and free object.
+ * @destroy: deinit and free object
+ * @set_property: called when a property is changed
+ * @set_config: apply a new CRTC configuration
+ * @page_flip: initiate a page flip
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
@@ -341,6 +347,9 @@ struct drm_crtc_funcs {
int (*page_flip)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
+
+ int (*set_property)(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val);
};
/**
@@ -360,6 +369,7 @@ struct drm_crtc_funcs {
* @framedur_ns: precise line timing
* @pixeldur_ns: precise pixel timing
* @helper_private: mid-layer private data
+ * @properties: property tracking for this CRTC
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
@@ -395,6 +405,8 @@ struct drm_crtc {
/* if you are using the helper */
void *helper_private;
+
+ struct drm_object_properties properties;
};
@@ -404,11 +416,8 @@ struct drm_crtc {
* @save: save connector state
* @restore: restore connector state
* @reset: reset connector after state has been invalidate (e.g. resume)
- * @mode_valid: is this mode valid on the given connector?
- * @mode_fixup: try to fixup proposed mode for this connector
- * @mode_set: set this mode
* @detect: is this connector active?
- * @get_modes: get mode list for this connector
+ * @fill_modes: fill mode list for this connector
* @set_property: property for this connector may need update
* @destroy: make object go away
* @force: notify the driver the connector is forced on
@@ -451,7 +460,6 @@ struct drm_encoder_funcs {
};
#define DRM_CONNECTOR_MAX_UMODES 16
-#define DRM_CONNECTOR_MAX_PROPERTY 16
#define DRM_CONNECTOR_LEN 32
#define DRM_CONNECTOR_MAX_ENCODER 3
@@ -520,8 +528,7 @@ enum drm_connector_force {
* @funcs: connector control functions
* @user_modes: user added mode list
* @edid_blob_ptr: DRM property containing EDID if present
- * @property_ids: property tracking for this connector
- * @property_values: value pointers or data for properties
+ * @properties: property tracking for this connector
* @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
* @dpms: current dpms state
* @helper_private: mid-layer private data
@@ -565,8 +572,7 @@ struct drm_connector {
struct list_head user_modes;
struct drm_property_blob *edid_blob_ptr;
- u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY];
- uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
+ struct drm_object_properties properties;
uint8_t polled; /* DRM_CONNECTOR_POLL_* */
@@ -595,6 +601,7 @@ struct drm_connector {
* @update_plane: update the plane configuration
* @disable_plane: shut down the plane
* @destroy: clean up plane resources
+ * @set_property: called when a property is changed
*/
struct drm_plane_funcs {
int (*update_plane)(struct drm_plane *plane,
@@ -605,6 +612,9 @@ struct drm_plane_funcs {
uint32_t src_w, uint32_t src_h);
int (*disable_plane)(struct drm_plane *plane);
void (*destroy)(struct drm_plane *plane);
+
+ int (*set_property)(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val);
};
/**
@@ -622,6 +632,7 @@ struct drm_plane_funcs {
* @enabled: enabled flag
* @funcs: helper functions
 * @helper_private: storage for driver layer
+ * @properties: property tracking for this plane
*/
struct drm_plane {
struct drm_device *dev;
@@ -644,6 +655,8 @@ struct drm_plane {
const struct drm_plane_funcs *funcs;
void *helper_private;
+
+ struct drm_object_properties properties;
};
/**
@@ -761,7 +774,7 @@ struct drm_mode_config {
int min_width, min_height;
int max_width, max_height;
- struct drm_mode_config_funcs *funcs;
+ const struct drm_mode_config_funcs *funcs;
resource_size_t fb_base;
/* output poll support */
@@ -898,6 +911,12 @@ extern int drm_connector_property_set_value(struct drm_connector *connector,
extern int drm_connector_property_get_value(struct drm_connector *connector,
struct drm_property *property,
uint64_t *value);
+extern int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t val);
+extern int drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t *value);
extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
extern void drm_framebuffer_set_object(struct drm_device *dev,
unsigned long handle);
@@ -910,14 +929,21 @@ extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
extern bool drm_crtc_in_use(struct drm_crtc *crtc);
-extern int drm_connector_attach_property(struct drm_connector *connector,
- struct drm_property *property, uint64_t init_val);
+extern void drm_connector_attach_property(struct drm_connector *connector,
+ struct drm_property *property, uint64_t init_val);
+extern void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val);
extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values);
extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
const char *name,
const struct drm_prop_enum_list *props,
int num_values);
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values);
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max);
@@ -1012,10 +1038,11 @@ extern int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
extern int drm_edid_header_is_valid(const u8 *raw_edid);
-extern bool drm_edid_block_valid(u8 *raw_edid);
+extern bool drm_edid_block_valid(u8 *raw_edid, int block);
extern bool drm_edid_is_valid(struct edid *edid);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
- int hsize, int vsize, int fresh);
+ int hsize, int vsize, int fresh,
+ bool rb);
extern int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
@@ -1023,7 +1050,16 @@ extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp);
+extern int drm_format_num_planes(uint32_t format);
+extern int drm_format_plane_cpp(uint32_t format, int plane);
+extern int drm_format_horz_chroma_subsampling(uint32_t format);
+extern int drm_format_vert_chroma_subsampling(uint32_t format);
+
#endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 37515d1afab3..7988e55c98d0 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -44,6 +44,13 @@ enum mode_set_atomic {
ENTER_ATOMIC_MODE_SET,
};
+/**
+ * drm_crtc_helper_funcs - helper operations for CRTCs
+ * @mode_fixup: try to fixup proposed mode for this CRTC
+ * @mode_set: set this mode
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
struct drm_crtc_helper_funcs {
/*
* Control power levels on the CRTC. If the mode passed in is
@@ -76,6 +83,13 @@ struct drm_crtc_helper_funcs {
void (*disable)(struct drm_crtc *crtc);
};
+/**
+ * drm_encoder_helper_funcs - helper operations for encoders
+ * @mode_fixup: try to fixup proposed mode for this encoder
+ * @mode_set: set this mode
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
struct drm_encoder_helper_funcs {
void (*dpms)(struct drm_encoder *encoder, int mode);
void (*save)(struct drm_encoder *encoder);
@@ -97,6 +111,13 @@ struct drm_encoder_helper_funcs {
void (*disable)(struct drm_encoder *encoder);
};
+/**
+ * drm_connector_helper_funcs - helper operations for connectors
+ * @get_modes: get mode list for this connector
+ * @mode_valid: is this mode valid on the given connector?
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
struct drm_connector_helper_funcs {
int (*get_modes)(struct drm_connector *connector);
int (*mode_valid)(struct drm_connector *connector,
@@ -145,6 +166,4 @@ extern void drm_helper_hpd_irq_event(struct drm_device *dev);
extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
-extern int drm_format_num_planes(uint32_t format);
-
#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 93df2d72750b..1744b18c06b3 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -72,6 +72,10 @@
#define DP_MAIN_LINK_CHANNEL_CODING 0x006
+#define DP_DOWN_STREAM_PORT_COUNT 0x007
+#define DP_PORT_COUNT_MASK 0x0f
+#define DP_OUI_SUPPORT (1 << 7)
+
#define DP_EDP_CONFIGURATION_CAP 0x00d
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e
@@ -213,6 +217,10 @@
# define DP_TEST_NAK (1 << 1)
# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
+#define DP_SOURCE_OUI 0x300
+#define DP_SINK_OUI 0x400
+#define DP_BRANCH_OUI 0x500
+
#define DP_SET_POWER 0x600
# define DP_SET_POWER_D0 0x1
# define DP_SET_POWER_D3 0x2
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index bcb9a66baa8c..0cac551c5347 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -90,12 +90,26 @@ struct detailed_data_monitor_range {
u8 min_hfreq_khz;
u8 max_hfreq_khz;
u8 pixel_clock_mhz; /* need to multiply by 10 */
- __le16 sec_gtf_toggle; /* A000=use above, 20=use below */
- u8 hfreq_start_khz; /* need to multiply by 2 */
- u8 c; /* need to divide by 2 */
- __le16 m;
- u8 k;
- u8 j; /* need to divide by 2 */
+ u8 flags;
+ union {
+ struct {
+ u8 reserved;
+ u8 hfreq_start_khz; /* need to multiply by 2 */
+ u8 c; /* need to divide by 2 */
+ __le16 m;
+ u8 k;
+ u8 j; /* need to divide by 2 */
+ } __attribute__((packed)) gtf2;
+ struct {
+ u8 version;
+ u8 data1; /* high 6 bits: extra clock resolution */
+ u8 data2; /* plus low 2 of above: max hactive */
+ u8 supported_aspects;
+ u8 flags; /* preferred aspect and blanking support */
+ u8 supported_scalings;
+ u8 preferred_refresh;
+ } __attribute__((packed)) cvt;
+ } formula;
} __attribute__((packed));
struct detailed_data_wpindex {
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index 4a08a664ff1f..0ead502e17d2 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -37,6 +37,7 @@ typedef union dfixed {
#define dfixed_init(A) { .full = dfixed_const((A)) }
#define dfixed_init_half(A) { .full = dfixed_const_half((A)) }
#define dfixed_trunc(A) ((A).full >> 12)
+#define dfixed_frac(A) ((A).full & ((1 << 12) - 1))
static inline u32 dfixed_floor(fixed20_12 A)
{
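
dfixed_frac() is the counterpart of dfixed_trunc(): with 12 fractional bits, full >> 12 is the integer part and the low 12 bits are the remainder. A tiny worked example using only the macros shown here:

#include <drm/drm_fixed.h>

static void dfixed_example(void)
{
        fixed20_12 v;

        v.full = (2 << 12) + (1 << 11);         /* 2.5 in 20.12 form: 0x2800 */

        /* dfixed_trunc(v) == 2     -- 0x2800 >> 12                     */
        /* dfixed_frac(v)  == 0x800 -- 0x2800 & ((1 << 12) - 1), i.e. 0.5 */
}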
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
index 6bd325fedc87..19a240446fca 100644
--- a/include/drm/drm_mem_util.h
+++ b/include/drm/drm_mem_util.h
@@ -31,7 +31,7 @@
static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
{
- if (size != 0 && nmemb > ULONG_MAX / size)
+ if (size != 0 && nmemb > SIZE_MAX / size)
return NULL;
if (size * nmemb <= PAGE_SIZE)
@@ -44,7 +44,7 @@ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
{
- if (size != 0 && nmemb > ULONG_MAX / size)
+ if (size != 0 && nmemb > SIZE_MAX / size)
return NULL;
if (size * nmemb <= PAGE_SIZE)
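
The switch from ULONG_MAX to SIZE_MAX ties the guard to the type the multiplication actually happens in: nmemb * size is a size_t product, and it wraps exactly when nmemb > SIZE_MAX / size (for non-zero size). The same guard in isolation, as a small sketch:

#include <linux/kernel.h>       /* SIZE_MAX */
#include <linux/types.h>

static bool mul_would_overflow(size_t nmemb, size_t size)
{
        return size != 0 && nmemb > SIZE_MAX / size;
}

/* e.g. mul_would_overflow(SIZE_MAX / 4 + 1, 4) is true,
 *      mul_would_overflow(SIZE_MAX / 4, 4) is false. */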
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 4a0aae38e160..5581980b14f6 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -230,6 +230,7 @@ struct drm_mode_get_connector {
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
#define DRM_MODE_PROP_BLOB (1<<4)
+#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
struct drm_mode_property_enum {
__u64 value;
@@ -254,6 +255,21 @@ struct drm_mode_connector_set_property {
__u32 connector_id;
};
+struct drm_mode_obj_get_properties {
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+ __u32 count_props;
+ __u32 obj_id;
+ __u32 obj_type;
+};
+
+struct drm_mode_obj_set_property {
+ __u64 value;
+ __u32 prop_id;
+ __u32 obj_id;
+ __u32 obj_type;
+};
+
struct drm_mode_get_blob {
__u32 blob_id;
__u32 length;
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index e478de4e5d56..b6d7ce92eadd 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -29,6 +29,8 @@
#ifndef _EXYNOS_DRM_H_
#define _EXYNOS_DRM_H_
+#include "drm.h"
+
/**
* User-desired buffer creation information structure.
*
@@ -75,6 +77,21 @@ struct drm_exynos_gem_mmap {
};
/**
+ * A structure for gem information.
+ *
+ * @handle: a handle to gem object created.
+ * @flags: flag value including memory type and cache attribute and
+ * this value would be set by driver.
+ * @size: size to memory region allocated by gem and this size would
+ * be set by driver.
+ */
+struct drm_exynos_gem_info {
+ unsigned int handle;
+ unsigned int flags;
+ uint64_t size;
+};
+
+/**
* A structure for user connection request of virtual display.
*
 * @connection: indicate whether doing connection or not by user.
@@ -95,18 +112,64 @@ struct drm_exynos_plane_set_zpos {
/* memory type definitions. */
enum e_drm_exynos_gem_mem_type {
+ /* Physically Continuous memory and used as default. */
+ EXYNOS_BO_CONTIG = 0 << 0,
/* Physically Non-Continuous memory. */
EXYNOS_BO_NONCONTIG = 1 << 0,
- EXYNOS_BO_MASK = EXYNOS_BO_NONCONTIG
+ /* non-cachable mapping and used as default. */
+ EXYNOS_BO_NONCACHABLE = 0 << 1,
+ /* cachable mapping. */
+ EXYNOS_BO_CACHABLE = 1 << 1,
+ /* write-combine mapping. */
+ EXYNOS_BO_WC = 1 << 2,
+ EXYNOS_BO_MASK = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE |
+ EXYNOS_BO_WC
+};
+
+struct drm_exynos_g2d_get_ver {
+ __u32 major;
+ __u32 minor;
+};
+
+struct drm_exynos_g2d_cmd {
+ __u32 offset;
+ __u32 data;
+};
+
+enum drm_exynos_g2d_event_type {
+ G2D_EVENT_NOT,
+ G2D_EVENT_NONSTOP,
+ G2D_EVENT_STOP, /* not yet */
+};
+
+struct drm_exynos_g2d_set_cmdlist {
+ __u64 cmd;
+ __u64 cmd_gem;
+ __u32 cmd_nr;
+ __u32 cmd_gem_nr;
+
+ /* for g2d event */
+ __u64 event_type;
+ __u64 user_data;
+};
+
+struct drm_exynos_g2d_exec {
+ __u64 async;
};
#define DRM_EXYNOS_GEM_CREATE 0x00
#define DRM_EXYNOS_GEM_MAP_OFFSET 0x01
#define DRM_EXYNOS_GEM_MMAP 0x02
/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
+#define DRM_EXYNOS_GEM_GET 0x04
#define DRM_EXYNOS_PLANE_SET_ZPOS 0x06
#define DRM_EXYNOS_VIDI_CONNECTION 0x07
+/* G2D */
+#define DRM_EXYNOS_G2D_GET_VER 0x20
+#define DRM_EXYNOS_G2D_SET_CMDLIST 0x21
+#define DRM_EXYNOS_G2D_EXEC 0x22
+
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
@@ -116,12 +179,34 @@ enum e_drm_exynos_gem_mem_type {
#define DRM_IOCTL_EXYNOS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap)
+#define DRM_IOCTL_EXYNOS_GEM_GET DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info)
+
#define DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_PLANE_SET_ZPOS, struct drm_exynos_plane_set_zpos)
#define DRM_IOCTL_EXYNOS_VIDI_CONNECTION DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection)
+#define DRM_IOCTL_EXYNOS_G2D_GET_VER DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_G2D_GET_VER, struct drm_exynos_g2d_get_ver)
+#define DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_G2D_SET_CMDLIST, struct drm_exynos_g2d_set_cmdlist)
+#define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
+
+/* EXYNOS specific events */
+#define DRM_EXYNOS_G2D_EVENT 0x80000000
+
+struct drm_exynos_g2d_event {
+ struct drm_event base;
+ __u64 user_data;
+ __u32 tv_sec;
+ __u32 tv_usec;
+ __u32 cmdlist_no;
+ __u32 reserved;
+};
+
#ifdef __KERNEL__
/**
@@ -169,16 +254,14 @@ struct exynos_drm_common_hdmi_pd {
/**
* Platform Specific Structure for DRM based HDMI core.
*
- * @timing: default video mode for initializing
- * @default_win: default window layer number to be used for UI.
- * @bpp: default bit per pixel.
* @is_v13: set if hdmi version 13 is.
+ * @cfg_hpd: function pointer to configure hdmi hotplug detection pin
+ * @get_hpd: function pointer to get value of hdmi hotplug detection pin
*/
struct exynos_drm_hdmi_pdata {
- struct fb_videomode timing;
- unsigned int default_win;
- unsigned int bpp;
- unsigned int is_v13:1;
+ bool is_v13;
+ void (*cfg_hpd)(bool external);
+ int (*get_hpd)(void);
};
#endif /* __KERNEL__ */
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index da929bb5b788..f3f82242bf1d 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -296,7 +296,8 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
#define I915_PARAM_HAS_RELAXED_DELTA 15
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
-#define I915_PARAM_HAS_LLC 17
+#define I915_PARAM_HAS_LLC 17
+#define I915_PARAM_HAS_ALIASING_PPGTT 18
typedef struct drm_i915_getparam {
int param;
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 7c491b4bcf65..58056865b8e9 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -926,7 +926,6 @@ struct drm_radeon_cs_chunk {
};
/* drm_radeon_cs_reloc.flags */
-#define RADEON_RELOC_DONT_SYNC 0x01
struct drm_radeon_cs_reloc {
uint32_t handle;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 974c8f801c39..e15f2a89a270 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -124,11 +124,15 @@ struct ttm_mem_reg {
*
* @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
* but they cannot be accessed from user-space. For kernel-only use.
+ *
+ * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
+ * driver.
*/
enum ttm_bo_type {
ttm_bo_type_device,
- ttm_bo_type_kernel
+ ttm_bo_type_kernel,
+ ttm_bo_type_sg
};
struct ttm_tt;
@@ -271,6 +275,8 @@ struct ttm_buffer_object {
unsigned long offset;
uint32_t cur_placement;
+
+ struct sg_table *sg;
};
/**
@@ -503,6 +509,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
 bool interruptible,
struct file *persistent_swap_storage,
size_t acc_size,
+ struct sg_table *sg,
void (*destroy) (struct ttm_buffer_object *));
/**
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index d43e892307ff..a05f1b55714d 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -81,6 +81,7 @@ struct ttm_backend_func {
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
#define TTM_PAGE_FLAG_DMA32 (1 << 7)
+#define TTM_PAGE_FLAG_SG (1 << 8)
enum ttm_caching_state {
tt_uncached,
@@ -116,6 +117,7 @@ struct ttm_tt {
struct page **pages;
uint32_t page_flags;
unsigned long num_pages;
+ struct sg_table *sg; /* for SG objects via dma-buf */
struct ttm_bo_global *glob;
struct ttm_backend *be;
struct file *swap_storage;
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 39737839ce29..8760be30b375 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -225,6 +225,8 @@ header-y += kd.h
header-y += kdev_t.h
header-y += kernel.h
header-y += kernelcapi.h
+header-y += kernel-page-flags.h
+header-y += kexec.h
header-y += keyboard.h
header-y += keyctl.h
header-y += l2tp.h
@@ -270,6 +272,7 @@ header-y += netfilter_ipv4.h
header-y += netfilter_ipv6.h
header-y += netlink.h
header-y += netrom.h
+header-y += nfc.h
header-y += nfs.h
header-y += nfs2.h
header-y += nfs3.h
@@ -383,6 +386,7 @@ header-y += utime.h
header-y += utsname.h
header-y += uuid.h
header-y += uvcvideo.h
+header-y += v4l2-dv-timings.h
header-y += v4l2-mediabus.h
header-y += v4l2-subdev.h
header-y += veth.h
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index e64ce2cfee99..02549017212a 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -92,6 +92,8 @@ struct pl08x_bus_data {
* right now
* @serving: the virtual channel currently being served by this physical
* channel
+ * @locked: channel unavailable for the system, e.g. dedicated to secure
+ * world
*/
struct pl08x_phy_chan {
unsigned int id;
@@ -99,6 +101,7 @@ struct pl08x_phy_chan {
spinlock_t lock;
int signal;
struct pl08x_dma_chan *serving;
+ bool locked;
};
/**
diff --git a/include/linux/apple_bl.h b/include/linux/apple_bl.h
index 47bedc0eee69..0a95e730fcea 100644
--- a/include/linux/apple_bl.h
+++ b/include/linux/apple_bl.h
@@ -5,7 +5,7 @@
#ifndef _LINUX_APPLE_BL_H
#define _LINUX_APPLE_BL_H
-#ifdef CONFIG_BACKLIGHT_APPLE
+#if defined(CONFIG_BACKLIGHT_APPLE) || defined(CONFIG_BACKLIGHT_APPLE_MODULE)
extern int apple_bl_register(void);
extern void apple_bl_unregister(void);
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index feb912196745..1c504ca5bdb3 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -67,6 +67,10 @@ int bgpio_remove(struct bgpio_chip *bgc);
int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
unsigned long sz, void __iomem *dat, void __iomem *set,
void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
- bool big_endian);
+ unsigned long flags);
+
+#define BGPIOF_BIG_ENDIAN BIT(0)
+#define BGPIOF_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */
+#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
#endif /* __BASIC_MMIO_GPIO_H */
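
bgpio_init() now takes a flags word where it used to take a lone big-endian bool, so existing callers pass BGPIOF_BIG_ENDIAN instead of true, and hardware whose set/dir registers cannot be read back can add the new BGPIOF_UNREADABLE_REG_* bits. A minimal sketch with made-up register offsets:

#include <linux/basic_mmio_gpio.h>

/* Sketch: 32-bit big-endian GPIO bank whose set register is write-only. */
static int my_bank_init(struct device *dev, void __iomem *base,
                        struct bgpio_chip *bgc)
{
        return bgpio_init(bgc, dev, 4,          /* 4-byte wide registers */
                          base + 0x00,          /* dat */
                          base + 0x04,          /* set */
                          NULL,                 /* clr: not present */
                          base + 0x08,          /* dirout */
                          NULL,                 /* dirin: not present */
                          BGPIOF_BIG_ENDIAN | BGPIOF_UNREADABLE_REG_SET);
}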
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 98bb2901d7b7..8deaf6d050c3 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -26,6 +26,11 @@ struct bcma_chipinfo {
u8 pkg;
};
+struct bcma_boardinfo {
+ u16 vendor;
+ u16 type;
+};
+
enum bcma_clkmode {
BCMA_CLKMODE_FAST,
BCMA_CLKMODE_DYNAMIC,
@@ -199,6 +204,8 @@ struct bcma_bus {
struct bcma_chipinfo chipinfo;
+ struct bcma_boardinfo boardinfo;
+
struct bcma_device *mapped_core;
struct list_head cores;
u8 nr_cores;
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 46c71e27d31f..41da581e1612 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -87,6 +87,13 @@ struct pci_dev;
#define BCMA_CORE_PCI_PCICFG2 0x0600 /* PCI config space 2 (rev >= 8) */
#define BCMA_CORE_PCI_PCICFG3 0x0700 /* PCI config space 3 (rev >= 8) */
#define BCMA_CORE_PCI_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2)) /* SPROM shadow area (72 bytes) */
+#define BCMA_CORE_PCI_SPROM_PI_OFFSET 0 /* first word */
+#define BCMA_CORE_PCI_SPROM_PI_MASK 0xf000 /* bit 15:12 */
+#define BCMA_CORE_PCI_SPROM_PI_SHIFT 12 /* bit 15:12 */
+#define BCMA_CORE_PCI_SPROM_MISC_CONFIG 5 /* word 5 */
+#define BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST 0x8000 /* bit 15 */
+#define BCMA_CORE_PCI_SPROM_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */
+#define BCMA_CORE_PCI_SPROM_CLKREQ_ENB 0x0800 /* bit 11 */
/* SBtoPCIx */
#define BCMA_CORE_PCI_SBTOPCI_MEM 0x00000000
@@ -133,6 +140,7 @@ struct pci_dev;
#define BCMA_CORE_PCI_DLLP_LRREG 0x120 /* Link Replay */
#define BCMA_CORE_PCI_DLLP_LACKTOREG 0x124 /* Link Ack Timeout */
#define BCMA_CORE_PCI_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */
+#define BCMA_CORE_PCI_ASPMTIMER_EXTEND 0x01000000 /* > rev7: enable extend ASPM timer */
#define BCMA_CORE_PCI_DLLP_RTRYWPREG 0x12C /* Retry buffer write ptr */
#define BCMA_CORE_PCI_DLLP_RTRYRPREG 0x130 /* Retry buffer Read ptr */
#define BCMA_CORE_PCI_DLLP_RTRYPPREG 0x134 /* Retry buffer Purged ptr */
@@ -201,12 +209,15 @@ struct bcma_drv_pci {
};
/* Register access */
+#define pcicore_read16(pc, offset) bcma_read16((pc)->core, offset)
#define pcicore_read32(pc, offset) bcma_read32((pc)->core, offset)
+#define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val)
#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val)
extern void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc);
extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
struct bcma_device *core, bool enable);
+extern void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend);
extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 4d94eb8bcbcc..26435890dc87 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -269,6 +269,14 @@ extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set
extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
+#ifdef CONFIG_BLK_CGROUP
+int bio_associate_current(struct bio *bio);
+void bio_disassociate_task(struct bio *bio);
+#else /* CONFIG_BLK_CGROUP */
+static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
+static inline void bio_disassociate_task(struct bio *bio) { }
+#endif /* CONFIG_BLK_CGROUP */
+
/*
* bio_set is used to allow other portions of the IO system to
* allocate their own private memory pools for bio and iovec structures.
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 4053cbd4490e..0edb65dd8edd 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -14,6 +14,8 @@ struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
+struct io_context;
+struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);
@@ -66,6 +68,14 @@ struct bio {
bio_end_io_t *bi_end_io;
void *bi_private;
+#ifdef CONFIG_BLK_CGROUP
+ /*
+ * Optional ioc and css associated with this bio. Put on bio
+ * release. Read comment on top of bio_associate_current().
+ */
+ struct io_context *bi_ioc;
+ struct cgroup_subsys_state *bi_css;
+#endif
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4d4ac24a263e..ba43f408baa3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -32,10 +32,17 @@ struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
+struct blkcg_gq;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
+/*
+ * Maximum number of blkcg policies allowed to be registered concurrently.
+ * Defined here to simplify include dependency.
+ */
+#define BLKCG_MAX_POLS 2
+
struct request;
typedef void (rq_end_io_fn)(struct request *, int);
@@ -363,6 +370,11 @@ struct request_queue {
struct list_head timeout_list;
struct list_head icq_list;
+#ifdef CONFIG_BLK_CGROUP
+ DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
+ struct blkcg_gq *root_blkg;
+ struct list_head blkg_list;
+#endif
struct queue_limits limits;
@@ -390,12 +402,17 @@ struct request_queue {
struct mutex sysfs_lock;
+ int bypass_depth;
+
#if defined(CONFIG_BLK_DEV_BSG)
bsg_job_fn *bsg_job_fn;
int bsg_job_size;
struct bsg_class_device bsg_dev;
#endif
+#ifdef CONFIG_BLK_CGROUP
+ struct list_head all_q_node;
+#endif
#ifdef CONFIG_BLK_DEV_THROTTLING
/* Throttle data */
struct throtl_data *td;
@@ -407,7 +424,7 @@ struct request_queue {
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
-#define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */
@@ -491,6 +508,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 66d3e954eb6c..324fe08ea3b1 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -135,9 +135,6 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
int flags);
-extern void *alloc_bootmem_section(unsigned long size,
- unsigned long section_nr);
-
#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
extern void *alloc_remap(int nid, unsigned long size);
#else
@@ -154,7 +151,8 @@ extern void *alloc_large_system_hash(const char *tablename,
int flags,
unsigned int *_hash_shift,
unsigned int *_hash_mask,
- unsigned long limit);
+ unsigned long low_limit,
+ unsigned long high_limit);
#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 72961c39576a..aaac4bba6f5c 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -30,6 +30,13 @@ struct pt_regs;
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
+/*
+ * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the
+ * expression but avoids the generation of any code, even if that expression
+ * has side-effects.
+ */
+#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))
+
/**
* BUILD_BUG_ON - break compile if a condition is true.
* @condition: the condition which the compiler should know is false.
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index aa13392a7efb..d4080f309b56 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -14,6 +14,14 @@
struct ceph_auth_client;
struct ceph_authorizer;
+struct ceph_auth_handshake {
+ struct ceph_authorizer *authorizer;
+ void *authorizer_buf;
+ size_t authorizer_buf_len;
+ void *authorizer_reply_buf;
+ size_t authorizer_reply_buf_len;
+};
+
struct ceph_auth_client_ops {
const char *name;
@@ -43,9 +51,7 @@ struct ceph_auth_client_ops {
* the response to authenticate the service.
*/
int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type,
- struct ceph_authorizer **a,
- void **buf, size_t *len,
- void **reply_buf, size_t *reply_len);
+ struct ceph_auth_handshake *auth);
int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
struct ceph_authorizer *a, size_t len);
void (*destroy_authorizer)(struct ceph_auth_client *ac,
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index b8c60694b2b0..e81ab30d4896 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -65,7 +65,7 @@ struct ceph_file_layout {
__le32 fl_object_stripe_unit; /* UNUSED. for per-object parity, if any */
/* object -> pg layout */
- __le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */
+ __le32 fl_unused; /* unused; used to be preferred primary (-1) */
__le32 fl_pg_pool; /* namespace, crush ruleset, rep level */
} __attribute__ ((packed));
@@ -384,7 +384,7 @@ union ceph_mds_request_args {
__le32 stripe_count; /* ... */
__le32 object_size;
__le32 file_replication;
- __le32 preferred;
+ __le32 unused; /* used to be preferred osd */
} __attribute__ ((packed)) open;
struct {
__le32 flags;
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index 220ae21e819b..d8615dee5808 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -46,9 +46,14 @@ static inline void ceph_decode_copy(void **p, void *pv, size_t n)
/*
* bounds check input.
*/
+static inline int ceph_has_room(void **p, void *end, size_t n)
+{
+ return end >= *p && n <= end - *p;
+}
+
#define ceph_decode_need(p, end, n, bad) \
do { \
- if (unlikely(*(p) + (n) > (end))) \
+ if (!likely(ceph_has_room(p, end, n))) \
goto bad; \
} while (0)
@@ -167,7 +172,7 @@ static inline void ceph_encode_string(void **p, void *end,
#define ceph_encode_need(p, end, n, bad) \
do { \
- if (unlikely(*(p) + (n) > (end))) \
+ if (!likely(ceph_has_room(p, end, n))) \
goto bad; \
} while (0)
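
ceph_has_room() replaces the open-coded *(p) + (n) > (end) test, which can wrap around when a corrupted message supplies a huge n; comparing n against end - *p cannot overflow. A minimal sketch of the usual decode pattern built on it, assuming the ceph_decode_32() helper elsewhere in this header advances *p by four bytes:

/* Sketch: bounds-checked decode of a length-prefixed blob. */
static int decode_blob(void **p, void *end, void *dst, size_t dst_len)
{
        u32 len;

        ceph_decode_need(p, end, sizeof(u32), bad);     /* room for the length */
        len = ceph_decode_32(p);

        ceph_decode_need(p, end, len, bad);             /* room for the payload */
        if (len > dst_len)
                goto bad;
        ceph_decode_copy(p, dst, len);
        return 0;

bad:
        return -ERANGE;
}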
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 3bff047f6b0f..2521a95fa6d9 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -25,9 +25,9 @@ struct ceph_connection_operations {
void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m);
/* authorize an outgoing connection */
- int (*get_authorizer) (struct ceph_connection *con,
- void **buf, int *len, int *proto,
- void **reply_buf, int *reply_len, int force_new);
+ struct ceph_auth_handshake *(*get_authorizer) (
+ struct ceph_connection *con,
+ int *proto, int force_new);
int (*verify_authorizer_reply) (struct ceph_connection *con, int len);
int (*invalidate_authorizer)(struct ceph_connection *con);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 7c05ac202d90..cedfb1a8434a 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -6,9 +6,10 @@
#include <linux/mempool.h>
#include <linux/rbtree.h>
-#include "types.h"
-#include "osdmap.h"
-#include "messenger.h"
+#include <linux/ceph/types.h>
+#include <linux/ceph/osdmap.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/auth.h>
/*
* Maximum object name size
@@ -40,9 +41,7 @@ struct ceph_osd {
struct list_head o_requests;
struct list_head o_linger_requests;
struct list_head o_osd_lru;
- struct ceph_authorizer *o_authorizer;
- void *o_authorizer_buf, *o_authorizer_reply_buf;
- size_t o_authorizer_buf_len, o_authorizer_reply_buf_len;
+ struct ceph_auth_handshake o_auth;
unsigned long lru_ttl;
int o_marked_for_keepalive;
struct list_head o_keepalive_item;
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index ba4c205cbb01..311ef8d6aa9e 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -65,8 +65,6 @@ struct ceph_osdmap {
#define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash))
#define ceph_file_layout_object_su(l) \
((__s32)le32_to_cpu((l).fl_object_stripe_unit))
-#define ceph_file_layout_pg_preferred(l) \
- ((__s32)le32_to_cpu((l).fl_pg_preferred))
#define ceph_file_layout_pg_pool(l) \
((__s32)le32_to_cpu((l).fl_pg_pool))
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
index 5e4312b6f5cc..eb3f84bc5325 100644
--- a/include/linux/clk-private.h
+++ b/include/linux/clk-private.h
@@ -30,7 +30,7 @@ struct clk {
const struct clk_ops *ops;
struct clk_hw *hw;
struct clk *parent;
- char **parent_names;
+ const char **parent_names;
struct clk **parents;
u8 num_parents;
unsigned long rate;
@@ -55,12 +55,22 @@ struct clk {
* alternative macro for static initialization
*/
-extern struct clk_ops clk_fixed_rate_ops;
+#define DEFINE_CLK(_name, _ops, _flags, _parent_names, \
+ _parents) \
+ static struct clk _name = { \
+ .name = #_name, \
+ .ops = &_ops, \
+ .hw = &_name##_hw.hw, \
+ .parent_names = _parent_names, \
+ .num_parents = ARRAY_SIZE(_parent_names), \
+ .parents = _parents, \
+ .flags = _flags, \
+ }
#define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate, \
_fixed_rate_flags) \
static struct clk _name; \
- static char *_name##_parent_names[] = {}; \
+ static const char *_name##_parent_names[] = {}; \
static struct clk_fixed_rate _name##_hw = { \
.hw = { \
.clk = &_name, \
@@ -68,23 +78,14 @@ extern struct clk_ops clk_fixed_rate_ops;
.fixed_rate = _rate, \
.flags = _fixed_rate_flags, \
}; \
- static struct clk _name = { \
- .name = #_name, \
- .ops = &clk_fixed_rate_ops, \
- .hw = &_name##_hw.hw, \
- .parent_names = _name##_parent_names, \
- .num_parents = \
- ARRAY_SIZE(_name##_parent_names), \
- .flags = _flags, \
- };
-
-extern struct clk_ops clk_gate_ops;
+ DEFINE_CLK(_name, clk_fixed_rate_ops, _flags, \
+ _name##_parent_names, NULL);
#define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr, \
_flags, _reg, _bit_idx, \
_gate_flags, _lock) \
static struct clk _name; \
- static char *_name##_parent_names[] = { \
+ static const char *_name##_parent_names[] = { \
_parent_name, \
}; \
static struct clk *_name##_parents[] = { \
@@ -99,24 +100,14 @@ extern struct clk_ops clk_gate_ops;
.flags = _gate_flags, \
.lock = _lock, \
}; \
- static struct clk _name = { \
- .name = #_name, \
- .ops = &clk_gate_ops, \
- .hw = &_name##_hw.hw, \
- .parent_names = _name##_parent_names, \
- .num_parents = \
- ARRAY_SIZE(_name##_parent_names), \
- .parents = _name##_parents, \
- .flags = _flags, \
- };
-
-extern struct clk_ops clk_divider_ops;
+ DEFINE_CLK(_name, clk_gate_ops, _flags, \
+ _name##_parent_names, _name##_parents);
#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
_flags, _reg, _shift, _width, \
_divider_flags, _lock) \
static struct clk _name; \
- static char *_name##_parent_names[] = { \
+ static const char *_name##_parent_names[] = { \
_parent_name, \
}; \
static struct clk *_name##_parents[] = { \
@@ -132,18 +123,8 @@ extern struct clk_ops clk_divider_ops;
.flags = _divider_flags, \
.lock = _lock, \
}; \
- static struct clk _name = { \
- .name = #_name, \
- .ops = &clk_divider_ops, \
- .hw = &_name##_hw.hw, \
- .parent_names = _name##_parent_names, \
- .num_parents = \
- ARRAY_SIZE(_name##_parent_names), \
- .parents = _name##_parents, \
- .flags = _flags, \
- };
-
-extern struct clk_ops clk_mux_ops;
+ DEFINE_CLK(_name, clk_divider_ops, _flags, \
+ _name##_parent_names, _name##_parents);
#define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags, \
_reg, _shift, _width, \
@@ -159,16 +140,28 @@ extern struct clk_ops clk_mux_ops;
.flags = _mux_flags, \
.lock = _lock, \
}; \
- static struct clk _name = { \
- .name = #_name, \
- .ops = &clk_mux_ops, \
- .hw = &_name##_hw.hw, \
- .parent_names = _parent_names, \
- .num_parents = \
- ARRAY_SIZE(_parent_names), \
- .parents = _parents, \
- .flags = _flags, \
- };
+ DEFINE_CLK(_name, clk_mux_ops, _flags, _parent_names, \
+ _parents);
+
+#define DEFINE_CLK_FIXED_FACTOR(_name, _parent_name, \
+ _parent_ptr, _flags, \
+ _mult, _div) \
+ static struct clk _name; \
+ static const char *_name##_parent_names[] = { \
+ _parent_name, \
+ }; \
+ static struct clk *_name##_parents[] = { \
+ _parent_ptr, \
+ }; \
+ static struct clk_fixed_factor _name##_hw = { \
+ .hw = { \
+ .clk = &_name, \
+ }, \
+ .mult = _mult, \
+ .div = _div, \
+ }; \
+ DEFINE_CLK(_name, clk_fixed_factor_ops, _flags, \
+ _name##_parent_names, _name##_parents);
/**
* __clk_init - initialize the data structures in a struct clk
@@ -189,8 +182,12 @@ extern struct clk_ops clk_mux_ops;
*
* It is not necessary to call clk_register if __clk_init is used directly with
* statically initialized clock data.
+ *
+ * Returns 0 on success, otherwise an error code.
*/
-void __clk_init(struct device *dev, struct clk *clk);
+int __clk_init(struct device *dev, struct clk *clk);
+
+struct clk *__clk_register(struct device *dev, struct clk_hw *hw);
#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PRIVATE_H */
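
The per-type DEFINE_CLK_* macros above now share a single DEFINE_CLK() body and const parent-name arrays; the comment block in this header still allows such statically initialized clocks to skip clk_register() and be handed straight to __clk_init(), which now reports failure. A minimal sketch for a platform file that is allowed to include clk-private.h (the clock name and rate are made up):

#include <linux/clk-private.h>
#include <linux/init.h>
#include <linux/printk.h>

/* 24 MHz root oscillator, statically initialized. */
DEFINE_CLK_FIXED_RATE(osc24m, CLK_IS_ROOT, 24000000, 0);

static void __init my_platform_clk_init(void)
{
        if (__clk_init(NULL, &osc24m))
                pr_warn("osc24m: __clk_init failed\n");
}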
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 5508897ad376..4a0b483986c3 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -15,19 +15,6 @@
#ifdef CONFIG_COMMON_CLK
-/**
- * struct clk_hw - handle for traversing from a struct clk to its corresponding
- * hardware-specific structure. struct clk_hw should be declared within struct
- * clk_foo and then referenced by the struct clk instance that uses struct
- * clk_foo's clk_ops
- *
- * clk: pointer to the struct clk instance that points back to this struct
- * clk_hw instance
- */
-struct clk_hw {
- struct clk *clk;
-};
-
/*
* flags used across common struct clk. these flags should only affect the
* top-level framework. custom flags for dealing with hardware specifics
@@ -39,6 +26,8 @@ struct clk_hw {
#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
#define CLK_IS_ROOT BIT(4) /* root clk, has no parent */
+struct clk_hw;
+
/**
* struct clk_ops - Callback operations for hardware clocks; these are to
* be provided by the clock implementation, and will be called by drivers
@@ -88,19 +77,11 @@ struct clk_hw {
* array index into the value programmed into the hardware.
* Returns 0 on success, -EERROR otherwise.
*
- * @set_rate: Change the rate of this clock. If this callback returns
- * CLK_SET_RATE_PARENT, the rate change will be propagated to the
- * parent clock (which may propagate again if the parent clock
- * also sets this flag). The requested rate of the parent is
- * passed back from the callback in the second 'unsigned long *'
- * argument. Note that it is up to the hardware clock's set_rate
- * implementation to insure that clocks do not run out of spec
- * when propgating the call to set_rate up to the parent. One way
- * to do this is to gate the clock (via clk_disable and/or
- * clk_unprepare) before calling clk_set_rate, then ungating it
- * afterward. If your clock also has the CLK_GATE_SET_RATE flag
- * set then this will insure safety. Returns 0 on success,
- * -EERROR otherwise.
+ * @set_rate: Change the rate of this clock. The requested rate is specified
+ * by the second argument, which should typically be the return
+ * value of a .round_rate call. The third argument gives the parent
+ * rate, which is likely helpful for most .set_rate implementations.
+ * Returns 0 on success, -EERROR otherwise.
*
* The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
* implementations to split any work between atomic (enable) and sleepable
@@ -125,10 +106,46 @@ struct clk_ops {
unsigned long *);
int (*set_parent)(struct clk_hw *hw, u8 index);
u8 (*get_parent)(struct clk_hw *hw);
- int (*set_rate)(struct clk_hw *hw, unsigned long);
+ int (*set_rate)(struct clk_hw *hw, unsigned long,
+ unsigned long);
void (*init)(struct clk_hw *hw);
};
+/**
+ * struct clk_init_data - holds init data that's common to all clocks and is
+ * shared between the clock provider and the common clock framework.
+ *
+ * @name: clock name
+ * @ops: operations this clock supports
+ * @parent_names: array of string names for all possible parents
+ * @num_parents: number of possible parents
+ * @flags: framework-level hints and quirks
+ */
+struct clk_init_data {
+ const char *name;
+ const struct clk_ops *ops;
+ const char **parent_names;
+ u8 num_parents;
+ unsigned long flags;
+};
+
+/**
+ * struct clk_hw - handle for traversing from a struct clk to its corresponding
+ * hardware-specific structure. struct clk_hw should be declared within struct
+ * clk_foo and then referenced by the struct clk instance that uses struct
+ * clk_foo's clk_ops
+ *
+ * @clk: pointer to the struct clk instance that points back to this struct
+ * clk_hw instance
+ *
+ * @init: pointer to struct clk_init_data that contains the init data shared
+ * with the common clock framework.
+ */
+struct clk_hw {
+ struct clk *clk;
+ struct clk_init_data *init;
+};
+
/*
* DOC: Basic clock implementations common to many platforms
*
@@ -149,6 +166,7 @@ struct clk_fixed_rate {
u8 flags;
};
+extern const struct clk_ops clk_fixed_rate_ops;
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate);
@@ -165,7 +183,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
* Clock which can gate its output. Implements .enable & .disable
*
* Flags:
- * CLK_GATE_SET_DISABLE - by default this clock sets the bit at bit_idx to
+ * CLK_GATE_SET_TO_DISABLE - by default this clock sets the bit at bit_idx to
* enable the clock. Setting this flag does the opposite: setting the bit
* disable the clock and clearing it enables the clock
*/
@@ -175,11 +193,11 @@ struct clk_gate {
u8 bit_idx;
u8 flags;
spinlock_t *lock;
- char *parent[1];
};
#define CLK_GATE_SET_TO_DISABLE BIT(0)
+extern const struct clk_ops clk_gate_ops;
struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
@@ -212,12 +230,12 @@ struct clk_divider {
u8 width;
u8 flags;
spinlock_t *lock;
- char *parent[1];
};
#define CLK_DIVIDER_ONE_BASED BIT(0)
#define CLK_DIVIDER_POWER_OF_TWO BIT(1)
+extern const struct clk_ops clk_divider_ops;
struct clk *clk_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
@@ -238,7 +256,7 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
*
* Flags:
* CLK_MUX_INDEX_ONE - register index starts at 1, not 0
- * CLK_MUX_INDEX_BITWISE - register index is a single bit (power of two)
+ * CLK_MUX_INDEX_BIT - register index is a single bit (power of two)
*/
struct clk_mux {
struct clk_hw hw;
@@ -252,29 +270,49 @@ struct clk_mux {
#define CLK_MUX_INDEX_ONE BIT(0)
#define CLK_MUX_INDEX_BIT BIT(1)
+extern const struct clk_ops clk_mux_ops;
struct clk *clk_register_mux(struct device *dev, const char *name,
- char **parent_names, u8 num_parents, unsigned long flags,
+ const char **parent_names, u8 num_parents, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_mux_flags, spinlock_t *lock);
/**
+ * struct clk_fixed_factor - fixed multiplier and divider clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @mult: multiplier
+ * @div: divider
+ *
+ * Clock with a fixed multiplier and divider. The output frequency is the
+ * parent clock rate divided by div and multiplied by mult.
+ * Implements .recalc_rate, .set_rate and .round_rate
+ */
+
+struct clk_fixed_factor {
+ struct clk_hw hw;
+ unsigned int mult;
+ unsigned int div;
+};
+
+extern struct clk_ops clk_fixed_factor_ops;
+struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div);
+
+/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
- * @name: clock name
- * @ops: operations this clock supports
* @hw: link to hardware-specific clock data
- * @parent_names: array of string names for all possible parents
- * @num_parents: number of possible parents
- * @flags: framework-level hints and quirks
*
* clk_register is the primary interface for populating the clock tree with new
* clock nodes. It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
- * rest of the clock API.
+ * rest of the clock API. In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
*/
-struct clk *clk_register(struct device *dev, const char *name,
- const struct clk_ops *ops, struct clk_hw *hw,
- char **parent_names, u8 num_parents, unsigned long flags);
+struct clk *clk_register(struct device *dev, struct clk_hw *hw);
+
+void clk_unregister(struct clk *clk);
/* helper functions */
const char *__clk_get_name(struct clk *clk);
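
To make the reworked registration interface above concrete, here is a minimal sketch, assuming a hypothetical clk_foo wrapper: the provider fills a struct clk_init_data, points hw.init at it and calls the two-argument clk_register(). The names clk_foo, foo_clk_ops and the parent name "osc" are illustrative and not part of the patch.

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* hypothetical hardware-specific clock wrapper */
struct clk_foo {
	struct clk_hw hw;
	void __iomem *reg;
};

static const char *foo_parents[] = { "osc" };	/* illustrative parent */

static struct clk *foo_clk_register(struct device *dev, void __iomem *reg,
				    const struct clk_ops *foo_clk_ops)
{
	struct clk_foo *foo;
	struct clk_init_data init;
	struct clk *clk;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return ERR_PTR(-ENOMEM);

	init.name = "foo";
	init.ops = foo_clk_ops;
	init.parent_names = foo_parents;
	init.num_parents = ARRAY_SIZE(foo_parents);
	init.flags = 0;

	foo->reg = reg;
	foo->hw.init = &init;

	clk = clk_register(dev, &foo->hw);
	if (IS_ERR(clk))	/* the error code must be checked, per the comment above */
		kfree(foo);
	return clk;
}
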
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 70cf722ac3af..ad5c43e8ae8a 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -81,7 +81,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
-#endif /* !CONFIG_COMMON_CLK */
+#endif
/**
* clk_get - lookup and obtain a reference to a clock producer.
@@ -252,7 +252,7 @@ void devm_clk_put(struct device *dev, struct clk *clk);
* Returns rounded clock rate in Hz, or negative errno.
*/
long clk_round_rate(struct clk *clk, unsigned long rate);
-
+
/**
* clk_set_rate - set the clock rate for a clock source
* @clk: clock source
@@ -261,7 +261,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
* Returns success (0) or negative errno.
*/
int clk_set_rate(struct clk *clk, unsigned long rate);
-
+
/**
* clk_set_parent - set the parent clock source for this clock
* @clk: clock source
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 51a90b7f2d60..e988037abd2a 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -1,6 +1,8 @@
#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H
+#include <linux/node.h>
+
/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED 0
@@ -11,6 +13,23 @@
/* The full zone was compacted */
#define COMPACT_COMPLETE 3
+/*
+ * compaction supports three modes
+ *
+ * COMPACT_ASYNC_MOVABLE uses asynchronous migration and only scans
+ * MIGRATE_MOVABLE pageblocks as migration sources and targets.
+ * COMPACT_ASYNC_UNMOVABLE uses asynchronous migration and only scans
+ * MIGRATE_MOVABLE pageblocks as migration sources.
+ * MIGRATE_UNMOVABLE pageblocks are scanned as potential migration
+ * targets and are converted to MIGRATE_MOVABLE if possible.
+ * COMPACT_SYNC uses synchronous migration and scans all pageblocks
+ */
+enum compact_mode {
+ COMPACT_ASYNC_MOVABLE,
+ COMPACT_ASYNC_UNMOVABLE,
+ COMPACT_SYNC,
+};
+
#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 5d46217f84ad..4e890394ef99 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -577,8 +577,7 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
const struct compat_iovec __user *uvector,
unsigned long nr_segs,
unsigned long fast_segs, struct iovec *fast_pointer,
- struct iovec **ret_pointer,
- int check_access);
+ struct iovec **ret_pointer);
extern void __user *compat_alloc_user_space(unsigned long len);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 7230bb59a06f..2e9b9ebbeb78 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -177,6 +177,7 @@ extern void put_online_cpus(void);
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
+void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 917dc5aeb1d4..ebbed2ce6637 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -277,17 +277,13 @@ static inline void put_cred(const struct cred *_cred)
* @task: The task to query
*
* Access the objective credentials of a task. The caller must hold the RCU
- * readlock or the task must be dead and unable to change its own credentials.
+ * readlock.
*
* The result of this function should not be passed directly to get_cred();
* rather get_task_cred() should be used instead.
*/
-#define __task_cred(task) \
- ({ \
- const struct task_struct *__t = (task); \
- rcu_dereference_check(__t->real_cred, \
- task_is_dead(__t)); \
- })
+#define __task_cred(task) \
+ rcu_dereference((task)->real_cred)
/**
* get_current_cred - Get the current task's subjective credentials
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 97e435b191f4..7c4750811b96 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -151,16 +151,6 @@ struct crush_map {
struct crush_bucket **buckets;
struct crush_rule **rules;
- /*
- * Parent pointers to identify the parent bucket a device or
- * bucket in the hierarchy. If an item appears more than
- * once, this is the _last_ time it appeared (where buckets
- * are processed in bucket id order, from -1 on down to
- * -max_buckets.
- */
- __u32 *bucket_parents;
- __u32 *device_parents;
-
__s32 max_buckets;
__u32 max_rules;
__s32 max_devices;
@@ -168,8 +158,7 @@ struct crush_map {
/* crush.c */
-extern int crush_get_bucket_item_weight(struct crush_bucket *b, int pos);
-extern void crush_calc_parents(struct crush_map *map);
+extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos);
extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
@@ -177,4 +166,9 @@ extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
extern void crush_destroy_bucket(struct crush_bucket *b);
extern void crush_destroy(struct crush_map *map);
+static inline int crush_calc_tree_node(int i)
+{
+ return ((i+1) << 1)-1;
+}
+
#endif
diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h
index c46b99c18bb0..71d79f44a7d0 100644
--- a/include/linux/crush/mapper.h
+++ b/include/linux/crush/mapper.h
@@ -10,11 +10,10 @@
#include "crush.h"
-extern int crush_find_rule(struct crush_map *map, int pool, int type, int size);
-extern int crush_do_rule(struct crush_map *map,
+extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size);
+extern int crush_do_rule(const struct crush_map *map,
int ruleno,
int x, int *result, int result_max,
- int forcefeed, /* -1 for none */
- __u32 *weights);
+ const __u32 *weights);
#endif
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index ae36b72c22f3..66c434f5dd1e 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -93,6 +93,10 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
int nregs, void __iomem *base, char *prefix);
+struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
+ struct dentry *parent,
+ u32 *array, u32 elements);
+
bool debugfs_initialized(void);
#else
@@ -219,6 +223,13 @@ static inline bool debugfs_initialized(void)
return false;
}
+static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
+ struct dentry *parent,
+ u32 *array, u32 elements)
+{
+ return ERR_PTR(-ENODEV);
+}
+
#endif
#endif
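
A minimal, hedged use of the new debugfs helper declared above; the file name, the 0444 mode and the foo_stats array are illustrative, and the array must stay around for as long as the file exists.

#include <linux/debugfs.h>
#include <linux/kernel.h>

static u32 foo_stats[4];	/* illustrative driver-private counters */

static void foo_debugfs_init(struct dentry *parent)
{
	/* Read-only view of the whole array under <parent>/foo_stats. */
	debugfs_create_u32_array("foo_stats", 0444, parent,
				 foo_stats, ARRAY_SIZE(foo_stats));
}
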
diff --git a/include/linux/device.h b/include/linux/device.h
index e04f5776f6d0..161d96241b1b 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -667,6 +667,10 @@ struct device {
struct dma_coherent_mem *dma_mem; /* internal for coherent mem
override */
+#ifdef CONFIG_CMA
+ struct cma *cma_area; /* contiguous memory area for dma
+ allocations */
+#endif
/* arch specific additions */
struct dev_archdata archdata;
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3efbfc2145c3..eb48f3816df9 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -61,6 +61,13 @@ struct dma_buf_attachment;
* This Callback must not sleep.
* @kmap: maps a page from the buffer into kernel address space.
* @kunmap: [optional] unmaps a page from the buffer.
+ * @mmap: used to expose the backing storage to userspace. Note that the
+ * mapping needs to be coherent - if the exporter doesn't directly
+ * support this, it needs to fake coherency by shooting down any ptes
+ * when transitioning away from the cpu domain.
+ * @vmap: [optional] creates a virtual mapping for the buffer into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @vunmap: [optional] unmaps a vmap from the buffer
*/
struct dma_buf_ops {
int (*attach)(struct dma_buf *, struct device *,
@@ -92,6 +99,11 @@ struct dma_buf_ops {
void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*kmap)(struct dma_buf *, unsigned long);
void (*kunmap)(struct dma_buf *, unsigned long, void *);
+
+ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
+
+ void *(*vmap)(struct dma_buf *);
+ void (*vunmap)(struct dma_buf *, void *vaddr);
};
/**
@@ -167,6 +179,11 @@ void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
+
+int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
+ unsigned long);
+void *dma_buf_vmap(struct dma_buf *);
+void dma_buf_vunmap(struct dma_buf *, void *vaddr);
#else
static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
@@ -248,6 +265,22 @@ static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
unsigned long pnum, void *vaddr)
{
}
+
+static inline int dma_buf_mmap(struct dma_buf *dmabuf,
+ struct vm_area_struct *vma,
+ unsigned long pgoff)
+{
+ return -ENODEV;
+}
+
+static inline void *dma_buf_vmap(struct dma_buf *dmabuf)
+{
+ return NULL;
+}
+
+static inline void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+}
#endif /* CONFIG_DMA_SHARED_BUFFER */
#endif /* __DMA_BUF_H__ */
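
A short sketch of how an importer might use the vmap pair added above, assuming the exporter actually implements .vmap (dma_buf_vmap() is taken to return NULL otherwise); the function name and the zero-fill access are illustrative only.

#include <linux/dma-buf.h>
#include <linux/errno.h>
#include <linux/string.h>

static int foo_cpu_clear(struct dma_buf *dmabuf)
{
	/* Map the whole buffer into the kernel address space. */
	void *vaddr = dma_buf_vmap(dmabuf);

	if (!vaddr)
		return -ENOMEM;

	memset(vaddr, 0, dmabuf->size);		/* example CPU access */

	dma_buf_vunmap(dmabuf, vaddr);
	return 0;
}
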
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
new file mode 100644
index 000000000000..2f303e4b7ed3
--- /dev/null
+++ b/include/linux/dma-contiguous.h
@@ -0,0 +1,110 @@
+#ifndef __LINUX_CMA_H
+#define __LINUX_CMA_H
+
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ * Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your option) any later version of the license.
+ */
+
+/*
+ * Contiguous Memory Allocator
+ *
+ * The Contiguous Memory Allocator (CMA) makes it possible to
+ * allocate big contiguous chunks of memory after the system has
+ * booted.
+ *
+ * Why is it needed?
+ *
+ * Various devices on embedded systems have no scatter-gather and/or
+ * IO map support and require contiguous blocks of memory to
+ * operate. They include devices such as cameras, hardware video
+ * coders, etc.
+ *
+ * Such devices often require big memory buffers (a full HD frame
+ * is, for instance, more than 2 megapixels large, i.e. more than 6
+ * MB of memory), which makes mechanisms such as kmalloc() or
+ * alloc_page() ineffective.
+ *
+ * At the same time, a solution where a big memory region is
+ * reserved for a device is suboptimal since often more memory is
+ * reserved than strictly required and, moreover, the memory is
+ * inaccessible to the page allocator even if device drivers don't use it.
+ *
+ * CMA tries to solve this issue by operating on memory regions
+ * where only movable pages can be allocated from. This way, the kernel
+ * can use the memory for pagecache and, when a device driver requests
+ * it, the allocated pages can be migrated.
+ *
+ * Driver usage
+ *
+ * CMA should not be used by the device drivers directly. It is
+ * only a helper framework for the dma-mapping subsystem.
+ *
+ * For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ */
+
+#ifdef __KERNEL__
+
+struct cma;
+struct page;
+struct device;
+
+#ifdef CONFIG_CMA
+
+/*
+ * There is always at least the global CMA area and a few optional
+ * device-private areas configured in the kernel .config.
+ */
+#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
+
+extern struct cma *dma_contiguous_default_area;
+
+void dma_contiguous_reserve(phys_addr_t addr_limit);
+int dma_declare_contiguous(struct device *dev, unsigned long size,
+ phys_addr_t base, phys_addr_t limit);
+
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int order);
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count);
+
+#else
+
+#define MAX_CMA_AREAS (0)
+
+static inline void dma_contiguous_reserve(phys_addr_t limit) { }
+
+static inline
+int dma_declare_contiguous(struct device *dev, unsigned long size,
+ phys_addr_t base, phys_addr_t limit)
+{
+ return -ENOSYS;
+}
+
+static inline
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int order)
+{
+ return NULL;
+}
+
+static inline
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count)
+{
+ return false;
+}
+
+#endif
+
+#endif
+
+#endif
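
As a rough illustration of the interface declared in this new header: platform code declares a private area for a hypothetical device, and the dma-mapping core later pulls pages from it. The 16 MiB size, the zero base/limit (assumed to leave placement to the framework) and all foo_* names are assumptions, not part of the patch.

#include <linux/dma-contiguous.h>
#include <linux/init.h>

#define FOO_CMA_SIZE	(16 * 1024 * 1024)	/* 16 MiB, illustrative */

/* Early platform/board code: carve out a per-device CMA area. */
static int __init foo_reserve_cma(struct device *foo_dev)
{
	return dma_declare_contiguous(foo_dev, FOO_CMA_SIZE, 0, 0);
}

/* dma-mapping core side (not a driver, per the comment above): allocate
 * 16 pages aligned to order 4, then hand them back. */
static void foo_cma_smoke_test(struct device *foo_dev)
{
	struct page *pages = dma_alloc_from_contiguous(foo_dev, 16, 4);

	if (pages)
		dma_release_from_contiguous(foo_dev, pages, 16);
}
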
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index f9a2e5e67a54..56377df39124 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -615,11 +615,13 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
}
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
- struct dma_chan *chan, void *buf, size_t len,
+ struct dma_chan *chan, dma_addr_t buf, size_t len,
enum dma_transfer_direction dir, unsigned long flags)
{
struct scatterlist sg;
- sg_init_one(&sg, buf, len);
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = buf;
+ sg_dma_len(&sg) = len;
return chan->device->device_prep_slave_sg(chan, &sg, 1,
dir, flags, NULL);
@@ -633,6 +635,18 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
dir, flags, NULL);
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+struct rio_dma_ext;
+static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, unsigned long flags,
+ struct rio_dma_ext *rio_ext)
+{
+ return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+ dir, flags, rio_ext);
+}
+#endif
+
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction dir)
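
Since dmaengine_prep_slave_single() now takes a dma_addr_t rather than a virtual pointer, the caller is expected to map the buffer first. A hedged sketch, assuming chan, dev and the buffer come from the driver and that the unmap happens later in the completion path:

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *
foo_prep_tx(struct dma_chan *chan, struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return NULL;

	/* The descriptor now carries a bus address, not a kernel pointer. */
	return dmaengine_prep_slave_single(chan, dma, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
}
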
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 9e5f5607eba3..47e3d4850584 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.11"
+#define REL_VERSION "8.3.13"
#define API_VERSION 88
#define PRO_VERSION_MIN 86
#define PRO_VERSION_MAX 96
@@ -112,8 +112,8 @@ enum drbd_ret_code {
ERR_OPEN_MD_DISK = 105,
ERR_DISK_NOT_BDEV = 107,
ERR_MD_NOT_BDEV = 108,
- ERR_DISK_TO_SMALL = 111,
- ERR_MD_DISK_TO_SMALL = 112,
+ ERR_DISK_TOO_SMALL = 111,
+ ERR_MD_DISK_TOO_SMALL = 112,
ERR_BDCLAIM_DISK = 114,
ERR_BDCLAIM_MD_DISK = 115,
ERR_MD_IDX_INVALID = 116,
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 447c36752385..fb670bf603f7 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -48,6 +48,11 @@
#define DRBD_TIMEOUT_MAX 600
#define DRBD_TIMEOUT_DEF 60 /* 6 seconds */
+ /* If backing disk takes longer than disk_timeout, mark the disk as failed */
+#define DRBD_DISK_TIMEOUT_MIN 0 /* 0 = disabled */
+#define DRBD_DISK_TIMEOUT_MAX 6000 /* 10 Minutes */
+#define DRBD_DISK_TIMEOUT_DEF 0 /* disabled */
+
/* active connection retries when C_WF_CONNECTION */
#define DRBD_CONNECT_INT_MIN 1
#define DRBD_CONNECT_INT_MAX 120
@@ -60,7 +65,7 @@
/* timeout for the ping packets.*/
#define DRBD_PING_TIMEO_MIN 1
-#define DRBD_PING_TIMEO_MAX 100
+#define DRBD_PING_TIMEO_MAX 300
#define DRBD_PING_TIMEO_DEF 5
/* max number of write requests between write barriers */
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index ab6159e4fcf0..a8706f08ab36 100644
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -31,9 +31,12 @@ NL_PACKET(disk_conf, 3,
NL_INTEGER( 56, T_MAY_IGNORE, max_bio_bvecs)
NL_BIT( 57, T_MAY_IGNORE, no_disk_barrier)
NL_BIT( 58, T_MAY_IGNORE, no_disk_drain)
+ NL_INTEGER( 89, T_MAY_IGNORE, disk_timeout)
)
-NL_PACKET(detach, 4, )
+NL_PACKET(detach, 4,
+ NL_BIT( 88, T_MANDATORY, detach_force)
+)
NL_PACKET(net_conf, 5,
NL_STRING( 8, T_MANDATORY, my_addr, 128)
diff --git a/include/linux/dvb/frontend.h b/include/linux/dvb/frontend.h
index cb4428ab81ed..f50d4058c5fb 100644
--- a/include/linux/dvb/frontend.h
+++ b/include/linux/dvb/frontend.h
@@ -320,7 +320,24 @@ struct dvb_frontend_event {
#define DTV_ENUM_DELSYS 44
-#define DTV_MAX_COMMAND DTV_ENUM_DELSYS
+/* ATSC-MH */
+#define DTV_ATSCMH_FIC_VER 45
+#define DTV_ATSCMH_PARADE_ID 46
+#define DTV_ATSCMH_NOG 47
+#define DTV_ATSCMH_TNOG 48
+#define DTV_ATSCMH_SGN 49
+#define DTV_ATSCMH_PRC 50
+#define DTV_ATSCMH_RS_FRAME_MODE 51
+#define DTV_ATSCMH_RS_FRAME_ENSEMBLE 52
+#define DTV_ATSCMH_RS_CODE_MODE_PRI 53
+#define DTV_ATSCMH_RS_CODE_MODE_SEC 54
+#define DTV_ATSCMH_SCCC_BLOCK_MODE 55
+#define DTV_ATSCMH_SCCC_CODE_MODE_A 56
+#define DTV_ATSCMH_SCCC_CODE_MODE_B 57
+#define DTV_ATSCMH_SCCC_CODE_MODE_C 58
+#define DTV_ATSCMH_SCCC_CODE_MODE_D 59
+
+#define DTV_MAX_COMMAND DTV_ATSCMH_SCCC_CODE_MODE_D
typedef enum fe_pilot {
PILOT_ON,
@@ -360,6 +377,38 @@ typedef enum fe_delivery_system {
#define SYS_DVBC_ANNEX_AC SYS_DVBC_ANNEX_A
+/* ATSC-MH */
+
+enum atscmh_sccc_block_mode {
+ ATSCMH_SCCC_BLK_SEP = 0,
+ ATSCMH_SCCC_BLK_COMB = 1,
+ ATSCMH_SCCC_BLK_RES = 2,
+};
+
+enum atscmh_sccc_code_mode {
+ ATSCMH_SCCC_CODE_HLF = 0,
+ ATSCMH_SCCC_CODE_QTR = 1,
+ ATSCMH_SCCC_CODE_RES = 2,
+};
+
+enum atscmh_rs_frame_ensemble {
+ ATSCMH_RSFRAME_ENS_PRI = 0,
+ ATSCMH_RSFRAME_ENS_SEC = 1,
+};
+
+enum atscmh_rs_frame_mode {
+ ATSCMH_RSFRAME_PRI_ONLY = 0,
+ ATSCMH_RSFRAME_PRI_SEC = 1,
+ ATSCMH_RSFRAME_RES = 2,
+};
+
+enum atscmh_rs_code_mode {
+ ATSCMH_RSCODE_211_187 = 0,
+ ATSCMH_RSCODE_223_187 = 1,
+ ATSCMH_RSCODE_235_187 = 2,
+ ATSCMH_RSCODE_RES = 3,
+};
+
struct dtv_cmds_h {
char *name; /* A display name for debugging purposes */
diff --git a/include/linux/dvb/version.h b/include/linux/dvb/version.h
index 0559e2bd38f9..43d9e8d462d4 100644
--- a/include/linux/dvb/version.h
+++ b/include/linux/dvb/version.h
@@ -24,6 +24,6 @@
#define _DVBVERSION_H_
#define DVB_API_VERSION 5
-#define DVB_API_VERSION_MINOR 5
+#define DVB_API_VERSION_MINOR 6
#endif /*_DVBVERSION_H_*/
diff --git a/include/linux/edac.h b/include/linux/edac.h
index c621d762bb2c..91ba3bae42ee 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -71,6 +71,25 @@ enum dev_type {
#define DEV_FLAG_X64 BIT(DEV_X64)
/**
+ * enum hw_event_mc_err_type - type of the detected error
+ *
+ * @HW_EVENT_ERR_CORRECTED: Corrected Error - Indicates that an ECC
+ * corrected error was detected
+ * @HW_EVENT_ERR_UNCORRECTED: Uncorrected Error - Indicates an error that
+ * can't be corrected by ECC, but it is not
+ * fatal (maybe it is on an unused memory area,
+ * or the memory controller could recover from
+ * it, for example by retrying the operation).
+ * @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not
+ * be recovered.
+ */
+enum hw_event_mc_err_type {
+ HW_EVENT_ERR_CORRECTED,
+ HW_EVENT_ERR_UNCORRECTED,
+ HW_EVENT_ERR_FATAL,
+};
+
+/**
* enum mem_type - memory types. For a more detailed reference, please see
* http://en.wikipedia.org/wiki/DRAM
*
@@ -313,38 +332,141 @@ enum scrub_type {
*/
/**
+ * enum edac_mc_layer_type - memory controller hierarchy layer
+ *
+ * @EDAC_MC_LAYER_BRANCH: memory layer is named "branch"
+ * @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel"
+ * @EDAC_MC_LAYER_SLOT: memory layer is named "slot"
+ * @EDAC_MC_LAYER_CHIP_SELECT: memory layer is named "chip select"
+ *
+ * This enum is used by the drivers to tell edac_mc_sysfs what name should
+ * be used when describing a memory stick location.
+ */
+enum edac_mc_layer_type {
+ EDAC_MC_LAYER_BRANCH,
+ EDAC_MC_LAYER_CHANNEL,
+ EDAC_MC_LAYER_SLOT,
+ EDAC_MC_LAYER_CHIP_SELECT,
+};
+
+/**
+ * struct edac_mc_layer - describes the memory controller hierarchy
+ * @type: layer type
+ * @size: number of components per layer. For example,
+ * if the channel layer has two channels, size = 2
+ * @is_virt_csrow: This layer is part of the "csrow" when old API
+ * compatibility mode is enabled. Otherwise, it is
+ * a channel
+ */
+struct edac_mc_layer {
+ enum edac_mc_layer_type type;
+ unsigned size;
+ bool is_virt_csrow;
+};
+
+/*
+ * Maximum number of layers used by the memory controller to uniquely
+ * identify a single memory stick.
+ * NOTE: Changing this constant requires changing not only the constant
+ * below, but also the existing core code, as some of that code is
+ * optimized for 3 layers.
+ */
+#define EDAC_MAX_LAYERS 3
+
+/**
+ * EDAC_DIMM_PTR - Macro to find the pointer inside a pointer array
+ * for the element given by the [layer0,layer1,layer2] position
+ *
+ * @layers: a struct edac_mc_layer array, describing how many elements
+ * were allocated for each layer
+ * @var: name of the var where we want to get the pointer
+ * (like mci->dimms)
+ * @n_layers: Number of layers at the @layers array
+ * @layer0: layer0 position
+ * @layer1: layer1 position. Unused if n_layers < 2
+ * @layer2: layer2 position. Unused if n_layers < 3
+ *
+ * For 1 layer, this macro returns &var[layer0]
+ * For 2 layers, this macro is similar to allocating a two-dimensional
+ * array and returning "&var[layer0][layer1]"
+ * For 3 layers, this macro is similar to allocating a three-dimensional
+ * array and returning "&var[layer0][layer1][layer2]"
+ *
+ * A loop could be used here to make it more generic, but, as we only have
+ * 3 layers, this is a little faster.
+ * By design, layers can never be 0 or more than 3. If that ever happens,
+ * a NULL is returned, causing an OOPS during the memory allocation routine,
+ * which would point the developer at the mistake.
+ */
+#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
+ typeof(var) __p; \
+ if ((nlayers) == 1) \
+ __p = &var[layer0]; \
+ else if ((nlayers) == 2) \
+ __p = &var[(layer1) + ((layers[1]).size * (layer0))]; \
+ else if ((nlayers) == 3) \
+ __p = &var[(layer2) + ((layers[2]).size * ((layer1) + \
+ ((layers[1]).size * (layer0))))]; \
+ else \
+ __p = NULL; \
+ __p; \
+})
+
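
A worked, purely illustrative example of the index arithmetic above: for a hypothetical controller with 2 channels of 4 slots each and nlayers == 2, the macro reduces to &var[layer1 + layers[1].size * layer0], so channel 1 / slot 2 selects element 2 + 4 * 1 = 6.

#include <linux/edac.h>

/* layers[0] is assumed to be the channel layer (size 2) and layers[1]
 * the slot layer (size 4); chan and slot index into them. */
static struct dimm_info *foo_get_dimm(struct mem_ctl_info *mci,
				      struct edac_mc_layer *layers,
				      unsigned int chan, unsigned int slot)
{
	return EDAC_DIMM_PTR(layers, mci->dimms, 2, chan, slot, 0);
}
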
+
+/* FIXME: add the proper per-location error counts */
+struct dimm_info {
+ char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
+
+ /* Memory location data */
+ unsigned location[EDAC_MAX_LAYERS];
+
+ struct mem_ctl_info *mci; /* the parent */
+
+ u32 grain; /* granularity of reported error in bytes */
+ enum dev_type dtype; /* memory device type */
+ enum mem_type mtype; /* memory dimm type */
+ enum edac_type edac_mode; /* EDAC mode for this dimm */
+
+ u32 nr_pages; /* number of pages on this dimm */
+
+ unsigned csrow, cschannel; /* Points to the old API data */
+};
+
+/**
* struct rank_info - contains the information for one DIMM rank
*
* @chan_idx: channel number where the rank is (typically, 0 or 1)
* @ce_count: number of correctable errors for this rank
- * @label: DIMM label. Different ranks for the same DIMM should be
- * filled, on userspace, with the same label.
- * FIXME: The core currently won't enforce it.
* @csrow: A pointer to the chip select row structure (the parent
* structure). The location of the rank is given by
* the (csrow->csrow_idx, chan_idx) vector.
+ * @dimm: A pointer to the DIMM structure, where the DIMM label
+ * information is stored.
+ *
+ * FIXME: Currently, the EDAC core model will assume one DIMM per rank.
+ * This is a bad assumption, but it makes this patch easier. Later
+ * patches in this series will fix this issue.
*/
struct rank_info {
int chan_idx;
- u32 ce_count;
- char label[EDAC_MC_LABEL_LEN + 1];
- struct csrow_info *csrow; /* the parent */
+ struct csrow_info *csrow;
+ struct dimm_info *dimm;
+
+ u32 ce_count; /* Correctable Errors for this csrow */
};
struct csrow_info {
- unsigned long first_page; /* first page number in dimm */
- unsigned long last_page; /* last page number in dimm */
+ /* Used only by edac_mc_find_csrow_by_page() */
+ unsigned long first_page; /* first page number in csrow */
+ unsigned long last_page; /* last page number in csrow */
unsigned long page_mask; /* used for interleaving -
- * 0UL for non intlv
- */
- u32 nr_pages; /* number of pages in csrow */
- u32 grain; /* granularity of reported error in bytes */
- int csrow_idx; /* the chip-select row */
- enum dev_type dtype; /* memory device type */
+ * 0UL for non intlv */
+
+ int csrow_idx; /* the chip-select row */
+
u32 ue_count; /* Uncorrectable Errors for this csrow */
u32 ce_count; /* Correctable Errors for this csrow */
- enum mem_type mtype; /* memory csrow type */
- enum edac_type edac_mode; /* EDAC mode for this csrow */
+
struct mem_ctl_info *mci; /* the parent */
struct kobject kobj; /* sysfs kobject for this csrow */
@@ -426,8 +548,20 @@ struct mem_ctl_info {
unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
unsigned long page);
int mc_idx;
- int nr_csrows;
struct csrow_info *csrows;
+ unsigned nr_csrows, num_cschannel;
+
+ /* Memory Controller hierarchy */
+ unsigned n_layers;
+ struct edac_mc_layer *layers;
+ bool mem_is_per_rank;
+
+ /*
+ * DIMM info. Will eventually remove the entire csrows_info some day
+ */
+ unsigned tot_dimms;
+ struct dimm_info *dimms;
+
/*
* FIXME - what about controllers on other busses? - IDs must be
* unique. dev pointer should be sufficiently unique, but
@@ -440,12 +574,16 @@ struct mem_ctl_info {
const char *dev_name;
char proc_name[MC_PROC_NAME_MAX_LEN + 1];
void *pvt_info;
- u32 ue_noinfo_count; /* Uncorrectable Errors w/o info */
- u32 ce_noinfo_count; /* Correctable Errors w/o info */
- u32 ue_count; /* Total Uncorrectable Errors for this MC */
- u32 ce_count; /* Total Correctable Errors for this MC */
unsigned long start_time; /* mci load start time (in jiffies) */
+ /*
+ * drivers shouldn't access those fields directly, as the core
+ * already handles that.
+ */
+ u32 ce_noinfo_count, ue_noinfo_count;
+ u32 ue_mc, ce_mc;
+ u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
+
struct completion complete;
/* edac sysfs device control */
@@ -458,7 +596,7 @@ struct mem_ctl_info {
* by the low level driver.
*
* Set by the low level driver to provide attributes at the
- * controller level, same level as 'ue_count' and 'ce_count' above.
+ * controller level.
* An array of structures, NULL terminated
*
* If attributes are desired, then set to array of attributes
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 7d4e0356f329..c03af7687bb4 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -28,12 +28,13 @@ typedef int (elevator_may_queue_fn) (struct request_queue *, int);
typedef void (elevator_init_icq_fn) (struct io_cq *);
typedef void (elevator_exit_icq_fn) (struct io_cq *);
-typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
+typedef int (elevator_set_req_fn) (struct request_queue *, struct request *,
+ struct bio *, gfp_t);
typedef void (elevator_put_req_fn) (struct request *);
typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
-typedef void *(elevator_init_fn) (struct request_queue *);
+typedef int (elevator_init_fn) (struct request_queue *);
typedef void (elevator_exit_fn) (struct elevator_queue *);
struct elevator_ops
@@ -129,7 +130,8 @@ extern void elv_unregister_queue(struct request_queue *q);
extern int elv_may_queue(struct request_queue *, int);
extern void elv_abort_queue(struct request_queue *);
extern void elv_completed_request(struct request_queue *, struct request *);
-extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
+extern int elv_set_request(struct request_queue *q, struct request *rq,
+ struct bio *bio, gfp_t gfp_mask);
extern void elv_put_request(struct request_queue *, struct request *);
extern void elv_drain_elevator(struct request_queue *);
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 91bb4f27238c..3c3ef19a625a 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -34,7 +34,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
-int eventfd_signal(struct eventfd_ctx *ctx, int n);
+__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
__u64 *cnt);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index d31cb682e173..a3229d7ab9f2 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -554,6 +554,10 @@ struct fb_cursor_user {
#define FB_EVENT_FB_UNBIND 0x0E
/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */
#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
+/* A hardware display blank early change occurred */
+#define FB_EARLY_EVENT_BLANK 0x10
+/* A hardware display blank revert early change occurred */
+#define FB_R_EARLY_EVENT_BLANK 0x11
struct fb_event {
struct fb_info *info;
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index e83c24af358a..7edcf1031718 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -349,6 +349,7 @@ int fw_cancel_transaction(struct fw_card *card,
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
int generation, int speed, unsigned long long offset,
void *payload, size_t length);
+const char *fw_rcode_string(int rcode);
static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
{
@@ -406,6 +407,7 @@ struct fw_iso_buffer {
enum dma_data_direction direction;
struct page **pages;
int page_count;
+ int page_count_mapped;
};
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
diff --git a/drivers/input/fixp-arith.h b/include/linux/fixp-arith.h
index 3089d7382325..3089d7382325 100644
--- a/drivers/input/fixp-arith.h
+++ b/include/linux/fixp-arith.h
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c0e53372b082..40887afaaca7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -173,6 +173,15 @@ struct inodes_stat_t {
#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
+
+/*
+ * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
+ * that indicates that they should check the contents of the iovec are
+ * valid, but not check the memory that the iovec elements
+ * points too.
+ */
+#define CHECK_IOVEC_ONLY -1
+
#define SEL_IN 1
#define SEL_OUT 2
#define SEL_EX 4
@@ -1681,7 +1690,6 @@ struct inode_operations {
ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
- void (*truncate_range)(struct inode *, loff_t, loff_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
} ____cacheline_aligned;
@@ -1691,8 +1699,7 @@ struct seq_file;
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
unsigned long nr_segs, unsigned long fast_segs,
struct iovec *fast_pointer,
- struct iovec **ret_pointer,
- int check_access);
+ struct iovec **ret_pointer);
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
@@ -1764,8 +1771,8 @@ struct super_operations {
* I_FREEING Set when inode is about to be freed but still has dirty
* pages or buffers attached or the inode itself is still
* dirty.
- * I_CLEAR Added by end_writeback(). In this state the inode is clean
- * and can be destroyed. Inode keeps I_FREEING.
+ * I_CLEAR Added by clear_inode(). In this state the inode is
+ * clean and can be destroyed. Inode keeps I_FREEING.
*
* Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
* prohibited for many purposes. iget() must wait for
@@ -1773,9 +1780,10 @@ struct super_operations {
* anew. Other functions will just ignore such inodes,
* if appropriate. I_NEW is used for waiting.
*
- * I_SYNC Synchonized write of dirty inode data. The bits is
- * set during data writeback, and cleared with a wakeup
- * on the bit address once it is done.
+ * I_SYNC Writeback of inode is running. The bit is set during
+ * data writeback, and cleared with a wakeup on the bit
+ * address once it is done. The bit is also used to pin
+ * the inode in memory for the flusher thread.
*
 * I_REFERENCED Marks the inode as recently referenced on the LRU list.
*
@@ -2349,7 +2357,7 @@ extern unsigned int get_next_ino(void);
extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
-extern void end_writeback(struct inode *);
+extern void clear_inode(struct inode *);
extern void __destroy_inode(struct inode *);
extern struct inode *new_inode_pseudo(struct super_block *sb);
extern struct inode *new_inode(struct super_block *sb);
@@ -2453,8 +2461,6 @@ enum {
};
void dio_end_io(struct bio *bio, int error);
-void inode_dio_wait(struct inode *inode);
-void inode_dio_done(struct inode *inode);
ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, const struct iovec *iov, loff_t offset,
@@ -2469,12 +2475,11 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
offset, nr_segs, get_block, NULL, NULL,
DIO_LOCKING | DIO_SKIP_HOLES);
}
-#else
-static inline void inode_dio_wait(struct inode *inode)
-{
-}
#endif
+void inode_dio_wait(struct inode *inode);
+void inode_dio_done(struct inode *inode);
+
extern const struct file_operations generic_ro_fops;
#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
diff --git a/include/linux/fsl/mxs-dma.h b/include/linux/fsl/mxs-dma.h
index 203d7c4a3e11..55d870238399 100644
--- a/include/linux/fsl/mxs-dma.h
+++ b/include/linux/fsl/mxs-dma.h
@@ -15,14 +15,6 @@ struct mxs_dma_data {
int chan_irq;
};
-static inline int mxs_dma_is_apbh(struct dma_chan *chan)
-{
- return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbh");
-}
-
-static inline int mxs_dma_is_apbx(struct dma_chan *chan)
-{
- return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbx");
-}
-
+extern int mxs_dma_is_apbh(struct dma_chan *chan);
+extern int mxs_dma_is_apbx(struct dma_chan *chan);
#endif /* __MACH_MXS_DMA_H__ */
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index b456b08d70ed..b986be513406 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -153,6 +153,19 @@ int __must_check __gameport_register_driver(struct gameport_driver *drv,
void gameport_unregister_driver(struct gameport_driver *drv);
+/**
+ * module_gameport_driver() - Helper macro for registering a gameport driver
+ * @__gameport_driver: gameport_driver struct
+ *
+ * Helper macro for gameport drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module may
+ * only use this macro once, and calling it replaces module_init() and
+ * module_exit().
+ */
+#define module_gameport_driver(__gameport_driver) \
+ module_driver(__gameport_driver, gameport_register_driver, \
+ gameport_unregister_driver)
+
#endif /* __KERNEL__ */
#define GAMEPORT_MODE_DISABLED 0
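
A sketch of the boilerplate the new macro removes: a hypothetical driver only declares its gameport_driver and lets module_gameport_driver() generate the init/exit pair. All foo_* names are invented for illustration.

#include <linux/errno.h>
#include <linux/gameport.h>
#include <linux/module.h>

static int foo_connect(struct gameport *gameport, struct gameport_driver *drv)
{
	return -ENODEV;		/* stub: this example claims no ports */
}

static void foo_disconnect(struct gameport *gameport)
{
}

static struct gameport_driver foo_drv = {
	.driver		= { .name = "foo" },
	.description	= "illustrative gameport driver",
	.connect	= foo_connect,
	.disconnect	= foo_disconnect,
};

module_gameport_driver(foo_drv);

MODULE_LICENSE("GPL");
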
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 73c28dea10ae..7a114016ac7d 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -110,6 +110,9 @@ extern int lockdep_genl_is_held(void);
#define genl_dereference(p) \
rcu_dereference_protected(p, lockdep_genl_is_held())
+#define MODULE_ALIAS_GENL_FAMILY(family)\
+ MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
+
#endif /* __KERNEL__ */
#endif /* __LINUX_GENERIC_NETLINK_H */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 581e74b7df95..1e49be49d324 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -391,4 +391,16 @@ static inline bool pm_suspended_storage(void)
}
#endif /* CONFIG_PM_SLEEP */
+#ifdef CONFIG_CMA
+
+/* The below functions must be run on a range from a single zone. */
+extern int alloc_contig_range(unsigned long start, unsigned long end,
+ unsigned migratetype);
+extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+
+/* CMA stuff */
+extern void init_cma_reserved_pageblock(struct page *page);
+
+#endif
+
#endif /* __LINUX_GFP_H */
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 6155ecf192b0..f07fc2d08159 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_GPIO_H
#define __LINUX_GPIO_H
+#include <linux/errno.h>
+
/* see Documentation/gpio.txt */
/* make these flag values available regardless of GPIO kconfig options */
@@ -20,6 +22,11 @@
/* Gpio pin is open source */
#define GPIOF_OPEN_SOURCE (1 << 3)
+#define GPIOF_EXPORT (1 << 2)
+#define GPIOF_EXPORT_CHANGEABLE (1 << 3)
+#define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT)
+#define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE)
+
/**
* struct gpio - a structure describing a GPIO with configuration
* @gpio: the GPIO number
@@ -33,7 +40,39 @@ struct gpio {
};
#ifdef CONFIG_GENERIC_GPIO
+
+#ifdef CONFIG_ARCH_HAVE_CUSTOM_GPIO_H
#include <asm/gpio.h>
+#else
+
+#include <asm-generic/gpio.h>
+
+static inline int gpio_get_value(unsigned int gpio)
+{
+ return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned int gpio, int value)
+{
+ __gpio_set_value(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned int gpio)
+{
+ return __gpio_cansleep(gpio);
+}
+
+static inline int gpio_to_irq(unsigned int gpio)
+{
+ return __gpio_to_irq(gpio);
+}
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+ return -EINVAL;
+}
+
+#endif
#else
@@ -55,12 +94,24 @@ static inline int gpio_request(unsigned gpio, const char *label)
return -ENOSYS;
}
+static inline int devm_gpio_request(struct device *dev, unsigned gpio,
+ const char *label)
+{
+ return -ENOSYS;
+}
+
static inline int gpio_request_one(unsigned gpio,
unsigned long flags, const char *label)
{
return -ENOSYS;
}
+static inline int devm_gpio_request_one(struct device *dev, unsigned gpio,
+ unsigned long flags, const char *label)
+{
+ return -ENOSYS;
+}
+
static inline int gpio_request_array(const struct gpio *array, size_t num)
{
return -ENOSYS;
@@ -74,6 +125,14 @@ static inline void gpio_free(unsigned gpio)
WARN_ON(1);
}
+static inline void devm_gpio_free(struct device *dev, unsigned gpio)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
static inline void gpio_free_array(const struct gpio *array, size_t num)
{
might_sleep();
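
A brief sketch combining the new export flags with the managed request helper; GPIO number 42 and the "foo-reset" label are placeholders.

#include <linux/device.h>
#include <linux/gpio.h>

static int foo_gpio_setup(struct device *dev)
{
	/* Output, initially low, exported to sysfs with a changeable
	 * direction; the devm_ variant releases the GPIO automatically
	 * when the driver detaches. */
	return devm_gpio_request_one(dev, 42,
			GPIOF_OUT_INIT_LOW | GPIOF_EXPORT_DIR_CHANGEABLE,
			"foo-reset");
}
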
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index c8af7a2efb52..4c59b1131187 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -59,6 +59,8 @@ extern pmd_t *page_check_address_pmd(struct page *page,
#define HPAGE_PMD_MASK HPAGE_MASK
#define HPAGE_PMD_SIZE HPAGE_SIZE
+extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
+
#define transparent_hugepage_enabled(__vma) \
((transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_FLAG) || \
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 000837e126e6..d5d6bbe2259e 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -284,6 +284,14 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
#include <asm/hugetlb.h>
+#ifndef arch_make_huge_pte
+static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+ struct page *page, int writable)
+{
+ return entry;
+}
+#endif
+
static inline struct hstate *page_hstate(struct page *page)
{
return size_to_hstate(PAGE_SIZE << compound_order(page));
diff --git a/include/linux/gpio-i2cmux.h b/include/linux/i2c-mux-gpio.h
index 4a333bb0bd0d..a36343a37ebc 100644
--- a/include/linux/gpio-i2cmux.h
+++ b/include/linux/i2c-mux-gpio.h
@@ -1,5 +1,5 @@
/*
- * gpio-i2cmux interface to platform code
+ * i2c-mux-gpio interface to platform code
*
* Peter Korsgaard <peter.korsgaard@barco.com>
*
@@ -8,14 +8,14 @@
* published by the Free Software Foundation.
*/
-#ifndef _LINUX_GPIO_I2CMUX_H
-#define _LINUX_GPIO_I2CMUX_H
+#ifndef _LINUX_I2C_MUX_GPIO_H
+#define _LINUX_I2C_MUX_GPIO_H
/* MUX has no specific idle mode */
-#define GPIO_I2CMUX_NO_IDLE ((unsigned)-1)
+#define I2C_MUX_GPIO_NO_IDLE ((unsigned)-1)
/**
- * struct gpio_i2cmux_platform_data - Platform-dependent data for gpio-i2cmux
+ * struct i2c_mux_gpio_platform_data - Platform-dependent data for i2c-mux-gpio
* @parent: Parent I2C bus adapter number
* @base_nr: Base I2C bus number to number adapters from or zero for dynamic
* @values: Array of bitmasks of GPIO settings (low/high) for each
@@ -25,7 +25,7 @@
* @n_gpios: Number of GPIOs used to control MUX
* @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used
*/
-struct gpio_i2cmux_platform_data {
+struct i2c_mux_gpio_platform_data {
int parent;
int base_nr;
const unsigned *values;
@@ -35,4 +35,4 @@ struct gpio_i2cmux_platform_data {
unsigned idle;
};
-#endif /* _LINUX_GPIO_I2CMUX_H */
+#endif /* _LINUX_I2C_MUX_GPIO_H */
diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h
index 747f0cde4164..c79083830014 100644
--- a/include/linux/i2c-mux.h
+++ b/include/linux/i2c-mux.h
@@ -34,7 +34,8 @@
* mux control.
*/
struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
- void *mux_dev, u32 force_nr, u32 chan_id,
+ struct device *mux_dev,
+ void *mux_priv, u32 force_nr, u32 chan_id,
int (*select) (struct i2c_adapter *,
void *mux_dev, u32 chan_id),
int (*deselect) (struct i2c_adapter *,
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 195d8b3d9cfb..ddfa04108baf 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -232,6 +232,7 @@ struct i2c_client {
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
extern struct i2c_client *i2c_verify_client(struct device *dev);
+extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
{
@@ -540,7 +541,7 @@ struct i2c_msg {
__u16 flags;
#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */
#define I2C_M_RD 0x0001 /* read data, from slave to master */
-#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_NOSTART */
#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */
@@ -553,8 +554,9 @@ struct i2c_msg {
#define I2C_FUNC_I2C 0x00000001
#define I2C_FUNC_10BIT_ADDR 0x00000002
-#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_NOSTART etc. */
+#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_IGNORE_NAK etc. */
#define I2C_FUNC_SMBUS_PEC 0x00000008
+#define I2C_FUNC_NOSTART 0x00000010 /* I2C_M_NOSTART */
#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */
#define I2C_FUNC_SMBUS_QUICK 0x00010000
#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000
diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h
index cec17cf6cac2..d8341cb47b60 100644
--- a/include/linux/i2c/adp5588.h
+++ b/include/linux/i2c/adp5588.h
@@ -157,6 +157,7 @@ struct i2c_client; /* forward declaration */
struct adp5588_gpio_platform_data {
int gpio_start; /* GPIO Chip base # */
+ const char *const *names;
unsigned irq_base; /* interrupt base # */
unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
int (*setup)(struct i2c_client *client,
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 26cb3c2c5c71..f0e69c6e8208 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -82,7 +82,7 @@
#define ARPHRD_FCPL 786 /* Fibrechannel public loop */
#define ARPHRD_FCFABRIC 787 /* Fibrechannel fabric */
/* 787->799 reserved for fibrechannel media types */
-/* 800 used to be used for token ring */
+#define ARPHRD_IEEE802_TR 800 /* Magic type ident for TR */
#define ARPHRD_IEEE80211 801 /* IEEE 802.11 */
#define ARPHRD_IEEE80211_PRISM 802 /* IEEE 802.11 + Prism2 header */
#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
diff --git a/include/linux/input/lm8333.h b/include/linux/input/lm8333.h
new file mode 100644
index 000000000000..79f918c6e8c5
--- /dev/null
+++ b/include/linux/input/lm8333.h
@@ -0,0 +1,24 @@
+/*
+ * public include for LM8333 keypad driver - same license as driver
+ * Copyright (C) 2012 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ */
+
+#ifndef _LM8333_H
+#define _LM8333_H
+
+struct lm8333;
+
+struct lm8333_platform_data {
+ /* Keymap data */
+ const struct matrix_keymap_data *matrix_data;
+ /* Active timeout before enter HALT mode in microseconds */
+ unsigned active_time;
+ /* Debounce interval in microseconds */
+ unsigned debounce_time;
+};
+
+extern int lm8333_read8(struct lm8333 *lm8333, u8 cmd);
+extern int lm8333_write8(struct lm8333 *lm8333, u8 cmd, u8 val);
+extern int lm8333_read_block(struct lm8333 *lm8333, u8 cmd, u8 len, u8 *buf);
+
+#endif /* _LM8333_H */
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 6c07ced0af81..5f3aa6b11bfa 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -75,54 +75,10 @@ struct matrix_keypad_platform_data {
bool no_autorepeat;
};
-/**
- * matrix_keypad_build_keymap - convert platform keymap into matrix keymap
- * @keymap_data: keymap supplied by the platform code
- * @row_shift: number of bits to shift row value by to advance to the next
- * line in the keymap
- * @keymap: expanded version of keymap that is suitable for use by
- * matrix keyboad driver
- * @keybit: pointer to bitmap of keys supported by input device
- *
- * This function converts platform keymap (encoded with KEY() macro) into
- * an array of keycodes that is suitable for using in a standard matrix
- * keyboard driver that uses row and col as indices.
- */
-static inline void
-matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
- unsigned int row_shift,
- unsigned short *keymap, unsigned long *keybit)
-{
- int i;
-
- for (i = 0; i < keymap_data->keymap_size; i++) {
- unsigned int key = keymap_data->keymap[i];
- unsigned int row = KEY_ROW(key);
- unsigned int col = KEY_COL(key);
- unsigned short code = KEY_VAL(key);
-
- keymap[MATRIX_SCAN_CODE(row, col, row_shift)] = code;
- __set_bit(code, keybit);
- }
- __clear_bit(KEY_RESERVED, keybit);
-}
-
-#ifdef CONFIG_INPUT_OF_MATRIX_KEYMAP
-struct matrix_keymap_data *
-matrix_keyboard_of_fill_keymap(struct device_node *np, const char *propname);
-
-void matrix_keyboard_of_free_keymap(const struct matrix_keymap_data *kd);
-#else
-static inline struct matrix_keymap_data *
-matrix_keyboard_of_fill_keymap(struct device_node *np, const char *propname)
-{
- return NULL;
-}
-
-static inline void
-matrix_keyboard_of_free_keymap(const struct matrix_keymap_data *kd)
-{
-}
-#endif
+int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
+ const char *keymap_name,
+ unsigned int rows, unsigned int cols,
+ unsigned short *keymap,
+ struct input_dev *input_dev);
#endif /* _MATRIX_KEYPAD_H */
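
With the inline helper gone, drivers call the exported matrix_keypad_build_keymap() during probe instead. A hedged sketch for a hypothetical 4x4 keypad; passing a NULL keymap_name is assumed to select the default keymap source, and foo_keycodes is illustrative driver-private storage.

#include <linux/input.h>
#include <linux/input/matrix_keypad.h>

#define FOO_ROWS	4
#define FOO_COLS	4

static unsigned short foo_keycodes[FOO_ROWS * FOO_COLS];

static int foo_keypad_setup(struct input_dev *input,
			    const struct matrix_keymap_data *keymap_data)
{
	return matrix_keypad_build_keymap(keymap_data, NULL,
					  FOO_ROWS, FOO_COLS,
					  foo_keycodes, input);
}
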
diff --git a/include/linux/input/navpoint.h b/include/linux/input/navpoint.h
new file mode 100644
index 000000000000..45050eb34de3
--- /dev/null
+++ b/include/linux/input/navpoint.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) 2012 Paul Parsons <lost.distance@yahoo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+struct navpoint_platform_data {
+ int port; /* PXA SSP port for pxa_ssp_request() */
+ int gpio; /* GPIO for power on/off */
+};
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 1a3018063034..df38db2ef45b 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -6,11 +6,7 @@
#include <linux/workqueue.h>
enum {
- ICQ_IOPRIO_CHANGED = 1 << 0,
- ICQ_CGROUP_CHANGED = 1 << 1,
ICQ_EXITED = 1 << 2,
-
- ICQ_CHANGED_MASK = ICQ_IOPRIO_CHANGED | ICQ_CGROUP_CHANGED,
};
/*
@@ -100,6 +96,7 @@ struct io_cq {
*/
struct io_context {
atomic_long_t refcount;
+ atomic_t active_ref;
atomic_t nr_tasks;
/* all the fields below are protected by this lock */
@@ -120,29 +117,37 @@ struct io_context {
struct work_struct release_work;
};
-static inline struct io_context *ioc_task_link(struct io_context *ioc)
+/**
+ * get_io_context_active - get active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Only iocs with an active reference can issue new IOs. This function
+ * acquires an active reference on @ioc. The caller must already have an
+ * active reference on @ioc.
+ */
+static inline void get_io_context_active(struct io_context *ioc)
{
- /*
- * if ref count is zero, don't allow sharing (ioc is going away, it's
- * a race).
- */
- if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
- atomic_inc(&ioc->nr_tasks);
- return ioc;
- }
+ WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
+ WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
+ atomic_long_inc(&ioc->refcount);
+ atomic_inc(&ioc->active_ref);
+}
+
+static inline void ioc_task_link(struct io_context *ioc)
+{
+ get_io_context_active(ioc);
- return NULL;
+ WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
+ atomic_inc(&ioc->nr_tasks);
}
struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
+void put_io_context_active(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
struct io_context *get_task_io_context(struct task_struct *task,
gfp_t gfp_flags, int node);
-void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
-void ioc_cgroup_changed(struct io_context *ioc);
-unsigned int icq_get_changed(struct io_cq *icq);
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index d937580417ba..450293f6d68b 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -35,12 +35,13 @@ struct iommu_domain;
#define IOMMU_FAULT_WRITE 0x1
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
- struct device *, unsigned long, int);
+ struct device *, unsigned long, int, void *);
struct iommu_domain {
struct iommu_ops *ops;
void *priv;
iommu_fault_handler_t handler;
+ void *handler_token;
};
#define IOMMU_CAP_CACHE_COHERENCY 0x1
@@ -95,7 +96,7 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
extern int iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
- iommu_fault_handler_t handler);
+ iommu_fault_handler_t handler, void *token);
extern int iommu_device_group(struct device *dev, unsigned int *groupid);
/**
@@ -132,7 +133,8 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
* invoke it.
*/
if (domain->handler)
- ret = domain->handler(domain, dev, iova, flags);
+ ret = domain->handler(domain, dev, iova, flags,
+ domain->handler_token);
return ret;
}
@@ -191,7 +193,7 @@ static inline int domain_has_cap(struct iommu_domain *domain,
}
static inline void iommu_set_fault_handler(struct iommu_domain *domain,
- iommu_fault_handler_t handler)
+ iommu_fault_handler_t handler, void *token)
{
}
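
A hedged sketch of the extended fault-handler interface: the token registered alongside the handler is handed back on every fault, so a driver can stash a private pointer there. The -ENOSYS return is assumed here to mean "not handled, fall back to the default report"; all foo_* names are illustrative.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>

static int foo_iommu_fault(struct iommu_domain *domain, struct device *dev,
			   unsigned long iova, int flags, void *token)
{
	/* 'token' is whatever was passed to iommu_set_fault_handler(). */
	dev_err(dev, "iommu fault at 0x%lx, flags 0x%x, token %p\n",
		iova, flags, token);
	return -ENOSYS;
}

static void foo_install_handler(struct iommu_domain *domain, void *driver_priv)
{
	iommu_set_fault_handler(domain, foo_iommu_fault, driver_priv);
}
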
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 76dad4808847..beb9ce1c2c23 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -42,26 +42,14 @@ enum {
};
/*
- * if process has set io priority explicitly, use that. if not, convert
- * the cpu scheduler nice value to an io priority
+ * Fallback BE priority
*/
#define IOPRIO_NORM (4)
-static inline int task_ioprio(struct io_context *ioc)
-{
- if (ioprio_valid(ioc->ioprio))
- return IOPRIO_PRIO_DATA(ioc->ioprio);
-
- return IOPRIO_NORM;
-}
-
-static inline int task_ioprio_class(struct io_context *ioc)
-{
- if (ioprio_valid(ioc->ioprio))
- return IOPRIO_PRIO_CLASS(ioc->ioprio);
-
- return IOPRIO_CLASS_BE;
-}
+/*
+ * if process has set io priority explicitly, use that. if not, convert
+ * the cpu scheduler nice value to an io priority
+ */
static inline int task_nice_ioprio(struct task_struct *task)
{
return (task_nice(task) + 20) / 5;
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 8a297a5e794c..5499c92a9153 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -62,6 +62,8 @@ struct ipc_namespace {
unsigned int mq_queues_max; /* initialized to DFLT_QUEUESMAX */
unsigned int mq_msg_max; /* initialized to DFLT_MSGMAX */
unsigned int mq_msgsize_max; /* initialized to DFLT_MSGSIZEMAX */
+ unsigned int mq_msg_default;
+ unsigned int mq_msgsize_default;
/* user_ns which owns the ipc ns */
struct user_namespace *user_ns;
@@ -90,11 +92,41 @@ static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {}
#ifdef CONFIG_POSIX_MQUEUE
extern int mq_init_ns(struct ipc_namespace *ns);
-/* default values */
-#define DFLT_QUEUESMAX 256 /* max number of message queues */
-#define DFLT_MSGMAX 10 /* max number of messages in each queue */
-#define HARD_MSGMAX (32768*sizeof(void *)/4)
-#define DFLT_MSGSIZEMAX 8192 /* max message size */
+/*
+ * POSIX Message Queue default values:
+ *
+ * MIN_*: Lowest value an admin can set the maximum unprivileged limit to
+ * DFLT_*MAX: Default values for the maximum unprivileged limits
+ * DFLT_{MSG,MSGSIZE}: Default values used when the user doesn't supply
+ * an attribute to the open call and the queue must be created
+ * HARD_*: Highest value the maximums can be set to. These are enforced
+ * on CAP_SYS_RESOURCE apps as well, making them inviolable (so make them
+ * suitably high)
+ *
+ * POSIX Requirements:
+ * Per app minimum openable message queues - 8. This does not map well
+ * to the fact that we limit the number of queues on a per namespace
+ * basis instead of a per app basis. So, make the default high enough
+ * that no given app should have a hard time opening 8 queues.
+ * Minimum maximum for HARD_MSGMAX - 32767. I bumped this to 65536.
+ * Minimum maximum for HARD_MSGSIZEMAX - POSIX is silent on this. However,
+ * we have run into a situation where running applications in the wild
+ * require this to be at least 5MB, and preferably 10MB, so I set the
+ * value to 16MB in hopes that this user is the worst of the bunch and
+ * the new maximum will handle anyone else. I may have to revisit this
+ * in the future.
+ */
+#define MIN_QUEUESMAX 1
+#define DFLT_QUEUESMAX 256
+#define HARD_QUEUESMAX 1024
+#define MIN_MSGMAX 1
+#define DFLT_MSG 10U
+#define DFLT_MSGMAX 10
+#define HARD_MSGMAX 65536
+#define MIN_MSGSIZEMAX 128
+#define DFLT_MSGSIZE 8192U
+#define DFLT_MSGSIZEMAX 8192
+#define HARD_MSGSIZEMAX (16*1024*1024)
#else
static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
#endif
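
To illustrate the limits documented above, a small userspace sketch that creates a queue whose attributes sit exactly at the unprivileged defaults (DFLT_MSGMAX messages of DFLT_MSGSIZEMAX bytes); the queue name is arbitrary and the program links with -lrt:

#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	struct mq_attr attr = {
		.mq_maxmsg  = 10,	/* DFLT_MSGMAX */
		.mq_msgsize = 8192,	/* DFLT_MSGSIZEMAX */
	};
	mqd_t q = mq_open("/mq-limits-demo", O_CREAT | O_RDWR, 0600, &attr);

	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}
	mq_close(q);
	mq_unlink("/mq-limits-demo");
	return 0;
}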
diff --git a/include/linux/ipx.h b/include/linux/ipx.h
index 8f0243982eb6..3d48014cdd71 100644
--- a/include/linux/ipx.h
+++ b/include/linux/ipx.h
@@ -38,7 +38,7 @@ struct ipx_interface_definition {
#define IPX_FRAME_8022 2
#define IPX_FRAME_ETHERII 3
#define IPX_FRAME_8023 4
-/* obsolete token ring was 5 */
+#define IPX_FRAME_TR_8022 5 /* obsolete */
unsigned char ipx_special;
#define IPX_SPECIAL_NONE 0
#define IPX_PRIMARY 1
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index c65740d76e66..5abb533eb8eb 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -141,9 +141,8 @@ static inline struct irq_domain *irq_domain_add_legacy_isa(
return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops,
host_data);
}
-extern struct irq_domain *irq_find_host(struct device_node *node);
-extern void irq_set_default_host(struct irq_domain *host);
+extern void irq_domain_remove(struct irq_domain *host);
extern unsigned int irq_create_mapping(struct irq_domain *host,
irq_hw_number_t hwirq);
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index d211732b9e99..c8f32975f0e4 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -479,12 +479,6 @@ struct transaction_s
* How many handles used this transaction? [t_handle_lock]
*/
int t_handle_count;
-
- /*
- * This transaction is being forced and some process is
- * waiting for it to finish.
- */
- unsigned int t_synchronous_commit:1;
};
/**
@@ -531,6 +525,8 @@ struct transaction_s
* transaction
* @j_commit_request: Sequence number of the most recent transaction wanting
* commit
+ * @j_commit_waited: Sequence number of the most recent transaction someone
+ * is waiting for to commit.
* @j_uuid: Uuid of client object.
* @j_task: Pointer to the current commit thread for this journal
* @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
@@ -696,6 +692,13 @@ struct journal_s
tid_t j_commit_request;
/*
+ * Sequence number of the most recent transaction someone is waiting
+ * for to commit.
+ * [j_state_lock]
+ */
+ tid_t j_commit_waited;
+
+ /*
* Journal uuid: identifies the object (filesystem, LVM volume etc)
* backed by this journal. This will eventually be replaced by an array
* of uuids, allowing us to index multiple devices within a single
@@ -861,7 +864,8 @@ extern int journal_destroy (journal_t *);
extern int journal_recover (journal_t *journal);
extern int journal_wipe (journal_t *, int);
extern int journal_skip_recovery (journal_t *);
-extern void journal_update_superblock (journal_t *, int);
+extern void journal_update_sb_log_tail (journal_t *, tid_t, unsigned int,
+ int);
extern void journal_abort (journal_t *, int);
extern int journal_errno (journal_t *);
extern void journal_ack_err (journal_t *);
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 387571959dd9..6883e197acb9 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -36,6 +36,7 @@ const char *kallsyms_lookup(unsigned long addr,
/* Look up a kernel symbol and return it in a text buffer. */
extern int sprint_symbol(char *buffer, unsigned long address);
+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
extern int sprint_backtrace(char *buffer, unsigned long address);
/* Look up a kernel symbol and print it to the kernel messages. */
@@ -80,6 +81,12 @@ static inline int sprint_symbol(char *buffer, unsigned long addr)
return 0;
}
+static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr)
+{
+ *buffer = '\0';
+ return 0;
+}
+
static inline int sprint_backtrace(char *buffer, unsigned long addr)
{
*buffer = '\0';
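
A brief kernel-side sketch (illustrative, not part of the patch) of the new helper: it resolves an address to a bare symbol name, without the +offset/size suffix that sprint_symbol() appends:

#include <linux/kallsyms.h>
#include <linux/printk.h>

static void print_symbol_name(unsigned long addr)
{
	char sym[KSYM_SYMBOL_LEN];

	/* e.g. "schedule" rather than "schedule+0x0/0x7a0" */
	sprint_symbol_no_offset(sym, addr);
	pr_info("resolved %p to %s\n", (void *)addr, sym);
}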
diff --git a/include/linux/kcmp.h b/include/linux/kcmp.h
new file mode 100644
index 000000000000..2dcd1b3aafc8
--- /dev/null
+++ b/include/linux/kcmp.h
@@ -0,0 +1,17 @@
+#ifndef _LINUX_KCMP_H
+#define _LINUX_KCMP_H
+
+/* Comparison type */
+enum kcmp_type {
+ KCMP_FILE,
+ KCMP_VM,
+ KCMP_FILES,
+ KCMP_FS,
+ KCMP_SIGHAND,
+ KCMP_IO,
+ KCMP_SYSVSEM,
+
+ KCMP_TYPES,
+};
+
+#endif /* _LINUX_KCMP_H */
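
Illustrative userspace use of the new comparison types (assuming the toolchain defines SYS_kcmp; there is no glibc wrapper): kcmp(2) returns 0 when the two tasks share the compared resource.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kcmp.h>

static long kcmp(pid_t pid1, pid_t pid2, int type,
		 unsigned long idx1, unsigned long idx2)
{
	return syscall(SYS_kcmp, pid1, pid2, type, idx1, idx2);
}

int main(void)
{
	pid_t self = getpid();
	int dupfd = dup(0);

	/* 0: stdin and its dup refer to the same struct file */
	printf("KCMP_FILE(0, dup)  = %ld\n",
	       kcmp(self, self, KCMP_FILE, 0, dupfd));
	/* 0: a task trivially shares its own mm */
	printf("KCMP_VM(self,self) = %ld\n",
	       kcmp(self, self, KCMP_VM, 0, 0));
	return 0;
}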
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index 26a65711676f..a1bdf6966357 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -32,6 +32,8 @@
#define KPF_KSM 21
#define KPF_THP 22
+#ifdef __KERNEL__
+
/* kernel hacking assistances
* WARNING: subject to change, never rely on them!
*/
@@ -44,4 +46,6 @@
#define KPF_ARCH 38
#define KPF_UNCACHED 39
+#endif /* __KERNEL__ */
+
#endif /* LINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index c0d34420a913..e07f5e0c5df4 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -35,9 +35,12 @@
#define LLONG_MAX ((long long)(~0ULL>>1))
#define LLONG_MIN (-LLONG_MAX - 1)
#define ULLONG_MAX (~0ULL)
+#define SIZE_MAX (~(size_t)0)
#define STACK_MAGIC 0xdeadbeef
+#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
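
The new REPEAT_BYTE() macro replicates a byte value across every byte of an unsigned long (~0ul / 0xff is 0x01 repeated in each byte), for example:

#include <stdio.h>

#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))

int main(void)
{
	/* On a 64-bit build this prints 0x7f7f7f7f7f7f7f7f. */
	printf("%#lx\n", REPEAT_BYTE(0x7f));
	return 0;
}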
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 0d7d6a1b172f..37c5f7261142 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -1,8 +1,58 @@
#ifndef LINUX_KEXEC_H
#define LINUX_KEXEC_H
-#ifdef CONFIG_KEXEC
+/* kexec system call - It loads the new kernel to boot into.
+ * kexec does not sync or unmount filesystems, so if you need
+ * that to happen you need to do it yourself.
+ */
+
#include <linux/types.h>
+
+/* kexec flags for different usage scenarios */
+#define KEXEC_ON_CRASH 0x00000001
+#define KEXEC_PRESERVE_CONTEXT 0x00000002
+#define KEXEC_ARCH_MASK 0xffff0000
+
+/* These values match the ELF architecture values.
+ * Unless there is a good reason that should continue to be the case.
+ */
+#define KEXEC_ARCH_DEFAULT ( 0 << 16)
+#define KEXEC_ARCH_386 ( 3 << 16)
+#define KEXEC_ARCH_X86_64 (62 << 16)
+#define KEXEC_ARCH_PPC (20 << 16)
+#define KEXEC_ARCH_PPC64 (21 << 16)
+#define KEXEC_ARCH_IA_64 (50 << 16)
+#define KEXEC_ARCH_ARM (40 << 16)
+#define KEXEC_ARCH_S390 (22 << 16)
+#define KEXEC_ARCH_SH (42 << 16)
+#define KEXEC_ARCH_MIPS_LE (10 << 16)
+#define KEXEC_ARCH_MIPS ( 8 << 16)
+
+/* The artificial cap on the number of segments passed to kexec_load. */
+#define KEXEC_SEGMENT_MAX 16
+
+#ifndef __KERNEL__
+/*
+ * This structure is used to hold the arguments that are used when
+ * loading kernel binaries.
+ */
+struct kexec_segment {
+ const void *buf;
+ size_t bufsz;
+ const void *mem;
+ size_t memsz;
+};
+
+/* Load a new kernel image as described by the kexec_segment array,
+ * consisting of the passed number of segments, at the entry-point address.
+ * The flags allow different usage types.
+ */
+extern int kexec_load(void *, size_t, struct kexec_segment *,
+ unsigned long int);
+#endif /* __KERNEL__ */
+
+#ifdef __KERNEL__
+#ifdef CONFIG_KEXEC
#include <linux/list.h>
#include <linux/linkage.h>
#include <linux/compat.h>
@@ -67,11 +117,10 @@ typedef unsigned long kimage_entry_t;
#define IND_DONE 0x4
#define IND_SOURCE 0x8
-#define KEXEC_SEGMENT_MAX 16
struct kexec_segment {
void __user *buf;
size_t bufsz;
- unsigned long mem; /* User space sees this as a (void *) ... */
+ unsigned long mem;
size_t memsz;
};
@@ -175,25 +224,6 @@ extern struct kimage *kexec_crash_image;
#define kexec_flush_icache_page(page)
#endif
-#define KEXEC_ON_CRASH 0x00000001
-#define KEXEC_PRESERVE_CONTEXT 0x00000002
-#define KEXEC_ARCH_MASK 0xffff0000
-
-/* These values match the ELF architecture values.
- * Unless there is a good reason that should continue to be the case.
- */
-#define KEXEC_ARCH_DEFAULT ( 0 << 16)
-#define KEXEC_ARCH_386 ( 3 << 16)
-#define KEXEC_ARCH_X86_64 (62 << 16)
-#define KEXEC_ARCH_PPC (20 << 16)
-#define KEXEC_ARCH_PPC64 (21 << 16)
-#define KEXEC_ARCH_IA_64 (50 << 16)
-#define KEXEC_ARCH_ARM (40 << 16)
-#define KEXEC_ARCH_S390 (22 << 16)
-#define KEXEC_ARCH_SH (42 << 16)
-#define KEXEC_ARCH_MIPS_LE (10 << 16)
-#define KEXEC_ARCH_MIPS ( 8 << 16)
-
/* List of defined/legal kexec flags */
#ifndef CONFIG_KEXEC_JUMP
#define KEXEC_FLAGS KEXEC_ON_CRASH
@@ -228,4 +258,5 @@ struct task_struct;
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
#endif /* CONFIG_KEXEC */
+#endif /* __KERNEL__ */
#endif /* LINUX_KEXEC_H */
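
A hedged userspace sketch of the interface now exported above: one segment is described and handed to the kexec_load syscall (here via syscall(2); the image buffer, destination address, sizes and entry point are placeholders that a real loader such as kexec-tools derives from the kernel image):

#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kexec.h>

static long load_one_segment(const void *image, size_t len,
			     const void *phys_dest, size_t dest_len,
			     const void *entry)
{
	struct kexec_segment seg = {
		.buf   = image,		/* source buffer in this process */
		.bufsz = len,
		.mem   = phys_dest,	/* physical destination address */
		.memsz = dest_len,	/* typically page-aligned */
	};

	/* One segment, default architecture, normal (non-crash) load. */
	return syscall(SYS_kexec_load, entry, 1UL, &seg, 0UL);
}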
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index dd99c329e161..5398d5807075 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -66,40 +66,10 @@ struct subprocess_info {
void *data;
};
-/* Allocate a subprocess_info structure */
-struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
- char **envp, gfp_t gfp_mask);
-
-/* Set various pieces of state into the subprocess_info structure */
-void call_usermodehelper_setfns(struct subprocess_info *info,
- int (*init)(struct subprocess_info *info, struct cred *new),
- void (*cleanup)(struct subprocess_info *info),
- void *data);
-
-/* Actually execute the sub-process */
-int call_usermodehelper_exec(struct subprocess_info *info, int wait);
-
-/* Free the subprocess_info. This is only needed if you're not going
- to call call_usermodehelper_exec */
-void call_usermodehelper_freeinfo(struct subprocess_info *info);
-
-static inline int
+extern int
call_usermodehelper_fns(char *path, char **argv, char **envp, int wait,
int (*init)(struct subprocess_info *info, struct cred *new),
- void (*cleanup)(struct subprocess_info *), void *data)
-{
- struct subprocess_info *info;
- gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
-
- info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
-
- if (info == NULL)
- return -ENOMEM;
-
- call_usermodehelper_setfns(info, init, cleanup, data);
-
- return call_usermodehelper_exec(info, wait);
-}
+ void (*cleanup)(struct subprocess_info *), void *data);
static inline int
call_usermodehelper(char *path, char **argv, char **envp, int wait)
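
For reference, a minimal kernel-side sketch of the surviving call_usermodehelper() wrapper (the helper path and arguments are purely illustrative):

#include <linux/kmod.h>

static int run_example_helper(void)
{
	char *argv[] = { "/usr/bin/logger", "module loaded", NULL };
	char *envp[] = { "HOME=/",
			 "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	/* UMH_WAIT_PROC: block until the helper exits, return its status. */
	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}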
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 6c322a90b92f..09f2b3aa2da7 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -449,6 +449,30 @@ struct kvm_ppc_pvinfo {
__u8 pad[108];
};
+/* for KVM_PPC_GET_SMMU_INFO */
+#define KVM_PPC_PAGE_SIZES_MAX_SZ 8
+
+struct kvm_ppc_one_page_size {
+ __u32 page_shift; /* Page shift (or 0) */
+ __u32 pte_enc; /* Encoding in the HPTE (>>12) */
+};
+
+struct kvm_ppc_one_seg_page_size {
+ __u32 page_shift; /* Base page shift of segment (or 0) */
+ __u32 slb_enc; /* SLB encoding for BookS */
+ struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
+};
+
+#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
+#define KVM_PPC_1T_SEGMENTS 0x00000002
+
+struct kvm_ppc_smmu_info {
+ __u64 flags;
+ __u32 slb_size;
+ __u32 pad;
+ struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
+};
+
#define KVMIO 0xAE
/* machine type bits, to be used as argument to KVM_CREATE_VM */
@@ -589,6 +613,10 @@ struct kvm_ppc_pvinfo {
#define KVM_CAP_S390_UCONTROL 73
#define KVM_CAP_SYNC_REGS 74
#define KVM_CAP_PCI_2_3 75
+#define KVM_CAP_KVMCLOCK_CTRL 76
+#define KVM_CAP_SIGNAL_MSI 77
+#define KVM_CAP_PPC_GET_SMMU_INFO 78
+#define KVM_CAP_S390_COW 79
#ifdef KVM_CAP_IRQ_ROUTING
@@ -714,6 +742,14 @@ struct kvm_one_reg {
__u64 addr;
};
+struct kvm_msi {
+ __u32 address_lo;
+ __u32 address_hi;
+ __u32 data;
+ __u32 flags;
+ __u8 pad[16];
+};
+
/*
* ioctls for VM fds
*/
@@ -788,6 +824,10 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_PCI_2_3 */
#define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \
struct kvm_assigned_pci_dev)
+/* Available with KVM_CAP_SIGNAL_MSI */
+#define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi)
+/* Available with KVM_CAP_PPC_GET_SMMU_INFO */
+#define KVM_PPC_GET_SMMU_INFO _IOR(KVMIO, 0xa6, struct kvm_ppc_smmu_info)
/*
* ioctls for vcpu fds
@@ -859,6 +899,8 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_ONE_REG */
#define KVM_GET_ONE_REG _IOW(KVMIO, 0xab, struct kvm_one_reg)
#define KVM_SET_ONE_REG _IOW(KVMIO, 0xac, struct kvm_one_reg)
+/* VM is being stopped by host */
+#define KVM_KVMCLOCK_CTRL _IO(KVMIO, 0xad)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
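
A hedged userspace sketch of the new MSI injection ioctl (availability should first be checked via KVM_CHECK_EXTENSION with KVM_CAP_SIGNAL_MSI; the address/data values below are placeholders a VMM would take from the guest-programmed MSI message):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int inject_msi(int vm_fd)
{
	struct kvm_msi msi;

	memset(&msi, 0, sizeof(msi));
	msi.address_lo = 0xfee00000;	/* x86 MSI address window (example) */
	msi.address_hi = 0;
	msi.data       = 0x0041;	/* vector/delivery-mode bits (example) */

	/* > 0: delivered, 0: blocked by the guest, < 0: error */
	return ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
}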
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 72cbf08d45fb..c4464356b35b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -35,6 +35,20 @@
#endif
/*
+ * If we support unaligned MMIO, at most one fragment will be split into two:
+ */
+#ifdef KVM_UNALIGNED_MMIO
+# define KVM_EXTRA_MMIO_FRAGMENTS 1
+#else
+# define KVM_EXTRA_MMIO_FRAGMENTS 0
+#endif
+
+#define KVM_USER_MMIO_SIZE 8
+
+#define KVM_MAX_MMIO_FRAGMENTS \
+ (KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
+
+/*
* vcpu->requests bit members
*/
#define KVM_REQ_TLB_FLUSH 0
@@ -68,10 +82,11 @@ struct kvm_io_range {
struct kvm_io_device *dev;
};
+#define NR_IOBUS_DEVS 1000
+
struct kvm_io_bus {
int dev_count;
-#define NR_IOBUS_DEVS 300
- struct kvm_io_range range[NR_IOBUS_DEVS];
+ struct kvm_io_range range[];
};
enum kvm_bus {
@@ -113,7 +128,18 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
enum {
OUTSIDE_GUEST_MODE,
IN_GUEST_MODE,
- EXITING_GUEST_MODE
+ EXITING_GUEST_MODE,
+ READING_SHADOW_PAGE_TABLES,
+};
+
+/*
+ * Sometimes a large or cross-page mmio needs to be broken up into separate
+ * exits for userspace servicing.
+ */
+struct kvm_mmio_fragment {
+ gpa_t gpa;
+ void *data;
+ unsigned len;
};
struct kvm_vcpu {
@@ -143,10 +169,9 @@ struct kvm_vcpu {
int mmio_needed;
int mmio_read_completed;
int mmio_is_write;
- int mmio_size;
- int mmio_index;
- unsigned char mmio_data[KVM_MMIO_SIZE];
- gpa_t mmio_phys_addr;
+ int mmio_cur_fragment;
+ int mmio_nr_fragments;
+ struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif
#ifdef CONFIG_KVM_ASYNC_PF
@@ -178,8 +203,6 @@ struct kvm_memory_slot {
unsigned long flags;
unsigned long *rmap;
unsigned long *dirty_bitmap;
- unsigned long *dirty_bitmap_head;
- unsigned long nr_dirty_pages;
struct kvm_arch_memory_slot arch;
unsigned long userspace_addr;
int user_alloc;
@@ -438,6 +461,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
@@ -506,6 +531,7 @@ int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
void kvm_free_physmem(struct kvm *kvm);
@@ -521,6 +547,15 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
}
#endif
+static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+{
+#ifdef __KVM_HAVE_ARCH_WQP
+ return vcpu->arch.wqp;
+#else
+ return &vcpu->wq;
+#endif
+}
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
@@ -769,6 +804,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);
+int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
+
#else
static inline void kvm_free_irq_routing(struct kvm *kvm) {}
diff --git a/include/linux/lcd.h b/include/linux/lcd.h
index 8877123f2d6e..e00c3b0ebc6b 100644
--- a/include/linux/lcd.h
+++ b/include/linux/lcd.h
@@ -40,6 +40,16 @@ struct lcd_ops {
/* Get the LCD panel power status (0: full on, 1..3: controller
power on, flat panel power off, 4: full off), see FB_BLANK_XXX */
int (*get_power)(struct lcd_device *);
+ /*
+ * Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX);
+ * this callback is called prior to the fb driver's callback.
+ *
+ * Note that if early_set_power is not NULL then an early fb notifier
+ * will be registered.
+ */
+ int (*early_set_power)(struct lcd_device *, int power);
+ /* revert the effects of the early blank event. */
+ int (*r_early_set_power)(struct lcd_device *, int power);
/* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */
int (*set_power)(struct lcd_device *, int power);
/* Get the current contrast setting (0-max_contrast) */
diff --git a/include/linux/led-lm3530.h b/include/linux/led-lm3530.h
index eeae6e742471..4b133479d6ea 100644
--- a/include/linux/led-lm3530.h
+++ b/include/linux/led-lm3530.h
@@ -92,7 +92,7 @@ struct lm3530_pwm_data {
* @als2_resistor_sel: internal resistance from ALS2 input to ground
* @als_vmin: als input voltage calibrated for max brightness in mV
* @als_vmax: als input voltage calibrated for min brightness in mV
- * @brt_val: brightness value (0-255)
+ * @brt_val: brightness value (0-127)
* @pwm_data: PWM control functions (only valid when the mode is PWM)
*/
struct lm3530_platform_data {
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 5884def15a24..39eee41d8c6f 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -73,6 +73,8 @@ struct led_classdev {
struct led_trigger *trigger;
struct list_head trig_list;
void *trigger_data;
+ /* true if activated - deactivate routine uses it to do cleanup */
+ bool activated;
#endif
};
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 11a966e5f829..4d24d64578c4 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -54,7 +54,7 @@ extern void nlmclnt_done(struct nlm_host *host);
extern int nlmclnt_proc(struct nlm_host *host, int cmd,
struct file_lock *fl);
-extern int lockd_up(void);
-extern void lockd_down(void);
+extern int lockd_up(struct net *net);
+extern void lockd_down(struct net *net);
#endif /* LINUX_LOCKD_BIND_H */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f94efd2f6c27..83e7ba90d6e5 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -63,12 +63,7 @@ extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask);
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
-struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
- enum lru_list);
-void mem_cgroup_lru_del_list(struct page *, enum lru_list);
-void mem_cgroup_lru_del(struct page *);
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
- enum lru_list, enum lru_list);
+struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
/* For coalescing uncharge for reducing memcg' overhead*/
extern void mem_cgroup_uncharge_start(void);
@@ -79,6 +74,8 @@ extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
int order);
+bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
+ struct mem_cgroup *memcg);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);
extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
@@ -92,10 +89,13 @@ static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
struct mem_cgroup *memcg;
+ int match;
+
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
+ match = __mem_cgroup_same_or_subtree(cgroup, memcg);
rcu_read_unlock();
- return cgroup == memcg;
+ return match;
}
extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
@@ -114,17 +114,11 @@ void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
/*
* For memory reclaim.
*/
-int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
- struct zone *zone);
-int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
- struct zone *zone);
+int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
+int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
- int nid, int zid, unsigned int lrumask);
-struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
- struct zone *zone);
-struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page);
+unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
+void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
@@ -251,25 +245,8 @@ static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
return &zone->lruvec;
}
-static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
- struct page *page,
- enum lru_list lru)
-{
- return &zone->lruvec;
-}
-
-static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
-{
-}
-
-static inline void mem_cgroup_lru_del(struct page *page)
-{
-}
-
-static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
- struct page *page,
- enum lru_list from,
- enum lru_list to)
+static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
+ struct zone *zone)
{
return &zone->lruvec;
}
@@ -333,35 +310,27 @@ static inline bool mem_cgroup_disabled(void)
}
static inline int
-mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
+mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
return 1;
}
static inline int
-mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
+mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
{
return 1;
}
static inline unsigned long
-mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
- unsigned int lru_mask)
+mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
return 0;
}
-
-static inline struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
-{
- return NULL;
-}
-
-static inline struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+static inline void
+mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+ int increment)
{
- return NULL;
}
static inline void
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 7c727a90d70d..4aa42732e47f 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -225,8 +225,8 @@ static inline void check_highest_zone(enum zone_type k)
policy_zone = k;
}
-int do_migrate_pages(struct mm_struct *mm,
- const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
+int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+ const nodemask_t *to, int flags);
#ifdef CONFIG_TMPFS
@@ -354,9 +354,8 @@ static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
return false;
}
-static inline int do_migrate_pages(struct mm_struct *mm,
- const nodemask_t *from_nodes,
- const nodemask_t *to_nodes, int flags)
+static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+ const nodemask_t *to, int flags)
{
return 0;
}
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index fccc3002f271..91dd3ef63e99 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -7,6 +7,7 @@
#ifndef MFD_AB8500_H
#define MFD_AB8500_H
+#include <linux/atomic.h>
#include <linux/mutex.h>
struct device;
@@ -194,6 +195,14 @@ enum ab8500_version {
#define AB9540_INT_GPIO52F 123
#define AB9540_INT_GPIO53F 124
#define AB9540_INT_GPIO54F 125 /* not 8505 */
+/* ab8500_irq_regoffset[16] -> IT[Source|Latch|Mask]25 */
+#define AB8505_INT_KEYSTUCK 128
+#define AB8505_INT_IKR 129
+#define AB8505_INT_IKP 130
+#define AB8505_INT_KP 131
+#define AB8505_INT_KEYDEGLITCH 132
+#define AB8505_INT_MODPWRSTATUSF 134
+#define AB8505_INT_MODPWRSTATUSR 135
/*
* AB8500_AB9540_NR_IRQS is used when configuring the IRQ numbers for the
@@ -203,8 +212,8 @@ enum ab8500_version {
* which is larger.
*/
#define AB8500_NR_IRQS 112
-#define AB8505_NR_IRQS 128
-#define AB9540_NR_IRQS 128
+#define AB8505_NR_IRQS 136
+#define AB9540_NR_IRQS 136
/* This is set to the roof of any AB8500 chip variant IRQ counts */
#define AB8500_MAX_NR_IRQS AB9540_NR_IRQS
@@ -216,6 +225,7 @@ enum ab8500_version {
* @dev: parent device
* @lock: read/write operations lock
* @irq_lock: genirq bus lock
+ * @transfer_ongoing: 0 if no transfer ongoing
* @irq: irq line
* @version: chip version id (e.g. ab8500 or ab9540)
* @chip_id: chip revision id
@@ -234,7 +244,7 @@ struct ab8500 {
struct device *dev;
struct mutex lock;
struct mutex irq_lock;
-
+ atomic_t transfer_ongoing;
int irq_base;
int irq;
enum ab8500_version version;
@@ -280,6 +290,8 @@ extern int __devinit ab8500_init(struct ab8500 *ab8500,
enum ab8500_version version);
extern int __devexit ab8500_exit(struct ab8500 *ab8500);
+extern int ab8500_suspend(struct ab8500 *ab8500);
+
static inline int is_ab8500(struct ab8500 *ab)
{
return ab->version == AB8500_VERSION_AB8500;
diff --git a/include/linux/mfd/anatop.h b/include/linux/mfd/anatop.h
index 22c1007d3ec5..7f92acf03d9e 100644
--- a/include/linux/mfd/anatop.h
+++ b/include/linux/mfd/anatop.h
@@ -34,7 +34,7 @@ struct anatop {
spinlock_t reglock;
};
-extern u32 anatop_get_bits(struct anatop *, u32, int, int);
-extern void anatop_set_bits(struct anatop *, u32, int, int, u32);
+extern u32 anatop_read_reg(struct anatop *, u32);
+extern void anatop_write_reg(struct anatop *, u32, u32, u32);
#endif /* __LINUX_MFD_ANATOP_H */
diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h
index ef6faa5cee46..e1148d037e7b 100644
--- a/include/linux/mfd/asic3.h
+++ b/include/linux/mfd/asic3.h
@@ -31,6 +31,8 @@ struct asic3_platform_data {
unsigned int gpio_base;
+ unsigned int clock_rate;
+
struct asic3_led *leds;
};
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index 8313cd9658e3..0507c4c21a7d 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -33,6 +33,18 @@
#include <linux/mfd/da9052/reg.h>
+/* Common - HWMON Channel Definitions */
+#define DA9052_ADC_VDDOUT 0
+#define DA9052_ADC_ICH 1
+#define DA9052_ADC_TBAT 2
+#define DA9052_ADC_VBAT 3
+#define DA9052_ADC_IN4 4
+#define DA9052_ADC_IN5 5
+#define DA9052_ADC_IN6 6
+#define DA9052_ADC_TSI 7
+#define DA9052_ADC_TJUNC 8
+#define DA9052_ADC_VBBAT 9
+
#define DA9052_IRQ_DCIN 0
#define DA9052_IRQ_VBUS 1
#define DA9052_IRQ_DCINREM 2
@@ -79,6 +91,9 @@ struct da9052 {
struct device *dev;
struct regmap *regmap;
+ struct mutex auxadc_lock;
+ struct completion done;
+
int irq_base;
struct regmap_irq_chip_data *irq_data;
u8 chip_id;
@@ -86,6 +101,10 @@ struct da9052 {
int chip_irq;
};
+/* ADC API */
+int da9052_adc_manual_read(struct da9052 *da9052, unsigned char channel);
+int da9052_adc_read_temp(struct da9052 *da9052);
+
/* Device I/O API */
static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg)
{
diff --git a/include/linux/mfd/lm3533.h b/include/linux/mfd/lm3533.h
new file mode 100644
index 000000000000..594bc591f256
--- /dev/null
+++ b/include/linux/mfd/lm3533.h
@@ -0,0 +1,104 @@
+/*
+ * lm3533.h -- LM3533 interface
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_LM3533_H
+#define __LINUX_MFD_LM3533_H
+
+#define LM3533_ATTR_RO(_name) \
+ DEVICE_ATTR(_name, S_IRUGO, show_##_name, NULL)
+#define LM3533_ATTR_RW(_name) \
+ DEVICE_ATTR(_name, S_IRUGO | S_IWUSR , show_##_name, store_##_name)
+
+struct device;
+struct regmap;
+
+struct lm3533 {
+ struct device *dev;
+
+ struct regmap *regmap;
+
+ int gpio_hwen;
+ int irq;
+
+ unsigned have_als:1;
+ unsigned have_backlights:1;
+ unsigned have_leds:1;
+};
+
+struct lm3533_ctrlbank {
+ struct lm3533 *lm3533;
+ struct device *dev;
+ int id;
+};
+
+struct lm3533_als_platform_data {
+ unsigned pwm_mode:1; /* PWM input mode (default analog) */
+ u8 r_select; /* 1 - 127 (ignored in PWM-mode) */
+};
+
+struct lm3533_bl_platform_data {
+ char *name;
+ u16 max_current; /* 5000 - 29800 uA (800 uA step) */
+ u8 default_brightness; /* 0 - 255 */
+ u8 pwm; /* 0 - 0x3f */
+};
+
+struct lm3533_led_platform_data {
+ char *name;
+ const char *default_trigger;
+ u16 max_current; /* 5000 - 29800 uA (800 uA step) */
+ u8 pwm; /* 0 - 0x3f */
+};
+
+enum lm3533_boost_freq {
+ LM3533_BOOST_FREQ_500KHZ,
+ LM3533_BOOST_FREQ_1000KHZ,
+};
+
+enum lm3533_boost_ovp {
+ LM3533_BOOST_OVP_16V,
+ LM3533_BOOST_OVP_24V,
+ LM3533_BOOST_OVP_32V,
+ LM3533_BOOST_OVP_40V,
+};
+
+struct lm3533_platform_data {
+ int gpio_hwen;
+
+ enum lm3533_boost_ovp boost_ovp;
+ enum lm3533_boost_freq boost_freq;
+
+ struct lm3533_als_platform_data *als;
+
+ struct lm3533_bl_platform_data *backlights;
+ int num_backlights;
+
+ struct lm3533_led_platform_data *leds;
+ int num_leds;
+};
+
+extern int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb);
+extern int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb);
+
+extern int lm3533_ctrlbank_set_brightness(struct lm3533_ctrlbank *cb, u8 val);
+extern int lm3533_ctrlbank_get_brightness(struct lm3533_ctrlbank *cb, u8 *val);
+extern int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb,
+ u16 imax);
+extern int lm3533_ctrlbank_set_pwm(struct lm3533_ctrlbank *cb, u8 val);
+extern int lm3533_ctrlbank_get_pwm(struct lm3533_ctrlbank *cb, u8 *val);
+
+extern int lm3533_read(struct lm3533 *lm3533, u8 reg, u8 *val);
+extern int lm3533_write(struct lm3533 *lm3533, u8 reg, u8 val);
+extern int lm3533_update(struct lm3533 *lm3533, u8 reg, u8 val, u8 mask);
+
+#endif /* __LINUX_MFD_LM3533_H */
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
new file mode 100644
index 000000000000..fec5256c3f5d
--- /dev/null
+++ b/include/linux/mfd/lpc_ich.h
@@ -0,0 +1,48 @@
+/*
+ * linux/drivers/mfd/lpc_ich.h
+ *
+ * Copyright (c) 2012 Extreme Engineering Solution, Inc.
+ * Author: Aaron Sierra <asierra@xes-inc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef LPC_ICH_H
+#define LPC_ICH_H
+
+/* Watchdog resources */
+#define ICH_RES_IO_TCO 0
+#define ICH_RES_IO_SMI 1
+#define ICH_RES_MEM_OFF 2
+#define ICH_RES_MEM_GCS 0
+
+/* GPIO resources */
+#define ICH_RES_GPIO 0
+#define ICH_RES_GPE0 1
+
+/* GPIO compatibility */
+#define ICH_I3100_GPIO 0x401
+#define ICH_V5_GPIO 0x501
+#define ICH_V6_GPIO 0x601
+#define ICH_V7_GPIO 0x701
+#define ICH_V9_GPIO 0x801
+#define ICH_V10CORP_GPIO 0xa01
+#define ICH_V10CONS_GPIO 0xa11
+
+struct lpc_ich_info {
+ char name[32];
+ unsigned int iTCO_version;
+ unsigned int gpio_version;
+};
+
+#endif
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
new file mode 100644
index 000000000000..68263c5fa53c
--- /dev/null
+++ b/include/linux/mfd/max77693-private.h
@@ -0,0 +1,227 @@
+/*
+ * max77693-private.h - Voltage regulator driver for the Maxim 77693
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_MFD_MAX77693_PRIV_H
+#define __LINUX_MFD_MAX77693_PRIV_H
+
+#include <linux/i2c.h>
+
+#define MAX77693_NUM_IRQ_MUIC_REGS 3
+#define MAX77693_REG_INVALID (0xff)
+
+/* Slave addr = 0xCC: PMIC, Charger, Flash LED */
+enum max77693_pmic_reg {
+ MAX77693_LED_REG_IFLASH1 = 0x00,
+ MAX77693_LED_REG_IFLASH2 = 0x01,
+ MAX77693_LED_REG_ITORCH = 0x02,
+ MAX77693_LED_REG_ITORCHTIMER = 0x03,
+ MAX77693_LED_REG_FLASH_TIMER = 0x04,
+ MAX77693_LED_REG_FLASH_EN = 0x05,
+ MAX77693_LED_REG_MAX_FLASH1 = 0x06,
+ MAX77693_LED_REG_MAX_FLASH2 = 0x07,
+ MAX77693_LED_REG_MAX_FLASH3 = 0x08,
+ MAX77693_LED_REG_MAX_FLASH4 = 0x09,
+ MAX77693_LED_REG_VOUT_CNTL = 0x0A,
+ MAX77693_LED_REG_VOUT_FLASH1 = 0x0B,
+ MAX77693_LED_REG_VOUT_FLASH2 = 0x0C,
+ MAX77693_LED_REG_FLASH_INT = 0x0E,
+ MAX77693_LED_REG_FLASH_INT_MASK = 0x0F,
+ MAX77693_LED_REG_FLASH_INT_STATUS = 0x10,
+
+ MAX77693_PMIC_REG_PMIC_ID1 = 0x20,
+ MAX77693_PMIC_REG_PMIC_ID2 = 0x21,
+ MAX77693_PMIC_REG_INTSRC = 0x22,
+ MAX77693_PMIC_REG_INTSRC_MASK = 0x23,
+ MAX77693_PMIC_REG_TOPSYS_INT = 0x24,
+ MAX77693_PMIC_REG_TOPSYS_INT_MASK = 0x26,
+ MAX77693_PMIC_REG_TOPSYS_STAT = 0x28,
+ MAX77693_PMIC_REG_MAINCTRL1 = 0x2A,
+ MAX77693_PMIC_REG_LSCNFG = 0x2B,
+
+ MAX77693_CHG_REG_CHG_INT = 0xB0,
+ MAX77693_CHG_REG_CHG_INT_MASK = 0xB1,
+ MAX77693_CHG_REG_CHG_INT_OK = 0xB2,
+ MAX77693_CHG_REG_CHG_DETAILS_00 = 0xB3,
+ MAX77693_CHG_REG_CHG_DETAILS_01 = 0xB4,
+ MAX77693_CHG_REG_CHG_DETAILS_02 = 0xB5,
+ MAX77693_CHG_REG_CHG_DETAILS_03 = 0xB6,
+ MAX77693_CHG_REG_CHG_CNFG_00 = 0xB7,
+ MAX77693_CHG_REG_CHG_CNFG_01 = 0xB8,
+ MAX77693_CHG_REG_CHG_CNFG_02 = 0xB9,
+ MAX77693_CHG_REG_CHG_CNFG_03 = 0xBA,
+ MAX77693_CHG_REG_CHG_CNFG_04 = 0xBB,
+ MAX77693_CHG_REG_CHG_CNFG_05 = 0xBC,
+ MAX77693_CHG_REG_CHG_CNFG_06 = 0xBD,
+ MAX77693_CHG_REG_CHG_CNFG_07 = 0xBE,
+ MAX77693_CHG_REG_CHG_CNFG_08 = 0xBF,
+ MAX77693_CHG_REG_CHG_CNFG_09 = 0xC0,
+ MAX77693_CHG_REG_CHG_CNFG_10 = 0xC1,
+ MAX77693_CHG_REG_CHG_CNFG_11 = 0xC2,
+ MAX77693_CHG_REG_CHG_CNFG_12 = 0xC3,
+ MAX77693_CHG_REG_CHG_CNFG_13 = 0xC4,
+ MAX77693_CHG_REG_CHG_CNFG_14 = 0xC5,
+ MAX77693_CHG_REG_SAFEOUT_CTRL = 0xC6,
+
+ MAX77693_PMIC_REG_END,
+};
+
+/* Slave addr = 0x4A: MUIC */
+enum max77693_muic_reg {
+ MAX77693_MUIC_REG_ID = 0x00,
+ MAX77693_MUIC_REG_INT1 = 0x01,
+ MAX77693_MUIC_REG_INT2 = 0x02,
+ MAX77693_MUIC_REG_INT3 = 0x03,
+ MAX77693_MUIC_REG_STATUS1 = 0x04,
+ MAX77693_MUIC_REG_STATUS2 = 0x05,
+ MAX77693_MUIC_REG_STATUS3 = 0x06,
+ MAX77693_MUIC_REG_INTMASK1 = 0x07,
+ MAX77693_MUIC_REG_INTMASK2 = 0x08,
+ MAX77693_MUIC_REG_INTMASK3 = 0x09,
+ MAX77693_MUIC_REG_CDETCTRL1 = 0x0A,
+ MAX77693_MUIC_REG_CDETCTRL2 = 0x0B,
+ MAX77693_MUIC_REG_CTRL1 = 0x0C,
+ MAX77693_MUIC_REG_CTRL2 = 0x0D,
+ MAX77693_MUIC_REG_CTRL3 = 0x0E,
+
+ MAX77693_MUIC_REG_END,
+};
+
+/* Slave addr = 0x90: Haptic */
+enum max77693_haptic_reg {
+ MAX77693_HAPTIC_REG_STATUS = 0x00,
+ MAX77693_HAPTIC_REG_CONFIG1 = 0x01,
+ MAX77693_HAPTIC_REG_CONFIG2 = 0x02,
+ MAX77693_HAPTIC_REG_CONFIG_CHNL = 0x03,
+ MAX77693_HAPTIC_REG_CONFG_CYC1 = 0x04,
+ MAX77693_HAPTIC_REG_CONFG_CYC2 = 0x05,
+ MAX77693_HAPTIC_REG_CONFIG_PER1 = 0x06,
+ MAX77693_HAPTIC_REG_CONFIG_PER2 = 0x07,
+ MAX77693_HAPTIC_REG_CONFIG_PER3 = 0x08,
+ MAX77693_HAPTIC_REG_CONFIG_PER4 = 0x09,
+ MAX77693_HAPTIC_REG_CONFIG_DUTY1 = 0x0A,
+ MAX77693_HAPTIC_REG_CONFIG_DUTY2 = 0x0B,
+ MAX77693_HAPTIC_REG_CONFIG_PWM1 = 0x0C,
+ MAX77693_HAPTIC_REG_CONFIG_PWM2 = 0x0D,
+ MAX77693_HAPTIC_REG_CONFIG_PWM3 = 0x0E,
+ MAX77693_HAPTIC_REG_CONFIG_PWM4 = 0x0F,
+ MAX77693_HAPTIC_REG_REV = 0x10,
+
+ MAX77693_HAPTIC_REG_END,
+};
+
+enum max77693_irq_source {
+ LED_INT = 0,
+ TOPSYS_INT,
+ CHG_INT,
+ MUIC_INT1,
+ MUIC_INT2,
+ MUIC_INT3,
+
+ MAX77693_IRQ_GROUP_NR,
+};
+
+enum max77693_irq {
+ /* PMIC - FLASH */
+ MAX77693_LED_IRQ_FLED2_OPEN,
+ MAX77693_LED_IRQ_FLED2_SHORT,
+ MAX77693_LED_IRQ_FLED1_OPEN,
+ MAX77693_LED_IRQ_FLED1_SHORT,
+ MAX77693_LED_IRQ_MAX_FLASH,
+
+ /* PMIC - TOPSYS */
+ MAX77693_TOPSYS_IRQ_T120C_INT,
+ MAX77693_TOPSYS_IRQ_T140C_INT,
+ MAX77693_TOPSYS_IRQ_LOWSYS_INT,
+
+ /* PMIC - Charger */
+ MAX77693_CHG_IRQ_BYP_I,
+ MAX77693_CHG_IRQ_THM_I,
+ MAX77693_CHG_IRQ_BAT_I,
+ MAX77693_CHG_IRQ_CHG_I,
+ MAX77693_CHG_IRQ_CHGIN_I,
+
+ /* MUIC INT1 */
+ MAX77693_MUIC_IRQ_INT1_ADC,
+ MAX77693_MUIC_IRQ_INT1_ADC_LOW,
+ MAX77693_MUIC_IRQ_INT1_ADC_ERR,
+ MAX77693_MUIC_IRQ_INT1_ADC1K,
+
+ /* MUIC INT2 */
+ MAX77693_MUIC_IRQ_INT2_CHGTYP,
+ MAX77693_MUIC_IRQ_INT2_CHGDETREUN,
+ MAX77693_MUIC_IRQ_INT2_DCDTMR,
+ MAX77693_MUIC_IRQ_INT2_DXOVP,
+ MAX77693_MUIC_IRQ_INT2_VBVOLT,
+ MAX77693_MUIC_IRQ_INT2_VIDRM,
+
+ /* MUIC INT3 */
+ MAX77693_MUIC_IRQ_INT3_EOC,
+ MAX77693_MUIC_IRQ_INT3_CGMBC,
+ MAX77693_MUIC_IRQ_INT3_OVP,
+ MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR,
+ MAX77693_MUIC_IRQ_INT3_CHG_ENABLED,
+ MAX77693_MUIC_IRQ_INT3_BAT_DET,
+
+ MAX77693_IRQ_NR,
+};
+
+struct max77693_dev {
+ struct device *dev;
+ struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */
+ struct i2c_client *muic; /* 0x4A , MUIC */
+ struct i2c_client *haptic; /* 0x90 , Haptic */
+ struct mutex iolock;
+
+ int type;
+
+ struct regmap *regmap;
+ struct regmap *regmap_muic;
+ struct regmap *regmap_haptic;
+
+ struct irq_domain *irq_domain;
+
+ int irq;
+ int irq_gpio;
+ bool wakeup;
+ struct mutex irqlock;
+ int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
+ int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
+};
+
+enum max77693_types {
+ TYPE_MAX77693,
+};
+
+extern int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest);
+extern int max77693_bulk_read(struct regmap *map, u8 reg, int count,
+ u8 *buf);
+extern int max77693_write_reg(struct regmap *map, u8 reg, u8 value);
+extern int max77693_bulk_write(struct regmap *map, u8 reg, int count,
+ u8 *buf);
+extern int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask);
+
+extern int max77693_irq_init(struct max77693_dev *max77686);
+extern void max77693_irq_exit(struct max77693_dev *max77686);
+extern int max77693_irq_resume(struct max77693_dev *max77686);
+
+#endif /* __LINUX_MFD_MAX77693_PRIV_H */
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
new file mode 100644
index 000000000000..1d28ae90384e
--- /dev/null
+++ b/include/linux/mfd/max77693.h
@@ -0,0 +1,36 @@
+/*
+ * max77693.h - Driver for the Maxim 77693
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8997.h
+ *
+ * MAX77693 has PMIC, Charger, Flash LED, Haptic and MUIC devices.
+ * The devices share the same I2C bus and are included in
+ * this mfd driver.
+ */
+
+#ifndef __LINUX_MFD_MAX77693_H
+#define __LINUX_MFD_MAX77693_H
+
+struct max77693_platform_data {
+ int wakeup;
+};
+#endif /* __LINUX_MFD_MAX77693_H */
diff --git a/include/linux/mfd/rc5t583.h b/include/linux/mfd/rc5t583.h
index c42fe92a727d..3661c59aa1e9 100644
--- a/include/linux/mfd/rc5t583.h
+++ b/include/linux/mfd/rc5t583.h
@@ -292,6 +292,7 @@ struct rc5t583 {
* rc5t583_platform_data: Platform data for ricoh rc5t583 pmu.
* The board specific data is provided through this structure.
* @irq_base: Irq base number on which this device registers their interrupts.
+ * @gpio_base: GPIO base from which the GPIOs of this device start.
* @enable_shutdown: Enable shutdown through the input pin "shutdown".
* @regulator_deepsleep_slot: The slot number on which device goes to sleep
* in device sleep mode.
@@ -303,6 +304,7 @@ struct rc5t583 {
struct rc5t583_platform_data {
int irq_base;
+ int gpio_base;
bool enable_shutdown;
int regulator_deepsleep_slot[RC5T583_REGULATOR_MAX];
unsigned long regulator_ext_pwr_control[RC5T583_REGULATOR_MAX];
diff --git a/include/linux/mfd/sta2x11-mfd.h b/include/linux/mfd/sta2x11-mfd.h
new file mode 100644
index 000000000000..d179227e866f
--- /dev/null
+++ b/include/linux/mfd/sta2x11-mfd.h
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2009-2011 Wind River Systems, Inc.
+ * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * The STMicroelectronics ConneXt (STA2X11) chip has several unrelated
+ * functions in one PCI endpoint function. This driver simply
+ * registers the platform devices in this iomem region and exports a few
+ * functions to access common registers.
+ */
+
+#ifndef __STA2X11_MFD_H
+#define __STA2X11_MFD_H
+#include <linux/types.h>
+#include <linux/pci.h>
+
+/*
+ * The MFD PCI block includes the GPIO peripherals and other register blocks.
+ * For GPIO, we have 32*4 bits (I use "gsta" for "gpio sta2x11".)
+ */
+#define GSTA_GPIO_PER_BLOCK 32
+#define GSTA_NR_BLOCKS 4
+#define GSTA_NR_GPIO (GSTA_GPIO_PER_BLOCK * GSTA_NR_BLOCKS)
+
+/* Pinconfig is set by the board definition: altfunc, pull-up, pull-down */
+struct sta2x11_gpio_pdata {
+ unsigned pinconfig[GSTA_NR_GPIO];
+};
+
+/* Macros below lifted from sh_pfc.h, with minor differences */
+#define PINMUX_TYPE_NONE 0
+#define PINMUX_TYPE_FUNCTION 1
+#define PINMUX_TYPE_OUTPUT_LOW 2
+#define PINMUX_TYPE_OUTPUT_HIGH 3
+#define PINMUX_TYPE_INPUT 4
+#define PINMUX_TYPE_INPUT_PULLUP 5
+#define PINMUX_TYPE_INPUT_PULLDOWN 6
+
+/* Give names to GPIO pins, like PXA does, taken from the manual */
+#define STA2X11_GPIO0 0
+#define STA2X11_GPIO1 1
+#define STA2X11_GPIO2 2
+#define STA2X11_GPIO3 3
+#define STA2X11_GPIO4 4
+#define STA2X11_GPIO5 5
+#define STA2X11_GPIO6 6
+#define STA2X11_GPIO7 7
+#define STA2X11_GPIO8_RGBOUT_RED7 8
+#define STA2X11_GPIO9_RGBOUT_RED6 9
+#define STA2X11_GPIO10_RGBOUT_RED5 10
+#define STA2X11_GPIO11_RGBOUT_RED4 11
+#define STA2X11_GPIO12_RGBOUT_RED3 12
+#define STA2X11_GPIO13_RGBOUT_RED2 13
+#define STA2X11_GPIO14_RGBOUT_RED1 14
+#define STA2X11_GPIO15_RGBOUT_RED0 15
+#define STA2X11_GPIO16_RGBOUT_GREEN7 16
+#define STA2X11_GPIO17_RGBOUT_GREEN6 17
+#define STA2X11_GPIO18_RGBOUT_GREEN5 18
+#define STA2X11_GPIO19_RGBOUT_GREEN4 19
+#define STA2X11_GPIO20_RGBOUT_GREEN3 20
+#define STA2X11_GPIO21_RGBOUT_GREEN2 21
+#define STA2X11_GPIO22_RGBOUT_GREEN1 22
+#define STA2X11_GPIO23_RGBOUT_GREEN0 23
+#define STA2X11_GPIO24_RGBOUT_BLUE7 24
+#define STA2X11_GPIO25_RGBOUT_BLUE6 25
+#define STA2X11_GPIO26_RGBOUT_BLUE5 26
+#define STA2X11_GPIO27_RGBOUT_BLUE4 27
+#define STA2X11_GPIO28_RGBOUT_BLUE3 28
+#define STA2X11_GPIO29_RGBOUT_BLUE2 29
+#define STA2X11_GPIO30_RGBOUT_BLUE1 30
+#define STA2X11_GPIO31_RGBOUT_BLUE0 31
+#define STA2X11_GPIO32_RGBOUT_VSYNCH 32
+#define STA2X11_GPIO33_RGBOUT_HSYNCH 33
+#define STA2X11_GPIO34_RGBOUT_DEN 34
+#define STA2X11_GPIO35_ETH_CRS_DV 35
+#define STA2X11_GPIO36_ETH_TXD1 36
+#define STA2X11_GPIO37_ETH_TXD0 37
+#define STA2X11_GPIO38_ETH_TX_EN 38
+#define STA2X11_GPIO39_MDIO 39
+#define STA2X11_GPIO40_ETH_REF_CLK 40
+#define STA2X11_GPIO41_ETH_RXD1 41
+#define STA2X11_GPIO42_ETH_RXD0 42
+#define STA2X11_GPIO43_MDC 43
+#define STA2X11_GPIO44_CAN_TX 44
+#define STA2X11_GPIO45_CAN_RX 45
+#define STA2X11_GPIO46_MLB_DAT 46
+#define STA2X11_GPIO47_MLB_SIG 47
+#define STA2X11_GPIO48_SPI0_CLK 48
+#define STA2X11_GPIO49_SPI0_TXD 49
+#define STA2X11_GPIO50_SPI0_RXD 50
+#define STA2X11_GPIO51_SPI0_FRM 51
+#define STA2X11_GPIO52_SPI1_CLK 52
+#define STA2X11_GPIO53_SPI1_TXD 53
+#define STA2X11_GPIO54_SPI1_RXD 54
+#define STA2X11_GPIO55_SPI1_FRM 55
+#define STA2X11_GPIO56_SPI2_CLK 56
+#define STA2X11_GPIO57_SPI2_TXD 57
+#define STA2X11_GPIO58_SPI2_RXD 58
+#define STA2X11_GPIO59_SPI2_FRM 59
+#define STA2X11_GPIO60_I2C0_SCL 60
+#define STA2X11_GPIO61_I2C0_SDA 61
+#define STA2X11_GPIO62_I2C1_SCL 62
+#define STA2X11_GPIO63_I2C1_SDA 63
+#define STA2X11_GPIO64_I2C2_SCL 64
+#define STA2X11_GPIO65_I2C2_SDA 65
+#define STA2X11_GPIO66_I2C3_SCL 66
+#define STA2X11_GPIO67_I2C3_SDA 67
+#define STA2X11_GPIO68_MSP0_RCK 68
+#define STA2X11_GPIO69_MSP0_RXD 69
+#define STA2X11_GPIO70_MSP0_RFS 70
+#define STA2X11_GPIO71_MSP0_TCK 71
+#define STA2X11_GPIO72_MSP0_TXD 72
+#define STA2X11_GPIO73_MSP0_TFS 73
+#define STA2X11_GPIO74_MSP0_SCK 74
+#define STA2X11_GPIO75_MSP1_CK 75
+#define STA2X11_GPIO76_MSP1_RXD 76
+#define STA2X11_GPIO77_MSP1_FS 77
+#define STA2X11_GPIO78_MSP1_TXD 78
+#define STA2X11_GPIO79_MSP2_CK 79
+#define STA2X11_GPIO80_MSP2_RXD 80
+#define STA2X11_GPIO81_MSP2_FS 81
+#define STA2X11_GPIO82_MSP2_TXD 82
+#define STA2X11_GPIO83_MSP3_CK 83
+#define STA2X11_GPIO84_MSP3_RXD 84
+#define STA2X11_GPIO85_MSP3_FS 85
+#define STA2X11_GPIO86_MSP3_TXD 86
+#define STA2X11_GPIO87_MSP4_CK 87
+#define STA2X11_GPIO88_MSP4_RXD 88
+#define STA2X11_GPIO89_MSP4_FS 89
+#define STA2X11_GPIO90_MSP4_TXD 90
+#define STA2X11_GPIO91_MSP5_CK 91
+#define STA2X11_GPIO92_MSP5_RXD 92
+#define STA2X11_GPIO93_MSP5_FS 93
+#define STA2X11_GPIO94_MSP5_TXD 94
+#define STA2X11_GPIO95_SDIO3_DAT3 95
+#define STA2X11_GPIO96_SDIO3_DAT2 96
+#define STA2X11_GPIO97_SDIO3_DAT1 97
+#define STA2X11_GPIO98_SDIO3_DAT0 98
+#define STA2X11_GPIO99_SDIO3_CLK 99
+#define STA2X11_GPIO100_SDIO3_CMD 100
+#define STA2X11_GPIO101 101
+#define STA2X11_GPIO102 102
+#define STA2X11_GPIO103 103
+#define STA2X11_GPIO104 104
+#define STA2X11_GPIO105_SDIO2_DAT3 105
+#define STA2X11_GPIO106_SDIO2_DAT2 106
+#define STA2X11_GPIO107_SDIO2_DAT1 107
+#define STA2X11_GPIO108_SDIO2_DAT0 108
+#define STA2X11_GPIO109_SDIO2_CLK 109
+#define STA2X11_GPIO110_SDIO2_CMD 110
+#define STA2X11_GPIO111 111
+#define STA2X11_GPIO112 112
+#define STA2X11_GPIO113 113
+#define STA2X11_GPIO114 114
+#define STA2X11_GPIO115_SDIO1_DAT3 115
+#define STA2X11_GPIO116_SDIO1_DAT2 116
+#define STA2X11_GPIO117_SDIO1_DAT1 117
+#define STA2X11_GPIO118_SDIO1_DAT0 118
+#define STA2X11_GPIO119_SDIO1_CLK 119
+#define STA2X11_GPIO120_SDIO1_CMD 120
+#define STA2X11_GPIO121 121
+#define STA2X11_GPIO122 122
+#define STA2X11_GPIO123 123
+#define STA2X11_GPIO124 124
+#define STA2X11_GPIO125_UART2_TXD 125
+#define STA2X11_GPIO126_UART2_RXD 126
+#define STA2X11_GPIO127_UART3_TXD 127
+
+/*
+ * The APB bridge has its own registers, needed by our users as well.
+ * They are accessed with the following read/mask/write function.
+ */
+u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
+
+/* CAN and MLB */
+#define APBREG_BSR 0x00 /* Bridge Status Reg */
+#define APBREG_PAER 0x08 /* Peripherals Address Error Reg */
+#define APBREG_PWAC 0x20 /* Peripheral Write Access Control reg */
+#define APBREG_PRAC 0x40 /* Peripheral Read Access Control reg */
+#define APBREG_PCG 0x60 /* Peripheral Clock Gating Reg */
+#define APBREG_PUR 0x80 /* Peripheral Under Reset Reg */
+#define APBREG_EMU_PCG 0xA0 /* Emulator Peripheral Clock Gating Reg */
+
+#define APBREG_CAN (1 << 1)
+#define APBREG_MLB (1 << 3)
+
+/* SARAC */
+#define APBREG_BSR_SARAC 0x100 /* Bridge Status Reg */
+#define APBREG_PAER_SARAC 0x108 /* Peripherals Address Error Reg */
+#define APBREG_PWAC_SARAC 0x120 /* Peripheral Write Access Control reg */
+#define APBREG_PRAC_SARAC 0x140 /* Peripheral Read Access Control reg */
+#define APBREG_PCG_SARAC 0x160 /* Peripheral Clock Gating Reg */
+#define APBREG_PUR_SARAC 0x180 /* Peripheral Under Reset Reg */
+#define APBREG_EMU_PCG_SARAC 0x1A0 /* Emulator Peripheral Clock Gating Reg */
+
+#define APBREG_SARAC (1 << 2)
+
+/*
+ * The system controller has its own registers. Some of these are accessed
+ * by our users as well, using the following read/mask/write function.
+ */
+u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
+
+#define SCTL_SCCTL 0x00 /* System controller control register */
+#define SCTL_ARMCFG 0x04 /* ARM configuration register */
+#define SCTL_SCPLLCTL 0x08 /* PLL control status register */
+#define SCTL_SCPLLFCTRL 0x0c /* PLL frequency control register */
+#define SCTL_SCRESFRACT 0x10 /* PLL fractional input register */
+#define SCTL_SCRESCTRL1 0x14 /* Peripheral reset control 1 */
+#define SCTL_SCRESXTRL2 0x18 /* Peripheral reset control 2 */
+#define SCTL_SCPEREN0 0x1c /* Peripheral clock enable register 0 */
+#define SCTL_SCPEREN1 0x20 /* Peripheral clock enable register 1 */
+#define SCTL_SCPEREN2 0x24 /* Peripheral clock enable register 2 */
+#define SCTL_SCGRST 0x28 /* Peripheral global reset */
+#define SCTL_SCPCIPMCR1 0x30 /* PCI power management control 1 */
+#define SCTL_SCPCIPMCR2 0x34 /* PCI power management control 2 */
+#define SCTL_SCPCIPMSR1 0x38 /* PCI power management status 1 */
+#define SCTL_SCPCIPMSR2 0x3c /* PCI power management status 2 */
+#define SCTL_SCPCIPMSR3 0x40 /* PCI power management status 3 */
+#define SCTL_SCINTREN 0x44 /* Interrupt enable */
+#define SCTL_SCRISR 0x48 /* RAW interrupt status */
+#define SCTL_SCCLKSTAT0 0x4c /* Peripheral clocks status 0 */
+#define SCTL_SCCLKSTAT1 0x50 /* Peripheral clocks status 1 */
+#define SCTL_SCCLKSTAT2 0x54 /* Peripheral clocks status 2 */
+#define SCTL_SCRSTSTA 0x58 /* Reset status register */
+
+#define SCTL_SCRESCTRL1_USB_PHY_POR (1 << 0)
+#define SCTL_SCRESCTRL1_USB_OTG (1 << 1)
+#define SCTL_SCRESCTRL1_USB_HRST (1 << 2)
+#define SCTL_SCRESCTRL1_USB_PHY_HOST (1 << 3)
+#define SCTL_SCRESCTRL1_SATAII (1 << 4)
+#define SCTL_SCRESCTRL1_VIP (1 << 5)
+#define SCTL_SCRESCTRL1_PER_MMC0 (1 << 6)
+#define SCTL_SCRESCTRL1_PER_MMC1 (1 << 7)
+#define SCTL_SCRESCTRL1_PER_GPIO0 (1 << 8)
+#define SCTL_SCRESCTRL1_PER_GPIO1 (1 << 9)
+#define SCTL_SCRESCTRL1_PER_GPIO2 (1 << 10)
+#define SCTL_SCRESCTRL1_PER_GPIO3 (1 << 11)
+#define SCTL_SCRESCTRL1_PER_MTU0 (1 << 12)
+#define SCTL_SCRESCTRL1_KER_SPI0 (1 << 13)
+#define SCTL_SCRESCTRL1_KER_SPI1 (1 << 14)
+#define SCTL_SCRESCTRL1_KER_SPI2 (1 << 15)
+#define SCTL_SCRESCTRL1_KER_MCI0 (1 << 16)
+#define SCTL_SCRESCTRL1_KER_MCI1 (1 << 17)
+#define SCTL_SCRESCTRL1_PRE_HSI2C0 (1 << 18)
+#define SCTL_SCRESCTRL1_PER_HSI2C1 (1 << 19)
+#define SCTL_SCRESCTRL1_PER_HSI2C2 (1 << 20)
+#define SCTL_SCRESCTRL1_PER_HSI2C3 (1 << 21)
+#define SCTL_SCRESCTRL1_PER_MSP0 (1 << 22)
+#define SCTL_SCRESCTRL1_PER_MSP1 (1 << 23)
+#define SCTL_SCRESCTRL1_PER_MSP2 (1 << 24)
+#define SCTL_SCRESCTRL1_PER_MSP3 (1 << 25)
+#define SCTL_SCRESCTRL1_PER_MSP4 (1 << 26)
+#define SCTL_SCRESCTRL1_PER_MSP5 (1 << 27)
+#define SCTL_SCRESCTRL1_PER_MMC (1 << 28)
+#define SCTL_SCRESCTRL1_KER_MSP0 (1 << 29)
+#define SCTL_SCRESCTRL1_KER_MSP1 (1 << 30)
+#define SCTL_SCRESCTRL1_KER_MSP2 (1 << 31)
+
+#define SCTL_SCPEREN0_UART0 (1 << 0)
+#define SCTL_SCPEREN0_UART1 (1 << 1)
+#define SCTL_SCPEREN0_UART2 (1 << 2)
+#define SCTL_SCPEREN0_UART3 (1 << 3)
+#define SCTL_SCPEREN0_MSP0 (1 << 4)
+#define SCTL_SCPEREN0_MSP1 (1 << 5)
+#define SCTL_SCPEREN0_MSP2 (1 << 6)
+#define SCTL_SCPEREN0_MSP3 (1 << 7)
+#define SCTL_SCPEREN0_MSP4 (1 << 8)
+#define SCTL_SCPEREN0_MSP5 (1 << 9)
+#define SCTL_SCPEREN0_SPI0 (1 << 10)
+#define SCTL_SCPEREN0_SPI1 (1 << 11)
+#define SCTL_SCPEREN0_SPI2 (1 << 12)
+#define SCTL_SCPEREN0_I2C0 (1 << 13)
+#define SCTL_SCPEREN0_I2C1 (1 << 14)
+#define SCTL_SCPEREN0_I2C2 (1 << 15)
+#define SCTL_SCPEREN0_I2C3 (1 << 16)
+#define SCTL_SCPEREN0_SVDO_LVDS (1 << 17)
+#define SCTL_SCPEREN0_USB_HOST (1 << 18)
+#define SCTL_SCPEREN0_USB_OTG (1 << 19)
+#define SCTL_SCPEREN0_MCI0 (1 << 20)
+#define SCTL_SCPEREN0_MCI1 (1 << 21)
+#define SCTL_SCPEREN0_MCI2 (1 << 22)
+#define SCTL_SCPEREN0_MCI3 (1 << 23)
+#define SCTL_SCPEREN0_SATA (1 << 24)
+#define SCTL_SCPEREN0_ETHERNET (1 << 25)
+#define SCTL_SCPEREN0_VIC (1 << 26)
+#define SCTL_SCPEREN0_DMA_AUDIO (1 << 27)
+#define SCTL_SCPEREN0_DMA_SOC (1 << 28)
+#define SCTL_SCPEREN0_RAM (1 << 29)
+#define SCTL_SCPEREN0_VIP (1 << 30)
+#define SCTL_SCPEREN0_ARM (1 << 31)
+
+#define SCTL_SCPEREN1_UART0 (1 << 0)
+#define SCTL_SCPEREN1_UART1 (1 << 1)
+#define SCTL_SCPEREN1_UART2 (1 << 2)
+#define SCTL_SCPEREN1_UART3 (1 << 3)
+#define SCTL_SCPEREN1_MSP0 (1 << 4)
+#define SCTL_SCPEREN1_MSP1 (1 << 5)
+#define SCTL_SCPEREN1_MSP2 (1 << 6)
+#define SCTL_SCPEREN1_MSP3 (1 << 7)
+#define SCTL_SCPEREN1_MSP4 (1 << 8)
+#define SCTL_SCPEREN1_MSP5 (1 << 9)
+#define SCTL_SCPEREN1_SPI0 (1 << 10)
+#define SCTL_SCPEREN1_SPI1 (1 << 11)
+#define SCTL_SCPEREN1_SPI2 (1 << 12)
+#define SCTL_SCPEREN1_I2C0 (1 << 13)
+#define SCTL_SCPEREN1_I2C1 (1 << 14)
+#define SCTL_SCPEREN1_I2C2 (1 << 15)
+#define SCTL_SCPEREN1_I2C3 (1 << 16)
+#define SCTL_SCPEREN1_USB_PHY (1 << 17)
+
+#endif /* __STA2X11_MFD_H */
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index 8516fd1eaabc..f8d5b4d5843f 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -117,7 +117,7 @@ struct matrix_keymap_data;
* @no_autorepeat: disable key autorepeat
*/
struct stmpe_keypad_platform_data {
- struct matrix_keymap_data *keymap_data;
+ const struct matrix_keymap_data *keymap_data;
unsigned int debounce_ms;
unsigned int scan_count;
bool no_autorepeat;
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 1c6c2860d1a6..dd8dc0a6c462 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -18,6 +18,7 @@
#define __LINUX_MFD_TPS65910_H
#include <linux/gpio.h>
+#include <linux/regmap.h>
/* TPS chip id list */
#define TPS65910 0
@@ -783,6 +784,18 @@
#define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3 0x4
#define TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP 0x8
+/*
+ * Sleep keepon data: Maintains the state in sleep mode
+ * @therm_keepon: Keep on the thermal monitoring in sleep state.
+ * @clkout32k_keepon: Keep on the 32KHz clock output in sleep state.
+ * @i2chs_keepon: Keep on high speed internal clock in sleep state.
+ */
+struct tps65910_sleep_keepon_data {
+ unsigned therm_keepon:1;
+ unsigned clkout32k_keepon:1;
+ unsigned i2chs_keepon:1;
+};
+
/**
* struct tps65910_board
* Board platform data may be used to initialize regulators.
@@ -794,6 +807,8 @@ struct tps65910_board {
int irq_base;
int vmbch_threshold;
int vmbch2_threshold;
+ bool en_dev_slp;
+ struct tps65910_sleep_keepon_data *slp_keepon;
bool en_gpio_sleep[TPS6591X_MAX_NUM_GPIO];
unsigned long regulator_ext_sleep_control[TPS65910_NUM_REGS];
struct regulator_init_data *tps65910_pmic_init_data[TPS65910_NUM_REGS];
@@ -809,16 +824,14 @@ struct tps65910 {
struct regmap *regmap;
struct mutex io_mutex;
unsigned int id;
- int (*read)(struct tps65910 *tps65910, u8 reg, int size, void *dest);
- int (*write)(struct tps65910 *tps65910, u8 reg, int size, void *src);
/* Client devices */
struct tps65910_pmic *pmic;
struct tps65910_rtc *rtc;
struct tps65910_power *power;
- /* GPIO Handling */
- struct gpio_chip gpio;
+ /* Device node parsed board data */
+ struct tps65910_board *of_plat_data;
/* IRQ Handling */
struct mutex irq_lock;
@@ -826,6 +839,7 @@ struct tps65910 {
int irq_base;
int irq_num;
u32 irq_mask;
+ struct irq_domain *domain;
};
struct tps65910_platform_data {
@@ -833,9 +847,6 @@ struct tps65910_platform_data {
int irq_base;
};
-int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
-int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
-void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base);
int tps65910_irq_init(struct tps65910 *tps65910, int irq,
struct tps65910_platform_data *pdata);
int tps65910_irq_exit(struct tps65910 *tps65910);
@@ -845,4 +856,28 @@ static inline int tps65910_chip_id(struct tps65910 *tps65910)
return tps65910->id;
}
+static inline int tps65910_reg_read(struct tps65910 *tps65910, u8 reg,
+ unsigned int *val)
+{
+ return regmap_read(tps65910->regmap, reg, val);
+}
+
+static inline int tps65910_reg_write(struct tps65910 *tps65910, u8 reg,
+ unsigned int val)
+{
+ return regmap_write(tps65910->regmap, reg, val);
+}
+
+static inline int tps65910_reg_set_bits(struct tps65910 *tps65910, u8 reg,
+ u8 mask)
+{
+ return regmap_update_bits(tps65910->regmap, reg, mask, mask);
+}
+
+static inline int tps65910_reg_clear_bits(struct tps65910 *tps65910, u8 reg,
+ u8 mask)
+{
+ return regmap_update_bits(tps65910->regmap, reg, mask, 0);
+}
+
#endif /* __LINUX_MFD_TPS65910_H */
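
The removed tps65910_set_bits()/tps65910_clear_bits() pair is replaced by the regmap-backed inlines above. A minimal caller sketch (not taken from the patch; the register and mask names are assumed from the existing header definitions):

static int example_enter_sleep(struct tps65910 *tps65910)
{
	/* old code called tps65910_set_bits(tps65910, reg, mask) here */
	return tps65910_reg_set_bits(tps65910, TPS65910_DEVCTRL,
				     DEVCTRL_DEV_SLP_MASK);
}
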
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index b15b5f03f5c4..6659487c31e7 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/mfd/core.h>
+#include <linux/regulator/consumer.h>
#define TWL6040_REG_ASICID 0x01
#define TWL6040_REG_ASICREV 0x02
@@ -203,6 +204,7 @@ struct regmap;
struct twl6040 {
struct device *dev;
struct regmap *regmap;
+ struct regulator_bulk_data supplies[2]; /* supplies for vio, v2v1 */
struct mutex mutex;
struct mutex io_mutex;
struct mutex irq_mutex;
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 4b1211859f74..4a3b83a77614 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -17,6 +17,7 @@
#include <linux/completion.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/regmap.h>
@@ -338,6 +339,7 @@
#define WM831X_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [1:0] */
struct regulator_dev;
+struct irq_domain;
#define WM831X_NUM_IRQ_REGS 5
#define WM831X_NUM_GPIO_REGS 16
@@ -367,7 +369,7 @@ struct wm831x {
int irq; /* Our chip IRQ */
struct mutex irq_lock;
- int irq_base;
+ struct irq_domain *irq_domain;
int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */
int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
@@ -382,7 +384,8 @@ struct wm831x {
/* Used by the interrupt controller code to post writes */
int gpio_update[WM831X_NUM_GPIO_REGS];
- bool gpio_level[WM831X_NUM_GPIO_REGS];
+ bool gpio_level_high[WM831X_NUM_GPIO_REGS];
+ bool gpio_level_low[WM831X_NUM_GPIO_REGS];
struct mutex auxadc_lock;
struct list_head auxadc_pending;
@@ -417,6 +420,11 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq);
void wm831x_irq_exit(struct wm831x *wm831x);
void wm831x_auxadc_init(struct wm831x *wm831x);
+static inline int wm831x_irq(struct wm831x *wm831x, int irq)
+{
+ return irq_create_mapping(wm831x->irq_domain, irq);
+}
+
extern struct regmap_config wm831x_regmap_config;
#endif
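
With irq_base gone, child drivers are expected to translate their local interrupt numbers through wm831x_irq() instead of adding an offset. A sketch, assuming a hypothetical child driver that receives the hardware IRQ index as a parameter:

static int example_request_child_irq(struct wm831x *wm831x, int hw_irq,
				     irq_handler_t thread_fn, void *data)
{
	/* domain mapping replaces the old irq_base + hw_irq arithmetic */
	int irq = wm831x_irq(wm831x, hw_irq);

	return request_threaded_irq(irq, NULL, thread_fn,
				    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				    "wm831x-example", data);
}
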
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 98fcc977e82b..9192b6404a73 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -602,6 +602,7 @@ extern const u16 wm8352_mode2_defaults[];
extern const u16 wm8352_mode3_defaults[];
struct wm8350;
+struct regmap;
struct wm8350_hwmon {
struct platform_device *pdev;
@@ -612,13 +613,7 @@ struct wm8350 {
struct device *dev;
/* device IO */
- union {
- struct i2c_client *i2c_client;
- struct spi_device *spi_device;
- };
- int (*read_dev)(struct wm8350 *wm8350, char reg, int size, void *dest);
- int (*write_dev)(struct wm8350 *wm8350, char reg, int size,
- void *src);
+ struct regmap *regmap;
u16 *reg_cache;
struct mutex auxadc_mutex;
diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h
index 0147b6968510..2de565b94d0c 100644
--- a/include/linux/mfd/wm8400-private.h
+++ b/include/linux/mfd/wm8400-private.h
@@ -24,19 +24,14 @@
#include <linux/mfd/wm8400.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
-
-struct regmap;
+#include <linux/regmap.h>
#define WM8400_REGISTER_COUNT 0x55
struct wm8400 {
struct device *dev;
-
- struct mutex io_lock;
struct regmap *regmap;
- u16 reg_cache[WM8400_REGISTER_COUNT];
-
struct platform_device regulators[6];
};
@@ -930,6 +925,11 @@ struct wm8400 {
u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg);
int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data);
-int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, u16 mask, u16 val);
+
+static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg,
+ u16 mask, u16 val)
+{
+ return regmap_update_bits(wm8400->regmap, reg, mask, val);
+}
#endif
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index 6695c3ec4518..1f173306bf05 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -57,6 +57,7 @@ struct wm8994 {
enum wm8994_type type;
int revision;
+ int cust_id;
struct device *dev;
struct regmap *regmap;
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
index 86e6a032a078..053548961c15 100644
--- a/include/linux/mfd/wm8994/registers.h
+++ b/include/linux/mfd/wm8994/registers.h
@@ -2212,6 +2212,9 @@
/*
* R256 (0x100) - Chip Revision
*/
+#define WM8994_CUST_ID_MASK 0xFF00 /* CUST_ID - [15:8] */
+#define WM8994_CUST_ID_SHIFT 8 /* CUST_ID - [15:8] */
+#define WM8994_CUST_ID_WIDTH 8 /* CUST_ID - [15:8] */
#define WM8994_CHIP_REV_MASK 0x000F /* CHIP_REV - [3:0] */
#define WM8994_CHIP_REV_SHIFT 0 /* CHIP_REV - [3:0] */
#define WM8994_CHIP_REV_WIDTH 4 /* CHIP_REV - [3:0] */
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index dd8da342a991..61f0905bdc48 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -3,7 +3,7 @@
#define MICREL_PHY_ID_MASK 0x00fffff0
-#define PHY_ID_KSZ9021 0x00221611
+#define PHY_ID_KSZ9021 0x00221610
#define PHY_ID_KS8737 0x00221720
#define PHY_ID_KS8041 0x00221510
#define PHY_ID_KS8051 0x00221550
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7d5c37f24c63..ce26716238c3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -321,6 +321,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ VM_BUG_ON(PageSlab(page));
bit_spin_lock(PG_compound_lock, &page->flags);
#endif
}
@@ -328,6 +329,7 @@ static inline void compound_lock(struct page *page)
static inline void compound_unlock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ VM_BUG_ON(PageSlab(page));
bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
}
@@ -871,8 +873,6 @@ extern void pagefault_out_of_memory(void);
extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);
-int shmem_lock(struct file *file, int lock, struct user_struct *user);
-struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
int shmem_zero_setup(struct vm_area_struct *);
extern int can_do_mlock(void);
@@ -951,11 +951,9 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
extern int vmtruncate(struct inode *inode, loff_t offset);
-extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
-
int invalidate_inode_page(struct page *page);
#ifdef CONFIG_MMU
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 227fd3e9a9c9..1397ccf81e91 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -21,22 +21,22 @@ static inline int page_is_file_cache(struct page *page)
return !PageSwapBacked(page);
}
-static inline void
-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void add_page_to_lru_list(struct page *page,
+ struct lruvec *lruvec, enum lru_list lru)
{
- struct lruvec *lruvec;
-
- lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+ int nr_pages = hpage_nr_pages(page);
+ mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
list_add(&page->lru, &lruvec->lists[lru]);
- __mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
+ __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
}
-static inline void
-del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void del_page_from_lru_list(struct page *page,
+ struct lruvec *lruvec, enum lru_list lru)
{
- mem_cgroup_lru_del_list(page, lru);
+ int nr_pages = hpage_nr_pages(page);
+ mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
list_del(&page->lru);
- __mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
+ __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
}
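
Call sites therefore switch from passing a zone to passing the lruvec. A sketch of an updated caller (how the lruvec is obtained depends on the caller; a memcg-aware lookup is assumed and elided here):

static void example_deactivate(struct page *page, struct lruvec *lruvec)
{
	del_page_from_lru_list(page, lruvec, LRU_ACTIVE_ANON);
	ClearPageActive(page);
	add_page_to_lru_list(page, lruvec, LRU_INACTIVE_ANON);
}
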
/**
@@ -61,7 +61,7 @@ static inline enum lru_list page_lru_base_type(struct page *page)
* Returns the LRU list a page was on, as an index into the array of LRU
* lists; and clears its Unevictable or Active flags, ready for freeing.
*/
-static inline enum lru_list page_off_lru(struct page *page)
+static __always_inline enum lru_list page_off_lru(struct page *page)
{
enum lru_list lru;
@@ -85,7 +85,7 @@ static inline enum lru_list page_off_lru(struct page *page)
* Returns the LRU list a page should be on, as an index
* into the array of LRU lists.
*/
-static inline enum lru_list page_lru(struct page *page)
+static __always_inline enum lru_list page_lru(struct page *page)
{
enum lru_list lru;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3cc3062b3767..dad95bdd06d7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -12,6 +12,7 @@
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
+#include <linux/uprobes.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -344,17 +345,6 @@ struct mm_struct {
/* Architecture-specific MM context */
mm_context_t context;
- /* Swap token stuff */
- /*
- * Last value of global fault stamp as seen by this process.
- * In other words, this value gives an indication of how long
- * it has been since this task got the token.
- * Look at mm/thrash.c
- */
- unsigned int faultstamp;
- unsigned int token_priority;
- unsigned int last_interval;
-
unsigned long flags; /* Must use atomic bitops to access the bits */
struct core_state *core_state; /* coredumping support */
@@ -388,6 +378,7 @@ struct mm_struct {
#ifdef CONFIG_CPUMASK_OFFSTACK
struct cpumask cpumask_allocation;
#endif
+ struct uprobes_state uprobes_state;
};
static inline void mm_init_cpumask(struct mm_struct *mm)
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 629b823f8836..d76513b5b263 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -58,6 +58,10 @@ struct mmc_ext_csd {
unsigned int generic_cmd6_time; /* Units: 10ms */
unsigned int power_off_longtime; /* Units: ms */
unsigned int hs_max_dtr;
+#define MMC_HIGH_26_MAX_DTR 26000000
+#define MMC_HIGH_52_MAX_DTR 52000000
+#define MMC_HIGH_DDR_MAX_DTR 52000000
+#define MMC_HS200_MAX_DTR 200000000
unsigned int sectors;
unsigned int card_type;
unsigned int hc_erase_size; /* In sectors */
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 8f66e28f5a0f..7a7ebd367cfd 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -125,6 +125,7 @@ struct dw_mci {
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
+ struct workqueue_struct *card_workqueue;
/* DMA interface members*/
int use_dma;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index cbde4b7e675e..0707d228d7f1 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -297,6 +297,7 @@ struct mmc_host {
unsigned int sdio_irqs;
struct task_struct *sdio_irq_thread;
+ bool sdio_irq_pending;
atomic_t sdio_irq_thread_abort;
mmc_pm_flag_t pm_flags; /* requested pm features */
@@ -352,6 +353,7 @@ extern int mmc_cache_ctrl(struct mmc_host *, u8);
static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
host->ops->enable_sdio_irq(host, 0);
+ host->sdio_irq_pending = true;
wake_up_process(host->sdio_irq_thread);
}
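
The new sdio_irq_pending flag is set on the signalling path shown above; host controllers keep calling the same helper from their interrupt handler. Illustrative caller (status decoding elided):

static irqreturn_t example_sdio_card_isr(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;

	/* hardware indicated a card interrupt; hand it to the core */
	mmc_signal_sdio_irq(mmc);
	return IRQ_HANDLED;
}
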
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index b822a2cb6008..d425cab144d9 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -354,66 +354,6 @@ struct _mmc_csd {
#define EXT_CSD_CARD_TYPE_SDR_1_2V (1<<5) /* Card can run at 200MHz */
/* SDR mode @1.2V I/O */
-#define EXT_CSD_CARD_TYPE_SDR_200 (EXT_CSD_CARD_TYPE_SDR_1_8V | \
- EXT_CSD_CARD_TYPE_SDR_1_2V)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL (EXT_CSD_CARD_TYPE_SDR_200 | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_2V_ALL (EXT_CSD_CARD_TYPE_SDR_1_2V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_8V_ALL (EXT_CSD_CARD_TYPE_SDR_1_8V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V (EXT_CSD_CARD_TYPE_SDR_1_2V | \
- EXT_CSD_CARD_TYPE_DDR_1_8V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V (EXT_CSD_CARD_TYPE_SDR_1_8V | \
- EXT_CSD_CARD_TYPE_DDR_1_8V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V (EXT_CSD_CARD_TYPE_SDR_1_2V | \
- EXT_CSD_CARD_TYPE_DDR_1_2V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V (EXT_CSD_CARD_TYPE_SDR_1_8V | \
- EXT_CSD_CARD_TYPE_DDR_1_2V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52 (EXT_CSD_CARD_TYPE_SDR_1_2V | \
- EXT_CSD_CARD_TYPE_DDR_52 | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52 (EXT_CSD_CARD_TYPE_SDR_1_8V | \
- EXT_CSD_CARD_TYPE_DDR_52 | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V (EXT_CSD_CARD_TYPE_SDR_200 | \
- EXT_CSD_CARD_TYPE_DDR_1_8V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V (EXT_CSD_CARD_TYPE_SDR_200 | \
- EXT_CSD_CARD_TYPE_DDR_1_2V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52 (EXT_CSD_CARD_TYPE_SDR_200 | \
- EXT_CSD_CARD_TYPE_DDR_52 | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
#define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */
#define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */
#define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */
diff --git a/arch/arm/mach-mxs/include/mach/mmc.h b/include/linux/mmc/mxs-mmc.h
index 211547a05564..7c2ad3a7f2f3 100644
--- a/arch/arm/mach-mxs/include/mach/mmc.h
+++ b/include/linux/mmc/mxs-mmc.h
@@ -6,8 +6,8 @@
* published by the Free Software Foundation.
*/
-#ifndef __MACH_MXS_MMC_H__
-#define __MACH_MXS_MMC_H__
+#ifndef __LINUX_MMC_MXS_MMC_H__
+#define __LINUX_MMC_MXS_MMC_H__
struct mxs_mmc_platform_data {
int wp_gpio; /* write protect pin */
@@ -15,4 +15,5 @@ struct mxs_mmc_platform_data {
#define SLOTF_4_BIT_CAPABLE (1 << 0)
#define SLOTF_8_BIT_CAPABLE (1 << 1)
};
-#endif /* __MACH_MXS_MMC_H__ */
+
+#endif /* __LINUX_MMC_MXS_MMC_H__ */
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index c04ecfe03f7f..580bd587d916 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -4,7 +4,7 @@
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
#else
-#define VM_BUG_ON(cond) do { (void)(cond); } while (0)
+#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
#endif
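
The practical effect is that the condition is still parsed and type-checked when CONFIG_DEBUG_VM is off (a stale symbol inside a VM_BUG_ON() now breaks the build) while still generating no object code. A trivial sketch:

static inline void example_check_mapped(struct page *page)
{
	/* evaluated only with CONFIG_DEBUG_VM; always compile-checked */
	VM_BUG_ON(page_count(page) == 0);
}
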
#ifdef CONFIG_DEBUG_VIRTUAL
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 41aa49b74821..2427706f78b4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -35,13 +35,39 @@
*/
#define PAGE_ALLOC_COSTLY_ORDER 3
-#define MIGRATE_UNMOVABLE 0
-#define MIGRATE_RECLAIMABLE 1
-#define MIGRATE_MOVABLE 2
-#define MIGRATE_PCPTYPES 3 /* the number of types on the pcp lists */
-#define MIGRATE_RESERVE 3
-#define MIGRATE_ISOLATE 4 /* can't allocate from here */
-#define MIGRATE_TYPES 5
+enum {
+ MIGRATE_UNMOVABLE,
+ MIGRATE_RECLAIMABLE,
+ MIGRATE_MOVABLE,
+ MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
+ MIGRATE_RESERVE = MIGRATE_PCPTYPES,
+#ifdef CONFIG_CMA
+ /*
+ * MIGRATE_CMA migration type is designed to mimic the way
+ * ZONE_MOVABLE works. Only movable pages can be allocated
+ * from MIGRATE_CMA pageblocks and the page allocator never
+ * implicitly changes the migration type of a MIGRATE_CMA pageblock.
+ *
+ * The way to use it is to change the migratetype of a range of
+ * pageblocks to MIGRATE_CMA, which can be done with the
+ * __free_pageblock_cma() function. What is important, though,
+ * is that the range of pageblocks must be aligned to
+ * MAX_ORDER_NR_PAGES should the biggest page be bigger than
+ * a single pageblock.
+ */
+ MIGRATE_CMA,
+#endif
+ MIGRATE_ISOLATE, /* can't allocate from here */
+ MIGRATE_TYPES
+};
+
+#ifdef CONFIG_CMA
+# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define cma_wmark_pages(zone) zone->min_cma_pages
+#else
+# define is_migrate_cma(migratetype) false
+# define cma_wmark_pages(zone) 0
+#endif
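
is_migrate_cma() compiles to a constant false without CONFIG_CMA, so callers can test it unconditionally. A sketch of the kind of fallback check the allocator is expected to make (illustrative, not from the patch):

static inline bool example_may_change_pageblock_type(int migratetype)
{
	/* CMA and isolated pageblocks must keep their migrate type */
	return !is_migrate_cma(migratetype) && migratetype != MIGRATE_ISOLATE;
}
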
#define for_each_migratetype_order(order, type) \
for (order = 0; order < MAX_ORDER; order++) \
@@ -159,8 +185,25 @@ static inline int is_unevictable_lru(enum lru_list lru)
return (lru == LRU_UNEVICTABLE);
}
+struct zone_reclaim_stat {
+ /*
+ * The pageout code in vmscan.c keeps track of how many of the
+ * mem/swap backed and file backed pages are referenced.
+ * The higher the rotated/scanned ratio, the more valuable
+ * that cache is.
+ *
+ * The anon LRU stats live in [0], file LRU stats in [1]
+ */
+ unsigned long recent_rotated[2];
+ unsigned long recent_scanned[2];
+};
+
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
+ struct zone_reclaim_stat reclaim_stat;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+ struct zone *zone;
+#endif
};
/* Mask used at gathering information at once (see memcontrol.c) */
@@ -169,16 +212,12 @@ struct lruvec {
#define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
-/* Isolate inactive pages */
-#define ISOLATE_INACTIVE ((__force isolate_mode_t)0x1)
-/* Isolate active pages */
-#define ISOLATE_ACTIVE ((__force isolate_mode_t)0x2)
/* Isolate clean file */
-#define ISOLATE_CLEAN ((__force isolate_mode_t)0x4)
+#define ISOLATE_CLEAN ((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
-#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x8)
+#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
-#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x10)
+#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;
@@ -287,19 +326,6 @@ enum zone_type {
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif
-struct zone_reclaim_stat {
- /*
- * The pageout code in vmscan.c keeps track of how many of the
- * mem/swap backed and file backed pages are refeferenced.
- * The higher the rotated/scanned ratio, the more valuable
- * that cache is.
- *
- * The anon LRU stats live in [0], file LRU stats in [1]
- */
- unsigned long recent_rotated[2];
- unsigned long recent_scanned[2];
-};
-
struct zone {
/* Fields commonly accessed by the page allocator */
@@ -347,6 +373,13 @@ struct zone {
/* see spanned/present_pages for more description */
seqlock_t span_seqlock;
#endif
+#ifdef CONFIG_CMA
+ /*
+ * CMA needs to increase watermark levels during the allocation
+ * process to make sure that the system is not starved.
+ */
+ unsigned long min_cma_pages;
+#endif
struct free_area free_area[MAX_ORDER];
#ifndef CONFIG_SPARSEMEM
@@ -374,8 +407,6 @@ struct zone {
spinlock_t lru_lock;
struct lruvec lruvec;
- struct zone_reclaim_stat reclaim_stat;
-
unsigned long pages_scanned; /* since last reclaim */
unsigned long flags; /* zone flags, see below */
@@ -701,6 +732,17 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
unsigned long size,
enum memmap_context context);
+extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
+
+static inline struct zone *lruvec_zone(struct lruvec *lruvec)
+{
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+ return lruvec->zone;
+#else
+ return container_of(lruvec, struct zone, lruvec);
+#endif
+}
+
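
lruvec_zone() lets reclaim code recover the owning zone whether the lruvec is embedded in the zone or carried by a memcg. Minimal usage sketch:

static unsigned long example_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
}
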
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index 34066e65fdeb..11cc2ac67e75 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -21,8 +21,9 @@
#define CT_LE_W(v) cpu_to_le16(v)
#define CT_LE_L(v) cpu_to_le32(v)
+#define MSDOS_ROOT_INO 1 /* The root inode number */
+#define MSDOS_FSINFO_INO 2 /* Used for managing the FSINFO block */
-#define MSDOS_ROOT_INO 1 /* == MINIX_ROOT_INO */
#define MSDOS_DIR_BITS 5 /* log2(sizeof(struct msdos_dir_entry)) */
/* directory limit */
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 30b0c4e78f91..51bf8ada6dc0 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -18,7 +18,6 @@
struct mv643xx_eth_shared_platform_data {
struct mbus_dram_target_info *dram;
struct platform_device *shared_smi;
- unsigned int t_clk;
/*
* Max packet size for Tx IP/Layer 4 checksum, when set to 0, default
* limit of 9KiB will be used.
diff --git a/include/linux/net.h b/include/linux/net.h
index 2d7510f38934..e9ac2df079ba 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -313,5 +313,8 @@ extern int kernel_sock_shutdown(struct socket *sock,
MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
"-type-" __stringify(type))
+#define MODULE_ALIAS_NET_PF_PROTO_NAME(pf, proto, name) \
+ MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
+ name)
#endif /* __KERNEL__ */
#endif /* _LINUX_NET_H */
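
The new macro appends a caller-supplied suffix to the "net-pf-<pf>-proto-<proto>" stem. Hypothetical use (the suffix string is a placeholder, not taken from the patch):

MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-example");
/* expands to MODULE_ALIAS("net-pf-16-proto-16-family-example") */
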
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e7fd468f7126..d94cb1431519 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2795,15 +2795,15 @@ do { \
#define netif_info(priv, type, dev, fmt, args...) \
netif_level(info, priv, type, dev, fmt, ##args)
-#if defined(DEBUG)
-#define netif_dbg(priv, type, dev, format, args...) \
- netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
-#elif defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...) \
do { \
if (netif_msg_##type(priv)) \
dynamic_netdev_dbg(netdev, format, ##args); \
} while (0)
+#elif defined(DEBUG)
+#define netif_dbg(priv, type, dev, format, args...) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...) \
({ \
diff --git a/include/linux/nfc/pn544.h b/include/linux/nfc/pn544.h
index 7ab8521f2347..9890bbaf4328 100644
--- a/include/linux/nfc/pn544.h
+++ b/include/linux/nfc/pn544.h
@@ -84,6 +84,12 @@ struct pn544_fw_packet {
};
#ifdef __KERNEL__
+enum {
+ NFC_GPIO_ENABLE,
+ NFC_GPIO_FW_RESET,
+ NFC_GPIO_IRQ
+};
+
/* board config */
struct pn544_nfc_platform_data {
int (*request_resources) (struct i2c_client *client);
@@ -91,6 +97,7 @@ struct pn544_nfc_platform_data {
void (*enable) (int fw);
int (*test) (void);
void (*disable) (void);
+ int (*get_gpio)(int type);
};
#endif /* __KERNEL__ */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 0987146b0637..af2d2fa30eee 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -69,6 +69,10 @@
#define NFS4_CDFC4_FORE_OR_BOTH 0x3
#define NFS4_CDFC4_BACK_OR_BOTH 0x7
+#define NFS4_CDFS4_FORE 0x1
+#define NFS4_CDFS4_BACK 0x2
+#define NFS4_CDFS4_BOTH 0x3
+
#define NFS4_SET_TO_SERVER_TIME 0
#define NFS4_SET_TO_CLIENT_TIME 1
@@ -526,6 +530,13 @@ enum lock_type4 {
#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
+#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
+
+/* MDS threshold bitmap bits */
+#define THRESHOLD_RD (1UL << 0)
+#define THRESHOLD_WR (1UL << 1)
+#define THRESHOLD_RD_IO (1UL << 2)
+#define THRESHOLD_WR_IO (1UL << 3)
#define NFSPROC4_NULL 0
#define NFSPROC4_COMPOUND 1
@@ -596,6 +607,8 @@ enum {
NFSPROC4_CLNT_TEST_STATEID,
NFSPROC4_CLNT_FREE_STATEID,
NFSPROC4_CLNT_GETDEVICELIST,
+ NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
+ NFSPROC4_CLNT_DESTROY_CLIENTID,
};
/* nfs41 types */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 52a1bdb4ee2b..b23cfc120edb 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -102,6 +102,7 @@ struct nfs_open_context {
int error;
struct list_head list;
+ struct nfs4_threshold *mdsthreshold;
};
struct nfs_open_dir_context {
@@ -179,8 +180,7 @@ struct nfs_inode {
__be32 cookieverf[2];
unsigned long npages;
- unsigned long ncommit;
- struct list_head commit_list;
+ struct nfs_mds_commit_info commit_info;
/* Open contexts for shared mmap writes */
struct list_head open_files;
@@ -201,8 +201,10 @@ struct nfs_inode {
/* pNFS layout information */
struct pnfs_layout_hdr *layout;
- atomic_t commits_outstanding;
#endif /* CONFIG_NFS_V4*/
+ /* how many bytes have been written/read and how many bytes queued up */
+ __u64 write_io;
+ __u64 read_io;
#ifdef CONFIG_NFS_FSCACHE
struct fscache_cookie *fscache;
#endif
@@ -230,7 +232,6 @@ struct nfs_inode {
#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
#define NFS_INO_COMMIT (7) /* inode is committing unstable writes */
-#define NFS_INO_PNFS_COMMIT (8) /* use pnfs code for commit */
#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
@@ -317,11 +318,6 @@ static inline int nfs_server_capable(struct inode *inode, int cap)
return NFS_SERVER(inode)->caps & cap;
}
-static inline int NFS_USE_READDIRPLUS(struct inode *inode)
-{
- return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
-}
-
static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
{
dentry->d_time = verf;
@@ -552,8 +548,8 @@ extern int nfs_wb_page(struct inode *inode, struct page* page);
extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
extern int nfs_commit_inode(struct inode *, int);
-extern struct nfs_write_data *nfs_commitdata_alloc(void);
-extern void nfs_commit_free(struct nfs_write_data *wdata);
+extern struct nfs_commit_data *nfs_commitdata_alloc(void);
+extern void nfs_commit_free(struct nfs_commit_data *data);
#else
static inline int
nfs_commit_inode(struct inode *inode, int how)
@@ -569,12 +565,6 @@ nfs_have_writebacks(struct inode *inode)
}
/*
- * Allocate nfs_write_data structures
- */
-extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages);
-extern void nfs_writedata_free(struct nfs_write_data *);
-
-/*
* linux/fs/nfs/read.c
*/
extern int nfs_readpage(struct file *, struct page *);
@@ -585,12 +575,6 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
struct page *);
/*
- * Allocate nfs_read_data structures
- */
-extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages);
-extern void nfs_readdata_free(struct nfs_read_data *);
-
-/*
* linux/fs/nfs3proc.c
*/
#ifdef CONFIG_NFS_V3_ACL
@@ -654,6 +638,7 @@ nfs_fileid_to_ino_t(u64 fileid)
#define NFSDBG_FSCACHE 0x0800
#define NFSDBG_PNFS 0x1000
#define NFSDBG_PNFS_LD 0x2000
+#define NFSDBG_STATE 0x4000
#define NFSDBG_ALL 0xFFFF
#ifdef __KERNEL__
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 7073fc74481c..fbb78fb09bd2 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -17,7 +17,7 @@ struct nfs4_sequence_args;
struct nfs4_sequence_res;
struct nfs_server;
struct nfs4_minor_version_ops;
-struct server_scope;
+struct nfs41_server_scope;
struct nfs41_impl_id;
/*
@@ -35,6 +35,9 @@ struct nfs_client {
#define NFS_CS_RENEWD 3 /* - renewd started */
#define NFS_CS_STOP_RENEW 4 /* no more state to renew */
#define NFS_CS_CHECK_LEASE_TIME 5 /* need to check lease time */
+ unsigned long cl_flags; /* behavior switches */
+#define NFS_CS_NORESVPORT 0 /* - use ephemeral src port */
+#define NFS_CS_DISCRTRY 1 /* - disconnect on RPC retry */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -61,9 +64,6 @@ struct nfs_client {
struct rpc_wait_queue cl_rpcwaitq;
- /* used for the setclientid verifier */
- struct timespec cl_boot_time;
-
/* idmapper */
struct idmap * cl_idmap;
@@ -79,16 +79,17 @@ struct nfs_client {
u32 cl_seqid;
/* The flags used for obtaining the clientid during EXCHANGE_ID */
u32 cl_exchange_flags;
- struct nfs4_session *cl_session; /* sharred session */
+ struct nfs4_session *cl_session; /* shared session */
+ struct nfs41_server_owner *cl_serverowner;
+ struct nfs41_server_scope *cl_serverscope;
+ struct nfs41_impl_id *cl_implid;
#endif /* CONFIG_NFS_V4 */
#ifdef CONFIG_NFS_FSCACHE
struct fscache_cookie *fscache; /* client index cache cookie */
#endif
- struct server_scope *server_scope; /* from exchange_id */
- struct nfs41_impl_id *impl_id; /* from exchange_id */
- struct net *net;
+ struct net *cl_net;
};
/*
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index eac30d6bec17..88d166b555e8 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -27,7 +27,6 @@ enum {
PG_CLEAN,
PG_NEED_COMMIT,
PG_NEED_RESCHED,
- PG_PARTIAL_READ_FAILED,
PG_COMMIT_TO_DS,
};
@@ -37,7 +36,6 @@ struct nfs_page {
struct page *wb_page; /* page to read in/write out */
struct nfs_open_context *wb_context; /* File state context info */
struct nfs_lock_context *wb_lock_context; /* lock context info */
- atomic_t wb_complete; /* i/os we're waiting for */
pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */
unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */
wb_pgbase, /* Start of page data */
@@ -68,7 +66,9 @@ struct nfs_pageio_descriptor {
int pg_ioflags;
int pg_error;
const struct rpc_call_ops *pg_rpc_callops;
+ const struct nfs_pgio_completion_ops *pg_completion_ops;
struct pnfs_layout_segment *pg_lseg;
+ struct nfs_direct_req *pg_dreq;
};
#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
@@ -84,6 +84,7 @@ extern void nfs_release_request(struct nfs_page *req);
extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
struct inode *inode,
const struct nfs_pageio_ops *pg_ops,
+ const struct nfs_pgio_completion_ops *compl_ops,
size_t bsize,
int how);
extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
@@ -95,26 +96,17 @@ extern bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
struct nfs_page *req);
extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
+extern void nfs_unlock_and_release_request(struct nfs_page *req);
/*
- * Lock the page of an asynchronous request without getting a new reference
+ * Lock the page of an asynchronous request
*/
static inline int
-nfs_lock_request_dontget(struct nfs_page *req)
-{
- return !test_and_set_bit(PG_BUSY, &req->wb_flags);
-}
-
-static inline int
nfs_lock_request(struct nfs_page *req)
{
- if (test_and_set_bit(PG_BUSY, &req->wb_flags))
- return 0;
- kref_get(&req->wb_kref);
- return 1;
+ return !test_and_set_bit(PG_BUSY, &req->wb_flags);
}
-
/**
* nfs_list_add_request - Insert a request into a list
* @req: request
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7ba3551a0414..d1a7bf51c326 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -35,6 +35,15 @@ static inline int nfs_fsid_equal(const struct nfs_fsid *a, const struct nfs_fsid
return a->major == b->major && a->minor == b->minor;
}
+struct nfs4_threshold {
+ __u32 bm;
+ __u32 l_type;
+ __u64 rd_sz;
+ __u64 wr_sz;
+ __u64 rd_io_sz;
+ __u64 wr_io_sz;
+};
+
struct nfs_fattr {
unsigned int valid; /* which fields are valid */
umode_t mode;
@@ -67,6 +76,7 @@ struct nfs_fattr {
unsigned long gencount;
struct nfs4_string *owner_name;
struct nfs4_string *group_name;
+ struct nfs4_threshold *mdsthreshold; /* pNFS threshold hints */
};
#define NFS_ATTR_FATTR_TYPE (1U << 0)
@@ -106,14 +116,14 @@ struct nfs_fattr {
| NFS_ATTR_FATTR_FILEID \
| NFS_ATTR_FATTR_ATIME \
| NFS_ATTR_FATTR_MTIME \
- | NFS_ATTR_FATTR_CTIME)
+ | NFS_ATTR_FATTR_CTIME \
+ | NFS_ATTR_FATTR_CHANGE)
#define NFS_ATTR_FATTR_V2 (NFS_ATTR_FATTR \
| NFS_ATTR_FATTR_BLOCKS_USED)
#define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \
| NFS_ATTR_FATTR_SPACE_USED)
#define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \
- | NFS_ATTR_FATTR_SPACE_USED \
- | NFS_ATTR_FATTR_CHANGE)
+ | NFS_ATTR_FATTR_SPACE_USED)
/*
* Info on the file system
@@ -338,7 +348,6 @@ struct nfs_openargs {
const struct qstr * name;
const struct nfs_server *server; /* Needed for ID mapping */
const u32 * bitmask;
- const u32 * dir_bitmask;
__u32 claim;
struct nfs4_sequence_args seq_args;
};
@@ -349,7 +358,6 @@ struct nfs_openres {
struct nfs4_change_info cinfo;
__u32 rflags;
struct nfs_fattr * f_attr;
- struct nfs_fattr * dir_attr;
struct nfs_seqid * seqid;
const struct nfs_server *server;
fmode_t delegation_type;
@@ -519,12 +527,29 @@ struct nfs_writeres {
};
/*
+ * Arguments to the commit call.
+ */
+struct nfs_commitargs {
+ struct nfs_fh *fh;
+ __u64 offset;
+ __u32 count;
+ const u32 *bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs_commitres {
+ struct nfs_fattr *fattr;
+ struct nfs_writeverf *verf;
+ const struct nfs_server *server;
+ struct nfs4_sequence_res seq_res;
+};
+
+/*
* Common arguments to the unlink call
*/
struct nfs_removeargs {
const struct nfs_fh *fh;
struct qstr name;
- const u32 * bitmask;
struct nfs4_sequence_args seq_args;
};
@@ -543,7 +568,6 @@ struct nfs_renameargs {
const struct nfs_fh *new_dir;
const struct qstr *old_name;
const struct qstr *new_name;
- const u32 *bitmask;
struct nfs4_sequence_args seq_args;
};
@@ -839,7 +863,6 @@ struct nfs4_create_res {
struct nfs_fh * fh;
struct nfs_fattr * fattr;
struct nfs4_change_info dir_cinfo;
- struct nfs_fattr * dir_fattr;
struct nfs4_sequence_res seq_res;
};
@@ -1061,6 +1084,21 @@ struct nfstime4 {
};
#ifdef CONFIG_NFS_V4_1
+
+struct pnfs_commit_bucket {
+ struct list_head written;
+ struct list_head committing;
+ struct pnfs_layout_segment *wlseg;
+ struct pnfs_layout_segment *clseg;
+};
+
+struct pnfs_ds_commit_info {
+ int nwritten;
+ int ncommitting;
+ int nbuckets;
+ struct pnfs_commit_bucket *buckets;
+};
+
#define NFS4_EXCHANGE_ID_LEN (48)
struct nfs41_exchange_id_args {
struct nfs_client *client;
@@ -1070,13 +1108,13 @@ struct nfs41_exchange_id_args {
u32 flags;
};
-struct server_owner {
+struct nfs41_server_owner {
uint64_t minor_id;
uint32_t major_id_sz;
char major_id[NFS4_OPAQUE_LIMIT];
};
-struct server_scope {
+struct nfs41_server_scope {
uint32_t server_scope_sz;
char server_scope[NFS4_OPAQUE_LIMIT];
};
@@ -1087,10 +1125,18 @@ struct nfs41_impl_id {
struct nfstime4 date;
};
+struct nfs41_bind_conn_to_session_res {
+ struct nfs4_session *session;
+ u32 dir;
+ bool use_conn_in_rdma_mode;
+};
+
struct nfs41_exchange_id_res {
- struct nfs_client *client;
+ u64 clientid;
+ u32 seqid;
u32 flags;
- struct server_scope *server_scope;
+ struct nfs41_server_owner *server_owner;
+ struct nfs41_server_scope *server_scope;
struct nfs41_impl_id *impl_id;
};
@@ -1143,35 +1189,114 @@ struct nfs41_free_stateid_res {
struct nfs4_sequence_res seq_res;
};
+#else
+
+struct pnfs_ds_commit_info {
+};
+
#endif /* CONFIG_NFS_V4_1 */
struct nfs_page;
#define NFS_PAGEVEC_SIZE (8U)
+struct nfs_page_array {
+ struct page **pagevec;
+ unsigned int npages; /* Max length of pagevec */
+ struct page *page_array[NFS_PAGEVEC_SIZE];
+};
+
struct nfs_read_data {
+ struct nfs_pgio_header *header;
+ struct list_head list;
struct rpc_task task;
- struct inode *inode;
- struct rpc_cred *cred;
struct nfs_fattr fattr; /* fattr storage */
- struct list_head pages; /* Coalesced read requests */
- struct list_head list; /* lists of struct nfs_read_data */
- struct nfs_page *req; /* multi ops per nfs_page */
- struct page **pagevec;
- unsigned int npages; /* Max length of pagevec */
struct nfs_readargs args;
struct nfs_readres res;
unsigned long timestamp; /* For lease renewal */
- struct pnfs_layout_segment *lseg;
- struct nfs_client *ds_clp; /* pNFS data server */
- const struct rpc_call_ops *mds_ops;
int (*read_done_cb) (struct rpc_task *task, struct nfs_read_data *data);
__u64 mds_offset;
+ struct nfs_page_array pages;
+ struct nfs_client *ds_clp; /* pNFS data server */
+};
+
+/* used as flag bits in nfs_pgio_header */
+enum {
+ NFS_IOHDR_ERROR = 0,
+ NFS_IOHDR_EOF,
+ NFS_IOHDR_REDO,
+ NFS_IOHDR_NEED_COMMIT,
+ NFS_IOHDR_NEED_RESCHED,
+};
+
+struct nfs_pgio_header {
+ struct inode *inode;
+ struct rpc_cred *cred;
+ struct list_head pages;
+ struct list_head rpc_list;
+ atomic_t refcnt;
+ struct nfs_page *req;
+ struct pnfs_layout_segment *lseg;
+ loff_t io_start;
+ const struct rpc_call_ops *mds_ops;
+ void (*release) (struct nfs_pgio_header *hdr);
+ const struct nfs_pgio_completion_ops *completion_ops;
+ struct nfs_direct_req *dreq;
+ spinlock_t lock;
+ /* fields protected by lock */
int pnfs_error;
- struct page *page_array[NFS_PAGEVEC_SIZE];
+ int error; /* merge with pnfs_error */
+ unsigned long good_bytes; /* boundary of good data */
+ unsigned long flags;
+};
+
+struct nfs_read_header {
+ struct nfs_pgio_header header;
+ struct nfs_read_data rpc_data;
};
struct nfs_write_data {
+ struct nfs_pgio_header *header;
+ struct list_head list;
+ struct rpc_task task;
+ struct nfs_fattr fattr;
+ struct nfs_writeverf verf;
+ struct nfs_writeargs args; /* argument struct */
+ struct nfs_writeres res; /* result struct */
+ unsigned long timestamp; /* For lease renewal */
+ int (*write_done_cb) (struct rpc_task *task, struct nfs_write_data *data);
+ __u64 mds_offset; /* Filelayout dense stripe */
+ struct nfs_page_array pages;
+ struct nfs_client *ds_clp; /* pNFS data server */
+};
+
+struct nfs_write_header {
+ struct nfs_pgio_header header;
+ struct nfs_write_data rpc_data;
+};
+
+struct nfs_mds_commit_info {
+ atomic_t rpcs_out;
+ unsigned long ncommit;
+ struct list_head list;
+};
+
+struct nfs_commit_data;
+struct nfs_inode;
+struct nfs_commit_completion_ops {
+ void (*error_cleanup) (struct nfs_inode *nfsi);
+ void (*completion) (struct nfs_commit_data *data);
+};
+
+struct nfs_commit_info {
+ spinlock_t *lock;
+ struct nfs_mds_commit_info *mds;
+ struct pnfs_ds_commit_info *ds;
+ struct nfs_direct_req *dreq; /* O_DIRECT request */
+ const struct nfs_commit_completion_ops *completion_ops;
+};
+
+struct nfs_commit_data {
struct rpc_task task;
struct inode *inode;
struct rpc_cred *cred;
@@ -1179,22 +1304,22 @@ struct nfs_write_data {
struct nfs_writeverf verf;
struct list_head pages; /* Coalesced requests we wish to flush */
struct list_head list; /* lists of struct nfs_write_data */
- struct nfs_page *req; /* multi ops per nfs_page */
- struct page **pagevec;
- unsigned int npages; /* Max length of pagevec */
- struct nfs_writeargs args; /* argument struct */
- struct nfs_writeres res; /* result struct */
+ struct nfs_direct_req *dreq; /* O_DIRECT request */
+ struct nfs_commitargs args; /* argument struct */
+ struct nfs_commitres res; /* result struct */
+ struct nfs_open_context *context;
struct pnfs_layout_segment *lseg;
struct nfs_client *ds_clp; /* pNFS data server */
int ds_commit_index;
const struct rpc_call_ops *mds_ops;
- int (*write_done_cb) (struct rpc_task *task, struct nfs_write_data *data);
-#ifdef CONFIG_NFS_V4
- unsigned long timestamp; /* For lease renewal */
-#endif
- __u64 mds_offset; /* Filelayout dense stripe */
- int pnfs_error;
- struct page *page_array[NFS_PAGEVEC_SIZE];
+ const struct nfs_commit_completion_ops *completion_ops;
+ int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data);
+};
+
+struct nfs_pgio_completion_ops {
+ void (*error_cleanup)(struct list_head *head);
+ void (*init_hdr)(struct nfs_pgio_header *hdr);
+ void (*completion)(struct nfs_pgio_header *hdr);
};
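
The new struct nfs_pgio_completion_ops is how a read or write path plugs its completion behaviour into the shared page-I/O code. A skeletal ops table (callback bodies are placeholders, not from the patch):

static void example_error_cleanup(struct list_head *head)
{
	/* release or requeue the nfs_page requests left on @head */
}

static void example_init_hdr(struct nfs_pgio_header *hdr)
{
	/* per-header setup would go here */
}

static void example_completion(struct nfs_pgio_header *hdr)
{
	/* per-request completion handling would go here */
}

static const struct nfs_pgio_completion_ops example_pgio_completion_ops = {
	.error_cleanup	= example_error_cleanup,
	.init_hdr	= example_init_hdr,
	.completion	= example_completion,
};
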
struct nfs_unlinkdata {
@@ -1234,11 +1359,13 @@ struct nfs_rpc_ops {
int (*getroot) (struct nfs_server *, struct nfs_fh *,
struct nfs_fsinfo *);
+ struct vfsmount *(*submount) (struct nfs_server *, struct dentry *,
+ struct nfs_fh *, struct nfs_fattr *);
int (*getattr) (struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
struct iattr *);
- int (*lookup) (struct rpc_clnt *clnt, struct inode *, struct qstr *,
+ int (*lookup) (struct inode *, struct qstr *,
struct nfs_fh *, struct nfs_fattr *);
int (*access) (struct inode *, struct nfs_access_entry *);
int (*readlink)(struct inode *, struct page *, unsigned int,
@@ -1277,8 +1404,9 @@ struct nfs_rpc_ops {
void (*write_setup) (struct nfs_write_data *, struct rpc_message *);
void (*write_rpc_prepare)(struct rpc_task *, struct nfs_write_data *);
int (*write_done) (struct rpc_task *, struct nfs_write_data *);
- void (*commit_setup) (struct nfs_write_data *, struct rpc_message *);
- int (*commit_done) (struct rpc_task *, struct nfs_write_data *);
+ void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *);
+ void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *);
+ int (*commit_done) (struct rpc_task *, struct nfs_commit_data *);
int (*lock)(struct file *, int, struct file_lock *);
int (*lock_check_bounds)(const struct file_lock *);
void (*clear_acl_cache)(struct inode *);
@@ -1287,9 +1415,9 @@ struct nfs_rpc_ops {
struct nfs_open_context *ctx,
int open_flags,
struct iattr *iattr);
- int (*init_client) (struct nfs_client *, const struct rpc_timeout *,
- const char *, rpc_authflavor_t, int);
- int (*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
+ struct nfs_client *
+ (*init_client) (struct nfs_client *, const struct rpc_timeout *,
+ const char *, rpc_authflavor_t);
};
/*
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index f85308e688fd..e33f747b173c 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -103,6 +103,7 @@ struct svc_export {
struct nfsd4_fs_locations ex_fslocs;
int ex_nflavors;
struct exp_flavor_info ex_flavors[MAX_SECINFO_LIST];
+ struct cache_detail *cd;
};
/* an "export key" (expkey) maps a filehandlefragement to an
@@ -129,24 +130,22 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp);
/*
* Function declarations
*/
-int nfsd_export_init(void);
-void nfsd_export_shutdown(void);
-void nfsd_export_flush(void);
+int nfsd_export_init(struct net *);
+void nfsd_export_shutdown(struct net *);
+void nfsd_export_flush(struct net *);
struct svc_export * rqst_exp_get_by_name(struct svc_rqst *,
struct path *);
struct svc_export * rqst_exp_parent(struct svc_rqst *,
struct path *);
struct svc_export * rqst_find_fsidzero_export(struct svc_rqst *);
-int exp_rootfh(struct auth_domain *,
+int exp_rootfh(struct net *, struct auth_domain *,
char *path, struct knfsd_fh *, int maxsize);
__be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *);
__be32 nfserrno(int errno);
-extern struct cache_detail svc_export_cache;
-
static inline void exp_put(struct svc_export *exp)
{
- cache_put(&exp->h, &svc_export_cache);
+ cache_put(&exp->h, exp->cd);
}
static inline void exp_get(struct svc_export *exp)
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 2540e86d99ab..a6959f72745e 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -1594,6 +1594,8 @@ enum nl80211_sta_flags {
NL80211_STA_FLAG_MAX = __NL80211_STA_FLAG_AFTER_LAST - 1
};
+#define NL80211_STA_FLAG_MAX_OLD_API NL80211_STA_FLAG_TDLS_PEER
+
/**
* struct nl80211_sta_flag_update - station flags mask/set
* @mask: mask of station flags to set
@@ -1994,9 +1996,9 @@ enum nl80211_reg_rule_flags {
* enum nl80211_dfs_regions - regulatory DFS regions
*
* @NL80211_DFS_UNSET: Country has no DFS master region specified
- * @NL80211_DFS_FCC_: Country follows DFS master rules from FCC
- * @NL80211_DFS_FCC_: Country follows DFS master rules from ETSI
- * @NL80211_DFS_JP_: Country follows DFS master rules from JP/MKK/Telec
+ * @NL80211_DFS_FCC: Country follows DFS master rules from FCC
+ * @NL80211_DFS_ETSI: Country follows DFS master rules from ETSI
+ * @NL80211_DFS_JP: Country follows DFS master rules from JP/MKK/Telec
*/
enum nl80211_dfs_regions {
NL80211_DFS_UNSET = 0,
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 81733d12cbea..c454f5796747 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -58,7 +58,6 @@ extern int of_mm_gpiochip_add(struct device_node *np,
extern void of_gpiochip_add(struct gpio_chip *gc);
extern void of_gpiochip_remove(struct gpio_chip *gc);
-extern struct gpio_chip *of_node_to_gpiochip(struct device_node *np);
extern int of_gpio_simple_xlate(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec,
u32 *flags);
diff --git a/include/linux/of_i2c.h b/include/linux/of_i2c.h
index 0efe8d465f55..1cb775f8e663 100644
--- a/include/linux/of_i2c.h
+++ b/include/linux/of_i2c.h
@@ -20,6 +20,10 @@ extern void of_i2c_register_devices(struct i2c_adapter *adap);
/* must call put_device() when done with returned i2c_client device */
extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+/* must call put_device() when done with returned i2c_adapter device */
+extern struct i2c_adapter *of_find_i2c_adapter_by_node(
+ struct device_node *node);
+
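
of_find_i2c_adapter_by_node() mirrors the existing client lookup; as with it, the caller owns a device reference. A consumer sketch (the "i2c-bus" phandle property name is illustrative):

static struct i2c_adapter *example_get_adapter(struct device_node *np)
{
	struct device_node *bus = of_parse_phandle(np, "i2c-bus", 0);
	struct i2c_adapter *adap;

	if (!bus)
		return NULL;
	adap = of_find_i2c_adapter_by_node(bus);
	of_node_put(bus);
	return adap;	/* caller must put_device(&adap->dev) when done */
}
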
#else
static inline void of_i2c_register_devices(struct i2c_adapter *adap)
{
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index d229ad3edee0..1717cd935e1c 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -11,7 +11,7 @@ struct of_irq;
#include <linux/of.h>
/*
- * irq_of_parse_and_map() is used ba all OF enabled platforms; but SPARC
+ * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
* implements it differently. However, the prototype is the same for all,
* so declare it here regardless of the CONFIG_OF_IRQ setting.
*/
@@ -76,5 +76,13 @@ extern struct device_node *of_irq_find_parent(struct device_node *child);
extern void of_irq_init(const struct of_device_id *matches);
#endif /* CONFIG_OF_IRQ */
-#endif /* CONFIG_OF */
+
+#else /* !CONFIG_OF */
+static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
+ int index)
+{
+ return 0;
+}
+#endif /* !CONFIG_OF */
+
#endif /* __OF_IRQ_H */
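
The new !CONFIG_OF stub returns 0, so drivers can call the mapper unconditionally and fall back when no mapping exists. Caller sketch (the platform fallback is illustrative):

static int example_get_irq(struct platform_device *pdev)
{
	int irq = irq_of_parse_and_map(pdev->dev.of_node, 0);

	if (!irq)
		irq = platform_get_irq(pdev, 0);
	return irq;
}
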
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index f93e21700d3e..bb115deb7612 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -5,7 +5,7 @@
struct pci_dev;
struct of_irq;
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
+int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq);
struct device_node;
struct device_node *of_pci_find_child_device(struct device_node *parent,
diff --git a/include/linux/of_spi.h b/include/linux/of_spi.h
deleted file mode 100644
index 9e3e70f78ae6..000000000000
--- a/include/linux/of_spi.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * OpenFirmware SPI support routines
- * Copyright (C) 2008 Secret Lab Technologies Ltd.
- *
- * Support routines for deriving SPI device attachments from the device
- * tree.
- */
-
-#ifndef __LINUX_OF_SPI_H
-#define __LINUX_OF_SPI_H
-
-#include <linux/spi/spi.h>
-
-#if defined(CONFIG_OF_SPI) || defined(CONFIG_OF_SPI_MODULE)
-extern void of_register_spi_devices(struct spi_master *master);
-#else
-static inline void of_register_spi_devices(struct spi_master *master)
-{
- return;
-}
-#endif /* CONFIG_OF_SPI */
-
-#endif /* __LINUX_OF_SPI */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 3d7647536b03..e4c29bc72e70 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -43,8 +43,9 @@ enum oom_constraint {
extern void compare_swap_oom_score_adj(int old_val, int new_val);
extern int test_set_oom_score_adj(int new_val);
-extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
- const nodemask_t *nodemask, unsigned long totalpages);
+extern unsigned long oom_badness(struct task_struct *p,
+ struct mem_cgroup *memcg, const nodemask_t *nodemask,
+ unsigned long totalpages);
extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 051c1b1ede4e..3bdcab30ca41 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -3,7 +3,7 @@
/*
* Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
- * If specified range includes migrate types other than MOVABLE,
+ * If specified range includes migrate types other than MOVABLE or CMA,
* this will fail with -EBUSY.
*
* For isolating all pages in the range finally, the caller have to
@@ -11,27 +11,27 @@
* test it.
*/
extern int
-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
+start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ unsigned migratetype);
/*
* Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
* target range is [start_pfn, end_pfn)
*/
extern int
-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
+undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ unsigned migratetype);
/*
- * test all pages in [start_pfn, end_pfn)are isolated or not.
+ * Test all pages in [start_pfn, end_pfn) are isolated or not.
*/
-extern int
-test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
+int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
/*
- * Internal funcs.Changes pageblock's migrate type.
- * Please use make_pagetype_isolated()/make_pagetype_movable().
+ * Internal functions. Changes pageblock's migrate type.
*/
extern int set_migratetype_isolate(struct page *page);
-extern void unset_migratetype_isolate(struct page *page);
+extern void unset_migratetype_isolate(struct page *page, unsigned migratetype);
#endif
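
Callers now state which migrate type the pageblocks should return to when isolation is undone. A sketch of the CMA-style calling sequence (migration of in-use pages is elided; MIGRATE_CMA exists only under CONFIG_CMA):

static int example_isolate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;

	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_CMA);
	if (ret)
		return ret;

	/* ... migrate or reclaim the pages in the range here ... */

	return undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_CMA);
}
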
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index cfaaa6949b8b..7cfad3bbb0cc 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -426,7 +426,7 @@ static inline int fault_in_pages_writeable(char __user *uaddr, int size)
*/
if (((unsigned long)uaddr & PAGE_MASK) !=
((unsigned long)end & PAGE_MASK))
- ret = __put_user(0, end);
+ ret = __put_user(0, end);
}
return ret;
}
@@ -445,13 +445,73 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
if (((unsigned long)uaddr & PAGE_MASK) !=
((unsigned long)end & PAGE_MASK)) {
- ret = __get_user(c, end);
+ ret = __get_user(c, end);
(void)c;
}
}
return ret;
}
+/*
+ * Multipage variants of the above prefault helpers, useful if more than
+ * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
+ * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
+ * filemap.c hotpaths.
+ */
+static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
+{
+ int ret = 0;
+ char __user *end = uaddr + size - 1;
+
+ if (unlikely(size == 0))
+ return ret;
+
+ /*
+ * Writing zeroes into userspace here is OK, because we know that if
+ * the zero gets there, we'll be overwriting it.
+ */
+ while (uaddr <= end) {
+ ret = __put_user(0, uaddr);
+ if (ret != 0)
+ return ret;
+ uaddr += PAGE_SIZE;
+ }
+
+ /* Check whether the range spilled into the next page. */
+ if (((unsigned long)uaddr & PAGE_MASK) ==
+ ((unsigned long)end & PAGE_MASK))
+ ret = __put_user(0, end);
+
+ return ret;
+}
+
+static inline int fault_in_multipages_readable(const char __user *uaddr,
+ int size)
+{
+ volatile char c;
+ int ret = 0;
+ const char __user *end = uaddr + size - 1;
+
+ if (unlikely(size == 0))
+ return ret;
+
+ while (uaddr <= end) {
+ ret = __get_user(c, uaddr);
+ if (ret != 0)
+ return ret;
+ uaddr += PAGE_SIZE;
+ }
+
+ /* Check whether the range spilled into the next page. */
+ if (((unsigned long)uaddr & PAGE_MASK) ==
+ ((unsigned long)end & PAGE_MASK)) {
+ ret = __get_user(c, end);
+ (void)c;
+ }
+
+ return ret;
+}
+
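
Typical use is to prefault a user buffer larger than a page before taking locks that would make handling the fault awkward, mirroring how the single-page helpers above are used. Sketch:

static ssize_t example_prefault_then_write(const char __user *buf, size_t count)
{
	if (fault_in_multipages_readable(buf, count))
		return -EFAULT;

	/* ... take locks, then copy_from_user(dst, buf, count) ... */
	return count;
}
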
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 17b7b5b01b4a..d8c379dba6ad 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -687,7 +687,7 @@ int __must_check pci_bus_add_device(struct pci_dev *dev);
void pci_read_bridge_bases(struct pci_bus *child);
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
struct resource *res);
-u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin);
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
extern struct pci_dev *pci_dev_get(struct pci_dev *dev);
@@ -1692,7 +1692,8 @@ extern void pci_release_bus_of_node(struct pci_bus *bus);
/* Arch may override this (weak) */
extern struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus);
-static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
+static inline struct device_node *
+pci_device_to_OF_node(const struct pci_dev *pdev)
{
return pdev ? pdev->dev.of_node : NULL;
}
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 3329965ed63f..ab741b0d0074 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2506,6 +2506,7 @@
#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
#define PCI_DEVICE_ID_INTEL_I960 0x0960
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
+#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085
#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index 4f75e531c112..241065c9ce51 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -18,6 +18,8 @@
#include <linux/power_supply.h>
enum data_source {
+ CM_BATTERY_PRESENT,
+ CM_NO_BATTERY,
CM_FUEL_GAUGE,
CM_CHARGER_STAT,
};
@@ -29,6 +31,16 @@ enum polling_modes {
CM_POLL_CHARGING_ONLY,
};
+enum cm_event_types {
+ CM_EVENT_UNKNOWN = 0,
+ CM_EVENT_BATT_FULL,
+ CM_EVENT_BATT_IN,
+ CM_EVENT_BATT_OUT,
+ CM_EVENT_EXT_PWR_IN_OUT,
+ CM_EVENT_CHG_START_STOP,
+ CM_EVENT_OTHERS,
+};
+
/**
* struct charger_global_desc
* @rtc_name: the name of RTC used to wake up the system from suspend.
@@ -38,11 +50,18 @@ enum polling_modes {
* rtc_only_wakeup() returning false.
* If the RTC given to CM is the only wakeup reason,
* rtc_only_wakeup should return true.
+ * @assume_timer_stops_in_suspend:
+ * Assume that the jiffy timer stops in suspend-to-RAM.
+ * When enabled, CM does not rely on jiffies value in
+ * suspend_again and assumes that jiffies value does not
+ * change during suspend.
*/
struct charger_global_desc {
char *rtc_name;
bool (*rtc_only_wakeup)(void);
+
+ bool assume_timer_stops_in_suspend;
};
/**
@@ -50,6 +69,11 @@ struct charger_global_desc {
* @psy_name: the name of power-supply-class for charger manager
* @polling_mode:
* Determine which polling mode will be used
+ * @fullbatt_vchkdrop_ms:
+ * @fullbatt_vchkdrop_uV:
+ * Check voltage drop after the battery is fully charged.
+ * If it has dropped more than fullbatt_vchkdrop_uV after
+ * fullbatt_vchkdrop_ms, CM will restart charging.
* @fullbatt_uV: voltage in microvolt
* If it is not being charged and VBATT >= fullbatt_uV,
* it is assumed to be full.
@@ -76,6 +100,8 @@ struct charger_desc {
enum polling_modes polling_mode;
unsigned int polling_interval_ms;
+ unsigned int fullbatt_vchkdrop_ms;
+ unsigned int fullbatt_vchkdrop_uV;
unsigned int fullbatt_uV;
enum data_source battery_present;
@@ -101,6 +127,11 @@ struct charger_desc {
* @fuel_gauge: power_supply for fuel gauge
* @charger_stat: array of power_supply for chargers
* @charger_enabled: the state of charger
+ * @fullbatt_vchk_jiffies_at:
+ * jiffies at the time full battery check will occur.
+ * @fullbatt_vchk_uV: voltage in microvolt
+ * criteria for full battery
+ * @fullbatt_vchk_work: work queue for full battery check
* @emergency_stop:
* When setting true, stop charging
* @last_temp_mC: the measured temperature in milli-Celsius
@@ -121,6 +152,10 @@ struct charger_manager {
bool charger_enabled;
+ unsigned long fullbatt_vchk_jiffies_at;
+ unsigned int fullbatt_vchk_uV;
+ struct delayed_work fullbatt_vchk_work;
+
int emergency_stop;
int last_temp_mC;
@@ -134,14 +169,13 @@ struct charger_manager {
#ifdef CONFIG_CHARGER_MANAGER
extern int setup_charger_manager(struct charger_global_desc *gd);
extern bool cm_suspend_again(void);
+extern void cm_notify_event(struct power_supply *psy,
+ enum cm_event_types type, char *msg);
#else
-static void __maybe_unused setup_charger_manager(struct charger_global_desc *gd)
-{ }
-
-static bool __maybe_unused cm_suspend_again(void)
-{
- return false;
-}
+static inline int setup_charger_manager(struct charger_global_desc *gd)
+{ return 0; }
+static inline bool cm_suspend_again(void) { return false; }
+static inline void cm_notify_event(struct power_supply *psy,
+ enum cm_event_types type, char *msg) { }
#endif
-
#endif /* _CHARGER_MANAGER_H */
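[Editor's note] A rough sketch of how a charger or fuel-gauge driver might use the new cm_notify_event() hook; the driver state structure, the interrupt handler and the mapping of the hardware event to CM_EVENT_BATT_FULL are assumptions, not taken from this patch.

#include <linux/interrupt.h>
#include <linux/power_supply.h>
#include <linux/power/charger-manager.h>

struct foo_fg {				/* hypothetical driver state */
	struct power_supply psy;
};

static irqreturn_t foo_fg_irq(int irq, void *data)
{
	struct foo_fg *fg = data;

	/* Forward the state change to charger-manager. */
	cm_notify_event(&fg->psy, CM_EVENT_BATT_FULL, NULL);

	return IRQ_HANDLED;
}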
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index e01b167e66f0..89dd84f47c6e 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -116,6 +116,18 @@ enum max17042_register {
MAX17042_VFSOC = 0xFF,
};
+/* Registers specific to max17047/50 */
+enum max17047_register {
+ MAX17047_QRTbl00 = 0x12,
+ MAX17047_FullSOCThr = 0x13,
+ MAX17047_QRTbl10 = 0x22,
+ MAX17047_QRTbl20 = 0x32,
+ MAX17047_V_empty = 0x3A,
+ MAX17047_QRTbl30 = 0x42,
+};
+
+enum max170xx_chip_type {MAX17042, MAX17047};
+
/*
* used for setting a register to a desired value
* addr : address for a register
@@ -144,6 +156,7 @@ struct max17042_config_data {
u16 shdntimer; /* 0x03F */
/* App data */
+ u16 full_soc_thresh; /* 0x13 */
u16 design_cap; /* 0x18 */
u16 ichgt_term; /* 0x1E */
@@ -162,6 +175,10 @@ struct max17042_config_data {
u16 lavg_empty; /* 0x36 */
u16 dqacc; /* 0x45 */
u16 dpacc; /* 0x46 */
+ u16 qrtbl00; /* 0x12 */
+ u16 qrtbl10; /* 0x22 */
+ u16 qrtbl20; /* 0x32 */
+ u16 qrtbl30; /* 0x42 */
/* Cell technology from power_supply.h */
u16 cell_technology;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index c38c13db8832..3b912bee28d1 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -96,6 +96,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
POWER_SUPPLY_PROP_CURRENT_MAX,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
@@ -211,7 +212,7 @@ extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
extern int power_supply_set_battery_charged(struct power_supply *psy);
-#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
+#ifdef CONFIG_POWER_SUPPLY
extern int power_supply_is_system_supplied(void);
#else
static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
@@ -261,6 +262,7 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp)
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
case POWER_SUPPLY_PROP_POWER_NOW:
return 1;
default:
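[Editor's note] A battery driver that can measure open-circuit voltage would report the new property through its usual get_property callback; a sketch, with the OCV read reduced to a placeholder helper.

#include <linux/power_supply.h>

static int foo_read_ocv_uv(void)
{
	return 3700000;			/* placeholder: 3.7 V in microvolts */
}

static int foo_bat_get_property(struct power_supply *psy,
				enum power_supply_property psp,
				union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
		val->intval = foo_read_ocv_uv();
		return 0;
	default:
		return -EINVAL;
	}
}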
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 78b76e24cc7e..711e0a30aacc 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -113,6 +113,12 @@
# define PR_SET_MM_START_STACK 5
# define PR_SET_MM_START_BRK 6
# define PR_SET_MM_BRK 7
+# define PR_SET_MM_ARG_START 8
+# define PR_SET_MM_ARG_END 9
+# define PR_SET_MM_ENV_START 10
+# define PR_SET_MM_ENV_END 11
+# define PR_SET_MM_AUXV 12
+# define PR_SET_MM_EXE_FILE 13
/*
* Set specific pid that is allowed to ptrace the current task.
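[Editor's note] From user space the new PR_SET_MM_* options extend the existing prctl(PR_SET_MM, ...) call; a sketch, assuming the caller has CAP_SYS_RESOURCE, that new_arg_start points into its own address space, and that <sys/prctl.h> picks up these updated definitions.

#include <sys/prctl.h>

/* Move the recorded start of the argument area of the current process. */
static int set_arg_start(unsigned long new_arg_start)
{
	return prctl(PR_SET_MM, PR_SET_MM_ARG_START, new_arg_start, 0, 0);
}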
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index fb201896a8b0..7d7fbe2ef782 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -119,7 +119,7 @@ int __must_check res_counter_charge_locked(struct res_counter *counter,
unsigned long val, bool force);
int __must_check res_counter_charge(struct res_counter *counter,
unsigned long val, struct res_counter **limit_fail_at);
-int __must_check res_counter_charge_nofail(struct res_counter *counter,
+int res_counter_charge_nofail(struct res_counter *counter,
unsigned long val, struct res_counter **limit_fail_at);
/*
@@ -135,6 +135,9 @@ int __must_check res_counter_charge_nofail(struct res_counter *counter,
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
void res_counter_uncharge(struct res_counter *counter, unsigned long val);
+void res_counter_uncharge_until(struct res_counter *counter,
+ struct res_counter *top,
+ unsigned long val);
/**
* res_counter_margin - calculate chargeable space of a counter
* @cnt: the counter
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 4d50611112ba..a90ebadd9da0 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -20,6 +20,9 @@
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/rio_regs.h>
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+#include <linux/dmaengine.h>
+#endif
#define RIO_NO_HOPCOUNT -1
#define RIO_INVALID_DESTID 0xffff
@@ -254,6 +257,9 @@ struct rio_mport {
u32 phys_efptr;
unsigned char name[40];
void *priv; /* Master port private data */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ struct dma_device dma;
+#endif
};
/**
@@ -395,6 +401,47 @@ union rio_pw_msg {
u32 raw[RIO_PW_MSG_SIZE/sizeof(u32)];
};
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+/**
+ * enum rio_write_type - RIO write transaction types used in DMA transfers
+ *
+ * Note: The RapidIO specification defines write (NWRITE) and
+ * write-with-response (NWRITE_R) data transfer operations.
+ * Existing DMA controllers that service RapidIO may use one of these
+ * operations for the entire data transfer, or combine them so that only
+ * the last data packet requires a response.
+ */
+enum rio_write_type {
+ RDW_DEFAULT, /* default method used by DMA driver */
+ RDW_ALL_NWRITE, /* all packets use NWRITE */
+ RDW_ALL_NWRITE_R, /* all packets use NWRITE_R */
+ RDW_LAST_NWRITE_R, /* last packet uses NWRITE_R, others - NWRITE */
+};
+
+struct rio_dma_ext {
+ u16 destid;
+ u64 rio_addr; /* low 64-bits of 66-bit RapidIO address */
+ u8 rio_addr_u; /* upper 2-bits of 66-bit RapidIO address */
+ enum rio_write_type wr_type; /* preferred RIO write operation type */
+};
+
+struct rio_dma_data {
+ /* Local data (as scatterlist) */
+ struct scatterlist *sg; /* I/O scatter list */
+ unsigned int sg_len; /* size of scatter list */
+ /* Remote device address (flat buffer) */
+ u64 rio_addr; /* low 64-bits of 66-bit RapidIO address */
+ u8 rio_addr_u; /* upper 2-bits of 66-bit RapidIO address */
+ enum rio_write_type wr_type; /* preferred RIO write operation type */
+};
+
+static inline struct rio_mport *dma_to_mport(struct dma_device *ddev)
+{
+ return container_of(ddev, struct rio_mport, dma);
+}
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
/* Architecture and hardware-specific functions */
extern int rio_register_mport(struct rio_mport *);
extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index 7f07470e1ed9..31ad146be316 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -377,6 +377,15 @@ void rio_unregister_driver(struct rio_driver *);
struct rio_dev *rio_dev_get(struct rio_dev *);
void rio_dev_put(struct rio_dev *);
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+extern struct dma_chan *rio_request_dma(struct rio_dev *rdev);
+extern void rio_release_dma(struct dma_chan *dchan);
+extern struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(
+ struct rio_dev *rdev, struct dma_chan *dchan,
+ struct rio_dma_data *data,
+ enum dma_transfer_direction direction, unsigned long flags);
+#endif
+
/**
* rio_name - Get the unique RIO device identifier
* @rdev: RIO device
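[Editor's note] A rough outline of how a RapidIO client might drive the new DMA interface, assuming CONFIG_RAPIDIO_DMA_ENGINE and a scatterlist prepared by the caller; error handling is trimmed and the descriptor submission follows the generic dmaengine pattern rather than anything specified in this patch.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/rio_drv.h>

/* Push one scatterlist to a remote RapidIO target. */
static int foo_rio_push(struct rio_dev *rdev, struct scatterlist *sgl,
			unsigned int sg_len, u64 remote_addr)
{
	struct rio_dma_data data = {
		.sg		= sgl,
		.sg_len		= sg_len,
		.rio_addr	= remote_addr,
		.rio_addr_u	= 0,
		.wr_type	= RDW_DEFAULT,
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dchan;

	dchan = rio_request_dma(rdev);
	if (!dchan)
		return -ENODEV;

	tx = rio_dma_prep_slave_sg(rdev, dchan, &data, DMA_MEM_TO_DEV, 0);
	if (!tx) {
		rio_release_dma(dchan);
		return -EIO;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(dchan);
	/* ... wait for completion before releasing the channel ... */
	rio_release_dma(dchan);
	return 0;
}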
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index fd07c4542cee..3fce545df394 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -173,8 +173,6 @@ enum ttu_flags {
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
-bool is_vma_temporary_stack(struct vm_area_struct *vma);
-
int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
unsigned long address, enum ttu_flags flags);
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index fcabfb4873c8..f071b3922c67 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -91,6 +91,9 @@ struct rtc_pll_info {
#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */
#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */
+#define RTC_VL_READ _IOR('p', 0x13, int) /* Voltage low detector */
+#define RTC_VL_CLR _IO('p', 0x14) /* Clear voltage low information */
+
/* interrupt flags */
#define RTC_IRQF 0x80 /* Any of the following is active */
#define RTC_PF 0x40 /* Periodic interrupt */
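[Editor's note] From user space the new voltage-low ioctls could be used roughly like this; a sketch against /dev/rtc0, with no claim about which RTC drivers actually implement them.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	int low = 0;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, RTC_VL_READ, &low) == 0 && low)
		printf("RTC reports a low-voltage condition\n");
	ioctl(fd, RTC_VL_CLR);		/* clear the stored indication */
	close(fd);
	return 0;
}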
diff --git a/include/linux/rtc/ds1307.h b/include/linux/rtc/ds1307.h
new file mode 100644
index 000000000000..291b1c490367
--- /dev/null
+++ b/include/linux/rtc/ds1307.h
@@ -0,0 +1,22 @@
+/*
+ * ds1307.h - platform_data for the ds1307 (and variants) rtc driver
+ * (C) Copyright 2012 by Wolfram Sang, Pengutronix e.K.
+ * same license as the driver
+ */
+
+#ifndef _LINUX_DS1307_H
+#define _LINUX_DS1307_H
+
+#include <linux/types.h>
+
+#define DS1307_TRICKLE_CHARGER_250_OHM 0x01
+#define DS1307_TRICKLE_CHARGER_2K_OHM 0x02
+#define DS1307_TRICKLE_CHARGER_4K_OHM 0x03
+#define DS1307_TRICKLE_CHARGER_NO_DIODE 0x04
+#define DS1307_TRICKLE_CHARGER_DIODE 0x08
+
+struct ds1307_platform_data {
+ u8 trickle_charger_setup;
+};
+
+#endif /* _LINUX_DS1307_H */
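[Editor's note] Board code would hand this platform data to the rtc-ds1307 driver through the usual I2C board info; a sketch, with the bus number, device address and DS1339 variant chosen purely for illustration (whether the driver consumes trickle_charger_setup is defined by the corresponding driver patch, not this header).

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/rtc/ds1307.h>

static struct ds1307_platform_data foo_rtc_pdata = {
	.trickle_charger_setup	= DS1307_TRICKLE_CHARGER_250_OHM |
				  DS1307_TRICKLE_CHARGER_DIODE,
};

static struct i2c_board_info foo_rtc_info __initdata = {
	I2C_BOARD_INFO("ds1339", 0x68),
	.platform_data = &foo_rtc_pdata,
};

/* i2c_register_board_info(0, &foo_rtc_info, 1); from the board init code */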
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 17c6c929ee94..660c8ae93471 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1566,6 +1566,10 @@ struct task_struct {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
atomic_t ptrace_bp_refcnt;
#endif
+#ifdef CONFIG_UPROBES
+ struct uprobe_task *utask;
+ int uprobe_srcu_id;
+#endif
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
diff --git a/include/linux/serio.h b/include/linux/serio.h
index ca82861b0e46..6d6cfd3e94a3 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -96,6 +96,19 @@ int __must_check __serio_register_driver(struct serio_driver *drv,
void serio_unregister_driver(struct serio_driver *drv);
+/**
+ * module_serio_driver() - Helper macro for registering a serio driver
+ * @__serio_driver: serio_driver struct
+ *
+ * Helper macro for serio drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module
+ * may only use this macro once, and calling it replaces module_init()
+ * and module_exit().
+ */
+#define module_serio_driver(__serio_driver) \
+ module_driver(__serio_driver, serio_register_driver, \
+ serio_unregister_driver)
+
static inline int serio_write(struct serio *serio, unsigned char data)
{
if (serio->write)
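[Editor's note] A sketch of the boilerplate the new macro removes: module_serio_driver() expands to module_init()/module_exit() stubs that call serio_register_driver()/serio_unregister_driver(). The driver structure and callbacks below are hypothetical.

#include <linux/module.h>
#include <linux/serio.h>

static int foo_connect(struct serio *serio, struct serio_driver *drv)
{
	return 0;			/* hypothetical probe logic */
}

static void foo_disconnect(struct serio *serio)
{
}

static struct serio_driver foo_drv = {
	.driver		= {
		.name	= "foo",
	},
	.connect	= foo_connect,
	.disconnect	= foo_disconnect,
};
module_serio_driver(foo_drv);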
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0e501714d47f..b534a1be540a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1896,8 +1896,6 @@ static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
{
int delta = 0;
- if (headroom < NET_SKB_PAD)
- headroom = NET_SKB_PAD;
if (headroom > skb_headroom(skb))
delta = headroom - skb_headroom(skb);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a595dce6b0c7..67d5d94b783a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -242,7 +242,7 @@ size_t ksize(const void *);
*/
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
- if (size != 0 && n > ULONG_MAX / size)
+ if (size != 0 && n > SIZE_MAX / size)
return NULL;
return __kmalloc(n * size, flags);
}
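[Editor's note] The product n * size is computed in size_t, so SIZE_MAX is the correct overflow bound; ULONG_MAX is only equivalent on platforms where unsigned long and size_t share a width. A hedged caller sketch, with struct foo hypothetical:

#include <linux/slab.h>

static int foo_alloc_table(struct foo ***out, size_t nr)
{
	struct foo **slots;

	/* Returns NULL instead of silently wrapping if nr * sizeof(*slots)
	 * would overflow. */
	slots = kmalloc_array(nr, sizeof(*slots), GFP_KERNEL);
	if (!slots)
		return -ENOMEM;
	*out = slots;
	return 0;
}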
diff --git a/include/linux/spi/orion_spi.h b/include/linux/spi/orion_spi.h
deleted file mode 100644
index b4d9fa6f797c..000000000000
--- a/include/linux/spi/orion_spi.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * orion_spi.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __LINUX_SPI_ORION_SPI_H
-#define __LINUX_SPI_ORION_SPI_H
-
-struct orion_spi_info {
- u32 tclk; /* no <linux/clk.h> support yet */
-};
-
-
-#endif
diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h
new file mode 100644
index 000000000000..900f0e328235
--- /dev/null
+++ b/include/linux/spi/rspi.h
@@ -0,0 +1,31 @@
+/*
+ * Renesas SPI driver
+ *
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef __LINUX_SPI_RENESAS_SPI_H__
+#define __LINUX_SPI_RENESAS_SPI_H__
+
+struct rspi_plat_data {
+ unsigned int dma_tx_id;
+ unsigned int dma_rx_id;
+
+ unsigned dma_width_16bit:1; /* DMAC read/write width = 16-bit */
+};
+
+#endif
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index d27683180025..bc14bd738ade 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -188,7 +188,6 @@ struct ssb_sprom {
struct ssb_boardinfo {
u16 vendor;
u16 type;
- u8 rev;
};
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index 40b1ef8595ee..a0525019e1d1 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -228,6 +228,7 @@
#define SSB_SPROM1_AGAIN_BG_SHIFT 0
#define SSB_SPROM1_AGAIN_A 0xFF00 /* A-PHY */
#define SSB_SPROM1_AGAIN_A_SHIFT 8
+#define SSB_SPROM1_CCODE 0x0076
/* SPROM Revision 2 (inherits from rev 1) */
#define SSB_SPROM2_BFLHI 0x0038 /* Boardflags (high 16 bits) */
@@ -267,6 +268,7 @@
#define SSB_SPROM3_OFDMGPO 0x107A /* G-PHY OFDM Power Offset (4 bytes, BigEndian) */
/* SPROM Revision 4 */
+#define SSB_SPROM4_BOARDREV 0x0042 /* Board revision */
#define SSB_SPROM4_BFLLO 0x0044 /* Boardflags (low 16 bits) */
#define SSB_SPROM4_BFLHI 0x0046 /* Board Flags Hi */
#define SSB_SPROM4_BFL2LO 0x0048 /* Board flags 2 (low 16 bits) */
@@ -389,6 +391,11 @@
#define SSB_SPROM8_GPIOB_P2 0x00FF /* Pin 2 */
#define SSB_SPROM8_GPIOB_P3 0xFF00 /* Pin 3 */
#define SSB_SPROM8_GPIOB_P3_SHIFT 8
+#define SSB_SPROM8_LEDDC 0x009A
+#define SSB_SPROM8_LEDDC_ON 0xFF00 /* oncount */
+#define SSB_SPROM8_LEDDC_ON_SHIFT 8
+#define SSB_SPROM8_LEDDC_OFF 0x00FF /* offcount */
+#define SSB_SPROM8_LEDDC_OFF_SHIFT 0
#define SSB_SPROM8_ANTAVAIL 0x009C /* Antenna available bitfields*/
#define SSB_SPROM8_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */
#define SSB_SPROM8_ANTAVAIL_A_SHIFT 8
@@ -404,6 +411,13 @@
#define SSB_SPROM8_AGAIN2_SHIFT 0
#define SSB_SPROM8_AGAIN3 0xFF00 /* Antenna 3 */
#define SSB_SPROM8_AGAIN3_SHIFT 8
+#define SSB_SPROM8_TXRXC 0x00A2
+#define SSB_SPROM8_TXRXC_TXCHAIN 0x000f
+#define SSB_SPROM8_TXRXC_TXCHAIN_SHIFT 0
+#define SSB_SPROM8_TXRXC_RXCHAIN 0x00f0
+#define SSB_SPROM8_TXRXC_RXCHAIN_SHIFT 4
+#define SSB_SPROM8_TXRXC_SWITCH 0xff00
+#define SSB_SPROM8_TXRXC_SWITCH_SHIFT 8
#define SSB_SPROM8_RSSIPARM2G 0x00A4 /* RSSI params for 2GHz */
#define SSB_SPROM8_RSSISMF2G 0x000F
#define SSB_SPROM8_RSSISMC2G 0x00F0
@@ -430,6 +444,7 @@
#define SSB_SPROM8_TRI5GH_SHIFT 8
#define SSB_SPROM8_RXPO 0x00AC /* RX power offsets */
#define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */
+#define SSB_SPROM8_RXPO2G_SHIFT 0
#define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */
#define SSB_SPROM8_RXPO5G_SHIFT 8
#define SSB_SPROM8_FEM2G 0x00AE
@@ -445,10 +460,38 @@
#define SSB_SROM8_FEM_ANTSWLUT 0xF800
#define SSB_SROM8_FEM_ANTSWLUT_SHIFT 11
#define SSB_SPROM8_THERMAL 0x00B2
-#define SSB_SPROM8_MPWR_RAWTS 0x00B4
-#define SSB_SPROM8_TS_SLP_OPT_CORRX 0x00B6
-#define SSB_SPROM8_FOC_HWIQ_IQSWP 0x00B8
-#define SSB_SPROM8_PHYCAL_TEMPDELTA 0x00BA
+#define SSB_SPROM8_THERMAL_OFFSET 0x00ff
+#define SSB_SPROM8_THERMAL_OFFSET_SHIFT 0
+#define SSB_SPROM8_THERMAL_TRESH 0xff00
+#define SSB_SPROM8_THERMAL_TRESH_SHIFT 8
+/* Temp sense related entries */
+#define SSB_SPROM8_RAWTS 0x00B4
+#define SSB_SPROM8_RAWTS_RAWTEMP 0x01ff
+#define SSB_SPROM8_RAWTS_RAWTEMP_SHIFT 0
+#define SSB_SPROM8_RAWTS_MEASPOWER 0xfe00
+#define SSB_SPROM8_RAWTS_MEASPOWER_SHIFT 9
+#define SSB_SPROM8_OPT_CORRX 0x00B6
+#define SSB_SPROM8_OPT_CORRX_TEMP_SLOPE 0x00ff
+#define SSB_SPROM8_OPT_CORRX_TEMP_SLOPE_SHIFT 0
+#define SSB_SPROM8_OPT_CORRX_TEMPCORRX 0xfc00
+#define SSB_SPROM8_OPT_CORRX_TEMPCORRX_SHIFT 10
+#define SSB_SPROM8_OPT_CORRX_TEMP_OPTION 0x0300
+#define SSB_SPROM8_OPT_CORRX_TEMP_OPTION_SHIFT 8
+/* FOC: frequency offset correction, HWIQ: H/W IQCAL enable, IQSWP: IQ CAL swap disable */
+#define SSB_SPROM8_HWIQ_IQSWP 0x00B8
+#define SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR 0x000f
+#define SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR_SHIFT 0
+#define SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP 0x0010
+#define SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT 4
+#define SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL 0x0020
+#define SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT 5
+#define SSB_SPROM8_TEMPDELTA 0x00BA
+#define SSB_SPROM8_TEMPDELTA_PHYCAL 0x00ff
+#define SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT 0
+#define SSB_SPROM8_TEMPDELTA_PERIOD 0x0f00
+#define SSB_SPROM8_TEMPDELTA_PERIOD_SHIFT 8
+#define SSB_SPROM8_TEMPDELTA_HYSTERESIS 0xf000
+#define SSB_SPROM8_TEMPDELTA_HYSTERESIS_SHIFT 12
/* There are 4 blocks with power info sharing the same layout */
#define SSB_SROM8_PWR_INFO_CORE0 0x00C0
@@ -513,6 +556,16 @@
#define SSB_SPROM8_OFDM5GLPO 0x014A /* 5.2GHz OFDM power offset */
#define SSB_SPROM8_OFDM5GHPO 0x014E /* 5.8GHz OFDM power offset */
+#define SSB_SPROM8_2G_MCSPO 0x0152
+#define SSB_SPROM8_5G_MCSPO 0x0162
+#define SSB_SPROM8_5GL_MCSPO 0x0172
+#define SSB_SPROM8_5GH_MCSPO 0x0182
+
+#define SSB_SPROM8_CDDPO 0x0192
+#define SSB_SPROM8_STBCPO 0x0194
+#define SSB_SPROM8_BW40PO 0x0196
+#define SSB_SPROM8_BWDUPPO 0x0198
+
/* Values for boardflags_lo read from SPROM */
#define SSB_BFL_BTCOEXIST 0x0001 /* implements Bluetooth coexistence */
#define SSB_BFL_PACTRL 0x0002 /* GPIO 9 controlling the PA */
diff --git a/include/linux/stmp_device.h b/include/linux/stmp_device.h
new file mode 100644
index 000000000000..6cf7ec9547cf
--- /dev/null
+++ b/include/linux/stmp_device.h
@@ -0,0 +1,20 @@
+/*
+ * basic functions for devices following the "stmp" style register layout
+ *
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __STMP_DEVICE_H__
+#define __STMP_DEVICE_H__
+
+#define STMP_OFFSET_REG_SET 0x4
+#define STMP_OFFSET_REG_CLR 0x8
+#define STMP_OFFSET_REG_TOG 0xc
+
+extern int stmp_reset_block(void __iomem *);
+#endif /* __STMP_DEVICE_H__ */
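[Editor's note] The set/clear/toggle offsets mirror the usual MXS-style register banks; a sketch of how a driver might use them, with the register offset and bit purely illustrative.

#include <linux/io.h>
#include <linux/stmp_device.h>

#define FOO_CTRL	0x00		/* hypothetical register */
#define FOO_CTRL_RUN	(1 << 0)	/* hypothetical bit */

static void foo_start(void __iomem *base)
{
	/* Reset the block (assumes the CTRL register sits at offset 0). */
	stmp_reset_block(base + FOO_CTRL);

	/* Set and later clear a bit without a read-modify-write cycle. */
	writel(FOO_CTRL_RUN, base + FOO_CTRL + STMP_OFFSET_REG_SET);
	writel(FOO_CTRL_RUN, base + FOO_CTRL + STMP_OFFSET_REG_CLR);
}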
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 548790e9113b..2c54683b91de 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -16,7 +16,6 @@
#include <linux/sunrpc/cache.h>
#include <linux/hash.h>
-#define SVC_CRED_NGROUPS 32
struct svc_cred {
uid_t cr_uid;
gid_t cr_gid;
@@ -131,7 +130,7 @@ extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *ne
extern struct auth_domain *auth_domain_find(char *name);
extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr);
extern int auth_unix_forget_old(struct auth_domain *dom);
-extern void svcauth_unix_purge(void);
+extern void svcauth_unix_purge(struct net *net);
extern void svcauth_unix_info_release(struct svc_xprt *xpt);
extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b1fd5c7925fe..b6661933e252 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -221,8 +221,8 @@ extern unsigned int nr_free_pagecache_pages(void);
/* linux/mm/swap.c */
extern void __lru_cache_add(struct page *, enum lru_list lru);
extern void lru_cache_add_lru(struct page *, enum lru_list lru);
-extern void lru_add_page_tail(struct zone* zone,
- struct page *page, struct page *page_tail);
+extern void lru_add_page_tail(struct page *page, struct page *page_tail,
+ struct lruvec *lruvec);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
@@ -251,7 +251,7 @@ static inline void lru_cache_add_file(struct page *page)
/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file);
+extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
@@ -351,31 +351,14 @@ extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
+extern int page_swapcount(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
-/* linux/mm/thrash.c */
-extern struct mm_struct *swap_token_mm;
-extern void grab_swap_token(struct mm_struct *);
-extern void __put_swap_token(struct mm_struct *);
-extern void disable_swap_token(struct mem_cgroup *memcg);
-
-static inline int has_swap_token(struct mm_struct *mm)
-{
- return (mm == swap_token_mm);
-}
-
-static inline void put_swap_token(struct mm_struct *mm)
-{
- if (has_swap_token(mm))
- __put_swap_token(mm);
-}
-
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
-extern int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep);
#else
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
@@ -462,6 +445,11 @@ static inline void delete_from_swap_cache(struct page *page)
{
}
+static inline int page_swapcount(struct page *page)
+{
+ return 0;
+}
+
#define reuse_swap_page(page) (page_mapcount(page) == 1)
static inline int try_to_free_swap(struct page *page)
@@ -476,37 +464,11 @@ static inline swp_entry_t get_swap_page(void)
return entry;
}
-/* linux/mm/thrash.c */
-static inline void put_swap_token(struct mm_struct *mm)
-{
-}
-
-static inline void grab_swap_token(struct mm_struct *mm)
-{
-}
-
-static inline int has_swap_token(struct mm_struct *mm)
-{
- return 0;
-}
-
-static inline void disable_swap_token(struct mem_cgroup *memcg)
-{
-}
-
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
{
}
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-static inline int
-mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
-{
- return 0;
-}
-#endif
-
#endif /* CONFIG_SWAP */
#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 3de3acb84a95..19439c75c5b2 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -858,4 +858,6 @@ asmlinkage long sys_process_vm_writev(pid_t pid,
unsigned long riovcnt,
unsigned long flags);
+asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type,
+ unsigned long idx1, unsigned long idx2);
#endif
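[Editor's note] There is no glibc wrapper, so user space would invoke the new syscall directly; a sketch that checks whether two processes share a file description, assuming <linux/kcmp.h> from these headers and an architecture that defines __NR_kcmp.

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kcmp.h>

/* Returns 0 when fd1 in pid1 and fd2 in pid2 refer to the same struct file. */
static long kcmp_files(pid_t pid1, pid_t pid2, int fd1, int fd2)
{
	return syscall(__NR_kcmp, pid1, pid2, KCMP_FILE,
		       (unsigned long)fd1, (unsigned long)fd2);
}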
diff --git a/include/linux/time.h b/include/linux/time.h
index 33a92ead4d88..179f4d6755fc 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -167,7 +167,6 @@ extern void get_monotonic_boottime(struct timespec *ts);
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
extern int timekeeping_valid_for_hres(void);
extern u64 timekeeping_max_deferment(void);
-extern void timekeeping_leap_insert(int leapsecond);
extern int timekeeping_inject_offset(struct timespec *ts);
struct tms;
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
new file mode 100644
index 000000000000..efe4b3308c74
--- /dev/null
+++ b/include/linux/uprobes.h
@@ -0,0 +1,165 @@
+#ifndef _LINUX_UPROBES_H
+#define _LINUX_UPROBES_H
+/*
+ * User-space Probes (UProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2008-2012
+ * Authors:
+ * Srikar Dronamraju
+ * Jim Keniston
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/rbtree.h>
+
+struct vm_area_struct;
+struct mm_struct;
+struct inode;
+
+#ifdef CONFIG_ARCH_SUPPORTS_UPROBES
+# include <asm/uprobes.h>
+#endif
+
+/* flags that denote/change uprobes behaviour */
+
+/* Have a copy of original instruction */
+#define UPROBE_COPY_INSN 0x1
+
+/* Don't run handlers while the first register / last unregister is in progress */
+#define UPROBE_RUN_HANDLER 0x2
+/* Can skip singlestep */
+#define UPROBE_SKIP_SSTEP 0x4
+
+struct uprobe_consumer {
+ int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
+ /*
+ * filter is optional; if a filter exists, the handler is run
+ * if and only if the filter returns true.
+ */
+ bool (*filter)(struct uprobe_consumer *self, struct task_struct *task);
+
+ struct uprobe_consumer *next;
+};
+
+#ifdef CONFIG_UPROBES
+enum uprobe_task_state {
+ UTASK_RUNNING,
+ UTASK_BP_HIT,
+ UTASK_SSTEP,
+ UTASK_SSTEP_ACK,
+ UTASK_SSTEP_TRAPPED,
+};
+
+/*
+ * uprobe_task: Metadata of a task while it singlesteps.
+ */
+struct uprobe_task {
+ enum uprobe_task_state state;
+ struct arch_uprobe_task autask;
+
+ struct uprobe *active_uprobe;
+
+ unsigned long xol_vaddr;
+ unsigned long vaddr;
+};
+
+/*
+ * On a breakpoint hit, the thread contends for a slot. It frees the
+ * slot after singlestep. Currently a fixed number of slots are
+ * allocated.
+ */
+struct xol_area {
+ wait_queue_head_t wq; /* if all slots are busy */
+ atomic_t slot_count; /* number of in-use slots */
+ unsigned long *bitmap; /* 0 = free slot */
+ struct page *page;
+
+ /*
+ * We keep the vma's vm_start rather than a pointer to the vma
+ * itself. The probed process or a naughty kernel module could make
+ * the vma go away, and we must handle that reasonably gracefully.
+ */
+ unsigned long vaddr; /* Page(s) of instruction slots */
+};
+
+struct uprobes_state {
+ struct xol_area *xol_area;
+ atomic_t count;
+};
+extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
+extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr, bool verify);
+extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
+extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+extern int uprobe_mmap(struct vm_area_struct *vma);
+extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void uprobe_free_utask(struct task_struct *t);
+extern void uprobe_copy_process(struct task_struct *t);
+extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
+extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
+extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
+extern void uprobe_notify_resume(struct pt_regs *regs);
+extern bool uprobe_deny_signal(void);
+extern bool __weak arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
+extern void uprobe_clear_state(struct mm_struct *mm);
+extern void uprobe_reset_state(struct mm_struct *mm);
+#else /* !CONFIG_UPROBES */
+struct uprobes_state {
+};
+static inline int
+uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
+{
+ return -ENOSYS;
+}
+static inline void
+uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
+{
+}
+static inline int uprobe_mmap(struct vm_area_struct *vma)
+{
+ return 0;
+}
+static inline void
+uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+}
+static inline void uprobe_notify_resume(struct pt_regs *regs)
+{
+}
+static inline bool uprobe_deny_signal(void)
+{
+ return false;
+}
+static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return 0;
+}
+static inline void uprobe_free_utask(struct task_struct *t)
+{
+}
+static inline void uprobe_copy_process(struct task_struct *t)
+{
+}
+static inline void uprobe_clear_state(struct mm_struct *mm)
+{
+}
+static inline void uprobe_reset_state(struct mm_struct *mm)
+{
+}
+#endif /* !CONFIG_UPROBES */
+#endif /* _LINUX_UPROBES_H */
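[Editor's note] This is an in-kernel API, not a user-facing one; attaching a consumer to a probed instruction would look roughly like the sketch below. Obtaining the inode and the file offset of the instruction is left to the caller, and the handler is hypothetical.

#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>

static int foo_uprobe_handler(struct uprobe_consumer *self,
			      struct pt_regs *regs)
{
	pr_info("probed instruction hit\n");
	return 0;
}

static struct uprobe_consumer foo_consumer = {
	.handler = foo_uprobe_handler,
};

/* inode: the probed binary's inode; offset: file offset of the instruction */
static int foo_attach(struct inode *inode, loff_t offset)
{
	return uprobe_register(inode, offset, &foo_consumer);
}

static void foo_detach(struct inode *inode, loff_t offset)
{
	uprobe_unregister(inode, offset, &foo_consumer);
}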
diff --git a/include/linux/v4l2-dv-timings.h b/include/linux/v4l2-dv-timings.h
new file mode 100644
index 000000000000..9ef8172e5ed0
--- /dev/null
+++ b/include/linux/v4l2-dv-timings.h
@@ -0,0 +1,816 @@
+/*
+ * V4L2 DV timings header.
+ *
+ * Copyright (C) 2012 Hans Verkuil <hans.verkuil@cisco.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef _V4L2_DV_TIMINGS_H
+#define _V4L2_DV_TIMINGS_H
+
+#if __GNUC__ < 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ < 6))
+/* Sadly gcc versions older than 4.6 have a bug in how they initialize
+ anonymous unions where they require additional curly brackets.
+ This violates the C1x standard. This workaround adds the curly brackets
+ if needed. */
+#define V4L2_INIT_BT_TIMINGS(_width, args...) \
+ { .bt = { _width , ## args } }
+#else
+#define V4L2_INIT_BT_TIMINGS(_width, args...) \
+ .bt = { _width , ## args }
+#endif
+
+/* CEA-861-E timings (i.e. standard HDTV timings) */
+
+#define V4L2_DV_BT_CEA_640X480P59_94 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(640, 480, 0, 0, \
+ 25175000, 16, 96, 48, 10, 2, 33, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_720X480P59_94 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(720, 480, 0, 0, \
+ 27000000, 16, 62, 60, 9, 6, 30, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_720X576P50 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(720, 576, 0, 0, \
+ 27000000, 12, 64, 68, 5, 5, 39, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_1280X720P24 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 59400000, 1760, 40, 220, 5, 5, 20, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_1280X720P25 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 2420, 40, 220, 5, 5, 20, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_1280X720P30 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 1760, 40, 220, 5, 5, 20, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_1280X720P50 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 440, 40, 220, 5, 5, 20, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_1280X720P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 110, 40, 220, 5, 5, 20, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_1920X1080P24 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 638, 44, 148, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_1920X1080P25 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 528, 44, 148, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_1920X1080P30 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 88, 44, 148, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_1920X1080I50 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 1, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 528, 44, 148, 2, 5, 15, 2, 5, 16, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_HALF_LINE) \
+}
+
+#define V4L2_DV_BT_CEA_1920X1080P50 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 148500000, 528, 44, 148, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_1920X1080I60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 1, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 88, 44, 148, 2, 5, 15, 2, 5, 16, \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_HALF_LINE) \
+}
+
+#define V4L2_DV_BT_CEA_1920X1080P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 148500000, 88, 44, 148, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+
+/* VESA Discrete Monitor Timings as per version 1.0, revision 12 */
+
+#define V4L2_DV_BT_DMT_640X350P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(640, 350, 0, V4L2_DV_HSYNC_POS_POL, \
+ 31500000, 32, 64, 96, 32, 3, 60, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_640X400P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(640, 400, 0, V4L2_DV_VSYNC_POS_POL, \
+ 31500000, 32, 64, 96, 1, 3, 41, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_720X400P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(720, 400, 0, V4L2_DV_VSYNC_POS_POL, \
+ 35500000, 36, 72, 108, 1, 3, 42, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+/* VGA resolutions */
+#define V4L2_DV_BT_DMT_640X480P60 V4L2_DV_BT_CEA_640X480P59_94
+
+#define V4L2_DV_BT_DMT_640X480P72 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(640, 480, 0, 0, \
+ 31500000, 24, 40, 128, 9, 3, 28, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_640X480P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(640, 480, 0, 0, \
+ 31500000, 16, 64, 120, 1, 3, 16, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_640X480P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(640, 480, 0, 0, \
+ 36000000, 56, 56, 80, 1, 3, 25, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+/* SVGA resolutions */
+#define V4L2_DV_BT_DMT_800X600P56 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(800, 600, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 36000000, 24, 72, 128, 1, 2, 22, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_800X600P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(800, 600, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 40000000, 40, 128, 88, 1, 4, 23, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_800X600P72 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(800, 600, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 50000000, 56, 120, 64, 37, 6, 23, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_800X600P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(800, 600, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 49500000, 16, 80, 160, 1, 3, 21, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_800X600P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(800, 600, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 56250000, 32, 64, 152, 1, 3, 27, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_800X600P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(800, 600, 0, V4L2_DV_HSYNC_POS_POL, \
+ 73250000, 48, 32, 80, 3, 4, 29, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_848X480P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(848, 480, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 33750000, 16, 112, 112, 6, 8, 23, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1024X768I43 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1024, 768, 1, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 44900000, 8, 176, 56, 0, 4, 20, 0, 4, 21, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+/* XGA resolutions */
+#define V4L2_DV_BT_DMT_1024X768P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1024, 768, 0, 0, \
+ 65000000, 24, 136, 160, 3, 6, 29, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1024X768P70 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1024, 768, 0, 0, \
+ 75000000, 24, 136, 144, 3, 6, 29, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1024X768P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1024, 768, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 78750000, 16, 96, 176, 1, 3, 28, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1024X768P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1024, 768, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 94500000, 48, 96, 208, 1, 3, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1024X768P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1024, 768, 0, V4L2_DV_HSYNC_POS_POL, \
+ 115500000, 48, 32, 80, 3, 4, 38, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+/* XGA+ resolution */
+#define V4L2_DV_BT_DMT_1152X864P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1152, 864, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 108000000, 64, 128, 256, 1, 3, 32, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X720P60 V4L2_DV_BT_CEA_1280X720P60
+
+/* WXGA resolutions */
+#define V4L2_DV_BT_DMT_1280X768P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 768, 0, V4L2_DV_HSYNC_POS_POL, \
+ 68250000, 48, 32, 80, 3, 7, 12, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1280X768P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 768, 0, V4L2_DV_VSYNC_POS_POL, \
+ 79500000, 64, 128, 192, 3, 7, 20, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X768P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 768, 0, V4L2_DV_VSYNC_POS_POL, \
+ 102250000, 80, 128, 208, 3, 7, 27, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X768P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 768, 0, V4L2_DV_VSYNC_POS_POL, \
+ 117500000, 80, 136, 216, 3, 7, 31, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X768P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 768, 0, V4L2_DV_HSYNC_POS_POL, \
+ 140250000, 48, 32, 80, 3, 7, 35, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1280X800P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 800, 0, V4L2_DV_HSYNC_POS_POL, \
+ 71000000, 48, 32, 80, 3, 6, 14, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1280X800P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 800, 0, V4L2_DV_VSYNC_POS_POL, \
+ 83500000, 72, 128, 200, 3, 6, 22, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X800P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 800, 0, V4L2_DV_VSYNC_POS_POL, \
+ 106500000, 80, 128, 208, 3, 6, 29, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X800P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 800, 0, V4L2_DV_VSYNC_POS_POL, \
+ 122500000, 80, 136, 216, 3, 6, 34, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X800P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 800, 0, V4L2_DV_HSYNC_POS_POL, \
+ 146250000, 48, 32, 80, 3, 6, 38, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1280X960P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 960, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 108000000, 96, 112, 312, 1, 3, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X960P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 960, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 148500000, 64, 160, 224, 1, 3, 47, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X960P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 960, 0, V4L2_DV_HSYNC_POS_POL, \
+ 175500000, 48, 32, 80, 3, 4, 50, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+/* SXGA resolutions */
+#define V4L2_DV_BT_DMT_1280X1024P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 1024, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 108000000, 48, 112, 248, 1, 3, 38, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X1024P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 1024, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 135000000, 16, 144, 248, 1, 3, 38, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X1024P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 1024, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 157500000, 64, 160, 224, 1, 3, 44, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X1024P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 1024, 0, V4L2_DV_HSYNC_POS_POL, \
+ 187250000, 48, 32, 80, 3, 7, 50, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1360X768P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1360, 768, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 85500000, 64, 112, 256, 3, 6, 18, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1360X768P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1360, 768, 0, V4L2_DV_HSYNC_POS_POL, \
+ 148250000, 48, 32, 80, 3, 5, 37, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1366X768P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1366, 768, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 85500000, 70, 143, 213, 3, 3, 24, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1366X768P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1366, 768, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 72000000, 14, 56, 64, 1, 3, 28, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+/* SXGA+ resolutions */
+#define V4L2_DV_BT_DMT_1400X1050P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1400, 1050, 0, V4L2_DV_HSYNC_POS_POL, \
+ 101000000, 48, 32, 80, 3, 4, 23, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1400X1050P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1400, 1050, 0, V4L2_DV_VSYNC_POS_POL, \
+ 121750000, 88, 144, 232, 3, 4, 32, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1400X1050P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1400, 1050, 0, V4L2_DV_VSYNC_POS_POL, \
+ 156000000, 104, 144, 248, 3, 4, 42, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1400X1050P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1400, 1050, 0, V4L2_DV_VSYNC_POS_POL, \
+ 179500000, 104, 152, 256, 3, 4, 48, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1400X1050P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1400, 1050, 0, V4L2_DV_HSYNC_POS_POL, \
+ 208000000, 48, 32, 80, 3, 4, 55, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+/* WXGA+ resolutions */
+#define V4L2_DV_BT_DMT_1440X900P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1440, 900, 0, V4L2_DV_HSYNC_POS_POL, \
+ 88750000, 48, 32, 80, 3, 6, 17, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1440X900P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1440, 900, 0, V4L2_DV_VSYNC_POS_POL, \
+ 106500000, 80, 152, 232, 3, 6, 25, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1440X900P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1440, 900, 0, V4L2_DV_VSYNC_POS_POL, \
+ 136750000, 96, 152, 248, 3, 6, 33, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1440X900P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1440, 900, 0, V4L2_DV_VSYNC_POS_POL, \
+ 157000000, 104, 152, 256, 3, 6, 39, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1440X900P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1440, 900, 0, V4L2_DV_HSYNC_POS_POL, \
+ 182750000, 48, 32, 80, 3, 6, 44, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1600X900P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1600, 900, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 108000000, 24, 80, 96, 1, 3, 96, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+/* UXGA resolutions */
+#define V4L2_DV_BT_DMT_1600X1200P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1600, 1200, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 162000000, 64, 192, 304, 1, 3, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1600X1200P65 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1600, 1200, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 175500000, 64, 192, 304, 1, 3, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1600X1200P70 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1600, 1200, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 189000000, 64, 192, 304, 1, 3, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1600X1200P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1600, 1200, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 202500000, 64, 192, 304, 1, 3, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1600X1200P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1600, 1200, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 229500000, 64, 192, 304, 1, 3, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1600X1200P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1600, 1200, 0, V4L2_DV_HSYNC_POS_POL, \
+ 268250000, 48, 32, 80, 3, 4, 64, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+/* WSXGA+ resolutions */
+#define V4L2_DV_BT_DMT_1680X1050P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1680, 1050, 0, V4L2_DV_HSYNC_POS_POL, \
+ 119000000, 48, 32, 80, 3, 6, 21, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1680X1050P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1680, 1050, 0, V4L2_DV_VSYNC_POS_POL, \
+ 146250000, 104, 176, 280, 3, 6, 30, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1680X1050P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1680, 1050, 0, V4L2_DV_VSYNC_POS_POL, \
+ 187000000, 120, 176, 296, 3, 6, 40, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1680X1050P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1680, 1050, 0, V4L2_DV_VSYNC_POS_POL, \
+ 214750000, 128, 176, 304, 3, 6, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1680X1050P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1680, 1050, 0, V4L2_DV_HSYNC_POS_POL, \
+ 245500000, 48, 32, 80, 3, 6, 53, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1792X1344P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1792, 1344, 0, V4L2_DV_VSYNC_POS_POL, \
+ 204750000, 128, 200, 328, 1, 3, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1792X1344P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1792, 1344, 0, V4L2_DV_VSYNC_POS_POL, \
+ 261000000, 96, 216, 352, 1, 3, 69, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1792X1344P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1792, 1344, 0, V4L2_DV_HSYNC_POS_POL, \
+ 333250000, 48, 32, 80, 3, 4, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1856X1392P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1856, 1392, 0, V4L2_DV_VSYNC_POS_POL, \
+ 218250000, 96, 224, 352, 1, 3, 43, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1856X1392P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1856, 1392, 0, V4L2_DV_VSYNC_POS_POL, \
+ 288000000, 128, 224, 352, 1, 3, 104, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1856X1392P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1856, 1392, 0, V4L2_DV_HSYNC_POS_POL, \
+ 356500000, 48, 32, 80, 3, 4, 75, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1080P60 V4L2_DV_BT_CEA_1920X1080P60
+
+/* WUXGA resolutions */
+#define V4L2_DV_BT_DMT_1920X1200P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1200, 0, V4L2_DV_HSYNC_POS_POL, \
+ 154000000, 48, 32, 80, 3, 6, 26, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1200P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1200, 0, V4L2_DV_VSYNC_POS_POL, \
+ 193250000, 136, 200, 336, 3, 6, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1200P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1200, 0, V4L2_DV_VSYNC_POS_POL, \
+ 245250000, 136, 208, 344, 3, 6, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1200P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1200, 0, V4L2_DV_VSYNC_POS_POL, \
+ 281250000, 144, 208, 352, 3, 6, 53, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1200P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1200, 0, V4L2_DV_HSYNC_POS_POL, \
+ 317000000, 48, 32, 80, 3, 6, 62, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1440P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1440, 0, V4L2_DV_VSYNC_POS_POL, \
+ 234000000, 128, 208, 344, 1, 3, 56, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1440P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1440, 0, V4L2_DV_VSYNC_POS_POL, \
+ 297000000, 144, 224, 352, 1, 3, 56, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1440P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1440, 0, V4L2_DV_HSYNC_POS_POL, \
+ 380500000, 48, 32, 80, 3, 4, 78, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_2048X1152P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2048, 1152, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 162000000, 26, 80, 96, 1, 3, 44, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+/* WQXGA resolutions */
+#define V4L2_DV_BT_DMT_2560X1600P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2560, 1600, 0, V4L2_DV_HSYNC_POS_POL, \
+ 268500000, 48, 32, 80, 3, 6, 37, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_2560X1600P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2560, 1600, 0, V4L2_DV_VSYNC_POS_POL, \
+ 348500000, 192, 280, 472, 3, 6, 49, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_2560X1600P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2560, 1600, 0, V4L2_DV_VSYNC_POS_POL, \
+ 443250000, 208, 280, 488, 3, 6, 63, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_2560X1600P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2560, 1600, 0, V4L2_DV_VSYNC_POS_POL, \
+ 505250000, 208, 280, 488, 3, 6, 73, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_2560X1600P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2560, 1600, 0, V4L2_DV_HSYNC_POS_POL, \
+ 552750000, 48, 32, 80, 3, 6, 85, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1366X768P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1366, 768, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 85500000, 70, 143, 213, 3, 3, 24, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#endif
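
The DMT macros above expand to complete struct v4l2_dv_timings initializers, so a driver can build its supported-timings table straight from them. A minimal, hypothetical sketch (the table name and the particular formats chosen are illustrative only, and the macros are assumed to live in <linux/v4l2-dv-timings.h>):

#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>

/* Hypothetical driver-side table built from the DMT macros above. */
static const struct v4l2_dv_timings demo_dv_timings[] = {
	V4L2_DV_BT_DMT_1920X1200P60_RB,
	V4L2_DV_BT_DMT_2048X1152P60_RB,
	V4L2_DV_BT_DMT_2560X1600P60_RB,
};

A VIDIOC_ENUM_DV_TIMINGS handler can then simply range-check the requested index and copy demo_dv_timings[index] into the reply.
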
diff --git a/include/linux/v4l2-subdev.h b/include/linux/v4l2-subdev.h
index ed29cbbebfef..812019ee1e06 100644
--- a/include/linux/v4l2-subdev.h
+++ b/include/linux/v4l2-subdev.h
@@ -123,6 +123,43 @@ struct v4l2_subdev_frame_interval_enum {
__u32 reserved[9];
};
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE (1 << 0)
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE (1 << 1)
+#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG (1 << 2)
+
+/* active cropping area */
+#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL 0x0000
+/* cropping bounds */
+#define V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS 0x0002
+/* current composing area */
+#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL 0x0100
+/* composing bounds */
+#define V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS 0x0102
+
+
+/**
+ * struct v4l2_subdev_selection - selection info
+ *
+ * @which: either V4L2_SUBDEV_FORMAT_ACTIVE or V4L2_SUBDEV_FORMAT_TRY
+ * @pad: pad number, as reported by the media API
+ * @target: selection target, used to choose one of possible rectangles
+ * @flags: constraint flags
+ * @r: coordinates of the selection window
+ * @reserved: for future use, set to zero for now
+ *
+ * Hardware may use multiple helper windows to process a video stream.
+ * This structure is used to exchange these selection areas between
+ * an application and a driver.
+ */
+struct v4l2_subdev_selection {
+ __u32 which;
+ __u32 pad;
+ __u32 target;
+ __u32 flags;
+ struct v4l2_rect r;
+ __u32 reserved[8];
+};
+
#define VIDIOC_SUBDEV_G_FMT _IOWR('V', 4, struct v4l2_subdev_format)
#define VIDIOC_SUBDEV_S_FMT _IOWR('V', 5, struct v4l2_subdev_format)
#define VIDIOC_SUBDEV_G_FRAME_INTERVAL \
@@ -137,5 +174,9 @@ struct v4l2_subdev_frame_interval_enum {
_IOWR('V', 75, struct v4l2_subdev_frame_interval_enum)
#define VIDIOC_SUBDEV_G_CROP _IOWR('V', 59, struct v4l2_subdev_crop)
#define VIDIOC_SUBDEV_S_CROP _IOWR('V', 60, struct v4l2_subdev_crop)
+#define VIDIOC_SUBDEV_G_SELECTION \
+ _IOWR('V', 61, struct v4l2_subdev_selection)
+#define VIDIOC_SUBDEV_S_SELECTION \
+ _IOWR('V', 62, struct v4l2_subdev_selection)
#endif
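
For reference, the new selection ioctls are exercised from user space roughly as below. This is a hedged sketch: the subdev node path and the pad number are assumptions, and error handling is minimal.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/v4l2-subdev.h>

int main(void)
{
	struct v4l2_subdev_selection sel;
	int fd = open("/dev/v4l-subdev0", O_RDWR);	/* assumed node */

	if (fd < 0)
		return 1;
	memset(&sel, 0, sizeof(sel));
	sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	sel.pad = 0;					/* assumed pad */
	sel.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL;
	if (ioctl(fd, VIDIOC_SUBDEV_G_SELECTION, &sel) == 0)
		printf("crop: %ux%u@%d,%d\n", sel.r.width, sel.r.height,
		       sel.r.left, sel.r.top);
	return 0;
}
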
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 4b9a7f596f92..b455c7c212eb 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -28,13 +28,19 @@ struct vga_switcheroo_handler {
int (*get_client_id)(struct pci_dev *pdev);
};
+struct vga_switcheroo_client_ops {
+ void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
+ void (*reprobe)(struct pci_dev *dev);
+ bool (*can_switch)(struct pci_dev *dev);
+};
#if defined(CONFIG_VGA_SWITCHEROO)
void vga_switcheroo_unregister_client(struct pci_dev *dev);
int vga_switcheroo_register_client(struct pci_dev *dev,
- void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
- void (*reprobe)(struct pci_dev *dev),
- bool (*can_switch)(struct pci_dev *dev));
+ const struct vga_switcheroo_client_ops *ops);
+int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops,
+ int id, bool active);
void vga_switcheroo_client_fb_set(struct pci_dev *dev,
struct fb_info *info);
@@ -48,11 +54,12 @@ int vga_switcheroo_process_delayed_switch(void);
static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
static inline int vga_switcheroo_register_client(struct pci_dev *dev,
- void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
- void (*reprobe)(struct pci_dev *dev),
- bool (*can_switch)(struct pci_dev *dev)) { return 0; }
+ const struct vga_switcheroo_client_ops *ops) { return 0; }
static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
+static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops,
+ int id, bool active) { return 0; }
static inline void vga_switcheroo_unregister_handler(void) {}
static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
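
With the three callbacks folded into struct vga_switcheroo_client_ops, a GPU driver registers as in the sketch below. The demo_* names are placeholders, not functions from any real driver.

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

static void demo_set_gpu_state(struct pci_dev *pdev,
			       enum vga_switcheroo_state state)
{
	/* power the GPU up or down according to 'state' */
}

static bool demo_can_switch(struct pci_dev *pdev)
{
	return true;	/* e.g. no KMS clients currently open */
}

static const struct vga_switcheroo_client_ops demo_switcheroo_ops = {
	.set_gpu_state	= demo_set_gpu_state,
	.reprobe	= NULL,		/* optional */
	.can_switch	= demo_can_switch,
};

static int demo_register_switcheroo(struct pci_dev *pdev)
{
	return vga_switcheroo_register_client(pdev, &demo_switcheroo_ops);
}
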
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index b572f80bdfd5..0ee42d9acdc0 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -31,6 +31,7 @@
#ifndef LINUX_VGA_H
#define LINUX_VGA_H
+#include <video/vga.h>
/* Legacy VGA regions */
#define VGA_RSRC_NONE 0x00
@@ -182,7 +183,13 @@ extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
*/
#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
+#ifdef CONFIG_VGA_ARB
extern struct pci_dev *vga_default_device(void);
+extern void vga_set_default_device(struct pci_dev *pdev);
+#else
+static inline struct pci_dev *vga_default_device(void) { return NULL; };
+static inline void vga_set_default_device(struct pci_dev *pdev) { };
+#endif
#endif
/**
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index c9c9a4680cc5..370d11106c11 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -292,10 +292,10 @@ struct v4l2_pix_format {
__u32 width;
__u32 height;
__u32 pixelformat;
- enum v4l2_field field;
+ __u32 field; /* enum v4l2_field */
__u32 bytesperline; /* for padding, zero if unused */
__u32 sizeimage;
- enum v4l2_colorspace colorspace;
+ __u32 colorspace; /* enum v4l2_colorspace */
__u32 priv; /* private data, depends on pixelformat */
};
@@ -378,7 +378,10 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */
#define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */
/* 10bit raw bayer DPCM compressed to 8 bits */
+#define V4L2_PIX_FMT_SBGGR10DPCM8 v4l2_fourcc('b', 'B', 'A', '8')
+#define V4L2_PIX_FMT_SGBRG10DPCM8 v4l2_fourcc('b', 'G', 'A', '8')
#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
+#define V4L2_PIX_FMT_SRGGB10DPCM8 v4l2_fourcc('b', 'R', 'A', '8')
/*
* 10bit raw bayer, expanded to 16 bits
* xxxxrrrrrrrrrrxxxxgggggggggg xxxxggggggggggxxxxbbbbbbbbbb...
@@ -432,7 +435,7 @@ struct v4l2_pix_format {
*/
struct v4l2_fmtdesc {
__u32 index; /* Format number */
- enum v4l2_buf_type type; /* buffer type */
+ __u32 type; /* enum v4l2_buf_type */
__u32 flags;
__u8 description[32]; /* Description string */
__u32 pixelformat; /* Format fourcc */
@@ -573,8 +576,8 @@ struct v4l2_jpegcompression {
*/
struct v4l2_requestbuffers {
__u32 count;
- enum v4l2_buf_type type;
- enum v4l2_memory memory;
+ __u32 type; /* enum v4l2_buf_type */
+ __u32 memory; /* enum v4l2_memory */
__u32 reserved[2];
};
@@ -610,15 +613,17 @@ struct v4l2_plane {
/**
* struct v4l2_buffer - video buffer info
* @index: id number of the buffer
- * @type: buffer type (type == *_MPLANE for multiplanar buffers)
+ * @type: enum v4l2_buf_type; buffer type (type == *_MPLANE for
+ * multiplanar buffers);
* @bytesused: number of bytes occupied by data in the buffer (payload);
* unused (set to 0) for multiplanar buffers
* @flags: buffer informational flags
- * @field: field order of the image in the buffer
+ * @field: enum v4l2_field; field order of the image in the buffer
* @timestamp: frame timestamp
* @timecode: frame timecode
* @sequence: sequence count of this frame
- * @memory: the method, in which the actual video data is passed
+ * @memory: enum v4l2_memory; the method, in which the actual video data is
+ * passed
* @offset: for non-multiplanar buffers with memory == V4L2_MEMORY_MMAP;
* offset from the start of the device memory for this plane,
* (or a "cookie" that should be passed to mmap() as offset)
@@ -636,16 +641,16 @@ struct v4l2_plane {
*/
struct v4l2_buffer {
__u32 index;
- enum v4l2_buf_type type;
+ __u32 type;
__u32 bytesused;
__u32 flags;
- enum v4l2_field field;
+ __u32 field;
struct timeval timestamp;
struct v4l2_timecode timecode;
__u32 sequence;
/* memory location */
- enum v4l2_memory memory;
+ __u32 memory;
union {
__u32 offset;
unsigned long userptr;
@@ -708,7 +713,7 @@ struct v4l2_clip {
struct v4l2_window {
struct v4l2_rect w;
- enum v4l2_field field;
+ __u32 field; /* enum v4l2_field */
__u32 chromakey;
struct v4l2_clip __user *clips;
__u32 clipcount;
@@ -745,14 +750,14 @@ struct v4l2_outputparm {
* I N P U T I M A G E C R O P P I N G
*/
struct v4l2_cropcap {
- enum v4l2_buf_type type;
+ __u32 type; /* enum v4l2_buf_type */
struct v4l2_rect bounds;
struct v4l2_rect defrect;
struct v4l2_fract pixelaspect;
};
struct v4l2_crop {
- enum v4l2_buf_type type;
+ __u32 type; /* enum v4l2_buf_type */
struct v4l2_rect c;
};
@@ -939,6 +944,9 @@ struct v4l2_standard {
__u32 reserved[4];
};
+/* The DV Preset API is deprecated in favor of the DV Timings API.
+ New drivers shouldn't use this anymore! */
+
/*
* V I D E O T I M I N G S D V P R E S E T
*/
@@ -986,29 +994,56 @@ struct v4l2_dv_enum_preset {
* D V B T T I M I N G S
*/
-/* BT.656/BT.1120 timing data */
+/** struct v4l2_bt_timings - BT.656/BT.1120 timing data
+ * @width: total width of the active video in pixels
+ * @height: total height of the active video in lines
+ * @interlaced: Interlaced or progressive
+ * @polarities: Positive or negative polarities
+ * @pixelclock: Pixel clock in HZ. Ex. 74.25MHz->74250000
+ * @hfrontporch:Horizontal front porch in pixels
+ * @hsync: Horizontal Sync length in pixels
+ * @hbackporch: Horizontal back porch in pixels
+ * @vfrontporch:Vertical front porch in lines
+ * @vsync: Vertical Sync length in lines
+ * @vbackporch: Vertical back porch in lines
+ * @il_vfrontporch:Vertical front porch for the even field
+ * (aka field 2) of interlaced field formats
+ * @il_vsync: Vertical Sync length for the even field
+ * (aka field 2) of interlaced field formats
+ * @il_vbackporch:Vertical back porch for the even field
+ * (aka field 2) of interlaced field formats
+ * @standards: Standards the timing belongs to
+ * @flags: Flags
+ * @reserved: Reserved fields, must be zeroed.
+ *
+ * A note regarding vertical interlaced timings: height refers to the total
+ * height of the active video frame (= two fields). The blanking timings refer
+ * to the blanking of each field. So the height of the total frame is
+ * calculated as follows:
+ *
+ * tot_height = height + vfrontporch + vsync + vbackporch +
+ * il_vfrontporch + il_vsync + il_vbackporch
+ *
+ * The active height of each field is height / 2.
+ */
struct v4l2_bt_timings {
- __u32 width; /* width in pixels */
- __u32 height; /* height in lines */
- __u32 interlaced; /* Interlaced or progressive */
- __u32 polarities; /* Positive or negative polarity */
- __u64 pixelclock; /* Pixel clock in HZ. Ex. 74.25MHz->74250000 */
- __u32 hfrontporch; /* Horizpontal front porch in pixels */
- __u32 hsync; /* Horizontal Sync length in pixels */
- __u32 hbackporch; /* Horizontal back porch in pixels */
- __u32 vfrontporch; /* Vertical front porch in pixels */
- __u32 vsync; /* Vertical Sync length in lines */
- __u32 vbackporch; /* Vertical back porch in lines */
- __u32 il_vfrontporch; /* Vertical front porch for bottom field of
- * interlaced field formats
- */
- __u32 il_vsync; /* Vertical sync length for bottom field of
- * interlaced field formats
- */
- __u32 il_vbackporch; /* Vertical back porch for bottom field of
- * interlaced field formats
- */
- __u32 reserved[16];
+ __u32 width;
+ __u32 height;
+ __u32 interlaced;
+ __u32 polarities;
+ __u64 pixelclock;
+ __u32 hfrontporch;
+ __u32 hsync;
+ __u32 hbackporch;
+ __u32 vfrontporch;
+ __u32 vsync;
+ __u32 vbackporch;
+ __u32 il_vfrontporch;
+ __u32 il_vsync;
+ __u32 il_vbackporch;
+ __u32 standards;
+ __u32 flags;
+ __u32 reserved[14];
} __attribute__ ((packed));
/* Interlaced or progressive format */
@@ -1019,8 +1054,42 @@ struct v4l2_bt_timings {
#define V4L2_DV_VSYNC_POS_POL 0x00000001
#define V4L2_DV_HSYNC_POS_POL 0x00000002
-
-/* DV timings */
+/* Timings standards */
+#define V4L2_DV_BT_STD_CEA861 (1 << 0) /* CEA-861 Digital TV Profile */
+#define V4L2_DV_BT_STD_DMT (1 << 1) /* VESA Discrete Monitor Timings */
+#define V4L2_DV_BT_STD_CVT (1 << 2) /* VESA Coordinated Video Timings */
+#define V4L2_DV_BT_STD_GTF (1 << 3) /* VESA Generalized Timings Formula */
+
+/* Flags */
+
+/* CVT/GTF specific: timing uses reduced blanking (CVT) or the 'Secondary
+ GTF' curve (GTF). In both cases the horizontal and/or vertical blanking
+ intervals are reduced, allowing a higher resolution over the same
+ bandwidth. This is a read-only flag. */
+#define V4L2_DV_FL_REDUCED_BLANKING (1 << 0)
+/* CEA-861 specific: set for CEA-861 formats with a framerate of a multiple
+ of six. These formats can be optionally played at 1 / 1.001 speed.
+ This is a read-only flag. */
+#define V4L2_DV_FL_CAN_REDUCE_FPS (1 << 1)
+/* CEA-861 specific: only valid for video transmitters, the flag is cleared
+ by receivers.
+ If the framerate of the format is a multiple of six, then the pixelclock
+ used to set up the transmitter is divided by 1.001 to make it compatible
+ with 60 Hz based standards such as NTSC and PAL-M that use a framerate of
+ 29.97 Hz. Otherwise this flag is cleared. If the transmitter can't generate
+ such frequencies, then the flag will also be cleared. */
+#define V4L2_DV_FL_REDUCED_FPS (1 << 2)
+/* Specific to interlaced formats: if set, then field 1 is really one half-line
+ longer and field 2 is really one half-line shorter, so each field has
+ exactly the same number of half-lines. Whether half-lines can be detected
+ or used depends on the hardware. */
+#define V4L2_DV_FL_HALF_LINE (1 << 3)
+
+
+/** struct v4l2_dv_timings - DV timings
+ * @type: the type of the timings
+ * @bt: BT656/1120 timings
+ */
struct v4l2_dv_timings {
__u32 type;
union {
@@ -1032,6 +1101,64 @@ struct v4l2_dv_timings {
/* Values for the type field */
#define V4L2_DV_BT_656_1120 0 /* BT.656/1120 timing type */
+
+/** struct v4l2_enum_dv_timings - DV timings enumeration
+ * @index: enumeration index
+ * @reserved: must be zeroed
+ * @timings: the timings for the given index
+ */
+struct v4l2_enum_dv_timings {
+ __u32 index;
+ __u32 reserved[3];
+ struct v4l2_dv_timings timings;
+};
+
+/** struct v4l2_bt_timings_cap - BT.656/BT.1120 timing capabilities
+ * @min_width: width in pixels
+ * @max_width: width in pixels
+ * @min_height: height in lines
+ * @max_height: height in lines
+ * @min_pixelclock: Pixel clock in HZ. Ex. 74.25MHz->74250000
+ * @max_pixelclock: Pixel clock in HZ. Ex. 74.25MHz->74250000
+ * @standards: Supported standards
+ * @capabilities: Supported capabilities
+ * @reserved: Must be zeroed
+ */
+struct v4l2_bt_timings_cap {
+ __u32 min_width;
+ __u32 max_width;
+ __u32 min_height;
+ __u32 max_height;
+ __u64 min_pixelclock;
+ __u64 max_pixelclock;
+ __u32 standards;
+ __u32 capabilities;
+ __u32 reserved[16];
+} __attribute__ ((packed));
+
+/* Supports interlaced formats */
+#define V4L2_DV_BT_CAP_INTERLACED (1 << 0)
+/* Supports progressive formats */
+#define V4L2_DV_BT_CAP_PROGRESSIVE (1 << 1)
+/* Supports CVT/GTF reduced blanking */
+#define V4L2_DV_BT_CAP_REDUCED_BLANKING (1 << 2)
+/* Supports custom formats */
+#define V4L2_DV_BT_CAP_CUSTOM (1 << 3)
+
+/** struct v4l2_dv_timings_cap - DV timings capabilities
+ * @type: the type of the timings (same as in struct v4l2_dv_timings)
+ * @bt: the BT656/1120 timings capabilities
+ */
+struct v4l2_dv_timings_cap {
+ __u32 type;
+ __u32 reserved[3];
+ union {
+ struct v4l2_bt_timings_cap bt;
+ __u32 raw_data[32];
+ };
+};
+
+
/*
* V I D E O I N P U T S
*/
@@ -1040,7 +1167,7 @@ struct v4l2_input {
__u8 name[32]; /* Label */
__u32 type; /* Type of input */
__u32 audioset; /* Associated audios (bitfield) */
- __u32 tuner; /* Associated tuner */
+ __u32 tuner; /* enum v4l2_tuner_type */
v4l2_std_id std;
__u32 status;
__u32 capabilities;
@@ -1137,6 +1264,8 @@ struct v4l2_ext_controls {
#define V4L2_CTRL_CLASS_FM_TX 0x009b0000 /* FM Modulator control class */
#define V4L2_CTRL_CLASS_FLASH 0x009c0000 /* Camera flash controls */
#define V4L2_CTRL_CLASS_JPEG 0x009d0000 /* JPEG-compression controls */
+#define V4L2_CTRL_CLASS_IMAGE_SOURCE 0x009e0000 /* Image source controls */
+#define V4L2_CTRL_CLASS_IMAGE_PROC 0x009f0000 /* Image processing controls */
#define V4L2_CTRL_ID_MASK (0x0fffffff)
#define V4L2_CTRL_ID2CLASS(id) ((id) & 0x0fff0000UL)
@@ -1151,12 +1280,13 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_CTRL_CLASS = 6,
V4L2_CTRL_TYPE_STRING = 7,
V4L2_CTRL_TYPE_BITMASK = 8,
+ V4L2_CTRL_TYPE_INTEGER_MENU = 9,
};
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
struct v4l2_queryctrl {
__u32 id;
- enum v4l2_ctrl_type type;
+ __u32 type; /* enum v4l2_ctrl_type */
__u8 name[32]; /* Whatever */
__s32 minimum; /* Note signedness */
__s32 maximum;
@@ -1170,9 +1300,12 @@ struct v4l2_queryctrl {
struct v4l2_querymenu {
__u32 id;
__u32 index;
- __u8 name[32]; /* Whatever */
+ union {
+ __u8 name[32]; /* Whatever */
+ __s64 value;
+ };
__u32 reserved;
-};
+} __attribute__ ((packed));
/* Control flags */
#define V4L2_CTRL_FLAG_DISABLED 0x0001
@@ -1237,16 +1370,22 @@ enum v4l2_power_line_frequency {
#define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30)
#define V4L2_CID_COLORFX (V4L2_CID_BASE+31)
enum v4l2_colorfx {
- V4L2_COLORFX_NONE = 0,
- V4L2_COLORFX_BW = 1,
- V4L2_COLORFX_SEPIA = 2,
- V4L2_COLORFX_NEGATIVE = 3,
- V4L2_COLORFX_EMBOSS = 4,
- V4L2_COLORFX_SKETCH = 5,
- V4L2_COLORFX_SKY_BLUE = 6,
- V4L2_COLORFX_GRASS_GREEN = 7,
- V4L2_COLORFX_SKIN_WHITEN = 8,
- V4L2_COLORFX_VIVID = 9,
+ V4L2_COLORFX_NONE = 0,
+ V4L2_COLORFX_BW = 1,
+ V4L2_COLORFX_SEPIA = 2,
+ V4L2_COLORFX_NEGATIVE = 3,
+ V4L2_COLORFX_EMBOSS = 4,
+ V4L2_COLORFX_SKETCH = 5,
+ V4L2_COLORFX_SKY_BLUE = 6,
+ V4L2_COLORFX_GRASS_GREEN = 7,
+ V4L2_COLORFX_SKIN_WHITEN = 8,
+ V4L2_COLORFX_VIVID = 9,
+ V4L2_COLORFX_AQUA = 10,
+ V4L2_COLORFX_ART_FREEZE = 11,
+ V4L2_COLORFX_SILHOUETTE = 12,
+ V4L2_COLORFX_SOLARIZATION = 13,
+ V4L2_COLORFX_ANTIQUE = 14,
+ V4L2_COLORFX_SET_CBCR = 15,
};
#define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32)
#define V4L2_CID_BAND_STOP_FILTER (V4L2_CID_BASE+33)
@@ -1263,9 +1402,10 @@ enum v4l2_colorfx {
#define V4L2_CID_MIN_BUFFERS_FOR_OUTPUT (V4L2_CID_BASE+40)
#define V4L2_CID_ALPHA_COMPONENT (V4L2_CID_BASE+41)
+#define V4L2_CID_COLORFX_CBCR (V4L2_CID_BASE+42)
/* last CID + 1 */
-#define V4L2_CID_LASTP1 (V4L2_CID_BASE+42)
+#define V4L2_CID_LASTP1 (V4L2_CID_BASE+43)
/* MPEG-class control IDs defined by V4L2 */
#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
@@ -1689,6 +1829,78 @@ enum v4l2_exposure_auto_type {
#define V4L2_CID_IRIS_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+17)
#define V4L2_CID_IRIS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+18)
+#define V4L2_CID_AUTO_EXPOSURE_BIAS (V4L2_CID_CAMERA_CLASS_BASE+19)
+
+#define V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE (V4L2_CID_CAMERA_CLASS_BASE+20)
+enum v4l2_auto_n_preset_white_balance {
+ V4L2_WHITE_BALANCE_MANUAL = 0,
+ V4L2_WHITE_BALANCE_AUTO = 1,
+ V4L2_WHITE_BALANCE_INCANDESCENT = 2,
+ V4L2_WHITE_BALANCE_FLUORESCENT = 3,
+ V4L2_WHITE_BALANCE_FLUORESCENT_H = 4,
+ V4L2_WHITE_BALANCE_HORIZON = 5,
+ V4L2_WHITE_BALANCE_DAYLIGHT = 6,
+ V4L2_WHITE_BALANCE_FLASH = 7,
+ V4L2_WHITE_BALANCE_CLOUDY = 8,
+ V4L2_WHITE_BALANCE_SHADE = 9,
+};
+
+#define V4L2_CID_WIDE_DYNAMIC_RANGE (V4L2_CID_CAMERA_CLASS_BASE+21)
+#define V4L2_CID_IMAGE_STABILIZATION (V4L2_CID_CAMERA_CLASS_BASE+22)
+
+#define V4L2_CID_ISO_SENSITIVITY (V4L2_CID_CAMERA_CLASS_BASE+23)
+#define V4L2_CID_ISO_SENSITIVITY_AUTO (V4L2_CID_CAMERA_CLASS_BASE+24)
+enum v4l2_iso_sensitivity_auto_type {
+ V4L2_ISO_SENSITIVITY_MANUAL = 0,
+ V4L2_ISO_SENSITIVITY_AUTO = 1,
+};
+
+#define V4L2_CID_EXPOSURE_METERING (V4L2_CID_CAMERA_CLASS_BASE+25)
+enum v4l2_exposure_metering {
+ V4L2_EXPOSURE_METERING_AVERAGE = 0,
+ V4L2_EXPOSURE_METERING_CENTER_WEIGHTED = 1,
+ V4L2_EXPOSURE_METERING_SPOT = 2,
+};
+
+#define V4L2_CID_SCENE_MODE (V4L2_CID_CAMERA_CLASS_BASE+26)
+enum v4l2_scene_mode {
+ V4L2_SCENE_MODE_NONE = 0,
+ V4L2_SCENE_MODE_BACKLIGHT = 1,
+ V4L2_SCENE_MODE_BEACH_SNOW = 2,
+ V4L2_SCENE_MODE_CANDLE_LIGHT = 3,
+ V4L2_SCENE_MODE_DAWN_DUSK = 4,
+ V4L2_SCENE_MODE_FALL_COLORS = 5,
+ V4L2_SCENE_MODE_FIREWORKS = 6,
+ V4L2_SCENE_MODE_LANDSCAPE = 7,
+ V4L2_SCENE_MODE_NIGHT = 8,
+ V4L2_SCENE_MODE_PARTY_INDOOR = 9,
+ V4L2_SCENE_MODE_PORTRAIT = 10,
+ V4L2_SCENE_MODE_SPORTS = 11,
+ V4L2_SCENE_MODE_SUNSET = 12,
+ V4L2_SCENE_MODE_TEXT = 13,
+};
+
+#define V4L2_CID_3A_LOCK (V4L2_CID_CAMERA_CLASS_BASE+27)
+#define V4L2_LOCK_EXPOSURE (1 << 0)
+#define V4L2_LOCK_WHITE_BALANCE (1 << 1)
+#define V4L2_LOCK_FOCUS (1 << 2)
+
+#define V4L2_CID_AUTO_FOCUS_START (V4L2_CID_CAMERA_CLASS_BASE+28)
+#define V4L2_CID_AUTO_FOCUS_STOP (V4L2_CID_CAMERA_CLASS_BASE+29)
+#define V4L2_CID_AUTO_FOCUS_STATUS (V4L2_CID_CAMERA_CLASS_BASE+30)
+#define V4L2_AUTO_FOCUS_STATUS_IDLE (0 << 0)
+#define V4L2_AUTO_FOCUS_STATUS_BUSY (1 << 0)
+#define V4L2_AUTO_FOCUS_STATUS_REACHED (1 << 1)
+#define V4L2_AUTO_FOCUS_STATUS_FAILED (1 << 2)
+
+#define V4L2_CID_AUTO_FOCUS_RANGE (V4L2_CID_CAMERA_CLASS_BASE+31)
+enum v4l2_auto_focus_range {
+ V4L2_AUTO_FOCUS_RANGE_AUTO = 0,
+ V4L2_AUTO_FOCUS_RANGE_NORMAL = 1,
+ V4L2_AUTO_FOCUS_RANGE_MACRO = 2,
+ V4L2_AUTO_FOCUS_RANGE_INFINITY = 3,
+};
+
/* FM Modulator class control IDs */
#define V4L2_CID_FM_TX_CLASS_BASE (V4L2_CTRL_CLASS_FM_TX | 0x900)
#define V4L2_CID_FM_TX_CLASS (V4L2_CTRL_CLASS_FM_TX | 1)
@@ -1782,13 +1994,28 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_JPEG_ACTIVE_MARKER_DQT (1 << 17)
#define V4L2_JPEG_ACTIVE_MARKER_DHT (1 << 18)
+/* Image source controls */
+#define V4L2_CID_IMAGE_SOURCE_CLASS_BASE (V4L2_CTRL_CLASS_IMAGE_SOURCE | 0x900)
+#define V4L2_CID_IMAGE_SOURCE_CLASS (V4L2_CTRL_CLASS_IMAGE_SOURCE | 1)
+
+#define V4L2_CID_VBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 1)
+#define V4L2_CID_HBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 2)
+#define V4L2_CID_ANALOGUE_GAIN (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 3)
+
+/* Image processing controls */
+#define V4L2_CID_IMAGE_PROC_CLASS_BASE (V4L2_CTRL_CLASS_IMAGE_PROC | 0x900)
+#define V4L2_CID_IMAGE_PROC_CLASS (V4L2_CTRL_CLASS_IMAGE_PROC | 1)
+
+#define V4L2_CID_LINK_FREQ (V4L2_CID_IMAGE_PROC_CLASS_BASE + 1)
+#define V4L2_CID_PIXEL_RATE (V4L2_CID_IMAGE_PROC_CLASS_BASE + 2)
+
/*
* T U N I N G
*/
struct v4l2_tuner {
__u32 index;
__u8 name[32];
- enum v4l2_tuner_type type;
+ __u32 type; /* enum v4l2_tuner_type */
__u32 capability;
__u32 rangelow;
__u32 rangehigh;
@@ -1838,14 +2065,14 @@ struct v4l2_modulator {
struct v4l2_frequency {
__u32 tuner;
- enum v4l2_tuner_type type;
+ __u32 type; /* enum v4l2_tuner_type */
__u32 frequency;
__u32 reserved[8];
};
struct v4l2_hw_freq_seek {
__u32 tuner;
- enum v4l2_tuner_type type;
+ __u32 type; /* enum v4l2_tuner_type */
__u32 seek_upward;
__u32 wrap_around;
__u32 spacing;
@@ -2056,7 +2283,7 @@ struct v4l2_sliced_vbi_cap {
(equals frame lines 313-336 for 625 line video
standards, 263-286 for 525 line standards) */
__u16 service_lines[2][24];
- enum v4l2_buf_type type;
+ __u32 type; /* enum v4l2_buf_type */
__u32 reserved[3]; /* must be 0 */
};
@@ -2137,8 +2364,8 @@ struct v4l2_plane_pix_format {
* @width: image width in pixels
* @height: image height in pixels
* @pixelformat: little endian four character code (fourcc)
- * @field: field order (for interlaced video)
- * @colorspace: supplemental to pixelformat
+ * @field: enum v4l2_field; field order (for interlaced video)
+ * @colorspace: enum v4l2_colorspace; supplemental to pixelformat
* @plane_fmt: per-plane information
* @num_planes: number of planes for this format
*/
@@ -2146,8 +2373,8 @@ struct v4l2_pix_format_mplane {
__u32 width;
__u32 height;
__u32 pixelformat;
- enum v4l2_field field;
- enum v4l2_colorspace colorspace;
+ __u32 field;
+ __u32 colorspace;
struct v4l2_plane_pix_format plane_fmt[VIDEO_MAX_PLANES];
__u8 num_planes;
@@ -2156,7 +2383,7 @@ struct v4l2_pix_format_mplane {
/**
* struct v4l2_format - stream data format
- * @type: type of the data stream
+ * @type: enum v4l2_buf_type; type of the data stream
* @pix: definition of an image format
* @pix_mp: definition of a multiplanar image format
* @win: definition of an overlaid image
@@ -2165,7 +2392,7 @@ struct v4l2_pix_format_mplane {
* @raw_data: placeholder for future extensions and custom formats
*/
struct v4l2_format {
- enum v4l2_buf_type type;
+ __u32 type;
union {
struct v4l2_pix_format pix; /* V4L2_BUF_TYPE_VIDEO_CAPTURE */
struct v4l2_pix_format_mplane pix_mp; /* V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE */
@@ -2179,7 +2406,7 @@ struct v4l2_format {
/* Stream type-dependent parameters
*/
struct v4l2_streamparm {
- enum v4l2_buf_type type;
+ __u32 type; /* enum v4l2_buf_type */
union {
struct v4l2_captureparm capture;
struct v4l2_outputparm output;
@@ -2292,14 +2519,14 @@ struct v4l2_dbg_chip_ident {
* @index: on return, index of the first created buffer
* @count: entry: number of requested buffers,
* return: number of created buffers
- * @memory: buffer memory type
+ * @memory: enum v4l2_memory; buffer memory type
* @format: frame format, for which buffers are requested
* @reserved: future extensions
*/
struct v4l2_create_buffers {
__u32 index;
__u32 count;
- enum v4l2_memory memory;
+ __u32 memory;
struct v4l2_format format;
__u32 reserved[8];
};
@@ -2356,8 +2583,8 @@ struct v4l2_create_buffers {
#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format)
#define VIDIOC_ENUMAUDIO _IOWR('V', 65, struct v4l2_audio)
#define VIDIOC_ENUMAUDOUT _IOWR('V', 66, struct v4l2_audioout)
-#define VIDIOC_G_PRIORITY _IOR('V', 67, enum v4l2_priority)
-#define VIDIOC_S_PRIORITY _IOW('V', 68, enum v4l2_priority)
+#define VIDIOC_G_PRIORITY _IOR('V', 67, __u32) /* enum v4l2_priority */
+#define VIDIOC_S_PRIORITY _IOW('V', 68, __u32) /* enum v4l2_priority */
#define VIDIOC_G_SLICED_VBI_CAP _IOWR('V', 69, struct v4l2_sliced_vbi_cap)
#define VIDIOC_LOG_STATUS _IO('V', 70)
#define VIDIOC_G_EXT_CTRLS _IOWR('V', 71, struct v4l2_ext_controls)
@@ -2384,6 +2611,9 @@ struct v4l2_create_buffers {
#endif
#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
+
+/* These four DV Preset ioctls are deprecated in favor of the DV Timings
+ ioctls. */
#define VIDIOC_ENUM_DV_PRESETS _IOWR('V', 83, struct v4l2_dv_enum_preset)
#define VIDIOC_S_DV_PRESET _IOWR('V', 84, struct v4l2_dv_preset)
#define VIDIOC_G_DV_PRESET _IOWR('V', 85, struct v4l2_dv_preset)
@@ -2408,6 +2638,12 @@ struct v4l2_create_buffers {
#define VIDIOC_DECODER_CMD _IOWR('V', 96, struct v4l2_decoder_cmd)
#define VIDIOC_TRY_DECODER_CMD _IOWR('V', 97, struct v4l2_decoder_cmd)
+/* Experimental, these three ioctls may change over the next couple of kernel
+ versions. */
+#define VIDIOC_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings)
+#define VIDIOC_QUERY_DV_TIMINGS _IOR('V', 99, struct v4l2_dv_timings)
+#define VIDIOC_DV_TIMINGS_CAP _IOWR('V', 100, struct v4l2_dv_timings_cap)
+
/* Reminder: when adding new ioctls please add support for them to
drivers/media/video/v4l2-compat-ioctl32.c as well! */
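
Tying the new timing structures and ioctls together, a capture application can detect the incoming format and apply the frame-size formula documented in struct v4l2_bt_timings, as sketched below. /dev/video0 is an assumed node and error handling is minimal.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_dv_timings t;
	int fd = open("/dev/video0", O_RDWR);	/* assumed node */

	if (fd < 0)
		return 1;
	memset(&t, 0, sizeof(t));
	if (ioctl(fd, VIDIOC_QUERY_DV_TIMINGS, &t) == 0 &&
	    t.type == V4L2_DV_BT_656_1120) {
		const struct v4l2_bt_timings *bt = &t.bt;
		unsigned tot_width = bt->width + bt->hfrontporch +
				     bt->hsync + bt->hbackporch;
		unsigned tot_height = bt->height + bt->vfrontporch +
				      bt->vsync + bt->vbackporch +
				      bt->il_vfrontporch + bt->il_vsync +
				      bt->il_vbackporch;

		printf("%ux%u active, %ux%u total, %llu Hz pixel clock\n",
		       bt->width, bt->height, tot_width, tot_height,
		       (unsigned long long)bt->pixelclock);
	}
	return 0;
}

For progressive timings the il_* fields are zero, so the same expression works for both cases.
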
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index ac40716b44e9..da70f0facd2b 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -45,6 +45,8 @@ struct watchdog_info {
#define WDIOF_SETTIMEOUT 0x0080 /* Set timeout (in seconds) */
#define WDIOF_MAGICCLOSE 0x0100 /* Supports magic close char */
#define WDIOF_PRETIMEOUT 0x0200 /* Pretimeout (in seconds), get/set */
+#define WDIOF_ALARMONLY 0x0400 /* Watchdog triggers a management or
+ other external alarm not a reboot */
#define WDIOF_KEEPALIVEPING 0x8000 /* Keep alive ping reply */
#define WDIOS_DISABLECARD 0x0001 /* Turn off the watchdog timer */
@@ -54,6 +56,8 @@ struct watchdog_info {
#ifdef __KERNEL__
#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
struct watchdog_ops;
struct watchdog_device;
@@ -67,6 +71,8 @@ struct watchdog_device;
* @status: The routine that shows the status of the watchdog device.
* @set_timeout:The routine for setting the watchdog devices timeout value.
* @get_timeleft:The routine that get's the time that's left before a reset.
+ * @ref: The ref operation for dyn. allocated watchdog_device structs
+ * @unref: The unref operation for dyn. allocated watchdog_device structs
* @ioctl: The routines that handles extra ioctl calls.
*
* The watchdog_ops structure contains a list of low-level operations
@@ -84,11 +90,17 @@ struct watchdog_ops {
unsigned int (*status)(struct watchdog_device *);
int (*set_timeout)(struct watchdog_device *, unsigned int);
unsigned int (*get_timeleft)(struct watchdog_device *);
+ void (*ref)(struct watchdog_device *);
+ void (*unref)(struct watchdog_device *);
long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
};
/** struct watchdog_device - The structure that defines a watchdog device
*
+ * @id: The watchdog's ID. (Allocated by watchdog_register_device)
+ * @cdev: The watchdog's Character device.
+ * @dev: The device for our watchdog
+ * @parent: The parent bus device
* @info: Pointer to a watchdog_info structure.
* @ops: Pointer to the list of watchdog operations.
* @bootstatus: Status of the watchdog device at boot.
@@ -96,6 +108,7 @@ struct watchdog_ops {
* @min_timeout:The watchdog devices minimum timeout value.
* @max_timeout:The watchdog devices maximum timeout value.
* @driver-data:Pointer to the drivers private data.
+ * @lock: Lock for watchdog core internal use only.
* @status: Field that contains the devices internal status bits.
*
* The watchdog_device structure contains all information about a
@@ -103,8 +116,15 @@ struct watchdog_ops {
*
* The driver-data field may not be accessed directly. It must be accessed
* via the watchdog_set_drvdata and watchdog_get_drvdata helpers.
+ *
+ * The lock field is for watchdog core internal use only and should not be
+ * touched.
*/
struct watchdog_device {
+ int id;
+ struct cdev cdev;
+ struct device *dev;
+ struct device *parent;
const struct watchdog_info *info;
const struct watchdog_ops *ops;
unsigned int bootstatus;
@@ -112,12 +132,14 @@ struct watchdog_device {
unsigned int min_timeout;
unsigned int max_timeout;
void *driver_data;
+ struct mutex lock;
unsigned long status;
/* Bit numbers for status flags */
#define WDOG_ACTIVE 0 /* Is the watchdog running/active */
#define WDOG_DEV_OPEN 1 /* Opened via /dev/watchdog ? */
#define WDOG_ALLOW_RELEASE 2 /* Did we receive the magic char ? */
#define WDOG_NO_WAY_OUT 3 /* Is 'nowayout' feature set ? */
+#define WDOG_UNREGISTERED 4 /* Has the device been unregistered */
};
#ifdef CONFIG_WATCHDOG_NOWAYOUT
@@ -128,6 +150,12 @@ struct watchdog_device {
#define WATCHDOG_NOWAYOUT_INIT_STATUS 0
#endif
+/* Use the following function to check whether or not the watchdog is active */
+static inline bool watchdog_active(struct watchdog_device *wdd)
+{
+ return test_bit(WDOG_ACTIVE, &wdd->status);
+}
+
/* Use the following function to set the nowayout feature */
static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool nowayout)
{
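
A minimal driver-side sketch of how the new watchdog_active() helper fits into the existing ops model; the demo_* names are placeholders and registration via watchdog_register_device() is not shown.

#include <linux/module.h>
#include <linux/watchdog.h>

static int demo_wdt_start(struct watchdog_device *wdd)
{
	/* kick the hardware into running mode */
	return 0;
}

static int demo_wdt_stop(struct watchdog_device *wdd)
{
	if (!watchdog_active(wdd))
		return 0;	/* nothing to do */
	/* ... stop the hardware ... */
	return 0;
}

static const struct watchdog_info demo_wdt_info = {
	.options	= WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
	.identity	= "demo-wdt",
};

static const struct watchdog_ops demo_wdt_ops = {
	.owner	= THIS_MODULE,
	.start	= demo_wdt_start,
	.stop	= demo_wdt_stop,
};

static struct watchdog_device demo_wdd = {
	.info		= &demo_wdt_info,
	.ops		= &demo_wdt_ops,
	.min_timeout	= 1,
	.max_timeout	= 60,
};
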
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a2b84f598e2b..6d0a0fcd80e7 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -58,7 +58,6 @@ extern const char *wb_reason_name[];
* in a manner such that unspecified fields are set to zero.
*/
struct writeback_control {
- enum writeback_sync_modes sync_mode;
long nr_to_write; /* Write this many pages, and decrement
this for each page written */
long pages_skipped; /* Pages which were not written */
@@ -71,6 +70,8 @@ struct writeback_control {
loff_t range_start;
loff_t range_end;
+ enum writeback_sync_modes sync_mode;
+
unsigned for_kupdate:1; /* A kupdate writeback */
unsigned for_background:1; /* A background writeback */
unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */
@@ -94,6 +95,7 @@ long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
enum wb_reason reason);
long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
+void inode_wait_for_writeback(struct inode *inode);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
@@ -101,12 +103,6 @@ static inline void wait_on_inode(struct inode *inode)
might_sleep();
wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
}
-static inline void inode_sync_wait(struct inode *inode)
-{
- might_sleep();
- wait_on_bit(&inode->i_state, __I_SYNC, inode_wait,
- TASK_UNINTERRUPTIBLE);
-}
/*
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 29e7bba78ffe..0c16f518ee09 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -46,6 +46,7 @@ struct media_entity_operations {
int (*link_setup)(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags);
+ int (*link_validate)(struct media_link *link);
};
struct media_entity {
@@ -140,8 +141,8 @@ void media_entity_graph_walk_start(struct media_entity_graph *graph,
struct media_entity *entity);
struct media_entity *
media_entity_graph_walk_next(struct media_entity_graph *graph);
-void media_entity_pipeline_start(struct media_entity *entity,
- struct media_pipeline *pipe);
+__must_check int media_entity_pipeline_start(struct media_entity *entity,
+ struct media_pipeline *pipe);
void media_entity_pipeline_stop(struct media_entity *entity);
#define media_entity_call(entity, operation, args...) \
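
Because media_entity_pipeline_start() can now fail and is marked __must_check, streaming start paths have to propagate its result. A hedged sketch (the entity and pipe would come from the driver's own structures):

#include <media/media-entity.h>

static int demo_start_streaming(struct media_entity *entity,
				struct media_pipeline *pipe)
{
	int ret;

	ret = media_entity_pipeline_start(entity, pipe);
	if (ret < 0)
		return ret;	/* e.g. link validation failed */

	/* ... start the hardware; on a later failure call
	 * media_entity_pipeline_stop(entity) to undo this ... */
	return 0;
}
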
diff --git a/include/media/mt9p031.h b/include/media/mt9p031.h
index 96448c7a318b..0c97b19af293 100644
--- a/include/media/mt9p031.h
+++ b/include/media/mt9p031.h
@@ -3,17 +3,18 @@
struct v4l2_subdev;
-enum {
- MT9P031_COLOR_VERSION,
- MT9P031_MONOCHROME_VERSION,
-};
-
+/*
+ * struct mt9p031_platform_data - MT9P031 platform data
+ * @set_xclk: Clock frequency set callback
+ * @reset: Chip reset GPIO (set to -1 if not used)
+ * @ext_freq: Input clock frequency
+ * @target_freq: Pixel clock frequency
+ */
struct mt9p031_platform_data {
int (*set_xclk)(struct v4l2_subdev *subdev, int hz);
- int (*reset)(struct v4l2_subdev *subdev, int active);
- int ext_freq; /* input frequency to the mt9p031 for PLL dividers */
- int target_freq; /* frequency target for the PLL */
- int version; /* MT9P031_COLOR_VERSION or MT9P031_MONOCHROME_VERSION */
+ int reset;
+ int ext_freq;
+ int target_freq;
};
#endif
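
A hypothetical board file would fill the reworked platform data as below; the GPIO number and clock rates are invented for illustration.

#include <media/mt9p031.h>

static int demo_set_xclk(struct v4l2_subdev *subdev, int hz)
{
	/* program the board clock feeding the sensor to 'hz' */
	return 0;
}

static struct mt9p031_platform_data demo_mt9p031_pdata = {
	.set_xclk	= demo_set_xclk,
	.reset		= 98,		/* or -1 if the reset GPIO is unused */
	.ext_freq	= 21000000,
	.target_freq	= 48000000,
};
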
diff --git a/include/media/omap3isp.h b/include/media/omap3isp.h
index 042849a34640..4d94be5226af 100644
--- a/include/media/omap3isp.h
+++ b/include/media/omap3isp.h
@@ -29,6 +29,10 @@
struct i2c_board_info;
struct isp_device;
+#define ISP_XCLK_NONE 0
+#define ISP_XCLK_A 1
+#define ISP_XCLK_B 2
+
enum isp_interface_type {
ISP_INTERFACE_PARALLEL,
ISP_INTERFACE_CSI2A_PHY2,
@@ -87,6 +91,29 @@ enum {
};
/**
+ * struct isp_csiphy_lane: CCP2/CSI2 lane position and polarity
+ * @pos: position of the lane
+ * @pol: polarity of the lane
+ */
+struct isp_csiphy_lane {
+ u8 pos;
+ u8 pol;
+};
+
+#define ISP_CSIPHY1_NUM_DATA_LANES 1
+#define ISP_CSIPHY2_NUM_DATA_LANES 2
+
+/**
+ * struct isp_csiphy_lanes_cfg - CCP2/CSI2 lane configuration
+ * @data: Configuration of one or two data lanes
+ * @clk: Clock lane configuration
+ */
+struct isp_csiphy_lanes_cfg {
+ struct isp_csiphy_lane data[ISP_CSIPHY2_NUM_DATA_LANES];
+ struct isp_csiphy_lane clk;
+};
+
+/**
* struct isp_ccp2_platform_data - CCP2 interface platform data
* @strobe_clk_pol: Strobe/clock polarity
* 0 - Non Inverted, 1 - Inverted
@@ -105,6 +132,7 @@ struct isp_ccp2_platform_data {
unsigned int ccp2_mode:1;
unsigned int phy_layer:1;
unsigned int vpclk_div:2;
+ struct isp_csiphy_lanes_cfg lanecfg;
};
/**
@@ -115,6 +143,7 @@ struct isp_ccp2_platform_data {
struct isp_csi2_platform_data {
unsigned crc:1;
unsigned vpclk_div:2;
+ struct isp_csiphy_lanes_cfg lanecfg;
};
struct isp_subdev_i2c_board_info {
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 8db6741c1256..cfd5163ff7f3 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -62,6 +62,7 @@ void rc_map_init(void);
#define RC_MAP_ANYSEE "rc-anysee"
#define RC_MAP_APAC_VIEWCOMP "rc-apac-viewcomp"
#define RC_MAP_ASUS_PC39 "rc-asus-pc39"
+#define RC_MAP_ASUS_PS3_100 "rc-asus-ps3-100"
#define RC_MAP_ATI_TV_WONDER_HD_600 "rc-ati-tv-wonder-hd-600"
#define RC_MAP_ATI_X10 "rc-ati-x10"
#define RC_MAP_AVERMEDIA_A16D "rc-avermedia-a16d"
@@ -113,6 +114,8 @@ void rc_map_init(void);
#define RC_MAP_LME2510 "rc-lme2510"
#define RC_MAP_MANLI "rc-manli"
#define RC_MAP_MEDION_X10 "rc-medion-x10"
+#define RC_MAP_MEDION_X10_DIGITAINER "rc-medion-x10-digitainer"
+#define RC_MAP_MEDION_X10_OR2X "rc-medion-x10-or2x"
#define RC_MAP_MSI_DIGIVOX_II "rc-msi-digivox-ii"
#define RC_MAP_MSI_DIGIVOX_III "rc-msi-digivox-iii"
#define RC_MAP_MSI_TVANYWHERE_PLUS "rc-msi-tvanywhere-plus"
diff --git a/include/media/s5p_fimc.h b/include/media/s5p_fimc.h
index 688fb3f1dc35..8587aaf73646 100644
--- a/include/media/s5p_fimc.h
+++ b/include/media/s5p_fimc.h
@@ -64,4 +64,20 @@ struct s5p_platform_fimc {
*/
#define S5P_FIMC_TX_END_NOTIFY _IO('e', 0)
+enum fimc_subdev_index {
+ IDX_SENSOR,
+ IDX_CSIS,
+ IDX_FLITE,
+ IDX_FIMC,
+ IDX_MAX,
+};
+
+struct media_pipeline;
+struct v4l2_subdev;
+
+struct fimc_pipeline {
+ struct v4l2_subdev *subdevs[IDX_MAX];
+ struct media_pipeline *m_pipeline;
+};
+
#endif /* S5P_FIMC_H_ */
diff --git a/include/media/saa7146.h b/include/media/saa7146.h
index 0f037e8edf9a..773e527deabe 100644
--- a/include/media/saa7146.h
+++ b/include/media/saa7146.h
@@ -13,12 +13,11 @@
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
#include <linux/vmalloc.h> /* for vmalloc() */
#include <linux/mm.h> /* for vmalloc_to_page() */
-#define SAA7146_VERSION_CODE 0x000600 /* 0.6.0 */
-
#define saa7146_write(sxy,adr,dat) writel((dat),(sxy->mem+(adr)))
#define saa7146_read(sxy,adr) readl(sxy->mem+(adr))
@@ -121,6 +120,7 @@ struct saa7146_dev
struct list_head item;
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
/* different device locks */
spinlock_t slock;
diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
index 4aeff96ff7d8..944ecdf3530f 100644
--- a/include/media/saa7146_vv.h
+++ b/include/media/saa7146_vv.h
@@ -3,6 +3,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
#include <media/saa7146.h>
#include <media/videobuf-dma-sg.h>
@@ -84,21 +85,15 @@ struct saa7146_overlay {
/* per open data */
struct saa7146_fh {
+ /* Must be the first field! */
+ struct v4l2_fh fh;
struct saa7146_dev *dev;
- /* if this is a vbi or capture open */
- enum v4l2_buf_type type;
-
- /* video overlay */
- struct saa7146_overlay ov;
/* video capture */
struct videobuf_queue video_q;
- struct v4l2_pix_format video_fmt;
/* vbi capture */
struct videobuf_queue vbi_q;
- struct v4l2_vbi_format vbi_fmt;
- struct timer_list vbi_read_timeout;
unsigned int resources; /* resource management for device open */
};
@@ -109,7 +104,9 @@ struct saa7146_fh {
struct saa7146_vv
{
/* vbi capture */
- struct saa7146_dmaqueue vbi_q;
+ struct saa7146_dmaqueue vbi_dmaq;
+ struct v4l2_vbi_format vbi_fmt;
+ struct timer_list vbi_read_timeout;
/* vbi workaround interrupt queue */
wait_queue_head_t vbi_wq;
int vbi_fieldcount;
@@ -119,13 +116,14 @@ struct saa7146_vv
struct saa7146_fh *video_fh;
/* video overlay */
+ struct saa7146_overlay ov;
struct v4l2_framebuffer ov_fb;
struct saa7146_format *ov_fmt;
- struct saa7146_overlay *ov_data;
struct saa7146_fh *ov_suspend;
/* video capture */
- struct saa7146_dmaqueue video_q;
+ struct saa7146_dmaqueue video_dmaq;
+ struct v4l2_pix_format video_fmt;
enum v4l2_field last_field;
/* common: fixme? shouldn't this be in saa7146_fh?
@@ -163,7 +161,8 @@ struct saa7146_ext_vv
int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
/* the extension can override this */
- struct v4l2_ioctl_ops ops;
+ struct v4l2_ioctl_ops vid_ops;
+ struct v4l2_ioctl_ops vbi_ops;
/* pointer to the saa7146 core ops */
const struct v4l2_ioctl_ops *core_ops;
@@ -202,10 +201,12 @@ void saa7146_set_gpio(struct saa7146_dev *saa, u8 pin, u8 data);
/* from saa7146_video.c */
extern const struct v4l2_ioctl_ops saa7146_video_ioctl_ops;
+extern const struct v4l2_ioctl_ops saa7146_vbi_ioctl_ops;
extern struct saa7146_use_ops saa7146_video_uops;
int saa7146_start_preview(struct saa7146_fh *fh);
int saa7146_stop_preview(struct saa7146_fh *fh);
long saa7146_video_do_ioctl(struct file *file, unsigned int cmd, void *arg);
+int saa7146_s_ctrl(struct v4l2_ctrl *ctrl);
/* from saa7146_vbi.c */
extern struct saa7146_use_ops saa7146_vbi_uops;
diff --git a/include/media/sh_mobile_ceu.h b/include/media/sh_mobile_ceu.h
index a90a765f18da..6fdb6adf6b2b 100644
--- a/include/media/sh_mobile_ceu.h
+++ b/include/media/sh_mobile_ceu.h
@@ -5,6 +5,7 @@
#define SH_CEU_FLAG_USE_16BIT_BUS (1 << 1) /* use 16bit bus width */
#define SH_CEU_FLAG_HSYNC_LOW (1 << 2) /* default High if possible */
#define SH_CEU_FLAG_VSYNC_LOW (1 << 3) /* default High if possible */
+#define SH_CEU_FLAG_LOWER_8BIT (1 << 4) /* default upper 8bit */
struct device;
struct resource;
diff --git a/include/media/smiapp.h b/include/media/smiapp.h
new file mode 100644
index 000000000000..9ab07fd45d5c
--- /dev/null
+++ b/include/media/smiapp.h
@@ -0,0 +1,84 @@
+/*
+ * include/media/smiapp.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __SMIAPP_H_
+#define __SMIAPP_H_
+
+#include <media/v4l2-subdev.h>
+
+#define SMIAPP_NAME "smiapp"
+
+#define SMIAPP_DFL_I2C_ADDR (0x20 >> 1) /* Default I2C Address */
+#define SMIAPP_ALT_I2C_ADDR (0x6e >> 1) /* Alternate I2C Address */
+
+#define SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_CLOCK 0
+#define SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_STROBE 1
+#define SMIAPP_CSI_SIGNALLING_MODE_CSI2 2
+
+#define SMIAPP_NO_XSHUTDOWN -1
+
+/*
+ * Sometimes due to board layout considerations the camera module can be
+ * mounted rotated. The typical rotation used is 180 degrees which can be
+ * corrected by giving a default H-FLIP and V-FLIP in the sensor readout.
+ * FIXME: rotation also changes the bayer pattern.
+ */
+enum smiapp_module_board_orient {
+ SMIAPP_MODULE_BOARD_ORIENT_0 = 0,
+ SMIAPP_MODULE_BOARD_ORIENT_180,
+};
+
+struct smiapp_flash_strobe_parms {
+ u8 mode;
+ u32 strobe_width_high_us;
+ u16 strobe_delay;
+ u16 stobe_start_point;
+ u8 trigger;
+};
+
+struct smiapp_platform_data {
+ /*
+ * Change the cci address if i2c_addr_alt is set.
+ * Both default and alternate cci addr need to be present
+ */
+ unsigned short i2c_addr_dfl; /* Default i2c addr */
+ unsigned short i2c_addr_alt; /* Alternate i2c addr */
+
+ unsigned int nvm_size; /* bytes */
+ unsigned int ext_clk; /* sensor external clk */
+
+ unsigned int lanes; /* Number of CSI-2 lanes */
+ u8 csi_signalling_mode; /* SMIAPP_CSI_SIGNALLING_MODE_* */
+ const s64 *op_sys_clock;
+
+ enum smiapp_module_board_orient module_board_orient;
+
+ struct smiapp_flash_strobe_parms *strobe_setup;
+
+ int (*set_xclk)(struct v4l2_subdev *sd, int hz);
+ char *ext_clk_name;
+ int xshutdown; /* gpio or SMIAPP_NO_XSHUTDOWN */
+};
+
+#endif /* __SMIAPP_H_ */
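
A hedged board-data sketch for the new header; every value is illustrative, and the zero-terminated layout used for op_sys_clock is an assumption, not something the header guarantees.

#include <media/smiapp.h>

static const s64 demo_op_sys_clock[] = { 840000000, 0 };

static struct smiapp_platform_data demo_smiapp_pdata = {
	.i2c_addr_dfl		= SMIAPP_DFL_I2C_ADDR,
	.i2c_addr_alt		= SMIAPP_ALT_I2C_ADDR,
	.nvm_size		= 1024,
	.ext_clk		= 9600000,
	.lanes			= 2,
	.csi_signalling_mode	= SMIAPP_CSI_SIGNALLING_MODE_CSI2,
	.op_sys_clock		= demo_op_sys_clock,
	.module_board_orient	= SMIAPP_MODULE_BOARD_ORIENT_180,
	.xshutdown		= SMIAPP_NO_XSHUTDOWN,
};
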
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index cad374bdcf4b..d865dcf9879f 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -56,11 +56,15 @@ struct soc_camera_device {
};
};
+/* Host supports programmable stride */
+#define SOCAM_HOST_CAP_STRIDE (1 << 0)
+
struct soc_camera_host {
struct v4l2_device v4l2_dev;
struct list_head list;
struct mutex host_lock; /* Protect during probing */
unsigned char nr; /* Host number */
+ u32 capabilities;
void *priv;
const char *drv_name;
struct soc_camera_host_ops *ops;
@@ -98,7 +102,7 @@ struct soc_camera_host_ops {
int (*set_bus_param)(struct soc_camera_device *);
int (*get_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
int (*set_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
- int (*enum_fsizes)(struct soc_camera_device *, struct v4l2_frmsizeenum *);
+ int (*enum_framesizes)(struct soc_camera_device *, struct v4l2_frmsizeenum *);
unsigned int (*poll)(struct file *, poll_table *);
};
diff --git a/include/media/soc_mediabus.h b/include/media/soc_mediabus.h
index 73f1e7eb60f3..0dc6f4625b92 100644
--- a/include/media/soc_mediabus.h
+++ b/include/media/soc_mediabus.h
@@ -47,6 +47,24 @@ enum soc_mbus_order {
};
/**
+ * enum soc_mbus_layout - planes layout in memory
+ * @SOC_MBUS_LAYOUT_PACKED: color components packed
+ * @SOC_MBUS_LAYOUT_PLANAR_2Y_U_V: YUV components stored in 3 planes (4:2:2)
+ * @SOC_MBUS_LAYOUT_PLANAR_2Y_C: YUV components stored in a luma and a
+ * chroma plane (C plane is half the size
+ * of Y plane)
+ * @SOC_MBUS_LAYOUT_PLANAR_Y_C: YUV components stored in a luma and a
+ * chroma plane (C plane is the same size
+ * as Y plane)
+ */
+enum soc_mbus_layout {
+ SOC_MBUS_LAYOUT_PACKED = 0,
+ SOC_MBUS_LAYOUT_PLANAR_2Y_U_V,
+ SOC_MBUS_LAYOUT_PLANAR_2Y_C,
+ SOC_MBUS_LAYOUT_PLANAR_Y_C,
+};
+
+/**
* struct soc_mbus_pixelfmt - Data format on the media bus
* @name: Name of the format
* @fourcc: Fourcc code, that will be obtained if the data is
@@ -60,6 +78,7 @@ struct soc_mbus_pixelfmt {
u32 fourcc;
enum soc_mbus_packing packing;
enum soc_mbus_order order;
+ enum soc_mbus_layout layout;
u8 bits_per_sample;
};
@@ -80,6 +99,8 @@ const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
enum v4l2_mbus_pixelcode code);
s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf);
+s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
+ u32 bytes_per_line, u32 height);
int soc_mbus_samples_per_pixel(const struct soc_mbus_pixelfmt *mf,
unsigned int *numerator, unsigned int *denominator);
unsigned int soc_mbus_config_compatible(const struct v4l2_mbus_config *cfg,
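
The new soc_mbus_image_size() helper pairs with soc_mbus_bytes_per_line() when a host driver computes sizeimage for planar layouts. A hedged sketch, where 'fmt' would normally come from soc_mbus_get_fmtdesc():

#include <media/soc_mediabus.h>

static s32 demo_sizeimage(const struct soc_mbus_pixelfmt *fmt,
			  u32 width, u32 height)
{
	s32 bpl = soc_mbus_bytes_per_line(width, fmt);

	if (bpl < 0)
		return bpl;
	return soc_mbus_image_size(fmt, bpl, height);
}
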
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 11e67562b3ac..776605f1cbe2 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -25,6 +25,7 @@
#include <linux/videodev2.h>
/* forward references */
+struct file;
struct v4l2_ctrl_handler;
struct v4l2_ctrl_helper;
struct v4l2_ctrl;
@@ -129,7 +130,10 @@ struct v4l2_ctrl {
u32 step;
u32 menu_skip_mask;
};
- const char * const *qmenu;
+ union {
+ const char * const *qmenu;
+ const s64 *qmenu_int;
+ };
unsigned long flags;
union {
s32 val;
@@ -164,7 +168,9 @@ struct v4l2_ctrl_ref {
/** struct v4l2_ctrl_handler - The control handler keeps track of all the
* controls: both the controls owned by the handler and those inherited
* from other handlers.
+ * @_lock: Default for "lock".
* @lock: Lock to control access to this handler and its controls.
+ * May be replaced by the user right after init.
* @ctrls: The list of controls owned by this handler.
* @ctrl_refs: The list of control references.
* @cached: The last found control reference. It is common that the same
@@ -175,7 +181,8 @@ struct v4l2_ctrl_ref {
* @error: The error code of the first failed control addition.
*/
struct v4l2_ctrl_handler {
- struct mutex lock;
+ struct mutex _lock;
+ struct mutex *lock;
struct list_head ctrls;
struct list_head ctrl_refs;
struct v4l2_ctrl_ref *cached;
@@ -219,6 +226,7 @@ struct v4l2_ctrl_config {
u32 flags;
u32 menu_skip_mask;
const char * const *qmenu;
+ const s64 *qmenu_int;
unsigned int is_private:1;
};
@@ -343,6 +351,23 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, s32 max, s32 mask, s32 def);
+/** v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control.
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @max: The control's maximum value.
+ * @def: The control's default value.
+ * @qmenu_int: The control's menu entries.
+ *
+ * Same as v4l2_ctrl_new_std_menu(), but @mask is set to 0 and it additionally
+ * takes as an argument an array of integers determining the menu items.
+ *
+ * If @id refers to a non-integer-menu control, then this function will return NULL.
+ */
+struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, s32 max, s32 def, const s64 *qmenu_int);
+
/** v4l2_ctrl_add_ctrl() - Add a control from another handler to this handler.
* @hdl: The control handler.
* @ctrl: The control to add.
@@ -451,7 +476,7 @@ void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed);
*/
static inline void v4l2_ctrl_lock(struct v4l2_ctrl *ctrl)
{
- mutex_lock(&ctrl->handler->lock);
+ mutex_lock(ctrl->handler->lock);
}
/** v4l2_ctrl_lock() - Helper function to unlock the handler
@@ -460,7 +485,7 @@ static inline void v4l2_ctrl_lock(struct v4l2_ctrl *ctrl)
*/
static inline void v4l2_ctrl_unlock(struct v4l2_ctrl *ctrl)
{
- mutex_unlock(&ctrl->handler->lock);
+ mutex_unlock(ctrl->handler->lock);
}
/** v4l2_ctrl_g_ctrl() - Helper function to get the control's value from within a driver.
@@ -487,10 +512,9 @@ s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl);
int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val);
/* Internal helper functions that deal with control events. */
-void v4l2_ctrl_add_event(struct v4l2_ctrl *ctrl,
- struct v4l2_subscribed_event *sev);
-void v4l2_ctrl_del_event(struct v4l2_ctrl *ctrl,
- struct v4l2_subscribed_event *sev);
+extern const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops;
+void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new);
+void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new);
/* Can be used as a vidioc_log_status function that just dumps all controls
associated with the filehandle. */
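
A hedged sketch of v4l2_ctrl_new_int_menu() in use, e.g. for the new V4L2_CID_LINK_FREQ control; the frequencies, the handler and the ops pointer are placeholders, and the handler is assumed to be already initialised with v4l2_ctrl_handler_init().

#include <linux/kernel.h>
#include <media/v4l2-ctrls.h>

static const s64 demo_link_freqs[] = { 210000000, 420000000 };

static int demo_init_controls(struct v4l2_ctrl_handler *hdl,
			      const struct v4l2_ctrl_ops *ops)
{
	v4l2_ctrl_new_int_menu(hdl, ops, V4L2_CID_LINK_FREQ,
			       ARRAY_SIZE(demo_link_freqs) - 1, 0,
			       demo_link_freqs);
	return hdl->error;
}
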
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 96d22215cc88..a056e6ee1b68 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -39,6 +39,9 @@ struct v4l2_ctrl_handler;
#define V4L2_FL_USES_V4L2_FH (1)
/* Use the prio field of v4l2_fh for core priority checking */
#define V4L2_FL_USE_FH_PRIO (2)
+/* If ioctl core locking is in use, then apply that also to all
+ file operations. Don't use this flag in new drivers! */
+#define V4L2_FL_LOCK_ALL_FOPS (3)
/* Priority helper functions */
@@ -126,8 +129,10 @@ struct video_device
/* ioctl callbacks */
const struct v4l2_ioctl_ops *ioctl_ops;
+ DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
/* serialization lock */
+ DECLARE_BITMAP(disable_locking, BASE_VIDIOC_PRIVATE);
struct mutex *lock;
};
@@ -173,6 +178,26 @@ void video_device_release(struct video_device *vdev);
a dubious construction at best. */
void video_device_release_empty(struct video_device *vdev);
+/* returns true if cmd is a known V4L2 ioctl */
+bool v4l2_is_known_ioctl(unsigned int cmd);
+
+/* mark that this command shouldn't use core locking */
+static inline void v4l2_disable_ioctl_locking(struct video_device *vdev, unsigned int cmd)
+{
+ if (_IOC_NR(cmd) < BASE_VIDIOC_PRIVATE)
+ set_bit(_IOC_NR(cmd), vdev->disable_locking);
+}
+
+/* Mark that this command isn't implemented. This must be called before
+ video_device_register. See also the comments in determine_valid_ioctls().
+ This function allows drivers to provide just one v4l2_ioctl_ops struct, but
+ disable ioctls based on the specific card that is actually found. */
+static inline void v4l2_disable_ioctl(struct video_device *vdev, unsigned int cmd)
+{
+ if (_IOC_NR(cmd) < BASE_VIDIOC_PRIVATE)
+ set_bit(_IOC_NR(cmd), vdev->valid_ioctls);
+}
+
/* helper functions to access driver private data. */
static inline void *video_get_drvdata(struct video_device *vdev)
{
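
A hedged sketch of the new per-ioctl knobs; both calls must be made before video_register_device(), and the choice of ioctls here is purely illustrative.

#include <linux/videodev2.h>
#include <media/v4l2-dev.h>

static void demo_tune_ioctls(struct video_device *vdev, bool has_std)
{
	if (!has_std) {
		v4l2_disable_ioctl(vdev, VIDIOC_G_STD);
		v4l2_disable_ioctl(vdev, VIDIOC_S_STD);
	}
	/* this hypothetical driver serializes VIDIOC_DQBUF itself */
	v4l2_disable_ioctl_locking(vdev, VIDIOC_DQBUF);
}
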
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index 5f14e8895ce2..2885a810a128 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -78,6 +78,19 @@ struct v4l2_kevent {
struct v4l2_event event;
};
+/** struct v4l2_subscribed_event_ops - Subscribed event operations.
+ * @add: Optional callback, called when a new listener is added
+ * @del: Optional callback, called when a listener stops listening
+ * @replace: Optional callback that can replace event 'old' with event 'new'.
+ * @merge: Optional callback that can merge event 'old' into event 'new'.
+ */
+struct v4l2_subscribed_event_ops {
+ int (*add)(struct v4l2_subscribed_event *sev, unsigned elems);
+ void (*del)(struct v4l2_subscribed_event *sev);
+ void (*replace)(struct v4l2_event *old, const struct v4l2_event *new);
+ void (*merge)(const struct v4l2_event *old, struct v4l2_event *new);
+};
+
/** struct v4l2_subscribed_event - Internal struct representing a subscribed event.
* @list: List node for the v4l2_fh->subscribed list.
* @type: Event type.
@@ -85,8 +98,7 @@ struct v4l2_kevent {
* @flags: Copy of v4l2_event_subscription->flags.
* @fh: Filehandle that subscribed to this event.
* @node: List node that hooks into the object's event list (if there is one).
- * @replace: Optional callback that can replace event 'old' with event 'new'.
- * @merge: Optional callback that can merge event 'old' into event 'new'.
+ * @ops: v4l2_subscribed_event_ops
* @elems: The number of elements in the events array.
* @first: The index of the events containing the oldest available event.
* @in_use: The number of queued events.
@@ -99,10 +111,7 @@ struct v4l2_subscribed_event {
u32 flags;
struct v4l2_fh *fh;
struct list_head node;
- void (*replace)(struct v4l2_event *old,
- const struct v4l2_event *new);
- void (*merge)(const struct v4l2_event *old,
- struct v4l2_event *new);
+ const struct v4l2_subscribed_event_ops *ops;
unsigned elems;
unsigned first;
unsigned in_use;
@@ -115,7 +124,8 @@ void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev);
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev);
int v4l2_event_pending(struct v4l2_fh *fh);
int v4l2_event_subscribe(struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub, unsigned elems);
+ struct v4l2_event_subscription *sub, unsigned elems,
+ const struct v4l2_subscribed_event_ops *ops);
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
struct v4l2_event_subscription *sub);
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh);
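
A hedged sketch of the extended v4l2_event_subscribe() signature for a driver-private event: an 8-entry queue and no replace/merge handling, so ops is left NULL (the callbacks are documented as optional). The event type is made up.

#include <linux/errno.h>
#include <media/v4l2-event.h>

#define DEMO_EVENT_TYPE	(V4L2_EVENT_PRIVATE_START + 1)

static int demo_subscribe_event(struct v4l2_fh *fh,
				struct v4l2_event_subscription *sub)
{
	if (sub->type != DEMO_EVENT_TYPE)
		return -EINVAL;
	return v4l2_event_subscribe(fh, sub, 8, NULL);
}
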
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 3cb939cd03f9..d8b76f7392f8 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -271,6 +271,12 @@ struct v4l2_ioctl_ops {
struct v4l2_dv_timings *timings);
int (*vidioc_g_dv_timings) (struct file *file, void *fh,
struct v4l2_dv_timings *timings);
+ int (*vidioc_query_dv_timings) (struct file *file, void *fh,
+ struct v4l2_dv_timings *timings);
+ int (*vidioc_enum_dv_timings) (struct file *file, void *fh,
+ struct v4l2_enum_dv_timings *timings);
+ int (*vidioc_dv_timings_cap) (struct file *file, void *fh,
+ struct v4l2_dv_timings_cap *cap);
int (*vidioc_subscribe_event) (struct v4l2_fh *fh,
struct v4l2_event_subscription *sub);
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index f0f3358d1b1b..c35a3545e273 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -307,6 +307,12 @@ struct v4l2_subdev_video_ops {
struct v4l2_dv_timings *timings);
int (*g_dv_timings)(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings);
+ int (*enum_dv_timings)(struct v4l2_subdev *sd,
+ struct v4l2_enum_dv_timings *timings);
+ int (*query_dv_timings)(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings);
+ int (*dv_timings_cap)(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings_cap *cap);
int (*enum_mbus_fmt)(struct v4l2_subdev *sd, unsigned int index,
enum v4l2_mbus_pixelcode *code);
int (*enum_mbus_fsizes)(struct v4l2_subdev *sd,
@@ -466,6 +472,15 @@ struct v4l2_subdev_pad_ops {
struct v4l2_subdev_crop *crop);
int (*get_crop)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
struct v4l2_subdev_crop *crop);
+ int (*get_selection)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel);
+ int (*set_selection)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ int (*link_validate)(struct v4l2_subdev *sd, struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt);
+#endif /* CONFIG_MEDIA_CONTROLLER */
};
struct v4l2_subdev_ops {
@@ -541,7 +556,7 @@ struct v4l2_subdev {
#define media_entity_to_v4l2_subdev(ent) \
container_of(ent, struct v4l2_subdev, entity)
#define vdev_to_v4l2_subdev(vdev) \
- video_get_drvdata(vdev)
+ ((struct v4l2_subdev *)video_get_drvdata(vdev))
/*
* Used for storing subdev information per file handle
@@ -549,8 +564,11 @@ struct v4l2_subdev {
struct v4l2_subdev_fh {
struct v4l2_fh vfh;
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- struct v4l2_mbus_framefmt *try_fmt;
- struct v4l2_rect *try_crop;
+ struct {
+ struct v4l2_mbus_framefmt try_fmt;
+ struct v4l2_rect try_crop;
+ struct v4l2_rect try_compose;
+ } *pad;
#endif
};
@@ -558,17 +576,19 @@ struct v4l2_subdev_fh {
container_of(fh, struct v4l2_subdev_fh, vfh)
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
-static inline struct v4l2_mbus_framefmt *
-v4l2_subdev_get_try_format(struct v4l2_subdev_fh *fh, unsigned int pad)
-{
- return &fh->try_fmt[pad];
-}
-
-static inline struct v4l2_rect *
-v4l2_subdev_get_try_crop(struct v4l2_subdev_fh *fh, unsigned int pad)
-{
- return &fh->try_crop[pad];
-}
+#define __V4L2_SUBDEV_MK_GET_TRY(rtype, fun_name, field_name) \
+ static inline struct rtype * \
+ v4l2_subdev_get_try_##fun_name(struct v4l2_subdev_fh *fh, \
+ unsigned int pad) \
+ { \
+ BUG_ON(unlikely(pad >= vdev_to_v4l2_subdev( \
+ fh->vfh.vdev)->entity.num_pads)); \
+ return &fh->pad[pad].field_name; \
+ }
+
+__V4L2_SUBDEV_MK_GET_TRY(v4l2_mbus_framefmt, format, try_fmt)
+__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, crop, try_crop)
+__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, compose, try_compose)
#endif
extern const struct v4l2_file_operations v4l2_subdev_fops;
@@ -593,6 +613,13 @@ static inline void *v4l2_get_subdev_hostdata(const struct v4l2_subdev *sd)
return sd->host_priv;
}
+#ifdef CONFIG_MEDIA_CONTROLLER
+int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt);
+int v4l2_subdev_link_validate(struct media_link *link);
+#endif /* CONFIG_MEDIA_CONTROLLER */
void v4l2_subdev_init(struct v4l2_subdev *sd,
const struct v4l2_subdev_ops *ops);
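For reference, a sketch of what the __V4L2_SUBDEV_MK_GET_TRY() generator above expands to for the "format" case (expansion reconstructed by hand from the macro, not emitted by the patch). Callers keep using v4l2_subdev_get_try_format(fh, pad) exactly as before; only the backing storage moved into the per-pad array:

static inline struct v4l2_mbus_framefmt *
v4l2_subdev_get_try_format(struct v4l2_subdev_fh *fh, unsigned int pad)
{
	BUG_ON(unlikely(pad >= vdev_to_v4l2_subdev(fh->vfh.vdev)->entity.num_pads));
	return &fh->pad[pad].try_fmt;	/* per-pad try state, see struct above */
}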
diff --git a/include/media/videobuf-dma-contig.h b/include/media/videobuf-dma-contig.h
index f0ed82543d9f..f473aeb86d3f 100644
--- a/include/media/videobuf-dma-contig.h
+++ b/include/media/videobuf-dma-contig.h
@@ -26,6 +26,16 @@ void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
void *priv,
struct mutex *ext_lock);
+void videobuf_queue_dma_contig_init_cached(struct videobuf_queue *q,
+ const struct videobuf_queue_ops *ops,
+ struct device *dev,
+ spinlock_t *irqlock,
+ enum v4l2_buf_type type,
+ enum v4l2_field field,
+ unsigned int msize,
+ void *priv,
+ struct mutex *ext_lock);
+
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf);
void videobuf_dma_contig_free(struct videobuf_queue *q,
struct videobuf_buffer *buf);
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index a65910bda381..961669b648fd 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -163,6 +163,11 @@ typedef struct {
__u8 b[6];
} __packed bdaddr_t;
+/* BD Address type */
+#define BDADDR_BREDR 0x00
+#define BDADDR_LE_PUBLIC 0x01
+#define BDADDR_LE_RANDOM 0x02
+
#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}})
#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff}})
@@ -178,7 +183,6 @@ static inline void bacpy(bdaddr_t *dst, bdaddr_t *src)
void baswap(bdaddr_t *dst, bdaddr_t *src);
char *batostr(bdaddr_t *ba);
-bdaddr_t *strtoba(char *str);
/* Common socket structures and functions */
@@ -190,8 +194,12 @@ struct bt_sock {
bdaddr_t dst;
struct list_head accept_q;
struct sock *parent;
- u32 defer_setup;
- bool suspended;
+ unsigned long flags;
+};
+
+enum {
+ BT_SK_DEFER_SETUP,
+ BT_SK_SUSPEND,
};
struct bt_sock_list {
@@ -216,14 +224,24 @@ void bt_accept_unlink(struct sock *sk);
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
/* Skb helpers */
+struct l2cap_ctrl {
+ unsigned int sframe : 1,
+ poll : 1,
+ final : 1,
+ fcs : 1,
+ sar : 2,
+ super : 2;
+ __u16 reqseq;
+ __u16 txseq;
+ __u8 retries;
+};
+
struct bt_skb_cb {
__u8 pkt_type;
__u8 incoming;
__u16 expect;
- __u16 tx_seq;
- __u8 retries;
- __u8 sar;
__u8 force_active;
+ struct l2cap_ctrl control;
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
@@ -243,12 +261,10 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk,
{
struct sk_buff *skb;
- release_sock(sk);
if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) {
skb_reserve(skb, BT_SKB_RESERVE);
bt_cb(skb)->incoming = 0;
}
- lock_sock(sk);
if (!skb && *err)
return NULL;
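For illustration, a hedged sketch of how the new flags word replaces the old defer_setup/suspended fields (the my_* wrappers are assumptions; bt_sk() is the existing struct sock to struct bt_sock accessor):

static void my_set_defer_setup(struct sock *sk, bool enable)
{
	if (enable)
		set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
	else
		clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
}

static bool my_defer_setup_enabled(struct sock *sk)
{
	return test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
}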
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index d47e523c9d83..66a7b579e31c 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -102,6 +102,7 @@ enum {
HCI_DISCOVERABLE,
HCI_LINK_SECURITY,
HCI_PENDING_CLASS,
+ HCI_PERIODIC_INQ,
};
/* HCI ioctl defines */
@@ -324,6 +325,8 @@ struct hci_cp_inquiry {
#define HCI_OP_INQUIRY_CANCEL 0x0402
+#define HCI_OP_PERIODIC_INQ 0x0403
+
#define HCI_OP_EXIT_PERIODIC_INQ 0x0404
#define HCI_OP_CREATE_CONN 0x0405
@@ -717,6 +720,10 @@ struct hci_rp_read_local_oob_data {
} __packed;
#define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58
+struct hci_rp_read_inq_rsp_tx_power {
+ __u8 status;
+ __s8 tx_power;
+} __packed;
#define HCI_OP_READ_FLOW_CONTROL_MODE 0x0c66
struct hci_rp_read_flow_control_mode {
@@ -1431,6 +1438,5 @@ struct hci_inquiry_req {
#define IREQ_CACHE_FLUSH 0x0001
extern bool enable_hs;
-extern bool enable_le;
#endif /* __HCI_H */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index db1c5df45224..9fc7728f94e4 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -155,9 +155,14 @@ struct hci_dev {
__u16 hci_rev;
__u8 lmp_ver;
__u16 manufacturer;
- __le16 lmp_subver;
+ __u16 lmp_subver;
__u16 voice_setting;
__u8 io_capability;
+ __s8 inq_tx_power;
+ __u16 devid_source;
+ __u16 devid_vendor;
+ __u16 devid_product;
+ __u16 devid_version;
__u16 pkt_type;
__u16 esco_type;
@@ -250,9 +255,6 @@ struct hci_dev {
struct list_head remote_oob_data;
- struct list_head adv_entries;
- struct delayed_work adv_work;
-
struct hci_dev_stats stat;
struct sk_buff_head driver_init;
@@ -263,7 +265,6 @@ struct hci_dev {
struct dentry *debugfs;
- struct device *parent;
struct device dev;
struct rfkill *rfkill;
@@ -571,7 +572,7 @@ int hci_chan_del(struct hci_chan *chan);
void hci_chan_list_flush(struct hci_conn *conn);
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
- __u8 sec_level, __u8 auth_type);
+ __u8 dst_type, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
@@ -673,8 +674,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len);
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]);
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
- int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16 ediv,
- u8 rand[8]);
+ int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
+ __le16 ediv, u8 rand[8]);
struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 addr_type);
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr);
@@ -688,14 +689,6 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
u8 *randomizer);
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);
-#define ADV_CLEAR_TIMEOUT (3*60*HZ) /* Three minutes */
-int hci_adv_entries_clear(struct hci_dev *hdev);
-struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr);
-int hci_add_adv_entry(struct hci_dev *hdev,
- struct hci_ev_le_advertising_info *ev);
-
-void hci_del_off_timer(struct hci_dev *hdev);
-
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_frame(struct sk_buff *skb);
@@ -709,7 +702,7 @@ void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);
-#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
+#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH)
@@ -933,6 +926,23 @@ static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
return false;
}
+static inline size_t eir_get_length(u8 *eir, size_t eir_len)
+{
+ size_t parsed = 0;
+
+ while (parsed < eir_len) {
+ u8 field_len = eir[0];
+
+ if (field_len == 0)
+ return parsed;
+
+ parsed += field_len + 1;
+ eir += field_len + 1;
+ }
+
+ return eir_len;
+}
+
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
u8 data_len)
{
@@ -961,17 +971,12 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
void hci_sock_dev_event(struct hci_dev *hdev, int event);
/* Management interface */
-#define MGMT_ADDR_BREDR 0x00
-#define MGMT_ADDR_LE_PUBLIC 0x01
-#define MGMT_ADDR_LE_RANDOM 0x02
-#define MGMT_ADDR_INVALID 0xff
-
-#define DISCOV_TYPE_BREDR (BIT(MGMT_ADDR_BREDR))
-#define DISCOV_TYPE_LE (BIT(MGMT_ADDR_LE_PUBLIC) | \
- BIT(MGMT_ADDR_LE_RANDOM))
-#define DISCOV_TYPE_INTERLEAVED (BIT(MGMT_ADDR_BREDR) | \
- BIT(MGMT_ADDR_LE_PUBLIC) | \
- BIT(MGMT_ADDR_LE_RANDOM))
+#define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR))
+#define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \
+ BIT(BDADDR_LE_RANDOM))
+#define DISCOV_TYPE_INTERLEAVED (BIT(BDADDR_BREDR) | \
+ BIT(BDADDR_LE_PUBLIC) | \
+ BIT(BDADDR_LE_RANDOM))
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_index_added(struct hci_dev *hdev);
@@ -1067,12 +1072,12 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
u16 latency, u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
__u8 ltk[16]);
-void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]);
-void hci_le_ltk_neg_reply(struct hci_conn *conn);
-
int hci_do_inquiry(struct hci_dev *hdev, u8 length);
int hci_cancel_inquiry(struct hci_dev *hdev);
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
int timeout);
+int hci_cancel_le_scan(struct hci_dev *hdev);
+
+u8 bdaddr_to_le(u8 bdaddr_type);
#endif /* __HCI_CORE_H */
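For illustration, a minimal sketch of the new eir_get_length() helper (my_copy_eir() is an assumption; the 240-byte size is the EIR field length from the spec, used here purely as an example):

static void my_copy_eir(u8 *dst, u8 *eir)
{
	/* eir_get_length() stops at the terminating zero-length field,
	 * so only the meaningful part of the buffer is copied */
	size_t used = eir_get_length(eir, 240);

	memcpy(dst, eir, used);
}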
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 9b242c6bf55b..1c7d1cd5e679 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -44,6 +44,7 @@
#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF
#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF
#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
+#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */
#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100)
#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000)
@@ -57,6 +58,7 @@ struct sockaddr_l2 {
__le16 l2_psm;
bdaddr_t l2_bdaddr;
__le16 l2_cid;
+ __u8 l2_bdaddr_type;
};
/* L2CAP socket options */
@@ -139,6 +141,8 @@ struct l2cap_conninfo {
#define L2CAP_CTRL_TXSEQ_SHIFT 1
#define L2CAP_CTRL_SUPER_SHIFT 2
+#define L2CAP_CTRL_POLL_SHIFT 4
+#define L2CAP_CTRL_FINAL_SHIFT 7
#define L2CAP_CTRL_REQSEQ_SHIFT 8
#define L2CAP_CTRL_SAR_SHIFT 14
@@ -152,9 +156,11 @@ struct l2cap_conninfo {
#define L2CAP_EXT_CTRL_FINAL 0x00000002
#define L2CAP_EXT_CTRL_FRAME_TYPE 0x00000001 /* I- or S-Frame */
+#define L2CAP_EXT_CTRL_FINAL_SHIFT 1
#define L2CAP_EXT_CTRL_REQSEQ_SHIFT 2
#define L2CAP_EXT_CTRL_SAR_SHIFT 16
#define L2CAP_EXT_CTRL_SUPER_SHIFT 16
+#define L2CAP_EXT_CTRL_POLL_SHIFT 18
#define L2CAP_EXT_CTRL_TXSEQ_SHIFT 18
/* L2CAP Supervisory Function */
@@ -186,6 +192,8 @@ struct l2cap_hdr {
#define L2CAP_FCS_SIZE 2
#define L2CAP_SDULEN_SIZE 2
#define L2CAP_PSMLEN_SIZE 2
+#define L2CAP_ENH_CTRL_SIZE 2
+#define L2CAP_EXT_CTRL_SIZE 4
struct l2cap_cmd_hdr {
__u8 code;
@@ -401,6 +409,16 @@ struct l2cap_conn_param_update_rsp {
#define L2CAP_CONN_PARAM_REJECTED 0x0001
/* ----- L2CAP channels and connections ----- */
+struct l2cap_seq_list {
+ __u16 head;
+ __u16 tail;
+ __u16 mask;
+ __u16 *list;
+};
+
+#define L2CAP_SEQ_LIST_CLEAR 0xFFFF
+#define L2CAP_SEQ_LIST_TAIL 0x8000
+
struct srej_list {
__u16 tx_seq;
struct list_head list;
@@ -446,6 +464,9 @@ struct l2cap_chan {
__u16 monitor_timeout;
__u16 mps;
+ __u8 tx_state;
+ __u8 rx_state;
+
unsigned long conf_state;
unsigned long conn_state;
unsigned long flags;
@@ -456,9 +477,11 @@ struct l2cap_chan {
__u16 buffer_seq;
__u16 buffer_seq_srej;
__u16 srej_save_reqseq;
+ __u16 last_acked_seq;
__u16 frames_sent;
__u16 unacked_frames;
__u8 retry_count;
+ __u16 srej_queue_next;
__u8 num_acked;
__u16 sdu_len;
struct sk_buff *sdu;
@@ -490,6 +513,8 @@ struct l2cap_chan {
struct sk_buff *tx_send_head;
struct sk_buff_head tx_q;
struct sk_buff_head srej_q;
+ struct l2cap_seq_list srej_list;
+ struct l2cap_seq_list retrans_list;
struct list_head srej_l;
struct list_head list;
@@ -508,8 +533,7 @@ struct l2cap_ops {
void (*close) (void *data);
void (*state_change) (void *data, int state);
struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
- unsigned long len, int nb, int *err);
-
+ unsigned long len, int nb);
};
struct l2cap_conn {
@@ -600,6 +624,44 @@ enum {
FLAG_EFS_ENABLE,
};
+enum {
+ L2CAP_TX_STATE_XMIT,
+ L2CAP_TX_STATE_WAIT_F,
+};
+
+enum {
+ L2CAP_RX_STATE_RECV,
+ L2CAP_RX_STATE_SREJ_SENT,
+};
+
+enum {
+ L2CAP_TXSEQ_EXPECTED,
+ L2CAP_TXSEQ_EXPECTED_SREJ,
+ L2CAP_TXSEQ_UNEXPECTED,
+ L2CAP_TXSEQ_UNEXPECTED_SREJ,
+ L2CAP_TXSEQ_DUPLICATE,
+ L2CAP_TXSEQ_DUPLICATE_SREJ,
+ L2CAP_TXSEQ_INVALID,
+ L2CAP_TXSEQ_INVALID_IGNORE,
+};
+
+enum {
+ L2CAP_EV_DATA_REQUEST,
+ L2CAP_EV_LOCAL_BUSY_DETECTED,
+ L2CAP_EV_LOCAL_BUSY_CLEAR,
+ L2CAP_EV_RECV_REQSEQ_AND_FBIT,
+ L2CAP_EV_RECV_FBIT,
+ L2CAP_EV_RETRANS_TO,
+ L2CAP_EV_MONITOR_TO,
+ L2CAP_EV_EXPLICIT_POLL,
+ L2CAP_EV_RECV_IFRAME,
+ L2CAP_EV_RECV_RR,
+ L2CAP_EV_RECV_REJ,
+ L2CAP_EV_RECV_RNR,
+ L2CAP_EV_RECV_SREJ,
+ L2CAP_EV_RECV_FRAME,
+};
+
static inline void l2cap_chan_hold(struct l2cap_chan *c)
{
atomic_inc(&c->refcnt);
@@ -622,21 +684,26 @@ static inline void l2cap_chan_unlock(struct l2cap_chan *chan)
}
static inline void l2cap_set_timer(struct l2cap_chan *chan,
- struct delayed_work *work, long timeout)
+ struct delayed_work *work, long timeout)
{
BT_DBG("chan %p state %s timeout %ld", chan,
- state_to_string(chan->state), timeout);
+ state_to_string(chan->state), timeout);
+ /* If the delayed work was cancelled here, the hold(chan) taken by the
+ previous set_timer is reused for the new timer, so do not hold(chan) again */
if (!cancel_delayed_work(work))
l2cap_chan_hold(chan);
+
schedule_delayed_work(work, timeout);
}
static inline bool l2cap_clear_timer(struct l2cap_chan *chan,
- struct delayed_work *work)
+ struct delayed_work *work)
{
bool ret;
+ /* If the delayed work was cancelled here, drop the reference now;
+ otherwise the delayed work function drops it */
ret = cancel_delayed_work(work);
if (ret)
l2cap_chan_put(chan);
@@ -658,13 +725,10 @@ static inline bool l2cap_clear_timer(struct l2cap_chan *chan,
static inline int __seq_offset(struct l2cap_chan *chan, __u16 seq1, __u16 seq2)
{
- int offset;
-
- offset = (seq1 - seq2) % (chan->tx_win_max + 1);
- if (offset < 0)
- offset += (chan->tx_win_max + 1);
-
- return offset;
+ if (seq1 >= seq2)
+ return seq1 - seq2;
+ else
+ return chan->tx_win_max + 1 - seq2 + seq1;
}
static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq)
@@ -852,14 +916,15 @@ int __l2cap_wait_ack(struct sock *sk);
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm);
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
-struct l2cap_chan *l2cap_chan_create(struct sock *sk);
+struct l2cap_chan *l2cap_chan_create(void);
void l2cap_chan_close(struct l2cap_chan *chan, int reason);
void l2cap_chan_destroy(struct l2cap_chan *chan);
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
- bdaddr_t *dst);
+ bdaddr_t *dst, u8 dst_type);
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
u32 priority);
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
int l2cap_chan_check_security(struct l2cap_chan *chan);
+void l2cap_chan_set_defaults(struct l2cap_chan *chan);
#endif /* __L2CAP_H */
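A worked example of the rewritten __seq_offset() above may help (values assumed: the standard 6-bit ERTM sequence space, i.e. chan->tx_win_max == 63):

/*
 *   __seq_offset(chan, 5, 2)  ==  5 - 2            ==  3   (no wrap)
 *   __seq_offset(chan, 2, 60) ==  63 + 1 - 60 + 2  ==  6   (wrap-around)
 *
 * In both cases the result equals (seq1 - seq2) mod (tx_win_max + 1),
 * i.e. how far seq1 is ahead of seq2 in the circular sequence space,
 * which is what the old modulo-based version computed as well.
 */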
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index ebfd91fc20f8..23fd0546fccb 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -341,6 +341,15 @@ struct mgmt_cp_unblock_device {
} __packed;
#define MGMT_UNBLOCK_DEVICE_SIZE MGMT_ADDR_INFO_SIZE
+#define MGMT_OP_SET_DEVICE_ID 0x0028
+struct mgmt_cp_set_device_id {
+ __le16 source;
+ __le16 vendor;
+ __le16 product;
+ __le16 version;
+} __packed;
+#define MGMT_SET_DEVICE_ID_SIZE 8
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
index 7b3acdd29134..ca356a734920 100644
--- a/include/net/bluetooth/smp.h
+++ b/include/net/bluetooth/smp.h
@@ -77,7 +77,7 @@ struct smp_cmd_encrypt_info {
#define SMP_CMD_MASTER_IDENT 0x07
struct smp_cmd_master_ident {
- __u16 ediv;
+ __le16 ediv;
__u8 rand[8];
} __packed;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index adb2320bccdf..0289d4ce7070 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -3365,9 +3365,9 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
* @chan: main channel
* @channel_type: HT mode
*/
-int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type);
+bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type);
/*
* cfg80211_ch_switch_notify - update wdev channel and notify userspace
diff --git a/include/net/dst.h b/include/net/dst.h
index bed833d9796a..8197eadca819 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -60,6 +60,7 @@ struct dst_entry {
#define DST_NOCOUNT 0x0020
#define DST_NOPEER 0x0040
#define DST_FAKE_RTABLE 0x0080
+#define DST_XFRM_TUNNEL 0x0100
short error;
short obsolete;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 4d6e6c6818d0..1937c7d98304 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -667,6 +667,9 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_SHORT_GI: Short guard interval was used
* @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present.
* Valid only for data frames (mainly A-MPDU)
+ * @RX_FLAG_HT_GF: This frame was received in an HT-greenfield transmission. If
+ * the driver reports this flag, it should also add %IEEE80211_RADIOTAP_MCS_HAVE_FMT
+ * to hw.radiotap_mcs_details to advertise that fact.
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = 1<<0,
@@ -681,6 +684,7 @@ enum mac80211_rx_flags {
RX_FLAG_40MHZ = 1<<10,
RX_FLAG_SHORT_GI = 1<<11,
RX_FLAG_NO_SIGNAL_VAL = 1<<12,
+ RX_FLAG_HT_GF = 1<<13,
};
/**
@@ -939,7 +943,7 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
* CCMP key if it requires CCMP encryption of management frames (MFP) to
* be done in software.
* @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
- * for a CCMP key if space should be prepared for the IV, but the IV
+ * if space should be prepared for the IV, but the IV
* itself should not be generated. Do not set together with
* @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
*/
@@ -1288,6 +1292,11 @@ enum ieee80211_hw_flags {
*
* @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX
* (if %IEEE80211_HW_QUEUE_CONTROL is set)
+ *
+ * @radiotap_mcs_details: lists which MCS information the HW can
+ * report; by default it is set to _MCS, _GI and _BW but doesn't
+ * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_* values; only
+ * adding _FMT is supported today.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -1309,6 +1318,7 @@ struct ieee80211_hw {
u8 max_rx_aggregation_subframes;
u8 max_tx_aggregation_subframes;
u8 offchannel_tx_hw_queue;
+ u8 radiotap_mcs_details;
};
/**
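For illustration, a hedged driver-side sketch of how RX_FLAG_HT_GF and radiotap_mcs_details are meant to be used together (the my_* functions and the greenfield condition are assumptions; the flags, the field, IEEE80211_RADIOTAP_MCS_HAVE_FMT and IEEE80211_SKB_RXCB() are existing names):

static void my_hw_setup(struct ieee80211_hw *hw)
{
	/* advertise that this driver can report the HT format (GF vs. mixed) */
	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
}

static void my_fill_rx_status(struct sk_buff *skb, bool greenfield)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	if (greenfield)	/* assumption: driver-specific detection */
		status->flag |= RX_FLAG_HT | RX_FLAG_HT_GF;
}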
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index aca65a5a9d0d..4467c9460857 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -39,6 +39,8 @@ struct nfc_hci_ops {
int (*data_exchange) (struct nfc_hci_dev *hdev,
struct nfc_target *target,
struct sk_buff *skb, struct sk_buff **res_skb);
+ int (*check_presence)(struct nfc_hci_dev *hdev,
+ struct nfc_target *target);
};
#define NFC_HCI_MAX_CUSTOM_GATES 15
@@ -82,10 +84,6 @@ struct nfc_hci_dev {
u8 gate2pipe[NFC_HCI_MAX_GATES];
- bool poll_started;
- struct nfc_target *targets;
- int target_count;
-
u8 sw_romlib;
u8 sw_patch;
u8 sw_flashlib_major;
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 9a2505a5b8de..b7ca4a2a1d72 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -48,26 +48,28 @@ struct nfc_dev;
typedef void (*data_exchange_cb_t)(void *context, struct sk_buff *skb,
int err);
+struct nfc_target;
+
struct nfc_ops {
int (*dev_up)(struct nfc_dev *dev);
int (*dev_down)(struct nfc_dev *dev);
int (*start_poll)(struct nfc_dev *dev, u32 protocols);
void (*stop_poll)(struct nfc_dev *dev);
- int (*dep_link_up)(struct nfc_dev *dev, int target_idx, u8 comm_mode,
- u8 *gb, size_t gb_len);
+ int (*dep_link_up)(struct nfc_dev *dev, struct nfc_target *target,
+ u8 comm_mode, u8 *gb, size_t gb_len);
int (*dep_link_down)(struct nfc_dev *dev);
- int (*activate_target)(struct nfc_dev *dev, u32 target_idx,
+ int (*activate_target)(struct nfc_dev *dev, struct nfc_target *target,
u32 protocol);
- void (*deactivate_target)(struct nfc_dev *dev, u32 target_idx);
- int (*data_exchange)(struct nfc_dev *dev, u32 target_idx,
+ void (*deactivate_target)(struct nfc_dev *dev,
+ struct nfc_target *target);
+ int (*data_exchange)(struct nfc_dev *dev, struct nfc_target *target,
struct sk_buff *skb, data_exchange_cb_t cb,
void *cb_context);
- int (*check_presence)(struct nfc_dev *dev, u32 target_idx);
+ int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target);
};
#define NFC_TARGET_IDX_ANY -1
#define NFC_MAX_GT_LEN 48
-#define NFC_TARGET_IDX_NONE 0xffffffff
struct nfc_target {
u32 idx;
@@ -95,11 +97,10 @@ struct nfc_dev {
struct nfc_target *targets;
int n_targets;
int targets_generation;
- spinlock_t targets_lock;
struct device dev;
bool dev_up;
bool polling;
- u32 activated_target_idx;
+ struct nfc_target *active_target;
bool dep_link_up;
u32 dep_rf_mode;
struct nfc_genl_data genl_data;
diff --git a/include/net/nfc/shdlc.h b/include/net/nfc/shdlc.h
index 1071987d0408..ab06afd462da 100644
--- a/include/net/nfc/shdlc.h
+++ b/include/net/nfc/shdlc.h
@@ -35,6 +35,8 @@ struct nfc_shdlc_ops {
int (*data_exchange) (struct nfc_shdlc *shdlc,
struct nfc_target *target,
struct sk_buff *skb, struct sk_buff **res_skb);
+ int (*check_presence)(struct nfc_shdlc *shdlc,
+ struct nfc_target *target);
};
enum shdlc_state {
diff --git a/include/net/sock.h b/include/net/sock.h
index d89f0582b6b6..4a4521699563 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -46,6 +46,7 @@
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
+#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h> /* struct sk_buff */
@@ -921,12 +922,23 @@ struct proto {
#endif
};
+/*
+ * Bits in struct cg_proto.flags
+ */
+enum cg_proto_flags {
+ /* Currently active and new sockets should be assigned to cgroups */
+ MEMCG_SOCK_ACTIVE,
+ /* It was ever activated; we must disarm static keys on destruction */
+ MEMCG_SOCK_ACTIVATED,
+};
+
struct cg_proto {
void (*enter_memory_pressure)(struct sock *sk);
struct res_counter *memory_allocated; /* Current allocated memory. */
struct percpu_counter *sockets_allocated; /* Current number of sockets. */
int *memory_pressure;
long *sysctl_mem;
+ unsigned long flags;
/*
* memcg field is used to find which memcg we belong directly
* Each memcg struct can hold more than one cg_proto, so container_of
@@ -942,6 +954,16 @@ struct cg_proto {
extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);
+static inline bool memcg_proto_active(struct cg_proto *cg_proto)
+{
+ return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+}
+
+static inline bool memcg_proto_activated(struct cg_proto *cg_proto)
+{
+ return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags);
+}
+
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
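A minimal sketch of how the two cg_proto flag bits above differ in use (the my_* function is an assumption; only the bits and the helpers come from this patch). New sockets would then be charged only while memcg_proto_active() returns true:

static void my_activate_cg_proto(struct cg_proto *cg_proto)
{
	/* ACTIVATED is sticky: it records that the static keys were ever
	 * armed, so they can be disarmed again on destruction */
	set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags);
	/* ACTIVE gates charging of new sockets and may be cleared later */
	set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
}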
diff --git a/include/scsi/fcoe_sysfs.h b/include/scsi/fcoe_sysfs.h
new file mode 100644
index 000000000000..604cb9bb3e76
--- /dev/null
+++ b/include/scsi/fcoe_sysfs.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2011-2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef FCOE_SYSFS
+#define FCOE_SYSFS
+
+#include <linux/if_ether.h>
+#include <linux/device.h>
+#include <scsi/fc/fc_fcoe.h>
+
+struct fcoe_ctlr_device;
+struct fcoe_fcf_device;
+
+struct fcoe_sysfs_function_template {
+ void (*get_fcoe_ctlr_link_fail)(struct fcoe_ctlr_device *);
+ void (*get_fcoe_ctlr_vlink_fail)(struct fcoe_ctlr_device *);
+ void (*get_fcoe_ctlr_miss_fka)(struct fcoe_ctlr_device *);
+ void (*get_fcoe_ctlr_symb_err)(struct fcoe_ctlr_device *);
+ void (*get_fcoe_ctlr_err_block)(struct fcoe_ctlr_device *);
+ void (*get_fcoe_ctlr_fcs_error)(struct fcoe_ctlr_device *);
+ void (*get_fcoe_ctlr_mode)(struct fcoe_ctlr_device *);
+ void (*get_fcoe_fcf_selected)(struct fcoe_fcf_device *);
+ void (*get_fcoe_fcf_vlan_id)(struct fcoe_fcf_device *);
+};
+
+#define dev_to_ctlr(d) \
+ container_of((d), struct fcoe_ctlr_device, dev)
+
+enum fip_conn_type {
+ FIP_CONN_TYPE_UNKNOWN,
+ FIP_CONN_TYPE_FABRIC,
+ FIP_CONN_TYPE_VN2VN,
+};
+
+struct fcoe_ctlr_device {
+ u32 id;
+
+ struct device dev;
+ struct fcoe_sysfs_function_template *f;
+
+ struct list_head fcfs;
+ char work_q_name[20];
+ struct workqueue_struct *work_q;
+ char devloss_work_q_name[20];
+ struct workqueue_struct *devloss_work_q;
+ struct mutex lock;
+
+ int fcf_dev_loss_tmo;
+ enum fip_conn_type mode;
+
+ /* expected in host order for displaying */
+ struct fcoe_fc_els_lesb lesb;
+};
+
+static inline void *fcoe_ctlr_device_priv(const struct fcoe_ctlr_device *ctlr)
+{
+ return (void *)(ctlr + 1);
+}
+
+/* fcf states */
+enum fcf_state {
+ FCOE_FCF_STATE_UNKNOWN,
+ FCOE_FCF_STATE_DISCONNECTED,
+ FCOE_FCF_STATE_CONNECTED,
+ FCOE_FCF_STATE_DELETED,
+};
+
+struct fcoe_fcf_device {
+ u32 id;
+ struct device dev;
+ struct list_head peers;
+ struct work_struct delete_work;
+ struct delayed_work dev_loss_work;
+ u32 dev_loss_tmo;
+ void *priv;
+ enum fcf_state state;
+
+ u64 fabric_name;
+ u64 switch_name;
+ u32 fc_map;
+ u16 vfid;
+ u8 mac[ETH_ALEN];
+ u8 priority;
+ u32 fka_period;
+ u8 selected;
+ u16 vlan_id;
+};
+
+#define dev_to_fcf(d) \
+ container_of((d), struct fcoe_fcf_device, dev)
+/* parentage should never be missing */
+#define fcoe_fcf_dev_to_ctlr_dev(x) \
+ dev_to_ctlr((x)->dev.parent)
+#define fcoe_fcf_device_priv(x) \
+ ((x)->priv)
+
+struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+ struct fcoe_sysfs_function_template *f,
+ int priv_size);
+void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *);
+struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *,
+ struct fcoe_fcf_device *);
+void fcoe_fcf_device_delete(struct fcoe_fcf_device *);
+
+int __init fcoe_sysfs_setup(void);
+void __exit fcoe_sysfs_teardown(void);
+
+#endif /* FCOE_SYSFS */
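For illustration, a hedged sketch of the LLD-facing side of this new header (the my_* names and the private struct are assumptions; fcoe_ctlr_get_fip_mode() is the helper declared in the libfcoe.h hunk below):

static struct fcoe_sysfs_function_template my_fcoe_sysfs_templ = {
	.get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
};

struct my_ctlr_priv {
	int example_state;		/* hypothetical per-controller data */
};

static struct fcoe_ctlr_device *my_create_ctlr_dev(struct device *parent)
{
	struct fcoe_ctlr_device *ctlr_dev;
	struct my_ctlr_priv *priv;

	ctlr_dev = fcoe_ctlr_device_add(parent, &my_fcoe_sysfs_templ,
					sizeof(*priv));
	if (!ctlr_dev)
		return NULL;

	priv = fcoe_ctlr_device_priv(ctlr_dev);	/* memory right after ctlr_dev */
	priv->example_state = 0;
	return ctlr_dev;
}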
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index cfdb55f0937e..22b07cc99808 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -29,6 +29,7 @@
#include <linux/random.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
+#include <scsi/fcoe_sysfs.h>
#define FCOE_MAX_CMD_LEN 16 /* Supported CDB length */
@@ -159,8 +160,24 @@ struct fcoe_ctlr {
};
/**
+ * fcoe_ctlr_priv() - Return the private data from a fcoe_ctlr
+ * @ctlr: The fcoe_ctlr whose private data will be returned
+ */
+static inline void *fcoe_ctlr_priv(const struct fcoe_ctlr *ctlr)
+{
+ return (void *)(ctlr + 1);
+}
+
+#define fcoe_ctlr_to_ctlr_dev(x) \
+ (struct fcoe_ctlr_device *)(((struct fcoe_ctlr_device *)(x)) - 1)
+
+/**
* struct fcoe_fcf - Fibre-Channel Forwarder
* @list: list linkage
+ * @event_work: Work for FC Transport actions queue
+ * @event: The event to be processed
+ * @fip: The controller that the FCF was discovered on
+ * @fcf_dev: The associated fcoe_fcf_device instance
* @time: system time (jiffies) when an advertisement was last received
* @switch_name: WWN of switch from advertisement
* @fabric_name: WWN of fabric from advertisement
@@ -182,6 +199,9 @@ struct fcoe_ctlr {
*/
struct fcoe_fcf {
struct list_head list;
+ struct work_struct event_work;
+ struct fcoe_ctlr *fip;
+ struct fcoe_fcf_device *fcf_dev;
unsigned long time;
u64 switch_name;
@@ -198,6 +218,9 @@ struct fcoe_fcf {
u8 fd_flags:1;
};
+#define fcoe_fcf_to_fcf_dev(x) \
+ ((x)->fcf_dev)
+
/**
* struct fcoe_rport - VN2VN remote port
* @time: time of create or last beacon packet received from node
@@ -333,6 +356,10 @@ void fcoe_queue_timer(ulong lport);
int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
struct fcoe_percpu_s *fps);
+/* FCoE Sysfs helpers */
+void fcoe_fcf_get_selected(struct fcoe_fcf_device *);
+void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *);
+
/**
* struct netdev_list
* A mapping from netdevice to fcoe_transport
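The two helpers added above rely on a fixed allocation layout: the struct fcoe_ctlr is expected to live in the private area right behind the struct fcoe_ctlr_device, with the LLD's own data behind that. A hedged sketch of that layout (the function name and lld_priv_size are assumptions):

static struct fcoe_ctlr *my_alloc_ctlr(struct device *parent,
				       struct fcoe_sysfs_function_template *templ,
				       int lld_priv_size)
{
	struct fcoe_ctlr_device *ctlr_dev;
	struct fcoe_ctlr *fip;

	/* layout: [fcoe_ctlr_device][fcoe_ctlr][LLD private data] */
	ctlr_dev = fcoe_ctlr_device_add(parent, templ,
					sizeof(*fip) + lld_priv_size);
	if (!ctlr_dev)
		return NULL;

	fip = fcoe_ctlr_device_priv(ctlr_dev);
	/* fcoe_ctlr_to_ctlr_dev() undoes the offset by pointer arithmetic */
	WARN_ON(fcoe_ctlr_to_ctlr_dev(fip) != ctlr_dev);
	return fip;
}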
diff --git a/include/trace/events/jbd.h b/include/trace/events/jbd.h
index aff64d82d713..da6f2591c25e 100644
--- a/include/trace/events/jbd.h
+++ b/include/trace/events/jbd.h
@@ -36,19 +36,17 @@ DECLARE_EVENT_CLASS(jbd_commit,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( char, sync_commit )
__field( int, transaction )
),
TP_fast_assign(
__entry->dev = journal->j_fs_dev->bd_dev;
- __entry->sync_commit = commit_transaction->t_synchronous_commit;
__entry->transaction = commit_transaction->t_tid;
),
- TP_printk("dev %d,%d transaction %d sync %d",
+ TP_printk("dev %d,%d transaction %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction, __entry->sync_commit)
+ __entry->transaction)
);
DEFINE_EVENT(jbd_commit, jbd_start_commit,
@@ -87,19 +85,17 @@ TRACE_EVENT(jbd_drop_transaction,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( char, sync_commit )
__field( int, transaction )
),
TP_fast_assign(
__entry->dev = journal->j_fs_dev->bd_dev;
- __entry->sync_commit = commit_transaction->t_synchronous_commit;
__entry->transaction = commit_transaction->t_tid;
),
- TP_printk("dev %d,%d transaction %d sync %d",
+ TP_printk("dev %d,%d transaction %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction, __entry->sync_commit)
+ __entry->transaction)
);
TRACE_EVENT(jbd_end_commit,
@@ -109,21 +105,19 @@ TRACE_EVENT(jbd_end_commit,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( char, sync_commit )
__field( int, transaction )
__field( int, head )
),
TP_fast_assign(
__entry->dev = journal->j_fs_dev->bd_dev;
- __entry->sync_commit = commit_transaction->t_synchronous_commit;
__entry->transaction = commit_transaction->t_tid;
__entry->head = journal->j_tail_sequence;
),
- TP_printk("dev %d,%d transaction %d sync %d head %d",
+ TP_printk("dev %d,%d transaction %d head %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction, __entry->sync_commit, __entry->head)
+ __entry->transaction, __entry->head)
);
TRACE_EVENT(jbd_do_submit_data,
@@ -133,19 +127,17 @@ TRACE_EVENT(jbd_do_submit_data,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( char, sync_commit )
__field( int, transaction )
),
TP_fast_assign(
__entry->dev = journal->j_fs_dev->bd_dev;
- __entry->sync_commit = commit_transaction->t_synchronous_commit;
__entry->transaction = commit_transaction->t_tid;
),
- TP_printk("dev %d,%d transaction %d sync %d",
+ TP_printk("dev %d,%d transaction %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction, __entry->sync_commit)
+ __entry->transaction)
);
TRACE_EVENT(jbd_cleanup_journal_tail,
@@ -177,24 +169,23 @@ TRACE_EVENT(jbd_cleanup_journal_tail,
__entry->block_nr, __entry->freed)
);
-TRACE_EVENT(jbd_update_superblock_end,
- TP_PROTO(journal_t *journal, int wait),
+TRACE_EVENT(journal_write_superblock,
+ TP_PROTO(journal_t *journal, int write_op),
- TP_ARGS(journal, wait),
+ TP_ARGS(journal, write_op),
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( int, wait )
+ __field( int, write_op )
),
TP_fast_assign(
__entry->dev = journal->j_fs_dev->bd_dev;
- __entry->wait = wait;
+ __entry->write_op = write_op;
),
- TP_printk("dev %d,%d wait %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->wait)
+ TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
+ MINOR(__entry->dev), __entry->write_op)
);
#endif /* _TRACE_JBD_H */
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index f64560e204bc..bab3b87e4064 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -13,7 +13,7 @@
#define RECLAIM_WB_ANON 0x0001u
#define RECLAIM_WB_FILE 0x0002u
#define RECLAIM_WB_MIXED 0x0010u
-#define RECLAIM_WB_SYNC 0x0004u
+#define RECLAIM_WB_SYNC 0x0004u /* Unused, all reclaim async */
#define RECLAIM_WB_ASYNC 0x0008u
#define show_reclaim_flags(flags) \
@@ -25,15 +25,15 @@
{RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \
) : "RECLAIM_WB_NONE"
-#define trace_reclaim_flags(page, sync) ( \
+#define trace_reclaim_flags(page) ( \
(page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
- (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+ (RECLAIM_WB_ASYNC) \
)
-#define trace_shrink_flags(file, sync) ( \
- (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_MIXED : \
- (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \
- (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+#define trace_shrink_flags(file) \
+ ( \
+ (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
+ (RECLAIM_WB_ASYNC) \
)
TRACE_EVENT(mm_vmscan_kswapd_sleep,
@@ -263,22 +263,16 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
unsigned long nr_requested,
unsigned long nr_scanned,
unsigned long nr_taken,
- unsigned long nr_lumpy_taken,
- unsigned long nr_lumpy_dirty,
- unsigned long nr_lumpy_failed,
isolate_mode_t isolate_mode,
int file),
- TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file),
+ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file),
TP_STRUCT__entry(
__field(int, order)
__field(unsigned long, nr_requested)
__field(unsigned long, nr_scanned)
__field(unsigned long, nr_taken)
- __field(unsigned long, nr_lumpy_taken)
- __field(unsigned long, nr_lumpy_dirty)
- __field(unsigned long, nr_lumpy_failed)
__field(isolate_mode_t, isolate_mode)
__field(int, file)
),
@@ -288,22 +282,16 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
__entry->nr_requested = nr_requested;
__entry->nr_scanned = nr_scanned;
__entry->nr_taken = nr_taken;
- __entry->nr_lumpy_taken = nr_lumpy_taken;
- __entry->nr_lumpy_dirty = nr_lumpy_dirty;
- __entry->nr_lumpy_failed = nr_lumpy_failed;
__entry->isolate_mode = isolate_mode;
__entry->file = file;
),
- TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu contig_taken=%lu contig_dirty=%lu contig_failed=%lu file=%d",
+ TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu file=%d",
__entry->isolate_mode,
__entry->order,
__entry->nr_requested,
__entry->nr_scanned,
__entry->nr_taken,
- __entry->nr_lumpy_taken,
- __entry->nr_lumpy_dirty,
- __entry->nr_lumpy_failed,
__entry->file)
);
@@ -313,13 +301,10 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,
unsigned long nr_requested,
unsigned long nr_scanned,
unsigned long nr_taken,
- unsigned long nr_lumpy_taken,
- unsigned long nr_lumpy_dirty,
- unsigned long nr_lumpy_failed,
isolate_mode_t isolate_mode,
int file),
- TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file)
+ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
);
@@ -329,13 +314,10 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate,
unsigned long nr_requested,
unsigned long nr_scanned,
unsigned long nr_taken,
- unsigned long nr_lumpy_taken,
- unsigned long nr_lumpy_dirty,
- unsigned long nr_lumpy_failed,
isolate_mode_t isolate_mode,
int file),
- TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file)
+ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
);
@@ -395,88 +377,6 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
show_reclaim_flags(__entry->reclaim_flags))
);
-TRACE_EVENT(replace_swap_token,
- TP_PROTO(struct mm_struct *old_mm,
- struct mm_struct *new_mm),
-
- TP_ARGS(old_mm, new_mm),
-
- TP_STRUCT__entry(
- __field(struct mm_struct*, old_mm)
- __field(unsigned int, old_prio)
- __field(struct mm_struct*, new_mm)
- __field(unsigned int, new_prio)
- ),
-
- TP_fast_assign(
- __entry->old_mm = old_mm;
- __entry->old_prio = old_mm ? old_mm->token_priority : 0;
- __entry->new_mm = new_mm;
- __entry->new_prio = new_mm->token_priority;
- ),
-
- TP_printk("old_token_mm=%p old_prio=%u new_token_mm=%p new_prio=%u",
- __entry->old_mm, __entry->old_prio,
- __entry->new_mm, __entry->new_prio)
-);
-
-DECLARE_EVENT_CLASS(put_swap_token_template,
- TP_PROTO(struct mm_struct *swap_token_mm),
-
- TP_ARGS(swap_token_mm),
-
- TP_STRUCT__entry(
- __field(struct mm_struct*, swap_token_mm)
- ),
-
- TP_fast_assign(
- __entry->swap_token_mm = swap_token_mm;
- ),
-
- TP_printk("token_mm=%p", __entry->swap_token_mm)
-);
-
-DEFINE_EVENT(put_swap_token_template, put_swap_token,
- TP_PROTO(struct mm_struct *swap_token_mm),
- TP_ARGS(swap_token_mm)
-);
-
-DEFINE_EVENT_CONDITION(put_swap_token_template, disable_swap_token,
- TP_PROTO(struct mm_struct *swap_token_mm),
- TP_ARGS(swap_token_mm),
- TP_CONDITION(swap_token_mm != NULL)
-);
-
-TRACE_EVENT_CONDITION(update_swap_token_priority,
- TP_PROTO(struct mm_struct *mm,
- unsigned int old_prio,
- struct mm_struct *swap_token_mm),
-
- TP_ARGS(mm, old_prio, swap_token_mm),
-
- TP_CONDITION(mm->token_priority != old_prio),
-
- TP_STRUCT__entry(
- __field(struct mm_struct*, mm)
- __field(unsigned int, old_prio)
- __field(unsigned int, new_prio)
- __field(struct mm_struct*, swap_token_mm)
- __field(unsigned int, swap_token_prio)
- ),
-
- TP_fast_assign(
- __entry->mm = mm;
- __entry->old_prio = old_prio;
- __entry->new_prio = mm->token_priority;
- __entry->swap_token_mm = swap_token_mm;
- __entry->swap_token_prio = swap_token_mm ? swap_token_mm->token_priority : 0;
- ),
-
- TP_printk("mm=%p old_prio=%u new_prio=%u swap_token_mm=%p token_prio=%u",
- __entry->mm, __entry->old_prio, __entry->new_prio,
- __entry->swap_token_mm, __entry->swap_token_prio)
-);
-
#endif /* _TRACE_VMSCAN_H */
/* This part must be outside protection */
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 7b81887b023f..b453d92c2253 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -372,6 +372,35 @@ TRACE_EVENT(balance_dirty_pages,
)
);
+TRACE_EVENT(writeback_sb_inodes_requeue,
+
+ TP_PROTO(struct inode *inode),
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(unsigned long, ino)
+ __field(unsigned long, state)
+ __field(unsigned long, dirtied_when)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name,
+ dev_name(inode_to_bdi(inode)->dev), 32);
+ __entry->ino = inode->i_ino;
+ __entry->state = inode->i_state;
+ __entry->dirtied_when = inode->dirtied_when;
+ ),
+
+ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
+ __entry->name,
+ __entry->ino,
+ show_inode_state(__entry->state),
+ __entry->dirtied_when,
+ (jiffies - __entry->dirtied_when) / HZ
+ )
+);
+
DECLARE_EVENT_CLASS(writeback_congest_waited_template,
TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
@@ -450,13 +479,6 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
)
);
-DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_requeue,
- TP_PROTO(struct inode *inode,
- struct writeback_control *wbc,
- unsigned long nr_to_write),
- TP_ARGS(inode, wbc, nr_to_write)
-);
-
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
TP_PROTO(struct inode *inode,
struct writeback_control *wbc,
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
new file mode 100644
index 000000000000..48a9c0171b65
--- /dev/null
+++ b/include/xen/acpi.h
@@ -0,0 +1,58 @@
+/******************************************************************************
+ * acpi.h
+ * acpi file for domain 0 kernel
+ *
+ * Copyright (c) 2011 Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ * Copyright (c) 2011 Yu Ke <ke.yu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _XEN_ACPI_H
+#define _XEN_ACPI_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_XEN_DOM0
+#include <asm/xen/hypervisor.h>
+#include <xen/xen.h>
+#include <linux/acpi.h>
+
+int xen_acpi_notify_hypervisor_state(u8 sleep_state,
+ u32 pm1a_cnt, u32 pm1b_cnd);
+
+static inline void xen_acpi_sleep_register(void)
+{
+ if (xen_initial_domain())
+ acpi_os_set_prepare_sleep(
+ &xen_acpi_notify_hypervisor_state);
+}
+#else
+static inline void xen_acpi_sleep_register(void)
+{
+}
+#endif
+
+#endif /* _XEN_ACPI_H */
diff --git a/include/xen/events.h b/include/xen/events.h
index 0f773708e02c..04399b28e821 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -103,6 +103,9 @@ int xen_irq_from_pirq(unsigned pirq);
/* Return the pirq allocated to the irq. */
int xen_pirq_from_irq(unsigned irq);
+/* Return the irq allocated to the gsi */
+int xen_irq_from_gsi(unsigned gsi);
+
/* Determine whether to ignore this IRQ if it is passed to a guest. */
int xen_test_irq_shared(int irq);
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 15f8a00ff003..11e27c3af3cb 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -46,6 +46,8 @@
#include <xen/features.h>
+#define GNTTAB_RESERVED_XENSTORE 1
+
/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
#define NR_GRANT_FRAMES 4
diff --git a/include/xen/xenbus_dev.h b/include/xen/xenbus_dev.h
index ac5f0fe47ed9..bbee8c6a349d 100644
--- a/include/xen/xenbus_dev.h
+++ b/include/xen/xenbus_dev.h
@@ -38,4 +38,7 @@
#define IOCTL_XENBUS_BACKEND_EVTCHN \
_IOC(_IOC_NONE, 'B', 0, 0)
+#define IOCTL_XENBUS_BACKEND_SETUP \
+ _IOC(_IOC_NONE, 'B', 1, 0)
+
#endif /* __LINUX_XEN_XENBUS_DEV_H__ */
diff --git a/init/Kconfig b/init/Kconfig
index ccb5248474c2..d07dcf9fc8a9 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -167,7 +167,7 @@ config KERNEL_BZIP2
depends on HAVE_KERNEL_BZIP2
help
Its compression ratio and speed is intermediate.
- Decompression speed is slowest among the three. The kernel
+ Decompression speed is slowest among the choices. The kernel
size is about 10% smaller with bzip2, in comparison to gzip.
Bzip2 uses a large amount of memory. For modern kernels you
will need at least 8MB RAM or more for booting.
@@ -176,10 +176,9 @@ config KERNEL_LZMA
bool "LZMA"
depends on HAVE_KERNEL_LZMA
help
- The most recent compression algorithm.
- Its ratio is best, decompression speed is between the other
- two. Compression is slowest. The kernel size is about 33%
- smaller with LZMA in comparison to gzip.
+ This compression algorithm's ratio is best. Decompression speed
+ is between gzip and bzip2. Compression is slowest.
+ The kernel size is about 33% smaller with LZMA in comparison to gzip.
config KERNEL_XZ
bool "XZ"
@@ -200,7 +199,7 @@ config KERNEL_LZO
bool "LZO"
depends on HAVE_KERNEL_LZO
help
- Its compression ratio is the poorest among the 4. The kernel
+ Its compression ratio is the poorest among the choices. The kernel
size is about 10% bigger than gzip; however its speed
(both compression and decompression) is the fastest.
@@ -390,6 +389,7 @@ config AUDIT_LOGINUID_IMMUTABLE
but may not be backwards compatible with older init systems.
source "kernel/irq/Kconfig"
+source "kernel/time/Kconfig"
menu "RCU Subsystem"
@@ -802,7 +802,7 @@ config RT_GROUP_SCHED
endif #CGROUP_SCHED
config BLK_CGROUP
- tristate "Block IO controller"
+ bool "Block IO controller"
depends on BLOCK
default n
---help---
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 42b0707c3481..d3f0aeed2d39 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers. To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ctype.h>
@@ -330,7 +340,7 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
if (err)
return err;
- sys_chdir((const char __user __force *)"/root");
+ sys_chdir("/root");
s = current->fs->pwd.dentry->d_sb;
ROOT_DEV = s->s_dev;
printk(KERN_INFO
@@ -556,5 +566,5 @@ void __init prepare_namespace(void)
out:
devtmpfs_mount("dev");
sys_mount(".", "/", NULL, MS_MOVE, NULL);
- sys_chroot((const char __user __force *)".");
+ sys_chroot(".");
}
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 9047330c73e9..135959a276be 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers. To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
#include <linux/unistd.h>
#include <linux/kernel.h>
#include <linux/fs.h>
diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
index 32c4799b8c91..8cb6db54285b 100644
--- a/init/do_mounts_md.c
+++ b/init/do_mounts_md.c
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers. To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
#include <linux/delay.h>
#include <linux/raid/md_u.h>
#include <linux/raid/md_p.h>
@@ -283,7 +293,7 @@ static void __init autodetect_raid(void)
wait_for_device_probe();
- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
+ fd = sys_open("/dev/md0", 0, 0);
if (fd >= 0) {
sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
sys_close(fd);
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 6212586df29a..6be2879cca66 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -1,3 +1,12 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers. To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
#include <linux/kernel.h>
#include <linux/fs.h>
@@ -181,7 +190,7 @@ int __init rd_load_image(char *from)
char rotator[4] = { '|' , '/' , '-' , '\\' };
#endif
- out_fd = sys_open((const char __user __force *) "/dev/ram", O_RDWR, 0);
+ out_fd = sys_open("/dev/ram", O_RDWR, 0);
if (out_fd < 0)
goto out;
@@ -280,7 +289,7 @@ noclose_input:
sys_close(out_fd);
out:
kfree(buf);
- sys_unlink((const char __user __force *) "/dev/ram");
+ sys_unlink("/dev/ram");
return res;
}
diff --git a/init/initramfs.c b/init/initramfs.c
index 8216c303b082..84c6bf111300 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -1,3 +1,13 @@
+/*
+ * Many of the syscalls used in this file expect some of the arguments
+ * to be __user pointers not __kernel pointers. To limit the sparse
+ * noise, turn off sparse checking for this file.
+ */
+#ifdef __CHECKER__
+#undef __CHECKER__
+#warning "Sparse checking disabled for this file"
+#endif
+
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
@@ -74,7 +84,7 @@ static void __init free_hash(void)
}
}
-static long __init do_utime(char __user *filename, time_t mtime)
+static long __init do_utime(char *filename, time_t mtime)
{
struct timespec t[2];
@@ -529,7 +539,7 @@ static void __init clean_rootfs(void)
struct linux_dirent64 *dirp;
int num;
- fd = sys_open((const char __user __force *) "/", O_RDONLY, 0);
+ fd = sys_open("/", O_RDONLY, 0);
WARN_ON(fd < 0);
if (fd < 0)
return;
@@ -589,7 +599,7 @@ static int __init populate_rootfs(void)
}
printk(KERN_INFO "rootfs image is not initramfs (%s)"
"; looks like an initrd\n", err);
- fd = sys_open((const char __user __force *) "/initrd.image",
+ fd = sys_open("/initrd.image",
O_WRONLY|O_CREAT, 0700);
if (fd >= 0) {
sys_write(fd, (char *)initrd_start,
diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
index 0c09366b96f3..383d638340b8 100644
--- a/ipc/mq_sysctl.c
+++ b/ipc/mq_sysctl.c
@@ -13,15 +13,6 @@
#include <linux/ipc_namespace.h>
#include <linux/sysctl.h>
-/*
- * Define the ranges various user-specified maximum values can
- * be set to.
- */
-#define MIN_MSGMAX 1 /* min value for msg_max */
-#define MAX_MSGMAX HARD_MSGMAX /* max value for msg_max */
-#define MIN_MSGSIZEMAX 128 /* min value for msgsize_max */
-#define MAX_MSGSIZEMAX (8192*128) /* max value for msgsize_max */
-
#ifdef CONFIG_PROC_SYSCTL
static void *get_mq(ctl_table *table)
{
@@ -31,16 +22,6 @@ static void *get_mq(ctl_table *table)
return which;
}
-static int proc_mq_dointvec(ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- struct ctl_table mq_table;
- memcpy(&mq_table, table, sizeof(mq_table));
- mq_table.data = get_mq(table);
-
- return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
-}
-
static int proc_mq_dointvec_minmax(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -52,15 +33,17 @@ static int proc_mq_dointvec_minmax(ctl_table *table, int write,
lenp, ppos);
}
#else
-#define proc_mq_dointvec NULL
#define proc_mq_dointvec_minmax NULL
#endif
+static int msg_queues_limit_min = MIN_QUEUESMAX;
+static int msg_queues_limit_max = HARD_QUEUESMAX;
+
static int msg_max_limit_min = MIN_MSGMAX;
-static int msg_max_limit_max = MAX_MSGMAX;
+static int msg_max_limit_max = HARD_MSGMAX;
static int msg_maxsize_limit_min = MIN_MSGSIZEMAX;
-static int msg_maxsize_limit_max = MAX_MSGSIZEMAX;
+static int msg_maxsize_limit_max = HARD_MSGSIZEMAX;
static ctl_table mq_sysctls[] = {
{
@@ -68,7 +51,9 @@ static ctl_table mq_sysctls[] = {
.data = &init_ipc_ns.mq_queues_max,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_mq_dointvec,
+ .proc_handler = proc_mq_dointvec_minmax,
+ .extra1 = &msg_queues_limit_min,
+ .extra2 = &msg_queues_limit_max,
},
{
.procname = "msg_max",
@@ -88,6 +73,24 @@ static ctl_table mq_sysctls[] = {
.extra1 = &msg_maxsize_limit_min,
.extra2 = &msg_maxsize_limit_max,
},
+ {
+ .procname = "msg_default",
+ .data = &init_ipc_ns.mq_msg_default,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_mq_dointvec_minmax,
+ .extra1 = &msg_max_limit_min,
+ .extra2 = &msg_max_limit_max,
+ },
+ {
+ .procname = "msgsize_default",
+ .data = &init_ipc_ns.mq_msgsize_default,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_mq_dointvec_minmax,
+ .extra1 = &msg_maxsize_limit_min,
+ .extra2 = &msg_maxsize_limit_max,
+ },
{}
};
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index b6a0d46fbad7..8ce57691e7b6 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -24,6 +24,7 @@
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
@@ -49,6 +50,12 @@
#define STATE_PENDING 1
#define STATE_READY 2
+struct posix_msg_tree_node {
+ struct rb_node rb_node;
+ struct list_head msg_list;
+ int priority;
+};
+
struct ext_wait_queue { /* queue of sleeping tasks */
struct task_struct *task;
struct list_head list;
@@ -61,7 +68,8 @@ struct mqueue_inode_info {
struct inode vfs_inode;
wait_queue_head_t wait_q;
- struct msg_msg **messages;
+ struct rb_root msg_tree;
+ struct posix_msg_tree_node *node_cache;
struct mq_attr attr;
struct sigevent notify;
@@ -109,6 +117,103 @@ static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
return ns;
}
+/* Auxiliary functions to manipulate messages' list */
+static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
+{
+ struct rb_node **p, *parent = NULL;
+ struct posix_msg_tree_node *leaf;
+
+ p = &info->msg_tree.rb_node;
+ while (*p) {
+ parent = *p;
+ leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
+
+ if (likely(leaf->priority == msg->m_type))
+ goto insert_msg;
+ else if (msg->m_type < leaf->priority)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ if (info->node_cache) {
+ leaf = info->node_cache;
+ info->node_cache = NULL;
+ } else {
+ leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
+ if (!leaf)
+ return -ENOMEM;
+ rb_init_node(&leaf->rb_node);
+ INIT_LIST_HEAD(&leaf->msg_list);
+ info->qsize += sizeof(*leaf);
+ }
+ leaf->priority = msg->m_type;
+ rb_link_node(&leaf->rb_node, parent, p);
+ rb_insert_color(&leaf->rb_node, &info->msg_tree);
+insert_msg:
+ info->attr.mq_curmsgs++;
+ info->qsize += msg->m_ts;
+ list_add_tail(&msg->m_list, &leaf->msg_list);
+ return 0;
+}
+
+static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
+{
+ struct rb_node **p, *parent = NULL;
+ struct posix_msg_tree_node *leaf;
+ struct msg_msg *msg;
+
+try_again:
+ p = &info->msg_tree.rb_node;
+ while (*p) {
+ parent = *p;
+ /*
+ * During insert, low priorities go to the left and high to the
+ * right. On receive, we want the highest priorities first, so
+ * walk all the way to the right.
+ */
+ p = &(*p)->rb_right;
+ }
+ if (!parent) {
+ if (info->attr.mq_curmsgs) {
+ pr_warn_once("Inconsistency in POSIX message queue, "
+ "no tree element, but supposedly messages "
+ "should exist!\n");
+ info->attr.mq_curmsgs = 0;
+ }
+ return NULL;
+ }
+ leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
+ if (unlikely(list_empty(&leaf->msg_list))) {
+ pr_warn_once("Inconsistency in POSIX message queue, "
+ "empty leaf node but we haven't implemented "
+ "lazy leaf delete!\n");
+ rb_erase(&leaf->rb_node, &info->msg_tree);
+ if (info->node_cache) {
+ info->qsize -= sizeof(*leaf);
+ kfree(leaf);
+ } else {
+ info->node_cache = leaf;
+ }
+ goto try_again;
+ } else {
+ msg = list_first_entry(&leaf->msg_list,
+ struct msg_msg, m_list);
+ list_del(&msg->m_list);
+ if (list_empty(&leaf->msg_list)) {
+ rb_erase(&leaf->rb_node, &info->msg_tree);
+ if (info->node_cache) {
+ info->qsize -= sizeof(*leaf);
+ kfree(leaf);
+ } else {
+ info->node_cache = leaf;
+ }
+ }
+ }
+ info->attr.mq_curmsgs--;
+ info->qsize -= msg->m_ts;
+ return msg;
+}
+
static struct inode *mqueue_get_inode(struct super_block *sb,
struct ipc_namespace *ipc_ns, umode_t mode,
struct mq_attr *attr)
@@ -129,7 +234,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
if (S_ISREG(mode)) {
struct mqueue_inode_info *info;
- unsigned long mq_bytes, mq_msg_tblsz;
+ unsigned long mq_bytes, mq_treesize;
inode->i_fop = &mqueue_file_operations;
inode->i_size = FILENT_SIZE;
@@ -143,20 +248,36 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
info->notify_user_ns = NULL;
info->qsize = 0;
info->user = NULL; /* set when all is ok */
+ info->msg_tree = RB_ROOT;
+ info->node_cache = NULL;
memset(&info->attr, 0, sizeof(info->attr));
- info->attr.mq_maxmsg = ipc_ns->mq_msg_max;
- info->attr.mq_msgsize = ipc_ns->mq_msgsize_max;
+ info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
+ ipc_ns->mq_msg_default);
+ info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
+ ipc_ns->mq_msgsize_default);
if (attr) {
info->attr.mq_maxmsg = attr->mq_maxmsg;
info->attr.mq_msgsize = attr->mq_msgsize;
}
- mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
- info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
- if (!info->messages)
- goto out_inode;
+ /*
+ * We used to allocate a static array of pointers and account
+ * the size of that array as well as one msg_msg struct per
+ * possible message into the queue size. That's no longer
+ * accurate as the queue is now an rbtree and will grow and
+ * shrink depending on usage patterns. We can, however, still
+ * account one msg_msg struct per message, but the nodes are
+ * allocated depending on priority usage, and most programs
+ * only use one, or a handful, of priorities. However, since
+ * this is pinned memory, we need to assume worst case, so
+ * that means the min(mq_maxmsg, max_priorities) * struct
+ * posix_msg_tree_node.
+ */
+ mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+ min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+ sizeof(struct posix_msg_tree_node);
- mq_bytes = (mq_msg_tblsz +
- (info->attr.mq_maxmsg * info->attr.mq_msgsize));
+ mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+ info->attr.mq_msgsize);
spin_lock(&mq_lock);
if (u->mq_bytes + mq_bytes < u->mq_bytes ||
@@ -247,11 +368,11 @@ static void mqueue_evict_inode(struct inode *inode)
{
struct mqueue_inode_info *info;
struct user_struct *user;
- unsigned long mq_bytes;
- int i;
+ unsigned long mq_bytes, mq_treesize;
struct ipc_namespace *ipc_ns;
+ struct msg_msg *msg;
- end_writeback(inode);
+ clear_inode(inode);
if (S_ISDIR(inode->i_mode))
return;
@@ -259,14 +380,19 @@ static void mqueue_evict_inode(struct inode *inode)
ipc_ns = get_ns_from_inode(inode);
info = MQUEUE_I(inode);
spin_lock(&info->lock);
- for (i = 0; i < info->attr.mq_curmsgs; i++)
- free_msg(info->messages[i]);
- kfree(info->messages);
+ while ((msg = msg_get(info)) != NULL)
+ free_msg(msg);
+ kfree(info->node_cache);
spin_unlock(&info->lock);
/* Total amount of bytes accounted for the mqueue */
- mq_bytes = info->attr.mq_maxmsg * (sizeof(struct msg_msg *)
- + info->attr.mq_msgsize);
+ mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+ min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+ sizeof(struct posix_msg_tree_node);
+
+ mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+ info->attr.mq_msgsize);
+
user = info->user;
if (user) {
spin_lock(&mq_lock);
@@ -300,8 +426,9 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry,
error = -EACCES;
goto out_unlock;
}
- if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
- !capable(CAP_SYS_RESOURCE)) {
+ if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
+ (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
+ !capable(CAP_SYS_RESOURCE))) {
error = -ENOSPC;
goto out_unlock;
}
@@ -485,26 +612,6 @@ static struct ext_wait_queue *wq_get_first_waiter(
return list_entry(ptr, struct ext_wait_queue, list);
}
-/* Auxiliary functions to manipulate messages' list */
-static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
-{
- int k;
-
- k = info->attr.mq_curmsgs - 1;
- while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
- info->messages[k + 1] = info->messages[k];
- k--;
- }
- info->attr.mq_curmsgs++;
- info->qsize += ptr->m_ts;
- info->messages[k + 1] = ptr;
-}
-
-static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
-{
- info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
- return info->messages[info->attr.mq_curmsgs];
-}
static inline void set_cookie(struct sk_buff *skb, char code)
{
@@ -585,24 +692,30 @@ static void remove_notification(struct mqueue_inode_info *info)
static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
+ int mq_treesize;
+ unsigned long total_size;
+
if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
- return 0;
+ return -EINVAL;
if (capable(CAP_SYS_RESOURCE)) {
- if (attr->mq_maxmsg > HARD_MSGMAX)
- return 0;
+ if (attr->mq_maxmsg > HARD_MSGMAX ||
+ attr->mq_msgsize > HARD_MSGSIZEMAX)
+ return -EINVAL;
} else {
if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
attr->mq_msgsize > ipc_ns->mq_msgsize_max)
- return 0;
+ return -EINVAL;
}
/* check for overflow */
if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
- return 0;
- if ((unsigned long)(attr->mq_maxmsg * (attr->mq_msgsize
- + sizeof (struct msg_msg *))) <
- (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
- return 0;
- return 1;
+ return -EOVERFLOW;
+ mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
+ min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
+ sizeof(struct posix_msg_tree_node);
+ total_size = attr->mq_maxmsg * attr->mq_msgsize;
+ if (total_size + mq_treesize < total_size)
+ return -EOVERFLOW;
+ return 0;
}
/*
@@ -617,12 +730,21 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
int ret;
if (attr) {
- if (!mq_attr_ok(ipc_ns, attr)) {
- ret = -EINVAL;
+ ret = mq_attr_ok(ipc_ns, attr);
+ if (ret)
goto out;
- }
/* store for use during create */
dentry->d_fsdata = attr;
+ } else {
+ struct mq_attr def_attr;
+
+ def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
+ ipc_ns->mq_msg_default);
+ def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
+ ipc_ns->mq_msgsize_default);
+ ret = mq_attr_ok(ipc_ns, &def_attr);
+ if (ret)
+ goto out;
}
mode &= ~current_umask();
@@ -837,7 +959,8 @@ static inline void pipelined_receive(struct mqueue_inode_info *info)
wake_up_interruptible(&info->wait_q);
return;
}
- msg_insert(sender->msg, info);
+ if (msg_insert(sender->msg, info))
+ return;
list_del(&sender->list);
sender->state = STATE_PENDING;
wake_up_process(sender->task);
@@ -857,7 +980,8 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
struct mqueue_inode_info *info;
ktime_t expires, *timeout = NULL;
struct timespec ts;
- int ret;
+ struct posix_msg_tree_node *new_leaf = NULL;
+ int ret = 0;
if (u_abs_timeout) {
int res = prepare_timeout(u_abs_timeout, &expires, &ts);
@@ -905,34 +1029,60 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
msg_ptr->m_ts = msg_len;
msg_ptr->m_type = msg_prio;
+ /*
+ * msg_insert really wants us to have a valid, spare node struct so
+ * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
+ * fall back to that if necessary.
+ */
+ if (!info->node_cache)
+ new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
+
spin_lock(&info->lock);
+ if (!info->node_cache && new_leaf) {
+ /* Save our speculative allocation into the cache */
+ rb_init_node(&new_leaf->rb_node);
+ INIT_LIST_HEAD(&new_leaf->msg_list);
+ info->node_cache = new_leaf;
+ info->qsize += sizeof(*new_leaf);
+ new_leaf = NULL;
+ } else {
+ kfree(new_leaf);
+ }
+
if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
if (filp->f_flags & O_NONBLOCK) {
- spin_unlock(&info->lock);
ret = -EAGAIN;
} else {
wait.task = current;
wait.msg = (void *) msg_ptr;
wait.state = STATE_NONE;
ret = wq_sleep(info, SEND, timeout, &wait);
+ /*
+ * wq_sleep must be called with info->lock held, and
+ * returns with the lock released
+ */
+ goto out_free;
}
- if (ret < 0)
- free_msg(msg_ptr);
} else {
receiver = wq_get_first_waiter(info, RECV);
if (receiver) {
pipelined_send(info, msg_ptr, receiver);
} else {
/* adds message to the queue */
- msg_insert(msg_ptr, info);
+ ret = msg_insert(msg_ptr, info);
+ if (ret)
+ goto out_unlock;
__do_notify(info);
}
inode->i_atime = inode->i_mtime = inode->i_ctime =
CURRENT_TIME;
- spin_unlock(&info->lock);
- ret = 0;
}
+out_unlock:
+ spin_unlock(&info->lock);
+out_free:
+ if (ret)
+ free_msg(msg_ptr);
out_fput:
fput(filp);
out:
@@ -951,6 +1101,7 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
struct ext_wait_queue wait;
ktime_t expires, *timeout = NULL;
struct timespec ts;
+ struct posix_msg_tree_node *new_leaf = NULL;
if (u_abs_timeout) {
int res = prepare_timeout(u_abs_timeout, &expires, &ts);
@@ -986,7 +1137,26 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
goto out_fput;
}
+ /*
+ * msg_insert really wants us to have a valid, spare node struct so
+ * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
+ * fall back to that if necessary.
+ */
+ if (!info->node_cache)
+ new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
+
spin_lock(&info->lock);
+
+ if (!info->node_cache && new_leaf) {
+ /* Save our speculative allocation into the cache */
+ rb_init_node(&new_leaf->rb_node);
+ INIT_LIST_HEAD(&new_leaf->msg_list);
+ info->node_cache = new_leaf;
+ info->qsize += sizeof(*new_leaf);
+ } else {
+ kfree(new_leaf);
+ }
+
if (info->attr.mq_curmsgs == 0) {
if (filp->f_flags & O_NONBLOCK) {
spin_unlock(&info->lock);
@@ -1251,6 +1421,8 @@ int mq_init_ns(struct ipc_namespace *ns)
ns->mq_queues_max = DFLT_QUEUESMAX;
ns->mq_msg_max = DFLT_MSGMAX;
ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
+ ns->mq_msg_default = DFLT_MSG;
+ ns->mq_msgsize_default = DFLT_MSGSIZE;
ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
if (IS_ERR(ns->mq_mnt)) {
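With the mqueue.c changes above, mq_open() with a NULL attr pointer now starts from the per-namespace defaults (mq_msg_default/mq_msgsize_default, clamped to the maxima) instead of the hard limits. A minimal sketch, assuming the standard POSIX mqueue userspace API (link with -lrt), that confirms which defaults a queue actually received:

/* Sketch only: create a queue with no explicit attributes and report
 * the mq_maxmsg/mq_msgsize the kernel chose for it. */
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	struct mq_attr attr;
	mqd_t q = mq_open("/mq_default_demo", O_CREAT | O_RDWR, 0600, NULL);

	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}
	if (mq_getattr(q, &attr) == 0)
		printf("mq_maxmsg=%ld mq_msgsize=%ld\n",
		       attr.mq_maxmsg, attr.mq_msgsize);
	mq_close(q);
	mq_unlink("/mq_default_demo");
	return 0;
}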
diff --git a/kernel/Makefile b/kernel/Makefile
index bf1034008aca..6f3d0ae044b2 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -25,6 +25,9 @@ endif
obj-y += sched/
obj-y += power/
+ifeq ($(CONFIG_CHECKPOINT_RESTORE),y)
+obj-$(CONFIG_X86) += kcmp.o
+endif
obj-$(CONFIG_FREEZER) += freezer.o
obj-$(CONFIG_PROFILING) += profile.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a0c6af34d500..0f3527d6184a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5132,7 +5132,7 @@ EXPORT_SYMBOL_GPL(css_depth);
* @root: the css supposed to be an ancestor of the child.
*
* Returns true if "root" is an ancestor of "child" in its hierarchy. Because
- * this function reads css->id, this use rcu_dereference() and rcu_read_lock().
+ * this function reads css->id, the caller must hold rcu_read_lock().
* But, considering usual usage, the csses should be valid objects after test.
* Assuming that the caller will do some action to the child if this
* returns true, the caller must take "child"'s reference count.
@@ -5144,18 +5144,18 @@ bool css_is_ancestor(struct cgroup_subsys_state *child,
{
struct css_id *child_id;
struct css_id *root_id;
- bool ret = true;
- rcu_read_lock();
child_id = rcu_dereference(child->id);
+ if (!child_id)
+ return false;
root_id = rcu_dereference(root->id);
- if (!child_id
- || !root_id
- || (child_id->depth < root_id->depth)
- || (child_id->stack[root_id->depth] != root_id->id))
- ret = false;
- rcu_read_unlock();
- return ret;
+ if (!root_id)
+ return false;
+ if (child_id->depth < root_id->depth)
+ return false;
+ if (child_id->stack[root_id->depth] != root_id->id)
+ return false;
+ return true;
}
void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
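Note that css_is_ancestor() no longer takes rcu_read_lock() internally; after this hunk the caller must hold it across the call. A hedged sketch of the resulting calling convention (the wrapper below is hypothetical; the real callers are the mem_cgroup helpers):

/* Sketch only: illustrative caller of css_is_ancestor() after this change. */
#include <linux/cgroup.h>
#include <linux/rcupdate.h>

static bool demo_css_is_descendant(struct cgroup_subsys_state *child,
				   struct cgroup_subsys_state *root)
{
	bool ret;

	rcu_read_lock();	/* now the caller's responsibility */
	ret = css_is_ancestor(child, root);
	rcu_read_unlock();
	return ret;
}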
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 0e6353cf147a..a4eb5227a19e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -10,7 +10,10 @@
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
+#include <linux/oom.h>
+#include <linux/rcupdate.h>
#include <linux/export.h>
+#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
@@ -173,6 +176,47 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_cpu_notifier);
+/**
+ * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
+ * @cpu: a CPU id
+ *
+ * This function walks all processes, finds a valid mm struct for each one and
+ * then clears a corresponding bit in mm's cpumask. While this all sounds
+ * trivial, there are various non-obvious corner cases, which this function
+ * tries to solve in a safe manner.
+ *
+ * Also note that the function uses a somewhat relaxed locking scheme, so it may
+ * be called only for an already offlined CPU.
+ */
+void clear_tasks_mm_cpumask(int cpu)
+{
+ struct task_struct *p;
+
+ /*
+ * This function is called after the cpu is taken down and marked
+ * offline, so it's not like new tasks will ever get this cpu set in
+ * their mm mask. -- Peter Zijlstra
+ * Thus, we may use rcu_read_lock() here, instead of grabbing
+ * full-fledged tasklist_lock.
+ */
+ WARN_ON(cpu_online(cpu));
+ rcu_read_lock();
+ for_each_process(p) {
+ struct task_struct *t;
+
+ /*
+ * Main thread might exit, but other threads may still have
+ * a valid mm. Find one.
+ */
+ t = find_lock_task_mm(p);
+ if (!t)
+ continue;
+ cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
+ task_unlock(t);
+ }
+ rcu_read_unlock();
+}
+
static inline void check_for_tasks(int cpu)
{
struct task_struct *p;
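The clear_tasks_mm_cpumask() helper added above is intended for architecture hotplug code, after the CPU has been marked offline. A rough sketch of such a call site (the function below only stands in for an arch's __cpu_disable(); no caller is added by this hunk):

/* Sketch only: where an arch hotplug path might use the new helper. */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	set_cpu_online(cpu, false);	/* must precede the call below */

	/* ... arch-specific teardown: migrate IRQs, flush state, ... */

	clear_tasks_mm_cpumask(cpu);	/* safe once the CPU is offline */

	return 0;
}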
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
index 249152e15308..9656a3c36503 100644
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -81,7 +81,7 @@ int cpu_pm_unregister_notifier(struct notifier_block *nb)
EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
/**
- * cpm_pm_enter - CPU low power entry notifier
+ * cpu_pm_enter - CPU low power entry notifier
*
* Notifies listeners that a single CPU is entering a low power state that may
* cause some blocks in the same power domain as the cpu to reset.
@@ -89,7 +89,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
* Must be called on the affected CPU with interrupts disabled. Platform is
* responsible for ensuring that cpu_pm_enter is not called twice on the same
* CPU before cpu_pm_exit is called. Notified drivers can include VFP
- * co-processor, interrupt controller and it's PM extensions, local CPU
+ * co-processor, interrupt controller and its PM extensions, local CPU
* timers context save/restore which shouldn't be interrupted. Hence it
* must be called with interrupts disabled.
*
@@ -115,13 +115,13 @@ int cpu_pm_enter(void)
EXPORT_SYMBOL_GPL(cpu_pm_enter);
/**
- * cpm_pm_exit - CPU low power exit notifier
+ * cpu_pm_exit - CPU low power exit notifier
*
* Notifies listeners that a single CPU is exiting a low power state that may
* have caused some blocks in the same power domain as the cpu to reset.
*
* Notified drivers can include VFP co-processor, interrupt controller
- * and it's PM extensions, local CPU timers context save/restore which
+ * and its PM extensions, local CPU timers context save/restore which
* shouldn't be interrupted. Hence it must be called with interrupts disabled.
*
* Return conditions are same as __raw_notifier_call_chain.
@@ -139,7 +139,7 @@ int cpu_pm_exit(void)
EXPORT_SYMBOL_GPL(cpu_pm_exit);
/**
- * cpm_cluster_pm_enter - CPU cluster low power entry notifier
+ * cpu_cluster_pm_enter - CPU cluster low power entry notifier
*
* Notifies listeners that all cpus in a power domain are entering a low power
* state that may cause some blocks in the same power domain to reset.
@@ -147,7 +147,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit);
* Must be called after cpu_pm_enter has been called on all cpus in the power
* domain, and before cpu_pm_exit has been called on any cpu in the power
* domain. Notified drivers can include VFP co-processor, interrupt controller
- * and it's PM extensions, local CPU timers context save/restore which
+ * and its PM extensions, local CPU timers context save/restore which
* shouldn't be interrupted. Hence it must be called with interrupts disabled.
*
* Must be called with interrupts disabled.
@@ -174,7 +174,7 @@ int cpu_cluster_pm_enter(void)
EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
/**
- * cpm_cluster_pm_exit - CPU cluster low power exit notifier
+ * cpu_cluster_pm_exit - CPU cluster low power exit notifier
*
* Notifies listeners that all cpus in a power domain are exiting form a
* low power state that may have caused some blocks in the same power domain
@@ -183,7 +183,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
* Must be called after cpu_pm_exit has been called on all cpus in the power
* domain, and before cpu_pm_exit has been called on any cpu in the power
* domain. Notified drivers can include VFP co-processor, interrupt controller
- * and it's PM extensions, local CPU timers context save/restore which
+ * and its PM extensions, local CPU timers context save/restore which
* shouldn't be interrupted. Hence it must be called with interrupts disabled.
*
* Return conditions are same as __raw_notifier_call_chain.
diff --git a/kernel/events/Makefile b/kernel/events/Makefile
index 22d901f9caf4..103f5d147b2f 100644
--- a/kernel/events/Makefile
+++ b/kernel/events/Makefile
@@ -3,4 +3,7 @@ CFLAGS_REMOVE_core.o = -pg
endif
obj-y := core.o ring_buffer.o callchain.o
+
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_UPROBES) += uprobes.o
+
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
new file mode 100644
index 000000000000..985be4d80fe8
--- /dev/null
+++ b/kernel/events/uprobes.c
@@ -0,0 +1,1667 @@
+/*
+ * User-space Probes (UProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2008-2012
+ * Authors:
+ * Srikar Dronamraju
+ * Jim Keniston
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h> /* read_mapping_page */
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/rmap.h> /* anon_vma_prepare */
+#include <linux/mmu_notifier.h> /* set_pte_at_notify */
+#include <linux/swap.h> /* try_to_free_swap */
+#include <linux/ptrace.h> /* user_enable_single_step */
+#include <linux/kdebug.h> /* notifier mechanism */
+
+#include <linux/uprobes.h>
+
+#define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
+#define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE
+
+static struct srcu_struct uprobes_srcu;
+static struct rb_root uprobes_tree = RB_ROOT;
+
+static DEFINE_SPINLOCK(uprobes_treelock); /* serialize rbtree access */
+
+#define UPROBES_HASH_SZ 13
+
+/* serialize (un)register */
+static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
+
+#define uprobes_hash(v) (&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
+
+/* serialize uprobe->pending_list */
+static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
+#define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
+
+/*
+ * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
+ * events active at this time. Probably a fine-grained per-inode count is
+ * better?
+ */
+static atomic_t uprobe_events = ATOMIC_INIT(0);
+
+/*
+ * Maintain a temporary per vma info that can be used to search if a vma
+ * has already been handled. This structure is introduced since extending
+ * vm_area_struct wasn't recommended.
+ */
+struct vma_info {
+ struct list_head probe_list;
+ struct mm_struct *mm;
+ loff_t vaddr;
+};
+
+struct uprobe {
+ struct rb_node rb_node; /* node in the rb tree */
+ atomic_t ref;
+ struct rw_semaphore consumer_rwsem;
+ struct list_head pending_list;
+ struct uprobe_consumer *consumers;
+ struct inode *inode; /* Also hold a ref to inode */
+ loff_t offset;
+ int flags;
+ struct arch_uprobe arch;
+};
+
+/*
+ * valid_vma: Verify if the specified vma is an executable vma
+ * Relax restrictions while unregistering: vm_flags might have
+ * changed after breakpoint was inserted.
+ * - is_register: indicates if we are in register context.
+ * - Return 1 if the specified virtual address is in an
+ * executable vma.
+ */
+static bool valid_vma(struct vm_area_struct *vma, bool is_register)
+{
+ if (!vma->vm_file)
+ return false;
+
+ if (!is_register)
+ return true;
+
+ if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) == (VM_READ|VM_EXEC))
+ return true;
+
+ return false;
+}
+
+static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
+{
+ loff_t vaddr;
+
+ vaddr = vma->vm_start + offset;
+ vaddr -= vma->vm_pgoff << PAGE_SHIFT;
+
+ return vaddr;
+}
+
+/**
+ * __replace_page - replace page in vma by new page.
+ * based on replace_page in mm/ksm.c
+ *
+ * @vma: vma that holds the pte pointing to page
+ * @page: the cowed page we are replacing by kpage
+ * @kpage: the modified page we replace page by
+ *
+ * Returns 0 on success, -EFAULT on failure.
+ */
+static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep;
+ spinlock_t *ptl;
+ unsigned long addr;
+ int err = -EFAULT;
+
+ addr = page_address_in_vma(page, vma);
+ if (addr == -EFAULT)
+ goto out;
+
+ pgd = pgd_offset(mm, addr);
+ if (!pgd_present(*pgd))
+ goto out;
+
+ pud = pud_offset(pgd, addr);
+ if (!pud_present(*pud))
+ goto out;
+
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd))
+ goto out;
+
+ ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ if (!ptep)
+ goto out;
+
+ get_page(kpage);
+ page_add_new_anon_rmap(kpage, vma, addr);
+
+ if (!PageAnon(page)) {
+ dec_mm_counter(mm, MM_FILEPAGES);
+ inc_mm_counter(mm, MM_ANONPAGES);
+ }
+
+ flush_cache_page(vma, addr, pte_pfn(*ptep));
+ ptep_clear_flush(vma, addr, ptep);
+ set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
+
+ page_remove_rmap(page);
+ if (!page_mapped(page))
+ try_to_free_swap(page);
+ put_page(page);
+ pte_unmap_unlock(ptep, ptl);
+ err = 0;
+
+out:
+ return err;
+}
+
+/**
+ * is_swbp_insn - check if instruction is breakpoint instruction.
+ * @insn: instruction to be checked.
+ * Default implementation of is_swbp_insn
+ * Returns true if @insn is a breakpoint instruction.
+ */
+bool __weak is_swbp_insn(uprobe_opcode_t *insn)
+{
+ return *insn == UPROBE_SWBP_INSN;
+}
+
+/*
+ * NOTE:
+ * Expect the breakpoint instruction to be the smallest size instruction for
+ * the architecture. If an arch has variable-length instructions and the
+ * breakpoint instruction is not the smallest-length instruction
+ * supported by that architecture, then we need to modify read_opcode /
+ * write_opcode accordingly. This would never be a problem for archs that
+ * have fixed length instructions.
+ */
+
+/*
+ * write_opcode - write the opcode at a given virtual address.
+ * @auprobe: arch breakpointing information.
+ * @mm: the probed process address space.
+ * @vaddr: the virtual address to store the opcode.
+ * @opcode: opcode to be written at @vaddr.
+ *
+ * Called with mm->mmap_sem held (for read and with a reference to
+ * mm).
+ *
+ * For mm @mm, write the opcode at @vaddr.
+ * Return 0 (success) or a negative errno.
+ */
+static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ unsigned long vaddr, uprobe_opcode_t opcode)
+{
+ struct page *old_page, *new_page;
+ struct address_space *mapping;
+ void *vaddr_old, *vaddr_new;
+ struct vm_area_struct *vma;
+ struct uprobe *uprobe;
+ loff_t addr;
+ int ret;
+
+ /* Read the page with vaddr into memory */
+ ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
+ if (ret <= 0)
+ return ret;
+
+ ret = -EINVAL;
+
+ /*
+ * We are interested in text pages only. Our pages of interest
+ * should be mapped for read and execute only. We desist from
+ * adding probes in write mapped pages since the breakpoints
+ * might end up in the file copy.
+ */
+ if (!valid_vma(vma, is_swbp_insn(&opcode)))
+ goto put_out;
+
+ uprobe = container_of(auprobe, struct uprobe, arch);
+ mapping = uprobe->inode->i_mapping;
+ if (mapping != vma->vm_file->f_mapping)
+ goto put_out;
+
+ addr = vma_address(vma, uprobe->offset);
+ if (vaddr != (unsigned long)addr)
+ goto put_out;
+
+ ret = -ENOMEM;
+ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
+ if (!new_page)
+ goto put_out;
+
+ __SetPageUptodate(new_page);
+
+ /*
+ * lock page will serialize against do_wp_page()'s
+ * PageAnon() handling
+ */
+ lock_page(old_page);
+ /* copy the page now that we've got it stable */
+ vaddr_old = kmap_atomic(old_page);
+ vaddr_new = kmap_atomic(new_page);
+
+ memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
+
+ /* poke the new insn in, ASSUMES we don't cross page boundary */
+ vaddr &= ~PAGE_MASK;
+ BUG_ON(vaddr + UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
+ memcpy(vaddr_new + vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
+
+ kunmap_atomic(vaddr_new);
+ kunmap_atomic(vaddr_old);
+
+ ret = anon_vma_prepare(vma);
+ if (ret)
+ goto unlock_out;
+
+ lock_page(new_page);
+ ret = __replace_page(vma, old_page, new_page);
+ unlock_page(new_page);
+
+unlock_out:
+ unlock_page(old_page);
+ page_cache_release(new_page);
+
+put_out:
+ put_page(old_page);
+
+ return ret;
+}
+
+/**
+ * read_opcode - read the opcode at a given virtual address.
+ * @mm: the probed process address space.
+ * @vaddr: the virtual address to read the opcode.
+ * @opcode: location to store the read opcode.
+ *
+ * Called with mm->mmap_sem held (for read and with a reference to
+ * mm).
+ *
+ * For mm @mm, read the opcode at @vaddr and store it in @opcode.
+ * Return 0 (success) or a negative errno.
+ */
+static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
+{
+ struct page *page;
+ void *vaddr_new;
+ int ret;
+
+ ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &page, NULL);
+ if (ret <= 0)
+ return ret;
+
+ lock_page(page);
+ vaddr_new = kmap_atomic(page);
+ vaddr &= ~PAGE_MASK;
+ memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
+ kunmap_atomic(vaddr_new);
+ unlock_page(page);
+
+ put_page(page);
+
+ return 0;
+}
+
+static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
+{
+ uprobe_opcode_t opcode;
+ int result;
+
+ result = read_opcode(mm, vaddr, &opcode);
+ if (result)
+ return result;
+
+ if (is_swbp_insn(&opcode))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * set_swbp - store breakpoint at a given address.
+ * @auprobe: arch specific probepoint information.
+ * @mm: the probed process address space.
+ * @vaddr: the virtual address to insert the opcode.
+ *
+ * For mm @mm, store the breakpoint instruction at @vaddr.
+ * Return 0 (success) or a negative errno.
+ */
+int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
+{
+ int result;
+
+ result = is_swbp_at_addr(mm, vaddr);
+ if (result == 1)
+ return -EEXIST;
+
+ if (result)
+ return result;
+
+ return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
+}
+
+/**
+ * set_orig_insn - Restore the original instruction.
+ * @mm: the probed process address space.
+ * @auprobe: arch specific probepoint information.
+ * @vaddr: the virtual address to insert the opcode.
+ * @verify: if true, verify existence of breakpoint instruction.
+ *
+ * For mm @mm, restore the original opcode (opcode) at @vaddr.
+ * Return 0 (success) or a negative errno.
+ */
+int __weak
+set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, bool verify)
+{
+ if (verify) {
+ int result;
+
+ result = is_swbp_at_addr(mm, vaddr);
+ if (!result)
+ return -EINVAL;
+
+ if (result != 1)
+ return result;
+ }
+ return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
+}
+
+static int match_uprobe(struct uprobe *l, struct uprobe *r)
+{
+ if (l->inode < r->inode)
+ return -1;
+
+ if (l->inode > r->inode)
+ return 1;
+
+ if (l->offset < r->offset)
+ return -1;
+
+ if (l->offset > r->offset)
+ return 1;
+
+ return 0;
+}
+
+static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
+{
+ struct uprobe u = { .inode = inode, .offset = offset };
+ struct rb_node *n = uprobes_tree.rb_node;
+ struct uprobe *uprobe;
+ int match;
+
+ while (n) {
+ uprobe = rb_entry(n, struct uprobe, rb_node);
+ match = match_uprobe(&u, uprobe);
+ if (!match) {
+ atomic_inc(&uprobe->ref);
+ return uprobe;
+ }
+
+ if (match < 0)
+ n = n->rb_left;
+ else
+ n = n->rb_right;
+ }
+ return NULL;
+}
+
+/*
+ * Find a uprobe corresponding to a given inode:offset
+ * Acquires uprobes_treelock
+ */
+static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
+{
+ struct uprobe *uprobe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&uprobes_treelock, flags);
+ uprobe = __find_uprobe(inode, offset);
+ spin_unlock_irqrestore(&uprobes_treelock, flags);
+
+ return uprobe;
+}
+
+static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
+{
+ struct rb_node **p = &uprobes_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct uprobe *u;
+ int match;
+
+ while (*p) {
+ parent = *p;
+ u = rb_entry(parent, struct uprobe, rb_node);
+ match = match_uprobe(uprobe, u);
+ if (!match) {
+ atomic_inc(&u->ref);
+ return u;
+ }
+
+ if (match < 0)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+
+ }
+
+ u = NULL;
+ rb_link_node(&uprobe->rb_node, parent, p);
+ rb_insert_color(&uprobe->rb_node, &uprobes_tree);
+ /* get access + creation ref */
+ atomic_set(&uprobe->ref, 2);
+
+ return u;
+}
+
+/*
+ * Acquire uprobes_treelock.
+ * Matching uprobe already exists in rbtree;
+ * increment (access refcount) and return the matching uprobe.
+ *
+ * No matching uprobe; insert the uprobe in rb_tree;
+ * get a double refcount (access + creation) and return NULL.
+ */
+static struct uprobe *insert_uprobe(struct uprobe *uprobe)
+{
+ unsigned long flags;
+ struct uprobe *u;
+
+ spin_lock_irqsave(&uprobes_treelock, flags);
+ u = __insert_uprobe(uprobe);
+ spin_unlock_irqrestore(&uprobes_treelock, flags);
+
+ /* For now assume that the instruction need not be single-stepped */
+ uprobe->flags |= UPROBE_SKIP_SSTEP;
+
+ return u;
+}
+
+static void put_uprobe(struct uprobe *uprobe)
+{
+ if (atomic_dec_and_test(&uprobe->ref))
+ kfree(uprobe);
+}
+
+static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
+{
+ struct uprobe *uprobe, *cur_uprobe;
+
+ uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
+ if (!uprobe)
+ return NULL;
+
+ uprobe->inode = igrab(inode);
+ uprobe->offset = offset;
+ init_rwsem(&uprobe->consumer_rwsem);
+ INIT_LIST_HEAD(&uprobe->pending_list);
+
+ /* add to uprobes_tree, sorted on inode:offset */
+ cur_uprobe = insert_uprobe(uprobe);
+
+ /* a uprobe exists for this inode:offset combination */
+ if (cur_uprobe) {
+ kfree(uprobe);
+ uprobe = cur_uprobe;
+ iput(inode);
+ } else {
+ atomic_inc(&uprobe_events);
+ }
+
+ return uprobe;
+}
+
+static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
+{
+ struct uprobe_consumer *uc;
+
+ if (!(uprobe->flags & UPROBE_RUN_HANDLER))
+ return;
+
+ down_read(&uprobe->consumer_rwsem);
+ for (uc = uprobe->consumers; uc; uc = uc->next) {
+ if (!uc->filter || uc->filter(uc, current))
+ uc->handler(uc, regs);
+ }
+ up_read(&uprobe->consumer_rwsem);
+}
+
+/* Returns the previous consumer */
+static struct uprobe_consumer *
+consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
+{
+ down_write(&uprobe->consumer_rwsem);
+ uc->next = uprobe->consumers;
+ uprobe->consumers = uc;
+ up_write(&uprobe->consumer_rwsem);
+
+ return uc->next;
+}
+
+/*
+ * For uprobe @uprobe, delete the consumer @uc.
+ * Return true if the @uc is deleted successfully
+ * or return false.
+ */
+static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
+{
+ struct uprobe_consumer **con;
+ bool ret = false;
+
+ down_write(&uprobe->consumer_rwsem);
+ for (con = &uprobe->consumers; *con; con = &(*con)->next) {
+ if (*con == uc) {
+ *con = uc->next;
+ ret = true;
+ break;
+ }
+ }
+ up_write(&uprobe->consumer_rwsem);
+
+ return ret;
+}
+
+static int
+__copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
+ unsigned long nbytes, unsigned long offset)
+{
+ struct file *filp = vma->vm_file;
+ struct page *page;
+ void *vaddr;
+ unsigned long off1;
+ unsigned long idx;
+
+ if (!filp)
+ return -EINVAL;
+
+ idx = (unsigned long)(offset >> PAGE_CACHE_SHIFT);
+ off1 = offset &= ~PAGE_MASK;
+
+ /*
+ * Ensure that the page that has the original instruction is
+ * populated and in page-cache.
+ */
+ page = read_mapping_page(mapping, idx, filp);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ vaddr = kmap_atomic(page);
+ memcpy(insn, vaddr + off1, nbytes);
+ kunmap_atomic(vaddr);
+ page_cache_release(page);
+
+ return 0;
+}
+
+static int
+copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
+{
+ struct address_space *mapping;
+ unsigned long nbytes;
+ int bytes;
+
+ addr &= ~PAGE_MASK;
+ nbytes = PAGE_SIZE - addr;
+ mapping = uprobe->inode->i_mapping;
+
+ /* Instruction at end of binary; copy only available bytes */
+ if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
+ bytes = uprobe->inode->i_size - uprobe->offset;
+ else
+ bytes = MAX_UINSN_BYTES;
+
+ /* Instruction at the page-boundary; copy bytes in second page */
+ if (nbytes < bytes) {
+ if (__copy_insn(mapping, vma, uprobe->arch.insn + nbytes,
+ bytes - nbytes, uprobe->offset + nbytes))
+ return -ENOMEM;
+
+ bytes = nbytes;
+ }
+ return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
+}
+
+/*
+ * How mm->uprobes_state.count gets updated
+ * uprobe_mmap() increments the count if
+ * - it successfully adds a breakpoint.
+ * - it cannot add a breakpoint, but sees that there is an underlying
+ * breakpoint (via a is_swbp_at_addr()).
+ *
+ * uprobe_munmap() decrements the count if
+ * - it sees an underlying breakpoint (via is_swbp_at_addr).
+ * (Subsequent uprobe_unregister wouldn't find the breakpoint
+ * unless a uprobe_mmap kicks in, since the old vma would be
+ * dropped just after uprobe_munmap.)
+ *
+ * uprobe_register increments the count if:
+ * - it successfully adds a breakpoint.
+ *
+ * uprobe_unregister decrements the count if:
+ * - it sees an underlying breakpoint and removes it successfully.
+ * (via is_swbp_at_addr)
+ * (Subsequent uprobe_munmap wouldn't find the breakpoint
+ * since there is no underlying breakpoint after the
+ * breakpoint removal.)
+ */
+static int
+install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
+ struct vm_area_struct *vma, loff_t vaddr)
+{
+ unsigned long addr;
+ int ret;
+
+ /*
+ * If the probe is being deleted, the unregistering thread could be done
+ * with the vma rmap walk-through. Adding a probe now can be fatal since
+ * nobody will be able to clean up. Also we could be from the fork or
+ * mremap path, where the probe might have already been inserted.
+ * Hence behave as if probe already existed.
+ */
+ if (!uprobe->consumers)
+ return -EEXIST;
+
+ addr = (unsigned long)vaddr;
+
+ if (!(uprobe->flags & UPROBE_COPY_INSN)) {
+ ret = copy_insn(uprobe, vma, addr);
+ if (ret)
+ return ret;
+
+ if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
+ return -EEXIST;
+
+ ret = arch_uprobe_analyze_insn(&uprobe->arch, mm);
+ if (ret)
+ return ret;
+
+ uprobe->flags |= UPROBE_COPY_INSN;
+ }
+
+ /*
+ * Ideally, should be updating the probe count after the breakpoint
+ * has been successfully inserted. However a thread could hit the
+ * breakpoint we just inserted even before the probe count is
+ * incremented. If this is the first breakpoint placed, breakpoint
+ * notifier might ignore uprobes and pass the trap to the thread.
+ * Hence increment before and decrement on failure.
+ */
+ atomic_inc(&mm->uprobes_state.count);
+ ret = set_swbp(&uprobe->arch, mm, addr);
+ if (ret)
+ atomic_dec(&mm->uprobes_state.count);
+
+ return ret;
+}
+
+static void
+remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
+{
+ if (!set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true))
+ atomic_dec(&mm->uprobes_state.count);
+}
+
+/*
+ * There could be threads that have hit the breakpoint and are entering the
+ * notifier code and trying to acquire the uprobes_treelock. The thread
+ * calling delete_uprobe() that is removing the uprobe from the rb_tree can
+ * race with these threads and might acquire the uprobes_treelock before
+ * some of the breakpoint hit threads. In such a case, the breakpoint
+ * hit threads will not find the uprobe. The current unregistering thread
+ * waits till all other threads have hit a breakpoint, to acquire the
+ * uprobes_treelock before the uprobe is removed from the rbtree.
+ */
+static void delete_uprobe(struct uprobe *uprobe)
+{
+ unsigned long flags;
+
+ synchronize_srcu(&uprobes_srcu);
+ spin_lock_irqsave(&uprobes_treelock, flags);
+ rb_erase(&uprobe->rb_node, &uprobes_tree);
+ spin_unlock_irqrestore(&uprobes_treelock, flags);
+ iput(uprobe->inode);
+ put_uprobe(uprobe);
+ atomic_dec(&uprobe_events);
+}
+
+static struct vma_info *
+__find_next_vma_info(struct address_space *mapping, struct list_head *head,
+ struct vma_info *vi, loff_t offset, bool is_register)
+{
+ struct prio_tree_iter iter;
+ struct vm_area_struct *vma;
+ struct vma_info *tmpvi;
+ unsigned long pgoff;
+ int existing_vma;
+ loff_t vaddr;
+
+ pgoff = offset >> PAGE_SHIFT;
+
+ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ if (!valid_vma(vma, is_register))
+ continue;
+
+ existing_vma = 0;
+ vaddr = vma_address(vma, offset);
+
+ list_for_each_entry(tmpvi, head, probe_list) {
+ if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) {
+ existing_vma = 1;
+ break;
+ }
+ }
+
+ /*
+ * Another vma needs a probe to be installed. However, skip
+ * installing the probe if the vma is about to be unlinked.
+ */
+ if (!existing_vma && atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
+ vi->mm = vma->vm_mm;
+ vi->vaddr = vaddr;
+ list_add(&vi->probe_list, head);
+
+ return vi;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Iterate in the rmap prio tree and find a vma where a probe has not
+ * yet been inserted.
+ */
+static struct vma_info *
+find_next_vma_info(struct address_space *mapping, struct list_head *head,
+ loff_t offset, bool is_register)
+{
+ struct vma_info *vi, *retvi;
+
+ vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL);
+ if (!vi)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&mapping->i_mmap_mutex);
+ retvi = __find_next_vma_info(mapping, head, vi, offset, is_register);
+ mutex_unlock(&mapping->i_mmap_mutex);
+
+ if (!retvi)
+ kfree(vi);
+
+ return retvi;
+}
+
+static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
+{
+ struct list_head try_list;
+ struct vm_area_struct *vma;
+ struct address_space *mapping;
+ struct vma_info *vi, *tmpvi;
+ struct mm_struct *mm;
+ loff_t vaddr;
+ int ret;
+
+ mapping = uprobe->inode->i_mapping;
+ INIT_LIST_HEAD(&try_list);
+
+ ret = 0;
+
+ for (;;) {
+ vi = find_next_vma_info(mapping, &try_list, uprobe->offset, is_register);
+ if (!vi)
+ break;
+
+ if (IS_ERR(vi)) {
+ ret = PTR_ERR(vi);
+ break;
+ }
+
+ mm = vi->mm;
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, (unsigned long)vi->vaddr);
+ if (!vma || !valid_vma(vma, is_register)) {
+ list_del(&vi->probe_list);
+ kfree(vi);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ continue;
+ }
+ vaddr = vma_address(vma, uprobe->offset);
+ if (vma->vm_file->f_mapping->host != uprobe->inode ||
+ vaddr != vi->vaddr) {
+ list_del(&vi->probe_list);
+ kfree(vi);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ continue;
+ }
+
+ if (is_register)
+ ret = install_breakpoint(uprobe, mm, vma, vi->vaddr);
+ else
+ remove_breakpoint(uprobe, mm, vi->vaddr);
+
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ if (is_register) {
+ if (ret && ret == -EEXIST)
+ ret = 0;
+ if (ret)
+ break;
+ }
+ }
+
+ list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
+ list_del(&vi->probe_list);
+ kfree(vi);
+ }
+
+ return ret;
+}
+
+static int __uprobe_register(struct uprobe *uprobe)
+{
+ return register_for_each_vma(uprobe, true);
+}
+
+static void __uprobe_unregister(struct uprobe *uprobe)
+{
+ if (!register_for_each_vma(uprobe, false))
+ delete_uprobe(uprobe);
+
+ /* TODO: can't unregister? schedule a worker thread */
+}
+
+/*
+ * uprobe_register - register a probe
+ * @inode: the file in which the probe has to be placed.
+ * @offset: offset from the start of the file.
+ * @uc: information on how to handle the probe.
+ *
+ * Apart from the access refcount, uprobe_register() takes a creation
+ * refcount (through alloc_uprobe) if and only if this @uprobe is getting
+ * inserted into the rbtree (i.e. first consumer for an @inode:@offset
+ * tuple). Creation refcount stops uprobe_unregister from freeing the
+ * @uprobe even before the register operation is complete. Creation
+ * refcount is released when the last @uc for the @uprobe
+ * unregisters.
+ *
+ * Return errno if it cannot successfully install probes
+ * else return 0 (success)
+ */
+int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
+{
+ struct uprobe *uprobe;
+ int ret;
+
+ if (!inode || !uc || uc->next)
+ return -EINVAL;
+
+ if (offset > i_size_read(inode))
+ return -EINVAL;
+
+ ret = 0;
+ mutex_lock(uprobes_hash(inode));
+ uprobe = alloc_uprobe(inode, offset);
+
+ if (uprobe && !consumer_add(uprobe, uc)) {
+ ret = __uprobe_register(uprobe);
+ if (ret) {
+ uprobe->consumers = NULL;
+ __uprobe_unregister(uprobe);
+ } else {
+ uprobe->flags |= UPROBE_RUN_HANDLER;
+ }
+ }
+
+ mutex_unlock(uprobes_hash(inode));
+ put_uprobe(uprobe);
+
+ return ret;
+}
+
+/*
+ * uprobe_unregister - unregister an already registered probe.
+ * @inode: the file in which the probe has to be removed.
+ * @offset: offset from the start of the file.
+ * @uc: identify which probe if multiple probes are colocated.
+ */
+void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
+{
+ struct uprobe *uprobe;
+
+ if (!inode || !uc)
+ return;
+
+ uprobe = find_uprobe(inode, offset);
+ if (!uprobe)
+ return;
+
+ mutex_lock(uprobes_hash(inode));
+
+ if (consumer_del(uprobe, uc)) {
+ if (!uprobe->consumers) {
+ __uprobe_unregister(uprobe);
+ uprobe->flags &= ~UPROBE_RUN_HANDLER;
+ }
+ }
+
+ mutex_unlock(uprobes_hash(inode));
+ if (uprobe)
+ put_uprobe(uprobe);
+}
+
+/*
+ * Of all the nodes that correspond to the given inode, return the node
+ * with the least offset.
+ */
+static struct rb_node *find_least_offset_node(struct inode *inode)
+{
+ struct uprobe u = { .inode = inode, .offset = 0};
+ struct rb_node *n = uprobes_tree.rb_node;
+ struct rb_node *close_node = NULL;
+ struct uprobe *uprobe;
+ int match;
+
+ while (n) {
+ uprobe = rb_entry(n, struct uprobe, rb_node);
+ match = match_uprobe(&u, uprobe);
+
+ if (uprobe->inode == inode)
+ close_node = n;
+
+ if (!match)
+ return close_node;
+
+ if (match < 0)
+ n = n->rb_left;
+ else
+ n = n->rb_right;
+ }
+
+ return close_node;
+}
+
+/*
+ * For a given inode, build a list of probes that need to be inserted.
+ */
+static void build_probe_list(struct inode *inode, struct list_head *head)
+{
+ struct uprobe *uprobe;
+ unsigned long flags;
+ struct rb_node *n;
+
+ spin_lock_irqsave(&uprobes_treelock, flags);
+
+ n = find_least_offset_node(inode);
+
+ for (; n; n = rb_next(n)) {
+ uprobe = rb_entry(n, struct uprobe, rb_node);
+ if (uprobe->inode != inode)
+ break;
+
+ list_add(&uprobe->pending_list, head);
+ atomic_inc(&uprobe->ref);
+ }
+
+ spin_unlock_irqrestore(&uprobes_treelock, flags);
+}
+
+/*
+ * Called from mmap_region.
+ * Called with mm->mmap_sem acquired.
+ *
+ * Return a negative value if we fail to insert probes and we cannot
+ * bail out.
+ * Return 0 otherwise, i.e.:
+ *
+ * - successful insertion of probes
+ * - (or) no possible probes to be inserted.
+ * - (or) insertion of probes failed but we can bail-out.
+ */
+int uprobe_mmap(struct vm_area_struct *vma)
+{
+ struct list_head tmp_list;
+ struct uprobe *uprobe, *u;
+ struct inode *inode;
+ int ret, count;
+
+ if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
+ return 0;
+
+ inode = vma->vm_file->f_mapping->host;
+ if (!inode)
+ return 0;
+
+ INIT_LIST_HEAD(&tmp_list);
+ mutex_lock(uprobes_mmap_hash(inode));
+ build_probe_list(inode, &tmp_list);
+
+ ret = 0;
+ count = 0;
+
+ list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
+ loff_t vaddr;
+
+ list_del(&uprobe->pending_list);
+ if (!ret) {
+ vaddr = vma_address(vma, uprobe->offset);
+
+ if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
+ put_uprobe(uprobe);
+ continue;
+ }
+
+ ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
+
+ /* Ignore double add: */
+ if (ret == -EEXIST) {
+ ret = 0;
+
+ if (!is_swbp_at_addr(vma->vm_mm, vaddr))
+ continue;
+
+ /*
+ * Unable to insert a breakpoint, but
+ * breakpoint lies underneath. Increment the
+ * probe count.
+ */
+ atomic_inc(&vma->vm_mm->uprobes_state.count);
+ }
+
+ if (!ret)
+ count++;
+ }
+ put_uprobe(uprobe);
+ }
+
+ mutex_unlock(uprobes_mmap_hash(inode));
+
+ if (ret)
+ atomic_sub(count, &vma->vm_mm->uprobes_state.count);
+
+ return ret;
+}
+
+/*
+ * Called in context of a munmap of a vma.
+ */
+void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ struct list_head tmp_list;
+ struct uprobe *uprobe, *u;
+ struct inode *inode;
+
+ if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
+ return;
+
+ if (!atomic_read(&vma->vm_mm->uprobes_state.count))
+ return;
+
+ inode = vma->vm_file->f_mapping->host;
+ if (!inode)
+ return;
+
+ INIT_LIST_HEAD(&tmp_list);
+ mutex_lock(uprobes_mmap_hash(inode));
+ build_probe_list(inode, &tmp_list);
+
+ list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
+ loff_t vaddr;
+
+ list_del(&uprobe->pending_list);
+ vaddr = vma_address(vma, uprobe->offset);
+
+ if (vaddr >= start && vaddr < end) {
+ /*
+ * An unregister could have removed the probe before
+ * unmap. So check before we decrement the count.
+ */
+ if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1)
+ atomic_dec(&vma->vm_mm->uprobes_state.count);
+ }
+ put_uprobe(uprobe);
+ }
+ mutex_unlock(uprobes_mmap_hash(inode));
+}
+
+/* Slot allocation for XOL */
+static int xol_add_vma(struct xol_area *area)
+{
+ struct mm_struct *mm;
+ int ret;
+
+ area->page = alloc_page(GFP_HIGHUSER);
+ if (!area->page)
+ return -ENOMEM;
+
+ ret = -EALREADY;
+ mm = current->mm;
+
+ down_write(&mm->mmap_sem);
+ if (mm->uprobes_state.xol_area)
+ goto fail;
+
+ ret = -ENOMEM;
+
+ /* Try to map as high as possible, this is only a hint. */
+ area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
+ if (area->vaddr & ~PAGE_MASK) {
+ ret = area->vaddr;
+ goto fail;
+ }
+
+ ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
+ VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
+ if (ret)
+ goto fail;
+
+ smp_wmb(); /* pairs with get_xol_area() */
+ mm->uprobes_state.xol_area = area;
+ ret = 0;
+
+fail:
+ up_write(&mm->mmap_sem);
+ if (ret)
+ __free_page(area->page);
+
+ return ret;
+}
+
+static struct xol_area *get_xol_area(struct mm_struct *mm)
+{
+ struct xol_area *area;
+
+ area = mm->uprobes_state.xol_area;
+ smp_read_barrier_depends(); /* pairs with wmb in xol_add_vma() */
+
+ return area;
+}
+
+/*
+ * xol_alloc_area - Allocate process's xol_area.
+ * This area will be used for storing instructions for execution out of
+ * line.
+ *
+ * Returns the allocated area or NULL.
+ */
+static struct xol_area *xol_alloc_area(void)
+{
+ struct xol_area *area;
+
+ area = kzalloc(sizeof(*area), GFP_KERNEL);
+ if (unlikely(!area))
+ return NULL;
+
+ area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
+
+ if (!area->bitmap)
+ goto fail;
+
+ init_waitqueue_head(&area->wq);
+ if (!xol_add_vma(area))
+ return area;
+
+fail:
+ kfree(area->bitmap);
+ kfree(area);
+
+ return get_xol_area(current->mm);
+}
+
+/*
+ * uprobe_clear_state - Free the area allocated for slots.
+ */
+void uprobe_clear_state(struct mm_struct *mm)
+{
+ struct xol_area *area = mm->uprobes_state.xol_area;
+
+ if (!area)
+ return;
+
+ put_page(area->page);
+ kfree(area->bitmap);
+ kfree(area);
+}
+
+/*
+ * uprobe_reset_state - Free the area allocated for slots.
+ */
+void uprobe_reset_state(struct mm_struct *mm)
+{
+ mm->uprobes_state.xol_area = NULL;
+ atomic_set(&mm->uprobes_state.count, 0);
+}
+
+/*
+ * - search for a free slot.
+ */
+static unsigned long xol_take_insn_slot(struct xol_area *area)
+{
+ unsigned long slot_addr;
+ int slot_nr;
+
+ do {
+ slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
+ if (slot_nr < UINSNS_PER_PAGE) {
+ if (!test_and_set_bit(slot_nr, area->bitmap))
+ break;
+
+ slot_nr = UINSNS_PER_PAGE;
+ continue;
+ }
+ wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
+ } while (slot_nr >= UINSNS_PER_PAGE);
+
+ slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
+ atomic_inc(&area->slot_count);
+
+ return slot_addr;
+}
+
+/*
+ * xol_get_insn_slot - If a slot was not already allocated, then
+ * allocate one.
+ * Returns the allocated slot address or 0.
+ */
+static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr)
+{
+ struct xol_area *area;
+ unsigned long offset;
+ void *vaddr;
+
+ area = get_xol_area(current->mm);
+ if (!area) {
+ area = xol_alloc_area();
+ if (!area)
+ return 0;
+ }
+ current->utask->xol_vaddr = xol_take_insn_slot(area);
+
+ /*
+ * Initialize the slot if xol_vaddr points to a valid
+ * instruction slot.
+ */
+ if (unlikely(!current->utask->xol_vaddr))
+ return 0;
+
+ current->utask->vaddr = slot_addr;
+ offset = current->utask->xol_vaddr & ~PAGE_MASK;
+ vaddr = kmap_atomic(area->page);
+ memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
+ kunmap_atomic(vaddr);
+
+ return current->utask->xol_vaddr;
+}
+
+/*
+ * xol_free_insn_slot - If slot was earlier allocated by
+ * @xol_get_insn_slot(), make the slot available for
+ * subsequent requests.
+ */
+static void xol_free_insn_slot(struct task_struct *tsk)
+{
+ struct xol_area *area;
+ unsigned long vma_end;
+ unsigned long slot_addr;
+
+ if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
+ return;
+
+ slot_addr = tsk->utask->xol_vaddr;
+
+ if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr)))
+ return;
+
+ area = tsk->mm->uprobes_state.xol_area;
+ vma_end = area->vaddr + PAGE_SIZE;
+ if (area->vaddr <= slot_addr && slot_addr < vma_end) {
+ unsigned long offset;
+ int slot_nr;
+
+ offset = slot_addr - area->vaddr;
+ slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
+ if (slot_nr >= UINSNS_PER_PAGE)
+ return;
+
+ clear_bit(slot_nr, area->bitmap);
+ atomic_dec(&area->slot_count);
+ if (waitqueue_active(&area->wq))
+ wake_up(&area->wq);
+
+ tsk->utask->xol_vaddr = 0;
+ }
+}
+
+/**
+ * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
+ * @regs: Reflects the saved state of the task after it has hit a breakpoint
+ * instruction.
+ * Return the address of the breakpoint instruction.
+ */
+unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
+}
+
+/*
+ * Called with no locks held.
+ * Called in context of an exiting or an exec-ing thread.
+ */
+void uprobe_free_utask(struct task_struct *t)
+{
+ struct uprobe_task *utask = t->utask;
+
+ if (t->uprobe_srcu_id != -1)
+ srcu_read_unlock_raw(&uprobes_srcu, t->uprobe_srcu_id);
+
+ if (!utask)
+ return;
+
+ if (utask->active_uprobe)
+ put_uprobe(utask->active_uprobe);
+
+ xol_free_insn_slot(t);
+ kfree(utask);
+ t->utask = NULL;
+}
+
+/*
+ * Called in context of a new clone/fork from copy_process.
+ */
+void uprobe_copy_process(struct task_struct *t)
+{
+ t->utask = NULL;
+ t->uprobe_srcu_id = -1;
+}
+
+/*
+ * Allocate a uprobe_task object for the task.
+ * Called when the thread hits a breakpoint for the first time.
+ *
+ * Returns:
+ * - pointer to new uprobe_task on success
+ * - NULL otherwise
+ */
+static struct uprobe_task *add_utask(void)
+{
+ struct uprobe_task *utask;
+
+ utask = kzalloc(sizeof *utask, GFP_KERNEL);
+ if (unlikely(!utask))
+ return NULL;
+
+ utask->active_uprobe = NULL;
+ current->utask = utask;
+ return utask;
+}
+
+/* Prepare to single-step probed instruction out of line. */
+static int
+pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
+{
+ if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs))
+ return 0;
+
+ return -EFAULT;
+}
+
+/*
+ * If we are singlestepping, then ensure this thread is not connected to
+ * non-fatal signals until completion of singlestep. When xol insn itself
+ * triggers the signal, restart the original insn even if the task is
+ * already SIGKILL'ed (since coredump should report the correct ip). This
+ * is even more important if the task has a handler for SIGSEGV/etc. The
+ * _same_ instruction should be repeated again after return from the signal
+ * handler, and SSTEP can never finish in this case.
+ */
+bool uprobe_deny_signal(void)
+{
+ struct task_struct *t = current;
+ struct uprobe_task *utask = t->utask;
+
+ if (likely(!utask || !utask->active_uprobe))
+ return false;
+
+ WARN_ON_ONCE(utask->state != UTASK_SSTEP);
+
+ if (signal_pending(t)) {
+ spin_lock_irq(&t->sighand->siglock);
+ clear_tsk_thread_flag(t, TIF_SIGPENDING);
+ spin_unlock_irq(&t->sighand->siglock);
+
+ if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
+ utask->state = UTASK_SSTEP_TRAPPED;
+ set_tsk_thread_flag(t, TIF_UPROBE);
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+ }
+ }
+
+ return true;
+}
+
+/*
+ * Avoid singlestepping the original instruction if the original instruction
+ * is a NOP or can be emulated.
+ */
+static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
+{
+ if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
+ return true;
+
+ uprobe->flags &= ~UPROBE_SKIP_SSTEP;
+ return false;
+}
+
+/*
+ * Run handler and ask thread to singlestep.
+ * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
+ */
+static void handle_swbp(struct pt_regs *regs)
+{
+ struct vm_area_struct *vma;
+ struct uprobe_task *utask;
+ struct uprobe *uprobe;
+ struct mm_struct *mm;
+ unsigned long bp_vaddr;
+
+ uprobe = NULL;
+ bp_vaddr = uprobe_get_swbp_addr(regs);
+ mm = current->mm;
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, bp_vaddr);
+
+ if (vma && vma->vm_start <= bp_vaddr && valid_vma(vma, false)) {
+ struct inode *inode;
+ loff_t offset;
+
+ inode = vma->vm_file->f_mapping->host;
+ offset = bp_vaddr - vma->vm_start;
+ offset += (vma->vm_pgoff << PAGE_SHIFT);
+ uprobe = find_uprobe(inode, offset);
+ }
+
+ srcu_read_unlock_raw(&uprobes_srcu, current->uprobe_srcu_id);
+ current->uprobe_srcu_id = -1;
+ up_read(&mm->mmap_sem);
+
+ if (!uprobe) {
+ /* No matching uprobe; signal SIGTRAP. */
+ send_sig(SIGTRAP, current, 0);
+ return;
+ }
+
+ utask = current->utask;
+ if (!utask) {
+ utask = add_utask();
+ /* Cannot allocate; re-execute the instruction. */
+ if (!utask)
+ goto cleanup_ret;
+ }
+ utask->active_uprobe = uprobe;
+ handler_chain(uprobe, regs);
+ if (uprobe->flags & UPROBE_SKIP_SSTEP && can_skip_sstep(uprobe, regs))
+ goto cleanup_ret;
+
+ utask->state = UTASK_SSTEP;
+ if (!pre_ssout(uprobe, regs, bp_vaddr)) {
+ user_enable_single_step(current);
+ return;
+ }
+
+cleanup_ret:
+ if (utask) {
+ utask->active_uprobe = NULL;
+ utask->state = UTASK_RUNNING;
+ }
+ if (uprobe) {
+ if (!(uprobe->flags & UPROBE_SKIP_SSTEP))
+
+ /*
+ * cannot singlestep; cannot skip instruction;
+ * re-execute the instruction.
+ */
+ instruction_pointer_set(regs, bp_vaddr);
+
+ put_uprobe(uprobe);
+ }
+}
+
+/*
+ * Perform required fix-ups and disable singlestep.
+ * Allow pending signals to take effect.
+ */
+static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
+{
+ struct uprobe *uprobe;
+
+ uprobe = utask->active_uprobe;
+ if (utask->state == UTASK_SSTEP_ACK)
+ arch_uprobe_post_xol(&uprobe->arch, regs);
+ else if (utask->state == UTASK_SSTEP_TRAPPED)
+ arch_uprobe_abort_xol(&uprobe->arch, regs);
+ else
+ WARN_ON_ONCE(1);
+
+ put_uprobe(uprobe);
+ utask->active_uprobe = NULL;
+ utask->state = UTASK_RUNNING;
+ user_disable_single_step(current);
+ xol_free_insn_slot(current);
+
+ spin_lock_irq(&current->sighand->siglock);
+ recalc_sigpending(); /* see uprobe_deny_signal() */
+ spin_unlock_irq(&current->sighand->siglock);
+}
+
+/*
+ * On a breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag (and,
+ * on subsequent probe hits on the thread, sets the state to UTASK_BP_HIT) and
+ * allows the thread to return from interrupt.
+ *
+ * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag and
+ * also sets the state to UTASK_SSTEP_ACK and allows the thread to return from
+ * interrupt.
+ *
+ * While returning to userspace, thread notices the TIF_UPROBE flag and calls
+ * uprobe_notify_resume().
+ */
+void uprobe_notify_resume(struct pt_regs *regs)
+{
+ struct uprobe_task *utask;
+
+ utask = current->utask;
+ if (!utask || utask->state == UTASK_BP_HIT)
+ handle_swbp(regs);
+ else
+ handle_singlestep(utask, regs);
+}
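For context, an architecture's return-to-userspace path is expected to notice TIF_UPROBE and call uprobe_notify_resume(). A rough sketch of such a hook, loosely modeled on x86's do_notify_resume(); the surrounding names are illustrative and not part of this patch:

    /* Sketch only: how an arch resume path might dispatch to uprobes. */
    #include <linux/uprobes.h>
    #include <linux/sched.h>

    void demo_do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
    {
            if (thread_flags & _TIF_UPROBE) {
                    clear_thread_flag(TIF_UPROBE);
                    uprobe_notify_resume(regs);     /* runs handle_swbp() or handle_singlestep() */
            }

            if (thread_flags & _TIF_SIGPENDING)
                    do_signal(regs);                /* hypothetical arch signal delivery */
    }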
+
+/*
+ * uprobe_pre_sstep_notifier gets called from interrupt context as part of the
+ * notifier mechanism. Set the TIF_UPROBE flag and indicate a breakpoint hit.
+ */
+int uprobe_pre_sstep_notifier(struct pt_regs *regs)
+{
+ struct uprobe_task *utask;
+
+ if (!current->mm || !atomic_read(&current->mm->uprobes_state.count))
+ /* task is currently not uprobed */
+ return 0;
+
+ utask = current->utask;
+ if (utask)
+ utask->state = UTASK_BP_HIT;
+
+ set_thread_flag(TIF_UPROBE);
+ current->uprobe_srcu_id = srcu_read_lock_raw(&uprobes_srcu);
+
+ return 1;
+}
+
+/*
+ * uprobe_post_sstep_notifier gets called in interrupt context as part of the
+ * notifier mechanism. Set the TIF_UPROBE flag and indicate completion of singlestep.
+ */
+int uprobe_post_sstep_notifier(struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ if (!current->mm || !utask || !utask->active_uprobe)
+ /* task is currently not uprobed */
+ return 0;
+
+ utask->state = UTASK_SSTEP_ACK;
+ set_thread_flag(TIF_UPROBE);
+ return 1;
+}
+
+static struct notifier_block uprobe_exception_nb = {
+ .notifier_call = arch_uprobe_exception_notify,
+ .priority = INT_MAX-1, /* notified after kprobes, kgdb */
+};
+
+static int __init init_uprobes(void)
+{
+ int i;
+
+ for (i = 0; i < UPROBES_HASH_SZ; i++) {
+ mutex_init(&uprobes_mutex[i]);
+ mutex_init(&uprobes_mmap_mutex[i]);
+ }
+ init_srcu_struct(&uprobes_srcu);
+
+ return register_die_notifier(&uprobe_exception_nb);
+}
+module_init(init_uprobes);
+
+static void __exit exit_uprobes(void)
+{
+}
+module_exit(exit_uprobes);
diff --git a/kernel/exit.c b/kernel/exit.c
index 3ecd096e5d4d..34867cc5b42a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -884,9 +884,9 @@ static void check_stack_usage(void)
spin_lock(&low_water_lock);
if (free < lowest_to_date) {
- printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
- "left\n",
- current->comm, free);
+ printk(KERN_WARNING "%s (%d) used greatest stack depth: "
+ "%lu bytes left\n",
+ current->comm, task_pid_nr(current), free);
lowest_to_date = free;
}
spin_unlock(&low_water_lock);
@@ -1215,7 +1215,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
unsigned long state;
int retval, status, traced;
pid_t pid = task_pid_vnr(p);
- uid_t uid = from_kuid_munged(current_user_ns(), __task_cred(p)->uid);
+ uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
struct siginfo __user *infop;
if (!likely(wo->wo_flags & WEXITED))
diff --git a/kernel/fork.c b/kernel/fork.c
index a46db217a589..ab5211b9e622 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -69,6 +69,7 @@
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
+#include <linux/uprobes.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -385,7 +386,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
}
charge = 0;
if (mpnt->vm_flags & VM_ACCOUNT) {
- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
+ unsigned long len;
+ len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
goto fail_nomem;
charge = len;
@@ -451,6 +453,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (retval)
goto out;
+
+ if (file && uprobe_mmap(tmp))
+ goto out;
}
/* a new mm has just been created */
arch_dup_mmap(oldmm, mm);
@@ -599,6 +604,7 @@ void mmput(struct mm_struct *mm)
might_sleep();
if (atomic_dec_and_test(&mm->mm_users)) {
+ uprobe_clear_state(mm);
exit_aio(mm);
ksm_exit(mm);
khugepaged_exit(mm); /* must run before exit_mmap */
@@ -609,7 +615,6 @@ void mmput(struct mm_struct *mm)
list_del(&mm->mmlist);
spin_unlock(&mmlist_lock);
}
- put_swap_token(mm);
if (mm->binfmt)
module_put(mm->binfmt->module);
mmdrop(mm);
@@ -777,12 +782,11 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
exit_pi_state_list(tsk);
#endif
+ uprobe_free_utask(tsk);
+
/* Get rid of any cached register state */
deactivate_mm(tsk, mm);
- if (tsk->vfork_done)
- complete_vfork_done(tsk);
-
/*
* If we're exiting normally, clear a user-space tid field if
* requested. We leave this alone when dying by signal, to leave
@@ -803,6 +807,13 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
}
tsk->clear_child_tid = NULL;
}
+
+ /*
+ * All done; finally we can wake up the parent and return this mm to it.
+ * Also kthread_stop() uses this completion for synchronization.
+ */
+ if (tsk->vfork_done)
+ complete_vfork_done(tsk);
}
/*
@@ -824,13 +835,10 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
memcpy(mm, oldmm, sizeof(*mm));
mm_init_cpumask(mm);
- /* Initializing for Swap token stuff */
- mm->token_priority = 0;
- mm->last_interval = 0;
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
mm->pmd_huge_pte = NULL;
#endif
+ uprobe_reset_state(mm);
if (!mm_init(mm, tsk))
goto fail_nomem;
@@ -905,10 +913,6 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
goto fail_nomem;
good_mm:
- /* Initializing for Swap token stuff */
- mm->token_priority = 0;
- mm->last_interval = 0;
-
tsk->mm = mm;
tsk->active_mm = mm;
return 0;
@@ -976,9 +980,8 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
* Share io context with parent, if CLONE_IO is set
*/
if (clone_flags & CLONE_IO) {
- tsk->io_context = ioc_task_link(ioc);
- if (unlikely(!tsk->io_context))
- return -ENOMEM;
+ ioc_task_link(ioc);
+ tsk->io_context = ioc;
} else if (ioprio_valid(ioc->ioprio)) {
new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
if (unlikely(!new_ioc))
@@ -1373,6 +1376,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
INIT_LIST_HEAD(&p->pi_state_list);
p->pi_state_cache = NULL;
#endif
+ uprobe_copy_process(p);
/*
* sigaltstack should be cleared when sharing the same VM
*/
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 0e0ba5f840b2..41c1564103f1 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) "irq: " fmt
+
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
@@ -56,14 +58,73 @@ static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
return domain;
}
+static void irq_domain_free(struct irq_domain *domain)
+{
+ of_node_put(domain->of_node);
+ kfree(domain);
+}
+
static void irq_domain_add(struct irq_domain *domain)
{
mutex_lock(&irq_domain_mutex);
list_add(&domain->link, &irq_domain_list);
mutex_unlock(&irq_domain_mutex);
- pr_debug("irq: Allocated domain of type %d @0x%p\n",
+ pr_debug("Allocated domain of type %d @0x%p\n",
+ domain->revmap_type, domain);
+}
+
+/**
+ * irq_domain_remove() - Remove an irq domain.
+ * @domain: domain to remove
+ *
+ * This routine is used to remove an irq domain. The caller must ensure
+ * that all mappings within the domain have been disposed of prior to
+ * calling this function; the exact requirements depend on the revmap type.
+ */
+void irq_domain_remove(struct irq_domain *domain)
+{
+ mutex_lock(&irq_domain_mutex);
+
+ switch (domain->revmap_type) {
+ case IRQ_DOMAIN_MAP_LEGACY:
+ /*
+ * Legacy domains don't manage their own irq_desc
+ * allocations, we expect the caller to handle irq_desc
+ * freeing on their own.
+ */
+ break;
+ case IRQ_DOMAIN_MAP_TREE:
+ /*
+ * radix_tree_delete() takes care of destroying the root
+ * node when all entries are removed. Shout if there are
+ * any mappings left.
+ */
+ WARN_ON(domain->revmap_data.tree.height);
+ break;
+ case IRQ_DOMAIN_MAP_LINEAR:
+ kfree(domain->revmap_data.linear.revmap);
+ domain->revmap_data.linear.size = 0;
+ break;
+ case IRQ_DOMAIN_MAP_NOMAP:
+ break;
+ }
+
+ list_del(&domain->link);
+
+ /*
+ * If the going away domain is the default one, reset it.
+ */
+ if (unlikely(irq_default_domain == domain))
+ irq_set_default_host(NULL);
+
+ mutex_unlock(&irq_domain_mutex);
+
+ pr_debug("Removed domain of type %d @0x%p\n",
domain->revmap_type, domain);
+
+ irq_domain_free(domain);
}
+EXPORT_SYMBOL_GPL(irq_domain_remove);
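With irq_domain_remove() exported, a modular interrupt controller driver can now tear its domain down on unbind. A minimal sketch under assumed driver names (only the irq_domain_* calls are real APIs from this file):

    /* Sketch: pairing irq_domain_add_linear() with irq_domain_remove() in a driver. */
    #include <linux/irqdomain.h>
    #include <linux/platform_device.h>

    static struct irq_domain *demo_domain;      /* hypothetical driver state */

    static int demo_probe(struct platform_device *pdev)
    {
            demo_domain = irq_domain_add_linear(pdev->dev.of_node, 32,
                                                &irq_domain_simple_ops, NULL);
            return demo_domain ? 0 : -ENOMEM;
    }

    static int demo_remove(struct platform_device *pdev)
    {
            /* All mappings must already be disposed of, as documented above. */
            irq_domain_remove(demo_domain);
            return 0;
    }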
static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
irq_hw_number_t hwirq)
@@ -117,8 +178,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
if (WARN_ON(!irq_data || irq_data->domain)) {
mutex_unlock(&irq_domain_mutex);
- of_node_put(domain->of_node);
- kfree(domain);
+ irq_domain_free(domain);
return NULL;
}
}
@@ -152,10 +212,12 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
irq_domain_add(domain);
return domain;
}
+EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
/**
* irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
+ * @size: Number of interrupts in the domain.
* @ops: map/unmap domain callbacks
* @host_data: Controller private data pointer
*/
@@ -181,6 +243,7 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
irq_domain_add(domain);
return domain;
}
+EXPORT_SYMBOL_GPL(irq_domain_add_linear);
struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
unsigned int max_irq,
@@ -195,6 +258,7 @@ struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
}
return domain;
}
+EXPORT_SYMBOL_GPL(irq_domain_add_nomap);
/**
* irq_domain_add_tree()
@@ -216,6 +280,7 @@ struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
}
return domain;
}
+EXPORT_SYMBOL_GPL(irq_domain_add_tree);
/**
* irq_find_host() - Locates a domain for a given device node
@@ -259,10 +324,11 @@ EXPORT_SYMBOL_GPL(irq_find_host);
*/
void irq_set_default_host(struct irq_domain *domain)
{
- pr_debug("irq: Default domain set to @0x%p\n", domain);
+ pr_debug("Default domain set to @0x%p\n", domain);
irq_default_domain = domain;
}
+EXPORT_SYMBOL_GPL(irq_set_default_host);
static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq)
@@ -272,7 +338,7 @@ static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
irq_data->hwirq = hwirq;
irq_data->domain = domain;
if (domain->ops->map(domain, virq, hwirq)) {
- pr_debug("irq: -> mapping failed, freeing\n");
+ pr_debug("irq-%i==>hwirq-0x%lx mapping failed\n", virq, hwirq);
irq_data->domain = NULL;
irq_data->hwirq = 0;
return -1;
@@ -303,7 +369,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
virq = irq_alloc_desc_from(1, 0);
if (!virq) {
- pr_debug("irq: create_direct virq allocation failed\n");
+ pr_debug("create_direct virq allocation failed\n");
return 0;
}
if (virq >= domain->revmap_data.nomap.max_irq) {
@@ -312,7 +378,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
irq_free_desc(virq);
return 0;
}
- pr_debug("irq: create_direct obtained virq %d\n", virq);
+ pr_debug("create_direct obtained virq %d\n", virq);
if (irq_setup_virq(domain, virq, virq)) {
irq_free_desc(virq);
@@ -321,6 +387,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
return virq;
}
+EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
/**
* irq_create_mapping() - Map a hardware interrupt into linux irq space
@@ -338,23 +405,23 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
unsigned int hint;
int virq;
- pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
+ pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
/* Look for default domain if necessary */
if (domain == NULL)
domain = irq_default_domain;
if (domain == NULL) {
- printk(KERN_WARNING "irq_create_mapping called for"
- " NULL domain, hwirq=%lx\n", hwirq);
+ pr_warning("irq_create_mapping called for"
+ " NULL domain, hwirq=%lx\n", hwirq);
WARN_ON(1);
return 0;
}
- pr_debug("irq: -> using domain @%p\n", domain);
+ pr_debug("-> using domain @%p\n", domain);
/* Check if mapping already exists */
virq = irq_find_mapping(domain, hwirq);
if (virq) {
- pr_debug("irq: -> existing mapping on virq %d\n", virq);
+ pr_debug("-> existing mapping on virq %d\n", virq);
return virq;
}
@@ -370,7 +437,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
if (virq <= 0)
virq = irq_alloc_desc_from(1, 0);
if (virq <= 0) {
- pr_debug("irq: -> virq allocation failed\n");
+ pr_debug("-> virq allocation failed\n");
return 0;
}
@@ -380,7 +447,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
return 0;
}
- pr_debug("irq: irq %lu on domain %s mapped to virtual irq %u\n",
+ pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
hwirq, domain->of_node ? domain->of_node->full_name : "null", virq);
return virq;
@@ -409,8 +476,8 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
if (intsize > 0)
return intspec[0];
#endif
- printk(KERN_WARNING "irq: no irq domain found for %s !\n",
- controller->full_name);
+ pr_warning("no irq domain found for %s !\n",
+ controller->full_name);
return 0;
}
@@ -560,6 +627,7 @@ unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
*/
return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
}
+EXPORT_SYMBOL_GPL(irq_radix_revmap_lookup);
/**
* irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
@@ -584,6 +652,7 @@ void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
mutex_unlock(&revmap_trees_mutex);
}
}
+EXPORT_SYMBOL_GPL(irq_radix_revmap_insert);
/**
* irq_linear_revmap() - Find a linux irq from a hw irq number.
@@ -617,6 +686,7 @@ unsigned int irq_linear_revmap(struct irq_domain *domain,
return revmap[hwirq];
}
+EXPORT_SYMBOL_GPL(irq_linear_revmap);
#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
@@ -691,8 +761,8 @@ static int __init irq_debugfs_init(void)
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
-int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
+static int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
{
return 0;
}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4d1f8f897414..ea0c6c2ae6f7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -7,6 +7,8 @@
* This file contains driver APIs to the irq subsystem.
*/
+#define pr_fmt(fmt) "genirq: " fmt
+
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
@@ -566,7 +568,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
* IRQF_TRIGGER_* but the PIC does not support multiple
* flow-types?
*/
- pr_debug("genirq: No set_type function for IRQ %d (%s)\n", irq,
+ pr_debug("No set_type function for IRQ %d (%s)\n", irq,
chip ? (chip->name ? : "unknown") : "unknown");
return 0;
}
@@ -601,7 +603,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
ret = 0;
break;
default:
- pr_err("genirq: Setting trigger mode %lu for irq %u failed (%pF)\n",
+ pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
flags, irq, chip->irq_set_type);
}
if (unmask)
@@ -785,7 +787,7 @@ static void irq_thread_dtor(struct task_work *unused)
action = kthread_data(tsk);
- pr_err("genirq: exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+ pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
@@ -1042,7 +1044,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
* has. The type flags are unreliable as the
* underlying chip implementation can override them.
*/
- pr_err("genirq: Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
+ pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
irq);
ret = -EINVAL;
goto out_mask;
@@ -1093,7 +1095,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (nmsk != omsk)
/* hope the handler works with current trigger mode */
- pr_warning("genirq: irq %d uses trigger mode %u; requested %u\n",
+ pr_warning("irq %d uses trigger mode %u; requested %u\n",
irq, nmsk, omsk);
}
@@ -1131,7 +1133,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
mismatch:
if (!(new->flags & IRQF_PROBE_SHARED)) {
- pr_err("genirq: Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
+ pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
dump_stack();
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 079f1d39a8b8..2169feeba529 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -343,7 +343,7 @@ int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
/* Look up a kernel symbol and return it in a text buffer. */
static int __sprint_symbol(char *buffer, unsigned long address,
- int symbol_offset)
+ int symbol_offset, int add_offset)
{
char *modname;
const char *name;
@@ -358,13 +358,13 @@ static int __sprint_symbol(char *buffer, unsigned long address,
if (name != buffer)
strcpy(buffer, name);
len = strlen(buffer);
- buffer += len;
offset -= symbol_offset;
+ if (add_offset)
+ len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);
+
if (modname)
- len += sprintf(buffer, "+%#lx/%#lx [%s]", offset, size, modname);
- else
- len += sprintf(buffer, "+%#lx/%#lx", offset, size);
+ len += sprintf(buffer + len, " [%s]", modname);
return len;
}
@@ -382,12 +382,28 @@ static int __sprint_symbol(char *buffer, unsigned long address,
*/
int sprint_symbol(char *buffer, unsigned long address)
{
- return __sprint_symbol(buffer, address, 0);
+ return __sprint_symbol(buffer, address, 0, 1);
}
-
EXPORT_SYMBOL_GPL(sprint_symbol);
/**
+ * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer
+ * @buffer: buffer to be stored
+ * @address: address to lookup
+ *
+ * This function looks up a kernel symbol with @address and stores its name
+ * and module name to @buffer if possible. If no symbol was found, just saves
+ * its @address as is.
+ *
+ * This function returns the number of bytes stored in @buffer.
+ */
+int sprint_symbol_no_offset(char *buffer, unsigned long address)
+{
+ return __sprint_symbol(buffer, address, 0, 0);
+}
+EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);
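The only difference between the two helpers is whether the "+offset/size" suffix appears before the optional "[module]" part. A short in-kernel usage sketch; the printed strings are examples, not guaranteed output:

    /* Sketch: the two flavours side by side, for some kernel text address addr. */
    #include <linux/kallsyms.h>
    #include <linux/printk.h>

    static void demo_print_symbol(unsigned long addr)
    {
            char buf[KSYM_SYMBOL_LEN];

            sprint_symbol(buf, addr);               /* e.g. "usb_submit_urb+0x10/0x2a0 [usbcore]" */
            pr_info("with offset:    %s\n", buf);

            sprint_symbol_no_offset(buf, addr);     /* e.g. "usb_submit_urb [usbcore]" */
            pr_info("without offset: %s\n", buf);
    }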
+
+/**
* sprint_backtrace - Look up a backtrace symbol and return it in a text buffer
* @buffer: buffer to be stored
* @address: address to lookup
@@ -403,7 +419,7 @@ EXPORT_SYMBOL_GPL(sprint_symbol);
*/
int sprint_backtrace(char *buffer, unsigned long address)
{
- return __sprint_symbol(buffer, address, -1);
+ return __sprint_symbol(buffer, address, -1, 1);
}
/* Look up a kernel symbol and print it to the kernel messages. */
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
new file mode 100644
index 000000000000..30b7b225306c
--- /dev/null
+++ b/kernel/kcmp.c
@@ -0,0 +1,196 @@
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+#include <linux/fdtable.h>
+#include <linux/string.h>
+#include <linux/random.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/cache.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/kcmp.h>
+
+#include <asm/unistd.h>
+
+/*
+ * We don't expose the real in-memory order of objects for security reasons.
+ * But still the comparison results should be suitable for sorting. So we
+ * obfuscate kernel pointer values and compare the products instead.
+ *
+ * The obfuscation is done in two steps. First we xor the kernel pointer with
+ * a random value, which puts the pointer into a new position in a reordered
+ * space. Secondly we multiply the xor product by a large odd random number to
+ * permute its bits even more (the odd multiplier guarantees that the product
+ * is unique even after the high bits are truncated, since any odd number is
+ * relatively prime to 2^n).
+ *
+ * Note also that the obfuscation itself is invisible to userspace and if needed
+ * it can be changed to an alternate scheme.
+ */
+static unsigned long cookies[KCMP_TYPES][2] __read_mostly;
+
+static long kptr_obfuscate(long v, int type)
+{
+ return (v ^ cookies[type][0]) * cookies[type][1];
+}
+
+/*
+ * 0 - equal, i.e. v1 = v2
+ * 1 - less than, i.e. v1 < v2
+ * 2 - greater than, i.e. v1 > v2
+ * 3 - not equal but ordering unavailable (reserved for future)
+ */
+static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
+{
+ long ret;
+
+ ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
+
+ return (ret < 0) | ((ret > 0) << 1);
+}
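Two properties carry this scheme: xor followed by an odd multiplier is a bijection modulo 2^n, so distinct pointers never collide after obfuscation, and the return expression packs the sign of the difference into the 0/1/2 encoding documented above. A small userspace-style illustration with made-up cookie values (not the kernel's random ones):

    /* Illustrative only: obfuscation and the 0/1/2 ordering encoding of kcmp_ptr(). */
    #include <stdio.h>

    static long obfuscate(long v, unsigned long xor_cookie, unsigned long mul_cookie)
    {
            /* mul_cookie must be odd, as enforced by kcmp_cookies_init(). */
            return (v ^ xor_cookie) * mul_cookie;
    }

    int main(void)
    {
            unsigned long xor_cookie = 0x5a5a5a5aUL;
            unsigned long mul_cookie = 0x9e3779b9UL;    /* odd, hence invertible mod 2^n */

            long a = obfuscate(0x1000, xor_cookie, mul_cookie);
            long b = obfuscate(0x2000, xor_cookie, mul_cookie);

            long ret = a - b;
            int cmp = (ret < 0) | ((ret > 0) << 1);     /* 0: equal, 1: less, 2: greater */
            printf("cmp = %d\n", cmp);
            return 0;
    }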
+
+/* The caller must have pinned the task */
+static struct file *
+get_file_raw_ptr(struct task_struct *task, unsigned int idx)
+{
+ struct file *file = NULL;
+
+ task_lock(task);
+ rcu_read_lock();
+
+ if (task->files)
+ file = fcheck_files(task->files, idx);
+
+ rcu_read_unlock();
+ task_unlock(task);
+
+ return file;
+}
+
+static void kcmp_unlock(struct mutex *m1, struct mutex *m2)
+{
+ if (likely(m2 != m1))
+ mutex_unlock(m2);
+ mutex_unlock(m1);
+}
+
+static int kcmp_lock(struct mutex *m1, struct mutex *m2)
+{
+ int err;
+
+ if (m2 > m1)
+ swap(m1, m2);
+
+ err = mutex_lock_killable(m1);
+ if (!err && likely(m1 != m2)) {
+ err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
+ if (err)
+ mutex_unlock(m1);
+ }
+
+ return err;
+}
+
+SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
+ unsigned long, idx1, unsigned long, idx2)
+{
+ struct task_struct *task1, *task2;
+ int ret;
+
+ rcu_read_lock();
+
+ /*
+ * Tasks are looked up in caller's PID namespace only.
+ */
+ task1 = find_task_by_vpid(pid1);
+ task2 = find_task_by_vpid(pid2);
+ if (!task1 || !task2)
+ goto err_no_task;
+
+ get_task_struct(task1);
+ get_task_struct(task2);
+
+ rcu_read_unlock();
+
+ /*
+ * One should have enough rights to inspect task details.
+ */
+ ret = kcmp_lock(&task1->signal->cred_guard_mutex,
+ &task2->signal->cred_guard_mutex);
+ if (ret)
+ goto err;
+ if (!ptrace_may_access(task1, PTRACE_MODE_READ) ||
+ !ptrace_may_access(task2, PTRACE_MODE_READ)) {
+ ret = -EPERM;
+ goto err_unlock;
+ }
+
+ switch (type) {
+ case KCMP_FILE: {
+ struct file *filp1, *filp2;
+
+ filp1 = get_file_raw_ptr(task1, idx1);
+ filp2 = get_file_raw_ptr(task2, idx2);
+
+ if (filp1 && filp2)
+ ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
+ else
+ ret = -EBADF;
+ break;
+ }
+ case KCMP_VM:
+ ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
+ break;
+ case KCMP_FILES:
+ ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
+ break;
+ case KCMP_FS:
+ ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
+ break;
+ case KCMP_SIGHAND:
+ ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
+ break;
+ case KCMP_IO:
+ ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
+ break;
+ case KCMP_SYSVSEM:
+#ifdef CONFIG_SYSVIPC
+ ret = kcmp_ptr(task1->sysvsem.undo_list,
+ task2->sysvsem.undo_list,
+ KCMP_SYSVSEM);
+#else
+ ret = -EOPNOTSUPP;
+#endif
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+err_unlock:
+ kcmp_unlock(&task1->signal->cred_guard_mutex,
+ &task2->signal->cred_guard_mutex);
+err:
+ put_task_struct(task1);
+ put_task_struct(task2);
+
+ return ret;
+
+err_no_task:
+ rcu_read_unlock();
+ return -ESRCH;
+}
+
+static __init int kcmp_cookies_init(void)
+{
+ int i;
+
+ get_random_bytes(cookies, sizeof(cookies));
+
+ for (i = 0; i < KCMP_TYPES; i++)
+ cookies[i][1] |= (~(~0UL >> 1) | 1);
+
+ return 0;
+}
+arch_initcall(kcmp_cookies_init);
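From userspace the new syscall can answer questions such as whether two processes share an address space. A minimal sketch, assuming the toolchain's headers already carry __NR_kcmp and the <linux/kcmp.h> added by this series:

    /* Sketch: check whether two PIDs share the same mm via sys_kcmp. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/kcmp.h>

    int main(int argc, char **argv)
    {
            if (argc < 3)
                    return 1;

            long ret = syscall(__NR_kcmp, (pid_t)atoi(argv[1]), (pid_t)atoi(argv[2]),
                               KCMP_VM, 0, 0);
            if (ret < 0)
                    perror("kcmp");
            else
                    printf("%s\n", ret == 0 ? "same mm" : "different mm");
            return 0;
    }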
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index c744b88c44e2..59dcf5b81d24 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -402,6 +402,7 @@ unsigned int __kfifo_max_r(unsigned int len, size_t recsize)
return max;
return len;
}
+EXPORT_SYMBOL(__kfifo_max_r);
#define __KFIFO_PEEK(data, out, mask) \
((data)[(out) & (mask)])
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 05698a7415fe..ff2c7cb86d77 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -221,13 +221,12 @@ fail:
return 0;
}
-void call_usermodehelper_freeinfo(struct subprocess_info *info)
+static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
if (info->cleanup)
(*info->cleanup)(info);
kfree(info);
}
-EXPORT_SYMBOL(call_usermodehelper_freeinfo);
static void umh_complete(struct subprocess_info *sub_info)
{
@@ -410,7 +409,7 @@ EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
/**
* __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
- * depth: New value to assign to usermodehelper_disabled.
+ * @depth: New value to assign to usermodehelper_disabled.
*
* Change the value of usermodehelper_disabled (under umhelper_sem locked for
* writing) and wakeup tasks waiting for it to change.
@@ -479,6 +478,7 @@ static void helper_unlock(void)
* structure. This should be passed to call_usermodehelper_exec to
* exec the process and free the structure.
*/
+static
struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
char **envp, gfp_t gfp_mask)
{
@@ -494,7 +494,6 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
out:
return sub_info;
}
-EXPORT_SYMBOL(call_usermodehelper_setup);
/**
* call_usermodehelper_setfns - set a cleanup/init function
@@ -512,6 +511,7 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
* Function must be runnable in either a process context or the
* context in which call_usermodehelper_exec is called.
*/
+static
void call_usermodehelper_setfns(struct subprocess_info *info,
int (*init)(struct subprocess_info *info, struct cred *new),
void (*cleanup)(struct subprocess_info *info),
@@ -521,7 +521,6 @@ void call_usermodehelper_setfns(struct subprocess_info *info,
info->init = init;
info->data = data;
}
-EXPORT_SYMBOL(call_usermodehelper_setfns);
/**
* call_usermodehelper_exec - start a usermode application
@@ -535,6 +534,7 @@ EXPORT_SYMBOL(call_usermodehelper_setfns);
* asynchronously if wait is not set, and runs as a child of keventd.
* (ie. it runs with full root capabilities).
*/
+static
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
DECLARE_COMPLETION_ONSTACK(done);
@@ -576,7 +576,25 @@ unlock:
helper_unlock();
return retval;
}
-EXPORT_SYMBOL(call_usermodehelper_exec);
+
+int call_usermodehelper_fns(
+ char *path, char **argv, char **envp, int wait,
+ int (*init)(struct subprocess_info *info, struct cred *new),
+ void (*cleanup)(struct subprocess_info *), void *data)
+{
+ struct subprocess_info *info;
+ gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
+
+ info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
+
+ if (info == NULL)
+ return -ENOMEM;
+
+ call_usermodehelper_setfns(info, init, cleanup, data);
+
+ return call_usermodehelper_exec(info, wait);
+}
+EXPORT_SYMBOL(call_usermodehelper_fns);
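Callers that used to chain call_usermodehelper_setup(), _setfns() and _exec() now go through this single entry point, as the orderly_poweroff() conversion further down shows. A minimal sketch of an in-kernel caller; the helper path is made up:

    /* Sketch: launching a usermode helper through the new single entry point. */
    #include <linux/kmod.h>

    static int demo_run_helper(void)
    {
            char *argv[] = { "/sbin/demo-helper", "--oneshot", NULL };      /* hypothetical binary */
            char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

            /* Wait for the helper to exit; no init/cleanup callbacks needed here. */
            return call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_PROC,
                                           NULL, NULL, NULL);
    }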
static int proc_cap_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
diff --git a/kernel/pid.c b/kernel/pid.c
index 9f08dfabaf13..e86b291ad834 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -547,7 +547,8 @@ void __init pidhash_init(void)
pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
HASH_EARLY | HASH_SMALL,
- &pidhash_shift, NULL, 4096);
+ &pidhash_shift, NULL,
+ 0, 4096);
pidhash_size = 1U << pidhash_shift;
for (i = 0; i < pidhash_size; i++)
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 57bc1fd35b3c..16b20e38c4a1 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -149,7 +149,12 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
int nr;
int rc;
- struct task_struct *task;
+ struct task_struct *task, *me = current;
+
+ /* Ignore SIGCHLD causing any terminated children to autoreap */
+ spin_lock_irq(&me->sighand->siglock);
+ me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
+ spin_unlock_irq(&me->sighand->siglock);
/*
* The last thread in the cgroup-init thread group is terminating.
@@ -191,6 +196,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
return;
}
+#ifdef CONFIG_CHECKPOINT_RESTORE
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -218,8 +224,8 @@ static struct ctl_table pid_ns_ctl_table[] = {
},
{ }
};
-
static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
+#endif /* CONFIG_CHECKPOINT_RESTORE */
int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
@@ -253,7 +259,10 @@ int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
static __init int pid_namespaces_init(void)
{
pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
+
+#ifdef CONFIG_CHECKPOINT_RESTORE
register_sysctl_paths(kern_path, pid_ns_ctl_table);
+#endif
return 0;
}
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index bebe2b170d49..ad581aa2369a 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -94,13 +94,15 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
counter->usage -= val;
}
-void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+void res_counter_uncharge_until(struct res_counter *counter,
+ struct res_counter *top,
+ unsigned long val)
{
unsigned long flags;
struct res_counter *c;
local_irq_save(flags);
- for (c = counter; c != NULL; c = c->parent) {
+ for (c = counter; c != top; c = c->parent) {
spin_lock(&c->lock);
res_counter_uncharge_locked(c, val);
spin_unlock(&c->lock);
@@ -108,6 +110,10 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val)
local_irq_restore(flags);
}
+void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+{
+ res_counter_uncharge_until(counter, NULL, val);
+}
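The new _until() variant walks the parent chain but stops before @top, so a caller can drop charges from part of a hierarchy while leaving a shared ancestor untouched. A sketch under assumed names:

    /* Sketch: uncharge a child's counters up to, but not including, a common parent. */
    #include <linux/res_counter.h>

    static void demo_move_charge_up(struct res_counter *child,
                                    struct res_counter *parent,
                                    unsigned long val)
    {
            /*
             * child and every ancestor strictly below parent lose val;
             * parent and everything above it keep their usage, so the
             * charge can be re-attached elsewhere under the same parent.
             */
            res_counter_uncharge_until(child, parent, val);
    }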
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
diff --git a/kernel/resource.c b/kernel/resource.c
index 7e8ea66a8c01..e1d2b8ee76d5 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -515,8 +515,8 @@ out:
* @root: root resource descriptor
* @new: resource descriptor desired by caller
* @size: requested resource region size
- * @min: minimum size to allocate
- * @max: maximum size to allocate
+ * @min: minimum boundary to allocate
+ * @max: maximum boundary to allocate
* @align: alignment requested, in bytes
* @alignf: alignment function, optional, called if not NULL
* @alignf_data: arbitrary data to pass to the @alignf function
diff --git a/kernel/signal.c b/kernel/signal.c
index 4dbf00dfb359..08dfbd748cd2 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -29,6 +29,7 @@
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
+#include <linux/uprobes.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>
@@ -1655,19 +1656,18 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
info.si_signo = sig;
info.si_errno = 0;
/*
- * we are under tasklist_lock here so our parent is tied to
- * us and cannot exit and release its namespace.
+ * We are under tasklist_lock here so our parent is tied to
+ * us and cannot change.
*
- * the only it can is to switch its nsproxy with sys_unshare,
- * bu uncharing pid namespaces is not allowed, so we'll always
- * see relevant namespace
+ * task_active_pid_ns will always return the same pid namespace
+ * until a task passes through release_task.
*
* write_lock() currently calls preempt_disable() which is the
* same as rcu_read_lock(), but according to Oleg, this is not
* correct to rely on this
*/
rcu_read_lock();
- info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
+ info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
task_uid(tsk));
rcu_read_unlock();
@@ -2191,6 +2191,9 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
struct signal_struct *signal = current->signal;
int signr;
+ if (unlikely(uprobe_deny_signal()))
+ return 0;
+
relock:
/*
* We'll jump back here after any time we were stopped in TASK_STOPPED.
diff --git a/kernel/sys.c b/kernel/sys.c
index 6df42624e454..9ff89cb9657a 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -36,6 +36,8 @@
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
+#include <linux/file.h>
+#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
@@ -1378,8 +1380,8 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
memcpy(u->nodename, tmp, len);
memset(u->nodename + len, 0, sizeof(u->nodename) - len);
errno = 0;
+ uts_proc_notify(UTS_PROC_HOSTNAME);
}
- uts_proc_notify(UTS_PROC_HOSTNAME);
up_write(&uts_sem);
return errno;
}
@@ -1429,8 +1431,8 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
memcpy(u->domainname, tmp, len);
memset(u->domainname + len, 0, sizeof(u->domainname) - len);
errno = 0;
+ uts_proc_notify(UTS_PROC_DOMAINNAME);
}
- uts_proc_notify(UTS_PROC_DOMAINNAME);
up_write(&uts_sem);
return errno;
}
@@ -1784,77 +1786,102 @@ SYSCALL_DEFINE1(umask, int, mask)
}
#ifdef CONFIG_CHECKPOINT_RESTORE
+static bool vma_flags_mismatch(struct vm_area_struct *vma,
+ unsigned long required,
+ unsigned long banned)
+{
+ return (vma->vm_flags & required) != required ||
+ (vma->vm_flags & banned);
+}
+
+static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
+{
+ struct file *exe_file;
+ struct dentry *dentry;
+ int err;
+
+ /*
+ * Setting a new mm::exe_file is only allowed when no VM_EXECUTABLE VMAs
+ * remain, so perform a quick test first.
+ */
+ if (mm->num_exe_file_vmas)
+ return -EBUSY;
+
+ exe_file = fget(fd);
+ if (!exe_file)
+ return -EBADF;
+
+ dentry = exe_file->f_path.dentry;
+
+ /*
+ * Because the original mm->exe_file points to an executable file, make
+ * sure the replacement is executable as well, so the overall picture
+ * stays consistent.
+ */
+ err = -EACCES;
+ if (!S_ISREG(dentry->d_inode->i_mode) ||
+ exe_file->f_path.mnt->mnt_flags & MNT_NOEXEC)
+ goto exit;
+
+ err = inode_permission(dentry->d_inode, MAY_EXEC);
+ if (err)
+ goto exit;
+
+ /*
+ * The symlink can be changed only once, just to disallow arbitrary
+ * transitions malicious software might bring in. This means one
+ * could take a snapshot of all running processes and monitor
+ * /proc/pid/exe for changes to notice unusual activity if needed.
+ */
+ down_write(&mm->mmap_sem);
+ if (likely(!mm->exe_file))
+ set_mm_exe_file(mm, exe_file);
+ else
+ err = -EBUSY;
+ up_write(&mm->mmap_sem);
+
+exit:
+ fput(exe_file);
+ return err;
+}
+
static int prctl_set_mm(int opt, unsigned long addr,
unsigned long arg4, unsigned long arg5)
{
unsigned long rlim = rlimit(RLIMIT_DATA);
- unsigned long vm_req_flags;
- unsigned long vm_bad_flags;
- struct vm_area_struct *vma;
- int error = 0;
struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int error;
- if (arg4 | arg5)
+ if (arg5 || (arg4 && opt != PR_SET_MM_AUXV))
return -EINVAL;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
+ if (opt == PR_SET_MM_EXE_FILE)
+ return prctl_set_mm_exe_file(mm, (unsigned int)addr);
+
if (addr >= TASK_SIZE)
return -EINVAL;
+ error = -EINVAL;
+
down_read(&mm->mmap_sem);
vma = find_vma(mm, addr);
- if (opt != PR_SET_MM_START_BRK && opt != PR_SET_MM_BRK) {
- /* It must be existing VMA */
- if (!vma || vma->vm_start > addr)
- goto out;
- }
-
- error = -EINVAL;
switch (opt) {
case PR_SET_MM_START_CODE:
+ mm->start_code = addr;
+ break;
case PR_SET_MM_END_CODE:
- vm_req_flags = VM_READ | VM_EXEC;
- vm_bad_flags = VM_WRITE | VM_MAYSHARE;
-
- if ((vma->vm_flags & vm_req_flags) != vm_req_flags ||
- (vma->vm_flags & vm_bad_flags))
- goto out;
-
- if (opt == PR_SET_MM_START_CODE)
- mm->start_code = addr;
- else
- mm->end_code = addr;
+ mm->end_code = addr;
break;
-
case PR_SET_MM_START_DATA:
- case PR_SET_MM_END_DATA:
- vm_req_flags = VM_READ | VM_WRITE;
- vm_bad_flags = VM_EXEC | VM_MAYSHARE;
-
- if ((vma->vm_flags & vm_req_flags) != vm_req_flags ||
- (vma->vm_flags & vm_bad_flags))
- goto out;
-
- if (opt == PR_SET_MM_START_DATA)
- mm->start_data = addr;
- else
- mm->end_data = addr;
+ mm->start_data = addr;
break;
-
- case PR_SET_MM_START_STACK:
-
-#ifdef CONFIG_STACK_GROWSUP
- vm_req_flags = VM_READ | VM_WRITE | VM_GROWSUP;
-#else
- vm_req_flags = VM_READ | VM_WRITE | VM_GROWSDOWN;
-#endif
- if ((vma->vm_flags & vm_req_flags) != vm_req_flags)
- goto out;
-
- mm->start_stack = addr;
+ case PR_SET_MM_END_DATA:
+ mm->end_data = addr;
break;
case PR_SET_MM_START_BRK:
@@ -1881,16 +1908,77 @@ static int prctl_set_mm(int opt, unsigned long addr,
mm->brk = addr;
break;
+ /*
+ * If command line arguments and environment
+ * are placed somewhere else on the stack, we can
+ * set them up here: ARG_START/END to set up the
+ * command line arguments and ENV_START/END for
+ * the environment.
+ */
+ case PR_SET_MM_START_STACK:
+ case PR_SET_MM_ARG_START:
+ case PR_SET_MM_ARG_END:
+ case PR_SET_MM_ENV_START:
+ case PR_SET_MM_ENV_END:
+ if (!vma) {
+ error = -EFAULT;
+ goto out;
+ }
+#ifdef CONFIG_STACK_GROWSUP
+ if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSUP, 0))
+#else
+ if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSDOWN, 0))
+#endif
+ goto out;
+ if (opt == PR_SET_MM_START_STACK)
+ mm->start_stack = addr;
+ else if (opt == PR_SET_MM_ARG_START)
+ mm->arg_start = addr;
+ else if (opt == PR_SET_MM_ARG_END)
+ mm->arg_end = addr;
+ else if (opt == PR_SET_MM_ENV_START)
+ mm->env_start = addr;
+ else if (opt == PR_SET_MM_ENV_END)
+ mm->env_end = addr;
+ break;
+
+ /*
+ * This doesn't move the auxiliary vector itself
+ * since it's pinned to mm_struct, but it allows
+ * filling the vector with new values. It's up
+ * to the caller to provide sane values here,
+ * otherwise user space tools which use this
+ * vector might be unhappy.
+ */
+ case PR_SET_MM_AUXV: {
+ unsigned long user_auxv[AT_VECTOR_SIZE];
+
+ if (arg4 > sizeof(user_auxv))
+ goto out;
+ up_read(&mm->mmap_sem);
+
+ if (copy_from_user(user_auxv, (const void __user *)addr, arg4))
+ return -EFAULT;
+
+ /* Make sure the last entry is always AT_NULL */
+ user_auxv[AT_VECTOR_SIZE - 2] = 0;
+ user_auxv[AT_VECTOR_SIZE - 1] = 0;
+
+ BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
+
+ task_lock(current);
+ memcpy(mm->saved_auxv, user_auxv, arg4);
+ task_unlock(current);
+
+ return 0;
+ }
default:
- error = -EINVAL;
goto out;
}
error = 0;
-
out:
up_read(&mm->mmap_sem);
-
return error;
}
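From a checkpoint/restore tool these fields are driven with prctl(PR_SET_MM, ...), one field per call, with arg4/arg5 zero except for PR_SET_MM_AUXV. A minimal userspace sketch, assuming <sys/prctl.h> pulls in a uapi header that already defines the PR_SET_MM_* constants added here; it also needs CAP_SYS_RESOURCE and CONFIG_CHECKPOINT_RESTORE=y:

    /* Sketch: restoring a few mm fields from a restore tool. */
    #include <sys/prctl.h>

    static int restore_mm_layout(unsigned long start_stack,
                                 unsigned long arg_start, unsigned long arg_end)
    {
            if (prctl(PR_SET_MM, PR_SET_MM_START_STACK, start_stack, 0, 0))
                    return -1;
            if (prctl(PR_SET_MM, PR_SET_MM_ARG_START, arg_start, 0, 0))
                    return -1;
            if (prctl(PR_SET_MM, PR_SET_MM_ARG_END, arg_end, 0, 0))
                    return -1;
            return 0;
    }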
#else /* CONFIG_CHECKPOINT_RESTORE */
@@ -2114,7 +2202,6 @@ int orderly_poweroff(bool force)
NULL
};
int ret = -ENOMEM;
- struct subprocess_info *info;
if (argv == NULL) {
printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
@@ -2122,18 +2209,16 @@ int orderly_poweroff(bool force)
goto out;
}
- info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);
- if (info == NULL) {
- argv_free(argv);
- goto out;
- }
-
- call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL);
+ ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_NO_WAIT,
+ NULL, argv_cleanup, NULL);
+out:
+ if (likely(!ret))
+ return 0;
- ret = call_usermodehelper_exec(info, UMH_NO_WAIT);
+ if (ret == -ENOMEM)
+ argv_free(argv);
- out:
- if (ret && force) {
+ if (force) {
printk(KERN_WARNING "Failed to start orderly shutdown: "
"forcing the issue\n");
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 47bfa16430d7..dbff751e4086 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -203,3 +203,6 @@ cond_syscall(sys_fanotify_mark);
cond_syscall(sys_name_to_handle_at);
cond_syscall(sys_open_by_handle_at);
cond_syscall(compat_sys_open_by_handle_at);
+
+/* compare kernel pointers */
+cond_syscall(sys_kcmp);
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index a20dc8a3c949..fd42bd452b75 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -2,6 +2,55 @@
# Timer subsystem related configuration options
#
+# Options selectable by arch Kconfig
+
+# Watchdog function for clocksources to detect instabilities
+config CLOCKSOURCE_WATCHDOG
+ bool
+
+# Architecture has extra clocksource data
+config ARCH_CLOCKSOURCE_DATA
+ bool
+
+# Timekeeping vsyscall support
+config GENERIC_TIME_VSYSCALL
+ bool
+
+# ktime_t scalar 64bit nsec representation
+config KTIME_SCALAR
+ bool
+
+# Old style timekeeping
+config ARCH_USES_GETTIMEOFFSET
+ bool
+
+# The generic clock events infrastructure
+config GENERIC_CLOCKEVENTS
+ bool
+
+# Migration helper. Builds, but does not invoke
+config GENERIC_CLOCKEVENTS_BUILD
+ bool
+ default y
+ depends on GENERIC_CLOCKEVENTS
+
+# Clockevents broadcasting infrastructure
+config GENERIC_CLOCKEVENTS_BROADCAST
+ bool
+ depends on GENERIC_CLOCKEVENTS
+
+# Automatically adjust the min. reprogramming time for
+# clock event device
+config GENERIC_CLOCKEVENTS_MIN_ADJUST
+ bool
+
+# Generic update of CMOS clock
+config GENERIC_CMOS_UPDATE
+ bool
+
+if GENERIC_CLOCKEVENTS
+menu "Timers subsystem"
+
# Core internal switch. Selected by NO_HZ / HIGH_RES_TIMERS. This is
# only related to the tick functionality. Oneshot clockevent devices
# are supported independent of this.
@@ -26,10 +75,5 @@ config HIGH_RES_TIMERS
hardware is not capable then this option only increases
the size of the kernel image.
-config GENERIC_CLOCKEVENTS_BUILD
- bool
- default y
- depends on GENERIC_CLOCKEVENTS
-
-config GENERIC_CLOCKEVENTS_MIN_ADJUST
- bool
+endmenu
+endif
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index f03fd83b170b..70b33abcc7bb 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -412,6 +412,7 @@ int second_overflow(unsigned long secs)
if (secs % 86400 == 0) {
leap = -1;
time_state = TIME_OOP;
+ time_tai++;
printk(KERN_NOTICE
"Clock: inserting leap second 23:59:60 UTC\n");
}
@@ -426,7 +427,6 @@ int second_overflow(unsigned long secs)
}
break;
case TIME_OOP:
- time_tai++;
time_state = TIME_WAIT;
break;
@@ -473,8 +473,6 @@ int second_overflow(unsigned long secs)
<< NTP_SCALE_SHIFT;
time_adjust = 0;
-
-
out:
spin_unlock_irqrestore(&ntp_lock, flags);
@@ -559,10 +557,10 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
/* only set allowed bits */
time_status &= STA_RONLY;
time_status |= txc->status & ~STA_RONLY;
-
}
+
/*
- * Called with the xtime lock held, so we can access and modify
+ * Called with ntp_lock held, so we can access and modify
* all the global NTP state:
*/
static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d66b21308f7c..6e46cacf5969 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -240,7 +240,6 @@ void getnstimeofday(struct timespec *ts)
timespec_add_ns(ts, nsecs);
}
-
EXPORT_SYMBOL(getnstimeofday);
ktime_t ktime_get(void)
@@ -357,8 +356,8 @@ void do_gettimeofday(struct timeval *tv)
tv->tv_sec = now.tv_sec;
tv->tv_usec = now.tv_nsec/1000;
}
-
EXPORT_SYMBOL(do_gettimeofday);
+
/**
* do_settimeofday - Sets the time of day
* @tv: pointer to the timespec variable containing the new time
@@ -392,7 +391,6 @@ int do_settimeofday(const struct timespec *tv)
return 0;
}
-
EXPORT_SYMBOL(do_settimeofday);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index f347ac91292d..8c4c07071cc5 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -372,6 +372,7 @@ config KPROBE_EVENT
depends on HAVE_REGS_AND_STACK_ACCESS_API
bool "Enable kprobes-based dynamic events"
select TRACING
+ select PROBE_EVENTS
default y
help
This allows the user to add tracing events (similar to tracepoints)
@@ -384,6 +385,25 @@ config KPROBE_EVENT
This option is also required by perf-probe subcommand of perf tools.
If you want to use perf tools, this option is strongly recommended.
+config UPROBE_EVENT
+ bool "Enable uprobes-based dynamic events"
+ depends on ARCH_SUPPORTS_UPROBES
+ depends on MMU
+ select UPROBES
+ select PROBE_EVENTS
+ select TRACING
+ default n
+ help
+ This allows the user to add tracing events on top of userspace
+ dynamic events (similar to tracepoints) on the fly via the trace
+ events interface. Those events can be inserted wherever uprobes
+ can probe, and record various registers.
+ This option is required if you plan to use perf-probe subcommand
+ of perf tools on user space applications.
+
+config PROBE_EVENTS
+ def_bool n
+
config DYNAMIC_FTRACE
bool "enable/disable ftrace tracepoints dynamically"
depends on FUNCTION_TRACER
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index b3afe0e76f79..b831087c8200 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -60,5 +60,7 @@ endif
ifeq ($(CONFIG_TRACING),y)
obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
endif
+obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
+obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o
libftrace-y := ftrace.o
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6420cda62336..1d0f6a8a0e5e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1486,6 +1486,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
if (!buffer)
return size;
+ /* Make sure the requested buffer exists */
+ if (cpu_id != RING_BUFFER_ALL_CPUS &&
+ !cpumask_test_cpu(cpu_id, buffer->cpumask))
+ return size;
+
size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
size *= BUF_PAGE_SIZE;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6c6f7933eede..5aec220d2de0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -103,6 +103,11 @@ struct kretprobe_trace_entry_head {
unsigned long ret_ip;
};
+struct uprobe_trace_entry_head {
+ struct trace_entry ent;
+ unsigned long ip;
+};
+
/*
* trace_flag_type is an enumeration that holds different
* states when a trace occurs. These are:
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 580a05ec926b..b31d3d5699fe 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -19,547 +19,15 @@
#include <linux/module.h>
#include <linux/uaccess.h>
-#include <linux/kprobes.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/debugfs.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
-#include <linux/ptrace.h>
-#include <linux/perf_event.h>
-#include <linux/stringify.h>
-#include <linux/limits.h>
-#include <asm/bitsperlong.h>
-
-#include "trace.h"
-#include "trace_output.h"
-
-#define MAX_TRACE_ARGS 128
-#define MAX_ARGSTR_LEN 63
-#define MAX_EVENT_NAME_LEN 64
-#define MAX_STRING_SIZE PATH_MAX
-#define KPROBE_EVENT_SYSTEM "kprobes"
-
-/* Reserved field names */
-#define FIELD_STRING_IP "__probe_ip"
-#define FIELD_STRING_RETIP "__probe_ret_ip"
-#define FIELD_STRING_FUNC "__probe_func"
-
-const char *reserved_field_names[] = {
- "common_type",
- "common_flags",
- "common_preempt_count",
- "common_pid",
- "common_tgid",
- FIELD_STRING_IP,
- FIELD_STRING_RETIP,
- FIELD_STRING_FUNC,
-};
-
-/* Printing function type */
-typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *,
- void *);
-#define PRINT_TYPE_FUNC_NAME(type) print_type_##type
-#define PRINT_TYPE_FMT_NAME(type) print_type_format_##type
-
-/* Printing in basic type function template */
-#define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt, cast) \
-static __kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \
- const char *name, \
- void *data, void *ent)\
-{ \
- return trace_seq_printf(s, " %s=" fmt, name, (cast)*(type *)data);\
-} \
-static const char PRINT_TYPE_FMT_NAME(type)[] = fmt;
-
-DEFINE_BASIC_PRINT_TYPE_FUNC(u8, "%x", unsigned int)
-DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "%x", unsigned int)
-DEFINE_BASIC_PRINT_TYPE_FUNC(u32, "%lx", unsigned long)
-DEFINE_BASIC_PRINT_TYPE_FUNC(u64, "%llx", unsigned long long)
-DEFINE_BASIC_PRINT_TYPE_FUNC(s8, "%d", int)
-DEFINE_BASIC_PRINT_TYPE_FUNC(s16, "%d", int)
-DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%ld", long)
-DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%lld", long long)
-
-/* data_rloc: data relative location, compatible with u32 */
-#define make_data_rloc(len, roffs) \
- (((u32)(len) << 16) | ((u32)(roffs) & 0xffff))
-#define get_rloc_len(dl) ((u32)(dl) >> 16)
-#define get_rloc_offs(dl) ((u32)(dl) & 0xffff)
-
-static inline void *get_rloc_data(u32 *dl)
-{
- return (u8 *)dl + get_rloc_offs(*dl);
-}
-
-/* For data_loc conversion */
-static inline void *get_loc_data(u32 *dl, void *ent)
-{
- return (u8 *)ent + get_rloc_offs(*dl);
-}
-
-/*
- * Convert data_rloc to data_loc:
- * data_rloc stores the offset from data_rloc itself, but data_loc
- * stores the offset from event entry.
- */
-#define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs))
-
-/* For defining macros, define string/string_size types */
-typedef u32 string;
-typedef u32 string_size;
-
-/* Print type function for string type */
-static __kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s,
- const char *name,
- void *data, void *ent)
-{
- int len = *(u32 *)data >> 16;
-
- if (!len)
- return trace_seq_printf(s, " %s=(fault)", name);
- else
- return trace_seq_printf(s, " %s=\"%s\"", name,
- (const char *)get_loc_data(data, ent));
-}
-static const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\"";
-
-/* Data fetch function type */
-typedef void (*fetch_func_t)(struct pt_regs *, void *, void *);
-
-struct fetch_param {
- fetch_func_t fn;
- void *data;
-};
-
-static __kprobes void call_fetch(struct fetch_param *fprm,
- struct pt_regs *regs, void *dest)
-{
- return fprm->fn(regs, fprm->data, dest);
-}
-
-#define FETCH_FUNC_NAME(method, type) fetch_##method##_##type
-/*
- * Define macro for basic types - we don't need to define s* types, because
- * we have to care only about bitwidth at recording time.
- */
-#define DEFINE_BASIC_FETCH_FUNCS(method) \
-DEFINE_FETCH_##method(u8) \
-DEFINE_FETCH_##method(u16) \
-DEFINE_FETCH_##method(u32) \
-DEFINE_FETCH_##method(u64)
-
-#define CHECK_FETCH_FUNCS(method, fn) \
- (((FETCH_FUNC_NAME(method, u8) == fn) || \
- (FETCH_FUNC_NAME(method, u16) == fn) || \
- (FETCH_FUNC_NAME(method, u32) == fn) || \
- (FETCH_FUNC_NAME(method, u64) == fn) || \
- (FETCH_FUNC_NAME(method, string) == fn) || \
- (FETCH_FUNC_NAME(method, string_size) == fn)) \
- && (fn != NULL))
-
-/* Data fetch function templates */
-#define DEFINE_FETCH_reg(type) \
-static __kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \
- void *offset, void *dest) \
-{ \
- *(type *)dest = (type)regs_get_register(regs, \
- (unsigned int)((unsigned long)offset)); \
-}
-DEFINE_BASIC_FETCH_FUNCS(reg)
-/* No string on the register */
-#define fetch_reg_string NULL
-#define fetch_reg_string_size NULL
-
-#define DEFINE_FETCH_stack(type) \
-static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
- void *offset, void *dest) \
-{ \
- *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \
- (unsigned int)((unsigned long)offset)); \
-}
-DEFINE_BASIC_FETCH_FUNCS(stack)
-/* No string on the stack entry */
-#define fetch_stack_string NULL
-#define fetch_stack_string_size NULL
-
-#define DEFINE_FETCH_retval(type) \
-static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\
- void *dummy, void *dest) \
-{ \
- *(type *)dest = (type)regs_return_value(regs); \
-}
-DEFINE_BASIC_FETCH_FUNCS(retval)
-/* No string on the retval */
-#define fetch_retval_string NULL
-#define fetch_retval_string_size NULL
-
-#define DEFINE_FETCH_memory(type) \
-static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
- void *addr, void *dest) \
-{ \
- type retval; \
- if (probe_kernel_address(addr, retval)) \
- *(type *)dest = 0; \
- else \
- *(type *)dest = retval; \
-}
-DEFINE_BASIC_FETCH_FUNCS(memory)
-/*
- * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
- * length and relative data location.
- */
-static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
- void *addr, void *dest)
-{
- long ret;
- int maxlen = get_rloc_len(*(u32 *)dest);
- u8 *dst = get_rloc_data(dest);
- u8 *src = addr;
- mm_segment_t old_fs = get_fs();
- if (!maxlen)
- return;
- /*
- * Try to get string again, since the string can be changed while
- * probing.
- */
- set_fs(KERNEL_DS);
- pagefault_disable();
- do
- ret = __copy_from_user_inatomic(dst++, src++, 1);
- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
- dst[-1] = '\0';
- pagefault_enable();
- set_fs(old_fs);
-
- if (ret < 0) { /* Failed to fetch string */
- ((u8 *)get_rloc_data(dest))[0] = '\0';
- *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
- } else
- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
- get_rloc_offs(*(u32 *)dest));
-}
-/* Return the length of string -- including null terminal byte */
-static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
- void *addr, void *dest)
-{
- int ret, len = 0;
- u8 c;
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- pagefault_disable();
- do {
- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
- len++;
- } while (c && ret == 0 && len < MAX_STRING_SIZE);
- pagefault_enable();
- set_fs(old_fs);
-
- if (ret < 0) /* Failed to check the length */
- *(u32 *)dest = 0;
- else
- *(u32 *)dest = len;
-}
-
-/* Memory fetching by symbol */
-struct symbol_cache {
- char *symbol;
- long offset;
- unsigned long addr;
-};
-
-static unsigned long update_symbol_cache(struct symbol_cache *sc)
-{
- sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
- if (sc->addr)
- sc->addr += sc->offset;
- return sc->addr;
-}
-
-static void free_symbol_cache(struct symbol_cache *sc)
-{
- kfree(sc->symbol);
- kfree(sc);
-}
-
-static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
-{
- struct symbol_cache *sc;
-
- if (!sym || strlen(sym) == 0)
- return NULL;
- sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
- if (!sc)
- return NULL;
-
- sc->symbol = kstrdup(sym, GFP_KERNEL);
- if (!sc->symbol) {
- kfree(sc);
- return NULL;
- }
- sc->offset = offset;
- update_symbol_cache(sc);
- return sc;
-}
-
-#define DEFINE_FETCH_symbol(type) \
-static __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs,\
- void *data, void *dest) \
-{ \
- struct symbol_cache *sc = data; \
- if (sc->addr) \
- fetch_memory_##type(regs, (void *)sc->addr, dest); \
- else \
- *(type *)dest = 0; \
-}
-DEFINE_BASIC_FETCH_FUNCS(symbol)
-DEFINE_FETCH_symbol(string)
-DEFINE_FETCH_symbol(string_size)
-
-/* Dereference memory access function */
-struct deref_fetch_param {
- struct fetch_param orig;
- long offset;
-};
-
-#define DEFINE_FETCH_deref(type) \
-static __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs,\
- void *data, void *dest) \
-{ \
- struct deref_fetch_param *dprm = data; \
- unsigned long addr; \
- call_fetch(&dprm->orig, regs, &addr); \
- if (addr) { \
- addr += dprm->offset; \
- fetch_memory_##type(regs, (void *)addr, dest); \
- } else \
- *(type *)dest = 0; \
-}
-DEFINE_BASIC_FETCH_FUNCS(deref)
-DEFINE_FETCH_deref(string)
-DEFINE_FETCH_deref(string_size)
-
-static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data)
-{
- if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
- update_deref_fetch_param(data->orig.data);
- else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
- update_symbol_cache(data->orig.data);
-}
-
-static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
-{
- if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
- free_deref_fetch_param(data->orig.data);
- else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
- free_symbol_cache(data->orig.data);
- kfree(data);
-}
-
-/* Bitfield fetch function */
-struct bitfield_fetch_param {
- struct fetch_param orig;
- unsigned char hi_shift;
- unsigned char low_shift;
-};
+#include "trace_probe.h"
-#define DEFINE_FETCH_bitfield(type) \
-static __kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs,\
- void *data, void *dest) \
-{ \
- struct bitfield_fetch_param *bprm = data; \
- type buf = 0; \
- call_fetch(&bprm->orig, regs, &buf); \
- if (buf) { \
- buf <<= bprm->hi_shift; \
- buf >>= bprm->low_shift; \
- } \
- *(type *)dest = buf; \
-}
-DEFINE_BASIC_FETCH_FUNCS(bitfield)
-#define fetch_bitfield_string NULL
-#define fetch_bitfield_string_size NULL
-
-static __kprobes void
-update_bitfield_fetch_param(struct bitfield_fetch_param *data)
-{
- /*
- * Don't check the bitfield itself, because this must be the
- * last fetch function.
- */
- if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
- update_deref_fetch_param(data->orig.data);
- else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
- update_symbol_cache(data->orig.data);
-}
-
-static __kprobes void
-free_bitfield_fetch_param(struct bitfield_fetch_param *data)
-{
- /*
- * Don't check the bitfield itself, because this must be the
- * last fetch function.
- */
- if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
- free_deref_fetch_param(data->orig.data);
- else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
- free_symbol_cache(data->orig.data);
- kfree(data);
-}
-
-/* Default (unsigned long) fetch type */
-#define __DEFAULT_FETCH_TYPE(t) u##t
-#define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)
-#define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG)
-#define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE)
-
-/* Fetch types */
-enum {
- FETCH_MTD_reg = 0,
- FETCH_MTD_stack,
- FETCH_MTD_retval,
- FETCH_MTD_memory,
- FETCH_MTD_symbol,
- FETCH_MTD_deref,
- FETCH_MTD_bitfield,
- FETCH_MTD_END,
-};
-
-#define ASSIGN_FETCH_FUNC(method, type) \
- [FETCH_MTD_##method] = FETCH_FUNC_NAME(method, type)
-
-#define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \
- {.name = _name, \
- .size = _size, \
- .is_signed = sign, \
- .print = PRINT_TYPE_FUNC_NAME(ptype), \
- .fmt = PRINT_TYPE_FMT_NAME(ptype), \
- .fmttype = _fmttype, \
- .fetch = { \
-ASSIGN_FETCH_FUNC(reg, ftype), \
-ASSIGN_FETCH_FUNC(stack, ftype), \
-ASSIGN_FETCH_FUNC(retval, ftype), \
-ASSIGN_FETCH_FUNC(memory, ftype), \
-ASSIGN_FETCH_FUNC(symbol, ftype), \
-ASSIGN_FETCH_FUNC(deref, ftype), \
-ASSIGN_FETCH_FUNC(bitfield, ftype), \
- } \
- }
-
-#define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \
- __ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, #ptype)
-
-#define FETCH_TYPE_STRING 0
-#define FETCH_TYPE_STRSIZE 1
-
-/* Fetch type information table */
-static const struct fetch_type {
- const char *name; /* Name of type */
- size_t size; /* Byte size of type */
- int is_signed; /* Signed flag */
- print_type_func_t print; /* Print functions */
- const char *fmt; /* Fromat string */
- const char *fmttype; /* Name in format file */
- /* Fetch functions */
- fetch_func_t fetch[FETCH_MTD_END];
-} fetch_type_table[] = {
- /* Special types */
- [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
- sizeof(u32), 1, "__data_loc char[]"),
- [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
- string_size, sizeof(u32), 0, "u32"),
- /* Basic types */
- ASSIGN_FETCH_TYPE(u8, u8, 0),
- ASSIGN_FETCH_TYPE(u16, u16, 0),
- ASSIGN_FETCH_TYPE(u32, u32, 0),
- ASSIGN_FETCH_TYPE(u64, u64, 0),
- ASSIGN_FETCH_TYPE(s8, u8, 1),
- ASSIGN_FETCH_TYPE(s16, u16, 1),
- ASSIGN_FETCH_TYPE(s32, u32, 1),
- ASSIGN_FETCH_TYPE(s64, u64, 1),
-};
-
-static const struct fetch_type *find_fetch_type(const char *type)
-{
- int i;
-
- if (!type)
- type = DEFAULT_FETCH_TYPE_STR;
-
- /* Special case: bitfield */
- if (*type == 'b') {
- unsigned long bs;
- type = strchr(type, '/');
- if (!type)
- goto fail;
- type++;
- if (strict_strtoul(type, 0, &bs))
- goto fail;
- switch (bs) {
- case 8:
- return find_fetch_type("u8");
- case 16:
- return find_fetch_type("u16");
- case 32:
- return find_fetch_type("u32");
- case 64:
- return find_fetch_type("u64");
- default:
- goto fail;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(fetch_type_table); i++)
- if (strcmp(type, fetch_type_table[i].name) == 0)
- return &fetch_type_table[i];
-fail:
- return NULL;
-}
-
-/* Special function : only accept unsigned long */
-static __kprobes void fetch_stack_address(struct pt_regs *regs,
- void *dummy, void *dest)
-{
- *(unsigned long *)dest = kernel_stack_pointer(regs);
-}
-
-static fetch_func_t get_fetch_size_function(const struct fetch_type *type,
- fetch_func_t orig_fn)
-{
- int i;
-
- if (type != &fetch_type_table[FETCH_TYPE_STRING])
- return NULL; /* Only string type needs size function */
- for (i = 0; i < FETCH_MTD_END; i++)
- if (type->fetch[i] == orig_fn)
- return fetch_type_table[FETCH_TYPE_STRSIZE].fetch[i];
-
- WARN_ON(1); /* This should not happen */
- return NULL;
-}
+#define KPROBE_EVENT_SYSTEM "kprobes"
/**
* Kprobe event core functions
*/
-struct probe_arg {
- struct fetch_param fetch;
- struct fetch_param fetch_size;
- unsigned int offset; /* Offset from argument entry */
- const char *name; /* Name of this argument */
- const char *comm; /* Command of this argument */
- const struct fetch_type *type; /* Type of this argument */
-};
-
-/* Flags for trace_probe */
-#define TP_FLAG_TRACE 1
-#define TP_FLAG_PROFILE 2
-#define TP_FLAG_REGISTERED 4
-
struct trace_probe {
struct list_head list;
struct kretprobe rp; /* Use rp.kp for kprobe use */
@@ -631,18 +99,6 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
struct pt_regs *regs);
-/* Check the name is good for event/group/fields */
-static int is_good_name(const char *name)
-{
- if (!isalpha(*name) && *name != '_')
- return 0;
- while (*++name != '\0') {
- if (!isalpha(*name) && !isdigit(*name) && *name != '_')
- return 0;
- }
- return 1;
-}
-
/*
* Allocate new trace_probe and initialize it (including kprobes).
*/
@@ -651,7 +107,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
void *addr,
const char *symbol,
unsigned long offs,
- int nargs, int is_return)
+ int nargs, bool is_return)
{
struct trace_probe *tp;
int ret = -ENOMEM;
@@ -702,34 +158,12 @@ error:
return ERR_PTR(ret);
}
-static void update_probe_arg(struct probe_arg *arg)
-{
- if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
- update_bitfield_fetch_param(arg->fetch.data);
- else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
- update_deref_fetch_param(arg->fetch.data);
- else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
- update_symbol_cache(arg->fetch.data);
-}
-
-static void free_probe_arg(struct probe_arg *arg)
-{
- if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
- free_bitfield_fetch_param(arg->fetch.data);
- else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
- free_deref_fetch_param(arg->fetch.data);
- else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
- free_symbol_cache(arg->fetch.data);
- kfree(arg->name);
- kfree(arg->comm);
-}
-
static void free_trace_probe(struct trace_probe *tp)
{
int i;
for (i = 0; i < tp->nr_args; i++)
- free_probe_arg(&tp->args[i]);
+ traceprobe_free_probe_arg(&tp->args[i]);
kfree(tp->call.class->system);
kfree(tp->call.name);
@@ -787,7 +221,7 @@ static int __register_trace_probe(struct trace_probe *tp)
return -EINVAL;
for (i = 0; i < tp->nr_args; i++)
- update_probe_arg(&tp->args[i]);
+ traceprobe_update_arg(&tp->args[i]);
/* Set/clear disabled flag according to tp->flag */
if (trace_probe_is_enabled(tp))
@@ -919,227 +353,6 @@ static struct notifier_block trace_probe_module_nb = {
.priority = 1 /* Invoked after kprobe module callback */
};
-/* Split symbol and offset. */
-static int split_symbol_offset(char *symbol, unsigned long *offset)
-{
- char *tmp;
- int ret;
-
- if (!offset)
- return -EINVAL;
-
- tmp = strchr(symbol, '+');
- if (tmp) {
- /* skip sign because strict_strtol doesn't accept '+' */
- ret = strict_strtoul(tmp + 1, 0, offset);
- if (ret)
- return ret;
- *tmp = '\0';
- } else
- *offset = 0;
- return 0;
-}
-
-#define PARAM_MAX_ARGS 16
-#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
-
-static int parse_probe_vars(char *arg, const struct fetch_type *t,
- struct fetch_param *f, int is_return)
-{
- int ret = 0;
- unsigned long param;
-
- if (strcmp(arg, "retval") == 0) {
- if (is_return)
- f->fn = t->fetch[FETCH_MTD_retval];
- else
- ret = -EINVAL;
- } else if (strncmp(arg, "stack", 5) == 0) {
- if (arg[5] == '\0') {
- if (strcmp(t->name, DEFAULT_FETCH_TYPE_STR) == 0)
- f->fn = fetch_stack_address;
- else
- ret = -EINVAL;
- } else if (isdigit(arg[5])) {
- ret = strict_strtoul(arg + 5, 10, &param);
- if (ret || param > PARAM_MAX_STACK)
- ret = -EINVAL;
- else {
- f->fn = t->fetch[FETCH_MTD_stack];
- f->data = (void *)param;
- }
- } else
- ret = -EINVAL;
- } else
- ret = -EINVAL;
- return ret;
-}
-
-/* Recursive argument parser */
-static int __parse_probe_arg(char *arg, const struct fetch_type *t,
- struct fetch_param *f, int is_return)
-{
- int ret = 0;
- unsigned long param;
- long offset;
- char *tmp;
-
- switch (arg[0]) {
- case '$':
- ret = parse_probe_vars(arg + 1, t, f, is_return);
- break;
- case '%': /* named register */
- ret = regs_query_register_offset(arg + 1);
- if (ret >= 0) {
- f->fn = t->fetch[FETCH_MTD_reg];
- f->data = (void *)(unsigned long)ret;
- ret = 0;
- }
- break;
- case '@': /* memory or symbol */
- if (isdigit(arg[1])) {
- ret = strict_strtoul(arg + 1, 0, &param);
- if (ret)
- break;
- f->fn = t->fetch[FETCH_MTD_memory];
- f->data = (void *)param;
- } else {
- ret = split_symbol_offset(arg + 1, &offset);
- if (ret)
- break;
- f->data = alloc_symbol_cache(arg + 1, offset);
- if (f->data)
- f->fn = t->fetch[FETCH_MTD_symbol];
- }
- break;
- case '+': /* deref memory */
- arg++; /* Skip '+', because strict_strtol() rejects it. */
- case '-':
- tmp = strchr(arg, '(');
- if (!tmp)
- break;
- *tmp = '\0';
- ret = strict_strtol(arg, 0, &offset);
- if (ret)
- break;
- arg = tmp + 1;
- tmp = strrchr(arg, ')');
- if (tmp) {
- struct deref_fetch_param *dprm;
- const struct fetch_type *t2 = find_fetch_type(NULL);
- *tmp = '\0';
- dprm = kzalloc(sizeof(struct deref_fetch_param),
- GFP_KERNEL);
- if (!dprm)
- return -ENOMEM;
- dprm->offset = offset;
- ret = __parse_probe_arg(arg, t2, &dprm->orig,
- is_return);
- if (ret)
- kfree(dprm);
- else {
- f->fn = t->fetch[FETCH_MTD_deref];
- f->data = (void *)dprm;
- }
- }
- break;
- }
- if (!ret && !f->fn) { /* Parsed, but do not find fetch method */
- pr_info("%s type has no corresponding fetch method.\n",
- t->name);
- ret = -EINVAL;
- }
- return ret;
-}
-
-#define BYTES_TO_BITS(nb) ((BITS_PER_LONG * (nb)) / sizeof(long))
-
-/* Bitfield type needs to be parsed into a fetch function */
-static int __parse_bitfield_probe_arg(const char *bf,
- const struct fetch_type *t,
- struct fetch_param *f)
-{
- struct bitfield_fetch_param *bprm;
- unsigned long bw, bo;
- char *tail;
-
- if (*bf != 'b')
- return 0;
-
- bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
- if (!bprm)
- return -ENOMEM;
- bprm->orig = *f;
- f->fn = t->fetch[FETCH_MTD_bitfield];
- f->data = (void *)bprm;
-
- bw = simple_strtoul(bf + 1, &tail, 0); /* Use simple one */
- if (bw == 0 || *tail != '@')
- return -EINVAL;
-
- bf = tail + 1;
- bo = simple_strtoul(bf, &tail, 0);
- if (tail == bf || *tail != '/')
- return -EINVAL;
-
- bprm->hi_shift = BYTES_TO_BITS(t->size) - (bw + bo);
- bprm->low_shift = bprm->hi_shift + bo;
- return (BYTES_TO_BITS(t->size) < (bw + bo)) ? -EINVAL : 0;
-}
-
-/* String length checking wrapper */
-static int parse_probe_arg(char *arg, struct trace_probe *tp,
- struct probe_arg *parg, int is_return)
-{
- const char *t;
- int ret;
-
- if (strlen(arg) > MAX_ARGSTR_LEN) {
- pr_info("Argument is too long.: %s\n", arg);
- return -ENOSPC;
- }
- parg->comm = kstrdup(arg, GFP_KERNEL);
- if (!parg->comm) {
- pr_info("Failed to allocate memory for command '%s'.\n", arg);
- return -ENOMEM;
- }
- t = strchr(parg->comm, ':');
- if (t) {
- arg[t - parg->comm] = '\0';
- t++;
- }
- parg->type = find_fetch_type(t);
- if (!parg->type) {
- pr_info("Unsupported type: %s\n", t);
- return -EINVAL;
- }
- parg->offset = tp->size;
- tp->size += parg->type->size;
- ret = __parse_probe_arg(arg, parg->type, &parg->fetch, is_return);
- if (ret >= 0 && t != NULL)
- ret = __parse_bitfield_probe_arg(t, parg->type, &parg->fetch);
- if (ret >= 0) {
- parg->fetch_size.fn = get_fetch_size_function(parg->type,
- parg->fetch.fn);
- parg->fetch_size.data = parg->fetch.data;
- }
- return ret;
-}
-
-/* Return 1 if name is reserved or already used by another argument */
-static int conflict_field_name(const char *name,
- struct probe_arg *args, int narg)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
- if (strcmp(reserved_field_names[i], name) == 0)
- return 1;
- for (i = 0; i < narg; i++)
- if (strcmp(args[i].name, name) == 0)
- return 1;
- return 0;
-}
-
static int create_trace_probe(int argc, char **argv)
{
/*
@@ -1162,7 +375,7 @@ static int create_trace_probe(int argc, char **argv)
*/
struct trace_probe *tp;
int i, ret = 0;
- int is_return = 0, is_delete = 0;
+ bool is_return = false, is_delete = false;
char *symbol = NULL, *event = NULL, *group = NULL;
char *arg;
unsigned long offset = 0;
@@ -1171,11 +384,11 @@ static int create_trace_probe(int argc, char **argv)
/* argc must be >= 1 */
if (argv[0][0] == 'p')
- is_return = 0;
+ is_return = false;
else if (argv[0][0] == 'r')
- is_return = 1;
+ is_return = true;
else if (argv[0][0] == '-')
- is_delete = 1;
+ is_delete = true;
else {
pr_info("Probe definition must be started with 'p', 'r' or"
" '-'.\n");
@@ -1240,7 +453,7 @@ static int create_trace_probe(int argc, char **argv)
/* a symbol specified */
symbol = argv[1];
/* TODO: support .init module functions */
- ret = split_symbol_offset(symbol, &offset);
+ ret = traceprobe_split_symbol_offset(symbol, &offset);
if (ret) {
pr_info("Failed to parse symbol.\n");
return ret;
@@ -1302,7 +515,8 @@ static int create_trace_probe(int argc, char **argv)
goto error;
}
- if (conflict_field_name(tp->args[i].name, tp->args, i)) {
+ if (traceprobe_conflict_field_name(tp->args[i].name,
+ tp->args, i)) {
pr_info("Argument[%d] name '%s' conflicts with "
"another field.\n", i, argv[i]);
ret = -EINVAL;
@@ -1310,7 +524,8 @@ static int create_trace_probe(int argc, char **argv)
}
/* Parse fetch argument */
- ret = parse_probe_arg(arg, tp, &tp->args[i], is_return);
+ ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i],
+ is_return, true);
if (ret) {
pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
goto error;
@@ -1412,70 +627,11 @@ static int probes_open(struct inode *inode, struct file *file)
return seq_open(file, &probes_seq_op);
}
-static int command_trace_probe(const char *buf)
-{
- char **argv;
- int argc = 0, ret = 0;
-
- argv = argv_split(GFP_KERNEL, buf, &argc);
- if (!argv)
- return -ENOMEM;
-
- if (argc)
- ret = create_trace_probe(argc, argv);
-
- argv_free(argv);
- return ret;
-}
-
-#define WRITE_BUFSIZE 4096
-
static ssize_t probes_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
- char *kbuf, *tmp;
- int ret;
- size_t done;
- size_t size;
-
- kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
-
- ret = done = 0;
- while (done < count) {
- size = count - done;
- if (size >= WRITE_BUFSIZE)
- size = WRITE_BUFSIZE - 1;
- if (copy_from_user(kbuf, buffer + done, size)) {
- ret = -EFAULT;
- goto out;
- }
- kbuf[size] = '\0';
- tmp = strchr(kbuf, '\n');
- if (tmp) {
- *tmp = '\0';
- size = tmp - kbuf + 1;
- } else if (done + size < count) {
- pr_warning("Line length is too long: "
- "Should be less than %d.", WRITE_BUFSIZE);
- ret = -EINVAL;
- goto out;
- }
- done += size;
- /* Remove comments */
- tmp = strchr(kbuf, '#');
- if (tmp)
- *tmp = '\0';
-
- ret = command_trace_probe(kbuf);
- if (ret)
- goto out;
- }
- ret = done;
-out:
- kfree(kbuf);
- return ret;
+ return traceprobe_probes_write(file, buffer, count, ppos,
+ create_trace_probe);
}
static const struct file_operations kprobe_events_ops = {
@@ -1711,16 +867,6 @@ partial:
return TRACE_TYPE_PARTIAL_LINE;
}
-#undef DEFINE_FIELD
-#define DEFINE_FIELD(type, item, name, is_signed) \
- do { \
- ret = trace_define_field(event_call, #type, name, \
- offsetof(typeof(field), item), \
- sizeof(field.item), is_signed, \
- FILTER_OTHER); \
- if (ret) \
- return ret; \
- } while (0)
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
@@ -2051,8 +1197,9 @@ static __init int kprobe_trace_self_tests_init(void)
pr_info("Testing kprobe tracing: ");
- ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
- "$stack $stack0 +0($stack)");
+ ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
+ "$stack $stack0 +0($stack)",
+ create_trace_probe);
if (WARN_ON_ONCE(ret)) {
pr_warning("error on probing function entry.\n");
warn++;
@@ -2066,8 +1213,8 @@ static __init int kprobe_trace_self_tests_init(void)
enable_trace_probe(tp, TP_FLAG_TRACE);
}
- ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
- "$retval");
+ ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
+ "$retval", create_trace_probe);
if (WARN_ON_ONCE(ret)) {
pr_warning("error on probing function return.\n");
warn++;
@@ -2101,13 +1248,13 @@ static __init int kprobe_trace_self_tests_init(void)
} else
disable_trace_probe(tp, TP_FLAG_TRACE);
- ret = command_trace_probe("-:testprobe");
+ ret = traceprobe_command("-:testprobe", create_trace_probe);
if (WARN_ON_ONCE(ret)) {
pr_warning("error on deleting a probe.\n");
warn++;
}
- ret = command_trace_probe("-:testprobe2");
+ ret = traceprobe_command("-:testprobe2", create_trace_probe);
if (WARN_ON_ONCE(ret)) {
pr_warning("error on deleting a probe.\n");
warn++;
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
new file mode 100644
index 000000000000..daa9980153af
--- /dev/null
+++ b/kernel/trace/trace_probe.c
@@ -0,0 +1,839 @@
+/*
+ * Common code for probe-based Dynamic events.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This code was copied from kernel/trace/trace_kprobe.c written by
+ * Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+ *
+ * Updates to make this generic:
+ * Copyright (C) IBM Corporation, 2010-2011
+ * Author: Srikar Dronamraju
+ */
+
+#include "trace_probe.h"
+
+const char *reserved_field_names[] = {
+ "common_type",
+ "common_flags",
+ "common_preempt_count",
+ "common_pid",
+ "common_tgid",
+ FIELD_STRING_IP,
+ FIELD_STRING_RETIP,
+ FIELD_STRING_FUNC,
+};
+
+/* Printing function type */
+#define PRINT_TYPE_FUNC_NAME(type) print_type_##type
+#define PRINT_TYPE_FMT_NAME(type) print_type_format_##type
+
+/* Printing in basic type function template */
+#define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt, cast) \
+static __kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \
+ const char *name, \
+ void *data, void *ent)\
+{ \
+ return trace_seq_printf(s, " %s=" fmt, name, (cast)*(type *)data);\
+} \
+static const char PRINT_TYPE_FMT_NAME(type)[] = fmt;
+
+DEFINE_BASIC_PRINT_TYPE_FUNC(u8, "%x", unsigned int)
+DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "%x", unsigned int)
+DEFINE_BASIC_PRINT_TYPE_FUNC(u32, "%lx", unsigned long)
+DEFINE_BASIC_PRINT_TYPE_FUNC(u64, "%llx", unsigned long long)
+DEFINE_BASIC_PRINT_TYPE_FUNC(s8, "%d", int)
+DEFINE_BASIC_PRINT_TYPE_FUNC(s16, "%d", int)
+DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%ld", long)
+DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%lld", long long)
+
+static inline void *get_rloc_data(u32 *dl)
+{
+ return (u8 *)dl + get_rloc_offs(*dl);
+}
+
+/* For data_loc conversion */
+static inline void *get_loc_data(u32 *dl, void *ent)
+{
+ return (u8 *)ent + get_rloc_offs(*dl);
+}
+
+/* For defining macros, define string/string_size types */
+typedef u32 string;
+typedef u32 string_size;
+
+/* Print type function for string type */
+static __kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s,
+ const char *name,
+ void *data, void *ent)
+{
+ int len = *(u32 *)data >> 16;
+
+ if (!len)
+ return trace_seq_printf(s, " %s=(fault)", name);
+ else
+ return trace_seq_printf(s, " %s=\"%s\"", name,
+ (const char *)get_loc_data(data, ent));
+}
+
+static const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\"";
+
+#define FETCH_FUNC_NAME(method, type) fetch_##method##_##type
+/*
+ * Define macro for basic types - we don't need to define s* types, because
+ * we have to care only about bitwidth at recording time.
+ */
+#define DEFINE_BASIC_FETCH_FUNCS(method) \
+DEFINE_FETCH_##method(u8) \
+DEFINE_FETCH_##method(u16) \
+DEFINE_FETCH_##method(u32) \
+DEFINE_FETCH_##method(u64)
+
+#define CHECK_FETCH_FUNCS(method, fn) \
+ (((FETCH_FUNC_NAME(method, u8) == fn) || \
+ (FETCH_FUNC_NAME(method, u16) == fn) || \
+ (FETCH_FUNC_NAME(method, u32) == fn) || \
+ (FETCH_FUNC_NAME(method, u64) == fn) || \
+ (FETCH_FUNC_NAME(method, string) == fn) || \
+ (FETCH_FUNC_NAME(method, string_size) == fn)) \
+ && (fn != NULL))
+
+/* Data fetch function templates */
+#define DEFINE_FETCH_reg(type) \
+static __kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \
+ void *offset, void *dest) \
+{ \
+ *(type *)dest = (type)regs_get_register(regs, \
+ (unsigned int)((unsigned long)offset)); \
+}
+DEFINE_BASIC_FETCH_FUNCS(reg)
+/* No string on the register */
+#define fetch_reg_string NULL
+#define fetch_reg_string_size NULL
+
+#define DEFINE_FETCH_stack(type) \
+static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
+ void *offset, void *dest) \
+{ \
+ *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \
+ (unsigned int)((unsigned long)offset)); \
+}
+DEFINE_BASIC_FETCH_FUNCS(stack)
+/* No string on the stack entry */
+#define fetch_stack_string NULL
+#define fetch_stack_string_size NULL
+
+#define DEFINE_FETCH_retval(type) \
+static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\
+ void *dummy, void *dest) \
+{ \
+ *(type *)dest = (type)regs_return_value(regs); \
+}
+DEFINE_BASIC_FETCH_FUNCS(retval)
+/* No string on the retval */
+#define fetch_retval_string NULL
+#define fetch_retval_string_size NULL
+
+#define DEFINE_FETCH_memory(type) \
+static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
+ void *addr, void *dest) \
+{ \
+ type retval; \
+ if (probe_kernel_address(addr, retval)) \
+ *(type *)dest = 0; \
+ else \
+ *(type *)dest = retval; \
+}
+DEFINE_BASIC_FETCH_FUNCS(memory)
+/*
+ * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
+ * length and relative data location.
+ */
+static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
+ void *addr, void *dest)
+{
+ long ret;
+ int maxlen = get_rloc_len(*(u32 *)dest);
+ u8 *dst = get_rloc_data(dest);
+ u8 *src = addr;
+ mm_segment_t old_fs = get_fs();
+
+ if (!maxlen)
+ return;
+
+ /*
+ * Try to get string again, since the string can be changed while
+ * probing.
+ */
+ set_fs(KERNEL_DS);
+ pagefault_disable();
+
+ do
+ ret = __copy_from_user_inatomic(dst++, src++, 1);
+ while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
+
+ dst[-1] = '\0';
+ pagefault_enable();
+ set_fs(old_fs);
+
+ if (ret < 0) { /* Failed to fetch string */
+ ((u8 *)get_rloc_data(dest))[0] = '\0';
+ *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
+ } else {
+ *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
+ get_rloc_offs(*(u32 *)dest));
+ }
+}
+
+/* Return the length of string -- including null terminal byte */
+static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
+ void *addr, void *dest)
+{
+ mm_segment_t old_fs;
+ int ret, len = 0;
+ u8 c;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ pagefault_disable();
+
+ do {
+ ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
+ len++;
+ } while (c && ret == 0 && len < MAX_STRING_SIZE);
+
+ pagefault_enable();
+ set_fs(old_fs);
+
+ if (ret < 0) /* Failed to check the length */
+ *(u32 *)dest = 0;
+ else
+ *(u32 *)dest = len;
+}
+
+/* Memory fetching by symbol */
+struct symbol_cache {
+ char *symbol;
+ long offset;
+ unsigned long addr;
+};
+
+static unsigned long update_symbol_cache(struct symbol_cache *sc)
+{
+ sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
+
+ if (sc->addr)
+ sc->addr += sc->offset;
+
+ return sc->addr;
+}
+
+static void free_symbol_cache(struct symbol_cache *sc)
+{
+ kfree(sc->symbol);
+ kfree(sc);
+}
+
+static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
+{
+ struct symbol_cache *sc;
+
+ if (!sym || strlen(sym) == 0)
+ return NULL;
+
+ sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
+ if (!sc)
+ return NULL;
+
+ sc->symbol = kstrdup(sym, GFP_KERNEL);
+ if (!sc->symbol) {
+ kfree(sc);
+ return NULL;
+ }
+ sc->offset = offset;
+ update_symbol_cache(sc);
+
+ return sc;
+}
+
+#define DEFINE_FETCH_symbol(type) \
+static __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs,\
+ void *data, void *dest) \
+{ \
+ struct symbol_cache *sc = data; \
+ if (sc->addr) \
+ fetch_memory_##type(regs, (void *)sc->addr, dest); \
+ else \
+ *(type *)dest = 0; \
+}
+DEFINE_BASIC_FETCH_FUNCS(symbol)
+DEFINE_FETCH_symbol(string)
+DEFINE_FETCH_symbol(string_size)
+
+/* Dereference memory access function */
+struct deref_fetch_param {
+ struct fetch_param orig;
+ long offset;
+};
+
+#define DEFINE_FETCH_deref(type) \
+static __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs,\
+ void *data, void *dest) \
+{ \
+ struct deref_fetch_param *dprm = data; \
+ unsigned long addr; \
+ call_fetch(&dprm->orig, regs, &addr); \
+ if (addr) { \
+ addr += dprm->offset; \
+ fetch_memory_##type(regs, (void *)addr, dest); \
+ } else \
+ *(type *)dest = 0; \
+}
+DEFINE_BASIC_FETCH_FUNCS(deref)
+DEFINE_FETCH_deref(string)
+DEFINE_FETCH_deref(string_size)
+
+static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data)
+{
+ if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
+ update_deref_fetch_param(data->orig.data);
+ else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
+ update_symbol_cache(data->orig.data);
+}
+
+static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
+{
+ if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
+ free_deref_fetch_param(data->orig.data);
+ else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
+ free_symbol_cache(data->orig.data);
+ kfree(data);
+}
+
+/* Bitfield fetch function */
+struct bitfield_fetch_param {
+ struct fetch_param orig;
+ unsigned char hi_shift;
+ unsigned char low_shift;
+};
+
+#define DEFINE_FETCH_bitfield(type) \
+static __kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs,\
+ void *data, void *dest) \
+{ \
+ struct bitfield_fetch_param *bprm = data; \
+ type buf = 0; \
+ call_fetch(&bprm->orig, regs, &buf); \
+ if (buf) { \
+ buf <<= bprm->hi_shift; \
+ buf >>= bprm->low_shift; \
+ } \
+ *(type *)dest = buf; \
+}
+
+DEFINE_BASIC_FETCH_FUNCS(bitfield)
+#define fetch_bitfield_string NULL
+#define fetch_bitfield_string_size NULL
+
+static __kprobes void
+update_bitfield_fetch_param(struct bitfield_fetch_param *data)
+{
+ /*
+ * Don't check the bitfield itself, because this must be the
+ * last fetch function.
+ */
+ if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
+ update_deref_fetch_param(data->orig.data);
+ else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
+ update_symbol_cache(data->orig.data);
+}
+
+static __kprobes void
+free_bitfield_fetch_param(struct bitfield_fetch_param *data)
+{
+ /*
+ * Don't check the bitfield itself, because this must be the
+ * last fetch function.
+ */
+ if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
+ free_deref_fetch_param(data->orig.data);
+ else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
+ free_symbol_cache(data->orig.data);
+
+ kfree(data);
+}
+
+/* Default (unsigned long) fetch type */
+#define __DEFAULT_FETCH_TYPE(t) u##t
+#define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)
+#define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG)
+#define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE)
+
+#define ASSIGN_FETCH_FUNC(method, type) \
+ [FETCH_MTD_##method] = FETCH_FUNC_NAME(method, type)
+
+#define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \
+ {.name = _name, \
+ .size = _size, \
+ .is_signed = sign, \
+ .print = PRINT_TYPE_FUNC_NAME(ptype), \
+ .fmt = PRINT_TYPE_FMT_NAME(ptype), \
+ .fmttype = _fmttype, \
+ .fetch = { \
+ASSIGN_FETCH_FUNC(reg, ftype), \
+ASSIGN_FETCH_FUNC(stack, ftype), \
+ASSIGN_FETCH_FUNC(retval, ftype), \
+ASSIGN_FETCH_FUNC(memory, ftype), \
+ASSIGN_FETCH_FUNC(symbol, ftype), \
+ASSIGN_FETCH_FUNC(deref, ftype), \
+ASSIGN_FETCH_FUNC(bitfield, ftype), \
+ } \
+ }
+
+#define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \
+ __ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, #ptype)
+
+#define FETCH_TYPE_STRING 0
+#define FETCH_TYPE_STRSIZE 1
+
+/* Fetch type information table */
+static const struct fetch_type fetch_type_table[] = {
+ /* Special types */
+ [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
+ sizeof(u32), 1, "__data_loc char[]"),
+ [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
+ string_size, sizeof(u32), 0, "u32"),
+ /* Basic types */
+ ASSIGN_FETCH_TYPE(u8, u8, 0),
+ ASSIGN_FETCH_TYPE(u16, u16, 0),
+ ASSIGN_FETCH_TYPE(u32, u32, 0),
+ ASSIGN_FETCH_TYPE(u64, u64, 0),
+ ASSIGN_FETCH_TYPE(s8, u8, 1),
+ ASSIGN_FETCH_TYPE(s16, u16, 1),
+ ASSIGN_FETCH_TYPE(s32, u32, 1),
+ ASSIGN_FETCH_TYPE(s64, u64, 1),
+};
+
+static const struct fetch_type *find_fetch_type(const char *type)
+{
+ int i;
+
+ if (!type)
+ type = DEFAULT_FETCH_TYPE_STR;
+
+ /* Special case: bitfield */
+ if (*type == 'b') {
+ unsigned long bs;
+
+ type = strchr(type, '/');
+ if (!type)
+ goto fail;
+
+ type++;
+ if (strict_strtoul(type, 0, &bs))
+ goto fail;
+
+ switch (bs) {
+ case 8:
+ return find_fetch_type("u8");
+ case 16:
+ return find_fetch_type("u16");
+ case 32:
+ return find_fetch_type("u32");
+ case 64:
+ return find_fetch_type("u64");
+ default:
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(fetch_type_table); i++)
+ if (strcmp(type, fetch_type_table[i].name) == 0)
+ return &fetch_type_table[i];
+
+fail:
+ return NULL;
+}
+
+/* Special function : only accept unsigned long */
+static __kprobes void fetch_stack_address(struct pt_regs *regs,
+ void *dummy, void *dest)
+{
+ *(unsigned long *)dest = kernel_stack_pointer(regs);
+}
+
+static fetch_func_t get_fetch_size_function(const struct fetch_type *type,
+ fetch_func_t orig_fn)
+{
+ int i;
+
+ if (type != &fetch_type_table[FETCH_TYPE_STRING])
+ return NULL; /* Only string type needs size function */
+
+ for (i = 0; i < FETCH_MTD_END; i++)
+ if (type->fetch[i] == orig_fn)
+ return fetch_type_table[FETCH_TYPE_STRSIZE].fetch[i];
+
+ WARN_ON(1); /* This should not happen */
+
+ return NULL;
+}
+
+/* Split symbol and offset. */
+int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
+{
+ char *tmp;
+ int ret;
+
+ if (!offset)
+ return -EINVAL;
+
+ tmp = strchr(symbol, '+');
+ if (tmp) {
+ /* skip sign because strict_strtol doesn't accept '+' */
+ ret = strict_strtoul(tmp + 1, 0, offset);
+ if (ret)
+ return ret;
+
+ *tmp = '\0';
+ } else
+ *offset = 0;
+
+ return 0;
+}
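
traceprobe_split_symbol_offset() above cuts a "SYMBOL+offs" probe point in two: it finds the '+', parses the number after it and terminates the symbol string there. A minimal userspace approximation of that split (not part of this patch; strtoul() stands in for the kernel's strict_strtoul(), and vfs_read is just an example symbol):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* "vfs_read+0x10" -> symbol "vfs_read", offset 0x10; no '+' means offset 0 */
static int split_symbol_offset(char *symbol, unsigned long *offset)
{
	char *plus = strchr(symbol, '+');

	if (!offset)
		return -1;
	if (plus) {
		*offset = strtoul(plus + 1, NULL, 0);
		*plus = '\0';
	} else {
		*offset = 0;
	}
	return 0;
}

int main(void)
{
	char sym[] = "vfs_read+0x10";
	unsigned long offs;

	split_symbol_offset(sym, &offs);
	printf("%s %#lx\n", sym, offs);	/* prints: vfs_read 0x10 */
	return 0;
}
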
+
+#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
+
+static int parse_probe_vars(char *arg, const struct fetch_type *t,
+ struct fetch_param *f, bool is_return)
+{
+ int ret = 0;
+ unsigned long param;
+
+ if (strcmp(arg, "retval") == 0) {
+ if (is_return)
+ f->fn = t->fetch[FETCH_MTD_retval];
+ else
+ ret = -EINVAL;
+ } else if (strncmp(arg, "stack", 5) == 0) {
+ if (arg[5] == '\0') {
+ if (strcmp(t->name, DEFAULT_FETCH_TYPE_STR) == 0)
+ f->fn = fetch_stack_address;
+ else
+ ret = -EINVAL;
+ } else if (isdigit(arg[5])) {
+ ret = strict_strtoul(arg + 5, 10, &param);
+ if (ret || param > PARAM_MAX_STACK)
+ ret = -EINVAL;
+ else {
+ f->fn = t->fetch[FETCH_MTD_stack];
+ f->data = (void *)param;
+ }
+ } else
+ ret = -EINVAL;
+ } else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+/* Recursive argument parser */
+static int parse_probe_arg(char *arg, const struct fetch_type *t,
+ struct fetch_param *f, bool is_return, bool is_kprobe)
+{
+ unsigned long param;
+ long offset;
+ char *tmp;
+ int ret;
+
+ ret = 0;
+
+ /* For now, uprobe_events supports only register arguments */
+ if (!is_kprobe && arg[0] != '%')
+ return -EINVAL;
+
+ switch (arg[0]) {
+ case '$':
+ ret = parse_probe_vars(arg + 1, t, f, is_return);
+ break;
+
+ case '%': /* named register */
+ ret = regs_query_register_offset(arg + 1);
+ if (ret >= 0) {
+ f->fn = t->fetch[FETCH_MTD_reg];
+ f->data = (void *)(unsigned long)ret;
+ ret = 0;
+ }
+ break;
+
+ case '@': /* memory or symbol */
+ if (isdigit(arg[1])) {
+ ret = strict_strtoul(arg + 1, 0, &param);
+ if (ret)
+ break;
+
+ f->fn = t->fetch[FETCH_MTD_memory];
+ f->data = (void *)param;
+ } else {
+ ret = traceprobe_split_symbol_offset(arg + 1, &offset);
+ if (ret)
+ break;
+
+ f->data = alloc_symbol_cache(arg + 1, offset);
+ if (f->data)
+ f->fn = t->fetch[FETCH_MTD_symbol];
+ }
+ break;
+
+ case '+': /* deref memory */
+ arg++; /* Skip '+', because strict_strtol() rejects it. */
+ case '-':
+ tmp = strchr(arg, '(');
+ if (!tmp)
+ break;
+
+ *tmp = '\0';
+ ret = strict_strtol(arg, 0, &offset);
+
+ if (ret)
+ break;
+
+ arg = tmp + 1;
+ tmp = strrchr(arg, ')');
+
+ if (tmp) {
+ struct deref_fetch_param *dprm;
+ const struct fetch_type *t2;
+
+ t2 = find_fetch_type(NULL);
+ *tmp = '\0';
+ dprm = kzalloc(sizeof(struct deref_fetch_param), GFP_KERNEL);
+
+ if (!dprm)
+ return -ENOMEM;
+
+ dprm->offset = offset;
+ ret = parse_probe_arg(arg, t2, &dprm->orig, is_return,
+ is_kprobe);
+ if (ret)
+ kfree(dprm);
+ else {
+ f->fn = t->fetch[FETCH_MTD_deref];
+ f->data = (void *)dprm;
+ }
+ }
+ break;
+ }
+ if (!ret && !f->fn) { /* Parsed, but do not find fetch method */
+ pr_info("%s type has no corresponding fetch method.\n", t->name);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
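
For reference, here are a few fetch-argument spellings and the branch of the switch in parse_probe_arg() that handles each; the list is purely illustrative and not part of the patch (register names are x86-style, addresses are made up):

#include <stdio.h>

static const char * const example_args[] = {
	"$retval",		/* '$' -> parse_probe_vars(): return value (return probes only) */
	"$stack0",		/* '$' -> N-th (here 0th) entry of the kernel stack             */
	"%ax",			/* '%' -> named register (x86 name, example only)               */
	"@0xc0000000",		/* '@' + digits -> absolute memory address                      */
	"@jiffies",		/* '@' + symbol -> fetched through the symbol cache             */
	"+0($stack):u32",	/* +|-offs(arg) -> dereference; the ":u32" type suffix is
				 * split off earlier by traceprobe_parse_probe_arg()            */
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(example_args) / sizeof(example_args[0]); i++)
		puts(example_args[i]);
	return 0;
}
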
+
+#define BYTES_TO_BITS(nb) ((BITS_PER_LONG * (nb)) / sizeof(long))
+
+/* Bitfield type needs to be parsed into a fetch function */
+static int __parse_bitfield_probe_arg(const char *bf,
+ const struct fetch_type *t,
+ struct fetch_param *f)
+{
+ struct bitfield_fetch_param *bprm;
+ unsigned long bw, bo;
+ char *tail;
+
+ if (*bf != 'b')
+ return 0;
+
+ bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
+ if (!bprm)
+ return -ENOMEM;
+
+ bprm->orig = *f;
+ f->fn = t->fetch[FETCH_MTD_bitfield];
+ f->data = (void *)bprm;
+ bw = simple_strtoul(bf + 1, &tail, 0); /* Use simple one */
+
+ if (bw == 0 || *tail != '@')
+ return -EINVAL;
+
+ bf = tail + 1;
+ bo = simple_strtoul(bf, &tail, 0);
+
+ if (tail == bf || *tail != '/')
+ return -EINVAL;
+
+ bprm->hi_shift = BYTES_TO_BITS(t->size) - (bw + bo);
+ bprm->low_shift = bprm->hi_shift + bo;
+
+ return (BYTES_TO_BITS(t->size) < (bw + bo)) ? -EINVAL : 0;
+}
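
The hi_shift/low_shift pair computed by __parse_bitfield_probe_arg() reduces a "b<bit-width>@<bit-offset>/<container-size>" spec to two plain shifts at fetch time: shifting left first throws away the bits above the field, shifting right then throws away the bits below it and right-aligns the result. A standalone sketch of that arithmetic for a 32-bit container (not part of this patch):

#include <stdint.h>
#include <stdio.h>

static uint32_t extract_bitfield(uint32_t raw, unsigned int width,
				 unsigned int offset)
{
	unsigned int container_bits = 32;	/* the "/32" part of the spec */
	unsigned int hi_shift = container_bits - (width + offset);
	unsigned int low_shift = hi_shift + offset;

	raw <<= hi_shift;	/* drop everything above the field */
	raw >>= low_shift;	/* drop everything below it and right-align */
	return raw;
}

int main(void)
{
	/* "b4@4/32" applied to 0x000000f0 yields 0xf */
	printf("%#x\n", extract_bitfield(0x000000f0u, 4, 4));
	return 0;
}
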
+
+/* String length checking wrapper */
+int traceprobe_parse_probe_arg(char *arg, ssize_t *size,
+ struct probe_arg *parg, bool is_return, bool is_kprobe)
+{
+ const char *t;
+ int ret;
+
+ if (strlen(arg) > MAX_ARGSTR_LEN) {
+ pr_info("Argument is too long.: %s\n", arg);
+ return -ENOSPC;
+ }
+ parg->comm = kstrdup(arg, GFP_KERNEL);
+ if (!parg->comm) {
+ pr_info("Failed to allocate memory for command '%s'.\n", arg);
+ return -ENOMEM;
+ }
+ t = strchr(parg->comm, ':');
+ if (t) {
+ arg[t - parg->comm] = '\0';
+ t++;
+ }
+ parg->type = find_fetch_type(t);
+ if (!parg->type) {
+ pr_info("Unsupported type: %s\n", t);
+ return -EINVAL;
+ }
+ parg->offset = *size;
+ *size += parg->type->size;
+ ret = parse_probe_arg(arg, parg->type, &parg->fetch, is_return, is_kprobe);
+
+ if (ret >= 0 && t != NULL)
+ ret = __parse_bitfield_probe_arg(t, parg->type, &parg->fetch);
+
+ if (ret >= 0) {
+ parg->fetch_size.fn = get_fetch_size_function(parg->type,
+ parg->fetch.fn);
+ parg->fetch_size.data = parg->fetch.data;
+ }
+
+ return ret;
+}
+
+/* Return 1 if name is reserved or already used by another argument */
+int traceprobe_conflict_field_name(const char *name,
+ struct probe_arg *args, int narg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
+ if (strcmp(reserved_field_names[i], name) == 0)
+ return 1;
+
+ for (i = 0; i < narg; i++)
+ if (strcmp(args[i].name, name) == 0)
+ return 1;
+
+ return 0;
+}
+
+void traceprobe_update_arg(struct probe_arg *arg)
+{
+ if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
+ update_bitfield_fetch_param(arg->fetch.data);
+ else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
+ update_deref_fetch_param(arg->fetch.data);
+ else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
+ update_symbol_cache(arg->fetch.data);
+}
+
+void traceprobe_free_probe_arg(struct probe_arg *arg)
+{
+ if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
+ free_bitfield_fetch_param(arg->fetch.data);
+ else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
+ free_deref_fetch_param(arg->fetch.data);
+ else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
+ free_symbol_cache(arg->fetch.data);
+
+ kfree(arg->name);
+ kfree(arg->comm);
+}
+
+int traceprobe_command(const char *buf, int (*createfn)(int, char **))
+{
+ char **argv;
+ int argc, ret;
+
+ argc = 0;
+ ret = 0;
+ argv = argv_split(GFP_KERNEL, buf, &argc);
+ if (!argv)
+ return -ENOMEM;
+
+ if (argc)
+ ret = createfn(argc, argv);
+
+ argv_free(argv);
+
+ return ret;
+}
+
+#define WRITE_BUFSIZE 4096
+
+ssize_t traceprobe_probes_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos,
+ int (*createfn)(int, char **))
+{
+ char *kbuf, *tmp;
+ int ret = 0;
+ size_t done = 0;
+ size_t size;
+
+ kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ while (done < count) {
+ size = count - done;
+
+ if (size >= WRITE_BUFSIZE)
+ size = WRITE_BUFSIZE - 1;
+
+ if (copy_from_user(kbuf, buffer + done, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ kbuf[size] = '\0';
+ tmp = strchr(kbuf, '\n');
+
+ if (tmp) {
+ *tmp = '\0';
+ size = tmp - kbuf + 1;
+ } else if (done + size < count) {
+ pr_warning("Line length is too long: "
+ "Should be less than %d.", WRITE_BUFSIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+ done += size;
+ /* Remove comments */
+ tmp = strchr(kbuf, '#');
+
+ if (tmp)
+ *tmp = '\0';
+
+ ret = traceprobe_command(kbuf, createfn);
+ if (ret)
+ goto out;
+ }
+ ret = done;
+
+out:
+ kfree(kbuf);
+
+ return ret;
+}
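
traceprobe_probes_write() is what backs "echo ... >> kprobe_events"-style usage: each newline-terminated line is copied in, anything after '#' is treated as a comment and stripped, and the remaining command is handed to the createfn supplied by the caller (create_trace_probe in the kprobe case). A rough userspace illustration (not part of this patch); the conventional debugfs path, the probed symbol do_sys_open and the $stack0 argument are only examples:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *cmd = "p:myprobe do_sys_open $stack0\n";
	FILE *f = fopen("/sys/kernel/debug/tracing/kprobe_events", "a");

	if (!f) {
		perror("kprobe_events");
		return 1;
	}
	fwrite(cmd, 1, strlen(cmd), f);	/* one probe definition per line */
	fclose(f);
	return 0;
}
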
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
new file mode 100644
index 000000000000..933708677814
--- /dev/null
+++ b/kernel/trace/trace_probe.h
@@ -0,0 +1,161 @@
+/*
+ * Common header file for probe-based Dynamic events.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This code was copied from kernel/trace/trace_kprobe.h written by
+ * Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+ *
+ * Updates to make this generic:
+ * Copyright (C) IBM Corporation, 2010-2011
+ * Author: Srikar Dronamraju
+ */
+
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/debugfs.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/ptrace.h>
+#include <linux/perf_event.h>
+#include <linux/kprobes.h>
+#include <linux/stringify.h>
+#include <linux/limits.h>
+#include <linux/uaccess.h>
+#include <asm/bitsperlong.h>
+
+#include "trace.h"
+#include "trace_output.h"
+
+#define MAX_TRACE_ARGS 128
+#define MAX_ARGSTR_LEN 63
+#define MAX_EVENT_NAME_LEN 64
+#define MAX_STRING_SIZE PATH_MAX
+
+/* Reserved field names */
+#define FIELD_STRING_IP "__probe_ip"
+#define FIELD_STRING_RETIP "__probe_ret_ip"
+#define FIELD_STRING_FUNC "__probe_func"
+
+#undef DEFINE_FIELD
+#define DEFINE_FIELD(type, item, name, is_signed) \
+ do { \
+ ret = trace_define_field(event_call, #type, name, \
+ offsetof(typeof(field), item), \
+ sizeof(field.item), is_signed, \
+ FILTER_OTHER); \
+ if (ret) \
+ return ret; \
+ } while (0)
+
+
+/* Flags for trace_probe */
+#define TP_FLAG_TRACE 1
+#define TP_FLAG_PROFILE 2
+#define TP_FLAG_REGISTERED 4
+#define TP_FLAG_UPROBE 8
+
+
+/* data_rloc: data relative location, compatible with u32 */
+#define make_data_rloc(len, roffs) \
+ (((u32)(len) << 16) | ((u32)(roffs) & 0xffff))
+#define get_rloc_len(dl) ((u32)(dl) >> 16)
+#define get_rloc_offs(dl) ((u32)(dl) & 0xffff)
+
+/*
+ * Convert data_rloc to data_loc:
+ * data_rloc stores the offset from data_rloc itself, but data_loc
+ * stores the offset from event entry.
+ */
+#define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs))
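
A data_rloc is thus a single u32 with the data length in the upper 16 bits and the relative offset in the lower 16 bits; make_data_rloc() packs the pair and get_rloc_len()/get_rloc_offs() unpack it again. A minimal userspace sketch of the same packing (not part of this patch):

#include <stdint.h>
#include <stdio.h>

#define make_data_rloc(len, roffs) \
	(((uint32_t)(len) << 16) | ((uint32_t)(roffs) & 0xffff))
#define get_rloc_len(dl)	((uint32_t)(dl) >> 16)
#define get_rloc_offs(dl)	((uint32_t)(dl) & 0xffff)

int main(void)
{
	uint32_t dl = make_data_rloc(5, 24);	/* 5-byte string, 24 bytes in */

	printf("len=%u offs=%u\n", get_rloc_len(dl), get_rloc_offs(dl));
	return 0;	/* prints: len=5 offs=24 */
}
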
+
+/* Data fetch function type */
+typedef void (*fetch_func_t)(struct pt_regs *, void *, void *);
+/* Printing function type */
+typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *, void *);
+
+/* Fetch types */
+enum {
+ FETCH_MTD_reg = 0,
+ FETCH_MTD_stack,
+ FETCH_MTD_retval,
+ FETCH_MTD_memory,
+ FETCH_MTD_symbol,
+ FETCH_MTD_deref,
+ FETCH_MTD_bitfield,
+ FETCH_MTD_END,
+};
+
+/* Fetch type information table */
+struct fetch_type {
+ const char *name; /* Name of type */
+ size_t size; /* Byte size of type */
+ int is_signed; /* Signed flag */
+ print_type_func_t print; /* Print functions */
+ const char *fmt; /* Format string */
+ const char *fmttype; /* Name in format file */
+ /* Fetch functions */
+ fetch_func_t fetch[FETCH_MTD_END];
+};
+
+struct fetch_param {
+ fetch_func_t fn;
+ void *data;
+};
+
+struct probe_arg {
+ struct fetch_param fetch;
+ struct fetch_param fetch_size;
+ unsigned int offset; /* Offset from argument entry */
+ const char *name; /* Name of this argument */
+ const char *comm; /* Command of this argument */
+ const struct fetch_type *type; /* Type of this argument */
+};
+
+static inline __kprobes void call_fetch(struct fetch_param *fprm,
+ struct pt_regs *regs, void *dest)
+{
+ return fprm->fn(regs, fprm->data, dest);
+}
+
+/* Check the name is good for event/group/fields */
+static inline int is_good_name(const char *name)
+{
+ if (!isalpha(*name) && *name != '_')
+ return 0;
+ while (*++name != '\0') {
+ if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+ return 0;
+ }
+ return 1;
+}
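
is_good_name() restricts group, event and field names to C-identifier-like strings, which keeps them usable in the generated format files. A quick standalone check of the rule with made-up names (not part of this patch):

#include <ctype.h>
#include <stdio.h>

static int is_good_name(const char *name)
{
	if (!isalpha((unsigned char)*name) && *name != '_')
		return 0;
	while (*++name != '\0') {
		if (!isalpha((unsigned char)*name) &&
		    !isdigit((unsigned char)*name) && *name != '_')
			return 0;
	}
	return 1;
}

int main(void)
{
	/* prints: 1 0 0 */
	printf("%d %d %d\n", is_good_name("my_event1"),
	       is_good_name("1badname"), is_good_name("bad-name"));
	return 0;
}
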
+
+extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size,
+ struct probe_arg *parg, bool is_return, bool is_kprobe);
+
+extern int traceprobe_conflict_field_name(const char *name,
+ struct probe_arg *args, int narg);
+
+extern void traceprobe_update_arg(struct probe_arg *arg);
+extern void traceprobe_free_probe_arg(struct probe_arg *arg);
+
+extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset);
+
+extern ssize_t traceprobe_probes_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos,
+ int (*createfn)(int, char**));
+
+extern int traceprobe_command(const char *buf, int (*createfn)(int, char**));
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
new file mode 100644
index 000000000000..2b36ac68549e
--- /dev/null
+++ b/kernel/trace/trace_uprobe.c
@@ -0,0 +1,788 @@
+/*
+ * uprobes-based tracing events
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (C) IBM Corporation, 2010-2012
+ * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/uprobes.h>
+#include <linux/namei.h>
+
+#include "trace_probe.h"
+
+#define UPROBE_EVENT_SYSTEM "uprobes"
+
+/*
+ * uprobe event core functions
+ */
+struct trace_uprobe;
+struct uprobe_trace_consumer {
+ struct uprobe_consumer cons;
+ struct trace_uprobe *tu;
+};
+
+struct trace_uprobe {
+ struct list_head list;
+ struct ftrace_event_class class;
+ struct ftrace_event_call call;
+ struct uprobe_trace_consumer *consumer;
+ struct inode *inode;
+ char *filename;
+ unsigned long offset;
+ unsigned long nhit;
+ unsigned int flags; /* For TP_FLAG_* */
+ ssize_t size; /* trace entry size */
+ unsigned int nr_args;
+ struct probe_arg args[];
+};
+
+#define SIZEOF_TRACE_UPROBE(n) \
+ (offsetof(struct trace_uprobe, args) + \
+ (sizeof(struct probe_arg) * (n)))
+
+static int register_uprobe_event(struct trace_uprobe *tu);
+static void unregister_uprobe_event(struct trace_uprobe *tu);
+
+static DEFINE_MUTEX(uprobe_lock);
+static LIST_HEAD(uprobe_list);
+
+static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
+
+/*
+ * Allocate new trace_uprobe and initialize it (including uprobes).
+ */
+static struct trace_uprobe *
+alloc_trace_uprobe(const char *group, const char *event, int nargs)
+{
+ struct trace_uprobe *tu;
+
+ if (!event || !is_good_name(event))
+ return ERR_PTR(-EINVAL);
+
+ if (!group || !is_good_name(group))
+ return ERR_PTR(-EINVAL);
+
+ tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
+ if (!tu)
+ return ERR_PTR(-ENOMEM);
+
+ tu->call.class = &tu->class;
+ tu->call.name = kstrdup(event, GFP_KERNEL);
+ if (!tu->call.name)
+ goto error;
+
+ tu->class.system = kstrdup(group, GFP_KERNEL);
+ if (!tu->class.system)
+ goto error;
+
+ INIT_LIST_HEAD(&tu->list);
+ return tu;
+
+error:
+ kfree(tu->call.name);
+ kfree(tu);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+static void free_trace_uprobe(struct trace_uprobe *tu)
+{
+ int i;
+
+ for (i = 0; i < tu->nr_args; i++)
+ traceprobe_free_probe_arg(&tu->args[i]);
+
+ iput(tu->inode);
+ kfree(tu->call.class->system);
+ kfree(tu->call.name);
+ kfree(tu->filename);
+ kfree(tu);
+}
+
+static struct trace_uprobe *find_probe_event(const char *event, const char *group)
+{
+ struct trace_uprobe *tu;
+
+ list_for_each_entry(tu, &uprobe_list, list)
+ if (strcmp(tu->call.name, event) == 0 &&
+ strcmp(tu->call.class->system, group) == 0)
+ return tu;
+
+ return NULL;
+}
+
+/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
+static void unregister_trace_uprobe(struct trace_uprobe *tu)
+{
+ list_del(&tu->list);
+ unregister_uprobe_event(tu);
+ free_trace_uprobe(tu);
+}
+
+/* Register a trace_uprobe and probe_event */
+static int register_trace_uprobe(struct trace_uprobe *tu)
+{
+ struct trace_uprobe *old_tp;
+ int ret;
+
+ mutex_lock(&uprobe_lock);
+
+ /* register as an event */
+ old_tp = find_probe_event(tu->call.name, tu->call.class->system);
+ if (old_tp)
+ /* delete old event */
+ unregister_trace_uprobe(old_tp);
+
+ ret = register_uprobe_event(tu);
+ if (ret) {
+ pr_warning("Failed to register probe event(%d)\n", ret);
+ goto end;
+ }
+
+ list_add_tail(&tu->list, &uprobe_list);
+
+end:
+ mutex_unlock(&uprobe_lock);
+
+ return ret;
+}
+
+/*
+ * Argument syntax:
+ * - Add uprobe: p[:[GRP/]EVENT] PATH:SYMBOL[+offs] [FETCHARGS]
+ *
+ * - Remove uprobe: -:[GRP/]EVENT
+ */
+static int create_trace_uprobe(int argc, char **argv)
+{
+ struct trace_uprobe *tu;
+ struct inode *inode;
+ char *arg, *event, *group, *filename;
+ char buf[MAX_EVENT_NAME_LEN];
+ struct path path;
+ unsigned long offset;
+ bool is_delete;
+ int i, ret;
+
+ inode = NULL;
+ ret = 0;
+ is_delete = false;
+ event = NULL;
+ group = NULL;
+
+ /* argc must be >= 1 */
+ if (argv[0][0] == '-')
+ is_delete = true;
+ else if (argv[0][0] != 'p') {
+ pr_info("Probe definition must be started with 'p', 'r' or" " '-'.\n");
+ return -EINVAL;
+ }
+
+ if (argv[0][1] == ':') {
+ event = &argv[0][2];
+ arg = strchr(event, '/');
+
+ if (arg) {
+ group = event;
+ event = arg + 1;
+ event[-1] = '\0';
+
+ if (strlen(group) == 0) {
+ pr_info("Group name is not specified\n");
+ return -EINVAL;
+ }
+ }
+ if (strlen(event) == 0) {
+ pr_info("Event name is not specified\n");
+ return -EINVAL;
+ }
+ }
+ if (!group)
+ group = UPROBE_EVENT_SYSTEM;
+
+ if (is_delete) {
+ if (!event) {
+ pr_info("Delete command needs an event name.\n");
+ return -EINVAL;
+ }
+ mutex_lock(&uprobe_lock);
+ tu = find_probe_event(event, group);
+
+ if (!tu) {
+ mutex_unlock(&uprobe_lock);
+ pr_info("Event %s/%s doesn't exist.\n", group, event);
+ return -ENOENT;
+ }
+ /* delete an event */
+ unregister_trace_uprobe(tu);
+ mutex_unlock(&uprobe_lock);
+ return 0;
+ }
+
+ if (argc < 2) {
+ pr_info("Probe point is not specified.\n");
+ return -EINVAL;
+ }
+ if (isdigit(argv[1][0])) {
+ pr_info("probe point must be have a filename.\n");
+ return -EINVAL;
+ }
+ arg = strchr(argv[1], ':');
+ if (!arg)
+ goto fail_address_parse;
+
+ *arg++ = '\0';
+ filename = argv[1];
+ ret = kern_path(filename, LOOKUP_FOLLOW, &path);
+ if (ret)
+ goto fail_address_parse;
+
+ ret = strict_strtoul(arg, 0, &offset);
+ if (ret)
+ goto fail_address_parse;
+
+ inode = igrab(path.dentry->d_inode);
+
+ argc -= 2;
+ argv += 2;
+
+ /* setup a probe */
+ if (!event) {
+ char *tail = strrchr(filename, '/');
+ char *ptr;
+
+ ptr = kstrdup((tail ? tail + 1 : filename), GFP_KERNEL);
+ if (!ptr) {
+ ret = -ENOMEM;
+ goto fail_address_parse;
+ }
+
+ tail = ptr;
+ ptr = strpbrk(tail, ".-_");
+ if (ptr)
+ *ptr = '\0';
+
+ snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
+ event = buf;
+ kfree(tail);
+ }
+
+ tu = alloc_trace_uprobe(group, event, argc);
+ if (IS_ERR(tu)) {
+ pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
+ ret = PTR_ERR(tu);
+ goto fail_address_parse;
+ }
+ tu->offset = offset;
+ tu->inode = inode;
+ tu->filename = kstrdup(filename, GFP_KERNEL);
+
+ if (!tu->filename) {
+ pr_info("Failed to allocate filename.\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* parse arguments */
+ ret = 0;
+ for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
+ /* Increment count for freeing args in error case */
+ tu->nr_args++;
+
+ /* Parse argument name */
+ arg = strchr(argv[i], '=');
+ if (arg) {
+ *arg++ = '\0';
+ tu->args[i].name = kstrdup(argv[i], GFP_KERNEL);
+ } else {
+ arg = argv[i];
+ /* If argument name is omitted, set "argN" */
+ snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
+ tu->args[i].name = kstrdup(buf, GFP_KERNEL);
+ }
+
+ if (!tu->args[i].name) {
+ pr_info("Failed to allocate argument[%d] name.\n", i);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (!is_good_name(tu->args[i].name)) {
+ pr_info("Invalid argument[%d] name: %s\n", i, tu->args[i].name);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (traceprobe_conflict_field_name(tu->args[i].name, tu->args, i)) {
+ pr_info("Argument[%d] name '%s' conflicts with "
+ "another field.\n", i, argv[i]);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Parse fetch argument */
+ ret = traceprobe_parse_probe_arg(arg, &tu->size, &tu->args[i], false, false);
+ if (ret) {
+ pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
+ goto error;
+ }
+ }
+
+ ret = register_trace_uprobe(tu);
+ if (ret)
+ goto error;
+ return 0;
+
+error:
+ free_trace_uprobe(tu);
+ return ret;
+
+fail_address_parse:
+ if (inode)
+ iput(inode);
+
+ pr_info("Failed to parse address.\n");
+
+ return ret;
+}
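
create_trace_uprobe() expects the probe point as PATH:OFFSET (for example "p:myprobe /bin/bash:0x4245c0"): it splits at the first ':', resolves the path via kern_path() and parses the offset. The split itself, sketched in userspace (not part of this patch; the binary and offset are made-up examples):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char spec[] = "/bin/bash:0x4245c0";	/* PATH:OFFSET, example values */
	char *colon = strchr(spec, ':');
	unsigned long offset;

	if (!colon)
		return 1;
	*colon = '\0';
	offset = strtoul(colon + 1, NULL, 0);
	printf("file=%s offset=%#lx\n", spec, offset);
	return 0;	/* prints: file=/bin/bash offset=0x4245c0 */
}
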
+
+static void cleanup_all_probes(void)
+{
+ struct trace_uprobe *tu;
+
+ mutex_lock(&uprobe_lock);
+ while (!list_empty(&uprobe_list)) {
+ tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
+ unregister_trace_uprobe(tu);
+ }
+ mutex_unlock(&uprobe_lock);
+}
+
+/* Probes listing interfaces */
+static void *probes_seq_start(struct seq_file *m, loff_t *pos)
+{
+ mutex_lock(&uprobe_lock);
+ return seq_list_start(&uprobe_list, *pos);
+}
+
+static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ return seq_list_next(v, &uprobe_list, pos);
+}
+
+static void probes_seq_stop(struct seq_file *m, void *v)
+{
+ mutex_unlock(&uprobe_lock);
+}
+
+static int probes_seq_show(struct seq_file *m, void *v)
+{
+ struct trace_uprobe *tu = v;
+ int i;
+
+ seq_printf(m, "p:%s/%s", tu->call.class->system, tu->call.name);
+ seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);
+
+ for (i = 0; i < tu->nr_args; i++)
+ seq_printf(m, " %s=%s", tu->args[i].name, tu->args[i].comm);
+
+ seq_printf(m, "\n");
+ return 0;
+}
+
+static const struct seq_operations probes_seq_op = {
+ .start = probes_seq_start,
+ .next = probes_seq_next,
+ .stop = probes_seq_stop,
+ .show = probes_seq_show
+};
+
+static int probes_open(struct inode *inode, struct file *file)
+{
+ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
+ cleanup_all_probes();
+
+ return seq_open(file, &probes_seq_op);
+}
+
+static ssize_t probes_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
+}
+
+static const struct file_operations uprobe_events_ops = {
+ .owner = THIS_MODULE,
+ .open = probes_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .write = probes_write,
+};
+
+/* Probes profiling interfaces */
+static int probes_profile_seq_show(struct seq_file *m, void *v)
+{
+ struct trace_uprobe *tu = v;
+
+ seq_printf(m, " %s %-44s %15lu\n", tu->filename, tu->call.name, tu->nhit);
+ return 0;
+}
+
+static const struct seq_operations profile_seq_op = {
+ .start = probes_seq_start,
+ .next = probes_seq_next,
+ .stop = probes_seq_stop,
+ .show = probes_profile_seq_show
+};
+
+static int profile_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &profile_seq_op);
+}
+
+static const struct file_operations uprobe_profile_ops = {
+ .owner = THIS_MODULE,
+ .open = profile_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/* uprobe handler */
+static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
+{
+ struct uprobe_trace_entry_head *entry;
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
+ u8 *data;
+ int size, i, pc;
+ unsigned long irq_flags;
+ struct ftrace_event_call *call = &tu->call;
+
+ tu->nhit++;
+
+ local_save_flags(irq_flags);
+ pc = preempt_count();
+
+ size = sizeof(*entry) + tu->size;
+
+ event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
+ size, irq_flags, pc);
+ if (!event)
+ return;
+
+ entry = ring_buffer_event_data(event);
+ entry->ip = uprobe_get_swbp_addr(task_pt_regs(current));
+ data = (u8 *)&entry[1];
+ for (i = 0; i < tu->nr_args; i++)
+ call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
+
+ if (!filter_current_check_discard(buffer, call, entry, event))
+ trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+}
+
+/* Event entry printers */
+static enum print_line_t
+print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
+{
+ struct uprobe_trace_entry_head *field;
+ struct trace_seq *s = &iter->seq;
+ struct trace_uprobe *tu;
+ u8 *data;
+ int i;
+
+ field = (struct uprobe_trace_entry_head *)iter->ent;
+ tu = container_of(event, struct trace_uprobe, call.event);
+
+ if (!trace_seq_printf(s, "%s: (", tu->call.name))
+ goto partial;
+
+ if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
+ goto partial;
+
+ if (!trace_seq_puts(s, ")"))
+ goto partial;
+
+ data = (u8 *)&field[1];
+ for (i = 0; i < tu->nr_args; i++) {
+ if (!tu->args[i].type->print(s, tu->args[i].name,
+ data + tu->args[i].offset, field))
+ goto partial;
+ }
+
+ if (trace_seq_puts(s, "\n"))
+ return TRACE_TYPE_HANDLED;
+
+partial:
+ return TRACE_TYPE_PARTIAL_LINE;
+}
+
+static int probe_event_enable(struct trace_uprobe *tu, int flag)
+{
+ struct uprobe_trace_consumer *utc;
+ int ret = 0;
+
+ if (!tu->inode || tu->consumer)
+ return -EINTR;
+
+ utc = kzalloc(sizeof(struct uprobe_trace_consumer), GFP_KERNEL);
+ if (!utc)
+ return -EINTR;
+
+ utc->cons.handler = uprobe_dispatcher;
+ utc->cons.filter = NULL;
+ ret = uprobe_register(tu->inode, tu->offset, &utc->cons);
+ if (ret) {
+ kfree(utc);
+ return ret;
+ }
+
+ tu->flags |= flag;
+ utc->tu = tu;
+ tu->consumer = utc;
+
+ return 0;
+}
+
+static void probe_event_disable(struct trace_uprobe *tu, int flag)
+{
+ if (!tu->inode || !tu->consumer)
+ return;
+
+ uprobe_unregister(tu->inode, tu->offset, &tu->consumer->cons);
+ tu->flags &= ~flag;
+ kfree(tu->consumer);
+ tu->consumer = NULL;
+}
+
+static int uprobe_event_define_fields(struct ftrace_event_call *event_call)
+{
+ int ret, i;
+ struct uprobe_trace_entry_head field;
+ struct trace_uprobe *tu = (struct trace_uprobe *)event_call->data;
+
+ DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
+ /* Set argument names as fields */
+ for (i = 0; i < tu->nr_args; i++) {
+ ret = trace_define_field(event_call, tu->args[i].type->fmttype,
+ tu->args[i].name,
+ sizeof(field) + tu->args[i].offset,
+ tu->args[i].type->size,
+ tu->args[i].type->is_signed,
+ FILTER_OTHER);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+#define LEN_OR_ZERO (len ? len - pos : 0)
+static int __set_print_fmt(struct trace_uprobe *tu, char *buf, int len)
+{
+ const char *fmt, *arg;
+ int i;
+ int pos = 0;
+
+ fmt = "(%lx)";
+ arg = "REC->" FIELD_STRING_IP;
+
+ /* When len=0, we just calculate the needed length */
+
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);
+
+ for (i = 0; i < tu->nr_args; i++) {
+ pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
+ tu->args[i].name, tu->args[i].type->fmt);
+ }
+
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
+
+ for (i = 0; i < tu->nr_args; i++) {
+ pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
+ tu->args[i].name);
+ }
+
+ return pos; /* return the length of print_fmt */
+}
+#undef LEN_OR_ZERO
+
+static int set_print_fmt(struct trace_uprobe *tu)
+{
+ char *print_fmt;
+ int len;
+
+ /* First: called with 0 length to calculate the needed length */
+ len = __set_print_fmt(tu, NULL, 0);
+ print_fmt = kmalloc(len + 1, GFP_KERNEL);
+ if (!print_fmt)
+ return -ENOMEM;
+
+ /* Second: actually write the @print_fmt */
+ __set_print_fmt(tu, print_fmt, len + 1);
+ tu->call.print_fmt = print_fmt;
+
+ return 0;
+}
+
+#ifdef CONFIG_PERF_EVENTS
+/* uprobe profile handler */
+static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
+{
+ struct ftrace_event_call *call = &tu->call;
+ struct uprobe_trace_entry_head *entry;
+ struct hlist_head *head;
+ u8 *data;
+ int size, __size, i;
+ int rctx;
+
+ __size = sizeof(*entry) + tu->size;
+ size = ALIGN(__size + sizeof(u32), sizeof(u64));
+ size -= sizeof(u32);
+ if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
+ return;
+
+ preempt_disable();
+
+ entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
+ if (!entry)
+ goto out;
+
+ entry->ip = uprobe_get_swbp_addr(task_pt_regs(current));
+ data = (u8 *)&entry[1];
+ for (i = 0; i < tu->nr_args; i++)
+ call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
+
+ head = this_cpu_ptr(call->perf_events);
+ perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
+
+ out:
+ preempt_enable();
+}
+#endif /* CONFIG_PERF_EVENTS */
+
+static
+int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data)
+{
+ struct trace_uprobe *tu = (struct trace_uprobe *)event->data;
+
+ switch (type) {
+ case TRACE_REG_REGISTER:
+ return probe_event_enable(tu, TP_FLAG_TRACE);
+
+ case TRACE_REG_UNREGISTER:
+ probe_event_disable(tu, TP_FLAG_TRACE);
+ return 0;
+
+#ifdef CONFIG_PERF_EVENTS
+ case TRACE_REG_PERF_REGISTER:
+ return probe_event_enable(tu, TP_FLAG_PROFILE);
+
+ case TRACE_REG_PERF_UNREGISTER:
+ probe_event_disable(tu, TP_FLAG_PROFILE);
+ return 0;
+#endif
+ default:
+ return 0;
+ }
+ return 0;
+}
+
+static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
+{
+ struct uprobe_trace_consumer *utc;
+ struct trace_uprobe *tu;
+
+ utc = container_of(con, struct uprobe_trace_consumer, cons);
+ tu = utc->tu;
+ if (!tu || tu->consumer != utc)
+ return 0;
+
+ if (tu->flags & TP_FLAG_TRACE)
+ uprobe_trace_func(tu, regs);
+
+#ifdef CONFIG_PERF_EVENTS
+ if (tu->flags & TP_FLAG_PROFILE)
+ uprobe_perf_func(tu, regs);
+#endif
+ return 0;
+}
+
+static struct trace_event_functions uprobe_funcs = {
+ .trace = print_uprobe_event
+};
+
+static int register_uprobe_event(struct trace_uprobe *tu)
+{
+ struct ftrace_event_call *call = &tu->call;
+ int ret;
+
+ /* Initialize ftrace_event_call */
+ INIT_LIST_HEAD(&call->class->fields);
+ call->event.funcs = &uprobe_funcs;
+ call->class->define_fields = uprobe_event_define_fields;
+
+ if (set_print_fmt(tu) < 0)
+ return -ENOMEM;
+
+ ret = register_ftrace_event(&call->event);
+ if (!ret) {
+ kfree(call->print_fmt);
+ return -ENODEV;
+ }
+ call->flags = 0;
+ call->class->reg = trace_uprobe_register;
+ call->data = tu;
+ ret = trace_add_event_call(call);
+
+ if (ret) {
+ pr_info("Failed to register uprobe event: %s\n", call->name);
+ kfree(call->print_fmt);
+ unregister_ftrace_event(&call->event);
+ }
+
+ return ret;
+}
+
+static void unregister_uprobe_event(struct trace_uprobe *tu)
+{
+ /* tu->event is unregistered in trace_remove_event_call() */
+ trace_remove_event_call(&tu->call);
+ kfree(tu->call.print_fmt);
+ tu->call.print_fmt = NULL;
+}
+
+/* Make a trace interface for controlling probe points */
+static __init int init_uprobe_trace(void)
+{
+ struct dentry *d_tracer;
+
+ d_tracer = tracing_init_dentry();
+ if (!d_tracer)
+ return 0;
+
+ trace_create_file("uprobe_events", 0644, d_tracer,
+ NULL, &uprobe_events_ops);
+ /* Profile interface */
+ trace_create_file("uprobe_profile", 0444, d_tracer,
+ NULL, &uprobe_profile_ops);
+ return 0;
+}
+
+fs_initcall(init_uprobe_trace);
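set_print_fmt() above uses a two-pass snprintf() idiom: __set_print_fmt() is first called with a zero length, so LEN_OR_ZERO turns every snprintf() into a pure length query, the total sizes the kmalloc(), and a second call fills the buffer. Below is a minimal userspace sketch of that idiom only, not the kernel routine; the argument names and formats are invented for the example, and BUF_OR_NULL stands in for the kernel's bare buf + pos so the measuring pass never does pointer arithmetic on NULL.

#include <stdio.h>
#include <stdlib.h>

static const char *arg_name[] = { "arg1", "arg2" };	/* hypothetical args */
static const char *arg_fmt[]  = { "%lx", "%lx" };

#define LEN_OR_ZERO (len ? len - pos : 0)
#define BUF_OR_NULL (len ? buf + pos : NULL)

/* Pass len == 0 to measure, a real length to actually write. */
static int build_print_fmt(char *buf, int len)
{
	int pos = 0, i;

	pos += snprintf(BUF_OR_NULL, LEN_OR_ZERO, "\"(%%lx)");
	for (i = 0; i < 2; i++)
		pos += snprintf(BUF_OR_NULL, LEN_OR_ZERO, " %s=%s",
				arg_name[i], arg_fmt[i]);
	pos += snprintf(BUF_OR_NULL, LEN_OR_ZERO, "\", REC->ip");
	for (i = 0; i < 2; i++)
		pos += snprintf(BUF_OR_NULL, LEN_OR_ZERO, ", REC->%s",
				arg_name[i]);
	return pos;			/* needed length, excluding '\0' */
}

int main(void)
{
	int len = build_print_fmt(NULL, 0);	/* first pass: measure */
	char *fmt = malloc(len + 1);

	if (!fmt)
		return 1;
	build_print_fmt(fmt, len + 1);		/* second pass: fill */
	printf("%s\n", fmt);
	free(fmt);
	return 0;
}

Running it prints the same kind of string trace_uprobe stores in tu->call.print_fmt, e.g. "(%lx) arg1=%lx arg2=%lx", REC->ip, REC->arg1, REC->arg2.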
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index df30ee08bdd4..e5e1d85b8c7c 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -24,6 +24,7 @@
#include <linux/sysctl.h>
#include <asm/irq_regs.h>
+#include <linux/kvm_para.h>
#include <linux/perf_event.h>
int watchdog_enabled = 1;
@@ -280,6 +281,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
__this_cpu_write(softlockup_touch_sync, false);
sched_clock_tick();
}
+
+ /* Clear the guest paused flag on watchdog reset */
+ kvm_check_and_clear_guest_paused();
__touch_watchdog();
return HRTIMER_RESTART;
}
@@ -292,6 +296,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
*/
duration = is_softlockup(touch_ts);
if (unlikely(duration)) {
+ /*
+ * If a virtual machine is stopped by the host it can look to
+ * the watchdog like a soft lockup. Check to see if the host
+ * stopped the VM before we issue the warning.
+ */
+ if (kvm_check_and_clear_guest_paused())
+ return HRTIMER_RESTART;
+
/* only warn once */
if (__this_cpu_read(soft_watchdog_warn) == true)
return HRTIMER_RESTART;
diff --git a/lib/Kconfig b/lib/Kconfig
index 0e25c03939e3..a9e15403434e 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -16,6 +16,12 @@ config BITREVERSE
config RATIONAL
boolean
+config GENERIC_STRNCPY_FROM_USER
+ bool
+
+config GENERIC_STRNLEN_USER
+ bool
+
config GENERIC_FIND_FIRST_BIT
bool
@@ -33,6 +39,9 @@ config GENERIC_IO
boolean
default n
+config STMP_DEVICE
+ bool
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
diff --git a/lib/Makefile b/lib/Makefile
index 74290c9e2864..8c31a0cb75e9 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -125,6 +125,11 @@ obj-$(CONFIG_CLZ_TAB) += clz_tab.o
obj-$(CONFIG_DDR) += jedec_ddr_data.o
+obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
+obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
+
+obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/bitmap.c b/lib/bitmap.c
index b5a8b6ad2454..06fdfa1aeba7 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -369,7 +369,8 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area);
* @nmaskbits: size of bitmap, in bits
*
* Exactly @nmaskbits bits are displayed. Hex digits are grouped into
- * comma-separated sets of eight digits per set.
+ * comma-separated sets of eight digits per set. Returns the number of
+ * characters which were written to *buf, excluding the trailing \0.
*/
int bitmap_scnprintf(char *buf, unsigned int buflen,
const unsigned long *maskp, int nmaskbits)
@@ -517,8 +518,8 @@ EXPORT_SYMBOL(bitmap_parse_user);
*
* Helper routine for bitmap_scnlistprintf(). Write decimal number
* or range to buf, suppressing output past buf+buflen, with optional
- * comma-prefix. Return len of what would be written to buf, if it
- * all fit.
+ * comma-prefix. Return len of what was written to *buf, excluding the
+ * trailing \0.
*/
static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
{
@@ -544,9 +545,8 @@ static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
* the range. Output format is compatible with the format
* accepted as input by bitmap_parselist().
*
- * The return value is the number of characters which would be
- * generated for the given input, excluding the trailing '\0', as
- * per ISO C99.
+ * The return value is the number of characters which were written to *buf,
+ * excluding the trailing '\0', following the kernel's scnprintf() convention.
*/
int bitmap_scnlistprintf(char *buf, unsigned int buflen,
const unsigned long *maskp, int nmaskbits)
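The comment updates above all record the same convention: these helpers return what was actually stored (scnprintf()-style), not what would have been needed (snprintf()-style). A small userspace sketch of the difference, with my_scnprintf() as a stand-in modelled on the kernel helper in lib/vsprintf.c:

#include <stdio.h>
#include <stdarg.h>

/*
 * Userspace stand-in for the kernel's scnprintf(): unlike snprintf(), it
 * returns the number of characters actually stored in buf (excluding the
 * trailing '\0'), never more than size - 1.
 */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (i >= (int)size)
		i = size ? (int)size - 1 : 0;
	return i;
}

int main(void)
{
	char buf[8];

	/* snprintf() reports the length it *wanted* to write ... */
	printf("snprintf:  %d\n", snprintf(buf, sizeof(buf), "0123456789"));	/* 10 */
	/* ... while the scnprintf() convention reports what actually fit. */
	printf("scnprintf: %d (buf=\"%s\")\n",
	       my_scnprintf(buf, sizeof(buf), "0123456789"), buf);		/* 7 */
	return 0;
}

With an 8-byte buffer the first call reports 10 and the second reports 7, which is exactly the distinction the updated kernel-doc spells out.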
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 13ef2338be41..518aea714d21 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -430,7 +430,7 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
*/
static struct dma_debug_entry *dma_entry_alloc(void)
{
- struct dma_debug_entry *entry = NULL;
+ struct dma_debug_entry *entry;
unsigned long flags;
spin_lock_irqsave(&free_entries_lock, flags);
@@ -438,11 +438,14 @@ static struct dma_debug_entry *dma_entry_alloc(void)
if (list_empty(&free_entries)) {
pr_err("DMA-API: debugging out of memory - disabling\n");
global_disable = true;
- goto out;
+ spin_unlock_irqrestore(&free_entries_lock, flags);
+ return NULL;
}
entry = __dma_entry_alloc();
+ spin_unlock_irqrestore(&free_entries_lock, flags);
+
#ifdef CONFIG_STACKTRACE
entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
entry->stacktrace.entries = entry->st_entries;
@@ -450,9 +453,6 @@ static struct dma_debug_entry *dma_entry_alloc(void)
save_stack_trace(&entry->stacktrace);
#endif
-out:
- spin_unlock_irqrestore(&free_entries_lock, flags);
-
return entry;
}
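The dma_entry_alloc() rework above is a lock-scoping change: drop free_entries_lock as soon as the shared free list has been updated, and return NULL directly on failure instead of funnelling through a common unlock label. A standalone sketch of the same shape, using a hypothetical entry pool and a pthread mutex in place of the kernel spinlock:

#include <pthread.h>
#include <stddef.h>

struct entry {
	struct entry *next;
	int data;
};

static struct entry *free_list;
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Hold the lock only while manipulating the shared free list; return early
 * on failure; do the per-entry initialisation (the stacktrace setup in the
 * kernel case) outside the critical section.
 */
static struct entry *entry_alloc(void)
{
	struct entry *e;

	pthread_mutex_lock(&free_lock);
	if (!free_list) {
		pthread_mutex_unlock(&free_lock);
		return NULL;			/* out of entries */
	}
	e = free_list;
	free_list = e->next;
	pthread_mutex_unlock(&free_lock);

	e->data = 0;				/* init outside the lock */
	return e;
}

int main(void)
{
	static struct entry pool[2];

	pool[0].next = &pool[1];
	free_list = &pool[0];

	/* two allocations succeed, the third correctly fails */
	return entry_alloc() && entry_alloc() && !entry_alloc() ? 0 : 1;
}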
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 3810b481f940..23a5e031cd8b 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -31,6 +31,9 @@ void __list_add(struct list_head *new,
"list_add corruption. prev->next should be "
"next (%p), but was %p. (prev=%p).\n",
next, prev->next, prev);
+ WARN(new == prev || new == next,
+ "list_add double add: new=%p, prev=%p, next=%p.\n",
+ new, prev, next);
next->prev = new;
new->next = next;
new->prev = prev;
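The new WARN() catches the classic bug of calling list_add() twice on the same entry, which would otherwise silently corrupt the list. A userspace sketch of the check, with a simplified checked_list_add() that mirrors the insert sequence above plus the final prev->next link the hunk does not show:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Inserting a node that is already the prev or next neighbour is a double add. */
static void checked_list_add(struct list_head *new,
			     struct list_head *prev, struct list_head *next)
{
	if (new == prev || new == next)
		fprintf(stderr, "list_add double add: new=%p prev=%p next=%p\n",
			(void *)new, (void *)prev, (void *)next);

	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct list_head a;

	checked_list_add(&a, &head, head.next);	/* fine */
	checked_list_add(&a, &head, head.next);	/* triggers the warning */
	return 0;
}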
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 86516f5588e3..d7c878cc006c 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -73,11 +73,24 @@ static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1] __read_mostly;
static struct kmem_cache *radix_tree_node_cachep;
/*
+ * The radix tree is variable-height, so an insert operation not only has
+ * to build the branch to its corresponding item, it also has to build the
+ * branch to existing items if the size has to be increased (by
+ * radix_tree_extend).
+ *
+ * The worst case is a zero height tree with just a single item at index 0,
+ * and then inserting an item at index ULONG_MAX. This requires 2 new branches
+ * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
+ * Hence:
+ */
+#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
+
+/*
* Per-cpu pool of preloaded nodes
*/
struct radix_tree_preload {
int nr;
- struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
+ struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
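The preload comment above is a short worst-case derivation; the toy program below merely restates the arithmetic with an illustrative path depth (the real RADIX_TREE_MAX_PATH depends on the kernel configuration), so the 2 * RADIX_TREE_MAX_PATH - 1 bound is easy to sanity-check.

#include <stdio.h>

/* Illustrative depth only; not the kernel's value. */
#define RADIX_TREE_MAX_PATH 6

int main(void)
{
	/*
	 * Growing a height-0 tree (one item at index 0) to full height needs
	 * RADIX_TREE_MAX_PATH new nodes to keep the old item reachable, and
	 * the insert at index ULONG_MAX needs another RADIX_TREE_MAX_PATH
	 * nodes for its own branch; the two branches share only the new root.
	 */
	int preload = 2 * RADIX_TREE_MAX_PATH - 1;

	printf("worst-case nodes preloaded per insert: %d\n", preload);	/* 11 */
	return 0;
}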
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 525d160d44f0..d0ec4f3d1593 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -58,7 +58,7 @@ static void spin_dump(raw_spinlock_t *lock, const char *msg)
printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
msg, raw_smp_processor_id(),
current->comm, task_pid_nr(current));
- printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
+ printk(KERN_EMERG " lock: %ps, .magic: %08x, .owner: %s/%d, "
".owner_cpu: %d\n",
lock, lock->magic,
owner ? owner->comm : "<none>",
diff --git a/lib/stmp_device.c b/lib/stmp_device.c
new file mode 100644
index 000000000000..8ac9bcc4289a
--- /dev/null
+++ b/lib/stmp_device.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 1999 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ * Copyright 2006-2007,2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ * Copyright 2009 Ilya Yanok, Emcraft Systems Ltd, yanok@emcraft.com
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/stmp_device.h>
+
+#define STMP_MODULE_CLKGATE (1 << 30)
+#define STMP_MODULE_SFTRST (1 << 31)
+
+/*
+ * Clear the bit and poll until it reads back as cleared. This is usually
+ * called with a reset address and a mask of either SFTRST (bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int stmp_clear_poll_bit(void __iomem *addr, u32 mask)
+{
+ int timeout = 0x400;
+
+ writel(mask, addr + STMP_OFFSET_REG_CLR);
+ udelay(1);
+ while ((readl(addr) & mask) && --timeout)
+ /* nothing */;
+
+ return !timeout;
+}
+
+int stmp_reset_block(void __iomem *reset_addr)
+{
+ int ret;
+ int timeout = 0x400;
+
+ /* clear and poll SFTRST */
+ ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear CLKGATE */
+ writel(STMP_MODULE_CLKGATE, reset_addr + STMP_OFFSET_REG_CLR);
+
+ /* set SFTRST to reset the block */
+ writel(STMP_MODULE_SFTRST, reset_addr + STMP_OFFSET_REG_SET);
+ udelay(1);
+
+ /* poll CLKGATE becoming set */
+ while ((!(readl(reset_addr) & STMP_MODULE_CLKGATE)) && --timeout)
+ /* nothing */;
+ if (unlikely(!timeout))
+ goto error;
+
+ /* clear and poll SFTRST */
+ ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear and poll CLKGATE */
+ ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_CLKGATE);
+ if (unlikely(ret))
+ goto error;
+
+ return 0;
+
+error:
+ pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(stmp_reset_block);
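stmp_clear_poll_bit() above is the "write the clear bit, then spin with a bounded counter" pattern that turns a wedged block into -ETIMEDOUT rather than a hang. Below is a userspace sketch of the same pattern against a fake register variable; the real driver writes to the hardware's separate SET/CLR register aliases and uses udelay() between polls, which this sketch omits.

#include <stdio.h>
#include <stdint.h>

static volatile uint32_t fake_reg = 1u << 31;	/* SFTRST-like bit set */

static int clear_poll_bit(volatile uint32_t *reg, uint32_t mask)
{
	int timeout = 0x400;

	*reg &= ~mask;			/* stand-in for the CLR register write */
	while ((*reg & mask) && --timeout)
		;			/* spin until clear or timeout */

	return !timeout;		/* non-zero means the bit never cleared */
}

int main(void)
{
	if (clear_poll_bit(&fake_reg, 1u << 31)) {
		fprintf(stderr, "module reset timeout\n");
		return 1;
	}
	printf("bit cleared\n");
	return 0;
}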
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index dd4ece372699..1cffc223bff5 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -23,15 +23,15 @@
int string_get_size(u64 size, const enum string_size_units units,
char *buf, int len)
{
- const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB",
+ static const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB",
"EB", "ZB", "YB", NULL};
- const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
+ static const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
"EiB", "ZiB", "YiB", NULL };
- const char **units_str[] = {
+ static const char **units_str[] = {
[STRING_UNITS_10] = units_10,
[STRING_UNITS_2] = units_2,
};
- const unsigned int divisor[] = {
+ static const unsigned int divisor[] = {
[STRING_UNITS_10] = 1000,
[STRING_UNITS_2] = 1024,
};
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
new file mode 100644
index 000000000000..bb2b201d6ad0
--- /dev/null
+++ b/lib/strncpy_from_user.c
@@ -0,0 +1,113 @@
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include <asm/byteorder.h>
+#include <asm/word-at-a-time.h>
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#define IS_UNALIGNED(src, dst) 0
+#else
+#define IS_UNALIGNED(src, dst) \
+ (((long) dst | (long) src) & (sizeof(long) - 1))
+#endif
+
+/*
+ * Do a strncpy, return length of string without final '\0'.
+ * 'count' is the user-supplied count (return 'count' if we
+ * hit it), 'max' is the address space maximum (and we return
+ * -EFAULT if we hit it).
+ */
+static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
+{
+ const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+ long res = 0;
+
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+ * we only have one limit we need to check in the loop
+ */
+ if (max > count)
+ max = count;
+
+ if (IS_UNALIGNED(src, dst))
+ goto byte_at_a_time;
+
+ while (max >= sizeof(unsigned long)) {
+ unsigned long c, data;
+
+ /* Fall back to byte-at-a-time if we get a page fault */
+ if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+ break;
+ *(unsigned long *)(dst+res) = c;
+ if (has_zero(c, &data, &constants)) {
+ data = prep_zero_mask(c, data, &constants);
+ data = create_zero_mask(data);
+ return res + find_zero(data);
+ }
+ res += sizeof(unsigned long);
+ max -= sizeof(unsigned long);
+ }
+
+byte_at_a_time:
+ while (max) {
+ char c;
+
+ if (unlikely(__get_user(c,src+res)))
+ return -EFAULT;
+ dst[res] = c;
+ if (!c)
+ return res;
+ res++;
+ max--;
+ }
+
+ /*
+ * Uhhuh. We hit 'max'. But was that the user-specified maximum
+ * too? If so, that's ok - we got as much as the user asked for.
+ */
+ if (res >= count)
+ return res;
+
+ /*
+ * Nope: we hit the address space limit, and we still had more
+ * characters the caller would have wanted. That's an EFAULT.
+ */
+ return -EFAULT;
+}
+
+/**
+ * strncpy_from_user: - Copy a NUL terminated string from userspace.
+ * @dst: Destination address, in kernel space. This buffer must be at
+ * least @count bytes long.
+ * @src: Source address, in user space.
+ * @count: Maximum number of bytes to copy, including the trailing NUL.
+ *
+ * Copies a NUL-terminated string from userspace to kernel space.
+ *
+ * On success, returns the length of the string (not including the trailing
+ * NUL).
+ *
+ * If access to userspace fails, returns -EFAULT (some data may have been
+ * copied).
+ *
+ * If @count is smaller than the length of the string, copies @count bytes
+ * and returns @count.
+ */
+long strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ unsigned long max_addr, src_addr;
+
+ if (unlikely(count <= 0))
+ return 0;
+
+ max_addr = user_addr_max();
+ src_addr = (unsigned long)src;
+ if (likely(src_addr < max_addr)) {
+ unsigned long max = max_addr - src_addr;
+ return do_strncpy_from_user(dst, src, count, max);
+ }
+ return -EFAULT;
+}
+EXPORT_SYMBOL(strncpy_from_user);
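The fast path above depends on the word-at-a-time zero-byte test provided by asm/word-at-a-time.h. The portable-C version of the idea is sketched below under two stated assumptions: a little-endian layout (so the lowest set bit of the mask falls inside the first zero byte) and the GCC/Clang __builtin_ctzll(); the kernel's has_zero()/find_zero() are per-architecture and may be implemented differently.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/*
 * (word - 0x01..01) & ~word & 0x80..80 is non-zero iff some byte of word is
 * zero, and on little-endian the lowest set bit sits inside the first zero
 * byte, so dividing its bit index by 8 gives the byte offset.
 */
static int first_zero_byte(uint64_t word)
{
	uint64_t mask = (word - 0x0101010101010101ULL) &
			~word & 0x8080808080808080ULL;

	if (!mask)
		return -1;			/* no zero byte in this word */
	return __builtin_ctzll(mask) >> 3;	/* little-endian byte index */
}

int main(void)
{
	const char s[8] = "abcde";		/* '\0' at index 5 */
	uint64_t w;

	memcpy(&w, s, sizeof(w));		/* load one aligned word */
	printf("first zero byte at %d\n", first_zero_byte(w));	/* 5 */
	return 0;
}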
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
new file mode 100644
index 000000000000..a28df5206d95
--- /dev/null
+++ b/lib/strnlen_user.c
@@ -0,0 +1,138 @@
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/uaccess.h>
+
+#include <asm/word-at-a-time.h>
+
+/* Set bits in the first 'n' bytes when loaded from memory */
+#ifdef __LITTLE_ENDIAN
+# define aligned_byte_mask(n) ((1ul << 8*(n))-1)
+#else
+# define aligned_byte_mask(n) (~0xfful << (BITS_PER_LONG - 8 - 8*(n)))
+#endif
+
+/*
+ * Do a strnlen, return length of string *with* final '\0'.
+ * 'count' is the user-supplied count, while 'max' is the
+ * address space maximum.
+ *
+ * Return 0 for exceptions (which includes hitting the address
+ * space maximum), or 'count+1' if hitting the user-supplied
+ * maximum count.
+ *
+ * NOTE! We can sometimes overshoot the user-supplied maximum
+ * if it fits in an aligned 'long'. The caller needs to check
+ * the return value against "> max".
+ */
+static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
+{
+ const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+ long align, res = 0;
+ unsigned long c;
+
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+ * we only have one limit we need to check in the loop
+ */
+ if (max > count)
+ max = count;
+
+ /*
+ * Do everything aligned. But that means that we
+ * need to also expand the maximum..
+ */
+ align = (sizeof(long) - 1) & (unsigned long)src;
+ src -= align;
+ max += align;
+
+ if (unlikely(__get_user(c,(unsigned long __user *)src)))
+ return 0;
+ c |= aligned_byte_mask(align);
+
+ for (;;) {
+ unsigned long data;
+ if (has_zero(c, &data, &constants)) {
+ data = prep_zero_mask(c, data, &constants);
+ data = create_zero_mask(data);
+ return res + find_zero(data) + 1 - align;
+ }
+ res += sizeof(unsigned long);
+ if (unlikely(max < sizeof(unsigned long)))
+ break;
+ max -= sizeof(unsigned long);
+ if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+ return 0;
+ }
+ res -= align;
+
+ /*
+ * Uhhuh. We hit 'max'. But was that the user-specified maximum
+ * too? If so, return the marker for "too long".
+ */
+ if (res >= count)
+ return count+1;
+
+ /*
+ * Nope: we hit the address space limit, and we still had more
+ * characters the caller would have wanted. That's 0.
+ */
+ return 0;
+}
+
+/**
+ * strnlen_user: - Get the size of a user string INCLUDING final NUL.
+ * @str: The string to measure.
+ * @count: Maximum count (including NUL character)
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * If the string is too long, returns 'count+1'.
+ * On exception (or invalid count), returns 0.
+ */
+long strnlen_user(const char __user *str, long count)
+{
+ unsigned long max_addr, src_addr;
+
+ if (unlikely(count <= 0))
+ return 0;
+
+ max_addr = user_addr_max();
+ src_addr = (unsigned long)str;
+ if (likely(src_addr < max_addr)) {
+ unsigned long max = max_addr - src_addr;
+ return do_strnlen_user(str, count, max);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(strnlen_user);
+
+/**
+ * strlen_user: - Get the size of a user string INCLUDING final NUL.
+ * @str: The string to measure.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * On exception, returns 0.
+ *
+ * If there is a limit on the length of a valid string, you may wish to
+ * consider using strnlen_user() instead.
+ */
+long strlen_user(const char __user *str)
+{
+ unsigned long max_addr, src_addr;
+
+ max_addr = user_addr_max();
+ src_addr = (unsigned long)str;
+ if (likely(src_addr < max_addr)) {
+ unsigned long max = max_addr - src_addr;
+ return do_strnlen_user(str, ~0ul, max);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(strlen_user);
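do_strnlen_user() rounds the source pointer down to a word boundary and then ORs aligned_byte_mask(align) into the first load, so the stray bytes in front of the real string can never read back as the terminating NUL. The little-endian case is sketched below with a hand-built word; without the OR, the zero in byte 0 would be taken as the terminator and the length arithmetic would underflow.

#include <stdio.h>
#include <stdint.h>

/* Little-endian variant only: set bits in the low n bytes. */
#define aligned_byte_mask(n) ((1ULL << 8 * (n)) - 1)

int main(void)
{
	/* Bytes 0..2 are stray data before the string; "hi\0" starts at 3. */
	uint64_t word = 0x7a7a006968000000ULL;
	unsigned int align = 3, i;

	word |= aligned_byte_mask(align);	/* low 3 bytes can't read as NUL */

	for (i = 0; i < 8; i++)			/* find the first zero byte */
		if (!((word >> (8 * i)) & 0xff))
			break;

	/* i is the in-word index of the NUL; subtract the artificial offset */
	printf("strnlen including NUL = %u\n", i + 1 - align);	/* 3 */
	return 0;
}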
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 414f46ed1dcd..45bc1f83a5ad 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -130,11 +130,9 @@ void swiotlb_print_info(void)
pstart = virt_to_phys(io_tlb_start);
pend = virt_to_phys(io_tlb_end);
- printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
- bytes >> 20, io_tlb_start, io_tlb_end);
- printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
- (unsigned long long)pstart,
- (unsigned long long)pend);
+ printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
+ (unsigned long long)pstart, (unsigned long long)pend - 1,
+ bytes >> 20, io_tlb_start, io_tlb_end - 1);
}
void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c
index d55769d63cb8..bea3f3fa3f02 100644
--- a/lib/test-kstrtox.c
+++ b/lib/test-kstrtox.c
@@ -11,7 +11,7 @@ struct test_fail {
};
#define DEFINE_TEST_FAIL(test) \
- const struct test_fail test[] __initdata
+ const struct test_fail test[] __initconst
#define DECLARE_TEST_OK(type, test_type) \
test_type { \
@@ -21,7 +21,7 @@ struct test_fail {
}
#define DEFINE_TEST_OK(type, test) \
- const type test[] __initdata
+ const type test[] __initconst
#define TEST_FAIL(fn, type, fmt, test) \
{ \
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index abbabec9720a..c3f36d415bdf 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -112,106 +112,199 @@ int skip_atoi(const char **s)
/* Decimal conversion is by far the most typical, and is used
* for /proc and /sys data. This directly impacts e.g. top performance
* with many processes running. We optimize it for speed
- * using code from
- * http://www.cs.uiowa.edu/~jones/bcd/decimal.html
- * (with permission from the author, Douglas W. Jones). */
+ * using ideas described at <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
+ * (with permission from the author, Douglas W. Jones).
+ */
-/* Formats correctly any integer in [0,99999].
- * Outputs from one to five digits depending on input.
- * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */
+#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
+/* Formats correctly any integer in [0, 999999999] */
static noinline_for_stack
-char *put_dec_trunc(char *buf, unsigned q)
+char *put_dec_full9(char *buf, unsigned q)
{
- unsigned d3, d2, d1, d0;
- d1 = (q>>4) & 0xf;
- d2 = (q>>8) & 0xf;
- d3 = (q>>12);
-
- d0 = 6*(d3 + d2 + d1) + (q & 0xf);
- q = (d0 * 0xcd) >> 11;
- d0 = d0 - 10*q;
- *buf++ = d0 + '0'; /* least significant digit */
- d1 = q + 9*d3 + 5*d2 + d1;
- if (d1 != 0) {
- q = (d1 * 0xcd) >> 11;
- d1 = d1 - 10*q;
- *buf++ = d1 + '0'; /* next digit */
-
- d2 = q + 2*d2;
- if ((d2 != 0) || (d3 != 0)) {
- q = (d2 * 0xd) >> 7;
- d2 = d2 - 10*q;
- *buf++ = d2 + '0'; /* next digit */
-
- d3 = q + 4*d3;
- if (d3 != 0) {
- q = (d3 * 0xcd) >> 11;
- d3 = d3 - 10*q;
- *buf++ = d3 + '0'; /* next digit */
- if (q != 0)
- *buf++ = q + '0'; /* most sign. digit */
- }
- }
- }
+ unsigned r;
+ /*
+ * Possible ways to approx. divide by 10
+ * (x * 0x1999999a) >> 32 x < 1073741829 (multiply must be 64-bit)
+ * (x * 0xcccd) >> 19 x < 81920 (x < 262149 when 64-bit mul)
+ * (x * 0x6667) >> 18 x < 43699
+ * (x * 0x3334) >> 17 x < 16389
+ * (x * 0x199a) >> 16 x < 16389
+ * (x * 0x0ccd) >> 15 x < 16389
+ * (x * 0x0667) >> 14 x < 2739
+ * (x * 0x0334) >> 13 x < 1029
+ * (x * 0x019a) >> 12 x < 1029
+ * (x * 0x00cd) >> 11 x < 1029 shorter code than * 0x67 (on i386)
+ * (x * 0x0067) >> 10 x < 179
+ * (x * 0x0034) >> 9 x < 69 same
+ * (x * 0x001a) >> 8 x < 69 same
+ * (x * 0x000d) >> 7 x < 69 same, shortest code (on i386)
+ * (x * 0x0007) >> 6 x < 19
+ * See <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
+ */
+ r = (q * (uint64_t)0x1999999a) >> 32;
+ *buf++ = (q - 10 * r) + '0'; /* 1 */
+ q = (r * (uint64_t)0x1999999a) >> 32;
+ *buf++ = (r - 10 * q) + '0'; /* 2 */
+ r = (q * (uint64_t)0x1999999a) >> 32;
+ *buf++ = (q - 10 * r) + '0'; /* 3 */
+ q = (r * (uint64_t)0x1999999a) >> 32;
+ *buf++ = (r - 10 * q) + '0'; /* 4 */
+ r = (q * (uint64_t)0x1999999a) >> 32;
+ *buf++ = (q - 10 * r) + '0'; /* 5 */
+ /* Now value is under 10000, can avoid 64-bit multiply */
+ q = (r * 0x199a) >> 16;
+ *buf++ = (r - 10 * q) + '0'; /* 6 */
+ r = (q * 0xcd) >> 11;
+ *buf++ = (q - 10 * r) + '0'; /* 7 */
+ q = (r * 0xcd) >> 11;
+ *buf++ = (r - 10 * q) + '0'; /* 8 */
+ *buf++ = q + '0'; /* 9 */
return buf;
}
-/* Same with if's removed. Always emits five digits */
+#endif
+
+/* Similar to above but do not pad with zeros.
+ * Code can be easily arranged to print 9 digits too, but our callers
+ * always call put_dec_full9() instead when the number has 9 decimal digits.
+ */
static noinline_for_stack
-char *put_dec_full(char *buf, unsigned q)
+char *put_dec_trunc8(char *buf, unsigned r)
{
- /* BTW, if q is in [0,9999], 8-bit ints will be enough, */
- /* but anyway, gcc produces better code with full-sized ints */
- unsigned d3, d2, d1, d0;
- d1 = (q>>4) & 0xf;
- d2 = (q>>8) & 0xf;
- d3 = (q>>12);
+ unsigned q;
+
+ /* Copy of previous function's body with added early returns */
+ q = (r * (uint64_t)0x1999999a) >> 32;
+ *buf++ = (r - 10 * q) + '0'; /* 2 */
+ if (q == 0)
+ return buf;
+ r = (q * (uint64_t)0x1999999a) >> 32;
+ *buf++ = (q - 10 * r) + '0'; /* 3 */
+ if (r == 0)
+ return buf;
+ q = (r * (uint64_t)0x1999999a) >> 32;
+ *buf++ = (r - 10 * q) + '0'; /* 4 */
+ if (q == 0)
+ return buf;
+ r = (q * (uint64_t)0x1999999a) >> 32;
+ *buf++ = (q - 10 * r) + '0'; /* 5 */
+ if (r == 0)
+ return buf;
+ q = (r * 0x199a) >> 16;
+ *buf++ = (r - 10 * q) + '0'; /* 6 */
+ if (q == 0)
+ return buf;
+ r = (q * 0xcd) >> 11;
+ *buf++ = (q - 10 * r) + '0'; /* 7 */
+ if (r == 0)
+ return buf;
+ q = (r * 0xcd) >> 11;
+ *buf++ = (r - 10 * q) + '0'; /* 8 */
+ if (q == 0)
+ return buf;
+ *buf++ = q + '0'; /* 9 */
+ return buf;
+}
- /*
- * Possible ways to approx. divide by 10
- * gcc -O2 replaces multiply with shifts and adds
- * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386)
- * (x * 0x67) >> 10: 1100111
- * (x * 0x34) >> 9: 110100 - same
- * (x * 0x1a) >> 8: 11010 - same
- * (x * 0x0d) >> 7: 1101 - same, shortest code (on i386)
- */
- d0 = 6*(d3 + d2 + d1) + (q & 0xf);
- q = (d0 * 0xcd) >> 11;
- d0 = d0 - 10*q;
- *buf++ = d0 + '0';
- d1 = q + 9*d3 + 5*d2 + d1;
- q = (d1 * 0xcd) >> 11;
- d1 = d1 - 10*q;
- *buf++ = d1 + '0';
-
- d2 = q + 2*d2;
- q = (d2 * 0xd) >> 7;
- d2 = d2 - 10*q;
- *buf++ = d2 + '0';
-
- d3 = q + 4*d3;
- q = (d3 * 0xcd) >> 11; /* - shorter code */
- /* q = (d3 * 0x67) >> 10; - would also work */
- d3 = d3 - 10*q;
- *buf++ = d3 + '0';
- *buf++ = q + '0';
+/* There are two algorithms to print larger numbers.
+ * One is generic: divide by 1000000000 and repeatedly print
+ * groups of (up to) 9 digits. It's conceptually simple,
+ * but requires a (unsigned long long) / 1000000000 division.
+ *
+ * Second algorithm splits 64-bit unsigned long long into 16-bit chunks,
+ * manipulates them cleverly and generates groups of 4 decimal digits.
+ * It so happens that it does NOT require long long division.
+ *
+ * If long is > 32 bits, division of 64-bit values is relatively easy,
+ * and we will use the first algorithm.
+ * If long long is > 64 bits (strange architecture with VERY large long long),
+ * the second algorithm can't be used, and we again use the first one.
+ *
+ * Else (if long is 32 bits and long long is 64 bits) we use the second one.
+ */
- return buf;
+#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
+
+/* First algorithm: generic */
+
+static
+char *put_dec(char *buf, unsigned long long n)
+{
+ if (n >= 100*1000*1000) {
+ while (n >= 1000*1000*1000)
+ buf = put_dec_full9(buf, do_div(n, 1000*1000*1000));
+ if (n >= 100*1000*1000)
+ return put_dec_full9(buf, n);
+ }
+ return put_dec_trunc8(buf, n);
}
-/* No inlining helps gcc to use registers better */
+
+#else
+
+/* Second algorithm: valid only for 64-bit long longs */
+
static noinline_for_stack
-char *put_dec(char *buf, unsigned long long num)
+char *put_dec_full4(char *buf, unsigned q)
{
- while (1) {
- unsigned rem;
- if (num < 100000)
- return put_dec_trunc(buf, num);
- rem = do_div(num, 100000);
- buf = put_dec_full(buf, rem);
- }
+ unsigned r;
+ r = (q * 0xcccd) >> 19;
+ *buf++ = (q - 10 * r) + '0';
+ q = (r * 0x199a) >> 16;
+ *buf++ = (r - 10 * q) + '0';
+ r = (q * 0xcd) >> 11;
+ *buf++ = (q - 10 * r) + '0';
+ *buf++ = r + '0';
+ return buf;
+}
+
+/* Based on code by Douglas W. Jones found at
+ * <http://www.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour>
+ * (with permission from the author).
+ * Performs no 64-bit division and hence should be fast on 32-bit machines.
+ */
+static
+char *put_dec(char *buf, unsigned long long n)
+{
+ uint32_t d3, d2, d1, q, h;
+
+ if (n < 100*1000*1000)
+ return put_dec_trunc8(buf, n);
+
+ d1 = ((uint32_t)n >> 16); /* implicit "& 0xffff" */
+ h = (n >> 32);
+ d2 = (h ) & 0xffff;
+ d3 = (h >> 16); /* implicit "& 0xffff" */
+
+ q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
+
+ buf = put_dec_full4(buf, q % 10000);
+ q = q / 10000;
+
+ d1 = q + 7671 * d3 + 9496 * d2 + 6 * d1;
+ buf = put_dec_full4(buf, d1 % 10000);
+ q = d1 / 10000;
+
+ d2 = q + 4749 * d3 + 42 * d2;
+ buf = put_dec_full4(buf, d2 % 10000);
+ q = d2 / 10000;
+
+ d3 = q + 281 * d3;
+ if (!d3)
+ goto done;
+ buf = put_dec_full4(buf, d3 % 10000);
+ q = d3 / 10000;
+ if (!q)
+ goto done;
+ buf = put_dec_full4(buf, q);
+ done:
+ while (buf[-1] == '0')
+ --buf;
+
+ return buf;
}
+#endif
+
/*
* Convert passed number to decimal string.
* Returns the length of string. On buffer overflow, returns 0.
@@ -220,16 +313,22 @@ char *put_dec(char *buf, unsigned long long num)
*/
int num_to_str(char *buf, int size, unsigned long long num)
{
- char tmp[21]; /* Enough for 2^64 in decimal */
+ char tmp[sizeof(num) * 3];
int idx, len;
- len = put_dec(tmp, num) - tmp;
+ /* put_dec() may work incorrectly for num = 0 (generate "", not "0") */
+ if (num <= 9) {
+ tmp[0] = '0' + num;
+ len = 1;
+ } else {
+ len = put_dec(tmp, num) - tmp;
+ }
if (len > size)
return 0;
for (idx = 0; idx < len; ++idx)
buf[idx] = tmp[len - idx - 1];
- return len;
+ return len;
}
#define ZEROPAD 1 /* pad with zero */
@@ -284,6 +383,7 @@ char *number(char *buf, char *end, unsigned long long num,
char locase;
int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
int i;
+ bool is_zero = num == 0LL;
/* locase = 0 or 0x20. ORing digits or letters with 'locase'
* produces same digits or (maybe lowercased) letters */
@@ -305,15 +405,16 @@ char *number(char *buf, char *end, unsigned long long num,
}
}
if (need_pfx) {
- spec.field_width--;
if (spec.base == 16)
+ spec.field_width -= 2;
+ else if (!is_zero)
spec.field_width--;
}
/* generate full string in tmp[], in reverse order */
i = 0;
- if (num == 0)
- tmp[i++] = '0';
+ if (num < spec.base)
+ tmp[i++] = digits[num] | locase;
/* Generic code, for any base:
else do {
tmp[i++] = (digits[do_div(num,base)] | locase);
@@ -353,9 +454,11 @@ char *number(char *buf, char *end, unsigned long long num,
}
/* "0x" / "0" prefix */
if (need_pfx) {
- if (buf < end)
- *buf = '0';
- ++buf;
+ if (spec.base == 16 || !is_zero) {
+ if (buf < end)
+ *buf = '0';
+ ++buf;
+ }
if (spec.base == 16) {
if (buf < end)
*buf = ('X' | locase);
@@ -436,7 +539,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
else if (ext != 'f' && ext != 's')
sprint_symbol(sym, value);
else
- kallsyms_lookup(value, NULL, NULL, NULL, sym);
+ sprint_symbol_no_offset(sym, value);
return string(buf, end, sym, spec);
#else
@@ -607,7 +710,7 @@ char *ip4_string(char *p, const u8 *addr, const char *fmt)
}
for (i = 0; i < 4; i++) {
char temp[3]; /* hold each IP quad in reverse order */
- int digits = put_dec_trunc(temp, addr[index]) - temp;
+ int digits = put_dec_trunc8(temp, addr[index]) - temp;
if (leading_zeros) {
if (digits < 3)
*p++ = '0';
@@ -866,13 +969,15 @@ static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
struct printf_spec spec)
{
+ int default_width = 2 * sizeof(void *) + (spec.flags & SPECIAL ? 2 : 0);
+
if (!ptr && *fmt != 'K') {
/*
* Print (null) with the same width as a pointer so it makes
* tabular output look nice.
*/
if (spec.field_width == -1)
- spec.field_width = 2 * sizeof(void *);
+ spec.field_width = default_width;
return string(buf, end, "(null)", spec);
}
@@ -927,7 +1032,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
*/
if (in_irq() || in_serving_softirq() || in_nmi()) {
if (spec.field_width == -1)
- spec.field_width = 2 * sizeof(void *);
+ spec.field_width = default_width;
return string(buf, end, "pK-error", spec);
}
if (!((kptr_restrict == 0) ||
@@ -944,7 +1049,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
}
spec.flags |= SMALL;
if (spec.field_width == -1) {
- spec.field_width = 2 * sizeof(void *);
+ spec.field_width = default_width;
spec.flags |= ZEROPAD;
}
spec.base = 16;
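put_dec_full9() above leans on the first entry of that division table: for q < 1073741829, (q * 0x1999999a) >> 32 equals q / 10 exactly, so decimal digits can be peeled off without a divide instruction. A standalone userspace sketch of the same trick; the buffer reversal mirrors what num_to_str() does when copying tmp[] into the caller's buffer.

#include <stdio.h>
#include <stdint.h>

/* Reciprocal multiply: exact q / 10 for q < 1073741829. */
static unsigned div10(unsigned q)
{
	return (unsigned)(((uint64_t)q * 0x1999999a) >> 32);
}

int main(void)
{
	unsigned n = 305419896;			/* 0x12345678, below the limit */
	char tmp[16], out[16];
	int len = 0, i;

	do {
		unsigned q = div10(n);

		tmp[len++] = '0' + (n - 10 * q);	/* current last digit */
		n = q;
	} while (n);

	for (i = 0; i < len; i++)		/* digits came out reversed */
		out[i] = tmp[len - 1 - i];
	out[len] = '\0';

	printf("%s\n", out);			/* prints 305419896 */
	return 0;
}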
diff --git a/mm/Kconfig b/mm/Kconfig
index e338407f1225..b2176374b98e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -198,7 +198,7 @@ config COMPACTION
config MIGRATION
bool "Page migration"
def_bool y
- depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
+ depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA
help
Allows the migration of the physical location of pages of processes
while the virtual addresses are not changed. This is useful in
@@ -349,6 +349,16 @@ choice
benefit.
endchoice
+config CROSS_MEMORY_ATTACH
+ bool "Cross Memory Support"
+ depends on MMU
+ default y
+ help
+ Enabling this option adds the system calls process_vm_readv and
+ process_vm_writev which allow a process with the correct privileges
+ to directly read from or write to another process's address space.
+ See the man page for more details.
+
#
# UP and nommu archs use km based percpu allocator
#
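A minimal userspace sketch of what the new option enables, assuming a glibc recent enough (2.15+) to expose the process_vm_readv() wrapper. For simplicity it copies from the calling process itself, which needs no extra privileges; reading a different pid is the real use case and is gated by the usual ptrace-style permission check.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char src[] = "hello from the 'remote' process";
	char dst[sizeof(src)] = "";
	struct iovec local = { .iov_base = dst, .iov_len = sizeof(dst) };
	struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

	/* Copy sizeof(src) bytes from "remote" (here: our own) address space. */
	ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
	if (n < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("copied %zd bytes: %s\n", n, dst);
	return 0;
}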
diff --git a/mm/Makefile b/mm/Makefile
index 50ec00ef2a0e..a156285ce88d 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -5,15 +5,18 @@
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
- vmalloc.o pagewalk.o pgtable-generic.o \
- process_vm_access.o
+ vmalloc.o pagewalk.o pgtable-generic.o
+
+ifdef CONFIG_CROSS_MEMORY_ATTACH
+mmu-$(CONFIG_MMU) += process_vm_access.o
+endif
obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
maccess.o page_alloc.o page-writeback.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
page_isolation.o mm_init.o mmu_context.o percpu.o \
- $(mmu-y)
+ compaction.o $(mmu-y)
obj-y += init-mm.o
ifdef CONFIG_NO_BOOTMEM
@@ -25,14 +28,13 @@ endif
obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
obj-$(CONFIG_BOUNCE) += bounce.o
-obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
+obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
obj-$(CONFIG_HAS_DMA) += dmapool.o
obj-$(CONFIG_HUGETLBFS) += hugetlb.o
obj-$(CONFIG_NUMA) += mempolicy.o
obj-$(CONFIG_SPARSEMEM) += sparse.o
obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
obj-$(CONFIG_SLOB) += slob.o
-obj-$(CONFIG_COMPACTION) += compaction.o
obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
obj-$(CONFIG_KSM) += ksm.o
obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 0131170c9d54..ec4fcb7a56c8 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -77,16 +77,16 @@ unsigned long __init bootmem_bootmap_pages(unsigned long pages)
*/
static void __init link_bootmem(bootmem_data_t *bdata)
{
- struct list_head *iter;
+ bootmem_data_t *ent;
- list_for_each(iter, &bdata_list) {
- bootmem_data_t *ent;
-
- ent = list_entry(iter, bootmem_data_t, list);
- if (bdata->node_min_pfn < ent->node_min_pfn)
- break;
+ list_for_each_entry(ent, &bdata_list, list) {
+ if (bdata->node_min_pfn < ent->node_min_pfn) {
+ list_add_tail(&bdata->list, &ent->list);
+ return;
+ }
}
- list_add_tail(&bdata->list, iter);
+
+ list_add_tail(&bdata->list, &bdata_list);
}
/*
@@ -203,7 +203,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
} else {
unsigned long off = 0;
- while (vec && off < BITS_PER_LONG) {
+ vec >>= start & (BITS_PER_LONG - 1);
+ while (vec) {
if (vec & 1) {
page = pfn_to_page(start + off);
__free_pages_bootmem(page, 0);
@@ -467,7 +468,7 @@ static unsigned long __init align_off(struct bootmem_data *bdata,
return ALIGN(base + off, align) - base;
}
-static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
+static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
unsigned long size, unsigned long align,
unsigned long goal, unsigned long limit)
{
@@ -588,14 +589,14 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
p_bdata = bootmem_arch_preferred_node(bdata, size, align,
goal, limit);
if (p_bdata)
- return alloc_bootmem_core(p_bdata, size, align,
+ return alloc_bootmem_bdata(p_bdata, size, align,
goal, limit);
}
#endif
return NULL;
}
-static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+static void * __init alloc_bootmem_core(unsigned long size,
unsigned long align,
unsigned long goal,
unsigned long limit)
@@ -603,7 +604,6 @@ static void * __init ___alloc_bootmem_nopanic(unsigned long size,
bootmem_data_t *bdata;
void *region;
-restart:
region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
if (region)
return region;
@@ -614,11 +614,25 @@ restart:
if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
break;
- region = alloc_bootmem_core(bdata, size, align, goal, limit);
+ region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
if (region)
return region;
}
+ return NULL;
+}
+
+static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+ unsigned long align,
+ unsigned long goal,
+ unsigned long limit)
+{
+ void *ptr;
+
+restart:
+ ptr = alloc_bootmem_core(size, align, goal, limit);
+ if (ptr)
+ return ptr;
if (goal) {
goal = 0;
goto restart;
@@ -684,21 +698,56 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
return ___alloc_bootmem(size, align, goal, limit);
}
-static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
+static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size, unsigned long align,
unsigned long goal, unsigned long limit)
{
void *ptr;
- ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
+again:
+ ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size,
+ align, goal, limit);
if (ptr)
return ptr;
- ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
+ ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
if (ptr)
return ptr;
- return ___alloc_bootmem(size, align, goal, limit);
+ ptr = alloc_bootmem_core(size, align, goal, limit);
+ if (ptr)
+ return ptr;
+
+ if (goal) {
+ goal = 0;
+ goto again;
+ }
+
+ return NULL;
+}
+
+void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+ return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
+}
+
+void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal,
+ unsigned long limit)
+{
+ void *ptr;
+
+ ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
+ if (ptr)
+ return ptr;
+
+ printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+ panic("Out of memory");
+ return NULL;
}
/**
@@ -722,7 +771,7 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
- return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
+ return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
@@ -743,7 +792,7 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
unsigned long new_goal;
new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
- ptr = alloc_bootmem_core(pgdat->bdata, size, align,
+ ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
new_goal, 0);
if (ptr)
return ptr;
@@ -754,47 +803,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
}
-#ifdef CONFIG_SPARSEMEM
-/**
- * alloc_bootmem_section - allocate boot memory from a specific section
- * @size: size of the request in bytes
- * @section_nr: sparse map section to allocate from
- *
- * Return NULL on failure.
- */
-void * __init alloc_bootmem_section(unsigned long size,
- unsigned long section_nr)
-{
- bootmem_data_t *bdata;
- unsigned long pfn, goal;
-
- pfn = section_nr_to_pfn(section_nr);
- goal = pfn << PAGE_SHIFT;
- bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
-
- return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, 0);
-}
-#endif
-
-void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- void *ptr;
-
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
- ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
- if (ptr)
- return ptr;
-
- ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
- if (ptr)
- return ptr;
-
- return __alloc_bootmem_nopanic(size, align, goal);
-}
-
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
#endif
@@ -839,6 +847,6 @@ void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
- return ___alloc_bootmem_node(pgdat->bdata, size, align,
- goal, ARCH_LOW_ADDRESS_LIMIT);
+ return ___alloc_bootmem_node(pgdat, size, align,
+ goal, ARCH_LOW_ADDRESS_LIMIT);
}
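The bootmem rework above settles on a clear fallback order: try the requested node, fall back to scanning all nodes, retry once with the placement goal dropped to 0, and leave panicking to the one wrapper that wants it. The control flow is sketched below with made-up allocator hooks (alloc_from_node()/alloc_from_any_node() are purely illustrative stand-ins).

#include <stdio.h>
#include <stddef.h>

static void *alloc_from_node(int nid, size_t size, unsigned long goal)
{
	(void)nid; (void)size; (void)goal;
	return NULL;				/* pretend this node is full */
}

static void *alloc_from_any_node(size_t size, unsigned long goal)
{
	static char pool[256];

	(void)size;
	return goal ? NULL : pool;	/* succeeds once the goal is relaxed */
}

static void *alloc_node_nopanic(int nid, size_t size, unsigned long goal)
{
	void *ptr;

again:
	ptr = alloc_from_node(nid, size, goal);
	if (!ptr)
		ptr = alloc_from_any_node(size, goal);
	if (ptr)
		return ptr;

	if (goal) {			/* relax the address hint and retry */
		goal = 0;
		goto again;
	}
	return NULL;
}

int main(void)
{
	void *ptr = alloc_node_nopanic(0, 64, 0x1000000);

	puts(ptr ? "allocated" : "bootmem alloc failed!");
	return 0;
}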
diff --git a/mm/compaction.c b/mm/compaction.c
index 74a8c825ff28..4ac338af5120 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,30 +16,11 @@
#include <linux/sysfs.h>
#include "internal.h"
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+
#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>
-/*
- * compact_control is used to track pages being migrated and the free pages
- * they are being migrated to during memory compaction. The free_pfn starts
- * at the end of a zone and migrate_pfn begins at the start. Movable pages
- * are moved to the end of a zone during a compaction run and the run
- * completes when free_pfn <= migrate_pfn
- */
-struct compact_control {
- struct list_head freepages; /* List of free pages to migrate to */
- struct list_head migratepages; /* List of pages being migrated */
- unsigned long nr_freepages; /* Number of isolated free pages */
- unsigned long nr_migratepages; /* Number of pages to migrate */
- unsigned long free_pfn; /* isolate_freepages search base */
- unsigned long migrate_pfn; /* isolate_migratepages search base */
- bool sync; /* Synchronous migration */
-
- int order; /* order a direct compactor needs */
- int migratetype; /* MOVABLE, RECLAIMABLE etc */
- struct zone *zone;
-};
-
static unsigned long release_freepages(struct list_head *freelist)
{
struct page *page, *next;
@@ -54,24 +35,35 @@ static unsigned long release_freepages(struct list_head *freelist)
return count;
}
-/* Isolate free pages onto a private freelist. Must hold zone->lock */
-static unsigned long isolate_freepages_block(struct zone *zone,
- unsigned long blockpfn,
- struct list_head *freelist)
+static void map_pages(struct list_head *list)
+{
+ struct page *page;
+
+ list_for_each_entry(page, list, lru) {
+ arch_alloc_page(page, 0);
+ kernel_map_pages(page, 1, 1);
+ }
+}
+
+static inline bool migrate_async_suitable(int migratetype)
+{
+ return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
+}
+
+/*
+ * Isolate free pages onto a private freelist. Caller must hold zone->lock.
+ * If @strict is true, it will abort and return 0 on any invalid PFNs or
+ * non-free pages inside the pageblock (even though it may still end up
+ * isolating some pages).
+ */
+static unsigned long isolate_freepages_block(unsigned long blockpfn,
+ unsigned long end_pfn,
+ struct list_head *freelist,
+ bool strict)
{
- unsigned long zone_end_pfn, end_pfn;
int nr_scanned = 0, total_isolated = 0;
struct page *cursor;
- /* Get the last PFN we should scan for free pages at */
- zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
- end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
-
- /* Find the first usable PFN in the block to initialse page cursor */
- for (; blockpfn < end_pfn; blockpfn++) {
- if (pfn_valid_within(blockpfn))
- break;
- }
cursor = pfn_to_page(blockpfn);
/* Isolate free pages. This assumes the block is valid */
@@ -79,15 +71,23 @@ static unsigned long isolate_freepages_block(struct zone *zone,
int isolated, i;
struct page *page = cursor;
- if (!pfn_valid_within(blockpfn))
+ if (!pfn_valid_within(blockpfn)) {
+ if (strict)
+ return 0;
continue;
+ }
nr_scanned++;
- if (!PageBuddy(page))
+ if (!PageBuddy(page)) {
+ if (strict)
+ return 0;
continue;
+ }
/* Found a free page, break it into order-0 pages */
isolated = split_free_page(page);
+ if (!isolated && strict)
+ return 0;
total_isolated += isolated;
for (i = 0; i < isolated; i++) {
list_add(&page->lru, freelist);
@@ -105,114 +105,71 @@ static unsigned long isolate_freepages_block(struct zone *zone,
return total_isolated;
}
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
-{
-
- int migratetype = get_pageblock_migratetype(page);
-
- /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
- if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
- return false;
-
- /* If the page is a large free page, then allow migration */
- if (PageBuddy(page) && page_order(page) >= pageblock_order)
- return true;
-
- /* If the block is MIGRATE_MOVABLE, allow migration */
- if (migratetype == MIGRATE_MOVABLE)
- return true;
-
- /* Otherwise skip the block */
- return false;
-}
-
-/*
- * Based on information in the current compact_control, find blocks
- * suitable for isolating free pages from and then isolate them.
+/**
+ * isolate_freepages_range() - isolate free pages.
+ * @start_pfn: The first PFN to start isolating.
+ * @end_pfn: The one-past-last PFN.
+ *
+ * Non-free pages, invalid PFNs, or zone boundaries within the
+ * [start_pfn, end_pfn) range are considered errors, cause function to
+ * undo its actions and return zero.
+ *
+ * Otherwise, function returns one-past-the-last PFN of isolated page
+ * (which may be greater then end_pfn if end fell in a middle of
+ * a free page).
*/
-static void isolate_freepages(struct zone *zone,
- struct compact_control *cc)
+unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
{
- struct page *page;
- unsigned long high_pfn, low_pfn, pfn;
- unsigned long flags;
- int nr_freepages = cc->nr_freepages;
- struct list_head *freelist = &cc->freepages;
-
- /*
- * Initialise the free scanner. The starting point is where we last
- * scanned from (or the end of the zone if starting). The low point
- * is the end of the pageblock the migration scanner is using.
- */
- pfn = cc->free_pfn;
- low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+ unsigned long isolated, pfn, block_end_pfn, flags;
+ struct zone *zone = NULL;
+ LIST_HEAD(freelist);
- /*
- * Take care that if the migration scanner is at the end of the zone
- * that the free scanner does not accidentally move to the next zone
- * in the next isolation cycle.
- */
- high_pfn = min(low_pfn, pfn);
+ if (pfn_valid(start_pfn))
+ zone = page_zone(pfn_to_page(start_pfn));
- /*
- * Isolate free pages until enough are available to migrate the
- * pages on cc->migratepages. We stop searching if the migrate
- * and free page scanners meet or enough free pages are isolated.
- */
- for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
- pfn -= pageblock_nr_pages) {
- unsigned long isolated;
-
- if (!pfn_valid(pfn))
- continue;
+ for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
+ if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
+ break;
/*
- * Check for overlapping nodes/zones. It's possible on some
- * configurations to have a setup like
- * node0 node1 node0
- * i.e. it's possible that all pages within a zones range of
- * pages do not belong to a single zone.
+ * On subsequent iterations ALIGN() is actually not needed,
+ * but we keep it so as not to complicate the code.
*/
- page = pfn_to_page(pfn);
- if (page_zone(page) != zone)
- continue;
+ block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+ block_end_pfn = min(block_end_pfn, end_pfn);
- /* Check the block is suitable for migration */
- if (!suitable_migration_target(page))
- continue;
+ spin_lock_irqsave(&zone->lock, flags);
+ isolated = isolate_freepages_block(pfn, block_end_pfn,
+ &freelist, true);
+ spin_unlock_irqrestore(&zone->lock, flags);
/*
- * Found a block suitable for isolating free pages from. Now
- * we disabled interrupts, double check things are ok and
- * isolate the pages. This is to minimise the time IRQs
- * are disabled
+ * In strict mode, isolate_freepages_block() returns 0 if
+ * there are any holes in the block (ie. invalid PFNs or
+ * non-free pages).
*/
- isolated = 0;
- spin_lock_irqsave(&zone->lock, flags);
- if (suitable_migration_target(page)) {
- isolated = isolate_freepages_block(zone, pfn, freelist);
- nr_freepages += isolated;
- }
- spin_unlock_irqrestore(&zone->lock, flags);
+ if (!isolated)
+ break;
/*
- * Record the highest PFN we isolated pages from. When next
- * looking for free pages, the search will restart here as
- * page migration may have returned some pages to the allocator
+ * If we managed to isolate pages, it is always (1 << n) *
+ * pageblock_nr_pages for some non-negative n. (Max order
+ * page may span two pageblocks).
*/
- if (isolated)
- high_pfn = max(high_pfn, pfn);
}
/* split_free_page does not map the pages */
- list_for_each_entry(page, freelist, lru) {
- arch_alloc_page(page, 0);
- kernel_map_pages(page, 1, 1);
+ map_pages(&freelist);
+
+ if (pfn < end_pfn) {
+ /* Loop terminated early, cleanup. */
+ release_freepages(&freelist);
+ return 0;
}
- cc->free_pfn = high_pfn;
- cc->nr_freepages = nr_freepages;
+ /* We don't use freelists for anything. */
+ return pfn;
}
/* Update the number of anon and file isolated pages in the zone */
@@ -243,37 +200,34 @@ static bool too_many_isolated(struct zone *zone)
return isolated > (inactive + active) / 2;
}
-/* possible outcome of isolate_migratepages */
-typedef enum {
- ISOLATE_ABORT, /* Abort compaction now */
- ISOLATE_NONE, /* No pages isolated, continue scanning */
- ISOLATE_SUCCESS, /* Pages isolated, migrate */
-} isolate_migrate_t;
-
-/*
- * Isolate all pages that can be migrated from the block pointed to by
- * the migrate scanner within compact_control.
+/**
+ * isolate_migratepages_range() - isolate all migrate-able pages in range.
+ * @zone: Zone pages are in.
+ * @cc: Compaction control structure.
+ * @low_pfn: The first PFN of the range.
+ * @end_pfn: The one-past-the-last PFN of the range.
+ *
+ * Isolate all pages that can be migrated from the range specified by
+ * [low_pfn, end_pfn). Returns zero if there is a fatal signal
+ * pending, otherwise the PFN of the first page that was not scanned
+ * (which may be less than, equal to, or greater than end_pfn).
+ *
+ * Assumes that cc->migratepages is empty and cc->nr_migratepages is
+ * zero.
+ *
+ * Apart from cc->migratepages and cc->nr_migratepages this function
+ * does not modify any cc's fields, in particular it does not modify
+ * (or read for that matter) cc->migrate_pfn.
*/
-static isolate_migrate_t isolate_migratepages(struct zone *zone,
- struct compact_control *cc)
+unsigned long
+isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ unsigned long low_pfn, unsigned long end_pfn)
{
- unsigned long low_pfn, end_pfn;
unsigned long last_pageblock_nr = 0, pageblock_nr;
unsigned long nr_scanned = 0, nr_isolated = 0;
struct list_head *migratelist = &cc->migratepages;
- isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;
-
- /* Do not scan outside zone boundaries */
- low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
-
- /* Only scan within a pageblock boundary */
- end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
-
- /* Do not cross the free scanner or scan within a memory hole */
- if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
- cc->migrate_pfn = end_pfn;
- return ISOLATE_NONE;
- }
+ isolate_mode_t mode = 0;
+ struct lruvec *lruvec;
/*
* Ensure that there are not too many pages isolated from the LRU
@@ -282,13 +236,13 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
*/
while (unlikely(too_many_isolated(zone))) {
/* async migration should just abort */
- if (!cc->sync)
- return ISOLATE_ABORT;
+ if (cc->mode != COMPACT_SYNC)
+ return 0;
congestion_wait(BLK_RW_ASYNC, HZ/10);
if (fatal_signal_pending(current))
- return ISOLATE_ABORT;
+ return 0;
}
/* Time to isolate some pages for migration */
@@ -350,8 +304,9 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
* satisfies the allocation
*/
pageblock_nr = low_pfn >> pageblock_order;
- if (!cc->sync && last_pageblock_nr != pageblock_nr &&
- get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
+ if (cc->mode != COMPACT_SYNC &&
+ last_pageblock_nr != pageblock_nr &&
+ !migrate_async_suitable(get_pageblock_migratetype(page))) {
low_pfn += pageblock_nr_pages;
low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
last_pageblock_nr = pageblock_nr;
@@ -371,17 +326,19 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
continue;
}
- if (!cc->sync)
+ if (cc->mode != COMPACT_SYNC)
mode |= ISOLATE_ASYNC_MIGRATE;
+ lruvec = mem_cgroup_page_lruvec(page, zone);
+
/* Try isolate the page */
- if (__isolate_lru_page(page, mode, 0) != 0)
+ if (__isolate_lru_page(page, mode) != 0)
continue;
VM_BUG_ON(PageTransCompound(page));
/* Successfully isolated */
- del_page_from_lru_list(zone, page, page_lru(page));
+ del_page_from_lru_list(page, lruvec, page_lru(page));
list_add(&page->lru, migratelist);
cc->nr_migratepages++;
nr_isolated++;
@@ -396,11 +353,200 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
acct_isolated(zone, cc);
spin_unlock_irq(&zone->lru_lock);
- cc->migrate_pfn = low_pfn;
trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
- return ISOLATE_SUCCESS;
+ return low_pfn;
+}
+
+#endif /* CONFIG_COMPACTION || CONFIG_CMA */
+#ifdef CONFIG_COMPACTION
+/*
+ * Returns true if MIGRATE_UNMOVABLE pageblock was successfully
+ * converted to MIGRATE_MOVABLE type, false otherwise.
+ */
+static bool rescue_unmovable_pageblock(struct page *page)
+{
+ unsigned long pfn, start_pfn, end_pfn;
+ struct page *start_page, *end_page;
+
+ pfn = page_to_pfn(page);
+ start_pfn = pfn & ~(pageblock_nr_pages - 1);
+ end_pfn = start_pfn + pageblock_nr_pages;
+
+ start_page = pfn_to_page(start_pfn);
+ end_page = pfn_to_page(end_pfn);
+
+ /* Do not deal with pageblocks that overlap zones */
+ if (page_zone(start_page) != page_zone(end_page))
+ return false;
+
+ for (page = start_page, pfn = start_pfn; page < end_page; pfn++,
+ page++) {
+ if (!pfn_valid_within(pfn))
+ continue;
+
+ if (PageBuddy(page)) {
+ int order = page_order(page);
+
+ pfn += (1 << order) - 1;
+ page += (1 << order) - 1;
+
+ continue;
+ } else if (page_count(page) == 0 || PageLRU(page))
+ continue;
+
+ return false;
+ }
+
+ set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+ move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE);
+ return true;
+}
+
+enum smt_result {
+ GOOD_AS_MIGRATION_TARGET,
+ FAIL_UNMOVABLE_TARGET,
+ FAIL_BAD_TARGET,
+};
+
+/*
+ * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block
+ * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page
+ * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise.
+ */
+static enum smt_result suitable_migration_target(struct page *page,
+ struct compact_control *cc)
+{
+
+ int migratetype = get_pageblock_migratetype(page);
+
+ /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
+ if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
+ return FAIL_BAD_TARGET;
+
+ /* If the page is a large free page, then allow migration */
+ if (PageBuddy(page) && page_order(page) >= pageblock_order)
+ return GOOD_AS_MIGRATION_TARGET;
+
+ /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
+ if (cc->mode != COMPACT_ASYNC_UNMOVABLE &&
+ migrate_async_suitable(migratetype))
+ return GOOD_AS_MIGRATION_TARGET;
+
+ if (cc->mode == COMPACT_ASYNC_MOVABLE &&
+ migratetype == MIGRATE_UNMOVABLE)
+ return FAIL_UNMOVABLE_TARGET;
+
+ if (cc->mode != COMPACT_ASYNC_MOVABLE &&
+ migratetype == MIGRATE_UNMOVABLE &&
+ rescue_unmovable_pageblock(page))
+ return GOOD_AS_MIGRATION_TARGET;
+
+ /* Otherwise skip the block */
+ return FAIL_BAD_TARGET;
+}
+
+/*
+ * Based on information in the current compact_control, find blocks
+ * suitable for isolating free pages from and then isolate them.
+ */
+static void isolate_freepages(struct zone *zone,
+ struct compact_control *cc)
+{
+ struct page *page;
+ unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
+ unsigned long flags;
+ int nr_freepages = cc->nr_freepages;
+ struct list_head *freelist = &cc->freepages;
+
+ /*
+ * Initialise the free scanner. The starting point is where we last
+ * scanned from (or the end of the zone if starting). The low point
+ * is the end of the pageblock the migration scanner is using.
+ */
+ pfn = cc->free_pfn;
+ low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+
+ /*
+ * Take care that if the migration scanner is at the end of the zone
+ * that the free scanner does not accidentally move to the next zone
+ * in the next isolation cycle.
+ */
+ high_pfn = min(low_pfn, pfn);
+
+ zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+
+ /*
+ * isolate_freepages() may be called more than once during a
+ * compact_zone_order() run and we want only the most recent
+ * count.
+ */
+ cc->nr_pageblocks_skipped = 0;
+
+ /*
+ * Isolate free pages until enough are available to migrate the
+ * pages on cc->migratepages. We stop searching if the migrate
+ * and free page scanners meet or enough free pages are isolated.
+ */
+ for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+ pfn -= pageblock_nr_pages) {
+ unsigned long isolated;
+ enum smt_result ret;
+
+ if (!pfn_valid(pfn))
+ continue;
+
+ /*
+ * Check for overlapping nodes/zones. It's possible on some
+ * configurations to have a setup like
+ * node0 node1 node0
+ * i.e. not every page within a zone's PFN range necessarily
+ * belongs to that zone.
+ */
+ page = pfn_to_page(pfn);
+ if (page_zone(page) != zone)
+ continue;
+
+ /* Check the block is suitable for migration */
+ ret = suitable_migration_target(page, cc);
+ if (ret != GOOD_AS_MIGRATION_TARGET) {
+ if (ret == FAIL_UNMOVABLE_TARGET)
+ cc->nr_pageblocks_skipped++;
+ continue;
+ }
+ /*
+ * Found a block suitable for isolating free pages from. With
+ * interrupts disabled by the lock below, double check that the
+ * block is still suitable and then isolate the pages. This
+ * minimises the time IRQs are disabled.
+ */
+ isolated = 0;
+ spin_lock_irqsave(&zone->lock, flags);
+ ret = suitable_migration_target(page, cc);
+ if (ret == GOOD_AS_MIGRATION_TARGET) {
+ end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
+ isolated = isolate_freepages_block(pfn, end_pfn,
+ freelist, false);
+ nr_freepages += isolated;
+ } else if (ret == FAIL_UNMOVABLE_TARGET)
+ cc->nr_pageblocks_skipped++;
+ spin_unlock_irqrestore(&zone->lock, flags);
+
+ /*
+ * Record the highest PFN we isolated pages from. When next
+ * looking for free pages, the search will restart here as
+ * page migration may have returned some pages to the allocator
+ */
+ if (isolated)
+ high_pfn = max(high_pfn, pfn);
+ }
+
+ /* split_free_page does not map the pages */
+ map_pages(freelist);
+
+ cc->free_pfn = high_pfn;
+ cc->nr_freepages = nr_freepages;
}
/*
@@ -449,6 +595,44 @@ static void update_nr_listpages(struct compact_control *cc)
cc->nr_freepages = nr_freepages;
}
+/* possible outcome of isolate_migratepages */
+typedef enum {
+ ISOLATE_ABORT, /* Abort compaction now */
+ ISOLATE_NONE, /* No pages isolated, continue scanning */
+ ISOLATE_SUCCESS, /* Pages isolated, migrate */
+} isolate_migrate_t;
+
+/*
+ * Isolate all pages that can be migrated from the block pointed to by
+ * the migrate scanner within compact_control.
+ */
+static isolate_migrate_t isolate_migratepages(struct zone *zone,
+ struct compact_control *cc)
+{
+ unsigned long low_pfn, end_pfn;
+
+ /* Do not scan outside zone boundaries */
+ low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
+
+ /* Only scan within a pageblock boundary */
+ end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
+
+ /* Do not cross the free scanner or scan within a memory hole */
+ if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
+ cc->migrate_pfn = end_pfn;
+ return ISOLATE_NONE;
+ }
+
+ /* Perform the isolation */
+ low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
+ if (!low_pfn)
+ return ISOLATE_ABORT;
+
+ cc->migrate_pfn = low_pfn;
+
+ return ISOLATE_SUCCESS;
+}
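
For orientation, the three isolate_migrate_t outcomes above are consumed by compact_zone()'s driver loop. The fragment below is an illustrative sketch only, not part of this patch; it assumes the loop shape used elsewhere in mm/compaction.c, and only compact_finished(), isolate_migratepages(), migrate_pages(), compaction_alloc() and update_nr_listpages() are names taken from the surrounding code.

    /* Illustrative sketch, not patch content: how a compact_zone()-style
     * loop consumes isolate_migrate_t together with cc->mode. */
    while (compact_finished(zone, cc) == COMPACT_CONTINUE) {
            switch (isolate_migratepages(zone, cc)) {
            case ISOLATE_ABORT:             /* fatal signal or too many isolated */
                    return COMPACT_PARTIAL;
            case ISOLATE_NONE:              /* nothing usable in this pageblock */
                    continue;
            case ISOLATE_SUCCESS:
                    break;
            }

            /* Hand the isolated pages to the migration core; COMPACT_SYNC
             * maps to MIGRATE_SYNC_LIGHT exactly as in the hunk below. */
            migrate_pages(&cc->migratepages, compaction_alloc,
                          (unsigned long)&cc->freepages, false,
                          cc->mode == COMPACT_SYNC ? MIGRATE_SYNC_LIGHT
                                                   : MIGRATE_ASYNC);
            update_nr_listpages(cc);
    }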
+
static int compact_finished(struct zone *zone,
struct compact_control *cc)
{
@@ -578,8 +762,9 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
nr_migrate = cc->nr_migratepages;
err = migrate_pages(&cc->migratepages, compaction_alloc,
- (unsigned long)cc, false,
- cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
+ (unsigned long)&cc->freepages, false,
+ (cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT
+ : MIGRATE_ASYNC);
update_nr_listpages(cc);
nr_remaining = cc->nr_migratepages;
@@ -608,7 +793,8 @@ out:
static unsigned long compact_zone_order(struct zone *zone,
int order, gfp_t gfp_mask,
- bool sync)
+ enum compact_mode mode,
+ unsigned long *nr_pageblocks_skipped)
{
struct compact_control cc = {
.nr_freepages = 0,
@@ -616,12 +802,17 @@ static unsigned long compact_zone_order(struct zone *zone,
.order = order,
.migratetype = allocflags_to_migratetype(gfp_mask),
.zone = zone,
- .sync = sync,
+ .mode = mode,
};
+ unsigned long rc;
+
INIT_LIST_HEAD(&cc.freepages);
INIT_LIST_HEAD(&cc.migratepages);
- return compact_zone(zone, &cc);
+ rc = compact_zone(zone, &cc);
+ *nr_pageblocks_skipped = cc.nr_pageblocks_skipped;
+
+ return rc;
}
int sysctl_extfrag_threshold = 500;
@@ -646,6 +837,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
struct zoneref *z;
struct zone *zone;
int rc = COMPACT_SKIPPED;
+ unsigned long nr_pageblocks_skipped;
+ enum compact_mode mode;
/*
* Check whether it is worth even starting compaction. The order check is
@@ -662,12 +855,22 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
nodemask) {
int status;
- status = compact_zone_order(zone, order, gfp_mask, sync);
+ mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
+retry:
+ status = compact_zone_order(zone, order, gfp_mask, mode,
+ &nr_pageblocks_skipped);
rc = max(status, rc);
/* If a normal allocation would succeed, stop compacting */
if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
break;
+
+ if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) {
+ if (nr_pageblocks_skipped) {
+ mode = COMPACT_ASYNC_UNMOVABLE;
+ goto retry;
+ }
+ }
}
return rc;
@@ -701,7 +904,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
if (ok && cc->order > zone->compact_order_failed)
zone->compact_order_failed = cc->order + 1;
/* Currently async compaction is never deferred. */
- else if (!ok && cc->sync)
+ else if (!ok && cc->mode == COMPACT_SYNC)
defer_compaction(zone, cc->order);
}
@@ -716,7 +919,7 @@ int compact_pgdat(pg_data_t *pgdat, int order)
{
struct compact_control cc = {
.order = order,
- .sync = false,
+ .mode = COMPACT_ASYNC_MOVABLE,
};
return __compact_pgdat(pgdat, &cc);
@@ -726,7 +929,7 @@ static int compact_node(int nid)
{
struct compact_control cc = {
.order = -1,
- .sync = true,
+ .mode = COMPACT_SYNC,
};
return __compact_pgdat(NODE_DATA(nid), &cc);
@@ -795,3 +998,5 @@ void compaction_unregister_node(struct node *node)
return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */
+
+#endif /* CONFIG_COMPACTION */
diff --git a/mm/filemap.c b/mm/filemap.c
index 79c4b2b0b14e..64b48f934b89 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -29,7 +29,6 @@
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
-#include <linux/syscalls.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
@@ -1478,44 +1477,6 @@ out:
}
EXPORT_SYMBOL(generic_file_aio_read);
-static ssize_t
-do_readahead(struct address_space *mapping, struct file *filp,
- pgoff_t index, unsigned long nr)
-{
- if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
- return -EINVAL;
-
- force_page_cache_readahead(mapping, filp, index, nr);
- return 0;
-}
-
-SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
-{
- ssize_t ret;
- struct file *file;
-
- ret = -EBADF;
- file = fget(fd);
- if (file) {
- if (file->f_mode & FMODE_READ) {
- struct address_space *mapping = file->f_mapping;
- pgoff_t start = offset >> PAGE_CACHE_SHIFT;
- pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
- unsigned long len = end - start + 1;
- ret = do_readahead(mapping, file, start, len);
- }
- fput(file);
- }
- return ret;
-}
-#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
-asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
-{
- return SYSC_readahead((int) fd, offset, (size_t) count);
-}
-SYSCALL_ALIAS(sys_readahead, SyS_readahead);
-#endif
-
#ifdef CONFIG_MMU
/**
* page_cache_read - adds requested page to the page cache if not already there
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f0e5306eeb55..57c4b9309015 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -636,16 +636,12 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
unsigned long haddr, pmd_t *pmd,
struct page *page)
{
- int ret = 0;
pgtable_t pgtable;
VM_BUG_ON(!PageCompound(page));
pgtable = pte_alloc_one(mm, haddr);
- if (unlikely(!pgtable)) {
- mem_cgroup_uncharge_page(page);
- put_page(page);
+ if (unlikely(!pgtable))
return VM_FAULT_OOM;
- }
clear_huge_page(page, haddr, HPAGE_PMD_NR);
__SetPageUptodate(page);
@@ -675,7 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
spin_unlock(&mm->page_table_lock);
}
- return ret;
+ return 0;
}
static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
@@ -724,8 +720,14 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
put_page(page);
goto out;
}
+ if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
+ page))) {
+ mem_cgroup_uncharge_page(page);
+ put_page(page);
+ goto out;
+ }
- return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
+ return 0;
}
out:
/*
@@ -950,6 +952,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
count_vm_event(THP_FAULT_FALLBACK);
ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
pmd, orig_pmd, page, haddr);
+ if (ret & VM_FAULT_OOM)
+ split_huge_page(page);
put_page(page);
goto out;
}
@@ -957,6 +961,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
put_page(new_page);
+ split_huge_page(page);
put_page(page);
ret |= VM_FAULT_OOM;
goto out;
@@ -968,8 +973,10 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
spin_lock(&mm->page_table_lock);
put_page(page);
if (unlikely(!pmd_same(*pmd, orig_pmd))) {
+ spin_unlock(&mm->page_table_lock);
mem_cgroup_uncharge_page(new_page);
put_page(new_page);
+ goto out;
} else {
pmd_t entry;
VM_BUG_ON(!PageHead(page));
@@ -1224,10 +1231,13 @@ static void __split_huge_page_refcount(struct page *page)
{
int i;
struct zone *zone = page_zone(page);
+ struct lruvec *lruvec;
int tail_count = 0;
/* prevent PageLRU to go away from under us, and freeze lru stats */
spin_lock_irq(&zone->lru_lock);
+ lruvec = mem_cgroup_page_lruvec(page, zone);
+
compound_lock(page);
 /* complete memcg works before adding pages to LRU */
mem_cgroup_split_huge_fixup(page);
@@ -1302,13 +1312,12 @@ static void __split_huge_page_refcount(struct page *page)
BUG_ON(!PageDirty(page_tail));
BUG_ON(!PageSwapBacked(page_tail));
-
- lru_add_page_tail(zone, page, page_tail);
+ lru_add_page_tail(page, page_tail, lruvec);
}
atomic_sub(tail_count, &page->_count);
BUG_ON(atomic_read(&page->_count) <= 0);
- __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+ __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
ClearPageCompound(page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ae8f708e3d75..e198831276a3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -273,8 +273,8 @@ static long region_count(struct list_head *head, long f, long t)
/* Locate each segment we overlap with, and count that overlap. */
list_for_each_entry(rg, head, link) {
- int seg_from;
- int seg_to;
+ long seg_from;
+ long seg_to;
if (rg->to <= f)
continue;
@@ -2157,6 +2157,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
kref_get(&reservations->refs);
}
+static void resv_map_put(struct vm_area_struct *vma)
+{
+ struct resv_map *reservations = vma_resv_map(vma);
+
+ if (!reservations)
+ return;
+ kref_put(&reservations->refs, resv_map_release);
+}
+
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
struct hstate *h = hstate_vma(vma);
@@ -2173,7 +2182,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
reserve = (end - start) -
region_count(&reservations->regions, start, end);
- kref_put(&reservations->refs, resv_map_release);
+ resv_map_put(vma);
if (reserve) {
hugetlb_acct_memory(h, -reserve);
@@ -2213,6 +2222,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
}
entry = pte_mkyoung(entry);
entry = pte_mkhuge(entry);
+ entry = arch_make_huge_pte(entry, vma, page, writable);
return entry;
}
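
The new arch_make_huge_pte() hook lets an architecture encode the huge page size into the PTE. A sketch of the generic fallback this call site presumably relies on (illustrative only; the real definition lives in the hugetlb headers, not in this hunk):

    /* Assumed generic fallback (sketch): architectures that do not
     * override the hook leave the entry untouched. */
    static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                           struct page *page, int writable)
    {
            return entry;
    }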
@@ -2990,12 +3000,16 @@ int hugetlb_reserve_pages(struct inode *inode,
set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
}
- if (chg < 0)
- return chg;
+ if (chg < 0) {
+ ret = chg;
+ goto out_err;
+ }
/* There must be enough pages in the subpool for the mapping */
- if (hugepage_subpool_get_pages(spool, chg))
- return -ENOSPC;
+ if (hugepage_subpool_get_pages(spool, chg)) {
+ ret = -ENOSPC;
+ goto out_err;
+ }
/*
* Check enough hugepages are available for the reservation.
@@ -3004,7 +3018,7 @@ int hugetlb_reserve_pages(struct inode *inode,
ret = hugetlb_acct_memory(h, chg);
if (ret < 0) {
hugepage_subpool_put_pages(spool, chg);
- return ret;
+ goto out_err;
}
/*
@@ -3021,6 +3035,10 @@ int hugetlb_reserve_pages(struct inode *inode,
if (!vma || vma->vm_flags & VM_MAYSHARE)
region_add(&inode->i_mapping->private_list, from, to);
return 0;
+out_err:
+ if (vma)
+ resv_map_put(vma);
+ return ret;
}
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
diff --git a/mm/internal.h b/mm/internal.h
index 2189af491783..4194ab9dc19b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -94,12 +94,52 @@ extern void putback_lru_page(struct page *page);
/*
* in mm/page_alloc.c
*/
+extern void set_pageblock_migratetype(struct page *page, int migratetype);
+extern int move_freepages_block(struct zone *zone, struct page *page,
+ int migratetype);
extern void __free_pages_bootmem(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned long order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+#include <linux/compaction.h>
+
+/*
+ * in mm/compaction.c
+ */
+/*
+ * compact_control is used to track pages being migrated and the free pages
+ * they are being migrated to during memory compaction. The free_pfn starts
+ * at the end of a zone and migrate_pfn begins at the start. Movable pages
+ * are moved to the end of a zone during a compaction run and the run
+ * completes when free_pfn <= migrate_pfn
+ */
+struct compact_control {
+ struct list_head freepages; /* List of free pages to migrate to */
+ struct list_head migratepages; /* List of pages being migrated */
+ unsigned long nr_freepages; /* Number of isolated free pages */
+ unsigned long nr_migratepages; /* Number of pages to migrate */
+ unsigned long free_pfn; /* isolate_freepages search base */
+ unsigned long migrate_pfn; /* isolate_migratepages search base */
+ enum compact_mode mode; /* Compaction mode */
+
+ int order; /* order a direct compactor needs */
+ int migratetype; /* MOVABLE, RECLAIMABLE etc */
+ struct zone *zone;
+
+ /* Number of UNMOVABLE destination pageblocks skipped during scan */
+ unsigned long nr_pageblocks_skipped;
+};
+
+unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn);
+unsigned long
+isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ unsigned long low_pfn, unsigned long end_pfn);
+
+#endif
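
Declaring isolate_migratepages_range() and isolate_freepages_range() here is what lets code outside mm/compaction.c, such as the CONFIG_CMA paths this #if also guards, drive the migrate scanner over an arbitrary PFN range. A hypothetical caller sketch, not part of the patch; the helper name is made up and the migration step is elided:

    /* Hypothetical sketch only: walk [start, end) with the exported
     * migrate scanner, honouring its "returns 0 on abort" contract and
     * its requirement that cc->migratepages starts out empty. */
    static int isolate_range_sketch(struct compact_control *cc,
                                    unsigned long start, unsigned long end)
    {
            unsigned long pfn = start;

            while (pfn < end) {
                    cc->nr_migratepages = 0;
                    pfn = isolate_migratepages_range(cc->zone, cc, pfn, end);
                    if (!pfn)
                            return -EINTR;  /* aborted (e.g. fatal signal) */
                    /* ...migrate and drain cc->migratepages here... */
            }
            return 0;
    }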
/*
* function for dealing with page's order in buddy system.
@@ -131,7 +171,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
* to determine if it's being mapped into a LOCKED vma.
* If so, mark page as mlocked.
*/
-static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
+static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
+ struct page *page)
{
VM_BUG_ON(PageLRU(page));
@@ -189,7 +230,7 @@ extern unsigned long vma_address(struct page *page,
struct vm_area_struct *vma);
#endif
#else /* !CONFIG_MMU */
-static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
+static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
{
return 0;
}
diff --git a/mm/madvise.c b/mm/madvise.c
index 1ccbba5b6674..deff1b64a08c 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -11,8 +11,10 @@
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
+#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
+#include <linux/fs.h>
/*
* Any behaviour which results in changes to the vma->vm_flags needs to
@@ -200,8 +202,7 @@ static long madvise_remove(struct vm_area_struct *vma,
struct vm_area_struct **prev,
unsigned long start, unsigned long end)
{
- struct address_space *mapping;
- loff_t offset, endoff;
+ loff_t offset;
int error;
*prev = NULL; /* tell sys_madvise we drop mmap_sem */
@@ -217,16 +218,14 @@ static long madvise_remove(struct vm_area_struct *vma,
if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
return -EACCES;
- mapping = vma->vm_file->f_mapping;
-
offset = (loff_t)(start - vma->vm_start)
+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
- endoff = (loff_t)(end - vma->vm_start - 1)
- + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
- /* vmtruncate_range needs to take i_mutex */
+ /* filesystem's fallocate may need to take i_mutex */
up_read(&current->mm->mmap_sem);
- error = vmtruncate_range(mapping->host, offset, endoff);
+ error = do_fallocate(vma->vm_file,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ offset, end - start);
down_read(&current->mm->mmap_sem);
return error;
}
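
The switch from vmtruncate_range() to do_fallocate() keeps the user-visible semantics of MADV_REMOVE: the advised range is hole-punched in the backing file and later reads return zeroes. A minimal userspace check, not part of the patch, assuming a hole-punch capable filesystem such as the tmpfs behind /dev/shm:

    /* Standalone demo (userspace, not kernel code): MADV_REMOVE on a
     * shared mapping now ends up in the filesystem's fallocate handler
     * with FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            size_t len = 16 * 4096;
            int fd = open("/dev/shm/madv-remove-demo",
                          O_RDWR | O_CREAT | O_TRUNC, 0600);

            if (fd < 0 || ftruncate(fd, len) != 0)
                    return 1;

            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return 1;

            memset(p, 0xaa, len);                   /* populate backing pages */
            if (madvise(p, len, MADV_REMOVE) != 0)  /* punch a hole over the range */
                    perror("madvise(MADV_REMOVE)");

            printf("byte 0 after hole punch: %#x\n", (unsigned char)p[0]); /* 0 */
            munmap(p, len);
            close(fd);
            unlink("/dev/shm/madv-remove-demo");
            return 0;
    }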
diff --git a/mm/memblock.c b/mm/memblock.c
index a44eab3157f8..952123eba433 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -37,6 +37,8 @@ struct memblock memblock __initdata_memblock = {
int memblock_debug __initdata_memblock;
static int memblock_can_resize __initdata_memblock;
+static int memblock_memory_in_slab __initdata_memblock = 0;
+static int memblock_reserved_in_slab __initdata_memblock = 0;
/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
@@ -187,6 +189,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
struct memblock_region *new_array, *old_array;
phys_addr_t old_size, new_size, addr;
int use_slab = slab_is_available();
+ int *in_slab;
/* We don't allow resizing until we know about the reserved regions
* of memory that aren't suitable for allocation
@@ -198,6 +201,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
old_size = type->max * sizeof(struct memblock_region);
new_size = old_size << 1;
+ /* Retrieve the slab flag */
+ if (type == &memblock.memory)
+ in_slab = &memblock_memory_in_slab;
+ else
+ in_slab = &memblock_reserved_in_slab;
+
/* Try to find some space for it.
*
* WARNING: We assume that either slab_is_available() and we use it or
@@ -212,14 +221,15 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
if (use_slab) {
new_array = kmalloc(new_size, GFP_KERNEL);
addr = new_array ? __pa(new_array) : 0;
- } else
+ } else {
addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
+ new_array = addr ? __va(addr) : 0;
+ }
if (!addr) {
pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
memblock_type_name(type), type->max, type->max * 2);
return -1;
}
- new_array = __va(addr);
memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);
@@ -234,22 +244,24 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
type->regions = new_array;
type->max <<= 1;
- /* If we use SLAB that's it, we are done */
- if (use_slab)
- return 0;
-
- /* Add the new reserved region now. Should not fail ! */
- BUG_ON(memblock_reserve(addr, new_size));
-
- /* If the array wasn't our static init one, then free it. We only do
- * that before SLAB is available as later on, we don't know whether
- * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
- * anyways
+ /* Free old array. We needn't free it if the array is the
+ * static one
*/
- if (old_array != memblock_memory_init_regions &&
- old_array != memblock_reserved_init_regions)
+ if (*in_slab)
+ kfree(old_array);
+ else if (old_array != memblock_memory_init_regions &&
+ old_array != memblock_reserved_init_regions)
memblock_free(__pa(old_array), old_size);
+ /* Reserve the new array if it was allocated from memblock.
+ * Otherwise, we needn't do it.
+ */
+ if (!use_slab)
+ BUG_ON(memblock_reserve(addr, new_size));
+
+ /* Update slab flag */
+ *in_slab = use_slab;
+
return 0;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f342778a0c0a..ac35bccadb7b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -59,7 +59,7 @@
struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES 5
-struct mem_cgroup *root_mem_cgroup __read_mostly;
+static struct mem_cgroup *root_mem_cgroup __read_mostly;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
@@ -73,7 +73,7 @@ static int really_do_swap_account __initdata = 0;
#endif
#else
-#define do_swap_account (0)
+#define do_swap_account 0
#endif
@@ -88,18 +88,31 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
- MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
MEM_CGROUP_STAT_NSTATS,
};
+static const char * const mem_cgroup_stat_names[] = {
+ "cache",
+ "rss",
+ "mapped_file",
+ "swap",
+};
+
enum mem_cgroup_events_index {
MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
- MEM_CGROUP_EVENTS_COUNT, /* # of pages paged in/out */
MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
MEM_CGROUP_EVENTS_NSTATS,
};
+
+static const char * const mem_cgroup_events_names[] = {
+ "pgpgin",
+ "pgpgout",
+ "pgfault",
+ "pgmajfault",
+};
+
/*
* Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used for
@@ -112,13 +125,14 @@ enum mem_cgroup_events_target {
MEM_CGROUP_TARGET_NUMAINFO,
MEM_CGROUP_NTARGETS,
};
-#define THRESHOLDS_EVENTS_TARGET (128)
-#define SOFTLIMIT_EVENTS_TARGET (1024)
-#define NUMAINFO_EVENTS_TARGET (1024)
+#define THRESHOLDS_EVENTS_TARGET 128
+#define SOFTLIMIT_EVENTS_TARGET 1024
+#define NUMAINFO_EVENTS_TARGET 1024
struct mem_cgroup_stat_cpu {
long count[MEM_CGROUP_STAT_NSTATS];
unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
+ unsigned long nr_page_events;
unsigned long targets[MEM_CGROUP_NTARGETS];
};
@@ -138,7 +152,6 @@ struct mem_cgroup_per_zone {
struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
- struct zone_reclaim_stat reclaim_stat;
struct rb_node tree_node; /* RB tree node */
unsigned long long usage_in_excess;/* Set to the value by which */
/* the soft limit is exceeded*/
@@ -182,7 +195,7 @@ struct mem_cgroup_threshold {
/* For threshold */
struct mem_cgroup_threshold_ary {
- /* An array index points to threshold just below usage. */
+ /* An array index points to threshold just below or equal to usage. */
int current_threshold;
/* Size of entries[] */
unsigned int size;
@@ -245,8 +258,8 @@ struct mem_cgroup {
*/
struct rcu_head rcu_freeing;
/*
- * But when using vfree(), that cannot be done at
- * interrupt time, so we must then queue the work.
+ * We also need some space for a worker in deferred freeing.
+ * By the time we call it, rcu_freeing is no longer in use.
*/
struct work_struct work_freeing;
};
@@ -305,7 +318,7 @@ struct mem_cgroup {
/*
* percpu counter.
*/
- struct mem_cgroup_stat_cpu *stat;
+ struct mem_cgroup_stat_cpu __percpu *stat;
/*
* used when a cpu is offlined or other synchronizations
* See mem_cgroup_read_stat().
@@ -360,8 +373,8 @@ static bool move_file(void)
* Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
* limit reclaim to prevent infinite loops, if they ever occur.
*/
-#define MEM_CGROUP_MAX_RECLAIM_LOOPS (100)
-#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
+#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
+#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
enum charge_type {
MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -377,8 +390,8 @@ enum charge_type {
#define _MEM (0)
#define _MEMSWAP (1)
#define _OOM_TYPE (2)
-#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
-#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff)
+#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
+#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val) ((val) & 0xffff)
 /* Used for OOM notifier */
#define OOM_CONTROL (0)
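
Dropping the outer parentheses in MEMFILE_PRIVATE() and MEMFILE_TYPE() is safe because << and >> bind tighter than | and &. A standalone sanity check (ordinary userspace C, not kernel code; the values are made up):

    #include <assert.h>
    #include <stdio.h>

    /* Same packing as above: type in bits 16..31, attribute in bits 0..15. */
    #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
    #define MEMFILE_TYPE(val)       ((val) >> 16 & 0xffff)
    #define MEMFILE_ATTR(val)       ((val) & 0xffff)

    int main(void)
    {
            int packed = MEMFILE_PRIVATE(2 /* _OOM_TYPE */, 0x1234);

            assert(MEMFILE_TYPE(packed) == 2);
            assert(MEMFILE_ATTR(packed) == 0x1234);
            printf("packed=%#x type=%d attr=%#x\n",
                   packed, MEMFILE_TYPE(packed), MEMFILE_ATTR(packed));
            return 0;
    }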
@@ -404,6 +417,7 @@ void sock_update_memcg(struct sock *sk)
{
if (mem_cgroup_sockets_enabled) {
struct mem_cgroup *memcg;
+ struct cg_proto *cg_proto;
BUG_ON(!sk->sk_prot->proto_cgroup);
@@ -423,9 +437,10 @@ void sock_update_memcg(struct sock *sk)
rcu_read_lock();
memcg = mem_cgroup_from_task(current);
- if (!mem_cgroup_is_root(memcg)) {
+ cg_proto = sk->sk_prot->proto_cgroup(memcg);
+ if (!mem_cgroup_is_root(memcg) && memcg_proto_active(cg_proto)) {
mem_cgroup_get(memcg);
- sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
+ sk->sk_cgrp = cg_proto;
}
rcu_read_unlock();
}
@@ -454,6 +469,19 @@ EXPORT_SYMBOL(tcp_proto_cgroup);
#endif /* CONFIG_INET */
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
+#if defined(CONFIG_INET) && defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM)
+static void disarm_sock_keys(struct mem_cgroup *memcg)
+{
+ if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
+ return;
+ static_key_slow_dec(&memcg_socket_limit_enabled);
+}
+#else
+static void disarm_sock_keys(struct mem_cgroup *memcg)
+{
+}
+#endif
+
static void drain_all_stock_async(struct mem_cgroup *memcg);
static struct mem_cgroup_per_zone *
@@ -718,12 +746,21 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
nr_pages = -nr_pages; /* for event */
}
- __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
+ __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
preempt_enable();
}
unsigned long
+mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
+{
+ struct mem_cgroup_per_zone *mz;
+
+ mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+ return mz->lru_size[lru];
+}
+
+static unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
unsigned int lru_mask)
{
@@ -770,7 +807,7 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
{
unsigned long val, next;
- val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
+ val = __this_cpu_read(memcg->stat->nr_page_events);
next = __this_cpu_read(memcg->stat->targets[target]);
/* from time_after() in jiffies.h */
if ((long)next - (long)val < 0) {
@@ -1013,7 +1050,7 @@ EXPORT_SYMBOL(mem_cgroup_count_vm_event);
/**
* mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
* @zone: zone of the wanted lruvec
- * @mem: memcg of the wanted lruvec
+ * @memcg: memcg of the wanted lruvec
*
* Returns the lru list vector holding pages for the given @zone and
* @mem. This can be the global zone lruvec, if the memory controller
@@ -1046,19 +1083,11 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
*/
/**
- * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
- * @zone: zone of the page
+ * mem_cgroup_page_lruvec - return lruvec for adding an lru page
* @page: the page
- * @lru: current lru
- *
- * This function accounts for @page being added to @lru, and returns
- * the lruvec for the given @zone and the memcg @page is charged to.
- *
- * The callsite is then responsible for physically linking the page to
- * the returned lruvec->lists[@lru].
+ * @zone: zone of the page
*/
-struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
- enum lru_list lru)
+struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
{
struct mem_cgroup_per_zone *mz;
struct mem_cgroup *memcg;
@@ -1071,7 +1100,7 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
memcg = pc->mem_cgroup;
/*
- * Surreptitiously switch any uncharged page to root:
+ * Surreptitiously switch any uncharged offlist page to root:
* an uncharged page off lru does nothing to secure
* its former mem_cgroup from sudden removal.
*
@@ -1079,85 +1108,60 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
* under page_cgroup lock: between them, they make all uses
* of pc->mem_cgroup safe.
*/
- if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+ if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
pc->mem_cgroup = memcg = root_mem_cgroup;
mz = page_cgroup_zoneinfo(memcg, page);
- /* compound_order() is stabilized through lru_lock */
- mz->lru_size[lru] += 1 << compound_order(page);
return &mz->lruvec;
}
/**
- * mem_cgroup_lru_del_list - account for removing an lru page
- * @page: the page
- * @lru: target lru
- *
- * This function accounts for @page being removed from @lru.
+ * mem_cgroup_update_lru_size - account for adding or removing an lru page
+ * @lruvec: mem_cgroup per zone lru vector
+ * @lru: index of lru list the page is sitting on
+ * @nr_pages: positive when adding or negative when removing
*
- * The callsite is then responsible for physically unlinking
- * @page->lru.
+ * This function must be called when a page is added to or removed from an
+ * lru list.
*/
-void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
+void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+ int nr_pages)
{
struct mem_cgroup_per_zone *mz;
- struct mem_cgroup *memcg;
- struct page_cgroup *pc;
+ unsigned long *lru_size;
if (mem_cgroup_disabled())
return;
- pc = lookup_page_cgroup(page);
- memcg = pc->mem_cgroup;
- VM_BUG_ON(!memcg);
- mz = page_cgroup_zoneinfo(memcg, page);
- /* huge page split is done under lru_lock. so, we have no races. */
- VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
- mz->lru_size[lru] -= 1 << compound_order(page);
-}
-
-void mem_cgroup_lru_del(struct page *page)
-{
- mem_cgroup_lru_del_list(page, page_lru(page));
-}
-
-/**
- * mem_cgroup_lru_move_lists - account for moving a page between lrus
- * @zone: zone of the page
- * @page: the page
- * @from: current lru
- * @to: target lru
- *
- * This function accounts for @page being moved between the lrus @from
- * and @to, and returns the lruvec for the given @zone and the memcg
- * @page is charged to.
- *
- * The callsite is then responsible for physically relinking
- * @page->lru to the returned lruvec->lists[@to].
- */
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
- struct page *page,
- enum lru_list from,
- enum lru_list to)
-{
- /* XXX: Optimize this, especially for @from == @to */
- mem_cgroup_lru_del_list(page, from);
- return mem_cgroup_lru_add_list(zone, page, to);
+ mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+ lru_size = mz->lru_size + lru;
+ *lru_size += nr_pages;
+ VM_BUG_ON((long)(*lru_size) < 0);
}
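
For reference, the caller pattern the lruvec API expects, assembled from call sites changed elsewhere in this patch (mm/compaction.c, __mem_cgroup_commit_charge()); this is an illustrative fragment, not added code, and it assumes the add/del_page_from_lru_list() helpers forward the size accounting to mem_cgroup_update_lru_size():

    /* Illustrative fragment: look the lruvec up once under zone->lru_lock,
     * then hand it to the LRU helpers for every add/remove on that page. */
    struct lruvec *lruvec;

    spin_lock_irq(&zone->lru_lock);
    lruvec = mem_cgroup_page_lruvec(page, zone);
    if (PageLRU(page)) {
            ClearPageLRU(page);
            del_page_from_lru_list(page, lruvec, page_lru(page));
            /* ...operate on the page while it is off the LRU... */
            SetPageLRU(page);
            add_page_to_lru_list(page, lruvec, page_lru(page));
    }
    spin_unlock_irq(&zone->lru_lock);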
/*
* Checks whether given mem is same or in the root_mem_cgroup's
* hierarchy subtree
*/
+bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
+ struct mem_cgroup *memcg)
+{
+ if (root_memcg == memcg)
+ return true;
+ if (!root_memcg->use_hierarchy)
+ return false;
+ return css_is_ancestor(&memcg->css, &root_memcg->css);
+}
+
static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
- struct mem_cgroup *memcg)
+ struct mem_cgroup *memcg)
{
- if (root_memcg != memcg) {
- return (root_memcg->use_hierarchy &&
- css_is_ancestor(&memcg->css, &root_memcg->css));
- }
+ bool ret;
- return true;
+ rcu_read_lock();
+ ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
+ rcu_read_unlock();
+ return ret;
}
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
@@ -1195,19 +1199,15 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
return ret;
}
-int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
+int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
unsigned long inactive_ratio;
- int nid = zone_to_nid(zone);
- int zid = zone_idx(zone);
unsigned long inactive;
unsigned long active;
unsigned long gb;
- inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
- BIT(LRU_INACTIVE_ANON));
- active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
- BIT(LRU_ACTIVE_ANON));
+ inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
+ active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
gb = (inactive + active) >> (30 - PAGE_SHIFT);
if (gb)
@@ -1218,49 +1218,17 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
return inactive * inactive_ratio < active;
}
-int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
+int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
{
unsigned long active;
unsigned long inactive;
- int zid = zone_idx(zone);
- int nid = zone_to_nid(zone);
- inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
- BIT(LRU_INACTIVE_FILE));
- active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
- BIT(LRU_ACTIVE_FILE));
+ inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_FILE);
+ active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_FILE);
return (active > inactive);
}
-struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
- struct zone *zone)
-{
- int nid = zone_to_nid(zone);
- int zid = zone_idx(zone);
- struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
-
- return &mz->reclaim_stat;
-}
-
-struct zone_reclaim_stat *
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
-{
- struct page_cgroup *pc;
- struct mem_cgroup_per_zone *mz;
-
- if (mem_cgroup_disabled())
- return NULL;
-
- pc = lookup_page_cgroup(page);
- if (!PageCgroupUsed(pc))
- return NULL;
- /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
- smp_rmb();
- mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
- return &mz->reclaim_stat;
-}
-
#define mem_cgroup_from_res_counter(counter, member) \
container_of(counter, struct mem_cgroup, member)
@@ -1634,7 +1602,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
 * unused nodes. But scan_nodes is lazily updated and may not contain
 * enough new information. We need to double check.
*/
-bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
{
int nid;
@@ -1669,7 +1637,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
return 0;
}
-bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
{
return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
}
@@ -1843,7 +1811,8 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
/*
* try to call OOM killer. returns false if we should exit memory-reclaim loop.
*/
-bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
+static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
+ int order)
{
struct oom_wait_info owait;
bool locked, need_to_kill;
@@ -1992,7 +1961,7 @@ struct memcg_stock_pcp {
unsigned int nr_pages;
struct work_struct work;
unsigned long flags;
-#define FLUSHING_CACHED_CHARGE (0)
+#define FLUSHING_CACHED_CHARGE 0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);
@@ -2139,7 +2108,7 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
int i;
spin_lock(&memcg->pcp_counter_lock);
- for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
+ for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
long x = per_cpu(memcg->stat->count[i], cpu);
per_cpu(memcg->stat->count[i], cpu) = 0;
@@ -2427,6 +2396,24 @@ static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
}
/*
+ * Cancel charges in this cgroup; this doesn't propagate to the parent cgroup.
+ * This is useful when moving usage to parent cgroup.
+ */
+static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
+ unsigned int nr_pages)
+{
+ unsigned long bytes = nr_pages * PAGE_SIZE;
+
+ if (mem_cgroup_is_root(memcg))
+ return;
+
+ res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
+ if (do_swap_account)
+ res_counter_uncharge_until(&memcg->memsw,
+ memcg->memsw.parent, bytes);
+}
+
+/*
 * A helper function to get a mem_cgroup from an ID. Must be called under
 * rcu_read_lock(). The caller must check css_is_removed() or similar if
 * that is a concern. (Dropping a refcnt from swap can be called against a removed
@@ -2481,6 +2468,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
{
struct page_cgroup *pc = lookup_page_cgroup(page);
struct zone *uninitialized_var(zone);
+ struct lruvec *lruvec;
bool was_on_lru = false;
bool anon;
@@ -2503,8 +2491,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
zone = page_zone(page);
spin_lock_irq(&zone->lru_lock);
if (PageLRU(page)) {
+ lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
ClearPageLRU(page);
- del_page_from_lru_list(zone, page, page_lru(page));
+ del_page_from_lru_list(page, lruvec, page_lru(page));
was_on_lru = true;
}
}
@@ -2522,9 +2511,10 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
if (lrucare) {
if (was_on_lru) {
+ lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
- add_page_to_lru_list(zone, page, page_lru(page));
+ add_page_to_lru_list(page, lruvec, page_lru(page));
}
spin_unlock_irq(&zone->lru_lock);
}
@@ -2547,7 +2537,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MIGRATION))
+#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
/*
* Because tail pages are not marked as "used", set it. We're under
* zone->lru_lock, 'splitting on pmd' and compound_lock.
@@ -2578,23 +2568,19 @@ void mem_cgroup_split_huge_fixup(struct page *head)
* @pc: page_cgroup of the page.
* @from: mem_cgroup which the page is moved from.
* @to: mem_cgroup which the page is moved to. @from != @to.
- * @uncharge: whether we should call uncharge and css_put against @from.
*
* The caller must confirm following.
* - page is not on LRU (isolate_page() is useful.)
* - compound_lock is held when nr_pages > 1
*
- * This function doesn't do "charge" nor css_get to new cgroup. It should be
- * done by a caller(__mem_cgroup_try_charge would be useful). If @uncharge is
- * true, this function does "uncharge" from old cgroup, but it doesn't if
- * @uncharge is false, so a caller should do "uncharge".
+ * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
+ * from old cgroup.
*/
static int mem_cgroup_move_account(struct page *page,
unsigned int nr_pages,
struct page_cgroup *pc,
struct mem_cgroup *from,
- struct mem_cgroup *to,
- bool uncharge)
+ struct mem_cgroup *to)
{
unsigned long flags;
int ret;
@@ -2628,9 +2614,6 @@ static int mem_cgroup_move_account(struct page *page,
preempt_enable();
}
mem_cgroup_charge_statistics(from, anon, -nr_pages);
- if (uncharge)
- /* This is not "cancel", but cancel_charge does all we need. */
- __mem_cgroup_cancel_charge(from, nr_pages);
/* caller should have done css_get */
pc->mem_cgroup = to;
@@ -2664,15 +2647,13 @@ static int mem_cgroup_move_parent(struct page *page,
struct mem_cgroup *child,
gfp_t gfp_mask)
{
- struct cgroup *cg = child->css.cgroup;
- struct cgroup *pcg = cg->parent;
struct mem_cgroup *parent;
unsigned int nr_pages;
unsigned long uninitialized_var(flags);
int ret;
/* Is ROOT ? */
- if (!pcg)
+ if (mem_cgroup_is_root(child))
return -EINVAL;
ret = -EBUSY;
@@ -2683,21 +2664,23 @@ static int mem_cgroup_move_parent(struct page *page,
nr_pages = hpage_nr_pages(page);
- parent = mem_cgroup_from_cont(pcg);
- ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
- if (ret)
- goto put_back;
+ parent = parent_mem_cgroup(child);
+ /*
+ * If no parent, move charges to root cgroup.
+ */
+ if (!parent)
+ parent = root_mem_cgroup;
if (nr_pages > 1)
flags = compound_lock_irqsave(page);
- ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
- if (ret)
- __mem_cgroup_cancel_charge(parent, nr_pages);
+ ret = mem_cgroup_move_account(page, nr_pages,
+ pc, child, parent);
+ if (!ret)
+ __mem_cgroup_cancel_local_charge(child, nr_pages);
if (nr_pages > 1)
compound_unlock_irqrestore(page, flags);
-put_back:
putback_lru_page(page);
put:
put_page(page);
@@ -2845,24 +2828,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
*/
if (do_swap_account && PageSwapCache(page)) {
swp_entry_t ent = {.val = page_private(page)};
- struct mem_cgroup *swap_memcg;
- unsigned short id;
-
- id = swap_cgroup_record(ent, 0);
- rcu_read_lock();
- swap_memcg = mem_cgroup_lookup(id);
- if (swap_memcg) {
- /*
- * This recorded memcg can be obsolete one. So, avoid
- * calling css_tryget
- */
- if (!mem_cgroup_is_root(swap_memcg))
- res_counter_uncharge(&swap_memcg->memsw,
- PAGE_SIZE);
- mem_cgroup_swap_statistics(swap_memcg, false);
- mem_cgroup_put(swap_memcg);
- }
- rcu_read_unlock();
+ mem_cgroup_uncharge_swap(ent);
}
/*
* At swapin, we may charge account against cgroup which has no tasks.
@@ -3155,7 +3121,6 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
* @entry: swap entry to be moved
* @from: mem_cgroup which the entry is moved from
* @to: mem_cgroup which the entry is moved to
- * @need_fixup: whether we should fixup res_counters and refcounts.
*
* It succeeds only when the swap_cgroup's record for this entry is the same
* as the mem_cgroup's id of @from.
@@ -3166,7 +3131,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
* both res and memsw, and called css_get().
*/
static int mem_cgroup_move_swap_account(swp_entry_t entry,
- struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
+ struct mem_cgroup *from, struct mem_cgroup *to)
{
unsigned short old_id, new_id;
@@ -3185,24 +3150,13 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
* swap-in, the refcount of @to might be decreased to 0.
*/
mem_cgroup_get(to);
- if (need_fixup) {
- if (!mem_cgroup_is_root(from))
- res_counter_uncharge(&from->memsw, PAGE_SIZE);
- mem_cgroup_put(from);
- /*
- * we charged both to->res and to->memsw, so we should
- * uncharge to->res.
- */
- if (!mem_cgroup_is_root(to))
- res_counter_uncharge(&to->res, PAGE_SIZE);
- }
return 0;
}
return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
- struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
+ struct mem_cgroup *from, struct mem_cgroup *to)
{
return -EINVAL;
}
@@ -3363,7 +3317,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
void mem_cgroup_replace_page_cache(struct page *oldpage,
struct page *newpage)
{
- struct mem_cgroup *memcg;
+ struct mem_cgroup *memcg = NULL;
struct page_cgroup *pc;
enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
@@ -3373,11 +3327,20 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
pc = lookup_page_cgroup(oldpage);
/* fix accounting on old pages */
lock_page_cgroup(pc);
- memcg = pc->mem_cgroup;
- mem_cgroup_charge_statistics(memcg, false, -1);
- ClearPageCgroupUsed(pc);
+ if (PageCgroupUsed(pc)) {
+ memcg = pc->mem_cgroup;
+ mem_cgroup_charge_statistics(memcg, false, -1);
+ ClearPageCgroupUsed(pc);
+ }
unlock_page_cgroup(pc);
+ /*
+ * When called from shmem_replace_page(), in some cases the
+ * oldpage has already been charged, and in some cases not.
+ */
+ if (!memcg)
+ return;
+
if (PageSwapBacked(oldpage))
type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
@@ -3793,7 +3756,7 @@ try_to_free:
goto move_account;
}
-int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
+static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
{
return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
}
@@ -4051,103 +4014,13 @@ static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
}
#endif
-
-/* For read statistics */
-enum {
- MCS_CACHE,
- MCS_RSS,
- MCS_FILE_MAPPED,
- MCS_PGPGIN,
- MCS_PGPGOUT,
- MCS_SWAP,
- MCS_PGFAULT,
- MCS_PGMAJFAULT,
- MCS_INACTIVE_ANON,
- MCS_ACTIVE_ANON,
- MCS_INACTIVE_FILE,
- MCS_ACTIVE_FILE,
- MCS_UNEVICTABLE,
- NR_MCS_STAT,
-};
-
-struct mcs_total_stat {
- s64 stat[NR_MCS_STAT];
-};
-
-struct {
- char *local_name;
- char *total_name;
-} memcg_stat_strings[NR_MCS_STAT] = {
- {"cache", "total_cache"},
- {"rss", "total_rss"},
- {"mapped_file", "total_mapped_file"},
- {"pgpgin", "total_pgpgin"},
- {"pgpgout", "total_pgpgout"},
- {"swap", "total_swap"},
- {"pgfault", "total_pgfault"},
- {"pgmajfault", "total_pgmajfault"},
- {"inactive_anon", "total_inactive_anon"},
- {"active_anon", "total_active_anon"},
- {"inactive_file", "total_inactive_file"},
- {"active_file", "total_active_file"},
- {"unevictable", "total_unevictable"}
-};
-
-
-static void
-mem_cgroup_get_local_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
-{
- s64 val;
-
- /* per cpu stat */
- val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE);
- s->stat[MCS_CACHE] += val * PAGE_SIZE;
- val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS);
- s->stat[MCS_RSS] += val * PAGE_SIZE;
- val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
- s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
- val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGIN);
- s->stat[MCS_PGPGIN] += val;
- val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGOUT);
- s->stat[MCS_PGPGOUT] += val;
- if (do_swap_account) {
- val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
- s->stat[MCS_SWAP] += val * PAGE_SIZE;
- }
- val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGFAULT);
- s->stat[MCS_PGFAULT] += val;
- val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT);
- s->stat[MCS_PGMAJFAULT] += val;
-
- /* per zone stat */
- val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
- s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
- val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));
- s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
- val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
- s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
- val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));
- s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
- val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
- s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
-}
-
-static void
-mem_cgroup_get_total_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
-{
- struct mem_cgroup *iter;
-
- for_each_mem_cgroup_tree(iter, memcg)
- mem_cgroup_get_local_stat(iter, s);
-}
-
#ifdef CONFIG_NUMA
-static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
+static int mem_control_numa_stat_show(struct cgroup *cont, struct cftype *cft,
+ struct seq_file *m)
{
int nid;
unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
unsigned long node_nr;
- struct cgroup *cont = m->private;
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
@@ -4188,64 +4061,100 @@ static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
}
#endif /* CONFIG_NUMA */
+static const char * const mem_cgroup_lru_names[] = {
+ "inactive_anon",
+ "active_anon",
+ "inactive_file",
+ "active_file",
+ "unevictable",
+};
+
+static inline void mem_cgroup_lru_names_not_uptodate(void)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
+}
+
static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
- struct cgroup_map_cb *cb)
+ struct seq_file *m)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
- struct mcs_total_stat mystat;
- int i;
-
- memset(&mystat, 0, sizeof(mystat));
- mem_cgroup_get_local_stat(memcg, &mystat);
+ struct mem_cgroup *mi;
+ unsigned int i;
-
- for (i = 0; i < NR_MCS_STAT; i++) {
- if (i == MCS_SWAP && !do_swap_account)
+ for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
+ if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
continue;
- cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
+ seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
+ mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
}
+ for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
+ seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
+ mem_cgroup_read_events(memcg, i));
+
+ for (i = 0; i < NR_LRU_LISTS; i++)
+ seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
+ mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
+
/* Hierarchical information */
{
unsigned long long limit, memsw_limit;
memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
- cb->fill(cb, "hierarchical_memory_limit", limit);
+ seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
if (do_swap_account)
- cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
+ seq_printf(m, "hierarchical_memsw_limit %llu\n",
+ memsw_limit);
}
- memset(&mystat, 0, sizeof(mystat));
- mem_cgroup_get_total_stat(memcg, &mystat);
- for (i = 0; i < NR_MCS_STAT; i++) {
- if (i == MCS_SWAP && !do_swap_account)
+ for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
+ long long val = 0;
+
+ if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
continue;
- cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
+ for_each_mem_cgroup_tree(mi, memcg)
+ val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
+ seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
+ }
+
+ for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
+ unsigned long long val = 0;
+
+ for_each_mem_cgroup_tree(mi, memcg)
+ val += mem_cgroup_read_events(mi, i);
+ seq_printf(m, "total_%s %llu\n",
+ mem_cgroup_events_names[i], val);
+ }
+
+ for (i = 0; i < NR_LRU_LISTS; i++) {
+ unsigned long long val = 0;
+
+ for_each_mem_cgroup_tree(mi, memcg)
+ val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
+ seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
}
#ifdef CONFIG_DEBUG_VM
{
int nid, zid;
struct mem_cgroup_per_zone *mz;
+ struct zone_reclaim_stat *rstat;
unsigned long recent_rotated[2] = {0, 0};
unsigned long recent_scanned[2] = {0, 0};
for_each_online_node(nid)
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+ rstat = &mz->lruvec.reclaim_stat;
- recent_rotated[0] +=
- mz->reclaim_stat.recent_rotated[0];
- recent_rotated[1] +=
- mz->reclaim_stat.recent_rotated[1];
- recent_scanned[0] +=
- mz->reclaim_stat.recent_scanned[0];
- recent_scanned[1] +=
- mz->reclaim_stat.recent_scanned[1];
+ recent_rotated[0] += rstat->recent_rotated[0];
+ recent_rotated[1] += rstat->recent_rotated[1];
+ recent_scanned[0] += rstat->recent_scanned[0];
+ recent_scanned[1] += rstat->recent_scanned[1];
}
- cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
- cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
- cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
- cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
+ seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
+ seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
+ seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
+ seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
}
#endif
@@ -4307,7 +4216,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
usage = mem_cgroup_usage(memcg, swap);
/*
- * current_threshold points to threshold just below usage.
+ * current_threshold points to threshold just below or equal to usage.
* If it's not true, a threshold was crossed after last
* call of __mem_cgroup_threshold().
*/
@@ -4433,14 +4342,15 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
/* Find current threshold */
new->current_threshold = -1;
for (i = 0; i < size; i++) {
- if (new->entries[i].threshold < usage) {
+ if (new->entries[i].threshold <= usage) {
/*
* new->current_threshold will not be used until
* rcu_assign_pointer(), so it's safe to increment
* it here.
*/
++new->current_threshold;
- }
+ } else
+ break;
}
/* Free old spare buffer and save old primary buffer as spare */
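
Together with the matching change in the unregister path below, the comparison is now <= so that current_threshold lands on the last threshold that is below or equal to usage, and the loop can break early because entries[] is sorted ascending. A standalone model of the search (plain C, not kernel code; the numbers are made up):

    #include <stdio.h>

    int main(void)
    {
            /* Sorted ascending, e.g. usage thresholds in MiB. */
            unsigned long long entries[] = { 4, 8, 16, 32 };
            unsigned long long usage = 8;
            int current_threshold = -1;

            for (int i = 0; i < 4; i++) {
                    if (entries[i] <= usage)
                            ++current_threshold;    /* last entry <= usage */
                    else
                            break;                  /* sorted: nothing more fits */
            }
            printf("current_threshold = %d\n", current_threshold); /* prints 1 */
            return 0;
    }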
@@ -4509,7 +4419,7 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
continue;
new->entries[j] = thresholds->primary->entries[i];
- if (new->entries[j].threshold < usage) {
+ if (new->entries[j].threshold <= usage) {
/*
* new->current_threshold will not be used
* until rcu_assign_pointer(), so it's safe to increment
@@ -4623,22 +4533,6 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
return 0;
}
-#ifdef CONFIG_NUMA
-static const struct file_operations mem_control_numa_stat_file_operations = {
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
-{
- struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
-
- file->f_op = &mem_control_numa_stat_file_operations;
- return single_open(file, mem_control_numa_stat_show, cont);
-}
-#endif /* CONFIG_NUMA */
-
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
@@ -4694,7 +4588,7 @@ static struct cftype mem_cgroup_files[] = {
},
{
.name = "stat",
- .read_map = mem_control_stat_show,
+ .read_seq_string = mem_control_stat_show,
},
{
.name = "force_empty",
@@ -4726,8 +4620,7 @@ static struct cftype mem_cgroup_files[] = {
#ifdef CONFIG_NUMA
{
.name = "numa_stat",
- .open = mem_control_numa_stat_open,
- .mode = S_IRUGO,
+ .read_seq_string = mem_control_numa_stat_show,
},
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -4764,7 +4657,6 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
struct mem_cgroup_per_node *pn;
struct mem_cgroup_per_zone *mz;
- enum lru_list lru;
int zone, tmp = node;
/*
* This routine is called against possible nodes.
@@ -4782,8 +4674,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
mz = &pn->zoneinfo[zone];
- for_each_lru(lru)
- INIT_LIST_HEAD(&mz->lruvec.lists[lru]);
+ lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
mz->usage_in_excess = 0;
mz->on_tree = false;
mz->memcg = memcg;
@@ -4826,23 +4717,40 @@ out_free:
}
/*
- * Helpers for freeing a vzalloc()ed mem_cgroup by RCU,
+ * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
* but in process context. The work_freeing structure is overlaid
* on the rcu_freeing structure, which itself is overlaid on memsw.
*/
-static void vfree_work(struct work_struct *work)
+static void free_work(struct work_struct *work)
{
struct mem_cgroup *memcg;
+ int size = sizeof(struct mem_cgroup);
memcg = container_of(work, struct mem_cgroup, work_freeing);
- vfree(memcg);
+ /*
+ * We need to make sure that (at least for now), the jump label
+ * destruction code runs outside of the cgroup lock. This is because
+ * get_online_cpus(), which is called from the static_branch update,
+ * can't be called inside the cgroup_lock. cpusets are the ones
+ * enforcing this dependency, so if they ever change, this may need to change as well.
+ *
+ * schedule_work() will guarantee this happens. Be careful if you need
+ * to move this code around, and make sure it is outside
+ * the cgroup_lock.
+ */
+ disarm_sock_keys(memcg);
+ if (size < PAGE_SIZE)
+ kfree(memcg);
+ else
+ vfree(memcg);
}
-static void vfree_rcu(struct rcu_head *rcu_head)
+
+static void free_rcu(struct rcu_head *rcu_head)
{
struct mem_cgroup *memcg;
memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
- INIT_WORK(&memcg->work_freeing, vfree_work);
+ INIT_WORK(&memcg->work_freeing, free_work);
schedule_work(&memcg->work_freeing);
}
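
free_rcu() and free_work() above split the teardown into two stages: the RCU callback only queues work, and the work callback, running in process context, picks kfree() or vfree() by the object's size. A rough userspace model of that hand-off, with the deferral collapsed into a direct call and all names invented for the sketch:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL	/* assumption for this sketch */

struct obj {
	size_t size;
	void (*work_fn)(struct obj *);	/* stands in for work_struct */
};

static void free_work(struct obj *o)
{
	if (o->size < PAGE_SIZE)
		printf("kfree() object of %zu bytes\n", o->size);	/* small: slab */
	else
		printf("vfree() object of %zu bytes\n", o->size);	/* large: vmalloc */
	free(o);
}

static void free_rcu(struct obj *o)
{
	/* RCU-callback context: do nothing heavy, just queue the work */
	o->work_fn = free_work;
	o->work_fn(o);		/* a real workqueue would run this later */
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->size = 2048;		/* below PAGE_SIZE, so the "kfree" path */
	free_rcu(o);
	return 0;
}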
@@ -4868,10 +4776,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
free_mem_cgroup_per_zone_info(memcg, node);
free_percpu(memcg->stat);
- if (sizeof(struct mem_cgroup) < PAGE_SIZE)
- kfree_rcu(memcg, rcu_freeing);
- else
- call_rcu(&memcg->rcu_freeing, vfree_rcu);
+ call_rcu(&memcg->rcu_freeing, free_rcu);
}
static void mem_cgroup_get(struct mem_cgroup *memcg)
@@ -5135,7 +5040,7 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
return NULL;
if (PageAnon(page)) {
/* we don't move shared anon */
- if (!move_anon() || page_mapcount(page) > 2)
+ if (!move_anon())
return NULL;
} else if (!move_file())
/* we ignore mapcount for file pages */
@@ -5146,32 +5051,37 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
return page;
}
+#ifdef CONFIG_SWAP
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
- int usage_count;
struct page *page = NULL;
swp_entry_t ent = pte_to_swp_entry(ptent);
if (!move_anon() || non_swap_entry(ent))
return NULL;
- usage_count = mem_cgroup_count_swap_user(ent, &page);
- if (usage_count > 1) { /* we don't move shared anon */
- if (page)
- put_page(page);
- return NULL;
- }
+ /*
+ * Because lookup_swap_cache() updates some statistics counter,
+ * we call find_get_page() with swapper_space directly.
+ */
+ page = find_get_page(&swapper_space, ent.val);
if (do_swap_account)
entry->val = ent.val;
return page;
}
+#else
+static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
+ unsigned long addr, pte_t ptent, swp_entry_t *entry)
+{
+ return NULL;
+}
+#endif
static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
struct page *page = NULL;
- struct inode *inode;
struct address_space *mapping;
pgoff_t pgoff;
@@ -5180,7 +5090,6 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
if (!move_file())
return NULL;
- inode = vma->vm_file->f_path.dentry->d_inode;
mapping = vma->vm_file->f_mapping;
if (pte_none(ptent))
pgoff = linear_page_index(vma, addr);
@@ -5479,8 +5388,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
if (!isolate_lru_page(page)) {
pc = lookup_page_cgroup(page);
if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
- pc, mc.from, mc.to,
- false)) {
+ pc, mc.from, mc.to)) {
mc.precharge -= HPAGE_PMD_NR;
mc.moved_charge += HPAGE_PMD_NR;
}
@@ -5510,7 +5418,7 @@ retry:
goto put;
pc = lookup_page_cgroup(page);
if (!mem_cgroup_move_account(page, 1, pc,
- mc.from, mc.to, false)) {
+ mc.from, mc.to)) {
mc.precharge--;
/* we uncharge from mc.from later. */
mc.moved_charge++;
@@ -5521,8 +5429,7 @@ put: /* get_mctgt_type() gets the page */
break;
case MC_TARGET_SWAP:
ent = target.ent;
- if (!mem_cgroup_move_swap_account(ent,
- mc.from, mc.to, false)) {
+ if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
mc.precharge--;
/* we fixup refcnts and charges later. */
mc.moved_swap++;
@@ -5598,7 +5505,6 @@ static void mem_cgroup_move_task(struct cgroup *cont,
if (mm) {
if (mc.to)
mem_cgroup_move_charge(mm);
- put_swap_token(mm);
mmput(mm);
}
if (mc.to)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 97cc2733551a..ab1e7145e290 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1388,23 +1388,23 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
*/
if (!get_page_unless_zero(compound_head(p))) {
if (PageHuge(p)) {
- pr_info("get_any_page: %#lx free huge page\n", pfn);
+ pr_info("%s: %#lx free huge page\n", __func__, pfn);
ret = dequeue_hwpoisoned_huge_page(compound_head(p));
} else if (is_free_buddy_page(p)) {
- pr_info("get_any_page: %#lx free buddy page\n", pfn);
+ pr_info("%s: %#lx free buddy page\n", __func__, pfn);
/* Set hwpoison bit while page is still isolated */
SetPageHWPoison(p);
ret = 0;
} else {
- pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n",
- pfn, p->flags);
+ pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
+ __func__, pfn, p->flags);
ret = -EIO;
}
} else {
/* Not a free page */
ret = 1;
}
- unset_migratetype_isolate(p);
+ unset_migratetype_isolate(p, MIGRATE_MOVABLE);
unlock_memory_hotplug();
return ret;
}
diff --git a/mm/memory.c b/mm/memory.c
index 1e77da6d82c1..1b7dc662bf9f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1307,6 +1307,9 @@ static void unmap_single_vma(struct mmu_gather *tlb,
if (end <= vma->vm_start)
return;
+ if (vma->vm_file)
+ uprobe_munmap(vma, start, end);
+
if (unlikely(is_pfn_mapping(vma)))
untrack_pfn_vma(vma, 0, 0);
@@ -2905,7 +2908,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
delayacct_set_flag(DELAYACCT_PF_SWAPIN);
page = lookup_swap_cache(entry);
if (!page) {
- grab_swap_token(mm); /* Contend for token _before_ read-in */
page = swapin_readahead(entry,
GFP_HIGHUSER_MOVABLE, vma, address);
if (!page) {
@@ -2935,6 +2937,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
}
locked = lock_page_or_retry(page, mm, flags);
+
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
if (!locked) {
ret |= VM_FAULT_RETRY;
@@ -3483,6 +3486,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
+retry:
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
@@ -3496,13 +3500,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pmd, flags);
} else {
pmd_t orig_pmd = *pmd;
+ int ret;
+
barrier();
if (pmd_trans_huge(orig_pmd)) {
if (flags & FAULT_FLAG_WRITE &&
!pmd_write(orig_pmd) &&
- !pmd_trans_splitting(orig_pmd))
- return do_huge_pmd_wp_page(mm, vma, address,
- pmd, orig_pmd);
+ !pmd_trans_splitting(orig_pmd)) {
+ ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
+ orig_pmd);
+ /*
+ * If COW results in an oom, the huge pmd will
+ * have been split, so retry the fault on the
+ * pte for a smaller charge.
+ */
+ if (unlikely(ret & VM_FAULT_OOM))
+ goto retry;
+ return ret;
+ }
return 0;
}
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6629fafd6ce4..0d7e3ec8e0f3 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -74,8 +74,7 @@ static struct resource *register_memory_resource(u64 start, u64 size)
res->end = start + size - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, res) < 0) {
- printk("System RAM resource %llx - %llx cannot be added\n",
- (unsigned long long)res->start, (unsigned long long)res->end);
+ printk("System RAM resource %pR cannot be added\n", res);
kfree(res);
res = NULL;
}
@@ -502,8 +501,10 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
online_pages_range);
if (ret) {
mutex_unlock(&zonelists_mutex);
- printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
- nr_pages, pfn);
+ printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
+ (unsigned long long) pfn << PAGE_SHIFT,
+ (((unsigned long long) pfn + nr_pages)
+ << PAGE_SHIFT) - 1);
memory_notify(MEM_CANCEL_ONLINE, &arg);
unlock_memory_hotplug();
return ret;
@@ -891,7 +892,7 @@ static int __ref offline_pages(unsigned long start_pfn,
nr_pages = end_pfn - start_pfn;
/* set above range as isolated */
- ret = start_isolate_page_range(start_pfn, end_pfn);
+ ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
if (ret)
goto out;
@@ -956,7 +957,7 @@ repeat:
We cannot do rollback at this point. */
offline_isolated_pages(start_pfn, end_pfn);
/* reset pagetype flags and makes migrate type to be MOVABLE */
- undo_isolate_page_range(start_pfn, end_pfn);
+ undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
/* removal success */
zone->present_pages -= offlined_pages;
zone->zone_pgdat->node_present_pages -= offlined_pages;
@@ -977,11 +978,12 @@ repeat:
return 0;
failed_removal:
- printk(KERN_INFO "memory offlining %lx to %lx failed\n",
- start_pfn, end_pfn);
+ printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
+ (unsigned long long) start_pfn << PAGE_SHIFT,
+ ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
memory_notify(MEM_CANCEL_OFFLINE, &arg);
/* pushback to free area */
- undo_isolate_page_range(start_pfn, end_pfn);
+ undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
out:
unlock_memory_hotplug();
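
Both failure messages now print an inclusive physical byte range instead of raw PFNs: the start PFN shifted up by PAGE_SHIFT, and the (exclusive) end PFN shifted up minus one byte. A quick standalone check of that arithmetic, assuming 4 KiB pages and made-up PFNs:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption for this sketch: 4 KiB pages */

int main(void)
{
	unsigned long start_pfn = 0x10000, end_pfn = 0x10800;	/* illustrative */

	/* end_pfn is exclusive, so the last byte is one below its start */
	printf("memory offlining [mem %#010llx-%#010llx] failed\n",
	       (unsigned long long)start_pfn << PAGE_SHIFT,
	       ((unsigned long long)end_pfn << PAGE_SHIFT) - 1);
	return 0;
}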
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 88f9422b92e7..f15c1b24ca18 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -390,7 +390,7 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
{
if (!pol)
return;
- if (!mpol_store_user_nodemask(pol) && step == 0 &&
+ if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
return;
@@ -950,8 +950,8 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
*
* Returns the number of page that could not be moved.
*/
-int do_migrate_pages(struct mm_struct *mm,
- const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
+int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+ const nodemask_t *to, int flags)
{
int busy = 0;
int err;
@@ -963,7 +963,7 @@ int do_migrate_pages(struct mm_struct *mm,
down_read(&mm->mmap_sem);
- err = migrate_vmas(mm, from_nodes, to_nodes, flags);
+ err = migrate_vmas(mm, from, to, flags);
if (err)
goto out;
@@ -998,14 +998,34 @@ int do_migrate_pages(struct mm_struct *mm,
* moved to an empty node, then there is nothing left worth migrating.
*/
- tmp = *from_nodes;
+ tmp = *from;
while (!nodes_empty(tmp)) {
int s,d;
int source = -1;
int dest = 0;
for_each_node_mask(s, tmp) {
- d = node_remap(s, *from_nodes, *to_nodes);
+
+ /*
+ * do_migrate_pages() tries to maintain the relative
+ * node relationship of the pages established between
+ * threads and memory areas.
+ *
+ * However, if the number of source nodes is not equal to
+ * the number of destination nodes, we cannot preserve
+ * this relative node relationship. In that case, skip
+ * copying memory from a node that is in the destination
+ * mask.
+ *
+ * Example: [2,3,4] -> [3,4,5] moves everything.
+ * [0-7] -> [3,4,5] moves only 0,1,2,6,7.
+ */
+
+ if ((nodes_weight(*from) != nodes_weight(*to)) &&
+ (node_isset(s, *to)))
+ continue;
+
+ d = node_remap(s, *from, *to);
if (s == d)
continue;
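
The comment's example can be verified directly: when the source and destination masks have different weights, any source node that already sits in the destination mask is skipped. A standalone model of that selection using plain bitmasks in place of nodemask_t (not kernel code):

#include <stdio.h>

/* Popcount of a small node bitmask, standing in for nodes_weight(). */
static int weight(unsigned long mask)
{
	int w = 0;

	while (mask) {
		w += mask & 1;
		mask >>= 1;
	}
	return w;
}

int main(void)
{
	unsigned long from = 0xff;	/* nodes 0-7   */
	unsigned long to   = 0x38;	/* nodes 3,4,5 */
	int s;

	for (s = 0; s < 8; s++) {
		if (!(from & (1UL << s)))
			continue;
		/* Differing weights: skip sources already in the destination */
		if (weight(from) != weight(to) && (to & (1UL << s)))
			continue;
		printf("would migrate node %d\n", s);
	}
	return 0;	/* prints nodes 0,1,2,6,7 -- matching the comment */
}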
@@ -1065,8 +1085,8 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
{
}
-int do_migrate_pages(struct mm_struct *mm,
- const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
+int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+ const nodemask_t *to, int flags)
{
return -ENOSYS;
}
diff --git a/mm/mmap.c b/mm/mmap.c
index 69a1889f3790..4a9c2a391e28 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -30,6 +30,7 @@
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
+#include <linux/uprobes.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
@@ -546,8 +547,15 @@ again: remove_next = 1 + (end > next->vm_end);
if (file) {
mapping = file->f_mapping;
- if (!(vma->vm_flags & VM_NONLINEAR))
+ if (!(vma->vm_flags & VM_NONLINEAR)) {
root = &mapping->i_mmap;
+ uprobe_munmap(vma, vma->vm_start, vma->vm_end);
+
+ if (adjust_next)
+ uprobe_munmap(next, next->vm_start,
+ next->vm_end);
+ }
+
mutex_lock(&mapping->i_mmap_mutex);
if (insert) {
/*
@@ -617,8 +625,16 @@ again: remove_next = 1 + (end > next->vm_end);
if (mapping)
mutex_unlock(&mapping->i_mmap_mutex);
+ if (root) {
+ uprobe_mmap(vma);
+
+ if (adjust_next)
+ uprobe_mmap(next);
+ }
+
if (remove_next) {
if (file) {
+ uprobe_munmap(next, next->vm_start, next->vm_end);
fput(file);
if (next->vm_flags & VM_EXECUTABLE)
removed_exe_file_vma(mm);
@@ -638,6 +654,8 @@ again: remove_next = 1 + (end > next->vm_end);
goto again;
}
}
+ if (insert && file)
+ uprobe_mmap(insert);
validate_mm(mm);
@@ -1371,6 +1389,11 @@ out:
mm->locked_vm += (len >> PAGE_SHIFT);
} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
make_pages_present(addr, addr + len);
+
+ if (file && uprobe_mmap(vma))
+ /* matching probes but cannot insert */
+ goto unmap_and_free_vma;
+
return addr;
unmap_and_free_vma:
@@ -1616,33 +1639,34 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma = NULL;
- if (mm) {
- /* Check the cache first. */
- /* (Cache hit rate is typically around 35%.) */
- vma = mm->mmap_cache;
- if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
- struct rb_node * rb_node;
-
- rb_node = mm->mm_rb.rb_node;
- vma = NULL;
-
- while (rb_node) {
- struct vm_area_struct * vma_tmp;
-
- vma_tmp = rb_entry(rb_node,
- struct vm_area_struct, vm_rb);
-
- if (vma_tmp->vm_end > addr) {
- vma = vma_tmp;
- if (vma_tmp->vm_start <= addr)
- break;
- rb_node = rb_node->rb_left;
- } else
- rb_node = rb_node->rb_right;
- }
- if (vma)
- mm->mmap_cache = vma;
+ if (WARN_ON_ONCE(!mm)) /* Remove this in linux-3.6 */
+ return NULL;
+
+ /* Check the cache first. */
+ /* (Cache hit rate is typically around 35%.) */
+ vma = mm->mmap_cache;
+ if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
+ struct rb_node *rb_node;
+
+ rb_node = mm->mm_rb.rb_node;
+ vma = NULL;
+
+ while (rb_node) {
+ struct vm_area_struct *vma_tmp;
+
+ vma_tmp = rb_entry(rb_node,
+ struct vm_area_struct, vm_rb);
+
+ if (vma_tmp->vm_end > addr) {
+ vma = vma_tmp;
+ if (vma_tmp->vm_start <= addr)
+ break;
+ rb_node = rb_node->rb_left;
+ } else
+ rb_node = rb_node->rb_right;
}
+ if (vma)
+ mm->mmap_cache = vma;
}
return vma;
}
@@ -2358,6 +2382,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
if ((vma->vm_flags & VM_ACCOUNT) &&
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
+
+ if (vma->vm_file && uprobe_mmap(vma))
+ return -EINVAL;
+
vma_link(mm, vma, prev, rb_link, rb_parent);
return 0;
}
@@ -2427,6 +2455,10 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
new_vma->vm_pgoff = pgoff;
if (new_vma->vm_file) {
get_file(new_vma->vm_file);
+
+ if (uprobe_mmap(new_vma))
+ goto out_free_mempol;
+
if (vma->vm_flags & VM_EXECUTABLE)
added_exe_file_vma(mm);
}
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 7cf7b7ddc7c5..6830eab5bf09 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -86,3 +86,17 @@ int memmap_valid_within(unsigned long pfn,
return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
+void lruvec_init(struct lruvec *lruvec, struct zone *zone)
+{
+ enum lru_list lru;
+
+ memset(lruvec, 0, sizeof(struct lruvec));
+
+ for_each_lru(lru)
+ INIT_LIST_HEAD(&lruvec->lists[lru]);
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+ lruvec->zone = zone;
+#endif
+}
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 1983fb1c7026..d23415c001bc 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -274,86 +274,85 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
return ___alloc_bootmem(size, align, goal, limit);
}
-/**
- * __alloc_bootmem_node - allocate boot memory from a specific node
- * @pgdat: node to allocate from
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may fall back to any node in the system if the specified node
- * can not hold the requested memory.
- *
- * The function panics if the request can not be satisfied.
- */
-void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
+static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal,
+ unsigned long limit)
{
void *ptr;
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
again:
ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
- goal, -1ULL);
+ goal, limit);
if (ptr)
return ptr;
ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
- goal, -1ULL);
- if (!ptr && goal) {
+ goal, limit);
+ if (ptr)
+ return ptr;
+
+ if (goal) {
goal = 0;
goto again;
}
- return ptr;
+
+ return NULL;
}
-void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
+void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
- return __alloc_bootmem_node(pgdat, size, align, goal);
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+ return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}
-#ifdef CONFIG_SPARSEMEM
-/**
- * alloc_bootmem_section - allocate boot memory from a specific section
- * @size: size of the request in bytes
- * @section_nr: sparse map section to allocate from
- *
- * Return NULL on failure.
- */
-void * __init alloc_bootmem_section(unsigned long size,
- unsigned long section_nr)
+void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal,
+ unsigned long limit)
{
- unsigned long pfn, goal, limit;
+ void *ptr;
- pfn = section_nr_to_pfn(section_nr);
- goal = pfn << PAGE_SHIFT;
- limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
+ ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
+ if (ptr)
+ return ptr;
- return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
- SMP_CACHE_BYTES, goal, limit);
+ printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+ panic("Out of memory");
+ return NULL;
}
-#endif
-void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
+/**
+ * __alloc_bootmem_node - allocate boot memory from a specific node
+ * @pgdat: node to allocate from
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may fall back to any node in the system if the specified node
+ * can not hold the requested memory.
+ *
+ * The function panics if the request can not be satisfied.
+ */
+void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
- void *ptr;
-
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
- ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
- goal, -1ULL);
- if (ptr)
- return ptr;
+ return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
+}
- return __alloc_bootmem_nopanic(size, align, goal);
+void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+ return __alloc_bootmem_node(pgdat, size, align, goal);
}
#ifndef ARCH_LOW_ADDRESS_LIMIT
@@ -397,16 +396,9 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
- void *ptr;
-
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
- ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
- goal, ARCH_LOW_ADDRESS_LIMIT);
- if (ptr)
- return ptr;
-
- return __alloc_memory_core_early(MAX_NUMNODES, size, align,
- goal, ARCH_LOW_ADDRESS_LIMIT);
+ return ___alloc_bootmem_node(pgdat, size, align, goal,
+ ARCH_LOW_ADDRESS_LIMIT);
}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 9f09a1fde9f9..ed0e19677360 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -180,10 +180,10 @@ static bool oom_unkillable_task(struct task_struct *p,
* predictable as possible. The goal is to return the highest value for the
* task consuming the most memory to avoid subsequent oom failures.
*/
-unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
- const nodemask_t *nodemask, unsigned long totalpages)
+unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
+ const nodemask_t *nodemask, unsigned long totalpages)
{
- long points;
+ unsigned long points;
if (oom_unkillable_task(p, memcg, nodemask))
return 0;
@@ -198,21 +198,11 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
}
/*
- * The memory controller may have a limit of 0 bytes, so avoid a divide
- * by zero, if necessary.
- */
- if (!totalpages)
- totalpages = 1;
-
- /*
* The baseline for the badness score is the proportion of RAM that each
* task's rss, pagetable and swap space use.
*/
- points = get_mm_rss(p->mm) + p->mm->nr_ptes;
- points += get_mm_counter(p->mm, MM_SWAPENTS);
-
- points *= 1000;
- points /= totalpages;
+ points = get_mm_rss(p->mm) + p->mm->nr_ptes +
+ get_mm_counter(p->mm, MM_SWAPENTS);
task_unlock(p);
/*
@@ -220,23 +210,20 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
* implementation used by LSMs.
*/
if (has_capability_noaudit(p, CAP_SYS_ADMIN))
- points -= 30;
+ points -= 30 * totalpages / 1000;
/*
* /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
* either completely disable oom killing or always prefer a certain
* task.
*/
- points += p->signal->oom_score_adj;
+ points += p->signal->oom_score_adj * totalpages / 1000;
/*
- * Never return 0 for an eligible task that may be killed since it's
- * possible that no single user task uses more than 0.1% of memory and
- * no single admin tasks uses more than 3.0%.
+ * Never return 0 for an eligible task regardless of the root bonus and
+ * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
*/
- if (points <= 0)
- return 1;
- return (points < 1000) ? points : 1000;
+ return points ? points : 1;
}
/*
@@ -314,7 +301,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
{
struct task_struct *g, *p;
struct task_struct *chosen = NULL;
- *ppoints = 0;
+ unsigned long chosen_points = 0;
do_each_thread(g, p) {
unsigned int points;
@@ -354,7 +341,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
*/
if (p == current) {
chosen = p;
- *ppoints = 1000;
+ chosen_points = ULONG_MAX;
} else if (!force_kill) {
/*
* If this task is not being ptraced on exit,
@@ -367,12 +354,13 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
}
points = oom_badness(p, memcg, nodemask, totalpages);
- if (points > *ppoints) {
+ if (points > chosen_points) {
chosen = p;
- *ppoints = points;
+ chosen_points = points;
}
} while_each_thread(g, p);
+ *ppoints = chosen_points * 1000 / totalpages;
return chosen;
}
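
After this change oom_badness() works in raw pages (rss + page-table pages + swap entries), the CAP_SYS_ADMIN bonus and oom_score_adj are scaled by totalpages / 1000, and only the final report in select_bad_process() maps the winner back onto the familiar 0..1000 scale. A small arithmetic sketch with made-up numbers (clamping and the negative-adj case are ignored here):

#include <stdio.h>

int main(void)
{
	unsigned long totalpages = 1UL << 20;	/* e.g. 4 GiB of 4 KiB pages */
	unsigned long rss = 200000, nr_ptes = 500, swapents = 1000;
	long oom_score_adj = 300;		/* range -1000..1000 */
	unsigned long points;

	points = rss + nr_ptes + swapents;
	/* adj is applied in page units rather than on a 0-1000 score */
	points += oom_score_adj * totalpages / 1000;

	/* the 0-1000 value reported via *ppoints */
	printf("badness %lu pages -> score %lu\n",
	       points, points * 1000 / totalpages);
	return 0;
}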
@@ -572,7 +560,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
}
check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
- limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT;
+ limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
read_lock(&tasklist_lock);
p = select_bad_process(&points, limit, memcg, NULL, false);
if (p && PTR_ERR(p) != -1UL)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 26adea8ca2e7..93d8d2f7108c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -204,7 +204,7 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
* Returns the global number of pages potentially available for dirty
* page cache. This is the base value for the global dirty limits.
*/
-unsigned long global_dirtyable_memory(void)
+static unsigned long global_dirtyable_memory(void)
{
unsigned long x;
@@ -1568,6 +1568,7 @@ void writeback_set_ratelimit(void)
unsigned long background_thresh;
unsigned long dirty_thresh;
global_dirty_limits(&background_thresh, &dirty_thresh);
+ global_dirty_limit = dirty_thresh;
ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
if (ratelimit_pages < 16)
ratelimit_pages = 16;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9f389e50ed18..6092f331b32e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -57,6 +57,7 @@
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
+#include <linux/migrate.h>
#include <linux/page-debug-flags.h>
#include <asm/tlbflush.h>
@@ -218,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes);
int page_group_by_mobility_disabled __read_mostly;
-static void set_pageblock_migratetype(struct page *page, int migratetype)
+void set_pageblock_migratetype(struct page *page, int migratetype)
{
if (unlikely(page_group_by_mobility_disabled))
@@ -513,10 +514,10 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
* free pages of length (1 << order) and marked with _mapcount -2. Page's
* order is recorded in the page_private(page) field.
* So when we are allocating or freeing one, we can derive the state of the
- * other. That is, if we allocate a small block, and both were
- * free, the remainder of the region must be split into blocks.
+ * other. That is, if we allocate a small block, and both were
+ * free, the remainder of the region must be split into blocks.
* If a block is freed, and its buddy is also free, then this
- * triggers coalescing into a block of larger size.
+ * triggers coalescing into a block of larger size.
*
* -- wli
*/
@@ -749,6 +750,24 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
__free_pages(page, order);
}
+#ifdef CONFIG_CMA
+/* Free the whole pageblock and set its migration type to MIGRATE_CMA. */
+void __init init_cma_reserved_pageblock(struct page *page)
+{
+ unsigned i = pageblock_nr_pages;
+ struct page *p = page;
+
+ do {
+ __ClearPageReserved(p);
+ set_page_count(p, 0);
+ } while (++p, --i);
+
+ set_page_refcounted(page);
+ set_pageblock_migratetype(page, MIGRATE_CMA);
+ __free_pages(page, pageblock_order);
+ totalram_pages += pageblock_nr_pages;
+}
+#endif
/*
* The order of subdivision here is critical for the IO subsystem.
@@ -874,11 +893,17 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
* This array describes the order lists are fallen back to when
* the free lists for the desirable migrate type are depleted
*/
-static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
- [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
- [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
- [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
- [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */
+static int fallbacks[MIGRATE_TYPES][4] = {
+ [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
+ [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
+#ifdef CONFIG_CMA
+ [MIGRATE_MOVABLE] = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+ [MIGRATE_CMA] = { MIGRATE_RESERVE }, /* Never used */
+#else
+ [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+#endif
+ [MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
+ [MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
};
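
The fallback lists are now variable length and terminated by MIGRATE_RESERVE, so the lookup in __rmqueue_fallback() runs until it meets that sentinel instead of iterating a fixed MIGRATE_TYPES - 1 entries. A minimal standalone model of scanning such a sentinel-terminated table (enum values and table contents are simplified, not the kernel definitions):

#include <stdio.h>

enum { UNMOVABLE, RECLAIMABLE, MOVABLE, CMA, RESERVE, ISOLATE, NR_TYPES };

/* Mirrors the shape of the kernel's fallbacks[][4]; illustrative only. */
static const int fallbacks[NR_TYPES][4] = {
	[UNMOVABLE]   = { RECLAIMABLE, MOVABLE, RESERVE },
	[RECLAIMABLE] = { UNMOVABLE, MOVABLE, RESERVE },
	[MOVABLE]     = { CMA, RECLAIMABLE, UNMOVABLE, RESERVE },
	[CMA]         = { RESERVE },
	[RESERVE]     = { RESERVE },
	[ISOLATE]     = { RESERVE },
};

int main(void)
{
	int start = MOVABLE, i;

	for (i = 0; ; i++) {
		int mt = fallbacks[start][i];

		if (mt == RESERVE)	/* sentinel: handled separately later */
			break;
		printf("try fallback type %d\n", mt);
	}
	return 0;
}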
/*
@@ -929,8 +954,8 @@ static int move_freepages(struct zone *zone,
return pages_moved;
}
-static int move_freepages_block(struct zone *zone, struct page *page,
- int migratetype)
+int move_freepages_block(struct zone *zone, struct page *page,
+ int migratetype)
{
unsigned long start_pfn, end_pfn;
struct page *start_page, *end_page;
@@ -973,12 +998,12 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
/* Find the largest possible block of pages in the other list */
for (current_order = MAX_ORDER-1; current_order >= order;
--current_order) {
- for (i = 0; i < MIGRATE_TYPES - 1; i++) {
+ for (i = 0;; i++) {
migratetype = fallbacks[start_migratetype][i];
/* MIGRATE_RESERVE handled later if necessary */
if (migratetype == MIGRATE_RESERVE)
- continue;
+ break;
area = &(zone->free_area[current_order]);
if (list_empty(&area->free_list[migratetype]))
@@ -993,11 +1018,18 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
* pages to the preferred allocation list. If falling
* back for a reclaimable kernel allocation, be more
* aggressive about taking ownership of free pages
+ *
+ * On the other hand, never change migration
+ * type of MIGRATE_CMA pageblocks nor move CMA
+ * pages to different free lists. We don't
+ * want unmovable pages to be allocated from
+ * MIGRATE_CMA areas.
*/
- if (unlikely(current_order >= (pageblock_order >> 1)) ||
- start_migratetype == MIGRATE_RECLAIMABLE ||
- page_group_by_mobility_disabled) {
- unsigned long pages;
+ if (!is_migrate_cma(migratetype) &&
+ (unlikely(current_order >= pageblock_order / 2) ||
+ start_migratetype == MIGRATE_RECLAIMABLE ||
+ page_group_by_mobility_disabled)) {
+ int pages;
pages = move_freepages_block(zone, page,
start_migratetype);
@@ -1015,11 +1047,14 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
rmv_page_order(page);
/* Take ownership for orders >= pageblock_order */
- if (current_order >= pageblock_order)
+ if (current_order >= pageblock_order &&
+ !is_migrate_cma(migratetype))
change_pageblock_range(page, current_order,
start_migratetype);
- expand(zone, page, order, current_order, area, migratetype);
+ expand(zone, page, order, current_order, area,
+ is_migrate_cma(migratetype)
+ ? migratetype : start_migratetype);
trace_mm_page_alloc_extfrag(page, order, current_order,
start_migratetype, migratetype);
@@ -1061,17 +1096,17 @@ retry_reserve:
return page;
}
-/*
+/*
* Obtain a specified number of elements from the buddy allocator, all under
* a single hold of the lock, for efficiency. Add them to the supplied list.
* Returns the number of new pages which were placed at *list.
*/
-static int rmqueue_bulk(struct zone *zone, unsigned int order,
+static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
int migratetype, int cold)
{
- int i;
-
+ int mt = migratetype, i;
+
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
struct page *page = __rmqueue(zone, order, migratetype);
@@ -1091,7 +1126,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
list_add(&page->lru, list);
else
list_add_tail(&page->lru, list);
- set_page_private(page, migratetype);
+ if (IS_ENABLED(CONFIG_CMA)) {
+ mt = get_pageblock_migratetype(page);
+ if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
+ mt = migratetype;
+ }
+ set_page_private(page, mt);
list = &page->lru;
}
__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
@@ -1371,8 +1411,12 @@ int split_free_page(struct page *page)
if (order >= pageblock_order - 1) {
struct page *endpage = page + (1 << order) - 1;
- for (; page < endpage; page += pageblock_nr_pages)
- set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+ for (; page < endpage; page += pageblock_nr_pages) {
+ int mt = get_pageblock_migratetype(page);
+ if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
+ set_pageblock_migratetype(page,
+ MIGRATE_MOVABLE);
+ }
}
return 1 << order;
@@ -2086,16 +2130,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
}
#endif /* CONFIG_COMPACTION */
-/* The really slow allocator path where we enter direct reclaim */
-static inline struct page *
-__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist, enum zone_type high_zoneidx,
- nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
- int migratetype, unsigned long *did_some_progress)
+/* Perform direct synchronous page reclaim */
+static int
+__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
+ nodemask_t *nodemask)
{
- struct page *page = NULL;
struct reclaim_state reclaim_state;
- bool drained = false;
+ int progress;
cond_resched();
@@ -2106,7 +2147,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
reclaim_state.reclaimed_slab = 0;
current->reclaim_state = &reclaim_state;
- *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
+ progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
current->reclaim_state = NULL;
lockdep_clear_current_reclaim_state();
@@ -2114,6 +2155,21 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
cond_resched();
+ return progress;
+}
+
+/* The really slow allocator path where we enter direct reclaim */
+static inline struct page *
+__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist, enum zone_type high_zoneidx,
+ nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+ int migratetype, unsigned long *did_some_progress)
+{
+ struct page *page = NULL;
+ bool drained = false;
+
+ *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+ nodemask);
if (unlikely(!(*did_some_progress)))
return NULL;
@@ -4244,25 +4300,24 @@ static inline void setup_usemap(struct pglist_data *pgdat,
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
-/* Return a sensible default order for the pageblock size. */
-static inline int pageblock_default_order(void)
-{
- if (HPAGE_SHIFT > PAGE_SHIFT)
- return HUGETLB_PAGE_ORDER;
-
- return MAX_ORDER-1;
-}
-
/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
-static inline void __init set_pageblock_order(unsigned int order)
+static inline void __init set_pageblock_order(void)
{
+ unsigned int order;
+
/* Check that pageblock_nr_pages has not already been setup */
if (pageblock_order)
return;
+ if (HPAGE_SHIFT > PAGE_SHIFT)
+ order = HUGETLB_PAGE_ORDER;
+ else
+ order = MAX_ORDER - 1;
+
/*
* Assume the largest contiguous order of interest is a huge page.
- * This value may be variable depending on boot parameters on IA64
+ * This value may be variable depending on boot parameters on IA64 and
+ * powerpc.
*/
pageblock_order = order;
}
@@ -4270,15 +4325,13 @@ static inline void __init set_pageblock_order(unsigned int order)
/*
* When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
- * and pageblock_default_order() are unused as pageblock_order is set
- * at compile-time. See include/linux/pageblock-flags.h for the values of
- * pageblock_order based on the kernel config
+ * is unused as pageblock_order is set at compile-time. See
+ * include/linux/pageblock-flags.h for the values of pageblock_order based on
+ * the kernel config
*/
-static inline int pageblock_default_order(unsigned int order)
+static inline void set_pageblock_order(void)
{
- return MAX_ORDER-1;
}
-#define set_pageblock_order(x) do {} while (0)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
@@ -4301,11 +4354,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
init_waitqueue_head(&pgdat->kswapd_wait);
pgdat->kswapd_max_order = 0;
pgdat_page_cgroup_init(pgdat);
-
+
for (j = 0; j < MAX_NR_ZONES; j++) {
struct zone *zone = pgdat->node_zones + j;
unsigned long size, realsize, memmap_pages;
- enum lru_list lru;
size = zone_spanned_pages_in_node(nid, j, zones_size);
realsize = size - zone_absent_pages_in_node(nid, j,
@@ -4355,18 +4407,13 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
zone->zone_pgdat = pgdat;
zone_pcp_init(zone);
- for_each_lru(lru)
- INIT_LIST_HEAD(&zone->lruvec.lists[lru]);
- zone->reclaim_stat.recent_rotated[0] = 0;
- zone->reclaim_stat.recent_rotated[1] = 0;
- zone->reclaim_stat.recent_scanned[0] = 0;
- zone->reclaim_stat.recent_scanned[1] = 0;
+ lruvec_init(&zone->lruvec, zone);
zap_zone_vm_stats(zone);
zone->flags = 0;
if (!size)
continue;
- set_pageblock_order(pageblock_default_order());
+ set_pageblock_order();
setup_usemap(pgdat, zone, size);
ret = init_currently_empty_zone(zone, zone_start_pfn,
size, MEMMAP_EARLY);
@@ -4759,7 +4806,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
find_zone_movable_pfns_for_nodes();
/* Print out the zone ranges */
- printk("Zone PFN ranges:\n");
+ printk("Zone ranges:\n");
for (i = 0; i < MAX_NR_ZONES; i++) {
if (i == ZONE_MOVABLE)
continue;
@@ -4768,22 +4815,25 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
arch_zone_highest_possible_pfn[i])
printk(KERN_CONT "empty\n");
else
- printk(KERN_CONT "%0#10lx -> %0#10lx\n",
- arch_zone_lowest_possible_pfn[i],
- arch_zone_highest_possible_pfn[i]);
+ printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
+ arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
+ (arch_zone_highest_possible_pfn[i]
+ << PAGE_SHIFT) - 1);
}
/* Print out the PFNs ZONE_MOVABLE begins at in each node */
- printk("Movable zone start PFN for each node\n");
+ printk("Movable zone start for each node\n");
for (i = 0; i < MAX_NUMNODES; i++) {
if (zone_movable_pfn[i])
- printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
+ printk(" Node %d: %#010lx\n", i,
+ zone_movable_pfn[i] << PAGE_SHIFT);
}
/* Print out the early_node_map[] */
- printk("Early memory PFN ranges\n");
+ printk("Early memory node ranges\n");
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
- printk(" %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
+ printk(" node %3d: [mem %#010lx-%#010lx]\n", nid,
+ start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
/* Initialise every node */
mminit_verify_pageflags_layout();
@@ -4976,14 +5026,7 @@ static void setup_per_zone_lowmem_reserve(void)
calculate_totalreserve_pages();
}
-/**
- * setup_per_zone_wmarks - called when min_free_kbytes changes
- * or when memory is hot-{added|removed}
- *
- * Ensures that the watermark[min,low,high] values for each zone are set
- * correctly with respect to min_free_kbytes.
- */
-void setup_per_zone_wmarks(void)
+static void __setup_per_zone_wmarks(void)
{
unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
unsigned long lowmem_pages = 0;
@@ -5030,6 +5073,11 @@ void setup_per_zone_wmarks(void)
zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+
+ zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
+ zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
+ zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
+
setup_zone_migrate_reserve(zone);
spin_unlock_irqrestore(&zone->lock, flags);
}
@@ -5038,6 +5086,20 @@ void setup_per_zone_wmarks(void)
calculate_totalreserve_pages();
}
+/**
+ * setup_per_zone_wmarks - called when min_free_kbytes changes
+ * or when memory is hot-{added|removed}
+ *
+ * Ensures that the watermark[min,low,high] values for each zone are set
+ * correctly with respect to min_free_kbytes.
+ */
+void setup_per_zone_wmarks(void)
+{
+ mutex_lock(&zonelists_mutex);
+ __setup_per_zone_wmarks();
+ mutex_unlock(&zonelists_mutex);
+}
+
/*
* The inactive anon list should be small enough that the VM never has to
* do too much work, but large enough that each inactive page has a chance
@@ -5242,9 +5304,10 @@ void *__init alloc_large_system_hash(const char *tablename,
int flags,
unsigned int *_hash_shift,
unsigned int *_hash_mask,
- unsigned long limit)
+ unsigned long low_limit,
+ unsigned long high_limit)
{
- unsigned long long max = limit;
+ unsigned long long max = high_limit;
unsigned long log2qty, size;
void *table = NULL;
@@ -5282,6 +5345,8 @@ void *__init alloc_large_system_hash(const char *tablename,
}
max = min(max, 0x80000000ULL);
+ if (numentries < low_limit)
+ numentries = low_limit;
if (numentries > max)
numentries = max;
@@ -5412,14 +5477,16 @@ static int
__count_immobile_pages(struct zone *zone, struct page *page, int count)
{
unsigned long pfn, iter, found;
+ int mt;
+
/*
* For avoiding noise data, lru_add_drain_all() should be called
* If ZONE_MOVABLE, the zone never contains immobile pages
*/
if (zone_idx(zone) == ZONE_MOVABLE)
return true;
-
- if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
+ mt = get_pageblock_migratetype(page);
+ if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
return true;
pfn = page_to_pfn(page);
@@ -5536,7 +5603,7 @@ out:
return ret;
}
-void unset_migratetype_isolate(struct page *page)
+void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
struct zone *zone;
unsigned long flags;
@@ -5544,12 +5611,259 @@ void unset_migratetype_isolate(struct page *page)
spin_lock_irqsave(&zone->lock, flags);
if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
goto out;
- set_pageblock_migratetype(page, MIGRATE_MOVABLE);
- move_freepages_block(zone, page, MIGRATE_MOVABLE);
+ set_pageblock_migratetype(page, migratetype);
+ move_freepages_block(zone, page, migratetype);
out:
spin_unlock_irqrestore(&zone->lock, flags);
}
+#ifdef CONFIG_CMA
+
+static unsigned long pfn_max_align_down(unsigned long pfn)
+{
+ return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
+ pageblock_nr_pages) - 1);
+}
+
+static unsigned long pfn_max_align_up(unsigned long pfn)
+{
+ return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
+ pageblock_nr_pages));
+}
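
pfn_max_align_down()/pfn_max_align_up() round to the larger of the two granularities that must stay homogeneous while the range is isolated: the MAX_ORDER block and the pageblock. A standalone check of that rounding with assumed sizes:

#include <stdio.h>

int main(void)
{
	/* Assumed example granularities, both powers of two (in pages). */
	unsigned long max_order_nr_pages = 1024, pageblock_nr_pages = 512;
	unsigned long align = max_order_nr_pages > pageblock_nr_pages ?
			      max_order_nr_pages : pageblock_nr_pages;
	unsigned long pfn = 123456;

	unsigned long down = pfn & ~(align - 1);		/* pfn_max_align_down */
	unsigned long up   = (pfn + align - 1) & ~(align - 1);	/* pfn_max_align_up   */

	printf("pfn %lu -> [%lu, %lu) with %lu-page alignment\n",
	       pfn, down, up, align);
	return 0;
}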
+
+static struct page *
+__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
+ int **resultp)
+{
+ return alloc_page(GFP_HIGHUSER_MOVABLE);
+}
+
+/* [start, end) must belong to a single zone. */
+static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
+{
+ /* This function is based on compact_zone() from compaction.c. */
+
+ unsigned long pfn = start;
+ unsigned int tries = 0;
+ int ret = 0;
+
+ struct compact_control cc = {
+ .nr_migratepages = 0,
+ .order = -1,
+ .zone = page_zone(pfn_to_page(start)),
+ .mode = COMPACT_SYNC,
+ };
+ INIT_LIST_HEAD(&cc.migratepages);
+
+ migrate_prep_local();
+
+ while (pfn < end || !list_empty(&cc.migratepages)) {
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ if (list_empty(&cc.migratepages)) {
+ cc.nr_migratepages = 0;
+ pfn = isolate_migratepages_range(cc.zone, &cc,
+ pfn, end);
+ if (!pfn) {
+ ret = -EINTR;
+ break;
+ }
+ tries = 0;
+ } else if (++tries == 5) {
+ ret = ret < 0 ? ret : -EBUSY;
+ break;
+ }
+
+ ret = migrate_pages(&cc.migratepages,
+ __alloc_contig_migrate_alloc,
+ 0, false, MIGRATE_SYNC);
+ }
+
+ putback_lru_pages(&cc.migratepages);
+ return ret > 0 ? 0 : ret;
+}
+
+/*
+ * Update zone's cma pages counter used for watermark level calculation.
+ */
+static inline void __update_cma_watermarks(struct zone *zone, int count)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&zone->lock, flags);
+ zone->min_cma_pages += count;
+ spin_unlock_irqrestore(&zone->lock, flags);
+ setup_per_zone_wmarks();
+}
+
+/*
+ * Trigger memory pressure bump to reclaim some pages in order to be able to
+ * allocate 'count' pages in single page units. Does work similar to
+ * __alloc_pages_slowpath().
+ */
+static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
+{
+ enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+ struct zonelist *zonelist = node_zonelist(0, gfp_mask);
+ int did_some_progress = 0;
+ int order = 1;
+
+ /*
+ * Raise the watermarks to force kswapd to do its job and
+ * stabilise at the new watermark level.
+ */
+ __update_cma_watermarks(zone, count);
+
+ /* Obey watermarks as if the page was being allocated */
+ while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
+ wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
+
+ did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+ NULL);
+ if (!did_some_progress) {
+ /* Exhausted what can be done so it's blamo time */
+ out_of_memory(zonelist, gfp_mask, order, NULL, false);
+ }
+ }
+
+ /* Restore original watermark levels. */
+ __update_cma_watermarks(zone, -count);
+
+ return count;
+}
+
+/**
+ * alloc_contig_range() -- tries to allocate given range of pages
+ * @start: start PFN to allocate
+ * @end: one-past-the-last PFN to allocate
+ * @migratetype: migratetype of the underlying pageblocks (either
+ * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
+ * in range must have the same migratetype and it must
+ * be either of the two.
+ *
+ * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
+ * aligned, however it's the caller's responsibility to guarantee that
+ * we are the only thread that changes migrate type of pageblocks the
+ * pages fall in.
+ *
+ * The PFN range must belong to a single zone.
+ *
+ * Returns zero on success or negative error code. On success all
+ * pages which PFN is in [start, end) are allocated for the caller and
+ * need to be freed with free_contig_range().
+ */
+int alloc_contig_range(unsigned long start, unsigned long end,
+ unsigned migratetype)
+{
+ struct zone *zone = page_zone(pfn_to_page(start));
+ unsigned long outer_start, outer_end;
+ int ret = 0, order;
+
+ /*
+ * What we do here is we mark all pageblocks in range as
+ * MIGRATE_ISOLATE. Because pageblock and max order pages may
+ * have different sizes, and due to the way the page allocator
+ * works, we align the range to the bigger of the two so
+ * that the page allocator won't try to merge buddies from
+ * different pageblocks and change MIGRATE_ISOLATE to some
+ * other migration type.
+ *
+ * Once the pageblocks are marked as MIGRATE_ISOLATE, we
+ * migrate the pages from an unaligned range (ie. pages that
+ * we are interested in). This will put all the pages in
+ * range back to page allocator as MIGRATE_ISOLATE.
+ *
+ * When this is done, we take the pages in range from page
+ * allocator removing them from the buddy system. This way
+ * page allocator will never consider using them.
+ *
+ * This lets us mark the pageblocks back as
+ * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
+ * aligned range but not in the unaligned, original range are
+ * put back to page allocator so that buddy can use them.
+ */
+
+ ret = start_isolate_page_range(pfn_max_align_down(start),
+ pfn_max_align_up(end), migratetype);
+ if (ret)
+ goto done;
+
+ ret = __alloc_contig_migrate_range(start, end);
+ if (ret)
+ goto done;
+
+ /*
+ * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
+ * aligned blocks that are marked as MIGRATE_ISOLATE. What's
+ * more, all pages in [start, end) are free in page allocator.
+ * What we are going to do is to allocate all pages from
+ * [start, end) (that is remove them from page allocator).
+ *
+ * The only problem is that pages at the beginning and at the
+ * end of interesting range may be not aligned with pages that
+ * page allocator holds, ie. they can be part of higher order
+ * pages. Because of this, we reserve the bigger range and
+ * once this is done free the pages we are not interested in.
+ *
+ * We don't have to hold zone->lock here because the pages are
+ * isolated thus they won't get removed from buddy.
+ */
+
+ lru_add_drain_all();
+ drain_all_pages();
+
+ order = 0;
+ outer_start = start;
+ while (!PageBuddy(pfn_to_page(outer_start))) {
+ if (++order >= MAX_ORDER) {
+ ret = -EBUSY;
+ goto done;
+ }
+ outer_start &= ~0UL << order;
+ }
+
+ /* Make sure the range is really isolated. */
+ if (test_pages_isolated(outer_start, end)) {
+ pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
+ outer_start, end);
+ ret = -EBUSY;
+ goto done;
+ }
+
+ /*
+ * Reclaim enough pages to make sure that contiguous allocation
+ * will not starve the system.
+ */
+ __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
+
+ /* Grab isolated pages from freelists. */
+ outer_end = isolate_freepages_range(outer_start, end);
+ if (!outer_end) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ /* Free head and tail (if any) */
+ if (start != outer_start)
+ free_contig_range(outer_start, start - outer_start);
+ if (end != outer_end)
+ free_contig_range(end, outer_end - end);
+
+done:
+ undo_isolate_page_range(pfn_max_align_down(start),
+ pfn_max_align_up(end), migratetype);
+ return ret;
+}
+
+void free_contig_range(unsigned long pfn, unsigned nr_pages)
+{
+ for (; nr_pages--; ++pfn)
+ __free_page(pfn_to_page(pfn));
+}
+#endif
+
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
* All pages in the range must be isolated before calling this.
@@ -5618,7 +5932,7 @@ bool is_free_buddy_page(struct page *page)
}
#endif
-static struct trace_print_flags pageflag_names[] = {
+static const struct trace_print_flags pageflag_names[] = {
{1UL << PG_locked, "locked" },
{1UL << PG_error, "error" },
{1UL << PG_referenced, "referenced" },
@@ -5653,7 +5967,9 @@ static struct trace_print_flags pageflag_names[] = {
#ifdef CONFIG_MEMORY_FAILURE
{1UL << PG_hwpoison, "hwpoison" },
#endif
- {-1UL, NULL },
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ {1UL << PG_compound_lock, "compound_lock" },
+#endif
};
static void dump_page_flags(unsigned long flags)
@@ -5662,12 +5978,14 @@ static void dump_page_flags(unsigned long flags)
unsigned long mask;
int i;
+ BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
+
printk(KERN_ALERT "page flags: %#lx(", flags);
/* remove zone id */
flags &= (1UL << NR_PAGEFLAGS) - 1;
- for (i = 0; pageflag_names[i].name && flags; i++) {
+ for (i = 0; i < ARRAY_SIZE(pageflag_names) && flags; i++) {
mask = pageflag_names[i].mask;
if ((flags & mask) != mask)
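
dump_page_flags() now bounds its loop by the size of pageflag_names[] and asserts at build time that the table covers every page flag, instead of relying on a {-1UL, NULL} terminator. A standalone sketch of the same mask-to-name decoding over a complete table (only the first three names from the table above, with simplified bit values):

#include <stdio.h>

struct flag_name {
	unsigned long mask;
	const char *name;
};

/* Every flag has an entry; no sentinel is needed when the loop is
 * bounded by the array size. */
static const struct flag_name names[] = {
	{ 1UL << 0, "locked"     },
	{ 1UL << 1, "error"      },
	{ 1UL << 2, "referenced" },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	unsigned long flags = (1UL << 0) | (1UL << 2);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(names) && flags; i++) {
		if ((flags & names[i].mask) != names[i].mask)
			continue;
		flags &= ~names[i].mask;
		printf("%s%s", names[i].name, flags ? "|" : "\n");
	}
	return 0;	/* prints "locked|referenced" */
}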
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 4ae42bb40892..c9f04774f2b8 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -24,6 +24,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* to be MIGRATE_ISOLATE.
* @start_pfn: The lower PFN of the range to be isolated.
* @end_pfn: The upper PFN of the range to be isolated.
+ * @migratetype: migrate type to set in error recovery.
*
* Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
* the range will never be allocated. Any free pages and pages freed in the
@@ -32,8 +33,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* start_pfn/end_pfn must be aligned to pageblock_order.
* Returns 0 on success and -EBUSY if any part of range cannot be isolated.
*/
-int
-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
+int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ unsigned migratetype)
{
unsigned long pfn;
unsigned long undo_pfn;
@@ -56,7 +57,7 @@ undo:
for (pfn = start_pfn;
pfn < undo_pfn;
pfn += pageblock_nr_pages)
- unset_migratetype_isolate(pfn_to_page(pfn));
+ unset_migratetype_isolate(pfn_to_page(pfn), migratetype);
return -EBUSY;
}
@@ -64,8 +65,8 @@ undo:
/*
* Make isolated pages available again.
*/
-int
-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
+int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ unsigned migratetype)
{
unsigned long pfn;
struct page *page;
@@ -77,7 +78,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
page = __first_valid_page(pfn, pageblock_nr_pages);
if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
continue;
- unset_migratetype_isolate(page);
+ unset_migratetype_isolate(page, migratetype);
}
return 0;
}
@@ -86,7 +87,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
* all pages in [start_pfn...end_pfn) must be in the same zone.
* zone->lock must be held before call this.
*
- * Returns 1 if all pages in the range is isolated.
+ * Returns 1 if all pages in the range are isolated.
*/
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 5a74fea182f1..74c0ddaa6fa0 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -109,8 +109,8 @@ pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmdp)
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp)
{
pmd_t pmd = pmd_mksplitting(*pmdp);
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index c20ff48994c2..926b46649749 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -371,15 +371,15 @@ static ssize_t process_vm_rw(pid_t pid,
/* Check iovecs */
if (vm_write)
rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
- iovstack_l, &iov_l, 1);
+ iovstack_l, &iov_l);
else
rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
- iovstack_l, &iov_l, 1);
+ iovstack_l, &iov_l);
if (rc <= 0)
goto free_iovecs;
- rc = rw_copy_check_uvector(READ, rvec, riovcnt, UIO_FASTIOV,
- iovstack_r, &iov_r, 0);
+ rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
+ iovstack_r, &iov_r);
if (rc <= 0)
goto free_iovecs;
@@ -438,16 +438,16 @@ compat_process_vm_rw(compat_pid_t pid,
if (vm_write)
rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
UIO_FASTIOV, iovstack_l,
- &iov_l, 1);
+ &iov_l);
else
rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
UIO_FASTIOV, iovstack_l,
- &iov_l, 1);
+ &iov_l);
if (rc <= 0)
goto free_iovecs;
- rc = compat_rw_copy_check_uvector(READ, rvec, riovcnt,
+ rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
UIO_FASTIOV, iovstack_r,
- &iov_r, 0);
+ &iov_r);
if (rc <= 0)
goto free_iovecs;
diff --git a/mm/readahead.c b/mm/readahead.c
index cbcbb02f3e28..ea8f8fa21649 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -17,6 +17,8 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
+#include <linux/syscalls.h>
+#include <linux/file.h>
/*
* Initialise a struct file's readahead state. Assumes that the caller has
@@ -562,3 +564,41 @@ page_cache_async_readahead(struct address_space *mapping,
ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
+
+static ssize_t
+do_readahead(struct address_space *mapping, struct file *filp,
+ pgoff_t index, unsigned long nr)
+{
+ if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
+ return -EINVAL;
+
+ force_page_cache_readahead(mapping, filp, index, nr);
+ return 0;
+}
+
+SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
+{
+ ssize_t ret;
+ struct file *file;
+
+ ret = -EBADF;
+ file = fget(fd);
+ if (file) {
+ if (file->f_mode & FMODE_READ) {
+ struct address_space *mapping = file->f_mapping;
+ pgoff_t start = offset >> PAGE_CACHE_SHIFT;
+ pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
+ unsigned long len = end - start + 1;
+ ret = do_readahead(mapping, file, start, len);
+ }
+ fput(file);
+ }
+ return ret;
+}
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
+{
+ return SYSC_readahead((int) fd, offset, (size_t) count);
+}
+SYSCALL_ALIAS(sys_readahead, SyS_readahead);
+#endif
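
The syscall body converts the byte (offset, count) pair into an inclusive page-index range before handing it to force_page_cache_readahead(). The index arithmetic on its own, assuming 4 KiB page-cache pages and made-up inputs:

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12	/* assumption: 4 KiB page-cache pages */

int main(void)
{
	long long offset = 6000;	/* illustrative byte offset */
	unsigned long count = 10000;	/* illustrative byte count  */

	unsigned long start = offset >> PAGE_CACHE_SHIFT;
	unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
	unsigned long len = end - start + 1;

	/* bytes 6000..15999 touch pages 1..3, so three pages are read ahead */
	printf("start %lu end %lu len %lu\n", start, end, len);
	return 0;
}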
diff --git a/mm/rmap.c b/mm/rmap.c
index 5b5ad584ffb7..0f3b7cda2a24 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -755,12 +755,6 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
pte_unmap_unlock(pte, ptl);
}
- /* Pretend the page is referenced if the task has the
- swap token and is in the middle of a page fault. */
- if (mm != current->mm && has_swap_token(mm) &&
- rwsem_is_locked(&mm->mmap_sem))
- referenced++;
-
(*mapcount)--;
if (referenced)
diff --git a/mm/shmem.c b/mm/shmem.c
index d7b433a1ef5e..d576b84d913c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -53,6 +53,7 @@ static struct vfsmount *shm_mnt;
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
+#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
@@ -83,12 +84,25 @@ struct shmem_xattr {
char value[0];
};
+/*
+ * shmem_fallocate and shmem_writepage communicate via inode->i_private
+ * (with i_mutex making sure that it has only one user at a time):
+ * we would prefer not to enlarge the shmem inode just for that.
+ */
+struct shmem_falloc {
+ pgoff_t start; /* start of range currently being fallocated */
+ pgoff_t next; /* the next page offset to be fallocated */
+ pgoff_t nr_falloced; /* how many new pages have been fallocated */
+ pgoff_t nr_unswapped; /* how often writepage refused to swap out */
+};
+
/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
SGP_READ, /* don't exceed i_size, don't allocate page */
SGP_CACHE, /* don't exceed i_size, may allocate page */
SGP_DIRTY, /* like SGP_CACHE, but set new page dirty */
- SGP_WRITE, /* may exceed i_size, may allocate page */
+ SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */
+ SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
};
#ifdef CONFIG_TMPFS
@@ -103,6 +117,9 @@ static unsigned long shmem_default_max_inodes(void)
}
#endif
+static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
+static int shmem_replace_page(struct page **pagep, gfp_t gfp,
+ struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
@@ -423,27 +440,31 @@ void shmem_unlock_mapping(struct address_space *mapping)
/*
* Remove range of pages and swap entries from radix tree, and free them.
+ * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
*/
-void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+ bool unfalloc)
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
- pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
+ pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
+ unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
+ unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
struct pagevec pvec;
pgoff_t indices[PAGEVEC_SIZE];
long nr_swaps_freed = 0;
pgoff_t index;
int i;
- BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
+ if (lend == -1)
+ end = -1; /* unsigned, so actually very big */
pagevec_init(&pvec, 0);
index = start;
- while (index <= end) {
+ while (index < end) {
pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE),
pvec.pages, indices);
if (!pvec.nr)
break;
@@ -452,10 +473,12 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
struct page *page = pvec.pages[i];
index = indices[i];
- if (index > end)
+ if (index >= end)
break;
if (radix_tree_exceptional_entry(page)) {
+ if (unfalloc)
+ continue;
nr_swaps_freed += !shmem_free_swap(mapping,
index, page);
continue;
@@ -463,9 +486,11 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
if (!trylock_page(page))
continue;
- if (page->mapping == mapping) {
- VM_BUG_ON(PageWriteback(page));
- truncate_inode_page(mapping, page);
+ if (!unfalloc || !PageUptodate(page)) {
+ if (page->mapping == mapping) {
+ VM_BUG_ON(PageWriteback(page));
+ truncate_inode_page(mapping, page);
+ }
}
unlock_page(page);
}
@@ -476,30 +501,47 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
index++;
}
- if (partial) {
+ if (partial_start) {
struct page *page = NULL;
shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
if (page) {
- zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+ unsigned int top = PAGE_CACHE_SIZE;
+ if (start > end) {
+ top = partial_end;
+ partial_end = 0;
+ }
+ zero_user_segment(page, partial_start, top);
+ set_page_dirty(page);
+ unlock_page(page);
+ page_cache_release(page);
+ }
+ }
+ if (partial_end) {
+ struct page *page = NULL;
+ shmem_getpage(inode, end, &page, SGP_READ, NULL);
+ if (page) {
+ zero_user_segment(page, 0, partial_end);
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
}
}
+ if (start >= end)
+ return;
index = start;
for ( ; ; ) {
cond_resched();
pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE),
pvec.pages, indices);
if (!pvec.nr) {
- if (index == start)
+ if (index == start || unfalloc)
break;
index = start;
continue;
}
- if (index == start && indices[0] > end) {
+ if ((index == start || unfalloc) && indices[0] >= end) {
shmem_deswap_pagevec(&pvec);
pagevec_release(&pvec);
break;
@@ -509,19 +551,23 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
struct page *page = pvec.pages[i];
index = indices[i];
- if (index > end)
+ if (index >= end)
break;
if (radix_tree_exceptional_entry(page)) {
+ if (unfalloc)
+ continue;
nr_swaps_freed += !shmem_free_swap(mapping,
index, page);
continue;
}
lock_page(page);
- if (page->mapping == mapping) {
- VM_BUG_ON(PageWriteback(page));
- truncate_inode_page(mapping, page);
+ if (!unfalloc || !PageUptodate(page)) {
+ if (page->mapping == mapping) {
+ VM_BUG_ON(PageWriteback(page));
+ truncate_inode_page(mapping, page);
+ }
}
unlock_page(page);
}
@@ -535,7 +581,11 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
info->swapped -= nr_swaps_freed;
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
+}
+void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+{
+ shmem_undo_range(inode, lstart, lend, false);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
@@ -597,19 +647,20 @@ static void shmem_evict_inode(struct inode *inode)
}
BUG_ON(inode->i_blocks);
shmem_free_inode(inode->i_sb);
- end_writeback(inode);
+ clear_inode(inode);
}
/*
* If swap found in inode, free it and move page from swapcache to filecache.
*/
static int shmem_unuse_inode(struct shmem_inode_info *info,
- swp_entry_t swap, struct page *page)
+ swp_entry_t swap, struct page **pagep)
{
struct address_space *mapping = info->vfs_inode.i_mapping;
void *radswap;
pgoff_t index;
- int error;
+ gfp_t gfp;
+ int error = 0;
radswap = swp_to_radix_entry(swap);
index = radix_tree_locate_item(&mapping->page_tree, radswap);
@@ -625,22 +676,37 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
if (shmem_swaplist.next != &info->swaplist)
list_move_tail(&shmem_swaplist, &info->swaplist);
+ gfp = mapping_gfp_mask(mapping);
+ if (shmem_should_replace_page(*pagep, gfp)) {
+ mutex_unlock(&shmem_swaplist_mutex);
+ error = shmem_replace_page(pagep, gfp, info, index);
+ mutex_lock(&shmem_swaplist_mutex);
+ /*
+ * We needed to drop mutex to make that restrictive page
+ * allocation; but the inode might already be freed by now,
+ * and we cannot refer to inode or mapping or info to check.
+ * However, we do hold page lock on the PageSwapCache page,
+ * so can check if that still has our reference remaining.
+ */
+ if (!page_swapcount(*pagep))
+ error = -ENOENT;
+ }
+
/*
* We rely on shmem_swaplist_mutex, not only to protect the swaplist,
* but also to hold up shmem_evict_inode(): so inode cannot be freed
* beneath us (pagelock doesn't help until the page is in pagecache).
*/
- error = shmem_add_to_page_cache(page, mapping, index,
+ if (!error)
+ error = shmem_add_to_page_cache(*pagep, mapping, index,
GFP_NOWAIT, radswap);
- /* which does mem_cgroup_uncharge_cache_page on error */
-
if (error != -ENOMEM) {
/*
* Truncation and eviction use free_swap_and_cache(), which
* only does trylock page: if we raced, best clean up here.
*/
- delete_from_swap_cache(page);
- set_page_dirty(page);
+ delete_from_swap_cache(*pagep);
+ set_page_dirty(*pagep);
if (!error) {
spin_lock(&info->lock);
info->swapped--;
@@ -660,7 +726,14 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
struct list_head *this, *next;
struct shmem_inode_info *info;
int found = 0;
- int error;
+ int error = 0;
+
+ /*
+ * There's a faint possibility that swap page was replaced before
+ * caller locked it: it will come back later with the right page.
+ */
+ if (unlikely(!PageSwapCache(page)))
+ goto out;
/*
* Charge page using GFP_KERNEL while we can wait, before taking
@@ -676,7 +749,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
list_for_each_safe(this, next, &shmem_swaplist) {
info = list_entry(this, struct shmem_inode_info, swaplist);
if (info->swapped)
- found = shmem_unuse_inode(info, swap, page);
+ found = shmem_unuse_inode(info, swap, &page);
else
list_del_init(&info->swaplist);
cond_resched();
@@ -685,8 +758,6 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
}
mutex_unlock(&shmem_swaplist_mutex);
- if (!found)
- mem_cgroup_uncharge_cache_page(page);
if (found < 0)
error = found;
out:
@@ -727,6 +798,38 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
goto redirty;
}
+
+ /*
+ * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
+ * value into swapfile.c, the only way we can correctly account for a
+ * fallocated page arriving here is now to initialize it and write it.
+ *
+ * That's okay for a page already fallocated earlier, but if we have
+ * not yet completed the fallocation, then (a) we want to keep track
+ * of this page in case we have to undo it, and (b) it may not be a
+ * good idea to continue anyway, once we're pushing into swap. So
+ * reactivate the page, and let shmem_fallocate() quit when too many.
+ */
+ if (!PageUptodate(page)) {
+ if (inode->i_private) {
+ struct shmem_falloc *shmem_falloc;
+ spin_lock(&inode->i_lock);
+ shmem_falloc = inode->i_private;
+ if (shmem_falloc &&
+ index >= shmem_falloc->start &&
+ index < shmem_falloc->next)
+ shmem_falloc->nr_unswapped++;
+ else
+ shmem_falloc = NULL;
+ spin_unlock(&inode->i_lock);
+ if (shmem_falloc)
+ goto redirty;
+ }
+ clear_highpage(page);
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ }
+
swap = get_swap_page();
if (!swap.val)
goto redirty;
@@ -856,6 +959,84 @@ static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
#endif
/*
+ * When a page is moved from swapcache to shmem filecache (either by the
+ * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
+ * shmem_unuse_inode()), it may have been read in earlier from swap, in
+ * ignorance of the mapping it belongs to. If that mapping has special
+ * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
+ * we may need to copy to a suitable page before moving to filecache.
+ *
+ * In a future release, this may well be extended to respect cpuset and
+ * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
+ * but for now it is a simple matter of zone.
+ */
+static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
+{
+ return page_zonenum(page) > gfp_zone(gfp);
+}
+
+static int shmem_replace_page(struct page **pagep, gfp_t gfp,
+ struct shmem_inode_info *info, pgoff_t index)
+{
+ struct page *oldpage, *newpage;
+ struct address_space *swap_mapping;
+ pgoff_t swap_index;
+ int error;
+
+ oldpage = *pagep;
+ swap_index = page_private(oldpage);
+ swap_mapping = page_mapping(oldpage);
+
+ /*
+ * We have arrived here because our zones are constrained, so don't
+ * limit chance of success by further cpuset and node constraints.
+ */
+ gfp &= ~GFP_CONSTRAINT_MASK;
+ newpage = shmem_alloc_page(gfp, info, index);
+ if (!newpage)
+ return -ENOMEM;
+ VM_BUG_ON(shmem_should_replace_page(newpage, gfp));
+
+ *pagep = newpage;
+ page_cache_get(newpage);
+ copy_highpage(newpage, oldpage);
+
+ VM_BUG_ON(!PageLocked(oldpage));
+ __set_page_locked(newpage);
+ VM_BUG_ON(!PageUptodate(oldpage));
+ SetPageUptodate(newpage);
+ VM_BUG_ON(!PageSwapBacked(oldpage));
+ SetPageSwapBacked(newpage);
+ VM_BUG_ON(!swap_index);
+ set_page_private(newpage, swap_index);
+ VM_BUG_ON(!PageSwapCache(oldpage));
+ SetPageSwapCache(newpage);
+
+ /*
+ * Our caller will very soon move newpage out of swapcache, but it's
+ * a nice clean interface for us to replace oldpage by newpage there.
+ */
+ spin_lock_irq(&swap_mapping->tree_lock);
+ error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
+ newpage);
+ __inc_zone_page_state(newpage, NR_FILE_PAGES);
+ __dec_zone_page_state(oldpage, NR_FILE_PAGES);
+ spin_unlock_irq(&swap_mapping->tree_lock);
+ BUG_ON(error);
+
+ mem_cgroup_replace_page_cache(oldpage, newpage);
+ lru_cache_add_anon(newpage);
+
+ ClearPageSwapCache(oldpage);
+ set_page_private(oldpage, 0);
+
+ unlock_page(oldpage);
+ page_cache_release(oldpage);
+ page_cache_release(oldpage);
+ return 0;
+}
+
+/*
* shmem_getpage_gfp - find page in cache, or get from swap, or allocate
*
* If we allocate a new one we do not mark it dirty. That's up to the
@@ -872,6 +1053,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
swp_entry_t swap;
int error;
int once = 0;
+ int alloced = 0;
if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
return -EFBIG;
@@ -883,19 +1065,21 @@ repeat:
page = NULL;
}
- if (sgp != SGP_WRITE &&
+ if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
error = -EINVAL;
goto failed;
}
+ /* fallocated page? */
+ if (page && !PageUptodate(page)) {
+ if (sgp != SGP_READ)
+ goto clear;
+ unlock_page(page);
+ page_cache_release(page);
+ page = NULL;
+ }
if (page || (sgp == SGP_READ && !swap.val)) {
- /*
- * Once we can get the page lock, it must be uptodate:
- * if there were an error in reading back from swap,
- * the page would not be inserted into the filecache.
- */
- BUG_ON(page && !PageUptodate(page));
*pagep = page;
return 0;
}
@@ -923,19 +1107,20 @@ repeat:
/* We have to do this with page locked to prevent races */
lock_page(page);
+ if (!PageSwapCache(page) || page->mapping) {
+ error = -EEXIST; /* try again */
+ goto failed;
+ }
if (!PageUptodate(page)) {
error = -EIO;
goto failed;
}
wait_on_page_writeback(page);
- /* Someone may have already done it for us */
- if (page->mapping) {
- if (page->mapping == mapping &&
- page->index == index)
- goto done;
- error = -EEXIST;
- goto failed;
+ if (shmem_should_replace_page(page, gfp)) {
+ error = shmem_replace_page(&page, gfp, info, index);
+ if (error)
+ goto failed;
}
error = mem_cgroup_cache_charge(page, current->mm,
@@ -991,19 +1176,36 @@ repeat:
inode->i_blocks += BLOCKS_PER_PAGE;
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
+ alloced = true;
- clear_highpage(page);
- flush_dcache_page(page);
- SetPageUptodate(page);
+ /*
+ * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
+ */
+ if (sgp == SGP_FALLOC)
+ sgp = SGP_WRITE;
+clear:
+ /*
+ * Let SGP_WRITE caller clear ends if write does not fill page;
+ * but SGP_FALLOC on a page fallocated earlier must initialize
+ * it now, lest undo on failure cancel our earlier guarantee.
+ */
+ if (sgp != SGP_WRITE) {
+ clear_highpage(page);
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ }
if (sgp == SGP_DIRTY)
set_page_dirty(page);
}
-done:
+
/* Perhaps the file has been truncated since we checked */
- if (sgp != SGP_WRITE &&
+ if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
error = -EINVAL;
- goto trunc;
+ if (alloced)
+ goto trunc;
+ else
+ goto failed;
}
*pagep = page;
return 0;
@@ -1012,6 +1214,7 @@ done:
* Error recovery.
*/
trunc:
+ info = SHMEM_I(inode);
ClearPageDirty(page);
delete_from_page_cache(page);
spin_lock(&info->lock);
@@ -1019,6 +1222,7 @@ trunc:
inode->i_blocks -= BLOCKS_PER_PAGE;
spin_unlock(&info->lock);
decused:
+ sbinfo = SHMEM_SB(inode->i_sb);
if (sbinfo->max_blocks)
percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
@@ -1204,6 +1408,14 @@ shmem_write_end(struct file *file, struct address_space *mapping,
if (pos + copied > inode->i_size)
i_size_write(inode, pos + copied);
+ if (!PageUptodate(page)) {
+ if (copied < PAGE_CACHE_SIZE) {
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ zero_user_segments(page, 0, from,
+ from + copied, PAGE_CACHE_SIZE);
+ }
+ SetPageUptodate(page);
+ }
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
@@ -1462,6 +1674,199 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
return error;
}
+/*
+ * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
+ */
+static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
+ pgoff_t index, pgoff_t end, int origin)
+{
+ struct page *page;
+ struct pagevec pvec;
+ pgoff_t indices[PAGEVEC_SIZE];
+ bool done = false;
+ int i;
+
+ pagevec_init(&pvec, 0);
+ pvec.nr = 1; /* start small: we may be there already */
+ while (!done) {
+ pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+ pvec.nr, pvec.pages, indices);
+ if (!pvec.nr) {
+ if (origin == SEEK_DATA)
+ index = end;
+ break;
+ }
+ for (i = 0; i < pvec.nr; i++, index++) {
+ if (index < indices[i]) {
+ if (origin == SEEK_HOLE) {
+ done = true;
+ break;
+ }
+ index = indices[i];
+ }
+ page = pvec.pages[i];
+ if (page && !radix_tree_exceptional_entry(page)) {
+ if (!PageUptodate(page))
+ page = NULL;
+ }
+ if (index >= end ||
+ (page && origin == SEEK_DATA) ||
+ (!page && origin == SEEK_HOLE)) {
+ done = true;
+ break;
+ }
+ }
+ shmem_deswap_pagevec(&pvec);
+ pagevec_release(&pvec);
+ pvec.nr = PAGEVEC_SIZE;
+ cond_resched();
+ }
+ return index;
+}
+
+static loff_t shmem_file_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct address_space *mapping;
+ struct inode *inode;
+ pgoff_t start, end;
+ loff_t new_offset;
+
+ if (origin != SEEK_DATA && origin != SEEK_HOLE)
+ return generic_file_llseek_size(file, offset, origin,
+ MAX_LFS_FILESIZE);
+ mapping = file->f_mapping;
+ inode = mapping->host;
+ mutex_lock(&inode->i_mutex);
+ /* We're holding i_mutex so we can access i_size directly */
+
+ if (offset < 0)
+ offset = -EINVAL;
+ else if (offset >= inode->i_size)
+ offset = -ENXIO;
+ else {
+ start = offset >> PAGE_CACHE_SHIFT;
+ end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ new_offset = shmem_seek_hole_data(mapping, start, end, origin);
+ new_offset <<= PAGE_CACHE_SHIFT;
+ if (new_offset > offset) {
+ if (new_offset < inode->i_size)
+ offset = new_offset;
+ else if (origin == SEEK_DATA)
+ offset = -ENXIO;
+ else
+ offset = inode->i_size;
+ }
+ }
+
+ if (offset >= 0 && offset != file->f_pos) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ }
+ mutex_unlock(&inode->i_mutex);
+ return offset;
+}
+
+static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+ loff_t len)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+ struct shmem_falloc shmem_falloc;
+ pgoff_t start, index, end;
+ int error;
+
+ mutex_lock(&inode->i_mutex);
+
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ struct address_space *mapping = file->f_mapping;
+ loff_t unmap_start = round_up(offset, PAGE_SIZE);
+ loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+
+ if ((u64)unmap_end > (u64)unmap_start)
+ unmap_mapping_range(mapping, unmap_start,
+ 1 + unmap_end - unmap_start, 0);
+ shmem_truncate_range(inode, offset, offset + len - 1);
+ /* No need to unmap again: hole-punching leaves COWed pages */
+ error = 0;
+ goto out;
+ }
+
+ /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
+ error = inode_newsize_ok(inode, offset + len);
+ if (error)
+ goto out;
+
+ start = offset >> PAGE_CACHE_SHIFT;
+ end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ /* Try to avoid a swapstorm if len is impossible to satisfy */
+ if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
+ error = -ENOSPC;
+ goto out;
+ }
+
+ shmem_falloc.start = start;
+ shmem_falloc.next = start;
+ shmem_falloc.nr_falloced = 0;
+ shmem_falloc.nr_unswapped = 0;
+ spin_lock(&inode->i_lock);
+ inode->i_private = &shmem_falloc;
+ spin_unlock(&inode->i_lock);
+
+ for (index = start; index < end; index++) {
+ struct page *page;
+
+ /*
+ * Good, the fallocate(2) manpage permits EINTR: we may have
+ * been interrupted because we are using up too much memory.
+ */
+ if (signal_pending(current))
+ error = -EINTR;
+ else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
+ error = -ENOMEM;
+ else
+ error = shmem_getpage(inode, index, &page, SGP_FALLOC,
+ NULL);
+ if (error) {
+ /* Remove the !PageUptodate pages we added */
+ shmem_undo_range(inode,
+ (loff_t)start << PAGE_CACHE_SHIFT,
+ (loff_t)index << PAGE_CACHE_SHIFT, true);
+ goto undone;
+ }
+
+ /*
+ * Inform shmem_writepage() how far we have reached.
+ * No need for lock or barrier: we have the page lock.
+ */
+ shmem_falloc.next++;
+ if (!PageUptodate(page))
+ shmem_falloc.nr_falloced++;
+
+ /*
+ * If !PageUptodate, leave it that way so that freeable pages
+ * can be recognized if we need to rollback on error later.
+ * But set_page_dirty so that memory pressure will swap rather
+ * than free the pages we are allocating (and SGP_CACHE pages
+ * might still be clean: we now need to mark those dirty too).
+ */
+ set_page_dirty(page);
+ unlock_page(page);
+ page_cache_release(page);
+ cond_resched();
+ }
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
+ i_size_write(inode, offset + len);
+ inode->i_ctime = CURRENT_TIME;
+undone:
+ spin_lock(&inode->i_lock);
+ inode->i_private = NULL;
+ spin_unlock(&inode->i_lock);
+out:
+ mutex_unlock(&inode->i_mutex);
+ return error;
+}
+
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
@@ -1665,6 +2070,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
kaddr = kmap_atomic(page);
memcpy(kaddr, symname, len);
kunmap_atomic(kaddr);
+ SetPageUptodate(page);
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
@@ -2270,6 +2676,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
}
}
sb->s_export_op = &shmem_export_ops;
+ sb->s_flags |= MS_NOSEC;
#else
sb->s_flags |= MS_NOUSER;
#endif
@@ -2364,7 +2771,7 @@ static const struct address_space_operations shmem_aops = {
static const struct file_operations shmem_file_operations = {
.mmap = shmem_mmap,
#ifdef CONFIG_TMPFS
- .llseek = generic_file_llseek,
+ .llseek = shmem_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.aio_read = shmem_file_aio_read,
@@ -2372,12 +2779,12 @@ static const struct file_operations shmem_file_operations = {
.fsync = noop_fsync,
.splice_read = shmem_file_splice_read,
.splice_write = generic_file_splice_write,
+ .fallocate = shmem_fallocate,
#endif
};
static const struct inode_operations shmem_inode_operations = {
.setattr = shmem_setattr,
- .truncate_range = shmem_truncate_range,
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.getxattr = shmem_getxattr,
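The shmem changes above give tmpfs files real fallocate(2) support (preallocation via SGP_FALLOC, hole punching via shmem_truncate_range(), FALLOC_FL_KEEP_SIZE honoured) and SEEK_DATA/SEEK_HOLE through shmem_file_llseek(). The sketch below exercises those interfaces from userspace; it is not part of the patch, it assumes /dev/shm is a tmpfs mount on a kernel carrying these changes, the file name and sizes are arbitrary, and FALLOC_FL_* are taken from <linux/falloc.h>.

/*
 * Illustrative only: sparse data, hole punching and preallocation on tmpfs.
 * build: cc -D_GNU_SOURCE shmem_falloc_demo.c -o shmem_falloc_demo
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/dev/shm/shmem-falloc-demo",
		      O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* One page of data at 512 KiB; everything before it stays a hole. */
	memset(buf, 'x', sizeof(buf));
	if (pwrite(fd, buf, sizeof(buf), 512 * 1024) != (ssize_t)sizeof(buf))
		perror("pwrite");

	/* shmem_file_llseek() walks the radix tree for these two seeks. */
	printf("first data at %lld, first hole at %lld\n",
	       (long long)lseek(fd, 0, SEEK_DATA),
	       (long long)lseek(fd, 0, SEEK_HOLE));

	/* Punch the data page back out: the shmem_truncate_range() path. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      512 * 1024, 4096))
		perror("fallocate(PUNCH_HOLE)");

	/* Preallocate 1 MiB without growing i_size: shmem_fallocate(). */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20))
		perror("fallocate(KEEP_SIZE)");

	close(fd);
	unlink("/dev/shm/shmem-falloc-demo");
	return 0;
}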
diff --git a/mm/sparse.c b/mm/sparse.c
index a8bc7d364deb..6a4bf9160e85 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -273,10 +273,10 @@ static unsigned long *__kmalloc_section_usemap(void)
#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
- unsigned long count)
+ unsigned long size)
{
- unsigned long section_nr;
-
+ pg_data_t *host_pgdat;
+ unsigned long goal;
/*
* A page may contain usemaps for other sections preventing the
* page being freed and making a section unremovable while
@@ -287,8 +287,10 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
* from the same section as the pgdat where possible to avoid
* this problem.
*/
- section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
- return alloc_bootmem_section(usemap_size() * count, section_nr);
+ goal = __pa(pgdat) & PAGE_SECTION_MASK;
+ host_pgdat = NODE_DATA(early_pfn_to_nid(goal >> PAGE_SHIFT));
+ return __alloc_bootmem_node_nopanic(host_pgdat, size,
+ SMP_CACHE_BYTES, goal);
}
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -332,9 +334,9 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
- unsigned long count)
+ unsigned long size)
{
- return NULL;
+ return alloc_bootmem_node_nopanic(pgdat, size);
}
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -352,13 +354,10 @@ static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
int size = usemap_size();
usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
- usemap_count);
+ size * usemap_count);
if (!usemap) {
- usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
- if (!usemap) {
- printk(KERN_WARNING "%s: allocation failed\n", __func__);
- return;
- }
+ printk(KERN_WARNING "%s: allocation failed\n", __func__);
+ return;
}
for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
diff --git a/mm/swap.c b/mm/swap.c
index 5c13f1338972..4e7e2ec67078 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -47,13 +47,15 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
static void __page_cache_release(struct page *page)
{
if (PageLRU(page)) {
- unsigned long flags;
struct zone *zone = page_zone(page);
+ struct lruvec *lruvec;
+ unsigned long flags;
spin_lock_irqsave(&zone->lru_lock, flags);
+ lruvec = mem_cgroup_page_lruvec(page, zone);
VM_BUG_ON(!PageLRU(page));
__ClearPageLRU(page);
- del_page_from_lru_list(zone, page, page_off_lru(page));
+ del_page_from_lru_list(page, lruvec, page_off_lru(page));
spin_unlock_irqrestore(&zone->lru_lock, flags);
}
}
@@ -82,6 +84,25 @@ static void put_compound_page(struct page *page)
if (likely(page != page_head &&
get_page_unless_zero(page_head))) {
unsigned long flags;
+
+ /*
+ * THP can not break up slab pages so avoid taking
+ * compound_lock(). Slab performs non-atomic bit ops
+ * on page->flags for better performance. In particular
+ * slab_unlock() in slub used to be a hot path. It is
+ * still hot on arches that do not support
+ * this_cpu_cmpxchg_double().
+ */
+ if (PageSlab(page_head)) {
+ if (PageTail(page)) {
+ if (put_page_testzero(page_head))
+ VM_BUG_ON(1);
+
+ atomic_dec(&page->_mapcount);
+ goto skip_lock_tail;
+ } else
+ goto skip_lock;
+ }
/*
* page_head wasn't a dangling pointer but it
* may not be a head page anymore by the time
@@ -92,10 +113,10 @@ static void put_compound_page(struct page *page)
if (unlikely(!PageTail(page))) {
/* __split_huge_page_refcount run before us */
compound_unlock_irqrestore(page_head, flags);
- VM_BUG_ON(PageHead(page_head));
+skip_lock:
if (put_page_testzero(page_head))
__put_single_page(page_head);
- out_put_single:
+out_put_single:
if (put_page_testzero(page))
__put_single_page(page);
return;
@@ -115,6 +136,8 @@ static void put_compound_page(struct page *page)
VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
VM_BUG_ON(atomic_read(&page->_count) != 0);
compound_unlock_irqrestore(page_head, flags);
+
+skip_lock_tail:
if (put_page_testzero(page_head)) {
if (PageHead(page_head))
__put_compound_page(page_head);
@@ -162,6 +185,18 @@ bool __get_page_tail(struct page *page)
struct page *page_head = compound_trans_head(page);
if (likely(page != page_head && get_page_unless_zero(page_head))) {
+
+ /* Ref to put_compound_page() comment. */
+ if (PageSlab(page_head)) {
+ if (likely(PageTail(page))) {
+ __get_page_tail_foll(page, false);
+ return true;
+ } else {
+ put_page(page_head);
+ return false;
+ }
+ }
+
/*
* page_head wasn't a dangling pointer but it
* may not be a head page anymore by the time
@@ -202,11 +237,12 @@ void put_pages_list(struct list_head *pages)
EXPORT_SYMBOL(put_pages_list);
static void pagevec_lru_move_fn(struct pagevec *pvec,
- void (*move_fn)(struct page *page, void *arg),
- void *arg)
+ void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
+ void *arg)
{
int i;
struct zone *zone = NULL;
+ struct lruvec *lruvec;
unsigned long flags = 0;
for (i = 0; i < pagevec_count(pvec); i++) {
@@ -220,7 +256,8 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
spin_lock_irqsave(&zone->lru_lock, flags);
}
- (*move_fn)(page, arg);
+ lruvec = mem_cgroup_page_lruvec(page, zone);
+ (*move_fn)(page, lruvec, arg);
}
if (zone)
spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -228,16 +265,13 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
pagevec_reinit(pvec);
}
-static void pagevec_move_tail_fn(struct page *page, void *arg)
+static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
+ void *arg)
{
int *pgmoved = arg;
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
enum lru_list lru = page_lru_base_type(page);
- struct lruvec *lruvec;
-
- lruvec = mem_cgroup_lru_move_lists(page_zone(page),
- page, lru, lru);
list_move_tail(&page->lru, &lruvec->lists[lru]);
(*pgmoved)++;
}
@@ -276,41 +310,30 @@ void rotate_reclaimable_page(struct page *page)
}
}
-static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+static void update_page_reclaim_stat(struct lruvec *lruvec,
int file, int rotated)
{
- struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
- struct zone_reclaim_stat *memcg_reclaim_stat;
-
- memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
+ struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
reclaim_stat->recent_scanned[file]++;
if (rotated)
reclaim_stat->recent_rotated[file]++;
-
- if (!memcg_reclaim_stat)
- return;
-
- memcg_reclaim_stat->recent_scanned[file]++;
- if (rotated)
- memcg_reclaim_stat->recent_rotated[file]++;
}
-static void __activate_page(struct page *page, void *arg)
+static void __activate_page(struct page *page, struct lruvec *lruvec,
+ void *arg)
{
- struct zone *zone = page_zone(page);
-
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
int file = page_is_file_cache(page);
int lru = page_lru_base_type(page);
- del_page_from_lru_list(zone, page, lru);
+ del_page_from_lru_list(page, lruvec, lru);
SetPageActive(page);
lru += LRU_ACTIVE;
- add_page_to_lru_list(zone, page, lru);
- __count_vm_event(PGACTIVATE);
+ add_page_to_lru_list(page, lruvec, lru);
- update_page_reclaim_stat(zone, page, file, 1);
+ __count_vm_event(PGACTIVATE);
+ update_page_reclaim_stat(lruvec, file, 1);
}
}
@@ -347,7 +370,7 @@ void activate_page(struct page *page)
struct zone *zone = page_zone(page);
spin_lock_irq(&zone->lru_lock);
- __activate_page(page, NULL);
+ __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
spin_unlock_irq(&zone->lru_lock);
}
#endif
@@ -414,11 +437,13 @@ void lru_cache_add_lru(struct page *page, enum lru_list lru)
void add_page_to_unevictable_list(struct page *page)
{
struct zone *zone = page_zone(page);
+ struct lruvec *lruvec;
spin_lock_irq(&zone->lru_lock);
+ lruvec = mem_cgroup_page_lruvec(page, zone);
SetPageUnevictable(page);
SetPageLRU(page);
- add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
+ add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
spin_unlock_irq(&zone->lru_lock);
}
@@ -443,11 +468,11 @@ void add_page_to_unevictable_list(struct page *page)
* be write it out by flusher threads as this is much more effective
* than the single-page writeout from reclaim.
*/
-static void lru_deactivate_fn(struct page *page, void *arg)
+static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+ void *arg)
{
int lru, file;
bool active;
- struct zone *zone = page_zone(page);
if (!PageLRU(page))
return;
@@ -460,13 +485,13 @@ static void lru_deactivate_fn(struct page *page, void *arg)
return;
active = PageActive(page);
-
file = page_is_file_cache(page);
lru = page_lru_base_type(page);
- del_page_from_lru_list(zone, page, lru + active);
+
+ del_page_from_lru_list(page, lruvec, lru + active);
ClearPageActive(page);
ClearPageReferenced(page);
- add_page_to_lru_list(zone, page, lru);
+ add_page_to_lru_list(page, lruvec, lru);
if (PageWriteback(page) || PageDirty(page)) {
/*
@@ -476,19 +501,17 @@ static void lru_deactivate_fn(struct page *page, void *arg)
*/
SetPageReclaim(page);
} else {
- struct lruvec *lruvec;
/*
* The page's writeback ended while it was on the pagevec:
* move the page to the tail of the inactive list.
*/
- lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
list_move_tail(&page->lru, &lruvec->lists[lru]);
__count_vm_event(PGROTATED);
}
if (active)
__count_vm_event(PGDEACTIVATE);
- update_page_reclaim_stat(zone, page, file, 0);
+ update_page_reclaim_stat(lruvec, file, 0);
}
/*
@@ -588,6 +611,7 @@ void release_pages(struct page **pages, int nr, int cold)
int i;
LIST_HEAD(pages_to_free);
struct zone *zone = NULL;
+ struct lruvec *lruvec;
unsigned long uninitialized_var(flags);
for (i = 0; i < nr; i++) {
@@ -615,9 +639,11 @@ void release_pages(struct page **pages, int nr, int cold)
zone = pagezone;
spin_lock_irqsave(&zone->lru_lock, flags);
}
+
+ lruvec = mem_cgroup_page_lruvec(page, zone);
VM_BUG_ON(!PageLRU(page));
__ClearPageLRU(page);
- del_page_from_lru_list(zone, page, page_off_lru(page));
+ del_page_from_lru_list(page, lruvec, page_off_lru(page));
}
list_add(&page->lru, &pages_to_free);
@@ -649,8 +675,8 @@ EXPORT_SYMBOL(__pagevec_release);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
-void lru_add_page_tail(struct zone* zone,
- struct page *page, struct page *page_tail)
+void lru_add_page_tail(struct page *page, struct page *page_tail,
+ struct lruvec *lruvec)
{
int uninitialized_var(active);
enum lru_list lru;
@@ -659,7 +685,8 @@ void lru_add_page_tail(struct zone* zone,
VM_BUG_ON(!PageHead(page));
VM_BUG_ON(PageCompound(page_tail));
VM_BUG_ON(PageLRU(page_tail));
- VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
+ VM_BUG_ON(NR_CPUS != 1 &&
+ !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
SetPageLRU(page_tail);
@@ -688,20 +715,20 @@ void lru_add_page_tail(struct zone* zone,
* Use the standard add function to put page_tail on the list,
* but then correct its position so they all end up in order.
*/
- add_page_to_lru_list(zone, page_tail, lru);
+ add_page_to_lru_list(page_tail, lruvec, lru);
list_head = page_tail->lru.prev;
list_move_tail(&page_tail->lru, list_head);
}
if (!PageUnevictable(page))
- update_page_reclaim_stat(zone, page_tail, file, active);
+ update_page_reclaim_stat(lruvec, file, active);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-static void __pagevec_lru_add_fn(struct page *page, void *arg)
+static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
+ void *arg)
{
enum lru_list lru = (enum lru_list)arg;
- struct zone *zone = page_zone(page);
int file = is_file_lru(lru);
int active = is_active_lru(lru);
@@ -712,8 +739,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
SetPageLRU(page);
if (active)
SetPageActive(page);
- add_page_to_lru_list(zone, page, lru);
- update_page_reclaim_stat(zone, page, file, active);
+ add_page_to_lru_list(page, lruvec, lru);
+ update_page_reclaim_stat(lruvec, file, active);
}
/*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index fafc26d1b1dc..457b10baef59 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -601,7 +601,7 @@ void swapcache_free(swp_entry_t entry, struct page *page)
* This does not give an exact answer when swap count is continued,
* but does include the high COUNT_CONTINUED flag to allow for that.
*/
-static inline int page_swapcount(struct page *page)
+int page_swapcount(struct page *page)
{
int count = 0;
struct swap_info_struct *p;
@@ -717,37 +717,6 @@ int free_swap_and_cache(swp_entry_t entry)
return p != NULL;
}
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-/**
- * mem_cgroup_count_swap_user - count the user of a swap entry
- * @ent: the swap entry to be checked
- * @pagep: the pointer for the swap cache page of the entry to be stored
- *
- * Returns the number of the user of the swap entry. The number is valid only
- * for swaps of anonymous pages.
- * If the entry is found on swap cache, the page is stored to pagep with
- * refcount of it being incremented.
- */
-int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
-{
- struct page *page;
- struct swap_info_struct *p;
- int count = 0;
-
- page = find_get_page(&swapper_space, ent.val);
- if (page)
- count += page_mapcount(page);
- p = swap_info_get(ent);
- if (p) {
- count += swap_count(p->swap_map[swp_offset(ent)]);
- spin_unlock(&swap_lock);
- }
-
- *pagep = page;
- return count;
-}
-#endif
-
#ifdef CONFIG_HIBERNATION
/*
* Find the swap type that corresponds to given device (if any).
diff --git a/mm/thrash.c b/mm/thrash.c
deleted file mode 100644
index 57ad495dbd54..000000000000
--- a/mm/thrash.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * mm/thrash.c
- *
- * Copyright (C) 2004, Red Hat, Inc.
- * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
- * Released under the GPL, see the file COPYING for details.
- *
- * Simple token based thrashing protection, using the algorithm
- * described in: http://www.cse.ohio-state.edu/hpcs/WWW/HTML/publications/abs05-1.html
- *
- * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
- * Improved algorithm to pass token:
- * Each task has a priority which is incremented if it contended
- * for the token in an interval less than its previous attempt.
- * If the token is acquired, that task's priority is boosted to prevent
- * the token from bouncing around too often and to let the task make
- * some progress in its execution.
- */
-
-#include <linux/jiffies.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/swap.h>
-#include <linux/memcontrol.h>
-
-#include <trace/events/vmscan.h>
-
-#define TOKEN_AGING_INTERVAL (0xFF)
-
-static DEFINE_SPINLOCK(swap_token_lock);
-struct mm_struct *swap_token_mm;
-static struct mem_cgroup *swap_token_memcg;
-
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
-{
- struct mem_cgroup *memcg;
-
- memcg = try_get_mem_cgroup_from_mm(mm);
- if (memcg)
- css_put(mem_cgroup_css(memcg));
-
- return memcg;
-}
-#else
-static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
-{
- return NULL;
-}
-#endif
-
-void grab_swap_token(struct mm_struct *mm)
-{
- int current_interval;
- unsigned int old_prio = mm->token_priority;
- static unsigned int global_faults;
- static unsigned int last_aging;
-
- global_faults++;
-
- current_interval = global_faults - mm->faultstamp;
-
- if (!spin_trylock(&swap_token_lock))
- return;
-
- /* First come first served */
- if (!swap_token_mm)
- goto replace_token;
-
- /*
- * Usually, we don't need priority aging because long interval faults
- * makes priority decrease quickly. But there is one exception. If the
- * token owner task is sleeping, it never make long interval faults.
- * Thus, we need a priority aging mechanism instead. The requirements
- * of priority aging are
- * 1) An aging interval is reasonable enough long. Too short aging
- * interval makes quick swap token lost and decrease performance.
- * 2) The swap token owner task have to get priority aging even if
- * it's under sleep.
- */
- if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
- swap_token_mm->token_priority /= 2;
- last_aging = global_faults;
- }
-
- if (mm == swap_token_mm) {
- mm->token_priority += 2;
- goto update_priority;
- }
-
- if (current_interval < mm->last_interval)
- mm->token_priority++;
- else {
- if (likely(mm->token_priority > 0))
- mm->token_priority--;
- }
-
- /* Check if we deserve the token */
- if (mm->token_priority > swap_token_mm->token_priority)
- goto replace_token;
-
-update_priority:
- trace_update_swap_token_priority(mm, old_prio, swap_token_mm);
-
-out:
- mm->faultstamp = global_faults;
- mm->last_interval = current_interval;
- spin_unlock(&swap_token_lock);
- return;
-
-replace_token:
- mm->token_priority += 2;
- trace_replace_swap_token(swap_token_mm, mm);
- swap_token_mm = mm;
- swap_token_memcg = swap_token_memcg_from_mm(mm);
- last_aging = global_faults;
- goto out;
-}
-
-/* Called on process exit. */
-void __put_swap_token(struct mm_struct *mm)
-{
- spin_lock(&swap_token_lock);
- if (likely(mm == swap_token_mm)) {
- trace_put_swap_token(swap_token_mm);
- swap_token_mm = NULL;
- swap_token_memcg = NULL;
- }
- spin_unlock(&swap_token_lock);
-}
-
-static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
-{
- if (!a)
- return true;
- if (!b)
- return true;
- if (a == b)
- return true;
- return false;
-}
-
-void disable_swap_token(struct mem_cgroup *memcg)
-{
- /* memcg reclaim don't disable unrelated mm token. */
- if (match_memcg(memcg, swap_token_memcg)) {
- spin_lock(&swap_token_lock);
- if (match_memcg(memcg, swap_token_memcg)) {
- trace_disable_swap_token(swap_token_mm);
- swap_token_mm = NULL;
- swap_token_memcg = NULL;
- }
- spin_unlock(&swap_token_lock);
- }
-}
diff --git a/mm/truncate.c b/mm/truncate.c
index 61a183b89df6..75801acdaac7 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -602,31 +602,6 @@ int vmtruncate(struct inode *inode, loff_t newsize)
}
EXPORT_SYMBOL(vmtruncate);
-int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
-{
- struct address_space *mapping = inode->i_mapping;
- loff_t holebegin = round_up(lstart, PAGE_SIZE);
- loff_t holelen = 1 + lend - holebegin;
-
- /*
- * If the underlying filesystem is not going to provide
- * a way to truncate a range of blocks (punch a hole) -
- * we should return failure right now.
- */
- if (!inode->i_op->truncate_range)
- return -ENOSYS;
-
- mutex_lock(&inode->i_mutex);
- inode_dio_wait(inode);
- unmap_mapping_range(mapping, holebegin, holelen, 1);
- inode->i_op->truncate_range(inode, lstart, lend);
- /* unmap again to remove racily COWed private pages */
- unmap_mapping_range(mapping, holebegin, holelen, 1);
- mutex_unlock(&inode->i_mutex);
-
- return 0;
-}
-
/**
* truncate_pagecache_range - unmap and remove pagecache that is hole-punched
* @inode: inode
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 94dff883b449..2aad49981b57 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1185,9 +1185,10 @@ void __init vmalloc_init(void)
/* Import existing vmlist entries. */
for (tmp = vmlist; tmp; tmp = tmp->next) {
va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
- va->flags = tmp->flags | VM_VM_AREA;
+ va->flags = VM_VM_AREA;
va->va_start = (unsigned long)tmp->addr;
va->va_end = va->va_start + tmp->size;
+ va->vm = tmp;
__insert_vmap_area(va);
}
@@ -2375,8 +2376,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
return NULL;
}
- vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
- vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
+ vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
+ vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
if (!vas || !vms)
goto err_free2;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 33dc256033b5..eeb3bc9d1d36 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -53,24 +53,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>
-/*
- * reclaim_mode determines how the inactive list is shrunk
- * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
- * RECLAIM_MODE_ASYNC: Do not block
- * RECLAIM_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback
- * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
- * page from the LRU and reclaim all pages within a
- * naturally aligned range
- * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
- * order-0 pages and then compact the zone
- */
-typedef unsigned __bitwise__ reclaim_mode_t;
-#define RECLAIM_MODE_SINGLE ((__force reclaim_mode_t)0x01u)
-#define RECLAIM_MODE_ASYNC ((__force reclaim_mode_t)0x02u)
-#define RECLAIM_MODE_SYNC ((__force reclaim_mode_t)0x04u)
-#define RECLAIM_MODE_LUMPYRECLAIM ((__force reclaim_mode_t)0x08u)
-#define RECLAIM_MODE_COMPACTION ((__force reclaim_mode_t)0x10u)
-
struct scan_control {
/* Incremented by the number of inactive pages that were scanned */
unsigned long nr_scanned;
@@ -96,11 +78,8 @@ struct scan_control {
int order;
- /*
- * Intend to reclaim enough continuous memory rather than reclaim
- * enough amount of memory. i.e, mode for high order allocation.
- */
- reclaim_mode_t reclaim_mode;
+ /* Scan (total_size >> priority) pages at once */
+ int priority;
/*
* The memory cgroup that hit its limit and as a result is the
@@ -115,11 +94,6 @@ struct scan_control {
nodemask_t *nodemask;
};
-struct mem_cgroup_zone {
- struct mem_cgroup *mem_cgroup;
- struct zone *zone;
-};
-
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
#ifdef ARCH_HAS_PREFETCH
@@ -164,44 +138,21 @@ static bool global_reclaim(struct scan_control *sc)
{
return !sc->target_mem_cgroup;
}
-
-static bool scanning_global_lru(struct mem_cgroup_zone *mz)
-{
- return !mz->mem_cgroup;
-}
#else
static bool global_reclaim(struct scan_control *sc)
{
return true;
}
-
-static bool scanning_global_lru(struct mem_cgroup_zone *mz)
-{
- return true;
-}
#endif
-static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz)
-{
- if (!scanning_global_lru(mz))
- return mem_cgroup_get_reclaim_stat(mz->mem_cgroup, mz->zone);
-
- return &mz->zone->reclaim_stat;
-}
-
-static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
- enum lru_list lru)
+static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
- if (!scanning_global_lru(mz))
- return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup,
- zone_to_nid(mz->zone),
- zone_idx(mz->zone),
- BIT(lru));
+ if (!mem_cgroup_disabled())
+ return mem_cgroup_get_lru_size(lruvec, lru);
- return zone_page_state(mz->zone, NR_LRU_BASE + lru);
+ return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
}
-
/*
* Add a shrinker callback to be called from the vm
*/
@@ -364,39 +315,6 @@ out:
return ret;
}
-static void set_reclaim_mode(int priority, struct scan_control *sc,
- bool sync)
-{
- reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
-
- /*
- * Initially assume we are entering either lumpy reclaim or
- * reclaim/compaction.Depending on the order, we will either set the
- * sync mode or just reclaim order-0 pages later.
- */
- if (COMPACTION_BUILD)
- sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
- else
- sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;
-
- /*
- * Avoid using lumpy reclaim or reclaim/compaction if possible by
- * restricting when its set to either costly allocations or when
- * under memory pressure
- */
- if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
- sc->reclaim_mode |= syncmode;
- else if (sc->order && priority < DEF_PRIORITY - 2)
- sc->reclaim_mode |= syncmode;
- else
- sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
-}
-
-static void reset_reclaim_mode(struct scan_control *sc)
-{
- sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
-}
-
static inline int is_page_cache_freeable(struct page *page)
{
/*
@@ -416,10 +334,6 @@ static int may_write_to_queue(struct backing_dev_info *bdi,
return 1;
if (bdi == current->backing_dev_info)
return 1;
-
- /* lumpy reclaim for hugepage often need a lot of write */
- if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
- return 1;
return 0;
}
@@ -523,8 +437,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
/* synchronous write or broken a_ops? */
ClearPageReclaim(page);
}
- trace_mm_vmscan_writepage(page,
- trace_reclaim_flags(page, sc->reclaim_mode));
+ trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
inc_zone_page_state(page, NR_VMSCAN_WRITE);
return PAGE_SUCCESS;
}
@@ -701,19 +614,15 @@ enum page_references {
};
static enum page_references page_check_references(struct page *page,
- struct mem_cgroup_zone *mz,
struct scan_control *sc)
{
int referenced_ptes, referenced_page;
unsigned long vm_flags;
- referenced_ptes = page_referenced(page, 1, mz->mem_cgroup, &vm_flags);
+ referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
+ &vm_flags);
referenced_page = TestClearPageReferenced(page);
- /* Lumpy reclaim - ignore references */
- if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
- return PAGEREF_RECLAIM;
-
/*
* Mlock lost the isolation race with us. Let try_to_unmap()
* move the page to the unevictable list.
@@ -722,7 +631,7 @@ static enum page_references page_check_references(struct page *page,
return PAGEREF_RECLAIM;
if (referenced_ptes) {
- if (PageAnon(page))
+ if (PageSwapBacked(page))
return PAGEREF_ACTIVATE;
/*
* All mapped pages start out with page table
@@ -763,9 +672,8 @@ static enum page_references page_check_references(struct page *page,
* shrink_page_list() returns the number of reclaimed pages
*/
static unsigned long shrink_page_list(struct list_head *page_list,
- struct mem_cgroup_zone *mz,
+ struct zone *zone,
struct scan_control *sc,
- int priority,
unsigned long *ret_nr_dirty,
unsigned long *ret_nr_writeback)
{
@@ -794,7 +702,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
goto keep;
VM_BUG_ON(PageActive(page));
- VM_BUG_ON(page_zone(page) != mz->zone);
+ VM_BUG_ON(page_zone(page) != zone);
sc->nr_scanned++;
@@ -813,22 +721,11 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (PageWriteback(page)) {
nr_writeback++;
- /*
- * Synchronous reclaim cannot queue pages for
- * writeback due to the possibility of stack overflow
- * but if it encounters a page under writeback, wait
- * for the IO to complete.
- */
- if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
- may_enter_fs)
- wait_on_page_writeback(page);
- else {
- unlock_page(page);
- goto keep_lumpy;
- }
+ unlock_page(page);
+ goto keep;
}
- references = page_check_references(page, mz, sc);
+ references = page_check_references(page, sc);
switch (references) {
case PAGEREF_ACTIVATE:
goto activate_locked;
@@ -879,7 +776,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* unless under significant pressure.
*/
if (page_is_file_cache(page) &&
- (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
+ (!current_is_kswapd() ||
+ sc->priority >= DEF_PRIORITY - 2)) {
/*
* Immediately reclaim when written back.
* Similar in principal to deactivate_page()
@@ -908,7 +806,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
goto activate_locked;
case PAGE_SUCCESS:
if (PageWriteback(page))
- goto keep_lumpy;
+ goto keep;
if (PageDirty(page))
goto keep;
@@ -994,7 +892,6 @@ cull_mlocked:
try_to_free_swap(page);
unlock_page(page);
putback_lru_page(page);
- reset_reclaim_mode(sc);
continue;
activate_locked:
@@ -1007,8 +904,6 @@ activate_locked:
keep_locked:
unlock_page(page);
keep:
- reset_reclaim_mode(sc);
-keep_lumpy:
list_add(&page->lru, &ret_pages);
VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
}
@@ -1020,7 +915,7 @@ keep_lumpy:
* will encounter the same problem
*/
if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
- zone_set_flag(mz->zone, ZONE_CONGESTED);
+ zone_set_flag(zone, ZONE_CONGESTED);
free_hot_cold_page_list(&free_pages, 1);
@@ -1041,34 +936,15 @@ keep_lumpy:
*
* returns 0 on success, -ve errno on failure.
*/
-int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
+int __isolate_lru_page(struct page *page, isolate_mode_t mode)
{
- bool all_lru_mode;
int ret = -EINVAL;
/* Only take pages on the LRU. */
if (!PageLRU(page))
return ret;
- all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
- (ISOLATE_ACTIVE|ISOLATE_INACTIVE);
-
- /*
- * When checking the active state, we need to be sure we are
- * dealing with comparible boolean values. Take the logical not
- * of each.
- */
- if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
- return ret;
-
- if (!all_lru_mode && !!page_is_file_cache(page) != file)
- return ret;
-
- /*
- * When this function is being called for lumpy reclaim, we
- * initially look into all LRU pages, active, inactive and
- * unevictable; only give shrink_page_list evictable pages.
- */
+ /* Do not give back unevictable pages for compaction */
if (PageUnevictable(page))
return ret;
@@ -1135,54 +1011,39 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
* Appropriate locks must be held before calling this function.
*
* @nr_to_scan: The number of pages to look through on the list.
- * @mz: The mem_cgroup_zone to pull pages from.
+ * @lruvec: The LRU vector to pull pages from.
* @dst: The temp list to put pages on to.
* @nr_scanned: The number of pages that were scanned.
* @sc: The scan_control struct for this reclaim session
* @mode: One of the LRU isolation modes
- * @active: True [1] if isolating active pages
- * @file: True [1] if isolating file [!anon] pages
+ * @lru: LRU list id for isolating
*
* returns how many pages were moved onto *@dst.
*/
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
- struct mem_cgroup_zone *mz, struct list_head *dst,
+ struct lruvec *lruvec, struct list_head *dst,
unsigned long *nr_scanned, struct scan_control *sc,
- isolate_mode_t mode, int active, int file)
+ isolate_mode_t mode, enum lru_list lru)
{
- struct lruvec *lruvec;
- struct list_head *src;
+ struct list_head *src = &lruvec->lists[lru];
unsigned long nr_taken = 0;
- unsigned long nr_lumpy_taken = 0;
- unsigned long nr_lumpy_dirty = 0;
- unsigned long nr_lumpy_failed = 0;
unsigned long scan;
- int lru = LRU_BASE;
-
- lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
- if (active)
- lru += LRU_ACTIVE;
- if (file)
- lru += LRU_FILE;
- src = &lruvec->lists[lru];
for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
struct page *page;
- unsigned long pfn;
- unsigned long end_pfn;
- unsigned long page_pfn;
- int zone_id;
+ int nr_pages;
page = lru_to_page(src);
prefetchw_prev_lru_page(page, src, flags);
VM_BUG_ON(!PageLRU(page));
- switch (__isolate_lru_page(page, mode, file)) {
+ switch (__isolate_lru_page(page, mode)) {
case 0:
- mem_cgroup_lru_del(page);
+ nr_pages = hpage_nr_pages(page);
+ mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
list_move(&page->lru, dst);
- nr_taken += hpage_nr_pages(page);
+ nr_taken += nr_pages;
break;
case -EBUSY:
@@ -1193,93 +1054,11 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
default:
BUG();
}
-
- if (!sc->order || !(sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM))
- continue;
-
- /*
- * Attempt to take all pages in the order aligned region
- * surrounding the tag page. Only take those pages of
- * the same active state as that tag page. We may safely
- * round the target page pfn down to the requested order
- * as the mem_map is guaranteed valid out to MAX_ORDER,
- * where that page is in a different zone we will detect
- * it from its zone id and abort this block scan.
- */
- zone_id = page_zone_id(page);
- page_pfn = page_to_pfn(page);
- pfn = page_pfn & ~((1 << sc->order) - 1);
- end_pfn = pfn + (1 << sc->order);
- for (; pfn < end_pfn; pfn++) {
- struct page *cursor_page;
-
- /* The target page is in the block, ignore it. */
- if (unlikely(pfn == page_pfn))
- continue;
-
- /* Avoid holes within the zone. */
- if (unlikely(!pfn_valid_within(pfn)))
- break;
-
- cursor_page = pfn_to_page(pfn);
-
- /* Check that we have not crossed a zone boundary. */
- if (unlikely(page_zone_id(cursor_page) != zone_id))
- break;
-
- /*
- * If we don't have enough swap space, reclaiming of
- * anon page which don't already have a swap slot is
- * pointless.
- */
- if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) &&
- !PageSwapCache(cursor_page))
- break;
-
- if (__isolate_lru_page(cursor_page, mode, file) == 0) {
- unsigned int isolated_pages;
-
- mem_cgroup_lru_del(cursor_page);
- list_move(&cursor_page->lru, dst);
- isolated_pages = hpage_nr_pages(cursor_page);
- nr_taken += isolated_pages;
- nr_lumpy_taken += isolated_pages;
- if (PageDirty(cursor_page))
- nr_lumpy_dirty += isolated_pages;
- scan++;
- pfn += isolated_pages - 1;
- } else {
- /*
- * Check if the page is freed already.
- *
- * We can't use page_count() as that
- * requires compound_head and we don't
- * have a pin on the page here. If a
- * page is tail, we may or may not
- * have isolated the head, so assume
- * it's not free, it'd be tricky to
- * track the head status without a
- * page pin.
- */
- if (!PageTail(cursor_page) &&
- !atomic_read(&cursor_page->_count))
- continue;
- break;
- }
- }
-
- /* If we break out of the loop above, lumpy reclaim failed */
- if (pfn < end_pfn)
- nr_lumpy_failed++;
}
*nr_scanned = scan;
-
- trace_mm_vmscan_lru_isolate(sc->order,
- nr_to_scan, scan,
- nr_taken,
- nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
- mode, file);
+ trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
+ nr_taken, mode, is_file_lru(lru));
return nr_taken;
}
@@ -1316,15 +1095,16 @@ int isolate_lru_page(struct page *page)
if (PageLRU(page)) {
struct zone *zone = page_zone(page);
+ struct lruvec *lruvec;
spin_lock_irq(&zone->lru_lock);
+ lruvec = mem_cgroup_page_lruvec(page, zone);
if (PageLRU(page)) {
int lru = page_lru(page);
- ret = 0;
get_page(page);
ClearPageLRU(page);
-
- del_page_from_lru_list(zone, page, lru);
+ del_page_from_lru_list(page, lruvec, lru);
+ ret = 0;
}
spin_unlock_irq(&zone->lru_lock);
}
@@ -1357,11 +1137,10 @@ static int too_many_isolated(struct zone *zone, int file,
}
static noinline_for_stack void
-putback_inactive_pages(struct mem_cgroup_zone *mz,
- struct list_head *page_list)
+putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
{
- struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
- struct zone *zone = mz->zone;
+ struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
+ struct zone *zone = lruvec_zone(lruvec);
LIST_HEAD(pages_to_free);
/*
@@ -1379,9 +1158,13 @@ putback_inactive_pages(struct mem_cgroup_zone *mz,
spin_lock_irq(&zone->lru_lock);
continue;
}
+
+ lruvec = mem_cgroup_page_lruvec(page, zone);
+
SetPageLRU(page);
lru = page_lru(page);
- add_page_to_lru_list(zone, page, lru);
+ add_page_to_lru_list(page, lruvec, lru);
+
if (is_active_lru(lru)) {
int file = is_file_lru(lru);
int numpages = hpage_nr_pages(page);
@@ -1390,7 +1173,7 @@ putback_inactive_pages(struct mem_cgroup_zone *mz,
if (put_page_testzero(page)) {
__ClearPageLRU(page);
__ClearPageActive(page);
- del_page_from_lru_list(zone, page, lru);
+ del_page_from_lru_list(page, lruvec, lru);
if (unlikely(PageCompound(page))) {
spin_unlock_irq(&zone->lru_lock);
@@ -1407,112 +1190,24 @@ putback_inactive_pages(struct mem_cgroup_zone *mz,
list_splice(&pages_to_free, page_list);
}
-static noinline_for_stack void
-update_isolated_counts(struct mem_cgroup_zone *mz,
- struct list_head *page_list,
- unsigned long *nr_anon,
- unsigned long *nr_file)
-{
- struct zone *zone = mz->zone;
- unsigned int count[NR_LRU_LISTS] = { 0, };
- unsigned long nr_active = 0;
- struct page *page;
- int lru;
-
- /*
- * Count pages and clear active flags
- */
- list_for_each_entry(page, page_list, lru) {
- int numpages = hpage_nr_pages(page);
- lru = page_lru_base_type(page);
- if (PageActive(page)) {
- lru += LRU_ACTIVE;
- ClearPageActive(page);
- nr_active += numpages;
- }
- count[lru] += numpages;
- }
-
- preempt_disable();
- __count_vm_events(PGDEACTIVATE, nr_active);
-
- __mod_zone_page_state(zone, NR_ACTIVE_FILE,
- -count[LRU_ACTIVE_FILE]);
- __mod_zone_page_state(zone, NR_INACTIVE_FILE,
- -count[LRU_INACTIVE_FILE]);
- __mod_zone_page_state(zone, NR_ACTIVE_ANON,
- -count[LRU_ACTIVE_ANON]);
- __mod_zone_page_state(zone, NR_INACTIVE_ANON,
- -count[LRU_INACTIVE_ANON]);
-
- *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
- *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
-
- __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
- __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
- preempt_enable();
-}
-
-/*
- * Returns true if a direct reclaim should wait on pages under writeback.
- *
- * If we are direct reclaiming for contiguous pages and we do not reclaim
- * everything in the list, try again and wait for writeback IO to complete.
- * This will stall high-order allocations noticeably. Only do that when really
- * need to free the pages under high memory pressure.
- */
-static inline bool should_reclaim_stall(unsigned long nr_taken,
- unsigned long nr_freed,
- int priority,
- struct scan_control *sc)
-{
- int lumpy_stall_priority;
-
- /* kswapd should not stall on sync IO */
- if (current_is_kswapd())
- return false;
-
- /* Only stall on lumpy reclaim */
- if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
- return false;
-
- /* If we have reclaimed everything on the isolated list, no stall */
- if (nr_freed == nr_taken)
- return false;
-
- /*
- * For high-order allocations, there are two stall thresholds.
- * High-cost allocations stall immediately where as lower
- * order allocations such as stacks require the scanning
- * priority to be much higher before stalling.
- */
- if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
- lumpy_stall_priority = DEF_PRIORITY;
- else
- lumpy_stall_priority = DEF_PRIORITY / 3;
-
- return priority <= lumpy_stall_priority;
-}
-
/*
* shrink_inactive_list() is a helper for shrink_zone(). It returns the number
* of reclaimed pages
*/
static noinline_for_stack unsigned long
-shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
- struct scan_control *sc, int priority, int file)
+shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
+ struct scan_control *sc, enum lru_list lru)
{
LIST_HEAD(page_list);
unsigned long nr_scanned;
unsigned long nr_reclaimed = 0;
unsigned long nr_taken;
- unsigned long nr_anon;
- unsigned long nr_file;
unsigned long nr_dirty = 0;
unsigned long nr_writeback = 0;
- isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
- struct zone *zone = mz->zone;
- struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+ isolate_mode_t isolate_mode = 0;
+ int file = is_file_lru(lru);
+ struct zone *zone = lruvec_zone(lruvec);
+ struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
while (unlikely(too_many_isolated(zone, file, sc))) {
congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1522,10 +1217,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
return SWAP_CLUSTER_MAX;
}
- set_reclaim_mode(priority, sc, false);
- if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
- isolate_mode |= ISOLATE_ACTIVE;
-
lru_add_drain();
if (!sc->may_unmap)
@@ -1535,38 +1226,30 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
spin_lock_irq(&zone->lru_lock);
- nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, &nr_scanned,
- sc, isolate_mode, 0, file);
+ nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
+ &nr_scanned, sc, isolate_mode, lru);
+
+ __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
+
if (global_reclaim(sc)) {
zone->pages_scanned += nr_scanned;
if (current_is_kswapd())
- __count_zone_vm_events(PGSCAN_KSWAPD, zone,
- nr_scanned);
+ __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
else
- __count_zone_vm_events(PGSCAN_DIRECT, zone,
- nr_scanned);
+ __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
}
spin_unlock_irq(&zone->lru_lock);
if (nr_taken == 0)
return 0;
- update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
-
- nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
+ nr_reclaimed = shrink_page_list(&page_list, zone, sc,
&nr_dirty, &nr_writeback);
- /* Check if we should syncronously wait for writeback */
- if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
- set_reclaim_mode(priority, sc, true);
- nr_reclaimed += shrink_page_list(&page_list, mz, sc,
- priority, &nr_dirty, &nr_writeback);
- }
-
spin_lock_irq(&zone->lru_lock);
- reclaim_stat->recent_scanned[0] += nr_anon;
- reclaim_stat->recent_scanned[1] += nr_file;
+ reclaim_stat->recent_scanned[file] += nr_taken;
if (global_reclaim(sc)) {
if (current_is_kswapd())
@@ -1577,10 +1260,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
nr_reclaimed);
}
- putback_inactive_pages(mz, &page_list);
+ putback_inactive_pages(lruvec, &page_list);
- __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
- __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
spin_unlock_irq(&zone->lru_lock);
@@ -1609,14 +1291,15 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
* DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
* isolated page is PageWriteback
*/
- if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
+ if (nr_writeback && nr_writeback >=
+ (nr_taken >> (DEF_PRIORITY - sc->priority)))
wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
zone_idx(zone),
nr_scanned, nr_reclaimed,
- priority,
- trace_shrink_flags(file, sc->reclaim_mode));
+ sc->priority,
+ trace_shrink_flags(file));
return nr_reclaimed;
}
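
A hedged worked example of the writeback throttle above: assuming DEF_PRIORITY is 12 and SWAP_CLUSTER_MAX is 32 (their usual values in kernels of this vintage), the threshold halves with each priority step, so by DEF_PRIORITY - 6 a single PageWriteback page among the isolated batch is enough to trigger wait_iff_congested(). Standalone sketch, not kernel code:

#include <stdio.h>

#define DEF_PRIORITY     12   /* assumed default scan priority */
#define SWAP_CLUSTER_MAX 32   /* assumed isolation batch size */

int main(void)
{
	unsigned long nr_taken = SWAP_CLUSTER_MAX;
	int priority;

	/* print the throttle threshold as priority drops from 12 to 6 */
	for (priority = DEF_PRIORITY; priority >= DEF_PRIORITY - 6; priority--)
		printf("priority %2d: throttle once nr_writeback >= %lu\n",
		       priority, nr_taken >> (DEF_PRIORITY - priority));
	return 0;
}
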
@@ -1638,30 +1321,32 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
* But we had to alter page->flags anyway.
*/
-static void move_active_pages_to_lru(struct zone *zone,
+static void move_active_pages_to_lru(struct lruvec *lruvec,
struct list_head *list,
struct list_head *pages_to_free,
enum lru_list lru)
{
+ struct zone *zone = lruvec_zone(lruvec);
unsigned long pgmoved = 0;
struct page *page;
+ int nr_pages;
while (!list_empty(list)) {
- struct lruvec *lruvec;
-
page = lru_to_page(list);
+ lruvec = mem_cgroup_page_lruvec(page, zone);
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
- lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+ nr_pages = hpage_nr_pages(page);
+ mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
list_move(&page->lru, &lruvec->lists[lru]);
- pgmoved += hpage_nr_pages(page);
+ pgmoved += nr_pages;
if (put_page_testzero(page)) {
__ClearPageLRU(page);
__ClearPageActive(page);
- del_page_from_lru_list(zone, page, lru);
+ del_page_from_lru_list(page, lruvec, lru);
if (unlikely(PageCompound(page))) {
spin_unlock_irq(&zone->lru_lock);
@@ -1677,9 +1362,9 @@ static void move_active_pages_to_lru(struct zone *zone,
}
static void shrink_active_list(unsigned long nr_to_scan,
- struct mem_cgroup_zone *mz,
+ struct lruvec *lruvec,
struct scan_control *sc,
- int priority, int file)
+ enum lru_list lru)
{
unsigned long nr_taken;
unsigned long nr_scanned;
@@ -1688,15 +1373,14 @@ static void shrink_active_list(unsigned long nr_to_scan,
LIST_HEAD(l_active);
LIST_HEAD(l_inactive);
struct page *page;
- struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+ struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
unsigned long nr_rotated = 0;
- isolate_mode_t isolate_mode = ISOLATE_ACTIVE;
- struct zone *zone = mz->zone;
+ isolate_mode_t isolate_mode = 0;
+ int file = is_file_lru(lru);
+ struct zone *zone = lruvec_zone(lruvec);
lru_add_drain();
- reset_reclaim_mode(sc);
-
if (!sc->may_unmap)
isolate_mode |= ISOLATE_UNMAPPED;
if (!sc->may_writepage)
@@ -1704,18 +1388,15 @@ static void shrink_active_list(unsigned long nr_to_scan,
spin_lock_irq(&zone->lru_lock);
- nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, &nr_scanned, sc,
- isolate_mode, 1, file);
+ nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
+ &nr_scanned, sc, isolate_mode, lru);
if (global_reclaim(sc))
zone->pages_scanned += nr_scanned;
reclaim_stat->recent_scanned[file] += nr_taken;
__count_zone_vm_events(PGREFILL, zone, nr_scanned);
- if (file)
- __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
- else
- __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
+ __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
spin_unlock_irq(&zone->lru_lock);
@@ -1737,7 +1418,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
}
}
- if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) {
+ if (page_referenced(page, 0, sc->target_mem_cgroup,
+ &vm_flags)) {
nr_rotated += hpage_nr_pages(page);
/*
* Identify referenced, file-backed active pages and
@@ -1770,10 +1452,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
*/
reclaim_stat->recent_rotated[file] += nr_rotated;
- move_active_pages_to_lru(zone, &l_active, &l_hold,
- LRU_ACTIVE + file * LRU_FILE);
- move_active_pages_to_lru(zone, &l_inactive, &l_hold,
- LRU_BASE + file * LRU_FILE);
+ move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
+ move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
spin_unlock_irq(&zone->lru_lock);
@@ -1796,13 +1476,12 @@ static int inactive_anon_is_low_global(struct zone *zone)
/**
* inactive_anon_is_low - check if anonymous pages need to be deactivated
- * @zone: zone to check
- * @sc: scan control of this context
+ * @lruvec: LRU vector to check
*
* Returns true if the zone does not have enough inactive anon pages,
* meaning some active anon pages need to be deactivated.
*/
-static int inactive_anon_is_low(struct mem_cgroup_zone *mz)
+static int inactive_anon_is_low(struct lruvec *lruvec)
{
/*
* If we don't have swap space, anonymous page deactivation
@@ -1811,14 +1490,13 @@ static int inactive_anon_is_low(struct mem_cgroup_zone *mz)
if (!total_swap_pages)
return 0;
- if (!scanning_global_lru(mz))
- return mem_cgroup_inactive_anon_is_low(mz->mem_cgroup,
- mz->zone);
+ if (!mem_cgroup_disabled())
+ return mem_cgroup_inactive_anon_is_low(lruvec);
- return inactive_anon_is_low_global(mz->zone);
+ return inactive_anon_is_low_global(lruvec_zone(lruvec));
}
#else
-static inline int inactive_anon_is_low(struct mem_cgroup_zone *mz)
+static inline int inactive_anon_is_low(struct lruvec *lruvec)
{
return 0;
}
@@ -1836,7 +1514,7 @@ static int inactive_file_is_low_global(struct zone *zone)
/**
* inactive_file_is_low - check if file pages need to be deactivated
- * @mz: memory cgroup and zone to check
+ * @lruvec: LRU vector to check
*
* When the system is doing streaming IO, memory pressure here
* ensures that active file pages get deactivated, until more
@@ -1848,44 +1526,39 @@ static int inactive_file_is_low_global(struct zone *zone)
* This uses a different ratio than the anonymous pages, because
* the page cache uses a use-once replacement algorithm.
*/
-static int inactive_file_is_low(struct mem_cgroup_zone *mz)
+static int inactive_file_is_low(struct lruvec *lruvec)
{
- if (!scanning_global_lru(mz))
- return mem_cgroup_inactive_file_is_low(mz->mem_cgroup,
- mz->zone);
+ if (!mem_cgroup_disabled())
+ return mem_cgroup_inactive_file_is_low(lruvec);
- return inactive_file_is_low_global(mz->zone);
+ return inactive_file_is_low_global(lruvec_zone(lruvec));
}
-static int inactive_list_is_low(struct mem_cgroup_zone *mz, int file)
+static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
{
- if (file)
- return inactive_file_is_low(mz);
+ if (is_file_lru(lru))
+ return inactive_file_is_low(lruvec);
else
- return inactive_anon_is_low(mz);
+ return inactive_anon_is_low(lruvec);
}
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
- struct mem_cgroup_zone *mz,
- struct scan_control *sc, int priority)
+ struct lruvec *lruvec, struct scan_control *sc)
{
- int file = is_file_lru(lru);
-
if (is_active_lru(lru)) {
- if (inactive_list_is_low(mz, file))
- shrink_active_list(nr_to_scan, mz, sc, priority, file);
+ if (inactive_list_is_low(lruvec, lru))
+ shrink_active_list(nr_to_scan, lruvec, sc, lru);
return 0;
}
- return shrink_inactive_list(nr_to_scan, mz, sc, priority, file);
+ return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
}
-static int vmscan_swappiness(struct mem_cgroup_zone *mz,
- struct scan_control *sc)
+static int vmscan_swappiness(struct scan_control *sc)
{
if (global_reclaim(sc))
return vm_swappiness;
- return mem_cgroup_swappiness(mz->mem_cgroup);
+ return mem_cgroup_swappiness(sc->target_mem_cgroup);
}
/*
@@ -1896,17 +1569,18 @@ static int vmscan_swappiness(struct mem_cgroup_zone *mz,
*
* nr[0] = anon pages to scan; nr[1] = file pages to scan
*/
-static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
- unsigned long *nr, int priority)
+static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
+ unsigned long *nr)
{
unsigned long anon, file, free;
unsigned long anon_prio, file_prio;
unsigned long ap, fp;
- struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+ struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
u64 fraction[2], denominator;
enum lru_list lru;
int noswap = 0;
bool force_scan = false;
+ struct zone *zone = lruvec_zone(lruvec);
/*
* If the zone or memcg is small, nr[l] can be 0. This
@@ -1918,7 +1592,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
* latencies, so it's better to scan a minimum amount there as
* well.
*/
- if (current_is_kswapd() && mz->zone->all_unreclaimable)
+ if (current_is_kswapd() && zone->all_unreclaimable)
force_scan = true;
if (!global_reclaim(sc))
force_scan = true;
@@ -1932,16 +1606,16 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
goto out;
}
- anon = zone_nr_lru_pages(mz, LRU_ACTIVE_ANON) +
- zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
- file = zone_nr_lru_pages(mz, LRU_ACTIVE_FILE) +
- zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+ anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
+ get_lru_size(lruvec, LRU_INACTIVE_ANON);
+ file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
+ get_lru_size(lruvec, LRU_INACTIVE_FILE);
if (global_reclaim(sc)) {
- free = zone_page_state(mz->zone, NR_FREE_PAGES);
+ free = zone_page_state(zone, NR_FREE_PAGES);
/* If we have very few page cache pages,
force-scan anon pages. */
- if (unlikely(file + free <= high_wmark_pages(mz->zone))) {
+ if (unlikely(file + free <= high_wmark_pages(zone))) {
fraction[0] = 1;
fraction[1] = 0;
denominator = 1;
@@ -1953,8 +1627,8 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
* With swappiness at 100, anonymous and file have the same priority.
* This scanning priority is essentially the inverse of IO cost.
*/
- anon_prio = vmscan_swappiness(mz, sc);
- file_prio = 200 - vmscan_swappiness(mz, sc);
+ anon_prio = vmscan_swappiness(sc);
+ file_prio = 200 - anon_prio;
/*
* OK, so we have swap space and a fair amount of page cache
@@ -1967,7 +1641,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
*
* anon in [0], file in [1]
*/
- spin_lock_irq(&mz->zone->lru_lock);
+ spin_lock_irq(&zone->lru_lock);
if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
reclaim_stat->recent_scanned[0] /= 2;
reclaim_stat->recent_rotated[0] /= 2;
@@ -1983,12 +1657,12 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
* proportional to the fraction of recently scanned pages on
* each list that were recently referenced and in active use.
*/
- ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
+ ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
ap /= reclaim_stat->recent_rotated[0] + 1;
- fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
+ fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
fp /= reclaim_stat->recent_rotated[1] + 1;
- spin_unlock_irq(&mz->zone->lru_lock);
+ spin_unlock_irq(&zone->lru_lock);
fraction[0] = ap;
fraction[1] = fp;
@@ -1998,9 +1672,9 @@ out:
int file = is_file_lru(lru);
unsigned long scan;
- scan = zone_nr_lru_pages(mz, lru);
- if (priority || noswap) {
- scan >>= priority;
+ scan = get_lru_size(lruvec, lru);
+ if (sc->priority || noswap || !vmscan_swappiness(sc)) {
+ scan >>= sc->priority;
if (!scan && force_scan)
scan = SWAP_CLUSTER_MAX;
scan = div64_u64(scan * fraction[file], denominator);
@@ -2009,14 +1683,25 @@ out:
}
}
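
To make the anon/file balancing arithmetic above concrete, the following standalone sketch mirrors what get_scan_count() now computes; the reclaim_stat numbers are invented, vm_swappiness is assumed at its default of 60, and the priority shift is omitted for brevity:

#include <stdio.h>

int main(void)
{
	/* assumed values, not taken from a real system */
	unsigned long long anon_prio = 60;             /* vmscan_swappiness() */
	unsigned long long file_prio = 200 - anon_prio;
	unsigned long long recent_scanned[2] = { 800, 4000 };  /* anon, file */
	unsigned long long recent_rotated[2] = { 400,  200 };
	unsigned long long lru_size = 16384;           /* pages on one list */
	unsigned long long ap, fp, denominator;

	/* pressure is proportional to scanned/rotated on each list */
	ap = anon_prio * (recent_scanned[0] + 1);
	ap /= recent_rotated[0] + 1;
	fp = file_prio * (recent_scanned[1] + 1);
	fp /= recent_rotated[1] + 1;
	denominator = ap + fp + 1;

	printf("fraction: anon=%llu file=%llu\n", ap, fp);
	printf("scan targets: anon=%llu file=%llu of %llu pages each\n",
	       lru_size * ap / denominator,
	       lru_size * fp / denominator,
	       lru_size);
	return 0;
}
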
+/* Use reclaim/compaction for costly allocs or under memory pressure */
+static bool in_reclaim_compaction(struct scan_control *sc)
+{
+ if (COMPACTION_BUILD && sc->order &&
+ (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
+ sc->priority < DEF_PRIORITY - 2))
+ return true;
+
+ return false;
+}
+
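
A brief illustration of when the new in_reclaim_compaction() check fires, assuming PAGE_ALLOC_COSTLY_ORDER is 3 and DEF_PRIORITY is 12, with COMPACTION_BUILD taken as true (sketch only):

#include <stdio.h>
#include <stdbool.h>

#define PAGE_ALLOC_COSTLY_ORDER 3    /* assumed */
#define DEF_PRIORITY            12   /* assumed */

/* mirrors the helper above: costly order, or sufficient pressure */
static bool reclaim_compaction(int order, int priority)
{
	return order && (order > PAGE_ALLOC_COSTLY_ORDER ||
			 priority < DEF_PRIORITY - 2);
}

int main(void)
{
	printf("order 9, priority 12: %d\n", reclaim_compaction(9, 12)); /* 1 */
	printf("order 2, priority 12: %d\n", reclaim_compaction(2, 12)); /* 0 */
	printf("order 2, priority  9: %d\n", reclaim_compaction(2, 9));  /* 1 */
	printf("order 0, priority  5: %d\n", reclaim_compaction(0, 5));  /* 0 */
	return 0;
}
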
/*
- * Reclaim/compaction depends on a number of pages being freed. To avoid
- * disruption to the system, a small number of order-0 pages continue to be
- * rotated and reclaimed in the normal fashion. However, by the time we get
- * back to the allocator and call try_to_compact_zone(), we ensure that
- * there are enough free pages for it to be likely successful
+ * Reclaim/compaction is used for high-order allocation requests. It reclaims
+ * order-0 pages before compacting the zone. should_continue_reclaim() returns
+ * true if more pages should be reclaimed such that when the page allocator
+ * calls try_to_compact_zone() that it will have enough free pages to succeed.
+ * It will give up earlier than that if there is difficulty reclaiming pages.
*/
-static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
+static inline bool should_continue_reclaim(struct lruvec *lruvec,
unsigned long nr_reclaimed,
unsigned long nr_scanned,
struct scan_control *sc)
@@ -2025,7 +1710,7 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
unsigned long inactive_lru_pages;
/* If not in reclaim/compaction mode, stop */
- if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
+ if (!in_reclaim_compaction(sc))
return false;
/* Consider stopping depending on scan and reclaim activity */
@@ -2056,15 +1741,15 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
* inactive lists are large enough, continue reclaiming
*/
pages_for_compaction = (2UL << sc->order);
- inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+ inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
if (nr_swap_pages > 0)
- inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
+ inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
if (sc->nr_reclaimed < pages_for_compaction &&
inactive_lru_pages > pages_for_compaction)
return true;
/* If compaction would go ahead or the allocation would succeed, stop */
- switch (compaction_suitable(mz->zone, sc->order)) {
+ switch (compaction_suitable(lruvec_zone(lruvec), sc->order)) {
case COMPACT_PARTIAL:
case COMPACT_CONTINUE:
return false;
@@ -2076,8 +1761,7 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
/*
* This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
*/
-static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
- struct scan_control *sc)
+static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
unsigned long nr[NR_LRU_LISTS];
unsigned long nr_to_scan;
@@ -2089,7 +1773,7 @@ static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
restart:
nr_reclaimed = 0;
nr_scanned = sc->nr_scanned;
- get_scan_count(mz, sc, nr, priority);
+ get_scan_count(lruvec, sc, nr);
blk_start_plug(&plug);
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -2101,7 +1785,7 @@ restart:
nr[lru] -= nr_to_scan;
nr_reclaimed += shrink_list(lru, nr_to_scan,
- mz, sc, priority);
+ lruvec, sc);
}
}
/*
@@ -2112,7 +1796,8 @@ restart:
* with multiple processes reclaiming pages, the total
* freeing target can get unreasonably large.
*/
- if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
+ if (nr_reclaimed >= nr_to_reclaim &&
+ sc->priority < DEF_PRIORITY)
break;
}
blk_finish_plug(&plug);
@@ -2122,35 +1807,33 @@ restart:
* Even if we did not try to evict anon pages at all, we want to
* rebalance the anon lru active/inactive ratio.
*/
- if (inactive_anon_is_low(mz))
- shrink_active_list(SWAP_CLUSTER_MAX, mz, sc, priority, 0);
+ if (inactive_anon_is_low(lruvec))
+ shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
+ sc, LRU_ACTIVE_ANON);
/* reclaim/compaction might need reclaim to continue */
- if (should_continue_reclaim(mz, nr_reclaimed,
- sc->nr_scanned - nr_scanned, sc))
+ if (should_continue_reclaim(lruvec, nr_reclaimed,
+ sc->nr_scanned - nr_scanned, sc))
goto restart;
throttle_vm_writeout(sc->gfp_mask);
}
-static void shrink_zone(int priority, struct zone *zone,
- struct scan_control *sc)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
{
struct mem_cgroup *root = sc->target_mem_cgroup;
struct mem_cgroup_reclaim_cookie reclaim = {
.zone = zone,
- .priority = priority,
+ .priority = sc->priority,
};
struct mem_cgroup *memcg;
memcg = mem_cgroup_iter(root, NULL, &reclaim);
do {
- struct mem_cgroup_zone mz = {
- .mem_cgroup = memcg,
- .zone = zone,
- };
+ struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+
+ shrink_lruvec(lruvec, sc);
- shrink_mem_cgroup_zone(priority, &mz, sc);
/*
* Limit reclaim has historically picked one memcg and
* scanned it with decreasing priority levels until
@@ -2226,8 +1909,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
* the caller that it should consider retrying the allocation instead of
* further reclaim.
*/
-static bool shrink_zones(int priority, struct zonelist *zonelist,
- struct scan_control *sc)
+static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
{
struct zoneref *z;
struct zone *zone;
@@ -2254,7 +1936,8 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
if (global_reclaim(sc)) {
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc->priority != DEF_PRIORITY)
continue; /* Let kswapd poll it */
if (COMPACTION_BUILD) {
/*
@@ -2286,7 +1969,7 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
/* need some check for avoid more shrink_zone() */
}
- shrink_zone(priority, zone, sc);
+ shrink_zone(zone, sc);
}
return aborted_reclaim;
@@ -2337,7 +2020,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
struct scan_control *sc,
struct shrink_control *shrink)
{
- int priority;
unsigned long total_scanned = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
struct zoneref *z;
@@ -2350,11 +2032,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
if (global_reclaim(sc))
count_vm_event(ALLOCSTALL);
- for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+ do {
sc->nr_scanned = 0;
- if (!priority)
- disable_swap_token(sc->target_mem_cgroup);
- aborted_reclaim = shrink_zones(priority, zonelist, sc);
+ aborted_reclaim = shrink_zones(zonelist, sc);
/*
* Don't shrink slabs when reclaiming memory from
@@ -2396,7 +2076,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
/* Take a nap, wait for some writeback to complete */
if (!sc->hibernation_mode && sc->nr_scanned &&
- priority < DEF_PRIORITY - 2) {
+ sc->priority < DEF_PRIORITY - 2) {
struct zone *preferred_zone;
first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
@@ -2404,7 +2084,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
&preferred_zone);
wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
}
- }
+ } while (--sc->priority >= 0);
out:
delayacct_freepages_end();
@@ -2442,6 +2122,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
.may_unmap = 1,
.may_swap = 1,
.order = order,
+ .priority = DEF_PRIORITY,
.target_mem_cgroup = NULL,
.nodemask = nodemask,
};
@@ -2474,17 +2155,15 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
.may_unmap = 1,
.may_swap = !noswap,
.order = 0,
+ .priority = 0,
.target_mem_cgroup = memcg,
};
- struct mem_cgroup_zone mz = {
- .mem_cgroup = memcg,
- .zone = zone,
- };
+ struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
- trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
+ trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
sc.may_writepage,
sc.gfp_mask);
@@ -2495,7 +2174,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
* will pick up pages from other mem cgroup's as well. We hack
* the priority and make it zero.
*/
- shrink_mem_cgroup_zone(0, &mz, &sc);
+ shrink_lruvec(lruvec, &sc);
trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
@@ -2516,6 +2195,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
.may_swap = !noswap,
.nr_to_reclaim = SWAP_CLUSTER_MAX,
.order = 0,
+ .priority = DEF_PRIORITY,
.target_mem_cgroup = memcg,
.nodemask = NULL, /* we don't care the placement */
.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
@@ -2546,8 +2226,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
}
#endif
-static void age_active_anon(struct zone *zone, struct scan_control *sc,
- int priority)
+static void age_active_anon(struct zone *zone, struct scan_control *sc)
{
struct mem_cgroup *memcg;
@@ -2556,14 +2235,11 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc,
memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
- struct mem_cgroup_zone mz = {
- .mem_cgroup = memcg,
- .zone = zone,
- };
+ struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
- if (inactive_anon_is_low(&mz))
- shrink_active_list(SWAP_CLUSTER_MAX, &mz,
- sc, priority, 0);
+ if (inactive_anon_is_low(lruvec))
+ shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
+ sc, LRU_ACTIVE_ANON);
memcg = mem_cgroup_iter(NULL, memcg, NULL);
} while (memcg);
@@ -2672,7 +2348,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
{
int all_zones_ok;
unsigned long balanced;
- int priority;
int i;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
unsigned long total_scanned;
@@ -2696,18 +2371,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
};
loop_again:
total_scanned = 0;
+ sc.priority = DEF_PRIORITY;
sc.nr_reclaimed = 0;
sc.may_writepage = !laptop_mode;
count_vm_event(PAGEOUTRUN);
- for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+ do {
unsigned long lru_pages = 0;
int has_under_min_watermark_zone = 0;
- /* The swap token gets in the way of swapout... */
- if (!priority)
- disable_swap_token(NULL);
-
all_zones_ok = 1;
balanced = 0;
@@ -2721,14 +2393,15 @@ loop_again:
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc.priority != DEF_PRIORITY)
continue;
/*
* Do some background aging of the anon list, to give
* pages a chance to be referenced before reclaiming.
*/
- age_active_anon(zone, &sc, priority);
+ age_active_anon(zone, &sc);
/*
* If the number of buffer_heads in the machine
@@ -2776,7 +2449,8 @@ loop_again:
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc.priority != DEF_PRIORITY)
continue;
sc.nr_scanned = 0;
@@ -2820,7 +2494,7 @@ loop_again:
!zone_watermark_ok_safe(zone, testorder,
high_wmark_pages(zone) + balance_gap,
end_zone, 0)) {
- shrink_zone(priority, zone, &sc);
+ shrink_zone(zone, &sc);
reclaim_state->reclaimed_slab = 0;
nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
@@ -2877,7 +2551,7 @@ loop_again:
* OK, kswapd is getting into trouble. Take a nap, then take
* another pass across the zones.
*/
- if (total_scanned && (priority < DEF_PRIORITY - 2)) {
+ if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
if (has_under_min_watermark_zone)
count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
else
@@ -2892,7 +2566,7 @@ loop_again:
*/
if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
break;
- }
+ } while (--sc.priority >= 0);
out:
/*
@@ -2942,7 +2616,8 @@ out:
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc.priority != DEF_PRIORITY)
continue;
/* Would compaction fail due to lack of free memory? */
@@ -3209,6 +2884,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
.nr_to_reclaim = nr_to_reclaim,
.hibernation_mode = 1,
.order = 0,
+ .priority = DEF_PRIORITY,
};
struct shrink_control shrink = {
.gfp_mask = sc.gfp_mask,
@@ -3386,7 +3062,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
const unsigned long nr_pages = 1 << order;
struct task_struct *p = current;
struct reclaim_state reclaim_state;
- int priority;
struct scan_control sc = {
.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
@@ -3395,6 +3070,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
.order = order,
+ .priority = ZONE_RECLAIM_PRIORITY,
};
struct shrink_control shrink = {
.gfp_mask = sc.gfp_mask,
@@ -3417,11 +3093,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
* Free memory by calling shrink zone with increasing
* priorities until we have enough memory freed.
*/
- priority = ZONE_RECLAIM_PRIORITY;
do {
- shrink_zone(priority, zone, &sc);
- priority--;
- } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
+ shrink_zone(zone, &sc);
+ } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
}
nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
@@ -3536,7 +3210,7 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
if (mapping_unevictable(page_mapping(page)))
return 0;
- if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
+ if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
return 0;
return 1;
@@ -3572,6 +3246,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
zone = pagezone;
spin_lock_irq(&zone->lru_lock);
}
+ lruvec = mem_cgroup_page_lruvec(page, zone);
if (!PageLRU(page) || !PageUnevictable(page))
continue;
@@ -3581,11 +3256,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
VM_BUG_ON(PageActive(page));
ClearPageUnevictable(page);
- __dec_zone_state(zone, NR_UNEVICTABLE);
- lruvec = mem_cgroup_lru_move_lists(zone, page,
- LRU_UNEVICTABLE, lru);
- list_move(&page->lru, &lruvec->lists[lru]);
- __inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+ del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
+ add_page_to_lru_list(page, lruvec, lru);
pgrescued++;
}
}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7db1b9bab492..1bbbbd9776ad 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -613,6 +613,9 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
"Reclaimable",
"Movable",
"Reserve",
+#ifdef CONFIG_CMA
+ "CMA",
+#endif
"Isolate",
};
@@ -1220,7 +1223,6 @@ module_init(setup_vmstat)
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
#include <linux/debugfs.h>
-static struct dentry *extfrag_debug_root;
/*
* Return an index indicating how much of the available free memory is
@@ -1358,19 +1360,24 @@ static const struct file_operations extfrag_file_ops = {
static int __init extfrag_debug_init(void)
{
+ struct dentry *extfrag_debug_root;
+
extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
if (!extfrag_debug_root)
return -ENOMEM;
if (!debugfs_create_file("unusable_index", 0444,
extfrag_debug_root, NULL, &unusable_file_ops))
- return -ENOMEM;
+ goto fail;
if (!debugfs_create_file("extfrag_index", 0444,
extfrag_debug_root, NULL, &extfrag_file_ops))
- return -ENOMEM;
+ goto fail;
return 0;
+fail:
+ debugfs_remove_recursive(extfrag_debug_root);
+ return -ENOMEM;
}
module_init(extfrag_debug_init);
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 6fb68a9743af..46e7f86acfc9 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
}
if (sk->sk_state == BT_CONNECTED || !newsock ||
- bt_sk(parent)->defer_setup) {
+ test_bit(BT_DEFER_SETUP, &bt_sk(parent)->flags)) {
bt_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
@@ -410,8 +410,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
if (sk->sk_state == BT_CONNECTED ||
- (bt_sk(parent)->defer_setup &&
- sk->sk_state == BT_CONNECT2))
+ (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
+ sk->sk_state == BT_CONNECT2))
return POLLIN | POLLRDNORM;
}
@@ -450,7 +450,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wa
sk->sk_state == BT_CONFIG)
return mask;
- if (!bt_sk(sk)->suspended && sock_writeable(sk))
+ if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 88884d1d95fd..031d7d656754 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -340,7 +340,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
}
/* Strip 802.1p header */
- if (ntohs(s->eh.h_proto) == 0x8100) {
+ if (ntohs(s->eh.h_proto) == ETH_P_8021Q) {
if (!skb_pull(skb, 4))
goto badframe;
s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 5238b6b3ea6a..3f18a6ed9731 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -223,36 +223,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
}
EXPORT_SYMBOL(hci_le_start_enc);
-void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
-{
- struct hci_dev *hdev = conn->hdev;
- struct hci_cp_le_ltk_reply cp;
-
- BT_DBG("%p", conn);
-
- memset(&cp, 0, sizeof(cp));
-
- cp.handle = cpu_to_le16(conn->handle);
- memcpy(cp.ltk, ltk, sizeof(ltk));
-
- hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
-}
-EXPORT_SYMBOL(hci_le_ltk_reply);
-
-void hci_le_ltk_neg_reply(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
- struct hci_cp_le_ltk_neg_reply cp;
-
- BT_DBG("%p", conn);
-
- memset(&cp, 0, sizeof(cp));
-
- cp.handle = cpu_to_le16(conn->handle);
-
- hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
-}
-
/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
@@ -513,7 +483,8 @@ EXPORT_SYMBOL(hci_get_route);
/* Create SCO, ACL or LE connection.
* Device _must_ be locked */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ __u8 dst_type, __u8 sec_level, __u8 auth_type)
{
struct hci_conn *acl;
struct hci_conn *sco;
@@ -522,23 +493,18 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
BT_DBG("%s dst %s", hdev->name, batostr(dst));
if (type == LE_LINK) {
- struct adv_entry *entry;
-
le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
- if (le)
- return ERR_PTR(-EBUSY);
-
- entry = hci_find_adv_entry(hdev, dst);
- if (!entry)
- return ERR_PTR(-EHOSTUNREACH);
+ if (!le) {
+ le = hci_conn_add(hdev, LE_LINK, dst);
+ if (!le)
+ return ERR_PTR(-ENOMEM);
- le = hci_conn_add(hdev, LE_LINK, dst);
- if (!le)
- return ERR_PTR(-ENOMEM);
-
- le->dst_type = entry->bdaddr_type;
+ le->dst_type = bdaddr_to_le(dst_type);
+ hci_le_connect(le);
+ }
- hci_le_connect(le);
+ le->pending_sec_level = sec_level;
+ le->auth_type = auth_type;
hci_conn_hold(le);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index d6dc44cd15b0..411ace8e647b 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -83,6 +83,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
*/
if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
+ u16 opcode = __le16_to_cpu(sent->opcode);
struct sk_buff *skb;
/* Some CSR based controllers generate a spontaneous
@@ -92,7 +93,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
* command.
*/
- if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
+ if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
return;
skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
@@ -251,6 +252,9 @@ static void amp_init(struct hci_dev *hdev)
/* Read Local Version */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+
+ /* Read Local AMP Info */
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -384,7 +388,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
case DISCOVERY_STOPPED:
if (hdev->discovery.state != DISCOVERY_STARTING)
mgmt_discovering(hdev, 0);
- hdev->discovery.type = 0;
break;
case DISCOVERY_STARTING:
break;
@@ -1089,32 +1092,6 @@ static const struct rfkill_ops hci_rfkill_ops = {
.set_block = hci_rfkill_set_block,
};
-/* Alloc HCI device */
-struct hci_dev *hci_alloc_dev(void)
-{
- struct hci_dev *hdev;
-
- hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
- if (!hdev)
- return NULL;
-
- hci_init_sysfs(hdev);
- skb_queue_head_init(&hdev->driver_init);
-
- return hdev;
-}
-EXPORT_SYMBOL(hci_alloc_dev);
-
-/* Free HCI device */
-void hci_free_dev(struct hci_dev *hdev)
-{
- skb_queue_purge(&hdev->driver_init);
-
- /* will free via device release */
- put_device(&hdev->dev);
-}
-EXPORT_SYMBOL(hci_free_dev);
-
static void hci_power_on(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
@@ -1336,7 +1313,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
- int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
+ int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
ediv, u8 rand[8])
{
struct smp_ltk *key, *old_key;
@@ -1544,75 +1521,6 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
return mgmt_device_unblocked(hdev, bdaddr, type);
}
-static void hci_clear_adv_cache(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- adv_work.work);
-
- hci_dev_lock(hdev);
-
- hci_adv_entries_clear(hdev);
-
- hci_dev_unlock(hdev);
-}
-
-int hci_adv_entries_clear(struct hci_dev *hdev)
-{
- struct adv_entry *entry, *tmp;
-
- list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
- list_del(&entry->list);
- kfree(entry);
- }
-
- BT_DBG("%s adv cache cleared", hdev->name);
-
- return 0;
-}
-
-struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
-{
- struct adv_entry *entry;
-
- list_for_each_entry(entry, &hdev->adv_entries, list)
- if (bacmp(bdaddr, &entry->bdaddr) == 0)
- return entry;
-
- return NULL;
-}
-
-static inline int is_connectable_adv(u8 evt_type)
-{
- if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
- return 1;
-
- return 0;
-}
-
-int hci_add_adv_entry(struct hci_dev *hdev,
- struct hci_ev_le_advertising_info *ev)
-{
- struct adv_entry *entry;
-
- if (!is_connectable_adv(ev->evt_type))
- return -EINVAL;
-
- /* Only new entries should be added to adv_entries. So, if
- * bdaddr was found, don't add it. */
- if (hci_find_adv_entry(hdev, &ev->bdaddr))
- return 0;
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- bacpy(&entry->bdaddr, &ev->bdaddr);
- entry->bdaddr_type = ev->bdaddr_type;
-
- list_add(&entry->list, &hdev->adv_entries);
-
- BT_DBG("%s adv entry added: address %s type %u", hdev->name,
- batostr(&entry->bdaddr), entry->bdaddr_type);
-
- return 0;
-}
-
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
struct le_scan_params *param = (struct le_scan_params *) opt;
@@ -1670,6 +1578,24 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
return 0;
}
+int hci_cancel_le_scan(struct hci_dev *hdev)
+{
+ BT_DBG("%s", hdev->name);
+
+ if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ return -EALREADY;
+
+ if (cancel_delayed_work(&hdev->le_scan_disable)) {
+ struct hci_cp_le_set_scan_enable cp;
+
+ /* Send HCI command to disable LE Scan */
+ memset(&cp, 0, sizeof(cp));
+ hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+ }
+
+ return 0;
+}
+
static void le_scan_disable_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
@@ -1714,95 +1640,103 @@ int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
return 0;
}
-/* Register HCI device */
-int hci_register_dev(struct hci_dev *hdev)
+/* Alloc HCI device */
+struct hci_dev *hci_alloc_dev(void)
{
- struct list_head *head = &hci_dev_list, *p;
- int i, id, error;
-
- BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
-
- if (!hdev->open || !hdev->close)
- return -EINVAL;
-
- /* Do not allow HCI_AMP devices to register at index 0,
- * so the index can be used as the AMP controller ID.
- */
- id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
-
- write_lock(&hci_dev_list_lock);
-
- /* Find first available device id */
- list_for_each(p, &hci_dev_list) {
- if (list_entry(p, struct hci_dev, list)->id != id)
- break;
- head = p; id++;
- }
-
- sprintf(hdev->name, "hci%d", id);
- hdev->id = id;
- list_add_tail(&hdev->list, head);
+ struct hci_dev *hdev;
- mutex_init(&hdev->lock);
+ hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
+ if (!hdev)
+ return NULL;
- hdev->flags = 0;
- hdev->dev_flags = 0;
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
hdev->io_capability = 0x03; /* No Input No Output */
- hdev->idle_timeout = 0;
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
+ mutex_init(&hdev->lock);
+ mutex_init(&hdev->req_lock);
+
+ INIT_LIST_HEAD(&hdev->mgmt_pending);
+ INIT_LIST_HEAD(&hdev->blacklist);
+ INIT_LIST_HEAD(&hdev->uuids);
+ INIT_LIST_HEAD(&hdev->link_keys);
+ INIT_LIST_HEAD(&hdev->long_term_keys);
+ INIT_LIST_HEAD(&hdev->remote_oob_data);
+
INIT_WORK(&hdev->rx_work, hci_rx_work);
INIT_WORK(&hdev->cmd_work, hci_cmd_work);
INIT_WORK(&hdev->tx_work, hci_tx_work);
+ INIT_WORK(&hdev->power_on, hci_power_on);
+ INIT_WORK(&hdev->le_scan, le_scan_work);
+ INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+ INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
+ INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
+ skb_queue_head_init(&hdev->driver_init);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
- setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
-
- for (i = 0; i < NUM_REASSEMBLY; i++)
- hdev->reassembly[i] = NULL;
-
init_waitqueue_head(&hdev->req_wait_q);
- mutex_init(&hdev->req_lock);
- discovery_init(hdev);
+ setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
+ hci_init_sysfs(hdev);
+ discovery_init(hdev);
hci_conn_hash_init(hdev);
- INIT_LIST_HEAD(&hdev->mgmt_pending);
-
- INIT_LIST_HEAD(&hdev->blacklist);
+ return hdev;
+}
+EXPORT_SYMBOL(hci_alloc_dev);
- INIT_LIST_HEAD(&hdev->uuids);
+/* Free HCI device */
+void hci_free_dev(struct hci_dev *hdev)
+{
+ skb_queue_purge(&hdev->driver_init);
- INIT_LIST_HEAD(&hdev->link_keys);
- INIT_LIST_HEAD(&hdev->long_term_keys);
+ /* will free via device release */
+ put_device(&hdev->dev);
+}
+EXPORT_SYMBOL(hci_free_dev);
- INIT_LIST_HEAD(&hdev->remote_oob_data);
+/* Register HCI device */
+int hci_register_dev(struct hci_dev *hdev)
+{
+ struct list_head *head, *p;
+ int id, error;
- INIT_LIST_HEAD(&hdev->adv_entries);
+ if (!hdev->open || !hdev->close)
+ return -EINVAL;
- INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
- INIT_WORK(&hdev->power_on, hci_power_on);
- INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+ write_lock(&hci_dev_list_lock);
- INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
+ /* Do not allow HCI_AMP devices to register at index 0,
+ * so the index can be used as the AMP controller ID.
+ */
+ id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
+ head = &hci_dev_list;
- memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
+ /* Find first available device id */
+ list_for_each(p, &hci_dev_list) {
+ int nid = list_entry(p, struct hci_dev, list)->id;
+ if (nid > id)
+ break;
+ if (nid == id)
+ id++;
+ head = p;
+ }
- atomic_set(&hdev->promisc, 0);
+ sprintf(hdev->name, "hci%d", id);
+ hdev->id = id;
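
The reworked scan above walks the id-ordered device list and settles on the first unused index at or above the starting id (0 for BR/EDR, 1 for AMP), rather than assuming the ids are contiguous. A small userspace sketch of that walk with a made-up set of registered ids (illustrative only; the insertion-point bookkeeping via head is dropped):

#include <stdio.h>

int main(void)
{
	int existing[] = { 0, 1, 2, 5, 6 };   /* ids already registered, sorted */
	int n = sizeof(existing) / sizeof(existing[0]);
	int id = 0;                           /* BR/EDR starts at 0 */
	int i;

	for (i = 0; i < n; i++) {
		if (existing[i] > id)
			break;                /* found a gap below this entry */
		if (existing[i] == id)
			id++;                 /* taken, try the next index */
	}
	printf("hci%d\n", id);                /* prints hci3 */
	return 0;
}
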
- INIT_WORK(&hdev->le_scan, le_scan_work);
+ BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
+ list_add(&hdev->list, head);
write_unlock(&hci_dev_list_lock);
@@ -1884,8 +1818,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_del_sysfs(hdev);
- cancel_delayed_work_sync(&hdev->adv_work);
-
destroy_workqueue(hdev->workqueue);
hci_dev_lock(hdev);
@@ -1894,7 +1826,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_link_keys_clear(hdev);
hci_smp_ltks_clear(hdev);
hci_remote_oob_data_clear(hdev);
- hci_adv_entries_clear(hdev);
hci_dev_unlock(hdev);
hci_dev_put(hdev);
@@ -2231,6 +2162,12 @@ static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
+ skb->len = skb_headlen(skb);
+ skb->data_len = 0;
+
+ bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+ hci_add_acl_hdr(skb, conn->handle, flags);
+
list = skb_shinfo(skb)->frag_list;
if (!list) {
/* Non fragmented */
@@ -2274,8 +2211,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
skb->dev = (void *) hdev;
- bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags);
hci_queue_acl(conn, &chan->data_q, skb, flags);
@@ -2313,7 +2248,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *conn = NULL, *c;
- int num = 0, min = ~0;
+ unsigned int num = 0, min = ~0;
/* We don't have to lock device here. Connections are always
* added and removed with TX task disabled. */
@@ -2394,7 +2329,7 @@ static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_chan *chan = NULL;
- int num = 0, min = ~0, cur_prio = 0;
+ unsigned int num = 0, min = ~0, cur_prio = 0;
struct hci_conn *conn;
int cnt, q, conn_num = 0;
@@ -2945,7 +2880,19 @@ int hci_cancel_inquiry(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_INQUIRY, &hdev->flags))
- return -EPERM;
+ return -EALREADY;
return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
+
+u8 bdaddr_to_le(u8 bdaddr_type)
+{
+ switch (bdaddr_type) {
+ case BDADDR_LE_PUBLIC:
+ return ADDR_LE_DEV_PUBLIC;
+
+ default:
+ /* Fallback to LE Random address type */
+ return ADDR_LE_DEV_RANDOM;
+ }
+}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 1266f78fa8e3..4eefb7f65cf6 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -69,6 +69,18 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_check_pending(hdev);
}
+static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ if (status)
+ return;
+
+ set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
+}
+
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -78,6 +90,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;
+ clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
+
hci_conn_check_pending(hdev);
}
@@ -192,7 +206,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_complete(hdev, HCI_OP_RESET, status);
/* Reset all non-persistent flags */
- hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
+ hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
+ BIT(HCI_PERIODIC_INQ));
hdev->discovery.state = DISCOVERY_STOPPED;
}
@@ -505,7 +520,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
events[5] |= 0x10; /* Synchronous Connection Changed */
if (hdev->features[3] & LMP_RSSI_INQ)
- events[4] |= 0x04; /* Inquiry Result with RSSI */
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
if (hdev->features[5] & LMP_SNIFF_SUBR)
events[5] |= 0x20; /* Sniff Subrating */
@@ -615,6 +630,7 @@ done:
static void hci_setup_link_policy(struct hci_dev *hdev)
{
+ struct hci_cp_write_def_link_policy cp;
u16 link_policy = 0;
if (hdev->features[0] & LMP_RSWITCH)
@@ -626,9 +642,8 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
if (hdev->features[1] & LMP_PARK)
link_policy |= HCI_LP_PARK;
- link_policy = cpu_to_le16(link_policy);
- hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
- &link_policy);
+ cp.policy = cpu_to_le16(link_policy);
+ hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -710,7 +725,7 @@ static void hci_set_le_support(struct hci_dev *hdev)
memset(&cp, 0, sizeof(cp));
- if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
cp.le = 1;
cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
}
@@ -887,11 +902,14 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (!rp->status)
+ hdev->inq_tx_power = rp->tx_power;
- hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
+ hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
}
static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1082,23 +1100,23 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
set_bit(HCI_LE_SCAN, &hdev->dev_flags);
- cancel_delayed_work_sync(&hdev->adv_work);
-
hci_dev_lock(hdev);
- hci_adv_entries_clear(hdev);
hci_discovery_set_state(hdev, DISCOVERY_FINDING);
hci_dev_unlock(hdev);
break;
case LE_SCANNING_DISABLED:
- if (status)
+ if (status) {
+ hci_dev_lock(hdev);
+ mgmt_stop_discovery_failed(hdev, status);
+ hci_dev_unlock(hdev);
return;
+ }
clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
- schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
-
- if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
+ if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
+ hdev->discovery.state == DISCOVERY_FINDING) {
mgmt_interleaved_discovery(hdev);
} else {
hci_dev_lock(hdev);
@@ -1625,6 +1643,8 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
if (status) {
if (conn && conn->state == BT_CONNECT) {
conn->state = BT_CLOSED;
+ mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
+ conn->dst_type, status);
hci_proto_connect_cfm(conn, status);
hci_conn_del(conn);
}
@@ -1699,6 +1719,9 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
@@ -2040,7 +2063,7 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
if (ev->status && conn->state == BT_CONNECTED) {
- hci_acl_disconn(conn, 0x13);
+ hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
hci_conn_put(conn);
goto unlock;
}
@@ -2154,6 +2177,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_inquiry_cancel(hdev, skb);
break;
+ case HCI_OP_PERIODIC_INQ:
+ hci_cc_periodic_inq(hdev, skb);
+ break;
+
case HCI_OP_EXIT_PERIODIC_INQ:
hci_cc_exit_periodic_inq(hdev, skb);
break;
@@ -2806,6 +2833,9 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
@@ -2971,12 +3001,16 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
struct inquiry_data data;
struct extended_inquiry_info *info = (void *) (skb->data + 1);
int num_rsp = *((__u8 *) skb->data);
+ size_t eir_len;
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
@@ -3000,9 +3034,10 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
name_known = hci_inquiry_cache_update(hdev, &data, name_known,
&ssp);
+ eir_len = eir_get_length(info->data, sizeof(info->data));
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi, !name_known,
- ssp, info->data, sizeof(info->data));
+ ssp, info->data, eir_len);
}
hci_dev_unlock(hdev);
@@ -3322,8 +3357,6 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
while (num_reports--) {
struct hci_ev_le_advertising_info *ev = ptr;
- hci_add_adv_entry(hdev, ev);
-
rssi = ev->data[ev->length];
mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
NULL, rssi, 0, 1, ev->data, ev->length);
@@ -3343,7 +3376,7 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
struct hci_conn *conn;
struct smp_ltk *ltk;
- BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
+ BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
hci_dev_lock(hdev);
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index bc154298979a..937f3187eafa 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -444,8 +444,8 @@ static const struct file_operations blacklist_fops = {
static void print_bt_uuid(struct seq_file *f, u8 *uuid)
{
- u32 data0, data4;
- u16 data1, data2, data3, data5;
+ __be32 data0, data4;
+ __be16 data1, data2, data3, data5;
memcpy(&data0, &uuid[0], 4);
memcpy(&data1, &uuid[4], 2);
@@ -533,7 +533,6 @@ int hci_add_sysfs(struct hci_dev *hdev)
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- dev->parent = hdev->parent;
dev_set_name(dev, "%s", hdev->name);
err = device_add(dev);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 6f9c25b633a6..24f144b72a96 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -4,6 +4,7 @@
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
Copyright (C) 2011 ProFUSION Embedded Systems
+ Copyright (c) 2012 Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -70,7 +71,7 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
- struct l2cap_chan *chan, int err);
+ struct l2cap_chan *chan, int err);
/* ---- L2CAP channels ---- */
@@ -97,13 +98,15 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16
}
/* Find channel with given SCID.
- * Returns locked socket */
+ * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
struct l2cap_chan *c;
mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_scid(conn, cid);
+ if (c)
+ l2cap_chan_lock(c);
mutex_unlock(&conn->chan_lock);
return c;
@@ -120,17 +123,6 @@ static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8
return NULL;
}
-static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
-{
- struct l2cap_chan *c;
-
- mutex_lock(&conn->chan_lock);
- c = __l2cap_get_chan_by_ident(conn, ident);
- mutex_unlock(&conn->chan_lock);
-
- return c;
-}
-
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
struct l2cap_chan *c;
@@ -232,6 +224,124 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
release_sock(sk);
}
+/* ---- L2CAP sequence number lists ---- */
+
+/* For ERTM, ordered lists of sequence numbers must be tracked for
+ * SREJ requests that are received and for frames that are to be
+ * retransmitted. These seq_list functions implement a singly-linked
+ * list in an array, where membership in the list can also be checked
+ * in constant time. Items can also be added to the tail of the list
+ * and removed from the head in constant time, without further memory
+ * allocs or frees.
+ */
+
+static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
+{
+ size_t alloc_size, i;
+
+ /* Allocated size is a power of 2 to map sequence numbers
+ * (which may be up to 14 bits) into a smaller array that is
+ * sized for the negotiated ERTM transmit windows.
+ */
+ alloc_size = roundup_pow_of_two(size);
+
+ seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
+ if (!seq_list->list)
+ return -ENOMEM;
+
+ seq_list->mask = alloc_size - 1;
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ for (i = 0; i < alloc_size; i++)
+ seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
+
+ return 0;
+}
+
+static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
+{
+ kfree(seq_list->list);
+}
+
+static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
+ u16 seq)
+{
+ /* Constant-time check for list membership */
+ return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
+}
+
+static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
+{
+ u16 mask = seq_list->mask;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
+ /* In case someone tries to pop the head of an empty list */
+ return L2CAP_SEQ_LIST_CLEAR;
+ } else if (seq_list->head == seq) {
+ /* Head can be removed in constant time */
+ seq_list->head = seq_list->list[seq & mask];
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ }
+ } else {
+ /* Walk the list to find the sequence number */
+ u16 prev = seq_list->head;
+ while (seq_list->list[prev & mask] != seq) {
+ prev = seq_list->list[prev & mask];
+ if (prev == L2CAP_SEQ_LIST_TAIL)
+ return L2CAP_SEQ_LIST_CLEAR;
+ }
+
+ /* Unlink the number from the list and clear it */
+ seq_list->list[prev & mask] = seq_list->list[seq & mask];
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
+ if (seq_list->tail == seq)
+ seq_list->tail = prev;
+ }
+ return seq;
+}
+
+static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
+{
+ /* Remove the head in constant time */
+ return l2cap_seq_list_remove(seq_list, seq_list->head);
+}
+
+static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
+{
+ u16 i;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ for (i = 0; i <= seq_list->mask; i++)
+ seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
+
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+}
+
+static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
+{
+ u16 mask = seq_list->mask;
+
+ /* All appends happen in constant time */
+
+ if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
+ seq_list->head = seq;
+ else
+ seq_list->list[seq_list->tail & mask] = seq;
+
+ seq_list->tail = seq;
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
+}
+
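As a rough illustration of the array-backed sequence list described in the comment above, here is a self-contained user-space sketch; the names, sentinel values and sizes are illustrative, not the kernel's L2CAP_SEQ_LIST_* definitions:

/* Hedged sketch of a FIFO of sequence numbers stored in an array,
 * giving O(1) membership test, append and pop.  Sequence numbers are
 * at most 14 bits, so 0xffff/0xfffe are safe sentinels here.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SEQ_CLEAR 0xffff	/* slot unused / list empty */
#define SEQ_TAIL  0xfffe	/* marks the last element   */

struct seq_list {
	uint16_t head, tail, mask;
	uint16_t *list;
};

static int seq_list_init(struct seq_list *l, uint16_t size)
{
	uint32_t alloc = 1;

	while (alloc < size)		/* round up to a power of two */
		alloc <<= 1;

	l->list = malloc(alloc * sizeof(*l->list));
	if (!l->list)
		return -1;
	l->mask = alloc - 1;
	l->head = l->tail = SEQ_CLEAR;
	for (uint32_t i = 0; i < alloc; i++)
		l->list[i] = SEQ_CLEAR;
	return 0;
}

static int seq_list_contains(const struct seq_list *l, uint16_t seq)
{
	return l->list[seq & l->mask] != SEQ_CLEAR;
}

static void seq_list_append(struct seq_list *l, uint16_t seq)
{
	if (l->list[seq & l->mask] != SEQ_CLEAR)
		return;			/* already queued */
	if (l->tail == SEQ_CLEAR)
		l->head = seq;
	else
		l->list[l->tail & l->mask] = seq;
	l->tail = seq;
	l->list[seq & l->mask] = SEQ_TAIL;
}

static uint16_t seq_list_pop(struct seq_list *l)
{
	uint16_t seq = l->head;

	if (seq == SEQ_CLEAR)
		return SEQ_CLEAR;
	l->head = l->list[seq & l->mask];
	l->list[seq & l->mask] = SEQ_CLEAR;
	if (l->head == SEQ_TAIL)
		l->head = l->tail = SEQ_CLEAR;
	return seq;
}

int main(void)
{
	struct seq_list l;
	uint16_t first, second;

	if (seq_list_init(&l, 63))
		return 1;

	seq_list_append(&l, 5);
	seq_list_append(&l, 9);
	printf("contains 9: %d\n", seq_list_contains(&l, 9));

	first = seq_list_pop(&l);
	second = seq_list_pop(&l);
	printf("popped %u then %u\n", first, second);

	free(l.list);
	return 0;
}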
static void l2cap_chan_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
@@ -262,7 +372,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
l2cap_chan_put(chan);
}
-struct l2cap_chan *l2cap_chan_create(struct sock *sk)
+struct l2cap_chan *l2cap_chan_create(void)
{
struct l2cap_chan *chan;
@@ -272,8 +382,6 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
mutex_init(&chan->lock);
- chan->sk = sk;
-
write_lock(&chan_list_lock);
list_add(&chan->global_l, &chan_list);
write_unlock(&chan_list_lock);
@@ -284,7 +392,7 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
atomic_set(&chan->refcnt, 1);
- BT_DBG("sk %p chan %p", sk, chan);
+ BT_DBG("chan %p", chan);
return chan;
}
@@ -298,10 +406,21 @@ void l2cap_chan_destroy(struct l2cap_chan *chan)
l2cap_chan_put(chan);
}
-void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+void l2cap_chan_set_defaults(struct l2cap_chan *chan)
+{
+ chan->fcs = L2CAP_FCS_CRC16;
+ chan->max_tx = L2CAP_DEFAULT_MAX_TX;
+ chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+ chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+ chan->sec_level = BT_SECURITY_LOW;
+
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+}
+
+static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
- chan->psm, chan->dcid);
+ __le16_to_cpu(chan->psm), chan->dcid);
conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
@@ -347,7 +466,7 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
list_add(&chan->list, &conn->chan_l);
}
-void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
mutex_lock(&conn->chan_lock);
__l2cap_chan_add(conn, chan);
@@ -405,6 +524,8 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
skb_queue_purge(&chan->srej_q);
+ l2cap_seq_list_free(&chan->srej_list);
+ l2cap_seq_list_free(&chan->retrans_list);
list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
list_del(&l->list);
kfree(l);
@@ -453,7 +574,6 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
case BT_CONFIG:
if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
conn->hcon->type == ACL_LINK) {
- __clear_chan_timer(chan);
__set_chan_timer(chan, sk->sk_sndtimeo);
l2cap_send_disconn_req(conn, chan, reason);
} else
@@ -466,7 +586,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
struct l2cap_conn_rsp rsp;
__u16 result;
- if (bt_sk(sk)->defer_setup)
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
result = L2CAP_CR_SEC_BLOCK;
else
result = L2CAP_CR_BAD_PSM;
@@ -599,6 +719,117 @@ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
hci_send_acl(chan->conn->hchan, skb, flags);
}
+static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
+{
+ control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
+ control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
+
+ if (enh & L2CAP_CTRL_FRAME_TYPE) {
+ /* S-Frame */
+ control->sframe = 1;
+ control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
+ control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ /* I-Frame */
+ control->sframe = 0;
+ control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
+ control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
+static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
+{
+ control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+ control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
+
+ if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
+ /* S-Frame */
+ control->sframe = 1;
+ control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
+ control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ /* I-Frame */
+ control->sframe = 0;
+ control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
+ control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
+static inline void __unpack_control(struct l2cap_chan *chan,
+ struct sk_buff *skb)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ __unpack_extended_control(get_unaligned_le32(skb->data),
+ &bt_cb(skb)->control);
+ } else {
+ __unpack_enhanced_control(get_unaligned_le16(skb->data),
+ &bt_cb(skb)->control);
+ }
+}
+
+static u32 __pack_extended_control(struct l2cap_ctrl *control)
+{
+ u32 packed;
+
+ packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+ packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
+
+ if (control->sframe) {
+ packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
+ packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
+ packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
+ } else {
+ packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
+ packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+ }
+
+ return packed;
+}
+
+static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
+{
+ u16 packed;
+
+ packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
+ packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
+
+ if (control->sframe) {
+ packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
+ packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
+ packed |= L2CAP_CTRL_FRAME_TYPE;
+ } else {
+ packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
+ packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
+ }
+
+ return packed;
+}
+
+static inline void __pack_control(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ put_unaligned_le32(__pack_extended_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ } else {
+ put_unaligned_le16(__pack_enhanced_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ }
+}
+
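For orientation, a standalone sketch of the 16-bit enhanced control word that __pack_enhanced_control() and __unpack_enhanced_control() handle. The bit positions follow the usual ERTM enhanced control layout (frame type in bit 0, TxSeq in bits 1-6, F in bit 7, ReqSeq in bits 8-13, SAR in bits 14-15, with S/P in bits 2-3/4 for S-frames) and are assumptions here; the kernel's authoritative values are its L2CAP_CTRL_* macros:

#include <stdint.h>
#include <stdio.h>

struct ctrl {
	uint16_t sframe, sar, txseq, reqseq, final, poll, super;
};

static uint16_t pack_enh(const struct ctrl *c)
{
	uint16_t v = (c->reqseq << 8) | (c->final << 7);

	if (c->sframe)
		v |= (c->poll << 4) | (c->super << 2) | 0x0001;
	else
		v |= (c->sar << 14) | (c->txseq << 1);
	return v;
}

static void unpack_enh(uint16_t v, struct ctrl *c)
{
	c->reqseq = (v >> 8) & 0x3f;
	c->final  = (v >> 7) & 0x01;
	c->sframe = v & 0x0001;
	if (c->sframe) {
		c->poll  = (v >> 4) & 0x01;
		c->super = (v >> 2) & 0x03;
		c->sar = c->txseq = 0;
	} else {
		c->sar   = (v >> 14) & 0x03;
		c->txseq = (v >> 1) & 0x3f;
		c->poll = c->super = 0;
	}
}

int main(void)
{
	struct ctrl in = { .sframe = 0, .sar = 1, .txseq = 12, .reqseq = 34 };
	struct ctrl out;
	uint16_t w = pack_enh(&in);

	unpack_enh(w, &out);
	printf("word 0x%04x txseq %u reqseq %u sar %u\n",
	       w, out.txseq, out.reqseq, out.sar);
	return 0;
}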
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
{
struct sk_buff *skb;
@@ -681,10 +912,38 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan)
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
+static void l2cap_chan_ready(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->sk;
+ struct sock *parent;
+
+ lock_sock(sk);
+
+ parent = bt_sk(sk)->parent;
+
+ BT_DBG("sk %p, parent %p", sk, parent);
+
+ chan->conf_state = 0;
+ __clear_chan_timer(chan);
+
+ __l2cap_state_change(chan, BT_CONNECTED);
+ sk->sk_state_change(sk);
+
+ if (parent)
+ parent->sk_data_ready(parent, 0);
+
+ release_sock(sk);
+}
+
static void l2cap_do_start(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
+ if (conn->hcon->type == LE_LINK) {
+ l2cap_chan_ready(chan);
+ return;
+ }
+
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
return;
@@ -791,7 +1050,8 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
if (l2cap_chan_check_security(chan)) {
lock_sock(sk);
- if (bt_sk(sk)->defer_setup) {
+ if (test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
struct sock *parent = bt_sk(sk)->parent;
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
@@ -830,10 +1090,12 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
mutex_unlock(&conn->chan_lock);
}
-/* Find socket with cid and source bdaddr.
+/* Find socket with cid and source/destination bdaddr.
* Returns closest match, locked.
*/
-static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
+static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
+ bdaddr_t *src,
+ bdaddr_t *dst)
{
struct l2cap_chan *c, *c1 = NULL;
@@ -846,14 +1108,22 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdadd
continue;
if (c->scid == cid) {
+ int src_match, dst_match;
+ int src_any, dst_any;
+
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src)) {
+ src_match = !bacmp(&bt_sk(sk)->src, src);
+ dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+ if (src_match && dst_match) {
read_unlock(&chan_list_lock);
return c;
}
/* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+ src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
+ dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+ if ((src_match && dst_any) || (src_any && dst_match) ||
+ (src_any && dst_any))
c1 = c;
}
}
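The matching rule above treats an exact source/destination pair as the best result and otherwise falls back to the closest listener, with BDADDR_ANY acting as a wildcard on either side. A compact sketch of the same precedence (types and names are hypothetical, not the kernel's):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct { unsigned char b[6]; } bdaddr;

static const bdaddr any;	/* 00:00:00:00:00:00 == BDADDR_ANY */

static bool baeq(const bdaddr *a, const bdaddr *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

/* 2 = exact match, 1 = closest (wildcard) match, 0 = no match */
static int match(const bdaddr *chan_src, const bdaddr *chan_dst,
		 const bdaddr *src, const bdaddr *dst)
{
	bool src_match = baeq(chan_src, src);
	bool dst_match = baeq(chan_dst, dst);
	bool src_any = baeq(chan_src, &any);
	bool dst_any = baeq(chan_dst, &any);

	if (src_match && dst_match)
		return 2;
	if ((src_match && dst_any) || (src_any && dst_match) ||
	    (src_any && dst_any))
		return 1;
	return 0;
}

int main(void)
{
	bdaddr src = { { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 } };
	bdaddr dst = { { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff } };

	printf("wildcard listener: %d, bound listener: %d\n",
	       match(&any, &any, &src, &dst), match(&src, &dst, &src, &dst));
	return 0;
}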
@@ -872,7 +1142,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
/* Check if we have socket listening on cid */
pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
- conn->src);
+ conn->src, conn->dst);
if (!pchan)
return;
@@ -910,29 +1180,6 @@ clean:
release_sock(parent);
}
-static void l2cap_chan_ready(struct l2cap_chan *chan)
-{
- struct sock *sk = chan->sk;
- struct sock *parent;
-
- lock_sock(sk);
-
- parent = bt_sk(sk)->parent;
-
- BT_DBG("sk %p, parent %p", sk, parent);
-
- chan->conf_state = 0;
- __clear_chan_timer(chan);
-
- __l2cap_state_change(chan, BT_CONNECTED);
- sk->sk_state_change(sk);
-
- if (parent)
- parent->sk_data_ready(parent, 0);
-
- release_sock(sk);
-}
-
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
struct l2cap_chan *chan;
@@ -1016,6 +1263,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
/* Kill channels */
list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
+ l2cap_chan_hold(chan);
l2cap_chan_lock(chan);
l2cap_chan_del(chan, err);
@@ -1023,6 +1271,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
l2cap_chan_unlock(chan);
chan->ops->close(chan->data);
+ l2cap_chan_put(chan);
}
mutex_unlock(&conn->chan_lock);
@@ -1100,10 +1349,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
/* ---- Socket interface ---- */
-/* Find socket with psm and source bdaddr.
+/* Find socket with psm and source/destination bdaddr.
* Returns closest match.
*/
-static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
+static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
+ bdaddr_t *src,
+ bdaddr_t *dst)
{
struct l2cap_chan *c, *c1 = NULL;
@@ -1116,14 +1367,22 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
continue;
if (c->psm == psm) {
+ int src_match, dst_match;
+ int src_any, dst_any;
+
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src)) {
+ src_match = !bacmp(&bt_sk(sk)->src, src);
+ dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+ if (src_match && dst_match) {
read_unlock(&chan_list_lock);
return c;
}
/* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+ src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
+ dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+ if ((src_match && dst_any) || (src_any && dst_match) ||
+ (src_any && dst_any))
c1 = c;
}
}
@@ -1133,7 +1392,8 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
return c1;
}
-int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
+int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ bdaddr_t *dst, u8 dst_type)
{
struct sock *sk = chan->sk;
bdaddr_t *src = &bt_sk(sk)->src;
@@ -1143,8 +1403,8 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
__u8 auth_type;
int err;
- BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
- chan->psm);
+ BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
+ dst_type, __le16_to_cpu(chan->psm));
hdev = hci_get_route(dst, src);
if (!hdev)
@@ -1218,11 +1478,11 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
auth_type = l2cap_get_auth_type(chan);
if (chan->dcid == L2CAP_CID_LE_DATA)
- hcon = hci_connect(hdev, LE_LINK, dst,
- chan->sec_level, auth_type);
+ hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
+ chan->sec_level, auth_type);
else
- hcon = hci_connect(hdev, ACL_LINK, dst,
- chan->sec_level, auth_type);
+ hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
+ chan->sec_level, auth_type);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
@@ -1236,6 +1496,18 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
goto done;
}
+ if (hcon->type == LE_LINK) {
+ err = 0;
+
+ if (!list_empty(&conn->chan_l)) {
+ err = -EBUSY;
+ hci_conn_put(hcon);
+ }
+
+ if (err)
+ goto done;
+ }
+
/* Update source addr of the socket */
bacpy(src, conn->src);
@@ -1346,7 +1618,7 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
while ((skb = skb_peek(&chan->tx_q)) &&
chan->unacked_frames) {
- if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
+ if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
break;
skb = skb_dequeue(&chan->tx_q);
@@ -1368,6 +1640,7 @@ static void l2cap_streaming_send(struct l2cap_chan *chan)
while ((skb = skb_dequeue(&chan->tx_q))) {
control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
control |= __set_txseq(chan, chan->next_tx_seq);
+ control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
__put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
if (chan->fcs == L2CAP_FCS_CRC16) {
@@ -1393,21 +1666,21 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
if (!skb)
return;
- while (bt_cb(skb)->tx_seq != tx_seq) {
+ while (bt_cb(skb)->control.txseq != tx_seq) {
if (skb_queue_is_last(&chan->tx_q, skb))
return;
skb = skb_queue_next(&chan->tx_q, skb);
}
- if (chan->remote_max_tx &&
- bt_cb(skb)->retries == chan->remote_max_tx) {
+ if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
+ chan->remote_max_tx) {
l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
return;
}
tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->retries++;
+ bt_cb(skb)->control.retries++;
control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
control &= __get_sar_mask(chan);
@@ -1440,17 +1713,20 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
if (chan->state != BT_CONNECTED)
return -ENOTCONN;
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+ return 0;
+
while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
- if (chan->remote_max_tx &&
- bt_cb(skb)->retries == chan->remote_max_tx) {
+ if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
+ chan->remote_max_tx) {
l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
break;
}
tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->retries++;
+ bt_cb(skb)->control.retries++;
control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
control &= __get_sar_mask(chan);
@@ -1460,6 +1736,7 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
control |= __set_reqseq(chan, chan->buffer_seq);
control |= __set_txseq(chan, chan->next_tx_seq);
+ control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
__put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
@@ -1474,11 +1751,11 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
__set_retrans_timer(chan);
- bt_cb(skb)->tx_seq = chan->next_tx_seq;
+ bt_cb(skb)->control.txseq = chan->next_tx_seq;
chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
- if (bt_cb(skb)->retries == 1) {
+ if (bt_cb(skb)->control.retries == 1) {
chan->unacked_frames++;
if (!nsent++)
@@ -1554,7 +1831,7 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff **frag;
- int err, sent = 0;
+ int sent = 0;
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
return -EFAULT;
@@ -1565,14 +1842,17 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
while (len) {
+ struct sk_buff *tmp;
+
count = min_t(unsigned int, conn->mtu, len);
- *frag = chan->ops->alloc_skb(chan, count,
- msg->msg_flags & MSG_DONTWAIT,
- &err);
+ tmp = chan->ops->alloc_skb(chan, count,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ *frag = tmp;
- if (!*frag)
- return err;
if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
return -EFAULT;
@@ -1581,6 +1861,9 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
sent += count;
len -= count;
+ skb->len += (*frag)->len;
+ skb->data_len += (*frag)->len;
+
frag = &(*frag)->next;
}
@@ -1601,18 +1884,17 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = chan->ops->alloc_skb(chan, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
-
- if (!skb)
- return ERR_PTR(err);
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
skb->priority = priority;
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- put_unaligned_le16(chan->psm, skb_put(skb, 2));
+ lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
+ put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
if (unlikely(err < 0)) {
@@ -1628,25 +1910,24 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE;
+ int err, count;
struct l2cap_hdr *lh;
BT_DBG("chan %p len %d", chan, (int)len);
- count = min_t(unsigned int, (conn->mtu - hlen), len);
+ count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
- skb = chan->ops->alloc_skb(chan, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
-
- if (!skb)
- return ERR_PTR(err);
+ skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
skb->priority = priority;
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+ lh->len = cpu_to_le16(len);
err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
if (unlikely(err < 0)) {
@@ -1658,7 +1939,7 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
struct msghdr *msg, size_t len,
- u32 control, u16 sdulen)
+ u16 sdulen)
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
@@ -1684,17 +1965,16 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = chan->ops->alloc_skb(chan, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
-
- if (!skb)
- return ERR_PTR(err);
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
+ __put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
if (sdulen)
put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
@@ -1708,61 +1988,82 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
if (chan->fcs == L2CAP_FCS_CRC16)
put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
- bt_cb(skb)->retries = 0;
+ bt_cb(skb)->control.retries = 0;
return skb;
}
-static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static int l2cap_segment_sdu(struct l2cap_chan *chan,
+ struct sk_buff_head *seg_queue,
+ struct msghdr *msg, size_t len)
{
struct sk_buff *skb;
- struct sk_buff_head sar_queue;
- u32 control;
- size_t size = 0;
+ u16 sdu_len;
+ size_t pdu_len;
+ int err = 0;
+ u8 sar;
- skb_queue_head_init(&sar_queue);
- control = __set_ctrl_sar(chan, L2CAP_SAR_START);
- skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
- __skb_queue_tail(&sar_queue, skb);
- len -= chan->remote_mps;
- size += chan->remote_mps;
+ /* It is critical that ERTM PDUs fit in a single HCI fragment,
+ * so fragmented skbs are not used. The HCI layer's handling
+ * of fragmented skbs is not compatible with ERTM's queueing.
+ */
- while (len > 0) {
- size_t buflen;
+ /* PDU size is derived from the HCI MTU */
+ pdu_len = chan->conn->mtu;
- if (len > chan->remote_mps) {
- control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
- buflen = chan->remote_mps;
- } else {
- control = __set_ctrl_sar(chan, L2CAP_SAR_END);
- buflen = len;
- }
+ pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
+
+ /* Adjust for largest possible L2CAP overhead. */
+ pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
+
+ /* Remote device may have requested smaller PDUs */
+ pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
+
+ if (len <= pdu_len) {
+ sar = L2CAP_SAR_UNSEGMENTED;
+ sdu_len = 0;
+ pdu_len = len;
+ } else {
+ sar = L2CAP_SAR_START;
+ sdu_len = len;
+ pdu_len -= L2CAP_SDULEN_SIZE;
+ }
+
+ while (len > 0) {
+ skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
- skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
if (IS_ERR(skb)) {
- skb_queue_purge(&sar_queue);
+ __skb_queue_purge(seg_queue);
return PTR_ERR(skb);
}
- __skb_queue_tail(&sar_queue, skb);
- len -= buflen;
- size += buflen;
+ bt_cb(skb)->control.sar = sar;
+ __skb_queue_tail(seg_queue, skb);
+
+ len -= pdu_len;
+ if (sdu_len) {
+ sdu_len = 0;
+ pdu_len += L2CAP_SDULEN_SIZE;
+ }
+
+ if (len <= pdu_len) {
+ sar = L2CAP_SAR_END;
+ pdu_len = len;
+ } else {
+ sar = L2CAP_SAR_CONTINUE;
+ }
}
- skb_queue_splice_tail(&sar_queue, &chan->tx_q);
- if (chan->tx_send_head == NULL)
- chan->tx_send_head = sar_queue.next;
- return size;
+ return err;
}
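A small sketch of how the segmentation loop above walks an SDU, choosing SAR markers and PDU sizes. The numbers are illustrative, and the real code also reserves room for the L2CAP header, SDU length field and FCS as shown above:

/* Hedged sketch of the UNSEGMENTED/START/CONTINUE/END progression when
 * splitting an SDU of `len` bytes into PDUs of at most `pdu_len` bytes.
 */
#include <stddef.h>
#include <stdio.h>

enum sar { SAR_UNSEGMENTED, SAR_START, SAR_END, SAR_CONTINUE };

static const char *sar_name[] = { "unsegmented", "start", "end", "continue" };

static void segment(size_t len, size_t pdu_len)
{
	enum sar sar;

	if (len <= pdu_len) {
		sar = SAR_UNSEGMENTED;
		pdu_len = len;
	} else {
		sar = SAR_START;
		/* a START PDU also carries the 2-byte SDU length field */
	}

	while (len > 0) {
		size_t chunk = len < pdu_len ? len : pdu_len;

		printf("PDU: %-11s %zu bytes\n", sar_name[sar], chunk);
		len -= chunk;

		sar = len <= pdu_len ? SAR_END : SAR_CONTINUE;
	}
}

int main(void)
{
	segment(100, 48);	/* start / continue / end */
	segment(40, 48);	/* single unsegmented PDU */
	return 0;
}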
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
u32 priority)
{
struct sk_buff *skb;
- u32 control;
int err;
+ struct sk_buff_head seg_queue;
/* Connectionless channel */
if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
@@ -1791,42 +2092,47 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
- /* Entire SDU fits into one PDU */
- if (len <= chan->remote_mps) {
- control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
- skb = l2cap_create_iframe_pdu(chan, msg, len, control,
- 0);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ /* Check outgoing MTU */
+ if (len > chan->omtu) {
+ err = -EMSGSIZE;
+ break;
+ }
- __skb_queue_tail(&chan->tx_q, skb);
+ __skb_queue_head_init(&seg_queue);
- if (chan->tx_send_head == NULL)
- chan->tx_send_head = skb;
+ /* Do segmentation before calling into the state machine,
+ * since it's possible to block while waiting for memory
+ * allocation.
+ */
+ err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
- } else {
- /* Segment SDU into multiples PDUs */
- err = l2cap_sar_segment_sdu(chan, msg, len);
- if (err < 0)
- return err;
+ /* The channel could have been closed while segmenting,
+ * check that it is still connected.
+ */
+ if (chan->state != BT_CONNECTED) {
+ __skb_queue_purge(&seg_queue);
+ err = -ENOTCONN;
}
- if (chan->mode == L2CAP_MODE_STREAMING) {
- l2cap_streaming_send(chan);
- err = len;
+ if (err)
break;
- }
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
- test_bit(CONN_WAIT_F, &chan->conn_state)) {
- err = len;
- break;
- }
+ if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
+ chan->tx_send_head = seg_queue.next;
+ skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
+
+ if (chan->mode == L2CAP_MODE_ERTM)
+ err = l2cap_ertm_send(chan);
+ else
+ l2cap_streaming_send(chan);
- err = l2cap_ertm_send(chan);
if (err >= 0)
err = len;
+ /* If the skbs were not queued for sending, they'll still be in
+ * seg_queue and need to be purged.
+ */
+ __skb_queue_purge(&seg_queue);
break;
default:
@@ -2040,13 +2346,29 @@ static void l2cap_ack_timeout(struct work_struct *work)
l2cap_chan_put(chan);
}
-static inline void l2cap_ertm_init(struct l2cap_chan *chan)
+static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
+ int err;
+
+ chan->next_tx_seq = 0;
+ chan->expected_tx_seq = 0;
chan->expected_ack_seq = 0;
chan->unacked_frames = 0;
chan->buffer_seq = 0;
chan->num_acked = 0;
chan->frames_sent = 0;
+ chan->last_acked_seq = 0;
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+
+ skb_queue_head_init(&chan->tx_q);
+
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return 0;
+
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+ chan->tx_state = L2CAP_TX_STATE_XMIT;
INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
@@ -2055,6 +2377,11 @@ static inline void l2cap_ertm_init(struct l2cap_chan *chan)
skb_queue_head_init(&chan->srej_q);
INIT_LIST_HEAD(&chan->srej_l);
+ err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
+ if (err < 0)
+ return err;
+
+ return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
}
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2378,9 +2705,9 @@ done:
chan->remote_mps = size;
rfc.retrans_timeout =
- le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
+ __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
rfc.monitor_timeout =
- le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
+ __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
set_bit(CONF_MODE_DONE, &chan->conf_state);
@@ -2644,10 +2971,10 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
u16 dcid = 0, scid = __le16_to_cpu(req->scid);
__le16 psm = req->psm;
- BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
+ BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
/* Check if we have socket listening on psm */
- pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
+ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
if (!pchan) {
result = L2CAP_CR_BAD_PSM;
goto sendresp;
@@ -2706,7 +3033,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
if (l2cap_chan_check_security(chan)) {
- if (bt_sk(sk)->defer_setup) {
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
__l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHOR_PEND;
@@ -2848,7 +3175,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
u16 dcid, flags;
u8 rsp[64];
struct l2cap_chan *chan;
- int len;
+ int len, err = 0;
dcid = __le16_to_cpu(req->dcid);
flags = __le16_to_cpu(req->flags);
@@ -2859,8 +3186,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
if (!chan)
return -ENOENT;
- l2cap_chan_lock(chan);
-
if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
struct l2cap_cmd_rej_cid rej;
@@ -2915,13 +3240,15 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
l2cap_state_change(chan, BT_CONNECTED);
- chan->next_tx_seq = 0;
- chan->expected_tx_seq = 0;
- skb_queue_head_init(&chan->tx_q);
- if (chan->mode == L2CAP_MODE_ERTM)
- l2cap_ertm_init(chan);
+ if (chan->mode == L2CAP_MODE_ERTM ||
+ chan->mode == L2CAP_MODE_STREAMING)
+ err = l2cap_ertm_init(chan);
+
+ if (err < 0)
+ l2cap_send_disconn_req(chan->conn, chan, -err);
+ else
+ l2cap_chan_ready(chan);
- l2cap_chan_ready(chan);
goto unlock;
}
@@ -2949,7 +3276,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
unlock:
l2cap_chan_unlock(chan);
- return 0;
+ return err;
}
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
@@ -2957,21 +3284,20 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
struct l2cap_chan *chan;
- int len = cmd->len - sizeof(*rsp);
+ int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
+ int err = 0;
scid = __le16_to_cpu(rsp->scid);
flags = __le16_to_cpu(rsp->flags);
result = __le16_to_cpu(rsp->result);
- BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
- scid, flags, result);
+ BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
+ result, len);
chan = l2cap_get_chan_by_scid(conn, scid);
if (!chan)
return 0;
- l2cap_chan_lock(chan);
-
switch (result) {
case L2CAP_CONF_SUCCESS:
l2cap_conf_rfc_get(chan, rsp->data, len);
@@ -3045,18 +3371,19 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
set_default_fcs(chan);
l2cap_state_change(chan, BT_CONNECTED);
- chan->next_tx_seq = 0;
- chan->expected_tx_seq = 0;
- skb_queue_head_init(&chan->tx_q);
- if (chan->mode == L2CAP_MODE_ERTM)
- l2cap_ertm_init(chan);
+ if (chan->mode == L2CAP_MODE_ERTM ||
+ chan->mode == L2CAP_MODE_STREAMING)
+ err = l2cap_ertm_init(chan);
- l2cap_chan_ready(chan);
+ if (err < 0)
+ l2cap_send_disconn_req(chan->conn, chan, -err);
+ else
+ l2cap_chan_ready(chan);
}
done:
l2cap_chan_unlock(chan);
- return 0;
+ return err;
}
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
@@ -3092,11 +3419,13 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
sk->sk_shutdown = SHUTDOWN_MASK;
release_sock(sk);
+ l2cap_chan_hold(chan);
l2cap_chan_del(chan, ECONNRESET);
l2cap_chan_unlock(chan);
chan->ops->close(chan->data);
+ l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -3124,11 +3453,13 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
l2cap_chan_lock(chan);
+ l2cap_chan_hold(chan);
l2cap_chan_del(chan, 0);
l2cap_chan_unlock(chan);
chan->ops->close(chan->data);
+ l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -3265,8 +3596,8 @@ static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
/* Placeholder: Always reject */
rsp.dcid = 0;
rsp.scid = cpu_to_le16(scid);
- rsp.result = L2CAP_CR_NO_MEM;
- rsp.status = L2CAP_CS_NO_INFO;
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
sizeof(rsp), &rsp);
@@ -3665,19 +3996,19 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
struct sk_buff *next_skb;
int tx_seq_offset, next_tx_seq_offset;
- bt_cb(skb)->tx_seq = tx_seq;
- bt_cb(skb)->sar = sar;
+ bt_cb(skb)->control.txseq = tx_seq;
+ bt_cb(skb)->control.sar = sar;
next_skb = skb_peek(&chan->srej_q);
tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
while (next_skb) {
- if (bt_cb(next_skb)->tx_seq == tx_seq)
+ if (bt_cb(next_skb)->control.txseq == tx_seq)
return -EINVAL;
next_tx_seq_offset = __seq_offset(chan,
- bt_cb(next_skb)->tx_seq, chan->buffer_seq);
+ bt_cb(next_skb)->control.txseq, chan->buffer_seq);
if (next_tx_seq_offset > tx_seq_offset) {
__skb_queue_before(&chan->srej_q, next_skb, skb);
@@ -3800,6 +4131,7 @@ static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
BT_DBG("chan %p, Enter local busy", chan);
set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+ l2cap_seq_list_clear(&chan->srej_list);
__set_ack_timer(chan);
}
@@ -3848,11 +4180,11 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
int err;
- if (bt_cb(skb)->tx_seq != tx_seq)
+ if (bt_cb(skb)->control.txseq != tx_seq)
break;
skb = skb_dequeue(&chan->srej_q);
- control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
+ control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
err = l2cap_reassemble_sdu(chan, skb, control);
if (err < 0) {
@@ -3892,6 +4224,7 @@ static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
while (tx_seq != chan->expected_tx_seq) {
control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
control |= __set_reqseq(chan, chan->expected_tx_seq);
+ l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
l2cap_send_sframe(chan, control);
new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
@@ -4022,8 +4355,8 @@ expected:
chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- bt_cb(skb)->tx_seq = tx_seq;
- bt_cb(skb)->sar = sar;
+ bt_cb(skb)->control.txseq = tx_seq;
+ bt_cb(skb)->control.sar = sar;
__skb_queue_tail(&chan->srej_q, skb);
return 0;
}
@@ -4220,6 +4553,8 @@ static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
u16 req_seq;
int len, next_tx_seq_offset, req_seq_offset;
+ __unpack_control(chan, skb);
+
control = __get_control(chan, skb->data);
skb_pull(skb, __ctrl_size(chan));
len = skb->len;
@@ -4295,8 +4630,6 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
return 0;
}
- l2cap_chan_lock(chan);
-
BT_DBG("chan %p, len %d", chan, skb->len);
if (chan->state != BT_CONNECTED)
@@ -4375,7 +4708,7 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
{
struct l2cap_chan *chan;
- chan = l2cap_global_chan_by_psm(0, psm, conn->src);
+ chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
if (!chan)
goto drop;
@@ -4396,11 +4729,12 @@ drop:
return 0;
}
-static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
+static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
+ struct sk_buff *skb)
{
struct l2cap_chan *chan;
- chan = l2cap_global_chan_by_scid(0, cid, conn->src);
+ chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
if (!chan)
goto drop;
@@ -4445,7 +4779,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
break;
case L2CAP_CID_CONN_LESS:
- psm = get_unaligned_le16(skb->data);
+ psm = get_unaligned((__le16 *) skb->data);
skb_pull(skb, 2);
l2cap_conless_channel(conn, psm, skb);
break;
@@ -4540,7 +4874,6 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
if (encrypt == 0x00) {
if (chan->sec_level == BT_SECURITY_MEDIUM) {
- __clear_chan_timer(chan);
__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
} else if (chan->sec_level == BT_SECURITY_HIGH)
l2cap_chan_close(chan, ECONNREFUSED);
@@ -4561,7 +4894,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
BT_DBG("conn %p", conn);
if (hcon->type == LE_LINK) {
- smp_distribute_keys(conn, 0);
+ if (!status && encrypt)
+ smp_distribute_keys(conn, 0);
cancel_delayed_work(&conn->security_timer);
}
@@ -4591,7 +4925,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
chan->state == BT_CONFIG)) {
struct sock *sk = chan->sk;
- bt_sk(sk)->suspended = false;
+ clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
sk->sk_state_change(sk);
l2cap_check_encryption(chan, encrypt);
@@ -4603,7 +4937,6 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
if (!status) {
l2cap_send_conn_req(chan);
} else {
- __clear_chan_timer(chan);
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
} else if (chan->state == BT_CONNECT2) {
@@ -4614,7 +4947,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
lock_sock(sk);
if (!status) {
- if (bt_sk(sk)->defer_setup) {
+ if (test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
struct sock *parent = bt_sk(sk)->parent;
res = L2CAP_CR_PEND;
stat = L2CAP_CS_AUTHOR_PEND;
@@ -4664,8 +4998,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
if (!(flags & ACL_CONT)) {
struct l2cap_hdr *hdr;
- struct l2cap_chan *chan;
- u16 cid;
int len;
if (conn->rx_len) {
@@ -4685,7 +5017,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
hdr = (struct l2cap_hdr *) skb->data;
len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
- cid = __le16_to_cpu(hdr->cid);
if (len == skb->len) {
/* Complete frame received */
@@ -4702,23 +5033,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
goto drop;
}
- chan = l2cap_get_chan_by_scid(conn, cid);
-
- if (chan && chan->sk) {
- struct sock *sk = chan->sk;
- lock_sock(sk);
-
- if (chan->imtu < len - L2CAP_HDR_SIZE) {
- BT_ERR("Frame exceeding recv MTU (len %d, "
- "MTU %d)", len,
- chan->imtu);
- release_sock(sk);
- l2cap_conn_unreliable(conn, ECOMM);
- goto drop;
- }
- release_sock(sk);
- }
-
/* Allocate skb for the complete frame (with header) */
conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
if (!conn->rx_skb)
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 04e7c172d49c..3bb1611b9d48 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -124,7 +124,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
return -EINVAL;
err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
- &la.l2_bdaddr);
+ &la.l2_bdaddr, la.l2_bdaddr_type);
if (err)
return err;
@@ -148,12 +148,16 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
lock_sock(sk);
- if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
- || sk->sk_state != BT_BOUND) {
+ if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) {
+ err = -EINVAL;
+ goto done;
+ }
+
switch (chan->mode) {
case L2CAP_MODE_BASIC:
break;
@@ -320,8 +324,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
case L2CAP_CONNINFO:
if (sk->sk_state != BT_CONNECTED &&
- !(sk->sk_state == BT_CONNECT2 &&
- bt_sk(sk)->defer_setup)) {
+ !(sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
@@ -375,7 +379,10 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
}
memset(&sec, 0, sizeof(sec));
- sec.level = chan->sec_level;
+ if (chan->conn)
+ sec.level = chan->conn->hcon->sec_level;
+ else
+ sec.level = chan->sec_level;
if (sk->sk_state == BT_CONNECTED)
sec.key_size = chan->conn->hcon->enc_key_size;
@@ -392,7 +399,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
}
- if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
+ if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
+ (u32 __user *) optval))
err = -EFAULT;
break;
@@ -594,10 +602,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
/* or for ACL link */
} else if ((sk->sk_state == BT_CONNECT2 &&
- bt_sk(sk)->defer_setup) ||
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
sk->sk_state == BT_CONNECTED) {
if (!l2cap_chan_check_security(chan))
- bt_sk(sk)->suspended = true;
+ set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
else
sk->sk_state_change(sk);
} else {
@@ -616,7 +624,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
break;
}
- bt_sk(sk)->defer_setup = opt;
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ else
+ clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
case BT_FLUSHABLE:
@@ -716,16 +727,13 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
- lock_sock(sk);
-
- if (sk->sk_state != BT_CONNECTED) {
- release_sock(sk);
+ if (sk->sk_state != BT_CONNECTED)
return -ENOTCONN;
- }
+ l2cap_chan_lock(chan);
err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
+ l2cap_chan_unlock(chan);
- release_sock(sk);
return err;
}
@@ -737,7 +745,8 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
lock_sock(sk);
- if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
+ if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
sk->sk_state = BT_CONFIG;
pi->chan->state = BT_CONFIG;
@@ -931,12 +940,19 @@ static void l2cap_sock_state_change_cb(void *data, int state)
}
static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
- unsigned long len, int nb,
- int *err)
+ unsigned long len, int nb)
{
- struct sock *sk = chan->sk;
+ struct sk_buff *skb;
+ int err;
+
+ l2cap_chan_unlock(chan);
+ skb = bt_skb_send_alloc(chan->sk, len, nb, &err);
+ l2cap_chan_lock(chan);
+
+ if (!skb)
+ return ERR_PTR(err);
- return bt_skb_send_alloc(sk, len, nb, err);
+ return skb;
}
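The callback above releases the channel lock around the potentially sleeping allocation and re-acquires it before returning, so nothing blocks while the lock is held. A minimal user-space sketch of that pattern, with a pthread mutex standing in for the channel lock (names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;

/* Drop the lock before an operation that may sleep, take it back
 * afterwards, and only then touch the locked state again.
 */
static void *alloc_skb_like(size_t len)
{
	void *buf;

	pthread_mutex_unlock(&chan_lock);
	buf = malloc(len);		/* stand-in for a sleeping allocation */
	pthread_mutex_lock(&chan_lock);

	return buf;
}

int main(void)
{
	void *skb;

	pthread_mutex_lock(&chan_lock);
	skb = alloc_skb_like(1024);
	printf("allocated: %s\n", skb ? "yes" : "no");
	pthread_mutex_unlock(&chan_lock);
	free(skb);
	return 0;
}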
static struct l2cap_ops l2cap_chan_ops = {
@@ -952,6 +968,7 @@ static void l2cap_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
+ l2cap_chan_put(l2cap_pi(sk)->chan);
if (l2cap_pi(sk)->rx_busy_skb) {
kfree_skb(l2cap_pi(sk)->rx_busy_skb);
l2cap_pi(sk)->rx_busy_skb = NULL;
@@ -972,7 +989,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
sk->sk_type = parent->sk_type;
- bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
+ bt_sk(sk)->flags = bt_sk(parent)->flags;
chan->chan_type = pchan->chan_type;
chan->imtu = pchan->imtu;
@@ -1010,13 +1027,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
} else {
chan->mode = L2CAP_MODE_BASIC;
}
- chan->max_tx = L2CAP_DEFAULT_MAX_TX;
- chan->fcs = L2CAP_FCS_CRC16;
- chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
- chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
- chan->sec_level = BT_SECURITY_LOW;
- chan->flags = 0;
- set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+
+ l2cap_chan_set_defaults(chan);
}
/* Default config options */
@@ -1052,12 +1064,16 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
- chan = l2cap_chan_create(sk);
+ chan = l2cap_chan_create();
if (!chan) {
l2cap_sock_kill(sk);
return NULL;
}
+ l2cap_chan_hold(chan);
+
+ chan->sk = sk;
+
l2cap_pi(sk)->chan = chan;
return sk;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 4bb03b111122..25d220776079 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -35,10 +35,9 @@
#include <net/bluetooth/smp.h>
bool enable_hs;
-bool enable_le;
#define MGMT_VERSION 1
-#define MGMT_REVISION 0
+#define MGMT_REVISION 1
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -78,6 +77,7 @@ static const u16 mgmt_commands[] = {
MGMT_OP_CONFIRM_NAME,
MGMT_OP_BLOCK_DEVICE,
MGMT_OP_UNBLOCK_DEVICE,
+ MGMT_OP_SET_DEVICE_ID,
};
static const u16 mgmt_events[] = {
@@ -224,7 +224,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
ev = (void *) skb_put(skb, sizeof(*ev));
ev->status = status;
- put_unaligned_le16(cmd, &ev->opcode);
+ ev->opcode = cpu_to_le16(cmd);
err = sock_queue_rcv_skb(sk, skb);
if (err < 0)
@@ -254,7 +254,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
- put_unaligned_le16(cmd, &ev->opcode);
+ ev->opcode = cpu_to_le16(cmd);
ev->status = status;
if (rp)
@@ -275,7 +275,7 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("sock %p", sk);
rp.version = MGMT_VERSION;
- put_unaligned_le16(MGMT_REVISION, &rp.revision);
+ rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
sizeof(rp));
@@ -285,9 +285,9 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
struct mgmt_rp_read_commands *rp;
- u16 num_commands = ARRAY_SIZE(mgmt_commands);
- u16 num_events = ARRAY_SIZE(mgmt_events);
- u16 *opcode;
+ const u16 num_commands = ARRAY_SIZE(mgmt_commands);
+ const u16 num_events = ARRAY_SIZE(mgmt_events);
+ __le16 *opcode;
size_t rp_size;
int i, err;
@@ -299,8 +299,8 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
if (!rp)
return -ENOMEM;
- put_unaligned_le16(num_commands, &rp->num_commands);
- put_unaligned_le16(num_events, &rp->num_events);
+ rp->num_commands = __constant_cpu_to_le16(num_commands);
+ rp->num_events = __constant_cpu_to_le16(num_events);
for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
put_unaligned_le16(mgmt_commands[i], opcode);
@@ -341,14 +341,14 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
return -ENOMEM;
}
- put_unaligned_le16(count, &rp->num_controllers);
+ rp->num_controllers = cpu_to_le16(count);
i = 0;
list_for_each_entry(d, &hci_dev_list, list) {
if (test_bit(HCI_SETUP, &d->dev_flags))
continue;
- put_unaligned_le16(d->id, &rp->index[i++]);
+ rp->index[i++] = cpu_to_le16(d->id);
BT_DBG("Added hci%u", d->id);
}
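Several conversions in this hunk (and elsewhere in the patch) replace put_unaligned_le16() on plain u16 fields with cpu_to_le16() assignments, which suggests the struct fields are now endian-annotated (__le16) so sparse can check them. A tiny user-space stand-in for the idea; the type alias, struct and opcode value are hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint16_t le16;			/* stand-in for the kernel's __le16 */

static le16 cpu_to_le16_sketch(uint16_t v)
{
	uint8_t b[2] = { v & 0xff, v >> 8 };
	le16 out;

	memcpy(&out, b, sizeof(out));
	return out;
}

struct reply_sketch {
	le16 opcode;			/* always little endian on the wire */
	uint8_t status;
};

int main(void)
{
	struct reply_sketch ev = { .opcode = cpu_to_le16_sketch(0x0103),
				   .status = 0 };	/* illustrative opcode */
	const uint8_t *raw = (const uint8_t *)&ev.opcode;

	printf("wire bytes: %02x %02x\n", raw[0], raw[1]);
	return 0;
}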
@@ -383,10 +383,8 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (enable_hs)
settings |= MGMT_SETTING_HS;
- if (enable_le) {
- if (hdev->features[4] & LMP_LE)
- settings |= MGMT_SETTING_LE;
- }
+ if (hdev->features[4] & LMP_LE)
+ settings |= MGMT_SETTING_LE;
return settings;
}
@@ -442,9 +440,7 @@ static u16 get_uuid16(u8 *uuid128)
return 0;
}
- memcpy(&val, &uuid128[12], 4);
-
- val = le32_to_cpu(val);
+ val = get_unaligned_le32(&uuid128[12]);
if (val > 0xffff)
return 0;
@@ -479,6 +475,28 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
ptr += (name_len + 2);
}
+ if (hdev->inq_tx_power) {
+ ptr[0] = 2;
+ ptr[1] = EIR_TX_POWER;
+ ptr[2] = (u8) hdev->inq_tx_power;
+
+ eir_len += 3;
+ ptr += 3;
+ }
+
+ if (hdev->devid_source > 0) {
+ ptr[0] = 9;
+ ptr[1] = EIR_DEVICE_ID;
+
+ put_unaligned_le16(hdev->devid_source, ptr + 2);
+ put_unaligned_le16(hdev->devid_vendor, ptr + 4);
+ put_unaligned_le16(hdev->devid_product, ptr + 6);
+ put_unaligned_le16(hdev->devid_version, ptr + 8);
+
+ eir_len += 10;
+ ptr += 10;
+ }
+
memset(uuid16_list, 0, sizeof(uuid16_list));
/* Group all UUID16 types */
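The two blocks added above append standard length-prefixed EIR entries: a one-byte TX Power Level and an eight-byte Device ID. A hedged sketch of that layout; the type values (0x0a, 0x10) and the sample vendor/product numbers are assumptions taken from the EIR assigned numbers, not from this patch:

/* Hedged sketch: each EIR entry is [len][type][payload]; the Device ID
 * payload is four little-endian 16-bit words (source, vendor, product,
 * version).
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void put_le16(uint16_t v, uint8_t *p)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

static size_t eir_append(uint8_t *ptr, int8_t tx_power, uint16_t source,
			 uint16_t vendor, uint16_t product, uint16_t version)
{
	size_t len = 0;

	ptr[0] = 2;			/* length: type + 1 data byte */
	ptr[1] = 0x0a;			/* TX Power Level */
	ptr[2] = (uint8_t)tx_power;
	ptr += 3;
	len += 3;

	ptr[0] = 9;			/* length: type + 8 data bytes */
	ptr[1] = 0x10;			/* Device ID */
	put_le16(source, ptr + 2);
	put_le16(vendor, ptr + 4);
	put_le16(product, ptr + 6);
	put_le16(version, ptr + 8);
	len += 10;

	return len;
}

int main(void)
{
	uint8_t eir[240] = { 0 };
	size_t len = eir_append(eir, -4, 0x0002, 0x1d6b, 0x0246, 0x0100);

	printf("eir_len %zu, first bytes %02x %02x %02x\n",
	       len, eir[0], eir[1], eir[2]);
	return 0;
}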
@@ -642,8 +660,7 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
bacpy(&rp.bdaddr, &hdev->bdaddr);
rp.version = hdev->hci_ver;
-
- put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
+ rp.manufacturer = cpu_to_le16(hdev->manufacturer);
rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
rp.current_settings = cpu_to_le32(get_current_settings(hdev));
@@ -840,7 +857,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("request for %s", hdev->name);
- timeout = get_unaligned_le16(&cp->timeout);
+ timeout = __le16_to_cpu(cp->timeout);
if (!cp->val && timeout > 0)
return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
MGMT_STATUS_INVALID_PARAMS);
@@ -1122,8 +1139,8 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
- MGMT_STATUS_BUSY);
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_BUSY);
goto failed;
}
@@ -1179,7 +1196,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_dev_lock(hdev);
- if (!enable_le || !(hdev->features[4] & LMP_LE)) {
+ if (!(hdev->features[4] & LMP_LE)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
MGMT_STATUS_NOT_SUPPORTED);
goto unlock;
@@ -1227,10 +1244,8 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
&hci_cp);
- if (err < 0) {
+ if (err < 0)
mgmt_pending_remove(cmd);
- goto unlock;
- }
unlock:
hci_dev_unlock(hdev);
@@ -1280,10 +1295,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto failed;
- }
failed:
hci_dev_unlock(hdev);
@@ -1368,10 +1381,8 @@ update_class:
}
cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
unlock:
hci_dev_unlock(hdev);
@@ -1422,10 +1433,8 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
}
cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
unlock:
hci_dev_unlock(hdev);
@@ -1439,7 +1448,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
u16 key_count, expected_len;
int i;
- key_count = get_unaligned_le16(&cp->key_count);
+ key_count = __le16_to_cpu(cp->key_count);
expected_len = sizeof(*cp) + key_count *
sizeof(struct mgmt_link_key_info);
@@ -1512,7 +1521,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
else
err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
@@ -1524,7 +1533,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (cp->disconnect) {
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
&cp->addr.bdaddr);
else
@@ -1548,7 +1557,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- put_unaligned_le16(conn->handle, &dc.handle);
+ dc.handle = cpu_to_le16(conn->handle);
dc.reason = 0x13; /* Remote User Terminated Connection */
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
if (err < 0)
@@ -1584,7 +1593,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
@@ -1601,7 +1610,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- put_unaligned_le16(conn->handle, &dc.handle);
+ dc.handle = cpu_to_le16(conn->handle);
dc.reason = 0x13; /* Remote User Terminated Connection */
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
@@ -1613,22 +1622,22 @@ failed:
return err;
}
-static u8 link_to_mgmt(u8 link_type, u8 addr_type)
+static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
switch (link_type) {
case LE_LINK:
switch (addr_type) {
case ADDR_LE_DEV_PUBLIC:
- return MGMT_ADDR_LE_PUBLIC;
- case ADDR_LE_DEV_RANDOM:
- return MGMT_ADDR_LE_RANDOM;
+ return BDADDR_LE_PUBLIC;
+
default:
- return MGMT_ADDR_INVALID;
+ /* Fallback to LE Random address type */
+ return BDADDR_LE_RANDOM;
}
- case ACL_LINK:
- return MGMT_ADDR_BREDR;
+
default:
- return MGMT_ADDR_INVALID;
+ /* Fallback to BR/EDR type */
+ return BDADDR_BREDR;
}
}
@@ -1669,13 +1678,13 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
continue;
bacpy(&rp->addr[i].bdaddr, &c->dst);
- rp->addr[i].type = link_to_mgmt(c->type, c->dst_type);
- if (rp->addr[i].type == MGMT_ADDR_INVALID)
+ rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
+ if (c->type == SCO_LINK || c->type == ESCO_LINK)
continue;
i++;
}
- put_unaligned_le16(i, &rp->conn_count);
+ rp->conn_count = cpu_to_le16(i);
/* Recalculate length in case of filtered SCO connections, etc */
rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
@@ -1836,7 +1845,7 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
struct hci_conn *conn = cmd->user_data;
bacpy(&rp.addr.bdaddr, &conn->dst);
- rp.addr.type = link_to_mgmt(conn->type, conn->dst_type);
+ rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
&rp, sizeof(rp));
@@ -1890,12 +1899,12 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
else
auth_type = HCI_AT_DEDICATED_BONDING_MITM;
- if (cp->addr.type == MGMT_ADDR_BREDR)
- conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level,
- auth_type);
+ if (cp->addr.type == BDADDR_BREDR)
+ conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
+ cp->addr.type, sec_level, auth_type);
else
- conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level,
- auth_type);
+ conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
+ cp->addr.type, sec_level, auth_type);
memset(&rp, 0, sizeof(rp));
bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
@@ -1923,7 +1932,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
/* For LE, just connecting isn't a proof that the pairing finished */
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
conn->connect_cfm_cb = pairing_complete_cb;
conn->security_cfm_cb = pairing_complete_cb;
@@ -2000,7 +2009,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
goto done;
}
- if (type == MGMT_ADDR_BREDR)
+ if (type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
@@ -2011,7 +2020,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
goto done;
}
- if (type == MGMT_ADDR_LE_PUBLIC || type == MGMT_ADDR_LE_RANDOM) {
+ if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
/* Continue with pairing via SMP */
err = smp_user_confirm_reply(conn, mgmt_op, passkey);
@@ -2295,6 +2304,12 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
if (hdev->discovery.state != DISCOVERY_STOPPED) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
MGMT_STATUS_BUSY);
@@ -2381,27 +2396,39 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- if (hdev->discovery.state == DISCOVERY_FINDING) {
- err = hci_cancel_inquiry(hdev);
- if (err < 0)
- mgmt_pending_remove(cmd);
+ switch (hdev->discovery.state) {
+ case DISCOVERY_FINDING:
+ if (test_bit(HCI_INQUIRY, &hdev->flags))
+ err = hci_cancel_inquiry(hdev);
else
- hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
- goto unlock;
- }
+ err = hci_cancel_le_scan(hdev);
- e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_PENDING);
- if (!e) {
- mgmt_pending_remove(cmd);
- err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
- &mgmt_cp->type, sizeof(mgmt_cp->type));
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- goto unlock;
+ break;
+
+ case DISCOVERY_RESOLVING:
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
+ NAME_PENDING);
+ if (!e) {
+ mgmt_pending_remove(cmd);
+ err = cmd_complete(sk, hdev->id,
+ MGMT_OP_STOP_DISCOVERY, 0,
+ &mgmt_cp->type,
+ sizeof(mgmt_cp->type));
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ goto unlock;
+ }
+
+ bacpy(&cp.bdaddr, &e->data.bdaddr);
+ err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
+ sizeof(cp), &cp);
+
+ break;
+
+ default:
+ BT_DBG("unknown discovery state %u", hdev->discovery.state);
+ err = -EFAULT;
}
- bacpy(&cp.bdaddr, &e->data.bdaddr);
- err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
- &cp);
if (err < 0)
mgmt_pending_remove(cmd);
else
@@ -2501,6 +2528,37 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
return err;
}
+static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_device_id *cp = data;
+ int err;
+ __u16 source;
+
+ BT_DBG("%s", hdev->name);
+
+ source = __le16_to_cpu(cp->source);
+
+ if (source > 0x0002)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ hdev->devid_source = source;
+ hdev->devid_vendor = __le16_to_cpu(cp->vendor);
+ hdev->devid_product = __le16_to_cpu(cp->product);
+ hdev->devid_version = __le16_to_cpu(cp->version);
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
+
+ update_eir(hdev);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
@@ -2565,7 +2623,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
u16 key_count, expected_len;
int i;
- key_count = get_unaligned_le16(&cp->key_count);
+ key_count = __le16_to_cpu(cp->key_count);
expected_len = sizeof(*cp) + key_count *
sizeof(struct mgmt_ltk_info);
@@ -2591,7 +2649,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
else
type = HCI_SMP_LTK_SLAVE;
- hci_add_ltk(hdev, &key->addr.bdaddr, key->addr.type,
+ hci_add_ltk(hdev, &key->addr.bdaddr,
+ bdaddr_to_le(key->addr.type),
type, 0, key->authenticated, key->val,
key->enc_size, key->ediv, key->rand);
}
@@ -2601,7 +2660,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
return 0;
}
-struct mgmt_handler {
+static const struct mgmt_handler {
int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);
bool var_len;
@@ -2647,6 +2706,7 @@ struct mgmt_handler {
{ confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
{ block_device, false, MGMT_BLOCK_DEVICE_SIZE },
{ unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
+ { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
};
@@ -2657,7 +2717,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
struct mgmt_hdr *hdr;
u16 opcode, index, len;
struct hci_dev *hdev = NULL;
- struct mgmt_handler *handler;
+ const struct mgmt_handler *handler;
int err;
BT_DBG("got %zu bytes", msglen);
@@ -2675,9 +2735,9 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
}
hdr = buf;
- opcode = get_unaligned_le16(&hdr->opcode);
- index = get_unaligned_le16(&hdr->index);
- len = get_unaligned_le16(&hdr->len);
+ opcode = __le16_to_cpu(hdr->opcode);
+ index = __le16_to_cpu(hdr->index);
+ len = __le16_to_cpu(hdr->len);
if (len != msglen - sizeof(*hdr)) {
err = -EINVAL;
@@ -2884,7 +2944,8 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
return 0;
}
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent)
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ bool persistent)
{
struct mgmt_ev_new_link_key ev;
@@ -2892,7 +2953,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persisten
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = MGMT_ADDR_BREDR;
+ ev.key.addr.type = BDADDR_BREDR;
ev.key.type = key->type;
memcpy(ev.key.val, key->val, 16);
ev.key.pin_len = key->pin_len;
@@ -2908,7 +2969,7 @@ int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = key->bdaddr_type;
+ ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
ev.key.authenticated = key->authenticated;
ev.key.enc_size = key->enc_size;
ev.key.ediv = key->ediv;
@@ -2932,7 +2993,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u16 eir_len = 0;
bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_mgmt(link_type, addr_type);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->flags = __cpu_to_le32(flags);
@@ -2944,7 +3005,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
eir_len = eir_append_data(ev->eir, eir_len,
EIR_CLASS_OF_DEV, dev_class, 3);
- put_unaligned_le16(eir_len, &ev->eir_len);
+ ev->eir_len = cpu_to_le16(eir_len);
return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
sizeof(*ev) + eir_len, NULL);
@@ -2995,13 +3056,13 @@ int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
bacpy(&ev.bdaddr, bdaddr);
- ev.type = link_to_mgmt(link_type, addr_type);
+ ev.type = link_to_bdaddr(link_type, addr_type);
err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
sk);
if (sk)
- sock_put(sk);
+ sock_put(sk);
mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
hdev);
@@ -3021,7 +3082,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = link_to_mgmt(link_type, addr_type);
+ rp.addr.type = link_to_bdaddr(link_type, addr_type);
err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
mgmt_status(status), &rp, sizeof(rp));
@@ -3039,7 +3100,7 @@ int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
struct mgmt_ev_connect_failed ev;
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);
return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -3050,7 +3111,7 @@ int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
struct mgmt_ev_pin_code_request ev;
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = MGMT_ADDR_BREDR;
+ ev.addr.type = BDADDR_BREDR;
ev.secure = secure;
return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
@@ -3069,7 +3130,7 @@ int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = MGMT_ADDR_BREDR;
+ rp.addr.type = BDADDR_BREDR;
err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
mgmt_status(status), &rp, sizeof(rp));
@@ -3091,7 +3152,7 @@ int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = MGMT_ADDR_BREDR;
+ rp.addr.type = BDADDR_BREDR;
err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
mgmt_status(status), &rp, sizeof(rp));
@@ -3110,9 +3171,9 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
BT_DBG("%s", hdev->name);
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.confirm_hint = confirm_hint;
- put_unaligned_le32(value, &ev.value);
+ ev.value = value;
return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
NULL);
@@ -3126,7 +3187,7 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
BT_DBG("%s", hdev->name);
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
NULL);
@@ -3145,7 +3206,7 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = link_to_mgmt(link_type, addr_type);
+ rp.addr.type = link_to_bdaddr(link_type, addr_type);
err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
&rp, sizeof(rp));
@@ -3188,7 +3249,7 @@ int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
struct mgmt_ev_auth_failed ev;
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);
return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -3413,10 +3474,10 @@ int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
if (enable && test_and_clear_bit(HCI_LE_ENABLED,
&hdev->dev_flags))
- err = new_settings(hdev, NULL);
+ err = new_settings(hdev, NULL);
- mgmt_pending_foreach(MGMT_OP_SET_LE, hdev,
- cmd_status_rsp, &mgmt_err);
+ mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
+ &mgmt_err);
return err;
}
@@ -3455,7 +3516,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
memset(buf, 0, sizeof(buf));
bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_mgmt(link_type, addr_type);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;
if (cfm_name)
ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME;
@@ -3469,7 +3530,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
dev_class, 3);
- put_unaligned_le16(eir_len, &ev->eir_len);
+ ev->eir_len = cpu_to_le16(eir_len);
ev_size = sizeof(*ev) + eir_len;
@@ -3488,13 +3549,13 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
memset(buf, 0, sizeof(buf));
bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_mgmt(link_type, addr_type);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;
eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
name_len);
- put_unaligned_le16(eir_len, &ev->eir_len);
+ ev->eir_len = cpu_to_le16(eir_len);
return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
sizeof(*ev) + eir_len, NULL);
@@ -3594,6 +3655,3 @@ int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed support");
-
-module_param(enable_le, bool, 0644);
-MODULE_PARM_DESC(enable_le, "Enable Low Energy support");
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index a55a43e9f70e..e8707debb864 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -260,7 +260,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
if (parent) {
sk->sk_type = parent->sk_type;
- pi->dlc->defer_setup = bt_sk(parent)->defer_setup;
+ pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(parent)->flags);
pi->sec_level = rfcomm_pi(parent)->sec_level;
pi->role_switch = rfcomm_pi(parent)->role_switch;
@@ -731,7 +732,11 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
break;
}
- bt_sk(sk)->defer_setup = opt;
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ else
+ clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+
break;
default:
@@ -849,7 +854,8 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
break;
}
- if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
+ if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
+ (u32 __user *) optval))
err = -EFAULT;
break;
@@ -972,7 +978,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
done:
bh_unlock_sock(parent);
- if (bt_sk(parent)->defer_setup)
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
parent->sk_state_change(parent);
return result;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index f6ab12907963..cbdd313659a7 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -61,8 +61,6 @@ static struct bt_sock_list sco_sk_list = {
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
static void sco_chan_del(struct sock *sk, int err);
-static int sco_conn_del(struct hci_conn *conn, int err);
-
static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);
@@ -95,12 +93,12 @@ static void sco_sock_clear_timer(struct sock *sk)
}
/* ---- SCO connections ---- */
-static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
+static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;
- if (conn || status)
+ if (conn)
return conn;
conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
@@ -195,13 +193,14 @@ static int sco_connect(struct sock *sk)
else
type = SCO_LINK;
- hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
+ hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
+ HCI_AT_NO_BONDING);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
}
- conn = sco_conn_add(hcon, 0);
+ conn = sco_conn_add(hcon);
if (!conn) {
hci_conn_put(hcon);
err = -ENOMEM;
@@ -233,7 +232,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
- int err, count;
+ int err;
/* Check outgoing MTU */
if (len > conn->mtu)
@@ -241,20 +240,18 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
BT_DBG("sk %p len %d", sk, len);
- count = min_t(unsigned int, conn->mtu, len);
- skb = bt_skb_send_alloc(sk, count,
- msg->msg_flags & MSG_DONTWAIT, &err);
+ skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
- if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
kfree_skb(skb);
return -EFAULT;
}
hci_send_sco(conn->hcon, skb);
- return count;
+ return len;
}
static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
@@ -277,17 +274,20 @@ drop:
}
/* -------- Socket interface ---------- */
-static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba)
+static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
{
- struct sock *sk;
struct hlist_node *node;
+ struct sock *sk;
+
+ sk_for_each(sk, node, &sco_sk_list.head) {
+ if (sk->sk_state != BT_LISTEN)
+ continue;
- sk_for_each(sk, node, &sco_sk_list.head)
if (!bacmp(&bt_sk(sk)->src, ba))
- goto found;
- sk = NULL;
-found:
- return sk;
+ return sk;
+ }
+
+ return NULL;
}
/* Find socket listening on source bdaddr.
@@ -466,7 +466,6 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
- bdaddr_t *src = &sa->sco_bdaddr;
int err = 0;
BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
@@ -481,17 +480,14 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
goto done;
}
- write_lock(&sco_sk_list.lock);
-
- if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
- err = -EADDRINUSE;
- } else {
- /* Save source address */
- bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
- sk->sk_state = BT_BOUND;
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ err = -EINVAL;
+ goto done;
}
- write_unlock(&sco_sk_list.lock);
+ bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
+
+ sk->sk_state = BT_BOUND;
done:
release_sock(sk);
@@ -537,21 +533,38 @@ done:
static int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
+ bdaddr_t *src = &bt_sk(sk)->src;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
- if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
+ if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ write_lock(&sco_sk_list.lock);
+
+ if (__sco_get_sock_listen_by_addr(src)) {
+ err = -EADDRINUSE;
+ goto unlock;
+ }
+
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
+
sk->sk_state = BT_LISTEN;
+unlock:
+ write_unlock(&sco_sk_list.lock);
+
done:
release_sock(sk);
return err;
@@ -923,7 +936,7 @@ int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
if (!status) {
struct sco_conn *conn;
- conn = sco_conn_add(hcon, status);
+ conn = sco_conn_add(hcon);
if (conn)
sco_conn_ready(conn);
} else
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index deb119875fd9..6fc7c4708f3e 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -956,7 +956,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
HCI_SMP_LTK_SLAVE, 1, authenticated,
enc.ltk, smp->enc_key_size, ediv, ident.rand);
- ident.ediv = cpu_to_le16(ediv);
+ ident.ediv = ediv;
smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 214c2bb43d62..925ca583c09c 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -59,9 +59,7 @@ static int handle_reply(struct ceph_auth_client *ac, int result,
*/
static int ceph_auth_none_create_authorizer(
struct ceph_auth_client *ac, int peer_type,
- struct ceph_authorizer **a,
- void **buf, size_t *len,
- void **reply_buf, size_t *reply_len)
+ struct ceph_auth_handshake *auth)
{
struct ceph_auth_none_info *ai = ac->private;
struct ceph_none_authorizer *au = &ai->au;
@@ -82,11 +80,12 @@ static int ceph_auth_none_create_authorizer(
dout("built authorizer len %d\n", au->buf_len);
}
- *a = (struct ceph_authorizer *)au;
- *buf = au->buf;
- *len = au->buf_len;
- *reply_buf = au->reply_buf;
- *reply_len = sizeof(au->reply_buf);
+ auth->authorizer = (struct ceph_authorizer *) au;
+ auth->authorizer_buf = au->buf;
+ auth->authorizer_buf_len = au->buf_len;
+ auth->authorizer_reply_buf = au->reply_buf;
+ auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+
return 0;
bad2:
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 1587dc6010c6..a16bf14eb027 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -526,9 +526,7 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
static int ceph_x_create_authorizer(
struct ceph_auth_client *ac, int peer_type,
- struct ceph_authorizer **a,
- void **buf, size_t *len,
- void **reply_buf, size_t *reply_len)
+ struct ceph_auth_handshake *auth)
{
struct ceph_x_authorizer *au;
struct ceph_x_ticket_handler *th;
@@ -548,11 +546,12 @@ static int ceph_x_create_authorizer(
return ret;
}
- *a = (struct ceph_authorizer *)au;
- *buf = au->buf->vec.iov_base;
- *len = au->buf->vec.iov_len;
- *reply_buf = au->reply_buf;
- *reply_len = sizeof(au->reply_buf);
+ auth->authorizer = (struct ceph_authorizer *) au;
+ auth->authorizer_buf = au->buf->vec.iov_base;
+ auth->authorizer_buf_len = au->buf->vec.iov_len;
+ auth->authorizer_reply_buf = au->reply_buf;
+ auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+
return 0;
}
diff --git a/net/ceph/crush/crush.c b/net/ceph/crush/crush.c
index d6ebb13a18a4..089613234f03 100644
--- a/net/ceph/crush/crush.c
+++ b/net/ceph/crush/crush.c
@@ -26,9 +26,9 @@ const char *crush_bucket_alg_name(int alg)
* @b: bucket pointer
* @p: item index in bucket
*/
-int crush_get_bucket_item_weight(struct crush_bucket *b, int p)
+int crush_get_bucket_item_weight(const struct crush_bucket *b, int p)
{
- if (p >= b->size)
+ if ((__u32)p >= b->size)
return 0;
switch (b->alg) {
@@ -37,38 +37,13 @@ int crush_get_bucket_item_weight(struct crush_bucket *b, int p)
case CRUSH_BUCKET_LIST:
return ((struct crush_bucket_list *)b)->item_weights[p];
case CRUSH_BUCKET_TREE:
- if (p & 1)
- return ((struct crush_bucket_tree *)b)->node_weights[p];
- return 0;
+ return ((struct crush_bucket_tree *)b)->node_weights[crush_calc_tree_node(p)];
case CRUSH_BUCKET_STRAW:
return ((struct crush_bucket_straw *)b)->item_weights[p];
}
return 0;
}
-/**
- * crush_calc_parents - Calculate parent vectors for the given crush map.
- * @map: crush_map pointer
- */
-void crush_calc_parents(struct crush_map *map)
-{
- int i, b, c;
-
- for (b = 0; b < map->max_buckets; b++) {
- if (map->buckets[b] == NULL)
- continue;
- for (i = 0; i < map->buckets[b]->size; i++) {
- c = map->buckets[b]->items[i];
- BUG_ON(c >= map->max_devices ||
- c < -map->max_buckets);
- if (c >= 0)
- map->device_parents[c] = map->buckets[b]->id;
- else
- map->bucket_parents[-1-c] = map->buckets[b]->id;
- }
- }
-}
-
void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b)
{
kfree(b->h.perm);
@@ -87,6 +62,8 @@ void crush_destroy_bucket_list(struct crush_bucket_list *b)
void crush_destroy_bucket_tree(struct crush_bucket_tree *b)
{
+ kfree(b->h.perm);
+ kfree(b->h.items);
kfree(b->node_weights);
kfree(b);
}
@@ -124,10 +101,9 @@ void crush_destroy_bucket(struct crush_bucket *b)
*/
void crush_destroy(struct crush_map *map)
{
- int b;
-
/* buckets */
if (map->buckets) {
+ __s32 b;
for (b = 0; b < map->max_buckets; b++) {
if (map->buckets[b] == NULL)
continue;
@@ -138,13 +114,12 @@ void crush_destroy(struct crush_map *map)
/* rules */
if (map->rules) {
+ __u32 b;
for (b = 0; b < map->max_rules; b++)
kfree(map->rules[b]);
kfree(map->rules);
}
- kfree(map->bucket_parents);
- kfree(map->device_parents);
kfree(map);
}
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 363f8f7e6c3c..d7edc24333b8 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -33,9 +33,9 @@
* @type: storage ruleset type (user defined)
* @size: output set size
*/
-int crush_find_rule(struct crush_map *map, int ruleset, int type, int size)
+int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size)
{
- int i;
+ __u32 i;
for (i = 0; i < map->max_rules; i++) {
if (map->rules[i] &&
@@ -73,7 +73,7 @@ static int bucket_perm_choose(struct crush_bucket *bucket,
unsigned int i, s;
/* start a new permutation if @x has changed */
- if (bucket->perm_x != x || bucket->perm_n == 0) {
+ if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) {
dprintk("bucket %d new x=%d\n", bucket->id, x);
bucket->perm_x = x;
@@ -153,8 +153,8 @@ static int bucket_list_choose(struct crush_bucket_list *bucket,
return bucket->h.items[i];
}
- BUG_ON(1);
- return 0;
+ dprintk("bad list sums for bucket %d\n", bucket->h.id);
+ return bucket->h.items[0];
}
@@ -220,7 +220,7 @@ static int bucket_tree_choose(struct crush_bucket_tree *bucket,
static int bucket_straw_choose(struct crush_bucket_straw *bucket,
int x, int r)
{
- int i;
+ __u32 i;
int high = 0;
__u64 high_draw = 0;
__u64 draw;
@@ -240,6 +240,7 @@ static int bucket_straw_choose(struct crush_bucket_straw *bucket,
static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
{
dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
+ BUG_ON(in->size == 0);
switch (in->alg) {
case CRUSH_BUCKET_UNIFORM:
return bucket_uniform_choose((struct crush_bucket_uniform *)in,
@@ -254,7 +255,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
return bucket_straw_choose((struct crush_bucket_straw *)in,
x, r);
default:
- BUG_ON(1);
+ dprintk("unknown bucket %d alg %d\n", in->id, in->alg);
return in->items[0];
}
}
@@ -263,7 +264,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
* true if device is marked "out" (failed, fully offloaded)
* of the cluster
*/
-static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
+static int is_out(const struct crush_map *map, const __u32 *weight, int item, int x)
{
if (weight[item] >= 0x10000)
return 0;
@@ -288,16 +289,16 @@ static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
* @recurse_to_leaf: true if we want one device under each item of given type
* @out2: second output vector for leaf items (if @recurse_to_leaf)
*/
-static int crush_choose(struct crush_map *map,
+static int crush_choose(const struct crush_map *map,
struct crush_bucket *bucket,
- __u32 *weight,
+ const __u32 *weight,
int x, int numrep, int type,
int *out, int outpos,
int firstn, int recurse_to_leaf,
int *out2)
{
int rep;
- int ftotal, flocal;
+ unsigned int ftotal, flocal;
int retry_descent, retry_bucket, skip_rep;
struct crush_bucket *in = bucket;
int r;
@@ -305,7 +306,7 @@ static int crush_choose(struct crush_map *map,
int item = 0;
int itemtype;
int collide, reject;
- const int orig_tries = 5; /* attempts before we fall back to search */
+ const unsigned int orig_tries = 5; /* attempts before we fall back to search */
dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
bucket->id, x, outpos, numrep);
@@ -326,7 +327,7 @@ static int crush_choose(struct crush_map *map,
r = rep;
if (in->alg == CRUSH_BUCKET_UNIFORM) {
/* be careful */
- if (firstn || numrep >= in->size)
+ if (firstn || (__u32)numrep >= in->size)
/* r' = r + f_total */
r += ftotal;
else if (in->size % numrep == 0)
@@ -355,7 +356,11 @@ static int crush_choose(struct crush_map *map,
item = bucket_perm_choose(in, x, r);
else
item = crush_bucket_choose(in, x, r);
- BUG_ON(item >= map->max_devices);
+ if (item >= map->max_devices) {
+ dprintk(" bad item %d\n", item);
+ skip_rep = 1;
+ break;
+ }
/* desired type? */
if (item < 0)
@@ -366,8 +371,12 @@ static int crush_choose(struct crush_map *map,
/* keep going? */
if (itemtype != type) {
- BUG_ON(item >= 0 ||
- (-1-item) >= map->max_buckets);
+ if (item >= 0 ||
+ (-1-item) >= map->max_buckets) {
+ dprintk(" bad item type %d\n", type);
+ skip_rep = 1;
+ break;
+ }
in = map->buckets[-1-item];
retry_bucket = 1;
continue;
@@ -416,7 +425,7 @@ reject:
if (collide && flocal < 3)
/* retry locally a few times */
retry_bucket = 1;
- else if (flocal < in->size + orig_tries)
+ else if (flocal <= in->size + orig_tries)
/* exhaustive bucket search */
retry_bucket = 1;
else if (ftotal < 20)
@@ -426,7 +435,7 @@ reject:
/* else give up */
skip_rep = 1;
dprintk(" reject %d collide %d "
- "ftotal %d flocal %d\n",
+ "ftotal %u flocal %u\n",
reject, collide, ftotal,
flocal);
}
@@ -455,15 +464,12 @@ reject:
* @x: hash input
* @result: pointer to result vector
* @result_max: maximum result size
- * @force: force initial replica choice; -1 for none
*/
-int crush_do_rule(struct crush_map *map,
+int crush_do_rule(const struct crush_map *map,
int ruleno, int x, int *result, int result_max,
- int force, __u32 *weight)
+ const __u32 *weight)
{
int result_len;
- int force_context[CRUSH_MAX_DEPTH];
- int force_pos = -1;
int a[CRUSH_MAX_SET];
int b[CRUSH_MAX_SET];
int c[CRUSH_MAX_SET];
@@ -474,66 +480,44 @@ int crush_do_rule(struct crush_map *map,
int osize;
int *tmp;
struct crush_rule *rule;
- int step;
+ __u32 step;
int i, j;
int numrep;
int firstn;
- BUG_ON(ruleno >= map->max_rules);
+ if ((__u32)ruleno >= map->max_rules) {
+ dprintk(" bad ruleno %d\n", ruleno);
+ return 0;
+ }
rule = map->rules[ruleno];
result_len = 0;
w = a;
o = b;
- /*
- * determine hierarchical context of force, if any. note
- * that this may or may not correspond to the specific types
- * referenced by the crush rule.
- */
- if (force >= 0 &&
- force < map->max_devices &&
- map->device_parents[force] != 0 &&
- !is_out(map, weight, force, x)) {
- while (1) {
- force_context[++force_pos] = force;
- if (force >= 0)
- force = map->device_parents[force];
- else
- force = map->bucket_parents[-1-force];
- if (force == 0)
- break;
- }
- }
-
for (step = 0; step < rule->len; step++) {
+ struct crush_rule_step *curstep = &rule->steps[step];
+
firstn = 0;
- switch (rule->steps[step].op) {
+ switch (curstep->op) {
case CRUSH_RULE_TAKE:
- w[0] = rule->steps[step].arg1;
-
- /* find position in force_context/hierarchy */
- while (force_pos >= 0 &&
- force_context[force_pos] != w[0])
- force_pos--;
- /* and move past it */
- if (force_pos >= 0)
- force_pos--;
-
+ w[0] = curstep->arg1;
wsize = 1;
break;
case CRUSH_RULE_CHOOSE_LEAF_FIRSTN:
case CRUSH_RULE_CHOOSE_FIRSTN:
firstn = 1;
+ /* fall through */
case CRUSH_RULE_CHOOSE_LEAF_INDEP:
case CRUSH_RULE_CHOOSE_INDEP:
- BUG_ON(wsize == 0);
+ if (wsize == 0)
+ break;
recurse_to_leaf =
- rule->steps[step].op ==
+ curstep->op ==
CRUSH_RULE_CHOOSE_LEAF_FIRSTN ||
- rule->steps[step].op ==
+ curstep->op ==
CRUSH_RULE_CHOOSE_LEAF_INDEP;
/* reset output */
@@ -545,32 +529,18 @@ int crush_do_rule(struct crush_map *map,
* basically, numrep <= 0 means relative to
* the provided result_max
*/
- numrep = rule->steps[step].arg1;
+ numrep = curstep->arg1;
if (numrep <= 0) {
numrep += result_max;
if (numrep <= 0)
continue;
}
j = 0;
- if (osize == 0 && force_pos >= 0) {
- /* skip any intermediate types */
- while (force_pos &&
- force_context[force_pos] < 0 &&
- rule->steps[step].arg2 !=
- map->buckets[-1 -
- force_context[force_pos]]->type)
- force_pos--;
- o[osize] = force_context[force_pos];
- if (recurse_to_leaf)
- c[osize] = force_context[0];
- j++;
- force_pos--;
- }
osize += crush_choose(map,
map->buckets[-1-w[i]],
weight,
x, numrep,
- rule->steps[step].arg2,
+ curstep->arg2,
o+osize, j,
firstn,
recurse_to_leaf, c+osize);
@@ -597,7 +567,9 @@ int crush_do_rule(struct crush_map *map,
break;
default:
- BUG_ON(1);
+ dprintk(" unknown op %d at step %d\n",
+ curstep->op, step);
+ break;
}
}
return result_len;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 36fa6bf68498..524f4e4f598b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -653,54 +653,57 @@ static void prepare_write_keepalive(struct ceph_connection *con)
* Connection negotiation.
*/
-static int prepare_connect_authorizer(struct ceph_connection *con)
+static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
+ int *auth_proto)
{
- void *auth_buf;
- int auth_len = 0;
- int auth_protocol = 0;
+ struct ceph_auth_handshake *auth;
+
+ if (!con->ops->get_authorizer) {
+ con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
+ con->out_connect.authorizer_len = 0;
+
+ return NULL;
+ }
+
+ /* Can't hold the mutex while getting authorizer */
mutex_unlock(&con->mutex);
- if (con->ops->get_authorizer)
- con->ops->get_authorizer(con, &auth_buf, &auth_len,
- &auth_protocol, &con->auth_reply_buf,
- &con->auth_reply_buf_len,
- con->auth_retry);
+
+ auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
+
mutex_lock(&con->mutex);
- if (test_bit(CLOSED, &con->state) ||
- test_bit(OPENING, &con->state))
- return -EAGAIN;
+ if (IS_ERR(auth))
+ return auth;
+ if (test_bit(CLOSED, &con->state) || test_bit(OPENING, &con->state))
+ return ERR_PTR(-EAGAIN);
- con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
- con->out_connect.authorizer_len = cpu_to_le32(auth_len);
+ con->auth_reply_buf = auth->authorizer_reply_buf;
+ con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
- if (auth_len)
- ceph_con_out_kvec_add(con, auth_len, auth_buf);
- return 0;
+ return auth;
}
/*
* We connected to a peer and are saying hello.
*/
-static void prepare_write_banner(struct ceph_messenger *msgr,
- struct ceph_connection *con)
+static void prepare_write_banner(struct ceph_connection *con)
{
- ceph_con_out_kvec_reset(con);
ceph_con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
- ceph_con_out_kvec_add(con, sizeof (msgr->my_enc_addr),
- &msgr->my_enc_addr);
+ ceph_con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
+ &con->msgr->my_enc_addr);
con->out_more = 0;
set_bit(WRITE_PENDING, &con->state);
}
-static int prepare_write_connect(struct ceph_messenger *msgr,
- struct ceph_connection *con,
- int include_banner)
+static int prepare_write_connect(struct ceph_connection *con)
{
unsigned int global_seq = get_global_seq(con->msgr, 0);
int proto;
+ int auth_proto;
+ struct ceph_auth_handshake *auth;
switch (con->peer_name.type) {
case CEPH_ENTITY_TYPE_MON:
@@ -719,23 +722,32 @@ static int prepare_write_connect(struct ceph_messenger *msgr,
dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
con->connect_seq, global_seq, proto);
- con->out_connect.features = cpu_to_le64(msgr->supported_features);
+ con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
con->out_connect.global_seq = cpu_to_le32(global_seq);
con->out_connect.protocol_version = cpu_to_le32(proto);
con->out_connect.flags = 0;
- if (include_banner)
- prepare_write_banner(msgr, con);
- else
- ceph_con_out_kvec_reset(con);
- ceph_con_out_kvec_add(con, sizeof (con->out_connect), &con->out_connect);
+ auth_proto = CEPH_AUTH_UNKNOWN;
+ auth = get_connect_authorizer(con, &auth_proto);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+ con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
+ con->out_connect.authorizer_len = auth ?
+ cpu_to_le32(auth->authorizer_buf_len) : 0;
+
+ ceph_con_out_kvec_add(con, sizeof (con->out_connect),
+ &con->out_connect);
+ if (auth && auth->authorizer_buf_len)
+ ceph_con_out_kvec_add(con, auth->authorizer_buf_len,
+ auth->authorizer_buf);
con->out_more = 0;
set_bit(WRITE_PENDING, &con->state);
- return prepare_connect_authorizer(con);
+ return 0;
}
/*
@@ -992,11 +1004,10 @@ static int prepare_read_message(struct ceph_connection *con)
static int read_partial(struct ceph_connection *con,
- int *to, int size, void *object)
+ int end, int size, void *object)
{
- *to += size;
- while (con->in_base_pos < *to) {
- int left = *to - con->in_base_pos;
+ while (con->in_base_pos < end) {
+ int left = end - con->in_base_pos;
int have = size - left;
int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
if (ret <= 0)
@@ -1012,37 +1023,52 @@ static int read_partial(struct ceph_connection *con,
*/
static int read_partial_banner(struct ceph_connection *con)
{
- int ret, to = 0;
+ int size;
+ int end;
+ int ret;
dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
/* peer's banner */
- ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
+ size = strlen(CEPH_BANNER);
+ end = size;
+ ret = read_partial(con, end, size, con->in_banner);
if (ret <= 0)
goto out;
- ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
- &con->actual_peer_addr);
+
+ size = sizeof (con->actual_peer_addr);
+ end += size;
+ ret = read_partial(con, end, size, &con->actual_peer_addr);
if (ret <= 0)
goto out;
- ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
- &con->peer_addr_for_me);
+
+ size = sizeof (con->peer_addr_for_me);
+ end += size;
+ ret = read_partial(con, end, size, &con->peer_addr_for_me);
if (ret <= 0)
goto out;
+
out:
return ret;
}
static int read_partial_connect(struct ceph_connection *con)
{
- int ret, to = 0;
+ int size;
+ int end;
+ int ret;
dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
- ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
+ size = sizeof (con->in_reply);
+ end = size;
+ ret = read_partial(con, end, size, &con->in_reply);
if (ret <= 0)
goto out;
- ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
- con->auth_reply_buf);
+
+ size = le32_to_cpu(con->in_reply.authorizer_len);
+ end += size;
+ ret = read_partial(con, end, size, con->auth_reply_buf);
if (ret <= 0)
goto out;
@@ -1377,7 +1403,8 @@ static int process_connect(struct ceph_connection *con)
return -1;
}
con->auth_retry = 1;
- ret = prepare_write_connect(con->msgr, con, 0);
+ ceph_con_out_kvec_reset(con);
+ ret = prepare_write_connect(con);
if (ret < 0)
return ret;
prepare_read_connect(con);
@@ -1397,7 +1424,10 @@ static int process_connect(struct ceph_connection *con)
ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr.in_addr));
reset_connection(con);
- prepare_write_connect(con->msgr, con, 0);
+ ceph_con_out_kvec_reset(con);
+ ret = prepare_write_connect(con);
+ if (ret < 0)
+ return ret;
prepare_read_connect(con);
/* Tell ceph about it. */
@@ -1420,7 +1450,10 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->out_connect.connect_seq),
le32_to_cpu(con->in_connect.connect_seq));
con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
- prepare_write_connect(con->msgr, con, 0);
+ ceph_con_out_kvec_reset(con);
+ ret = prepare_write_connect(con);
+ if (ret < 0)
+ return ret;
prepare_read_connect(con);
break;
@@ -1434,7 +1467,10 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->in_connect.global_seq));
get_global_seq(con->msgr,
le32_to_cpu(con->in_connect.global_seq));
- prepare_write_connect(con->msgr, con, 0);
+ ceph_con_out_kvec_reset(con);
+ ret = prepare_write_connect(con);
+ if (ret < 0)
+ return ret;
prepare_read_connect(con);
break;
@@ -1491,10 +1527,10 @@ static int process_connect(struct ceph_connection *con)
*/
static int read_partial_ack(struct ceph_connection *con)
{
- int to = 0;
+ int size = sizeof (con->in_temp_ack);
+ int end = size;
- return read_partial(con, &to, sizeof(con->in_temp_ack),
- &con->in_temp_ack);
+ return read_partial(con, end, size, &con->in_temp_ack);
}
@@ -1627,8 +1663,9 @@ static int read_partial_message_bio(struct ceph_connection *con,
static int read_partial_message(struct ceph_connection *con)
{
struct ceph_msg *m = con->in_msg;
+ int size;
+ int end;
int ret;
- int to, left;
unsigned int front_len, middle_len, data_len;
bool do_datacrc = !con->msgr->nocrc;
int skip;
@@ -1638,15 +1675,11 @@ static int read_partial_message(struct ceph_connection *con)
dout("read_partial_message con %p msg %p\n", con, m);
/* header */
- while (con->in_base_pos < sizeof(con->in_hdr)) {
- left = sizeof(con->in_hdr) - con->in_base_pos;
- ret = ceph_tcp_recvmsg(con->sock,
- (char *)&con->in_hdr + con->in_base_pos,
- left);
- if (ret <= 0)
- return ret;
- con->in_base_pos += ret;
- }
+ size = sizeof (con->in_hdr);
+ end = size;
+ ret = read_partial(con, end, size, &con->in_hdr);
+ if (ret <= 0)
+ return ret;
crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
if (cpu_to_le32(crc) != con->in_hdr.crc) {
@@ -1759,16 +1792,12 @@ static int read_partial_message(struct ceph_connection *con)
}
/* footer */
- to = sizeof(m->hdr) + sizeof(m->footer);
- while (con->in_base_pos < to) {
- left = to - con->in_base_pos;
- ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
- (con->in_base_pos - sizeof(m->hdr)),
- left);
- if (ret <= 0)
- return ret;
- con->in_base_pos += ret;
- }
+ size = sizeof (m->footer);
+ end += size;
+ ret = read_partial(con, end, size, &m->footer);
+ if (ret <= 0)
+ return ret;
+
dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
m, front_len, m->footer.front_crc, middle_len,
m->footer.middle_crc, data_len, m->footer.data_crc);
@@ -1835,7 +1864,6 @@ static void process_message(struct ceph_connection *con)
*/
static int try_write(struct ceph_connection *con)
{
- struct ceph_messenger *msgr = con->msgr;
int ret = 1;
dout("try_write start %p state %lu nref %d\n", con, con->state,
@@ -1846,7 +1874,11 @@ more:
/* open the socket first? */
if (con->sock == NULL) {
- prepare_write_connect(msgr, con, 1);
+ ceph_con_out_kvec_reset(con);
+ prepare_write_banner(con);
+ ret = prepare_write_connect(con);
+ if (ret < 0)
+ goto out;
prepare_read_banner(con);
set_bit(CONNECTING, &con->state);
clear_bit(NEGOTIATING, &con->state);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 1b0ef3c4d393..1ffebed5ce0f 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -278,7 +278,7 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
{
dst->op = cpu_to_le16(src->op);
- switch (dst->op) {
+ switch (src->op) {
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_WRITE:
dst->extent.offset =
@@ -664,11 +664,11 @@ static void put_osd(struct ceph_osd *osd)
{
dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
atomic_read(&osd->o_ref) - 1);
- if (atomic_dec_and_test(&osd->o_ref)) {
+ if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
- if (osd->o_authorizer)
- ac->ops->destroy_authorizer(ac, osd->o_authorizer);
+ if (ac->ops && ac->ops->destroy_authorizer)
+ ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer);
kfree(osd);
}
}
@@ -841,6 +841,12 @@ static void register_request(struct ceph_osd_client *osdc,
static void __unregister_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
+ if (RB_EMPTY_NODE(&req->r_node)) {
+ dout("__unregister_request %p tid %lld not registered\n",
+ req, req->r_tid);
+ return;
+ }
+
dout("__unregister_request %p tid %lld\n", req, req->r_tid);
rb_erase(&req->r_node, &osdc->requests);
osdc->num_requests--;
@@ -2108,37 +2114,32 @@ static void put_osd_con(struct ceph_connection *con)
/*
* authentication
*/
-static int get_authorizer(struct ceph_connection *con,
- void **buf, int *len, int *proto,
- void **reply_buf, int *reply_len, int force_new)
+/*
+ * Note: returned pointer is the address of a structure that's
+ * managed separately. Caller must *not* attempt to free it.
+ */
+static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
+ int *proto, int force_new)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
- int ret = 0;
+ struct ceph_auth_handshake *auth = &o->o_auth;
- if (force_new && o->o_authorizer) {
- ac->ops->destroy_authorizer(ac, o->o_authorizer);
- o->o_authorizer = NULL;
- }
- if (o->o_authorizer == NULL) {
- ret = ac->ops->create_authorizer(
- ac, CEPH_ENTITY_TYPE_OSD,
- &o->o_authorizer,
- &o->o_authorizer_buf,
- &o->o_authorizer_buf_len,
- &o->o_authorizer_reply_buf,
- &o->o_authorizer_reply_buf_len);
+ if (force_new && auth->authorizer) {
+ if (ac->ops && ac->ops->destroy_authorizer)
+ ac->ops->destroy_authorizer(ac, auth->authorizer);
+ auth->authorizer = NULL;
+ }
+ if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
+ int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
+ auth);
if (ret)
- return ret;
+ return ERR_PTR(ret);
}
-
*proto = ac->protocol;
- *buf = o->o_authorizer_buf;
- *len = o->o_authorizer_buf_len;
- *reply_buf = o->o_authorizer_reply_buf;
- *reply_len = o->o_authorizer_reply_buf_len;
- return 0;
+
+ return auth;
}
@@ -2148,7 +2149,11 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len)
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
- return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len);
+ /*
+ * XXX If ac->ops or ac->ops->verify_authorizer_reply is null,
+ * XXX which do we do: succeed or fail?
+ */
+ return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len);
}
static int invalidate_authorizer(struct ceph_connection *con)
@@ -2157,7 +2162,7 @@ static int invalidate_authorizer(struct ceph_connection *con)
struct ceph_osd_client *osdc = o->o_osdc;
struct ceph_auth_client *ac = osdc->client->monc.auth;
- if (ac->ops->invalidate_authorizer)
+ if (ac->ops && ac->ops->invalidate_authorizer)
ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
return ceph_monc_validate_auth(&osdc->client->monc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 56e561a69004..81e3b84a77ef 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -161,13 +161,6 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
c->max_rules = ceph_decode_32(p);
c->max_devices = ceph_decode_32(p);
- c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
- if (c->device_parents == NULL)
- goto badmem;
- c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
- if (c->bucket_parents == NULL)
- goto badmem;
-
c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
if (c->buckets == NULL)
goto badmem;
@@ -890,8 +883,12 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
pglen = ceph_decode_32(p);
if (pglen) {
- /* insert */
ceph_decode_need(p, end, pglen*sizeof(u32), bad);
+
+ /* removing existing (if any) */
+ (void) __remove_pg_mapping(&map->pg_temp, pgid);
+
+ /* insert */
pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
if (!pg) {
err = -ENOMEM;
@@ -1000,7 +997,6 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol,
{
unsigned int num, num_mask;
struct ceph_pg pgid;
- s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
int poolid = le32_to_cpu(fl->fl_pg_pool);
struct ceph_pg_pool_info *pool;
unsigned int ps;
@@ -1011,23 +1007,13 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol,
if (!pool)
return -EIO;
ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
- if (preferred >= 0) {
- ps += preferred;
- num = le32_to_cpu(pool->v.lpg_num);
- num_mask = pool->lpg_num_mask;
- } else {
- num = le32_to_cpu(pool->v.pg_num);
- num_mask = pool->pg_num_mask;
- }
+ num = le32_to_cpu(pool->v.pg_num);
+ num_mask = pool->pg_num_mask;
pgid.ps = cpu_to_le16(ps);
- pgid.preferred = cpu_to_le16(preferred);
+ pgid.preferred = cpu_to_le16(-1);
pgid.pool = fl->fl_pg_pool;
- if (preferred >= 0)
- dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
- (int)preferred);
- else
- dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
+ dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
ol->ol_pgid = pgid;
ol->ol_stripe_unit = fl->fl_object_stripe_unit;
@@ -1045,24 +1031,18 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
struct ceph_pg_mapping *pg;
struct ceph_pg_pool_info *pool;
int ruleno;
- unsigned int poolid, ps, pps, t;
- int preferred;
+ unsigned int poolid, ps, pps, t, r;
poolid = le32_to_cpu(pgid.pool);
ps = le16_to_cpu(pgid.ps);
- preferred = (s16)le16_to_cpu(pgid.preferred);
pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
if (!pool)
return NULL;
/* pg_temp? */
- if (preferred >= 0)
- t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num),
- pool->lpgp_num_mask);
- else
- t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
- pool->pgp_num_mask);
+ t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
+ pool->pgp_num_mask);
pgid.ps = cpu_to_le16(t);
pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
if (pg) {
@@ -1080,23 +1060,20 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
return NULL;
}
- /* don't forcefeed bad device ids to crush */
- if (preferred >= osdmap->max_osd ||
- preferred >= osdmap->crush->max_devices)
- preferred = -1;
-
- if (preferred >= 0)
- pps = ceph_stable_mod(ps,
- le32_to_cpu(pool->v.lpgp_num),
- pool->lpgp_num_mask);
- else
- pps = ceph_stable_mod(ps,
- le32_to_cpu(pool->v.pgp_num),
- pool->pgp_num_mask);
+ pps = ceph_stable_mod(ps,
+ le32_to_cpu(pool->v.pgp_num),
+ pool->pgp_num_mask);
pps += poolid;
- *num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
- min_t(int, pool->v.size, *num),
- preferred, osdmap->osd_weight);
+ r = crush_do_rule(osdmap->crush, ruleno, pps, osds,
+ min_t(int, pool->v.size, *num),
+ osdmap->osd_weight);
+ if (r < 0) {
+ pr_err("error %d from crush rule: pool %d ruleset %d type %d"
+ " size %d\n", r, poolid, pool->v.crush_ruleset,
+ pool->v.type, pool->v.size);
+ return NULL;
+ }
+ *num = r;
return osds;
}
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 3252e7e0a005..ea5fb9fcc3f5 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -468,3 +468,4 @@ module_exit(exit_net_drop_monitor);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
+MODULE_ALIAS_GENL_FAMILY("NET_DM");
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 89a47b35905d..cb982a61536f 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -459,28 +459,22 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
struct esp_data *esp = x->data;
u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
u32 align = max_t(u32, blksize, esp->padlen);
- u32 rem;
-
- mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
- rem = mtu & (align - 1);
- mtu &= ~(align - 1);
+ unsigned int net_adj;
switch (x->props.mode) {
- case XFRM_MODE_TUNNEL:
- break;
- default:
case XFRM_MODE_TRANSPORT:
- /* The worst case */
- mtu -= blksize - 4;
- mtu += min_t(u32, blksize - 4, rem);
- break;
case XFRM_MODE_BEET:
- /* The worst case. */
- mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
+ net_adj = sizeof(struct iphdr);
break;
+ case XFRM_MODE_TUNNEL:
+ net_adj = 0;
+ break;
+ default:
+ BUG();
}
- return mtu - 2;
+ return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+ net_adj) & ~(align - 1)) + (net_adj - 2);
}
static void esp4_err(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index a8bdf7405433..e5b7182fa099 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -145,6 +145,12 @@ static void free_fib_info_rcu(struct rcu_head *head)
{
struct fib_info *fi = container_of(head, struct fib_info, rcu);
+ change_nexthops(fi) {
+ if (nexthop_nh->nh_dev)
+ dev_put(nexthop_nh->nh_dev);
+ } endfor_nexthops(fi);
+
+ release_net(fi->fib_net);
if (fi->fib_metrics != (u32 *) dst_default_metrics)
kfree(fi->fib_metrics);
kfree(fi);
@@ -156,13 +162,7 @@ void free_fib_info(struct fib_info *fi)
pr_warn("Freeing alive fib_info %p\n", fi);
return;
}
- change_nexthops(fi) {
- if (nexthop_nh->nh_dev)
- dev_put(nexthop_nh->nh_dev);
- nexthop_nh->nh_dev = NULL;
- } endfor_nexthops(fi);
fib_info_cnt--;
- release_net(fi->fib_net);
call_rcu(&fi->rcu, free_fib_info_rcu);
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ffcb3b016843..98b30d08efe9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3452,6 +3452,7 @@ int __init ip_rt_init(void)
0,
&rt_hash_log,
&rt_hash_mask,
+ 0,
rhash_entries ? 0 : 512 * 1024);
memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
rt_hash_lock_init();
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index bb485fcb077e..3ba605f60e4e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3514,6 +3514,7 @@ void __init tcp_init(void)
0,
NULL,
&tcp_hashinfo.ehash_mask,
+ 0,
thash_entries ? 0 : 512 * 1024);
for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
@@ -3530,6 +3531,7 @@ void __init tcp_init(void)
0,
&tcp_hashinfo.bhash_size,
NULL,
+ 0,
64 * 1024);
tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cfa2aa128342..b224eb8bce8b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4555,6 +4555,11 @@ static bool tcp_try_coalesce(struct sock *sk,
if (tcp_hdr(from)->fin)
return false;
+
+ /* It's possible this segment overlaps with prior segment in queue */
+ if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
+ return false;
+
if (!skb_try_coalesce(to, from, fragstolen, &delta))
return false;
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 151703791bb0..b6f3583ddfe8 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -74,9 +74,6 @@ void tcp_destroy_cgroup(struct mem_cgroup *memcg)
percpu_counter_destroy(&tcp->tcp_sockets_allocated);
val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
-
- if (val != RESOURCE_MAX)
- static_key_slow_dec(&memcg_socket_limit_enabled);
}
EXPORT_SYMBOL(tcp_destroy_cgroup);
@@ -107,10 +104,33 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
net->ipv4.sysctl_tcp_mem[i]);
- if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX)
- static_key_slow_dec(&memcg_socket_limit_enabled);
- else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX)
- static_key_slow_inc(&memcg_socket_limit_enabled);
+ if (val == RESOURCE_MAX)
+ clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+ else if (val != RESOURCE_MAX) {
+ /*
+ * The active bit needs to be written after the static_key
+ * update. This is what guarantees that the socket activation
+ * function is the last one to run. See sock_update_memcg() for
+ * details, and note that we don't mark any socket as belonging
+ * to this memcg until that flag is up.
+ *
+ * We need to do this, because static_keys will span multiple
+ * sites, but we can't control their order. If we mark a socket
+ * as accounted, but the accounting functions are not patched in
+ * yet, we'll lose accounting.
+ *
+ * We never race with the readers in sock_update_memcg(),
+ * because when this value changes, the code to process it is not
+ * patched in yet.
+ *
+ * The activated bit is used to guarantee that no two writers
+ * will do the update in the same memcg. Without that, we can't
+ * properly shutdown the static key.
+ */
+ if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
+ static_key_slow_inc(&memcg_socket_limit_enabled);
+ set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+ }
return 0;
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 609397ee78fb..eaca73644e79 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2192,26 +2192,16 @@ void __init udp_table_init(struct udp_table *table, const char *name)
{
unsigned int i;
- if (!CONFIG_BASE_SMALL)
- table->hash = alloc_large_system_hash(name,
- 2 * sizeof(struct udp_hslot),
- uhash_entries,
- 21, /* one slot per 2 MB */
- 0,
- &table->log,
- &table->mask,
- 64 * 1024);
- /*
- * Make sure hash table has the minimum size
- */
- if (CONFIG_BASE_SMALL || table->mask < UDP_HTABLE_SIZE_MIN - 1) {
- table->hash = kmalloc(UDP_HTABLE_SIZE_MIN *
- 2 * sizeof(struct udp_hslot), GFP_KERNEL);
- if (!table->hash)
- panic(name);
- table->log = ilog2(UDP_HTABLE_SIZE_MIN);
- table->mask = UDP_HTABLE_SIZE_MIN - 1;
- }
+ table->hash = alloc_large_system_hash(name,
+ 2 * sizeof(struct udp_hslot),
+ uhash_entries,
+ 21, /* one slot per 2 MB */
+ 0,
+ &table->log,
+ &table->mask,
+ UDP_HTABLE_SIZE_MIN,
+ 64 * 1024);
+
table->hash2 = table->hash + (table->mask + 1);
for (i = 0; i <= table->mask; i++) {
INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 1e62b7557b00..db1521fcda5b 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -413,19 +413,15 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
struct esp_data *esp = x->data;
u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
u32 align = max_t(u32, blksize, esp->padlen);
- u32 rem;
+ unsigned int net_adj;
- mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
- rem = mtu & (align - 1);
- mtu &= ~(align - 1);
-
- if (x->props.mode != XFRM_MODE_TUNNEL) {
- u32 padsize = ((blksize - 1) & 7) + 1;
- mtu -= blksize - padsize;
- mtu += min_t(u32, blksize - padsize, rem);
- }
+ if (x->props.mode != XFRM_MODE_TUNNEL)
+ net_adj = sizeof(struct ipv6hdr);
+ else
+ net_adj = 0;
- return mtu - 2;
+ return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+ net_adj) & ~(align - 1)) + (net_adj - 2);
}
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d99fdc699625..17b8c67998bb 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1187,6 +1187,29 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}
+static void ip6_append_data_mtu(int *mtu,
+ int *maxfraglen,
+ unsigned int fragheaderlen,
+ struct sk_buff *skb,
+ struct rt6_info *rt)
+{
+ if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
+ if (skb == NULL) {
+ /* first fragment, reserve header_len */
+ *mtu = *mtu - rt->dst.header_len;
+
+ } else {
+ /*
+ * this fragment is not the first; the header
+ * space is regarded as data space.
+ */
+ *mtu = dst_mtu(rt->dst.path);
+ }
+ *maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ + fragheaderlen - sizeof(struct frag_hdr);
+ }
+}
+
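
The maxfraglen computation used by this helper keeps the fragmentable part of every packet a multiple of 8 bytes, as IPv6 fragmentation requires; a standalone sketch with illustrative sizes:

    #include <stdio.h>

    #define IPV6_HDR_LEN  40
    #define FRAG_HDR_LEN  8          /* sizeof(struct frag_hdr) */

    int main(void)
    {
            int mtu = 1500;
            /* illustrative: IPv6 header only, no extension headers */
            unsigned int fragheaderlen = IPV6_HDR_LEN;
            int maxfraglen = ((mtu - fragheaderlen) & ~7)
                             + fragheaderlen - FRAG_HDR_LEN;

            printf("maxfraglen = %d\n", maxfraglen);   /* prints 1488 */
            return 0;
    }

For the first fragment the helper subtracts rt->dst.header_len from the MTU (reserving room for the headers prepended later); for subsequent fragments it switches to the path MTU, which is what the ip6_append_data() changes below rely on.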
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
int offset, int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
@@ -1196,7 +1219,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_cork *cork;
- struct sk_buff *skb;
+ struct sk_buff *skb, *skb_prev = NULL;
unsigned int maxfraglen, fragheaderlen;
int exthdrlen;
int dst_exthdrlen;
@@ -1253,8 +1276,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
inet->cork.fl.u.ip6 = *fl6;
np->cork.hop_limit = hlimit;
np->cork.tclass = tclass;
- mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
- rt->dst.dev->mtu : dst_mtu(&rt->dst);
+ if (rt->dst.flags & DST_XFRM_TUNNEL)
+ mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+ rt->dst.dev->mtu : dst_mtu(&rt->dst);
+ else
+ mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+ rt->dst.dev->mtu : dst_mtu(rt->dst.path);
if (np->frag_size < mtu) {
if (np->frag_size)
mtu = np->frag_size;
@@ -1350,25 +1377,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
unsigned int fraglen;
unsigned int fraggap;
unsigned int alloclen;
- struct sk_buff *skb_prev;
alloc_new_skb:
- skb_prev = skb;
-
/* There's no room in the current skb */
- if (skb_prev)
- fraggap = skb_prev->len - maxfraglen;
+ if (skb)
+ fraggap = skb->len - maxfraglen;
else
fraggap = 0;
+ /* update mtu and maxfraglen if necessary */
+ if (skb == NULL || skb_prev == NULL)
+ ip6_append_data_mtu(&mtu, &maxfraglen,
+ fragheaderlen, skb, rt);
+
+ skb_prev = skb;
/*
* If remaining data exceeds the mtu,
* we know we need more fragment(s).
*/
datalen = length + fraggap;
- if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
- datalen = maxfraglen - fragheaderlen;
- fraglen = datalen + fragheaderlen;
+ if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
+ datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
@@ -1377,13 +1406,16 @@ alloc_new_skb:
alloclen += dst_exthdrlen;
- /*
- * The last fragment gets additional space at tail.
- * Note: we overallocate on fragments with MSG_MODE
- * because we have no idea if we're the last one.
- */
- if (datalen == length + fraggap)
- alloclen += rt->dst.trailer_len;
+ if (datalen != length + fraggap) {
+ /*
+ * this is not the last fragment; the trailer
+ * space is regarded as data space.
+ */
+ datalen += rt->dst.trailer_len;
+ }
+
+ alloclen += rt->dst.trailer_len;
+ fraglen = datalen + fragheaderlen;
/*
* We just reserve space for fragment header.
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 889f5d13d7ba..70614e7affab 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -239,9 +239,16 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
- int ret = -EINVAL;
+ int ret;
int chk_addr_ret;
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ return -EINVAL;
+ if (addr_len < sizeof(struct sockaddr_l2tpip))
+ return -EINVAL;
+ if (addr->l2tp_family != AF_INET)
+ return -EINVAL;
+
ret = -EADDRINUSE;
read_lock_bh(&l2tp_ip_lock);
if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
@@ -272,6 +279,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip_lock);
ret = 0;
+ sock_reset_flag(sk, SOCK_ZAPPED);
+
out:
release_sock(sk);
@@ -288,6 +297,9 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
int rc;
+ if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
+ return -EINVAL;
+
if (addr_len < sizeof(*lsa))
return -EINVAL;
@@ -311,6 +323,14 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
return rc;
}
+static int l2tp_ip_disconnect(struct sock *sk, int flags)
+{
+ if (sock_flag(sk, SOCK_ZAPPED))
+ return 0;
+
+ return udp_disconnect(sk, flags);
+}
+
static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
@@ -530,7 +550,7 @@ static struct proto l2tp_ip_prot = {
.close = l2tp_ip_close,
.bind = l2tp_ip_bind,
.connect = l2tp_ip_connect,
- .disconnect = udp_disconnect,
+ .disconnect = l2tp_ip_disconnect,
.ioctl = udp_ioctl,
.destroy = l2tp_ip_destroy_sock,
.setsockopt = ip_setsockopt,
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 0291d8d85f30..35e1e4bde587 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -258,6 +258,10 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
int addr_type;
int err;
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ return -EINVAL;
+ if (addr->l2tp_family != AF_INET6)
+ return -EINVAL;
if (addr_len < sizeof(*addr))
return -EINVAL;
@@ -331,6 +335,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip6_lock);
+ sock_reset_flag(sk, SOCK_ZAPPED);
release_sock(sk);
return 0;
@@ -354,6 +359,9 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_type;
int rc;
+ if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
+ return -EINVAL;
+
if (addr_len < sizeof(*lsa))
return -EINVAL;
@@ -383,6 +391,14 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
return rc;
}
+static int l2tp_ip6_disconnect(struct sock *sk, int flags)
+{
+ if (sock_flag(sk, SOCK_ZAPPED))
+ return 0;
+
+ return udp_disconnect(sk, flags);
+}
+
static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
@@ -689,7 +705,7 @@ static struct proto l2tp_ip6_prot = {
.close = l2tp_ip6_close,
.bind = l2tp_ip6_bind,
.connect = l2tp_ip6_connect,
- .disconnect = udp_disconnect,
+ .disconnect = l2tp_ip6_disconnect,
.ioctl = udp_ioctl,
.destroy = l2tp_ip6_destroy_sock,
.setsockopt = ipv6_setsockopt,
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 8577264378fe..ddc553e76671 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -923,5 +923,4 @@ MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP netlink");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
-MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \
- __stringify(NETLINK_GENERIC) "-type-" "l2tp");
+MODULE_ALIAS_GENL_FAMILY("l2tp");
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 5b7053c58732..7cf07158805c 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -421,16 +421,22 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
struct tid_ampdu_tx *tid_tx;
unsigned long timeout;
- tid_tx = rcu_dereference_protected_tid_tx(sta, *ptid);
- if (!tid_tx)
+ rcu_read_lock();
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
+ if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+ rcu_read_unlock();
return;
+ }
timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
if (time_is_after_jiffies(timeout)) {
mod_timer(&tid_tx->session_timer, timeout);
+ rcu_read_unlock();
return;
}
+ rcu_read_unlock();
+
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
#endif
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index ea0122dbd2b3..7ed433c66d68 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -509,6 +509,7 @@ IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC);
IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC);
+IEEE80211_IF_FILE(ht_opmode, u.mesh.mshcfg.ht_opmode, DEC);
#endif
#define DEBUGFS_ADD_MODE(name, mode) \
@@ -608,6 +609,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
MESHPARAMS_ADD(rssi_threshold);
+ MESHPARAMS_ADD(ht_opmode);
#undef MESHPARAMS_ADD
}
#endif
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 3ad33a824624..33d9d0c3e3d0 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -163,6 +163,11 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
sizeof(struct ieee80211_ht_operation));
pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
sband->ht_cap.cap);
+ /*
+ * Note: According to 802.11n-2009 9.13.3.1, HT Protection
+ * field and RIFS Mode are reserved in IBSS mode, therefore
+ * keep them at 0
+ */
pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap,
chan, channel_type, 0);
}
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 856237c5c1f8..d4c19a7773db 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -206,8 +206,10 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE;
- else
+ else if (local->hw.queues >= IEEE80211_NUM_ACS)
sdata->vif.hw_queue[i] = i;
+ else
+ sdata->vif.hw_queue[i] = 0;
}
sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index b70f7f09da61..f5548e953259 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -596,6 +596,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE;
local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
+ local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
+ IEEE80211_RADIOTAP_MCS_HAVE_GI |
+ IEEE80211_RADIOTAP_MCS_HAVE_BW;
local->user_power_level = -1;
wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 0675a2fec6a6..2913113c5833 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -109,8 +109,10 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
/* Disallow HT40+/- mismatch */
if (ie->ht_operation &&
- local->_oper_channel_type > NL80211_CHAN_HT20 &&
- sta_channel_type > NL80211_CHAN_HT20 &&
+ (local->_oper_channel_type == NL80211_CHAN_HT40MINUS ||
+ local->_oper_channel_type == NL80211_CHAN_HT40PLUS) &&
+ (sta_channel_type == NL80211_CHAN_HT40MINUS ||
+ sta_channel_type == NL80211_CHAN_HT40PLUS) &&
local->_oper_channel_type != sta_channel_type)
goto mismatch;
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 27e0c2f06795..9b59658e8650 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -603,7 +603,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
hopcount, ttl, cpu_to_le32(lifetime),
cpu_to_le32(metric), cpu_to_le32(preq_id),
sdata);
- ifmsh->mshstats.fwded_mcast++;
+ if (!is_multicast_ether_addr(da))
+ ifmsh->mshstats.fwded_unicast++;
+ else
+ ifmsh->mshstats.fwded_mcast++;
ifmsh->mshstats.fwded_frames++;
}
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 8cc8461b48a0..60ef235c9d9b 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -105,15 +105,15 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
return sta;
}
-/** mesh_set_ht_prot_mode - set correct HT protection mode
+/*
+ * mesh_set_ht_prot_mode - set correct HT protection mode
*
- * Section 9.23.3.5 of IEEE 80211s standard describes the protection rules for
- * HT mesh STA in a MBSS. Three HT protection modes are supported for now,
- * non-HT mixed mode, 20MHz-protection and no-protection mode. non-HT mixed
- * mode is selected if any non-HT peers are present in our MBSS.
- * 20MHz-protection mode is selected if all peers in our 20/40MHz MBSS support
- * HT and atleast one HT20 peer is present. Otherwise no-protection mode is
- * selected.
+ * Section 9.23.3.5 of IEEE 802.11-2012 describes the protection rules for HT
+ * mesh STA in a MBSS. Three HT protection modes are supported for now, non-HT
+ * mixed mode, 20MHz-protection and no-protection mode. non-HT mixed mode is
+ * selected if any non-HT peers are present in our MBSS. 20MHz-protection mode
+ * is selected if all peers in our 20/40MHz MBSS support HT and at least one
+ * HT20 peer is present. Otherwise no-protection mode is selected.
*/
static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
{
@@ -128,21 +128,22 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
- if (sdata == sta->sdata &&
- sta->plink_state == NL80211_PLINK_ESTAB) {
- switch (sta->ch_type) {
- case NL80211_CHAN_NO_HT:
- mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present",
- sdata->vif.addr, sta->sta.addr);
- non_ht_sta = true;
- goto out;
- case NL80211_CHAN_HT20:
- mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present",
- sdata->vif.addr, sta->sta.addr);
- ht20_sta = true;
- default:
- break;
- }
+ if (sdata != sta->sdata ||
+ sta->plink_state != NL80211_PLINK_ESTAB)
+ continue;
+
+ switch (sta->ch_type) {
+ case NL80211_CHAN_NO_HT:
+ mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present",
+ sdata->vif.addr, sta->sta.addr);
+ non_ht_sta = true;
+ goto out;
+ case NL80211_CHAN_HT20:
+ mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present",
+ sdata->vif.addr, sta->sta.addr);
+ ht20_sta = true;
+ default:
+ break;
}
}
out:
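
The rules in the rewritten comment above boil down to a small decision function; the sketch below is only a simplification (it ignores the HT operation IE update and uses its own enum rather than the IEEE80211_HT_OP_MODE_PROTECTION_* values):

    #include <stdbool.h>
    #include <stdio.h>

    enum ht_protection { PROT_NONE, PROT_20MHZ, PROT_NONHT_MIXED };

    static enum ht_protection mesh_ht_protection(bool ht40_mbss,
                                                 bool non_ht_peer,
                                                 bool ht20_peer)
    {
            if (non_ht_peer)
                    return PROT_NONHT_MIXED;   /* any non-HT peer in the MBSS */
            if (ht40_mbss && ht20_peer)
                    return PROT_20MHZ;         /* 20/40MHz MBSS with an HT20 peer */
            return PROT_NONE;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   mesh_ht_protection(true, true, false),   /* mixed  */
                   mesh_ht_protection(true, false, true),   /* 20MHz  */
                   mesh_ht_protection(true, false, false)); /* none   */
            return 0;
    }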
@@ -346,6 +347,15 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
sta = sta_info_get(sdata, addr);
if (!sta) {
+ /* Userspace handles peer allocation when security is enabled */
+ if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
+ cfg80211_notify_new_peer_candidate(sdata->dev, addr,
+ elems->ie_start,
+ elems->total_len,
+ GFP_ATOMIC);
+ return NULL;
+ }
+
sta = mesh_plink_alloc(sdata, addr);
if (!sta)
return NULL;
@@ -387,15 +397,6 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
{
struct sta_info *sta;
- /* Userspace handles peer allocation when security is enabled */
- if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
- cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr,
- elems->ie_start,
- elems->total_len,
- GFP_KERNEL);
- return;
- }
-
rcu_read_lock();
sta = mesh_peer_init(sdata, hw_addr, elems);
if (!sta)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b3b3c264ff66..04c306308987 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1522,6 +1522,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
* anymore. The timeout will be reset if the frame is ACKed by
* the AP.
*/
+ ifmgd->probe_send_count++;
+
if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
ifmgd->nullfunc_failed = false;
ieee80211_send_nullfunc(sdata->local, sdata, 0);
@@ -1538,7 +1540,6 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
0, (u32) -1, true, false);
}
- ifmgd->probe_send_count++;
ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
run_again(ifmgd, ifmgd->probe_timeout);
if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 8257a09eeed4..7bcecf73aafb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -204,14 +204,14 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
if (status->flag & RX_FLAG_HT) {
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
- *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
- IEEE80211_RADIOTAP_MCS_HAVE_GI |
- IEEE80211_RADIOTAP_MCS_HAVE_BW;
+ *pos++ = local->hw.radiotap_mcs_details;
*pos = 0;
if (status->flag & RX_FLAG_SHORT_GI)
*pos |= IEEE80211_RADIOTAP_MCS_SGI;
if (status->flag & RX_FLAG_40MHZ)
*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
+ if (status->flag & RX_FLAG_HT_GF)
+ *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
pos++;
*pos++ = status->rate_idx;
}
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5f827a6b0d8d..847215bb2a6f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -153,7 +153,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
/* Don't calculate ACKs for QoS Frames with NoAck Policy set */
if (ieee80211_is_data_qos(hdr->frame_control) &&
- *(ieee80211_get_qos_ctl(hdr)) | IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
+ *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
dur = 0;
else
/* Time needed to transmit ACK
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 22f2216b397e..a44c6807df01 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1371,6 +1371,12 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
}
+ /* add back keys */
+ list_for_each_entry(sdata, &local->interfaces, list)
+ if (ieee80211_sdata_running(sdata))
+ ieee80211_enable_keys(sdata);
+
+ wake_up:
/*
* Clear the WLAN_STA_BLOCK_BA flag so new aggregation
* sessions can be established after a resume.
@@ -1392,12 +1398,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
mutex_unlock(&local->sta_mtx);
}
- /* add back keys */
- list_for_each_entry(sdata, &local->interfaces, list)
- if (ieee80211_sdata_running(sdata))
- ieee80211_enable_keys(sdata);
-
- wake_up:
ieee80211_wake_queues_by_reason(hw,
IEEE80211_QUEUE_STOP_REASON_SUSPEND);
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 7aa31bbfaa3b..c04d401dae92 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -92,6 +92,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
int keylen, int keyidx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
unsigned int hdrlen;
u8 *newhdr;
@@ -104,6 +105,13 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
hdrlen = ieee80211_hdrlen(hdr->frame_control);
newhdr = skb_push(skb, WEP_IV_LEN);
memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen);
+
+ /* the HW only needs room for the IV, but not the actual IV */
+ if (info->control.hw_key &&
+ (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
+ return newhdr + hdrlen;
+
+ skb_set_network_header(skb, skb_network_offset(skb) + WEP_IV_LEN);
ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen);
return newhdr + hdrlen;
}
@@ -313,14 +321,15 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *hw_key = info->control.hw_key;
- if (!info->control.hw_key) {
+ if (!hw_key) {
if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key,
tx->key->conf.keylen,
tx->key->conf.keyidx))
return -1;
- } else if (info->control.hw_key->flags &
- IEEE80211_KEY_FLAG_GENERATE_IV) {
+ } else if ((hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
if (!ieee80211_wep_add_iv(tx->local, skb,
tx->key->conf.keylen,
tx->key->conf.keyidx))
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 0ae23c60968c..bdb53aba888e 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -183,7 +183,8 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
u8 *pos;
if (info->control.hw_key &&
- !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+ !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
+ !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
/* hwaccel - with no need for software-generated IV */
return 0;
}
@@ -202,8 +203,14 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
pos = skb_push(skb, TKIP_IV_LEN);
memmove(pos, pos + TKIP_IV_LEN, hdrlen);
+ skb_set_network_header(skb, skb_network_offset(skb) + TKIP_IV_LEN);
pos += hdrlen;
+ /* the HW only needs room for the IV, but not the actual IV */
+ if (info->control.hw_key &&
+ (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
+ return 0;
+
/* Increase IV for the frame */
spin_lock_irqsave(&key->u.tkip.txlock, flags);
key->u.tkip.tx.iv16++;
@@ -422,6 +429,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
pos = skb_push(skb, CCMP_HDR_LEN);
memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
+ skb_set_network_header(skb, skb_network_offset(skb) + CCMP_HDR_LEN);
/* the HW only needs room for the IV, but not the actual IV */
if (info->control.hw_key &&
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 8340ace837f2..2cc7c1ee7690 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -836,7 +836,7 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
#ifdef CONFIG_MODULES
if (res == NULL) {
genl_unlock();
- request_module("net-pf-%d-proto-%d-type-%s",
+ request_module("net-pf-%d-proto-%d-family-%s",
PF_NETLINK, NETLINK_GENERIC, name);
genl_lock();
res = genl_family_find_byname(name);
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 3192c3f589ee..9f6ce011d35d 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -97,7 +97,7 @@ int nfc_dev_down(struct nfc_dev *dev)
goto error;
}
- if (dev->polling || dev->activated_target_idx != NFC_TARGET_IDX_NONE) {
+ if (dev->polling || dev->active_target) {
rc = -EBUSY;
goto error;
}
@@ -183,11 +183,27 @@ error:
return rc;
}
+static struct nfc_target *nfc_find_target(struct nfc_dev *dev, u32 target_idx)
+{
+ int i;
+
+ if (dev->n_targets == 0)
+ return NULL;
+
+ for (i = 0; i < dev->n_targets ; i++) {
+ if (dev->targets[i].idx == target_idx)
+ return &dev->targets[i];
+ }
+
+ return NULL;
+}
+
int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
{
int rc = 0;
u8 *gb;
size_t gb_len;
+ struct nfc_target *target;
pr_debug("dev_name=%s comm %d\n", dev_name(&dev->dev), comm_mode);
@@ -212,9 +228,15 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
goto error;
}
- rc = dev->ops->dep_link_up(dev, target_index, comm_mode, gb, gb_len);
+ target = nfc_find_target(dev, target_index);
+ if (target == NULL) {
+ rc = -ENOTCONN;
+ goto error;
+ }
+
+ rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len);
if (!rc)
- dev->activated_target_idx = target_index;
+ dev->active_target = target;
error:
device_unlock(&dev->dev);
@@ -250,7 +272,7 @@ int nfc_dep_link_down(struct nfc_dev *dev)
rc = dev->ops->dep_link_down(dev);
if (!rc) {
dev->dep_link_up = false;
- dev->activated_target_idx = NFC_TARGET_IDX_NONE;
+ dev->active_target = NULL;
nfc_llcp_mac_is_down(dev);
nfc_genl_dep_link_down_event(dev);
}
@@ -282,6 +304,7 @@ EXPORT_SYMBOL(nfc_dep_link_is_up);
int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
{
int rc;
+ struct nfc_target *target;
pr_debug("dev_name=%s target_idx=%u protocol=%u\n",
dev_name(&dev->dev), target_idx, protocol);
@@ -293,9 +316,20 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
goto error;
}
- rc = dev->ops->activate_target(dev, target_idx, protocol);
+ if (dev->active_target) {
+ rc = -EBUSY;
+ goto error;
+ }
+
+ target = nfc_find_target(dev, target_idx);
+ if (target == NULL) {
+ rc = -ENOTCONN;
+ goto error;
+ }
+
+ rc = dev->ops->activate_target(dev, target, protocol);
if (!rc) {
- dev->activated_target_idx = target_idx;
+ dev->active_target = target;
if (dev->ops->check_presence)
mod_timer(&dev->check_pres_timer, jiffies +
@@ -327,11 +361,21 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
goto error;
}
+ if (dev->active_target == NULL) {
+ rc = -ENOTCONN;
+ goto error;
+ }
+
+ if (dev->active_target->idx != target_idx) {
+ rc = -ENOTCONN;
+ goto error;
+ }
+
if (dev->ops->check_presence)
del_timer_sync(&dev->check_pres_timer);
- dev->ops->deactivate_target(dev, target_idx);
- dev->activated_target_idx = NFC_TARGET_IDX_NONE;
+ dev->ops->deactivate_target(dev, dev->active_target);
+ dev->active_target = NULL;
error:
device_unlock(&dev->dev);
@@ -365,13 +409,13 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
goto error;
}
- if (dev->activated_target_idx == NFC_TARGET_IDX_NONE) {
+ if (dev->active_target == NULL) {
rc = -ENOTCONN;
kfree_skb(skb);
goto error;
}
- if (target_idx != dev->activated_target_idx) {
+ if (dev->active_target->idx != target_idx) {
rc = -EADDRNOTAVAIL;
kfree_skb(skb);
goto error;
@@ -380,7 +424,8 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
if (dev->ops->check_presence)
del_timer_sync(&dev->check_pres_timer);
- rc = dev->ops->data_exchange(dev, target_idx, skb, cb, cb_context);
+ rc = dev->ops->data_exchange(dev, dev->active_target, skb, cb,
+ cb_context);
if (!rc && dev->ops->check_presence)
mod_timer(&dev->check_pres_timer, jiffies +
@@ -456,6 +501,9 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb);
* The device driver must call this function when one or many nfc targets
* are found. After calling this function, the device driver must stop
* polling for targets.
+ * IMPORTANT: this function must not be called from an atomic context.
+ * In addition, it must also not be called from a context that would prevent
+ * the NFC Core from calling other nfc ops entry points concurrently.
*/
int nfc_targets_found(struct nfc_dev *dev,
struct nfc_target *targets, int n_targets)
@@ -469,7 +517,7 @@ int nfc_targets_found(struct nfc_dev *dev,
for (i = 0; i < n_targets; i++)
targets[i].idx = dev->target_next_idx++;
- spin_lock_bh(&dev->targets_lock);
+ device_lock(&dev->dev);
dev->targets_generation++;
@@ -479,12 +527,12 @@ int nfc_targets_found(struct nfc_dev *dev,
if (!dev->targets) {
dev->n_targets = 0;
- spin_unlock_bh(&dev->targets_lock);
+ device_unlock(&dev->dev);
return -ENOMEM;
}
dev->n_targets = n_targets;
- spin_unlock_bh(&dev->targets_lock);
+ device_unlock(&dev->dev);
nfc_genl_targets_found(dev);
@@ -492,6 +540,18 @@ int nfc_targets_found(struct nfc_dev *dev,
}
EXPORT_SYMBOL(nfc_targets_found);
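
Given the context restriction spelled out in the comment above, a driver typically reports targets from a workqueue rather than from its interrupt handler. A minimal, hypothetical sketch (the demo_nfc structure and names are invented; only nfc_targets_found() and the workqueue helpers are real kernel interfaces):

    #include <linux/workqueue.h>
    #include <net/nfc/nfc.h>

    struct demo_nfc {
            struct nfc_dev *ndev;
            struct nfc_target found;        /* filled in by the IRQ path */
            struct work_struct tgt_work;    /* INIT_WORK()ed at probe time */
    };

    static void demo_report_target(struct work_struct *work)
    {
            struct demo_nfc *demo = container_of(work, struct demo_nfc,
                                                 tgt_work);

            /* process context: safe with the device_lock() the core takes */
            nfc_targets_found(demo->ndev, &demo->found, 1);
    }

    /* the interrupt handler itself only does: schedule_work(&demo->tgt_work); */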
+/**
+ * nfc_target_lost - inform that an activated target went out of field
+ *
+ * @dev: The nfc device that had the activated target in field
+ * @target_idx: the nfc index of the target
+ *
+ * The device driver must call this function when the activated target
+ * goes out of the field.
+ * IMPORTANT: this function must not be called from an atomic context.
+ * In addition, it must also not be called from a context that would prevent
+ * the NFC Core from calling other nfc ops entry points concurrently.
+ */
int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
{
struct nfc_target *tg;
@@ -499,7 +559,7 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
pr_debug("dev_name %s n_target %d\n", dev_name(&dev->dev), target_idx);
- spin_lock_bh(&dev->targets_lock);
+ device_lock(&dev->dev);
for (i = 0; i < dev->n_targets; i++) {
tg = &dev->targets[i];
@@ -508,13 +568,13 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
}
if (i == dev->n_targets) {
- spin_unlock_bh(&dev->targets_lock);
+ device_unlock(&dev->dev);
return -EINVAL;
}
dev->targets_generation++;
dev->n_targets--;
- dev->activated_target_idx = NFC_TARGET_IDX_NONE;
+ dev->active_target = NULL;
if (dev->n_targets) {
memcpy(&dev->targets[i], &dev->targets[i + 1],
@@ -524,7 +584,7 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
dev->targets = NULL;
}
- spin_unlock_bh(&dev->targets_lock);
+ device_unlock(&dev->dev);
nfc_genl_target_lost(dev, target_idx);
@@ -556,15 +616,16 @@ static void nfc_check_pres_work(struct work_struct *work)
device_lock(&dev->dev);
- if (dev->activated_target_idx != NFC_TARGET_IDX_NONE &&
- timer_pending(&dev->check_pres_timer) == 0) {
- rc = dev->ops->check_presence(dev, dev->activated_target_idx);
+ if (dev->active_target && timer_pending(&dev->check_pres_timer) == 0) {
+ rc = dev->ops->check_presence(dev, dev->active_target);
if (!rc) {
mod_timer(&dev->check_pres_timer, jiffies +
msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
} else {
- nfc_target_lost(dev, dev->activated_target_idx);
- dev->activated_target_idx = NFC_TARGET_IDX_NONE;
+ u32 active_target_idx = dev->active_target->idx;
+ device_unlock(&dev->dev);
+ nfc_target_lost(dev, active_target_idx);
+ return;
}
}
@@ -637,14 +698,12 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
dev->tx_headroom = tx_headroom;
dev->tx_tailroom = tx_tailroom;
- spin_lock_init(&dev->targets_lock);
nfc_genl_data_init(&dev->genl_data);
+
/* first generation must not be 0 */
dev->targets_generation = 1;
- dev->activated_target_idx = NFC_TARGET_IDX_NONE;
-
if (ops->check_presence) {
char name[32];
init_timer(&dev->check_pres_timer);
@@ -662,7 +721,6 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
}
}
-
return dev;
}
EXPORT_SYMBOL(nfc_allocate_device);
diff --git a/net/nfc/hci/Kconfig b/net/nfc/hci/Kconfig
index 17213a6362b4..fd67f51d18e9 100644
--- a/net/nfc/hci/Kconfig
+++ b/net/nfc/hci/Kconfig
@@ -9,6 +9,7 @@ config NFC_HCI
config NFC_SHDLC
depends on NFC_HCI
+ select CRC_CCITT
bool "SHDLC link layer for HCI based NFC drivers"
default n
---help---
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 86fd00d5a099..e1a640d2b588 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -235,13 +235,6 @@ static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
targets->hci_reader_gate = gate;
r = nfc_targets_found(hdev->ndev, targets, 1);
- if (r < 0)
- goto exit;
-
- kfree(hdev->targets);
- hdev->targets = targets;
- targets = NULL;
- hdev->target_count = 1;
exit:
kfree(targets);
@@ -258,11 +251,6 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
switch (event) {
case NFC_HCI_EVT_TARGET_DISCOVERED:
- if (hdev->poll_started == false) {
- r = -EPROTO;
- goto exit;
- }
-
if (skb->len < 1) { /* no status data? */
r = -EPROTO;
goto exit;
@@ -496,74 +484,42 @@ static int hci_dev_down(struct nfc_dev *nfc_dev)
static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
- int r;
if (hdev->ops->start_poll)
- r = hdev->ops->start_poll(hdev, protocols);
+ return hdev->ops->start_poll(hdev, protocols);
else
- r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
- if (r == 0)
- hdev->poll_started = true;
-
- return r;
}
static void hci_stop_poll(struct nfc_dev *nfc_dev)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
- if (hdev->poll_started) {
- nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
- NFC_HCI_EVT_END_OPERATION, NULL, 0);
- hdev->poll_started = false;
- }
-}
-
-static struct nfc_target *hci_find_target(struct nfc_hci_dev *hdev,
- u32 target_idx)
-{
- int i;
- if (hdev->poll_started == false || hdev->targets == NULL)
- return NULL;
-
- for (i = 0; i < hdev->target_count; i++) {
- if (hdev->targets[i].idx == target_idx)
- return &hdev->targets[i];
- }
-
- return NULL;
+ nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
}
-static int hci_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
- u32 protocol)
+static int hci_activate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target, u32 protocol)
{
- struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
-
- if (hci_find_target(hdev, target_idx) == NULL)
- return -ENOMEDIUM;
-
return 0;
}
-static void hci_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx)
+static void hci_deactivate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target)
{
}
-static int hci_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
+static int hci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
struct sk_buff *skb, data_exchange_cb_t cb,
void *cb_context)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
int r;
- struct nfc_target *target;
struct sk_buff *res_skb = NULL;
- pr_debug("target_idx=%d\n", target_idx);
-
- target = hci_find_target(hdev, target_idx);
- if (target == NULL)
- return -ENOMEDIUM;
+ pr_debug("target_idx=%d\n", target->idx);
switch (target->hci_reader_gate) {
case NFC_HCI_RF_READER_A_GATE:
@@ -605,7 +561,18 @@ static int hci_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
return 0;
}
-struct nfc_ops hci_nfc_ops = {
+static int hci_check_presence(struct nfc_dev *nfc_dev,
+ struct nfc_target *target)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (hdev->ops->check_presence)
+ return hdev->ops->check_presence(hdev, target);
+
+ return 0;
+}
+
+static struct nfc_ops hci_nfc_ops = {
.dev_up = hci_dev_up,
.dev_down = hci_dev_down,
.start_poll = hci_start_poll,
@@ -613,6 +580,7 @@ struct nfc_ops hci_nfc_ops = {
.activate_target = hci_activate_target,
.deactivate_target = hci_deactivate_target,
.data_exchange = hci_data_exchange,
+ .check_presence = hci_check_presence,
};
struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/shdlc.c
index 923bdf7c26d6..5665dc6d893a 100644
--- a/net/nfc/hci/shdlc.c
+++ b/net/nfc/hci/shdlc.c
@@ -816,6 +816,17 @@ static int nfc_shdlc_data_exchange(struct nfc_hci_dev *hdev,
return -EPERM;
}
+static int nfc_shdlc_check_presence(struct nfc_hci_dev *hdev,
+ struct nfc_target *target)
+{
+ struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+
+ if (shdlc->ops->check_presence)
+ return shdlc->ops->check_presence(shdlc, target);
+
+ return 0;
+}
+
static struct nfc_hci_ops shdlc_ops = {
.open = nfc_shdlc_open,
.close = nfc_shdlc_close,
@@ -825,6 +836,7 @@ static struct nfc_hci_ops shdlc_ops = {
.target_from_gate = nfc_shdlc_target_from_gate,
.complete_target_discovered = nfc_shdlc_complete_target_discovered,
.data_exchange = nfc_shdlc_data_exchange,
+ .check_presence = nfc_shdlc_check_presence,
};
struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index 11a3b7d98dc5..bf8ae4f0b90c 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -488,7 +488,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
memcpy(skb_put(pdu, frag_len), msg_ptr, frag_len);
- skb_queue_head(&sock->tx_queue, pdu);
+ skb_queue_tail(&sock->tx_queue, pdu);
lock_sock(sk);
@@ -502,7 +502,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
kfree(msg_data);
- return 0;
+ return len;
}
int nfc_llcp_send_rr(struct nfc_llcp_sock *sock)
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 92988aa620dc..42994fac26d6 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -448,6 +448,8 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
{
struct nfc_llcp_sock *sock, *llcp_sock, *n;
+ pr_debug("ssap dsap %d %d\n", ssap, dsap);
+
if (ssap == 0 && dsap == 0)
return NULL;
@@ -783,6 +785,7 @@ static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
{
struct nfc_llcp_sock *llcp_sock;
+ struct sock *sk;
u8 dsap, ssap;
dsap = nfc_llcp_dsap(skb);
@@ -801,10 +804,14 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
}
llcp_sock->dsap = ssap;
+ sk = &llcp_sock->sk;
nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
skb->len - LLCP_HEADER_SIZE);
+ sk->sk_state = LLCP_CONNECTED;
+ sk->sk_state_change(sk);
+
nfc_llcp_sock_put(llcp_sock);
}
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index c13e02ebdef9..3f339b19d140 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -27,6 +27,42 @@
#include "../nfc.h"
#include "llcp.h"
+static int sock_wait_state(struct sock *sk, int state, unsigned long timeo)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int err = 0;
+
+ pr_debug("sk %p", sk);
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ while (sk->sk_state != state) {
+ if (!timeo) {
+ err = -EINPROGRESS;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ break;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ err = sock_error(sk);
+ if (err)
+ break;
+ }
+
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return err;
+}
+
static struct proto llcp_sock_proto = {
.name = "NFC_LLCP",
.owner = THIS_MODULE,
@@ -304,11 +340,24 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
mask |= POLLERR;
if (!skb_queue_empty(&sk->sk_receive_queue))
- mask |= POLLIN;
+ mask |= POLLIN | POLLRDNORM;
if (sk->sk_state == LLCP_CLOSED)
mask |= POLLHUP;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
+ mask |= POLLHUP;
+
+ if (sock_writeable(sk))
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ else
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+
+ pr_debug("mask 0x%x\n", mask);
+
return mask;
}
@@ -462,9 +511,13 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
if (ret)
goto put_dev;
- sk->sk_state = LLCP_CONNECTED;
+ ret = sock_wait_state(sk, LLCP_CONNECTED,
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
+ if (ret)
+ goto put_dev;
release_sock(sk);
+
return 0;
put_dev:
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 8737c2089fdd..d560e6f13072 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -436,16 +436,16 @@ static void nci_stop_poll(struct nfc_dev *nfc_dev)
msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}
-static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
- __u32 protocol)
+static int nci_activate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target, __u32 protocol)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
struct nci_rf_discover_select_param param;
- struct nfc_target *target = NULL;
+ struct nfc_target *nci_target = NULL;
int i;
int rc = 0;
- pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol);
+ pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol);
if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
(atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
@@ -459,25 +459,25 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
}
for (i = 0; i < ndev->n_targets; i++) {
- if (ndev->targets[i].idx == target_idx) {
- target = &ndev->targets[i];
+ if (ndev->targets[i].idx == target->idx) {
+ nci_target = &ndev->targets[i];
break;
}
}
- if (!target) {
+ if (!nci_target) {
pr_err("unable to find the selected target\n");
return -EINVAL;
}
- if (!(target->supported_protocols & (1 << protocol))) {
+ if (!(nci_target->supported_protocols & (1 << protocol))) {
pr_err("target does not support the requested protocol 0x%x\n",
protocol);
return -EINVAL;
}
if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
- param.rf_discovery_id = target->logical_idx;
+ param.rf_discovery_id = nci_target->logical_idx;
if (protocol == NFC_PROTO_JEWEL)
param.rf_protocol = NCI_RF_PROTOCOL_T1T;
@@ -501,11 +501,12 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
return rc;
}
-static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
+static void nci_deactivate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
- pr_debug("target_idx %d\n", target_idx);
+ pr_debug("target_idx %d\n", target->idx);
if (!ndev->target_active_prot) {
pr_err("unable to deactivate target, no active target\n");
@@ -520,14 +521,14 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
}
}
-static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
+static int nci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
struct sk_buff *skb,
data_exchange_cb_t cb, void *cb_context)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
int rc;
- pr_debug("target_idx %d, len %d\n", target_idx, skb->len);
+ pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
if (!ndev->target_active_prot) {
pr_err("unable to exchange data, no active target\n");
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index a0bc326308a5..76c48c5324f8 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -49,7 +49,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
if (cb) {
ndev->data_exchange_cb = NULL;
- ndev->data_exchange_cb_context = 0;
+ ndev->data_exchange_cb_context = NULL;
/* forward skb to nfc core */
cb(cb_context, skb, err);
@@ -200,10 +200,10 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
pr_err("error adding room for accumulated rx data\n");
kfree_skb(skb);
- skb = 0;
+ skb = NULL;
kfree_skb(ndev->rx_data_reassembly);
- ndev->rx_data_reassembly = 0;
+ ndev->rx_data_reassembly = NULL;
err = -ENOMEM;
goto exit;
@@ -216,7 +216,7 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
/* third, free old reassembly */
kfree_skb(ndev->rx_data_reassembly);
- ndev->rx_data_reassembly = 0;
+ ndev->rx_data_reassembly = NULL;
}
if (pbf == NCI_PBF_CONT) {
diff --git a/net/nfc/nci/lib.c b/net/nfc/nci/lib.c
index 6a63e5eb483d..6b7fd26c68d9 100644
--- a/net/nfc/nci/lib.c
+++ b/net/nfc/nci/lib.c
@@ -31,6 +31,7 @@
#include <linux/errno.h>
#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
/* NCI status codes to Unix errno mapping */
int nci_to_errno(__u8 code)
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 99e1632e6aac..cb2646179e5f 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -497,7 +497,7 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
/* drop partial rx data packet */
if (ndev->rx_data_reassembly) {
kfree_skb(ndev->rx_data_reassembly);
- ndev->rx_data_reassembly = 0;
+ ndev->rx_data_reassembly = NULL;
}
/* complete the data exchange transaction, if exists */
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index f1829f6ae9c5..581d419083aa 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -33,7 +33,7 @@ static struct genl_multicast_group nfc_genl_event_mcgrp = {
.name = NFC_GENL_MCAST_EVENT_NAME,
};
-struct genl_family nfc_genl_family = {
+static struct genl_family nfc_genl_family = {
.id = GENL_ID_GENERATE,
.hdrsize = 0,
.name = NFC_GENL_NAME,
@@ -128,7 +128,7 @@ static int nfc_genl_dump_targets(struct sk_buff *skb,
cb->args[1] = (long) dev;
}
- spin_lock_bh(&dev->targets_lock);
+ device_lock(&dev->dev);
cb->seq = dev->targets_generation;
@@ -141,7 +141,7 @@ static int nfc_genl_dump_targets(struct sk_buff *skb,
i++;
}
- spin_unlock_bh(&dev->targets_lock);
+ device_unlock(&dev->dev);
cb->args[0] = i;
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 7d589a81942e..3dd4232ae664 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -84,7 +84,7 @@ static inline int nfc_llcp_set_remote_gb(struct nfc_dev *dev,
return 0;
}
-static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *gb_len)
+static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *gb_len)
{
*gb_len = 0;
return NULL;
diff --git a/net/rds/ib.h b/net/rds/ib.h
index edfaaaf164eb..8d2b3d5a7c21 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -186,8 +186,7 @@ struct rds_ib_device {
struct work_struct free_work;
};
-#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus)
-#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
+#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
/* bits for i_ack_flags */
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 28b62dbb6d1e..3089de37c433 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -969,16 +969,17 @@ svcauth_gss_set_client(struct svc_rqst *rqstp)
}
static inline int
-gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp, struct rsi *rsip)
+gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp,
+ struct xdr_netobj *out_handle, int *major_status)
{
struct rsc *rsci;
int rc;
- if (rsip->major_status != GSS_S_COMPLETE)
+ if (*major_status != GSS_S_COMPLETE)
return gss_write_null_verf(rqstp);
- rsci = gss_svc_searchbyctx(cd, &rsip->out_handle);
+ rsci = gss_svc_searchbyctx(cd, out_handle);
if (rsci == NULL) {
- rsip->major_status = GSS_S_NO_CONTEXT;
+ *major_status = GSS_S_NO_CONTEXT;
return gss_write_null_verf(rqstp);
}
rc = gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN);
@@ -986,22 +987,13 @@ gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp, struct rsi
return rc;
}
-/*
- * Having read the cred already and found we're in the context
- * initiation case, read the verifier and initiate (or check the results
- * of) upcalls to userspace for help with context initiation. If
- * the upcall results are available, write the verifier and result.
- * Otherwise, drop the request pending an answer to the upcall.
- */
-static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
- struct rpc_gss_wire_cred *gc, __be32 *authp)
+static inline int
+gss_read_verf(struct rpc_gss_wire_cred *gc,
+ struct kvec *argv, __be32 *authp,
+ struct xdr_netobj *in_handle,
+ struct xdr_netobj *in_token)
{
- struct kvec *argv = &rqstp->rq_arg.head[0];
- struct kvec *resv = &rqstp->rq_res.head[0];
struct xdr_netobj tmpobj;
- struct rsi *rsip, rsikey;
- int ret;
- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
/* Read the verifier; should be NULL: */
*authp = rpc_autherr_badverf;
@@ -1011,24 +1003,67 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
return SVC_DENIED;
if (svc_getnl(argv) != 0)
return SVC_DENIED;
-
 /* Marshal context handle and token for upcall: */
*authp = rpc_autherr_badcred;
if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0)
return SVC_DENIED;
- memset(&rsikey, 0, sizeof(rsikey));
- if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
+ if (dup_netobj(in_handle, &gc->gc_ctx))
return SVC_CLOSE;
*authp = rpc_autherr_badverf;
if (svc_safe_getnetobj(argv, &tmpobj)) {
- kfree(rsikey.in_handle.data);
+ kfree(in_handle->data);
return SVC_DENIED;
}
- if (dup_netobj(&rsikey.in_token, &tmpobj)) {
- kfree(rsikey.in_handle.data);
+ if (dup_netobj(in_token, &tmpobj)) {
+ kfree(in_handle->data);
return SVC_CLOSE;
}
+ return 0;
+}
+
+static inline int
+gss_write_resv(struct kvec *resv, size_t size_limit,
+ struct xdr_netobj *out_handle, struct xdr_netobj *out_token,
+ int major_status, int minor_status)
+{
+ if (resv->iov_len + 4 > size_limit)
+ return -1;
+ svc_putnl(resv, RPC_SUCCESS);
+ if (svc_safe_putnetobj(resv, out_handle))
+ return -1;
+ if (resv->iov_len + 3 * 4 > size_limit)
+ return -1;
+ svc_putnl(resv, major_status);
+ svc_putnl(resv, minor_status);
+ svc_putnl(resv, GSS_SEQ_WIN);
+ if (svc_safe_putnetobj(resv, out_token))
+ return -1;
+ return 0;
+}
+
+/*
+ * Having read the cred already and found we're in the context
+ * initiation case, read the verifier and initiate (or check the results
+ * of) upcalls to userspace for help with context initiation. If
+ * the upcall results are available, write the verifier and result.
+ * Otherwise, drop the request pending an answer to the upcall.
+ */
+static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
+ struct rpc_gss_wire_cred *gc, __be32 *authp)
+{
+ struct kvec *argv = &rqstp->rq_arg.head[0];
+ struct kvec *resv = &rqstp->rq_res.head[0];
+ struct rsi *rsip, rsikey;
+ int ret;
+ struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
+
+ memset(&rsikey, 0, sizeof(rsikey));
+ ret = gss_read_verf(gc, argv, authp,
+ &rsikey.in_handle, &rsikey.in_token);
+ if (ret)
+ return ret;
+
/* Perform upcall, or find upcall result: */
rsip = rsi_lookup(sn->rsi_cache, &rsikey);
rsi_free(&rsikey);
@@ -1040,19 +1075,12 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
ret = SVC_CLOSE;
/* Got an answer to the upcall; use it: */
- if (gss_write_init_verf(sn->rsc_cache, rqstp, rsip))
+ if (gss_write_init_verf(sn->rsc_cache, rqstp,
+ &rsip->out_handle, &rsip->major_status))
goto out;
- if (resv->iov_len + 4 > PAGE_SIZE)
- goto out;
- svc_putnl(resv, RPC_SUCCESS);
- if (svc_safe_putnetobj(resv, &rsip->out_handle))
- goto out;
- if (resv->iov_len + 3 * 4 > PAGE_SIZE)
- goto out;
- svc_putnl(resv, rsip->major_status);
- svc_putnl(resv, rsip->minor_status);
- svc_putnl(resv, GSS_SEQ_WIN);
- if (svc_safe_putnetobj(resv, &rsip->out_token))
+ if (gss_write_resv(resv, PAGE_SIZE,
+ &rsip->out_handle, &rsip->out_token,
+ rsip->major_status, rsip->minor_status))
goto out;
ret = SVC_COMPLETE;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 7fee13b331d1..f56f045778ae 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1286,6 +1286,8 @@ call_reserveresult(struct rpc_task *task)
}
switch (status) {
+ case -ENOMEM:
+ rpc_delay(task, HZ >> 2);
case -EAGAIN: /* woken up; retry */
task->tk_action = call_reserve;
return;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index fd2423991c2d..04040476082e 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -120,7 +120,7 @@ EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
/**
* rpc_queue_upcall - queue an upcall message to userspace
- * @inode: inode of upcall pipe on which to queue given message
+ * @pipe: upcall pipe on which to queue given message
* @msg: message to queue
*
* Call with an @inode created by rpc_mkpipe() to queue an upcall.
@@ -819,9 +819,7 @@ static int rpc_rmdir_depopulate(struct dentry *dentry,
* @parent: dentry of directory to create new "pipe" in
* @name: name of pipe
* @private: private data to associate with the pipe, for the caller's use
- * @ops: operations defining the behavior of the pipe: upcall, downcall,
- * release_pipe, open_pipe, and destroy_msg.
- * @flags: rpc_pipe flags
+ * @pipe: &rpc_pipe containing input parameters
*
* Data is made available for userspace to read by calls to
* rpc_queue_upcall(). The actual reads will result in calls to
@@ -943,7 +941,7 @@ struct dentry *rpc_create_client_dir(struct dentry *dentry,
/**
* rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir()
- * @clnt: rpc client
+ * @dentry: dentry for the pipe
*/
int rpc_remove_client_dir(struct dentry *dentry)
{
@@ -1115,7 +1113,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &s_ops;
sb->s_time_gran = 1;
- inode = rpc_get_inode(sb, S_IFDIR | 0755);
+ inode = rpc_get_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO);
sb->s_root = root = d_make_root(inode);
if (!root)
return -ENOMEM;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 78ac39fd9fe7..3c0653439f3d 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -394,6 +394,7 @@ static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
/**
* rpcb_register - set or unset a port registration with the local rpcbind svc
+ * @net: target network namespace
* @prog: RPC program number to bind
* @vers: RPC version number to bind
* @prot: transport protocol to register
@@ -521,6 +522,7 @@ static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn,
/**
* rpcb_v4_register - set or unset a port registration with the local rpcbind
+ * @net: target network namespace
* @program: RPC program number of service to (un)register
* @version: RPC version number of service to (un)register
* @address: address family, IP address, and port to (un)register
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 71ec8530ec8c..6138c925923d 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -347,17 +347,12 @@ static inline int ip_map_update(struct net *net, struct ip_map *ipm,
return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
}
-
-void svcauth_unix_purge(void)
+void svcauth_unix_purge(struct net *net)
{
- struct net *net;
-
- for_each_net(net) {
- struct sunrpc_net *sn;
+ struct sunrpc_net *sn;
- sn = net_generic(net, sunrpc_net_id);
- cache_purge(sn->ip_map_cache);
- }
+ sn = net_generic(net, sunrpc_net_id);
+ cache_purge(sn->ip_map_cache);
}
EXPORT_SYMBOL_GPL(svcauth_unix_purge);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 6fe2dcead150..3c83035cdaa9 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -979,20 +979,21 @@ static void xprt_alloc_slot(struct rpc_task *task)
list_del(&req->rq_list);
goto out_init_req;
}
- req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT);
+ req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
if (!IS_ERR(req))
goto out_init_req;
switch (PTR_ERR(req)) {
case -ENOMEM:
- rpc_delay(task, HZ >> 2);
dprintk("RPC: dynamic allocation of request slot "
"failed! Retrying\n");
+ task->tk_status = -ENOMEM;
break;
case -EAGAIN:
rpc_sleep_on(&xprt->backlog, task, NULL);
dprintk("RPC: waiting for request slot\n");
+ default:
+ task->tk_status = -EAGAIN;
}
- task->tk_status = -EAGAIN;
return;
out_init_req:
task->tk_status = 0;
diff --git a/net/wanrouter/Kconfig b/net/wanrouter/Kconfig
index 61ceae0b9566..a157a2e64e18 100644
--- a/net/wanrouter/Kconfig
+++ b/net/wanrouter/Kconfig
@@ -3,7 +3,7 @@
#
config WAN_ROUTER
- tristate "WAN router"
+ tristate "WAN router (DEPRECATED)"
depends on EXPERIMENTAL
---help---
Wide Area Networks (WANs), such as X.25, frame relay and leased
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 2fcfe0993ca2..884801ac4dd0 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -45,7 +45,7 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
return chan;
}
-int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
+bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 39f2538a46fc..a87d43552974 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -664,7 +664,7 @@ void wiphy_unregister(struct wiphy *wiphy)
mutex_lock(&rdev->devlist_mtx);
__count = rdev->opencount;
mutex_unlock(&rdev->devlist_mtx);
- __count == 0;}));
+ __count == 0; }));
mutex_lock(&rdev->devlist_mtx);
BUG_ON(!list_empty(&rdev->netdev_list));
@@ -776,7 +776,7 @@ static struct device_type wiphy_type = {
.name = "wlan",
};
-static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
+static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
unsigned long state,
void *ndev)
{
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 3ac2dd00d714..8523f3878677 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -445,8 +445,6 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev, int freq,
enum nl80211_channel_type channel_type);
-u16 cfg80211_calculate_bitrate(struct rate_info *rate);
-
int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
const u8 *rates, unsigned int n_rates,
u32 *mask);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index b67b1114e25a..206465dc0cab 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1179,6 +1179,27 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
wdev->iftype == NL80211_IFTYPE_P2P_GO;
}
+static bool nl80211_valid_channel_type(struct genl_info *info,
+ enum nl80211_channel_type *channel_type)
+{
+ enum nl80211_channel_type tmp;
+
+ if (!info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE])
+ return false;
+
+ tmp = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+ if (tmp != NL80211_CHAN_NO_HT &&
+ tmp != NL80211_CHAN_HT20 &&
+ tmp != NL80211_CHAN_HT40PLUS &&
+ tmp != NL80211_CHAN_HT40MINUS)
+ return false;
+
+ if (channel_type)
+ *channel_type = tmp;
+
+ return true;
+}
+
static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev,
struct genl_info *info)
@@ -1193,15 +1214,9 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
if (!nl80211_can_set_dev_channel(wdev))
return -EOPNOTSUPP;
- if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
- channel_type = nla_get_u32(info->attrs[
- NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
- if (channel_type != NL80211_CHAN_NO_HT &&
- channel_type != NL80211_CHAN_HT20 &&
- channel_type != NL80211_CHAN_HT40PLUS &&
- channel_type != NL80211_CHAN_HT40MINUS)
- return -EINVAL;
- }
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
+ !nl80211_valid_channel_type(info, &channel_type))
+ return -EINVAL;
freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
@@ -2410,10 +2425,16 @@ static int parse_station_flags(struct genl_info *info,
return -EINVAL;
}
- for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++)
- if (flags[flag])
+ for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++) {
+ if (flags[flag]) {
params->sta_flags_set |= (1<<flag);
+ /* no longer support new API additions in old API */
+ if (flag > NL80211_STA_FLAG_MAX_OLD_API)
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -4912,12 +4933,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
enum nl80211_channel_type channel_type;
- channel_type = nla_get_u32(
- info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
- if (channel_type != NL80211_CHAN_NO_HT &&
- channel_type != NL80211_CHAN_HT20 &&
- channel_type != NL80211_CHAN_HT40MINUS &&
- channel_type != NL80211_CHAN_HT40PLUS)
+ if (!nl80211_valid_channel_type(info, &channel_type))
return -EINVAL;
if (channel_type != NL80211_CHAN_NO_HT &&
@@ -5485,15 +5501,9 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
!(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
return -EOPNOTSUPP;
- if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
- channel_type = nla_get_u32(
- info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
- if (channel_type != NL80211_CHAN_NO_HT &&
- channel_type != NL80211_CHAN_HT20 &&
- channel_type != NL80211_CHAN_HT40PLUS &&
- channel_type != NL80211_CHAN_HT40MINUS)
- return -EINVAL;
- }
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
+ !nl80211_valid_channel_type(info, &channel_type))
+ return -EINVAL;
freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
chan = rdev_freq_to_chan(rdev, freq, channel_type);
@@ -5764,12 +5774,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
}
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
- channel_type = nla_get_u32(
- info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
- if (channel_type != NL80211_CHAN_NO_HT &&
- channel_type != NL80211_CHAN_HT20 &&
- channel_type != NL80211_CHAN_HT40PLUS &&
- channel_type != NL80211_CHAN_HT40MINUS)
+ if (!nl80211_valid_channel_type(info, &channel_type))
return -EINVAL;
channel_type_valid = true;
}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 1cd255892a43..55d99466babb 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -879,7 +879,7 @@ u16 cfg80211_calculate_bitrate(struct rate_info *rate)
return rate->legacy;
/* the formula below does only work for MCS values smaller than 32 */
- if (rate->mcs >= 32)
+ if (WARN_ON_ONCE(rate->mcs >= 32))
return 0;
modulation = rate->mcs & 7;
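The util.c change above turns a silent range check into a noisy one: WARN_ON_ONCE() evaluates to its condition, so it can guard the early return and print a backtrace the first time an out-of-range MCS index reaches the bitrate calculation. A minimal sketch of that idiom, with an invented helper and a placeholder return value rather than the real rate maths:

#include <linux/kernel.h>
#include <linux/bug.h>

/* Illustrative only: reject HT MCS indices the legacy formula cannot handle.
 * WARN_ON_ONCE() returns the condition it tests, so it doubles as the guard
 * and prints a backtrace only the first time the condition is hit. */
static u16 example_check_mcs(unsigned int mcs)
{
	if (WARN_ON_ONCE(mcs >= 32))
		return 0;	/* caller treats 0 as "unknown rate" */

	return mcs;		/* placeholder for the real rate computation */
}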
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index c53e8f42aa75..ccfbd328a69d 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1921,6 +1921,9 @@ no_transform:
}
ok:
xfrm_pols_put(pols, drop_pols);
+ if (dst && dst->xfrm &&
+ dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
+ dst->flags |= DST_XFRM_TUNNEL;
return dst;
nopol:
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index faea0ec612bf..e5bd60ff48e3 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2382,6 +2382,19 @@ sub process {
}
}
+ if ($line =~ /\bprintk\s*\(\s*KERN_([A-Z]+)/) {
+ my $orig = $1;
+ my $level = lc($orig);
+ $level = "warn" if ($level eq "warning");
+ WARN("PREFER_PR_LEVEL",
+ "Prefer pr_$level(... to printk(KERN_$orig, ...\n" . $herecurr);
+ }
+
+ if ($line =~ /\bpr_warning\s*\(/) {
+ WARN("PREFER_PR_LEVEL",
+ "Prefer pr_warn(... to pr_warning(...\n" . $herecurr);
+ }
+
# function brace can't be on same line, except for #defines of do while,
# or if closed on same line
if (($line=~/$Type\s*$Ident\(.*\).*\s{/) and
@@ -2448,6 +2461,13 @@ sub process {
"space prohibited between function name and open parenthesis '('\n" . $herecurr);
}
}
+
+# check for whitespace before a non-naked semicolon
+ if ($line =~ /^\+.*\S\s+;/) {
+ CHK("SPACING",
+ "space prohibited before semicolon\n" . $herecurr);
+ }
+
# Check operator spacing.
if (!($line=~/\#\s*include/)) {
my $ops = qr{
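The new PREFER_PR_LEVEL checks steer patches away from raw printk(KERN_<LEVEL> ...) calls toward the pr_<level>() wrappers, and from pr_warning() toward pr_warn(). A small before/after sketch of the kind of line checkpatch now flags (driver prefix and message are made up):

#include <linux/printk.h>

static void example_report(void)
{
	/* flagged by PREFER_PR_LEVEL: */
	printk(KERN_WARNING "exampledrv: codec reset failed\n");

	/* the spelling checkpatch suggests instead: */
	pr_warn("exampledrv: codec reset failed\n");
}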
diff --git a/scripts/coccinelle/misc/ifaddr.cocci b/scripts/coccinelle/misc/ifaddr.cocci
new file mode 100644
index 000000000000..3e4089a77000
--- /dev/null
+++ b/scripts/coccinelle/misc/ifaddr.cocci
@@ -0,0 +1,35 @@
+/// the address of a variable or field is likely always to be
+/// non-zero
+///
+// Confidence: High
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual org
+virtual report
+virtual context
+
+@r@
+expression x;
+statement S1,S2;
+position p;
+@@
+
+*if@p (&x)
+ S1 else S2
+
+@script:python depends on org@
+p << r.p;
+@@
+
+cocci.print_main("test of a variable/field address",p)
+
+@script:python depends on report@
+p << r.p;
+@@
+
+msg = "ERROR: test of a variable/field address"
+coccilib.report.print_report(p[0],msg)
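The ifaddr.cocci rule reports conditionals that test the address of a variable or field: such an address is (almost) always non-zero, so the test is a likely typo for a test of the value itself. A contrived example of the pattern it flags, with the usual intended form alongside (struct and function names are invented):

struct example_dev {
	int irq;
};

/* flagged by ifaddr.cocci: &dev->irq is never NULL, so the else branch is
 * dead and the test almost certainly meant to read the value */
static int example_has_irq_buggy(struct example_dev *dev)
{
	if (&dev->irq)
		return 1;
	else
		return 0;
}

/* the usual intended form: test the value, not its address */
static int example_has_irq(struct example_dev *dev)
{
	if (dev->irq)
		return 1;
	else
		return 0;
}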
diff --git a/scripts/coccinelle/misc/noderef.cocci b/scripts/coccinelle/misc/noderef.cocci
new file mode 100644
index 000000000000..c1707214e602
--- /dev/null
+++ b/scripts/coccinelle/misc/noderef.cocci
@@ -0,0 +1,65 @@
+/// sizeof when applied to a pointer typed expression gives the size of
+/// the pointer
+///
+// Confidence: High
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual org
+virtual report
+virtual context
+virtual patch
+
+@depends on patch@
+expression *x;
+expression f;
+type T;
+@@
+
+(
+x = <+... sizeof(
+- x
++ *x
+ ) ...+>
+|
+f(...,(T)(x),...,sizeof(
+- x
++ *x
+ ),...)
+|
+f(...,sizeof(x),...,(T)(
+- x
++ *x
+ ),...)
+)
+
+@r depends on !patch@
+expression *x;
+expression f;
+position p;
+type T;
+@@
+
+(
+*x = <+... sizeof@p(x) ...+>
+|
+*f(...,(T)(x),...,sizeof@p(x),...)
+|
+*f(...,sizeof@p(x),...,(T)(x),...)
+)
+
+@script:python depends on org@
+p << r.p;
+@@
+
+cocci.print_main("application of sizeof to pointer",p)
+
+@script:python depends on report@
+p << r.p;
+@@
+
+msg = "ERROR: application of sizeof to pointer"
+coccilib.report.print_report(p[0],msg)
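noderef.cocci targets the classic sizeof-on-a-pointer slip, where the size of the pointer is used instead of the size of the object it points to, typically in allocations. A contrived example of the buggy pattern and of the form the semantic patch produces in patch mode (names are invented):

#include <linux/slab.h>

struct example_item {
	int a, b, c;
};

/* Buggy: sizeof(item) is the size of the pointer (4 or 8 bytes), not of
 * struct example_item; this is exactly what noderef.cocci flags. */
static struct example_item *example_alloc_buggy(void)
{
	struct example_item *item = kmalloc(sizeof(item), GFP_KERNEL);
	return item;
}

/* Corrected form, as rewritten by the semantic patch in patch mode. */
static struct example_item *example_alloc(void)
{
	struct example_item *item = kmalloc(sizeof(*item), GFP_KERNEL);
	return item;
}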
diff --git a/scripts/config b/scripts/config
index a7c7c4b8e957..ed6653ef9702 100755
--- a/scripts/config
+++ b/scripts/config
@@ -107,7 +107,8 @@ while [ "$1" != "" ] ; do
;;
--set-str)
- set_var "CONFIG_$ARG" "CONFIG_$ARG=\"$1\""
+ # sed swallows one level of escaping, so we need double-escaping
+ set_var "CONFIG_$ARG" "CONFIG_$ARG=\"${1//\"/\\\\\"}\""
shift
;;
@@ -124,9 +125,11 @@ while [ "$1" != "" ] ; do
if [ $? != 0 ] ; then
echo undef
else
- V="${V/CONFIG_$ARG=/}"
- V="${V/\"/}"
- echo "$V"
+ V="${V/#CONFIG_$ARG=/}"
+ V="${V/#\"/}"
+ V="${V/%\"/}"
+ V="${V/\\\"/\"}"
+ echo "${V}"
fi
fi
;;
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index f208f900ed3a..0dc4a2c779b1 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -574,8 +574,15 @@ int main(int ac, char **av)
case alldefconfig:
case randconfig:
name = getenv("KCONFIG_ALLCONFIG");
- if (name && !stat(name, &tmpstat)) {
- conf_read_simple(name, S_DEF_USER);
+ if (!name)
+ break;
+ if ((strcmp(name, "") != 0) && (strcmp(name, "1") != 0)) {
+ if (conf_read_simple(name, S_DEF_USER)) {
+ fprintf(stderr,
+ _("*** Can't read seed configuration \"%s\"!\n"),
+ name);
+ exit(1);
+ }
break;
}
switch (input_mode) {
@@ -586,10 +593,13 @@ int main(int ac, char **av)
case randconfig: name = "allrandom.config"; break;
default: break;
}
- if (!stat(name, &tmpstat))
- conf_read_simple(name, S_DEF_USER);
- else if (!stat("all.config", &tmpstat))
- conf_read_simple("all.config", S_DEF_USER);
+ if (conf_read_simple(name, S_DEF_USER) &&
+ conf_read_simple("all.config", S_DEF_USER)) {
+ fprintf(stderr,
+ _("*** KCONFIG_ALLCONFIG set, but no \"%s\" or \"all.config\" file found\n"),
+ name);
+ exit(1);
+ }
break;
default:
break;
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
new file mode 100644
index 000000000000..cd9c6c6bb4c9
--- /dev/null
+++ b/scripts/link-vmlinux.sh
@@ -0,0 +1,221 @@
+#!/bin/sh
+#
+# link vmlinux
+#
+# vmlinux is linked from the objects selected by $(KBUILD_VMLINUX_INIT) and
+# $(KBUILD_VMLINUX_MAIN). Most are built-in.o files from top-level directories
+# in the kernel tree, others are specified in arch/$(ARCH)/Makefile.
+# Ordering when linking is important, and $(KBUILD_VMLINUX_INIT) must be first.
+#
+# vmlinux
+# ^
+# |
+# +-< $(KBUILD_VMLINUX_INIT)
+# | +--< init/version.o + more
+# |
+# +--< $(KBUILD_VMLINUX_MAIN)
+# | +--< drivers/built-in.o mm/built-in.o + more
+# |
+# +-< ${kallsymso} (see description in KALLSYMS section)
+#
+# vmlinux version (uname -v) cannot be updated during normal
+# descending-into-subdirs phase since we do not yet know if we need to
+# update vmlinux.
+# Therefore this step is delayed until just before final link of vmlinux.
+#
+# System.map is generated to document addresses of all kernel symbols
+
+# Abort immediately on any error
+set -e
+
+# Nice output in kbuild format
+# Will be suppressed by "make -s"
+info()
+{
+ if [ "${quiet}" != "silent_" ]; then
+ printf " %-7s %s\n" ${1} ${2}
+ fi
+}
+
+# Link of vmlinux.o used for section mismatch analysis
+# ${1} output file
+modpost_link()
+{
+ ${LD} ${LDFLAGS} -r -o ${1} ${KBUILD_VMLINUX_INIT} \
+ --start-group ${KBUILD_VMLINUX_MAIN} --end-group
+}
+
+# Link of vmlinux
+# ${1} - optional extra .o files
+# ${2} - output file
+vmlinux_link()
+{
+ local lds="${objtree}/${KBUILD_LDS}"
+
+ if [ "${SRCARCH}" != "um" ]; then
+ ${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2} \
+ -T ${lds} ${KBUILD_VMLINUX_INIT} \
+ --start-group ${KBUILD_VMLINUX_MAIN} --end-group ${1}
+ else
+ ${CC} ${CFLAGS_vmlinux} -o ${2} \
+ -Wl,-T,${lds} ${KBUILD_VMLINUX_INIT} \
+ -Wl,--start-group \
+ ${KBUILD_VMLINUX_MAIN} \
+ -Wl,--end-group \
+ -lutil ${1}
+ rm -f linux
+ fi
+}
+
+
+# Create ${2} .o file with all symbols from the ${1} object file
+kallsyms()
+{
+ info KSYM ${2}
+ local kallsymopt;
+
+ if [ -n "${CONFIG_KALLSYMS_ALL}" ]; then
+ kallsymopt=--all-symbols
+ fi
+
+ local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
+ ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
+
+ ${NM} -n ${1} | \
+ scripts/kallsyms ${kallsymopt} | \
+ ${CC} ${aflags} -c -o ${2} -x assembler-with-cpp -
+}
+
+# Create map file with all symbols from ${1}
+# See mksysmap for additional details
+mksysmap()
+{
+ ${CONFIG_SHELL} "${srctree}/scripts/mksysmap" ${1} ${2}
+}
+
+sortextable()
+{
+ ${objtree}/scripts/sortextable ${1}
+}
+
+# Delete output files in case of error
+trap cleanup SIGHUP SIGINT SIGQUIT SIGTERM ERR
+cleanup()
+{
+ rm -f .old_version
+ rm -f .tmp_System.map
+ rm -f .tmp_kallsyms*
+ rm -f .tmp_version
+ rm -f .tmp_vmlinux*
+ rm -f System.map
+ rm -f vmlinux
+ rm -f vmlinux.o
+}
+
+#
+#
+# Use "make V=1" to debug this script
+case "${KBUILD_VERBOSE}" in
+*1*)
+ set -x
+ ;;
+esac
+
+if [ "$1" = "clean" ]; then
+ cleanup
+ exit 0
+fi
+
+# We need access to CONFIG_ symbols
+. ./.config
+
+# link vmlinux.o
+info LD vmlinux.o
+modpost_link vmlinux.o
+
+# modpost vmlinux.o to check for section mismatches
+${MAKE} -f "${srctree}/scripts/Makefile.modpost" vmlinux.o
+
+# Update version
+info GEN .version
+if [ ! -r .version ]; then
+ rm -f .version;
+ echo 1 >.version;
+else
+ mv .version .old_version;
+ expr 0$(cat .old_version) + 1 >.version;
+fi;
+
+# final build of init/
+${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
+
+kallsymso=""
+kallsyms_vmlinux=""
+if [ -n "${CONFIG_KALLSYMS}" ]; then
+
+ # kallsyms support
+ # Generate section listing all symbols and add it into vmlinux
+ # It's a three step process:
+ # 1) Link .tmp_vmlinux1 so it has all symbols and sections,
+ # but __kallsyms is empty.
+ # Running kallsyms on that gives us .tmp_kallsyms1.o with
+ # the right size
+ # 2) Link .tmp_vmlinux2 so it now has a __kallsyms section of
+ # the right size, but due to the added section, some
+ # addresses have shifted.
+ # From here, we generate a correct .tmp_kallsyms2.o
+ # 2a) We may use an extra pass as this has been necessary to
+ # work around some alignment-related bugs.
+ # KALLSYMS_EXTRA_PASS=1 is used to trigger this.
+ # 3) The correct ${kallsymso} is linked into the final vmlinux.
+ #
+ # a) Verify that the System.map from vmlinux matches the map from
+ # ${kallsymso}.
+
+ kallsymso=.tmp_kallsyms2.o
+ kallsyms_vmlinux=.tmp_vmlinux2
+
+ # step 1
+ vmlinux_link "" .tmp_vmlinux1
+ kallsyms .tmp_vmlinux1 .tmp_kallsyms1.o
+
+ # step 2
+ vmlinux_link .tmp_kallsyms1.o .tmp_vmlinux2
+ kallsyms .tmp_vmlinux2 .tmp_kallsyms2.o
+
+ # step 2a
+ if [ -n "${KALLSYMS_EXTRA_PASS}" ]; then
+ kallsymso=.tmp_kallsyms3.o
+ kallsyms_vmlinux=.tmp_vmlinux3
+
+ vmlinux_link .tmp_kallsyms2.o .tmp_vmlinux3
+
+ kallsyms .tmp_vmlinux3 .tmp_kallsyms3.o
+ fi
+fi
+
+info LD vmlinux
+vmlinux_link "${kallsymso}" vmlinux
+
+if [ -n "${CONFIG_BUILDTIME_EXTABLE_SORT}" ]; then
+ info SORTEX vmlinux
+ sortextable vmlinux
+fi
+
+info SYSMAP System.map
+mksysmap vmlinux System.map
+
+# step a (see comment above)
+if [ -n "${CONFIG_KALLSYMS}" ]; then
+ mksysmap ${kallsyms_vmlinux} .tmp_System.map
+
+ if ! cmp -s System.map .tmp_System.map; then
+ echo Inconsistent kallsyms data
+ echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround
+ cleanup
+ exit 1
+ fi
+fi
+
+# We made a new kernel - delete old version file
+rm -f .old_version
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index eee5f8ed2493..c95fdda58414 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -245,7 +245,7 @@ fi
# Build header package
(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
-(cd $objtree; find .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
+(cd $objtree; find arch/$SRCARCH/include .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
destdir=$kernel_headers_dir/usr/src/linux-headers-$version
mkdir -p "$destdir"
(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
diff --git a/security/keys/compat.c b/security/keys/compat.c
index fab4f8dda6c6..c92d42b021aa 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -38,7 +38,7 @@ long compat_keyctl_instantiate_key_iov(
ret = compat_rw_copy_check_uvector(WRITE, _payload_iov, ioc,
ARRAY_SIZE(iovstack),
- iovstack, &iov, 1);
+ iovstack, &iov);
if (ret < 0)
return ret;
if (ret == 0)
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 2f28126215a2..0f5b3f027299 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -84,7 +84,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
vm = false;
if (_payload) {
ret = -ENOMEM;
- payload = kmalloc(plen, GFP_KERNEL);
+ payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
if (!payload) {
if (plen <= PAGE_SIZE)
goto error2;
@@ -1110,7 +1110,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
goto no_payload;
ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
- ARRAY_SIZE(iovstack), iovstack, &iov, 1);
+ ARRAY_SIZE(iovstack), iovstack, &iov);
if (ret < 0)
return ret;
if (ret == 0)
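The __GFP_NOWARN addition above quietens the page-allocation warning when a large, user-supplied payload cannot be served by kmalloc(); the caller already falls back to vmalloc() for payloads bigger than a page, so the failure is expected. A hedged sketch of that kmalloc-then-vmalloc fallback pattern in general, not the keyctl code itself:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Illustrative helper: try the cheap contiguous allocation quietly first,
 * then fall back to vmalloc() for large, user-sized buffers. */
static void *example_alloc_payload(size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL | __GFP_NOWARN);

	if (!buf && len > PAGE_SIZE)
		buf = vmalloc(len);
	return buf;
}

static void example_free_payload(void *buf)
{
	if (is_vmalloc_addr(buf))
		vfree(buf);
	else
		kfree(buf);
}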
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index cc3790315d2f..000e75017520 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -93,16 +93,9 @@ static void umh_keys_cleanup(struct subprocess_info *info)
static int call_usermodehelper_keys(char *path, char **argv, char **envp,
struct key *session_keyring, int wait)
{
- gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
- struct subprocess_info *info =
- call_usermodehelper_setup(path, argv, envp, gfp_mask);
-
- if (!info)
- return -ENOMEM;
-
- call_usermodehelper_setfns(info, umh_keys_init, umh_keys_cleanup,
- key_get(session_keyring));
- return call_usermodehelper_exec(info, wait);
+ return call_usermodehelper_fns(path, argv, envp, wait,
+ umh_keys_init, umh_keys_cleanup,
+ key_get(session_keyring));
}
/*
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index faedb1481b24..8f312fa6c282 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -313,9 +313,22 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
snd_pcm_sframes_t hdelta, delta;
unsigned long jdelta;
+ unsigned long curr_jiffies;
+ struct timespec curr_tstamp;
old_hw_ptr = runtime->status->hw_ptr;
+
+ /*
+ * Group the pointer, time and jiffies reads to allow for more
+ * accurate correlations/corrections.
+ * The values are stored at the end of this routine after the
+ * hw_ptr position has been corrected.
+ */
pos = substream->ops->pointer(substream);
+ curr_jiffies = jiffies;
+ if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
+ snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
+
if (pos == SNDRV_PCM_POS_XRUN) {
xrun(substream);
return -EPIPE;
@@ -343,7 +356,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
delta = runtime->hw_ptr_interrupt + runtime->period_size;
if (delta > new_hw_ptr) {
/* check for double acknowledged interrupts */
- hdelta = jiffies - runtime->hw_ptr_jiffies;
+ hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
if (hdelta > runtime->hw_ptr_buffer_jiffies/2) {
hw_base += runtime->buffer_size;
if (hw_base >= runtime->boundary)
@@ -388,7 +401,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
* Without regular period interrupts, we have to check
* the elapsed time to detect xruns.
*/
- jdelta = jiffies - runtime->hw_ptr_jiffies;
+ jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
goto no_delta_check;
hdelta = jdelta - delta * HZ / runtime->rate;
@@ -430,7 +443,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
if (hdelta < runtime->delay)
goto no_jiffies_check;
hdelta -= runtime->delay;
- jdelta = jiffies - runtime->hw_ptr_jiffies;
+ jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
delta = jdelta /
(((runtime->period_size * HZ) / runtime->rate)
@@ -492,9 +505,9 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
}
runtime->hw_ptr_base = hw_base;
runtime->status->hw_ptr = new_hw_ptr;
- runtime->hw_ptr_jiffies = jiffies;
+ runtime->hw_ptr_jiffies = curr_jiffies;
if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
- snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
+ runtime->status->tstamp = curr_tstamp;
return snd_pcm_update_state(substream, runtime);
}
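The pcm_lib.c hunks sample the hardware pointer, jiffies and the timestamp back to back so the three values describe the same instant, rather than re-reading jiffies later in the routine where delays could let them drift apart. A minimal sketch of the idea under invented names; the real driver stores the values at the end of snd_pcm_update_hw_ptr0() after correcting the pointer:

#include <linux/jiffies.h>
#include <linux/time.h>

struct example_snapshot {
	unsigned long pos;		/* ring-buffer position */
	unsigned long jiffies_at_pos;	/* jiffies captured with it */
	struct timespec ts_at_pos;	/* timestamp captured with it */
};

/* Capture all correlated values in one go; later corrections then work on
 * a consistent snapshot instead of re-reading the clock. */
static void example_take_snapshot(struct example_snapshot *s,
				  unsigned long (*read_pos)(void))
{
	s->pos = read_pos();
	s->jiffies_at_pos = jiffies;
	ktime_get_ts(&s->ts_at_pos);
}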
diff --git a/sound/firewire/cmp.c b/sound/firewire/cmp.c
index 76294f2ae47f..645cb0ba4293 100644
--- a/sound/firewire/cmp.c
+++ b/sound/firewire/cmp.c
@@ -84,7 +84,7 @@ static int pcr_modify(struct cmp_connection *c,
return 0;
io_error:
- cmp_error(c, "transaction failed: %s\n", rcode_string(rcode));
+ cmp_error(c, "transaction failed: %s\n", fw_rcode_string(rcode));
return -EIO;
bus_reset:
diff --git a/sound/firewire/lib.c b/sound/firewire/lib.c
index 4750cea2210e..14eb41498372 100644
--- a/sound/firewire/lib.c
+++ b/sound/firewire/lib.c
@@ -14,32 +14,6 @@
#define ERROR_RETRY_DELAY_MS 5
/**
- * rcode_string - convert a firewire result code to a string
- * @rcode: the result
- */
-const char *rcode_string(unsigned int rcode)
-{
- static const char *const names[] = {
- [RCODE_COMPLETE] = "complete",
- [RCODE_CONFLICT_ERROR] = "conflict error",
- [RCODE_DATA_ERROR] = "data error",
- [RCODE_TYPE_ERROR] = "type error",
- [RCODE_ADDRESS_ERROR] = "address error",
- [RCODE_SEND_ERROR] = "send error",
- [RCODE_CANCELLED] = "cancelled",
- [RCODE_BUSY] = "busy",
- [RCODE_GENERATION] = "generation",
- [RCODE_NO_ACK] = "no ack",
- };
-
- if (rcode < ARRAY_SIZE(names) && names[rcode])
- return names[rcode];
- else
- return "unknown";
-}
-EXPORT_SYMBOL(rcode_string);
-
-/**
* snd_fw_transaction - send a request and wait for its completion
* @unit: the driver's unit on the target device
* @tcode: the transaction code
@@ -71,7 +45,7 @@ int snd_fw_transaction(struct fw_unit *unit, int tcode,
if (rcode_is_permanent_error(rcode) || ++tries >= 3) {
dev_err(&unit->device, "transaction failed: %s\n",
- rcode_string(rcode));
+ fw_rcode_string(rcode));
return -EIO;
}
diff --git a/sound/firewire/lib.h b/sound/firewire/lib.h
index 064f3fd9ab06..aef301476ea9 100644
--- a/sound/firewire/lib.h
+++ b/sound/firewire/lib.h
@@ -8,7 +8,6 @@ struct fw_unit;
int snd_fw_transaction(struct fw_unit *unit, int tcode,
u64 offset, void *buffer, size_t length);
-const char *rcode_string(unsigned int rcode);
/* returns true if retrying the transaction would not make sense */
static inline bool rcode_is_permanent_error(int rcode)
diff --git a/sound/i2c/other/tea575x-tuner.c b/sound/i2c/other/tea575x-tuner.c
index a63faec5e7fd..582aace20ea3 100644
--- a/sound/i2c/other/tea575x-tuner.c
+++ b/sound/i2c/other/tea575x-tuner.c
@@ -375,6 +375,9 @@ int snd_tea575x_init(struct snd_tea575x *tea)
tea->vd.v4l2_dev = tea->v4l2_dev;
tea->vd.ctrl_handler = &tea->ctrl_handler;
set_bit(V4L2_FL_USE_FH_PRIO, &tea->vd.flags);
+ /* disable hw_freq_seek if we can't use it */
+ if (tea->cannot_read_data)
+ v4l2_disable_ioctl(&tea->vd, VIDIOC_S_HW_FREQ_SEEK);
v4l2_ctrl_handler_init(&tea->ctrl_handler, 1);
v4l2_ctrl_new_std(&tea->ctrl_handler, &tea575x_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index eb09a3348325..41ca803a1fff 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2239,24 +2239,50 @@ void snd_hda_ctls_clear(struct hda_codec *codec)
/* pseudo device locking
* toggle card->shutdown to allow/disallow the device access (as a hack)
*/
-static int hda_lock_devices(struct snd_card *card)
+int snd_hda_lock_devices(struct hda_bus *bus)
{
+ struct snd_card *card = bus->card;
+ struct hda_codec *codec;
+
spin_lock(&card->files_lock);
- if (card->shutdown) {
- spin_unlock(&card->files_lock);
- return -EINVAL;
- }
+ if (card->shutdown)
+ goto err_unlock;
card->shutdown = 1;
+ if (!list_empty(&card->ctl_files))
+ goto err_clear;
+
+ list_for_each_entry(codec, &bus->codec_list, list) {
+ int pcm;
+ for (pcm = 0; pcm < codec->num_pcms; pcm++) {
+ struct hda_pcm *cpcm = &codec->pcm_info[pcm];
+ if (!cpcm->pcm)
+ continue;
+ if (cpcm->pcm->streams[0].substream_opened ||
+ cpcm->pcm->streams[1].substream_opened)
+ goto err_clear;
+ }
+ }
spin_unlock(&card->files_lock);
return 0;
+
+ err_clear:
+ card->shutdown = 0;
+ err_unlock:
+ spin_unlock(&card->files_lock);
+ return -EINVAL;
}
+EXPORT_SYMBOL_HDA(snd_hda_lock_devices);
-static void hda_unlock_devices(struct snd_card *card)
+void snd_hda_unlock_devices(struct hda_bus *bus)
{
+ struct snd_card *card = bus->card;
+
spin_lock(&card->files_lock);
card->shutdown = 0;
spin_unlock(&card->files_lock);
}
+EXPORT_SYMBOL_HDA(snd_hda_unlock_devices);
/**
* snd_hda_codec_reset - Clear all objects assigned to the codec
@@ -2270,26 +2296,12 @@ static void hda_unlock_devices(struct snd_card *card)
*/
int snd_hda_codec_reset(struct hda_codec *codec)
{
- struct snd_card *card = codec->bus->card;
- int i, pcm;
+ struct hda_bus *bus = codec->bus;
+ struct snd_card *card = bus->card;
+ int i;
- if (hda_lock_devices(card) < 0)
- return -EBUSY;
- /* check whether the codec isn't used by any mixer or PCM streams */
- if (!list_empty(&card->ctl_files)) {
- hda_unlock_devices(card);
+ if (snd_hda_lock_devices(bus) < 0)
return -EBUSY;
- }
- for (pcm = 0; pcm < codec->num_pcms; pcm++) {
- struct hda_pcm *cpcm = &codec->pcm_info[pcm];
- if (!cpcm->pcm)
- continue;
- if (cpcm->pcm->streams[0].substream_opened ||
- cpcm->pcm->streams[1].substream_opened) {
- hda_unlock_devices(card);
- return -EBUSY;
- }
- }
/* OK, let it free */
@@ -2298,7 +2310,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
codec->power_on = 0;
codec->power_transition = 0;
codec->power_jiffies = jiffies;
- flush_workqueue(codec->bus->workq);
+ flush_workqueue(bus->workq);
#endif
snd_hda_ctls_clear(codec);
/* relase PCMs */
@@ -2306,7 +2318,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
if (codec->pcm_info[i].pcm) {
snd_device_free(card, codec->pcm_info[i].pcm);
clear_bit(codec->pcm_info[i].device,
- codec->bus->pcm_dev_bits);
+ bus->pcm_dev_bits);
}
}
if (codec->patch_ops.free)
@@ -2331,7 +2343,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
codec->owner = NULL;
/* allow device access again */
- hda_unlock_devices(card);
+ snd_hda_unlock_devices(bus);
return 0;
}
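snd_hda_lock_devices()/snd_hda_unlock_devices() expose the card->shutdown pseudo-lock so that callers outside hda_codec.c, such as the VGA-switcheroo hooks added later in this series, can atomically check whether any control file or PCM stream is still open. A hedged sketch of how a caller pairs the two, mirroring what the can_switch callback does (the helper name is invented):

#include "hda_codec.h"

/* Illustrative only: probe whether the bus is currently idle by taking and
 * immediately releasing the pseudo device lock. */
static bool example_bus_is_idle(struct hda_bus *bus)
{
	if (snd_hda_lock_devices(bus) < 0)
		return false;	/* a ctl file or PCM stream is still open */
	snd_hda_unlock_devices(bus);
	return true;
}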
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 54b52819fb47..4fc3960c8591 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -1023,6 +1023,9 @@ void snd_hda_codec_set_power_to_all(struct hda_codec *codec, hda_nid_t fg,
unsigned int power_state,
bool eapd_workaround);
+int snd_hda_lock_devices(struct hda_bus *bus);
+void snd_hda_unlock_devices(struct hda_bus *bus);
+
/*
* power management
*/
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4ab8102f87ea..2b6392be451c 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -53,6 +53,8 @@
#endif
#include <sound/core.h>
#include <sound/initval.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
#include "hda_codec.h"
@@ -175,6 +177,13 @@ MODULE_DESCRIPTION("Intel HDA driver");
#define SFX "hda-intel: "
#endif
+#if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO)
+#ifdef CONFIG_SND_HDA_CODEC_HDMI
+#define SUPPORT_VGA_SWITCHEROO
+#endif
+#endif
+
+
/*
* registers
*/
@@ -472,6 +481,12 @@ struct azx {
unsigned int probing :1; /* codec probing phase */
unsigned int snoop:1;
unsigned int align_buffer_size:1;
+ unsigned int region_requested:1;
+
+ /* VGA-switcheroo setup */
+ unsigned int use_vga_switcheroo:1;
+ unsigned int init_failed:1; /* delayed init failed */
+ unsigned int disabled:1; /* disabled by VGA-switcher */
/* for debugging */
unsigned int last_cmd[AZX_MAX_CODECS];
@@ -538,7 +553,20 @@ enum {
#define AZX_DCAPS_PRESET_CTHDA \
(AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
-static char *driver_short_names[] __devinitdata = {
+/*
+ * VGA-switcher support
+ */
+#ifdef SUPPORT_VGA_SWITCHEROO
+#define DELAYED_INIT_MARK
+#define DELAYED_INITDATA_MARK
+#define use_vga_switcheroo(chip) ((chip)->use_vga_switcheroo)
+#else
+#define DELAYED_INIT_MARK __devinit
+#define DELAYED_INITDATA_MARK __devinitdata
+#define use_vga_switcheroo(chip) 0
+#endif
+
+static char *driver_short_names[] DELAYED_INITDATA_MARK = {
[AZX_DRIVER_ICH] = "HDA Intel",
[AZX_DRIVER_PCH] = "HDA Intel PCH",
[AZX_DRIVER_SCH] = "HDA Intel MID",
@@ -959,6 +987,8 @@ static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
{
struct azx *chip = bus->private_data;
+ if (chip->disabled)
+ return 0;
chip->last_cmd[azx_command_addr(val)] = val;
if (chip->single_cmd)
return azx_single_send_cmd(bus, val);
@@ -971,6 +1001,8 @@ static unsigned int azx_get_response(struct hda_bus *bus,
unsigned int addr)
{
struct azx *chip = bus->private_data;
+ if (chip->disabled)
+ return 0;
if (chip->single_cmd)
return azx_single_get_response(bus, addr);
else
@@ -1236,6 +1268,11 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
spin_lock(&chip->reg_lock);
+ if (chip->disabled) {
+ spin_unlock(&chip->reg_lock);
+ return IRQ_NONE;
+ }
+
status = azx_readl(chip, INTSTS);
if (status == 0) {
spin_unlock(&chip->reg_lock);
@@ -1521,12 +1558,12 @@ static void azx_bus_reset(struct hda_bus *bus)
*/
/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
-static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] __devinitdata = {
+static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] DELAYED_INITDATA_MARK = {
[AZX_DRIVER_NVIDIA] = 8,
[AZX_DRIVER_TERA] = 1,
};
-static int __devinit azx_codec_create(struct azx *chip, const char *model)
+static int DELAYED_INIT_MARK azx_codec_create(struct azx *chip, const char *model)
{
struct hda_bus_template bus_temp;
int c, codecs, err;
@@ -2444,6 +2481,105 @@ static void azx_notifier_unregister(struct azx *chip)
unregister_reboot_notifier(&chip->reboot_notifier);
}
+static int DELAYED_INIT_MARK azx_first_init(struct azx *chip);
+static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip);
+
+static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci);
+
+#ifdef SUPPORT_VGA_SWITCHEROO
+static void azx_vs_set_state(struct pci_dev *pci,
+ enum vga_switcheroo_state state)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct azx *chip = card->private_data;
+ bool disabled;
+
+ if (chip->init_failed)
+ return;
+
+ disabled = (state == VGA_SWITCHEROO_OFF);
+ if (chip->disabled == disabled)
+ return;
+
+ if (!chip->bus) {
+ chip->disabled = disabled;
+ if (!disabled) {
+ snd_printk(KERN_INFO SFX
+ "%s: Start delayed initialization\n",
+ pci_name(chip->pci));
+ if (azx_first_init(chip) < 0 ||
+ azx_probe_continue(chip) < 0) {
+ snd_printk(KERN_ERR SFX
+ "%s: initialization error\n",
+ pci_name(chip->pci));
+ chip->init_failed = true;
+ }
+ }
+ } else {
+ snd_printk(KERN_INFO SFX
+ "%s %s via VGA-switcheroo\n",
+ disabled ? "Disabling" : "Enabling",
+ pci_name(chip->pci));
+ if (disabled) {
+ azx_suspend(pci, PMSG_FREEZE);
+ chip->disabled = true;
+ snd_hda_lock_devices(chip->bus);
+ } else {
+ snd_hda_unlock_devices(chip->bus);
+ chip->disabled = false;
+ azx_resume(pci);
+ }
+ }
+}
+
+static bool azx_vs_can_switch(struct pci_dev *pci)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct azx *chip = card->private_data;
+
+ if (chip->init_failed)
+ return false;
+ if (chip->disabled || !chip->bus)
+ return true;
+ if (snd_hda_lock_devices(chip->bus))
+ return false;
+ snd_hda_unlock_devices(chip->bus);
+ return true;
+}
+
+static void __devinit init_vga_switcheroo(struct azx *chip)
+{
+ struct pci_dev *p = get_bound_vga(chip->pci);
+ if (p) {
+ snd_printk(KERN_INFO SFX
+ "%s: Handle VGA-switcheroo audio client\n",
+ pci_name(chip->pci));
+ chip->use_vga_switcheroo = 1;
+ pci_dev_put(p);
+ }
+}
+
+static const struct vga_switcheroo_client_ops azx_vs_ops = {
+ .set_gpu_state = azx_vs_set_state,
+ .can_switch = azx_vs_can_switch,
+};
+
+static int __devinit register_vga_switcheroo(struct azx *chip)
+{
+ if (!chip->use_vga_switcheroo)
+ return 0;
+ /* FIXME: currently only handling DIS controller
+ * is there any machine with two switchable HDMI audio controllers?
+ */
+ return vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
+ VGA_SWITCHEROO_DIS,
+ chip->bus != NULL);
+}
+#else
+#define init_vga_switcheroo(chip) /* NOP */
+#define register_vga_switcheroo(chip) 0
+#endif /* SUPPORT_VGA_SWITCHEROO */
+
/*
* destructor
*/
@@ -2453,6 +2589,12 @@ static int azx_free(struct azx *chip)
azx_notifier_unregister(chip);
+ if (use_vga_switcheroo(chip)) {
+ if (chip->disabled && chip->bus)
+ snd_hda_unlock_devices(chip->bus);
+ vga_switcheroo_unregister_client(chip->pci);
+ }
+
if (chip->initialized) {
azx_clear_irq_pending(chip);
for (i = 0; i < chip->num_streams; i++)
@@ -2482,7 +2624,8 @@ static int azx_free(struct azx *chip)
mark_pages_wc(chip, &chip->posbuf, false);
snd_dma_free_pages(&chip->posbuf);
}
- pci_release_regions(chip->pci);
+ if (chip->region_requested)
+ pci_release_regions(chip->pci);
pci_disable_device(chip->pci);
kfree(chip->azx_dev);
kfree(chip);
@@ -2496,6 +2639,45 @@ static int azx_dev_free(struct snd_device *device)
}
/*
* Check whether the HDMI controller is disabled by vga-switcheroo
+ */
+static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci)
+{
+ struct pci_dev *p;
+
+ /* check only discrete GPU */
+ switch (pci->vendor) {
+ case PCI_VENDOR_ID_ATI:
+ case PCI_VENDOR_ID_AMD:
+ case PCI_VENDOR_ID_NVIDIA:
+ if (pci->devfn == 1) {
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(pci->bus),
+ pci->bus->number, 0);
+ if (p) {
+ if ((p->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ return p;
+ pci_dev_put(p);
+ }
+ }
+ break;
+ }
+ return NULL;
+}
+
+static bool __devinit check_hdmi_disabled(struct pci_dev *pci)
+{
+ bool vga_inactive = false;
+ struct pci_dev *p = get_bound_vga(pci);
+
+ if (p) {
+ if (vga_default_device() && p != vga_default_device())
+ vga_inactive = true;
+ pci_dev_put(p);
+ }
+ return vga_inactive;
+}
+
+/*
* white/black-listing for position_fix
*/
static struct snd_pci_quirk position_fix_list[] __devinitdata = {
@@ -2672,12 +2854,11 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
int dev, unsigned int driver_caps,
struct azx **rchip)
{
- struct azx *chip;
- int i, err;
- unsigned short gcap;
static struct snd_device_ops ops = {
.dev_free = azx_dev_free,
};
+ struct azx *chip;
+ int err;
*rchip = NULL;
@@ -2703,6 +2884,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
chip->dev_index = dev;
INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work);
INIT_LIST_HEAD(&chip->pcm_list);
+ init_vga_switcheroo(chip);
chip->position_fix[0] = chip->position_fix[1] =
check_position_fix(chip, position_fix[dev]);
@@ -2730,6 +2912,53 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
}
}
+ if (check_hdmi_disabled(pci)) {
+ snd_printk(KERN_INFO SFX "VGA controller for %s is disabled\n",
+ pci_name(pci));
+ if (use_vga_switcheroo(chip)) {
+ snd_printk(KERN_INFO SFX "Delaying initialization\n");
+ chip->disabled = true;
+ goto ok;
+ }
+ kfree(chip);
+ pci_disable_device(pci);
+ return -ENXIO;
+ }
+
+ err = azx_first_init(chip);
+ if (err < 0) {
+ azx_free(chip);
+ return err;
+ }
+
+ ok:
+ err = register_vga_switcheroo(chip);
+ if (err < 0) {
+ snd_printk(KERN_ERR SFX
+ "Error registering VGA-switcheroo client\n");
+ azx_free(chip);
+ return err;
+ }
+
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
+ snd_printk(KERN_ERR SFX "Error creating device [card]!\n");
+ azx_free(chip);
+ return err;
+ }
+
+ *rchip = chip;
+ return 0;
+}
+
+static int DELAYED_INIT_MARK azx_first_init(struct azx *chip)
+{
+ int dev = chip->dev_index;
+ struct pci_dev *pci = chip->pci;
+ struct snd_card *card = chip->card;
+ int i, err;
+ unsigned short gcap;
+
#if BITS_PER_LONG != 64
/* Fix up base address on ULI M5461 */
if (chip->driver_type == AZX_DRIVER_ULI) {
@@ -2741,28 +2970,23 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
#endif
err = pci_request_regions(pci, "ICH HD audio");
- if (err < 0) {
- kfree(chip);
- pci_disable_device(pci);
+ if (err < 0)
return err;
- }
+ chip->region_requested = 1;
chip->addr = pci_resource_start(pci, 0);
chip->remap_addr = pci_ioremap_bar(pci, 0);
if (chip->remap_addr == NULL) {
snd_printk(KERN_ERR SFX "ioremap error\n");
- err = -ENXIO;
- goto errout;
+ return -ENXIO;
}
if (chip->msi)
if (pci_enable_msi(pci) < 0)
chip->msi = 0;
- if (azx_acquire_irq(chip, 0) < 0) {
- err = -EBUSY;
- goto errout;
- }
+ if (azx_acquire_irq(chip, 0) < 0)
+ return -EBUSY;
pci_set_master(pci);
synchronize_irq(chip->irq);
@@ -2841,7 +3065,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
GFP_KERNEL);
if (!chip->azx_dev) {
snd_printk(KERN_ERR SFX "cannot malloc azx_dev\n");
- goto errout;
+ return -ENOMEM;
}
for (i = 0; i < chip->num_streams; i++) {
@@ -2851,7 +3075,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
BDL_SIZE, &chip->azx_dev[i].bdl);
if (err < 0) {
snd_printk(KERN_ERR SFX "cannot allocate BDL\n");
- goto errout;
+ return -ENOMEM;
}
mark_pages_wc(chip, &chip->azx_dev[i].bdl, true);
}
@@ -2861,13 +3085,13 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
chip->num_streams * 8, &chip->posbuf);
if (err < 0) {
snd_printk(KERN_ERR SFX "cannot allocate posbuf\n");
- goto errout;
+ return -ENOMEM;
}
mark_pages_wc(chip, &chip->posbuf, true);
/* allocate CORB/RIRB */
err = azx_alloc_cmd_io(chip);
if (err < 0)
- goto errout;
+ return err;
/* initialize streams */
azx_init_stream(chip);
@@ -2879,14 +3103,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
/* codec detection */
if (!chip->codec_mask) {
snd_printk(KERN_ERR SFX "no codecs found!\n");
- err = -ENODEV;
- goto errout;
- }
-
- err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
- if (err <0) {
- snd_printk(KERN_ERR SFX "Error creating device [card]!\n");
- goto errout;
+ return -ENODEV;
}
strcpy(card->driver, "HDA-Intel");
@@ -2896,12 +3113,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
"%s at 0x%lx irq %i",
card->shortname, chip->addr, chip->irq);
- *rchip = chip;
return 0;
-
- errout:
- azx_free(chip);
- return err;
}
static void power_down_all_codecs(struct azx *chip)
@@ -2946,6 +3158,27 @@ static int __devinit azx_probe(struct pci_dev *pci,
goto out_free;
card->private_data = chip;
+ if (!chip->disabled) {
+ err = azx_probe_continue(chip);
+ if (err < 0)
+ goto out_free;
+ }
+
+ pci_set_drvdata(pci, card);
+
+ dev++;
+ return 0;
+
+out_free:
+ snd_card_free(card);
+ return err;
+}
+
+static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip)
+{
+ int dev = chip->dev_index;
+ int err;
+
#ifdef CONFIG_SND_HDA_INPUT_BEEP
chip->beep_mode = beep_mode[dev];
#endif
@@ -2979,25 +3212,26 @@ static int __devinit azx_probe(struct pci_dev *pci,
if (err < 0)
goto out_free;
- err = snd_card_register(card);
+ err = snd_card_register(chip->card);
if (err < 0)
goto out_free;
- pci_set_drvdata(pci, card);
chip->running = 1;
power_down_all_codecs(chip);
azx_notifier_register(chip);
- dev++;
- return err;
+ return 0;
+
out_free:
- snd_card_free(card);
+ chip->init_failed = 1;
return err;
}
static void __devexit azx_remove(struct pci_dev *pci)
{
- snd_card_free(pci_get_drvdata(pci));
+ struct snd_card *card = pci_get_drvdata(pci);
+ if (card)
+ snd_card_free(card);
pci_set_drvdata(pci, NULL);
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ff71dcef08ef..224410e8e9e7 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2368,6 +2368,7 @@ static struct alc_codec_rename_table rename_tbl[] = {
{ 0x10ec0269, 0xffff, 0xa023, "ALC259" },
{ 0x10ec0269, 0xffff, 0x6023, "ALC281X" },
{ 0x10ec0269, 0x00f0, 0x0020, "ALC269VC" },
+ { 0x10ec0269, 0x00f0, 0x0030, "ALC269VD" },
{ 0x10ec0887, 0x00f0, 0x0030, "ALC887-VD" },
{ 0x10ec0888, 0x00f0, 0x0030, "ALC888-VD" },
{ 0x10ec0888, 0xf0f0, 0x3020, "ALC886" },
@@ -5614,6 +5615,7 @@ enum {
ALC269_TYPE_ALC269VA,
ALC269_TYPE_ALC269VB,
ALC269_TYPE_ALC269VC,
+ ALC269_TYPE_ALC269VD,
};
/*
@@ -5625,8 +5627,21 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
static const hda_nid_t alc269_ssids[] = { 0, 0x1b, 0x14, 0x21 };
static const hda_nid_t alc269va_ssids[] = { 0x15, 0x1b, 0x14, 0 };
struct alc_spec *spec = codec->spec;
- const hda_nid_t *ssids = spec->codec_variant == ALC269_TYPE_ALC269VA ?
- alc269va_ssids : alc269_ssids;
+ const hda_nid_t *ssids;
+
+ switch (spec->codec_variant) {
+ case ALC269_TYPE_ALC269VA:
+ case ALC269_TYPE_ALC269VC:
+ ssids = alc269va_ssids;
+ break;
+ case ALC269_TYPE_ALC269VB:
+ case ALC269_TYPE_ALC269VD:
+ ssids = alc269_ssids;
+ break;
+ default:
+ ssids = alc269_ssids;
+ break;
+ }
return alc_parse_auto_config(codec, alc269_ignore, ssids);
}
@@ -5643,6 +5658,11 @@ static void alc269_toggle_power_output(struct hda_codec *codec, int power_up)
static void alc269_shutup(struct hda_codec *codec)
{
+ struct alc_spec *spec = codec->spec;
+
+ if (spec->codec_variant != ALC269_TYPE_ALC269VB)
+ return;
+
if ((alc_get_coef0(codec) & 0x00ff) == 0x017)
alc269_toggle_power_output(codec, 0);
if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
@@ -5654,19 +5674,24 @@ static void alc269_shutup(struct hda_codec *codec)
#ifdef CONFIG_PM
static int alc269_resume(struct hda_codec *codec)
{
- if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
+ struct alc_spec *spec = codec->spec;
+
+ if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+ (alc_get_coef0(codec) & 0x00ff) == 0x018) {
alc269_toggle_power_output(codec, 0);
msleep(150);
}
codec->patch_ops.init(codec);
- if ((alc_get_coef0(codec) & 0x00ff) == 0x017) {
+ if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+ (alc_get_coef0(codec) & 0x00ff) == 0x017) {
alc269_toggle_power_output(codec, 1);
msleep(200);
}
- if ((alc_get_coef0(codec) & 0x00ff) == 0x018)
+ if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+ (alc_get_coef0(codec) & 0x00ff) == 0x018)
alc269_toggle_power_output(codec, 1);
snd_hda_codec_resume_amp(codec);
@@ -6081,6 +6106,9 @@ static int patch_alc269(struct hda_codec *codec)
err = alc_codec_rename(codec, "ALC3202");
spec->codec_variant = ALC269_TYPE_ALC269VC;
break;
+ case 0x0030:
+ spec->codec_variant = ALC269_TYPE_ALC269VD;
+ break;
default:
alc_fix_pll_init(codec, 0x20, 0x04, 15);
}
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index 3cb9aa4299d3..fa4556750451 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/mbus.h>
#include <linux/delay.h>
+#include <linux/clk.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -449,6 +450,14 @@ static __devinit int kirkwood_i2s_dev_probe(struct platform_device *pdev)
priv->burst = data->burst;
+ priv->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "no clock\n");
+ err = PTR_ERR(priv->clk);
+ goto err_ioremap;
+ }
+ clk_prepare_enable(priv->clk);
+
return snd_soc_register_dai(&pdev->dev, &kirkwood_i2s_dai);
err_ioremap:
@@ -466,6 +475,10 @@ static __devexit int kirkwood_i2s_dev_remove(struct platform_device *pdev)
struct kirkwood_dma_data *priv = dev_get_drvdata(&pdev->dev);
snd_soc_unregister_dai(&pdev->dev);
+
+ clk_disable_unprepare(priv->clk);
+ clk_put(priv->clk);
+
iounmap(priv->io);
release_mem_region(priv->mem->start, SZ_16K);
kfree(priv);
diff --git a/sound/soc/kirkwood/kirkwood.h b/sound/soc/kirkwood/kirkwood.h
index 9047436b3937..f9084d83e6bd 100644
--- a/sound/soc/kirkwood/kirkwood.h
+++ b/sound/soc/kirkwood/kirkwood.h
@@ -123,6 +123,7 @@ struct kirkwood_dma_data {
void __iomem *io;
int irq;
int burst;
+ struct clk *clk;
};
#endif
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index 9ccfa5e1c11b..57a2fa751085 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -109,11 +109,12 @@ config SND_OMAP_SOC_OMAP_ABE_TWL6040
- PandaBoard (4430)
- PandaBoardES (4460)
-config SND_OMAP_SOC_OMAP4_HDMI
- tristate "SoC Audio support for Texas Instruments OMAP4 HDMI"
- depends on SND_OMAP_SOC && OMAP4_DSS_HDMI && OMAP2_DSS && ARCH_OMAP4
+config SND_OMAP_SOC_OMAP_HDMI
+ tristate "SoC Audio support for Texas Instruments OMAP HDMI"
+ depends on SND_OMAP_SOC && OMAP4_DSS_HDMI && OMAP2_DSS
select SND_OMAP_SOC_HDMI
select SND_SOC_OMAP_HDMI_CODEC
+ select OMAP4_DSS_HDMI_AUDIO
help
Say Y if you want to add support for SoC HDMI audio on Texas Instruments
OMAP4 chips
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
index 1d656bce01d4..0e14dd322565 100644
--- a/sound/soc/omap/Makefile
+++ b/sound/soc/omap/Makefile
@@ -25,7 +25,7 @@ snd-soc-omap3pandora-objs := omap3pandora.o
snd-soc-omap3beagle-objs := omap3beagle.o
snd-soc-zoom2-objs := zoom2.o
snd-soc-igep0020-objs := igep0020.o
-snd-soc-omap4-hdmi-objs := omap4-hdmi-card.o
+snd-soc-omap-hdmi-card-objs := omap-hdmi-card.o
obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
obj-$(CONFIG_SND_OMAP_SOC_RX51) += snd-soc-rx51.o
@@ -41,4 +41,4 @@ obj-$(CONFIG_SND_OMAP_SOC_OMAP3_PANDORA) += snd-soc-omap3pandora.o
obj-$(CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE) += snd-soc-omap3beagle.o
obj-$(CONFIG_SND_OMAP_SOC_ZOOM2) += snd-soc-zoom2.o
obj-$(CONFIG_SND_OMAP_SOC_IGEP0020) += snd-soc-igep0020.o
-obj-$(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) += snd-soc-omap4-hdmi.o
+obj-$(CONFIG_SND_OMAP_SOC_OMAP_HDMI) += snd-soc-omap-hdmi-card.o
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index e5f44440d1b9..34835e8a9160 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -109,6 +109,47 @@ static void omap_mcbsp_dump_reg(struct omap_mcbsp *mcbsp)
dev_dbg(mcbsp->dev, "***********************\n");
}
+static irqreturn_t omap_mcbsp_irq_handler(int irq, void *dev_id)
+{
+ struct omap_mcbsp *mcbsp = dev_id;
+ u16 irqst;
+
+ irqst = MCBSP_READ(mcbsp, IRQST);
+ dev_dbg(mcbsp->dev, "IRQ callback : 0x%x\n", irqst);
+
+ if (irqst & RSYNCERREN)
+ dev_err(mcbsp->dev, "RX Frame Sync Error!\n");
+ if (irqst & RFSREN)
+ dev_dbg(mcbsp->dev, "RX Frame Sync\n");
+ if (irqst & REOFEN)
+ dev_dbg(mcbsp->dev, "RX End Of Frame\n");
+ if (irqst & RRDYEN)
+ dev_dbg(mcbsp->dev, "RX Buffer Threshold Reached\n");
+ if (irqst & RUNDFLEN)
+ dev_err(mcbsp->dev, "RX Buffer Underflow!\n");
+ if (irqst & ROVFLEN)
+ dev_err(mcbsp->dev, "RX Buffer Overflow!\n");
+
+ if (irqst & XSYNCERREN)
+ dev_err(mcbsp->dev, "TX Frame Sync Error!\n");
+ if (irqst & XFSXEN)
+ dev_dbg(mcbsp->dev, "TX Frame Sync\n");
+ if (irqst & XEOFEN)
+ dev_dbg(mcbsp->dev, "TX End Of Frame\n");
+ if (irqst & XRDYEN)
+ dev_dbg(mcbsp->dev, "TX Buffer threshold Reached\n");
+ if (irqst & XUNDFLEN)
+ dev_err(mcbsp->dev, "TX Buffer Underflow!\n");
+ if (irqst & XOVFLEN)
+ dev_err(mcbsp->dev, "TX Buffer Overflow!\n");
+ if (irqst & XEMPTYEOFEN)
+ dev_dbg(mcbsp->dev, "TX Buffer empty at end of frame\n");
+
+ MCBSP_WRITE(mcbsp, IRQST, irqst);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *dev_id)
{
struct omap_mcbsp *mcbsp_tx = dev_id;
@@ -176,6 +217,10 @@ void omap_mcbsp_config(struct omap_mcbsp *mcbsp,
/* Enable wakeup behavior */
if (mcbsp->pdata->has_wakeup)
MCBSP_WRITE(mcbsp, WAKEUPEN, XRDYEN | RRDYEN);
+
+ /* Enable TX/RX sync error interrupts by default */
+ if (mcbsp->irq)
+ MCBSP_WRITE(mcbsp, IRQEN, RSYNCERREN | XSYNCERREN);
}
/**
@@ -489,23 +534,25 @@ int omap_mcbsp_request(struct omap_mcbsp *mcbsp)
MCBSP_WRITE(mcbsp, SPCR1, 0);
MCBSP_WRITE(mcbsp, SPCR2, 0);
- err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler,
- 0, "McBSP", (void *)mcbsp);
- if (err != 0) {
- dev_err(mcbsp->dev, "Unable to request TX IRQ %d "
- "for McBSP%d\n", mcbsp->tx_irq,
- mcbsp->id);
- goto err_clk_disable;
- }
+ if (mcbsp->irq) {
+ err = request_irq(mcbsp->irq, omap_mcbsp_irq_handler, 0,
+ "McBSP", (void *)mcbsp);
+ if (err != 0) {
+ dev_err(mcbsp->dev, "Unable to request IRQ\n");
+ goto err_clk_disable;
+ }
+ } else {
+ err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler, 0,
+ "McBSP TX", (void *)mcbsp);
+ if (err != 0) {
+ dev_err(mcbsp->dev, "Unable to request TX IRQ\n");
+ goto err_clk_disable;
+ }
- if (mcbsp->rx_irq) {
- err = request_irq(mcbsp->rx_irq,
- omap_mcbsp_rx_irq_handler,
- 0, "McBSP", (void *)mcbsp);
+ err = request_irq(mcbsp->rx_irq, omap_mcbsp_rx_irq_handler, 0,
+ "McBSP RX", (void *)mcbsp);
if (err != 0) {
- dev_err(mcbsp->dev, "Unable to request RX IRQ %d "
- "for McBSP%d\n", mcbsp->rx_irq,
- mcbsp->id);
+ dev_err(mcbsp->dev, "Unable to request RX IRQ\n");
goto err_free_irq;
}
}
@@ -542,9 +589,16 @@ void omap_mcbsp_free(struct omap_mcbsp *mcbsp)
if (mcbsp->pdata->has_wakeup)
MCBSP_WRITE(mcbsp, WAKEUPEN, 0);
- if (mcbsp->rx_irq)
+ /* Disable interrupt requests */
+ if (mcbsp->irq)
+ MCBSP_WRITE(mcbsp, IRQEN, 0);
+
+ if (mcbsp->irq) {
+ free_irq(mcbsp->irq, (void *)mcbsp);
+ } else {
free_irq(mcbsp->rx_irq, (void *)mcbsp);
- free_irq(mcbsp->tx_irq, (void *)mcbsp);
+ free_irq(mcbsp->tx_irq, (void *)mcbsp);
+ }
reg_cache = mcbsp->reg_cache;
@@ -754,7 +808,7 @@ THRESHOLD_PROP_BUILDER(max_tx_thres);
THRESHOLD_PROP_BUILDER(max_rx_thres);
static const char *dma_op_modes[] = {
- "element", "threshold", "frame",
+ "element", "threshold",
};
static ssize_t dma_op_mode_show(struct device *dev,
@@ -949,13 +1003,24 @@ int __devinit omap_mcbsp_init(struct platform_device *pdev)
else
mcbsp->phys_dma_base = res->start;
- mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
- mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
-
- /* From OMAP4 there will be a single irq line */
- if (mcbsp->tx_irq == -ENXIO) {
- mcbsp->tx_irq = platform_get_irq(pdev, 0);
- mcbsp->rx_irq = 0;
+ /*
+ * OMAP1 and OMAP2 use two interrupt lines: TX and RX.
+ * OMAP2430 and OMAP3 SoCs have a combined IRQ line as well.
+ * OMAP4 and newer SoCs only have the combined IRQ line.
+ * Use the combined IRQ if available since it gives better debugging
+ * possibilities.
+ */
+ mcbsp->irq = platform_get_irq_byname(pdev, "common");
+ if (mcbsp->irq == -ENXIO) {
+ mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
+
+ if (mcbsp->tx_irq == -ENXIO) {
+ mcbsp->irq = platform_get_irq(pdev, 0);
+ mcbsp->tx_irq = 0;
+ } else {
+ mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
+ mcbsp->irq = 0;
+ }
}
res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
diff --git a/sound/soc/omap/mcbsp.h b/sound/soc/omap/mcbsp.h
index a944fcc9073c..262a6152111f 100644
--- a/sound/soc/omap/mcbsp.h
+++ b/sound/soc/omap/mcbsp.h
@@ -217,17 +217,20 @@ enum {
/********************** McBSP DMA operating modes **************************/
#define MCBSP_DMA_MODE_ELEMENT 0
#define MCBSP_DMA_MODE_THRESHOLD 1
-#define MCBSP_DMA_MODE_FRAME 2
-/********************** McBSP WAKEUPEN bit definitions *********************/
+/********************** McBSP WAKEUPEN/IRQST/IRQEN bit definitions *********/
#define RSYNCERREN BIT(0)
#define RFSREN BIT(1)
#define REOFEN BIT(2)
#define RRDYEN BIT(3)
+#define RUNDFLEN BIT(4)
+#define ROVFLEN BIT(5)
#define XSYNCERREN BIT(7)
#define XFSXEN BIT(8)
#define XEOFEN BIT(9)
#define XRDYEN BIT(10)
+#define XUNDFLEN BIT(11)
+#define XOVFLEN BIT(12)
#define XEMPTYEOFEN BIT(14)
/* Clock signal muxing options */
@@ -295,6 +298,7 @@ struct omap_mcbsp {
int configured;
u8 free;
+ int irq;
int rx_irq;
int tx_irq;
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index 93bb8eee22b3..9d93793d3077 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -40,6 +40,11 @@
#include "omap-pcm.h"
#include "../codecs/twl6040.h"
+struct abe_twl6040 {
+ int jack_detection; /* board can detect jack events */
+ int mclk_freq; /* MCLK frequency speed for twl6040 */
+};
+
static int omap_abe_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
@@ -47,13 +52,13 @@ static int omap_abe_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_card *card = codec->card;
- struct omap_abe_twl6040_data *pdata = dev_get_platdata(card->dev);
+ struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
int clk_id, freq;
int ret;
clk_id = twl6040_get_clk_id(rtd->codec);
if (clk_id == TWL6040_SYSCLK_SEL_HPPLL)
- freq = pdata->mclk_freq;
+ freq = priv->mclk_freq;
else if (clk_id == TWL6040_SYSCLK_SEL_LPPLL)
freq = 32768;
else
@@ -128,6 +133,9 @@ static const struct snd_soc_dapm_widget twl6040_dapm_widgets[] = {
SND_SOC_DAPM_MIC("Main Handset Mic", NULL),
SND_SOC_DAPM_MIC("Sub Handset Mic", NULL),
SND_SOC_DAPM_LINE("Line In", NULL),
+
+ /* Digital microphones */
+ SND_SOC_DAPM_MIC("Digital Mic", NULL),
};
static const struct snd_soc_dapm_route audio_map[] = {
@@ -173,6 +181,7 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
struct snd_soc_card *card = codec->card;
struct snd_soc_dapm_context *dapm = &codec->dapm;
struct omap_abe_twl6040_data *pdata = dev_get_platdata(card->dev);
+ struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
int hs_trim;
int ret = 0;
@@ -196,7 +205,7 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
TWL6040_HSF_TRIM_RIGHT(hs_trim));
/* Headset jack detection only if it is supported */
- if (pdata->jack_detection) {
+ if (priv->jack_detection) {
ret = snd_soc_jack_new(codec, "Headset Jack",
SND_JACK_HEADSET, &hs_jack);
if (ret)
@@ -210,10 +219,6 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
-static const struct snd_soc_dapm_widget dmic_dapm_widgets[] = {
- SND_SOC_DAPM_MIC("Digital Mic", NULL),
-};
-
static const struct snd_soc_dapm_route dmic_audio_map[] = {
{"DMic", NULL, "Digital Mic"},
{"Digital Mic", NULL, "Digital Mic1 Bias"},
@@ -223,19 +228,13 @@ static int omap_abe_dmic_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
- int ret;
-
- ret = snd_soc_dapm_new_controls(dapm, dmic_dapm_widgets,
- ARRAY_SIZE(dmic_dapm_widgets));
- if (ret)
- return ret;
return snd_soc_dapm_add_routes(dapm, dmic_audio_map,
ARRAY_SIZE(dmic_audio_map));
}
/* Digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link twl6040_dmic_dai[] = {
+static struct snd_soc_dai_link abe_twl6040_dai_links[] = {
{
.name = "TWL6040",
.stream_name = "TWL6040",
@@ -258,19 +257,6 @@ static struct snd_soc_dai_link twl6040_dmic_dai[] = {
},
};
-static struct snd_soc_dai_link twl6040_only_dai[] = {
- {
- .name = "TWL6040",
- .stream_name = "TWL6040",
- .cpu_dai_name = "omap-mcpdm",
- .codec_dai_name = "twl6040-legacy",
- .platform_name = "omap-pcm-audio",
- .codec_name = "twl6040-codec",
- .init = omap_abe_twl6040_init,
- .ops = &omap_abe_ops,
- },
-};
-
/* Audio machine driver */
static struct snd_soc_card omap_abe_card = {
.owner = THIS_MODULE,
@@ -285,6 +271,8 @@ static __devinit int omap_abe_probe(struct platform_device *pdev)
{
struct omap_abe_twl6040_data *pdata = dev_get_platdata(&pdev->dev);
struct snd_soc_card *card = &omap_abe_card;
+ struct abe_twl6040 *priv;
+ int num_links = 0;
int ret;
card->dev = &pdev->dev;
@@ -294,6 +282,10 @@ static __devinit int omap_abe_probe(struct platform_device *pdev)
return -ENODEV;
}
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
if (pdata->card_name) {
card->name = pdata->card_name;
} else {
@@ -301,18 +293,24 @@ static __devinit int omap_abe_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (!pdata->mclk_freq) {
+ priv->jack_detection = pdata->jack_detection;
+ priv->mclk_freq = pdata->mclk_freq;
+
+
+ if (!priv->mclk_freq) {
dev_err(&pdev->dev, "MCLK frequency missing\n");
return -ENODEV;
}
- if (pdata->has_dmic) {
- card->dai_link = twl6040_dmic_dai;
- card->num_links = ARRAY_SIZE(twl6040_dmic_dai);
- } else {
- card->dai_link = twl6040_only_dai;
- card->num_links = ARRAY_SIZE(twl6040_only_dai);
- }
+ if (pdata->has_dmic)
+ num_links = 2;
+ else
+ num_links = 1;
+
+ card->dai_link = abe_twl6040_dai_links;
+ card->num_links = num_links;
+
+ snd_soc_card_set_drvdata(card, priv);
ret = snd_soc_register_card(card);
if (ret)
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
index 4dcb5a7e40e8..75f5dca0e8d2 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/omap/omap-dmic.c
@@ -32,6 +32,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
#include <plat/dma.h>
#include <sound/core.h>
@@ -528,10 +529,17 @@ static int __devexit asoc_dmic_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id omap_dmic_of_match[] = {
+ { .compatible = "ti,omap4-dmic", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, omap_dmic_of_match);
+
static struct platform_driver asoc_dmic_driver = {
.driver = {
.name = "omap-dmic",
.owner = THIS_MODULE,
+ .of_match_table = omap_dmic_of_match,
},
.probe = asoc_dmic_probe,
.remove = __devexit_p(asoc_dmic_remove),
diff --git a/sound/soc/omap/omap-hdmi-card.c b/sound/soc/omap/omap-hdmi-card.c
new file mode 100644
index 000000000000..eaa2ea0e3f81
--- /dev/null
+++ b/sound/soc/omap/omap-hdmi-card.c
@@ -0,0 +1,87 @@
+/*
+ * omap-hdmi-card.c
+ *
+ * OMAP ALSA SoC machine driver for TI OMAP HDMI
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Ricardo Neri <ricardo.neri@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <asm/mach-types.h>
+#include <video/omapdss.h>
+
+#define DRV_NAME "omap-hdmi-audio"
+
+static struct snd_soc_dai_link omap_hdmi_dai = {
+ .name = "HDMI",
+ .stream_name = "HDMI",
+ .cpu_dai_name = "omap-hdmi-audio-dai",
+ .platform_name = "omap-pcm-audio",
+ .codec_name = "hdmi-audio-codec",
+ .codec_dai_name = "omap-hdmi-hifi",
+};
+
+static struct snd_soc_card snd_soc_omap_hdmi = {
+ .name = "OMAPHDMI",
+ .owner = THIS_MODULE,
+ .dai_link = &omap_hdmi_dai,
+ .num_links = 1,
+};
+
+static __devinit int omap_hdmi_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &snd_soc_omap_hdmi;
+ int ret;
+
+ card->dev = &pdev->dev;
+
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+ card->dev = NULL;
+ return ret;
+ }
+ return 0;
+}
+
+static int __devexit omap_hdmi_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ snd_soc_unregister_card(card);
+ card->dev = NULL;
+ return 0;
+}
+
+static struct platform_driver omap_hdmi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = omap_hdmi_probe,
+ .remove = __devexit_p(omap_hdmi_remove),
+};
+
+module_platform_driver(omap_hdmi_driver);
+
+MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
+MODULE_DESCRIPTION("OMAP HDMI machine ASoC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/omap/omap-hdmi.c b/sound/soc/omap/omap-hdmi.c
index 38e0defa7078..a08245d9203c 100644
--- a/sound/soc/omap/omap-hdmi.c
+++ b/sound/soc/omap/omap-hdmi.c
@@ -30,21 +30,28 @@
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
+#include <sound/asound.h>
+#include <sound/asoundef.h>
+#include <video/omapdss.h>
#include <plat/dma.h>
#include "omap-pcm.h"
#include "omap-hdmi.h"
-#define DRV_NAME "hdmi-audio-dai"
+#define DRV_NAME "omap-hdmi-audio-dai"
-static struct omap_pcm_dma_data omap_hdmi_dai_dma_params = {
- .name = "HDMI playback",
- .sync_mode = OMAP_DMA_SYNC_PACKET,
+struct hdmi_priv {
+ struct omap_pcm_dma_data dma_params;
+ struct omap_dss_audio dss_audio;
+ struct snd_aes_iec958 iec;
+ struct snd_cea_861_aud_if cea;
+ struct omap_dss_device *dssdev;
};
static int omap_hdmi_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
+ struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
int err;
/*
* Make sure that the period bytes are multiple of the DMA packet size.
@@ -52,46 +59,201 @@ static int omap_hdmi_dai_startup(struct snd_pcm_substream *substream,
*/
err = snd_pcm_hw_constraint_step(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
- if (err < 0)
+ if (err < 0) {
+ dev_err(dai->dev, "could not apply constraint\n");
return err;
+ }
+ if (!priv->dssdev->driver->audio_supported(priv->dssdev)) {
+ dev_err(dai->dev, "audio not supported\n");
+ return -ENODEV;
+ }
return 0;
}
+static int omap_hdmi_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+ return priv->dssdev->driver->audio_enable(priv->dssdev);
+}
+
static int omap_hdmi_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
+ struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+ struct snd_aes_iec958 *iec = &priv->iec;
+ struct snd_cea_861_aud_if *cea = &priv->cea;
int err = 0;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
- omap_hdmi_dai_dma_params.packet_size = 16;
+ priv->dma_params.packet_size = 16;
break;
case SNDRV_PCM_FORMAT_S24_LE:
- omap_hdmi_dai_dma_params.packet_size = 32;
+ priv->dma_params.packet_size = 32;
break;
default:
- err = -EINVAL;
+ dev_err(dai->dev, "format not supported!\n");
+ return -EINVAL;
}
- omap_hdmi_dai_dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+ priv->dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
snd_soc_dai_set_dma_data(dai, substream,
- &omap_hdmi_dai_dma_params);
+ &priv->dma_params);
+
+ /*
+ * fill the IEC-60958 channel status word
+ */
+
+ /* specify IEC-60958-3 (commercial use) */
+ iec->status[0] &= ~IEC958_AES0_PROFESSIONAL;
+
+ /* specify that the audio is LPCM */
+ iec->status[0] &= ~IEC958_AES0_NONAUDIO;
+
+ iec->status[0] |= IEC958_AES0_CON_NOT_COPYRIGHT;
+
+ iec->status[0] |= IEC958_AES0_CON_EMPHASIS_NONE;
+
+ iec->status[0] |= IEC958_AES1_PRO_MODE_NOTID;
+
+ iec->status[1] = IEC958_AES1_CON_GENERAL;
+
+ iec->status[2] |= IEC958_AES2_CON_SOURCE_UNSPEC;
+
+ iec->status[2] |= IEC958_AES2_CON_CHANNEL_UNSPEC;
+
+ switch (params_rate(params)) {
+ case 32000:
+ iec->status[3] |= IEC958_AES3_CON_FS_32000;
+ break;
+ case 44100:
+ iec->status[3] |= IEC958_AES3_CON_FS_44100;
+ break;
+ case 48000:
+ iec->status[3] |= IEC958_AES3_CON_FS_48000;
+ break;
+ case 88200:
+ iec->status[3] |= IEC958_AES3_CON_FS_88200;
+ break;
+ case 96000:
+ iec->status[3] |= IEC958_AES3_CON_FS_96000;
+ break;
+ case 176400:
+ iec->status[3] |= IEC958_AES3_CON_FS_176400;
+ break;
+ case 192000:
+ iec->status[3] |= IEC958_AES3_CON_FS_192000;
+ break;
+ default:
+ dev_err(dai->dev, "rate not supported!\n");
+ return -EINVAL;
+ }
+
+ /* specify the clock accuracy */
+ iec->status[3] |= IEC958_AES3_CON_CLOCK_1000PPM;
+
+ /*
+ * specify the word length. The same word length value can mean
+ * two different lengths. Hence, we need to specify the maximum
+ * word length as well.
+ */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ iec->status[4] |= IEC958_AES4_CON_WORDLEN_20_16;
+ iec->status[4] &= ~IEC958_AES4_CON_MAX_WORDLEN_24;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ iec->status[4] |= IEC958_AES4_CON_WORDLEN_24_20;
+ iec->status[4] |= IEC958_AES4_CON_MAX_WORDLEN_24;
+ break;
+ default:
+ dev_err(dai->dev, "format not supported!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Fill the CEA-861 audio infoframe (see spec for details)
+ */
+
+ cea->db1_ct_cc = (params_channels(params) - 1)
+ & CEA861_AUDIO_INFOFRAME_DB1CC;
+ cea->db1_ct_cc |= CEA861_AUDIO_INFOFRAME_DB1CT_FROM_STREAM;
+
+ cea->db2_sf_ss = CEA861_AUDIO_INFOFRAME_DB2SF_FROM_STREAM;
+ cea->db2_sf_ss |= CEA861_AUDIO_INFOFRAME_DB2SS_FROM_STREAM;
+
+ cea->db3 = 0; /* not used, all zeros */
+
+ /*
+ * The OMAP HDMI IP requires the use of the 8-channel channel code when
+ * transmitting more than two channels.
+ */
+ if (params_channels(params) == 2)
+ cea->db4_ca = 0x0;
+ else
+ cea->db4_ca = 0x13;
+
+ cea->db5_dminh_lsv = CEA861_AUDIO_INFOFRAME_DB5_DM_INH_PROHIBITED;
+ /* the expression is trivial but makes clear what we are doing */
+ cea->db5_dminh_lsv |= (0 & CEA861_AUDIO_INFOFRAME_DB5_LSV);
+
+ priv->dss_audio.iec = iec;
+ priv->dss_audio.cea = cea;
+
+ err = priv->dssdev->driver->audio_config(priv->dssdev,
+ &priv->dss_audio);
return err;
}
+static int omap_hdmi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+ int err = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ err = priv->dssdev->driver->audio_start(priv->dssdev);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ priv->dssdev->driver->audio_stop(priv->dssdev);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static void omap_hdmi_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+ priv->dssdev->driver->audio_disable(priv->dssdev);
+}
+
static const struct snd_soc_dai_ops omap_hdmi_dai_ops = {
.startup = omap_hdmi_dai_startup,
.hw_params = omap_hdmi_dai_hw_params,
+ .prepare = omap_hdmi_dai_prepare,
+ .trigger = omap_hdmi_dai_trigger,
+ .shutdown = omap_hdmi_dai_shutdown,
};
static struct snd_soc_dai_driver omap_hdmi_dai = {
.playback = {
.channels_min = 2,
- .channels_max = 2,
+ .channels_max = 8,
.rates = OMAP_HDMI_RATES,
.formats = OMAP_HDMI_FORMATS,
},
@@ -102,31 +264,77 @@ static __devinit int omap_hdmi_probe(struct platform_device *pdev)
{
int ret;
struct resource *hdmi_rsrc;
+ struct hdmi_priv *hdmi_data;
+ bool hdmi_dev_found = false;
+
+ hdmi_data = devm_kzalloc(&pdev->dev, sizeof(*hdmi_data), GFP_KERNEL);
+ if (hdmi_data == NULL) {
+ dev_err(&pdev->dev, "Cannot allocate memory for HDMI data\n");
+ return -ENOMEM;
+ }
hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!hdmi_rsrc) {
dev_err(&pdev->dev, "Cannot obtain IORESOURCE_MEM HDMI\n");
- return -EINVAL;
+ return -ENODEV;
}
- omap_hdmi_dai_dma_params.port_addr = hdmi_rsrc->start
+ hdmi_data->dma_params.port_addr = hdmi_rsrc->start
+ OMAP_HDMI_AUDIO_DMA_PORT;
hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!hdmi_rsrc) {
dev_err(&pdev->dev, "Cannot obtain IORESOURCE_DMA HDMI\n");
- return -EINVAL;
+ return -ENODEV;
}
- omap_hdmi_dai_dma_params.dma_req = hdmi_rsrc->start;
+ hdmi_data->dma_params.dma_req = hdmi_rsrc->start;
+ hdmi_data->dma_params.name = "HDMI playback";
+ hdmi_data->dma_params.sync_mode = OMAP_DMA_SYNC_PACKET;
+
+ /*
+ * TODO: We assume that there is only one DSS HDMI device. Future
+ * OMAP implementations may support more than one HDMI device and
+ * we should provide separate audio support for all of them.
+ */
+ /* Find an HDMI device. */
+ for_each_dss_dev(hdmi_data->dssdev) {
+ omap_dss_get_device(hdmi_data->dssdev);
+ if (!hdmi_data->dssdev->driver) {
+ omap_dss_put_device(hdmi_data->dssdev);
+ continue;
+ }
+
+ if (hdmi_data->dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
+ hdmi_dev_found = true;
+ break;
+ }
+ }
+
+ if (!hdmi_dev_found) {
+ dev_err(&pdev->dev, "no driver for HDMI display found\n");
+ return -ENODEV;
+ }
+
+ dev_set_drvdata(&pdev->dev, hdmi_data);
ret = snd_soc_register_dai(&pdev->dev, &omap_hdmi_dai);
+
return ret;
}
static int __devexit omap_hdmi_remove(struct platform_device *pdev)
{
+ struct hdmi_priv *hdmi_data = dev_get_drvdata(&pdev->dev);
+
snd_soc_unregister_dai(&pdev->dev);
+
+ if (hdmi_data == NULL) {
+ dev_err(&pdev->dev, "cannot obtain HDMi data\n");
+ return -ENODEV;
+ }
+
+ omap_dss_put_device(hdmi_data->dssdev);
return 0;
}
diff --git a/sound/soc/omap/omap-hdmi.h b/sound/soc/omap/omap-hdmi.h
index 34c298d5057e..6ad2bf4f2697 100644
--- a/sound/soc/omap/omap-hdmi.h
+++ b/sound/soc/omap/omap-hdmi.h
@@ -28,7 +28,9 @@
#define OMAP_HDMI_AUDIO_DMA_PORT 0x8c
#define OMAP_HDMI_RATES (SNDRV_PCM_RATE_32000 | \
- SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
#define OMAP_HDMI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S24_LE)
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 6912ac7cb625..1046083e90a0 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -71,18 +71,17 @@ static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream)
dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
- /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
- if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)
- /*
- * Configure McBSP threshold based on either:
- * packet_size, when the sDMA is in packet mode, or
- * based on the period size.
- */
- if (dma_data->packet_size)
- words = dma_data->packet_size;
- else
- words = snd_pcm_lib_period_bytes(substream) /
- (mcbsp->wlen / 8);
+ /*
+ * Configure McBSP threshold based on either:
+ * packet_size, when the sDMA is in packet mode, or based on the
+ * period size in THRESHOLD mode, otherwise use McBSP threshold = 1
+ * for mono streams.
+ */
+ if (dma_data->packet_size)
+ words = dma_data->packet_size;
+ else if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)
+ words = snd_pcm_lib_period_bytes(substream) /
+ (mcbsp->wlen / 8);
else
words = 1;
@@ -139,13 +138,15 @@ static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream,
if (mcbsp->pdata->buffer_size) {
/*
* Rule for the buffer size. We should not allow
- * smaller buffer than the FIFO size to avoid underruns
+ * smaller buffer than the FIFO size to avoid underruns.
+ * This applies only to the playback stream.
*/
- snd_pcm_hw_rule_add(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
- omap_mcbsp_hwrule_min_buffersize,
- mcbsp,
- SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ omap_mcbsp_hwrule_min_buffersize,
+ mcbsp,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
/* Make sure, that the period size is always even */
snd_pcm_hw_constraint_step(substream->runtime, 0,
@@ -230,6 +231,7 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
unsigned int format, div, framesize, master;
dma_data = &mcbsp->dma_data[substream->stream];
+ channels = params_channels(params);
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
@@ -245,7 +247,6 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
}
if (mcbsp->pdata->buffer_size) {
dma_data->set_threshold = omap_mcbsp_set_threshold;
- /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) {
int period_words, max_thrsh;
@@ -283,6 +284,10 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
} else {
sync_mode = OMAP_DMA_SYNC_FRAME;
}
+ } else if (channels > 1) {
+ /* Use packet mode for non-mono streams */
+ pkt_size = channels;
+ sync_mode = OMAP_DMA_SYNC_PACKET;
}
}
@@ -301,7 +306,7 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
regs->rcr1 &= ~(RFRLEN1(0x7f) | RWDLEN1(7));
regs->xcr1 &= ~(XFRLEN1(0x7f) | XWDLEN1(7));
format = mcbsp->fmt & SND_SOC_DAIFMT_FORMAT_MASK;
- wpf = channels = params_channels(params);
+ wpf = channels;
if (channels == 2 && (format == SND_SOC_DAIFMT_I2S ||
format == SND_SOC_DAIFMT_LEFT_J)) {
/* Use dual-phase frames */
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index 39705561131a..59d47ab5b15d 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -33,6 +33,7 @@
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -507,10 +508,17 @@ static int __devexit asoc_mcpdm_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id omap_mcpdm_of_match[] = {
+ { .compatible = "ti,omap4-mcpdm", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, omap_mcpdm_of_match);
+
static struct platform_driver asoc_mcpdm_driver = {
.driver = {
.name = "omap-mcpdm",
.owner = THIS_MODULE,
+ .of_match_table = omap_mcpdm_of_match,
},
.probe = asoc_mcpdm_probe,
diff --git a/sound/soc/omap/omap4-hdmi-card.c b/sound/soc/omap/omap4-hdmi-card.c
deleted file mode 100644
index 28d689b2714d..000000000000
--- a/sound/soc/omap/omap4-hdmi-card.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * omap4-hdmi-card.c
- *
- * OMAP ALSA SoC machine driver for TI OMAP4 HDMI
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Ricardo Neri <ricardo.neri@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/module.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <asm/mach-types.h>
-#include <video/omapdss.h>
-
-#define DRV_NAME "omap4-hdmi-audio"
-
-static int omap4_hdmi_dai_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- int i;
- struct omap_overlay_manager *mgr = NULL;
- struct device *dev = substream->pcm->card->dev;
-
- /* Find DSS HDMI device */
- for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
- mgr = omap_dss_get_overlay_manager(i);
- if (mgr && mgr->device
- && mgr->device->type == OMAP_DISPLAY_TYPE_HDMI)
- break;
- }
-
- if (i == omap_dss_get_num_overlay_managers()) {
- dev_err(dev, "HDMI display device not found!\n");
- return -ENODEV;
- }
-
- /* Make sure HDMI is power-on to avoid L3 interconnect errors */
- if (mgr->device->state != OMAP_DSS_DISPLAY_ACTIVE) {
- dev_err(dev, "HDMI display is not active!\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static struct snd_soc_ops omap4_hdmi_dai_ops = {
- .hw_params = omap4_hdmi_dai_hw_params,
-};
-
-static struct snd_soc_dai_link omap4_hdmi_dai = {
- .name = "HDMI",
- .stream_name = "HDMI",
- .cpu_dai_name = "hdmi-audio-dai",
- .platform_name = "omap-pcm-audio",
- .codec_name = "omapdss_hdmi",
- .codec_dai_name = "hdmi-audio-codec",
- .ops = &omap4_hdmi_dai_ops,
-};
-
-static struct snd_soc_card snd_soc_omap4_hdmi = {
- .name = "OMAP4HDMI",
- .owner = THIS_MODULE,
- .dai_link = &omap4_hdmi_dai,
- .num_links = 1,
-};
-
-static __devinit int omap4_hdmi_probe(struct platform_device *pdev)
-{
- struct snd_soc_card *card = &snd_soc_omap4_hdmi;
- int ret;
-
- card->dev = &pdev->dev;
-
- ret = snd_soc_register_card(card);
- if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
- card->dev = NULL;
- return ret;
- }
- return 0;
-}
-
-static int __devexit omap4_hdmi_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- snd_soc_unregister_card(card);
- card->dev = NULL;
- return 0;
-}
-
-static struct platform_driver omap4_hdmi_driver = {
- .driver = {
- .name = "omap4-hdmi-audio",
- .owner = THIS_MODULE,
- },
- .probe = omap4_hdmi_probe,
- .remove = __devexit_p(omap4_hdmi_remove),
-};
-
-module_platform_driver(omap4_hdmi_driver);
-
-MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
-MODULE_DESCRIPTION("OMAP4 HDMI machine ASoC driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 998534992197..554828219c33 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -1434,8 +1434,11 @@ static int event_read_fields(struct event_format *event, struct format_field **f
fail:
free_token(token);
fail_expect:
- if (field)
+ if (field) {
+ free(field->type);
+ free(field->name);
free(field);
+ }
return -1;
}
@@ -1712,6 +1715,8 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
if (set_op_prio(arg) == -1) {
event->flags |= EVENT_FL_FAILED;
+ /* arg->op.op (= token) will be freed at out_free */
+ arg->op.op = NULL;
goto out_free;
}
@@ -2124,6 +2129,13 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char **
free_token(token);
type = process_arg(event, arg, &token);
+
+ if (type == EVENT_OP)
+ type = process_op(event, arg, &token);
+
+ if (type == EVENT_ERROR)
+ goto out_free;
+
if (test_type_token(type, token, EVENT_DELIM, ","))
goto out_free;
@@ -2288,17 +2300,18 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char **
arg = alloc_arg();
type = process_arg(event, arg, &token);
if (type == EVENT_ERROR)
- goto out_free;
+ goto out_free_arg;
if (!test_type_token(type, token, EVENT_OP, "]"))
- goto out_free;
+ goto out_free_arg;
free_token(token);
type = read_token_item(tok);
return type;
+ out_free_arg:
+ free_arg(arg);
out_free:
- free(arg);
free_token(token);
*tok = NULL;
return EVENT_ERROR;
@@ -3362,6 +3375,7 @@ process_defined_func(struct trace_seq *s, void *data, int size,
break;
}
farg = farg->next;
+ param = param->next;
}
ret = (*func_handle->func)(s, args);
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 2d40c5ed81d6..dfcfe2c131de 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -325,9 +325,8 @@ static void free_events(struct event_list *events)
}
static struct filter_arg *
-create_arg_item(struct event_format *event,
- const char *token, enum filter_arg_type type,
- char **error_str)
+create_arg_item(struct event_format *event, const char *token,
+ enum event_type type, char **error_str)
{
struct format_field *field;
struct filter_arg *arg;
@@ -1585,7 +1584,7 @@ get_value(struct event_format *event,
const char *name;
name = get_comm(event, record);
- return (unsigned long long)name;
+ return (unsigned long)name;
}
pevent_read_number_field(field, record->data, &val);
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index 2780d9ce48bf..b715cb71592b 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -77,7 +77,8 @@ OPTIONS
-F::
--funcs::
- Show available functions in given module or kernel.
+ Show available functions in given module or kernel. With -x/--exec,
+ can also list functions in a user space executable / shared library.
--filter=FILTER::
(Only for --vars and --funcs) Set filter. FILTER is a combination of glob
@@ -98,6 +99,15 @@ OPTIONS
--max-probes::
Set the maximum number of probe points for an event. Default is 128.
+-x::
+--exec=PATH::
+ Specify the path to the executable or shared library file for user
+ space tracing. Can also be used with the --funcs option.
+
+In the absence of the -m/-x options, perf probe checks if the first argument
+after the options is an absolute path name. If it is an absolute path, perf
+probe uses it as a target module/target user space binary to probe.
+
PROBE SYNTAX
------------
Probe points are defined by following syntax.
@@ -182,6 +192,13 @@ Delete all probes on schedule().
./perf probe --del='schedule*'
+Add probes at the zfree() function in /bin/zsh
+
+ ./perf probe -x /bin/zsh zfree or ./perf probe /bin/zsh zfree
+
+Add probes at the malloc() function in libc
+
+ ./perf probe -x /lib/libc.so.6 malloc or ./perf probe /lib/libc.so.6 malloc
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perfconfig.example b/tools/perf/Documentation/perfconfig.example
index 42c6fd2ae85d..767ea2436e1c 100644
--- a/tools/perf/Documentation/perfconfig.example
+++ b/tools/perf/Documentation/perfconfig.example
@@ -19,3 +19,11 @@
# Default, disable using /dev/null
dir = /root/.debug
+
+[annotate]
+
+ # Defaults
+ hide_src_code = false
+ use_offset = true
+ jump_arrows = true
+ show_nr_jumps = false
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 1d3d513beb9b..0eee64cfe9a0 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -80,7 +80,7 @@ ifeq ("$(origin DEBUG)", "command line")
PERF_DEBUG = $(DEBUG)
endif
ifndef PERF_DEBUG
- CFLAGS_OPTIMIZE = -O6
+ CFLAGS_OPTIMIZE = -O6 -D_FORTIFY_SOURCE=2
endif
ifdef PARSER_DEBUG
@@ -89,7 +89,7 @@ ifdef PARSER_DEBUG
PARSER_DEBUG_CFLAGS := -DPARSER_DEBUG
endif
-CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS)
+CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS)
EXTLIBS = -lpthread -lrt -lelf -lm
ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
ALL_LDFLAGS = $(LDFLAGS)
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 806e0a286634..67522cf87405 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -215,7 +215,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
}
if (total_nr_samples == 0) {
- ui__warning("The %s file has no samples!\n", session->filename);
+ ui__error("The %s file has no samples!\n", session->filename);
goto out_delete;
}
out_delete:
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index e52d77ec7084..acd78dc28341 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -116,7 +116,7 @@ static const char * const evlist_usage[] = {
int cmd_evlist(int argc, const char **argv, const char *prefix __used)
{
struct perf_attr_details details = { .verbose = false, };
- const char *input_name;
+ const char *input_name = NULL;
const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"Input file name"),
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 4935c09dd5b5..e215ae61b2ae 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -54,6 +54,7 @@ static struct {
bool show_ext_vars;
bool show_funcs;
bool mod_events;
+ bool uprobes;
int nevents;
struct perf_probe_event events[MAX_PROBES];
struct strlist *dellist;
@@ -75,6 +76,8 @@ static int parse_probe_event(const char *str)
return -1;
}
+ pev->uprobes = params.uprobes;
+
/* Parse a perf-probe command into event */
ret = parse_perf_probe_command(str, pev);
pr_debug("%d arguments\n", pev->nargs);
@@ -82,21 +85,58 @@ static int parse_probe_event(const char *str)
return ret;
}
+static int set_target(const char *ptr)
+{
+ int found = 0;
+ const char *buf;
+
+ /*
+ * The first argument after the options can be an absolute path
+ * to an executable / library or kernel module.
+ *
+ * TODO: Support relative paths, $PATH, $LD_LIBRARY_PATH and
+ * short module names.
+ */
+ if (!params.target && ptr && *ptr == '/') {
+ params.target = ptr;
+ found = 1;
+ buf = ptr + (strlen(ptr) - 3);
+
+ if (strcmp(buf, ".ko"))
+ params.uprobes = true;
+
+ }
+
+ return found;
+}
+
static int parse_probe_event_argv(int argc, const char **argv)
{
- int i, len, ret;
+ int i, len, ret, found_target;
char *buf;
+ found_target = set_target(argv[0]);
+ if (found_target && argc == 1)
+ return 0;
+
/* Bind up rest arguments */
len = 0;
- for (i = 0; i < argc; i++)
+ for (i = 0; i < argc; i++) {
+ if (i == 0 && found_target)
+ continue;
+
len += strlen(argv[i]) + 1;
+ }
buf = zalloc(len + 1);
if (buf == NULL)
return -ENOMEM;
len = 0;
- for (i = 0; i < argc; i++)
+ for (i = 0; i < argc; i++) {
+ if (i == 0 && found_target)
+ continue;
+
len += sprintf(&buf[len], "%s ", argv[i]);
+ }
params.mod_events = true;
ret = parse_probe_event(buf);
free(buf);
@@ -125,6 +165,28 @@ static int opt_del_probe_event(const struct option *opt __used,
return 0;
}
+static int opt_set_target(const struct option *opt, const char *str,
+ int unset __used)
+{
+ int ret = -ENOENT;
+
+ if (str && !params.target) {
+ if (!strcmp(opt->long_name, "exec"))
+ params.uprobes = true;
+#ifdef DWARF_SUPPORT
+ else if (!strcmp(opt->long_name, "module"))
+ params.uprobes = false;
+#endif
+ else
+ return ret;
+
+ params.target = str;
+ ret = 0;
+ }
+
+ return ret;
+}
+
#ifdef DWARF_SUPPORT
static int opt_show_lines(const struct option *opt __used,
const char *str, int unset __used)
@@ -246,9 +308,9 @@ static const struct option options[] = {
"file", "vmlinux pathname"),
OPT_STRING('s', "source", &symbol_conf.source_prefix,
"directory", "path to kernel source"),
- OPT_STRING('m', "module", &params.target,
- "modname|path",
- "target module name (for online) or path (for offline)"),
+ OPT_CALLBACK('m', "module", NULL, "modname|path",
+ "target module name (for online) or path (for offline)",
+ opt_set_target),
#endif
OPT__DRY_RUN(&probe_event_dry_run),
OPT_INTEGER('\0', "max-probes", &params.max_probe_points,
@@ -260,6 +322,8 @@ static const struct option options[] = {
"\t\t\t(default: \"" DEFAULT_VAR_FILTER "\" for --vars,\n"
"\t\t\t \"" DEFAULT_FUNC_FILTER "\" for --funcs)",
opt_set_filter),
+ OPT_CALLBACK('x', "exec", NULL, "executable|path",
+ "target executable name or path", opt_set_target),
OPT_END()
};
@@ -310,6 +374,10 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
pr_err(" Error: Don't use --list with --funcs.\n");
usage_with_options(probe_usage, options);
}
+ if (params.uprobes) {
+ pr_warning(" Error: Don't use --list with --exec.\n");
+ usage_with_options(probe_usage, options);
+ }
ret = show_perf_probe_events();
if (ret < 0)
pr_err(" Error: Failed to show event list. (%d)\n",
@@ -333,8 +401,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
if (!params.filter)
params.filter = strfilter__new(DEFAULT_FUNC_FILTER,
NULL);
- ret = show_available_funcs(params.target,
- params.filter);
+ ret = show_available_funcs(params.target, params.filter,
+ params.uprobes);
strfilter__delete(params.filter);
if (ret < 0)
pr_err(" Error: Failed to show functions."
@@ -343,7 +411,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
}
#ifdef DWARF_SUPPORT
- if (params.show_lines) {
+ if (params.show_lines && !params.uprobes) {
if (params.mod_events) {
pr_err(" Error: Don't use --line with"
" --add/--del.\n");
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index e5cb08427e13..f95840d04e4c 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -264,7 +264,7 @@ try_again:
}
if (err == ENOENT) {
- ui__warning("The %s event is not supported.\n",
+ ui__error("The %s event is not supported.\n",
event_name(pos));
exit(EXIT_FAILURE);
}
@@ -858,8 +858,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
usage_with_options(record_usage, record_options);
if (rec->force && rec->append_file) {
- fprintf(stderr, "Can't overwrite and append at the same time."
- " You need to choose between -f and -A");
+ ui__error("Can't overwrite and append at the same time."
+ " You need to choose between -f and -A");
usage_with_options(record_usage, record_options);
} else if (rec->append_file) {
rec->write_mode = WRITE_APPEND;
@@ -868,8 +868,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
}
if (nr_cgroups && !rec->opts.target.system_wide) {
- fprintf(stderr, "cgroup monitoring only available in"
- " system-wide mode\n");
+ ui__error("cgroup monitoring only available in"
+ " system-wide mode\n");
usage_with_options(record_usage, record_options);
}
@@ -905,7 +905,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
int saved_errno = errno;
perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
- ui__warning("%s", errbuf);
+ ui__error("%s", errbuf);
err = -saved_errno;
goto out_free_fd;
@@ -933,7 +933,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
else if (rec->opts.freq) {
rec->opts.default_interval = rec->opts.freq;
} else {
- fprintf(stderr, "frequency and count are zero, aborting\n");
+ ui__error("frequency and count are zero, aborting\n");
err = -EINVAL;
goto out_free_fd;
}
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index d58e41445d0d..8c767c6bca91 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -251,13 +251,13 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (sort__has_parent) {
- ui__warning("Selected --sort parent, but no "
+ ui__error("Selected --sort parent, but no "
"callchain data. Did you call "
"'perf record' without -g?\n");
return -EINVAL;
}
if (symbol_conf.use_callchain) {
- ui__warning("Selected -g but no callchain data. Did "
+ ui__error("Selected -g but no callchain data. Did "
"you call 'perf record' without -g?\n");
return -1;
}
@@ -266,17 +266,15 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
!symbol_conf.use_callchain) {
symbol_conf.use_callchain = true;
if (callchain_register_param(&callchain_param) < 0) {
- ui__warning("Can't register callchain "
- "params.\n");
+ ui__error("Can't register callchain params.\n");
return -EINVAL;
}
}
if (sort__branch_mode == 1) {
if (!(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) {
- fprintf(stderr, "selected -b but no branch data."
- " Did you call perf record without"
- " -b?\n");
+ ui__error("Selected -b but no branch data. "
+ "Did you call perf record without -b?\n");
return -1;
}
}
@@ -420,7 +418,7 @@ static int __cmd_report(struct perf_report *rep)
}
if (nr_samples == 0) {
- ui__warning("The %s file has no samples!\n", session->filename);
+ ui__error("The %s file has no samples!\n", session->filename);
goto out_delete;
}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 6031dce0429f..871b540293e1 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -953,22 +953,22 @@ try_again:
attr->config = PERF_COUNT_SW_CPU_CLOCK;
if (counter->name) {
free(counter->name);
- counter->name = strdup(event_name(counter));
+ counter->name = NULL;
}
goto try_again;
}
if (err == ENOENT) {
- ui__warning("The %s event is not supported.\n",
+ ui__error("The %s event is not supported.\n",
event_name(counter));
goto out_err;
} else if (err == EMFILE) {
- ui__warning("Too many events are opened.\n"
+ ui__error("Too many events are opened.\n"
"Try again after reducing the number of events\n");
goto out_err;
}
- ui__warning("The sys_perf_event_open() syscall "
+ ui__error("The sys_perf_event_open() syscall "
"returned with %d (%s). /bin/dmesg "
"may provide additional information.\n"
"No CONFIG_PERF_EVENTS=y kernel support "
@@ -978,7 +978,7 @@ try_again:
}
if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) {
- ui__warning("Failed to mmap with %d (%s)\n",
+ ui__error("Failed to mmap with %d (%s)\n",
errno, strerror(errno));
goto out_err;
}
@@ -994,12 +994,12 @@ static int perf_top__setup_sample_type(struct perf_top *top)
{
if (!top->sort_has_symbols) {
if (symbol_conf.use_callchain) {
- ui__warning("Selected -g but \"sym\" not present in --sort/-s.");
+ ui__error("Selected -g but \"sym\" not present in --sort/-s.");
return -EINVAL;
}
} else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
if (callchain_register_param(&callchain_param) < 0) {
- ui__warning("Can't register callchain params.\n");
+ ui__error("Can't register callchain params.\n");
return -EINVAL;
}
}
@@ -1041,7 +1041,7 @@ static int __cmd_top(struct perf_top *top)
if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
display_thread), top)) {
- printf("Could not create display thread.\n");
+ ui__error("Could not create display thread.\n");
exit(-1);
}
@@ -1050,7 +1050,7 @@ static int __cmd_top(struct perf_top *top)
param.sched_priority = top->realtime_prio;
if (sched_setscheduler(0, SCHED_FIFO, &param)) {
- printf("Could not set realtime priority.\n");
+ ui__error("Could not set realtime priority.\n");
exit(-1);
}
}
@@ -1274,7 +1274,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
int saved_errno = errno;
perf_target__strerror(&top.target, status, errbuf, BUFSIZ);
- ui__warning("%s", errbuf);
+ ui__error("%s", errbuf);
status = -saved_errno;
goto out_delete_evlist;
@@ -1288,7 +1288,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
if (!top.evlist->nr_entries &&
perf_evlist__add_default(top.evlist) < 0) {
- pr_err("Not enough memory for event selector list\n");
+ ui__error("Not enough memory for event selector list\n");
return -ENOMEM;
}
@@ -1305,7 +1305,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
else if (top.freq) {
top.default_interval = top.freq;
} else {
- fprintf(stderr, "frequency and count are zero, aborting\n");
+ ui__error("frequency and count are zero, aborting\n");
exit(EXIT_FAILURE);
}
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 14f1034f14f9..f960ccb2edc6 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -227,7 +227,7 @@ struct perf_record_opts {
unsigned int freq;
unsigned int mmap_pages;
unsigned int user_freq;
- int branch_stack;
+ u64 branch_stack;
u64 default_interval;
u64 user_interval;
};
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index cde4d0f0ddb9..1818a531f1d3 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -35,16 +35,16 @@ int ui_browser__set_color(struct ui_browser *browser, int color)
return ret;
}
-void ui_browser__set_percent_color(struct ui_browser *self,
+void ui_browser__set_percent_color(struct ui_browser *browser,
double percent, bool current)
{
- int color = ui_browser__percent_color(self, percent, current);
- ui_browser__set_color(self, color);
+ int color = ui_browser__percent_color(browser, percent, current);
+ ui_browser__set_color(browser, color);
}
-void ui_browser__gotorc(struct ui_browser *self, int y, int x)
+void ui_browser__gotorc(struct ui_browser *browser, int y, int x)
{
- SLsmg_gotorc(self->y + y, self->x + x);
+ SLsmg_gotorc(browser->y + y, browser->x + x);
}
static struct list_head *
@@ -73,23 +73,23 @@ ui_browser__list_head_filter_prev_entries(struct ui_browser *browser,
return NULL;
}
-void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence)
+void ui_browser__list_head_seek(struct ui_browser *browser, off_t offset, int whence)
{
- struct list_head *head = self->entries;
+ struct list_head *head = browser->entries;
struct list_head *pos;
- if (self->nr_entries == 0)
+ if (browser->nr_entries == 0)
return;
switch (whence) {
case SEEK_SET:
- pos = ui_browser__list_head_filter_entries(self, head->next);
+ pos = ui_browser__list_head_filter_entries(browser, head->next);
break;
case SEEK_CUR:
- pos = self->top;
+ pos = browser->top;
break;
case SEEK_END:
- pos = ui_browser__list_head_filter_prev_entries(self, head->prev);
+ pos = ui_browser__list_head_filter_prev_entries(browser, head->prev);
break;
default:
return;
@@ -99,18 +99,18 @@ void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whenc
if (offset > 0) {
while (offset-- != 0)
- pos = ui_browser__list_head_filter_entries(self, pos->next);
+ pos = ui_browser__list_head_filter_entries(browser, pos->next);
} else {
while (offset++ != 0)
- pos = ui_browser__list_head_filter_prev_entries(self, pos->prev);
+ pos = ui_browser__list_head_filter_prev_entries(browser, pos->prev);
}
- self->top = pos;
+ browser->top = pos;
}
-void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence)
+void ui_browser__rb_tree_seek(struct ui_browser *browser, off_t offset, int whence)
{
- struct rb_root *root = self->entries;
+ struct rb_root *root = browser->entries;
struct rb_node *nd;
switch (whence) {
@@ -118,7 +118,7 @@ void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence)
nd = rb_first(root);
break;
case SEEK_CUR:
- nd = self->top;
+ nd = browser->top;
break;
case SEEK_END:
nd = rb_last(root);
@@ -135,23 +135,23 @@ void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence)
nd = rb_prev(nd);
}
- self->top = nd;
+ browser->top = nd;
}
-unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self)
+unsigned int ui_browser__rb_tree_refresh(struct ui_browser *browser)
{
struct rb_node *nd;
int row = 0;
- if (self->top == NULL)
- self->top = rb_first(self->entries);
+ if (browser->top == NULL)
+ browser->top = rb_first(browser->entries);
- nd = self->top;
+ nd = browser->top;
while (nd != NULL) {
- ui_browser__gotorc(self, row, 0);
- self->write(self, nd, row);
- if (++row == self->height)
+ ui_browser__gotorc(browser, row, 0);
+ browser->write(browser, nd, row);
+ if (++row == browser->height)
break;
nd = rb_next(nd);
}
@@ -159,17 +159,17 @@ unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self)
return row;
}
-bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row)
+bool ui_browser__is_current_entry(struct ui_browser *browser, unsigned row)
{
- return self->top_idx + row == self->index;
+ return browser->top_idx + row == browser->index;
}
-void ui_browser__refresh_dimensions(struct ui_browser *self)
+void ui_browser__refresh_dimensions(struct ui_browser *browser)
{
- self->width = SLtt_Screen_Cols - 1;
- self->height = SLtt_Screen_Rows - 2;
- self->y = 1;
- self->x = 0;
+ browser->width = SLtt_Screen_Cols - 1;
+ browser->height = SLtt_Screen_Rows - 2;
+ browser->y = 1;
+ browser->x = 0;
}
void ui_browser__handle_resize(struct ui_browser *browser)
@@ -225,10 +225,10 @@ bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text)
return key == K_ENTER || toupper(key) == 'Y';
}
-void ui_browser__reset_index(struct ui_browser *self)
+void ui_browser__reset_index(struct ui_browser *browser)
{
- self->index = self->top_idx = 0;
- self->seek(self, 0, SEEK_SET);
+ browser->index = browser->top_idx = 0;
+ browser->seek(browser, 0, SEEK_SET);
}
void __ui_browser__show_title(struct ui_browser *browser, const char *title)
@@ -245,26 +245,26 @@ void ui_browser__show_title(struct ui_browser *browser, const char *title)
pthread_mutex_unlock(&ui__lock);
}
-int ui_browser__show(struct ui_browser *self, const char *title,
+int ui_browser__show(struct ui_browser *browser, const char *title,
const char *helpline, ...)
{
int err;
va_list ap;
- ui_browser__refresh_dimensions(self);
+ ui_browser__refresh_dimensions(browser);
pthread_mutex_lock(&ui__lock);
- __ui_browser__show_title(self, title);
+ __ui_browser__show_title(browser, title);
- self->title = title;
- free(self->helpline);
- self->helpline = NULL;
+ browser->title = title;
+ free(browser->helpline);
+ browser->helpline = NULL;
va_start(ap, helpline);
- err = vasprintf(&self->helpline, helpline, ap);
+ err = vasprintf(&browser->helpline, helpline, ap);
va_end(ap);
if (err > 0)
- ui_helpline__push(self->helpline);
+ ui_helpline__push(browser->helpline);
pthread_mutex_unlock(&ui__lock);
return err ? 0 : -1;
}
@@ -350,7 +350,7 @@ void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries)
browser->seek(browser, browser->top_idx, SEEK_SET);
}
-int ui_browser__run(struct ui_browser *self, int delay_secs)
+int ui_browser__run(struct ui_browser *browser, int delay_secs)
{
int err, key;
@@ -358,7 +358,7 @@ int ui_browser__run(struct ui_browser *self, int delay_secs)
off_t offset;
pthread_mutex_lock(&ui__lock);
- err = __ui_browser__refresh(self);
+ err = __ui_browser__refresh(browser);
SLsmg_refresh();
pthread_mutex_unlock(&ui__lock);
if (err < 0)
@@ -368,18 +368,18 @@ int ui_browser__run(struct ui_browser *self, int delay_secs)
if (key == K_RESIZE) {
ui__refresh_dimensions(false);
- ui_browser__refresh_dimensions(self);
- __ui_browser__show_title(self, self->title);
- ui_helpline__puts(self->helpline);
+ ui_browser__refresh_dimensions(browser);
+ __ui_browser__show_title(browser, browser->title);
+ ui_helpline__puts(browser->helpline);
continue;
}
- if (self->use_navkeypressed && !self->navkeypressed) {
+ if (browser->use_navkeypressed && !browser->navkeypressed) {
if (key == K_DOWN || key == K_UP ||
key == K_PGDN || key == K_PGUP ||
key == K_HOME || key == K_END ||
key == ' ') {
- self->navkeypressed = true;
+ browser->navkeypressed = true;
continue;
} else
return key;
@@ -387,59 +387,59 @@ int ui_browser__run(struct ui_browser *self, int delay_secs)
switch (key) {
case K_DOWN:
- if (self->index == self->nr_entries - 1)
+ if (browser->index == browser->nr_entries - 1)
break;
- ++self->index;
- if (self->index == self->top_idx + self->height) {
- ++self->top_idx;
- self->seek(self, +1, SEEK_CUR);
+ ++browser->index;
+ if (browser->index == browser->top_idx + browser->height) {
+ ++browser->top_idx;
+ browser->seek(browser, +1, SEEK_CUR);
}
break;
case K_UP:
- if (self->index == 0)
+ if (browser->index == 0)
break;
- --self->index;
- if (self->index < self->top_idx) {
- --self->top_idx;
- self->seek(self, -1, SEEK_CUR);
+ --browser->index;
+ if (browser->index < browser->top_idx) {
+ --browser->top_idx;
+ browser->seek(browser, -1, SEEK_CUR);
}
break;
case K_PGDN:
case ' ':
- if (self->top_idx + self->height > self->nr_entries - 1)
+ if (browser->top_idx + browser->height > browser->nr_entries - 1)
break;
- offset = self->height;
- if (self->index + offset > self->nr_entries - 1)
- offset = self->nr_entries - 1 - self->index;
- self->index += offset;
- self->top_idx += offset;
- self->seek(self, +offset, SEEK_CUR);
+ offset = browser->height;
+ if (browser->index + offset > browser->nr_entries - 1)
+ offset = browser->nr_entries - 1 - browser->index;
+ browser->index += offset;
+ browser->top_idx += offset;
+ browser->seek(browser, +offset, SEEK_CUR);
break;
case K_PGUP:
- if (self->top_idx == 0)
+ if (browser->top_idx == 0)
break;
- if (self->top_idx < self->height)
- offset = self->top_idx;
+ if (browser->top_idx < browser->height)
+ offset = browser->top_idx;
else
- offset = self->height;
+ offset = browser->height;
- self->index -= offset;
- self->top_idx -= offset;
- self->seek(self, -offset, SEEK_CUR);
+ browser->index -= offset;
+ browser->top_idx -= offset;
+ browser->seek(browser, -offset, SEEK_CUR);
break;
case K_HOME:
- ui_browser__reset_index(self);
+ ui_browser__reset_index(browser);
break;
case K_END:
- offset = self->height - 1;
- if (offset >= self->nr_entries)
- offset = self->nr_entries - 1;
+ offset = browser->height - 1;
+ if (offset >= browser->nr_entries)
+ offset = browser->nr_entries - 1;
- self->index = self->nr_entries - 1;
- self->top_idx = self->index - offset;
- self->seek(self, -offset, SEEK_END);
+ browser->index = browser->nr_entries - 1;
+ browser->top_idx = browser->index - offset;
+ browser->seek(browser, -offset, SEEK_END);
break;
default:
return key;
@@ -448,22 +448,22 @@ int ui_browser__run(struct ui_browser *self, int delay_secs)
return -1;
}
-unsigned int ui_browser__list_head_refresh(struct ui_browser *self)
+unsigned int ui_browser__list_head_refresh(struct ui_browser *browser)
{
struct list_head *pos;
- struct list_head *head = self->entries;
+ struct list_head *head = browser->entries;
int row = 0;
- if (self->top == NULL || self->top == self->entries)
- self->top = ui_browser__list_head_filter_entries(self, head->next);
+ if (browser->top == NULL || browser->top == browser->entries)
+ browser->top = ui_browser__list_head_filter_entries(browser, head->next);
- pos = self->top;
+ pos = browser->top;
list_for_each_from(pos, head) {
- if (!self->filter || !self->filter(self, pos)) {
- ui_browser__gotorc(self, row, 0);
- self->write(self, pos, row);
- if (++row == self->height)
+ if (!browser->filter || !browser->filter(browser, pos)) {
+ ui_browser__gotorc(browser, row, 0);
+ browser->write(browser, pos, row);
+ if (++row == browser->height)
break;
}
}
@@ -708,4 +708,6 @@ void ui_browser__init(void)
struct ui_browser__colorset *c = &ui_browser__colorsets[i++];
sltt_set_color(c->colorset, c->name, c->fg, c->bg);
}
+
+ annotate_browser__init();
}
diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
index dd96d8229902..af70314605e5 100644
--- a/tools/perf/ui/browser.h
+++ b/tools/perf/ui/browser.h
@@ -69,4 +69,5 @@ void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whenc
unsigned int ui_browser__list_head_refresh(struct ui_browser *self);
void ui_browser__init(void);
+void annotate_browser__init(void);
#endif /* _PERF_UI_BROWSER_H_ */
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 6e0ef79be169..4deea6aaf927 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -19,6 +19,16 @@ struct browser_disasm_line {
int jump_sources;
};
+static struct annotate_browser_opt {
+ bool hide_src_code,
+ use_offset,
+ jump_arrows,
+ show_nr_jumps;
+} annotate_browser__opts = {
+ .use_offset = true,
+ .jump_arrows = true,
+};
+
struct annotate_browser {
struct ui_browser b;
struct rb_root entries;
@@ -30,10 +40,6 @@ struct annotate_browser {
int nr_entries;
int max_jump_sources;
int nr_jumps;
- bool hide_src_code;
- bool use_offset;
- bool jump_arrows;
- bool show_nr_jumps;
bool searching_backwards;
u8 addr_width;
u8 jumps_width;
@@ -48,11 +54,9 @@ static inline struct browser_disasm_line *disasm_line__browser(struct disasm_lin
return (struct browser_disasm_line *)(dl + 1);
}
-static bool disasm_line__filter(struct ui_browser *browser, void *entry)
+static bool disasm_line__filter(struct ui_browser *browser __used, void *entry)
{
- struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
-
- if (ab->hide_src_code) {
+ if (annotate_browser__opts.hide_src_code) {
struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
return dl->offset == -1;
}
@@ -79,30 +83,30 @@ static int annotate_browser__set_jumps_percent_color(struct annotate_browser *br
return ui_browser__set_color(&browser->b, color);
}
-static void annotate_browser__write(struct ui_browser *self, void *entry, int row)
+static void annotate_browser__write(struct ui_browser *browser, void *entry, int row)
{
- struct annotate_browser *ab = container_of(self, struct annotate_browser, b);
+ struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
struct browser_disasm_line *bdl = disasm_line__browser(dl);
- bool current_entry = ui_browser__is_current_entry(self, row);
- bool change_color = (!ab->hide_src_code &&
- (!current_entry || (self->use_navkeypressed &&
- !self->navkeypressed)));
- int width = self->width, printed;
+ bool current_entry = ui_browser__is_current_entry(browser, row);
+ bool change_color = (!annotate_browser__opts.hide_src_code &&
+ (!current_entry || (browser->use_navkeypressed &&
+ !browser->navkeypressed)));
+ int width = browser->width, printed;
char bf[256];
if (dl->offset != -1 && bdl->percent != 0.0) {
- ui_browser__set_percent_color(self, bdl->percent, current_entry);
+ ui_browser__set_percent_color(browser, bdl->percent, current_entry);
slsmg_printf("%6.2f ", bdl->percent);
} else {
- ui_browser__set_percent_color(self, 0, current_entry);
+ ui_browser__set_percent_color(browser, 0, current_entry);
slsmg_write_nstring(" ", 7);
}
SLsmg_write_char(' ');
/* The scroll bar isn't being used */
- if (!self->navkeypressed)
+ if (!browser->navkeypressed)
width += 1;
if (!*dl->line)
@@ -116,14 +120,14 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
u64 addr = dl->offset;
int color = -1;
- if (!ab->use_offset)
+ if (!annotate_browser__opts.use_offset)
addr += ab->start;
- if (!ab->use_offset) {
+ if (!annotate_browser__opts.use_offset) {
printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
} else {
if (bdl->jump_sources) {
- if (ab->show_nr_jumps) {
+ if (annotate_browser__opts.show_nr_jumps) {
int prev;
printed = scnprintf(bf, sizeof(bf), "%*d ",
ab->jumps_width,
@@ -131,7 +135,7 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
prev = annotate_browser__set_jumps_percent_color(ab, bdl->jump_sources,
current_entry);
slsmg_write_nstring(bf, printed);
- ui_browser__set_color(self, prev);
+ ui_browser__set_color(browser, prev);
}
printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
@@ -143,19 +147,19 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
}
if (change_color)
- color = ui_browser__set_color(self, HE_COLORSET_ADDR);
+ color = ui_browser__set_color(browser, HE_COLORSET_ADDR);
slsmg_write_nstring(bf, printed);
if (change_color)
- ui_browser__set_color(self, color);
+ ui_browser__set_color(browser, color);
if (dl->ins && dl->ins->ops->scnprintf) {
if (ins__is_jump(dl->ins)) {
bool fwd = dl->ops.target.offset > (u64)dl->offset;
- ui_browser__write_graph(self, fwd ? SLSMG_DARROW_CHAR :
+ ui_browser__write_graph(browser, fwd ? SLSMG_DARROW_CHAR :
SLSMG_UARROW_CHAR);
SLsmg_write_char(' ');
} else if (ins__is_call(dl->ins)) {
- ui_browser__write_graph(self, SLSMG_RARROW_CHAR);
+ ui_browser__write_graph(browser, SLSMG_RARROW_CHAR);
SLsmg_write_char(' ');
} else {
slsmg_write_nstring(" ", 2);
@@ -164,12 +168,12 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
if (strcmp(dl->name, "retq")) {
slsmg_write_nstring(" ", 2);
} else {
- ui_browser__write_graph(self, SLSMG_LARROW_CHAR);
+ ui_browser__write_graph(browser, SLSMG_LARROW_CHAR);
SLsmg_write_char(' ');
}
}
- disasm_line__scnprintf(dl, bf, sizeof(bf), !ab->use_offset);
+ disasm_line__scnprintf(dl, bf, sizeof(bf), !annotate_browser__opts.use_offset);
slsmg_write_nstring(bf, width - 10 - printed);
}
@@ -184,7 +188,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
struct browser_disasm_line *btarget, *bcursor;
unsigned int from, to;
- if (!cursor->ins || !ins__is_jump(cursor->ins) ||
+ if (!cursor || !cursor->ins || !ins__is_jump(cursor->ins) ||
!disasm_line__has_offset(cursor))
return;
@@ -195,7 +199,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
bcursor = disasm_line__browser(cursor);
btarget = disasm_line__browser(target);
- if (ab->hide_src_code) {
+ if (annotate_browser__opts.hide_src_code) {
from = bcursor->idx_asm;
to = btarget->idx_asm;
} else {
@@ -209,10 +213,9 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
static unsigned int annotate_browser__refresh(struct ui_browser *browser)
{
- struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
int ret = ui_browser__list_head_refresh(browser);
- if (ab->jump_arrows)
+ if (annotate_browser__opts.jump_arrows)
annotate_browser__draw_current_jump(browser);
ui_browser__set_color(browser, HE_COLORSET_NORMAL);
@@ -272,27 +275,27 @@ static void disasm_rb_tree__insert(struct rb_root *root, struct browser_disasm_l
rb_insert_color(&bdl->rb_node, root);
}
-static void annotate_browser__set_top(struct annotate_browser *self,
+static void annotate_browser__set_top(struct annotate_browser *browser,
struct disasm_line *pos, u32 idx)
{
unsigned back;
- ui_browser__refresh_dimensions(&self->b);
- back = self->b.height / 2;
- self->b.top_idx = self->b.index = idx;
+ ui_browser__refresh_dimensions(&browser->b);
+ back = browser->b.height / 2;
+ browser->b.top_idx = browser->b.index = idx;
- while (self->b.top_idx != 0 && back != 0) {
+ while (browser->b.top_idx != 0 && back != 0) {
pos = list_entry(pos->node.prev, struct disasm_line, node);
- if (disasm_line__filter(&self->b, &pos->node))
+ if (disasm_line__filter(&browser->b, &pos->node))
continue;
- --self->b.top_idx;
+ --browser->b.top_idx;
--back;
}
- self->b.top = pos;
- self->b.navkeypressed = true;
+ browser->b.top = pos;
+ browser->b.navkeypressed = true;
}
static void annotate_browser__set_rb_top(struct annotate_browser *browser,
@@ -300,10 +303,14 @@ static void annotate_browser__set_rb_top(struct annotate_browser *browser,
{
struct browser_disasm_line *bpos;
struct disasm_line *pos;
+ u32 idx;
bpos = rb_entry(nd, struct browser_disasm_line, rb_node);
pos = ((struct disasm_line *)bpos) - 1;
- annotate_browser__set_top(browser, pos, bpos->idx);
+ idx = bpos->idx;
+ if (annotate_browser__opts.hide_src_code)
+ idx = bpos->idx_asm;
+ annotate_browser__set_top(browser, pos, idx);
browser->curr_hot = nd;
}
@@ -343,12 +350,12 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
dl = list_entry(browser->b.top, struct disasm_line, node);
bdl = disasm_line__browser(dl);
- if (browser->hide_src_code) {
+ if (annotate_browser__opts.hide_src_code) {
if (bdl->idx_asm < offset)
offset = bdl->idx;
browser->b.nr_entries = browser->nr_entries;
- browser->hide_src_code = false;
+ annotate_browser__opts.hide_src_code = false;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
browser->b.top_idx = bdl->idx - offset;
browser->b.index = bdl->idx;
@@ -363,7 +370,7 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
offset = bdl->idx_asm;
browser->b.nr_entries = browser->nr_asm_entries;
- browser->hide_src_code = true;
+ annotate_browser__opts.hide_src_code = true;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
browser->b.top_idx = bdl->idx_asm - offset;
browser->b.index = bdl->idx_asm;
@@ -372,6 +379,12 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
return true;
}
+static void annotate_browser__init_asm_mode(struct annotate_browser *browser)
+{
+ ui_browser__reset_index(&browser->b);
+ browser->b.nr_entries = browser->nr_asm_entries;
+}
+
static bool annotate_browser__callq(struct annotate_browser *browser,
int evidx, void (*timer)(void *arg),
void *arg, int delay_secs)
@@ -574,33 +587,46 @@ bool annotate_browser__continue_search_reverse(struct annotate_browser *browser,
return __annotate_browser__search_reverse(browser);
}
-static int annotate_browser__run(struct annotate_browser *self, int evidx,
+static void annotate_browser__update_addr_width(struct annotate_browser *browser)
+{
+ if (annotate_browser__opts.use_offset)
+ browser->target_width = browser->min_addr_width;
+ else
+ browser->target_width = browser->max_addr_width;
+
+ browser->addr_width = browser->target_width;
+
+ if (annotate_browser__opts.show_nr_jumps)
+ browser->addr_width += browser->jumps_width + 1;
+}
+
+static int annotate_browser__run(struct annotate_browser *browser, int evidx,
void(*timer)(void *arg),
void *arg, int delay_secs)
{
struct rb_node *nd = NULL;
- struct map_symbol *ms = self->b.priv;
+ struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
const char *help = "Press 'h' for help on key bindings";
int key;
- if (ui_browser__show(&self->b, sym->name, help) < 0)
+ if (ui_browser__show(&browser->b, sym->name, help) < 0)
return -1;
- annotate_browser__calc_percent(self, evidx);
+ annotate_browser__calc_percent(browser, evidx);
- if (self->curr_hot) {
- annotate_browser__set_rb_top(self, self->curr_hot);
- self->b.navkeypressed = false;
+ if (browser->curr_hot) {
+ annotate_browser__set_rb_top(browser, browser->curr_hot);
+ browser->b.navkeypressed = false;
}
- nd = self->curr_hot;
+ nd = browser->curr_hot;
while (1) {
- key = ui_browser__run(&self->b, delay_secs);
+ key = ui_browser__run(&browser->b, delay_secs);
if (delay_secs != 0) {
- annotate_browser__calc_percent(self, evidx);
+ annotate_browser__calc_percent(browser, evidx);
/*
* Current line focus got out of the list of most active
* lines, NULL it so that if TAB|UNTAB is pressed, we
@@ -622,21 +648,21 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx,
if (nd != NULL) {
nd = rb_prev(nd);
if (nd == NULL)
- nd = rb_last(&self->entries);
+ nd = rb_last(&browser->entries);
} else
- nd = self->curr_hot;
+ nd = browser->curr_hot;
break;
case K_UNTAB:
if (nd != NULL)
nd = rb_next(nd);
if (nd == NULL)
- nd = rb_first(&self->entries);
+ nd = rb_first(&browser->entries);
else
- nd = self->curr_hot;
+ nd = browser->curr_hot;
break;
case K_F1:
case 'h':
- ui_browser__help_window(&self->b,
+ ui_browser__help_window(&browser->b,
"UP/DOWN/PGUP\n"
"PGDN/SPACE Navigate\n"
"q/ESC/CTRL+C Exit\n\n"
@@ -652,57 +678,62 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx,
"? Search previous string\n");
continue;
case 'H':
- nd = self->curr_hot;
+ nd = browser->curr_hot;
break;
case 's':
- if (annotate_browser__toggle_source(self))
+ if (annotate_browser__toggle_source(browser))
ui_helpline__puts(help);
continue;
case 'o':
- self->use_offset = !self->use_offset;
- if (self->use_offset)
- self->target_width = self->min_addr_width;
- else
- self->target_width = self->max_addr_width;
-update_addr_width:
- self->addr_width = self->target_width;
- if (self->show_nr_jumps)
- self->addr_width += self->jumps_width + 1;
+ annotate_browser__opts.use_offset = !annotate_browser__opts.use_offset;
+ annotate_browser__update_addr_width(browser);
continue;
case 'j':
- self->jump_arrows = !self->jump_arrows;
+ annotate_browser__opts.jump_arrows = !annotate_browser__opts.jump_arrows;
continue;
case 'J':
- self->show_nr_jumps = !self->show_nr_jumps;
- goto update_addr_width;
+ annotate_browser__opts.show_nr_jumps = !annotate_browser__opts.show_nr_jumps;
+ annotate_browser__update_addr_width(browser);
+ continue;
case '/':
- if (annotate_browser__search(self, delay_secs)) {
+ if (annotate_browser__search(browser, delay_secs)) {
show_help:
ui_helpline__puts(help);
}
continue;
case 'n':
- if (self->searching_backwards ?
- annotate_browser__continue_search_reverse(self, delay_secs) :
- annotate_browser__continue_search(self, delay_secs))
+ if (browser->searching_backwards ?
+ annotate_browser__continue_search_reverse(browser, delay_secs) :
+ annotate_browser__continue_search(browser, delay_secs))
goto show_help;
continue;
case '?':
- if (annotate_browser__search_reverse(self, delay_secs))
+ if (annotate_browser__search_reverse(browser, delay_secs))
goto show_help;
continue;
+ case 'D': {
+ static int seq;
+ ui_helpline__pop();
+ ui_helpline__fpush("%d: nr_ent=%d, height=%d, idx=%d, top_idx=%d, nr_asm_entries=%d",
+ seq++, browser->b.nr_entries,
+ browser->b.height,
+ browser->b.index,
+ browser->b.top_idx,
+ browser->nr_asm_entries);
+ }
+ continue;
case K_ENTER:
case K_RIGHT:
- if (self->selection == NULL)
+ if (browser->selection == NULL)
ui_helpline__puts("Huh? No selection. Report to linux-kernel@vger.kernel.org");
- else if (self->selection->offset == -1)
+ else if (browser->selection->offset == -1)
ui_helpline__puts("Actions are only available for assembly lines.");
- else if (!self->selection->ins) {
- if (strcmp(self->selection->name, "retq"))
+ else if (!browser->selection->ins) {
+ if (strcmp(browser->selection->name, "retq"))
goto show_sup_ins;
goto out;
- } else if (!(annotate_browser__jump(self) ||
- annotate_browser__callq(self, evidx, timer, arg, delay_secs))) {
+ } else if (!(annotate_browser__jump(browser) ||
+ annotate_browser__callq(browser, evidx, timer, arg, delay_secs))) {
show_sup_ins:
ui_helpline__puts("Actions are only available for 'callq', 'retq' & jump instructions.");
}
@@ -717,10 +748,10 @@ show_sup_ins:
}
if (nd != NULL)
- annotate_browser__set_rb_top(self, nd);
+ annotate_browser__set_rb_top(browser, nd);
}
out:
- ui_browser__hide(&self->b);
+ ui_browser__hide(&browser->b);
return key;
}
@@ -797,8 +828,6 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
.priv = &ms,
.use_navkeypressed = true,
},
- .use_offset = true,
- .jump_arrows = true,
};
int ret = -1;
@@ -855,6 +884,12 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
browser.b.nr_entries = browser.nr_entries;
browser.b.entries = &notes->src->source,
browser.b.width += 18; /* Percentage */
+
+ if (annotate_browser__opts.hide_src_code)
+ annotate_browser__init_asm_mode(&browser);
+
+ annotate_browser__update_addr_width(&browser);
+
ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs);
list_for_each_entry_safe(pos, n, &notes->src->source, node) {
list_del(&pos->node);
@@ -865,3 +900,52 @@ out_free_offsets:
free(browser.offsets);
return ret;
}
+
+#define ANNOTATE_CFG(n) \
+ { .name = #n, .value = &annotate_browser__opts.n, }
+
+/*
+ * Keep the entries sorted, they are bsearch'ed
+ */
+static struct annotate__config {
+ const char *name;
+ bool *value;
+} annotate__configs[] = {
+ ANNOTATE_CFG(hide_src_code),
+ ANNOTATE_CFG(jump_arrows),
+ ANNOTATE_CFG(show_nr_jumps),
+ ANNOTATE_CFG(use_offset),
+};
+
+#undef ANNOTATE_CFG
+
+static int annotate_config__cmp(const void *name, const void *cfgp)
+{
+ const struct annotate__config *cfg = cfgp;
+
+ return strcmp(name, cfg->name);
+}
+
+static int annotate__config(const char *var, const char *value, void *data __used)
+{
+ struct annotate__config *cfg;
+ const char *name;
+
+ if (prefixcmp(var, "annotate.") != 0)
+ return 0;
+
+ name = var + 9;
+ cfg = bsearch(name, annotate__configs, ARRAY_SIZE(annotate__configs),
+ sizeof(struct annotate__config), annotate_config__cmp);
+
+ if (cfg == NULL)
+ return -1;
+
+ *cfg->value = perf_config_bool(name, value);
+ return 0;
+}
+
+void annotate_browser__init(void)
+{
+ perf_config(annotate__config, NULL);
+}
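
For reference, the annotate.* handling added above is a sorted name table searched with bsearch(); the sketch below is a standalone reduction of that pattern (the crude "true"-string parsing stands in for perf_config_bool(), and main() is only there to exercise it):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

/* Same idea as annotate__configs[]: keep the entries sorted so bsearch() works. */
static bool hide_src_code, jump_arrows, show_nr_jumps, use_offset;

static struct cfg {
	const char *name;
	bool *value;
} cfgs[] = {
	{ "hide_src_code", &hide_src_code },
	{ "jump_arrows",   &jump_arrows   },
	{ "show_nr_jumps", &show_nr_jumps },
	{ "use_offset",    &use_offset    },
};

static int cfg_cmp(const void *name, const void *cfgp)
{
	return strcmp(name, ((const struct cfg *)cfgp)->name);
}

/* Accepts ~/.perfconfig style keys such as "annotate.hide_src_code". */
static int set_cfg(const char *var, const char *value)
{
	struct cfg *c;

	if (strncmp(var, "annotate.", 9) != 0)
		return 0;				/* not an annotate.* key, ignore */

	c = bsearch(var + 9, cfgs, sizeof(cfgs) / sizeof(cfgs[0]),
		    sizeof(cfgs[0]), cfg_cmp);
	if (c == NULL)
		return -1;				/* unknown annotate.* key */

	*c->value = strcmp(value, "true") == 0;		/* stand-in for perf_config_bool() */
	return 0;
}

int main(void)
{
	set_cfg("annotate.hide_src_code", "true");
	printf("hide_src_code=%d\n", hide_src_code);
	return 0;
}
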
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index a372a4b02635..53f6697d014e 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -26,21 +26,21 @@ struct hist_browser {
bool has_symbols;
};
-static int hists__browser_title(struct hists *self, char *bf, size_t size,
+static int hists__browser_title(struct hists *hists, char *bf, size_t size,
const char *ev_name);
-static void hist_browser__refresh_dimensions(struct hist_browser *self)
+static void hist_browser__refresh_dimensions(struct hist_browser *browser)
{
/* 3 == +/- toggle symbol before actual hist_entry rendering */
- self->b.width = 3 + (hists__sort_list_width(self->hists) +
+ browser->b.width = 3 + (hists__sort_list_width(browser->hists) +
sizeof("[k]"));
}
-static void hist_browser__reset(struct hist_browser *self)
+static void hist_browser__reset(struct hist_browser *browser)
{
- self->b.nr_entries = self->hists->nr_entries;
- hist_browser__refresh_dimensions(self);
- ui_browser__reset_index(&self->b);
+ browser->b.nr_entries = browser->hists->nr_entries;
+ hist_browser__refresh_dimensions(browser);
+ ui_browser__reset_index(&browser->b);
}
static char tree__folded_sign(bool unfolded)
@@ -48,32 +48,32 @@ static char tree__folded_sign(bool unfolded)
return unfolded ? '-' : '+';
}
-static char map_symbol__folded(const struct map_symbol *self)
+static char map_symbol__folded(const struct map_symbol *ms)
{
- return self->has_children ? tree__folded_sign(self->unfolded) : ' ';
+ return ms->has_children ? tree__folded_sign(ms->unfolded) : ' ';
}
-static char hist_entry__folded(const struct hist_entry *self)
+static char hist_entry__folded(const struct hist_entry *he)
{
- return map_symbol__folded(&self->ms);
+ return map_symbol__folded(&he->ms);
}
-static char callchain_list__folded(const struct callchain_list *self)
+static char callchain_list__folded(const struct callchain_list *cl)
{
- return map_symbol__folded(&self->ms);
+ return map_symbol__folded(&cl->ms);
}
-static void map_symbol__set_folding(struct map_symbol *self, bool unfold)
+static void map_symbol__set_folding(struct map_symbol *ms, bool unfold)
{
- self->unfolded = unfold ? self->has_children : false;
+ ms->unfolded = unfold ? ms->has_children : false;
}
-static int callchain_node__count_rows_rb_tree(struct callchain_node *self)
+static int callchain_node__count_rows_rb_tree(struct callchain_node *node)
{
int n = 0;
struct rb_node *nd;
- for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
struct callchain_list *chain;
char folded_sign = ' '; /* No children */
@@ -123,23 +123,23 @@ static int callchain__count_rows(struct rb_root *chain)
return n;
}
-static bool map_symbol__toggle_fold(struct map_symbol *self)
+static bool map_symbol__toggle_fold(struct map_symbol *ms)
{
- if (!self)
+ if (!ms)
return false;
- if (!self->has_children)
+ if (!ms->has_children)
return false;
- self->unfolded = !self->unfolded;
+ ms->unfolded = !ms->unfolded;
return true;
}
-static void callchain_node__init_have_children_rb_tree(struct callchain_node *self)
+static void callchain_node__init_have_children_rb_tree(struct callchain_node *node)
{
- struct rb_node *nd = rb_first(&self->rb_root);
+ struct rb_node *nd = rb_first(&node->rb_root);
- for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
struct callchain_list *chain;
bool first = true;
@@ -158,49 +158,49 @@ static void callchain_node__init_have_children_rb_tree(struct callchain_node *se
}
}
-static void callchain_node__init_have_children(struct callchain_node *self)
+static void callchain_node__init_have_children(struct callchain_node *node)
{
struct callchain_list *chain;
- list_for_each_entry(chain, &self->val, list)
- chain->ms.has_children = !RB_EMPTY_ROOT(&self->rb_root);
+ list_for_each_entry(chain, &node->val, list)
+ chain->ms.has_children = !RB_EMPTY_ROOT(&node->rb_root);
- callchain_node__init_have_children_rb_tree(self);
+ callchain_node__init_have_children_rb_tree(node);
}
-static void callchain__init_have_children(struct rb_root *self)
+static void callchain__init_have_children(struct rb_root *root)
{
struct rb_node *nd;
- for (nd = rb_first(self); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
callchain_node__init_have_children(node);
}
}
-static void hist_entry__init_have_children(struct hist_entry *self)
+static void hist_entry__init_have_children(struct hist_entry *he)
{
- if (!self->init_have_children) {
- self->ms.has_children = !RB_EMPTY_ROOT(&self->sorted_chain);
- callchain__init_have_children(&self->sorted_chain);
- self->init_have_children = true;
+ if (!he->init_have_children) {
+ he->ms.has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
+ callchain__init_have_children(&he->sorted_chain);
+ he->init_have_children = true;
}
}
-static bool hist_browser__toggle_fold(struct hist_browser *self)
+static bool hist_browser__toggle_fold(struct hist_browser *browser)
{
- if (map_symbol__toggle_fold(self->selection)) {
- struct hist_entry *he = self->he_selection;
+ if (map_symbol__toggle_fold(browser->selection)) {
+ struct hist_entry *he = browser->he_selection;
hist_entry__init_have_children(he);
- self->hists->nr_entries -= he->nr_rows;
+ browser->hists->nr_entries -= he->nr_rows;
if (he->ms.unfolded)
he->nr_rows = callchain__count_rows(&he->sorted_chain);
else
he->nr_rows = 0;
- self->hists->nr_entries += he->nr_rows;
- self->b.nr_entries = self->hists->nr_entries;
+ browser->hists->nr_entries += he->nr_rows;
+ browser->b.nr_entries = browser->hists->nr_entries;
return true;
}
@@ -209,12 +209,12 @@ static bool hist_browser__toggle_fold(struct hist_browser *self)
return false;
}
-static int callchain_node__set_folding_rb_tree(struct callchain_node *self, bool unfold)
+static int callchain_node__set_folding_rb_tree(struct callchain_node *node, bool unfold)
{
int n = 0;
struct rb_node *nd;
- for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
struct callchain_list *chain;
bool has_children = false;
@@ -263,37 +263,37 @@ static int callchain__set_folding(struct rb_root *chain, bool unfold)
return n;
}
-static void hist_entry__set_folding(struct hist_entry *self, bool unfold)
+static void hist_entry__set_folding(struct hist_entry *he, bool unfold)
{
- hist_entry__init_have_children(self);
- map_symbol__set_folding(&self->ms, unfold);
+ hist_entry__init_have_children(he);
+ map_symbol__set_folding(&he->ms, unfold);
- if (self->ms.has_children) {
- int n = callchain__set_folding(&self->sorted_chain, unfold);
- self->nr_rows = unfold ? n : 0;
+ if (he->ms.has_children) {
+ int n = callchain__set_folding(&he->sorted_chain, unfold);
+ he->nr_rows = unfold ? n : 0;
} else
- self->nr_rows = 0;
+ he->nr_rows = 0;
}
-static void hists__set_folding(struct hists *self, bool unfold)
+static void hists__set_folding(struct hists *hists, bool unfold)
{
struct rb_node *nd;
- self->nr_entries = 0;
+ hists->nr_entries = 0;
- for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
hist_entry__set_folding(he, unfold);
- self->nr_entries += 1 + he->nr_rows;
+ hists->nr_entries += 1 + he->nr_rows;
}
}
-static void hist_browser__set_folding(struct hist_browser *self, bool unfold)
+static void hist_browser__set_folding(struct hist_browser *browser, bool unfold)
{
- hists__set_folding(self->hists, unfold);
- self->b.nr_entries = self->hists->nr_entries;
+ hists__set_folding(browser->hists, unfold);
+ browser->b.nr_entries = browser->hists->nr_entries;
/* Go to the start, we may be way after valid entries after a collapse */
- ui_browser__reset_index(&self->b);
+ ui_browser__reset_index(&browser->b);
}
static void ui_browser__warn_lost_events(struct ui_browser *browser)
@@ -305,64 +305,64 @@ static void ui_browser__warn_lost_events(struct ui_browser *browser)
"Or reduce the sampling frequency.");
}
-static int hist_browser__run(struct hist_browser *self, const char *ev_name,
+static int hist_browser__run(struct hist_browser *browser, const char *ev_name,
void(*timer)(void *arg), void *arg, int delay_secs)
{
int key;
char title[160];
- self->b.entries = &self->hists->entries;
- self->b.nr_entries = self->hists->nr_entries;
+ browser->b.entries = &browser->hists->entries;
+ browser->b.nr_entries = browser->hists->nr_entries;
- hist_browser__refresh_dimensions(self);
- hists__browser_title(self->hists, title, sizeof(title), ev_name);
+ hist_browser__refresh_dimensions(browser);
+ hists__browser_title(browser->hists, title, sizeof(title), ev_name);
- if (ui_browser__show(&self->b, title,
+ if (ui_browser__show(&browser->b, title,
"Press '?' for help on key bindings") < 0)
return -1;
while (1) {
- key = ui_browser__run(&self->b, delay_secs);
+ key = ui_browser__run(&browser->b, delay_secs);
switch (key) {
case K_TIMER:
timer(arg);
- ui_browser__update_nr_entries(&self->b, self->hists->nr_entries);
+ ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
- if (self->hists->stats.nr_lost_warned !=
- self->hists->stats.nr_events[PERF_RECORD_LOST]) {
- self->hists->stats.nr_lost_warned =
- self->hists->stats.nr_events[PERF_RECORD_LOST];
- ui_browser__warn_lost_events(&self->b);
+ if (browser->hists->stats.nr_lost_warned !=
+ browser->hists->stats.nr_events[PERF_RECORD_LOST]) {
+ browser->hists->stats.nr_lost_warned =
+ browser->hists->stats.nr_events[PERF_RECORD_LOST];
+ ui_browser__warn_lost_events(&browser->b);
}
- hists__browser_title(self->hists, title, sizeof(title), ev_name);
- ui_browser__show_title(&self->b, title);
+ hists__browser_title(browser->hists, title, sizeof(title), ev_name);
+ ui_browser__show_title(&browser->b, title);
continue;
case 'D': { /* Debug */
static int seq;
- struct hist_entry *h = rb_entry(self->b.top,
+ struct hist_entry *h = rb_entry(browser->b.top,
struct hist_entry, rb_node);
ui_helpline__pop();
ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
- seq++, self->b.nr_entries,
- self->hists->nr_entries,
- self->b.height,
- self->b.index,
- self->b.top_idx,
+ seq++, browser->b.nr_entries,
+ browser->hists->nr_entries,
+ browser->b.height,
+ browser->b.index,
+ browser->b.top_idx,
h->row_offset, h->nr_rows);
}
break;
case 'C':
/* Collapse the whole world. */
- hist_browser__set_folding(self, false);
+ hist_browser__set_folding(browser, false);
break;
case 'E':
/* Expand the whole world. */
- hist_browser__set_folding(self, true);
+ hist_browser__set_folding(browser, true);
break;
case K_ENTER:
- if (hist_browser__toggle_fold(self))
+ if (hist_browser__toggle_fold(browser))
break;
/* fall thru */
default:
@@ -370,23 +370,23 @@ static int hist_browser__run(struct hist_browser *self, const char *ev_name,
}
}
out:
- ui_browser__hide(&self->b);
+ ui_browser__hide(&browser->b);
return key;
}
-static char *callchain_list__sym_name(struct callchain_list *self,
+static char *callchain_list__sym_name(struct callchain_list *cl,
char *bf, size_t bfsize)
{
- if (self->ms.sym)
- return self->ms.sym->name;
+ if (cl->ms.sym)
+ return cl->ms.sym->name;
- snprintf(bf, bfsize, "%#" PRIx64, self->ip);
+ snprintf(bf, bfsize, "%#" PRIx64, cl->ip);
return bf;
}
#define LEVEL_OFFSET_STEP 3
-static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
+static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browser,
struct callchain_node *chain_node,
u64 total, int level,
unsigned short row,
@@ -444,21 +444,21 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
}
color = HE_COLORSET_NORMAL;
- width = self->b.width - (offset + extra_offset + 2);
- if (ui_browser__is_current_entry(&self->b, row)) {
- self->selection = &chain->ms;
+ width = browser->b.width - (offset + extra_offset + 2);
+ if (ui_browser__is_current_entry(&browser->b, row)) {
+ browser->selection = &chain->ms;
color = HE_COLORSET_SELECTED;
*is_current_entry = true;
}
- ui_browser__set_color(&self->b, color);
- ui_browser__gotorc(&self->b, row, 0);
+ ui_browser__set_color(&browser->b, color);
+ ui_browser__gotorc(&browser->b, row, 0);
slsmg_write_nstring(" ", offset + extra_offset);
slsmg_printf("%c ", folded_sign);
slsmg_write_nstring(str, width);
free(alloc_str);
- if (++row == self->b.height)
+ if (++row == browser->b.height)
goto out;
do_next:
if (folded_sign == '+')
@@ -467,11 +467,11 @@ do_next:
if (folded_sign == '-') {
const int new_level = level + (extra_offset ? 2 : 1);
- row += hist_browser__show_callchain_node_rb_tree(self, child, new_total,
+ row += hist_browser__show_callchain_node_rb_tree(browser, child, new_total,
new_level, row, row_offset,
is_current_entry);
}
- if (row == self->b.height)
+ if (row == browser->b.height)
goto out;
node = next;
}
@@ -479,7 +479,7 @@ out:
return row - first_row;
}
-static int hist_browser__show_callchain_node(struct hist_browser *self,
+static int hist_browser__show_callchain_node(struct hist_browser *browser,
struct callchain_node *node,
int level, unsigned short row,
off_t *row_offset,
@@ -488,7 +488,7 @@ static int hist_browser__show_callchain_node(struct hist_browser *self,
struct callchain_list *chain;
int first_row = row,
offset = level * LEVEL_OFFSET_STEP,
- width = self->b.width - offset;
+ width = browser->b.width - offset;
char folded_sign = ' ';
list_for_each_entry(chain, &node->val, list) {
@@ -503,26 +503,26 @@ static int hist_browser__show_callchain_node(struct hist_browser *self,
}
color = HE_COLORSET_NORMAL;
- if (ui_browser__is_current_entry(&self->b, row)) {
- self->selection = &chain->ms;
+ if (ui_browser__is_current_entry(&browser->b, row)) {
+ browser->selection = &chain->ms;
color = HE_COLORSET_SELECTED;
*is_current_entry = true;
}
s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
- ui_browser__gotorc(&self->b, row, 0);
- ui_browser__set_color(&self->b, color);
+ ui_browser__gotorc(&browser->b, row, 0);
+ ui_browser__set_color(&browser->b, color);
slsmg_write_nstring(" ", offset);
slsmg_printf("%c ", folded_sign);
slsmg_write_nstring(s, width - 2);
- if (++row == self->b.height)
+ if (++row == browser->b.height)
goto out;
}
if (folded_sign == '-')
- row += hist_browser__show_callchain_node_rb_tree(self, node,
- self->hists->stats.total_period,
+ row += hist_browser__show_callchain_node_rb_tree(browser, node,
+ browser->hists->stats.total_period,
level + 1, row,
row_offset,
is_current_entry);
@@ -530,7 +530,7 @@ out:
return row - first_row;
}
-static int hist_browser__show_callchain(struct hist_browser *self,
+static int hist_browser__show_callchain(struct hist_browser *browser,
struct rb_root *chain,
int level, unsigned short row,
off_t *row_offset,
@@ -542,31 +542,31 @@ static int hist_browser__show_callchain(struct hist_browser *self,
for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
- row += hist_browser__show_callchain_node(self, node, level,
+ row += hist_browser__show_callchain_node(browser, node, level,
row, row_offset,
is_current_entry);
- if (row == self->b.height)
+ if (row == browser->b.height)
break;
}
return row - first_row;
}
-static int hist_browser__show_entry(struct hist_browser *self,
+static int hist_browser__show_entry(struct hist_browser *browser,
struct hist_entry *entry,
unsigned short row)
{
char s[256];
double percent;
int printed = 0;
- int width = self->b.width - 6; /* The percentage */
+ int width = browser->b.width - 6; /* The percentage */
char folded_sign = ' ';
- bool current_entry = ui_browser__is_current_entry(&self->b, row);
+ bool current_entry = ui_browser__is_current_entry(&browser->b, row);
off_t row_offset = entry->row_offset;
if (current_entry) {
- self->he_selection = entry;
- self->selection = &entry->ms;
+ browser->he_selection = entry;
+ browser->selection = &entry->ms;
}
if (symbol_conf.use_callchain) {
@@ -575,11 +575,11 @@ static int hist_browser__show_entry(struct hist_browser *self,
}
if (row_offset == 0) {
- hist_entry__snprintf(entry, s, sizeof(s), self->hists);
- percent = (entry->period * 100.0) / self->hists->stats.total_period;
+ hist_entry__snprintf(entry, s, sizeof(s), browser->hists);
+ percent = (entry->period * 100.0) / browser->hists->stats.total_period;
- ui_browser__set_percent_color(&self->b, percent, current_entry);
- ui_browser__gotorc(&self->b, row, 0);
+ ui_browser__set_percent_color(&browser->b, percent, current_entry);
+ ui_browser__gotorc(&browser->b, row, 0);
if (symbol_conf.use_callchain) {
slsmg_printf("%c ", folded_sign);
width -= 2;
@@ -588,11 +588,11 @@ static int hist_browser__show_entry(struct hist_browser *self,
slsmg_printf(" %5.2f%%", percent);
/* The scroll bar isn't being used */
- if (!self->b.navkeypressed)
+ if (!browser->b.navkeypressed)
width += 1;
- if (!current_entry || !self->b.navkeypressed)
- ui_browser__set_color(&self->b, HE_COLORSET_NORMAL);
+ if (!current_entry || !browser->b.navkeypressed)
+ ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
if (symbol_conf.show_nr_samples) {
slsmg_printf(" %11u", entry->nr_events);
@@ -610,12 +610,12 @@ static int hist_browser__show_entry(struct hist_browser *self,
} else
--row_offset;
- if (folded_sign == '-' && row != self->b.height) {
- printed += hist_browser__show_callchain(self, &entry->sorted_chain,
+ if (folded_sign == '-' && row != browser->b.height) {
+ printed += hist_browser__show_callchain(browser, &entry->sorted_chain,
1, row, &row_offset,
&current_entry);
if (current_entry)
- self->he_selection = entry;
+ browser->he_selection = entry;
}
return printed;
@@ -631,22 +631,22 @@ static void ui_browser__hists_init_top(struct ui_browser *browser)
}
}
-static unsigned int hist_browser__refresh(struct ui_browser *self)
+static unsigned int hist_browser__refresh(struct ui_browser *browser)
{
unsigned row = 0;
struct rb_node *nd;
- struct hist_browser *hb = container_of(self, struct hist_browser, b);
+ struct hist_browser *hb = container_of(browser, struct hist_browser, b);
- ui_browser__hists_init_top(self);
+ ui_browser__hists_init_top(browser);
- for (nd = self->top; nd; nd = rb_next(nd)) {
+ for (nd = browser->top; nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (h->filtered)
continue;
row += hist_browser__show_entry(hb, h, row);
- if (row == self->height)
+ if (row == browser->height)
break;
}
@@ -679,27 +679,27 @@ static struct rb_node *hists__filter_prev_entries(struct rb_node *nd)
return NULL;
}
-static void ui_browser__hists_seek(struct ui_browser *self,
+static void ui_browser__hists_seek(struct ui_browser *browser,
off_t offset, int whence)
{
struct hist_entry *h;
struct rb_node *nd;
bool first = true;
- if (self->nr_entries == 0)
+ if (browser->nr_entries == 0)
return;
- ui_browser__hists_init_top(self);
+ ui_browser__hists_init_top(browser);
switch (whence) {
case SEEK_SET:
- nd = hists__filter_entries(rb_first(self->entries));
+ nd = hists__filter_entries(rb_first(browser->entries));
break;
case SEEK_CUR:
- nd = self->top;
+ nd = browser->top;
goto do_offset;
case SEEK_END:
- nd = hists__filter_prev_entries(rb_last(self->entries));
+ nd = hists__filter_prev_entries(rb_last(browser->entries));
first = false;
break;
default:
@@ -710,7 +710,7 @@ static void ui_browser__hists_seek(struct ui_browser *self,
* Moves not relative to the first visible entry invalidates its
* row_offset:
*/
- h = rb_entry(self->top, struct hist_entry, rb_node);
+ h = rb_entry(browser->top, struct hist_entry, rb_node);
h->row_offset = 0;
/*
@@ -738,7 +738,7 @@ do_offset:
} else {
h->row_offset += offset;
offset = 0;
- self->top = nd;
+ browser->top = nd;
break;
}
}
@@ -746,7 +746,7 @@ do_offset:
if (nd == NULL)
break;
--offset;
- self->top = nd;
+ browser->top = nd;
} while (offset != 0);
} else if (offset < 0) {
while (1) {
@@ -759,7 +759,7 @@ do_offset:
} else {
h->row_offset += offset;
offset = 0;
- self->top = nd;
+ browser->top = nd;
break;
}
} else {
@@ -769,7 +769,7 @@ do_offset:
} else {
h->row_offset = h->nr_rows + offset;
offset = 0;
- self->top = nd;
+ browser->top = nd;
break;
}
}
@@ -779,7 +779,7 @@ do_offset:
if (nd == NULL)
break;
++offset;
- self->top = nd;
+ browser->top = nd;
if (offset == 0) {
/*
* Last unfiltered hist_entry, check if it is
@@ -794,7 +794,7 @@ do_offset:
first = false;
}
} else {
- self->top = nd;
+ browser->top = nd;
h = rb_entry(nd, struct hist_entry, rb_node);
h->row_offset = 0;
}
@@ -802,46 +802,46 @@ do_offset:
static struct hist_browser *hist_browser__new(struct hists *hists)
{
- struct hist_browser *self = zalloc(sizeof(*self));
+ struct hist_browser *browser = zalloc(sizeof(*browser));
- if (self) {
- self->hists = hists;
- self->b.refresh = hist_browser__refresh;
- self->b.seek = ui_browser__hists_seek;
- self->b.use_navkeypressed = true;
+ if (browser) {
+ browser->hists = hists;
+ browser->b.refresh = hist_browser__refresh;
+ browser->b.seek = ui_browser__hists_seek;
+ browser->b.use_navkeypressed = true;
if (sort__branch_mode == 1)
- self->has_symbols = sort_sym_from.list.next != NULL;
+ browser->has_symbols = sort_sym_from.list.next != NULL;
else
- self->has_symbols = sort_sym.list.next != NULL;
+ browser->has_symbols = sort_sym.list.next != NULL;
}
- return self;
+ return browser;
}
-static void hist_browser__delete(struct hist_browser *self)
+static void hist_browser__delete(struct hist_browser *browser)
{
- free(self);
+ free(browser);
}
-static struct hist_entry *hist_browser__selected_entry(struct hist_browser *self)
+static struct hist_entry *hist_browser__selected_entry(struct hist_browser *browser)
{
- return self->he_selection;
+ return browser->he_selection;
}
-static struct thread *hist_browser__selected_thread(struct hist_browser *self)
+static struct thread *hist_browser__selected_thread(struct hist_browser *browser)
{
- return self->he_selection->thread;
+ return browser->he_selection->thread;
}
-static int hists__browser_title(struct hists *self, char *bf, size_t size,
+static int hists__browser_title(struct hists *hists, char *bf, size_t size,
const char *ev_name)
{
char unit;
int printed;
- const struct dso *dso = self->dso_filter;
- const struct thread *thread = self->thread_filter;
- unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE];
- u64 nr_events = self->stats.total_period;
+ const struct dso *dso = hists->dso_filter;
+ const struct thread *thread = hists->thread_filter;
+ unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ u64 nr_events = hists->stats.total_period;
nr_samples = convert_unit(nr_samples, &unit);
printed = scnprintf(bf, size,
@@ -849,9 +849,9 @@ static int hists__browser_title(struct hists *self, char *bf, size_t size,
nr_samples, unit, ev_name, nr_events);
- if (self->uid_filter_str)
+ if (hists->uid_filter_str)
printed += snprintf(bf + printed, size - printed,
- ", UID: %s", self->uid_filter_str);
+ ", UID: %s", hists->uid_filter_str);
if (thread)
printed += scnprintf(bf + printed, size - printed,
", Thread: %s(%d)",
@@ -879,8 +879,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
void(*timer)(void *arg), void *arg,
int delay_secs)
{
- struct hists *self = &evsel->hists;
- struct hist_browser *browser = hist_browser__new(self);
+ struct hists *hists = &evsel->hists;
+ struct hist_browser *browser = hist_browser__new(hists);
struct branch_info *bi;
struct pstack *fstack;
char *options[16];
@@ -946,8 +946,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"Please enter the name of symbol you want to see",
buf, "ENTER: OK, ESC: Cancel",
delay_secs * 2) == K_ENTER) {
- self->symbol_filter_str = *buf ? buf : NULL;
- hists__filter_by_symbol(self);
+ hists->symbol_filter_str = *buf ? buf : NULL;
+ hists__filter_by_symbol(hists);
hist_browser__reset(browser);
}
continue;
@@ -1128,7 +1128,7 @@ zoom_out_dso:
sort_dso.elide = true;
pstack__push(fstack, &browser->hists->dso_filter);
}
- hists__filter_by_dso(self);
+ hists__filter_by_dso(hists);
hist_browser__reset(browser);
} else if (choice == zoom_thread) {
zoom_thread:
@@ -1146,7 +1146,7 @@ zoom_out_thread:
sort_thread.elide = true;
pstack__push(fstack, &browser->hists->thread_filter);
}
- hists__filter_by_thread(self);
+ hists__filter_by_thread(hists);
hist_browser__reset(browser);
}
}
diff --git a/tools/perf/ui/setup.c b/tools/perf/ui/setup.c
index 9f5f888f73e3..791fb15ce350 100644
--- a/tools/perf/ui/setup.c
+++ b/tools/perf/ui/setup.c
@@ -22,6 +22,7 @@ void setup_browser(bool fallback_to_pager)
break;
/* fall through */
default:
+ use_browser = 0;
if (fallback_to_pager)
setup_pager();
break;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 0deac6a14b65..6faa3a18bfbd 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -120,7 +120,7 @@ static char *parse_value(void)
static inline int iskeychar(int c)
{
- return isalnum(c) || c == '-';
+ return isalnum(c) || c == '-' || c == '_';
}
static int get_value(config_fn_t fn, void *data, char *name, unsigned int len)
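
The one-character widening of iskeychar() above is what lets option names containing underscores (such as the annotate.hide_src_code key handled earlier) get through the config parser; a trivial standalone check of the same predicate, with the test string chosen only for illustration:

#include <ctype.h>
#include <stdio.h>

/* Mirrors the patched predicate: letters, digits, '-' and now '_'. */
static int iskeychar(int c)
{
	return isalnum(c) || c == '-' || c == '_';
}

int main(void)
{
	const char *key = "hide_src_code";
	const char *p;

	for (p = key; *p; p++) {
		if (!iskeychar((unsigned char)*p)) {
			printf("rejected at '%c'\n", *p);
			return 1;
		}
	}
	printf("'%s' is a valid config key\n", key);
	return 0;
}
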
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 57e4ce57bbcc..91d19138f3ec 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -15,6 +15,7 @@
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
+#include "../../include/linux/perf_event.h"
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
@@ -64,6 +65,95 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
return evsel;
}
+static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
+ "cycles",
+ "instructions",
+ "cache-references",
+ "cache-misses",
+ "branches",
+ "branch-misses",
+ "bus-cycles",
+ "stalled-cycles-frontend",
+ "stalled-cycles-backend",
+ "ref-cycles",
+};
+
+const char *__perf_evsel__hw_name(u64 config)
+{
+ if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
+ return perf_evsel__hw_names[config];
+
+ return "unknown-hardware";
+}
+
+static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int colon = 0;
+ struct perf_event_attr *attr = &evsel->attr;
+ int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(attr->config));
+ bool exclude_guest_default = false;
+
+#define MOD_PRINT(context, mod) do { \
+ if (!attr->exclude_##context) { \
+ if (!colon) colon = r++; \
+ r += scnprintf(bf + r, size - r, "%c", mod); \
+ } } while(0)
+
+ if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
+ MOD_PRINT(kernel, 'k');
+ MOD_PRINT(user, 'u');
+ MOD_PRINT(hv, 'h');
+ exclude_guest_default = true;
+ }
+
+ if (attr->precise_ip) {
+ if (!colon)
+ colon = r++;
+ r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
+ exclude_guest_default = true;
+ }
+
+ if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
+ MOD_PRINT(host, 'H');
+ MOD_PRINT(guest, 'G');
+ }
+#undef MOD_PRINT
+ if (colon)
+ bf[colon] = ':';
+ return r;
+}
+
+int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int ret;
+
+ switch (evsel->attr.type) {
+ case PERF_TYPE_RAW:
+ ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
+ break;
+
+ case PERF_TYPE_HARDWARE:
+ ret = perf_evsel__hw_name(evsel, bf, size);
+ break;
+ default:
+ /*
+ * FIXME
+ *
+ * This is the minimal perf_evsel__name so that we can
+ * reconstruct event names taking into account event modifiers.
+ *
+ * The old event_name uses it now for raw and hw events, so that
+ * we don't drag all the parsing stuff into the python binding.
+ *
+ * On the next devel cycle the rest of the event naming will be
+ * brought here.
+ */
+ return 0;
+ }
+
+ return ret;
+}
+
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
struct perf_evsel *first)
{
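
The MOD_PRINT block in perf_evsel__hw_name() above relies on reserving a single byte that only becomes ':' if at least one modifier is printed; the standalone sketch below isolates just that string-building trick (the function name and the three boolean parameters are invented for illustration, not perf API):

#include <stdio.h>
#include <stdbool.h>

/*
 * The first modifier advances the write position by one, leaving a hole
 * after the base name; bf[colon] = ':' fills it in at the end, so plain
 * "cycles" stays "cycles" while exclusions yield e.g. "cycles:u".
 */
static int hw_name_with_mods(char *bf, size_t size, const char *base,
			     bool excl_kernel, bool excl_user, bool excl_hv)
{
	int colon = 0;
	int r = snprintf(bf, size, "%s", base);

	if (excl_kernel || excl_user || excl_hv) {
		if (!excl_kernel) {
			if (!colon)
				colon = r++;	/* reserve the ':' slot */
			r += snprintf(bf + r, size - r, "k");
		}
		if (!excl_user) {
			if (!colon)
				colon = r++;
			r += snprintf(bf + r, size - r, "u");
		}
		if (!excl_hv) {
			if (!colon)
				colon = r++;
			r += snprintf(bf + r, size - r, "h");
		}
	}
	if (colon)
		bf[colon] = ':';
	return r;
}

int main(void)
{
	char bf[64];

	hw_name_with_mods(bf, sizeof(bf), "cycles", true, false, true);
	printf("%s\n", bf);	/* prints "cycles:u" */
	return 0;
}
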
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 3d6b3e4cb66b..4ba8b564e6f4 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -83,6 +83,9 @@ void perf_evsel__config(struct perf_evsel *evsel,
struct perf_record_opts *opts,
struct perf_evsel *first);
+const char* __perf_evsel__hw_name(u64 config);
+int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size);
+
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index fac7d59309b8..05dbc8b3c767 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -62,19 +62,6 @@ static struct event_symbol event_symbols[] = {
#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
-static const char *hw_event_names[PERF_COUNT_HW_MAX] = {
- "cycles",
- "instructions",
- "cache-references",
- "cache-misses",
- "branches",
- "branch-misses",
- "bus-cycles",
- "stalled-cycles-frontend",
- "stalled-cycles-backend",
- "ref-cycles",
-};
-
static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
"cpu-clock",
"task-clock",
@@ -300,6 +287,16 @@ const char *event_name(struct perf_evsel *evsel)
u64 config = evsel->attr.config;
int type = evsel->attr.type;
+ if (type == PERF_TYPE_RAW || type == PERF_TYPE_HARDWARE) {
+ /*
+ * XXX minimal fix, see comment on perf_evsel__name, this static buffer
+ * will go away together with event_name in the next devel cycle.
+ */
+ static char bf[128];
+ perf_evsel__name(evsel, bf, sizeof(bf));
+ return bf;
+ }
+
if (evsel->name)
return evsel->name;
@@ -317,9 +314,7 @@ const char *__event_name(int type, u64 config)
switch (type) {
case PERF_TYPE_HARDWARE:
- if (config < PERF_COUNT_HW_MAX && hw_event_names[config])
- return hw_event_names[config];
- return "unknown-hardware";
+ return __perf_evsel__hw_name(config);
case PERF_TYPE_HW_CACHE: {
u8 cache_type, cache_op, cache_result;
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 8a8ee64e72d1..59dccc98b554 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -44,6 +44,7 @@
#include "trace-event.h" /* For __unused */
#include "probe-event.h"
#include "probe-finder.h"
+#include "session.h"
#define MAX_CMDLEN 256
#define MAX_PROBE_ARGS 128
@@ -70,6 +71,8 @@ static int e_snprintf(char *str, size_t size, const char *format, ...)
}
static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
+static int convert_name_to_addr(struct perf_probe_event *pev,
+ const char *exec);
static struct machine machine;
/* Initialize symbol maps and path of vmlinux/modules */
@@ -170,6 +173,34 @@ const char *kernel_get_module_path(const char *module)
return (dso) ? dso->long_name : NULL;
}
+static int init_user_exec(void)
+{
+ int ret = 0;
+
+ symbol_conf.try_vmlinux_path = false;
+ symbol_conf.sort_by_name = true;
+ ret = symbol__init();
+
+ if (ret < 0)
+ pr_debug("Failed to init symbol map.\n");
+
+ return ret;
+}
+
+static int convert_to_perf_probe_point(struct probe_trace_point *tp,
+ struct perf_probe_point *pp)
+{
+ pp->function = strdup(tp->symbol);
+
+ if (pp->function == NULL)
+ return -ENOMEM;
+
+ pp->offset = tp->offset;
+ pp->retprobe = tp->retprobe;
+
+ return 0;
+}
+
#ifdef DWARF_SUPPORT
/* Open new debuginfo of given module */
static struct debuginfo *open_debuginfo(const char *module)
@@ -224,10 +255,7 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
if (ret <= 0) {
pr_debug("Failed to find corresponding probes from "
"debuginfo. Use kprobe event information.\n");
- pp->function = strdup(tp->symbol);
- if (pp->function == NULL)
- return -ENOMEM;
- pp->offset = tp->offset;
+ return convert_to_perf_probe_point(tp, pp);
}
pp->retprobe = tp->retprobe;
@@ -275,9 +303,20 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
int max_tevs, const char *target)
{
bool need_dwarf = perf_probe_event_need_dwarf(pev);
- struct debuginfo *dinfo = open_debuginfo(target);
+ struct debuginfo *dinfo;
int ntevs, ret = 0;
+ if (pev->uprobes) {
+ if (need_dwarf) {
+ pr_warning("Debuginfo-analysis is not yet supported"
+ " with -x/--exec option.\n");
+ return -ENOSYS;
+ }
+ return convert_name_to_addr(pev, target);
+ }
+
+ dinfo = open_debuginfo(target);
+
if (!dinfo) {
if (need_dwarf) {
pr_warning("Failed to open debuginfo file.\n");
@@ -603,23 +642,22 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
pr_err("Failed to find symbol %s in kernel.\n", tp->symbol);
return -ENOENT;
}
- pp->function = strdup(tp->symbol);
- if (pp->function == NULL)
- return -ENOMEM;
- pp->offset = tp->offset;
- pp->retprobe = tp->retprobe;
- return 0;
+ return convert_to_perf_probe_point(tp, pp);
}
static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs __unused,
- int max_tevs __unused, const char *mod __unused)
+ int max_tevs __unused, const char *target)
{
if (perf_probe_event_need_dwarf(pev)) {
pr_warning("Debuginfo-analysis is not supported.\n");
return -ENOSYS;
}
+
+ if (pev->uprobes)
+ return convert_name_to_addr(pev, target);
+
return 0;
}
@@ -1341,11 +1379,18 @@ char *synthesize_probe_trace_command(struct probe_trace_event *tev)
if (buf == NULL)
return NULL;
- len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s%s%s+%lu",
- tp->retprobe ? 'r' : 'p',
- tev->group, tev->event,
- tp->module ?: "", tp->module ? ":" : "",
- tp->symbol, tp->offset);
+ if (tev->uprobes)
+ len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s:%s",
+ tp->retprobe ? 'r' : 'p',
+ tev->group, tev->event,
+ tp->module, tp->symbol);
+ else
+ len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s%s%s+%lu",
+ tp->retprobe ? 'r' : 'p',
+ tev->group, tev->event,
+ tp->module ?: "", tp->module ? ":" : "",
+ tp->symbol, tp->offset);
+
if (len <= 0)
goto error;
@@ -1364,7 +1409,7 @@ error:
}
static int convert_to_perf_probe_event(struct probe_trace_event *tev,
- struct perf_probe_event *pev)
+ struct perf_probe_event *pev, bool is_kprobe)
{
char buf[64] = "";
int i, ret;
@@ -1376,7 +1421,11 @@ static int convert_to_perf_probe_event(struct probe_trace_event *tev,
return -ENOMEM;
/* Convert trace_point to probe_point */
- ret = kprobe_convert_to_perf_probe(&tev->point, &pev->point);
+ if (is_kprobe)
+ ret = kprobe_convert_to_perf_probe(&tev->point, &pev->point);
+ else
+ ret = convert_to_perf_probe_point(&tev->point, &pev->point);
+
if (ret < 0)
return ret;
@@ -1472,7 +1521,26 @@ static void clear_probe_trace_event(struct probe_trace_event *tev)
memset(tev, 0, sizeof(*tev));
}
-static int open_kprobe_events(bool readwrite)
+static void print_warn_msg(const char *file, bool is_kprobe)
+{
+
+ if (errno == ENOENT) {
+ const char *config;
+
+ if (!is_kprobe)
+ config = "CONFIG_UPROBE_EVENTS";
+ else
+ config = "CONFIG_KPROBE_EVENTS";
+
+ pr_warning("%s file does not exist - please rebuild kernel"
+ " with %s.\n", file, config);
+ } else
+ pr_warning("Failed to open %s file: %s\n", file,
+ strerror(errno));
+}
+
+static int open_probe_events(const char *trace_file, bool readwrite,
+ bool is_kprobe)
{
char buf[PATH_MAX];
const char *__debugfs;
@@ -1484,27 +1552,31 @@ static int open_kprobe_events(bool readwrite)
return -ENOENT;
}
- ret = e_snprintf(buf, PATH_MAX, "%stracing/kprobe_events", __debugfs);
+ ret = e_snprintf(buf, PATH_MAX, "%s/%s", __debugfs, trace_file);
if (ret >= 0) {
pr_debug("Opening %s write=%d\n", buf, readwrite);
if (readwrite && !probe_event_dry_run)
ret = open(buf, O_RDWR, O_APPEND);
else
ret = open(buf, O_RDONLY, 0);
- }
- if (ret < 0) {
- if (errno == ENOENT)
- pr_warning("kprobe_events file does not exist - please"
- " rebuild kernel with CONFIG_KPROBE_EVENT.\n");
- else
- pr_warning("Failed to open kprobe_events file: %s\n",
- strerror(errno));
+ if (ret < 0)
+ print_warn_msg(buf, is_kprobe);
}
return ret;
}
-/* Get raw string list of current kprobe_events */
+static int open_kprobe_events(bool readwrite)
+{
+ return open_probe_events("tracing/kprobe_events", readwrite, true);
+}
+
+static int open_uprobe_events(bool readwrite)
+{
+ return open_probe_events("tracing/uprobe_events", readwrite, false);
+}
+
+/* Get raw string list of current kprobe_events or uprobe_events */
static struct strlist *get_probe_trace_command_rawlist(int fd)
{
int ret, idx;
@@ -1569,36 +1641,26 @@ static int show_perf_probe_event(struct perf_probe_event *pev)
return ret;
}
-/* List up current perf-probe events */
-int show_perf_probe_events(void)
+static int __show_perf_probe_events(int fd, bool is_kprobe)
{
- int fd, ret;
+ int ret = 0;
struct probe_trace_event tev;
struct perf_probe_event pev;
struct strlist *rawlist;
struct str_node *ent;
- setup_pager();
- ret = init_vmlinux();
- if (ret < 0)
- return ret;
-
memset(&tev, 0, sizeof(tev));
memset(&pev, 0, sizeof(pev));
- fd = open_kprobe_events(false);
- if (fd < 0)
- return fd;
-
rawlist = get_probe_trace_command_rawlist(fd);
- close(fd);
if (!rawlist)
return -ENOENT;
strlist__for_each(ent, rawlist) {
ret = parse_probe_trace_command(ent->s, &tev);
if (ret >= 0) {
- ret = convert_to_perf_probe_event(&tev, &pev);
+ ret = convert_to_perf_probe_event(&tev, &pev,
+ is_kprobe);
if (ret >= 0)
ret = show_perf_probe_event(&pev);
}
@@ -1612,6 +1674,33 @@ int show_perf_probe_events(void)
return ret;
}
+/* List up current perf-probe events */
+int show_perf_probe_events(void)
+{
+ int fd, ret;
+
+ setup_pager();
+ fd = open_kprobe_events(false);
+
+ if (fd < 0)
+ return fd;
+
+ ret = init_vmlinux();
+ if (ret < 0)
+ return ret;
+
+ ret = __show_perf_probe_events(fd, true);
+ close(fd);
+
+ fd = open_uprobe_events(false);
+ if (fd >= 0) {
+ ret = __show_perf_probe_events(fd, false);
+ close(fd);
+ }
+
+ return ret;
+}
+
/* Get current perf-probe event names */
static struct strlist *get_probe_trace_event_names(int fd, bool include_group)
{
@@ -1717,7 +1806,11 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
const char *event, *group;
struct strlist *namelist;
- fd = open_kprobe_events(true);
+ if (pev->uprobes)
+ fd = open_uprobe_events(true);
+ else
+ fd = open_kprobe_events(true);
+
if (fd < 0)
return fd;
/* Get current event names */
@@ -1829,6 +1922,8 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
tev->point.offset = pev->point.offset;
tev->point.retprobe = pev->point.retprobe;
tev->nargs = pev->nargs;
+ tev->uprobes = pev->uprobes;
+
if (tev->nargs) {
tev->args = zalloc(sizeof(struct probe_trace_arg)
* tev->nargs);
@@ -1859,6 +1954,9 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
}
}
+ if (pev->uprobes)
+ return 1;
+
/* Currently just checking function name from symbol map */
sym = __find_kernel_function_by_name(tev->point.symbol, NULL);
if (!sym) {
@@ -1894,12 +1992,18 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
int i, j, ret;
struct __event_package *pkgs;
+ ret = 0;
pkgs = zalloc(sizeof(struct __event_package) * npevs);
+
if (pkgs == NULL)
return -ENOMEM;
- /* Init vmlinux path */
- ret = init_vmlinux();
+ if (!pevs->uprobes)
+ /* Init vmlinux path */
+ ret = init_vmlinux();
+ else
+ ret = init_user_exec();
+
if (ret < 0) {
free(pkgs);
return ret;
@@ -1971,23 +2075,15 @@ error:
return ret;
}
-static int del_trace_probe_event(int fd, const char *group,
- const char *event, struct strlist *namelist)
+static int del_trace_probe_event(int fd, const char *buf,
+ struct strlist *namelist)
{
- char buf[128];
struct str_node *ent, *n;
- int found = 0, ret = 0;
-
- ret = e_snprintf(buf, 128, "%s:%s", group, event);
- if (ret < 0) {
- pr_err("Failed to copy event.\n");
- return ret;
- }
+ int ret = -1;
if (strpbrk(buf, "*?")) { /* Glob-exp */
strlist__for_each_safe(ent, n, namelist)
if (strglobmatch(ent->s, buf)) {
- found++;
ret = __del_trace_probe_event(fd, ent);
if (ret < 0)
break;
@@ -1996,40 +2092,43 @@ static int del_trace_probe_event(int fd, const char *group,
} else {
ent = strlist__find(namelist, buf);
if (ent) {
- found++;
ret = __del_trace_probe_event(fd, ent);
if (ret >= 0)
strlist__remove(namelist, ent);
}
}
- if (found == 0 && ret >= 0)
- pr_info("Info: Event \"%s\" does not exist.\n", buf);
return ret;
}
int del_perf_probe_events(struct strlist *dellist)
{
- int fd, ret = 0;
+ int ret = -1, ufd = -1, kfd = -1;
+ char buf[128];
const char *group, *event;
char *p, *str;
struct str_node *ent;
- struct strlist *namelist;
-
- fd = open_kprobe_events(true);
- if (fd < 0)
- return fd;
+ struct strlist *namelist = NULL, *unamelist = NULL;
/* Get current event names */
- namelist = get_probe_trace_event_names(fd, true);
- if (namelist == NULL)
- return -EINVAL;
+ kfd = open_kprobe_events(true);
+ if (kfd < 0)
+ return kfd;
+
+ namelist = get_probe_trace_event_names(kfd, true);
+ ufd = open_uprobe_events(true);
+
+ if (ufd >= 0)
+ unamelist = get_probe_trace_event_names(ufd, true);
+
+ if (namelist == NULL && unamelist == NULL)
+ goto error;
strlist__for_each(ent, dellist) {
str = strdup(ent->s);
if (str == NULL) {
ret = -ENOMEM;
- break;
+ goto error;
}
pr_debug("Parsing: %s\n", str);
p = strchr(str, ':');
@@ -2041,17 +2140,46 @@ int del_perf_probe_events(struct strlist *dellist)
group = "*";
event = str;
}
+
+ ret = e_snprintf(buf, 128, "%s:%s", group, event);
+ if (ret < 0) {
+ pr_err("Failed to copy event.");
+ free(str);
+ goto error;
+ }
+
pr_debug("Group: %s, Event: %s\n", group, event);
- ret = del_trace_probe_event(fd, group, event, namelist);
+
+ if (namelist)
+ ret = del_trace_probe_event(kfd, buf, namelist);
+
+ if (unamelist && ret != 0)
+ ret = del_trace_probe_event(ufd, buf, unamelist);
+
+ if (ret != 0)
+ pr_info("Info: Event \"%s\" does not exist.\n", buf);
+
free(str);
- if (ret < 0)
- break;
}
- strlist__delete(namelist);
- close(fd);
+
+error:
+ if (kfd >= 0) {
+ if (namelist)
+ strlist__delete(namelist);
+
+ close(kfd);
+ }
+
+ if (ufd >= 0) {
+ if (unamelist)
+ strlist__delete(unamelist);
+
+ close(ufd);
+ }
return ret;
}
+
/* TODO: don't use a global variable for filter ... */
static struct strfilter *available_func_filter;
@@ -2068,30 +2196,152 @@ static int filter_available_functions(struct map *map __unused,
return 1;
}
-int show_available_funcs(const char *target, struct strfilter *_filter)
+static int __show_available_funcs(struct map *map)
+{
+ if (map__load(map, filter_available_functions)) {
+ pr_err("Failed to load map.\n");
+ return -EINVAL;
+ }
+ if (!dso__sorted_by_name(map->dso, map->type))
+ dso__sort_by_name(map->dso, map->type);
+
+ dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
+ return 0;
+}
+
+static int available_kernel_funcs(const char *module)
{
struct map *map;
int ret;
- setup_pager();
-
ret = init_vmlinux();
if (ret < 0)
return ret;
- map = kernel_get_module_map(target);
+ map = kernel_get_module_map(module);
if (!map) {
- pr_err("Failed to find %s map.\n", (target) ? : "kernel");
+ pr_err("Failed to find %s map.\n", (module) ? : "kernel");
return -EINVAL;
}
+ return __show_available_funcs(map);
+}
+
+static int available_user_funcs(const char *target)
+{
+ struct map *map;
+ int ret;
+
+ ret = init_user_exec();
+ if (ret < 0)
+ return ret;
+
+ map = dso__new_map(target);
+ ret = __show_available_funcs(map);
+ dso__delete(map->dso);
+ map__delete(map);
+ return ret;
+}
+
+int show_available_funcs(const char *target, struct strfilter *_filter,
+ bool user)
+{
+ setup_pager();
available_func_filter = _filter;
+
+ if (!user)
+ return available_kernel_funcs(target);
+
+ return available_user_funcs(target);
+}
+
+/*
+ * uprobe_events only accepts addresses:
+ * convert the function name and any offset to an address
+ */
+static int convert_name_to_addr(struct perf_probe_event *pev, const char *exec)
+{
+ struct perf_probe_point *pp = &pev->point;
+ struct symbol *sym;
+ struct map *map = NULL;
+ char *function = NULL, *name = NULL;
+ int ret = -EINVAL;
+ unsigned long long vaddr = 0;
+
+ if (!pp->function) {
+ pr_warning("No function specified for uprobes");
+ goto out;
+ }
+
+ function = strdup(pp->function);
+ if (!function) {
+ pr_warning("Failed to allocate memory by strdup.\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ name = realpath(exec, NULL);
+ if (!name) {
+ pr_warning("Cannot find realpath for %s.\n", exec);
+ goto out;
+ }
+ map = dso__new_map(name);
+ if (!map) {
+ pr_warning("Cannot find appropriate DSO for %s.\n", exec);
+ goto out;
+ }
+ available_func_filter = strfilter__new(function, NULL);
if (map__load(map, filter_available_functions)) {
pr_err("Failed to load map.\n");
- return -EINVAL;
+ goto out;
}
- if (!dso__sorted_by_name(map->dso, map->type))
- dso__sort_by_name(map->dso, map->type);
- dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
- return 0;
+ sym = map__find_symbol_by_name(map, function, NULL);
+ if (!sym) {
+ pr_warning("Cannot find %s in DSO %s\n", function, exec);
+ goto out;
+ }
+
+ if (map->start > sym->start)
+ vaddr = map->start;
+ vaddr += sym->start + pp->offset + map->pgoff;
+ pp->offset = 0;
+
+ if (!pev->event) {
+ pev->event = function;
+ function = NULL;
+ }
+ if (!pev->group) {
+ char *ptr1, *ptr2;
+
+ pev->group = zalloc(sizeof(char *) * 64);
+ ptr1 = strdup(basename(exec));
+ if (ptr1) {
+ ptr2 = strpbrk(ptr1, "-._");
+ if (ptr2)
+ *ptr2 = '\0';
+ e_snprintf(pev->group, 64, "%s_%s", PERFPROBE_GROUP,
+ ptr1);
+ free(ptr1);
+ }
+ }
+ free(pp->function);
+ pp->function = zalloc(sizeof(char *) * MAX_PROBE_ARGS);
+ if (!pp->function) {
+ ret = -ENOMEM;
+ pr_warning("Failed to allocate memory by zalloc.\n");
+ goto out;
+ }
+ e_snprintf(pp->function, MAX_PROBE_ARGS, "0x%llx", vaddr);
+ ret = 0;
+
+out:
+ if (map) {
+ dso__delete(map->dso);
+ map__delete(map);
+ }
+ if (function)
+ free(function);
+ if (name)
+ free(name);
+ return ret;
}
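
Since the probe-event changes above end up writing two different command formats (kprobe_events keeps the symbol+offset form, while uprobe_events gets a path:address form because it only takes addresses), here is a self-contained sketch of just that formatting split; the helper name and the sample strings are made up for illustration:

#include <stdio.h>
#include <stdbool.h>

/*
 * kprobe form:  p:group/event symbol+offset
 * uprobe form:  p:group/event /path/to/binary:0xaddr
 * (for uprobes the patch stores the executable path in tp->module)
 */
static int synth_cmd(char *buf, size_t size, bool uprobe, bool retprobe,
		     const char *group, const char *event,
		     const char *module_or_path, const char *symbol_or_addr,
		     unsigned long offset)
{
	if (uprobe)
		return snprintf(buf, size, "%c:%s/%s %s:%s",
				retprobe ? 'r' : 'p', group, event,
				module_or_path, symbol_or_addr);

	return snprintf(buf, size, "%c:%s/%s %s%s%s+%lu",
			retprobe ? 'r' : 'p', group, event,
			module_or_path ? module_or_path : "",
			module_or_path ? ":" : "",
			symbol_or_addr, offset);
}

int main(void)
{
	char buf[256];

	synth_cmd(buf, sizeof(buf), false, false, "probe", "do_fork",
		  NULL, "do_fork", 0);
	printf("%s\n", buf);	/* p:probe/do_fork do_fork+0 */

	synth_cmd(buf, sizeof(buf), true, false, "probe_zsh", "zfree",
		  "/bin/zsh", "0x45400", 0);
	printf("%s\n", buf);	/* p:probe_zsh/zfree /bin/zsh:0x45400 */
	return 0;
}
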
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index a7dee835f49c..f9f3de8b4220 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -7,7 +7,7 @@
extern bool probe_event_dry_run;
-/* kprobe-tracer tracing point */
+/* kprobe-tracer and uprobe-tracer tracing point */
struct probe_trace_point {
char *symbol; /* Base symbol */
char *module; /* Module name */
@@ -21,7 +21,7 @@ struct probe_trace_arg_ref {
long offset; /* Offset value */
};
-/* kprobe-tracer tracing argument */
+/* kprobe-tracer and uprobe-tracer tracing argument */
struct probe_trace_arg {
char *name; /* Argument name */
char *value; /* Base value */
@@ -29,12 +29,13 @@ struct probe_trace_arg {
struct probe_trace_arg_ref *ref; /* Referencing offset */
};
-/* kprobe-tracer tracing event (point + arg) */
+/* kprobe-tracer and uprobe-tracer tracing event (point + arg) */
struct probe_trace_event {
char *event; /* Event name */
char *group; /* Group name */
struct probe_trace_point point; /* Trace point */
int nargs; /* Number of args */
+ bool uprobes; /* uprobes only */
struct probe_trace_arg *args; /* Arguments */
};
@@ -70,6 +71,7 @@ struct perf_probe_event {
char *group; /* Group name */
struct perf_probe_point point; /* Probe point */
int nargs; /* Number of arguments */
+ bool uprobes;
struct perf_probe_arg *args; /* Arguments */
};
@@ -129,8 +131,8 @@ extern int show_line_range(struct line_range *lr, const char *module);
extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
int max_probe_points, const char *module,
struct strfilter *filter, bool externs);
-extern int show_available_funcs(const char *module, struct strfilter *filter);
-
+extern int show_available_funcs(const char *module, struct strfilter *filter,
+ bool user);
/* Maximum index number of event-name postfix */
#define MAX_EVENT_INDEX 1024
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index ab9867b2b433..e2ba8858f3e1 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -2783,3 +2783,11 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
return ret;
}
+
+struct map *dso__new_map(const char *name)
+{
+ struct dso *dso = dso__new(name);
+ struct map *map = map__new2(0, dso, MAP__FUNCTION);
+
+ return map;
+}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 1f003884f1ab..5649d63798cb 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -242,6 +242,7 @@ void dso__set_long_name(struct dso *dso, char *name);
void dso__set_build_id(struct dso *dso, void *build_id);
void dso__read_running_kernel_build_id(struct dso *dso,
struct machine *machine);
+struct map *dso__new_map(const char *name);
struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
u64 addr);
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index 84d9bd782004..9b5f856cc280 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -188,28 +188,27 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
nt = realloc(threads, (sizeof(*threads) +
sizeof(pid_t) * total_tasks));
if (nt == NULL)
- goto out_free_threads;
+ goto out_free_namelist;
threads = nt;
- if (threads) {
- for (i = 0; i < items; i++)
- threads->map[j++] = atoi(namelist[i]->d_name);
- threads->nr = total_tasks;
- }
-
- for (i = 0; i < items; i++)
+ for (i = 0; i < items; i++) {
+ threads->map[j++] = atoi(namelist[i]->d_name);
free(namelist[i]);
+ }
+ threads->nr = total_tasks;
free(namelist);
-
- if (!threads)
- break;
}
out:
strlist__delete(slist);
return threads;
+out_free_namelist:
+ for (i = 0; i < items; i++)
+ free(namelist[i]);
+ free(namelist);
+
out_free_threads:
free(threads);
threads = NULL;
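
The thread_map fix above reroutes the realloc() failure path so the scandir()-style name list is released as well; the reduced sketch below shows the same goto-cleanup ordering with invented names and a tiny hard-coded input:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Turn a list of numeric strings into an int array, freeing everything on failure. */
static int *collect_pids(char **namelist, int items, int *nr)
{
	int *map, i;

	map = malloc(sizeof(*map) * items);
	if (map == NULL)
		goto out_free_namelist;		/* the old code leaked namelist here */

	for (i = 0; i < items; i++) {
		map[i] = atoi(namelist[i]);
		free(namelist[i]);		/* consume each entry exactly once */
	}
	free(namelist);
	*nr = items;
	return map;

out_free_namelist:
	for (i = 0; i < items; i++)
		free(namelist[i]);
	free(namelist);
	return NULL;
}

int main(void)
{
	char **names = malloc(2 * sizeof(*names));
	int nr, *pids;

	names[0] = strdup("1234");
	names[1] = strdup("5678");
	pids = collect_pids(names, 2, &nr);
	if (pids)
		printf("%d %d\n", pids[0], pids[1]);
	free(pids);
	return 0;
}
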
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 28bc57ee757c..a4162e15c25f 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -1,4 +1,4 @@
-TARGETS = breakpoints vm
+TARGETS = breakpoints kcmp mqueue vm
all:
for TARGET in $(TARGETS); do \
diff --git a/tools/testing/selftests/kcmp/Makefile b/tools/testing/selftests/kcmp/Makefile
new file mode 100644
index 000000000000..dc79b86ea65c
--- /dev/null
+++ b/tools/testing/selftests/kcmp/Makefile
@@ -0,0 +1,29 @@
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/)
+ifeq ($(ARCH),i386)
+ ARCH := X86
+ CFLAGS := -DCONFIG_X86_32 -D__i386__
+endif
+ifeq ($(ARCH),x86_64)
+ ARCH := X86
+ CFLAGS := -DCONFIG_X86_64 -D__x86_64__
+endif
+
+CFLAGS += -I../../../../arch/x86/include/generated/
+CFLAGS += -I../../../../include/
+CFLAGS += -I../../../../usr/include/
+CFLAGS += -I../../../../arch/x86/include/
+
+all:
+ifeq ($(ARCH),X86)
+ gcc $(CFLAGS) kcmp_test.c -o run_test
+else
+ echo "Not an x86 target, can't build kcmp selftest"
+endif
+
+run-tests: all
+ ./run_test
+
+clean:
+ rm -fr ./run_test
+ rm -fr ./kcmp-test-file
diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c
new file mode 100644
index 000000000000..358cc6bfa35d
--- /dev/null
+++ b/tools/testing/selftests/kcmp/kcmp_test.c
@@ -0,0 +1,94 @@
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <limits.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include <linux/unistd.h>
+#include <linux/kcmp.h>
+
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+
+static long sys_kcmp(int pid1, int pid2, int type, int fd1, int fd2)
+{
+ return syscall(__NR_kcmp, pid1, pid2, type, fd1, fd2);
+}
+
+int main(int argc, char **argv)
+{
+ const char kpath[] = "kcmp-test-file";
+ int pid1, pid2;
+ int fd1, fd2;
+ int status;
+
+ fd1 = open(kpath, O_RDWR | O_CREAT | O_TRUNC, 0644);
+ pid1 = getpid();
+
+ if (fd1 < 0) {
+ perror("Can't create file");
+ exit(1);
+ }
+
+ pid2 = fork();
+ if (pid2 < 0) {
+ perror("fork failed");
+ exit(1);
+ }
+
+ if (!pid2) {
+ int pid2 = getpid();
+ int ret;
+
+ fd2 = open(kpath, O_RDWR, 0644);
+ if (fd2 < 0) {
+ perror("Can't open file");
+ exit(1);
+ }
+
+ /* An example of output and arguments */
+ printf("pid1: %6d pid2: %6d FD: %2ld FILES: %2ld VM: %2ld "
+ "FS: %2ld SIGHAND: %2ld IO: %2ld SYSVSEM: %2ld "
+ "INV: %2ld\n",
+ pid1, pid2,
+ sys_kcmp(pid1, pid2, KCMP_FILE, fd1, fd2),
+ sys_kcmp(pid1, pid2, KCMP_FILES, 0, 0),
+ sys_kcmp(pid1, pid2, KCMP_VM, 0, 0),
+ sys_kcmp(pid1, pid2, KCMP_FS, 0, 0),
+ sys_kcmp(pid1, pid2, KCMP_SIGHAND, 0, 0),
+ sys_kcmp(pid1, pid2, KCMP_IO, 0, 0),
+ sys_kcmp(pid1, pid2, KCMP_SYSVSEM, 0, 0),
+
+ /* This one should fail */
+ sys_kcmp(pid1, pid2, KCMP_TYPES + 1, 0, 0));
+
+ /* Comparing fd1 with itself should report the same file (return 0) */
+ ret = sys_kcmp(pid1, pid2, KCMP_FILE, fd1, fd1);
+ if (ret) {
+ printf("FAIL: 0 expected but %d returned\n", ret);
+ ret = -1;
+ } else
+ printf("PASS: 0 returned as expected\n");
+
+ /* Compare with self */
+ ret = sys_kcmp(pid1, pid1, KCMP_VM, 0, 0);
+ if (ret) {
+ printf("FAIL: 0 expected but %li returned\n", ret);
+ ret = -1;
+ } else
+ printf("PASS: 0 returned as expected\n");
+
+ exit(ret);
+ }
+
+ waitpid(pid2, &status, 0);
+
+ return 0;
+}
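
The selftest above prints raw kcmp() return values; kcmp() returns 0 when the two tasks share the same kernel object, 1/2/3 to order distinct objects, and -1 with errno on error. A minimal sketch of using that convention outside the harness (a hypothetical helper; assumes a kernel built with CONFIG_CHECKPOINT_RESTORE and headers providing __NR_kcmp):

        #include <sys/syscall.h>
        #include <sys/types.h>
        #include <unistd.h>
        #include <linux/kcmp.h>

        /* Returns 1 if the two processes share one file descriptor table,
         * 0 if they do not, -1 on error (e.g. EPERM without ptrace rights,
         * or ENOSYS on a kernel without kcmp support). */
        static int share_file_table(pid_t pid1, pid_t pid2)
        {
                long ret = syscall(__NR_kcmp, pid1, pid2, KCMP_FILES, 0, 0);

                if (ret < 0)
                        return -1;
                return ret == 0;        /* 0 means the objects are identical */
        }
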
diff --git a/tools/testing/selftests/mqueue/.gitignore b/tools/testing/selftests/mqueue/.gitignore
new file mode 100644
index 000000000000..d8d42377205a
--- /dev/null
+++ b/tools/testing/selftests/mqueue/.gitignore
@@ -0,0 +1,2 @@
+mq_open_tests
+mq_perf_tests
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile
new file mode 100644
index 000000000000..54c0aad2b47c
--- /dev/null
+++ b/tools/testing/selftests/mqueue/Makefile
@@ -0,0 +1,10 @@
+all:
+ gcc -O2 mq_open_tests.c -o mq_open_tests -lrt
+ gcc -O2 mq_perf_tests.c -o mq_perf_tests -lrt -lpthread -lpopt
+
+run_tests:
+ ./mq_open_tests /test1
+ ./mq_perf_tests
+
+clean:
+ rm -f mq_open_tests mq_perf_tests
diff --git a/tools/testing/selftests/mqueue/mq_open_tests.c b/tools/testing/selftests/mqueue/mq_open_tests.c
new file mode 100644
index 000000000000..711cc2923047
--- /dev/null
+++ b/tools/testing/selftests/mqueue/mq_open_tests.c
@@ -0,0 +1,492 @@
+/*
+ * This application is Copyright 2012 Red Hat, Inc.
+ * Doug Ledford <dledford@redhat.com>
+ *
+ * mq_open_tests is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 3.
+ *
+ * mq_open_tests is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For the full text of the license, see <http://www.gnu.org/licenses/>.
+ *
+ * mq_open_tests.c
+ * Tests the various situations in which opening a POSIX message queue
+ * should either succeed or fail, and reports whether each case behaved
+ * as expected.
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <limits.h>
+#include <errno.h>
+#include <error.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <mqueue.h>
+
+static char *usage =
+"Usage:\n"
+" %s path\n"
+"\n"
+" path Path name of the message queue to create\n"
+"\n"
+" Note: this program must be run as root in order to enable all tests\n"
+"\n";
+
+char *DEF_MSGS = "/proc/sys/fs/mqueue/msg_default";
+char *DEF_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_default";
+char *MAX_MSGS = "/proc/sys/fs/mqueue/msg_max";
+char *MAX_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_max";
+
+int default_settings;
+struct rlimit saved_limits, cur_limits;
+int saved_def_msgs, saved_def_msgsize, saved_max_msgs, saved_max_msgsize;
+int cur_def_msgs, cur_def_msgsize, cur_max_msgs, cur_max_msgsize;
+FILE *def_msgs, *def_msgsize, *max_msgs, *max_msgsize;
+char *queue_path;
+mqd_t queue = -1;
+
+static inline void __set(FILE *stream, int value, char *err_msg);
+void shutdown(int exit_val, char *err_cause, int line_no);
+static inline int get(FILE *stream);
+static inline void set(FILE *stream, int value);
+static inline void getr(int type, struct rlimit *rlim);
+static inline void setr(int type, struct rlimit *rlim);
+void validate_current_settings();
+static inline void test_queue(struct mq_attr *attr, struct mq_attr *result);
+static inline int test_queue_fail(struct mq_attr *attr, struct mq_attr *result);
+
+static inline void __set(FILE *stream, int value, char *err_msg)
+{
+ rewind(stream);
+ if (fprintf(stream, "%d", value) < 0)
+ perror(err_msg);
+}
+
+
+void shutdown(int exit_val, char *err_cause, int line_no)
+{
+ static int in_shutdown = 0;
+
+ /* In case we get called recursively by a set() call below */
+ if (in_shutdown++)
+ return;
+
+ seteuid(0);
+
+ if (queue != -1)
+ if (mq_close(queue))
+ perror("mq_close() during shutdown");
+ if (queue_path)
+ /*
+ * Be silent if this fails, if we cleaned up already it's
+ * expected to fail
+ */
+ mq_unlink(queue_path);
+ if (default_settings) {
+ if (saved_def_msgs)
+ __set(def_msgs, saved_def_msgs,
+ "failed to restore saved_def_msgs");
+ if (saved_def_msgsize)
+ __set(def_msgsize, saved_def_msgsize,
+ "failed to restore saved_def_msgsize");
+ }
+ if (saved_max_msgs)
+ __set(max_msgs, saved_max_msgs,
+ "failed to restore saved_max_msgs");
+ if (saved_max_msgsize)
+ __set(max_msgsize, saved_max_msgsize,
+ "failed to restore saved_max_msgsize");
+ if (exit_val)
+ error(exit_val, errno, "%s at %d", err_cause, line_no);
+ exit(0);
+}
+
+static inline int get(FILE *stream)
+{
+ int value;
+ rewind(stream);
+ if (fscanf(stream, "%d", &value) != 1)
+ shutdown(4, "Error reading /proc entry", __LINE__ - 1);
+ return value;
+}
+
+static inline void set(FILE *stream, int value)
+{
+ int new_value;
+
+ rewind(stream);
+ if (fprintf(stream, "%d", value) < 0)
+ return shutdown(5, "Failed writing to /proc file",
+ __LINE__ - 1);
+ new_value = get(stream);
+ if (new_value != value)
+ return shutdown(5, "We didn't get what we wrote to /proc back",
+ __LINE__ - 1);
+}
+
+static inline void getr(int type, struct rlimit *rlim)
+{
+ if (getrlimit(type, rlim))
+ shutdown(6, "getrlimit()", __LINE__ - 1);
+}
+
+static inline void setr(int type, struct rlimit *rlim)
+{
+ if (setrlimit(type, rlim))
+ shutdown(7, "setrlimit()", __LINE__ - 1);
+}
+
+void validate_current_settings()
+{
+ int rlim_needed;
+
+ if (cur_limits.rlim_cur < 4096) {
+ printf("Current rlimit value for POSIX message queue bytes is "
+ "unreasonably low,\nincreasing.\n\n");
+ cur_limits.rlim_cur = 8192;
+ cur_limits.rlim_max = 16384;
+ setr(RLIMIT_MSGQUEUE, &cur_limits);
+ }
+
+ if (default_settings) {
+ rlim_needed = (cur_def_msgs + 1) * (cur_def_msgsize + 1 +
+ 2 * sizeof(void *));
+ if (rlim_needed > cur_limits.rlim_cur) {
+ printf("Temporarily lowering default queue parameters "
+ "to something that will work\n"
+ "with the current rlimit values.\n\n");
+ set(def_msgs, 10);
+ cur_def_msgs = 10;
+ set(def_msgsize, 128);
+ cur_def_msgsize = 128;
+ }
+ } else {
+ rlim_needed = (cur_max_msgs + 1) * (cur_max_msgsize + 1 +
+ 2 * sizeof(void *));
+ if (rlim_needed > cur_limits.rlim_cur) {
+ printf("Temporarily lowering maximum queue parameters "
+ "to something that will work\n"
+ "with the current rlimit values in case this is "
+ "a kernel that ties the default\n"
+ "queue parameters to the maximum queue "
+ "parameters.\n\n");
+ set(max_msgs, 10);
+ cur_max_msgs = 10;
+ set(max_msgsize, 128);
+ cur_max_msgsize = 128;
+ }
+ }
+}
+
+/*
+ * test_queue - Test opening a queue, shutdown if we fail. This should
+ * only be called in situations that should never fail. We clean up
+ * after ourselves and return the queue attributes in *result.
+ */
+static inline void test_queue(struct mq_attr *attr, struct mq_attr *result)
+{
+ int flags = O_RDWR | O_EXCL | O_CREAT;
+ int perms = DEFFILEMODE;
+
+ if ((queue = mq_open(queue_path, flags, perms, attr)) == -1)
+ shutdown(1, "mq_open()", __LINE__);
+ if (mq_getattr(queue, result))
+ shutdown(1, "mq_getattr()", __LINE__);
+ if (mq_close(queue))
+ shutdown(1, "mq_close()", __LINE__);
+ queue = -1;
+ if (mq_unlink(queue_path))
+ shutdown(1, "mq_unlink()", __LINE__);
+}
+
+/*
+ * Same as test_queue above, but failure is not fatal.
+ * Returns:
+ * 0 - Failed to create a queue
+ * 1 - Created a queue, attributes in *result
+ */
+static inline int test_queue_fail(struct mq_attr *attr, struct mq_attr *result)
+{
+ int flags = O_RDWR | O_EXCL | O_CREAT;
+ int perms = DEFFILEMODE;
+
+ if ((queue = mq_open(queue_path, flags, perms, attr)) == -1)
+ return 0;
+ if (mq_getattr(queue, result))
+ shutdown(1, "mq_getattr()", __LINE__);
+ if (mq_close(queue))
+ shutdown(1, "mq_close()", __LINE__);
+ queue = -1;
+ if (mq_unlink(queue_path))
+ shutdown(1, "mq_unlink()", __LINE__);
+ return 1;
+}
+
+int main(int argc, char *argv[])
+{
+ struct mq_attr attr, result;
+
+ if (argc != 2) {
+ fprintf(stderr, "Must pass a valid queue name\n\n");
+ fprintf(stderr, usage, argv[0]);
+ exit(1);
+ }
+
+ /*
+ * Although we can create a msg queue with a non-absolute path name,
+ * unlink will fail. So, if the name doesn't start with a /, add one
+ * when we save it.
+ */
+ if (*argv[1] == '/')
+ queue_path = strdup(argv[1]);
+ else {
+ queue_path = malloc(strlen(argv[1]) + 2);
+ if (!queue_path) {
+ perror("malloc()");
+ exit(1);
+ }
+ queue_path[0] = '/';
+ queue_path[1] = 0;
+ strcat(queue_path, argv[1]);
+ }
+
+ if (getuid() != 0) {
+ fprintf(stderr, "Not running as root, but almost all tests "
+ "require root in order to modify\nsystem settings. "
+ "Exiting.\n");
+ exit(1);
+ }
+
+ /* Find out what files there are for us to make tweaks in */
+ def_msgs = fopen(DEF_MSGS, "r+");
+ def_msgsize = fopen(DEF_MSGSIZE, "r+");
+ max_msgs = fopen(MAX_MSGS, "r+");
+ max_msgsize = fopen(MAX_MSGSIZE, "r+");
+
+ if (!max_msgs)
+ shutdown(2, "Failed to open msg_max", __LINE__);
+ if (!max_msgsize)
+ shutdown(2, "Failed to open msgsize_max", __LINE__);
+ if (def_msgs || def_msgsize)
+ default_settings = 1;
+
+ /* Load up the current system values for everything we can */
+ getr(RLIMIT_MSGQUEUE, &saved_limits);
+ cur_limits = saved_limits;
+ if (default_settings) {
+ saved_def_msgs = cur_def_msgs = get(def_msgs);
+ saved_def_msgsize = cur_def_msgsize = get(def_msgsize);
+ }
+ saved_max_msgs = cur_max_msgs = get(max_msgs);
+ saved_max_msgsize = cur_max_msgsize = get(max_msgsize);
+
+ /* Tell the user our initial state */
+ printf("\nInitial system state:\n");
+ printf("\tUsing queue path:\t\t%s\n", queue_path);
+ printf("\tRLIMIT_MSGQUEUE(soft):\t\t%d\n", saved_limits.rlim_cur);
+ printf("\tRLIMIT_MSGQUEUE(hard):\t\t%d\n", saved_limits.rlim_max);
+ printf("\tMaximum Message Size:\t\t%d\n", saved_max_msgsize);
+ printf("\tMaximum Queue Size:\t\t%d\n", saved_max_msgs);
+ if (default_settings) {
+ printf("\tDefault Message Size:\t\t%d\n", saved_def_msgsize);
+ printf("\tDefault Queue Size:\t\t%d\n", saved_def_msgs);
+ } else {
+ printf("\tDefault Message Size:\t\tNot Supported\n");
+ printf("\tDefault Queue Size:\t\tNot Supported\n");
+ }
+ printf("\n");
+
+ validate_current_settings();
+
+ printf("Adjusted system state for testing:\n");
+ printf("\tRLIMIT_MSGQUEUE(soft):\t\t%d\n", cur_limits.rlim_cur);
+ printf("\tRLIMIT_MSGQUEUE(hard):\t\t%d\n", cur_limits.rlim_max);
+ printf("\tMaximum Message Size:\t\t%d\n", cur_max_msgsize);
+ printf("\tMaximum Queue Size:\t\t%d\n", cur_max_msgs);
+ if (default_settings) {
+ printf("\tDefault Message Size:\t\t%d\n", cur_def_msgsize);
+ printf("\tDefault Queue Size:\t\t%d\n", cur_def_msgs);
+ }
+
+ printf("\n\nTest series 1, behavior when no attr struct "
+ "passed to mq_open:\n");
+ if (!default_settings) {
+ test_queue(NULL, &result);
+ printf("Given sane system settings, mq_open without an attr "
+ "struct succeeds:\tPASS\n");
+ if (result.mq_maxmsg != cur_max_msgs ||
+ result.mq_msgsize != cur_max_msgsize) {
+ printf("Kernel does not support setting the default "
+ "mq attributes,\nbut also doesn't tie the "
+ "defaults to the maximums:\t\t\tPASS\n");
+ } else {
+ set(max_msgs, ++cur_max_msgs);
+ set(max_msgsize, ++cur_max_msgsize);
+ test_queue(NULL, &result);
+ if (result.mq_maxmsg == cur_max_msgs &&
+ result.mq_msgsize == cur_max_msgsize)
+ printf("Kernel does not support setting the "
+ "default mq attributes and\n"
+ "also ties system wide defaults to "
+ "the system wide maximums:\t\t"
+ "FAIL\n");
+ else
+ printf("Kernel does not support setting the "
+ "default mq attributes,\n"
+ "but also doesn't tie the defaults to "
+ "the maximums:\t\t\tPASS\n");
+ }
+ } else {
+ printf("Kernel supports setting defaults separately from "
+ "maximums:\t\tPASS\n");
+ /*
+ * While we are here, go ahead and test that the kernel
+ * properly follows the default settings
+ */
+ test_queue(NULL, &result);
+ printf("Given sane values, mq_open without an attr struct "
+ "succeeds:\t\tPASS\n");
+ if (result.mq_maxmsg != cur_def_msgs ||
+ result.mq_msgsize != cur_def_msgsize)
+ printf("Kernel supports setting defaults, but does "
+ "not actually honor them:\tFAIL\n\n");
+ else {
+ set(def_msgs, ++cur_def_msgs);
+ set(def_msgsize, ++cur_def_msgsize);
+ /* In case max was the same as the default */
+ set(max_msgs, ++cur_max_msgs);
+ set(max_msgsize, ++cur_max_msgsize);
+ test_queue(NULL, &result);
+ if (result.mq_maxmsg != cur_def_msgs ||
+ result.mq_msgsize != cur_def_msgsize)
+ printf("Kernel supports setting defaults, but "
+ "does not actually honor them:\t"
+ "FAIL\n");
+ else
+ printf("Kernel properly honors default setting "
+ "knobs:\t\t\t\tPASS\n");
+ }
+ set(def_msgs, cur_max_msgs + 1);
+ cur_def_msgs = cur_max_msgs + 1;
+ set(def_msgsize, cur_max_msgsize + 1);
+ cur_def_msgsize = cur_max_msgsize + 1;
+ if (cur_def_msgs * (cur_def_msgsize + 2 * sizeof(void *)) >=
+ cur_limits.rlim_cur) {
+ cur_limits.rlim_cur = (cur_def_msgs + 2) *
+ (cur_def_msgsize + 2 * sizeof(void *));
+ cur_limits.rlim_max = 2 * cur_limits.rlim_cur;
+ setr(RLIMIT_MSGQUEUE, &cur_limits);
+ }
+ if (test_queue_fail(NULL, &result)) {
+ if (result.mq_maxmsg == cur_max_msgs &&
+ result.mq_msgsize == cur_max_msgsize)
+ printf("Kernel properly limits default values "
+ "to lesser of default/max:\t\tPASS\n");
+ else
+ printf("Kernel does not properly set default "
+ "queue parameters when\ndefaults > "
+ "max:\t\t\t\t\t\t\t\tFAIL\n");
+ } else
+ printf("Kernel fails to open mq because defaults are "
+ "greater than maximums:\tFAIL\n");
+ set(def_msgs, --cur_def_msgs);
+ set(def_msgsize, --cur_def_msgsize);
+ cur_limits.rlim_cur = cur_limits.rlim_max = cur_def_msgs *
+ cur_def_msgsize;
+ setr(RLIMIT_MSGQUEUE, &cur_limits);
+ if (test_queue_fail(NULL, &result))
+ printf("Kernel creates queue even though defaults "
+ "would exceed\nrlimit setting:"
+ "\t\t\t\t\t\t\t\tFAIL\n");
+ else
+ printf("Kernel properly fails to create queue when "
+ "defaults would\nexceed rlimit:"
+ "\t\t\t\t\t\t\t\tPASS\n");
+ }
+
+ /*
+ * Test #2 - open with an attr struct that exceeds rlimit
+ */
+ printf("\n\nTest series 2, behavior when attr struct is "
+ "passed to mq_open:\n");
+ cur_max_msgs = 32;
+ cur_max_msgsize = cur_limits.rlim_max >> 4;
+ set(max_msgs, cur_max_msgs);
+ set(max_msgsize, cur_max_msgsize);
+ attr.mq_maxmsg = cur_max_msgs;
+ attr.mq_msgsize = cur_max_msgsize;
+ if (test_queue_fail(&attr, &result))
+ printf("Queue open in excess of rlimit max when euid = 0 "
+ "succeeded:\t\tFAIL\n");
+ else
+ printf("Queue open in excess of rlimit max when euid = 0 "
+ "failed:\t\tPASS\n");
+ attr.mq_maxmsg = cur_max_msgs + 1;
+ attr.mq_msgsize = 10;
+ if (test_queue_fail(&attr, &result))
+ printf("Queue open with mq_maxmsg > limit when euid = 0 "
+ "succeeded:\t\tPASS\n");
+ else
+ printf("Queue open with mq_maxmsg > limit when euid = 0 "
+ "failed:\t\tFAIL\n");
+ attr.mq_maxmsg = 1;
+ attr.mq_msgsize = cur_max_msgsize + 1;
+ if (test_queue_fail(&attr, &result))
+ printf("Queue open with mq_msgsize > limit when euid = 0 "
+ "succeeded:\t\tPASS\n");
+ else
+ printf("Queue open with mq_msgsize > limit when euid = 0 "
+ "failed:\t\tFAIL\n");
+ attr.mq_maxmsg = 65536;
+ attr.mq_msgsize = 65536;
+ if (test_queue_fail(&attr, &result))
+ printf("Queue open with total size > 2GB when euid = 0 "
+ "succeeded:\t\tFAIL\n");
+ else
+ printf("Queue open with total size > 2GB when euid = 0 "
+ "failed:\t\t\tPASS\n");
+ seteuid(99);
+ attr.mq_maxmsg = cur_max_msgs;
+ attr.mq_msgsize = cur_max_msgsize;
+ if (test_queue_fail(&attr, &result))
+ printf("Queue open in excess of rlimit max when euid = 99 "
+ "succeeded:\t\tFAIL\n");
+ else
+ printf("Queue open in excess of rlimit max when euid = 99 "
+ "failed:\t\tPASS\n");
+ attr.mq_maxmsg = cur_max_msgs + 1;
+ attr.mq_msgsize = 10;
+ if (test_queue_fail(&attr, &result))
+ printf("Queue open with mq_maxmsg > limit when euid = 99 "
+ "succeeded:\t\tFAIL\n");
+ else
+ printf("Queue open with mq_maxmsg > limit when euid = 99 "
+ "failed:\t\tPASS\n");
+ attr.mq_maxmsg = 1;
+ attr.mq_msgsize = cur_max_msgsize + 1;
+ if (test_queue_fail(&attr, &result))
+ printf("Queue open with mq_msgsize > limit when euid = 99 "
+ "succeeded:\t\tFAIL\n");
+ else
+ printf("Queue open with mq_msgsize > limit when euid = 99 "
+ "failed:\t\tPASS\n");
+ attr.mq_maxmsg = 65536;
+ attr.mq_msgsize = 65536;
+ if (test_queue_fail(&attr, &result))
+ printf("Queue open with total size > 2GB when euid = 99 "
+ "succeeded:\t\tFAIL\n");
+ else
+ printf("Queue open with total size > 2GB when euid = 99 "
+ "failed:\t\t\tPASS\n");
+
+ shutdown(0,"",0);
+}
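
mq_open_tests sizes RLIMIT_MSGQUEUE with the heuristic (msgs + 1) * (msgsize + 1 + 2 * sizeof(void *)). A hedged sketch of applying the same estimate when creating a queue elsewhere (a hypothetical helper; the kernel's exact per-message accounting may differ):

        #include <sys/resource.h>

        /* Rough estimate of the RLIMIT_MSGQUEUE bytes a queue needs, using
         * the same heuristic as the test above. Raises the soft limit if
         * the hard limit already allows it; returns 0 on success. */
        static int ensure_mq_rlimit(long maxmsg, long msgsize)
        {
                struct rlimit rl;
                rlim_t needed = (rlim_t)(maxmsg + 1) *
                                (msgsize + 1 + 2 * sizeof(void *));

                if (getrlimit(RLIMIT_MSGQUEUE, &rl))
                        return -1;
                if (rl.rlim_cur >= needed)
                        return 0;
                if (needed > rl.rlim_max)
                        return -1;      /* raising the hard limit needs CAP_SYS_RESOURCE */
                rl.rlim_cur = needed;
                return setrlimit(RLIMIT_MSGQUEUE, &rl);
        }
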
diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
new file mode 100644
index 000000000000..2fadd4b97045
--- /dev/null
+++ b/tools/testing/selftests/mqueue/mq_perf_tests.c
@@ -0,0 +1,741 @@
+/*
+ * This application is Copyright 2012 Red Hat, Inc.
+ * Doug Ledford <dledford@redhat.com>
+ *
+ * mq_perf_tests is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 3.
+ *
+ * mq_perf_tests is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For the full text of the license, see <http://www.gnu.org/licenses/>.
+ *
+ * mq_perf_tests.c
+ * Tests various types of message queue workloads, concentrating on those
+ * situations that involve large message sizes, large message queue depths,
+ * or both, and reports back useful metrics about kernel message queue
+ * performance.
+ *
+ */
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <limits.h>
+#include <errno.h>
+#include <error.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <mqueue.h>
+#include <popt.h>
+
+static char *usage =
+"Usage:\n"
+" %s [-c #[,#..] -f] path\n"
+"\n"
+" -c # Skip most tests and go straight to a high queue depth test\n"
+" and then run that test continuously (useful for running at\n"
+" the same time as some other workload to see how much the\n"
+" cache thrashing caused by adding messages to a very deep\n"
+" queue impacts the performance of other programs). The number\n"
+" indicates which CPU core we should bind the process to during\n"
+" the run. If you have more than one physical CPU, then you\n"
+" will need one copy per physical CPU package, and you should\n"
+" specify the CPU cores to pin ourself to via a comma separated\n"
+" list of CPU values.\n"
+" -f Only usable with continuous mode. Pin ourself to the CPUs\n"
+" as requested, then instead of looping doing a high mq\n"
+" workload, just busy loop. This will allow us to lock up a\n"
+" single CPU just like we normally would, but without actually\n"
+" thrashing the CPU cache. This is to make it easier to get\n"
+" comparable numbers from some other workload running on the\n"
+" other CPUs. One set of numbers with # CPUs locked up running\n"
+" an mq workload, and another set of numbers with those same\n"
+" CPUs locked away from the test workload, but not doing\n"
+" anything to trash the cache like the mq workload might.\n"
+" path Path name of the message queue to create\n"
+"\n"
+" Note: this program must be run as root in order to enable all tests\n"
+"\n";
+
+char *MAX_MSGS = "/proc/sys/fs/mqueue/msg_max";
+char *MAX_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_max";
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#define MAX_CPUS 64
+char *cpu_option_string;
+int cpus_to_pin[MAX_CPUS];
+int num_cpus_to_pin;
+pthread_t cpu_threads[MAX_CPUS];
+pthread_t main_thread;
+cpu_set_t *cpu_set;
+int cpu_set_size;
+int cpus_online;
+
+#define MSG_SIZE 16
+#define TEST1_LOOPS 10000000
+#define TEST2_LOOPS 100000
+int continuous_mode;
+int continuous_mode_fake;
+
+struct rlimit saved_limits, cur_limits;
+int saved_max_msgs, saved_max_msgsize;
+int cur_max_msgs, cur_max_msgsize;
+FILE *max_msgs, *max_msgsize;
+int cur_nice;
+char *queue_path = "/mq_perf_tests";
+mqd_t queue = -1;
+struct mq_attr result;
+int mq_prio_max;
+
+const struct poptOption options[] = {
+ {
+ .longName = "continuous",
+ .shortName = 'c',
+ .argInfo = POPT_ARG_STRING,
+ .arg = &cpu_option_string,
+ .val = 'c',
+ .descrip = "Run continuous tests at a high queue depth in "
+ "order to test the effects of cache thrashing on "
+ "other tasks on the system. This test is intended "
+ "to be run on one core of each physical CPU while "
+ "some other CPU intensive task is run on all the other "
+ "cores of that same physical CPU and the other task "
+ "is timed. It is assumed that the process of adding "
+ "messages to the message queue in a tight loop will "
+ "impact that other task to some degree. Once the "
+ "tests are performed in this way, you should then "
+ "re-run the tests using fake mode in order to check "
+ "the difference in time required to perform the CPU "
+ "intensive task",
+ .argDescrip = "cpu[,cpu]",
+ },
+ {
+ .longName = "fake",
+ .shortName = 'f',
+ .argInfo = POPT_ARG_NONE,
+ .arg = &continuous_mode_fake,
+ .val = 0,
+ .descrip = "Tie up the CPUs that we would normally tie up in"
+ "continuous mode, but don't actually do any mq stuff, "
+ "just keep the CPU busy so it can't be used to process "
+ "system level tasks as this would free up resources on "
+ "the other CPU cores and skew the comparison between "
+ "the no-mqueue work and mqueue work tests",
+ .argDescrip = NULL,
+ },
+ {
+ .longName = "path",
+ .shortName = 'p',
+ .argInfo = POPT_ARG_STRING | POPT_ARGFLAG_SHOW_DEFAULT,
+ .arg = &queue_path,
+ .val = 'p',
+ .descrip = "The name of the path to use in the mqueue "
+ "filesystem for our tests",
+ .argDescrip = "pathname",
+ },
+ POPT_AUTOHELP
+ POPT_TABLEEND
+};
+
+static inline void __set(FILE *stream, int value, char *err_msg);
+void shutdown(int exit_val, char *err_cause, int line_no);
+void sig_action_SIGUSR1(int signum, siginfo_t *info, void *context);
+void sig_action(int signum, siginfo_t *info, void *context);
+static inline int get(FILE *stream);
+static inline void set(FILE *stream, int value);
+static inline int try_set(FILE *stream, int value);
+static inline void getr(int type, struct rlimit *rlim);
+static inline void setr(int type, struct rlimit *rlim);
+static inline void open_queue(struct mq_attr *attr);
+void increase_limits(void);
+
+static inline void __set(FILE *stream, int value, char *err_msg)
+{
+ rewind(stream);
+ if (fprintf(stream, "%d", value) < 0)
+ perror(err_msg);
+}
+
+
+void shutdown(int exit_val, char *err_cause, int line_no)
+{
+ static int in_shutdown = 0;
+ int errno_at_shutdown = errno;
+ int i;
+
+ /* In case we get called by multiple threads or from an sighandler */
+ if (in_shutdown++)
+ return;
+
+ for (i = 0; i < num_cpus_to_pin; i++)
+ if (cpu_threads[i]) {
+ pthread_kill(cpu_threads[i], SIGUSR1);
+ pthread_join(cpu_threads[i], NULL);
+ }
+
+ if (queue != -1)
+ if (mq_close(queue))
+ perror("mq_close() during shutdown");
+ if (queue_path)
+ /*
+ * Be silent if this fails, if we cleaned up already it's
+ * expected to fail
+ */
+ mq_unlink(queue_path);
+ if (saved_max_msgs)
+ __set(max_msgs, saved_max_msgs,
+ "failed to restore saved_max_msgs");
+ if (saved_max_msgsize)
+ __set(max_msgsize, saved_max_msgsize,
+ "failed to restore saved_max_msgsize");
+ if (exit_val)
+ error(exit_val, errno_at_shutdown, "%s at %d",
+ err_cause, line_no);
+ exit(0);
+}
+
+void sig_action_SIGUSR1(int signum, siginfo_t *info, void *context)
+{
+ if (pthread_self() != main_thread)
+ pthread_exit(0);
+ else {
+ fprintf(stderr, "Caught signal %d in SIGUSR1 handler, "
+ "exiting\n", signum);
+ shutdown(0, "", 0);
+ fprintf(stderr, "\n\nReturned from shutdown?!?!\n\n");
+ exit(0);
+ }
+}
+
+void sig_action(int signum, siginfo_t *info, void *context)
+{
+ if (pthread_self() != main_thread)
+ pthread_kill(main_thread, signum);
+ else {
+ fprintf(stderr, "Caught signal %d, exiting\n", signum);
+ shutdown(0, "", 0);
+ fprintf(stderr, "\n\nReturned from shutdown?!?!\n\n");
+ exit(0);
+ }
+}
+
+static inline int get(FILE *stream)
+{
+ int value;
+ rewind(stream);
+ if (fscanf(stream, "%d", &value) != 1)
+ shutdown(4, "Error reading /proc entry", __LINE__);
+ return value;
+}
+
+static inline void set(FILE *stream, int value)
+{
+ int new_value;
+
+ rewind(stream);
+ if (fprintf(stream, "%d", value) < 0)
+ return shutdown(5, "Failed writing to /proc file", __LINE__);
+ new_value = get(stream);
+ if (new_value != value)
+ return shutdown(5, "We didn't get what we wrote to /proc back",
+ __LINE__);
+}
+
+static inline int try_set(FILE *stream, int value)
+{
+ int new_value;
+
+ rewind(stream);
+ fprintf(stream, "%d", value);
+ new_value = get(stream);
+ return new_value == value;
+}
+
+static inline void getr(int type, struct rlimit *rlim)
+{
+ if (getrlimit(type, rlim))
+ shutdown(6, "getrlimit()", __LINE__);
+}
+
+static inline void setr(int type, struct rlimit *rlim)
+{
+ if (setrlimit(type, rlim))
+ shutdown(7, "setrlimit()", __LINE__);
+}
+
+/**
+ * open_queue - open the global queue for testing
+ * @attr - An attr struct specifying the desired queue traits
+ *
+ * The attributes the queue actually ends up with are stored in the
+ * global result struct (result is not a parameter of this function).
+ *
+ * This open is not allowed to fail; failure results in an orderly
+ * shutdown of the program. The global queue_path selects which queue
+ * to open, and the queue descriptor is saved in the global queue
+ * variable.
+ */
+static inline void open_queue(struct mq_attr *attr)
+{
+ int flags = O_RDWR | O_EXCL | O_CREAT | O_NONBLOCK;
+ int perms = DEFFILEMODE;
+
+ queue = mq_open(queue_path, flags, perms, attr);
+ if (queue == -1)
+ shutdown(1, "mq_open()", __LINE__);
+ if (mq_getattr(queue, &result))
+ shutdown(1, "mq_getattr()", __LINE__);
+ printf("\n\tQueue %s created:\n", queue_path);
+ printf("\t\tmq_flags:\t\t\t%s\n", result.mq_flags & O_NONBLOCK ?
+ "O_NONBLOCK" : "(null)");
+ printf("\t\tmq_maxmsg:\t\t\t%d\n", result.mq_maxmsg);
+ printf("\t\tmq_msgsize:\t\t\t%d\n", result.mq_msgsize);
+ printf("\t\tmq_curmsgs:\t\t\t%d\n", result.mq_curmsgs);
+}
+
+void *fake_cont_thread(void *arg)
+{
+ int i;
+
+ for (i = 0; i < num_cpus_to_pin; i++)
+ if (cpu_threads[i] == pthread_self())
+ break;
+ printf("\tStarted fake continuous mode thread %d on CPU %d\n", i,
+ cpus_to_pin[i]);
+ while (1)
+ ;
+}
+
+void *cont_thread(void *arg)
+{
+ char buff[MSG_SIZE];
+ int i, priority;
+
+ for (i = 0; i < num_cpus_to_pin; i++)
+ if (cpu_threads[i] == pthread_self())
+ break;
+ printf("\tStarted continuous mode thread %d on CPU %d\n", i,
+ cpus_to_pin[i]);
+ while (1) {
+ while (mq_send(queue, buff, sizeof(buff), 0) == 0)
+ ;
+ mq_receive(queue, buff, sizeof(buff), &priority);
+ }
+}
+
+#define drain_queue() \
+ while (mq_receive(queue, buff, MSG_SIZE, &prio_in) == MSG_SIZE)
+
+#define do_untimed_send() \
+ do { \
+ if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
+ shutdown(3, "Test send failure", __LINE__); \
+ } while (0)
+
+#define do_send_recv() \
+ do { \
+ clock_gettime(clock, &start); \
+ if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
+ shutdown(3, "Test send failure", __LINE__); \
+ clock_gettime(clock, &middle); \
+ if (mq_receive(queue, buff, MSG_SIZE, &prio_in) != MSG_SIZE) \
+ shutdown(3, "Test receive failure", __LINE__); \
+ clock_gettime(clock, &end); \
+ nsec = ((middle.tv_sec - start.tv_sec) * 1000000000) + \
+ (middle.tv_nsec - start.tv_nsec); \
+ send_total.tv_nsec += nsec; \
+ if (send_total.tv_nsec >= 1000000000) { \
+ send_total.tv_sec++; \
+ send_total.tv_nsec -= 1000000000; \
+ } \
+ nsec = ((end.tv_sec - middle.tv_sec) * 1000000000) + \
+ (end.tv_nsec - middle.tv_nsec); \
+ recv_total.tv_nsec += nsec; \
+ if (recv_total.tv_nsec >= 1000000000) { \
+ recv_total.tv_sec++; \
+ recv_total.tv_nsec -= 1000000000; \
+ } \
+ } while (0)
+
+struct test {
+ char *desc;
+ void (*func)(int *);
+};
+
+void const_prio(int *prio)
+{
+ return;
+}
+
+void inc_prio(int *prio)
+{
+ if (++*prio == mq_prio_max)
+ *prio = 0;
+}
+
+void dec_prio(int *prio)
+{
+ if (--*prio < 0)
+ *prio = mq_prio_max - 1;
+}
+
+void random_prio(int *prio)
+{
+ *prio = random() % mq_prio_max;
+}
+
+struct test test2[] = {
+ {"\n\tTest #2a: Time send/recv message, queue full, constant prio\n",
+ const_prio},
+ {"\n\tTest #2b: Time send/recv message, queue full, increasing prio\n",
+ inc_prio},
+ {"\n\tTest #2c: Time send/recv message, queue full, decreasing prio\n",
+ dec_prio},
+ {"\n\tTest #2d: Time send/recv message, queue full, random prio\n",
+ random_prio},
+ {NULL, NULL}
+};
+
+/**
+ * Tests to perform (all done with MSG_SIZE messages):
+ *
+ * 1) Time to add/remove message with 0 messages on queue
+ * 1a) with constant prio
+ * 2) Time to add/remove message when queue close to capacity:
+ * 2a) with constant prio
+ * 2b) with increasing prio
+ * 2c) with decreasing prio
+ * 2d) with random prio
+ * 3) Test limits of priorities honored (double check _SC_MQ_PRIO_MAX)
+ */
+void *perf_test_thread(void *arg)
+{
+ char buff[MSG_SIZE];
+ int prio_out, prio_in;
+ int i;
+ clockid_t clock;
+ pthread_t *t;
+ struct timespec res, start, middle, end, send_total, recv_total;
+ unsigned long long nsec;
+ struct test *cur_test;
+
+ t = &cpu_threads[0];
+ printf("\n\tStarted mqueue performance test thread on CPU %d\n",
+ cpus_to_pin[0]);
+ mq_prio_max = sysconf(_SC_MQ_PRIO_MAX);
+ if (mq_prio_max == -1)
+ shutdown(2, "sysconf(_SC_MQ_PRIO_MAX)", __LINE__);
+ if (pthread_getcpuclockid(cpu_threads[0], &clock) != 0)
+ shutdown(2, "pthread_getcpuclockid", __LINE__);
+
+ if (clock_getres(clock, &res))
+ shutdown(2, "clock_getres()", __LINE__);
+
+ printf("\t\tMax priorities:\t\t\t%d\n", mq_prio_max);
+ printf("\t\tClock resolution:\t\t%d nsec%s\n", res.tv_nsec,
+ res.tv_nsec > 1 ? "s" : "");
+
+ printf("\n\tTest #1: Time send/recv message, queue empty\n");
+ printf("\t\t(%d iterations)\n", TEST1_LOOPS);
+ prio_out = 0;
+ send_total.tv_sec = 0;
+ send_total.tv_nsec = 0;
+ recv_total.tv_sec = 0;
+ recv_total.tv_nsec = 0;
+ for (i = 0; i < TEST1_LOOPS; i++)
+ do_send_recv();
+ printf("\t\tSend msg:\t\t\t%d.%ds total time\n",
+ send_total.tv_sec, send_total.tv_nsec);
+ nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
+ send_total.tv_nsec) / TEST1_LOOPS;
+ printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+ printf("\t\tRecv msg:\t\t\t%d.%ds total time\n",
+ recv_total.tv_sec, recv_total.tv_nsec);
+ nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
+ recv_total.tv_nsec) / TEST1_LOOPS;
+ printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+
+
+ for (cur_test = test2; cur_test->desc != NULL; cur_test++) {
+ printf("%s", cur_test->desc);
+ printf("\t\t(%d iterations)\n", TEST2_LOOPS);
+ prio_out = 0;
+ send_total.tv_sec = 0;
+ send_total.tv_nsec = 0;
+ recv_total.tv_sec = 0;
+ recv_total.tv_nsec = 0;
+ printf("\t\tFilling queue...");
+ fflush(stdout);
+ clock_gettime(clock, &start);
+ for (i = 0; i < result.mq_maxmsg - 1; i++) {
+ do_untimed_send();
+ cur_test->func(&prio_out);
+ }
+ clock_gettime(clock, &end);
+ nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) *
+ 1000000000) + (end.tv_nsec - start.tv_nsec);
+ printf("done.\t\t%lld.%llds\n", nsec / 1000000000,
+ nsec % 1000000000);
+ printf("\t\tTesting...");
+ fflush(stdout);
+ for (i = 0; i < TEST2_LOOPS; i++) {
+ do_send_recv();
+ cur_test->func(&prio_out);
+ }
+ printf("done.\n");
+ printf("\t\tSend msg:\t\t\t%d.%ds total time\n",
+ send_total.tv_sec, send_total.tv_nsec);
+ nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
+ send_total.tv_nsec) / TEST2_LOOPS;
+ printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+ printf("\t\tRecv msg:\t\t\t%d.%ds total time\n",
+ recv_total.tv_sec, recv_total.tv_nsec);
+ nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
+ recv_total.tv_nsec) / TEST2_LOOPS;
+ printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+ printf("\t\tDraining queue...");
+ fflush(stdout);
+ clock_gettime(clock, &start);
+ drain_queue();
+ clock_gettime(clock, &end);
+ nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) *
+ 1000000000) + (end.tv_nsec - start.tv_nsec);
+ printf("done.\t\t%lld.%llds\n", nsec / 1000000000,
+ nsec % 1000000000);
+ }
+ return 0;
+}
+
+void increase_limits(void)
+{
+ cur_limits.rlim_cur = RLIM_INFINITY;
+ cur_limits.rlim_max = RLIM_INFINITY;
+ setr(RLIMIT_MSGQUEUE, &cur_limits);
+ while (try_set(max_msgs, cur_max_msgs += 10))
+ ;
+ cur_max_msgs = get(max_msgs);
+ while (try_set(max_msgsize, cur_max_msgsize += 1024))
+ ;
+ cur_max_msgsize = get(max_msgsize);
+ if (setpriority(PRIO_PROCESS, 0, -20) != 0)
+ shutdown(2, "setpriority()", __LINE__);
+ cur_nice = -20;
+}
+
+int main(int argc, char *argv[])
+{
+ struct mq_attr attr;
+ char *option, *next_option;
+ int i, cpu;
+ struct sigaction sa;
+ poptContext popt_context;
+ int rc;
+ void *retval;
+
+ main_thread = pthread_self();
+ num_cpus_to_pin = 0;
+
+ if (sysconf(_SC_NPROCESSORS_ONLN) == -1) {
+ perror("sysconf(_SC_NPROCESSORS_ONLN)");
+ exit(1);
+ }
+ cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN));
+ cpu_set = CPU_ALLOC(cpus_online);
+ if (cpu_set == NULL) {
+ perror("CPU_ALLOC()");
+ exit(1);
+ }
+ cpu_set_size = CPU_ALLOC_SIZE(cpus_online);
+ CPU_ZERO_S(cpu_set_size, cpu_set);
+
+ popt_context = poptGetContext(NULL, argc, (const char **)argv,
+ options, 0);
+
+ while ((rc = poptGetNextOpt(popt_context)) > 0) {
+ switch (rc) {
+ case 'c':
+ continuous_mode = 1;
+ option = cpu_option_string;
+ do {
+ next_option = strchr(option, ',');
+ if (next_option)
+ *next_option = '\0';
+ cpu = atoi(option);
+ if (cpu >= cpus_online)
+ fprintf(stderr, "CPU %d exceeds "
+ "cpus online, ignoring.\n",
+ cpu);
+ else
+ cpus_to_pin[num_cpus_to_pin++] = cpu;
+ if (next_option)
+ option = ++next_option;
+ } while (next_option && num_cpus_to_pin < MAX_CPUS);
+ /* Double check that they didn't give us the same CPU
+ * more than once */
+ for (cpu = 0; cpu < num_cpus_to_pin; cpu++) {
+ if (CPU_ISSET_S(cpus_to_pin[cpu], cpu_set_size,
+ cpu_set)) {
+ fprintf(stderr, "Any given CPU may "
+ "only be given once.\n");
+ exit(1);
+ } else
+ CPU_SET_S(cpus_to_pin[cpu],
+ cpu_set_size, cpu_set);
+ }
+ break;
+ case 'p':
+ /*
+ * Although we can create a msg queue with a
+ * non-absolute path name, unlink will fail. So,
+ * if the name doesn't start with a /, add one
+ * when we save it.
+ */
+ option = queue_path;
+ if (*option != '/') {
+ queue_path = malloc(strlen(option) + 2);
+ if (!queue_path) {
+ perror("malloc()");
+ exit(1);
+ }
+ queue_path[0] = '/';
+ queue_path[1] = 0;
+ strcat(queue_path, option);
+ free(option);
+ }
+ break;
+ }
+ }
+
+ if (continuous_mode && num_cpus_to_pin == 0) {
+ fprintf(stderr, "Must pass at least one CPU to continuous "
+ "mode.\n");
+ poptPrintUsage(popt_context, stderr, 0);
+ exit(1);
+ } else if (!continuous_mode) {
+ num_cpus_to_pin = 1;
+ cpus_to_pin[0] = cpus_online - 1;
+ }
+
+ if (getuid() != 0) {
+ fprintf(stderr, "Not running as root, but almost all tests "
+ "require root in order to modify\nsystem settings. "
+ "Exiting.\n");
+ exit(1);
+ }
+
+ max_msgs = fopen(MAX_MSGS, "r+");
+ max_msgsize = fopen(MAX_MSGSIZE, "r+");
+ if (!max_msgs)
+ shutdown(2, "Failed to open msg_max", __LINE__);
+ if (!max_msgsize)
+ shutdown(2, "Failed to open msgsize_max", __LINE__);
+
+ /* Load up the current system values for everything we can */
+ getr(RLIMIT_MSGQUEUE, &saved_limits);
+ cur_limits = saved_limits;
+ saved_max_msgs = cur_max_msgs = get(max_msgs);
+ saved_max_msgsize = cur_max_msgsize = get(max_msgsize);
+ errno = 0;
+ cur_nice = getpriority(PRIO_PROCESS, 0);
+ if (errno)
+ shutdown(2, "getpriority()", __LINE__);
+
+ /* Tell the user our initial state */
+ printf("\nInitial system state:\n");
+ printf("\tUsing queue path:\t\t\t%s\n", queue_path);
+ printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%d\n", saved_limits.rlim_cur);
+ printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%d\n", saved_limits.rlim_max);
+ printf("\tMaximum Message Size:\t\t\t%d\n", saved_max_msgsize);
+ printf("\tMaximum Queue Size:\t\t\t%d\n", saved_max_msgs);
+ printf("\tNice value:\t\t\t\t%d\n", cur_nice);
+ printf("\n");
+
+ increase_limits();
+
+ printf("Adjusted system state for testing:\n");
+ if (cur_limits.rlim_cur == RLIM_INFINITY) {
+ printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t(unlimited)\n");
+ printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t(unlimited)\n");
+ } else {
+ printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%d\n",
+ cur_limits.rlim_cur);
+ printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%d\n",
+ cur_limits.rlim_max);
+ }
+ printf("\tMaximum Message Size:\t\t\t%d\n", cur_max_msgsize);
+ printf("\tMaximum Queue Size:\t\t\t%d\n", cur_max_msgs);
+ printf("\tNice value:\t\t\t\t%d\n", cur_nice);
+ printf("\tContinuous mode:\t\t\t(%s)\n", continuous_mode ?
+ (continuous_mode_fake ? "fake mode" : "enabled") :
+ "disabled");
+ printf("\tCPUs to pin:\t\t\t\t%d", cpus_to_pin[0]);
+ for (cpu = 1; cpu < num_cpus_to_pin; cpu++)
+ printf(",%d", cpus_to_pin[cpu]);
+ printf("\n");
+
+ sa.sa_sigaction = sig_action_SIGUSR1;
+ sigemptyset(&sa.sa_mask);
+ sigaddset(&sa.sa_mask, SIGHUP);
+ sigaddset(&sa.sa_mask, SIGINT);
+ sigaddset(&sa.sa_mask, SIGQUIT);
+ sigaddset(&sa.sa_mask, SIGTERM);
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGUSR1, &sa, NULL) == -1)
+ shutdown(1, "sigaction(SIGUSR1)", __LINE__);
+ sa.sa_sigaction = sig_action;
+ if (sigaction(SIGHUP, &sa, NULL) == -1)
+ shutdown(1, "sigaction(SIGHUP)", __LINE__);
+ if (sigaction(SIGINT, &sa, NULL) == -1)
+ shutdown(1, "sigaction(SIGINT)", __LINE__);
+ if (sigaction(SIGQUIT, &sa, NULL) == -1)
+ shutdown(1, "sigaction(SIGQUIT)", __LINE__);
+ if (sigaction(SIGTERM, &sa, NULL) == -1)
+ shutdown(1, "sigaction(SIGTERM)", __LINE__);
+
+ if (!continuous_mode_fake) {
+ attr.mq_flags = O_NONBLOCK;
+ attr.mq_maxmsg = cur_max_msgs;
+ attr.mq_msgsize = MSG_SIZE;
+ open_queue(&attr);
+ }
+ for (i = 0; i < num_cpus_to_pin; i++) {
+ pthread_attr_t thread_attr;
+ void *thread_func;
+
+ if (continuous_mode_fake)
+ thread_func = &fake_cont_thread;
+ else if (continuous_mode)
+ thread_func = &cont_thread;
+ else
+ thread_func = &perf_test_thread;
+
+ CPU_ZERO_S(cpu_set_size, cpu_set);
+ CPU_SET_S(cpus_to_pin[i], cpu_set_size, cpu_set);
+ pthread_attr_init(&thread_attr);
+ pthread_attr_setaffinity_np(&thread_attr, cpu_set_size,
+ cpu_set);
+ if (pthread_create(&cpu_threads[i], &thread_attr, thread_func,
+ NULL))
+ shutdown(1, "pthread_create()", __LINE__);
+ pthread_attr_destroy(&thread_attr);
+ }
+
+ if (!continuous_mode) {
+ pthread_join(cpu_threads[0], &retval);
+ shutdown((long)retval, "perf_test_thread()", __LINE__);
+ } else {
+ while (1)
+ sleep(1);
+ }
+ shutdown(0, "", 0);
+}
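
The do_send_recv() macro above brackets each mq_send()/mq_receive() pair with clock_gettime() on the thread's CPU clock. A stripped-down sketch of the same measurement for a single send, using CLOCK_MONOTONIC for simplicity (a hypothetical demo program; build with -lrt on older glibc, and the queue name is just an example):

        #include <fcntl.h>
        #include <mqueue.h>
        #include <stdio.h>
        #include <time.h>

        int main(void)
        {
                struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 16 };
                char buf[16] = "ping";
                struct timespec t0, t1;
                long nsec;
                mqd_t q;

                q = mq_open("/mq_timing_demo", O_RDWR | O_CREAT | O_EXCL,
                            0600, &attr);
                if (q == (mqd_t)-1)
                        return 1;

                /* Time one send the way the test times each iteration */
                clock_gettime(CLOCK_MONOTONIC, &t0);
                mq_send(q, buf, sizeof(buf), 0);
                clock_gettime(CLOCK_MONOTONIC, &t1);

                nsec = (t1.tv_sec - t0.tv_sec) * 1000000000L +
                       (t1.tv_nsec - t0.tv_nsec);
                printf("send took %ld nsec\n", nsec);

                mq_close(q);
                mq_unlink("/mq_timing_demo");
                return 0;
        }
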
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 7dab7b25b5c6..f576971f6556 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -35,6 +35,7 @@
#include <sys/mount.h>
#include <sys/statfs.h>
#include "../../include/linux/magic.h"
+#include "../../include/linux/kernel-page-flags.h"
#ifndef MAX_PATH
@@ -73,33 +74,6 @@
#define KPF_BYTES 8
#define PROC_KPAGEFLAGS "/proc/kpageflags"
-/* copied from kpageflags_read() */
-#define KPF_LOCKED 0
-#define KPF_ERROR 1
-#define KPF_REFERENCED 2
-#define KPF_UPTODATE 3
-#define KPF_DIRTY 4
-#define KPF_LRU 5
-#define KPF_ACTIVE 6
-#define KPF_SLAB 7
-#define KPF_WRITEBACK 8
-#define KPF_RECLAIM 9
-#define KPF_BUDDY 10
-
-/* [11-20] new additions in 2.6.31 */
-#define KPF_MMAP 11
-#define KPF_ANON 12
-#define KPF_SWAPCACHE 13
-#define KPF_SWAPBACKED 14
-#define KPF_COMPOUND_HEAD 15
-#define KPF_COMPOUND_TAIL 16
-#define KPF_HUGE 17
-#define KPF_UNEVICTABLE 18
-#define KPF_HWPOISON 19
-#define KPF_NOPAGE 20
-#define KPF_KSM 21
-#define KPF_THP 22
-
/* [32-] kernel hacking assistances */
#define KPF_RESERVED 32
#define KPF_MLOCKED 33
@@ -326,7 +300,7 @@ static char *page_flag_name(uint64_t flags)
{
static char buf[65];
int present;
- int i, j;
+ size_t i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) {
present = (flags >> i) & 1;
@@ -344,7 +318,7 @@ static char *page_flag_name(uint64_t flags)
static char *page_flag_longname(uint64_t flags)
{
static char buf[1024];
- int i, n;
+ size_t i, n;
for (i = 0, n = 0; i < ARRAY_SIZE(page_flag_names); i++) {
if (!page_flag_names[i])
@@ -402,7 +376,7 @@ static void show_page(unsigned long voffset,
static void show_summary(void)
{
- int i;
+ size_t i;
printf(" flags\tpage-count MB"
" symbolic-flags\t\t\tlong-symbolic-flags\n");
@@ -500,7 +474,7 @@ static int debugfs_valid_mountpoint(const char *debugfs)
/* find the path to the mounted debugfs */
static const char *debugfs_find_mountpoint(void)
{
- const char **ptr;
+ const char *const *ptr;
char type[100];
FILE *fp;
@@ -537,7 +511,7 @@ static const char *debugfs_find_mountpoint(void)
static void debugfs_mount(void)
{
- const char **ptr;
+ const char *const *ptr;
/* see if it's already mounted */
if (debugfs_find_mountpoint())
@@ -614,10 +588,10 @@ static int unpoison_page(unsigned long offset)
* page frame walker
*/
-static int hash_slot(uint64_t flags)
+static size_t hash_slot(uint64_t flags)
{
- int k = HASH_KEY(flags);
- int i;
+ size_t k = HASH_KEY(flags);
+ size_t i;
/* Explicitly reserve slot 0 for flags 0: the following logic
* cannot distinguish an unoccupied slot from slot (flags==0).
@@ -670,7 +644,7 @@ static void walk_pfn(unsigned long voffset,
{
uint64_t buf[KPAGEFLAGS_BATCH];
unsigned long batch;
- long pages;
+ unsigned long pages;
unsigned long i;
while (count) {
@@ -779,7 +753,7 @@ static const char *page_flag_type(uint64_t flag)
static void usage(void)
{
- int i, j;
+ size_t i, j;
printf(
"page-types [options]\n"
@@ -938,7 +912,7 @@ static void add_bits_filter(uint64_t mask, uint64_t bits)
static uint64_t parse_flag_name(const char *str, int len)
{
- int i;
+ size_t i;
if (!*str || !len)
return 0;
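
page-types reads 64-bit flag words from /proc/kpageflags (KPF_BYTES above) and now takes the bit numbers from the shared kernel-page-flags.h header. A minimal sketch of reading one PFN's flags directly (a hypothetical helper; needs root, and KPF_THP is hard-coded to the value 22 shown in the removed block above rather than pulled from the kernel header):

        #include <fcntl.h>
        #include <stdint.h>
        #include <unistd.h>

        #define KPF_THP 22      /* matches include/linux/kernel-page-flags.h */

        /* Read the 64-bit flags word for one page frame from /proc/kpageflags
         * and report whether it is part of a transparent huge page.
         * Returns 1/0, or -1 on error. */
        static int pfn_is_thp(uint64_t pfn)
        {
                uint64_t flags;
                int fd = open("/proc/kpageflags", O_RDONLY);

                if (fd < 0)
                        return -1;
                if (pread(fd, &flags, sizeof(flags),
                          pfn * sizeof(flags)) != sizeof(flags)) {
                        close(fd);
                        return -1;
                }
                close(fd);
                return (flags >> KPF_THP) & 1;
        }
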
diff --git a/usr/Kconfig b/usr/Kconfig
index 65b845bd4e3e..085872bb2bb5 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -134,7 +134,7 @@ config INITRAMFS_COMPRESSION_BZIP2
depends on RD_BZIP2
help
Its compression ratio and speed is intermediate.
- Decompression speed is slowest among the four. The initramfs
+ Decompression speed is slowest among the choices. The initramfs
size is about 10% smaller with bzip2, in comparison to gzip.
Bzip2 uses a large amount of memory. For modern kernels you
will need at least 8MB RAM or more for booting.
@@ -143,9 +143,9 @@ config INITRAMFS_COMPRESSION_LZMA
bool "LZMA"
depends on RD_LZMA
help
- The most recent compression algorithm.
- Its ratio is best, decompression speed is between the other
- three. Compression is slowest. The initramfs size is about 33%
+ This algorithm's compression ratio is best.
+ Decompression speed falls between the other choices.
+ Compression is slowest. The initramfs size is about 33%
smaller with LZMA in comparison to gzip.
config INITRAMFS_COMPRESSION_XZ
@@ -161,7 +161,7 @@ config INITRAMFS_COMPRESSION_LZO
bool "LZO"
depends on RD_LZO
help
- Its compression ratio is the poorest among the four. The kernel
+ Its compression ratio is the poorest among the choices. The kernel
size is about 10% bigger than gzip; however its speed
(both compression and decompression) is the fastest.
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index f63ccb0a5982..28694f4a9139 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -18,3 +18,6 @@ config KVM_MMIO
config KVM_ASYNC_PF
bool
+
+config HAVE_KVM_MSI
+ bool
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index dcaf272c26c0..26fd54dc459e 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -254,13 +254,17 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
}
}
+bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
+{
+ struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+ smp_rmb();
+ return test_bit(vector, ioapic->handled_vectors);
+}
+
void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
- smp_rmb();
- if (!test_bit(vector, ioapic->handled_vectors))
- return;
spin_lock(&ioapic->lock);
__kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
spin_unlock(&ioapic->lock);
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 0b190c34ccc3..32872a09b63f 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -71,6 +71,7 @@ int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, int dest, int dest_mode);
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
+bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
int kvm_ioapic_init(struct kvm *kvm);
void kvm_ioapic_destroy(struct kvm *kvm);
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 9f614b4e365f..a6a0365475ed 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -138,6 +138,20 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
}
+int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
+{
+ struct kvm_kernel_irq_routing_entry route;
+
+ if (!irqchip_in_kernel(kvm) || msi->flags != 0)
+ return -EINVAL;
+
+ route.msi.address_lo = msi->address_lo;
+ route.msi.address_hi = msi->address_hi;
+ route.msi.data = msi->data;
+
+ return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+}
+
/*
* Return value:
* < 0 Interrupt was ignored (masked or not delivered for other reasons)
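
kvm_send_userspace_msi() above backs the new KVM_SIGNAL_MSI ioctl added in the kvm_main.c hunk below. A hedged sketch of the intended userspace call (a hypothetical helper; vm_fd is a descriptor from KVM_CREATE_VM, struct kvm_msi comes from <linux/kvm.h>, and the address/data values are just one x86 MSI encoding example):

        #include <linux/kvm.h>
        #include <string.h>
        #include <sys/ioctl.h>

        /* Inject an edge-triggered, fixed-delivery MSI aimed at APIC ID 0
         * into a VM that uses the in-kernel irqchip. */
        static int signal_msi(int vm_fd, unsigned int vector)
        {
                struct kvm_msi msi;

                memset(&msi, 0, sizeof(msi));
                msi.address_lo = 0xfee00000;    /* destination: APIC ID 0, physical */
                msi.address_hi = 0;
                msi.data = vector;              /* fixed delivery mode, edge */
                msi.flags = 0;                  /* must be zero, see the check above */

                return ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
        }
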
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9739b533ca2e..7e140683ff14 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -522,12 +522,11 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
return;
if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
- vfree(memslot->dirty_bitmap_head);
+ vfree(memslot->dirty_bitmap);
else
- kfree(memslot->dirty_bitmap_head);
+ kfree(memslot->dirty_bitmap);
memslot->dirty_bitmap = NULL;
- memslot->dirty_bitmap_head = NULL;
}
/*
@@ -611,8 +610,7 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
/*
* Allocation size is twice as large as the actual dirty bitmap size.
- * This makes it possible to do double buffering: see x86's
- * kvm_vm_ioctl_get_dirty_log().
+ * See x86's kvm_vm_ioctl_get_dirty_log() for why this is needed.
*/
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
@@ -627,8 +625,6 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
if (!memslot->dirty_bitmap)
return -ENOMEM;
- memslot->dirty_bitmap_head = memslot->dirty_bitmap;
- memslot->nr_dirty_pages = 0;
#endif /* !CONFIG_S390 */
return 0;
}
@@ -1477,8 +1473,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
- if (!test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap))
- memslot->nr_dirty_pages++;
+ /* TODO: introduce set_bit_le() and use it */
+ test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap);
}
}
@@ -1515,6 +1511,30 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
finish_wait(&vcpu->wq, &wait);
}
+#ifndef CONFIG_S390
+/*
+ * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
+ */
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ int me;
+ int cpu = vcpu->cpu;
+ wait_queue_head_t *wqp;
+
+ wqp = kvm_arch_vcpu_wq(vcpu);
+ if (waitqueue_active(wqp)) {
+ wake_up_interruptible(wqp);
+ ++vcpu->stat.halt_wakeup;
+ }
+
+ me = get_cpu();
+ if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
+ if (kvm_arch_vcpu_should_kick(vcpu))
+ smp_send_reschedule(cpu);
+ put_cpu();
+}
+#endif /* !CONFIG_S390 */
+
void kvm_resched(struct kvm_vcpu *vcpu)
{
if (!need_resched())
@@ -1523,6 +1543,31 @@ void kvm_resched(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_resched);
+bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
+{
+ struct pid *pid;
+ struct task_struct *task = NULL;
+
+ rcu_read_lock();
+ pid = rcu_dereference(target->pid);
+ if (pid)
+ task = get_pid_task(target->pid, PIDTYPE_PID);
+ rcu_read_unlock();
+ if (!task)
+ return false;
+ if (task->flags & PF_VCPU) {
+ put_task_struct(task);
+ return false;
+ }
+ if (yield_to(task, 1)) {
+ put_task_struct(task);
+ return true;
+ }
+ put_task_struct(task);
+ return false;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
+
void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
struct kvm *kvm = me->kvm;
@@ -1541,8 +1586,6 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
*/
for (pass = 0; pass < 2 && !yielded; pass++) {
kvm_for_each_vcpu(i, vcpu, kvm) {
- struct task_struct *task = NULL;
- struct pid *pid;
if (!pass && i < last_boosted_vcpu) {
i = last_boosted_vcpu;
continue;
@@ -1552,24 +1595,11 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
continue;
if (waitqueue_active(&vcpu->wq))
continue;
- rcu_read_lock();
- pid = rcu_dereference(vcpu->pid);
- if (pid)
- task = get_pid_task(vcpu->pid, PIDTYPE_PID);
- rcu_read_unlock();
- if (!task)
- continue;
- if (task->flags & PF_VCPU) {
- put_task_struct(task);
- continue;
- }
- if (yield_to(task, 1)) {
- put_task_struct(task);
+ if (kvm_vcpu_yield_to(vcpu)) {
kvm->last_boosted_vcpu = i;
yielded = 1;
break;
}
- put_task_struct(task);
}
}
}
@@ -2040,6 +2070,17 @@ static long kvm_vm_ioctl(struct file *filp,
mutex_unlock(&kvm->lock);
break;
#endif
+#ifdef CONFIG_HAVE_KVM_MSI
+ case KVM_SIGNAL_MSI: {
+ struct kvm_msi msi;
+
+ r = -EFAULT;
+ if (copy_from_user(&msi, argp, sizeof msi))
+ goto out;
+ r = kvm_send_userspace_msi(kvm, &msi);
+ break;
+ }
+#endif
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
if (r == -ENOTTY)
@@ -2168,6 +2209,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
case KVM_CAP_SET_BOOT_CPU_ID:
#endif
case KVM_CAP_INTERNAL_ERROR_DATA:
+#ifdef CONFIG_HAVE_KVM_MSI
+ case KVM_CAP_SIGNAL_MSI:
+#endif
return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
case KVM_CAP_IRQ_ROUTING:
@@ -2394,9 +2438,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
gpa_t addr, int len)
{
- if (bus->dev_count == NR_IOBUS_DEVS)
- return -ENOSPC;
-
bus->range[bus->dev_count++] = (struct kvm_io_range) {
.addr = addr,
.len = len,
@@ -2496,12 +2537,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx];
- if (bus->dev_count > NR_IOBUS_DEVS-1)
+ if (bus->dev_count > NR_IOBUS_DEVS - 1)
return -ENOSPC;
- new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
+ new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
+ sizeof(struct kvm_io_range)), GFP_KERNEL);
if (!new_bus)
return -ENOMEM;
+ memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
+ sizeof(struct kvm_io_range)));
kvm_io_bus_insert_dev(new_bus, dev, addr, len);
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
@@ -2518,27 +2562,25 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx];
-
- new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
- if (!new_bus)
- return -ENOMEM;
-
r = -ENOENT;
- for (i = 0; i < new_bus->dev_count; i++)
- if (new_bus->range[i].dev == dev) {
+ for (i = 0; i < bus->dev_count; i++)
+ if (bus->range[i].dev == dev) {
r = 0;
- new_bus->dev_count--;
- new_bus->range[i] = new_bus->range[new_bus->dev_count];
- sort(new_bus->range, new_bus->dev_count,
- sizeof(struct kvm_io_range),
- kvm_io_bus_sort_cmp, NULL);
break;
}
- if (r) {
- kfree(new_bus);
+ if (r)
return r;
- }
+
+ new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
+ sizeof(struct kvm_io_range)), GFP_KERNEL);
+ if (!new_bus)
+ return -ENOMEM;
+
+ memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+ new_bus->dev_count--;
+ memcpy(new_bus->range + i, bus->range + i + 1,
+ (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);